Diffstat (limited to 'clang/lib')
-rw-r--r--  clang/lib/AST/ByteCode/Compiler.cpp | 91
-rw-r--r--  clang/lib/AST/ByteCode/InterpBuiltin.cpp | 63
-rw-r--r--  clang/lib/AST/CMakeLists.txt | 1
-rw-r--r--  clang/lib/AST/ExprConstant.cpp | 36
-rw-r--r--  clang/lib/AST/InferAlloc.cpp | 201
-rw-r--r--  clang/lib/AST/StmtOpenACC.cpp | 44
-rw-r--r--  clang/lib/Analysis/FlowSensitive/Models/UncheckedStatusOrAccessModel.cpp | 89
-rw-r--r--  clang/lib/CIR/CodeGen/CIRGenBuiltin.cpp | 21
-rw-r--r--  clang/lib/CIR/CodeGen/CIRGenCXX.cpp | 2
-rw-r--r--  clang/lib/CIR/CodeGen/CIRGenCXXABI.h | 2
-rw-r--r--  clang/lib/CIR/CodeGen/CIRGenExpr.cpp | 16
-rw-r--r--  clang/lib/CIR/CodeGen/CIRGenExprCXX.cpp | 26
-rw-r--r--  clang/lib/CIR/CodeGen/CIRGenExprConstant.cpp | 39
-rw-r--r--  clang/lib/CIR/CodeGen/CIRGenItaniumCXXABI.cpp | 7
-rw-r--r--  clang/lib/CIR/CodeGen/CIRGenStmtOpenACC.cpp | 57
-rw-r--r--  clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp | 9
-rw-r--r--  clang/lib/CodeGen/BackendUtil.cpp | 3
-rw-r--r--  clang/lib/CodeGen/CGExpr.cpp | 198
-rw-r--r--  clang/lib/CodeGen/CGHLSLRuntime.cpp | 100
-rw-r--r--  clang/lib/CodeGen/CGHLSLRuntime.h | 34
-rw-r--r--  clang/lib/CodeGen/CGOpenMPRuntime.cpp | 15
-rw-r--r--  clang/lib/CodeGen/CGOpenMPRuntime.h | 4
-rw-r--r--  clang/lib/CodeGen/CodeGenFunction.h | 9
-rw-r--r--  clang/lib/Driver/ToolChains/CommonArgs.cpp | 29
-rw-r--r--  clang/lib/Driver/ToolChains/PS4CPU.cpp | 11
-rw-r--r--  clang/lib/Format/TokenAnnotator.cpp | 22
-rw-r--r--  clang/lib/Frontend/CompilerInstance.cpp | 5
-rw-r--r--  clang/lib/Headers/CMakeLists.txt | 3
-rw-r--r--  clang/lib/Headers/__float_float.h | 176
-rw-r--r--  clang/lib/Headers/__float_header_macro.h | 12
-rw-r--r--  clang/lib/Headers/__float_infinity_nan.h | 20
-rw-r--r--  clang/lib/Headers/avx2intrin.h | 21
-rw-r--r--  clang/lib/Headers/float.h | 180
-rw-r--r--  clang/lib/Headers/module.modulemap | 18
-rw-r--r--  clang/lib/Headers/tmmintrin.h | 52
-rw-r--r--  clang/lib/Lex/HeaderSearch.cpp | 6
-rw-r--r--  clang/lib/Sema/SemaChecking.cpp | 22
-rw-r--r--  clang/lib/Sema/SemaConcept.cpp | 25
-rw-r--r--  clang/lib/Sema/SemaHLSL.cpp | 105
-rw-r--r--  clang/lib/Sema/SemaOpenACC.cpp | 1
-rw-r--r--  clang/lib/Sema/SemaOpenMP.cpp | 4
-rw-r--r--  clang/lib/Tooling/DependencyScanning/DependencyScannerImpl.cpp | 11
42 files changed, 1173 insertions(+), 617 deletions(-)
diff --git a/clang/lib/AST/ByteCode/Compiler.cpp b/clang/lib/AST/ByteCode/Compiler.cpp
index f15b3c1..836d22f 100644
--- a/clang/lib/AST/ByteCode/Compiler.cpp
+++ b/clang/lib/AST/ByteCode/Compiler.cpp
@@ -1842,7 +1842,6 @@ bool Compiler<Emitter>::visitInitList(ArrayRef<const Expr *> Inits,
const Expr *Init, PrimType T,
bool Activate = false) -> bool {
InitStackScope<Emitter> ISS(this, isa<CXXDefaultInitExpr>(Init));
- InitLinkScope<Emitter> ILS(this, InitLink::Field(FieldToInit->Offset));
if (!this->visit(Init))
return false;
@@ -5385,55 +5384,57 @@ bool Compiler<Emitter>::VisitCXXThisExpr(const CXXThisExpr *E) {
// instance pointer of the current function frame, but e.g. to the declaration
// currently being initialized. Here we emit the necessary instruction(s) for
// this scenario.
- if (!InitStackActive)
+ if (!InitStackActive || InitStack.empty())
return this->emitThis(E);
- if (!InitStack.empty()) {
- // If our init stack is, for example:
- // 0 Stack: 3 (decl)
- // 1 Stack: 6 (init list)
- // 2 Stack: 1 (field)
- // 3 Stack: 6 (init list)
- // 4 Stack: 1 (field)
- //
- // We want to find the LAST element in it that's an init list,
- // which is marked with the K_InitList marker. The index right
- // before that points to an init list. We need to find the
- // elements before the K_InitList element that point to a base
- // (e.g. a decl or This), optionally followed by field, elem, etc.
- // In the example above, we want to emit elements [0..2].
- unsigned StartIndex = 0;
- unsigned EndIndex = 0;
- // Find the init list.
- for (StartIndex = InitStack.size() - 1; StartIndex > 0; --StartIndex) {
- if (InitStack[StartIndex].Kind == InitLink::K_InitList ||
- InitStack[StartIndex].Kind == InitLink::K_This) {
- EndIndex = StartIndex;
- --StartIndex;
- break;
- }
+ // If our init stack is, for example:
+ // 0 Stack: 3 (decl)
+ // 1 Stack: 6 (init list)
+ // 2 Stack: 1 (field)
+ // 3 Stack: 6 (init list)
+ // 4 Stack: 1 (field)
+ //
+ // We want to find the LAST element in it that's an init list,
+ // which is marked with the K_InitList marker. The index right
+ // before that points to an init list. We need to find the
+ // elements before the K_InitList element that point to a base
+ // (e.g. a decl or This), optionally followed by field, elem, etc.
+ // In the example above, we want to emit elements [0..2].
+ unsigned StartIndex = 0;
+ unsigned EndIndex = 0;
+ // Find the init list.
+ for (StartIndex = InitStack.size() - 1; StartIndex > 0; --StartIndex) {
+ if (InitStack[StartIndex].Kind == InitLink::K_InitList ||
+ InitStack[StartIndex].Kind == InitLink::K_This) {
+ EndIndex = StartIndex;
+ --StartIndex;
+ break;
}
+ }
- // Walk backwards to find the base.
- for (; StartIndex > 0; --StartIndex) {
- if (InitStack[StartIndex].Kind == InitLink::K_InitList)
- continue;
+ // Walk backwards to find the base.
+ for (; StartIndex > 0; --StartIndex) {
+ if (InitStack[StartIndex].Kind == InitLink::K_InitList)
+ continue;
- if (InitStack[StartIndex].Kind != InitLink::K_Field &&
- InitStack[StartIndex].Kind != InitLink::K_Elem)
- break;
- }
+ if (InitStack[StartIndex].Kind != InitLink::K_Field &&
+ InitStack[StartIndex].Kind != InitLink::K_Elem)
+ break;
+ }
- // Emit the instructions.
- for (unsigned I = StartIndex; I != EndIndex; ++I) {
- if (InitStack[I].Kind == InitLink::K_InitList)
- continue;
- if (!InitStack[I].template emit<Emitter>(this, E))
- return false;
- }
- return true;
+ if (StartIndex == 0 && EndIndex == 0)
+ EndIndex = InitStack.size() - 1;
+
+ assert(StartIndex < EndIndex);
+
+ // Emit the instructions.
+ for (unsigned I = StartIndex; I != (EndIndex + 1); ++I) {
+ if (InitStack[I].Kind == InitLink::K_InitList)
+ continue;
+ if (!InitStack[I].template emit<Emitter>(this, E))
+ return false;
}
- return this->emitThis(E);
+ return true;
}
template <class Emitter> bool Compiler<Emitter>::visitStmt(const Stmt *S) {
@@ -6295,6 +6296,10 @@ bool Compiler<Emitter>::compileConstructor(const CXXConstructorDecl *Ctor) {
}
assert(NestedField);
+ unsigned FirstLinkOffset =
+ R->getField(cast<FieldDecl>(IFD->chain()[0]))->Offset;
+ InitStackScope<Emitter> ISS(this, isa<CXXDefaultInitExpr>(InitExpr));
+ InitLinkScope<Emitter> ILS(this, InitLink::Field(FirstLinkOffset));
if (!emitFieldInitializer(NestedField, NestedFieldOffset, InitExpr,
IsUnion))
return false;
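
The init-stack bookkeeping above matters for constexpr evaluation of default member initializers, where 'this' must resolve to the object currently being initialized rather than the current function frame. A minimal sketch of the kind of code this serves (the types here are illustrative, not from the patch):

    struct Inner { int x = 1; };
    struct Outer {
      Inner i;
      int y = i.x + 1;      // CXXDefaultInitExpr: 'this' is the Outer being built
    };
    constexpr Outer o{};    // evaluated by the bytecode interpreter
    static_assert(o.y == 2);
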
diff --git a/clang/lib/AST/ByteCode/InterpBuiltin.cpp b/clang/lib/AST/ByteCode/InterpBuiltin.cpp
index ff83c52..ff50e6d 100644
--- a/clang/lib/AST/ByteCode/InterpBuiltin.cpp
+++ b/clang/lib/AST/ByteCode/InterpBuiltin.cpp
@@ -12,12 +12,14 @@
#include "InterpHelpers.h"
#include "PrimType.h"
#include "Program.h"
+#include "clang/AST/InferAlloc.h"
#include "clang/AST/OSLog.h"
#include "clang/AST/RecordLayout.h"
#include "clang/Basic/Builtins.h"
#include "clang/Basic/TargetBuiltins.h"
#include "clang/Basic/TargetInfo.h"
#include "llvm/ADT/StringExtras.h"
+#include "llvm/Support/AllocToken.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/SipHash.h"
@@ -1307,6 +1309,45 @@ interp__builtin_ptrauth_string_discriminator(InterpState &S, CodePtr OpPC,
return true;
}
+static bool interp__builtin_infer_alloc_token(InterpState &S, CodePtr OpPC,
+ const InterpFrame *Frame,
+ const CallExpr *Call) {
+ const ASTContext &ASTCtx = S.getASTContext();
+ uint64_t BitWidth = ASTCtx.getTypeSize(ASTCtx.getSizeType());
+ auto Mode =
+ ASTCtx.getLangOpts().AllocTokenMode.value_or(llvm::DefaultAllocTokenMode);
+ uint64_t MaxTokens =
+ ASTCtx.getLangOpts().AllocTokenMax.value_or(~0ULL >> (64 - BitWidth));
+
+ // We do not read any of the arguments; discard them.
+ for (int I = Call->getNumArgs() - 1; I >= 0; --I)
+ discard(S.Stk, *S.getContext().classify(Call->getArg(I)));
+
+ // Note: Type inference from a surrounding cast is not supported in
+ // constexpr evaluation.
+ QualType AllocType = infer_alloc::inferPossibleType(Call, ASTCtx, nullptr);
+ if (AllocType.isNull()) {
+ S.CCEDiag(Call,
+ diag::note_constexpr_infer_alloc_token_type_inference_failed);
+ return false;
+ }
+
+ auto ATMD = infer_alloc::getAllocTokenMetadata(AllocType, ASTCtx);
+ if (!ATMD) {
+ S.CCEDiag(Call, diag::note_constexpr_infer_alloc_token_no_metadata);
+ return false;
+ }
+
+ auto MaybeToken = llvm::getAllocToken(Mode, *ATMD, MaxTokens);
+ if (!MaybeToken) {
+ S.CCEDiag(Call, diag::note_constexpr_infer_alloc_token_stateful_mode);
+ return false;
+ }
+
+ pushInteger(S, llvm::APInt(BitWidth, *MaybeToken), ASTCtx.getSizeType());
+ return true;
+}
+
static bool interp__builtin_operator_new(InterpState &S, CodePtr OpPC,
const InterpFrame *Frame,
const CallExpr *Call) {
@@ -3471,7 +3512,7 @@ bool InterpretBuiltin(InterpState &S, CodePtr OpPC, const CallExpr *Call,
case Builtin::BI_lrotl:
case Builtin::BI_rotl64:
return interp__builtin_elementwise_int_binop(
- S, OpPC, Call, [](const APSInt &Value, const APSInt &Amount) -> APInt {
+ S, OpPC, Call, [](const APSInt &Value, const APSInt &Amount) {
return Value.rotl(Amount);
});
@@ -3485,7 +3526,7 @@ bool InterpretBuiltin(InterpState &S, CodePtr OpPC, const CallExpr *Call,
case Builtin::BI_lrotr:
case Builtin::BI_rotr64:
return interp__builtin_elementwise_int_binop(
- S, OpPC, Call, [](const APSInt &Value, const APSInt &Amount) -> APInt {
+ S, OpPC, Call, [](const APSInt &Value, const APSInt &Amount) {
return Value.rotr(Amount);
});
@@ -3694,6 +3735,9 @@ bool InterpretBuiltin(InterpState &S, CodePtr OpPC, const CallExpr *Call,
case Builtin::BI__builtin_ptrauth_string_discriminator:
return interp__builtin_ptrauth_string_discriminator(S, OpPC, Frame, Call);
+ case Builtin::BI__builtin_infer_alloc_token:
+ return interp__builtin_infer_alloc_token(S, OpPC, Frame, Call);
+
case Builtin::BI__noop:
pushInteger(S, 0, Call->getType());
return true;
@@ -3809,6 +3853,21 @@ bool InterpretBuiltin(InterpState &S, CodePtr OpPC, const CallExpr *Call,
return interp__builtin_ia32_movmsk_op(S, OpPC, Call);
}
+ case X86::BI__builtin_ia32_psignb128:
+ case X86::BI__builtin_ia32_psignb256:
+ case X86::BI__builtin_ia32_psignw128:
+ case X86::BI__builtin_ia32_psignw256:
+ case X86::BI__builtin_ia32_psignd128:
+ case X86::BI__builtin_ia32_psignd256:
+ return interp__builtin_elementwise_int_binop(
+ S, OpPC, Call, [](const APInt &AElem, const APInt &BElem) {
+ if (BElem.isZero())
+ return APInt::getZero(AElem.getBitWidth());
+ if (BElem.isNegative())
+ return -AElem;
+ return AElem;
+ });
+
case clang::X86::BI__builtin_ia32_pavgb128:
case clang::X86::BI__builtin_ia32_pavgw128:
case clang::X86::BI__builtin_ia32_pavgb256:
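
The psign cases added above apply the same per-element rule in the bytecode interpreter as the ExprConstant change below. A scalar sketch of that rule, written as plain constexpr C++ rather than the actual vector builtins:

    constexpr int psign_elem(int a, int b) {
      if (b == 0)
        return 0;              // control element is zero -> result is zero
      return b < 0 ? -a : a;   // negative -> negate, otherwise pass through
    }
    static_assert(psign_elem(7, -1) == -7);
    static_assert(psign_elem(7, 0) == 0);
    static_assert(psign_elem(7, 3) == 7);
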
diff --git a/clang/lib/AST/CMakeLists.txt b/clang/lib/AST/CMakeLists.txt
index d4fd7a7..fd50e95 100644
--- a/clang/lib/AST/CMakeLists.txt
+++ b/clang/lib/AST/CMakeLists.txt
@@ -66,6 +66,7 @@ add_clang_library(clangAST
ExternalASTMerger.cpp
ExternalASTSource.cpp
FormatString.cpp
+ InferAlloc.cpp
InheritViz.cpp
ByteCode/BitcastBuffer.cpp
ByteCode/ByteCodeEmitter.cpp
diff --git a/clang/lib/AST/ExprConstant.cpp b/clang/lib/AST/ExprConstant.cpp
index 00aaaab..2bd4476 100644
--- a/clang/lib/AST/ExprConstant.cpp
+++ b/clang/lib/AST/ExprConstant.cpp
@@ -44,6 +44,7 @@
#include "clang/AST/CharUnits.h"
#include "clang/AST/CurrentSourceLocExprScope.h"
#include "clang/AST/Expr.h"
+#include "clang/AST/InferAlloc.h"
#include "clang/AST/OSLog.h"
#include "clang/AST/OptionalDiagnostic.h"
#include "clang/AST/RecordLayout.h"
@@ -12312,6 +12313,20 @@ bool VectorExprEvaluator::VisitCallExpr(const CallExpr *E) {
return Success(APValue(ResultElements.data(), ResultElements.size()), E);
}
+ case X86::BI__builtin_ia32_psignb128:
+ case X86::BI__builtin_ia32_psignb256:
+ case X86::BI__builtin_ia32_psignw128:
+ case X86::BI__builtin_ia32_psignw256:
+ case X86::BI__builtin_ia32_psignd128:
+ case X86::BI__builtin_ia32_psignd256:
+ return EvaluateBinOpExpr([](const APInt &AElem, const APInt &BElem) {
+ if (BElem.isZero())
+ return APInt::getZero(AElem.getBitWidth());
+ if (BElem.isNegative())
+ return -AElem;
+ return AElem;
+ });
+
case X86::BI__builtin_ia32_blendvpd:
case X86::BI__builtin_ia32_blendvpd256:
case X86::BI__builtin_ia32_blendvps:
@@ -14649,6 +14664,27 @@ bool IntExprEvaluator::VisitBuiltinCallExpr(const CallExpr *E,
return Success(Result, E);
}
+ case Builtin::BI__builtin_infer_alloc_token: {
+ // If we fail to infer a type, this fails to be a constant expression; this
+ // can be checked with __builtin_constant_p(...).
+ QualType AllocType = infer_alloc::inferPossibleType(E, Info.Ctx, nullptr);
+ if (AllocType.isNull())
+ return Error(
+ E, diag::note_constexpr_infer_alloc_token_type_inference_failed);
+ auto ATMD = infer_alloc::getAllocTokenMetadata(AllocType, Info.Ctx);
+ if (!ATMD)
+ return Error(E, diag::note_constexpr_infer_alloc_token_no_metadata);
+ auto Mode =
+ Info.getLangOpts().AllocTokenMode.value_or(llvm::DefaultAllocTokenMode);
+ uint64_t BitWidth = Info.Ctx.getTypeSize(Info.Ctx.getSizeType());
+ uint64_t MaxTokens =
+ Info.getLangOpts().AllocTokenMax.value_or(~0ULL >> (64 - BitWidth));
+ auto MaybeToken = llvm::getAllocToken(Mode, *ATMD, MaxTokens);
+ if (!MaybeToken)
+ return Error(E, diag::note_constexpr_infer_alloc_token_stateful_mode);
+ return Success(llvm::APInt(BitWidth, *MaybeToken), E);
+ }
+
case Builtin::BI__builtin_ffs:
case Builtin::BI__builtin_ffsl:
case Builtin::BI__builtin_ffsll: {
diff --git a/clang/lib/AST/InferAlloc.cpp b/clang/lib/AST/InferAlloc.cpp
new file mode 100644
index 0000000..e439ed4
--- /dev/null
+++ b/clang/lib/AST/InferAlloc.cpp
@@ -0,0 +1,201 @@
+//===--- InferAlloc.cpp - Allocation type inference -----------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// This file implements allocation-related type inference.
+//
+//===----------------------------------------------------------------------===//
+
+#include "clang/AST/InferAlloc.h"
+#include "clang/AST/ASTContext.h"
+#include "clang/AST/Decl.h"
+#include "clang/AST/DeclCXX.h"
+#include "clang/AST/Expr.h"
+#include "clang/AST/Type.h"
+#include "clang/Basic/IdentifierTable.h"
+#include "llvm/ADT/SmallPtrSet.h"
+
+using namespace clang;
+using namespace infer_alloc;
+
+static bool
+typeContainsPointer(QualType T,
+ llvm::SmallPtrSet<const RecordDecl *, 4> &VisitedRD,
+ bool &IncompleteType) {
+ QualType CanonicalType = T.getCanonicalType();
+ if (CanonicalType->isPointerType())
+ return true; // base case
+
+ // Look through typedef chain to check for special types.
+ for (QualType CurrentT = T; const auto *TT = CurrentT->getAs<TypedefType>();
+ CurrentT = TT->getDecl()->getUnderlyingType()) {
+ const IdentifierInfo *II = TT->getDecl()->getIdentifier();
+ // Special Case: Syntactically uintptr_t is not a pointer; semantically,
+ // however, very likely used as such. Therefore, classify uintptr_t as a
+ // pointer, too.
+ if (II && II->isStr("uintptr_t"))
+ return true;
+ }
+
+ // The type is an array; check the element type.
+ if (const ArrayType *AT = dyn_cast<ArrayType>(CanonicalType))
+ return typeContainsPointer(AT->getElementType(), VisitedRD, IncompleteType);
+ // The type is a struct, class, or union.
+ if (const RecordDecl *RD = CanonicalType->getAsRecordDecl()) {
+ if (!RD->isCompleteDefinition()) {
+ IncompleteType = true;
+ return false;
+ }
+ if (!VisitedRD.insert(RD).second)
+ return false; // already visited
+ // Check all fields.
+ for (const FieldDecl *Field : RD->fields()) {
+ if (typeContainsPointer(Field->getType(), VisitedRD, IncompleteType))
+ return true;
+ }
+ // For C++ classes, also check base classes.
+ if (const CXXRecordDecl *CXXRD = dyn_cast<CXXRecordDecl>(RD)) {
+ // Polymorphic types require a vptr.
+ if (CXXRD->isDynamicClass())
+ return true;
+ for (const CXXBaseSpecifier &Base : CXXRD->bases()) {
+ if (typeContainsPointer(Base.getType(), VisitedRD, IncompleteType))
+ return true;
+ }
+ }
+ }
+ return false;
+}
+
+/// Infer type from a simple sizeof expression.
+static QualType inferTypeFromSizeofExpr(const Expr *E) {
+ const Expr *Arg = E->IgnoreParenImpCasts();
+ if (const auto *UET = dyn_cast<UnaryExprOrTypeTraitExpr>(Arg)) {
+ if (UET->getKind() == UETT_SizeOf) {
+ if (UET->isArgumentType())
+ return UET->getArgumentTypeInfo()->getType();
+ else
+ return UET->getArgumentExpr()->getType();
+ }
+ }
+ return QualType();
+}
+
+/// Infer type from an arithmetic expression involving a sizeof. For example:
+///
+/// malloc(sizeof(MyType) + padding); // infers 'MyType'
+/// malloc(sizeof(MyType) * 32); // infers 'MyType'
+/// malloc(32 * sizeof(MyType)); // infers 'MyType'
+/// malloc(sizeof(MyType) << 1); // infers 'MyType'
+/// ...
+///
+/// More complex arithmetic expressions are supported, but are a heuristic, e.g.
+/// when considering allocations for structs with flexible array members:
+///
+/// malloc(sizeof(HasFlexArray) + sizeof(int) * 32); // infers 'HasFlexArray'
+///
+static QualType inferPossibleTypeFromArithSizeofExpr(const Expr *E) {
+ const Expr *Arg = E->IgnoreParenImpCasts();
+ // The argument is a lone sizeof expression.
+ if (QualType T = inferTypeFromSizeofExpr(Arg); !T.isNull())
+ return T;
+ if (const auto *BO = dyn_cast<BinaryOperator>(Arg)) {
+ // Argument is an arithmetic expression. Cover common arithmetic patterns
+ // involving sizeof.
+ switch (BO->getOpcode()) {
+ case BO_Add:
+ case BO_Div:
+ case BO_Mul:
+ case BO_Shl:
+ case BO_Shr:
+ case BO_Sub:
+ if (QualType T = inferPossibleTypeFromArithSizeofExpr(BO->getLHS());
+ !T.isNull())
+ return T;
+ if (QualType T = inferPossibleTypeFromArithSizeofExpr(BO->getRHS());
+ !T.isNull())
+ return T;
+ break;
+ default:
+ break;
+ }
+ }
+ return QualType();
+}
+
+/// If the expression E is a reference to a variable, infer the type from a
+/// variable's initializer if it contains a sizeof. Beware, this is a heuristic
+/// and ignores if a variable is later reassigned. For example:
+///
+/// size_t my_size = sizeof(MyType);
+/// void *x = malloc(my_size); // infers 'MyType'
+///
+static QualType inferPossibleTypeFromVarInitSizeofExpr(const Expr *E) {
+ const Expr *Arg = E->IgnoreParenImpCasts();
+ if (const auto *DRE = dyn_cast<DeclRefExpr>(Arg)) {
+ if (const auto *VD = dyn_cast<VarDecl>(DRE->getDecl())) {
+ if (const Expr *Init = VD->getInit())
+ return inferPossibleTypeFromArithSizeofExpr(Init);
+ }
+ }
+ return QualType();
+}
+
+/// Deduces the allocated type by checking if the allocation call's result
+/// is immediately used in a cast expression. For example:
+///
+/// MyType *x = (MyType *)malloc(4096); // infers 'MyType'
+///
+static QualType inferPossibleTypeFromCastExpr(const CallExpr *CallE,
+ const CastExpr *CastE) {
+ if (!CastE)
+ return QualType();
+ QualType PtrType = CastE->getType();
+ if (PtrType->isPointerType())
+ return PtrType->getPointeeType();
+ return QualType();
+}
+
+QualType infer_alloc::inferPossibleType(const CallExpr *E,
+ const ASTContext &Ctx,
+ const CastExpr *CastE) {
+ QualType AllocType;
+ // First check arguments.
+ for (const Expr *Arg : E->arguments()) {
+ AllocType = inferPossibleTypeFromArithSizeofExpr(Arg);
+ if (AllocType.isNull())
+ AllocType = inferPossibleTypeFromVarInitSizeofExpr(Arg);
+ if (!AllocType.isNull())
+ break;
+ }
+ // Then check later casts.
+ if (AllocType.isNull())
+ AllocType = inferPossibleTypeFromCastExpr(E, CastE);
+ return AllocType;
+}
+
+std::optional<llvm::AllocTokenMetadata>
+infer_alloc::getAllocTokenMetadata(QualType T, const ASTContext &Ctx) {
+ llvm::AllocTokenMetadata ATMD;
+
+ // Get unique type name.
+ PrintingPolicy Policy(Ctx.getLangOpts());
+ Policy.SuppressTagKeyword = true;
+ Policy.FullyQualifiedName = true;
+ llvm::raw_svector_ostream TypeNameOS(ATMD.TypeName);
+ T.getCanonicalType().print(TypeNameOS, Policy);
+
+ // Check if QualType contains a pointer. Implements a simple DFS to
+ // recursively check if a type contains a pointer type.
+ llvm::SmallPtrSet<const RecordDecl *, 4> VisitedRD;
+ bool IncompleteType = false;
+ ATMD.ContainsPointer = typeContainsPointer(T, VisitedRD, IncompleteType);
+ if (!ATMD.ContainsPointer && IncompleteType)
+ return std::nullopt;
+
+ return ATMD;
+}
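
Taken together, the heuristics above infer an allocation type from a sizeof-based argument, a variable initializer containing sizeof, or a surrounding cast; __builtin_infer_alloc_token then maps that type to a token value. A hedged caller-side sketch ('MyType' and the exact call shapes are illustrative only):

    #include <stdlib.h>

    struct MyType { int *p; };

    void *a = malloc(sizeof(MyType) * 4);     // argument sizeof -> 'MyType'
    size_t n = sizeof(MyType);
    void *b = malloc(n);                      // variable initializer -> 'MyType'
    MyType *c = (MyType *)malloc(4096);       // surrounding cast -> 'MyType'
    constexpr size_t Tok =
        __builtin_infer_alloc_token(sizeof(MyType)); // folds when inference succeeds
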
diff --git a/clang/lib/AST/StmtOpenACC.cpp b/clang/lib/AST/StmtOpenACC.cpp
index 2b56c1e..462a10d 100644
--- a/clang/lib/AST/StmtOpenACC.cpp
+++ b/clang/lib/AST/StmtOpenACC.cpp
@@ -324,6 +324,18 @@ OpenACCAtomicConstruct *OpenACCAtomicConstruct::Create(
return Inst;
}
+static std::pair<const Expr *, const Expr *> getBinaryOpArgs(const Expr *Op) {
+ if (const auto *BO = dyn_cast<BinaryOperator>(Op)) {
+ assert(BO->getOpcode() == BO_Assign);
+ return {BO->getLHS(), BO->getRHS()};
+ }
+
+ const auto *OO = cast<CXXOperatorCallExpr>(Op);
+ assert(OO->getOperator() == OO_Equal);
+
+ return {OO->getArg(0), OO->getArg(1)};
+}
+
const OpenACCAtomicConstruct::StmtInfo
OpenACCAtomicConstruct::getAssociatedStmtInfo() const {
// This ends up being a vastly simplified version of SemaOpenACCAtomic, since
@@ -333,27 +345,35 @@ OpenACCAtomicConstruct::getAssociatedStmtInfo() const {
switch (AtomicKind) {
case OpenACCAtomicKind::None:
- case OpenACCAtomicKind::Write:
case OpenACCAtomicKind::Update:
case OpenACCAtomicKind::Capture:
- assert(false && "Only 'read' has been implemented here");
+ assert(false && "Only 'read'/'write' have been implemented here");
return {};
case OpenACCAtomicKind::Read: {
// Read only supports the format 'v = x'; where both sides are a scalar
// expression. This can come in 2 forms; BinaryOperator or
// CXXOperatorCallExpr (rarely).
- const Expr *AssignExpr = cast<const Expr>(getAssociatedStmt());
- if (const auto *BO = dyn_cast<BinaryOperator>(AssignExpr)) {
- assert(BO->getOpcode() == BO_Assign);
- return {BO->getLHS()->IgnoreImpCasts(), BO->getRHS()->IgnoreImpCasts()};
- }
-
- const auto *OO = cast<CXXOperatorCallExpr>(AssignExpr);
- assert(OO->getOperator() == OO_Equal);
-
- return {OO->getArg(0)->IgnoreImpCasts(), OO->getArg(1)->IgnoreImpCasts()};
+ std::pair<const Expr *, const Expr *> BinaryArgs =
+ getBinaryOpArgs(cast<const Expr>(getAssociatedStmt()));
+ // We want the L-value for each side, so we ignore implicit casts.
+ return {BinaryArgs.first->IgnoreImpCasts(),
+ BinaryArgs.second->IgnoreImpCasts(), /*expr=*/nullptr};
}
+ case OpenACCAtomicKind::Write: {
+ // Write supports only the format 'x = expr', where the expression is scalar
+ // type, and 'x' is a scalar l value. As above, this can come in 2 forms;
+ // Binary Operator or CXXOperatorCallExpr.
+ std::pair<const Expr *, const Expr *> BinaryArgs =
+ getBinaryOpArgs(cast<const Expr>(getAssociatedStmt()));
+ // We want the L-value for ONLY the X side, so we ignore implicit casts. For
+ // the right side (the expr), we emit it as an r-value so we need to
+ // maintain implicit casts.
+ return {/*v=*/nullptr, BinaryArgs.first->IgnoreImpCasts(),
+ BinaryArgs.second};
}
+ }
+
+ llvm_unreachable("unknown OpenACC atomic kind");
}
OpenACCCacheConstruct *OpenACCCacheConstruct::CreateEmpty(const ASTContext &C,
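
The two forms recognized by getAssociatedStmtInfo are 'v = x' for atomic read and 'x = expr' for atomic write. A hedged OpenACC sketch of how the StmtInfo fields end up populated:

    void f(int &v, int &x, int e) {
    #pragma acc atomic read
      v = x;       // StmtInfo: V = v, X = x, RefExpr = nullptr
    #pragma acc atomic write
      x = e + 1;   // StmtInfo: V = nullptr, X = x, RefExpr = e + 1 (kept as r-value)
    }
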
diff --git a/clang/lib/Analysis/FlowSensitive/Models/UncheckedStatusOrAccessModel.cpp b/clang/lib/Analysis/FlowSensitive/Models/UncheckedStatusOrAccessModel.cpp
index 598d33a..90551c2 100644
--- a/clang/lib/Analysis/FlowSensitive/Models/UncheckedStatusOrAccessModel.cpp
+++ b/clang/lib/Analysis/FlowSensitive/Models/UncheckedStatusOrAccessModel.cpp
@@ -168,6 +168,15 @@ static auto isNotOkStatusCall() {
"::absl::UnimplementedError", "::absl::UnknownError"))));
}
+static auto isPointerComparisonOperatorCall(std::string operator_name) {
+ using namespace ::clang::ast_matchers; // NOLINT: Too many names
+ return binaryOperator(hasOperatorName(operator_name),
+ hasLHS(hasType(hasCanonicalType(pointerType(
+ pointee(anyOf(statusOrType(), statusType())))))),
+ hasRHS(hasType(hasCanonicalType(pointerType(
+ pointee(anyOf(statusOrType(), statusType())))))));
+}
+
static auto
buildDiagnoseMatchSwitch(const UncheckedStatusOrAccessModelOptions &Options) {
return CFGMatchSwitchBuilder<const Environment,
@@ -438,6 +447,58 @@ static void transferComparisonOperator(const CXXOperatorCallExpr *Expr,
State.Env.setValue(*Expr, *LhsAndRhsVal);
}
+static RecordStorageLocation *getPointeeLocation(const Expr &Expr,
+ Environment &Env) {
+ if (auto *PointerVal = Env.get<PointerValue>(Expr))
+ return dyn_cast<RecordStorageLocation>(&PointerVal->getPointeeLoc());
+ return nullptr;
+}
+
+static BoolValue *evaluatePointerEquality(const Expr *LhsExpr,
+ const Expr *RhsExpr,
+ Environment &Env) {
+ assert(LhsExpr->getType()->isPointerType());
+ assert(RhsExpr->getType()->isPointerType());
+ RecordStorageLocation *LhsStatusLoc = nullptr;
+ RecordStorageLocation *RhsStatusLoc = nullptr;
+ if (isStatusOrType(LhsExpr->getType()->getPointeeType()) &&
+ isStatusOrType(RhsExpr->getType()->getPointeeType())) {
+ auto *LhsStatusOrLoc = getPointeeLocation(*LhsExpr, Env);
+ auto *RhsStatusOrLoc = getPointeeLocation(*RhsExpr, Env);
+ if (LhsStatusOrLoc == nullptr || RhsStatusOrLoc == nullptr)
+ return nullptr;
+ LhsStatusLoc = &locForStatus(*LhsStatusOrLoc);
+ RhsStatusLoc = &locForStatus(*RhsStatusOrLoc);
+ } else if (isStatusType(LhsExpr->getType()->getPointeeType()) &&
+ isStatusType(RhsExpr->getType()->getPointeeType())) {
+ LhsStatusLoc = getPointeeLocation(*LhsExpr, Env);
+ RhsStatusLoc = getPointeeLocation(*RhsExpr, Env);
+ }
+ if (LhsStatusLoc == nullptr || RhsStatusLoc == nullptr)
+ return nullptr;
+ auto &LhsOkVal = valForOk(*LhsStatusLoc, Env);
+ auto &RhsOkVal = valForOk(*RhsStatusLoc, Env);
+ auto &Res = Env.makeAtomicBoolValue();
+ auto &A = Env.arena();
+ Env.assume(A.makeImplies(
+ Res.formula(), A.makeEquals(LhsOkVal.formula(), RhsOkVal.formula())));
+ return &Res;
+}
+
+static void transferPointerComparisonOperator(const BinaryOperator *Expr,
+ LatticeTransferState &State,
+ bool IsNegative) {
+ auto *LhsAndRhsVal =
+ evaluatePointerEquality(Expr->getLHS(), Expr->getRHS(), State.Env);
+ if (LhsAndRhsVal == nullptr)
+ return;
+
+ if (IsNegative)
+ State.Env.setValue(*Expr, State.Env.makeNot(*LhsAndRhsVal));
+ else
+ State.Env.setValue(*Expr, *LhsAndRhsVal);
+}
+
static void transferOkStatusCall(const CallExpr *Expr,
const MatchFinder::MatchResult &,
LatticeTransferState &State) {
@@ -455,6 +516,18 @@ static void transferNotOkStatusCall(const CallExpr *Expr,
State.Env.assume(A.makeNot(OkVal.formula()));
}
+static void transferEmplaceCall(const CXXMemberCallExpr *Expr,
+ const MatchFinder::MatchResult &,
+ LatticeTransferState &State) {
+ RecordStorageLocation *StatusOrLoc =
+ getImplicitObjectLocation(*Expr, State.Env);
+ if (StatusOrLoc == nullptr)
+ return;
+
+ auto &OkVal = valForOk(locForStatus(*StatusOrLoc), State.Env);
+ State.Env.assume(OkVal.formula());
+}
+
CFGMatchSwitch<LatticeTransferState>
buildTransferMatchSwitch(ASTContext &Ctx,
CFGMatchSwitchBuilder<LatticeTransferState> Builder) {
@@ -482,8 +555,24 @@ buildTransferMatchSwitch(ASTContext &Ctx,
transferComparisonOperator(Expr, State,
/*IsNegative=*/true);
})
+ .CaseOfCFGStmt<BinaryOperator>(
+ isPointerComparisonOperatorCall("=="),
+ [](const BinaryOperator *Expr, const MatchFinder::MatchResult &,
+ LatticeTransferState &State) {
+ transferPointerComparisonOperator(Expr, State,
+ /*IsNegative=*/false);
+ })
+ .CaseOfCFGStmt<BinaryOperator>(
+ isPointerComparisonOperatorCall("!="),
+ [](const BinaryOperator *Expr, const MatchFinder::MatchResult &,
+ LatticeTransferState &State) {
+ transferPointerComparisonOperator(Expr, State,
+ /*IsNegative=*/true);
+ })
.CaseOfCFGStmt<CallExpr>(isOkStatusCall(), transferOkStatusCall)
.CaseOfCFGStmt<CallExpr>(isNotOkStatusCall(), transferNotOkStatusCall)
+ .CaseOfCFGStmt<CXXMemberCallExpr>(isStatusOrMemberCallWithName("emplace"),
+ transferEmplaceCall)
.Build();
}
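
The new transfer functions model two extra patterns: equality comparisons of pointers to Status/StatusOr (a true comparison implies the two ok() states are equal) and StatusOr::emplace(), which establishes ok(). A hedged sketch of the code shapes this covers:

    void f(absl::StatusOr<int> *a, absl::StatusOr<int> *b) {
      if (a == b && a->ok())
        (void)b->value();   // a == b implies a->ok() == b->ok()
    }

    void g(absl::StatusOr<int> &s) {
      s.emplace(42);
      (void)s.value();      // emplace() leaves s in the ok() state
    }
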
diff --git a/clang/lib/CIR/CodeGen/CIRGenBuiltin.cpp b/clang/lib/CIR/CodeGen/CIRGenBuiltin.cpp
index 27c4d11..62fa04e 100644
--- a/clang/lib/CIR/CodeGen/CIRGenBuiltin.cpp
+++ b/clang/lib/CIR/CodeGen/CIRGenBuiltin.cpp
@@ -454,6 +454,27 @@ RValue CIRGenFunction::emitBuiltinExpr(const GlobalDecl &gd, unsigned builtinID,
assert(!cir::MissingFeatures::coroSizeBuiltinCall());
return getUndefRValue(e->getType());
}
+ case Builtin::BI__builtin_prefetch: {
+ auto evaluateOperandAsInt = [&](const Expr *arg) {
+ Expr::EvalResult res;
+ [[maybe_unused]] bool evalSucceed =
+ arg->EvaluateAsInt(res, cgm.getASTContext());
+ assert(evalSucceed && "expression should be able to evaluate as int");
+ return res.Val.getInt().getZExtValue();
+ };
+
+ bool isWrite = false;
+ if (e->getNumArgs() > 1)
+ isWrite = evaluateOperandAsInt(e->getArg(1));
+
+ int locality = 3;
+ if (e->getNumArgs() > 2)
+ locality = evaluateOperandAsInt(e->getArg(2));
+
+ mlir::Value address = emitScalarExpr(e->getArg(0));
+ cir::PrefetchOp::create(builder, loc, address, locality, isWrite);
+ return RValue::get(nullptr);
+ }
}
// If this is an alias for a lib function (e.g. __builtin_sin), emit
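
For reference, the rw and locality operands of __builtin_prefetch are integer constant expressions, which is why the CIR path above can fold them with EvaluateAsInt. A short usage sketch:

    void touch(const int *p) {
      __builtin_prefetch(p);        // defaults: rw = 0 (read), locality = 3
      __builtin_prefetch(p, 1, 0);  // write hint, no temporal locality
    }
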
diff --git a/clang/lib/CIR/CodeGen/CIRGenCXX.cpp b/clang/lib/CIR/CodeGen/CIRGenCXX.cpp
index ca421e5..a3e2081 100644
--- a/clang/lib/CIR/CodeGen/CIRGenCXX.cpp
+++ b/clang/lib/CIR/CodeGen/CIRGenCXX.cpp
@@ -53,7 +53,7 @@ static void emitDeclInit(CIRGenFunction &cgf, const VarDecl *varDecl,
cgf.emitScalarInit(init, cgf.getLoc(varDecl->getLocation()), lv, false);
break;
case cir::TEK_Complex:
- cgf.cgm.errorNYI(varDecl->getSourceRange(), "complex global initializer");
+ cgf.emitComplexExprIntoLValue(init, lv, /*isInit=*/true);
break;
case cir::TEK_Aggregate:
assert(!cir::MissingFeatures::aggValueSlotGC());
diff --git a/clang/lib/CIR/CodeGen/CIRGenCXXABI.h b/clang/lib/CIR/CodeGen/CIRGenCXXABI.h
index c78f9b0..d3c7dac0 100644
--- a/clang/lib/CIR/CodeGen/CIRGenCXXABI.h
+++ b/clang/lib/CIR/CodeGen/CIRGenCXXABI.h
@@ -124,6 +124,8 @@ public:
virtual void emitRethrow(CIRGenFunction &cgf, bool isNoReturn) = 0;
virtual void emitThrow(CIRGenFunction &cgf, const CXXThrowExpr *e) = 0;
+ virtual void emitBadCastCall(CIRGenFunction &cgf, mlir::Location loc) = 0;
+
virtual mlir::Attribute getAddrOfRTTIDescriptor(mlir::Location loc,
QualType ty) = 0;
diff --git a/clang/lib/CIR/CodeGen/CIRGenExpr.cpp b/clang/lib/CIR/CodeGen/CIRGenExpr.cpp
index cc5050a..df6ee56 100644
--- a/clang/lib/CIR/CodeGen/CIRGenExpr.cpp
+++ b/clang/lib/CIR/CodeGen/CIRGenExpr.cpp
@@ -1820,10 +1820,12 @@ CIRGenCallee CIRGenFunction::emitCallee(const clang::Expr *e) {
// Resolve direct calls.
const auto *funcDecl = cast<FunctionDecl>(declRef->getDecl());
return emitDirectCallee(funcDecl);
- } else if (isa<MemberExpr>(e)) {
- cgm.errorNYI(e->getSourceRange(),
- "emitCallee: call to member function is NYI");
- return {};
+ } else if (auto me = dyn_cast<MemberExpr>(e)) {
+ if (const auto *fd = dyn_cast<FunctionDecl>(me->getMemberDecl())) {
+ emitIgnoredExpr(me->getBase());
+ return emitDirectCallee(fd);
+ }
+ // Else fall through to the indirect reference handling below.
} else if (auto *pde = dyn_cast<CXXPseudoDestructorExpr>(e)) {
return CIRGenCallee::forPseudoDestructor(pde);
}
@@ -2063,7 +2065,11 @@ mlir::Value CIRGenFunction::emitAlloca(StringRef name, mlir::Type ty,
// a surrounding cir.scope, make sure the alloca ends up in the surrounding
// scope instead. This is necessary in order to guarantee all SSA values are
// reachable during cleanups.
- assert(!cir::MissingFeatures::tryOp());
+ if (auto tryOp =
+ llvm::dyn_cast_if_present<cir::TryOp>(entryBlock->getParentOp())) {
+ if (auto scopeOp = llvm::dyn_cast<cir::ScopeOp>(tryOp->getParentOp()))
+ entryBlock = &scopeOp.getScopeRegion().front();
+ }
return emitAlloca(name, ty, loc, alignment,
builder.getBestAllocaInsertPoint(entryBlock), arraySize);
diff --git a/clang/lib/CIR/CodeGen/CIRGenExprCXX.cpp b/clang/lib/CIR/CodeGen/CIRGenExprCXX.cpp
index fe9e210..a3cdf19 100644
--- a/clang/lib/CIR/CodeGen/CIRGenExprCXX.cpp
+++ b/clang/lib/CIR/CodeGen/CIRGenExprCXX.cpp
@@ -801,6 +801,26 @@ void CIRGenFunction::emitDeleteCall(const FunctionDecl *deleteFD,
emitNewDeleteCall(*this, deleteFD, deleteFTy, deleteArgs);
}
+static mlir::Value emitDynamicCastToNull(CIRGenFunction &cgf,
+ mlir::Location loc, QualType destTy) {
+ mlir::Type destCIRTy = cgf.convertType(destTy);
+ assert(mlir::isa<cir::PointerType>(destCIRTy) &&
+ "result of dynamic_cast should be a ptr");
+
+ if (!destTy->isPointerType()) {
+ mlir::Region *currentRegion = cgf.getBuilder().getBlock()->getParent();
+ /// C++ [expr.dynamic.cast]p9:
+ /// A failed cast to reference type throws std::bad_cast
+ cgf.cgm.getCXXABI().emitBadCastCall(cgf, loc);
+
+ // The call to bad_cast will terminate the current block. Create a new block
+ // to hold any follow up code.
+ cgf.getBuilder().createBlock(currentRegion, currentRegion->end());
+ }
+
+ return cgf.getBuilder().getNullPtr(destCIRTy, loc);
+}
+
mlir::Value CIRGenFunction::emitDynamicCast(Address thisAddr,
const CXXDynamicCastExpr *dce) {
mlir::Location loc = getLoc(dce->getSourceRange());
@@ -831,10 +851,8 @@ mlir::Value CIRGenFunction::emitDynamicCast(Address thisAddr,
assert(srcRecordTy->isRecordType() && "source type must be a record type!");
assert(!cir::MissingFeatures::emitTypeCheck());
- if (dce->isAlwaysNull()) {
- cgm.errorNYI(dce->getSourceRange(), "emitDynamicCastToNull");
- return {};
- }
+ if (dce->isAlwaysNull())
+ return emitDynamicCastToNull(*this, loc, destTy);
auto destCirTy = mlir::cast<cir::PointerType>(convertType(destTy));
return cgm.getCXXABI().emitDynamicCast(*this, loc, srcRecordTy, destRecordTy,
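
emitDynamicCastToNull handles casts that Clang can prove will always fail: a pointer cast folds to null, while a failed reference cast throws std::bad_cast per [expr.dynamic.cast]p9. A hedged sketch of such a cast (the types are illustrative):

    struct Base { virtual ~Base(); };
    struct Derived final : Base {};
    struct Other { virtual ~Other(); };

    Other *p(Derived *d) { return dynamic_cast<Other *>(d); }  // always nullptr
    Other &r(Derived &d) { return dynamic_cast<Other &>(d); }  // throws std::bad_cast
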
diff --git a/clang/lib/CIR/CodeGen/CIRGenExprConstant.cpp b/clang/lib/CIR/CodeGen/CIRGenExprConstant.cpp
index 800262a..7de3dd0 100644
--- a/clang/lib/CIR/CodeGen/CIRGenExprConstant.cpp
+++ b/clang/lib/CIR/CodeGen/CIRGenExprConstant.cpp
@@ -179,8 +179,23 @@ bool ConstantAggregateBuilder::add(mlir::TypedAttr typedAttr, CharUnits offset,
}
// Uncommon case: constant overlaps what we've already created.
- cgm.errorNYI("overlapping constants");
- return false;
+ std::optional<size_t> firstElemToReplace = splitAt(offset);
+ if (!firstElemToReplace)
+ return false;
+
+ CharUnits cSize = getSize(typedAttr);
+ std::optional<size_t> lastElemToReplace = splitAt(offset + cSize);
+ if (!lastElemToReplace)
+ return false;
+
+ assert((firstElemToReplace == lastElemToReplace || allowOverwrite) &&
+ "unexpectedly overwriting field");
+
+ Element newElt(typedAttr, offset);
+ replace(elements, *firstElemToReplace, *lastElemToReplace, {newElt});
+ size = std::max(size, offset + cSize);
+ naturalLayout = false;
+ return true;
}
bool ConstantAggregateBuilder::addBits(llvm::APInt bits, uint64_t offsetInBits,
@@ -612,10 +627,7 @@ bool ConstRecordBuilder::applyZeroInitPadding(const ASTRecordLayout &layout,
}
bool ConstRecordBuilder::build(InitListExpr *ile, bool allowOverwrite) {
- RecordDecl *rd = ile->getType()
- ->castAs<clang::RecordType>()
- ->getDecl()
- ->getDefinitionOrSelf();
+ RecordDecl *rd = ile->getType()->castAsRecordDecl();
const ASTRecordLayout &layout = cgm.getASTContext().getASTRecordLayout(rd);
// Bail out if we have base classes. We could support these, but they only
@@ -671,17 +683,14 @@ bool ConstRecordBuilder::build(InitListExpr *ile, bool allowOverwrite) {
return false;
}
- mlir::TypedAttr eltInit;
- if (init)
- eltInit = mlir::cast<mlir::TypedAttr>(
- emitter.tryEmitPrivateForMemory(init, field->getType()));
- else
- eltInit = mlir::cast<mlir::TypedAttr>(emitter.emitNullForMemory(
- cgm.getLoc(ile->getSourceRange()), field->getType()));
-
- if (!eltInit)
+ mlir::Attribute eltInitAttr =
+ init ? emitter.tryEmitPrivateForMemory(init, field->getType())
+ : emitter.emitNullForMemory(cgm.getLoc(ile->getSourceRange()),
+ field->getType());
+ if (!eltInitAttr)
return false;
+ mlir::TypedAttr eltInit = mlir::cast<mlir::TypedAttr>(eltInitAttr);
if (!field->isBitField()) {
// Handle non-bitfield members.
if (!appendField(field, layout.getFieldOffset(index), eltInit,
diff --git a/clang/lib/CIR/CodeGen/CIRGenItaniumCXXABI.cpp b/clang/lib/CIR/CodeGen/CIRGenItaniumCXXABI.cpp
index f7c4d18..2dce0b1 100644
--- a/clang/lib/CIR/CodeGen/CIRGenItaniumCXXABI.cpp
+++ b/clang/lib/CIR/CodeGen/CIRGenItaniumCXXABI.cpp
@@ -120,6 +120,8 @@ public:
return true;
}
+ void emitBadCastCall(CIRGenFunction &cgf, mlir::Location loc) override;
+
mlir::Value
getVirtualBaseClassOffset(mlir::Location loc, CIRGenFunction &cgf,
Address thisAddr, const CXXRecordDecl *classDecl,
@@ -1883,6 +1885,11 @@ static void emitCallToBadCast(CIRGenFunction &cgf, mlir::Location loc) {
cgf.getBuilder().clearInsertionPoint();
}
+void CIRGenItaniumCXXABI::emitBadCastCall(CIRGenFunction &cgf,
+ mlir::Location loc) {
+ emitCallToBadCast(cgf, loc);
+}
+
// TODO(cir): This could be shared with classic codegen.
static CharUnits computeOffsetHint(ASTContext &astContext,
const CXXRecordDecl *src,
diff --git a/clang/lib/CIR/CodeGen/CIRGenStmtOpenACC.cpp b/clang/lib/CIR/CodeGen/CIRGenStmtOpenACC.cpp
index 77e6f83..349b111 100644
--- a/clang/lib/CIR/CodeGen/CIRGenStmtOpenACC.cpp
+++ b/clang/lib/CIR/CodeGen/CIRGenStmtOpenACC.cpp
@@ -306,9 +306,10 @@ CIRGenFunction::emitOpenACCCacheConstruct(const OpenACCCacheConstruct &s) {
mlir::LogicalResult
CIRGenFunction::emitOpenACCAtomicConstruct(const OpenACCAtomicConstruct &s) {
- // For now, we are only support 'read', so diagnose. We can switch on the kind
- // later once we start implementing the other 3 forms.
- if (s.getAtomicKind() != OpenACCAtomicKind::Read) {
+ // For now, we only support 'read'/'write', so diagnose. We can switch on
+ // the kind later once we start implementing the other 2 forms.
+ if (s.getAtomicKind() != OpenACCAtomicKind::Read &&
+ s.getAtomicKind() != OpenACCAtomicKind::Write) {
cgm.errorNYI(s.getSourceRange(), "OpenACC Atomic Construct");
return mlir::failure();
}
@@ -318,17 +319,41 @@ CIRGenFunction::emitOpenACCAtomicConstruct(const OpenACCAtomicConstruct &s) {
// it has custom emit logic.
mlir::Location start = getLoc(s.getSourceRange().getBegin());
OpenACCAtomicConstruct::StmtInfo inf = s.getAssociatedStmtInfo();
- // Atomic 'read' only permits 'v = x', where v and x are both scalar L values.
- // The getAssociatedStmtInfo strips off implicit casts, which includes
- // implicit conversions and L-to-R-Value conversions, so we can just emit it
- // as an L value. The Flang implementation has no problem with different
- // types, so it appears that the dialect can handle the conversions.
- mlir::Value v = emitLValue(inf.V).getPointer();
- mlir::Value x = emitLValue(inf.X).getPointer();
- mlir::Type resTy = convertType(inf.V->getType());
- auto op = mlir::acc::AtomicReadOp::create(builder, start, x, v, resTy,
- /*ifCond=*/{});
- emitOpenACCClauses(op, s.getDirectiveKind(), s.getDirectiveLoc(),
- s.clauses());
- return mlir::success();
+
+ switch (s.getAtomicKind()) {
+ case OpenACCAtomicKind::None:
+ case OpenACCAtomicKind::Update:
+ case OpenACCAtomicKind::Capture:
+ llvm_unreachable("Unimplemented atomic construct type, should have "
+ "diagnosed/returned above");
+ return mlir::failure();
+ case OpenACCAtomicKind::Read: {
+
+ // Atomic 'read' only permits 'v = x', where v and x are both scalar L
+ // values. The getAssociatedStmtInfo strips off implicit casts, which
+ // includes implicit conversions and L-to-R-Value conversions, so we can
+ // just emit it as an L value. The Flang implementation has no problem with
+ // different types, so it appears that the dialect can handle the
+ // conversions.
+ mlir::Value v = emitLValue(inf.V).getPointer();
+ mlir::Value x = emitLValue(inf.X).getPointer();
+ mlir::Type resTy = convertType(inf.V->getType());
+ auto op = mlir::acc::AtomicReadOp::create(builder, start, x, v, resTy,
+ /*ifCond=*/{});
+ emitOpenACCClauses(op, s.getDirectiveKind(), s.getDirectiveLoc(),
+ s.clauses());
+ return mlir::success();
+ }
+ case OpenACCAtomicKind::Write: {
+ mlir::Value x = emitLValue(inf.X).getPointer();
+ mlir::Value expr = emitAnyExpr(inf.RefExpr).getValue();
+ auto op = mlir::acc::AtomicWriteOp::create(builder, start, x, expr,
+ /*ifCond=*/{});
+ emitOpenACCClauses(op, s.getDirectiveKind(), s.getDirectiveLoc(),
+ s.clauses());
+ return mlir::success();
+ }
+ }
+
+ llvm_unreachable("unknown OpenACC atomic kind");
}
diff --git a/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp b/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp
index a30ae02..5a6193f 100644
--- a/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp
+++ b/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp
@@ -1695,6 +1695,15 @@ static uint64_t getTypeSize(mlir::Type type, mlir::Operation &op) {
return llvm::divideCeil(layout.getTypeSizeInBits(type), 8);
}
+mlir::LogicalResult CIRToLLVMPrefetchOpLowering::matchAndRewrite(
+ cir::PrefetchOp op, OpAdaptor adaptor,
+ mlir::ConversionPatternRewriter &rewriter) const {
+ rewriter.replaceOpWithNewOp<mlir::LLVM::Prefetch>(
+ op, adaptor.getAddr(), adaptor.getIsWrite(), adaptor.getLocality(),
+ /*DataCache=*/1);
+ return mlir::success();
+}
+
mlir::LogicalResult CIRToLLVMPtrDiffOpLowering::matchAndRewrite(
cir::PtrDiffOp op, OpAdaptor adaptor,
mlir::ConversionPatternRewriter &rewriter) const {
diff --git a/clang/lib/CodeGen/BackendUtil.cpp b/clang/lib/CodeGen/BackendUtil.cpp
index c423c4b..468c930 100644
--- a/clang/lib/CodeGen/BackendUtil.cpp
+++ b/clang/lib/CodeGen/BackendUtil.cpp
@@ -684,7 +684,8 @@ static void addKCFIPass(const Triple &TargetTriple, const LangOptions &LangOpts,
PassBuilder &PB) {
// If the back-end supports KCFI operand bundle lowering, skip KCFIPass.
if (TargetTriple.getArch() == llvm::Triple::x86_64 ||
- TargetTriple.isAArch64(64) || TargetTriple.isRISCV())
+ TargetTriple.isAArch64(64) || TargetTriple.isRISCV() ||
+ TargetTriple.isARM() || TargetTriple.isThumb())
return;
// Ensure we lower KCFI operand bundles with -O0.
diff --git a/clang/lib/CodeGen/CGExpr.cpp b/clang/lib/CodeGen/CGExpr.cpp
index fd73314..301d577 100644
--- a/clang/lib/CodeGen/CGExpr.cpp
+++ b/clang/lib/CodeGen/CGExpr.cpp
@@ -29,6 +29,7 @@
#include "clang/AST/ASTLambda.h"
#include "clang/AST/Attr.h"
#include "clang/AST/DeclObjC.h"
+#include "clang/AST/InferAlloc.h"
#include "clang/AST/NSAPI.h"
#include "clang/AST/ParentMapContext.h"
#include "clang/AST/StmtVisitor.h"
@@ -1273,194 +1274,39 @@ void CodeGenFunction::EmitBoundsCheckImpl(const Expr *E, llvm::Value *Bound,
EmitCheck(std::make_pair(Check, CheckKind), CheckHandler, StaticData, Index);
}
-static bool
-typeContainsPointer(QualType T,
- llvm::SmallPtrSet<const RecordDecl *, 4> &VisitedRD,
- bool &IncompleteType) {
- QualType CanonicalType = T.getCanonicalType();
- if (CanonicalType->isPointerType())
- return true; // base case
-
- // Look through typedef chain to check for special types.
- for (QualType CurrentT = T; const auto *TT = CurrentT->getAs<TypedefType>();
- CurrentT = TT->getDecl()->getUnderlyingType()) {
- const IdentifierInfo *II = TT->getDecl()->getIdentifier();
- // Special Case: Syntactically uintptr_t is not a pointer; semantically,
- // however, very likely used as such. Therefore, classify uintptr_t as a
- // pointer, too.
- if (II && II->isStr("uintptr_t"))
- return true;
- }
-
- // The type is an array; check the element type.
- if (const ArrayType *AT = dyn_cast<ArrayType>(CanonicalType))
- return typeContainsPointer(AT->getElementType(), VisitedRD, IncompleteType);
- // The type is a struct, class, or union.
- if (const RecordDecl *RD = CanonicalType->getAsRecordDecl()) {
- if (!RD->isCompleteDefinition()) {
- IncompleteType = true;
- return false;
- }
- if (!VisitedRD.insert(RD).second)
- return false; // already visited
- // Check all fields.
- for (const FieldDecl *Field : RD->fields()) {
- if (typeContainsPointer(Field->getType(), VisitedRD, IncompleteType))
- return true;
- }
- // For C++ classes, also check base classes.
- if (const CXXRecordDecl *CXXRD = dyn_cast<CXXRecordDecl>(RD)) {
- // Polymorphic types require a vptr.
- if (CXXRD->isDynamicClass())
- return true;
- for (const CXXBaseSpecifier &Base : CXXRD->bases()) {
- if (typeContainsPointer(Base.getType(), VisitedRD, IncompleteType))
- return true;
- }
- }
- }
- return false;
-}
-
-void CodeGenFunction::EmitAllocToken(llvm::CallBase *CB, QualType AllocType) {
- assert(SanOpts.has(SanitizerKind::AllocToken) &&
- "Only needed with -fsanitize=alloc-token");
+llvm::MDNode *CodeGenFunction::buildAllocToken(QualType AllocType) {
+ auto ATMD = infer_alloc::getAllocTokenMetadata(AllocType, getContext());
+ if (!ATMD)
+ return nullptr;
llvm::MDBuilder MDB(getLLVMContext());
-
- // Get unique type name.
- PrintingPolicy Policy(CGM.getContext().getLangOpts());
- Policy.SuppressTagKeyword = true;
- Policy.FullyQualifiedName = true;
- SmallString<64> TypeName;
- llvm::raw_svector_ostream TypeNameOS(TypeName);
- AllocType.getCanonicalType().print(TypeNameOS, Policy);
- auto *TypeNameMD = MDB.createString(TypeNameOS.str());
-
- // Check if QualType contains a pointer. Implements a simple DFS to
- // recursively check if a type contains a pointer type.
- llvm::SmallPtrSet<const RecordDecl *, 4> VisitedRD;
- bool IncompleteType = false;
- const bool ContainsPtr =
- typeContainsPointer(AllocType, VisitedRD, IncompleteType);
- if (!ContainsPtr && IncompleteType)
- return;
- auto *ContainsPtrC = Builder.getInt1(ContainsPtr);
+ auto *TypeNameMD = MDB.createString(ATMD->TypeName);
+ auto *ContainsPtrC = Builder.getInt1(ATMD->ContainsPointer);
auto *ContainsPtrMD = MDB.createConstant(ContainsPtrC);
// Format: !{<type-name>, <contains-pointer>}
- auto *MDN =
- llvm::MDNode::get(CGM.getLLVMContext(), {TypeNameMD, ContainsPtrMD});
- CB->setMetadata(llvm::LLVMContext::MD_alloc_token, MDN);
-}
-
-namespace {
-/// Infer type from a simple sizeof expression.
-QualType inferTypeFromSizeofExpr(const Expr *E) {
- const Expr *Arg = E->IgnoreParenImpCasts();
- if (const auto *UET = dyn_cast<UnaryExprOrTypeTraitExpr>(Arg)) {
- if (UET->getKind() == UETT_SizeOf) {
- if (UET->isArgumentType())
- return UET->getArgumentTypeInfo()->getType();
- else
- return UET->getArgumentExpr()->getType();
- }
- }
- return QualType();
-}
-
-/// Infer type from an arithmetic expression involving a sizeof. For example:
-///
-/// malloc(sizeof(MyType) + padding); // infers 'MyType'
-/// malloc(sizeof(MyType) * 32); // infers 'MyType'
-/// malloc(32 * sizeof(MyType)); // infers 'MyType'
-/// malloc(sizeof(MyType) << 1); // infers 'MyType'
-/// ...
-///
-/// More complex arithmetic expressions are supported, but are a heuristic, e.g.
-/// when considering allocations for structs with flexible array members:
-///
-/// malloc(sizeof(HasFlexArray) + sizeof(int) * 32); // infers 'HasFlexArray'
-///
-QualType inferPossibleTypeFromArithSizeofExpr(const Expr *E) {
- const Expr *Arg = E->IgnoreParenImpCasts();
- // The argument is a lone sizeof expression.
- if (QualType T = inferTypeFromSizeofExpr(Arg); !T.isNull())
- return T;
- if (const auto *BO = dyn_cast<BinaryOperator>(Arg)) {
- // Argument is an arithmetic expression. Cover common arithmetic patterns
- // involving sizeof.
- switch (BO->getOpcode()) {
- case BO_Add:
- case BO_Div:
- case BO_Mul:
- case BO_Shl:
- case BO_Shr:
- case BO_Sub:
- if (QualType T = inferPossibleTypeFromArithSizeofExpr(BO->getLHS());
- !T.isNull())
- return T;
- if (QualType T = inferPossibleTypeFromArithSizeofExpr(BO->getRHS());
- !T.isNull())
- return T;
- break;
- default:
- break;
- }
- }
- return QualType();
+ return llvm::MDNode::get(CGM.getLLVMContext(), {TypeNameMD, ContainsPtrMD});
}
-/// If the expression E is a reference to a variable, infer the type from a
-/// variable's initializer if it contains a sizeof. Beware, this is a heuristic
-/// and ignores if a variable is later reassigned. For example:
-///
-/// size_t my_size = sizeof(MyType);
-/// void *x = malloc(my_size); // infers 'MyType'
-///
-QualType inferPossibleTypeFromVarInitSizeofExpr(const Expr *E) {
- const Expr *Arg = E->IgnoreParenImpCasts();
- if (const auto *DRE = dyn_cast<DeclRefExpr>(Arg)) {
- if (const auto *VD = dyn_cast<VarDecl>(DRE->getDecl())) {
- if (const Expr *Init = VD->getInit())
- return inferPossibleTypeFromArithSizeofExpr(Init);
- }
- }
- return QualType();
+void CodeGenFunction::EmitAllocToken(llvm::CallBase *CB, QualType AllocType) {
+ assert(SanOpts.has(SanitizerKind::AllocToken) &&
+ "Only needed with -fsanitize=alloc-token");
+ CB->setMetadata(llvm::LLVMContext::MD_alloc_token,
+ buildAllocToken(AllocType));
}
-/// Deduces the allocated type by checking if the allocation call's result
-/// is immediately used in a cast expression. For example:
-///
-/// MyType *x = (MyType *)malloc(4096); // infers 'MyType'
-///
-QualType inferPossibleTypeFromCastExpr(const CallExpr *CallE,
- const CastExpr *CastE) {
- if (!CastE)
- return QualType();
- QualType PtrType = CastE->getType();
- if (PtrType->isPointerType())
- return PtrType->getPointeeType();
- return QualType();
+llvm::MDNode *CodeGenFunction::buildAllocToken(const CallExpr *E) {
+ QualType AllocType = infer_alloc::inferPossibleType(E, getContext(), CurCast);
+ if (!AllocType.isNull())
+ return buildAllocToken(AllocType);
+ return nullptr;
}
-} // end anonymous namespace
void CodeGenFunction::EmitAllocToken(llvm::CallBase *CB, const CallExpr *E) {
- QualType AllocType;
- // First check arguments.
- for (const Expr *Arg : E->arguments()) {
- AllocType = inferPossibleTypeFromArithSizeofExpr(Arg);
- if (AllocType.isNull())
- AllocType = inferPossibleTypeFromVarInitSizeofExpr(Arg);
- if (!AllocType.isNull())
- break;
- }
- // Then check later casts.
- if (AllocType.isNull())
- AllocType = inferPossibleTypeFromCastExpr(E, CurCast);
- // Emit if we were able to infer the type.
- if (!AllocType.isNull())
- EmitAllocToken(CB, AllocType);
+ assert(SanOpts.has(SanitizerKind::AllocToken) &&
+ "Only needed with -fsanitize=alloc-token");
+ if (llvm::MDNode *MDN = buildAllocToken(E))
+ CB->setMetadata(llvm::LLVMContext::MD_alloc_token, MDN);
}
CodeGenFunction::ComplexPairTy CodeGenFunction::
diff --git a/clang/lib/CodeGen/CGHLSLRuntime.cpp b/clang/lib/CodeGen/CGHLSLRuntime.cpp
index ecab933..945f9e2 100644
--- a/clang/lib/CodeGen/CGHLSLRuntime.cpp
+++ b/clang/lib/CodeGen/CGHLSLRuntime.cpp
@@ -562,17 +562,16 @@ static llvm::Value *createSPIRVBuiltinLoad(IRBuilder<> &B, llvm::Module &M,
return B.CreateLoad(Ty, GV);
}
-llvm::Value *
-CGHLSLRuntime::emitSystemSemanticLoad(IRBuilder<> &B, llvm::Type *Type,
- const clang::DeclaratorDecl *Decl,
- SemanticInfo &ActiveSemantic) {
- if (isa<HLSLSV_GroupIndexAttr>(ActiveSemantic.Semantic)) {
+llvm::Value *CGHLSLRuntime::emitSystemSemanticLoad(
+ IRBuilder<> &B, llvm::Type *Type, const clang::DeclaratorDecl *Decl,
+ Attr *Semantic, std::optional<unsigned> Index) {
+ if (isa<HLSLSV_GroupIndexAttr>(Semantic)) {
llvm::Function *GroupIndex =
CGM.getIntrinsic(getFlattenedThreadIdInGroupIntrinsic());
return B.CreateCall(FunctionCallee(GroupIndex));
}
- if (isa<HLSLSV_DispatchThreadIDAttr>(ActiveSemantic.Semantic)) {
+ if (isa<HLSLSV_DispatchThreadIDAttr>(Semantic)) {
llvm::Intrinsic::ID IntrinID = getThreadIdIntrinsic();
llvm::Function *ThreadIDIntrinsic =
llvm::Intrinsic::isOverloaded(IntrinID)
@@ -581,7 +580,7 @@ CGHLSLRuntime::emitSystemSemanticLoad(IRBuilder<> &B, llvm::Type *Type,
return buildVectorInput(B, ThreadIDIntrinsic, Type);
}
- if (isa<HLSLSV_GroupThreadIDAttr>(ActiveSemantic.Semantic)) {
+ if (isa<HLSLSV_GroupThreadIDAttr>(Semantic)) {
llvm::Intrinsic::ID IntrinID = getGroupThreadIdIntrinsic();
llvm::Function *GroupThreadIDIntrinsic =
llvm::Intrinsic::isOverloaded(IntrinID)
@@ -590,7 +589,7 @@ CGHLSLRuntime::emitSystemSemanticLoad(IRBuilder<> &B, llvm::Type *Type,
return buildVectorInput(B, GroupThreadIDIntrinsic, Type);
}
- if (isa<HLSLSV_GroupIDAttr>(ActiveSemantic.Semantic)) {
+ if (isa<HLSLSV_GroupIDAttr>(Semantic)) {
llvm::Intrinsic::ID IntrinID = getGroupIdIntrinsic();
llvm::Function *GroupIDIntrinsic =
llvm::Intrinsic::isOverloaded(IntrinID)
@@ -599,8 +598,7 @@ CGHLSLRuntime::emitSystemSemanticLoad(IRBuilder<> &B, llvm::Type *Type,
return buildVectorInput(B, GroupIDIntrinsic, Type);
}
- if (HLSLSV_PositionAttr *S =
- dyn_cast<HLSLSV_PositionAttr>(ActiveSemantic.Semantic)) {
+ if (HLSLSV_PositionAttr *S = dyn_cast<HLSLSV_PositionAttr>(Semantic)) {
if (CGM.getTriple().getEnvironment() == Triple::EnvironmentType::Pixel)
return createSPIRVBuiltinLoad(B, CGM.getModule(), Type,
S->getAttrName()->getName(),
@@ -611,29 +609,56 @@ CGHLSLRuntime::emitSystemSemanticLoad(IRBuilder<> &B, llvm::Type *Type,
}
llvm::Value *
-CGHLSLRuntime::handleScalarSemanticLoad(IRBuilder<> &B, llvm::Type *Type,
- const clang::DeclaratorDecl *Decl,
- SemanticInfo &ActiveSemantic) {
-
- if (!ActiveSemantic.Semantic) {
- ActiveSemantic.Semantic = Decl->getAttr<HLSLSemanticAttr>();
- if (!ActiveSemantic.Semantic) {
- CGM.getDiags().Report(Decl->getInnerLocStart(),
- diag::err_hlsl_semantic_missing);
- return nullptr;
+CGHLSLRuntime::handleScalarSemanticLoad(IRBuilder<> &B, const FunctionDecl *FD,
+ llvm::Type *Type,
+ const clang::DeclaratorDecl *Decl) {
+
+ HLSLSemanticAttr *Semantic = nullptr;
+ for (HLSLSemanticAttr *Item : FD->specific_attrs<HLSLSemanticAttr>()) {
+ if (Item->getTargetDecl() == Decl) {
+ Semantic = Item;
+ break;
}
- ActiveSemantic.Index = ActiveSemantic.Semantic->getSemanticIndex();
}
+ // Sema must create one attribute per scalar field.
+ assert(Semantic);
- return emitSystemSemanticLoad(B, Type, Decl, ActiveSemantic);
+ std::optional<unsigned> Index = std::nullopt;
+ if (Semantic->isSemanticIndexExplicit())
+ Index = Semantic->getSemanticIndex();
+ return emitSystemSemanticLoad(B, Type, Decl, Semantic, Index);
}
llvm::Value *
-CGHLSLRuntime::handleSemanticLoad(IRBuilder<> &B, llvm::Type *Type,
- const clang::DeclaratorDecl *Decl,
- SemanticInfo &ActiveSemantic) {
- assert(!Type->isStructTy());
- return handleScalarSemanticLoad(B, Type, Decl, ActiveSemantic);
+CGHLSLRuntime::handleStructSemanticLoad(IRBuilder<> &B, const FunctionDecl *FD,
+ llvm::Type *Type,
+ const clang::DeclaratorDecl *Decl) {
+ const llvm::StructType *ST = cast<StructType>(Type);
+ const clang::RecordDecl *RD = Decl->getType()->getAsRecordDecl();
+
+ assert(std::distance(RD->field_begin(), RD->field_end()) ==
+ ST->getNumElements());
+
+ llvm::Value *Aggregate = llvm::PoisonValue::get(Type);
+ auto FieldDecl = RD->field_begin();
+ for (unsigned I = 0; I < ST->getNumElements(); ++I) {
+ llvm::Value *ChildValue =
+ handleSemanticLoad(B, FD, ST->getElementType(I), *FieldDecl);
+ assert(ChildValue);
+ Aggregate = B.CreateInsertValue(Aggregate, ChildValue, I);
+ ++FieldDecl;
+ }
+
+ return Aggregate;
+}
+
+llvm::Value *
+CGHLSLRuntime::handleSemanticLoad(IRBuilder<> &B, const FunctionDecl *FD,
+ llvm::Type *Type,
+ const clang::DeclaratorDecl *Decl) {
+ if (Type->isStructTy())
+ return handleStructSemanticLoad(B, FD, Type, Decl);
+ return handleScalarSemanticLoad(B, FD, Type, Decl);
}
void CGHLSLRuntime::emitEntryFunction(const FunctionDecl *FD,
@@ -680,8 +705,25 @@ void CGHLSLRuntime::emitEntryFunction(const FunctionDecl *FD,
}
const ParmVarDecl *PD = FD->getParamDecl(Param.getArgNo() - SRetOffset);
- SemanticInfo ActiveSemantic = {nullptr, 0};
- Args.push_back(handleSemanticLoad(B, Param.getType(), PD, ActiveSemantic));
+ llvm::Value *SemanticValue = nullptr;
+ if ([[maybe_unused]] HLSLParamModifierAttr *MA =
+ PD->getAttr<HLSLParamModifierAttr>()) {
+ llvm_unreachable("Not handled yet");
+ } else {
+ llvm::Type *ParamType =
+ Param.hasByValAttr() ? Param.getParamByValType() : Param.getType();
+ SemanticValue = handleSemanticLoad(B, FD, ParamType, PD);
+ if (!SemanticValue)
+ return;
+ if (Param.hasByValAttr()) {
+ llvm::Value *Var = B.CreateAlloca(Param.getParamByValType());
+ B.CreateStore(SemanticValue, Var);
+ SemanticValue = Var;
+ }
+ }
+
+ assert(SemanticValue);
+ Args.push_back(SemanticValue);
}
CallInst *CI = B.CreateCall(FunctionCallee(Fn), Args, OB);
diff --git a/clang/lib/CodeGen/CGHLSLRuntime.h b/clang/lib/CodeGen/CGHLSLRuntime.h
index 103b4a9..d35df52 100644
--- a/clang/lib/CodeGen/CGHLSLRuntime.h
+++ b/clang/lib/CodeGen/CGHLSLRuntime.h
@@ -144,26 +144,24 @@ public:
protected:
CodeGenModule &CGM;
- void collectInputSemantic(llvm::IRBuilder<> &B, const DeclaratorDecl *D,
- llvm::Type *Type,
- SmallVectorImpl<llvm::Value *> &Inputs);
-
- struct SemanticInfo {
- clang::HLSLSemanticAttr *Semantic;
- uint32_t Index;
- };
-
llvm::Value *emitSystemSemanticLoad(llvm::IRBuilder<> &B, llvm::Type *Type,
const clang::DeclaratorDecl *Decl,
- SemanticInfo &ActiveSemantic);
-
- llvm::Value *handleScalarSemanticLoad(llvm::IRBuilder<> &B, llvm::Type *Type,
- const clang::DeclaratorDecl *Decl,
- SemanticInfo &ActiveSemantic);
-
- llvm::Value *handleSemanticLoad(llvm::IRBuilder<> &B, llvm::Type *Type,
- const clang::DeclaratorDecl *Decl,
- SemanticInfo &ActiveSemantic);
+ Attr *Semantic,
+ std::optional<unsigned> Index);
+
+ llvm::Value *handleScalarSemanticLoad(llvm::IRBuilder<> &B,
+ const FunctionDecl *FD,
+ llvm::Type *Type,
+ const clang::DeclaratorDecl *Decl);
+
+ llvm::Value *handleStructSemanticLoad(llvm::IRBuilder<> &B,
+ const FunctionDecl *FD,
+ llvm::Type *Type,
+ const clang::DeclaratorDecl *Decl);
+
+ llvm::Value *handleSemanticLoad(llvm::IRBuilder<> &B, const FunctionDecl *FD,
+ llvm::Type *Type,
+ const clang::DeclaratorDecl *Decl);
public:
CGHLSLRuntime(CodeGenModule &CGM) : CGM(CGM) {}
diff --git a/clang/lib/CodeGen/CGOpenMPRuntime.cpp b/clang/lib/CodeGen/CGOpenMPRuntime.cpp
index 85b2404..66fea92 100644
--- a/clang/lib/CodeGen/CGOpenMPRuntime.cpp
+++ b/clang/lib/CodeGen/CGOpenMPRuntime.cpp
@@ -2713,14 +2713,6 @@ llvm::Value *CGOpenMPRuntime::emitMessageClause(CodeGenFunction &CGF,
}
llvm::Value *
-CGOpenMPRuntime::emitMessageClause(CodeGenFunction &CGF,
- const OMPMessageClause *MessageClause) {
- return emitMessageClause(
- CGF, MessageClause ? MessageClause->getMessageString() : nullptr,
- MessageClause->getBeginLoc());
-}
-
-llvm::Value *
CGOpenMPRuntime::emitSeverityClause(OpenMPSeverityClauseKind Severity,
SourceLocation Loc) {
// OpenMP 6.0, 10.4: "If no severity clause is specified then the effect is
@@ -2729,13 +2721,6 @@ CGOpenMPRuntime::emitSeverityClause(OpenMPSeverityClauseKind Severity,
Severity == OMPC_SEVERITY_warning ? 1 : 2);
}
-llvm::Value *
-CGOpenMPRuntime::emitSeverityClause(const OMPSeverityClause *SeverityClause) {
- return emitSeverityClause(SeverityClause ? SeverityClause->getSeverityKind()
- : OMPC_SEVERITY_unknown,
- SeverityClause->getBeginLoc());
-}
-
void CGOpenMPRuntime::emitNumThreadsClause(
CodeGenFunction &CGF, llvm::Value *NumThreads, SourceLocation Loc,
OpenMPNumThreadsClauseModifier Modifier, OpenMPSeverityClauseKind Severity,
diff --git a/clang/lib/CodeGen/CGOpenMPRuntime.h b/clang/lib/CodeGen/CGOpenMPRuntime.h
index ba76ba6b..6bfd7d6 100644
--- a/clang/lib/CodeGen/CGOpenMPRuntime.h
+++ b/clang/lib/CodeGen/CGOpenMPRuntime.h
@@ -1051,13 +1051,9 @@ public:
virtual llvm::Value *emitMessageClause(CodeGenFunction &CGF,
const Expr *Message,
SourceLocation Loc);
- virtual llvm::Value *emitMessageClause(CodeGenFunction &CGF,
- const OMPMessageClause *MessageClause);
virtual llvm::Value *emitSeverityClause(OpenMPSeverityClauseKind Severity,
SourceLocation Loc);
- virtual llvm::Value *
- emitSeverityClause(const OMPSeverityClause *SeverityClause);
/// Emits call to void __kmpc_push_num_threads(ident_t *loc, kmp_int32
/// global_tid, kmp_int32 num_threads) to generate code for 'num_threads'
diff --git a/clang/lib/CodeGen/CodeGenFunction.h b/clang/lib/CodeGen/CodeGenFunction.h
index 1f0be2d..8c4c1c8 100644
--- a/clang/lib/CodeGen/CodeGenFunction.h
+++ b/clang/lib/CodeGen/CodeGenFunction.h
@@ -3352,9 +3352,14 @@ public:
SanitizerAnnotateDebugInfo(ArrayRef<SanitizerKind::SanitizerOrdinal> Ordinals,
SanitizerHandler Handler);
- /// Emit additional metadata used by the AllocToken instrumentation.
+ /// Build metadata used by the AllocToken instrumentation.
+ llvm::MDNode *buildAllocToken(QualType AllocType);
+ /// Emit and set additional metadata used by the AllocToken instrumentation.
void EmitAllocToken(llvm::CallBase *CB, QualType AllocType);
- /// Emit additional metadata used by the AllocToken instrumentation,
+ /// Build additional metadata used by the AllocToken instrumentation,
+ /// inferring the type from an allocation call expression.
+ llvm::MDNode *buildAllocToken(const CallExpr *E);
+ /// Emit and set additional metadata used by the AllocToken instrumentation,
/// inferring the type from an allocation call expression.
void EmitAllocToken(llvm::CallBase *CB, const CallExpr *E);
diff --git a/clang/lib/Driver/ToolChains/CommonArgs.cpp b/clang/lib/Driver/ToolChains/CommonArgs.cpp
index 99400ac..727af69 100644
--- a/clang/lib/Driver/ToolChains/CommonArgs.cpp
+++ b/clang/lib/Driver/ToolChains/CommonArgs.cpp
@@ -949,6 +949,24 @@ bool tools::isTLSDESCEnabled(const ToolChain &TC,
return EnableTLSDESC;
}
+void tools::addDTLTOOptions(const ToolChain &ToolChain, const ArgList &Args,
+ llvm::opt::ArgStringList &CmdArgs) {
+ if (Arg *A = Args.getLastArg(options::OPT_fthinlto_distributor_EQ)) {
+ CmdArgs.push_back(
+ Args.MakeArgString("--thinlto-distributor=" + Twine(A->getValue())));
+ const Driver &D = ToolChain.getDriver();
+ CmdArgs.push_back(Args.MakeArgString("--thinlto-remote-compiler=" +
+ Twine(D.getClangProgramPath())));
+ if (auto *PA = D.getPrependArg())
+ CmdArgs.push_back(Args.MakeArgString(
+ "--thinlto-remote-compiler-prepend-arg=" + Twine(PA)));
+
+ for (const auto &A :
+ Args.getAllArgValues(options::OPT_Xthinlto_distributor_EQ))
+ CmdArgs.push_back(Args.MakeArgString("--thinlto-distributor-arg=" + A));
+ }
+}
+
void tools::addLTOOptions(const ToolChain &ToolChain, const ArgList &Args,
ArgStringList &CmdArgs, const InputInfo &Output,
const InputInfoList &Inputs, bool IsThinLTO) {
@@ -1350,16 +1368,7 @@ void tools::addLTOOptions(const ToolChain &ToolChain, const ArgList &Args,
CmdArgs.push_back(
Args.MakeArgString(Twine(PluginOptPrefix) + "-time-passes"));
- if (Arg *A = Args.getLastArg(options::OPT_fthinlto_distributor_EQ)) {
- CmdArgs.push_back(
- Args.MakeArgString("--thinlto-distributor=" + Twine(A->getValue())));
- CmdArgs.push_back(
- Args.MakeArgString("--thinlto-remote-compiler=" +
- Twine(ToolChain.getDriver().getClangProgramPath())));
-
- for (auto A : Args.getAllArgValues(options::OPT_Xthinlto_distributor_EQ))
- CmdArgs.push_back(Args.MakeArgString("--thinlto-distributor-arg=" + A));
- }
+ addDTLTOOptions(ToolChain, Args, CmdArgs);
}
void tools::addOpenMPRuntimeLibraryPath(const ToolChain &TC,
diff --git a/clang/lib/Driver/ToolChains/PS4CPU.cpp b/clang/lib/Driver/ToolChains/PS4CPU.cpp
index 61afc61..34ec65a 100644
--- a/clang/lib/Driver/ToolChains/PS4CPU.cpp
+++ b/clang/lib/Driver/ToolChains/PS4CPU.cpp
@@ -344,16 +344,7 @@ void tools::PS5cpu::Linker::ConstructJob(Compilation &C, const JobAction &JA,
// pass LTO options to ensure proper codegen, metadata production, etc if
// LTO indeed occurs.
- if (const Arg *A = Args.getLastArg(options::OPT_fthinlto_distributor_EQ)) {
- CmdArgs.push_back(
- Args.MakeArgString("--thinlto-distributor=" + Twine(A->getValue())));
- CmdArgs.push_back(Args.MakeArgString("--thinlto-remote-compiler=" +
- Twine(D.getClangProgramPath())));
-
- for (const auto &A :
- Args.getAllArgValues(options::OPT_Xthinlto_distributor_EQ))
- CmdArgs.push_back(Args.MakeArgString("--thinlto-distributor-arg=" + A));
- }
+ tools::addDTLTOOptions(TC, Args, CmdArgs);
if (Args.hasFlag(options::OPT_funified_lto, options::OPT_fno_unified_lto,
true))
diff --git a/clang/lib/Format/TokenAnnotator.cpp b/clang/lib/Format/TokenAnnotator.cpp
index 25971d2..1d0dfd0b 100644
--- a/clang/lib/Format/TokenAnnotator.cpp
+++ b/clang/lib/Format/TokenAnnotator.cpp
@@ -3791,18 +3791,12 @@ static bool isFunctionDeclarationName(const LangOptions &LangOpts,
if (Current.is(TT_FunctionDeclarationName))
return true;
- if (!Current.Tok.getIdentifierInfo())
+ if (Current.isNoneOf(tok::identifier, tok::kw_operator))
return false;
const auto *Prev = Current.getPreviousNonComment();
assert(Prev);
- if (Prev->is(tok::coloncolon))
- Prev = Prev->Previous;
-
- if (!Prev)
- return false;
-
const auto &Previous = *Prev;
if (const auto *PrevPrev = Previous.getPreviousNonComment();
@@ -3851,6 +3845,8 @@ static bool isFunctionDeclarationName(const LangOptions &LangOpts,
// Find parentheses of parameter list.
if (Current.is(tok::kw_operator)) {
+ if (Line.startsWith(tok::kw_friend))
+ return true;
if (Previous.Tok.getIdentifierInfo() &&
Previous.isNoneOf(tok::kw_return, tok::kw_co_return)) {
return true;
@@ -4411,8 +4407,12 @@ unsigned TokenAnnotator::splitPenalty(const AnnotatedLine &Line,
// breaking after it.
if (Right.is(TT_SelectorName))
return 0;
- if (Left.is(tok::colon) && Left.is(TT_ObjCMethodExpr))
- return Line.MightBeFunctionDecl ? 50 : 500;
+ if (Left.is(tok::colon)) {
+ if (Left.is(TT_ObjCMethodExpr))
+ return Line.MightBeFunctionDecl ? 50 : 500;
+ if (Left.is(TT_ObjCSelector))
+ return 500;
+ }
// In Objective-C type declarations, avoid breaking after the category's
// open paren (we'll prefer breaking after the protocol list's opening
@@ -6295,7 +6295,9 @@ bool TokenAnnotator::canBreakBefore(const AnnotatedLine &Line,
TT_BitFieldColon)) {
return false;
}
- if (Left.is(tok::colon) && Left.isOneOf(TT_DictLiteral, TT_ObjCMethodExpr)) {
+ if (Left.is(tok::colon) && Left.isOneOf(TT_ObjCSelector, TT_ObjCMethodExpr))
+ return true;
+ if (Left.is(tok::colon) && Left.is(TT_DictLiteral)) {
if (Style.isProto()) {
if (!Style.AlwaysBreakBeforeMultilineStrings && Right.isStringLiteral())
return false;
diff --git a/clang/lib/Frontend/CompilerInstance.cpp b/clang/lib/Frontend/CompilerInstance.cpp
index 374138f..e3bf0ea 100644
--- a/clang/lib/Frontend/CompilerInstance.cpp
+++ b/clang/lib/Frontend/CompilerInstance.cpp
@@ -546,14 +546,11 @@ void CompilerInstance::createPreprocessor(TranslationUnitKind TUKind) {
std::string CompilerInstance::getSpecificModuleCachePath(StringRef ModuleHash) {
assert(FileMgr && "Specific module cache path requires a FileManager");
- if (getHeaderSearchOpts().ModuleCachePath.empty())
- return "";
-
// Set up the module path, including the hash for the module-creation options.
SmallString<256> SpecificModuleCache;
normalizeModuleCachePath(*FileMgr, getHeaderSearchOpts().ModuleCachePath,
SpecificModuleCache);
- if (!getHeaderSearchOpts().DisableModuleHash)
+ if (!SpecificModuleCache.empty() && !getHeaderSearchOpts().DisableModuleHash)
llvm::sys::path::append(SpecificModuleCache, ModuleHash);
return std::string(SpecificModuleCache);
}
diff --git a/clang/lib/Headers/CMakeLists.txt b/clang/lib/Headers/CMakeLists.txt
index 32a6be8..1858912 100644
--- a/clang/lib/Headers/CMakeLists.txt
+++ b/clang/lib/Headers/CMakeLists.txt
@@ -4,6 +4,9 @@
set(core_files
builtins.h
float.h
+ __float_float.h
+ __float_header_macro.h
+ __float_infinity_nan.h
inttypes.h
iso646.h
limits.h
diff --git a/clang/lib/Headers/__float_float.h b/clang/lib/Headers/__float_float.h
new file mode 100644
index 0000000..267c072
--- /dev/null
+++ b/clang/lib/Headers/__float_float.h
@@ -0,0 +1,176 @@
+/*===---- __float_float.h --------------------------------------------------===
+ *
+ * Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+ * See https://llvm.org/LICENSE.txt for license information.
+ * SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+ *
+ *===-----------------------------------------------------------------------===
+ */
+
+#ifndef __CLANG_FLOAT_FLOAT_H
+#define __CLANG_FLOAT_FLOAT_H
+
+#if (defined(__MINGW32__) || defined(_MSC_VER) || defined(_AIX)) && \
+ __STDC_HOSTED__
+
+/* Undefine anything that we'll be redefining below. */
+# undef FLT_EVAL_METHOD
+# undef FLT_ROUNDS
+# undef FLT_RADIX
+# undef FLT_MANT_DIG
+# undef DBL_MANT_DIG
+# undef LDBL_MANT_DIG
+#if (defined(__STDC_VERSION__) && __STDC_VERSION__ >= 199901L) || \
+ !defined(__STRICT_ANSI__) || \
+ (defined(__cplusplus) && __cplusplus >= 201103L) || \
+ (__STDC_HOSTED__ && defined(_AIX) && defined(_ALL_SOURCE))
+# undef DECIMAL_DIG
+# endif
+# undef FLT_DIG
+# undef DBL_DIG
+# undef LDBL_DIG
+# undef FLT_MIN_EXP
+# undef DBL_MIN_EXP
+# undef LDBL_MIN_EXP
+# undef FLT_MIN_10_EXP
+# undef DBL_MIN_10_EXP
+# undef LDBL_MIN_10_EXP
+# undef FLT_MAX_EXP
+# undef DBL_MAX_EXP
+# undef LDBL_MAX_EXP
+# undef FLT_MAX_10_EXP
+# undef DBL_MAX_10_EXP
+# undef LDBL_MAX_10_EXP
+# undef FLT_MAX
+# undef DBL_MAX
+# undef LDBL_MAX
+# undef FLT_EPSILON
+# undef DBL_EPSILON
+# undef LDBL_EPSILON
+# undef FLT_MIN
+# undef DBL_MIN
+# undef LDBL_MIN
+#if (defined(__STDC_VERSION__) && __STDC_VERSION__ >= 201112L) || \
+ !defined(__STRICT_ANSI__) || \
+ (defined(__cplusplus) && __cplusplus >= 201703L) || \
+ (__STDC_HOSTED__ && defined(_AIX) && defined(_ALL_SOURCE))
+# undef FLT_TRUE_MIN
+# undef DBL_TRUE_MIN
+# undef LDBL_TRUE_MIN
+# undef FLT_DECIMAL_DIG
+# undef DBL_DECIMAL_DIG
+# undef LDBL_DECIMAL_DIG
+# undef FLT_HAS_SUBNORM
+# undef DBL_HAS_SUBNORM
+# undef LDBL_HAS_SUBNORM
+# endif
+#if (defined(__STDC_VERSION__) && __STDC_VERSION__ >= 202311L) || \
+ !defined(__STRICT_ANSI__)
+# undef FLT_NORM_MAX
+# undef DBL_NORM_MAX
+# undef LDBL_NORM_MAX
+#endif
+#endif
+
+#if (defined(__STDC_VERSION__) && __STDC_VERSION__ >= 202311L) || \
+ !defined(__STRICT_ANSI__)
+# undef FLT_SNAN
+# undef DBL_SNAN
+# undef LDBL_SNAN
+#endif
+
+/* Characteristics of floating point types, C99 5.2.4.2.2 */
+
+#if (defined(__STDC_VERSION__) && __STDC_VERSION__ >= 199901L) || \
+ (defined(__cplusplus) && __cplusplus >= 201103L)
+#define FLT_EVAL_METHOD __FLT_EVAL_METHOD__
+#endif
+#define FLT_ROUNDS (__builtin_flt_rounds())
+#define FLT_RADIX __FLT_RADIX__
+
+#define FLT_MANT_DIG __FLT_MANT_DIG__
+#define DBL_MANT_DIG __DBL_MANT_DIG__
+#define LDBL_MANT_DIG __LDBL_MANT_DIG__
+
+#if (defined(__STDC_VERSION__) && __STDC_VERSION__ >= 199901L) || \
+ !defined(__STRICT_ANSI__) || \
+ (defined(__cplusplus) && __cplusplus >= 201103L) || \
+ (__STDC_HOSTED__ && defined(_AIX) && defined(_ALL_SOURCE))
+# define DECIMAL_DIG __DECIMAL_DIG__
+#endif
+
+#define FLT_DIG __FLT_DIG__
+#define DBL_DIG __DBL_DIG__
+#define LDBL_DIG __LDBL_DIG__
+
+#define FLT_MIN_EXP __FLT_MIN_EXP__
+#define DBL_MIN_EXP __DBL_MIN_EXP__
+#define LDBL_MIN_EXP __LDBL_MIN_EXP__
+
+#define FLT_MIN_10_EXP __FLT_MIN_10_EXP__
+#define DBL_MIN_10_EXP __DBL_MIN_10_EXP__
+#define LDBL_MIN_10_EXP __LDBL_MIN_10_EXP__
+
+#define FLT_MAX_EXP __FLT_MAX_EXP__
+#define DBL_MAX_EXP __DBL_MAX_EXP__
+#define LDBL_MAX_EXP __LDBL_MAX_EXP__
+
+#define FLT_MAX_10_EXP __FLT_MAX_10_EXP__
+#define DBL_MAX_10_EXP __DBL_MAX_10_EXP__
+#define LDBL_MAX_10_EXP __LDBL_MAX_10_EXP__
+
+#define FLT_MAX __FLT_MAX__
+#define DBL_MAX __DBL_MAX__
+#define LDBL_MAX __LDBL_MAX__
+
+#define FLT_EPSILON __FLT_EPSILON__
+#define DBL_EPSILON __DBL_EPSILON__
+#define LDBL_EPSILON __LDBL_EPSILON__
+
+#define FLT_MIN __FLT_MIN__
+#define DBL_MIN __DBL_MIN__
+#define LDBL_MIN __LDBL_MIN__
+
+#if (defined(__STDC_VERSION__) && __STDC_VERSION__ >= 201112L) || \
+ !defined(__STRICT_ANSI__) || \
+ (defined(__cplusplus) && __cplusplus >= 201703L) || \
+ (__STDC_HOSTED__ && defined(_AIX) && defined(_ALL_SOURCE))
+# define FLT_TRUE_MIN __FLT_DENORM_MIN__
+# define DBL_TRUE_MIN __DBL_DENORM_MIN__
+# define LDBL_TRUE_MIN __LDBL_DENORM_MIN__
+# define FLT_DECIMAL_DIG __FLT_DECIMAL_DIG__
+# define DBL_DECIMAL_DIG __DBL_DECIMAL_DIG__
+# define LDBL_DECIMAL_DIG __LDBL_DECIMAL_DIG__
+# define FLT_HAS_SUBNORM __FLT_HAS_DENORM__
+# define DBL_HAS_SUBNORM __DBL_HAS_DENORM__
+# define LDBL_HAS_SUBNORM __LDBL_HAS_DENORM__
+#endif
+
+#if (defined(__STDC_VERSION__) && __STDC_VERSION__ >= 202311L) || \
+ !defined(__STRICT_ANSI__)
+ /* C23 5.2.5.3.2p28 */
+# define FLT_SNAN (__builtin_nansf(""))
+# define DBL_SNAN (__builtin_nans(""))
+# define LDBL_SNAN (__builtin_nansl(""))
+
+ /* C23 5.2.5.3.3p32 */
+# define FLT_NORM_MAX __FLT_NORM_MAX__
+# define DBL_NORM_MAX __DBL_NORM_MAX__
+# define LDBL_NORM_MAX __LDBL_NORM_MAX__
+#endif
+
+#ifdef __STDC_WANT_IEC_60559_TYPES_EXT__
+# define FLT16_MANT_DIG __FLT16_MANT_DIG__
+# define FLT16_DECIMAL_DIG __FLT16_DECIMAL_DIG__
+# define FLT16_DIG __FLT16_DIG__
+# define FLT16_MIN_EXP __FLT16_MIN_EXP__
+# define FLT16_MIN_10_EXP __FLT16_MIN_10_EXP__
+# define FLT16_MAX_EXP __FLT16_MAX_EXP__
+# define FLT16_MAX_10_EXP __FLT16_MAX_10_EXP__
+# define FLT16_MAX __FLT16_MAX__
+# define FLT16_EPSILON __FLT16_EPSILON__
+# define FLT16_MIN __FLT16_MIN__
+# define FLT16_TRUE_MIN __FLT16_TRUE_MIN__
+#endif /* __STDC_WANT_IEC_60559_TYPES_EXT__ */
+
+#endif /* __CLANG_FLOAT_FLOAT_H */
diff --git a/clang/lib/Headers/__float_header_macro.h b/clang/lib/Headers/__float_header_macro.h
new file mode 100644
index 0000000..11b270e
--- /dev/null
+++ b/clang/lib/Headers/__float_header_macro.h
@@ -0,0 +1,12 @@
+/*===---- __float_header_macro.h -------------------------------------------===
+ *
+ * Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+ * See https://llvm.org/LICENSE.txt for license information.
+ * SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+ *
+ *===-----------------------------------------------------------------------===
+ */
+
+#ifndef __CLANG_FLOAT_H
+#define __CLANG_FLOAT_H
+#endif /* __CLANG_FLOAT_H */
diff --git a/clang/lib/Headers/__float_infinity_nan.h b/clang/lib/Headers/__float_infinity_nan.h
new file mode 100644
index 0000000..7e253d0
--- /dev/null
+++ b/clang/lib/Headers/__float_infinity_nan.h
@@ -0,0 +1,20 @@
+/*===---- __float_infinity_nan.h -------------------------------------------===
+ *
+ * Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+ * See https://llvm.org/LICENSE.txt for license information.
+ * SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+ *
+ *===-----------------------------------------------------------------------===
+ */
+
+#ifndef __CLANG_FLOAT_INFINITY_NAN_H
+#define __CLANG_FLOAT_INFINITY_NAN_H
+
+/* C23 5.2.5.3.3p29-30 */
+#undef INFINITY
+#undef NAN
+
+#define INFINITY (__builtin_inff())
+#define NAN (__builtin_nanf(""))
+
+#endif /* __CLANG_FLOAT_INFINITY_NAN_H */
diff --git a/clang/lib/Headers/avx2intrin.h b/clang/lib/Headers/avx2intrin.h
index fdb825f..3cbaaec 100644
--- a/clang/lib/Headers/avx2intrin.h
+++ b/clang/lib/Headers/avx2intrin.h
@@ -1975,10 +1975,9 @@ _mm256_shuffle_epi8(__m256i __a, __m256i __b) {
/// \param __b
/// A 256-bit integer vector.
/// \returns A 256-bit integer vector containing the result.
-static __inline__ __m256i __DEFAULT_FN_ATTRS256
-_mm256_sign_epi8(__m256i __a, __m256i __b)
-{
- return (__m256i)__builtin_ia32_psignb256((__v32qi)__a, (__v32qi)__b);
+static __inline__ __m256i __DEFAULT_FN_ATTRS256_CONSTEXPR
+_mm256_sign_epi8(__m256i __a, __m256i __b) {
+ return (__m256i)__builtin_ia32_psignb256((__v32qi)__a, (__v32qi)__b);
}
/// Sets each element of the result to the corresponding element of the
@@ -1996,10 +1995,9 @@ _mm256_sign_epi8(__m256i __a, __m256i __b)
/// \param __b
/// A 256-bit vector of [16 x i16].
/// \returns A 256-bit vector of [16 x i16] containing the result.
-static __inline__ __m256i __DEFAULT_FN_ATTRS256
-_mm256_sign_epi16(__m256i __a, __m256i __b)
-{
- return (__m256i)__builtin_ia32_psignw256((__v16hi)__a, (__v16hi)__b);
+static __inline__ __m256i __DEFAULT_FN_ATTRS256_CONSTEXPR
+_mm256_sign_epi16(__m256i __a, __m256i __b) {
+ return (__m256i)__builtin_ia32_psignw256((__v16hi)__a, (__v16hi)__b);
}
/// Sets each element of the result to the corresponding element of the
@@ -2017,10 +2015,9 @@ _mm256_sign_epi16(__m256i __a, __m256i __b)
/// \param __b
/// A 256-bit vector of [8 x i32].
/// \returns A 256-bit vector of [8 x i32] containing the result.
-static __inline__ __m256i __DEFAULT_FN_ATTRS256
-_mm256_sign_epi32(__m256i __a, __m256i __b)
-{
- return (__m256i)__builtin_ia32_psignd256((__v8si)__a, (__v8si)__b);
+static __inline__ __m256i __DEFAULT_FN_ATTRS256_CONSTEXPR
+_mm256_sign_epi32(__m256i __a, __m256i __b) {
+ return (__m256i)__builtin_ia32_psignd256((__v8si)__a, (__v8si)__b);
}
/// Shifts each 128-bit half of the 256-bit integer vector \a a left by
diff --git a/clang/lib/Headers/float.h b/clang/lib/Headers/float.h
index 30427c2..82974f6 100644
--- a/clang/lib/Headers/float.h
+++ b/clang/lib/Headers/float.h
@@ -7,13 +7,21 @@
*===-----------------------------------------------------------------------===
*/
-#ifndef __CLANG_FLOAT_H
-#define __CLANG_FLOAT_H
-
#if defined(__MVS__) && __has_include_next(<float.h>)
+#include <__float_header_macro.h>
#include_next <float.h>
#else
+#if !defined(__need_infinity_nan)
+#define __need_float_float
+#if (defined(__STDC_VERSION__) && __STDC_VERSION__ >= 202311L) || \
+ !defined(__STRICT_ANSI__)
+#define __need_infinity_nan
+#endif
+#include <__float_header_macro.h>
+#endif
+
+#ifdef __need_float_float
/* If we're on MinGW, fall back to the system's float.h, which might have
* additional definitions provided for Windows.
* For more details see http://msdn.microsoft.com/en-us/library/y0ybw9fy.aspx
@@ -26,171 +34,15 @@
# include_next <float.h>
-/* Undefine anything that we'll be redefining below. */
-# undef FLT_EVAL_METHOD
-# undef FLT_ROUNDS
-# undef FLT_RADIX
-# undef FLT_MANT_DIG
-# undef DBL_MANT_DIG
-# undef LDBL_MANT_DIG
-#if (defined(__STDC_VERSION__) && __STDC_VERSION__ >= 199901L) || \
- !defined(__STRICT_ANSI__) || \
- (defined(__cplusplus) && __cplusplus >= 201103L) || \
- (__STDC_HOSTED__ && defined(_AIX) && defined(_ALL_SOURCE))
-# undef DECIMAL_DIG
-# endif
-# undef FLT_DIG
-# undef DBL_DIG
-# undef LDBL_DIG
-# undef FLT_MIN_EXP
-# undef DBL_MIN_EXP
-# undef LDBL_MIN_EXP
-# undef FLT_MIN_10_EXP
-# undef DBL_MIN_10_EXP
-# undef LDBL_MIN_10_EXP
-# undef FLT_MAX_EXP
-# undef DBL_MAX_EXP
-# undef LDBL_MAX_EXP
-# undef FLT_MAX_10_EXP
-# undef DBL_MAX_10_EXP
-# undef LDBL_MAX_10_EXP
-# undef FLT_MAX
-# undef DBL_MAX
-# undef LDBL_MAX
-# undef FLT_EPSILON
-# undef DBL_EPSILON
-# undef LDBL_EPSILON
-# undef FLT_MIN
-# undef DBL_MIN
-# undef LDBL_MIN
-#if (defined(__STDC_VERSION__) && __STDC_VERSION__ >= 201112L) || \
- !defined(__STRICT_ANSI__) || \
- (defined(__cplusplus) && __cplusplus >= 201703L) || \
- (__STDC_HOSTED__ && defined(_AIX) && defined(_ALL_SOURCE))
-# undef FLT_TRUE_MIN
-# undef DBL_TRUE_MIN
-# undef LDBL_TRUE_MIN
-# undef FLT_DECIMAL_DIG
-# undef DBL_DECIMAL_DIG
-# undef LDBL_DECIMAL_DIG
-# undef FLT_HAS_SUBNORM
-# undef DBL_HAS_SUBNORM
-# undef LDBL_HAS_SUBNORM
-# endif
-#if (defined(__STDC_VERSION__) && __STDC_VERSION__ >= 202311L) || \
- !defined(__STRICT_ANSI__)
-# undef FLT_NORM_MAX
-# undef DBL_NORM_MAX
-# undef LDBL_NORM_MAX
-#endif
-#endif
-
-#if (defined(__STDC_VERSION__) && __STDC_VERSION__ >= 202311L) || \
- !defined(__STRICT_ANSI__)
-# undef INFINITY
-# undef NAN
-# undef FLT_SNAN
-# undef DBL_SNAN
-# undef LDBL_SNAN
-#endif
-
-/* Characteristics of floating point types, C99 5.2.4.2.2 */
-
-#if (defined(__STDC_VERSION__) && __STDC_VERSION__ >= 199901L) || \
- (defined(__cplusplus) && __cplusplus >= 201103L)
-#define FLT_EVAL_METHOD __FLT_EVAL_METHOD__
#endif
-#define FLT_ROUNDS (__builtin_flt_rounds())
-#define FLT_RADIX __FLT_RADIX__
-#define FLT_MANT_DIG __FLT_MANT_DIG__
-#define DBL_MANT_DIG __DBL_MANT_DIG__
-#define LDBL_MANT_DIG __LDBL_MANT_DIG__
-
-#if (defined(__STDC_VERSION__) && __STDC_VERSION__ >= 199901L) || \
- !defined(__STRICT_ANSI__) || \
- (defined(__cplusplus) && __cplusplus >= 201103L) || \
- (__STDC_HOSTED__ && defined(_AIX) && defined(_ALL_SOURCE))
-# define DECIMAL_DIG __DECIMAL_DIG__
-#endif
-
-#define FLT_DIG __FLT_DIG__
-#define DBL_DIG __DBL_DIG__
-#define LDBL_DIG __LDBL_DIG__
-
-#define FLT_MIN_EXP __FLT_MIN_EXP__
-#define DBL_MIN_EXP __DBL_MIN_EXP__
-#define LDBL_MIN_EXP __LDBL_MIN_EXP__
-
-#define FLT_MIN_10_EXP __FLT_MIN_10_EXP__
-#define DBL_MIN_10_EXP __DBL_MIN_10_EXP__
-#define LDBL_MIN_10_EXP __LDBL_MIN_10_EXP__
-
-#define FLT_MAX_EXP __FLT_MAX_EXP__
-#define DBL_MAX_EXP __DBL_MAX_EXP__
-#define LDBL_MAX_EXP __LDBL_MAX_EXP__
-
-#define FLT_MAX_10_EXP __FLT_MAX_10_EXP__
-#define DBL_MAX_10_EXP __DBL_MAX_10_EXP__
-#define LDBL_MAX_10_EXP __LDBL_MAX_10_EXP__
-
-#define FLT_MAX __FLT_MAX__
-#define DBL_MAX __DBL_MAX__
-#define LDBL_MAX __LDBL_MAX__
-
-#define FLT_EPSILON __FLT_EPSILON__
-#define DBL_EPSILON __DBL_EPSILON__
-#define LDBL_EPSILON __LDBL_EPSILON__
-
-#define FLT_MIN __FLT_MIN__
-#define DBL_MIN __DBL_MIN__
-#define LDBL_MIN __LDBL_MIN__
-
-#if (defined(__STDC_VERSION__) && __STDC_VERSION__ >= 201112L) || \
- !defined(__STRICT_ANSI__) || \
- (defined(__cplusplus) && __cplusplus >= 201703L) || \
- (__STDC_HOSTED__ && defined(_AIX) && defined(_ALL_SOURCE))
-# define FLT_TRUE_MIN __FLT_DENORM_MIN__
-# define DBL_TRUE_MIN __DBL_DENORM_MIN__
-# define LDBL_TRUE_MIN __LDBL_DENORM_MIN__
-# define FLT_DECIMAL_DIG __FLT_DECIMAL_DIG__
-# define DBL_DECIMAL_DIG __DBL_DECIMAL_DIG__
-# define LDBL_DECIMAL_DIG __LDBL_DECIMAL_DIG__
-# define FLT_HAS_SUBNORM __FLT_HAS_DENORM__
-# define DBL_HAS_SUBNORM __DBL_HAS_DENORM__
-# define LDBL_HAS_SUBNORM __LDBL_HAS_DENORM__
+#include <__float_float.h>
+#undef __need_float_float
#endif
-#if (defined(__STDC_VERSION__) && __STDC_VERSION__ >= 202311L) || \
- !defined(__STRICT_ANSI__)
- /* C23 5.2.5.3.2p28 */
-# define FLT_SNAN (__builtin_nansf(""))
-# define DBL_SNAN (__builtin_nans(""))
-# define LDBL_SNAN (__builtin_nansl(""))
-
- /* C23 5.2.5.3.3p29-30 */
-# define INFINITY (__builtin_inff())
-# define NAN (__builtin_nanf(""))
-
- /* C23 5.2.5.3.3p32 */
-# define FLT_NORM_MAX __FLT_NORM_MAX__
-# define DBL_NORM_MAX __DBL_NORM_MAX__
-# define LDBL_NORM_MAX __LDBL_NORM_MAX__
+#ifdef __need_infinity_nan
+#include <__float_infinity_nan.h>
+#undef __need_infinity_nan
#endif
-#ifdef __STDC_WANT_IEC_60559_TYPES_EXT__
-# define FLT16_MANT_DIG __FLT16_MANT_DIG__
-# define FLT16_DECIMAL_DIG __FLT16_DECIMAL_DIG__
-# define FLT16_DIG __FLT16_DIG__
-# define FLT16_MIN_EXP __FLT16_MIN_EXP__
-# define FLT16_MIN_10_EXP __FLT16_MIN_10_EXP__
-# define FLT16_MAX_EXP __FLT16_MAX_EXP__
-# define FLT16_MAX_10_EXP __FLT16_MAX_10_EXP__
-# define FLT16_MAX __FLT16_MAX__
-# define FLT16_EPSILON __FLT16_EPSILON__
-# define FLT16_MIN __FLT16_MIN__
-# define FLT16_TRUE_MIN __FLT16_TRUE_MIN__
-#endif /* __STDC_WANT_IEC_60559_TYPES_EXT__ */
-
#endif /* __MVS__ */
-#endif /* __CLANG_FLOAT_H */
diff --git a/clang/lib/Headers/module.modulemap b/clang/lib/Headers/module.modulemap
index bdf5119..2e4d533 100644
--- a/clang/lib/Headers/module.modulemap
+++ b/clang/lib/Headers/module.modulemap
@@ -171,8 +171,22 @@ module _Builtin_intrinsics [system] [extern_c] {
// that module. The system float.h (if present) will be treated
// as a textual header in the system module.
module _Builtin_float [system] {
- header "float.h"
- export *
+ textual header "float.h"
+
+ explicit module float {
+ header "__float_float.h"
+ export *
+ }
+
+ explicit module header_macro {
+ header "__float_header_macro.h"
+ export *
+ }
+
+ explicit module infinity_nan {
+ header "__float_infinity_nan.h"
+ export *
+ }
}
module _Builtin_inttypes [system] {
diff --git a/clang/lib/Headers/tmmintrin.h b/clang/lib/Headers/tmmintrin.h
index 5d0f20f..cb4b36e 100644
--- a/clang/lib/Headers/tmmintrin.h
+++ b/clang/lib/Headers/tmmintrin.h
@@ -26,9 +26,6 @@
#define __zext128(x) \
(__m128i) __builtin_shufflevector((__v2si)(x), __extension__(__v2si){}, 0, \
1, 2, 3)
-#define __anyext128(x) \
- (__m128i) __builtin_shufflevector((__v2si)(x), __extension__(__v2si){}, 0, \
- 1, -1, -1)
#if defined(__cplusplus) && (__cplusplus >= 201103L)
#define __DEFAULT_FN_ATTRS_CONSTEXPR __DEFAULT_FN_ATTRS constexpr
@@ -641,10 +638,9 @@ _mm_shuffle_pi8(__m64 __a, __m64 __b) {
/// A 128-bit integer vector containing control bytes corresponding to
/// positions in the destination.
/// \returns A 128-bit integer vector containing the resultant values.
-static __inline__ __m128i __DEFAULT_FN_ATTRS
-_mm_sign_epi8(__m128i __a, __m128i __b)
-{
- return (__m128i)__builtin_ia32_psignb128((__v16qi)__a, (__v16qi)__b);
+static __inline__ __m128i __DEFAULT_FN_ATTRS_CONSTEXPR
+_mm_sign_epi8(__m128i __a, __m128i __b) {
+ return (__m128i)__builtin_ia32_psignb128((__v16qi)__a, (__v16qi)__b);
}
/// For each 16-bit integer in the first source operand, perform one of
@@ -667,10 +663,9 @@ _mm_sign_epi8(__m128i __a, __m128i __b)
/// A 128-bit integer vector containing control words corresponding to
/// positions in the destination.
/// \returns A 128-bit integer vector containing the resultant values.
-static __inline__ __m128i __DEFAULT_FN_ATTRS
-_mm_sign_epi16(__m128i __a, __m128i __b)
-{
- return (__m128i)__builtin_ia32_psignw128((__v8hi)__a, (__v8hi)__b);
+static __inline__ __m128i __DEFAULT_FN_ATTRS_CONSTEXPR
+_mm_sign_epi16(__m128i __a, __m128i __b) {
+ return (__m128i)__builtin_ia32_psignw128((__v8hi)__a, (__v8hi)__b);
}
/// For each 32-bit integer in the first source operand, perform one of
@@ -693,10 +688,9 @@ _mm_sign_epi16(__m128i __a, __m128i __b)
/// A 128-bit integer vector containing control doublewords corresponding to
/// positions in the destination.
/// \returns A 128-bit integer vector containing the resultant values.
-static __inline__ __m128i __DEFAULT_FN_ATTRS
-_mm_sign_epi32(__m128i __a, __m128i __b)
-{
- return (__m128i)__builtin_ia32_psignd128((__v4si)__a, (__v4si)__b);
+static __inline__ __m128i __DEFAULT_FN_ATTRS_CONSTEXPR
+_mm_sign_epi32(__m128i __a, __m128i __b) {
+ return (__m128i)__builtin_ia32_psignd128((__v4si)__a, (__v4si)__b);
}
/// For each 8-bit integer in the first source operand, perform one of
@@ -719,11 +713,10 @@ _mm_sign_epi32(__m128i __a, __m128i __b)
/// A 64-bit integer vector containing control bytes corresponding to
/// positions in the destination.
/// \returns A 64-bit integer vector containing the resultant values.
-static __inline__ __m64 __DEFAULT_FN_ATTRS
-_mm_sign_pi8(__m64 __a, __m64 __b)
-{
- return __trunc64(__builtin_ia32_psignb128((__v16qi)__anyext128(__a),
- (__v16qi)__anyext128(__b)));
+static __inline__ __m64 __DEFAULT_FN_ATTRS_CONSTEXPR _mm_sign_pi8(__m64 __a,
+ __m64 __b) {
+ return __trunc64(__builtin_ia32_psignb128((__v16qi)__zext128(__a),
+ (__v16qi)__zext128(__b)));
}
/// For each 16-bit integer in the first source operand, perform one of
@@ -746,11 +739,10 @@ _mm_sign_pi8(__m64 __a, __m64 __b)
/// A 64-bit integer vector containing control words corresponding to
/// positions in the destination.
/// \returns A 64-bit integer vector containing the resultant values.
-static __inline__ __m64 __DEFAULT_FN_ATTRS
-_mm_sign_pi16(__m64 __a, __m64 __b)
-{
- return __trunc64(__builtin_ia32_psignw128((__v8hi)__anyext128(__a),
- (__v8hi)__anyext128(__b)));
+static __inline__ __m64 __DEFAULT_FN_ATTRS_CONSTEXPR _mm_sign_pi16(__m64 __a,
+ __m64 __b) {
+ return __trunc64(
+ __builtin_ia32_psignw128((__v8hi)__zext128(__a), (__v8hi)__zext128(__b)));
}
/// For each 32-bit integer in the first source operand, perform one of
@@ -773,14 +765,12 @@ _mm_sign_pi16(__m64 __a, __m64 __b)
/// A 64-bit integer vector containing two control doublewords corresponding
/// to positions in the destination.
/// \returns A 64-bit integer vector containing the resultant values.
-static __inline__ __m64 __DEFAULT_FN_ATTRS
-_mm_sign_pi32(__m64 __a, __m64 __b)
-{
- return __trunc64(__builtin_ia32_psignd128((__v4si)__anyext128(__a),
- (__v4si)__anyext128(__b)));
+static __inline__ __m64 __DEFAULT_FN_ATTRS_CONSTEXPR _mm_sign_pi32(__m64 __a,
+ __m64 __b) {
+ return __trunc64(
+ __builtin_ia32_psignd128((__v4si)__zext128(__a), (__v4si)__zext128(__b)));
}
-#undef __anyext128
#undef __zext128
#undef __trunc64
#undef __DEFAULT_FN_ATTRS
diff --git a/clang/lib/Lex/HeaderSearch.cpp b/clang/lib/Lex/HeaderSearch.cpp
index 238c5e2..65c324c 100644
--- a/clang/lib/Lex/HeaderSearch.cpp
+++ b/clang/lib/Lex/HeaderSearch.cpp
@@ -2186,6 +2186,8 @@ std::string HeaderSearch::suggestPathToFileForDiagnostics(
void clang::normalizeModuleCachePath(FileManager &FileMgr, StringRef Path,
SmallVectorImpl<char> &NormalizedPath) {
NormalizedPath.assign(Path.begin(), Path.end());
- FileMgr.makeAbsolutePath(NormalizedPath);
- llvm::sys::path::remove_dots(NormalizedPath);
+ if (!NormalizedPath.empty()) {
+ FileMgr.makeAbsolutePath(NormalizedPath);
+ llvm::sys::path::remove_dots(NormalizedPath);
+ }
}
diff --git a/clang/lib/Sema/SemaChecking.cpp b/clang/lib/Sema/SemaChecking.cpp
index 2990fd6..f99c01e 100644
--- a/clang/lib/Sema/SemaChecking.cpp
+++ b/clang/lib/Sema/SemaChecking.cpp
@@ -1498,6 +1498,24 @@ static void builtinAllocaAddrSpace(Sema &S, CallExpr *TheCall) {
TheCall->setType(S.Context.getPointerType(RT));
}
+static bool checkBuiltinInferAllocToken(Sema &S, CallExpr *TheCall) {
+ if (S.checkArgCountAtLeast(TheCall, 1))
+ return true;
+
+ for (Expr *Arg : TheCall->arguments()) {
+ // If the argument is dependent on a template parameter, we can't resolve it now.
+ if (Arg->isTypeDependent() || Arg->isValueDependent())
+ continue;
+ // Reject void types.
+ QualType ArgTy = Arg->IgnoreParenImpCasts()->getType();
+ if (ArgTy->isVoidType())
+ return S.Diag(Arg->getBeginLoc(), diag::err_param_with_void_type);
+ }
+
+ TheCall->setType(S.Context.UnsignedLongLongTy);
+ return false;
+}
+
namespace {
enum PointerAuthOpKind {
PAO_Strip,
@@ -2779,6 +2797,10 @@ Sema::CheckBuiltinFunctionCall(FunctionDecl *FDecl, unsigned BuiltinID,
builtinAllocaAddrSpace(*this, TheCall);
}
break;
+ case Builtin::BI__builtin_infer_alloc_token:
+ if (checkBuiltinInferAllocToken(*this, TheCall))
+ return ExprError();
+ break;
case Builtin::BI__arithmetic_fence:
if (BuiltinArithmeticFence(TheCall))
return ExprError();
diff --git a/clang/lib/Sema/SemaConcept.cpp b/clang/lib/Sema/SemaConcept.cpp
index a1163e9..f04cc45 100644
--- a/clang/lib/Sema/SemaConcept.cpp
+++ b/clang/lib/Sema/SemaConcept.cpp
@@ -385,6 +385,28 @@ public:
return inherited::TraverseStmt(E->getReplacement());
}
+ bool TraverseTemplateName(TemplateName Template) {
+ if (auto *TTP = dyn_cast_if_present<TemplateTemplateParmDecl>(
+ Template.getAsTemplateDecl());
+ TTP && TTP->getDepth() < TemplateArgs.getNumLevels()) {
+ if (!TemplateArgs.hasTemplateArgument(TTP->getDepth(),
+ TTP->getPosition()))
+ return true;
+
+ TemplateArgument Arg = TemplateArgs(TTP->getDepth(), TTP->getPosition());
+ if (TTP->isParameterPack() && SemaRef.ArgPackSubstIndex) {
+ assert(Arg.getKind() == TemplateArgument::Pack &&
+ "Missing argument pack");
+ Arg = SemaRef.getPackSubstitutedTemplateArgument(Arg);
+ }
+ assert(!Arg.getAsTemplate().isNull() &&
+ "Null template template argument");
+ UsedTemplateArgs.push_back(
+ SemaRef.Context.getCanonicalTemplateArgument(Arg));
+ }
+ return inherited::TraverseTemplateName(Template);
+ }
+
void VisitConstraint(const NormalizedConstraintWithParamMapping &Constraint) {
if (!Constraint.hasParameterMapping()) {
for (const auto &List : TemplateArgs)
@@ -2678,8 +2700,9 @@ FormulaType SubsumptionChecker::Normalize(const NormalizedConstraint &NC) {
});
if (Compound.getCompoundKind() == FormulaType::Kind) {
+ unsigned SizeLeft = Left.size();
Res = std::move(Left);
- Res.reserve(Left.size() + Right.size());
+ Res.reserve(SizeLeft + Right.size());
std::for_each(std::make_move_iterator(Right.begin()),
std::make_move_iterator(Right.end()), Add);
return Res;
diff --git a/clang/lib/Sema/SemaHLSL.cpp b/clang/lib/Sema/SemaHLSL.cpp
index 5b3e89f..2a485da 100644
--- a/clang/lib/Sema/SemaHLSL.cpp
+++ b/clang/lib/Sema/SemaHLSL.cpp
@@ -770,23 +770,81 @@ void SemaHLSL::ActOnTopLevelFunction(FunctionDecl *FD) {
}
}
-bool SemaHLSL::isSemanticValid(FunctionDecl *FD, DeclaratorDecl *D) {
- const auto *AnnotationAttr = D->getAttr<HLSLAnnotationAttr>();
- if (AnnotationAttr) {
- CheckSemanticAnnotation(FD, D, AnnotationAttr);
- return true;
+HLSLSemanticAttr *SemaHLSL::createSemantic(const SemanticInfo &Info,
+ DeclaratorDecl *TargetDecl) {
+ std::string SemanticName = Info.Semantic->getAttrName()->getName().upper();
+
+ if (SemanticName == "SV_DISPATCHTHREADID") {
+ return createSemanticAttr<HLSLSV_DispatchThreadIDAttr>(
+ *Info.Semantic, TargetDecl, Info.Index);
+ } else if (SemanticName == "SV_GROUPINDEX") {
+ return createSemanticAttr<HLSLSV_GroupIndexAttr>(*Info.Semantic, TargetDecl,
+ Info.Index);
+ } else if (SemanticName == "SV_GROUPTHREADID") {
+ return createSemanticAttr<HLSLSV_GroupThreadIDAttr>(*Info.Semantic,
+ TargetDecl, Info.Index);
+ } else if (SemanticName == "SV_GROUPID") {
+ return createSemanticAttr<HLSLSV_GroupIDAttr>(*Info.Semantic, TargetDecl,
+ Info.Index);
+ } else if (SemanticName == "SV_POSITION") {
+ return createSemanticAttr<HLSLSV_PositionAttr>(*Info.Semantic, TargetDecl,
+ Info.Index);
+ } else
+ Diag(Info.Semantic->getLoc(), diag::err_hlsl_unknown_semantic)
+ << *Info.Semantic;
+
+ return nullptr;
+}
+
+bool SemaHLSL::determineActiveSemanticOnScalar(FunctionDecl *FD,
+ DeclaratorDecl *D,
+ SemanticInfo &ActiveSemantic) {
+ if (ActiveSemantic.Semantic == nullptr) {
+ ActiveSemantic.Semantic = D->getAttr<HLSLSemanticAttr>();
+ if (ActiveSemantic.Semantic &&
+ ActiveSemantic.Semantic->isSemanticIndexExplicit())
+ ActiveSemantic.Index = ActiveSemantic.Semantic->getSemanticIndex();
+ }
+
+ if (!ActiveSemantic.Semantic) {
+ Diag(D->getLocation(), diag::err_hlsl_missing_semantic_annotation);
+ return false;
+ }
+
+ auto *A = createSemantic(ActiveSemantic, D);
+ if (!A)
+ return false;
+
+ checkSemanticAnnotation(FD, D, A);
+ FD->addAttr(A);
+ return true;
+}
+
+bool SemaHLSL::determineActiveSemantic(FunctionDecl *FD, DeclaratorDecl *D,
+ SemanticInfo &ActiveSemantic) {
+ if (ActiveSemantic.Semantic == nullptr) {
+ ActiveSemantic.Semantic = D->getAttr<HLSLSemanticAttr>();
+ if (ActiveSemantic.Semantic &&
+ ActiveSemantic.Semantic->isSemanticIndexExplicit())
+ ActiveSemantic.Index = ActiveSemantic.Semantic->getSemanticIndex();
}
const Type *T = D->getType()->getUnqualifiedDesugaredType();
const RecordType *RT = dyn_cast<RecordType>(T);
if (!RT)
- return false;
+ return determineActiveSemanticOnScalar(FD, D, ActiveSemantic);
const RecordDecl *RD = RT->getDecl();
for (FieldDecl *Field : RD->fields()) {
- if (!isSemanticValid(FD, Field))
+ SemanticInfo Info = ActiveSemantic;
+ if (!determineActiveSemantic(FD, Field, Info)) {
+ Diag(Field->getLocation(), diag::note_hlsl_semantic_used_here) << Field;
return false;
+ }
+ if (ActiveSemantic.Semantic)
+ ActiveSemantic = Info;
}
+
return true;
}
@@ -853,8 +911,11 @@ void SemaHLSL::CheckEntryPoint(FunctionDecl *FD) {
}
for (ParmVarDecl *Param : FD->parameters()) {
- if (!isSemanticValid(FD, Param)) {
- Diag(FD->getLocation(), diag::err_hlsl_missing_semantic_annotation);
+ SemanticInfo ActiveSemantic;
+ ActiveSemantic.Semantic = nullptr;
+ ActiveSemantic.Index = std::nullopt;
+
+ if (!determineActiveSemantic(FD, Param, ActiveSemantic)) {
Diag(Param->getLocation(), diag::note_previous_decl) << Param;
FD->setInvalidDecl();
}
@@ -862,31 +923,31 @@ void SemaHLSL::CheckEntryPoint(FunctionDecl *FD) {
// FIXME: Verify return type semantic annotation.
}
-void SemaHLSL::CheckSemanticAnnotation(
- FunctionDecl *EntryPoint, const Decl *Param,
- const HLSLAnnotationAttr *AnnotationAttr) {
+void SemaHLSL::checkSemanticAnnotation(FunctionDecl *EntryPoint,
+ const Decl *Param,
+ const HLSLSemanticAttr *SemanticAttr) {
auto *ShaderAttr = EntryPoint->getAttr<HLSLShaderAttr>();
assert(ShaderAttr && "Entry point has no shader attribute");
llvm::Triple::EnvironmentType ST = ShaderAttr->getType();
- switch (AnnotationAttr->getKind()) {
+ switch (SemanticAttr->getKind()) {
case attr::HLSLSV_DispatchThreadID:
case attr::HLSLSV_GroupIndex:
case attr::HLSLSV_GroupThreadID:
case attr::HLSLSV_GroupID:
if (ST == llvm::Triple::Compute)
return;
- DiagnoseAttrStageMismatch(AnnotationAttr, ST, {llvm::Triple::Compute});
+ DiagnoseAttrStageMismatch(SemanticAttr, ST, {llvm::Triple::Compute});
break;
case attr::HLSLSV_Position:
// TODO(#143523): allow use on other shader types & output once the overall
// semantic logic is implemented.
if (ST == llvm::Triple::Pixel)
return;
- DiagnoseAttrStageMismatch(AnnotationAttr, ST, {llvm::Triple::Pixel});
+ DiagnoseAttrStageMismatch(SemanticAttr, ST, {llvm::Triple::Pixel});
break;
default:
- llvm_unreachable("Unknown HLSLAnnotationAttr");
+ llvm_unreachable("Unknown SemanticAttr");
}
}
@@ -1661,28 +1722,30 @@ void SemaHLSL::diagnoseSystemSemanticAttr(Decl *D, const ParsedAttr &AL,
diagnoseInputIDType(ValueType, AL);
if (IsOutput)
Diag(AL.getLoc(), diag::err_hlsl_semantic_output_not_supported) << AL;
- Attribute = createSemanticAttr<HLSLSV_DispatchThreadIDAttr>(AL, Index);
+ Attribute =
+ createSemanticAttr<HLSLSV_DispatchThreadIDAttr>(AL, nullptr, Index);
} else if (SemanticName == "SV_GROUPINDEX") {
if (IsOutput)
Diag(AL.getLoc(), diag::err_hlsl_semantic_output_not_supported) << AL;
- Attribute = createSemanticAttr<HLSLSV_GroupIndexAttr>(AL, Index);
+ Attribute = createSemanticAttr<HLSLSV_GroupIndexAttr>(AL, nullptr, Index);
} else if (SemanticName == "SV_GROUPTHREADID") {
diagnoseInputIDType(ValueType, AL);
if (IsOutput)
Diag(AL.getLoc(), diag::err_hlsl_semantic_output_not_supported) << AL;
- Attribute = createSemanticAttr<HLSLSV_GroupThreadIDAttr>(AL, Index);
+ Attribute =
+ createSemanticAttr<HLSLSV_GroupThreadIDAttr>(AL, nullptr, Index);
} else if (SemanticName == "SV_GROUPID") {
diagnoseInputIDType(ValueType, AL);
if (IsOutput)
Diag(AL.getLoc(), diag::err_hlsl_semantic_output_not_supported) << AL;
- Attribute = createSemanticAttr<HLSLSV_GroupIDAttr>(AL, Index);
+ Attribute = createSemanticAttr<HLSLSV_GroupIDAttr>(AL, nullptr, Index);
} else if (SemanticName == "SV_POSITION") {
const auto *VT = ValueType->getAs<VectorType>();
if (!ValueType->hasFloatingRepresentation() ||
(VT && VT->getNumElements() > 4))
Diag(AL.getLoc(), diag::err_hlsl_attr_invalid_type)
<< AL << "float/float1/float2/float3/float4";
- Attribute = createSemanticAttr<HLSLSV_PositionAttr>(AL, Index);
+ Attribute = createSemanticAttr<HLSLSV_PositionAttr>(AL, nullptr, Index);
} else
Diag(AL.getLoc(), diag::err_hlsl_unknown_semantic) << AL;
diff --git a/clang/lib/Sema/SemaOpenACC.cpp b/clang/lib/Sema/SemaOpenACC.cpp
index ee9b2b3..f0f3832 100644
--- a/clang/lib/Sema/SemaOpenACC.cpp
+++ b/clang/lib/Sema/SemaOpenACC.cpp
@@ -3086,6 +3086,7 @@ bool SemaOpenACC::CreateReductionCombinerRecipe(
case OpenACCReductionOperator::Invalid:
llvm_unreachable("Invalid should have been caught above");
}
+ llvm_unreachable("Unhandled case");
};
auto tryCombiner = [&, this](DeclRefExpr *LHSDRE, DeclRefExpr *RHSDRE,
diff --git a/clang/lib/Sema/SemaOpenMP.cpp b/clang/lib/Sema/SemaOpenMP.cpp
index 5b5b1b6..6d5cb0f 100644
--- a/clang/lib/Sema/SemaOpenMP.cpp
+++ b/clang/lib/Sema/SemaOpenMP.cpp
@@ -7246,7 +7246,9 @@ void SemaOpenMP::ActOnStartOfFunctionDefinitionInOpenMPDeclareVariantScope(
FunctionDecl *UDecl = nullptr;
if (IsTemplated && isa<FunctionTemplateDecl>(CandidateDecl)) {
auto *FTD = cast<FunctionTemplateDecl>(CandidateDecl);
- if (FTD->getTemplateParameters()->size() == TemplateParamLists.size())
+ // FIXME: Should this compare the template parameter lists on all levels?
+ if (SemaRef.Context.isSameTemplateParameterList(
+ FTD->getTemplateParameters(), TemplateParamLists.back()))
UDecl = FTD->getTemplatedDecl();
} else if (!IsTemplated)
UDecl = dyn_cast<FunctionDecl>(CandidateDecl);
diff --git a/clang/lib/Tooling/DependencyScanning/DependencyScannerImpl.cpp b/clang/lib/Tooling/DependencyScanning/DependencyScannerImpl.cpp
index 05d5669..42f52d0 100644
--- a/clang/lib/Tooling/DependencyScanning/DependencyScannerImpl.cpp
+++ b/clang/lib/Tooling/DependencyScanning/DependencyScannerImpl.cpp
@@ -524,13 +524,12 @@ bool initializeScanCompilerInstance(
// Use the dependency scanning optimized file system if requested to do so.
if (DepFS) {
DepFS->resetBypassedPathPrefix();
- if (!ScanInstance.getHeaderSearchOpts().ModuleCachePath.empty()) {
- SmallString<256> ModulesCachePath;
- normalizeModuleCachePath(
- ScanInstance.getFileManager(),
- ScanInstance.getHeaderSearchOpts().ModuleCachePath, ModulesCachePath);
+ SmallString<256> ModulesCachePath;
+ normalizeModuleCachePath(ScanInstance.getFileManager(),
+ ScanInstance.getHeaderSearchOpts().ModuleCachePath,
+ ModulesCachePath);
+ if (!ModulesCachePath.empty())
DepFS->setBypassedPathPrefix(ModulesCachePath);
- }
ScanInstance.setDependencyDirectivesGetter(
std::make_unique<ScanningDependencyDirectivesGetter>(