Diffstat (limited to 'clang')
 clang/docs/AllocToken.rst                       | 43
 clang/docs/ReleaseNotes.rst                     |  3
 clang/include/clang/Basic/BuiltinsX86.td        | 54
 clang/include/clang/Driver/Options.td           |  6
 clang/lib/AST/ByteCode/Interp.cpp               | 32
 clang/lib/AST/ByteCode/InterpBuiltin.cpp        | 83
 clang/lib/AST/ExprConstant.cpp                  | 83
 clang/lib/CIR/Dialect/Transforms/FlattenCFG.cpp |  4
 clang/lib/CodeGen/BackendUtil.cpp               | 28
 clang/lib/CodeGen/CGBuiltin.cpp                 |  9
 clang/lib/Headers/avx512vlbwintrin.h            | 20
 clang/lib/Sema/SemaConcept.cpp                  |  9
 clang/test/AST/ByteCode/cxx20.cpp               | 18
 clang/test/CIR/CodeGen/try-catch.cpp            | 30
 clang/test/CodeGen/AArch64/neon-across.c        | 56
 clang/test/CodeGen/X86/avx512vlbw-builtins.c    | 38
 clang/test/CodeGen/lto-newpm-pipeline.c         |  8
 clang/test/CodeGenCXX/alloc-token-builtin.cpp   | 97
 clang/test/Driver/sycl.c                        |  5
 clang/test/SemaTemplate/concepts.cpp            | 26
 20 files changed, 549 insertions(+), 103 deletions(-)
diff --git a/clang/docs/AllocToken.rst b/clang/docs/AllocToken.rst
index b65e18c..1a740e5 100644
--- a/clang/docs/AllocToken.rst
+++ b/clang/docs/AllocToken.rst
@@ -49,6 +49,39 @@ change or removal. These may (experimentally) be selected with ``-Xclang
* ``increment``: This mode assigns a simple, incrementally increasing token ID
to each allocation site.
+The following command-line options affect generated token IDs:
+
+* ``-falloc-token-max=<N>``
+ Configures the maximum number of tokens. There is no maximum by default
+ (token IDs are bounded by ``SIZE_MAX``).
+
+Querying Token IDs with ``__builtin_infer_alloc_token``
+=======================================================
+
+For use cases where the token ID must be known at compile time, Clang provides
+a builtin function:
+
+.. code-block:: c
+
+ size_t __builtin_infer_alloc_token(<args>, ...);
+
+This builtin returns the token ID inferred from its argument expressions,
+which mirror the arguments normally passed to an allocation function. The
+argument expressions are **unevaluated**, so the builtin can be used with
+expressions that would have side effects, without any runtime impact.
+
+For example, it can be used as follows:
+
+.. code-block:: c
+
+ struct MyType { ... };
+ void *__partition_alloc(size_t size, size_t partition);
+ #define partition_alloc(...) __partition_alloc(__VA_ARGS__, __builtin_infer_alloc_token(__VA_ARGS__))
+
+ void foo(void) {
+ MyType *x = partition_alloc(sizeof(*x));
+ }
+
Allocation Token Instrumentation
================================
@@ -70,16 +103,6 @@ example:
// Instrumented:
ptr = __alloc_token_malloc(size, <token id>);
-The following command-line options affect generated token IDs:
-
-* ``-falloc-token-max=<N>``
- Configures the maximum number of tokens. No max by default (tokens bounded
- by ``SIZE_MAX``).
-
- .. code-block:: console
-
- % clang++ -fsanitize=alloc-token -falloc-token-max=512 example.cc
-
Runtime Interface
-----------------
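
A hedged sketch of the compile-time query these docs describe, mirroring the
new ``get_token`` template in ``clang/test/CodeGenCXX/alloc-token-builtin.cpp``
(assumes a token mode that permits constant evaluation, which the stateful
``random`` mode does not; ``MyBucket`` is a hypothetical type):

    // Minimal sketch: the builtin is usable in constant expressions,
    // so a token ID can be computed once at compile time.
    template <typename T>
    constexpr unsigned long get_token() {
      return __builtin_infer_alloc_token(sizeof(T));
    }

    struct MyBucket { void *next; };  // hypothetical type
    constexpr unsigned long kBucketToken = get_token<MyBucket>();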
diff --git a/clang/docs/ReleaseNotes.rst b/clang/docs/ReleaseNotes.rst
index e6e33e7..add1582 100644
--- a/clang/docs/ReleaseNotes.rst
+++ b/clang/docs/ReleaseNotes.rst
@@ -281,6 +281,9 @@ Non-comprehensive list of changes in this release
allocator-level heap organization strategies. A feature to instrument all
allocation functions with a token ID can be enabled via the
``-fsanitize=alloc-token`` flag.
+- A builtin ``__builtin_infer_alloc_token(<args>, ...)`` is provided to allow
+ compile-time querying of allocation token IDs, where the builtin arguments
+ mirror those normally passed to an allocation function.
- Clang now rejects the invalid use of ``constexpr`` with ``auto`` and an explicit type in C. (#GH163090)
diff --git a/clang/include/clang/Basic/BuiltinsX86.td b/clang/include/clang/Basic/BuiltinsX86.td
index 0c85e28..500aa85 100644
--- a/clang/include/clang/Basic/BuiltinsX86.td
+++ b/clang/include/clang/Basic/BuiltinsX86.td
@@ -1282,81 +1282,99 @@ let Features = "avx512bw", Attributes = [NoThrow, Const, Constexpr] in {
def knotdi : X86Builtin<"unsigned long long int(unsigned long long int)">;
}
-let Features = "avx512vl,avx512bw", Attributes = [NoThrow, Const, RequiredVectorWidth<128>] in {
+let Features = "avx512vl,avx512bw",
+ Attributes = [NoThrow, Const, Constexpr, RequiredVectorWidth<128>] in {
def cmpb128_mask : X86Builtin<"unsigned short(_Vector<16, char>, _Vector<16, char>, _Constant int, unsigned short)">;
}
-let Features = "avx512vl", Attributes = [NoThrow, Const, RequiredVectorWidth<128>] in {
+let Features = "avx512vl",
+ Attributes = [NoThrow, Const, Constexpr, RequiredVectorWidth<128>] in {
def cmpd128_mask : X86Builtin<"unsigned char(_Vector<4, int>, _Vector<4, int>, _Constant int, unsigned char)">;
def cmpq128_mask : X86Builtin<"unsigned char(_Vector<2, long long int>, _Vector<2, long long int>, _Constant int, unsigned char)">;
}
-let Features = "avx512vl,avx512bw", Attributes = [NoThrow, Const, RequiredVectorWidth<128>] in {
+let Features = "avx512vl,avx512bw",
+ Attributes = [NoThrow, Const, Constexpr, RequiredVectorWidth<128>] in {
def cmpw128_mask : X86Builtin<"unsigned char(_Vector<8, short>, _Vector<8, short>, _Constant int, unsigned char)">;
}
-let Features = "avx512vl,avx512bw", Attributes = [NoThrow, Const, RequiredVectorWidth<256>] in {
+let Features = "avx512vl,avx512bw",
+ Attributes = [NoThrow, Const, Constexpr, RequiredVectorWidth<256>] in {
def cmpb256_mask : X86Builtin<"unsigned int(_Vector<32, char>, _Vector<32, char>, _Constant int, unsigned int)">;
}
-let Features = "avx512vl", Attributes = [NoThrow, Const, RequiredVectorWidth<256>] in {
+let Features = "avx512vl",
+ Attributes = [NoThrow, Const, Constexpr, RequiredVectorWidth<256>] in {
def cmpd256_mask : X86Builtin<"unsigned char(_Vector<8, int>, _Vector<8, int>, _Constant int, unsigned char)">;
def cmpq256_mask : X86Builtin<"unsigned char(_Vector<4, long long int>, _Vector<4, long long int>, _Constant int, unsigned char)">;
}
-let Features = "avx512vl,avx512bw", Attributes = [NoThrow, Const, RequiredVectorWidth<256>] in {
+let Features = "avx512vl,avx512bw",
+ Attributes = [NoThrow, Const, Constexpr, RequiredVectorWidth<256>] in {
def cmpw256_mask : X86Builtin<"unsigned short(_Vector<16, short>, _Vector<16, short>, _Constant int, unsigned short)">;
}
-let Features = "avx512bw", Attributes = [NoThrow, Const, RequiredVectorWidth<512>] in {
+let Features = "avx512bw",
+ Attributes = [NoThrow, Const, Constexpr, RequiredVectorWidth<512>] in {
def cmpb512_mask : X86Builtin<"unsigned long long int(_Vector<64, char>, _Vector<64, char>, _Constant int, unsigned long long int)">;
}
-let Features = "avx512f", Attributes = [NoThrow, Const, RequiredVectorWidth<512>] in {
+let Features = "avx512f",
+ Attributes = [NoThrow, Const, Constexpr, RequiredVectorWidth<512>] in {
def cmpd512_mask : X86Builtin<"unsigned short(_Vector<16, int>, _Vector<16, int>, _Constant int, unsigned short)">;
def cmpq512_mask : X86Builtin<"unsigned char(_Vector<8, long long int>, _Vector<8, long long int>, _Constant int, unsigned char)">;
}
-let Features = "avx512bw", Attributes = [NoThrow, Const, RequiredVectorWidth<512>] in {
+let Features = "avx512bw",
+ Attributes = [NoThrow, Const, Constexpr, RequiredVectorWidth<512>] in {
def cmpw512_mask : X86Builtin<"unsigned int(_Vector<32, short>, _Vector<32, short>, _Constant int, unsigned int)">;
}
-let Features = "avx512vl,avx512bw", Attributes = [NoThrow, Const, RequiredVectorWidth<128>] in {
+let Features = "avx512vl,avx512bw",
+ Attributes = [NoThrow, Const, Constexpr, RequiredVectorWidth<128>] in {
def ucmpb128_mask : X86Builtin<"unsigned short(_Vector<16, char>, _Vector<16, char>, _Constant int, unsigned short)">;
}
-let Features = "avx512vl", Attributes = [NoThrow, Const, RequiredVectorWidth<128>] in {
+let Features = "avx512vl",
+ Attributes = [NoThrow, Const, Constexpr, RequiredVectorWidth<128>] in {
def ucmpd128_mask : X86Builtin<"unsigned char(_Vector<4, int>, _Vector<4, int>, _Constant int, unsigned char)">;
def ucmpq128_mask : X86Builtin<"unsigned char(_Vector<2, long long int>, _Vector<2, long long int>, _Constant int, unsigned char)">;
}
-let Features = "avx512vl,avx512bw", Attributes = [NoThrow, Const, RequiredVectorWidth<128>] in {
+let Features = "avx512vl,avx512bw",
+ Attributes = [NoThrow, Const, Constexpr, RequiredVectorWidth<128>] in {
def ucmpw128_mask : X86Builtin<"unsigned char(_Vector<8, short>, _Vector<8, short>, _Constant int, unsigned char)">;
}
-let Features = "avx512vl,avx512bw", Attributes = [NoThrow, Const, RequiredVectorWidth<256>] in {
+let Features = "avx512vl,avx512bw",
+ Attributes = [NoThrow, Const, Constexpr, RequiredVectorWidth<256>] in {
def ucmpb256_mask : X86Builtin<"unsigned int(_Vector<32, char>, _Vector<32, char>, _Constant int, unsigned int)">;
}
-let Features = "avx512vl", Attributes = [NoThrow, Const, RequiredVectorWidth<256>] in {
+let Features = "avx512vl",
+ Attributes = [NoThrow, Const, Constexpr, RequiredVectorWidth<256>] in {
def ucmpd256_mask : X86Builtin<"unsigned char(_Vector<8, int>, _Vector<8, int>, _Constant int, unsigned char)">;
def ucmpq256_mask : X86Builtin<"unsigned char(_Vector<4, long long int>, _Vector<4, long long int>, _Constant int, unsigned char)">;
}
-let Features = "avx512vl,avx512bw", Attributes = [NoThrow, Const, RequiredVectorWidth<256>] in {
+let Features = "avx512vl,avx512bw",
+ Attributes = [NoThrow, Const, Constexpr, RequiredVectorWidth<256>] in {
def ucmpw256_mask : X86Builtin<"unsigned short(_Vector<16, short>, _Vector<16, short>, _Constant int, unsigned short)">;
}
-let Features = "avx512bw", Attributes = [NoThrow, Const, RequiredVectorWidth<512>] in {
+let Features = "avx512bw",
+ Attributes = [NoThrow, Const, Constexpr, RequiredVectorWidth<512>] in {
def ucmpb512_mask : X86Builtin<"unsigned long long int(_Vector<64, char>, _Vector<64, char>, _Constant int, unsigned long long int)">;
}
-let Features = "avx512f", Attributes = [NoThrow, Const, RequiredVectorWidth<512>] in {
+let Features = "avx512f",
+ Attributes = [NoThrow, Const, Constexpr, RequiredVectorWidth<512>] in {
def ucmpd512_mask : X86Builtin<"unsigned short(_Vector<16, int>, _Vector<16, int>, _Constant int, unsigned short)">;
def ucmpq512_mask : X86Builtin<"unsigned char(_Vector<8, long long int>, _Vector<8, long long int>, _Constant int, unsigned char)">;
}
-let Features = "avx512bw", Attributes = [NoThrow, Const, RequiredVectorWidth<512>] in {
+let Features = "avx512bw",
+ Attributes = [NoThrow, Const, Constexpr, RequiredVectorWidth<512>] in {
def ucmpw512_mask : X86Builtin<"unsigned int(_Vector<32, short>, _Vector<32, short>, _Constant int, unsigned int)">;
}
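
With ``Constexpr`` added to these builtin definitions, the corresponding
mask-compare intrinsics fold in constant expressions; a minimal sketch under
assumed flags ``-mavx512vl -mavx512bw`` (values taken from the new tests;
predicate ``0`` is ``_MM_CMPINT_EQ``):

    // Sketch: compile-time evaluation of a 128-bit signed byte compare.
    #include <immintrin.h>

    static_assert(_mm_cmp_epi8_mask(
                      (__m128i)(__v16qi){5, 3, 7, 2, 9, 3, 7, 1,
                                         5, 4, 8, 2, 9, 6, 7, 5},
                      (__m128i)(__v16qi){5, 2, 7, 3, 9, 4, 6, 1,
                                         5, 3, 8, 1, 9, 5, 7, 5},
                      0) == (__mmask16)0xd595,
                  "per-lane equality mask");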
diff --git a/clang/include/clang/Driver/Options.td b/clang/include/clang/Driver/Options.td
index bca8b26..8784c9d 100644
--- a/clang/include/clang/Driver/Options.td
+++ b/clang/include/clang/Driver/Options.td
@@ -5980,10 +5980,10 @@ def : Flag<["-"], "nocudainc">, Alias<no_offload_inc>;
def no_offloadlib
: Flag<["--"], "no-offloadlib">,
MarshallingInfoFlag<LangOpts<"NoGPULib">>,
- Visibility<[ClangOption, CC1Option, FlangOption, FC1Option]>,
- HelpText<"Do not link device library for CUDA/HIP device compilation">;
+ Visibility<[ClangOption, CC1Option, CLOption, FlangOption, FC1Option]>,
+ HelpText<"Do not link device library for CUDA/HIP/SYCL device compilation">;
def offloadlib : Flag<["--"], "offloadlib">,
- Visibility<[ClangOption, CC1Option, FlangOption, FC1Option]>,
+ Visibility<[ClangOption, CC1Option, CLOption, FlangOption, FC1Option]>,
HelpText<"Link device libraries for GPU device compilation">;
def : Flag<["-"], "nogpulib">,
Alias<no_offloadlib>,
diff --git a/clang/lib/AST/ByteCode/Interp.cpp b/clang/lib/AST/ByteCode/Interp.cpp
index d640be0..a2fb0fb 100644
--- a/clang/lib/AST/ByteCode/Interp.cpp
+++ b/clang/lib/AST/ByteCode/Interp.cpp
@@ -1651,8 +1651,8 @@ static bool GetDynamicDecl(InterpState &S, CodePtr OpPC, Pointer TypePtr,
QualType DynamicType = TypePtr.getType();
if (TypePtr.isStatic() || TypePtr.isConst()) {
- const VarDecl *VD = TypePtr.getDeclDesc()->asVarDecl();
- if (!VD->isConstexpr()) {
+ if (const VarDecl *VD = TypePtr.getDeclDesc()->asVarDecl();
+ VD && !VD->isConstexpr()) {
const Expr *E = S.Current->getExpr(OpPC);
APValue V = TypePtr.toAPValue(S.getASTContext());
QualType TT = S.getASTContext().getLValueReferenceType(DynamicType);
@@ -1683,20 +1683,6 @@ bool CallVirt(InterpState &S, CodePtr OpPC, const Function *Func,
Pointer &ThisPtr = S.Stk.peek<Pointer>(ThisOffset);
const FunctionDecl *Callee = Func->getDecl();
- if (!Func->isFullyCompiled())
- compileFunction(S, Func);
-
- // C++2a [class.abstract]p6:
- // the effect of making a virtual call to a pure virtual function [...] is
- // undefined
- if (Callee->isPureVirtual()) {
- S.FFDiag(S.Current->getSource(OpPC), diag::note_constexpr_pure_virtual_call,
- 1)
- << Callee;
- S.Note(Callee->getLocation(), diag::note_declared_at);
- return false;
- }
-
const CXXRecordDecl *DynamicDecl = nullptr;
if (!GetDynamicDecl(S, OpPC, ThisPtr, DynamicDecl))
return false;
@@ -1706,7 +1692,8 @@ bool CallVirt(InterpState &S, CodePtr OpPC, const Function *Func,
const auto *InitialFunction = cast<CXXMethodDecl>(Callee);
const CXXMethodDecl *Overrider;
- if (StaticDecl != DynamicDecl) {
+ if (StaticDecl != DynamicDecl &&
+ !llvm::is_contained(S.InitializingBlocks, ThisPtr.block())) {
if (!DynamicDecl->isDerivedFrom(StaticDecl))
return false;
Overrider = S.getContext().getOverridingFunction(DynamicDecl, StaticDecl,
@@ -1716,6 +1703,17 @@ bool CallVirt(InterpState &S, CodePtr OpPC, const Function *Func,
Overrider = InitialFunction;
}
+ // C++2a [class.abstract]p6:
+ // the effect of making a virtual call to a pure virtual function [...] is
+ // undefined
+ if (Overrider->isPureVirtual()) {
+ S.FFDiag(S.Current->getSource(OpPC), diag::note_constexpr_pure_virtual_call,
+ 1)
+ << Callee;
+ S.Note(Callee->getLocation(), diag::note_declared_at);
+ return false;
+ }
+
if (Overrider != InitialFunction) {
// DR1872: An instantiated virtual constexpr function can't be called in a
// constant expression (prior to C++20). We can still constant-fold such a
diff --git a/clang/lib/AST/ByteCode/InterpBuiltin.cpp b/clang/lib/AST/ByteCode/InterpBuiltin.cpp
index 8f23001..ab6b3ed 100644
--- a/clang/lib/AST/ByteCode/InterpBuiltin.cpp
+++ b/clang/lib/AST/ByteCode/InterpBuiltin.cpp
@@ -3296,6 +3296,60 @@ static bool interp__builtin_vec_set(InterpState &S, CodePtr OpPC,
return true;
}
+static bool evalICmpImm(uint8_t Imm, const APSInt &A, const APSInt &B,
+ bool IsUnsigned) {
+ switch (Imm & 0x7) {
+ case 0x00: // _MM_CMPINT_EQ
+ return (A == B);
+ case 0x01: // _MM_CMPINT_LT
+ return IsUnsigned ? A.ult(B) : A.slt(B);
+ case 0x02: // _MM_CMPINT_LE
+ return IsUnsigned ? A.ule(B) : A.sle(B);
+ case 0x03: // _MM_CMPINT_FALSE
+ return false;
+ case 0x04: // _MM_CMPINT_NE
+ return (A != B);
+ case 0x05: // _MM_CMPINT_NLT
+ return IsUnsigned ? A.ugt(B) : A.sgt(B);
+ case 0x06: // _MM_CMPINT_NLE
+ return IsUnsigned ? A.uge(B) : A.sge(B);
+ case 0x07: // _MM_CMPINT_TRUE
+ return true;
+ default:
+ llvm_unreachable("Invalid Op");
+ }
+}
+
+static bool interp__builtin_ia32_cmp_mask(InterpState &S, CodePtr OpPC,
+ const CallExpr *Call, unsigned ID,
+ bool IsUnsigned) {
+ assert(Call->getNumArgs() == 4);
+
+ APSInt Mask = popToAPSInt(S, Call->getArg(3));
+ APSInt Opcode = popToAPSInt(S, Call->getArg(2));
+ unsigned CmpOp = static_cast<unsigned>(Opcode.getZExtValue());
+ const Pointer &RHS = S.Stk.pop<Pointer>();
+ const Pointer &LHS = S.Stk.pop<Pointer>();
+
+ assert(LHS.getNumElems() == RHS.getNumElems());
+
+ APInt RetMask = APInt::getZero(LHS.getNumElems());
+ unsigned VectorLen = LHS.getNumElems();
+ PrimType ElemT = LHS.getFieldDesc()->getPrimType();
+
+ for (unsigned ElemNum = 0; ElemNum < VectorLen; ++ElemNum) {
+ APSInt A, B;
+ INT_TYPE_SWITCH_NO_BOOL(ElemT, {
+ A = LHS.elem<T>(ElemNum).toAPSInt();
+ B = RHS.elem<T>(ElemNum).toAPSInt();
+ });
+ RetMask.setBitVal(ElemNum,
+ Mask[ElemNum] && evalICmpImm(CmpOp, A, B, IsUnsigned));
+ }
+ pushInteger(S, RetMask, Call->getType());
+ return true;
+}
+
static bool interp__builtin_ia32_vpconflict(InterpState &S, CodePtr OpPC,
const CallExpr *Call) {
assert(Call->getNumArgs() == 1);
@@ -4488,6 +4542,35 @@ bool InterpretBuiltin(InterpState &S, CodePtr OpPC, const CallExpr *Call,
case X86::BI__builtin_ia32_vec_set_v4di:
return interp__builtin_vec_set(S, OpPC, Call, BuiltinID);
+ case X86::BI__builtin_ia32_cmpb128_mask:
+ case X86::BI__builtin_ia32_cmpw128_mask:
+ case X86::BI__builtin_ia32_cmpd128_mask:
+ case X86::BI__builtin_ia32_cmpq128_mask:
+ case X86::BI__builtin_ia32_cmpb256_mask:
+ case X86::BI__builtin_ia32_cmpw256_mask:
+ case X86::BI__builtin_ia32_cmpd256_mask:
+ case X86::BI__builtin_ia32_cmpq256_mask:
+ case X86::BI__builtin_ia32_cmpb512_mask:
+ case X86::BI__builtin_ia32_cmpw512_mask:
+ case X86::BI__builtin_ia32_cmpd512_mask:
+ case X86::BI__builtin_ia32_cmpq512_mask:
+ return interp__builtin_ia32_cmp_mask(S, OpPC, Call, BuiltinID,
+ /*IsUnsigned=*/false);
+
+ case X86::BI__builtin_ia32_ucmpb128_mask:
+ case X86::BI__builtin_ia32_ucmpw128_mask:
+ case X86::BI__builtin_ia32_ucmpd128_mask:
+ case X86::BI__builtin_ia32_ucmpq128_mask:
+ case X86::BI__builtin_ia32_ucmpb256_mask:
+ case X86::BI__builtin_ia32_ucmpw256_mask:
+ case X86::BI__builtin_ia32_ucmpd256_mask:
+ case X86::BI__builtin_ia32_ucmpq256_mask:
+ case X86::BI__builtin_ia32_ucmpb512_mask:
+ case X86::BI__builtin_ia32_ucmpw512_mask:
+ case X86::BI__builtin_ia32_ucmpd512_mask:
+ case X86::BI__builtin_ia32_ucmpq512_mask:
+ return interp__builtin_ia32_cmp_mask(S, OpPC, Call, BuiltinID,
+ /*IsUnsigned=*/true);
case X86::BI__builtin_ia32_pslldqi128_byteshift:
case X86::BI__builtin_ia32_pslldqi256_byteshift:
case X86::BI__builtin_ia32_pslldqi512_byteshift:
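
The shared predicate table in ``evalICmpImm`` is what makes each ``cmp``/
``ucmp`` pair differ only in ordering; a hedged sketch (assumes
``-mavx512vl -mavx512bw``; ``_MM_CMPINT_LT`` is immediate ``1``):

    // Sketch: the same immediate selects signed vs. unsigned ordering.
    // Lane 0 holds -1, i.e. 0xFF: less than 1 as signed, 255 > 1 unsigned.
    #include <immintrin.h>

    static_assert(_mm_cmp_epi8_mask((__m128i)(__v16qi){-1},
                                    (__m128i)(__v16qi){1},
                                    _MM_CMPINT_LT) == 0x0001, "signed");
    static_assert(_mm_cmp_epu8_mask((__m128i)(__v16qi){-1},
                                    (__m128i)(__v16qi){1},
                                    _MM_CMPINT_LT) == 0x0000, "unsigned");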
diff --git a/clang/lib/AST/ExprConstant.cpp b/clang/lib/AST/ExprConstant.cpp
index 29ee089..d0404b9 100644
--- a/clang/lib/AST/ExprConstant.cpp
+++ b/clang/lib/AST/ExprConstant.cpp
@@ -15766,6 +15766,89 @@ bool IntExprEvaluator::VisitBuiltinCallExpr(const CallExpr *E,
unsigned Idx = static_cast<unsigned>(IdxAPS.getZExtValue() & (N - 1));
return Success(Vec.getVectorElt(Idx).getInt(), E);
}
+
+ case clang::X86::BI__builtin_ia32_cmpb128_mask:
+ case clang::X86::BI__builtin_ia32_cmpw128_mask:
+ case clang::X86::BI__builtin_ia32_cmpd128_mask:
+ case clang::X86::BI__builtin_ia32_cmpq128_mask:
+ case clang::X86::BI__builtin_ia32_cmpb256_mask:
+ case clang::X86::BI__builtin_ia32_cmpw256_mask:
+ case clang::X86::BI__builtin_ia32_cmpd256_mask:
+ case clang::X86::BI__builtin_ia32_cmpq256_mask:
+ case clang::X86::BI__builtin_ia32_cmpb512_mask:
+ case clang::X86::BI__builtin_ia32_cmpw512_mask:
+ case clang::X86::BI__builtin_ia32_cmpd512_mask:
+ case clang::X86::BI__builtin_ia32_cmpq512_mask:
+ case clang::X86::BI__builtin_ia32_ucmpb128_mask:
+ case clang::X86::BI__builtin_ia32_ucmpw128_mask:
+ case clang::X86::BI__builtin_ia32_ucmpd128_mask:
+ case clang::X86::BI__builtin_ia32_ucmpq128_mask:
+ case clang::X86::BI__builtin_ia32_ucmpb256_mask:
+ case clang::X86::BI__builtin_ia32_ucmpw256_mask:
+ case clang::X86::BI__builtin_ia32_ucmpd256_mask:
+ case clang::X86::BI__builtin_ia32_ucmpq256_mask:
+ case clang::X86::BI__builtin_ia32_ucmpb512_mask:
+ case clang::X86::BI__builtin_ia32_ucmpw512_mask:
+ case clang::X86::BI__builtin_ia32_ucmpd512_mask:
+ case clang::X86::BI__builtin_ia32_ucmpq512_mask: {
+ assert(E->getNumArgs() == 4);
+
+ bool IsUnsigned =
+ (BuiltinOp >= clang::X86::BI__builtin_ia32_ucmpb128_mask &&
+ BuiltinOp <= clang::X86::BI__builtin_ia32_ucmpq512_mask);
+
+ APValue LHS, RHS;
+ APSInt Mask, Opcode;
+ if (!EvaluateVector(E->getArg(0), LHS, Info) ||
+ !EvaluateVector(E->getArg(1), RHS, Info) ||
+ !EvaluateInteger(E->getArg(2), Opcode, Info) ||
+ !EvaluateInteger(E->getArg(3), Mask, Info))
+ return false;
+
+ assert(LHS.getVectorLength() == RHS.getVectorLength());
+
+ unsigned VectorLen = LHS.getVectorLength();
+ unsigned RetWidth = Mask.getBitWidth();
+
+ APSInt RetMask(llvm::APInt(RetWidth, 0), /*isUnsigned=*/true);
+
+ for (unsigned ElemNum = 0; ElemNum < VectorLen; ++ElemNum) {
+ const APSInt &A = LHS.getVectorElt(ElemNum).getInt();
+ const APSInt &B = RHS.getVectorElt(ElemNum).getInt();
+ bool Result = false;
+
+ switch (Opcode.getExtValue() & 0x7) {
+ case 0: // _MM_CMPINT_EQ
+ Result = (A == B);
+ break;
+ case 1: // _MM_CMPINT_LT
+ Result = IsUnsigned ? A.ult(B) : A.slt(B);
+ break;
+ case 2: // _MM_CMPINT_LE
+ Result = IsUnsigned ? A.ule(B) : A.sle(B);
+ break;
+ case 3: // _MM_CMPINT_FALSE
+ Result = false;
+ break;
+ case 4: // _MM_CMPINT_NE
+ Result = (A != B);
+ break;
+ case 5: // _MM_CMPINT_NLT (>=)
+ Result = IsUnsigned ? A.uge(B) : A.sge(B);
+ break;
+ case 6: // _MM_CMPINT_NLE (>)
+ Result = IsUnsigned ? A.ugt(B) : A.sgt(B);
+ break;
+ case 7: // _MM_CMPINT_TRUE
+ Result = true;
+ break;
+ }
+
+ RetMask.setBitVal(ElemNum, Mask[ElemNum] && Result);
+ }
+
+ return Success(APValue(RetMask), E);
+ }
}
}
diff --git a/clang/lib/CIR/Dialect/Transforms/FlattenCFG.cpp b/clang/lib/CIR/Dialect/Transforms/FlattenCFG.cpp
index 21c96fe..ca7554e 100644
--- a/clang/lib/CIR/Dialect/Transforms/FlattenCFG.cpp
+++ b/clang/lib/CIR/Dialect/Transforms/FlattenCFG.cpp
@@ -606,10 +606,12 @@ public:
// `cir.try_call`.
llvm::SmallVector<cir::CallOp, 4> callsToRewrite;
tryOp.getTryRegion().walk([&](CallOp op) {
+ if (op.getNothrow())
+ return;
+
// Only grab calls within immediate closest TryOp scope.
if (op->getParentOfType<cir::TryOp>() != tryOp)
return;
- assert(!cir::MissingFeatures::opCallExceptionAttr());
callsToRewrite.push_back(op);
});
diff --git a/clang/lib/CodeGen/BackendUtil.cpp b/clang/lib/CodeGen/BackendUtil.cpp
index aefc262..3c31314 100644
--- a/clang/lib/CodeGen/BackendUtil.cpp
+++ b/clang/lib/CodeGen/BackendUtil.cpp
@@ -800,16 +800,6 @@ static void addSanitizers(const Triple &TargetTriple,
MPM.addPass(DataFlowSanitizerPass(LangOpts.NoSanitizeFiles,
PB.getVirtualFileSystemPtr()));
}
-
- if (LangOpts.Sanitize.has(SanitizerKind::AllocToken)) {
- if (Level == OptimizationLevel::O0) {
- // The default pass builder only infers libcall function attrs when
- // optimizing, so we insert it here because we need it for accurate
- // memory allocation function detection.
- MPM.addPass(InferFunctionAttrsPass());
- }
- MPM.addPass(AllocTokenPass(getAllocTokenOptions(LangOpts, CodeGenOpts)));
- }
};
if (ClSanitizeOnOptimizerEarlyEP) {
PB.registerOptimizerEarlyEPCallback(
@@ -852,6 +842,23 @@ static void addSanitizers(const Triple &TargetTriple,
}
}
+static void addAllocTokenPass(const Triple &TargetTriple,
+ const CodeGenOptions &CodeGenOpts,
+ const LangOptions &LangOpts, PassBuilder &PB) {
+ PB.registerOptimizerLastEPCallback([&](ModulePassManager &MPM,
+ OptimizationLevel Level,
+ ThinOrFullLTOPhase) {
+ if (Level == OptimizationLevel::O0 &&
+ LangOpts.Sanitize.has(SanitizerKind::AllocToken)) {
+ // The default pass builder only infers libcall function attrs when
+ // optimizing, so we insert it here because we need it for accurate
+ // memory allocation function detection with -fsanitize=alloc-token.
+ MPM.addPass(InferFunctionAttrsPass());
+ }
+ MPM.addPass(AllocTokenPass(getAllocTokenOptions(LangOpts, CodeGenOpts)));
+ });
+}
+
void EmitAssemblyHelper::RunOptimizationPipeline(
BackendAction Action, std::unique_ptr<raw_pwrite_stream> &OS,
std::unique_ptr<llvm::ToolOutputFile> &ThinLinkOS, BackendConsumer *BC) {
@@ -1106,6 +1113,7 @@ void EmitAssemblyHelper::RunOptimizationPipeline(
if (!IsThinLTOPostLink) {
addSanitizers(TargetTriple, CodeGenOpts, LangOpts, PB);
addKCFIPass(TargetTriple, LangOpts, PB);
+ addAllocTokenPass(TargetTriple, CodeGenOpts, LangOpts, PB);
}
if (std::optional<GCOVOptions> Options =
diff --git a/clang/lib/CodeGen/CGBuiltin.cpp b/clang/lib/CodeGen/CGBuiltin.cpp
index fd14cd6..b81e0d0 100644
--- a/clang/lib/CodeGen/CGBuiltin.cpp
+++ b/clang/lib/CodeGen/CGBuiltin.cpp
@@ -4506,6 +4506,15 @@ RValue CodeGenFunction::EmitBuiltinExpr(const GlobalDecl GD, unsigned BuiltinID,
return RValue::get(AI);
}
+ case Builtin::BI__builtin_infer_alloc_token: {
+ llvm::MDNode *MDN = buildAllocToken(E);
+ llvm::Value *MDV = MetadataAsValue::get(getLLVMContext(), MDN);
+ llvm::Function *F =
+ CGM.getIntrinsic(llvm::Intrinsic::alloc_token_id, {IntPtrTy});
+ llvm::CallBase *TokenID = Builder.CreateCall(F, MDV);
+ return RValue::get(TokenID);
+ }
+
case Builtin::BIbzero:
case Builtin::BI__builtin_bzero: {
Address Dest = EmitPointerWithAlignment(E->getArg(0));
diff --git a/clang/lib/Headers/avx512vlbwintrin.h b/clang/lib/Headers/avx512vlbwintrin.h
index 0fcfe37..263a107 100644
--- a/clang/lib/Headers/avx512vlbwintrin.h
+++ b/clang/lib/Headers/avx512vlbwintrin.h
@@ -2385,22 +2385,19 @@ _mm256_mask_storeu_epi8 (void *__P, __mmask32 __U, __m256i __A)
(__mmask32) __U);
}
-static __inline__ __mmask16 __DEFAULT_FN_ATTRS128
-_mm_test_epi8_mask (__m128i __A, __m128i __B)
-{
+static __inline__ __mmask16 __DEFAULT_FN_ATTRS128_CONSTEXPR
+_mm_test_epi8_mask(__m128i __A, __m128i __B) {
return _mm_cmpneq_epi8_mask (_mm_and_si128(__A, __B), _mm_setzero_si128());
}
-static __inline__ __mmask16 __DEFAULT_FN_ATTRS128
-_mm_mask_test_epi8_mask (__mmask16 __U, __m128i __A, __m128i __B)
-{
+static __inline__ __mmask16 __DEFAULT_FN_ATTRS128_CONSTEXPR
+_mm_mask_test_epi8_mask(__mmask16 __U, __m128i __A, __m128i __B) {
return _mm_mask_cmpneq_epi8_mask (__U, _mm_and_si128 (__A, __B),
_mm_setzero_si128());
}
-static __inline__ __mmask32 __DEFAULT_FN_ATTRS256
-_mm256_test_epi8_mask (__m256i __A, __m256i __B)
-{
+static __inline__ __mmask32 __DEFAULT_FN_ATTRS256_CONSTEXPR
+_mm256_test_epi8_mask(__m256i __A, __m256i __B) {
return _mm256_cmpneq_epi8_mask (_mm256_and_si256(__A, __B),
_mm256_setzero_si256());
}
@@ -2439,9 +2436,8 @@ _mm256_mask_test_epi16_mask (__mmask16 __U, __m256i __A, __m256i __B)
_mm256_setzero_si256());
}
-static __inline__ __mmask16 __DEFAULT_FN_ATTRS128
-_mm_testn_epi8_mask (__m128i __A, __m128i __B)
-{
+static __inline__ __mmask16 __DEFAULT_FN_ATTRS128_CONSTEXPR
+_mm_testn_epi8_mask(__m128i __A, __m128i __B) {
return _mm_cmpeq_epi8_mask (_mm_and_si128 (__A, __B), _mm_setzero_si128());
}
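
Since ``_mm_test_epi8_mask`` and friends are now constexpr, a quick sketch of
the folding the new tests exercise (assumes ``-mavx512vl -mavx512bw``):

    // Sketch: lane 2 holds 2 vs. 4, whose AND is zero, so only that bit
    // (and the all-zero upper lanes) stays clear: 0b1011 = 0x000b.
    #include <immintrin.h>

    static_assert(_mm_test_epi8_mask(
                      (__m128i)(__v16qi){1, 2, 2, 4},
                      (__m128i)(__v16qi){1, 2, 4, 4}) == 0x000b, "");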
diff --git a/clang/lib/Sema/SemaConcept.cpp b/clang/lib/Sema/SemaConcept.cpp
index f04cc45..fb4d0b45 100644
--- a/clang/lib/Sema/SemaConcept.cpp
+++ b/clang/lib/Sema/SemaConcept.cpp
@@ -2408,11 +2408,16 @@ const NormalizedConstraint *Sema::getNormalizedAssociatedConstraints(
if (CacheEntry == NormalizationCache.end()) {
auto *Normalized = NormalizedConstraint::fromAssociatedConstraints(
*this, ND, AssociatedConstraints);
+ if (!Normalized) {
+ NormalizationCache.try_emplace(ConstrainedDeclOrNestedReq, nullptr);
+ return nullptr;
+ }
+ // substitute() can invalidate iterators of NormalizationCache.
+ bool Failed = SubstituteParameterMappings(*this).substitute(*Normalized);
CacheEntry =
NormalizationCache.try_emplace(ConstrainedDeclOrNestedReq, Normalized)
.first;
- if (!Normalized ||
- SubstituteParameterMappings(*this).substitute(*Normalized))
+ if (Failed)
return nullptr;
}
return CacheEntry->second;
diff --git a/clang/test/AST/ByteCode/cxx20.cpp b/clang/test/AST/ByteCode/cxx20.cpp
index 1888998..cb788fa 100644
--- a/clang/test/AST/ByteCode/cxx20.cpp
+++ b/clang/test/AST/ByteCode/cxx20.cpp
@@ -1183,3 +1183,21 @@ namespace VirtualFunctionCallThroughArrayElem {
static_assert(a[2][3].foo()); // both-error {{not an integral constant expression}} \
// both-note {{virtual function called on object 'a[2][3]' whose dynamic type is not constant}}
}
+
+namespace NonPureVirtualCall {
+ struct A {
+ constexpr virtual void call(int) = 0;
+ constexpr void call2() { call(0); }
+ };
+
+ struct B : A {
+ constexpr void call(int) override {}
+ };
+
+ consteval void check() {
+ B b;
+ b.call2();
+ }
+
+ int main() { check(); }
+}
diff --git a/clang/test/CIR/CodeGen/try-catch.cpp b/clang/test/CIR/CodeGen/try-catch.cpp
index 1e4d2a6..27e3d8e 100644
--- a/clang/test/CIR/CodeGen/try-catch.cpp
+++ b/clang/test/CIR/CodeGen/try-catch.cpp
@@ -164,3 +164,33 @@ void try_catch_with_alloca() {
// OGCG: %[[TMP_B:.*]] = load i32, ptr %[[B_ADDR]], align 4
// OGCG: %[[RESULT:.*]] = add nsw i32 %[[TMP_A]], %[[TMP_B]]
// OGCG: store i32 %[[RESULT]], ptr %[[C_ADDR]], align 4
+
+void function_with_noexcept() noexcept;
+
+void calling_noexcept_function_inside_try_block() {
+ try {
+ function_with_noexcept();
+ } catch (...) {
+ }
+}
+
+// CIR: cir.scope {
+// CIR: cir.try {
+// CIR: cir.call @_Z22function_with_noexceptv() nothrow : () -> ()
+// CIR: cir.yield
+// CIR: }
+// CIR: }
+
+// LLVM: br label %[[LABEL_1:.*]]
+// LLVM: [[LABEL_1]]:
+// LLVM: br label %[[LABEL_2:.*]]
+// LLVM: [[LABEL_2]]:
+// LLVM: call void @_Z22function_with_noexceptv()
+// LLVM: br label %[[LABEL_3:.*]]
+// LLVM: [[LABEL_3]]:
+// LLVM: br label %[[LABEL_4:.*]]
+// LLVM: [[LABEL_4]]:
+// LLVM: ret void
+
+// OGCG: call void @_Z22function_with_noexceptv()
+// OGCG: ret void
diff --git a/clang/test/CodeGen/AArch64/neon-across.c b/clang/test/CodeGen/AArch64/neon-across.c
index d365975..aa0387d 100644
--- a/clang/test/CodeGen/AArch64/neon-across.c
+++ b/clang/test/CodeGen/AArch64/neon-across.c
@@ -49,7 +49,7 @@ uint32_t test_vaddlv_u16(uint16x4_t a) {
}
// CHECK-LABEL: define {{[^@]+}}@test_vaddlvq_s8
-// CHECK-SAME: (<16 x i8> noundef [[A:%.*]]) #[[ATTR1:[0-9]+]] {
+// CHECK-SAME: (<16 x i8> noundef [[A:%.*]]) #[[ATTR0]] {
// CHECK-NEXT: entry:
// CHECK-NEXT: [[VADDLV_I:%.*]] = call i32 @llvm.aarch64.neon.saddlv.i32.v16i8(<16 x i8> [[A]])
// CHECK-NEXT: [[TMP0:%.*]] = trunc i32 [[VADDLV_I]] to i16
@@ -60,7 +60,7 @@ int16_t test_vaddlvq_s8(int8x16_t a) {
}
// CHECK-LABEL: define {{[^@]+}}@test_vaddlvq_s16
-// CHECK-SAME: (<8 x i16> noundef [[A:%.*]]) #[[ATTR1]] {
+// CHECK-SAME: (<8 x i16> noundef [[A:%.*]]) #[[ATTR0]] {
// CHECK-NEXT: entry:
// CHECK-NEXT: [[VADDLV_I:%.*]] = call i32 @llvm.aarch64.neon.saddlv.i32.v8i16(<8 x i16> [[A]])
// CHECK-NEXT: ret i32 [[VADDLV_I]]
@@ -70,7 +70,7 @@ int32_t test_vaddlvq_s16(int16x8_t a) {
}
// CHECK-LABEL: define {{[^@]+}}@test_vaddlvq_s32
-// CHECK-SAME: (<4 x i32> noundef [[A:%.*]]) #[[ATTR1]] {
+// CHECK-SAME: (<4 x i32> noundef [[A:%.*]]) #[[ATTR0]] {
// CHECK-NEXT: entry:
// CHECK-NEXT: [[VADDLVQ_S32_I:%.*]] = call i64 @llvm.aarch64.neon.saddlv.i64.v4i32(<4 x i32> [[A]])
// CHECK-NEXT: ret i64 [[VADDLVQ_S32_I]]
@@ -80,7 +80,7 @@ int64_t test_vaddlvq_s32(int32x4_t a) {
}
// CHECK-LABEL: define {{[^@]+}}@test_vaddlvq_u8
-// CHECK-SAME: (<16 x i8> noundef [[A:%.*]]) #[[ATTR1]] {
+// CHECK-SAME: (<16 x i8> noundef [[A:%.*]]) #[[ATTR0]] {
// CHECK-NEXT: entry:
// CHECK-NEXT: [[VADDLV_I:%.*]] = call i32 @llvm.aarch64.neon.uaddlv.i32.v16i8(<16 x i8> [[A]])
// CHECK-NEXT: [[TMP0:%.*]] = trunc i32 [[VADDLV_I]] to i16
@@ -91,7 +91,7 @@ uint16_t test_vaddlvq_u8(uint8x16_t a) {
}
// CHECK-LABEL: define {{[^@]+}}@test_vaddlvq_u16
-// CHECK-SAME: (<8 x i16> noundef [[A:%.*]]) #[[ATTR1]] {
+// CHECK-SAME: (<8 x i16> noundef [[A:%.*]]) #[[ATTR0]] {
// CHECK-NEXT: entry:
// CHECK-NEXT: [[VADDLV_I:%.*]] = call i32 @llvm.aarch64.neon.uaddlv.i32.v8i16(<8 x i16> [[A]])
// CHECK-NEXT: ret i32 [[VADDLV_I]]
@@ -101,7 +101,7 @@ uint32_t test_vaddlvq_u16(uint16x8_t a) {
}
// CHECK-LABEL: define {{[^@]+}}@test_vaddlvq_u32
-// CHECK-SAME: (<4 x i32> noundef [[A:%.*]]) #[[ATTR1]] {
+// CHECK-SAME: (<4 x i32> noundef [[A:%.*]]) #[[ATTR0]] {
// CHECK-NEXT: entry:
// CHECK-NEXT: [[VADDLVQ_U32_I:%.*]] = call i64 @llvm.aarch64.neon.uaddlv.i64.v4i32(<4 x i32> [[A]])
// CHECK-NEXT: ret i64 [[VADDLVQ_U32_I]]
@@ -155,7 +155,7 @@ uint16_t test_vmaxv_u16(uint16x4_t a) {
}
// CHECK-LABEL: define {{[^@]+}}@test_vmaxvq_s8
-// CHECK-SAME: (<16 x i8> noundef [[A:%.*]]) #[[ATTR1]] {
+// CHECK-SAME: (<16 x i8> noundef [[A:%.*]]) #[[ATTR0]] {
// CHECK-NEXT: entry:
// CHECK-NEXT: [[VMAXV_I:%.*]] = call i32 @llvm.aarch64.neon.smaxv.i32.v16i8(<16 x i8> [[A]])
// CHECK-NEXT: [[TMP0:%.*]] = trunc i32 [[VMAXV_I]] to i8
@@ -166,7 +166,7 @@ int8_t test_vmaxvq_s8(int8x16_t a) {
}
// CHECK-LABEL: define {{[^@]+}}@test_vmaxvq_s16
-// CHECK-SAME: (<8 x i16> noundef [[A:%.*]]) #[[ATTR1]] {
+// CHECK-SAME: (<8 x i16> noundef [[A:%.*]]) #[[ATTR0]] {
// CHECK-NEXT: entry:
// CHECK-NEXT: [[VMAXV_I:%.*]] = call i32 @llvm.aarch64.neon.smaxv.i32.v8i16(<8 x i16> [[A]])
// CHECK-NEXT: [[TMP0:%.*]] = trunc i32 [[VMAXV_I]] to i16
@@ -177,7 +177,7 @@ int16_t test_vmaxvq_s16(int16x8_t a) {
}
// CHECK-LABEL: define {{[^@]+}}@test_vmaxvq_s32
-// CHECK-SAME: (<4 x i32> noundef [[A:%.*]]) #[[ATTR1]] {
+// CHECK-SAME: (<4 x i32> noundef [[A:%.*]]) #[[ATTR0]] {
// CHECK-NEXT: entry:
// CHECK-NEXT: [[VMAXVQ_S32_I:%.*]] = call i32 @llvm.aarch64.neon.smaxv.i32.v4i32(<4 x i32> [[A]])
// CHECK-NEXT: ret i32 [[VMAXVQ_S32_I]]
@@ -187,7 +187,7 @@ int32_t test_vmaxvq_s32(int32x4_t a) {
}
// CHECK-LABEL: define {{[^@]+}}@test_vmaxvq_u8
-// CHECK-SAME: (<16 x i8> noundef [[A:%.*]]) #[[ATTR1]] {
+// CHECK-SAME: (<16 x i8> noundef [[A:%.*]]) #[[ATTR0]] {
// CHECK-NEXT: entry:
// CHECK-NEXT: [[VMAXV_I:%.*]] = call i32 @llvm.aarch64.neon.umaxv.i32.v16i8(<16 x i8> [[A]])
// CHECK-NEXT: [[TMP0:%.*]] = trunc i32 [[VMAXV_I]] to i8
@@ -198,7 +198,7 @@ uint8_t test_vmaxvq_u8(uint8x16_t a) {
}
// CHECK-LABEL: define {{[^@]+}}@test_vmaxvq_u16
-// CHECK-SAME: (<8 x i16> noundef [[A:%.*]]) #[[ATTR1]] {
+// CHECK-SAME: (<8 x i16> noundef [[A:%.*]]) #[[ATTR0]] {
// CHECK-NEXT: entry:
// CHECK-NEXT: [[VMAXV_I:%.*]] = call i32 @llvm.aarch64.neon.umaxv.i32.v8i16(<8 x i16> [[A]])
// CHECK-NEXT: [[TMP0:%.*]] = trunc i32 [[VMAXV_I]] to i16
@@ -209,7 +209,7 @@ uint16_t test_vmaxvq_u16(uint16x8_t a) {
}
// CHECK-LABEL: define {{[^@]+}}@test_vmaxvq_u32
-// CHECK-SAME: (<4 x i32> noundef [[A:%.*]]) #[[ATTR1]] {
+// CHECK-SAME: (<4 x i32> noundef [[A:%.*]]) #[[ATTR0]] {
// CHECK-NEXT: entry:
// CHECK-NEXT: [[VMAXVQ_U32_I:%.*]] = call i32 @llvm.aarch64.neon.umaxv.i32.v4i32(<4 x i32> [[A]])
// CHECK-NEXT: ret i32 [[VMAXVQ_U32_I]]
@@ -263,7 +263,7 @@ uint16_t test_vminv_u16(uint16x4_t a) {
}
// CHECK-LABEL: define {{[^@]+}}@test_vminvq_s8
-// CHECK-SAME: (<16 x i8> noundef [[A:%.*]]) #[[ATTR1]] {
+// CHECK-SAME: (<16 x i8> noundef [[A:%.*]]) #[[ATTR0]] {
// CHECK-NEXT: entry:
// CHECK-NEXT: [[VMINV_I:%.*]] = call i32 @llvm.aarch64.neon.sminv.i32.v16i8(<16 x i8> [[A]])
// CHECK-NEXT: [[TMP0:%.*]] = trunc i32 [[VMINV_I]] to i8
@@ -274,7 +274,7 @@ int8_t test_vminvq_s8(int8x16_t a) {
}
// CHECK-LABEL: define {{[^@]+}}@test_vminvq_s16
-// CHECK-SAME: (<8 x i16> noundef [[A:%.*]]) #[[ATTR1]] {
+// CHECK-SAME: (<8 x i16> noundef [[A:%.*]]) #[[ATTR0]] {
// CHECK-NEXT: entry:
// CHECK-NEXT: [[VMINV_I:%.*]] = call i32 @llvm.aarch64.neon.sminv.i32.v8i16(<8 x i16> [[A]])
// CHECK-NEXT: [[TMP0:%.*]] = trunc i32 [[VMINV_I]] to i16
@@ -285,7 +285,7 @@ int16_t test_vminvq_s16(int16x8_t a) {
}
// CHECK-LABEL: define {{[^@]+}}@test_vminvq_s32
-// CHECK-SAME: (<4 x i32> noundef [[A:%.*]]) #[[ATTR1]] {
+// CHECK-SAME: (<4 x i32> noundef [[A:%.*]]) #[[ATTR0]] {
// CHECK-NEXT: entry:
// CHECK-NEXT: [[VMINVQ_S32_I:%.*]] = call i32 @llvm.aarch64.neon.sminv.i32.v4i32(<4 x i32> [[A]])
// CHECK-NEXT: ret i32 [[VMINVQ_S32_I]]
@@ -295,7 +295,7 @@ int32_t test_vminvq_s32(int32x4_t a) {
}
// CHECK-LABEL: define {{[^@]+}}@test_vminvq_u8
-// CHECK-SAME: (<16 x i8> noundef [[A:%.*]]) #[[ATTR1]] {
+// CHECK-SAME: (<16 x i8> noundef [[A:%.*]]) #[[ATTR0]] {
// CHECK-NEXT: entry:
// CHECK-NEXT: [[VMINV_I:%.*]] = call i32 @llvm.aarch64.neon.uminv.i32.v16i8(<16 x i8> [[A]])
// CHECK-NEXT: [[TMP0:%.*]] = trunc i32 [[VMINV_I]] to i8
@@ -306,7 +306,7 @@ uint8_t test_vminvq_u8(uint8x16_t a) {
}
// CHECK-LABEL: define {{[^@]+}}@test_vminvq_u16
-// CHECK-SAME: (<8 x i16> noundef [[A:%.*]]) #[[ATTR1]] {
+// CHECK-SAME: (<8 x i16> noundef [[A:%.*]]) #[[ATTR0]] {
// CHECK-NEXT: entry:
// CHECK-NEXT: [[VMINV_I:%.*]] = call i32 @llvm.aarch64.neon.uminv.i32.v8i16(<8 x i16> [[A]])
// CHECK-NEXT: [[TMP0:%.*]] = trunc i32 [[VMINV_I]] to i16
@@ -317,7 +317,7 @@ uint16_t test_vminvq_u16(uint16x8_t a) {
}
// CHECK-LABEL: define {{[^@]+}}@test_vminvq_u32
-// CHECK-SAME: (<4 x i32> noundef [[A:%.*]]) #[[ATTR1]] {
+// CHECK-SAME: (<4 x i32> noundef [[A:%.*]]) #[[ATTR0]] {
// CHECK-NEXT: entry:
// CHECK-NEXT: [[VMINVQ_U32_I:%.*]] = call i32 @llvm.aarch64.neon.uminv.i32.v4i32(<4 x i32> [[A]])
// CHECK-NEXT: ret i32 [[VMINVQ_U32_I]]
@@ -371,7 +371,7 @@ uint16_t test_vaddv_u16(uint16x4_t a) {
}
// CHECK-LABEL: define {{[^@]+}}@test_vaddvq_s8
-// CHECK-SAME: (<16 x i8> noundef [[A:%.*]]) #[[ATTR1]] {
+// CHECK-SAME: (<16 x i8> noundef [[A:%.*]]) #[[ATTR0]] {
// CHECK-NEXT: entry:
// CHECK-NEXT: [[VADDV_I:%.*]] = call i32 @llvm.aarch64.neon.saddv.i32.v16i8(<16 x i8> [[A]])
// CHECK-NEXT: [[TMP0:%.*]] = trunc i32 [[VADDV_I]] to i8
@@ -382,7 +382,7 @@ int8_t test_vaddvq_s8(int8x16_t a) {
}
// CHECK-LABEL: define {{[^@]+}}@test_vaddvq_s16
-// CHECK-SAME: (<8 x i16> noundef [[A:%.*]]) #[[ATTR1]] {
+// CHECK-SAME: (<8 x i16> noundef [[A:%.*]]) #[[ATTR0]] {
// CHECK-NEXT: entry:
// CHECK-NEXT: [[VADDV_I:%.*]] = call i32 @llvm.aarch64.neon.saddv.i32.v8i16(<8 x i16> [[A]])
// CHECK-NEXT: [[TMP0:%.*]] = trunc i32 [[VADDV_I]] to i16
@@ -393,7 +393,7 @@ int16_t test_vaddvq_s16(int16x8_t a) {
}
// CHECK-LABEL: define {{[^@]+}}@test_vaddvq_s32
-// CHECK-SAME: (<4 x i32> noundef [[A:%.*]]) #[[ATTR1]] {
+// CHECK-SAME: (<4 x i32> noundef [[A:%.*]]) #[[ATTR0]] {
// CHECK-NEXT: entry:
// CHECK-NEXT: [[VADDVQ_S32_I:%.*]] = call i32 @llvm.aarch64.neon.saddv.i32.v4i32(<4 x i32> [[A]])
// CHECK-NEXT: ret i32 [[VADDVQ_S32_I]]
@@ -403,7 +403,7 @@ int32_t test_vaddvq_s32(int32x4_t a) {
}
// CHECK-LABEL: define {{[^@]+}}@test_vaddvq_u8
-// CHECK-SAME: (<16 x i8> noundef [[A:%.*]]) #[[ATTR1]] {
+// CHECK-SAME: (<16 x i8> noundef [[A:%.*]]) #[[ATTR0]] {
// CHECK-NEXT: entry:
// CHECK-NEXT: [[VADDV_I:%.*]] = call i32 @llvm.aarch64.neon.uaddv.i32.v16i8(<16 x i8> [[A]])
// CHECK-NEXT: [[TMP0:%.*]] = trunc i32 [[VADDV_I]] to i8
@@ -414,7 +414,7 @@ uint8_t test_vaddvq_u8(uint8x16_t a) {
}
// CHECK-LABEL: define {{[^@]+}}@test_vaddvq_u16
-// CHECK-SAME: (<8 x i16> noundef [[A:%.*]]) #[[ATTR1]] {
+// CHECK-SAME: (<8 x i16> noundef [[A:%.*]]) #[[ATTR0]] {
// CHECK-NEXT: entry:
// CHECK-NEXT: [[VADDV_I:%.*]] = call i32 @llvm.aarch64.neon.uaddv.i32.v8i16(<8 x i16> [[A]])
// CHECK-NEXT: [[TMP0:%.*]] = trunc i32 [[VADDV_I]] to i16
@@ -425,7 +425,7 @@ uint16_t test_vaddvq_u16(uint16x8_t a) {
}
// CHECK-LABEL: define {{[^@]+}}@test_vaddvq_u32
-// CHECK-SAME: (<4 x i32> noundef [[A:%.*]]) #[[ATTR1]] {
+// CHECK-SAME: (<4 x i32> noundef [[A:%.*]]) #[[ATTR0]] {
// CHECK-NEXT: entry:
// CHECK-NEXT: [[VADDVQ_U32_I:%.*]] = call i32 @llvm.aarch64.neon.uaddv.i32.v4i32(<4 x i32> [[A]])
// CHECK-NEXT: ret i32 [[VADDVQ_U32_I]]
@@ -435,7 +435,7 @@ uint32_t test_vaddvq_u32(uint32x4_t a) {
}
// CHECK-LABEL: define {{[^@]+}}@test_vmaxvq_f32
-// CHECK-SAME: (<4 x float> noundef [[A:%.*]]) #[[ATTR1]] {
+// CHECK-SAME: (<4 x float> noundef [[A:%.*]]) #[[ATTR0]] {
// CHECK-NEXT: entry:
// CHECK-NEXT: [[VMAXVQ_F32_I:%.*]] = call float @llvm.aarch64.neon.fmaxv.f32.v4f32(<4 x float> [[A]])
// CHECK-NEXT: ret float [[VMAXVQ_F32_I]]
@@ -445,7 +445,7 @@ float32_t test_vmaxvq_f32(float32x4_t a) {
}
// CHECK-LABEL: define {{[^@]+}}@test_vminvq_f32
-// CHECK-SAME: (<4 x float> noundef [[A:%.*]]) #[[ATTR1]] {
+// CHECK-SAME: (<4 x float> noundef [[A:%.*]]) #[[ATTR0]] {
// CHECK-NEXT: entry:
// CHECK-NEXT: [[VMINVQ_F32_I:%.*]] = call float @llvm.aarch64.neon.fminv.f32.v4f32(<4 x float> [[A]])
// CHECK-NEXT: ret float [[VMINVQ_F32_I]]
@@ -455,7 +455,7 @@ float32_t test_vminvq_f32(float32x4_t a) {
}
// CHECK-LABEL: define {{[^@]+}}@test_vmaxnmvq_f32
-// CHECK-SAME: (<4 x float> noundef [[A:%.*]]) #[[ATTR1]] {
+// CHECK-SAME: (<4 x float> noundef [[A:%.*]]) #[[ATTR0]] {
// CHECK-NEXT: entry:
// CHECK-NEXT: [[VMAXNMVQ_F32_I:%.*]] = call float @llvm.aarch64.neon.fmaxnmv.f32.v4f32(<4 x float> [[A]])
// CHECK-NEXT: ret float [[VMAXNMVQ_F32_I]]
@@ -465,7 +465,7 @@ float32_t test_vmaxnmvq_f32(float32x4_t a) {
}
// CHECK-LABEL: define {{[^@]+}}@test_vminnmvq_f32
-// CHECK-SAME: (<4 x float> noundef [[A:%.*]]) #[[ATTR1]] {
+// CHECK-SAME: (<4 x float> noundef [[A:%.*]]) #[[ATTR0]] {
// CHECK-NEXT: entry:
// CHECK-NEXT: [[VMINNMVQ_F32_I:%.*]] = call float @llvm.aarch64.neon.fminnmv.f32.v4f32(<4 x float> [[A]])
// CHECK-NEXT: ret float [[VMINNMVQ_F32_I]]
diff --git a/clang/test/CodeGen/X86/avx512vlbw-builtins.c b/clang/test/CodeGen/X86/avx512vlbw-builtins.c
index 116d86f..febef46 100644
--- a/clang/test/CodeGen/X86/avx512vlbw-builtins.c
+++ b/clang/test/CodeGen/X86/avx512vlbw-builtins.c
@@ -645,6 +645,21 @@ __mmask16 test_mm_cmp_epi8_mask(__m128i __a, __m128i __b) {
return (__mmask16)_mm_cmp_epi8_mask(__a, __b, 0);
}
+TEST_CONSTEXPR(_mm_cmpeq_epi8_mask(
+ ((__m128i)(__v16qi){5, 3, 7, 2, 9, 3, 7, 1, 5, 4, 8, 2, 9, 6, 7, 5}),
+ ((__m128i)(__v16qi){5, 2, 7, 3, 9, 4, 6, 1, 5, 3, 8, 1, 9, 5, 7, 5})
+) == (__mmask16)0xd595);
+
+TEST_CONSTEXPR(_mm_cmplt_epi8_mask(
+ ((__m128i)(__v16qi){1, 5, 3, 7, 2, 8, 4, 6, 9, 5, 3, 11, 2, 6, 15, 8}),
+ ((__m128i)(__v16qi){2, 4, 6, 8, 3, 5, 7, 9, 4, 6, 8, 10, 5, 7, 9, 11})
+) == (__mmask16)0xb6dd);
+
+TEST_CONSTEXPR(_mm_cmple_epi8_mask(
+ ((__m128i)(__v16qi){1, 3, 5, 7, 2, 6, 6, 8, 1, 3, 9, 7, 2, 4, 6, 10}),
+ ((__m128i)(__v16qi){2, 3, 4, 7, 3, 4, 5, 8, 2, 3, 4, 7, 3, 4, 5, 8})
+) == (__mmask16)0x3b9b);
+
__mmask16 test_mm_mask_cmp_epi8_mask(__mmask16 __u, __m128i __a, __m128i __b) {
// CHECK-LABEL: test_mm_mask_cmp_epi8_mask
// CHECK: icmp eq <16 x i8> %{{.*}}, %{{.*}}
@@ -2894,6 +2909,12 @@ __mmask16 test_mm_test_epi8_mask(__m128i __A, __m128i __B) {
return _mm_test_epi8_mask(__A, __B);
}
+TEST_CONSTEXPR(_mm_test_epi8_mask(
+ (__m128i)(__v16qi){1, 2, 2, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16},
+ (__m128i)(__v16qi){1, 2, 4, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16}
+)
+== (__mmask16)0xfffb);
+
__mmask16 test_mm_mask_test_epi8_mask(__mmask16 __U, __m128i __A, __m128i __B) {
// CHECK-LABEL: test_mm_mask_test_epi8_mask
// CHECK: and <2 x i64> %{{.*}}, %{{.*}}
@@ -2901,6 +2922,12 @@ __mmask16 test_mm_mask_test_epi8_mask(__mmask16 __U, __m128i __A, __m128i __B) {
// CHECK: and <16 x i1> %{{.*}}, %{{.*}}
return _mm_mask_test_epi8_mask(__U, __A, __B);
}
+TEST_CONSTEXPR(_mm_mask_test_epi8_mask(
+ 0xFFFF,
+ (__m128i)(__v16qi){1, 2, 2, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16},
+ (__m128i)(__v16qi){1, 2, 4, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16}
+)
+== (__mmask16)0xfffb);
__mmask32 test_mm256_test_epi8_mask(__m256i __A, __m256i __B) {
// CHECK-LABEL: test_mm256_test_epi8_mask
@@ -2908,6 +2935,11 @@ __mmask32 test_mm256_test_epi8_mask(__m256i __A, __m256i __B) {
// CHECK: icmp ne <32 x i8> %{{.*}}, %{{.*}}
return _mm256_test_epi8_mask(__A, __B);
}
+TEST_CONSTEXPR(_mm256_test_epi8_mask(
+ (__m256i)(__v32qi){1, 2, 2, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 1, 2, 2, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16},
+ (__m256i)(__v32qi){1, 2, 4, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 1, 2, 4, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16}
+)
+== (__mmask32)0xfffbfffb);
__mmask32 test_mm256_mask_test_epi8_mask(__mmask32 __U, __m256i __A, __m256i __B) {
// CHECK-LABEL: test_mm256_mask_test_epi8_mask
@@ -2954,6 +2986,12 @@ __mmask16 test_mm_testn_epi8_mask(__m128i __A, __m128i __B) {
return _mm_testn_epi8_mask(__A, __B);
}
+TEST_CONSTEXPR(_mm_testn_epi8_mask(
+ (__m128i)(__v16qi){1, 2, 77, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 1, 16, 16},
+ (__m128i)(__v16qi){2, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 15}
+)
+== (__mmask16)0xe001);
+
__mmask16 test_mm_mask_testn_epi8_mask(__mmask16 __U, __m128i __A, __m128i __B) {
// CHECK-LABEL: test_mm_mask_testn_epi8_mask
// CHECK: and <2 x i64> %{{.*}}, %{{.*}}
diff --git a/clang/test/CodeGen/lto-newpm-pipeline.c b/clang/test/CodeGen/lto-newpm-pipeline.c
index ea9784a..dceaaf1 100644
--- a/clang/test/CodeGen/lto-newpm-pipeline.c
+++ b/clang/test/CodeGen/lto-newpm-pipeline.c
@@ -32,10 +32,12 @@
// CHECK-FULL-O0-NEXT: Running pass: AlwaysInlinerPass
// CHECK-FULL-O0-NEXT: Running analysis: ProfileSummaryAnalysis
// CHECK-FULL-O0-NEXT: Running pass: CoroConditionalWrapper
+// CHECK-FULL-O0-NEXT: Running pass: AllocTokenPass
+// CHECK-FULL-O0-NEXT: Running analysis: OptimizationRemarkEmitterAnalysis
+// CHECK-FULL-O0-NEXT: Running analysis: TargetLibraryAnalysis
// CHECK-FULL-O0-NEXT: Running pass: CanonicalizeAliasesPass
// CHECK-FULL-O0-NEXT: Running pass: NameAnonGlobalPass
// CHECK-FULL-O0-NEXT: Running pass: AnnotationRemarksPass
-// CHECK-FULL-O0-NEXT: Running analysis: TargetLibraryAnalysis
// CHECK-FULL-O0-NEXT: Running pass: VerifierPass
// CHECK-FULL-O0-NEXT: Running pass: BitcodeWriterPass
@@ -46,10 +48,12 @@
// CHECK-THIN-O0-NEXT: Running pass: AlwaysInlinerPass
// CHECK-THIN-O0-NEXT: Running analysis: ProfileSummaryAnalysis
// CHECK-THIN-O0-NEXT: Running pass: CoroConditionalWrapper
+// CHECK-THIN-O0-NEXT: Running pass: AllocTokenPass
+// CHECK-THIN-O0-NEXT: Running analysis: OptimizationRemarkEmitterAnalysis
+// CHECK-THIN-O0-NEXT: Running analysis: TargetLibraryAnalysis
// CHECK-THIN-O0-NEXT: Running pass: CanonicalizeAliasesPass
// CHECK-THIN-O0-NEXT: Running pass: NameAnonGlobalPass
// CHECK-THIN-O0-NEXT: Running pass: AnnotationRemarksPass
-// CHECK-THIN-O0-NEXT: Running analysis: TargetLibraryAnalysis
// CHECK-THIN-O0-NEXT: Running pass: VerifierPass
// CHECK-THIN-O0-NEXT: Running pass: ThinLTOBitcodeWriterPass
diff --git a/clang/test/CodeGenCXX/alloc-token-builtin.cpp b/clang/test/CodeGenCXX/alloc-token-builtin.cpp
new file mode 100644
index 0000000..adadf7b
--- /dev/null
+++ b/clang/test/CodeGenCXX/alloc-token-builtin.cpp
@@ -0,0 +1,97 @@
+// To test IR generation of the builtin without evaluating the LLVM intrinsic,
+// we set the mode to a stateful mode, which prohibits constant evaluation.
+// RUN: %clang_cc1 -triple x86_64-linux-gnu -Werror -std=c++20 -emit-llvm -falloc-token-mode=random -disable-llvm-passes %s -o - | FileCheck %s --check-prefixes=CHECK,CHECK-CODEGEN
+// RUN: %clang_cc1 -triple x86_64-linux-gnu -Werror -std=c++20 -emit-llvm -falloc-token-max=2 %s -o - | FileCheck %s --check-prefixes=CHECK,CHECK-LOWER
+
+extern "C" void *my_malloc(unsigned long, unsigned long);
+
+struct NoPtr {
+ int x;
+ long y;
+};
+
+struct WithPtr {
+ int a;
+ char *buf;
+};
+
+int unevaluated_fn();
+
+// CHECK-LABEL: @_Z16test_builtin_intv(
+// CHECK-CODEGEN: call i64 @llvm.alloc.token.id.i64(metadata ![[META_INT:[0-9]+]])
+// CHECK-LOWER: ret i64 0
+unsigned long test_builtin_int() {
+ return __builtin_infer_alloc_token(sizeof(1));
+}
+
+// CHECK-LABEL: @_Z16test_builtin_ptrv(
+// CHECK-CODEGEN: call i64 @llvm.alloc.token.id.i64(metadata ![[META_PTR:[0-9]+]])
+// CHECK-LOWER: ret i64 1
+unsigned long test_builtin_ptr() {
+ return __builtin_infer_alloc_token(sizeof(int *));
+}
+
+// CHECK-LABEL: @_Z25test_builtin_struct_noptrv(
+// CHECK-CODEGEN: call i64 @llvm.alloc.token.id.i64(metadata ![[META_NOPTR:[0-9]+]])
+// CHECK-LOWER: ret i64 0
+unsigned long test_builtin_struct_noptr() {
+ return __builtin_infer_alloc_token(sizeof(NoPtr));
+}
+
+// CHECK-LABEL: @_Z25test_builtin_struct_w_ptrv(
+// CHECK-CODEGEN: call i64 @llvm.alloc.token.id.i64(metadata ![[META_WITHPTR:[0-9]+]])
+// CHECK-LOWER: ret i64 1
+unsigned long test_builtin_struct_w_ptr() {
+ return __builtin_infer_alloc_token(sizeof(WithPtr), 123);
+}
+
+// CHECK-LABEL: @_Z24test_builtin_unevaluatedv(
+// CHECK-NOT: call{{.*}}unevaluated_fn
+// CHECK-CODEGEN: call i64 @llvm.alloc.token.id.i64(metadata ![[META_INT:[0-9]+]])
+// CHECK-LOWER: ret i64 0
+unsigned long test_builtin_unevaluated() {
+ return __builtin_infer_alloc_token(sizeof(int) * unevaluated_fn());
+}
+
+// CHECK-LABEL: @_Z36test_builtin_unsequenced_unevaluatedi(
+// CHECK: add nsw
+// CHECK-NOT: add nsw
+// CHECK-CODEGEN: %[[REG:[0-9]+]] = call i64 @llvm.alloc.token.id.i64(metadata ![[META_UNKNOWN:[0-9]+]])
+// CHECK-CODEGEN: call{{.*}}@my_malloc({{.*}}, i64 noundef %[[REG]])
+// CHECK-LOWER: call{{.*}}@my_malloc({{.*}}, i64 noundef 0)
+void test_builtin_unsequenced_unevaluated(int x) {
+ my_malloc(++x, __builtin_infer_alloc_token(++x));
+}
+
+// CHECK-LABEL: @_Z20test_builtin_unknownv(
+// CHECK-CODEGEN: call i64 @llvm.alloc.token.id.i64(metadata ![[META_UNKNOWN:[0-9]+]])
+// CHECK-LOWER: ret i64 0
+unsigned long test_builtin_unknown() {
+ return __builtin_infer_alloc_token(4096);
+}
+
+// Test template instantiation.
+template <typename T>
+constexpr unsigned long get_token() {
+ return __builtin_infer_alloc_token(sizeof(T));
+}
+
+// CHECK-LABEL: @_Z13get_token_intv()
+// CHECK-CODEGEN: call i64 @llvm.alloc.token.id.i64(metadata ![[META_INT]])
+// CHECK-LOWER: ret i64 0
+unsigned long get_token_int() {
+ return get_token<int>();
+}
+
+// CHECK-LABEL: @_Z13get_token_ptrv()
+// CHECK-CODEGEN: call i64 @llvm.alloc.token.id.i64(metadata ![[META_PTR]])
+// CHECK-LOWER: ret i64 1
+unsigned long get_token_ptr() {
+ return get_token<int *>();
+}
+
+// CHECK-CODEGEN: ![[META_INT]] = !{!"int", i1 false}
+// CHECK-CODEGEN: ![[META_PTR]] = !{!"int *", i1 true}
+// CHECK-CODEGEN: ![[META_NOPTR]] = !{!"NoPtr", i1 false}
+// CHECK-CODEGEN: ![[META_WITHPTR]] = !{!"WithPtr", i1 true}
+// CHECK-CODEGEN: ![[META_UNKNOWN]] = !{}
diff --git a/clang/test/Driver/sycl.c b/clang/test/Driver/sycl.c
index 2a672cc..5c210c8 100644
--- a/clang/test/Driver/sycl.c
+++ b/clang/test/Driver/sycl.c
@@ -25,3 +25,8 @@
// RUN: %clang_cl -### -fsycl -- %s 2>&1 | FileCheck %s --check-prefix=DEFAULT
// DEFAULT: "-sycl-std=2020"
+
+// RUN: %clang -### -fsycl -sycl-std=2017 --no-offloadlib -- %s 2>&1 | FileCheck %s --check-prefix=CHECK-NO-OFFLOADLIB
+// RUN: %clangxx -### -fsycl -sycl-std=2017 --no-offloadlib -- %s 2>&1 | FileCheck %s --check-prefix=CHECK-NO-OFFLOADLIB
+// RUN: %clang_cl -### -fsycl -sycl-std=2017 --no-offloadlib -- %s 2>&1 | FileCheck %s --check-prefix=CHECK-NO-OFFLOADLIB
+// CHECK-NO-OFFLOADLIB-NOT: warning: unknown argument ignored in clang-cl: '--no-offloadlib'
diff --git a/clang/test/SemaTemplate/concepts.cpp b/clang/test/SemaTemplate/concepts.cpp
index becf546..c90af41 100644
--- a/clang/test/SemaTemplate/concepts.cpp
+++ b/clang/test/SemaTemplate/concepts.cpp
@@ -1632,3 +1632,29 @@ void fn3() {
}
}
+
+namespace GH165238 {
+
+namespace std {
+template <typename, typename _Tp>
+concept output_iterator = requires(_Tp __t) { __t; };
+template <typename _Out> struct basic_format_context {
+ static_assert(output_iterator<_Out, int>);
+ using char_type = _Out;
+};
+template <typename> class basic_format_parse_context;
+template <typename, typename _Context, typename _Formatter,
+ typename = basic_format_parse_context<typename _Context::char_type>>
+concept __parsable_with = requires(_Formatter __f) { __f; };
+template <typename _Tp, typename _CharT,
+ typename _Context = basic_format_context<_CharT>>
+concept __formattable_impl = __parsable_with<_Tp, _Context, _Context>;
+template <typename _Tp, typename _CharT>
+concept formattable = __formattable_impl<_Tp, _CharT>;
+} // namespace std
+struct {
+ void operator()(std::formattable<char> auto);
+} call;
+void foo() { call(""); }
+
+}