Diffstat (limited to 'clang/lib')
-rw-r--r--  clang/lib/AST/ASTContext.cpp | 4
-rw-r--r--  clang/lib/AST/ByteCode/Compiler.cpp | 145
-rw-r--r--  clang/lib/AST/ByteCode/Interp.cpp | 9
-rw-r--r--  clang/lib/AST/ByteCode/InterpBuiltin.cpp | 187
-rw-r--r--  clang/lib/AST/ByteCode/Opcodes.td | 10
-rw-r--r--  clang/lib/AST/ExprConstant.cpp | 180
-rw-r--r--  clang/lib/AST/StmtOpenACC.cpp | 76
-rw-r--r--  clang/lib/Analysis/FlowSensitive/Models/UncheckedStatusOrAccessModel.cpp | 89
-rw-r--r--  clang/lib/Basic/FileManager.cpp | 7
-rw-r--r--  clang/lib/Basic/Targets/AArch64.cpp | 20
-rw-r--r--  clang/lib/Basic/Targets/AArch64.h | 13
-rw-r--r--  clang/lib/Basic/Targets/ARM.cpp | 4
-rw-r--r--  clang/lib/Basic/Targets/Hexagon.cpp | 6
-rw-r--r--  clang/lib/Basic/Targets/OSTargets.cpp | 1
-rw-r--r--  clang/lib/Basic/Targets/OSTargets.h | 6
-rw-r--r--  clang/lib/CIR/CodeGen/CIRGenBuiltin.cpp | 34
-rw-r--r--  clang/lib/CIR/CodeGen/CIRGenCXXABI.h | 7
-rw-r--r--  clang/lib/CIR/CodeGen/CIRGenCoroutine.cpp | 79
-rw-r--r--  clang/lib/CIR/CodeGen/CIRGenExpr.cpp | 6
-rw-r--r--  clang/lib/CIR/CodeGen/CIRGenExprAggregate.cpp | 3
-rw-r--r--  clang/lib/CIR/CodeGen/CIRGenExprCXX.cpp | 32
-rw-r--r--  clang/lib/CIR/CodeGen/CIRGenExprConstant.cpp | 35
-rw-r--r--  clang/lib/CIR/CodeGen/CIRGenFunction.h | 9
-rw-r--r--  clang/lib/CIR/CodeGen/CIRGenItaniumCXXABI.cpp | 25
-rw-r--r--  clang/lib/CIR/CodeGen/CIRGenModule.h | 2
-rw-r--r--  clang/lib/CIR/CodeGen/CIRGenStmtOpenACC.cpp | 109
-rw-r--r--  clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp | 9
-rw-r--r--  clang/lib/CodeGen/BackendUtil.cpp | 3
-rw-r--r--  clang/lib/CodeGen/CGHLSLRuntime.cpp | 100
-rw-r--r--  clang/lib/CodeGen/CGHLSLRuntime.h | 34
-rw-r--r--  clang/lib/CodeGen/CGOpenMPRuntime.cpp | 15
-rw-r--r--  clang/lib/CodeGen/CGOpenMPRuntime.h | 4
-rw-r--r--  clang/lib/CodeGen/CodeGenModule.cpp | 2
-rw-r--r--  clang/lib/CodeGen/TargetBuiltins/RISCV.cpp | 2
-rw-r--r--  clang/lib/CodeGen/TargetInfo.h | 1
-rw-r--r--  clang/lib/Driver/ToolChain.cpp | 1
-rw-r--r--  clang/lib/Driver/ToolChains/Arch/AArch64.cpp | 21
-rw-r--r--  clang/lib/Driver/ToolChains/Arch/AArch64.h | 2
-rw-r--r--  clang/lib/Driver/ToolChains/Clang.cpp | 59
-rw-r--r--  clang/lib/Driver/ToolChains/CommonArgs.cpp | 29
-rw-r--r--  clang/lib/Driver/ToolChains/Fuchsia.cpp | 4
-rw-r--r--  clang/lib/Driver/ToolChains/Linux.cpp | 96
-rw-r--r--  clang/lib/Driver/ToolChains/Linux.h | 7
-rw-r--r--  clang/lib/Driver/ToolChains/PS4CPU.cpp | 11
-rw-r--r--  clang/lib/Format/TokenAnnotator.cpp | 22
-rw-r--r--  clang/lib/Frontend/CompilerInstance.cpp | 7
-rw-r--r--  clang/lib/Headers/CMakeLists.txt | 3
-rw-r--r--  clang/lib/Headers/__float_float.h | 176
-rw-r--r--  clang/lib/Headers/__float_header_macro.h | 12
-rw-r--r--  clang/lib/Headers/__float_infinity_nan.h | 20
-rw-r--r--  clang/lib/Headers/avx2intrin.h | 21
-rw-r--r--  clang/lib/Headers/float.h | 180
-rw-r--r--  clang/lib/Headers/module.modulemap | 18
-rw-r--r--  clang/lib/Headers/sifive_vector.h | 56
-rw-r--r--  clang/lib/Headers/tmmintrin.h | 52
-rw-r--r--  clang/lib/Lex/HeaderSearch.cpp | 6
-rw-r--r--  clang/lib/Parse/ParseHLSL.cpp | 6
-rw-r--r--  clang/lib/Sema/CheckExprLifetime.cpp | 1
-rw-r--r--  clang/lib/Sema/SemaChecking.cpp | 52
-rw-r--r--  clang/lib/Sema/SemaHLSL.cpp | 144
-rw-r--r--  clang/lib/Sema/SemaInit.cpp | 87
-rw-r--r--  clang/lib/Sema/SemaObjCProperty.cpp | 2
-rw-r--r--  clang/lib/Sema/SemaOpenACC.cpp | 1
-rw-r--r--  clang/lib/Sema/SemaOpenMP.cpp | 4
-rw-r--r--  clang/lib/Sema/SemaOverload.cpp | 11
-rw-r--r--  clang/lib/Sema/SemaRISCV.cpp | 74
-rw-r--r--  clang/lib/StaticAnalyzer/Checkers/WebKit/RawPtrRefLambdaCapturesChecker.cpp | 107
-rw-r--r--  clang/lib/StaticAnalyzer/Frontend/AnalysisConsumer.cpp | 27
-rw-r--r--  clang/lib/Support/RISCVVIntrinsicUtils.cpp | 5
-rw-r--r--  clang/lib/Tooling/DependencyScanning/DependencyScannerImpl.cpp | 11
70 files changed, 1950 insertions(+), 653 deletions(-)
diff --git a/clang/lib/AST/ASTContext.cpp b/clang/lib/AST/ASTContext.cpp
index 32c8f62..687cd46 100644
--- a/clang/lib/AST/ASTContext.cpp
+++ b/clang/lib/AST/ASTContext.cpp
@@ -1648,6 +1648,9 @@ ASTContext::findPointerAuthContent(QualType T) const {
if (!RD)
return PointerAuthContent::None;
+ if (RD->isInvalidDecl())
+ return PointerAuthContent::None;
+
if (auto Existing = RecordContainsAddressDiscriminatedPointerAuth.find(RD);
Existing != RecordContainsAddressDiscriminatedPointerAuth.end())
return Existing->second;
@@ -3517,7 +3520,6 @@ static void encodeTypeForFunctionPointerAuth(const ASTContext &Ctx,
uint16_t ASTContext::getPointerAuthTypeDiscriminator(QualType T) {
assert(!T->isDependentType() &&
"cannot compute type discriminator of a dependent type");
-
SmallString<256> Str;
llvm::raw_svector_ostream Out(Str);
diff --git a/clang/lib/AST/ByteCode/Compiler.cpp b/clang/lib/AST/ByteCode/Compiler.cpp
index f15b3c1..f4ddbf4 100644
--- a/clang/lib/AST/ByteCode/Compiler.cpp
+++ b/clang/lib/AST/ByteCode/Compiler.cpp
@@ -1842,7 +1842,6 @@ bool Compiler<Emitter>::visitInitList(ArrayRef<const Expr *> Inits,
const Expr *Init, PrimType T,
bool Activate = false) -> bool {
InitStackScope<Emitter> ISS(this, isa<CXXDefaultInitExpr>(Init));
- InitLinkScope<Emitter> ILS(this, InitLink::Field(FieldToInit->Offset));
if (!this->visit(Init))
return false;
@@ -3274,34 +3273,43 @@ bool Compiler<Emitter>::VisitCXXConstructExpr(const CXXConstructExpr *E) {
}
if (T->isArrayType()) {
- const ConstantArrayType *CAT =
- Ctx.getASTContext().getAsConstantArrayType(E->getType());
- if (!CAT)
- return false;
-
- size_t NumElems = CAT->getZExtSize();
const Function *Func = getFunction(E->getConstructor());
if (!Func)
return false;
- // FIXME(perf): We're calling the constructor once per array element here,
- // in the old interpreter we had a special-case for trivial constructors.
- for (size_t I = 0; I != NumElems; ++I) {
- if (!this->emitConstUint64(I, E))
- return false;
- if (!this->emitArrayElemPtrUint64(E))
- return false;
+ if (!this->emitDupPtr(E))
+ return false;
- // Constructor arguments.
- for (const auto *Arg : E->arguments()) {
- if (!this->visit(Arg))
- return false;
+ std::function<bool(QualType)> initArrayDimension;
+ initArrayDimension = [&](QualType T) -> bool {
+ if (!T->isArrayType()) {
+ // Constructor arguments.
+ for (const auto *Arg : E->arguments()) {
+ if (!this->visit(Arg))
+ return false;
+ }
+
+ return this->emitCall(Func, 0, E);
}
- if (!this->emitCall(Func, 0, E))
+ const ConstantArrayType *CAT =
+ Ctx.getASTContext().getAsConstantArrayType(T);
+ if (!CAT)
return false;
- }
- return true;
+ QualType ElemTy = CAT->getElementType();
+ unsigned NumElems = CAT->getZExtSize();
+ for (size_t I = 0; I != NumElems; ++I) {
+ if (!this->emitConstUint64(I, E))
+ return false;
+ if (!this->emitArrayElemPtrUint64(E))
+ return false;
+ if (!initArrayDimension(ElemTy))
+ return false;
+ }
+ return this->emitPopPtr(E);
+ };
+
+ return initArrayDimension(E->getType());
}
return false;
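For context, a minimal sketch (not from the patch) of what the recursion above enables: constructing each element of a multi-dimensional array in a constant expression, where the previous single-level loop only handled one ConstantArrayType dimension. Assumes the experimental bytecode interpreter (-fexperimental-new-constant-interpreter).

  struct Cell {
    int v;
    constexpr Cell() : v(7) {}
  };
  constexpr bool check() {
    Cell grid[2][3] = {}; // ctor now runs once per innermost element
    return grid[1][2].v == 7;
  }
  static_assert(check(), "multi-dimensional ctor array init");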
@@ -3600,8 +3608,6 @@ bool Compiler<Emitter>::VisitCXXNewExpr(const CXXNewExpr *E) {
if (PlacementDest) {
if (!this->visit(PlacementDest))
return false;
- if (!this->emitStartLifetime(E))
- return false;
if (!this->emitGetLocal(SizeT, ArrayLen, E))
return false;
if (!this->emitCheckNewTypeMismatchArray(SizeT, E, E))
@@ -3741,10 +3747,9 @@ bool Compiler<Emitter>::VisitCXXNewExpr(const CXXNewExpr *E) {
if (PlacementDest) {
if (!this->visit(PlacementDest))
return false;
- if (!this->emitStartLifetime(E))
- return false;
if (!this->emitCheckNewTypeMismatch(E, E))
return false;
+
} else {
// Allocate just one element.
if (!this->emitAlloc(Desc, E))
@@ -5385,55 +5390,57 @@ bool Compiler<Emitter>::VisitCXXThisExpr(const CXXThisExpr *E) {
// instance pointer of the current function frame, but e.g. to the declaration
// currently being initialized. Here we emit the necessary instruction(s) for
// this scenario.
- if (!InitStackActive)
+ if (!InitStackActive || InitStack.empty())
return this->emitThis(E);
- if (!InitStack.empty()) {
- // If our init stack is, for example:
- // 0 Stack: 3 (decl)
- // 1 Stack: 6 (init list)
- // 2 Stack: 1 (field)
- // 3 Stack: 6 (init list)
- // 4 Stack: 1 (field)
- //
- // We want to find the LAST element in it that's an init list,
- // which is marked with the K_InitList marker. The index right
- // before that points to an init list. We need to find the
- // elements before the K_InitList element that point to a base
- // (e.g. a decl or This), optionally followed by field, elem, etc.
- // In the example above, we want to emit elements [0..2].
- unsigned StartIndex = 0;
- unsigned EndIndex = 0;
- // Find the init list.
- for (StartIndex = InitStack.size() - 1; StartIndex > 0; --StartIndex) {
- if (InitStack[StartIndex].Kind == InitLink::K_InitList ||
- InitStack[StartIndex].Kind == InitLink::K_This) {
- EndIndex = StartIndex;
- --StartIndex;
- break;
- }
+ // If our init stack is, for example:
+ // 0 Stack: 3 (decl)
+ // 1 Stack: 6 (init list)
+ // 2 Stack: 1 (field)
+ // 3 Stack: 6 (init list)
+ // 4 Stack: 1 (field)
+ //
+ // We want to find the LAST element in it that's an init list,
+ // which is marked with the K_InitList marker. The index right
+ // before that points to an init list. We need to find the
+ // elements before the K_InitList element that point to a base
+ // (e.g. a decl or This), optionally followed by field, elem, etc.
+ // In the example above, we want to emit elements [0..2].
+ unsigned StartIndex = 0;
+ unsigned EndIndex = 0;
+ // Find the init list.
+ for (StartIndex = InitStack.size() - 1; StartIndex > 0; --StartIndex) {
+ if (InitStack[StartIndex].Kind == InitLink::K_InitList ||
+ InitStack[StartIndex].Kind == InitLink::K_This) {
+ EndIndex = StartIndex;
+ --StartIndex;
+ break;
}
+ }
- // Walk backwards to find the base.
- for (; StartIndex > 0; --StartIndex) {
- if (InitStack[StartIndex].Kind == InitLink::K_InitList)
- continue;
+ // Walk backwards to find the base.
+ for (; StartIndex > 0; --StartIndex) {
+ if (InitStack[StartIndex].Kind == InitLink::K_InitList)
+ continue;
- if (InitStack[StartIndex].Kind != InitLink::K_Field &&
- InitStack[StartIndex].Kind != InitLink::K_Elem)
- break;
- }
+ if (InitStack[StartIndex].Kind != InitLink::K_Field &&
+ InitStack[StartIndex].Kind != InitLink::K_Elem)
+ break;
+ }
- // Emit the instructions.
- for (unsigned I = StartIndex; I != EndIndex; ++I) {
- if (InitStack[I].Kind == InitLink::K_InitList)
- continue;
- if (!InitStack[I].template emit<Emitter>(this, E))
- return false;
- }
- return true;
+ if (StartIndex == 0 && EndIndex == 0)
+ EndIndex = InitStack.size() - 1;
+
+ assert(StartIndex < EndIndex);
+
+ // Emit the instructions.
+ for (unsigned I = StartIndex; I != (EndIndex + 1); ++I) {
+ if (InitStack[I].Kind == InitLink::K_InitList)
+ continue;
+ if (!InitStack[I].template emit<Emitter>(this, E))
+ return false;
}
- return this->emitThis(E);
+ return true;
}
template <class Emitter> bool Compiler<Emitter>::visitStmt(const Stmt *S) {
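A hedged illustration of the scenario the init-stack walk above serves: while constant-evaluating a default member initializer, 'this' must resolve to the object currently being initialized rather than to the enclosing frame's instance pointer.

  struct Widget {
    int a = 10;
    int b = a + 1; // CXXDefaultInitExpr: 'this' is the Widget being
                   // initialized, recovered by replaying the init stack
  };
  constexpr Widget w{};
  static_assert(w.b == 11);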
@@ -6295,6 +6302,10 @@ bool Compiler<Emitter>::compileConstructor(const CXXConstructorDecl *Ctor) {
}
assert(NestedField);
+ unsigned FirstLinkOffset =
+ R->getField(cast<FieldDecl>(IFD->chain()[0]))->Offset;
+ InitStackScope<Emitter> ISS(this, isa<CXXDefaultInitExpr>(InitExpr));
+ InitLinkScope<Emitter> ILS(this, InitLink::Field(FirstLinkOffset));
if (!emitFieldInitializer(NestedField, NestedFieldOffset, InitExpr,
IsUnion))
return false;
diff --git a/clang/lib/AST/ByteCode/Interp.cpp b/clang/lib/AST/ByteCode/Interp.cpp
index a72282c..169a9a2 100644
--- a/clang/lib/AST/ByteCode/Interp.cpp
+++ b/clang/lib/AST/ByteCode/Interp.cpp
@@ -1903,12 +1903,19 @@ bool CheckNewTypeMismatch(InterpState &S, CodePtr OpPC, const Expr *E,
if (Ptr.inUnion() && Ptr.getBase().getRecord()->isUnion())
Ptr.activate();
+ if (Ptr.isZero()) {
+ S.FFDiag(S.Current->getSource(OpPC), diag::note_constexpr_access_null)
+ << AK_Construct;
+ return false;
+ }
+
if (!Ptr.isBlockPointer())
return false;
+ startLifetimeRecurse(Ptr);
+
// Similar to CheckStore(), but with the additional CheckTemporary() call and
// the AccessKinds are different.
-
if (!Ptr.block()->isAccessible()) {
if (!CheckExtern(S, OpPC, Ptr))
return false;
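The added null check rejects placement-new on a null pointer during constant evaluation, and object lifetime now starts inside the type-mismatch check itself. A minimal sketch of input that should hit the new note_constexpr_access_null path, assuming a language mode where placement new is usable in constant expressions:

  #include <new>
  constexpr int bad() {
    int *p = nullptr;
    new (p) int{1}; // diagnosed: constructing at a null pointer
    return 0;
  }
  // constexpr int x = bad(); // would now fail with a proper note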
diff --git a/clang/lib/AST/ByteCode/InterpBuiltin.cpp b/clang/lib/AST/ByteCode/InterpBuiltin.cpp
index 2d5ad4a..d0b97a1 100644
--- a/clang/lib/AST/ByteCode/InterpBuiltin.cpp
+++ b/clang/lib/AST/ByteCode/InterpBuiltin.cpp
@@ -12,12 +12,14 @@
#include "InterpHelpers.h"
#include "PrimType.h"
#include "Program.h"
+#include "clang/AST/InferAlloc.h"
#include "clang/AST/OSLog.h"
#include "clang/AST/RecordLayout.h"
#include "clang/Basic/Builtins.h"
#include "clang/Basic/TargetBuiltins.h"
#include "clang/Basic/TargetInfo.h"
#include "llvm/ADT/StringExtras.h"
+#include "llvm/Support/AllocToken.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/SipHash.h"
@@ -1307,6 +1309,45 @@ interp__builtin_ptrauth_string_discriminator(InterpState &S, CodePtr OpPC,
return true;
}
+static bool interp__builtin_infer_alloc_token(InterpState &S, CodePtr OpPC,
+ const InterpFrame *Frame,
+ const CallExpr *Call) {
+ const ASTContext &ASTCtx = S.getASTContext();
+ uint64_t BitWidth = ASTCtx.getTypeSize(ASTCtx.getSizeType());
+ auto Mode =
+ ASTCtx.getLangOpts().AllocTokenMode.value_or(llvm::DefaultAllocTokenMode);
+ uint64_t MaxTokens =
+ ASTCtx.getLangOpts().AllocTokenMax.value_or(~0ULL >> (64 - BitWidth));
+
+ // We do not read any of the arguments; discard them.
+ for (int I = Call->getNumArgs() - 1; I >= 0; --I)
+ discard(S.Stk, *S.getContext().classify(Call->getArg(I)));
+
+ // Note: Type inference from a surrounding cast is not supported in
+ // constexpr evaluation.
+ QualType AllocType = infer_alloc::inferPossibleType(Call, ASTCtx, nullptr);
+ if (AllocType.isNull()) {
+ S.CCEDiag(Call,
+ diag::note_constexpr_infer_alloc_token_type_inference_failed);
+ return false;
+ }
+
+ auto ATMD = infer_alloc::getAllocTokenMetadata(AllocType, ASTCtx);
+ if (!ATMD) {
+ S.CCEDiag(Call, diag::note_constexpr_infer_alloc_token_no_metadata);
+ return false;
+ }
+
+ auto MaybeToken = llvm::getAllocToken(Mode, *ATMD, MaxTokens);
+ if (!MaybeToken) {
+ S.CCEDiag(Call, diag::note_constexpr_infer_alloc_token_stateful_mode);
+ return false;
+ }
+
+ pushInteger(S, llvm::APInt(BitWidth, *MaybeToken), ASTCtx.getSizeType());
+ return true;
+}
+
static bool interp__builtin_operator_new(InterpState &S, CodePtr OpPC,
const InterpFrame *Frame,
const CallExpr *Call) {
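A hedged usage sketch of the builtin wired up above; the call-site shape is an assumption based on the comments (the allocation type is inferred from the argument expression, and inference from a surrounding cast is not supported in constexpr):

  struct Node { Node *next; int key; };
  // Result is size_t-wide; this fails to be a constant expression when
  // no type can be inferred, which callers can probe with
  // __builtin_constant_p(...).
  constexpr auto tok = __builtin_infer_alloc_token(sizeof(Node));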
@@ -3279,6 +3320,65 @@ static bool interp__builtin_ia32_vpconflict(InterpState &S, CodePtr OpPC,
return true;
}
+static bool interp__builtin_x86_byteshift(
+ InterpState &S, CodePtr OpPC, const CallExpr *Call, unsigned ID,
+ llvm::function_ref<APInt(const Pointer &, unsigned Lane, unsigned I,
+ unsigned Shift)>
+ Fn) {
+ assert(Call->getNumArgs() == 2);
+
+ APSInt ImmAPS = popToAPSInt(S, Call->getArg(1));
+ uint64_t Shift = ImmAPS.getZExtValue() & 0xff;
+
+ const Pointer &Src = S.Stk.pop<Pointer>();
+ if (!Src.getFieldDesc()->isPrimitiveArray())
+ return false;
+
+ unsigned NumElems = Src.getNumElems();
+ const Pointer &Dst = S.Stk.peek<Pointer>();
+ PrimType ElemT = Src.getFieldDesc()->getPrimType();
+
+ for (unsigned Lane = 0; Lane != NumElems; Lane += 16) {
+ for (unsigned I = 0; I != 16; ++I) {
+ unsigned Base = Lane + I;
+ APSInt Result = APSInt(Fn(Src, Lane, I, Shift));
+ INT_TYPE_SWITCH_NO_BOOL(ElemT,
+ { Dst.elem<T>(Base) = static_cast<T>(Result); });
+ }
+ }
+
+ Dst.initializeAllElements();
+
+ return true;
+}
+
+static bool interp__builtin_ia32_shuffle_generic(
+ InterpState &S, CodePtr OpPC, const CallExpr *Call,
+ llvm::function_ref<std::pair<unsigned, unsigned>(unsigned, unsigned)>
+ GetSourceIndex) {
+
+ assert(Call->getNumArgs() == 3);
+ unsigned ShuffleMask = popToAPSInt(S, Call->getArg(2)).getZExtValue();
+
+ QualType Arg0Type = Call->getArg(0)->getType();
+ const auto *VecT = Arg0Type->castAs<VectorType>();
+ PrimType ElemT = *S.getContext().classify(VecT->getElementType());
+ unsigned NumElems = VecT->getNumElements();
+
+ const Pointer &B = S.Stk.pop<Pointer>();
+ const Pointer &A = S.Stk.pop<Pointer>();
+ const Pointer &Dst = S.Stk.peek<Pointer>();
+
+ for (unsigned DstIdx = 0; DstIdx != NumElems; ++DstIdx) {
+ auto [SrcVecIdx, SrcIdx] = GetSourceIndex(DstIdx, ShuffleMask);
+ const Pointer &Src = (SrcVecIdx == 0) ? A : B;
+ TYPE_SWITCH(ElemT, { Dst.elem<T>(DstIdx) = Src.elem<T>(SrcIdx); });
+ }
+ Dst.initializeAllElements();
+
+ return true;
+}
+
bool InterpretBuiltin(InterpState &S, CodePtr OpPC, const CallExpr *Call,
uint32_t BuiltinID) {
if (!S.getASTContext().BuiltinInfo.isConstantEvaluated(BuiltinID))
@@ -3694,6 +3794,9 @@ bool InterpretBuiltin(InterpState &S, CodePtr OpPC, const CallExpr *Call,
case Builtin::BI__builtin_ptrauth_string_discriminator:
return interp__builtin_ptrauth_string_discriminator(S, OpPC, Frame, Call);
+ case Builtin::BI__builtin_infer_alloc_token:
+ return interp__builtin_infer_alloc_token(S, OpPC, Frame, Call);
+
case Builtin::BI__noop:
pushInteger(S, 0, Call->getType());
return true;
@@ -3809,6 +3912,21 @@ bool InterpretBuiltin(InterpState &S, CodePtr OpPC, const CallExpr *Call,
return interp__builtin_ia32_movmsk_op(S, OpPC, Call);
}
+ case X86::BI__builtin_ia32_psignb128:
+ case X86::BI__builtin_ia32_psignb256:
+ case X86::BI__builtin_ia32_psignw128:
+ case X86::BI__builtin_ia32_psignw256:
+ case X86::BI__builtin_ia32_psignd128:
+ case X86::BI__builtin_ia32_psignd256:
+ return interp__builtin_elementwise_int_binop(
+ S, OpPC, Call, [](const APInt &AElem, const APInt &BElem) {
+ if (BElem.isZero())
+ return APInt::getZero(AElem.getBitWidth());
+ if (BElem.isNegative())
+ return -AElem;
+ return AElem;
+ });
+
case clang::X86::BI__builtin_ia32_pavgb128:
case clang::X86::BI__builtin_ia32_pavgw128:
case clang::X86::BI__builtin_ia32_pavgb256:
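The lambda above models PSIGN per element: zero when b is zero, -a when b is negative, otherwise a unchanged. A scalar sketch of the same rule (illustrative, not from the patch):

  constexpr int psign(int a, int b) {
    return b == 0 ? 0 : (b < 0 ? -a : a);
  }
  static_assert(psign(5, -2) == -5 && psign(5, 0) == 0 && psign(5, 9) == 5);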
@@ -4191,6 +4309,42 @@ bool InterpretBuiltin(InterpState &S, CodePtr OpPC, const CallExpr *Call,
case X86::BI__builtin_ia32_selectpd_512:
return interp__builtin_select(S, OpPC, Call);
+ case X86::BI__builtin_ia32_shufps:
+ case X86::BI__builtin_ia32_shufps256:
+ case X86::BI__builtin_ia32_shufps512:
+ return interp__builtin_ia32_shuffle_generic(
+ S, OpPC, Call, [](unsigned DstIdx, unsigned ShuffleMask) {
+ unsigned NumElemPerLane = 4;
+ unsigned NumSelectableElems = NumElemPerLane / 2;
+ unsigned BitsPerElem = 2;
+ unsigned IndexMask = 0x3;
+ unsigned MaskBits = 8;
+ unsigned Lane = DstIdx / NumElemPerLane;
+ unsigned ElemInLane = DstIdx % NumElemPerLane;
+ unsigned LaneOffset = Lane * NumElemPerLane;
+ unsigned SrcIdx = ElemInLane >= NumSelectableElems ? 1 : 0;
+ unsigned BitIndex = (DstIdx * BitsPerElem) % MaskBits;
+ unsigned Index = (ShuffleMask >> BitIndex) & IndexMask;
+ return std::pair<unsigned, unsigned>{SrcIdx, LaneOffset + Index};
+ });
+ case X86::BI__builtin_ia32_shufpd:
+ case X86::BI__builtin_ia32_shufpd256:
+ case X86::BI__builtin_ia32_shufpd512:
+ return interp__builtin_ia32_shuffle_generic(
+ S, OpPC, Call, [](unsigned DstIdx, unsigned ShuffleMask) {
+ unsigned NumElemPerLane = 2;
+ unsigned NumSelectableElems = NumElemPerLane / 2;
+ unsigned BitsPerElem = 1;
+ unsigned IndexMask = 0x1;
+ unsigned MaskBits = 8;
+ unsigned Lane = DstIdx / NumElemPerLane;
+ unsigned ElemInLane = DstIdx % NumElemPerLane;
+ unsigned LaneOffset = Lane * NumElemPerLane;
+ unsigned SrcIdx = ElemInLane >= NumSelectableElems ? 1 : 0;
+ unsigned BitIndex = (DstIdx * BitsPerElem) % MaskBits;
+ unsigned Index = (ShuffleMask >> BitIndex) & IndexMask;
+ return std::pair<unsigned, unsigned>{SrcIdx, LaneOffset + Index};
+ });
case X86::BI__builtin_ia32_pshufb128:
case X86::BI__builtin_ia32_pshufb256:
case X86::BI__builtin_ia32_pshufb512:
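Worked decoding of the shufps mapping above: two mask bits per destination element, with the low half of each 128-bit lane selecting from A and the high half from B; the 8-bit mask repeats per lane. A single-lane model (a sketch, not the patch's code):

  constexpr void shufps_lane(const float (&a)[4], const float (&b)[4],
                             unsigned mask, float (&dst)[4]) {
    for (unsigned i = 0; i != 4; ++i) {
      unsigned idx = (mask >> (2 * i)) & 0x3; // 2 bits per element
      dst[i] = (i < 2 ? a : b)[idx];          // low half: A, high half: B
    }
  }
  // e.g. mask 0x1B (0b00011011) yields { a[3], a[2], b[1], b[0] }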
@@ -4331,6 +4485,39 @@ bool InterpretBuiltin(InterpState &S, CodePtr OpPC, const CallExpr *Call,
case X86::BI__builtin_ia32_vec_set_v4di:
return interp__builtin_vec_set(S, OpPC, Call, BuiltinID);
+ case X86::BI__builtin_ia32_pslldqi128_byteshift:
+ case X86::BI__builtin_ia32_pslldqi256_byteshift:
+ case X86::BI__builtin_ia32_pslldqi512_byteshift:
+ // These SLLDQ intrinsics always operate on byte elements (8 bits).
+ // The lane width is hardcoded to 16 bytes to match the 128-bit lane
+ // size, but the algorithm processes one byte per iteration,
+ // so APInt(8, ...) is correct and intentional.
+ return interp__builtin_x86_byteshift(
+ S, OpPC, Call, BuiltinID,
+ [](const Pointer &Src, unsigned Lane, unsigned I, unsigned Shift) {
+ if (I < Shift) {
+ return APInt(8, 0);
+ }
+ return APInt(8, Src.elem<uint8_t>(Lane + I - Shift));
+ });
+
+ case X86::BI__builtin_ia32_psrldqi128_byteshift:
+ case X86::BI__builtin_ia32_psrldqi256_byteshift:
+ case X86::BI__builtin_ia32_psrldqi512_byteshift:
+ // These SRLDQ intrinsics always operate on byte elements (8 bits).
+ // The lane width is hardcoded to 16 bytes to match the 128-bit lane
+ // size, but the algorithm processes one byte per iteration,
+ // so APInt(8, ...) is correct and intentional.
+ return interp__builtin_x86_byteshift(
+ S, OpPC, Call, BuiltinID,
+ [](const Pointer &Src, unsigned Lane, unsigned I, unsigned Shift) {
+ if (I + Shift < 16) {
+ return APInt(8, Src.elem<uint8_t>(Lane + I + Shift));
+ }
+
+ return APInt(8, 0);
+ });
+
default:
S.FFDiag(S.Current->getLocation(OpPC),
diag::note_invalid_subexpr_in_const_expr)
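A single-lane model of the byte-shift rule implemented above; each 16-byte lane shifts independently and vacated bytes become zero (a sketch, assuming the usual PSLLDQ/PSRLDQ semantics):

  // pslldq: dst[i] = (i < shift) ? 0 : src[i - shift]
  // psrldq: dst[i] = (i + shift < 16) ? src[i + shift] : 0
  constexpr unsigned char pslldq_byte(const unsigned char (&src)[16],
                                      unsigned i, unsigned shift) {
    return i < shift ? 0 : src[i - shift];
  }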
diff --git a/clang/lib/AST/ByteCode/Opcodes.td b/clang/lib/AST/ByteCode/Opcodes.td
index 406feb5..1c17ad9e 100644
--- a/clang/lib/AST/ByteCode/Opcodes.td
+++ b/clang/lib/AST/ByteCode/Opcodes.td
@@ -866,19 +866,13 @@ def Free : Opcode {
let Args = [ArgBool, ArgBool];
}
-def CheckNewTypeMismatch : Opcode {
- let Args = [ArgExpr];
-}
-
-def InvalidNewDeleteExpr : Opcode {
- let Args = [ArgExpr];
-}
-
+def CheckNewTypeMismatch : Opcode { let Args = [ArgExpr]; }
def CheckNewTypeMismatchArray : Opcode {
let Types = [IntegerTypeClass];
let Args = [ArgExpr];
let HasGroup = 1;
}
+def InvalidNewDeleteExpr : Opcode { let Args = [ArgExpr]; }
def IsConstantContext: Opcode;
def CheckAllocations : Opcode;
diff --git a/clang/lib/AST/ExprConstant.cpp b/clang/lib/AST/ExprConstant.cpp
index 00aaaab..29ee089 100644
--- a/clang/lib/AST/ExprConstant.cpp
+++ b/clang/lib/AST/ExprConstant.cpp
@@ -44,6 +44,7 @@
#include "clang/AST/CharUnits.h"
#include "clang/AST/CurrentSourceLocExprScope.h"
#include "clang/AST/Expr.h"
+#include "clang/AST/InferAlloc.h"
#include "clang/AST/OSLog.h"
#include "clang/AST/OptionalDiagnostic.h"
#include "clang/AST/RecordLayout.h"
@@ -11618,6 +11619,39 @@ static bool evalPackBuiltin(const CallExpr *E, EvalInfo &Info, APValue &Result,
return true;
}
+static bool evalShuffleGeneric(
+ EvalInfo &Info, const CallExpr *Call, APValue &Out,
+ llvm::function_ref<std::pair<unsigned, unsigned>(unsigned, unsigned)>
+ GetSourceIndex) {
+
+ const auto *VT = Call->getType()->getAs<VectorType>();
+ if (!VT)
+ return false;
+
+ APSInt MaskImm;
+ if (!EvaluateInteger(Call->getArg(2), MaskImm, Info))
+ return false;
+ unsigned ShuffleMask = static_cast<unsigned>(MaskImm.getZExtValue());
+
+ APValue A, B;
+ if (!EvaluateAsRValue(Info, Call->getArg(0), A) ||
+ !EvaluateAsRValue(Info, Call->getArg(1), B))
+ return false;
+
+ unsigned NumElts = VT->getNumElements();
+ SmallVector<APValue, 16> ResultElements;
+ ResultElements.reserve(NumElts);
+
+ for (unsigned DstIdx = 0; DstIdx != NumElts; ++DstIdx) {
+ auto [SrcVecIdx, SrcIdx] = GetSourceIndex(DstIdx, ShuffleMask);
+ const APValue &Src = (SrcVecIdx == 0) ? A : B;
+ ResultElements.push_back(Src.getVectorElt(SrcIdx));
+ }
+
+ Out = APValue(ResultElements.data(), ResultElements.size());
+ return true;
+}
+
static bool evalPshufbBuiltin(EvalInfo &Info, const CallExpr *Call,
APValue &Out) {
APValue SrcVec, ControlVec;
@@ -12312,6 +12346,20 @@ bool VectorExprEvaluator::VisitCallExpr(const CallExpr *E) {
return Success(APValue(ResultElements.data(), ResultElements.size()), E);
}
+ case X86::BI__builtin_ia32_psignb128:
+ case X86::BI__builtin_ia32_psignb256:
+ case X86::BI__builtin_ia32_psignw128:
+ case X86::BI__builtin_ia32_psignw256:
+ case X86::BI__builtin_ia32_psignd128:
+ case X86::BI__builtin_ia32_psignd256:
+ return EvaluateBinOpExpr([](const APInt &AElem, const APInt &BElem) {
+ if (BElem.isZero())
+ return APInt::getZero(AElem.getBitWidth());
+ if (BElem.isNegative())
+ return -AElem;
+ return AElem;
+ });
+
case X86::BI__builtin_ia32_blendvpd:
case X86::BI__builtin_ia32_blendvpd256:
case X86::BI__builtin_ia32_blendvps:
@@ -12383,7 +12431,56 @@ bool VectorExprEvaluator::VisitCallExpr(const CallExpr *E) {
return Success(APValue(ResultElements.data(), ResultElements.size()), E);
}
-
+ case X86::BI__builtin_ia32_shufps:
+ case X86::BI__builtin_ia32_shufps256:
+ case X86::BI__builtin_ia32_shufps512: {
+ APValue R;
+ if (!evalShuffleGeneric(
+ Info, E, R,
+ [](unsigned DstIdx,
+ unsigned ShuffleMask) -> std::pair<unsigned, unsigned> {
+ constexpr unsigned LaneBits = 128u;
+ unsigned NumElemPerLane = LaneBits / 32;
+ unsigned NumSelectableElems = NumElemPerLane / 2;
+ unsigned BitsPerElem = 2;
+ unsigned IndexMask = (1u << BitsPerElem) - 1;
+ unsigned MaskBits = 8;
+ unsigned Lane = DstIdx / NumElemPerLane;
+ unsigned ElemInLane = DstIdx % NumElemPerLane;
+ unsigned LaneOffset = Lane * NumElemPerLane;
+ unsigned BitIndex = (DstIdx * BitsPerElem) % MaskBits;
+ unsigned SrcIdx = (ElemInLane < NumSelectableElems) ? 0 : 1;
+ unsigned Index = (ShuffleMask >> BitIndex) & IndexMask;
+ return {SrcIdx, LaneOffset + Index};
+ }))
+ return false;
+ return Success(R, E);
+ }
+ case X86::BI__builtin_ia32_shufpd:
+ case X86::BI__builtin_ia32_shufpd256:
+ case X86::BI__builtin_ia32_shufpd512: {
+ APValue R;
+ if (!evalShuffleGeneric(
+ Info, E, R,
+ [](unsigned DstIdx,
+ unsigned ShuffleMask) -> std::pair<unsigned, unsigned> {
+ constexpr unsigned LaneBits = 128u;
+ unsigned NumElemPerLane = LaneBits / 64;
+ unsigned NumSelectableElems = NumElemPerLane / 2;
+ unsigned BitsPerElem = 1;
+ unsigned IndexMask = (1u << BitsPerElem) - 1;
+ unsigned MaskBits = 8;
+ unsigned Lane = DstIdx / NumElemPerLane;
+ unsigned ElemInLane = DstIdx % NumElemPerLane;
+ unsigned LaneOffset = Lane * NumElemPerLane;
+ unsigned BitIndex = (DstIdx * BitsPerElem) % MaskBits;
+ unsigned SrcIdx = (ElemInLane < NumSelectableElems) ? 0 : 1;
+ unsigned Index = (ShuffleMask >> BitIndex) & IndexMask;
+ return {SrcIdx, LaneOffset + Index};
+ }))
+ return false;
+ return Success(R, E);
+ }
case X86::BI__builtin_ia32_pshufb128:
case X86::BI__builtin_ia32_pshufb256:
case X86::BI__builtin_ia32_pshufb512: {
@@ -12891,6 +12988,66 @@ bool VectorExprEvaluator::VisitCallExpr(const CallExpr *E) {
return Success(APValue(Elems.data(), NumElems), E);
}
+
+ case X86::BI__builtin_ia32_pslldqi128_byteshift:
+ case X86::BI__builtin_ia32_pslldqi256_byteshift:
+ case X86::BI__builtin_ia32_pslldqi512_byteshift: {
+ assert(E->getNumArgs() == 2);
+
+ APValue Src;
+ APSInt Imm;
+ if (!EvaluateAsRValue(Info, E->getArg(0), Src) ||
+ !EvaluateInteger(E->getArg(1), Imm, Info))
+ return false;
+
+ unsigned VecLen = Src.getVectorLength();
+ unsigned Shift = Imm.getZExtValue() & 0xff;
+
+ SmallVector<APValue> ResultElements;
+ for (unsigned Lane = 0; Lane != VecLen; Lane += 16) {
+ for (unsigned I = 0; I != 16; ++I) {
+ if (I < Shift) {
+ APSInt Zero(8, /*isUnsigned=*/true);
+ Zero = 0;
+ ResultElements.push_back(APValue(Zero));
+ } else {
+ ResultElements.push_back(Src.getVectorElt(Lane + I - Shift));
+ }
+ }
+ }
+
+ return Success(APValue(ResultElements.data(), ResultElements.size()), E);
+ }
+
+ case X86::BI__builtin_ia32_psrldqi128_byteshift:
+ case X86::BI__builtin_ia32_psrldqi256_byteshift:
+ case X86::BI__builtin_ia32_psrldqi512_byteshift: {
+ assert(E->getNumArgs() == 2);
+
+ APValue Src;
+ APSInt Imm;
+ if (!EvaluateAsRValue(Info, E->getArg(0), Src) ||
+ !EvaluateInteger(E->getArg(1), Imm, Info))
+ return false;
+
+ unsigned VecLen = Src.getVectorLength();
+ unsigned Shift = Imm.getZExtValue() & 0xff;
+
+ SmallVector<APValue> ResultElements;
+ for (unsigned Lane = 0; Lane != VecLen; Lane += 16) {
+ for (unsigned I = 0; I != 16; ++I) {
+ if (I + Shift < 16) {
+ ResultElements.push_back(Src.getVectorElt(Lane + I + Shift));
+ } else {
+ APSInt Zero(8, /*isUnsigned=*/true);
+ Zero = 0;
+ ResultElements.push_back(APValue(Zero));
+ }
+ }
+ }
+
+ return Success(APValue(ResultElements.data(), ResultElements.size()), E);
+ }
}
}
@@ -14649,6 +14806,27 @@ bool IntExprEvaluator::VisitBuiltinCallExpr(const CallExpr *E,
return Success(Result, E);
}
+ case Builtin::BI__builtin_infer_alloc_token: {
+ // If we fail to infer a type, this fails to be a constant expression; this
+ // can be checked with __builtin_constant_p(...).
+ QualType AllocType = infer_alloc::inferPossibleType(E, Info.Ctx, nullptr);
+ if (AllocType.isNull())
+ return Error(
+ E, diag::note_constexpr_infer_alloc_token_type_inference_failed);
+ auto ATMD = infer_alloc::getAllocTokenMetadata(AllocType, Info.Ctx);
+ if (!ATMD)
+ return Error(E, diag::note_constexpr_infer_alloc_token_no_metadata);
+ auto Mode =
+ Info.getLangOpts().AllocTokenMode.value_or(llvm::DefaultAllocTokenMode);
+ uint64_t BitWidth = Info.Ctx.getTypeSize(Info.Ctx.getSizeType());
+ uint64_t MaxTokens =
+ Info.getLangOpts().AllocTokenMax.value_or(~0ULL >> (64 - BitWidth));
+ auto MaybeToken = llvm::getAllocToken(Mode, *ATMD, MaxTokens);
+ if (!MaybeToken)
+ return Error(E, diag::note_constexpr_infer_alloc_token_stateful_mode);
+ return Success(llvm::APInt(BitWidth, *MaybeToken), E);
+ }
+
case Builtin::BI__builtin_ffs:
case Builtin::BI__builtin_ffsl:
case Builtin::BI__builtin_ffsll: {
diff --git a/clang/lib/AST/StmtOpenACC.cpp b/clang/lib/AST/StmtOpenACC.cpp
index 2b56c1e..39dfa19 100644
--- a/clang/lib/AST/StmtOpenACC.cpp
+++ b/clang/lib/AST/StmtOpenACC.cpp
@@ -324,6 +324,32 @@ OpenACCAtomicConstruct *OpenACCAtomicConstruct::Create(
return Inst;
}
+static std::pair<const Expr *, const Expr *> getBinaryOpArgs(const Expr *Op) {
+ if (const auto *BO = dyn_cast<BinaryOperator>(Op)) {
+ assert(BO->isAssignmentOp());
+ return {BO->getLHS(), BO->getRHS()};
+ }
+
+ const auto *OO = cast<CXXOperatorCallExpr>(Op);
+ assert(OO->isAssignmentOp());
+ return {OO->getArg(0), OO->getArg(1)};
+}
+
+static std::pair<bool, const Expr *> getUnaryOpArgs(const Expr *Op) {
+ if (const auto *UO = dyn_cast<UnaryOperator>(Op))
+ return {true, UO->getSubExpr()};
+
+ if (const auto *OpCall = dyn_cast<CXXOperatorCallExpr>(Op)) {
+ // Post-inc/dec have a second unused argument to differentiate them, so we
+ // accept ++ or -- as unary, or any operator call with only 1 arg.
+ if (OpCall->getNumArgs() == 1 || OpCall->getOperator() == OO_PlusPlus ||
+ OpCall->getOperator() == OO_MinusMinus)
+ return {true, OpCall->getArg(0)};
+ }
+
+ return {false, nullptr};
+}
+
const OpenACCAtomicConstruct::StmtInfo
OpenACCAtomicConstruct::getAssociatedStmtInfo() const {
// This ends up being a vastly simplified version of SemaOpenACCAtomic, since
@@ -331,29 +357,51 @@ OpenACCAtomicConstruct::getAssociatedStmtInfo() const {
// asserts to ensure we don't get off into the weeds.
assert(getAssociatedStmt() && "invalid associated stmt?");
+ const Expr *AssocStmt = cast<const Expr>(getAssociatedStmt());
switch (AtomicKind) {
- case OpenACCAtomicKind::None:
- case OpenACCAtomicKind::Write:
- case OpenACCAtomicKind::Update:
case OpenACCAtomicKind::Capture:
- assert(false && "Only 'read' has been implemented here");
+ assert(false && "Only 'read'/'write'/'update' have been implemented here");
return {};
case OpenACCAtomicKind::Read: {
// Read only supports the format 'v = x'; where both sides are a scalar
// expression. This can come in 2 forms; BinaryOperator or
// CXXOperatorCallExpr (rarely).
- const Expr *AssignExpr = cast<const Expr>(getAssociatedStmt());
- if (const auto *BO = dyn_cast<BinaryOperator>(AssignExpr)) {
- assert(BO->getOpcode() == BO_Assign);
- return {BO->getLHS()->IgnoreImpCasts(), BO->getRHS()->IgnoreImpCasts()};
- }
-
- const auto *OO = cast<CXXOperatorCallExpr>(AssignExpr);
- assert(OO->getOperator() == OO_Equal);
-
- return {OO->getArg(0)->IgnoreImpCasts(), OO->getArg(1)->IgnoreImpCasts()};
+ std::pair<const Expr *, const Expr *> BinaryArgs =
+ getBinaryOpArgs(AssocStmt);
+ // We want the L-value for each side, so we ignore implicit casts.
+ return {BinaryArgs.first->IgnoreImpCasts(),
+ BinaryArgs.second->IgnoreImpCasts(), /*expr=*/nullptr};
}
+ case OpenACCAtomicKind::Write: {
+ // Write supports only the format 'x = expr', where the expression is of
+ // scalar type and 'x' is a scalar lvalue. As above, this can come in 2
+ // forms: BinaryOperator or CXXOperatorCallExpr.
+ std::pair<const Expr *, const Expr *> BinaryArgs =
+ getBinaryOpArgs(AssocStmt);
+ // We want the L-value for ONLY the X side, so we ignore implicit casts. For
+ // the right side (the expr), we emit it as an r-value so we need to
+ // maintain implicit casts.
+ return {/*v=*/nullptr, BinaryArgs.first->IgnoreImpCasts(),
+ BinaryArgs.second};
}
+ case OpenACCAtomicKind::None:
+ case OpenACCAtomicKind::Update: {
+ std::pair<bool, const Expr *> UnaryArgs = getUnaryOpArgs(AssocStmt);
+ if (UnaryArgs.first)
+ return {/*v=*/nullptr, UnaryArgs.second->IgnoreImpCasts(),
+ /*expr=*/nullptr};
+
+ std::pair<const Expr *, const Expr *> BinaryArgs =
+ getBinaryOpArgs(AssocStmt);
+ // For binary args, we store the RHS whole as an expression (in the
+ // expression slot), since codegen wants the complete RHS for a
+ // recipe.
+ return {/*v=*/nullptr, BinaryArgs.first->IgnoreImpCasts(),
+ BinaryArgs.second};
+ }
+ }
+
+ llvm_unreachable("unknown OpenACC atomic kind");
}
OpenACCCacheConstruct *OpenACCCacheConstruct::CreateEmpty(const ASTContext &C,
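The three associated-statement shapes dissected above map onto the directive forms below: read yields {v, x}, write yields {x, expr} with the RHS kept as an rvalue, and update accepts a unary inc/dec or a binary assignment whose RHS is kept whole. A hedged illustration (not from the patch):

  void atomics(int &v, int &x, int expr) {
  #pragma acc atomic read
    v = x;       // read: both sides taken as lvalues
  #pragma acc atomic write
    x = expr;    // write: x as lvalue, expr emitted as rvalue
  #pragma acc atomic update
    x++;         // update, unary form
  #pragma acc atomic update
    x += expr;   // update, binary form; RHS stored whole for the recipe
  }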
diff --git a/clang/lib/Analysis/FlowSensitive/Models/UncheckedStatusOrAccessModel.cpp b/clang/lib/Analysis/FlowSensitive/Models/UncheckedStatusOrAccessModel.cpp
index 598d33a..90551c2 100644
--- a/clang/lib/Analysis/FlowSensitive/Models/UncheckedStatusOrAccessModel.cpp
+++ b/clang/lib/Analysis/FlowSensitive/Models/UncheckedStatusOrAccessModel.cpp
@@ -168,6 +168,15 @@ static auto isNotOkStatusCall() {
"::absl::UnimplementedError", "::absl::UnknownError"))));
}
+static auto isPointerComparisonOperatorCall(std::string operator_name) {
+ using namespace ::clang::ast_matchers; // NOLINT: Too many names
+ return binaryOperator(hasOperatorName(operator_name),
+ hasLHS(hasType(hasCanonicalType(pointerType(
+ pointee(anyOf(statusOrType(), statusType())))))),
+ hasRHS(hasType(hasCanonicalType(pointerType(
+ pointee(anyOf(statusOrType(), statusType())))))));
+}
+
static auto
buildDiagnoseMatchSwitch(const UncheckedStatusOrAccessModelOptions &Options) {
return CFGMatchSwitchBuilder<const Environment,
@@ -438,6 +447,58 @@ static void transferComparisonOperator(const CXXOperatorCallExpr *Expr,
State.Env.setValue(*Expr, *LhsAndRhsVal);
}
+static RecordStorageLocation *getPointeeLocation(const Expr &Expr,
+ Environment &Env) {
+ if (auto *PointerVal = Env.get<PointerValue>(Expr))
+ return dyn_cast<RecordStorageLocation>(&PointerVal->getPointeeLoc());
+ return nullptr;
+}
+
+static BoolValue *evaluatePointerEquality(const Expr *LhsExpr,
+ const Expr *RhsExpr,
+ Environment &Env) {
+ assert(LhsExpr->getType()->isPointerType());
+ assert(RhsExpr->getType()->isPointerType());
+ RecordStorageLocation *LhsStatusLoc = nullptr;
+ RecordStorageLocation *RhsStatusLoc = nullptr;
+ if (isStatusOrType(LhsExpr->getType()->getPointeeType()) &&
+ isStatusOrType(RhsExpr->getType()->getPointeeType())) {
+ auto *LhsStatusOrLoc = getPointeeLocation(*LhsExpr, Env);
+ auto *RhsStatusOrLoc = getPointeeLocation(*RhsExpr, Env);
+ if (LhsStatusOrLoc == nullptr || RhsStatusOrLoc == nullptr)
+ return nullptr;
+ LhsStatusLoc = &locForStatus(*LhsStatusOrLoc);
+ RhsStatusLoc = &locForStatus(*RhsStatusOrLoc);
+ } else if (isStatusType(LhsExpr->getType()->getPointeeType()) &&
+ isStatusType(RhsExpr->getType()->getPointeeType())) {
+ LhsStatusLoc = getPointeeLocation(*LhsExpr, Env);
+ RhsStatusLoc = getPointeeLocation(*RhsExpr, Env);
+ }
+ if (LhsStatusLoc == nullptr || RhsStatusLoc == nullptr)
+ return nullptr;
+ auto &LhsOkVal = valForOk(*LhsStatusLoc, Env);
+ auto &RhsOkVal = valForOk(*RhsStatusLoc, Env);
+ auto &Res = Env.makeAtomicBoolValue();
+ auto &A = Env.arena();
+ Env.assume(A.makeImplies(
+ Res.formula(), A.makeEquals(LhsOkVal.formula(), RhsOkVal.formula())));
+ return &Res;
+}
+
+static void transferPointerComparisonOperator(const BinaryOperator *Expr,
+ LatticeTransferState &State,
+ bool IsNegative) {
+ auto *LhsAndRhsVal =
+ evaluatePointerEquality(Expr->getLHS(), Expr->getRHS(), State.Env);
+ if (LhsAndRhsVal == nullptr)
+ return;
+
+ if (IsNegative)
+ State.Env.setValue(*Expr, State.Env.makeNot(*LhsAndRhsVal));
+ else
+ State.Env.setValue(*Expr, *LhsAndRhsVal);
+}
+
static void transferOkStatusCall(const CallExpr *Expr,
const MatchFinder::MatchResult &,
LatticeTransferState &State) {
@@ -455,6 +516,18 @@ static void transferNotOkStatusCall(const CallExpr *Expr,
State.Env.assume(A.makeNot(OkVal.formula()));
}
+static void transferEmplaceCall(const CXXMemberCallExpr *Expr,
+ const MatchFinder::MatchResult &,
+ LatticeTransferState &State) {
+ RecordStorageLocation *StatusOrLoc =
+ getImplicitObjectLocation(*Expr, State.Env);
+ if (StatusOrLoc == nullptr)
+ return;
+
+ auto &OkVal = valForOk(locForStatus(*StatusOrLoc), State.Env);
+ State.Env.assume(OkVal.formula());
+}
+
CFGMatchSwitch<LatticeTransferState>
buildTransferMatchSwitch(ASTContext &Ctx,
CFGMatchSwitchBuilder<LatticeTransferState> Builder) {
@@ -482,8 +555,24 @@ buildTransferMatchSwitch(ASTContext &Ctx,
transferComparisonOperator(Expr, State,
/*IsNegative=*/true);
})
+ .CaseOfCFGStmt<BinaryOperator>(
+ isPointerComparisonOperatorCall("=="),
+ [](const BinaryOperator *Expr, const MatchFinder::MatchResult &,
+ LatticeTransferState &State) {
+ transferPointerComparisonOperator(Expr, State,
+ /*IsNegative=*/false);
+ })
+ .CaseOfCFGStmt<BinaryOperator>(
+ isPointerComparisonOperatorCall("!="),
+ [](const BinaryOperator *Expr, const MatchFinder::MatchResult &,
+ LatticeTransferState &State) {
+ transferPointerComparisonOperator(Expr, State,
+ /*IsNegative=*/true);
+ })
.CaseOfCFGStmt<CallExpr>(isOkStatusCall(), transferOkStatusCall)
.CaseOfCFGStmt<CallExpr>(isNotOkStatusCall(), transferNotOkStatusCall)
+ .CaseOfCFGStmt<CXXMemberCallExpr>(isStatusOrMemberCallWithName("emplace"),
+ transferEmplaceCall)
.Build();
}
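A hedged sketch of code the two new transfer functions target: equality of StatusOr/Status pointers propagates ok-ness between the pointees, and emplace() marks the value as present.

  #include "absl/status/statusor.h"

  void pointerEquality(absl::StatusOr<int> *a, absl::StatusOr<int> *b) {
    if (a == b && a->ok()) {
      int x = **b; // a == b implies the ok states agree, so this is safe
    }
  }

  void emplaceImpliesOk(absl::StatusOr<int> &so) {
    so.emplace(42);
    int x = *so;   // emplace() guarantees a value is present
  }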
diff --git a/clang/lib/Basic/FileManager.cpp b/clang/lib/Basic/FileManager.cpp
index 7481e1e..e744cc0 100644
--- a/clang/lib/Basic/FileManager.cpp
+++ b/clang/lib/Basic/FileManager.cpp
@@ -474,8 +474,9 @@ OptionalFileEntryRef FileManager::getBypassFile(FileEntryRef VF) {
return FileEntryRef(*Insertion.first);
}
-bool FileManager::FixupRelativePath(SmallVectorImpl<char> &path) const {
- StringRef pathRef(path.data(), path.size());
+bool FileManager::fixupRelativePath(const FileSystemOptions &FileSystemOpts,
+ SmallVectorImpl<char> &Path) {
+ StringRef pathRef(Path.data(), Path.size());
if (FileSystemOpts.WorkingDir.empty()
|| llvm::sys::path::is_absolute(pathRef))
@@ -483,7 +484,7 @@ bool FileManager::FixupRelativePath(SmallVectorImpl<char> &path) const {
SmallString<128> NewPath(FileSystemOpts.WorkingDir);
llvm::sys::path::append(NewPath, pathRef);
- path = NewPath;
+ Path = NewPath;
return true;
}
diff --git a/clang/lib/Basic/Targets/AArch64.cpp b/clang/lib/Basic/Targets/AArch64.cpp
index 18641a9..c2d1bc1 100644
--- a/clang/lib/Basic/Targets/AArch64.cpp
+++ b/clang/lib/Basic/Targets/AArch64.cpp
@@ -206,8 +206,7 @@ AArch64TargetInfo::AArch64TargetInfo(const llvm::Triple &Triple,
StringRef AArch64TargetInfo::getABI() const { return ABI; }
bool AArch64TargetInfo::setABI(const std::string &Name) {
- if (Name != "aapcs" && Name != "aapcs-soft" && Name != "darwinpcs" &&
- Name != "pauthtest")
+ if (Name != "aapcs" && Name != "aapcs-soft" && Name != "darwinpcs")
return false;
ABI = Name;
@@ -221,12 +220,6 @@ bool AArch64TargetInfo::validateTarget(DiagnosticsEngine &Diags) const {
Diags.Report(diag::err_target_unsupported_abi_with_fpu) << ABI;
return false;
}
- if (getTriple().getEnvironment() == llvm::Triple::PAuthTest &&
- getTriple().getOS() != llvm::Triple::Linux) {
- Diags.Report(diag::err_target_unsupported_abi_for_triple)
- << getTriple().getEnvironmentName() << getTriple().getTriple();
- return false;
- }
return true;
}
@@ -398,6 +391,12 @@ void AArch64TargetInfo::getTargetDefinesARMV96A(const LangOptions &Opts,
getTargetDefinesARMV95A(Opts, Builder);
}
+void AArch64TargetInfo::getTargetDefinesARMV97A(const LangOptions &Opts,
+ MacroBuilder &Builder) const {
+ // Armv9.7-A does not have a v8.* equivalent, but is a superset of v9.6-A.
+ getTargetDefinesARMV96A(Opts, Builder);
+}
+
void AArch64TargetInfo::getTargetDefines(const LangOptions &Opts,
MacroBuilder &Builder) const {
// Target identification.
@@ -714,6 +713,8 @@ void AArch64TargetInfo::getTargetDefines(const LangOptions &Opts,
getTargetDefinesARMV95A(Opts, Builder);
else if (*ArchInfo == llvm::AArch64::ARMV9_6A)
getTargetDefinesARMV96A(Opts, Builder);
+ else if (*ArchInfo == llvm::AArch64::ARMV9_7A)
+ getTargetDefinesARMV97A(Opts, Builder);
// All of the __sync_(bool|val)_compare_and_swap_(1|2|4|8|16) builtins work.
Builder.defineMacro("__GCC_HAVE_SYNC_COMPARE_AND_SWAP_1");
@@ -1152,6 +1153,9 @@ bool AArch64TargetInfo::handleTargetFeatures(std::vector<std::string> &Features,
if (Feature == "+v9.6a" &&
ArchInfo->Version < llvm::AArch64::ARMV9_6A.Version)
ArchInfo = &llvm::AArch64::ARMV9_6A;
+ if (Feature == "+v9.7a" &&
+ ArchInfo->Version < llvm::AArch64::ARMV9_7A.Version)
+ ArchInfo = &llvm::AArch64::ARMV9_7A;
if (Feature == "+v8r")
ArchInfo = &llvm::AArch64::ARMV8R;
if (Feature == "+fullfp16") {
diff --git a/clang/lib/Basic/Targets/AArch64.h b/clang/lib/Basic/Targets/AArch64.h
index dfd89be..7d0737b 100644
--- a/clang/lib/Basic/Targets/AArch64.h
+++ b/clang/lib/Basic/Targets/AArch64.h
@@ -135,6 +135,7 @@ class LLVM_LIBRARY_VISIBILITY AArch64TargetInfo : public TargetInfo {
const llvm::AArch64::ArchInfo *ArchInfo = &llvm::AArch64::ARMV8A;
+protected:
std::string ABI;
public:
@@ -190,6 +191,8 @@ public:
MacroBuilder &Builder) const;
void getTargetDefinesARMV96A(const LangOptions &Opts,
MacroBuilder &Builder) const;
+ void getTargetDefinesARMV97A(const LangOptions &Opts,
+ MacroBuilder &Builder) const;
void getTargetDefines(const LangOptions &Opts,
MacroBuilder &Builder) const override;
@@ -277,6 +280,16 @@ private:
void setDataLayout() override;
};
+template <>
+inline bool
+LinuxTargetInfo<AArch64leTargetInfo>::setABI(const std::string &Name) {
+ if (Name == "pauthtest") {
+ ABI = Name;
+ return true;
+ }
+ return AArch64leTargetInfo::setABI(Name);
+}
+
class LLVM_LIBRARY_VISIBILITY WindowsARM64TargetInfo
: public WindowsTargetInfo<AArch64leTargetInfo> {
const llvm::Triple Triple;
diff --git a/clang/lib/Basic/Targets/ARM.cpp b/clang/lib/Basic/Targets/ARM.cpp
index d00a3a4..394b50b 100644
--- a/clang/lib/Basic/Targets/ARM.cpp
+++ b/clang/lib/Basic/Targets/ARM.cpp
@@ -231,6 +231,8 @@ StringRef ARMTargetInfo::getCPUAttr() const {
return "9_5A";
case llvm::ARM::ArchKind::ARMV9_6A:
return "9_6A";
+ case llvm::ARM::ArchKind::ARMV9_7A:
+ return "9_7A";
case llvm::ARM::ArchKind::ARMV8MBaseline:
return "8M_BASE";
case llvm::ARM::ArchKind::ARMV8MMainline:
@@ -904,6 +906,7 @@ void ARMTargetInfo::getTargetDefines(const LangOptions &Opts,
case llvm::ARM::ArchKind::ARMV9_4A:
case llvm::ARM::ArchKind::ARMV9_5A:
case llvm::ARM::ArchKind::ARMV9_6A:
+ case llvm::ARM::ArchKind::ARMV9_7A:
// Filter __arm_cdp, __arm_ldcl, __arm_stcl in arm_acle.h
FeatureCoprocBF = FEATURE_COPROC_B1 | FEATURE_COPROC_B3;
break;
@@ -1074,6 +1077,7 @@ void ARMTargetInfo::getTargetDefines(const LangOptions &Opts,
case llvm::ARM::ArchKind::ARMV9_4A:
case llvm::ARM::ArchKind::ARMV9_5A:
case llvm::ARM::ArchKind::ARMV9_6A:
+ case llvm::ARM::ArchKind::ARMV9_7A:
getTargetDefinesARMV83A(Opts, Builder);
break;
}
diff --git a/clang/lib/Basic/Targets/Hexagon.cpp b/clang/lib/Basic/Targets/Hexagon.cpp
index cea64f9..d5b413cb 100644
--- a/clang/lib/Basic/Targets/Hexagon.cpp
+++ b/clang/lib/Basic/Targets/Hexagon.cpp
@@ -83,6 +83,9 @@ void HexagonTargetInfo::getTargetDefines(const LangOptions &Opts,
} else if (CPU == "hexagonv79") {
Builder.defineMacro("__HEXAGON_V79__");
Builder.defineMacro("__HEXAGON_ARCH__", "79");
+ } else if (CPU == "hexagonv81") {
+ Builder.defineMacro("__HEXAGON_V81__");
+ Builder.defineMacro("__HEXAGON_ARCH__", "81");
}
if (hasFeature("hvx-length64b")) {
@@ -252,8 +255,7 @@ static constexpr CPUSuffix Suffixes[] = {
{{"hexagonv68"}, {"68"}}, {{"hexagonv69"}, {"69"}},
{{"hexagonv71"}, {"71"}}, {{"hexagonv71t"}, {"71t"}},
{{"hexagonv73"}, {"73"}}, {{"hexagonv75"}, {"75"}},
- {{"hexagonv79"}, {"79"}},
-};
+ {{"hexagonv79"}, {"79"}}, {{"hexagonv81"}, {"81"}}};
std::optional<unsigned> HexagonTargetInfo::getHexagonCPURev(StringRef Name) {
StringRef Arch = Name;
diff --git a/clang/lib/Basic/Targets/OSTargets.cpp b/clang/lib/Basic/Targets/OSTargets.cpp
index e744e84..e99bbd1 100644
--- a/clang/lib/Basic/Targets/OSTargets.cpp
+++ b/clang/lib/Basic/Targets/OSTargets.cpp
@@ -10,6 +10,7 @@
//===----------------------------------------------------------------------===//
#include "OSTargets.h"
+#include "AArch64.h"
#include "clang/Basic/MacroBuilder.h"
#include "llvm/ADT/StringRef.h"
diff --git a/clang/lib/Basic/Targets/OSTargets.h b/clang/lib/Basic/Targets/OSTargets.h
index bd6ffcf..4d81c9a 100644
--- a/clang/lib/Basic/Targets/OSTargets.h
+++ b/clang/lib/Basic/Targets/OSTargets.h
@@ -408,6 +408,12 @@ public:
const char *getStaticInitSectionSpecifier() const override {
return ".text.startup";
}
+
+ // This allows template specializations; see
+ // LinuxTargetInfo<AArch64leTargetInfo>::setABI.
+ bool setABI(const std::string &Name) override {
+ return OSTargetInfo<Target>::setABI(Name);
+ }
};
// Managarm Target
diff --git a/clang/lib/CIR/CodeGen/CIRGenBuiltin.cpp b/clang/lib/CIR/CodeGen/CIRGenBuiltin.cpp
index 27c4d11..e35100f 100644
--- a/clang/lib/CIR/CodeGen/CIRGenBuiltin.cpp
+++ b/clang/lib/CIR/CodeGen/CIRGenBuiltin.cpp
@@ -449,10 +449,36 @@ RValue CIRGenFunction::emitBuiltinExpr(const GlobalDecl &gd, unsigned builtinID,
}
case Builtin::BI__builtin_coro_free:
case Builtin::BI__builtin_coro_size: {
- cgm.errorNYI(e->getSourceRange(),
- "BI__builtin_coro_free, BI__builtin_coro_size NYI");
- assert(!cir::MissingFeatures::coroSizeBuiltinCall());
- return getUndefRValue(e->getType());
+ GlobalDecl gd{fd};
+ mlir::Type ty = cgm.getTypes().getFunctionType(
+ cgm.getTypes().arrangeGlobalDeclaration(gd));
+ const auto *nd = cast<NamedDecl>(gd.getDecl());
+ cir::FuncOp fnOp =
+ cgm.getOrCreateCIRFunction(nd->getName(), ty, gd, /*ForVTable=*/false);
+ fnOp.setBuiltin(true);
+ return emitCall(e->getCallee()->getType(), CIRGenCallee::forDirect(fnOp), e,
+ returnValue);
+ }
+ case Builtin::BI__builtin_prefetch: {
+ auto evaluateOperandAsInt = [&](const Expr *arg) {
+ Expr::EvalResult res;
+ [[maybe_unused]] bool evalSucceed =
+ arg->EvaluateAsInt(res, cgm.getASTContext());
+ assert(evalSucceed && "expression should be able to evaluate as int");
+ return res.Val.getInt().getZExtValue();
+ };
+
+ bool isWrite = false;
+ if (e->getNumArgs() > 1)
+ isWrite = evaluateOperandAsInt(e->getArg(1));
+
+ int locality = 3;
+ if (e->getNumArgs() > 2)
+ locality = evaluateOperandAsInt(e->getArg(2));
+
+ mlir::Value address = emitScalarExpr(e->getArg(0));
+ cir::PrefetchOp::create(builder, loc, address, locality, isWrite);
+ return RValue::get(nullptr);
}
}
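The evaluated operands follow the documented __builtin_prefetch signature: address, optional read/write flag defaulting to 0 (read), and optional locality 0-3 defaulting to 3; the integer operands must be constant expressions, matching the assert above.

  void warm(const int *p) {
    __builtin_prefetch(p);       // read, locality 3 (the defaults)
    __builtin_prefetch(p, 1, 1); // write, low temporal locality
  }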
diff --git a/clang/lib/CIR/CodeGen/CIRGenCXXABI.h b/clang/lib/CIR/CodeGen/CIRGenCXXABI.h
index c78f9b0..13dc9f3 100644
--- a/clang/lib/CIR/CodeGen/CIRGenCXXABI.h
+++ b/clang/lib/CIR/CodeGen/CIRGenCXXABI.h
@@ -124,6 +124,8 @@ public:
virtual void emitRethrow(CIRGenFunction &cgf, bool isNoReturn) = 0;
virtual void emitThrow(CIRGenFunction &cgf, const CXXThrowExpr *e) = 0;
+ virtual void emitBadCastCall(CIRGenFunction &cgf, mlir::Location loc) = 0;
+
virtual mlir::Attribute getAddrOfRTTIDescriptor(mlir::Location loc,
QualType ty) = 0;
@@ -185,6 +187,11 @@ public:
virtual void registerGlobalDtor(const VarDecl *vd, cir::FuncOp dtor,
mlir::Value addr) = 0;
+ virtual void emitVirtualObjectDelete(CIRGenFunction &cgf,
+ const CXXDeleteExpr *de, Address ptr,
+ QualType elementType,
+ const CXXDestructorDecl *dtor) = 0;
+
/// Checks if ABI requires extra virtual offset for vtable field.
virtual bool
isVirtualOffsetNeededForVTableField(CIRGenFunction &cgf,
diff --git a/clang/lib/CIR/CodeGen/CIRGenCoroutine.cpp b/clang/lib/CIR/CodeGen/CIRGenCoroutine.cpp
index c25cce4..8723a6e 100644
--- a/clang/lib/CIR/CodeGen/CIRGenCoroutine.cpp
+++ b/clang/lib/CIR/CodeGen/CIRGenCoroutine.cpp
@@ -15,6 +15,7 @@
#include "clang/AST/StmtCXX.h"
#include "clang/Basic/TargetInfo.h"
#include "clang/CIR/Dialect/IR/CIRTypes.h"
+#include "clang/CIR/MissingFeatures.h"
using namespace clang;
using namespace clang::CIRGen;
@@ -23,6 +24,9 @@ struct clang::CIRGen::CGCoroData {
// Stores the __builtin_coro_id emitted in the function so that we can supply
// it as the first argument to other builtins.
cir::CallOp coroId = nullptr;
+
+ // Stores the result of __builtin_coro_begin call.
+ mlir::Value coroBegin = nullptr;
};
// Defining these here allows to keep CGCoroData private to this file.
@@ -63,6 +67,46 @@ cir::CallOp CIRGenFunction::emitCoroIDBuiltinCall(mlir::Location loc,
nullPtr, nullPtr, nullPtr});
}
+cir::CallOp CIRGenFunction::emitCoroAllocBuiltinCall(mlir::Location loc) {
+ cir::BoolType boolTy = builder.getBoolTy();
+
+ mlir::Operation *builtin = cgm.getGlobalValue(cgm.builtinCoroAlloc);
+
+ cir::FuncOp fnOp;
+ if (!builtin) {
+ fnOp = cgm.createCIRBuiltinFunction(loc, cgm.builtinCoroAlloc,
+ cir::FuncType::get({UInt32Ty}, boolTy),
+ /*fd=*/nullptr);
+ assert(fnOp && "should always succeed");
+ } else {
+ fnOp = cast<cir::FuncOp>(builtin);
+ }
+
+ return builder.createCallOp(
+ loc, fnOp, mlir::ValueRange{curCoro.data->coroId.getResult()});
+}
+
+cir::CallOp
+CIRGenFunction::emitCoroBeginBuiltinCall(mlir::Location loc,
+ mlir::Value coroframeAddr) {
+ mlir::Operation *builtin = cgm.getGlobalValue(cgm.builtinCoroBegin);
+
+ cir::FuncOp fnOp;
+ if (!builtin) {
+ fnOp = cgm.createCIRBuiltinFunction(
+ loc, cgm.builtinCoroBegin,
+ cir::FuncType::get({UInt32Ty, VoidPtrTy}, VoidPtrTy),
+ /*fd=*/nullptr);
+ assert(fnOp && "should always succeed");
+ } else {
+ fnOp = cast<cir::FuncOp>(builtin);
+ }
+
+ return builder.createCallOp(
+ loc, fnOp,
+ mlir::ValueRange{curCoro.data->coroId.getResult(), coroframeAddr});
+}
+
mlir::LogicalResult
CIRGenFunction::emitCoroutineBody(const CoroutineBodyStmt &s) {
mlir::Location openCurlyLoc = getLoc(s.getBeginLoc());
@@ -73,10 +117,39 @@ CIRGenFunction::emitCoroutineBody(const CoroutineBodyStmt &s) {
cir::CallOp coroId = emitCoroIDBuiltinCall(openCurlyLoc, nullPtrCst);
createCoroData(*this, curCoro, coroId);
- assert(!cir::MissingFeatures::coroAllocBuiltinCall());
-
- assert(!cir::MissingFeatures::coroBeginBuiltinCall());
+ // The backend is allowed to elide memory allocations; to help it, emit
+ // auto mem = coro.alloc() ? 0 : ... allocation code ...;
+ cir::CallOp coroAlloc = emitCoroAllocBuiltinCall(openCurlyLoc);
+
+ // Initialize address of coroutine frame to null
+ CanQualType astVoidPtrTy = cgm.getASTContext().VoidPtrTy;
+ mlir::Type allocaTy = convertTypeForMem(astVoidPtrTy);
+ Address coroFrame =
+ createTempAlloca(allocaTy, getContext().getTypeAlignInChars(astVoidPtrTy),
+ openCurlyLoc, "__coro_frame_addr",
+ /*ArraySize=*/nullptr);
+
+ mlir::Value storeAddr = coroFrame.getPointer();
+ builder.CIRBaseBuilderTy::createStore(openCurlyLoc, nullPtrCst, storeAddr);
+ cir::IfOp::create(
+ builder, openCurlyLoc, coroAlloc.getResult(),
+ /*withElseRegion=*/false,
+ /*thenBuilder=*/[&](mlir::OpBuilder &b, mlir::Location loc) {
+ builder.CIRBaseBuilderTy::createStore(
+ loc, emitScalarExpr(s.getAllocate()), storeAddr);
+ cir::YieldOp::create(builder, loc);
+ });
+ curCoro.data->coroBegin =
+ emitCoroBeginBuiltinCall(
+ openCurlyLoc,
+ cir::LoadOp::create(builder, openCurlyLoc, allocaTy, storeAddr))
+ .getResult();
+
+ // Handle allocation failure if 'ReturnStmtOnAllocFailure' was provided.
+ if (s.getReturnStmtOnAllocFailure())
+ cgm.errorNYI("handle coroutine return alloc failure");
assert(!cir::MissingFeatures::generateDebugInfo());
+ assert(!cir::MissingFeatures::emitBodyAndFallthrough());
return mlir::success();
}
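In pseudo-C++, the ramp prologue emitted above looks roughly like the following; coro_alloc/coro_begin stand in for the CIR builtin calls and are illustrative names, not real signatures:

  extern bool coro_alloc(unsigned id);          // stands in for coro.alloc
  extern void *coro_begin(unsigned id, void *); // stands in for coro.begin
  extern void *alloc_frame();                   // s.getAllocate() expansion

  void *ramp_prologue(unsigned id) {
    void *frameAddr = nullptr;   // __coro_frame_addr, initialized to null
    if (coro_alloc(id))          // backend may fold this to false to
      frameAddr = alloc_frame(); // elide the allocation entirely
    return coro_begin(id, frameAddr);
  }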
diff --git a/clang/lib/CIR/CodeGen/CIRGenExpr.cpp b/clang/lib/CIR/CodeGen/CIRGenExpr.cpp
index 9df88ad..df6ee56 100644
--- a/clang/lib/CIR/CodeGen/CIRGenExpr.cpp
+++ b/clang/lib/CIR/CodeGen/CIRGenExpr.cpp
@@ -2065,7 +2065,11 @@ mlir::Value CIRGenFunction::emitAlloca(StringRef name, mlir::Type ty,
// a surrounding cir.scope, make sure the alloca ends up in the surrounding
// scope instead. This is necessary in order to guarantee all SSA values are
// reachable during cleanups.
- assert(!cir::MissingFeatures::tryOp());
+ if (auto tryOp =
+ llvm::dyn_cast_if_present<cir::TryOp>(entryBlock->getParentOp())) {
+ if (auto scopeOp = llvm::dyn_cast<cir::ScopeOp>(tryOp->getParentOp()))
+ entryBlock = &scopeOp.getScopeRegion().front();
+ }
return emitAlloca(name, ty, loc, alignment,
builder.getBestAllocaInsertPoint(entryBlock), arraySize);
diff --git a/clang/lib/CIR/CodeGen/CIRGenExprAggregate.cpp b/clang/lib/CIR/CodeGen/CIRGenExprAggregate.cpp
index d6d226b..8fe0d9b4 100644
--- a/clang/lib/CIR/CodeGen/CIRGenExprAggregate.cpp
+++ b/clang/lib/CIR/CodeGen/CIRGenExprAggregate.cpp
@@ -362,8 +362,7 @@ public:
cgf.cgm.errorNYI(e->getSourceRange(), "AggExprEmitter: VisitCXXTypeidExpr");
}
void VisitMaterializeTemporaryExpr(MaterializeTemporaryExpr *e) {
- cgf.cgm.errorNYI(e->getSourceRange(),
- "AggExprEmitter: VisitMaterializeTemporaryExpr");
+ Visit(e->getSubExpr());
}
void VisitOpaqueValueExpr(OpaqueValueExpr *e) {
cgf.cgm.errorNYI(e->getSourceRange(),
diff --git a/clang/lib/CIR/CodeGen/CIRGenExprCXX.cpp b/clang/lib/CIR/CodeGen/CIRGenExprCXX.cpp
index fe9e210..7a35382 100644
--- a/clang/lib/CIR/CodeGen/CIRGenExprCXX.cpp
+++ b/clang/lib/CIR/CodeGen/CIRGenExprCXX.cpp
@@ -565,8 +565,10 @@ static void emitObjectDelete(CIRGenFunction &cgf, const CXXDeleteExpr *de,
dtor = rd->getDestructor();
if (dtor->isVirtual()) {
- cgf.cgm.errorNYI(de->getSourceRange(),
- "emitObjectDelete: virtual destructor");
+ assert(!cir::MissingFeatures::devirtualizeDestructor());
+ cgf.cgm.getCXXABI().emitVirtualObjectDelete(cgf, de, ptr, elementType,
+ dtor);
+ return;
}
}
}
@@ -801,6 +803,26 @@ void CIRGenFunction::emitDeleteCall(const FunctionDecl *deleteFD,
emitNewDeleteCall(*this, deleteFD, deleteFTy, deleteArgs);
}
+static mlir::Value emitDynamicCastToNull(CIRGenFunction &cgf,
+ mlir::Location loc, QualType destTy) {
+ mlir::Type destCIRTy = cgf.convertType(destTy);
+ assert(mlir::isa<cir::PointerType>(destCIRTy) &&
+ "result of dynamic_cast should be a ptr");
+
+ if (!destTy->isPointerType()) {
+ mlir::Region *currentRegion = cgf.getBuilder().getBlock()->getParent();
+ /// C++ [expr.dynamic.cast]p9:
+ /// A failed cast to reference type throws std::bad_cast
+ cgf.cgm.getCXXABI().emitBadCastCall(cgf, loc);
+
+ // The call to bad_cast will terminate the current block. Create a new block
+ // to hold any follow up code.
+ cgf.getBuilder().createBlock(currentRegion, currentRegion->end());
+ }
+
+ return cgf.getBuilder().getNullPtr(destCIRTy, loc);
+}
+
mlir::Value CIRGenFunction::emitDynamicCast(Address thisAddr,
const CXXDynamicCastExpr *dce) {
mlir::Location loc = getLoc(dce->getSourceRange());
@@ -831,10 +853,8 @@ mlir::Value CIRGenFunction::emitDynamicCast(Address thisAddr,
assert(srcRecordTy->isRecordType() && "source type must be a record type!");
assert(!cir::MissingFeatures::emitTypeCheck());
- if (dce->isAlwaysNull()) {
- cgm.errorNYI(dce->getSourceRange(), "emitDynamicCastToNull");
- return {};
- }
+ if (dce->isAlwaysNull())
+ return emitDynamicCastToNull(*this, loc, destTy);
auto destCirTy = mlir::cast<cir::PointerType>(convertType(destTy));
return cgm.getCXXABI().emitDynamicCast(*this, loc, srcRecordTy, destRecordTy,
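A hedged illustration of the isAlwaysNull path handled above: when the frontend can prove a dynamic_cast must fail (e.g. casting to an unrelated final class), the pointer form folds to null with no runtime call, while the reference form must call bad_cast.

  struct Base { virtual ~Base() = default; };
  struct Sealed final : Base {};
  struct Other { virtual ~Other() = default; };

  Sealed *alwaysNull(Other *o) {
    return dynamic_cast<Sealed *>(o); // provably fails: folds to nullptr
  }
  Sealed &alwaysThrows(Other &o) {
    return dynamic_cast<Sealed &>(o); // reference form: emits bad_cast
  }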
diff --git a/clang/lib/CIR/CodeGen/CIRGenExprConstant.cpp b/clang/lib/CIR/CodeGen/CIRGenExprConstant.cpp
index 7de3dd0..928e5aa 100644
--- a/clang/lib/CIR/CodeGen/CIRGenExprConstant.cpp
+++ b/clang/lib/CIR/CodeGen/CIRGenExprConstant.cpp
@@ -922,9 +922,9 @@ public:
}
mlir::Attribute VisitCastExpr(CastExpr *e, QualType destType) {
- if (isa<ExplicitCastExpr>(e))
- cgm.errorNYI(e->getBeginLoc(),
- "ConstExprEmitter::VisitCastExpr explicit cast");
+ if (const auto *ece = dyn_cast<ExplicitCastExpr>(e))
+ cgm.emitExplicitCastExprType(ece,
+ const_cast<CIRGenFunction *>(emitter.cgf));
Expr *subExpr = e->getSubExpr();
@@ -1078,9 +1078,32 @@ public:
mlir::Attribute VisitCXXConstructExpr(CXXConstructExpr *e, QualType ty) {
if (!e->getConstructor()->isTrivial())
- return nullptr;
- cgm.errorNYI(e->getBeginLoc(), "trivial constructor const handling");
- return {};
+ return {};
+
+ // Only default and copy/move constructors can be trivial.
+ if (e->getNumArgs()) {
+ assert(e->getNumArgs() == 1 && "trivial ctor with > 1 argument");
+ assert(e->getConstructor()->isCopyOrMoveConstructor() &&
+ "trivial ctor has argument but isn't a copy/move ctor");
+
+ Expr *arg = e->getArg(0);
+ assert(cgm.getASTContext().hasSameUnqualifiedType(ty, arg->getType()) &&
+ "argument to copy ctor is of wrong type");
+
+ // Look through the temporary; it's just converting the value to an lvalue
+ // to pass it to the constructor.
+ if (auto const *mte = dyn_cast<MaterializeTemporaryExpr>(arg))
+ return Visit(mte->getSubExpr(), ty);
+
+ // TODO: Investigate whether there are cases that can fall through to here
+    //       that need to be handled. This is also missing in classic codegen.
+ assert(!cir::MissingFeatures::ctorConstLvalueToRvalueConversion());
+
+ // Don't try to support arbitrary lvalue-to-rvalue conversions for now.
+ return {};
+ }
+
+ return cgm.getBuilder().getZeroInitAttr(cgm.convertType(ty));
}
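A minimal sketch (illustrative, not from the patch) of the two trivial-constructor shapes folded above:

  struct Point {
    int x, y; // implicitly-declared constructors are all trivial
  };

  // No arguments: the object becomes a zero-initializer attribute.
  Point zeroed;

  // One argument: the trivial copy looks through the materialized
  // temporary and reuses the operand's constant (assuming a mode where
  // the copy is not elided outright).
  constexpr Point dup = Point{1, 2};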
mlir::Attribute VisitStringLiteral(StringLiteral *e, QualType t) {
diff --git a/clang/lib/CIR/CodeGen/CIRGenFunction.h b/clang/lib/CIR/CodeGen/CIRGenFunction.h
index 5f9dbdc..d791130 100644
--- a/clang/lib/CIR/CodeGen/CIRGenFunction.h
+++ b/clang/lib/CIR/CodeGen/CIRGenFunction.h
@@ -665,6 +665,12 @@ public:
symbolTable.insert(vd, addr.getPointer());
}
+  // Replaces the address of the local variable if one is already registered;
+  // otherwise behaves the same as setAddrOfLocalVar.
+ void replaceAddrOfLocalVar(const clang::VarDecl *vd, Address addr) {
+ localDeclMap.insert_or_assign(vd, addr);
+ }
+
// A class to allow reverting changes to a var-decl's registration to the
// localDeclMap. This is used in cases where things are being inserted into
// the variable list but don't follow normal lookup/search rules, like in
@@ -1326,6 +1332,9 @@ public:
mlir::LogicalResult emitCoroutineBody(const CoroutineBodyStmt &s);
cir::CallOp emitCoroEndBuiltinCall(mlir::Location loc, mlir::Value nullPtr);
cir::CallOp emitCoroIDBuiltinCall(mlir::Location loc, mlir::Value nullPtr);
+ cir::CallOp emitCoroAllocBuiltinCall(mlir::Location loc);
+ cir::CallOp emitCoroBeginBuiltinCall(mlir::Location loc,
+ mlir::Value coroframeAddr);
void emitDestroy(Address addr, QualType type, Destroyer *destroyer);
diff --git a/clang/lib/CIR/CodeGen/CIRGenItaniumCXXABI.cpp b/clang/lib/CIR/CodeGen/CIRGenItaniumCXXABI.cpp
index f7c4d18..88fedf1 100644
--- a/clang/lib/CIR/CodeGen/CIRGenItaniumCXXABI.cpp
+++ b/clang/lib/CIR/CodeGen/CIRGenItaniumCXXABI.cpp
@@ -74,6 +74,9 @@ public:
QualType thisTy) override;
void registerGlobalDtor(const VarDecl *vd, cir::FuncOp dtor,
mlir::Value addr) override;
+ void emitVirtualObjectDelete(CIRGenFunction &cgf, const CXXDeleteExpr *de,
+ Address ptr, QualType elementType,
+ const CXXDestructorDecl *dtor) override;
void emitRethrow(CIRGenFunction &cgf, bool isNoReturn) override;
void emitThrow(CIRGenFunction &cgf, const CXXThrowExpr *e) override;
@@ -120,6 +123,8 @@ public:
return true;
}
+ void emitBadCastCall(CIRGenFunction &cgf, mlir::Location loc) override;
+
mlir::Value
getVirtualBaseClassOffset(mlir::Location loc, CIRGenFunction &cgf,
Address thisAddr, const CXXRecordDecl *classDecl,
@@ -1883,6 +1888,11 @@ static void emitCallToBadCast(CIRGenFunction &cgf, mlir::Location loc) {
cgf.getBuilder().clearInsertionPoint();
}
+void CIRGenItaniumCXXABI::emitBadCastCall(CIRGenFunction &cgf,
+ mlir::Location loc) {
+ emitCallToBadCast(cgf, loc);
+}
+
// TODO(cir): This could be shared with classic codegen.
static CharUnits computeOffsetHint(ASTContext &astContext,
const CXXRecordDecl *src,
@@ -2168,6 +2178,21 @@ mlir::Value CIRGenItaniumCXXABI::emitDynamicCast(CIRGenFunction &cgf,
isRefCast, castInfo);
}
+/// The Itanium ABI always places an offset to the complete object
+/// at entry -2 in the vtable.
+void CIRGenItaniumCXXABI::emitVirtualObjectDelete(
+ CIRGenFunction &cgf, const CXXDeleteExpr *delExpr, Address ptr,
+ QualType elementType, const CXXDestructorDecl *dtor) {
+ bool useGlobalDelete = delExpr->isGlobalDelete();
+ if (useGlobalDelete) {
+ cgf.cgm.errorNYI(delExpr->getSourceRange(),
+ "emitVirtualObjectDelete: global delete");
+ }
+
+ CXXDtorType dtorType = useGlobalDelete ? Dtor_Complete : Dtor_Deleting;
+ emitVirtualDestructorCall(cgf, dtor, dtorType, ptr, delExpr);
+}
+
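For reference, a minimal sketch (illustrative) of the source pattern that reaches this path:

  struct Shape {
    virtual ~Shape();
  };

  void destroy(Shape *s) {
    // The dynamic type is unknown, so the delete dispatches through the
    // vtable: Dtor_Deleting destroys the most-derived object and frees it
    // with the class's operator delete.
    delete s;
  }

  void destroyGlobal(Shape *s) {
    // ::delete forces the global deallocation function; only the
    // complete-object destructor (Dtor_Complete) is virtually dispatched.
    // This is the branch still NYI above.
    ::delete s;
  }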
/************************** Array allocation cookies **************************/
CharUnits CIRGenItaniumCXXABI::getArrayCookieSizeImpl(QualType elementType) {
diff --git a/clang/lib/CIR/CodeGen/CIRGenModule.h b/clang/lib/CIR/CodeGen/CIRGenModule.h
index 1fc116d..186913d 100644
--- a/clang/lib/CIR/CodeGen/CIRGenModule.h
+++ b/clang/lib/CIR/CodeGen/CIRGenModule.h
@@ -496,6 +496,8 @@ public:
bool assumeConvergent = false);
static constexpr const char *builtinCoroId = "__builtin_coro_id";
+ static constexpr const char *builtinCoroAlloc = "__builtin_coro_alloc";
+ static constexpr const char *builtinCoroBegin = "__builtin_coro_begin";
/// Given a builtin id for a function like "__builtin_fabsf", return a
/// Function* for "fabsf".
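A hedged sketch of the builtin sequence these names refer to, loosely following clang/test/CodeGenCoroutines/coro-builtins.c (the function and variable names here are illustrative):

  void *setUpFrame(void *promise) {
    __builtin_coro_id(/*align=*/32, promise, nullptr, nullptr);
    void *mem = nullptr;
    // coro.alloc asks whether a heap-allocated frame is actually needed.
    if (__builtin_coro_alloc())
      mem = ::operator new(__builtin_coro_size());
    // coro.begin establishes the coroutine frame pointer.
    return __builtin_coro_begin(mem);
  }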
diff --git a/clang/lib/CIR/CodeGen/CIRGenStmtOpenACC.cpp b/clang/lib/CIR/CodeGen/CIRGenStmtOpenACC.cpp
index 77e6f83..9e55bd5 100644
--- a/clang/lib/CIR/CodeGen/CIRGenStmtOpenACC.cpp
+++ b/clang/lib/CIR/CodeGen/CIRGenStmtOpenACC.cpp
@@ -304,11 +304,21 @@ CIRGenFunction::emitOpenACCCacheConstruct(const OpenACCCacheConstruct &s) {
return mlir::success();
}
+static const VarDecl *getLValueDecl(const Expr *e) {
+  // We assume that after stripping implicit casts, the lvalue is just a
+  // DeclRefExpr around the var-decl.
+
+ e = e->IgnoreImpCasts();
+
+ const auto *dre = cast<DeclRefExpr>(e);
+ return cast<VarDecl>(dre->getDecl());
+}
+
mlir::LogicalResult
CIRGenFunction::emitOpenACCAtomicConstruct(const OpenACCAtomicConstruct &s) {
- // For now, we are only support 'read', so diagnose. We can switch on the kind
- // later once we start implementing the other 3 forms.
- if (s.getAtomicKind() != OpenACCAtomicKind::Read) {
+  // For now, we only support 'read'/'write'/'update', so diagnose. We can
+  // switch on the kind later once we implement the 'capture' form.
+ if (s.getAtomicKind() == OpenACCAtomicKind::Capture) {
cgm.errorNYI(s.getSourceRange(), "OpenACC Atomic Construct");
return mlir::failure();
}
@@ -317,18 +327,85 @@ CIRGenFunction::emitOpenACCAtomicConstruct(const OpenACCAtomicConstruct &s) {
// expression it is associated with rather than emitting it inside of it. So
// it has custom emit logic.
mlir::Location start = getLoc(s.getSourceRange().getBegin());
+ mlir::Location end = getLoc(s.getSourceRange().getEnd());
OpenACCAtomicConstruct::StmtInfo inf = s.getAssociatedStmtInfo();
- // Atomic 'read' only permits 'v = x', where v and x are both scalar L values.
- // The getAssociatedStmtInfo strips off implicit casts, which includes
- // implicit conversions and L-to-R-Value conversions, so we can just emit it
- // as an L value. The Flang implementation has no problem with different
- // types, so it appears that the dialect can handle the conversions.
- mlir::Value v = emitLValue(inf.V).getPointer();
- mlir::Value x = emitLValue(inf.X).getPointer();
- mlir::Type resTy = convertType(inf.V->getType());
- auto op = mlir::acc::AtomicReadOp::create(builder, start, x, v, resTy,
- /*ifCond=*/{});
- emitOpenACCClauses(op, s.getDirectiveKind(), s.getDirectiveLoc(),
- s.clauses());
- return mlir::success();
+
+ switch (s.getAtomicKind()) {
+ case OpenACCAtomicKind::Capture:
+ llvm_unreachable("Unimplemented atomic construct type, should have "
+ "diagnosed/returned above");
+ return mlir::failure();
+ case OpenACCAtomicKind::Read: {
+    // Atomic 'read' only permits 'v = x', where v and x are both scalar
+    // lvalues. getAssociatedStmtInfo strips off implicit casts, which
+    // includes implicit conversions and lvalue-to-rvalue conversions, so we
+    // can just emit it as an lvalue. The Flang implementation has no problem
+    // with different types, so it appears that the dialect can handle the
+    // conversions.
+ mlir::Value v = emitLValue(inf.V).getPointer();
+ mlir::Value x = emitLValue(inf.X).getPointer();
+ mlir::Type resTy = convertType(inf.V->getType());
+ auto op = mlir::acc::AtomicReadOp::create(builder, start, x, v, resTy,
+ /*ifCond=*/{});
+ emitOpenACCClauses(op, s.getDirectiveKind(), s.getDirectiveLoc(),
+ s.clauses());
+ return mlir::success();
+ }
+ case OpenACCAtomicKind::Write: {
+ mlir::Value x = emitLValue(inf.X).getPointer();
+ mlir::Value expr = emitAnyExpr(inf.RefExpr).getValue();
+ auto op = mlir::acc::AtomicWriteOp::create(builder, start, x, expr,
+ /*ifCond=*/{});
+ emitOpenACCClauses(op, s.getDirectiveKind(), s.getDirectiveLoc(),
+ s.clauses());
+ return mlir::success();
+ }
+ case OpenACCAtomicKind::None:
+ case OpenACCAtomicKind::Update: {
+ mlir::Value x = emitLValue(inf.X).getPointer();
+ auto op =
+ mlir::acc::AtomicUpdateOp::create(builder, start, x, /*ifCond=*/{});
+ emitOpenACCClauses(op, s.getDirectiveKind(), s.getDirectiveLoc(),
+ s.clauses());
+ mlir::LogicalResult res = mlir::success();
+ {
+ mlir::OpBuilder::InsertionGuard guardCase(builder);
+ mlir::Type argTy = cast<cir::PointerType>(x.getType()).getPointee();
+ std::array<mlir::Type, 1> recipeType{argTy};
+ std::array<mlir::Location, 1> recipeLoc{start};
+ mlir::Block *recipeBlock = builder.createBlock(
+ &op.getRegion(), op.getRegion().end(), recipeType, recipeLoc);
+ builder.setInsertionPointToEnd(recipeBlock);
+
+      // Since we have an initial value that we know is a scalar type, we can
+      // just emit the entire statement here after sneaking in our 'alloca' in
+      // the right place, then loading out of it. Flang does a lot less work
+      // (it probably does its own emitting!), but we have more complicated
+      // AST nodes to worry about, so we can count on opt to remove the extra
+      // alloca/load/store set.
+ auto alloca = cir::AllocaOp::create(
+ builder, start, x.getType(), argTy, "x_var",
+ cgm.getSize(getContext().getTypeAlignInChars(inf.X->getType())));
+
+ alloca.setInitAttr(mlir::UnitAttr::get(&getMLIRContext()));
+ builder.CIRBaseBuilderTy::createStore(start, recipeBlock->getArgument(0),
+ alloca);
+
+ const VarDecl *xval = getLValueDecl(inf.X);
+ CIRGenFunction::DeclMapRevertingRAII declMapRAII{*this, xval};
+ replaceAddrOfLocalVar(
+ xval, Address{alloca, argTy, getContext().getDeclAlign(xval)});
+
+ res = emitStmt(s.getAssociatedStmt(), /*useCurrentScope=*/true);
+
+ auto load = cir::LoadOp::create(builder, start, {alloca});
+ mlir::acc::YieldOp::create(builder, end, {load});
+ }
+
+ return res;
+ }
+ }
+
+ llvm_unreachable("unknown OpenACC atomic kind");
}
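Source-level sketches of the three forms now handled (illustrative; 'capture' remains NYI):

  void atomics(int &x, int &v, int e) {
  #pragma acc atomic read
    v = x; // lowers to acc.atomic.read
  #pragma acc atomic write
    x = e; // lowers to acc.atomic.write
  #pragma acc atomic update
    x = x + 1; // lowers to acc.atomic.update with the recipe region above
  }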
diff --git a/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp b/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp
index a30ae02..5a6193f 100644
--- a/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp
+++ b/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp
@@ -1695,6 +1695,15 @@ static uint64_t getTypeSize(mlir::Type type, mlir::Operation &op) {
return llvm::divideCeil(layout.getTypeSizeInBits(type), 8);
}
+mlir::LogicalResult CIRToLLVMPrefetchOpLowering::matchAndRewrite(
+ cir::PrefetchOp op, OpAdaptor adaptor,
+ mlir::ConversionPatternRewriter &rewriter) const {
+ rewriter.replaceOpWithNewOp<mlir::LLVM::Prefetch>(
+ op, adaptor.getAddr(), adaptor.getIsWrite(), adaptor.getLocality(),
+ /*DataCache=*/1);
+ return mlir::success();
+}
+
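For orientation, cir.prefetch typically originates from __builtin_prefetch, whose read/write and locality operands map directly onto the llvm.prefetch arguments, with the cache-type operand pinned to 1 (data cache). A minimal sketch:

  void warm(const char *p) {
    // rw = 0 (read), locality = 3 (keep in all cache levels).
    __builtin_prefetch(p, /*rw=*/0, /*locality=*/3);
  }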
mlir::LogicalResult CIRToLLVMPtrDiffOpLowering::matchAndRewrite(
cir::PtrDiffOp op, OpAdaptor adaptor,
mlir::ConversionPatternRewriter &rewriter) const {
diff --git a/clang/lib/CodeGen/BackendUtil.cpp b/clang/lib/CodeGen/BackendUtil.cpp
index c423c4b..468c930 100644
--- a/clang/lib/CodeGen/BackendUtil.cpp
+++ b/clang/lib/CodeGen/BackendUtil.cpp
@@ -684,7 +684,8 @@ static void addKCFIPass(const Triple &TargetTriple, const LangOptions &LangOpts,
PassBuilder &PB) {
// If the back-end supports KCFI operand bundle lowering, skip KCFIPass.
if (TargetTriple.getArch() == llvm::Triple::x86_64 ||
- TargetTriple.isAArch64(64) || TargetTriple.isRISCV())
+ TargetTriple.isAArch64(64) || TargetTriple.isRISCV() ||
+ TargetTriple.isARM() || TargetTriple.isThumb())
return;
// Ensure we lower KCFI operand bundles with -O0.
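A sketch of the kind of call site involved (illustrative): under -fsanitize=kcfi, indirect calls carry a kcfi operand bundle, and targets whose backends lower that bundle themselves can skip the middle-end KCFIPass.

  void dispatch(void (*handler)(int), int v) {
    // With KCFI enabled, this indirect call is checked against the
    // callee's type hash; on the architectures exempted above the check
    // is emitted by the backend.
    handler(v);
  }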
diff --git a/clang/lib/CodeGen/CGHLSLRuntime.cpp b/clang/lib/CodeGen/CGHLSLRuntime.cpp
index ecab933..945f9e2 100644
--- a/clang/lib/CodeGen/CGHLSLRuntime.cpp
+++ b/clang/lib/CodeGen/CGHLSLRuntime.cpp
@@ -562,17 +562,16 @@ static llvm::Value *createSPIRVBuiltinLoad(IRBuilder<> &B, llvm::Module &M,
return B.CreateLoad(Ty, GV);
}
-llvm::Value *
-CGHLSLRuntime::emitSystemSemanticLoad(IRBuilder<> &B, llvm::Type *Type,
- const clang::DeclaratorDecl *Decl,
- SemanticInfo &ActiveSemantic) {
- if (isa<HLSLSV_GroupIndexAttr>(ActiveSemantic.Semantic)) {
+llvm::Value *CGHLSLRuntime::emitSystemSemanticLoad(
+ IRBuilder<> &B, llvm::Type *Type, const clang::DeclaratorDecl *Decl,
+ Attr *Semantic, std::optional<unsigned> Index) {
+ if (isa<HLSLSV_GroupIndexAttr>(Semantic)) {
llvm::Function *GroupIndex =
CGM.getIntrinsic(getFlattenedThreadIdInGroupIntrinsic());
return B.CreateCall(FunctionCallee(GroupIndex));
}
- if (isa<HLSLSV_DispatchThreadIDAttr>(ActiveSemantic.Semantic)) {
+ if (isa<HLSLSV_DispatchThreadIDAttr>(Semantic)) {
llvm::Intrinsic::ID IntrinID = getThreadIdIntrinsic();
llvm::Function *ThreadIDIntrinsic =
llvm::Intrinsic::isOverloaded(IntrinID)
@@ -581,7 +580,7 @@ CGHLSLRuntime::emitSystemSemanticLoad(IRBuilder<> &B, llvm::Type *Type,
return buildVectorInput(B, ThreadIDIntrinsic, Type);
}
- if (isa<HLSLSV_GroupThreadIDAttr>(ActiveSemantic.Semantic)) {
+ if (isa<HLSLSV_GroupThreadIDAttr>(Semantic)) {
llvm::Intrinsic::ID IntrinID = getGroupThreadIdIntrinsic();
llvm::Function *GroupThreadIDIntrinsic =
llvm::Intrinsic::isOverloaded(IntrinID)
@@ -590,7 +589,7 @@ CGHLSLRuntime::emitSystemSemanticLoad(IRBuilder<> &B, llvm::Type *Type,
return buildVectorInput(B, GroupThreadIDIntrinsic, Type);
}
- if (isa<HLSLSV_GroupIDAttr>(ActiveSemantic.Semantic)) {
+ if (isa<HLSLSV_GroupIDAttr>(Semantic)) {
llvm::Intrinsic::ID IntrinID = getGroupIdIntrinsic();
llvm::Function *GroupIDIntrinsic =
llvm::Intrinsic::isOverloaded(IntrinID)
@@ -599,8 +598,7 @@ CGHLSLRuntime::emitSystemSemanticLoad(IRBuilder<> &B, llvm::Type *Type,
return buildVectorInput(B, GroupIDIntrinsic, Type);
}
- if (HLSLSV_PositionAttr *S =
- dyn_cast<HLSLSV_PositionAttr>(ActiveSemantic.Semantic)) {
+ if (HLSLSV_PositionAttr *S = dyn_cast<HLSLSV_PositionAttr>(Semantic)) {
if (CGM.getTriple().getEnvironment() == Triple::EnvironmentType::Pixel)
return createSPIRVBuiltinLoad(B, CGM.getModule(), Type,
S->getAttrName()->getName(),
@@ -611,29 +609,56 @@ CGHLSLRuntime::emitSystemSemanticLoad(IRBuilder<> &B, llvm::Type *Type,
}
llvm::Value *
-CGHLSLRuntime::handleScalarSemanticLoad(IRBuilder<> &B, llvm::Type *Type,
- const clang::DeclaratorDecl *Decl,
- SemanticInfo &ActiveSemantic) {
-
- if (!ActiveSemantic.Semantic) {
- ActiveSemantic.Semantic = Decl->getAttr<HLSLSemanticAttr>();
- if (!ActiveSemantic.Semantic) {
- CGM.getDiags().Report(Decl->getInnerLocStart(),
- diag::err_hlsl_semantic_missing);
- return nullptr;
+CGHLSLRuntime::handleScalarSemanticLoad(IRBuilder<> &B, const FunctionDecl *FD,
+ llvm::Type *Type,
+ const clang::DeclaratorDecl *Decl) {
+
+ HLSLSemanticAttr *Semantic = nullptr;
+ for (HLSLSemanticAttr *Item : FD->specific_attrs<HLSLSemanticAttr>()) {
+ if (Item->getTargetDecl() == Decl) {
+ Semantic = Item;
+ break;
}
- ActiveSemantic.Index = ActiveSemantic.Semantic->getSemanticIndex();
}
+ // Sema must create one attribute per scalar field.
+ assert(Semantic);
- return emitSystemSemanticLoad(B, Type, Decl, ActiveSemantic);
+ std::optional<unsigned> Index = std::nullopt;
+ if (Semantic->isSemanticIndexExplicit())
+ Index = Semantic->getSemanticIndex();
+ return emitSystemSemanticLoad(B, Type, Decl, Semantic, Index);
}
llvm::Value *
-CGHLSLRuntime::handleSemanticLoad(IRBuilder<> &B, llvm::Type *Type,
- const clang::DeclaratorDecl *Decl,
- SemanticInfo &ActiveSemantic) {
- assert(!Type->isStructTy());
- return handleScalarSemanticLoad(B, Type, Decl, ActiveSemantic);
+CGHLSLRuntime::handleStructSemanticLoad(IRBuilder<> &B, const FunctionDecl *FD,
+ llvm::Type *Type,
+ const clang::DeclaratorDecl *Decl) {
+ const llvm::StructType *ST = cast<StructType>(Type);
+ const clang::RecordDecl *RD = Decl->getType()->getAsRecordDecl();
+
+ assert(std::distance(RD->field_begin(), RD->field_end()) ==
+ ST->getNumElements());
+
+ llvm::Value *Aggregate = llvm::PoisonValue::get(Type);
+ auto FieldDecl = RD->field_begin();
+ for (unsigned I = 0; I < ST->getNumElements(); ++I) {
+ llvm::Value *ChildValue =
+ handleSemanticLoad(B, FD, ST->getElementType(I), *FieldDecl);
+ assert(ChildValue);
+ Aggregate = B.CreateInsertValue(Aggregate, ChildValue, I);
+ ++FieldDecl;
+ }
+
+ return Aggregate;
+}
+
+llvm::Value *
+CGHLSLRuntime::handleSemanticLoad(IRBuilder<> &B, const FunctionDecl *FD,
+ llvm::Type *Type,
+ const clang::DeclaratorDecl *Decl) {
+ if (Type->isStructTy())
+ return handleStructSemanticLoad(B, FD, Type, Decl);
+ return handleScalarSemanticLoad(B, FD, Type, Decl);
}
void CGHLSLRuntime::emitEntryFunction(const FunctionDecl *FD,
@@ -680,8 +705,25 @@ void CGHLSLRuntime::emitEntryFunction(const FunctionDecl *FD,
}
const ParmVarDecl *PD = FD->getParamDecl(Param.getArgNo() - SRetOffset);
- SemanticInfo ActiveSemantic = {nullptr, 0};
- Args.push_back(handleSemanticLoad(B, Param.getType(), PD, ActiveSemantic));
+ llvm::Value *SemanticValue = nullptr;
+ if ([[maybe_unused]] HLSLParamModifierAttr *MA =
+ PD->getAttr<HLSLParamModifierAttr>()) {
+ llvm_unreachable("Not handled yet");
+ } else {
+ llvm::Type *ParamType =
+ Param.hasByValAttr() ? Param.getParamByValType() : Param.getType();
+ SemanticValue = handleSemanticLoad(B, FD, ParamType, PD);
+ if (!SemanticValue)
+ return;
+ if (Param.hasByValAttr()) {
+ llvm::Value *Var = B.CreateAlloca(Param.getParamByValType());
+ B.CreateStore(SemanticValue, Var);
+ SemanticValue = Var;
+ }
+ }
+
+ assert(SemanticValue);
+ Args.push_back(SemanticValue);
}
CallInst *CI = B.CreateCall(FunctionCallee(Fn), Args, OB);
diff --git a/clang/lib/CodeGen/CGHLSLRuntime.h b/clang/lib/CodeGen/CGHLSLRuntime.h
index 103b4a9..d35df52 100644
--- a/clang/lib/CodeGen/CGHLSLRuntime.h
+++ b/clang/lib/CodeGen/CGHLSLRuntime.h
@@ -144,26 +144,24 @@ public:
protected:
CodeGenModule &CGM;
- void collectInputSemantic(llvm::IRBuilder<> &B, const DeclaratorDecl *D,
- llvm::Type *Type,
- SmallVectorImpl<llvm::Value *> &Inputs);
-
- struct SemanticInfo {
- clang::HLSLSemanticAttr *Semantic;
- uint32_t Index;
- };
-
llvm::Value *emitSystemSemanticLoad(llvm::IRBuilder<> &B, llvm::Type *Type,
const clang::DeclaratorDecl *Decl,
- SemanticInfo &ActiveSemantic);
-
- llvm::Value *handleScalarSemanticLoad(llvm::IRBuilder<> &B, llvm::Type *Type,
- const clang::DeclaratorDecl *Decl,
- SemanticInfo &ActiveSemantic);
-
- llvm::Value *handleSemanticLoad(llvm::IRBuilder<> &B, llvm::Type *Type,
- const clang::DeclaratorDecl *Decl,
- SemanticInfo &ActiveSemantic);
+ Attr *Semantic,
+ std::optional<unsigned> Index);
+
+ llvm::Value *handleScalarSemanticLoad(llvm::IRBuilder<> &B,
+ const FunctionDecl *FD,
+ llvm::Type *Type,
+ const clang::DeclaratorDecl *Decl);
+
+ llvm::Value *handleStructSemanticLoad(llvm::IRBuilder<> &B,
+ const FunctionDecl *FD,
+ llvm::Type *Type,
+ const clang::DeclaratorDecl *Decl);
+
+ llvm::Value *handleSemanticLoad(llvm::IRBuilder<> &B, const FunctionDecl *FD,
+ llvm::Type *Type,
+ const clang::DeclaratorDecl *Decl);
public:
CGHLSLRuntime(CodeGenModule &CGM) : CGM(CGM) {}
diff --git a/clang/lib/CodeGen/CGOpenMPRuntime.cpp b/clang/lib/CodeGen/CGOpenMPRuntime.cpp
index 85b2404..66fea92 100644
--- a/clang/lib/CodeGen/CGOpenMPRuntime.cpp
+++ b/clang/lib/CodeGen/CGOpenMPRuntime.cpp
@@ -2713,14 +2713,6 @@ llvm::Value *CGOpenMPRuntime::emitMessageClause(CodeGenFunction &CGF,
}
llvm::Value *
-CGOpenMPRuntime::emitMessageClause(CodeGenFunction &CGF,
- const OMPMessageClause *MessageClause) {
- return emitMessageClause(
- CGF, MessageClause ? MessageClause->getMessageString() : nullptr,
- MessageClause->getBeginLoc());
-}
-
-llvm::Value *
CGOpenMPRuntime::emitSeverityClause(OpenMPSeverityClauseKind Severity,
SourceLocation Loc) {
// OpenMP 6.0, 10.4: "If no severity clause is specified then the effect is
@@ -2729,13 +2721,6 @@ CGOpenMPRuntime::emitSeverityClause(OpenMPSeverityClauseKind Severity,
Severity == OMPC_SEVERITY_warning ? 1 : 2);
}
-llvm::Value *
-CGOpenMPRuntime::emitSeverityClause(const OMPSeverityClause *SeverityClause) {
- return emitSeverityClause(SeverityClause ? SeverityClause->getSeverityKind()
- : OMPC_SEVERITY_unknown,
- SeverityClause->getBeginLoc());
-}
-
void CGOpenMPRuntime::emitNumThreadsClause(
CodeGenFunction &CGF, llvm::Value *NumThreads, SourceLocation Loc,
OpenMPNumThreadsClauseModifier Modifier, OpenMPSeverityClauseKind Severity,
diff --git a/clang/lib/CodeGen/CGOpenMPRuntime.h b/clang/lib/CodeGen/CGOpenMPRuntime.h
index ba76ba6b..6bfd7d6 100644
--- a/clang/lib/CodeGen/CGOpenMPRuntime.h
+++ b/clang/lib/CodeGen/CGOpenMPRuntime.h
@@ -1051,13 +1051,9 @@ public:
virtual llvm::Value *emitMessageClause(CodeGenFunction &CGF,
const Expr *Message,
SourceLocation Loc);
- virtual llvm::Value *emitMessageClause(CodeGenFunction &CGF,
- const OMPMessageClause *MessageClause);
virtual llvm::Value *emitSeverityClause(OpenMPSeverityClauseKind Severity,
SourceLocation Loc);
- virtual llvm::Value *
- emitSeverityClause(const OMPSeverityClause *SeverityClause);
/// Emits call to void __kmpc_push_num_threads(ident_t *loc, kmp_int32
/// global_tid, kmp_int32 num_threads) to generate code for 'num_threads'
diff --git a/clang/lib/CodeGen/CodeGenModule.cpp b/clang/lib/CodeGen/CodeGenModule.cpp
index 3746bc04..0fea57b 100644
--- a/clang/lib/CodeGen/CodeGenModule.cpp
+++ b/clang/lib/CodeGen/CodeGenModule.cpp
@@ -146,8 +146,6 @@ createTargetCodeGenInfo(CodeGenModule &CGM) {
return createWindowsAArch64TargetCodeGenInfo(CGM, AArch64ABIKind::Win64);
else if (Target.getABI() == "aapcs-soft")
Kind = AArch64ABIKind::AAPCSSoft;
- else if (Target.getABI() == "pauthtest")
- Kind = AArch64ABIKind::PAuthTest;
return createAArch64TargetCodeGenInfo(CGM, Kind);
}
diff --git a/clang/lib/CodeGen/TargetBuiltins/RISCV.cpp b/clang/lib/CodeGen/TargetBuiltins/RISCV.cpp
index 920d285..1300722 100644
--- a/clang/lib/CodeGen/TargetBuiltins/RISCV.cpp
+++ b/clang/lib/CodeGen/TargetBuiltins/RISCV.cpp
@@ -1121,6 +1121,8 @@ Value *CodeGenFunction::EmitRISCVBuiltinExpr(unsigned BuiltinID,
bool IsMasked = false;
  // This is used by segment load/store to determine its LLVM type.
unsigned SegInstSEW = 8;
+  // This is used by the XSfmm builtins.
+ unsigned TWiden = 0;
// Required for overloaded intrinsics.
llvm::SmallVector<llvm::Type *, 2> IntrinsicTypes;
diff --git a/clang/lib/CodeGen/TargetInfo.h b/clang/lib/CodeGen/TargetInfo.h
index d0edae1..f63e900 100644
--- a/clang/lib/CodeGen/TargetInfo.h
+++ b/clang/lib/CodeGen/TargetInfo.h
@@ -483,7 +483,6 @@ enum class AArch64ABIKind {
DarwinPCS,
Win64,
AAPCSSoft,
- PAuthTest,
};
std::unique_ptr<TargetCodeGenInfo>
diff --git a/clang/lib/Driver/ToolChain.cpp b/clang/lib/Driver/ToolChain.cpp
index 3d5cac6..eea5c2f 100644
--- a/clang/lib/Driver/ToolChain.cpp
+++ b/clang/lib/Driver/ToolChain.cpp
@@ -1253,7 +1253,6 @@ std::string ToolChain::ComputeLLVMTriple(const ArgList &Args,
}
case llvm::Triple::aarch64: {
llvm::Triple Triple = getTriple();
- tools::aarch64::setPAuthABIInTriple(getDriver(), Args, Triple);
if (!Triple.isOSBinFormatMachO())
return Triple.getTriple();
diff --git a/clang/lib/Driver/ToolChains/Arch/AArch64.cpp b/clang/lib/Driver/ToolChains/Arch/AArch64.cpp
index eb5d542..e8d5e38 100644
--- a/clang/lib/Driver/ToolChains/Arch/AArch64.cpp
+++ b/clang/lib/Driver/ToolChains/Arch/AArch64.cpp
@@ -466,27 +466,6 @@ void aarch64::getAArch64TargetFeatures(const Driver &D,
Features.push_back("+no-bti-at-return-twice");
}
-void aarch64::setPAuthABIInTriple(const Driver &D, const ArgList &Args,
- llvm::Triple &Triple) {
- Arg *ABIArg = Args.getLastArg(options::OPT_mabi_EQ);
- bool HasPAuthABI =
- ABIArg ? (StringRef(ABIArg->getValue()) == "pauthtest") : false;
-
- switch (Triple.getEnvironment()) {
- case llvm::Triple::UnknownEnvironment:
- if (HasPAuthABI)
- Triple.setEnvironment(llvm::Triple::PAuthTest);
- break;
- case llvm::Triple::PAuthTest:
- break;
- default:
- if (HasPAuthABI)
- D.Diag(diag::err_drv_unsupported_opt_for_target)
- << ABIArg->getAsString(Args) << Triple.getTriple();
- break;
- }
-}
-
/// Is the triple {aarch64,aarch64_be}-none-elf?
bool aarch64::isAArch64BareMetal(const llvm::Triple &Triple) {
if (Triple.getArch() != llvm::Triple::aarch64 &&
diff --git a/clang/lib/Driver/ToolChains/Arch/AArch64.h b/clang/lib/Driver/ToolChains/Arch/AArch64.h
index 2765ee8..97ebfa6 100644
--- a/clang/lib/Driver/ToolChains/Arch/AArch64.h
+++ b/clang/lib/Driver/ToolChains/Arch/AArch64.h
@@ -28,8 +28,6 @@ void getAArch64TargetFeatures(const Driver &D, const llvm::Triple &Triple,
std::string getAArch64TargetCPU(const llvm::opt::ArgList &Args,
const llvm::Triple &Triple, llvm::opt::Arg *&A);
-void setPAuthABIInTriple(const Driver &D, const llvm::opt::ArgList &Args,
- llvm::Triple &triple);
bool isAArch64BareMetal(const llvm::Triple &Triple);
} // end namespace aarch64
diff --git a/clang/lib/Driver/ToolChains/Clang.cpp b/clang/lib/Driver/ToolChains/Clang.cpp
index caf7478..79edc56 100644
--- a/clang/lib/Driver/ToolChains/Clang.cpp
+++ b/clang/lib/Driver/ToolChains/Clang.cpp
@@ -1348,59 +1348,6 @@ void AddUnalignedAccessWarning(ArgStringList &CmdArgs) {
}
}
-// Each combination of options here forms a signing schema, and in most cases
-// each signing schema is its own incompatible ABI. The default values of the
-// options represent the default signing schema.
-static void handlePAuthABI(const ArgList &DriverArgs, ArgStringList &CC1Args) {
- if (!DriverArgs.hasArg(options::OPT_fptrauth_intrinsics,
- options::OPT_fno_ptrauth_intrinsics))
- CC1Args.push_back("-fptrauth-intrinsics");
-
- if (!DriverArgs.hasArg(options::OPT_fptrauth_calls,
- options::OPT_fno_ptrauth_calls))
- CC1Args.push_back("-fptrauth-calls");
-
- if (!DriverArgs.hasArg(options::OPT_fptrauth_returns,
- options::OPT_fno_ptrauth_returns))
- CC1Args.push_back("-fptrauth-returns");
-
- if (!DriverArgs.hasArg(options::OPT_fptrauth_auth_traps,
- options::OPT_fno_ptrauth_auth_traps))
- CC1Args.push_back("-fptrauth-auth-traps");
-
- if (!DriverArgs.hasArg(
- options::OPT_fptrauth_vtable_pointer_address_discrimination,
- options::OPT_fno_ptrauth_vtable_pointer_address_discrimination))
- CC1Args.push_back("-fptrauth-vtable-pointer-address-discrimination");
-
- if (!DriverArgs.hasArg(
- options::OPT_fptrauth_vtable_pointer_type_discrimination,
- options::OPT_fno_ptrauth_vtable_pointer_type_discrimination))
- CC1Args.push_back("-fptrauth-vtable-pointer-type-discrimination");
-
- if (!DriverArgs.hasArg(
- options::OPT_fptrauth_type_info_vtable_pointer_discrimination,
- options::OPT_fno_ptrauth_type_info_vtable_pointer_discrimination))
- CC1Args.push_back("-fptrauth-type-info-vtable-pointer-discrimination");
-
- if (!DriverArgs.hasArg(options::OPT_fptrauth_indirect_gotos,
- options::OPT_fno_ptrauth_indirect_gotos))
- CC1Args.push_back("-fptrauth-indirect-gotos");
-
- if (!DriverArgs.hasArg(options::OPT_fptrauth_init_fini,
- options::OPT_fno_ptrauth_init_fini))
- CC1Args.push_back("-fptrauth-init-fini");
-
- if (!DriverArgs.hasArg(
- options::OPT_fptrauth_init_fini_address_discrimination,
- options::OPT_fno_ptrauth_init_fini_address_discrimination))
- CC1Args.push_back("-fptrauth-init-fini-address-discrimination");
-
- if (!DriverArgs.hasArg(options::OPT_faarch64_jump_table_hardening,
- options::OPT_fno_aarch64_jump_table_hardening))
- CC1Args.push_back("-faarch64-jump-table-hardening");
-}
-
static void CollectARMPACBTIOptions(const ToolChain &TC, const ArgList &Args,
ArgStringList &CmdArgs, bool isAArch64) {
const llvm::Triple &Triple = TC.getEffectiveTriple();
@@ -1638,7 +1585,9 @@ void RenderAArch64ABI(const llvm::Triple &Triple, const ArgList &Args,
ABIName = A->getValue();
else if (Triple.isOSDarwin())
ABIName = "darwinpcs";
- else if (Triple.getEnvironment() == llvm::Triple::PAuthTest)
+  // TODO: We probably want a target hook here.
+ else if (Triple.isOSLinux() &&
+ Triple.getEnvironment() == llvm::Triple::PAuthTest)
ABIName = "pauthtest";
else
ABIName = "aapcs";
@@ -1758,8 +1707,6 @@ void Clang::AddAArch64TargetArgs(const ArgList &Args,
options::OPT_fno_ptrauth_objc_interface_sel);
Args.addOptInFlag(CmdArgs, options::OPT_fptrauth_objc_class_ro,
options::OPT_fno_ptrauth_objc_class_ro);
- if (Triple.getEnvironment() == llvm::Triple::PAuthTest)
- handlePAuthABI(Args, CmdArgs);
// Enable/disable return address signing and indirect branch targets.
CollectARMPACBTIOptions(getToolChain(), Args, CmdArgs, true /*isAArch64*/);
diff --git a/clang/lib/Driver/ToolChains/CommonArgs.cpp b/clang/lib/Driver/ToolChains/CommonArgs.cpp
index 99400ac..727af69 100644
--- a/clang/lib/Driver/ToolChains/CommonArgs.cpp
+++ b/clang/lib/Driver/ToolChains/CommonArgs.cpp
@@ -949,6 +949,24 @@ bool tools::isTLSDESCEnabled(const ToolChain &TC,
return EnableTLSDESC;
}
+void tools::addDTLTOOptions(const ToolChain &ToolChain, const ArgList &Args,
+ llvm::opt::ArgStringList &CmdArgs) {
+ if (Arg *A = Args.getLastArg(options::OPT_fthinlto_distributor_EQ)) {
+ CmdArgs.push_back(
+ Args.MakeArgString("--thinlto-distributor=" + Twine(A->getValue())));
+ const Driver &D = ToolChain.getDriver();
+ CmdArgs.push_back(Args.MakeArgString("--thinlto-remote-compiler=" +
+ Twine(D.getClangProgramPath())));
+ if (auto *PA = D.getPrependArg())
+ CmdArgs.push_back(Args.MakeArgString(
+ "--thinlto-remote-compiler-prepend-arg=" + Twine(PA)));
+
+ for (const auto &A :
+ Args.getAllArgValues(options::OPT_Xthinlto_distributor_EQ))
+ CmdArgs.push_back(Args.MakeArgString("--thinlto-distributor-arg=" + A));
+ }
+}
+
void tools::addLTOOptions(const ToolChain &ToolChain, const ArgList &Args,
ArgStringList &CmdArgs, const InputInfo &Output,
const InputInfoList &Inputs, bool IsThinLTO) {
@@ -1350,16 +1368,7 @@ void tools::addLTOOptions(const ToolChain &ToolChain, const ArgList &Args,
CmdArgs.push_back(
Args.MakeArgString(Twine(PluginOptPrefix) + "-time-passes"));
- if (Arg *A = Args.getLastArg(options::OPT_fthinlto_distributor_EQ)) {
- CmdArgs.push_back(
- Args.MakeArgString("--thinlto-distributor=" + Twine(A->getValue())));
- CmdArgs.push_back(
- Args.MakeArgString("--thinlto-remote-compiler=" +
- Twine(ToolChain.getDriver().getClangProgramPath())));
-
- for (auto A : Args.getAllArgValues(options::OPT_Xthinlto_distributor_EQ))
- CmdArgs.push_back(Args.MakeArgString("--thinlto-distributor-arg=" + A));
- }
+ addDTLTOOptions(ToolChain, Args, CmdArgs);
}
void tools::addOpenMPRuntimeLibraryPath(const ToolChain &TC,
diff --git a/clang/lib/Driver/ToolChains/Fuchsia.cpp b/clang/lib/Driver/ToolChains/Fuchsia.cpp
index 146dc8b..31c2f3f 100644
--- a/clang/lib/Driver/ToolChains/Fuchsia.cpp
+++ b/clang/lib/Driver/ToolChains/Fuchsia.cpp
@@ -481,9 +481,11 @@ SanitizerMask Fuchsia::getSupportedSanitizers() const {
Res |= SanitizerKind::Fuzzer;
Res |= SanitizerKind::FuzzerNoLink;
Res |= SanitizerKind::Leak;
- Res |= SanitizerKind::SafeStack;
Res |= SanitizerKind::Scudo;
Res |= SanitizerKind::Thread;
+  if (getTriple().getArch() == llvm::Triple::x86_64)
+    Res |= SanitizerKind::SafeStack;
return Res;
}
diff --git a/clang/lib/Driver/ToolChains/Linux.cpp b/clang/lib/Driver/ToolChains/Linux.cpp
index 8eb4d34e..94a9fe8 100644
--- a/clang/lib/Driver/ToolChains/Linux.cpp
+++ b/clang/lib/Driver/ToolChains/Linux.cpp
@@ -445,6 +445,102 @@ std::string Linux::computeSysRoot() const {
return std::string();
}
+static void setPAuthABIInTriple(const Driver &D, const ArgList &Args,
+ llvm::Triple &Triple) {
+ Arg *ABIArg = Args.getLastArg(options::OPT_mabi_EQ);
+ bool HasPAuthABI =
+ ABIArg ? (StringRef(ABIArg->getValue()) == "pauthtest") : false;
+
+ switch (Triple.getEnvironment()) {
+ case llvm::Triple::UnknownEnvironment:
+ if (HasPAuthABI)
+ Triple.setEnvironment(llvm::Triple::PAuthTest);
+ break;
+ case llvm::Triple::PAuthTest:
+ break;
+ default:
+ if (HasPAuthABI)
+ D.Diag(diag::err_drv_unsupported_opt_for_target)
+ << ABIArg->getAsString(Args) << Triple.getTriple();
+ break;
+ }
+}
+
+std::string Linux::ComputeEffectiveClangTriple(const llvm::opt::ArgList &Args,
+ types::ID InputType) const {
+ std::string TripleString =
+ Generic_ELF::ComputeEffectiveClangTriple(Args, InputType);
+ if (getTriple().isAArch64()) {
+ llvm::Triple Triple(TripleString);
+ setPAuthABIInTriple(getDriver(), Args, Triple);
+ return Triple.getTriple();
+ }
+ return TripleString;
+}
+
+// Each combination of options here forms a signing schema, and in most cases
+// each signing schema is its own incompatible ABI. The default values of the
+// options represent the default signing schema.
+static void handlePAuthABI(const Driver &D, const ArgList &DriverArgs,
+ ArgStringList &CC1Args) {
+ if (!DriverArgs.hasArg(options::OPT_fptrauth_intrinsics,
+ options::OPT_fno_ptrauth_intrinsics))
+ CC1Args.push_back("-fptrauth-intrinsics");
+
+ if (!DriverArgs.hasArg(options::OPT_fptrauth_calls,
+ options::OPT_fno_ptrauth_calls))
+ CC1Args.push_back("-fptrauth-calls");
+
+ if (!DriverArgs.hasArg(options::OPT_fptrauth_returns,
+ options::OPT_fno_ptrauth_returns))
+ CC1Args.push_back("-fptrauth-returns");
+
+ if (!DriverArgs.hasArg(options::OPT_fptrauth_auth_traps,
+ options::OPT_fno_ptrauth_auth_traps))
+ CC1Args.push_back("-fptrauth-auth-traps");
+
+ if (!DriverArgs.hasArg(
+ options::OPT_fptrauth_vtable_pointer_address_discrimination,
+ options::OPT_fno_ptrauth_vtable_pointer_address_discrimination))
+ CC1Args.push_back("-fptrauth-vtable-pointer-address-discrimination");
+
+ if (!DriverArgs.hasArg(
+ options::OPT_fptrauth_vtable_pointer_type_discrimination,
+ options::OPT_fno_ptrauth_vtable_pointer_type_discrimination))
+ CC1Args.push_back("-fptrauth-vtable-pointer-type-discrimination");
+
+ if (!DriverArgs.hasArg(
+ options::OPT_fptrauth_type_info_vtable_pointer_discrimination,
+ options::OPT_fno_ptrauth_type_info_vtable_pointer_discrimination))
+ CC1Args.push_back("-fptrauth-type-info-vtable-pointer-discrimination");
+
+ if (!DriverArgs.hasArg(options::OPT_fptrauth_indirect_gotos,
+ options::OPT_fno_ptrauth_indirect_gotos))
+ CC1Args.push_back("-fptrauth-indirect-gotos");
+
+ if (!DriverArgs.hasArg(options::OPT_fptrauth_init_fini,
+ options::OPT_fno_ptrauth_init_fini))
+ CC1Args.push_back("-fptrauth-init-fini");
+
+ if (!DriverArgs.hasArg(
+ options::OPT_fptrauth_init_fini_address_discrimination,
+ options::OPT_fno_ptrauth_init_fini_address_discrimination))
+ CC1Args.push_back("-fptrauth-init-fini-address-discrimination");
+
+ if (!DriverArgs.hasArg(options::OPT_faarch64_jump_table_hardening,
+ options::OPT_fno_aarch64_jump_table_hardening))
+ CC1Args.push_back("-faarch64-jump-table-hardening");
+}
+
+void Linux::addClangTargetOptions(const llvm::opt::ArgList &DriverArgs,
+ llvm::opt::ArgStringList &CC1Args,
+ Action::OffloadKind DeviceOffloadKind) const {
+ llvm::Triple Triple(ComputeEffectiveClangTriple(DriverArgs));
+ if (Triple.isAArch64() && Triple.getEnvironment() == llvm::Triple::PAuthTest)
+ handlePAuthABI(getDriver(), DriverArgs, CC1Args);
+ Generic_ELF::addClangTargetOptions(DriverArgs, CC1Args, DeviceOffloadKind);
+}
+
std::string Linux::getDynamicLinker(const ArgList &Args) const {
const llvm::Triple::ArchType Arch = getArch();
const llvm::Triple &Triple = getTriple();
diff --git a/clang/lib/Driver/ToolChains/Linux.h b/clang/lib/Driver/ToolChains/Linux.h
index 2eb2d05..97bad77 100644
--- a/clang/lib/Driver/ToolChains/Linux.h
+++ b/clang/lib/Driver/ToolChains/Linux.h
@@ -53,7 +53,14 @@ public:
SanitizerMask getSupportedSanitizers() const override;
void addProfileRTLibs(const llvm::opt::ArgList &Args,
llvm::opt::ArgStringList &CmdArgs) const override;
+ std::string ComputeEffectiveClangTriple(
+ const llvm::opt::ArgList &Args,
+ types::ID InputType = types::TY_INVALID) const override;
std::string computeSysRoot() const override;
+ void
+ addClangTargetOptions(const llvm::opt::ArgList &DriverArgs,
+ llvm::opt::ArgStringList &CC1Args,
+ Action::OffloadKind DeviceOffloadKind) const override;
std::string getDynamicLinker(const llvm::opt::ArgList &Args) const override;
diff --git a/clang/lib/Driver/ToolChains/PS4CPU.cpp b/clang/lib/Driver/ToolChains/PS4CPU.cpp
index 61afc61..34ec65a 100644
--- a/clang/lib/Driver/ToolChains/PS4CPU.cpp
+++ b/clang/lib/Driver/ToolChains/PS4CPU.cpp
@@ -344,16 +344,7 @@ void tools::PS5cpu::Linker::ConstructJob(Compilation &C, const JobAction &JA,
// pass LTO options to ensure proper codegen, metadata production, etc. if
// LTO indeed occurs.
- if (const Arg *A = Args.getLastArg(options::OPT_fthinlto_distributor_EQ)) {
- CmdArgs.push_back(
- Args.MakeArgString("--thinlto-distributor=" + Twine(A->getValue())));
- CmdArgs.push_back(Args.MakeArgString("--thinlto-remote-compiler=" +
- Twine(D.getClangProgramPath())));
-
- for (const auto &A :
- Args.getAllArgValues(options::OPT_Xthinlto_distributor_EQ))
- CmdArgs.push_back(Args.MakeArgString("--thinlto-distributor-arg=" + A));
- }
+ tools::addDTLTOOptions(TC, Args, CmdArgs);
if (Args.hasFlag(options::OPT_funified_lto, options::OPT_fno_unified_lto,
true))
diff --git a/clang/lib/Format/TokenAnnotator.cpp b/clang/lib/Format/TokenAnnotator.cpp
index c97a9e8..a8a9c51 100644
--- a/clang/lib/Format/TokenAnnotator.cpp
+++ b/clang/lib/Format/TokenAnnotator.cpp
@@ -3791,12 +3791,18 @@ static bool isFunctionDeclarationName(const LangOptions &LangOpts,
if (Current.is(TT_FunctionDeclarationName))
return true;
- if (Current.isNoneOf(tok::identifier, tok::kw_operator))
+ if (!Current.Tok.getIdentifierInfo())
return false;
const auto *Prev = Current.getPreviousNonComment();
assert(Prev);
+ if (Prev->is(tok::coloncolon))
+ Prev = Prev->Previous;
+
+ if (!Prev)
+ return false;
+
const auto &Previous = *Prev;
if (const auto *PrevPrev = Previous.getPreviousNonComment();
@@ -3845,8 +3851,6 @@ static bool isFunctionDeclarationName(const LangOptions &LangOpts,
// Find parentheses of parameter list.
if (Current.is(tok::kw_operator)) {
- if (Line.startsWith(tok::kw_friend))
- return true;
if (Previous.Tok.getIdentifierInfo() &&
Previous.isNoneOf(tok::kw_return, tok::kw_co_return)) {
return true;
@@ -4407,8 +4411,12 @@ unsigned TokenAnnotator::splitPenalty(const AnnotatedLine &Line,
// breaking after it.
if (Right.is(TT_SelectorName))
return 0;
- if (Left.is(tok::colon) && Left.is(TT_ObjCMethodExpr))
- return Line.MightBeFunctionDecl ? 50 : 500;
+ if (Left.is(tok::colon)) {
+ if (Left.is(TT_ObjCMethodExpr))
+ return Line.MightBeFunctionDecl ? 50 : 500;
+ if (Left.is(TT_ObjCSelector))
+ return 500;
+ }
// In Objective-C type declarations, avoid breaking after the category's
// open paren (we'll prefer breaking after the protocol list's opening
@@ -6291,7 +6299,9 @@ bool TokenAnnotator::canBreakBefore(const AnnotatedLine &Line,
TT_BitFieldColon)) {
return false;
}
- if (Left.is(tok::colon) && Left.isOneOf(TT_DictLiteral, TT_ObjCMethodExpr)) {
+ if (Left.is(tok::colon) && Left.isOneOf(TT_ObjCSelector, TT_ObjCMethodExpr))
+ return true;
+ if (Left.is(tok::colon) && Left.is(TT_DictLiteral)) {
if (Style.isProto()) {
if (!Style.AlwaysBreakBeforeMultilineStrings && Right.isStringLiteral())
return false;
diff --git a/clang/lib/Frontend/CompilerInstance.cpp b/clang/lib/Frontend/CompilerInstance.cpp
index 374138f..6b09f7f 100644
--- a/clang/lib/Frontend/CompilerInstance.cpp
+++ b/clang/lib/Frontend/CompilerInstance.cpp
@@ -546,14 +546,11 @@ void CompilerInstance::createPreprocessor(TranslationUnitKind TUKind) {
std::string CompilerInstance::getSpecificModuleCachePath(StringRef ModuleHash) {
assert(FileMgr && "Specific module cache path requires a FileManager");
- if (getHeaderSearchOpts().ModuleCachePath.empty())
- return "";
-
// Set up the module path, including the hash for the module-creation options.
SmallString<256> SpecificModuleCache;
normalizeModuleCachePath(*FileMgr, getHeaderSearchOpts().ModuleCachePath,
SpecificModuleCache);
- if (!getHeaderSearchOpts().DisableModuleHash)
+ if (!SpecificModuleCache.empty() && !getHeaderSearchOpts().DisableModuleHash)
llvm::sys::path::append(SpecificModuleCache, ModuleHash);
return std::string(SpecificModuleCache);
}
@@ -885,7 +882,7 @@ CompilerInstance::createOutputFileImpl(StringRef OutputPath, bool Binary,
"File Manager is required to fix up relative path.\n");
AbsPath.emplace(OutputPath);
- FileMgr->FixupRelativePath(*AbsPath);
+ FileManager::fixupRelativePath(getFileSystemOpts(), *AbsPath);
OutputPath = *AbsPath;
}
diff --git a/clang/lib/Headers/CMakeLists.txt b/clang/lib/Headers/CMakeLists.txt
index 32a6be8..1858912 100644
--- a/clang/lib/Headers/CMakeLists.txt
+++ b/clang/lib/Headers/CMakeLists.txt
@@ -4,6 +4,9 @@
set(core_files
builtins.h
float.h
+ __float_float.h
+ __float_header_macro.h
+ __float_infinity_nan.h
inttypes.h
iso646.h
limits.h
diff --git a/clang/lib/Headers/__float_float.h b/clang/lib/Headers/__float_float.h
new file mode 100644
index 0000000..267c072
--- /dev/null
+++ b/clang/lib/Headers/__float_float.h
@@ -0,0 +1,176 @@
+/*===---- __float_float.h --------------------------------------------------===
+ *
+ * Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+ * See https://llvm.org/LICENSE.txt for license information.
+ * SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+ *
+ *===-----------------------------------------------------------------------===
+ */
+
+#ifndef __CLANG_FLOAT_FLOAT_H
+#define __CLANG_FLOAT_FLOAT_H
+
+#if (defined(__MINGW32__) || defined(_MSC_VER) || defined(_AIX)) && \
+ __STDC_HOSTED__
+
+/* Undefine anything that we'll be redefining below. */
+# undef FLT_EVAL_METHOD
+# undef FLT_ROUNDS
+# undef FLT_RADIX
+# undef FLT_MANT_DIG
+# undef DBL_MANT_DIG
+# undef LDBL_MANT_DIG
+#if (defined(__STDC_VERSION__) && __STDC_VERSION__ >= 199901L) || \
+ !defined(__STRICT_ANSI__) || \
+ (defined(__cplusplus) && __cplusplus >= 201103L) || \
+ (__STDC_HOSTED__ && defined(_AIX) && defined(_ALL_SOURCE))
+# undef DECIMAL_DIG
+# endif
+# undef FLT_DIG
+# undef DBL_DIG
+# undef LDBL_DIG
+# undef FLT_MIN_EXP
+# undef DBL_MIN_EXP
+# undef LDBL_MIN_EXP
+# undef FLT_MIN_10_EXP
+# undef DBL_MIN_10_EXP
+# undef LDBL_MIN_10_EXP
+# undef FLT_MAX_EXP
+# undef DBL_MAX_EXP
+# undef LDBL_MAX_EXP
+# undef FLT_MAX_10_EXP
+# undef DBL_MAX_10_EXP
+# undef LDBL_MAX_10_EXP
+# undef FLT_MAX
+# undef DBL_MAX
+# undef LDBL_MAX
+# undef FLT_EPSILON
+# undef DBL_EPSILON
+# undef LDBL_EPSILON
+# undef FLT_MIN
+# undef DBL_MIN
+# undef LDBL_MIN
+#if (defined(__STDC_VERSION__) && __STDC_VERSION__ >= 201112L) || \
+ !defined(__STRICT_ANSI__) || \
+ (defined(__cplusplus) && __cplusplus >= 201703L) || \
+ (__STDC_HOSTED__ && defined(_AIX) && defined(_ALL_SOURCE))
+# undef FLT_TRUE_MIN
+# undef DBL_TRUE_MIN
+# undef LDBL_TRUE_MIN
+# undef FLT_DECIMAL_DIG
+# undef DBL_DECIMAL_DIG
+# undef LDBL_DECIMAL_DIG
+# undef FLT_HAS_SUBNORM
+# undef DBL_HAS_SUBNORM
+# undef LDBL_HAS_SUBNORM
+# endif
+#if (defined(__STDC_VERSION__) && __STDC_VERSION__ >= 202311L) || \
+ !defined(__STRICT_ANSI__)
+# undef FLT_NORM_MAX
+# undef DBL_NORM_MAX
+# undef LDBL_NORM_MAX
+#endif
+#endif
+
+#if (defined(__STDC_VERSION__) && __STDC_VERSION__ >= 202311L) || \
+ !defined(__STRICT_ANSI__)
+# undef FLT_SNAN
+# undef DBL_SNAN
+# undef LDBL_SNAN
+#endif
+
+/* Characteristics of floating point types, C99 5.2.4.2.2 */
+
+#if (defined(__STDC_VERSION__) && __STDC_VERSION__ >= 199901L) || \
+ (defined(__cplusplus) && __cplusplus >= 201103L)
+#define FLT_EVAL_METHOD __FLT_EVAL_METHOD__
+#endif
+#define FLT_ROUNDS (__builtin_flt_rounds())
+#define FLT_RADIX __FLT_RADIX__
+
+#define FLT_MANT_DIG __FLT_MANT_DIG__
+#define DBL_MANT_DIG __DBL_MANT_DIG__
+#define LDBL_MANT_DIG __LDBL_MANT_DIG__
+
+#if (defined(__STDC_VERSION__) && __STDC_VERSION__ >= 199901L) || \
+ !defined(__STRICT_ANSI__) || \
+ (defined(__cplusplus) && __cplusplus >= 201103L) || \
+ (__STDC_HOSTED__ && defined(_AIX) && defined(_ALL_SOURCE))
+# define DECIMAL_DIG __DECIMAL_DIG__
+#endif
+
+#define FLT_DIG __FLT_DIG__
+#define DBL_DIG __DBL_DIG__
+#define LDBL_DIG __LDBL_DIG__
+
+#define FLT_MIN_EXP __FLT_MIN_EXP__
+#define DBL_MIN_EXP __DBL_MIN_EXP__
+#define LDBL_MIN_EXP __LDBL_MIN_EXP__
+
+#define FLT_MIN_10_EXP __FLT_MIN_10_EXP__
+#define DBL_MIN_10_EXP __DBL_MIN_10_EXP__
+#define LDBL_MIN_10_EXP __LDBL_MIN_10_EXP__
+
+#define FLT_MAX_EXP __FLT_MAX_EXP__
+#define DBL_MAX_EXP __DBL_MAX_EXP__
+#define LDBL_MAX_EXP __LDBL_MAX_EXP__
+
+#define FLT_MAX_10_EXP __FLT_MAX_10_EXP__
+#define DBL_MAX_10_EXP __DBL_MAX_10_EXP__
+#define LDBL_MAX_10_EXP __LDBL_MAX_10_EXP__
+
+#define FLT_MAX __FLT_MAX__
+#define DBL_MAX __DBL_MAX__
+#define LDBL_MAX __LDBL_MAX__
+
+#define FLT_EPSILON __FLT_EPSILON__
+#define DBL_EPSILON __DBL_EPSILON__
+#define LDBL_EPSILON __LDBL_EPSILON__
+
+#define FLT_MIN __FLT_MIN__
+#define DBL_MIN __DBL_MIN__
+#define LDBL_MIN __LDBL_MIN__
+
+#if (defined(__STDC_VERSION__) && __STDC_VERSION__ >= 201112L) || \
+ !defined(__STRICT_ANSI__) || \
+ (defined(__cplusplus) && __cplusplus >= 201703L) || \
+ (__STDC_HOSTED__ && defined(_AIX) && defined(_ALL_SOURCE))
+# define FLT_TRUE_MIN __FLT_DENORM_MIN__
+# define DBL_TRUE_MIN __DBL_DENORM_MIN__
+# define LDBL_TRUE_MIN __LDBL_DENORM_MIN__
+# define FLT_DECIMAL_DIG __FLT_DECIMAL_DIG__
+# define DBL_DECIMAL_DIG __DBL_DECIMAL_DIG__
+# define LDBL_DECIMAL_DIG __LDBL_DECIMAL_DIG__
+# define FLT_HAS_SUBNORM __FLT_HAS_DENORM__
+# define DBL_HAS_SUBNORM __DBL_HAS_DENORM__
+# define LDBL_HAS_SUBNORM __LDBL_HAS_DENORM__
+#endif
+
+#if (defined(__STDC_VERSION__) && __STDC_VERSION__ >= 202311L) || \
+ !defined(__STRICT_ANSI__)
+ /* C23 5.2.5.3.2p28 */
+# define FLT_SNAN (__builtin_nansf(""))
+# define DBL_SNAN (__builtin_nans(""))
+# define LDBL_SNAN (__builtin_nansl(""))
+
+ /* C23 5.2.5.3.3p32 */
+# define FLT_NORM_MAX __FLT_NORM_MAX__
+# define DBL_NORM_MAX __DBL_NORM_MAX__
+# define LDBL_NORM_MAX __LDBL_NORM_MAX__
+#endif
+
+#ifdef __STDC_WANT_IEC_60559_TYPES_EXT__
+# define FLT16_MANT_DIG __FLT16_MANT_DIG__
+# define FLT16_DECIMAL_DIG __FLT16_DECIMAL_DIG__
+# define FLT16_DIG __FLT16_DIG__
+# define FLT16_MIN_EXP __FLT16_MIN_EXP__
+# define FLT16_MIN_10_EXP __FLT16_MIN_10_EXP__
+# define FLT16_MAX_EXP __FLT16_MAX_EXP__
+# define FLT16_MAX_10_EXP __FLT16_MAX_10_EXP__
+# define FLT16_MAX __FLT16_MAX__
+# define FLT16_EPSILON __FLT16_EPSILON__
+# define FLT16_MIN __FLT16_MIN__
+# define FLT16_TRUE_MIN __FLT16_TRUE_MIN__
+#endif /* __STDC_WANT_IEC_60559_TYPES_EXT__ */
+
+#endif /* __CLANG_FLOAT_FLOAT_H */
diff --git a/clang/lib/Headers/__float_header_macro.h b/clang/lib/Headers/__float_header_macro.h
new file mode 100644
index 0000000..11b270e
--- /dev/null
+++ b/clang/lib/Headers/__float_header_macro.h
@@ -0,0 +1,12 @@
+/*===---- __float_header_macro.h -------------------------------------------===
+ *
+ * Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+ * See https://llvm.org/LICENSE.txt for license information.
+ * SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+ *
+ *===-----------------------------------------------------------------------===
+ */
+
+#ifndef __CLANG_FLOAT_H
+#define __CLANG_FLOAT_H
+#endif /* __CLANG_FLOAT_H */
diff --git a/clang/lib/Headers/__float_infinity_nan.h b/clang/lib/Headers/__float_infinity_nan.h
new file mode 100644
index 0000000..7e253d0
--- /dev/null
+++ b/clang/lib/Headers/__float_infinity_nan.h
@@ -0,0 +1,20 @@
+/*===---- __float_infinity_nan.h -------------------------------------------===
+ *
+ * Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+ * See https://llvm.org/LICENSE.txt for license information.
+ * SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+ *
+ *===-----------------------------------------------------------------------===
+ */
+
+#ifndef __CLANG_FLOAT_INFINITY_NAN_H
+#define __CLANG_FLOAT_INFINITY_NAN_H
+
+/* C23 5.2.5.3.3p29-30 */
+#undef INFINITY
+#undef NAN
+
+#define INFINITY (__builtin_inff())
+#define NAN (__builtin_nanf(""))
+
+#endif /* __CLANG_FLOAT_INFINITY_NAN_H */
diff --git a/clang/lib/Headers/avx2intrin.h b/clang/lib/Headers/avx2intrin.h
index fdb825f..3cbaaec 100644
--- a/clang/lib/Headers/avx2intrin.h
+++ b/clang/lib/Headers/avx2intrin.h
@@ -1975,10 +1975,9 @@ _mm256_shuffle_epi8(__m256i __a, __m256i __b) {
/// \param __b
///    A 256-bit integer vector.
/// \returns A 256-bit integer vector containing the result.
-static __inline__ __m256i __DEFAULT_FN_ATTRS256
-_mm256_sign_epi8(__m256i __a, __m256i __b)
-{
- return (__m256i)__builtin_ia32_psignb256((__v32qi)__a, (__v32qi)__b);
+static __inline__ __m256i __DEFAULT_FN_ATTRS256_CONSTEXPR
+_mm256_sign_epi8(__m256i __a, __m256i __b) {
+ return (__m256i)__builtin_ia32_psignb256((__v32qi)__a, (__v32qi)__b);
}
/// Sets each element of the result to the corresponding element of the
@@ -1996,10 +1995,9 @@ _mm256_sign_epi8(__m256i __a, __m256i __b)
/// \param __b
/// A 256-bit vector of [16 x i16].
/// \returns A 256-bit vector of [16 x i16] containing the result.
-static __inline__ __m256i __DEFAULT_FN_ATTRS256
-_mm256_sign_epi16(__m256i __a, __m256i __b)
-{
- return (__m256i)__builtin_ia32_psignw256((__v16hi)__a, (__v16hi)__b);
+static __inline__ __m256i __DEFAULT_FN_ATTRS256_CONSTEXPR
+_mm256_sign_epi16(__m256i __a, __m256i __b) {
+ return (__m256i)__builtin_ia32_psignw256((__v16hi)__a, (__v16hi)__b);
}
/// Sets each element of the result to the corresponding element of the
@@ -2017,10 +2015,9 @@ _mm256_sign_epi16(__m256i __a, __m256i __b)
/// \param __b
/// A 256-bit vector of [8 x i32].
/// \returns A 256-bit vector of [8 x i32] containing the result.
-static __inline__ __m256i __DEFAULT_FN_ATTRS256
-_mm256_sign_epi32(__m256i __a, __m256i __b)
-{
- return (__m256i)__builtin_ia32_psignd256((__v8si)__a, (__v8si)__b);
+static __inline__ __m256i __DEFAULT_FN_ATTRS256_CONSTEXPR
+_mm256_sign_epi32(__m256i __a, __m256i __b) {
+ return (__m256i)__builtin_ia32_psignd256((__v8si)__a, (__v8si)__b);
}
/// Shifts each 128-bit half of the 256-bit integer vector \a a left by
diff --git a/clang/lib/Headers/float.h b/clang/lib/Headers/float.h
index 30427c2..82974f6 100644
--- a/clang/lib/Headers/float.h
+++ b/clang/lib/Headers/float.h
@@ -7,13 +7,21 @@
*===-----------------------------------------------------------------------===
*/
-#ifndef __CLANG_FLOAT_H
-#define __CLANG_FLOAT_H
-
#if defined(__MVS__) && __has_include_next(<float.h>)
+#include <__float_header_macro.h>
#include_next <float.h>
#else
+#if !defined(__need_infinity_nan)
+#define __need_float_float
+#if (defined(__STDC_VERSION__) && __STDC_VERSION__ >= 202311L) || \
+ !defined(__STRICT_ANSI__)
+#define __need_infinity_nan
+#endif
+#include <__float_header_macro.h>
+#endif
+
+#ifdef __need_float_float
/* If we're on MinGW, fall back to the system's float.h, which might have
* additional definitions provided for Windows.
* For more details see http://msdn.microsoft.com/en-us/library/y0ybw9fy.aspx
@@ -26,171 +34,15 @@
# include_next <float.h>
-/* Undefine anything that we'll be redefining below. */
-# undef FLT_EVAL_METHOD
-# undef FLT_ROUNDS
-# undef FLT_RADIX
-# undef FLT_MANT_DIG
-# undef DBL_MANT_DIG
-# undef LDBL_MANT_DIG
-#if (defined(__STDC_VERSION__) && __STDC_VERSION__ >= 199901L) || \
- !defined(__STRICT_ANSI__) || \
- (defined(__cplusplus) && __cplusplus >= 201103L) || \
- (__STDC_HOSTED__ && defined(_AIX) && defined(_ALL_SOURCE))
-# undef DECIMAL_DIG
-# endif
-# undef FLT_DIG
-# undef DBL_DIG
-# undef LDBL_DIG
-# undef FLT_MIN_EXP
-# undef DBL_MIN_EXP
-# undef LDBL_MIN_EXP
-# undef FLT_MIN_10_EXP
-# undef DBL_MIN_10_EXP
-# undef LDBL_MIN_10_EXP
-# undef FLT_MAX_EXP
-# undef DBL_MAX_EXP
-# undef LDBL_MAX_EXP
-# undef FLT_MAX_10_EXP
-# undef DBL_MAX_10_EXP
-# undef LDBL_MAX_10_EXP
-# undef FLT_MAX
-# undef DBL_MAX
-# undef LDBL_MAX
-# undef FLT_EPSILON
-# undef DBL_EPSILON
-# undef LDBL_EPSILON
-# undef FLT_MIN
-# undef DBL_MIN
-# undef LDBL_MIN
-#if (defined(__STDC_VERSION__) && __STDC_VERSION__ >= 201112L) || \
- !defined(__STRICT_ANSI__) || \
- (defined(__cplusplus) && __cplusplus >= 201703L) || \
- (__STDC_HOSTED__ && defined(_AIX) && defined(_ALL_SOURCE))
-# undef FLT_TRUE_MIN
-# undef DBL_TRUE_MIN
-# undef LDBL_TRUE_MIN
-# undef FLT_DECIMAL_DIG
-# undef DBL_DECIMAL_DIG
-# undef LDBL_DECIMAL_DIG
-# undef FLT_HAS_SUBNORM
-# undef DBL_HAS_SUBNORM
-# undef LDBL_HAS_SUBNORM
-# endif
-#if (defined(__STDC_VERSION__) && __STDC_VERSION__ >= 202311L) || \
- !defined(__STRICT_ANSI__)
-# undef FLT_NORM_MAX
-# undef DBL_NORM_MAX
-# undef LDBL_NORM_MAX
-#endif
-#endif
-
-#if (defined(__STDC_VERSION__) && __STDC_VERSION__ >= 202311L) || \
- !defined(__STRICT_ANSI__)
-# undef INFINITY
-# undef NAN
-# undef FLT_SNAN
-# undef DBL_SNAN
-# undef LDBL_SNAN
-#endif
-
-/* Characteristics of floating point types, C99 5.2.4.2.2 */
-
-#if (defined(__STDC_VERSION__) && __STDC_VERSION__ >= 199901L) || \
- (defined(__cplusplus) && __cplusplus >= 201103L)
-#define FLT_EVAL_METHOD __FLT_EVAL_METHOD__
#endif
-#define FLT_ROUNDS (__builtin_flt_rounds())
-#define FLT_RADIX __FLT_RADIX__
-#define FLT_MANT_DIG __FLT_MANT_DIG__
-#define DBL_MANT_DIG __DBL_MANT_DIG__
-#define LDBL_MANT_DIG __LDBL_MANT_DIG__
-
-#if (defined(__STDC_VERSION__) && __STDC_VERSION__ >= 199901L) || \
- !defined(__STRICT_ANSI__) || \
- (defined(__cplusplus) && __cplusplus >= 201103L) || \
- (__STDC_HOSTED__ && defined(_AIX) && defined(_ALL_SOURCE))
-# define DECIMAL_DIG __DECIMAL_DIG__
-#endif
-
-#define FLT_DIG __FLT_DIG__
-#define DBL_DIG __DBL_DIG__
-#define LDBL_DIG __LDBL_DIG__
-
-#define FLT_MIN_EXP __FLT_MIN_EXP__
-#define DBL_MIN_EXP __DBL_MIN_EXP__
-#define LDBL_MIN_EXP __LDBL_MIN_EXP__
-
-#define FLT_MIN_10_EXP __FLT_MIN_10_EXP__
-#define DBL_MIN_10_EXP __DBL_MIN_10_EXP__
-#define LDBL_MIN_10_EXP __LDBL_MIN_10_EXP__
-
-#define FLT_MAX_EXP __FLT_MAX_EXP__
-#define DBL_MAX_EXP __DBL_MAX_EXP__
-#define LDBL_MAX_EXP __LDBL_MAX_EXP__
-
-#define FLT_MAX_10_EXP __FLT_MAX_10_EXP__
-#define DBL_MAX_10_EXP __DBL_MAX_10_EXP__
-#define LDBL_MAX_10_EXP __LDBL_MAX_10_EXP__
-
-#define FLT_MAX __FLT_MAX__
-#define DBL_MAX __DBL_MAX__
-#define LDBL_MAX __LDBL_MAX__
-
-#define FLT_EPSILON __FLT_EPSILON__
-#define DBL_EPSILON __DBL_EPSILON__
-#define LDBL_EPSILON __LDBL_EPSILON__
-
-#define FLT_MIN __FLT_MIN__
-#define DBL_MIN __DBL_MIN__
-#define LDBL_MIN __LDBL_MIN__
-
-#if (defined(__STDC_VERSION__) && __STDC_VERSION__ >= 201112L) || \
- !defined(__STRICT_ANSI__) || \
- (defined(__cplusplus) && __cplusplus >= 201703L) || \
- (__STDC_HOSTED__ && defined(_AIX) && defined(_ALL_SOURCE))
-# define FLT_TRUE_MIN __FLT_DENORM_MIN__
-# define DBL_TRUE_MIN __DBL_DENORM_MIN__
-# define LDBL_TRUE_MIN __LDBL_DENORM_MIN__
-# define FLT_DECIMAL_DIG __FLT_DECIMAL_DIG__
-# define DBL_DECIMAL_DIG __DBL_DECIMAL_DIG__
-# define LDBL_DECIMAL_DIG __LDBL_DECIMAL_DIG__
-# define FLT_HAS_SUBNORM __FLT_HAS_DENORM__
-# define DBL_HAS_SUBNORM __DBL_HAS_DENORM__
-# define LDBL_HAS_SUBNORM __LDBL_HAS_DENORM__
+#include <__float_float.h>
+#undef __need_float_float
#endif
-#if (defined(__STDC_VERSION__) && __STDC_VERSION__ >= 202311L) || \
- !defined(__STRICT_ANSI__)
- /* C23 5.2.5.3.2p28 */
-# define FLT_SNAN (__builtin_nansf(""))
-# define DBL_SNAN (__builtin_nans(""))
-# define LDBL_SNAN (__builtin_nansl(""))
-
- /* C23 5.2.5.3.3p29-30 */
-# define INFINITY (__builtin_inff())
-# define NAN (__builtin_nanf(""))
-
- /* C23 5.2.5.3.3p32 */
-# define FLT_NORM_MAX __FLT_NORM_MAX__
-# define DBL_NORM_MAX __DBL_NORM_MAX__
-# define LDBL_NORM_MAX __LDBL_NORM_MAX__
+#ifdef __need_infinity_nan
+#include <__float_infinity_nan.h>
+#undef __need_infinity_nan
#endif
-#ifdef __STDC_WANT_IEC_60559_TYPES_EXT__
-# define FLT16_MANT_DIG __FLT16_MANT_DIG__
-# define FLT16_DECIMAL_DIG __FLT16_DECIMAL_DIG__
-# define FLT16_DIG __FLT16_DIG__
-# define FLT16_MIN_EXP __FLT16_MIN_EXP__
-# define FLT16_MIN_10_EXP __FLT16_MIN_10_EXP__
-# define FLT16_MAX_EXP __FLT16_MAX_EXP__
-# define FLT16_MAX_10_EXP __FLT16_MAX_10_EXP__
-# define FLT16_MAX __FLT16_MAX__
-# define FLT16_EPSILON __FLT16_EPSILON__
-# define FLT16_MIN __FLT16_MIN__
-# define FLT16_TRUE_MIN __FLT16_TRUE_MIN__
-#endif /* __STDC_WANT_IEC_60559_TYPES_EXT__ */
-
#endif /* __MVS__ */
-#endif /* __CLANG_FLOAT_H */
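Net effect of the split: <float.h> now only sets the header guard and forwards to the sub-headers, so existing consumers compile unchanged. A minimal sketch:

  #include <float.h>

  double largest = DBL_MAX; // from __float_float.h
  #ifdef INFINITY
  float inf = INFINITY;     // from __float_infinity_nan.h (C23 / non-strict only)
  #endif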
diff --git a/clang/lib/Headers/module.modulemap b/clang/lib/Headers/module.modulemap
index bdf5119..2e4d533 100644
--- a/clang/lib/Headers/module.modulemap
+++ b/clang/lib/Headers/module.modulemap
@@ -171,8 +171,22 @@ module _Builtin_intrinsics [system] [extern_c] {
// that module. The system float.h (if present) will be treated
// as a textual header in the system module.
module _Builtin_float [system] {
- header "float.h"
- export *
+ textual header "float.h"
+
+ explicit module float {
+ header "__float_float.h"
+ export *
+ }
+
+ explicit module header_macro {
+ header "__float_header_macro.h"
+ export *
+ }
+
+ explicit module infinity_nan {
+ header "__float_infinity_nan.h"
+ export *
+ }
}
module _Builtin_inttypes [system] {
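With float.h textual, each sub-header is its own explicit module, so a translation unit that only needs the C23 INFINITY/NAN macros can pull in just that piece. A hedged sketch:

  // Maps to the _Builtin_float.infinity_nan submodule declared above.
  #include <__float_infinity_nan.h>

  float positiveInfinity = INFINITY;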
diff --git a/clang/lib/Headers/sifive_vector.h b/clang/lib/Headers/sifive_vector.h
index 4e67ad6..ae01627 100644
--- a/clang/lib/Headers/sifive_vector.h
+++ b/clang/lib/Headers/sifive_vector.h
@@ -115,4 +115,60 @@
#endif
#endif
+#define __riscv_sf_vsettnt_e8w1(atn) __riscv_sf_vsettnt(atn, 0, 1);
+#define __riscv_sf_vsettnt_e8w2(atn) __riscv_sf_vsettnt(atn, 0, 2);
+#define __riscv_sf_vsettnt_e8w4(atn) __riscv_sf_vsettnt(atn, 0, 3);
+#define __riscv_sf_vsettnt_e16w1(atn) __riscv_sf_vsettnt(atn, 1, 1);
+#define __riscv_sf_vsettnt_e16w2(atn) __riscv_sf_vsettnt(atn, 1, 2);
+#define __riscv_sf_vsettnt_e16w4(atn) __riscv_sf_vsettnt(atn, 1, 3);
+#define __riscv_sf_vsettnt_e32w1(atn) __riscv_sf_vsettnt(atn, 2, 1);
+#define __riscv_sf_vsettnt_e32w2(atn) __riscv_sf_vsettnt(atn, 2, 2);
+#define __riscv_sf_vsettm_e8w1(atm) __riscv_sf_vsettm(atm, 0, 1);
+#define __riscv_sf_vsettm_e8w2(atm) __riscv_sf_vsettm(atm, 0, 2);
+#define __riscv_sf_vsettm_e8w4(atm) __riscv_sf_vsettm(atm, 0, 3);
+#define __riscv_sf_vsettm_e16w1(atm) __riscv_sf_vsettm(atm, 1, 1);
+#define __riscv_sf_vsettm_e16w2(atm) __riscv_sf_vsettm(atm, 1, 2);
+#define __riscv_sf_vsettm_e16w4(atm) __riscv_sf_vsettm(atm, 1, 3);
+#define __riscv_sf_vsettm_e32w1(atm) __riscv_sf_vsettm(atm, 2, 1);
+#define __riscv_sf_vsettm_e32w2(atm) __riscv_sf_vsettm(atm, 2, 2);
+#define __riscv_sf_vsettn_e8w1(atn) __riscv_sf_vsettn(atn, 0, 1);
+#define __riscv_sf_vsettn_e8w2(atn) __riscv_sf_vsettn(atn, 0, 2);
+#define __riscv_sf_vsettn_e8w4(atn) __riscv_sf_vsettn(atn, 0, 3);
+#define __riscv_sf_vsettn_e16w1(atn) __riscv_sf_vsettn(atn, 1, 1);
+#define __riscv_sf_vsettn_e16w2(atn) __riscv_sf_vsettn(atn, 1, 2);
+#define __riscv_sf_vsettn_e16w4(atn) __riscv_sf_vsettn(atn, 1, 3);
+#define __riscv_sf_vsettn_e32w1(atn) __riscv_sf_vsettn(atn, 2, 1);
+#define __riscv_sf_vsettn_e32w2(atn) __riscv_sf_vsettn(atn, 2, 2);
+#define __riscv_sf_vsettk_e8w1(atk) __riscv_sf_vsettk(atk, 0, 1);
+#define __riscv_sf_vsettk_e8w2(atk) __riscv_sf_vsettk(atk, 0, 2);
+#define __riscv_sf_vsettk_e8w4(atk) __riscv_sf_vsettk(atk, 0, 3);
+#define __riscv_sf_vsettk_e16w1(atk) __riscv_sf_vsettk(atk, 1, 1);
+#define __riscv_sf_vsettk_e16w2(atk) __riscv_sf_vsettk(atk, 1, 2);
+#define __riscv_sf_vsettk_e16w4(atk) __riscv_sf_vsettk(atk, 1, 3);
+#define __riscv_sf_vsettk_e32w1(atk) __riscv_sf_vsettk(atk, 2, 1);
+#define __riscv_sf_vsettk_e32w2(atk) __riscv_sf_vsettk(atk, 2, 2);
+#define __riscv_sf_vtzero_t_e8w1(tile, atm, atn) \
+ __riscv_sf_vtzero_t(tile, atm, atn, 3, 1);
+#define __riscv_sf_vtzero_t_e8w2(tile, atm, atn) \
+ __riscv_sf_vtzero_t(tile, atm, atn, 3, 2);
+#define __riscv_sf_vtzero_t_e8w4(tile, atm, atn) \
+ __riscv_sf_vtzero_t(tile, atm, atn, 3, 4);
+#define __riscv_sf_vtzero_t_e16w1(tile, atm, atn) \
+ __riscv_sf_vtzero_t(tile, atm, atn, 4, 1);
+#define __riscv_sf_vtzero_t_e16w2(tile, atm, atn) \
+ __riscv_sf_vtzero_t(tile, atm, atn, 4, 2);
+#define __riscv_sf_vtzero_t_e16w4(tile, atm, atn) \
+ __riscv_sf_vtzero_t(tile, atm, atn, 4, 4);
+#define __riscv_sf_vtzero_t_e32w1(tile, atm, atn) \
+ __riscv_sf_vtzero_t(tile, atm, atn, 5, 1);
+#define __riscv_sf_vtzero_t_e32w2(tile, atm, atn) \
+ __riscv_sf_vtzero_t(tile, atm, atn, 5, 2);
+#if __riscv_v_elen >= 64
+#define __riscv_sf_vsettnt_e64w1(atn) __riscv_sf_vsettnt(atn, 3, 1);
+#define __riscv_sf_vsettm_e64w1(atm) __riscv_sf_vsettm(atm, 3, 1);
+#define __riscv_sf_vsettn_e64w1(atn) __riscv_sf_vsettn(atn, 3, 1);
+#define __riscv_sf_vsettk_e64w1(atk) __riscv_sf_vsettk(atk, 3, 1);
+#define __riscv_sf_vtzero_t_e64w1(tile, atm, atn) \
+ __riscv_sf_vtzero_t(tile, atm, atn, 6, 1);
+#endif
#endif //_SIFIVE_VECTOR_H_
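The new convenience macros just bake the SEW and widen operands into the underlying intrinsics. A minimal usage sketch, assuming a toolchain with the SiFive matrix extension these intrinsics require; note each expansion carries its own trailing semicolon, so the macros only compose as full statements:

    #include <sifive_vector.h>
    void configure_tiles(unsigned long n, unsigned long k) {
      __riscv_sf_vsettnt_e16w2(n)  // expands to __riscv_sf_vsettnt(n, 1, 2);
      __riscv_sf_vsettk_e16w2(k)   // expands to __riscv_sf_vsettk(k, 1, 2);
    }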
diff --git a/clang/lib/Headers/tmmintrin.h b/clang/lib/Headers/tmmintrin.h
index 5d0f20f..cb4b36e 100644
--- a/clang/lib/Headers/tmmintrin.h
+++ b/clang/lib/Headers/tmmintrin.h
@@ -26,9 +26,6 @@
#define __zext128(x) \
(__m128i) __builtin_shufflevector((__v2si)(x), __extension__(__v2si){}, 0, \
1, 2, 3)
-#define __anyext128(x) \
- (__m128i) __builtin_shufflevector((__v2si)(x), __extension__(__v2si){}, 0, \
- 1, -1, -1)
#if defined(__cplusplus) && (__cplusplus >= 201103L)
#define __DEFAULT_FN_ATTRS_CONSTEXPR __DEFAULT_FN_ATTRS constexpr
@@ -641,10 +638,9 @@ _mm_shuffle_pi8(__m64 __a, __m64 __b) {
/// A 128-bit integer vector containing control bytes corresponding to
/// positions in the destination.
/// \returns A 128-bit integer vector containing the resultant values.
-static __inline__ __m128i __DEFAULT_FN_ATTRS
-_mm_sign_epi8(__m128i __a, __m128i __b)
-{
- return (__m128i)__builtin_ia32_psignb128((__v16qi)__a, (__v16qi)__b);
+static __inline__ __m128i __DEFAULT_FN_ATTRS_CONSTEXPR
+_mm_sign_epi8(__m128i __a, __m128i __b) {
+ return (__m128i)__builtin_ia32_psignb128((__v16qi)__a, (__v16qi)__b);
}
/// For each 16-bit integer in the first source operand, perform one of
@@ -667,10 +663,9 @@ _mm_sign_epi8(__m128i __a, __m128i __b)
/// A 128-bit integer vector containing control words corresponding to
/// positions in the destination.
/// \returns A 128-bit integer vector containing the resultant values.
-static __inline__ __m128i __DEFAULT_FN_ATTRS
-_mm_sign_epi16(__m128i __a, __m128i __b)
-{
- return (__m128i)__builtin_ia32_psignw128((__v8hi)__a, (__v8hi)__b);
+static __inline__ __m128i __DEFAULT_FN_ATTRS_CONSTEXPR
+_mm_sign_epi16(__m128i __a, __m128i __b) {
+ return (__m128i)__builtin_ia32_psignw128((__v8hi)__a, (__v8hi)__b);
}
/// For each 32-bit integer in the first source operand, perform one of
@@ -693,10 +688,9 @@ _mm_sign_epi16(__m128i __a, __m128i __b)
/// A 128-bit integer vector containing control doublewords corresponding to
/// positions in the destination.
/// \returns A 128-bit integer vector containing the resultant values.
-static __inline__ __m128i __DEFAULT_FN_ATTRS
-_mm_sign_epi32(__m128i __a, __m128i __b)
-{
- return (__m128i)__builtin_ia32_psignd128((__v4si)__a, (__v4si)__b);
+static __inline__ __m128i __DEFAULT_FN_ATTRS_CONSTEXPR
+_mm_sign_epi32(__m128i __a, __m128i __b) {
+ return (__m128i)__builtin_ia32_psignd128((__v4si)__a, (__v4si)__b);
}
/// For each 8-bit integer in the first source operand, perform one of
@@ -719,11 +713,10 @@ _mm_sign_epi32(__m128i __a, __m128i __b)
/// A 64-bit integer vector containing control bytes corresponding to
/// positions in the destination.
/// \returns A 64-bit integer vector containing the resultant values.
-static __inline__ __m64 __DEFAULT_FN_ATTRS
-_mm_sign_pi8(__m64 __a, __m64 __b)
-{
- return __trunc64(__builtin_ia32_psignb128((__v16qi)__anyext128(__a),
- (__v16qi)__anyext128(__b)));
+static __inline__ __m64 __DEFAULT_FN_ATTRS_CONSTEXPR _mm_sign_pi8(__m64 __a,
+ __m64 __b) {
+ return __trunc64(__builtin_ia32_psignb128((__v16qi)__zext128(__a),
+ (__v16qi)__zext128(__b)));
}
/// For each 16-bit integer in the first source operand, perform one of
@@ -746,11 +739,10 @@ _mm_sign_pi8(__m64 __a, __m64 __b)
/// A 64-bit integer vector containing control words corresponding to
/// positions in the destination.
/// \returns A 64-bit integer vector containing the resultant values.
-static __inline__ __m64 __DEFAULT_FN_ATTRS
-_mm_sign_pi16(__m64 __a, __m64 __b)
-{
- return __trunc64(__builtin_ia32_psignw128((__v8hi)__anyext128(__a),
- (__v8hi)__anyext128(__b)));
+static __inline__ __m64 __DEFAULT_FN_ATTRS_CONSTEXPR _mm_sign_pi16(__m64 __a,
+ __m64 __b) {
+ return __trunc64(
+ __builtin_ia32_psignw128((__v8hi)__zext128(__a), (__v8hi)__zext128(__b)));
}
/// For each 32-bit integer in the first source operand, perform one of
@@ -773,14 +765,12 @@ _mm_sign_pi16(__m64 __a, __m64 __b)
/// A 64-bit integer vector containing two control doublewords corresponding
/// to positions in the destination.
/// \returns A 64-bit integer vector containing the resultant values.
-static __inline__ __m64 __DEFAULT_FN_ATTRS
-_mm_sign_pi32(__m64 __a, __m64 __b)
-{
- return __trunc64(__builtin_ia32_psignd128((__v4si)__anyext128(__a),
- (__v4si)__anyext128(__b)));
+static __inline__ __m64 __DEFAULT_FN_ATTRS_CONSTEXPR _mm_sign_pi32(__m64 __a,
+ __m64 __b) {
+ return __trunc64(
+ __builtin_ia32_psignd128((__v4si)__zext128(__a), (__v4si)__zext128(__b)));
}
-#undef __anyext128
#undef __zext128
#undef __trunc64
#undef __DEFAULT_FN_ATTRS
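Making the _mm_sign_* family __DEFAULT_FN_ATTRS_CONSTEXPR (and zero-extending rather than any-extending the 64-bit forms, so the upper lanes are well defined) lets them fold in constant expressions. A minimal C++ sketch, assuming a clang new enough to constant-evaluate these builtins and vector casts:

    #include <tmmintrin.h>
    constexpr __v4si a = {1, -2, 3, 4};
    constexpr __v4si b = {-1, 1, 0, 2};
    constexpr __m128i r = _mm_sign_epi32((__m128i)a, (__m128i)b);
    static_assert(((__v4si)r)[0] == -1 && ((__v4si)r)[2] == 0, "");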
diff --git a/clang/lib/Lex/HeaderSearch.cpp b/clang/lib/Lex/HeaderSearch.cpp
index 238c5e2..65c324c 100644
--- a/clang/lib/Lex/HeaderSearch.cpp
+++ b/clang/lib/Lex/HeaderSearch.cpp
@@ -2186,6 +2186,8 @@ std::string HeaderSearch::suggestPathToFileForDiagnostics(
void clang::normalizeModuleCachePath(FileManager &FileMgr, StringRef Path,
SmallVectorImpl<char> &NormalizedPath) {
NormalizedPath.assign(Path.begin(), Path.end());
- FileMgr.makeAbsolutePath(NormalizedPath);
- llvm::sys::path::remove_dots(NormalizedPath);
+ if (!NormalizedPath.empty()) {
+ FileMgr.makeAbsolutePath(NormalizedPath);
+ llvm::sys::path::remove_dots(NormalizedPath);
+ }
}
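The new guard matters because makeAbsolutePath on an empty buffer would resolve it to the current working directory. A minimal sketch of the guarded behavior, assuming a FileManager FM:

    SmallString<256> Norm;
    clang::normalizeModuleCachePath(FM, "", Norm);
    // Norm stays empty instead of becoming the CWD, so callers (like the
    // dependency scanner below) can test it for "no module cache path".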
diff --git a/clang/lib/Parse/ParseHLSL.cpp b/clang/lib/Parse/ParseHLSL.cpp
index 51f2aef..c727ee3 100644
--- a/clang/lib/Parse/ParseHLSL.cpp
+++ b/clang/lib/Parse/ParseHLSL.cpp
@@ -126,15 +126,9 @@ Parser::ParsedSemantic Parser::ParseHLSLSemantic() {
// semantic index. The semantic index is the number at the end of
// the semantic, including leading zeroes. Digits located before
// the last letter are part of the semantic name.
- bool Invalid = false;
SmallString<256> Buffer;
Buffer.resize(Tok.getLength() + 1);
StringRef Identifier = PP.getSpelling(Tok, Buffer);
- if (Invalid) {
- Diag(Tok.getLocation(), diag::err_expected_semantic_identifier);
- return {};
- }
-
assert(Identifier.size() > 0);
// Determine the start of the semantic index.
unsigned IndexIndex = Identifier.find_last_not_of("0123456789") + 1;
diff --git a/clang/lib/Sema/CheckExprLifetime.cpp b/clang/lib/Sema/CheckExprLifetime.cpp
index e797400..f9665b5 100644
--- a/clang/lib/Sema/CheckExprLifetime.cpp
+++ b/clang/lib/Sema/CheckExprLifetime.cpp
@@ -155,6 +155,7 @@ getEntityLifetime(const InitializedEntity *Entity,
case InitializedEntity::EK_LambdaToBlockConversionBlockElement:
case InitializedEntity::EK_LambdaCapture:
case InitializedEntity::EK_VectorElement:
+ case InitializedEntity::EK_MatrixElement:
case InitializedEntity::EK_ComplexElement:
return {nullptr, LK_FullExpression};
diff --git a/clang/lib/Sema/SemaChecking.cpp b/clang/lib/Sema/SemaChecking.cpp
index 2990fd6..f451787 100644
--- a/clang/lib/Sema/SemaChecking.cpp
+++ b/clang/lib/Sema/SemaChecking.cpp
@@ -1498,6 +1498,24 @@ static void builtinAllocaAddrSpace(Sema &S, CallExpr *TheCall) {
TheCall->setType(S.Context.getPointerType(RT));
}
+static bool checkBuiltinInferAllocToken(Sema &S, CallExpr *TheCall) {
+ if (S.checkArgCountAtLeast(TheCall, 1))
+ return true;
+
+ for (Expr *Arg : TheCall->arguments()) {
+ // If argument is dependent on a template parameter, we can't resolve now.
+ if (Arg->isTypeDependent() || Arg->isValueDependent())
+ continue;
+ // Reject void types.
+ QualType ArgTy = Arg->IgnoreParenImpCasts()->getType();
+ if (ArgTy->isVoidType())
+ return S.Diag(Arg->getBeginLoc(), diag::err_param_with_void_type);
+ }
+
+ TheCall->setType(S.Context.UnsignedLongLongTy);
+ return false;
+}
+
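Per the check above, the builtin takes at least one argument, rejects void-typed arguments, and yields an unsigned long long. A minimal call sketch; the argument shown is purely illustrative:

    void use(unsigned N) {
      unsigned long long Tok = __builtin_infer_alloc_token(sizeof(int) * N);
      (void)Tok;
    }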
namespace {
enum PointerAuthOpKind {
PAO_Strip,
@@ -2779,6 +2797,10 @@ Sema::CheckBuiltinFunctionCall(FunctionDecl *FDecl, unsigned BuiltinID,
builtinAllocaAddrSpace(*this, TheCall);
}
break;
+ case Builtin::BI__builtin_infer_alloc_token:
+ if (checkBuiltinInferAllocToken(*this, TheCall))
+ return ExprError();
+ break;
case Builtin::BI__arithmetic_fence:
if (BuiltinArithmeticFence(TheCall))
return ExprError();
@@ -12351,14 +12373,9 @@ static void DiagnoseMixedUnicodeImplicitConversion(Sema &S, const Type *Source,
}
}
-enum CFIUncheckedCalleeChange {
- None,
- Adding,
- Discarding,
-};
-
-static CFIUncheckedCalleeChange AdjustingCFIUncheckedCallee(QualType From,
- QualType To) {
+bool Sema::DiscardingCFIUncheckedCallee(QualType From, QualType To) const {
+ From = Context.getCanonicalType(From);
+ To = Context.getCanonicalType(To);
QualType MaybePointee = From->getPointeeType();
if (!MaybePointee.isNull() && MaybePointee->getAs<FunctionType>())
From = MaybePointee;
@@ -12370,25 +12387,10 @@ static CFIUncheckedCalleeChange AdjustingCFIUncheckedCallee(QualType From,
if (const auto *ToFn = To->getAs<FunctionType>()) {
if (FromFn->getCFIUncheckedCalleeAttr() &&
!ToFn->getCFIUncheckedCalleeAttr())
- return Discarding;
- if (!FromFn->getCFIUncheckedCalleeAttr() &&
- ToFn->getCFIUncheckedCalleeAttr())
- return Adding;
+ return true;
}
}
- return None;
-}
-
-bool Sema::DiscardingCFIUncheckedCallee(QualType From, QualType To) const {
- From = Context.getCanonicalType(From);
- To = Context.getCanonicalType(To);
- return ::AdjustingCFIUncheckedCallee(From, To) == Discarding;
-}
-
-bool Sema::AddingCFIUncheckedCallee(QualType From, QualType To) const {
- From = Context.getCanonicalType(From);
- To = Context.getCanonicalType(To);
- return ::AdjustingCFIUncheckedCallee(From, To) == Adding;
+ return false;
}
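Only the Discarding direction is diagnosed now; the Adding case disappears along with SemaOverload's special allowance later in this patch. A minimal sketch of the conversion this helper flags, assuming the cfi_unchecked_callee function-type attribute:

    void unchecked_fn(void) __attribute__((cfi_unchecked_callee));
    void (*checked_ptr)(void) = unchecked_fn;  // discards cfi_unchecked_callee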
void Sema::CheckImplicitConversion(Expr *E, QualType T, SourceLocation CC,
diff --git a/clang/lib/Sema/SemaHLSL.cpp b/clang/lib/Sema/SemaHLSL.cpp
index 5b3e89f..96d5142 100644
--- a/clang/lib/Sema/SemaHLSL.cpp
+++ b/clang/lib/Sema/SemaHLSL.cpp
@@ -21,6 +21,7 @@
#include "clang/AST/Expr.h"
#include "clang/AST/HLSLResource.h"
#include "clang/AST/Type.h"
+#include "clang/AST/TypeBase.h"
#include "clang/AST/TypeLoc.h"
#include "clang/Basic/Builtins.h"
#include "clang/Basic/DiagnosticSema.h"
@@ -770,23 +771,81 @@ void SemaHLSL::ActOnTopLevelFunction(FunctionDecl *FD) {
}
}
-bool SemaHLSL::isSemanticValid(FunctionDecl *FD, DeclaratorDecl *D) {
- const auto *AnnotationAttr = D->getAttr<HLSLAnnotationAttr>();
- if (AnnotationAttr) {
- CheckSemanticAnnotation(FD, D, AnnotationAttr);
- return true;
+HLSLSemanticAttr *SemaHLSL::createSemantic(const SemanticInfo &Info,
+ DeclaratorDecl *TargetDecl) {
+ std::string SemanticName = Info.Semantic->getAttrName()->getName().upper();
+
+ if (SemanticName == "SV_DISPATCHTHREADID") {
+ return createSemanticAttr<HLSLSV_DispatchThreadIDAttr>(
+ *Info.Semantic, TargetDecl, Info.Index);
+ } else if (SemanticName == "SV_GROUPINDEX") {
+ return createSemanticAttr<HLSLSV_GroupIndexAttr>(*Info.Semantic, TargetDecl,
+ Info.Index);
+ } else if (SemanticName == "SV_GROUPTHREADID") {
+ return createSemanticAttr<HLSLSV_GroupThreadIDAttr>(*Info.Semantic,
+ TargetDecl, Info.Index);
+ } else if (SemanticName == "SV_GROUPID") {
+ return createSemanticAttr<HLSLSV_GroupIDAttr>(*Info.Semantic, TargetDecl,
+ Info.Index);
+ } else if (SemanticName == "SV_POSITION") {
+ return createSemanticAttr<HLSLSV_PositionAttr>(*Info.Semantic, TargetDecl,
+ Info.Index);
+ } else
+ Diag(Info.Semantic->getLoc(), diag::err_hlsl_unknown_semantic)
+ << *Info.Semantic;
+
+ return nullptr;
+}
+
+bool SemaHLSL::determineActiveSemanticOnScalar(FunctionDecl *FD,
+ DeclaratorDecl *D,
+ SemanticInfo &ActiveSemantic) {
+ if (ActiveSemantic.Semantic == nullptr) {
+ ActiveSemantic.Semantic = D->getAttr<HLSLSemanticAttr>();
+ if (ActiveSemantic.Semantic &&
+ ActiveSemantic.Semantic->isSemanticIndexExplicit())
+ ActiveSemantic.Index = ActiveSemantic.Semantic->getSemanticIndex();
+ }
+
+ if (!ActiveSemantic.Semantic) {
+ Diag(D->getLocation(), diag::err_hlsl_missing_semantic_annotation);
+ return false;
+ }
+
+ auto *A = createSemantic(ActiveSemantic, D);
+ if (!A)
+ return false;
+
+ checkSemanticAnnotation(FD, D, A);
+ FD->addAttr(A);
+ return true;
+}
+
+bool SemaHLSL::determineActiveSemantic(FunctionDecl *FD, DeclaratorDecl *D,
+ SemanticInfo &ActiveSemantic) {
+ if (ActiveSemantic.Semantic == nullptr) {
+ ActiveSemantic.Semantic = D->getAttr<HLSLSemanticAttr>();
+ if (ActiveSemantic.Semantic &&
+ ActiveSemantic.Semantic->isSemanticIndexExplicit())
+ ActiveSemantic.Index = ActiveSemantic.Semantic->getSemanticIndex();
}
const Type *T = D->getType()->getUnqualifiedDesugaredType();
const RecordType *RT = dyn_cast<RecordType>(T);
if (!RT)
- return false;
+ return determineActiveSemanticOnScalar(FD, D, ActiveSemantic);
const RecordDecl *RD = RT->getDecl();
for (FieldDecl *Field : RD->fields()) {
- if (!isSemanticValid(FD, Field))
+ SemanticInfo Info = ActiveSemantic;
+ if (!determineActiveSemantic(FD, Field, Info)) {
+ Diag(Field->getLocation(), diag::note_hlsl_semantic_used_here) << Field;
return false;
+ }
+ if (ActiveSemantic.Semantic)
+ ActiveSemantic = Info;
}
+
return true;
}
@@ -853,8 +912,11 @@ void SemaHLSL::CheckEntryPoint(FunctionDecl *FD) {
}
for (ParmVarDecl *Param : FD->parameters()) {
- if (!isSemanticValid(FD, Param)) {
- Diag(FD->getLocation(), diag::err_hlsl_missing_semantic_annotation);
+ SemanticInfo ActiveSemantic;
+ ActiveSemantic.Semantic = nullptr;
+ ActiveSemantic.Index = std::nullopt;
+
+ if (!determineActiveSemantic(FD, Param, ActiveSemantic)) {
Diag(Param->getLocation(), diag::note_previous_decl) << Param;
FD->setInvalidDecl();
}
@@ -862,31 +924,31 @@ void SemaHLSL::CheckEntryPoint(FunctionDecl *FD) {
// FIXME: Verify return type semantic annotation.
}
-void SemaHLSL::CheckSemanticAnnotation(
- FunctionDecl *EntryPoint, const Decl *Param,
- const HLSLAnnotationAttr *AnnotationAttr) {
+void SemaHLSL::checkSemanticAnnotation(FunctionDecl *EntryPoint,
+ const Decl *Param,
+ const HLSLSemanticAttr *SemanticAttr) {
auto *ShaderAttr = EntryPoint->getAttr<HLSLShaderAttr>();
assert(ShaderAttr && "Entry point has no shader attribute");
llvm::Triple::EnvironmentType ST = ShaderAttr->getType();
- switch (AnnotationAttr->getKind()) {
+ switch (SemanticAttr->getKind()) {
case attr::HLSLSV_DispatchThreadID:
case attr::HLSLSV_GroupIndex:
case attr::HLSLSV_GroupThreadID:
case attr::HLSLSV_GroupID:
if (ST == llvm::Triple::Compute)
return;
- DiagnoseAttrStageMismatch(AnnotationAttr, ST, {llvm::Triple::Compute});
+ DiagnoseAttrStageMismatch(SemanticAttr, ST, {llvm::Triple::Compute});
break;
case attr::HLSLSV_Position:
// TODO(#143523): allow use on other shader types & output once the overall
// semantic logic is implemented.
if (ST == llvm::Triple::Pixel)
return;
- DiagnoseAttrStageMismatch(AnnotationAttr, ST, {llvm::Triple::Pixel});
+ DiagnoseAttrStageMismatch(SemanticAttr, ST, {llvm::Triple::Pixel});
break;
default:
- llvm_unreachable("Unknown HLSLAnnotationAttr");
+ llvm_unreachable("Unknown SemanticAttr");
}
}
@@ -1661,28 +1723,30 @@ void SemaHLSL::diagnoseSystemSemanticAttr(Decl *D, const ParsedAttr &AL,
diagnoseInputIDType(ValueType, AL);
if (IsOutput)
Diag(AL.getLoc(), diag::err_hlsl_semantic_output_not_supported) << AL;
- Attribute = createSemanticAttr<HLSLSV_DispatchThreadIDAttr>(AL, Index);
+ Attribute =
+ createSemanticAttr<HLSLSV_DispatchThreadIDAttr>(AL, nullptr, Index);
} else if (SemanticName == "SV_GROUPINDEX") {
if (IsOutput)
Diag(AL.getLoc(), diag::err_hlsl_semantic_output_not_supported) << AL;
- Attribute = createSemanticAttr<HLSLSV_GroupIndexAttr>(AL, Index);
+ Attribute = createSemanticAttr<HLSLSV_GroupIndexAttr>(AL, nullptr, Index);
} else if (SemanticName == "SV_GROUPTHREADID") {
diagnoseInputIDType(ValueType, AL);
if (IsOutput)
Diag(AL.getLoc(), diag::err_hlsl_semantic_output_not_supported) << AL;
- Attribute = createSemanticAttr<HLSLSV_GroupThreadIDAttr>(AL, Index);
+ Attribute =
+ createSemanticAttr<HLSLSV_GroupThreadIDAttr>(AL, nullptr, Index);
} else if (SemanticName == "SV_GROUPID") {
diagnoseInputIDType(ValueType, AL);
if (IsOutput)
Diag(AL.getLoc(), diag::err_hlsl_semantic_output_not_supported) << AL;
- Attribute = createSemanticAttr<HLSLSV_GroupIDAttr>(AL, Index);
+ Attribute = createSemanticAttr<HLSLSV_GroupIDAttr>(AL, nullptr, Index);
} else if (SemanticName == "SV_POSITION") {
const auto *VT = ValueType->getAs<VectorType>();
if (!ValueType->hasFloatingRepresentation() ||
(VT && VT->getNumElements() > 4))
Diag(AL.getLoc(), diag::err_hlsl_attr_invalid_type)
<< AL << "float/float1/float2/float3/float4";
- Attribute = createSemanticAttr<HLSLSV_PositionAttr>(AL, Index);
+ Attribute = createSemanticAttr<HLSLSV_PositionAttr>(AL, nullptr, Index);
} else
Diag(AL.getLoc(), diag::err_hlsl_unknown_semantic) << AL;
@@ -3369,6 +3433,11 @@ static void BuildFlattenedTypeList(QualType BaseTy,
List.insert(List.end(), VT->getNumElements(), VT->getElementType());
continue;
}
+ if (const auto *MT = dyn_cast<ConstantMatrixType>(T)) {
+ List.insert(List.end(), MT->getNumElementsFlattened(),
+ MT->getElementType());
+ continue;
+ }
if (const auto *RD = T->getAsCXXRecordDecl()) {
if (RD->isStandardLayout())
RD = RD->getStandardLayoutBaseWithFields();
@@ -4167,6 +4236,32 @@ class InitListTransformer {
}
return true;
}
+ if (auto *MTy = Ty->getAs<ConstantMatrixType>()) {
+ unsigned Rows = MTy->getNumRows();
+ unsigned Cols = MTy->getNumColumns();
+ QualType ElemTy = MTy->getElementType();
+
+ for (unsigned C = 0; C < Cols; ++C) {
+ for (unsigned R = 0; R < Rows; ++R) {
+        // Row index literal.
+ Expr *RowIdx = IntegerLiteral::Create(
+ Ctx, llvm::APInt(Ctx.getIntWidth(Ctx.IntTy), R), Ctx.IntTy,
+ E->getBeginLoc());
+        // Column index literal.
+ Expr *ColIdx = IntegerLiteral::Create(
+ Ctx, llvm::APInt(Ctx.getIntWidth(Ctx.IntTy), C), Ctx.IntTy,
+ E->getBeginLoc());
+ ExprResult ElExpr = S.CreateBuiltinMatrixSubscriptExpr(
+ E, RowIdx, ColIdx, E->getEndLoc());
+ if (ElExpr.isInvalid())
+ return false;
+ if (!castInitializer(ElExpr.get()))
+ return false;
+ ElExpr.get()->setType(ElemTy);
+ }
+ }
+ return true;
+ }
if (auto *ArrTy = dyn_cast<ConstantArrayType>(Ty.getTypePtr())) {
uint64_t Size = ArrTy->getZExtSize();
@@ -4220,14 +4315,17 @@ class InitListTransformer {
return *(ArgIt++);
llvm::SmallVector<Expr *> Inits;
- assert(!isa<MatrixType>(Ty) && "Matrix types not yet supported in HLSL");
Ty = Ty.getDesugaredType(Ctx);
- if (Ty->isVectorType() || Ty->isConstantArrayType()) {
+ if (Ty->isVectorType() || Ty->isConstantArrayType() ||
+ Ty->isConstantMatrixType()) {
QualType ElTy;
uint64_t Size = 0;
if (auto *ATy = Ty->getAs<VectorType>()) {
ElTy = ATy->getElementType();
Size = ATy->getNumElements();
+ } else if (auto *CMTy = Ty->getAs<ConstantMatrixType>()) {
+ ElTy = CMTy->getElementType();
+ Size = CMTy->getNumElementsFlattened();
} else {
auto *VTy = cast<ConstantArrayType>(Ty.getTypePtr());
ElTy = VTy->getElementType();
diff --git a/clang/lib/Sema/SemaInit.cpp b/clang/lib/Sema/SemaInit.cpp
index 7debe33..073010d 100644
--- a/clang/lib/Sema/SemaInit.cpp
+++ b/clang/lib/Sema/SemaInit.cpp
@@ -17,6 +17,7 @@
#include "clang/AST/ExprCXX.h"
#include "clang/AST/ExprObjC.h"
#include "clang/AST/IgnoreExpr.h"
+#include "clang/AST/TypeBase.h"
#include "clang/AST/TypeLoc.h"
#include "clang/Basic/SourceManager.h"
#include "clang/Basic/Specifiers.h"
@@ -403,6 +404,9 @@ class InitListChecker {
unsigned &Index,
InitListExpr *StructuredList,
unsigned &StructuredIndex);
+ void CheckMatrixType(const InitializedEntity &Entity, InitListExpr *IList,
+ QualType DeclType, unsigned &Index,
+ InitListExpr *StructuredList, unsigned &StructuredIndex);
void CheckVectorType(const InitializedEntity &Entity,
InitListExpr *IList, QualType DeclType, unsigned &Index,
InitListExpr *StructuredList,
@@ -1004,7 +1008,8 @@ InitListChecker::FillInEmptyInitializations(const InitializedEntity &Entity,
return;
if (ElementEntity.getKind() == InitializedEntity::EK_ArrayElement ||
- ElementEntity.getKind() == InitializedEntity::EK_VectorElement)
+ ElementEntity.getKind() == InitializedEntity::EK_VectorElement ||
+ ElementEntity.getKind() == InitializedEntity::EK_MatrixElement)
ElementEntity.setElementIndex(Init);
if (Init >= NumInits && (ILE->hasArrayFiller() || SkipEmptyInitChecks))
@@ -1274,6 +1279,7 @@ static void warnBracedScalarInit(Sema &S, const InitializedEntity &Entity,
switch (Entity.getKind()) {
case InitializedEntity::EK_VectorElement:
+ case InitializedEntity::EK_MatrixElement:
case InitializedEntity::EK_ComplexElement:
case InitializedEntity::EK_ArrayElement:
case InitializedEntity::EK_Parameter:
@@ -1373,11 +1379,12 @@ void InitListChecker::CheckExplicitInitList(const InitializedEntity &Entity,
SemaRef.Diag(IList->getInit(Index)->getBeginLoc(), DK)
<< T << IList->getInit(Index)->getSourceRange();
} else {
- int initKind = T->isArrayType() ? 0 :
- T->isVectorType() ? 1 :
- T->isScalarType() ? 2 :
- T->isUnionType() ? 3 :
- 4;
+ int initKind = T->isArrayType() ? 0
+ : T->isVectorType() ? 1
+ : T->isMatrixType() ? 2
+ : T->isScalarType() ? 3
+ : T->isUnionType() ? 4
+ : 5;
unsigned DK = ExtraInitsIsError ? diag::err_excess_initializers
: diag::ext_excess_initializers;
@@ -1431,6 +1438,9 @@ void InitListChecker::CheckListElementTypes(const InitializedEntity &Entity,
} else if (DeclType->isVectorType()) {
CheckVectorType(Entity, IList, DeclType, Index,
StructuredList, StructuredIndex);
+ } else if (DeclType->isMatrixType()) {
+ CheckMatrixType(Entity, IList, DeclType, Index, StructuredList,
+ StructuredIndex);
} else if (const RecordDecl *RD = DeclType->getAsRecordDecl()) {
auto Bases =
CXXRecordDecl::base_class_const_range(CXXRecordDecl::base_class_const_iterator(),
@@ -1878,6 +1888,37 @@ void InitListChecker::CheckReferenceType(const InitializedEntity &Entity,
AggrDeductionCandidateParamTypes->push_back(DeclType);
}
+void InitListChecker::CheckMatrixType(const InitializedEntity &Entity,
+ InitListExpr *IList, QualType DeclType,
+ unsigned &Index,
+ InitListExpr *StructuredList,
+ unsigned &StructuredIndex) {
+ if (!SemaRef.getLangOpts().HLSL)
+ return;
+
+ const ConstantMatrixType *MT = DeclType->castAs<ConstantMatrixType>();
+ QualType ElemTy = MT->getElementType();
+ const unsigned MaxElts = MT->getNumElementsFlattened();
+
+ unsigned NumEltsInit = 0;
+ InitializedEntity ElemEnt =
+ InitializedEntity::InitializeElement(SemaRef.Context, 0, Entity);
+
+ while (NumEltsInit < MaxElts && Index < IList->getNumInits()) {
+ // Not a sublist: just consume directly.
+ ElemEnt.setElementIndex(Index);
+ CheckSubElementType(ElemEnt, IList, ElemTy, Index, StructuredList,
+ StructuredIndex);
+ ++NumEltsInit;
+ }
+
+  // For HLSL, the error for this case is handled in SemaHLSL's initializer
+  // list diagnostics. That means NumEltsInit is required to equal the number
+  // of initializers, so execution should never reach this point when that
+  // condition does not hold.
+ assert(NumEltsInit == MaxElts && "NumEltsInit must equal MaxElts");
+}
+
void InitListChecker::CheckVectorType(const InitializedEntity &Entity,
InitListExpr *IList, QualType DeclType,
unsigned &Index,
@@ -3640,6 +3681,9 @@ InitializedEntity::InitializedEntity(ASTContext &Context, unsigned Index,
} else if (const VectorType *VT = Parent.getType()->getAs<VectorType>()) {
Kind = EK_VectorElement;
Type = VT->getElementType();
+ } else if (const MatrixType *MT = Parent.getType()->getAs<MatrixType>()) {
+ Kind = EK_MatrixElement;
+ Type = MT->getElementType();
} else {
const ComplexType *CT = Parent.getType()->getAs<ComplexType>();
assert(CT && "Unexpected type");
@@ -3688,6 +3732,7 @@ DeclarationName InitializedEntity::getName() const {
case EK_Delegating:
case EK_ArrayElement:
case EK_VectorElement:
+ case EK_MatrixElement:
case EK_ComplexElement:
case EK_BlockElement:
case EK_LambdaToBlockConversionBlockElement:
@@ -3721,6 +3766,7 @@ ValueDecl *InitializedEntity::getDecl() const {
case EK_Delegating:
case EK_ArrayElement:
case EK_VectorElement:
+ case EK_MatrixElement:
case EK_ComplexElement:
case EK_BlockElement:
case EK_LambdaToBlockConversionBlockElement:
@@ -3754,6 +3800,7 @@ bool InitializedEntity::allowsNRVO() const {
case EK_Delegating:
case EK_ArrayElement:
case EK_VectorElement:
+ case EK_MatrixElement:
case EK_ComplexElement:
case EK_BlockElement:
case EK_LambdaToBlockConversionBlockElement:
@@ -3793,6 +3840,9 @@ unsigned InitializedEntity::dumpImpl(raw_ostream &OS) const {
case EK_Delegating: OS << "Delegating"; break;
case EK_ArrayElement: OS << "ArrayElement " << Index; break;
case EK_VectorElement: OS << "VectorElement " << Index; break;
+ case EK_MatrixElement:
+ OS << "MatrixElement " << Index;
+ break;
case EK_ComplexElement: OS << "ComplexElement " << Index; break;
case EK_BlockElement: OS << "Block"; break;
case EK_LambdaToBlockConversionBlockElement:
@@ -6030,7 +6080,7 @@ static void TryOrBuildParenListInitialization(
Sequence.SetFailed(InitializationSequence::FK_ParenthesizedListInitFailed);
if (!VerifyOnly) {
QualType T = Entity.getType();
- int InitKind = T->isArrayType() ? 0 : T->isUnionType() ? 3 : 4;
+ int InitKind = T->isArrayType() ? 0 : T->isUnionType() ? 4 : 5;
SourceRange ExcessInitSR(Args[EntityIndexToProcess]->getBeginLoc(),
Args.back()->getEndLoc());
S.Diag(Kind.getLocation(), diag::err_excess_initializers)
@@ -6823,7 +6873,8 @@ void InitializationSequence::InitializeFrom(Sema &S,
// For HLSL ext vector types we allow list initialization behavior for C++
// functional cast expressions which look like constructor syntax. This is
// accomplished by converting initialization arguments to InitListExpr.
- if (S.getLangOpts().HLSL && Args.size() > 1 && DestType->isExtVectorType() &&
+ if (S.getLangOpts().HLSL && Args.size() > 1 &&
+ (DestType->isExtVectorType() || DestType->isConstantMatrixType()) &&
(SourceType.isNull() ||
!Context.hasSameUnqualifiedType(SourceType, DestType))) {
InitListExpr *ILE = new (Context)
@@ -6988,6 +7039,7 @@ static AssignmentAction getAssignmentAction(const InitializedEntity &Entity,
case InitializedEntity::EK_Binding:
case InitializedEntity::EK_ArrayElement:
case InitializedEntity::EK_VectorElement:
+ case InitializedEntity::EK_MatrixElement:
case InitializedEntity::EK_ComplexElement:
case InitializedEntity::EK_BlockElement:
case InitializedEntity::EK_LambdaToBlockConversionBlockElement:
@@ -7013,6 +7065,7 @@ static bool shouldBindAsTemporary(const InitializedEntity &Entity) {
case InitializedEntity::EK_Base:
case InitializedEntity::EK_Delegating:
case InitializedEntity::EK_VectorElement:
+ case InitializedEntity::EK_MatrixElement:
case InitializedEntity::EK_ComplexElement:
case InitializedEntity::EK_Exception:
case InitializedEntity::EK_BlockElement:
@@ -7043,6 +7096,7 @@ static bool shouldDestroyEntity(const InitializedEntity &Entity) {
case InitializedEntity::EK_Base:
case InitializedEntity::EK_Delegating:
case InitializedEntity::EK_VectorElement:
+ case InitializedEntity::EK_MatrixElement:
case InitializedEntity::EK_ComplexElement:
case InitializedEntity::EK_BlockElement:
case InitializedEntity::EK_LambdaToBlockConversionBlockElement:
@@ -7096,6 +7150,7 @@ static SourceLocation getInitializationLoc(const InitializedEntity &Entity,
case InitializedEntity::EK_Base:
case InitializedEntity::EK_Delegating:
case InitializedEntity::EK_VectorElement:
+ case InitializedEntity::EK_MatrixElement:
case InitializedEntity::EK_ComplexElement:
case InitializedEntity::EK_BlockElement:
case InitializedEntity::EK_LambdaToBlockConversionBlockElement:
@@ -7845,11 +7900,13 @@ ExprResult InitializationSequence::Perform(Sema &S,
ExprResult CurInit((Expr *)nullptr);
SmallVector<Expr*, 4> ArrayLoopCommonExprs;
- // HLSL allows vector initialization to function like list initialization, but
- // use the syntax of a C++-like constructor.
- bool IsHLSLVectorInit = S.getLangOpts().HLSL && DestType->isExtVectorType() &&
- isa<InitListExpr>(Args[0]);
- (void)IsHLSLVectorInit;
+ // HLSL allows vector/matrix initialization to function like list
+ // initialization, but use the syntax of a C++-like constructor.
+ bool IsHLSLVectorOrMatrixInit =
+ S.getLangOpts().HLSL &&
+ (DestType->isExtVectorType() || DestType->isConstantMatrixType()) &&
+ isa<InitListExpr>(Args[0]);
+ (void)IsHLSLVectorOrMatrixInit;
// For initialization steps that start with a single initializer,
// grab the only argument out the Args and place it into the "current"
@@ -7888,7 +7945,7 @@ ExprResult InitializationSequence::Perform(Sema &S,
case SK_StdInitializerList:
case SK_OCLSamplerInit:
case SK_OCLZeroOpaqueType: {
- assert(Args.size() == 1 || IsHLSLVectorInit);
+ assert(Args.size() == 1 || IsHLSLVectorOrMatrixInit);
CurInit = Args[0];
if (!CurInit.get()) return ExprError();
break;
@@ -9105,7 +9162,7 @@ bool InitializationSequence::Diagnose(Sema &S,
<< R;
else
S.Diag(Kind.getLocation(), diag::err_excess_initializers)
- << /*scalar=*/2 << R;
+ << /*scalar=*/3 << R;
break;
}
diff --git a/clang/lib/Sema/SemaObjCProperty.cpp b/clang/lib/Sema/SemaObjCProperty.cpp
index 1880cec..67c554c 100644
--- a/clang/lib/Sema/SemaObjCProperty.cpp
+++ b/clang/lib/Sema/SemaObjCProperty.cpp
@@ -1041,7 +1041,7 @@ RedeclarePropertyAccessor(ASTContext &Context, ObjCImplementationDecl *Impl,
Decl->getSelector(), Decl->getReturnType(),
Decl->getReturnTypeSourceInfo(), Impl, Decl->isInstanceMethod(),
Decl->isVariadic(), Decl->isPropertyAccessor(),
- /* isSynthesized*/ true, Decl->isImplicit(), Decl->isDefined(),
+ /*isSynthesizedAccessorStub=*/true, Decl->isImplicit(), Decl->isDefined(),
Decl->getImplementationControl(), Decl->hasRelatedResultType());
ImplDecl->getMethodFamily();
if (Decl->hasAttrs())
diff --git a/clang/lib/Sema/SemaOpenACC.cpp b/clang/lib/Sema/SemaOpenACC.cpp
index ee9b2b3..f0f3832 100644
--- a/clang/lib/Sema/SemaOpenACC.cpp
+++ b/clang/lib/Sema/SemaOpenACC.cpp
@@ -3086,6 +3086,7 @@ bool SemaOpenACC::CreateReductionCombinerRecipe(
case OpenACCReductionOperator::Invalid:
llvm_unreachable("Invalid should have been caught above");
}
+ llvm_unreachable("Unhandled case");
};
auto tryCombiner = [&, this](DeclRefExpr *LHSDRE, DeclRefExpr *RHSDRE,
diff --git a/clang/lib/Sema/SemaOpenMP.cpp b/clang/lib/Sema/SemaOpenMP.cpp
index 5b5b1b6..6d5cb0f 100644
--- a/clang/lib/Sema/SemaOpenMP.cpp
+++ b/clang/lib/Sema/SemaOpenMP.cpp
@@ -7246,7 +7246,9 @@ void SemaOpenMP::ActOnStartOfFunctionDefinitionInOpenMPDeclareVariantScope(
FunctionDecl *UDecl = nullptr;
if (IsTemplated && isa<FunctionTemplateDecl>(CandidateDecl)) {
auto *FTD = cast<FunctionTemplateDecl>(CandidateDecl);
- if (FTD->getTemplateParameters()->size() == TemplateParamLists.size())
+ // FIXME: Should this compare the template parameter lists on all levels?
+ if (SemaRef.Context.isSameTemplateParameterList(
+ FTD->getTemplateParameters(), TemplateParamLists.back()))
UDecl = FTD->getTemplatedDecl();
} else if (!IsTemplated)
UDecl = dyn_cast<FunctionDecl>(CandidateDecl);
diff --git a/clang/lib/Sema/SemaOverload.cpp b/clang/lib/Sema/SemaOverload.cpp
index 1f25111..37f3511 100644
--- a/clang/lib/Sema/SemaOverload.cpp
+++ b/clang/lib/Sema/SemaOverload.cpp
@@ -2532,15 +2532,12 @@ static bool IsStandardConversion(Sema &S, Expr* From, QualType ToType,
SCS.setToType(2, FromType);
- // If we have not converted the argument type to the parameter type,
- // this is a bad conversion sequence, unless we're resolving an overload in C.
- //
- // Permit conversions from a function without `cfi_unchecked_callee` to a
- // function with `cfi_unchecked_callee`.
- if (CanonFrom == CanonTo || S.AddingCFIUncheckedCallee(CanonFrom, CanonTo))
+ if (CanonFrom == CanonTo)
return true;
- if ((S.getLangOpts().CPlusPlus || !InOverloadResolution))
+ // If we have not converted the argument type to the parameter type,
+ // this is a bad conversion sequence, unless we're resolving an overload in C.
+ if (S.getLangOpts().CPlusPlus || !InOverloadResolution)
return false;
ExprResult ER = ExprResult{From};
diff --git a/clang/lib/Sema/SemaRISCV.cpp b/clang/lib/Sema/SemaRISCV.cpp
index b5f91a3..75dba80 100644
--- a/clang/lib/Sema/SemaRISCV.cpp
+++ b/clang/lib/Sema/SemaRISCV.cpp
@@ -664,6 +664,80 @@ bool SemaRISCV::CheckBuiltinFunctionCall(const TargetInfo &TI,
return CheckVSetVL(1, 2);
case RISCVVector::BI__builtin_rvv_vsetvlimax:
return CheckVSetVL(0, 1);
+ case RISCVVector::BI__builtin_rvv_sf_vsettnt:
+ case RISCVVector::BI__builtin_rvv_sf_vsettm:
+ case RISCVVector::BI__builtin_rvv_sf_vsettn:
+ case RISCVVector::BI__builtin_rvv_sf_vsettk:
+ return SemaRef.BuiltinConstantArgRange(TheCall, 1, 0, 3) ||
+ SemaRef.BuiltinConstantArgRange(TheCall, 2, 1, 3);
+ case RISCVVector::BI__builtin_rvv_sf_mm_f_f_w1:
+ case RISCVVector::BI__builtin_rvv_sf_mm_f_f_w2:
+ case RISCVVector::BI__builtin_rvv_sf_mm_e5m2_e4m3_w4:
+ case RISCVVector::BI__builtin_rvv_sf_mm_e5m2_e5m2_w4:
+ case RISCVVector::BI__builtin_rvv_sf_mm_e4m3_e4m3_w4:
+ case RISCVVector::BI__builtin_rvv_sf_mm_e4m3_e5m2_w4:
+ case RISCVVector::BI__builtin_rvv_sf_mm_u_u_w4:
+ case RISCVVector::BI__builtin_rvv_sf_mm_u_s_w4:
+ case RISCVVector::BI__builtin_rvv_sf_mm_s_u_w4:
+ case RISCVVector::BI__builtin_rvv_sf_mm_s_s_w4: {
+ QualType Arg1Type = TheCall->getArg(1)->getType();
+ ASTContext::BuiltinVectorTypeInfo Info =
+ SemaRef.Context.getBuiltinVectorTypeInfo(
+ Arg1Type->castAs<BuiltinType>());
+ unsigned EltSize = SemaRef.Context.getTypeSize(Info.ElementType);
+ llvm::APSInt Result;
+
+ // We can't check the value of a dependent argument.
+ Expr *Arg = TheCall->getArg(0);
+ if (Arg->isTypeDependent() || Arg->isValueDependent())
+ return false;
+
+ // Check constant-ness first.
+ if (SemaRef.BuiltinConstantArg(TheCall, 0, Result))
+ return true;
+
+ // For TEW = 32, mtd can only be 0, 4, 8, 12.
+ // For TEW = 64, mtd can only be 0, 2, 4, 6, 8, 10, 12, 14.
+ // Only `sf_mm_f_f_w1` and `sf_mm_f_f_w2` might have TEW = 64.
+ if ((BuiltinID == RISCVVector::BI__builtin_rvv_sf_mm_f_f_w1 &&
+ EltSize == 64) ||
+ (BuiltinID == RISCVVector::BI__builtin_rvv_sf_mm_f_f_w2 &&
+ EltSize == 32))
+ return SemaRef.BuiltinConstantArgRange(TheCall, 0, 0, 15) ||
+ SemaRef.BuiltinConstantArgMultiple(TheCall, 0, 2);
+ return SemaRef.BuiltinConstantArgRange(TheCall, 0, 0, 15) ||
+ SemaRef.BuiltinConstantArgMultiple(TheCall, 0, 4);
+ }
+ case RISCVVector::BI__builtin_rvv_sf_vtzero_t: {
+ llvm::APSInt Log2SEWResult;
+ llvm::APSInt TWidenResult;
+ if (SemaRef.BuiltinConstantArg(TheCall, 3, Log2SEWResult) ||
+ SemaRef.BuiltinConstantArg(TheCall, 4, TWidenResult))
+ return true;
+
+ int Log2SEW = Log2SEWResult.getSExtValue();
+ int TWiden = TWidenResult.getSExtValue();
+
+    // 3 <= Log2SEW <= 6
+ if (SemaRef.BuiltinConstantArgRange(TheCall, 3, 3, 6))
+ return true;
+
+    // TWiden must be 1, 2, or 4.
+ if (TWiden != 1 && TWiden != 2 && TWiden != 4)
+ return Diag(TheCall->getBeginLoc(),
+ diag::err_riscv_builtin_invalid_twiden);
+
+ int TEW = (1 << Log2SEW) * TWiden;
+
+ // For TEW = 8, mtd can be 0~15.
+ // For TEW = 16 or 64, mtd can only be 0, 2, 4, 6, 8, 10, 12, 14.
+ // For TEW = 32, mtd can only be 0, 4, 8, 12.
+ if (SemaRef.BuiltinConstantArgRange(TheCall, 0, 0, 15))
+ return true;
+ if (TEW == 16 || TEW == 64)
+ return SemaRef.BuiltinConstantArgMultiple(TheCall, 0, 2);
+ return SemaRef.BuiltinConstantArgMultiple(TheCall, 0, 4);
+ }
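The mtd alignment rule follows from TEW = SEW * TWiden. A worked instance of the arithmetic encoded above:

    constexpr int Log2SEW = 4, TWiden = 2;        // SEW = 16
    constexpr int TEW = (1 << Log2SEW) * TWiden;  // TEW = 32
    static_assert(TEW == 32, "");                 // mtd must then be 0, 4, 8, or 12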
case RISCVVector::BI__builtin_rvv_vget_v: {
ASTContext::BuiltinVectorTypeInfo ResVecInfo =
Context.getBuiltinVectorTypeInfo(cast<BuiltinType>(
diff --git a/clang/lib/StaticAnalyzer/Checkers/WebKit/RawPtrRefLambdaCapturesChecker.cpp b/clang/lib/StaticAnalyzer/Checkers/WebKit/RawPtrRefLambdaCapturesChecker.cpp
index 033eb8c..f60d193 100644
--- a/clang/lib/StaticAnalyzer/Checkers/WebKit/RawPtrRefLambdaCapturesChecker.cpp
+++ b/clang/lib/StaticAnalyzer/Checkers/WebKit/RawPtrRefLambdaCapturesChecker.cpp
@@ -50,7 +50,9 @@ public:
llvm::DenseSet<const DeclRefExpr *> DeclRefExprsToIgnore;
llvm::DenseSet<const LambdaExpr *> LambdasToIgnore;
llvm::DenseSet<const ValueDecl *> ProtectedThisDecls;
+ llvm::DenseSet<const CallExpr *> CallToIgnore;
llvm::DenseSet<const CXXConstructExpr *> ConstructToIgnore;
+ llvm::DenseMap<const VarDecl *, const LambdaExpr *> LambdaOwnerMap;
QualType ClsType;
@@ -101,10 +103,60 @@ public:
auto *Init = VD->getInit();
if (!Init)
return true;
- auto *L = dyn_cast_or_null<LambdaExpr>(Init->IgnoreParenCasts());
- if (!L)
+ if (auto *L = dyn_cast_or_null<LambdaExpr>(Init->IgnoreParenCasts())) {
+ LambdasToIgnore.insert(L); // Evaluate lambdas in VisitDeclRefExpr.
+ return true;
+ }
+ if (!VD->hasLocalStorage())
return true;
- LambdasToIgnore.insert(L); // Evaluate lambdas in VisitDeclRefExpr.
+ if (auto *E = dyn_cast<ExprWithCleanups>(Init))
+ Init = E->getSubExpr();
+ if (auto *E = dyn_cast<CXXBindTemporaryExpr>(Init))
+ Init = E->getSubExpr();
+ if (auto *CE = dyn_cast<CallExpr>(Init)) {
+ if (auto *Callee = CE->getDirectCallee()) {
+ auto FnName = safeGetName(Callee);
+ unsigned ArgCnt = CE->getNumArgs();
+ if (FnName == "makeScopeExit" && ArgCnt == 1) {
+ auto *Arg = CE->getArg(0);
+ if (auto *E = dyn_cast<MaterializeTemporaryExpr>(Arg))
+ Arg = E->getSubExpr();
+ if (auto *L = dyn_cast<LambdaExpr>(Arg)) {
+ LambdaOwnerMap.insert(std::make_pair(VD, L));
+ CallToIgnore.insert(CE);
+ LambdasToIgnore.insert(L);
+ }
+ } else if (FnName == "makeVisitor") {
+ for (unsigned ArgIndex = 0; ArgIndex < ArgCnt; ++ArgIndex) {
+ auto *Arg = CE->getArg(ArgIndex);
+ if (auto *E = dyn_cast<MaterializeTemporaryExpr>(Arg))
+ Arg = E->getSubExpr();
+ if (auto *L = dyn_cast<LambdaExpr>(Arg)) {
+ LambdaOwnerMap.insert(std::make_pair(VD, L));
+ CallToIgnore.insert(CE);
+ LambdasToIgnore.insert(L);
+ }
+ }
+ }
+ }
+ } else if (auto *CE = dyn_cast<CXXConstructExpr>(Init)) {
+ if (auto *Ctor = CE->getConstructor()) {
+ if (auto *Cls = Ctor->getParent()) {
+ auto FnName = safeGetName(Cls);
+ unsigned ArgCnt = CE->getNumArgs();
+ if (FnName == "ScopeExit" && ArgCnt == 1) {
+ auto *Arg = CE->getArg(0);
+ if (auto *E = dyn_cast<MaterializeTemporaryExpr>(Arg))
+ Arg = E->getSubExpr();
+ if (auto *L = dyn_cast<LambdaExpr>(Arg)) {
+ LambdaOwnerMap.insert(std::make_pair(VD, L));
+ ConstructToIgnore.insert(CE);
+ LambdasToIgnore.insert(L);
+ }
+ }
+ }
+ }
+ }
return true;
}
@@ -114,6 +166,12 @@ public:
auto *VD = dyn_cast_or_null<VarDecl>(DRE->getDecl());
if (!VD)
return true;
+ if (auto It = LambdaOwnerMap.find(VD); It != LambdaOwnerMap.end()) {
+ auto *L = It->second;
+ Checker->visitLambdaExpr(L, shouldCheckThis() && !hasProtectedThis(L),
+ ClsType);
+ return true;
+ }
auto *Init = VD->getInit();
if (!Init)
return true;
@@ -167,10 +225,14 @@ public:
}
bool VisitCallExpr(CallExpr *CE) override {
+ if (CallToIgnore.contains(CE))
+ return true;
checkCalleeLambda(CE);
- if (auto *Callee = CE->getDirectCallee())
+ if (auto *Callee = CE->getDirectCallee()) {
+ if (isVisitFunction(CE, Callee))
+ return true;
checkParameters(CE, Callee);
- else if (auto *CalleeE = CE->getCallee()) {
+ } else if (auto *CalleeE = CE->getCallee()) {
if (auto *DRE = dyn_cast<DeclRefExpr>(CalleeE->IgnoreParenCasts())) {
if (auto *Callee = dyn_cast_or_null<FunctionDecl>(DRE->getDecl()))
checkParameters(CE, Callee);
@@ -179,6 +241,34 @@ public:
return true;
}
+ bool isVisitFunction(CallExpr *CallExpr, FunctionDecl *FnDecl) {
+ bool IsVisitFn = safeGetName(FnDecl) == "visit";
+ if (!IsVisitFn)
+ return false;
+    unsigned ArgCnt = CallExpr->getNumArgs();
+ if (!ArgCnt)
+ return false;
+ auto *Ns = FnDecl->getParent();
+ if (!Ns)
+ return false;
+ auto NsName = safeGetName(Ns);
+ if (NsName != "WTF" && NsName != "std")
+ return false;
+ auto *Arg = CallExpr->getArg(0);
+ if (!Arg)
+ return false;
+ auto *DRE = dyn_cast<DeclRefExpr>(Arg->IgnoreParenCasts());
+ if (!DRE)
+ return false;
+ auto *VD = dyn_cast<VarDecl>(DRE->getDecl());
+ if (!VD)
+ return false;
+ if (!LambdaOwnerMap.contains(VD))
+ return false;
+ DeclRefExprsToIgnore.insert(DRE);
+ return true;
+ }
+
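The new bookkeeping recognizes lambdas whose lifetime is owned by a local makeScopeExit/makeVisitor result and defers their analysis to the point of use. A minimal sketch of the source pattern, using the helper names this diff matches on (makeScopeExit, makeVisitor, and a WTF/std visit):

    auto cleanup = makeScopeExit([&] { finish(); });        // VarDecl recorded in LambdaOwnerMap
    auto visitor = makeVisitor([](int) {}, [](double) {});
    std::visit(visitor, value);  // isVisitFunction() ignores this use of `visitor`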
void checkParameters(CallExpr *CE, FunctionDecl *Callee) {
unsigned ArgIndex = isa<CXXOperatorCallExpr>(CE);
bool TreatAllArgsAsNoEscape = shouldTreatAllArgAsNoEscape(Callee);
@@ -280,7 +370,7 @@ public:
LambdasToIgnore.insert(L);
}
- bool hasProtectedThis(LambdaExpr *L) {
+ bool hasProtectedThis(const LambdaExpr *L) {
for (const LambdaCapture &OtherCapture : L->captures()) {
if (!OtherCapture.capturesVariable())
continue;
@@ -378,7 +468,8 @@ public:
visitor.TraverseDecl(const_cast<TranslationUnitDecl *>(TUD));
}
- void visitLambdaExpr(LambdaExpr *L, bool shouldCheckThis, const QualType T,
+ void visitLambdaExpr(const LambdaExpr *L, bool shouldCheckThis,
+ const QualType T,
bool ignoreParamVarDecl = false) const {
if (TFA.isTrivial(L->getBody()))
return;
@@ -410,7 +501,7 @@ public:
}
void reportBug(const LambdaCapture &Capture, ValueDecl *CapturedVar,
- const QualType T, LambdaExpr *L) const {
+ const QualType T, const LambdaExpr *L) const {
assert(CapturedVar);
auto Location = Capture.getLocation();
diff --git a/clang/lib/StaticAnalyzer/Frontend/AnalysisConsumer.cpp b/clang/lib/StaticAnalyzer/Frontend/AnalysisConsumer.cpp
index 82b560b..e0deec1 100644
--- a/clang/lib/StaticAnalyzer/Frontend/AnalysisConsumer.cpp
+++ b/clang/lib/StaticAnalyzer/Frontend/AnalysisConsumer.cpp
@@ -128,7 +128,6 @@ public:
std::unique_ptr<llvm::Timer> SyntaxCheckTimer;
std::unique_ptr<llvm::Timer> ExprEngineTimer;
std::unique_ptr<llvm::Timer> BugReporterTimer;
- bool ShouldClearTimersToPreventDisplayingThem;
/// The information about analyzed functions shared throughout the
/// translation unit.
@@ -149,7 +148,10 @@ public:
if (Opts.AnalyzerDisplayProgress || Opts.PrintStats ||
Opts.ShouldSerializeStats || !Opts.DumpEntryPointStatsToCSV.empty()) {
AnalyzerTimers = std::make_unique<llvm::TimerGroup>(
- "analyzer", "Analyzer timers");
+ "analyzer", "Analyzer timers",
+ /*PrintOnExit=*/
+ (Opts.AnalyzerDisplayProgress || Opts.PrintStats ||
+ Opts.ShouldSerializeStats));
SyntaxCheckTimer = std::make_unique<llvm::Timer>(
"syntaxchecks", "Syntax-based analysis time", *AnalyzerTimers);
ExprEngineTimer = std::make_unique<llvm::Timer>(
@@ -159,12 +161,6 @@ public:
*AnalyzerTimers);
}
- // Avoid displaying the timers created above in case we only want to record
- // per-entry-point stats.
- ShouldClearTimersToPreventDisplayingThem = !Opts.AnalyzerDisplayProgress &&
- !Opts.PrintStats &&
- !Opts.ShouldSerializeStats;
-
if (Opts.PrintStats || Opts.ShouldSerializeStats) {
llvm::EnableStatistics(/* DoPrintOnExit= */ false);
}
@@ -287,9 +283,6 @@ public:
checkerMgr->runCheckersOnASTDecl(D, *Mgr, *RecVisitorBR);
if (SyntaxCheckTimer)
SyntaxCheckTimer->stopTimer();
- if (AnalyzerTimers && ShouldClearTimersToPreventDisplayingThem) {
- AnalyzerTimers->clear();
- }
}
return true;
}
@@ -583,9 +576,6 @@ void AnalysisConsumer::runAnalysisOnTranslationUnit(ASTContext &C) {
checkerMgr->runCheckersOnASTDecl(TU, *Mgr, BR);
if (SyntaxCheckTimer)
SyntaxCheckTimer->stopTimer();
- if (AnalyzerTimers && ShouldClearTimersToPreventDisplayingThem) {
- AnalyzerTimers->clear();
- }
// Run the AST-only checks using the order in which functions are defined.
// If inlining is not turned on, use the simplest function order for path
@@ -765,9 +755,6 @@ void AnalysisConsumer::HandleCode(Decl *D, AnalysisMode Mode,
FunctionSummaries.findOrInsertSummary(D)->second.SyntaxRunningTime =
std::lround(CheckerDuration.getWallTime() * 1000);
DisplayTime(CheckerDuration);
- if (AnalyzerTimers && ShouldClearTimersToPreventDisplayingThem) {
- AnalyzerTimers->clear();
- }
}
}
@@ -830,9 +817,6 @@ void AnalysisConsumer::RunPathSensitiveChecks(Decl *D,
PathRunningTime.set(static_cast<unsigned>(
std::lround(ExprEngineDuration.getWallTime() * 1000)));
DisplayTime(ExprEngineDuration);
- if (AnalyzerTimers && ShouldClearTimersToPreventDisplayingThem) {
- AnalyzerTimers->clear();
- }
}
if (!Mgr->options.DumpExplodedGraphTo.empty())
@@ -843,9 +827,6 @@ void AnalysisConsumer::RunPathSensitiveChecks(Decl *D,
Eng.ViewGraph(Mgr->options.TrimGraph);
flushReports(BugReporterTimer.get(), Eng.getBugReporter());
- if (AnalyzerTimers && ShouldClearTimersToPreventDisplayingThem) {
- AnalyzerTimers->clear();
- }
}
//===----------------------------------------------------------------------===//
diff --git a/clang/lib/Support/RISCVVIntrinsicUtils.cpp b/clang/lib/Support/RISCVVIntrinsicUtils.cpp
index dad3d0da..12e209a 100644
--- a/clang/lib/Support/RISCVVIntrinsicUtils.cpp
+++ b/clang/lib/Support/RISCVVIntrinsicUtils.cpp
@@ -980,11 +980,12 @@ RVVIntrinsic::RVVIntrinsic(
bool HasMaskedOffOperand, bool HasVL, PolicyScheme Scheme,
bool SupportOverloading, bool HasBuiltinAlias, StringRef ManualCodegen,
const RVVTypes &OutInTypes, const std::vector<int64_t> &NewIntrinsicTypes,
- unsigned NF, Policy NewPolicyAttrs, bool HasFRMRoundModeOp)
+ unsigned NF, Policy NewPolicyAttrs, bool HasFRMRoundModeOp, unsigned TWiden)
: IRName(IRName), IsMasked(IsMasked),
HasMaskedOffOperand(HasMaskedOffOperand), HasVL(HasVL), Scheme(Scheme),
SupportOverloading(SupportOverloading), HasBuiltinAlias(HasBuiltinAlias),
- ManualCodegen(ManualCodegen.str()), NF(NF), PolicyAttrs(NewPolicyAttrs) {
+ ManualCodegen(ManualCodegen.str()), NF(NF), PolicyAttrs(NewPolicyAttrs),
+ TWiden(TWiden) {
// Init BuiltinName, Name and OverloadedName
BuiltinName = NewName.str();
diff --git a/clang/lib/Tooling/DependencyScanning/DependencyScannerImpl.cpp b/clang/lib/Tooling/DependencyScanning/DependencyScannerImpl.cpp
index 05d5669..42f52d0 100644
--- a/clang/lib/Tooling/DependencyScanning/DependencyScannerImpl.cpp
+++ b/clang/lib/Tooling/DependencyScanning/DependencyScannerImpl.cpp
@@ -524,13 +524,12 @@ bool initializeScanCompilerInstance(
// Use the dependency scanning optimized file system if requested to do so.
if (DepFS) {
DepFS->resetBypassedPathPrefix();
- if (!ScanInstance.getHeaderSearchOpts().ModuleCachePath.empty()) {
- SmallString<256> ModulesCachePath;
- normalizeModuleCachePath(
- ScanInstance.getFileManager(),
- ScanInstance.getHeaderSearchOpts().ModuleCachePath, ModulesCachePath);
+ SmallString<256> ModulesCachePath;
+ normalizeModuleCachePath(ScanInstance.getFileManager(),
+ ScanInstance.getHeaderSearchOpts().ModuleCachePath,
+ ModulesCachePath);
+ if (!ModulesCachePath.empty())
DepFS->setBypassedPathPrefix(ModulesCachePath);
- }
ScanInstance.setDependencyDirectivesGetter(
std::make_unique<ScanningDependencyDirectivesGetter>(