Diffstat (limited to 'clang/lib')
-rw-r--r--  clang/lib/AST/ByteCode/InterpBuiltin.cpp | 128
-rw-r--r--  clang/lib/AST/ExprConstant.cpp | 144
-rw-r--r--  clang/lib/AST/StmtOpenACC.cpp | 44
-rw-r--r--  clang/lib/Basic/FileManager.cpp | 7
-rw-r--r--  clang/lib/Basic/Targets/AArch64.cpp | 21
-rw-r--r--  clang/lib/Basic/Targets/AArch64.h | 11
-rw-r--r--  clang/lib/Basic/Targets/Hexagon.cpp | 6
-rw-r--r--  clang/lib/Basic/Targets/OSTargets.cpp | 1
-rw-r--r--  clang/lib/Basic/Targets/OSTargets.h | 6
-rw-r--r--  clang/lib/Basic/Targets/PPC.h | 30
-rw-r--r--  clang/lib/CIR/CodeGen/CIRGenBuiltin.cpp | 13
-rw-r--r--  clang/lib/CIR/CodeGen/CIRGenCXXABI.h | 5
-rw-r--r--  clang/lib/CIR/CodeGen/CIRGenCall.cpp | 9
-rw-r--r--  clang/lib/CIR/CodeGen/CIRGenCleanup.h | 49
-rw-r--r--  clang/lib/CIR/CodeGen/CIRGenCoroutine.cpp | 79
-rw-r--r--  clang/lib/CIR/CodeGen/CIRGenException.cpp | 165
-rw-r--r--  clang/lib/CIR/CodeGen/CIRGenExprAggregate.cpp | 3
-rw-r--r--  clang/lib/CIR/CodeGen/CIRGenExprCXX.cpp | 6
-rw-r--r--  clang/lib/CIR/CodeGen/CIRGenExprConstant.cpp | 35
-rw-r--r--  clang/lib/CIR/CodeGen/CIRGenFunction.h | 11
-rw-r--r--  clang/lib/CIR/CodeGen/CIRGenItaniumCXXABI.cpp | 18
-rw-r--r--  clang/lib/CIR/CodeGen/CIRGenModule.h | 2
-rw-r--r--  clang/lib/CIR/CodeGen/CIRGenStmtOpenACC.cpp | 64
-rw-r--r--  clang/lib/CodeGen/CodeGenModule.cpp | 2
-rw-r--r--  clang/lib/CodeGen/TargetBuiltins/RISCV.cpp | 2
-rw-r--r--  clang/lib/CodeGen/TargetInfo.h | 1
-rw-r--r--  clang/lib/Driver/ToolChain.cpp | 1
-rw-r--r--  clang/lib/Driver/ToolChains/Arch/AArch64.cpp | 21
-rw-r--r--  clang/lib/Driver/ToolChains/Arch/AArch64.h | 2
-rw-r--r--  clang/lib/Driver/ToolChains/Clang.cpp | 59
-rw-r--r--  clang/lib/Driver/ToolChains/Fuchsia.cpp | 4
-rw-r--r--  clang/lib/Driver/ToolChains/Linux.cpp | 96
-rw-r--r--  clang/lib/Driver/ToolChains/Linux.h | 7
-rw-r--r--  clang/lib/Frontend/CompilerInstance.cpp | 2
-rw-r--r--  clang/lib/Frontend/CompilerInvocation.cpp | 2
-rw-r--r--  clang/lib/Headers/sifive_vector.h | 56
-rw-r--r--  clang/lib/Lex/LiteralSupport.cpp | 8
-rw-r--r--  clang/lib/Parse/ParseHLSL.cpp | 6
-rw-r--r--  clang/lib/Sema/CheckExprLifetime.cpp | 1
-rw-r--r--  clang/lib/Sema/SemaChecking.cpp | 30
-rw-r--r--  clang/lib/Sema/SemaHLSL.cpp | 39
-rw-r--r--  clang/lib/Sema/SemaInit.cpp | 87
-rw-r--r--  clang/lib/Sema/SemaObjCProperty.cpp | 2
-rw-r--r--  clang/lib/Sema/SemaOverload.cpp | 11
-rw-r--r--  clang/lib/Sema/SemaRISCV.cpp | 74
-rw-r--r--  clang/lib/StaticAnalyzer/Frontend/AnalysisConsumer.cpp | 27
-rw-r--r--  clang/lib/Support/RISCVVIntrinsicUtils.cpp | 5
47 files changed, 1162 insertions, 240 deletions
diff --git a/clang/lib/AST/ByteCode/InterpBuiltin.cpp b/clang/lib/AST/ByteCode/InterpBuiltin.cpp
index ff50e6d..d0b97a1 100644
--- a/clang/lib/AST/ByteCode/InterpBuiltin.cpp
+++ b/clang/lib/AST/ByteCode/InterpBuiltin.cpp
@@ -3320,6 +3320,65 @@ static bool interp__builtin_ia32_vpconflict(InterpState &S, CodePtr OpPC,
return true;
}
+static bool interp__builtin_x86_byteshift(
+ InterpState &S, CodePtr OpPC, const CallExpr *Call, unsigned ID,
+ llvm::function_ref<APInt(const Pointer &, unsigned Lane, unsigned I,
+ unsigned Shift)>
+ Fn) {
+ assert(Call->getNumArgs() == 2);
+
+ APSInt ImmAPS = popToAPSInt(S, Call->getArg(1));
+ uint64_t Shift = ImmAPS.getZExtValue() & 0xff;
+
+ const Pointer &Src = S.Stk.pop<Pointer>();
+ if (!Src.getFieldDesc()->isPrimitiveArray())
+ return false;
+
+ unsigned NumElems = Src.getNumElems();
+ const Pointer &Dst = S.Stk.peek<Pointer>();
+ PrimType ElemT = Src.getFieldDesc()->getPrimType();
+
+ for (unsigned Lane = 0; Lane != NumElems; Lane += 16) {
+ for (unsigned I = 0; I != 16; ++I) {
+ unsigned Base = Lane + I;
+ APSInt Result = APSInt(Fn(Src, Lane, I, Shift));
+ INT_TYPE_SWITCH_NO_BOOL(ElemT,
+ { Dst.elem<T>(Base) = static_cast<T>(Result); });
+ }
+ }
+
+ Dst.initializeAllElements();
+
+ return true;
+}
+
+static bool interp__builtin_ia32_shuffle_generic(
+ InterpState &S, CodePtr OpPC, const CallExpr *Call,
+ llvm::function_ref<std::pair<unsigned, unsigned>(unsigned, unsigned)>
+ GetSourceIndex) {
+
+ assert(Call->getNumArgs() == 3);
+ unsigned ShuffleMask = popToAPSInt(S, Call->getArg(2)).getZExtValue();
+
+ QualType Arg0Type = Call->getArg(0)->getType();
+ const auto *VecT = Arg0Type->castAs<VectorType>();
+ PrimType ElemT = *S.getContext().classify(VecT->getElementType());
+ unsigned NumElems = VecT->getNumElements();
+
+ const Pointer &B = S.Stk.pop<Pointer>();
+ const Pointer &A = S.Stk.pop<Pointer>();
+ const Pointer &Dst = S.Stk.peek<Pointer>();
+
+ for (unsigned DstIdx = 0; DstIdx != NumElems; ++DstIdx) {
+ auto [SrcVecIdx, SrcIdx] = GetSourceIndex(DstIdx, ShuffleMask);
+ const Pointer &Src = (SrcVecIdx == 0) ? A : B;
+ TYPE_SWITCH(ElemT, { Dst.elem<T>(DstIdx) = Src.elem<T>(SrcIdx); });
+ }
+ Dst.initializeAllElements();
+
+ return true;
+}
+
bool InterpretBuiltin(InterpState &S, CodePtr OpPC, const CallExpr *Call,
uint32_t BuiltinID) {
if (!S.getASTContext().BuiltinInfo.isConstantEvaluated(BuiltinID))
@@ -4250,6 +4309,42 @@ bool InterpretBuiltin(InterpState &S, CodePtr OpPC, const CallExpr *Call,
case X86::BI__builtin_ia32_selectpd_512:
return interp__builtin_select(S, OpPC, Call);
+ case X86::BI__builtin_ia32_shufps:
+ case X86::BI__builtin_ia32_shufps256:
+ case X86::BI__builtin_ia32_shufps512:
+ return interp__builtin_ia32_shuffle_generic(
+ S, OpPC, Call, [](unsigned DstIdx, unsigned ShuffleMask) {
+ unsigned NumElemPerLane = 4;
+ unsigned NumSelectableElems = NumElemPerLane / 2;
+ unsigned BitsPerElem = 2;
+ unsigned IndexMask = 0x3;
+ unsigned MaskBits = 8;
+ unsigned Lane = DstIdx / NumElemPerLane;
+ unsigned ElemInLane = DstIdx % NumElemPerLane;
+ unsigned LaneOffset = Lane * NumElemPerLane;
+ unsigned SrcIdx = ElemInLane >= NumSelectableElems ? 1 : 0;
+ unsigned BitIndex = (DstIdx * BitsPerElem) % MaskBits;
+ unsigned Index = (ShuffleMask >> BitIndex) & IndexMask;
+ return std::pair<unsigned, unsigned>{SrcIdx, LaneOffset + Index};
+ });
+ case X86::BI__builtin_ia32_shufpd:
+ case X86::BI__builtin_ia32_shufpd256:
+ case X86::BI__builtin_ia32_shufpd512:
+ return interp__builtin_ia32_shuffle_generic(
+ S, OpPC, Call, [](unsigned DstIdx, unsigned ShuffleMask) {
+ unsigned NumElemPerLane = 2;
+ unsigned NumSelectableElems = NumElemPerLane / 2;
+ unsigned BitsPerElem = 1;
+ unsigned IndexMask = 0x1;
+ unsigned MaskBits = 8;
+ unsigned Lane = DstIdx / NumElemPerLane;
+ unsigned ElemInLane = DstIdx % NumElemPerLane;
+ unsigned LaneOffset = Lane * NumElemPerLane;
+ unsigned SrcIdx = ElemInLane >= NumSelectableElems ? 1 : 0;
+ unsigned BitIndex = (DstIdx * BitsPerElem) % MaskBits;
+ unsigned Index = (ShuffleMask >> BitIndex) & IndexMask;
+ return std::pair<unsigned, unsigned>{SrcIdx, LaneOffset + Index};
+ });
case X86::BI__builtin_ia32_pshufb128:
case X86::BI__builtin_ia32_pshufb256:
case X86::BI__builtin_ia32_pshufb512:
@@ -4390,6 +4485,39 @@ bool InterpretBuiltin(InterpState &S, CodePtr OpPC, const CallExpr *Call,
case X86::BI__builtin_ia32_vec_set_v4di:
return interp__builtin_vec_set(S, OpPC, Call, BuiltinID);
+ case X86::BI__builtin_ia32_pslldqi128_byteshift:
+ case X86::BI__builtin_ia32_pslldqi256_byteshift:
+ case X86::BI__builtin_ia32_pslldqi512_byteshift:
+ // These SLLDQ intrinsics always operate on byte elements (8 bits).
+ // The lane width is hardcoded to 16 to match the SIMD register size,
+ // but the algorithm processes one byte per iteration,
+ // so APInt(8, ...) is correct and intentional.
+ return interp__builtin_x86_byteshift(
+ S, OpPC, Call, BuiltinID,
+ [](const Pointer &Src, unsigned Lane, unsigned I, unsigned Shift) {
+ if (I < Shift) {
+ return APInt(8, 0);
+ }
+ return APInt(8, Src.elem<uint8_t>(Lane + I - Shift));
+ });
+
+ case X86::BI__builtin_ia32_psrldqi128_byteshift:
+ case X86::BI__builtin_ia32_psrldqi256_byteshift:
+ case X86::BI__builtin_ia32_psrldqi512_byteshift:
+ // These SRLDQ intrinsics always operate on byte elements (8 bits).
+ // The lane width is hardcoded to 16 to match the SIMD register size,
+ // but the algorithm processes one byte per iteration,
+ // so APInt(8, ...) is correct and intentional.
+ return interp__builtin_x86_byteshift(
+ S, OpPC, Call, BuiltinID,
+ [](const Pointer &Src, unsigned Lane, unsigned I, unsigned Shift) {
+ if (I + Shift < 16) {
+ return APInt(8, Src.elem<uint8_t>(Lane + I + Shift));
+ }
+
+ return APInt(8, 0);
+ });
+
default:
S.FFDiag(S.Current->getLocation(OpPC),
diag::note_invalid_subexpr_in_const_expr)
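
The shufps/shufpd handling above decodes the 8-bit immediate per 128-bit lane: the low half of each destination lane is taken from the first source, the high half from the second, and the mask bits select the element within that source lane. A minimal standalone sketch of that index math (plain C++; the helper name is hypothetical and not part of the patch):

#include <cstdio>
#include <utility>

// Standalone model of the lambda passed to interp__builtin_ia32_shuffle_generic
// for shufps: returns which source vector (0 = A, 1 = B) and which element
// index feeds destination element DstIdx.
static std::pair<unsigned, unsigned> shufpsSource(unsigned DstIdx,
                                                  unsigned Mask) {
  constexpr unsigned NumElemPerLane = 4; // four 32-bit elements per 128-bit lane
  unsigned Lane = DstIdx / NumElemPerLane;
  unsigned ElemInLane = DstIdx % NumElemPerLane;
  unsigned SrcVec = ElemInLane < 2 ? 0 : 1; // low half from A, high half from B
  unsigned BitIndex = (DstIdx * 2) % 8;     // two mask bits per element
  unsigned Index = (Mask >> BitIndex) & 0x3;
  return {SrcVec, Lane * NumElemPerLane + Index};
}

int main() {
  // _mm_shuffle_ps(a, b, 0b01001110) should produce { a[2], a[3], b[0], b[1] }.
  for (unsigned I = 0; I != 4; ++I) {
    auto [Vec, Idx] = shufpsSource(I, 0b01001110);
    std::printf("dst[%u] = %c[%u]\n", I, Vec == 0 ? 'a' : 'b', Idx);
  }
}

The shufpd case is identical in shape, with two 64-bit elements per lane and a single mask bit per element.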
diff --git a/clang/lib/AST/ExprConstant.cpp b/clang/lib/AST/ExprConstant.cpp
index 2bd4476..29ee089 100644
--- a/clang/lib/AST/ExprConstant.cpp
+++ b/clang/lib/AST/ExprConstant.cpp
@@ -11619,6 +11619,39 @@ static bool evalPackBuiltin(const CallExpr *E, EvalInfo &Info, APValue &Result,
return true;
}
+static bool evalShuffleGeneric(
+ EvalInfo &Info, const CallExpr *Call, APValue &Out,
+ llvm::function_ref<std::pair<unsigned, unsigned>(unsigned, unsigned)>
+ GetSourceIndex) {
+
+ const auto *VT = Call->getType()->getAs<VectorType>();
+ if (!VT)
+ return false;
+
+ APSInt MaskImm;
+ if (!EvaluateInteger(Call->getArg(2), MaskImm, Info))
+ return false;
+ unsigned ShuffleMask = static_cast<unsigned>(MaskImm.getZExtValue());
+
+ APValue A, B;
+ if (!EvaluateAsRValue(Info, Call->getArg(0), A) ||
+ !EvaluateAsRValue(Info, Call->getArg(1), B))
+ return false;
+
+ unsigned NumElts = VT->getNumElements();
+ SmallVector<APValue, 16> ResultElements;
+ ResultElements.reserve(NumElts);
+
+ for (unsigned DstIdx = 0; DstIdx != NumElts; ++DstIdx) {
+ auto [SrcVecIdx, SrcIdx] = GetSourceIndex(DstIdx, ShuffleMask);
+ const APValue &Src = (SrcVecIdx == 0) ? A : B;
+ ResultElements.push_back(Src.getVectorElt(SrcIdx));
+ }
+
+ Out = APValue(ResultElements.data(), ResultElements.size());
+ return true;
+}
+
static bool evalPshufbBuiltin(EvalInfo &Info, const CallExpr *Call,
APValue &Out) {
APValue SrcVec, ControlVec;
@@ -12398,7 +12431,56 @@ bool VectorExprEvaluator::VisitCallExpr(const CallExpr *E) {
return Success(APValue(ResultElements.data(), ResultElements.size()), E);
}
-
+ case X86::BI__builtin_ia32_shufps:
+ case X86::BI__builtin_ia32_shufps256:
+ case X86::BI__builtin_ia32_shufps512: {
+ APValue R;
+ if (!evalShuffleGeneric(
+ Info, E, R,
+ [](unsigned DstIdx,
+ unsigned ShuffleMask) -> std::pair<unsigned, unsigned> {
+ constexpr unsigned LaneBits = 128u;
+ unsigned NumElemPerLane = LaneBits / 32;
+ unsigned NumSelectableElems = NumElemPerLane / 2;
+ unsigned BitsPerElem = 2;
+ unsigned IndexMask = (1u << BitsPerElem) - 1;
+ unsigned MaskBits = 8;
+ unsigned Lane = DstIdx / NumElemPerLane;
+ unsigned ElemInLane = DstIdx % NumElemPerLane;
+ unsigned LaneOffset = Lane * NumElemPerLane;
+ unsigned BitIndex = (DstIdx * BitsPerElem) % MaskBits;
+ unsigned SrcIdx = (ElemInLane < NumSelectableElems) ? 0 : 1;
+ unsigned Index = (ShuffleMask >> BitIndex) & IndexMask;
+ return {SrcIdx, LaneOffset + Index};
+ }))
+ return false;
+ return Success(R, E);
+ }
+ case X86::BI__builtin_ia32_shufpd:
+ case X86::BI__builtin_ia32_shufpd256:
+ case X86::BI__builtin_ia32_shufpd512: {
+ APValue R;
+ if (!evalShuffleGeneric(
+ Info, E, R,
+ [](unsigned DstIdx,
+ unsigned ShuffleMask) -> std::pair<unsigned, unsigned> {
+ constexpr unsigned LaneBits = 128u;
+ unsigned NumElemPerLane = LaneBits / 64;
+ unsigned NumSelectableElems = NumElemPerLane / 2;
+ unsigned BitsPerElem = 1;
+ unsigned IndexMask = (1u << BitsPerElem) - 1;
+ unsigned MaskBits = 8;
+ unsigned Lane = DstIdx / NumElemPerLane;
+ unsigned ElemInLane = DstIdx % NumElemPerLane;
+ unsigned LaneOffset = Lane * NumElemPerLane;
+ unsigned BitIndex = (DstIdx * BitsPerElem) % MaskBits;
+ unsigned SrcIdx = (ElemInLane < NumSelectableElems) ? 0 : 1;
+ unsigned Index = (ShuffleMask >> BitIndex) & IndexMask;
+ return {SrcIdx, LaneOffset + Index};
+ }))
+ return false;
+ return Success(R, E);
+ }
case X86::BI__builtin_ia32_pshufb128:
case X86::BI__builtin_ia32_pshufb256:
case X86::BI__builtin_ia32_pshufb512: {
@@ -12906,6 +12988,66 @@ bool VectorExprEvaluator::VisitCallExpr(const CallExpr *E) {
return Success(APValue(Elems.data(), NumElems), E);
}
+
+ case X86::BI__builtin_ia32_pslldqi128_byteshift:
+ case X86::BI__builtin_ia32_pslldqi256_byteshift:
+ case X86::BI__builtin_ia32_pslldqi512_byteshift: {
+ assert(E->getNumArgs() == 2);
+
+ APValue Src;
+ APSInt Imm;
+ if (!EvaluateAsRValue(Info, E->getArg(0), Src) ||
+ !EvaluateInteger(E->getArg(1), Imm, Info))
+ return false;
+
+ unsigned VecLen = Src.getVectorLength();
+ unsigned Shift = Imm.getZExtValue() & 0xff;
+
+ SmallVector<APValue> ResultElements;
+ for (unsigned Lane = 0; Lane != VecLen; Lane += 16) {
+ for (unsigned I = 0; I != 16; ++I) {
+ if (I < Shift) {
+ APSInt Zero(8, /*isUnsigned=*/true);
+ Zero = 0;
+ ResultElements.push_back(APValue(Zero));
+ } else {
+ ResultElements.push_back(Src.getVectorElt(Lane + I - Shift));
+ }
+ }
+ }
+
+ return Success(APValue(ResultElements.data(), ResultElements.size()), E);
+ }
+
+ case X86::BI__builtin_ia32_psrldqi128_byteshift:
+ case X86::BI__builtin_ia32_psrldqi256_byteshift:
+ case X86::BI__builtin_ia32_psrldqi512_byteshift: {
+ assert(E->getNumArgs() == 2);
+
+ APValue Src;
+ APSInt Imm;
+ if (!EvaluateAsRValue(Info, E->getArg(0), Src) ||
+ !EvaluateInteger(E->getArg(1), Imm, Info))
+ return false;
+
+ unsigned VecLen = Src.getVectorLength();
+ unsigned Shift = Imm.getZExtValue() & 0xff;
+
+ SmallVector<APValue> ResultElements;
+ for (unsigned Lane = 0; Lane != VecLen; Lane += 16) {
+ for (unsigned I = 0; I != 16; ++I) {
+ if (I + Shift < 16) {
+ ResultElements.push_back(Src.getVectorElt(Lane + I + Shift));
+ } else {
+ APSInt Zero(8, /*isUnsigned=*/true);
+ Zero = 0;
+ ResultElements.push_back(APValue(Zero));
+ }
+ }
+ }
+
+ return Success(APValue(ResultElements.data(), ResultElements.size()), E);
+ }
}
}
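
The pslldq/psrldq folding above works per 16-byte lane: bytes move by the immediate (clamped to 8 bits) and vacated positions are zero-filled. A small self-contained model of the left-shift case (an assumed helper, not code from this patch):

#include <array>
#include <cstdint>
#include <cstdio>

// One-lane model of the pslldq constant folding: element 0 is the
// least-significant byte, so a left byte shift moves data toward higher
// indices and zero-fills the low end. The evaluator repeats this per lane.
static std::array<uint8_t, 16> pslldqLane(const std::array<uint8_t, 16> &Src,
                                          unsigned Shift) {
  std::array<uint8_t, 16> Dst{};
  for (unsigned I = 0; I != 16; ++I)
    if (I >= Shift)
      Dst[I] = Src[I - Shift]; // bytes below Shift stay zero
  return Dst;
}

int main() {
  std::array<uint8_t, 16> V{};
  for (unsigned I = 0; I != 16; ++I)
    V[I] = static_cast<uint8_t>(I + 1);
  for (uint8_t B : pslldqLane(V, 3)) // prints 0 0 0 1 2 ... 13
    std::printf("%u ", static_cast<unsigned>(B));
  std::printf("\n");
}

The psrldq case is the mirror image: Dst[I] = (I + Shift < 16) ? Src[I + Shift] : 0, matching the second hunk.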
diff --git a/clang/lib/AST/StmtOpenACC.cpp b/clang/lib/AST/StmtOpenACC.cpp
index 462a10d..39dfa19 100644
--- a/clang/lib/AST/StmtOpenACC.cpp
+++ b/clang/lib/AST/StmtOpenACC.cpp
@@ -326,16 +326,30 @@ OpenACCAtomicConstruct *OpenACCAtomicConstruct::Create(
static std::pair<const Expr *, const Expr *> getBinaryOpArgs(const Expr *Op) {
if (const auto *BO = dyn_cast<BinaryOperator>(Op)) {
- assert(BO->getOpcode() == BO_Assign);
+ assert(BO->isAssignmentOp());
return {BO->getLHS(), BO->getRHS()};
}
const auto *OO = cast<CXXOperatorCallExpr>(Op);
- assert(OO->getOperator() == OO_Equal);
-
+ assert(OO->isAssignmentOp());
return {OO->getArg(0), OO->getArg(1)};
}
+static std::pair<bool, const Expr *> getUnaryOpArgs(const Expr *Op) {
+ if (const auto *UO = dyn_cast<UnaryOperator>(Op))
+ return {true, UO->getSubExpr()};
+
+ if (const auto *OpCall = dyn_cast<CXXOperatorCallExpr>(Op)) {
+ // Post-inc/dec have a second unused argument to differentiate them, so we
+ // accept ++ or -- as unary, as well as any operator call with only 1 arg.
+ if (OpCall->getNumArgs() == 1 || OpCall->getOperator() == OO_PlusPlus ||
+ OpCall->getOperator() == OO_MinusMinus)
+ return {true, OpCall->getArg(0)};
+ }
+
+ return {false, nullptr};
+}
+
const OpenACCAtomicConstruct::StmtInfo
OpenACCAtomicConstruct::getAssociatedStmtInfo() const {
// This ends up being a vastly simplified version of SemaOpenACCAtomic, since
@@ -343,18 +357,17 @@ OpenACCAtomicConstruct::getAssociatedStmtInfo() const {
// asserts to ensure we don't get off into the weeds.
assert(getAssociatedStmt() && "invalid associated stmt?");
+ const Expr *AssocStmt = cast<const Expr>(getAssociatedStmt());
switch (AtomicKind) {
- case OpenACCAtomicKind::None:
- case OpenACCAtomicKind::Update:
case OpenACCAtomicKind::Capture:
- assert(false && "Only 'read'/'write' have been implemented here");
+ assert(false && "Only 'read'/'write'/'update' have been implemented here");
return {};
case OpenACCAtomicKind::Read: {
// Read only supports the format 'v = x'; where both sides are a scalar
// expression. This can come in 2 forms; BinaryOperator or
// CXXOperatorCallExpr (rarely).
std::pair<const Expr *, const Expr *> BinaryArgs =
- getBinaryOpArgs(cast<const Expr>(getAssociatedStmt()));
+ getBinaryOpArgs(AssocStmt);
// We want the L-value for each side, so we ignore implicit casts.
return {BinaryArgs.first->IgnoreImpCasts(),
BinaryArgs.second->IgnoreImpCasts(), /*expr=*/nullptr};
@@ -364,13 +377,28 @@ OpenACCAtomicConstruct::getAssociatedStmtInfo() const {
// type, and 'x' is a scalar l value. As above, this can come in 2 forms;
// Binary Operator or CXXOperatorCallExpr.
std::pair<const Expr *, const Expr *> BinaryArgs =
- getBinaryOpArgs(cast<const Expr>(getAssociatedStmt()));
+ getBinaryOpArgs(AssocStmt);
// We want the L-value for ONLY the X side, so we ignore implicit casts. For
// the right side (the expr), we emit it as an r-value so we need to
// maintain implicit casts.
return {/*v=*/nullptr, BinaryArgs.first->IgnoreImpCasts(),
BinaryArgs.second};
}
+ case OpenACCAtomicKind::None:
+ case OpenACCAtomicKind::Update: {
+ std::pair<bool, const Expr *> UnaryArgs = getUnaryOpArgs(AssocStmt);
+ if (UnaryArgs.first)
+ return {/*v=*/nullptr, UnaryArgs.second->IgnoreImpCasts(),
+ /*expr=*/nullptr};
+
+ std::pair<const Expr *, const Expr *> BinaryArgs =
+ getBinaryOpArgs(AssocStmt);
+ // For binary args, we just store the RHS as an expression (in the
+ // expression slot), since the codegen just wants the whole thing for a
+ // recipe.
+ return {/*v=*/nullptr, BinaryArgs.first->IgnoreImpCasts(),
+ BinaryArgs.second};
+ }
}
llvm_unreachable("unknown OpenACC atomic kind");
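
getAssociatedStmtInfo now classifies 'update' (and clause-less) atomics as either a unary increment/decrement or an assignment whose RHS is kept whole. Illustrative source forms the new paths are meant to cover (assumed examples, not tests from this change):

// 'x' is what ends up in the StmtInfo X slot in each case.
void update_forms(int &x, int e) {
#pragma acc atomic update
  x++;        // unary form: getUnaryOpArgs() returns {true, x}

#pragma acc atomic update
  x += e;     // compound assignment: X = LHS, RHS kept in the expr slot

#pragma acc atomic
  x = x + e;  // clause-less atomic defaults to update; RHS stored whole
}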
diff --git a/clang/lib/Basic/FileManager.cpp b/clang/lib/Basic/FileManager.cpp
index 7481e1e..e744cc0 100644
--- a/clang/lib/Basic/FileManager.cpp
+++ b/clang/lib/Basic/FileManager.cpp
@@ -474,8 +474,9 @@ OptionalFileEntryRef FileManager::getBypassFile(FileEntryRef VF) {
return FileEntryRef(*Insertion.first);
}
-bool FileManager::FixupRelativePath(SmallVectorImpl<char> &path) const {
- StringRef pathRef(path.data(), path.size());
+bool FileManager::fixupRelativePath(const FileSystemOptions &FileSystemOpts,
+ SmallVectorImpl<char> &Path) {
+ StringRef pathRef(Path.data(), Path.size());
if (FileSystemOpts.WorkingDir.empty()
|| llvm::sys::path::is_absolute(pathRef))
@@ -483,7 +484,7 @@ bool FileManager::FixupRelativePath(SmallVectorImpl<char> &path) const {
SmallString<128> NewPath(FileSystemOpts.WorkingDir);
llvm::sys::path::append(NewPath, pathRef);
- path = NewPath;
+ Path = NewPath;
return true;
}
diff --git a/clang/lib/Basic/Targets/AArch64.cpp b/clang/lib/Basic/Targets/AArch64.cpp
index 146f058..a97e934 100644
--- a/clang/lib/Basic/Targets/AArch64.cpp
+++ b/clang/lib/Basic/Targets/AArch64.cpp
@@ -206,8 +206,7 @@ AArch64TargetInfo::AArch64TargetInfo(const llvm::Triple &Triple,
StringRef AArch64TargetInfo::getABI() const { return ABI; }
bool AArch64TargetInfo::setABI(const std::string &Name) {
- if (Name != "aapcs" && Name != "aapcs-soft" && Name != "darwinpcs" &&
- Name != "pauthtest")
+ if (Name != "aapcs" && Name != "aapcs-soft" && Name != "darwinpcs")
return false;
ABI = Name;
@@ -221,12 +220,6 @@ bool AArch64TargetInfo::validateTarget(DiagnosticsEngine &Diags) const {
Diags.Report(diag::err_target_unsupported_abi_with_fpu) << ABI;
return false;
}
- if (getTriple().getEnvironment() == llvm::Triple::PAuthTest &&
- getTriple().getOS() != llvm::Triple::Linux) {
- Diags.Report(diag::err_target_unsupported_abi_for_triple)
- << getTriple().getEnvironmentName() << getTriple().getTriple();
- return false;
- }
return true;
}
@@ -818,10 +811,10 @@ bool AArch64TargetInfo::validateCpuSupports(StringRef FeatureStr) const {
bool AArch64TargetInfo::hasFeature(StringRef Feature) const {
return llvm::StringSwitch<bool>(Feature)
- .Cases("aarch64", "arm64", "arm", true)
+ .Cases({"aarch64", "arm64", "arm"}, true)
.Case("fmv", HasFMV)
.Case("fp", FPU & FPUMode)
- .Cases("neon", "simd", FPU & NeonMode)
+ .Cases({"neon", "simd"}, FPU & NeonMode)
.Case("jscvt", HasJSCVT)
.Case("fcma", HasFCMA)
.Case("rng", HasRandGen)
@@ -836,8 +829,8 @@ bool AArch64TargetInfo::hasFeature(StringRef Feature) const {
.Case("cssc", HasCSSC)
.Case("sha2", HasSHA2)
.Case("sha3", HasSHA3)
- .Cases("aes", "pmull", HasAES)
- .Cases("fp16", "fullfp16", HasFullFP16)
+ .Cases({"aes", "pmull"}, HasAES)
+ .Cases({"fp16", "fullfp16"}, HasFullFP16)
.Case("dit", HasDIT)
.Case("dpb", HasCCPP)
.Case("dpb2", HasCCDP)
@@ -866,9 +859,9 @@ bool AArch64TargetInfo::hasFeature(StringRef Feature) const {
.Case("memtag", HasMTE)
.Case("sb", HasSB)
.Case("predres", HasPredRes)
- .Cases("ssbs", "ssbs2", HasSSBS)
+ .Cases({"ssbs", "ssbs2"}, HasSSBS)
.Case("bti", HasBTI)
- .Cases("ls64", "ls64_v", "ls64_accdata", HasLS64)
+ .Cases({"ls64", "ls64_v", "ls64_accdata"}, HasLS64)
.Case("wfxt", HasWFxT)
.Case("rcpc3", HasRCPC3)
.Case("fp8", HasFP8)
diff --git a/clang/lib/Basic/Targets/AArch64.h b/clang/lib/Basic/Targets/AArch64.h
index 3952e7b..7d0737b 100644
--- a/clang/lib/Basic/Targets/AArch64.h
+++ b/clang/lib/Basic/Targets/AArch64.h
@@ -135,6 +135,7 @@ class LLVM_LIBRARY_VISIBILITY AArch64TargetInfo : public TargetInfo {
const llvm::AArch64::ArchInfo *ArchInfo = &llvm::AArch64::ARMV8A;
+protected:
std::string ABI;
public:
@@ -279,6 +280,16 @@ private:
void setDataLayout() override;
};
+template <>
+inline bool
+LinuxTargetInfo<AArch64leTargetInfo>::setABI(const std::string &Name) {
+ if (Name == "pauthtest") {
+ ABI = Name;
+ return true;
+ }
+ return AArch64leTargetInfo::setABI(Name);
+}
+
class LLVM_LIBRARY_VISIBILITY WindowsARM64TargetInfo
: public WindowsTargetInfo<AArch64leTargetInfo> {
const llvm::Triple Triple;
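
The "pauthtest" ABI is now accepted only through the Linux target: OSTargets.h (below) adds a forwarding setABI override so that this full specialization of LinuxTargetInfo<AArch64leTargetInfo>::setABI can layer the extra ABI name on top of the base list. A condensed standalone sketch of that dispatch pattern, with simplified stand-in types (only the shape matches the patch):

#include <string>

// Stand-ins for AArch64leTargetInfo / LinuxTargetInfo; the ABI list is trimmed.
struct AArch64Base {
  std::string ABI;
  virtual ~AArch64Base() = default;
  virtual bool setABI(const std::string &Name) {
    if (Name != "aapcs" && Name != "darwinpcs")
      return false;
    ABI = Name;
    return true;
  }
};

template <typename Target> struct LinuxInfo : Target {
  // Forwarding override; it exists so the member can be specialized below.
  bool setABI(const std::string &Name) override { return Target::setABI(Name); }
};

// Only the Linux AArch64 target accepts "pauthtest".
template <>
inline bool LinuxInfo<AArch64Base>::setABI(const std::string &Name) {
  if (Name == "pauthtest") {
    ABI = Name;
    return true;
  }
  return AArch64Base::setABI(Name);
}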
diff --git a/clang/lib/Basic/Targets/Hexagon.cpp b/clang/lib/Basic/Targets/Hexagon.cpp
index cea64f9..d5b413cb 100644
--- a/clang/lib/Basic/Targets/Hexagon.cpp
+++ b/clang/lib/Basic/Targets/Hexagon.cpp
@@ -83,6 +83,9 @@ void HexagonTargetInfo::getTargetDefines(const LangOptions &Opts,
} else if (CPU == "hexagonv79") {
Builder.defineMacro("__HEXAGON_V79__");
Builder.defineMacro("__HEXAGON_ARCH__", "79");
+ } else if (CPU == "hexagonv81") {
+ Builder.defineMacro("__HEXAGON_V81__");
+ Builder.defineMacro("__HEXAGON_ARCH__", "81");
}
if (hasFeature("hvx-length64b")) {
@@ -252,8 +255,7 @@ static constexpr CPUSuffix Suffixes[] = {
{{"hexagonv68"}, {"68"}}, {{"hexagonv69"}, {"69"}},
{{"hexagonv71"}, {"71"}}, {{"hexagonv71t"}, {"71t"}},
{{"hexagonv73"}, {"73"}}, {{"hexagonv75"}, {"75"}},
- {{"hexagonv79"}, {"79"}},
-};
+ {{"hexagonv79"}, {"79"}}, {{"hexagonv81"}, {"81"}}};
std::optional<unsigned> HexagonTargetInfo::getHexagonCPURev(StringRef Name) {
StringRef Arch = Name;
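
With the hexagonv81 entries above, building for that CPU (e.g. an assumed -mcpu=hexagonv81) defines the usual version macros, so arch guards gain a v81 branch:

#if defined(__HEXAGON_V81__)
static_assert(__HEXAGON_ARCH__ == 81, "v81 pairs with arch level 81");
constexpr int HexagonArchLevel = 81;
#elif defined(__HEXAGON_V79__)
constexpr int HexagonArchLevel = 79;
#else
constexpr int HexagonArchLevel = 0; // older or non-Hexagon target
#endif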
diff --git a/clang/lib/Basic/Targets/OSTargets.cpp b/clang/lib/Basic/Targets/OSTargets.cpp
index e744e84..e99bbd1 100644
--- a/clang/lib/Basic/Targets/OSTargets.cpp
+++ b/clang/lib/Basic/Targets/OSTargets.cpp
@@ -10,6 +10,7 @@
//===----------------------------------------------------------------------===//
#include "OSTargets.h"
+#include "AArch64.h"
#include "clang/Basic/MacroBuilder.h"
#include "llvm/ADT/StringRef.h"
diff --git a/clang/lib/Basic/Targets/OSTargets.h b/clang/lib/Basic/Targets/OSTargets.h
index bd6ffcf..4d81c9a 100644
--- a/clang/lib/Basic/Targets/OSTargets.h
+++ b/clang/lib/Basic/Targets/OSTargets.h
@@ -408,6 +408,12 @@ public:
const char *getStaticInitSectionSpecifier() const override {
return ".text.startup";
}
+
+ // This allows template specializations, see
+ // LinuxTargetInfo<AArch64leTargetInfo>::setABI
+ bool setABI(const std::string &Name) override {
+ return OSTargetInfo<Target>::setABI(Name);
+ }
};
// Managarm Target
diff --git a/clang/lib/Basic/Targets/PPC.h b/clang/lib/Basic/Targets/PPC.h
index 9f3a4cd..846b240 100644
--- a/clang/lib/Basic/Targets/PPC.h
+++ b/clang/lib/Basic/Targets/PPC.h
@@ -122,41 +122,41 @@ public:
.Case("970", ArchDefineName | ArchDefinePwr4 | ArchDefinePpcgr |
ArchDefinePpcsq)
.Case("a2", ArchDefineA2)
- .Cases("power3", "pwr3", ArchDefinePpcgr)
- .Cases("power4", "pwr4",
+ .Cases({"power3", "pwr3"}, ArchDefinePpcgr)
+ .Cases({"power4", "pwr4"},
ArchDefinePwr4 | ArchDefinePpcgr | ArchDefinePpcsq)
.Cases("power5", "pwr5",
ArchDefinePwr5 | ArchDefinePwr4 | ArchDefinePpcgr |
ArchDefinePpcsq)
- .Cases("power5x", "pwr5x",
+ .Cases({"power5x", "pwr5x"},
ArchDefinePwr5x | ArchDefinePwr5 | ArchDefinePwr4 |
ArchDefinePpcgr | ArchDefinePpcsq)
- .Cases("power6", "pwr6",
- ArchDefinePwr6 | ArchDefinePwr5x | ArchDefinePwr5 |
- ArchDefinePwr4 | ArchDefinePpcgr | ArchDefinePpcsq)
- .Cases("power6x", "pwr6x",
+ .Cases({"power6", "pwr6"}, ArchDefinePwr6 | ArchDefinePwr5x |
+ ArchDefinePwr5 | ArchDefinePwr4 |
+ ArchDefinePpcgr | ArchDefinePpcsq)
+ .Cases({"power6x", "pwr6x"},
ArchDefinePwr6x | ArchDefinePwr6 | ArchDefinePwr5x |
ArchDefinePwr5 | ArchDefinePwr4 | ArchDefinePpcgr |
ArchDefinePpcsq)
- .Cases("power7", "pwr7",
- ArchDefinePwr7 | ArchDefinePwr6 | ArchDefinePwr5x |
- ArchDefinePwr5 | ArchDefinePwr4 | ArchDefinePpcgr |
- ArchDefinePpcsq)
+ .Cases({"power7", "pwr7"}, ArchDefinePwr7 | ArchDefinePwr6 |
+ ArchDefinePwr5x | ArchDefinePwr5 |
+ ArchDefinePwr4 | ArchDefinePpcgr |
+ ArchDefinePpcsq)
// powerpc64le automatically defaults to at least power8.
- .Cases("power8", "pwr8", "ppc64le",
+ .Cases({"power8", "pwr8", "ppc64le"},
ArchDefinePwr8 | ArchDefinePwr7 | ArchDefinePwr6 |
ArchDefinePwr5x | ArchDefinePwr5 | ArchDefinePwr4 |
ArchDefinePpcgr | ArchDefinePpcsq)
- .Cases("power9", "pwr9",
+ .Cases({"power9", "pwr9"},
ArchDefinePwr9 | ArchDefinePwr8 | ArchDefinePwr7 |
ArchDefinePwr6 | ArchDefinePwr5x | ArchDefinePwr5 |
ArchDefinePwr4 | ArchDefinePpcgr | ArchDefinePpcsq)
- .Cases("power10", "pwr10",
+ .Cases({"power10", "pwr10"},
ArchDefinePwr10 | ArchDefinePwr9 | ArchDefinePwr8 |
ArchDefinePwr7 | ArchDefinePwr6 | ArchDefinePwr5x |
ArchDefinePwr5 | ArchDefinePwr4 | ArchDefinePpcgr |
ArchDefinePpcsq)
- .Cases("power11", "pwr11",
+ .Cases({"power11", "pwr11"},
ArchDefinePwr11 | ArchDefinePwr10 | ArchDefinePwr9 |
ArchDefinePwr8 | ArchDefinePwr7 | ArchDefinePwr6 |
ArchDefinePwr5x | ArchDefinePwr5 | ArchDefinePwr4 |
diff --git a/clang/lib/CIR/CodeGen/CIRGenBuiltin.cpp b/clang/lib/CIR/CodeGen/CIRGenBuiltin.cpp
index 62fa04e..e35100f 100644
--- a/clang/lib/CIR/CodeGen/CIRGenBuiltin.cpp
+++ b/clang/lib/CIR/CodeGen/CIRGenBuiltin.cpp
@@ -449,10 +449,15 @@ RValue CIRGenFunction::emitBuiltinExpr(const GlobalDecl &gd, unsigned builtinID,
}
case Builtin::BI__builtin_coro_free:
case Builtin::BI__builtin_coro_size: {
- cgm.errorNYI(e->getSourceRange(),
- "BI__builtin_coro_free, BI__builtin_coro_size NYI");
- assert(!cir::MissingFeatures::coroSizeBuiltinCall());
- return getUndefRValue(e->getType());
+ GlobalDecl gd{fd};
+ mlir::Type ty = cgm.getTypes().getFunctionType(
+ cgm.getTypes().arrangeGlobalDeclaration(gd));
+ const auto *nd = cast<NamedDecl>(gd.getDecl());
+ cir::FuncOp fnOp =
+ cgm.getOrCreateCIRFunction(nd->getName(), ty, gd, /*ForVTable=*/false);
+ fnOp.setBuiltin(true);
+ return emitCall(e->getCallee()->getType(), CIRGenCallee::forDirect(fnOp), e,
+ returnValue);
}
case Builtin::BI__builtin_prefetch: {
auto evaluateOperandAsInt = [&](const Expr *arg) {
diff --git a/clang/lib/CIR/CodeGen/CIRGenCXXABI.h b/clang/lib/CIR/CodeGen/CIRGenCXXABI.h
index d3c7dac0..13dc9f3 100644
--- a/clang/lib/CIR/CodeGen/CIRGenCXXABI.h
+++ b/clang/lib/CIR/CodeGen/CIRGenCXXABI.h
@@ -187,6 +187,11 @@ public:
virtual void registerGlobalDtor(const VarDecl *vd, cir::FuncOp dtor,
mlir::Value addr) = 0;
+ virtual void emitVirtualObjectDelete(CIRGenFunction &cgf,
+ const CXXDeleteExpr *de, Address ptr,
+ QualType elementType,
+ const CXXDestructorDecl *dtor) = 0;
+
/// Checks if ABI requires extra virtual offset for vtable field.
virtual bool
isVirtualOffsetNeededForVTableField(CIRGenFunction &cgf,
diff --git a/clang/lib/CIR/CodeGen/CIRGenCall.cpp b/clang/lib/CIR/CodeGen/CIRGenCall.cpp
index 88aef89..50d4c03 100644
--- a/clang/lib/CIR/CodeGen/CIRGenCall.cpp
+++ b/clang/lib/CIR/CodeGen/CIRGenCall.cpp
@@ -671,9 +671,12 @@ RValue CIRGenFunction::emitCall(const CIRGenFunctionInfo &funcInfo,
return RValue::get(results[0]);
}
- case cir::TEK_Complex:
- cgm.errorNYI(loc, "unsupported evaluation kind of function call result");
- return getUndefRValue(retTy);
+ case cir::TEK_Complex: {
+ mlir::ResultRange results = theCall->getOpResults();
+ assert(!results.empty() &&
+ "Expected at least one result for complex rvalue");
+ return RValue::getComplex(results[0]);
+ }
}
llvm_unreachable("Invalid evaluation kind");
}
diff --git a/clang/lib/CIR/CodeGen/CIRGenCleanup.h b/clang/lib/CIR/CodeGen/CIRGenCleanup.h
index 9acf8b1..61a09a5 100644
--- a/clang/lib/CIR/CodeGen/CIRGenCleanup.h
+++ b/clang/lib/CIR/CodeGen/CIRGenCleanup.h
@@ -15,6 +15,7 @@
#define CLANG_LIB_CIR_CODEGEN_CIRGENCLEANUP_H
#include "Address.h"
+#include "CIRGenModule.h"
#include "EHScopeStack.h"
#include "mlir/IR/Value.h"
@@ -257,5 +258,53 @@ inline void EHScopeStack::popCatch() {
deallocate(EHCatchScope::getSizeForNumHandlers(scope.getNumHandlers()));
}
+/// The exceptions personality for a function.
+struct EHPersonality {
+ const char *personalityFn = nullptr;
+
+ // If this is non-null, this personality requires a non-standard
+ // function for rethrowing an exception after a catchall cleanup.
+ // This function must have prototype void(void*).
+ const char *catchallRethrowFn = nullptr;
+
+ static const EHPersonality &get(CIRGenModule &cgm,
+ const clang::FunctionDecl *fd);
+ static const EHPersonality &get(CIRGenFunction &cgf);
+
+ static const EHPersonality GNU_C;
+ static const EHPersonality GNU_C_SJLJ;
+ static const EHPersonality GNU_C_SEH;
+ static const EHPersonality GNU_ObjC;
+ static const EHPersonality GNU_ObjC_SJLJ;
+ static const EHPersonality GNU_ObjC_SEH;
+ static const EHPersonality GNUstep_ObjC;
+ static const EHPersonality GNU_ObjCXX;
+ static const EHPersonality NeXT_ObjC;
+ static const EHPersonality GNU_CPlusPlus;
+ static const EHPersonality GNU_CPlusPlus_SJLJ;
+ static const EHPersonality GNU_CPlusPlus_SEH;
+ static const EHPersonality MSVC_except_handler;
+ static const EHPersonality MSVC_C_specific_handler;
+ static const EHPersonality MSVC_CxxFrameHandler3;
+ static const EHPersonality GNU_Wasm_CPlusPlus;
+ static const EHPersonality XL_CPlusPlus;
+ static const EHPersonality ZOS_CPlusPlus;
+
+ /// Does this personality use landingpads or the family of pad instructions
+ /// designed to form funclets?
+ bool usesFuncletPads() const {
+ return isMSVCPersonality() || isWasmPersonality();
+ }
+
+ bool isMSVCPersonality() const {
+ return this == &MSVC_except_handler || this == &MSVC_C_specific_handler ||
+ this == &MSVC_CxxFrameHandler3;
+ }
+
+ bool isWasmPersonality() const { return this == &GNU_Wasm_CPlusPlus; }
+
+ bool isMSVCXXPersonality() const { return this == &MSVC_CxxFrameHandler3; }
+};
+
} // namespace clang::CIRGen
#endif // CLANG_LIB_CIR_CODEGEN_CIRGENCLEANUP_H
diff --git a/clang/lib/CIR/CodeGen/CIRGenCoroutine.cpp b/clang/lib/CIR/CodeGen/CIRGenCoroutine.cpp
index c25cce4..8723a6e 100644
--- a/clang/lib/CIR/CodeGen/CIRGenCoroutine.cpp
+++ b/clang/lib/CIR/CodeGen/CIRGenCoroutine.cpp
@@ -15,6 +15,7 @@
#include "clang/AST/StmtCXX.h"
#include "clang/Basic/TargetInfo.h"
#include "clang/CIR/Dialect/IR/CIRTypes.h"
+#include "clang/CIR/MissingFeatures.h"
using namespace clang;
using namespace clang::CIRGen;
@@ -23,6 +24,9 @@ struct clang::CIRGen::CGCoroData {
// Stores the __builtin_coro_id emitted in the function so that we can supply
// it as the first argument to other builtins.
cir::CallOp coroId = nullptr;
+
+ // Stores the result of __builtin_coro_begin call.
+ mlir::Value coroBegin = nullptr;
};
// Defining these here allows to keep CGCoroData private to this file.
@@ -63,6 +67,46 @@ cir::CallOp CIRGenFunction::emitCoroIDBuiltinCall(mlir::Location loc,
nullPtr, nullPtr, nullPtr});
}
+cir::CallOp CIRGenFunction::emitCoroAllocBuiltinCall(mlir::Location loc) {
+ cir::BoolType boolTy = builder.getBoolTy();
+
+ mlir::Operation *builtin = cgm.getGlobalValue(cgm.builtinCoroAlloc);
+
+ cir::FuncOp fnOp;
+ if (!builtin) {
+ fnOp = cgm.createCIRBuiltinFunction(loc, cgm.builtinCoroAlloc,
+ cir::FuncType::get({UInt32Ty}, boolTy),
+ /*fd=*/nullptr);
+ assert(fnOp && "should always succeed");
+ } else {
+ fnOp = cast<cir::FuncOp>(builtin);
+ }
+
+ return builder.createCallOp(
+ loc, fnOp, mlir::ValueRange{curCoro.data->coroId.getResult()});
+}
+
+cir::CallOp
+CIRGenFunction::emitCoroBeginBuiltinCall(mlir::Location loc,
+ mlir::Value coroframeAddr) {
+ mlir::Operation *builtin = cgm.getGlobalValue(cgm.builtinCoroBegin);
+
+ cir::FuncOp fnOp;
+ if (!builtin) {
+ fnOp = cgm.createCIRBuiltinFunction(
+ loc, cgm.builtinCoroBegin,
+ cir::FuncType::get({UInt32Ty, VoidPtrTy}, VoidPtrTy),
+ /*fd=*/nullptr);
+ assert(fnOp && "should always succeed");
+ } else {
+ fnOp = cast<cir::FuncOp>(builtin);
+ }
+
+ return builder.createCallOp(
+ loc, fnOp,
+ mlir::ValueRange{curCoro.data->coroId.getResult(), coroframeAddr});
+}
+
mlir::LogicalResult
CIRGenFunction::emitCoroutineBody(const CoroutineBodyStmt &s) {
mlir::Location openCurlyLoc = getLoc(s.getBeginLoc());
@@ -73,10 +117,39 @@ CIRGenFunction::emitCoroutineBody(const CoroutineBodyStmt &s) {
cir::CallOp coroId = emitCoroIDBuiltinCall(openCurlyLoc, nullPtrCst);
createCoroData(*this, curCoro, coroId);
- assert(!cir::MissingFeatures::coroAllocBuiltinCall());
-
- assert(!cir::MissingFeatures::coroBeginBuiltinCall());
+ // The backend is allowed to elide memory allocations; to help it, emit
+ // auto mem = coro.alloc() ? 0 : ... allocation code ...;
+ cir::CallOp coroAlloc = emitCoroAllocBuiltinCall(openCurlyLoc);
+
+ // Initialize address of coroutine frame to null
+ CanQualType astVoidPtrTy = cgm.getASTContext().VoidPtrTy;
+ mlir::Type allocaTy = convertTypeForMem(astVoidPtrTy);
+ Address coroFrame =
+ createTempAlloca(allocaTy, getContext().getTypeAlignInChars(astVoidPtrTy),
+ openCurlyLoc, "__coro_frame_addr",
+ /*ArraySize=*/nullptr);
+
+ mlir::Value storeAddr = coroFrame.getPointer();
+ builder.CIRBaseBuilderTy::createStore(openCurlyLoc, nullPtrCst, storeAddr);
+ cir::IfOp::create(
+ builder, openCurlyLoc, coroAlloc.getResult(),
+ /*withElseRegion=*/false,
+ /*thenBuilder=*/[&](mlir::OpBuilder &b, mlir::Location loc) {
+ builder.CIRBaseBuilderTy::createStore(
+ loc, emitScalarExpr(s.getAllocate()), storeAddr);
+ cir::YieldOp::create(builder, loc);
+ });
+ curCoro.data->coroBegin =
+ emitCoroBeginBuiltinCall(
+ openCurlyLoc,
+ cir::LoadOp::create(builder, openCurlyLoc, allocaTy, storeAddr))
+ .getResult();
+
+ // Handle allocation failure if 'ReturnStmtOnAllocFailure' was provided.
+ if (s.getReturnStmtOnAllocFailure())
+ cgm.errorNYI("handle coroutine return alloc failure");
assert(!cir::MissingFeatures::generateDebugInfo());
+ assert(!cir::MissingFeatures::emitBodyAndFallthrough());
return mlir::success();
}
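
emitCoroutineBody now mirrors classic codegen's frame setup: coro.id, a coro.alloc guard around the allocation expression, then coro.begin on the chosen frame pointer. A minimal C++20 coroutine (an assumed example, not a test from this change) that drives this path when compiled with ClangIR:

#include <coroutine>

// Smallest promise type that lets 'co_return' compile; building this with
// -std=c++20 reaches the new __builtin_coro_alloc / __builtin_coro_begin
// emission.
struct Task {
  struct promise_type {
    Task get_return_object() { return {}; }
    std::suspend_never initial_suspend() { return {}; }
    std::suspend_never final_suspend() noexcept { return {}; }
    void return_void() {}
    void unhandled_exception() {}
  };
};

Task ping() { co_return; }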
diff --git a/clang/lib/CIR/CodeGen/CIRGenException.cpp b/clang/lib/CIR/CodeGen/CIRGenException.cpp
index 717a3e0..67f46ff 100644
--- a/clang/lib/CIR/CodeGen/CIRGenException.cpp
+++ b/clang/lib/CIR/CodeGen/CIRGenException.cpp
@@ -18,6 +18,171 @@
using namespace clang;
using namespace clang::CIRGen;
+const EHPersonality EHPersonality::GNU_C = {"__gcc_personality_v0", nullptr};
+const EHPersonality EHPersonality::GNU_C_SJLJ = {"__gcc_personality_sj0",
+ nullptr};
+const EHPersonality EHPersonality::GNU_C_SEH = {"__gcc_personality_seh0",
+ nullptr};
+const EHPersonality EHPersonality::NeXT_ObjC = {"__objc_personality_v0",
+ nullptr};
+const EHPersonality EHPersonality::GNU_CPlusPlus = {"__gxx_personality_v0",
+ nullptr};
+const EHPersonality EHPersonality::GNU_CPlusPlus_SJLJ = {
+ "__gxx_personality_sj0", nullptr};
+const EHPersonality EHPersonality::GNU_CPlusPlus_SEH = {
+ "__gxx_personality_seh0", nullptr};
+const EHPersonality EHPersonality::GNU_ObjC = {"__gnu_objc_personality_v0",
+ "objc_exception_throw"};
+const EHPersonality EHPersonality::GNU_ObjC_SJLJ = {
+ "__gnu_objc_personality_sj0", "objc_exception_throw"};
+const EHPersonality EHPersonality::GNU_ObjC_SEH = {
+ "__gnu_objc_personality_seh0", "objc_exception_throw"};
+const EHPersonality EHPersonality::GNU_ObjCXX = {
+ "__gnustep_objcxx_personality_v0", nullptr};
+const EHPersonality EHPersonality::GNUstep_ObjC = {
+ "__gnustep_objc_personality_v0", nullptr};
+const EHPersonality EHPersonality::MSVC_except_handler = {"_except_handler3",
+ nullptr};
+const EHPersonality EHPersonality::MSVC_C_specific_handler = {
+ "__C_specific_handler", nullptr};
+const EHPersonality EHPersonality::MSVC_CxxFrameHandler3 = {
+ "__CxxFrameHandler3", nullptr};
+const EHPersonality EHPersonality::GNU_Wasm_CPlusPlus = {
+ "__gxx_wasm_personality_v0", nullptr};
+const EHPersonality EHPersonality::XL_CPlusPlus = {"__xlcxx_personality_v1",
+ nullptr};
+const EHPersonality EHPersonality::ZOS_CPlusPlus = {"__zos_cxx_personality_v2",
+ nullptr};
+
+static const EHPersonality &getCPersonality(const TargetInfo &target,
+ const CodeGenOptions &cgOpts) {
+ const llvm::Triple &triple = target.getTriple();
+ if (triple.isWindowsMSVCEnvironment())
+ return EHPersonality::MSVC_CxxFrameHandler3;
+ if (cgOpts.hasSjLjExceptions())
+ return EHPersonality::GNU_C_SJLJ;
+ if (cgOpts.hasDWARFExceptions())
+ return EHPersonality::GNU_C;
+ if (cgOpts.hasSEHExceptions())
+ return EHPersonality::GNU_C_SEH;
+ return EHPersonality::GNU_C;
+}
+
+static const EHPersonality &getObjCPersonality(const TargetInfo &target,
+ const LangOptions &langOpts,
+ const CodeGenOptions &cgOpts) {
+ const llvm::Triple &triple = target.getTriple();
+ if (triple.isWindowsMSVCEnvironment())
+ return EHPersonality::MSVC_CxxFrameHandler3;
+
+ switch (langOpts.ObjCRuntime.getKind()) {
+ case ObjCRuntime::FragileMacOSX:
+ return getCPersonality(target, cgOpts);
+ case ObjCRuntime::MacOSX:
+ case ObjCRuntime::iOS:
+ case ObjCRuntime::WatchOS:
+ return EHPersonality::NeXT_ObjC;
+ case ObjCRuntime::GNUstep:
+ if (langOpts.ObjCRuntime.getVersion() >= VersionTuple(1, 7))
+ return EHPersonality::GNUstep_ObjC;
+ [[fallthrough]];
+ case ObjCRuntime::GCC:
+ case ObjCRuntime::ObjFW:
+ if (cgOpts.hasSjLjExceptions())
+ return EHPersonality::GNU_ObjC_SJLJ;
+ if (cgOpts.hasSEHExceptions())
+ return EHPersonality::GNU_ObjC_SEH;
+ return EHPersonality::GNU_ObjC;
+ }
+ llvm_unreachable("bad runtime kind");
+}
+
+static const EHPersonality &getCXXPersonality(const TargetInfo &target,
+ const CodeGenOptions &cgOpts) {
+ const llvm::Triple &triple = target.getTriple();
+ if (triple.isWindowsMSVCEnvironment())
+ return EHPersonality::MSVC_CxxFrameHandler3;
+ if (triple.isOSAIX())
+ return EHPersonality::XL_CPlusPlus;
+ if (cgOpts.hasSjLjExceptions())
+ return EHPersonality::GNU_CPlusPlus_SJLJ;
+ if (cgOpts.hasDWARFExceptions())
+ return EHPersonality::GNU_CPlusPlus;
+ if (cgOpts.hasSEHExceptions())
+ return EHPersonality::GNU_CPlusPlus_SEH;
+ if (cgOpts.hasWasmExceptions())
+ return EHPersonality::GNU_Wasm_CPlusPlus;
+ return EHPersonality::GNU_CPlusPlus;
+}
+
+/// Determines the personality function to use when both C++
+/// and Objective-C exceptions are being caught.
+static const EHPersonality &getObjCXXPersonality(const TargetInfo &target,
+ const LangOptions &langOpts,
+ const CodeGenOptions &cgOpts) {
+ if (target.getTriple().isWindowsMSVCEnvironment())
+ return EHPersonality::MSVC_CxxFrameHandler3;
+
+ switch (langOpts.ObjCRuntime.getKind()) {
+ // In the fragile ABI, just use C++ exception handling and hope
+ // they're not doing crazy exception mixing.
+ case ObjCRuntime::FragileMacOSX:
+ return getCXXPersonality(target, cgOpts);
+
+ // The ObjC personality defers to the C++ personality for non-ObjC
+ // handlers. Unlike the C++ case, we use the same personality
+ // function on targets using (backend-driven) SJLJ EH.
+ case ObjCRuntime::MacOSX:
+ case ObjCRuntime::iOS:
+ case ObjCRuntime::WatchOS:
+ return getObjCPersonality(target, langOpts, cgOpts);
+
+ case ObjCRuntime::GNUstep:
+ return EHPersonality::GNU_ObjCXX;
+
+ // The GCC runtime's personality function inherently doesn't support
+ // mixed EH. Use the ObjC personality just to avoid returning null.
+ case ObjCRuntime::GCC:
+ case ObjCRuntime::ObjFW:
+ return getObjCPersonality(target, langOpts, cgOpts);
+ }
+ llvm_unreachable("bad runtime kind");
+}
+
+static const EHPersonality &getSEHPersonalityMSVC(const llvm::Triple &triple) {
+ return triple.getArch() == llvm::Triple::x86
+ ? EHPersonality::MSVC_except_handler
+ : EHPersonality::MSVC_C_specific_handler;
+}
+
+const EHPersonality &EHPersonality::get(CIRGenModule &cgm,
+ const FunctionDecl *fd) {
+ const llvm::Triple &triple = cgm.getTarget().getTriple();
+ const LangOptions &langOpts = cgm.getLangOpts();
+ const CodeGenOptions &cgOpts = cgm.getCodeGenOpts();
+ const TargetInfo &target = cgm.getTarget();
+
+ // Functions using SEH get an SEH personality.
+ if (fd && fd->usesSEHTry())
+ return getSEHPersonalityMSVC(triple);
+
+ if (langOpts.ObjC) {
+ return langOpts.CPlusPlus ? getObjCXXPersonality(target, langOpts, cgOpts)
+ : getObjCPersonality(target, langOpts, cgOpts);
+ }
+ return langOpts.CPlusPlus ? getCXXPersonality(target, cgOpts)
+ : getCPersonality(target, cgOpts);
+}
+
+const EHPersonality &EHPersonality::get(CIRGenFunction &cgf) {
+ const auto *fg = cgf.curCodeDecl;
+ // For outlined finallys and filters, use the SEH personality in case they
+ // contain more SEH. This mostly only affects finallys. Filters could
+ // hypothetically use gnu statement expressions to sneak in nested SEH.
+ fg = fg ? fg : cgf.curSEHParent.getDecl();
+ return get(cgf.cgm, dyn_cast_or_null<FunctionDecl>(fg));
+}
+
void CIRGenFunction::emitCXXThrowExpr(const CXXThrowExpr *e) {
const llvm::Triple &triple = getTarget().getTriple();
if (cgm.getLangOpts().OpenMPIsTargetDevice &&
diff --git a/clang/lib/CIR/CodeGen/CIRGenExprAggregate.cpp b/clang/lib/CIR/CodeGen/CIRGenExprAggregate.cpp
index d6d226b..8fe0d9b4 100644
--- a/clang/lib/CIR/CodeGen/CIRGenExprAggregate.cpp
+++ b/clang/lib/CIR/CodeGen/CIRGenExprAggregate.cpp
@@ -362,8 +362,7 @@ public:
cgf.cgm.errorNYI(e->getSourceRange(), "AggExprEmitter: VisitCXXTypeidExpr");
}
void VisitMaterializeTemporaryExpr(MaterializeTemporaryExpr *e) {
- cgf.cgm.errorNYI(e->getSourceRange(),
- "AggExprEmitter: VisitMaterializeTemporaryExpr");
+ Visit(e->getSubExpr());
}
void VisitOpaqueValueExpr(OpaqueValueExpr *e) {
cgf.cgm.errorNYI(e->getSourceRange(),
diff --git a/clang/lib/CIR/CodeGen/CIRGenExprCXX.cpp b/clang/lib/CIR/CodeGen/CIRGenExprCXX.cpp
index a3cdf19..7a35382 100644
--- a/clang/lib/CIR/CodeGen/CIRGenExprCXX.cpp
+++ b/clang/lib/CIR/CodeGen/CIRGenExprCXX.cpp
@@ -565,8 +565,10 @@ static void emitObjectDelete(CIRGenFunction &cgf, const CXXDeleteExpr *de,
dtor = rd->getDestructor();
if (dtor->isVirtual()) {
- cgf.cgm.errorNYI(de->getSourceRange(),
- "emitObjectDelete: virtual destructor");
+ assert(!cir::MissingFeatures::devirtualizeDestructor());
+ cgf.cgm.getCXXABI().emitVirtualObjectDelete(cgf, de, ptr, elementType,
+ dtor);
+ return;
}
}
}
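
emitObjectDelete now forwards deletes through a virtual destructor to the ABI's emitVirtualObjectDelete, which on Itanium issues a virtual call to the deleting destructor. The source pattern that reaches this path looks like (illustrative, not from the patch):

struct Base {
  virtual ~Base();
};
struct Derived : Base {
  ~Derived() override;
};

// 'delete b' cannot know the dynamic type, so it dispatches through the vtable;
// ClangIR now emits that via emitVirtualDestructorCall instead of an NYI error.
void destroy(Base *b) { delete b; }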
diff --git a/clang/lib/CIR/CodeGen/CIRGenExprConstant.cpp b/clang/lib/CIR/CodeGen/CIRGenExprConstant.cpp
index 7de3dd0..928e5aa 100644
--- a/clang/lib/CIR/CodeGen/CIRGenExprConstant.cpp
+++ b/clang/lib/CIR/CodeGen/CIRGenExprConstant.cpp
@@ -922,9 +922,9 @@ public:
}
mlir::Attribute VisitCastExpr(CastExpr *e, QualType destType) {
- if (isa<ExplicitCastExpr>(e))
- cgm.errorNYI(e->getBeginLoc(),
- "ConstExprEmitter::VisitCastExpr explicit cast");
+ if (const auto *ece = dyn_cast<ExplicitCastExpr>(e))
+ cgm.emitExplicitCastExprType(ece,
+ const_cast<CIRGenFunction *>(emitter.cgf));
Expr *subExpr = e->getSubExpr();
@@ -1078,9 +1078,32 @@ public:
mlir::Attribute VisitCXXConstructExpr(CXXConstructExpr *e, QualType ty) {
if (!e->getConstructor()->isTrivial())
- return nullptr;
- cgm.errorNYI(e->getBeginLoc(), "trivial constructor const handling");
- return {};
+ return {};
+
+ // Only default and copy/move constructors can be trivial.
+ if (e->getNumArgs()) {
+ assert(e->getNumArgs() == 1 && "trivial ctor with > 1 argument");
+ assert(e->getConstructor()->isCopyOrMoveConstructor() &&
+ "trivial ctor has argument but isn't a copy/move ctor");
+
+ Expr *arg = e->getArg(0);
+ assert(cgm.getASTContext().hasSameUnqualifiedType(ty, arg->getType()) &&
+ "argument to copy ctor is of wrong type");
+
+ // Look through the temporary; it's just converting the value to an lvalue
+ // to pass it to the constructor.
+ if (auto const *mte = dyn_cast<MaterializeTemporaryExpr>(arg))
+ return Visit(mte->getSubExpr(), ty);
+
+ // TODO: Investigate whether there are cases that can fall through to here
+ // that need to be handled. This is missing in classic codegen also.
+ assert(!cir::MissingFeatures::ctorConstLvalueToRvalueConversion());
+
+ // Don't try to support arbitrary lvalue-to-rvalue conversions for now.
+ return {};
+ }
+
+ return cgm.getBuilder().getZeroInitAttr(cgm.convertType(ty));
}
mlir::Attribute VisitStringLiteral(StringLiteral *e, QualType t) {
diff --git a/clang/lib/CIR/CodeGen/CIRGenFunction.h b/clang/lib/CIR/CodeGen/CIRGenFunction.h
index 5f9dbdc..c3fcd1a6 100644
--- a/clang/lib/CIR/CodeGen/CIRGenFunction.h
+++ b/clang/lib/CIR/CodeGen/CIRGenFunction.h
@@ -120,6 +120,8 @@ public:
/// Tracks function scope overall cleanup handling.
EHScopeStack ehStack;
+ GlobalDecl curSEHParent;
+
llvm::DenseMap<const clang::ValueDecl *, clang::FieldDecl *>
lambdaCaptureFields;
clang::FieldDecl *lambdaThisCaptureField = nullptr;
@@ -665,6 +667,12 @@ public:
symbolTable.insert(vd, addr.getPointer());
}
+ // Replaces the address of the local variable, if it exists. Else does the
+ // same thing as setAddrOfLocalVar.
+ void replaceAddrOfLocalVar(const clang::VarDecl *vd, Address addr) {
+ localDeclMap.insert_or_assign(vd, addr);
+ }
+
// A class to allow reverting changes to a var-decl's registration to the
// localDeclMap. This is used in cases where things are being inserted into
// the variable list but don't follow normal lookup/search rules, like in
@@ -1326,6 +1334,9 @@ public:
mlir::LogicalResult emitCoroutineBody(const CoroutineBodyStmt &s);
cir::CallOp emitCoroEndBuiltinCall(mlir::Location loc, mlir::Value nullPtr);
cir::CallOp emitCoroIDBuiltinCall(mlir::Location loc, mlir::Value nullPtr);
+ cir::CallOp emitCoroAllocBuiltinCall(mlir::Location loc);
+ cir::CallOp emitCoroBeginBuiltinCall(mlir::Location loc,
+ mlir::Value coroframeAddr);
void emitDestroy(Address addr, QualType type, Destroyer *destroyer);
diff --git a/clang/lib/CIR/CodeGen/CIRGenItaniumCXXABI.cpp b/clang/lib/CIR/CodeGen/CIRGenItaniumCXXABI.cpp
index 2dce0b1..88fedf1 100644
--- a/clang/lib/CIR/CodeGen/CIRGenItaniumCXXABI.cpp
+++ b/clang/lib/CIR/CodeGen/CIRGenItaniumCXXABI.cpp
@@ -74,6 +74,9 @@ public:
QualType thisTy) override;
void registerGlobalDtor(const VarDecl *vd, cir::FuncOp dtor,
mlir::Value addr) override;
+ void emitVirtualObjectDelete(CIRGenFunction &cgf, const CXXDeleteExpr *de,
+ Address ptr, QualType elementType,
+ const CXXDestructorDecl *dtor) override;
void emitRethrow(CIRGenFunction &cgf, bool isNoReturn) override;
void emitThrow(CIRGenFunction &cgf, const CXXThrowExpr *e) override;
@@ -2175,6 +2178,21 @@ mlir::Value CIRGenItaniumCXXABI::emitDynamicCast(CIRGenFunction &cgf,
isRefCast, castInfo);
}
+/// The Itanium ABI always places an offset to the complete object
+/// at entry -2 in the vtable.
+void CIRGenItaniumCXXABI::emitVirtualObjectDelete(
+ CIRGenFunction &cgf, const CXXDeleteExpr *delExpr, Address ptr,
+ QualType elementType, const CXXDestructorDecl *dtor) {
+ bool useGlobalDelete = delExpr->isGlobalDelete();
+ if (useGlobalDelete) {
+ cgf.cgm.errorNYI(delExpr->getSourceRange(),
+ "emitVirtualObjectDelete: global delete");
+ }
+
+ CXXDtorType dtorType = useGlobalDelete ? Dtor_Complete : Dtor_Deleting;
+ emitVirtualDestructorCall(cgf, dtor, dtorType, ptr, delExpr);
+}
+
/************************** Array allocation cookies **************************/
CharUnits CIRGenItaniumCXXABI::getArrayCookieSizeImpl(QualType elementType) {
diff --git a/clang/lib/CIR/CodeGen/CIRGenModule.h b/clang/lib/CIR/CodeGen/CIRGenModule.h
index 1fc116d..186913d 100644
--- a/clang/lib/CIR/CodeGen/CIRGenModule.h
+++ b/clang/lib/CIR/CodeGen/CIRGenModule.h
@@ -496,6 +496,8 @@ public:
bool assumeConvergent = false);
static constexpr const char *builtinCoroId = "__builtin_coro_id";
+ static constexpr const char *builtinCoroAlloc = "__builtin_coro_alloc";
+ static constexpr const char *builtinCoroBegin = "__builtin_coro_begin";
/// Given a builtin id for a function like "__builtin_fabsf", return a
/// Function* for "fabsf".
diff --git a/clang/lib/CIR/CodeGen/CIRGenStmtOpenACC.cpp b/clang/lib/CIR/CodeGen/CIRGenStmtOpenACC.cpp
index 349b111..9e55bd5 100644
--- a/clang/lib/CIR/CodeGen/CIRGenStmtOpenACC.cpp
+++ b/clang/lib/CIR/CodeGen/CIRGenStmtOpenACC.cpp
@@ -304,12 +304,21 @@ CIRGenFunction::emitOpenACCCacheConstruct(const OpenACCCacheConstruct &s) {
return mlir::success();
}
+const VarDecl *getLValueDecl(const Expr *e) {
+ // We assume that, after stripping implicit casts, the LValue is just a DRE
+ // around the var-decl.
+
+ e = e->IgnoreImpCasts();
+
+ const auto *dre = cast<DeclRefExpr>(e);
+ return cast<VarDecl>(dre->getDecl());
+}
+
mlir::LogicalResult
CIRGenFunction::emitOpenACCAtomicConstruct(const OpenACCAtomicConstruct &s) {
- // For now, we are only support 'read'/'write', so diagnose. We can switch on
- // the kind later once we start implementing the other 2 forms. While we
- if (s.getAtomicKind() != OpenACCAtomicKind::Read &&
- s.getAtomicKind() != OpenACCAtomicKind::Write) {
+ // For now, we only support 'read'/'write'/'update', so diagnose. We can
+ // switch on the kind later once we implement the 'capture' form.
+ if (s.getAtomicKind() == OpenACCAtomicKind::Capture) {
cgm.errorNYI(s.getSourceRange(), "OpenACC Atomic Construct");
return mlir::failure();
}
@@ -318,11 +327,10 @@ CIRGenFunction::emitOpenACCAtomicConstruct(const OpenACCAtomicConstruct &s) {
// expression it is associated with rather than emitting it inside of it. So
// it has custom emit logic.
mlir::Location start = getLoc(s.getSourceRange().getBegin());
+ mlir::Location end = getLoc(s.getSourceRange().getEnd());
OpenACCAtomicConstruct::StmtInfo inf = s.getAssociatedStmtInfo();
switch (s.getAtomicKind()) {
- case OpenACCAtomicKind::None:
- case OpenACCAtomicKind::Update:
case OpenACCAtomicKind::Capture:
llvm_unreachable("Unimplemented atomic construct type, should have "
"diagnosed/returned above");
@@ -353,6 +361,50 @@ CIRGenFunction::emitOpenACCAtomicConstruct(const OpenACCAtomicConstruct &s) {
s.clauses());
return mlir::success();
}
+ case OpenACCAtomicKind::None:
+ case OpenACCAtomicKind::Update: {
+ mlir::Value x = emitLValue(inf.X).getPointer();
+ auto op =
+ mlir::acc::AtomicUpdateOp::create(builder, start, x, /*ifCond=*/{});
+ emitOpenACCClauses(op, s.getDirectiveKind(), s.getDirectiveLoc(),
+ s.clauses());
+ mlir::LogicalResult res = mlir::success();
+ {
+ mlir::OpBuilder::InsertionGuard guardCase(builder);
+ mlir::Type argTy = cast<cir::PointerType>(x.getType()).getPointee();
+ std::array<mlir::Type, 1> recipeType{argTy};
+ std::array<mlir::Location, 1> recipeLoc{start};
+ mlir::Block *recipeBlock = builder.createBlock(
+ &op.getRegion(), op.getRegion().end(), recipeType, recipeLoc);
+ builder.setInsertionPointToEnd(recipeBlock);
+
+ // Since we have an initial value that we know is a scalar type, we can
+ // just emit the entire statement here after sneaking-in our 'alloca' in
+ // the right place, then loading out of it. Flang does a lot less work
+ // (probably does its own emitting!), but we have more complicated AST
+ // nodes to worry about, so we can just count on opt to remove the extra
+ // alloca/load/store set.
+ auto alloca = cir::AllocaOp::create(
+ builder, start, x.getType(), argTy, "x_var",
+ cgm.getSize(getContext().getTypeAlignInChars(inf.X->getType())));
+
+ alloca.setInitAttr(mlir::UnitAttr::get(&getMLIRContext()));
+ builder.CIRBaseBuilderTy::createStore(start, recipeBlock->getArgument(0),
+ alloca);
+
+ const VarDecl *xval = getLValueDecl(inf.X);
+ CIRGenFunction::DeclMapRevertingRAII declMapRAII{*this, xval};
+ replaceAddrOfLocalVar(
+ xval, Address{alloca, argTy, getContext().getDeclAlign(xval)});
+
+ res = emitStmt(s.getAssociatedStmt(), /*useCurrentScope=*/true);
+
+ auto load = cir::LoadOp::create(builder, start, {alloca});
+ mlir::acc::YieldOp::create(builder, end, {load});
+ }
+
+ return res;
+ }
}
llvm_unreachable("unknown OpenACC atomic kind");
diff --git a/clang/lib/CodeGen/CodeGenModule.cpp b/clang/lib/CodeGen/CodeGenModule.cpp
index 3746bc04..0fea57b 100644
--- a/clang/lib/CodeGen/CodeGenModule.cpp
+++ b/clang/lib/CodeGen/CodeGenModule.cpp
@@ -146,8 +146,6 @@ createTargetCodeGenInfo(CodeGenModule &CGM) {
return createWindowsAArch64TargetCodeGenInfo(CGM, AArch64ABIKind::Win64);
else if (Target.getABI() == "aapcs-soft")
Kind = AArch64ABIKind::AAPCSSoft;
- else if (Target.getABI() == "pauthtest")
- Kind = AArch64ABIKind::PAuthTest;
return createAArch64TargetCodeGenInfo(CGM, Kind);
}
diff --git a/clang/lib/CodeGen/TargetBuiltins/RISCV.cpp b/clang/lib/CodeGen/TargetBuiltins/RISCV.cpp
index 920d285..1300722 100644
--- a/clang/lib/CodeGen/TargetBuiltins/RISCV.cpp
+++ b/clang/lib/CodeGen/TargetBuiltins/RISCV.cpp
@@ -1121,6 +1121,8 @@ Value *CodeGenFunction::EmitRISCVBuiltinExpr(unsigned BuiltinID,
bool IsMasked = false;
// This is used by segment load/store to determine its llvm type.
unsigned SegInstSEW = 8;
+ // This is used by XSfmm.
+ unsigned TWiden = 0;
// Required for overloaded intrinsics.
llvm::SmallVector<llvm::Type *, 2> IntrinsicTypes;
diff --git a/clang/lib/CodeGen/TargetInfo.h b/clang/lib/CodeGen/TargetInfo.h
index d0edae1..f63e900 100644
--- a/clang/lib/CodeGen/TargetInfo.h
+++ b/clang/lib/CodeGen/TargetInfo.h
@@ -483,7 +483,6 @@ enum class AArch64ABIKind {
DarwinPCS,
Win64,
AAPCSSoft,
- PAuthTest,
};
std::unique_ptr<TargetCodeGenInfo>
diff --git a/clang/lib/Driver/ToolChain.cpp b/clang/lib/Driver/ToolChain.cpp
index 3d5cac6..eea5c2f 100644
--- a/clang/lib/Driver/ToolChain.cpp
+++ b/clang/lib/Driver/ToolChain.cpp
@@ -1253,7 +1253,6 @@ std::string ToolChain::ComputeLLVMTriple(const ArgList &Args,
}
case llvm::Triple::aarch64: {
llvm::Triple Triple = getTriple();
- tools::aarch64::setPAuthABIInTriple(getDriver(), Args, Triple);
if (!Triple.isOSBinFormatMachO())
return Triple.getTriple();
diff --git a/clang/lib/Driver/ToolChains/Arch/AArch64.cpp b/clang/lib/Driver/ToolChains/Arch/AArch64.cpp
index eb5d542..e8d5e38 100644
--- a/clang/lib/Driver/ToolChains/Arch/AArch64.cpp
+++ b/clang/lib/Driver/ToolChains/Arch/AArch64.cpp
@@ -466,27 +466,6 @@ void aarch64::getAArch64TargetFeatures(const Driver &D,
Features.push_back("+no-bti-at-return-twice");
}
-void aarch64::setPAuthABIInTriple(const Driver &D, const ArgList &Args,
- llvm::Triple &Triple) {
- Arg *ABIArg = Args.getLastArg(options::OPT_mabi_EQ);
- bool HasPAuthABI =
- ABIArg ? (StringRef(ABIArg->getValue()) == "pauthtest") : false;
-
- switch (Triple.getEnvironment()) {
- case llvm::Triple::UnknownEnvironment:
- if (HasPAuthABI)
- Triple.setEnvironment(llvm::Triple::PAuthTest);
- break;
- case llvm::Triple::PAuthTest:
- break;
- default:
- if (HasPAuthABI)
- D.Diag(diag::err_drv_unsupported_opt_for_target)
- << ABIArg->getAsString(Args) << Triple.getTriple();
- break;
- }
-}
-
/// Is the triple {aarch64,aarch64_be}-none-elf?
bool aarch64::isAArch64BareMetal(const llvm::Triple &Triple) {
if (Triple.getArch() != llvm::Triple::aarch64 &&
diff --git a/clang/lib/Driver/ToolChains/Arch/AArch64.h b/clang/lib/Driver/ToolChains/Arch/AArch64.h
index 2765ee8..97ebfa6 100644
--- a/clang/lib/Driver/ToolChains/Arch/AArch64.h
+++ b/clang/lib/Driver/ToolChains/Arch/AArch64.h
@@ -28,8 +28,6 @@ void getAArch64TargetFeatures(const Driver &D, const llvm::Triple &Triple,
std::string getAArch64TargetCPU(const llvm::opt::ArgList &Args,
const llvm::Triple &Triple, llvm::opt::Arg *&A);
-void setPAuthABIInTriple(const Driver &D, const llvm::opt::ArgList &Args,
- llvm::Triple &triple);
bool isAArch64BareMetal(const llvm::Triple &Triple);
} // end namespace aarch64
diff --git a/clang/lib/Driver/ToolChains/Clang.cpp b/clang/lib/Driver/ToolChains/Clang.cpp
index caf7478..79edc56 100644
--- a/clang/lib/Driver/ToolChains/Clang.cpp
+++ b/clang/lib/Driver/ToolChains/Clang.cpp
@@ -1348,59 +1348,6 @@ void AddUnalignedAccessWarning(ArgStringList &CmdArgs) {
}
}
-// Each combination of options here forms a signing schema, and in most cases
-// each signing schema is its own incompatible ABI. The default values of the
-// options represent the default signing schema.
-static void handlePAuthABI(const ArgList &DriverArgs, ArgStringList &CC1Args) {
- if (!DriverArgs.hasArg(options::OPT_fptrauth_intrinsics,
- options::OPT_fno_ptrauth_intrinsics))
- CC1Args.push_back("-fptrauth-intrinsics");
-
- if (!DriverArgs.hasArg(options::OPT_fptrauth_calls,
- options::OPT_fno_ptrauth_calls))
- CC1Args.push_back("-fptrauth-calls");
-
- if (!DriverArgs.hasArg(options::OPT_fptrauth_returns,
- options::OPT_fno_ptrauth_returns))
- CC1Args.push_back("-fptrauth-returns");
-
- if (!DriverArgs.hasArg(options::OPT_fptrauth_auth_traps,
- options::OPT_fno_ptrauth_auth_traps))
- CC1Args.push_back("-fptrauth-auth-traps");
-
- if (!DriverArgs.hasArg(
- options::OPT_fptrauth_vtable_pointer_address_discrimination,
- options::OPT_fno_ptrauth_vtable_pointer_address_discrimination))
- CC1Args.push_back("-fptrauth-vtable-pointer-address-discrimination");
-
- if (!DriverArgs.hasArg(
- options::OPT_fptrauth_vtable_pointer_type_discrimination,
- options::OPT_fno_ptrauth_vtable_pointer_type_discrimination))
- CC1Args.push_back("-fptrauth-vtable-pointer-type-discrimination");
-
- if (!DriverArgs.hasArg(
- options::OPT_fptrauth_type_info_vtable_pointer_discrimination,
- options::OPT_fno_ptrauth_type_info_vtable_pointer_discrimination))
- CC1Args.push_back("-fptrauth-type-info-vtable-pointer-discrimination");
-
- if (!DriverArgs.hasArg(options::OPT_fptrauth_indirect_gotos,
- options::OPT_fno_ptrauth_indirect_gotos))
- CC1Args.push_back("-fptrauth-indirect-gotos");
-
- if (!DriverArgs.hasArg(options::OPT_fptrauth_init_fini,
- options::OPT_fno_ptrauth_init_fini))
- CC1Args.push_back("-fptrauth-init-fini");
-
- if (!DriverArgs.hasArg(
- options::OPT_fptrauth_init_fini_address_discrimination,
- options::OPT_fno_ptrauth_init_fini_address_discrimination))
- CC1Args.push_back("-fptrauth-init-fini-address-discrimination");
-
- if (!DriverArgs.hasArg(options::OPT_faarch64_jump_table_hardening,
- options::OPT_fno_aarch64_jump_table_hardening))
- CC1Args.push_back("-faarch64-jump-table-hardening");
-}
-
static void CollectARMPACBTIOptions(const ToolChain &TC, const ArgList &Args,
ArgStringList &CmdArgs, bool isAArch64) {
const llvm::Triple &Triple = TC.getEffectiveTriple();
@@ -1638,7 +1585,9 @@ void RenderAArch64ABI(const llvm::Triple &Triple, const ArgList &Args,
ABIName = A->getValue();
else if (Triple.isOSDarwin())
ABIName = "darwinpcs";
- else if (Triple.getEnvironment() == llvm::Triple::PAuthTest)
+  // TODO: We probably want to have some target hook here.
+ else if (Triple.isOSLinux() &&
+ Triple.getEnvironment() == llvm::Triple::PAuthTest)
ABIName = "pauthtest";
else
ABIName = "aapcs";
@@ -1758,8 +1707,6 @@ void Clang::AddAArch64TargetArgs(const ArgList &Args,
options::OPT_fno_ptrauth_objc_interface_sel);
Args.addOptInFlag(CmdArgs, options::OPT_fptrauth_objc_class_ro,
options::OPT_fno_ptrauth_objc_class_ro);
- if (Triple.getEnvironment() == llvm::Triple::PAuthTest)
- handlePAuthABI(Args, CmdArgs);
// Enable/disable return address signing and indirect branch targets.
CollectARMPACBTIOptions(getToolChain(), Args, CmdArgs, true /*isAArch64*/);
diff --git a/clang/lib/Driver/ToolChains/Fuchsia.cpp b/clang/lib/Driver/ToolChains/Fuchsia.cpp
index 146dc8b..31c2f3f 100644
--- a/clang/lib/Driver/ToolChains/Fuchsia.cpp
+++ b/clang/lib/Driver/ToolChains/Fuchsia.cpp
@@ -481,9 +481,11 @@ SanitizerMask Fuchsia::getSupportedSanitizers() const {
Res |= SanitizerKind::Fuzzer;
Res |= SanitizerKind::FuzzerNoLink;
Res |= SanitizerKind::Leak;
- Res |= SanitizerKind::SafeStack;
Res |= SanitizerKind::Scudo;
Res |= SanitizerKind::Thread;
+ if (getTriple().getArch() == llvm::Triple::x86_64) {
+ Res |= SanitizerKind::SafeStack;
+ }
return Res;
}
diff --git a/clang/lib/Driver/ToolChains/Linux.cpp b/clang/lib/Driver/ToolChains/Linux.cpp
index 8eb4d34e..94a9fe8 100644
--- a/clang/lib/Driver/ToolChains/Linux.cpp
+++ b/clang/lib/Driver/ToolChains/Linux.cpp
@@ -445,6 +445,102 @@ std::string Linux::computeSysRoot() const {
return std::string();
}
+static void setPAuthABIInTriple(const Driver &D, const ArgList &Args,
+ llvm::Triple &Triple) {
+ Arg *ABIArg = Args.getLastArg(options::OPT_mabi_EQ);
+ bool HasPAuthABI =
+ ABIArg ? (StringRef(ABIArg->getValue()) == "pauthtest") : false;
+
+ switch (Triple.getEnvironment()) {
+ case llvm::Triple::UnknownEnvironment:
+ if (HasPAuthABI)
+ Triple.setEnvironment(llvm::Triple::PAuthTest);
+ break;
+ case llvm::Triple::PAuthTest:
+ break;
+ default:
+ if (HasPAuthABI)
+ D.Diag(diag::err_drv_unsupported_opt_for_target)
+ << ABIArg->getAsString(Args) << Triple.getTriple();
+ break;
+ }
+}
+
+std::string Linux::ComputeEffectiveClangTriple(const llvm::opt::ArgList &Args,
+ types::ID InputType) const {
+ std::string TripleString =
+ Generic_ELF::ComputeEffectiveClangTriple(Args, InputType);
+ if (getTriple().isAArch64()) {
+ llvm::Triple Triple(TripleString);
+ setPAuthABIInTriple(getDriver(), Args, Triple);
+ return Triple.getTriple();
+ }
+ return TripleString;
+}
+
+// Each combination of options here forms a signing schema, and in most cases
+// each signing schema is its own incompatible ABI. The default values of the
+// options represent the default signing schema.
+static void handlePAuthABI(const Driver &D, const ArgList &DriverArgs,
+ ArgStringList &CC1Args) {
+ if (!DriverArgs.hasArg(options::OPT_fptrauth_intrinsics,
+ options::OPT_fno_ptrauth_intrinsics))
+ CC1Args.push_back("-fptrauth-intrinsics");
+
+ if (!DriverArgs.hasArg(options::OPT_fptrauth_calls,
+ options::OPT_fno_ptrauth_calls))
+ CC1Args.push_back("-fptrauth-calls");
+
+ if (!DriverArgs.hasArg(options::OPT_fptrauth_returns,
+ options::OPT_fno_ptrauth_returns))
+ CC1Args.push_back("-fptrauth-returns");
+
+ if (!DriverArgs.hasArg(options::OPT_fptrauth_auth_traps,
+ options::OPT_fno_ptrauth_auth_traps))
+ CC1Args.push_back("-fptrauth-auth-traps");
+
+ if (!DriverArgs.hasArg(
+ options::OPT_fptrauth_vtable_pointer_address_discrimination,
+ options::OPT_fno_ptrauth_vtable_pointer_address_discrimination))
+ CC1Args.push_back("-fptrauth-vtable-pointer-address-discrimination");
+
+ if (!DriverArgs.hasArg(
+ options::OPT_fptrauth_vtable_pointer_type_discrimination,
+ options::OPT_fno_ptrauth_vtable_pointer_type_discrimination))
+ CC1Args.push_back("-fptrauth-vtable-pointer-type-discrimination");
+
+ if (!DriverArgs.hasArg(
+ options::OPT_fptrauth_type_info_vtable_pointer_discrimination,
+ options::OPT_fno_ptrauth_type_info_vtable_pointer_discrimination))
+ CC1Args.push_back("-fptrauth-type-info-vtable-pointer-discrimination");
+
+ if (!DriverArgs.hasArg(options::OPT_fptrauth_indirect_gotos,
+ options::OPT_fno_ptrauth_indirect_gotos))
+ CC1Args.push_back("-fptrauth-indirect-gotos");
+
+ if (!DriverArgs.hasArg(options::OPT_fptrauth_init_fini,
+ options::OPT_fno_ptrauth_init_fini))
+ CC1Args.push_back("-fptrauth-init-fini");
+
+ if (!DriverArgs.hasArg(
+ options::OPT_fptrauth_init_fini_address_discrimination,
+ options::OPT_fno_ptrauth_init_fini_address_discrimination))
+ CC1Args.push_back("-fptrauth-init-fini-address-discrimination");
+
+ if (!DriverArgs.hasArg(options::OPT_faarch64_jump_table_hardening,
+ options::OPT_fno_aarch64_jump_table_hardening))
+ CC1Args.push_back("-faarch64-jump-table-hardening");
+}
+
+void Linux::addClangTargetOptions(const llvm::opt::ArgList &DriverArgs,
+ llvm::opt::ArgStringList &CC1Args,
+ Action::OffloadKind DeviceOffloadKind) const {
+ llvm::Triple Triple(ComputeEffectiveClangTriple(DriverArgs));
+ if (Triple.isAArch64() && Triple.getEnvironment() == llvm::Triple::PAuthTest)
+ handlePAuthABI(getDriver(), DriverArgs, CC1Args);
+ Generic_ELF::addClangTargetOptions(DriverArgs, CC1Args, DeviceOffloadKind);
+}
+
std::string Linux::getDynamicLinker(const ArgList &Args) const {
const llvm::Triple::ArchType Arch = getArch();
const llvm::Triple &Triple = getTriple();
diff --git a/clang/lib/Driver/ToolChains/Linux.h b/clang/lib/Driver/ToolChains/Linux.h
index 2eb2d05..97bad77 100644
--- a/clang/lib/Driver/ToolChains/Linux.h
+++ b/clang/lib/Driver/ToolChains/Linux.h
@@ -53,7 +53,14 @@ public:
SanitizerMask getSupportedSanitizers() const override;
void addProfileRTLibs(const llvm::opt::ArgList &Args,
llvm::opt::ArgStringList &CmdArgs) const override;
+ std::string ComputeEffectiveClangTriple(
+ const llvm::opt::ArgList &Args,
+ types::ID InputType = types::TY_INVALID) const override;
std::string computeSysRoot() const override;
+ void
+ addClangTargetOptions(const llvm::opt::ArgList &DriverArgs,
+ llvm::opt::ArgStringList &CC1Args,
+ Action::OffloadKind DeviceOffloadKind) const override;
std::string getDynamicLinker(const llvm::opt::ArgList &Args) const override;
diff --git a/clang/lib/Frontend/CompilerInstance.cpp b/clang/lib/Frontend/CompilerInstance.cpp
index e3bf0ea..6b09f7f 100644
--- a/clang/lib/Frontend/CompilerInstance.cpp
+++ b/clang/lib/Frontend/CompilerInstance.cpp
@@ -882,7 +882,7 @@ CompilerInstance::createOutputFileImpl(StringRef OutputPath, bool Binary,
"File Manager is required to fix up relative path.\n");
AbsPath.emplace(OutputPath);
- FileMgr->FixupRelativePath(*AbsPath);
+ FileManager::fixupRelativePath(getFileSystemOpts(), *AbsPath);
OutputPath = *AbsPath;
}
diff --git a/clang/lib/Frontend/CompilerInvocation.cpp b/clang/lib/Frontend/CompilerInvocation.cpp
index d2cb751..bd36eb4 100644
--- a/clang/lib/Frontend/CompilerInvocation.cpp
+++ b/clang/lib/Frontend/CompilerInvocation.cpp
@@ -3214,7 +3214,7 @@ static bool ParseFrontendArgs(FrontendOptions &Opts, ArgList &Args,
DashX = llvm::StringSwitch<InputKind>(XValue)
.Case("cpp-output", InputKind(Language::C).getPreprocessed())
.Case("assembler-with-cpp", Language::Asm)
- .Cases("ast", "pcm", "precompiled-header",
+ .Cases({"ast", "pcm", "precompiled-header"},
InputKind(Language::Unknown, InputKind::Precompiled))
.Case("ir", Language::LLVM_IR)
.Case("cir", Language::CIR)
diff --git a/clang/lib/Headers/sifive_vector.h b/clang/lib/Headers/sifive_vector.h
index 4e67ad6..ae01627 100644
--- a/clang/lib/Headers/sifive_vector.h
+++ b/clang/lib/Headers/sifive_vector.h
@@ -115,4 +115,60 @@
#endif
#endif
+#define __riscv_sf_vsettnt_e8w1(atn) __riscv_sf_vsettnt(atn, 0, 1);
+#define __riscv_sf_vsettnt_e8w2(atn) __riscv_sf_vsettnt(atn, 0, 2);
+#define __riscv_sf_vsettnt_e8w4(atn) __riscv_sf_vsettnt(atn, 0, 3);
+#define __riscv_sf_vsettnt_e16w1(atn) __riscv_sf_vsettnt(atn, 1, 1);
+#define __riscv_sf_vsettnt_e16w2(atn) __riscv_sf_vsettnt(atn, 1, 2);
+#define __riscv_sf_vsettnt_e16w4(atn) __riscv_sf_vsettnt(atn, 1, 3);
+#define __riscv_sf_vsettnt_e32w1(atn) __riscv_sf_vsettnt(atn, 2, 1);
+#define __riscv_sf_vsettnt_e32w2(atn) __riscv_sf_vsettnt(atn, 2, 2);
+#define __riscv_sf_vsettm_e8w1(atm) __riscv_sf_vsettm(atm, 0, 1);
+#define __riscv_sf_vsettm_e8w2(atm) __riscv_sf_vsettm(atm, 0, 2);
+#define __riscv_sf_vsettm_e8w4(atm) __riscv_sf_vsettm(atm, 0, 3);
+#define __riscv_sf_vsettm_e16w1(atm) __riscv_sf_vsettm(atm, 1, 1);
+#define __riscv_sf_vsettm_e16w2(atm) __riscv_sf_vsettm(atm, 1, 2);
+#define __riscv_sf_vsettm_e16w4(atm) __riscv_sf_vsettm(atm, 1, 3);
+#define __riscv_sf_vsettm_e32w1(atm) __riscv_sf_vsettm(atm, 2, 1);
+#define __riscv_sf_vsettm_e32w2(atm) __riscv_sf_vsettm(atm, 2, 2);
+#define __riscv_sf_vsettn_e8w1(atn) __riscv_sf_vsettn(atn, 0, 1);
+#define __riscv_sf_vsettn_e8w2(atn) __riscv_sf_vsettn(atn, 0, 2);
+#define __riscv_sf_vsettn_e8w4(atn) __riscv_sf_vsettn(atn, 0, 3);
+#define __riscv_sf_vsettn_e16w1(atn) __riscv_sf_vsettn(atn, 1, 1);
+#define __riscv_sf_vsettn_e16w2(atn) __riscv_sf_vsettn(atn, 1, 2);
+#define __riscv_sf_vsettn_e16w4(atn) __riscv_sf_vsettn(atn, 1, 3);
+#define __riscv_sf_vsettn_e32w1(atn) __riscv_sf_vsettn(atn, 2, 1);
+#define __riscv_sf_vsettn_e32w2(atn) __riscv_sf_vsettn(atn, 2, 2);
+#define __riscv_sf_vsettk_e8w1(atk) __riscv_sf_vsettk(atk, 0, 1);
+#define __riscv_sf_vsettk_e8w2(atk) __riscv_sf_vsettk(atk, 0, 2);
+#define __riscv_sf_vsettk_e8w4(atk) __riscv_sf_vsettk(atk, 0, 3);
+#define __riscv_sf_vsettk_e16w1(atk) __riscv_sf_vsettk(atk, 1, 1);
+#define __riscv_sf_vsettk_e16w2(atk) __riscv_sf_vsettk(atk, 1, 2);
+#define __riscv_sf_vsettk_e16w4(atk) __riscv_sf_vsettk(atk, 1, 3);
+#define __riscv_sf_vsettk_e32w1(atk) __riscv_sf_vsettk(atk, 2, 1);
+#define __riscv_sf_vsettk_e32w2(atk) __riscv_sf_vsettk(atk, 2, 2);
+#define __riscv_sf_vtzero_t_e8w1(tile, atm, atn) \
+ __riscv_sf_vtzero_t(tile, atm, atn, 3, 1);
+#define __riscv_sf_vtzero_t_e8w2(tile, atm, atn) \
+ __riscv_sf_vtzero_t(tile, atm, atn, 3, 2);
+#define __riscv_sf_vtzero_t_e8w4(tile, atm, atn) \
+ __riscv_sf_vtzero_t(tile, atm, atn, 3, 4);
+#define __riscv_sf_vtzero_t_e16w1(tile, atm, atn) \
+ __riscv_sf_vtzero_t(tile, atm, atn, 4, 1);
+#define __riscv_sf_vtzero_t_e16w2(tile, atm, atn) \
+ __riscv_sf_vtzero_t(tile, atm, atn, 4, 2);
+#define __riscv_sf_vtzero_t_e16w4(tile, atm, atn) \
+ __riscv_sf_vtzero_t(tile, atm, atn, 4, 4);
+#define __riscv_sf_vtzero_t_e32w1(tile, atm, atn) \
+ __riscv_sf_vtzero_t(tile, atm, atn, 5, 1);
+#define __riscv_sf_vtzero_t_e32w2(tile, atm, atn) \
+ __riscv_sf_vtzero_t(tile, atm, atn, 5, 2);
+#if __riscv_v_elen >= 64
+#define __riscv_sf_vsettnt_e64w1(atn) __riscv_sf_vsettnt(atn, 3, 1);
+#define __riscv_sf_vsettm_e64w1(atm) __riscv_sf_vsettm(atm, 3, 1);
+#define __riscv_sf_vsettn_e64w1(atn) __riscv_sf_vsettn(atn, 3, 1);
+#define __riscv_sf_vsettk_e64w1(atk) __riscv_sf_vsettk(atk, 3, 1);
+#define __riscv_sf_vtzero_t_e64w1(tile, atm, atn) \
+ __riscv_sf_vtzero_t(tile, atm, atn, 6, 1);
+#endif
#endif //_SIFIVE_VECTOR_H_
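For readers of the new helpers above, a minimal usage sketch (illustrative only, not part of the patch; it assumes a RISC-V toolchain with the SiFive matrix extension enabled so that the generic __riscv_sf_* intrinsics are available, and set_tile_shape is a made-up name). Each e<SEW>w<WIDEN> macro only hard-codes the sew/twiden immediates of the corresponding generic intrinsic:

    #include <stddef.h>
    #include <sifive_vector.h>

    void set_tile_shape(size_t n) {
      // Expands to __riscv_sf_vsettnt(n, /*sew=*/1, /*twiden=*/2).
      __riscv_sf_vsettnt_e16w2(n);
    }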
diff --git a/clang/lib/Lex/LiteralSupport.cpp b/clang/lib/Lex/LiteralSupport.cpp
index 5b08d7f..1c06152 100644
--- a/clang/lib/Lex/LiteralSupport.cpp
+++ b/clang/lib/Lex/LiteralSupport.cpp
@@ -1283,10 +1283,10 @@ bool NumericLiteralParser::isValidUDSuffix(const LangOptions &LangOpts,
// Per tweaked N3660, "il", "i", and "if" are also used in the library.
// In C++2a "d" and "y" are used in the library.
return llvm::StringSwitch<bool>(Suffix)
- .Cases("h", "min", "s", true)
- .Cases("ms", "us", "ns", true)
- .Cases("il", "i", "if", true)
- .Cases("d", "y", LangOpts.CPlusPlus20)
+ .Cases({"h", "min", "s"}, true)
+ .Cases({"ms", "us", "ns"}, true)
+ .Cases({"il", "i", "if"}, true)
+ .Cases({"d", "y"}, LangOpts.CPlusPlus20)
.Default(false);
}
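For reference, a minimal sketch of the brace-list Cases() overload this hunk migrates to (illustrative only; isTimeSuffix is a made-up helper, not part of the patch):

    #include "llvm/ADT/StringRef.h"
    #include "llvm/ADT/StringSwitch.h"

    // Each Cases() call maps a group of equivalent strings to one value.
    static bool isTimeSuffix(llvm::StringRef S) {
      return llvm::StringSwitch<bool>(S)
          .Cases({"h", "min", "s"}, true)
          .Cases({"ms", "us", "ns"}, true)
          .Default(false);
    }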
diff --git a/clang/lib/Parse/ParseHLSL.cpp b/clang/lib/Parse/ParseHLSL.cpp
index 51f2aef..c727ee3 100644
--- a/clang/lib/Parse/ParseHLSL.cpp
+++ b/clang/lib/Parse/ParseHLSL.cpp
@@ -126,15 +126,9 @@ Parser::ParsedSemantic Parser::ParseHLSLSemantic() {
// semantic index. The semantic index is the number at the end of
// the semantic, including leading zeroes. Digits located before
// the last letter are part of the semantic name.
- bool Invalid = false;
SmallString<256> Buffer;
Buffer.resize(Tok.getLength() + 1);
StringRef Identifier = PP.getSpelling(Tok, Buffer);
- if (Invalid) {
- Diag(Tok.getLocation(), diag::err_expected_semantic_identifier);
- return {};
- }
-
assert(Identifier.size() > 0);
// Determine the start of the semantic index.
unsigned IndexIndex = Identifier.find_last_not_of("0123456789") + 1;
diff --git a/clang/lib/Sema/CheckExprLifetime.cpp b/clang/lib/Sema/CheckExprLifetime.cpp
index e797400..f9665b5 100644
--- a/clang/lib/Sema/CheckExprLifetime.cpp
+++ b/clang/lib/Sema/CheckExprLifetime.cpp
@@ -155,6 +155,7 @@ getEntityLifetime(const InitializedEntity *Entity,
case InitializedEntity::EK_LambdaToBlockConversionBlockElement:
case InitializedEntity::EK_LambdaCapture:
case InitializedEntity::EK_VectorElement:
+ case InitializedEntity::EK_MatrixElement:
case InitializedEntity::EK_ComplexElement:
return {nullptr, LK_FullExpression};
diff --git a/clang/lib/Sema/SemaChecking.cpp b/clang/lib/Sema/SemaChecking.cpp
index f99c01e..f451787 100644
--- a/clang/lib/Sema/SemaChecking.cpp
+++ b/clang/lib/Sema/SemaChecking.cpp
@@ -12373,14 +12373,9 @@ static void DiagnoseMixedUnicodeImplicitConversion(Sema &S, const Type *Source,
}
}
-enum CFIUncheckedCalleeChange {
- None,
- Adding,
- Discarding,
-};
-
-static CFIUncheckedCalleeChange AdjustingCFIUncheckedCallee(QualType From,
- QualType To) {
+bool Sema::DiscardingCFIUncheckedCallee(QualType From, QualType To) const {
+ From = Context.getCanonicalType(From);
+ To = Context.getCanonicalType(To);
QualType MaybePointee = From->getPointeeType();
if (!MaybePointee.isNull() && MaybePointee->getAs<FunctionType>())
From = MaybePointee;
@@ -12392,25 +12387,10 @@ static CFIUncheckedCalleeChange AdjustingCFIUncheckedCallee(QualType From,
if (const auto *ToFn = To->getAs<FunctionType>()) {
if (FromFn->getCFIUncheckedCalleeAttr() &&
!ToFn->getCFIUncheckedCalleeAttr())
- return Discarding;
- if (!FromFn->getCFIUncheckedCalleeAttr() &&
- ToFn->getCFIUncheckedCalleeAttr())
- return Adding;
+ return true;
}
}
- return None;
-}
-
-bool Sema::DiscardingCFIUncheckedCallee(QualType From, QualType To) const {
- From = Context.getCanonicalType(From);
- To = Context.getCanonicalType(To);
- return ::AdjustingCFIUncheckedCallee(From, To) == Discarding;
-}
-
-bool Sema::AddingCFIUncheckedCallee(QualType From, QualType To) const {
- From = Context.getCanonicalType(From);
- To = Context.getCanonicalType(To);
- return ::AdjustingCFIUncheckedCallee(From, To) == Adding;
+ return false;
}
void Sema::CheckImplicitConversion(Expr *E, QualType T, SourceLocation CC,
diff --git a/clang/lib/Sema/SemaHLSL.cpp b/clang/lib/Sema/SemaHLSL.cpp
index 2a485da..96d5142 100644
--- a/clang/lib/Sema/SemaHLSL.cpp
+++ b/clang/lib/Sema/SemaHLSL.cpp
@@ -21,6 +21,7 @@
#include "clang/AST/Expr.h"
#include "clang/AST/HLSLResource.h"
#include "clang/AST/Type.h"
+#include "clang/AST/TypeBase.h"
#include "clang/AST/TypeLoc.h"
#include "clang/Basic/Builtins.h"
#include "clang/Basic/DiagnosticSema.h"
@@ -3432,6 +3433,11 @@ static void BuildFlattenedTypeList(QualType BaseTy,
List.insert(List.end(), VT->getNumElements(), VT->getElementType());
continue;
}
+ if (const auto *MT = dyn_cast<ConstantMatrixType>(T)) {
+ List.insert(List.end(), MT->getNumElementsFlattened(),
+ MT->getElementType());
+ continue;
+ }
if (const auto *RD = T->getAsCXXRecordDecl()) {
if (RD->isStandardLayout())
RD = RD->getStandardLayoutBaseWithFields();
@@ -4230,6 +4236,32 @@ class InitListTransformer {
}
return true;
}
+ if (auto *MTy = Ty->getAs<ConstantMatrixType>()) {
+ unsigned Rows = MTy->getNumRows();
+ unsigned Cols = MTy->getNumColumns();
+ QualType ElemTy = MTy->getElementType();
+
+ for (unsigned C = 0; C < Cols; ++C) {
+ for (unsigned R = 0; R < Rows; ++R) {
+ // row index literal
+ Expr *RowIdx = IntegerLiteral::Create(
+ Ctx, llvm::APInt(Ctx.getIntWidth(Ctx.IntTy), R), Ctx.IntTy,
+ E->getBeginLoc());
+ // column index literal
+ Expr *ColIdx = IntegerLiteral::Create(
+ Ctx, llvm::APInt(Ctx.getIntWidth(Ctx.IntTy), C), Ctx.IntTy,
+ E->getBeginLoc());
+ ExprResult ElExpr = S.CreateBuiltinMatrixSubscriptExpr(
+ E, RowIdx, ColIdx, E->getEndLoc());
+ if (ElExpr.isInvalid())
+ return false;
+ if (!castInitializer(ElExpr.get()))
+ return false;
+ ElExpr.get()->setType(ElemTy);
+ }
+ }
+ return true;
+ }
if (auto *ArrTy = dyn_cast<ConstantArrayType>(Ty.getTypePtr())) {
uint64_t Size = ArrTy->getZExtSize();
@@ -4283,14 +4315,17 @@ class InitListTransformer {
return *(ArgIt++);
llvm::SmallVector<Expr *> Inits;
- assert(!isa<MatrixType>(Ty) && "Matrix types not yet supported in HLSL");
Ty = Ty.getDesugaredType(Ctx);
- if (Ty->isVectorType() || Ty->isConstantArrayType()) {
+ if (Ty->isVectorType() || Ty->isConstantArrayType() ||
+ Ty->isConstantMatrixType()) {
QualType ElTy;
uint64_t Size = 0;
if (auto *ATy = Ty->getAs<VectorType>()) {
ElTy = ATy->getElementType();
Size = ATy->getNumElements();
+ } else if (auto *CMTy = Ty->getAs<ConstantMatrixType>()) {
+ ElTy = CMTy->getElementType();
+ Size = CMTy->getNumElementsFlattened();
} else {
auto *VTy = cast<ConstantArrayType>(Ty.getTypePtr());
ElTy = VTy->getElementType();
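The matrix branch of the transformer above visits elements column by column. A standalone sketch of that visitation order, using made-up 2x3 dimensions (illustrative only, not part of the patch):

    #include <cstdio>

    int main() {
      const unsigned Rows = 2, Cols = 3; // Example dimensions only.
      for (unsigned C = 0; C < Cols; ++C)
        for (unsigned R = 0; R < Rows; ++R)
          std::printf("(%u,%u)\n", R, C); // Same (C outer, R inner) order.
      return 0;
    }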
diff --git a/clang/lib/Sema/SemaInit.cpp b/clang/lib/Sema/SemaInit.cpp
index 7debe33..073010d 100644
--- a/clang/lib/Sema/SemaInit.cpp
+++ b/clang/lib/Sema/SemaInit.cpp
@@ -17,6 +17,7 @@
#include "clang/AST/ExprCXX.h"
#include "clang/AST/ExprObjC.h"
#include "clang/AST/IgnoreExpr.h"
+#include "clang/AST/TypeBase.h"
#include "clang/AST/TypeLoc.h"
#include "clang/Basic/SourceManager.h"
#include "clang/Basic/Specifiers.h"
@@ -403,6 +404,9 @@ class InitListChecker {
unsigned &Index,
InitListExpr *StructuredList,
unsigned &StructuredIndex);
+ void CheckMatrixType(const InitializedEntity &Entity, InitListExpr *IList,
+ QualType DeclType, unsigned &Index,
+ InitListExpr *StructuredList, unsigned &StructuredIndex);
void CheckVectorType(const InitializedEntity &Entity,
InitListExpr *IList, QualType DeclType, unsigned &Index,
InitListExpr *StructuredList,
@@ -1004,7 +1008,8 @@ InitListChecker::FillInEmptyInitializations(const InitializedEntity &Entity,
return;
if (ElementEntity.getKind() == InitializedEntity::EK_ArrayElement ||
- ElementEntity.getKind() == InitializedEntity::EK_VectorElement)
+ ElementEntity.getKind() == InitializedEntity::EK_VectorElement ||
+ ElementEntity.getKind() == InitializedEntity::EK_MatrixElement)
ElementEntity.setElementIndex(Init);
if (Init >= NumInits && (ILE->hasArrayFiller() || SkipEmptyInitChecks))
@@ -1274,6 +1279,7 @@ static void warnBracedScalarInit(Sema &S, const InitializedEntity &Entity,
switch (Entity.getKind()) {
case InitializedEntity::EK_VectorElement:
+ case InitializedEntity::EK_MatrixElement:
case InitializedEntity::EK_ComplexElement:
case InitializedEntity::EK_ArrayElement:
case InitializedEntity::EK_Parameter:
@@ -1373,11 +1379,12 @@ void InitListChecker::CheckExplicitInitList(const InitializedEntity &Entity,
SemaRef.Diag(IList->getInit(Index)->getBeginLoc(), DK)
<< T << IList->getInit(Index)->getSourceRange();
} else {
- int initKind = T->isArrayType() ? 0 :
- T->isVectorType() ? 1 :
- T->isScalarType() ? 2 :
- T->isUnionType() ? 3 :
- 4;
+ int initKind = T->isArrayType() ? 0
+ : T->isVectorType() ? 1
+ : T->isMatrixType() ? 2
+ : T->isScalarType() ? 3
+ : T->isUnionType() ? 4
+ : 5;
unsigned DK = ExtraInitsIsError ? diag::err_excess_initializers
: diag::ext_excess_initializers;
@@ -1431,6 +1438,9 @@ void InitListChecker::CheckListElementTypes(const InitializedEntity &Entity,
} else if (DeclType->isVectorType()) {
CheckVectorType(Entity, IList, DeclType, Index,
StructuredList, StructuredIndex);
+ } else if (DeclType->isMatrixType()) {
+ CheckMatrixType(Entity, IList, DeclType, Index, StructuredList,
+ StructuredIndex);
} else if (const RecordDecl *RD = DeclType->getAsRecordDecl()) {
auto Bases =
CXXRecordDecl::base_class_const_range(CXXRecordDecl::base_class_const_iterator(),
@@ -1878,6 +1888,37 @@ void InitListChecker::CheckReferenceType(const InitializedEntity &Entity,
AggrDeductionCandidateParamTypes->push_back(DeclType);
}
+void InitListChecker::CheckMatrixType(const InitializedEntity &Entity,
+ InitListExpr *IList, QualType DeclType,
+ unsigned &Index,
+ InitListExpr *StructuredList,
+ unsigned &StructuredIndex) {
+ if (!SemaRef.getLangOpts().HLSL)
+ return;
+
+ const ConstantMatrixType *MT = DeclType->castAs<ConstantMatrixType>();
+ QualType ElemTy = MT->getElementType();
+ const unsigned MaxElts = MT->getNumElementsFlattened();
+
+ unsigned NumEltsInit = 0;
+ InitializedEntity ElemEnt =
+ InitializedEntity::InitializeElement(SemaRef.Context, 0, Entity);
+
+ while (NumEltsInit < MaxElts && Index < IList->getNumInits()) {
+ // Not a sublist: just consume directly.
+ ElemEnt.setElementIndex(Index);
+ CheckSubElementType(ElemEnt, IList, ElemTy, Index, StructuredList,
+ StructuredIndex);
+ ++NumEltsInit;
+ }
+
+  // For HLSL, the error for this case is handled in SemaHLSL's initializer
+  // list diagnostics, so NumEltsInit is required to equal MaxElts. In other
+  // words, execution should never reach this point unless that condition
+  // holds.
+ assert(NumEltsInit == MaxElts && "NumEltsInit must equal MaxElts");
+}
+
void InitListChecker::CheckVectorType(const InitializedEntity &Entity,
InitListExpr *IList, QualType DeclType,
unsigned &Index,
@@ -3640,6 +3681,9 @@ InitializedEntity::InitializedEntity(ASTContext &Context, unsigned Index,
} else if (const VectorType *VT = Parent.getType()->getAs<VectorType>()) {
Kind = EK_VectorElement;
Type = VT->getElementType();
+ } else if (const MatrixType *MT = Parent.getType()->getAs<MatrixType>()) {
+ Kind = EK_MatrixElement;
+ Type = MT->getElementType();
} else {
const ComplexType *CT = Parent.getType()->getAs<ComplexType>();
assert(CT && "Unexpected type");
@@ -3688,6 +3732,7 @@ DeclarationName InitializedEntity::getName() const {
case EK_Delegating:
case EK_ArrayElement:
case EK_VectorElement:
+ case EK_MatrixElement:
case EK_ComplexElement:
case EK_BlockElement:
case EK_LambdaToBlockConversionBlockElement:
@@ -3721,6 +3766,7 @@ ValueDecl *InitializedEntity::getDecl() const {
case EK_Delegating:
case EK_ArrayElement:
case EK_VectorElement:
+ case EK_MatrixElement:
case EK_ComplexElement:
case EK_BlockElement:
case EK_LambdaToBlockConversionBlockElement:
@@ -3754,6 +3800,7 @@ bool InitializedEntity::allowsNRVO() const {
case EK_Delegating:
case EK_ArrayElement:
case EK_VectorElement:
+ case EK_MatrixElement:
case EK_ComplexElement:
case EK_BlockElement:
case EK_LambdaToBlockConversionBlockElement:
@@ -3793,6 +3840,9 @@ unsigned InitializedEntity::dumpImpl(raw_ostream &OS) const {
case EK_Delegating: OS << "Delegating"; break;
case EK_ArrayElement: OS << "ArrayElement " << Index; break;
case EK_VectorElement: OS << "VectorElement " << Index; break;
+ case EK_MatrixElement:
+ OS << "MatrixElement " << Index;
+ break;
case EK_ComplexElement: OS << "ComplexElement " << Index; break;
case EK_BlockElement: OS << "Block"; break;
case EK_LambdaToBlockConversionBlockElement:
@@ -6030,7 +6080,7 @@ static void TryOrBuildParenListInitialization(
Sequence.SetFailed(InitializationSequence::FK_ParenthesizedListInitFailed);
if (!VerifyOnly) {
QualType T = Entity.getType();
- int InitKind = T->isArrayType() ? 0 : T->isUnionType() ? 3 : 4;
+ int InitKind = T->isArrayType() ? 0 : T->isUnionType() ? 4 : 5;
SourceRange ExcessInitSR(Args[EntityIndexToProcess]->getBeginLoc(),
Args.back()->getEndLoc());
S.Diag(Kind.getLocation(), diag::err_excess_initializers)
@@ -6823,7 +6873,8 @@ void InitializationSequence::InitializeFrom(Sema &S,
// For HLSL ext vector types we allow list initialization behavior for C++
// functional cast expressions which look like constructor syntax. This is
// accomplished by converting initialization arguments to InitListExpr.
- if (S.getLangOpts().HLSL && Args.size() > 1 && DestType->isExtVectorType() &&
+ if (S.getLangOpts().HLSL && Args.size() > 1 &&
+ (DestType->isExtVectorType() || DestType->isConstantMatrixType()) &&
(SourceType.isNull() ||
!Context.hasSameUnqualifiedType(SourceType, DestType))) {
InitListExpr *ILE = new (Context)
@@ -6988,6 +7039,7 @@ static AssignmentAction getAssignmentAction(const InitializedEntity &Entity,
case InitializedEntity::EK_Binding:
case InitializedEntity::EK_ArrayElement:
case InitializedEntity::EK_VectorElement:
+ case InitializedEntity::EK_MatrixElement:
case InitializedEntity::EK_ComplexElement:
case InitializedEntity::EK_BlockElement:
case InitializedEntity::EK_LambdaToBlockConversionBlockElement:
@@ -7013,6 +7065,7 @@ static bool shouldBindAsTemporary(const InitializedEntity &Entity) {
case InitializedEntity::EK_Base:
case InitializedEntity::EK_Delegating:
case InitializedEntity::EK_VectorElement:
+ case InitializedEntity::EK_MatrixElement:
case InitializedEntity::EK_ComplexElement:
case InitializedEntity::EK_Exception:
case InitializedEntity::EK_BlockElement:
@@ -7043,6 +7096,7 @@ static bool shouldDestroyEntity(const InitializedEntity &Entity) {
case InitializedEntity::EK_Base:
case InitializedEntity::EK_Delegating:
case InitializedEntity::EK_VectorElement:
+ case InitializedEntity::EK_MatrixElement:
case InitializedEntity::EK_ComplexElement:
case InitializedEntity::EK_BlockElement:
case InitializedEntity::EK_LambdaToBlockConversionBlockElement:
@@ -7096,6 +7150,7 @@ static SourceLocation getInitializationLoc(const InitializedEntity &Entity,
case InitializedEntity::EK_Base:
case InitializedEntity::EK_Delegating:
case InitializedEntity::EK_VectorElement:
+ case InitializedEntity::EK_MatrixElement:
case InitializedEntity::EK_ComplexElement:
case InitializedEntity::EK_BlockElement:
case InitializedEntity::EK_LambdaToBlockConversionBlockElement:
@@ -7845,11 +7900,13 @@ ExprResult InitializationSequence::Perform(Sema &S,
ExprResult CurInit((Expr *)nullptr);
SmallVector<Expr*, 4> ArrayLoopCommonExprs;
- // HLSL allows vector initialization to function like list initialization, but
- // use the syntax of a C++-like constructor.
- bool IsHLSLVectorInit = S.getLangOpts().HLSL && DestType->isExtVectorType() &&
- isa<InitListExpr>(Args[0]);
- (void)IsHLSLVectorInit;
+  // HLSL allows vector/matrix initialization to function like list
+  // initialization, but with the syntax of a C++-like constructor.
+ bool IsHLSLVectorOrMatrixInit =
+ S.getLangOpts().HLSL &&
+ (DestType->isExtVectorType() || DestType->isConstantMatrixType()) &&
+ isa<InitListExpr>(Args[0]);
+ (void)IsHLSLVectorOrMatrixInit;
// For initialization steps that start with a single initializer,
// grab the only argument out the Args and place it into the "current"
@@ -7888,7 +7945,7 @@ ExprResult InitializationSequence::Perform(Sema &S,
case SK_StdInitializerList:
case SK_OCLSamplerInit:
case SK_OCLZeroOpaqueType: {
- assert(Args.size() == 1 || IsHLSLVectorInit);
+ assert(Args.size() == 1 || IsHLSLVectorOrMatrixInit);
CurInit = Args[0];
if (!CurInit.get()) return ExprError();
break;
@@ -9105,7 +9162,7 @@ bool InitializationSequence::Diagnose(Sema &S,
<< R;
else
S.Diag(Kind.getLocation(), diag::err_excess_initializers)
- << /*scalar=*/2 << R;
+ << /*scalar=*/3 << R;
break;
}
diff --git a/clang/lib/Sema/SemaObjCProperty.cpp b/clang/lib/Sema/SemaObjCProperty.cpp
index 1880cec..67c554c 100644
--- a/clang/lib/Sema/SemaObjCProperty.cpp
+++ b/clang/lib/Sema/SemaObjCProperty.cpp
@@ -1041,7 +1041,7 @@ RedeclarePropertyAccessor(ASTContext &Context, ObjCImplementationDecl *Impl,
Decl->getSelector(), Decl->getReturnType(),
Decl->getReturnTypeSourceInfo(), Impl, Decl->isInstanceMethod(),
Decl->isVariadic(), Decl->isPropertyAccessor(),
- /* isSynthesized*/ true, Decl->isImplicit(), Decl->isDefined(),
+ /*isSynthesizedAccessorStub=*/true, Decl->isImplicit(), Decl->isDefined(),
Decl->getImplementationControl(), Decl->hasRelatedResultType());
ImplDecl->getMethodFamily();
if (Decl->hasAttrs())
diff --git a/clang/lib/Sema/SemaOverload.cpp b/clang/lib/Sema/SemaOverload.cpp
index 1f25111..37f3511 100644
--- a/clang/lib/Sema/SemaOverload.cpp
+++ b/clang/lib/Sema/SemaOverload.cpp
@@ -2532,15 +2532,12 @@ static bool IsStandardConversion(Sema &S, Expr* From, QualType ToType,
SCS.setToType(2, FromType);
- // If we have not converted the argument type to the parameter type,
- // this is a bad conversion sequence, unless we're resolving an overload in C.
- //
- // Permit conversions from a function without `cfi_unchecked_callee` to a
- // function with `cfi_unchecked_callee`.
- if (CanonFrom == CanonTo || S.AddingCFIUncheckedCallee(CanonFrom, CanonTo))
+ if (CanonFrom == CanonTo)
return true;
- if ((S.getLangOpts().CPlusPlus || !InOverloadResolution))
+ // If we have not converted the argument type to the parameter type,
+ // this is a bad conversion sequence, unless we're resolving an overload in C.
+ if (S.getLangOpts().CPlusPlus || !InOverloadResolution)
return false;
ExprResult ER = ExprResult{From};
diff --git a/clang/lib/Sema/SemaRISCV.cpp b/clang/lib/Sema/SemaRISCV.cpp
index b5f91a3..75dba80 100644
--- a/clang/lib/Sema/SemaRISCV.cpp
+++ b/clang/lib/Sema/SemaRISCV.cpp
@@ -664,6 +664,80 @@ bool SemaRISCV::CheckBuiltinFunctionCall(const TargetInfo &TI,
return CheckVSetVL(1, 2);
case RISCVVector::BI__builtin_rvv_vsetvlimax:
return CheckVSetVL(0, 1);
+ case RISCVVector::BI__builtin_rvv_sf_vsettnt:
+ case RISCVVector::BI__builtin_rvv_sf_vsettm:
+ case RISCVVector::BI__builtin_rvv_sf_vsettn:
+ case RISCVVector::BI__builtin_rvv_sf_vsettk:
+ return SemaRef.BuiltinConstantArgRange(TheCall, 1, 0, 3) ||
+ SemaRef.BuiltinConstantArgRange(TheCall, 2, 1, 3);
+ case RISCVVector::BI__builtin_rvv_sf_mm_f_f_w1:
+ case RISCVVector::BI__builtin_rvv_sf_mm_f_f_w2:
+ case RISCVVector::BI__builtin_rvv_sf_mm_e5m2_e4m3_w4:
+ case RISCVVector::BI__builtin_rvv_sf_mm_e5m2_e5m2_w4:
+ case RISCVVector::BI__builtin_rvv_sf_mm_e4m3_e4m3_w4:
+ case RISCVVector::BI__builtin_rvv_sf_mm_e4m3_e5m2_w4:
+ case RISCVVector::BI__builtin_rvv_sf_mm_u_u_w4:
+ case RISCVVector::BI__builtin_rvv_sf_mm_u_s_w4:
+ case RISCVVector::BI__builtin_rvv_sf_mm_s_u_w4:
+ case RISCVVector::BI__builtin_rvv_sf_mm_s_s_w4: {
+ QualType Arg1Type = TheCall->getArg(1)->getType();
+ ASTContext::BuiltinVectorTypeInfo Info =
+ SemaRef.Context.getBuiltinVectorTypeInfo(
+ Arg1Type->castAs<BuiltinType>());
+ unsigned EltSize = SemaRef.Context.getTypeSize(Info.ElementType);
+ llvm::APSInt Result;
+
+ // We can't check the value of a dependent argument.
+ Expr *Arg = TheCall->getArg(0);
+ if (Arg->isTypeDependent() || Arg->isValueDependent())
+ return false;
+
+ // Check constant-ness first.
+ if (SemaRef.BuiltinConstantArg(TheCall, 0, Result))
+ return true;
+
+ // For TEW = 32, mtd can only be 0, 4, 8, 12.
+ // For TEW = 64, mtd can only be 0, 2, 4, 6, 8, 10, 12, 14.
+ // Only `sf_mm_f_f_w1` and `sf_mm_f_f_w2` might have TEW = 64.
+ if ((BuiltinID == RISCVVector::BI__builtin_rvv_sf_mm_f_f_w1 &&
+ EltSize == 64) ||
+ (BuiltinID == RISCVVector::BI__builtin_rvv_sf_mm_f_f_w2 &&
+ EltSize == 32))
+ return SemaRef.BuiltinConstantArgRange(TheCall, 0, 0, 15) ||
+ SemaRef.BuiltinConstantArgMultiple(TheCall, 0, 2);
+ return SemaRef.BuiltinConstantArgRange(TheCall, 0, 0, 15) ||
+ SemaRef.BuiltinConstantArgMultiple(TheCall, 0, 4);
+ }
+ case RISCVVector::BI__builtin_rvv_sf_vtzero_t: {
+ llvm::APSInt Log2SEWResult;
+ llvm::APSInt TWidenResult;
+ if (SemaRef.BuiltinConstantArg(TheCall, 3, Log2SEWResult) ||
+ SemaRef.BuiltinConstantArg(TheCall, 4, TWidenResult))
+ return true;
+
+ int Log2SEW = Log2SEWResult.getSExtValue();
+ int TWiden = TWidenResult.getSExtValue();
+
+    // 3 <= Log2SEW <= 6
+ if (SemaRef.BuiltinConstantArgRange(TheCall, 3, 3, 6))
+ return true;
+
+    // TWiden must be 1, 2, or 4.
+ if (TWiden != 1 && TWiden != 2 && TWiden != 4)
+ return Diag(TheCall->getBeginLoc(),
+ diag::err_riscv_builtin_invalid_twiden);
+
+ int TEW = (1 << Log2SEW) * TWiden;
+
+ // For TEW = 8, mtd can be 0~15.
+ // For TEW = 16 or 64, mtd can only be 0, 2, 4, 6, 8, 10, 12, 14.
+ // For TEW = 32, mtd can only be 0, 4, 8, 12.
+ if (SemaRef.BuiltinConstantArgRange(TheCall, 0, 0, 15))
+ return true;
+ if (TEW == 16 || TEW == 64)
+ return SemaRef.BuiltinConstantArgMultiple(TheCall, 0, 2);
+ return SemaRef.BuiltinConstantArgMultiple(TheCall, 0, 4);
+ }
case RISCVVector::BI__builtin_rvv_vget_v: {
ASTContext::BuiltinVectorTypeInfo ResVecInfo =
Context.getBuiltinVectorTypeInfo(cast<BuiltinType>(
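A worked example of the TEW constraint checked above (illustrative arithmetic only, not part of the patch):

    #include <cassert>

    int main() {
      // For sf_vtzero_t, TEW = (1 << Log2SEW) * TWiden.
      int Log2SEW = 4, TWiden = 4;       // e16 with widen-by-4 (example values).
      int TEW = (1 << Log2SEW) * TWiden; // 16 * 4 = 64.
      assert(TEW == 64);                 // TEW of 16 or 64 requires an even mtd operand.
      return 0;
    }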
diff --git a/clang/lib/StaticAnalyzer/Frontend/AnalysisConsumer.cpp b/clang/lib/StaticAnalyzer/Frontend/AnalysisConsumer.cpp
index 82b560b..e0deec1 100644
--- a/clang/lib/StaticAnalyzer/Frontend/AnalysisConsumer.cpp
+++ b/clang/lib/StaticAnalyzer/Frontend/AnalysisConsumer.cpp
@@ -128,7 +128,6 @@ public:
std::unique_ptr<llvm::Timer> SyntaxCheckTimer;
std::unique_ptr<llvm::Timer> ExprEngineTimer;
std::unique_ptr<llvm::Timer> BugReporterTimer;
- bool ShouldClearTimersToPreventDisplayingThem;
/// The information about analyzed functions shared throughout the
/// translation unit.
@@ -149,7 +148,10 @@ public:
if (Opts.AnalyzerDisplayProgress || Opts.PrintStats ||
Opts.ShouldSerializeStats || !Opts.DumpEntryPointStatsToCSV.empty()) {
AnalyzerTimers = std::make_unique<llvm::TimerGroup>(
- "analyzer", "Analyzer timers");
+ "analyzer", "Analyzer timers",
+ /*PrintOnExit=*/
+ (Opts.AnalyzerDisplayProgress || Opts.PrintStats ||
+ Opts.ShouldSerializeStats));
SyntaxCheckTimer = std::make_unique<llvm::Timer>(
"syntaxchecks", "Syntax-based analysis time", *AnalyzerTimers);
ExprEngineTimer = std::make_unique<llvm::Timer>(
@@ -159,12 +161,6 @@ public:
*AnalyzerTimers);
}
- // Avoid displaying the timers created above in case we only want to record
- // per-entry-point stats.
- ShouldClearTimersToPreventDisplayingThem = !Opts.AnalyzerDisplayProgress &&
- !Opts.PrintStats &&
- !Opts.ShouldSerializeStats;
-
if (Opts.PrintStats || Opts.ShouldSerializeStats) {
llvm::EnableStatistics(/* DoPrintOnExit= */ false);
}
@@ -287,9 +283,6 @@ public:
checkerMgr->runCheckersOnASTDecl(D, *Mgr, *RecVisitorBR);
if (SyntaxCheckTimer)
SyntaxCheckTimer->stopTimer();
- if (AnalyzerTimers && ShouldClearTimersToPreventDisplayingThem) {
- AnalyzerTimers->clear();
- }
}
return true;
}
@@ -583,9 +576,6 @@ void AnalysisConsumer::runAnalysisOnTranslationUnit(ASTContext &C) {
checkerMgr->runCheckersOnASTDecl(TU, *Mgr, BR);
if (SyntaxCheckTimer)
SyntaxCheckTimer->stopTimer();
- if (AnalyzerTimers && ShouldClearTimersToPreventDisplayingThem) {
- AnalyzerTimers->clear();
- }
// Run the AST-only checks using the order in which functions are defined.
// If inlining is not turned on, use the simplest function order for path
@@ -765,9 +755,6 @@ void AnalysisConsumer::HandleCode(Decl *D, AnalysisMode Mode,
FunctionSummaries.findOrInsertSummary(D)->second.SyntaxRunningTime =
std::lround(CheckerDuration.getWallTime() * 1000);
DisplayTime(CheckerDuration);
- if (AnalyzerTimers && ShouldClearTimersToPreventDisplayingThem) {
- AnalyzerTimers->clear();
- }
}
}
@@ -830,9 +817,6 @@ void AnalysisConsumer::RunPathSensitiveChecks(Decl *D,
PathRunningTime.set(static_cast<unsigned>(
std::lround(ExprEngineDuration.getWallTime() * 1000)));
DisplayTime(ExprEngineDuration);
- if (AnalyzerTimers && ShouldClearTimersToPreventDisplayingThem) {
- AnalyzerTimers->clear();
- }
}
if (!Mgr->options.DumpExplodedGraphTo.empty())
@@ -843,9 +827,6 @@ void AnalysisConsumer::RunPathSensitiveChecks(Decl *D,
Eng.ViewGraph(Mgr->options.TrimGraph);
flushReports(BugReporterTimer.get(), Eng.getBugReporter());
- if (AnalyzerTimers && ShouldClearTimersToPreventDisplayingThem) {
- AnalyzerTimers->clear();
- }
}
//===----------------------------------------------------------------------===//
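A minimal sketch of the TimerGroup construction the consumer now uses (illustrative only; it assumes the TimerGroup constructor overload with a PrintOnExit flag that the code above relies on, and sketch() is a made-up function):

    #include "llvm/Support/Timer.h"

    void sketch(bool WantReport) {
      // Timers still record either way; the group only prints at exit when
      // WantReport is true, which replaces the old clear()-based suppression.
      llvm::TimerGroup TG("analyzer", "Analyzer timers", /*PrintOnExit=*/WantReport);
      llvm::Timer T("syntaxchecks", "Syntax-based analysis time", TG);
      T.startTimer();
      // ... analysis work ...
      T.stopTimer();
    }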
diff --git a/clang/lib/Support/RISCVVIntrinsicUtils.cpp b/clang/lib/Support/RISCVVIntrinsicUtils.cpp
index dad3d0da..12e209a 100644
--- a/clang/lib/Support/RISCVVIntrinsicUtils.cpp
+++ b/clang/lib/Support/RISCVVIntrinsicUtils.cpp
@@ -980,11 +980,12 @@ RVVIntrinsic::RVVIntrinsic(
bool HasMaskedOffOperand, bool HasVL, PolicyScheme Scheme,
bool SupportOverloading, bool HasBuiltinAlias, StringRef ManualCodegen,
const RVVTypes &OutInTypes, const std::vector<int64_t> &NewIntrinsicTypes,
- unsigned NF, Policy NewPolicyAttrs, bool HasFRMRoundModeOp)
+ unsigned NF, Policy NewPolicyAttrs, bool HasFRMRoundModeOp, unsigned TWiden)
: IRName(IRName), IsMasked(IsMasked),
HasMaskedOffOperand(HasMaskedOffOperand), HasVL(HasVL), Scheme(Scheme),
SupportOverloading(SupportOverloading), HasBuiltinAlias(HasBuiltinAlias),
- ManualCodegen(ManualCodegen.str()), NF(NF), PolicyAttrs(NewPolicyAttrs) {
+ ManualCodegen(ManualCodegen.str()), NF(NF), PolicyAttrs(NewPolicyAttrs),
+ TWiden(TWiden) {
// Init BuiltinName, Name and OverloadedName
BuiltinName = NewName.str();