aboutsummaryrefslogtreecommitdiff
path: root/clang/lib
diff options
context:
space:
mode:
Diffstat (limited to 'clang/lib')
-rw-r--r--clang/lib/AST/ByteCode/Compiler.cpp11
-rw-r--r--clang/lib/AST/ByteCode/Compiler.h17
-rw-r--r--clang/lib/AST/ByteCode/InterpBuiltin.cpp245
-rw-r--r--clang/lib/AST/ExprConstant.cpp315
-rw-r--r--clang/lib/Analysis/AnalysisDeclContext.cpp5
-rw-r--r--clang/lib/Analysis/FlowSensitive/Models/UncheckedOptionalAccessModel.cpp44
-rw-r--r--clang/lib/Basic/Targets/RISCV.h2
-rw-r--r--clang/lib/CIR/CodeGen/CIRGenExprScalar.cpp4
-rw-r--r--clang/lib/CIR/Dialect/Transforms/CIRCanonicalize.cpp8
-rw-r--r--clang/lib/CIR/Dialect/Transforms/CIRSimplify.cpp7
-rw-r--r--clang/lib/CIR/Dialect/Transforms/FlattenCFG.cpp7
-rw-r--r--clang/lib/CIR/Dialect/Transforms/GotoSolver.cpp7
-rw-r--r--clang/lib/CIR/Dialect/Transforms/HoistAllocas.cpp7
-rw-r--r--clang/lib/CIR/Dialect/Transforms/LoweringPrepare.cpp8
-rw-r--r--clang/lib/CIR/Dialect/Transforms/PassDetail.h2
-rw-r--r--clang/lib/CodeGen/CGCall.cpp1
-rw-r--r--clang/lib/CodeGen/CGHLSLRuntime.cpp46
-rw-r--r--clang/lib/CodeGen/CGHLSLRuntime.h35
-rw-r--r--clang/lib/CodeGen/CodeGenModule.cpp3
-rw-r--r--clang/lib/CodeGen/HLSLBufferLayoutBuilder.cpp37
-rw-r--r--clang/lib/CodeGen/HLSLBufferLayoutBuilder.h10
-rw-r--r--clang/lib/CodeGen/TargetInfo.h6
-rw-r--r--clang/lib/CodeGen/Targets/DirectX.cpp9
-rw-r--r--clang/lib/CodeGen/Targets/SPIR.cpp19
-rw-r--r--clang/lib/Driver/ToolChains/Clang.cpp3
-rw-r--r--clang/lib/Driver/ToolChains/CommonArgs.cpp47
-rw-r--r--clang/lib/Driver/ToolChains/Flang.cpp3
-rw-r--r--clang/lib/Headers/__clang_cuda_device_functions.h2
-rw-r--r--clang/lib/Parse/ParseDecl.cpp2
-rw-r--r--clang/lib/Parse/ParseDeclCXX.cpp2
-rw-r--r--clang/lib/Parse/ParseInit.cpp23
-rw-r--r--clang/lib/Parse/ParseOpenMP.cpp2
-rw-r--r--clang/lib/Sema/AnalysisBasedWarnings.cpp105
-rw-r--r--clang/lib/Sema/SemaDecl.cpp17
-rw-r--r--clang/lib/Sema/SemaExpr.cpp43
-rw-r--r--clang/lib/Sema/SemaOpenMP.cpp126
-rw-r--r--clang/lib/Sema/SemaTemplateInstantiateDecl.cpp6
37 files changed, 733 insertions, 503 deletions
diff --git a/clang/lib/AST/ByteCode/Compiler.cpp b/clang/lib/AST/ByteCode/Compiler.cpp
index f68422c..1243380 100644
--- a/clang/lib/AST/ByteCode/Compiler.cpp
+++ b/clang/lib/AST/ByteCode/Compiler.cpp
@@ -5432,8 +5432,7 @@ bool Compiler<Emitter>::VisitCXXThisExpr(const CXXThisExpr *E) {
unsigned EndIndex = 0;
// Find the init list.
for (StartIndex = InitStack.size() - 1; StartIndex > 0; --StartIndex) {
- if (InitStack[StartIndex].Kind == InitLink::K_InitList ||
- InitStack[StartIndex].Kind == InitLink::K_This) {
+ if (InitStack[StartIndex].Kind == InitLink::K_DIE) {
EndIndex = StartIndex;
--StartIndex;
break;
@@ -5446,7 +5445,8 @@ bool Compiler<Emitter>::VisitCXXThisExpr(const CXXThisExpr *E) {
continue;
if (InitStack[StartIndex].Kind != InitLink::K_Field &&
- InitStack[StartIndex].Kind != InitLink::K_Elem)
+ InitStack[StartIndex].Kind != InitLink::K_Elem &&
+ InitStack[StartIndex].Kind != InitLink::K_DIE)
break;
}
@@ -5457,7 +5457,8 @@ bool Compiler<Emitter>::VisitCXXThisExpr(const CXXThisExpr *E) {
// Emit the instructions.
for (unsigned I = StartIndex; I != (EndIndex + 1); ++I) {
- if (InitStack[I].Kind == InitLink::K_InitList)
+ if (InitStack[I].Kind == InitLink::K_InitList ||
+ InitStack[I].Kind == InitLink::K_DIE)
continue;
if (!InitStack[I].template emit<Emitter>(this, E))
return false;
@@ -6328,8 +6329,8 @@ bool Compiler<Emitter>::compileConstructor(const CXXConstructorDecl *Ctor) {
unsigned FirstLinkOffset =
R->getField(cast<FieldDecl>(IFD->chain()[0]))->Offset;
- InitStackScope<Emitter> ISS(this, isa<CXXDefaultInitExpr>(InitExpr));
InitLinkScope<Emitter> ILS(this, InitLink::Field(FirstLinkOffset));
+ InitStackScope<Emitter> ISS(this, isa<CXXDefaultInitExpr>(InitExpr));
if (!emitFieldInitializer(NestedField, NestedFieldOffset, InitExpr,
IsUnion))
return false;
diff --git a/clang/lib/AST/ByteCode/Compiler.h b/clang/lib/AST/ByteCode/Compiler.h
index 5c46f75..0c6cab9 100644
--- a/clang/lib/AST/ByteCode/Compiler.h
+++ b/clang/lib/AST/ByteCode/Compiler.h
@@ -52,12 +52,14 @@ public:
K_Decl = 3,
K_Elem = 5,
K_RVO = 6,
- K_InitList = 7
+ K_InitList = 7,
+ K_DIE = 8,
};
static InitLink This() { return InitLink{K_This}; }
static InitLink InitList() { return InitLink{K_InitList}; }
static InitLink RVO() { return InitLink{K_RVO}; }
+ static InitLink DIE() { return InitLink{K_DIE}; }
static InitLink Field(unsigned Offset) {
InitLink IL{K_Field};
IL.Offset = Offset;
@@ -668,22 +670,29 @@ public:
~InitLinkScope() { this->Ctx->InitStack.pop_back(); }
-private:
+public:
Compiler<Emitter> *Ctx;
};
template <class Emitter> class InitStackScope final {
public:
InitStackScope(Compiler<Emitter> *Ctx, bool Active)
- : Ctx(Ctx), OldValue(Ctx->InitStackActive) {
+ : Ctx(Ctx), OldValue(Ctx->InitStackActive), Active(Active) {
Ctx->InitStackActive = Active;
+ if (Active)
+ Ctx->InitStack.push_back(InitLink::DIE());
}
- ~InitStackScope() { this->Ctx->InitStackActive = OldValue; }
+ ~InitStackScope() {
+ this->Ctx->InitStackActive = OldValue;
+ if (Active)
+ Ctx->InitStack.pop_back();
+ }
private:
Compiler<Emitter> *Ctx;
bool OldValue;
+ bool Active;
};
} // namespace interp
diff --git a/clang/lib/AST/ByteCode/InterpBuiltin.cpp b/clang/lib/AST/ByteCode/InterpBuiltin.cpp
index 0ef130c..6c7b2f5 100644
--- a/clang/lib/AST/ByteCode/InterpBuiltin.cpp
+++ b/clang/lib/AST/ByteCode/InterpBuiltin.cpp
@@ -2841,76 +2841,6 @@ static bool interp__builtin_blend(InterpState &S, CodePtr OpPC,
return true;
}
-static bool interp__builtin_ia32_pshufb(InterpState &S, CodePtr OpPC,
- const CallExpr *Call) {
- assert(Call->getNumArgs() == 2 && "masked forms handled via select*");
- const Pointer &Control = S.Stk.pop<Pointer>();
- const Pointer &Src = S.Stk.pop<Pointer>();
- const Pointer &Dst = S.Stk.peek<Pointer>();
-
- unsigned NumElems = Dst.getNumElems();
- assert(NumElems == Control.getNumElems());
- assert(NumElems == Dst.getNumElems());
-
- for (unsigned Idx = 0; Idx != NumElems; ++Idx) {
- uint8_t Ctlb = static_cast<uint8_t>(Control.elem<int8_t>(Idx));
-
- if (Ctlb & 0x80) {
- Dst.elem<int8_t>(Idx) = 0;
- } else {
- unsigned LaneBase = (Idx / 16) * 16;
- unsigned SrcOffset = Ctlb & 0x0F;
- unsigned SrcIdx = LaneBase + SrcOffset;
-
- Dst.elem<int8_t>(Idx) = Src.elem<int8_t>(SrcIdx);
- }
- }
- Dst.initializeAllElements();
- return true;
-}
-
-static bool interp__builtin_ia32_pshuf(InterpState &S, CodePtr OpPC,
- const CallExpr *Call, bool IsShufHW) {
- assert(Call->getNumArgs() == 2 && "masked forms handled via select*");
- APSInt ControlImm = popToAPSInt(S, Call->getArg(1));
- const Pointer &Src = S.Stk.pop<Pointer>();
- const Pointer &Dst = S.Stk.peek<Pointer>();
-
- unsigned NumElems = Dst.getNumElems();
- PrimType ElemT = Dst.getFieldDesc()->getPrimType();
-
- unsigned ElemBits = static_cast<unsigned>(primSize(ElemT) * 8);
- if (ElemBits != 16 && ElemBits != 32)
- return false;
-
- unsigned LaneElts = 128u / ElemBits;
- assert(LaneElts && (NumElems % LaneElts == 0));
-
- uint8_t Ctl = static_cast<uint8_t>(ControlImm.getZExtValue());
-
- for (unsigned Idx = 0; Idx != NumElems; Idx++) {
- unsigned LaneBase = (Idx / LaneElts) * LaneElts;
- unsigned LaneIdx = Idx % LaneElts;
- unsigned SrcIdx = Idx;
- unsigned Sel = (Ctl >> (2 * (LaneIdx & 0x3))) & 0x3;
- if (ElemBits == 32) {
- SrcIdx = LaneBase + Sel;
- } else {
- constexpr unsigned HalfSize = 4;
- bool InHigh = LaneIdx >= HalfSize;
- if (!IsShufHW && !InHigh) {
- SrcIdx = LaneBase + Sel;
- } else if (IsShufHW && InHigh) {
- SrcIdx = LaneBase + HalfSize + Sel;
- }
- }
-
- INT_TYPE_SWITCH_NO_BOOL(ElemT, { Dst.elem<T>(Idx) = Src.elem<T>(SrcIdx); });
- }
- Dst.initializeAllElements();
- return true;
-}
-
static bool interp__builtin_ia32_test_op(
InterpState &S, CodePtr OpPC, const CallExpr *Call,
llvm::function_ref<bool(const APInt &A, const APInt &B)> Fn) {
@@ -3377,61 +3307,46 @@ static bool interp__builtin_ia32_vpconflict(InterpState &S, CodePtr OpPC,
return true;
}
-static bool interp__builtin_x86_byteshift(
- InterpState &S, CodePtr OpPC, const CallExpr *Call, unsigned ID,
- llvm::function_ref<APInt(const Pointer &, unsigned Lane, unsigned I,
- unsigned Shift)>
- Fn) {
- assert(Call->getNumArgs() == 2);
-
- APSInt ImmAPS = popToAPSInt(S, Call->getArg(1));
- uint64_t Shift = ImmAPS.getZExtValue() & 0xff;
-
- const Pointer &Src = S.Stk.pop<Pointer>();
- if (!Src.getFieldDesc()->isPrimitiveArray())
- return false;
-
- unsigned NumElems = Src.getNumElems();
- const Pointer &Dst = S.Stk.peek<Pointer>();
- PrimType ElemT = Src.getFieldDesc()->getPrimType();
-
- for (unsigned Lane = 0; Lane != NumElems; Lane += 16) {
- for (unsigned I = 0; I != 16; ++I) {
- unsigned Base = Lane + I;
- APSInt Result = APSInt(Fn(Src, Lane, I, Shift));
- INT_TYPE_SWITCH_NO_BOOL(ElemT,
- { Dst.elem<T>(Base) = static_cast<T>(Result); });
- }
- }
-
- Dst.initializeAllElements();
-
- return true;
-}
-
static bool interp__builtin_ia32_shuffle_generic(
InterpState &S, CodePtr OpPC, const CallExpr *Call,
llvm::function_ref<std::pair<unsigned, int>(unsigned, unsigned)>
GetSourceIndex) {
- assert(Call->getNumArgs() == 3);
+ assert(Call->getNumArgs() == 2 || Call->getNumArgs() == 3);
unsigned ShuffleMask = 0;
Pointer A, MaskVector, B;
-
- QualType Arg2Type = Call->getArg(2)->getType();
bool IsVectorMask = false;
- if (Arg2Type->isVectorType()) {
- IsVectorMask = true;
- B = S.Stk.pop<Pointer>();
- MaskVector = S.Stk.pop<Pointer>();
- A = S.Stk.pop<Pointer>();
- } else if (Arg2Type->isIntegerType()) {
- ShuffleMask = popToAPSInt(S, Call->getArg(2)).getZExtValue();
- B = S.Stk.pop<Pointer>();
- A = S.Stk.pop<Pointer>();
+ bool IsSingleOperand = (Call->getNumArgs() == 2);
+
+ if (IsSingleOperand) {
+ QualType MaskType = Call->getArg(1)->getType();
+ if (MaskType->isVectorType()) {
+ IsVectorMask = true;
+ MaskVector = S.Stk.pop<Pointer>();
+ A = S.Stk.pop<Pointer>();
+ B = A;
+ } else if (MaskType->isIntegerType()) {
+ ShuffleMask = popToAPSInt(S, Call->getArg(1)).getZExtValue();
+ A = S.Stk.pop<Pointer>();
+ B = A;
+ } else {
+ return false;
+ }
} else {
- return false;
+ QualType Arg2Type = Call->getArg(2)->getType();
+ if (Arg2Type->isVectorType()) {
+ IsVectorMask = true;
+ B = S.Stk.pop<Pointer>();
+ MaskVector = S.Stk.pop<Pointer>();
+ A = S.Stk.pop<Pointer>();
+ } else if (Arg2Type->isIntegerType()) {
+ ShuffleMask = popToAPSInt(S, Call->getArg(2)).getZExtValue();
+ B = S.Stk.pop<Pointer>();
+ A = S.Stk.pop<Pointer>();
+ } else {
+ return false;
+ }
}
QualType Arg0Type = Call->getArg(0)->getType();
@@ -3455,6 +3370,7 @@ static bool interp__builtin_ia32_shuffle_generic(
ShuffleMask = static_cast<unsigned>(MaskVector.elem<T>(DstIdx));
});
}
+
auto [SrcVecIdx, SrcIdx] = GetSourceIndex(DstIdx, ShuffleMask);
if (SrcIdx < 0) {
@@ -4555,22 +4471,58 @@ bool InterpretBuiltin(InterpState &S, CodePtr OpPC, const CallExpr *Call,
case X86::BI__builtin_ia32_pshufb128:
case X86::BI__builtin_ia32_pshufb256:
case X86::BI__builtin_ia32_pshufb512:
- return interp__builtin_ia32_pshufb(S, OpPC, Call);
+ return interp__builtin_ia32_shuffle_generic(
+ S, OpPC, Call, [](unsigned DstIdx, unsigned ShuffleMask) {
+ uint8_t Ctlb = static_cast<uint8_t>(ShuffleMask);
+ if (Ctlb & 0x80)
+ return std::make_pair(0, -1);
+
+ unsigned LaneBase = (DstIdx / 16) * 16;
+ unsigned SrcOffset = Ctlb & 0x0F;
+ unsigned SrcIdx = LaneBase + SrcOffset;
+ return std::make_pair(0, static_cast<int>(SrcIdx));
+ });
case X86::BI__builtin_ia32_pshuflw:
case X86::BI__builtin_ia32_pshuflw256:
case X86::BI__builtin_ia32_pshuflw512:
- return interp__builtin_ia32_pshuf(S, OpPC, Call, false);
+ return interp__builtin_ia32_shuffle_generic(
+ S, OpPC, Call, [](unsigned DstIdx, unsigned ShuffleMask) {
+ unsigned LaneBase = (DstIdx / 8) * 8;
+ unsigned LaneIdx = DstIdx % 8;
+ if (LaneIdx < 4) {
+ unsigned Sel = (ShuffleMask >> (2 * LaneIdx)) & 0x3;
+ return std::make_pair(0, static_cast<int>(LaneBase + Sel));
+ }
+
+ return std::make_pair(0, static_cast<int>(DstIdx));
+ });
case X86::BI__builtin_ia32_pshufhw:
case X86::BI__builtin_ia32_pshufhw256:
case X86::BI__builtin_ia32_pshufhw512:
- return interp__builtin_ia32_pshuf(S, OpPC, Call, true);
+ return interp__builtin_ia32_shuffle_generic(
+ S, OpPC, Call, [](unsigned DstIdx, unsigned ShuffleMask) {
+ unsigned LaneBase = (DstIdx / 8) * 8;
+ unsigned LaneIdx = DstIdx % 8;
+ if (LaneIdx >= 4) {
+ unsigned Sel = (ShuffleMask >> (2 * (LaneIdx - 4))) & 0x3;
+ return std::make_pair(0, static_cast<int>(LaneBase + 4 + Sel));
+ }
+
+ return std::make_pair(0, static_cast<int>(DstIdx));
+ });
case X86::BI__builtin_ia32_pshufd:
case X86::BI__builtin_ia32_pshufd256:
case X86::BI__builtin_ia32_pshufd512:
- return interp__builtin_ia32_pshuf(S, OpPC, Call, false);
+ return interp__builtin_ia32_shuffle_generic(
+ S, OpPC, Call, [](unsigned DstIdx, unsigned ShuffleMask) {
+ unsigned LaneBase = (DstIdx / 4) * 4;
+ unsigned LaneIdx = DstIdx % 4;
+ unsigned Sel = (ShuffleMask >> (2 * LaneIdx)) & 0x3;
+ return std::make_pair(0, static_cast<int>(LaneBase + Sel));
+ });
case X86::BI__builtin_ia32_kandqi:
case X86::BI__builtin_ia32_kandhi:
@@ -4728,13 +4680,16 @@ bool InterpretBuiltin(InterpState &S, CodePtr OpPC, const CallExpr *Call,
// The lane width is hardcoded to 16 to match the SIMD register size,
// but the algorithm processes one byte per iteration,
// so APInt(8, ...) is correct and intentional.
- return interp__builtin_x86_byteshift(
- S, OpPC, Call, BuiltinID,
- [](const Pointer &Src, unsigned Lane, unsigned I, unsigned Shift) {
- if (I < Shift) {
- return APInt(8, 0);
- }
- return APInt(8, Src.elem<uint8_t>(Lane + I - Shift));
+ return interp__builtin_ia32_shuffle_generic(
+ S, OpPC, Call,
+ [](unsigned DstIdx, unsigned Shift) -> std::pair<unsigned, int> {
+ unsigned LaneBase = (DstIdx / 16) * 16;
+ unsigned LaneIdx = DstIdx % 16;
+ if (LaneIdx < Shift)
+ return std::make_pair(0, -1);
+
+ return std::make_pair(0,
+ static_cast<int>(LaneBase + LaneIdx - Shift));
});
case X86::BI__builtin_ia32_psrldqi128_byteshift:
@@ -4744,14 +4699,40 @@ bool InterpretBuiltin(InterpState &S, CodePtr OpPC, const CallExpr *Call,
// The lane width is hardcoded to 16 to match the SIMD register size,
// but the algorithm processes one byte per iteration,
// so APInt(8, ...) is correct and intentional.
- return interp__builtin_x86_byteshift(
- S, OpPC, Call, BuiltinID,
- [](const Pointer &Src, unsigned Lane, unsigned I, unsigned Shift) {
- if (I + Shift < 16) {
- return APInt(8, Src.elem<uint8_t>(Lane + I + Shift));
+ return interp__builtin_ia32_shuffle_generic(
+ S, OpPC, Call,
+ [](unsigned DstIdx, unsigned Shift) -> std::pair<unsigned, int> {
+ unsigned LaneBase = (DstIdx / 16) * 16;
+ unsigned LaneIdx = DstIdx % 16;
+ if (LaneIdx + Shift < 16)
+ return std::make_pair(0,
+ static_cast<int>(LaneBase + LaneIdx + Shift));
+
+ return std::make_pair(0, -1);
+ });
+
+ case X86::BI__builtin_ia32_palignr128:
+ case X86::BI__builtin_ia32_palignr256:
+ case X86::BI__builtin_ia32_palignr512:
+ return interp__builtin_ia32_shuffle_generic(
+ S, OpPC, Call, [](unsigned DstIdx, unsigned Shift) {
+          // Default ElemIdx to -1, which zero-fills this destination element
+ unsigned VecIdx = 1;
+ int ElemIdx = -1;
+
+ int Lane = DstIdx / 16;
+ int Offset = DstIdx % 16;
+
+ // Elements come from VecB first, then VecA after the shift boundary
+ unsigned ShiftedIdx = Offset + (Shift & 0xFF);
+ if (ShiftedIdx < 16) { // from VecB
+ ElemIdx = ShiftedIdx + (Lane * 16);
+ } else if (ShiftedIdx < 32) { // from VecA
+ VecIdx = 0;
+ ElemIdx = (ShiftedIdx - 16) + (Lane * 16);
}
- return APInt(8, 0);
+ return std::pair<unsigned, int>{VecIdx, ElemIdx};
});
default:
diff --git a/clang/lib/AST/ExprConstant.cpp b/clang/lib/AST/ExprConstant.cpp
index 972d9fe..1bfea24 100644
--- a/clang/lib/AST/ExprConstant.cpp
+++ b/clang/lib/AST/ExprConstant.cpp
@@ -12090,24 +12090,46 @@ static bool evalShuffleGeneric(
unsigned ShuffleMask = 0;
APValue A, MaskVector, B;
bool IsVectorMask = false;
-
- QualType Arg2Type = Call->getArg(2)->getType();
- if (Arg2Type->isVectorType()) {
- IsVectorMask = true;
- if (!EvaluateAsRValue(Info, Call->getArg(0), A) ||
- !EvaluateAsRValue(Info, Call->getArg(1), MaskVector) ||
- !EvaluateAsRValue(Info, Call->getArg(2), B))
- return false;
- } else if (Arg2Type->isIntegerType()) {
- APSInt MaskImm;
- if (!EvaluateInteger(Call->getArg(2), MaskImm, Info))
- return false;
- ShuffleMask = static_cast<unsigned>(MaskImm.getZExtValue());
- if (!EvaluateAsRValue(Info, Call->getArg(0), A) ||
- !EvaluateAsRValue(Info, Call->getArg(1), B))
+ bool IsSingleOperand = (Call->getNumArgs() == 2);
+
+ if (IsSingleOperand) {
+ QualType MaskType = Call->getArg(1)->getType();
+ if (MaskType->isVectorType()) {
+ IsVectorMask = true;
+ if (!EvaluateAsRValue(Info, Call->getArg(0), A) ||
+ !EvaluateAsRValue(Info, Call->getArg(1), MaskVector))
+ return false;
+ B = A;
+ } else if (MaskType->isIntegerType()) {
+ APSInt MaskImm;
+ if (!EvaluateInteger(Call->getArg(1), MaskImm, Info))
+ return false;
+ ShuffleMask = static_cast<unsigned>(MaskImm.getZExtValue());
+ if (!EvaluateAsRValue(Info, Call->getArg(0), A))
+ return false;
+ B = A;
+ } else {
return false;
+ }
} else {
- return false;
+ QualType Arg2Type = Call->getArg(2)->getType();
+ if (Arg2Type->isVectorType()) {
+ IsVectorMask = true;
+ if (!EvaluateAsRValue(Info, Call->getArg(0), A) ||
+ !EvaluateAsRValue(Info, Call->getArg(1), MaskVector) ||
+ !EvaluateAsRValue(Info, Call->getArg(2), B))
+ return false;
+ } else if (Arg2Type->isIntegerType()) {
+ APSInt MaskImm;
+ if (!EvaluateInteger(Call->getArg(2), MaskImm, Info))
+ return false;
+ ShuffleMask = static_cast<unsigned>(MaskImm.getZExtValue());
+ if (!EvaluateAsRValue(Info, Call->getArg(0), A) ||
+ !EvaluateAsRValue(Info, Call->getArg(1), B))
+ return false;
+ } else {
+ return false;
+ }
}
unsigned NumElts = VT->getNumElements();
@@ -12124,8 +12146,16 @@ static bool evalShuffleGeneric(
if (SrcIdx < 0) {
// Zero out this element
QualType ElemTy = VT->getElementType();
- ResultElements.push_back(
- APValue(APFloat::getZero(Info.Ctx.getFloatTypeSemantics(ElemTy))));
+ if (ElemTy->isRealFloatingType()) {
+ ResultElements.push_back(
+ APValue(APFloat::getZero(Info.Ctx.getFloatTypeSemantics(ElemTy))));
+ } else if (ElemTy->isIntegerType()) {
+ APValue Zero(Info.Ctx.MakeIntValue(0, ElemTy));
+ ResultElements.push_back(APValue(Zero));
+ } else {
+        // Fallback for other element types: push an empty APValue
+ ResultElements.push_back(APValue());
+ }
} else {
const APValue &Src = (SrcVecIdx == 0) ? A : B;
ResultElements.push_back(Src.getVectorElt(SrcIdx));
@@ -12136,98 +12166,6 @@ static bool evalShuffleGeneric(
return true;
}
-static bool evalPshufbBuiltin(EvalInfo &Info, const CallExpr *Call,
- APValue &Out) {
- APValue SrcVec, ControlVec;
- if (!EvaluateAsRValue(Info, Call->getArg(0), SrcVec))
- return false;
- if (!EvaluateAsRValue(Info, Call->getArg(1), ControlVec))
- return false;
-
- const auto *VT = Call->getType()->getAs<VectorType>();
- if (!VT)
- return false;
-
- QualType ElemT = VT->getElementType();
- unsigned NumElts = VT->getNumElements();
-
- SmallVector<APValue, 64> ResultElements;
- ResultElements.reserve(NumElts);
-
- for (unsigned Idx = 0; Idx != NumElts; ++Idx) {
- APValue CtlVal = ControlVec.getVectorElt(Idx);
- APSInt CtlByte = CtlVal.getInt();
- uint8_t Ctl = static_cast<uint8_t>(CtlByte.getZExtValue());
-
- if (Ctl & 0x80) {
- APValue Zero(Info.Ctx.MakeIntValue(0, ElemT));
- ResultElements.push_back(Zero);
- } else {
- unsigned LaneBase = (Idx / 16) * 16;
- unsigned SrcOffset = Ctl & 0x0F;
- unsigned SrcIdx = LaneBase + SrcOffset;
-
- ResultElements.push_back(SrcVec.getVectorElt(SrcIdx));
- }
- }
- Out = APValue(ResultElements.data(), ResultElements.size());
- return true;
-}
-
-static bool evalPshufBuiltin(EvalInfo &Info, const CallExpr *Call,
- bool IsShufHW, APValue &Out) {
- APValue Vec;
- APSInt Imm;
- if (!EvaluateAsRValue(Info, Call->getArg(0), Vec))
- return false;
- if (!EvaluateInteger(Call->getArg(1), Imm, Info))
- return false;
-
- const auto *VT = Call->getType()->getAs<VectorType>();
- if (!VT)
- return false;
-
- QualType ElemT = VT->getElementType();
- unsigned ElemBits = Info.Ctx.getTypeSize(ElemT);
- unsigned NumElts = VT->getNumElements();
-
- unsigned LaneBits = 128u;
- unsigned LaneElts = LaneBits / ElemBits;
- if (!LaneElts || (NumElts % LaneElts) != 0)
- return false;
-
- uint8_t Ctl = static_cast<uint8_t>(Imm.getZExtValue());
-
- SmallVector<APValue, 32> ResultElements;
- ResultElements.reserve(NumElts);
-
- for (unsigned Idx = 0; Idx != NumElts; Idx++) {
- unsigned LaneBase = (Idx / LaneElts) * LaneElts;
- unsigned LaneIdx = Idx % LaneElts;
- unsigned SrcIdx = Idx;
- unsigned Sel = (Ctl >> (2 * LaneIdx)) & 0x3;
-
- if (ElemBits == 32) {
- SrcIdx = LaneBase + Sel;
- } else {
- constexpr unsigned HalfSize = 4;
- bool InHigh = LaneIdx >= HalfSize;
- if (!IsShufHW && !InHigh) {
- SrcIdx = LaneBase + Sel;
- } else if (IsShufHW && InHigh) {
- unsigned Rel = LaneIdx - HalfSize;
- Sel = (Ctl >> (2 * Rel)) & 0x3;
- SrcIdx = LaneBase + HalfSize + Sel;
- }
- }
-
- ResultElements.push_back(Vec.getVectorElt(SrcIdx));
- }
-
- Out = APValue(ResultElements.data(), ResultElements.size());
- return true;
-}
-
bool VectorExprEvaluator::VisitCallExpr(const CallExpr *E) {
if (!IsConstantEvaluatedBuiltinCall(E))
return ExprEvaluatorBaseTy::VisitCallExpr(E);
@@ -12993,7 +12931,19 @@ bool VectorExprEvaluator::VisitCallExpr(const CallExpr *E) {
case X86::BI__builtin_ia32_pshufb256:
case X86::BI__builtin_ia32_pshufb512: {
APValue R;
- if (!evalPshufbBuiltin(Info, E, R))
+ if (!evalShuffleGeneric(
+ Info, E, R,
+ [](unsigned DstIdx,
+ unsigned ShuffleMask) -> std::pair<unsigned, int> {
+ uint8_t Ctlb = static_cast<uint8_t>(ShuffleMask);
+ if (Ctlb & 0x80)
+ return std::make_pair(0, -1);
+
+ unsigned LaneBase = (DstIdx / 16) * 16;
+ unsigned SrcOffset = Ctlb & 0x0F;
+ unsigned SrcIdx = LaneBase + SrcOffset;
+ return std::make_pair(0, static_cast<int>(SrcIdx));
+ }))
return false;
return Success(R, E);
}
@@ -13002,7 +12952,21 @@ bool VectorExprEvaluator::VisitCallExpr(const CallExpr *E) {
case X86::BI__builtin_ia32_pshuflw256:
case X86::BI__builtin_ia32_pshuflw512: {
APValue R;
- if (!evalPshufBuiltin(Info, E, false, R))
+ if (!evalShuffleGeneric(
+ Info, E, R,
+ [](unsigned DstIdx, unsigned Mask) -> std::pair<unsigned, int> {
+ constexpr unsigned LaneBits = 128u;
+ constexpr unsigned ElemBits = 16u;
+ constexpr unsigned LaneElts = LaneBits / ElemBits;
+ constexpr unsigned HalfSize = 4;
+ unsigned LaneBase = (DstIdx / LaneElts) * LaneElts;
+ unsigned LaneIdx = DstIdx % LaneElts;
+ if (LaneIdx < HalfSize) {
+ unsigned Sel = (Mask >> (2 * LaneIdx)) & 0x3;
+ return std::make_pair(0, static_cast<int>(LaneBase + Sel));
+ }
+ return std::make_pair(0, static_cast<int>(DstIdx));
+ }))
return false;
return Success(R, E);
}
@@ -13011,7 +12975,23 @@ bool VectorExprEvaluator::VisitCallExpr(const CallExpr *E) {
case X86::BI__builtin_ia32_pshufhw256:
case X86::BI__builtin_ia32_pshufhw512: {
APValue R;
- if (!evalPshufBuiltin(Info, E, true, R))
+ if (!evalShuffleGeneric(
+ Info, E, R,
+ [](unsigned DstIdx, unsigned Mask) -> std::pair<unsigned, int> {
+ constexpr unsigned LaneBits = 128u;
+ constexpr unsigned ElemBits = 16u;
+ constexpr unsigned LaneElts = LaneBits / ElemBits;
+ constexpr unsigned HalfSize = 4;
+ unsigned LaneBase = (DstIdx / LaneElts) * LaneElts;
+ unsigned LaneIdx = DstIdx % LaneElts;
+ if (LaneIdx >= HalfSize) {
+ unsigned Rel = LaneIdx - HalfSize;
+ unsigned Sel = (Mask >> (2 * Rel)) & 0x3;
+ return std::make_pair(
+ 0, static_cast<int>(LaneBase + HalfSize + Sel));
+ }
+ return std::make_pair(0, static_cast<int>(DstIdx));
+ }))
return false;
return Success(R, E);
}
@@ -13020,7 +13000,17 @@ bool VectorExprEvaluator::VisitCallExpr(const CallExpr *E) {
case X86::BI__builtin_ia32_pshufd256:
case X86::BI__builtin_ia32_pshufd512: {
APValue R;
- if (!evalPshufBuiltin(Info, E, false, R))
+ if (!evalShuffleGeneric(
+ Info, E, R,
+ [](unsigned DstIdx, unsigned Mask) -> std::pair<unsigned, int> {
+ constexpr unsigned LaneBits = 128u;
+ constexpr unsigned ElemBits = 32u;
+ constexpr unsigned LaneElts = LaneBits / ElemBits;
+ unsigned LaneBase = (DstIdx / LaneElts) * LaneElts;
+ unsigned LaneIdx = DstIdx % LaneElts;
+ unsigned Sel = (Mask >> (2 * LaneIdx)) & 0x3;
+ return std::make_pair(0, static_cast<int>(LaneBase + Sel));
+ }))
return false;
return Success(R, E);
}
@@ -13500,61 +13490,66 @@ bool VectorExprEvaluator::VisitCallExpr(const CallExpr *E) {
case X86::BI__builtin_ia32_pslldqi128_byteshift:
case X86::BI__builtin_ia32_pslldqi256_byteshift:
case X86::BI__builtin_ia32_pslldqi512_byteshift: {
- assert(E->getNumArgs() == 2);
-
- APValue Src;
- APSInt Imm;
- if (!EvaluateAsRValue(Info, E->getArg(0), Src) ||
- !EvaluateInteger(E->getArg(1), Imm, Info))
+ APValue R;
+ if (!evalShuffleGeneric(
+ Info, E, R,
+ [](unsigned DstIdx, unsigned Shift) -> std::pair<unsigned, int> {
+ unsigned LaneBase = (DstIdx / 16) * 16;
+ unsigned LaneIdx = DstIdx % 16;
+ if (LaneIdx < Shift)
+ return std::make_pair(0, -1);
+
+ return std::make_pair(
+ 0, static_cast<int>(LaneBase + LaneIdx - Shift));
+ }))
return false;
-
- unsigned VecLen = Src.getVectorLength();
- unsigned Shift = Imm.getZExtValue() & 0xff;
-
- SmallVector<APValue> ResultElements;
- for (unsigned Lane = 0; Lane != VecLen; Lane += 16) {
- for (unsigned I = 0; I != 16; ++I) {
- if (I < Shift) {
- APSInt Zero(8, /*isUnsigned=*/true);
- Zero = 0;
- ResultElements.push_back(APValue(Zero));
- } else {
- ResultElements.push_back(Src.getVectorElt(Lane + I - Shift));
- }
- }
- }
-
- return Success(APValue(ResultElements.data(), ResultElements.size()), E);
+ return Success(R, E);
}
case X86::BI__builtin_ia32_psrldqi128_byteshift:
case X86::BI__builtin_ia32_psrldqi256_byteshift:
case X86::BI__builtin_ia32_psrldqi512_byteshift: {
- assert(E->getNumArgs() == 2);
-
- APValue Src;
- APSInt Imm;
- if (!EvaluateAsRValue(Info, E->getArg(0), Src) ||
- !EvaluateInteger(E->getArg(1), Imm, Info))
+ APValue R;
+ if (!evalShuffleGeneric(
+ Info, E, R,
+ [](unsigned DstIdx, unsigned Shift) -> std::pair<unsigned, int> {
+ unsigned LaneBase = (DstIdx / 16) * 16;
+ unsigned LaneIdx = DstIdx % 16;
+ if (LaneIdx + Shift < 16)
+ return std::make_pair(
+ 0, static_cast<int>(LaneBase + LaneIdx + Shift));
+
+ return std::make_pair(0, -1);
+ }))
return false;
+ return Success(R, E);
+ }
- unsigned VecLen = Src.getVectorLength();
- unsigned Shift = Imm.getZExtValue() & 0xff;
-
- SmallVector<APValue> ResultElements;
- for (unsigned Lane = 0; Lane != VecLen; Lane += 16) {
- for (unsigned I = 0; I != 16; ++I) {
- if (I + Shift < 16) {
- ResultElements.push_back(Src.getVectorElt(Lane + I + Shift));
- } else {
- APSInt Zero(8, /*isUnsigned=*/true);
- Zero = 0;
- ResultElements.push_back(APValue(Zero));
- }
- }
- }
+ case X86::BI__builtin_ia32_palignr128:
+ case X86::BI__builtin_ia32_palignr256:
+ case X86::BI__builtin_ia32_palignr512: {
+ APValue R;
+ if (!evalShuffleGeneric(Info, E, R, [](unsigned DstIdx, unsigned Shift) {
+          // Default ElemIdx to -1, which zero-fills this destination element
+ unsigned VecIdx = 1;
+ int ElemIdx = -1;
+
+ int Lane = DstIdx / 16;
+ int Offset = DstIdx % 16;
+
+ // Elements come from VecB first, then VecA after the shift boundary
+ unsigned ShiftedIdx = Offset + (Shift & 0xFF);
+ if (ShiftedIdx < 16) { // from VecB
+ ElemIdx = ShiftedIdx + (Lane * 16);
+ } else if (ShiftedIdx < 32) { // from VecA
+ VecIdx = 0;
+ ElemIdx = (ShiftedIdx - 16) + (Lane * 16);
+ }
- return Success(APValue(ResultElements.data(), ResultElements.size()), E);
+ return std::pair<unsigned, int>{VecIdx, ElemIdx};
+ }))
+ return false;
+ return Success(R, E);
}
case X86::BI__builtin_ia32_vpermi2varq128:
case X86::BI__builtin_ia32_vpermi2varpd128: {
diff --git a/clang/lib/Analysis/AnalysisDeclContext.cpp b/clang/lib/Analysis/AnalysisDeclContext.cpp
index 5a52056..f188fc6 100644
--- a/clang/lib/Analysis/AnalysisDeclContext.cpp
+++ b/clang/lib/Analysis/AnalysisDeclContext.cpp
@@ -117,6 +117,11 @@ Stmt *AnalysisDeclContext::getBody(bool &IsAutosynthesized) const {
return BD->getBody();
else if (const auto *FunTmpl = dyn_cast_or_null<FunctionTemplateDecl>(D))
return FunTmpl->getTemplatedDecl()->getBody();
+ else if (const auto *VD = dyn_cast_or_null<VarDecl>(D)) {
+ if (VD->isFileVarDecl()) {
+ return const_cast<Stmt *>(dyn_cast_or_null<Stmt>(VD->getInit()));
+ }
+ }
llvm_unreachable("unknown code decl");
}
diff --git a/clang/lib/Analysis/FlowSensitive/Models/UncheckedOptionalAccessModel.cpp b/clang/lib/Analysis/FlowSensitive/Models/UncheckedOptionalAccessModel.cpp
index 0fa333e..d90f5d4 100644
--- a/clang/lib/Analysis/FlowSensitive/Models/UncheckedOptionalAccessModel.cpp
+++ b/clang/lib/Analysis/FlowSensitive/Models/UncheckedOptionalAccessModel.cpp
@@ -1153,26 +1153,34 @@ auto buildDiagnoseMatchSwitch(
// FIXME: Evaluate the efficiency of matchers. If using matchers results in a
// lot of duplicated work (e.g. string comparisons), consider providing APIs
// that avoid it through memoization.
- auto IgnorableOptional = ignorableOptional(Options);
- return CFGMatchSwitchBuilder<
- const Environment,
- llvm::SmallVector<UncheckedOptionalAccessDiagnostic>>()
- // optional::value
- .CaseOfCFGStmt<CXXMemberCallExpr>(
- valueCall(IgnorableOptional),
- [](const CXXMemberCallExpr *E, const MatchFinder::MatchResult &,
- const Environment &Env) {
- return diagnoseUnwrapCall(E->getImplicitObjectArgument(), Env);
- })
-
- // optional::operator*, optional::operator->
- .CaseOfCFGStmt<CallExpr>(valueOperatorCall(IgnorableOptional),
- [](const CallExpr *E,
+ const auto IgnorableOptional = ignorableOptional(Options);
+
+ auto DiagBuilder =
+ CFGMatchSwitchBuilder<
+ const Environment,
+ llvm::SmallVector<UncheckedOptionalAccessDiagnostic>>()
+ // optional::operator*, optional::operator->
+ .CaseOfCFGStmt<CallExpr>(
+ valueOperatorCall(IgnorableOptional),
+ [](const CallExpr *E, const MatchFinder::MatchResult &,
+ const Environment &Env) {
+ return diagnoseUnwrapCall(E->getArg(0), Env);
+ });
+
+ auto Builder = Options.IgnoreValueCalls
+ ? std::move(DiagBuilder)
+ : std::move(DiagBuilder)
+ // optional::value
+ .CaseOfCFGStmt<CXXMemberCallExpr>(
+ valueCall(IgnorableOptional),
+ [](const CXXMemberCallExpr *E,
const MatchFinder::MatchResult &,
const Environment &Env) {
- return diagnoseUnwrapCall(E->getArg(0), Env);
- })
- .Build();
+ return diagnoseUnwrapCall(
+ E->getImplicitObjectArgument(), Env);
+ });
+
+ return std::move(Builder).Build();
}
} // namespace
diff --git a/clang/lib/Basic/Targets/RISCV.h b/clang/lib/Basic/Targets/RISCV.h
index 85fa4cc..21555b9 100644
--- a/clang/lib/Basic/Targets/RISCV.h
+++ b/clang/lib/Basic/Targets/RISCV.h
@@ -126,7 +126,7 @@ public:
llvm::APInt getFMVPriority(ArrayRef<StringRef> Features) const override;
std::pair<unsigned, unsigned> hardwareInterferenceSizes() const override {
- return std::make_pair(32, 32);
+ return std::make_pair(64, 64);
}
bool supportsCpuSupports() const override { return getTriple().isOSLinux(); }
diff --git a/clang/lib/CIR/CodeGen/CIRGenExprScalar.cpp b/clang/lib/CIR/CodeGen/CIRGenExprScalar.cpp
index 5eba5ba..c1a3613 100644
--- a/clang/lib/CIR/CodeGen/CIRGenExprScalar.cpp
+++ b/clang/lib/CIR/CodeGen/CIRGenExprScalar.cpp
@@ -438,6 +438,10 @@ public:
return cgf.emitVAArg(ve);
}
+ mlir::Value VisitCXXRewrittenBinaryOperator(CXXRewrittenBinaryOperator *e) {
+ return Visit(e->getSemanticForm());
+ }
+
mlir::Value VisitUnaryExprOrTypeTraitExpr(const UnaryExprOrTypeTraitExpr *e);
mlir::Value
VisitAbstractConditionalOperator(const AbstractConditionalOperator *e);
diff --git a/clang/lib/CIR/Dialect/Transforms/CIRCanonicalize.cpp b/clang/lib/CIR/Dialect/Transforms/CIRCanonicalize.cpp
index fbecab9..2ef09b7 100644
--- a/clang/lib/CIR/Dialect/Transforms/CIRCanonicalize.cpp
+++ b/clang/lib/CIR/Dialect/Transforms/CIRCanonicalize.cpp
@@ -26,6 +26,11 @@
using namespace mlir;
using namespace cir;
+namespace mlir {
+#define GEN_PASS_DEF_CIRCANONICALIZE
+#include "clang/CIR/Dialect/Passes.h.inc"
+} // namespace mlir
+
namespace {
/// Removes branches between two blocks if it is the only branch.
@@ -101,7 +106,8 @@ struct RemoveEmptySwitch : public OpRewritePattern<SwitchOp> {
// CIRCanonicalizePass
//===----------------------------------------------------------------------===//
-struct CIRCanonicalizePass : public CIRCanonicalizeBase<CIRCanonicalizePass> {
+struct CIRCanonicalizePass
+ : public impl::CIRCanonicalizeBase<CIRCanonicalizePass> {
using CIRCanonicalizeBase::CIRCanonicalizeBase;
// The same operation rewriting done here could have been performed
diff --git a/clang/lib/CIR/Dialect/Transforms/CIRSimplify.cpp b/clang/lib/CIR/Dialect/Transforms/CIRSimplify.cpp
index 3c6f768..dcef9dd 100644
--- a/clang/lib/CIR/Dialect/Transforms/CIRSimplify.cpp
+++ b/clang/lib/CIR/Dialect/Transforms/CIRSimplify.cpp
@@ -21,6 +21,11 @@
using namespace mlir;
using namespace cir;
+namespace mlir {
+#define GEN_PASS_DEF_CIRSIMPLIFY
+#include "clang/CIR/Dialect/Passes.h.inc"
+} // namespace mlir
+
//===----------------------------------------------------------------------===//
// Rewrite patterns
//===----------------------------------------------------------------------===//
@@ -283,7 +288,7 @@ struct SimplifyVecSplat : public OpRewritePattern<VecSplatOp> {
// CIRSimplifyPass
//===----------------------------------------------------------------------===//
-struct CIRSimplifyPass : public CIRSimplifyBase<CIRSimplifyPass> {
+struct CIRSimplifyPass : public impl::CIRSimplifyBase<CIRSimplifyPass> {
using CIRSimplifyBase::CIRSimplifyBase;
void runOnOperation() override;
diff --git a/clang/lib/CIR/Dialect/Transforms/FlattenCFG.cpp b/clang/lib/CIR/Dialect/Transforms/FlattenCFG.cpp
index ca7554e..69a5334 100644
--- a/clang/lib/CIR/Dialect/Transforms/FlattenCFG.cpp
+++ b/clang/lib/CIR/Dialect/Transforms/FlattenCFG.cpp
@@ -26,6 +26,11 @@
using namespace mlir;
using namespace cir;
+namespace mlir {
+#define GEN_PASS_DEF_CIRFLATTENCFG
+#include "clang/CIR/Dialect/Passes.h.inc"
+} // namespace mlir
+
namespace {
/// Lowers operations with the terminator trait that have a single successor.
@@ -50,7 +55,7 @@ void walkRegionSkipping(
});
}
-struct CIRFlattenCFGPass : public CIRFlattenCFGBase<CIRFlattenCFGPass> {
+struct CIRFlattenCFGPass : public impl::CIRFlattenCFGBase<CIRFlattenCFGPass> {
CIRFlattenCFGPass() = default;
void runOnOperation() override;
diff --git a/clang/lib/CIR/Dialect/Transforms/GotoSolver.cpp b/clang/lib/CIR/Dialect/Transforms/GotoSolver.cpp
index c0db984..00972b6 100644
--- a/clang/lib/CIR/Dialect/Transforms/GotoSolver.cpp
+++ b/clang/lib/CIR/Dialect/Transforms/GotoSolver.cpp
@@ -14,9 +14,14 @@
using namespace mlir;
using namespace cir;
+namespace mlir {
+#define GEN_PASS_DEF_GOTOSOLVER
+#include "clang/CIR/Dialect/Passes.h.inc"
+} // namespace mlir
+
namespace {
-struct GotoSolverPass : public GotoSolverBase<GotoSolverPass> {
+struct GotoSolverPass : public impl::GotoSolverBase<GotoSolverPass> {
GotoSolverPass() = default;
void runOnOperation() override;
};
diff --git a/clang/lib/CIR/Dialect/Transforms/HoistAllocas.cpp b/clang/lib/CIR/Dialect/Transforms/HoistAllocas.cpp
index 72bbf08..74b22fa 100644
--- a/clang/lib/CIR/Dialect/Transforms/HoistAllocas.cpp
+++ b/clang/lib/CIR/Dialect/Transforms/HoistAllocas.cpp
@@ -20,9 +20,14 @@
using namespace mlir;
using namespace cir;
+namespace mlir {
+#define GEN_PASS_DEF_HOISTALLOCAS
+#include "clang/CIR/Dialect/Passes.h.inc"
+} // namespace mlir
+
namespace {
-struct HoistAllocasPass : public HoistAllocasBase<HoistAllocasPass> {
+struct HoistAllocasPass : public impl::HoistAllocasBase<HoistAllocasPass> {
HoistAllocasPass() = default;
void runOnOperation() override;
diff --git a/clang/lib/CIR/Dialect/Transforms/LoweringPrepare.cpp b/clang/lib/CIR/Dialect/Transforms/LoweringPrepare.cpp
index cba0464..29b1211 100644
--- a/clang/lib/CIR/Dialect/Transforms/LoweringPrepare.cpp
+++ b/clang/lib/CIR/Dialect/Transforms/LoweringPrepare.cpp
@@ -23,6 +23,11 @@
using namespace mlir;
using namespace cir;
+namespace mlir {
+#define GEN_PASS_DEF_LOWERINGPREPARE
+#include "clang/CIR/Dialect/Passes.h.inc"
+} // namespace mlir
+
static SmallString<128> getTransformedFileName(mlir::ModuleOp mlirModule) {
SmallString<128> fileName;
@@ -53,7 +58,8 @@ static cir::FuncOp getCalledFunction(cir::CallOp callOp) {
}
namespace {
-struct LoweringPreparePass : public LoweringPrepareBase<LoweringPreparePass> {
+struct LoweringPreparePass
+ : public impl::LoweringPrepareBase<LoweringPreparePass> {
LoweringPreparePass() = default;
void runOnOperation() override;
diff --git a/clang/lib/CIR/Dialect/Transforms/PassDetail.h b/clang/lib/CIR/Dialect/Transforms/PassDetail.h
index 600dde5..ef42a85 100644
--- a/clang/lib/CIR/Dialect/Transforms/PassDetail.h
+++ b/clang/lib/CIR/Dialect/Transforms/PassDetail.h
@@ -21,7 +21,7 @@ namespace mlir {
template <typename ConcreteDialect>
void registerDialect(DialectRegistry &registry);
-#define GEN_PASS_CLASSES
+#define GEN_PASS_DECL
#include "clang/CIR/Dialect/Passes.h.inc"
} // namespace mlir
diff --git a/clang/lib/CodeGen/CGCall.cpp b/clang/lib/CodeGen/CGCall.cpp
index d4d5ea8..efacb3c 100644
--- a/clang/lib/CodeGen/CGCall.cpp
+++ b/clang/lib/CodeGen/CGCall.cpp
@@ -1991,6 +1991,7 @@ static void getTrivialDefaultFunctionAttributes(
// This is the default behavior.
break;
case CodeGenOptions::FramePointerKind::Reserved:
+ case CodeGenOptions::FramePointerKind::NonLeafNoReserve:
case CodeGenOptions::FramePointerKind::NonLeaf:
case CodeGenOptions::FramePointerKind::All:
FuncAttrs.addAttribute("frame-pointer",
diff --git a/clang/lib/CodeGen/CGHLSLRuntime.cpp b/clang/lib/CodeGen/CGHLSLRuntime.cpp
index e392a12..4bdba9b 100644
--- a/clang/lib/CodeGen/CGHLSLRuntime.cpp
+++ b/clang/lib/CodeGen/CGHLSLRuntime.cpp
@@ -261,12 +261,12 @@ static std::optional<llvm::Value *> initializeLocalResourceArray(
llvm::Type *
CGHLSLRuntime::convertHLSLSpecificType(const Type *T,
- SmallVector<int32_t> *Packoffsets) {
+ const CGHLSLOffsetInfo &OffsetInfo) {
assert(T->isHLSLSpecificType() && "Not an HLSL specific type!");
// Check if the target has a specific translation for this type first.
if (llvm::Type *TargetTy =
- CGM.getTargetCodeGenInfo().getHLSLType(CGM, T, Packoffsets))
+ CGM.getTargetCodeGenInfo().getHLSLType(CGM, T, OffsetInfo))
return TargetTy;
llvm_unreachable("Generic handling of HLSL types is not supported.");
@@ -357,25 +357,14 @@ createBufferHandleType(const HLSLBufferDecl *BufDecl) {
return cast<HLSLAttributedResourceType>(QT.getTypePtr());
}
-// Iterates over all declarations in the HLSL buffer and based on the
-// packoffset or register(c#) annotations it fills outs the Layout
-// vector with the user-specified layout offsets.
-// The buffer offsets can be specified 2 ways:
-// 1. declarations in cbuffer {} block can have a packoffset annotation
-// (translates to HLSLPackOffsetAttr)
-// 2. default constant buffer declarations at global scope can have
-// register(c#) annotations (translates to HLSLResourceBindingAttr with
-// RegisterType::C)
-// It is not guaranteed that all declarations in a buffer have an annotation.
-// For those where it is not specified a -1 value is added to the Layout
-// vector. In the final layout these declarations will be placed at the end
-// of the HLSL buffer after all of the elements with specified offset.
-static void fillPackoffsetLayout(const HLSLBufferDecl *BufDecl,
- SmallVector<int32_t> &Layout) {
- assert(Layout.empty() && "expected empty vector for layout");
- assert(BufDecl->hasValidPackoffset());
+CGHLSLOffsetInfo CGHLSLOffsetInfo::fromDecl(const HLSLBufferDecl &BufDecl) {
+ CGHLSLOffsetInfo Result;
- for (Decl *D : BufDecl->buffer_decls()) {
+ // If we don't have packoffset info, just return an empty result.
+ if (!BufDecl.hasValidPackoffset())
+ return Result;
+
+ for (Decl *D : BufDecl.buffer_decls()) {
if (isa<CXXRecordDecl, EmptyDecl>(D) || isa<FunctionDecl>(D)) {
continue;
}
@@ -384,11 +373,11 @@ static void fillPackoffsetLayout(const HLSLBufferDecl *BufDecl,
continue;
if (!VD->hasAttrs()) {
- Layout.push_back(-1);
+ Result.Offsets.push_back(Unspecified);
continue;
}
- int32_t Offset = -1;
+ uint32_t Offset = Unspecified;
for (auto *Attr : VD->getAttrs()) {
if (auto *POA = dyn_cast<HLSLPackOffsetAttr>(Attr)) {
Offset = POA->getOffsetInBytes();
@@ -401,8 +390,9 @@ static void fillPackoffsetLayout(const HLSLBufferDecl *BufDecl,
break;
}
}
- Layout.push_back(Offset);
+ Result.Offsets.push_back(Offset);
}
+ return Result;
}
// Codegen for HLSLBufferDecl
@@ -419,13 +409,9 @@ void CGHLSLRuntime::addBuffer(const HLSLBufferDecl *BufDecl) {
return;
// create global variable for the constant buffer
- SmallVector<int32_t> Layout;
- if (BufDecl->hasValidPackoffset())
- fillPackoffsetLayout(BufDecl, Layout);
-
- llvm::TargetExtType *TargetTy =
- cast<llvm::TargetExtType>(convertHLSLSpecificType(
- ResHandleTy, BufDecl->hasValidPackoffset() ? &Layout : nullptr));
+ CGHLSLOffsetInfo OffsetInfo = CGHLSLOffsetInfo::fromDecl(*BufDecl);
+ llvm::TargetExtType *TargetTy = cast<llvm::TargetExtType>(
+ convertHLSLSpecificType(ResHandleTy, OffsetInfo));
llvm::GlobalVariable *BufGV = new GlobalVariable(
TargetTy, /*isConstant*/ false,
GlobalValue::LinkageTypes::ExternalLinkage, PoisonValue::get(TargetTy),
diff --git a/clang/lib/CodeGen/CGHLSLRuntime.h b/clang/lib/CodeGen/CGHLSLRuntime.h
index 9d31714..488a322 100644
--- a/clang/lib/CodeGen/CGHLSLRuntime.h
+++ b/clang/lib/CodeGen/CGHLSLRuntime.h
@@ -81,6 +81,33 @@ class CodeGenModule;
class CodeGenFunction;
class LValue;
+class CGHLSLOffsetInfo {
+ SmallVector<uint32_t> Offsets;
+
+public:
+ static const uint32_t Unspecified = ~0U;
+
+ /// Iterates over all declarations in the HLSL buffer and based on the
+ /// packoffset or register(c#) annotations it fills out the Offsets vector
+ /// with the user-specified layout offsets. The buffer offsets can be
+ /// specified 2 ways: 1. declarations in cbuffer {} block can have a
+ /// packoffset annotation (translates to HLSLPackOffsetAttr) 2. default
+ /// constant buffer declarations at global scope can have register(c#)
+ /// annotations (translates to HLSLResourceBindingAttr with RegisterType::C)
+ /// It is not guaranteed that all declarations in a buffer have an annotation.
+ /// For those where it is not specified a `~0U` value is added to the Offsets
+ /// vector. In the final layout these declarations will be placed at the end
+ /// of the HLSL buffer after all of the elements with specified offset.
+ static CGHLSLOffsetInfo fromDecl(const HLSLBufferDecl &BufDecl);
+
+ /// Get the given offset, or `~0U` if there is no offset for the member.
+ uint32_t operator[](size_t I) const {
+ if (Offsets.empty())
+ return Unspecified;
+ return Offsets[I];
+ }
+};
+
class CGHLSLRuntime {
public:
//===----------------------------------------------------------------------===//
@@ -167,9 +194,11 @@ public:
CGHLSLRuntime(CodeGenModule &CGM) : CGM(CGM) {}
virtual ~CGHLSLRuntime() {}
- llvm::Type *
- convertHLSLSpecificType(const Type *T,
- SmallVector<int32_t> *Packoffsets = nullptr);
+ llvm::Type *convertHLSLSpecificType(const Type *T,
+ const CGHLSLOffsetInfo &OffsetInfo);
+ llvm::Type *convertHLSLSpecificType(const Type *T) {
+ return convertHLSLSpecificType(T, CGHLSLOffsetInfo());
+ }
void generateGlobalCtorDtorCalls();
diff --git a/clang/lib/CodeGen/CodeGenModule.cpp b/clang/lib/CodeGen/CodeGenModule.cpp
index 98d59b7..f303550 100644
--- a/clang/lib/CodeGen/CodeGenModule.cpp
+++ b/clang/lib/CodeGen/CodeGenModule.cpp
@@ -1512,6 +1512,9 @@ void CodeGenModule::Release() {
case CodeGenOptions::FramePointerKind::Reserved:
getModule().setFramePointer(llvm::FramePointerKind::Reserved);
break;
+ case CodeGenOptions::FramePointerKind::NonLeafNoReserve:
+ getModule().setFramePointer(llvm::FramePointerKind::NonLeafNoReserve);
+ break;
case CodeGenOptions::FramePointerKind::NonLeaf:
getModule().setFramePointer(llvm::FramePointerKind::NonLeaf);
break;
diff --git a/clang/lib/CodeGen/HLSLBufferLayoutBuilder.cpp b/clang/lib/CodeGen/HLSLBufferLayoutBuilder.cpp
index 838903c..4bc6d56 100644
--- a/clang/lib/CodeGen/HLSLBufferLayoutBuilder.cpp
+++ b/clang/lib/CodeGen/HLSLBufferLayoutBuilder.cpp
@@ -66,8 +66,9 @@ namespace CodeGen {
// annotation though. For those that don't, the PackOffsets array will contain
// -1 value instead. These elements must be placed at the end of the layout
// after all of the elements with specific offset.
-llvm::TargetExtType *HLSLBufferLayoutBuilder::createLayoutType(
- const RecordType *RT, const llvm::SmallVector<int32_t> *PackOffsets) {
+llvm::TargetExtType *
+HLSLBufferLayoutBuilder::createLayoutType(const RecordType *RT,
+ const CGHLSLOffsetInfo &OffsetInfo) {
// check if we already have the layout type for this struct
if (llvm::TargetExtType *Ty =
@@ -101,14 +102,10 @@ llvm::TargetExtType *HLSLBufferLayoutBuilder::createLayoutType(
const CXXRecordDecl *RD = RecordDecls.pop_back_val();
for (const auto *FD : RD->fields()) {
- assert((!PackOffsets || Index < PackOffsets->size()) &&
- "number of elements in layout struct does not match number of "
- "packoffset annotations");
-
// No PackOffset info at all, or have a valid packoffset/register(c#)
// annotations value -> layout the field.
- const int PO = PackOffsets ? (*PackOffsets)[Index++] : -1;
- if (!PackOffsets || PO != -1) {
+ const uint32_t PO = OffsetInfo[Index++];
+ if (PO != CGHLSLOffsetInfo::Unspecified) {
if (!layoutField(FD, EndOffset, FieldOffset, FieldType, PO))
return nullptr;
Layout.push_back(FieldOffset);
@@ -175,7 +172,7 @@ bool HLSLBufferLayoutBuilder::layoutField(const FieldDecl *FD,
unsigned &EndOffset,
unsigned &FieldOffset,
llvm::Type *&FieldType,
- int Packoffset) {
+ uint32_t Packoffset) {
// Size of element; for arrays this is a size of a single element in the
// array. Total array size of calculated as (ArrayCount-1) * ArrayStride +
@@ -201,8 +198,9 @@ bool HLSLBufferLayoutBuilder::layoutField(const FieldDecl *FD,
// For array of structures, create a new array with a layout type
// instead of the structure type.
if (Ty->isStructureOrClassType()) {
+ CGHLSLOffsetInfo EmptyOffsets;
llvm::Type *NewTy = cast<llvm::TargetExtType>(
- createLayoutType(Ty->getAsCanonical<RecordType>()));
+ createLayoutType(Ty->getAsCanonical<RecordType>(), EmptyOffsets));
if (!NewTy)
return false;
assert(isa<llvm::TargetExtType>(NewTy) && "expected target type");
@@ -216,17 +214,20 @@ bool HLSLBufferLayoutBuilder::layoutField(const FieldDecl *FD,
ElemLayoutTy = CGM.getTypes().ConvertTypeForMem(FieldTy);
}
ArrayStride = llvm::alignTo(ElemSize, CBufferRowSizeInBytes);
- ElemOffset = (Packoffset != -1) ? Packoffset : NextRowOffset;
+ ElemOffset = (Packoffset != CGHLSLOffsetInfo::Unspecified) ? Packoffset
+ : NextRowOffset;
} else if (FieldTy->isStructureOrClassType()) {
// Create a layout type for the structure
+ CGHLSLOffsetInfo EmptyOffsets;
ElemLayoutTy = createLayoutType(
- cast<RecordType>(FieldTy->getAsCanonical<RecordType>()));
+ cast<RecordType>(FieldTy->getAsCanonical<RecordType>()), EmptyOffsets);
if (!ElemLayoutTy)
return false;
assert(isa<llvm::TargetExtType>(ElemLayoutTy) && "expected target type");
ElemSize = cast<llvm::TargetExtType>(ElemLayoutTy)->getIntParameter(0);
- ElemOffset = (Packoffset != -1) ? Packoffset : NextRowOffset;
+ ElemOffset = (Packoffset != CGHLSLOffsetInfo::Unspecified) ? Packoffset
+ : NextRowOffset;
} else {
// scalar or vector - find element size and alignment
@@ -246,7 +247,7 @@ bool HLSLBufferLayoutBuilder::layoutField(const FieldDecl *FD,
}
// calculate or get element offset for the vector or scalar
- if (Packoffset != -1) {
+ if (Packoffset != CGHLSLOffsetInfo::Unspecified) {
ElemOffset = Packoffset;
} else {
ElemOffset = llvm::alignTo(EndOffset, Align);
@@ -269,5 +270,13 @@ bool HLSLBufferLayoutBuilder::layoutField(const FieldDecl *FD,
return true;
}
+bool HLSLBufferLayoutBuilder::layoutField(const FieldDecl *FD,
+ unsigned &EndOffset,
+ unsigned &FieldOffset,
+ llvm::Type *&FieldType) {
+ return layoutField(FD, EndOffset, FieldOffset, FieldType,
+ CGHLSLOffsetInfo::Unspecified);
+}
+
} // namespace CodeGen
} // namespace clang
diff --git a/clang/lib/CodeGen/HLSLBufferLayoutBuilder.h b/clang/lib/CodeGen/HLSLBufferLayoutBuilder.h
index 61240b2..916e60e 100644
--- a/clang/lib/CodeGen/HLSLBufferLayoutBuilder.h
+++ b/clang/lib/CodeGen/HLSLBufferLayoutBuilder.h
@@ -14,6 +14,7 @@ class RecordType;
class FieldDecl;
namespace CodeGen {
+class CGHLSLOffsetInfo;
class CodeGenModule;
//===----------------------------------------------------------------------===//
@@ -33,14 +34,15 @@ public:
// Returns LLVM target extension type with the name LayoutTypeName
// for given structure type and layout data. The first number in
// the Layout is the size followed by offsets for each struct element.
- llvm::TargetExtType *
- createLayoutType(const RecordType *StructType,
- const llvm::SmallVector<int32_t> *Packoffsets = nullptr);
+ llvm::TargetExtType *createLayoutType(const RecordType *StructType,
+ const CGHLSLOffsetInfo &OffsetInfo);
private:
bool layoutField(const clang::FieldDecl *FD, unsigned &EndOffset,
unsigned &FieldOffset, llvm::Type *&FieldType,
- int Packoffset = -1);
+ uint32_t Packoffset);
+ bool layoutField(const clang::FieldDecl *FD, unsigned &EndOffset,
+ unsigned &FieldOffset, llvm::Type *&FieldType);
};
} // namespace CodeGen
diff --git a/clang/lib/CodeGen/TargetInfo.h b/clang/lib/CodeGen/TargetInfo.h
index f63e900..383f52f 100644
--- a/clang/lib/CodeGen/TargetInfo.h
+++ b/clang/lib/CodeGen/TargetInfo.h
@@ -39,6 +39,7 @@ class ABIInfo;
class CallArgList;
class CodeGenFunction;
class CGBlockInfo;
+class CGHLSLOffsetInfo;
class SwiftABIInfo;
/// TargetCodeGenInfo - This class organizes various target-specific
@@ -442,9 +443,8 @@ public:
}
/// Return an LLVM type that corresponds to a HLSL type
- virtual llvm::Type *
- getHLSLType(CodeGenModule &CGM, const Type *T,
- const SmallVector<int32_t> *Packoffsets = nullptr) const {
+ virtual llvm::Type *getHLSLType(CodeGenModule &CGM, const Type *T,
+ const CGHLSLOffsetInfo &OffsetInfo) const {
return nullptr;
}
diff --git a/clang/lib/CodeGen/Targets/DirectX.cpp b/clang/lib/CodeGen/Targets/DirectX.cpp
index b4cebb9..f30b302 100644
--- a/clang/lib/CodeGen/Targets/DirectX.cpp
+++ b/clang/lib/CodeGen/Targets/DirectX.cpp
@@ -29,14 +29,13 @@ public:
DirectXTargetCodeGenInfo(CodeGen::CodeGenTypes &CGT)
: TargetCodeGenInfo(std::make_unique<DefaultABIInfo>(CGT)) {}
- llvm::Type *
- getHLSLType(CodeGenModule &CGM, const Type *T,
- const SmallVector<int32_t> *Packoffsets = nullptr) const override;
+ llvm::Type *getHLSLType(CodeGenModule &CGM, const Type *T,
+ const CGHLSLOffsetInfo &OffsetInfo) const override;
};
llvm::Type *DirectXTargetCodeGenInfo::getHLSLType(
CodeGenModule &CGM, const Type *Ty,
- const SmallVector<int32_t> *Packoffsets) const {
+ const CGHLSLOffsetInfo &OffsetInfo) const {
auto *ResType = dyn_cast<HLSLAttributedResourceType>(Ty);
if (!ResType)
return nullptr;
@@ -78,7 +77,7 @@ llvm::Type *DirectXTargetCodeGenInfo::getHLSLType(
llvm::Type *BufferLayoutTy =
HLSLBufferLayoutBuilder(CGM, "dx.Layout")
.createLayoutType(ContainedTy->castAsCanonical<RecordType>(),
- Packoffsets);
+ OffsetInfo);
if (!BufferLayoutTy)
return nullptr;
diff --git a/clang/lib/CodeGen/Targets/SPIR.cpp b/clang/lib/CodeGen/Targets/SPIR.cpp
index abd049a..be7e9cc 100644
--- a/clang/lib/CodeGen/Targets/SPIR.cpp
+++ b/clang/lib/CodeGen/Targets/SPIR.cpp
@@ -53,9 +53,8 @@ public:
unsigned getDeviceKernelCallingConv() const override;
llvm::Type *getOpenCLType(CodeGenModule &CGM, const Type *T) const override;
- llvm::Type *
- getHLSLType(CodeGenModule &CGM, const Type *Ty,
- const SmallVector<int32_t> *Packoffsets = nullptr) const override;
+ llvm::Type *getHLSLType(CodeGenModule &CGM, const Type *Ty,
+ const CGHLSLOffsetInfo &OffsetInfo) const override;
llvm::Type *getSPIRVImageTypeFromHLSLResource(
const HLSLAttributedResourceType::Attributes &attributes,
QualType SampledType, CodeGenModule &CGM) const;
@@ -260,8 +259,16 @@ CommonSPIRTargetCodeGenInfo::getNullPointer(const CodeGen::CodeGenModule &CGM,
LangAS AS = QT->getUnqualifiedDesugaredType()->isNullPtrType()
? LangAS::Default
: QT->getPointeeType().getAddressSpace();
+ unsigned ASAsInt = static_cast<unsigned>(AS);
+ unsigned FirstTargetASAsInt =
+ static_cast<unsigned>(LangAS::FirstTargetAddressSpace);
+ unsigned CodeSectionINTELAS = FirstTargetASAsInt + 9;
+ // As per SPV_INTEL_function_pointers, it is illegal to addrspacecast
+ // function pointers to/from the generic AS.
+ bool IsFunctionPtrAS =
+ CGM.getTriple().isSPIRV() && ASAsInt == CodeSectionINTELAS;
if (AS == LangAS::Default || AS == LangAS::opencl_generic ||
- AS == LangAS::opencl_constant)
+ AS == LangAS::opencl_constant || IsFunctionPtrAS)
return llvm::ConstantPointerNull::get(PT);
auto &Ctx = CGM.getContext();
@@ -510,7 +517,7 @@ static llvm::Type *getInlineSpirvType(CodeGenModule &CGM,
llvm::Type *CommonSPIRTargetCodeGenInfo::getHLSLType(
CodeGenModule &CGM, const Type *Ty,
- const SmallVector<int32_t> *Packoffsets) const {
+ const CGHLSLOffsetInfo &OffsetInfo) const {
llvm::LLVMContext &Ctx = CGM.getLLVMContext();
if (auto *SpirvType = dyn_cast<HLSLInlineSpirvType>(Ty))
@@ -559,7 +566,7 @@ llvm::Type *CommonSPIRTargetCodeGenInfo::getHLSLType(
llvm::Type *BufferLayoutTy =
HLSLBufferLayoutBuilder(CGM, "spirv.Layout")
.createLayoutType(ContainedTy->castAsCanonical<RecordType>(),
- Packoffsets);
+ OffsetInfo);
uint32_t StorageClass = /* Uniform storage class */ 2;
return llvm::TargetExtType::get(Ctx, "spirv.VulkanBuffer", {BufferLayoutTy},
{StorageClass, false});
diff --git a/clang/lib/Driver/ToolChains/Clang.cpp b/clang/lib/Driver/ToolChains/Clang.cpp
index 2791b1e..8038993 100644
--- a/clang/lib/Driver/ToolChains/Clang.cpp
+++ b/clang/lib/Driver/ToolChains/Clang.cpp
@@ -5704,6 +5704,9 @@ void Clang::ConstructJob(Compilation &C, const JobAction &JA,
case CodeGenOptions::FramePointerKind::Reserved:
FPKeepKindStr = "-mframe-pointer=reserved";
break;
+ case CodeGenOptions::FramePointerKind::NonLeafNoReserve:
+ FPKeepKindStr = "-mframe-pointer=non-leaf-no-reserve";
+ break;
case CodeGenOptions::FramePointerKind::NonLeaf:
FPKeepKindStr = "-mframe-pointer=non-leaf";
break;
diff --git a/clang/lib/Driver/ToolChains/CommonArgs.cpp b/clang/lib/Driver/ToolChains/CommonArgs.cpp
index 9e3ca9f..4c036f0 100644
--- a/clang/lib/Driver/ToolChains/CommonArgs.cpp
+++ b/clang/lib/Driver/ToolChains/CommonArgs.cpp
@@ -221,26 +221,39 @@ static bool framePointerImpliesLeafFramePointer(const llvm::opt::ArgList &Args,
clang::CodeGenOptions::FramePointerKind
getFramePointerKind(const llvm::opt::ArgList &Args,
const llvm::Triple &Triple) {
- // There are three things to consider here:
+ // There are four things to consider here:
// * Should a frame record be created for non-leaf functions?
// * Should a frame record be created for leaf functions?
- // * Is the frame pointer register reserved, i.e. must it always point to
- // either a new, valid frame record or be un-modified?
+ // * Is the frame pointer register reserved in non-leaf functions?
+ // i.e. must it always point to either a new, valid frame record or be
+ // un-modified?
+ // * Is the frame pointer register reserved in leaf functions?
//
// Not all combinations of these are valid:
// * It's not useful to have leaf frame records without non-leaf ones.
// * It's not useful to have frame records without reserving the frame
// pointer.
//
- // | Non-leaf | Leaf | Reserved |
- // | N | N | N | FramePointerKind::None
- // | N | N | Y | FramePointerKind::Reserved
- // | N | Y | N | Invalid
- // | N | Y | Y | Invalid
- // | Y | N | N | Invalid
- // | Y | N | Y | FramePointerKind::NonLeaf
- // | Y | Y | N | Invalid
- // | Y | Y | Y | FramePointerKind::All
+ // | Frame Setup | Reg Reserved |
+ // |-----------------|-----------------|
+ // | Non-leaf | Leaf | Non-Leaf | Leaf |
+ // |----------|------|----------|------|
+ // | N | N | N | N | FramePointerKind::None
+ // | N | N | N | Y | Invalid
+ // | N | N | Y | N | Invalid
+ // | N | N | Y | Y | FramePointerKind::Reserved
+ // | N | Y | N | N | Invalid
+ // | N | Y | N | Y | Invalid
+ // | N | Y | Y | N | Invalid
+ // | N | Y | Y | Y | Invalid
+ // | Y | N | N | N | Invalid
+ // | Y | N | N | Y | Invalid
+ // | Y | N | Y | N | FramePointerKind::NonLeafNoReserve
+ // | Y | N | Y | Y | FramePointerKind::NonLeaf
+ // | Y | Y | N | N | Invalid
+ // | Y | Y | N | Y | Invalid
+ // | Y | Y | Y | N | Invalid
+ // | Y | Y | Y | Y | FramePointerKind::All
//
// The FramePointerKind::Reserved case is currently only reachable for Arm,
// which has the -mframe-chain= option which can (in combination with
@@ -259,12 +272,18 @@ getFramePointerKind(const llvm::opt::ArgList &Args,
Args.hasFlag(options::OPT_mno_omit_leaf_frame_pointer,
options::OPT_momit_leaf_frame_pointer, DefaultLeafFP);
- bool FPRegReserved = EnableFP || mustMaintainValidFrameChain(Args, Triple);
+ bool FPRegReserved = Args.hasFlag(options::OPT_mreserve_frame_pointer_reg,
+ options::OPT_mno_reserve_frame_pointer_reg,
+ mustMaintainValidFrameChain(Args, Triple));
if (EnableFP) {
if (EnableLeafFP)
return clang::CodeGenOptions::FramePointerKind::All;
- return clang::CodeGenOptions::FramePointerKind::NonLeaf;
+
+ if (FPRegReserved)
+ return clang::CodeGenOptions::FramePointerKind::NonLeaf;
+
+ return clang::CodeGenOptions::FramePointerKind::NonLeafNoReserve;
}
if (FPRegReserved)
return clang::CodeGenOptions::FramePointerKind::Reserved;
diff --git a/clang/lib/Driver/ToolChains/Flang.cpp b/clang/lib/Driver/ToolChains/Flang.cpp
index 038395e..270904d 100644
--- a/clang/lib/Driver/ToolChains/Flang.cpp
+++ b/clang/lib/Driver/ToolChains/Flang.cpp
@@ -1071,6 +1071,9 @@ void Flang::ConstructJob(Compilation &C, const JobAction &JA,
case CodeGenOptions::FramePointerKind::Reserved:
FPKeepKindStr = "-mframe-pointer=reserved";
break;
+ case CodeGenOptions::FramePointerKind::NonLeafNoReserve:
+ FPKeepKindStr = "-mframe-pointer=non-leaf-no-reserve";
+ break;
case CodeGenOptions::FramePointerKind::NonLeaf:
FPKeepKindStr = "-mframe-pointer=non-leaf";
break;
diff --git a/clang/lib/Headers/__clang_cuda_device_functions.h b/clang/lib/Headers/__clang_cuda_device_functions.h
index 8612372..0226fe9 100644
--- a/clang/lib/Headers/__clang_cuda_device_functions.h
+++ b/clang/lib/Headers/__clang_cuda_device_functions.h
@@ -528,7 +528,7 @@ __DEVICE__ float __tanf(float __a) { return __nv_fast_tanf(__a); }
__DEVICE__ void __threadfence(void) { __nvvm_membar_gl(); }
__DEVICE__ void __threadfence_block(void) { __nvvm_membar_cta(); };
__DEVICE__ void __threadfence_system(void) { __nvvm_membar_sys(); };
-__DEVICE__ void __trap(void) { __asm__ __volatile__("trap;"); }
+__DEVICE__ __attribute__((noreturn)) void __trap(void) { __builtin_trap(); }
__DEVICE__ unsigned short
__usAtomicCAS(unsigned short *__p, unsigned short __cmp, unsigned short __v) {
return __nvvm_atom_cas_gen_us(__p, __cmp, __v);
diff --git a/clang/lib/Parse/ParseDecl.cpp b/clang/lib/Parse/ParseDecl.cpp
index 5fcb659..8688ccf 100644
--- a/clang/lib/Parse/ParseDecl.cpp
+++ b/clang/lib/Parse/ParseDecl.cpp
@@ -2613,7 +2613,7 @@ Decl *Parser::ParseDeclarationAfterDeclaratorAndAttributes(
}
PreferredType.enterVariableInit(Tok.getLocation(), ThisDecl);
- ExprResult Init = ParseInitializer();
+ ExprResult Init = ParseInitializer(ThisDecl);
// If this is the only decl in (possibly) range based for statement,
// our best guess is that the user meant ':' instead of '='.
diff --git a/clang/lib/Parse/ParseDeclCXX.cpp b/clang/lib/Parse/ParseDeclCXX.cpp
index b96968d..d8ed7e3 100644
--- a/clang/lib/Parse/ParseDeclCXX.cpp
+++ b/clang/lib/Parse/ParseDeclCXX.cpp
@@ -3359,7 +3359,7 @@ ExprResult Parser::ParseCXXMemberInitializer(Decl *D, bool IsFunction,
Diag(Tok, diag::err_ms_property_initializer) << PD;
return ExprError();
}
- return ParseInitializer();
+ return ParseInitializer(D);
}
void Parser::SkipCXXMemberSpecification(SourceLocation RecordLoc,
diff --git a/clang/lib/Parse/ParseInit.cpp b/clang/lib/Parse/ParseInit.cpp
index a3be374..0e86c4c 100644
--- a/clang/lib/Parse/ParseInit.cpp
+++ b/clang/lib/Parse/ParseInit.cpp
@@ -581,3 +581,26 @@ bool Parser::ParseMicrosoftIfExistsBraceInitializer(ExprVector &InitExprs,
return !trailingComma;
}
+
+ExprResult Parser::ParseInitializer(Decl *DeclForInitializer) {
+ // Set DeclForInitializer for file-scope variables.
+ // For constexpr references, set it to suppress runtime warnings.
+ // For non-constexpr references, don't set it to avoid evaluation issues
+ // with self-referencing initializers. Local variables (including local
+ // constexpr) should emit runtime warnings.
+ if (DeclForInitializer && !Actions.ExprEvalContexts.empty()) {
+ if (auto *VD = dyn_cast<VarDecl>(DeclForInitializer);
+ VD && VD->isFileVarDecl() &&
+ (!VD->getType()->isReferenceType() || VD->isConstexpr()))
+ Actions.ExprEvalContexts.back().DeclForInitializer = VD;
+ }
+
+ ExprResult init;
+ if (Tok.isNot(tok::l_brace)) {
+ init = ParseAssignmentExpression();
+ } else {
+ init = ParseBraceInitializer();
+ }
+
+ return init;
+}
diff --git a/clang/lib/Parse/ParseOpenMP.cpp b/clang/lib/Parse/ParseOpenMP.cpp
index 334438e..32a406e 100644
--- a/clang/lib/Parse/ParseOpenMP.cpp
+++ b/clang/lib/Parse/ParseOpenMP.cpp
@@ -339,7 +339,7 @@ void Parser::ParseOpenMPReductionInitializerForDecl(VarDecl *OmpPrivParm) {
}
PreferredType.enterVariableInit(Tok.getLocation(), OmpPrivParm);
- ExprResult Init = ParseInitializer();
+ ExprResult Init = ParseInitializer(OmpPrivParm);
if (Init.isInvalid()) {
SkipUntil(tok::r_paren, tok::annot_pragma_openmp_end, StopBeforeMatch);
diff --git a/clang/lib/Sema/AnalysisBasedWarnings.cpp b/clang/lib/Sema/AnalysisBasedWarnings.cpp
index 140b709..41a9832 100644
--- a/clang/lib/Sema/AnalysisBasedWarnings.cpp
+++ b/clang/lib/Sema/AnalysisBasedWarnings.cpp
@@ -2734,6 +2734,70 @@ static void flushDiagnostics(Sema &S, const sema::FunctionScopeInfo *fscope) {
S.Diag(D.Loc, D.PD);
}
+template <typename Iterator>
+static void emitPossiblyUnreachableDiags(Sema &S, AnalysisDeclContext &AC,
+ std::pair<Iterator, Iterator> PUDs) {
+
+ if (PUDs.first == PUDs.second)
+ return;
+
+ for (auto I = PUDs.first; I != PUDs.second; ++I) {
+ for (const Stmt *S : I->Stmts)
+ AC.registerForcedBlockExpression(S);
+ }
+
+ if (AC.getCFG()) {
+ CFGReverseBlockReachabilityAnalysis *Analysis =
+ AC.getCFGReachablityAnalysis();
+
+ for (auto I = PUDs.first; I != PUDs.second; ++I) {
+ const auto &D = *I;
+ if (llvm::all_of(D.Stmts, [&](const Stmt *St) {
+ const CFGBlock *Block = AC.getBlockForRegisteredExpression(St);
+ // FIXME: We should be able to assert that block is non-null, but
+ // the CFG analysis can skip potentially-evaluated expressions in
+ // edge cases; see test/Sema/vla-2.c.
+ if (Block && Analysis)
+ if (!Analysis->isReachable(&AC.getCFG()->getEntry(), Block))
+ return false;
+ return true;
+ })) {
+ S.Diag(D.Loc, D.PD);
+ }
+ }
+ } else {
+ for (auto I = PUDs.first; I != PUDs.second; ++I)
+ S.Diag(I->Loc, I->PD);
+ }
+}
+
+void sema::AnalysisBasedWarnings::registerVarDeclWarning(
+ VarDecl *VD, clang::sema::PossiblyUnreachableDiag PUD) {
+ VarDeclPossiblyUnreachableDiags.emplace(VD, PUD);
+}
+
+void sema::AnalysisBasedWarnings::issueWarningsForRegisteredVarDecl(
+ VarDecl *VD) {
+ if (!llvm::is_contained(VarDeclPossiblyUnreachableDiags, VD))
+ return;
+
+ AnalysisDeclContext AC(/*Mgr=*/nullptr, VD);
+
+ AC.getCFGBuildOptions().PruneTriviallyFalseEdges = true;
+ AC.getCFGBuildOptions().AddEHEdges = false;
+ AC.getCFGBuildOptions().AddInitializers = true;
+ AC.getCFGBuildOptions().AddImplicitDtors = true;
+ AC.getCFGBuildOptions().AddTemporaryDtors = true;
+ AC.getCFGBuildOptions().AddCXXNewAllocator = false;
+ AC.getCFGBuildOptions().AddCXXDefaultInitExprInCtors = true;
+
+ auto Range = VarDeclPossiblyUnreachableDiags.equal_range(VD);
+ auto SecondRange =
+ llvm::make_second_range(llvm::make_range(Range.first, Range.second));
+ emitPossiblyUnreachableDiags(
+ S, AC, std::make_pair(SecondRange.begin(), SecondRange.end()));
+}
+
// An AST Visitor that calls a callback function on each callable DEFINITION
// that is NOT in a dependent context:
class CallableVisitor : public DynamicRecursiveASTVisitor {
@@ -2945,45 +3009,8 @@ void clang::sema::AnalysisBasedWarnings::IssueWarnings(
}
// Emit delayed diagnostics.
- if (!fscope->PossiblyUnreachableDiags.empty()) {
- bool analyzed = false;
-
- // Register the expressions with the CFGBuilder.
- for (const auto &D : fscope->PossiblyUnreachableDiags) {
- for (const Stmt *S : D.Stmts)
- AC.registerForcedBlockExpression(S);
- }
-
- if (AC.getCFG()) {
- analyzed = true;
- for (const auto &D : fscope->PossiblyUnreachableDiags) {
- bool AllReachable = true;
- for (const Stmt *S : D.Stmts) {
- const CFGBlock *block = AC.getBlockForRegisteredExpression(S);
- CFGReverseBlockReachabilityAnalysis *cra =
- AC.getCFGReachablityAnalysis();
- // FIXME: We should be able to assert that block is non-null, but
- // the CFG analysis can skip potentially-evaluated expressions in
- // edge cases; see test/Sema/vla-2.c.
- if (block && cra) {
- // Can this block be reached from the entrance?
- if (!cra->isReachable(&AC.getCFG()->getEntry(), block)) {
- AllReachable = false;
- break;
- }
- }
- // If we cannot map to a basic block, assume the statement is
- // reachable.
- }
-
- if (AllReachable)
- S.Diag(D.Loc, D.PD);
- }
- }
-
- if (!analyzed)
- flushDiagnostics(S, fscope);
- }
+ auto &PUDs = fscope->PossiblyUnreachableDiags;
+ emitPossiblyUnreachableDiags(S, AC, std::make_pair(PUDs.begin(), PUDs.end()));
// Warning: check missing 'return'
if (P.enableCheckFallThrough) {
diff --git a/clang/lib/Sema/SemaDecl.cpp b/clang/lib/Sema/SemaDecl.cpp
index 086dd8b..25b89d6 100644
--- a/clang/lib/Sema/SemaDecl.cpp
+++ b/clang/lib/Sema/SemaDecl.cpp
@@ -59,6 +59,7 @@
#include "clang/Sema/SemaWasm.h"
#include "clang/Sema/Template.h"
#include "llvm/ADT/STLForwardCompat.h"
+#include "llvm/ADT/ScopeExit.h"
#include "llvm/ADT/SmallPtrSet.h"
#include "llvm/ADT/SmallString.h"
#include "llvm/ADT/StringExtras.h"
@@ -13117,6 +13118,13 @@ namespace {
if (isa<ParmVarDecl>(OrigDecl))
return;
+ // Skip checking for file-scope constexpr variables - constant evaluation
+ // will produce appropriate errors without needing runtime diagnostics.
+ // Local constexpr should still emit runtime warnings.
+ if (auto *VD = dyn_cast<VarDecl>(OrigDecl);
+ VD && VD->isConstexpr() && VD->isFileVarDecl())
+ return;
+
E = E->IgnoreParens();
// Skip checking T a = a where T is not a record or reference type.
@@ -13744,6 +13752,11 @@ void Sema::DiagnoseUniqueObjectDuplication(const VarDecl *VD) {
}
void Sema::AddInitializerToDecl(Decl *RealDecl, Expr *Init, bool DirectInit) {
+ auto ResetDeclForInitializer = llvm::make_scope_exit([this]() {
+ if (!this->ExprEvalContexts.empty())
+ this->ExprEvalContexts.back().DeclForInitializer = nullptr;
+ });
+
// If there is no declaration, there was an error parsing it. Just ignore
// the initializer.
if (!RealDecl) {
@@ -15069,6 +15082,10 @@ void Sema::FinalizeDeclaration(Decl *ThisDecl) {
if (!VD)
return;
+ // Emit any deferred warnings for the variable's initializer, even if the
+ // variable is invalid.
+ AnalysisWarnings.issueWarningsForRegisteredVarDecl(VD);
+
// Apply an implicit SectionAttr if '#pragma clang section bss|data|rodata' is active
if (VD->hasGlobalStorage() && VD->isThisDeclarationADefinition() &&
!inTemplateInstantiation() && !VD->hasAttr<SectionAttr>()) {
diff --git a/clang/lib/Sema/SemaExpr.cpp b/clang/lib/Sema/SemaExpr.cpp
index 2159a0d..10f0ec3 100644
--- a/clang/lib/Sema/SemaExpr.cpp
+++ b/clang/lib/Sema/SemaExpr.cpp
@@ -20565,31 +20565,36 @@ void Sema::MarkDeclarationsReferencedInExpr(Expr *E,
}
/// Emit a diagnostic when statements are reachable.
-/// FIXME: check for reachability even in expressions for which we don't build a
-/// CFG (eg, in the initializer of a global or in a constant expression).
-/// For example,
-/// namespace { auto *p = new double[3][false ? (1, 2) : 3]; }
bool Sema::DiagIfReachable(SourceLocation Loc, ArrayRef<const Stmt *> Stmts,
const PartialDiagnostic &PD) {
- if (!Stmts.empty() && getCurFunctionOrMethodDecl()) {
- if (!FunctionScopes.empty())
- FunctionScopes.back()->PossiblyUnreachableDiags.push_back(
- sema::PossiblyUnreachableDiag(PD, Loc, Stmts));
- return true;
- }
-
+ VarDecl *Decl = ExprEvalContexts.back().DeclForInitializer;
// The initializer of a constexpr variable or of the first declaration of a
// static data member is not syntactically a constant evaluated constant,
// but nonetheless is always required to be a constant expression, so we
// can skip diagnosing.
- // FIXME: Using the mangling context here is a hack.
- if (auto *VD = dyn_cast_or_null<VarDecl>(
- ExprEvalContexts.back().ManglingContextDecl)) {
- if (VD->isConstexpr() ||
- (VD->isStaticDataMember() && VD->isFirstDecl() && !VD->isInline()))
- return false;
- // FIXME: For any other kind of variable, we should build a CFG for its
- // initializer and check whether the context in question is reachable.
+ if (Decl &&
+ (Decl->isConstexpr() || (Decl->isStaticDataMember() &&
+ Decl->isFirstDecl() && !Decl->isInline())))
+ return false;
+
+ if (Stmts.empty()) {
+ Diag(Loc, PD);
+ return true;
+ }
+
+ if (getCurFunction()) {
+ FunctionScopes.back()->PossiblyUnreachableDiags.push_back(
+ sema::PossiblyUnreachableDiag(PD, Loc, Stmts));
+ return true;
+ }
+
+ // For non-constexpr file-scope variables with reachability context (non-empty
+ // Stmts), build a CFG for the initializer and check whether the context in
+ // question is reachable.
+ if (Decl && Decl->isFileVarDecl()) {
+ AnalysisWarnings.registerVarDeclWarning(
+ Decl, sema::PossiblyUnreachableDiag(PD, Loc, Stmts));
+ return true;
}
Diag(Loc, PD);
diff --git a/clang/lib/Sema/SemaOpenMP.cpp b/clang/lib/Sema/SemaOpenMP.cpp
index 465dab2..2ab2fd1 100644
--- a/clang/lib/Sema/SemaOpenMP.cpp
+++ b/clang/lib/Sema/SemaOpenMP.cpp
@@ -17319,45 +17319,101 @@ OMPClause *SemaOpenMP::ActOnOpenMPDefaultClause(
<< getOpenMPClauseNameForDiag(OMPC_default);
return nullptr;
}
-
- switch (M) {
- case OMP_DEFAULT_none:
- DSAStack->setDefaultDSANone(MLoc);
- break;
- case OMP_DEFAULT_shared:
- DSAStack->setDefaultDSAShared(MLoc);
- break;
- case OMP_DEFAULT_firstprivate:
- DSAStack->setDefaultDSAFirstPrivate(MLoc);
- break;
- case OMP_DEFAULT_private:
- DSAStack->setDefaultDSAPrivate(MLoc);
- break;
- default:
- llvm_unreachable("DSA unexpected in OpenMP default clause");
- }
-
- switch (VCKind) {
- case OMPC_DEFAULT_VC_aggregate:
- DSAStack->setDefaultDSAVCAggregate(VCKindLoc);
- break;
- case OMPC_DEFAULT_VC_all:
- DSAStack->setDefaultDSAVCAll(VCKindLoc);
- break;
- case OMPC_DEFAULT_VC_allocatable:
- DSAStack->setDefaultDSAVCAllocatable(VCKindLoc);
- break;
- case OMPC_DEFAULT_VC_pointer:
- DSAStack->setDefaultDSAVCPointer(VCKindLoc);
- break;
- case OMPC_DEFAULT_VC_scalar:
- DSAStack->setDefaultDSAVCScalar(VCKindLoc);
- break;
- default:
+ if (VCKind == OMPC_DEFAULT_VC_unknown) {
Diag(VCKindLoc, diag::err_omp_default_vc)
<< getOpenMPSimpleClauseTypeName(OMPC_default, unsigned(M));
+ return nullptr;
}
+ bool IsTargetDefault =
+ getLangOpts().OpenMP >= 60 &&
+ isOpenMPTargetExecutionDirective(DSAStack->getCurrentDirective());
+
+ // OpenMP 6.0, page 224, lines 3-4 default Clause, Semantics
+ // If data-sharing-attribute is shared then the clause has no effect
+ // on a target construct;
+ if (IsTargetDefault && M == OMP_DEFAULT_shared)
+ return nullptr;
+
+ auto SetDefaultClauseAttrs = [&](llvm::omp::DefaultKind M,
+ OpenMPDefaultClauseVariableCategory VCKind) {
+ OpenMPDefaultmapClauseModifier DefMapMod = OMPC_DEFAULTMAP_MODIFIER_unknown;
+ OpenMPDefaultmapClauseKind DefMapKind = OMPC_DEFAULTMAP_unknown;
+ // default data-sharing-attribute
+ switch (M) {
+ case OMP_DEFAULT_none:
+ if (IsTargetDefault)
+ DefMapMod = OMPC_DEFAULTMAP_MODIFIER_none;
+ else
+ DSAStack->setDefaultDSANone(MLoc);
+ break;
+ case OMP_DEFAULT_firstprivate:
+ if (IsTargetDefault)
+ DefMapMod = OMPC_DEFAULTMAP_MODIFIER_firstprivate;
+ else
+ DSAStack->setDefaultDSAFirstPrivate(MLoc);
+ break;
+ case OMP_DEFAULT_private:
+ if (IsTargetDefault)
+ DefMapMod = OMPC_DEFAULTMAP_MODIFIER_private;
+ else
+ DSAStack->setDefaultDSAPrivate(MLoc);
+ break;
+ case OMP_DEFAULT_shared:
+ assert(!IsTargetDefault && "DSA shared invalid with target directive");
+ DSAStack->setDefaultDSAShared(MLoc);
+ break;
+ default:
+ llvm_unreachable("unexpected DSA in OpenMP default clause");
+ }
+ // default variable-category
+ switch (VCKind) {
+ case OMPC_DEFAULT_VC_aggregate:
+ if (IsTargetDefault)
+ DefMapKind = OMPC_DEFAULTMAP_aggregate;
+ else
+ DSAStack->setDefaultDSAVCAggregate(VCKindLoc);
+ break;
+ case OMPC_DEFAULT_VC_pointer:
+ if (IsTargetDefault)
+ DefMapKind = OMPC_DEFAULTMAP_pointer;
+ else
+ DSAStack->setDefaultDSAVCPointer(VCKindLoc);
+ break;
+ case OMPC_DEFAULT_VC_scalar:
+ if (IsTargetDefault)
+ DefMapKind = OMPC_DEFAULTMAP_scalar;
+ else
+ DSAStack->setDefaultDSAVCScalar(VCKindLoc);
+ break;
+ case OMPC_DEFAULT_VC_all:
+ if (IsTargetDefault)
+ DefMapKind = OMPC_DEFAULTMAP_all;
+ else
+ DSAStack->setDefaultDSAVCAll(VCKindLoc);
+ break;
+ default:
+ llvm_unreachable("unexpected variable category in OpenMP default clause");
+ }
+ // OpenMP 6.0, page 224, lines 4-5 default Clause, Semantics
+ // otherwise, its effect on a target construct is equivalent to
+ // specifying the defaultmap clause with the same data-sharing-attribute
+ // and variable-category.
+ //
+ // If earlier than OpenMP 6.0, or not a target directive, the default DSA
+ // is/was set as before.
+ if (IsTargetDefault) {
+ if (DefMapKind == OMPC_DEFAULTMAP_all) {
+ DSAStack->setDefaultDMAAttr(DefMapMod, OMPC_DEFAULTMAP_aggregate, MLoc);
+ DSAStack->setDefaultDMAAttr(DefMapMod, OMPC_DEFAULTMAP_scalar, MLoc);
+ DSAStack->setDefaultDMAAttr(DefMapMod, OMPC_DEFAULTMAP_pointer, MLoc);
+ } else {
+ DSAStack->setDefaultDMAAttr(DefMapMod, DefMapKind, MLoc);
+ }
+ }
+ };
+
+ SetDefaultClauseAttrs(M, VCKind);
return new (getASTContext())
OMPDefaultClause(M, MLoc, VCKind, VCKindLoc, StartLoc, LParenLoc, EndLoc);
}
diff --git a/clang/lib/Sema/SemaTemplateInstantiateDecl.cpp b/clang/lib/Sema/SemaTemplateInstantiateDecl.cpp
index 4d58f00..a56017c 100644
--- a/clang/lib/Sema/SemaTemplateInstantiateDecl.cpp
+++ b/clang/lib/Sema/SemaTemplateInstantiateDecl.cpp
@@ -6198,6 +6198,10 @@ void Sema::InstantiateVariableInitializer(
currentEvaluationContext().RebuildDefaultArgOrDefaultInit =
parentEvaluationContext().RebuildDefaultArgOrDefaultInit;
+ // Set DeclForInitializer for this variable so DiagIfReachable can properly
+ // suppress runtime diagnostics for constexpr/static member variables.
+ currentEvaluationContext().DeclForInitializer = Var;
+
if (OldVar->getInit()) {
// Instantiate the initializer.
ExprResult Init =
@@ -6467,6 +6471,8 @@ void Sema::InstantiateVariableDefinition(SourceLocation PointOfInstantiation,
PassToConsumerRAII.Var = Var;
Var->setTemplateSpecializationKind(OldVar->getTemplateSpecializationKind(),
OldVar->getPointOfInstantiation());
+ // Emit any deferred warnings for the variable's initializer.
+ AnalysisWarnings.issueWarningsForRegisteredVarDecl(Var);
}
// This variable may have local implicit instantiations that need to be