Diffstat (limited to 'clang/lib/AST/ByteCode')
-rw-r--r--  clang/lib/AST/ByteCode/Compiler.cpp       | 293
-rw-r--r--  clang/lib/AST/ByteCode/Context.cpp        |   5
-rw-r--r--  clang/lib/AST/ByteCode/Interp.cpp         |   9
-rw-r--r--  clang/lib/AST/ByteCode/Interp.h           | 192
-rw-r--r--  clang/lib/AST/ByteCode/InterpBuiltin.cpp  | 204
-rw-r--r--  clang/lib/AST/ByteCode/InterpHelpers.h    | 141
-rw-r--r--  clang/lib/AST/ByteCode/Opcodes.td         |  23
7 files changed, 609 insertions, 258 deletions
diff --git a/clang/lib/AST/ByteCode/Compiler.cpp b/clang/lib/AST/ByteCode/Compiler.cpp
index 74cae03..f4ddbf4 100644
--- a/clang/lib/AST/ByteCode/Compiler.cpp
+++ b/clang/lib/AST/ByteCode/Compiler.cpp
@@ -1842,7 +1842,6 @@ bool Compiler<Emitter>::visitInitList(ArrayRef<const Expr *> Inits,
const Expr *Init, PrimType T,
bool Activate = false) -> bool {
InitStackScope<Emitter> ISS(this, isa<CXXDefaultInitExpr>(Init));
- InitLinkScope<Emitter> ILS(this, InitLink::Field(FieldToInit->Offset));
if (!this->visit(Init))
return false;
@@ -3274,34 +3273,43 @@ bool Compiler<Emitter>::VisitCXXConstructExpr(const CXXConstructExpr *E) {
}
if (T->isArrayType()) {
- const ConstantArrayType *CAT =
- Ctx.getASTContext().getAsConstantArrayType(E->getType());
- if (!CAT)
- return false;
-
- size_t NumElems = CAT->getZExtSize();
const Function *Func = getFunction(E->getConstructor());
if (!Func)
return false;
- // FIXME(perf): We're calling the constructor once per array element here,
- // in the old intepreter we had a special-case for trivial constructors.
- for (size_t I = 0; I != NumElems; ++I) {
- if (!this->emitConstUint64(I, E))
- return false;
- if (!this->emitArrayElemPtrUint64(E))
- return false;
+ if (!this->emitDupPtr(E))
+ return false;
- // Constructor arguments.
- for (const auto *Arg : E->arguments()) {
- if (!this->visit(Arg))
- return false;
+ std::function<bool(QualType)> initArrayDimension;
+ initArrayDimension = [&](QualType T) -> bool {
+ if (!T->isArrayType()) {
+ // Constructor arguments.
+ for (const auto *Arg : E->arguments()) {
+ if (!this->visit(Arg))
+ return false;
+ }
+
+ return this->emitCall(Func, 0, E);
}
- if (!this->emitCall(Func, 0, E))
+ const ConstantArrayType *CAT =
+ Ctx.getASTContext().getAsConstantArrayType(T);
+ if (!CAT)
return false;
- }
- return true;
+ QualType ElemTy = CAT->getElementType();
+ unsigned NumElems = CAT->getZExtSize();
+ for (size_t I = 0; I != NumElems; ++I) {
+ if (!this->emitConstUint64(I, E))
+ return false;
+ if (!this->emitArrayElemPtrUint64(E))
+ return false;
+ if (!initArrayDimension(ElemTy))
+ return false;
+ }
+ return this->emitPopPtr(E);
+ };
+
+ return initArrayDimension(E->getType());
}
return false;
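
As a rough sketch of the kind of case the recursive initArrayDimension lambda above is meant to cover (illustrative only, not code from the patch): constructing a nested array of a class type now recurses through each array dimension and emits one constructor call per innermost element, so an expression like the following can be constant-evaluated.

struct Cell { int v; constexpr Cell() : v(1) {} };

constexpr int sumGrid() {
  Cell grid[2][3];          // 2x3 array: six constructor calls, one per element
  int total = 0;
  for (auto &row : grid)
    for (auto &cell : row)
      total += cell.v;
  return total;
}
static_assert(sumGrid() == 6, "");
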
@@ -3600,8 +3608,6 @@ bool Compiler<Emitter>::VisitCXXNewExpr(const CXXNewExpr *E) {
if (PlacementDest) {
if (!this->visit(PlacementDest))
return false;
- if (!this->emitStartLifetime(E))
- return false;
if (!this->emitGetLocal(SizeT, ArrayLen, E))
return false;
if (!this->emitCheckNewTypeMismatchArray(SizeT, E, E))
@@ -3741,10 +3747,9 @@ bool Compiler<Emitter>::VisitCXXNewExpr(const CXXNewExpr *E) {
if (PlacementDest) {
if (!this->visit(PlacementDest))
return false;
- if (!this->emitStartLifetime(E))
- return false;
if (!this->emitCheckNewTypeMismatch(E, E))
return false;
+
} else {
// Allocate just one element.
if (!this->emitAlloc(Desc, E))
@@ -4841,46 +4846,39 @@ Compiler<Emitter>::visitVarDecl(const VarDecl *VD, const Expr *Init,
return !NeedsOp || this->emitCheckDecl(VD, VD);
};
- auto initGlobal = [&](unsigned GlobalIndex) -> bool {
- assert(Init);
-
- if (VarT) {
- if (!this->visit(Init))
- return checkDecl() && false;
-
- return checkDecl() && this->emitInitGlobal(*VarT, GlobalIndex, VD);
- }
-
- if (!checkDecl())
- return false;
-
- if (!this->emitGetPtrGlobal(GlobalIndex, Init))
- return false;
-
- if (!visitInitializer(Init))
- return false;
-
- return this->emitFinishInitGlobal(Init);
- };
-
DeclScope<Emitter> LocalScope(this, VD);
- // We've already seen and initialized this global.
- if (UnsignedOrNone GlobalIndex = P.getGlobal(VD)) {
+ UnsignedOrNone GlobalIndex = P.getGlobal(VD);
+ if (GlobalIndex) {
+ // We've already seen and initialized this global.
if (P.getPtrGlobal(*GlobalIndex).isInitialized())
return checkDecl();
-
// The previous attempt at initialization might've been unsuccessful,
// so let's try this one.
- return Init && checkDecl() && initGlobal(*GlobalIndex);
+ } else if ((GlobalIndex = P.createGlobal(VD, Init))) {
+ } else {
+ return false;
}
+ if (!Init)
+ return true;
- UnsignedOrNone GlobalIndex = P.createGlobal(VD, Init);
+ if (!checkDecl())
+ return false;
- if (!GlobalIndex)
+ if (VarT) {
+ if (!this->visit(Init))
+ return false;
+
+ return this->emitInitGlobal(*VarT, *GlobalIndex, VD);
+ }
+
+ if (!this->emitGetPtrGlobal(*GlobalIndex, Init))
return false;
- return !Init || (checkDecl() && initGlobal(*GlobalIndex));
+ if (!visitInitializer(Init))
+ return false;
+
+ return this->emitFinishInitGlobal(Init);
}
// Local variables.
InitLinkScope<Emitter> ILS(this, InitLink::Decl(VD));
@@ -4890,36 +4888,37 @@ Compiler<Emitter>::visitVarDecl(const VarDecl *VD, const Expr *Init,
VD, *VarT, VD->getType().isConstQualified(),
VD->getType().isVolatileQualified(), nullptr, ScopeKind::Block,
IsConstexprUnknown);
- if (Init) {
- // If this is a toplevel declaration, create a scope for the
- // initializer.
- if (Toplevel) {
- LocalScope<Emitter> Scope(this);
- if (!this->visit(Init))
- return false;
- return this->emitSetLocal(*VarT, Offset, VD) && Scope.destroyLocals();
- }
- if (!this->visit(Init))
- return false;
- return this->emitSetLocal(*VarT, Offset, VD);
- }
- } else {
- if (UnsignedOrNone Offset = this->allocateLocal(
- VD, VD->getType(), nullptr, ScopeKind::Block, IsConstexprUnknown)) {
- if (!Init)
- return true;
- if (!this->emitGetPtrLocal(*Offset, Init))
- return false;
+ if (!Init)
+ return true;
- if (!visitInitializer(Init))
+ // If this is a toplevel declaration, create a scope for the
+ // initializer.
+ if (Toplevel) {
+ LocalScope<Emitter> Scope(this);
+ if (!this->visit(Init))
return false;
-
- return this->emitFinishInitPop(Init);
+ return this->emitSetLocal(*VarT, Offset, VD) && Scope.destroyLocals();
}
- return false;
+ if (!this->visit(Init))
+ return false;
+ return this->emitSetLocal(*VarT, Offset, VD);
}
- return true;
+ // Local composite variables.
+ if (UnsignedOrNone Offset = this->allocateLocal(
+ VD, VD->getType(), nullptr, ScopeKind::Block, IsConstexprUnknown)) {
+ if (!Init)
+ return true;
+
+ if (!this->emitGetPtrLocal(*Offset, Init))
+ return false;
+
+ if (!visitInitializer(Init))
+ return false;
+
+ return this->emitFinishInitPop(Init);
+ }
+ return false;
}
template <class Emitter>
@@ -5391,55 +5390,57 @@ bool Compiler<Emitter>::VisitCXXThisExpr(const CXXThisExpr *E) {
// instance pointer of the current function frame, but e.g. to the declaration
// currently being initialized. Here we emit the necessary instruction(s) for
// this scenario.
- if (!InitStackActive)
+ if (!InitStackActive || InitStack.empty())
return this->emitThis(E);
- if (!InitStack.empty()) {
- // If our init stack is, for example:
- // 0 Stack: 3 (decl)
- // 1 Stack: 6 (init list)
- // 2 Stack: 1 (field)
- // 3 Stack: 6 (init list)
- // 4 Stack: 1 (field)
- //
- // We want to find the LAST element in it that's an init list,
- // which is marked with the K_InitList marker. The index right
- // before that points to an init list. We need to find the
- // elements before the K_InitList element that point to a base
- // (e.g. a decl or This), optionally followed by field, elem, etc.
- // In the example above, we want to emit elements [0..2].
- unsigned StartIndex = 0;
- unsigned EndIndex = 0;
- // Find the init list.
- for (StartIndex = InitStack.size() - 1; StartIndex > 0; --StartIndex) {
- if (InitStack[StartIndex].Kind == InitLink::K_InitList ||
- InitStack[StartIndex].Kind == InitLink::K_This) {
- EndIndex = StartIndex;
- --StartIndex;
- break;
- }
+ // If our init stack is, for example:
+ // 0 Stack: 3 (decl)
+ // 1 Stack: 6 (init list)
+ // 2 Stack: 1 (field)
+ // 3 Stack: 6 (init list)
+ // 4 Stack: 1 (field)
+ //
+ // We want to find the LAST element in it that's an init list,
+ // which is marked with the K_InitList marker. The index right
+ // before that points to an init list. We need to find the
+ // elements before the K_InitList element that point to a base
+ // (e.g. a decl or This), optionally followed by field, elem, etc.
+ // In the example above, we want to emit elements [0..2].
+ unsigned StartIndex = 0;
+ unsigned EndIndex = 0;
+ // Find the init list.
+ for (StartIndex = InitStack.size() - 1; StartIndex > 0; --StartIndex) {
+ if (InitStack[StartIndex].Kind == InitLink::K_InitList ||
+ InitStack[StartIndex].Kind == InitLink::K_This) {
+ EndIndex = StartIndex;
+ --StartIndex;
+ break;
}
+ }
- // Walk backwards to find the base.
- for (; StartIndex > 0; --StartIndex) {
- if (InitStack[StartIndex].Kind == InitLink::K_InitList)
- continue;
+ // Walk backwards to find the base.
+ for (; StartIndex > 0; --StartIndex) {
+ if (InitStack[StartIndex].Kind == InitLink::K_InitList)
+ continue;
- if (InitStack[StartIndex].Kind != InitLink::K_Field &&
- InitStack[StartIndex].Kind != InitLink::K_Elem)
- break;
- }
+ if (InitStack[StartIndex].Kind != InitLink::K_Field &&
+ InitStack[StartIndex].Kind != InitLink::K_Elem)
+ break;
+ }
- // Emit the instructions.
- for (unsigned I = StartIndex; I != EndIndex; ++I) {
- if (InitStack[I].Kind == InitLink::K_InitList)
- continue;
- if (!InitStack[I].template emit<Emitter>(this, E))
- return false;
- }
- return true;
+ if (StartIndex == 0 && EndIndex == 0)
+ EndIndex = InitStack.size() - 1;
+
+ assert(StartIndex < EndIndex);
+
+ // Emit the instructions.
+ for (unsigned I = StartIndex; I != (EndIndex + 1); ++I) {
+ if (InitStack[I].Kind == InitLink::K_InitList)
+ continue;
+ if (!InitStack[I].template emit<Emitter>(this, E))
+ return false;
}
- return this->emitThis(E);
+ return true;
}
template <class Emitter> bool Compiler<Emitter>::visitStmt(const Stmt *S) {
@@ -6301,6 +6302,10 @@ bool Compiler<Emitter>::compileConstructor(const CXXConstructorDecl *Ctor) {
}
assert(NestedField);
+ unsigned FirstLinkOffset =
+ R->getField(cast<FieldDecl>(IFD->chain()[0]))->Offset;
+ InitStackScope<Emitter> ISS(this, isa<CXXDefaultInitExpr>(InitExpr));
+ InitLinkScope<Emitter> ILS(this, InitLink::Field(FirstLinkOffset));
if (!emitFieldInitializer(NestedField, NestedFieldOffset, InitExpr,
IsUnion))
return false;
@@ -6438,6 +6443,13 @@ bool Compiler<Emitter>::visitFunc(const FunctionDecl *F) {
return this->emitNoRet(SourceInfo{});
}
+static uint32_t getBitWidth(const Expr *E) {
+ assert(E->refersToBitField());
+ const auto *ME = cast<MemberExpr>(E);
+ const auto *FD = cast<FieldDecl>(ME->getMemberDecl());
+ return FD->getBitWidthValue();
+}
+
template <class Emitter>
bool Compiler<Emitter>::VisitUnaryOperator(const UnaryOperator *E) {
const Expr *SubExpr = E->getSubExpr();
@@ -6466,10 +6478,15 @@ bool Compiler<Emitter>::VisitUnaryOperator(const UnaryOperator *E) {
return DiscardResult ? this->emitPopPtr(E) : true;
}
- if (T == PT_Float) {
+ if (T == PT_Float)
return DiscardResult ? this->emitIncfPop(getFPOptions(E), E)
: this->emitIncf(getFPOptions(E), E);
- }
+
+ if (SubExpr->refersToBitField())
+ return DiscardResult ? this->emitIncPopBitfield(*T, E->canOverflow(),
+ getBitWidth(SubExpr), E)
+ : this->emitIncBitfield(*T, E->canOverflow(),
+ getBitWidth(SubExpr), E);
return DiscardResult ? this->emitIncPop(*T, E->canOverflow(), E)
: this->emitInc(*T, E->canOverflow(), E);
@@ -6490,9 +6507,15 @@ bool Compiler<Emitter>::VisitUnaryOperator(const UnaryOperator *E) {
return DiscardResult ? this->emitPopPtr(E) : true;
}
- if (T == PT_Float) {
+ if (T == PT_Float)
return DiscardResult ? this->emitDecfPop(getFPOptions(E), E)
: this->emitDecf(getFPOptions(E), E);
+
+ if (SubExpr->refersToBitField()) {
+ return DiscardResult ? this->emitDecPopBitfield(*T, E->canOverflow(),
+ getBitWidth(SubExpr), E)
+ : this->emitDecBitfield(*T, E->canOverflow(),
+ getBitWidth(SubExpr), E);
}
return DiscardResult ? this->emitDecPop(*T, E->canOverflow(), E)
@@ -6521,6 +6544,11 @@ bool Compiler<Emitter>::VisitUnaryOperator(const UnaryOperator *E) {
if (DiscardResult) {
if (T == PT_Float)
return this->emitIncfPop(getFPOptions(E), E);
+ if (SubExpr->refersToBitField())
+ return DiscardResult ? this->emitIncPopBitfield(*T, E->canOverflow(),
+ getBitWidth(SubExpr), E)
+ : this->emitIncBitfield(*T, E->canOverflow(),
+ getBitWidth(SubExpr), E);
return this->emitIncPop(*T, E->canOverflow(), E);
}
@@ -6536,6 +6564,11 @@ bool Compiler<Emitter>::VisitUnaryOperator(const UnaryOperator *E) {
return false;
if (!this->emitStoreFloat(E))
return false;
+ } else if (SubExpr->refersToBitField()) {
+ assert(isIntegralType(*T));
+ if (!this->emitPreIncBitfield(*T, E->canOverflow(), getBitWidth(SubExpr),
+ E))
+ return false;
} else {
assert(isIntegralType(*T));
if (!this->emitPreInc(*T, E->canOverflow(), E))
@@ -6566,6 +6599,11 @@ bool Compiler<Emitter>::VisitUnaryOperator(const UnaryOperator *E) {
if (DiscardResult) {
if (T == PT_Float)
return this->emitDecfPop(getFPOptions(E), E);
+ if (SubExpr->refersToBitField())
+ return DiscardResult ? this->emitDecPopBitfield(*T, E->canOverflow(),
+ getBitWidth(SubExpr), E)
+ : this->emitDecBitfield(*T, E->canOverflow(),
+ getBitWidth(SubExpr), E);
return this->emitDecPop(*T, E->canOverflow(), E);
}
@@ -6581,6 +6619,11 @@ bool Compiler<Emitter>::VisitUnaryOperator(const UnaryOperator *E) {
return false;
if (!this->emitStoreFloat(E))
return false;
+ } else if (SubExpr->refersToBitField()) {
+ assert(isIntegralType(*T));
+ if (!this->emitPreDecBitfield(*T, E->canOverflow(), getBitWidth(SubExpr),
+ E))
+ return false;
} else {
assert(isIntegralType(*T));
if (!this->emitPreDec(*T, E->canOverflow(), E))
@@ -6633,7 +6676,7 @@ bool Compiler<Emitter>::VisitUnaryOperator(const UnaryOperator *E) {
if (!this->visit(SubExpr))
return false;
- if (!this->emitCheckNull(E))
+ if (!SubExpr->getType()->isFunctionPointerType() && !this->emitCheckNull(E))
return false;
if (classifyPrim(SubExpr) == PT_Ptr)
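
The bit-field forms of the increment/decrement opcodes emitted above (IncBitfield, IncPopBitfield, PreIncBitfield and their Dec counterparts) truncate the result to the field's declared width. A minimal sketch of the kind of expression this targets (an illustration, not taken from the patch):

struct Flags { unsigned n : 3; };    // 3-bit field: values 0..7

constexpr unsigned bump() {
  Flags f{7};
  ++f.n;                             // 7 + 1 truncated to 3 bits -> 0
  return f.n;
}
static_assert(bump() == 0, "");
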
diff --git a/clang/lib/AST/ByteCode/Context.cpp b/clang/lib/AST/ByteCode/Context.cpp
index 4f4b122..12bf3a3 100644
--- a/clang/lib/AST/ByteCode/Context.cpp
+++ b/clang/lib/AST/ByteCode/Context.cpp
@@ -7,12 +7,15 @@
//===----------------------------------------------------------------------===//
#include "Context.h"
+#include "Boolean.h"
#include "ByteCodeEmitter.h"
#include "Compiler.h"
#include "EvalEmitter.h"
-#include "Interp.h"
+#include "Integral.h"
#include "InterpFrame.h"
+#include "InterpHelpers.h"
#include "InterpStack.h"
+#include "Pointer.h"
#include "PrimType.h"
#include "Program.h"
#include "clang/AST/ASTLambda.h"
diff --git a/clang/lib/AST/ByteCode/Interp.cpp b/clang/lib/AST/ByteCode/Interp.cpp
index a72282c..169a9a2 100644
--- a/clang/lib/AST/ByteCode/Interp.cpp
+++ b/clang/lib/AST/ByteCode/Interp.cpp
@@ -1903,12 +1903,19 @@ bool CheckNewTypeMismatch(InterpState &S, CodePtr OpPC, const Expr *E,
if (Ptr.inUnion() && Ptr.getBase().getRecord()->isUnion())
Ptr.activate();
+ if (Ptr.isZero()) {
+ S.FFDiag(S.Current->getSource(OpPC), diag::note_constexpr_access_null)
+ << AK_Construct;
+ return false;
+ }
+
if (!Ptr.isBlockPointer())
return false;
+ startLifetimeRecurse(Ptr);
+
// Similar to CheckStore(), but with the additional CheckTemporary() call and
// the AccessKinds are different.
-
if (!Ptr.block()->isAccessible()) {
if (!CheckExtern(S, OpPC, Ptr))
return false;
diff --git a/clang/lib/AST/ByteCode/Interp.h b/clang/lib/AST/ByteCode/Interp.h
index 2f7e2d9..89f6fbe 100644
--- a/clang/lib/AST/ByteCode/Interp.h
+++ b/clang/lib/AST/ByteCode/Interp.h
@@ -22,6 +22,7 @@
#include "Function.h"
#include "InterpBuiltinBitCast.h"
#include "InterpFrame.h"
+#include "InterpHelpers.h"
#include "InterpStack.h"
#include "InterpState.h"
#include "MemberPointer.h"
@@ -43,28 +44,10 @@ using FixedPointSemantics = llvm::FixedPointSemantics;
/// Checks if the variable has externally defined storage.
bool CheckExtern(InterpState &S, CodePtr OpPC, const Pointer &Ptr);
-/// Checks if the array is offsetable.
-bool CheckArray(InterpState &S, CodePtr OpPC, const Pointer &Ptr);
-
-/// Checks if a pointer is live and accessible.
-bool CheckLive(InterpState &S, CodePtr OpPC, const Pointer &Ptr,
- AccessKinds AK);
-
-/// Checks if a pointer is a dummy pointer.
-bool CheckDummy(InterpState &S, CodePtr OpPC, const Block *B, AccessKinds AK);
-
/// Checks if a pointer is null.
bool CheckNull(InterpState &S, CodePtr OpPC, const Pointer &Ptr,
CheckSubobjectKind CSK);
-/// Checks if a pointer is in range.
-bool CheckRange(InterpState &S, CodePtr OpPC, const Pointer &Ptr,
- AccessKinds AK);
-
-/// Checks if a field from which a pointer is going to be derived is valid.
-bool CheckRange(InterpState &S, CodePtr OpPC, const Pointer &Ptr,
- CheckSubobjectKind CSK);
-
/// Checks if Ptr is a one-past-the-end pointer.
bool CheckSubobject(InterpState &S, CodePtr OpPC, const Pointer &Ptr,
CheckSubobjectKind CSK);
@@ -80,12 +63,6 @@ bool CheckConst(InterpState &S, CodePtr OpPC, const Pointer &Ptr);
/// Checks if the Descriptor is of a constexpr or const global variable.
bool CheckConstant(InterpState &S, CodePtr OpPC, const Descriptor *Desc);
-/// Checks if a pointer points to a mutable field.
-bool CheckMutable(InterpState &S, CodePtr OpPC, const Pointer &Ptr);
-
-/// Checks if a value can be loaded from a block.
-bool CheckLoad(InterpState &S, CodePtr OpPC, const Pointer &Ptr,
- AccessKinds AK = AK_Read);
bool CheckFinalLoad(InterpState &S, CodePtr OpPC, const Pointer &Ptr);
bool DiagnoseUninitialized(InterpState &S, CodePtr OpPC, const Pointer &Ptr,
@@ -110,12 +87,6 @@ bool CheckThis(InterpState &S, CodePtr OpPC);
/// language mode.
bool CheckDynamicMemoryAllocation(InterpState &S, CodePtr OpPC);
-/// Diagnose mismatched new[]/delete or new/delete[] pairs.
-bool CheckNewDeleteForms(InterpState &S, CodePtr OpPC,
- DynamicAllocator::Form AllocForm,
- DynamicAllocator::Form DeleteForm, const Descriptor *D,
- const Expr *NewExpr);
-
/// Check the source of the pointer passed to delete/delete[] has actually
/// been heap allocated by us.
bool CheckDeleteSource(InterpState &S, CodePtr OpPC, const Expr *Source,
@@ -129,9 +100,6 @@ bool CheckActive(InterpState &S, CodePtr OpPC, const Pointer &Ptr,
bool SetThreeWayComparisonField(InterpState &S, CodePtr OpPC,
const Pointer &Ptr, const APSInt &IntValue);
-/// Copy the contents of Src into Dest.
-bool DoMemcpy(InterpState &S, CodePtr OpPC, const Pointer &Src, Pointer &Dest);
-
bool CallVar(InterpState &S, CodePtr OpPC, const Function *Func,
uint32_t VarArgSize);
bool Call(InterpState &S, CodePtr OpPC, const Function *Func,
@@ -149,19 +117,11 @@ bool CheckBitCast(InterpState &S, CodePtr OpPC, bool HasIndeterminateBits,
bool CheckBCPResult(InterpState &S, const Pointer &Ptr);
bool CheckDestructor(InterpState &S, CodePtr OpPC, const Pointer &Ptr);
-template <typename T>
-static bool handleOverflow(InterpState &S, CodePtr OpPC, const T &SrcValue) {
- const Expr *E = S.Current->getExpr(OpPC);
- S.CCEDiag(E, diag::note_constexpr_overflow) << SrcValue << E->getType();
- return S.noteUndefinedBehavior();
-}
bool handleFixedPointOverflow(InterpState &S, CodePtr OpPC,
const FixedPoint &FP);
bool isConstexprUnknown(const Pointer &P);
-inline bool CheckArraySize(InterpState &S, CodePtr OpPC, uint64_t NumElems);
-
enum class ShiftDir { Left, Right };
/// Checks if the shift operation is legal.
@@ -241,43 +201,6 @@ bool CheckDivRem(InterpState &S, CodePtr OpPC, const T &LHS, const T &RHS) {
return true;
}
-template <typename SizeT>
-bool CheckArraySize(InterpState &S, CodePtr OpPC, SizeT *NumElements,
- unsigned ElemSize, bool IsNoThrow) {
- // FIXME: Both the SizeT::from() as well as the
- // NumElements.toAPSInt() in this function are rather expensive.
-
- // Can't be too many elements if the bitwidth of NumElements is lower than
- // that of Descriptor::MaxArrayElemBytes.
- if ((NumElements->bitWidth() - NumElements->isSigned()) <
- (sizeof(Descriptor::MaxArrayElemBytes) * 8))
- return true;
-
- // FIXME: GH63562
- // APValue stores array extents as unsigned,
- // so anything that is greater that unsigned would overflow when
- // constructing the array, we catch this here.
- SizeT MaxElements = SizeT::from(Descriptor::MaxArrayElemBytes / ElemSize);
- assert(MaxElements.isPositive());
- if (NumElements->toAPSInt().getActiveBits() >
- ConstantArrayType::getMaxSizeBits(S.getASTContext()) ||
- *NumElements > MaxElements) {
- if (!IsNoThrow) {
- const SourceInfo &Loc = S.Current->getSource(OpPC);
-
- if (NumElements->isSigned() && NumElements->isNegative()) {
- S.FFDiag(Loc, diag::note_constexpr_new_negative)
- << NumElements->toDiagnosticString(S.getASTContext());
- } else {
- S.FFDiag(Loc, diag::note_constexpr_new_too_large)
- << NumElements->toDiagnosticString(S.getASTContext());
- }
- }
- return false;
- }
- return true;
-}
-
/// Checks if the result of a floating-point operation is valid
/// in the current context.
bool CheckFloatResult(InterpState &S, CodePtr OpPC, const Floating &Result,
@@ -286,19 +209,6 @@ bool CheckFloatResult(InterpState &S, CodePtr OpPC, const Floating &Result,
/// Checks why the given DeclRefExpr is invalid.
bool CheckDeclRef(InterpState &S, CodePtr OpPC, const DeclRefExpr *DR);
-/// Interpreter entry point.
-bool Interpret(InterpState &S);
-
-/// Interpret a builtin function.
-bool InterpretBuiltin(InterpState &S, CodePtr OpPC, const CallExpr *Call,
- uint32_t BuiltinID);
-
-/// Interpret an offsetof operation.
-bool InterpretOffsetOf(InterpState &S, CodePtr OpPC, const OffsetOfExpr *E,
- ArrayRef<int64_t> ArrayIndices, int64_t &Result);
-
-inline bool Invalid(InterpState &S, CodePtr OpPC);
-
enum class ArithOp { Add, Sub };
//===----------------------------------------------------------------------===//
@@ -403,13 +313,6 @@ bool Add(InterpState &S, CodePtr OpPC) {
return AddSubMulHelper<T, T::add, std::plus>(S, OpPC, Bits, LHS, RHS);
}
-static inline llvm::RoundingMode getRoundingMode(FPOptions FPO) {
- auto RM = FPO.getRoundingMode();
- if (RM == llvm::RoundingMode::Dynamic)
- return llvm::RoundingMode::NearestTiesToEven;
- return RM;
-}
-
inline bool Addf(InterpState &S, CodePtr OpPC, uint32_t FPOI) {
const Floating &RHS = S.Stk.pop<Floating>();
const Floating &LHS = S.Stk.pop<Floating>();
@@ -799,7 +702,7 @@ enum class IncDecOp {
template <typename T, IncDecOp Op, PushVal DoPush>
bool IncDecHelper(InterpState &S, CodePtr OpPC, const Pointer &Ptr,
- bool CanOverflow) {
+ bool CanOverflow, UnsignedOrNone BitWidth = std::nullopt) {
assert(!Ptr.isDummy());
if (!S.inConstantContext()) {
@@ -822,12 +725,18 @@ bool IncDecHelper(InterpState &S, CodePtr OpPC, const Pointer &Ptr,
if constexpr (Op == IncDecOp::Inc) {
if (!T::increment(Value, &Result) || !CanOverflow) {
- Ptr.deref<T>() = Result;
+ if (BitWidth)
+ Ptr.deref<T>() = Result.truncate(*BitWidth);
+ else
+ Ptr.deref<T>() = Result;
return true;
}
} else {
if (!T::decrement(Value, &Result) || !CanOverflow) {
- Ptr.deref<T>() = Result;
+ if (BitWidth)
+ Ptr.deref<T>() = Result.truncate(*BitWidth);
+ else
+ Ptr.deref<T>() = Result;
return true;
}
}
@@ -871,6 +780,17 @@ bool Inc(InterpState &S, CodePtr OpPC, bool CanOverflow) {
CanOverflow);
}
+template <PrimType Name, class T = typename PrimConv<Name>::T>
+bool IncBitfield(InterpState &S, CodePtr OpPC, bool CanOverflow,
+ unsigned BitWidth) {
+ const Pointer &Ptr = S.Stk.pop<Pointer>();
+ if (!CheckLoad(S, OpPC, Ptr, AK_Increment))
+ return false;
+
+ return IncDecHelper<T, IncDecOp::Inc, PushVal::Yes>(S, OpPC, Ptr, CanOverflow,
+ BitWidth);
+}
+
/// 1) Pops a pointer from the stack
/// 2) Load the value from the pointer
/// 3) Writes the value increased by one back to the pointer
@@ -884,6 +804,17 @@ bool IncPop(InterpState &S, CodePtr OpPC, bool CanOverflow) {
}
template <PrimType Name, class T = typename PrimConv<Name>::T>
+bool IncPopBitfield(InterpState &S, CodePtr OpPC, bool CanOverflow,
+ uint32_t BitWidth) {
+ const Pointer &Ptr = S.Stk.pop<Pointer>();
+ if (!CheckLoad(S, OpPC, Ptr, AK_Increment))
+ return false;
+
+ return IncDecHelper<T, IncDecOp::Inc, PushVal::No>(S, OpPC, Ptr, CanOverflow,
+ BitWidth);
+}
+
+template <PrimType Name, class T = typename PrimConv<Name>::T>
bool PreInc(InterpState &S, CodePtr OpPC, bool CanOverflow) {
const Pointer &Ptr = S.Stk.peek<Pointer>();
if (!CheckLoad(S, OpPC, Ptr, AK_Increment))
@@ -892,6 +823,17 @@ bool PreInc(InterpState &S, CodePtr OpPC, bool CanOverflow) {
return IncDecHelper<T, IncDecOp::Inc, PushVal::No>(S, OpPC, Ptr, CanOverflow);
}
+template <PrimType Name, class T = typename PrimConv<Name>::T>
+bool PreIncBitfield(InterpState &S, CodePtr OpPC, bool CanOverflow,
+ uint32_t BitWidth) {
+ const Pointer &Ptr = S.Stk.peek<Pointer>();
+ if (!CheckLoad(S, OpPC, Ptr, AK_Increment))
+ return false;
+
+ return IncDecHelper<T, IncDecOp::Inc, PushVal::No>(S, OpPC, Ptr, CanOverflow,
+ BitWidth);
+}
+
/// 1) Pops a pointer from the stack
/// 2) Load the value from the pointer
/// 3) Writes the value decreased by one back to the pointer
@@ -905,6 +847,16 @@ bool Dec(InterpState &S, CodePtr OpPC, bool CanOverflow) {
return IncDecHelper<T, IncDecOp::Dec, PushVal::Yes>(S, OpPC, Ptr,
CanOverflow);
}
+template <PrimType Name, class T = typename PrimConv<Name>::T>
+bool DecBitfield(InterpState &S, CodePtr OpPC, bool CanOverflow,
+ uint32_t BitWidth) {
+ const Pointer &Ptr = S.Stk.pop<Pointer>();
+ if (!CheckLoad(S, OpPC, Ptr, AK_Decrement))
+ return false;
+
+ return IncDecHelper<T, IncDecOp::Dec, PushVal::Yes>(S, OpPC, Ptr, CanOverflow,
+ BitWidth);
+}
/// 1) Pops a pointer from the stack
/// 2) Load the value from the pointer
@@ -919,6 +871,17 @@ bool DecPop(InterpState &S, CodePtr OpPC, bool CanOverflow) {
}
template <PrimType Name, class T = typename PrimConv<Name>::T>
+bool DecPopBitfield(InterpState &S, CodePtr OpPC, bool CanOverflow,
+ uint32_t BitWidth) {
+ const Pointer &Ptr = S.Stk.pop<Pointer>();
+ if (!CheckLoad(S, OpPC, Ptr, AK_Decrement))
+ return false;
+
+ return IncDecHelper<T, IncDecOp::Dec, PushVal::No>(S, OpPC, Ptr, CanOverflow,
+ BitWidth);
+}
+
+template <PrimType Name, class T = typename PrimConv<Name>::T>
bool PreDec(InterpState &S, CodePtr OpPC, bool CanOverflow) {
const Pointer &Ptr = S.Stk.peek<Pointer>();
if (!CheckLoad(S, OpPC, Ptr, AK_Decrement))
@@ -926,6 +889,16 @@ bool PreDec(InterpState &S, CodePtr OpPC, bool CanOverflow) {
return IncDecHelper<T, IncDecOp::Dec, PushVal::No>(S, OpPC, Ptr, CanOverflow);
}
+template <PrimType Name, class T = typename PrimConv<Name>::T>
+bool PreDecBitfield(InterpState &S, CodePtr OpPC, bool CanOverflow,
+ uint32_t BitWidth) {
+ const Pointer &Ptr = S.Stk.peek<Pointer>();
+ if (!CheckLoad(S, OpPC, Ptr, AK_Decrement))
+ return false;
+ return IncDecHelper<T, IncDecOp::Dec, PushVal::No>(S, OpPC, Ptr, CanOverflow,
+ BitWidth);
+}
+
template <IncDecOp Op, PushVal DoPush>
bool IncDecFloatHelper(InterpState &S, CodePtr OpPC, const Pointer &Ptr,
uint32_t FPOI) {
@@ -3264,12 +3237,6 @@ inline bool GetMemberPtrDecl(InterpState &S, CodePtr OpPC) {
/// Just emit a diagnostic. The expression that caused emission of this
/// op is not valid in a constant context.
-inline bool Invalid(InterpState &S, CodePtr OpPC) {
- const SourceLocation &Loc = S.Current->getLocation(OpPC);
- S.FFDiag(Loc, diag::note_invalid_subexpr_in_const_expr)
- << S.Current->getRange(OpPC);
- return false;
-}
inline bool Unsupported(InterpState &S, CodePtr OpPC) {
const SourceLocation &Loc = S.Current->getLocation(OpPC);
@@ -3701,17 +3668,6 @@ inline bool CheckDestruction(InterpState &S, CodePtr OpPC) {
return CheckDestructor(S, OpPC, Ptr);
}
-inline bool CheckArraySize(InterpState &S, CodePtr OpPC, uint64_t NumElems) {
- uint64_t Limit = S.getLangOpts().ConstexprStepLimit;
- if (Limit != 0 && NumElems > Limit) {
- S.FFDiag(S.Current->getSource(OpPC),
- diag::note_constexpr_new_exceeds_limits)
- << NumElems << Limit;
- return false;
- }
- return true;
-}
-
//===----------------------------------------------------------------------===//
// Read opcode arguments
//===----------------------------------------------------------------------===//
diff --git a/clang/lib/AST/ByteCode/InterpBuiltin.cpp b/clang/lib/AST/ByteCode/InterpBuiltin.cpp
index 39b991c..d0b97a1 100644
--- a/clang/lib/AST/ByteCode/InterpBuiltin.cpp
+++ b/clang/lib/AST/ByteCode/InterpBuiltin.cpp
@@ -8,15 +8,18 @@
#include "../ExprConstShared.h"
#include "Boolean.h"
#include "EvalEmitter.h"
-#include "Interp.h"
#include "InterpBuiltinBitCast.h"
+#include "InterpHelpers.h"
#include "PrimType.h"
+#include "Program.h"
+#include "clang/AST/InferAlloc.h"
#include "clang/AST/OSLog.h"
#include "clang/AST/RecordLayout.h"
#include "clang/Basic/Builtins.h"
#include "clang/Basic/TargetBuiltins.h"
#include "clang/Basic/TargetInfo.h"
#include "llvm/ADT/StringExtras.h"
+#include "llvm/Support/AllocToken.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/SipHash.h"
@@ -1306,6 +1309,45 @@ interp__builtin_ptrauth_string_discriminator(InterpState &S, CodePtr OpPC,
return true;
}
+static bool interp__builtin_infer_alloc_token(InterpState &S, CodePtr OpPC,
+ const InterpFrame *Frame,
+ const CallExpr *Call) {
+ const ASTContext &ASTCtx = S.getASTContext();
+ uint64_t BitWidth = ASTCtx.getTypeSize(ASTCtx.getSizeType());
+ auto Mode =
+ ASTCtx.getLangOpts().AllocTokenMode.value_or(llvm::DefaultAllocTokenMode);
+ uint64_t MaxTokens =
+ ASTCtx.getLangOpts().AllocTokenMax.value_or(~0ULL >> (64 - BitWidth));
+
+ // We do not read any of the arguments; discard them.
+ for (int I = Call->getNumArgs() - 1; I >= 0; --I)
+ discard(S.Stk, *S.getContext().classify(Call->getArg(I)));
+
+ // Note: Type inference from a surrounding cast is not supported in
+ // constexpr evaluation.
+ QualType AllocType = infer_alloc::inferPossibleType(Call, ASTCtx, nullptr);
+ if (AllocType.isNull()) {
+ S.CCEDiag(Call,
+ diag::note_constexpr_infer_alloc_token_type_inference_failed);
+ return false;
+ }
+
+ auto ATMD = infer_alloc::getAllocTokenMetadata(AllocType, ASTCtx);
+ if (!ATMD) {
+ S.CCEDiag(Call, diag::note_constexpr_infer_alloc_token_no_metadata);
+ return false;
+ }
+
+ auto MaybeToken = llvm::getAllocToken(Mode, *ATMD, MaxTokens);
+ if (!MaybeToken) {
+ S.CCEDiag(Call, diag::note_constexpr_infer_alloc_token_stateful_mode);
+ return false;
+ }
+
+ pushInteger(S, llvm::APInt(BitWidth, *MaybeToken), ASTCtx.getSizeType());
+ return true;
+}
+
static bool interp__builtin_operator_new(InterpState &S, CodePtr OpPC,
const InterpFrame *Frame,
const CallExpr *Call) {
@@ -2041,10 +2083,16 @@ static bool interp__builtin_memchr(InterpState &S, CodePtr OpPC,
}
if (ID == Builtin::BIstrchr || ID == Builtin::BI__builtin_strchr) {
+ int64_t DesiredTrunc;
+ if (S.getASTContext().CharTy->isSignedIntegerType())
+ DesiredTrunc =
+ Desired.trunc(S.getASTContext().getCharWidth()).getSExtValue();
+ else
+ DesiredTrunc =
+ Desired.trunc(S.getASTContext().getCharWidth()).getZExtValue();
// strchr compares directly to the passed integer, and therefore
// always fails if given an int that is not a char.
- if (Desired !=
- Desired.trunc(S.getASTContext().getCharWidth()).getSExtValue()) {
+ if (Desired != DesiredTrunc) {
S.Stk.push<Pointer>();
return true;
}
@@ -3272,6 +3320,65 @@ static bool interp__builtin_ia32_vpconflict(InterpState &S, CodePtr OpPC,
return true;
}
+static bool interp__builtin_x86_byteshift(
+ InterpState &S, CodePtr OpPC, const CallExpr *Call, unsigned ID,
+ llvm::function_ref<APInt(const Pointer &, unsigned Lane, unsigned I,
+ unsigned Shift)>
+ Fn) {
+ assert(Call->getNumArgs() == 2);
+
+ APSInt ImmAPS = popToAPSInt(S, Call->getArg(1));
+ uint64_t Shift = ImmAPS.getZExtValue() & 0xff;
+
+ const Pointer &Src = S.Stk.pop<Pointer>();
+ if (!Src.getFieldDesc()->isPrimitiveArray())
+ return false;
+
+ unsigned NumElems = Src.getNumElems();
+ const Pointer &Dst = S.Stk.peek<Pointer>();
+ PrimType ElemT = Src.getFieldDesc()->getPrimType();
+
+ for (unsigned Lane = 0; Lane != NumElems; Lane += 16) {
+ for (unsigned I = 0; I != 16; ++I) {
+ unsigned Base = Lane + I;
+ APSInt Result = APSInt(Fn(Src, Lane, I, Shift));
+ INT_TYPE_SWITCH_NO_BOOL(ElemT,
+ { Dst.elem<T>(Base) = static_cast<T>(Result); });
+ }
+ }
+
+ Dst.initializeAllElements();
+
+ return true;
+}
+
+static bool interp__builtin_ia32_shuffle_generic(
+ InterpState &S, CodePtr OpPC, const CallExpr *Call,
+ llvm::function_ref<std::pair<unsigned, unsigned>(unsigned, unsigned)>
+ GetSourceIndex) {
+
+ assert(Call->getNumArgs() == 3);
+ unsigned ShuffleMask = popToAPSInt(S, Call->getArg(2)).getZExtValue();
+
+ QualType Arg0Type = Call->getArg(0)->getType();
+ const auto *VecT = Arg0Type->castAs<VectorType>();
+ PrimType ElemT = *S.getContext().classify(VecT->getElementType());
+ unsigned NumElems = VecT->getNumElements();
+
+ const Pointer &B = S.Stk.pop<Pointer>();
+ const Pointer &A = S.Stk.pop<Pointer>();
+ const Pointer &Dst = S.Stk.peek<Pointer>();
+
+ for (unsigned DstIdx = 0; DstIdx != NumElems; ++DstIdx) {
+ auto [SrcVecIdx, SrcIdx] = GetSourceIndex(DstIdx, ShuffleMask);
+ const Pointer &Src = (SrcVecIdx == 0) ? A : B;
+ TYPE_SWITCH(ElemT, { Dst.elem<T>(DstIdx) = Src.elem<T>(SrcIdx); });
+ }
+ Dst.initializeAllElements();
+
+ return true;
+}
+
bool InterpretBuiltin(InterpState &S, CodePtr OpPC, const CallExpr *Call,
uint32_t BuiltinID) {
if (!S.getASTContext().BuiltinInfo.isConstantEvaluated(BuiltinID))
@@ -3464,7 +3571,7 @@ bool InterpretBuiltin(InterpState &S, CodePtr OpPC, const CallExpr *Call,
case Builtin::BI_lrotl:
case Builtin::BI_rotl64:
return interp__builtin_elementwise_int_binop(
- S, OpPC, Call, [](const APSInt &Value, const APSInt &Amount) -> APInt {
+ S, OpPC, Call, [](const APSInt &Value, const APSInt &Amount) {
return Value.rotl(Amount);
});
@@ -3478,7 +3585,7 @@ bool InterpretBuiltin(InterpState &S, CodePtr OpPC, const CallExpr *Call,
case Builtin::BI_lrotr:
case Builtin::BI_rotr64:
return interp__builtin_elementwise_int_binop(
- S, OpPC, Call, [](const APSInt &Value, const APSInt &Amount) -> APInt {
+ S, OpPC, Call, [](const APSInt &Value, const APSInt &Amount) {
return Value.rotr(Amount);
});
@@ -3687,6 +3794,9 @@ bool InterpretBuiltin(InterpState &S, CodePtr OpPC, const CallExpr *Call,
case Builtin::BI__builtin_ptrauth_string_discriminator:
return interp__builtin_ptrauth_string_discriminator(S, OpPC, Frame, Call);
+ case Builtin::BI__builtin_infer_alloc_token:
+ return interp__builtin_infer_alloc_token(S, OpPC, Frame, Call);
+
case Builtin::BI__noop:
pushInteger(S, 0, Call->getType());
return true;
@@ -3802,6 +3912,21 @@ bool InterpretBuiltin(InterpState &S, CodePtr OpPC, const CallExpr *Call,
return interp__builtin_ia32_movmsk_op(S, OpPC, Call);
}
+ case X86::BI__builtin_ia32_psignb128:
+ case X86::BI__builtin_ia32_psignb256:
+ case X86::BI__builtin_ia32_psignw128:
+ case X86::BI__builtin_ia32_psignw256:
+ case X86::BI__builtin_ia32_psignd128:
+ case X86::BI__builtin_ia32_psignd256:
+ return interp__builtin_elementwise_int_binop(
+ S, OpPC, Call, [](const APInt &AElem, const APInt &BElem) {
+ if (BElem.isZero())
+ return APInt::getZero(AElem.getBitWidth());
+ if (BElem.isNegative())
+ return -AElem;
+ return AElem;
+ });
+
case clang::X86::BI__builtin_ia32_pavgb128:
case clang::X86::BI__builtin_ia32_pavgw128:
case clang::X86::BI__builtin_ia32_pavgb256:
@@ -4184,6 +4309,42 @@ bool InterpretBuiltin(InterpState &S, CodePtr OpPC, const CallExpr *Call,
case X86::BI__builtin_ia32_selectpd_512:
return interp__builtin_select(S, OpPC, Call);
+ case X86::BI__builtin_ia32_shufps:
+ case X86::BI__builtin_ia32_shufps256:
+ case X86::BI__builtin_ia32_shufps512:
+ return interp__builtin_ia32_shuffle_generic(
+ S, OpPC, Call, [](unsigned DstIdx, unsigned ShuffleMask) {
+ unsigned NumElemPerLane = 4;
+ unsigned NumSelectableElems = NumElemPerLane / 2;
+ unsigned BitsPerElem = 2;
+ unsigned IndexMask = 0x3;
+ unsigned MaskBits = 8;
+ unsigned Lane = DstIdx / NumElemPerLane;
+ unsigned ElemInLane = DstIdx % NumElemPerLane;
+ unsigned LaneOffset = Lane * NumElemPerLane;
+ unsigned SrcIdx = ElemInLane >= NumSelectableElems ? 1 : 0;
+ unsigned BitIndex = (DstIdx * BitsPerElem) % MaskBits;
+ unsigned Index = (ShuffleMask >> BitIndex) & IndexMask;
+ return std::pair<unsigned, unsigned>{SrcIdx, LaneOffset + Index};
+ });
+ case X86::BI__builtin_ia32_shufpd:
+ case X86::BI__builtin_ia32_shufpd256:
+ case X86::BI__builtin_ia32_shufpd512:
+ return interp__builtin_ia32_shuffle_generic(
+ S, OpPC, Call, [](unsigned DstIdx, unsigned ShuffleMask) {
+ unsigned NumElemPerLane = 2;
+ unsigned NumSelectableElems = NumElemPerLane / 2;
+ unsigned BitsPerElem = 1;
+ unsigned IndexMask = 0x1;
+ unsigned MaskBits = 8;
+ unsigned Lane = DstIdx / NumElemPerLane;
+ unsigned ElemInLane = DstIdx % NumElemPerLane;
+ unsigned LaneOffset = Lane * NumElemPerLane;
+ unsigned SrcIdx = ElemInLane >= NumSelectableElems ? 1 : 0;
+ unsigned BitIndex = (DstIdx * BitsPerElem) % MaskBits;
+ unsigned Index = (ShuffleMask >> BitIndex) & IndexMask;
+ return std::pair<unsigned, unsigned>{SrcIdx, LaneOffset + Index};
+ });
case X86::BI__builtin_ia32_pshufb128:
case X86::BI__builtin_ia32_pshufb256:
case X86::BI__builtin_ia32_pshufb512:
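
Restated outside the interpreter for clarity (an illustrative sketch, not code from the patch), the shufps mapping above selects, for each destination element, two mask bits and a source vector based on the element's position within its 128-bit lane:

#include <cstdio>
#include <utility>

// Mirrors the lambda above: 4 elements per 128-bit lane, 2 mask bits per
// element, low half of each lane taken from A, high half from B.
static std::pair<unsigned, unsigned> shufpsSource(unsigned DstIdx,
                                                  unsigned Mask) {
  unsigned Lane = DstIdx / 4;
  unsigned ElemInLane = DstIdx % 4;
  unsigned SrcVec = ElemInLane >= 2 ? 1 : 0;
  unsigned Index = (Mask >> ((DstIdx * 2) % 8)) & 0x3;
  return {SrcVec, Lane * 4 + Index};
}

int main() {
  // For a 128-bit shufps with mask 0x4E: dst = {a[2], a[3], b[0], b[1]}.
  for (unsigned I = 0; I != 4; ++I) {
    auto [Vec, Idx] = shufpsSource(I, 0x4E);
    std::printf("dst[%u] = %c[%u]\n", I, Vec ? 'b' : 'a', Idx);
  }
}
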
@@ -4324,6 +4485,39 @@ bool InterpretBuiltin(InterpState &S, CodePtr OpPC, const CallExpr *Call,
case X86::BI__builtin_ia32_vec_set_v4di:
return interp__builtin_vec_set(S, OpPC, Call, BuiltinID);
+ case X86::BI__builtin_ia32_pslldqi128_byteshift:
+ case X86::BI__builtin_ia32_pslldqi256_byteshift:
+ case X86::BI__builtin_ia32_pslldqi512_byteshift:
+ // These SLLDQ intrinsics always operate on byte elements (8 bits).
+ // The lane width is hardcoded to 16 to match the SIMD register size,
+ // but the algorithm processes one byte per iteration,
+ // so APInt(8, ...) is correct and intentional.
+ return interp__builtin_x86_byteshift(
+ S, OpPC, Call, BuiltinID,
+ [](const Pointer &Src, unsigned Lane, unsigned I, unsigned Shift) {
+ if (I < Shift) {
+ return APInt(8, 0);
+ }
+ return APInt(8, Src.elem<uint8_t>(Lane + I - Shift));
+ });
+
+ case X86::BI__builtin_ia32_psrldqi128_byteshift:
+ case X86::BI__builtin_ia32_psrldqi256_byteshift:
+ case X86::BI__builtin_ia32_psrldqi512_byteshift:
+ // These SRLDQ intrinsics always operate on byte elements (8 bits).
+ // The lane width is hardcoded to 16 to match the SIMD register size,
+ // but the algorithm processes one byte per iteration,
+ // so APInt(8, ...) is correct and intentional.
+ return interp__builtin_x86_byteshift(
+ S, OpPC, Call, BuiltinID,
+ [](const Pointer &Src, unsigned Lane, unsigned I, unsigned Shift) {
+ if (I + Shift < 16) {
+ return APInt(8, Src.elem<uint8_t>(Lane + I + Shift));
+ }
+
+ return APInt(8, 0);
+ });
+
default:
S.FFDiag(S.Current->getLocation(OpPC),
diag::note_invalid_subexpr_in_const_expr)
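
The byte-shift helper follows the same per-lane pattern; as a standalone sketch (not part of the patch), the PSLLDQ case produces, within each 16-byte lane, zero for the first Shift bytes and a shifted copy of the source for the rest:

#include <array>
#include <cstdint>
#include <cstdio>

// One 16-byte lane of PSLLDQ: dst[I] = 0 for I < Shift, else Src[I - Shift].
static std::array<uint8_t, 16> pslldqLane(const std::array<uint8_t, 16> &Src,
                                          unsigned Shift) {
  std::array<uint8_t, 16> Dst{};
  for (unsigned I = 0; I != 16; ++I)
    Dst[I] = (I < Shift) ? 0 : Src[I - Shift];
  return Dst;
}

int main() {
  std::array<uint8_t, 16> Src{};
  for (unsigned I = 0; I != 16; ++I)
    Src[I] = static_cast<uint8_t>(I + 1);
  std::array<uint8_t, 16> Dst = pslldqLane(Src, 3); // first 3 bytes become 0
  for (uint8_t B : Dst)
    std::printf("%u ", B);
  std::printf("\n");
}
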
diff --git a/clang/lib/AST/ByteCode/InterpHelpers.h b/clang/lib/AST/ByteCode/InterpHelpers.h
new file mode 100644
index 0000000..6bf89d3
--- /dev/null
+++ b/clang/lib/AST/ByteCode/InterpHelpers.h
@@ -0,0 +1,141 @@
+//===--- InterpHelpers.h - Interpreter Helper Functions --------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_CLANG_AST_INTERP_INTERPHELPERS_H
+#define LLVM_CLANG_AST_INTERP_INTERPHELPERS_H
+
+#include "DynamicAllocator.h"
+#include "InterpState.h"
+#include "Pointer.h"
+
+namespace clang {
+class CallExpr;
+class OffsetOfExpr;
+
+namespace interp {
+class Block;
+struct Descriptor;
+
+/// Interpreter entry point.
+bool Interpret(InterpState &S);
+
+/// Interpret a builtin function.
+bool InterpretBuiltin(InterpState &S, CodePtr OpPC, const CallExpr *Call,
+ uint32_t BuiltinID);
+
+/// Interpret an offsetof operation.
+bool InterpretOffsetOf(InterpState &S, CodePtr OpPC, const OffsetOfExpr *E,
+ ArrayRef<int64_t> ArrayIndices, int64_t &Result);
+
+/// Checks if the array is offsetable.
+bool CheckArray(InterpState &S, CodePtr OpPC, const Pointer &Ptr);
+
+/// Checks if a pointer is live and accessible.
+bool CheckLive(InterpState &S, CodePtr OpPC, const Pointer &Ptr,
+ AccessKinds AK);
+
+/// Checks if a pointer is a dummy pointer.
+bool CheckDummy(InterpState &S, CodePtr OpPC, const Block *B, AccessKinds AK);
+
+/// Checks if a pointer is in range.
+bool CheckRange(InterpState &S, CodePtr OpPC, const Pointer &Ptr,
+ AccessKinds AK);
+
+/// Checks if a field from which a pointer is going to be derived is valid.
+bool CheckRange(InterpState &S, CodePtr OpPC, const Pointer &Ptr,
+ CheckSubobjectKind CSK);
+
+/// Checks if a pointer points to a mutable field.
+bool CheckMutable(InterpState &S, CodePtr OpPC, const Pointer &Ptr);
+
+/// Checks if a value can be loaded from a block.
+bool CheckLoad(InterpState &S, CodePtr OpPC, const Pointer &Ptr,
+ AccessKinds AK = AK_Read);
+
+/// Diagnose mismatched new[]/delete or new/delete[] pairs.
+bool CheckNewDeleteForms(InterpState &S, CodePtr OpPC,
+ DynamicAllocator::Form AllocForm,
+ DynamicAllocator::Form DeleteForm, const Descriptor *D,
+ const Expr *NewExpr);
+
+/// Copy the contents of Src into Dest.
+bool DoMemcpy(InterpState &S, CodePtr OpPC, const Pointer &Src, Pointer &Dest);
+
+template <typename T>
+static bool handleOverflow(InterpState &S, CodePtr OpPC, const T &SrcValue) {
+ const Expr *E = S.Current->getExpr(OpPC);
+ S.CCEDiag(E, diag::note_constexpr_overflow) << SrcValue << E->getType();
+ return S.noteUndefinedBehavior();
+}
+
+inline bool CheckArraySize(InterpState &S, CodePtr OpPC, uint64_t NumElems) {
+ uint64_t Limit = S.getLangOpts().ConstexprStepLimit;
+ if (Limit != 0 && NumElems > Limit) {
+ S.FFDiag(S.Current->getSource(OpPC),
+ diag::note_constexpr_new_exceeds_limits)
+ << NumElems << Limit;
+ return false;
+ }
+ return true;
+}
+
+static inline llvm::RoundingMode getRoundingMode(FPOptions FPO) {
+ auto RM = FPO.getRoundingMode();
+ if (RM == llvm::RoundingMode::Dynamic)
+ return llvm::RoundingMode::NearestTiesToEven;
+ return RM;
+}
+
+inline bool Invalid(InterpState &S, CodePtr OpPC) {
+ const SourceLocation &Loc = S.Current->getLocation(OpPC);
+ S.FFDiag(Loc, diag::note_invalid_subexpr_in_const_expr)
+ << S.Current->getRange(OpPC);
+ return false;
+}
+
+template <typename SizeT>
+bool CheckArraySize(InterpState &S, CodePtr OpPC, SizeT *NumElements,
+ unsigned ElemSize, bool IsNoThrow) {
+ // FIXME: Both the SizeT::from() as well as the
+ // NumElements.toAPSInt() in this function are rather expensive.
+
+ // Can't be too many elements if the bitwidth of NumElements is lower than
+ // that of Descriptor::MaxArrayElemBytes.
+ if ((NumElements->bitWidth() - NumElements->isSigned()) <
+ (sizeof(Descriptor::MaxArrayElemBytes) * 8))
+ return true;
+
+ // FIXME: GH63562
+ // APValue stores array extents as unsigned,
+ // so anything that is greater than unsigned would overflow when
+ // constructing the array, we catch this here.
+ SizeT MaxElements = SizeT::from(Descriptor::MaxArrayElemBytes / ElemSize);
+ assert(MaxElements.isPositive());
+ if (NumElements->toAPSInt().getActiveBits() >
+ ConstantArrayType::getMaxSizeBits(S.getASTContext()) ||
+ *NumElements > MaxElements) {
+ if (!IsNoThrow) {
+ const SourceInfo &Loc = S.Current->getSource(OpPC);
+
+ if (NumElements->isSigned() && NumElements->isNegative()) {
+ S.FFDiag(Loc, diag::note_constexpr_new_negative)
+ << NumElements->toDiagnosticString(S.getASTContext());
+ } else {
+ S.FFDiag(Loc, diag::note_constexpr_new_too_large)
+ << NumElements->toDiagnosticString(S.getASTContext());
+ }
+ }
+ return false;
+ }
+ return true;
+}
+
+} // namespace interp
+} // namespace clang
+
+#endif // LLVM_CLANG_AST_INTERP_INTERPHELPERS_H
diff --git a/clang/lib/AST/ByteCode/Opcodes.td b/clang/lib/AST/ByteCode/Opcodes.td
index 532c444..1c17ad9e 100644
--- a/clang/lib/AST/ByteCode/Opcodes.td
+++ b/clang/lib/AST/ByteCode/Opcodes.td
@@ -612,12 +612,25 @@ class OverflowOpcode : Opcode {
let HasGroup = 1;
}
+class OverflowBitfieldOpcode : Opcode {
+ let Types = [AluTypeClass];
+ let Args = [ArgBool, ArgUint32];
+ let HasGroup = 1;
+}
+
def Inc : OverflowOpcode;
+def IncBitfield : OverflowBitfieldOpcode;
def IncPop : OverflowOpcode;
+def IncPopBitfield : OverflowBitfieldOpcode;
def PreInc : OverflowOpcode;
+def PreIncBitfield : OverflowBitfieldOpcode;
+
def Dec : OverflowOpcode;
+def DecBitfield : OverflowBitfieldOpcode;
def DecPop : OverflowOpcode;
+def DecPopBitfield : OverflowBitfieldOpcode;
def PreDec : OverflowOpcode;
+def PreDecBitfield : OverflowBitfieldOpcode;
// Float increment and decrement.
def Incf: FloatOpcode;
@@ -853,19 +866,13 @@ def Free : Opcode {
let Args = [ArgBool, ArgBool];
}
-def CheckNewTypeMismatch : Opcode {
- let Args = [ArgExpr];
-}
-
-def InvalidNewDeleteExpr : Opcode {
- let Args = [ArgExpr];
-}
-
+def CheckNewTypeMismatch : Opcode { let Args = [ArgExpr]; }
def CheckNewTypeMismatchArray : Opcode {
let Types = [IntegerTypeClass];
let Args = [ArgExpr];
let HasGroup = 1;
}
+def InvalidNewDeleteExpr : Opcode { let Args = [ArgExpr]; }
def IsConstantContext: Opcode;
def CheckAllocations : Opcode;