Diffstat (limited to 'clang/lib/AST/ByteCode')
 clang/lib/AST/ByteCode/Interp.cpp        | 32
 clang/lib/AST/ByteCode/InterpBuiltin.cpp | 83
 2 files changed, 98 insertions(+), 17 deletions(-)
diff --git a/clang/lib/AST/ByteCode/Interp.cpp b/clang/lib/AST/ByteCode/Interp.cpp
index d640be0..a2fb0fb 100644
--- a/clang/lib/AST/ByteCode/Interp.cpp
+++ b/clang/lib/AST/ByteCode/Interp.cpp
@@ -1651,8 +1651,8 @@ static bool GetDynamicDecl(InterpState &S, CodePtr OpPC, Pointer TypePtr,
QualType DynamicType = TypePtr.getType();
if (TypePtr.isStatic() || TypePtr.isConst()) {
- const VarDecl *VD = TypePtr.getDeclDesc()->asVarDecl();
- if (!VD->isConstexpr()) {
+ if (const VarDecl *VD = TypePtr.getDeclDesc()->asVarDecl();
+ VD && !VD->isConstexpr()) {
const Expr *E = S.Current->getExpr(OpPC);
APValue V = TypePtr.toAPValue(S.getASTContext());
QualType TT = S.getASTContext().getLValueReferenceType(DynamicType);
@@ -1683,20 +1683,6 @@ bool CallVirt(InterpState &S, CodePtr OpPC, const Function *Func,
Pointer &ThisPtr = S.Stk.peek<Pointer>(ThisOffset);
const FunctionDecl *Callee = Func->getDecl();
- if (!Func->isFullyCompiled())
- compileFunction(S, Func);
-
- // C++2a [class.abstract]p6:
- // the effect of making a virtual call to a pure virtual function [...] is
- // undefined
- if (Callee->isPureVirtual()) {
- S.FFDiag(S.Current->getSource(OpPC), diag::note_constexpr_pure_virtual_call,
- 1)
- << Callee;
- S.Note(Callee->getLocation(), diag::note_declared_at);
- return false;
- }
-
const CXXRecordDecl *DynamicDecl = nullptr;
if (!GetDynamicDecl(S, OpPC, ThisPtr, DynamicDecl))
return false;
@@ -1706,7 +1692,8 @@ bool CallVirt(InterpState &S, CodePtr OpPC, const Function *Func,
const auto *InitialFunction = cast<CXXMethodDecl>(Callee);
const CXXMethodDecl *Overrider;
- if (StaticDecl != DynamicDecl) {
+ if (StaticDecl != DynamicDecl &&
+ !llvm::is_contained(S.InitializingBlocks, ThisPtr.block())) {
if (!DynamicDecl->isDerivedFrom(StaticDecl))
return false;
Overrider = S.getContext().getOverridingFunction(DynamicDecl, StaticDecl,
@@ -1716,6 +1703,17 @@ bool CallVirt(InterpState &S, CodePtr OpPC, const Function *Func,
Overrider = InitialFunction;
}
+ // C++2a [class.abstract]p6:
+ // the effect of making a virtual call to a pure virtual function [...] is
+ // undefined
+ if (Overrider->isPureVirtual()) {
+ S.FFDiag(S.Current->getSource(OpPC), diag::note_constexpr_pure_virtual_call,
+ 1)
+ << Callee;
+ S.Note(Callee->getLocation(), diag::note_declared_at);
+ return false;
+ }
+
if (Overrider != InitialFunction) {
// DR1872: An instantiated virtual constexpr function can't be called in a
// constant expression (prior to C++20). We can still constant-fold such a
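
The Interp.cpp change moves the [class.abstract]p6 check so that it fires on the overrider the virtual call actually resolves to, and skips dynamic dispatch for objects that are still being initialized. A minimal C++20 sketch of the kind of case this affects (illustrative only, not taken from the patch or its tests):

    struct Shape {
      virtual int sides() const = 0;                   // pure virtual
      constexpr int query() const { return sides(); }  // virtual call through the base
    };
    struct Triangle : Shape {
      constexpr int sides() const override { return 3; }
    };

    // Accepted: the call inside query() resolves to the non-pure overrider
    // Triangle::sides, so no pure-virtual diagnostic is emitted.
    constexpr Triangle T;
    static_assert(T.query() == 3);

    // Rejected: inside a constructor the object's dynamic type is the class
    // being initialized, so a call to sides() from a Shape constructor would
    // resolve to the pure virtual itself and hit
    // note_constexpr_pure_virtual_call.
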
diff --git a/clang/lib/AST/ByteCode/InterpBuiltin.cpp b/clang/lib/AST/ByteCode/InterpBuiltin.cpp
index 8f23001..ab6b3ed 100644
--- a/clang/lib/AST/ByteCode/InterpBuiltin.cpp
+++ b/clang/lib/AST/ByteCode/InterpBuiltin.cpp
@@ -3296,6 +3296,60 @@ static bool interp__builtin_vec_set(InterpState &S, CodePtr OpPC,
return true;
}
+static bool evalICmpImm(uint8_t Imm, const APSInt &A, const APSInt &B,
+ bool IsUnsigned) {
+ switch (Imm & 0x7) {
+ case 0x00: // _MM_CMPINT_EQ
+ return (A == B);
+ case 0x01: // _MM_CMPINT_LT
+ return IsUnsigned ? A.ult(B) : A.slt(B);
+ case 0x02: // _MM_CMPINT_LE
+ return IsUnsigned ? A.ule(B) : A.sle(B);
+ case 0x03: // _MM_CMPINT_FALSE
+ return false;
+ case 0x04: // _MM_CMPINT_NE
+ return (A != B);
+  case 0x05: // _MM_CMPINT_NLT
+    return IsUnsigned ? A.uge(B) : A.sge(B);
+  case 0x06: // _MM_CMPINT_NLE
+    return IsUnsigned ? A.ugt(B) : A.sgt(B);
+ case 0x07: // _MM_CMPINT_TRUE
+ return true;
+ default:
+ llvm_unreachable("Invalid Op");
+ }
+}
+
+static bool interp__builtin_ia32_cmp_mask(InterpState &S, CodePtr OpPC,
+ const CallExpr *Call, unsigned ID,
+ bool IsUnsigned) {
+ assert(Call->getNumArgs() == 4);
+
+ APSInt Mask = popToAPSInt(S, Call->getArg(3));
+ APSInt Opcode = popToAPSInt(S, Call->getArg(2));
+ unsigned CmpOp = static_cast<unsigned>(Opcode.getZExtValue());
+ const Pointer &RHS = S.Stk.pop<Pointer>();
+ const Pointer &LHS = S.Stk.pop<Pointer>();
+
+ assert(LHS.getNumElems() == RHS.getNumElems());
+
+ APInt RetMask = APInt::getZero(LHS.getNumElems());
+ unsigned VectorLen = LHS.getNumElems();
+ PrimType ElemT = LHS.getFieldDesc()->getPrimType();
+
+ for (unsigned ElemNum = 0; ElemNum < VectorLen; ++ElemNum) {
+ APSInt A, B;
+ INT_TYPE_SWITCH_NO_BOOL(ElemT, {
+ A = LHS.elem<T>(ElemNum).toAPSInt();
+ B = RHS.elem<T>(ElemNum).toAPSInt();
+ });
+ RetMask.setBitVal(ElemNum,
+ Mask[ElemNum] && evalICmpImm(CmpOp, A, B, IsUnsigned));
+ }
+ pushInteger(S, RetMask, Call->getType());
+ return true;
+}
+
static bool interp__builtin_ia32_vpconflict(InterpState &S, CodePtr OpPC,
const CallExpr *Call) {
assert(Call->getNumArgs() == 1);
@@ -4488,6 +4542,35 @@ bool InterpretBuiltin(InterpState &S, CodePtr OpPC, const CallExpr *Call,
case X86::BI__builtin_ia32_vec_set_v4di:
return interp__builtin_vec_set(S, OpPC, Call, BuiltinID);
+ case X86::BI__builtin_ia32_cmpb128_mask:
+ case X86::BI__builtin_ia32_cmpw128_mask:
+ case X86::BI__builtin_ia32_cmpd128_mask:
+ case X86::BI__builtin_ia32_cmpq128_mask:
+ case X86::BI__builtin_ia32_cmpb256_mask:
+ case X86::BI__builtin_ia32_cmpw256_mask:
+ case X86::BI__builtin_ia32_cmpd256_mask:
+ case X86::BI__builtin_ia32_cmpq256_mask:
+ case X86::BI__builtin_ia32_cmpb512_mask:
+ case X86::BI__builtin_ia32_cmpw512_mask:
+ case X86::BI__builtin_ia32_cmpd512_mask:
+ case X86::BI__builtin_ia32_cmpq512_mask:
+ return interp__builtin_ia32_cmp_mask(S, OpPC, Call, BuiltinID,
+ /*IsUnsigned=*/false);
+
+ case X86::BI__builtin_ia32_ucmpb128_mask:
+ case X86::BI__builtin_ia32_ucmpw128_mask:
+ case X86::BI__builtin_ia32_ucmpd128_mask:
+ case X86::BI__builtin_ia32_ucmpq128_mask:
+ case X86::BI__builtin_ia32_ucmpb256_mask:
+ case X86::BI__builtin_ia32_ucmpw256_mask:
+ case X86::BI__builtin_ia32_ucmpd256_mask:
+ case X86::BI__builtin_ia32_ucmpq256_mask:
+ case X86::BI__builtin_ia32_ucmpb512_mask:
+ case X86::BI__builtin_ia32_ucmpw512_mask:
+ case X86::BI__builtin_ia32_ucmpd512_mask:
+ case X86::BI__builtin_ia32_ucmpq512_mask:
+ return interp__builtin_ia32_cmp_mask(S, OpPC, Call, BuiltinID,
+ /*IsUnsigned=*/true);
case X86::BI__builtin_ia32_pslldqi128_byteshift:
case X86::BI__builtin_ia32_pslldqi256_byteshift:
case X86::BI__builtin_ia32_pslldqi512_byteshift:
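
The 3-bit immediate these cmp/ucmp mask builtins take follows the _MM_CMPINT_* encoding from the AVX-512 headers. Below is a standalone sketch of the per-element predicate that evalICmpImm models, using plain signed integers for brevity (illustrative only; cmpIntPredicate is not part of the patch):

    // Scalar model of the predicate encoding: EQ=0, LT=1, LE=2, FALSE=3,
    // NE=4, NLT=5, NLE=6, TRUE=7; only bits [2:0] of the immediate are used.
    constexpr bool cmpIntPredicate(unsigned Imm, long A, long B) {
      switch (Imm & 0x7) {
      case 0: return A == B;  // _MM_CMPINT_EQ
      case 1: return A < B;   // _MM_CMPINT_LT
      case 2: return A <= B;  // _MM_CMPINT_LE
      case 3: return false;   // _MM_CMPINT_FALSE
      case 4: return A != B;  // _MM_CMPINT_NE
      case 5: return A >= B;  // _MM_CMPINT_NLT ("not less than")
      case 6: return A > B;   // _MM_CMPINT_NLE ("not less than or equal")
      default: return true;   // _MM_CMPINT_TRUE
      }
    }

    static_assert(cmpIntPredicate(1, -3, 2));   // LT: -3 < 2 (signed)
    static_assert(cmpIntPredicate(5, 2, 2));    // NLT: !(2 < 2)
    static_assert(!cmpIntPredicate(6, 2, 2));   // NLE is strict

Elements whose bit in the incoming write mask is clear produce a 0 bit in the result, matching the Mask[ElemNum] && ... term in the loop of interp__builtin_ia32_cmp_mask above.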