Diffstat (limited to 'clang/lib/AST')
-rw-r--r--  clang/lib/AST/ASTContext.cpp             |   4
-rw-r--r--  clang/lib/AST/ByteCode/Compiler.cpp      | 145
-rw-r--r--  clang/lib/AST/ByteCode/Interp.cpp        |   9
-rw-r--r--  clang/lib/AST/ByteCode/InterpBuiltin.cpp | 187
-rw-r--r--  clang/lib/AST/ByteCode/Opcodes.td        |  10
-rw-r--r--  clang/lib/AST/ExprConstant.cpp           | 180
-rw-r--r--  clang/lib/AST/StmtOpenACC.cpp            |  44
7 files changed, 489 insertions, 90 deletions
diff --git a/clang/lib/AST/ASTContext.cpp b/clang/lib/AST/ASTContext.cpp
index 32c8f62..687cd46 100644
--- a/clang/lib/AST/ASTContext.cpp
+++ b/clang/lib/AST/ASTContext.cpp
@@ -1648,6 +1648,9 @@ ASTContext::findPointerAuthContent(QualType T) const {
if (!RD)
return PointerAuthContent::None;
+ if (RD->isInvalidDecl())
+ return PointerAuthContent::None;
+
if (auto Existing = RecordContainsAddressDiscriminatedPointerAuth.find(RD);
Existing != RecordContainsAddressDiscriminatedPointerAuth.end())
return Existing->second;
@@ -3517,7 +3520,6 @@ static void encodeTypeForFunctionPointerAuth(const ASTContext &Ctx,
uint16_t ASTContext::getPointerAuthTypeDiscriminator(QualType T) {
assert(!T->isDependentType() &&
"cannot compute type discriminator of a dependent type");
-
SmallString<256> Str;
llvm::raw_svector_ostream Out(Str);
diff --git a/clang/lib/AST/ByteCode/Compiler.cpp b/clang/lib/AST/ByteCode/Compiler.cpp
index f15b3c1..f4ddbf4 100644
--- a/clang/lib/AST/ByteCode/Compiler.cpp
+++ b/clang/lib/AST/ByteCode/Compiler.cpp
@@ -1842,7 +1842,6 @@ bool Compiler<Emitter>::visitInitList(ArrayRef<const Expr *> Inits,
const Expr *Init, PrimType T,
bool Activate = false) -> bool {
InitStackScope<Emitter> ISS(this, isa<CXXDefaultInitExpr>(Init));
- InitLinkScope<Emitter> ILS(this, InitLink::Field(FieldToInit->Offset));
if (!this->visit(Init))
return false;
@@ -3274,34 +3273,43 @@ bool Compiler<Emitter>::VisitCXXConstructExpr(const CXXConstructExpr *E) {
}
if (T->isArrayType()) {
- const ConstantArrayType *CAT =
- Ctx.getASTContext().getAsConstantArrayType(E->getType());
- if (!CAT)
- return false;
-
- size_t NumElems = CAT->getZExtSize();
const Function *Func = getFunction(E->getConstructor());
if (!Func)
return false;
- // FIXME(perf): We're calling the constructor once per array element here;
- // in the old interpreter we had a special case for trivial constructors.
- for (size_t I = 0; I != NumElems; ++I) {
- if (!this->emitConstUint64(I, E))
- return false;
- if (!this->emitArrayElemPtrUint64(E))
- return false;
+ if (!this->emitDupPtr(E))
+ return false;
- // Constructor arguments.
- for (const auto *Arg : E->arguments()) {
- if (!this->visit(Arg))
- return false;
+ std::function<bool(QualType)> initArrayDimension;
+ initArrayDimension = [&](QualType T) -> bool {
+ if (!T->isArrayType()) {
+ // Constructor arguments.
+ for (const auto *Arg : E->arguments()) {
+ if (!this->visit(Arg))
+ return false;
+ }
+
+ return this->emitCall(Func, 0, E);
}
- if (!this->emitCall(Func, 0, E))
+ const ConstantArrayType *CAT =
+ Ctx.getASTContext().getAsConstantArrayType(T);
+ if (!CAT)
return false;
- }
- return true;
+ QualType ElemTy = CAT->getElementType();
+ unsigned NumElems = CAT->getZExtSize();
+ for (size_t I = 0; I != NumElems; ++I) {
+ if (!this->emitConstUint64(I, E))
+ return false;
+ if (!this->emitArrayElemPtrUint64(E))
+ return false;
+ if (!initArrayDimension(ElemTy))
+ return false;
+ }
+ return this->emitPopPtr(E);
+ };
+
+ return initArrayDimension(E->getType());
}
return false;
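
The recursion above exists so that multi-dimensional arrays work: the constructor is invoked only for the innermost non-array element type, with one array-element pointer emitted per dimension on the way down. A minimal sketch of the kind of code this enables under the bytecode interpreter (illustrative, not from the patch):

struct S {
  constexpr S() : V(1) {}
  int V;
};

constexpr bool testNestedArrayCtor() {
  S Arr[2][3]; // one constructor call per innermost element, 6 in total
  return Arr[1][2].V == 1;
}
static_assert(testNestedArrayCtor());
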
@@ -3600,8 +3608,6 @@ bool Compiler<Emitter>::VisitCXXNewExpr(const CXXNewExpr *E) {
if (PlacementDest) {
if (!this->visit(PlacementDest))
return false;
- if (!this->emitStartLifetime(E))
- return false;
if (!this->emitGetLocal(SizeT, ArrayLen, E))
return false;
if (!this->emitCheckNewTypeMismatchArray(SizeT, E, E))
@@ -3741,10 +3747,9 @@ bool Compiler<Emitter>::VisitCXXNewExpr(const CXXNewExpr *E) {
if (PlacementDest) {
if (!this->visit(PlacementDest))
return false;
- if (!this->emitStartLifetime(E))
- return false;
if (!this->emitCheckNewTypeMismatch(E, E))
return false;
+
} else {
// Allocate just one element.
if (!this->emitAlloc(Desc, E))
@@ -5385,55 +5390,57 @@ bool Compiler<Emitter>::VisitCXXThisExpr(const CXXThisExpr *E) {
// instance pointer of the current function frame, but e.g. to the declaration
// currently being initialized. Here we emit the necessary instruction(s) for
// this scenario.
- if (!InitStackActive)
+ if (!InitStackActive || InitStack.empty())
return this->emitThis(E);
- if (!InitStack.empty()) {
- // If our init stack is, for example:
- // 0 Stack: 3 (decl)
- // 1 Stack: 6 (init list)
- // 2 Stack: 1 (field)
- // 3 Stack: 6 (init list)
- // 4 Stack: 1 (field)
- //
- // We want to find the LAST element in it that's an init list,
- // which is marked with the K_InitList marker. The index right
- // before that points to an init list. We need to find the
- // elements before the K_InitList element that point to a base
- // (e.g. a decl or This), optionally followed by field, elem, etc.
- // In the example above, we want to emit elements [0..2].
- unsigned StartIndex = 0;
- unsigned EndIndex = 0;
- // Find the init list.
- for (StartIndex = InitStack.size() - 1; StartIndex > 0; --StartIndex) {
- if (InitStack[StartIndex].Kind == InitLink::K_InitList ||
- InitStack[StartIndex].Kind == InitLink::K_This) {
- EndIndex = StartIndex;
- --StartIndex;
- break;
- }
+ // If our init stack is, for example:
+ // 0 Stack: 3 (decl)
+ // 1 Stack: 6 (init list)
+ // 2 Stack: 1 (field)
+ // 3 Stack: 6 (init list)
+ // 4 Stack: 1 (field)
+ //
+ // We want to find the LAST element in it that's an init list,
+ // which is marked with the K_InitList marker. The index right
+ // before that points to an init list. We need to find the
+ // elements before the K_InitList element that point to a base
+ // (e.g. a decl or This), optionally followed by field, elem, etc.
+ // In the example above, we want to emit elements [0..2].
+ unsigned StartIndex = 0;
+ unsigned EndIndex = 0;
+ // Find the init list.
+ for (StartIndex = InitStack.size() - 1; StartIndex > 0; --StartIndex) {
+ if (InitStack[StartIndex].Kind == InitLink::K_InitList ||
+ InitStack[StartIndex].Kind == InitLink::K_This) {
+ EndIndex = StartIndex;
+ --StartIndex;
+ break;
}
+ }
- // Walk backwards to find the base.
- for (; StartIndex > 0; --StartIndex) {
- if (InitStack[StartIndex].Kind == InitLink::K_InitList)
- continue;
+ // Walk backwards to find the base.
+ for (; StartIndex > 0; --StartIndex) {
+ if (InitStack[StartIndex].Kind == InitLink::K_InitList)
+ continue;
- if (InitStack[StartIndex].Kind != InitLink::K_Field &&
- InitStack[StartIndex].Kind != InitLink::K_Elem)
- break;
- }
+ if (InitStack[StartIndex].Kind != InitLink::K_Field &&
+ InitStack[StartIndex].Kind != InitLink::K_Elem)
+ break;
+ }
- // Emit the instructions.
- for (unsigned I = StartIndex; I != EndIndex; ++I) {
- if (InitStack[I].Kind == InitLink::K_InitList)
- continue;
- if (!InitStack[I].template emit<Emitter>(this, E))
- return false;
- }
- return true;
+ if (StartIndex == 0 && EndIndex == 0)
+ EndIndex = InitStack.size() - 1;
+
+ assert(StartIndex < EndIndex);
+
+ // Emit the instructions.
+ for (unsigned I = StartIndex; I != (EndIndex + 1); ++I) {
+ if (InitStack[I].Kind == InitLink::K_InitList)
+ continue;
+ if (!InitStack[I].template emit<Emitter>(this, E))
+ return false;
}
- return this->emitThis(E);
+ return true;
}
template <class Emitter> bool Compiler<Emitter>::visitStmt(const Stmt *S) {
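
For reference, a hedged example of the situation the init-stack walk handles: a CXXDefaultInitExpr evaluated while the enclosing object is still being initialized, where 'this' must refer to the object under construction rather than to any frame's instance pointer (illustrative):

struct Inner {
  int A = 1;
  int B = A + 1;     // CXXDefaultInitExpr; 'A' is implicitly this->A
};
struct Outer {
  Inner In;
  int C = In.B + 1;  // resolved by walking decl -> init list -> field links
};
constexpr Outer O{};
static_assert(O.C == 3);
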
@@ -6295,6 +6302,10 @@ bool Compiler<Emitter>::compileConstructor(const CXXConstructorDecl *Ctor) {
}
assert(NestedField);
+ unsigned FirstLinkOffset =
+ R->getField(cast<FieldDecl>(IFD->chain()[0]))->Offset;
+ InitStackScope<Emitter> ISS(this, isa<CXXDefaultInitExpr>(InitExpr));
+ InitLinkScope<Emitter> ILS(this, InitLink::Field(FirstLinkOffset));
if (!emitFieldInitializer(NestedField, NestedFieldOffset, InitExpr,
IsUnion))
return false;
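
The added InitLink::Field uses the offset of the first decl in the IndirectFieldDecl's chain, i.e. the anonymous member that actually exists in the record layout. A sketch of the pattern this addresses, using a standard anonymous union (illustrative):

struct A {
  union {
    int X;           // named through an IndirectFieldDecl
  };
  constexpr A() : X(42) {}
};
static_assert(A().X == 42);
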
diff --git a/clang/lib/AST/ByteCode/Interp.cpp b/clang/lib/AST/ByteCode/Interp.cpp
index a72282c..169a9a2 100644
--- a/clang/lib/AST/ByteCode/Interp.cpp
+++ b/clang/lib/AST/ByteCode/Interp.cpp
@@ -1903,12 +1903,19 @@ bool CheckNewTypeMismatch(InterpState &S, CodePtr OpPC, const Expr *E,
if (Ptr.inUnion() && Ptr.getBase().getRecord()->isUnion())
Ptr.activate();
+ if (Ptr.isZero()) {
+ S.FFDiag(S.Current->getSource(OpPC), diag::note_constexpr_access_null)
+ << AK_Construct;
+ return false;
+ }
+
if (!Ptr.isBlockPointer())
return false;
+ startLifetimeRecurse(Ptr);
+
// Similar to CheckStore(), but with the additional CheckTemporary() call and
// the AccessKinds are different.
-
if (!Ptr.block()->isAccessible()) {
if (!CheckExtern(S, OpPC, Ptr))
return false;
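
With the explicit StartLifetime opcodes dropped from the compiler, CheckNewTypeMismatch now both rejects a null placement address and (re)starts the lifetime of the destination recursively. A hedged sketch of constexpr placement new (assuming -std=c++2c, where this is permitted) that goes through this path:

constexpr int testPlacementNew() {
  int Storage = 0;
  new (&Storage) int(42); // null check + startLifetimeRecurse on &Storage
  return Storage;
}
static_assert(testPlacementNew() == 42);
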
diff --git a/clang/lib/AST/ByteCode/InterpBuiltin.cpp b/clang/lib/AST/ByteCode/InterpBuiltin.cpp
index 2d5ad4a..d0b97a1 100644
--- a/clang/lib/AST/ByteCode/InterpBuiltin.cpp
+++ b/clang/lib/AST/ByteCode/InterpBuiltin.cpp
@@ -12,12 +12,14 @@
#include "InterpHelpers.h"
#include "PrimType.h"
#include "Program.h"
+#include "clang/AST/InferAlloc.h"
#include "clang/AST/OSLog.h"
#include "clang/AST/RecordLayout.h"
#include "clang/Basic/Builtins.h"
#include "clang/Basic/TargetBuiltins.h"
#include "clang/Basic/TargetInfo.h"
#include "llvm/ADT/StringExtras.h"
+#include "llvm/Support/AllocToken.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/SipHash.h"
@@ -1307,6 +1309,45 @@ interp__builtin_ptrauth_string_discriminator(InterpState &S, CodePtr OpPC,
return true;
}
+static bool interp__builtin_infer_alloc_token(InterpState &S, CodePtr OpPC,
+ const InterpFrame *Frame,
+ const CallExpr *Call) {
+ const ASTContext &ASTCtx = S.getASTContext();
+ uint64_t BitWidth = ASTCtx.getTypeSize(ASTCtx.getSizeType());
+ auto Mode =
+ ASTCtx.getLangOpts().AllocTokenMode.value_or(llvm::DefaultAllocTokenMode);
+ uint64_t MaxTokens =
+ ASTCtx.getLangOpts().AllocTokenMax.value_or(~0ULL >> (64 - BitWidth));
+
+ // We do not read any of the arguments; discard them.
+ for (int I = Call->getNumArgs() - 1; I >= 0; --I)
+ discard(S.Stk, *S.getContext().classify(Call->getArg(I)));
+
+ // Note: Type inference from a surrounding cast is not supported in
+ // constexpr evaluation.
+ QualType AllocType = infer_alloc::inferPossibleType(Call, ASTCtx, nullptr);
+ if (AllocType.isNull()) {
+ S.CCEDiag(Call,
+ diag::note_constexpr_infer_alloc_token_type_inference_failed);
+ return false;
+ }
+
+ auto ATMD = infer_alloc::getAllocTokenMetadata(AllocType, ASTCtx);
+ if (!ATMD) {
+ S.CCEDiag(Call, diag::note_constexpr_infer_alloc_token_no_metadata);
+ return false;
+ }
+
+ auto MaybeToken = llvm::getAllocToken(Mode, *ATMD, MaxTokens);
+ if (!MaybeToken) {
+ S.CCEDiag(Call, diag::note_constexpr_infer_alloc_token_stateful_mode);
+ return false;
+ }
+
+ pushInteger(S, llvm::APInt(BitWidth, *MaybeToken), ASTCtx.getSizeType());
+ return true;
+}
+
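
For context, __builtin_infer_alloc_token evaluates to the allocation token that -falloc-token instrumentation would attach to a matching allocation call; the type is inferred from the argument list (typically a sizeof subexpression). A hedged usage sketch, assuming a stateless token mode so the value is a constant:

struct Widget { int A, B; };

// The token is inferred from sizeof(Widget) and is usable as a constant.
constexpr auto Tok = __builtin_infer_alloc_token(sizeof(Widget));
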
static bool interp__builtin_operator_new(InterpState &S, CodePtr OpPC,
const InterpFrame *Frame,
const CallExpr *Call) {
@@ -3279,6 +3320,65 @@ static bool interp__builtin_ia32_vpconflict(InterpState &S, CodePtr OpPC,
return true;
}
+static bool interp__builtin_x86_byteshift(
+ InterpState &S, CodePtr OpPC, const CallExpr *Call, unsigned ID,
+ llvm::function_ref<APInt(const Pointer &, unsigned Lane, unsigned I,
+ unsigned Shift)>
+ Fn) {
+ assert(Call->getNumArgs() == 2);
+
+ APSInt ImmAPS = popToAPSInt(S, Call->getArg(1));
+ uint64_t Shift = ImmAPS.getZExtValue() & 0xff;
+
+ const Pointer &Src = S.Stk.pop<Pointer>();
+ if (!Src.getFieldDesc()->isPrimitiveArray())
+ return false;
+
+ unsigned NumElems = Src.getNumElems();
+ const Pointer &Dst = S.Stk.peek<Pointer>();
+ PrimType ElemT = Src.getFieldDesc()->getPrimType();
+
+ for (unsigned Lane = 0; Lane != NumElems; Lane += 16) {
+ for (unsigned I = 0; I != 16; ++I) {
+ unsigned Base = Lane + I;
+ APSInt Result = APSInt(Fn(Src, Lane, I, Shift));
+ INT_TYPE_SWITCH_NO_BOOL(ElemT,
+ { Dst.elem<T>(Base) = static_cast<T>(Result); });
+ }
+ }
+
+ Dst.initializeAllElements();
+
+ return true;
+}
+
+static bool interp__builtin_ia32_shuffle_generic(
+ InterpState &S, CodePtr OpPC, const CallExpr *Call,
+ llvm::function_ref<std::pair<unsigned, unsigned>(unsigned, unsigned)>
+ GetSourceIndex) {
+
+ assert(Call->getNumArgs() == 3);
+ unsigned ShuffleMask = popToAPSInt(S, Call->getArg(2)).getZExtValue();
+
+ QualType Arg0Type = Call->getArg(0)->getType();
+ const auto *VecT = Arg0Type->castAs<VectorType>();
+ PrimType ElemT = *S.getContext().classify(VecT->getElementType());
+ unsigned NumElems = VecT->getNumElements();
+
+ const Pointer &B = S.Stk.pop<Pointer>();
+ const Pointer &A = S.Stk.pop<Pointer>();
+ const Pointer &Dst = S.Stk.peek<Pointer>();
+
+ for (unsigned DstIdx = 0; DstIdx != NumElems; ++DstIdx) {
+ auto [SrcVecIdx, SrcIdx] = GetSourceIndex(DstIdx, ShuffleMask);
+ const Pointer &Src = (SrcVecIdx == 0) ? A : B;
+ TYPE_SWITCH(ElemT, { Dst.elem<T>(DstIdx) = Src.elem<T>(SrcIdx); });
+ }
+ Dst.initializeAllElements();
+
+ return true;
+}
+
bool InterpretBuiltin(InterpState &S, CodePtr OpPC, const CallExpr *Call,
uint32_t BuiltinID) {
if (!S.getASTContext().BuiltinInfo.isConstantEvaluated(BuiltinID))
@@ -3694,6 +3794,9 @@ bool InterpretBuiltin(InterpState &S, CodePtr OpPC, const CallExpr *Call,
case Builtin::BI__builtin_ptrauth_string_discriminator:
return interp__builtin_ptrauth_string_discriminator(S, OpPC, Frame, Call);
+ case Builtin::BI__builtin_infer_alloc_token:
+ return interp__builtin_infer_alloc_token(S, OpPC, Frame, Call);
+
case Builtin::BI__noop:
pushInteger(S, 0, Call->getType());
return true;
@@ -3809,6 +3912,21 @@ bool InterpretBuiltin(InterpState &S, CodePtr OpPC, const CallExpr *Call,
return interp__builtin_ia32_movmsk_op(S, OpPC, Call);
}
+ case X86::BI__builtin_ia32_psignb128:
+ case X86::BI__builtin_ia32_psignb256:
+ case X86::BI__builtin_ia32_psignw128:
+ case X86::BI__builtin_ia32_psignw256:
+ case X86::BI__builtin_ia32_psignd128:
+ case X86::BI__builtin_ia32_psignd256:
+ return interp__builtin_elementwise_int_binop(
+ S, OpPC, Call, [](const APInt &AElem, const APInt &BElem) {
+ if (BElem.isZero())
+ return APInt::getZero(AElem.getBitWidth());
+ if (BElem.isNegative())
+ return -AElem;
+ return AElem;
+ });
+
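
PSIGN keeps, negates, or zeroes each element of the first operand based on the sign of the corresponding element of the second operand, exactly as the lambda above spells out. A tiny scalar reference model (not the builtin itself):

constexpr int psignElem(int A, int B) {
  if (B == 0)
    return 0;            // zero second operand zeroes the result
  return B < 0 ? -A : A; // negative flips the sign, positive passes through
}
static_assert(psignElem(5, -2) == -5);
static_assert(psignElem(5, 0) == 0);
static_assert(psignElem(5, 7) == 5);
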
case clang::X86::BI__builtin_ia32_pavgb128:
case clang::X86::BI__builtin_ia32_pavgw128:
case clang::X86::BI__builtin_ia32_pavgb256:
@@ -4191,6 +4309,42 @@ bool InterpretBuiltin(InterpState &S, CodePtr OpPC, const CallExpr *Call,
case X86::BI__builtin_ia32_selectpd_512:
return interp__builtin_select(S, OpPC, Call);
+ case X86::BI__builtin_ia32_shufps:
+ case X86::BI__builtin_ia32_shufps256:
+ case X86::BI__builtin_ia32_shufps512:
+ return interp__builtin_ia32_shuffle_generic(
+ S, OpPC, Call, [](unsigned DstIdx, unsigned ShuffleMask) {
+ unsigned NumElemPerLane = 4;
+ unsigned NumSelectableElems = NumElemPerLane / 2;
+ unsigned BitsPerElem = 2;
+ unsigned IndexMask = 0x3;
+ unsigned MaskBits = 8;
+ unsigned Lane = DstIdx / NumElemPerLane;
+ unsigned ElemInLane = DstIdx % NumElemPerLane;
+ unsigned LaneOffset = Lane * NumElemPerLane;
+ unsigned SrcIdx = ElemInLane >= NumSelectableElems ? 1 : 0;
+ unsigned BitIndex = (DstIdx * BitsPerElem) % MaskBits;
+ unsigned Index = (ShuffleMask >> BitIndex) & IndexMask;
+ return std::pair<unsigned, unsigned>{SrcIdx, LaneOffset + Index};
+ });
+ case X86::BI__builtin_ia32_shufpd:
+ case X86::BI__builtin_ia32_shufpd256:
+ case X86::BI__builtin_ia32_shufpd512:
+ return interp__builtin_ia32_shuffle_generic(
+ S, OpPC, Call, [](unsigned DstIdx, unsigned ShuffleMask) {
+ unsigned NumElemPerLane = 2;
+ unsigned NumSelectableElems = NumElemPerLane / 2;
+ unsigned BitsPerElem = 1;
+ unsigned IndexMask = 0x1;
+ unsigned MaskBits = 8;
+ unsigned Lane = DstIdx / NumElemPerLane;
+ unsigned ElemInLane = DstIdx % NumElemPerLane;
+ unsigned LaneOffset = Lane * NumElemPerLane;
+ unsigned SrcIdx = ElemInLane >= NumSelectableElems ? 1 : 0;
+ unsigned BitIndex = (DstIdx * BitsPerElem) % MaskBits;
+ unsigned Index = (ShuffleMask >> BitIndex) & IndexMask;
+ return std::pair<unsigned, unsigned>{SrcIdx, LaneOffset + Index};
+ });
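
Both lambdas decode the classic SHUFP immediate: each destination element consumes BitsPerElem bits of the mask (wrapping every 8 bits), the low half of each 128-bit lane selects from the first source and the high half from the second, and the decoded index is lane-relative. A scalar model of one shufps lane (illustrative):

constexpr void shufpsLane(const float A[4], const float B[4],
                          unsigned Mask, float Dst[4]) {
  for (unsigned I = 0; I != 4; ++I) {
    unsigned Index = (Mask >> (2 * I)) & 0x3; // 2 mask bits per element
    Dst[I] = (I < 2 ? A : B)[Index];          // low half: A, high half: B
  }
}
// Mask 0x1B (0b00011011) yields {A[3], A[2], B[1], B[0]}.
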
case X86::BI__builtin_ia32_pshufb128:
case X86::BI__builtin_ia32_pshufb256:
case X86::BI__builtin_ia32_pshufb512:
@@ -4331,6 +4485,39 @@ bool InterpretBuiltin(InterpState &S, CodePtr OpPC, const CallExpr *Call,
case X86::BI__builtin_ia32_vec_set_v4di:
return interp__builtin_vec_set(S, OpPC, Call, BuiltinID);
+ case X86::BI__builtin_ia32_pslldqi128_byteshift:
+ case X86::BI__builtin_ia32_pslldqi256_byteshift:
+ case X86::BI__builtin_ia32_pslldqi512_byteshift:
+  // These SLLDQ intrinsics always operate on byte elements (8 bits).
+  // The lane width is hardcoded to 16 bytes to match the 128-bit lane
+  // size (the shift never crosses a lane, even in the 256/512-bit
+  // variants), and the algorithm processes one byte per iteration, so
+  // APInt(8, ...) is correct and intentional.
+ return interp__builtin_x86_byteshift(
+ S, OpPC, Call, BuiltinID,
+ [](const Pointer &Src, unsigned Lane, unsigned I, unsigned Shift) {
+ if (I < Shift) {
+ return APInt(8, 0);
+ }
+ return APInt(8, Src.elem<uint8_t>(Lane + I - Shift));
+ });
+
+ case X86::BI__builtin_ia32_psrldqi128_byteshift:
+ case X86::BI__builtin_ia32_psrldqi256_byteshift:
+ case X86::BI__builtin_ia32_psrldqi512_byteshift:
+  // These SRLDQ intrinsics always operate on byte elements (8 bits).
+  // The lane width is hardcoded to 16 bytes to match the 128-bit lane
+  // size (the shift never crosses a lane, even in the 256/512-bit
+  // variants), and the algorithm processes one byte per iteration, so
+  // APInt(8, ...) is correct and intentional.
+ return interp__builtin_x86_byteshift(
+ S, OpPC, Call, BuiltinID,
+ [](const Pointer &Src, unsigned Lane, unsigned I, unsigned Shift) {
+ if (I + Shift < 16) {
+ return APInt(8, Src.elem<uint8_t>(Lane + I + Shift));
+ }
+
+ return APInt(8, 0);
+ });
+
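
The byte shifts never move data across a 16-byte lane; vacated bytes are zero-filled, and a shift amount of 16 or more clears the lane entirely (hence the I < Shift and I + Shift < 16 tests). A scalar model of the PSLLDQ case on a single lane (illustrative):

constexpr void pslldqLane(const unsigned char Src[16], unsigned Shift,
                          unsigned char Dst[16]) {
  for (unsigned I = 0; I != 16; ++I)
    Dst[I] = I < Shift ? 0 : Src[I - Shift]; // shift toward higher indices
}
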
default:
S.FFDiag(S.Current->getLocation(OpPC),
diag::note_invalid_subexpr_in_const_expr)
diff --git a/clang/lib/AST/ByteCode/Opcodes.td b/clang/lib/AST/ByteCode/Opcodes.td
index 406feb5..1c17ad9e 100644
--- a/clang/lib/AST/ByteCode/Opcodes.td
+++ b/clang/lib/AST/ByteCode/Opcodes.td
@@ -866,19 +866,13 @@ def Free : Opcode {
let Args = [ArgBool, ArgBool];
}
-def CheckNewTypeMismatch : Opcode {
- let Args = [ArgExpr];
-}
-
-def InvalidNewDeleteExpr : Opcode {
- let Args = [ArgExpr];
-}
-
+def CheckNewTypeMismatch : Opcode { let Args = [ArgExpr]; }
def CheckNewTypeMismatchArray : Opcode {
let Types = [IntegerTypeClass];
let Args = [ArgExpr];
let HasGroup = 1;
}
+def InvalidNewDeleteExpr : Opcode { let Args = [ArgExpr]; }
def IsConstantContext: Opcode;
def CheckAllocations : Opcode;
diff --git a/clang/lib/AST/ExprConstant.cpp b/clang/lib/AST/ExprConstant.cpp
index 00aaaab..29ee089 100644
--- a/clang/lib/AST/ExprConstant.cpp
+++ b/clang/lib/AST/ExprConstant.cpp
@@ -44,6 +44,7 @@
#include "clang/AST/CharUnits.h"
#include "clang/AST/CurrentSourceLocExprScope.h"
#include "clang/AST/Expr.h"
+#include "clang/AST/InferAlloc.h"
#include "clang/AST/OSLog.h"
#include "clang/AST/OptionalDiagnostic.h"
#include "clang/AST/RecordLayout.h"
@@ -11618,6 +11619,39 @@ static bool evalPackBuiltin(const CallExpr *E, EvalInfo &Info, APValue &Result,
return true;
}
+static bool evalShuffleGeneric(
+ EvalInfo &Info, const CallExpr *Call, APValue &Out,
+ llvm::function_ref<std::pair<unsigned, unsigned>(unsigned, unsigned)>
+ GetSourceIndex) {
+
+ const auto *VT = Call->getType()->getAs<VectorType>();
+ if (!VT)
+ return false;
+
+ APSInt MaskImm;
+ if (!EvaluateInteger(Call->getArg(2), MaskImm, Info))
+ return false;
+ unsigned ShuffleMask = static_cast<unsigned>(MaskImm.getZExtValue());
+
+ APValue A, B;
+ if (!EvaluateAsRValue(Info, Call->getArg(0), A) ||
+ !EvaluateAsRValue(Info, Call->getArg(1), B))
+ return false;
+
+ unsigned NumElts = VT->getNumElements();
+ SmallVector<APValue, 16> ResultElements;
+ ResultElements.reserve(NumElts);
+
+ for (unsigned DstIdx = 0; DstIdx != NumElts; ++DstIdx) {
+ auto [SrcVecIdx, SrcIdx] = GetSourceIndex(DstIdx, ShuffleMask);
+ const APValue &Src = (SrcVecIdx == 0) ? A : B;
+ ResultElements.push_back(Src.getVectorElt(SrcIdx));
+ }
+
+ Out = APValue(ResultElements.data(), ResultElements.size());
+ return true;
+}
+
static bool evalPshufbBuiltin(EvalInfo &Info, const CallExpr *Call,
APValue &Out) {
APValue SrcVec, ControlVec;
@@ -12312,6 +12346,20 @@ bool VectorExprEvaluator::VisitCallExpr(const CallExpr *E) {
return Success(APValue(ResultElements.data(), ResultElements.size()), E);
}
+ case X86::BI__builtin_ia32_psignb128:
+ case X86::BI__builtin_ia32_psignb256:
+ case X86::BI__builtin_ia32_psignw128:
+ case X86::BI__builtin_ia32_psignw256:
+ case X86::BI__builtin_ia32_psignd128:
+ case X86::BI__builtin_ia32_psignd256:
+ return EvaluateBinOpExpr([](const APInt &AElem, const APInt &BElem) {
+ if (BElem.isZero())
+ return APInt::getZero(AElem.getBitWidth());
+ if (BElem.isNegative())
+ return -AElem;
+ return AElem;
+ });
+
case X86::BI__builtin_ia32_blendvpd:
case X86::BI__builtin_ia32_blendvpd256:
case X86::BI__builtin_ia32_blendvps:
@@ -12383,7 +12431,56 @@ bool VectorExprEvaluator::VisitCallExpr(const CallExpr *E) {
return Success(APValue(ResultElements.data(), ResultElements.size()), E);
}
-
+ case X86::BI__builtin_ia32_shufps:
+ case X86::BI__builtin_ia32_shufps256:
+ case X86::BI__builtin_ia32_shufps512: {
+ APValue R;
+ if (!evalShuffleGeneric(
+ Info, E, R,
+ [](unsigned DstIdx,
+ unsigned ShuffleMask) -> std::pair<unsigned, unsigned> {
+ constexpr unsigned LaneBits = 128u;
+ unsigned NumElemPerLane = LaneBits / 32;
+ unsigned NumSelectableElems = NumElemPerLane / 2;
+ unsigned BitsPerElem = 2;
+ unsigned IndexMask = (1u << BitsPerElem) - 1;
+ unsigned MaskBits = 8;
+ unsigned Lane = DstIdx / NumElemPerLane;
+ unsigned ElemInLane = DstIdx % NumElemPerLane;
+ unsigned LaneOffset = Lane * NumElemPerLane;
+ unsigned BitIndex = (DstIdx * BitsPerElem) % MaskBits;
+ unsigned SrcIdx = (ElemInLane < NumSelectableElems) ? 0 : 1;
+ unsigned Index = (ShuffleMask >> BitIndex) & IndexMask;
+ return {SrcIdx, LaneOffset + Index};
+ }))
+ return false;
+ return Success(R, E);
+ }
+ case X86::BI__builtin_ia32_shufpd:
+ case X86::BI__builtin_ia32_shufpd256:
+ case X86::BI__builtin_ia32_shufpd512: {
+ APValue R;
+ if (!evalShuffleGeneric(
+ Info, E, R,
+ [](unsigned DstIdx,
+ unsigned ShuffleMask) -> std::pair<unsigned, unsigned> {
+ constexpr unsigned LaneBits = 128u;
+ unsigned NumElemPerLane = LaneBits / 64;
+ unsigned NumSelectableElems = NumElemPerLane / 2;
+ unsigned BitsPerElem = 1;
+ unsigned IndexMask = (1u << BitsPerElem) - 1;
+ unsigned MaskBits = 8;
+ unsigned Lane = DstIdx / NumElemPerLane;
+ unsigned ElemInLane = DstIdx % NumElemPerLane;
+ unsigned LaneOffset = Lane * NumElemPerLane;
+ unsigned BitIndex = (DstIdx * BitsPerElem) % MaskBits;
+ unsigned SrcIdx = (ElemInLane < NumSelectableElems) ? 0 : 1;
+ unsigned Index = (ShuffleMask >> BitIndex) & IndexMask;
+ return {SrcIdx, LaneOffset + Index};
+ }))
+ return false;
+ return Success(R, E);
+ }
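
These cases make the SHUFP family usable in constant expressions through the vector-extension types the x86 intrinsics headers are built on. A hedged usage sketch (assuming the builtin signature used by the headers):

typedef float V4SF __attribute__((vector_size(16)));

constexpr V4SF A = {0.0f, 1.0f, 2.0f, 3.0f};
constexpr V4SF B = {4.0f, 5.0f, 6.0f, 7.0f};
// Mask 0x1B selects A[3], A[2], B[1], B[0].
constexpr V4SF R = __builtin_ia32_shufps(A, B, 0x1B);
static_assert(R[0] == 3.0f && R[3] == 4.0f);
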
case X86::BI__builtin_ia32_pshufb128:
case X86::BI__builtin_ia32_pshufb256:
case X86::BI__builtin_ia32_pshufb512: {
@@ -12891,6 +12988,66 @@ bool VectorExprEvaluator::VisitCallExpr(const CallExpr *E) {
return Success(APValue(Elems.data(), NumElems), E);
}
+
+ case X86::BI__builtin_ia32_pslldqi128_byteshift:
+ case X86::BI__builtin_ia32_pslldqi256_byteshift:
+ case X86::BI__builtin_ia32_pslldqi512_byteshift: {
+ assert(E->getNumArgs() == 2);
+
+ APValue Src;
+ APSInt Imm;
+ if (!EvaluateAsRValue(Info, E->getArg(0), Src) ||
+ !EvaluateInteger(E->getArg(1), Imm, Info))
+ return false;
+
+ unsigned VecLen = Src.getVectorLength();
+ unsigned Shift = Imm.getZExtValue() & 0xff;
+
+ SmallVector<APValue> ResultElements;
+ for (unsigned Lane = 0; Lane != VecLen; Lane += 16) {
+ for (unsigned I = 0; I != 16; ++I) {
+ if (I < Shift) {
+ APSInt Zero(8, /*isUnsigned=*/true);
+ Zero = 0;
+ ResultElements.push_back(APValue(Zero));
+ } else {
+ ResultElements.push_back(Src.getVectorElt(Lane + I - Shift));
+ }
+ }
+ }
+
+ return Success(APValue(ResultElements.data(), ResultElements.size()), E);
+ }
+
+ case X86::BI__builtin_ia32_psrldqi128_byteshift:
+ case X86::BI__builtin_ia32_psrldqi256_byteshift:
+ case X86::BI__builtin_ia32_psrldqi512_byteshift: {
+ assert(E->getNumArgs() == 2);
+
+ APValue Src;
+ APSInt Imm;
+ if (!EvaluateAsRValue(Info, E->getArg(0), Src) ||
+ !EvaluateInteger(E->getArg(1), Imm, Info))
+ return false;
+
+ unsigned VecLen = Src.getVectorLength();
+ unsigned Shift = Imm.getZExtValue() & 0xff;
+
+ SmallVector<APValue> ResultElements;
+ for (unsigned Lane = 0; Lane != VecLen; Lane += 16) {
+ for (unsigned I = 0; I != 16; ++I) {
+ if (I + Shift < 16) {
+ ResultElements.push_back(Src.getVectorElt(Lane + I + Shift));
+ } else {
+ APSInt Zero(8, /*isUnsigned=*/true);
+ Zero = 0;
+ ResultElements.push_back(APValue(Zero));
+ }
+ }
+ }
+
+ return Success(APValue(ResultElements.data(), ResultElements.size()), E);
+ }
}
}
@@ -14649,6 +14806,27 @@ bool IntExprEvaluator::VisitBuiltinCallExpr(const CallExpr *E,
return Success(Result, E);
}
+ case Builtin::BI__builtin_infer_alloc_token: {
+ // If we fail to infer a type, this fails to be a constant expression; this
+ // can be checked with __builtin_constant_p(...).
+ QualType AllocType = infer_alloc::inferPossibleType(E, Info.Ctx, nullptr);
+ if (AllocType.isNull())
+ return Error(
+ E, diag::note_constexpr_infer_alloc_token_type_inference_failed);
+ auto ATMD = infer_alloc::getAllocTokenMetadata(AllocType, Info.Ctx);
+ if (!ATMD)
+ return Error(E, diag::note_constexpr_infer_alloc_token_no_metadata);
+ auto Mode =
+ Info.getLangOpts().AllocTokenMode.value_or(llvm::DefaultAllocTokenMode);
+ uint64_t BitWidth = Info.Ctx.getTypeSize(Info.Ctx.getSizeType());
+ uint64_t MaxTokens =
+ Info.getLangOpts().AllocTokenMax.value_or(~0ULL >> (64 - BitWidth));
+ auto MaybeToken = llvm::getAllocToken(Mode, *ATMD, MaxTokens);
+ if (!MaybeToken)
+ return Error(E, diag::note_constexpr_infer_alloc_token_stateful_mode);
+ return Success(llvm::APInt(BitWidth, *MaybeToken), E);
+ }
+
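
As the comment above notes, a failed inference surfaces as "not a constant expression" rather than a hard error, so callers can probe with __builtin_constant_p first. A hedged sketch (exact behavior depends on the -falloc-token mode in effect):

// True only if a type can be inferred and the token mode is stateless.
constexpr bool HasToken =
    __builtin_constant_p(__builtin_infer_alloc_token(sizeof(int)));
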
case Builtin::BI__builtin_ffs:
case Builtin::BI__builtin_ffsl:
case Builtin::BI__builtin_ffsll: {
diff --git a/clang/lib/AST/StmtOpenACC.cpp b/clang/lib/AST/StmtOpenACC.cpp
index 2b56c1e..462a10d 100644
--- a/clang/lib/AST/StmtOpenACC.cpp
+++ b/clang/lib/AST/StmtOpenACC.cpp
@@ -324,6 +324,18 @@ OpenACCAtomicConstruct *OpenACCAtomicConstruct::Create(
return Inst;
}
+static std::pair<const Expr *, const Expr *> getBinaryOpArgs(const Expr *Op) {
+ if (const auto *BO = dyn_cast<BinaryOperator>(Op)) {
+ assert(BO->getOpcode() == BO_Assign);
+ return {BO->getLHS(), BO->getRHS()};
+ }
+
+ const auto *OO = cast<CXXOperatorCallExpr>(Op);
+ assert(OO->getOperator() == OO_Equal);
+
+ return {OO->getArg(0), OO->getArg(1)};
+}
+
const OpenACCAtomicConstruct::StmtInfo
OpenACCAtomicConstruct::getAssociatedStmtInfo() const {
// This ends up being a vastly simplified version of SemaOpenACCAtomic, since
@@ -333,27 +345,35 @@ OpenACCAtomicConstruct::getAssociatedStmtInfo() const {
switch (AtomicKind) {
case OpenACCAtomicKind::None:
- case OpenACCAtomicKind::Write:
case OpenACCAtomicKind::Update:
case OpenACCAtomicKind::Capture:
- assert(false && "Only 'read' has been implemented here");
+ assert(false && "Only 'read'/'write' have been implemented here");
return {};
case OpenACCAtomicKind::Read: {
// Read only supports the format 'v = x', where both sides are scalar
// expressions. This can come in two forms: BinaryOperator or
// CXXOperatorCallExpr (rarely).
- const Expr *AssignExpr = cast<const Expr>(getAssociatedStmt());
- if (const auto *BO = dyn_cast<BinaryOperator>(AssignExpr)) {
- assert(BO->getOpcode() == BO_Assign);
- return {BO->getLHS()->IgnoreImpCasts(), BO->getRHS()->IgnoreImpCasts()};
- }
-
- const auto *OO = cast<CXXOperatorCallExpr>(AssignExpr);
- assert(OO->getOperator() == OO_Equal);
-
- return {OO->getArg(0)->IgnoreImpCasts(), OO->getArg(1)->IgnoreImpCasts()};
+ std::pair<const Expr *, const Expr *> BinaryArgs =
+ getBinaryOpArgs(cast<const Expr>(getAssociatedStmt()));
+ // We want the L-value for each side, so we ignore implicit casts.
+ return {BinaryArgs.first->IgnoreImpCasts(),
+ BinaryArgs.second->IgnoreImpCasts(), /*expr=*/nullptr};
}
+ case OpenACCAtomicKind::Write: {
+  // Write only supports the format 'x = expr', where the expression has
+  // scalar type and 'x' is a scalar l-value. As above, this can come in
+  // two forms: BinaryOperator or CXXOperatorCallExpr.
+ std::pair<const Expr *, const Expr *> BinaryArgs =
+ getBinaryOpArgs(cast<const Expr>(getAssociatedStmt()));
+  // We want the L-value for ONLY the 'x' side, so we ignore implicit
+  // casts there. The right side (the expr) is emitted as an r-value, so
+  // we keep its implicit casts.
+ return {/*v=*/nullptr, BinaryArgs.first->IgnoreImpCasts(),
+ BinaryArgs.second};
}
+ }
+
+ llvm_unreachable("unknown OpenACC atomic kind");
}
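
For reference, the two shapes getAssociatedStmtInfo now handles (v and x are scalar l-values, e a scalar expression; illustrative):

void example(int &v, int &x, int e) {
#pragma acc atomic read
  v = x;   // StmtInfo = {v, x, /*expr=*/nullptr}

#pragma acc atomic write
  x = e;   // StmtInfo = {/*v=*/nullptr, x, e}
}
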
OpenACCCacheConstruct *OpenACCCacheConstruct::CreateEmpty(const ASTContext &C,