Diffstat (limited to 'clang/lib/AST/ExprConstant.cpp')
| -rw-r--r-- | clang/lib/AST/ExprConstant.cpp | 180 |
1 file changed, 179 insertions, 1 deletion
diff --git a/clang/lib/AST/ExprConstant.cpp b/clang/lib/AST/ExprConstant.cpp
index 00aaaab..29ee089 100644
--- a/clang/lib/AST/ExprConstant.cpp
+++ b/clang/lib/AST/ExprConstant.cpp
@@ -44,6 +44,7 @@
 #include "clang/AST/CharUnits.h"
 #include "clang/AST/CurrentSourceLocExprScope.h"
 #include "clang/AST/Expr.h"
+#include "clang/AST/InferAlloc.h"
 #include "clang/AST/OSLog.h"
 #include "clang/AST/OptionalDiagnostic.h"
 #include "clang/AST/RecordLayout.h"
@@ -11618,6 +11619,39 @@ static bool evalPackBuiltin(const CallExpr *E, EvalInfo &Info, APValue &Result,
   return true;
 }
 
+static bool evalShuffleGeneric(
+    EvalInfo &Info, const CallExpr *Call, APValue &Out,
+    llvm::function_ref<std::pair<unsigned, unsigned>(unsigned, unsigned)>
+        GetSourceIndex) {
+
+  const auto *VT = Call->getType()->getAs<VectorType>();
+  if (!VT)
+    return false;
+
+  APSInt MaskImm;
+  if (!EvaluateInteger(Call->getArg(2), MaskImm, Info))
+    return false;
+  unsigned ShuffleMask = static_cast<unsigned>(MaskImm.getZExtValue());
+
+  APValue A, B;
+  if (!EvaluateAsRValue(Info, Call->getArg(0), A) ||
+      !EvaluateAsRValue(Info, Call->getArg(1), B))
+    return false;
+
+  unsigned NumElts = VT->getNumElements();
+  SmallVector<APValue, 16> ResultElements;
+  ResultElements.reserve(NumElts);
+
+  for (unsigned DstIdx = 0; DstIdx != NumElts; ++DstIdx) {
+    auto [SrcVecIdx, SrcIdx] = GetSourceIndex(DstIdx, ShuffleMask);
+    const APValue &Src = (SrcVecIdx == 0) ? A : B;
+    ResultElements.push_back(Src.getVectorElt(SrcIdx));
+  }
+
+  Out = APValue(ResultElements.data(), ResultElements.size());
+  return true;
+}
+
 static bool evalPshufbBuiltin(EvalInfo &Info, const CallExpr *Call,
                               APValue &Out) {
   APValue SrcVec, ControlVec;
@@ -12312,6 +12346,20 @@ bool VectorExprEvaluator::VisitCallExpr(const CallExpr *E) {
     return Success(APValue(ResultElements.data(), ResultElements.size()), E);
   }
+  case X86::BI__builtin_ia32_psignb128:
+  case X86::BI__builtin_ia32_psignb256:
+  case X86::BI__builtin_ia32_psignw128:
+  case X86::BI__builtin_ia32_psignw256:
+  case X86::BI__builtin_ia32_psignd128:
+  case X86::BI__builtin_ia32_psignd256:
+    return EvaluateBinOpExpr([](const APInt &AElem, const APInt &BElem) {
+      if (BElem.isZero())
+        return APInt::getZero(AElem.getBitWidth());
+      if (BElem.isNegative())
+        return -AElem;
+      return AElem;
+    });
+
   case X86::BI__builtin_ia32_blendvpd:
   case X86::BI__builtin_ia32_blendvpd256:
   case X86::BI__builtin_ia32_blendvps:
@@ -12383,7 +12431,56 @@ bool VectorExprEvaluator::VisitCallExpr(const CallExpr *E) {
     return Success(APValue(ResultElements.data(), ResultElements.size()), E);
   }
-
+  case X86::BI__builtin_ia32_shufps:
+  case X86::BI__builtin_ia32_shufps256:
+  case X86::BI__builtin_ia32_shufps512: {
+    APValue R;
+    if (!evalShuffleGeneric(
+            Info, E, R,
+            [](unsigned DstIdx,
+               unsigned ShuffleMask) -> std::pair<unsigned, unsigned> {
+              constexpr unsigned LaneBits = 128u;
+              unsigned NumElemPerLane = LaneBits / 32;
+              unsigned NumSelectableElems = NumElemPerLane / 2;
+              unsigned BitsPerElem = 2;
+              unsigned IndexMask = (1u << BitsPerElem) - 1;
+              unsigned MaskBits = 8;
+              unsigned Lane = DstIdx / NumElemPerLane;
+              unsigned ElemInLane = DstIdx % NumElemPerLane;
+              unsigned LaneOffset = Lane * NumElemPerLane;
+              unsigned BitIndex = (DstIdx * BitsPerElem) % MaskBits;
+              unsigned SrcIdx = (ElemInLane < NumSelectableElems) ? 0 : 1;
+              unsigned Index = (ShuffleMask >> BitIndex) & IndexMask;
+              return {SrcIdx, LaneOffset + Index};
+            }))
+      return false;
+    return Success(R, E);
+  }
+  case X86::BI__builtin_ia32_shufpd:
+  case X86::BI__builtin_ia32_shufpd256:
+  case X86::BI__builtin_ia32_shufpd512: {
+    APValue R;
+    if (!evalShuffleGeneric(
+            Info, E, R,
+            [](unsigned DstIdx,
+               unsigned ShuffleMask) -> std::pair<unsigned, unsigned> {
+              constexpr unsigned LaneBits = 128u;
+              unsigned NumElemPerLane = LaneBits / 64;
+              unsigned NumSelectableElems = NumElemPerLane / 2;
+              unsigned BitsPerElem = 1;
+              unsigned IndexMask = (1u << BitsPerElem) - 1;
+              unsigned MaskBits = 8;
+              unsigned Lane = DstIdx / NumElemPerLane;
+              unsigned ElemInLane = DstIdx % NumElemPerLane;
+              unsigned LaneOffset = Lane * NumElemPerLane;
+              unsigned BitIndex = (DstIdx * BitsPerElem) % MaskBits;
+              unsigned SrcIdx = (ElemInLane < NumSelectableElems) ? 0 : 1;
+              unsigned Index = (ShuffleMask >> BitIndex) & IndexMask;
+              return {SrcIdx, LaneOffset + Index};
+            }))
+      return false;
+    return Success(R, E);
+  }
   case X86::BI__builtin_ia32_pshufb128:
   case X86::BI__builtin_ia32_pshufb256:
   case X86::BI__builtin_ia32_pshufb512: {
@@ -12891,6 +12988,66 @@ bool VectorExprEvaluator::VisitCallExpr(const CallExpr *E) {
     return Success(APValue(Elems.data(), NumElems), E);
   }
+
+  case X86::BI__builtin_ia32_pslldqi128_byteshift:
+  case X86::BI__builtin_ia32_pslldqi256_byteshift:
+  case X86::BI__builtin_ia32_pslldqi512_byteshift: {
+    assert(E->getNumArgs() == 2);
+
+    APValue Src;
+    APSInt Imm;
+    if (!EvaluateAsRValue(Info, E->getArg(0), Src) ||
+        !EvaluateInteger(E->getArg(1), Imm, Info))
+      return false;
+
+    unsigned VecLen = Src.getVectorLength();
+    unsigned Shift = Imm.getZExtValue() & 0xff;
+
+    SmallVector<APValue> ResultElements;
+    for (unsigned Lane = 0; Lane != VecLen; Lane += 16) {
+      for (unsigned I = 0; I != 16; ++I) {
+        if (I < Shift) {
+          APSInt Zero(8, /*isUnsigned=*/true);
+          Zero = 0;
+          ResultElements.push_back(APValue(Zero));
+        } else {
+          ResultElements.push_back(Src.getVectorElt(Lane + I - Shift));
+        }
+      }
+    }
+
+    return Success(APValue(ResultElements.data(), ResultElements.size()), E);
+  }
+
+  case X86::BI__builtin_ia32_psrldqi128_byteshift:
+  case X86::BI__builtin_ia32_psrldqi256_byteshift:
+  case X86::BI__builtin_ia32_psrldqi512_byteshift: {
+    assert(E->getNumArgs() == 2);
+
+    APValue Src;
+    APSInt Imm;
+    if (!EvaluateAsRValue(Info, E->getArg(0), Src) ||
+        !EvaluateInteger(E->getArg(1), Imm, Info))
+      return false;
+
+    unsigned VecLen = Src.getVectorLength();
+    unsigned Shift = Imm.getZExtValue() & 0xff;
+
+    SmallVector<APValue> ResultElements;
+    for (unsigned Lane = 0; Lane != VecLen; Lane += 16) {
+      for (unsigned I = 0; I != 16; ++I) {
+        if (I + Shift < 16) {
+          ResultElements.push_back(Src.getVectorElt(Lane + I + Shift));
+        } else {
+          APSInt Zero(8, /*isUnsigned=*/true);
+          Zero = 0;
+          ResultElements.push_back(APValue(Zero));
+        }
+      }
+    }
+
+    return Success(APValue(ResultElements.data(), ResultElements.size()), E);
+  }
   }
 }
@@ -14649,6 +14806,27 @@ bool IntExprEvaluator::VisitBuiltinCallExpr(const CallExpr *E,
     return Success(Result, E);
   }
+  case Builtin::BI__builtin_infer_alloc_token: {
+    // If we fail to infer a type, this fails to be a constant expression; this
+    // can be checked with __builtin_constant_p(...).
+    QualType AllocType = infer_alloc::inferPossibleType(E, Info.Ctx, nullptr);
+    if (AllocType.isNull())
+      return Error(
+          E, diag::note_constexpr_infer_alloc_token_type_inference_failed);
+    auto ATMD = infer_alloc::getAllocTokenMetadata(AllocType, Info.Ctx);
+    if (!ATMD)
+      return Error(E, diag::note_constexpr_infer_alloc_token_no_metadata);
+    auto Mode =
+        Info.getLangOpts().AllocTokenMode.value_or(llvm::DefaultAllocTokenMode);
+    uint64_t BitWidth = Info.Ctx.getTypeSize(Info.Ctx.getSizeType());
+    uint64_t MaxTokens =
+        Info.getLangOpts().AllocTokenMax.value_or(~0ULL >> (64 - BitWidth));
+    auto MaybeToken = llvm::getAllocToken(Mode, *ATMD, MaxTokens);
+    if (!MaybeToken)
+      return Error(E, diag::note_constexpr_infer_alloc_token_stateful_mode);
+    return Success(llvm::APInt(BitWidth, *MaybeToken), E);
+  }
+
   case Builtin::BI__builtin_ffs:
   case Builtin::BI__builtin_ffsl:
   case Builtin::BI__builtin_ffsll: {
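For reference (not part of the patch): the lambdas passed to evalShuffleGeneric above decode the 8-bit shuffle immediate per 128-bit lane. A minimal standalone C++17 sketch of the same index math for the shufps case, with hypothetical names, is shown below; each destination element consumes two mask bits, and the upper half of each lane reads from the second source vector.

    #include <utility>

    // Mirrors the shufps lambda: returns {source vector index, element index}
    // for a given destination element and 8-bit immediate.
    constexpr std::pair<unsigned, unsigned> shufpsSource(unsigned DstIdx,
                                                         unsigned Mask) {
      unsigned SrcVec = (DstIdx % 4 < 2) ? 0 : 1;          // low half: A, high half: B
      unsigned Index = (Mask >> ((DstIdx * 2) % 8)) & 0x3; // 2 mask bits per element
      return {SrcVec, (DstIdx / 4) * 4 + Index};           // stay within the lane
    }

    // Mask 0x4E == _MM_SHUFFLE(1, 0, 3, 2): dst = {A[2], A[3], B[0], B[1]}.
    static_assert(shufpsSource(0, 0x4E) == std::pair<unsigned, unsigned>(0, 2));
    static_assert(shufpsSource(1, 0x4E) == std::pair<unsigned, unsigned>(0, 3));
    static_assert(shufpsSource(2, 0x4E) == std::pair<unsigned, unsigned>(1, 0));
    static_assert(shufpsSource(3, 0x4E) == std::pair<unsigned, unsigned>(1, 1));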
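Similarly, the pslldqi*/psrldqi byteshift evaluators shift each 16-byte lane independently and zero-fill the vacated bytes, so a shift count of 16 or more clears the lane. A standalone C++17 sketch of one lane of the left shift (an illustration, not the patch's code):

    #include <array>
    #include <cstdint>

    // One 16-byte lane of PSLLDQ: result byte I is source byte I - Shift,
    // or zero when I < Shift (hence Shift >= 16 clears the whole lane).
    constexpr std::array<std::uint8_t, 16>
    pslldqLane(std::array<std::uint8_t, 16> Lane, unsigned Shift) {
      std::array<std::uint8_t, 16> Out{}; // zero-initialized
      for (unsigned I = 0; I < 16; ++I)
        if (I >= Shift)
          Out[I] = Lane[I - Shift];
      return Out;
    }

    static_assert(pslldqLane({1, 2, 3, 4}, 2)[2] == 1); // byte 0 moves to byte 2
    static_assert(pslldqLane({1, 2, 3, 4}, 2)[0] == 0); // low bytes zero-filled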
