aboutsummaryrefslogtreecommitdiff
path: root/clang/lib/AST
diff options
context:
space:
mode:
Diffstat (limited to 'clang/lib/AST')
-rw-r--r--clang/lib/AST/ASTContext.cpp139
-rw-r--r--clang/lib/AST/ASTImporter.cpp81
-rw-r--r--clang/lib/AST/ASTStructuralEquivalence.cpp144
-rw-r--r--clang/lib/AST/AttrImpl.cpp81
-rw-r--r--clang/lib/AST/ByteCode/BitcastBuffer.h13
-rw-r--r--clang/lib/AST/ByteCode/ByteCodeEmitter.cpp22
-rw-r--r--clang/lib/AST/ByteCode/ByteCodeEmitter.h2
-rw-r--r--clang/lib/AST/ByteCode/Compiler.cpp762
-rw-r--r--clang/lib/AST/ByteCode/Compiler.h119
-rw-r--r--clang/lib/AST/ByteCode/Context.cpp112
-rw-r--r--clang/lib/AST/ByteCode/Context.h21
-rw-r--r--clang/lib/AST/ByteCode/Descriptor.cpp40
-rw-r--r--clang/lib/AST/ByteCode/Descriptor.h42
-rw-r--r--clang/lib/AST/ByteCode/Disasm.cpp51
-rw-r--r--clang/lib/AST/ByteCode/EvalEmitter.cpp50
-rw-r--r--clang/lib/AST/ByteCode/Floating.h3
-rw-r--r--clang/lib/AST/ByteCode/Function.cpp14
-rw-r--r--clang/lib/AST/ByteCode/Function.h55
-rw-r--r--clang/lib/AST/ByteCode/InitMap.cpp54
-rw-r--r--clang/lib/AST/ByteCode/InitMap.h123
-rw-r--r--clang/lib/AST/ByteCode/Integral.h28
-rw-r--r--clang/lib/AST/ByteCode/IntegralAP.h2
-rw-r--r--clang/lib/AST/ByteCode/Interp.cpp486
-rw-r--r--clang/lib/AST/ByteCode/Interp.h510
-rw-r--r--clang/lib/AST/ByteCode/InterpBlock.cpp22
-rw-r--r--clang/lib/AST/ByteCode/InterpBlock.h10
-rw-r--r--clang/lib/AST/ByteCode/InterpBuiltin.cpp2276
-rw-r--r--clang/lib/AST/ByteCode/InterpFrame.cpp44
-rw-r--r--clang/lib/AST/ByteCode/InterpFrame.h9
-rw-r--r--clang/lib/AST/ByteCode/InterpHelpers.h144
-rw-r--r--clang/lib/AST/ByteCode/InterpState.cpp30
-rw-r--r--clang/lib/AST/ByteCode/InterpState.h58
-rw-r--r--clang/lib/AST/ByteCode/MemberPointer.cpp11
-rw-r--r--clang/lib/AST/ByteCode/Opcodes.td66
-rw-r--r--clang/lib/AST/ByteCode/Pointer.cpp178
-rw-r--r--clang/lib/AST/ByteCode/Pointer.h99
-rw-r--r--clang/lib/AST/ByteCode/PrimType.h4
-rw-r--r--clang/lib/AST/ByteCode/Program.cpp97
-rw-r--r--clang/lib/AST/ByteCode/Program.h3
-rw-r--r--clang/lib/AST/ByteCode/Record.h16
-rw-r--r--clang/lib/AST/ByteCode/Source.h1
-rw-r--r--clang/lib/AST/ByteCode/State.cpp87
-rw-r--r--clang/lib/AST/ByteCode/State.h74
-rw-r--r--clang/lib/AST/CMakeLists.txt2
-rw-r--r--clang/lib/AST/CXXInheritance.cpp4
-rw-r--r--clang/lib/AST/Comment.cpp8
-rw-r--r--clang/lib/AST/CommentSema.cpp6
-rw-r--r--clang/lib/AST/ComparisonCategories.cpp12
-rw-r--r--clang/lib/AST/ComputeDependence.cpp10
-rw-r--r--clang/lib/AST/Decl.cpp115
-rw-r--r--clang/lib/AST/DeclBase.cpp2
-rw-r--r--clang/lib/AST/DeclCXX.cpp73
-rw-r--r--clang/lib/AST/DeclPrinter.cpp120
-rw-r--r--clang/lib/AST/DeclTemplate.cpp6
-rw-r--r--clang/lib/AST/Expr.cpp105
-rw-r--r--clang/lib/AST/ExprCXX.cpp18
-rw-r--r--clang/lib/AST/ExprClassification.cpp19
-rw-r--r--clang/lib/AST/ExprConcepts.cpp9
-rw-r--r--clang/lib/AST/ExprConstShared.h10
-rw-r--r--clang/lib/AST/ExprConstant.cpp2782
-rw-r--r--clang/lib/AST/ExprObjC.cpp3
-rw-r--r--clang/lib/AST/FormatString.cpp2
-rw-r--r--clang/lib/AST/InferAlloc.cpp201
-rw-r--r--clang/lib/AST/ItaniumMangle.cpp23
-rw-r--r--clang/lib/AST/JSONNodeDumper.cpp46
-rw-r--r--clang/lib/AST/MicrosoftMangle.cpp22
-rw-r--r--clang/lib/AST/OpenMPClause.cpp93
-rw-r--r--clang/lib/AST/RecordLayoutBuilder.cpp21
-rw-r--r--clang/lib/AST/Stmt.cpp65
-rw-r--r--clang/lib/AST/StmtOpenACC.cpp252
-rw-r--r--clang/lib/AST/StmtPrinter.cpp28
-rw-r--r--clang/lib/AST/StmtProfile.cpp31
-rw-r--r--clang/lib/AST/TemplateBase.cpp15
-rw-r--r--clang/lib/AST/TextNodeDumper.cpp9
-rw-r--r--clang/lib/AST/Type.cpp4
-rw-r--r--clang/lib/AST/TypePrinter.cpp139
-rw-r--r--clang/lib/AST/VTableBuilder.cpp43
77 files changed, 8452 insertions, 2029 deletions
diff --git a/clang/lib/AST/ASTContext.cpp b/clang/lib/AST/ASTContext.cpp
index 32c8f62..3f63420 100644
--- a/clang/lib/AST/ASTContext.cpp
+++ b/clang/lib/AST/ASTContext.cpp
@@ -1648,6 +1648,9 @@ ASTContext::findPointerAuthContent(QualType T) const {
if (!RD)
return PointerAuthContent::None;
+ if (RD->isInvalidDecl())
+ return PointerAuthContent::None;
+
if (auto Existing = RecordContainsAddressDiscriminatedPointerAuth.find(RD);
Existing != RecordContainsAddressDiscriminatedPointerAuth.end())
return Existing->second;
@@ -3105,9 +3108,9 @@ TypeSourceInfo *ASTContext::CreateTypeSourceInfo(QualType T,
TypeSourceInfo *ASTContext::getTrivialTypeSourceInfo(QualType T,
SourceLocation L) const {
- TypeSourceInfo *DI = CreateTypeSourceInfo(T);
- DI->getTypeLoc().initialize(const_cast<ASTContext &>(*this), L);
- return DI;
+ TypeSourceInfo *TSI = CreateTypeSourceInfo(T);
+ TSI->getTypeLoc().initialize(const_cast<ASTContext &>(*this), L);
+ return TSI;
}
const ASTRecordLayout &
@@ -3517,7 +3520,6 @@ static void encodeTypeForFunctionPointerAuth(const ASTContext &Ctx,
uint16_t ASTContext::getPointerAuthTypeDiscriminator(QualType T) {
assert(!T->isDependentType() &&
"cannot compute type discriminator of a dependent type");
-
SmallString<256> Str;
llvm::raw_svector_ostream Out(Str);
@@ -4710,7 +4712,7 @@ QualType ASTContext::getConstantMatrixType(QualType ElementTy, unsigned NumRows,
ConstantMatrixType::Profile(ID, ElementTy, NumRows, NumColumns,
Type::ConstantMatrix);
- assert(MatrixType::isValidElementType(ElementTy) &&
+ assert(MatrixType::isValidElementType(ElementTy, getLangOpts()) &&
"need a valid element type");
assert(NumRows > 0 && NumRows <= LangOpts.MaxMatrixDimension &&
NumColumns > 0 && NumColumns <= LangOpts.MaxMatrixDimension &&
@@ -5889,11 +5891,11 @@ TypeSourceInfo *ASTContext::getTemplateSpecializationTypeInfo(
QualType TST = getTemplateSpecializationType(
Keyword, Name, SpecifiedArgs.arguments(), CanonicalArgs, Underlying);
- TypeSourceInfo *DI = CreateTypeSourceInfo(TST);
- DI->getTypeLoc().castAs<TemplateSpecializationTypeLoc>().set(
+ TypeSourceInfo *TSI = CreateTypeSourceInfo(TST);
+ TSI->getTypeLoc().castAs<TemplateSpecializationTypeLoc>().set(
ElaboratedKeywordLoc, QualifierLoc, TemplateKeywordLoc, NameLoc,
SpecifiedArgs);
- return DI;
+ return TSI;
}
QualType ASTContext::getTemplateSpecializationType(
@@ -10525,6 +10527,21 @@ bool ASTContext::areCompatibleVectorTypes(QualType FirstVec,
Second->getVectorKind() != VectorKind::RVVFixedLengthMask_4)
return true;
+ // In OpenCL, treat half and _Float16 vector types as compatible.
+ if (getLangOpts().OpenCL &&
+ First->getNumElements() == Second->getNumElements()) {
+ QualType FirstElt = First->getElementType();
+ QualType SecondElt = Second->getElementType();
+
+ if ((FirstElt->isFloat16Type() && SecondElt->isHalfType()) ||
+ (FirstElt->isHalfType() && SecondElt->isFloat16Type())) {
+ if (First->getVectorKind() != VectorKind::AltiVecPixel &&
+ First->getVectorKind() != VectorKind::AltiVecBool &&
+ Second->getVectorKind() != VectorKind::AltiVecPixel &&
+ Second->getVectorKind() != VectorKind::AltiVecBool)
+ return true;
+ }
+ }
return false;
}
@@ -12038,7 +12055,7 @@ bool ASTContext::mergeExtParameterInfo(
void ASTContext::ResetObjCLayout(const ObjCInterfaceDecl *D) {
if (auto It = ObjCLayouts.find(D); It != ObjCLayouts.end()) {
It->second = nullptr;
- for (auto *SubClass : ObjCSubClasses[D])
+ for (auto *SubClass : ObjCSubClasses.lookup(D))
ResetObjCLayout(SubClass);
}
}
@@ -12400,7 +12417,8 @@ static QualType DecodeTypeFromStr(const char *&Str, const ASTContext &Context,
// Read the base type.
switch (*Str++) {
- default: llvm_unreachable("Unknown builtin type letter!");
+ default:
+ llvm_unreachable("Unknown builtin type letter!");
case 'x':
assert(HowLong == 0 && !Signed && !Unsigned &&
"Bad modifiers used with 'x'!");
@@ -12533,6 +12551,10 @@ static QualType DecodeTypeFromStr(const char *&Str, const ASTContext &Context,
Type = Context.AMDGPUTextureTy;
break;
}
+ case 'r': {
+ Type = Context.HLSLResourceTy;
+ break;
+ }
default:
llvm_unreachable("Unexpected target builtin type");
}
@@ -13202,6 +13224,18 @@ MangleContext *ASTContext::createDeviceMangleContext(const TargetInfo &T) {
llvm_unreachable("Unsupported ABI");
}
+MangleContext *ASTContext::cudaNVInitDeviceMC() {
+ // If the host and device have different C++ ABIs, mark it as the device
+ // mangle context so that the mangling needs to retrieve the additional
+ // device lambda mangling number instead of the regular host one.
+ if (getAuxTargetInfo() && getTargetInfo().getCXXABI().isMicrosoft() &&
+ getAuxTargetInfo()->getCXXABI().isItaniumFamily()) {
+ return createDeviceMangleContext(*getAuxTargetInfo());
+ }
+
+ return createMangleContext(getAuxTargetInfo());
+}
+
CXXABI::~CXXABI() = default;
size_t ASTContext::getSideTableAllocatedMemory() const {
@@ -13326,6 +13360,91 @@ bool ASTContext::isTypeAwareOperatorNewOrDelete(const FunctionDecl *FD) const {
return TypeAwareOperatorNewAndDeletes.contains(FD->getCanonicalDecl());
}
+void ASTContext::addOperatorDeleteForVDtor(const CXXDestructorDecl *Dtor,
+ FunctionDecl *OperatorDelete,
+ OperatorDeleteKind K) const {
+ switch (K) {
+ case OperatorDeleteKind::Regular:
+ OperatorDeletesForVirtualDtor[Dtor->getCanonicalDecl()] = OperatorDelete;
+ break;
+ case OperatorDeleteKind::GlobalRegular:
+ GlobalOperatorDeletesForVirtualDtor[Dtor->getCanonicalDecl()] =
+ OperatorDelete;
+ break;
+ case OperatorDeleteKind::Array:
+ ArrayOperatorDeletesForVirtualDtor[Dtor->getCanonicalDecl()] =
+ OperatorDelete;
+ break;
+ case OperatorDeleteKind::ArrayGlobal:
+ GlobalArrayOperatorDeletesForVirtualDtor[Dtor->getCanonicalDecl()] =
+ OperatorDelete;
+ break;
+ }
+}
+
+bool ASTContext::dtorHasOperatorDelete(const CXXDestructorDecl *Dtor,
+ OperatorDeleteKind K) const {
+ switch (K) {
+ case OperatorDeleteKind::Regular:
+ return OperatorDeletesForVirtualDtor.contains(Dtor->getCanonicalDecl());
+ case OperatorDeleteKind::GlobalRegular:
+ return GlobalOperatorDeletesForVirtualDtor.contains(
+ Dtor->getCanonicalDecl());
+ case OperatorDeleteKind::Array:
+ return ArrayOperatorDeletesForVirtualDtor.contains(
+ Dtor->getCanonicalDecl());
+ case OperatorDeleteKind::ArrayGlobal:
+ return GlobalArrayOperatorDeletesForVirtualDtor.contains(
+ Dtor->getCanonicalDecl());
+ }
+ return false;
+}
+
+FunctionDecl *
+ASTContext::getOperatorDeleteForVDtor(const CXXDestructorDecl *Dtor,
+ OperatorDeleteKind K) const {
+ const CXXDestructorDecl *Canon = Dtor->getCanonicalDecl();
+ switch (K) {
+ case OperatorDeleteKind::Regular:
+ if (OperatorDeletesForVirtualDtor.contains(Canon))
+ return OperatorDeletesForVirtualDtor[Canon];
+ return nullptr;
+ case OperatorDeleteKind::GlobalRegular:
+ if (GlobalOperatorDeletesForVirtualDtor.contains(Canon))
+ return GlobalOperatorDeletesForVirtualDtor[Canon];
+ return nullptr;
+ case OperatorDeleteKind::Array:
+ if (ArrayOperatorDeletesForVirtualDtor.contains(Canon))
+ return ArrayOperatorDeletesForVirtualDtor[Canon];
+ return nullptr;
+ case OperatorDeleteKind::ArrayGlobal:
+ if (GlobalArrayOperatorDeletesForVirtualDtor.contains(Canon))
+ return GlobalArrayOperatorDeletesForVirtualDtor[Canon];
+ return nullptr;
+ }
+ return nullptr;
+}
+
+bool ASTContext::classNeedsVectorDeletingDestructor(const CXXRecordDecl *RD) {
+ if (!getTargetInfo().emitVectorDeletingDtors(getLangOpts()))
+ return false;
+ CXXDestructorDecl *Dtor = RD->getDestructor();
+ // The compiler can't know if new[]/delete[] will be used outside of the DLL,
+ // so just force vector deleting destructor emission if dllexport is present.
+ // This matches MSVC behavior.
+ if (Dtor && Dtor->isVirtual() && Dtor->hasAttr<DLLExportAttr>())
+ return true;
+
+ return RequireVectorDeletingDtor.count(RD);
+}
+
+void ASTContext::setClassNeedsVectorDeletingDestructor(
+ const CXXRecordDecl *RD) {
+ if (!getTargetInfo().emitVectorDeletingDtors(getLangOpts()))
+ return;
+ RequireVectorDeletingDtor.insert(RD);
+}
+
MangleNumberingContext &
ASTContext::getManglingNumberContext(const DeclContext *DC) {
assert(LangOpts.CPlusPlus); // We don't need mangling numbers for plain C.
diff --git a/clang/lib/AST/ASTImporter.cpp b/clang/lib/AST/ASTImporter.cpp
index bf51c3e..101ab2c 100644
--- a/clang/lib/AST/ASTImporter.cpp
+++ b/clang/lib/AST/ASTImporter.cpp
@@ -696,6 +696,10 @@ namespace clang {
ExpectedStmt VisitCXXFoldExpr(CXXFoldExpr *E);
ExpectedStmt VisitRequiresExpr(RequiresExpr* E);
ExpectedStmt VisitConceptSpecializationExpr(ConceptSpecializationExpr* E);
+ ExpectedStmt
+ VisitSubstNonTypeTemplateParmPackExpr(SubstNonTypeTemplateParmPackExpr *E);
+ ExpectedStmt VisitPseudoObjectExpr(PseudoObjectExpr *E);
+ ExpectedStmt VisitCXXParenListInitExpr(CXXParenListInitExpr *E);
// Helper for chaining together multiple imports. If an error is detected,
// subsequent imports will return default constructed nodes, so that failure
@@ -1287,6 +1291,26 @@ bool ASTNodeImporter::hasSameVisibilityContextAndLinkage(TypedefNameDecl *Found,
using namespace clang;
+auto ASTImporter::FunctionDeclImportCycleDetector::makeScopedCycleDetection(
+ const FunctionDecl *D) {
+ const FunctionDecl *LambdaD = nullptr;
+ if (!isCycle(D) && D) {
+ FunctionDeclsWithImportInProgress.insert(D);
+ LambdaD = D;
+ }
+ return llvm::scope_exit([this, LambdaD]() {
+ if (LambdaD) {
+ FunctionDeclsWithImportInProgress.erase(LambdaD);
+ }
+ });
+}
+
+bool ASTImporter::FunctionDeclImportCycleDetector::isCycle(
+ const FunctionDecl *D) const {
+ return FunctionDeclsWithImportInProgress.find(D) !=
+ FunctionDeclsWithImportInProgress.end();
+}
+
ExpectedType ASTNodeImporter::VisitType(const Type *T) {
Importer.FromDiag(SourceLocation(), diag::err_unsupported_ast_node)
<< T->getTypeClassName();
@@ -2523,8 +2547,7 @@ Error ASTNodeImporter::ImportDefinition(
// Complete the definition even if error is returned.
// The RecordDecl may be already part of the AST so it is better to
// have it in complete state even if something is wrong with it.
- auto DefinitionCompleterScopeExit =
- llvm::make_scope_exit(DefinitionCompleter);
+ llvm::scope_exit DefinitionCompleterScopeExit(DefinitionCompleter);
if (Error Err = setTypedefNameForAnonDecl(From, To, Importer))
return Err;
@@ -4034,7 +4057,10 @@ ExpectedDecl ASTNodeImporter::VisitFunctionDecl(FunctionDecl *D) {
// E.g.: auto foo() { struct X{}; return X(); }
// To avoid an infinite recursion when importing, create the FunctionDecl
// with a simplified return type.
- if (hasReturnTypeDeclaredInside(D)) {
+ // Reuse this approach for auto return types declared as typenames from
+ // template params, tracked in FindFunctionDeclImportCycle.
+ if (hasReturnTypeDeclaredInside(D) ||
+ Importer.FindFunctionDeclImportCycle.isCycle(D)) {
FromReturnTy = Importer.getFromContext().VoidTy;
UsedDifferentProtoType = true;
}
@@ -4057,6 +4083,8 @@ ExpectedDecl ASTNodeImporter::VisitFunctionDecl(FunctionDecl *D) {
}
Error Err = Error::success();
+ auto ScopedReturnTypeDeclCycleDetector =
+ Importer.FindFunctionDeclImportCycle.makeScopedCycleDetection(D);
auto T = importChecked(Err, FromTy);
auto TInfo = importChecked(Err, FromTSI);
auto ToInnerLocStart = importChecked(Err, D->getInnerLocStart());
@@ -9273,6 +9301,50 @@ ASTNodeImporter::VisitConceptSpecializationExpr(ConceptSpecializationExpr *E) {
const_cast<ImplicitConceptSpecializationDecl *>(CSD), &Satisfaction);
}
+ExpectedStmt ASTNodeImporter::VisitSubstNonTypeTemplateParmPackExpr(
+ SubstNonTypeTemplateParmPackExpr *E) {
+ Error Err = Error::success();
+ auto ToType = importChecked(Err, E->getType());
+ auto ToPackLoc = importChecked(Err, E->getParameterPackLocation());
+ auto ToArgPack = importChecked(Err, E->getArgumentPack());
+ auto ToAssociatedDecl = importChecked(Err, E->getAssociatedDecl());
+ if (Err)
+ return std::move(Err);
+
+ return new (Importer.getToContext()) SubstNonTypeTemplateParmPackExpr(
+ ToType, E->getValueKind(), ToPackLoc, ToArgPack, ToAssociatedDecl,
+ E->getIndex(), E->getFinal());
+}
+
+ExpectedStmt ASTNodeImporter::VisitPseudoObjectExpr(PseudoObjectExpr *E) {
+ SmallVector<Expr *, 4> ToSemantics(E->getNumSemanticExprs());
+ if (Error Err = ImportContainerChecked(E->semantics(), ToSemantics))
+ return std::move(Err);
+ auto ToSyntOrErr = import(E->getSyntacticForm());
+ if (!ToSyntOrErr)
+ return ToSyntOrErr.takeError();
+ return PseudoObjectExpr::Create(Importer.getToContext(), *ToSyntOrErr,
+ ToSemantics, E->getResultExprIndex());
+}
+
+ExpectedStmt
+ASTNodeImporter::VisitCXXParenListInitExpr(CXXParenListInitExpr *E) {
+ Error Err = Error::success();
+ auto ToType = importChecked(Err, E->getType());
+ auto ToInitLoc = importChecked(Err, E->getInitLoc());
+ auto ToBeginLoc = importChecked(Err, E->getBeginLoc());
+ auto ToEndLoc = importChecked(Err, E->getEndLoc());
+ if (Err)
+ return std::move(Err);
+
+ SmallVector<Expr *, 4> ToArgs(E->getInitExprs().size());
+ if (Error Err = ImportContainerChecked(E->getInitExprs(), ToArgs))
+ return std::move(Err);
+ return CXXParenListInitExpr::Create(Importer.getToContext(), ToArgs, ToType,
+ E->getUserSpecifiedInitExprs().size(),
+ ToInitLoc, ToBeginLoc, ToEndLoc);
+}
+
Error ASTNodeImporter::ImportOverriddenMethods(CXXMethodDecl *ToMethod,
CXXMethodDecl *FromMethod) {
Error ImportErrors = Error::success();
@@ -9726,8 +9798,7 @@ Expected<Decl *> ASTImporter::Import(Decl *FromD) {
// Push FromD to the stack, and remove that when we return.
ImportPath.push(FromD);
- auto ImportPathBuilder =
- llvm::make_scope_exit([this]() { ImportPath.pop(); });
+ llvm::scope_exit ImportPathBuilder([this]() { ImportPath.pop(); });
// Check whether there was a previous failed import.
// If yes return the existing error.
diff --git a/clang/lib/AST/ASTStructuralEquivalence.cpp b/clang/lib/AST/ASTStructuralEquivalence.cpp
index da64c92..3d1ef2f 100644
--- a/clang/lib/AST/ASTStructuralEquivalence.cpp
+++ b/clang/lib/AST/ASTStructuralEquivalence.cpp
@@ -88,6 +88,7 @@
#include "clang/Basic/SourceLocation.h"
#include "llvm/ADT/APInt.h"
#include "llvm/ADT/APSInt.h"
+#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/StringExtras.h"
#include "llvm/Support/Compiler.h"
#include "llvm/Support/ErrorHandling.h"
@@ -451,35 +452,73 @@ public:
};
} // namespace
+namespace {
+/// Represents the result of comparing the attribute sets on two decls. If the
+/// sets are incompatible, A1/A2 point to the offending attributes.
+struct AttrComparisonResult {
+ bool Kind = false;
+ const Attr *A1 = nullptr, *A2 = nullptr;
+};
+} // namespace
+
+namespace {
+using AttrSet = llvm::SmallVector<const Attr *, 2>;
+}
+
+/// Determines whether D1 and D2 have compatible sets of attributes for the
+/// purposes of structural equivalence checking.
+static AttrComparisonResult
+areDeclAttrsEquivalent(const Decl *D1, const Decl *D2,
+ StructuralEquivalenceContext &Context) {
+ // If either declaration is implicit (i.e., compiler-generated, like
+ // __NSConstantString_tags), treat the declarations' attributes as equivalent.
+ if (D1->isImplicit() || D2->isImplicit())
+ return {true};
+
+ AttrSet A1, A2;
+
+ // Ignore inherited attributes.
+ auto RemoveInherited = [](const Attr *A) { return !A->isInherited(); };
+
+ llvm::copy_if(D1->attrs(), std::back_inserter(A1), RemoveInherited);
+ llvm::copy_if(D2->attrs(), std::back_inserter(A2), RemoveInherited);
+
+ StructuralEquivalenceContext::AttrScopedAttrEquivalenceContext AttrCtx(
+ Context);
+ auto I1 = A1.begin(), E1 = A1.end(), I2 = A2.begin(), E2 = A2.end();
+ for (; I1 != E1 && I2 != E2; ++I1, ++I2) {
+ bool R = (*I1)->isEquivalent(**I2, Context);
+ if (R)
+ R = !Context.checkDeclQueue();
+ if (!R)
+ return {false, *I1, *I2};
+ }
+
+ if (I1 != E1)
+ return {false, *I1};
+ if (I2 != E2)
+ return {false, nullptr, *I2};
+
+ return {true};
+}
+
static bool
CheckStructurallyEquivalentAttributes(StructuralEquivalenceContext &Context,
const Decl *D1, const Decl *D2,
const Decl *PrimaryDecl = nullptr) {
- // If either declaration has an attribute on it, we treat the declarations
- // as not being structurally equivalent unless both declarations are implicit
- // (ones generated by the compiler like __NSConstantString_tag).
- //
- // FIXME: this should be handled on a case-by-case basis via tablegen in
- // Attr.td. There are multiple cases to consider: one declaration with the
- // attribute, another without it; different attribute syntax|spellings for
- // the same semantic attribute, differences in attribute arguments, order
- // in which attributes are applied, how to merge attributes if the types are
- // structurally equivalent, etc.
- const Attr *D1Attr = nullptr, *D2Attr = nullptr;
- if (D1->hasAttrs())
- D1Attr = *D1->getAttrs().begin();
- if (D2->hasAttrs())
- D2Attr = *D2->getAttrs().begin();
- if ((D1Attr || D2Attr) && !D1->isImplicit() && !D2->isImplicit()) {
- const auto *DiagnoseDecl = cast<TypeDecl>(PrimaryDecl ? PrimaryDecl : D2);
- Context.Diag2(DiagnoseDecl->getLocation(),
- diag::warn_odr_tag_type_with_attributes)
- << Context.ToCtx.getTypeDeclType(DiagnoseDecl)
- << (PrimaryDecl != nullptr);
- if (D1Attr)
- Context.Diag1(D1Attr->getLoc(), diag::note_odr_attr_here) << D1Attr;
- if (D2Attr)
- Context.Diag1(D2Attr->getLoc(), diag::note_odr_attr_here) << D2Attr;
+ if (Context.Complain) {
+ AttrComparisonResult R = areDeclAttrsEquivalent(D1, D2, Context);
+ if (!R.Kind) {
+ const auto *DiagnoseDecl = cast<TypeDecl>(PrimaryDecl ? PrimaryDecl : D2);
+ Context.Diag2(DiagnoseDecl->getLocation(),
+ diag::warn_odr_tag_type_with_attributes)
+ << Context.ToCtx.getTypeDeclType(DiagnoseDecl)
+ << (PrimaryDecl != nullptr);
+ if (R.A1)
+ Context.Diag1(R.A1->getLoc(), diag::note_odr_attr_here) << R.A1;
+ if (R.A2)
+ Context.Diag2(R.A2->getLoc(), diag::note_odr_attr_here) << R.A2;
+ }
}
// The above diagnostic is a warning which defaults to an error. If treated
@@ -523,8 +562,8 @@ static bool IsStructurallyEquivalent(StructuralEquivalenceContext &Context,
}
/// Determine structural equivalence of two statements.
-static bool IsStructurallyEquivalent(StructuralEquivalenceContext &Context,
- const Stmt *S1, const Stmt *S2) {
+bool ASTStructuralEquivalence::isEquivalent(
+ StructuralEquivalenceContext &Context, const Stmt *S1, const Stmt *S2) {
if (!S1 || !S2)
return S1 == S2;
@@ -568,15 +607,25 @@ static bool IsStructurallyEquivalent(StructuralEquivalenceContext &Context,
return true;
}
+static bool IsStructurallyEquivalent(StructuralEquivalenceContext &Context,
+ const Stmt *S1, const Stmt *S2) {
+ return ASTStructuralEquivalence::isEquivalent(Context, S1, S2);
+}
+
/// Determine whether two identifiers are equivalent.
-static bool IsStructurallyEquivalent(const IdentifierInfo *Name1,
- const IdentifierInfo *Name2) {
+bool ASTStructuralEquivalence::isEquivalent(const IdentifierInfo *Name1,
+ const IdentifierInfo *Name2) {
if (!Name1 || !Name2)
return Name1 == Name2;
return Name1->getName() == Name2->getName();
}
+static bool IsStructurallyEquivalent(const IdentifierInfo *Name1,
+ const IdentifierInfo *Name2) {
+ return ASTStructuralEquivalence::isEquivalent(Name1, Name2);
+}
+
/// Determine whether two nested-name-specifiers are equivalent.
static bool IsStructurallyEquivalent(StructuralEquivalenceContext &Context,
NestedNameSpecifier NNS1,
@@ -836,8 +885,8 @@ static bool IsEquivalentExceptionSpec(StructuralEquivalenceContext &Context,
}
/// Determine structural equivalence of two types.
-static bool IsStructurallyEquivalent(StructuralEquivalenceContext &Context,
- QualType T1, QualType T2) {
+bool ASTStructuralEquivalence::isEquivalent(
+ StructuralEquivalenceContext &Context, QualType T1, QualType T2) {
if (T1.isNull() || T2.isNull())
return T1.isNull() && T2.isNull();
@@ -1493,6 +1542,11 @@ static bool IsStructurallyEquivalent(StructuralEquivalenceContext &Context,
}
static bool IsStructurallyEquivalent(StructuralEquivalenceContext &Context,
+ QualType T1, QualType T2) {
+ return ASTStructuralEquivalence::isEquivalent(Context, T1, T2);
+}
+
+static bool IsStructurallyEquivalent(StructuralEquivalenceContext &Context,
VarDecl *D1, VarDecl *D2) {
IdentifierInfo *Name1 = D1->getIdentifier();
IdentifierInfo *Name2 = D2->getIdentifier();
@@ -1612,6 +1666,14 @@ static bool IsStructurallyEquivalent(StructuralEquivalenceContext &Context,
Context.ToCtx.getCanonicalTagType(Owner2));
}
+/// Determine structural equivalence of two IndirectFields.
+static bool IsStructurallyEquivalent(StructuralEquivalenceContext &Context,
+ IndirectFieldDecl *ID1,
+ IndirectFieldDecl *ID2) {
+ return IsStructurallyEquivalent(Context, ID1->getAnonField(),
+ ID2->getAnonField());
+}
+
/// Determine structural equivalence of two methods.
static bool IsStructurallyEquivalent(StructuralEquivalenceContext &Context,
CXXMethodDecl *Method1,
@@ -1791,12 +1853,6 @@ static bool IsStructurallyEquivalent(StructuralEquivalenceContext &Context,
}
}
- // In C23 mode, check for structural equivalence of attributes on the record
- // itself. FIXME: Should this happen in C++ as well?
- if (Context.LangOpts.C23 &&
- !CheckStructurallyEquivalentAttributes(Context, D1, D2))
- return false;
-
// If the records occur in different context (namespace), these should be
// different. This is specially important if the definition of one or both
// records is missing. In C23, different contexts do not make for a different
@@ -1838,6 +1894,12 @@ static bool IsStructurallyEquivalent(StructuralEquivalenceContext &Context,
if (!D1 || !D2)
return !Context.LangOpts.C23;
+ // In C23 mode, check for structural equivalence of attributes on the record
+ // itself. FIXME: Should this happen in C++ as well?
+ if (Context.LangOpts.C23 &&
+ !CheckStructurallyEquivalentAttributes(Context, D1, D2))
+ return false;
+
// If any of the records has external storage and we do a minimal check (or
// AST import) we assume they are equivalent. (If we didn't have this
// assumption then `RecordDecl::LoadFieldsFromExternalStorage` could trigger
@@ -2522,6 +2584,10 @@ static bool IsStructurallyEquivalent(StructuralEquivalenceContext &Context,
D1 = D1->getCanonicalDecl();
D2 = D2->getCanonicalDecl();
+
+ if (D1 == D2)
+ return true;
+
std::pair<Decl *, Decl *> P{D1, D2};
// Check whether we already know that these two declarations are not
@@ -2716,7 +2782,7 @@ bool StructuralEquivalenceContext::CheckKindSpecificEquivalence(
return true;
}
-bool StructuralEquivalenceContext::Finish() {
+bool StructuralEquivalenceContext::checkDeclQueue() {
while (!DeclsToCheck.empty()) {
// Check the next declaration.
std::pair<Decl *, Decl *> P = DeclsToCheck.front();
@@ -2740,3 +2806,5 @@ bool StructuralEquivalenceContext::Finish() {
return false;
}
+
+bool StructuralEquivalenceContext::Finish() { return checkDeclQueue(); }
diff --git a/clang/lib/AST/AttrImpl.cpp b/clang/lib/AST/AttrImpl.cpp
index 5875a92..0c7e81f 100644
--- a/clang/lib/AST/AttrImpl.cpp
+++ b/clang/lib/AST/AttrImpl.cpp
@@ -11,10 +11,12 @@
//===----------------------------------------------------------------------===//
#include "clang/AST/ASTContext.h"
+#include "clang/AST/ASTStructuralEquivalence.h"
#include "clang/AST/Attr.h"
#include "clang/AST/Expr.h"
#include "clang/AST/Type.h"
#include <optional>
+#include <type_traits>
using namespace clang;
void LoopHintAttr::printPrettyPragma(raw_ostream &OS,
@@ -280,4 +282,83 @@ StringLiteral *FormatMatchesAttr::getFormatString() const {
return cast<StringLiteral>(getExpectedFormat());
}
+namespace {
+// Arguments whose types fail this test never compare equal unless there's a
+// specialization of equalAttrArgs for the type. Specilization for the following
+// arguments haven't been implemented yet:
+// - DeclArgument
+// - OMPTraitInfoArgument
+// - VariadicOMPInteropInfoArgument
+#define USE_DEFAULT_EQUALITY \
+ (std::is_same_v<T, StringRef> || std::is_same_v<T, VersionTuple> || \
+ std::is_same_v<T, IdentifierInfo *> || std::is_same_v<T, ParamIdx> || \
+ std::is_same_v<T, Attr *> || std::is_same_v<T, char *> || \
+ std::is_enum_v<T> || std::is_integral_v<T>)
+
+template <class T>
+typename std::enable_if_t<!USE_DEFAULT_EQUALITY, bool>
+equalAttrArgs(T A, T B, StructuralEquivalenceContext &Context) {
+ return false;
+}
+
+template <class T>
+typename std::enable_if_t<USE_DEFAULT_EQUALITY, bool>
+equalAttrArgs(T A1, T A2, StructuralEquivalenceContext &Context) {
+ return A1 == A2;
+}
+
+template <class T>
+bool equalAttrArgs(T *A1_B, T *A1_E, T *A2_B, T *A2_E,
+ StructuralEquivalenceContext &Context) {
+ if (A1_E - A1_B != A2_E - A2_B)
+ return false;
+
+ for (; A1_B != A1_E; ++A1_B, ++A2_B)
+ if (!equalAttrArgs(*A1_B, *A2_B, Context))
+ return false;
+
+ return true;
+}
+
+template <>
+bool equalAttrArgs<Attr *>(Attr *A1, Attr *A2,
+ StructuralEquivalenceContext &Context) {
+ return A1->isEquivalent(*A2, Context);
+}
+
+template <>
+bool equalAttrArgs<Expr *>(Expr *A1, Expr *A2,
+ StructuralEquivalenceContext &Context) {
+ return ASTStructuralEquivalence::isEquivalent(Context, A1, A2);
+}
+
+template <>
+bool equalAttrArgs<QualType>(QualType T1, QualType T2,
+ StructuralEquivalenceContext &Context) {
+ return ASTStructuralEquivalence::isEquivalent(Context, T1, T2);
+}
+
+template <>
+bool equalAttrArgs<const IdentifierInfo *>(
+ const IdentifierInfo *Name1, const IdentifierInfo *Name2,
+ StructuralEquivalenceContext &Context) {
+ return ASTStructuralEquivalence::isEquivalent(Name1, Name2);
+}
+
+bool areAlignedAttrsEqual(const AlignedAttr &A1, const AlignedAttr &A2,
+ StructuralEquivalenceContext &Context) {
+ if (A1.getSpelling() != A2.getSpelling())
+ return false;
+
+ if (A1.isAlignmentExpr() != A2.isAlignmentExpr())
+ return false;
+
+ if (A1.isAlignmentExpr())
+ return equalAttrArgs(A1.getAlignmentExpr(), A2.getAlignmentExpr(), Context);
+
+ return equalAttrArgs(A1.getAlignmentType()->getType(),
+ A2.getAlignmentType()->getType(), Context);
+}
+} // namespace
+
#include "clang/AST/AttrImpl.inc"
diff --git a/clang/lib/AST/ByteCode/BitcastBuffer.h b/clang/lib/AST/ByteCode/BitcastBuffer.h
index d1d6ee3..8d32351 100644
--- a/clang/lib/AST/ByteCode/BitcastBuffer.h
+++ b/clang/lib/AST/ByteCode/BitcastBuffer.h
@@ -89,6 +89,12 @@ struct BitcastBuffer {
Data = std::make_unique<std::byte[]>(ByteSize);
}
+ /// Returns the byte at the given offset.
+ std::byte *atByte(unsigned Offset) {
+ assert(Offset < FinalBitSize.roundToBytes());
+ return Data.get() + Offset;
+ }
+
/// Returns the buffer size in bits.
Bits size() const { return FinalBitSize; }
Bytes byteSize() const { return FinalBitSize.toBytes(); }
@@ -113,6 +119,13 @@ struct BitcastBuffer {
std::unique_ptr<std::byte[]> copyBits(Bits BitOffset, Bits BitWidth,
Bits FullBitWidth,
Endian TargetEndianness) const;
+
+ /// Dereferences the value at the given offset.
+ template <typename T> T deref(Bytes Offset) const {
+ assert(Offset.getQuantity() < FinalBitSize.roundToBytes());
+ assert((Offset.getQuantity() + sizeof(T)) <= FinalBitSize.roundToBytes());
+ return *reinterpret_cast<T *>(Data.get() + Offset.getQuantity());
+ }
};
} // namespace interp
diff --git a/clang/lib/AST/ByteCode/ByteCodeEmitter.cpp b/clang/lib/AST/ByteCode/ByteCodeEmitter.cpp
index ed74376..3582108 100644
--- a/clang/lib/AST/ByteCode/ByteCodeEmitter.cpp
+++ b/clang/lib/AST/ByteCode/ByteCodeEmitter.cpp
@@ -32,15 +32,15 @@ void ByteCodeEmitter::compileFunc(const FunctionDecl *FuncDecl,
return;
// Set up lambda captures.
- if (const auto *MD = dyn_cast<CXXMethodDecl>(FuncDecl);
- MD && isLambdaCallOperator(MD)) {
+ if (Func->isLambdaCallOperator()) {
// Set up lambda capture to closure record field mapping.
- const Record *R = P.getOrCreateRecord(MD->getParent());
+ const CXXRecordDecl *ParentDecl = Func->getParentDecl();
+ const Record *R = P.getOrCreateRecord(ParentDecl);
assert(R);
llvm::DenseMap<const ValueDecl *, FieldDecl *> LC;
FieldDecl *LTC;
- MD->getParent()->getCaptureFields(LC, LTC);
+ ParentDecl->getCaptureFields(LC, LTC);
for (auto Cap : LC) {
unsigned Offset = R->getField(Cap.second)->Offset;
@@ -54,14 +54,18 @@ void ByteCodeEmitter::compileFunc(const FunctionDecl *FuncDecl,
}
}
+ bool IsValid = !FuncDecl->isInvalidDecl();
// Register parameters with their offset.
unsigned ParamIndex = 0;
unsigned Drop = Func->hasRVO() +
(Func->hasThisPointer() && !Func->isThisPointerExplicit());
- for (auto ParamOffset : llvm::drop_begin(Func->ParamOffsets, Drop)) {
- const ParmVarDecl *PD = FuncDecl->parameters()[ParamIndex];
- OptPrimType T = Ctx.classify(PD->getType());
- this->Params.insert({PD, {ParamOffset, T != std::nullopt}});
+
+ for (const auto &ParamDesc : llvm::drop_begin(Func->ParamDescriptors, Drop)) {
+ const ParmVarDecl *PD = FuncDecl->getParamDecl(ParamIndex);
+ if (PD->isInvalidDecl())
+ IsValid = false;
+ this->Params.insert(
+ {PD, {ParamDesc.Offset, Ctx.canClassify(PD->getType())}});
++ParamIndex;
}
@@ -86,7 +90,7 @@ void ByteCodeEmitter::compileFunc(const FunctionDecl *FuncDecl,
// Set the function's code.
Func->setCode(FuncDecl, NextLocalOffset, std::move(Code), std::move(SrcMap),
- std::move(Scopes), FuncDecl->hasBody());
+ std::move(Scopes), FuncDecl->hasBody(), IsValid);
Func->setIsFullyCompiled(true);
}
diff --git a/clang/lib/AST/ByteCode/ByteCodeEmitter.h b/clang/lib/AST/ByteCode/ByteCodeEmitter.h
index ca8dc38..dd18341 100644
--- a/clang/lib/AST/ByteCode/ByteCodeEmitter.h
+++ b/clang/lib/AST/ByteCode/ByteCodeEmitter.h
@@ -25,11 +25,11 @@ enum Opcode : uint32_t;
/// An emitter which links the program to bytecode for later use.
class ByteCodeEmitter {
protected:
- using LabelTy = uint32_t;
using AddrTy = uintptr_t;
using Local = Scope::Local;
public:
+ using LabelTy = uint32_t;
/// Compiles the function into the module.
void compileFunc(const FunctionDecl *FuncDecl, Function *Func = nullptr);
diff --git a/clang/lib/AST/ByteCode/Compiler.cpp b/clang/lib/AST/ByteCode/Compiler.cpp
index 74cae03..7aa7a8d 100644
--- a/clang/lib/AST/ByteCode/Compiler.cpp
+++ b/clang/lib/AST/ByteCode/Compiler.cpp
@@ -16,6 +16,7 @@
#include "PrimType.h"
#include "Program.h"
#include "clang/AST/Attr.h"
+#include "llvm/Support/SaveAndRestore.h"
using namespace clang;
using namespace clang::interp;
@@ -39,7 +40,7 @@ static std::optional<bool> getBoolValue(const Expr *E) {
template <class Emitter> class DeclScope final : public LocalScope<Emitter> {
public:
DeclScope(Compiler<Emitter> *Ctx, const ValueDecl *VD)
- : LocalScope<Emitter>(Ctx, VD), Scope(Ctx->P),
+ : LocalScope<Emitter>(Ctx), Scope(Ctx->P),
OldInitializingDecl(Ctx->InitializingDecl) {
Ctx->InitializingDecl = VD;
Ctx->InitStack.push_back(InitLink::Decl(VD));
@@ -476,8 +477,11 @@ bool Compiler<Emitter>::VisitCastExpr(const CastExpr *CE) {
return this->delegate(SubExpr);
case CK_BitCast: {
+ if (CE->containsErrors())
+ return false;
+ QualType CETy = CE->getType();
// Reject bitcasts to atomic types.
- if (CE->getType()->isAtomicType()) {
+ if (CETy->isAtomicType()) {
if (!this->discard(SubExpr))
return false;
return this->emitInvalidCast(CastKind::Reinterpret, /*Fatal=*/true, CE);
@@ -494,6 +498,7 @@ bool Compiler<Emitter>::VisitCastExpr(const CastExpr *CE) {
assert(isPtrType(*FromT));
assert(isPtrType(*ToT));
+ bool SrcIsVoidPtr = SubExprTy->isVoidPointerType();
if (FromT == ToT) {
if (CE->getType()->isVoidPointerType() &&
!SubExprTy->isFunctionPointerType()) {
@@ -502,6 +507,10 @@ bool Compiler<Emitter>::VisitCastExpr(const CastExpr *CE) {
if (!this->visit(SubExpr))
return false;
+ if (!this->emitCheckBitCast(CETy->getPointeeType().getTypePtr(),
+ SrcIsVoidPtr, CE))
+ return false;
+
if (CE->getType()->isFunctionPointerType() ||
SubExprTy->isFunctionPointerType()) {
return this->emitFnPtrCast(CE);
@@ -767,6 +776,14 @@ bool Compiler<Emitter>::VisitCastExpr(const CastExpr *CE) {
case CK_ToVoid:
return discard(SubExpr);
+ case CK_Dynamic:
+ // This initially goes through VisitCXXDynamicCastExpr, where we emit
+ // a diagnostic if appropriate.
+ return this->delegate(SubExpr);
+
+ case CK_LValueBitCast:
+ return this->emitInvalidCast(CastKind::ReinterpretLike, /*Fatal=*/true, CE);
+
default:
return this->emitInvalid(CE);
}
@@ -1033,8 +1050,15 @@ bool Compiler<Emitter>::VisitPointerArithBinOp(const BinaryOperator *E) {
if (!visitAsPointer(RHS, *RT) || !visitAsPointer(LHS, *LT))
return false;
+ QualType ElemType = LHS->getType()->getPointeeType();
+ CharUnits ElemTypeSize;
+ if (ElemType->isVoidType() || ElemType->isFunctionType())
+ ElemTypeSize = CharUnits::One();
+ else
+ ElemTypeSize = Ctx.getASTContext().getTypeSizeInChars(ElemType);
+
PrimType IntT = classifyPrim(E->getType());
- if (!this->emitSubPtr(IntT, E))
+ if (!this->emitSubPtr(IntT, ElemTypeSize.isZero(), E))
return false;
return DiscardResult ? this->emitPop(IntT, E) : true;
}
@@ -1058,24 +1082,27 @@ bool Compiler<Emitter>::VisitPointerArithBinOp(const BinaryOperator *E) {
// Do the operation and optionally transform to
// result pointer type.
- if (Op == BO_Add) {
+ switch (Op) {
+ case BO_Add:
if (!this->emitAddOffset(OffsetType, E))
return false;
-
- if (classifyPrim(E) != PT_Ptr)
- return this->emitDecayPtr(PT_Ptr, classifyPrim(E), E);
- return true;
- }
- if (Op == BO_Sub) {
+ break;
+ case BO_Sub:
if (!this->emitSubOffset(OffsetType, E))
return false;
+ break;
+ default:
+ return false;
+ }
- if (classifyPrim(E) != PT_Ptr)
- return this->emitDecayPtr(PT_Ptr, classifyPrim(E), E);
- return true;
+ if (classifyPrim(E) != PT_Ptr) {
+ if (!this->emitDecayPtr(PT_Ptr, classifyPrim(E), E))
+ return false;
}
- return false;
+ if (DiscardResult)
+ return this->emitPop(classifyPrim(E), E);
+ return true;
}
template <class Emitter>
@@ -1190,7 +1217,11 @@ bool Compiler<Emitter>::VisitComplexBinOp(const BinaryOperator *E) {
return false;
if (!this->visit(RHS))
return false;
- return this->emitMulc(ElemT, E);
+ if (!this->emitMulc(ElemT, E))
+ return false;
+ if (DiscardResult)
+ return this->emitPopPtr(E);
+ return true;
}
if (Op == BO_Div && RHSIsComplex) {
@@ -1227,7 +1258,11 @@ bool Compiler<Emitter>::VisitComplexBinOp(const BinaryOperator *E) {
if (!this->visit(RHS))
return false;
- return this->emitDivc(ElemT, E);
+ if (!this->emitDivc(ElemT, E))
+ return false;
+ if (DiscardResult)
+ return this->emitPopPtr(E);
+ return true;
}
// Evaluate LHS and save value to LHSOffset.
@@ -1362,6 +1397,10 @@ bool Compiler<Emitter>::VisitComplexBinOp(const BinaryOperator *E) {
} else {
if (!this->emitPop(ResultElemT, E))
return false;
+ // Remove the Complex temporary pointer we created ourselves at the
+ // beginning of this function.
+ if (!Initializing)
+ return this->emitPopPtr(E);
}
}
return true;
@@ -1686,6 +1725,9 @@ bool Compiler<Emitter>::VisitFixedPointUnaryOperator(const UnaryOperator *E) {
template <class Emitter>
bool Compiler<Emitter>::VisitImplicitValueInitExpr(
const ImplicitValueInitExpr *E) {
+ if (DiscardResult)
+ return true;
+
QualType QT = E->getType();
if (OptPrimType T = classify(QT))
@@ -1749,6 +1791,9 @@ bool Compiler<Emitter>::VisitImplicitValueInitExpr(
template <class Emitter>
bool Compiler<Emitter>::VisitArraySubscriptExpr(const ArraySubscriptExpr *E) {
+ if (E->getType()->isVoidType())
+ return false;
+
const Expr *LHS = E->getLHS();
const Expr *RHS = E->getRHS();
const Expr *Index = E->getIdx();
@@ -1842,7 +1887,6 @@ bool Compiler<Emitter>::visitInitList(ArrayRef<const Expr *> Inits,
const Expr *Init, PrimType T,
bool Activate = false) -> bool {
InitStackScope<Emitter> ISS(this, isa<CXXDefaultInitExpr>(Init));
- InitLinkScope<Emitter> ILS(this, InitLink::Field(FieldToInit->Offset));
if (!this->visit(Init))
return false;
@@ -2116,8 +2160,7 @@ bool Compiler<Emitter>::visitCallArgs(ArrayRef<const Expr *> Args,
}
UnsignedOrNone LocalIndex =
- allocateLocal(std::move(Source), Arg->getType(),
- /*ExtendingDecl=*/nullptr, ScopeKind::Call);
+ allocateLocal(std::move(Source), Arg->getType(), ScopeKind::Call);
if (!LocalIndex)
return false;
@@ -2212,6 +2255,7 @@ static CharUnits AlignOfType(QualType T, const ASTContext &ASTCtx,
template <class Emitter>
bool Compiler<Emitter>::VisitUnaryExprOrTypeTraitExpr(
const UnaryExprOrTypeTraitExpr *E) {
+
UnaryExprOrTypeTrait Kind = E->getKind();
const ASTContext &ASTCtx = Ctx.getASTContext();
@@ -2286,6 +2330,9 @@ bool Compiler<Emitter>::VisitUnaryExprOrTypeTraitExpr(
// Argument is an expression, not a type.
const Expr *Arg = E->getArgumentExpr()->IgnoreParens();
+ if (Arg->getType()->isDependentType())
+ return false;
+
// The kinds of expressions that we have special-case logic here for
// should be kept up to date with the special checks for those
// expressions in Sema.
@@ -2309,6 +2356,9 @@ bool Compiler<Emitter>::VisitUnaryExprOrTypeTraitExpr(
}
if (Kind == UETT_VectorElements) {
+ if (E->containsErrors())
+ return false;
+
if (const auto *VT = E->getTypeOfArgument()->getAs<VectorType>())
return this->emitConst(VT->getNumElements(), E);
assert(E->getTypeOfArgument()->isSizelessVectorType());
@@ -2430,7 +2480,7 @@ bool Compiler<Emitter>::VisitArrayInitLoopExpr(const ArrayInitLoopExpr *E) {
// and the RHS is our SubExpr.
for (size_t I = 0; I != Size; ++I) {
ArrayIndexScope<Emitter> IndexScope(this, I);
- LocalScope<Emitter> BS(this);
+ LocalScope<Emitter> BS(this, ScopeKind::FullExpression);
if (!this->visitArrayElemInit(I, SubExpr, SubExprT))
return false;
@@ -2483,19 +2533,22 @@ bool Compiler<Emitter>::VisitAbstractConditionalOperator(
const Expr *TrueExpr = E->getTrueExpr();
const Expr *FalseExpr = E->getFalseExpr();
- auto visitChildExpr = [&](const Expr *E) -> bool {
- LocalScope<Emitter> S(this);
- if (!this->delegate(E))
- return false;
- return S.destroyLocals();
- };
-
if (std::optional<bool> BoolValue = getBoolValue(Condition)) {
- if (BoolValue)
- return visitChildExpr(TrueExpr);
- return visitChildExpr(FalseExpr);
- }
-
+ if (*BoolValue)
+ return this->delegate(TrueExpr);
+ return this->delegate(FalseExpr);
+ }
+
+  // Force-init the scope, which creates an InitScope op. This is necessary so
+ // the scope is not only initialized in one arm of the conditional operator.
+ this->VarScope->forceInit();
+ // The TrueExpr and FalseExpr of a conditional operator do _not_ create a
+ // scope, which means the local variables created within them unconditionally
+ // always exist. However, we need to later differentiate which branch was
+  // taken and only destroy the variables of the active branch. This is what the
+ // "enabled" flags on local variables are used for.
+ llvm::SaveAndRestore LAAA(this->VarScope->LocalsAlwaysEnabled,
+ /*NewValue=*/false);
bool IsBcpCall = false;
if (const auto *CE = dyn_cast<CallExpr>(Condition->IgnoreParenCasts());
CE && CE->getBuiltinCallee() == Builtin::BI__builtin_constant_p) {
@@ -2525,13 +2578,15 @@ bool Compiler<Emitter>::VisitAbstractConditionalOperator(
if (!this->jumpFalse(LabelFalse))
return false;
- if (!visitChildExpr(TrueExpr))
+ if (!this->delegate(TrueExpr))
return false;
+
if (!this->jump(LabelEnd))
return false;
this->emitLabel(LabelFalse);
- if (!visitChildExpr(FalseExpr))
+ if (!this->delegate(FalseExpr))
return false;
+
this->fallthrough(LabelEnd);
this->emitLabel(LabelEnd);
@@ -2806,10 +2861,10 @@ bool Compiler<Emitter>::VisitCompoundAssignOperator(
return false;
if (!this->emitLoad(*LT, E))
return false;
- if (LT != LHSComputationT) {
- if (!this->emitCast(*LT, *LHSComputationT, E))
- return false;
- }
+ if (LT != LHSComputationT &&
+ !this->emitIntegralCast(*LT, *LHSComputationT, E->getComputationLHSType(),
+ E))
+ return false;
// Get the RHS value on the stack.
if (!this->emitGetLocal(*RT, TempOffset, E))
@@ -2862,10 +2917,9 @@ bool Compiler<Emitter>::VisitCompoundAssignOperator(
}
// And now cast from LHSComputationT to ResultT.
- if (ResultT != LHSComputationT) {
- if (!this->emitCast(*LHSComputationT, *ResultT, E))
- return false;
- }
+ if (ResultT != LHSComputationT &&
+ !this->emitIntegralCast(*LHSComputationT, *ResultT, E->getType(), E))
+ return false;
// And store the result in LHS.
if (DiscardResult) {
@@ -2880,7 +2934,7 @@ bool Compiler<Emitter>::VisitCompoundAssignOperator(
template <class Emitter>
bool Compiler<Emitter>::VisitExprWithCleanups(const ExprWithCleanups *E) {
- LocalScope<Emitter> ES(this);
+ LocalScope<Emitter> ES(this, ScopeKind::FullExpression);
const Expr *SubExpr = E->getSubExpr();
return this->delegate(SubExpr) && ES.destroyLocals(E);
@@ -2903,9 +2957,7 @@ bool Compiler<Emitter>::VisitMaterializeTemporaryExpr(
// When we're initializing a global variable *or* the storage duration of
// the temporary is explicitly static, create a global variable.
OptPrimType SubExprT = classify(SubExpr);
- bool IsStatic = E->getStorageDuration() == SD_Static;
- if (IsStatic) {
-
+ if (E->getStorageDuration() == SD_Static) {
UnsignedOrNone GlobalIndex = P.createGlobal(E);
if (!GlobalIndex)
return false;
@@ -2932,25 +2984,40 @@ bool Compiler<Emitter>::VisitMaterializeTemporaryExpr(
return this->emitInitGlobalTempComp(TempDecl, E);
}
+ ScopeKind VarScope = E->getStorageDuration() == SD_FullExpression
+ ? ScopeKind::FullExpression
+ : ScopeKind::Block;
+
// For everyhing else, use local variables.
if (SubExprT) {
bool IsConst = SubExpr->getType().isConstQualified();
bool IsVolatile = SubExpr->getType().isVolatileQualified();
- unsigned LocalIndex = allocateLocalPrimitive(
- E, *SubExprT, IsConst, IsVolatile, E->getExtendingDecl());
+ unsigned LocalIndex =
+ allocateLocalPrimitive(E, *SubExprT, IsConst, IsVolatile, VarScope);
+ if (!this->VarScope->LocalsAlwaysEnabled &&
+ !this->emitEnableLocal(LocalIndex, E))
+ return false;
+
if (!this->visit(SubExpr))
return false;
if (!this->emitSetLocal(*SubExprT, LocalIndex, E))
return false;
+
return this->emitGetPtrLocal(LocalIndex, E);
}
if (!this->checkLiteralType(SubExpr))
return false;
+
const Expr *Inner = E->getSubExpr()->skipRValueSubobjectAdjustments();
if (UnsignedOrNone LocalIndex =
- allocateLocal(E, Inner->getType(), E->getExtendingDecl())) {
+ allocateLocal(E, Inner->getType(), VarScope)) {
InitLinkScope<Emitter> ILS(this, InitLink::Temp(*LocalIndex));
+
+ if (!this->VarScope->LocalsAlwaysEnabled &&
+ !this->emitEnableLocal(*LocalIndex, E))
+ return false;
+
if (!this->emitGetPtrLocal(*LocalIndex, E))
return false;
return this->visitInitializer(SubExpr) && this->emitFinishInit(E);
@@ -3147,6 +3214,8 @@ bool Compiler<Emitter>::VisitCXXReinterpretCastExpr(
if (PointeeToT && PointeeFromT) {
if (isIntegralType(*PointeeFromT) && isIntegralType(*PointeeToT))
Fatal = false;
+ else if (E->getCastKind() == CK_LValueBitCast)
+ Fatal = false;
} else {
Fatal = SubExpr->getType().getTypePtr() != E->getType().getTypePtr();
}
@@ -3218,8 +3287,11 @@ bool Compiler<Emitter>::VisitCXXConstructExpr(const CXXConstructExpr *E) {
return this->visitInitializer(E->getArg(0));
// Zero initialization.
- if (E->requiresZeroInitialization()) {
+ bool ZeroInit = E->requiresZeroInitialization();
+ if (ZeroInit) {
const Record *R = getRecord(E->getType());
+ if (!R)
+ return false;
if (!this->visitZeroRecordInitializer(R, E))
return false;
@@ -3229,6 +3301,19 @@ bool Compiler<Emitter>::VisitCXXConstructExpr(const CXXConstructExpr *E) {
return true;
}
+ // Avoid materializing a temporary for an elidable copy/move constructor.
+ if (!ZeroInit && E->isElidable()) {
+ const Expr *SrcObj = E->getArg(0);
+ assert(SrcObj->isTemporaryObject(Ctx.getASTContext(), Ctor->getParent()));
+ assert(Ctx.getASTContext().hasSameUnqualifiedType(E->getType(),
+ SrcObj->getType()));
+ if (const auto *ME = dyn_cast<MaterializeTemporaryExpr>(SrcObj)) {
+ if (!this->emitCheckFunctionDecl(Ctor, E))
+ return false;
+ return this->visitInitializer(ME->getSubExpr());
+ }
+ }
+
const Function *Func = getFunction(Ctor);
if (!Func)
@@ -3274,34 +3359,43 @@ bool Compiler<Emitter>::VisitCXXConstructExpr(const CXXConstructExpr *E) {
}
if (T->isArrayType()) {
- const ConstantArrayType *CAT =
- Ctx.getASTContext().getAsConstantArrayType(E->getType());
- if (!CAT)
- return false;
-
- size_t NumElems = CAT->getZExtSize();
const Function *Func = getFunction(E->getConstructor());
if (!Func)
return false;
- // FIXME(perf): We're calling the constructor once per array element here,
- // in the old intepreter we had a special-case for trivial constructors.
- for (size_t I = 0; I != NumElems; ++I) {
- if (!this->emitConstUint64(I, E))
- return false;
- if (!this->emitArrayElemPtrUint64(E))
- return false;
+ if (!this->emitDupPtr(E))
+ return false;
- // Constructor arguments.
- for (const auto *Arg : E->arguments()) {
- if (!this->visit(Arg))
- return false;
+ std::function<bool(QualType)> initArrayDimension;
+ initArrayDimension = [&](QualType T) -> bool {
+ if (!T->isArrayType()) {
+ // Constructor arguments.
+ for (const auto *Arg : E->arguments()) {
+ if (!this->visit(Arg))
+ return false;
+ }
+
+ return this->emitCall(Func, 0, E);
}
- if (!this->emitCall(Func, 0, E))
+ const ConstantArrayType *CAT =
+ Ctx.getASTContext().getAsConstantArrayType(T);
+ if (!CAT)
return false;
- }
- return true;
+ QualType ElemTy = CAT->getElementType();
+ unsigned NumElems = CAT->getZExtSize();
+ for (size_t I = 0; I != NumElems; ++I) {
+ if (!this->emitConstUint64(I, E))
+ return false;
+ if (!this->emitArrayElemPtrUint64(E))
+ return false;
+ if (!initArrayDimension(ElemTy))
+ return false;
+ }
+ return this->emitPopPtr(E);
+ };
+
+ return initArrayDimension(E->getType());
}
return false;
@@ -3525,6 +3619,9 @@ bool Compiler<Emitter>::VisitCXXNewExpr(const CXXNewExpr *E) {
const Expr *PlacementDest = nullptr;
bool IsNoThrow = false;
+ if (E->containsErrors())
+ return false;
+
if (PlacementArgs != 0) {
// FIXME: There is no restriction on this, but it's not clear that any
// other form makes any sense. We get here for cases such as:
@@ -3600,8 +3697,6 @@ bool Compiler<Emitter>::VisitCXXNewExpr(const CXXNewExpr *E) {
if (PlacementDest) {
if (!this->visit(PlacementDest))
return false;
- if (!this->emitStartLifetime(E))
- return false;
if (!this->emitGetLocal(SizeT, ArrayLen, E))
return false;
if (!this->emitCheckNewTypeMismatchArray(SizeT, E, E))
@@ -3625,34 +3720,42 @@ bool Compiler<Emitter>::VisitCXXNewExpr(const CXXNewExpr *E) {
QualType InitType = Init->getType();
size_t StaticInitElems = 0;
const Expr *DynamicInit = nullptr;
+ OptPrimType ElemT;
+
if (const ConstantArrayType *CAT =
Ctx.getASTContext().getAsConstantArrayType(InitType)) {
StaticInitElems = CAT->getZExtSize();
+    // Initialize the first S elements from the initializer.
if (!this->visitInitializer(Init))
return false;
- if (const auto *ILE = dyn_cast<InitListExpr>(Init);
- ILE && ILE->hasArrayFiller())
- DynamicInit = ILE->getArrayFiller();
+ if (const auto *ILE = dyn_cast<InitListExpr>(Init)) {
+ if (ILE->hasArrayFiller())
+ DynamicInit = ILE->getArrayFiller();
+ else if (isa<StringLiteral>(ILE->getInit(0)))
+ ElemT = classifyPrim(CAT->getElementType());
+ }
}
// The initializer initializes a certain number of elements, S.
// However, the complete number of elements, N, might be larger than that.
// In this case, we need to get an initializer for the remaining elements.
- // There are to cases:
+ // There are three cases:
// 1) For the form 'new Struct[n];', the initializer is a
// CXXConstructExpr and its type is an IncompleteArrayType.
// 2) For the form 'new Struct[n]{1,2,3}', the initializer is an
// InitListExpr and the initializer for the remaining elements
// is the array filler.
+ // 3) StringLiterals don't have an array filler, so we need to zero
+ // the remaining elements.
- if (DynamicInit || InitType->isIncompleteArrayType()) {
+ if (DynamicInit || ElemT || InitType->isIncompleteArrayType()) {
const Function *CtorFunc = nullptr;
if (const auto *CE = dyn_cast<CXXConstructExpr>(Init)) {
CtorFunc = getFunction(CE->getConstructor());
if (!CtorFunc)
return false;
- } else if (!DynamicInit)
+ } else if (!DynamicInit && !ElemT)
DynamicInit = Init;
LabelTy EndLabel = this->getLabel();
@@ -3718,6 +3821,13 @@ bool Compiler<Emitter>::VisitCXXNewExpr(const CXXNewExpr *E) {
if (!this->emitPopPtr(E))
return false;
}
+ } else if (ElemT) {
+ if (!this->visitZeroInitializer(
+ *ElemT, InitType->getAsArrayTypeUnsafe()->getElementType(),
+ Init))
+ return false;
+ if (!this->emitStorePop(*ElemT, E))
+ return false;
} else {
assert(CtorFunc);
if (!this->emitCall(CtorFunc, 0, E))
@@ -3741,10 +3851,9 @@ bool Compiler<Emitter>::VisitCXXNewExpr(const CXXNewExpr *E) {
if (PlacementDest) {
if (!this->visit(PlacementDest))
return false;
- if (!this->emitStartLifetime(E))
- return false;
if (!this->emitCheckNewTypeMismatch(E, E))
return false;
+
} else {
// Allocate just one element.
if (!this->emitAlloc(Desc, E))
@@ -3943,6 +4052,8 @@ bool Compiler<Emitter>::VisitRecoveryExpr(const RecoveryExpr *E) {
template <class Emitter>
bool Compiler<Emitter>::VisitAddrLabelExpr(const AddrLabelExpr *E) {
assert(E->getType()->isVoidPointerType());
+ if (DiscardResult)
+ return true;
return this->emitDummyPtr(E, E);
}
@@ -3992,7 +4103,6 @@ bool Compiler<Emitter>::VisitShuffleVectorExpr(const ShuffleVectorExpr *E) {
if (E->getNumSubExprs() == 2)
return this->emitInvalid(E);
- assert(Initializing);
assert(E->getNumSubExprs() > 2);
const Expr *Vecs[] = {E->getExpr(0), E->getExpr(1)};
@@ -4002,6 +4112,14 @@ bool Compiler<Emitter>::VisitShuffleVectorExpr(const ShuffleVectorExpr *E) {
unsigned NumOutputElems = E->getNumSubExprs() - 2;
assert(NumOutputElems > 0);
+ if (!Initializing) {
+ UnsignedOrNone LocalIndex = allocateLocal(E);
+ if (!LocalIndex)
+ return false;
+ if (!this->emitGetPtrLocal(*LocalIndex, E))
+ return false;
+ }
+
// Save both input vectors to a local variable.
unsigned VectorOffsets[2];
for (unsigned I = 0; I != 2; ++I) {
@@ -4030,6 +4148,9 @@ bool Compiler<Emitter>::VisitShuffleVectorExpr(const ShuffleVectorExpr *E) {
return false;
}
+ if (DiscardResult)
+ return this->emitPopPtr(E);
+
return true;
}
@@ -4150,7 +4271,7 @@ bool Compiler<Emitter>::VisitStmtExpr(const StmtExpr *E) {
StmtExprScope<Emitter> SS(this);
const CompoundStmt *CS = E->getSubStmt();
- const Stmt *Result = CS->getStmtExprResult();
+ const Stmt *Result = CS->body_back();
for (const Stmt *S : CS->body()) {
if (S != Result) {
if (!this->visitStmt(S))
@@ -4214,7 +4335,8 @@ template <class Emitter> bool Compiler<Emitter>::visit(const Expr *E) {
// Create local variable to hold the return value.
if (!E->isGLValue() && !canClassify(E->getType())) {
- UnsignedOrNone LocalIndex = allocateLocal(stripDerivedToBaseCasts(E));
+ UnsignedOrNone LocalIndex = allocateLocal(
+ stripDerivedToBaseCasts(E), QualType(), ScopeKind::FullExpression);
if (!LocalIndex)
return false;
@@ -4413,6 +4535,8 @@ bool Compiler<Emitter>::visitZeroArrayInitializer(QualType T, const Expr *E) {
}
if (ElemType->isRecordType()) {
const Record *R = getRecord(ElemType);
+ if (!R)
+ return false;
for (size_t I = 0; I != NumElems; ++I) {
if (!this->emitConstUint32(I, E))
@@ -4571,9 +4695,11 @@ bool Compiler<Emitter>::emitConst(const APSInt &Value, const Expr *E) {
}
template <class Emitter>
-unsigned Compiler<Emitter>::allocateLocalPrimitive(
- DeclTy &&Src, PrimType Ty, bool IsConst, bool IsVolatile,
- const ValueDecl *ExtendingDecl, ScopeKind SC, bool IsConstexprUnknown) {
+unsigned Compiler<Emitter>::allocateLocalPrimitive(DeclTy &&Src, PrimType Ty,
+ bool IsConst,
+ bool IsVolatile,
+ ScopeKind SC,
+ bool IsConstexprUnknown) {
// FIXME: There are cases where Src.is<Expr*>() is wrong, e.g.
// (int){12} in C. Consider using Expr::isTemporaryObject() instead
// or isa<MaterializeTemporaryExpr>().
@@ -4584,16 +4710,12 @@ unsigned Compiler<Emitter>::allocateLocalPrimitive(
Scope::Local Local = this->createLocal(D);
if (auto *VD = dyn_cast_if_present<ValueDecl>(Src.dyn_cast<const Decl *>()))
Locals.insert({VD, Local});
- if (ExtendingDecl)
- VarScope->addExtended(Local, ExtendingDecl);
- else
- VarScope->addForScopeKind(Local, SC);
+ VarScope->addForScopeKind(Local, SC);
return Local.Offset;
}
template <class Emitter>
UnsignedOrNone Compiler<Emitter>::allocateLocal(DeclTy &&Src, QualType Ty,
- const ValueDecl *ExtendingDecl,
ScopeKind SC,
bool IsConstexprUnknown) {
const ValueDecl *Key = nullptr;
@@ -4613,7 +4735,8 @@ UnsignedOrNone Compiler<Emitter>::allocateLocal(DeclTy &&Src, QualType Ty,
Descriptor *D = P.createDescriptor(
Src, Ty.getTypePtr(), Descriptor::InlineDescMD, Ty.isConstQualified(),
- IsTemporary, /*IsMutable=*/false, /*IsVolatile=*/false, Init);
+ IsTemporary, /*IsMutable=*/false, /*IsVolatile=*/Ty.isVolatileQualified(),
+ Init);
if (!D)
return std::nullopt;
D->IsConstexprUnknown = IsConstexprUnknown;
@@ -4621,10 +4744,7 @@ UnsignedOrNone Compiler<Emitter>::allocateLocal(DeclTy &&Src, QualType Ty,
Scope::Local Local = this->createLocal(D);
if (Key)
Locals.insert({Key, Local});
- if (ExtendingDecl)
- VarScope->addExtended(Local, ExtendingDecl);
- else
- VarScope->addForScopeKind(Local, SC);
+ VarScope->addForScopeKind(Local, SC);
return Local.Offset;
}
@@ -4676,7 +4796,7 @@ const Function *Compiler<Emitter>::getFunction(const FunctionDecl *FD) {
template <class Emitter>
bool Compiler<Emitter>::visitExpr(const Expr *E, bool DestroyToplevelScope) {
- LocalScope<Emitter> RootScope(this);
+ LocalScope<Emitter> RootScope(this, ScopeKind::FullExpression);
// If we won't destroy the toplevel scope, check for memory leaks first.
if (!DestroyToplevelScope) {
@@ -4743,8 +4863,7 @@ VarCreationState Compiler<Emitter>::visitDecl(const VarDecl *VD,
if (!R && Context::shouldBeGloballyIndexed(VD)) {
if (auto GlobalIndex = P.getGlobal(VD)) {
Block *GlobalBlock = P.getGlobal(*GlobalIndex);
- GlobalInlineDescriptor &GD =
- *reinterpret_cast<GlobalInlineDescriptor *>(GlobalBlock->rawData());
+ auto &GD = GlobalBlock->getBlockDesc<GlobalInlineDescriptor>();
GD.InitState = GlobalInitState::InitializerFailed;
GlobalBlock->invokeDtor();
@@ -4770,7 +4889,7 @@ bool Compiler<Emitter>::visitDeclAndReturn(const VarDecl *VD, const Expr *Init,
LS.destroyLocals() && this->emitCheckAllocations(VD);
}
- LocalScope<Emitter> VDScope(this, VD);
+ LocalScope<Emitter> VDScope(this);
if (!this->visitVarDecl(VD, Init, /*Toplevel=*/true))
return false;
@@ -4805,8 +4924,7 @@ bool Compiler<Emitter>::visitDeclAndReturn(const VarDecl *VD, const Expr *Init,
auto GlobalIndex = P.getGlobal(VD);
assert(GlobalIndex);
Block *GlobalBlock = P.getGlobal(*GlobalIndex);
- GlobalInlineDescriptor &GD =
- *reinterpret_cast<GlobalInlineDescriptor *>(GlobalBlock->rawData());
+ auto &GD = GlobalBlock->getBlockDesc<GlobalInlineDescriptor>();
GD.InitState = GlobalInitState::InitializerFailed;
GlobalBlock->invokeDtor();
@@ -4841,46 +4959,42 @@ Compiler<Emitter>::visitVarDecl(const VarDecl *VD, const Expr *Init,
return !NeedsOp || this->emitCheckDecl(VD, VD);
};
- auto initGlobal = [&](unsigned GlobalIndex) -> bool {
- assert(Init);
-
- if (VarT) {
- if (!this->visit(Init))
- return checkDecl() && false;
-
- return checkDecl() && this->emitInitGlobal(*VarT, GlobalIndex, VD);
- }
+ DeclScope<Emitter> LocalScope(this, VD);
- if (!checkDecl())
+ UnsignedOrNone GlobalIndex = P.getGlobal(VD);
+ if (GlobalIndex) {
+ // The global was previously created but the initializer failed.
+ if (!P.getGlobal(*GlobalIndex)->isInitialized())
return false;
+ // We've already seen and initialized this global.
+ if (P.isGlobalInitialized(*GlobalIndex))
+ return checkDecl();
+ // The previous attempt at initialization might've been unsuccessful,
+ // so let's try this one.
+ } else if ((GlobalIndex = P.createGlobal(VD, Init))) {
+ } else {
+ return false;
+ }
+ if (!Init)
+ return true;
- if (!this->emitGetPtrGlobal(GlobalIndex, Init))
- return false;
+ if (!checkDecl())
+ return false;
- if (!visitInitializer(Init))
+ if (VarT) {
+ if (!this->visit(Init))
return false;
- return this->emitFinishInitGlobal(Init);
- };
-
- DeclScope<Emitter> LocalScope(this, VD);
-
- // We've already seen and initialized this global.
- if (UnsignedOrNone GlobalIndex = P.getGlobal(VD)) {
- if (P.getPtrGlobal(*GlobalIndex).isInitialized())
- return checkDecl();
-
- // The previous attempt at initialization might've been unsuccessful,
- // so let's try this one.
- return Init && checkDecl() && initGlobal(*GlobalIndex);
+ return this->emitInitGlobal(*VarT, *GlobalIndex, VD);
}
- UnsignedOrNone GlobalIndex = P.createGlobal(VD, Init);
+ if (!this->emitGetPtrGlobal(*GlobalIndex, Init))
+ return false;
- if (!GlobalIndex)
+ if (!visitInitializer(Init))
return false;
- return !Init || (checkDecl() && initGlobal(*GlobalIndex));
+ return this->emitFinishInitGlobal(Init);
}
// Local variables.
InitLinkScope<Emitter> ILS(this, InitLink::Decl(VD));
@@ -4888,38 +5002,39 @@ Compiler<Emitter>::visitVarDecl(const VarDecl *VD, const Expr *Init,
if (VarT) {
unsigned Offset = this->allocateLocalPrimitive(
VD, *VarT, VD->getType().isConstQualified(),
- VD->getType().isVolatileQualified(), nullptr, ScopeKind::Block,
+ VD->getType().isVolatileQualified(), ScopeKind::Block,
IsConstexprUnknown);
- if (Init) {
- // If this is a toplevel declaration, create a scope for the
- // initializer.
- if (Toplevel) {
- LocalScope<Emitter> Scope(this);
- if (!this->visit(Init))
- return false;
- return this->emitSetLocal(*VarT, Offset, VD) && Scope.destroyLocals();
- }
- if (!this->visit(Init))
- return false;
- return this->emitSetLocal(*VarT, Offset, VD);
- }
- } else {
- if (UnsignedOrNone Offset = this->allocateLocal(
- VD, VD->getType(), nullptr, ScopeKind::Block, IsConstexprUnknown)) {
- if (!Init)
- return true;
- if (!this->emitGetPtrLocal(*Offset, Init))
- return false;
+ if (!Init)
+ return true;
- if (!visitInitializer(Init))
+ // If this is a toplevel declaration, create a scope for the
+ // initializer.
+ if (Toplevel) {
+ LocalScope<Emitter> Scope(this);
+ if (!this->visit(Init))
return false;
-
- return this->emitFinishInitPop(Init);
+ return this->emitSetLocal(*VarT, Offset, VD) && Scope.destroyLocals();
}
- return false;
+ if (!this->visit(Init))
+ return false;
+ return this->emitSetLocal(*VarT, Offset, VD);
}
- return true;
+ // Local composite variables.
+ if (UnsignedOrNone Offset = this->allocateLocal(
+ VD, VD->getType(), ScopeKind::Block, IsConstexprUnknown)) {
+ if (!Init)
+ return true;
+
+ if (!this->emitGetPtrLocal(*Offset, Init))
+ return false;
+
+ if (!visitInitializer(Init))
+ return false;
+
+ return this->emitFinishInitPop(Init);
+ }
+ return false;
}
template <class Emitter>
@@ -4979,14 +5094,29 @@ bool Compiler<Emitter>::visitAPValueInitializer(const APValue &Val,
}
if (Val.isUnion()) {
const FieldDecl *UnionField = Val.getUnionField();
- const Record *R = this->getRecord(UnionField->getParent());
+ if (!UnionField)
+ return true;
+ const Record *R = this->getRecord(T);
assert(R);
const APValue &F = Val.getUnionValue();
const Record::Field *RF = R->getField(UnionField);
- PrimType T = classifyPrim(RF->Decl->getType());
- if (!this->visitAPValue(F, T, E))
+ QualType FieldType = RF->Decl->getType();
+
+ if (OptPrimType PT = classify(FieldType)) {
+ if (!this->visitAPValue(F, *PT, E))
+ return false;
+ if (RF->isBitField())
+ return this->emitInitBitFieldActivate(*PT, RF, E);
+ return this->emitInitFieldActivate(*PT, RF->Offset, E);
+ }
+
+ if (!this->emitGetPtrField(RF->Offset, E))
+ return false;
+ if (!this->emitActivate(E))
return false;
- return this->emitInitField(T, RF->Offset, E);
+ if (!this->visitAPValueInitializer(F, E, FieldType))
+ return false;
+ return this->emitPopPtr(E);
}
if (Val.isArray()) {
const auto *ArrType = T->getAsArrayTypeUnsafe();
@@ -5081,6 +5211,19 @@ bool Compiler<Emitter>::VisitBuiltinCallExpr(const CallExpr *E,
return false;
} break;
+ case Builtin::BI__assume:
+ case Builtin::BI__builtin_assume:
+ // Argument is not evaluated.
+ break;
+ case Builtin::BI__atomic_is_lock_free:
+ case Builtin::BI__atomic_always_lock_free: {
+ assert(E->getNumArgs() == 2);
+ if (!this->visit(E->getArg(0)))
+ return false;
+ if (!this->visitAsLValue(E->getArg(1)))
+ return false;
+ } break;
+
default:
if (!Context::isUnevaluatedBuiltin(BuiltinID)) {
// Put arguments on the stack.
@@ -5094,10 +5237,8 @@ bool Compiler<Emitter>::VisitBuiltinCallExpr(const CallExpr *E,
if (!this->emitCallBI(E, BuiltinID, E))
return false;
- if (DiscardResult && !ReturnType->isVoidType()) {
- assert(ReturnT);
- return this->emitPop(*ReturnT, E);
- }
+ if (DiscardResult && !ReturnType->isVoidType())
+ return this->emitPop(ReturnT.value_or(PT_Ptr), E);
return true;
}
@@ -5391,55 +5532,58 @@ bool Compiler<Emitter>::VisitCXXThisExpr(const CXXThisExpr *E) {
// instance pointer of the current function frame, but e.g. to the declaration
// currently being initialized. Here we emit the necessary instruction(s) for
// this scenario.
- if (!InitStackActive)
+ if (!InitStackActive || InitStack.empty())
return this->emitThis(E);
- if (!InitStack.empty()) {
- // If our init stack is, for example:
- // 0 Stack: 3 (decl)
- // 1 Stack: 6 (init list)
- // 2 Stack: 1 (field)
- // 3 Stack: 6 (init list)
- // 4 Stack: 1 (field)
- //
- // We want to find the LAST element in it that's an init list,
- // which is marked with the K_InitList marker. The index right
- // before that points to an init list. We need to find the
- // elements before the K_InitList element that point to a base
- // (e.g. a decl or This), optionally followed by field, elem, etc.
- // In the example above, we want to emit elements [0..2].
- unsigned StartIndex = 0;
- unsigned EndIndex = 0;
- // Find the init list.
- for (StartIndex = InitStack.size() - 1; StartIndex > 0; --StartIndex) {
- if (InitStack[StartIndex].Kind == InitLink::K_InitList ||
- InitStack[StartIndex].Kind == InitLink::K_This) {
- EndIndex = StartIndex;
- --StartIndex;
- break;
- }
+ // If our init stack is, for example:
+ // 0 Stack: 3 (decl)
+ // 1 Stack: 6 (init list)
+ // 2 Stack: 1 (field)
+ // 3 Stack: 6 (init list)
+ // 4 Stack: 1 (field)
+ //
+ // We want to find the LAST element in it that's an init list,
+ // which is marked with the K_InitList marker. The index right
+ // before that points to an init list. We need to find the
+ // elements before the K_InitList element that point to a base
+ // (e.g. a decl or This), optionally followed by field, elem, etc.
+ // In the example above, we want to emit elements [0..2].
+ unsigned StartIndex = 0;
+ unsigned EndIndex = 0;
+ // Find the init list.
+ for (StartIndex = InitStack.size() - 1; StartIndex > 0; --StartIndex) {
+ if (InitStack[StartIndex].Kind == InitLink::K_DIE) {
+ EndIndex = StartIndex;
+ --StartIndex;
+ break;
}
+ }
- // Walk backwards to find the base.
- for (; StartIndex > 0; --StartIndex) {
- if (InitStack[StartIndex].Kind == InitLink::K_InitList)
- continue;
+ // Walk backwards to find the base.
+ for (; StartIndex > 0; --StartIndex) {
+ if (InitStack[StartIndex].Kind == InitLink::K_InitList)
+ continue;
- if (InitStack[StartIndex].Kind != InitLink::K_Field &&
- InitStack[StartIndex].Kind != InitLink::K_Elem)
- break;
- }
+ if (InitStack[StartIndex].Kind != InitLink::K_Field &&
+ InitStack[StartIndex].Kind != InitLink::K_Elem &&
+ InitStack[StartIndex].Kind != InitLink::K_DIE)
+ break;
+ }
- // Emit the instructions.
- for (unsigned I = StartIndex; I != EndIndex; ++I) {
- if (InitStack[I].Kind == InitLink::K_InitList)
- continue;
- if (!InitStack[I].template emit<Emitter>(this, E))
- return false;
- }
- return true;
+ if (StartIndex == 0 && EndIndex == 0)
+ EndIndex = InitStack.size() - 1;
+
+ assert(StartIndex < EndIndex);
+
+ // Emit the instructions.
+ for (unsigned I = StartIndex; I != (EndIndex + 1); ++I) {
+ if (InitStack[I].Kind == InitLink::K_InitList ||
+ InitStack[I].Kind == InitLink::K_DIE)
+ continue;
+ if (!InitStack[I].template emit<Emitter>(this, E))
+ return false;
}
- return this->emitThis(E);
+ return true;
}
template <class Emitter> bool Compiler<Emitter>::visitStmt(const Stmt *S) {
@@ -5514,7 +5658,8 @@ bool Compiler<Emitter>::maybeEmitDeferredVarInit(const VarDecl *VD) {
static bool hasTrivialDefaultCtorParent(const FieldDecl *FD) {
assert(FD);
assert(FD->getParent()->isUnion());
- const auto *CXXRD = dyn_cast<CXXRecordDecl>(FD->getParent());
+ const CXXRecordDecl *CXXRD =
+ FD->getType()->getBaseElementTypeUnsafe()->getAsCXXRecordDecl();
return !CXXRD || CXXRD->hasTrivialDefaultConstructor();
}
@@ -5599,6 +5744,9 @@ bool Compiler<Emitter>::visitReturnStmt(const ReturnStmt *RS) {
if (!this->visit(RE))
return false;
} else {
+ if (RE->containsErrors())
+ return false;
+
InitLinkScope<Emitter> ILS(this, InitLink::RVO());
// RVO - construct the value in the return location.
if (!this->emitRVOPtr(RE))
@@ -5619,19 +5767,24 @@ bool Compiler<Emitter>::visitReturnStmt(const ReturnStmt *RS) {
}
template <class Emitter> bool Compiler<Emitter>::visitIfStmt(const IfStmt *IS) {
+ LocalScope<Emitter> IfScope(this);
+
auto visitChildStmt = [&](const Stmt *S) -> bool {
LocalScope<Emitter> SScope(this);
if (!visitStmt(S))
return false;
return SScope.destroyLocals();
};
- if (auto *CondInit = IS->getInit())
+
+ if (auto *CondInit = IS->getInit()) {
if (!visitStmt(CondInit))
return false;
+ }
- if (const DeclStmt *CondDecl = IS->getConditionVariableDeclStmt())
+ if (const DeclStmt *CondDecl = IS->getConditionVariableDeclStmt()) {
if (!visitDeclStmt(CondDecl))
return false;
+ }
// Save ourselves compiling some code and the jumps, etc. if the condition is
// statically known to be either true or false. We could look at more cases
@@ -5655,8 +5808,11 @@ template <class Emitter> bool Compiler<Emitter>::visitIfStmt(const IfStmt *IS) {
if (!this->emitInv(IS))
return false;
} else {
+ LocalScope<Emitter> CondScope(this, ScopeKind::FullExpression);
if (!this->visitBool(IS->getCond()))
return false;
+ if (!CondScope.destroyLocals())
+ return false;
}
if (!this->maybeEmitDeferredVarInit(IS->getConditionVariable()))
@@ -5684,6 +5840,9 @@ template <class Emitter> bool Compiler<Emitter>::visitIfStmt(const IfStmt *IS) {
this->emitLabel(LabelEnd);
}
+ if (!IfScope.destroyLocals())
+ return false;
+
return true;
}
@@ -5900,11 +6059,15 @@ bool Compiler<Emitter>::visitBreakStmt(const BreakStmt *S) {
}
}
- assert(TargetLabel);
+ // Faulty break statement (e.g. label redefined or named loops disabled).
+ if (!TargetLabel)
+ return false;
for (VariableScope<Emitter> *C = this->VarScope; C != BreakScope;
- C = C->getParent())
- C->emitDestruction();
+ C = C->getParent()) {
+ if (!C->destroyLocals())
+ return false;
+ }
return this->jump(*TargetLabel);
}
@@ -5938,8 +6101,10 @@ bool Compiler<Emitter>::visitContinueStmt(const ContinueStmt *S) {
assert(TargetLabel);
for (VariableScope<Emitter> *C = VarScope; C != ContinueScope;
- C = C->getParent())
- C->emitDestruction();
+ C = C->getParent()) {
+ if (!C->destroyLocals())
+ return false;
+ }
return this->jump(*TargetLabel);
}
@@ -5980,12 +6145,41 @@ bool Compiler<Emitter>::visitSwitchStmt(const SwitchStmt *S) {
for (const SwitchCase *SC = S->getSwitchCaseList(); SC;
SC = SC->getNextSwitchCase()) {
if (const auto *CS = dyn_cast<CaseStmt>(SC)) {
- // FIXME: Implement ranges.
- if (CS->caseStmtIsGNURange())
- return false;
CaseLabels[SC] = this->getLabel();
+ if (CS->caseStmtIsGNURange()) {
+ LabelTy EndOfRangeCheck = this->getLabel();
+ const Expr *Low = CS->getLHS();
+ const Expr *High = CS->getRHS();
+ if (Low->isValueDependent() || High->isValueDependent())
+ return false;
+
+ if (!this->emitGetLocal(CondT, CondVar, CS))
+ return false;
+ if (!this->visit(Low))
+ return false;
+ PrimType LT = this->classifyPrim(Low->getType());
+ if (!this->emitGE(LT, S))
+ return false;
+ if (!this->jumpFalse(EndOfRangeCheck))
+ return false;
+
+ if (!this->emitGetLocal(CondT, CondVar, CS))
+ return false;
+ if (!this->visit(High))
+ return false;
+ PrimType HT = this->classifyPrim(High->getType());
+ if (!this->emitLE(HT, S))
+ return false;
+ if (!this->jumpTrue(CaseLabels[CS]))
+ return false;
+ this->emitLabel(EndOfRangeCheck);
+ continue;
+ }
+
const Expr *Value = CS->getLHS();
+ if (Value->isValueDependent())
+ return false;
PrimType ValueT = this->classifyPrim(Value->getType());
// Compare the case statement's value to the switch condition.
@@ -6019,6 +6213,7 @@ bool Compiler<Emitter>::visitSwitchStmt(const SwitchStmt *S) {
DefaultLabel);
if (!this->visitStmt(S->getBody()))
return false;
+ this->fallthrough(EndLabel);
this->emitLabel(EndLabel);
return LS.destroyLocals();
@@ -6026,6 +6221,7 @@ bool Compiler<Emitter>::visitSwitchStmt(const SwitchStmt *S) {
template <class Emitter>
bool Compiler<Emitter>::visitCaseStmt(const CaseStmt *S) {
+ this->fallthrough(CaseLabels[S]);
this->emitLabel(CaseLabels[S]);
return this->visitStmt(S->getSubStmt());
}
@@ -6049,6 +6245,14 @@ bool Compiler<Emitter>::visitDefaultStmt(const DefaultStmt *S) {
template <class Emitter>
bool Compiler<Emitter>::visitAttributedStmt(const AttributedStmt *S) {
+ const Stmt *SubStmt = S->getSubStmt();
+
+ bool IsMSVCConstexprAttr = isa<ReturnStmt>(SubStmt) &&
+ hasSpecificAttr<MSConstexprAttr>(S->getAttrs());
+
+ if (IsMSVCConstexprAttr && !this->emitPushMSVCCE(S))
+ return false;
+
if (this->Ctx.getLangOpts().CXXAssumptions &&
!this->Ctx.getLangOpts().MSVCCompat) {
for (const Attr *A : S->getAttrs()) {
@@ -6056,7 +6260,7 @@ bool Compiler<Emitter>::visitAttributedStmt(const AttributedStmt *S) {
if (!AA)
continue;
- assert(isa<NullStmt>(S->getSubStmt()));
+ assert(isa<NullStmt>(SubStmt));
const Expr *Assumption = AA->getAssumption();
if (Assumption->isValueDependent())
@@ -6075,7 +6279,12 @@ bool Compiler<Emitter>::visitAttributedStmt(const AttributedStmt *S) {
}
// Ignore other attributes.
- return this->visitStmt(S->getSubStmt());
+ if (!this->visitStmt(SubStmt))
+ return false;
+
+ if (IsMSVCConstexprAttr)
+ return this->emitPopMSVCCE(S);
+ return true;
}
template <class Emitter>
@@ -6246,7 +6455,7 @@ bool Compiler<Emitter>::compileConstructor(const CXXConstructorDecl *Ctor) {
InitLinkScope<Emitter> InitScope(this, InitLink::This());
for (const auto *Init : Ctor->inits()) {
// Scope needed for the initializers.
- LocalScope<Emitter> Scope(this);
+ LocalScope<Emitter> Scope(this, ScopeKind::FullExpression);
const Expr *InitExpr = Init->getInit();
if (const FieldDecl *Member = Init->getMember()) {
@@ -6301,6 +6510,10 @@ bool Compiler<Emitter>::compileConstructor(const CXXConstructorDecl *Ctor) {
}
assert(NestedField);
+ unsigned FirstLinkOffset =
+ R->getField(cast<FieldDecl>(IFD->chain()[0]))->Offset;
+ InitLinkScope<Emitter> ILS(this, InitLink::Field(FirstLinkOffset));
+ InitStackScope<Emitter> ISS(this, isa<CXXDefaultInitExpr>(InitExpr));
if (!emitFieldInitializer(NestedField, NestedFieldOffset, InitExpr,
IsUnion))
return false;
@@ -6334,9 +6547,20 @@ bool Compiler<Emitter>::compileConstructor(const CXXConstructorDecl *Ctor) {
return false;
}
- if (const auto *Body = Ctor->getBody())
+ if (const Stmt *Body = Ctor->getBody()) {
+ // Only emit the CtorCheck op for non-empty CompoundStmt bodies.
+ // For non-CompoundStmts, always assume they are non-empty and emit it.
+ if (const auto *CS = dyn_cast<CompoundStmt>(Body)) {
+ if (!CS->body_empty() && !this->emitCtorCheck(SourceInfo{}))
+ return false;
+ } else {
+ if (!this->emitCtorCheck(SourceInfo{}))
+ return false;
+ }
+
if (!visitStmt(Body))
return false;
+ }
return this->emitRetVoid(SourceInfo{});
}
@@ -6438,6 +6662,13 @@ bool Compiler<Emitter>::visitFunc(const FunctionDecl *F) {
return this->emitNoRet(SourceInfo{});
}
+static uint32_t getBitWidth(const Expr *E) {
+ assert(E->refersToBitField());
+ const auto *ME = cast<MemberExpr>(E);
+ const auto *FD = cast<FieldDecl>(ME->getMemberDecl());
+ return FD->getBitWidthValue();
+}
+
template <class Emitter>
bool Compiler<Emitter>::VisitUnaryOperator(const UnaryOperator *E) {
const Expr *SubExpr = E->getSubExpr();
@@ -6466,10 +6697,15 @@ bool Compiler<Emitter>::VisitUnaryOperator(const UnaryOperator *E) {
return DiscardResult ? this->emitPopPtr(E) : true;
}
- if (T == PT_Float) {
+ if (T == PT_Float)
return DiscardResult ? this->emitIncfPop(getFPOptions(E), E)
: this->emitIncf(getFPOptions(E), E);
- }
+
+ if (SubExpr->refersToBitField())
+ return DiscardResult ? this->emitIncPopBitfield(*T, E->canOverflow(),
+ getBitWidth(SubExpr), E)
+ : this->emitIncBitfield(*T, E->canOverflow(),
+ getBitWidth(SubExpr), E);
return DiscardResult ? this->emitIncPop(*T, E->canOverflow(), E)
: this->emitInc(*T, E->canOverflow(), E);
@@ -6490,9 +6726,15 @@ bool Compiler<Emitter>::VisitUnaryOperator(const UnaryOperator *E) {
return DiscardResult ? this->emitPopPtr(E) : true;
}
- if (T == PT_Float) {
+ if (T == PT_Float)
return DiscardResult ? this->emitDecfPop(getFPOptions(E), E)
: this->emitDecf(getFPOptions(E), E);
+
+ if (SubExpr->refersToBitField()) {
+ return DiscardResult ? this->emitDecPopBitfield(*T, E->canOverflow(),
+ getBitWidth(SubExpr), E)
+ : this->emitDecBitfield(*T, E->canOverflow(),
+ getBitWidth(SubExpr), E);
}
return DiscardResult ? this->emitDecPop(*T, E->canOverflow(), E)
@@ -6521,6 +6763,11 @@ bool Compiler<Emitter>::VisitUnaryOperator(const UnaryOperator *E) {
if (DiscardResult) {
if (T == PT_Float)
return this->emitIncfPop(getFPOptions(E), E);
+ if (SubExpr->refersToBitField())
+ return DiscardResult ? this->emitIncPopBitfield(*T, E->canOverflow(),
+ getBitWidth(SubExpr), E)
+ : this->emitIncBitfield(*T, E->canOverflow(),
+ getBitWidth(SubExpr), E);
return this->emitIncPop(*T, E->canOverflow(), E);
}
@@ -6536,6 +6783,11 @@ bool Compiler<Emitter>::VisitUnaryOperator(const UnaryOperator *E) {
return false;
if (!this->emitStoreFloat(E))
return false;
+ } else if (SubExpr->refersToBitField()) {
+ assert(isIntegralType(*T));
+ if (!this->emitPreIncBitfield(*T, E->canOverflow(), getBitWidth(SubExpr),
+ E))
+ return false;
} else {
assert(isIntegralType(*T));
if (!this->emitPreInc(*T, E->canOverflow(), E))
@@ -6566,6 +6818,11 @@ bool Compiler<Emitter>::VisitUnaryOperator(const UnaryOperator *E) {
if (DiscardResult) {
if (T == PT_Float)
return this->emitDecfPop(getFPOptions(E), E);
+ if (SubExpr->refersToBitField())
+ return DiscardResult ? this->emitDecPopBitfield(*T, E->canOverflow(),
+ getBitWidth(SubExpr), E)
+ : this->emitDecBitfield(*T, E->canOverflow(),
+ getBitWidth(SubExpr), E);
return this->emitDecPop(*T, E->canOverflow(), E);
}
@@ -6581,6 +6838,11 @@ bool Compiler<Emitter>::VisitUnaryOperator(const UnaryOperator *E) {
return false;
if (!this->emitStoreFloat(E))
return false;
+ } else if (SubExpr->refersToBitField()) {
+ assert(isIntegralType(*T));
+ if (!this->emitPreDecBitfield(*T, E->canOverflow(), getBitWidth(SubExpr),
+ E))
+ return false;
} else {
assert(isIntegralType(*T));
if (!this->emitPreDec(*T, E->canOverflow(), E))
@@ -6622,6 +6884,8 @@ bool Compiler<Emitter>::VisitUnaryOperator(const UnaryOperator *E) {
if (E->getType()->isMemberPointerType()) {
// C++11 [expr.unary.op]p3 has very strict rules on how the address of a
// member can be formed.
+ if (DiscardResult)
+ return true;
return this->emitGetMemberPtr(cast<DeclRefExpr>(SubExpr)->getDecl(), E);
}
// We should already have a pointer when we get here.
@@ -6633,7 +6897,7 @@ bool Compiler<Emitter>::VisitUnaryOperator(const UnaryOperator *E) {
if (!this->visit(SubExpr))
return false;
- if (!this->emitCheckNull(E))
+ if (!SubExpr->getType()->isFunctionPointerType() && !this->emitCheckNull(E))
return false;
if (classifyPrim(SubExpr) == PT_Ptr)
@@ -6648,13 +6912,17 @@ bool Compiler<Emitter>::VisitUnaryOperator(const UnaryOperator *E) {
return false;
return DiscardResult ? this->emitPop(*T, E) : this->emitComp(*T, E);
case UO_Real: // __real x
- assert(T);
+ if (!T)
+ return false;
return this->delegate(SubExpr);
case UO_Imag: { // __imag x
- assert(T);
+ if (!T)
+ return false;
if (!this->discard(SubExpr))
return false;
- return this->visitZeroInitializer(*T, SubExpr->getType(), SubExpr);
+ return DiscardResult
+ ? true
+ : this->visitZeroInitializer(*T, SubExpr->getType(), SubExpr);
}
case UO_Extension:
return this->delegate(SubExpr);
@@ -6900,8 +7168,9 @@ bool Compiler<Emitter>::visitDeclRef(const ValueDecl *D, const Expr *E) {
return false;
return this->emitInitGlobal(*T, *Index, E);
}
- return this->visitAPValueInitializer(TPOD->getValue(), E,
- TPOD->getType());
+ if (!this->visitAPValueInitializer(TPOD->getValue(), E, TPOD->getType()))
+ return false;
+ return this->emitFinishInit(E);
}
return false;
}
@@ -6927,6 +7196,10 @@ bool Compiler<Emitter>::visitDeclRef(const ValueDecl *D, const Expr *E) {
return this->emitGetPtrParam(It->second.Offset, E);
}
+
+ if (!Ctx.getLangOpts().CPlusPlus23 && IsReference)
+ return this->emitInvalidDeclRef(cast<DeclRefExpr>(E),
+ /*InitializerFailed=*/false, E);
}
// Local variables.
if (auto It = Locals.find(D); It != Locals.end()) {
@@ -7050,9 +7323,12 @@ bool Compiler<Emitter>::VisitDeclRefExpr(const DeclRefExpr *E) {
return this->visitDeclRef(D, E);
}
-template <class Emitter> void Compiler<Emitter>::emitCleanup() {
- for (VariableScope<Emitter> *C = VarScope; C; C = C->getParent())
- C->emitDestruction();
+template <class Emitter> bool Compiler<Emitter>::emitCleanup() {
+ for (VariableScope<Emitter> *C = VarScope; C; C = C->getParent()) {
+ if (!C->destroyLocals())
+ return false;
+ }
+ return true;
}
template <class Emitter>
@@ -7113,6 +7389,19 @@ bool Compiler<Emitter>::emitPrimCast(PrimType FromT, PrimType ToT,
return false;
}
+template <class Emitter>
+bool Compiler<Emitter>::emitIntegralCast(PrimType FromT, PrimType ToT,
+ QualType ToQT, const Expr *E) {
+ assert(FromT != ToT);
+
+ if (ToT == PT_IntAP)
+ return this->emitCastAP(FromT, Ctx.getBitWidth(ToQT), E);
+ if (ToT == PT_IntAPS)
+ return this->emitCastAPS(FromT, Ctx.getBitWidth(ToQT), E);
+
+ return this->emitCast(FromT, ToT, E);
+}
+
/// Emits __real(SubExpr)
template <class Emitter>
bool Compiler<Emitter>::emitComplexReal(const Expr *SubExpr) {
@@ -7185,7 +7474,8 @@ bool Compiler<Emitter>::emitComplexComparison(const Expr *LHS, const Expr *RHS,
const BinaryOperator *E) {
assert(E->isComparisonOp());
assert(!Initializing);
- assert(!DiscardResult);
+ if (DiscardResult)
+ return this->discard(LHS) && this->discard(RHS);
PrimType ElemT;
bool LHSIsComplex;
@@ -7353,8 +7643,6 @@ bool Compiler<Emitter>::emitDummyPtr(const DeclTy &D, const Expr *E) {
template <class Emitter>
bool Compiler<Emitter>::emitFloat(const APFloat &F, const Expr *E) {
- assert(!DiscardResult && "Should've been checked before");
-
if (Floating::singleWord(F.getSemantics()))
return this->emitConstFloat(Floating(F), E);
diff --git a/clang/lib/AST/ByteCode/Compiler.h b/clang/lib/AST/ByteCode/Compiler.h
index 5c46f75..1bd15c3 100644
--- a/clang/lib/AST/ByteCode/Compiler.h
+++ b/clang/lib/AST/ByteCode/Compiler.h
@@ -52,12 +52,14 @@ public:
K_Decl = 3,
K_Elem = 5,
K_RVO = 6,
- K_InitList = 7
+ K_InitList = 7,
+ K_DIE = 8,
};
static InitLink This() { return InitLink{K_This}; }
static InitLink InitList() { return InitLink{K_InitList}; }
static InitLink RVO() { return InitLink{K_RVO}; }
+ static InitLink DIE() { return InitLink{K_DIE}; }
static InitLink Field(unsigned Offset) {
InitLink IL{K_Field};
IL.Offset = Offset;
@@ -102,7 +104,7 @@ struct VarCreationState {
bool notCreated() const { return !S; }
};
-enum class ScopeKind { Call, Block };
+enum class ScopeKind { Block, FullExpression, Call };
/// Compilation context for expressions.
template <class Emitter>
@@ -256,7 +258,7 @@ protected:
protected:
/// Emits scope cleanup instructions.
- void emitCleanup();
+ bool emitCleanup();
/// Returns a record type from a record or pointer type.
const RecordType *getRecordTy(QualType Ty);
@@ -328,13 +330,11 @@ protected:
/// Creates a local primitive value.
unsigned allocateLocalPrimitive(DeclTy &&Decl, PrimType Ty, bool IsConst,
bool IsVolatile = false,
- const ValueDecl *ExtendingDecl = nullptr,
ScopeKind SC = ScopeKind::Block,
bool IsConstexprUnknown = false);
/// Allocates a space storing a local given its type.
UnsignedOrNone allocateLocal(DeclTy &&Decl, QualType Ty = QualType(),
- const ValueDecl *ExtendingDecl = nullptr,
ScopeKind = ScopeKind::Block,
bool IsConstexprUnknown = false);
UnsignedOrNone allocateTemporary(const Expr *E);
@@ -391,6 +391,8 @@ private:
}
bool emitPrimCast(PrimType FromT, PrimType ToT, QualType ToQT, const Expr *E);
+ bool emitIntegralCast(PrimType FromT, PrimType ToT, QualType ToQT,
+ const Expr *E);
PrimType classifyComplexElementType(QualType T) const {
assert(T->isAnyComplexType());
@@ -472,39 +474,18 @@ extern template class Compiler<EvalEmitter>;
/// Scope chain managing the variable lifetimes.
template <class Emitter> class VariableScope {
public:
- VariableScope(Compiler<Emitter> *Ctx, const ValueDecl *VD,
- ScopeKind Kind = ScopeKind::Block)
- : Ctx(Ctx), Parent(Ctx->VarScope), ValDecl(VD), Kind(Kind) {
+ VariableScope(Compiler<Emitter> *Ctx, ScopeKind Kind = ScopeKind::Block)
+ : Ctx(Ctx), Parent(Ctx->VarScope), Kind(Kind) {
+ if (Parent)
+ this->LocalsAlwaysEnabled = Parent->LocalsAlwaysEnabled;
Ctx->VarScope = this;
}
virtual ~VariableScope() { Ctx->VarScope = this->Parent; }
- virtual void addLocal(const Scope::Local &Local) {
+ virtual void addLocal(Scope::Local Local) {
llvm_unreachable("Shouldn't be called");
}
-
- void addExtended(const Scope::Local &Local, const ValueDecl *ExtendingDecl) {
- // Walk up the chain of scopes until we find the one for ExtendingDecl.
- // If there is no such scope, attach it to the parent one.
- VariableScope *P = this;
- while (P) {
- if (P->ValDecl == ExtendingDecl) {
- P->addLocal(Local);
- return;
- }
- P = P->Parent;
- if (!P)
- break;
- }
-
- // Use the parent scope.
- if (this->Parent)
- this->Parent->addLocal(Local);
- else
- this->addLocal(Local);
- }
-
/// Like addExtended, but adds to the nearest scope of the given kind.
void addForScopeKind(const Scope::Local &Local, ScopeKind Kind) {
VariableScope *P = this;
@@ -522,18 +503,22 @@ public:
this->addLocal(Local);
}
- virtual void emitDestruction() {}
virtual bool emitDestructors(const Expr *E = nullptr) { return true; }
virtual bool destroyLocals(const Expr *E = nullptr) { return true; }
+ virtual void forceInit() {}
VariableScope *getParent() const { return Parent; }
ScopeKind getKind() const { return Kind; }
+ /// Whether locals added to this scope are enabled by default.
+ /// This is almost always true, except for the two branches
+ /// of a conditional operator.
+ bool LocalsAlwaysEnabled = true;
+
protected:
/// Compiler instance.
Compiler<Emitter> *Ctx;
/// Link to the parent scope.
VariableScope *Parent;
- const ValueDecl *ValDecl = nullptr;
ScopeKind Kind;
};
@@ -541,9 +526,7 @@ protected:
template <class Emitter> class LocalScope : public VariableScope<Emitter> {
public:
LocalScope(Compiler<Emitter> *Ctx, ScopeKind Kind = ScopeKind::Block)
- : VariableScope<Emitter>(Ctx, nullptr, Kind) {}
- LocalScope(Compiler<Emitter> *Ctx, const ValueDecl *VD)
- : VariableScope<Emitter>(Ctx, VD) {}
+ : VariableScope<Emitter>(Ctx, Kind) {}
/// Emit a Destroy op for this scope.
~LocalScope() override {
@@ -552,16 +535,6 @@ public:
this->Ctx->emitDestroy(*Idx, SourceInfo{});
removeStoredOpaqueValues();
}
-
- /// Overriden to support explicit destruction.
- void emitDestruction() override {
- if (!Idx)
- return;
-
- this->emitDestructors();
- this->Ctx->emitDestroy(*Idx, SourceInfo{});
- }
-
/// Explicit destruction of local variables.
bool destroyLocals(const Expr *E = nullptr) override {
if (!Idx)
@@ -574,29 +547,60 @@ public:
return Success;
}
- void addLocal(const Scope::Local &Local) override {
+ void addLocal(Scope::Local Local) override {
if (!Idx) {
Idx = static_cast<unsigned>(this->Ctx->Descriptors.size());
this->Ctx->Descriptors.emplace_back();
this->Ctx->emitInitScope(*Idx, {});
}
+ Local.EnabledByDefault = this->LocalsAlwaysEnabled;
this->Ctx->Descriptors[*Idx].emplace_back(Local);
}
+ /// Force-initialize this scope. Usually, scopes are lazily initialized when
+ /// the first local variable is created, but in scenarios with conditional
+ /// operators, we need to ensure scope is initialized just in case one of the
+ /// arms will create a local and the other won't. In such a case, the
+ /// InitScope() op would be part of the arm that created the local.
+ void forceInit() override {
+ if (!Idx) {
+ Idx = static_cast<unsigned>(this->Ctx->Descriptors.size());
+ this->Ctx->Descriptors.emplace_back();
+ this->Ctx->emitInitScope(*Idx, {});
+ }
+ }
+
bool emitDestructors(const Expr *E = nullptr) override {
if (!Idx)
return true;
+
// Emit destructor calls for local variables of record
// type with a destructor.
for (Scope::Local &Local : llvm::reverse(this->Ctx->Descriptors[*Idx])) {
if (Local.Desc->hasTrivialDtor())
continue;
- if (!this->Ctx->emitGetPtrLocal(Local.Offset, E))
- return false;
- if (!this->Ctx->emitDestructionPop(Local.Desc, Local.Desc->getLoc()))
- return false;
+ if (!Local.EnabledByDefault) {
+ typename Emitter::LabelTy EndLabel = this->Ctx->getLabel();
+ if (!this->Ctx->emitGetLocalEnabled(Local.Offset, E))
+ return false;
+ if (!this->Ctx->jumpFalse(EndLabel))
+ return false;
+
+ if (!this->Ctx->emitGetPtrLocal(Local.Offset, E))
+ return false;
+
+ if (!this->Ctx->emitDestructionPop(Local.Desc, Local.Desc->getLoc()))
+ return false;
+
+ this->Ctx->emitLabel(EndLabel);
+ } else {
+ if (!this->Ctx->emitGetPtrLocal(Local.Offset, E))
+ return false;
+ if (!this->Ctx->emitDestructionPop(Local.Desc, Local.Desc->getLoc()))
+ return false;
+ }
removeIfStoredOpaqueValue(Local);
}
@@ -668,22 +672,29 @@ public:
~InitLinkScope() { this->Ctx->InitStack.pop_back(); }
-private:
+public:
Compiler<Emitter> *Ctx;
};
template <class Emitter> class InitStackScope final {
public:
InitStackScope(Compiler<Emitter> *Ctx, bool Active)
- : Ctx(Ctx), OldValue(Ctx->InitStackActive) {
+ : Ctx(Ctx), OldValue(Ctx->InitStackActive), Active(Active) {
Ctx->InitStackActive = Active;
+ if (Active)
+ Ctx->InitStack.push_back(InitLink::DIE());
}
- ~InitStackScope() { this->Ctx->InitStackActive = OldValue; }
+ ~InitStackScope() {
+ this->Ctx->InitStackActive = OldValue;
+ if (Active)
+ Ctx->InitStack.pop_back();
+ }
private:
Compiler<Emitter> *Ctx;
bool OldValue;
+ bool Active;
};
} // namespace interp
diff --git a/clang/lib/AST/ByteCode/Context.cpp b/clang/lib/AST/ByteCode/Context.cpp
index 683e916..879d51e 100644
--- a/clang/lib/AST/ByteCode/Context.cpp
+++ b/clang/lib/AST/ByteCode/Context.cpp
@@ -7,18 +7,20 @@
//===----------------------------------------------------------------------===//
#include "Context.h"
+#include "Boolean.h"
#include "ByteCodeEmitter.h"
#include "Compiler.h"
#include "EvalEmitter.h"
-#include "Interp.h"
+#include "Integral.h"
#include "InterpFrame.h"
+#include "InterpHelpers.h"
#include "InterpStack.h"
+#include "Pointer.h"
#include "PrimType.h"
#include "Program.h"
#include "clang/AST/ASTLambda.h"
#include "clang/AST/Expr.h"
#include "clang/Basic/TargetInfo.h"
-#include "llvm/Support/SystemZ/zOSSupport.h"
using namespace clang;
using namespace clang::interp;
@@ -243,6 +245,9 @@ bool Context::evaluateString(State &Parent, const Expr *E,
Compiler<EvalEmitter> C(*this, *P, Parent, Stk);
auto PtrRes = C.interpretAsPointer(E, [&](const Pointer &Ptr) {
+ if (!Ptr.isBlockPointer())
+ return false;
+
const Descriptor *FieldDesc = Ptr.getFieldDesc();
if (!FieldDesc->isPrimitiveArray())
return false;
@@ -283,22 +288,28 @@ bool Context::evaluateString(State &Parent, const Expr *E,
return true;
}
-bool Context::evaluateStrlen(State &Parent, const Expr *E, uint64_t &Result) {
+std::optional<uint64_t> Context::evaluateStrlen(State &Parent, const Expr *E) {
assert(Stk.empty());
Compiler<EvalEmitter> C(*this, *P, Parent, Stk);
+ std::optional<uint64_t> Result;
auto PtrRes = C.interpretAsPointer(E, [&](const Pointer &Ptr) {
+ if (!Ptr.isBlockPointer())
+ return false;
+
const Descriptor *FieldDesc = Ptr.getFieldDesc();
if (!FieldDesc->isPrimitiveArray())
return false;
- if (Ptr.isDummy() || Ptr.isUnknownSizeArray())
+ if (Ptr.isDummy() || Ptr.isUnknownSizeArray() || Ptr.isPastEnd())
return false;
unsigned N = Ptr.getNumElems();
if (Ptr.elemSize() == 1) {
- Result = strnlen(reinterpret_cast<const char *>(Ptr.getRawAddress()), N);
- return Result != N;
+ unsigned Size = N - Ptr.getIndex();
+ Result =
+ strnlen(reinterpret_cast<const char *>(Ptr.getRawAddress()), Size);
+ return Result != Size;
}
PrimType ElemT = FieldDesc->getPrimType();
@@ -308,7 +319,7 @@ bool Context::evaluateStrlen(State &Parent, const Expr *E, uint64_t &Result) {
auto Elem = Ptr.elem<T>(I);
if (Elem.isZero())
return true;
- ++Result;
+ ++(*Result);
});
}
// We didn't find a 0 byte.
@@ -318,9 +329,42 @@ bool Context::evaluateStrlen(State &Parent, const Expr *E, uint64_t &Result) {
if (PtrRes.isInvalid()) {
C.cleanup();
Stk.clear();
+ return std::nullopt;
+ }
+ return Result;
+}
+
+std::optional<uint64_t>
+Context::tryEvaluateObjectSize(State &Parent, const Expr *E, unsigned Kind) {
+ assert(Stk.empty());
+ Compiler<EvalEmitter> C(*this, *P, Parent, Stk);
+
+ std::optional<uint64_t> Result;
+
+ auto PtrRes = C.interpretAsPointer(E, [&](const Pointer &Ptr) {
+ const Descriptor *DeclDesc = Ptr.getDeclDesc();
+ if (!DeclDesc)
+ return false;
+
+ QualType T = DeclDesc->getType().getNonReferenceType();
+ if (T->isIncompleteType() || T->isFunctionType() ||
+ !T->isConstantSizeType())
+ return false;
+
+ Pointer P = Ptr;
+ if (auto ObjectSize = evaluateBuiltinObjectSize(getASTContext(), Kind, P)) {
+ Result = *ObjectSize;
+ return true;
+ }
return false;
+ });
+
+ if (PtrRes.isInvalid()) {
+ C.cleanup();
+ Stk.clear();
+ return std::nullopt;
}
- return true;
+ return Result;
}
const LangOptions &Context::getLangOpts() const { return Ctx.getLangOpts(); }
@@ -516,9 +560,7 @@ const Function *Context::getOrCreateFunction(const FunctionDecl *FuncDecl) {
}
// Set up argument indices.
unsigned ParamOffset = 0;
- SmallVector<PrimType, 8> ParamTypes;
- SmallVector<unsigned, 8> ParamOffsets;
- llvm::DenseMap<unsigned, Function::ParamDescriptor> ParamDescriptors;
+ llvm::SmallVector<Function::ParamDescriptor> ParamDescriptors;
// If the return is not a primitive, a pointer to the storage where the
// value is initialized in is passed as the first argument. See 'RVO'
@@ -527,8 +569,7 @@ const Function *Context::getOrCreateFunction(const FunctionDecl *FuncDecl) {
bool HasRVO = false;
if (!Ty->isVoidType() && !canClassify(Ty)) {
HasRVO = true;
- ParamTypes.push_back(PT_Ptr);
- ParamOffsets.push_back(ParamOffset);
+ ParamDescriptors.emplace_back(nullptr, ParamOffset, PT_Ptr);
ParamOffset += align(primSize(PT_Ptr));
}
@@ -540,8 +581,7 @@ const Function *Context::getOrCreateFunction(const FunctionDecl *FuncDecl) {
if (!IsLambdaStaticInvoker) {
HasThisPointer = MD->isInstance();
if (MD->isImplicitObjectMemberFunction()) {
- ParamTypes.push_back(PT_Ptr);
- ParamOffsets.push_back(ParamOffset);
+ ParamDescriptors.emplace_back(nullptr, ParamOffset, PT_Ptr);
ParamOffset += align(primSize(PT_Ptr));
}
}
@@ -551,42 +591,44 @@ const Function *Context::getOrCreateFunction(const FunctionDecl *FuncDecl) {
// the lambda captures.
if (!MD->getParent()->isCompleteDefinition())
return nullptr;
- llvm::DenseMap<const ValueDecl *, FieldDecl *> LC;
- FieldDecl *LTC;
+ if (MD->isStatic()) {
+ llvm::DenseMap<const ValueDecl *, FieldDecl *> LC;
+ FieldDecl *LTC;
- MD->getParent()->getCaptureFields(LC, LTC);
-
- if (MD->isStatic() && !LC.empty()) {
+ MD->getParent()->getCaptureFields(LC, LTC);
// Static lambdas cannot have any captures. If this one does,
// it has already been diagnosed and we can only ignore it.
- return nullptr;
+ if (!LC.empty())
+ return nullptr;
}
}
}
// Assign descriptors to all parameters.
// Composite objects are lowered to pointers.
- for (const ParmVarDecl *PD : FuncDecl->parameters()) {
+ const auto *FuncProto = FuncDecl->getType()->getAs<FunctionProtoType>();
+ for (auto [ParamIndex, PD] : llvm::enumerate(FuncDecl->parameters())) {
bool IsConst = PD->getType().isConstQualified();
bool IsVolatile = PD->getType().isVolatileQualified();
+ if (!getASTContext().hasSameType(PD->getType(),
+ FuncProto->getParamType(ParamIndex)))
+ return nullptr;
+
OptPrimType T = classify(PD->getType());
PrimType PT = T.value_or(PT_Ptr);
Descriptor *Desc = P->createDescriptor(PD, PT, nullptr, std::nullopt,
IsConst, /*IsTemporary=*/false,
/*IsMutable=*/false, IsVolatile);
-
- ParamDescriptors.insert({ParamOffset, {PT, Desc}});
- ParamOffsets.push_back(ParamOffset);
+ ParamDescriptors.emplace_back(Desc, ParamOffset, PT);
ParamOffset += align(primSize(PT));
- ParamTypes.push_back(PT);
}
// Create a handle over the emitted code.
assert(!P->getFunction(FuncDecl));
- const Function *Func = P->createFunction(
- FuncDecl, ParamOffset, std::move(ParamTypes), std::move(ParamDescriptors),
- std::move(ParamOffsets), HasThisPointer, HasRVO, IsLambdaStaticInvoker);
+ const Function *Func =
+ P->createFunction(FuncDecl, ParamOffset, std::move(ParamDescriptors),
+ HasThisPointer, HasRVO, IsLambdaStaticInvoker);
return Func;
}
@@ -594,9 +636,7 @@ const Function *Context::getOrCreateObjCBlock(const BlockExpr *E) {
const BlockDecl *BD = E->getBlockDecl();
// Set up argument indices.
unsigned ParamOffset = 0;
- SmallVector<PrimType, 8> ParamTypes;
- SmallVector<unsigned, 8> ParamOffsets;
- llvm::DenseMap<unsigned, Function::ParamDescriptor> ParamDescriptors;
+ llvm::SmallVector<Function::ParamDescriptor> ParamDescriptors;
// Assign descriptors to all parameters.
// Composite objects are lowered to pointers.
@@ -609,10 +649,8 @@ const Function *Context::getOrCreateObjCBlock(const BlockExpr *E) {
Descriptor *Desc = P->createDescriptor(PD, PT, nullptr, std::nullopt,
IsConst, /*IsTemporary=*/false,
/*IsMutable=*/false, IsVolatile);
- ParamDescriptors.insert({ParamOffset, {PT, Desc}});
- ParamOffsets.push_back(ParamOffset);
+ ParamDescriptors.emplace_back(Desc, ParamOffset, PT);
ParamOffset += align(primSize(PT));
- ParamTypes.push_back(PT);
}
if (BD->hasCaptures())
@@ -620,8 +658,7 @@ const Function *Context::getOrCreateObjCBlock(const BlockExpr *E) {
// Create a handle over the emitted code.
Function *Func =
- P->createFunction(E, ParamOffset, std::move(ParamTypes),
- std::move(ParamDescriptors), std::move(ParamOffsets),
+ P->createFunction(E, ParamOffset, std::move(ParamDescriptors),
/*HasThisPointer=*/false, /*HasRVO=*/false,
/*IsLambdaStaticInvoker=*/false);
@@ -629,6 +666,7 @@ const Function *Context::getOrCreateObjCBlock(const BlockExpr *E) {
Func->setDefined(true);
// We don't compile the BlockDecl code at all right now.
Func->setIsFullyCompiled(true);
+
return Func;
}
diff --git a/clang/lib/AST/ByteCode/Context.h b/clang/lib/AST/ByteCode/Context.h
index f5fa977..53afafd 100644
--- a/clang/lib/AST/ByteCode/Context.h
+++ b/clang/lib/AST/ByteCode/Context.h
@@ -73,7 +73,20 @@ public:
/// Evalute \param E and if it can be evaluated to a string literal,
/// run strlen() on it.
- bool evaluateStrlen(State &Parent, const Expr *E, uint64_t &Result);
+ std::optional<uint64_t> evaluateStrlen(State &Parent, const Expr *E);
+
+ /// If \param E evaluates to a pointer, the number of accessible bytes
+ /// past the pointer is estimated and returned as if evaluated by
+ /// the builtin function __builtin_object_size. This is a best-effort
+ /// approximation: when (Kind & 2) == 0 the true object size is less
+ /// than or equal to the estimated size, and when (Kind & 2) != 0 the
+ /// true value is greater than or equal to the estimated size.
+ /// When (Kind & 1) != 0 only bytes belonging to the same subobject
+ /// as the one referred to by E are considered; when (Kind & 1) == 0
+ /// bytes belonging to the same storage (stack, heap allocation,
+ /// global variable) are considered.
+ std::optional<uint64_t> tryEvaluateObjectSize(State &Parent, const Expr *E,
+ unsigned Kind);
/// Returns the AST context.
ASTContext &getASTContext() const { return Ctx; }
@@ -98,20 +111,22 @@ public:
return classify(E->getType());
}
- bool canClassify(QualType T) {
+ bool canClassify(QualType T) const {
if (const auto *BT = dyn_cast<BuiltinType>(T)) {
if (BT->isInteger() || BT->isFloatingPoint())
return true;
if (BT->getKind() == BuiltinType::Bool)
return true;
}
+ if (T->isPointerOrReferenceType())
+ return true;
if (T->isArrayType() || T->isRecordType() || T->isAnyComplexType() ||
T->isVectorType())
return false;
return classify(T) != std::nullopt;
}
- bool canClassify(const Expr *E) {
+ bool canClassify(const Expr *E) const {
if (E->isGLValue())
return true;
return canClassify(E->getType());
diff --git a/clang/lib/AST/ByteCode/Descriptor.cpp b/clang/lib/AST/ByteCode/Descriptor.cpp
index 0a81959..a3cee03 100644
--- a/clang/lib/AST/ByteCode/Descriptor.cpp
+++ b/clang/lib/AST/ByteCode/Descriptor.cpp
@@ -52,7 +52,7 @@ static void dtorTy(Block *, std::byte *Ptr, const Descriptor *) {
template <typename T>
static void ctorArrayTy(Block *, std::byte *Ptr, bool, bool, bool, bool, bool,
const Descriptor *D) {
- new (Ptr) InitMapPtr(std::nullopt);
+ new (Ptr) InitMapPtr();
if constexpr (needsCtor<T>()) {
Ptr += sizeof(InitMapPtr);
@@ -65,9 +65,7 @@ static void ctorArrayTy(Block *, std::byte *Ptr, bool, bool, bool, bool, bool,
template <typename T>
static void dtorArrayTy(Block *, std::byte *Ptr, const Descriptor *D) {
InitMapPtr &IMP = *reinterpret_cast<InitMapPtr *>(Ptr);
-
- if (IMP)
- IMP = std::nullopt;
+ IMP.deleteInitMap();
if constexpr (needsCtor<T>()) {
Ptr += sizeof(InitMapPtr);
@@ -435,6 +433,22 @@ QualType Descriptor::getDataType(const ASTContext &Ctx) const {
return getType();
}
+QualType Descriptor::getDataElemType() const {
+ if (const auto *E = asExpr()) {
+ if (isa<CXXNewExpr>(E))
+ return E->getType()->getPointeeType();
+
+ // std::allocator.allocate() call.
+ if (const auto *ME = dyn_cast<CXXMemberCallExpr>(E);
+ ME && ME->getRecordDecl()->getName() == "allocator" &&
+ ME->getMethodDecl()->getName() == "allocate")
+ return E->getType()->getPointeeType();
+ return E->getType();
+ }
+
+ return getType();
+}
+
SourceLocation Descriptor::getLocation() const {
if (auto *D = dyn_cast<const Decl *>(Source))
return D->getLocation();
@@ -467,21 +481,3 @@ bool Descriptor::hasTrivialDtor() const {
}
bool Descriptor::isUnion() const { return isRecord() && ElemRecord->isUnion(); }
-
-InitMap::InitMap(unsigned N)
- : UninitFields(N), Data(std::make_unique<T[]>(numFields(N))) {}
-
-bool InitMap::initializeElement(unsigned I) {
- unsigned Bucket = I / PER_FIELD;
- T Mask = T(1) << (I % PER_FIELD);
- if (!(data()[Bucket] & Mask)) {
- data()[Bucket] |= Mask;
- UninitFields -= 1;
- }
- return UninitFields == 0;
-}
-
-bool InitMap::isElementInitialized(unsigned I) const {
- unsigned Bucket = I / PER_FIELD;
- return data()[Bucket] & (T(1) << (I % PER_FIELD));
-}
diff --git a/clang/lib/AST/ByteCode/Descriptor.h b/clang/lib/AST/ByteCode/Descriptor.h
index 90dc2b4..b052971 100644
--- a/clang/lib/AST/ByteCode/Descriptor.h
+++ b/clang/lib/AST/ByteCode/Descriptor.h
@@ -13,6 +13,7 @@
#ifndef LLVM_CLANG_AST_INTERP_DESCRIPTOR_H
#define LLVM_CLANG_AST_INTERP_DESCRIPTOR_H
+#include "InitMap.h"
#include "PrimType.h"
#include "clang/AST/Decl.h"
#include "clang/AST/Expr.h"
@@ -22,12 +23,10 @@ namespace interp {
class Block;
class Record;
class SourceInfo;
-struct InitMap;
struct Descriptor;
enum PrimType : uint8_t;
using DeclTy = llvm::PointerUnion<const Decl *, const Expr *>;
-using InitMapPtr = std::optional<std::pair<bool, std::shared_ptr<InitMap>>>;
/// Invoked whenever a block is created. The constructor method fills in the
/// inline descriptors of all fields and array elements. It also initializes
@@ -204,6 +203,7 @@ public:
QualType getType() const;
QualType getElemQualType() const;
QualType getDataType(const ASTContext &Ctx) const;
+ QualType getDataElemType() const;
SourceLocation getLocation() const;
SourceInfo getLoc() const;
@@ -227,6 +227,10 @@ public:
return dyn_cast_if_present<RecordDecl>(asDecl());
}
+ template <typename T> const T *getAs() const {
+ return dyn_cast_if_present<T>(asDecl());
+ }
+
/// Returns the size of the object without metadata.
unsigned getSize() const {
assert(!isUnknownSizeArray() && "Array of unknown size");
@@ -276,40 +280,6 @@ public:
void dump(llvm::raw_ostream &OS) const;
void dumpFull(unsigned Offset = 0, unsigned Indent = 0) const;
};
-
-/// Bitfield tracking the initialisation status of elements of primitive arrays.
-struct InitMap final {
-private:
- /// Type packing bits.
- using T = uint64_t;
- /// Bits stored in a single field.
- static constexpr uint64_t PER_FIELD = sizeof(T) * CHAR_BIT;
-
-public:
- /// Initializes the map with no fields set.
- explicit InitMap(unsigned N);
-
-private:
- friend class Pointer;
-
- /// Returns a pointer to storage.
- T *data() { return Data.get(); }
- const T *data() const { return Data.get(); }
-
- /// Initializes an element. Returns true when object if fully initialized.
- bool initializeElement(unsigned I);
-
- /// Checks if an element was initialized.
- bool isElementInitialized(unsigned I) const;
-
- static constexpr size_t numFields(unsigned N) {
- return (N + PER_FIELD - 1) / PER_FIELD;
- }
- /// Number of fields not initialized.
- unsigned UninitFields;
- std::unique_ptr<T[]> Data;
-};
-
} // namespace interp
} // namespace clang
diff --git a/clang/lib/AST/ByteCode/Disasm.cpp b/clang/lib/AST/ByteCode/Disasm.cpp
index fd0903f..35937e3 100644
--- a/clang/lib/AST/ByteCode/Disasm.cpp
+++ b/clang/lib/AST/ByteCode/Disasm.cpp
@@ -138,9 +138,16 @@ static size_t getNumDisplayWidth(size_t N) {
return L;
}
-LLVM_DUMP_METHOD void Function::dump() const { dump(llvm::errs()); }
+LLVM_DUMP_METHOD void Function::dump(CodePtr PC) const {
+ dump(llvm::errs(), PC);
+}
-LLVM_DUMP_METHOD void Function::dump(llvm::raw_ostream &OS) const {
+LLVM_DUMP_METHOD void Function::dump(llvm::raw_ostream &OS,
+ CodePtr OpPC) const {
+ if (OpPC) {
+ assert(OpPC >= getCodeBegin());
+ assert(OpPC <= getCodeEnd());
+ }
{
ColorScope SC(OS, true, {llvm::raw_ostream::BRIGHT_GREEN, true});
OS << getName() << " " << (const void *)this << "\n";
@@ -154,6 +161,7 @@ LLVM_DUMP_METHOD void Function::dump(llvm::raw_ostream &OS) const {
size_t Addr;
std::string Op;
bool IsJump;
+ bool CurrentOp = false;
llvm::SmallVector<std::string> Args;
};
@@ -171,6 +179,7 @@ LLVM_DUMP_METHOD void Function::dump(llvm::raw_ostream &OS) const {
auto Op = PC.read<Opcode>();
Text.Addr = Addr;
Text.IsJump = isJumpOpcode(Op);
+ Text.CurrentOp = (PC == OpPC);
switch (Op) {
#define GET_DISASM
#include "Opcodes.inc"
@@ -198,9 +207,15 @@ LLVM_DUMP_METHOD void Function::dump(llvm::raw_ostream &OS) const {
Text.reserve(Code.size());
size_t LongestLine = 0;
// Print code to a string, one at a time.
- for (auto C : Code) {
+ for (const auto &C : Code) {
std::string Line;
llvm::raw_string_ostream LS(Line);
+ if (OpPC) {
+ if (C.CurrentOp)
+ LS << " * ";
+ else
+ LS << " ";
+ }
LS << C.Addr;
LS.indent(LongestAddr - getNumDisplayWidth(C.Addr) + 4);
LS << C.Op;
@@ -436,8 +451,28 @@ LLVM_DUMP_METHOD void Descriptor::dumpFull(unsigned Offset,
FO += ElemDesc->getAllocSize();
}
+ } else if (isPrimitiveArray()) {
+ OS.indent(Spaces) << "Elements: " << getNumElems() << '\n';
+ OS.indent(Spaces) << "Element type: " << primTypeToString(getPrimType())
+ << '\n';
+ unsigned FO = Offset + sizeof(InitMapPtr);
+ for (unsigned I = 0; I != getNumElems(); ++I) {
+ OS.indent(Spaces) << "Element " << I << " offset: " << FO << '\n';
+ FO += getElemSize();
+ }
} else if (isRecord()) {
ElemRecord->dump(OS, Indent + 1, Offset);
+ unsigned I = 0;
+ for (const Record::Field &F : ElemRecord->fields()) {
+ OS.indent(Spaces) << "- Field " << I << ": ";
+ {
+ ColorScope SC(OS, true, {llvm::raw_ostream::BRIGHT_RED, true});
+ OS << F.Decl->getName();
+ }
+ OS << ". Offset " << (Offset + F.Offset) << "\n";
+ F.Desc->dumpFull(Offset + F.Offset, Indent + 1);
+ ++I;
+ }
} else if (isPrimitive()) {
} else {
}
@@ -484,8 +519,14 @@ LLVM_DUMP_METHOD void InterpFrame::dump(llvm::raw_ostream &OS,
OS << " (" << F->getName() << ")";
}
OS << "\n";
- OS.indent(Spaces) << "This: " << getThis() << "\n";
- OS.indent(Spaces) << "RVO: " << getRVOPtr() << "\n";
+ if (hasThisPointer())
+ OS.indent(Spaces) << "This: " << getThis() << "\n";
+ else
+ OS.indent(Spaces) << "This: -\n";
+ if (Func && Func->hasRVO())
+ OS.indent(Spaces) << "RVO: " << getRVOPtr() << "\n";
+ else
+ OS.indent(Spaces) << "RVO: -\n";
OS.indent(Spaces) << "Depth: " << Depth << "\n";
OS.indent(Spaces) << "ArgSize: " << ArgSize << "\n";
OS.indent(Spaces) << "Args: " << (void *)Args << "\n";
diff --git a/clang/lib/AST/ByteCode/EvalEmitter.cpp b/clang/lib/AST/ByteCode/EvalEmitter.cpp
index 0073217..7c120b9 100644
--- a/clang/lib/AST/ByteCode/EvalEmitter.cpp
+++ b/clang/lib/AST/ByteCode/EvalEmitter.cpp
@@ -110,10 +110,10 @@ Scope::Local EvalEmitter::createLocal(Descriptor *D) {
B->invokeCtor();
// Initialize local variable inline descriptor.
- InlineDescriptor &Desc = *reinterpret_cast<InlineDescriptor *>(B->rawData());
+ auto &Desc = B->getBlockDesc<InlineDescriptor>();
Desc.Desc = D;
Desc.Offset = sizeof(InlineDescriptor);
- Desc.IsActive = true;
+ Desc.IsActive = false;
Desc.IsBase = false;
Desc.IsFieldMutable = false;
Desc.IsConst = false;
@@ -155,6 +155,8 @@ bool EvalEmitter::fallthrough(const LabelTy &Label) {
}
bool EvalEmitter::speculate(const CallExpr *E, const LabelTy &EndLabel) {
+ if (!isActive())
+ return true;
size_t StackSizeBefore = S.Stk.size();
const Expr *Arg = E->getArg(0);
if (!this->visit(Arg)) {
@@ -191,12 +193,6 @@ template <> bool EvalEmitter::emitRet<PT_Ptr>(SourceInfo Info) {
return true;
const Pointer &Ptr = S.Stk.pop<Pointer>();
-
- if (Ptr.isFunctionPointer()) {
- EvalResult.takeValue(Ptr.toAPValue(Ctx.getASTContext()));
- return true;
- }
-
// If we're returning a raw pointer, call our callback.
if (this->PtrCB)
return (*this->PtrCB)(Ptr);
@@ -206,6 +202,12 @@ template <> bool EvalEmitter::emitRet<PT_Ptr>(SourceInfo Info) {
if (CheckFullyInitialized && !EvalResult.checkFullyInitialized(S, Ptr))
return false;
+ // Function pointers are always returned as lvalues.
+ if (Ptr.isFunctionPointer()) {
+ EvalResult.takeValue(Ptr.toAPValue(Ctx.getASTContext()));
+ return true;
+ }
+
// Implicitly convert lvalue to rvalue, if requested.
if (ConvertResultToRValue) {
if (!Ptr.isZero() && !Ptr.isDereferencable())
@@ -291,7 +293,7 @@ bool EvalEmitter::emitGetLocal(uint32_t I, SourceInfo Info) {
if (!CheckLocalLoad(S, OpPC, B))
return false;
- S.Stk.push<T>(*reinterpret_cast<T *>(B->data()));
+ S.Stk.push<T>(B->deref<T>());
return true;
}
@@ -303,8 +305,8 @@ bool EvalEmitter::emitSetLocal(uint32_t I, SourceInfo Info) {
using T = typename PrimConv<OpType>::T;
Block *B = getLocal(I);
- *reinterpret_cast<T *>(B->data()) = S.Stk.pop<T>();
- InlineDescriptor &Desc = *reinterpret_cast<InlineDescriptor *>(B->rawData());
+ B->deref<T>() = S.Stk.pop<T>();
+ auto &Desc = B->getBlockDesc<InlineDescriptor>();
Desc.IsInitialized = true;
return true;
@@ -322,6 +324,32 @@ bool EvalEmitter::emitDestroy(uint32_t I, SourceInfo Info) {
return true;
}
+bool EvalEmitter::emitGetLocalEnabled(uint32_t I, SourceInfo Info) {
+ if (!isActive())
+ return true;
+
+ Block *B = getLocal(I);
+ const auto &Desc = B->getBlockDesc<InlineDescriptor>();
+
+ S.Stk.push<bool>(Desc.IsActive);
+ return true;
+}
+
+bool EvalEmitter::emitEnableLocal(uint32_t I, SourceInfo Info) {
+ if (!isActive())
+ return true;
+
+ // FIXME: This is a little dirty, but to avoid adding a flag to
+ // InlineDescriptor that's only ever useful on the toplevel of local
+ // variables, we reuse the IsActive flag for the enabled state. We should
+ // probably use a different struct than InlineDescriptor for the block-level
+ // inline descriptor of local variables.
+ Block *B = getLocal(I);
+ auto &Desc = B->getBlockDesc<InlineDescriptor>();
+ Desc.IsActive = true;
+ return true;
+}
+
/// Global temporaries (LifetimeExtendedTemporary) carry their value
/// around as an APValue, which codegen accesses.
/// We set their value once when creating them, but we don't update it
diff --git a/clang/lib/AST/ByteCode/Floating.h b/clang/lib/AST/ByteCode/Floating.h
index 659892e..cc918dc 100644
--- a/clang/lib/AST/ByteCode/Floating.h
+++ b/clang/lib/AST/ByteCode/Floating.h
@@ -45,7 +45,8 @@ private:
if (singleWord())
return APFloat(getSemantics(), APInt(BitWidth, Val));
unsigned NumWords = numWords();
- return APFloat(getSemantics(), APInt(BitWidth, NumWords, Memory));
+ return APFloat(getSemantics(),
+ APInt(BitWidth, llvm::ArrayRef(Memory, NumWords)));
}
public:
diff --git a/clang/lib/AST/ByteCode/Function.cpp b/clang/lib/AST/ByteCode/Function.cpp
index a513be5..56d08a6 100644
--- a/clang/lib/AST/ByteCode/Function.cpp
+++ b/clang/lib/AST/ByteCode/Function.cpp
@@ -16,19 +16,21 @@ using namespace clang;
using namespace clang::interp;
Function::Function(Program &P, FunctionDeclTy Source, unsigned ArgSize,
- llvm::SmallVectorImpl<PrimType> &&ParamTypes,
- llvm::DenseMap<unsigned, ParamDescriptor> &&Params,
- llvm::SmallVectorImpl<unsigned> &&ParamOffsets,
+ llvm::SmallVectorImpl<ParamDescriptor> &&ParamDescriptors,
bool HasThisPointer, bool HasRVO, bool IsLambdaStaticInvoker)
: P(P), Kind(FunctionKind::Normal), Source(Source), ArgSize(ArgSize),
- ParamTypes(std::move(ParamTypes)), Params(std::move(Params)),
- ParamOffsets(std::move(ParamOffsets)), IsValid(false),
+ ParamDescriptors(std::move(ParamDescriptors)), IsValid(false),
IsFullyCompiled(false), HasThisPointer(HasThisPointer), HasRVO(HasRVO),
HasBody(false), Defined(false) {
+ for (ParamDescriptor PD : this->ParamDescriptors) {
+ Params.insert({PD.Offset, PD});
+ }
+ assert(Params.size() == this->ParamDescriptors.size());
+
if (const auto *F = dyn_cast<const FunctionDecl *>(Source)) {
Variadic = F->isVariadic();
Immediate = F->isImmediateFunction();
- Constexpr = F->isConstexpr() || F->hasAttr<MSConstexprAttr>();
+ Constexpr = F->isConstexpr();
if (const auto *CD = dyn_cast<CXXConstructorDecl>(F)) {
Virtual = CD->isVirtual();
Kind = FunctionKind::Ctor;
diff --git a/clang/lib/AST/ByteCode/Function.h b/clang/lib/AST/ByteCode/Function.h
index 95add58..544172b 100644
--- a/clang/lib/AST/ByteCode/Function.h
+++ b/clang/lib/AST/ByteCode/Function.h
@@ -41,6 +41,8 @@ public:
unsigned Offset;
/// Descriptor of the local.
Descriptor *Desc;
+ /// If the cleanup for this local should be emitted.
+ bool EnabledByDefault = true;
};
using LocalVectorTy = llvm::SmallVector<Local, 8>;
@@ -83,6 +85,17 @@ using FunctionDeclTy =
/// After the function has been called, it will remove all arguments,
/// including RVO and This pointer, from the stack.
///
+/// The parameters saved in a clang::interp::Function include both the
+/// instance pointer as well as the RVO pointer.
+///
+/// \verbatim
+/// Stack position when calling ─────┐
+/// this Function │
+/// ▼
+/// ┌─────┬──────┬────────┬────────┬─────┬────────────────────┐
+/// │ RVO │ This │ Param1 │ Param2 │ ... │ │
+/// └─────┴──────┴────────┴────────┴─────┴────────────────────┘
+/// \endverbatim
class Function final {
public:
enum class FunctionKind {
@@ -93,7 +106,14 @@ public:
LambdaCallOperator,
CopyOrMoveOperator,
};
- using ParamDescriptor = std::pair<PrimType, Descriptor *>;
+
+ struct ParamDescriptor {
+ const Descriptor *Desc;
+ unsigned Offset;
+ PrimType T;
+ ParamDescriptor(const Descriptor *Desc, unsigned Offset, PrimType T)
+ : Desc(Desc), Offset(Offset), T(T) {}
+ };
/// Returns the size of the function's local stack.
unsigned getFrameSize() const { return FrameSize; }
@@ -138,9 +158,9 @@ public:
/// Range over argument types.
using arg_reverse_iterator =
- SmallVectorImpl<PrimType>::const_reverse_iterator;
+ SmallVectorImpl<ParamDescriptor>::const_reverse_iterator;
llvm::iterator_range<arg_reverse_iterator> args_reverse() const {
- return llvm::reverse(ParamTypes);
+ return llvm::reverse(ParamDescriptors);
}
/// Returns a specific scope.
@@ -200,7 +220,7 @@ public:
bool isVariadic() const { return Variadic; }
- unsigned getNumParams() const { return ParamTypes.size(); }
+ unsigned getNumParams() const { return ParamDescriptors.size(); }
/// Returns the number of parameter this function takes when it's called,
/// i.e excluding the instance pointer and the RVO pointer.
@@ -220,31 +240,30 @@ public:
}
unsigned getParamOffset(unsigned ParamIndex) const {
- return ParamOffsets[ParamIndex];
+ return ParamDescriptors[ParamIndex].Offset;
}
PrimType getParamType(unsigned ParamIndex) const {
- return ParamTypes[ParamIndex];
+ return ParamDescriptors[ParamIndex].T;
}
private:
/// Construct a function representing an actual function.
Function(Program &P, FunctionDeclTy Source, unsigned ArgSize,
- llvm::SmallVectorImpl<PrimType> &&ParamTypes,
- llvm::DenseMap<unsigned, ParamDescriptor> &&Params,
- llvm::SmallVectorImpl<unsigned> &&ParamOffsets, bool HasThisPointer,
- bool HasRVO, bool IsLambdaStaticInvoker);
+ llvm::SmallVectorImpl<ParamDescriptor> &&ParamDescriptors,
+ bool HasThisPointer, bool HasRVO, bool IsLambdaStaticInvoker);
/// Sets the code of a function.
void setCode(FunctionDeclTy Source, unsigned NewFrameSize,
llvm::SmallVector<std::byte> &&NewCode, SourceMap &&NewSrcMap,
- llvm::SmallVector<Scope, 2> &&NewScopes, bool NewHasBody) {
+ llvm::SmallVector<Scope, 2> &&NewScopes, bool NewHasBody,
+ bool NewIsValid) {
this->Source = Source;
FrameSize = NewFrameSize;
Code = std::move(NewCode);
SrcMap = std::move(NewSrcMap);
Scopes = std::move(NewScopes);
- IsValid = true;
+ IsValid = NewIsValid;
HasBody = NewHasBody;
}
@@ -272,12 +291,10 @@ private:
SourceMap SrcMap;
/// List of block descriptors.
llvm::SmallVector<Scope, 2> Scopes;
- /// List of argument types.
- llvm::SmallVector<PrimType, 8> ParamTypes;
- /// Map from byte offset to parameter descriptor.
+ /// List of all parameters, including RVO and instance pointer.
+ llvm::SmallVector<ParamDescriptor> ParamDescriptors;
+ /// Map from Parameter offset to parameter descriptor.
llvm::DenseMap<unsigned, ParamDescriptor> Params;
- /// List of parameter offsets.
- llvm::SmallVector<unsigned, 8> ParamOffsets;
/// Flag to indicate if the function is valid.
LLVM_PREFERRED_TYPE(bool)
unsigned IsValid : 1;
@@ -310,8 +327,8 @@ private:
public:
/// Dumps the disassembled bytecode to \c llvm::errs().
- void dump() const;
- void dump(llvm::raw_ostream &OS) const;
+ void dump(CodePtr PC = {}) const;
+ void dump(llvm::raw_ostream &OS, CodePtr PC = {}) const;
};
} // namespace interp
diff --git a/clang/lib/AST/ByteCode/InitMap.cpp b/clang/lib/AST/ByteCode/InitMap.cpp
new file mode 100644
index 0000000..2fcb61d
--- /dev/null
+++ b/clang/lib/AST/ByteCode/InitMap.cpp
@@ -0,0 +1,54 @@
+//===----------------------- InitMap.cpp ------------------------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+#include "InitMap.h"
+
+using namespace clang;
+using namespace clang::interp;
+
+bool InitMap::initializeElement(unsigned I) {
+ unsigned Bucket = I / PER_FIELD;
+ T Mask = T(1) << (I % PER_FIELD);
+ if (!(data()[Bucket] & Mask)) {
+ data()[Bucket] |= Mask;
+ UninitFields -= 1;
+ }
+ return UninitFields == 0;
+}
+
+bool InitMap::isElementInitialized(unsigned I) const {
+ if (UninitFields == 0)
+ return true;
+ unsigned Bucket = I / PER_FIELD;
+ return data()[Bucket] & (T(1) << (I % PER_FIELD));
+}
+
+// Values in the second half of data() are inverted,
+// i.e. 0 means "lifetime started".
+void InitMap::startElementLifetime(unsigned I) {
+ unsigned LifetimeIndex = NumElems + I;
+
+ unsigned Bucket = numFields(NumElems) / 2 + (I / PER_FIELD);
+ T Mask = T(1) << (LifetimeIndex % PER_FIELD);
+ if ((data()[Bucket] & Mask)) {
+ data()[Bucket] &= ~Mask;
+ --DeadFields;
+ }
+}
+
+// Values in the second half of data() are inverted,
+// i.e. 0 means "lifetime started".
+void InitMap::endElementLifetime(unsigned I) {
+ unsigned LifetimeIndex = NumElems + I;
+
+ unsigned Bucket = numFields(NumElems) / 2 + (I / PER_FIELD);
+ T Mask = T(1) << (LifetimeIndex % PER_FIELD);
+ if (!(data()[Bucket] & Mask)) {
+ data()[Bucket] |= Mask;
+ ++DeadFields;
+ }
+}
diff --git a/clang/lib/AST/ByteCode/InitMap.h b/clang/lib/AST/ByteCode/InitMap.h
new file mode 100644
index 0000000..b11c305
--- /dev/null
+++ b/clang/lib/AST/ByteCode/InitMap.h
@@ -0,0 +1,123 @@
+//===----------------------- InitMap.h --------------------------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_CLANG_AST_INTERP_INIT_MAP_H
+#define LLVM_CLANG_AST_INTERP_INIT_MAP_H
+
+#include <cassert>
+#include <climits>
+#include <cstdint>
+#include <limits>
+#include <memory>
+
+namespace clang {
+namespace interp {
+
+/// Bitfield tracking the initialisation status of elements of primitive arrays.
+struct InitMap final {
+private:
+ /// Type packing bits.
+ using T = uint64_t;
+ /// Bits stored in a single field.
+ static constexpr uint64_t PER_FIELD = sizeof(T) * CHAR_BIT;
+ /// Number of elements in the array this map tracks.
+ unsigned NumElems;
+ /// Number of fields not initialized.
+ unsigned UninitFields;
+ unsigned DeadFields = 0;
+ std::unique_ptr<T[]> Data;
+
+public:
+ /// Initializes the map with no fields set.
+ explicit InitMap(unsigned N)
+ : NumElems(N), UninitFields(N),
+ Data(std::make_unique<T[]>(numFields(N))) {}
+ explicit InitMap(unsigned N, bool AllInitialized)
+ : NumElems(N), UninitFields(AllInitialized ? 0 : N),
+ Data(std::make_unique<T[]>(numFields(N))) {
+ if (AllInitialized) {
+ for (unsigned I = 0; I != (numFields(N) / 2); ++I)
+ Data[I] = std::numeric_limits<T>::max();
+ }
+ }
+
+ void startElementLifetime(unsigned I);
+ void endElementLifetime(unsigned I);
+
+ bool isElementAlive(unsigned I) const {
+ unsigned LifetimeIndex = (NumElems + I);
+ unsigned Bucket = numFields(NumElems) / 2 + (I / PER_FIELD);
+ return !(data()[Bucket] & (T(1) << (LifetimeIndex % PER_FIELD)));
+ }
+
+ bool allElementsAlive() const { return DeadFields == 0; }
+
+ /// Initializes an element. Returns true when the object is fully initialized.
+ bool initializeElement(unsigned I);
+
+ /// Checks if an element was initialized.
+ bool isElementInitialized(unsigned I) const;
+
+private:
+ /// Returns a pointer to storage.
+ T *data() { return Data.get(); }
+ const T *data() const { return Data.get(); }
+
+ static constexpr size_t numFields(unsigned N) {
+ return ((N + PER_FIELD - 1) / PER_FIELD) * 2;
+ }
+};
+
+/// A pointer-sized struct we use to allocate into data storage.
+/// An InitMapPtr is either backed by an actual InitMap, or it
+/// holds information about the absence of the InitMap.
+struct InitMapPtr final {
+ /// V's value before an initmap has been created.
+ static constexpr intptr_t NoInitMapValue = 0;
+ /// V's value after the initmap has been destroyed because
+ /// all its elements have already been initialized.
+ static constexpr intptr_t AllInitializedValue = 1;
+ uintptr_t V = 0;
+
+ explicit InitMapPtr() = default;
+ bool hasInitMap() const {
+ return V != NoInitMapValue && V != AllInitializedValue;
+ }
+ /// Are all elements in the array already initialized?
+ bool allInitialized() const { return V == AllInitializedValue; }
+
+ void setInitMap(const InitMap *IM) {
+ assert(IM != nullptr);
+ V = reinterpret_cast<uintptr_t>(IM);
+ assert(hasInitMap());
+ }
+
+ void noteAllInitialized() {
+ if (hasInitMap())
+ delete (operator->)();
+ V = AllInitializedValue;
+ }
+
+ /// Access the underlying InitMap directly.
+ InitMap *operator->() {
+ assert(hasInitMap());
+ return reinterpret_cast<InitMap *>(V);
+ }
+
+ /// Delete the InitMap if one exists.
+ void deleteInitMap() {
+ if (hasInitMap())
+ delete (operator->)();
+ V = NoInitMapValue;
+ };
+};
+static_assert(sizeof(InitMapPtr) == sizeof(void *));
+} // namespace interp
+} // namespace clang
+
+#endif
diff --git a/clang/lib/AST/ByteCode/Integral.h b/clang/lib/AST/ByteCode/Integral.h
index 1318024..e90f1a9 100644
--- a/clang/lib/AST/ByteCode/Integral.h
+++ b/clang/lib/AST/ByteCode/Integral.h
@@ -202,30 +202,21 @@ public:
static Integral min(unsigned NumBits) { return Integral(Min); }
static Integral max(unsigned NumBits) { return Integral(Max); }
+ static Integral zero(unsigned BitWidth = 0) { return from(0); }
- template <typename ValT> static Integral from(ValT Value) {
- if constexpr (std::is_integral<ValT>::value)
+ template <typename ValT>
+ static Integral from(ValT Value, unsigned NumBits = 0) {
+ if constexpr (std::is_integral_v<ValT>)
return Integral(Value);
else
- return Integral::from(static_cast<Integral::ReprT>(Value));
+ return Integral(static_cast<Integral::ReprT>(Value));
}
template <unsigned SrcBits, bool SrcSign>
- static std::enable_if_t<SrcBits != 0, Integral>
- from(Integral<SrcBits, SrcSign> Value) {
+ static Integral from(Integral<SrcBits, SrcSign> Value) {
return Integral(Value.V);
}
- static Integral zero(unsigned BitWidth = 0) { return from(0); }
-
- template <typename T> static Integral from(T Value, unsigned NumBits) {
- return Integral(Value);
- }
-
- static bool inRange(int64_t Value, unsigned NumBits) {
- return CheckRange<ReprT, Min, Max>(Value);
- }
-
static bool increment(Integral A, Integral *R) {
return add(A, Integral(ReprT(1)), A.bitWidth(), R);
}
@@ -328,13 +319,6 @@ private:
return false;
}
}
- template <typename T, T Min, T Max> static bool CheckRange(int64_t V) {
- if constexpr (std::is_signed_v<T>) {
- return Min <= V && V <= Max;
- } else {
- return V >= 0 && static_cast<uint64_t>(V) <= Max;
- }
- }
};
template <unsigned Bits, bool Signed>
diff --git a/clang/lib/AST/ByteCode/IntegralAP.h b/clang/lib/AST/ByteCode/IntegralAP.h
index 6683db9..b11e6ee 100644
--- a/clang/lib/AST/ByteCode/IntegralAP.h
+++ b/clang/lib/AST/ByteCode/IntegralAP.h
@@ -63,7 +63,7 @@ public:
if (singleWord())
return APInt(BitWidth, Val, Signed);
unsigned NumWords = llvm::APInt::getNumWords(BitWidth);
- return llvm::APInt(BitWidth, NumWords, Memory);
+ return llvm::APInt(BitWidth, llvm::ArrayRef(Memory, NumWords));
}
public:
diff --git a/clang/lib/AST/ByteCode/Interp.cpp b/clang/lib/AST/ByteCode/Interp.cpp
index a72282c..d6939c0 100644
--- a/clang/lib/AST/ByteCode/Interp.cpp
+++ b/clang/lib/AST/ByteCode/Interp.cpp
@@ -38,21 +38,21 @@ static bool RetValue(InterpState &S, CodePtr &Pt) {
static bool Jmp(InterpState &S, CodePtr &PC, int32_t Offset) {
PC += Offset;
- return true;
+ return S.noteStep(PC);
}
static bool Jt(InterpState &S, CodePtr &PC, int32_t Offset) {
if (S.Stk.pop<bool>()) {
PC += Offset;
}
- return true;
+ return S.noteStep(PC);
}
static bool Jf(InterpState &S, CodePtr &PC, int32_t Offset) {
if (!S.Stk.pop<bool>()) {
PC += Offset;
}
- return true;
+ return S.noteStep(PC);
}
// https://github.com/llvm/llvm-project/issues/102513
@@ -137,21 +137,26 @@ static void diagnoseNonConstVariable(InterpState &S, CodePtr OpPC,
static bool diagnoseUnknownDecl(InterpState &S, CodePtr OpPC,
const ValueDecl *D) {
// This function tries pretty hard to produce a good diagnostic. Just skip
- // tha if nobody will see it anyway.
+ // that if nobody will see it anyway.
if (!S.diagnosing())
return false;
if (isa<ParmVarDecl>(D)) {
if (D->getType()->isReferenceType()) {
if (S.inConstantContext() && S.getLangOpts().CPlusPlus &&
- !S.getLangOpts().CPlusPlus11)
+ !S.getLangOpts().CPlusPlus11) {
diagnoseNonConstVariable(S, OpPC, D);
- return false;
+ return false;
+ }
}
const SourceInfo &Loc = S.Current->getSource(OpPC);
- if (S.getLangOpts().CPlusPlus11) {
- S.FFDiag(Loc, diag::note_constexpr_function_param_value_unknown) << D;
+ if (S.getLangOpts().CPlusPlus23 && D->getType()->isReferenceType()) {
+ S.FFDiag(Loc, diag::note_constexpr_access_unknown_variable, 1)
+ << AK_Read << D;
+ S.Note(D->getLocation(), diag::note_declared_at) << D->getSourceRange();
+ } else if (S.getLangOpts().CPlusPlus11) {
+ S.FFDiag(Loc, diag::note_constexpr_function_param_value_unknown, 1) << D;
S.Note(D->getLocation(), diag::note_declared_at) << D->getSourceRange();
} else {
S.FFDiag(Loc);
@@ -291,8 +296,8 @@ void cleanupAfterFunctionCall(InterpState &S, CodePtr OpPC,
// And in any case, remove the fixed parameters (the non-variadic ones)
// at the end.
- for (PrimType Ty : Func->args_reverse())
- TYPE_SWITCH(Ty, S.Stk.discard<T>());
+ for (const Function::ParamDescriptor &PDesc : Func->args_reverse())
+ TYPE_SWITCH(PDesc.T, S.Stk.discard<T>());
}
bool isConstexprUnknown(const Pointer &P) {
@@ -326,12 +331,13 @@ bool CheckBCPResult(InterpState &S, const Pointer &Ptr) {
}
bool CheckActive(InterpState &S, CodePtr OpPC, const Pointer &Ptr,
- AccessKinds AK) {
+ AccessKinds AK, bool WillActivate) {
if (Ptr.isActive())
return true;
assert(Ptr.inUnion());
+ // Find the outermost union.
Pointer U = Ptr.getBase();
Pointer C = Ptr;
while (!U.isRoot() && !U.isActive()) {
@@ -346,6 +352,7 @@ bool CheckActive(InterpState &S, CodePtr OpPC, const Pointer &Ptr,
U = U.getBase();
}
assert(C.isField());
+ assert(C.getBase() == U);
// Consider:
// union U {
@@ -362,6 +369,25 @@ bool CheckActive(InterpState &S, CodePtr OpPC, const Pointer &Ptr,
if (!U.getFieldDesc()->isUnion())
return true;
+ // If we are going to activate Ptr, check that none of the unions in its
+ + path have a non-trivial default constructor.
+ if (WillActivate) {
+ bool Fails = false;
+ Pointer It = Ptr;
+ while (!It.isRoot() && !It.isActive()) {
+ if (const Record *R = It.getRecord(); R && R->isUnion()) {
+ if (const auto *CXXRD = dyn_cast<CXXRecordDecl>(R->getDecl());
+ CXXRD && !CXXRD->hasTrivialDefaultConstructor()) {
+ Fails = true;
+ break;
+ }
+ }
+ It = It.getBase();
+ }
+ if (!Fails)
+ return true;
+ }
+
// Get the inactive field descriptor.
assert(!C.isActive());
const FieldDecl *InactiveField = C.getField();
@@ -390,8 +416,9 @@ bool CheckExtern(InterpState &S, CodePtr OpPC, const Pointer &Ptr) {
if (!Ptr.isExtern())
return true;
- if (Ptr.isInitialized() ||
- (Ptr.getDeclDesc()->asVarDecl() == S.EvaluatingDecl))
+ if (!Ptr.isPastEnd() &&
+ (Ptr.isInitialized() ||
+ (Ptr.getDeclDesc()->asVarDecl() == S.EvaluatingDecl)))
return true;
if (S.checkingPotentialConstantExpression() && S.getLangOpts().CPlusPlus &&
@@ -431,7 +458,8 @@ bool CheckLive(InterpState &S, CodePtr OpPC, const Pointer &Ptr,
S.FFDiag(Src, diag::note_constexpr_access_deleted_object) << AK;
} else if (!S.checkingPotentialConstantExpression()) {
bool IsTemp = Ptr.isTemporary();
- S.FFDiag(Src, diag::note_constexpr_lifetime_ended, 1) << AK << !IsTemp;
+ S.FFDiag(Src, diag::note_constexpr_access_uninit)
+ << AK << /*uninitialized=*/false << S.Current->getRange(OpPC);
if (IsTemp)
S.Note(Ptr.getDeclLoc(), diag::note_constexpr_temporary_here);
@@ -626,6 +654,10 @@ static bool CheckVolatile(InterpState &S, CodePtr OpPC, const Pointer &Ptr,
if (!S.getLangOpts().CPlusPlus)
return Invalid(S, OpPC);
+ // Volatile objects can be written to and read if they are being constructed.
+ if (llvm::is_contained(S.InitializingBlocks, Ptr.block()))
+ return true;
+
// The reason why Ptr is volatile might be further up the hierarchy.
// Find that pointer.
Pointer P = Ptr;
@@ -736,8 +768,7 @@ static bool CheckWeak(InterpState &S, CodePtr OpPC, const Block *B) {
// For example, since those can't be members of structs, they also can't
// be mutable.
bool CheckGlobalLoad(InterpState &S, CodePtr OpPC, const Block *B) {
- const auto &Desc =
- *reinterpret_cast<const GlobalInlineDescriptor *>(B->rawData());
+ const auto &Desc = B->getBlockDesc<GlobalInlineDescriptor>();
if (!B->isAccessible()) {
if (!CheckExtern(S, OpPC, Pointer(const_cast<Block *>(B))))
return false;
@@ -832,6 +863,8 @@ bool CheckLoad(InterpState &S, CodePtr OpPC, const Pointer &Ptr,
return false;
if (!CheckVolatile(S, OpPC, Ptr, AK))
return false;
+ if (!Ptr.isConst() && !S.inConstantContext() && isConstexprUnknown(Ptr))
+ return false;
return true;
}
@@ -868,7 +901,8 @@ bool CheckFinalLoad(InterpState &S, CodePtr OpPC, const Pointer &Ptr) {
return true;
}
-bool CheckStore(InterpState &S, CodePtr OpPC, const Pointer &Ptr) {
+bool CheckStore(InterpState &S, CodePtr OpPC, const Pointer &Ptr,
+ bool WillBeActivated) {
if (!Ptr.isBlockPointer() || Ptr.isZero())
return false;
@@ -883,7 +917,7 @@ bool CheckStore(InterpState &S, CodePtr OpPC, const Pointer &Ptr) {
return false;
if (!CheckRange(S, OpPC, Ptr, AK_Assign))
return false;
- if (!CheckActive(S, OpPC, Ptr, AK_Assign))
+ if (!CheckActive(S, OpPC, Ptr, AK_Assign, WillBeActivated))
return false;
if (!CheckGlobal(S, OpPC, Ptr))
return false;
@@ -916,36 +950,19 @@ bool CheckInit(InterpState &S, CodePtr OpPC, const Pointer &Ptr) {
return true;
}
-static bool CheckCallable(InterpState &S, CodePtr OpPC, const Function *F) {
-
- if (F->isVirtual() && !S.getLangOpts().CPlusPlus20) {
- const SourceLocation &Loc = S.Current->getLocation(OpPC);
- S.CCEDiag(Loc, diag::note_constexpr_virtual_call);
- return false;
- }
-
- if (S.checkingPotentialConstantExpression() && S.Current->getDepth() != 0)
- return false;
-
- if (F->isValid() && F->hasBody() && F->isConstexpr())
- return true;
-
- // Implicitly constexpr.
- if (F->isLambdaStaticInvoker())
- return true;
-
+static bool diagnoseCallableDecl(InterpState &S, CodePtr OpPC,
+ const FunctionDecl *DiagDecl) {
// Bail out if the function declaration itself is invalid. We will
// have produced a relevant diagnostic while parsing it, so just
// note the problematic sub-expression.
- if (F->getDecl()->isInvalidDecl())
+ if (DiagDecl->isInvalidDecl())
return Invalid(S, OpPC);
// Diagnose failed assertions specially.
- if (S.Current->getLocation(OpPC).isMacroID() &&
- F->getDecl()->getIdentifier()) {
+ if (S.Current->getLocation(OpPC).isMacroID() && DiagDecl->getIdentifier()) {
// FIXME: Instead of checking for an implementation-defined function,
// check and evaluate the assert() macro.
- StringRef Name = F->getDecl()->getName();
+ StringRef Name = DiagDecl->getName();
bool AssertFailed =
Name == "__assert_rtn" || Name == "__assert_fail" || Name == "_wassert";
if (AssertFailed) {
@@ -955,67 +972,95 @@ static bool CheckCallable(InterpState &S, CodePtr OpPC, const Function *F) {
}
}
- if (S.getLangOpts().CPlusPlus11) {
- const FunctionDecl *DiagDecl = F->getDecl();
+ if (!S.getLangOpts().CPlusPlus11) {
+ S.FFDiag(S.Current->getLocation(OpPC),
+ diag::note_invalid_subexpr_in_const_expr);
+ return false;
+ }
- // Invalid decls have been diagnosed before.
- if (DiagDecl->isInvalidDecl())
- return false;
+ // Invalid decls have been diagnosed before.
+ if (DiagDecl->isInvalidDecl())
+ return false;
- // If this function is not constexpr because it is an inherited
- // non-constexpr constructor, diagnose that directly.
- const auto *CD = dyn_cast<CXXConstructorDecl>(DiagDecl);
- if (CD && CD->isInheritingConstructor()) {
- const auto *Inherited = CD->getInheritedConstructor().getConstructor();
- if (!Inherited->isConstexpr())
- DiagDecl = CD = Inherited;
- }
+ // If this function is not constexpr because it is an inherited
+ // non-constexpr constructor, diagnose that directly.
+ const auto *CD = dyn_cast<CXXConstructorDecl>(DiagDecl);
+ if (CD && CD->isInheritingConstructor()) {
+ const auto *Inherited = CD->getInheritedConstructor().getConstructor();
+ if (!Inherited->isConstexpr())
+ DiagDecl = CD = Inherited;
+ }
+
+ // Silently reject constructors of invalid classes. The invalid class
+ // has been rejected elsewhere before.
+ if (CD && CD->getParent()->isInvalidDecl())
+ return false;
+
+ // FIXME: If DiagDecl is an implicitly-declared special member function
+ // or an inheriting constructor, we should be much more explicit about why
+ // it's not constexpr.
+ if (CD && CD->isInheritingConstructor()) {
+ S.FFDiag(S.Current->getLocation(OpPC), diag::note_constexpr_invalid_inhctor,
+ 1)
+ << CD->getInheritedConstructor().getConstructor()->getParent();
+ S.Note(DiagDecl->getLocation(), diag::note_declared_at);
+ } else {
+ // Don't emit anything if the function isn't defined and we're checking
+ // for a constant expression. It might be defined at the point we're
+ // actually calling it.
+ bool IsExtern = DiagDecl->getStorageClass() == SC_Extern;
+ bool IsDefined = DiagDecl->isDefined();
+ if (!IsDefined && !IsExtern && DiagDecl->isConstexpr() &&
+ S.checkingPotentialConstantExpression())
+ return false;
- // Silently reject constructors of invalid classes. The invalid class
- // has been rejected elsewhere before.
- if (CD && CD->getParent()->isInvalidDecl())
+ // If the declaration is defined, declared 'constexpr' _and_ has a body,
+ // the below diagnostic doesn't add anything useful.
+ if (DiagDecl->isDefined() && DiagDecl->isConstexpr() && DiagDecl->hasBody())
return false;
- // FIXME: If DiagDecl is an implicitly-declared special member function
- // or an inheriting constructor, we should be much more explicit about why
- // it's not constexpr.
- if (CD && CD->isInheritingConstructor()) {
- S.FFDiag(S.Current->getLocation(OpPC),
- diag::note_constexpr_invalid_inhctor, 1)
- << CD->getInheritedConstructor().getConstructor()->getParent();
+ S.FFDiag(S.Current->getLocation(OpPC),
+ diag::note_constexpr_invalid_function, 1)
+ << DiagDecl->isConstexpr() << (bool)CD << DiagDecl;
+
+ if (DiagDecl->getDefinition())
+ S.Note(DiagDecl->getDefinition()->getLocation(), diag::note_declared_at);
+ else
S.Note(DiagDecl->getLocation(), diag::note_declared_at);
- } else {
- // Don't emit anything if the function isn't defined and we're checking
- // for a constant expression. It might be defined at the point we're
- // actually calling it.
- bool IsExtern = DiagDecl->getStorageClass() == SC_Extern;
- bool IsDefined = F->isDefined();
- if (!IsDefined && !IsExtern && DiagDecl->isConstexpr() &&
- S.checkingPotentialConstantExpression())
- return false;
+ }
- // If the declaration is defined, declared 'constexpr' _and_ has a body,
- // the below diagnostic doesn't add anything useful.
- if (DiagDecl->isDefined() && DiagDecl->isConstexpr() &&
- DiagDecl->hasBody())
- return false;
+ return false;
+}
- S.FFDiag(S.Current->getLocation(OpPC),
- diag::note_constexpr_invalid_function, 1)
- << DiagDecl->isConstexpr() << (bool)CD << DiagDecl;
+static bool CheckCallable(InterpState &S, CodePtr OpPC, const Function *F) {
+ if (F->isVirtual() && !S.getLangOpts().CPlusPlus20) {
+ const SourceLocation &Loc = S.Current->getLocation(OpPC);
+ S.CCEDiag(Loc, diag::note_constexpr_virtual_call);
+ return false;
+ }
- if (DiagDecl->getDefinition())
- S.Note(DiagDecl->getDefinition()->getLocation(),
- diag::note_declared_at);
- else
- S.Note(DiagDecl->getLocation(), diag::note_declared_at);
- }
- } else {
- S.FFDiag(S.Current->getLocation(OpPC),
- diag::note_invalid_subexpr_in_const_expr);
+ if (S.checkingPotentialConstantExpression() && S.Current->getDepth() != 0)
+ return false;
+
+ if (F->isValid() && F->hasBody() &&
+ (F->isConstexpr() || (S.Current->MSVCConstexprAllowed &&
+ F->getDecl()->hasAttr<MSConstexprAttr>())))
+ return true;
+
+ const FunctionDecl *DiagDecl = F->getDecl();
+ const FunctionDecl *Definition = nullptr;
+ DiagDecl->getBody(Definition);
+
+ if (!Definition && S.checkingPotentialConstantExpression() &&
+ DiagDecl->isConstexpr()) {
+ return false;
}
- return false;
+ // Implicitly constexpr.
+ if (F->isLambdaStaticInvoker())
+ return true;
+
+ return diagnoseCallableDecl(S, OpPC, DiagDecl);
}
static bool CheckCallDepth(InterpState &S, CodePtr OpPC) {
@@ -1153,6 +1198,21 @@ bool CheckDeclRef(InterpState &S, CodePtr OpPC, const DeclRefExpr *DR) {
return diagnoseUnknownDecl(S, OpPC, D);
}
+bool InvalidDeclRef(InterpState &S, CodePtr OpPC, const DeclRefExpr *DR,
+ bool InitializerFailed) {
+ assert(DR);
+
+ if (InitializerFailed) {
+ const SourceInfo &Loc = S.Current->getSource(OpPC);
+ const auto *VD = cast<VarDecl>(DR->getDecl());
+ S.FFDiag(Loc, diag::note_constexpr_var_init_non_constant, 1) << VD;
+ S.Note(VD->getLocation(), diag::note_declared_at);
+ return false;
+ }
+
+ return CheckDeclRef(S, OpPC, DR);
+}
+
bool CheckDummy(InterpState &S, CodePtr OpPC, const Block *B, AccessKinds AK) {
if (!B->isDummy())
return true;
@@ -1200,7 +1260,8 @@ static bool runRecordDestructor(InterpState &S, CodePtr OpPC,
const Record *R = Desc->ElemRecord;
assert(R);
- if (S.Current->hasThisPointer() && S.Current->getFunction()->isDestructor() &&
+ if (!S.Current->isBottomFrame() && S.Current->hasThisPointer() &&
+ S.Current->getFunction()->isDestructor() &&
Pointer::pointToSameBlock(BasePtr, S.Current->getThis())) {
const SourceInfo &Loc = S.Current->getSource(OpPC);
S.FFDiag(Loc, diag::note_constexpr_double_destroy);
@@ -1277,8 +1338,7 @@ bool Free(InterpState &S, CodePtr OpPC, bool DeleteIsArrayForm,
// Remove base casts.
QualType InitialType = Ptr.getType();
- while (Ptr.isBaseClass())
- Ptr = Ptr.getBase();
+ Ptr = Ptr.stripBaseCasts();
Source = Ptr.getDeclDesc()->asExpr();
BlockToDelete = Ptr.block();
@@ -1395,7 +1455,8 @@ bool CheckLiteralType(InterpState &S, CodePtr OpPC, const Type *T) {
// http://www.open-std.org/jtc1/sc22/wg21/docs/cwg_active.html#1677
// Therefore, we use the C++1y behavior.
- if (S.Current->getFunction() && S.Current->getFunction()->isConstructor() &&
+ if (!S.Current->isBottomFrame() &&
+ S.Current->getFunction()->isConstructor() &&
S.Current->getThis().getDeclDesc()->asDecl() == S.EvaluatingDecl) {
return true;
}
@@ -1422,8 +1483,12 @@ static bool getField(InterpState &S, CodePtr OpPC, const Pointer &Ptr,
return false;
if (Ptr.isIntegralPointer()) {
- S.Stk.push<Pointer>(Ptr.asIntPointer().atOffset(S.getASTContext(), Off));
- return true;
+ if (std::optional<IntPointer> IntPtr =
+ Ptr.asIntPointer().atOffset(S.getASTContext(), Off)) {
+ S.Stk.push<Pointer>(std::move(*IntPtr));
+ return true;
+ }
+ return false;
}
if (!Ptr.isBlockPointer()) {
@@ -1436,6 +1501,10 @@ static bool getField(InterpState &S, CodePtr OpPC, const Pointer &Ptr,
return false;
}
+ // We can't get the field of something that's not a record.
+ if (!Ptr.getFieldDesc()->isRecord())
+ return false;
+
if ((Ptr.getByteOffset() + Off) >= Ptr.block()->getSize())
return false;
@@ -1481,6 +1550,8 @@ bool CheckDestructor(InterpState &S, CodePtr OpPC, const Pointer &Ptr) {
return false;
if (!CheckRange(S, OpPC, Ptr, AK_Destroy))
return false;
+ if (!CheckLifetime(S, OpPC, Ptr.getLifetime(), AK_Destroy))
+ return false;
// Can't call a dtor on a global variable.
if (Ptr.block()->isStatic()) {
@@ -1491,6 +1562,68 @@ bool CheckDestructor(InterpState &S, CodePtr OpPC, const Pointer &Ptr) {
return CheckActive(S, OpPC, Ptr, AK_Destroy);
}
+/// Opcode. Check if the function decl can be called at compile time.
+bool CheckFunctionDecl(InterpState &S, CodePtr OpPC, const FunctionDecl *FD) {
+ if (S.checkingPotentialConstantExpression() && S.Current->getDepth() != 0)
+ return false;
+
+ const FunctionDecl *Definition = nullptr;
+ const Stmt *Body = FD->getBody(Definition);
+
+ if (Definition && Body &&
+ (Definition->isConstexpr() || (S.Current->MSVCConstexprAllowed &&
+ Definition->hasAttr<MSConstexprAttr>())))
+ return true;
+
+ return diagnoseCallableDecl(S, OpPC, FD);
+}
+
+bool CheckBitCast(InterpState &S, CodePtr OpPC, const Type *TargetType,
+ bool SrcIsVoidPtr) {
+ const auto &Ptr = S.Stk.peek<Pointer>();
+ if (Ptr.isZero())
+ return true;
+ if (!Ptr.isBlockPointer())
+ return true;
+
+ if (TargetType->isIntegerType())
+ return true;
+
+ if (SrcIsVoidPtr && S.getLangOpts().CPlusPlus) {
+ bool HasValidResult = !Ptr.isZero();
+
+ if (HasValidResult) {
+ if (S.getStdAllocatorCaller("allocate"))
+ return true;
+
+ const auto *E = cast<CastExpr>(S.Current->getExpr(OpPC));
+ if (S.getLangOpts().CPlusPlus26 &&
+ S.getASTContext().hasSimilarType(Ptr.getType(),
+ QualType(TargetType, 0)))
+ return true;
+
+ S.CCEDiag(E, diag::note_constexpr_invalid_void_star_cast)
+ << E->getSubExpr()->getType() << S.getLangOpts().CPlusPlus26
+ << Ptr.getType().getCanonicalType() << E->getType()->getPointeeType();
+ } else if (!S.getLangOpts().CPlusPlus26) {
+ const SourceInfo &E = S.Current->getSource(OpPC);
+ S.CCEDiag(E, diag::note_constexpr_invalid_cast)
+ << diag::ConstexprInvalidCastKind::CastFrom << "'void *'"
+ << S.Current->getRange(OpPC);
+ }
+ }
+
+ QualType PtrType = Ptr.getType();
+ if (PtrType->isRecordType() &&
+ PtrType->getAsRecordDecl() != TargetType->getAsRecordDecl()) {
+ S.CCEDiag(S.Current->getSource(OpPC), diag::note_constexpr_invalid_cast)
+ << diag::ConstexprInvalidCastKind::ThisConversionOrReinterpret
+ << S.getLangOpts().CPlusPlus << S.Current->getRange(OpPC);
+ return false;
+ }
+ return true;
+}
+
static void compileFunction(InterpState &S, const Function *Func) {
const FunctionDecl *Definition = Func->getDecl()->getDefinition();
if (!Definition)
@@ -1551,6 +1684,11 @@ bool CallVar(InterpState &S, CodePtr OpPC, const Function *Func,
}
bool Call(InterpState &S, CodePtr OpPC, const Function *Func,
uint32_t VarArgSize) {
+
+ // C doesn't have constexpr functions.
+ if (!S.getLangOpts().CPlusPlus)
+ return Invalid(S, OpPC);
+
assert(Func);
auto cleanup = [&]() -> bool {
cleanupAfterFunctionCall(S, OpPC, Func);
@@ -1600,12 +1738,11 @@ bool Call(InterpState &S, CodePtr OpPC, const Function *Func,
if (!CheckCallable(S, OpPC, Func))
return cleanup();
- // FIXME: The isConstructor() check here is not always right. The current
- // constant evaluator is somewhat inconsistent in when it allows a function
- // call when checking for a constant expression.
- if (Func->hasThisPointer() && S.checkingPotentialConstantExpression() &&
- !Func->isConstructor())
- return cleanup();
+ // Do not evaluate any function calls in checkingPotentialConstantExpression
+ // mode. Constructors will be aborted later when their initializers are
+ // evaluated.
+ if (S.checkingPotentialConstantExpression() && !Func->isConstructor())
+ return false;
if (!CheckCallDepth(S, OpPC))
return cleanup();
@@ -1637,13 +1774,12 @@ bool Call(InterpState &S, CodePtr OpPC, const Function *Func,
static bool GetDynamicDecl(InterpState &S, CodePtr OpPC, Pointer TypePtr,
const CXXRecordDecl *&DynamicDecl) {
- while (TypePtr.isBaseClass())
- TypePtr = TypePtr.getBase();
+ TypePtr = TypePtr.stripBaseCasts();
QualType DynamicType = TypePtr.getType();
if (TypePtr.isStatic() || TypePtr.isConst()) {
- const VarDecl *VD = TypePtr.getDeclDesc()->asVarDecl();
- if (!VD->isConstexpr()) {
+ if (const VarDecl *VD = TypePtr.getDeclDesc()->asVarDecl();
+ VD && !VD->isConstexpr()) {
const Expr *E = S.Current->getExpr(OpPC);
APValue V = TypePtr.toAPValue(S.getASTContext());
QualType TT = S.getASTContext().getLValueReferenceType(DynamicType);
@@ -1674,20 +1810,6 @@ bool CallVirt(InterpState &S, CodePtr OpPC, const Function *Func,
Pointer &ThisPtr = S.Stk.peek<Pointer>(ThisOffset);
const FunctionDecl *Callee = Func->getDecl();
- if (!Func->isFullyCompiled())
- compileFunction(S, Func);
-
- // C++2a [class.abstract]p6:
- // the effect of making a virtual call to a pure virtual function [...] is
- // undefined
- if (Callee->isPureVirtual()) {
- S.FFDiag(S.Current->getSource(OpPC), diag::note_constexpr_pure_virtual_call,
- 1)
- << Callee;
- S.Note(Callee->getLocation(), diag::note_declared_at);
- return false;
- }
-
const CXXRecordDecl *DynamicDecl = nullptr;
if (!GetDynamicDecl(S, OpPC, ThisPtr, DynamicDecl))
return false;
@@ -1697,7 +1819,8 @@ bool CallVirt(InterpState &S, CodePtr OpPC, const Function *Func,
const auto *InitialFunction = cast<CXXMethodDecl>(Callee);
const CXXMethodDecl *Overrider;
- if (StaticDecl != DynamicDecl) {
+ if (StaticDecl != DynamicDecl &&
+ !llvm::is_contained(S.InitializingBlocks, ThisPtr.block())) {
if (!DynamicDecl->isDerivedFrom(StaticDecl))
return false;
Overrider = S.getContext().getOverridingFunction(DynamicDecl, StaticDecl,
@@ -1707,6 +1830,17 @@ bool CallVirt(InterpState &S, CodePtr OpPC, const Function *Func,
Overrider = InitialFunction;
}
+ // C++2a [class.abstract]p6:
+ // the effect of making a virtual call to a pure virtual function [...] is
+ // undefined
+ if (Overrider->isPureVirtual()) {
+ S.FFDiag(S.Current->getSource(OpPC), diag::note_constexpr_pure_virtual_call,
+ 1)
+ << Callee;
+ S.Note(Callee->getLocation(), diag::note_declared_at);
+ return false;
+ }
+
if (Overrider != InitialFunction) {
// DR1872: An instantiated virtual constexpr function can't be called in a
// constant expression (prior to C++20). We can still constant-fold such a
@@ -1724,8 +1858,7 @@ bool CallVirt(InterpState &S, CodePtr OpPC, const Function *Func,
// If the function we call is further DOWN the hierarchy than the
// FieldDesc of our pointer, just go up the hierarchy of this field
// the furthest we can go.
- while (ThisPtr.isBaseClass())
- ThisPtr = ThisPtr.getBase();
+ ThisPtr = ThisPtr.stripBaseCasts();
}
}
@@ -1805,6 +1938,15 @@ bool CallPtr(InterpState &S, CodePtr OpPC, uint32_t ArgSize,
return false;
}
+ // We need to compile (and check) early for function pointer calls
+ // because the Call/CallVirt below might access the instance pointer
+ // but the Function's information about them is wrong.
+ if (!F->isFullyCompiled())
+ compileFunction(S, F);
+
+ if (!CheckCallable(S, OpPC, F))
+ return false;
+
assert(ArgSize >= F->getWrittenArgSize());
uint32_t VarArgSize = ArgSize - F->getWrittenArgSize();
@@ -1874,10 +2016,6 @@ bool EndLifetime(InterpState &S, CodePtr OpPC) {
if (Ptr.isBlockPointer() && !CheckDummy(S, OpPC, Ptr.block(), AK_Destroy))
return false;
- // FIXME: We need per-element lifetime information for primitive arrays.
- if (Ptr.isArrayElement())
- return true;
-
endLifetimeRecurse(Ptr.narrow());
return true;
}
@@ -1888,10 +2026,6 @@ bool EndLifetimePop(InterpState &S, CodePtr OpPC) {
if (Ptr.isBlockPointer() && !CheckDummy(S, OpPC, Ptr.block(), AK_Destroy))
return false;
- // FIXME: We need per-element lifetime information for primitive arrays.
- if (Ptr.isArrayElement())
- return true;
-
endLifetimeRecurse(Ptr.narrow());
return true;
}
@@ -1903,12 +2037,22 @@ bool CheckNewTypeMismatch(InterpState &S, CodePtr OpPC, const Expr *E,
if (Ptr.inUnion() && Ptr.getBase().getRecord()->isUnion())
Ptr.activate();
+ if (Ptr.isZero()) {
+ S.FFDiag(S.Current->getSource(OpPC), diag::note_constexpr_access_null)
+ << AK_Construct;
+ return false;
+ }
+
if (!Ptr.isBlockPointer())
return false;
+ if (!CheckRange(S, OpPC, Ptr, AK_Construct))
+ return false;
+
+ startLifetimeRecurse(Ptr);
+
// Similar to CheckStore(), but with the additional CheckTemporary() call and
// the AccessKinds are different.
-
if (!Ptr.block()->isAccessible()) {
if (!CheckExtern(S, OpPC, Ptr))
return false;
@@ -1984,9 +2128,12 @@ bool InvalidNewDeleteExpr(InterpState &S, CodePtr OpPC, const Expr *E) {
const FunctionDecl *OperatorNew = NewExpr->getOperatorNew();
if (NewExpr->getNumPlacementArgs() > 0) {
- // This is allowed pre-C++26, but only an std function.
- if (S.getLangOpts().CPlusPlus26 || S.Current->isStdFunction())
+ // This is allowed pre-C++26, but only an std function or if
+ // [[msvc::constexpr]] was used.
+ if (S.getLangOpts().CPlusPlus26 || S.Current->isStdFunction() ||
+ S.Current->MSVCConstexprAllowed)
return true;
+
S.FFDiag(S.Current->getSource(OpPC), diag::note_constexpr_new_placement)
<< /*C++26 feature*/ 1 << E->getSourceRange();
} else if (
@@ -2044,15 +2191,15 @@ bool InvalidShuffleVectorIndex(InterpState &S, CodePtr OpPC, uint32_t Index) {
bool CheckPointerToIntegralCast(InterpState &S, CodePtr OpPC,
const Pointer &Ptr, unsigned BitWidth) {
+ const SourceInfo &E = S.Current->getSource(OpPC);
+ S.CCEDiag(E, diag::note_constexpr_invalid_cast)
+ << 2 << S.getLangOpts().CPlusPlus << S.Current->getRange(OpPC);
+
if (Ptr.isDummy())
return false;
if (Ptr.isFunctionPointer())
return true;
- const SourceInfo &E = S.Current->getSource(OpPC);
- S.CCEDiag(E, diag::note_constexpr_invalid_cast)
- << 2 << S.getLangOpts().CPlusPlus << S.Current->getRange(OpPC);
-
if (Ptr.isBlockPointer() && !Ptr.isZero()) {
// Only allow based lvalue casts if they are lossless.
if (S.getASTContext().getTargetInfo().getPointerWidth(LangAS::Default) !=
@@ -2284,6 +2431,69 @@ bool FinishInitGlobal(InterpState &S, CodePtr OpPC) {
return true;
}
+bool InvalidCast(InterpState &S, CodePtr OpPC, CastKind Kind, bool Fatal) {
+ const SourceLocation &Loc = S.Current->getLocation(OpPC);
+
+ switch (Kind) {
+ case CastKind::Reinterpret:
+ S.CCEDiag(Loc, diag::note_constexpr_invalid_cast)
+ << diag::ConstexprInvalidCastKind::Reinterpret
+ << S.Current->getRange(OpPC);
+ return !Fatal;
+ case CastKind::ReinterpretLike:
+ S.CCEDiag(Loc, diag::note_constexpr_invalid_cast)
+ << diag::ConstexprInvalidCastKind::ThisConversionOrReinterpret
+ << S.getLangOpts().CPlusPlus << S.Current->getRange(OpPC);
+ return !Fatal;
+ case CastKind::Volatile:
+ if (!S.checkingPotentialConstantExpression()) {
+ const auto *E = cast<CastExpr>(S.Current->getExpr(OpPC));
+ if (S.getLangOpts().CPlusPlus)
+ S.FFDiag(E, diag::note_constexpr_access_volatile_type)
+ << AK_Read << E->getSubExpr()->getType();
+ else
+ S.FFDiag(E);
+ }
+
+ return false;
+ case CastKind::Dynamic:
+ assert(!S.getLangOpts().CPlusPlus20);
+ S.CCEDiag(Loc, diag::note_constexpr_invalid_cast)
+ << diag::ConstexprInvalidCastKind::Dynamic;
+ return true;
+ }
+ llvm_unreachable("Unhandled CastKind");
+ return false;
+}
+
+bool Destroy(InterpState &S, CodePtr OpPC, uint32_t I) {
+ assert(S.Current->getFunction());
+ // FIXME: We iterate the scope once here and then again in the destroy() call
+ // below.
+ for (auto &Local : S.Current->getFunction()->getScope(I).locals_reverse()) {
+ if (!S.Current->getLocalBlock(Local.Offset)->isInitialized())
+ continue;
+ const Pointer &Ptr = S.Current->getLocalPointer(Local.Offset);
+ if (Ptr.getLifetime() == Lifetime::Ended) {
+ // Try to use the declaration for better diagnostics
+ if (const Decl *D = Ptr.getDeclDesc()->asDecl()) {
+ auto *ND = cast<NamedDecl>(D);
+ S.FFDiag(ND->getLocation(),
+ diag::note_constexpr_destroy_out_of_lifetime)
+ << ND->getNameAsString();
+ } else {
+ S.FFDiag(Ptr.getDeclDesc()->getLocation(),
+ diag::note_constexpr_destroy_out_of_lifetime)
+ << Ptr.toDiagnosticString(S.getASTContext());
+ }
+ return false;
+ }
+ }
+
+ S.Current->destroy(I);
+ return true;
+}
+
// https://github.com/llvm/llvm-project/issues/102513
#if defined(_MSC_VER) && !defined(__clang__) && !defined(NDEBUG)
#pragma optimize("", off)
diff --git a/clang/lib/AST/ByteCode/Interp.h b/clang/lib/AST/ByteCode/Interp.h
index 812d25f..0d0f19e 100644
--- a/clang/lib/AST/ByteCode/Interp.h
+++ b/clang/lib/AST/ByteCode/Interp.h
@@ -22,6 +22,7 @@
#include "Function.h"
#include "InterpBuiltinBitCast.h"
#include "InterpFrame.h"
+#include "InterpHelpers.h"
#include "InterpStack.h"
#include "InterpState.h"
#include "MemberPointer.h"
@@ -43,28 +44,10 @@ using FixedPointSemantics = llvm::FixedPointSemantics;
/// Checks if the variable has externally defined storage.
bool CheckExtern(InterpState &S, CodePtr OpPC, const Pointer &Ptr);
-/// Checks if the array is offsetable.
-bool CheckArray(InterpState &S, CodePtr OpPC, const Pointer &Ptr);
-
-/// Checks if a pointer is live and accessible.
-bool CheckLive(InterpState &S, CodePtr OpPC, const Pointer &Ptr,
- AccessKinds AK);
-
-/// Checks if a pointer is a dummy pointer.
-bool CheckDummy(InterpState &S, CodePtr OpPC, const Block *B, AccessKinds AK);
-
/// Checks if a pointer is null.
bool CheckNull(InterpState &S, CodePtr OpPC, const Pointer &Ptr,
CheckSubobjectKind CSK);
-/// Checks if a pointer is in range.
-bool CheckRange(InterpState &S, CodePtr OpPC, const Pointer &Ptr,
- AccessKinds AK);
-
-/// Checks if a field from which a pointer is going to be derived is valid.
-bool CheckRange(InterpState &S, CodePtr OpPC, const Pointer &Ptr,
- CheckSubobjectKind CSK);
-
/// Checks if Ptr is a one-past-the-end pointer.
bool CheckSubobject(InterpState &S, CodePtr OpPC, const Pointer &Ptr,
CheckSubobjectKind CSK);
@@ -80,12 +63,6 @@ bool CheckConst(InterpState &S, CodePtr OpPC, const Pointer &Ptr);
/// Checks if the Descriptor is of a constexpr or const global variable.
bool CheckConstant(InterpState &S, CodePtr OpPC, const Descriptor *Desc);
-/// Checks if a pointer points to a mutable field.
-bool CheckMutable(InterpState &S, CodePtr OpPC, const Pointer &Ptr);
-
-/// Checks if a value can be loaded from a block.
-bool CheckLoad(InterpState &S, CodePtr OpPC, const Pointer &Ptr,
- AccessKinds AK = AK_Read);
bool CheckFinalLoad(InterpState &S, CodePtr OpPC, const Pointer &Ptr);
bool DiagnoseUninitialized(InterpState &S, CodePtr OpPC, const Pointer &Ptr,
@@ -98,7 +75,8 @@ bool CheckGlobalLoad(InterpState &S, CodePtr OpPC, const Block *B);
bool CheckLocalLoad(InterpState &S, CodePtr OpPC, const Block *B);
/// Checks if a value can be stored in a block.
-bool CheckStore(InterpState &S, CodePtr OpPC, const Pointer &Ptr);
+bool CheckStore(InterpState &S, CodePtr OpPC, const Pointer &Ptr,
+ bool WillBeActivated = false);
/// Checks if a value can be initialized.
bool CheckInit(InterpState &S, CodePtr OpPC, const Pointer &Ptr);
@@ -110,28 +88,19 @@ bool CheckThis(InterpState &S, CodePtr OpPC);
/// language mode.
bool CheckDynamicMemoryAllocation(InterpState &S, CodePtr OpPC);
-/// Diagnose mismatched new[]/delete or new/delete[] pairs.
-bool CheckNewDeleteForms(InterpState &S, CodePtr OpPC,
- DynamicAllocator::Form AllocForm,
- DynamicAllocator::Form DeleteForm, const Descriptor *D,
- const Expr *NewExpr);
-
/// Check the source of the pointer passed to delete/delete[] has actually
/// been heap allocated by us.
bool CheckDeleteSource(InterpState &S, CodePtr OpPC, const Expr *Source,
const Pointer &Ptr);
bool CheckActive(InterpState &S, CodePtr OpPC, const Pointer &Ptr,
- AccessKinds AK);
+ AccessKinds AK, bool WillActivate = false);
/// Sets the given integral value to the pointer, which is of
/// a std::{weak,partial,strong}_ordering type.
bool SetThreeWayComparisonField(InterpState &S, CodePtr OpPC,
const Pointer &Ptr, const APSInt &IntValue);
-/// Copy the contents of Src into Dest.
-bool DoMemcpy(InterpState &S, CodePtr OpPC, const Pointer &Src, Pointer &Dest);
-
bool CallVar(InterpState &S, CodePtr OpPC, const Function *Func,
uint32_t VarArgSize);
bool Call(InterpState &S, CodePtr OpPC, const Function *Func,
@@ -148,20 +117,17 @@ bool CheckBitCast(InterpState &S, CodePtr OpPC, bool HasIndeterminateBits,
bool TargetIsUCharOrByte);
bool CheckBCPResult(InterpState &S, const Pointer &Ptr);
bool CheckDestructor(InterpState &S, CodePtr OpPC, const Pointer &Ptr);
+bool CheckFunctionDecl(InterpState &S, CodePtr OpPC, const FunctionDecl *FD);
+bool CheckBitCast(InterpState &S, CodePtr OpPC, const Type *TargetType,
+ bool SrcIsVoidPtr);
+bool InvalidCast(InterpState &S, CodePtr OpPC, CastKind Kind, bool Fatal);
-template <typename T>
-static bool handleOverflow(InterpState &S, CodePtr OpPC, const T &SrcValue) {
- const Expr *E = S.Current->getExpr(OpPC);
- S.CCEDiag(E, diag::note_constexpr_overflow) << SrcValue << E->getType();
- return S.noteUndefinedBehavior();
-}
bool handleFixedPointOverflow(InterpState &S, CodePtr OpPC,
const FixedPoint &FP);
+bool Destroy(InterpState &S, CodePtr OpPC, uint32_t I);
bool isConstexprUnknown(const Pointer &P);
-inline bool CheckArraySize(InterpState &S, CodePtr OpPC, uint64_t NumElems);
-
enum class ShiftDir { Left, Right };
/// Checks if the shift operation is legal.
@@ -241,43 +207,6 @@ bool CheckDivRem(InterpState &S, CodePtr OpPC, const T &LHS, const T &RHS) {
return true;
}
-template <typename SizeT>
-bool CheckArraySize(InterpState &S, CodePtr OpPC, SizeT *NumElements,
- unsigned ElemSize, bool IsNoThrow) {
- // FIXME: Both the SizeT::from() as well as the
- // NumElements.toAPSInt() in this function are rather expensive.
-
- // Can't be too many elements if the bitwidth of NumElements is lower than
- // that of Descriptor::MaxArrayElemBytes.
- if ((NumElements->bitWidth() - NumElements->isSigned()) <
- (sizeof(Descriptor::MaxArrayElemBytes) * 8))
- return true;
-
- // FIXME: GH63562
- // APValue stores array extents as unsigned,
- // so anything that is greater that unsigned would overflow when
- // constructing the array, we catch this here.
- SizeT MaxElements = SizeT::from(Descriptor::MaxArrayElemBytes / ElemSize);
- assert(MaxElements.isPositive());
- if (NumElements->toAPSInt().getActiveBits() >
- ConstantArrayType::getMaxSizeBits(S.getASTContext()) ||
- *NumElements > MaxElements) {
- if (!IsNoThrow) {
- const SourceInfo &Loc = S.Current->getSource(OpPC);
-
- if (NumElements->isSigned() && NumElements->isNegative()) {
- S.FFDiag(Loc, diag::note_constexpr_new_negative)
- << NumElements->toDiagnosticString(S.getASTContext());
- } else {
- S.FFDiag(Loc, diag::note_constexpr_new_too_large)
- << NumElements->toDiagnosticString(S.getASTContext());
- }
- }
- return false;
- }
- return true;
-}
-
/// Checks if the result of a floating-point operation is valid
/// in the current context.
bool CheckFloatResult(InterpState &S, CodePtr OpPC, const Floating &Result,
@@ -285,19 +214,8 @@ bool CheckFloatResult(InterpState &S, CodePtr OpPC, const Floating &Result,
/// Checks why the given DeclRefExpr is invalid.
bool CheckDeclRef(InterpState &S, CodePtr OpPC, const DeclRefExpr *DR);
-
-/// Interpreter entry point.
-bool Interpret(InterpState &S);
-
-/// Interpret a builtin function.
-bool InterpretBuiltin(InterpState &S, CodePtr OpPC, const CallExpr *Call,
- uint32_t BuiltinID);
-
-/// Interpret an offsetof operation.
-bool InterpretOffsetOf(InterpState &S, CodePtr OpPC, const OffsetOfExpr *E,
- ArrayRef<int64_t> ArrayIndices, int64_t &Result);
-
-inline bool Invalid(InterpState &S, CodePtr OpPC);
+bool InvalidDeclRef(InterpState &S, CodePtr OpPC, const DeclRefExpr *DR,
+ bool InitializerFailed);
enum class ArithOp { Add, Sub };
@@ -403,13 +321,6 @@ bool Add(InterpState &S, CodePtr OpPC) {
return AddSubMulHelper<T, T::add, std::plus>(S, OpPC, Bits, LHS, RHS);
}
-static inline llvm::RoundingMode getRoundingMode(FPOptions FPO) {
- auto RM = FPO.getRoundingMode();
- if (RM == llvm::RoundingMode::Dynamic)
- return llvm::RoundingMode::NearestTiesToEven;
- return RM;
-}
-
inline bool Addf(InterpState &S, CodePtr OpPC, uint32_t FPOI) {
const Floating &RHS = S.Stk.pop<Floating>();
const Floating &LHS = S.Stk.pop<Floating>();
@@ -498,11 +409,19 @@ inline bool Mulc(InterpState &S, CodePtr OpPC) {
// real(Result) = (real(LHS) * real(RHS)) - (imag(LHS) * imag(RHS))
T A;
+ if constexpr (needsAlloc<T>())
+ A = S.allocAP<T>(Bits);
if (T::mul(LHSR, RHSR, Bits, &A))
return false;
+
T B;
+ if constexpr (needsAlloc<T>())
+ B = S.allocAP<T>(Bits);
if (T::mul(LHSI, RHSI, Bits, &B))
return false;
+
+ if constexpr (needsAlloc<T>())
+ Result.elem<T>(0) = S.allocAP<T>(Bits);
if (T::sub(A, B, Bits, &Result.elem<T>(0)))
return false;
@@ -511,6 +430,9 @@ inline bool Mulc(InterpState &S, CodePtr OpPC) {
return false;
if (T::mul(LHSI, RHSR, Bits, &B))
return false;
+
+ if constexpr (needsAlloc<T>())
+ Result.elem<T>(1) = S.allocAP<T>(Bits);
if (T::add(A, B, Bits, &Result.elem<T>(1)))
return false;
Result.initialize();
@@ -564,10 +486,18 @@ inline bool Divc(InterpState &S, CodePtr OpPC) {
// Den = real(RHS)² + imag(RHS)²
T A, B;
+ if constexpr (needsAlloc<T>()) {
+ A = S.allocAP<T>(Bits);
+ B = S.allocAP<T>(Bits);
+ }
+
if (T::mul(RHSR, RHSR, Bits, &A) || T::mul(RHSI, RHSI, Bits, &B)) {
// Ignore overflow here, because that's what the current interpeter does.
}
T Den;
+ if constexpr (needsAlloc<T>())
+ Den = S.allocAP<T>(Bits);
+
if (T::add(A, B, Bits, &Den))
return false;
@@ -580,7 +510,10 @@ inline bool Divc(InterpState &S, CodePtr OpPC) {
// real(Result) = ((real(LHS) * real(RHS)) + (imag(LHS) * imag(RHS))) / Den
T &ResultR = Result.elem<T>(0);
T &ResultI = Result.elem<T>(1);
-
+ if constexpr (needsAlloc<T>()) {
+ ResultR = S.allocAP<T>(Bits);
+ ResultI = S.allocAP<T>(Bits);
+ }
if (T::mul(LHSR, RHSR, Bits, &A) || T::mul(LHSI, RHSI, Bits, &B))
return false;
if (T::add(A, B, Bits, &ResultR))
@@ -799,7 +732,7 @@ enum class IncDecOp {
template <typename T, IncDecOp Op, PushVal DoPush>
bool IncDecHelper(InterpState &S, CodePtr OpPC, const Pointer &Ptr,
- bool CanOverflow) {
+ bool CanOverflow, UnsignedOrNone BitWidth = std::nullopt) {
assert(!Ptr.isDummy());
if (!S.inConstantContext()) {
@@ -822,12 +755,18 @@ bool IncDecHelper(InterpState &S, CodePtr OpPC, const Pointer &Ptr,
if constexpr (Op == IncDecOp::Inc) {
if (!T::increment(Value, &Result) || !CanOverflow) {
- Ptr.deref<T>() = Result;
+ if (BitWidth)
+ Ptr.deref<T>() = Result.truncate(*BitWidth);
+ else
+ Ptr.deref<T>() = Result;
return true;
}
} else {
if (!T::decrement(Value, &Result) || !CanOverflow) {
- Ptr.deref<T>() = Result;
+ if (BitWidth)
+ Ptr.deref<T>() = Result.truncate(*BitWidth);
+ else
+ Ptr.deref<T>() = Result;
return true;
}
}
@@ -866,11 +805,26 @@ bool Inc(InterpState &S, CodePtr OpPC, bool CanOverflow) {
const Pointer &Ptr = S.Stk.pop<Pointer>();
if (!CheckLoad(S, OpPC, Ptr, AK_Increment))
return false;
+ if (!CheckConst(S, OpPC, Ptr))
+ return false;
return IncDecHelper<T, IncDecOp::Inc, PushVal::Yes>(S, OpPC, Ptr,
CanOverflow);
}
+template <PrimType Name, class T = typename PrimConv<Name>::T>
+bool IncBitfield(InterpState &S, CodePtr OpPC, bool CanOverflow,
+ unsigned BitWidth) {
+ const Pointer &Ptr = S.Stk.pop<Pointer>();
+ if (!CheckLoad(S, OpPC, Ptr, AK_Increment))
+ return false;
+ if (!CheckConst(S, OpPC, Ptr))
+ return false;
+
+ return IncDecHelper<T, IncDecOp::Inc, PushVal::Yes>(S, OpPC, Ptr, CanOverflow,
+ BitWidth);
+}
+
/// 1) Pops a pointer from the stack
/// 2) Load the value from the pointer
/// 3) Writes the value increased by one back to the pointer
@@ -879,19 +833,49 @@ bool IncPop(InterpState &S, CodePtr OpPC, bool CanOverflow) {
const Pointer &Ptr = S.Stk.pop<Pointer>();
if (!CheckLoad(S, OpPC, Ptr, AK_Increment))
return false;
+ if (!CheckConst(S, OpPC, Ptr))
+ return false;
return IncDecHelper<T, IncDecOp::Inc, PushVal::No>(S, OpPC, Ptr, CanOverflow);
}
template <PrimType Name, class T = typename PrimConv<Name>::T>
+bool IncPopBitfield(InterpState &S, CodePtr OpPC, bool CanOverflow,
+ uint32_t BitWidth) {
+ const Pointer &Ptr = S.Stk.pop<Pointer>();
+ if (!CheckLoad(S, OpPC, Ptr, AK_Increment))
+ return false;
+ if (!CheckConst(S, OpPC, Ptr))
+ return false;
+
+ return IncDecHelper<T, IncDecOp::Inc, PushVal::No>(S, OpPC, Ptr, CanOverflow,
+ BitWidth);
+}
+
+template <PrimType Name, class T = typename PrimConv<Name>::T>
bool PreInc(InterpState &S, CodePtr OpPC, bool CanOverflow) {
const Pointer &Ptr = S.Stk.peek<Pointer>();
if (!CheckLoad(S, OpPC, Ptr, AK_Increment))
return false;
+ if (!CheckConst(S, OpPC, Ptr))
+ return false;
return IncDecHelper<T, IncDecOp::Inc, PushVal::No>(S, OpPC, Ptr, CanOverflow);
}
+template <PrimType Name, class T = typename PrimConv<Name>::T>
+bool PreIncBitfield(InterpState &S, CodePtr OpPC, bool CanOverflow,
+ uint32_t BitWidth) {
+ const Pointer &Ptr = S.Stk.peek<Pointer>();
+ if (!CheckLoad(S, OpPC, Ptr, AK_Increment))
+ return false;
+ if (!CheckConst(S, OpPC, Ptr))
+ return false;
+
+ return IncDecHelper<T, IncDecOp::Inc, PushVal::No>(S, OpPC, Ptr, CanOverflow,
+ BitWidth);
+}
+
/// 1) Pops a pointer from the stack
/// 2) Load the value from the pointer
/// 3) Writes the value decreased by one back to the pointer
@@ -901,10 +885,24 @@ bool Dec(InterpState &S, CodePtr OpPC, bool CanOverflow) {
const Pointer &Ptr = S.Stk.pop<Pointer>();
if (!CheckLoad(S, OpPC, Ptr, AK_Decrement))
return false;
+ if (!CheckConst(S, OpPC, Ptr))
+ return false;
return IncDecHelper<T, IncDecOp::Dec, PushVal::Yes>(S, OpPC, Ptr,
CanOverflow);
}
+template <PrimType Name, class T = typename PrimConv<Name>::T>
+bool DecBitfield(InterpState &S, CodePtr OpPC, bool CanOverflow,
+ uint32_t BitWidth) {
+ const Pointer &Ptr = S.Stk.pop<Pointer>();
+ if (!CheckLoad(S, OpPC, Ptr, AK_Decrement))
+ return false;
+ if (!CheckConst(S, OpPC, Ptr))
+ return false;
+
+ return IncDecHelper<T, IncDecOp::Dec, PushVal::Yes>(S, OpPC, Ptr, CanOverflow,
+ BitWidth);
+}
/// 1) Pops a pointer from the stack
/// 2) Load the value from the pointer
@@ -914,18 +912,47 @@ bool DecPop(InterpState &S, CodePtr OpPC, bool CanOverflow) {
const Pointer &Ptr = S.Stk.pop<Pointer>();
if (!CheckLoad(S, OpPC, Ptr, AK_Decrement))
return false;
+ if (!CheckConst(S, OpPC, Ptr))
+ return false;
return IncDecHelper<T, IncDecOp::Dec, PushVal::No>(S, OpPC, Ptr, CanOverflow);
}
template <PrimType Name, class T = typename PrimConv<Name>::T>
+bool DecPopBitfield(InterpState &S, CodePtr OpPC, bool CanOverflow,
+ uint32_t BitWidth) {
+ const Pointer &Ptr = S.Stk.pop<Pointer>();
+ if (!CheckLoad(S, OpPC, Ptr, AK_Decrement))
+ return false;
+ if (!CheckConst(S, OpPC, Ptr))
+ return false;
+
+ return IncDecHelper<T, IncDecOp::Dec, PushVal::No>(S, OpPC, Ptr, CanOverflow,
+ BitWidth);
+}
+
+template <PrimType Name, class T = typename PrimConv<Name>::T>
bool PreDec(InterpState &S, CodePtr OpPC, bool CanOverflow) {
const Pointer &Ptr = S.Stk.peek<Pointer>();
if (!CheckLoad(S, OpPC, Ptr, AK_Decrement))
return false;
+ if (!CheckConst(S, OpPC, Ptr))
+ return false;
return IncDecHelper<T, IncDecOp::Dec, PushVal::No>(S, OpPC, Ptr, CanOverflow);
}
+template <PrimType Name, class T = typename PrimConv<Name>::T>
+bool PreDecBitfield(InterpState &S, CodePtr OpPC, bool CanOverflow,
+ uint32_t BitWidth) {
+ const Pointer &Ptr = S.Stk.peek<Pointer>();
+ if (!CheckLoad(S, OpPC, Ptr, AK_Decrement))
+ return false;
+ if (!CheckConst(S, OpPC, Ptr))
+ return false;
+ return IncDecHelper<T, IncDecOp::Dec, PushVal::No>(S, OpPC, Ptr, CanOverflow,
+ BitWidth);
+}
+
template <IncDecOp Op, PushVal DoPush>
bool IncDecFloatHelper(InterpState &S, CodePtr OpPC, const Pointer &Ptr,
uint32_t FPOI) {
@@ -951,6 +978,8 @@ inline bool Incf(InterpState &S, CodePtr OpPC, uint32_t FPOI) {
const Pointer &Ptr = S.Stk.pop<Pointer>();
if (!CheckLoad(S, OpPC, Ptr, AK_Increment))
return false;
+ if (!CheckConst(S, OpPC, Ptr))
+ return false;
return IncDecFloatHelper<IncDecOp::Inc, PushVal::Yes>(S, OpPC, Ptr, FPOI);
}
@@ -959,6 +988,8 @@ inline bool IncfPop(InterpState &S, CodePtr OpPC, uint32_t FPOI) {
const Pointer &Ptr = S.Stk.pop<Pointer>();
if (!CheckLoad(S, OpPC, Ptr, AK_Increment))
return false;
+ if (!CheckConst(S, OpPC, Ptr))
+ return false;
return IncDecFloatHelper<IncDecOp::Inc, PushVal::No>(S, OpPC, Ptr, FPOI);
}
@@ -967,6 +998,8 @@ inline bool Decf(InterpState &S, CodePtr OpPC, uint32_t FPOI) {
const Pointer &Ptr = S.Stk.pop<Pointer>();
if (!CheckLoad(S, OpPC, Ptr, AK_Decrement))
return false;
+ if (!CheckConst(S, OpPC, Ptr))
+ return false;
return IncDecFloatHelper<IncDecOp::Dec, PushVal::Yes>(S, OpPC, Ptr, FPOI);
}
@@ -975,6 +1008,8 @@ inline bool DecfPop(InterpState &S, CodePtr OpPC, uint32_t FPOI) {
const Pointer &Ptr = S.Stk.pop<Pointer>();
if (!CheckLoad(S, OpPC, Ptr, AK_Decrement))
return false;
+ if (!CheckConst(S, OpPC, Ptr))
+ return false;
return IncDecFloatHelper<IncDecOp::Dec, PushVal::No>(S, OpPC, Ptr, FPOI);
}
@@ -1120,22 +1155,23 @@ inline bool CmpHelperEQ<Pointer>(InterpState &S, CodePtr OpPC, CompareFn Fn) {
}
if (Pointer::hasSameBase(LHS, RHS)) {
- size_t A = LHS.computeOffsetForComparison();
- size_t B = RHS.computeOffsetForComparison();
+ size_t A = LHS.computeOffsetForComparison(S.getASTContext());
+ size_t B = RHS.computeOffsetForComparison(S.getASTContext());
+
S.Stk.push<BoolT>(BoolT::from(Fn(Compare(A, B))));
return true;
}
// Otherwise we need to do a bunch of extra checks before returning Unordered.
if (LHS.isOnePastEnd() && !RHS.isOnePastEnd() && !RHS.isZero() &&
- RHS.getOffset() == 0) {
+ RHS.isBlockPointer() && RHS.getOffset() == 0) {
const SourceInfo &Loc = S.Current->getSource(OpPC);
S.FFDiag(Loc, diag::note_constexpr_pointer_comparison_past_end)
<< LHS.toDiagnosticString(S.getASTContext());
return false;
}
if (RHS.isOnePastEnd() && !LHS.isOnePastEnd() && !LHS.isZero() &&
- LHS.getOffset() == 0) {
+ LHS.isBlockPointer() && LHS.getOffset() == 0) {
const SourceInfo &Loc = S.Current->getSource(OpPC);
S.FFDiag(Loc, diag::note_constexpr_pointer_comparison_past_end)
<< RHS.toDiagnosticString(S.getASTContext());
@@ -1480,8 +1516,7 @@ bool GetGlobal(InterpState &S, CodePtr OpPC, uint32_t I) {
template <PrimType Name, class T = typename PrimConv<Name>::T>
bool GetGlobalUnchecked(InterpState &S, CodePtr OpPC, uint32_t I) {
const Block *B = S.P.getGlobal(I);
- const auto &Desc =
- *reinterpret_cast<const GlobalInlineDescriptor *>(B->rawData());
+ const auto &Desc = B->getBlockDesc<GlobalInlineDescriptor>();
if (Desc.InitState != GlobalInitState::Initialized)
return DiagnoseUninitialized(S, OpPC, B->isExtern(), B->getDescriptor(),
AK_Read);
@@ -1668,16 +1703,15 @@ bool InitBitField(InterpState &S, CodePtr OpPC, const Record::Field *F) {
const Pointer &Field = Ptr.atField(F->Offset);
+ unsigned BitWidth = std::min(F->Decl->getBitWidthValue(), Value.bitWidth());
if constexpr (needsAlloc<T>()) {
T Result = S.allocAP<T>(Value.bitWidth());
- if (T::isSigned())
- Result.copy(Value.toAPSInt()
- .trunc(F->Decl->getBitWidthValue())
- .sextOrTrunc(Value.bitWidth()));
+ if constexpr (T::isSigned())
+ Result.copy(
+ Value.toAPSInt().trunc(BitWidth).sextOrTrunc(Value.bitWidth()));
else
- Result.copy(Value.toAPSInt()
- .trunc(F->Decl->getBitWidthValue())
- .zextOrTrunc(Value.bitWidth()));
+ Result.copy(
+ Value.toAPSInt().trunc(BitWidth).zextOrTrunc(Value.bitWidth()));
Field.deref<T>() = Result;
} else {
@@ -1700,16 +1734,15 @@ bool InitBitFieldActivate(InterpState &S, CodePtr OpPC,
const Pointer &Field = Ptr.atField(F->Offset);
+ unsigned BitWidth = std::min(F->Decl->getBitWidthValue(), Value.bitWidth());
if constexpr (needsAlloc<T>()) {
T Result = S.allocAP<T>(Value.bitWidth());
- if (T::isSigned())
- Result.copy(Value.toAPSInt()
- .trunc(F->Decl->getBitWidthValue())
- .sextOrTrunc(Value.bitWidth()));
+ if constexpr (T::isSigned())
+ Result.copy(
+ Value.toAPSInt().trunc(BitWidth).sextOrTrunc(Value.bitWidth()));
else
- Result.copy(Value.toAPSInt()
- .trunc(F->Decl->getBitWidthValue())
- .zextOrTrunc(Value.bitWidth()));
+ Result.copy(
+ Value.toAPSInt().trunc(BitWidth).zextOrTrunc(Value.bitWidth()));
Field.deref<T>() = Result;
} else {
@@ -1901,10 +1934,7 @@ inline bool CheckNull(InterpState &S, CodePtr OpPC) {
inline bool VirtBaseHelper(InterpState &S, CodePtr OpPC, const RecordDecl *Decl,
const Pointer &Ptr) {
- Pointer Base = Ptr;
- while (Base.isBaseClass())
- Base = Base.getBase();
-
+ Pointer Base = Ptr.stripBaseCasts();
const Record::Base *VirtBase = Base.getRecord()->getVirtualBase(Decl);
S.Stk.push<Pointer>(Base.atField(VirtBase->Offset));
return true;
@@ -1941,6 +1971,8 @@ bool Load(InterpState &S, CodePtr OpPC) {
return false;
if (!Ptr.isBlockPointer())
return false;
+ if (!Ptr.canDeref(Name))
+ return false;
S.Stk.push<T>(Ptr.deref<T>());
return true;
}
@@ -1952,6 +1984,8 @@ bool LoadPop(InterpState &S, CodePtr OpPC) {
return false;
if (!Ptr.isBlockPointer())
return false;
+ if (!Ptr.canDeref(Name))
+ return false;
S.Stk.push<T>(Ptr.deref<T>());
return true;
}
@@ -2004,13 +2038,12 @@ bool StoreActivate(InterpState &S, CodePtr OpPC) {
const T &Value = S.Stk.pop<T>();
const Pointer &Ptr = S.Stk.peek<Pointer>();
+ if (!CheckStore(S, OpPC, Ptr, /*WillBeActivated=*/true))
+ return false;
if (Ptr.canBeInitialized()) {
Ptr.initialize();
Ptr.activate();
}
-
- if (!CheckStore(S, OpPC, Ptr))
- return false;
Ptr.deref<T>() = Value;
return true;
}
@@ -2020,12 +2053,12 @@ bool StoreActivatePop(InterpState &S, CodePtr OpPC) {
const T &Value = S.Stk.pop<T>();
const Pointer &Ptr = S.Stk.pop<Pointer>();
+ if (!CheckStore(S, OpPC, Ptr, /*WillBeActivated=*/true))
+ return false;
if (Ptr.canBeInitialized()) {
Ptr.initialize();
Ptr.activate();
}
- if (!CheckStore(S, OpPC, Ptr))
- return false;
Ptr.deref<T>() = Value;
return true;
}
@@ -2034,6 +2067,7 @@ template <PrimType Name, class T = typename PrimConv<Name>::T>
bool StoreBitField(InterpState &S, CodePtr OpPC) {
const T &Value = S.Stk.pop<T>();
const Pointer &Ptr = S.Stk.peek<Pointer>();
+
if (!CheckStore(S, OpPC, Ptr))
return false;
if (Ptr.canBeInitialized())
@@ -2064,12 +2098,13 @@ template <PrimType Name, class T = typename PrimConv<Name>::T>
bool StoreBitFieldActivate(InterpState &S, CodePtr OpPC) {
const T &Value = S.Stk.pop<T>();
const Pointer &Ptr = S.Stk.peek<Pointer>();
+
+ if (!CheckStore(S, OpPC, Ptr, /*WillBeActivated=*/true))
+ return false;
if (Ptr.canBeInitialized()) {
Ptr.initialize();
Ptr.activate();
}
- if (!CheckStore(S, OpPC, Ptr))
- return false;
if (const auto *FD = Ptr.getField())
Ptr.deref<T>() = Value.truncate(FD->getBitWidthValue());
else
@@ -2082,12 +2117,12 @@ bool StoreBitFieldActivatePop(InterpState &S, CodePtr OpPC) {
const T &Value = S.Stk.pop<T>();
const Pointer &Ptr = S.Stk.pop<Pointer>();
+ if (!CheckStore(S, OpPC, Ptr, /*WillBeActivated=*/true))
+ return false;
if (Ptr.canBeInitialized()) {
Ptr.initialize();
Ptr.activate();
}
- if (!CheckStore(S, OpPC, Ptr))
- return false;
if (const auto *FD = Ptr.getField())
Ptr.deref<T>() = Value.truncate(FD->getBitWidthValue());
else
@@ -2258,6 +2293,8 @@ std::optional<Pointer> OffsetHelper(InterpState &S, CodePtr OpPC,
S.CCEDiag(S.Current->getSource(OpPC), diag::note_constexpr_array_index)
<< N << /*non-array*/ true << 0;
return Pointer(Ptr.asFunctionPointer().getFunction(), N);
+ } else if (!Ptr.isBlockPointer()) {
+ return std::nullopt;
}
assert(Ptr.isBlockPointer());
@@ -2306,7 +2343,7 @@ std::optional<Pointer> OffsetHelper(InterpState &S, CodePtr OpPC,
}
}
- if (Invalid && S.getLangOpts().CPlusPlus)
+ if (Invalid && (S.getLangOpts().CPlusPlus || Ptr.inArray()))
return std::nullopt;
// Offset is valid - compute it on unsigned.
@@ -2333,13 +2370,11 @@ std::optional<Pointer> OffsetHelper(InterpState &S, CodePtr OpPC,
template <PrimType Name, class T = typename PrimConv<Name>::T>
bool AddOffset(InterpState &S, CodePtr OpPC) {
const T &Offset = S.Stk.pop<T>();
- Pointer Ptr = S.Stk.pop<Pointer>();
- if (Ptr.isBlockPointer())
- Ptr = Ptr.expand();
+ const Pointer &Ptr = S.Stk.pop<Pointer>().expand();
if (std::optional<Pointer> Result = OffsetHelper<T, ArithOp::Add>(
S, OpPC, Offset, Ptr, /*IsPointerArith=*/true)) {
- S.Stk.push<Pointer>(*Result);
+ S.Stk.push<Pointer>(Result->narrow());
return true;
}
return false;
@@ -2348,11 +2383,11 @@ bool AddOffset(InterpState &S, CodePtr OpPC) {
template <PrimType Name, class T = typename PrimConv<Name>::T>
bool SubOffset(InterpState &S, CodePtr OpPC) {
const T &Offset = S.Stk.pop<T>();
- const Pointer &Ptr = S.Stk.pop<Pointer>();
+ const Pointer &Ptr = S.Stk.pop<Pointer>().expand();
if (std::optional<Pointer> Result = OffsetHelper<T, ArithOp::Sub>(
S, OpPC, Offset, Ptr, /*IsPointerArith=*/true)) {
- S.Stk.push<Pointer>(*Result);
+ S.Stk.push<Pointer>(Result->narrow());
return true;
}
return false;
@@ -2378,7 +2413,7 @@ static inline bool IncDecPtrHelper(InterpState &S, CodePtr OpPC,
if (std::optional<Pointer> Result =
OffsetHelper<OneT, Op>(S, OpPC, One, P, /*IsPointerArith=*/true)) {
// Store the new value.
- Ptr.deref<Pointer>() = *Result;
+ Ptr.deref<Pointer>() = Result->narrow();
return true;
}
return false;
@@ -2406,9 +2441,9 @@ static inline bool DecPtr(InterpState &S, CodePtr OpPC) {
/// 2) Pops another Pointer from the stack.
/// 3) Pushes the difference of the indices of the two pointers on the stack.
template <PrimType Name, class T = typename PrimConv<Name>::T>
-inline bool SubPtr(InterpState &S, CodePtr OpPC) {
- const Pointer &LHS = S.Stk.pop<Pointer>();
- const Pointer &RHS = S.Stk.pop<Pointer>();
+inline bool SubPtr(InterpState &S, CodePtr OpPC, bool ElemSizeIsZero) {
+ const Pointer &LHS = S.Stk.pop<Pointer>().expand();
+ const Pointer &RHS = S.Stk.pop<Pointer>().expand();
if (!Pointer::hasSameBase(LHS, RHS) && S.getLangOpts().CPlusPlus) {
S.FFDiag(S.Current->getSource(OpPC),
@@ -2418,25 +2453,23 @@ inline bool SubPtr(InterpState &S, CodePtr OpPC) {
return false;
}
- if (LHS == RHS) {
- S.Stk.push<T>();
- return true;
- }
+ if (ElemSizeIsZero) {
+ QualType PtrT = LHS.getType();
+ while (auto *AT = dyn_cast<ArrayType>(PtrT))
+ PtrT = AT->getElementType();
- for (const Pointer &P : {LHS, RHS}) {
- if (P.isZeroSizeArray()) {
- QualType PtrT = P.getType();
- while (auto *AT = dyn_cast<ArrayType>(PtrT))
- PtrT = AT->getElementType();
+ QualType ArrayTy = S.getASTContext().getConstantArrayType(
+ PtrT, APInt::getZero(1), nullptr, ArraySizeModifier::Normal, 0);
+ S.FFDiag(S.Current->getSource(OpPC),
+ diag::note_constexpr_pointer_subtraction_zero_size)
+ << ArrayTy;
- QualType ArrayTy = S.getASTContext().getConstantArrayType(
- PtrT, APInt::getZero(1), nullptr, ArraySizeModifier::Normal, 0);
- S.FFDiag(S.Current->getSource(OpPC),
- diag::note_constexpr_pointer_subtraction_zero_size)
- << ArrayTy;
+ return false;
+ }
- return false;
- }
+ if (LHS == RHS) {
+ S.Stk.push<T>();
+ return true;
}
int64_t A64 =
@@ -2457,40 +2490,20 @@ inline bool SubPtr(InterpState &S, CodePtr OpPC) {
return true;
}
-//===----------------------------------------------------------------------===//
-// Destroy
-//===----------------------------------------------------------------------===//
-
-inline bool Destroy(InterpState &S, CodePtr OpPC, uint32_t I) {
- assert(S.Current->getFunction());
-
- // FIXME: We iterate the scope once here and then again in the destroy() call
- // below.
- for (auto &Local : S.Current->getFunction()->getScope(I).locals_reverse()) {
- const Pointer &Ptr = S.Current->getLocalPointer(Local.Offset);
-
- if (Ptr.getLifetime() == Lifetime::Ended) {
- // Try to use the declaration for better diagnostics
- if (const Decl *D = Ptr.getDeclDesc()->asDecl()) {
- auto *ND = cast<NamedDecl>(D);
- S.FFDiag(ND->getLocation(),
- diag::note_constexpr_destroy_out_of_lifetime)
- << ND->getNameAsString();
- } else {
- S.FFDiag(Ptr.getDeclDesc()->getLocation(),
- diag::note_constexpr_destroy_out_of_lifetime)
- << Ptr.toDiagnosticString(S.getASTContext());
- }
- return false;
- }
- }
+inline bool InitScope(InterpState &S, CodePtr OpPC, uint32_t I) {
+ S.Current->initScope(I);
+ return true;
+}
- S.Current->destroy(I);
+inline bool EnableLocal(InterpState &S, CodePtr OpPC, uint32_t I) {
+ assert(!S.Current->isLocalEnabled(I));
+ S.Current->enableLocal(I);
return true;
}
-inline bool InitScope(InterpState &S, CodePtr OpPC, uint32_t I) {
- S.Current->initScope(I);
+inline bool GetLocalEnabled(InterpState &S, CodePtr OpPC, uint32_t I) {
+ assert(S.Current);
+ S.Stk.push<bool>(S.Current->isLocalEnabled(I));
return true;
}
@@ -2611,8 +2624,9 @@ static inline bool CastFloatingIntegralAP(InterpState &S, CodePtr OpPC,
auto Status = F.convertToInteger(Result);
// Float-to-Integral overflow check.
- if ((Status & APFloat::opStatus::opInvalidOp) && F.isFinite())
- return handleOverflow(S, OpPC, F.getAPFloat());
+ if ((Status & APFloat::opStatus::opInvalidOp) && F.isFinite() &&
+ !handleOverflow(S, OpPC, F.getAPFloat()))
+ return false;
FPOptions FPO = FPOptions::getFromOpaqueInt(FPOI);
@@ -2632,8 +2646,9 @@ static inline bool CastFloatingIntegralAPS(InterpState &S, CodePtr OpPC,
auto Status = F.convertToInteger(Result);
// Float-to-Integral overflow check.
- if ((Status & APFloat::opStatus::opInvalidOp) && F.isFinite())
- return handleOverflow(S, OpPC, F.getAPFloat());
+ if ((Status & APFloat::opStatus::opInvalidOp) && F.isFinite() &&
+ !handleOverflow(S, OpPC, F.getAPFloat()))
+ return false;
FPOptions FPO = FPOptions::getFromOpaqueInt(FPOI);
@@ -2654,10 +2669,6 @@ template <PrimType Name, class T = typename PrimConv<Name>::T>
bool CastPointerIntegral(InterpState &S, CodePtr OpPC) {
const Pointer &Ptr = S.Stk.pop<Pointer>();
- S.CCEDiag(S.Current->getSource(OpPC), diag::note_constexpr_invalid_cast)
- << diag::ConstexprInvalidCastKind::ThisConversionOrReinterpret
- << S.getLangOpts().CPlusPlus << S.Current->getRange(OpPC);
-
if (!CheckPointerToIntegralCast(S, OpPC, Ptr, T::bitWidth()))
return Invalid(S, OpPC);
@@ -2868,7 +2879,9 @@ inline bool DoShift(InterpState &S, CodePtr OpPC, LT &LHS, RT &RHS,
S.CCEDiag(Loc, diag::note_constexpr_negative_shift) << RHS.toAPSInt();
if (!S.noteUndefinedBehavior())
return false;
- RHS = -RHS;
+
+ RHS = RHS.isMin() ? RT(APSInt::getMaxValue(RHS.bitWidth(), false)) : -RHS;
+
return DoShift<LT, RT,
Dir == ShiftDir::Left ? ShiftDir::Right : ShiftDir::Left>(
S, OpPC, LHS, RHS, Result);
@@ -3101,7 +3114,7 @@ inline bool ArrayElemPtr(InterpState &S, CodePtr OpPC) {
S.Stk.push<Pointer>(Ptr.atIndex(0).narrow());
return true;
}
- S.Stk.push<Pointer>(Ptr);
+ S.Stk.push<Pointer>(Ptr.narrow());
return true;
}
@@ -3132,7 +3145,7 @@ inline bool ArrayElemPtrPop(InterpState &S, CodePtr OpPC) {
S.Stk.push<Pointer>(Ptr.atIndex(0).narrow());
return true;
}
- S.Stk.push<Pointer>(Ptr);
+ S.Stk.push<Pointer>(Ptr.narrow());
return true;
}
@@ -3179,6 +3192,9 @@ inline bool CopyArray(InterpState &S, CodePtr OpPC, uint32_t SrcIndex,
if (SrcPtr.isDummy() || DestPtr.isDummy())
return false;
+ if (!SrcPtr.isBlockPointer() || !DestPtr.isBlockPointer())
+ return false;
+
for (uint32_t I = 0; I != Size; ++I) {
const Pointer &SP = SrcPtr.atIndex(SrcIndex + I);
@@ -3207,7 +3223,7 @@ inline bool ArrayDecay(InterpState &S, CodePtr OpPC) {
}
if (Ptr.isRoot() || !Ptr.isUnknownSizeArray()) {
- S.Stk.push<Pointer>(Ptr.atIndex(0));
+ S.Stk.push<Pointer>(Ptr.atIndex(0).narrow());
return true;
}
@@ -3262,12 +3278,6 @@ inline bool GetMemberPtrDecl(InterpState &S, CodePtr OpPC) {
/// Just emit a diagnostic. The expression that caused emission of this
/// op is not valid in a constant context.
-inline bool Invalid(InterpState &S, CodePtr OpPC) {
- const SourceLocation &Loc = S.Current->getLocation(OpPC);
- S.FFDiag(Loc, diag::note_invalid_subexpr_in_const_expr)
- << S.Current->getRange(OpPC);
- return false;
-}
inline bool Unsupported(InterpState &S, CodePtr OpPC) {
const SourceLocation &Loc = S.Current->getLocation(OpPC);
@@ -3286,6 +3296,7 @@ inline bool StartSpeculation(InterpState &S, CodePtr OpPC) {
S.getEvalStatus().Diag = nullptr;
return true;
}
+
inline bool EndSpeculation(InterpState &S, CodePtr OpPC) {
assert(S.SpeculationDepth != 0);
--S.SpeculationDepth;
@@ -3305,6 +3316,19 @@ inline bool PopCC(InterpState &S, CodePtr OpPC) {
return true;
}
+inline bool PushMSVCCE(InterpState &S, CodePtr OpPC) {
+ // This is a per-frame property.
+ ++S.Current->MSVCConstexprAllowed;
+ return true;
+}
+
+inline bool PopMSVCCE(InterpState &S, CodePtr OpPC) {
+ assert(S.Current->MSVCConstexprAllowed >= 1);
+ // This is a per-frame property.
+ --S.Current->MSVCConstexprAllowed;
+ return true;
+}
+
/// Do nothing and just abort execution.
inline bool Error(InterpState &S, CodePtr OpPC) { return false; }
@@ -3312,36 +3336,14 @@ inline bool SideEffect(InterpState &S, CodePtr OpPC) {
return S.noteSideEffect();
}
-/// Same here, but only for casts.
-inline bool InvalidCast(InterpState &S, CodePtr OpPC, CastKind Kind,
- bool Fatal) {
- const SourceLocation &Loc = S.Current->getLocation(OpPC);
-
- if (Kind == CastKind::Reinterpret) {
- S.CCEDiag(Loc, diag::note_constexpr_invalid_cast)
- << static_cast<unsigned>(Kind) << S.Current->getRange(OpPC);
- return !Fatal;
- }
- if (Kind == CastKind::Volatile) {
- if (!S.checkingPotentialConstantExpression()) {
- const auto *E = cast<CastExpr>(S.Current->getExpr(OpPC));
- if (S.getLangOpts().CPlusPlus)
- S.FFDiag(E, diag::note_constexpr_access_volatile_type)
- << AK_Read << E->getSubExpr()->getType();
- else
- S.FFDiag(E);
- }
-
+/// Abort without a diagnostic if we're checking for a potential constant
+/// expression and this is not the bottom frame. This is used in constructors to
+/// allow evaluating their initializers but abort if we encounter anything in
+/// their body.
+inline bool CtorCheck(InterpState &S, CodePtr OpPC) {
+ if (S.checkingPotentialConstantExpression() && !S.Current->isBottomFrame())
return false;
- }
- if (Kind == CastKind::Dynamic) {
- assert(!S.getLangOpts().CPlusPlus20);
- S.CCEDiag(S.Current->getSource(OpPC), diag::note_constexpr_invalid_cast)
- << diag::ConstexprInvalidCastKind::Dynamic;
- return true;
- }
-
- return false;
+ return true;
}
inline bool InvalidStore(InterpState &S, CodePtr OpPC, const Type *T) {
@@ -3356,21 +3358,6 @@ inline bool InvalidStore(InterpState &S, CodePtr OpPC, const Type *T) {
return false;
}
-inline bool InvalidDeclRef(InterpState &S, CodePtr OpPC, const DeclRefExpr *DR,
- bool InitializerFailed) {
- assert(DR);
-
- if (InitializerFailed) {
- const SourceInfo &Loc = S.Current->getSource(OpPC);
- const auto *VD = cast<VarDecl>(DR->getDecl());
- S.FFDiag(Loc, diag::note_constexpr_var_init_non_constant, 1) << VD;
- S.Note(VD->getLocation(), diag::note_declared_at);
- return false;
- }
-
- return CheckDeclRef(S, OpPC, DR);
-}
-
inline bool SizelessVectorElementSize(InterpState &S, CodePtr OpPC) {
if (S.inConstantContext()) {
const SourceRange &ArgRange = S.Current->getRange(OpPC);
@@ -3699,17 +3686,6 @@ inline bool CheckDestruction(InterpState &S, CodePtr OpPC) {
return CheckDestructor(S, OpPC, Ptr);
}
-inline bool CheckArraySize(InterpState &S, CodePtr OpPC, uint64_t NumElems) {
- uint64_t Limit = S.getLangOpts().ConstexprStepLimit;
- if (Limit != 0 && NumElems > Limit) {
- S.FFDiag(S.Current->getSource(OpPC),
- diag::note_constexpr_new_exceeds_limits)
- << NumElems << Limit;
- return false;
- }
- return true;
-}
-
//===----------------------------------------------------------------------===//
// Read opcode arguments
//===----------------------------------------------------------------------===//
diff --git a/clang/lib/AST/ByteCode/InterpBlock.cpp b/clang/lib/AST/ByteCode/InterpBlock.cpp
index ac6f01f..dc0178a 100644
--- a/clang/lib/AST/ByteCode/InterpBlock.cpp
+++ b/clang/lib/AST/ByteCode/InterpBlock.cpp
@@ -100,6 +100,28 @@ bool Block::hasPointer(const Pointer *P) const {
}
#endif
+void Block::movePointersTo(Block *B) {
+ assert(B != this);
+ unsigned MDDiff = static_cast<int>(B->Desc->getMetadataSize()) -
+ static_cast<int>(Desc->getMetadataSize());
+
+ while (Pointers) {
+ Pointer *P = Pointers;
+
+ this->removePointer(P);
+ P->BS.Pointee = B;
+
+ // If the metadata size changed between the two blocks, move the pointer
+ // base/offset. Realistically, this should only happen when we move pointers
+ // from a dummy block to a global one.
+ P->BS.Base += MDDiff;
+ P->Offset += MDDiff;
+
+ B->addPointer(P);
+ }
+ assert(!this->hasPointers());
+}
+
DeadBlock::DeadBlock(DeadBlock *&Root, Block *Blk)
: Root(Root), B(~0u, Blk->Desc, Blk->isExtern(), Blk->IsStatic,
Blk->isWeak(), Blk->isDummy(), /*IsDead=*/true) {
diff --git a/clang/lib/AST/ByteCode/InterpBlock.h b/clang/lib/AST/ByteCode/InterpBlock.h
index 9b3dadc..57f9e7e 100644
--- a/clang/lib/AST/ByteCode/InterpBlock.h
+++ b/clang/lib/AST/ByteCode/InterpBlock.h
@@ -92,6 +92,8 @@ public:
bool isInitialized() const { return IsInitialized; }
/// The Evaluation ID this block was created in.
unsigned getEvalID() const { return EvalID; }
+ /// Move all pointers from this block to \p B.
+ void movePointersTo(Block *B);
/// Returns a pointer to the stored data.
/// You are allowed to read Desc->getSize() bytes from this address.
@@ -120,6 +122,14 @@ public:
}
template <typename T> T &deref() { return *reinterpret_cast<T *>(data()); }
+ template <typename T> T &getBlockDesc() {
+ assert(sizeof(T) == getDescriptor()->getMetadataSize());
+ return *reinterpret_cast<T *>(rawData());
+ }
+ template <typename T> const T &getBlockDesc() const {
+ return const_cast<Block *>(this)->getBlockDesc<T>();
+ }
+
/// Invokes the constructor.
void invokeCtor() {
assert(!IsInitialized);
diff --git a/clang/lib/AST/ByteCode/InterpBuiltin.cpp b/clang/lib/AST/ByteCode/InterpBuiltin.cpp
index a0d2c76..2f86877 100644
--- a/clang/lib/AST/ByteCode/InterpBuiltin.cpp
+++ b/clang/lib/AST/ByteCode/InterpBuiltin.cpp
@@ -8,22 +8,25 @@
#include "../ExprConstShared.h"
#include "Boolean.h"
#include "EvalEmitter.h"
-#include "Interp.h"
#include "InterpBuiltinBitCast.h"
+#include "InterpHelpers.h"
#include "PrimType.h"
+#include "Program.h"
+#include "clang/AST/InferAlloc.h"
#include "clang/AST/OSLog.h"
#include "clang/AST/RecordLayout.h"
#include "clang/Basic/Builtins.h"
#include "clang/Basic/TargetBuiltins.h"
#include "clang/Basic/TargetInfo.h"
#include "llvm/ADT/StringExtras.h"
+#include "llvm/Support/AllocToken.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/SipHash.h"
namespace clang {
namespace interp {
-LLVM_ATTRIBUTE_UNUSED static bool isNoopBuiltin(unsigned ID) {
+[[maybe_unused]] static bool isNoopBuiltin(unsigned ID) {
switch (ID) {
case Builtin::BIas_const:
case Builtin::BIforward:
@@ -45,6 +48,11 @@ static void discard(InterpStack &Stk, PrimType T) {
TYPE_SWITCH(T, { Stk.discard<T>(); });
}
+static uint64_t popToUInt64(const InterpState &S, const Expr *E) {
+ INT_TYPE_SWITCH(*S.getContext().classify(E->getType()),
+ return static_cast<uint64_t>(S.Stk.pop<T>()));
+}
+
static APSInt popToAPSInt(InterpStack &Stk, PrimType T) {
INT_TYPE_SWITCH(T, return Stk.pop<T>().toAPSInt());
}
@@ -56,14 +64,27 @@ static APSInt popToAPSInt(InterpState &S, QualType T) {
return popToAPSInt(S.Stk, *S.getContext().classify(T));
}
+/// Check for common reasons a pointer can't be read from, which
+/// are usually not diagnosed in a builtin function.
+static bool isReadable(const Pointer &P) {
+ if (P.isDummy())
+ return false;
+ if (!P.isBlockPointer())
+ return false;
+ if (!P.isLive())
+ return false;
+ if (P.isOnePastEnd())
+ return false;
+ return true;
+}
+
/// Pushes \p Val on the stack as the type given by \p QT.
static void pushInteger(InterpState &S, const APSInt &Val, QualType QT) {
assert(QT->isSignedIntegerOrEnumerationType() ||
QT->isUnsignedIntegerOrEnumerationType());
OptPrimType T = S.getContext().classify(QT);
assert(T);
-
- unsigned BitWidth = S.getASTContext().getTypeSize(QT);
+ unsigned BitWidth = S.getASTContext().getIntWidth(QT);
if (T == PT_IntAPS) {
auto Result = S.allocAP<IntegralAP<true>>(BitWidth);
@@ -126,7 +147,7 @@ static QualType getElemType(const Pointer &P) {
if (Desc->isPrimitive())
return T;
if (T->isPointerType())
- return T->getAs<PointerType>()->getPointeeType();
+ return T->castAs<PointerType>()->getPointeeType();
if (Desc->isArray())
return Desc->getElemQualType();
if (const auto *AT = T->getAsArrayTypeUnsafe())
@@ -164,6 +185,38 @@ static llvm::APSInt convertBoolVectorToInt(const Pointer &Val) {
return Result;
}
+// Strict double -> float conversion used for X86 PD2PS/cvtsd2ss intrinsics.
+// Reject NaN/Inf/Subnormal inputs and any lossy/inexact conversions.
+static bool convertDoubleToFloatStrict(APFloat Src, Floating &Dst,
+ InterpState &S, const Expr *DiagExpr) {
+ if (Src.isInfinity()) {
+ if (S.diagnosing())
+ S.CCEDiag(DiagExpr, diag::note_constexpr_float_arithmetic) << 0;
+ return false;
+ }
+ if (Src.isNaN()) {
+ if (S.diagnosing())
+ S.CCEDiag(DiagExpr, diag::note_constexpr_float_arithmetic) << 1;
+ return false;
+ }
+ APFloat Val = Src;
+ bool LosesInfo = false;
+ APFloat::opStatus Status = Val.convert(
+ APFloat::IEEEsingle(), APFloat::rmNearestTiesToEven, &LosesInfo);
+ if (LosesInfo || Val.isDenormal()) {
+ if (S.diagnosing())
+ S.CCEDiag(DiagExpr, diag::note_constexpr_float_arithmetic_strict);
+ return false;
+ }
+ if (Status != APFloat::opOK) {
+ if (S.diagnosing())
+ S.CCEDiag(DiagExpr, diag::note_invalid_subexpr_in_const_expr);
+ return false;
+ }
+ Dst.copy(Val);
+ return true;
+}
+
static bool interp__builtin_is_constant_evaluated(InterpState &S, CodePtr OpPC,
const InterpFrame *Frame,
const CallExpr *Call) {
@@ -194,12 +247,13 @@ static bool interp__builtin_is_constant_evaluated(InterpState &S, CodePtr OpPC,
return true;
}
-// __builtin_assume(int)
+// __builtin_assume
+// __assume (MS extension)
static bool interp__builtin_assume(InterpState &S, CodePtr OpPC,
const InterpFrame *Frame,
const CallExpr *Call) {
+ // Nothing to be done here since the argument is NOT evaluated.
assert(Call->getNumArgs() == 1);
- discard(S.Stk, *S.getContext().classify(Call->getArg(0)));
return true;
}
@@ -209,8 +263,7 @@ static bool interp__builtin_strcmp(InterpState &S, CodePtr OpPC,
uint64_t Limit = ~static_cast<uint64_t>(0);
if (ID == Builtin::BIstrncmp || ID == Builtin::BI__builtin_strncmp ||
ID == Builtin::BIwcsncmp || ID == Builtin::BI__builtin_wcsncmp)
- Limit = popToAPSInt(S.Stk, *S.getContext().classify(Call->getArg(2)))
- .getZExtValue();
+ Limit = popToUInt64(S, Call->getArg(2));
const Pointer &B = S.Stk.pop<Pointer>();
const Pointer &A = S.Stk.pop<Pointer>();
@@ -293,7 +346,7 @@ static bool interp__builtin_strcmp(InterpState &S, CodePtr OpPC,
static bool interp__builtin_strlen(InterpState &S, CodePtr OpPC,
const InterpFrame *Frame,
const CallExpr *Call, unsigned ID) {
- const Pointer &StrPtr = S.Stk.pop<Pointer>();
+ const Pointer &StrPtr = S.Stk.pop<Pointer>().expand();
if (ID == Builtin::BIstrlen || ID == Builtin::BIwcslen)
diagnoseNonConstexprBuiltin(S, OpPC, ID);
@@ -314,8 +367,10 @@ static bool interp__builtin_strlen(InterpState &S, CodePtr OpPC,
unsigned ElemSize = StrPtr.getFieldDesc()->getElemSize();
if (ID == Builtin::BI__builtin_wcslen || ID == Builtin::BIwcslen) {
- [[maybe_unused]] const ASTContext &AC = S.getASTContext();
- assert(ElemSize == AC.getTypeSizeInChars(AC.getWCharType()).getQuantity());
+ const ASTContext &AC = S.getASTContext();
+ unsigned WCharSize = AC.getTypeSizeInChars(AC.getWCharType()).getQuantity();
+ if (ElemSize != WCharSize)
+ return false;
}
size_t Len = 0;
@@ -678,6 +733,30 @@ static bool interp__builtin_popcount(InterpState &S, CodePtr OpPC,
return true;
}
+static bool interp__builtin_ia32_crc32(InterpState &S, CodePtr OpPC,
+ const InterpFrame *Frame,
+ const CallExpr *Call,
+ unsigned DataBytes) {
+ uint64_t DataVal = popToUInt64(S, Call->getArg(1));
+ uint64_t CRCVal = popToUInt64(S, Call->getArg(0));
+
+ // CRC32C polynomial (iSCSI polynomial, bit-reversed)
+ static const uint32_t CRC32C_POLY = 0x82F63B78;
+
+ // Process each byte
+ uint32_t Result = static_cast<uint32_t>(CRCVal);
+ for (unsigned I = 0; I != DataBytes; ++I) {
+ uint8_t Byte = static_cast<uint8_t>((DataVal >> (I * 8)) & 0xFF);
+ Result ^= Byte;
+ for (int J = 0; J != 8; ++J) {
+ Result = (Result >> 1) ^ ((Result & 1) ? CRC32C_POLY : 0);
+ }
+ }
+
+ pushInteger(S, Result, Call->getType());
+ return true;
+}
+
static bool interp__builtin_classify_type(InterpState &S, CodePtr OpPC,
const InterpFrame *Frame,
const CallExpr *Call) {
@@ -746,7 +825,7 @@ static bool interp__builtin_overflowop(InterpState &S, CodePtr OpPC,
const CallExpr *Call,
unsigned BuiltinOp) {
const Pointer &ResultPtr = S.Stk.pop<Pointer>();
- if (ResultPtr.isDummy())
+ if (ResultPtr.isDummy() || !ResultPtr.isBlockPointer())
return false;
PrimType RHST = *S.getContext().classify(Call->getArg(1)->getType());
@@ -856,7 +935,7 @@ static bool interp__builtin_carryop(InterpState &S, CodePtr OpPC,
APSInt RHS = popToAPSInt(S.Stk, RHST);
APSInt LHS = popToAPSInt(S.Stk, LHST);
- if (CarryOutPtr.isDummy())
+ if (CarryOutPtr.isDummy() || !CarryOutPtr.isBlockPointer())
return false;
APSInt CarryOut;
@@ -969,9 +1048,10 @@ static bool interp__builtin_bswap(InterpState &S, CodePtr OpPC,
const InterpFrame *Frame,
const CallExpr *Call) {
const APSInt &Val = popToAPSInt(S, Call->getArg(0));
- assert(Val.getActiveBits() <= 64);
-
- pushInteger(S, Val.byteSwap(), Call->getType());
+ if (Val.getBitWidth() == 8 || Val.getBitWidth() == 1)
+ pushInteger(S, Val, Call->getType());
+ else
+ pushInteger(S, Val.byteSwap(), Call->getType());
return true;
}
@@ -987,7 +1067,7 @@ static bool interp__builtin_atomic_lock_free(InterpState &S, CodePtr OpPC,
};
const Pointer &Ptr = S.Stk.pop<Pointer>();
- const APSInt &SizeVal = popToAPSInt(S, Call->getArg(0));
+ uint64_t SizeVal = popToUInt64(S, Call->getArg(0));
// For __atomic_is_lock_free(sizeof(_Atomic(T))), if the size is a power
// of two less than or equal to the maximum inline atomic width, we know it
@@ -999,7 +1079,7 @@ static bool interp__builtin_atomic_lock_free(InterpState &S, CodePtr OpPC,
// x86-64 processors.
// Check power-of-two.
- CharUnits Size = CharUnits::fromQuantity(SizeVal.getZExtValue());
+ CharUnits Size = CharUnits::fromQuantity(SizeVal);
if (Size.isPowerOfTwo()) {
// Check against inlining width.
unsigned InlineWidthBits =
@@ -1045,7 +1125,7 @@ static bool interp__builtin_atomic_lock_free(InterpState &S, CodePtr OpPC,
if (BuiltinOp == Builtin::BI__atomic_always_lock_free)
return returnBool(false);
- return false;
+ return Invalid(S, OpPC);
}
/// bool __c11_atomic_is_lock_free(size_t)
@@ -1053,9 +1133,9 @@ static bool interp__builtin_c11_atomic_is_lock_free(InterpState &S,
CodePtr OpPC,
const InterpFrame *Frame,
const CallExpr *Call) {
- const APSInt &SizeVal = popToAPSInt(S, Call->getArg(0));
+ uint64_t SizeVal = popToUInt64(S, Call->getArg(0));
- CharUnits Size = CharUnits::fromQuantity(SizeVal.getZExtValue());
+ CharUnits Size = CharUnits::fromQuantity(SizeVal);
if (Size.isPowerOfTwo()) {
// Check against inlining width.
unsigned InlineWidthBits =
@@ -1131,7 +1211,10 @@ static bool interp__builtin_is_aligned_up_down(InterpState &S, CodePtr OpPC,
if (!Ptr.isBlockPointer())
return false;
- unsigned PtrOffset = Ptr.getIndex();
+ // For one-past-end pointers, we can't call getIndex() since it asserts.
+ // Use getNumElems() instead which gives the correct index for past-end.
+ unsigned PtrOffset =
+ Ptr.isElementPastEnd() ? Ptr.getNumElems() : Ptr.getIndex();
CharUnits BaseAlignment =
S.getASTContext().getDeclAlign(Ptr.getDeclDesc()->asValueDecl());
CharUnits PtrAlign =
@@ -1306,6 +1389,46 @@ interp__builtin_ptrauth_string_discriminator(InterpState &S, CodePtr OpPC,
return true;
}
+static bool interp__builtin_infer_alloc_token(InterpState &S, CodePtr OpPC,
+ const InterpFrame *Frame,
+ const CallExpr *Call) {
+ const ASTContext &ASTCtx = S.getASTContext();
+ uint64_t BitWidth = ASTCtx.getTypeSize(ASTCtx.getSizeType());
+ auto Mode =
+ ASTCtx.getLangOpts().AllocTokenMode.value_or(llvm::DefaultAllocTokenMode);
+ auto MaxTokensOpt = ASTCtx.getLangOpts().AllocTokenMax;
+ uint64_t MaxTokens =
+ MaxTokensOpt.value_or(0) ? *MaxTokensOpt : (~0ULL >> (64 - BitWidth));
+
+ // We do not read any of the arguments; discard them.
+ for (int I = Call->getNumArgs() - 1; I >= 0; --I)
+ discard(S.Stk, S.getContext().classify(Call->getArg(I)).value_or(PT_Ptr));
+
+ // Note: Type inference from a surrounding cast is not supported in
+ // constexpr evaluation.
+ QualType AllocType = infer_alloc::inferPossibleType(Call, ASTCtx, nullptr);
+ if (AllocType.isNull()) {
+ S.CCEDiag(Call,
+ diag::note_constexpr_infer_alloc_token_type_inference_failed);
+ return false;
+ }
+
+ auto ATMD = infer_alloc::getAllocTokenMetadata(AllocType, ASTCtx);
+ if (!ATMD) {
+ S.CCEDiag(Call, diag::note_constexpr_infer_alloc_token_no_metadata);
+ return false;
+ }
+
+ auto MaybeToken = llvm::getAllocToken(Mode, *ATMD, MaxTokens);
+ if (!MaybeToken) {
+ S.CCEDiag(Call, diag::note_constexpr_infer_alloc_token_stateful_mode);
+ return false;
+ }
+
+ pushInteger(S, llvm::APInt(BitWidth, *MaybeToken), ASTCtx.getSizeType());
+ return true;
+}
+
static bool interp__builtin_operator_new(InterpState &S, CodePtr OpPC,
const InterpFrame *Frame,
const CallExpr *Call) {
@@ -1397,7 +1520,7 @@ static bool interp__builtin_operator_new(InterpState &S, CodePtr OpPC,
Allocator.allocate(Desc, NumElems.getZExtValue(), S.Ctx.getEvalID(),
DynamicAllocator::Form::Operator);
assert(B);
- S.Stk.push<Pointer>(Pointer(B).atIndex(0));
+ S.Stk.push<Pointer>(Pointer(B).atIndex(0).narrow());
return true;
}
@@ -1583,51 +1706,6 @@ static bool interp__builtin_elementwise_abs(InterpState &S, CodePtr OpPC,
}
/// Can be called with an integer or vector as the first and only parameter.
-static bool interp__builtin_elementwise_popcount(InterpState &S, CodePtr OpPC,
- const InterpFrame *Frame,
- const CallExpr *Call,
- unsigned BuiltinID) {
- assert(Call->getNumArgs() == 1);
- if (Call->getArg(0)->getType()->isIntegerType()) {
- APSInt Val = popToAPSInt(S, Call->getArg(0));
-
- if (BuiltinID == Builtin::BI__builtin_elementwise_popcount) {
- pushInteger(S, Val.popcount(), Call->getType());
- } else {
- pushInteger(S, Val.reverseBits(), Call->getType());
- }
- return true;
- }
- // Otherwise, the argument must be a vector.
- assert(Call->getArg(0)->getType()->isVectorType());
- const Pointer &Arg = S.Stk.pop<Pointer>();
- assert(Arg.getFieldDesc()->isPrimitiveArray());
- const Pointer &Dst = S.Stk.peek<Pointer>();
- assert(Dst.getFieldDesc()->isPrimitiveArray());
- assert(Arg.getFieldDesc()->getNumElems() ==
- Dst.getFieldDesc()->getNumElems());
-
- QualType ElemType = Arg.getFieldDesc()->getElemQualType();
- PrimType ElemT = *S.getContext().classify(ElemType);
- unsigned NumElems = Arg.getNumElems();
-
- // FIXME: Reading from uninitialized vector elements?
- for (unsigned I = 0; I != NumElems; ++I) {
- INT_TYPE_SWITCH_NO_BOOL(ElemT, {
- if (BuiltinID == Builtin::BI__builtin_elementwise_popcount) {
- Dst.elem<T>(I) = T::from(Arg.elem<T>(I).toAPSInt().popcount());
- } else {
- Dst.elem<T>(I) =
- T::from(Arg.elem<T>(I).toAPSInt().reverseBits().getZExtValue());
- }
- });
- }
- Dst.initializeAllElements();
-
- return true;
-}
-
-/// Can be called with an integer or vector as the first and only parameter.
static bool interp__builtin_elementwise_countzeroes(InterpState &S,
CodePtr OpPC,
const InterpFrame *Frame,
@@ -1720,11 +1798,9 @@ static bool interp__builtin_memcpy(InterpState &S, CodePtr OpPC,
const CallExpr *Call, unsigned ID) {
assert(Call->getNumArgs() == 3);
const ASTContext &ASTCtx = S.getASTContext();
- APSInt Size = popToAPSInt(S, Call->getArg(2));
- const Pointer SrcPtr = S.Stk.pop<Pointer>();
- const Pointer DestPtr = S.Stk.pop<Pointer>();
-
- assert(!Size.isSigned() && "memcpy and friends take an unsigned size");
+ uint64_t Size = popToUInt64(S, Call->getArg(2));
+ Pointer SrcPtr = S.Stk.pop<Pointer>().expand();
+ Pointer DestPtr = S.Stk.pop<Pointer>().expand();
if (ID == Builtin::BImemcpy || ID == Builtin::BImemmove)
diagnoseNonConstexprBuiltin(S, OpPC, ID);
@@ -1737,7 +1813,7 @@ static bool interp__builtin_memcpy(InterpState &S, CodePtr OpPC,
ID == Builtin::BI__builtin_wmemmove;
// If the size is zero, we treat this as always being a valid no-op.
- if (Size.isZero()) {
+ if (Size == 0) {
S.Stk.push<Pointer>(DestPtr);
return true;
}
@@ -1761,8 +1837,7 @@ static bool interp__builtin_memcpy(InterpState &S, CodePtr OpPC,
return false;
}
- // Can't read from dummy pointers.
- if (DestPtr.isDummy() || SrcPtr.isDummy())
+ if (!isReadable(DestPtr) || !isReadable(SrcPtr))
return false;
if (DestPtr.getType()->isIncompleteType()) {
@@ -1799,11 +1874,10 @@ static bool interp__builtin_memcpy(InterpState &S, CodePtr OpPC,
if (WChar) {
uint64_t WCharSize =
ASTCtx.getTypeSizeInChars(ASTCtx.getWCharType()).getQuantity();
- Size *= APSInt(APInt(Size.getBitWidth(), WCharSize, /*IsSigned=*/false),
- /*IsUnsigend=*/true);
+ Size *= WCharSize;
}
- if (Size.urem(DestElemSize) != 0) {
+ if (Size % DestElemSize != 0) {
S.FFDiag(S.Current->getSource(OpPC),
diag::note_constexpr_memcpy_unsupported)
<< Move << WChar << 0 << DestElemType << Size << DestElemSize;
@@ -1836,40 +1910,34 @@ static bool interp__builtin_memcpy(InterpState &S, CodePtr OpPC,
// Check if we have enough elements to read from and write to.
size_t RemainingDestBytes = RemainingDestElems * DestElemSize;
size_t RemainingSrcBytes = RemainingSrcElems * SrcElemSize;
- if (Size.ugt(RemainingDestBytes) || Size.ugt(RemainingSrcBytes)) {
- APInt N = Size.udiv(DestElemSize);
+ if (Size > RemainingDestBytes || Size > RemainingSrcBytes) {
+ APInt N = APInt(64, Size / DestElemSize);
S.FFDiag(S.Current->getSource(OpPC),
diag::note_constexpr_memcpy_unsupported)
- << Move << WChar << (Size.ugt(RemainingSrcBytes) ? 1 : 2)
- << DestElemType << toString(N, 10, /*Signed=*/false);
+ << Move << WChar << (Size > RemainingSrcBytes ? 1 : 2) << DestElemType
+ << toString(N, 10, /*Signed=*/false);
return false;
}
// Check for overlapping memory regions.
if (!Move && Pointer::pointToSameBlock(SrcPtr, DestPtr)) {
// Remove base casts.
- Pointer SrcP = SrcPtr;
- while (SrcP.isBaseClass())
- SrcP = SrcP.getBase();
-
- Pointer DestP = DestPtr;
- while (DestP.isBaseClass())
- DestP = DestP.getBase();
+ Pointer SrcP = SrcPtr.stripBaseCasts();
+ Pointer DestP = DestPtr.stripBaseCasts();
unsigned SrcIndex = SrcP.expand().getIndex() * SrcP.elemSize();
unsigned DstIndex = DestP.expand().getIndex() * DestP.elemSize();
- unsigned N = Size.getZExtValue();
- if ((SrcIndex <= DstIndex && (SrcIndex + N) > DstIndex) ||
- (DstIndex <= SrcIndex && (DstIndex + N) > SrcIndex)) {
+ if ((SrcIndex <= DstIndex && (SrcIndex + Size) > DstIndex) ||
+ (DstIndex <= SrcIndex && (DstIndex + Size) > SrcIndex)) {
S.FFDiag(S.Current->getSource(OpPC), diag::note_constexpr_memcpy_overlap)
<< /*IsWChar=*/false;
return false;
}
}
- assert(Size.getZExtValue() % DestElemSize == 0);
- if (!DoMemcpy(S, OpPC, SrcPtr, DestPtr, Bytes(Size.getZExtValue()).toBits()))
+ assert(Size % DestElemSize == 0);
+ if (!DoMemcpy(S, OpPC, SrcPtr, DestPtr, Bytes(Size).toBits()))
return false;
S.Stk.push<Pointer>(DestPtr);
@@ -1886,7 +1954,7 @@ static bool interp__builtin_memcmp(InterpState &S, CodePtr OpPC,
const InterpFrame *Frame,
const CallExpr *Call, unsigned ID) {
assert(Call->getNumArgs() == 3);
- const APSInt &Size = popToAPSInt(S, Call->getArg(2));
+ uint64_t Size = popToUInt64(S, Call->getArg(2));
const Pointer &PtrB = S.Stk.pop<Pointer>();
const Pointer &PtrA = S.Stk.pop<Pointer>();
@@ -1894,11 +1962,14 @@ static bool interp__builtin_memcmp(InterpState &S, CodePtr OpPC,
ID == Builtin::BIwmemcmp)
diagnoseNonConstexprBuiltin(S, OpPC, ID);
- if (Size.isZero()) {
+ if (Size == 0) {
pushInteger(S, 0, Call->getType());
return true;
}
+ if (!PtrA.isBlockPointer() || !PtrB.isBlockPointer())
+ return false;
+
bool IsWide =
(ID == Builtin::BIwmemcmp || ID == Builtin::BI__builtin_wmemcmp);
@@ -1919,6 +1990,10 @@ static bool interp__builtin_memcmp(InterpState &S, CodePtr OpPC,
if (PtrA.isDummy() || PtrB.isDummy())
return false;
+ if (!CheckRange(S, OpPC, PtrA, AK_Read) ||
+ !CheckRange(S, OpPC, PtrB, AK_Read))
+ return false;
+
// Now, read both pointers to a buffer and compare those.
BitcastBuffer BufferA(
Bits(ASTCtx.getTypeSize(ElemTypeA) * PtrA.getNumElems()));
@@ -1944,14 +2019,14 @@ static bool interp__builtin_memcmp(InterpState &S, CodePtr OpPC,
ElemSize = ASTCtx.getTypeSizeInChars(ASTCtx.getWCharType()).getQuantity();
// The Size given for the wide variants is in wide-char units. Convert it
// to bytes.
- size_t ByteSize = Size.getZExtValue() * ElemSize;
+ size_t ByteSize = Size * ElemSize;
size_t CmpSize = std::min(MinBufferSize, ByteSize);
for (size_t I = 0; I != CmpSize; I += ElemSize) {
if (IsWide) {
INT_TYPE_SWITCH(*S.getContext().classify(ASTCtx.getWCharType()), {
- T A = *reinterpret_cast<T *>(BufferA.Data.get() + I);
- T B = *reinterpret_cast<T *>(BufferB.Data.get() + I);
+ T A = *reinterpret_cast<T *>(BufferA.atByte(I));
+ T B = *reinterpret_cast<T *>(BufferB.atByte(I));
if (A < B) {
pushInteger(S, -1, Call->getType());
return true;
@@ -1962,8 +2037,8 @@ static bool interp__builtin_memcmp(InterpState &S, CodePtr OpPC,
}
});
} else {
- std::byte A = BufferA.Data[I];
- std::byte B = BufferB.Data[I];
+ std::byte A = BufferA.deref<std::byte>(Bytes(I));
+ std::byte B = BufferB.deref<std::byte>(Bytes(I));
if (A < B) {
pushInteger(S, -1, Call->getType());
@@ -2027,6 +2102,9 @@ static bool interp__builtin_memchr(InterpState &S, CodePtr OpPC,
return false;
}
+ if (!Ptr.isBlockPointer())
+ return false;
+
QualType ElemTy = Ptr.getFieldDesc()->isArray()
? Ptr.getFieldDesc()->getElemQualType()
: Ptr.getFieldDesc()->getType();
@@ -2040,11 +2118,20 @@ static bool interp__builtin_memchr(InterpState &S, CodePtr OpPC,
return false;
}
+ if (!isReadable(Ptr))
+ return false;
+
if (ID == Builtin::BIstrchr || ID == Builtin::BI__builtin_strchr) {
+ int64_t DesiredTrunc;
+ if (S.getASTContext().CharTy->isSignedIntegerType())
+ DesiredTrunc =
+ Desired.trunc(S.getASTContext().getCharWidth()).getSExtValue();
+ else
+ DesiredTrunc =
+ Desired.trunc(S.getASTContext().getCharWidth()).getZExtValue();
// strchr compares directly to the passed integer, and therefore
// always fails if given an int that is not a char.
- if (Desired !=
- Desired.trunc(S.getASTContext().getCharWidth()).getSExtValue()) {
+ if (Desired != DesiredTrunc) {
S.Stk.push<Pointer>();
return true;
}
@@ -2191,7 +2278,8 @@ static bool pointsToLastObject(const Pointer &Ptr) {
}
/// Does Ptr point to the last object AND to a flexible array member?
-static bool isUserWritingOffTheEnd(const ASTContext &Ctx, const Pointer &Ptr) {
+static bool isUserWritingOffTheEnd(const ASTContext &Ctx, const Pointer &Ptr,
+ bool InvalidBase) {
auto isFlexibleArrayMember = [&](const Descriptor *FieldDesc) {
using FAMKind = LangOptions::StrictFlexArraysLevelKind;
FAMKind StrictFlexArraysLevel =
@@ -2213,74 +2301,79 @@ static bool isUserWritingOffTheEnd(const ASTContext &Ctx, const Pointer &Ptr) {
if (!FieldDesc->isArray())
return false;
- return Ptr.isDummy() && pointsToLastObject(Ptr) &&
+ return InvalidBase && pointsToLastObject(Ptr) &&
isFlexibleArrayMember(FieldDesc);
}
-static bool interp__builtin_object_size(InterpState &S, CodePtr OpPC,
- const InterpFrame *Frame,
- const CallExpr *Call) {
- const ASTContext &ASTCtx = S.getASTContext();
- // From the GCC docs:
- // Kind is an integer constant from 0 to 3. If the least significant bit is
- // clear, objects are whole variables. If it is set, a closest surrounding
- // subobject is considered the object a pointer points to. The second bit
- // determines if maximum or minimum of remaining bytes is computed.
- unsigned Kind = popToAPSInt(S, Call->getArg(1)).getZExtValue();
- assert(Kind <= 3 && "unexpected kind");
- bool UseFieldDesc = (Kind & 1u);
- bool ReportMinimum = (Kind & 2u);
- const Pointer &Ptr = S.Stk.pop<Pointer>();
-
- if (Call->getArg(0)->HasSideEffects(ASTCtx)) {
- // "If there are any side effects in them, it returns (size_t) -1
- // for type 0 or 1 and (size_t) 0 for type 2 or 3."
- pushInteger(S, Kind <= 1 ? -1 : 0, Call->getType());
- return true;
- }
-
+UnsignedOrNone evaluateBuiltinObjectSize(const ASTContext &ASTCtx,
+ unsigned Kind, Pointer &Ptr) {
if (Ptr.isZero() || !Ptr.isBlockPointer())
- return false;
+ return std::nullopt;
- // We can't load through pointers.
if (Ptr.isDummy() && Ptr.getType()->isPointerType())
- return false;
+ return std::nullopt;
+
+ bool InvalidBase = false;
+
+ if (Ptr.isDummy()) {
+ if (const VarDecl *VD = Ptr.getDeclDesc()->asVarDecl();
+ VD && VD->getType()->isPointerType())
+ InvalidBase = true;
+ }
+
+ // According to the GCC documentation, we want the size of the subobject
+ // denoted by the pointer. But that's not quite right -- what we actually
+ // want is the size of the immediately-enclosing array, if there is one.
+ if (Ptr.isArrayElement())
+ Ptr = Ptr.expand();
bool DetermineForCompleteObject = Ptr.getFieldDesc() == Ptr.getDeclDesc();
const Descriptor *DeclDesc = Ptr.getDeclDesc();
assert(DeclDesc);
+ bool UseFieldDesc = (Kind & 1u);
+ bool ReportMinimum = (Kind & 2u);
if (!UseFieldDesc || DetermineForCompleteObject) {
// Lower bound, so we can't fall back to this.
- if (ReportMinimum && !DetermineForCompleteObject)
- return false;
+ if (ReportMinimum && UseFieldDesc && !DetermineForCompleteObject)
+ return std::nullopt;
// Can't read beyond the pointer decl desc.
if (!UseFieldDesc && !ReportMinimum && DeclDesc->getType()->isPointerType())
- return false;
+ return std::nullopt;
+
+ if (InvalidBase)
+ return std::nullopt;
} else {
- if (isUserWritingOffTheEnd(ASTCtx, Ptr.expand())) {
+ if (isUserWritingOffTheEnd(ASTCtx, Ptr, InvalidBase)) {
// If we cannot determine the size of the initial allocation, then we
// can't given an accurate upper-bound. However, we are still able to give
// conservative lower-bounds for Type=3.
if (Kind == 1)
- return false;
+ return std::nullopt;
}
}
+ // The "closest surrounding subobject" is NOT a base class,
+ // so strip the base class casts.
+ if (UseFieldDesc && Ptr.isBaseClass())
+ Ptr = Ptr.stripBaseCasts();
+
const Descriptor *Desc = UseFieldDesc ? Ptr.getFieldDesc() : DeclDesc;
assert(Desc);
std::optional<unsigned> FullSize = computeFullDescSize(ASTCtx, Desc);
if (!FullSize)
- return false;
+ return std::nullopt;
unsigned ByteOffset;
if (UseFieldDesc) {
- if (Ptr.isBaseClass())
+ if (Ptr.isBaseClass()) {
+ assert(computePointerOffset(ASTCtx, Ptr.getBase()) <=
+ computePointerOffset(ASTCtx, Ptr));
ByteOffset = computePointerOffset(ASTCtx, Ptr.getBase()) -
computePointerOffset(ASTCtx, Ptr);
- else {
+ } else {
if (Ptr.inArray())
ByteOffset =
computePointerOffset(ASTCtx, Ptr) -
@@ -2292,10 +2385,34 @@ static bool interp__builtin_object_size(InterpState &S, CodePtr OpPC,
ByteOffset = computePointerOffset(ASTCtx, Ptr);
assert(ByteOffset <= *FullSize);
- unsigned Result = *FullSize - ByteOffset;
+ return *FullSize - ByteOffset;
+}
- pushInteger(S, Result, Call->getType());
- return true;
+static bool interp__builtin_object_size(InterpState &S, CodePtr OpPC,
+ const InterpFrame *Frame,
+ const CallExpr *Call) {
+ const ASTContext &ASTCtx = S.getASTContext();
+ // From the GCC docs:
+ // Kind is an integer constant from 0 to 3. If the least significant bit is
+ // clear, objects are whole variables. If it is set, a closest surrounding
+ // subobject is considered the object a pointer points to. The second bit
+ // determines if maximum or minimum of remaining bytes is computed.
+ unsigned Kind = popToUInt64(S, Call->getArg(1));
+ assert(Kind <= 3 && "unexpected kind");
+ Pointer Ptr = S.Stk.pop<Pointer>();
+
+ if (Call->getArg(0)->HasSideEffects(ASTCtx)) {
+ // "If there are any side effects in them, it returns (size_t) -1
+ // for type 0 or 1 and (size_t) 0 for type 2 or 3."
+ pushInteger(S, Kind <= 1 ? -1 : 0, Call->getType());
+ return true;
+ }
+
+ if (auto Result = evaluateBuiltinObjectSize(ASTCtx, Kind, Ptr)) {
+ pushInteger(S, *Result, Call->getType());
+ return true;
+ }
+ return false;
}
static bool interp__builtin_is_within_lifetime(InterpState &S, CodePtr OpPC,
@@ -2354,18 +2471,79 @@ static bool interp__builtin_elementwise_int_unaryop(
InterpState &S, CodePtr OpPC, const CallExpr *Call,
llvm::function_ref<APInt(const APSInt &)> Fn) {
assert(Call->getNumArgs() == 1);
- assert(Call->getType()->isIntegerType());
// Single integer case.
if (!Call->getArg(0)->getType()->isVectorType()) {
+ assert(Call->getType()->isIntegerType());
APSInt Src = popToAPSInt(S, Call->getArg(0));
APInt Result = Fn(Src);
pushInteger(S, APSInt(std::move(Result), !Src.isSigned()), Call->getType());
return true;
}
- // TODO: Add vector integer handling.
- return false;
+ // Vector case.
+ const Pointer &Arg = S.Stk.pop<Pointer>();
+ assert(Arg.getFieldDesc()->isPrimitiveArray());
+ const Pointer &Dst = S.Stk.peek<Pointer>();
+ assert(Dst.getFieldDesc()->isPrimitiveArray());
+ assert(Arg.getFieldDesc()->getNumElems() ==
+ Dst.getFieldDesc()->getNumElems());
+
+ QualType ElemType = Arg.getFieldDesc()->getElemQualType();
+ PrimType ElemT = *S.getContext().classify(ElemType);
+ unsigned NumElems = Arg.getNumElems();
+ bool DestUnsigned = Call->getType()->isUnsignedIntegerOrEnumerationType();
+
+ for (unsigned I = 0; I != NumElems; ++I) {
+ INT_TYPE_SWITCH_NO_BOOL(ElemT, {
+ APSInt Src = Arg.elem<T>(I).toAPSInt();
+ APInt Result = Fn(Src);
+ Dst.elem<T>(I) = static_cast<T>(APSInt(std::move(Result), DestUnsigned));
+ });
+ }
+ Dst.initializeAllElements();
+
+ return true;
+}
+
+static bool interp__builtin_elementwise_fp_binop(
+ InterpState &S, CodePtr OpPC, const CallExpr *Call,
+ llvm::function_ref<std::optional<APFloat>(
+ const APFloat &, const APFloat &, std::optional<APSInt> RoundingMode)>
+ Fn) {
+ assert((Call->getNumArgs() == 2) || (Call->getNumArgs() == 3));
+ const auto *VT = Call->getArg(0)->getType()->castAs<VectorType>();
+ assert(VT->getElementType()->isFloatingType());
+ unsigned NumElems = VT->getNumElements();
+
+ // Vector case.
+ assert(Call->getArg(0)->getType()->isVectorType() &&
+ Call->getArg(1)->getType()->isVectorType());
+ assert(VT->getElementType() ==
+ Call->getArg(1)->getType()->castAs<VectorType>()->getElementType());
+ assert(VT->getNumElements() ==
+ Call->getArg(1)->getType()->castAs<VectorType>()->getNumElements());
+
+ std::optional<APSInt> RoundingMode = std::nullopt;
+ if (Call->getNumArgs() == 3)
+ RoundingMode = popToAPSInt(S, Call->getArg(2));
+
+ const Pointer &BPtr = S.Stk.pop<Pointer>();
+ const Pointer &APtr = S.Stk.pop<Pointer>();
+ const Pointer &Dst = S.Stk.peek<Pointer>();
+ for (unsigned ElemIdx = 0; ElemIdx != NumElems; ++ElemIdx) {
+ using T = PrimConv<PT_Float>::T;
+ APFloat ElemA = APtr.elem<T>(ElemIdx).getAPFloat();
+ APFloat ElemB = BPtr.elem<T>(ElemIdx).getAPFloat();
+ std::optional<APFloat> Result = Fn(ElemA, ElemB, RoundingMode);
+ if (!Result)
+ return false;
+ Dst.elem<T>(ElemIdx) = static_cast<T>(*Result);
+ }
+
+ Dst.initializeAllElements();
+
+ return true;
}
static bool interp__builtin_elementwise_int_binop(
@@ -2663,6 +2841,94 @@ static bool interp_builtin_horizontal_fp_binop(
return true;
}
+static bool interp__builtin_ia32_addsub(InterpState &S, CodePtr OpPC,
+ const CallExpr *Call) {
+ // Addsub: alternates between subtraction and addition
+ // Result[i] = (i % 2 == 0) ? (a[i] - b[i]) : (a[i] + b[i])
+ const Pointer &RHS = S.Stk.pop<Pointer>();
+ const Pointer &LHS = S.Stk.pop<Pointer>();
+ const Pointer &Dst = S.Stk.peek<Pointer>();
+ FPOptions FPO = Call->getFPFeaturesInEffect(S.Ctx.getLangOpts());
+ llvm::RoundingMode RM = getRoundingMode(FPO);
+ const auto *VT = Call->getArg(0)->getType()->castAs<VectorType>();
+ unsigned NumElems = VT->getNumElements();
+
+ using T = PrimConv<PT_Float>::T;
+ for (unsigned I = 0; I != NumElems; ++I) {
+ APFloat LElem = LHS.elem<T>(I).getAPFloat();
+ APFloat RElem = RHS.elem<T>(I).getAPFloat();
+ if (I % 2 == 0) {
+ // Even indices: subtract
+ LElem.subtract(RElem, RM);
+ } else {
+ // Odd indices: add
+ LElem.add(RElem, RM);
+ }
+ Dst.elem<T>(I) = static_cast<T>(LElem);
+ }
+ Dst.initializeAllElements();
+ return true;
+}
+
+static bool interp__builtin_ia32_pclmulqdq(InterpState &S, CodePtr OpPC,
+ const CallExpr *Call) {
+ // PCLMULQDQ: carry-less multiplication of selected 64-bit halves
+ // imm8 bit 0: selects lower (0) or upper (1) 64 bits of first operand
+ // imm8 bit 4: selects lower (0) or upper (1) 64 bits of second operand
+ assert(Call->getArg(0)->getType()->isVectorType() &&
+ Call->getArg(1)->getType()->isVectorType());
+
+ // Extract imm8 argument
+ APSInt Imm8 = popToAPSInt(S, Call->getArg(2));
+ bool SelectUpperA = (Imm8 & 0x01) != 0;
+ bool SelectUpperB = (Imm8 & 0x10) != 0;
+
+ const Pointer &RHS = S.Stk.pop<Pointer>();
+ const Pointer &LHS = S.Stk.pop<Pointer>();
+ const Pointer &Dst = S.Stk.peek<Pointer>();
+
+ const auto *VT = Call->getArg(0)->getType()->castAs<VectorType>();
+ PrimType ElemT = *S.getContext().classify(VT->getElementType());
+ unsigned NumElems = VT->getNumElements();
+ const auto *DestVT = Call->getType()->castAs<VectorType>();
+ PrimType DestElemT = *S.getContext().classify(DestVT->getElementType());
+ bool DestUnsigned = Call->getType()->isUnsignedIntegerOrEnumerationType();
+
+ // Process each 128-bit lane (2 elements at a time)
+ for (unsigned Lane = 0; Lane < NumElems; Lane += 2) {
+ APSInt A0, A1, B0, B1;
+ INT_TYPE_SWITCH_NO_BOOL(ElemT, {
+ A0 = LHS.elem<T>(Lane + 0).toAPSInt();
+ A1 = LHS.elem<T>(Lane + 1).toAPSInt();
+ B0 = RHS.elem<T>(Lane + 0).toAPSInt();
+ B1 = RHS.elem<T>(Lane + 1).toAPSInt();
+ });
+
+ // Select the appropriate 64-bit values based on imm8
+ APInt A = SelectUpperA ? A1 : A0;
+ APInt B = SelectUpperB ? B1 : B0;
+
+ // Extend both operands to 128 bits for carry-less multiplication
+ APInt A128 = A.zext(128);
+ APInt B128 = B.zext(128);
+
+ // Use APIntOps::clmul for carry-less multiplication
+ APInt Result = llvm::APIntOps::clmul(A128, B128);
+
+ // Split the 128-bit result into two 64-bit halves
+ APSInt ResultLow(Result.extractBits(64, 0), DestUnsigned);
+ APSInt ResultHigh(Result.extractBits(64, 64), DestUnsigned);
+
+ INT_TYPE_SWITCH_NO_BOOL(DestElemT, {
+ Dst.elem<T>(Lane + 0) = static_cast<T>(ResultLow);
+ Dst.elem<T>(Lane + 1) = static_cast<T>(ResultHigh);
+ });
+ }
+
+ Dst.initializeAllElements();
+ return true;
+}
+
static bool interp__builtin_elementwise_triop_fp(
InterpState &S, CodePtr OpPC, const CallExpr *Call,
llvm::function_ref<APFloat(const APFloat &, const APFloat &,
@@ -2757,105 +3023,26 @@ static bool interp__builtin_select(InterpState &S, CodePtr OpPC,
return true;
}
-static bool interp__builtin_blend(InterpState &S, CodePtr OpPC,
- const CallExpr *Call) {
- APSInt Mask = popToAPSInt(S, Call->getArg(2));
- const Pointer &TrueVec = S.Stk.pop<Pointer>();
- const Pointer &FalseVec = S.Stk.pop<Pointer>();
- const Pointer &Dst = S.Stk.peek<Pointer>();
-
- assert(FalseVec.getNumElems() == TrueVec.getNumElems());
- assert(FalseVec.getNumElems() == Dst.getNumElems());
- unsigned NumElems = FalseVec.getNumElems();
- PrimType ElemT = FalseVec.getFieldDesc()->getPrimType();
- PrimType DstElemT = Dst.getFieldDesc()->getPrimType();
-
- for (unsigned I = 0; I != NumElems; ++I) {
- bool MaskBit = Mask[I % 8];
- if (ElemT == PT_Float) {
- assert(DstElemT == PT_Float);
- Dst.elem<Floating>(I) =
- MaskBit ? TrueVec.elem<Floating>(I) : FalseVec.elem<Floating>(I);
- } else {
- assert(DstElemT == ElemT);
- INT_TYPE_SWITCH_NO_BOOL(DstElemT, {
- Dst.elem<T>(I) =
- static_cast<T>(MaskBit ? TrueVec.elem<T>(I).toAPSInt()
- : FalseVec.elem<T>(I).toAPSInt());
- });
- }
- }
- Dst.initializeAllElements();
-
- return true;
-}
-
-static bool interp__builtin_ia32_pshufb(InterpState &S, CodePtr OpPC,
- const CallExpr *Call) {
- assert(Call->getNumArgs() == 2 && "masked forms handled via select*");
- const Pointer &Control = S.Stk.pop<Pointer>();
- const Pointer &Src = S.Stk.pop<Pointer>();
- const Pointer &Dst = S.Stk.peek<Pointer>();
-
- unsigned NumElems = Dst.getNumElems();
- assert(NumElems == Control.getNumElems());
- assert(NumElems == Dst.getNumElems());
-
- for (unsigned Idx = 0; Idx != NumElems; ++Idx) {
- uint8_t Ctlb = static_cast<uint8_t>(Control.elem<int8_t>(Idx));
-
- if (Ctlb & 0x80) {
- Dst.elem<int8_t>(Idx) = 0;
- } else {
- unsigned LaneBase = (Idx / 16) * 16;
- unsigned SrcOffset = Ctlb & 0x0F;
- unsigned SrcIdx = LaneBase + SrcOffset;
-
- Dst.elem<int8_t>(Idx) = Src.elem<int8_t>(SrcIdx);
- }
- }
- Dst.initializeAllElements();
- return true;
-}
+/// Scalar variant of AVX512 predicated select:
+/// Result[i] = (Mask bit 0) ? LHS[i] : RHS[i], but only element 0 may change.
+/// All other elements are taken from RHS.
+static bool interp__builtin_select_scalar(InterpState &S,
+ const CallExpr *Call) {
+ unsigned N =
+ Call->getArg(1)->getType()->castAs<VectorType>()->getNumElements();
-static bool interp__builtin_ia32_pshuf(InterpState &S, CodePtr OpPC,
- const CallExpr *Call, bool IsShufHW) {
- assert(Call->getNumArgs() == 2 && "masked forms handled via select*");
- APSInt ControlImm = popToAPSInt(S, Call->getArg(1));
- const Pointer &Src = S.Stk.pop<Pointer>();
+ const Pointer &W = S.Stk.pop<Pointer>();
+ const Pointer &A = S.Stk.pop<Pointer>();
+ APSInt U = popToAPSInt(S, Call->getArg(0));
const Pointer &Dst = S.Stk.peek<Pointer>();
- unsigned NumElems = Dst.getNumElems();
- PrimType ElemT = Dst.getFieldDesc()->getPrimType();
-
- unsigned ElemBits = static_cast<unsigned>(primSize(ElemT) * 8);
- if (ElemBits != 16 && ElemBits != 32)
- return false;
-
- unsigned LaneElts = 128u / ElemBits;
- assert(LaneElts && (NumElems % LaneElts == 0));
-
- uint8_t Ctl = static_cast<uint8_t>(ControlImm.getZExtValue());
+ bool TakeA0 = U.getZExtValue() & 1ULL;
- for (unsigned Idx = 0; Idx != NumElems; Idx++) {
- unsigned LaneBase = (Idx / LaneElts) * LaneElts;
- unsigned LaneIdx = Idx % LaneElts;
- unsigned SrcIdx = Idx;
- unsigned Sel = (Ctl >> (2 * (LaneIdx & 0x3))) & 0x3;
- if (ElemBits == 32) {
- SrcIdx = LaneBase + Sel;
- } else {
- constexpr unsigned HalfSize = 4;
- bool InHigh = LaneIdx >= HalfSize;
- if (!IsShufHW && !InHigh) {
- SrcIdx = LaneBase + Sel;
- } else if (IsShufHW && InHigh) {
- SrcIdx = LaneBase + HalfSize + Sel;
- }
- }
+ for (unsigned I = TakeA0; I != N; ++I)
+ Dst.elem<Floating>(I) = W.elem<Floating>(I);
+ if (TakeA0)
+ Dst.elem<Floating>(0) = A.elem<Floating>(0);
- INT_TYPE_SWITCH_NO_BOOL(ElemT, { Dst.elem<T>(Idx) = Src.elem<T>(SrcIdx); });
- }
Dst.initializeAllElements();
return true;
}
@@ -2899,6 +3086,35 @@ static bool interp__builtin_ia32_test_op(
return true;
}
// Implements the x86 movmsk family (e.g. movmskps/movmskpd/pmovmskb) for the
// constant evaluator: gathers the most significant bit of every source
// element into the low bits of a scalar integer result.
static bool interp__builtin_ia32_movmsk_op(InterpState &S, CodePtr OpPC,
                                           const CallExpr *Call) {
  assert(Call->getNumArgs() == 1);

  const Pointer &Source = S.Stk.pop<Pointer>();

  unsigned SourceLen = Source.getNumElems();
  QualType ElemQT = getElemType(Source);
  OptPrimType ElemT = S.getContext().classify(ElemQT);
  unsigned ResultLen =
      S.getASTContext().getTypeSize(Call->getType()); // Always 32-bit integer.
  APInt Result(ResultLen, 0);

  for (unsigned I = 0; I != SourceLen; ++I) {
    APInt Elem;
    if (ElemQT->isIntegerType()) {
      INT_TYPE_SWITCH_NO_BOOL(*ElemT, { Elem = Source.elem<T>(I).toAPSInt(); });
    } else if (ElemQT->isRealFloatingType()) {
      // For floats the mask bit is the raw sign bit, so inspect the bit
      // pattern rather than the numeric value (this also covers NaNs).
      using T = PrimConv<PT_Float>::T;
      Elem = Source.elem<T>(I).getAPFloat().bitcastToAPInt();
    } else {
      // Unsupported element type: bail out to the non-constant path.
      return false;
    }
    // Bit I of the result is the MSB of element I.
    Result.setBitVal(I, Elem.isNegative());
  }
  pushInteger(S, Result, Call->getType());
  return true;
}
+
static bool interp__builtin_elementwise_triop(
InterpState &S, CodePtr OpPC, const CallExpr *Call,
llvm::function_ref<APInt(const APSInt &, const APSInt &, const APSInt &)>
@@ -2962,6 +3178,82 @@ static bool interp__builtin_elementwise_triop(
return true;
}
+static bool interp__builtin_x86_extract_vector(InterpState &S, CodePtr OpPC,
+ const CallExpr *Call,
+ unsigned ID) {
+ assert(Call->getNumArgs() == 2);
+
+ APSInt ImmAPS = popToAPSInt(S, Call->getArg(1));
+ uint64_t Index = ImmAPS.getZExtValue();
+
+ const Pointer &Src = S.Stk.pop<Pointer>();
+ if (!Src.getFieldDesc()->isPrimitiveArray())
+ return false;
+
+ const Pointer &Dst = S.Stk.peek<Pointer>();
+ if (!Dst.getFieldDesc()->isPrimitiveArray())
+ return false;
+
+ unsigned SrcElems = Src.getNumElems();
+ unsigned DstElems = Dst.getNumElems();
+
+ unsigned NumLanes = SrcElems / DstElems;
+ unsigned Lane = static_cast<unsigned>(Index % NumLanes);
+ unsigned ExtractPos = Lane * DstElems;
+
+ PrimType ElemT = Src.getFieldDesc()->getPrimType();
+
+ TYPE_SWITCH(ElemT, {
+ for (unsigned I = 0; I != DstElems; ++I) {
+ Dst.elem<T>(I) = Src.elem<T>(ExtractPos + I);
+ }
+ });
+
+ Dst.initializeAllElements();
+ return true;
+}
+
+static bool interp__builtin_x86_extract_vector_masked(InterpState &S,
+ CodePtr OpPC,
+ const CallExpr *Call,
+ unsigned ID) {
+ assert(Call->getNumArgs() == 4);
+
+ APSInt MaskAPS = popToAPSInt(S, Call->getArg(3));
+ const Pointer &Merge = S.Stk.pop<Pointer>();
+ APSInt ImmAPS = popToAPSInt(S, Call->getArg(1));
+ const Pointer &Src = S.Stk.pop<Pointer>();
+
+ if (!Src.getFieldDesc()->isPrimitiveArray() ||
+ !Merge.getFieldDesc()->isPrimitiveArray())
+ return false;
+
+ const Pointer &Dst = S.Stk.peek<Pointer>();
+ if (!Dst.getFieldDesc()->isPrimitiveArray())
+ return false;
+
+ unsigned SrcElems = Src.getNumElems();
+ unsigned DstElems = Dst.getNumElems();
+
+ unsigned NumLanes = SrcElems / DstElems;
+ unsigned Lane = static_cast<unsigned>(ImmAPS.getZExtValue() % NumLanes);
+ unsigned Base = Lane * DstElems;
+
+ PrimType ElemT = Src.getFieldDesc()->getPrimType();
+
+ TYPE_SWITCH(ElemT, {
+ for (unsigned I = 0; I != DstElems; ++I) {
+ if (MaskAPS[I])
+ Dst.elem<T>(I) = Src.elem<T>(Base + I);
+ else
+ Dst.elem<T>(I) = Merge.elem<T>(I);
+ }
+ });
+
+ Dst.initializeAllElements();
+ return true;
+}
+
static bool interp__builtin_x86_insert_subvector(InterpState &S, CodePtr OpPC,
const CallExpr *Call,
unsigned ID) {
@@ -3003,6 +3295,45 @@ static bool interp__builtin_x86_insert_subvector(InterpState &S, CodePtr OpPC,
return true;
}
// Implements SSE4.1 PHMINPOSUW: finds the minimum unsigned word in the
// source vector, writes the minimum value to element 0 of the destination,
// its index to element 1, and zero to every remaining element.
static bool interp__builtin_ia32_phminposuw(InterpState &S, CodePtr OpPC,
                                            const CallExpr *Call) {
  assert(Call->getNumArgs() == 1);

  const Pointer &Source = S.Stk.pop<Pointer>();
  const Pointer &Dest = S.Stk.peek<Pointer>();

  unsigned SourceLen = Source.getNumElems();
  QualType ElemQT = getElemType(Source);
  OptPrimType ElemT = S.getContext().classify(ElemQT);
  unsigned ElemBitWidth = S.getASTContext().getTypeSize(ElemQT);

  bool DestUnsigned = Call->getCallReturnType(S.getASTContext())
                          ->castAs<VectorType>()
                          ->getElementType()
                          ->isUnsignedIntegerOrEnumerationType();

  INT_TYPE_SWITCH_NO_BOOL(*ElemT, {
    // Start with element 0 as the running minimum; index defaults to 0.
    APSInt MinIndex(ElemBitWidth, DestUnsigned);
    APSInt MinVal = Source.elem<T>(0).toAPSInt();

    // Strict unsigned greater-than keeps the lowest index on ties, matching
    // the instruction's tie-breaking rule.
    for (unsigned I = 1; I != SourceLen; ++I) {
      APSInt Val = Source.elem<T>(I).toAPSInt();
      if (MinVal.ugt(Val)) {
        MinVal = Val;
        MinIndex = I;
      }
    }

    Dest.elem<T>(0) = static_cast<T>(MinVal);
    Dest.elem<T>(1) = static_cast<T>(MinIndex);
    // All trailing elements are defined to be zero.
    for (unsigned I = 2; I != SourceLen; ++I) {
      Dest.elem<T>(I) = static_cast<T>(APSInt(ElemBitWidth, DestUnsigned));
    }
  });
  Dest.initializeAllElements();
  return true;
}
+
static bool interp__builtin_ia32_pternlog(InterpState &S, CodePtr OpPC,
const CallExpr *Call, bool MaskZ) {
assert(Call->getNumArgs() == 5);
@@ -3101,6 +3432,60 @@ static bool interp__builtin_vec_set(InterpState &S, CodePtr OpPC,
return true;
}
+static bool evalICmpImm(uint8_t Imm, const APSInt &A, const APSInt &B,
+ bool IsUnsigned) {
+ switch (Imm & 0x7) {
+ case 0x00: // _MM_CMPINT_EQ
+ return (A == B);
+ case 0x01: // _MM_CMPINT_LT
+ return IsUnsigned ? A.ult(B) : A.slt(B);
+ case 0x02: // _MM_CMPINT_LE
+ return IsUnsigned ? A.ule(B) : A.sle(B);
+ case 0x03: // _MM_CMPINT_FALSE
+ return false;
+ case 0x04: // _MM_CMPINT_NE
+ return (A != B);
+ case 0x05: // _MM_CMPINT_NLT
+ return IsUnsigned ? A.ugt(B) : A.sgt(B);
+ case 0x06: // _MM_CMPINT_NLE
+ return IsUnsigned ? A.uge(B) : A.sge(B);
+ case 0x07: // _MM_CMPINT_TRUE
+ return true;
+ default:
+ llvm_unreachable("Invalid Op");
+ }
+}
+
// Implements the AVX-512 masked integer compare builtins: compares LHS and
// RHS element-wise using the predicate immediate (see evalICmpImm) and ANDs
// each comparison result with the corresponding write-mask bit, producing a
// scalar bitmask.
static bool interp__builtin_ia32_cmp_mask(InterpState &S, CodePtr OpPC,
                                          const CallExpr *Call, unsigned ID,
                                          bool IsUnsigned) {
  assert(Call->getNumArgs() == 4);

  // Operands are popped in reverse order of the call's argument list.
  APSInt Mask = popToAPSInt(S, Call->getArg(3));
  APSInt Opcode = popToAPSInt(S, Call->getArg(2));
  unsigned CmpOp = static_cast<unsigned>(Opcode.getZExtValue());
  const Pointer &RHS = S.Stk.pop<Pointer>();
  const Pointer &LHS = S.Stk.pop<Pointer>();

  assert(LHS.getNumElems() == RHS.getNumElems());

  APInt RetMask = APInt::getZero(LHS.getNumElems());
  unsigned VectorLen = LHS.getNumElems();
  PrimType ElemT = LHS.getFieldDesc()->getPrimType();

  for (unsigned ElemNum = 0; ElemNum < VectorLen; ++ElemNum) {
    APSInt A, B;
    INT_TYPE_SWITCH_NO_BOOL(ElemT, {
      A = LHS.elem<T>(ElemNum).toAPSInt();
      B = RHS.elem<T>(ElemNum).toAPSInt();
    });
    // A cleared mask bit forces the result bit to 0 (zeroing semantics).
    RetMask.setBitVal(ElemNum,
                      Mask[ElemNum] && evalICmpImm(CmpOp, A, B, IsUnsigned));
  }
  pushInteger(S, RetMask, Call->getType());
  return true;
}
+
static bool interp__builtin_ia32_vpconflict(InterpState &S, CodePtr OpPC,
const CallExpr *Call) {
assert(Call->getNumArgs() == 1);
@@ -3128,6 +3513,613 @@ static bool interp__builtin_ia32_vpconflict(InterpState &S, CodePtr OpPC,
return true;
}
+static bool interp__builtin_ia32_cvt_vec2mask(InterpState &S, CodePtr OpPC,
+ const CallExpr *Call,
+ unsigned ID) {
+ assert(Call->getNumArgs() == 1);
+
+ const Pointer &Vec = S.Stk.pop<Pointer>();
+ unsigned RetWidth = S.getASTContext().getIntWidth(Call->getType());
+ APInt RetMask(RetWidth, 0);
+
+ unsigned VectorLen = Vec.getNumElems();
+ PrimType ElemT = Vec.getFieldDesc()->getPrimType();
+
+ for (unsigned ElemNum = 0; ElemNum != VectorLen; ++ElemNum) {
+ APSInt A;
+ INT_TYPE_SWITCH_NO_BOOL(ElemT, { A = Vec.elem<T>(ElemNum).toAPSInt(); });
+ unsigned MSB = A[A.getBitWidth() - 1];
+ RetMask.setBitVal(ElemNum, MSB);
+ }
+ pushInteger(S, RetMask, Call->getType());
+ return true;
+}
+
+static bool interp__builtin_ia32_cvt_mask2vec(InterpState &S, CodePtr OpPC,
+ const CallExpr *Call,
+ unsigned ID) {
+ assert(Call->getNumArgs() == 1);
+
+ APSInt Mask = popToAPSInt(S, Call->getArg(0));
+
+ const Pointer &Vec = S.Stk.peek<Pointer>();
+ unsigned NumElems = Vec.getNumElems();
+ PrimType ElemT = Vec.getFieldDesc()->getPrimType();
+
+ for (unsigned I = 0; I != NumElems; ++I) {
+ bool BitSet = Mask[I];
+
+ INT_TYPE_SWITCH_NO_BOOL(
+ ElemT, { Vec.elem<T>(I) = BitSet ? T::from(-1) : T::from(0); });
+ }
+
+ Vec.initializeAllElements();
+
+ return true;
+}
+
// Implements cvtsd2ss-style builtins: converts the double in element 0 of
// the second vector operand to float and stores it in element 0 of the
// result, while elements 1..N-1 are copied from the first operand. The
// AVX-512 form (HasRoundingMask) additionally takes a source vector used
// when the mask bit is clear, a write mask, and a rounding immediate.
static bool interp__builtin_ia32_cvtsd2ss(InterpState &S, CodePtr OpPC,
                                          const CallExpr *Call,
                                          bool HasRoundingMask) {
  APSInt Rounding, MaskInt;
  Pointer Src, B, A;

  if (HasRoundingMask) {
    assert(Call->getNumArgs() == 5);
    // Rounding is popped to keep the stack balanced; its value is never
    // read here — presumably inexact cases are diagnosed inside
    // convertDoubleToFloatStrict (confirm against its definition).
    Rounding = popToAPSInt(S, Call->getArg(4));
    MaskInt = popToAPSInt(S, Call->getArg(3));
    Src = S.Stk.pop<Pointer>();
    B = S.Stk.pop<Pointer>();
    A = S.Stk.pop<Pointer>();
    if (!CheckLoad(S, OpPC, A) || !CheckLoad(S, OpPC, B) ||
        !CheckLoad(S, OpPC, Src))
      return false;
  } else {
    assert(Call->getNumArgs() == 2);
    B = S.Stk.pop<Pointer>();
    A = S.Stk.pop<Pointer>();
    if (!CheckLoad(S, OpPC, A) || !CheckLoad(S, OpPC, B))
      return false;
  }

  const auto *DstVTy = Call->getType()->castAs<VectorType>();
  unsigned NumElems = DstVTy->getNumElements();
  const Pointer &Dst = S.Stk.peek<Pointer>();

  // Copy all elements except lane 0 (overwritten below) from A to Dst.
  for (unsigned I = 1; I != NumElems; ++I)
    Dst.elem<Floating>(I) = A.elem<Floating>(I);

  // Convert element 0 from double to float, or use Src if masked off.
  if (!HasRoundingMask || (MaskInt.getZExtValue() & 0x1)) {
    assert(S.getASTContext().FloatTy == DstVTy->getElementType() &&
           "cvtsd2ss requires float element type in destination vector");

    Floating Conv = S.allocFloat(
        S.getASTContext().getFloatTypeSemantics(DstVTy->getElementType()));
    APFloat SrcVal = B.elem<Floating>(0).getAPFloat();
    if (!convertDoubleToFloatStrict(SrcVal, Conv, S, Call))
      return false;
    Dst.elem<Floating>(0) = Conv;
  } else {
    // Mask bit 0 clear: take lane 0 from the pass-through source.
    Dst.elem<Floating>(0) = Src.elem<Floating>(0);
  }

  Dst.initializeAllElements();
  return true;
}
+
// Implements cvtpd2ps-style builtins: converts each double element of the
// source vector to float. The masked AVX-512 forms additionally take a
// pass-through vector and a write mask (masked-off lanes keep the
// pass-through value); the rounding-immediate form pops the immediate but
// never reads it — strictness is handled by convertDoubleToFloatStrict.
static bool interp__builtin_ia32_cvtpd2ps(InterpState &S, CodePtr OpPC,
                                          const CallExpr *Call, bool IsMasked,
                                          bool HasRounding) {

  APSInt MaskVal;
  Pointer PassThrough;
  Pointer Src;
  APSInt Rounding;

  if (IsMasked) {
    // Pop in reverse order.
    if (HasRounding) {
      Rounding = popToAPSInt(S, Call->getArg(3));
      MaskVal = popToAPSInt(S, Call->getArg(2));
      PassThrough = S.Stk.pop<Pointer>();
      Src = S.Stk.pop<Pointer>();
    } else {
      MaskVal = popToAPSInt(S, Call->getArg(2));
      PassThrough = S.Stk.pop<Pointer>();
      Src = S.Stk.pop<Pointer>();
    }

    if (!CheckLoad(S, OpPC, PassThrough))
      return false;
  } else {
    // Pop source only.
    Src = S.Stk.pop<Pointer>();
  }

  if (!CheckLoad(S, OpPC, Src))
    return false;

  const auto *RetVTy = Call->getType()->castAs<VectorType>();
  unsigned RetElems = RetVTy->getNumElements();
  unsigned SrcElems = Src.getNumElems();
  const Pointer &Dst = S.Stk.peek<Pointer>();

  // Initialize destination with passthrough or zeros. The return vector may
  // have more elements than the source; those extra lanes keep this value.
  for (unsigned I = 0; I != RetElems; ++I)
    if (IsMasked)
      Dst.elem<Floating>(I) = PassThrough.elem<Floating>(I);
    else
      Dst.elem<Floating>(I) = Floating(APFloat(0.0f));

  assert(S.getASTContext().FloatTy == RetVTy->getElementType() &&
         "cvtpd2ps requires float element type in return vector");

  // Convert double to float for enabled elements (only process source elements
  // that exist).
  for (unsigned I = 0; I != SrcElems; ++I) {
    if (IsMasked && !MaskVal[I])
      continue;

    APFloat SrcVal = Src.elem<Floating>(I).getAPFloat();

    Floating Conv = S.allocFloat(
        S.getASTContext().getFloatTypeSemantics(RetVTy->getElementType()));
    if (!convertDoubleToFloatStrict(SrcVal, Conv, S, Call))
      return false;
    Dst.elem<Floating>(I) = Conv;
  }

  Dst.initializeAllElements();
  return true;
}
+
// Generic driver for x86 shuffle/permute builtins. GetSourceIndex maps a
// destination element index plus the current shuffle-mask value to a pair
// {source vector (0 = A, 1 = B), source element index}; a negative source
// index means "write a zero element". The mask is either a scalar
// immediate (the same value for every element) or a vector supplying one
// control value per destination element.
static bool interp__builtin_ia32_shuffle_generic(
    InterpState &S, CodePtr OpPC, const CallExpr *Call,
    llvm::function_ref<std::pair<unsigned, int>(unsigned, unsigned)>
        GetSourceIndex) {

  assert(Call->getNumArgs() == 2 || Call->getNumArgs() == 3);

  unsigned ShuffleMask = 0;
  Pointer A, MaskVector, B;
  bool IsVectorMask = false;
  bool IsSingleOperand = (Call->getNumArgs() == 2);

  if (IsSingleOperand) {
    QualType MaskType = Call->getArg(1)->getType();
    if (MaskType->isVectorType()) {
      IsVectorMask = true;
      MaskVector = S.Stk.pop<Pointer>();
      A = S.Stk.pop<Pointer>();
      // Single-operand forms shuffle A with itself.
      B = A;
    } else if (MaskType->isIntegerType()) {
      ShuffleMask = popToAPSInt(S, Call->getArg(1)).getZExtValue();
      A = S.Stk.pop<Pointer>();
      B = A;
    } else {
      return false;
    }
  } else {
    QualType Arg2Type = Call->getArg(2)->getType();
    if (Arg2Type->isVectorType()) {
      IsVectorMask = true;
      B = S.Stk.pop<Pointer>();
      MaskVector = S.Stk.pop<Pointer>();
      A = S.Stk.pop<Pointer>();
    } else if (Arg2Type->isIntegerType()) {
      ShuffleMask = popToAPSInt(S, Call->getArg(2)).getZExtValue();
      B = S.Stk.pop<Pointer>();
      A = S.Stk.pop<Pointer>();
    } else {
      return false;
    }
  }

  QualType Arg0Type = Call->getArg(0)->getType();
  const auto *VecT = Arg0Type->castAs<VectorType>();
  PrimType ElemT = *S.getContext().classify(VecT->getElementType());
  unsigned NumElems = VecT->getNumElements();

  const Pointer &Dst = S.Stk.peek<Pointer>();

  PrimType MaskElemT = PT_Uint32;
  if (IsVectorMask) {
    QualType Arg1Type = Call->getArg(1)->getType();
    const auto *MaskVecT = Arg1Type->castAs<VectorType>();
    QualType MaskElemType = MaskVecT->getElementType();
    MaskElemT = *S.getContext().classify(MaskElemType);
  }

  for (unsigned DstIdx = 0; DstIdx != NumElems; ++DstIdx) {
    // With a vector mask, re-read the per-element control each iteration.
    if (IsVectorMask) {
      INT_TYPE_SWITCH(MaskElemT, {
        ShuffleMask = static_cast<unsigned>(MaskVector.elem<T>(DstIdx));
      });
    }

    auto [SrcVecIdx, SrcIdx] = GetSourceIndex(DstIdx, ShuffleMask);

    if (SrcIdx < 0) {
      // Zero out this element
      if (ElemT == PT_Float) {
        // NOTE(review): relies on Floating's semantics-only constructor
        // producing a zero value — confirm against Floating's definition.
        Dst.elem<Floating>(DstIdx) = Floating(
            S.getASTContext().getFloatTypeSemantics(VecT->getElementType()));
      } else {
        INT_TYPE_SWITCH_NO_BOOL(ElemT, { Dst.elem<T>(DstIdx) = T::from(0); });
      }
    } else {
      const Pointer &Src = (SrcVecIdx == 0) ? A : B;
      TYPE_SWITCH(ElemT, { Dst.elem<T>(DstIdx) = Src.elem<T>(SrcIdx); });
    }
  }
  Dst.initializeAllElements();

  return true;
}
+
// Implements vector shift builtins whose count operand is itself a vector
// (psll/psrl/psra-style): every element of Source is shifted by the same
// scalar amount, taken from the low 64 bits of the Count vector. ShiftOp
// performs the in-range shift; OverflowOp produces the result for counts
// >= the element width (e.g. all-zeros or sign-fill, per caller).
static bool interp__builtin_ia32_shift_with_count(
    InterpState &S, CodePtr OpPC, const CallExpr *Call,
    llvm::function_ref<APInt(const APInt &, uint64_t)> ShiftOp,
    llvm::function_ref<APInt(const APInt &, unsigned)> OverflowOp) {

  assert(Call->getNumArgs() == 2);

  const Pointer &Count = S.Stk.pop<Pointer>();
  const Pointer &Source = S.Stk.pop<Pointer>();

  QualType SourceType = Call->getArg(0)->getType();
  QualType CountType = Call->getArg(1)->getType();
  assert(SourceType->isVectorType() && CountType->isVectorType());

  const auto *SourceVecT = SourceType->castAs<VectorType>();
  const auto *CountVecT = CountType->castAs<VectorType>();
  PrimType SourceElemT = *S.getContext().classify(SourceVecT->getElementType());
  PrimType CountElemT = *S.getContext().classify(CountVecT->getElementType());

  const Pointer &Dst = S.Stk.peek<Pointer>();

  unsigned DestEltWidth =
      S.getASTContext().getTypeSize(SourceVecT->getElementType());
  bool IsDestUnsigned = SourceVecT->getElementType()->isUnsignedIntegerType();
  unsigned DestLen = SourceVecT->getNumElements();
  unsigned CountEltWidth =
      S.getASTContext().getTypeSize(CountVecT->getElementType());
  unsigned NumBitsInQWord = 64;
  unsigned NumCountElts = NumBitsInQWord / CountEltWidth;

  // Reassemble the low 64 bits of the count vector (little-endian element
  // order) into a single scalar shift amount.
  uint64_t CountLQWord = 0;
  for (unsigned EltIdx = 0; EltIdx != NumCountElts; ++EltIdx) {
    uint64_t Elt = 0;
    INT_TYPE_SWITCH(CountElemT,
                    { Elt = static_cast<uint64_t>(Count.elem<T>(EltIdx)); });
    CountLQWord |= (Elt << (EltIdx * CountEltWidth));
  }

  for (unsigned EltIdx = 0; EltIdx != DestLen; ++EltIdx) {
    APSInt Elt;
    INT_TYPE_SWITCH(SourceElemT, { Elt = Source.elem<T>(EltIdx).toAPSInt(); });

    // Counts that don't fit in the element width take the overflow path.
    APInt Result;
    if (CountLQWord < DestEltWidth) {
      Result = ShiftOp(Elt, CountLQWord);
    } else {
      Result = OverflowOp(Elt, DestEltWidth);
    }
    if (IsDestUnsigned) {
      INT_TYPE_SWITCH(SourceElemT, {
        Dst.elem<T>(EltIdx) = T::from(Result.getZExtValue());
      });
    } else {
      INT_TYPE_SWITCH(SourceElemT, {
        Dst.elem<T>(EltIdx) = T::from(Result.getSExtValue());
      });
    }
  }

  Dst.initializeAllElements();
  return true;
}
+
// Implements VPSHUFBITQMB-style builtins: for each byte position, the
// shuffle-mask byte selects one bit (index 0-63) of the corresponding
// 64-bit lane of Source; the selected bits, gated by the zeroing mask,
// form the resulting scalar bitmask.
static bool interp__builtin_ia32_shufbitqmb_mask(InterpState &S, CodePtr OpPC,
                                                 const CallExpr *Call) {

  assert(Call->getNumArgs() == 3);

  QualType SourceType = Call->getArg(0)->getType();
  QualType ShuffleMaskType = Call->getArg(1)->getType();
  QualType ZeroMaskType = Call->getArg(2)->getType();
  if (!SourceType->isVectorType() || !ShuffleMaskType->isVectorType() ||
      !ZeroMaskType->isIntegerType()) {
    return false;
  }

  Pointer Source, ShuffleMask;
  APSInt ZeroMask = popToAPSInt(S, Call->getArg(2));
  ShuffleMask = S.Stk.pop<Pointer>();
  Source = S.Stk.pop<Pointer>();

  const auto *SourceVecT = SourceType->castAs<VectorType>();
  const auto *ShuffleMaskVecT = ShuffleMaskType->castAs<VectorType>();
  assert(SourceVecT->getNumElements() == ShuffleMaskVecT->getNumElements());
  assert(ZeroMask.getBitWidth() == SourceVecT->getNumElements());

  PrimType SourceElemT = *S.getContext().classify(SourceVecT->getElementType());
  PrimType ShuffleMaskElemT =
      *S.getContext().classify(ShuffleMaskVecT->getElementType());

  unsigned NumBytesInQWord = 8;
  unsigned NumBitsInByte = 8;
  unsigned NumBytes = SourceVecT->getNumElements();
  unsigned NumQWords = NumBytes / NumBytesInQWord;
  unsigned RetWidth = ZeroMask.getBitWidth();
  // Result bits default to 0; only positions enabled by ZeroMask can be set.
  APSInt RetMask(llvm::APInt(RetWidth, 0), /*isUnsigned=*/true);

  for (unsigned QWordId = 0; QWordId != NumQWords; ++QWordId) {
    // Reassemble this 64-bit lane from its bytes (little-endian order).
    APInt SourceQWord(64, 0);
    for (unsigned ByteIdx = 0; ByteIdx != NumBytesInQWord; ++ByteIdx) {
      uint64_t Byte = 0;
      INT_TYPE_SWITCH(SourceElemT, {
        Byte = static_cast<uint64_t>(
            Source.elem<T>(QWordId * NumBytesInQWord + ByteIdx));
      });
      SourceQWord.insertBits(APInt(8, Byte & 0xFF), ByteIdx * NumBitsInByte);
    }

    for (unsigned ByteIdx = 0; ByteIdx != NumBytesInQWord; ++ByteIdx) {
      unsigned SelIdx = QWordId * NumBytesInQWord + ByteIdx;
      // Only the low 6 bits of the control byte select a bit index.
      unsigned M = 0;
      INT_TYPE_SWITCH(ShuffleMaskElemT, {
        M = static_cast<unsigned>(ShuffleMask.elem<T>(SelIdx)) & 0x3F;
      });

      if (ZeroMask[SelIdx]) {
        RetMask.setBitVal(SelIdx, SourceQWord[M]);
      }
    }
  }

  pushInteger(S, RetMask, Call->getType());
  return true;
}
+
// Implements VCVTPS2PH-style builtins: converts each float element to an
// IEEE half stored as a 16-bit integer, using the rounding mode encoded in
// imm[1:0]. imm[2] selects the dynamic (MXCSR) rounding mode; in that case
// constant evaluation under strict FP semantics only succeeds when the
// conversion is exact. Excess destination elements are zeroed.
static bool interp__builtin_ia32_vcvtps2ph(InterpState &S, CodePtr OpPC,
                                           const CallExpr *Call) {
  // Arguments are: vector of floats, rounding immediate
  assert(Call->getNumArgs() == 2);

  APSInt Imm = popToAPSInt(S, Call->getArg(1));
  const Pointer &Src = S.Stk.pop<Pointer>();
  const Pointer &Dst = S.Stk.peek<Pointer>();

  assert(Src.getFieldDesc()->isPrimitiveArray());
  assert(Dst.getFieldDesc()->isPrimitiveArray());

  const auto *SrcVTy = Call->getArg(0)->getType()->castAs<VectorType>();
  unsigned SrcNumElems = SrcVTy->getNumElements();
  const auto *DstVTy = Call->getType()->castAs<VectorType>();
  unsigned DstNumElems = DstVTy->getNumElements();

  const llvm::fltSemantics &HalfSem =
      S.getASTContext().getFloatTypeSemantics(S.getASTContext().HalfTy);

  // imm[2] == 1 means use MXCSR rounding mode.
  // In that case, we can only evaluate if the conversion is exact.
  int ImmVal = Imm.getZExtValue();
  bool UseMXCSR = (ImmVal & 4) != 0;
  bool IsFPConstrained =
      Call->getFPFeaturesInEffect(S.getASTContext().getLangOpts())
          .isFPConstrained();

  llvm::RoundingMode RM;
  if (!UseMXCSR) {
    switch (ImmVal & 3) {
    case 0:
      RM = llvm::RoundingMode::NearestTiesToEven;
      break;
    case 1:
      RM = llvm::RoundingMode::TowardNegative;
      break;
    case 2:
      RM = llvm::RoundingMode::TowardPositive;
      break;
    case 3:
      RM = llvm::RoundingMode::TowardZero;
      break;
    default:
      llvm_unreachable("Invalid immediate rounding mode");
    }
  } else {
    // For MXCSR, we must check for exactness. We can use any rounding mode
    // for the trial conversion since the result is the same if it's exact.
    RM = llvm::RoundingMode::NearestTiesToEven;
  }

  QualType DstElemQT = Dst.getFieldDesc()->getElemQualType();
  PrimType DstElemT = *S.getContext().classify(DstElemQT);

  for (unsigned I = 0; I != SrcNumElems; ++I) {
    Floating SrcVal = Src.elem<Floating>(I);
    APFloat DstVal = SrcVal.getAPFloat();

    bool LostInfo;
    APFloat::opStatus St = DstVal.convert(HalfSem, RM, &LostInfo);

    // Dynamic rounding with an inexact result is not a constant expression
    // under strict FP semantics.
    if (UseMXCSR && IsFPConstrained && St != APFloat::opOK) {
      S.FFDiag(S.Current->getSource(OpPC),
               diag::note_constexpr_dynamic_rounding);
      return false;
    }

    INT_TYPE_SWITCH_NO_BOOL(DstElemT, {
      // Convert the destination value's bit pattern to an unsigned integer,
      // then reconstruct the element using the target type's 'from' method.
      uint64_t RawBits = DstVal.bitcastToAPInt().getZExtValue();
      Dst.elem<T>(I) = T::from(RawBits);
    });
  }

  // Zero out remaining elements if the destination has more elements
  // (e.g., vcvtps2ph converting 4 floats to 8 shorts).
  if (DstNumElems > SrcNumElems) {
    for (unsigned I = SrcNumElems; I != DstNumElems; ++I) {
      INT_TYPE_SWITCH_NO_BOOL(DstElemT, { Dst.elem<T>(I) = T::from(0); });
    }
  }

  Dst.initializeAllElements();
  return true;
}
+
// Implements VPMULTISHIFTQB-style builtins: each destination byte is an
// unaligned 8-bit field extracted from the corresponding 64-bit lane of the
// second operand, starting at the bit offset (taken mod 64, wrapping within
// the lane) given by the matching control byte of the first operand.
static bool interp__builtin_ia32_multishiftqb(InterpState &S, CodePtr OpPC,
                                              const CallExpr *Call) {
  assert(Call->getNumArgs() == 2);

  QualType ATy = Call->getArg(0)->getType();
  QualType BTy = Call->getArg(1)->getType();
  if (!ATy->isVectorType() || !BTy->isVectorType()) {
    return false;
  }

  const Pointer &BPtr = S.Stk.pop<Pointer>();
  const Pointer &APtr = S.Stk.pop<Pointer>();
  const auto *AVecT = ATy->castAs<VectorType>();
  assert(AVecT->getNumElements() ==
         BTy->castAs<VectorType>()->getNumElements());

  PrimType ElemT = *S.getContext().classify(AVecT->getElementType());

  unsigned NumBytesInQWord = 8;
  unsigned NumBitsInByte = 8;
  unsigned NumBytes = AVecT->getNumElements();
  unsigned NumQWords = NumBytes / NumBytesInQWord;
  const Pointer &Dst = S.Stk.peek<Pointer>();

  for (unsigned QWordId = 0; QWordId != NumQWords; ++QWordId) {
    // Reassemble the data lane from its bytes (little-endian order).
    APInt BQWord(64, 0);
    for (unsigned ByteIdx = 0; ByteIdx != NumBytesInQWord; ++ByteIdx) {
      unsigned Idx = QWordId * NumBytesInQWord + ByteIdx;
      INT_TYPE_SWITCH(ElemT, {
        uint64_t Byte = static_cast<uint64_t>(BPtr.elem<T>(Idx));
        BQWord.insertBits(APInt(8, Byte & 0xFF), ByteIdx * NumBitsInByte);
      });
    }

    for (unsigned ByteIdx = 0; ByteIdx != NumBytesInQWord; ++ByteIdx) {
      unsigned Idx = QWordId * NumBytesInQWord + ByteIdx;
      // Only the low 6 bits of the control byte form the bit offset.
      uint64_t Ctrl = 0;
      INT_TYPE_SWITCH(
          ElemT, { Ctrl = static_cast<uint64_t>(APtr.elem<T>(Idx)) & 0x3F; });

      // Gather 8 bits starting at Ctrl, wrapping around the 64-bit lane.
      APInt Byte(8, 0);
      for (unsigned BitIdx = 0; BitIdx != NumBitsInByte; ++BitIdx) {
        Byte.setBitVal(BitIdx, BQWord[(Ctrl + BitIdx) & 0x3F]);
      }
      INT_TYPE_SWITCH(ElemT,
                      { Dst.elem<T>(Idx) = T::from(Byte.getZExtValue()); });
    }
  }

  Dst.initializeAllElements();

  return true;
}
+
// Implements GF2P8AFFINEQB / GF2P8AFFINEINVQB-style builtins: each result
// byte is the affine transform A*x + Imm over GF(2), where A is the 8x8 bit
// matrix held in the matching 64-bit lane of the second operand and x is the
// byte from the first operand (inverted in GF(2^8) first when Inverse is
// set). The per-byte arithmetic is delegated to the GFNIAffine helper.
// NOTE(review): name uses a single underscore (interp_builtin_...) unlike
// the interp__builtin_* convention of the surrounding helpers.
static bool interp_builtin_ia32_gfni_affine(InterpState &S, CodePtr OpPC,
                                            const CallExpr *Call,
                                            bool Inverse) {
  assert(Call->getNumArgs() == 3);
  QualType XType = Call->getArg(0)->getType();
  QualType AType = Call->getArg(1)->getType();
  QualType ImmType = Call->getArg(2)->getType();
  if (!XType->isVectorType() || !AType->isVectorType() ||
      !ImmType->isIntegerType()) {
    return false;
  }

  Pointer X, A;
  APSInt Imm = popToAPSInt(S, Call->getArg(2));
  A = S.Stk.pop<Pointer>();
  X = S.Stk.pop<Pointer>();

  const Pointer &Dst = S.Stk.peek<Pointer>();
  const auto *AVecT = AType->castAs<VectorType>();
  assert(XType->castAs<VectorType>()->getNumElements() ==
         AVecT->getNumElements());
  unsigned NumBytesInQWord = 8;
  unsigned NumBytes = AVecT->getNumElements();
  unsigned NumBitsInQWord = 64;
  unsigned NumQWords = NumBytes / NumBytesInQWord;
  unsigned NumBitsInByte = 8;
  PrimType AElemT = *S.getContext().classify(AVecT->getElementType());

  // computing A*X + Imm
  for (unsigned QWordIdx = 0; QWordIdx != NumQWords; ++QWordIdx) {
    // Extract the QWords from X, A
    APInt XQWord(NumBitsInQWord, 0);
    APInt AQWord(NumBitsInQWord, 0);
    for (unsigned ByteIdx = 0; ByteIdx != NumBytesInQWord; ++ByteIdx) {
      unsigned Idx = QWordIdx * NumBytesInQWord + ByteIdx;
      uint8_t XByte;
      uint8_t AByte;
      INT_TYPE_SWITCH(AElemT, {
        XByte = static_cast<uint8_t>(X.elem<T>(Idx));
        AByte = static_cast<uint8_t>(A.elem<T>(Idx));
      });

      XQWord.insertBits(APInt(NumBitsInByte, XByte), ByteIdx * NumBitsInByte);
      AQWord.insertBits(APInt(NumBitsInByte, AByte), ByteIdx * NumBitsInByte);
    }

    for (unsigned ByteIdx = 0; ByteIdx != NumBytesInQWord; ++ByteIdx) {
      unsigned Idx = QWordIdx * NumBytesInQWord + ByteIdx;
      // NOTE(review): re-extracts the byte from XQWord; reading X.elem
      // directly would be equivalent.
      uint8_t XByte =
          XQWord.lshr(ByteIdx * NumBitsInByte).getLoBits(8).getZExtValue();
      INT_TYPE_SWITCH(AElemT, {
        Dst.elem<T>(Idx) = T::from(GFNIAffine(XByte, AQWord, Imm, Inverse));
      });
    }
  }
  Dst.initializeAllElements();
  return true;
}
+
+static bool interp__builtin_ia32_gfni_mul(InterpState &S, CodePtr OpPC,
+ const CallExpr *Call) {
+ assert(Call->getNumArgs() == 2);
+
+ QualType AType = Call->getArg(0)->getType();
+ QualType BType = Call->getArg(1)->getType();
+ if (!AType->isVectorType() || !BType->isVectorType()) {
+ return false;
+ }
+
+ Pointer A, B;
+ B = S.Stk.pop<Pointer>();
+ A = S.Stk.pop<Pointer>();
+
+ const Pointer &Dst = S.Stk.peek<Pointer>();
+ const auto *AVecT = AType->castAs<VectorType>();
+ assert(AVecT->getNumElements() ==
+ BType->castAs<VectorType>()->getNumElements());
+
+ PrimType AElemT = *S.getContext().classify(AVecT->getElementType());
+ unsigned NumBytes = A.getNumElems();
+
+ for (unsigned ByteIdx = 0; ByteIdx != NumBytes; ++ByteIdx) {
+ uint8_t AByte, BByte;
+ INT_TYPE_SWITCH(AElemT, {
+ AByte = static_cast<uint8_t>(A.elem<T>(ByteIdx));
+ BByte = static_cast<uint8_t>(B.elem<T>(ByteIdx));
+ Dst.elem<T>(ByteIdx) = T::from(GFNIMul(AByte, BByte));
+ });
+ }
+
+ Dst.initializeAllElements();
+ return true;
+}
+
bool InterpretBuiltin(InterpState &S, CodePtr OpPC, const CallExpr *Call,
uint32_t BuiltinID) {
if (!S.getASTContext().BuiltinInfo.isConstantEvaluated(BuiltinID))
@@ -3285,24 +4277,24 @@ bool InterpretBuiltin(InterpState &S, CodePtr OpPC, const CallExpr *Call,
case Builtin::BI__builtin_parityl:
case Builtin::BI__builtin_parityll:
return interp__builtin_elementwise_int_unaryop(
- S, OpPC, Call, [](const APSInt &Val) -> APInt {
+ S, OpPC, Call, [](const APSInt &Val) {
return APInt(Val.getBitWidth(), Val.popcount() % 2);
});
case Builtin::BI__builtin_clrsb:
case Builtin::BI__builtin_clrsbl:
case Builtin::BI__builtin_clrsbll:
return interp__builtin_elementwise_int_unaryop(
- S, OpPC, Call, [](const APSInt &Val) -> APInt {
+ S, OpPC, Call, [](const APSInt &Val) {
return APInt(Val.getBitWidth(),
Val.getBitWidth() - Val.getSignificantBits());
});
+ case Builtin::BI__builtin_bitreverseg:
case Builtin::BI__builtin_bitreverse8:
case Builtin::BI__builtin_bitreverse16:
case Builtin::BI__builtin_bitreverse32:
case Builtin::BI__builtin_bitreverse64:
return interp__builtin_elementwise_int_unaryop(
- S, OpPC, Call,
- [](const APSInt &Val) -> APInt { return Val.reverseBits(); });
+ S, OpPC, Call, [](const APSInt &Val) { return Val.reverseBits(); });
case Builtin::BI__builtin_classify_type:
return interp__builtin_classify_type(S, OpPC, Frame, Call);
@@ -3315,29 +4307,49 @@ bool InterpretBuiltin(InterpState &S, CodePtr OpPC, const CallExpr *Call,
case Builtin::BI__builtin_rotateleft16:
case Builtin::BI__builtin_rotateleft32:
case Builtin::BI__builtin_rotateleft64:
+ case Builtin::BI__builtin_stdc_rotate_left:
case Builtin::BI_rotl8: // Microsoft variants of rotate left
case Builtin::BI_rotl16:
case Builtin::BI_rotl:
case Builtin::BI_lrotl:
case Builtin::BI_rotl64:
- return interp__builtin_elementwise_int_binop(
- S, OpPC, Call, [](const APSInt &Value, const APSInt &Amount) -> APInt {
- return Value.rotl(Amount);
- });
-
case Builtin::BI__builtin_rotateright8:
case Builtin::BI__builtin_rotateright16:
case Builtin::BI__builtin_rotateright32:
case Builtin::BI__builtin_rotateright64:
+ case Builtin::BI__builtin_stdc_rotate_right:
case Builtin::BI_rotr8: // Microsoft variants of rotate right
case Builtin::BI_rotr16:
case Builtin::BI_rotr:
case Builtin::BI_lrotr:
- case Builtin::BI_rotr64:
+ case Builtin::BI_rotr64: {
+ // Determine if this is a rotate right operation
+ bool IsRotateRight;
+ switch (BuiltinID) {
+ case Builtin::BI__builtin_rotateright8:
+ case Builtin::BI__builtin_rotateright16:
+ case Builtin::BI__builtin_rotateright32:
+ case Builtin::BI__builtin_rotateright64:
+ case Builtin::BI__builtin_stdc_rotate_right:
+ case Builtin::BI_rotr8:
+ case Builtin::BI_rotr16:
+ case Builtin::BI_rotr:
+ case Builtin::BI_lrotr:
+ case Builtin::BI_rotr64:
+ IsRotateRight = true;
+ break;
+ default:
+ IsRotateRight = false;
+ break;
+ }
+
return interp__builtin_elementwise_int_binop(
- S, OpPC, Call, [](const APSInt &Value, const APSInt &Amount) -> APInt {
- return Value.rotr(Amount);
+ S, OpPC, Call, [IsRotateRight](const APSInt &Value, APSInt Amount) {
+ Amount = NormalizeRotateAmount(Value, Amount);
+ return IsRotateRight ? Value.rotr(Amount.getZExtValue())
+ : Value.rotl(Amount.getZExtValue());
});
+ }
case Builtin::BI__builtin_ffs:
case Builtin::BI__builtin_ffsl:
@@ -3425,7 +4437,7 @@ bool InterpretBuiltin(InterpState &S, CodePtr OpPC, const CallExpr *Call,
case Builtin::BI__builtin_elementwise_ctzg:
return interp__builtin_elementwise_countzeroes(S, OpPC, Frame, Call,
BuiltinID);
-
+ case Builtin::BI__builtin_bswapg:
case Builtin::BI__builtin_bswap16:
case Builtin::BI__builtin_bswap32:
case Builtin::BI__builtin_bswap64:
@@ -3449,6 +4461,15 @@ bool InterpretBuiltin(InterpState &S, CodePtr OpPC, const CallExpr *Call,
case Builtin::BI__builtin_assume_aligned:
return interp__builtin_assume_aligned(S, OpPC, Frame, Call);
+ case clang::X86::BI__builtin_ia32_crc32qi:
+ return interp__builtin_ia32_crc32(S, OpPC, Frame, Call, 1);
+ case clang::X86::BI__builtin_ia32_crc32hi:
+ return interp__builtin_ia32_crc32(S, OpPC, Frame, Call, 2);
+ case clang::X86::BI__builtin_ia32_crc32si:
+ return interp__builtin_ia32_crc32(S, OpPC, Frame, Call, 4);
+ case clang::X86::BI__builtin_ia32_crc32di:
+ return interp__builtin_ia32_crc32(S, OpPC, Frame, Call, 8);
+
case clang::X86::BI__builtin_ia32_bextr_u32:
case clang::X86::BI__builtin_ia32_bextr_u64:
case clang::X86::BI__builtin_ia32_bextri_u32:
@@ -3485,6 +4506,66 @@ bool InterpretBuiltin(InterpState &S, CodePtr OpPC, const CallExpr *Call,
return Result;
});
+ case clang::X86::BI__builtin_ia32_ktestcqi:
+ case clang::X86::BI__builtin_ia32_ktestchi:
+ case clang::X86::BI__builtin_ia32_ktestcsi:
+ case clang::X86::BI__builtin_ia32_ktestcdi:
+ return interp__builtin_elementwise_int_binop(
+ S, OpPC, Call, [](const APSInt &A, const APSInt &B) {
+ return APInt(sizeof(unsigned char) * 8, (~A & B) == 0);
+ });
+
+ case clang::X86::BI__builtin_ia32_ktestzqi:
+ case clang::X86::BI__builtin_ia32_ktestzhi:
+ case clang::X86::BI__builtin_ia32_ktestzsi:
+ case clang::X86::BI__builtin_ia32_ktestzdi:
+ return interp__builtin_elementwise_int_binop(
+ S, OpPC, Call, [](const APSInt &A, const APSInt &B) {
+ return APInt(sizeof(unsigned char) * 8, (A & B) == 0);
+ });
+
+ case clang::X86::BI__builtin_ia32_kortestcqi:
+ case clang::X86::BI__builtin_ia32_kortestchi:
+ case clang::X86::BI__builtin_ia32_kortestcsi:
+ case clang::X86::BI__builtin_ia32_kortestcdi:
+ return interp__builtin_elementwise_int_binop(
+ S, OpPC, Call, [](const APSInt &A, const APSInt &B) {
+ return APInt(sizeof(unsigned char) * 8, ~(A | B) == 0);
+ });
+
+ case clang::X86::BI__builtin_ia32_kortestzqi:
+ case clang::X86::BI__builtin_ia32_kortestzhi:
+ case clang::X86::BI__builtin_ia32_kortestzsi:
+ case clang::X86::BI__builtin_ia32_kortestzdi:
+ return interp__builtin_elementwise_int_binop(
+ S, OpPC, Call, [](const APSInt &A, const APSInt &B) {
+ return APInt(sizeof(unsigned char) * 8, (A | B) == 0);
+ });
+
+ case clang::X86::BI__builtin_ia32_kshiftliqi:
+ case clang::X86::BI__builtin_ia32_kshiftlihi:
+ case clang::X86::BI__builtin_ia32_kshiftlisi:
+ case clang::X86::BI__builtin_ia32_kshiftlidi:
+ return interp__builtin_elementwise_int_binop(
+ S, OpPC, Call, [](const APSInt &LHS, const APSInt &RHS) {
+ unsigned Amt = RHS.getZExtValue() & 0xFF;
+ if (Amt >= LHS.getBitWidth())
+ return APInt::getZero(LHS.getBitWidth());
+ return LHS.shl(Amt);
+ });
+
+ case clang::X86::BI__builtin_ia32_kshiftriqi:
+ case clang::X86::BI__builtin_ia32_kshiftrihi:
+ case clang::X86::BI__builtin_ia32_kshiftrisi:
+ case clang::X86::BI__builtin_ia32_kshiftridi:
+ return interp__builtin_elementwise_int_binop(
+ S, OpPC, Call, [](const APSInt &LHS, const APSInt &RHS) {
+ unsigned Amt = RHS.getZExtValue() & 0xFF;
+ if (Amt >= LHS.getBitWidth())
+ return APInt::getZero(LHS.getBitWidth());
+ return LHS.lshr(Amt);
+ });
+
case clang::X86::BI__builtin_ia32_lzcnt_u16:
case clang::X86::BI__builtin_ia32_lzcnt_u32:
case clang::X86::BI__builtin_ia32_lzcnt_u64:
@@ -3544,6 +4625,9 @@ bool InterpretBuiltin(InterpState &S, CodePtr OpPC, const CallExpr *Call,
case Builtin::BI__builtin_ptrauth_string_discriminator:
return interp__builtin_ptrauth_string_discriminator(S, OpPC, Frame, Call);
+ case Builtin::BI__builtin_infer_alloc_token:
+ return interp__builtin_infer_alloc_token(S, OpPC, Frame, Call);
+
case Builtin::BI__noop:
pushInteger(S, 0, Call->getType());
return true;
@@ -3567,9 +4651,13 @@ bool InterpretBuiltin(InterpState &S, CodePtr OpPC, const CallExpr *Call,
return interp__builtin_vector_reduce(S, OpPC, Call, BuiltinID);
case Builtin::BI__builtin_elementwise_popcount:
+ return interp__builtin_elementwise_int_unaryop(
+ S, OpPC, Call, [](const APSInt &Src) {
+ return APInt(Src.getBitWidth(), Src.popcount());
+ });
case Builtin::BI__builtin_elementwise_bitreverse:
- return interp__builtin_elementwise_popcount(S, OpPC, Frame, Call,
- BuiltinID);
+ return interp__builtin_elementwise_int_unaryop(
+ S, OpPC, Call, [](const APSInt &Src) { return Src.reverseBits(); });
case Builtin::BI__builtin_elementwise_abs:
return interp__builtin_elementwise_abs(S, OpPC, Frame, Call, BuiltinID);
@@ -3621,6 +4709,58 @@ bool InterpretBuiltin(InterpState &S, CodePtr OpPC, const CallExpr *Call,
S, OpPC, Call, [](const APSInt &LHS, const APSInt &RHS) {
return LHS.isSigned() ? LHS.ssub_sat(RHS) : LHS.usub_sat(RHS);
});
+ case X86::BI__builtin_ia32_extract128i256:
+ case X86::BI__builtin_ia32_vextractf128_pd256:
+ case X86::BI__builtin_ia32_vextractf128_ps256:
+ case X86::BI__builtin_ia32_vextractf128_si256:
+ return interp__builtin_x86_extract_vector(S, OpPC, Call, BuiltinID);
+
+ case X86::BI__builtin_ia32_extractf32x4_256_mask:
+ case X86::BI__builtin_ia32_extractf32x4_mask:
+ case X86::BI__builtin_ia32_extractf32x8_mask:
+ case X86::BI__builtin_ia32_extractf64x2_256_mask:
+ case X86::BI__builtin_ia32_extractf64x2_512_mask:
+ case X86::BI__builtin_ia32_extractf64x4_mask:
+ case X86::BI__builtin_ia32_extracti32x4_256_mask:
+ case X86::BI__builtin_ia32_extracti32x4_mask:
+ case X86::BI__builtin_ia32_extracti32x8_mask:
+ case X86::BI__builtin_ia32_extracti64x2_256_mask:
+ case X86::BI__builtin_ia32_extracti64x2_512_mask:
+ case X86::BI__builtin_ia32_extracti64x4_mask:
+ return interp__builtin_x86_extract_vector_masked(S, OpPC, Call, BuiltinID);
+
+ case clang::X86::BI__builtin_ia32_pmulhrsw128:
+ case clang::X86::BI__builtin_ia32_pmulhrsw256:
+ case clang::X86::BI__builtin_ia32_pmulhrsw512:
+ return interp__builtin_elementwise_int_binop(
+ S, OpPC, Call, [](const APSInt &LHS, const APSInt &RHS) {
+ return (llvm::APIntOps::mulsExtended(LHS, RHS).ashr(14) + 1)
+ .extractBits(16, 1);
+ });
+
+ case clang::X86::BI__builtin_ia32_movmskps:
+ case clang::X86::BI__builtin_ia32_movmskpd:
+ case clang::X86::BI__builtin_ia32_pmovmskb128:
+ case clang::X86::BI__builtin_ia32_pmovmskb256:
+ case clang::X86::BI__builtin_ia32_movmskps256:
+ case clang::X86::BI__builtin_ia32_movmskpd256: {
+ return interp__builtin_ia32_movmsk_op(S, OpPC, Call);
+ }
+
+ case X86::BI__builtin_ia32_psignb128:
+ case X86::BI__builtin_ia32_psignb256:
+ case X86::BI__builtin_ia32_psignw128:
+ case X86::BI__builtin_ia32_psignw256:
+ case X86::BI__builtin_ia32_psignd128:
+ case X86::BI__builtin_ia32_psignd256:
+ return interp__builtin_elementwise_int_binop(
+ S, OpPC, Call, [](const APInt &AElem, const APInt &BElem) {
+ if (BElem.isZero())
+ return APInt::getZero(AElem.getBitWidth());
+ if (BElem.isNegative())
+ return -AElem;
+ return AElem;
+ });
case clang::X86::BI__builtin_ia32_pavgb128:
case clang::X86::BI__builtin_ia32_pavgw128:
@@ -3760,14 +4900,14 @@ bool InterpretBuiltin(InterpState &S, CodePtr OpPC, const CallExpr *Call,
case clang::X86::BI__builtin_ia32_packuswb256:
case clang::X86::BI__builtin_ia32_packuswb512:
return interp__builtin_x86_pack(S, OpPC, Call, [](const APSInt &Src) {
- unsigned DstBits = Src.getBitWidth() / 2;
- if (Src.isNegative())
- return APInt::getZero(DstBits);
- if (Src.isIntN(DstBits))
- return APInt(Src).trunc(DstBits);
- return APInt::getAllOnes(DstBits);
+ return APInt(Src).truncSSatU(Src.getBitWidth() / 2);
});
+ case clang::X86::BI__builtin_ia32_selectss_128:
+ case clang::X86::BI__builtin_ia32_selectsd_128:
+ case clang::X86::BI__builtin_ia32_selectsh_128:
+ case clang::X86::BI__builtin_ia32_selectsbf_128:
+ return interp__builtin_select_scalar(S, Call);
case clang::X86::BI__builtin_ia32_vprotbi:
case clang::X86::BI__builtin_ia32_vprotdi:
case clang::X86::BI__builtin_ia32_vprotqi:
@@ -3842,6 +4982,11 @@ bool InterpretBuiltin(InterpState &S, CodePtr OpPC, const CallExpr *Call,
F.subtract(RHS, RM);
return F;
});
+ case clang::X86::BI__builtin_ia32_addsubpd:
+ case clang::X86::BI__builtin_ia32_addsubps:
+ case clang::X86::BI__builtin_ia32_addsubpd256:
+ case clang::X86::BI__builtin_ia32_addsubps256:
+ return interp__builtin_ia32_addsub(S, OpPC, Call);
case clang::X86::BI__builtin_ia32_pmuldq128:
case clang::X86::BI__builtin_ia32_pmuldq256:
@@ -3863,6 +5008,11 @@ bool InterpretBuiltin(InterpState &S, CodePtr OpPC, const CallExpr *Call,
return llvm::APIntOps::muluExtended(LoLHS, LoRHS);
});
+ case clang::X86::BI__builtin_ia32_pclmulqdq128:
+ case clang::X86::BI__builtin_ia32_pclmulqdq256:
+ case clang::X86::BI__builtin_ia32_pclmulqdq512:
+ return interp__builtin_ia32_pclmulqdq(S, OpPC, Call);
+
case Builtin::BI__builtin_elementwise_fma:
return interp__builtin_elementwise_triop_fp(
S, OpPC, Call,
@@ -3933,7 +5083,15 @@ bool InterpretBuiltin(InterpState &S, CodePtr OpPC, const CallExpr *Call,
case clang::X86::BI__builtin_ia32_pblendw256:
case clang::X86::BI__builtin_ia32_pblendd128:
case clang::X86::BI__builtin_ia32_pblendd256:
- return interp__builtin_blend(S, OpPC, Call);
+ return interp__builtin_ia32_shuffle_generic(
+ S, OpPC, Call, [](unsigned DstIdx, unsigned ShuffleMask) {
+ // Bit index for mask.
+ unsigned MaskBit = (ShuffleMask >> (DstIdx % 8)) & 0x1;
+ unsigned SrcVecIdx = MaskBit ? 1 : 0; // 1 = TrueVec, 0 = FalseVec
+ return std::pair<unsigned, int>{SrcVecIdx, static_cast<int>(DstIdx)};
+ });
+
+
case clang::X86::BI__builtin_ia32_blendvpd:
case clang::X86::BI__builtin_ia32_blendvpd256:
@@ -4004,26 +5162,307 @@ bool InterpretBuiltin(InterpState &S, CodePtr OpPC, const CallExpr *Call,
case X86::BI__builtin_ia32_selectpd_512:
return interp__builtin_select(S, OpPC, Call);
+ case X86::BI__builtin_ia32_shufps:
+ case X86::BI__builtin_ia32_shufps256:
+ case X86::BI__builtin_ia32_shufps512:
+ return interp__builtin_ia32_shuffle_generic(
+ S, OpPC, Call, [](unsigned DstIdx, unsigned ShuffleMask) {
+ unsigned NumElemPerLane = 4;
+ unsigned NumSelectableElems = NumElemPerLane / 2;
+ unsigned BitsPerElem = 2;
+ unsigned IndexMask = 0x3;
+ unsigned MaskBits = 8;
+ unsigned Lane = DstIdx / NumElemPerLane;
+ unsigned ElemInLane = DstIdx % NumElemPerLane;
+ unsigned LaneOffset = Lane * NumElemPerLane;
+ unsigned SrcIdx = ElemInLane >= NumSelectableElems ? 1 : 0;
+ unsigned BitIndex = (DstIdx * BitsPerElem) % MaskBits;
+ unsigned Index = (ShuffleMask >> BitIndex) & IndexMask;
+ return std::pair<unsigned, int>{SrcIdx,
+ static_cast<int>(LaneOffset + Index)};
+ });
+ case X86::BI__builtin_ia32_shufpd:
+ case X86::BI__builtin_ia32_shufpd256:
+ case X86::BI__builtin_ia32_shufpd512:
+ return interp__builtin_ia32_shuffle_generic(
+ S, OpPC, Call, [](unsigned DstIdx, unsigned ShuffleMask) {
+ unsigned NumElemPerLane = 2;
+ unsigned NumSelectableElems = NumElemPerLane / 2;
+ unsigned BitsPerElem = 1;
+ unsigned IndexMask = 0x1;
+ unsigned MaskBits = 8;
+ unsigned Lane = DstIdx / NumElemPerLane;
+ unsigned ElemInLane = DstIdx % NumElemPerLane;
+ unsigned LaneOffset = Lane * NumElemPerLane;
+ unsigned SrcIdx = ElemInLane >= NumSelectableElems ? 1 : 0;
+ unsigned BitIndex = (DstIdx * BitsPerElem) % MaskBits;
+ unsigned Index = (ShuffleMask >> BitIndex) & IndexMask;
+ return std::pair<unsigned, int>{SrcIdx,
+ static_cast<int>(LaneOffset + Index)};
+ });
+
+ case X86::BI__builtin_ia32_vgf2p8affineinvqb_v16qi:
+ case X86::BI__builtin_ia32_vgf2p8affineinvqb_v32qi:
+ case X86::BI__builtin_ia32_vgf2p8affineinvqb_v64qi:
+ return interp_builtin_ia32_gfni_affine(S, OpPC, Call, true);
+ case X86::BI__builtin_ia32_vgf2p8affineqb_v16qi:
+ case X86::BI__builtin_ia32_vgf2p8affineqb_v32qi:
+ case X86::BI__builtin_ia32_vgf2p8affineqb_v64qi:
+ return interp_builtin_ia32_gfni_affine(S, OpPC, Call, false);
+
+ case X86::BI__builtin_ia32_vgf2p8mulb_v16qi:
+ case X86::BI__builtin_ia32_vgf2p8mulb_v32qi:
+ case X86::BI__builtin_ia32_vgf2p8mulb_v64qi:
+ return interp__builtin_ia32_gfni_mul(S, OpPC, Call);
+
+ case X86::BI__builtin_ia32_insertps128:
+ return interp__builtin_ia32_shuffle_generic(
+ S, OpPC, Call, [](unsigned DstIdx, unsigned Mask) {
+ // Bits [3:0]: zero mask - if bit is set, zero this element
+ if ((Mask & (1 << DstIdx)) != 0) {
+ return std::pair<unsigned, int>{0, -1};
+ }
+ // Bits [7:6]: select element from source vector Y (0-3)
+ // Bits [5:4]: select destination position (0-3)
+ unsigned SrcElem = (Mask >> 6) & 0x3;
+ unsigned DstElem = (Mask >> 4) & 0x3;
+ if (DstIdx == DstElem) {
+ // Insert element from source vector (B) at this position
+ return std::pair<unsigned, int>{1, static_cast<int>(SrcElem)};
+ } else {
+ // Copy from destination vector (A)
+ return std::pair<unsigned, int>{0, static_cast<int>(DstIdx)};
+ }
+ });
+ case X86::BI__builtin_ia32_permvarsi256:
+ case X86::BI__builtin_ia32_permvarsf256:
+ case X86::BI__builtin_ia32_permvardf512:
+ case X86::BI__builtin_ia32_permvardi512:
+ case X86::BI__builtin_ia32_permvarhi128:
+ return interp__builtin_ia32_shuffle_generic(
+ S, OpPC, Call, [](unsigned DstIdx, unsigned ShuffleMask) {
+ int Offset = ShuffleMask & 0x7;
+ return std::pair<unsigned, int>{0, Offset};
+ });
+ case X86::BI__builtin_ia32_permvarqi128:
+ case X86::BI__builtin_ia32_permvarhi256:
+ case X86::BI__builtin_ia32_permvarsi512:
+ case X86::BI__builtin_ia32_permvarsf512:
+ return interp__builtin_ia32_shuffle_generic(
+ S, OpPC, Call, [](unsigned DstIdx, unsigned ShuffleMask) {
+ int Offset = ShuffleMask & 0xF;
+ return std::pair<unsigned, int>{0, Offset};
+ });
+ case X86::BI__builtin_ia32_permvardi256:
+ case X86::BI__builtin_ia32_permvardf256:
+ return interp__builtin_ia32_shuffle_generic(
+ S, OpPC, Call, [](unsigned DstIdx, unsigned ShuffleMask) {
+ int Offset = ShuffleMask & 0x3;
+ return std::pair<unsigned, int>{0, Offset};
+ });
+ case X86::BI__builtin_ia32_permvarqi256:
+ case X86::BI__builtin_ia32_permvarhi512:
+ return interp__builtin_ia32_shuffle_generic(
+ S, OpPC, Call, [](unsigned DstIdx, unsigned ShuffleMask) {
+ int Offset = ShuffleMask & 0x1F;
+ return std::pair<unsigned, int>{0, Offset};
+ });
+ case X86::BI__builtin_ia32_permvarqi512:
+ return interp__builtin_ia32_shuffle_generic(
+ S, OpPC, Call, [](unsigned DstIdx, unsigned ShuffleMask) {
+ int Offset = ShuffleMask & 0x3F;
+ return std::pair<unsigned, int>{0, Offset};
+ });
+ case X86::BI__builtin_ia32_vpermi2varq128:
+ case X86::BI__builtin_ia32_vpermi2varpd128:
+ return interp__builtin_ia32_shuffle_generic(
+ S, OpPC, Call, [](unsigned DstIdx, unsigned ShuffleMask) {
+ int Offset = ShuffleMask & 0x1;
+ unsigned SrcIdx = (ShuffleMask >> 1) & 0x1;
+ return std::pair<unsigned, int>{SrcIdx, Offset};
+ });
+ case X86::BI__builtin_ia32_vpermi2vard128:
+ case X86::BI__builtin_ia32_vpermi2varps128:
+ case X86::BI__builtin_ia32_vpermi2varq256:
+ case X86::BI__builtin_ia32_vpermi2varpd256:
+ return interp__builtin_ia32_shuffle_generic(
+ S, OpPC, Call, [](unsigned DstIdx, unsigned ShuffleMask) {
+ int Offset = ShuffleMask & 0x3;
+ unsigned SrcIdx = (ShuffleMask >> 2) & 0x1;
+ return std::pair<unsigned, int>{SrcIdx, Offset};
+ });
+ case X86::BI__builtin_ia32_vpermi2varhi128:
+ case X86::BI__builtin_ia32_vpermi2vard256:
+ case X86::BI__builtin_ia32_vpermi2varps256:
+ case X86::BI__builtin_ia32_vpermi2varq512:
+ case X86::BI__builtin_ia32_vpermi2varpd512:
+ return interp__builtin_ia32_shuffle_generic(
+ S, OpPC, Call, [](unsigned DstIdx, unsigned ShuffleMask) {
+ int Offset = ShuffleMask & 0x7;
+ unsigned SrcIdx = (ShuffleMask >> 3) & 0x1;
+ return std::pair<unsigned, int>{SrcIdx, Offset};
+ });
+ case X86::BI__builtin_ia32_vpermi2varqi128:
+ case X86::BI__builtin_ia32_vpermi2varhi256:
+ case X86::BI__builtin_ia32_vpermi2vard512:
+ case X86::BI__builtin_ia32_vpermi2varps512:
+ return interp__builtin_ia32_shuffle_generic(
+ S, OpPC, Call, [](unsigned DstIdx, unsigned ShuffleMask) {
+ int Offset = ShuffleMask & 0xF;
+ unsigned SrcIdx = (ShuffleMask >> 4) & 0x1;
+ return std::pair<unsigned, int>{SrcIdx, Offset};
+ });
+ case X86::BI__builtin_ia32_vpermi2varqi256:
+ case X86::BI__builtin_ia32_vpermi2varhi512:
+ return interp__builtin_ia32_shuffle_generic(
+ S, OpPC, Call, [](unsigned DstIdx, unsigned ShuffleMask) {
+ int Offset = ShuffleMask & 0x1F;
+ unsigned SrcIdx = (ShuffleMask >> 5) & 0x1;
+ return std::pair<unsigned, int>{SrcIdx, Offset};
+ });
+ case X86::BI__builtin_ia32_vpermi2varqi512:
+ return interp__builtin_ia32_shuffle_generic(
+ S, OpPC, Call, [](unsigned DstIdx, unsigned ShuffleMask) {
+ int Offset = ShuffleMask & 0x3F;
+ unsigned SrcIdx = (ShuffleMask >> 6) & 0x1;
+ return std::pair<unsigned, int>{SrcIdx, Offset};
+ });
+ case X86::BI__builtin_ia32_vperm2f128_pd256:
+ case X86::BI__builtin_ia32_vperm2f128_ps256:
+ case X86::BI__builtin_ia32_vperm2f128_si256:
+ case X86::BI__builtin_ia32_permti256: {
+ unsigned NumElements =
+ Call->getArg(0)->getType()->castAs<VectorType>()->getNumElements();
+ unsigned PreservedBitsCnt = NumElements >> 2;
+ return interp__builtin_ia32_shuffle_generic(
+ S, OpPC, Call,
+ [PreservedBitsCnt](unsigned DstIdx, unsigned ShuffleMask) {
+ unsigned ControlBitsCnt = DstIdx >> PreservedBitsCnt << 2;
+ unsigned ControlBits = ShuffleMask >> ControlBitsCnt;
+
+ if (ControlBits & 0b1000)
+ return std::make_pair(0u, -1);
+
+ unsigned SrcVecIdx = (ControlBits & 0b10) >> 1;
+ unsigned PreservedBitsMask = (1 << PreservedBitsCnt) - 1;
+ int SrcIdx = ((ControlBits & 0b1) << PreservedBitsCnt) |
+ (DstIdx & PreservedBitsMask);
+ return std::make_pair(SrcVecIdx, SrcIdx);
+ });
+ }
case X86::BI__builtin_ia32_pshufb128:
case X86::BI__builtin_ia32_pshufb256:
case X86::BI__builtin_ia32_pshufb512:
- return interp__builtin_ia32_pshufb(S, OpPC, Call);
+ return interp__builtin_ia32_shuffle_generic(
+ S, OpPC, Call, [](unsigned DstIdx, unsigned ShuffleMask) {
+ uint8_t Ctlb = static_cast<uint8_t>(ShuffleMask);
+ if (Ctlb & 0x80)
+ return std::make_pair(0, -1);
+
+ unsigned LaneBase = (DstIdx / 16) * 16;
+ unsigned SrcOffset = Ctlb & 0x0F;
+ unsigned SrcIdx = LaneBase + SrcOffset;
+ return std::make_pair(0, static_cast<int>(SrcIdx));
+ });
case X86::BI__builtin_ia32_pshuflw:
case X86::BI__builtin_ia32_pshuflw256:
case X86::BI__builtin_ia32_pshuflw512:
- return interp__builtin_ia32_pshuf(S, OpPC, Call, false);
+ return interp__builtin_ia32_shuffle_generic(
+ S, OpPC, Call, [](unsigned DstIdx, unsigned ShuffleMask) {
+ unsigned LaneBase = (DstIdx / 8) * 8;
+ unsigned LaneIdx = DstIdx % 8;
+ if (LaneIdx < 4) {
+ unsigned Sel = (ShuffleMask >> (2 * LaneIdx)) & 0x3;
+ return std::make_pair(0, static_cast<int>(LaneBase + Sel));
+ }
+
+ return std::make_pair(0, static_cast<int>(DstIdx));
+ });
case X86::BI__builtin_ia32_pshufhw:
case X86::BI__builtin_ia32_pshufhw256:
case X86::BI__builtin_ia32_pshufhw512:
- return interp__builtin_ia32_pshuf(S, OpPC, Call, true);
+ return interp__builtin_ia32_shuffle_generic(
+ S, OpPC, Call, [](unsigned DstIdx, unsigned ShuffleMask) {
+ unsigned LaneBase = (DstIdx / 8) * 8;
+ unsigned LaneIdx = DstIdx % 8;
+ if (LaneIdx >= 4) {
+ unsigned Sel = (ShuffleMask >> (2 * (LaneIdx - 4))) & 0x3;
+ return std::make_pair(0, static_cast<int>(LaneBase + 4 + Sel));
+ }
+
+ return std::make_pair(0, static_cast<int>(DstIdx));
+ });
case X86::BI__builtin_ia32_pshufd:
case X86::BI__builtin_ia32_pshufd256:
case X86::BI__builtin_ia32_pshufd512:
- return interp__builtin_ia32_pshuf(S, OpPC, Call, false);
+ case X86::BI__builtin_ia32_vpermilps:
+ case X86::BI__builtin_ia32_vpermilps256:
+ case X86::BI__builtin_ia32_vpermilps512:
+ return interp__builtin_ia32_shuffle_generic(
+ S, OpPC, Call, [](unsigned DstIdx, unsigned ShuffleMask) {
+ unsigned LaneBase = (DstIdx / 4) * 4;
+ unsigned LaneIdx = DstIdx % 4;
+ unsigned Sel = (ShuffleMask >> (2 * LaneIdx)) & 0x3;
+ return std::make_pair(0, static_cast<int>(LaneBase + Sel));
+ });
+
+ case X86::BI__builtin_ia32_vpermilvarpd:
+ case X86::BI__builtin_ia32_vpermilvarpd256:
+ case X86::BI__builtin_ia32_vpermilvarpd512:
+ return interp__builtin_ia32_shuffle_generic(
+ S, OpPC, Call, [](unsigned DstIdx, unsigned ShuffleMask) {
+ unsigned NumElemPerLane = 2;
+ unsigned Lane = DstIdx / NumElemPerLane;
+ unsigned Offset = ShuffleMask & 0b10 ? 1 : 0;
+ return std::make_pair(
+ 0, static_cast<int>(Lane * NumElemPerLane + Offset));
+ });
+
+ case X86::BI__builtin_ia32_vpermilvarps:
+ case X86::BI__builtin_ia32_vpermilvarps256:
+ case X86::BI__builtin_ia32_vpermilvarps512:
+ return interp__builtin_ia32_shuffle_generic(
+ S, OpPC, Call, [](unsigned DstIdx, unsigned ShuffleMask) {
+ unsigned NumElemPerLane = 4;
+ unsigned Lane = DstIdx / NumElemPerLane;
+ unsigned Offset = ShuffleMask & 0b11;
+ return std::make_pair(
+ 0, static_cast<int>(Lane * NumElemPerLane + Offset));
+ });
+
+ case X86::BI__builtin_ia32_vpermilpd:
+ case X86::BI__builtin_ia32_vpermilpd256:
+ case X86::BI__builtin_ia32_vpermilpd512:
+ return interp__builtin_ia32_shuffle_generic(
+ S, OpPC, Call, [](unsigned DstIdx, unsigned Control) {
+ unsigned NumElemPerLane = 2;
+ unsigned BitsPerElem = 1;
+ unsigned MaskBits = 8;
+ unsigned IndexMask = 0x1;
+ unsigned Lane = DstIdx / NumElemPerLane;
+ unsigned LaneOffset = Lane * NumElemPerLane;
+ unsigned BitIndex = (DstIdx * BitsPerElem) % MaskBits;
+ unsigned Index = (Control >> BitIndex) & IndexMask;
+ return std::make_pair(0, static_cast<int>(LaneOffset + Index));
+ });
+ case X86::BI__builtin_ia32_permdf256:
+ case X86::BI__builtin_ia32_permdi256:
+ return interp__builtin_ia32_shuffle_generic(
+ S, OpPC, Call, [](unsigned DstIdx, unsigned Control) {
+ // permute4x64 operates on 4 64-bit elements
+ // For element i (0-3), extract bits [2*i+1:2*i] from Control
+ unsigned Index = (Control >> (2 * DstIdx)) & 0x3;
+ return std::make_pair(0, static_cast<int>(Index));
+ });
+
+ case X86::BI__builtin_ia32_vpmultishiftqb128:
+ case X86::BI__builtin_ia32_vpmultishiftqb256:
+ case X86::BI__builtin_ia32_vpmultishiftqb512:
+ return interp__builtin_ia32_multishiftqb(S, OpPC, Call);
case X86::BI__builtin_ia32_kandqi:
case X86::BI__builtin_ia32_kandhi:
case X86::BI__builtin_ia32_kandsi:
@@ -4079,6 +5518,70 @@ bool InterpretBuiltin(InterpState &S, CodePtr OpPC, const CallExpr *Call,
S, OpPC, Call,
[](const APSInt &LHS, const APSInt &RHS) { return LHS + RHS; });
+ case X86::BI__builtin_ia32_kmovb:
+ case X86::BI__builtin_ia32_kmovw:
+ case X86::BI__builtin_ia32_kmovd:
+ case X86::BI__builtin_ia32_kmovq:
+ return interp__builtin_elementwise_int_unaryop(
+ S, OpPC, Call, [](const APSInt &Src) { return Src; });
+
+ case X86::BI__builtin_ia32_kunpckhi:
+ case X86::BI__builtin_ia32_kunpckdi:
+ case X86::BI__builtin_ia32_kunpcksi:
+ return interp__builtin_elementwise_int_binop(
+ S, OpPC, Call, [](const APSInt &A, const APSInt &B) {
+ // Generic kunpack: extract lower half of each operand and concatenate
+ // Result = A[HalfWidth-1:0] concat B[HalfWidth-1:0]
+ unsigned BW = A.getBitWidth();
+ return APSInt(A.trunc(BW / 2).concat(B.trunc(BW / 2)),
+ A.isUnsigned());
+ });
+
+ case X86::BI__builtin_ia32_phminposuw128:
+ return interp__builtin_ia32_phminposuw(S, OpPC, Call);
+
+ case X86::BI__builtin_ia32_psraq128:
+ case X86::BI__builtin_ia32_psraq256:
+ case X86::BI__builtin_ia32_psraq512:
+ case X86::BI__builtin_ia32_psrad128:
+ case X86::BI__builtin_ia32_psrad256:
+ case X86::BI__builtin_ia32_psrad512:
+ case X86::BI__builtin_ia32_psraw128:
+ case X86::BI__builtin_ia32_psraw256:
+ case X86::BI__builtin_ia32_psraw512:
+ return interp__builtin_ia32_shift_with_count(
+ S, OpPC, Call,
+ [](const APInt &Elt, uint64_t Count) { return Elt.ashr(Count); },
+ [](const APInt &Elt, unsigned Width) { return Elt.ashr(Width - 1); });
+
+ case X86::BI__builtin_ia32_psllq128:
+ case X86::BI__builtin_ia32_psllq256:
+ case X86::BI__builtin_ia32_psllq512:
+ case X86::BI__builtin_ia32_pslld128:
+ case X86::BI__builtin_ia32_pslld256:
+ case X86::BI__builtin_ia32_pslld512:
+ case X86::BI__builtin_ia32_psllw128:
+ case X86::BI__builtin_ia32_psllw256:
+ case X86::BI__builtin_ia32_psllw512:
+ return interp__builtin_ia32_shift_with_count(
+ S, OpPC, Call,
+ [](const APInt &Elt, uint64_t Count) { return Elt.shl(Count); },
+ [](const APInt &Elt, unsigned Width) { return APInt::getZero(Width); });
+
+ case X86::BI__builtin_ia32_psrlq128:
+ case X86::BI__builtin_ia32_psrlq256:
+ case X86::BI__builtin_ia32_psrlq512:
+ case X86::BI__builtin_ia32_psrld128:
+ case X86::BI__builtin_ia32_psrld256:
+ case X86::BI__builtin_ia32_psrld512:
+ case X86::BI__builtin_ia32_psrlw128:
+ case X86::BI__builtin_ia32_psrlw256:
+ case X86::BI__builtin_ia32_psrlw512:
+ return interp__builtin_ia32_shift_with_count(
+ S, OpPC, Call,
+ [](const APInt &Elt, uint64_t Count) { return Elt.lshr(Count); },
+ [](const APInt &Elt, unsigned Width) { return APInt::getZero(Width); });
+
case X86::BI__builtin_ia32_pternlogd128_mask:
case X86::BI__builtin_ia32_pternlogd256_mask:
case X86::BI__builtin_ia32_pternlogd512_mask:
@@ -4100,6 +5603,39 @@ bool InterpretBuiltin(InterpState &S, CodePtr OpPC, const CallExpr *Call,
return interp__builtin_elementwise_triop(S, OpPC, Call,
llvm::APIntOps::fshr);
+ case X86::BI__builtin_ia32_shuf_f32x4_256:
+ case X86::BI__builtin_ia32_shuf_i32x4_256:
+ case X86::BI__builtin_ia32_shuf_f64x2_256:
+ case X86::BI__builtin_ia32_shuf_i64x2_256:
+ case X86::BI__builtin_ia32_shuf_f32x4:
+ case X86::BI__builtin_ia32_shuf_i32x4:
+ case X86::BI__builtin_ia32_shuf_f64x2:
+ case X86::BI__builtin_ia32_shuf_i64x2: {
+ // Destination and sources A, B all have the same type.
+ QualType VecQT = Call->getArg(0)->getType();
+ const auto *VecT = VecQT->castAs<VectorType>();
+ unsigned NumElems = VecT->getNumElements();
+ unsigned ElemBits = S.getASTContext().getTypeSize(VecT->getElementType());
+ unsigned LaneBits = 128u;
+ unsigned NumLanes = (NumElems * ElemBits) / LaneBits;
+ unsigned NumElemsPerLane = LaneBits / ElemBits;
+
+ return interp__builtin_ia32_shuffle_generic(
+ S, OpPC, Call,
+ [NumLanes, NumElemsPerLane](unsigned DstIdx, unsigned ShuffleMask) {
+ // DstIdx determines source. ShuffleMask selects lane in source.
+ unsigned BitsPerElem = NumLanes / 2;
+ unsigned IndexMask = (1u << BitsPerElem) - 1;
+ unsigned Lane = DstIdx / NumElemsPerLane;
+ unsigned SrcIdx = (Lane < NumLanes / 2) ? 0 : 1;
+ unsigned BitIdx = BitsPerElem * Lane;
+ unsigned SrcLaneIdx = (ShuffleMask >> BitIdx) & IndexMask;
+ unsigned ElemInLane = DstIdx % NumElemsPerLane;
+ unsigned IdxToPick = SrcLaneIdx * NumElemsPerLane + ElemInLane;
+ return std::pair<unsigned, int>{SrcIdx, IdxToPick};
+ });
+ }
+
case X86::BI__builtin_ia32_insertf32x4_256:
case X86::BI__builtin_ia32_inserti32x4_256:
case X86::BI__builtin_ia32_insertf64x2_256:
@@ -4118,6 +5654,10 @@ bool InterpretBuiltin(InterpState &S, CodePtr OpPC, const CallExpr *Call,
case X86::BI__builtin_ia32_insert128i256:
return interp__builtin_x86_insert_subvector(S, OpPC, Call, BuiltinID);
+ case clang::X86::BI__builtin_ia32_vcvtps2ph:
+ case clang::X86::BI__builtin_ia32_vcvtps2ph256:
+ return interp__builtin_ia32_vcvtps2ph(S, OpPC, Call);
+
case X86::BI__builtin_ia32_vec_ext_v4hi:
case X86::BI__builtin_ia32_vec_ext_v16qi:
case X86::BI__builtin_ia32_vec_ext_v8hi:
@@ -4141,6 +5681,205 @@ bool InterpretBuiltin(InterpState &S, CodePtr OpPC, const CallExpr *Call,
case X86::BI__builtin_ia32_vec_set_v4di:
return interp__builtin_vec_set(S, OpPC, Call, BuiltinID);
+ case X86::BI__builtin_ia32_cvtb2mask128:
+ case X86::BI__builtin_ia32_cvtb2mask256:
+ case X86::BI__builtin_ia32_cvtb2mask512:
+ case X86::BI__builtin_ia32_cvtw2mask128:
+ case X86::BI__builtin_ia32_cvtw2mask256:
+ case X86::BI__builtin_ia32_cvtw2mask512:
+ case X86::BI__builtin_ia32_cvtd2mask128:
+ case X86::BI__builtin_ia32_cvtd2mask256:
+ case X86::BI__builtin_ia32_cvtd2mask512:
+ case X86::BI__builtin_ia32_cvtq2mask128:
+ case X86::BI__builtin_ia32_cvtq2mask256:
+ case X86::BI__builtin_ia32_cvtq2mask512:
+ return interp__builtin_ia32_cvt_vec2mask(S, OpPC, Call, BuiltinID);
+
+ case X86::BI__builtin_ia32_cvtmask2b128:
+ case X86::BI__builtin_ia32_cvtmask2b256:
+ case X86::BI__builtin_ia32_cvtmask2b512:
+ case X86::BI__builtin_ia32_cvtmask2w128:
+ case X86::BI__builtin_ia32_cvtmask2w256:
+ case X86::BI__builtin_ia32_cvtmask2w512:
+ case X86::BI__builtin_ia32_cvtmask2d128:
+ case X86::BI__builtin_ia32_cvtmask2d256:
+ case X86::BI__builtin_ia32_cvtmask2d512:
+ case X86::BI__builtin_ia32_cvtmask2q128:
+ case X86::BI__builtin_ia32_cvtmask2q256:
+ case X86::BI__builtin_ia32_cvtmask2q512:
+ return interp__builtin_ia32_cvt_mask2vec(S, OpPC, Call, BuiltinID);
+
+ case X86::BI__builtin_ia32_cvtsd2ss:
+ return interp__builtin_ia32_cvtsd2ss(S, OpPC, Call, false);
+
+ case X86::BI__builtin_ia32_cvtsd2ss_round_mask:
+ return interp__builtin_ia32_cvtsd2ss(S, OpPC, Call, true);
+
+ case X86::BI__builtin_ia32_cvtpd2ps:
+ case X86::BI__builtin_ia32_cvtpd2ps256:
+ return interp__builtin_ia32_cvtpd2ps(S, OpPC, Call, false, false);
+ case X86::BI__builtin_ia32_cvtpd2ps_mask:
+ return interp__builtin_ia32_cvtpd2ps(S, OpPC, Call, true, false);
+ case X86::BI__builtin_ia32_cvtpd2ps512_mask:
+ return interp__builtin_ia32_cvtpd2ps(S, OpPC, Call, true, true);
+
+ case X86::BI__builtin_ia32_cmpb128_mask:
+ case X86::BI__builtin_ia32_cmpw128_mask:
+ case X86::BI__builtin_ia32_cmpd128_mask:
+ case X86::BI__builtin_ia32_cmpq128_mask:
+ case X86::BI__builtin_ia32_cmpb256_mask:
+ case X86::BI__builtin_ia32_cmpw256_mask:
+ case X86::BI__builtin_ia32_cmpd256_mask:
+ case X86::BI__builtin_ia32_cmpq256_mask:
+ case X86::BI__builtin_ia32_cmpb512_mask:
+ case X86::BI__builtin_ia32_cmpw512_mask:
+ case X86::BI__builtin_ia32_cmpd512_mask:
+ case X86::BI__builtin_ia32_cmpq512_mask:
+ return interp__builtin_ia32_cmp_mask(S, OpPC, Call, BuiltinID,
+ /*IsUnsigned=*/false);
+
+ case X86::BI__builtin_ia32_ucmpb128_mask:
+ case X86::BI__builtin_ia32_ucmpw128_mask:
+ case X86::BI__builtin_ia32_ucmpd128_mask:
+ case X86::BI__builtin_ia32_ucmpq128_mask:
+ case X86::BI__builtin_ia32_ucmpb256_mask:
+ case X86::BI__builtin_ia32_ucmpw256_mask:
+ case X86::BI__builtin_ia32_ucmpd256_mask:
+ case X86::BI__builtin_ia32_ucmpq256_mask:
+ case X86::BI__builtin_ia32_ucmpb512_mask:
+ case X86::BI__builtin_ia32_ucmpw512_mask:
+ case X86::BI__builtin_ia32_ucmpd512_mask:
+ case X86::BI__builtin_ia32_ucmpq512_mask:
+ return interp__builtin_ia32_cmp_mask(S, OpPC, Call, BuiltinID,
+ /*IsUnsigned=*/true);
+
+ case X86::BI__builtin_ia32_vpshufbitqmb128_mask:
+ case X86::BI__builtin_ia32_vpshufbitqmb256_mask:
+ case X86::BI__builtin_ia32_vpshufbitqmb512_mask:
+ return interp__builtin_ia32_shufbitqmb_mask(S, OpPC, Call);
+
+ case X86::BI__builtin_ia32_pslldqi128_byteshift:
+ case X86::BI__builtin_ia32_pslldqi256_byteshift:
+ case X86::BI__builtin_ia32_pslldqi512_byteshift:
+ // These SLLDQ intrinsics always operate on byte elements (8 bits).
+ // The lane width is hardcoded to 16 to match the SIMD register size,
+ // but the algorithm processes one byte per iteration,
+ // so APInt(8, ...) is correct and intentional.
+ return interp__builtin_ia32_shuffle_generic(
+ S, OpPC, Call,
+ [](unsigned DstIdx, unsigned Shift) -> std::pair<unsigned, int> {
+ unsigned LaneBase = (DstIdx / 16) * 16;
+ unsigned LaneIdx = DstIdx % 16;
+ if (LaneIdx < Shift)
+ return std::make_pair(0, -1);
+
+ return std::make_pair(0,
+ static_cast<int>(LaneBase + LaneIdx - Shift));
+ });
+
+ case X86::BI__builtin_ia32_psrldqi128_byteshift:
+ case X86::BI__builtin_ia32_psrldqi256_byteshift:
+ case X86::BI__builtin_ia32_psrldqi512_byteshift:
+ // These SRLDQ intrinsics always operate on byte elements (8 bits).
+ // The lane width is hardcoded to 16 to match the SIMD register size,
+ // but the algorithm processes one byte per iteration,
+ // so APInt(8, ...) is correct and intentional.
+ return interp__builtin_ia32_shuffle_generic(
+ S, OpPC, Call,
+ [](unsigned DstIdx, unsigned Shift) -> std::pair<unsigned, int> {
+ unsigned LaneBase = (DstIdx / 16) * 16;
+ unsigned LaneIdx = DstIdx % 16;
+ if (LaneIdx + Shift < 16)
+ return std::make_pair(0,
+ static_cast<int>(LaneBase + LaneIdx + Shift));
+
+ return std::make_pair(0, -1);
+ });
+
+ case X86::BI__builtin_ia32_palignr128:
+ case X86::BI__builtin_ia32_palignr256:
+ case X86::BI__builtin_ia32_palignr512:
+ return interp__builtin_ia32_shuffle_generic(
+ S, OpPC, Call, [](unsigned DstIdx, unsigned Shift) {
+          // Default to -1 -> zero-fill this destination element.
+ unsigned VecIdx = 1;
+ int ElemIdx = -1;
+
+ int Lane = DstIdx / 16;
+ int Offset = DstIdx % 16;
+
+ // Elements come from VecB first, then VecA after the shift boundary
+ unsigned ShiftedIdx = Offset + (Shift & 0xFF);
+ if (ShiftedIdx < 16) { // from VecB
+ ElemIdx = ShiftedIdx + (Lane * 16);
+ } else if (ShiftedIdx < 32) { // from VecA
+ VecIdx = 0;
+ ElemIdx = (ShiftedIdx - 16) + (Lane * 16);
+ }
+
+ return std::pair<unsigned, int>{VecIdx, ElemIdx};
+ });
+
+ case X86::BI__builtin_ia32_alignd128:
+ case X86::BI__builtin_ia32_alignd256:
+ case X86::BI__builtin_ia32_alignd512:
+ case X86::BI__builtin_ia32_alignq128:
+ case X86::BI__builtin_ia32_alignq256:
+ case X86::BI__builtin_ia32_alignq512: {
+ unsigned NumElems = Call->getType()->castAs<VectorType>()->getNumElements();
+ return interp__builtin_ia32_shuffle_generic(
+ S, OpPC, Call, [NumElems](unsigned DstIdx, unsigned Shift) {
+ unsigned Imm = Shift & 0xFF;
+ unsigned EffectiveShift = Imm & (NumElems - 1);
+ unsigned SourcePos = DstIdx + EffectiveShift;
+ unsigned VecIdx = SourcePos < NumElems ? 1u : 0u;
+ unsigned ElemIdx = SourcePos & (NumElems - 1);
+ return std::pair<unsigned, int>{VecIdx, static_cast<int>(ElemIdx)};
+ });
+ }
+
+ case clang::X86::BI__builtin_ia32_minps:
+ case clang::X86::BI__builtin_ia32_minpd:
+ case clang::X86::BI__builtin_ia32_minph128:
+ case clang::X86::BI__builtin_ia32_minph256:
+ case clang::X86::BI__builtin_ia32_minps256:
+ case clang::X86::BI__builtin_ia32_minpd256:
+ case clang::X86::BI__builtin_ia32_minps512:
+ case clang::X86::BI__builtin_ia32_minpd512:
+ case clang::X86::BI__builtin_ia32_minph512:
+ return interp__builtin_elementwise_fp_binop(
+ S, OpPC, Call,
+ [](const APFloat &A, const APFloat &B,
+ std::optional<APSInt>) -> std::optional<APFloat> {
+ if (A.isNaN() || A.isInfinity() || A.isDenormal() || B.isNaN() ||
+ B.isInfinity() || B.isDenormal())
+ return std::nullopt;
+ if (A.isZero() && B.isZero())
+ return B;
+ return llvm::minimum(A, B);
+ });
+
+ case clang::X86::BI__builtin_ia32_maxps:
+ case clang::X86::BI__builtin_ia32_maxpd:
+ case clang::X86::BI__builtin_ia32_maxph128:
+ case clang::X86::BI__builtin_ia32_maxph256:
+ case clang::X86::BI__builtin_ia32_maxps256:
+ case clang::X86::BI__builtin_ia32_maxpd256:
+ case clang::X86::BI__builtin_ia32_maxps512:
+ case clang::X86::BI__builtin_ia32_maxpd512:
+ case clang::X86::BI__builtin_ia32_maxph512:
+ return interp__builtin_elementwise_fp_binop(
+ S, OpPC, Call,
+ [](const APFloat &A, const APFloat &B,
+ std::optional<APSInt>) -> std::optional<APFloat> {
+ if (A.isNaN() || A.isInfinity() || A.isDenormal() || B.isNaN() ||
+ B.isInfinity() || B.isDenormal())
+ return std::nullopt;
+ if (A.isZero() && B.isZero())
+ return B;
+ return llvm::maximum(A, B);
+ });
+
default:
S.FFDiag(S.Current->getLocation(OpPC),
diag::note_invalid_subexpr_in_const_expr)
@@ -4372,6 +6111,11 @@ static bool copyComposite(InterpState &S, CodePtr OpPC, const Pointer &Src,
}
bool DoMemcpy(InterpState &S, CodePtr OpPC, const Pointer &Src, Pointer &Dest) {
+ if (!Src.isBlockPointer() || Src.getFieldDesc()->isPrimitive())
+ return false;
+ if (!Dest.isBlockPointer() || Dest.getFieldDesc()->isPrimitive())
+ return false;
+
return copyComposite(S, OpPC, Src, Dest);
}
diff --git a/clang/lib/AST/ByteCode/InterpFrame.cpp b/clang/lib/AST/ByteCode/InterpFrame.cpp
index 039acb5..3c185a0 100644
--- a/clang/lib/AST/ByteCode/InterpFrame.cpp
+++ b/clang/lib/AST/ByteCode/InterpFrame.cpp
@@ -89,11 +89,23 @@ void InterpFrame::destroyScopes() {
void InterpFrame::initScope(unsigned Idx) {
if (!Func)
return;
+
for (auto &Local : Func->getScope(Idx).locals()) {
localBlock(Local.Offset)->invokeCtor();
}
}
+void InterpFrame::enableLocal(unsigned Idx) {
+ assert(Func);
+
+ // FIXME: This is a little dirty, but to avoid adding a flag to
+ // InlineDescriptor that's only ever useful on the toplevel of local
+ // variables, we reuse the IsActive flag for the enabled state. We should
+ // probably use a different struct than InlineDescriptor for the block-level
+  // inline descriptor of local variables.
+ localInlineDesc(Idx)->IsActive = true;
+}
+
void InterpFrame::destroy(unsigned Idx) {
for (auto &Local : Func->getScope(Idx).locals_reverse()) {
S.deallocate(localBlock(Local.Offset));
@@ -139,14 +151,21 @@ static bool shouldSkipInBacktrace(const Function *F) {
}
void InterpFrame::describe(llvm::raw_ostream &OS) const {
+ assert(Func);
// For lambda static invokers, we would just print __invoke().
- if (const auto *F = getFunction(); F && shouldSkipInBacktrace(F))
+ if (shouldSkipInBacktrace(Func))
return;
const Expr *CallExpr = Caller->getExpr(getRetPC());
const FunctionDecl *F = getCallee();
- bool IsMemberCall = isa<CXXMethodDecl>(F) && !isa<CXXConstructorDecl>(F) &&
- cast<CXXMethodDecl>(F)->isImplicitObjectMemberFunction();
+
+ bool IsMemberCall = false;
+ bool ExplicitInstanceParam = false;
+ if (const auto *MD = dyn_cast<CXXMethodDecl>(F)) {
+ IsMemberCall = !isa<CXXConstructorDecl>(MD) && !MD->isStatic();
+ ExplicitInstanceParam = MD->isExplicitObjectMemberFunction();
+ }
+
if (Func->hasThisPointer() && IsMemberCall) {
if (const auto *MCE = dyn_cast_if_present<CXXMemberCallExpr>(CallExpr)) {
const Expr *Object = MCE->getImplicitObjectArgument();
@@ -178,16 +197,15 @@ void InterpFrame::describe(llvm::raw_ostream &OS) const {
Off += Func->hasRVO() ? primSize(PT_Ptr) : 0;
Off += Func->hasThisPointer() ? primSize(PT_Ptr) : 0;
-
- for (unsigned I = 0, N = F->getNumParams(); I < N; ++I) {
- QualType Ty = F->getParamDecl(I)->getType();
-
+ llvm::ListSeparator Comma;
+ for (const ParmVarDecl *Param :
+ F->parameters().slice(ExplicitInstanceParam)) {
+ OS << Comma;
+ QualType Ty = Param->getType();
PrimType PrimTy = S.Ctx.classify(Ty).value_or(PT_Ptr);
TYPE_SWITCH(PrimTy, print(OS, stackRef<T>(Off), S.getASTContext(), Ty));
Off += align(primSize(PrimTy));
- if (I + 1 != N)
- OS << ", ";
}
OS << ")";
}
@@ -234,14 +252,14 @@ Pointer InterpFrame::getParamPointer(unsigned Off) {
assert(!isBottomFrame());
// Allocate memory to store the parameter and the block metadata.
- const auto &Desc = Func->getParamDescriptor(Off);
- size_t BlockSize = sizeof(Block) + Desc.second->getAllocSize();
+ const auto &PDesc = Func->getParamDescriptor(Off);
+ size_t BlockSize = sizeof(Block) + PDesc.Desc->getAllocSize();
auto Memory = std::make_unique<char[]>(BlockSize);
- auto *B = new (Memory.get()) Block(S.Ctx.getEvalID(), Desc.second);
+ auto *B = new (Memory.get()) Block(S.Ctx.getEvalID(), PDesc.Desc);
B->invokeCtor();
// Copy the initial value.
- TYPE_SWITCH(Desc.first, new (B->data()) T(stackRef<T>(Off)));
+ TYPE_SWITCH(PDesc.T, new (B->data()) T(stackRef<T>(Off)));
// Record the param.
Params.insert({Off, std::move(Memory)});
diff --git a/clang/lib/AST/ByteCode/InterpFrame.h b/clang/lib/AST/ByteCode/InterpFrame.h
index fa9de2e..61c1065 100644
--- a/clang/lib/AST/ByteCode/InterpFrame.h
+++ b/clang/lib/AST/ByteCode/InterpFrame.h
@@ -55,6 +55,10 @@ public:
void destroy(unsigned Idx);
void initScope(unsigned Idx);
void destroyScopes();
+ void enableLocal(unsigned Idx);
+ bool isLocalEnabled(unsigned Idx) const {
+ return localInlineDesc(Idx)->IsActive;
+ }
/// Describes the frame with arguments for diagnostic purposes.
void describe(llvm::raw_ostream &OS) const override;
@@ -109,6 +113,7 @@ public:
/// Returns the 'this' pointer.
const Pointer &getThis() const {
assert(hasThisPointer());
+ assert(!isBottomFrame());
return stackRef<Pointer>(ThisPointerOffset);
}
@@ -116,6 +121,7 @@ public:
const Pointer &getRVOPtr() const {
assert(Func);
assert(Func->hasRVO());
+ assert(!isBottomFrame());
return stackRef<Pointer>(0);
}
@@ -186,6 +192,9 @@ private:
const size_t FrameOffset;
/// Mapping from arg offsets to their argument blocks.
llvm::DenseMap<unsigned, std::unique_ptr<char[]>> Params;
+
+public:
+ unsigned MSVCConstexprAllowed = 0;
};
} // namespace interp
diff --git a/clang/lib/AST/ByteCode/InterpHelpers.h b/clang/lib/AST/ByteCode/InterpHelpers.h
new file mode 100644
index 0000000..905bf1b
--- /dev/null
+++ b/clang/lib/AST/ByteCode/InterpHelpers.h
@@ -0,0 +1,144 @@
+//===--- InterpHelpers.h - Interpreter Helper Functions --------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_CLANG_AST_INTERP_INTERPHELPERS_H
+#define LLVM_CLANG_AST_INTERP_INTERPHELPERS_H
+
+#include "DynamicAllocator.h"
+#include "InterpState.h"
+#include "Pointer.h"
+
+namespace clang {
+class CallExpr;
+class OffsetOfExpr;
+
+namespace interp {
+class Block;
+struct Descriptor;
+
+/// Interpreter entry point.
+bool Interpret(InterpState &S);
+
+/// Interpret a builtin function.
+bool InterpretBuiltin(InterpState &S, CodePtr OpPC, const CallExpr *Call,
+ uint32_t BuiltinID);
+
+/// Interpret an offsetof operation.
+bool InterpretOffsetOf(InterpState &S, CodePtr OpPC, const OffsetOfExpr *E,
+ ArrayRef<int64_t> ArrayIndices, int64_t &Result);
+
+/// Checks if the array is offsetable.
+bool CheckArray(InterpState &S, CodePtr OpPC, const Pointer &Ptr);
+
+/// Checks if a pointer is live and accessible.
+bool CheckLive(InterpState &S, CodePtr OpPC, const Pointer &Ptr,
+ AccessKinds AK);
+
+/// Checks if a pointer is a dummy pointer.
+bool CheckDummy(InterpState &S, CodePtr OpPC, const Block *B, AccessKinds AK);
+
+/// Checks if a pointer is in range.
+bool CheckRange(InterpState &S, CodePtr OpPC, const Pointer &Ptr,
+ AccessKinds AK);
+
+/// Checks if a field from which a pointer is going to be derived is valid.
+bool CheckRange(InterpState &S, CodePtr OpPC, const Pointer &Ptr,
+ CheckSubobjectKind CSK);
+
+/// Checks if a pointer points to a mutable field.
+bool CheckMutable(InterpState &S, CodePtr OpPC, const Pointer &Ptr);
+
+/// Checks if a value can be loaded from a block.
+bool CheckLoad(InterpState &S, CodePtr OpPC, const Pointer &Ptr,
+ AccessKinds AK = AK_Read);
+
+/// Diagnose mismatched new[]/delete or new/delete[] pairs.
+bool CheckNewDeleteForms(InterpState &S, CodePtr OpPC,
+ DynamicAllocator::Form AllocForm,
+ DynamicAllocator::Form DeleteForm, const Descriptor *D,
+ const Expr *NewExpr);
+
+/// Copy the contents of Src into Dest.
+bool DoMemcpy(InterpState &S, CodePtr OpPC, const Pointer &Src, Pointer &Dest);
+
+UnsignedOrNone evaluateBuiltinObjectSize(const ASTContext &ASTCtx,
+ unsigned Kind, Pointer &Ptr);
+
+template <typename T>
+static bool handleOverflow(InterpState &S, CodePtr OpPC, const T &SrcValue) {
+ const Expr *E = S.Current->getExpr(OpPC);
+ S.CCEDiag(E, diag::note_constexpr_overflow) << SrcValue << E->getType();
+ return S.noteUndefinedBehavior();
+}
+
+inline bool CheckArraySize(InterpState &S, CodePtr OpPC, uint64_t NumElems) {
+ uint64_t Limit = S.getLangOpts().ConstexprStepLimit;
+ if (Limit != 0 && NumElems > Limit) {
+ S.FFDiag(S.Current->getSource(OpPC),
+ diag::note_constexpr_new_exceeds_limits)
+ << NumElems << Limit;
+ return false;
+ }
+ return true;
+}
+
+static inline llvm::RoundingMode getRoundingMode(FPOptions FPO) {
+ auto RM = FPO.getRoundingMode();
+ if (RM == llvm::RoundingMode::Dynamic)
+ return llvm::RoundingMode::NearestTiesToEven;
+ return RM;
+}
+
+inline bool Invalid(InterpState &S, CodePtr OpPC) {
+ const SourceLocation &Loc = S.Current->getLocation(OpPC);
+ S.FFDiag(Loc, diag::note_invalid_subexpr_in_const_expr)
+ << S.Current->getRange(OpPC);
+ return false;
+}
+
+template <typename SizeT>
+bool CheckArraySize(InterpState &S, CodePtr OpPC, SizeT *NumElements,
+ unsigned ElemSize, bool IsNoThrow) {
+ // FIXME: Both the SizeT::from() as well as the
+ // NumElements.toAPSInt() in this function are rather expensive.
+
+ // Can't be too many elements if the bitwidth of NumElements is lower than
+ // that of Descriptor::MaxArrayElemBytes.
+ if ((NumElements->bitWidth() - NumElements->isSigned()) <
+ (sizeof(Descriptor::MaxArrayElemBytes) * 8))
+ return true;
+
+ // FIXME: GH63562
+ // APValue stores array extents as unsigned,
+  // so anything that is greater than unsigned would overflow when
+ // constructing the array, we catch this here.
+ SizeT MaxElements = SizeT::from(Descriptor::MaxArrayElemBytes / ElemSize);
+ assert(MaxElements.isPositive());
+ if (NumElements->toAPSInt().getActiveBits() >
+ ConstantArrayType::getMaxSizeBits(S.getASTContext()) ||
+ *NumElements > MaxElements) {
+ if (!IsNoThrow) {
+ const SourceInfo &Loc = S.Current->getSource(OpPC);
+
+ if (NumElements->isSigned() && NumElements->isNegative()) {
+ S.FFDiag(Loc, diag::note_constexpr_new_negative)
+ << NumElements->toDiagnosticString(S.getASTContext());
+ } else {
+ S.FFDiag(Loc, diag::note_constexpr_new_too_large)
+ << NumElements->toDiagnosticString(S.getASTContext());
+ }
+ }
+ return false;
+ }
+ return true;
+}
+
+} // namespace interp
+} // namespace clang
+
+#endif // LLVM_CLANG_AST_INTERP_INTERPHELPERS_H
diff --git a/clang/lib/AST/ByteCode/InterpState.cpp b/clang/lib/AST/ByteCode/InterpState.cpp
index a95916c..df507bd 100644
--- a/clang/lib/AST/ByteCode/InterpState.cpp
+++ b/clang/lib/AST/ByteCode/InterpState.cpp
@@ -17,10 +17,12 @@
using namespace clang;
using namespace clang::interp;
-InterpState::InterpState(State &Parent, Program &P, InterpStack &Stk,
+InterpState::InterpState(const State &Parent, Program &P, InterpStack &Stk,
Context &Ctx, SourceMapper *M)
- : Parent(Parent), M(M), P(P), Stk(Stk), Ctx(Ctx), BottomFrame(*this),
- Current(&BottomFrame) {
+ : State(Ctx.getASTContext(), Parent.getEvalStatus()), M(M), P(P), Stk(Stk),
+ Ctx(Ctx), BottomFrame(*this), Current(&BottomFrame),
+ StepsLeft(Ctx.getLangOpts().ConstexprStepLimit),
+ InfiniteSteps(StepsLeft == 0) {
InConstantContext = Parent.InConstantContext;
CheckingPotentialConstantExpression =
Parent.CheckingPotentialConstantExpression;
@@ -28,11 +30,13 @@ InterpState::InterpState(State &Parent, Program &P, InterpStack &Stk,
EvalMode = Parent.EvalMode;
}
-InterpState::InterpState(State &Parent, Program &P, InterpStack &Stk,
+InterpState::InterpState(const State &Parent, Program &P, InterpStack &Stk,
Context &Ctx, const Function *Func)
- : Parent(Parent), M(nullptr), P(P), Stk(Stk), Ctx(Ctx),
+ : State(Ctx.getASTContext(), Parent.getEvalStatus()), M(nullptr), P(P),
+ Stk(Stk), Ctx(Ctx),
BottomFrame(*this, Func, nullptr, CodePtr(), Func->getArgSize()),
- Current(&BottomFrame) {
+ Current(&BottomFrame), StepsLeft(Ctx.getLangOpts().ConstexprStepLimit),
+ InfiniteSteps(StepsLeft == 0) {
InConstantContext = Parent.InConstantContext;
CheckingPotentialConstantExpression =
Parent.CheckingPotentialConstantExpression;
@@ -75,7 +79,7 @@ void InterpState::cleanup() {
Alloc->cleanup();
}
-Frame *InterpState::getCurrentFrame() { return Current; }
+const Frame *InterpState::getCurrentFrame() { return Current; }
void InterpState::deallocate(Block *B) {
assert(B);
@@ -153,3 +157,15 @@ StdAllocatorCaller InterpState::getStdAllocatorCaller(StringRef Name) const {
return {};
}
+
+bool InterpState::noteStep(CodePtr OpPC) {
+ if (InfiniteSteps)
+ return true;
+
+ --StepsLeft;
+ if (StepsLeft != 0)
+ return true;
+
+ FFDiag(Current->getSource(OpPC), diag::note_constexpr_step_limit_exceeded);
+ return false;
+}
diff --git a/clang/lib/AST/ByteCode/InterpState.h b/clang/lib/AST/ByteCode/InterpState.h
index e2e4d5c..83ef56e 100644
--- a/clang/lib/AST/ByteCode/InterpState.h
+++ b/clang/lib/AST/ByteCode/InterpState.h
@@ -20,17 +20,10 @@
#include "InterpFrame.h"
#include "InterpStack.h"
#include "State.h"
-#include "clang/AST/APValue.h"
-#include "clang/AST/ASTDiagnostic.h"
-#include "clang/AST/Expr.h"
-#include "clang/AST/OptionalDiagnostic.h"
namespace clang {
namespace interp {
class Context;
-class Function;
-class InterpStack;
-class InterpFrame;
class SourceMapper;
struct StdAllocatorCaller {
@@ -42,9 +35,9 @@ struct StdAllocatorCaller {
/// Interpreter context.
class InterpState final : public State, public SourceMapper {
public:
- InterpState(State &Parent, Program &P, InterpStack &Stk, Context &Ctx,
+ InterpState(const State &Parent, Program &P, InterpStack &Stk, Context &Ctx,
SourceMapper *M = nullptr);
- InterpState(State &Parent, Program &P, InterpStack &Stk, Context &Ctx,
+ InterpState(const State &Parent, Program &P, InterpStack &Stk, Context &Ctx,
const Function *Func);
~InterpState();
@@ -57,41 +50,14 @@ public:
bool diagnosing() const { return getEvalStatus().Diag != nullptr; }
// Stack frame accessors.
- Frame *getCurrentFrame() override;
+ const Frame *getCurrentFrame() override;
unsigned getCallStackDepth() override {
return Current ? (Current->getDepth() + 1) : 1;
}
const Frame *getBottomFrame() const override { return &BottomFrame; }
- // Access objects from the walker context.
- Expr::EvalStatus &getEvalStatus() const override {
- return Parent.getEvalStatus();
- }
- ASTContext &getASTContext() const override { return Ctx.getASTContext(); }
- const LangOptions &getLangOpts() const {
- return Ctx.getASTContext().getLangOpts();
- }
-
- // Forward status checks and updates to the walker.
- bool keepEvaluatingAfterFailure() const override {
- return Parent.keepEvaluatingAfterFailure();
- }
- bool keepEvaluatingAfterSideEffect() const override {
- return Parent.keepEvaluatingAfterSideEffect();
- }
- bool noteUndefinedBehavior() override {
- return Parent.noteUndefinedBehavior();
- }
+ bool stepsLeft() const override { return true; }
bool inConstantContext() const;
- bool hasActiveDiagnostic() override { return Parent.hasActiveDiagnostic(); }
- void setActiveDiagnostic(bool Flag) override {
- Parent.setActiveDiagnostic(Flag);
- }
- void setFoldFailureDiagnostic(bool Flag) override {
- Parent.setFoldFailureDiagnostic(Flag);
- }
- bool hasPriorDiagnostic() override { return Parent.hasPriorDiagnostic(); }
- bool noteSideEffect() override { return Parent.noteSideEffect(); }
/// Deallocates a pointer.
void deallocate(Block *B);
@@ -153,17 +119,21 @@ public:
return Floating(Mem, llvm::APFloatBase::SemanticsToEnum(Sem));
}
+ /// Note that a step has been executed. If there are no more steps remaining,
+ /// diagnoses and returns \c false.
+ bool noteStep(CodePtr OpPC);
+
private:
friend class EvaluationResult;
friend class InterpStateCCOverride;
- /// AST Walker state.
- State &Parent;
/// Dead block chain.
DeadBlock *DeadBlocks = nullptr;
/// Reference to the offset-source mapping.
SourceMapper *M;
/// Allocator used for dynamic allocations performed via the program.
std::unique_ptr<DynamicAllocator> Alloc;
+ /// Allocator for everything else, e.g. floating-point values.
+ mutable std::optional<llvm::BumpPtrAllocator> Allocator;
public:
/// Reference to the module containing all bytecode.
@@ -180,6 +150,12 @@ public:
SourceLocation EvalLocation;
  /// Declaration we're initializing/evaluating, if any.
const VarDecl *EvaluatingDecl = nullptr;
+ /// Steps left during evaluation.
+ unsigned StepsLeft = 1;
+ /// Whether infinite evaluation steps have been requested. If this is false,
+ /// we use the StepsLeft value above.
+ const bool InfiniteSteps = false;
+
/// Things needed to do speculative execution.
SmallVectorImpl<PartialDiagnosticAt> *PrevDiags = nullptr;
unsigned SpeculationDepth = 0;
@@ -192,8 +168,6 @@ public:
/// List of blocks we're currently running either constructors or destructors
/// for.
llvm::SmallVector<const Block *> InitializingBlocks;
-
- mutable std::optional<llvm::BumpPtrAllocator> Allocator;
};
class InterpStateCCOverride final {
diff --git a/clang/lib/AST/ByteCode/MemberPointer.cpp b/clang/lib/AST/ByteCode/MemberPointer.cpp
index dfc8583..8b1b018 100644
--- a/clang/lib/AST/ByteCode/MemberPointer.cpp
+++ b/clang/lib/AST/ByteCode/MemberPointer.cpp
@@ -23,6 +23,15 @@ std::optional<Pointer> MemberPointer::toPointer(const Context &Ctx) const {
if (!Base.isBlockPointer())
return std::nullopt;
+ unsigned BlockMDSize = Base.block()->getDescriptor()->getMetadataSize();
+
+ if (PtrOffset >= 0) {
+ // If the resulting base would be too small, return nullopt.
+ if (Base.BS.Base < static_cast<unsigned>(PtrOffset) ||
+ (Base.BS.Base - PtrOffset < BlockMDSize))
+ return std::nullopt;
+ }
+
Pointer CastedBase =
(PtrOffset < 0 ? Base.atField(-PtrOffset) : Base.atFieldSub(PtrOffset));
@@ -31,7 +40,7 @@ std::optional<Pointer> MemberPointer::toPointer(const Context &Ctx) const {
return std::nullopt;
unsigned Offset = 0;
- Offset += CastedBase.block()->getDescriptor()->getMetadataSize();
+ Offset += BlockMDSize;
if (const auto *FD = dyn_cast<FieldDecl>(Dcl)) {
if (FD->getParent() == BaseRecord->getDecl())
diff --git a/clang/lib/AST/ByteCode/Opcodes.td b/clang/lib/AST/ByteCode/Opcodes.td
index 532c444..b684b43 100644
--- a/clang/lib/AST/ByteCode/Opcodes.td
+++ b/clang/lib/AST/ByteCode/Opcodes.td
@@ -53,6 +53,7 @@ def ArgBool : ArgType { let Name = "bool"; }
def ArgFixedPoint : ArgType { let Name = "FixedPoint"; let AsRef = true; }
def ArgFunction : ArgType { let Name = "const Function *"; }
+def ArgFunctionDecl : ArgType { let Name = "const FunctionDecl *"; }
def ArgRecordDecl : ArgType { let Name = "const RecordDecl *"; }
def ArgRecordField : ArgType { let Name = "const Record::Field *"; }
def ArgFltSemantics : ArgType { let Name = "const llvm::fltSemantics *"; }
@@ -250,6 +251,16 @@ def InitScope : Opcode {
let Args = [ArgUint32];
}
+def GetLocalEnabled : Opcode {
+ let Args = [ArgUint32];
+ let HasCustomEval = 1;
+}
+
+def EnableLocal : Opcode {
+ let Args = [ArgUint32];
+ let HasCustomEval = 1;
+}
+
//===----------------------------------------------------------------------===//
// Constants
//===----------------------------------------------------------------------===//
@@ -360,8 +371,14 @@ def NarrowPtr : Opcode;
// [Pointer] -> [Pointer]
def ExpandPtr : Opcode;
// [Pointer, Offset] -> [Pointer]
-def ArrayElemPtr : AluOpcode;
-def ArrayElemPtrPop : AluOpcode;
+def ArrayElemPtr : Opcode {
+ let Types = [IntegralTypeClass];
+ let HasGroup = 1;
+}
+def ArrayElemPtrPop : Opcode {
+ let Types = [IntegralTypeClass];
+ let HasGroup = 1;
+}
def ArrayElemPop : Opcode {
let Args = [ArgUint32];
@@ -421,6 +438,8 @@ def CheckLiteralType : Opcode {
}
def CheckArraySize : Opcode { let Args = [ArgUint64]; }
+def CheckFunctionDecl : Opcode { let Args = [ArgFunctionDecl]; }
+def CheckBitCast : Opcode { let Args = [ArgTypePtr, ArgBool]; }
// [] -> [Value]
def GetGlobal : AccessOpcode;
@@ -533,13 +552,20 @@ def InitElemPop : Opcode {
//===----------------------------------------------------------------------===//
// [Pointer, Integral] -> [Pointer]
-def AddOffset : AluOpcode;
+def AddOffset : Opcode {
+ let Types = [IntegralTypeClass];
+ let HasGroup = 1;
+}
// [Pointer, Integral] -> [Pointer]
-def SubOffset : AluOpcode;
+def SubOffset : Opcode {
+ let Types = [IntegralTypeClass];
+ let HasGroup = 1;
+}
// [Pointer, Pointer] -> [Integral]
def SubPtr : Opcode {
let Types = [IntegerTypeClass];
+ let Args = [ArgBool];
let HasGroup = 1;
}
@@ -612,12 +638,25 @@ class OverflowOpcode : Opcode {
let HasGroup = 1;
}
+class OverflowBitfieldOpcode : Opcode {
+ let Types = [AluTypeClass];
+ let Args = [ArgBool, ArgUint32];
+ let HasGroup = 1;
+}
+
def Inc : OverflowOpcode;
+def IncBitfield : OverflowBitfieldOpcode;
def IncPop : OverflowOpcode;
+def IncPopBitfield : OverflowBitfieldOpcode;
def PreInc : OverflowOpcode;
+def PreIncBitfield : OverflowBitfieldOpcode;
+
def Dec : OverflowOpcode;
+def DecBitfield : OverflowBitfieldOpcode;
def DecPop : OverflowOpcode;
+def DecPopBitfield : OverflowBitfieldOpcode;
def PreDec : OverflowOpcode;
+def PreDecBitfield : OverflowBitfieldOpcode;
// Float increment and decrement.
def Incf: FloatOpcode;
@@ -838,13 +877,13 @@ def Alloc : Opcode {
}
def AllocN : Opcode {
- let Types = [IntegerTypeClass];
+ let Types = [IntegralTypeClass];
let Args = [ArgPrimType, ArgExpr, ArgBool];
let HasGroup = 1;
}
def AllocCN : Opcode {
- let Types = [IntegerTypeClass];
+ let Types = [IntegralTypeClass];
let Args = [ArgDesc, ArgBool];
let HasGroup = 1;
}
@@ -853,19 +892,13 @@ def Free : Opcode {
let Args = [ArgBool, ArgBool];
}
-def CheckNewTypeMismatch : Opcode {
- let Args = [ArgExpr];
-}
-
-def InvalidNewDeleteExpr : Opcode {
- let Args = [ArgExpr];
-}
-
+def CheckNewTypeMismatch : Opcode { let Args = [ArgExpr]; }
def CheckNewTypeMismatchArray : Opcode {
let Types = [IntegerTypeClass];
let Args = [ArgExpr];
let HasGroup = 1;
}
+def InvalidNewDeleteExpr : Opcode { let Args = [ArgExpr]; }
def IsConstantContext: Opcode;
def CheckAllocations : Opcode;
@@ -890,5 +923,10 @@ def DiagTypeid : Opcode;
def CheckDestruction : Opcode;
+def CtorCheck : Opcode;
+
def PushCC : Opcode { let Args = [ArgBool]; }
def PopCC : Opcode;
+
+def PushMSVCCE : Opcode;
+def PopMSVCCE : Opcode;
diff --git a/clang/lib/AST/ByteCode/Pointer.cpp b/clang/lib/AST/ByteCode/Pointer.cpp
index e417bdf..e237013 100644
--- a/clang/lib/AST/ByteCode/Pointer.cpp
+++ b/clang/lib/AST/ByteCode/Pointer.cpp
@@ -11,6 +11,7 @@
#include "Context.h"
#include "Floating.h"
#include "Function.h"
+#include "InitMap.h"
#include "Integral.h"
#include "InterpBlock.h"
#include "MemberPointer.h"
@@ -32,12 +33,12 @@ Pointer::Pointer(Block *Pointee, uint64_t BaseAndOffset)
Pointer::Pointer(Block *Pointee, unsigned Base, uint64_t Offset)
: Offset(Offset), StorageKind(Storage::Block) {
+ assert(Pointee);
assert((Base == RootPtrMark || Base % alignof(void *) == 0) && "wrong base");
+ assert(Base >= Pointee->getDescriptor()->getMetadataSize());
BS = {Pointee, Base, nullptr, nullptr};
-
- if (Pointee)
- Pointee->addPointer(this);
+ Pointee->addPointer(this);
}
Pointer::Pointer(const Pointer &P)
@@ -247,7 +248,7 @@ APValue Pointer::toAPValue(const ASTContext &ASTCtx) const {
unsigned Index = Ptr.getIndex();
QualType ElemType = Desc->getElemQualType();
Offset += (Index * ASTCtx.getTypeSizeInChars(ElemType));
- if (Ptr.getArray().getType()->isArrayType())
+ if (Ptr.getArray().getFieldDesc()->IsArray)
Path.push_back(APValue::LValuePathEntry::ArrayIndex(Index));
Ptr = Ptr.getArray();
} else {
@@ -277,7 +278,7 @@ APValue Pointer::toAPValue(const ASTContext &ASTCtx) const {
} else {
Offset += (Index * ASTCtx.getTypeSizeInChars(ElemType));
}
- if (Ptr.getArray().getType()->isArrayType())
+ if (Ptr.getArray().getFieldDesc()->IsArray)
Path.push_back(APValue::LValuePathEntry::ArrayIndex(Index));
Ptr = Ptr.getArray();
} else {
@@ -361,7 +362,13 @@ void Pointer::print(llvm::raw_ostream &OS) const {
}
}
-size_t Pointer::computeOffsetForComparison() const {
+/// Compute an offset that can be used to compare the pointer to another one
+/// with the same base. To get accurate results, we basically _have to_ compute
+/// the lvalue offset using the ASTRecordLayout.
+///
+/// FIXME: We're still mixing values from the record layout with our internal
+/// offsets, which will inevitably lead to cryptic errors.
+size_t Pointer::computeOffsetForComparison(const ASTContext &ASTCtx) const {
switch (StorageKind) {
case Storage::Int:
return Int.Value + Offset;
@@ -377,7 +384,6 @@ size_t Pointer::computeOffsetForComparison() const {
size_t Result = 0;
Pointer P = *this;
while (true) {
-
if (P.isVirtualBaseClass()) {
Result += getInlineDesc()->Offset;
P = P.getBase();
@@ -399,28 +405,29 @@ size_t Pointer::computeOffsetForComparison() const {
if (P.isRoot()) {
if (P.isOnePastEnd())
- ++Result;
+ Result +=
+ ASTCtx.getTypeSizeInChars(P.getDeclDesc()->getType()).getQuantity();
break;
}
- if (const Record *R = P.getBase().getRecord(); R && R->isUnion()) {
- if (P.isOnePastEnd())
- ++Result;
- // Direct child of a union - all have offset 0.
- P = P.getBase();
- continue;
- }
+ assert(P.getField());
+ const Record *R = P.getBase().getRecord();
+ assert(R);
+
+ const ASTRecordLayout &Layout = ASTCtx.getASTRecordLayout(R->getDecl());
+ Result += ASTCtx
+ .toCharUnitsFromBits(
+ Layout.getFieldOffset(P.getField()->getFieldIndex()))
+ .getQuantity();
- // Fields, etc.
- Result += P.getInlineDesc()->Offset;
if (P.isOnePastEnd())
- ++Result;
+ Result +=
+ ASTCtx.getTypeSizeInChars(P.getField()->getType()).getQuantity();
P = P.getBase();
if (P.isRoot())
break;
}
-
return Result;
}
@@ -443,8 +450,7 @@ bool Pointer::isInitialized() const {
if (isRoot() && BS.Base == sizeof(GlobalInlineDescriptor) &&
Offset == BS.Base) {
- const GlobalInlineDescriptor &GD =
- *reinterpret_cast<const GlobalInlineDescriptor *>(block()->rawData());
+ const auto &GD = block()->getBlockDesc<GlobalInlineDescriptor>();
return GD.InitState == GlobalInitState::Initialized;
}
@@ -472,24 +478,76 @@ bool Pointer::isElementInitialized(unsigned Index) const {
if (isRoot() && BS.Base == sizeof(GlobalInlineDescriptor) &&
Offset == BS.Base) {
- const GlobalInlineDescriptor &GD =
- *reinterpret_cast<const GlobalInlineDescriptor *>(block()->rawData());
+ const auto &GD = block()->getBlockDesc<GlobalInlineDescriptor>();
return GD.InitState == GlobalInitState::Initialized;
}
if (Desc->isPrimitiveArray()) {
- InitMapPtr &IM = getInitMap();
- if (!IM)
- return false;
+ InitMapPtr IM = getInitMap();
- if (IM->first)
+ if (IM.allInitialized())
return true;
- return IM->second->isElementInitialized(Index);
+ if (!IM.hasInitMap())
+ return false;
+ return IM->isElementInitialized(Index);
}
return isInitialized();
}
+bool Pointer::isElementAlive(unsigned Index) const {
+ assert(getFieldDesc()->isPrimitiveArray());
+
+ InitMapPtr &IM = getInitMap();
+ if (!IM.hasInitMap())
+ return true;
+
+ if (IM.allInitialized())
+ return true;
+
+ return IM->isElementAlive(Index);
+}
+
+void Pointer::startLifetime() const {
+ if (!isBlockPointer())
+ return;
+ if (BS.Base < sizeof(InlineDescriptor))
+ return;
+
+ if (inArray()) {
+ const Descriptor *Desc = getFieldDesc();
+ InitMapPtr &IM = getInitMap();
+ if (!IM.hasInitMap())
+ IM.setInitMap(new InitMap(Desc->getNumElems(), IM.allInitialized()));
+
+ IM->startElementLifetime(getIndex());
+ assert(isArrayRoot() || (this->getLifetime() == Lifetime::Started));
+ return;
+ }
+
+ getInlineDesc()->LifeState = Lifetime::Started;
+}
+
+void Pointer::endLifetime() const {
+ if (!isBlockPointer())
+ return;
+ if (BS.Base < sizeof(InlineDescriptor))
+ return;
+
+ if (inArray()) {
+ const Descriptor *Desc = getFieldDesc();
+ InitMapPtr &IM = getInitMap();
+ if (!IM.hasInitMap())
+ IM.setInitMap(new InitMap(Desc->getNumElems(), IM.allInitialized()));
+
+ IM->endElementLifetime(getIndex());
+ assert(isArrayRoot() || (this->getLifetime() == Lifetime::Ended));
+ return;
+ }
+
+ getInlineDesc()->LifeState = Lifetime::Ended;
+}
+
void Pointer::initialize() const {
if (!isBlockPointer())
return;
@@ -498,8 +556,7 @@ void Pointer::initialize() const {
if (isRoot() && BS.Base == sizeof(GlobalInlineDescriptor) &&
Offset == BS.Base) {
- GlobalInlineDescriptor &GD = *reinterpret_cast<GlobalInlineDescriptor *>(
- asBlockPointer().Pointee->rawData());
+ auto &GD = BS.Pointee->getBlockDesc<GlobalInlineDescriptor>();
GD.InitState = GlobalInitState::Initialized;
return;
}
@@ -525,37 +582,44 @@ void Pointer::initializeElement(unsigned Index) const {
assert(Index < getFieldDesc()->getNumElems());
InitMapPtr &IM = getInitMap();
- if (!IM) {
+ if (IM.allInitialized())
+ return;
+
+ if (!IM.hasInitMap()) {
const Descriptor *Desc = getFieldDesc();
- IM = std::make_pair(false, std::make_shared<InitMap>(Desc->getNumElems()));
+ IM.setInitMap(new InitMap(Desc->getNumElems()));
}
+ assert(IM.hasInitMap());
- assert(IM);
+ if (IM->initializeElement(Index))
+ IM.noteAllInitialized();
+}
- // All initialized.
- if (IM->first)
- return;
+void Pointer::initializeAllElements() const {
+ assert(getFieldDesc()->isPrimitiveArray());
+ assert(isArrayRoot());
- if (IM->second->initializeElement(Index)) {
- IM->first = true;
- IM->second.reset();
- }
+ getInitMap().noteAllInitialized();
}
-void Pointer::initializeAllElements() const {
+bool Pointer::allElementsInitialized() const {
assert(getFieldDesc()->isPrimitiveArray());
assert(isArrayRoot());
- InitMapPtr &IM = getInitMap();
- if (!IM) {
- IM = std::make_pair(true, nullptr);
- } else {
- IM->first = true;
- IM->second.reset();
+ if (isStatic() && BS.Base == 0)
+ return true;
+
+ if (isRoot() && BS.Base == sizeof(GlobalInlineDescriptor) &&
+ Offset == BS.Base) {
+ const auto &GD = block()->getBlockDesc<GlobalInlineDescriptor>();
+ return GD.InitState == GlobalInitState::Initialized;
}
+
+ InitMapPtr IM = getInitMap();
+ return IM.allInitialized();
}
-bool Pointer::allElementsInitialized() const {
+bool Pointer::allElementsAlive() const {
assert(getFieldDesc()->isPrimitiveArray());
assert(isArrayRoot());
@@ -564,13 +628,12 @@ bool Pointer::allElementsInitialized() const {
if (isRoot() && BS.Base == sizeof(GlobalInlineDescriptor) &&
Offset == BS.Base) {
- const GlobalInlineDescriptor &GD =
- *reinterpret_cast<const GlobalInlineDescriptor *>(block()->rawData());
+ const auto &GD = block()->getBlockDesc<GlobalInlineDescriptor>();
return GD.InitState == GlobalInitState::Initialized;
}
InitMapPtr &IM = getInitMap();
- return IM && IM->first;
+ return IM.allInitialized() || (IM.hasInitMap() && IM->allElementsAlive());
}
void Pointer::activate() const {
@@ -874,6 +937,10 @@ std::optional<APValue> Pointer::toRValue(const Context &Ctx,
llvm_unreachable("invalid value to return");
};
+ // Can't return functions as rvalues.
+ if (ResultType->isFunctionType())
+ return std::nullopt;
+
// Invalid to read from.
if (isDummy() || !isLive() || isPastEnd())
return std::nullopt;
@@ -884,6 +951,8 @@ std::optional<APValue> Pointer::toRValue(const Context &Ctx,
// Just load primitive types.
if (OptPrimType T = Ctx.classify(ResultType)) {
+ if (!canDeref(*T))
+ return std::nullopt;
TYPE_SWITCH(*T, return this->deref<T>().toAPValue(ASTCtx));
}
@@ -894,8 +963,8 @@ std::optional<APValue> Pointer::toRValue(const Context &Ctx,
return Result;
}
-IntPointer IntPointer::atOffset(const ASTContext &ASTCtx,
- unsigned Offset) const {
+std::optional<IntPointer> IntPointer::atOffset(const ASTContext &ASTCtx,
+ unsigned Offset) const {
if (!this->Desc)
return *this;
const Record *R = this->Desc->ElemRecord;
@@ -913,6 +982,9 @@ IntPointer IntPointer::atOffset(const ASTContext &ASTCtx,
return *this;
const FieldDecl *FD = F->Decl;
+ if (FD->getParent()->isInvalidDecl())
+ return std::nullopt;
+
const ASTRecordLayout &Layout = ASTCtx.getASTRecordLayout(FD->getParent());
unsigned FieldIndex = FD->getFieldIndex();
uint64_t FieldOffset =
diff --git a/clang/lib/AST/ByteCode/Pointer.h b/clang/lib/AST/ByteCode/Pointer.h
index cd738ce..5c3f98a 100644
--- a/clang/lib/AST/ByteCode/Pointer.h
+++ b/clang/lib/AST/ByteCode/Pointer.h
@@ -15,6 +15,7 @@
#include "Descriptor.h"
#include "FunctionPointer.h"
+#include "InitMap.h"
#include "InterpBlock.h"
#include "clang/AST/ComparisonCategories.h"
#include "clang/AST/Decl.h"
@@ -47,7 +48,8 @@ struct IntPointer {
const Descriptor *Desc;
uint64_t Value;
- IntPointer atOffset(const ASTContext &ASTCtx, unsigned Offset) const;
+ std::optional<IntPointer> atOffset(const ASTContext &ASTCtx,
+ unsigned Offset) const;
IntPointer baseCast(const ASTContext &ASTCtx, unsigned BaseOffset) const;
};
@@ -199,17 +201,19 @@ public:
return Pointer(BS.Pointee, sizeof(InlineDescriptor),
Offset == 0 ? Offset : PastEndMark);
- // Pointer is one past end - magic offset marks that.
- if (isOnePastEnd())
- return Pointer(BS.Pointee, Base, PastEndMark);
-
- if (Offset != Base) {
- // If we're pointing to a primitive array element, there's nothing to do.
- if (inPrimitiveArray())
- return *this;
- // Pointer is to a composite array element - enter it.
- if (Offset != Base)
+ if (inArray()) {
+ // Pointer is one past end - magic offset marks that.
+ if (isOnePastEnd())
+ return Pointer(BS.Pointee, Base, PastEndMark);
+
+ if (Offset != Base) {
+ // If we're pointing to a primitive array element, there's nothing to
+ // do.
+ if (inPrimitiveArray())
+ return *this;
+ // Pointer is to a composite array element - enter it.
return Pointer(BS.Pointee, Offset, Offset);
+ }
}
// Otherwise, we're pointing to a non-array element or
@@ -219,6 +223,8 @@ public:
/// Expands a pointer to the containing array, undoing narrowing.
[[nodiscard]] Pointer expand() const {
+ if (!isBlockPointer())
+ return *this;
assert(isBlockPointer());
Block *Pointee = BS.Pointee;
@@ -349,7 +355,8 @@ public:
if (const auto *CT = getFieldDesc()->getType()->getAs<VectorType>())
return CT->getElementType();
}
- return getFieldDesc()->getType();
+
+ return getFieldDesc()->getDataElemType();
}
[[nodiscard]] Pointer getDeclPtr() const { return Pointer(BS.Pointee); }
@@ -659,6 +666,15 @@ public:
return false;
}
+ /// Checks whether the pointer can be dereferenced to the given PrimType.
+ bool canDeref(PrimType T) const {
+ if (const Descriptor *FieldDesc = getFieldDesc()) {
+ return (FieldDesc->isPrimitive() || FieldDesc->isPrimitiveArray()) &&
+ FieldDesc->getPrimType() == T;
+ }
+ return false;
+ }
+
/// Dereferences the pointer, if it's live.
template <typename T> T &deref() const {
assert(isLive() && "Invalid pointer");
@@ -716,6 +732,9 @@ public:
/// Like isInitialized(), but for primitive arrays.
bool isElementInitialized(unsigned Index) const;
bool allElementsInitialized() const;
+ bool allElementsAlive() const;
+ bool isElementAlive(unsigned Index) const;
+
/// Activats a field.
void activate() const;
/// Deactivates an entire strurcutre.
@@ -726,23 +745,41 @@ public:
return Lifetime::Started;
if (BS.Base < sizeof(InlineDescriptor))
return Lifetime::Started;
+
+ if (inArray() && !isArrayRoot()) {
+ InitMapPtr &IM = getInitMap();
+
+ if (!IM.hasInitMap()) {
+ if (IM.allInitialized())
+ return Lifetime::Started;
+ return getArray().getLifetime();
+ }
+
+ return IM->isElementAlive(getIndex()) ? Lifetime::Started
+ : Lifetime::Ended;
+ }
+
return getInlineDesc()->LifeState;
}
- void endLifetime() const {
- if (!isBlockPointer())
- return;
- if (BS.Base < sizeof(InlineDescriptor))
- return;
- getInlineDesc()->LifeState = Lifetime::Ended;
- }
+ /// Start the lifetime of this pointer. This works for pointer with an
+ /// InlineDescriptor as well as primitive array elements. Pointers are usually
+ /// alive by default, unless the underlying object has been allocated with
+ /// std::allocator. This function is used by std::construct_at.
+ void startLifetime() const;
+ /// Ends the lifetime of the pointer. This works for pointer with an
+ /// InlineDescriptor as well as primitive array elements. This function is
+ /// used by std::destroy_at.
+ void endLifetime() const;
- void startLifetime() const {
- if (!isBlockPointer())
- return;
- if (BS.Base < sizeof(InlineDescriptor))
- return;
- getInlineDesc()->LifeState = Lifetime::Started;
+ /// Strip base casts from this Pointer.
+ /// The result is either a root pointer or something
+ /// that isn't a base class anymore.
+ [[nodiscard]] Pointer stripBaseCasts() const {
+ Pointer P = *this;
+ while (P.isBaseClass())
+ P = P.getBase();
+ return P;
}
/// Compare two pointers.
@@ -779,14 +816,13 @@ public:
/// Compute an integer that can be used to compare this pointer to
/// another one. This is usually NOT the same as the pointer offset
/// regarding the AST record layout.
- size_t computeOffsetForComparison() const;
+ size_t computeOffsetForComparison(const ASTContext &ASTCtx) const;
private:
friend class Block;
friend class DeadBlock;
friend class MemberPointer;
friend class InterpState;
- friend struct InitMap;
friend class DynamicAllocator;
friend class Program;
@@ -830,6 +866,15 @@ private:
inline llvm::raw_ostream &operator<<(llvm::raw_ostream &OS, const Pointer &P) {
P.print(OS);
+ OS << ' ';
+ if (const Descriptor *D = P.getFieldDesc())
+ D->dump(OS);
+ if (P.isArrayElement()) {
+ if (P.isOnePastEnd())
+ OS << " one-past-the-end";
+ else
+ OS << " index " << P.getIndex();
+ }
return OS;
}
diff --git a/clang/lib/AST/ByteCode/PrimType.h b/clang/lib/AST/ByteCode/PrimType.h
index 54fd39a..f0454b4 100644
--- a/clang/lib/AST/ByteCode/PrimType.h
+++ b/clang/lib/AST/ByteCode/PrimType.h
@@ -101,6 +101,7 @@ inline constexpr bool isSignedType(PrimType T) {
enum class CastKind : uint8_t {
Reinterpret,
+ ReinterpretLike,
Volatile,
Dynamic,
};
@@ -111,6 +112,9 @@ inline llvm::raw_ostream &operator<<(llvm::raw_ostream &OS,
case interp::CastKind::Reinterpret:
OS << "reinterpret_cast";
break;
+ case interp::CastKind::ReinterpretLike:
+ OS << "reinterpret_like";
+ break;
case interp::CastKind::Volatile:
OS << "volatile";
break;
diff --git a/clang/lib/AST/ByteCode/Program.cpp b/clang/lib/AST/ByteCode/Program.cpp
index 75bfd9f..76fec63 100644
--- a/clang/lib/AST/ByteCode/Program.cpp
+++ b/clang/lib/AST/ByteCode/Program.cpp
@@ -27,7 +27,7 @@ unsigned Program::getOrCreateNativePointer(const void *Ptr) {
return It->second;
}
-const void *Program::getNativePointer(unsigned Idx) {
+const void *Program::getNativePointer(unsigned Idx) const {
return NativePointers[Idx];
}
@@ -36,30 +36,19 @@ unsigned Program::createGlobalString(const StringLiteral *S, const Expr *Base) {
const size_t BitWidth = CharWidth * Ctx.getCharBit();
unsigned StringLength = S->getLength();
- PrimType CharType;
- switch (CharWidth) {
- case 1:
- CharType = PT_Sint8;
- break;
- case 2:
- CharType = PT_Uint16;
- break;
- case 4:
- CharType = PT_Uint32;
- break;
- default:
- llvm_unreachable("unsupported character width");
- }
+ OptPrimType CharType =
+ Ctx.classify(S->getType()->castAsArrayTypeUnsafe()->getElementType());
+ assert(CharType);
if (!Base)
Base = S;
// Create a descriptor for the string.
- Descriptor *Desc =
- allocateDescriptor(Base, CharType, Descriptor::GlobalMD, StringLength + 1,
- /*isConst=*/true,
- /*isTemporary=*/false,
- /*isMutable=*/false);
+ Descriptor *Desc = allocateDescriptor(Base, *CharType, Descriptor::GlobalMD,
+ StringLength + 1,
+ /*isConst=*/true,
+ /*isTemporary=*/false,
+ /*isMutable=*/false);
// Allocate storage for the string.
// The byte length does not include the null terminator.
@@ -79,26 +68,9 @@ unsigned Program::createGlobalString(const StringLiteral *S, const Expr *Base) {
} else {
// Construct the string in storage.
for (unsigned I = 0; I <= StringLength; ++I) {
- const uint32_t CodePoint = I == StringLength ? 0 : S->getCodeUnit(I);
- switch (CharType) {
- case PT_Sint8: {
- using T = PrimConv<PT_Sint8>::T;
- Ptr.elem<T>(I) = T::from(CodePoint, BitWidth);
- break;
- }
- case PT_Uint16: {
- using T = PrimConv<PT_Uint16>::T;
- Ptr.elem<T>(I) = T::from(CodePoint, BitWidth);
- break;
- }
- case PT_Uint32: {
- using T = PrimConv<PT_Uint32>::T;
- Ptr.elem<T>(I) = T::from(CodePoint, BitWidth);
- break;
- }
- default:
- llvm_unreachable("unsupported character type");
- }
+ uint32_t CodePoint = I == StringLength ? 0 : S->getCodeUnit(I);
+ INT_TYPE_SWITCH_NO_BOOL(*CharType,
+ Ptr.elem<T>(I) = T::from(CodePoint, BitWidth););
}
}
Ptr.initializeAllElements();
@@ -218,22 +190,43 @@ UnsignedOrNone Program::createGlobal(const ValueDecl *VD, const Expr *Init) {
return std::nullopt;
Global *NewGlobal = Globals[*Idx];
+ // Note that this loop has one iteration where Redecl == VD.
for (const Decl *Redecl : VD->redecls()) {
- unsigned &PIdx = GlobalIndices[Redecl];
+
+ // If this redecl was registered as a dummy variable, it is now a proper
+ // global variable and points to the block we just created.
+ if (auto DummyIt = DummyVariables.find(Redecl);
+ DummyIt != DummyVariables.end()) {
+ Global *Dummy = Globals[DummyIt->second];
+ Dummy->block()->movePointersTo(NewGlobal->block());
+ Globals[DummyIt->second] = NewGlobal;
+ DummyVariables.erase(DummyIt);
+ }
+ // If the redeclaration hasn't been registered yet at all, we just set its
+ // global index to Idx. If it has been registered yet, it might have
+ // pointers pointing to it and we need to transfer those pointers to the new
+ // block.
+ auto [Iter, Inserted] = GlobalIndices.try_emplace(Redecl);
+ if (Inserted) {
+ GlobalIndices[Redecl] = *Idx;
+ continue;
+ }
+
if (Redecl != VD) {
- if (Block *RedeclBlock = Globals[PIdx]->block();
+ if (Block *RedeclBlock = Globals[Iter->second]->block();
RedeclBlock->isExtern()) {
- Globals[PIdx] = NewGlobal;
+
// All pointers pointing to the previous extern decl now point to the
// new decl.
- for (Pointer *Ptr = RedeclBlock->Pointers; Ptr; Ptr = Ptr->BS.Next) {
- RedeclBlock->removePointer(Ptr);
- Ptr->BS.Pointee = NewGlobal->block();
- NewGlobal->block()->addPointer(Ptr);
- }
+ // A previous iteration might've already fixed up the pointers for this
+ // global.
+ if (RedeclBlock != NewGlobal->block())
+ RedeclBlock->movePointersTo(NewGlobal->block());
+
+ Globals[Iter->second] = NewGlobal;
}
}
- PIdx = *Idx;
+ Iter->second = *Idx;
}
return *Idx;
@@ -418,8 +411,8 @@ Descriptor *Program::createDescriptor(const DeclTy &D, const Type *Ty,
if (OptPrimType T = Ctx.classify(ElemTy)) {
// Arrays of primitives.
unsigned ElemSize = primSize(*T);
- if (std::numeric_limits<unsigned>::max() / ElemSize <= NumElems) {
- return {};
+ if ((Descriptor::MaxArrayElemBytes / ElemSize) < NumElems) {
+ return nullptr;
}
return allocateDescriptor(D, *T, MDSize, NumElems, IsConst, IsTemporary,
IsMutable);
@@ -432,7 +425,7 @@ Descriptor *Program::createDescriptor(const DeclTy &D, const Type *Ty,
return nullptr;
unsigned ElemSize = ElemDesc->getAllocSize() + sizeof(InlineDescriptor);
if (std::numeric_limits<unsigned>::max() / ElemSize <= NumElems)
- return {};
+ return nullptr;
return allocateDescriptor(D, Ty, ElemDesc, MDSize, NumElems, IsConst,
IsTemporary, IsMutable);
}
diff --git a/clang/lib/AST/ByteCode/Program.h b/clang/lib/AST/ByteCode/Program.h
index 28fcc97..c879550 100644
--- a/clang/lib/AST/ByteCode/Program.h
+++ b/clang/lib/AST/ByteCode/Program.h
@@ -58,7 +58,7 @@ public:
unsigned getOrCreateNativePointer(const void *Ptr);
/// Returns the value of a marshalled native pointer.
- const void *getNativePointer(unsigned Idx);
+ const void *getNativePointer(unsigned Idx) const;
/// Emits a string literal among global data.
unsigned createGlobalString(const StringLiteral *S,
@@ -205,7 +205,6 @@ private:
const Block *block() const { return &B; }
private:
- /// Required metadata - does not actually track pointers.
Block B;
};
diff --git a/clang/lib/AST/ByteCode/Record.h b/clang/lib/AST/ByteCode/Record.h
index 8245eef..7b66c3b 100644
--- a/clang/lib/AST/ByteCode/Record.h
+++ b/clang/lib/AST/ByteCode/Record.h
@@ -61,14 +61,6 @@ public:
unsigned getSize() const { return BaseSize; }
/// Returns the full size of the record, including records.
unsigned getFullSize() const { return BaseSize + VirtualSize; }
- /// Returns a field.
- const Field *getField(const FieldDecl *FD) const;
- /// Returns a base descriptor.
- const Base *getBase(const RecordDecl *FD) const;
- /// Returns a base descriptor.
- const Base *getBase(QualType T) const;
- /// Returns a virtual base descriptor.
- const Base *getVirtualBase(const RecordDecl *RD) const;
/// Returns the destructor of the record, if any.
const CXXDestructorDecl *getDestructor() const {
if (const auto *CXXDecl = dyn_cast<CXXRecordDecl>(Decl))
@@ -87,6 +79,8 @@ public:
unsigned getNumFields() const { return Fields.size(); }
const Field *getField(unsigned I) const { return &Fields[I]; }
+ /// Returns a field.
+ const Field *getField(const FieldDecl *FD) const;
using const_base_iter = BaseList::const_iterator;
llvm::iterator_range<const_base_iter> bases() const {
@@ -98,6 +92,10 @@ public:
assert(I < getNumBases());
return &Bases[I];
}
+ /// Returns a base descriptor.
+ const Base *getBase(QualType T) const;
+ /// Returns a base descriptor.
+ const Base *getBase(const RecordDecl *FD) const;
using const_virtual_iter = VirtualBaseList::const_iterator;
llvm::iterator_range<const_virtual_iter> virtual_bases() const {
@@ -106,6 +104,8 @@ public:
unsigned getNumVirtualBases() const { return VirtualBases.size(); }
const Base *getVirtualBase(unsigned I) const { return &VirtualBases[I]; }
+ /// Returns a virtual base descriptor.
+ const Base *getVirtualBase(const RecordDecl *RD) const;
void dump(llvm::raw_ostream &OS, unsigned Indentation = 0,
unsigned Offset = 0) const;
diff --git a/clang/lib/AST/ByteCode/Source.h b/clang/lib/AST/ByteCode/Source.h
index f355d14..56ca197 100644
--- a/clang/lib/AST/ByteCode/Source.h
+++ b/clang/lib/AST/ByteCode/Source.h
@@ -51,6 +51,7 @@ public:
explicit operator bool() const { return Ptr; }
bool operator<=(const CodePtr &RHS) const { return Ptr <= RHS.Ptr; }
bool operator>=(const CodePtr &RHS) const { return Ptr >= RHS.Ptr; }
+ bool operator==(const CodePtr RHS) const { return Ptr == RHS.Ptr; }
/// Reads data and advances the pointer.
template <typename T> std::enable_if_t<!std::is_pointer<T>::value, T> read() {
diff --git a/clang/lib/AST/ByteCode/State.cpp b/clang/lib/AST/ByteCode/State.cpp
index 323231f..00e3b1a 100644
--- a/clang/lib/AST/ByteCode/State.cpp
+++ b/clang/lib/AST/ByteCode/State.cpp
@@ -25,15 +25,15 @@ OptionalDiagnostic State::FFDiag(SourceLocation Loc, diag::kind DiagId,
OptionalDiagnostic State::FFDiag(const Expr *E, diag::kind DiagId,
unsigned ExtraNotes) {
- if (getEvalStatus().Diag)
+ if (EvalStatus.Diag)
return diag(E->getExprLoc(), DiagId, ExtraNotes, false);
setActiveDiagnostic(false);
return OptionalDiagnostic();
}
-OptionalDiagnostic State::FFDiag(const SourceInfo &SI, diag::kind DiagId,
+OptionalDiagnostic State::FFDiag(SourceInfo SI, diag::kind DiagId,
unsigned ExtraNotes) {
- if (getEvalStatus().Diag)
+ if (EvalStatus.Diag)
return diag(SI.getLoc(), DiagId, ExtraNotes, false);
setActiveDiagnostic(false);
return OptionalDiagnostic();
@@ -43,7 +43,7 @@ OptionalDiagnostic State::CCEDiag(SourceLocation Loc, diag::kind DiagId,
unsigned ExtraNotes) {
// Don't override a previous diagnostic. Don't bother collecting
// diagnostics if we're evaluating for overflow.
- if (!getEvalStatus().Diag || !getEvalStatus().Diag->empty()) {
+ if (!EvalStatus.Diag || !EvalStatus.Diag->empty()) {
setActiveDiagnostic(false);
return OptionalDiagnostic();
}
@@ -55,7 +55,7 @@ OptionalDiagnostic State::CCEDiag(const Expr *E, diag::kind DiagId,
return CCEDiag(E->getExprLoc(), DiagId, ExtraNotes);
}
-OptionalDiagnostic State::CCEDiag(const SourceInfo &SI, diag::kind DiagId,
+OptionalDiagnostic State::CCEDiag(SourceInfo SI, diag::kind DiagId,
unsigned ExtraNotes) {
return CCEDiag(SI.getLoc(), DiagId, ExtraNotes);
}
@@ -68,31 +68,29 @@ OptionalDiagnostic State::Note(SourceLocation Loc, diag::kind DiagId) {
void State::addNotes(ArrayRef<PartialDiagnosticAt> Diags) {
if (hasActiveDiagnostic())
- llvm::append_range(*getEvalStatus().Diag, Diags);
+ llvm::append_range(*EvalStatus.Diag, Diags);
}
DiagnosticBuilder State::report(SourceLocation Loc, diag::kind DiagId) {
- return getASTContext().getDiagnostics().Report(Loc, DiagId);
+ return Ctx.getDiagnostics().Report(Loc, DiagId);
}
/// Add a diagnostic to the diagnostics list.
PartialDiagnostic &State::addDiag(SourceLocation Loc, diag::kind DiagId) {
- PartialDiagnostic PD(DiagId, getASTContext().getDiagAllocator());
- getEvalStatus().Diag->push_back(std::make_pair(Loc, PD));
- return getEvalStatus().Diag->back().second;
+ PartialDiagnostic PD(DiagId, Ctx.getDiagAllocator());
+ EvalStatus.Diag->push_back(std::make_pair(Loc, PD));
+ return EvalStatus.Diag->back().second;
}
OptionalDiagnostic State::diag(SourceLocation Loc, diag::kind DiagId,
unsigned ExtraNotes, bool IsCCEDiag) {
- Expr::EvalStatus &EvalStatus = getEvalStatus();
if (EvalStatus.Diag) {
if (hasPriorDiagnostic()) {
return OptionalDiagnostic();
}
unsigned CallStackNotes = getCallStackDepth() - 1;
- unsigned Limit =
- getASTContext().getDiagnostics().getConstexprBacktraceLimit();
+ unsigned Limit = Ctx.getDiagnostics().getConstexprBacktraceLimit();
if (Limit)
CallStackNotes = std::min(CallStackNotes, Limit + 1);
if (checkingPotentialConstantExpression())
@@ -158,3 +156,66 @@ void State::addCallStack(unsigned Limit) {
<< Out.str() << CallRange;
}
}
+
+bool State::hasPriorDiagnostic() {
+ if (!EvalStatus.Diag->empty()) {
+ switch (EvalMode) {
+ case EvaluationMode::ConstantFold:
+ case EvaluationMode::IgnoreSideEffects:
+ if (!HasFoldFailureDiagnostic)
+ break;
+ // We've already failed to fold something. Keep that diagnostic.
+ [[fallthrough]];
+ case EvaluationMode::ConstantExpression:
+ case EvaluationMode::ConstantExpressionUnevaluated:
+ setActiveDiagnostic(false);
+ return true;
+ }
+ }
+ return false;
+}
+
+bool State::keepEvaluatingAfterFailure() const {
+ uint64_t Limit = Ctx.getLangOpts().ConstexprStepLimit;
+ if (Limit != 0 && !stepsLeft())
+ return false;
+
+ switch (EvalMode) {
+ case EvaluationMode::ConstantExpression:
+ case EvaluationMode::ConstantExpressionUnevaluated:
+ case EvaluationMode::ConstantFold:
+ case EvaluationMode::IgnoreSideEffects:
+ return checkingPotentialConstantExpression() ||
+ checkingForUndefinedBehavior();
+ }
+ llvm_unreachable("Missed EvalMode case");
+}
+
+bool State::keepEvaluatingAfterSideEffect() const {
+ switch (EvalMode) {
+ case EvaluationMode::IgnoreSideEffects:
+ return true;
+
+ case EvaluationMode::ConstantExpression:
+ case EvaluationMode::ConstantExpressionUnevaluated:
+ case EvaluationMode::ConstantFold:
+ // By default, assume any side effect might be valid in some other
+ // evaluation of this expression from a different context.
+ return checkingPotentialConstantExpression() ||
+ checkingForUndefinedBehavior();
+ }
+ llvm_unreachable("Missed EvalMode case");
+}
+
+bool State::keepEvaluatingAfterUndefinedBehavior() const {
+ switch (EvalMode) {
+ case EvaluationMode::IgnoreSideEffects:
+ case EvaluationMode::ConstantFold:
+ return true;
+
+ case EvaluationMode::ConstantExpression:
+ case EvaluationMode::ConstantExpressionUnevaluated:
+ return checkingForUndefinedBehavior();
+ }
+ llvm_unreachable("Missed EvalMode case");
+}
diff --git a/clang/lib/AST/ByteCode/State.h b/clang/lib/AST/ByteCode/State.h
index 0695c61..a720033 100644
--- a/clang/lib/AST/ByteCode/State.h
+++ b/clang/lib/AST/ByteCode/State.h
@@ -13,8 +13,10 @@
#ifndef LLVM_CLANG_AST_INTERP_STATE_H
#define LLVM_CLANG_AST_INTERP_STATE_H
+#include "clang/AST/ASTContext.h"
#include "clang/AST/ASTDiagnostic.h"
#include "clang/AST/Expr.h"
+#include "clang/AST/OptionalDiagnostic.h"
namespace clang {
class OptionalDiagnostic;
@@ -78,21 +80,40 @@ class SourceInfo;
/// Interface for the VM to interact with the AST walker's context.
class State {
public:
+ State(ASTContext &ASTCtx, Expr::EvalStatus &EvalStatus)
+ : Ctx(ASTCtx), EvalStatus(EvalStatus) {}
virtual ~State();
- virtual bool noteUndefinedBehavior() = 0;
- virtual bool keepEvaluatingAfterFailure() const = 0;
- virtual bool keepEvaluatingAfterSideEffect() const = 0;
- virtual Frame *getCurrentFrame() = 0;
+ virtual const Frame *getCurrentFrame() = 0;
virtual const Frame *getBottomFrame() const = 0;
- virtual bool hasActiveDiagnostic() = 0;
- virtual void setActiveDiagnostic(bool Flag) = 0;
- virtual void setFoldFailureDiagnostic(bool Flag) = 0;
- virtual Expr::EvalStatus &getEvalStatus() const = 0;
- virtual ASTContext &getASTContext() const = 0;
- virtual bool hasPriorDiagnostic() = 0;
virtual unsigned getCallStackDepth() = 0;
- virtual bool noteSideEffect() = 0;
+ virtual bool stepsLeft() const = 0;
+
+ Expr::EvalStatus &getEvalStatus() const { return EvalStatus; }
+ ASTContext &getASTContext() const { return Ctx; }
+ const LangOptions &getLangOpts() const { return Ctx.getLangOpts(); }
+
+ /// Note that we have had a side-effect, and determine whether we should
+ /// keep evaluating.
+ bool noteSideEffect() const {
+ EvalStatus.HasSideEffects = true;
+ return keepEvaluatingAfterSideEffect();
+ }
+
+ /// Should we continue evaluation as much as possible after encountering a
+ /// construct which can't be reduced to a value?
+ bool keepEvaluatingAfterFailure() const;
+ /// Should we continue evaluation after encountering a side-effect that we
+ /// couldn't model?
+ bool keepEvaluatingAfterSideEffect() const;
+
+ /// Note that we hit something that was technically undefined behavior, but
+ /// that we can evaluate past it (such as signed overflow or floating-point
+ /// division by zero.)
+ bool noteUndefinedBehavior() const {
+ EvalStatus.HasUndefinedBehavior = true;
+ return keepEvaluatingAfterUndefinedBehavior();
+ }
/// Are we checking whether the expression is a potential constant
/// expression?
@@ -104,8 +125,6 @@ public:
return CheckingForUndefinedBehavior;
}
-public:
- State() = default;
/// Diagnose that the evaluation could not be folded (FF => FoldFailure)
OptionalDiagnostic
FFDiag(SourceLocation Loc,
@@ -118,7 +137,7 @@ public:
unsigned ExtraNotes = 0);
OptionalDiagnostic
- FFDiag(const SourceInfo &SI,
+ FFDiag(SourceInfo SI,
diag::kind DiagId = diag::note_invalid_subexpr_in_const_expr,
unsigned ExtraNotes = 0);
@@ -138,7 +157,7 @@ public:
unsigned ExtraNotes = 0);
OptionalDiagnostic
- CCEDiag(const SourceInfo &SI,
+ CCEDiag(SourceInfo SI,
diag::kind DiagId = diag::note_invalid_subexpr_in_const_expr,
unsigned ExtraNotes = 0);
@@ -169,14 +188,39 @@ public:
bool CheckingForUndefinedBehavior = false;
EvaluationMode EvalMode;
+ ASTContext &Ctx;
+ Expr::EvalStatus &EvalStatus;
private:
+ /// HasActiveDiagnostic - Was the previous diagnostic stored? If so, further
+ /// notes attached to it will also be stored, otherwise they will not be.
+ bool HasActiveDiagnostic = false;
+
+ /// Have we emitted a diagnostic explaining why we couldn't constant
+ /// fold (not just why it's not strictly a constant expression)?
+ bool HasFoldFailureDiagnostic = false;
+
void addCallStack(unsigned Limit);
PartialDiagnostic &addDiag(SourceLocation Loc, diag::kind DiagId);
OptionalDiagnostic diag(SourceLocation Loc, diag::kind DiagId,
unsigned ExtraNotes, bool IsCCEDiag);
+
+ /// Should we continue evaluation after encountering undefined behavior?
+ bool keepEvaluatingAfterUndefinedBehavior() const;
+
+ // If we have a prior diagnostic, it will be noting that the expression
+ // isn't a constant expression. This diagnostic is more important,
+ // unless we require this evaluation to produce a constant expression.
+ //
+ // FIXME: We might want to show both diagnostics to the user in
+ // EvaluationMode::ConstantFold mode.
+ bool hasPriorDiagnostic();
+
+ void setFoldFailureDiagnostic(bool Flag) { HasFoldFailureDiagnostic = Flag; };
+ void setActiveDiagnostic(bool Flag) { HasActiveDiagnostic = Flag; };
+ bool hasActiveDiagnostic() const { return HasActiveDiagnostic; }
};
} // namespace interp
diff --git a/clang/lib/AST/CMakeLists.txt b/clang/lib/AST/CMakeLists.txt
index d4fd7a7..f9a5f4f0 100644
--- a/clang/lib/AST/CMakeLists.txt
+++ b/clang/lib/AST/CMakeLists.txt
@@ -66,6 +66,7 @@ add_clang_library(clangAST
ExternalASTMerger.cpp
ExternalASTSource.cpp
FormatString.cpp
+ InferAlloc.cpp
InheritViz.cpp
ByteCode/BitcastBuffer.cpp
ByteCode/ByteCodeEmitter.cpp
@@ -81,6 +82,7 @@ add_clang_library(clangAST
ByteCode/Floating.cpp
ByteCode/EvaluationResult.cpp
ByteCode/DynamicAllocator.cpp
+ ByteCode/InitMap.cpp
ByteCode/Interp.cpp
ByteCode/InterpBlock.cpp
ByteCode/InterpFrame.cpp
diff --git a/clang/lib/AST/CXXInheritance.cpp b/clang/lib/AST/CXXInheritance.cpp
index 7a3e7ea..29f5916 100644
--- a/clang/lib/AST/CXXInheritance.cpp
+++ b/clang/lib/AST/CXXInheritance.cpp
@@ -34,9 +34,9 @@ using namespace clang;
/// ambiguous, i.e., there are two or more paths that refer to
/// different base class subobjects of the same type. BaseType must be
/// an unqualified, canonical class type.
-bool CXXBasePaths::isAmbiguous(CanQualType BaseType) {
+bool CXXBasePaths::isAmbiguous(CanQualType BaseType) const {
BaseType = BaseType.getUnqualifiedType();
- IsVirtBaseAndNumberNonVirtBases Subobjects = ClassSubobjects[BaseType];
+ IsVirtBaseAndNumberNonVirtBases Subobjects = ClassSubobjects.lookup(BaseType);
return Subobjects.NumberOfNonVirtBases + (Subobjects.IsVirtBase ? 1 : 0) > 1;
}
diff --git a/clang/lib/AST/Comment.cpp b/clang/lib/AST/Comment.cpp
index 37e21c3..361a8a7e 100644
--- a/clang/lib/AST/Comment.cpp
+++ b/clang/lib/AST/Comment.cpp
@@ -56,16 +56,16 @@ good implements_child_begin_end(Comment::child_iterator (T::*)() const) {
return good();
}
-LLVM_ATTRIBUTE_UNUSED
-static inline bad implements_child_begin_end(
- Comment::child_iterator (Comment::*)() const) {
+[[maybe_unused]]
+static inline bad
+implements_child_begin_end(Comment::child_iterator (Comment::*)() const) {
return bad();
}
#define ASSERT_IMPLEMENTS_child_begin(function) \
(void) good(implements_child_begin_end(function))
-LLVM_ATTRIBUTE_UNUSED
+[[maybe_unused]]
static inline void CheckCommentASTNodes() {
#define ABSTRACT_COMMENT(COMMENT)
#define COMMENT(CLASS, PARENT) \
diff --git a/clang/lib/AST/CommentSema.cpp b/clang/lib/AST/CommentSema.cpp
index 649fba9..d5ba240 100644
--- a/clang/lib/AST/CommentSema.cpp
+++ b/clang/lib/AST/CommentSema.cpp
@@ -225,7 +225,7 @@ static ParamCommandPassDirection getParamPassDirection(StringRef Arg) {
return llvm::StringSwitch<ParamCommandPassDirection>(Arg)
.Case("[in]", ParamCommandPassDirection::In)
.Case("[out]", ParamCommandPassDirection::Out)
- .Cases("[in,out]", "[out,in]", ParamCommandPassDirection::InOut)
+ .Cases({"[in,out]", "[out,in]"}, ParamCommandPassDirection::InOut)
.Default(static_cast<ParamCommandPassDirection>(-1));
}
@@ -1061,8 +1061,8 @@ InlineCommandRenderKind Sema::getInlineCommandRenderKind(StringRef Name) const {
return llvm::StringSwitch<InlineCommandRenderKind>(Name)
.Case("b", InlineCommandRenderKind::Bold)
- .Cases("c", "p", InlineCommandRenderKind::Monospaced)
- .Cases("a", "e", "em", InlineCommandRenderKind::Emphasized)
+ .Cases({"c", "p"}, InlineCommandRenderKind::Monospaced)
+ .Cases({"a", "e", "em"}, InlineCommandRenderKind::Emphasized)
.Case("anchor", InlineCommandRenderKind::Anchor)
.Default(InlineCommandRenderKind::Normal);
}
diff --git a/clang/lib/AST/ComparisonCategories.cpp b/clang/lib/AST/ComparisonCategories.cpp
index 0c7a7f4..b2197bd 100644
--- a/clang/lib/AST/ComparisonCategories.cpp
+++ b/clang/lib/AST/ComparisonCategories.cpp
@@ -49,7 +49,7 @@ bool ComparisonCategoryInfo::ValueInfo::hasValidIntValue() const {
// Before we attempt to get the value of the first field, ensure that we
// actually have one (and only one) field.
const auto *Record = VD->getType()->getAsCXXRecordDecl();
- if (std::distance(Record->field_begin(), Record->field_end()) != 1 ||
+ if (!Record || Record->getNumFields() != 1 ||
!Record->field_begin()->getType()->isIntegralOrEnumerationType())
return false;
@@ -83,7 +83,15 @@ ComparisonCategoryInfo::ValueInfo *ComparisonCategoryInfo::lookupValueInfo(
&Ctx.Idents.get(ComparisonCategories::getResultString(ValueKind)));
if (Lookup.empty() || !isa<VarDecl>(Lookup.front()))
return nullptr;
- Objects.emplace_back(ValueKind, cast<VarDecl>(Lookup.front()));
+ // The static member must have the same type as the comparison category class
+ // itself (e.g., std::partial_ordering::less must be of type
+ // partial_ordering).
+ VarDecl *VD = cast<VarDecl>(Lookup.front());
+ const CXXRecordDecl *VDRecord = VD->getType()->getAsCXXRecordDecl();
+ if (!VDRecord || VDRecord->getCanonicalDecl() != Record->getCanonicalDecl())
+ return nullptr;
+
+ Objects.emplace_back(ValueKind, VD);
return &Objects.back();
}
diff --git a/clang/lib/AST/ComputeDependence.cpp b/clang/lib/AST/ComputeDependence.cpp
index e0cf0de..34167ee 100644
--- a/clang/lib/AST/ComputeDependence.cpp
+++ b/clang/lib/AST/ComputeDependence.cpp
@@ -115,6 +115,10 @@ ExprDependence clang::computeDependence(ArraySubscriptExpr *E) {
return E->getLHS()->getDependence() | E->getRHS()->getDependence();
}
+ExprDependence clang::computeDependence(MatrixSingleSubscriptExpr *E) {
+ return E->getBase()->getDependence() | E->getRowIdx()->getDependence();
+}
+
ExprDependence clang::computeDependence(MatrixSubscriptExpr *E) {
return E->getBase()->getDependence() | E->getRowIdx()->getDependence() |
(E->getColumnIdx() ? E->getColumnIdx()->getDependence()
@@ -178,7 +182,7 @@ ExprDependence clang::computeDependence(StmtExpr *E, unsigned TemplateDepth) {
auto D = toExprDependenceForImpliedType(E->getType()->getDependence());
// Propagate dependence of the result.
if (const auto *CompoundExprResult =
- dyn_cast_or_null<ValueStmt>(E->getSubStmt()->getStmtExprResult()))
+ dyn_cast_or_null<ValueStmt>(E->getSubStmt()->body_back()))
if (const Expr *ResultExpr = CompoundExprResult->getExprStmt())
D |= ResultExpr->getDependence();
// Note: we treat a statement-expression in a dependent context as always
@@ -252,6 +256,10 @@ ExprDependence clang::computeDependence(ExtVectorElementExpr *E) {
return E->getBase()->getDependence();
}
+ExprDependence clang::computeDependence(MatrixElementExpr *E) {
+ return E->getBase()->getDependence();
+}
+
ExprDependence clang::computeDependence(BlockExpr *E,
bool ContainsUnexpandedParameterPack) {
auto D = toExprDependenceForImpliedType(E->getType()->getDependence());
diff --git a/clang/lib/AST/Decl.cpp b/clang/lib/AST/Decl.cpp
index f048076..66c625f 100644
--- a/clang/lib/AST/Decl.cpp
+++ b/clang/lib/AST/Decl.cpp
@@ -1742,6 +1742,9 @@ void NamedDecl::printNestedNameSpecifier(raw_ostream &OS,
// Collect named contexts.
DeclarationName NameInScope = getDeclName();
for (; Ctx; Ctx = Ctx->getParent()) {
+ if (P.Callbacks && P.Callbacks->isScopeVisible(Ctx))
+ continue;
+
// Suppress anonymous namespace if requested.
if (P.SuppressUnwrittenScope && isa<NamespaceDecl>(Ctx) &&
cast<NamespaceDecl>(Ctx)->isAnonymousNamespace())
@@ -1750,9 +1753,11 @@ void NamedDecl::printNestedNameSpecifier(raw_ostream &OS,
// Suppress inline namespace if it doesn't make the result ambiguous.
if (Ctx->isInlineNamespace() && NameInScope) {
if (P.SuppressInlineNamespace ==
- PrintingPolicy::SuppressInlineNamespaceMode::All ||
+ llvm::to_underlying(
+ PrintingPolicy::SuppressInlineNamespaceMode::All) ||
(P.SuppressInlineNamespace ==
- PrintingPolicy::SuppressInlineNamespaceMode::Redundant &&
+ llvm::to_underlying(
+ PrintingPolicy::SuppressInlineNamespaceMode::Redundant) &&
cast<NamespaceDecl>(Ctx)->isRedundantInlineQualifierFor(
NameInScope))) {
continue;
@@ -1786,11 +1791,17 @@ void NamedDecl::printNestedNameSpecifier(raw_ostream &OS,
}
else
OS << *ND;
- } else if (const auto *RD = dyn_cast<RecordDecl>(DC)) {
- if (!RD->getIdentifier())
- OS << "(anonymous " << RD->getKindName() << ')';
- else
- OS << *RD;
+ } else if (const auto *RD = llvm::dyn_cast<RecordDecl>(DC)) {
+ PrintingPolicy Copy(P);
+ // As part of a scope we want to print anonymous names as:
+ // ..::(anonymous struct)::..
+ //
+ // I.e., suppress tag locations, suppress leading keyword, *don't*
+ // suppress tag in name
+ Copy.SuppressTagKeyword = true;
+ Copy.SuppressTagKeywordInAnonNames = false;
+ Copy.AnonymousTagLocations = false;
+ RD->printName(OS, Copy);
} else if (const auto *FD = dyn_cast<FunctionDecl>(DC)) {
const FunctionProtoType *FT = nullptr;
if (FD->hasWrittenPrototype())
@@ -3180,7 +3191,7 @@ void FunctionDecl::DefaultedOrDeletedFunctionInfo::setDeletedMessage(
}
FunctionDecl::DefaultedOrDeletedFunctionInfo *
-FunctionDecl::getDefalutedOrDeletedInfo() const {
+FunctionDecl::getDefaultedOrDeletedInfo() const {
return FunctionDeclBits.HasDefaultedOrDeletedInfo ? DefaultedOrDeletedInfo
: nullptr;
}
@@ -3380,11 +3391,11 @@ bool FunctionDecl::isMSVCRTEntryPoint() const {
return false;
return llvm::StringSwitch<bool>(getName())
- .Cases("main", // an ANSI console app
- "wmain", // a Unicode console App
- "WinMain", // an ANSI GUI app
- "wWinMain", // a Unicode GUI app
- "DllMain", // a DLL
+ .Cases({"main", // an ANSI console app
+ "wmain", // a Unicode console App
+ "WinMain", // an ANSI GUI app
+ "wWinMain", // a Unicode GUI app
+ "DllMain"}, // a DLL
true)
.Default(false);
}
@@ -4949,19 +4960,76 @@ void TagDecl::setQualifierInfo(NestedNameSpecifierLoc QualifierLoc) {
}
}
+void TagDecl::printAnonymousTagDecl(llvm::raw_ostream &OS,
+ const PrintingPolicy &Policy) const {
+ if (TypedefNameDecl *Typedef = getTypedefNameForAnonDecl()) {
+ assert(Typedef->getIdentifier() && "Typedef without identifier?");
+ OS << Typedef->getIdentifier()->getName();
+ return;
+ }
+
+ bool SuppressTagKeywordInName = Policy.SuppressTagKeywordInAnonNames;
+
+ // Emit leading keyword. Since we printed a leading keyword make sure we
+ // don't print the tag as part of the name too.
+ if (!Policy.SuppressTagKeyword) {
+ OS << getKindName() << ' ';
+ SuppressTagKeywordInName = true;
+ }
+
+ // Make an unambiguous representation for anonymous types, e.g.
+ // (anonymous enum at /usr/include/string.h:120:9)
+ OS << (Policy.MSVCFormatting ? '`' : '(');
+
+ if (isa<CXXRecordDecl>(this) && cast<CXXRecordDecl>(this)->isLambda()) {
+ OS << "lambda";
+ SuppressTagKeywordInName = true;
+ } else if ((isa<RecordDecl>(this) &&
+ cast<RecordDecl>(this)->isAnonymousStructOrUnion())) {
+ OS << "anonymous";
+ } else {
+ OS << "unnamed";
+ }
+
+ if (!SuppressTagKeywordInName)
+ OS << ' ' << getKindName();
+
+ if (Policy.AnonymousTagLocations) {
+ PresumedLoc PLoc =
+ getASTContext().getSourceManager().getPresumedLoc(getLocation());
+ if (PLoc.isValid()) {
+ OS << " at ";
+ StringRef File = PLoc.getFilename();
+ llvm::SmallString<1024> WrittenFile(File);
+ if (auto *Callbacks = Policy.Callbacks)
+ WrittenFile = Callbacks->remapPath(File);
+ // Fix inconsistent path separator created by
+ // clang::DirectoryLookup::LookupFile when the file path is a relative
+ // path.
+ llvm::sys::path::Style Style =
+ llvm::sys::path::is_absolute(WrittenFile)
+ ? llvm::sys::path::Style::native
+ : (Policy.MSVCFormatting
+ ? llvm::sys::path::Style::windows_backslash
+ : llvm::sys::path::Style::posix);
+ llvm::sys::path::native(WrittenFile, Style);
+ OS << WrittenFile << ':' << PLoc.getLine() << ':' << PLoc.getColumn();
+ }
+ }
+
+ OS << (Policy.MSVCFormatting ? '\'' : ')');
+}
+
void TagDecl::printName(raw_ostream &OS, const PrintingPolicy &Policy) const {
DeclarationName Name = getDeclName();
// If the name is supposed to have an identifier but does not have one, then
// the tag is anonymous and we should print it differently.
if (Name.isIdentifier() && !Name.getAsIdentifierInfo()) {
- // If the caller wanted to print a qualified name, they've already printed
- // the scope. And if the caller doesn't want that, the scope information
- // is already printed as part of the type.
- PrintingPolicy Copy(Policy);
- Copy.SuppressScope = true;
- QualType(getASTContext().getCanonicalTagType(this)).print(OS, Copy);
+ printAnonymousTagDecl(OS, Policy);
+
return;
}
+
// Otherwise, do the normal printing.
Name.print(OS, Policy);
}
@@ -5237,7 +5305,14 @@ void RecordDecl::completeDefinition() {
/// This which can be turned on with an attribute, pragma, or the
/// -mms-bitfields command-line option.
bool RecordDecl::isMsStruct(const ASTContext &C) const {
- return hasAttr<MSStructAttr>() || C.getLangOpts().MSBitfields == 1;
+ if (hasAttr<GCCStructAttr>())
+ return false;
+ if (hasAttr<MSStructAttr>())
+ return true;
+ auto LayoutCompatibility = C.getLangOpts().getLayoutCompatibility();
+ if (LayoutCompatibility == LangOptions::LayoutCompatibilityKind::Default)
+ return C.defaultsToMsStruct();
+ return LayoutCompatibility == LangOptions::LayoutCompatibilityKind::Microsoft;
}
void RecordDecl::reorderDecls(const SmallVectorImpl<Decl *> &Decls) {
diff --git a/clang/lib/AST/DeclBase.cpp b/clang/lib/AST/DeclBase.cpp
index 30c6d3e..0a1e442 100644
--- a/clang/lib/AST/DeclBase.cpp
+++ b/clang/lib/AST/DeclBase.cpp
@@ -711,7 +711,7 @@ static AvailabilityResult CheckAvailability(ASTContext &Context,
// Make sure that this declaration has already been introduced.
if (!A->getIntroduced().empty() &&
EnclosingVersion < A->getIntroduced()) {
- IdentifierInfo *IIEnv = A->getEnvironment();
+ const IdentifierInfo *IIEnv = A->getEnvironment();
auto &Triple = Context.getTargetInfo().getTriple();
StringRef TargetEnv = Triple.getEnvironmentName();
StringRef EnvName =
diff --git a/clang/lib/AST/DeclCXX.cpp b/clang/lib/AST/DeclCXX.cpp
index 24e4f18..c16b1bb 100644
--- a/clang/lib/AST/DeclCXX.cpp
+++ b/clang/lib/AST/DeclCXX.cpp
@@ -3110,12 +3110,15 @@ CXXDestructorDecl *CXXDestructorDecl::Create(
}
void CXXDestructorDecl::setOperatorDelete(FunctionDecl *OD, Expr *ThisArg) {
- auto *First = cast<CXXDestructorDecl>(getFirstDecl());
- if (OD && !First->OperatorDelete) {
- First->OperatorDelete = OD;
- First->OperatorDeleteThisArg = ThisArg;
+ assert(!OD || (OD->getDeclName().getCXXOverloadedOperator() == OO_Delete));
+ if (OD && !getASTContext().dtorHasOperatorDelete(
+ this, ASTContext::OperatorDeleteKind::Regular)) {
+ getASTContext().addOperatorDeleteForVDtor(
+ this, OD, ASTContext::OperatorDeleteKind::Regular);
+ getCanonicalDecl()->OperatorDeleteThisArg = ThisArg;
if (auto *L = getASTMutationListener())
- L->ResolvedOperatorDelete(First, OD, ThisArg);
+ L->ResolvedOperatorDelete(cast<CXXDestructorDecl>(getCanonicalDecl()), OD,
+ ThisArg);
}
}
@@ -3127,14 +3130,63 @@ void CXXDestructorDecl::setOperatorGlobalDelete(FunctionDecl *OD) {
assert(!OD ||
(OD->getDeclName().getCXXOverloadedOperator() == OO_Delete &&
OD->getDeclContext()->getRedeclContext()->isTranslationUnit()));
- auto *Canonical = cast<CXXDestructorDecl>(getCanonicalDecl());
- if (!Canonical->OperatorGlobalDelete) {
- Canonical->OperatorGlobalDelete = OD;
+ if (OD && !getASTContext().dtorHasOperatorDelete(
+ this, ASTContext::OperatorDeleteKind::GlobalRegular)) {
+ getASTContext().addOperatorDeleteForVDtor(
+ this, OD, ASTContext::OperatorDeleteKind::GlobalRegular);
if (auto *L = getASTMutationListener())
- L->ResolvedOperatorGlobDelete(Canonical, OD);
+ L->ResolvedOperatorGlobDelete(cast<CXXDestructorDecl>(getCanonicalDecl()),
+ OD);
}
}
+void CXXDestructorDecl::setOperatorArrayDelete(FunctionDecl *OD) {
+ assert(!OD ||
+ (OD->getDeclName().getCXXOverloadedOperator() == OO_Array_Delete));
+ if (OD && !getASTContext().dtorHasOperatorDelete(
+ this, ASTContext::OperatorDeleteKind::Array)) {
+ getASTContext().addOperatorDeleteForVDtor(
+ this, OD, ASTContext::OperatorDeleteKind::Array);
+ if (auto *L = getASTMutationListener())
+ L->ResolvedOperatorArrayDelete(
+ cast<CXXDestructorDecl>(getCanonicalDecl()), OD);
+ }
+}
+
+void CXXDestructorDecl::setGlobalOperatorArrayDelete(FunctionDecl *OD) {
+ assert(!OD ||
+ (OD->getDeclName().getCXXOverloadedOperator() == OO_Array_Delete &&
+ OD->getDeclContext()->getRedeclContext()->isTranslationUnit()));
+ if (OD && !getASTContext().dtorHasOperatorDelete(
+ this, ASTContext::OperatorDeleteKind::ArrayGlobal)) {
+ getASTContext().addOperatorDeleteForVDtor(
+ this, OD, ASTContext::OperatorDeleteKind::ArrayGlobal);
+ if (auto *L = getASTMutationListener())
+ L->ResolvedOperatorGlobArrayDelete(
+ cast<CXXDestructorDecl>(getCanonicalDecl()), OD);
+ }
+}
+
+const FunctionDecl *CXXDestructorDecl::getOperatorDelete() const {
+ return getASTContext().getOperatorDeleteForVDtor(
+ this, ASTContext::OperatorDeleteKind::Regular);
+}
+
+const FunctionDecl *CXXDestructorDecl::getOperatorGlobalDelete() const {
+ return getASTContext().getOperatorDeleteForVDtor(
+ this, ASTContext::OperatorDeleteKind::GlobalRegular);
+}
+
+const FunctionDecl *CXXDestructorDecl::getArrayOperatorDelete() const {
+ return getASTContext().getOperatorDeleteForVDtor(
+ this, ASTContext::OperatorDeleteKind::Array);
+}
+
+const FunctionDecl *CXXDestructorDecl::getGlobalArrayOperatorDelete() const {
+ return getASTContext().getOperatorDeleteForVDtor(
+ this, ASTContext::OperatorDeleteKind::ArrayGlobal);
+}
+
bool CXXDestructorDecl::isCalledByDelete(const FunctionDecl *OpDel) const {
// C++20 [expr.delete]p6: If the value of the operand of the delete-
// expression is not a null pointer value and the selected deallocation
@@ -3146,7 +3198,8 @@ bool CXXDestructorDecl::isCalledByDelete(const FunctionDecl *OpDel) const {
// delete operator, as that destructor is never called, unless the
// destructor is virtual (see [expr.delete]p8.1) because then the
// selected operator depends on the dynamic type of the pointer.
- const FunctionDecl *SelectedOperatorDelete = OpDel ? OpDel : OperatorDelete;
+ const FunctionDecl *SelectedOperatorDelete =
+ OpDel ? OpDel : getOperatorDelete();
if (!SelectedOperatorDelete)
return true;
diff --git a/clang/lib/AST/DeclPrinter.cpp b/clang/lib/AST/DeclPrinter.cpp
index 47ae613..5e377a6 100644
--- a/clang/lib/AST/DeclPrinter.cpp
+++ b/clang/lib/AST/DeclPrinter.cpp
@@ -22,6 +22,7 @@
#include "clang/AST/PrettyPrinter.h"
#include "clang/Basic/Module.h"
#include "clang/Basic/SourceManager.h"
+#include "llvm/ADT/StringExtras.h"
#include "llvm/Support/raw_ostream.h"
using namespace clang;
@@ -124,9 +125,10 @@ namespace {
void printTemplateArguments(ArrayRef<TemplateArgumentLoc> Args,
const TemplateParameterList *Params);
enum class AttrPosAsWritten { Default = 0, Left, Right };
- bool
+ std::optional<std::string>
prettyPrintAttributes(const Decl *D,
AttrPosAsWritten Pos = AttrPosAsWritten::Default);
+
void prettyPrintPragmas(Decl *D);
void printDeclType(QualType T, StringRef DeclName, bool Pack = false);
};
@@ -252,41 +254,40 @@ static DeclPrinter::AttrPosAsWritten getPosAsWritten(const Attr *A,
return DeclPrinter::AttrPosAsWritten::Right;
}
-// returns true if an attribute was printed.
-bool DeclPrinter::prettyPrintAttributes(const Decl *D,
- AttrPosAsWritten Pos /*=Default*/) {
- bool hasPrinted = false;
+std::optional<std::string>
+DeclPrinter::prettyPrintAttributes(const Decl *D,
+ AttrPosAsWritten Pos /*=Default*/) {
+ if (Policy.SuppressDeclAttributes || !D->hasAttrs())
+ return std::nullopt;
- if (D->hasAttrs()) {
- const AttrVec &Attrs = D->getAttrs();
- for (auto *A : Attrs) {
- if (A->isInherited() || A->isImplicit())
- continue;
- // Print out the keyword attributes, they aren't regular attributes.
- if (Policy.PolishForDeclaration && !A->isKeywordAttribute())
- continue;
- switch (A->getKind()) {
+ std::string AttrStr;
+ llvm::raw_string_ostream AOut(AttrStr);
+ llvm::ListSeparator LS(" ");
+ for (auto *A : D->getAttrs()) {
+ if (A->isInherited() || A->isImplicit())
+ continue;
+ // Print out the keyword attributes, they aren't regular attributes.
+ if (Policy.PolishForDeclaration && !A->isKeywordAttribute())
+ continue;
+ switch (A->getKind()) {
#define ATTR(X)
#define PRAGMA_SPELLING_ATTR(X) case attr::X:
#include "clang/Basic/AttrList.inc"
- break;
- default:
- AttrPosAsWritten APos = getPosAsWritten(A, D);
- assert(APos != AttrPosAsWritten::Default &&
- "Default not a valid for an attribute location");
- if (Pos == AttrPosAsWritten::Default || Pos == APos) {
- if (Pos != AttrPosAsWritten::Left)
- Out << ' ';
- A->printPretty(Out, Policy);
- hasPrinted = true;
- if (Pos == AttrPosAsWritten::Left)
- Out << ' ';
- }
- break;
+ break;
+ default:
+ AttrPosAsWritten APos = getPosAsWritten(A, D);
+ assert(APos != AttrPosAsWritten::Default &&
+ "Default not a valid for an attribute location");
+ if (Pos == AttrPosAsWritten::Default || Pos == APos) {
+ AOut << LS;
+ A->printPretty(AOut, Policy);
}
+ break;
}
}
- return hasPrinted;
+ if (AttrStr.empty())
+ return std::nullopt;
+ return AttrStr;
}
void DeclPrinter::PrintOpenACCRoutineOnLambda(Decl *D) {
@@ -584,12 +585,15 @@ void DeclPrinter::VisitTypedefDecl(TypedefDecl *D) {
}
QualType Ty = D->getTypeSourceInfo()->getType();
Ty.print(Out, Policy, D->getName(), Indentation);
- prettyPrintAttributes(D);
+
+ if (std::optional<std::string> Attrs = prettyPrintAttributes(D))
+ Out << ' ' << *Attrs;
}
void DeclPrinter::VisitTypeAliasDecl(TypeAliasDecl *D) {
Out << "using " << *D;
- prettyPrintAttributes(D);
+ if (std::optional<std::string> Attrs = prettyPrintAttributes(D))
+ Out << ' ' << *Attrs;
Out << " = " << D->getTypeSourceInfo()->getType().getAsString(Policy);
}
@@ -604,7 +608,8 @@ void DeclPrinter::VisitEnumDecl(EnumDecl *D) {
Out << " struct";
}
- prettyPrintAttributes(D);
+ if (std::optional<std::string> Attrs = prettyPrintAttributes(D))
+ Out << ' ' << *Attrs;
if (D->getDeclName())
Out << ' ' << D->getDeclName();
@@ -624,7 +629,8 @@ void DeclPrinter::VisitRecordDecl(RecordDecl *D) {
Out << "__module_private__ ";
Out << D->getKindName();
- prettyPrintAttributes(D);
+ if (std::optional<std::string> Attrs = prettyPrintAttributes(D))
+ Out << ' ' << *Attrs;
if (D->getIdentifier())
Out << ' ' << *D;
@@ -638,7 +644,8 @@ void DeclPrinter::VisitRecordDecl(RecordDecl *D) {
void DeclPrinter::VisitEnumConstantDecl(EnumConstantDecl *D) {
Out << *D;
- prettyPrintAttributes(D);
+ if (std::optional<std::string> Attrs = prettyPrintAttributes(D))
+ Out << ' ' << *Attrs;
if (Expr *Init = D->getInitExpr()) {
Out << " = ";
Init->printPretty(Out, nullptr, Policy, Indentation, "\n", &Context);
@@ -664,7 +671,9 @@ void DeclPrinter::VisitFunctionDecl(FunctionDecl *D) {
if (!D->getDescribedFunctionTemplate() &&
!D->isFunctionTemplateSpecialization()) {
prettyPrintPragmas(D);
- prettyPrintAttributes(D, AttrPosAsWritten::Left);
+ if (std::optional<std::string> Attrs =
+ prettyPrintAttributes(D, AttrPosAsWritten::Left))
+ Out << *Attrs << ' ';
}
if (D->isFunctionTemplateSpecialization())
@@ -836,7 +845,9 @@ void DeclPrinter::VisitFunctionDecl(FunctionDecl *D) {
Ty.print(Out, Policy, Proto);
}
- prettyPrintAttributes(D, AttrPosAsWritten::Right);
+ if (std::optional<std::string> Attrs =
+ prettyPrintAttributes(D, AttrPosAsWritten::Right))
+ Out << ' ' << *Attrs;
if (D->isPureVirtual())
Out << " = 0";
@@ -928,7 +939,8 @@ void DeclPrinter::VisitFieldDecl(FieldDecl *D) {
Out << " = ";
Init->printPretty(Out, nullptr, Policy, Indentation, "\n", &Context);
}
- prettyPrintAttributes(D);
+ if (std::optional<std::string> Attrs = prettyPrintAttributes(D))
+ Out << ' ' << *Attrs;
}
void DeclPrinter::VisitLabelDecl(LabelDecl *D) {
@@ -938,7 +950,9 @@ void DeclPrinter::VisitLabelDecl(LabelDecl *D) {
void DeclPrinter::VisitVarDecl(VarDecl *D) {
prettyPrintPragmas(D);
- prettyPrintAttributes(D, AttrPosAsWritten::Left);
+ if (std::optional<std::string> Attrs =
+ prettyPrintAttributes(D, AttrPosAsWritten::Left))
+ Out << *Attrs << ' ';
if (const auto *Param = dyn_cast<ParmVarDecl>(D);
Param && Param->isExplicitObjectParameter())
@@ -981,7 +995,9 @@ void DeclPrinter::VisitVarDecl(VarDecl *D) {
? D->getIdentifier()->deuglifiedName()
: D->getName());
- prettyPrintAttributes(D, AttrPosAsWritten::Right);
+ if (std::optional<std::string> Attrs =
+ prettyPrintAttributes(D, AttrPosAsWritten::Right))
+ Out << ' ' << *Attrs;
Expr *Init = D->getInit();
if (!Policy.SuppressInitializers && Init) {
@@ -1073,7 +1089,8 @@ void DeclPrinter::VisitNamespaceAliasDecl(NamespaceAliasDecl *D) {
}
void DeclPrinter::VisitEmptyDecl(EmptyDecl *D) {
- prettyPrintAttributes(D);
+ if (std::optional<std::string> Attrs = prettyPrintAttributes(D))
+ Out << *Attrs;
}
void DeclPrinter::VisitCXXRecordDecl(CXXRecordDecl *D) {
@@ -1083,10 +1100,9 @@ void DeclPrinter::VisitCXXRecordDecl(CXXRecordDecl *D) {
Out << D->getKindName() << ' ';
- // FIXME: Move before printing the decl kind to match the behavior of the
- // attribute printing for variables and function where they are printed first.
- if (prettyPrintAttributes(D, AttrPosAsWritten::Left))
- Out << ' ';
+ if (std::optional<std::string> Attrs =
+ prettyPrintAttributes(D, AttrPosAsWritten::Left))
+ Out << *Attrs << ' ';
if (D->getIdentifier()) {
D->getQualifier().print(Out, Policy);
@@ -1104,7 +1120,9 @@ void DeclPrinter::VisitCXXRecordDecl(CXXRecordDecl *D) {
}
}
- prettyPrintAttributes(D, AttrPosAsWritten::Right);
+ if (std::optional<std::string> Attrs =
+ prettyPrintAttributes(D, AttrPosAsWritten::Right))
+ Out << ' ' << *Attrs;
if (D->isCompleteDefinition()) {
Out << ' ';
@@ -1421,7 +1439,8 @@ void DeclPrinter::VisitObjCMethodDecl(ObjCMethodDecl *OMD) {
if (OMD->isVariadic())
Out << ", ...";
- prettyPrintAttributes(OMD);
+ if (std::optional<std::string> Attrs = prettyPrintAttributes(OMD))
+ Out << ' ' << *Attrs;
if (OMD->getBody() && !Policy.TerseOutput) {
Out << ' ';
@@ -1477,10 +1496,8 @@ void DeclPrinter::VisitObjCInterfaceDecl(ObjCInterfaceDecl *OID) {
return;
}
bool eolnOut = false;
- if (OID->hasAttrs()) {
- prettyPrintAttributes(OID);
- Out << "\n";
- }
+ if (std::optional<std::string> Attrs = prettyPrintAttributes(OID))
+ Out << *Attrs << "\n";
Out << "@interface " << I;
@@ -1777,7 +1794,8 @@ void DeclPrinter::VisitHLSLBufferDecl(HLSLBufferDecl *D) {
Out << *D;
- prettyPrintAttributes(D);
+ if (std::optional<std::string> Attrs = prettyPrintAttributes(D))
+ Out << ' ' << *Attrs;
Out << " {\n";
VisitDeclContext(D);
diff --git a/clang/lib/AST/DeclTemplate.cpp b/clang/lib/AST/DeclTemplate.cpp
index 2f7ae6d..e76e464 100644
--- a/clang/lib/AST/DeclTemplate.cpp
+++ b/clang/lib/AST/DeclTemplate.cpp
@@ -369,12 +369,6 @@ bool RedeclarableTemplateDecl::loadLazySpecializationsImpl(
if (!ExternalSource)
return false;
- // If TPL is not null, it implies that we're loading specializations for
- // partial templates. We need to load all specializations in such cases.
- if (TPL)
- return ExternalSource->LoadExternalSpecializations(this->getCanonicalDecl(),
- /*OnlyPartial=*/false);
-
return ExternalSource->LoadExternalSpecializations(this->getCanonicalDecl(),
Args);
}
diff --git a/clang/lib/AST/Expr.cpp b/clang/lib/AST/Expr.cpp
index 340bb4b..9632d88 100644
--- a/clang/lib/AST/Expr.cpp
+++ b/clang/lib/AST/Expr.cpp
@@ -25,6 +25,7 @@
#include "clang/AST/IgnoreExpr.h"
#include "clang/AST/Mangle.h"
#include "clang/AST/RecordLayout.h"
+#include "clang/AST/TypeBase.h"
#include "clang/Basic/Builtins.h"
#include "clang/Basic/CharInfo.h"
#include "clang/Basic/SourceManager.h"
@@ -71,6 +72,9 @@ const CXXRecordDecl *Expr::getBestDynamicClassType() const {
if (const PointerType *PTy = DerivedType->getAs<PointerType>())
DerivedType = PTy->getPointeeType();
+ while (const ArrayType *ATy = DerivedType->getAsArrayTypeUnsafe())
+ DerivedType = ATy->getElementType();
+
if (DerivedType->isDependentType())
return nullptr;
@@ -1934,6 +1938,7 @@ bool CastExpr::CastConsistency() const {
case CK_FixedPointToBoolean:
case CK_HLSLArrayRValue:
case CK_HLSLVectorTruncation:
+ case CK_HLSLMatrixTruncation:
case CK_HLSLElementwiseCast:
case CK_HLSLAggregateSplatCast:
CheckNoBasePath:
@@ -3730,6 +3735,7 @@ bool Expr::HasSideEffects(const ASTContext &Ctx,
case PackIndexingExprClass:
case HLSLOutArgExprClass:
case OpenACCAsteriskSizeExprClass:
+ case CXXReflectExprClass:
// These never have a side-effect.
return false;
@@ -3788,6 +3794,7 @@ bool Expr::HasSideEffects(const ASTContext &Ctx,
case ParenExprClass:
case ArraySubscriptExprClass:
+ case MatrixSingleSubscriptExprClass:
case MatrixSubscriptExprClass:
case ArraySectionExprClass:
case OMPArrayShapingExprClass:
@@ -3797,6 +3804,7 @@ bool Expr::HasSideEffects(const ASTContext &Ctx,
case BinaryConditionalOperatorClass:
case CompoundLiteralExprClass:
case ExtVectorElementExprClass:
+ case MatrixElementExprClass:
case DesignatedInitExprClass:
case DesignatedInitUpdateExprClass:
case ArrayInitLoopExprClass:
@@ -4417,7 +4425,14 @@ unsigned ExtVectorElementExpr::getNumElements() const {
return 1;
}
-/// containsDuplicateElements - Return true if any element access is repeated.
+unsigned MatrixElementExpr::getNumElements() const {
+ if (const auto *MT = getType()->getAs<ConstantMatrixType>())
+ return MT->getNumElementsFlattened();
+ return 1;
+}
+
+/// containsDuplicateElements - Return true if any Vector element access is
+/// repeated.
bool ExtVectorElementExpr::containsDuplicateElements() const {
// FIXME: Refactor this code to an accessor on the AST node which returns the
// "type" of component access, and share with code below and in Sema.
@@ -4438,6 +4453,78 @@ bool ExtVectorElementExpr::containsDuplicateElements() const {
return false;
}
+namespace {
+struct MatrixAccessorFormat {
+ bool IsZeroIndexed = false;
+ unsigned ChunkLen = 0;
+};
+
+static MatrixAccessorFormat GetHLSLMatrixAccessorFormat(StringRef Comp) {
+ assert(!Comp.empty() && Comp[0] == '_' && "invalid matrix accessor");
+
+ MatrixAccessorFormat F;
+ if (Comp.size() >= 2 && Comp[0] == '_' && Comp[1] == 'm') {
+ F.IsZeroIndexed = true;
+ F.ChunkLen = 4; // _mRC
+ } else {
+ F.IsZeroIndexed = false;
+ F.ChunkLen = 3; // _RC
+ }
+
+ assert(F.ChunkLen != 0 && "unrecognized matrix swizzle format");
+ assert(Comp.size() % F.ChunkLen == 0 &&
+ "matrix swizzle accessor has invalid length");
+ return F;
+}
+
+template <typename Fn>
+static bool ForEachMatrixAccessorIndex(StringRef Comp, unsigned Rows,
+ unsigned Cols, Fn &&F) {
+ auto Format = GetHLSLMatrixAccessorFormat(Comp);
+
+ for (unsigned I = 0, E = Comp.size(); I < E; I += Format.ChunkLen) {
+ unsigned Row = 0, Col = 0;
+ unsigned ZeroIndexOffset = static_cast<unsigned>(Format.IsZeroIndexed);
+ unsigned OneIndexOffset = static_cast<unsigned>(!Format.IsZeroIndexed);
+ Row = static_cast<unsigned>(Comp[I + ZeroIndexOffset + 1] - '0') -
+ OneIndexOffset;
+ Col = static_cast<unsigned>(Comp[I + ZeroIndexOffset + 2] - '0') -
+ OneIndexOffset;
+
+ assert(Row < Rows && Col < Cols && "matrix swizzle index out of bounds");
+ const unsigned Index = Row * Cols + Col;
+ // Callback returns true to continue, false to stop early.
+ if (!F(Index))
+ return false;
+ }
+ return true;
+}
+
+} // namespace
+
+/// containsDuplicateElements - Return true if any Matrix element access is
+/// repeated.
+bool MatrixElementExpr::containsDuplicateElements() const {
+ StringRef Comp = Accessor->getName();
+ const auto *MT = getBase()->getType()->castAs<ConstantMatrixType>();
+ const unsigned Rows = MT->getNumRows();
+ const unsigned Cols = MT->getNumColumns();
+ const unsigned Max = Rows * Cols;
+
+ llvm::BitVector Seen(Max, /*t=*/false);
+ bool HasDup = false;
+ ForEachMatrixAccessorIndex(Comp, Rows, Cols, [&](unsigned Index) -> bool {
+ if (Seen[Index]) {
+ HasDup = true;
+ return false; // exit early
+ }
+ Seen.set(Index);
+ return true;
+ });
+
+ return HasDup;
+}
+
/// getEncodedElementAccess - We encode the fields as a llvm ConstantArray.
void ExtVectorElementExpr::getEncodedElementAccess(
SmallVectorImpl<uint32_t> &Elts) const {
@@ -4471,6 +4558,18 @@ void ExtVectorElementExpr::getEncodedElementAccess(
}
}
+void MatrixElementExpr::getEncodedElementAccess(
+ SmallVectorImpl<uint32_t> &Elts) const {
+ StringRef Comp = Accessor->getName();
+ const auto *MT = getBase()->getType()->castAs<ConstantMatrixType>();
+ const unsigned Rows = MT->getNumRows();
+ const unsigned Cols = MT->getNumColumns();
+ ForEachMatrixAccessorIndex(Comp, Rows, Cols, [&](unsigned Index) -> bool {
+ Elts.push_back(Index);
+ return true;
+ });
+}
+
ShuffleVectorExpr::ShuffleVectorExpr(const ASTContext &C, ArrayRef<Expr *> args,
QualType Type, SourceLocation BLoc,
SourceLocation RP)
@@ -5191,6 +5290,8 @@ unsigned AtomicExpr::getNumSubExprs(AtomicOp Op) {
case AO__atomic_max_fetch:
case AO__atomic_fetch_min:
case AO__atomic_fetch_max:
+ case AO__atomic_fetch_uinc:
+ case AO__atomic_fetch_udec:
return 3;
case AO__scoped_atomic_load:
@@ -5213,6 +5314,8 @@ unsigned AtomicExpr::getNumSubExprs(AtomicOp Op) {
case AO__scoped_atomic_fetch_min:
case AO__scoped_atomic_fetch_max:
case AO__scoped_atomic_exchange_n:
+ case AO__scoped_atomic_fetch_uinc:
+ case AO__scoped_atomic_fetch_udec:
case AO__hip_atomic_exchange:
case AO__hip_atomic_fetch_add:
case AO__hip_atomic_fetch_sub:
diff --git a/clang/lib/AST/ExprCXX.cpp b/clang/lib/AST/ExprCXX.cpp
index c7f0ff0..bcc481f 100644
--- a/clang/lib/AST/ExprCXX.cpp
+++ b/clang/lib/AST/ExprCXX.cpp
@@ -1939,6 +1939,24 @@ TypeTraitExpr *TypeTraitExpr::CreateDeserialized(const ASTContext &C,
return new (Mem) TypeTraitExpr(EmptyShell(), IsStoredAsBool);
}
/// Construct an empty CXXReflectExpr shell for deserialization.
CXXReflectExpr::CXXReflectExpr(EmptyShell Empty)
    : Expr(CXXReflectExprClass, Empty) {}

/// Construct a reflect-expression over the type described by \p TSI. The
/// expression is a prvalue whose type is taken from the TypeSourceInfo;
/// \p CaretCaretLoc records the location of the '^^' token (per the member
/// name — confirm against the parser).
CXXReflectExpr::CXXReflectExpr(SourceLocation CaretCaretLoc,
                               const TypeSourceInfo *TSI)
    : Expr(CXXReflectExprClass, TSI->getType(), VK_PRValue, OK_Ordinary),
      CaretCaretLoc(CaretCaretLoc), Operand(TSI) {}

/// Allocate a CXXReflectExpr over a type operand in the ASTContext.
CXXReflectExpr *CXXReflectExpr::Create(ASTContext &C,
                                       SourceLocation CaretCaretLoc,
                                       TypeSourceInfo *TSI) {
  return new (C) CXXReflectExpr(CaretCaretLoc, TSI);
}

/// Allocate an empty CXXReflectExpr, to be filled in by deserialization.
CXXReflectExpr *CXXReflectExpr::CreateEmpty(ASTContext &C) {
  return new (C) CXXReflectExpr(EmptyShell());
}
+
CUDAKernelCallExpr::CUDAKernelCallExpr(Expr *Fn, CallExpr *Config,
ArrayRef<Expr *> Args, QualType Ty,
ExprValueKind VK, SourceLocation RP,
diff --git a/clang/lib/AST/ExprClassification.cpp b/clang/lib/AST/ExprClassification.cpp
index aeacd0d..a83c170 100644
--- a/clang/lib/AST/ExprClassification.cpp
+++ b/clang/lib/AST/ExprClassification.cpp
@@ -63,6 +63,7 @@ Cl Expr::ClassifyImpl(ASTContext &Ctx, SourceLocation *Loc) const {
case Cl::CL_Void:
case Cl::CL_AddressableVoid:
case Cl::CL_DuplicateVectorComponents:
+ case Cl::CL_DuplicateMatrixComponents:
case Cl::CL_MemberFunction:
case Cl::CL_SubObjCPropertySetting:
case Cl::CL_ClassTemporary:
@@ -216,6 +217,7 @@ static Cl::Kinds ClassifyInternal(ASTContext &Ctx, const Expr *E) {
case Expr::SourceLocExprClass:
case Expr::ConceptSpecializationExprClass:
case Expr::RequiresExprClass:
+ case Expr::CXXReflectExprClass:
return Cl::CL_PRValue;
case Expr::EmbedExprClass:
@@ -259,6 +261,9 @@ static Cl::Kinds ClassifyInternal(ASTContext &Ctx, const Expr *E) {
}
return Cl::CL_LValue;
+ case Expr::MatrixSingleSubscriptExprClass:
+ return ClassifyInternal(Ctx, cast<MatrixSingleSubscriptExpr>(E)->getBase());
+
// Subscripting matrix types behaves like member accesses.
case Expr::MatrixSubscriptExprClass:
return ClassifyInternal(Ctx, cast<MatrixSubscriptExpr>(E)->getBase());
@@ -369,6 +374,16 @@ static Cl::Kinds ClassifyInternal(ASTContext &Ctx, const Expr *E) {
return Cl::CL_LValue;
return ClassifyInternal(Ctx, cast<ExtVectorElementExpr>(E)->getBase());
+ // Matrix element access is an lvalue unless there are duplicates
+ // in the shuffle expression.
+ case Expr::MatrixElementExprClass:
+ if (cast<MatrixElementExpr>(E)->containsDuplicateElements())
+ return Cl::CL_DuplicateMatrixComponents;
+ // NOTE: MatrixElementExpr is currently only used by HLSL which does not
+ // have pointers so there is no isArrow() necessary or way to test
+ // Cl::CL_LValue
+ return ClassifyInternal(Ctx, cast<MatrixElementExpr>(E)->getBase());
+
// Simply look at the actual default argument.
case Expr::CXXDefaultArgExprClass:
return ClassifyInternal(Ctx, cast<CXXDefaultArgExpr>(E)->getExpr());
@@ -735,6 +750,8 @@ Expr::LValueClassification Expr::ClassifyLValue(ASTContext &Ctx) const {
case Cl::CL_Void: return LV_InvalidExpression;
case Cl::CL_AddressableVoid: return LV_IncompleteVoidType;
case Cl::CL_DuplicateVectorComponents: return LV_DuplicateVectorComponents;
+ case Cl::CL_DuplicateMatrixComponents:
+ return LV_DuplicateMatrixComponents;
case Cl::CL_MemberFunction: return LV_MemberFunction;
case Cl::CL_SubObjCPropertySetting: return LV_SubObjCPropertySetting;
case Cl::CL_ClassTemporary: return LV_ClassTemporary;
@@ -756,6 +773,8 @@ Expr::isModifiableLvalue(ASTContext &Ctx, SourceLocation *Loc) const {
case Cl::CL_Void: return MLV_InvalidExpression;
case Cl::CL_AddressableVoid: return MLV_IncompleteVoidType;
case Cl::CL_DuplicateVectorComponents: return MLV_DuplicateVectorComponents;
+ case Cl::CL_DuplicateMatrixComponents:
+ return MLV_DuplicateMatrixComponents;
case Cl::CL_MemberFunction: return MLV_MemberFunction;
case Cl::CL_SubObjCPropertySetting: return MLV_SubObjCPropertySetting;
case Cl::CL_ClassTemporary: return MLV_ClassTemporary;
diff --git a/clang/lib/AST/ExprConcepts.cpp b/clang/lib/AST/ExprConcepts.cpp
index 36f910d..b2e4d6b 100644
--- a/clang/lib/AST/ExprConcepts.cpp
+++ b/clang/lib/AST/ExprConcepts.cpp
@@ -101,8 +101,13 @@ concepts::ExprRequirement::ReturnTypeRequirement::getTypeConstraint() const {
// Search through the requirements, and see if any have a RecoveryExpr in it,
// which means this RequiresExpr ALSO needs to be invalid.
static bool RequirementContainsError(concepts::Requirement *R) {
- if (auto *ExprReq = dyn_cast<concepts::ExprRequirement>(R))
- return ExprReq->getExpr() && ExprReq->getExpr()->containsErrors();
+ if (auto *ExprReq = dyn_cast<concepts::ExprRequirement>(R)) {
+ if (ExprReq->isExprSubstitutionFailure())
+ return true;
+ if (auto *E = ExprReq->getExpr())
+ return E->containsErrors();
+ return false;
+ }
if (auto *NestedReq = dyn_cast<concepts::NestedRequirement>(R))
return !NestedReq->hasInvalidConstraint() &&
diff --git a/clang/lib/AST/ExprConstShared.h b/clang/lib/AST/ExprConstShared.h
index 401ae62..550b36c 100644
--- a/clang/lib/AST/ExprConstShared.h
+++ b/clang/lib/AST/ExprConstShared.h
@@ -15,9 +15,12 @@
#define LLVM_CLANG_LIB_AST_EXPRCONSTSHARED_H
#include "clang/Basic/TypeTraits.h"
+#include <cstdint>
namespace llvm {
class APFloat;
+class APSInt;
+class APInt;
}
namespace clang {
class QualType;
@@ -74,4 +77,11 @@ void HandleComplexComplexDiv(llvm::APFloat A, llvm::APFloat B, llvm::APFloat C,
CharUnits GetAlignOfExpr(const ASTContext &Ctx, const Expr *E,
UnaryExprOrTypeTrait ExprKind);
+uint8_t GFNIMultiplicativeInverse(uint8_t Byte);
+uint8_t GFNIMul(uint8_t AByte, uint8_t BByte);
+uint8_t GFNIAffine(uint8_t XByte, const llvm::APInt &AQword,
+ const llvm::APSInt &Imm, bool Inverse = false);
+llvm::APSInt NormalizeRotateAmount(const llvm::APSInt &Value,
+ const llvm::APSInt &Amount);
+
#endif
diff --git a/clang/lib/AST/ExprConstant.cpp b/clang/lib/AST/ExprConstant.cpp
index 16141b2..23a40c9 100644
--- a/clang/lib/AST/ExprConstant.cpp
+++ b/clang/lib/AST/ExprConstant.cpp
@@ -44,6 +44,7 @@
#include "clang/AST/CharUnits.h"
#include "clang/AST/CurrentSourceLocExprScope.h"
#include "clang/AST/Expr.h"
+#include "clang/AST/InferAlloc.h"
#include "clang/AST/OSLog.h"
#include "clang/AST/OptionalDiagnostic.h"
#include "clang/AST/RecordLayout.h"
@@ -793,13 +794,8 @@ namespace {
/// rules. For example, the RHS of (0 && foo()) is not evaluated. We can
/// evaluate the expression regardless of what the RHS is, but C only allows
/// certain things in certain situations.
- class EvalInfo : public interp::State {
+ class EvalInfo final : public interp::State {
public:
- ASTContext &Ctx;
-
- /// EvalStatus - Contains information about the evaluation.
- Expr::EvalStatus &EvalStatus;
-
/// CurrentCall - The top of the constexpr call stack.
CallStackFrame *CurrentCall;
@@ -918,16 +914,8 @@ namespace {
/// initialization.
uint64_t ArrayInitIndex = -1;
- /// HasActiveDiagnostic - Was the previous diagnostic stored? If so, further
- /// notes attached to it will also be stored, otherwise they will not be.
- bool HasActiveDiagnostic;
-
- /// Have we emitted a diagnostic explaining why we couldn't constant
- /// fold (not just why it's not strictly a constant expression)?
- bool HasFoldFailureDiagnostic;
-
EvalInfo(const ASTContext &C, Expr::EvalStatus &S, EvaluationMode Mode)
- : Ctx(const_cast<ASTContext &>(C)), EvalStatus(S), CurrentCall(nullptr),
+ : State(const_cast<ASTContext &>(C), S), CurrentCall(nullptr),
CallStackDepth(0), NextCallIndex(1),
StepsLeft(C.getLangOpts().ConstexprStepLimit),
EnableNewConstInterp(C.getLangOpts().EnableNewConstInterp),
@@ -935,8 +923,7 @@ namespace {
/*This=*/nullptr,
/*CallExpr=*/nullptr, CallRef()),
EvaluatingDecl((const ValueDecl *)nullptr),
- EvaluatingDeclValue(nullptr), HasActiveDiagnostic(false),
- HasFoldFailureDiagnostic(false) {
+ EvaluatingDeclValue(nullptr) {
EvalMode = Mode;
}
@@ -944,9 +931,6 @@ namespace {
discardCleanups();
}
- ASTContext &getASTContext() const override { return Ctx; }
- const LangOptions &getLangOpts() const { return Ctx.getLangOpts(); }
-
void setEvaluatingDecl(APValue::LValueBase Base, APValue &Value,
EvaluatingDeclKind EDK = EvaluatingDeclKind::Ctor) {
EvaluatingDecl = Base;
@@ -989,7 +973,7 @@ namespace {
// We use the number of constexpr steps as a proxy for the maximum size
// of arrays to avoid exhausting the system resources, as initialization
// of each element is likely to take some number of steps anyway.
- uint64_t Limit = Ctx.getLangOpts().ConstexprStepLimit;
+ uint64_t Limit = getLangOpts().ConstexprStepLimit;
if (Limit != 0 && ElemCount > Limit) {
if (Diag)
FFDiag(Loc, diag::note_constexpr_new_exceeds_limits)
@@ -1016,7 +1000,7 @@ namespace {
}
bool nextStep(const Stmt *S) {
- if (Ctx.getLangOpts().ConstexprStepLimit == 0)
+ if (getLangOpts().ConstexprStepLimit == 0)
return true;
if (!StepsLeft) {
@@ -1100,110 +1084,13 @@ namespace {
}
private:
- interp::Frame *getCurrentFrame() override { return CurrentCall; }
+ const interp::Frame *getCurrentFrame() override { return CurrentCall; }
const interp::Frame *getBottomFrame() const override { return &BottomFrame; }
- bool hasActiveDiagnostic() override { return HasActiveDiagnostic; }
- void setActiveDiagnostic(bool Flag) override { HasActiveDiagnostic = Flag; }
-
- void setFoldFailureDiagnostic(bool Flag) override {
- HasFoldFailureDiagnostic = Flag;
- }
-
- Expr::EvalStatus &getEvalStatus() const override { return EvalStatus; }
-
- // If we have a prior diagnostic, it will be noting that the expression
- // isn't a constant expression. This diagnostic is more important,
- // unless we require this evaluation to produce a constant expression.
- //
- // FIXME: We might want to show both diagnostics to the user in
- // EvaluationMode::ConstantFold mode.
- bool hasPriorDiagnostic() override {
- if (!EvalStatus.Diag->empty()) {
- switch (EvalMode) {
- case EvaluationMode::ConstantFold:
- case EvaluationMode::IgnoreSideEffects:
- if (!HasFoldFailureDiagnostic)
- break;
- // We've already failed to fold something. Keep that diagnostic.
- [[fallthrough]];
- case EvaluationMode::ConstantExpression:
- case EvaluationMode::ConstantExpressionUnevaluated:
- setActiveDiagnostic(false);
- return true;
- }
- }
- return false;
- }
-
unsigned getCallStackDepth() override { return CallStackDepth; }
+ bool stepsLeft() const override { return StepsLeft > 0; }
public:
- /// Should we continue evaluation after encountering a side-effect that we
- /// couldn't model?
- bool keepEvaluatingAfterSideEffect() const override {
- switch (EvalMode) {
- case EvaluationMode::IgnoreSideEffects:
- return true;
-
- case EvaluationMode::ConstantExpression:
- case EvaluationMode::ConstantExpressionUnevaluated:
- case EvaluationMode::ConstantFold:
- // By default, assume any side effect might be valid in some other
- // evaluation of this expression from a different context.
- return checkingPotentialConstantExpression() ||
- checkingForUndefinedBehavior();
- }
- llvm_unreachable("Missed EvalMode case");
- }
-
- /// Note that we have had a side-effect, and determine whether we should
- /// keep evaluating.
- bool noteSideEffect() override {
- EvalStatus.HasSideEffects = true;
- return keepEvaluatingAfterSideEffect();
- }
-
- /// Should we continue evaluation after encountering undefined behavior?
- bool keepEvaluatingAfterUndefinedBehavior() {
- switch (EvalMode) {
- case EvaluationMode::IgnoreSideEffects:
- case EvaluationMode::ConstantFold:
- return true;
-
- case EvaluationMode::ConstantExpression:
- case EvaluationMode::ConstantExpressionUnevaluated:
- return checkingForUndefinedBehavior();
- }
- llvm_unreachable("Missed EvalMode case");
- }
-
- /// Note that we hit something that was technically undefined behavior, but
- /// that we can evaluate past it (such as signed overflow or floating-point
- /// division by zero.)
- bool noteUndefinedBehavior() override {
- EvalStatus.HasUndefinedBehavior = true;
- return keepEvaluatingAfterUndefinedBehavior();
- }
-
- /// Should we continue evaluation as much as possible after encountering a
- /// construct which can't be reduced to a value?
- bool keepEvaluatingAfterFailure() const override {
- uint64_t Limit = Ctx.getLangOpts().ConstexprStepLimit;
- if (Limit != 0 && !StepsLeft)
- return false;
-
- switch (EvalMode) {
- case EvaluationMode::ConstantExpression:
- case EvaluationMode::ConstantExpressionUnevaluated:
- case EvaluationMode::ConstantFold:
- case EvaluationMode::IgnoreSideEffects:
- return checkingPotentialConstantExpression() ||
- checkingForUndefinedBehavior();
- }
- llvm_unreachable("Missed EvalMode case");
- }
-
/// Notes that we failed to evaluate an expression that other expressions
/// directly depend on, and determine if we should keep evaluating. This
/// should only be called if we actually intend to keep evaluating.
@@ -1888,9 +1775,9 @@ static bool EvaluateComplex(const Expr *E, ComplexValue &Res, EvalInfo &Info);
static bool EvaluateAtomic(const Expr *E, const LValue *This, APValue &Result,
EvalInfo &Info);
static bool EvaluateAsRValue(EvalInfo &Info, const Expr *E, APValue &Result);
-static bool EvaluateBuiltinStrLen(const Expr *E, uint64_t &Result,
- EvalInfo &Info,
- std::string *StringResult = nullptr);
+static std::optional<uint64_t>
+EvaluateBuiltinStrLen(const Expr *E, EvalInfo &Info,
+ std::string *StringResult = nullptr);
/// Evaluate an integer or fixed point expression into an APResult.
static bool EvaluateFixedPointOrInteger(const Expr *E, APFixedPoint &Result,
@@ -1972,10 +1859,12 @@ APValue *EvalInfo::createHeapAlloc(const Expr *E, QualType T, LValue &LV) {
/// Produce a string describing the given constexpr call.
void CallStackFrame::describe(raw_ostream &Out) const {
- unsigned ArgIndex = 0;
- bool IsMemberCall =
- isa<CXXMethodDecl>(Callee) && !isa<CXXConstructorDecl>(Callee) &&
- cast<CXXMethodDecl>(Callee)->isImplicitObjectMemberFunction();
+ bool IsMemberCall = false;
+ bool ExplicitInstanceParam = false;
+ if (const auto *MD = dyn_cast<CXXMethodDecl>(Callee)) {
+ IsMemberCall = !isa<CXXConstructorDecl>(MD) && !MD->isStatic();
+ ExplicitInstanceParam = MD->isExplicitObjectMemberFunction();
+ }
if (!IsMemberCall)
Callee->getNameForDiagnostic(Out, Info.Ctx.getPrintingPolicy(),
@@ -2006,25 +1895,19 @@ void CallStackFrame::describe(raw_ostream &Out) const {
}
Callee->getNameForDiagnostic(Out, Info.Ctx.getPrintingPolicy(),
/*Qualified=*/false);
- IsMemberCall = false;
}
Out << '(';
- for (FunctionDecl::param_const_iterator I = Callee->param_begin(),
- E = Callee->param_end(); I != E; ++I, ++ArgIndex) {
- if (ArgIndex > (unsigned)IsMemberCall)
- Out << ", ";
-
- const ParmVarDecl *Param = *I;
- APValue *V = Info.getParamSlot(Arguments, Param);
+ llvm::ListSeparator Comma;
+ for (const ParmVarDecl *Param :
+ Callee->parameters().slice(ExplicitInstanceParam)) {
+ Out << Comma;
+ const APValue *V = Info.getParamSlot(Arguments, Param);
if (V)
V->printPretty(Out, Info.Ctx, Param->getType());
else
Out << "<...>";
-
- if (ArgIndex == 0 && IsMemberCall)
- Out << "->" << *Callee << '(';
}
Out << ')';
@@ -2406,16 +2289,17 @@ static bool CheckLValueConstantExpression(EvalInfo &Info, SourceLocation Loc,
return false;
// A dllimport variable never acts like a constant, unless we're
- // evaluating a value for use only in name mangling.
- if (!isForManglingOnly(Kind) && Var->hasAttr<DLLImportAttr>())
- // FIXME: Diagnostic!
+ // evaluating a value for use only in name mangling, and unless it's a
+ // static local. For the latter case, we'd still need to evaluate the
+ // constant expression in case we're inside a (inlined) function.
+ if (!isForManglingOnly(Kind) && Var->hasAttr<DLLImportAttr>() &&
+ !Var->isStaticLocal())
return false;
// In CUDA/HIP device compilation, only device side variables have
// constant addresses.
- if (Info.getASTContext().getLangOpts().CUDA &&
- Info.getASTContext().getLangOpts().CUDAIsDevice &&
- Info.getASTContext().CUDAConstantEvalCtx.NoWrongSidedVars) {
+ if (Info.getLangOpts().CUDA && Info.getLangOpts().CUDAIsDevice &&
+ Info.Ctx.CUDAConstantEvalCtx.NoWrongSidedVars) {
if ((!Var->hasAttr<CUDADeviceAttr>() &&
!Var->hasAttr<CUDAConstantAttr>() &&
!Var->getType()->isCUDADeviceBuiltinSurfaceType() &&
@@ -2768,7 +2652,7 @@ static bool HandleFloatToIntCast(EvalInfo &Info, const Expr *E,
/// So return "tonearest" mode instead of "dynamic".
static llvm::RoundingMode getActiveRoundingMode(EvalInfo &Info, const Expr *E) {
llvm::RoundingMode RM =
- E->getFPFeaturesInEffect(Info.Ctx.getLangOpts()).getRoundingMode();
+ E->getFPFeaturesInEffect(Info.getLangOpts()).getRoundingMode();
if (RM == llvm::RoundingMode::Dynamic)
RM = llvm::RoundingMode::NearestTiesToEven;
return RM;
@@ -2782,7 +2666,7 @@ static bool checkFloatingPointResult(EvalInfo &Info, const Expr *E,
if (Info.InConstantContext)
return true;
- FPOptions FPO = E->getFPFeaturesInEffect(Info.Ctx.getLangOpts());
+ FPOptions FPO = E->getFPFeaturesInEffect(Info.getLangOpts());
if ((St & APFloat::opInexact) &&
FPO.getRoundingMode() == llvm::RoundingMode::Dynamic) {
// Inexact result means that it depends on rounding mode. If the requested
@@ -3828,6 +3712,350 @@ static bool CheckArraySize(EvalInfo &Info, const ConstantArrayType *CAT,
/*Diag=*/true);
}
+static bool handleScalarCast(EvalInfo &Info, const FPOptions FPO, const Expr *E,
+ QualType SourceTy, QualType DestTy,
+ APValue const &Original, APValue &Result) {
+ // boolean must be checked before integer
+ // since IsIntegerType() is true for bool
+ if (SourceTy->isBooleanType()) {
+ if (DestTy->isBooleanType()) {
+ Result = Original;
+ return true;
+ }
+ if (DestTy->isIntegerType() || DestTy->isRealFloatingType()) {
+ bool BoolResult;
+ if (!HandleConversionToBool(Original, BoolResult))
+ return false;
+ uint64_t IntResult = BoolResult;
+ QualType IntType = DestTy->isIntegerType()
+ ? DestTy
+ : Info.Ctx.getIntTypeForBitwidth(64, false);
+ Result = APValue(Info.Ctx.MakeIntValue(IntResult, IntType));
+ }
+ if (DestTy->isRealFloatingType()) {
+ APValue Result2 = APValue(APFloat(0.0));
+ if (!HandleIntToFloatCast(Info, E, FPO,
+ Info.Ctx.getIntTypeForBitwidth(64, false),
+ Result.getInt(), DestTy, Result2.getFloat()))
+ return false;
+ Result = Result2;
+ }
+ return true;
+ }
+ if (SourceTy->isIntegerType()) {
+ if (DestTy->isRealFloatingType()) {
+ Result = APValue(APFloat(0.0));
+ return HandleIntToFloatCast(Info, E, FPO, SourceTy, Original.getInt(),
+ DestTy, Result.getFloat());
+ }
+ if (DestTy->isBooleanType()) {
+ bool BoolResult;
+ if (!HandleConversionToBool(Original, BoolResult))
+ return false;
+ uint64_t IntResult = BoolResult;
+ Result = APValue(Info.Ctx.MakeIntValue(IntResult, DestTy));
+ return true;
+ }
+ if (DestTy->isIntegerType()) {
+ Result = APValue(
+ HandleIntToIntCast(Info, E, DestTy, SourceTy, Original.getInt()));
+ return true;
+ }
+ } else if (SourceTy->isRealFloatingType()) {
+ if (DestTy->isRealFloatingType()) {
+ Result = Original;
+ return HandleFloatToFloatCast(Info, E, SourceTy, DestTy,
+ Result.getFloat());
+ }
+ if (DestTy->isBooleanType()) {
+ bool BoolResult;
+ if (!HandleConversionToBool(Original, BoolResult))
+ return false;
+ uint64_t IntResult = BoolResult;
+ Result = APValue(Info.Ctx.MakeIntValue(IntResult, DestTy));
+ return true;
+ }
+ if (DestTy->isIntegerType()) {
+ Result = APValue(APSInt());
+ return HandleFloatToIntCast(Info, E, SourceTy, Original.getFloat(),
+ DestTy, Result.getInt());
+ }
+ }
+
+ Info.FFDiag(E, diag::note_invalid_subexpr_in_const_expr);
+ return false;
+}
+
// Do the heavy lifting for casting a flattened element list into an aggregate
// (vector/array/struct) destination. Handled separately from the scalar path
// because bit-field members need their converted value truncated to the
// declared bit width.
//
// Elements/ElTypes hold the flattened source scalars and their types, in the
// order the destination consumes them; Result is built in place for
// ResultType. Returns false on any scalar-cast failure or on an unsupported
// destination type (after emitting a diagnostic).
static bool constructAggregate(EvalInfo &Info, const FPOptions FPO,
                               const Expr *E, APValue &Result,
                               QualType ResultType,
                               SmallVectorImpl<APValue> &Elements,
                               SmallVectorImpl<QualType> &ElTypes) {

  // Worklist of (destination slot, its type, bit-field width or 0). Entries
  // are popped LIFO, so children are pushed in reverse of consumption order.
  SmallVector<std::tuple<APValue *, QualType, unsigned>> WorkList = {
      {&Result, ResultType, 0}};

  unsigned ElI = 0; // index of the next source element to consume
  while (!WorkList.empty() && ElI < Elements.size()) {
    auto [Res, Type, BitWidth] = WorkList.pop_back_val();

    if (Type->isRealFloatingType()) {
      if (!handleScalarCast(Info, FPO, E, ElTypes[ElI], Type, Elements[ElI],
                            *Res))
        return false;
      ElI++;
      continue;
    }
    if (Type->isIntegerType()) {
      if (!handleScalarCast(Info, FPO, E, ElTypes[ElI], Type, Elements[ElI],
                            *Res))
        return false;
      if (BitWidth > 0) {
        // Bit-field destination: truncate to the declared width, then
        // sign/zero-extend back so the APSInt keeps the full storage width.
        if (!Res->isInt())
          return false;
        APSInt &Int = Res->getInt();
        unsigned OldBitWidth = Int.getBitWidth();
        unsigned NewBitWidth = BitWidth;
        if (NewBitWidth < OldBitWidth)
          Int = Int.trunc(NewBitWidth).extend(OldBitWidth);
      }
      ElI++;
      continue;
    }
    if (Type->isVectorType()) {
      // Vectors consume one source element per lane, built eagerly here.
      QualType ElTy = Type->castAs<VectorType>()->getElementType();
      unsigned NumEl = Type->castAs<VectorType>()->getNumElements();
      SmallVector<APValue> Vals(NumEl);
      for (unsigned I = 0; I < NumEl; ++I) {
        if (!handleScalarCast(Info, FPO, E, ElTypes[ElI], ElTy, Elements[ElI],
                              Vals[I]))
          return false;
        ElI++;
      }
      *Res = APValue(Vals.data(), NumEl);
      continue;
    }
    if (Type->isConstantArrayType()) {
      QualType ElTy = cast<ConstantArrayType>(Info.Ctx.getAsArrayType(Type))
                          ->getElementType();
      uint64_t Size =
          cast<ConstantArrayType>(Info.Ctx.getAsArrayType(Type))->getZExtSize();
      *Res = APValue(APValue::UninitArray(), Size, Size);
      // Push elements back-to-front so the LIFO pop fills index 0 first.
      for (int64_t I = Size - 1; I > -1; --I)
        WorkList.emplace_back(&Res->getArrayInitializedElt(I), ElTy, 0u);
      continue;
    }
    if (Type->isRecordType()) {
      const RecordDecl *RD = Type->getAsRecordDecl();

      unsigned NumBases = 0;
      if (auto *CXXRD = dyn_cast<CXXRecordDecl>(RD))
        NumBases = CXXRD->getNumBases();

      *Res = APValue(APValue::UninitStruct(), NumBases, RD->getNumFields());

      // Collect subobjects in consumption order (base first, then fields),
      // then reverse before appending so the LIFO worklist pops them in that
      // same order.
      SmallVector<std::tuple<APValue *, QualType, unsigned>> ReverseList;
      // we need to traverse backwards
      // Visit the base classes.
      if (auto *CXXRD = dyn_cast<CXXRecordDecl>(RD)) {
        if (CXXRD->getNumBases() > 0) {
          // Only single inheritance is supported here.
          assert(CXXRD->getNumBases() == 1);
          const CXXBaseSpecifier &BS = CXXRD->bases_begin()[0];
          ReverseList.emplace_back(&Res->getStructBase(0), BS.getType(), 0u);
        }
      }

      // Visit the fields.
      for (FieldDecl *FD : RD->fields()) {
        unsigned FDBW = 0;
        // Unnamed bit-fields take no value.
        if (FD->isUnnamedBitField())
          continue;
        if (FD->isBitField()) {
          FDBW = FD->getBitWidthValue();
        }

        ReverseList.emplace_back(&Res->getStructField(FD->getFieldIndex()),
                                 FD->getType(), FDBW);
      }

      std::reverse(ReverseList.begin(), ReverseList.end());
      llvm::append_range(WorkList, ReverseList);
      continue;
    }
    // Anything else is not a valid aggregate-cast destination.
    Info.FFDiag(E, diag::note_invalid_subexpr_in_const_expr);
    return false;
  }
  return true;
}
+
+static bool handleElementwiseCast(EvalInfo &Info, const Expr *E,
+ const FPOptions FPO,
+ SmallVectorImpl<APValue> &Elements,
+ SmallVectorImpl<QualType> &SrcTypes,
+ SmallVectorImpl<QualType> &DestTypes,
+ SmallVectorImpl<APValue> &Results) {
+
+ assert((Elements.size() == SrcTypes.size()) &&
+ (Elements.size() == DestTypes.size()));
+
+ for (unsigned I = 0, ESz = Elements.size(); I < ESz; ++I) {
+ APValue Original = Elements[I];
+ QualType SourceTy = SrcTypes[I];
+ QualType DestTy = DestTypes[I];
+
+ if (!handleScalarCast(Info, FPO, E, SourceTy, DestTy, Original, Results[I]))
+ return false;
+ }
+ return true;
+}
+
// Count how many scalar elements a value of type BaseTy flattens to for an
// HLSL elementwise cast: scalars count as 1, vectors as their lane count,
// arrays/records as the sum over their (recursively flattened) subobjects.
// Unnamed bit-fields are skipped, matching flattenAPValue/constructAggregate.
//
// NOTE(review): a type matching none of the cases below contributes 0 and is
// silently skipped — presumably only flattenable HLSL types reach here;
// confirm against the callers.
static unsigned elementwiseSize(EvalInfo &Info, QualType BaseTy) {

  SmallVector<QualType> WorkList = {BaseTy};

  unsigned Size = 0;
  while (!WorkList.empty()) {
    QualType Type = WorkList.pop_back_val();
    // A scalar (including bool) is a single element.
    if (Type->isRealFloatingType() || Type->isIntegerType() ||
        Type->isBooleanType()) {
      ++Size;
      continue;
    }
    // A vector contributes one element per lane.
    if (Type->isVectorType()) {
      unsigned NumEl = Type->castAs<VectorType>()->getNumElements();
      Size += NumEl;
      continue;
    }
    // An array is flattened by queueing each element's type.
    if (Type->isConstantArrayType()) {
      QualType ElTy = cast<ConstantArrayType>(Info.Ctx.getAsArrayType(Type))
                          ->getElementType();
      uint64_t ArrSize =
          cast<ConstantArrayType>(Info.Ctx.getAsArrayType(Type))->getZExtSize();
      for (uint64_t I = 0; I < ArrSize; ++I) {
        WorkList.push_back(ElTy);
      }
      continue;
    }
    // A record is flattened into its (single) base and its named fields.
    // Traversal order does not matter here since we only count.
    if (Type->isRecordType()) {
      const RecordDecl *RD = Type->getAsRecordDecl();

      // Visit the base classes.
      if (auto *CXXRD = dyn_cast<CXXRecordDecl>(RD)) {
        if (CXXRD->getNumBases() > 0) {
          assert(CXXRD->getNumBases() == 1);
          const CXXBaseSpecifier &BS = CXXRD->bases_begin()[0];
          WorkList.push_back(BS.getType());
        }
      }

      // visit the fields.
      for (FieldDecl *FD : RD->fields()) {
        if (FD->isUnnamedBitField())
          continue;
        WorkList.push_back(FD->getType());
      }
      continue;
    }
  }
  return Size;
}
+
+static bool hlslAggSplatHelper(EvalInfo &Info, const Expr *E, APValue &SrcVal,
+ QualType &SrcTy) {
+ SrcTy = E->getType();
+
+ if (!Evaluate(SrcVal, Info, E))
+ return false;
+
+ assert((SrcVal.isFloat() || SrcVal.isInt() ||
+ (SrcVal.isVector() && SrcVal.getVectorLength() == 1)) &&
+ "Not a valid HLSLAggregateSplatCast.");
+
+ if (SrcVal.isVector()) {
+ assert(SrcTy->isVectorType() && "Type mismatch.");
+ SrcTy = SrcTy->castAs<VectorType>()->getElementType();
+ SrcVal = SrcVal.getVectorElt(0);
+ }
+ return true;
+}
+
// Flatten an APValue of type BaseTy into at most Size scalar leaves, appending
// each leaf to Elements and its type to Types in flattening order (for a
// record: base subobject first, then fields in declaration order; unnamed
// bit-fields are skipped). Stops once Size leaves have been produced.
// Returns false on a malformed value or an unsupported kind (after emitting a
// diagnostic for the latter).
static bool flattenAPValue(EvalInfo &Info, const Expr *E, APValue Value,
                           QualType BaseTy, SmallVectorImpl<APValue> &Elements,
                           SmallVectorImpl<QualType> &Types, unsigned Size) {

  // LIFO worklist of (value, type) pairs; children are pushed in reverse of
  // the order they should be emitted.
  SmallVector<std::pair<APValue, QualType>> WorkList = {{Value, BaseTy}};
  unsigned Populated = 0;
  while (!WorkList.empty() && Populated < Size) {
    auto [Work, Type] = WorkList.pop_back_val();

    // Scalar leaf: emit it directly.
    if (Work.isFloat() || Work.isInt()) {
      Elements.push_back(Work);
      Types.push_back(Type);
      Populated++;
      continue;
    }
    // Vector: emit each lane as a leaf, stopping at the Size cap.
    if (Work.isVector()) {
      assert(Type->isVectorType() && "Type mismatch.");
      QualType ElTy = Type->castAs<VectorType>()->getElementType();
      for (unsigned I = 0; I < Work.getVectorLength() && Populated < Size;
           I++) {
        Elements.push_back(Work.getVectorElt(I));
        Types.push_back(ElTy);
        Populated++;
      }
      continue;
    }
    // Array: push elements back-to-front so index 0 is popped first.
    if (Work.isArray()) {
      assert(Type->isConstantArrayType() && "Type mismatch.");
      QualType ElTy = cast<ConstantArrayType>(Info.Ctx.getAsArrayType(Type))
                          ->getElementType();
      for (int64_t I = Work.getArraySize() - 1; I > -1; --I) {
        WorkList.emplace_back(Work.getArrayInitializedElt(I), ElTy);
      }
      continue;
    }

    if (Work.isStruct()) {
      assert(Type->isRecordType() && "Type mismatch.");

      const RecordDecl *RD = Type->getAsRecordDecl();

      // Fields are collected in declaration order, reversed, and appended;
      // the base is pushed after them, so the LIFO pop yields the base first
      // and then the fields in declaration order.
      SmallVector<std::pair<APValue, QualType>> ReverseList;
      // Visit the fields.
      for (FieldDecl *FD : RD->fields()) {
        if (FD->isUnnamedBitField())
          continue;
        ReverseList.emplace_back(Work.getStructField(FD->getFieldIndex()),
                                 FD->getType());
      }

      std::reverse(ReverseList.begin(), ReverseList.end());
      llvm::append_range(WorkList, ReverseList);

      // Visit the base classes.
      if (auto *CXXRD = dyn_cast<CXXRecordDecl>(RD)) {
        if (CXXRD->getNumBases() > 0) {
          // Only single inheritance is supported here.
          assert(CXXRD->getNumBases() == 1);
          const CXXBaseSpecifier &BS = CXXRD->bases_begin()[0];
          const APValue &Base = Work.getStructBase(0);

          // Can happen in error cases.
          if (!Base.isStruct())
            return false;

          WorkList.emplace_back(Base, BS.getType());
        }
      }
      continue;
    }
    // Any other APValue kind cannot be flattened.
    Info.FFDiag(E, diag::note_invalid_subexpr_in_const_expr);
    return false;
  }
  return true;
}
+
namespace {
/// A handle to a complete object (an object that is not a subobject of
/// another object).
@@ -4288,8 +4516,8 @@ static CompleteObject findCompleteObject(EvalInfo &Info, const Expr *E,
std::tie(Frame, Depth) =
Info.getCallFrameAndDepth(LVal.getLValueCallIndex());
if (!Frame) {
- Info.FFDiag(E, diag::note_constexpr_lifetime_ended, 1)
- << AK << LVal.Base.is<const ValueDecl*>();
+ Info.FFDiag(E, diag::note_constexpr_access_uninit, 1)
+ << AK << /*Indeterminate=*/false << E->getSourceRange();
NoteLValueLocation(Info, LVal.Base);
return CompleteObject();
}
@@ -4638,6 +4866,30 @@ handleLValueToRValueConversion(EvalInfo &Info, const Expr *Conv, QualType Type,
return Obj && extractSubobject(Info, Conv, Obj, LVal.Designator, RVal, AK);
}
+static bool hlslElementwiseCastHelper(EvalInfo &Info, const Expr *E,
+ QualType DestTy,
+ SmallVectorImpl<APValue> &SrcVals,
+ SmallVectorImpl<QualType> &SrcTypes) {
+ APValue Val;
+ if (!Evaluate(Val, Info, E))
+ return false;
+
+ // must be dealing with a record
+ if (Val.isLValue()) {
+ LValue LVal;
+ LVal.setFrom(Info.Ctx, Val);
+ if (!handleLValueToRValueConversion(Info, E, E->getType(), LVal, Val))
+ return false;
+ }
+
+ unsigned NEls = elementwiseSize(Info, DestTy);
+ // flatten the source
+ if (!flattenAPValue(Info, E, Val, E->getType(), SrcVals, SrcTypes, NEls))
+ return false;
+
+ return true;
+}
+
/// Perform an assignment of Val to LVal. Takes ownership of Val.
static bool handleAssignment(EvalInfo &Info, const Expr *E, const LValue &LVal,
QualType LValType, APValue &Val) {
@@ -5159,8 +5411,8 @@ static bool handleDefaultInitValue(QualType T, APValue &Result) {
Result = APValue((const FieldDecl *)nullptr);
return true;
}
- Result = APValue(APValue::UninitStruct(), RD->getNumBases(),
- std::distance(RD->field_begin(), RD->field_end()));
+ Result =
+ APValue(APValue::UninitStruct(), RD->getNumBases(), RD->getNumFields());
unsigned Index = 0;
for (CXXRecordDecl::base_class_const_iterator I = RD->bases_begin(),
@@ -5451,10 +5703,13 @@ static EvalStmtResult EvaluateSwitch(StmtResult &Result, EvalInfo &Info,
}
const CaseStmt *CS = cast<CaseStmt>(SC);
- APSInt LHS = CS->getLHS()->EvaluateKnownConstInt(Info.Ctx);
- APSInt RHS = CS->getRHS() ? CS->getRHS()->EvaluateKnownConstInt(Info.Ctx)
- : LHS;
- if (LHS <= Value && Value <= RHS) {
+ const Expr *LHS = CS->getLHS();
+ const Expr *RHS = CS->getRHS();
+ if (LHS->isValueDependent() || (RHS && RHS->isValueDependent()))
+ return ESR_Failed;
+ APSInt LHSValue = LHS->EvaluateKnownConstInt(Info.Ctx);
+ APSInt RHSValue = RHS ? RHS->EvaluateKnownConstInt(Info.Ctx) : LHSValue;
+ if (LHSValue <= Value && Value <= RHSValue) {
Found = SC;
break;
}
@@ -5956,7 +6211,7 @@ static EvalStmtResult EvaluateStmt(StmtResult &Result, EvalInfo &Info,
*Info.CurrentCall, hasSpecificAttr<MSConstexprAttr>(AS->getAttrs()) &&
isa<ReturnStmt>(SS));
- auto LO = Info.getASTContext().getLangOpts();
+ auto LO = Info.Ctx.getLangOpts();
if (LO.CXXAssumptions && !LO.MSVCCompat) {
for (auto *Attr : AS->getAttrs()) {
auto *AA = dyn_cast<CXXAssumeAttr>(Attr);
@@ -5967,7 +6222,7 @@ static EvalStmtResult EvaluateStmt(StmtResult &Result, EvalInfo &Info,
if (Assumption->isValueDependent())
return ESR_Failed;
- if (Assumption->HasSideEffects(Info.getASTContext()))
+ if (Assumption->HasSideEffects(Info.Ctx))
continue;
bool Value;
@@ -6811,7 +7066,7 @@ static bool HandleConstructorCall(const Expr *E, const LValue &This,
if (!Result.hasValue()) {
if (!RD->isUnion())
Result = APValue(APValue::UninitStruct(), RD->getNumBases(),
- std::distance(RD->field_begin(), RD->field_end()));
+ RD->getNumFields());
else
// A union starts with no active member.
Result = APValue((const FieldDecl*)nullptr);
@@ -7762,8 +8017,7 @@ class BufferToAPValueConverter {
if (auto *CXXRD = dyn_cast<CXXRecordDecl>(RD))
NumBases = CXXRD->getNumBases();
- APValue ResultVal(APValue::UninitStruct(), NumBases,
- std::distance(RD->field_begin(), RD->field_end()));
+ APValue ResultVal(APValue::UninitStruct(), NumBases, RD->getNumFields());
// Visit the base classes.
if (auto *CXXRD = dyn_cast<CXXRecordDecl>(RD)) {
@@ -8666,6 +8920,25 @@ public:
case CK_UserDefinedConversion:
return StmtVisitorTy::Visit(E->getSubExpr());
+ case CK_HLSLArrayRValue: {
+ const Expr *SubExpr = E->getSubExpr();
+ if (!SubExpr->isGLValue()) {
+ APValue Val;
+ if (!Evaluate(Val, Info, SubExpr))
+ return false;
+ return DerivedSuccess(Val, E);
+ }
+
+ LValue LVal;
+ if (!EvaluateLValue(SubExpr, LVal, Info))
+ return false;
+ APValue RVal;
+ // Note, we use the subexpression's type in order to retain cv-qualifiers.
+ if (!handleLValueToRValueConversion(Info, E, SubExpr->getType(), LVal,
+ RVal))
+ return false;
+ return DerivedSuccess(RVal, E);
+ }
case CK_LValueToRValue: {
LValue LVal;
if (!EvaluateLValue(E->getSubExpr(), LVal, Info))
@@ -8937,8 +9210,8 @@ public:
bool VisitCompoundLiteralExpr(const CompoundLiteralExpr *E);
bool VisitMemberExpr(const MemberExpr *E);
bool VisitStringLiteral(const StringLiteral *E) {
- return Success(APValue::LValueBase(
- E, 0, Info.getASTContext().getNextStringLiteralVersion()));
+ return Success(
+ APValue::LValueBase(E, 0, Info.Ctx.getNextStringLiteralVersion()));
}
bool VisitObjCEncodeExpr(const ObjCEncodeExpr *E) { return Success(E); }
bool VisitCXXTypeidExpr(const CXXTypeidExpr *E);
@@ -10754,7 +11027,7 @@ static bool HandleClassZeroInitialization(EvalInfo &Info, const Expr *E,
assert(!RD->isUnion() && "Expected non-union class type");
const CXXRecordDecl *CD = dyn_cast<CXXRecordDecl>(RD);
Result = APValue(APValue::UninitStruct(), CD ? CD->getNumBases() : 0,
- std::distance(RD->field_begin(), RD->field_end()));
+ RD->getNumFields());
if (RD->isInvalidDecl()) return false;
const ASTRecordLayout &Layout = Info.Ctx.getASTRecordLayout(RD);
@@ -10850,6 +11123,42 @@ bool RecordExprEvaluator::VisitCastExpr(const CastExpr *E) {
Result = *Value;
return true;
}
+ case CK_HLSLAggregateSplatCast: {
+ APValue Val;
+ QualType ValTy;
+
+ if (!hlslAggSplatHelper(Info, E->getSubExpr(), Val, ValTy))
+ return false;
+
+ unsigned NEls = elementwiseSize(Info, E->getType());
+ // splat our Val
+ SmallVector<APValue> SplatEls(NEls, Val);
+ SmallVector<QualType> SplatType(NEls, ValTy);
+
+ // cast the elements and construct our struct result
+ const FPOptions FPO = E->getFPFeaturesInEffect(Info.Ctx.getLangOpts());
+ if (!constructAggregate(Info, FPO, E, Result, E->getType(), SplatEls,
+ SplatType))
+ return false;
+
+ return true;
+ }
+ case CK_HLSLElementwiseCast: {
+ SmallVector<APValue> SrcEls;
+ SmallVector<QualType> SrcTypes;
+
+ if (!hlslElementwiseCastHelper(Info, E->getSubExpr(), E->getType(), SrcEls,
+ SrcTypes))
+ return false;
+
+ // cast the elements and construct our struct result
+ const FPOptions FPO = E->getFPFeaturesInEffect(Info.Ctx.getLangOpts());
+ if (!constructAggregate(Info, FPO, E, Result, E->getType(), SrcEls,
+ SrcTypes))
+ return false;
+
+ return true;
+ }
}
}
@@ -10914,7 +11223,7 @@ bool RecordExprEvaluator::VisitCXXParenListOrInitListExpr(
if (!Result.hasValue())
Result = APValue(APValue::UninitStruct(), CXXRD ? CXXRD->getNumBases() : 0,
- std::distance(RD->field_begin(), RD->field_end()));
+ RD->getNumFields());
unsigned ElementNo = 0;
bool Success = true;
@@ -11121,8 +11430,7 @@ bool RecordExprEvaluator::VisitLambdaExpr(const LambdaExpr *E) {
if (ClosureClass->isInvalidDecl())
return false;
- const size_t NumFields =
- std::distance(ClosureClass->field_begin(), ClosureClass->field_end());
+ const size_t NumFields = ClosureClass->getNumFields();
assert(NumFields == (size_t)std::distance(E->capture_init_begin(),
E->capture_init_end()) &&
@@ -11345,6 +11653,42 @@ bool VectorExprEvaluator::VisitCastExpr(const CastExpr *E) {
Elements.push_back(Val.getVectorElt(I));
return Success(Elements, E);
}
+ case CK_HLSLMatrixTruncation: {
+ // TODO: See #168935. Add matrix truncation support to expr constant.
+ return Error(E);
+ }
+ case CK_HLSLAggregateSplatCast: {
+ APValue Val;
+ QualType ValTy;
+
+ if (!hlslAggSplatHelper(Info, SE, Val, ValTy))
+ return false;
+
+ // cast our Val once.
+ APValue Result;
+ const FPOptions FPO = E->getFPFeaturesInEffect(Info.Ctx.getLangOpts());
+ if (!handleScalarCast(Info, FPO, E, ValTy, VTy->getElementType(), Val,
+ Result))
+ return false;
+
+ SmallVector<APValue, 4> SplatEls(NElts, Result);
+ return Success(SplatEls, E);
+ }
+ case CK_HLSLElementwiseCast: {
+ SmallVector<APValue> SrcVals;
+ SmallVector<QualType> SrcTypes;
+
+ if (!hlslElementwiseCastHelper(Info, SE, E->getType(), SrcVals, SrcTypes))
+ return false;
+
+ const FPOptions FPO = E->getFPFeaturesInEffect(Info.Ctx.getLangOpts());
+ SmallVector<QualType, 4> DestTypes(NElts, VTy->getElementType());
+ SmallVector<APValue, 4> ResultEls(NElts);
+ if (!handleElementwiseCast(Info, E, FPO, SrcVals, SrcTypes, DestTypes,
+ ResultEls))
+ return false;
+ return Success(ResultEls, E);
+ }
default:
return ExprEvaluatorBaseTy::VisitCastExpr(E);
}
@@ -11618,95 +11962,166 @@ static bool evalPackBuiltin(const CallExpr *E, EvalInfo &Info, APValue &Result,
return true;
}
-static bool evalPshufbBuiltin(EvalInfo &Info, const CallExpr *Call,
- APValue &Out) {
- APValue SrcVec, ControlVec;
- if (!EvaluateAsRValue(Info, Call->getArg(0), SrcVec))
- return false;
- if (!EvaluateAsRValue(Info, Call->getArg(1), ControlVec))
- return false;
+static bool evalShuffleGeneric(
+ EvalInfo &Info, const CallExpr *Call, APValue &Out,
+ llvm::function_ref<std::pair<unsigned, int>(unsigned, unsigned)>
+ GetSourceIndex) {
const auto *VT = Call->getType()->getAs<VectorType>();
if (!VT)
return false;
- QualType ElemT = VT->getElementType();
- unsigned NumElts = VT->getNumElements();
+ unsigned ShuffleMask = 0;
+ APValue A, MaskVector, B;
+ bool IsVectorMask = false;
+ bool IsSingleOperand = (Call->getNumArgs() == 2);
+
+ if (IsSingleOperand) {
+ QualType MaskType = Call->getArg(1)->getType();
+ if (MaskType->isVectorType()) {
+ IsVectorMask = true;
+ if (!EvaluateAsRValue(Info, Call->getArg(0), A) ||
+ !EvaluateAsRValue(Info, Call->getArg(1), MaskVector))
+ return false;
+ B = A;
+ } else if (MaskType->isIntegerType()) {
+ APSInt MaskImm;
+ if (!EvaluateInteger(Call->getArg(1), MaskImm, Info))
+ return false;
+ ShuffleMask = static_cast<unsigned>(MaskImm.getZExtValue());
+ if (!EvaluateAsRValue(Info, Call->getArg(0), A))
+ return false;
+ B = A;
+ } else {
+ return false;
+ }
+ } else {
+ QualType Arg2Type = Call->getArg(2)->getType();
+ if (Arg2Type->isVectorType()) {
+ IsVectorMask = true;
+ if (!EvaluateAsRValue(Info, Call->getArg(0), A) ||
+ !EvaluateAsRValue(Info, Call->getArg(1), MaskVector) ||
+ !EvaluateAsRValue(Info, Call->getArg(2), B))
+ return false;
+ } else if (Arg2Type->isIntegerType()) {
+ APSInt MaskImm;
+ if (!EvaluateInteger(Call->getArg(2), MaskImm, Info))
+ return false;
+ ShuffleMask = static_cast<unsigned>(MaskImm.getZExtValue());
+ if (!EvaluateAsRValue(Info, Call->getArg(0), A) ||
+ !EvaluateAsRValue(Info, Call->getArg(1), B))
+ return false;
+ } else {
+ return false;
+ }
+ }
+ unsigned NumElts = VT->getNumElements();
SmallVector<APValue, 64> ResultElements;
ResultElements.reserve(NumElts);
- for (unsigned Idx = 0; Idx != NumElts; ++Idx) {
- APValue CtlVal = ControlVec.getVectorElt(Idx);
- APSInt CtlByte = CtlVal.getInt();
- uint8_t Ctl = static_cast<uint8_t>(CtlByte.getZExtValue());
+ for (unsigned DstIdx = 0; DstIdx != NumElts; ++DstIdx) {
+ if (IsVectorMask) {
+ ShuffleMask = static_cast<unsigned>(
+ MaskVector.getVectorElt(DstIdx).getInt().getZExtValue());
+ }
+ auto [SrcVecIdx, SrcIdx] = GetSourceIndex(DstIdx, ShuffleMask);
- if (Ctl & 0x80) {
- APValue Zero(Info.Ctx.MakeIntValue(0, ElemT));
- ResultElements.push_back(Zero);
+ if (SrcIdx < 0) {
+ // Zero out this element
+ QualType ElemTy = VT->getElementType();
+ if (ElemTy->isRealFloatingType()) {
+ ResultElements.push_back(
+ APValue(APFloat::getZero(Info.Ctx.getFloatTypeSemantics(ElemTy))));
+ } else if (ElemTy->isIntegerType()) {
+ APValue Zero(Info.Ctx.MakeIntValue(0, ElemTy));
+ ResultElements.push_back(APValue(Zero));
+ } else {
+ // Other types of fallback logic
+ ResultElements.push_back(APValue());
+ }
} else {
- unsigned LaneBase = (Idx / 16) * 16;
- unsigned SrcOffset = Ctl & 0x0F;
- unsigned SrcIdx = LaneBase + SrcOffset;
-
- ResultElements.push_back(SrcVec.getVectorElt(SrcIdx));
+ const APValue &Src = (SrcVecIdx == 0) ? A : B;
+ ResultElements.push_back(Src.getVectorElt(SrcIdx));
}
}
+
Out = APValue(ResultElements.data(), ResultElements.size());
return true;
}
+static bool ConvertDoubleToFloatStrict(EvalInfo &Info, const Expr *E,
+ APFloat OrigVal, APValue &Result) {
-static bool evalPshufBuiltin(EvalInfo &Info, const CallExpr *Call,
- bool IsShufHW, APValue &Out) {
- APValue Vec;
- APSInt Imm;
- if (!EvaluateAsRValue(Info, Call->getArg(0), Vec))
- return false;
- if (!EvaluateInteger(Call->getArg(1), Imm, Info))
+ if (OrigVal.isInfinity()) {
+ Info.CCEDiag(E, diag::note_constexpr_float_arithmetic) << 0;
return false;
-
- const auto *VT = Call->getType()->getAs<VectorType>();
- if (!VT)
+ }
+ if (OrigVal.isNaN()) {
+ Info.CCEDiag(E, diag::note_constexpr_float_arithmetic) << 1;
return false;
+ }
- QualType ElemT = VT->getElementType();
- unsigned ElemBits = Info.Ctx.getTypeSize(ElemT);
- unsigned NumElts = VT->getNumElements();
+ APFloat Val = OrigVal;
+ bool LosesInfo = false;
+ APFloat::opStatus Status = Val.convert(
+ APFloat::IEEEsingle(), APFloat::rmNearestTiesToEven, &LosesInfo);
- unsigned LaneBits = 128u;
- unsigned LaneElts = LaneBits / ElemBits;
- if (!LaneElts || (NumElts % LaneElts) != 0)
+ if (LosesInfo || Val.isDenormal()) {
+ Info.CCEDiag(E, diag::note_constexpr_float_arithmetic_strict);
return false;
+ }
- uint8_t Ctl = static_cast<uint8_t>(Imm.getZExtValue());
+ if (Status != APFloat::opOK) {
+ Info.CCEDiag(E, diag::note_invalid_subexpr_in_const_expr);
+ return false;
+ }
- SmallVector<APValue, 32> ResultElements;
- ResultElements.reserve(NumElts);
+ Result = APValue(Val);
+ return true;
+}
+static bool evalShiftWithCount(
+ EvalInfo &Info, const CallExpr *Call, APValue &Out,
+ llvm::function_ref<APInt(const APInt &, uint64_t)> ShiftOp,
+ llvm::function_ref<APInt(const APInt &, unsigned)> OverflowOp) {
- for (unsigned Idx = 0; Idx != NumElts; Idx++) {
- unsigned LaneBase = (Idx / LaneElts) * LaneElts;
- unsigned LaneIdx = Idx % LaneElts;
- unsigned SrcIdx = Idx;
- unsigned Sel = (Ctl >> (2 * LaneIdx)) & 0x3;
+ APValue Source, Count;
+ if (!EvaluateAsRValue(Info, Call->getArg(0), Source) ||
+ !EvaluateAsRValue(Info, Call->getArg(1), Count))
+ return false;
- if (ElemBits == 32) {
- SrcIdx = LaneBase + Sel;
+ assert(Call->getNumArgs() == 2);
+
+ QualType SourceTy = Call->getArg(0)->getType();
+ assert(SourceTy->isVectorType() &&
+ Call->getArg(1)->getType()->isVectorType());
+
+ QualType DestEltTy = SourceTy->castAs<VectorType>()->getElementType();
+ unsigned DestEltWidth = Source.getVectorElt(0).getInt().getBitWidth();
+ unsigned DestLen = Source.getVectorLength();
+ bool IsDestUnsigned = DestEltTy->isUnsignedIntegerType();
+ unsigned CountEltWidth = Count.getVectorElt(0).getInt().getBitWidth();
+ unsigned NumBitsInQWord = 64;
+ unsigned NumCountElts = NumBitsInQWord / CountEltWidth;
+ SmallVector<APValue, 64> Result;
+ Result.reserve(DestLen);
+
+ uint64_t CountLQWord = 0;
+ for (unsigned EltIdx = 0; EltIdx != NumCountElts; ++EltIdx) {
+ uint64_t Elt = Count.getVectorElt(EltIdx).getInt().getZExtValue();
+ CountLQWord |= (Elt << (EltIdx * CountEltWidth));
+ }
+
+ for (unsigned EltIdx = 0; EltIdx != DestLen; ++EltIdx) {
+ APInt Elt = Source.getVectorElt(EltIdx).getInt();
+ if (CountLQWord < DestEltWidth) {
+ Result.push_back(
+ APValue(APSInt(ShiftOp(Elt, CountLQWord), IsDestUnsigned)));
} else {
- constexpr unsigned HalfSize = 4;
- bool InHigh = LaneIdx >= HalfSize;
- if (!IsShufHW && !InHigh) {
- SrcIdx = LaneBase + Sel;
- } else if (IsShufHW && InHigh) {
- unsigned Rel = LaneIdx - HalfSize;
- Sel = (Ctl >> (2 * Rel)) & 0x3;
- SrcIdx = LaneBase + HalfSize + Sel;
- }
+ Result.push_back(
+ APValue(APSInt(OverflowOp(Elt, DestEltWidth), IsDestUnsigned)));
}
-
- ResultElements.push_back(Vec.getVectorElt(SrcIdx));
}
-
- Out = APValue(ResultElements.data(), ResultElements.size());
+ Out = APValue(Result.data(), Result.size());
return true;
}
@@ -11746,6 +12161,60 @@ bool VectorExprEvaluator::VisitCallExpr(const CallExpr *E) {
return Success(APValue(ResultElements.data(), SourceLen), E);
};
+ auto EvaluateFpBinOpExpr =
+ [&](llvm::function_ref<std::optional<APFloat>(
+ const APFloat &, const APFloat &, std::optional<APSInt>)>
+ Fn) {
+ assert(E->getNumArgs() == 2 || E->getNumArgs() == 3);
+ APValue A, B;
+ if (!EvaluateAsRValue(Info, E->getArg(0), A) ||
+ !EvaluateAsRValue(Info, E->getArg(1), B))
+ return false;
+
+ assert(A.isVector() && B.isVector());
+ assert(A.getVectorLength() == B.getVectorLength());
+
+ std::optional<APSInt> RoundingMode;
+ if (E->getNumArgs() == 3) {
+ APSInt Imm;
+ if (!EvaluateInteger(E->getArg(2), Imm, Info))
+ return false;
+ RoundingMode = Imm;
+ }
+
+ unsigned NumElems = A.getVectorLength();
+ SmallVector<APValue, 4> ResultElements;
+ ResultElements.reserve(NumElems);
+
+ for (unsigned EltNum = 0; EltNum < NumElems; ++EltNum) {
+ const APFloat &EltA = A.getVectorElt(EltNum).getFloat();
+ const APFloat &EltB = B.getVectorElt(EltNum).getFloat();
+ std::optional<APFloat> Result = Fn(EltA, EltB, RoundingMode);
+ if (!Result)
+ return false;
+ ResultElements.push_back(APValue(*Result));
+ }
+ return Success(APValue(ResultElements.data(), NumElems), E);
+ };
+
+ auto EvalSelectScalar = [&](unsigned Len) -> bool {
+ APSInt Mask;
+ APValue AVal, WVal;
+ if (!EvaluateInteger(E->getArg(0), Mask, Info) ||
+ !EvaluateAsRValue(Info, E->getArg(1), AVal) ||
+ !EvaluateAsRValue(Info, E->getArg(2), WVal))
+ return false;
+
+ bool TakeA0 = (Mask.getZExtValue() & 1u) != 0;
+ SmallVector<APValue, 4> Res;
+ Res.reserve(Len);
+ Res.push_back(TakeA0 ? AVal.getVectorElt(0) : WVal.getVectorElt(0));
+ for (unsigned I = 1; I < Len; ++I)
+ Res.push_back(WVal.getVectorElt(I));
+ APValue V(Res.data(), Res.size());
+ return Success(V, E);
+ };
+
switch (E->getBuiltinCallee()) {
default:
return false;
@@ -11811,6 +12280,108 @@ bool VectorExprEvaluator::VisitCallExpr(const CallExpr *E) {
return LHS.isSigned() ? LHS.ssub_sat(RHS) : LHS.usub_sat(RHS);
});
+ case X86::BI__builtin_ia32_extract128i256:
+ case X86::BI__builtin_ia32_vextractf128_pd256:
+ case X86::BI__builtin_ia32_vextractf128_ps256:
+ case X86::BI__builtin_ia32_vextractf128_si256: {
+ APValue SourceVec, SourceImm;
+ if (!EvaluateAsRValue(Info, E->getArg(0), SourceVec) ||
+ !EvaluateAsRValue(Info, E->getArg(1), SourceImm))
+ return false;
+
+ if (!SourceVec.isVector())
+ return false;
+
+ const auto *RetVT = E->getType()->castAs<VectorType>();
+ unsigned RetLen = RetVT->getNumElements();
+ unsigned Idx = SourceImm.getInt().getZExtValue() & 1;
+
+ SmallVector<APValue, 32> ResultElements;
+ ResultElements.reserve(RetLen);
+
+ for (unsigned I = 0; I < RetLen; I++)
+ ResultElements.push_back(SourceVec.getVectorElt(Idx * RetLen + I));
+
+ return Success(APValue(ResultElements.data(), RetLen), E);
+ }
+
+ case clang::X86::BI__builtin_ia32_cvtmask2b128:
+ case clang::X86::BI__builtin_ia32_cvtmask2b256:
+ case clang::X86::BI__builtin_ia32_cvtmask2b512:
+ case clang::X86::BI__builtin_ia32_cvtmask2w128:
+ case clang::X86::BI__builtin_ia32_cvtmask2w256:
+ case clang::X86::BI__builtin_ia32_cvtmask2w512:
+ case clang::X86::BI__builtin_ia32_cvtmask2d128:
+ case clang::X86::BI__builtin_ia32_cvtmask2d256:
+ case clang::X86::BI__builtin_ia32_cvtmask2d512:
+ case clang::X86::BI__builtin_ia32_cvtmask2q128:
+ case clang::X86::BI__builtin_ia32_cvtmask2q256:
+ case clang::X86::BI__builtin_ia32_cvtmask2q512: {
+ assert(E->getNumArgs() == 1);
+ APSInt Mask;
+ if (!EvaluateInteger(E->getArg(0), Mask, Info))
+ return false;
+
+ QualType VecTy = E->getType();
+ const VectorType *VT = VecTy->castAs<VectorType>();
+ unsigned VectorLen = VT->getNumElements();
+ QualType ElemTy = VT->getElementType();
+ unsigned ElemWidth = Info.Ctx.getTypeSize(ElemTy);
+
+ SmallVector<APValue, 16> Elems;
+ for (unsigned I = 0; I != VectorLen; ++I) {
+ bool BitSet = Mask[I];
+ APSInt ElemVal(ElemWidth, /*isUnsigned=*/false);
+ if (BitSet) {
+ ElemVal.setAllBits();
+ }
+ Elems.push_back(APValue(ElemVal));
+ }
+ return Success(APValue(Elems.data(), VectorLen), E);
+ }
+
+ case X86::BI__builtin_ia32_extracti32x4_256_mask:
+ case X86::BI__builtin_ia32_extractf32x4_256_mask:
+ case X86::BI__builtin_ia32_extracti32x4_mask:
+ case X86::BI__builtin_ia32_extractf32x4_mask:
+ case X86::BI__builtin_ia32_extracti32x8_mask:
+ case X86::BI__builtin_ia32_extractf32x8_mask:
+ case X86::BI__builtin_ia32_extracti64x2_256_mask:
+ case X86::BI__builtin_ia32_extractf64x2_256_mask:
+ case X86::BI__builtin_ia32_extracti64x2_512_mask:
+ case X86::BI__builtin_ia32_extractf64x2_512_mask:
+ case X86::BI__builtin_ia32_extracti64x4_mask:
+ case X86::BI__builtin_ia32_extractf64x4_mask: {
+ APValue SourceVec, MergeVec;
+ APSInt Imm, MaskImm;
+
+ if (!EvaluateAsRValue(Info, E->getArg(0), SourceVec) ||
+ !EvaluateInteger(E->getArg(1), Imm, Info) ||
+ !EvaluateAsRValue(Info, E->getArg(2), MergeVec) ||
+ !EvaluateInteger(E->getArg(3), MaskImm, Info))
+ return false;
+
+ const auto *RetVT = E->getType()->castAs<VectorType>();
+ unsigned RetLen = RetVT->getNumElements();
+
+ if (!SourceVec.isVector() || !MergeVec.isVector())
+ return false;
+ unsigned SrcLen = SourceVec.getVectorLength();
+ unsigned Lanes = SrcLen / RetLen;
+ unsigned Lane = static_cast<unsigned>(Imm.getZExtValue() % Lanes);
+ unsigned Base = Lane * RetLen;
+
+ SmallVector<APValue, 32> ResultElements;
+ ResultElements.reserve(RetLen);
+ for (unsigned I = 0; I < RetLen; ++I) {
+ if (MaskImm[I])
+ ResultElements.push_back(SourceVec.getVectorElt(Base + I));
+ else
+ ResultElements.push_back(MergeVec.getVectorElt(I));
+ }
+ return Success(APValue(ResultElements.data(), ResultElements.size()), E);
+ }
+
case clang::X86::BI__builtin_ia32_pavgb128:
case clang::X86::BI__builtin_ia32_pavgw128:
case clang::X86::BI__builtin_ia32_pavgb256:
@@ -11819,6 +12390,14 @@ bool VectorExprEvaluator::VisitCallExpr(const CallExpr *E) {
case clang::X86::BI__builtin_ia32_pavgw512:
return EvaluateBinOpExpr(llvm::APIntOps::avgCeilU);
+ case clang::X86::BI__builtin_ia32_pmulhrsw128:
+ case clang::X86::BI__builtin_ia32_pmulhrsw256:
+ case clang::X86::BI__builtin_ia32_pmulhrsw512:
+ return EvaluateBinOpExpr([](const APSInt &LHS, const APSInt &RHS) {
+ return (llvm::APIntOps::mulsExtended(LHS, RHS).ashr(14) + 1)
+ .extractBits(16, 1);
+ });
+
case clang::X86::BI__builtin_ia32_pmaddubsw128:
case clang::X86::BI__builtin_ia32_pmaddubsw256:
case clang::X86::BI__builtin_ia32_pmaddubsw512:
@@ -11967,13 +12546,15 @@ bool VectorExprEvaluator::VisitCallExpr(const CallExpr *E) {
case X86::BI__builtin_ia32_packuswb256:
case X86::BI__builtin_ia32_packuswb512:
return evalPackBuiltin(E, Info, Result, [](const APSInt &Src) {
- unsigned DstBits = Src.getBitWidth() / 2;
- if (Src.isNegative())
- return APInt::getZero(DstBits);
- if (Src.isIntN(DstBits))
- return APInt((Src).trunc(DstBits));
- return APInt::getAllOnes(DstBits);
+ return APSInt(Src).truncSSatU(Src.getBitWidth() / 2);
});
+ case clang::X86::BI__builtin_ia32_selectss_128:
+ return EvalSelectScalar(4);
+ case clang::X86::BI__builtin_ia32_selectsd_128:
+ return EvalSelectScalar(2);
+ case clang::X86::BI__builtin_ia32_selectsh_128:
+ case clang::X86::BI__builtin_ia32_selectsbf_128:
+ return EvalSelectScalar(8);
case clang::X86::BI__builtin_ia32_pmuldq128:
case clang::X86::BI__builtin_ia32_pmuldq256:
case clang::X86::BI__builtin_ia32_pmuldq512:
@@ -12237,6 +12818,20 @@ bool VectorExprEvaluator::VisitCallExpr(const CallExpr *E) {
return Success(APValue(ResultElements.data(), ResultElements.size()), E);
}
+ case X86::BI__builtin_ia32_psignb128:
+ case X86::BI__builtin_ia32_psignb256:
+ case X86::BI__builtin_ia32_psignw128:
+ case X86::BI__builtin_ia32_psignw256:
+ case X86::BI__builtin_ia32_psignd128:
+ case X86::BI__builtin_ia32_psignd256:
+ return EvaluateBinOpExpr([](const APInt &AElem, const APInt &BElem) {
+ if (BElem.isZero())
+ return APInt::getZero(AElem.getBitWidth());
+ if (BElem.isNegative())
+ return -AElem;
+ return AElem;
+ });
+
case X86::BI__builtin_ia32_blendvpd:
case X86::BI__builtin_ia32_blendvpd256:
case X86::BI__builtin_ia32_blendvps:
@@ -12309,11 +12904,210 @@ bool VectorExprEvaluator::VisitCallExpr(const CallExpr *E) {
return Success(APValue(ResultElements.data(), ResultElements.size()), E);
}
+ case X86::BI__builtin_ia32_cvtsd2ss: {
+ APValue VecA, VecB;
+ if (!EvaluateAsRValue(Info, E->getArg(0), VecA) ||
+ !EvaluateAsRValue(Info, E->getArg(1), VecB))
+ return false;
+
+ SmallVector<APValue, 4> Elements;
+
+ APValue ResultVal;
+ if (!ConvertDoubleToFloatStrict(Info, E, VecB.getVectorElt(0).getFloat(),
+ ResultVal))
+ return false;
+
+ Elements.push_back(ResultVal);
+
+ unsigned NumEltsA = VecA.getVectorLength();
+ for (unsigned I = 1; I < NumEltsA; ++I) {
+ Elements.push_back(VecA.getVectorElt(I));
+ }
+
+ return Success(Elements, E);
+ }
+ case X86::BI__builtin_ia32_cvtsd2ss_round_mask: {
+ APValue VecA, VecB, VecSrc, MaskValue;
+
+ if (!EvaluateAsRValue(Info, E->getArg(0), VecA) ||
+ !EvaluateAsRValue(Info, E->getArg(1), VecB) ||
+ !EvaluateAsRValue(Info, E->getArg(2), VecSrc) ||
+ !EvaluateAsRValue(Info, E->getArg(3), MaskValue))
+ return false;
+
+ unsigned Mask = MaskValue.getInt().getZExtValue();
+ SmallVector<APValue, 4> Elements;
+
+ if (Mask & 1) {
+ APValue ResultVal;
+ if (!ConvertDoubleToFloatStrict(Info, E, VecB.getVectorElt(0).getFloat(),
+ ResultVal))
+ return false;
+ Elements.push_back(ResultVal);
+ } else {
+ Elements.push_back(VecSrc.getVectorElt(0));
+ }
+
+ unsigned NumEltsA = VecA.getVectorLength();
+ for (unsigned I = 1; I < NumEltsA; ++I) {
+ Elements.push_back(VecA.getVectorElt(I));
+ }
+
+ return Success(Elements, E);
+ }
+ case X86::BI__builtin_ia32_cvtpd2ps:
+ case X86::BI__builtin_ia32_cvtpd2ps256:
+ case X86::BI__builtin_ia32_cvtpd2ps_mask:
+ case X86::BI__builtin_ia32_cvtpd2ps512_mask: {
+
+ const auto BuiltinID = E->getBuiltinCallee();
+ bool IsMasked = (BuiltinID == X86::BI__builtin_ia32_cvtpd2ps_mask ||
+ BuiltinID == X86::BI__builtin_ia32_cvtpd2ps512_mask);
+
+ APValue InputValue;
+ if (!EvaluateAsRValue(Info, E->getArg(0), InputValue))
+ return false;
+
+ APValue MergeValue;
+ unsigned Mask = 0xFFFFFFFF;
+ bool NeedsMerge = false;
+ if (IsMasked) {
+ APValue MaskValue;
+ if (!EvaluateAsRValue(Info, E->getArg(2), MaskValue))
+ return false;
+ Mask = MaskValue.getInt().getZExtValue();
+ auto NumEltsResult = E->getType()->getAs<VectorType>()->getNumElements();
+ for (unsigned I = 0; I < NumEltsResult; ++I) {
+ if (!((Mask >> I) & 1)) {
+ NeedsMerge = true;
+ break;
+ }
+ }
+ if (NeedsMerge) {
+ if (!EvaluateAsRValue(Info, E->getArg(1), MergeValue))
+ return false;
+ }
+ }
+
+ unsigned NumEltsResult =
+ E->getType()->getAs<VectorType>()->getNumElements();
+ unsigned NumEltsInput = InputValue.getVectorLength();
+ SmallVector<APValue, 8> Elements;
+ for (unsigned I = 0; I < NumEltsResult; ++I) {
+ if (IsMasked && !((Mask >> I) & 1)) {
+ if (!NeedsMerge) {
+ return false;
+ }
+ Elements.push_back(MergeValue.getVectorElt(I));
+ continue;
+ }
+
+ if (I >= NumEltsInput) {
+ Elements.push_back(APValue(APFloat::getZero(APFloat::IEEEsingle())));
+ continue;
+ }
+
+ APValue ResultVal;
+ if (!ConvertDoubleToFloatStrict(
+ Info, E, InputValue.getVectorElt(I).getFloat(), ResultVal))
+ return false;
+
+ Elements.push_back(ResultVal);
+ }
+ return Success(Elements, E);
+ }
+
+ case X86::BI__builtin_ia32_shufps:
+ case X86::BI__builtin_ia32_shufps256:
+ case X86::BI__builtin_ia32_shufps512: {
+ APValue R;
+ if (!evalShuffleGeneric(
+ Info, E, R,
+ [](unsigned DstIdx,
+ unsigned ShuffleMask) -> std::pair<unsigned, int> {
+ constexpr unsigned LaneBits = 128u;
+ unsigned NumElemPerLane = LaneBits / 32;
+ unsigned NumSelectableElems = NumElemPerLane / 2;
+ unsigned BitsPerElem = 2;
+ unsigned IndexMask = (1u << BitsPerElem) - 1;
+ unsigned MaskBits = 8;
+ unsigned Lane = DstIdx / NumElemPerLane;
+ unsigned ElemInLane = DstIdx % NumElemPerLane;
+ unsigned LaneOffset = Lane * NumElemPerLane;
+ unsigned BitIndex = (DstIdx * BitsPerElem) % MaskBits;
+ unsigned SrcIdx = (ElemInLane < NumSelectableElems) ? 0 : 1;
+ unsigned Index = (ShuffleMask >> BitIndex) & IndexMask;
+ return {SrcIdx, static_cast<int>(LaneOffset + Index)};
+ }))
+ return false;
+ return Success(R, E);
+ }
+ case X86::BI__builtin_ia32_shufpd:
+ case X86::BI__builtin_ia32_shufpd256:
+ case X86::BI__builtin_ia32_shufpd512: {
+ APValue R;
+ if (!evalShuffleGeneric(
+ Info, E, R,
+ [](unsigned DstIdx,
+ unsigned ShuffleMask) -> std::pair<unsigned, int> {
+ constexpr unsigned LaneBits = 128u;
+ unsigned NumElemPerLane = LaneBits / 64;
+ unsigned NumSelectableElems = NumElemPerLane / 2;
+ unsigned BitsPerElem = 1;
+ unsigned IndexMask = (1u << BitsPerElem) - 1;
+ unsigned MaskBits = 8;
+ unsigned Lane = DstIdx / NumElemPerLane;
+ unsigned ElemInLane = DstIdx % NumElemPerLane;
+ unsigned LaneOffset = Lane * NumElemPerLane;
+ unsigned BitIndex = (DstIdx * BitsPerElem) % MaskBits;
+ unsigned SrcIdx = (ElemInLane < NumSelectableElems) ? 0 : 1;
+ unsigned Index = (ShuffleMask >> BitIndex) & IndexMask;
+ return {SrcIdx, static_cast<int>(LaneOffset + Index)};
+ }))
+ return false;
+ return Success(R, E);
+ }
+ case X86::BI__builtin_ia32_insertps128: {
+ APValue R;
+ if (!evalShuffleGeneric(
+ Info, E, R,
+ [](unsigned DstIdx, unsigned Mask) -> std::pair<unsigned, int> {
+ // Bits [3:0]: zero mask - if bit is set, zero this element
+ if ((Mask & (1 << DstIdx)) != 0) {
+ return {0, -1};
+ }
+ // Bits [7:6]: select element from source vector Y (0-3)
+ // Bits [5:4]: select destination position (0-3)
+ unsigned SrcElem = (Mask >> 6) & 0x3;
+ unsigned DstElem = (Mask >> 4) & 0x3;
+ if (DstIdx == DstElem) {
+ // Insert element from source vector (B) at this position
+ return {1, static_cast<int>(SrcElem)};
+ } else {
+ // Copy from destination vector (A)
+ return {0, static_cast<int>(DstIdx)};
+ }
+ }))
+ return false;
+ return Success(R, E);
+ }
case X86::BI__builtin_ia32_pshufb128:
case X86::BI__builtin_ia32_pshufb256:
case X86::BI__builtin_ia32_pshufb512: {
APValue R;
- if (!evalPshufbBuiltin(Info, E, R))
+ if (!evalShuffleGeneric(
+ Info, E, R,
+ [](unsigned DstIdx,
+ unsigned ShuffleMask) -> std::pair<unsigned, int> {
+ uint8_t Ctlb = static_cast<uint8_t>(ShuffleMask);
+ if (Ctlb & 0x80)
+ return std::make_pair(0, -1);
+
+ unsigned LaneBase = (DstIdx / 16) * 16;
+ unsigned SrcOffset = Ctlb & 0x0F;
+ unsigned SrcIdx = LaneBase + SrcOffset;
+ return std::make_pair(0, static_cast<int>(SrcIdx));
+ }))
return false;
return Success(R, E);
}
@@ -12322,7 +13116,21 @@ bool VectorExprEvaluator::VisitCallExpr(const CallExpr *E) {
case X86::BI__builtin_ia32_pshuflw256:
case X86::BI__builtin_ia32_pshuflw512: {
APValue R;
- if (!evalPshufBuiltin(Info, E, false, R))
+ if (!evalShuffleGeneric(
+ Info, E, R,
+ [](unsigned DstIdx, unsigned Mask) -> std::pair<unsigned, int> {
+ constexpr unsigned LaneBits = 128u;
+ constexpr unsigned ElemBits = 16u;
+ constexpr unsigned LaneElts = LaneBits / ElemBits;
+ constexpr unsigned HalfSize = 4;
+ unsigned LaneBase = (DstIdx / LaneElts) * LaneElts;
+ unsigned LaneIdx = DstIdx % LaneElts;
+ if (LaneIdx < HalfSize) {
+ unsigned Sel = (Mask >> (2 * LaneIdx)) & 0x3;
+ return std::make_pair(0, static_cast<int>(LaneBase + Sel));
+ }
+ return std::make_pair(0, static_cast<int>(DstIdx));
+ }))
return false;
return Success(R, E);
}
@@ -12331,16 +13139,244 @@ bool VectorExprEvaluator::VisitCallExpr(const CallExpr *E) {
case X86::BI__builtin_ia32_pshufhw256:
case X86::BI__builtin_ia32_pshufhw512: {
APValue R;
- if (!evalPshufBuiltin(Info, E, true, R))
+ if (!evalShuffleGeneric(
+ Info, E, R,
+ [](unsigned DstIdx, unsigned Mask) -> std::pair<unsigned, int> {
+ constexpr unsigned LaneBits = 128u;
+ constexpr unsigned ElemBits = 16u;
+ constexpr unsigned LaneElts = LaneBits / ElemBits;
+ constexpr unsigned HalfSize = 4;
+ unsigned LaneBase = (DstIdx / LaneElts) * LaneElts;
+ unsigned LaneIdx = DstIdx % LaneElts;
+ if (LaneIdx >= HalfSize) {
+ unsigned Rel = LaneIdx - HalfSize;
+ unsigned Sel = (Mask >> (2 * Rel)) & 0x3;
+ return std::make_pair(
+ 0, static_cast<int>(LaneBase + HalfSize + Sel));
+ }
+ return std::make_pair(0, static_cast<int>(DstIdx));
+ }))
return false;
return Success(R, E);
}
case X86::BI__builtin_ia32_pshufd:
case X86::BI__builtin_ia32_pshufd256:
- case X86::BI__builtin_ia32_pshufd512: {
+ case X86::BI__builtin_ia32_pshufd512:
+ case X86::BI__builtin_ia32_vpermilps:
+ case X86::BI__builtin_ia32_vpermilps256:
+ case X86::BI__builtin_ia32_vpermilps512: {
APValue R;
- if (!evalPshufBuiltin(Info, E, false, R))
+ if (!evalShuffleGeneric(
+ Info, E, R,
+ [](unsigned DstIdx, unsigned Mask) -> std::pair<unsigned, int> {
+ constexpr unsigned LaneBits = 128u;
+ constexpr unsigned ElemBits = 32u;
+ constexpr unsigned LaneElts = LaneBits / ElemBits;
+ unsigned LaneBase = (DstIdx / LaneElts) * LaneElts;
+ unsigned LaneIdx = DstIdx % LaneElts;
+ unsigned Sel = (Mask >> (2 * LaneIdx)) & 0x3;
+ return std::make_pair(0, static_cast<int>(LaneBase + Sel));
+ }))
+ return false;
+ return Success(R, E);
+ }
+
+ case X86::BI__builtin_ia32_vpermilvarpd:
+ case X86::BI__builtin_ia32_vpermilvarpd256:
+ case X86::BI__builtin_ia32_vpermilvarpd512: {
+ APValue R;
+ if (!evalShuffleGeneric(
+ Info, E, R,
+ [](unsigned DstIdx, unsigned Mask) -> std::pair<unsigned, int> {
+ unsigned NumElemPerLane = 2;
+ unsigned Lane = DstIdx / NumElemPerLane;
+ unsigned Offset = Mask & 0b10 ? 1 : 0;
+ return std::make_pair(
+ 0, static_cast<int>(Lane * NumElemPerLane + Offset));
+ }))
+ return false;
+ return Success(R, E);
+ }
+
+ case X86::BI__builtin_ia32_vpermilpd:
+ case X86::BI__builtin_ia32_vpermilpd256:
+ case X86::BI__builtin_ia32_vpermilpd512: {
+ APValue R;
+ if (!evalShuffleGeneric(Info, E, R, [](unsigned DstIdx, unsigned Control) {
+ unsigned NumElemPerLane = 2;
+ unsigned BitsPerElem = 1;
+ unsigned MaskBits = 8;
+ unsigned IndexMask = 0x1;
+ unsigned Lane = DstIdx / NumElemPerLane;
+ unsigned LaneOffset = Lane * NumElemPerLane;
+ unsigned BitIndex = (DstIdx * BitsPerElem) % MaskBits;
+ unsigned Index = (Control >> BitIndex) & IndexMask;
+ return std::make_pair(0, static_cast<int>(LaneOffset + Index));
+ }))
+ return false;
+ return Success(R, E);
+ }
+
+ case X86::BI__builtin_ia32_permdf256:
+ case X86::BI__builtin_ia32_permdi256: {
+ APValue R;
+ if (!evalShuffleGeneric(Info, E, R, [](unsigned DstIdx, unsigned Control) {
+ // permute4x64 operates on 4 64-bit elements
+ // For element i (0-3), extract bits [2*i+1:2*i] from Control
+ unsigned Index = (Control >> (2 * DstIdx)) & 0x3;
+ return std::make_pair(0, static_cast<int>(Index));
+ }))
+ return false;
+ return Success(R, E);
+ }
+
+ case X86::BI__builtin_ia32_vpermilvarps:
+ case X86::BI__builtin_ia32_vpermilvarps256:
+ case X86::BI__builtin_ia32_vpermilvarps512: {
+ APValue R;
+ if (!evalShuffleGeneric(
+ Info, E, R,
+ [](unsigned DstIdx, unsigned Mask) -> std::pair<unsigned, int> {
+ unsigned NumElemPerLane = 4;
+ unsigned Lane = DstIdx / NumElemPerLane;
+ unsigned Offset = Mask & 0b11;
+ return std::make_pair(
+ 0, static_cast<int>(Lane * NumElemPerLane + Offset));
+ }))
+ return false;
+ return Success(R, E);
+ }
+
+ case X86::BI__builtin_ia32_vpmultishiftqb128:
+ case X86::BI__builtin_ia32_vpmultishiftqb256:
+ case X86::BI__builtin_ia32_vpmultishiftqb512: {
+ assert(E->getNumArgs() == 2);
+
+ APValue A, B;
+ if (!Evaluate(A, Info, E->getArg(0)) || !Evaluate(B, Info, E->getArg(1)))
+ return false;
+
+ assert(A.getVectorLength() == B.getVectorLength());
+ unsigned NumBytesInQWord = 8;
+ unsigned NumBitsInByte = 8;
+ unsigned NumBytes = A.getVectorLength();
+ unsigned NumQWords = NumBytes / NumBytesInQWord;
+ SmallVector<APValue, 64> Result;
+ Result.reserve(NumBytes);
+
+ for (unsigned QWordId = 0; QWordId != NumQWords; ++QWordId) {
+ APInt BQWord(64, 0);
+ for (unsigned ByteIdx = 0; ByteIdx != NumBytesInQWord; ++ByteIdx) {
+ unsigned Idx = QWordId * NumBytesInQWord + ByteIdx;
+ uint64_t Byte = B.getVectorElt(Idx).getInt().getZExtValue();
+ BQWord.insertBits(APInt(8, Byte & 0xFF), ByteIdx * NumBitsInByte);
+ }
+
+ for (unsigned ByteIdx = 0; ByteIdx != NumBytesInQWord; ++ByteIdx) {
+ unsigned Idx = QWordId * NumBytesInQWord + ByteIdx;
+ uint64_t Ctrl = A.getVectorElt(Idx).getInt().getZExtValue() & 0x3F;
+
+ APInt Byte(8, 0);
+ for (unsigned BitIdx = 0; BitIdx != NumBitsInByte; ++BitIdx) {
+ Byte.setBitVal(BitIdx, BQWord[(Ctrl + BitIdx) & 0x3F]);
+ }
+ Result.push_back(APValue(APSInt(Byte, /*isUnsigned*/ true)));
+ }
+ }
+ return Success(APValue(Result.data(), Result.size()), E);
+ }
+
+ case X86::BI__builtin_ia32_phminposuw128: {
+ APValue Source;
+ if (!Evaluate(Source, Info, E->getArg(0)))
+ return false;
+ unsigned SourceLen = Source.getVectorLength();
+ const VectorType *VT = E->getArg(0)->getType()->castAs<VectorType>();
+ QualType ElemQT = VT->getElementType();
+ unsigned ElemBitWidth = Info.Ctx.getTypeSize(ElemQT);
+
+ APInt MinIndex(ElemBitWidth, 0);
+ APInt MinVal = Source.getVectorElt(0).getInt();
+ for (unsigned I = 1; I != SourceLen; ++I) {
+ APInt Val = Source.getVectorElt(I).getInt();
+ if (MinVal.ugt(Val)) {
+ MinVal = Val;
+ MinIndex = I;
+ }
+ }
+
+ bool ResultUnsigned = E->getCallReturnType(Info.Ctx)
+ ->castAs<VectorType>()
+ ->getElementType()
+ ->isUnsignedIntegerOrEnumerationType();
+
+ SmallVector<APValue, 8> Result;
+ Result.reserve(SourceLen);
+ Result.emplace_back(APSInt(MinVal, ResultUnsigned));
+ Result.emplace_back(APSInt(MinIndex, ResultUnsigned));
+ for (unsigned I = 0; I != SourceLen - 2; ++I) {
+ Result.emplace_back(APSInt(APInt(ElemBitWidth, 0), ResultUnsigned));
+ }
+ return Success(APValue(Result.data(), Result.size()), E);
+ }
+
+ case X86::BI__builtin_ia32_psraq128:
+ case X86::BI__builtin_ia32_psraq256:
+ case X86::BI__builtin_ia32_psraq512:
+ case X86::BI__builtin_ia32_psrad128:
+ case X86::BI__builtin_ia32_psrad256:
+ case X86::BI__builtin_ia32_psrad512:
+ case X86::BI__builtin_ia32_psraw128:
+ case X86::BI__builtin_ia32_psraw256:
+ case X86::BI__builtin_ia32_psraw512: {
+ APValue R;
+ if (!evalShiftWithCount(
+ Info, E, R,
+ [](const APInt &Elt, uint64_t Count) { return Elt.ashr(Count); },
+ [](const APInt &Elt, unsigned Width) {
+ return Elt.ashr(Width - 1);
+ }))
+ return false;
+ return Success(R, E);
+ }
+
+ case X86::BI__builtin_ia32_psllq128:
+ case X86::BI__builtin_ia32_psllq256:
+ case X86::BI__builtin_ia32_psllq512:
+ case X86::BI__builtin_ia32_pslld128:
+ case X86::BI__builtin_ia32_pslld256:
+ case X86::BI__builtin_ia32_pslld512:
+ case X86::BI__builtin_ia32_psllw128:
+ case X86::BI__builtin_ia32_psllw256:
+ case X86::BI__builtin_ia32_psllw512: {
+ APValue R;
+ if (!evalShiftWithCount(
+ Info, E, R,
+ [](const APInt &Elt, uint64_t Count) { return Elt.shl(Count); },
+ [](const APInt &Elt, unsigned Width) {
+ return APInt::getZero(Width);
+ }))
+ return false;
+ return Success(R, E);
+ }
+
+ case X86::BI__builtin_ia32_psrlq128:
+ case X86::BI__builtin_ia32_psrlq256:
+ case X86::BI__builtin_ia32_psrlq512:
+ case X86::BI__builtin_ia32_psrld128:
+ case X86::BI__builtin_ia32_psrld256:
+ case X86::BI__builtin_ia32_psrld512:
+ case X86::BI__builtin_ia32_psrlw128:
+ case X86::BI__builtin_ia32_psrlw256:
+ case X86::BI__builtin_ia32_psrlw512: {
+ APValue R;
+ if (!evalShiftWithCount(
+ Info, E, R,
+ [](const APInt &Elt, uint64_t Count) { return Elt.lshr(Count); },
+ [](const APInt &Elt, unsigned Width) {
+ return APInt::getZero(Width);
+ }))
return false;
return Success(R, E);
}
@@ -12669,6 +13705,90 @@ bool VectorExprEvaluator::VisitCallExpr(const CallExpr *E) {
}
return Success(APValue(ResultElements.data(), ResultElements.size()), E);
}
+ case clang::X86::BI__builtin_ia32_addsubpd:
+ case clang::X86::BI__builtin_ia32_addsubps:
+ case clang::X86::BI__builtin_ia32_addsubpd256:
+ case clang::X86::BI__builtin_ia32_addsubps256: {
+ // Addsub: alternates between subtraction and addition
+ // Result[i] = (i % 2 == 0) ? (a[i] - b[i]) : (a[i] + b[i])
+ APValue SourceLHS, SourceRHS;
+ if (!EvaluateAsRValue(Info, E->getArg(0), SourceLHS) ||
+ !EvaluateAsRValue(Info, E->getArg(1), SourceRHS))
+ return false;
+ unsigned NumElems = SourceLHS.getVectorLength();
+ SmallVector<APValue, 8> ResultElements;
+ ResultElements.reserve(NumElems);
+ llvm::RoundingMode RM = getActiveRoundingMode(getEvalInfo(), E);
+
+ for (unsigned I = 0; I != NumElems; ++I) {
+ APFloat LHS = SourceLHS.getVectorElt(I).getFloat();
+ APFloat RHS = SourceRHS.getVectorElt(I).getFloat();
+ if (I % 2 == 0) {
+ // Even indices: subtract
+ LHS.subtract(RHS, RM);
+ } else {
+ // Odd indices: add
+ LHS.add(RHS, RM);
+ }
+ ResultElements.push_back(APValue(LHS));
+ }
+ return Success(APValue(ResultElements.data(), ResultElements.size()), E);
+ }
+ case clang::X86::BI__builtin_ia32_pclmulqdq128:
+ case clang::X86::BI__builtin_ia32_pclmulqdq256:
+ case clang::X86::BI__builtin_ia32_pclmulqdq512: {
+ // PCLMULQDQ: carry-less multiplication of selected 64-bit halves
+ // imm8 bit 0: selects lower (0) or upper (1) 64 bits of first operand
+ // imm8 bit 4: selects lower (0) or upper (1) 64 bits of second operand
+ APValue SourceLHS, SourceRHS;
+ if (!EvaluateAsRValue(Info, E->getArg(0), SourceLHS) ||
+ !EvaluateAsRValue(Info, E->getArg(1), SourceRHS))
+ return false;
+
+ APSInt Imm8;
+ if (!EvaluateInteger(E->getArg(2), Imm8, Info))
+ return false;
+
+ // Extract bits 0 and 4 from imm8
+ bool SelectUpperA = (Imm8 & 0x01) != 0;
+ bool SelectUpperB = (Imm8 & 0x10) != 0;
+
+ unsigned NumElems = SourceLHS.getVectorLength();
+ SmallVector<APValue, 8> ResultElements;
+ ResultElements.reserve(NumElems);
+ QualType DestEltTy = E->getType()->castAs<VectorType>()->getElementType();
+ bool DestUnsigned = DestEltTy->isUnsignedIntegerOrEnumerationType();
+
+ // Process each 128-bit lane
+ for (unsigned Lane = 0; Lane < NumElems; Lane += 2) {
+ // Get the two 64-bit halves of the first operand
+ APSInt A0 = SourceLHS.getVectorElt(Lane + 0).getInt();
+ APSInt A1 = SourceLHS.getVectorElt(Lane + 1).getInt();
+ // Get the two 64-bit halves of the second operand
+ APSInt B0 = SourceRHS.getVectorElt(Lane + 0).getInt();
+ APSInt B1 = SourceRHS.getVectorElt(Lane + 1).getInt();
+
+ // Select the appropriate 64-bit values based on imm8
+ APInt A = SelectUpperA ? A1 : A0;
+ APInt B = SelectUpperB ? B1 : B0;
+
+ // Extend both operands to 128 bits for carry-less multiplication
+ APInt A128 = A.zext(128);
+ APInt B128 = B.zext(128);
+
+ // Use APIntOps::clmul for carry-less multiplication
+ APInt Result = llvm::APIntOps::clmul(A128, B128);
+
+ // Split the 128-bit result into two 64-bit halves
+ APSInt ResultLow(Result.extractBits(64, 0), DestUnsigned);
+ APSInt ResultHigh(Result.extractBits(64, 64), DestUnsigned);
+
+ ResultElements.push_back(APValue(ResultLow));
+ ResultElements.push_back(APValue(ResultHigh));
+ }
+
+ return Success(APValue(ResultElements.data(), ResultElements.size()), E);
+ }
case Builtin::BI__builtin_elementwise_fshl:
case Builtin::BI__builtin_elementwise_fshr: {
APValue SourceHi, SourceLo, SourceShift;
@@ -12703,6 +13823,139 @@ bool VectorExprEvaluator::VisitCallExpr(const CallExpr *E) {
return Success(APValue(ResultElements.data(), ResultElements.size()), E);
}
+ case X86::BI__builtin_ia32_shuf_f32x4_256:
+ case X86::BI__builtin_ia32_shuf_i32x4_256:
+ case X86::BI__builtin_ia32_shuf_f64x2_256:
+ case X86::BI__builtin_ia32_shuf_i64x2_256:
+ case X86::BI__builtin_ia32_shuf_f32x4:
+ case X86::BI__builtin_ia32_shuf_i32x4:
+ case X86::BI__builtin_ia32_shuf_f64x2:
+ case X86::BI__builtin_ia32_shuf_i64x2: {
+ APValue SourceA, SourceB;
+ if (!EvaluateAsRValue(Info, E->getArg(0), SourceA) ||
+ !EvaluateAsRValue(Info, E->getArg(1), SourceB))
+ return false;
+
+ APSInt Imm;
+ if (!EvaluateInteger(E->getArg(2), Imm, Info))
+ return false;
+
+ // Destination and sources A, B all have the same type.
+ unsigned NumElems = SourceA.getVectorLength();
+ const VectorType *VT = E->getArg(0)->getType()->castAs<VectorType>();
+ QualType ElemQT = VT->getElementType();
+ unsigned ElemBits = Info.Ctx.getTypeSize(ElemQT);
+ unsigned LaneBits = 128u;
+ unsigned NumLanes = (NumElems * ElemBits) / LaneBits;
+ unsigned NumElemsPerLane = LaneBits / ElemBits;
+
+ unsigned DstLen = SourceA.getVectorLength();
+ SmallVector<APValue, 16> ResultElements;
+ ResultElements.reserve(DstLen);
+
+ APValue R;
+ if (!evalShuffleGeneric(
+ Info, E, R,
+ [NumLanes, NumElemsPerLane](unsigned DstIdx, unsigned ShuffleMask)
+ -> std::pair<unsigned, int> {
+ // DstIdx determines source. ShuffleMask selects lane in source.
+ unsigned BitsPerElem = NumLanes / 2;
+ unsigned IndexMask = (1u << BitsPerElem) - 1;
+ unsigned Lane = DstIdx / NumElemsPerLane;
+ unsigned SrcIdx = (Lane < NumLanes / 2) ? 0 : 1;
+ unsigned BitIdx = BitsPerElem * Lane;
+ unsigned SrcLaneIdx = (ShuffleMask >> BitIdx) & IndexMask;
+ unsigned ElemInLane = DstIdx % NumElemsPerLane;
+ unsigned IdxToPick = SrcLaneIdx * NumElemsPerLane + ElemInLane;
+ return {SrcIdx, IdxToPick};
+ }))
+ return false;
+ return Success(R, E);
+ }
+
+ case X86::BI__builtin_ia32_vgf2p8affineinvqb_v16qi:
+ case X86::BI__builtin_ia32_vgf2p8affineinvqb_v32qi:
+ case X86::BI__builtin_ia32_vgf2p8affineinvqb_v64qi:
+ case X86::BI__builtin_ia32_vgf2p8affineqb_v16qi:
+ case X86::BI__builtin_ia32_vgf2p8affineqb_v32qi:
+ case X86::BI__builtin_ia32_vgf2p8affineqb_v64qi: {
+
+ APValue X, A;
+ APSInt Imm;
+ if (!EvaluateAsRValue(Info, E->getArg(0), X) ||
+ !EvaluateAsRValue(Info, E->getArg(1), A) ||
+ !EvaluateInteger(E->getArg(2), Imm, Info))
+ return false;
+
+ assert(X.isVector() && A.isVector());
+ assert(X.getVectorLength() == A.getVectorLength());
+
+ bool IsInverse = false;
+ switch (E->getBuiltinCallee()) {
+ case X86::BI__builtin_ia32_vgf2p8affineinvqb_v16qi:
+ case X86::BI__builtin_ia32_vgf2p8affineinvqb_v32qi:
+ case X86::BI__builtin_ia32_vgf2p8affineinvqb_v64qi: {
+ IsInverse = true;
+ }
+ }
+
+ unsigned NumBitsInByte = 8;
+ unsigned NumBytesInQWord = 8;
+ unsigned NumBitsInQWord = 64;
+ unsigned NumBytes = A.getVectorLength();
+ unsigned NumQWords = NumBytes / NumBytesInQWord;
+ SmallVector<APValue, 64> Result;
+ Result.reserve(NumBytes);
+
+ // computing A*X + Imm
+ for (unsigned QWordIdx = 0; QWordIdx != NumQWords; ++QWordIdx) {
+ // Extract the QWords from X, A
+ APInt XQWord(NumBitsInQWord, 0);
+ APInt AQWord(NumBitsInQWord, 0);
+ for (unsigned ByteIdx = 0; ByteIdx != NumBytesInQWord; ++ByteIdx) {
+ unsigned Idx = QWordIdx * NumBytesInQWord + ByteIdx;
+ APInt XByte = X.getVectorElt(Idx).getInt();
+ APInt AByte = A.getVectorElt(Idx).getInt();
+ XQWord.insertBits(XByte, ByteIdx * NumBitsInByte);
+ AQWord.insertBits(AByte, ByteIdx * NumBitsInByte);
+ }
+
+ for (unsigned ByteIdx = 0; ByteIdx != NumBytesInQWord; ++ByteIdx) {
+ uint8_t XByte =
+ XQWord.lshr(ByteIdx * NumBitsInByte).getLoBits(8).getZExtValue();
+ Result.push_back(APValue(APSInt(
+ APInt(8, GFNIAffine(XByte, AQWord, Imm, IsInverse)), false)));
+ }
+ }
+
+ return Success(APValue(Result.data(), Result.size()), E);
+ }
+
+ case X86::BI__builtin_ia32_vgf2p8mulb_v16qi:
+ case X86::BI__builtin_ia32_vgf2p8mulb_v32qi:
+ case X86::BI__builtin_ia32_vgf2p8mulb_v64qi: {
+ APValue A, B;
+ if (!EvaluateAsRValue(Info, E->getArg(0), A) ||
+ !EvaluateAsRValue(Info, E->getArg(1), B))
+ return false;
+
+ assert(A.isVector() && B.isVector());
+ assert(A.getVectorLength() == B.getVectorLength());
+
+ unsigned NumBytes = A.getVectorLength();
+ SmallVector<APValue, 64> Result;
+ Result.reserve(NumBytes);
+
+ for (unsigned ByteIdx = 0; ByteIdx != NumBytes; ++ByteIdx) {
+ uint8_t AByte = A.getVectorElt(ByteIdx).getInt().getZExtValue();
+ uint8_t BByte = B.getVectorElt(ByteIdx).getInt().getZExtValue();
+ Result.push_back(APValue(
+ APSInt(APInt(8, GFNIMul(AByte, BByte)), /*IsUnsigned=*/false)));
+ }
+
+ return Success(APValue(Result.data(), Result.size()), E);
+ }
+
case X86::BI__builtin_ia32_insertf32x4_256:
case X86::BI__builtin_ia32_inserti32x4_256:
case X86::BI__builtin_ia32_insertf64x2_256:
@@ -12782,6 +14035,371 @@ bool VectorExprEvaluator::VisitCallExpr(const CallExpr *E) {
return Success(APValue(Elems.data(), NumElems), E);
}
+
+ case X86::BI__builtin_ia32_pslldqi128_byteshift:
+ case X86::BI__builtin_ia32_pslldqi256_byteshift:
+ case X86::BI__builtin_ia32_pslldqi512_byteshift: {
+ APValue R;
+ if (!evalShuffleGeneric(
+ Info, E, R,
+ [](unsigned DstIdx, unsigned Shift) -> std::pair<unsigned, int> {
+ unsigned LaneBase = (DstIdx / 16) * 16;
+ unsigned LaneIdx = DstIdx % 16;
+ if (LaneIdx < Shift)
+ return std::make_pair(0, -1);
+
+ return std::make_pair(
+ 0, static_cast<int>(LaneBase + LaneIdx - Shift));
+ }))
+ return false;
+ return Success(R, E);
+ }
+
+ case X86::BI__builtin_ia32_psrldqi128_byteshift:
+ case X86::BI__builtin_ia32_psrldqi256_byteshift:
+ case X86::BI__builtin_ia32_psrldqi512_byteshift: {
+ APValue R;
+ if (!evalShuffleGeneric(
+ Info, E, R,
+ [](unsigned DstIdx, unsigned Shift) -> std::pair<unsigned, int> {
+ unsigned LaneBase = (DstIdx / 16) * 16;
+ unsigned LaneIdx = DstIdx % 16;
+ if (LaneIdx + Shift < 16)
+ return std::make_pair(
+ 0, static_cast<int>(LaneBase + LaneIdx + Shift));
+
+ return std::make_pair(0, -1);
+ }))
+ return false;
+ return Success(R, E);
+ }
+
+ case X86::BI__builtin_ia32_palignr128:
+ case X86::BI__builtin_ia32_palignr256:
+ case X86::BI__builtin_ia32_palignr512: {
+ APValue R;
+ if (!evalShuffleGeneric(Info, E, R, [](unsigned DstIdx, unsigned Shift) {
+ // Default to -1 → zero-fill this destination element
+ unsigned VecIdx = 1;
+ int ElemIdx = -1;
+
+ int Lane = DstIdx / 16;
+ int Offset = DstIdx % 16;
+
+ // Elements come from VecB first, then VecA after the shift boundary
+ unsigned ShiftedIdx = Offset + (Shift & 0xFF);
+ if (ShiftedIdx < 16) { // from VecB
+ ElemIdx = ShiftedIdx + (Lane * 16);
+ } else if (ShiftedIdx < 32) { // from VecA
+ VecIdx = 0;
+ ElemIdx = (ShiftedIdx - 16) + (Lane * 16);
+ }
+
+ return std::pair<unsigned, int>{VecIdx, ElemIdx};
+ }))
+ return false;
+ return Success(R, E);
+ }
+ case X86::BI__builtin_ia32_alignd128:
+ case X86::BI__builtin_ia32_alignd256:
+ case X86::BI__builtin_ia32_alignd512:
+ case X86::BI__builtin_ia32_alignq128:
+ case X86::BI__builtin_ia32_alignq256:
+ case X86::BI__builtin_ia32_alignq512: {
+ APValue R;
+ unsigned NumElems = E->getType()->castAs<VectorType>()->getNumElements();
+ if (!evalShuffleGeneric(Info, E, R,
+ [NumElems](unsigned DstIdx, unsigned Shift) {
+ unsigned Imm = Shift & 0xFF;
+ unsigned EffectiveShift = Imm & (NumElems - 1);
+ unsigned SourcePos = DstIdx + EffectiveShift;
+ unsigned VecIdx = SourcePos < NumElems ? 1 : 0;
+ unsigned ElemIdx = SourcePos & (NumElems - 1);
+
+ return std::pair<unsigned, int>{
+ VecIdx, static_cast<int>(ElemIdx)};
+ }))
+ return false;
+ return Success(R, E);
+ }
+ case X86::BI__builtin_ia32_permvarsi256:
+ case X86::BI__builtin_ia32_permvarsf256:
+ case X86::BI__builtin_ia32_permvardf512:
+ case X86::BI__builtin_ia32_permvardi512:
+ case X86::BI__builtin_ia32_permvarhi128: {
+ APValue R;
+ if (!evalShuffleGeneric(Info, E, R,
+ [](unsigned DstIdx, unsigned ShuffleMask) {
+ int Offset = ShuffleMask & 0x7;
+ return std::pair<unsigned, int>{0, Offset};
+ }))
+ return false;
+ return Success(R, E);
+ }
+ case X86::BI__builtin_ia32_permvarqi128:
+ case X86::BI__builtin_ia32_permvarhi256:
+ case X86::BI__builtin_ia32_permvarsi512:
+ case X86::BI__builtin_ia32_permvarsf512: {
+ APValue R;
+ if (!evalShuffleGeneric(Info, E, R,
+ [](unsigned DstIdx, unsigned ShuffleMask) {
+ int Offset = ShuffleMask & 0xF;
+ return std::pair<unsigned, int>{0, Offset};
+ }))
+ return false;
+ return Success(R, E);
+ }
+ case X86::BI__builtin_ia32_permvardi256:
+ case X86::BI__builtin_ia32_permvardf256: {
+ APValue R;
+ if (!evalShuffleGeneric(Info, E, R,
+ [](unsigned DstIdx, unsigned ShuffleMask) {
+ int Offset = ShuffleMask & 0x3;
+ return std::pair<unsigned, int>{0, Offset};
+ }))
+ return false;
+ return Success(R, E);
+ }
+ case X86::BI__builtin_ia32_permvarqi256:
+ case X86::BI__builtin_ia32_permvarhi512: {
+ APValue R;
+ if (!evalShuffleGeneric(Info, E, R,
+ [](unsigned DstIdx, unsigned ShuffleMask) {
+ int Offset = ShuffleMask & 0x1F;
+ return std::pair<unsigned, int>{0, Offset};
+ }))
+ return false;
+ return Success(R, E);
+ }
+ case X86::BI__builtin_ia32_permvarqi512: {
+ APValue R;
+ if (!evalShuffleGeneric(Info, E, R,
+ [](unsigned DstIdx, unsigned ShuffleMask) {
+ int Offset = ShuffleMask & 0x3F;
+ return std::pair<unsigned, int>{0, Offset};
+ }))
+ return false;
+ return Success(R, E);
+ }
+ case X86::BI__builtin_ia32_vpermi2varq128:
+ case X86::BI__builtin_ia32_vpermi2varpd128: {
+ APValue R;
+ if (!evalShuffleGeneric(Info, E, R,
+ [](unsigned DstIdx, unsigned ShuffleMask) {
+ int Offset = ShuffleMask & 0x1;
+ unsigned SrcIdx = (ShuffleMask >> 1) & 0x1;
+ return std::pair<unsigned, int>{SrcIdx, Offset};
+ }))
+ return false;
+ return Success(R, E);
+ }
+ case X86::BI__builtin_ia32_vpermi2vard128:
+ case X86::BI__builtin_ia32_vpermi2varps128:
+ case X86::BI__builtin_ia32_vpermi2varq256:
+ case X86::BI__builtin_ia32_vpermi2varpd256: {
+ APValue R;
+ if (!evalShuffleGeneric(Info, E, R,
+ [](unsigned DstIdx, unsigned ShuffleMask) {
+ int Offset = ShuffleMask & 0x3;
+ unsigned SrcIdx = (ShuffleMask >> 2) & 0x1;
+ return std::pair<unsigned, int>{SrcIdx, Offset};
+ }))
+ return false;
+ return Success(R, E);
+ }
+ case X86::BI__builtin_ia32_vpermi2varhi128:
+ case X86::BI__builtin_ia32_vpermi2vard256:
+ case X86::BI__builtin_ia32_vpermi2varps256:
+ case X86::BI__builtin_ia32_vpermi2varq512:
+ case X86::BI__builtin_ia32_vpermi2varpd512: {
+ APValue R;
+ if (!evalShuffleGeneric(Info, E, R,
+ [](unsigned DstIdx, unsigned ShuffleMask) {
+ int Offset = ShuffleMask & 0x7;
+ unsigned SrcIdx = (ShuffleMask >> 3) & 0x1;
+ return std::pair<unsigned, int>{SrcIdx, Offset};
+ }))
+ return false;
+ return Success(R, E);
+ }
+ case X86::BI__builtin_ia32_vpermi2varqi128:
+ case X86::BI__builtin_ia32_vpermi2varhi256:
+ case X86::BI__builtin_ia32_vpermi2vard512:
+ case X86::BI__builtin_ia32_vpermi2varps512: {
+ APValue R;
+ if (!evalShuffleGeneric(Info, E, R,
+ [](unsigned DstIdx, unsigned ShuffleMask) {
+ int Offset = ShuffleMask & 0xF;
+ unsigned SrcIdx = (ShuffleMask >> 4) & 0x1;
+ return std::pair<unsigned, int>{SrcIdx, Offset};
+ }))
+ return false;
+ return Success(R, E);
+ }
+ case X86::BI__builtin_ia32_vpermi2varqi256:
+ case X86::BI__builtin_ia32_vpermi2varhi512: {
+ APValue R;
+ if (!evalShuffleGeneric(Info, E, R,
+ [](unsigned DstIdx, unsigned ShuffleMask) {
+ int Offset = ShuffleMask & 0x1F;
+ unsigned SrcIdx = (ShuffleMask >> 5) & 0x1;
+ return std::pair<unsigned, int>{SrcIdx, Offset};
+ }))
+ return false;
+ return Success(R, E);
+ }
+ case X86::BI__builtin_ia32_vpermi2varqi512: {
+ APValue R;
+ if (!evalShuffleGeneric(Info, E, R,
+ [](unsigned DstIdx, unsigned ShuffleMask) {
+ int Offset = ShuffleMask & 0x3F;
+ unsigned SrcIdx = (ShuffleMask >> 6) & 0x1;
+ return std::pair<unsigned, int>{SrcIdx, Offset};
+ }))
+ return false;
+ return Success(R, E);
+ }
+
+ case clang::X86::BI__builtin_ia32_minps:
+ case clang::X86::BI__builtin_ia32_minpd:
+ case clang::X86::BI__builtin_ia32_minps256:
+ case clang::X86::BI__builtin_ia32_minpd256:
+ case clang::X86::BI__builtin_ia32_minps512:
+ case clang::X86::BI__builtin_ia32_minpd512:
+ case clang::X86::BI__builtin_ia32_minph128:
+ case clang::X86::BI__builtin_ia32_minph256:
+ case clang::X86::BI__builtin_ia32_minph512:
+ return EvaluateFpBinOpExpr(
+ [](const APFloat &A, const APFloat &B,
+ std::optional<APSInt>) -> std::optional<APFloat> {
+ if (A.isNaN() || A.isInfinity() || A.isDenormal() || B.isNaN() ||
+ B.isInfinity() || B.isDenormal())
+ return std::nullopt;
+ if (A.isZero() && B.isZero())
+ return B;
+ return llvm::minimum(A, B);
+ });
+
+ case clang::X86::BI__builtin_ia32_maxps:
+ case clang::X86::BI__builtin_ia32_maxpd:
+ case clang::X86::BI__builtin_ia32_maxps256:
+ case clang::X86::BI__builtin_ia32_maxpd256:
+ case clang::X86::BI__builtin_ia32_maxps512:
+ case clang::X86::BI__builtin_ia32_maxpd512:
+ case clang::X86::BI__builtin_ia32_maxph128:
+ case clang::X86::BI__builtin_ia32_maxph256:
+ case clang::X86::BI__builtin_ia32_maxph512:
+ return EvaluateFpBinOpExpr(
+ [](const APFloat &A, const APFloat &B,
+ std::optional<APSInt>) -> std::optional<APFloat> {
+ if (A.isNaN() || A.isInfinity() || A.isDenormal() || B.isNaN() ||
+ B.isInfinity() || B.isDenormal())
+ return std::nullopt;
+ if (A.isZero() && B.isZero())
+ return B;
+ return llvm::maximum(A, B);
+ });
+
+ case clang::X86::BI__builtin_ia32_vcvtps2ph:
+ case clang::X86::BI__builtin_ia32_vcvtps2ph256: {
+ APValue SrcVec;
+ if (!EvaluateAsRValue(Info, E->getArg(0), SrcVec))
+ return false;
+
+ APSInt Imm;
+ if (!EvaluateInteger(E->getArg(1), Imm, Info))
+ return false;
+
+ const auto *SrcVTy = E->getArg(0)->getType()->castAs<VectorType>();
+ unsigned SrcNumElems = SrcVTy->getNumElements();
+ const auto *DstVTy = E->getType()->castAs<VectorType>();
+ unsigned DstNumElems = DstVTy->getNumElements();
+ QualType DstElemTy = DstVTy->getElementType();
+
+ const llvm::fltSemantics &HalfSem =
+ Info.Ctx.getFloatTypeSemantics(Info.Ctx.HalfTy);
+
+ int ImmVal = Imm.getZExtValue();
+ bool UseMXCSR = (ImmVal & 4) != 0;
+ bool IsFPConstrained =
+ E->getFPFeaturesInEffect(Info.Ctx.getLangOpts()).isFPConstrained();
+
+ llvm::RoundingMode RM;
+ if (!UseMXCSR) {
+ switch (ImmVal & 3) {
+ case 0:
+ RM = llvm::RoundingMode::NearestTiesToEven;
+ break;
+ case 1:
+ RM = llvm::RoundingMode::TowardNegative;
+ break;
+ case 2:
+ RM = llvm::RoundingMode::TowardPositive;
+ break;
+ case 3:
+ RM = llvm::RoundingMode::TowardZero;
+ break;
+ default:
+ llvm_unreachable("Invalid immediate rounding mode");
+ }
+ } else {
+ RM = llvm::RoundingMode::NearestTiesToEven;
+ }
+
+ SmallVector<APValue, 8> ResultElements;
+ ResultElements.reserve(DstNumElems);
+
+ for (unsigned I = 0; I < SrcNumElems; ++I) {
+ APFloat SrcVal = SrcVec.getVectorElt(I).getFloat();
+
+ bool LostInfo;
+ APFloat::opStatus St = SrcVal.convert(HalfSem, RM, &LostInfo);
+
+ if (UseMXCSR && IsFPConstrained && St != APFloat::opOK) {
+ Info.FFDiag(E, diag::note_constexpr_dynamic_rounding);
+ return false;
+ }
+
+ APSInt DstInt(SrcVal.bitcastToAPInt(),
+ DstElemTy->isUnsignedIntegerOrEnumerationType());
+ ResultElements.push_back(APValue(DstInt));
+ }
+
+ if (DstNumElems > SrcNumElems) {
+ APSInt Zero = Info.Ctx.MakeIntValue(0, DstElemTy);
+ for (unsigned I = SrcNumElems; I < DstNumElems; ++I) {
+ ResultElements.push_back(APValue(Zero));
+ }
+ }
+
+ return Success(ResultElements, E);
+ }
+ case X86::BI__builtin_ia32_vperm2f128_pd256:
+ case X86::BI__builtin_ia32_vperm2f128_ps256:
+ case X86::BI__builtin_ia32_vperm2f128_si256:
+ case X86::BI__builtin_ia32_permti256: {
+ unsigned NumElements =
+ E->getArg(0)->getType()->getAs<VectorType>()->getNumElements();
+ unsigned PreservedBitsCnt = NumElements >> 2;
+ APValue R;
+ if (!evalShuffleGeneric(
+ Info, E, R,
+ [PreservedBitsCnt](unsigned DstIdx, unsigned ShuffleMask) {
+ unsigned ControlBitsCnt = DstIdx >> PreservedBitsCnt << 2;
+ unsigned ControlBits = ShuffleMask >> ControlBitsCnt;
+
+ if (ControlBits & 0b1000)
+ return std::make_pair(0u, -1);
+
+ unsigned SrcVecIdx = (ControlBits & 0b10) >> 1;
+ unsigned PreservedBitsMask = (1 << PreservedBitsCnt) - 1;
+ int SrcIdx = ((ControlBits & 0b1) << PreservedBitsCnt) |
+ (DstIdx & PreservedBitsMask);
+ return std::make_pair(SrcVecIdx, SrcIdx);
+ }))
+ return false;
+ return Success(R, E);
+ }
}
}
@@ -12920,6 +14538,7 @@ namespace {
bool VisitCallExpr(const CallExpr *E) {
return handleCallExpr(E, Result, &This);
}
+ bool VisitCastExpr(const CastExpr *E);
bool VisitInitListExpr(const InitListExpr *E,
QualType AllocType = QualType());
bool VisitArrayInitLoopExpr(const ArrayInitLoopExpr *E);
@@ -12990,6 +14609,49 @@ static bool MaybeElementDependentArrayFiller(const Expr *FillerExpr) {
return true;
}
+bool ArrayExprEvaluator::VisitCastExpr(const CastExpr *E) {
+ const Expr *SE = E->getSubExpr();
+
+ switch (E->getCastKind()) {
+ default:
+ return ExprEvaluatorBaseTy::VisitCastExpr(E);
+ case CK_HLSLAggregateSplatCast: {
+ APValue Val;
+ QualType ValTy;
+
+ if (!hlslAggSplatHelper(Info, SE, Val, ValTy))
+ return false;
+
+ unsigned NEls = elementwiseSize(Info, E->getType());
+
+ SmallVector<APValue> SplatEls(NEls, Val);
+ SmallVector<QualType> SplatType(NEls, ValTy);
+
+ // cast the elements
+ const FPOptions FPO = E->getFPFeaturesInEffect(Info.Ctx.getLangOpts());
+ if (!constructAggregate(Info, FPO, E, Result, E->getType(), SplatEls,
+ SplatType))
+ return false;
+
+ return true;
+ }
+ case CK_HLSLElementwiseCast: {
+ SmallVector<APValue> SrcEls;
+ SmallVector<QualType> SrcTypes;
+
+ if (!hlslElementwiseCastHelper(Info, SE, E->getType(), SrcEls, SrcTypes))
+ return false;
+
+ // cast the elements
+ const FPOptions FPO = E->getFPFeaturesInEffect(Info.Ctx.getLangOpts());
+ if (!constructAggregate(Info, FPO, E, Result, E->getType(), SrcEls,
+ SrcTypes))
+ return false;
+ return true;
+ }
+ }
+}
+
bool ArrayExprEvaluator::VisitInitListExpr(const InitListExpr *E,
QualType AllocType) {
const ConstantArrayType *CAT = Info.Ctx.getAsConstantArrayType(
@@ -14119,8 +15781,8 @@ static bool determineEndOffset(EvalInfo &Info, SourceLocation ExprLoc,
///
/// If @p WasError is non-null, this will report whether the failure to evaluate
/// is to be treated as an Error in IntExprEvaluator.
-static bool tryEvaluateBuiltinObjectSize(const Expr *E, unsigned Type,
- EvalInfo &Info, uint64_t &Size) {
+static std::optional<uint64_t>
+tryEvaluateBuiltinObjectSize(const Expr *E, unsigned Type, EvalInfo &Info) {
// Determine the denoted object.
LValue LVal;
{
@@ -14135,31 +15797,27 @@ static bool tryEvaluateBuiltinObjectSize(const Expr *E, unsigned Type,
// Expr::tryEvaluateObjectSize.
APValue RVal;
if (!EvaluateAsRValue(Info, E, RVal))
- return false;
+ return std::nullopt;
LVal.setFrom(Info.Ctx, RVal);
} else if (!EvaluatePointer(ignorePointerCastsAndParens(E), LVal, Info,
/*InvalidBaseOK=*/true))
- return false;
+ return std::nullopt;
}
// If we point to before the start of the object, there are no accessible
// bytes.
- if (LVal.getLValueOffset().isNegative()) {
- Size = 0;
- return true;
- }
+ if (LVal.getLValueOffset().isNegative())
+ return 0;
CharUnits EndOffset;
if (!determineEndOffset(Info, E->getExprLoc(), Type, LVal, EndOffset))
- return false;
+ return std::nullopt;
// If we've fallen outside of the end offset, just pretend there's nothing to
// write to/read from.
if (EndOffset <= LVal.getLValueOffset())
- Size = 0;
- else
- Size = (EndOffset - LVal.getLValueOffset()).getQuantity();
- return true;
+ return 0;
+ return (EndOffset - LVal.getLValueOffset()).getQuantity();
}
bool IntExprEvaluator::VisitCallExpr(const CallExpr *E) {
@@ -14245,10 +15903,44 @@ bool IntExprEvaluator::VisitBuiltinCallExpr(const CallExpr *E,
return Success(APValue(ResultInt), E);
};
+ auto HandleCRC32 = [&](unsigned DataBytes) -> bool {
+ APSInt CRC, Data;
+ if (!EvaluateInteger(E->getArg(0), CRC, Info) ||
+ !EvaluateInteger(E->getArg(1), Data, Info))
+ return false;
+
+ uint64_t CRCVal = CRC.getZExtValue();
+ uint64_t DataVal = Data.getZExtValue();
+
+ // CRC32C polynomial (iSCSI polynomial, bit-reversed)
+ static const uint32_t CRC32C_POLY = 0x82F63B78;
+
+ // Process each byte
+ uint32_t Result = static_cast<uint32_t>(CRCVal);
+ for (unsigned I = 0; I != DataBytes; ++I) {
+ uint8_t Byte = static_cast<uint8_t>((DataVal >> (I * 8)) & 0xFF);
+ Result ^= Byte;
+ for (int J = 0; J != 8; ++J) {
+ Result = (Result >> 1) ^ ((Result & 1) ? CRC32C_POLY : 0);
+ }
+ }
+
+ return Success(Result, E);
+ };
+
switch (BuiltinOp) {
default:
return false;
+ case X86::BI__builtin_ia32_crc32qi:
+ return HandleCRC32(1);
+ case X86::BI__builtin_ia32_crc32hi:
+ return HandleCRC32(2);
+ case X86::BI__builtin_ia32_crc32si:
+ return HandleCRC32(4);
+ case X86::BI__builtin_ia32_crc32di:
+ return HandleCRC32(8);
+
case Builtin::BI__builtin_dynamic_object_size:
case Builtin::BI__builtin_object_size: {
// The type was checked when we built the expression.
@@ -14256,9 +15948,9 @@ bool IntExprEvaluator::VisitBuiltinCallExpr(const CallExpr *E,
E->getArg(1)->EvaluateKnownConstInt(Info.Ctx).getZExtValue();
assert(Type <= 3 && "unexpected type");
- uint64_t Size;
- if (tryEvaluateBuiltinObjectSize(E->getArg(0), Type, Info, Size))
- return Success(Size, E);
+ if (std::optional<uint64_t> Size =
+ tryEvaluateBuiltinObjectSize(E->getArg(0), Type, Info))
+ return Success(*Size, E);
if (E->getArg(0)->HasSideEffects(Info.Ctx))
return Success((Type & 2) ? 0 : -1, E);
@@ -14345,6 +16037,7 @@ bool IntExprEvaluator::VisitBuiltinCallExpr(const CallExpr *E,
return Success(AlignedVal, E);
}
+ case Builtin::BI__builtin_bitreverseg:
case Builtin::BI__builtin_bitreverse8:
case Builtin::BI__builtin_bitreverse16:
case Builtin::BI__builtin_bitreverse32:
@@ -14356,13 +16049,15 @@ bool IntExprEvaluator::VisitBuiltinCallExpr(const CallExpr *E,
return Success(Val.reverseBits(), E);
}
-
+ case Builtin::BI__builtin_bswapg:
case Builtin::BI__builtin_bswap16:
case Builtin::BI__builtin_bswap32:
case Builtin::BI__builtin_bswap64: {
APSInt Val;
if (!EvaluateInteger(E->getArg(0), Val, Info))
return false;
+ if (Val.getBitWidth() == 8 || Val.getBitWidth() == 1)
+ return Success(Val, E);
return Success(Val.byteSwap(), E);
}
@@ -14540,6 +16235,28 @@ bool IntExprEvaluator::VisitBuiltinCallExpr(const CallExpr *E,
return Success(Result, E);
}
+ case Builtin::BI__builtin_infer_alloc_token: {
+ // If we fail to infer a type, this fails to be a constant expression; this
+ // can be checked with __builtin_constant_p(...).
+ QualType AllocType = infer_alloc::inferPossibleType(E, Info.Ctx, nullptr);
+ if (AllocType.isNull())
+ return Error(
+ E, diag::note_constexpr_infer_alloc_token_type_inference_failed);
+ auto ATMD = infer_alloc::getAllocTokenMetadata(AllocType, Info.Ctx);
+ if (!ATMD)
+ return Error(E, diag::note_constexpr_infer_alloc_token_no_metadata);
+ auto Mode =
+ Info.getLangOpts().AllocTokenMode.value_or(llvm::DefaultAllocTokenMode);
+ uint64_t BitWidth = Info.Ctx.getTypeSize(Info.Ctx.getSizeType());
+ auto MaxTokensOpt = Info.getLangOpts().AllocTokenMax;
+ uint64_t MaxTokens =
+ MaxTokensOpt.value_or(0) ? *MaxTokensOpt : (~0ULL >> (64 - BitWidth));
+ auto MaybeToken = llvm::getAllocToken(Mode, *ATMD, MaxTokens);
+ if (!MaybeToken)
+ return Error(E, diag::note_constexpr_infer_alloc_token_stateful_mode);
+ return Success(llvm::APInt(BitWidth, *MaybeToken), E);
+ }
+
case Builtin::BI__builtin_ffs:
case Builtin::BI__builtin_ffsl:
case Builtin::BI__builtin_ffsll: {
@@ -14720,34 +16437,46 @@ bool IntExprEvaluator::VisitBuiltinCallExpr(const CallExpr *E,
case Builtin::BI__builtin_rotateleft16:
case Builtin::BI__builtin_rotateleft32:
case Builtin::BI__builtin_rotateleft64:
- case Builtin::BI_rotl8: // Microsoft variants of rotate right
- case Builtin::BI_rotl16:
- case Builtin::BI_rotl:
- case Builtin::BI_lrotl:
- case Builtin::BI_rotl64: {
- APSInt Val, Amt;
- if (!EvaluateInteger(E->getArg(0), Val, Info) ||
- !EvaluateInteger(E->getArg(1), Amt, Info))
- return false;
-
- return Success(Val.rotl(Amt), E);
- }
-
case Builtin::BI__builtin_rotateright8:
case Builtin::BI__builtin_rotateright16:
case Builtin::BI__builtin_rotateright32:
case Builtin::BI__builtin_rotateright64:
+ case Builtin::BI__builtin_stdc_rotate_left:
+ case Builtin::BI__builtin_stdc_rotate_right:
+ case Builtin::BI_rotl8: // Microsoft variants of rotate left
+ case Builtin::BI_rotl16:
+ case Builtin::BI_rotl:
+ case Builtin::BI_lrotl:
+ case Builtin::BI_rotl64:
case Builtin::BI_rotr8: // Microsoft variants of rotate right
case Builtin::BI_rotr16:
case Builtin::BI_rotr:
case Builtin::BI_lrotr:
case Builtin::BI_rotr64: {
- APSInt Val, Amt;
- if (!EvaluateInteger(E->getArg(0), Val, Info) ||
- !EvaluateInteger(E->getArg(1), Amt, Info))
+ APSInt Value, Amount;
+ if (!EvaluateInteger(E->getArg(0), Value, Info) ||
+ !EvaluateInteger(E->getArg(1), Amount, Info))
return false;
- return Success(Val.rotr(Amt), E);
+ Amount = NormalizeRotateAmount(Value, Amount);
+
+ switch (BuiltinOp) {
+ case Builtin::BI__builtin_rotateright8:
+ case Builtin::BI__builtin_rotateright16:
+ case Builtin::BI__builtin_rotateright32:
+ case Builtin::BI__builtin_rotateright64:
+ case Builtin::BI__builtin_stdc_rotate_right:
+ case Builtin::BI_rotr8:
+ case Builtin::BI_rotr16:
+ case Builtin::BI_rotr:
+ case Builtin::BI_lrotr:
+ case Builtin::BI_rotr64:
+ return Success(
+ APSInt(Value.rotr(Amount.getZExtValue()), Value.isUnsigned()), E);
+ default:
+ return Success(
+ APSInt(Value.rotl(Amount.getZExtValue()), Value.isUnsigned()), E);
+ }
}
case Builtin::BI__builtin_elementwise_add_sat: {
@@ -14820,9 +16549,9 @@ bool IntExprEvaluator::VisitBuiltinCallExpr(const CallExpr *E,
case Builtin::BI__builtin_wcslen: {
// As an extension, we support __builtin_strlen() as a constant expression,
// and support folding strlen() to a constant.
- uint64_t StrLen;
- if (EvaluateBuiltinStrLen(E->getArg(0), StrLen, Info))
- return Success(StrLen, E);
+ if (std::optional<uint64_t> StrLen =
+ EvaluateBuiltinStrLen(E->getArg(0), Info))
+ return Success(*StrLen, E);
return false;
}
@@ -15260,6 +16989,36 @@ bool IntExprEvaluator::VisitBuiltinCallExpr(const CallExpr *E,
return Success(CarryOut, E);
}
+ case clang::X86::BI__builtin_ia32_movmskps:
+ case clang::X86::BI__builtin_ia32_movmskpd:
+ case clang::X86::BI__builtin_ia32_pmovmskb128:
+ case clang::X86::BI__builtin_ia32_pmovmskb256:
+ case clang::X86::BI__builtin_ia32_movmskps256:
+ case clang::X86::BI__builtin_ia32_movmskpd256: {
+ APValue Source;
+ if (!Evaluate(Source, Info, E->getArg(0)))
+ return false;
+ unsigned SourceLen = Source.getVectorLength();
+ const VectorType *VT = E->getArg(0)->getType()->castAs<VectorType>();
+ QualType ElemQT = VT->getElementType();
+ unsigned ResultLen = Info.Ctx.getTypeSize(
+ E->getCallReturnType(Info.Ctx)); // Always 32-bit integer.
+ APInt Result(ResultLen, 0);
+
+ for (unsigned I = 0; I != SourceLen; ++I) {
+ APInt Elem;
+ if (ElemQT->isIntegerType()) {
+ Elem = Source.getVectorElt(I).getInt();
+ } else if (ElemQT->isRealFloatingType()) {
+ Elem = Source.getVectorElt(I).getFloat().bitcastToAPInt();
+ } else {
+ return false;
+ }
+ Result.setBitVal(I, Elem.isNegative());
+ }
+ return Success(Result, E);
+ }
+
case clang::X86::BI__builtin_ia32_bextr_u32:
case clang::X86::BI__builtin_ia32_bextr_u64:
case clang::X86::BI__builtin_ia32_bextri_u32:
@@ -15297,6 +17056,69 @@ bool IntExprEvaluator::VisitBuiltinCallExpr(const CallExpr *E,
return Success(Val, E);
}
+ case clang::X86::BI__builtin_ia32_ktestcqi:
+ case clang::X86::BI__builtin_ia32_ktestchi:
+ case clang::X86::BI__builtin_ia32_ktestcsi:
+ case clang::X86::BI__builtin_ia32_ktestcdi: {
+ APSInt A, B;
+ if (!EvaluateInteger(E->getArg(0), A, Info) ||
+ !EvaluateInteger(E->getArg(1), B, Info))
+ return false;
+
+ return Success((~A & B) == 0, E);
+ }
+
+ case clang::X86::BI__builtin_ia32_ktestzqi:
+ case clang::X86::BI__builtin_ia32_ktestzhi:
+ case clang::X86::BI__builtin_ia32_ktestzsi:
+ case clang::X86::BI__builtin_ia32_ktestzdi: {
+ APSInt A, B;
+ if (!EvaluateInteger(E->getArg(0), A, Info) ||
+ !EvaluateInteger(E->getArg(1), B, Info))
+ return false;
+
+ return Success((A & B) == 0, E);
+ }
+
+ case clang::X86::BI__builtin_ia32_kortestcqi:
+ case clang::X86::BI__builtin_ia32_kortestchi:
+ case clang::X86::BI__builtin_ia32_kortestcsi:
+ case clang::X86::BI__builtin_ia32_kortestcdi: {
+ APSInt A, B;
+ if (!EvaluateInteger(E->getArg(0), A, Info) ||
+ !EvaluateInteger(E->getArg(1), B, Info))
+ return false;
+
+ return Success(~(A | B) == 0, E);
+ }
+
+ case clang::X86::BI__builtin_ia32_kortestzqi:
+ case clang::X86::BI__builtin_ia32_kortestzhi:
+ case clang::X86::BI__builtin_ia32_kortestzsi:
+ case clang::X86::BI__builtin_ia32_kortestzdi: {
+ APSInt A, B;
+ if (!EvaluateInteger(E->getArg(0), A, Info) ||
+ !EvaluateInteger(E->getArg(1), B, Info))
+ return false;
+
+ return Success((A | B) == 0, E);
+ }
+
+ case clang::X86::BI__builtin_ia32_kunpckhi:
+ case clang::X86::BI__builtin_ia32_kunpckdi:
+ case clang::X86::BI__builtin_ia32_kunpcksi: {
+ APSInt A, B;
+ if (!EvaluateInteger(E->getArg(0), A, Info) ||
+ !EvaluateInteger(E->getArg(1), B, Info))
+ return false;
+
+ // Generic kunpack: extract lower half of each operand and concatenate
+ // Result = A[HalfWidth-1:0] concat B[HalfWidth-1:0]
+ unsigned BW = A.getBitWidth();
+ APSInt Result(A.trunc(BW / 2).concat(B.trunc(BW / 2)), A.isUnsigned());
+ return Success(Result, E);
+ }
+
case clang::X86::BI__builtin_ia32_lzcnt_u16:
case clang::X86::BI__builtin_ia32_lzcnt_u32:
case clang::X86::BI__builtin_ia32_lzcnt_u64: {
@@ -15431,6 +17253,40 @@ bool IntExprEvaluator::VisitBuiltinCallExpr(const CallExpr *E,
[](const APSInt &LHS, const APSInt &RHS) { return LHS + RHS; });
}
+ case X86::BI__builtin_ia32_kmovb:
+ case X86::BI__builtin_ia32_kmovw:
+ case X86::BI__builtin_ia32_kmovd:
+ case X86::BI__builtin_ia32_kmovq: {
+ APSInt Val;
+ if (!EvaluateInteger(E->getArg(0), Val, Info))
+ return false;
+ return Success(Val, E);
+ }
+
+ case X86::BI__builtin_ia32_kshiftliqi:
+ case X86::BI__builtin_ia32_kshiftlihi:
+ case X86::BI__builtin_ia32_kshiftlisi:
+ case X86::BI__builtin_ia32_kshiftlidi: {
+ return HandleMaskBinOp([](const APSInt &LHS, const APSInt &RHS) {
+ unsigned Amt = RHS.getZExtValue() & 0xFF;
+ if (Amt >= LHS.getBitWidth())
+ return APSInt(APInt::getZero(LHS.getBitWidth()), LHS.isUnsigned());
+ return APSInt(LHS.shl(Amt), LHS.isUnsigned());
+ });
+ }
+
+ case X86::BI__builtin_ia32_kshiftriqi:
+ case X86::BI__builtin_ia32_kshiftrihi:
+ case X86::BI__builtin_ia32_kshiftrisi:
+ case X86::BI__builtin_ia32_kshiftridi: {
+ return HandleMaskBinOp([](const APSInt &LHS, const APSInt &RHS) {
+ unsigned Amt = RHS.getZExtValue() & 0xFF;
+ if (Amt >= LHS.getBitWidth())
+ return APSInt(APInt::getZero(LHS.getBitWidth()), LHS.isUnsigned());
+ return APSInt(LHS.lshr(Amt), LHS.isUnsigned());
+ });
+ }
+
case clang::X86::BI__builtin_ia32_vec_ext_v4hi:
case clang::X86::BI__builtin_ia32_vec_ext_v16qi:
case clang::X86::BI__builtin_ia32_vec_ext_v8hi:
@@ -15449,6 +17305,162 @@ bool IntExprEvaluator::VisitBuiltinCallExpr(const CallExpr *E,
unsigned Idx = static_cast<unsigned>(IdxAPS.getZExtValue() & (N - 1));
return Success(Vec.getVectorElt(Idx).getInt(), E);
}
+
+ case clang::X86::BI__builtin_ia32_cvtb2mask128:
+ case clang::X86::BI__builtin_ia32_cvtb2mask256:
+ case clang::X86::BI__builtin_ia32_cvtb2mask512:
+ case clang::X86::BI__builtin_ia32_cvtw2mask128:
+ case clang::X86::BI__builtin_ia32_cvtw2mask256:
+ case clang::X86::BI__builtin_ia32_cvtw2mask512:
+ case clang::X86::BI__builtin_ia32_cvtd2mask128:
+ case clang::X86::BI__builtin_ia32_cvtd2mask256:
+ case clang::X86::BI__builtin_ia32_cvtd2mask512:
+ case clang::X86::BI__builtin_ia32_cvtq2mask128:
+ case clang::X86::BI__builtin_ia32_cvtq2mask256:
+ case clang::X86::BI__builtin_ia32_cvtq2mask512: {
+ assert(E->getNumArgs() == 1);
+ APValue Vec;
+ if (!EvaluateVector(E->getArg(0), Vec, Info))
+ return false;
+
+ unsigned VectorLen = Vec.getVectorLength();
+ unsigned RetWidth = Info.Ctx.getIntWidth(E->getType());
+ llvm::APInt Bits(RetWidth, 0);
+
+ for (unsigned ElemNum = 0; ElemNum != VectorLen; ++ElemNum) {
+ const APSInt &A = Vec.getVectorElt(ElemNum).getInt();
+ unsigned MSB = A[A.getBitWidth() - 1];
+ Bits.setBitVal(ElemNum, MSB);
+ }
+
+ APSInt RetMask(Bits, /*isUnsigned=*/true);
+ return Success(APValue(RetMask), E);
+ }
+
+ case clang::X86::BI__builtin_ia32_cmpb128_mask:
+ case clang::X86::BI__builtin_ia32_cmpw128_mask:
+ case clang::X86::BI__builtin_ia32_cmpd128_mask:
+ case clang::X86::BI__builtin_ia32_cmpq128_mask:
+ case clang::X86::BI__builtin_ia32_cmpb256_mask:
+ case clang::X86::BI__builtin_ia32_cmpw256_mask:
+ case clang::X86::BI__builtin_ia32_cmpd256_mask:
+ case clang::X86::BI__builtin_ia32_cmpq256_mask:
+ case clang::X86::BI__builtin_ia32_cmpb512_mask:
+ case clang::X86::BI__builtin_ia32_cmpw512_mask:
+ case clang::X86::BI__builtin_ia32_cmpd512_mask:
+ case clang::X86::BI__builtin_ia32_cmpq512_mask:
+ case clang::X86::BI__builtin_ia32_ucmpb128_mask:
+ case clang::X86::BI__builtin_ia32_ucmpw128_mask:
+ case clang::X86::BI__builtin_ia32_ucmpd128_mask:
+ case clang::X86::BI__builtin_ia32_ucmpq128_mask:
+ case clang::X86::BI__builtin_ia32_ucmpb256_mask:
+ case clang::X86::BI__builtin_ia32_ucmpw256_mask:
+ case clang::X86::BI__builtin_ia32_ucmpd256_mask:
+ case clang::X86::BI__builtin_ia32_ucmpq256_mask:
+ case clang::X86::BI__builtin_ia32_ucmpb512_mask:
+ case clang::X86::BI__builtin_ia32_ucmpw512_mask:
+ case clang::X86::BI__builtin_ia32_ucmpd512_mask:
+ case clang::X86::BI__builtin_ia32_ucmpq512_mask: {
+ assert(E->getNumArgs() == 4);
+
+ bool IsUnsigned =
+ (BuiltinOp >= clang::X86::BI__builtin_ia32_ucmpb128_mask &&
+ BuiltinOp <= clang::X86::BI__builtin_ia32_ucmpw512_mask);
+
+ APValue LHS, RHS;
+ APSInt Mask, Opcode;
+ if (!EvaluateVector(E->getArg(0), LHS, Info) ||
+ !EvaluateVector(E->getArg(1), RHS, Info) ||
+ !EvaluateInteger(E->getArg(2), Opcode, Info) ||
+ !EvaluateInteger(E->getArg(3), Mask, Info))
+ return false;
+
+ assert(LHS.getVectorLength() == RHS.getVectorLength());
+
+ unsigned VectorLen = LHS.getVectorLength();
+ unsigned RetWidth = Mask.getBitWidth();
+
+ APSInt RetMask(llvm::APInt(RetWidth, 0), /*isUnsigned=*/true);
+
+ for (unsigned ElemNum = 0; ElemNum < VectorLen; ++ElemNum) {
+ const APSInt &A = LHS.getVectorElt(ElemNum).getInt();
+ const APSInt &B = RHS.getVectorElt(ElemNum).getInt();
+ bool Result = false;
+
+ switch (Opcode.getExtValue() & 0x7) {
+ case 0: // _MM_CMPINT_EQ
+ Result = (A == B);
+ break;
+ case 1: // _MM_CMPINT_LT
+ Result = IsUnsigned ? A.ult(B) : A.slt(B);
+ break;
+ case 2: // _MM_CMPINT_LE
+ Result = IsUnsigned ? A.ule(B) : A.sle(B);
+ break;
+ case 3: // _MM_CMPINT_FALSE
+ Result = false;
+ break;
+ case 4: // _MM_CMPINT_NE
+ Result = (A != B);
+ break;
+ case 5: // _MM_CMPINT_NLT (>=)
+ Result = IsUnsigned ? A.uge(B) : A.sge(B);
+ break;
+ case 6: // _MM_CMPINT_NLE (>)
+ Result = IsUnsigned ? A.ugt(B) : A.sgt(B);
+ break;
+ case 7: // _MM_CMPINT_TRUE
+ Result = true;
+ break;
+ }
+
+ RetMask.setBitVal(ElemNum, Mask[ElemNum] && Result);
+ }
+
+ return Success(APValue(RetMask), E);
+ }
+ case X86::BI__builtin_ia32_vpshufbitqmb128_mask:
+ case X86::BI__builtin_ia32_vpshufbitqmb256_mask:
+ case X86::BI__builtin_ia32_vpshufbitqmb512_mask: {
+ assert(E->getNumArgs() == 3);
+
+ APValue Source, ShuffleMask;
+ APSInt ZeroMask;
+ if (!EvaluateVector(E->getArg(0), Source, Info) ||
+ !EvaluateVector(E->getArg(1), ShuffleMask, Info) ||
+ !EvaluateInteger(E->getArg(2), ZeroMask, Info))
+ return false;
+
+ assert(Source.getVectorLength() == ShuffleMask.getVectorLength());
+ assert(ZeroMask.getBitWidth() == Source.getVectorLength());
+
+ unsigned NumBytesInQWord = 8;
+ unsigned NumBitsInByte = 8;
+ unsigned NumBytes = Source.getVectorLength();
+ unsigned NumQWords = NumBytes / NumBytesInQWord;
+ unsigned RetWidth = ZeroMask.getBitWidth();
+ APSInt RetMask(llvm::APInt(RetWidth, 0), /*isUnsigned=*/true);
+
+ for (unsigned QWordId = 0; QWordId != NumQWords; ++QWordId) {
+ APInt SourceQWord(64, 0);
+ for (unsigned ByteIdx = 0; ByteIdx != NumBytesInQWord; ++ByteIdx) {
+ uint64_t Byte = Source.getVectorElt(QWordId * NumBytesInQWord + ByteIdx)
+ .getInt()
+ .getZExtValue();
+ SourceQWord.insertBits(APInt(8, Byte & 0xFF), ByteIdx * NumBitsInByte);
+ }
+
+ for (unsigned ByteIdx = 0; ByteIdx != NumBytesInQWord; ++ByteIdx) {
+ unsigned SelIdx = QWordId * NumBytesInQWord + ByteIdx;
+ unsigned M =
+ ShuffleMask.getVectorElt(SelIdx).getInt().getZExtValue() & 0x3F;
+ if (ZeroMask[SelIdx]) {
+ RetMask.setBitVal(SelIdx, SourceQWord[M]);
+ }
+ }
+ }
+ return Success(APValue(RetMask), E);
+ }
}
}
@@ -16662,7 +18674,6 @@ bool IntExprEvaluator::VisitCastExpr(const CastExpr *E) {
case CK_NoOp:
case CK_LValueToRValueBitCast:
case CK_HLSLArrayRValue:
- case CK_HLSLElementwiseCast:
return ExprEvaluatorBaseTy::VisitCastExpr(E);
case CK_MemberPointerToBoolean:
@@ -16708,12 +18719,15 @@ bool IntExprEvaluator::VisitCastExpr(const CastExpr *E) {
if (!Result.isInt()) {
// Allow casts of address-of-label differences if they are no-ops
- // or narrowing. (The narrowing case isn't actually guaranteed to
+ // or narrowing, if the result is at least 32 bits wide.
+ // (The narrowing case isn't actually guaranteed to
// be constant-evaluatable except in some narrow cases which are hard
// to detect here. We let it through on the assumption the user knows
// what they are doing.)
- if (Result.isAddrLabelDiff())
- return Info.Ctx.getTypeSize(DestType) <= Info.Ctx.getTypeSize(SrcType);
+ if (Result.isAddrLabelDiff()) {
+ unsigned DestBits = Info.Ctx.getTypeSize(DestType);
+ return DestBits >= 32 && DestBits <= Info.Ctx.getTypeSize(SrcType);
+ }
// Only allow casts of lvalues if they are lossless.
return Info.Ctx.getTypeSize(DestType) == Info.Ctx.getTypeSize(SrcType);
}
@@ -16809,6 +18823,25 @@ bool IntExprEvaluator::VisitCastExpr(const CastExpr *E) {
return Error(E);
return Success(Val.getVectorElt(0), E);
}
+ case CK_HLSLMatrixTruncation: {
+ // TODO: See #168935. Add matrix truncation support to expr constant.
+ return Error(E);
+ }
+ case CK_HLSLElementwiseCast: {
+ SmallVector<APValue> SrcVals;
+ SmallVector<QualType> SrcTypes;
+
+ if (!hlslElementwiseCastHelper(Info, SubExpr, DestType, SrcVals, SrcTypes))
+ return false;
+
+ // cast our single element
+ const FPOptions FPO = E->getFPFeaturesInEffect(Info.Ctx.getLangOpts());
+ APValue ResultVal;
+ if (!handleScalarCast(Info, FPO, E, SrcTypes[0], DestType, SrcVals[0],
+ ResultVal))
+ return false;
+ return Success(ResultVal, E);
+ }
}
llvm_unreachable("unknown cast resulting in integral value");
@@ -17346,6 +19379,9 @@ bool FloatExprEvaluator::VisitCastExpr(const CastExpr *E) {
default:
return ExprEvaluatorBaseTy::VisitCastExpr(E);
+ case CK_HLSLAggregateSplatCast:
+ llvm_unreachable("invalid cast kind for floating value");
+
case CK_IntegralToFloating: {
APSInt IntResult;
const FPOptions FPO = E->getFPFeaturesInEffect(
@@ -17384,6 +19420,27 @@ bool FloatExprEvaluator::VisitCastExpr(const CastExpr *E) {
return Error(E);
return Success(Val.getVectorElt(0), E);
}
+ case CK_HLSLMatrixTruncation: {
+ // TODO: See #168935. Add matrix truncation support to expr constant.
+ return Error(E);
+ }
+ case CK_HLSLElementwiseCast: {
+ SmallVector<APValue> SrcVals;
+ SmallVector<QualType> SrcTypes;
+
+ if (!hlslElementwiseCastHelper(Info, SubExpr, E->getType(), SrcVals,
+ SrcTypes))
+ return false;
+ APValue Val;
+
+ // cast our single element
+ const FPOptions FPO = E->getFPFeaturesInEffect(Info.Ctx.getLangOpts());
+ APValue ResultVal;
+ if (!handleScalarCast(Info, FPO, E, SrcTypes[0], E->getType(), SrcVals[0],
+ ResultVal))
+ return false;
+ return Success(ResultVal, E);
+ }
}
}
@@ -17524,6 +19581,7 @@ bool ComplexExprEvaluator::VisitCastExpr(const CastExpr *E) {
case CK_IntegralToFixedPoint:
case CK_MatrixCast:
case CK_HLSLVectorTruncation:
+ case CK_HLSLMatrixTruncation:
case CK_HLSLElementwiseCast:
case CK_HLSLAggregateSplatCast:
llvm_unreachable("invalid cast kind for complex value");
@@ -17619,6 +19677,88 @@ bool ComplexExprEvaluator::VisitCastExpr(const CastExpr *E) {
llvm_unreachable("unknown cast resulting in complex value");
}
+uint8_t GFNIMultiplicativeInverse(uint8_t Byte) {
+ // Lookup Table for Multiplicative Inverse in GF(2^8)
+ const uint8_t GFInv[256] = {
+ 0x00, 0x01, 0x8d, 0xf6, 0xcb, 0x52, 0x7b, 0xd1, 0xe8, 0x4f, 0x29, 0xc0,
+ 0xb0, 0xe1, 0xe5, 0xc7, 0x74, 0xb4, 0xaa, 0x4b, 0x99, 0x2b, 0x60, 0x5f,
+ 0x58, 0x3f, 0xfd, 0xcc, 0xff, 0x40, 0xee, 0xb2, 0x3a, 0x6e, 0x5a, 0xf1,
+ 0x55, 0x4d, 0xa8, 0xc9, 0xc1, 0x0a, 0x98, 0x15, 0x30, 0x44, 0xa2, 0xc2,
+ 0x2c, 0x45, 0x92, 0x6c, 0xf3, 0x39, 0x66, 0x42, 0xf2, 0x35, 0x20, 0x6f,
+ 0x77, 0xbb, 0x59, 0x19, 0x1d, 0xfe, 0x37, 0x67, 0x2d, 0x31, 0xf5, 0x69,
+ 0xa7, 0x64, 0xab, 0x13, 0x54, 0x25, 0xe9, 0x09, 0xed, 0x5c, 0x05, 0xca,
+ 0x4c, 0x24, 0x87, 0xbf, 0x18, 0x3e, 0x22, 0xf0, 0x51, 0xec, 0x61, 0x17,
+ 0x16, 0x5e, 0xaf, 0xd3, 0x49, 0xa6, 0x36, 0x43, 0xf4, 0x47, 0x91, 0xdf,
+ 0x33, 0x93, 0x21, 0x3b, 0x79, 0xb7, 0x97, 0x85, 0x10, 0xb5, 0xba, 0x3c,
+ 0xb6, 0x70, 0xd0, 0x06, 0xa1, 0xfa, 0x81, 0x82, 0x83, 0x7e, 0x7f, 0x80,
+ 0x96, 0x73, 0xbe, 0x56, 0x9b, 0x9e, 0x95, 0xd9, 0xf7, 0x02, 0xb9, 0xa4,
+ 0xde, 0x6a, 0x32, 0x6d, 0xd8, 0x8a, 0x84, 0x72, 0x2a, 0x14, 0x9f, 0x88,
+ 0xf9, 0xdc, 0x89, 0x9a, 0xfb, 0x7c, 0x2e, 0xc3, 0x8f, 0xb8, 0x65, 0x48,
+ 0x26, 0xc8, 0x12, 0x4a, 0xce, 0xe7, 0xd2, 0x62, 0x0c, 0xe0, 0x1f, 0xef,
+ 0x11, 0x75, 0x78, 0x71, 0xa5, 0x8e, 0x76, 0x3d, 0xbd, 0xbc, 0x86, 0x57,
+ 0x0b, 0x28, 0x2f, 0xa3, 0xda, 0xd4, 0xe4, 0x0f, 0xa9, 0x27, 0x53, 0x04,
+ 0x1b, 0xfc, 0xac, 0xe6, 0x7a, 0x07, 0xae, 0x63, 0xc5, 0xdb, 0xe2, 0xea,
+ 0x94, 0x8b, 0xc4, 0xd5, 0x9d, 0xf8, 0x90, 0x6b, 0xb1, 0x0d, 0xd6, 0xeb,
+ 0xc6, 0x0e, 0xcf, 0xad, 0x08, 0x4e, 0xd7, 0xe3, 0x5d, 0x50, 0x1e, 0xb3,
+ 0x5b, 0x23, 0x38, 0x34, 0x68, 0x46, 0x03, 0x8c, 0xdd, 0x9c, 0x7d, 0xa0,
+ 0xcd, 0x1a, 0x41, 0x1c};
+
+ return GFInv[Byte];
+}
+
+uint8_t GFNIAffine(uint8_t XByte, const APInt &AQword, const APSInt &Imm,
+ bool Inverse) {
+ unsigned NumBitsInByte = 8;
+ // Computing the affine transformation
+ uint8_t RetByte = 0;
+ for (uint32_t BitIdx = 0; BitIdx != NumBitsInByte; ++BitIdx) {
+ uint8_t AByte =
+ AQword.lshr((7 - static_cast<int32_t>(BitIdx)) * NumBitsInByte)
+ .getLoBits(8)
+ .getZExtValue();
+ uint8_t Product;
+ if (Inverse) {
+ Product = AByte & GFNIMultiplicativeInverse(XByte);
+ } else {
+ Product = AByte & XByte;
+ }
+ uint8_t Parity = 0;
+
+ // Dot product in GF(2) uses XOR instead of addition
+ for (unsigned PBitIdx = 0; PBitIdx != NumBitsInByte; ++PBitIdx) {
+ Parity = Parity ^ ((Product >> PBitIdx) & 0x1);
+ }
+
+ uint8_t Temp = Imm[BitIdx] ? 1 : 0;
+ RetByte |= (Temp ^ Parity) << BitIdx;
+ }
+ return RetByte;
+}
+
+uint8_t GFNIMul(uint8_t AByte, uint8_t BByte) {
+  // Carry-less multiplication of two polynomials of degree at most 7
+  // over GF(2). Each byte encodes such a polynomial, e.g. 0xFF is
+  // x^7 + x^6 + x^5 + x^4 + x^3 + x^2 + x + 1
+ uint16_t TWord = 0;
+ unsigned NumBitsInByte = 8;
+ for (unsigned BitIdx = 0; BitIdx != NumBitsInByte; ++BitIdx) {
+ if ((BByte >> BitIdx) & 0x1) {
+ TWord = TWord ^ (AByte << BitIdx);
+ }
+ }
+
+  // Multiplying two polynomials of degree at most 7 yields a
+  // polynomial of degree up to 14, so the result has to be
+  // reduced back to degree at most 7.
+  // Reduction polynomial is x^8 + x^4 + x^3 + x + 1 i.e. 0x11B
+ for (int32_t BitIdx = 14; BitIdx > 7; --BitIdx) {
+ if ((TWord >> BitIdx) & 0x1) {
+ TWord = TWord ^ (0x11B << (BitIdx - 8));
+ }
+ }
+ return (TWord & 0xFF);
+}
+
void HandleComplexComplexMul(APFloat A, APFloat B, APFloat C, APFloat D,
APFloat &ResR, APFloat &ResI) {
// This is an implementation of complex multiplication according to the
@@ -17717,6 +19857,46 @@ void HandleComplexComplexDiv(APFloat A, APFloat B, APFloat C, APFloat D,
}
}
+APSInt NormalizeRotateAmount(const APSInt &Value, const APSInt &Amount) {
+ // Normalize shift amount to [0, BitWidth) range to match runtime behavior
+ APSInt NormAmt = Amount;
+ unsigned BitWidth = Value.getBitWidth();
+ unsigned AmtBitWidth = NormAmt.getBitWidth();
+ if (BitWidth == 1) {
+ // Rotating a 1-bit value is always a no-op
+ NormAmt = APSInt(APInt(AmtBitWidth, 0), NormAmt.isUnsigned());
+ } else if (BitWidth == 2) {
+ // For 2-bit values: rotation amount is 0 or 1 based on
+ // whether the amount is even or odd. We can't use srem here because
+ // the divisor (2) would be misinterpreted as -2 in 2-bit signed arithmetic.
+ NormAmt =
+ APSInt(APInt(AmtBitWidth, NormAmt[0] ? 1 : 0), NormAmt.isUnsigned());
+ } else {
+ APInt Divisor;
+ if (AmtBitWidth > BitWidth) {
+ Divisor = llvm::APInt(AmtBitWidth, BitWidth);
+ } else {
+ Divisor = llvm::APInt(BitWidth, BitWidth);
+ if (AmtBitWidth < BitWidth) {
+ NormAmt = NormAmt.extend(BitWidth);
+ }
+ }
+
+ // Normalize to [0, BitWidth)
+ if (NormAmt.isSigned()) {
+ NormAmt = APSInt(NormAmt.srem(Divisor), /*isUnsigned=*/false);
+ if (NormAmt.isNegative()) {
+ APSInt SignedDivisor(Divisor, /*isUnsigned=*/false);
+ NormAmt += SignedDivisor;
+ }
+ } else {
+ NormAmt = APSInt(NormAmt.urem(Divisor), /*isUnsigned=*/true);
+ }
+ }
+
+ return NormAmt;
+}
+
bool ComplexExprEvaluator::VisitBinaryOperator(const BinaryOperator *E) {
if (E->isPtrMemOp() || E->isAssignmentOp() || E->getOpcode() == BO_Comma)
return ExprEvaluatorBaseTy::VisitBinaryOperator(E);
@@ -18808,6 +20988,7 @@ static ICEDiag CheckICE(const Expr* E, const ASTContext &Ctx) {
case Expr::ImaginaryLiteralClass:
case Expr::StringLiteralClass:
case Expr::ArraySubscriptExprClass:
+ case Expr::MatrixSingleSubscriptExprClass:
case Expr::MatrixSubscriptExprClass:
case Expr::ArraySectionExprClass:
case Expr::OMPArrayShapingExprClass:
@@ -18816,6 +20997,7 @@ static ICEDiag CheckICE(const Expr* E, const ASTContext &Ctx) {
case Expr::CompoundAssignOperatorClass:
case Expr::CompoundLiteralExprClass:
case Expr::ExtVectorElementExprClass:
+ case Expr::MatrixElementExprClass:
case Expr::DesignatedInitExprClass:
case Expr::ArrayInitLoopExprClass:
case Expr::ArrayInitIndexExprClass:
@@ -18933,6 +21115,7 @@ static ICEDiag CheckICE(const Expr* E, const ASTContext &Ctx) {
case Expr::ArrayTypeTraitExprClass:
case Expr::ExpressionTraitExprClass:
case Expr::CXXNoexceptExprClass:
+ case Expr::CXXReflectExprClass:
return NoDiag();
case Expr::CallExprClass:
case Expr::CXXOperatorCallExprClass: {
@@ -19475,25 +21658,28 @@ bool Expr::isPotentialConstantExprUnevaluated(Expr *E,
return Diags.empty();
}
-bool Expr::tryEvaluateObjectSize(uint64_t &Result, ASTContext &Ctx,
- unsigned Type) const {
+std::optional<uint64_t> Expr::tryEvaluateObjectSize(const ASTContext &Ctx,
+ unsigned Type) const {
if (!getType()->isPointerType())
- return false;
+ return std::nullopt;
Expr::EvalStatus Status;
EvalInfo Info(Ctx, Status, EvaluationMode::ConstantFold);
- return tryEvaluateBuiltinObjectSize(this, Type, Info, Result);
+ if (Info.EnableNewConstInterp)
+ return Info.Ctx.getInterpContext().tryEvaluateObjectSize(Info, this, Type);
+ return tryEvaluateBuiltinObjectSize(this, Type, Info);
}
-static bool EvaluateBuiltinStrLen(const Expr *E, uint64_t &Result,
- EvalInfo &Info, std::string *StringResult) {
+static std::optional<uint64_t>
+EvaluateBuiltinStrLen(const Expr *E, EvalInfo &Info,
+ std::string *StringResult) {
if (!E->getType()->hasPointerRepresentation() || !E->isPRValue())
- return false;
+ return std::nullopt;
LValue String;
if (!EvaluatePointer(E, String, Info))
- return false;
+ return std::nullopt;
QualType CharTy = E->getType()->getPointeeType();
@@ -19512,10 +21698,9 @@ static bool EvaluateBuiltinStrLen(const Expr *E, uint64_t &Result,
if (Pos != StringRef::npos)
Str = Str.substr(0, Pos);
- Result = Str.size();
if (StringResult)
*StringResult = Str;
- return true;
+ return Str.size();
}
// Fall through to slow path.
@@ -19526,21 +21711,19 @@ static bool EvaluateBuiltinStrLen(const Expr *E, uint64_t &Result,
APValue Char;
if (!handleLValueToRValueConversion(Info, E, CharTy, String, Char) ||
!Char.isInt())
- return false;
- if (!Char.getInt()) {
- Result = Strlen;
- return true;
- } else if (StringResult)
+ return std::nullopt;
+ if (!Char.getInt())
+ return Strlen;
+ else if (StringResult)
StringResult->push_back(Char.getInt().getExtValue());
if (!HandleLValueArrayAdjustment(Info, E, String, CharTy, 1))
- return false;
+ return std::nullopt;
}
}
std::optional<std::string> Expr::tryEvaluateString(ASTContext &Ctx) const {
Expr::EvalStatus Status;
EvalInfo Info(Ctx, Status, EvaluationMode::ConstantFold);
- uint64_t Result;
std::string StringResult;
if (Info.EnableNewConstInterp) {
@@ -19549,7 +21732,7 @@ std::optional<std::string> Expr::tryEvaluateString(ASTContext &Ctx) const {
return StringResult;
}
- if (EvaluateBuiltinStrLen(this, Result, Info, &StringResult))
+ if (EvaluateBuiltinStrLen(this, Info, &StringResult))
return StringResult;
return std::nullopt;
}
@@ -19626,14 +21809,13 @@ bool Expr::EvaluateCharRangeAsString(APValue &Result,
PtrExpression, Ctx, Status);
}
-bool Expr::tryEvaluateStrLen(uint64_t &Result, ASTContext &Ctx) const {
+std::optional<uint64_t> Expr::tryEvaluateStrLen(const ASTContext &Ctx) const {
Expr::EvalStatus Status;
EvalInfo Info(Ctx, Status, EvaluationMode::ConstantFold);
if (Info.EnableNewConstInterp)
- return Info.Ctx.getInterpContext().evaluateStrlen(Info, this, Result);
-
- return EvaluateBuiltinStrLen(this, Result, Info);
+ return Info.Ctx.getInterpContext().evaluateStrlen(Info, this);
+ return EvaluateBuiltinStrLen(this, Info);
}
namespace {
diff --git a/clang/lib/AST/ExprObjC.cpp b/clang/lib/AST/ExprObjC.cpp
index 83419a1..3509182 100644
--- a/clang/lib/AST/ExprObjC.cpp
+++ b/clang/lib/AST/ExprObjC.cpp
@@ -330,8 +330,7 @@ Stmt::child_range ObjCMessageExpr::children() {
}
Stmt::const_child_range ObjCMessageExpr::children() const {
- auto Children = const_cast<ObjCMessageExpr *>(this)->children();
- return const_child_range(Children.begin(), Children.end());
+ return const_cast<ObjCMessageExpr *>(this)->children();
}
StringRef ObjCBridgedCastExpr::getBridgeKindName() const {
diff --git a/clang/lib/AST/FormatString.cpp b/clang/lib/AST/FormatString.cpp
index d4cb89b..36c5f57 100644
--- a/clang/lib/AST/FormatString.cpp
+++ b/clang/lib/AST/FormatString.cpp
@@ -371,7 +371,7 @@ static clang::analyze_format_string::ArgType::MatchKind
matchesSizeTPtrdiffT(ASTContext &C, QualType T, QualType E) {
using MatchKind = clang::analyze_format_string::ArgType::MatchKind;
- if (!T->isIntegerType())
+ if (!T->isIntegerType() || T->isBooleanType())
return MatchKind::NoMatch;
if (C.hasSameType(T, E))
diff --git a/clang/lib/AST/InferAlloc.cpp b/clang/lib/AST/InferAlloc.cpp
new file mode 100644
index 0000000..e439ed4
--- /dev/null
+++ b/clang/lib/AST/InferAlloc.cpp
@@ -0,0 +1,201 @@
+//===--- InferAlloc.cpp - Allocation type inference -----------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// This file implements allocation-related type inference.
+//
+//===----------------------------------------------------------------------===//
+
+#include "clang/AST/InferAlloc.h"
+#include "clang/AST/ASTContext.h"
+#include "clang/AST/Decl.h"
+#include "clang/AST/DeclCXX.h"
+#include "clang/AST/Expr.h"
+#include "clang/AST/Type.h"
+#include "clang/Basic/IdentifierTable.h"
+#include "llvm/ADT/SmallPtrSet.h"
+
+using namespace clang;
+using namespace infer_alloc;
+
+static bool
+typeContainsPointer(QualType T,
+ llvm::SmallPtrSet<const RecordDecl *, 4> &VisitedRD,
+ bool &IncompleteType) {
+ QualType CanonicalType = T.getCanonicalType();
+ if (CanonicalType->isPointerType())
+ return true; // base case
+
+ // Look through typedef chain to check for special types.
+ for (QualType CurrentT = T; const auto *TT = CurrentT->getAs<TypedefType>();
+ CurrentT = TT->getDecl()->getUnderlyingType()) {
+ const IdentifierInfo *II = TT->getDecl()->getIdentifier();
+ // Special Case: Syntactically uintptr_t is not a pointer; semantically,
+ // however, very likely used as such. Therefore, classify uintptr_t as a
+ // pointer, too.
+ if (II && II->isStr("uintptr_t"))
+ return true;
+ }
+
+ // The type is an array; check the element type.
+ if (const ArrayType *AT = dyn_cast<ArrayType>(CanonicalType))
+ return typeContainsPointer(AT->getElementType(), VisitedRD, IncompleteType);
+ // The type is a struct, class, or union.
+ if (const RecordDecl *RD = CanonicalType->getAsRecordDecl()) {
+ if (!RD->isCompleteDefinition()) {
+ IncompleteType = true;
+ return false;
+ }
+ if (!VisitedRD.insert(RD).second)
+ return false; // already visited
+ // Check all fields.
+ for (const FieldDecl *Field : RD->fields()) {
+ if (typeContainsPointer(Field->getType(), VisitedRD, IncompleteType))
+ return true;
+ }
+ // For C++ classes, also check base classes.
+ if (const CXXRecordDecl *CXXRD = dyn_cast<CXXRecordDecl>(RD)) {
+ // Polymorphic types require a vptr.
+ if (CXXRD->isDynamicClass())
+ return true;
+ for (const CXXBaseSpecifier &Base : CXXRD->bases()) {
+ if (typeContainsPointer(Base.getType(), VisitedRD, IncompleteType))
+ return true;
+ }
+ }
+ }
+ return false;
+}
+
+/// Infer type from a simple sizeof expression.
+static QualType inferTypeFromSizeofExpr(const Expr *E) {
+ const Expr *Arg = E->IgnoreParenImpCasts();
+ if (const auto *UET = dyn_cast<UnaryExprOrTypeTraitExpr>(Arg)) {
+ if (UET->getKind() == UETT_SizeOf) {
+ if (UET->isArgumentType())
+ return UET->getArgumentTypeInfo()->getType();
+ else
+ return UET->getArgumentExpr()->getType();
+ }
+ }
+ return QualType();
+}
+
+/// Infer type from an arithmetic expression involving a sizeof. For example:
+///
+/// malloc(sizeof(MyType) + padding); // infers 'MyType'
+/// malloc(sizeof(MyType) * 32); // infers 'MyType'
+/// malloc(32 * sizeof(MyType)); // infers 'MyType'
+/// malloc(sizeof(MyType) << 1); // infers 'MyType'
+/// ...
+///
+/// More complex arithmetic expressions are supported, but are a heuristic, e.g.
+/// when considering allocations for structs with flexible array members:
+///
+/// malloc(sizeof(HasFlexArray) + sizeof(int) * 32); // infers 'HasFlexArray'
+///
+static QualType inferPossibleTypeFromArithSizeofExpr(const Expr *E) {
+ const Expr *Arg = E->IgnoreParenImpCasts();
+ // The argument is a lone sizeof expression.
+ if (QualType T = inferTypeFromSizeofExpr(Arg); !T.isNull())
+ return T;
+ if (const auto *BO = dyn_cast<BinaryOperator>(Arg)) {
+ // Argument is an arithmetic expression. Cover common arithmetic patterns
+ // involving sizeof.
+ switch (BO->getOpcode()) {
+ case BO_Add:
+ case BO_Div:
+ case BO_Mul:
+ case BO_Shl:
+ case BO_Shr:
+ case BO_Sub:
+ if (QualType T = inferPossibleTypeFromArithSizeofExpr(BO->getLHS());
+ !T.isNull())
+ return T;
+ if (QualType T = inferPossibleTypeFromArithSizeofExpr(BO->getRHS());
+ !T.isNull())
+ return T;
+ break;
+ default:
+ break;
+ }
+ }
+ return QualType();
+}
+
+/// If the expression E is a reference to a variable, infer the type from a
+/// variable's initializer if it contains a sizeof. Beware, this is a heuristic
+/// and ignores if a variable is later reassigned. For example:
+///
+/// size_t my_size = sizeof(MyType);
+/// void *x = malloc(my_size); // infers 'MyType'
+///
+static QualType inferPossibleTypeFromVarInitSizeofExpr(const Expr *E) {
+ const Expr *Arg = E->IgnoreParenImpCasts();
+ if (const auto *DRE = dyn_cast<DeclRefExpr>(Arg)) {
+ if (const auto *VD = dyn_cast<VarDecl>(DRE->getDecl())) {
+ if (const Expr *Init = VD->getInit())
+ return inferPossibleTypeFromArithSizeofExpr(Init);
+ }
+ }
+ return QualType();
+}
+
+/// Deduces the allocated type by checking if the allocation call's result
+/// is immediately used in a cast expression. For example:
+///
+/// MyType *x = (MyType *)malloc(4096); // infers 'MyType'
+///
+static QualType inferPossibleTypeFromCastExpr(const CallExpr *CallE,
+ const CastExpr *CastE) {
+ if (!CastE)
+ return QualType();
+ QualType PtrType = CastE->getType();
+ if (PtrType->isPointerType())
+ return PtrType->getPointeeType();
+ return QualType();
+}
+
+QualType infer_alloc::inferPossibleType(const CallExpr *E,
+ const ASTContext &Ctx,
+ const CastExpr *CastE) {
+ QualType AllocType;
+ // First check arguments.
+ for (const Expr *Arg : E->arguments()) {
+ AllocType = inferPossibleTypeFromArithSizeofExpr(Arg);
+ if (AllocType.isNull())
+ AllocType = inferPossibleTypeFromVarInitSizeofExpr(Arg);
+ if (!AllocType.isNull())
+ break;
+ }
+ // Then check later casts.
+ if (AllocType.isNull())
+ AllocType = inferPossibleTypeFromCastExpr(E, CastE);
+ return AllocType;
+}
+
+std::optional<llvm::AllocTokenMetadata>
+infer_alloc::getAllocTokenMetadata(QualType T, const ASTContext &Ctx) {
+ llvm::AllocTokenMetadata ATMD;
+
+ // Get unique type name.
+ PrintingPolicy Policy(Ctx.getLangOpts());
+ Policy.SuppressTagKeyword = true;
+ Policy.FullyQualifiedName = true;
+ llvm::raw_svector_ostream TypeNameOS(ATMD.TypeName);
+ T.getCanonicalType().print(TypeNameOS, Policy);
+
+ // Check if QualType contains a pointer. Implements a simple DFS to
+ // recursively check if a type contains a pointer type.
+ llvm::SmallPtrSet<const RecordDecl *, 4> VisitedRD;
+ bool IncompleteType = false;
+ ATMD.ContainsPointer = typeContainsPointer(T, VisitedRD, IncompleteType);
+ if (!ATMD.ContainsPointer && IncompleteType)
+ return std::nullopt;
+
+ return ATMD;
+}
diff --git a/clang/lib/AST/ItaniumMangle.cpp b/clang/lib/AST/ItaniumMangle.cpp
index 5572e0a..70acc8a 100644
--- a/clang/lib/AST/ItaniumMangle.cpp
+++ b/clang/lib/AST/ItaniumMangle.cpp
@@ -4190,9 +4190,11 @@ void CXXNameMangler::mangleRISCVFixedRVVVectorType(const VectorType *T) {
TypeNameOS << "uint32";
break;
case BuiltinType::Long:
+ case BuiltinType::LongLong:
TypeNameOS << "int64";
break;
case BuiltinType::ULong:
+ case BuiltinType::ULongLong:
TypeNameOS << "uint64";
break;
case BuiltinType::Float16:
@@ -4204,6 +4206,9 @@ void CXXNameMangler::mangleRISCVFixedRVVVectorType(const VectorType *T) {
case BuiltinType::Double:
TypeNameOS << "float64";
break;
+ case BuiltinType::BFloat16:
+ TypeNameOS << "bfloat16";
+ break;
default:
llvm_unreachable("unexpected element type for fixed-length RVV vector!");
}
@@ -4946,11 +4951,18 @@ recurse:
E = cast<ConstantExpr>(E)->getSubExpr();
goto recurse;
+ case Expr::CXXReflectExprClass: {
+ // TODO(Reflection): implement this after introducing std::meta::info
+ assert(false && "unimplemented");
+ break;
+ }
+
// FIXME: invent manglings for all these.
case Expr::BlockExprClass:
case Expr::ChooseExprClass:
case Expr::CompoundLiteralExprClass:
case Expr::ExtVectorElementExprClass:
+ case Expr::MatrixElementExprClass:
case Expr::GenericSelectionExprClass:
case Expr::ObjCEncodeExprClass:
case Expr::ObjCIsaExprClass:
@@ -5482,6 +5494,15 @@ recurse:
break;
}
+ case Expr::MatrixSingleSubscriptExprClass: {
+ NotPrimaryExpr();
+ const MatrixSingleSubscriptExpr *ME = cast<MatrixSingleSubscriptExpr>(E);
+ Out << "ix";
+ mangleExpression(ME->getBase());
+ mangleExpression(ME->getRowIdx());
+ break;
+ }
+
case Expr::MatrixSubscriptExprClass: {
NotPrimaryExpr();
const MatrixSubscriptExpr *ME = cast<MatrixSubscriptExpr>(E);
@@ -6040,6 +6061,8 @@ void CXXNameMangler::mangleCXXDtorType(CXXDtorType T) {
case Dtor_Comdat:
Out << "D5";
break;
+ case Dtor_VectorDeleting:
+ llvm_unreachable("Itanium ABI does not use vector deleting dtors");
}
}
diff --git a/clang/lib/AST/JSONNodeDumper.cpp b/clang/lib/AST/JSONNodeDumper.cpp
index 9f4dba9..3138f95 100644
--- a/clang/lib/AST/JSONNodeDumper.cpp
+++ b/clang/lib/AST/JSONNodeDumper.cpp
@@ -272,15 +272,13 @@ void JSONNodeDumper::writeIncludeStack(PresumedLoc Loc, bool JustFirst) {
JOS.attributeEnd();
}
-void JSONNodeDumper::writeBareSourceLocation(SourceLocation Loc,
- bool IsSpelling) {
+void JSONNodeDumper::writeBareSourceLocation(SourceLocation Loc) {
PresumedLoc Presumed = SM.getPresumedLoc(Loc);
- unsigned ActualLine = IsSpelling ? SM.getSpellingLineNumber(Loc)
- : SM.getExpansionLineNumber(Loc);
- StringRef ActualFile = SM.getBufferName(Loc);
-
if (Presumed.isValid()) {
- JOS.attribute("offset", SM.getDecomposedLoc(Loc).second);
+ StringRef ActualFile = SM.getBufferName(Loc);
+ auto [FID, FilePos] = SM.getDecomposedLoc(Loc);
+ unsigned ActualLine = SM.getLineNumber(FID, FilePos);
+ JOS.attribute("offset", FilePos);
if (LastLocFilename != ActualFile) {
JOS.attribute("file", ActualFile);
JOS.attribute("line", ActualLine);
@@ -318,18 +316,17 @@ void JSONNodeDumper::writeSourceLocation(SourceLocation Loc) {
if (Expansion != Spelling) {
// If the expansion and the spelling are different, output subobjects
// describing both locations.
- JOS.attributeObject("spellingLoc", [Spelling, this] {
- writeBareSourceLocation(Spelling, /*IsSpelling*/ true);
- });
+ JOS.attributeObject(
+ "spellingLoc", [Spelling, this] { writeBareSourceLocation(Spelling); });
JOS.attributeObject("expansionLoc", [Expansion, Loc, this] {
- writeBareSourceLocation(Expansion, /*IsSpelling*/ false);
+ writeBareSourceLocation(Expansion);
// If there is a macro expansion, add extra information if the interesting
// bit is the macro arg expansion.
if (SM.isMacroArgExpansion(Loc))
JOS.attribute("isMacroArgExpansion", true);
});
} else
- writeBareSourceLocation(Spelling, /*IsSpelling*/ true);
+ writeBareSourceLocation(Spelling);
}
void JSONNodeDumper::writeSourceRange(SourceRange R) {
@@ -599,6 +596,27 @@ void JSONNodeDumper::VisitTLSModelAttr(const TLSModelAttr *TA) {
JOS.attribute("tls_model", TA->getModel());
}
+void JSONNodeDumper::VisitAvailabilityAttr(const AvailabilityAttr *AA) {
+ if (const IdentifierInfo *Platform = AA->getPlatform())
+ JOS.attribute("platform", Platform->getName());
+ if (!AA->getIntroduced().empty())
+ JOS.attribute("introduced", AA->getIntroduced().getAsString());
+ if (!AA->getDeprecated().empty())
+ JOS.attribute("deprecated", AA->getDeprecated().getAsString());
+ if (!AA->getObsoleted().empty())
+ JOS.attribute("obsoleted", AA->getObsoleted().getAsString());
+ attributeOnlyIfTrue("unavailable", AA->getUnavailable());
+ if (!AA->getMessage().empty())
+ JOS.attribute("message", AA->getMessage());
+ attributeOnlyIfTrue("strict", AA->getStrict());
+ if (!AA->getReplacement().empty())
+ JOS.attribute("replacement", AA->getReplacement());
+ if (AA->getPriority() != 0)
+ JOS.attribute("priority", AA->getPriority());
+ if (const IdentifierInfo *Env = AA->getEnvironment())
+ JOS.attribute("environment", Env->getName());
+}
+
void JSONNodeDumper::VisitTypedefType(const TypedefType *TT) {
JOS.attribute("decl", createBareDeclRef(TT->getDecl()));
if (!TT->typeMatchesDecl())
@@ -1610,6 +1628,10 @@ void JSONNodeDumper::VisitCXXDefaultInitExpr(const CXXDefaultInitExpr *Node) {
attributeOnlyIfTrue("hasRewrittenInit", Node->hasRewrittenInit());
}
+void JSONNodeDumper::VisitLambdaExpr(const LambdaExpr *LE) {
+ JOS.attribute("hasExplicitParameters", LE->hasExplicitParameters());
+}
+
void JSONNodeDumper::VisitCXXDependentScopeMemberExpr(
const CXXDependentScopeMemberExpr *DSME) {
JOS.attribute("isArrow", DSME->isArrow());
diff --git a/clang/lib/AST/MicrosoftMangle.cpp b/clang/lib/AST/MicrosoftMangle.cpp
index f1baf9f..551aa7b 100644
--- a/clang/lib/AST/MicrosoftMangle.cpp
+++ b/clang/lib/AST/MicrosoftMangle.cpp
@@ -1492,8 +1492,9 @@ void MicrosoftCXXNameMangler::mangleCXXDtorType(CXXDtorType T) {
// <operator-name> ::= ?_G # scalar deleting destructor
case Dtor_Deleting: Out << "?_G"; return;
// <operator-name> ::= ?_E # vector deleting destructor
- // FIXME: Add a vector deleting dtor type. It goes in the vtable, so we need
- // it.
+ case Dtor_VectorDeleting:
+ Out << "?_E";
+ return;
case Dtor_Comdat:
llvm_unreachable("not expecting a COMDAT");
case Dtor_Unified:
@@ -2913,9 +2914,12 @@ void MicrosoftCXXNameMangler::mangleFunctionType(const FunctionType *T,
// ::= @ # structors (they have no declared return type)
if (IsStructor) {
if (isa<CXXDestructorDecl>(D) && isStructorDecl(D)) {
- // The scalar deleting destructor takes an extra int argument which is not
- // reflected in the AST.
- if (StructorType == Dtor_Deleting) {
+ // The deleting destructors take an extra argument of type int that
+ // indicates whether the storage for the object should be deleted and
+ // whether a single object or an array of objects is being destroyed. This
+ // extra argument is not reflected in the AST.
+ if (StructorType == Dtor_Deleting ||
+ StructorType == Dtor_VectorDeleting) {
Out << (PointersAre64Bit ? "PEAXI@Z" : "PAXI@Z");
return;
}
@@ -3911,10 +3915,10 @@ void MicrosoftMangleContextImpl::mangleCXXDtorThunk(const CXXDestructorDecl *DD,
const ThunkInfo &Thunk,
bool /*ElideOverrideInfo*/,
raw_ostream &Out) {
- // FIXME: Actually, the dtor thunk should be emitted for vector deleting
- // dtors rather than scalar deleting dtors. Just use the vector deleting dtor
- // mangling manually until we support both deleting dtor types.
- assert(Type == Dtor_Deleting);
+ // The dtor thunk should use vector deleting dtor mangling, however as an
+ // optimization we may end up emitting only scalar deleting dtor body, so just
+ // use the vector deleting dtor mangling manually.
+ assert(Type == Dtor_Deleting || Type == Dtor_VectorDeleting);
msvc_hashing_ostream MHO(Out);
MicrosoftCXXNameMangler Mangler(*this, MHO, DD, Type);
Mangler.getStream() << "??_E";
diff --git a/clang/lib/AST/OpenMPClause.cpp b/clang/lib/AST/OpenMPClause.cpp
index 791df7e..9e7a8a4 100644
--- a/clang/lib/AST/OpenMPClause.cpp
+++ b/clang/lib/AST/OpenMPClause.cpp
@@ -105,6 +105,8 @@ const OMPClauseWithPreInit *OMPClauseWithPreInit::get(const OMPClause *C) {
return static_cast<const OMPFilterClause *>(C);
case OMPC_ompx_dyn_cgroup_mem:
return static_cast<const OMPXDynCGroupMemClause *>(C);
+ case OMPC_dyn_groupprivate:
+ return static_cast<const OMPDynGroupprivateClause *>(C);
case OMPC_message:
return static_cast<const OMPMessageClause *>(C);
case OMPC_default:
@@ -124,6 +126,8 @@ const OMPClauseWithPreInit *OMPClauseWithPreInit::get(const OMPClause *C) {
case OMPC_nowait:
case OMPC_untied:
case OMPC_mergeable:
+ case OMPC_threadset:
+ case OMPC_transparent:
case OMPC_threadprivate:
case OMPC_groupprivate:
case OMPC_flush:
@@ -1318,7 +1322,7 @@ OMPToClause *OMPToClause::Create(
const ASTContext &C, const OMPVarListLocTy &Locs, ArrayRef<Expr *> Vars,
ArrayRef<ValueDecl *> Declarations,
MappableExprComponentListsRef ComponentLists, ArrayRef<Expr *> UDMapperRefs,
- ArrayRef<OpenMPMotionModifierKind> MotionModifiers,
+ Expr *IteratorModifier, ArrayRef<OpenMPMotionModifierKind> MotionModifiers,
ArrayRef<SourceLocation> MotionModifiersLoc,
NestedNameSpecifierLoc UDMQualifierLoc, DeclarationNameInfo MapperId) {
OMPMappableExprListSizeTy Sizes;
@@ -1340,7 +1344,7 @@ OMPToClause *OMPToClause::Create(
void *Mem = C.Allocate(
totalSizeToAlloc<Expr *, ValueDecl *, unsigned,
OMPClauseMappableExprCommon::MappableComponent>(
- 2 * Sizes.NumVars, Sizes.NumUniqueDeclarations,
+ 2 * Sizes.NumVars + 1, Sizes.NumUniqueDeclarations,
Sizes.NumUniqueDeclarations + Sizes.NumComponentLists,
Sizes.NumComponents));
@@ -1350,6 +1354,7 @@ OMPToClause *OMPToClause::Create(
Clause->setVarRefs(Vars);
Clause->setUDMapperRefs(UDMapperRefs);
Clause->setClauseInfo(Declarations, ComponentLists);
+ Clause->setIteratorModifier(IteratorModifier);
return Clause;
}
@@ -1358,17 +1363,19 @@ OMPToClause *OMPToClause::CreateEmpty(const ASTContext &C,
void *Mem = C.Allocate(
totalSizeToAlloc<Expr *, ValueDecl *, unsigned,
OMPClauseMappableExprCommon::MappableComponent>(
- 2 * Sizes.NumVars, Sizes.NumUniqueDeclarations,
+ 2 * Sizes.NumVars + 1, Sizes.NumUniqueDeclarations,
Sizes.NumUniqueDeclarations + Sizes.NumComponentLists,
Sizes.NumComponents));
- return new (Mem) OMPToClause(Sizes);
+ OMPToClause *Clause = new (Mem) OMPToClause(Sizes);
+ Clause->setIteratorModifier(nullptr);
+ return Clause;
}
OMPFromClause *OMPFromClause::Create(
const ASTContext &C, const OMPVarListLocTy &Locs, ArrayRef<Expr *> Vars,
ArrayRef<ValueDecl *> Declarations,
MappableExprComponentListsRef ComponentLists, ArrayRef<Expr *> UDMapperRefs,
- ArrayRef<OpenMPMotionModifierKind> MotionModifiers,
+ Expr *IteratorModifier, ArrayRef<OpenMPMotionModifierKind> MotionModifiers,
ArrayRef<SourceLocation> MotionModifiersLoc,
NestedNameSpecifierLoc UDMQualifierLoc, DeclarationNameInfo MapperId) {
OMPMappableExprListSizeTy Sizes;
@@ -1390,7 +1397,7 @@ OMPFromClause *OMPFromClause::Create(
void *Mem = C.Allocate(
totalSizeToAlloc<Expr *, ValueDecl *, unsigned,
OMPClauseMappableExprCommon::MappableComponent>(
- 2 * Sizes.NumVars, Sizes.NumUniqueDeclarations,
+ 2 * Sizes.NumVars + 1, Sizes.NumUniqueDeclarations,
Sizes.NumUniqueDeclarations + Sizes.NumComponentLists,
Sizes.NumComponents));
@@ -1401,6 +1408,7 @@ OMPFromClause *OMPFromClause::Create(
Clause->setVarRefs(Vars);
Clause->setUDMapperRefs(UDMapperRefs);
Clause->setClauseInfo(Declarations, ComponentLists);
+ Clause->setIteratorModifier(IteratorModifier);
return Clause;
}
@@ -1410,10 +1418,12 @@ OMPFromClause::CreateEmpty(const ASTContext &C,
void *Mem = C.Allocate(
totalSizeToAlloc<Expr *, ValueDecl *, unsigned,
OMPClauseMappableExprCommon::MappableComponent>(
- 2 * Sizes.NumVars, Sizes.NumUniqueDeclarations,
+ 2 * Sizes.NumVars + 1, Sizes.NumUniqueDeclarations,
Sizes.NumUniqueDeclarations + Sizes.NumComponentLists,
Sizes.NumComponents));
- return new (Mem) OMPFromClause(Sizes);
+ OMPFromClause *Clause = new (Mem) OMPFromClause(Sizes);
+ Clause->setIteratorModifier(nullptr);
+ return Clause;
}
void OMPUseDevicePtrClause::setPrivateCopies(ArrayRef<Expr *> VL) {
@@ -1432,7 +1442,9 @@ OMPUseDevicePtrClause *OMPUseDevicePtrClause::Create(
const ASTContext &C, const OMPVarListLocTy &Locs, ArrayRef<Expr *> Vars,
ArrayRef<Expr *> PrivateVars, ArrayRef<Expr *> Inits,
ArrayRef<ValueDecl *> Declarations,
- MappableExprComponentListsRef ComponentLists) {
+ MappableExprComponentListsRef ComponentLists,
+ OpenMPUseDevicePtrFallbackModifier FallbackModifier,
+ SourceLocation FallbackModifierLoc) {
OMPMappableExprListSizeTy Sizes;
Sizes.NumVars = Vars.size();
Sizes.NumUniqueDeclarations = getUniqueDeclarationsTotalNumber(Declarations);
@@ -1456,7 +1468,8 @@ OMPUseDevicePtrClause *OMPUseDevicePtrClause::Create(
Sizes.NumUniqueDeclarations + Sizes.NumComponentLists,
Sizes.NumComponents));
- OMPUseDevicePtrClause *Clause = new (Mem) OMPUseDevicePtrClause(Locs, Sizes);
+ OMPUseDevicePtrClause *Clause = new (Mem)
+ OMPUseDevicePtrClause(Locs, Sizes, FallbackModifier, FallbackModifierLoc);
Clause->setVarRefs(Vars);
Clause->setPrivateCopies(PrivateVars);
@@ -2035,6 +2048,22 @@ void OMPClausePrinter::VisitOMPDefaultClause(OMPDefaultClause *Node) {
OS << ")";
}
+void OMPClausePrinter::VisitOMPThreadsetClause(OMPThreadsetClause *Node) {
+ OS << "threadset("
+ << getOpenMPSimpleClauseTypeName(OMPC_threadset,
+ unsigned(Node->getThreadsetKind()))
+ << ")";
+}
+
+void OMPClausePrinter::VisitOMPTransparentClause(OMPTransparentClause *Node) {
+ OS << "transparent(";
+ if (Node->getImpexType())
+ Node->getImpexType()->printPretty(OS, nullptr, Policy, 0);
+ else
+ OS << "omp_impex";
+ OS << ")";
+}
+
void OMPClausePrinter::VisitOMPProcBindClause(OMPProcBindClause *Node) {
OS << "proc_bind("
<< getOpenMPSimpleClauseTypeName(OMPC_proc_bind,
@@ -2684,12 +2713,16 @@ template <typename T> void OMPClausePrinter::VisitOMPMotionClause(T *Node) {
OS << '(';
for (unsigned I = 0; I < NumberOfOMPMotionModifiers; ++I) {
if (Node->getMotionModifier(I) != OMPC_MOTION_MODIFIER_unknown) {
- OS << getOpenMPSimpleClauseTypeName(Node->getClauseKind(),
- Node->getMotionModifier(I));
- if (Node->getMotionModifier(I) == OMPC_MOTION_MODIFIER_mapper)
- PrintMapper(OS, Node, Policy);
- if (I < ModifierCount - 1)
- OS << ", ";
+ if (Node->getMotionModifier(I) == OMPC_MOTION_MODIFIER_iterator) {
+ PrintIterator(OS, Node, Policy);
+ } else {
+ OS << getOpenMPSimpleClauseTypeName(Node->getClauseKind(),
+ Node->getMotionModifier(I));
+ if (Node->getMotionModifier(I) == OMPC_MOTION_MODIFIER_mapper)
+ PrintMapper(OS, Node, Policy);
+ if (I < ModifierCount - 1)
+ OS << ", ";
+ }
}
}
OS << ':';
@@ -2733,7 +2766,15 @@ void OMPClausePrinter::VisitOMPDefaultmapClause(OMPDefaultmapClause *Node) {
void OMPClausePrinter::VisitOMPUseDevicePtrClause(OMPUseDevicePtrClause *Node) {
if (!Node->varlist_empty()) {
OS << "use_device_ptr";
- VisitOMPClauseList(Node, '(');
+ if (Node->getFallbackModifier() != OMPC_USE_DEVICE_PTR_FALLBACK_unknown) {
+ OS << "("
+ << getOpenMPSimpleClauseTypeName(OMPC_use_device_ptr,
+ Node->getFallbackModifier())
+ << ":";
+ VisitOMPClauseList(Node, ' ');
+ } else {
+ VisitOMPClauseList(Node, '(');
+ }
OS << ")";
}
}
@@ -2849,6 +2890,24 @@ void OMPClausePrinter::VisitOMPXDynCGroupMemClause(
OS << ")";
}
+void OMPClausePrinter::VisitOMPDynGroupprivateClause(
+ OMPDynGroupprivateClause *Node) {
+ OS << "dyn_groupprivate(";
+ if (Node->getDynGroupprivateModifier() != OMPC_DYN_GROUPPRIVATE_unknown) {
+ OS << getOpenMPSimpleClauseTypeName(OMPC_dyn_groupprivate,
+ Node->getDynGroupprivateModifier());
+ if (Node->getDynGroupprivateFallbackModifier() !=
+ OMPC_DYN_GROUPPRIVATE_FALLBACK_unknown) {
+ OS << ", ";
+ OS << getOpenMPSimpleClauseTypeName(
+ OMPC_dyn_groupprivate, Node->getDynGroupprivateFallbackModifier());
+ }
+ OS << ": ";
+ }
+ Node->getSize()->printPretty(OS, nullptr, Policy, 0);
+ OS << ')';
+}
+
void OMPClausePrinter::VisitOMPDoacrossClause(OMPDoacrossClause *Node) {
OS << "doacross(";
OpenMPDoacrossClauseModifier DepType = Node->getDependenceType();
diff --git a/clang/lib/AST/RecordLayoutBuilder.cpp b/clang/lib/AST/RecordLayoutBuilder.cpp
index ac18d4d..36b8eea 100644
--- a/clang/lib/AST/RecordLayoutBuilder.cpp
+++ b/clang/lib/AST/RecordLayoutBuilder.cpp
@@ -2794,6 +2794,13 @@ void MicrosoftRecordLayoutBuilder::initializeLayout(const RecordDecl *RD) {
UseExternalLayout = Source->layoutRecordType(
RD, External.Size, External.Align, External.FieldOffsets,
External.BaseOffsets, External.VirtualBaseOffsets);
+
+ if (!RD->isMsStruct(Context)) {
+ auto Location = RD->getLocation();
+ if (Location.isValid())
+ Context.getDiagnostics().Report(Location,
+ diag::err_itanium_layout_unimplemented);
+ }
}
void
@@ -3358,21 +3365,25 @@ void MicrosoftRecordLayoutBuilder::computeVtorDispSet(
}
}
+bool ASTContext::defaultsToMsStruct() const {
+ return getTargetInfo().hasMicrosoftRecordLayout() ||
+ getTargetInfo().getTriple().isWindowsGNUEnvironment();
+}
+
/// getASTRecordLayout - Get or compute information about the layout of the
/// specified record (struct/union/class), which indicates its size and field
/// position information.
const ASTRecordLayout &
ASTContext::getASTRecordLayout(const RecordDecl *D) const {
- // These asserts test different things. A record has a definition
- // as soon as we begin to parse the definition. That definition is
- // not a complete definition (which is what isDefinition() tests)
- // until we *finish* parsing the definition.
-
if (D->hasExternalLexicalStorage() && !D->getDefinition())
getExternalSource()->CompleteType(const_cast<RecordDecl*>(D));
// Complete the redecl chain (if necessary).
(void)D->getMostRecentDecl();
+ // These asserts test different things. A record has a definition
+ // as soon as we begin to parse the definition. That definition is
+ // not a complete definition (which is what isCompleteDefinition() tests)
+ // until we *finish* parsing the definition.
D = D->getDefinition();
assert(D && "Cannot get layout of forward declarations!");
assert(!D->isInvalidDecl() && "Cannot get layout of invalid decl!");
diff --git a/clang/lib/AST/Stmt.cpp b/clang/lib/AST/Stmt.cpp
index 9ae8aea..5b745dd 100644
--- a/clang/lib/AST/Stmt.cpp
+++ b/clang/lib/AST/Stmt.cpp
@@ -252,7 +252,7 @@ namespace {
template <class T> good implements_children(children_t T::*) {
return good();
}
- LLVM_ATTRIBUTE_UNUSED
+ [[maybe_unused]]
static bad implements_children(children_t Stmt::*) {
return bad();
}
@@ -261,15 +261,19 @@ namespace {
template <class T> good implements_getBeginLoc(getBeginLoc_t T::*) {
return good();
}
- LLVM_ATTRIBUTE_UNUSED
- static bad implements_getBeginLoc(getBeginLoc_t Stmt::*) { return bad(); }
+ [[maybe_unused]]
+ static bad implements_getBeginLoc(getBeginLoc_t Stmt::*) {
+ return bad();
+ }
typedef SourceLocation getLocEnd_t() const;
template <class T> good implements_getEndLoc(getLocEnd_t T::*) {
return good();
}
- LLVM_ATTRIBUTE_UNUSED
- static bad implements_getEndLoc(getLocEnd_t Stmt::*) { return bad(); }
+ [[maybe_unused]]
+ static bad implements_getEndLoc(getLocEnd_t Stmt::*) {
+ return bad();
+ }
#define ASSERT_IMPLEMENTS_children(type) \
(void) is_good(implements_children(&type::children))
@@ -282,7 +286,7 @@ namespace {
/// Check whether the various Stmt classes implement their member
/// functions.
-LLVM_ATTRIBUTE_UNUSED
+[[maybe_unused]]
static inline void check_implementations() {
#define ABSTRACT_STMT(type)
#define STMT(type, base) \
@@ -446,6 +450,39 @@ AttributedStmt *AttributedStmt::CreateEmpty(const ASTContext &C,
return new (Mem) AttributedStmt(EmptyShell(), NumAttrs);
}
+std::string
+AsmStmt::addVariableConstraints(StringRef Constraint, const Expr &AsmExpr,
+ const TargetInfo &Target, bool EarlyClobber,
+ UnsupportedConstraintCallbackTy UnsupportedCB,
+ std::string *GCCReg) const {
+ const DeclRefExpr *AsmDeclRef = dyn_cast<DeclRefExpr>(&AsmExpr);
+ if (!AsmDeclRef)
+ return Constraint.str();
+ const ValueDecl &Value = *AsmDeclRef->getDecl();
+ const VarDecl *Variable = dyn_cast<VarDecl>(&Value);
+ if (!Variable)
+ return Constraint.str();
+ if (Variable->getStorageClass() != SC_Register)
+ return Constraint.str();
+ AsmLabelAttr *Attr = Variable->getAttr<AsmLabelAttr>();
+ if (!Attr)
+ return Constraint.str();
+ StringRef Register = Attr->getLabel();
+ assert(Target.isValidGCCRegisterName(Register));
+ // We're using validateOutputConstraint here because we only care if
+ // this is a register constraint.
+ TargetInfo::ConstraintInfo Info(Constraint, "");
+ if (Target.validateOutputConstraint(Info) && !Info.allowsRegister()) {
+ UnsupportedCB(this, "__asm__");
+ return Constraint.str();
+ }
+ // Canonicalize the register here before returning it.
+ Register = Target.getNormalizedGCCRegisterName(Register);
+ if (GCCReg != nullptr)
+ *GCCReg = Register.str();
+ return (EarlyClobber ? "&{" : "{") + Register.str() + "}";
+}
+
std::string AsmStmt::generateAsmString(const ASTContext &C) const {
if (const auto *gccAsmStmt = dyn_cast<GCCAsmStmt>(this))
return gccAsmStmt->generateAsmString(C);
@@ -1495,3 +1532,19 @@ const Stmt *LoopControlStmt::getNamedLoopOrSwitch() const {
return nullptr;
return getLabelDecl()->getStmt()->getInnermostLabeledStmt();
}
+
+DeferStmt::DeferStmt(EmptyShell Empty) : Stmt(DeferStmtClass, Empty) {}
+DeferStmt::DeferStmt(SourceLocation DeferLoc, Stmt *Body)
+ : Stmt(DeferStmtClass) {
+ setDeferLoc(DeferLoc);
+ setBody(Body);
+}
+
+DeferStmt *DeferStmt::CreateEmpty(ASTContext &Context, EmptyShell Empty) {
+ return new (Context) DeferStmt(Empty);
+}
+
+DeferStmt *DeferStmt::Create(ASTContext &Context, SourceLocation DeferLoc,
+ Stmt *Body) {
+ return new (Context) DeferStmt(DeferLoc, Body);
+}
diff --git a/clang/lib/AST/StmtOpenACC.cpp b/clang/lib/AST/StmtOpenACC.cpp
index 07e3de8..2191c6a 100644
--- a/clang/lib/AST/StmtOpenACC.cpp
+++ b/clang/lib/AST/StmtOpenACC.cpp
@@ -12,7 +12,9 @@
#include "clang/AST/StmtOpenACC.h"
#include "clang/AST/ASTContext.h"
+#include "clang/AST/ExprCXX.h"
#include "clang/AST/StmtCXX.h"
+
using namespace clang;
OpenACCComputeConstruct *
@@ -322,6 +324,256 @@ OpenACCAtomicConstruct *OpenACCAtomicConstruct::Create(
return Inst;
}
+static std::optional<std::pair<const Expr *, const Expr *>>
+getBinaryAssignOpArgs(const Expr *Op, bool &IsCompoundAssign) {
+ if (const auto *BO = dyn_cast<BinaryOperator>(Op)) {
+ if (!BO->isAssignmentOp())
+ return std::nullopt;
+ IsCompoundAssign = BO->isCompoundAssignmentOp();
+ return std::pair<const Expr *, const Expr *>(BO->getLHS(), BO->getRHS());
+ }
+
+ if (const auto *OO = dyn_cast<CXXOperatorCallExpr>(Op)) {
+ if (!OO->isAssignmentOp())
+ return std::nullopt;
+ IsCompoundAssign = OO->getOperator() != OO_Equal;
+ return std::pair<const Expr *, const Expr *>(OO->getArg(0), OO->getArg(1));
+ }
+ return std::nullopt;
+}
+static std::optional<std::pair<const Expr *, const Expr *>>
+getBinaryAssignOpArgs(const Expr *Op) {
+ bool IsCompoundAssign;
+ return getBinaryAssignOpArgs(Op, IsCompoundAssign);
+}
+
+static std::optional<std::pair<const Expr *, bool>>
+getUnaryOpArgs(const Expr *Op) {
+ if (const auto *UO = dyn_cast<UnaryOperator>(Op))
+ return {{UO->getSubExpr(), UO->isPostfix()}};
+
+ if (const auto *OpCall = dyn_cast<CXXOperatorCallExpr>(Op)) {
+ // Post-inc/dec have a second unused argument to differentiate it, so we
+ // accept -- or ++ as unary, or any operator call with only 1 arg.
+ if (OpCall->getNumArgs() == 1 || OpCall->getOperator() == OO_PlusPlus ||
+ OpCall->getOperator() == OO_MinusMinus)
+ return {{OpCall->getArg(0), /*IsPostfix=*/OpCall->getNumArgs() == 1}};
+ }
+
+ return std::nullopt;
+}
+
+// Read is of the form `v = x;`, where both sides are scalar L-values. This is a
+// BinaryOperator or CXXOperatorCallExpr.
+static std::optional<OpenACCAtomicConstruct::SingleStmtInfo>
+getReadStmtInfo(const Expr *E, bool ForAtomicComputeSingleStmt = false) {
+ std::optional<std::pair<const Expr *, const Expr *>> BinaryArgs =
+ getBinaryAssignOpArgs(E);
+
+ if (!BinaryArgs)
+ return std::nullopt;
+
+ // We want the L-value for each side, so we ignore implicit casts.
+ auto Res = OpenACCAtomicConstruct::SingleStmtInfo::createRead(
+ E, BinaryArgs->first->IgnoreImpCasts(),
+ BinaryArgs->second->IgnoreImpCasts());
+
+ // The atomic compute single-stmt variant has to do a 'fixup' step for the 'X'
+ // value, since it is dependent on the RHS. So if we're in that version, we
+ // skip the checks on X.
+ if ((!ForAtomicComputeSingleStmt &&
+ (!Res.X->isLValue() || !Res.X->getType()->isScalarType())) ||
+ !Res.V->isLValue() || !Res.V->getType()->isScalarType())
+ return std::nullopt;
+
+ return Res;
+}
+
+// Write supports only the format 'x = expr', where the expression is scalar
+// type, and 'x' is a scalar l value. As above, this can come in 2 forms;
+// Binary Operator or CXXOperatorCallExpr.
+static std::optional<OpenACCAtomicConstruct::SingleStmtInfo>
+getWriteStmtInfo(const Expr *E) {
+ std::optional<std::pair<const Expr *, const Expr *>> BinaryArgs =
+ getBinaryAssignOpArgs(E);
+ if (!BinaryArgs)
+ return std::nullopt;
+ // We want the L-value for ONLY the X side, so we ignore implicit casts. For
+ // the right side (the expr), we emit it as an r-value so we need to
+ // maintain implicit casts.
+ auto Res = OpenACCAtomicConstruct::SingleStmtInfo::createWrite(
+ E, BinaryArgs->first->IgnoreImpCasts(), BinaryArgs->second);
+
+ if (!Res.X->isLValue() || !Res.X->getType()->isScalarType())
+ return std::nullopt;
+ return Res;
+}
+
+static std::optional<OpenACCAtomicConstruct::SingleStmtInfo>
+getUpdateStmtInfo(const Expr *E) {
+ std::optional<std::pair<const Expr *, bool>> UnaryArgs = getUnaryOpArgs(E);
+ if (UnaryArgs) {
+ auto Res = OpenACCAtomicConstruct::SingleStmtInfo::createUpdate(
+ E, UnaryArgs->first->IgnoreImpCasts(), UnaryArgs->second);
+
+ if (!Res.X->isLValue() || !Res.X->getType()->isScalarType())
+ return std::nullopt;
+
+ return Res;
+ }
+
+ bool IsRHSCompoundAssign = false;
+ std::optional<std::pair<const Expr *, const Expr *>> BinaryArgs =
+ getBinaryAssignOpArgs(E, IsRHSCompoundAssign);
+ if (!BinaryArgs)
+ return std::nullopt;
+
+ auto Res = OpenACCAtomicConstruct::SingleStmtInfo::createUpdate(
+ E, BinaryArgs->first->IgnoreImpCasts(), /*PostFixIncDec=*/false);
+
+ if (!Res.X->isLValue() || !Res.X->getType()->isScalarType())
+ return std::nullopt;
+
+ // 'update' has to be either a compound-assignment operation, or
+ // assignment-to-a-binary-op. Return nullopt if these are not the case.
+ // If we are already compound-assign, we're done!
+ if (IsRHSCompoundAssign)
+ return Res;
+
+ // else we have to check that we have a binary operator.
+ const Expr *RHS = BinaryArgs->second->IgnoreImpCasts();
+
+ if (isa<BinaryOperator>(RHS)) {
+ return Res;
+ } else if (const auto *OO = dyn_cast<CXXOperatorCallExpr>(RHS)) {
+ if (OO->isInfixBinaryOp())
+ return Res;
+ }
+
+ return std::nullopt;
+}
+
+/// The statement associated with an atomic capture comes in 1 of two forms: A
+/// compound statement containing two statements, or a single statement. In
+/// either case, the compound/single statement is decomposed into 2 separate
+/// operations, either a read/write, read/update, or update/read. This function
+/// figures out that information in the form listed in the standard (filling in
+/// V, X, or Expr) for each of these operations.
+static OpenACCAtomicConstruct::StmtInfo
+getCaptureStmtInfo(const Stmt *AssocStmt) {
+
+ if (const auto *CmpdStmt = dyn_cast<CompoundStmt>(AssocStmt)) {
+ // We checked during Sema to ensure we only have 2 statements here, and
+ // that both are expressions, we can look at these to see what the valid
+ // options are.
+ const Expr *Stmt1 = cast<Expr>(*CmpdStmt->body().begin())->IgnoreImpCasts();
+ const Expr *Stmt2 =
+ cast<Expr>(*(CmpdStmt->body().begin() + 1))->IgnoreImpCasts();
+
+ // The compound statement form allows read/write, read/update, or
+ // update/read. First we get the information for a 'Read' to see if this is
+ // one of the former two.
+ std::optional<OpenACCAtomicConstruct::SingleStmtInfo> Read =
+ getReadStmtInfo(Stmt1);
+
+ if (Read) {
+ // READ : WRITE
+ // v = x; x = expr
+ // READ : UPDATE
+ // v = x; x binop = expr
+ // v = x; x = x binop expr
+ // v = x; x = expr binop x
+ // v = x; x++
+ // v = x; ++x
+ // v = x; x--
+ // v = x; --x
+ std::optional<OpenACCAtomicConstruct::SingleStmtInfo> Update =
+ getUpdateStmtInfo(Stmt2);
+ // Since we already know the first operation is a read, the second is
+ // either an update, which we check, or a write, which we can assume next.
+ if (Update)
+ return OpenACCAtomicConstruct::StmtInfo::createReadUpdate(*Read,
+ *Update);
+
+ std::optional<OpenACCAtomicConstruct::SingleStmtInfo> Write =
+ getWriteStmtInfo(Stmt2);
+ return OpenACCAtomicConstruct::StmtInfo::createReadWrite(*Read, *Write);
+ }
+ // UPDATE: READ
+ // x binop = expr; v = x
+ // x = x binop expr; v = x
+ // x = expr binop x ; v = x
+ // ++ x; v = x
+ // x++; v = x
+ // --x; v = x
+ // x--; v = x
+ // Otherwise, it is one of the above forms for update/read.
+ std::optional<OpenACCAtomicConstruct::SingleStmtInfo> Update =
+ getUpdateStmtInfo(Stmt1);
+ Read = getReadStmtInfo(Stmt2);
+
+ return OpenACCAtomicConstruct::StmtInfo::createUpdateRead(*Update, *Read);
+ }
+
+ // All of the forms that can be done in a single line fall into 2
+ // categories: update/read, or read/update. The special cases are the
+ // postfix unary operators, which we have to make sure we do the 'read'
+ // first. However, we still parse these as the RHS first, so we have a
+  // 'reversing' step. READ:UPDATE: v = x++; v = x--; UPDATE:READ: v = ++x;
+  // v = --x; v = x binop= expr; v = x = x binop expr; v = x = expr binop x
+
+ const Expr *E = cast<const Expr>(AssocStmt);
+
+ std::optional<OpenACCAtomicConstruct::SingleStmtInfo> Read =
+ getReadStmtInfo(E, /*ForAtomicComputeSingleStmt=*/true);
+ std::optional<OpenACCAtomicConstruct::SingleStmtInfo> Update =
+ getUpdateStmtInfo(Read->X);
+
+ // Fixup this, since the 'X' for the read is the result after write, but is
+ // the same value as the LHS-most variable of the update(its X).
+ Read->X = Update->X;
+
+ // Postfix is a read FIRST, then an update.
+ if (Update->IsPostfixIncDec)
+ return OpenACCAtomicConstruct::StmtInfo::createReadUpdate(*Read, *Update);
+
+ return OpenACCAtomicConstruct::StmtInfo::createUpdateRead(*Update, *Read);
+}
+
+const OpenACCAtomicConstruct::StmtInfo
+OpenACCAtomicConstruct::getAssociatedStmtInfo() const {
+ // This ends up being a vastly simplified version of SemaOpenACCAtomic, since
+ // it doesn't have to worry about erroring out, but we should do a lot of
+ // asserts to ensure we don't get off into the weeds.
+ assert(getAssociatedStmt() && "invalid associated stmt?");
+
+ switch (AtomicKind) {
+ case OpenACCAtomicKind::Read:
+ return OpenACCAtomicConstruct::StmtInfo{
+ OpenACCAtomicConstruct::StmtInfo::StmtForm::Read,
+ *getReadStmtInfo(cast<const Expr>(getAssociatedStmt())),
+ OpenACCAtomicConstruct::SingleStmtInfo::Empty()};
+
+ case OpenACCAtomicKind::Write:
+ return OpenACCAtomicConstruct::StmtInfo{
+ OpenACCAtomicConstruct::StmtInfo::StmtForm::Write,
+ *getWriteStmtInfo(cast<const Expr>(getAssociatedStmt())),
+ OpenACCAtomicConstruct::SingleStmtInfo::Empty()};
+
+ case OpenACCAtomicKind::None:
+ case OpenACCAtomicKind::Update:
+ return OpenACCAtomicConstruct::StmtInfo{
+ OpenACCAtomicConstruct::StmtInfo::StmtForm::Update,
+ *getUpdateStmtInfo(cast<const Expr>(getAssociatedStmt())),
+ OpenACCAtomicConstruct::SingleStmtInfo::Empty()};
+
+ case OpenACCAtomicKind::Capture:
+ return getCaptureStmtInfo(getAssociatedStmt());
+ }
+
+ llvm_unreachable("unknown OpenACC atomic kind");
+}
+
OpenACCCacheConstruct *OpenACCCacheConstruct::CreateEmpty(const ASTContext &C,
unsigned NumVars) {
void *Mem =
diff --git a/clang/lib/AST/StmtPrinter.cpp b/clang/lib/AST/StmtPrinter.cpp
index 586c300..f4ce4a7 100644
--- a/clang/lib/AST/StmtPrinter.cpp
+++ b/clang/lib/AST/StmtPrinter.cpp
@@ -151,11 +151,11 @@ namespace {
else StmtVisitor<StmtPrinter>::Visit(S);
}
- void VisitStmt(Stmt *Node) LLVM_ATTRIBUTE_UNUSED {
+ [[maybe_unused]] void VisitStmt(Stmt *Node) {
Indent() << "<<unknown stmt type>>" << NL;
}
- void VisitExpr(Expr *Node) LLVM_ATTRIBUTE_UNUSED {
+ [[maybe_unused]] void VisitExpr(Expr *Node) {
OS << "<<unknown expr type>>";
}
@@ -491,6 +491,11 @@ void StmtPrinter::VisitBreakStmt(BreakStmt *Node) {
if (Policy.IncludeNewlines) OS << NL;
}
+void StmtPrinter::VisitDeferStmt(DeferStmt *Node) {
+ Indent() << "_Defer";
+ PrintControlledStmt(Node->getBody());
+}
+
void StmtPrinter::VisitReturnStmt(ReturnStmt *Node) {
Indent() << "return";
if (Node->getRetValue()) {
@@ -1685,6 +1690,14 @@ void StmtPrinter::VisitArraySubscriptExpr(ArraySubscriptExpr *Node) {
OS << "]";
}
+void StmtPrinter::VisitMatrixSingleSubscriptExpr(
+ MatrixSingleSubscriptExpr *Node) {
+ PrintExpr(Node->getBase());
+ OS << "[";
+ PrintExpr(Node->getRowIdx());
+ OS << "]";
+}
+
void StmtPrinter::VisitMatrixSubscriptExpr(MatrixSubscriptExpr *Node) {
PrintExpr(Node->getBase());
OS << "[";
@@ -1813,6 +1826,12 @@ void StmtPrinter::VisitExtVectorElementExpr(ExtVectorElementExpr *Node) {
OS << Node->getAccessor().getName();
}
+void StmtPrinter::VisitMatrixElementExpr(MatrixElementExpr *Node) {
+ PrintExpr(Node->getBase());
+ OS << ".";
+ OS << Node->getAccessor().getName();
+}
+
void StmtPrinter::VisitCStyleCastExpr(CStyleCastExpr *Node) {
OS << '(';
Node->getTypeAsWritten().print(OS, Policy);
@@ -2566,6 +2585,11 @@ void StmtPrinter::VisitCXXUnresolvedConstructExpr(
OS << ')';
}
+void StmtPrinter::VisitCXXReflectExpr(CXXReflectExpr *S) {
+ // TODO(Reflection): Implement this.
+ assert(false && "not implemented yet");
+}
+
void StmtPrinter::VisitCXXDependentScopeMemberExpr(
CXXDependentScopeMemberExpr *Node) {
if (!Node->isImplicitAccess()) {
diff --git a/clang/lib/AST/StmtProfile.cpp b/clang/lib/AST/StmtProfile.cpp
index 05b64cc..6239051 100644
--- a/clang/lib/AST/StmtProfile.cpp
+++ b/clang/lib/AST/StmtProfile.cpp
@@ -323,6 +323,8 @@ void StmtProfiler::VisitReturnStmt(const ReturnStmt *S) {
VisitStmt(S);
}
+void StmtProfiler::VisitDeferStmt(const DeferStmt *S) { VisitStmt(S); }
+
void StmtProfiler::VisitGCCAsmStmt(const GCCAsmStmt *S) {
VisitStmt(S);
ID.AddBoolean(S->isVolatile());
@@ -546,6 +548,14 @@ void OMPClauseProfiler::VisitOMPNocontextClause(const OMPNocontextClause *C) {
void OMPClauseProfiler::VisitOMPDefaultClause(const OMPDefaultClause *C) { }
+void OMPClauseProfiler::VisitOMPThreadsetClause(const OMPThreadsetClause *C) {}
+
+void OMPClauseProfiler::VisitOMPTransparentClause(
+ const OMPTransparentClause *C) {
+ if (C->getImpexType())
+ Profiler->VisitStmt(C->getImpexType());
+}
+
void OMPClauseProfiler::VisitOMPProcBindClause(const OMPProcBindClause *C) { }
void OMPClauseProfiler::VisitOMPUnifiedAddressClause(
@@ -966,6 +976,12 @@ void OMPClauseProfiler::VisitOMPXDynCGroupMemClause(
if (Expr *Size = C->getSize())
Profiler->VisitStmt(Size);
}
+void OMPClauseProfiler::VisitOMPDynGroupprivateClause(
+ const OMPDynGroupprivateClause *C) {
+ VisitOMPClauseWithPreInit(C);
+ if (auto *Size = C->getSize())
+ Profiler->VisitStmt(Size);
+}
void OMPClauseProfiler::VisitOMPDoacrossClause(const OMPDoacrossClause *C) {
VisitOMPClauseList(C);
}
@@ -1500,6 +1516,11 @@ void StmtProfiler::VisitArraySubscriptExpr(const ArraySubscriptExpr *S) {
VisitExpr(S);
}
+void StmtProfiler::VisitMatrixSingleSubscriptExpr(
+ const MatrixSingleSubscriptExpr *S) {
+ VisitExpr(S);
+}
+
void StmtProfiler::VisitMatrixSubscriptExpr(const MatrixSubscriptExpr *S) {
VisitExpr(S);
}
@@ -1659,6 +1680,11 @@ void StmtProfiler::VisitExtVectorElementExpr(const ExtVectorElementExpr *S) {
VisitName(&S->getAccessor());
}
+void StmtProfiler::VisitMatrixElementExpr(const MatrixElementExpr *S) {
+ VisitExpr(S);
+ VisitName(&S->getAccessor());
+}
+
void StmtProfiler::VisitBlockExpr(const BlockExpr *S) {
VisitExpr(S);
VisitDecl(S->getBlockDecl());
@@ -2167,6 +2193,11 @@ StmtProfiler::VisitLambdaExpr(const LambdaExpr *S) {
ID.AddInteger(Hasher.CalculateHash());
}
+void StmtProfiler::VisitCXXReflectExpr(const CXXReflectExpr *E) {
+ // TODO(Reflection): Implement this.
+ assert(false && "not implemented yet");
+}
+
void
StmtProfiler::VisitCXXScalarValueInitExpr(const CXXScalarValueInitExpr *S) {
VisitExpr(S);
diff --git a/clang/lib/AST/TemplateBase.cpp b/clang/lib/AST/TemplateBase.cpp
index 76f96fb..131ae6e 100644
--- a/clang/lib/AST/TemplateBase.cpp
+++ b/clang/lib/AST/TemplateBase.cpp
@@ -340,13 +340,14 @@ bool TemplateArgument::isPackExpansion() const {
}
bool TemplateArgument::isConceptOrConceptTemplateParameter() const {
- if (getKind() == TemplateArgument::Template) {
- if (isa<ConceptDecl>(getAsTemplate().getAsTemplateDecl()))
- return true;
- else if (auto *TTP = dyn_cast_if_present<TemplateTemplateParmDecl>(
- getAsTemplate().getAsTemplateDecl()))
- return TTP->templateParameterKind() == TNK_Concept_template;
- }
+ if (getKind() != TemplateArgument::Template)
+ return false;
+
+ if (isa_and_nonnull<ConceptDecl>(getAsTemplate().getAsTemplateDecl()))
+ return true;
+ if (auto *TTP = llvm::dyn_cast_or_null<TemplateTemplateParmDecl>(
+ getAsTemplate().getAsTemplateDecl()))
+ return TTP->templateParameterKind() == TNK_Concept_template;
return false;
}
diff --git a/clang/lib/AST/TextNodeDumper.cpp b/clang/lib/AST/TextNodeDumper.cpp
index 41aebdb..aebfb9f 100644
--- a/clang/lib/AST/TextNodeDumper.cpp
+++ b/clang/lib/AST/TextNodeDumper.cpp
@@ -850,7 +850,10 @@ void TextNodeDumper::Visit(const APValue &Value, QualType Ty) {
return;
}
case APValue::AddrLabelDiff:
- OS << "AddrLabelDiff <todo>";
+ OS << "AddrLabelDiff ";
+ OS << "&&" << Value.getAddrLabelDiffLHS()->getLabel()->getName();
+ OS << " - ";
+ OS << "&&" << Value.getAddrLabelDiffRHS()->getLabel()->getName();
return;
}
llvm_unreachable("Unknown APValue kind!");
@@ -1672,6 +1675,10 @@ void TextNodeDumper::VisitExtVectorElementExpr(
OS << " " << Node->getAccessor().getNameStart();
}
+void TextNodeDumper::VisitMatrixElementExpr(const MatrixElementExpr *Node) {
+ OS << " " << Node->getAccessor().getNameStart();
+}
+
void TextNodeDumper::VisitBinaryOperator(const BinaryOperator *Node) {
OS << " '" << BinaryOperator::getOpcodeStr(Node->getOpcode()) << "'";
if (Node->hasStoredFPFeatures())
diff --git a/clang/lib/AST/Type.cpp b/clang/lib/AST/Type.cpp
index 4548af1..dcdbb62 100644
--- a/clang/lib/AST/Type.cpp
+++ b/clang/lib/AST/Type.cpp
@@ -101,6 +101,7 @@ bool Qualifiers::isTargetAddressSpaceSupersetOf(LangAS A, LangAS B,
(A == LangAS::Default && B == LangAS::hlsl_private) ||
(A == LangAS::Default && B == LangAS::hlsl_device) ||
(A == LangAS::Default && B == LangAS::hlsl_input) ||
+ (A == LangAS::Default && B == LangAS::hlsl_push_constant) ||
// Conversions from target specific address spaces may be legal
// depending on the target information.
Ctx.getTargetInfo().isAddressSpaceSupersetOf(A, B);
@@ -2946,7 +2947,8 @@ bool QualType::isWebAssemblyExternrefType() const {
bool QualType::isWebAssemblyFuncrefType() const {
return getTypePtr()->isFunctionPointerType() &&
- getAddressSpace() == LangAS::wasm_funcref;
+ (getTypePtr()->getPointeeType().getAddressSpace() ==
+ LangAS::wasm_funcref);
}
QualType::PrimitiveDefaultInitializeKind
diff --git a/clang/lib/AST/TypePrinter.cpp b/clang/lib/AST/TypePrinter.cpp
index 2da7789..3a2f6a1 100644
--- a/clang/lib/AST/TypePrinter.cpp
+++ b/clang/lib/AST/TypePrinter.cpp
@@ -131,8 +131,6 @@ public:
void printBefore(QualType T, raw_ostream &OS);
void printAfter(QualType T, raw_ostream &OS);
- void AppendScope(DeclContext *DC, raw_ostream &OS,
- DeclarationName NameInScope);
void printTagType(const TagType *T, raw_ostream &OS);
void printFunctionAfter(const FunctionType::ExtInfo &Info, raw_ostream &OS);
#define ABSTRACT_TYPE(CLASS, PARENT)
@@ -1226,7 +1224,7 @@ void TypePrinter::printTypeSpec(NamedDecl *D, raw_ostream &OS) {
// In C, this will always be empty except when the type
// being printed is anonymous within other Record.
if (!Policy.SuppressScope)
- AppendScope(D->getDeclContext(), OS, D->getDeclName());
+ D->printNestedNameSpecifier(OS, Policy);
IdentifierInfo *II = D->getIdentifier();
OS << II->getName();
@@ -1240,7 +1238,7 @@ void TypePrinter::printUnresolvedUsingBefore(const UnresolvedUsingType *T,
OS << ' ';
auto *D = T->getDecl();
if (Policy.FullyQualifiedName || T->isCanonicalUnqualified()) {
- AppendScope(D->getDeclContext(), OS, D->getDeclName());
+ D->printNestedNameSpecifier(OS, Policy);
} else {
T->getQualifier().print(OS, Policy);
}
@@ -1257,7 +1255,7 @@ void TypePrinter::printUsingBefore(const UsingType *T, raw_ostream &OS) {
OS << ' ';
auto *D = T->getDecl();
if (Policy.FullyQualifiedName) {
- AppendScope(D->getDeclContext(), OS, D->getDeclName());
+ D->printNestedNameSpecifier(OS, Policy);
} else {
T->getQualifier().print(OS, Policy);
}
@@ -1273,7 +1271,7 @@ void TypePrinter::printTypedefBefore(const TypedefType *T, raw_ostream &OS) {
OS << ' ';
auto *D = T->getDecl();
if (Policy.FullyQualifiedName) {
- AppendScope(D->getDeclContext(), OS, D->getDeclName());
+ D->printNestedNameSpecifier(OS, Policy);
} else {
T->getQualifier().print(OS, Policy);
}
@@ -1511,59 +1509,6 @@ void TypePrinter::printPredefinedSugarBefore(const PredefinedSugarType *T,
void TypePrinter::printPredefinedSugarAfter(const PredefinedSugarType *T,
raw_ostream &OS) {}
-/// Appends the given scope to the end of a string.
-void TypePrinter::AppendScope(DeclContext *DC, raw_ostream &OS,
- DeclarationName NameInScope) {
- if (DC->isTranslationUnit())
- return;
-
- // FIXME: Consider replacing this with NamedDecl::printNestedNameSpecifier,
- // which can also print names for function and method scopes.
- if (DC->isFunctionOrMethod())
- return;
-
- if (Policy.Callbacks && Policy.Callbacks->isScopeVisible(DC))
- return;
-
- if (const auto *NS = dyn_cast<NamespaceDecl>(DC)) {
- if (Policy.SuppressUnwrittenScope && NS->isAnonymousNamespace())
- return AppendScope(DC->getParent(), OS, NameInScope);
-
- // Only suppress an inline namespace if the name has the same lookup
- // results in the enclosing namespace.
- if (Policy.SuppressInlineNamespace !=
- PrintingPolicy::SuppressInlineNamespaceMode::None &&
- NS->isInline() && NameInScope &&
- NS->isRedundantInlineQualifierFor(NameInScope))
- return AppendScope(DC->getParent(), OS, NameInScope);
-
- AppendScope(DC->getParent(), OS, NS->getDeclName());
- if (NS->getIdentifier())
- OS << NS->getName() << "::";
- else
- OS << "(anonymous namespace)::";
- } else if (const auto *Spec = dyn_cast<ClassTemplateSpecializationDecl>(DC)) {
- AppendScope(DC->getParent(), OS, Spec->getDeclName());
- IncludeStrongLifetimeRAII Strong(Policy);
- OS << Spec->getIdentifier()->getName();
- const TemplateArgumentList &TemplateArgs = Spec->getTemplateArgs();
- printTemplateArgumentList(
- OS, TemplateArgs.asArray(), Policy,
- Spec->getSpecializedTemplate()->getTemplateParameters());
- OS << "::";
- } else if (const auto *Tag = dyn_cast<TagDecl>(DC)) {
- AppendScope(DC->getParent(), OS, Tag->getDeclName());
- if (TypedefNameDecl *Typedef = Tag->getTypedefNameForAnonDecl())
- OS << Typedef->getIdentifier()->getName() << "::";
- else if (Tag->getIdentifier())
- OS << Tag->getIdentifier()->getName() << "::";
- else
- return;
- } else {
- AppendScope(DC->getParent(), OS, NameInScope);
- }
-}
-
void TypePrinter::printTagType(const TagType *T, raw_ostream &OS) {
TagDecl *D = T->getDecl();
@@ -1573,18 +1518,19 @@ void TypePrinter::printTagType(const TagType *T, raw_ostream &OS) {
return;
}
- bool HasKindDecoration = false;
-
+ bool PrintedKindDecoration = false;
if (T->isCanonicalUnqualified()) {
if (!Policy.SuppressTagKeyword && !D->getTypedefNameForAnonDecl()) {
- HasKindDecoration = true;
+ PrintedKindDecoration = true;
OS << D->getKindName();
OS << ' ';
}
} else {
OS << TypeWithKeyword::getKeywordName(T->getKeyword());
- if (T->getKeyword() != ElaboratedTypeKeyword::None)
+ if (T->getKeyword() != ElaboratedTypeKeyword::None) {
+ PrintedKindDecoration = true;
OS << ' ';
+ }
}
if (!Policy.FullyQualifiedName && !T->isCanonicalUnqualified()) {
@@ -1593,58 +1539,21 @@ void TypePrinter::printTagType(const TagType *T, raw_ostream &OS) {
// Compute the full nested-name-specifier for this type.
// In C, this will always be empty except when the type
// being printed is anonymous within other Record.
- AppendScope(D->getDeclContext(), OS, D->getDeclName());
+ D->printNestedNameSpecifier(OS, Policy);
}
if (const IdentifierInfo *II = D->getIdentifier())
OS << II->getName();
- else if (TypedefNameDecl *Typedef = D->getTypedefNameForAnonDecl()) {
- assert(Typedef->getIdentifier() && "Typedef without identifier?");
- OS << Typedef->getIdentifier()->getName();
- } else {
- // Make an unambiguous representation for anonymous types, e.g.
- // (anonymous enum at /usr/include/string.h:120:9)
- OS << (Policy.MSVCFormatting ? '`' : '(');
-
- if (isa<CXXRecordDecl>(D) && cast<CXXRecordDecl>(D)->isLambda()) {
- OS << "lambda";
- HasKindDecoration = true;
- } else if ((isa<RecordDecl>(D) && cast<RecordDecl>(D)->isAnonymousStructOrUnion())) {
- OS << "anonymous";
- } else {
- OS << "unnamed";
- }
+ else {
+ clang::PrintingPolicy Copy(Policy);
- if (Policy.AnonymousTagLocations) {
- // Suppress the redundant tag keyword if we just printed one.
- // We don't have to worry about ElaboratedTypes here because you can't
- // refer to an anonymous type with one.
- if (!HasKindDecoration)
- OS << " " << D->getKindName();
-
- PresumedLoc PLoc = D->getASTContext().getSourceManager().getPresumedLoc(
- D->getLocation());
- if (PLoc.isValid()) {
- OS << " at ";
- StringRef File = PLoc.getFilename();
- llvm::SmallString<1024> WrittenFile(File);
- if (auto *Callbacks = Policy.Callbacks)
- WrittenFile = Callbacks->remapPath(File);
- // Fix inconsistent path separator created by
- // clang::DirectoryLookup::LookupFile when the file path is relative
- // path.
- llvm::sys::path::Style Style =
- llvm::sys::path::is_absolute(WrittenFile)
- ? llvm::sys::path::Style::native
- : (Policy.MSVCFormatting
- ? llvm::sys::path::Style::windows_backslash
- : llvm::sys::path::Style::posix);
- llvm::sys::path::native(WrittenFile, Style);
- OS << WrittenFile << ':' << PLoc.getLine() << ':' << PLoc.getColumn();
- }
+ // Suppress the redundant tag keyword if we just printed one.
+ if (PrintedKindDecoration) {
+ Copy.SuppressTagKeywordInAnonNames = true;
+ Copy.SuppressTagKeyword = true;
}
- OS << (Policy.MSVCFormatting ? '\'' : ')');
+ D->printName(OS, Copy);
}
// If this is a class template specialization, print the template
@@ -1809,7 +1718,7 @@ void TypePrinter::printTemplateId(const TemplateSpecializationType *T,
// FIXME: Null TD never exercised in test suite.
if (FullyQualify && TD) {
if (!Policy.SuppressScope)
- AppendScope(TD->getDeclContext(), OS, TD->getDeclName());
+ TD->printNestedNameSpecifier(OS, Policy);
OS << TD->getName();
} else {
@@ -2063,6 +1972,7 @@ void TypePrinter::printAttributedAfter(const AttributedType *T,
case attr::HLSLRawBuffer:
case attr::HLSLContainedType:
case attr::HLSLIsCounter:
+ case attr::HLSLResourceDimension:
llvm_unreachable("HLSL resource type attributes handled separately");
case attr::OpenCLPrivateAddressSpace:
@@ -2147,9 +2057,6 @@ void TypePrinter::printAttributedAfter(const AttributedType *T,
}
case attr::AArch64VectorPcs: OS << "aarch64_vector_pcs"; break;
case attr::AArch64SVEPcs: OS << "aarch64_sve_pcs"; break;
- case attr::DeviceKernel:
- OS << T->getAttr()->getSpelling();
- break;
case attr::IntelOclBicc:
OS << "inteloclbicc";
break;
@@ -2221,6 +2128,12 @@ void TypePrinter::printHLSLAttributedResourceAfter(
printAfter(ContainedTy, OS);
OS << ")]]";
}
+
+ if (Attrs.ResourceDimension != llvm::dxil::ResourceDimension::Unknown)
+ OS << " [[hlsl::resource_dimension("
+ << HLSLResourceDimensionAttr::ConvertResourceDimensionToStr(
+ Attrs.ResourceDimension)
+ << ")]]";
}
void TypePrinter::printHLSLInlineSpirvBefore(const HLSLInlineSpirvType *T,
@@ -2752,6 +2665,8 @@ std::string Qualifiers::getAddrSpaceAsString(LangAS AS) {
return "hlsl_device";
case LangAS::hlsl_input:
return "hlsl_input";
+ case LangAS::hlsl_push_constant:
+ return "hlsl_push_constant";
case LangAS::wasm_funcref:
return "__funcref";
default:
diff --git a/clang/lib/AST/VTableBuilder.cpp b/clang/lib/AST/VTableBuilder.cpp
index 3ded3a5..d97f10c 100644
--- a/clang/lib/AST/VTableBuilder.cpp
+++ b/clang/lib/AST/VTableBuilder.cpp
@@ -999,7 +999,7 @@ private:
public:
/// Component indices of the first component of each of the vtables in the
/// vtable group.
- SmallVector<size_t, 4> VTableIndices;
+ VTableLayout::VTableIndicesTy VTableIndices;
ItaniumVTableBuilder(ItaniumVTableContext &VTables,
const CXXRecordDecl *MostDerivedClass,
@@ -2306,18 +2306,19 @@ MakeAddressPointIndices(const VTableLayout::AddressPointsMapTy &addressPoints,
return indexMap;
}
-VTableLayout::VTableLayout(ArrayRef<size_t> VTableIndices,
+VTableLayout::VTableLayout(VTableIndicesTy VTableIndices,
ArrayRef<VTableComponent> VTableComponents,
ArrayRef<VTableThunkTy> VTableThunks,
const AddressPointsMapTy &AddressPoints)
- : VTableComponents(VTableComponents), VTableThunks(VTableThunks),
- AddressPoints(AddressPoints), AddressPointIndices(MakeAddressPointIndices(
- AddressPoints, VTableIndices.size())) {
- if (VTableIndices.size() <= 1)
- assert(VTableIndices.size() == 1 && VTableIndices[0] == 0);
- else
- this->VTableIndices = OwningArrayRef<size_t>(VTableIndices);
-
+ : VTableIndices(std::move(VTableIndices)),
+ VTableComponents(VTableComponents), VTableThunks(VTableThunks),
+ AddressPoints(AddressPoints),
+ AddressPointIndices(
+ MakeAddressPointIndices(AddressPoints, this->VTableIndices.size())) {
+ assert(!this->VTableIndices.empty() &&
+ "VTableLayout requires at least one index.");
+ assert(this->VTableIndices.front() == 0 &&
+ "VTableLayout requires the first index is 0.");
llvm::sort(this->VTableThunks, [](const VTableLayout::VTableThunkTy &LHS,
const VTableLayout::VTableThunkTy &RHS) {
assert((LHS.first != RHS.first || LHS.second == RHS.second) &&
@@ -2658,7 +2659,12 @@ private:
MethodVFTableLocation Loc(MI.VBTableIndex, WhichVFPtr.getVBaseWithVPtr(),
WhichVFPtr.NonVirtualOffset, MI.VFTableIndex);
if (const CXXDestructorDecl *DD = dyn_cast<CXXDestructorDecl>(MD)) {
- MethodVFTableLocations[GlobalDecl(DD, Dtor_Deleting)] = Loc;
+      // In the Microsoft ABI, the vftable always references the vector
+ CXXDtorType DtorTy = Context.getTargetInfo().emitVectorDeletingDtors(
+ Context.getLangOpts())
+ ? Dtor_VectorDeleting
+ : Dtor_Deleting;
+ MethodVFTableLocations[GlobalDecl(DD, DtorTy)] = Loc;
} else {
MethodVFTableLocations[MD] = Loc;
}
@@ -3288,7 +3294,11 @@ void VFTableBuilder::dumpLayout(raw_ostream &Out) {
const CXXDestructorDecl *DD = Component.getDestructorDecl();
DD->printQualifiedName(Out);
- Out << "() [scalar deleting]";
+ if (Context.getTargetInfo().emitVectorDeletingDtors(
+ Context.getLangOpts()))
+ Out << "() [vector deleting]";
+ else
+ Out << "() [scalar deleting]";
if (DD->isPureVirtual())
Out << " [pure]";
@@ -3721,8 +3731,8 @@ void MicrosoftVTableContext::computeVTableRelatedInformation(
SmallVector<VTableLayout::VTableThunkTy, 1> VTableThunks(
Builder.vtable_thunks_begin(), Builder.vtable_thunks_end());
VFTableLayouts[id] = std::make_unique<VTableLayout>(
- ArrayRef<size_t>{0}, Builder.vtable_components(), VTableThunks,
- EmptyAddressPointsMap);
+ VTableLayout::VTableIndicesTy{0}, Builder.vtable_components(),
+ VTableThunks, EmptyAddressPointsMap);
Thunks.insert(Builder.thunks_begin(), Builder.thunks_end());
const ASTRecordLayout &Layout = Context.getASTRecordLayout(RD);
@@ -3758,7 +3768,7 @@ void MicrosoftVTableContext::dumpMethodLocations(
PredefinedIdentKind::PrettyFunctionNoVirtual, MD);
if (isa<CXXDestructorDecl>(MD)) {
- IndicesMap[I.second] = MethodName + " [scalar deleting]";
+ IndicesMap[I.second] = MethodName + " [vector deleting]";
} else {
IndicesMap[I.second] = MethodName;
}
@@ -3874,7 +3884,8 @@ MicrosoftVTableContext::getMethodVFTableLocation(GlobalDecl GD) {
assert(hasVtableSlot(cast<CXXMethodDecl>(GD.getDecl())) &&
"Only use this method for virtual methods or dtors");
if (isa<CXXDestructorDecl>(GD.getDecl()))
- assert(GD.getDtorType() == Dtor_Deleting);
+ assert(GD.getDtorType() == Dtor_VectorDeleting ||
+ GD.getDtorType() == Dtor_Deleting);
GD = GD.getCanonicalDecl();