Diffstat (limited to 'clang/lib')
-rw-r--r--  clang/lib/AST/ByteCode/Compiler.cpp | 104
-rw-r--r--  clang/lib/AST/ByteCode/Context.cpp | 5
-rw-r--r--  clang/lib/AST/ByteCode/Interp.h | 116
-rw-r--r--  clang/lib/AST/ByteCode/InterpBuiltin.cpp | 13
-rw-r--r--  clang/lib/AST/ByteCode/InterpHelpers.h | 141
-rw-r--r--  clang/lib/Analysis/FlowSensitive/Models/UncheckedStatusOrAccessModel.cpp | 74
-rw-r--r--  clang/lib/Basic/Targets.cpp | 4
-rw-r--r--  clang/lib/Basic/Targets/ARM.cpp | 3
-rw-r--r--  clang/lib/Basic/Targets/RISCV.cpp | 5
-rw-r--r--  clang/lib/Basic/Targets/RISCV.h | 6
-rw-r--r--  clang/lib/CIR/CodeGen/CIRGenAtomic.cpp | 169
-rw-r--r--  clang/lib/CIR/CodeGen/CIRGenBuiltin.cpp | 95
-rw-r--r--  clang/lib/CIR/CodeGen/CIRGenBuiltinX86.cpp | 814
-rw-r--r--  clang/lib/CIR/CodeGen/CIRGenCXXABI.cpp | 7
-rw-r--r--  clang/lib/CIR/CodeGen/CIRGenCXXABI.h | 25
-rw-r--r--  clang/lib/CIR/CodeGen/CIRGenCleanup.cpp | 227
-rw-r--r--  clang/lib/CIR/CodeGen/CIRGenCleanup.h | 112
-rw-r--r--  clang/lib/CIR/CodeGen/CIRGenException.cpp | 151
-rw-r--r--  clang/lib/CIR/CodeGen/CIRGenExpr.cpp | 20
-rw-r--r--  clang/lib/CIR/CodeGen/CIRGenExprAggregate.cpp | 3
-rw-r--r--  clang/lib/CIR/CodeGen/CIRGenExprCXX.cpp | 73
-rw-r--r--  clang/lib/CIR/CodeGen/CIRGenExprScalar.cpp | 5
-rw-r--r--  clang/lib/CIR/CodeGen/CIRGenFunction.cpp | 43
-rw-r--r--  clang/lib/CIR/CodeGen/CIRGenFunction.h | 70
-rw-r--r--  clang/lib/CIR/CodeGen/CIRGenItaniumCXXABI.cpp | 75
-rw-r--r--  clang/lib/CIR/CodeGen/CIRGenModule.cpp | 13
-rw-r--r--  clang/lib/CIR/CodeGen/CIRGenStmt.cpp | 121
-rw-r--r--  clang/lib/CIR/CodeGen/CIRGenTypeCache.h | 10
-rw-r--r--  clang/lib/CIR/CodeGen/CMakeLists.txt | 3
-rw-r--r--  clang/lib/CIR/CodeGen/EHScopeStack.h | 90
-rw-r--r--  clang/lib/CIR/Dialect/IR/CIRDialect.cpp | 15
-rw-r--r--  clang/lib/CIR/Dialect/Transforms/FlattenCFG.cpp | 94
-rw-r--r--  clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp | 181
-rw-r--r--  clang/lib/CodeGen/CGCall.cpp | 18
-rw-r--r--  clang/lib/CodeGen/CodeGenModule.cpp | 65
-rw-r--r--  clang/lib/CodeGen/CodeGenModule.h | 7
-rw-r--r--  clang/lib/CodeGen/TargetBuiltins/AMDGPU.cpp | 29
-rw-r--r--  clang/lib/CodeGen/Targets/AMDGPU.cpp | 4
-rw-r--r--  clang/lib/CodeGen/Targets/SPIR.cpp | 2
-rw-r--r--  clang/lib/Driver/ToolChains/Arch/ARM.cpp | 5
-rw-r--r--  clang/lib/Format/ContinuationIndenter.cpp | 11
-rw-r--r--  clang/lib/Frontend/InitPreprocessor.cpp | 2
-rw-r--r--  clang/lib/Sema/Sema.cpp | 11
-rw-r--r--  clang/lib/Sema/SemaBase.cpp | 17
-rw-r--r--  clang/lib/Sema/SemaOverload.cpp | 13
-rw-r--r--  clang/lib/Sema/SemaRISCV.cpp | 16
-rw-r--r--  clang/lib/StaticAnalyzer/Frontend/AnalysisConsumer.cpp | 16
47 files changed, 2759 insertions, 344 deletions
diff --git a/clang/lib/AST/ByteCode/Compiler.cpp b/clang/lib/AST/ByteCode/Compiler.cpp
index 74cae03..6b98927 100644
--- a/clang/lib/AST/ByteCode/Compiler.cpp
+++ b/clang/lib/AST/ByteCode/Compiler.cpp
@@ -4841,46 +4841,39 @@ Compiler<Emitter>::visitVarDecl(const VarDecl *VD, const Expr *Init,
return !NeedsOp || this->emitCheckDecl(VD, VD);
};
- auto initGlobal = [&](unsigned GlobalIndex) -> bool {
- assert(Init);
-
- if (VarT) {
- if (!this->visit(Init))
- return checkDecl() && false;
-
- return checkDecl() && this->emitInitGlobal(*VarT, GlobalIndex, VD);
- }
-
- if (!checkDecl())
- return false;
-
- if (!this->emitGetPtrGlobal(GlobalIndex, Init))
- return false;
-
- if (!visitInitializer(Init))
- return false;
-
- return this->emitFinishInitGlobal(Init);
- };
-
DeclScope<Emitter> LocalScope(this, VD);
- // We've already seen and initialized this global.
- if (UnsignedOrNone GlobalIndex = P.getGlobal(VD)) {
+ UnsignedOrNone GlobalIndex = P.getGlobal(VD);
+ if (GlobalIndex) {
+ // We've already seen and initialized this global.
if (P.getPtrGlobal(*GlobalIndex).isInitialized())
return checkDecl();
-
// The previous attempt at initialization might've been unsuccessful,
// so let's try this one.
- return Init && checkDecl() && initGlobal(*GlobalIndex);
+ } else if ((GlobalIndex = P.createGlobal(VD, Init))) {
+ } else {
+ return false;
}
+ if (!Init)
+ return true;
- UnsignedOrNone GlobalIndex = P.createGlobal(VD, Init);
+ if (!checkDecl())
+ return false;
- if (!GlobalIndex)
+ if (VarT) {
+ if (!this->visit(Init))
+ return false;
+
+ return this->emitInitGlobal(*VarT, *GlobalIndex, VD);
+ }
+
+ if (!this->emitGetPtrGlobal(*GlobalIndex, Init))
+ return false;
+
+ if (!visitInitializer(Init))
return false;
- return !Init || (checkDecl() && initGlobal(*GlobalIndex));
+ return this->emitFinishInitGlobal(Init);
}
// Local variables.
InitLinkScope<Emitter> ILS(this, InitLink::Decl(VD));
@@ -4890,36 +4883,37 @@ Compiler<Emitter>::visitVarDecl(const VarDecl *VD, const Expr *Init,
VD, *VarT, VD->getType().isConstQualified(),
VD->getType().isVolatileQualified(), nullptr, ScopeKind::Block,
IsConstexprUnknown);
- if (Init) {
- // If this is a toplevel declaration, create a scope for the
- // initializer.
- if (Toplevel) {
- LocalScope<Emitter> Scope(this);
- if (!this->visit(Init))
- return false;
- return this->emitSetLocal(*VarT, Offset, VD) && Scope.destroyLocals();
- }
- if (!this->visit(Init))
- return false;
- return this->emitSetLocal(*VarT, Offset, VD);
- }
- } else {
- if (UnsignedOrNone Offset = this->allocateLocal(
- VD, VD->getType(), nullptr, ScopeKind::Block, IsConstexprUnknown)) {
- if (!Init)
- return true;
- if (!this->emitGetPtrLocal(*Offset, Init))
- return false;
+ if (!Init)
+ return true;
- if (!visitInitializer(Init))
+ // If this is a toplevel declaration, create a scope for the
+ // initializer.
+ if (Toplevel) {
+ LocalScope<Emitter> Scope(this);
+ if (!this->visit(Init))
return false;
-
- return this->emitFinishInitPop(Init);
+ return this->emitSetLocal(*VarT, Offset, VD) && Scope.destroyLocals();
}
- return false;
+ if (!this->visit(Init))
+ return false;
+ return this->emitSetLocal(*VarT, Offset, VD);
}
- return true;
+ // Local composite variables.
+ if (UnsignedOrNone Offset = this->allocateLocal(
+ VD, VD->getType(), nullptr, ScopeKind::Block, IsConstexprUnknown)) {
+ if (!Init)
+ return true;
+
+ if (!this->emitGetPtrLocal(*Offset, Init))
+ return false;
+
+ if (!visitInitializer(Init))
+ return false;
+
+ return this->emitFinishInitPop(Init);
+ }
+ return false;
}
template <class Emitter>
@@ -6633,7 +6627,7 @@ bool Compiler<Emitter>::VisitUnaryOperator(const UnaryOperator *E) {
if (!this->visit(SubExpr))
return false;
- if (!this->emitCheckNull(E))
+ if (!SubExpr->getType()->isFunctionPointerType() && !this->emitCheckNull(E))
return false;
if (classifyPrim(SubExpr) == PT_Ptr)
diff --git a/clang/lib/AST/ByteCode/Context.cpp b/clang/lib/AST/ByteCode/Context.cpp
index 4f4b122..12bf3a3 100644
--- a/clang/lib/AST/ByteCode/Context.cpp
+++ b/clang/lib/AST/ByteCode/Context.cpp
@@ -7,12 +7,15 @@
//===----------------------------------------------------------------------===//
#include "Context.h"
+#include "Boolean.h"
#include "ByteCodeEmitter.h"
#include "Compiler.h"
#include "EvalEmitter.h"
-#include "Interp.h"
+#include "Integral.h"
#include "InterpFrame.h"
+#include "InterpHelpers.h"
#include "InterpStack.h"
+#include "Pointer.h"
#include "PrimType.h"
#include "Program.h"
#include "clang/AST/ASTLambda.h"
diff --git a/clang/lib/AST/ByteCode/Interp.h b/clang/lib/AST/ByteCode/Interp.h
index 2f7e2d9..d8529da 100644
--- a/clang/lib/AST/ByteCode/Interp.h
+++ b/clang/lib/AST/ByteCode/Interp.h
@@ -22,6 +22,7 @@
#include "Function.h"
#include "InterpBuiltinBitCast.h"
#include "InterpFrame.h"
+#include "InterpHelpers.h"
#include "InterpStack.h"
#include "InterpState.h"
#include "MemberPointer.h"
@@ -43,28 +44,10 @@ using FixedPointSemantics = llvm::FixedPointSemantics;
/// Checks if the variable has externally defined storage.
bool CheckExtern(InterpState &S, CodePtr OpPC, const Pointer &Ptr);
-/// Checks if the array is offsetable.
-bool CheckArray(InterpState &S, CodePtr OpPC, const Pointer &Ptr);
-
-/// Checks if a pointer is live and accessible.
-bool CheckLive(InterpState &S, CodePtr OpPC, const Pointer &Ptr,
- AccessKinds AK);
-
-/// Checks if a pointer is a dummy pointer.
-bool CheckDummy(InterpState &S, CodePtr OpPC, const Block *B, AccessKinds AK);
-
/// Checks if a pointer is null.
bool CheckNull(InterpState &S, CodePtr OpPC, const Pointer &Ptr,
CheckSubobjectKind CSK);
-/// Checks if a pointer is in range.
-bool CheckRange(InterpState &S, CodePtr OpPC, const Pointer &Ptr,
- AccessKinds AK);
-
-/// Checks if a field from which a pointer is going to be derived is valid.
-bool CheckRange(InterpState &S, CodePtr OpPC, const Pointer &Ptr,
- CheckSubobjectKind CSK);
-
/// Checks if Ptr is a one-past-the-end pointer.
bool CheckSubobject(InterpState &S, CodePtr OpPC, const Pointer &Ptr,
CheckSubobjectKind CSK);
@@ -80,12 +63,6 @@ bool CheckConst(InterpState &S, CodePtr OpPC, const Pointer &Ptr);
/// Checks if the Descriptor is of a constexpr or const global variable.
bool CheckConstant(InterpState &S, CodePtr OpPC, const Descriptor *Desc);
-/// Checks if a pointer points to a mutable field.
-bool CheckMutable(InterpState &S, CodePtr OpPC, const Pointer &Ptr);
-
-/// Checks if a value can be loaded from a block.
-bool CheckLoad(InterpState &S, CodePtr OpPC, const Pointer &Ptr,
- AccessKinds AK = AK_Read);
bool CheckFinalLoad(InterpState &S, CodePtr OpPC, const Pointer &Ptr);
bool DiagnoseUninitialized(InterpState &S, CodePtr OpPC, const Pointer &Ptr,
@@ -110,12 +87,6 @@ bool CheckThis(InterpState &S, CodePtr OpPC);
/// language mode.
bool CheckDynamicMemoryAllocation(InterpState &S, CodePtr OpPC);
-/// Diagnose mismatched new[]/delete or new/delete[] pairs.
-bool CheckNewDeleteForms(InterpState &S, CodePtr OpPC,
- DynamicAllocator::Form AllocForm,
- DynamicAllocator::Form DeleteForm, const Descriptor *D,
- const Expr *NewExpr);
-
/// Check the source of the pointer passed to delete/delete[] has actually
/// been heap allocated by us.
bool CheckDeleteSource(InterpState &S, CodePtr OpPC, const Expr *Source,
@@ -129,9 +100,6 @@ bool CheckActive(InterpState &S, CodePtr OpPC, const Pointer &Ptr,
bool SetThreeWayComparisonField(InterpState &S, CodePtr OpPC,
const Pointer &Ptr, const APSInt &IntValue);
-/// Copy the contents of Src into Dest.
-bool DoMemcpy(InterpState &S, CodePtr OpPC, const Pointer &Src, Pointer &Dest);
-
bool CallVar(InterpState &S, CodePtr OpPC, const Function *Func,
uint32_t VarArgSize);
bool Call(InterpState &S, CodePtr OpPC, const Function *Func,
@@ -149,19 +117,11 @@ bool CheckBitCast(InterpState &S, CodePtr OpPC, bool HasIndeterminateBits,
bool CheckBCPResult(InterpState &S, const Pointer &Ptr);
bool CheckDestructor(InterpState &S, CodePtr OpPC, const Pointer &Ptr);
-template <typename T>
-static bool handleOverflow(InterpState &S, CodePtr OpPC, const T &SrcValue) {
- const Expr *E = S.Current->getExpr(OpPC);
- S.CCEDiag(E, diag::note_constexpr_overflow) << SrcValue << E->getType();
- return S.noteUndefinedBehavior();
-}
bool handleFixedPointOverflow(InterpState &S, CodePtr OpPC,
const FixedPoint &FP);
bool isConstexprUnknown(const Pointer &P);
-inline bool CheckArraySize(InterpState &S, CodePtr OpPC, uint64_t NumElems);
-
enum class ShiftDir { Left, Right };
/// Checks if the shift operation is legal.
@@ -241,43 +201,6 @@ bool CheckDivRem(InterpState &S, CodePtr OpPC, const T &LHS, const T &RHS) {
return true;
}
-template <typename SizeT>
-bool CheckArraySize(InterpState &S, CodePtr OpPC, SizeT *NumElements,
- unsigned ElemSize, bool IsNoThrow) {
- // FIXME: Both the SizeT::from() as well as the
- // NumElements.toAPSInt() in this function are rather expensive.
-
- // Can't be too many elements if the bitwidth of NumElements is lower than
- // that of Descriptor::MaxArrayElemBytes.
- if ((NumElements->bitWidth() - NumElements->isSigned()) <
- (sizeof(Descriptor::MaxArrayElemBytes) * 8))
- return true;
-
- // FIXME: GH63562
- // APValue stores array extents as unsigned,
- // so anything that is greater that unsigned would overflow when
- // constructing the array, we catch this here.
- SizeT MaxElements = SizeT::from(Descriptor::MaxArrayElemBytes / ElemSize);
- assert(MaxElements.isPositive());
- if (NumElements->toAPSInt().getActiveBits() >
- ConstantArrayType::getMaxSizeBits(S.getASTContext()) ||
- *NumElements > MaxElements) {
- if (!IsNoThrow) {
- const SourceInfo &Loc = S.Current->getSource(OpPC);
-
- if (NumElements->isSigned() && NumElements->isNegative()) {
- S.FFDiag(Loc, diag::note_constexpr_new_negative)
- << NumElements->toDiagnosticString(S.getASTContext());
- } else {
- S.FFDiag(Loc, diag::note_constexpr_new_too_large)
- << NumElements->toDiagnosticString(S.getASTContext());
- }
- }
- return false;
- }
- return true;
-}
-
/// Checks if the result of a floating-point operation is valid
/// in the current context.
bool CheckFloatResult(InterpState &S, CodePtr OpPC, const Floating &Result,
@@ -286,19 +209,6 @@ bool CheckFloatResult(InterpState &S, CodePtr OpPC, const Floating &Result,
/// Checks why the given DeclRefExpr is invalid.
bool CheckDeclRef(InterpState &S, CodePtr OpPC, const DeclRefExpr *DR);
-/// Interpreter entry point.
-bool Interpret(InterpState &S);
-
-/// Interpret a builtin function.
-bool InterpretBuiltin(InterpState &S, CodePtr OpPC, const CallExpr *Call,
- uint32_t BuiltinID);
-
-/// Interpret an offsetof operation.
-bool InterpretOffsetOf(InterpState &S, CodePtr OpPC, const OffsetOfExpr *E,
- ArrayRef<int64_t> ArrayIndices, int64_t &Result);
-
-inline bool Invalid(InterpState &S, CodePtr OpPC);
-
enum class ArithOp { Add, Sub };
//===----------------------------------------------------------------------===//
@@ -403,13 +313,6 @@ bool Add(InterpState &S, CodePtr OpPC) {
return AddSubMulHelper<T, T::add, std::plus>(S, OpPC, Bits, LHS, RHS);
}
-static inline llvm::RoundingMode getRoundingMode(FPOptions FPO) {
- auto RM = FPO.getRoundingMode();
- if (RM == llvm::RoundingMode::Dynamic)
- return llvm::RoundingMode::NearestTiesToEven;
- return RM;
-}
-
inline bool Addf(InterpState &S, CodePtr OpPC, uint32_t FPOI) {
const Floating &RHS = S.Stk.pop<Floating>();
const Floating &LHS = S.Stk.pop<Floating>();
@@ -3264,12 +3167,6 @@ inline bool GetMemberPtrDecl(InterpState &S, CodePtr OpPC) {
/// Just emit a diagnostic. The expression that caused emission of this
/// op is not valid in a constant context.
-inline bool Invalid(InterpState &S, CodePtr OpPC) {
- const SourceLocation &Loc = S.Current->getLocation(OpPC);
- S.FFDiag(Loc, diag::note_invalid_subexpr_in_const_expr)
- << S.Current->getRange(OpPC);
- return false;
-}
inline bool Unsupported(InterpState &S, CodePtr OpPC) {
const SourceLocation &Loc = S.Current->getLocation(OpPC);
@@ -3701,17 +3598,6 @@ inline bool CheckDestruction(InterpState &S, CodePtr OpPC) {
return CheckDestructor(S, OpPC, Ptr);
}
-inline bool CheckArraySize(InterpState &S, CodePtr OpPC, uint64_t NumElems) {
- uint64_t Limit = S.getLangOpts().ConstexprStepLimit;
- if (Limit != 0 && NumElems > Limit) {
- S.FFDiag(S.Current->getSource(OpPC),
- diag::note_constexpr_new_exceeds_limits)
- << NumElems << Limit;
- return false;
- }
- return true;
-}
-
//===----------------------------------------------------------------------===//
// Read opcode arguments
//===----------------------------------------------------------------------===//
diff --git a/clang/lib/AST/ByteCode/InterpBuiltin.cpp b/clang/lib/AST/ByteCode/InterpBuiltin.cpp
index 39b991c..ff83c52 100644
--- a/clang/lib/AST/ByteCode/InterpBuiltin.cpp
+++ b/clang/lib/AST/ByteCode/InterpBuiltin.cpp
@@ -8,9 +8,10 @@
#include "../ExprConstShared.h"
#include "Boolean.h"
#include "EvalEmitter.h"
-#include "Interp.h"
#include "InterpBuiltinBitCast.h"
+#include "InterpHelpers.h"
#include "PrimType.h"
+#include "Program.h"
#include "clang/AST/OSLog.h"
#include "clang/AST/RecordLayout.h"
#include "clang/Basic/Builtins.h"
@@ -2041,10 +2042,16 @@ static bool interp__builtin_memchr(InterpState &S, CodePtr OpPC,
}
if (ID == Builtin::BIstrchr || ID == Builtin::BI__builtin_strchr) {
+ int64_t DesiredTrunc;
+ if (S.getASTContext().CharTy->isSignedIntegerType())
+ DesiredTrunc =
+ Desired.trunc(S.getASTContext().getCharWidth()).getSExtValue();
+ else
+ DesiredTrunc =
+ Desired.trunc(S.getASTContext().getCharWidth()).getZExtValue();
// strchr compares directly to the passed integer, and therefore
// always fails if given an int that is not a char.
- if (Desired !=
- Desired.trunc(S.getASTContext().getCharWidth()).getSExtValue()) {
+ if (Desired != DesiredTrunc) {
S.Stk.push<Pointer>();
return true;
}
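The strchr change above hinges on the signedness of the target's plain char: the requested character is truncated to char width and then re-extended, and that re-extension must be a sign extension only when char is signed. A minimal standalone sketch of the distinction using llvm::APSInt (illustration only, not the interpreter code itself):

    #include "llvm/ADT/APSInt.h"
    #include <cassert>

    int main() {
      // 255 held in a 32-bit signed APSInt, as the evaluator might see it.
      llvm::APSInt Desired(llvm::APInt(32, 0xFF), /*isUnsigned=*/false);
      // Truncate to an 8-bit char and re-extend both ways.
      int64_t AsSignedChar = Desired.trunc(8).getSExtValue();   // -1
      int64_t AsUnsignedChar = Desired.trunc(8).getZExtValue(); // 255
      // A value that does not round-trip through char is treated by the
      // constant evaluator as one that strchr can never match.
      assert(AsSignedChar == -1 && AsUnsignedChar == 255);
      return 0;
    }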
diff --git a/clang/lib/AST/ByteCode/InterpHelpers.h b/clang/lib/AST/ByteCode/InterpHelpers.h
new file mode 100644
index 0000000..6bf89d3
--- /dev/null
+++ b/clang/lib/AST/ByteCode/InterpHelpers.h
@@ -0,0 +1,141 @@
+//===--- InterpHelpers.h - Interpreter Helper Functions --------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_CLANG_AST_INTERP_INTERPHELPERS_H
+#define LLVM_CLANG_AST_INTERP_INTERPHELPERS_H
+
+#include "DynamicAllocator.h"
+#include "InterpState.h"
+#include "Pointer.h"
+
+namespace clang {
+class CallExpr;
+class OffsetOfExpr;
+
+namespace interp {
+class Block;
+struct Descriptor;
+
+/// Interpreter entry point.
+bool Interpret(InterpState &S);
+
+/// Interpret a builtin function.
+bool InterpretBuiltin(InterpState &S, CodePtr OpPC, const CallExpr *Call,
+ uint32_t BuiltinID);
+
+/// Interpret an offsetof operation.
+bool InterpretOffsetOf(InterpState &S, CodePtr OpPC, const OffsetOfExpr *E,
+ ArrayRef<int64_t> ArrayIndices, int64_t &Result);
+
+/// Checks if the array is offsetable.
+bool CheckArray(InterpState &S, CodePtr OpPC, const Pointer &Ptr);
+
+/// Checks if a pointer is live and accessible.
+bool CheckLive(InterpState &S, CodePtr OpPC, const Pointer &Ptr,
+ AccessKinds AK);
+
+/// Checks if a pointer is a dummy pointer.
+bool CheckDummy(InterpState &S, CodePtr OpPC, const Block *B, AccessKinds AK);
+
+/// Checks if a pointer is in range.
+bool CheckRange(InterpState &S, CodePtr OpPC, const Pointer &Ptr,
+ AccessKinds AK);
+
+/// Checks if a field from which a pointer is going to be derived is valid.
+bool CheckRange(InterpState &S, CodePtr OpPC, const Pointer &Ptr,
+ CheckSubobjectKind CSK);
+
+/// Checks if a pointer points to a mutable field.
+bool CheckMutable(InterpState &S, CodePtr OpPC, const Pointer &Ptr);
+
+/// Checks if a value can be loaded from a block.
+bool CheckLoad(InterpState &S, CodePtr OpPC, const Pointer &Ptr,
+ AccessKinds AK = AK_Read);
+
+/// Diagnose mismatched new[]/delete or new/delete[] pairs.
+bool CheckNewDeleteForms(InterpState &S, CodePtr OpPC,
+ DynamicAllocator::Form AllocForm,
+ DynamicAllocator::Form DeleteForm, const Descriptor *D,
+ const Expr *NewExpr);
+
+/// Copy the contents of Src into Dest.
+bool DoMemcpy(InterpState &S, CodePtr OpPC, const Pointer &Src, Pointer &Dest);
+
+template <typename T>
+static bool handleOverflow(InterpState &S, CodePtr OpPC, const T &SrcValue) {
+ const Expr *E = S.Current->getExpr(OpPC);
+ S.CCEDiag(E, diag::note_constexpr_overflow) << SrcValue << E->getType();
+ return S.noteUndefinedBehavior();
+}
+
+inline bool CheckArraySize(InterpState &S, CodePtr OpPC, uint64_t NumElems) {
+ uint64_t Limit = S.getLangOpts().ConstexprStepLimit;
+ if (Limit != 0 && NumElems > Limit) {
+ S.FFDiag(S.Current->getSource(OpPC),
+ diag::note_constexpr_new_exceeds_limits)
+ << NumElems << Limit;
+ return false;
+ }
+ return true;
+}
+
+static inline llvm::RoundingMode getRoundingMode(FPOptions FPO) {
+ auto RM = FPO.getRoundingMode();
+ if (RM == llvm::RoundingMode::Dynamic)
+ return llvm::RoundingMode::NearestTiesToEven;
+ return RM;
+}
+
+inline bool Invalid(InterpState &S, CodePtr OpPC) {
+ const SourceLocation &Loc = S.Current->getLocation(OpPC);
+ S.FFDiag(Loc, diag::note_invalid_subexpr_in_const_expr)
+ << S.Current->getRange(OpPC);
+ return false;
+}
+
+template <typename SizeT>
+bool CheckArraySize(InterpState &S, CodePtr OpPC, SizeT *NumElements,
+ unsigned ElemSize, bool IsNoThrow) {
+ // FIXME: Both the SizeT::from() as well as the
+ // NumElements.toAPSInt() in this function are rather expensive.
+
+ // Can't be too many elements if the bitwidth of NumElements is lower than
+ // that of Descriptor::MaxArrayElemBytes.
+ if ((NumElements->bitWidth() - NumElements->isSigned()) <
+ (sizeof(Descriptor::MaxArrayElemBytes) * 8))
+ return true;
+
+ // FIXME: GH63562
+ // APValue stores array extents as unsigned,
+ // so anything that is greater that unsigned would overflow when
+ // constructing the array, we catch this here.
+ SizeT MaxElements = SizeT::from(Descriptor::MaxArrayElemBytes / ElemSize);
+ assert(MaxElements.isPositive());
+ if (NumElements->toAPSInt().getActiveBits() >
+ ConstantArrayType::getMaxSizeBits(S.getASTContext()) ||
+ *NumElements > MaxElements) {
+ if (!IsNoThrow) {
+ const SourceInfo &Loc = S.Current->getSource(OpPC);
+
+ if (NumElements->isSigned() && NumElements->isNegative()) {
+ S.FFDiag(Loc, diag::note_constexpr_new_negative)
+ << NumElements->toDiagnosticString(S.getASTContext());
+ } else {
+ S.FFDiag(Loc, diag::note_constexpr_new_too_large)
+ << NumElements->toDiagnosticString(S.getASTContext());
+ }
+ }
+ return false;
+ }
+ return true;
+}
+
+} // namespace interp
+} // namespace clang
+
+#endif // LLVM_CLANG_AST_INTERP_INTERPHELPERS_H
diff --git a/clang/lib/Analysis/FlowSensitive/Models/UncheckedStatusOrAccessModel.cpp b/clang/lib/Analysis/FlowSensitive/Models/UncheckedStatusOrAccessModel.cpp
index c88a470..f068be5 100644
--- a/clang/lib/Analysis/FlowSensitive/Models/UncheckedStatusOrAccessModel.cpp
+++ b/clang/lib/Analysis/FlowSensitive/Models/UncheckedStatusOrAccessModel.cpp
@@ -24,6 +24,7 @@
#include "clang/Analysis/FlowSensitive/DataflowAnalysis.h"
#include "clang/Analysis/FlowSensitive/DataflowEnvironment.h"
#include "clang/Analysis/FlowSensitive/MatchSwitch.h"
+#include "clang/Analysis/FlowSensitive/RecordOps.h"
#include "clang/Analysis/FlowSensitive/StorageLocation.h"
#include "clang/Analysis/FlowSensitive/Value.h"
#include "clang/Basic/LLVM.h"
@@ -95,6 +96,18 @@ static QualType getStatusOrValueType(ClassTemplateSpecializationDecl *TRD) {
return TRD->getTemplateArgs().get(0).getAsType();
}
+static auto ofClassStatus() {
+ using namespace ::clang::ast_matchers; // NOLINT: Too many names
+ return ofClass(hasName("::absl::Status"));
+}
+
+static auto isStatusMemberCallWithName(llvm::StringRef member_name) {
+ using namespace ::clang::ast_matchers; // NOLINT: Too many names
+ return cxxMemberCallExpr(
+ on(expr(unless(cxxThisExpr()))),
+ callee(cxxMethodDecl(hasName(member_name), ofClassStatus())));
+}
+
static auto isStatusOrMemberCallWithName(llvm::StringRef member_name) {
using namespace ::clang::ast_matchers; // NOLINT: Too many names
return cxxMemberCallExpr(
@@ -244,6 +257,61 @@ static void transferStatusOrOkCall(const CXXMemberCallExpr *Expr,
State.Env.setValue(*Expr, OkVal);
}
+static void transferStatusCall(const CXXMemberCallExpr *Expr,
+ const MatchFinder::MatchResult &,
+ LatticeTransferState &State) {
+ RecordStorageLocation *StatusOrLoc =
+ getImplicitObjectLocation(*Expr, State.Env);
+ if (StatusOrLoc == nullptr)
+ return;
+
+ RecordStorageLocation &StatusLoc = locForStatus(*StatusOrLoc);
+
+ if (State.Env.getValue(locForOk(StatusLoc)) == nullptr)
+ initializeStatusOr(*StatusOrLoc, State.Env);
+
+ if (Expr->isPRValue())
+ copyRecord(StatusLoc, State.Env.getResultObjectLocation(*Expr), State.Env);
+ else
+ State.Env.setStorageLocation(*Expr, StatusLoc);
+}
+
+static void transferStatusOkCall(const CXXMemberCallExpr *Expr,
+ const MatchFinder::MatchResult &,
+ LatticeTransferState &State) {
+ RecordStorageLocation *StatusLoc =
+ getImplicitObjectLocation(*Expr, State.Env);
+ if (StatusLoc == nullptr)
+ return;
+
+ if (Value *Val = State.Env.getValue(locForOk(*StatusLoc)))
+ State.Env.setValue(*Expr, *Val);
+}
+
+static void transferStatusUpdateCall(const CXXMemberCallExpr *Expr,
+ const MatchFinder::MatchResult &,
+ LatticeTransferState &State) {
+ // S.Update(OtherS) sets S to the error code of OtherS if it is OK,
+ // otherwise does nothing.
+ assert(Expr->getNumArgs() == 1);
+ auto *Arg = Expr->getArg(0);
+ RecordStorageLocation *ArgRecord =
+ Arg->isPRValue() ? &State.Env.getResultObjectLocation(*Arg)
+ : State.Env.get<RecordStorageLocation>(*Arg);
+ RecordStorageLocation *ThisLoc = getImplicitObjectLocation(*Expr, State.Env);
+ if (ThisLoc == nullptr || ArgRecord == nullptr)
+ return;
+
+ auto &ThisOkVal = valForOk(*ThisLoc, State.Env);
+ auto &ArgOkVal = valForOk(*ArgRecord, State.Env);
+ auto &A = State.Env.arena();
+ auto &NewVal = State.Env.makeAtomicBoolValue();
+ State.Env.assume(A.makeImplies(A.makeNot(ThisOkVal.formula()),
+ A.makeNot(NewVal.formula())));
+ State.Env.assume(A.makeImplies(NewVal.formula(), ArgOkVal.formula()));
+ State.Env.setValue(locForOk(*ThisLoc), NewVal);
+}
+
CFGMatchSwitch<LatticeTransferState>
buildTransferMatchSwitch(ASTContext &Ctx,
CFGMatchSwitchBuilder<LatticeTransferState> Builder) {
@@ -251,6 +319,12 @@ buildTransferMatchSwitch(ASTContext &Ctx,
return std::move(Builder)
.CaseOfCFGStmt<CXXMemberCallExpr>(isStatusOrMemberCallWithName("ok"),
transferStatusOrOkCall)
+ .CaseOfCFGStmt<CXXMemberCallExpr>(isStatusOrMemberCallWithName("status"),
+ transferStatusCall)
+ .CaseOfCFGStmt<CXXMemberCallExpr>(isStatusMemberCallWithName("ok"),
+ transferStatusOkCall)
+ .CaseOfCFGStmt<CXXMemberCallExpr>(isStatusMemberCallWithName("Update"),
+ transferStatusUpdateCall)
.Build();
}
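For the transferStatusUpdateCall added above, the modelled member is absl::Status::Update, which overwrites an OK status with its argument and leaves an existing error untouched. A hedged usage sketch of the semantics the two assumed formulas encode (standard Abseil API; the wrapper function name is made up):

    #include "absl/status/status.h"

    // After a.Update(b): if `a` was already an error it stays an error
    // (matches the assumption !ThisOk implies !NewOk), and if the result is
    // OK then `b` must have been OK as well (matches NewOk implies ArgOk).
    absl::Status MergeStatuses(absl::Status a, const absl::Status &b) {
      a.Update(b);
      return a;
    }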
diff --git a/clang/lib/Basic/Targets.cpp b/clang/lib/Basic/Targets.cpp
index b7e8bad..f39c698 100644
--- a/clang/lib/Basic/Targets.cpp
+++ b/clang/lib/Basic/Targets.cpp
@@ -222,6 +222,8 @@ std::unique_ptr<TargetInfo> AllocateTarget(const llvm::Triple &Triple,
return std::make_unique<OHOSTargetInfo<ARMleTargetInfo>>(Triple, Opts);
case llvm::Triple::FreeBSD:
return std::make_unique<FreeBSDTargetInfo<ARMleTargetInfo>>(Triple, Opts);
+ case llvm::Triple::Fuchsia:
+ return std::make_unique<FuchsiaTargetInfo<ARMleTargetInfo>>(Triple, Opts);
case llvm::Triple::NetBSD:
return std::make_unique<NetBSDTargetInfo<ARMleTargetInfo>>(Triple, Opts);
case llvm::Triple::OpenBSD:
@@ -254,6 +256,8 @@ std::unique_ptr<TargetInfo> AllocateTarget(const llvm::Triple &Triple,
return std::make_unique<AppleMachOARMTargetInfo>(Triple, Opts);
switch (os) {
+ case llvm::Triple::Fuchsia:
+ return std::make_unique<FuchsiaTargetInfo<ARMbeTargetInfo>>(Triple, Opts);
case llvm::Triple::Linux:
return std::make_unique<LinuxTargetInfo<ARMbeTargetInfo>>(Triple, Opts);
case llvm::Triple::NetBSD:
diff --git a/clang/lib/Basic/Targets/ARM.cpp b/clang/lib/Basic/Targets/ARM.cpp
index 3de17d2..d00a3a4 100644
--- a/clang/lib/Basic/Targets/ARM.cpp
+++ b/clang/lib/Basic/Targets/ARM.cpp
@@ -260,6 +260,7 @@ ARMTargetInfo::ARMTargetInfo(const llvm::Triple &Triple,
: TargetInfo(Triple), FPMath(FP_Default), IsAAPCS(true), LDREX(0),
HW_FP(0) {
bool IsFreeBSD = Triple.isOSFreeBSD();
+ bool IsFuchsia = Triple.isOSFuchsia();
bool IsOpenBSD = Triple.isOSOpenBSD();
bool IsNetBSD = Triple.isOSNetBSD();
bool IsHaiku = Triple.isOSHaiku();
@@ -332,7 +333,7 @@ ARMTargetInfo::ARMTargetInfo(const llvm::Triple &Triple,
default:
if (IsNetBSD)
setABI("apcs-gnu");
- else if (IsFreeBSD || IsOpenBSD || IsHaiku || IsOHOS)
+ else if (IsFreeBSD || IsFuchsia || IsOpenBSD || IsHaiku || IsOHOS)
setABI("aapcs-linux");
else
setABI("aapcs");
diff --git a/clang/lib/Basic/Targets/RISCV.cpp b/clang/lib/Basic/Targets/RISCV.cpp
index 04da4e6..685925b 100644
--- a/clang/lib/Basic/Targets/RISCV.cpp
+++ b/clang/lib/Basic/Targets/RISCV.cpp
@@ -192,8 +192,11 @@ void RISCVTargetInfo::getTargetDefines(const LangOptions &Opts,
Builder.defineMacro("__riscv_muldiv");
}
- if (ISAInfo->hasExtension("a")) {
+ // The "a" extension is composed of "zalrsc" and "zaamo"
+ if (ISAInfo->hasExtension("a"))
Builder.defineMacro("__riscv_atomic");
+
+ if (ISAInfo->hasExtension("zalrsc")) {
Builder.defineMacro("__GCC_HAVE_SYNC_COMPARE_AND_SWAP_1");
Builder.defineMacro("__GCC_HAVE_SYNC_COMPARE_AND_SWAP_2");
Builder.defineMacro("__GCC_HAVE_SYNC_COMPARE_AND_SWAP_4");
diff --git a/clang/lib/Basic/Targets/RISCV.h b/clang/lib/Basic/Targets/RISCV.h
index d8b0e64..85fa4cc 100644
--- a/clang/lib/Basic/Targets/RISCV.h
+++ b/clang/lib/Basic/Targets/RISCV.h
@@ -195,7 +195,8 @@ public:
void setMaxAtomicWidth() override {
MaxAtomicPromoteWidth = 128;
- if (ISAInfo->hasExtension("a"))
+ // "a" implies "zalrsc" which is sufficient to inline atomics
+ if (ISAInfo->hasExtension("zalrsc"))
MaxAtomicInlineWidth = 32;
}
};
@@ -225,7 +226,8 @@ public:
void setMaxAtomicWidth() override {
MaxAtomicPromoteWidth = 128;
- if (ISAInfo->hasExtension("a"))
+ // "a" implies "zalrsc" which is sufficient to inline atomics
+ if (ISAInfo->hasExtension("zalrsc"))
MaxAtomicInlineWidth = 64;
}
};
diff --git a/clang/lib/CIR/CodeGen/CIRGenAtomic.cpp b/clang/lib/CIR/CodeGen/CIRGenAtomic.cpp
index a9983f8..7db6e28 100644
--- a/clang/lib/CIR/CodeGen/CIRGenAtomic.cpp
+++ b/clang/lib/CIR/CodeGen/CIRGenAtomic.cpp
@@ -346,6 +346,8 @@ static void emitAtomicOp(CIRGenFunction &cgf, AtomicExpr *expr, Address dest,
CIRGenBuilderTy &builder = cgf.getBuilder();
mlir::Location loc = cgf.getLoc(expr->getSourceRange());
auto orderAttr = cir::MemOrderAttr::get(builder.getContext(), order);
+ cir::AtomicFetchKindAttr fetchAttr;
+ bool fetchFirst = true;
switch (expr->getOp()) {
case AtomicExpr::AO__c11_atomic_init:
@@ -407,6 +409,103 @@ static void emitAtomicOp(CIRGenFunction &cgf, AtomicExpr *expr, Address dest,
opName = cir::AtomicXchgOp::getOperationName();
break;
+ case AtomicExpr::AO__atomic_add_fetch:
+ fetchFirst = false;
+ [[fallthrough]];
+ case AtomicExpr::AO__c11_atomic_fetch_add:
+ case AtomicExpr::AO__atomic_fetch_add:
+ opName = cir::AtomicFetchOp::getOperationName();
+ fetchAttr = cir::AtomicFetchKindAttr::get(builder.getContext(),
+ cir::AtomicFetchKind::Add);
+ break;
+
+ case AtomicExpr::AO__atomic_sub_fetch:
+ fetchFirst = false;
+ [[fallthrough]];
+ case AtomicExpr::AO__c11_atomic_fetch_sub:
+ case AtomicExpr::AO__atomic_fetch_sub:
+ opName = cir::AtomicFetchOp::getOperationName();
+ fetchAttr = cir::AtomicFetchKindAttr::get(builder.getContext(),
+ cir::AtomicFetchKind::Sub);
+ break;
+
+ case AtomicExpr::AO__atomic_min_fetch:
+ fetchFirst = false;
+ [[fallthrough]];
+ case AtomicExpr::AO__c11_atomic_fetch_min:
+ case AtomicExpr::AO__atomic_fetch_min:
+ opName = cir::AtomicFetchOp::getOperationName();
+ fetchAttr = cir::AtomicFetchKindAttr::get(builder.getContext(),
+ cir::AtomicFetchKind::Min);
+ break;
+
+ case AtomicExpr::AO__atomic_max_fetch:
+ fetchFirst = false;
+ [[fallthrough]];
+ case AtomicExpr::AO__c11_atomic_fetch_max:
+ case AtomicExpr::AO__atomic_fetch_max:
+ opName = cir::AtomicFetchOp::getOperationName();
+ fetchAttr = cir::AtomicFetchKindAttr::get(builder.getContext(),
+ cir::AtomicFetchKind::Max);
+ break;
+
+ case AtomicExpr::AO__atomic_and_fetch:
+ fetchFirst = false;
+ [[fallthrough]];
+ case AtomicExpr::AO__c11_atomic_fetch_and:
+ case AtomicExpr::AO__atomic_fetch_and:
+ opName = cir::AtomicFetchOp::getOperationName();
+ fetchAttr = cir::AtomicFetchKindAttr::get(builder.getContext(),
+ cir::AtomicFetchKind::And);
+ break;
+
+ case AtomicExpr::AO__atomic_or_fetch:
+ fetchFirst = false;
+ [[fallthrough]];
+ case AtomicExpr::AO__c11_atomic_fetch_or:
+ case AtomicExpr::AO__atomic_fetch_or:
+ opName = cir::AtomicFetchOp::getOperationName();
+ fetchAttr = cir::AtomicFetchKindAttr::get(builder.getContext(),
+ cir::AtomicFetchKind::Or);
+ break;
+
+ case AtomicExpr::AO__atomic_xor_fetch:
+ fetchFirst = false;
+ [[fallthrough]];
+ case AtomicExpr::AO__c11_atomic_fetch_xor:
+ case AtomicExpr::AO__atomic_fetch_xor:
+ opName = cir::AtomicFetchOp::getOperationName();
+ fetchAttr = cir::AtomicFetchKindAttr::get(builder.getContext(),
+ cir::AtomicFetchKind::Xor);
+ break;
+
+ case AtomicExpr::AO__atomic_nand_fetch:
+ fetchFirst = false;
+ [[fallthrough]];
+ case AtomicExpr::AO__c11_atomic_fetch_nand:
+ case AtomicExpr::AO__atomic_fetch_nand:
+ opName = cir::AtomicFetchOp::getOperationName();
+ fetchAttr = cir::AtomicFetchKindAttr::get(builder.getContext(),
+ cir::AtomicFetchKind::Nand);
+ break;
+
+ case AtomicExpr::AO__atomic_test_and_set: {
+ auto op = cir::AtomicTestAndSetOp::create(
+ builder, loc, ptr.getPointer(), order,
+ builder.getI64IntegerAttr(ptr.getAlignment().getQuantity()),
+ expr->isVolatile());
+ builder.createStore(loc, op, dest);
+ return;
+ }
+
+ case AtomicExpr::AO__atomic_clear: {
+ cir::AtomicClearOp::create(
+ builder, loc, ptr.getPointer(), order,
+ builder.getI64IntegerAttr(ptr.getAlignment().getQuantity()),
+ expr->isVolatile());
+ return;
+ }
+
case AtomicExpr::AO__opencl_atomic_init:
case AtomicExpr::AO__hip_atomic_compare_exchange_strong:
@@ -433,79 +532,51 @@ static void emitAtomicOp(CIRGenFunction &cgf, AtomicExpr *expr, Address dest,
case AtomicExpr::AO__scoped_atomic_exchange_n:
case AtomicExpr::AO__scoped_atomic_exchange:
- case AtomicExpr::AO__atomic_add_fetch:
case AtomicExpr::AO__scoped_atomic_add_fetch:
- case AtomicExpr::AO__c11_atomic_fetch_add:
case AtomicExpr::AO__hip_atomic_fetch_add:
case AtomicExpr::AO__opencl_atomic_fetch_add:
- case AtomicExpr::AO__atomic_fetch_add:
case AtomicExpr::AO__scoped_atomic_fetch_add:
- case AtomicExpr::AO__atomic_sub_fetch:
case AtomicExpr::AO__scoped_atomic_sub_fetch:
- case AtomicExpr::AO__c11_atomic_fetch_sub:
case AtomicExpr::AO__hip_atomic_fetch_sub:
case AtomicExpr::AO__opencl_atomic_fetch_sub:
- case AtomicExpr::AO__atomic_fetch_sub:
case AtomicExpr::AO__scoped_atomic_fetch_sub:
- case AtomicExpr::AO__atomic_min_fetch:
case AtomicExpr::AO__scoped_atomic_min_fetch:
- case AtomicExpr::AO__c11_atomic_fetch_min:
case AtomicExpr::AO__hip_atomic_fetch_min:
case AtomicExpr::AO__opencl_atomic_fetch_min:
- case AtomicExpr::AO__atomic_fetch_min:
case AtomicExpr::AO__scoped_atomic_fetch_min:
- case AtomicExpr::AO__atomic_max_fetch:
case AtomicExpr::AO__scoped_atomic_max_fetch:
- case AtomicExpr::AO__c11_atomic_fetch_max:
case AtomicExpr::AO__hip_atomic_fetch_max:
case AtomicExpr::AO__opencl_atomic_fetch_max:
- case AtomicExpr::AO__atomic_fetch_max:
case AtomicExpr::AO__scoped_atomic_fetch_max:
- case AtomicExpr::AO__atomic_and_fetch:
case AtomicExpr::AO__scoped_atomic_and_fetch:
- case AtomicExpr::AO__c11_atomic_fetch_and:
case AtomicExpr::AO__hip_atomic_fetch_and:
case AtomicExpr::AO__opencl_atomic_fetch_and:
- case AtomicExpr::AO__atomic_fetch_and:
case AtomicExpr::AO__scoped_atomic_fetch_and:
- case AtomicExpr::AO__atomic_or_fetch:
case AtomicExpr::AO__scoped_atomic_or_fetch:
- case AtomicExpr::AO__c11_atomic_fetch_or:
case AtomicExpr::AO__hip_atomic_fetch_or:
case AtomicExpr::AO__opencl_atomic_fetch_or:
- case AtomicExpr::AO__atomic_fetch_or:
case AtomicExpr::AO__scoped_atomic_fetch_or:
- case AtomicExpr::AO__atomic_xor_fetch:
case AtomicExpr::AO__scoped_atomic_xor_fetch:
- case AtomicExpr::AO__c11_atomic_fetch_xor:
case AtomicExpr::AO__hip_atomic_fetch_xor:
case AtomicExpr::AO__opencl_atomic_fetch_xor:
- case AtomicExpr::AO__atomic_fetch_xor:
case AtomicExpr::AO__scoped_atomic_fetch_xor:
- case AtomicExpr::AO__atomic_nand_fetch:
case AtomicExpr::AO__scoped_atomic_nand_fetch:
- case AtomicExpr::AO__c11_atomic_fetch_nand:
- case AtomicExpr::AO__atomic_fetch_nand:
case AtomicExpr::AO__scoped_atomic_fetch_nand:
-
- case AtomicExpr::AO__atomic_test_and_set:
-
- case AtomicExpr::AO__atomic_clear:
cgf.cgm.errorNYI(expr->getSourceRange(), "emitAtomicOp: expr op NYI");
return;
}
@@ -518,9 +589,13 @@ static void emitAtomicOp(CIRGenFunction &cgf, AtomicExpr *expr, Address dest,
mlir::Operation *rmwOp = builder.create(loc, builder.getStringAttr(opName),
atomicOperands, atomicResTys);
+ if (fetchAttr)
+ rmwOp->setAttr("binop", fetchAttr);
rmwOp->setAttr("mem_order", orderAttr);
if (expr->isVolatile())
rmwOp->setAttr("is_volatile", builder.getUnitAttr());
+ if (fetchFirst && opName == cir::AtomicFetchOp::getOperationName())
+ rmwOp->setAttr("fetch_first", builder.getUnitAttr());
mlir::Value result = rmwOp->getResult(0);
builder.createStore(loc, result, dest);
@@ -581,6 +656,8 @@ RValue CIRGenFunction::emitAtomicExpr(AtomicExpr *e) {
case AtomicExpr::AO__atomic_load_n:
case AtomicExpr::AO__c11_atomic_load:
+ case AtomicExpr::AO__atomic_test_and_set:
+ case AtomicExpr::AO__atomic_clear:
break;
case AtomicExpr::AO__atomic_load:
@@ -614,8 +691,41 @@ RValue CIRGenFunction::emitAtomicExpr(AtomicExpr *e) {
isWeakExpr = e->getWeak();
break;
+ case AtomicExpr::AO__c11_atomic_fetch_add:
+ case AtomicExpr::AO__c11_atomic_fetch_sub:
+ if (memTy->isPointerType()) {
+ cgm.errorNYI(e->getSourceRange(),
+ "atomic fetch-and-add and fetch-and-sub for pointers");
+ return RValue::get(nullptr);
+ }
+ [[fallthrough]];
+ case AtomicExpr::AO__atomic_fetch_add:
+ case AtomicExpr::AO__atomic_fetch_max:
+ case AtomicExpr::AO__atomic_fetch_min:
+ case AtomicExpr::AO__atomic_fetch_sub:
+ case AtomicExpr::AO__atomic_add_fetch:
+ case AtomicExpr::AO__atomic_max_fetch:
+ case AtomicExpr::AO__atomic_min_fetch:
+ case AtomicExpr::AO__atomic_sub_fetch:
+ case AtomicExpr::AO__c11_atomic_fetch_max:
+ case AtomicExpr::AO__c11_atomic_fetch_min:
+ shouldCastToIntPtrTy = !memTy->isFloatingType();
+ [[fallthrough]];
+
+ case AtomicExpr::AO__atomic_fetch_and:
+ case AtomicExpr::AO__atomic_fetch_nand:
+ case AtomicExpr::AO__atomic_fetch_or:
+ case AtomicExpr::AO__atomic_fetch_xor:
+ case AtomicExpr::AO__atomic_and_fetch:
+ case AtomicExpr::AO__atomic_nand_fetch:
+ case AtomicExpr::AO__atomic_or_fetch:
+ case AtomicExpr::AO__atomic_xor_fetch:
case AtomicExpr::AO__atomic_exchange_n:
case AtomicExpr::AO__atomic_store_n:
+ case AtomicExpr::AO__c11_atomic_fetch_and:
+ case AtomicExpr::AO__c11_atomic_fetch_nand:
+ case AtomicExpr::AO__c11_atomic_fetch_or:
+ case AtomicExpr::AO__c11_atomic_fetch_xor:
case AtomicExpr::AO__c11_atomic_exchange:
case AtomicExpr::AO__c11_atomic_store:
val1 = emitValToTemp(*this, e->getVal1());
@@ -640,6 +750,9 @@ RValue CIRGenFunction::emitAtomicExpr(AtomicExpr *e) {
dest = atomics.castToAtomicIntPointer(dest);
} else if (e->isCmpXChg()) {
dest = createMemTemp(resultTy, getLoc(e->getSourceRange()), "cmpxchg.bool");
+ } else if (e->getOp() == AtomicExpr::AO__atomic_test_and_set) {
+ dest = createMemTemp(resultTy, getLoc(e->getSourceRange()),
+ "test_and_set.bool");
} else if (!resultTy->isVoidType()) {
dest = atomics.createTempAlloca();
if (shouldCastToIntPtrTy)
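The fetchFirst flag threaded through emitAtomicOp above separates the two builtin families: the __atomic_fetch_<op> forms return the value read before the operation, while the __atomic_<op>_fetch forms return the updated value. A brief source-level illustration using the standard Clang/GCC builtins (not CIR itself):

    static int Counter = 0;

    void Bump() {
      // Fetch first: yields the value before the increment.
      int Before = __atomic_fetch_add(&Counter, 1, __ATOMIC_SEQ_CST);
      // Op first: yields the value after the increment.
      int After = __atomic_add_fetch(&Counter, 1, __ATOMIC_SEQ_CST);
      (void)Before;
      (void)After;
    }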
diff --git a/clang/lib/CIR/CodeGen/CIRGenBuiltin.cpp b/clang/lib/CIR/CodeGen/CIRGenBuiltin.cpp
index ea31871..798e9d9 100644
--- a/clang/lib/CIR/CodeGen/CIRGenBuiltin.cpp
+++ b/clang/lib/CIR/CodeGen/CIRGenBuiltin.cpp
@@ -463,12 +463,107 @@ RValue CIRGenFunction::emitBuiltinExpr(const GlobalDecl &gd, unsigned builtinID,
return emitLibraryCall(*this, fd, e,
cgm.getBuiltinLibFunction(fd, builtinID));
+ // Some target-specific builtins can have aggregate return values, e.g.
+ // __builtin_arm_mve_vld2q_u32. So if the result is an aggregate, force
+ // returnValue to be non-null, so that the target-specific emission code can
+ // always just emit into it.
+ cir::TypeEvaluationKind evalKind = getEvaluationKind(e->getType());
+ if (evalKind == cir::TEK_Aggregate && returnValue.isNull()) {
+ cgm.errorNYI(e->getSourceRange(), "aggregate return value from builtin");
+ return getUndefRValue(e->getType());
+ }
+
+ // Now see if we can emit a target-specific builtin.
+ if (mlir::Value v = emitTargetBuiltinExpr(builtinID, e, returnValue)) {
+ switch (evalKind) {
+ case cir::TEK_Scalar:
+ if (mlir::isa<cir::VoidType>(v.getType()))
+ return RValue::get(nullptr);
+ return RValue::get(v);
+ case cir::TEK_Aggregate:
+ cgm.errorNYI(e->getSourceRange(), "aggregate return value from builtin");
+ return getUndefRValue(e->getType());
+ case cir::TEK_Complex:
+ llvm_unreachable("No current target builtin returns complex");
+ }
+ llvm_unreachable("Bad evaluation kind in EmitBuiltinExpr");
+ }
+
cgm.errorNYI(e->getSourceRange(),
std::string("unimplemented builtin call: ") +
getContext().BuiltinInfo.getName(builtinID));
return getUndefRValue(e->getType());
}
+static mlir::Value emitTargetArchBuiltinExpr(CIRGenFunction *cgf,
+ unsigned builtinID,
+ const CallExpr *e,
+ ReturnValueSlot &returnValue,
+ llvm::Triple::ArchType arch) {
+ // When compiling in HipStdPar mode we have to be conservative in rejecting
+ // target specific features in the FE, and defer the possible error to the
+ // AcceleratorCodeSelection pass, wherein iff an unsupported target builtin is
+ // referenced by an accelerator executable function, we emit an error.
+ // Returning nullptr here leads to the builtin being handled in
+ // EmitStdParUnsupportedBuiltin.
+ if (cgf->getLangOpts().HIPStdPar && cgf->getLangOpts().CUDAIsDevice &&
+ arch != cgf->getTarget().getTriple().getArch())
+ return {};
+
+ switch (arch) {
+ case llvm::Triple::arm:
+ case llvm::Triple::armeb:
+ case llvm::Triple::thumb:
+ case llvm::Triple::thumbeb:
+ case llvm::Triple::aarch64:
+ case llvm::Triple::aarch64_32:
+ case llvm::Triple::aarch64_be:
+ case llvm::Triple::bpfeb:
+ case llvm::Triple::bpfel:
+ // These are actually NYI, but that will be reported by emitBuiltinExpr.
+ // At this point, we don't even know that the builtin is target-specific.
+ return nullptr;
+
+ case llvm::Triple::x86:
+ case llvm::Triple::x86_64:
+ return cgf->emitX86BuiltinExpr(builtinID, e);
+
+ case llvm::Triple::ppc:
+ case llvm::Triple::ppcle:
+ case llvm::Triple::ppc64:
+ case llvm::Triple::ppc64le:
+ case llvm::Triple::r600:
+ case llvm::Triple::amdgcn:
+ case llvm::Triple::systemz:
+ case llvm::Triple::nvptx:
+ case llvm::Triple::nvptx64:
+ case llvm::Triple::wasm32:
+ case llvm::Triple::wasm64:
+ case llvm::Triple::hexagon:
+ case llvm::Triple::riscv32:
+ case llvm::Triple::riscv64:
+ // These are actually NYI, but that will be reported by emitBuiltinExpr.
+ // At this point, we don't even know that the builtin is target-specific.
+ return {};
+ default:
+ return {};
+ }
+}
+
+mlir::Value
+CIRGenFunction::emitTargetBuiltinExpr(unsigned builtinID, const CallExpr *e,
+ ReturnValueSlot &returnValue) {
+ if (getContext().BuiltinInfo.isAuxBuiltinID(builtinID)) {
+ assert(getContext().getAuxTargetInfo() && "Missing aux target info");
+ return emitTargetArchBuiltinExpr(
+ this, getContext().BuiltinInfo.getAuxBuiltinID(builtinID), e,
+ returnValue, getContext().getAuxTargetInfo()->getTriple().getArch());
+ }
+
+ return emitTargetArchBuiltinExpr(this, builtinID, e, returnValue,
+ getTarget().getTriple().getArch());
+}
+
/// Given a builtin id for a function like "__builtin_fabsf", return a Function*
/// for "fabsf".
cir::FuncOp CIRGenModule::getBuiltinLibFunction(const FunctionDecl *fd,
diff --git a/clang/lib/CIR/CodeGen/CIRGenBuiltinX86.cpp b/clang/lib/CIR/CodeGen/CIRGenBuiltinX86.cpp
new file mode 100644
index 0000000..3c9c7ec
--- /dev/null
+++ b/clang/lib/CIR/CodeGen/CIRGenBuiltinX86.cpp
@@ -0,0 +1,814 @@
+//===----------------------------------------------------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// This contains code to emit x86/x86_64 Builtin calls as CIR or a function
+// call to be later resolved.
+//
+//===----------------------------------------------------------------------===//
+
+#include "CIRGenFunction.h"
+#include "CIRGenModule.h"
+#include "clang/Basic/Builtins.h"
+#include "clang/Basic/TargetBuiltins.h"
+#include "clang/CIR/MissingFeatures.h"
+#include "llvm/IR/IntrinsicsX86.h"
+
+using namespace clang;
+using namespace clang::CIRGen;
+
+mlir::Value CIRGenFunction::emitX86BuiltinExpr(unsigned builtinID,
+ const CallExpr *e) {
+ if (builtinID == Builtin::BI__builtin_cpu_is) {
+ cgm.errorNYI(e->getSourceRange(), "__builtin_cpu_is");
+ return {};
+ }
+ if (builtinID == Builtin::BI__builtin_cpu_supports) {
+ cgm.errorNYI(e->getSourceRange(), "__builtin_cpu_supports");
+ return {};
+ }
+ if (builtinID == Builtin::BI__builtin_cpu_init) {
+ cgm.errorNYI(e->getSourceRange(), "__builtin_cpu_init");
+ return {};
+ }
+
+ // Handle MSVC intrinsics before argument evaluation to prevent double
+ // evaluation.
+ assert(!cir::MissingFeatures::msvcBuiltins());
+
+ // Find out if any arguments are required to be integer constant expressions.
+ assert(!cir::MissingFeatures::handleBuiltinICEArguments());
+
+ switch (builtinID) {
+ default:
+ return {};
+ case X86::BI_mm_prefetch:
+ case X86::BI_mm_clflush:
+ case X86::BI_mm_lfence:
+ case X86::BI_mm_pause:
+ case X86::BI_mm_mfence:
+ case X86::BI_mm_sfence:
+ case X86::BI__rdtsc:
+ case X86::BI__builtin_ia32_rdtscp:
+ case X86::BI__builtin_ia32_lzcnt_u16:
+ case X86::BI__builtin_ia32_lzcnt_u32:
+ case X86::BI__builtin_ia32_lzcnt_u64:
+ case X86::BI__builtin_ia32_tzcnt_u16:
+ case X86::BI__builtin_ia32_tzcnt_u32:
+ case X86::BI__builtin_ia32_tzcnt_u64:
+ case X86::BI__builtin_ia32_undef128:
+ case X86::BI__builtin_ia32_undef256:
+ case X86::BI__builtin_ia32_undef512:
+ case X86::BI__builtin_ia32_vec_ext_v4hi:
+ case X86::BI__builtin_ia32_vec_ext_v16qi:
+ case X86::BI__builtin_ia32_vec_ext_v8hi:
+ case X86::BI__builtin_ia32_vec_ext_v4si:
+ case X86::BI__builtin_ia32_vec_ext_v4sf:
+ case X86::BI__builtin_ia32_vec_ext_v2di:
+ case X86::BI__builtin_ia32_vec_ext_v32qi:
+ case X86::BI__builtin_ia32_vec_ext_v16hi:
+ case X86::BI__builtin_ia32_vec_ext_v8si:
+ case X86::BI__builtin_ia32_vec_ext_v4di:
+ case X86::BI__builtin_ia32_vec_set_v4hi:
+ case X86::BI__builtin_ia32_vec_set_v16qi:
+ case X86::BI__builtin_ia32_vec_set_v8hi:
+ case X86::BI__builtin_ia32_vec_set_v4si:
+ case X86::BI__builtin_ia32_vec_set_v2di:
+ case X86::BI__builtin_ia32_vec_set_v32qi:
+ case X86::BI__builtin_ia32_vec_set_v16hi:
+ case X86::BI__builtin_ia32_vec_set_v8si:
+ case X86::BI__builtin_ia32_vec_set_v4di:
+ case X86::BI_mm_setcsr:
+ case X86::BI__builtin_ia32_ldmxcsr:
+ case X86::BI_mm_getcsr:
+ case X86::BI__builtin_ia32_stmxcsr:
+ case X86::BI__builtin_ia32_xsave:
+ case X86::BI__builtin_ia32_xsave64:
+ case X86::BI__builtin_ia32_xrstor:
+ case X86::BI__builtin_ia32_xrstor64:
+ case X86::BI__builtin_ia32_xsaveopt:
+ case X86::BI__builtin_ia32_xsaveopt64:
+ case X86::BI__builtin_ia32_xrstors:
+ case X86::BI__builtin_ia32_xrstors64:
+ case X86::BI__builtin_ia32_xsavec:
+ case X86::BI__builtin_ia32_xsavec64:
+ case X86::BI__builtin_ia32_xsaves:
+ case X86::BI__builtin_ia32_xsaves64:
+ case X86::BI__builtin_ia32_xsetbv:
+ case X86::BI_xsetbv:
+ case X86::BI__builtin_ia32_xgetbv:
+ case X86::BI_xgetbv:
+ case X86::BI__builtin_ia32_storedqudi128_mask:
+ case X86::BI__builtin_ia32_storedqusi128_mask:
+ case X86::BI__builtin_ia32_storedquhi128_mask:
+ case X86::BI__builtin_ia32_storedquqi128_mask:
+ case X86::BI__builtin_ia32_storeupd128_mask:
+ case X86::BI__builtin_ia32_storeups128_mask:
+ case X86::BI__builtin_ia32_storedqudi256_mask:
+ case X86::BI__builtin_ia32_storedqusi256_mask:
+ case X86::BI__builtin_ia32_storedquhi256_mask:
+ case X86::BI__builtin_ia32_storedquqi256_mask:
+ case X86::BI__builtin_ia32_storeupd256_mask:
+ case X86::BI__builtin_ia32_storeups256_mask:
+ case X86::BI__builtin_ia32_storedqudi512_mask:
+ case X86::BI__builtin_ia32_storedqusi512_mask:
+ case X86::BI__builtin_ia32_storedquhi512_mask:
+ case X86::BI__builtin_ia32_storedquqi512_mask:
+ case X86::BI__builtin_ia32_storeupd512_mask:
+ case X86::BI__builtin_ia32_storeups512_mask:
+ case X86::BI__builtin_ia32_storesbf16128_mask:
+ case X86::BI__builtin_ia32_storesh128_mask:
+ case X86::BI__builtin_ia32_storess128_mask:
+ case X86::BI__builtin_ia32_storesd128_mask:
+ case X86::BI__builtin_ia32_cvtmask2b128:
+ case X86::BI__builtin_ia32_cvtmask2b256:
+ case X86::BI__builtin_ia32_cvtmask2b512:
+ case X86::BI__builtin_ia32_cvtmask2w128:
+ case X86::BI__builtin_ia32_cvtmask2w256:
+ case X86::BI__builtin_ia32_cvtmask2w512:
+ case X86::BI__builtin_ia32_cvtmask2d128:
+ case X86::BI__builtin_ia32_cvtmask2d256:
+ case X86::BI__builtin_ia32_cvtmask2d512:
+ case X86::BI__builtin_ia32_cvtmask2q128:
+ case X86::BI__builtin_ia32_cvtmask2q256:
+ case X86::BI__builtin_ia32_cvtmask2q512:
+ case X86::BI__builtin_ia32_cvtb2mask128:
+ case X86::BI__builtin_ia32_cvtb2mask256:
+ case X86::BI__builtin_ia32_cvtb2mask512:
+ case X86::BI__builtin_ia32_cvtw2mask128:
+ case X86::BI__builtin_ia32_cvtw2mask256:
+ case X86::BI__builtin_ia32_cvtw2mask512:
+ case X86::BI__builtin_ia32_cvtd2mask128:
+ case X86::BI__builtin_ia32_cvtd2mask256:
+ case X86::BI__builtin_ia32_cvtd2mask512:
+ case X86::BI__builtin_ia32_cvtq2mask128:
+ case X86::BI__builtin_ia32_cvtq2mask256:
+ case X86::BI__builtin_ia32_cvtq2mask512:
+ case X86::BI__builtin_ia32_cvtdq2ps512_mask:
+ case X86::BI__builtin_ia32_cvtqq2ps512_mask:
+ case X86::BI__builtin_ia32_cvtqq2pd512_mask:
+ case X86::BI__builtin_ia32_vcvtw2ph512_mask:
+ case X86::BI__builtin_ia32_vcvtdq2ph512_mask:
+ case X86::BI__builtin_ia32_vcvtqq2ph512_mask:
+ case X86::BI__builtin_ia32_cvtudq2ps512_mask:
+ case X86::BI__builtin_ia32_cvtuqq2ps512_mask:
+ case X86::BI__builtin_ia32_cvtuqq2pd512_mask:
+ case X86::BI__builtin_ia32_vcvtuw2ph512_mask:
+ case X86::BI__builtin_ia32_vcvtudq2ph512_mask:
+ case X86::BI__builtin_ia32_vcvtuqq2ph512_mask:
+ case X86::BI__builtin_ia32_vfmaddss3:
+ case X86::BI__builtin_ia32_vfmaddsd3:
+ case X86::BI__builtin_ia32_vfmaddsh3_mask:
+ case X86::BI__builtin_ia32_vfmaddss3_mask:
+ case X86::BI__builtin_ia32_vfmaddsd3_mask:
+ case X86::BI__builtin_ia32_vfmaddss:
+ case X86::BI__builtin_ia32_vfmaddsd:
+ case X86::BI__builtin_ia32_vfmaddsh3_maskz:
+ case X86::BI__builtin_ia32_vfmaddss3_maskz:
+ case X86::BI__builtin_ia32_vfmaddsd3_maskz:
+ case X86::BI__builtin_ia32_vfmaddsh3_mask3:
+ case X86::BI__builtin_ia32_vfmaddss3_mask3:
+ case X86::BI__builtin_ia32_vfmaddsd3_mask3:
+ case X86::BI__builtin_ia32_vfmsubsh3_mask3:
+ case X86::BI__builtin_ia32_vfmsubss3_mask3:
+ case X86::BI__builtin_ia32_vfmsubsd3_mask3:
+ case X86::BI__builtin_ia32_vfmaddph512_mask:
+ case X86::BI__builtin_ia32_vfmaddph512_maskz:
+ case X86::BI__builtin_ia32_vfmaddph512_mask3:
+ case X86::BI__builtin_ia32_vfmaddps512_mask:
+ case X86::BI__builtin_ia32_vfmaddps512_maskz:
+ case X86::BI__builtin_ia32_vfmaddps512_mask3:
+ case X86::BI__builtin_ia32_vfmsubps512_mask3:
+ case X86::BI__builtin_ia32_vfmaddpd512_mask:
+ case X86::BI__builtin_ia32_vfmaddpd512_maskz:
+ case X86::BI__builtin_ia32_vfmaddpd512_mask3:
+ case X86::BI__builtin_ia32_vfmsubpd512_mask3:
+ case X86::BI__builtin_ia32_vfmsubph512_mask3:
+ case X86::BI__builtin_ia32_vfmaddsubph512_mask:
+ case X86::BI__builtin_ia32_vfmaddsubph512_maskz:
+ case X86::BI__builtin_ia32_vfmaddsubph512_mask3:
+ case X86::BI__builtin_ia32_vfmsubaddph512_mask3:
+ case X86::BI__builtin_ia32_vfmaddsubps512_mask:
+ case X86::BI__builtin_ia32_vfmaddsubps512_maskz:
+ case X86::BI__builtin_ia32_vfmaddsubps512_mask3:
+ case X86::BI__builtin_ia32_vfmsubaddps512_mask3:
+ case X86::BI__builtin_ia32_vfmaddsubpd512_mask:
+ case X86::BI__builtin_ia32_vfmaddsubpd512_maskz:
+ case X86::BI__builtin_ia32_vfmaddsubpd512_mask3:
+ case X86::BI__builtin_ia32_vfmsubaddpd512_mask3:
+ case X86::BI__builtin_ia32_movdqa32store128_mask:
+ case X86::BI__builtin_ia32_movdqa64store128_mask:
+ case X86::BI__builtin_ia32_storeaps128_mask:
+ case X86::BI__builtin_ia32_storeapd128_mask:
+ case X86::BI__builtin_ia32_movdqa32store256_mask:
+ case X86::BI__builtin_ia32_movdqa64store256_mask:
+ case X86::BI__builtin_ia32_storeaps256_mask:
+ case X86::BI__builtin_ia32_storeapd256_mask:
+ case X86::BI__builtin_ia32_movdqa32store512_mask:
+ case X86::BI__builtin_ia32_movdqa64store512_mask:
+ case X86::BI__builtin_ia32_storeaps512_mask:
+ case X86::BI__builtin_ia32_storeapd512_mask:
+ case X86::BI__builtin_ia32_loadups128_mask:
+ case X86::BI__builtin_ia32_loadups256_mask:
+ case X86::BI__builtin_ia32_loadups512_mask:
+ case X86::BI__builtin_ia32_loadupd128_mask:
+ case X86::BI__builtin_ia32_loadupd256_mask:
+ case X86::BI__builtin_ia32_loadupd512_mask:
+ case X86::BI__builtin_ia32_loaddquqi128_mask:
+ case X86::BI__builtin_ia32_loaddquqi256_mask:
+ case X86::BI__builtin_ia32_loaddquqi512_mask:
+ case X86::BI__builtin_ia32_loaddquhi128_mask:
+ case X86::BI__builtin_ia32_loaddquhi256_mask:
+ case X86::BI__builtin_ia32_loaddquhi512_mask:
+ case X86::BI__builtin_ia32_loaddqusi128_mask:
+ case X86::BI__builtin_ia32_loaddqusi256_mask:
+ case X86::BI__builtin_ia32_loaddqusi512_mask:
+ case X86::BI__builtin_ia32_loaddqudi128_mask:
+ case X86::BI__builtin_ia32_loaddqudi256_mask:
+ case X86::BI__builtin_ia32_loaddqudi512_mask:
+ case X86::BI__builtin_ia32_loadsbf16128_mask:
+ case X86::BI__builtin_ia32_loadsh128_mask:
+ case X86::BI__builtin_ia32_loadss128_mask:
+ case X86::BI__builtin_ia32_loadsd128_mask:
+ case X86::BI__builtin_ia32_loadaps128_mask:
+ case X86::BI__builtin_ia32_loadaps256_mask:
+ case X86::BI__builtin_ia32_loadaps512_mask:
+ case X86::BI__builtin_ia32_loadapd128_mask:
+ case X86::BI__builtin_ia32_loadapd256_mask:
+ case X86::BI__builtin_ia32_loadapd512_mask:
+ case X86::BI__builtin_ia32_movdqa32load128_mask:
+ case X86::BI__builtin_ia32_movdqa32load256_mask:
+ case X86::BI__builtin_ia32_movdqa32load512_mask:
+ case X86::BI__builtin_ia32_movdqa64load128_mask:
+ case X86::BI__builtin_ia32_movdqa64load256_mask:
+ case X86::BI__builtin_ia32_movdqa64load512_mask:
+ case X86::BI__builtin_ia32_expandloaddf128_mask:
+ case X86::BI__builtin_ia32_expandloaddf256_mask:
+ case X86::BI__builtin_ia32_expandloaddf512_mask:
+ case X86::BI__builtin_ia32_expandloadsf128_mask:
+ case X86::BI__builtin_ia32_expandloadsf256_mask:
+ case X86::BI__builtin_ia32_expandloadsf512_mask:
+ case X86::BI__builtin_ia32_expandloaddi128_mask:
+ case X86::BI__builtin_ia32_expandloaddi256_mask:
+ case X86::BI__builtin_ia32_expandloaddi512_mask:
+ case X86::BI__builtin_ia32_expandloadsi128_mask:
+ case X86::BI__builtin_ia32_expandloadsi256_mask:
+ case X86::BI__builtin_ia32_expandloadsi512_mask:
+ case X86::BI__builtin_ia32_expandloadhi128_mask:
+ case X86::BI__builtin_ia32_expandloadhi256_mask:
+ case X86::BI__builtin_ia32_expandloadhi512_mask:
+ case X86::BI__builtin_ia32_expandloadqi128_mask:
+ case X86::BI__builtin_ia32_expandloadqi256_mask:
+ case X86::BI__builtin_ia32_expandloadqi512_mask:
+ case X86::BI__builtin_ia32_compressstoredf128_mask:
+ case X86::BI__builtin_ia32_compressstoredf256_mask:
+ case X86::BI__builtin_ia32_compressstoredf512_mask:
+ case X86::BI__builtin_ia32_compressstoresf128_mask:
+ case X86::BI__builtin_ia32_compressstoresf256_mask:
+ case X86::BI__builtin_ia32_compressstoresf512_mask:
+ case X86::BI__builtin_ia32_compressstoredi128_mask:
+ case X86::BI__builtin_ia32_compressstoredi256_mask:
+ case X86::BI__builtin_ia32_compressstoredi512_mask:
+ case X86::BI__builtin_ia32_compressstoresi128_mask:
+ case X86::BI__builtin_ia32_compressstoresi256_mask:
+ case X86::BI__builtin_ia32_compressstoresi512_mask:
+ case X86::BI__builtin_ia32_compressstorehi128_mask:
+ case X86::BI__builtin_ia32_compressstorehi256_mask:
+ case X86::BI__builtin_ia32_compressstorehi512_mask:
+ case X86::BI__builtin_ia32_compressstoreqi128_mask:
+ case X86::BI__builtin_ia32_compressstoreqi256_mask:
+ case X86::BI__builtin_ia32_compressstoreqi512_mask:
+ case X86::BI__builtin_ia32_expanddf128_mask:
+ case X86::BI__builtin_ia32_expanddf256_mask:
+ case X86::BI__builtin_ia32_expanddf512_mask:
+ case X86::BI__builtin_ia32_expandsf128_mask:
+ case X86::BI__builtin_ia32_expandsf256_mask:
+ case X86::BI__builtin_ia32_expandsf512_mask:
+ case X86::BI__builtin_ia32_expanddi128_mask:
+ case X86::BI__builtin_ia32_expanddi256_mask:
+ case X86::BI__builtin_ia32_expanddi512_mask:
+ case X86::BI__builtin_ia32_expandsi128_mask:
+ case X86::BI__builtin_ia32_expandsi256_mask:
+ case X86::BI__builtin_ia32_expandsi512_mask:
+ case X86::BI__builtin_ia32_expandhi128_mask:
+ case X86::BI__builtin_ia32_expandhi256_mask:
+ case X86::BI__builtin_ia32_expandhi512_mask:
+ case X86::BI__builtin_ia32_expandqi128_mask:
+ case X86::BI__builtin_ia32_expandqi256_mask:
+ case X86::BI__builtin_ia32_expandqi512_mask:
+ case X86::BI__builtin_ia32_compressdf128_mask:
+ case X86::BI__builtin_ia32_compressdf256_mask:
+ case X86::BI__builtin_ia32_compressdf512_mask:
+ case X86::BI__builtin_ia32_compresssf128_mask:
+ case X86::BI__builtin_ia32_compresssf256_mask:
+ case X86::BI__builtin_ia32_compresssf512_mask:
+ case X86::BI__builtin_ia32_compressdi128_mask:
+ case X86::BI__builtin_ia32_compressdi256_mask:
+ case X86::BI__builtin_ia32_compressdi512_mask:
+ case X86::BI__builtin_ia32_compresssi128_mask:
+ case X86::BI__builtin_ia32_compresssi256_mask:
+ case X86::BI__builtin_ia32_compresssi512_mask:
+ case X86::BI__builtin_ia32_compresshi128_mask:
+ case X86::BI__builtin_ia32_compresshi256_mask:
+ case X86::BI__builtin_ia32_compresshi512_mask:
+ case X86::BI__builtin_ia32_compressqi128_mask:
+ case X86::BI__builtin_ia32_compressqi256_mask:
+ case X86::BI__builtin_ia32_compressqi512_mask:
+ case X86::BI__builtin_ia32_gather3div2df:
+ case X86::BI__builtin_ia32_gather3div2di:
+ case X86::BI__builtin_ia32_gather3div4df:
+ case X86::BI__builtin_ia32_gather3div4di:
+ case X86::BI__builtin_ia32_gather3div4sf:
+ case X86::BI__builtin_ia32_gather3div4si:
+ case X86::BI__builtin_ia32_gather3div8sf:
+ case X86::BI__builtin_ia32_gather3div8si:
+ case X86::BI__builtin_ia32_gather3siv2df:
+ case X86::BI__builtin_ia32_gather3siv2di:
+ case X86::BI__builtin_ia32_gather3siv4df:
+ case X86::BI__builtin_ia32_gather3siv4di:
+ case X86::BI__builtin_ia32_gather3siv4sf:
+ case X86::BI__builtin_ia32_gather3siv4si:
+ case X86::BI__builtin_ia32_gather3siv8sf:
+ case X86::BI__builtin_ia32_gather3siv8si:
+ case X86::BI__builtin_ia32_gathersiv8df:
+ case X86::BI__builtin_ia32_gathersiv16sf:
+ case X86::BI__builtin_ia32_gatherdiv8df:
+ case X86::BI__builtin_ia32_gatherdiv16sf:
+ case X86::BI__builtin_ia32_gathersiv8di:
+ case X86::BI__builtin_ia32_gathersiv16si:
+ case X86::BI__builtin_ia32_gatherdiv8di:
+ case X86::BI__builtin_ia32_gatherdiv16si:
+ case X86::BI__builtin_ia32_scattersiv8df:
+ case X86::BI__builtin_ia32_scattersiv16sf:
+ case X86::BI__builtin_ia32_scatterdiv8df:
+ case X86::BI__builtin_ia32_scatterdiv16sf:
+ case X86::BI__builtin_ia32_scattersiv8di:
+ case X86::BI__builtin_ia32_scattersiv16si:
+ case X86::BI__builtin_ia32_scatterdiv8di:
+ case X86::BI__builtin_ia32_scatterdiv16si:
+ case X86::BI__builtin_ia32_scatterdiv2df:
+ case X86::BI__builtin_ia32_scatterdiv2di:
+ case X86::BI__builtin_ia32_scatterdiv4df:
+ case X86::BI__builtin_ia32_scatterdiv4di:
+ case X86::BI__builtin_ia32_scatterdiv4sf:
+ case X86::BI__builtin_ia32_scatterdiv4si:
+ case X86::BI__builtin_ia32_scatterdiv8sf:
+ case X86::BI__builtin_ia32_scatterdiv8si:
+ case X86::BI__builtin_ia32_scattersiv2df:
+ case X86::BI__builtin_ia32_scattersiv2di:
+ case X86::BI__builtin_ia32_scattersiv4df:
+ case X86::BI__builtin_ia32_scattersiv4di:
+ case X86::BI__builtin_ia32_scattersiv4sf:
+ case X86::BI__builtin_ia32_scattersiv4si:
+ case X86::BI__builtin_ia32_scattersiv8sf:
+ case X86::BI__builtin_ia32_scattersiv8si:
+ case X86::BI__builtin_ia32_vextractf128_pd256:
+ case X86::BI__builtin_ia32_vextractf128_ps256:
+ case X86::BI__builtin_ia32_vextractf128_si256:
+ case X86::BI__builtin_ia32_extract128i256:
+ case X86::BI__builtin_ia32_extractf64x4_mask:
+ case X86::BI__builtin_ia32_extractf32x4_mask:
+ case X86::BI__builtin_ia32_extracti64x4_mask:
+ case X86::BI__builtin_ia32_extracti32x4_mask:
+ case X86::BI__builtin_ia32_extractf32x8_mask:
+ case X86::BI__builtin_ia32_extracti32x8_mask:
+ case X86::BI__builtin_ia32_extractf32x4_256_mask:
+ case X86::BI__builtin_ia32_extracti32x4_256_mask:
+ case X86::BI__builtin_ia32_extractf64x2_256_mask:
+ case X86::BI__builtin_ia32_extracti64x2_256_mask:
+ case X86::BI__builtin_ia32_extractf64x2_512_mask:
+ case X86::BI__builtin_ia32_extracti64x2_512_mask:
+ case X86::BI__builtin_ia32_vinsertf128_pd256:
+ case X86::BI__builtin_ia32_vinsertf128_ps256:
+ case X86::BI__builtin_ia32_vinsertf128_si256:
+ case X86::BI__builtin_ia32_insert128i256:
+ case X86::BI__builtin_ia32_insertf64x4:
+ case X86::BI__builtin_ia32_insertf32x4:
+ case X86::BI__builtin_ia32_inserti64x4:
+ case X86::BI__builtin_ia32_inserti32x4:
+ case X86::BI__builtin_ia32_insertf32x8:
+ case X86::BI__builtin_ia32_inserti32x8:
+ case X86::BI__builtin_ia32_insertf32x4_256:
+ case X86::BI__builtin_ia32_inserti32x4_256:
+ case X86::BI__builtin_ia32_insertf64x2_256:
+ case X86::BI__builtin_ia32_inserti64x2_256:
+ case X86::BI__builtin_ia32_insertf64x2_512:
+ case X86::BI__builtin_ia32_inserti64x2_512:
+ case X86::BI__builtin_ia32_pmovqd512_mask:
+ case X86::BI__builtin_ia32_pmovwb512_mask:
+ case X86::BI__builtin_ia32_pblendw128:
+ case X86::BI__builtin_ia32_blendpd:
+ case X86::BI__builtin_ia32_blendps:
+ case X86::BI__builtin_ia32_blendpd256:
+ case X86::BI__builtin_ia32_blendps256:
+ case X86::BI__builtin_ia32_pblendw256:
+ case X86::BI__builtin_ia32_pblendd128:
+ case X86::BI__builtin_ia32_pblendd256:
+ case X86::BI__builtin_ia32_pshuflw:
+ case X86::BI__builtin_ia32_pshuflw256:
+ case X86::BI__builtin_ia32_pshuflw512:
+ case X86::BI__builtin_ia32_pshufhw:
+ case X86::BI__builtin_ia32_pshufhw256:
+ case X86::BI__builtin_ia32_pshufhw512:
+ case X86::BI__builtin_ia32_pshufd:
+ case X86::BI__builtin_ia32_pshufd256:
+ case X86::BI__builtin_ia32_pshufd512:
+ case X86::BI__builtin_ia32_vpermilpd:
+ case X86::BI__builtin_ia32_vpermilps:
+ case X86::BI__builtin_ia32_vpermilpd256:
+ case X86::BI__builtin_ia32_vpermilps256:
+ case X86::BI__builtin_ia32_vpermilpd512:
+ case X86::BI__builtin_ia32_vpermilps512:
+ case X86::BI__builtin_ia32_shufpd:
+ case X86::BI__builtin_ia32_shufpd256:
+ case X86::BI__builtin_ia32_shufpd512:
+ case X86::BI__builtin_ia32_shufps:
+ case X86::BI__builtin_ia32_shufps256:
+ case X86::BI__builtin_ia32_shufps512:
+ case X86::BI__builtin_ia32_permdi256:
+ case X86::BI__builtin_ia32_permdf256:
+ case X86::BI__builtin_ia32_permdi512:
+ case X86::BI__builtin_ia32_permdf512:
+ case X86::BI__builtin_ia32_palignr128:
+ case X86::BI__builtin_ia32_palignr256:
+ case X86::BI__builtin_ia32_palignr512:
+ case X86::BI__builtin_ia32_alignd128:
+ case X86::BI__builtin_ia32_alignd256:
+ case X86::BI__builtin_ia32_alignd512:
+ case X86::BI__builtin_ia32_alignq128:
+ case X86::BI__builtin_ia32_alignq256:
+ case X86::BI__builtin_ia32_alignq512:
+ case X86::BI__builtin_ia32_shuf_f32x4_256:
+ case X86::BI__builtin_ia32_shuf_f64x2_256:
+ case X86::BI__builtin_ia32_shuf_i32x4_256:
+ case X86::BI__builtin_ia32_shuf_i64x2_256:
+ case X86::BI__builtin_ia32_shuf_f32x4:
+ case X86::BI__builtin_ia32_shuf_f64x2:
+ case X86::BI__builtin_ia32_shuf_i32x4:
+ case X86::BI__builtin_ia32_shuf_i64x2:
+ case X86::BI__builtin_ia32_vperm2f128_pd256:
+ case X86::BI__builtin_ia32_vperm2f128_ps256:
+ case X86::BI__builtin_ia32_vperm2f128_si256:
+ case X86::BI__builtin_ia32_permti256:
+ case X86::BI__builtin_ia32_pslldqi128_byteshift:
+ case X86::BI__builtin_ia32_pslldqi256_byteshift:
+ case X86::BI__builtin_ia32_pslldqi512_byteshift:
+ case X86::BI__builtin_ia32_psrldqi128_byteshift:
+ case X86::BI__builtin_ia32_psrldqi256_byteshift:
+ case X86::BI__builtin_ia32_psrldqi512_byteshift:
+ case X86::BI__builtin_ia32_kshiftliqi:
+ case X86::BI__builtin_ia32_kshiftlihi:
+ case X86::BI__builtin_ia32_kshiftlisi:
+ case X86::BI__builtin_ia32_kshiftlidi:
+ case X86::BI__builtin_ia32_kshiftriqi:
+ case X86::BI__builtin_ia32_kshiftrihi:
+ case X86::BI__builtin_ia32_kshiftrisi:
+ case X86::BI__builtin_ia32_kshiftridi:
+ case X86::BI__builtin_ia32_vprotbi:
+ case X86::BI__builtin_ia32_vprotwi:
+ case X86::BI__builtin_ia32_vprotdi:
+ case X86::BI__builtin_ia32_vprotqi:
+ case X86::BI__builtin_ia32_prold128:
+ case X86::BI__builtin_ia32_prold256:
+ case X86::BI__builtin_ia32_prold512:
+ case X86::BI__builtin_ia32_prolq128:
+ case X86::BI__builtin_ia32_prolq256:
+ case X86::BI__builtin_ia32_prolq512:
+ case X86::BI__builtin_ia32_prord128:
+ case X86::BI__builtin_ia32_prord256:
+ case X86::BI__builtin_ia32_prord512:
+ case X86::BI__builtin_ia32_prorq128:
+ case X86::BI__builtin_ia32_prorq256:
+ case X86::BI__builtin_ia32_prorq512:
+ case X86::BI__builtin_ia32_selectb_128:
+ case X86::BI__builtin_ia32_selectb_256:
+ case X86::BI__builtin_ia32_selectb_512:
+ case X86::BI__builtin_ia32_selectw_128:
+ case X86::BI__builtin_ia32_selectw_256:
+ case X86::BI__builtin_ia32_selectw_512:
+ case X86::BI__builtin_ia32_selectd_128:
+ case X86::BI__builtin_ia32_selectd_256:
+ case X86::BI__builtin_ia32_selectd_512:
+ case X86::BI__builtin_ia32_selectq_128:
+ case X86::BI__builtin_ia32_selectq_256:
+ case X86::BI__builtin_ia32_selectq_512:
+ case X86::BI__builtin_ia32_selectph_128:
+ case X86::BI__builtin_ia32_selectph_256:
+ case X86::BI__builtin_ia32_selectph_512:
+ case X86::BI__builtin_ia32_selectpbf_128:
+ case X86::BI__builtin_ia32_selectpbf_256:
+ case X86::BI__builtin_ia32_selectpbf_512:
+ case X86::BI__builtin_ia32_selectps_128:
+ case X86::BI__builtin_ia32_selectps_256:
+ case X86::BI__builtin_ia32_selectps_512:
+ case X86::BI__builtin_ia32_selectpd_128:
+ case X86::BI__builtin_ia32_selectpd_256:
+ case X86::BI__builtin_ia32_selectpd_512:
+ case X86::BI__builtin_ia32_selectsh_128:
+ case X86::BI__builtin_ia32_selectsbf_128:
+ case X86::BI__builtin_ia32_selectss_128:
+ case X86::BI__builtin_ia32_selectsd_128:
+ case X86::BI__builtin_ia32_cmpb128_mask:
+ case X86::BI__builtin_ia32_cmpb256_mask:
+ case X86::BI__builtin_ia32_cmpb512_mask:
+ case X86::BI__builtin_ia32_cmpw128_mask:
+ case X86::BI__builtin_ia32_cmpw256_mask:
+ case X86::BI__builtin_ia32_cmpw512_mask:
+ case X86::BI__builtin_ia32_cmpd128_mask:
+ case X86::BI__builtin_ia32_cmpd256_mask:
+ case X86::BI__builtin_ia32_cmpd512_mask:
+ case X86::BI__builtin_ia32_cmpq128_mask:
+ case X86::BI__builtin_ia32_cmpq256_mask:
+ case X86::BI__builtin_ia32_cmpq512_mask:
+ case X86::BI__builtin_ia32_ucmpb128_mask:
+ case X86::BI__builtin_ia32_ucmpb256_mask:
+ case X86::BI__builtin_ia32_ucmpb512_mask:
+ case X86::BI__builtin_ia32_ucmpw128_mask:
+ case X86::BI__builtin_ia32_ucmpw256_mask:
+ case X86::BI__builtin_ia32_ucmpw512_mask:
+ case X86::BI__builtin_ia32_ucmpd128_mask:
+ case X86::BI__builtin_ia32_ucmpd256_mask:
+ case X86::BI__builtin_ia32_ucmpd512_mask:
+ case X86::BI__builtin_ia32_ucmpq128_mask:
+ case X86::BI__builtin_ia32_ucmpq256_mask:
+ case X86::BI__builtin_ia32_ucmpq512_mask:
+ case X86::BI__builtin_ia32_vpcomb:
+ case X86::BI__builtin_ia32_vpcomw:
+ case X86::BI__builtin_ia32_vpcomd:
+ case X86::BI__builtin_ia32_vpcomq:
+ case X86::BI__builtin_ia32_vpcomub:
+ case X86::BI__builtin_ia32_vpcomuw:
+ case X86::BI__builtin_ia32_vpcomud:
+ case X86::BI__builtin_ia32_vpcomuq:
+ case X86::BI__builtin_ia32_kortestcqi:
+ case X86::BI__builtin_ia32_kortestchi:
+ case X86::BI__builtin_ia32_kortestcsi:
+ case X86::BI__builtin_ia32_kortestcdi:
+ case X86::BI__builtin_ia32_kortestzqi:
+ case X86::BI__builtin_ia32_kortestzhi:
+ case X86::BI__builtin_ia32_kortestzsi:
+ case X86::BI__builtin_ia32_kortestzdi:
+ case X86::BI__builtin_ia32_ktestcqi:
+ case X86::BI__builtin_ia32_ktestzqi:
+ case X86::BI__builtin_ia32_ktestchi:
+ case X86::BI__builtin_ia32_ktestzhi:
+ case X86::BI__builtin_ia32_ktestcsi:
+ case X86::BI__builtin_ia32_ktestzsi:
+ case X86::BI__builtin_ia32_ktestcdi:
+ case X86::BI__builtin_ia32_ktestzdi:
+ case X86::BI__builtin_ia32_kaddqi:
+ case X86::BI__builtin_ia32_kaddhi:
+ case X86::BI__builtin_ia32_kaddsi:
+ case X86::BI__builtin_ia32_kadddi:
+ case X86::BI__builtin_ia32_kandqi:
+ case X86::BI__builtin_ia32_kandhi:
+ case X86::BI__builtin_ia32_kandsi:
+ case X86::BI__builtin_ia32_kanddi:
+ case X86::BI__builtin_ia32_kandnqi:
+ case X86::BI__builtin_ia32_kandnhi:
+ case X86::BI__builtin_ia32_kandnsi:
+ case X86::BI__builtin_ia32_kandndi:
+ case X86::BI__builtin_ia32_korqi:
+ case X86::BI__builtin_ia32_korhi:
+ case X86::BI__builtin_ia32_korsi:
+ case X86::BI__builtin_ia32_kordi:
+ case X86::BI__builtin_ia32_kxnorqi:
+ case X86::BI__builtin_ia32_kxnorhi:
+ case X86::BI__builtin_ia32_kxnorsi:
+ case X86::BI__builtin_ia32_kxnordi:
+ case X86::BI__builtin_ia32_kxorqi:
+ case X86::BI__builtin_ia32_kxorhi:
+ case X86::BI__builtin_ia32_kxorsi:
+ case X86::BI__builtin_ia32_kxordi:
+ case X86::BI__builtin_ia32_knotqi:
+ case X86::BI__builtin_ia32_knothi:
+ case X86::BI__builtin_ia32_knotsi:
+ case X86::BI__builtin_ia32_knotdi:
+ case X86::BI__builtin_ia32_kmovb:
+ case X86::BI__builtin_ia32_kmovw:
+ case X86::BI__builtin_ia32_kmovd:
+ case X86::BI__builtin_ia32_kmovq:
+ case X86::BI__builtin_ia32_kunpckdi:
+ case X86::BI__builtin_ia32_kunpcksi:
+ case X86::BI__builtin_ia32_kunpckhi:
+ case X86::BI__builtin_ia32_sqrtsh_round_mask:
+ case X86::BI__builtin_ia32_sqrtsd_round_mask:
+ case X86::BI__builtin_ia32_sqrtss_round_mask:
+ case X86::BI__builtin_ia32_sqrtpd256:
+ case X86::BI__builtin_ia32_sqrtpd:
+ case X86::BI__builtin_ia32_sqrtps256:
+ case X86::BI__builtin_ia32_sqrtps:
+ case X86::BI__builtin_ia32_sqrtph256:
+ case X86::BI__builtin_ia32_sqrtph:
+ case X86::BI__builtin_ia32_sqrtph512:
+ case X86::BI__builtin_ia32_vsqrtbf16256:
+ case X86::BI__builtin_ia32_vsqrtbf16:
+ case X86::BI__builtin_ia32_vsqrtbf16512:
+ case X86::BI__builtin_ia32_sqrtps512:
+ case X86::BI__builtin_ia32_sqrtpd512:
+ case X86::BI__builtin_ia32_pmuludq128:
+ case X86::BI__builtin_ia32_pmuludq256:
+ case X86::BI__builtin_ia32_pmuludq512:
+ case X86::BI__builtin_ia32_pmuldq128:
+ case X86::BI__builtin_ia32_pmuldq256:
+ case X86::BI__builtin_ia32_pmuldq512:
+ case X86::BI__builtin_ia32_pternlogd512_mask:
+ case X86::BI__builtin_ia32_pternlogq512_mask:
+ case X86::BI__builtin_ia32_pternlogd128_mask:
+ case X86::BI__builtin_ia32_pternlogd256_mask:
+ case X86::BI__builtin_ia32_pternlogq128_mask:
+ case X86::BI__builtin_ia32_pternlogq256_mask:
+ case X86::BI__builtin_ia32_pternlogd512_maskz:
+ case X86::BI__builtin_ia32_pternlogq512_maskz:
+ case X86::BI__builtin_ia32_pternlogd128_maskz:
+ case X86::BI__builtin_ia32_pternlogd256_maskz:
+ case X86::BI__builtin_ia32_pternlogq128_maskz:
+ case X86::BI__builtin_ia32_pternlogq256_maskz:
+ case X86::BI__builtin_ia32_vpshldd128:
+ case X86::BI__builtin_ia32_vpshldd256:
+ case X86::BI__builtin_ia32_vpshldd512:
+ case X86::BI__builtin_ia32_vpshldq128:
+ case X86::BI__builtin_ia32_vpshldq256:
+ case X86::BI__builtin_ia32_vpshldq512:
+ case X86::BI__builtin_ia32_vpshldw128:
+ case X86::BI__builtin_ia32_vpshldw256:
+ case X86::BI__builtin_ia32_vpshldw512:
+ case X86::BI__builtin_ia32_vpshrdd128:
+ case X86::BI__builtin_ia32_vpshrdd256:
+ case X86::BI__builtin_ia32_vpshrdd512:
+ case X86::BI__builtin_ia32_vpshrdq128:
+ case X86::BI__builtin_ia32_vpshrdq256:
+ case X86::BI__builtin_ia32_vpshrdq512:
+ case X86::BI__builtin_ia32_vpshrdw128:
+ case X86::BI__builtin_ia32_vpshrdw256:
+ case X86::BI__builtin_ia32_vpshrdw512:
+ case X86::BI__builtin_ia32_reduce_fadd_pd512:
+ case X86::BI__builtin_ia32_reduce_fadd_ps512:
+ case X86::BI__builtin_ia32_reduce_fadd_ph512:
+ case X86::BI__builtin_ia32_reduce_fadd_ph256:
+ case X86::BI__builtin_ia32_reduce_fadd_ph128:
+ case X86::BI__builtin_ia32_reduce_fmul_pd512:
+ case X86::BI__builtin_ia32_reduce_fmul_ps512:
+ case X86::BI__builtin_ia32_reduce_fmul_ph512:
+ case X86::BI__builtin_ia32_reduce_fmul_ph256:
+ case X86::BI__builtin_ia32_reduce_fmul_ph128:
+ case X86::BI__builtin_ia32_reduce_fmax_pd512:
+ case X86::BI__builtin_ia32_reduce_fmax_ps512:
+ case X86::BI__builtin_ia32_reduce_fmax_ph512:
+ case X86::BI__builtin_ia32_reduce_fmax_ph256:
+ case X86::BI__builtin_ia32_reduce_fmax_ph128:
+ case X86::BI__builtin_ia32_reduce_fmin_pd512:
+ case X86::BI__builtin_ia32_reduce_fmin_ps512:
+ case X86::BI__builtin_ia32_reduce_fmin_ph512:
+ case X86::BI__builtin_ia32_reduce_fmin_ph256:
+ case X86::BI__builtin_ia32_reduce_fmin_ph128:
+ case X86::BI__builtin_ia32_rdrand16_step:
+ case X86::BI__builtin_ia32_rdrand32_step:
+ case X86::BI__builtin_ia32_rdrand64_step:
+ case X86::BI__builtin_ia32_rdseed16_step:
+ case X86::BI__builtin_ia32_rdseed32_step:
+ case X86::BI__builtin_ia32_rdseed64_step:
+ case X86::BI__builtin_ia32_addcarryx_u32:
+ case X86::BI__builtin_ia32_addcarryx_u64:
+ case X86::BI__builtin_ia32_subborrow_u32:
+ case X86::BI__builtin_ia32_subborrow_u64:
+ case X86::BI__builtin_ia32_fpclassps128_mask:
+ case X86::BI__builtin_ia32_fpclassps256_mask:
+ case X86::BI__builtin_ia32_fpclassps512_mask:
+ case X86::BI__builtin_ia32_vfpclassbf16128_mask:
+ case X86::BI__builtin_ia32_vfpclassbf16256_mask:
+ case X86::BI__builtin_ia32_vfpclassbf16512_mask:
+ case X86::BI__builtin_ia32_fpclassph128_mask:
+ case X86::BI__builtin_ia32_fpclassph256_mask:
+ case X86::BI__builtin_ia32_fpclassph512_mask:
+ case X86::BI__builtin_ia32_fpclasspd128_mask:
+ case X86::BI__builtin_ia32_fpclasspd256_mask:
+ case X86::BI__builtin_ia32_fpclasspd512_mask:
+ case X86::BI__builtin_ia32_vp2intersect_q_512:
+ case X86::BI__builtin_ia32_vp2intersect_q_256:
+ case X86::BI__builtin_ia32_vp2intersect_q_128:
+ case X86::BI__builtin_ia32_vp2intersect_d_512:
+ case X86::BI__builtin_ia32_vp2intersect_d_256:
+ case X86::BI__builtin_ia32_vp2intersect_d_128:
+ case X86::BI__builtin_ia32_vpmultishiftqb128:
+ case X86::BI__builtin_ia32_vpmultishiftqb256:
+ case X86::BI__builtin_ia32_vpmultishiftqb512:
+ case X86::BI__builtin_ia32_vpshufbitqmb128_mask:
+ case X86::BI__builtin_ia32_vpshufbitqmb256_mask:
+ case X86::BI__builtin_ia32_vpshufbitqmb512_mask:
+ case X86::BI__builtin_ia32_cmpeqps:
+ case X86::BI__builtin_ia32_cmpeqpd:
+ case X86::BI__builtin_ia32_cmpltps:
+ case X86::BI__builtin_ia32_cmpltpd:
+ case X86::BI__builtin_ia32_cmpleps:
+ case X86::BI__builtin_ia32_cmplepd:
+ case X86::BI__builtin_ia32_cmpunordps:
+ case X86::BI__builtin_ia32_cmpunordpd:
+ case X86::BI__builtin_ia32_cmpneqps:
+ case X86::BI__builtin_ia32_cmpneqpd:
+ case X86::BI__builtin_ia32_cmpnltps:
+ case X86::BI__builtin_ia32_cmpnltpd:
+ case X86::BI__builtin_ia32_cmpnleps:
+ case X86::BI__builtin_ia32_cmpnlepd:
+ case X86::BI__builtin_ia32_cmpordps:
+ case X86::BI__builtin_ia32_cmpordpd:
+ case X86::BI__builtin_ia32_cmpph128_mask:
+ case X86::BI__builtin_ia32_cmpph256_mask:
+ case X86::BI__builtin_ia32_cmpph512_mask:
+ case X86::BI__builtin_ia32_cmpps128_mask:
+ case X86::BI__builtin_ia32_cmpps256_mask:
+ case X86::BI__builtin_ia32_cmpps512_mask:
+ case X86::BI__builtin_ia32_cmppd128_mask:
+ case X86::BI__builtin_ia32_cmppd256_mask:
+ case X86::BI__builtin_ia32_cmppd512_mask:
+ case X86::BI__builtin_ia32_vcmpbf16512_mask:
+ case X86::BI__builtin_ia32_vcmpbf16256_mask:
+ case X86::BI__builtin_ia32_vcmpbf16128_mask:
+ case X86::BI__builtin_ia32_cmpps:
+ case X86::BI__builtin_ia32_cmpps256:
+ case X86::BI__builtin_ia32_cmppd:
+ case X86::BI__builtin_ia32_cmppd256:
+ case X86::BI__builtin_ia32_cmpeqss:
+ case X86::BI__builtin_ia32_cmpltss:
+ case X86::BI__builtin_ia32_cmpless:
+ case X86::BI__builtin_ia32_cmpunordss:
+ case X86::BI__builtin_ia32_cmpneqss:
+ case X86::BI__builtin_ia32_cmpnltss:
+ case X86::BI__builtin_ia32_cmpnless:
+ case X86::BI__builtin_ia32_cmpordss:
+ case X86::BI__builtin_ia32_cmpeqsd:
+ case X86::BI__builtin_ia32_cmpltsd:
+ case X86::BI__builtin_ia32_cmplesd:
+ case X86::BI__builtin_ia32_cmpunordsd:
+ case X86::BI__builtin_ia32_cmpneqsd:
+ case X86::BI__builtin_ia32_cmpnltsd:
+ case X86::BI__builtin_ia32_cmpnlesd:
+ case X86::BI__builtin_ia32_cmpordsd:
+ case X86::BI__builtin_ia32_vcvtph2ps_mask:
+ case X86::BI__builtin_ia32_vcvtph2ps256_mask:
+ case X86::BI__builtin_ia32_vcvtph2ps512_mask:
+ case X86::BI__builtin_ia32_cvtneps2bf16_128_mask:
+ case X86::BI__builtin_ia32_cvtsbf162ss_32:
+ case X86::BI__builtin_ia32_cvtneps2bf16_256_mask:
+ case X86::BI__builtin_ia32_cvtneps2bf16_512_mask:
+ case X86::BI__cpuid:
+ case X86::BI__cpuidex:
+ case X86::BI__emul:
+ case X86::BI__emulu:
+ case X86::BI__mulh:
+ case X86::BI__umulh:
+ case X86::BI_mul128:
+ case X86::BI_umul128:
+ case X86::BI__faststorefence:
+ case X86::BI__shiftleft128:
+ case X86::BI__shiftright128:
+ case X86::BI_ReadWriteBarrier:
+ case X86::BI_ReadBarrier:
+ case X86::BI_WriteBarrier:
+ case X86::BI_AddressOfReturnAddress:
+ case X86::BI__stosb:
+ case X86::BI__builtin_ia32_t2rpntlvwz0_internal:
+ case X86::BI__builtin_ia32_t2rpntlvwz0rs_internal:
+ case X86::BI__builtin_ia32_t2rpntlvwz0t1_internal:
+ case X86::BI__builtin_ia32_t2rpntlvwz0rst1_internal:
+ case X86::BI__builtin_ia32_t2rpntlvwz1_internal:
+ case X86::BI__builtin_ia32_t2rpntlvwz1rs_internal:
+ case X86::BI__builtin_ia32_t2rpntlvwz1t1_internal:
+ case X86::BI__builtin_ia32_t2rpntlvwz1rst1_internal:
+ case X86::BI__ud2:
+ case X86::BI__int2c:
+ case X86::BI__readfsbyte:
+ case X86::BI__readfsword:
+ case X86::BI__readfsdword:
+ case X86::BI__readfsqword:
+ case X86::BI__readgsbyte:
+ case X86::BI__readgsword:
+ case X86::BI__readgsdword:
+ case X86::BI__readgsqword:
+ case X86::BI__builtin_ia32_encodekey128_u32:
+ case X86::BI__builtin_ia32_encodekey256_u32:
+ case X86::BI__builtin_ia32_aesenc128kl_u8:
+ case X86::BI__builtin_ia32_aesdec128kl_u8:
+ case X86::BI__builtin_ia32_aesenc256kl_u8:
+ case X86::BI__builtin_ia32_aesdec256kl_u8:
+ case X86::BI__builtin_ia32_aesencwide128kl_u8:
+ case X86::BI__builtin_ia32_aesdecwide128kl_u8:
+ case X86::BI__builtin_ia32_aesencwide256kl_u8:
+ case X86::BI__builtin_ia32_aesdecwide256kl_u8:
+ case X86::BI__builtin_ia32_vfcmaddcph512_mask:
+ case X86::BI__builtin_ia32_vfmaddcph512_mask:
+ case X86::BI__builtin_ia32_vfcmaddcsh_round_mask:
+ case X86::BI__builtin_ia32_vfmaddcsh_round_mask:
+ case X86::BI__builtin_ia32_vfcmaddcsh_round_mask3:
+ case X86::BI__builtin_ia32_vfmaddcsh_round_mask3:
+ case X86::BI__builtin_ia32_prefetchi:
+ cgm.errorNYI(e->getSourceRange(),
+ std::string("unimplemented X86 builtin call: ") +
+ getContext().BuiltinInfo.getName(builtinID));
+ return {};
+ }
+}
diff --git a/clang/lib/CIR/CodeGen/CIRGenCXXABI.cpp b/clang/lib/CIR/CodeGen/CIRGenCXXABI.cpp
index df42af8..eef3739 100644
--- a/clang/lib/CIR/CodeGen/CIRGenCXXABI.cpp
+++ b/clang/lib/CIR/CodeGen/CIRGenCXXABI.cpp
@@ -37,6 +37,10 @@ CIRGenCXXABI::AddedStructorArgCounts CIRGenCXXABI::addImplicitConstructorArgs(
addedArgs.suffix.size());
}
+CatchTypeInfo CIRGenCXXABI::getCatchAllTypeInfo() {
+ return CatchTypeInfo{{}, 0};
+}
+
void CIRGenCXXABI::buildThisParam(CIRGenFunction &cgf,
FunctionArgList &params) {
const auto *md = cast<CXXMethodDecl>(cgf.curGD.getDecl());
@@ -81,8 +85,7 @@ CharUnits CIRGenCXXABI::getArrayCookieSize(const CXXNewExpr *e) {
if (!requiresArrayCookie(e))
return CharUnits::Zero();
- cgm.errorNYI(e->getSourceRange(), "CIRGenCXXABI::getArrayCookieSize");
- return CharUnits::Zero();
+ return getArrayCookieSizeImpl(e->getAllocatedType());
}
bool CIRGenCXXABI::requiresArrayCookie(const CXXNewExpr *e) {
diff --git a/clang/lib/CIR/CodeGen/CIRGenCXXABI.h b/clang/lib/CIR/CodeGen/CIRGenCXXABI.h
index 6d3741c4..c78f9b0 100644
--- a/clang/lib/CIR/CodeGen/CIRGenCXXABI.h
+++ b/clang/lib/CIR/CodeGen/CIRGenCXXABI.h
@@ -15,6 +15,7 @@
#define LLVM_CLANG_LIB_CIR_CIRGENCXXABI_H
#include "CIRGenCall.h"
+#include "CIRGenCleanup.h"
#include "CIRGenFunction.h"
#include "CIRGenModule.h"
@@ -155,6 +156,8 @@ public:
/// Loads the incoming C++ this pointer as it was passed by the caller.
mlir::Value loadIncomingCXXThis(CIRGenFunction &cgf);
+ virtual CatchTypeInfo getCatchAllTypeInfo();
+
/// Get the implicit (second) parameter that comes after the "this" pointer,
   /// or nullptr if there isn't one.
virtual mlir::Value getCXXDestructorImplicitParam(CIRGenFunction &cgf,
@@ -299,8 +302,28 @@ public:
/// - non-array allocations never need a cookie
/// - calls to \::operator new(size_t, void*) never need a cookie
///
- /// \param E - the new-expression being allocated.
+ /// \param e - the new-expression being allocated.
virtual CharUnits getArrayCookieSize(const CXXNewExpr *e);
+
+ /// Initialize the array cookie for the given allocation.
+ ///
+ /// \param newPtr - a char* which is the presumed-non-null
+ /// return value of the allocation function
+ /// \param numElements - the computed number of elements,
+ /// potentially collapsed from the multidimensional array case;
+ /// always a size_t
+ /// \param elementType - the base element allocated type,
+ /// i.e. the allocated type after stripping all array types
+ virtual Address initializeArrayCookie(CIRGenFunction &cgf, Address newPtr,
+ mlir::Value numElements,
+ const CXXNewExpr *e,
+ QualType elementType) = 0;
+
+protected:
+ /// Returns the extra size required in order to store the array
+ /// cookie for the given type. Assumes that an array cookie is
+ /// required.
+ virtual CharUnits getArrayCookieSizeImpl(QualType elementType) = 0;
};
 /// Creates an Itanium-family ABI
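For context on the array-cookie hooks declared above: under the Itanium C++ ABI, an array new-expression whose elements need delete[] bookkeeping stores the element count in a small header (the "cookie") placed in front of the array, which is what getArrayCookieSizeImpl sizes and initializeArrayCookie writes. A minimal standalone sketch of that layout, assuming a plain size_t cookie (real ABIs may pad for alignment); the helper name is made up for illustration:

#include <cstddef>
#include <new>

// Illustrative only: roughly what the allocation looks like for `new Elem[n]`
// when a cookie is required. The pointer handed back to the program skips
// past the cookie; delete[] later reads n from just before that pointer.
void *allocateWithCookie(std::size_t n, std::size_t elemSize) {
  std::size_t cookie = sizeof(std::size_t);  // analogue of getArrayCookieSize()
  char *raw = static_cast<char *>(::operator new(cookie + n * elemSize));
  *reinterpret_cast<std::size_t *>(raw) = n; // analogue of initializeArrayCookie()
  return raw + cookie;                       // array storage starts here
}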
diff --git a/clang/lib/CIR/CodeGen/CIRGenCleanup.cpp b/clang/lib/CIR/CodeGen/CIRGenCleanup.cpp
index 8700697..851328a 100644
--- a/clang/lib/CIR/CodeGen/CIRGenCleanup.cpp
+++ b/clang/lib/CIR/CodeGen/CIRGenCleanup.cpp
@@ -28,6 +28,46 @@ using namespace clang::CIRGen;
// CIRGenFunction cleanup related
//===----------------------------------------------------------------------===//
+/// Build an unconditional branch to the lexical scope cleanup block,
+/// or to the labeled block if it has already been resolved.
+///
+/// Track, on a per-scope basis, the goto's we need to fix up later.
+cir::BrOp CIRGenFunction::emitBranchThroughCleanup(mlir::Location loc,
+ JumpDest dest) {
+  // Insert a branch: to the cleanup block (unresolved) or to the already
+  // materialized label. Keep track of unresolved goto's.
+ assert(dest.getBlock() && "assumes incoming valid dest");
+ auto brOp = cir::BrOp::create(builder, loc, dest.getBlock());
+
+ // Calculate the innermost active normal cleanup.
+ EHScopeStack::stable_iterator topCleanup =
+ ehStack.getInnermostActiveNormalCleanup();
+
+ // If we're not in an active normal cleanup scope, or if the
+ // destination scope is within the innermost active normal cleanup
+ // scope, we don't need to worry about fixups.
+ if (topCleanup == ehStack.stable_end() ||
+ topCleanup.encloses(dest.getScopeDepth())) { // works for invalid
+ // FIXME(cir): should we clear insertion point here?
+ return brOp;
+ }
+
+ // If we can't resolve the destination cleanup scope, just add this
+ // to the current cleanup scope as a branch fixup.
+ if (!dest.getScopeDepth().isValid()) {
+ BranchFixup &fixup = ehStack.addBranchFixup();
+ fixup.destination = dest.getBlock();
+ fixup.destinationIndex = dest.getDestIndex();
+ fixup.initialBranch = brOp;
+ fixup.optimisticBranchBlock = nullptr;
+ // FIXME(cir): should we clear insertion point here?
+ return brOp;
+ }
+
+ cgm.errorNYI(loc, "emitBranchThroughCleanup: valid destination scope depth");
+ return brOp;
+}
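A rough usage sketch, not part of the patch: this is how a return (or goto) emitted inside a scope that has pushed a normal cleanup would be routed, assuming only the public CIRGenFunction entry points added in this patch and the usual CIRGen headers:

// Hypothetical caller, simplified. `auto` is used because JumpDest is a
// private nested type; both member functions called here are public.
void emitReturnThroughCleanups(clang::CIRGen::CIRGenFunction &cgf,
                               mlir::Location loc, mlir::Block *retBlock) {
  // Records the target block together with the innermost normal cleanup
  // depth and a fresh destination index.
  auto dest = cgf.getJumpDestInCurrentScope(retBlock);
  // Emits a cir.br; if an active normal cleanup encloses the destination,
  // the branch is recorded as a BranchFixup to be retargeted later.
  cgf.emitBranchThroughCleanup(loc, dest);
}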
+
/// Emits all the code to cause the given temporary to be cleaned up.
void CIRGenFunction::emitCXXTemporary(const CXXTemporary *temporary,
QualType tempType, Address ptr) {
@@ -40,6 +80,19 @@ void CIRGenFunction::emitCXXTemporary(const CXXTemporary *temporary,
void EHScopeStack::Cleanup::anchor() {}
+EHScopeStack::stable_iterator
+EHScopeStack::getInnermostActiveNormalCleanup() const {
+ stable_iterator si = getInnermostNormalCleanup();
+ stable_iterator se = stable_end();
+ while (si != se) {
+ EHCleanupScope &cleanup = llvm::cast<EHCleanupScope>(*find(si));
+ if (cleanup.isActive())
+ return si;
+ si = cleanup.getEnclosingNormalCleanup();
+ }
+ return stable_end();
+}
+
/// Push an entry of the given size onto this protected-scope stack.
char *EHScopeStack::allocate(size_t size) {
size = llvm::alignTo(size, ScopeStackAlignment);
@@ -75,14 +128,30 @@ void EHScopeStack::deallocate(size_t size) {
startOfData += llvm::alignTo(size, ScopeStackAlignment);
}
+/// Remove any 'null' fixups on the stack. However, we can't pop more
+/// fixups than the fixup depth on the innermost normal cleanup, or
+/// else fixups that we try to add to that cleanup will end up in the
+/// wrong place. We *could* try to shrink fixup depths, but that's
+/// actually a lot of work for little benefit.
+void EHScopeStack::popNullFixups() {
+ // We expect this to only be called when there's still an innermost
+ // normal cleanup; otherwise there really shouldn't be any fixups.
+ cgf->cgm.errorNYI("popNullFixups");
+}
+
void *EHScopeStack::pushCleanup(CleanupKind kind, size_t size) {
char *buffer = allocate(EHCleanupScope::getSizeForCleanupSize(size));
+ bool isNormalCleanup = kind & NormalCleanup;
bool isEHCleanup = kind & EHCleanup;
bool isLifetimeMarker = kind & LifetimeMarker;
assert(!cir::MissingFeatures::innermostEHScope());
- EHCleanupScope *scope = new (buffer) EHCleanupScope(size);
+ EHCleanupScope *scope = new (buffer)
+ EHCleanupScope(size, branchFixups.size(), innermostNormalCleanup);
+
+ if (isNormalCleanup)
+ innermostNormalCleanup = stable_begin();
if (isLifetimeMarker)
cgf->cgm.errorNYI("push lifetime marker cleanup");
@@ -100,12 +169,30 @@ void EHScopeStack::popCleanup() {
assert(isa<EHCleanupScope>(*begin()));
EHCleanupScope &cleanup = cast<EHCleanupScope>(*begin());
+ innermostNormalCleanup = cleanup.getEnclosingNormalCleanup();
deallocate(cleanup.getAllocatedSize());
// Destroy the cleanup.
cleanup.destroy();
- assert(!cir::MissingFeatures::ehCleanupBranchFixups());
+ // Check whether we can shrink the branch-fixups stack.
+ if (!branchFixups.empty()) {
+ // If we no longer have any normal cleanups, all the fixups are
+ // complete.
+ if (!hasNormalCleanups()) {
+ branchFixups.clear();
+ } else {
+ // Otherwise we can still trim out unnecessary nulls.
+ popNullFixups();
+ }
+ }
+}
+
+EHCatchScope *EHScopeStack::pushCatch(unsigned numHandlers) {
+ char *buffer = allocate(EHCatchScope::getSizeForNumHandlers(numHandlers));
+ assert(!cir::MissingFeatures::innermostEHScope());
+ EHCatchScope *scope = new (buffer) EHCatchScope(numHandlers);
+ return scope;
}
static void emitCleanup(CIRGenFunction &cgf, EHScopeStack::Cleanup *cleanup) {
@@ -116,6 +203,18 @@ static void emitCleanup(CIRGenFunction &cgf, EHScopeStack::Cleanup *cleanup) {
assert(cgf.haveInsertPoint() && "cleanup ended with no insertion point?");
}
+static mlir::Block *createNormalEntry(CIRGenFunction &cgf,
+ EHCleanupScope &scope) {
+ assert(scope.isNormalCleanup());
+ mlir::Block *entry = scope.getNormalBlock();
+ if (!entry) {
+ mlir::OpBuilder::InsertionGuard guard(cgf.getBuilder());
+ entry = cgf.curLexScope->getOrCreateCleanupBlock(cgf.getBuilder());
+ scope.setNormalBlock(entry);
+ }
+ return entry;
+}
+
/// Pops a cleanup block. If the block includes a normal cleanup, the
/// current insertion point is threaded through the cleanup, as are
/// any branch fixups on the cleanup.
@@ -123,17 +222,21 @@ void CIRGenFunction::popCleanupBlock() {
assert(!ehStack.empty() && "cleanup stack is empty!");
assert(isa<EHCleanupScope>(*ehStack.begin()) && "top not a cleanup!");
EHCleanupScope &scope = cast<EHCleanupScope>(*ehStack.begin());
+ assert(scope.getFixupDepth() <= ehStack.getNumBranchFixups());
// Remember activation information.
bool isActive = scope.isActive();
- assert(!cir::MissingFeatures::ehCleanupBranchFixups());
+ // - whether there are branch fix-ups through this cleanup
+ unsigned fixupDepth = scope.getFixupDepth();
+ bool hasFixups = ehStack.getNumBranchFixups() != fixupDepth;
// - whether there's a fallthrough
mlir::Block *fallthroughSource = builder.getInsertionBlock();
bool hasFallthrough = fallthroughSource != nullptr && isActive;
- bool requiresNormalCleanup = scope.isNormalCleanup() && hasFallthrough;
+ bool requiresNormalCleanup =
+ scope.isNormalCleanup() && (hasFixups || hasFallthrough);
// If we don't need the cleanup at all, we're done.
assert(!cir::MissingFeatures::ehCleanupScopeRequiresEHCleanup());
@@ -168,9 +271,119 @@ void CIRGenFunction::popCleanupBlock() {
assert(!cir::MissingFeatures::ehCleanupFlags());
- ehStack.popCleanup();
- scope.markEmitted();
- emitCleanup(*this, cleanup);
+ // If we have a fallthrough and no other need for the cleanup,
+ // emit it directly.
+ if (hasFallthrough && !hasFixups) {
+ assert(!cir::MissingFeatures::ehCleanupScopeRequiresEHCleanup());
+ ehStack.popCleanup();
+ scope.markEmitted();
+ emitCleanup(*this, cleanup);
+ } else {
+ // Otherwise, the best approach is to thread everything through
+ // the cleanup block and then try to clean up after ourselves.
+
+ // Force the entry block to exist.
+ mlir::Block *normalEntry = createNormalEntry(*this, scope);
+
+ // I. Set up the fallthrough edge in.
+ mlir::OpBuilder::InsertPoint savedInactiveFallthroughIP;
+
+ // If there's a fallthrough, we need to store the cleanup
+ // destination index. For fall-throughs this is always zero.
+ if (hasFallthrough) {
+ assert(!cir::MissingFeatures::ehCleanupHasPrebranchedFallthrough());
+
+ } else if (fallthroughSource) {
+ // Otherwise, save and clear the IP if we don't have fallthrough
+ // because the cleanup is inactive.
+ assert(!isActive && "source without fallthrough for active cleanup");
+ savedInactiveFallthroughIP = builder.saveInsertionPoint();
+ }
+
+ // II. Emit the entry block. This implicitly branches to it if
+ // we have fallthrough. All the fixups and existing branches
+ // should already be branched to it.
+ builder.setInsertionPointToEnd(normalEntry);
+
+ // intercept normal cleanup to mark SEH scope end
+ assert(!cir::MissingFeatures::ehCleanupScopeRequiresEHCleanup());
+
+ // III. Figure out where we're going and build the cleanup
+ // epilogue.
+ bool hasEnclosingCleanups =
+ (scope.getEnclosingNormalCleanup() != ehStack.stable_end());
+
+ // Compute the branch-through dest if we need it:
+ // - if there are branch-throughs threaded through the scope
+ // - if fall-through is a branch-through
+ // - if there are fixups that will be optimistically forwarded
+ // to the enclosing cleanup
+ assert(!cir::MissingFeatures::cleanupBranchThrough());
+ if (hasFixups && hasEnclosingCleanups)
+ cgm.errorNYI("cleanup branch-through dest");
+
+ mlir::Block *fallthroughDest = nullptr;
+
+ // If there's exactly one branch-after and no other threads,
+ // we can route it without a switch.
+ // Skip for SEH, since ExitSwitch is used to generate code to indicate
+ // abnormal termination. (SEH: Except _leave and fall-through at
+ // the end, all other exits in a _try (return/goto/continue/break)
+ // are considered as abnormal terminations, using NormalCleanupDestSlot
+ // to indicate abnormal termination)
+ assert(!cir::MissingFeatures::cleanupBranchThrough());
+ assert(!cir::MissingFeatures::ehCleanupScopeRequiresEHCleanup());
+
+ // IV. Pop the cleanup and emit it.
+ scope.markEmitted();
+ ehStack.popCleanup();
+ assert(ehStack.hasNormalCleanups() == hasEnclosingCleanups);
+
+ emitCleanup(*this, cleanup);
+
+ // Append the prepared cleanup prologue from above.
+ assert(!cir::MissingFeatures::cleanupAppendInsts());
+
+ // Optimistically hope that any fixups will continue falling through.
+ if (fixupDepth != ehStack.getNumBranchFixups())
+ cgm.errorNYI("cleanup fixup depth mismatch");
+
+ // V. Set up the fallthrough edge out.
+
+ // Case 1: a fallthrough source exists but doesn't branch to the
+ // cleanup because the cleanup is inactive.
+ if (!hasFallthrough && fallthroughSource) {
+ // Prebranched fallthrough was forwarded earlier.
+ // Non-prebranched fallthrough doesn't need to be forwarded.
+ // Either way, all we need to do is restore the IP we cleared before.
+ assert(!isActive);
+ cgm.errorNYI("cleanup inactive fallthrough");
+
+ // Case 2: a fallthrough source exists and should branch to the
+ // cleanup, but we're not supposed to branch through to the next
+ // cleanup.
+ } else if (hasFallthrough && fallthroughDest) {
+ cgm.errorNYI("cleanup fallthrough destination");
+
+ // Case 3: a fallthrough source exists and should branch to the
+ // cleanup and then through to the next.
+ } else if (hasFallthrough) {
+ // Everything is already set up for this.
+
+ // Case 4: no fallthrough source exists.
+ } else {
+ // FIXME(cir): should we clear insertion point here?
+ }
+
+ // VI. Assorted cleaning.
+
+ // Check whether we can merge NormalEntry into a single predecessor.
+ // This might invalidate (non-IR) pointers to NormalEntry.
+ //
+ // If it did invalidate those pointers, and normalEntry was the same
+ // as NormalExit, go back and patch up the fixups.
+ assert(!cir::MissingFeatures::simplifyCleanupEntry());
+ }
}
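To make the fixup path above concrete, an assumed source-level example (not taken from the patch or its tests): a jump that leaves a scope with a non-trivial destructor is exactly the case where the branch is first recorded as a BranchFixup and only later threaded through the cleanup block when that cleanup is popped:

struct Guard {
  ~Guard() {} // user-provided destructor, so the scope gets a normal cleanup
};

void f(bool early) {
  {
    Guard g;
    if (early)
      goto out; // scope-crossing jump: ~Guard() must still run on this path
  }
out:
  return;
}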
/// Pops cleanup blocks until the given savepoint is reached.
diff --git a/clang/lib/CIR/CodeGen/CIRGenCleanup.h b/clang/lib/CIR/CodeGen/CIRGenCleanup.h
index 30f5607..9acf8b1 100644
--- a/clang/lib/CIR/CodeGen/CIRGenCleanup.h
+++ b/clang/lib/CIR/CodeGen/CIRGenCleanup.h
@@ -20,6 +20,13 @@
namespace clang::CIRGen {
+/// The MS C++ ABI needs a pointer to RTTI data plus some flags to describe the
+/// type of a catch handler, so we use this wrapper.
+struct CatchTypeInfo {
+ mlir::TypedAttr rtti;
+ unsigned flags;
+};
+
/// A protected scope for zero-cost EH handling.
class EHScope {
class CommonBitFields {
@@ -29,6 +36,12 @@ class EHScope {
enum { NumCommonBits = 3 };
protected:
+ class CatchBitFields {
+ friend class EHCatchScope;
+ unsigned : NumCommonBits;
+ unsigned numHandlers : 32 - NumCommonBits;
+ };
+
class CleanupBitFields {
friend class EHCleanupScope;
unsigned : NumCommonBits;
@@ -58,6 +71,7 @@ protected:
union {
CommonBitFields commonBits;
+ CatchBitFields catchBits;
CleanupBitFields cleanupBits;
};
@@ -67,11 +81,88 @@ public:
EHScope(Kind kind) { commonBits.kind = kind; }
Kind getKind() const { return static_cast<Kind>(commonBits.kind); }
+
+ bool mayThrow() const {
+ // Traditional LLVM codegen also checks for `!block->use_empty()`, but
+ // in CIRGen the block content is not important, just used as a way to
+ // signal `hasEHBranches`.
+ assert(!cir::MissingFeatures::ehstackBranches());
+ return false;
+ }
+};
+
+/// A scope which attempts to handle some, possibly all, types of
+/// exceptions.
+///
+/// Objective C \@finally blocks are represented using a cleanup scope
+/// after the catch scope.
+class EHCatchScope : public EHScope {
+ // In effect, we have a flexible array member
+ // Handler Handlers[0];
+ // But that's only standard in C99, not C++, so we have to do
+ // annoying pointer arithmetic instead.
+
+public:
+ struct Handler {
+ /// A type info value, or null MLIR attribute for a catch-all
+ CatchTypeInfo type;
+
+ /// The catch handler for this type.
+ mlir::Region *region;
+ };
+
+private:
+ friend class EHScopeStack;
+
+ Handler *getHandlers() { return reinterpret_cast<Handler *>(this + 1); }
+
+public:
+ static size_t getSizeForNumHandlers(unsigned n) {
+ return sizeof(EHCatchScope) + n * sizeof(Handler);
+ }
+
+ EHCatchScope(unsigned numHandlers) : EHScope(Catch) {
+ catchBits.numHandlers = numHandlers;
+ assert(catchBits.numHandlers == numHandlers && "NumHandlers overflow?");
+ }
+
+ unsigned getNumHandlers() const { return catchBits.numHandlers; }
+
+ void setHandler(unsigned i, CatchTypeInfo type, mlir::Region *region) {
+ assert(i < getNumHandlers());
+ getHandlers()[i].type = type;
+ getHandlers()[i].region = region;
+ }
+
+ // Clear all handler blocks.
+ // FIXME: it's better to always call clearHandlerBlocks in DTOR and have a
+ // 'takeHandler' or some such function which removes ownership from the
+ // EHCatchScope object if the handlers should live longer than EHCatchScope.
+ void clearHandlerBlocks() {
+ // The blocks are owned by TryOp, nothing to delete.
+ }
+
+ static bool classof(const EHScope *scope) {
+ return scope->getKind() == Catch;
+ }
};
/// A cleanup scope which generates the cleanup blocks lazily.
class alignas(EHScopeStack::ScopeStackAlignment) EHCleanupScope
: public EHScope {
+ /// The nearest normal cleanup scope enclosing this one.
+ EHScopeStack::stable_iterator enclosingNormal;
+
+ /// The dual entry/exit block along the normal edge. This is lazily
+ /// created if needed before the cleanup is popped.
+ mlir::Block *normalBlock = nullptr;
+
+ /// The number of fixups required by enclosing scopes (not including
+ /// this one). If this is the top cleanup scope, all the fixups
+ /// from this index onwards belong to this scope.
+ unsigned fixupDepth = 0;
+
public:
/// Gets the size required for a lazy cleanup scope with the given
/// cleanup-data requirements.
@@ -83,7 +174,10 @@ public:
return sizeof(EHCleanupScope) + cleanupBits.cleanupSize;
}
- EHCleanupScope(unsigned cleanupSize) : EHScope(EHScope::Cleanup) {
+ EHCleanupScope(unsigned cleanupSize, unsigned fixupDepth,
+ EHScopeStack::stable_iterator enclosingNormal)
+ : EHScope(EHScope::Cleanup), enclosingNormal(enclosingNormal),
+ fixupDepth(fixupDepth) {
// TODO(cir): When exception handling is upstreamed, isNormalCleanup and
// isEHCleanup will be arguments to the constructor.
cleanupBits.isNormalCleanup = true;
@@ -101,11 +195,19 @@ public:
// Objects of EHCleanupScope are not destructed. Use destroy().
~EHCleanupScope() = delete;
+ mlir::Block *getNormalBlock() const { return normalBlock; }
+ void setNormalBlock(mlir::Block *bb) { normalBlock = bb; }
+
bool isNormalCleanup() const { return cleanupBits.isNormalCleanup; }
bool isActive() const { return cleanupBits.isActive; }
void setActive(bool isActive) { cleanupBits.isActive = isActive; }
+ unsigned getFixupDepth() const { return fixupDepth; }
+ EHScopeStack::stable_iterator getEnclosingNormalCleanup() const {
+ return enclosingNormal;
+ }
+
size_t getCleanupSize() const { return cleanupBits.cleanupSize; }
void *getCleanupBuffer() { return this + 1; }
@@ -147,5 +249,13 @@ EHScopeStack::find(stable_iterator savePoint) const {
return iterator(endOfBuffer - savePoint.size);
}
+inline void EHScopeStack::popCatch() {
+ assert(!empty() && "popping exception stack when not empty");
+
+ EHCatchScope &scope = llvm::cast<EHCatchScope>(*begin());
+ assert(!cir::MissingFeatures::innermostEHScope());
+ deallocate(EHCatchScope::getSizeForNumHandlers(scope.getNumHandlers()));
+}
+
} // namespace clang::CIRGen
#endif // CLANG_LIB_CIR_CODEGEN_CIRGENCLEANUP_H
diff --git a/clang/lib/CIR/CodeGen/CIRGenException.cpp b/clang/lib/CIR/CodeGen/CIRGenException.cpp
index f9ff37b..717a3e0 100644
--- a/clang/lib/CIR/CodeGen/CIRGenException.cpp
+++ b/clang/lib/CIR/CodeGen/CIRGenException.cpp
@@ -69,6 +69,153 @@ mlir::LogicalResult CIRGenFunction::emitCXXTryStmt(const CXXTryStmt &s) {
if (s.getTryBlock()->body_empty())
return mlir::LogicalResult::success();
- cgm.errorNYI("exitCXXTryStmt: CXXTryStmt with non-empty body");
- return mlir::LogicalResult::success();
+ mlir::Location loc = getLoc(s.getSourceRange());
+ // Create a scope to hold try local storage for catch params.
+
+ mlir::OpBuilder::InsertPoint scopeIP;
+ cir::ScopeOp::create(
+ builder, loc,
+ /*scopeBuilder=*/[&](mlir::OpBuilder &b, mlir::Location loc) {
+ scopeIP = builder.saveInsertionPoint();
+ });
+
+ mlir::OpBuilder::InsertionGuard guard(builder);
+ builder.restoreInsertionPoint(scopeIP);
+ mlir::LogicalResult result = emitCXXTryStmtUnderScope(s);
+ cir::YieldOp::create(builder, loc);
+ return result;
+}
+
+mlir::LogicalResult
+CIRGenFunction::emitCXXTryStmtUnderScope(const CXXTryStmt &s) {
+ const llvm::Triple &t = getTarget().getTriple();
+  // If we encounter a try statement in an OpenMP target region offloaded to a
+  // GPU, we treat it as a basic block.
+ const bool isTargetDevice =
+ (cgm.getLangOpts().OpenMPIsTargetDevice && (t.isNVPTX() || t.isAMDGCN()));
+ if (isTargetDevice) {
+ cgm.errorNYI(
+ "emitCXXTryStmtUnderScope: OpenMP target region offloaded to GPU");
+ return mlir::success();
+ }
+
+ unsigned numHandlers = s.getNumHandlers();
+ mlir::Location tryLoc = getLoc(s.getBeginLoc());
+ mlir::OpBuilder::InsertPoint beginInsertTryBody;
+
+ bool hasCatchAll = false;
+ for (unsigned i = 0; i != numHandlers; ++i) {
+ hasCatchAll |= s.getHandler(i)->getExceptionDecl() == nullptr;
+ if (hasCatchAll)
+ break;
+ }
+
+ // Create the scope to represent only the C/C++ `try {}` part. However,
+ // don't populate right away. Create regions for the catch handlers,
+ // but don't emit the handler bodies yet. For now, only make sure the
+ // scope returns the exception information.
+ auto tryOp = cir::TryOp::create(
+ builder, tryLoc,
+ /*tryBuilder=*/
+ [&](mlir::OpBuilder &b, mlir::Location loc) {
+ beginInsertTryBody = builder.saveInsertionPoint();
+ },
+ /*handlersBuilder=*/
+ [&](mlir::OpBuilder &b, mlir::Location loc,
+ mlir::OperationState &result) {
+ mlir::OpBuilder::InsertionGuard guard(b);
+
+          // We create an extra region for an unwind catch handler in case the
+          // catch-all handler doesn't exist.
+ unsigned numRegionsToCreate =
+ hasCatchAll ? numHandlers : numHandlers + 1;
+
+ for (unsigned i = 0; i != numRegionsToCreate; ++i) {
+ mlir::Region *region = result.addRegion();
+ builder.createBlock(region);
+ }
+ });
+
+ // Finally emit the body for try/catch.
+ {
+ mlir::Location loc = tryOp.getLoc();
+ mlir::OpBuilder::InsertionGuard guard(builder);
+ builder.restoreInsertionPoint(beginInsertTryBody);
+ CIRGenFunction::LexicalScope tryScope{*this, loc,
+ builder.getInsertionBlock()};
+
+ tryScope.setAsTry(tryOp);
+
+ // Attach the basic blocks for the catch regions.
+ enterCXXTryStmt(s, tryOp);
+
+ // Emit the body for the `try {}` part.
+ {
+ mlir::OpBuilder::InsertionGuard guard(builder);
+ CIRGenFunction::LexicalScope tryBodyScope{*this, loc,
+ builder.getInsertionBlock()};
+ if (emitStmt(s.getTryBlock(), /*useCurrentScope=*/true).failed())
+ return mlir::failure();
+ }
+
+ // Emit catch clauses.
+ exitCXXTryStmt(s);
+ }
+
+ return mlir::success();
+}
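As an assumed example of what this new path currently accepts: only catch-all handlers are supported (a handler with an exception declaration still reaches errorNYI in enterCXXTryStmt below), and when the try body provably cannot throw, exitCXXTryStmt drops the handler regions again:

void mayThrow();

void f() {
  try {
    mayThrow();   // try body, emitted under its own lexical scope
  } catch (...) { // no exception decl, so getCatchAllTypeInfo() is used
  }
}

// Still NYI: a handler that names a type, e.g.
//   try { mayThrow(); } catch (const MyError &e) { ... }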
+
+void CIRGenFunction::enterCXXTryStmt(const CXXTryStmt &s, cir::TryOp tryOp,
+ bool isFnTryBlock) {
+ unsigned numHandlers = s.getNumHandlers();
+ EHCatchScope *catchScope = ehStack.pushCatch(numHandlers);
+ for (unsigned i = 0; i != numHandlers; ++i) {
+ const CXXCatchStmt *catchStmt = s.getHandler(i);
+ if (catchStmt->getExceptionDecl()) {
+ cgm.errorNYI("enterCXXTryStmt: CatchStmt with ExceptionDecl");
+ return;
+ }
+
+ // No exception decl indicates '...', a catch-all.
+ mlir::Region *handler = &tryOp.getHandlerRegions()[i];
+ catchScope->setHandler(i, cgm.getCXXABI().getCatchAllTypeInfo(), handler);
+
+    // Under async exceptions, catch(...) needs to catch HW exceptions too.
+    // Mark the scope with SehTryBegin as a SEH __try scope.
+ if (getLangOpts().EHAsynch) {
+ cgm.errorNYI("enterCXXTryStmt: EHAsynch");
+ return;
+ }
+ }
+}
+
+void CIRGenFunction::exitCXXTryStmt(const CXXTryStmt &s, bool isFnTryBlock) {
+ unsigned numHandlers = s.getNumHandlers();
+ EHCatchScope &catchScope = cast<EHCatchScope>(*ehStack.begin());
+ assert(catchScope.getNumHandlers() == numHandlers);
+ cir::TryOp tryOp = curLexScope->getTry();
+
+ // If the catch was not required, bail out now.
+ if (!catchScope.mayThrow()) {
+ catchScope.clearHandlerBlocks();
+ ehStack.popCatch();
+
+    // Drop all basic blocks from all catch regions.
+ SmallVector<mlir::Block *> eraseBlocks;
+ for (mlir::Region &handlerRegion : tryOp.getHandlerRegions()) {
+ if (handlerRegion.empty())
+ continue;
+
+ for (mlir::Block &b : handlerRegion.getBlocks())
+ eraseBlocks.push_back(&b);
+ }
+
+ for (mlir::Block *b : eraseBlocks)
+ b->erase();
+
+ tryOp.setHandlerTypesAttr({});
+ return;
+ }
+
+ cgm.errorNYI("exitCXXTryStmt: Required catch");
}
diff --git a/clang/lib/CIR/CodeGen/CIRGenExpr.cpp b/clang/lib/CIR/CodeGen/CIRGenExpr.cpp
index 9732c9c..52021fc 100644
--- a/clang/lib/CIR/CodeGen/CIRGenExpr.cpp
+++ b/clang/lib/CIR/CodeGen/CIRGenExpr.cpp
@@ -1675,7 +1675,25 @@ CIRGenCallee CIRGenFunction::emitDirectCallee(const GlobalDecl &gd) {
// name to make it clear it's not the actual builtin.
auto fn = cast<cir::FuncOp>(curFn);
if (fn.getName() != fdInlineName && onlyHasInlineBuiltinDeclaration(fd)) {
- cgm.errorNYI("Inline only builtin function calls");
+ cir::FuncOp clone =
+ mlir::cast_or_null<cir::FuncOp>(cgm.getGlobalValue(fdInlineName));
+
+ if (!clone) {
+      // Create a forward declaration; the body will be generated in
+      // generateCode when the function definition is processed.
+ cir::FuncOp calleeFunc = emitFunctionDeclPointer(cgm, gd);
+ mlir::OpBuilder::InsertionGuard guard(builder);
+ builder.setInsertionPointToStart(cgm.getModule().getBody());
+
+ clone = builder.create<cir::FuncOp>(calleeFunc.getLoc(), fdInlineName,
+ calleeFunc.getFunctionType());
+ clone.setLinkageAttr(cir::GlobalLinkageKindAttr::get(
+ &cgm.getMLIRContext(), cir::GlobalLinkageKind::InternalLinkage));
+ clone.setSymVisibility("private");
+ clone.setInlineKindAttr(cir::InlineAttr::get(
+ &cgm.getMLIRContext(), cir::InlineKind::AlwaysInline));
+ }
+ return CIRGenCallee::forDirect(clone, gd);
}
// Replaceable builtins provide their own implementation of a builtin. If we
diff --git a/clang/lib/CIR/CodeGen/CIRGenExprAggregate.cpp b/clang/lib/CIR/CodeGen/CIRGenExprAggregate.cpp
index 568cbdb..d6d226b 100644
--- a/clang/lib/CIR/CodeGen/CIRGenExprAggregate.cpp
+++ b/clang/lib/CIR/CodeGen/CIRGenExprAggregate.cpp
@@ -280,6 +280,7 @@ public:
void VisitUnaryDeref(UnaryOperator *e) { emitAggLoadOfLValue(e); }
void VisitStringLiteral(StringLiteral *e) { emitAggLoadOfLValue(e); }
void VisitCompoundLiteralExpr(CompoundLiteralExpr *e);
+
void VisitPredefinedExpr(const PredefinedExpr *e) {
cgf.cgm.errorNYI(e->getSourceRange(),
"AggExprEmitter: VisitPredefinedExpr");
@@ -670,7 +671,7 @@ void AggExprEmitter::emitNullInitializationToLValue(mlir::Location loc,
return;
}
- cgf.cgm.errorNYI("emitStoreThroughBitfieldLValue");
+ cgf.emitStoreThroughBitfieldLValue(RValue::get(null), lv);
return;
}
diff --git a/clang/lib/CIR/CodeGen/CIRGenExprCXX.cpp b/clang/lib/CIR/CodeGen/CIRGenExprCXX.cpp
index b1e9e76..fe9e210 100644
--- a/clang/lib/CIR/CodeGen/CIRGenExprCXX.cpp
+++ b/clang/lib/CIR/CodeGen/CIRGenExprCXX.cpp
@@ -306,6 +306,7 @@ static mlir::Value emitCXXNewAllocSize(CIRGenFunction &cgf, const CXXNewExpr *e,
mlir::cast<cir::IntAttr>(constNumElements).getValue();
unsigned numElementsWidth = count.getBitWidth();
+ bool hasAnyOverflow = false;
// The equivalent code in CodeGen/CGExprCXX.cpp handles these cases as
// overflow, but that should never happen. The size argument is implicitly
@@ -336,11 +337,22 @@ static mlir::Value emitCXXNewAllocSize(CIRGenFunction &cgf, const CXXNewExpr *e,
// Add in the cookie, and check whether it's overflowed.
if (cookieSize != 0) {
- cgf.cgm.errorNYI(e->getSourceRange(),
- "emitCXXNewAllocSize: array cookie");
+ // Save the current size without a cookie. This shouldn't be
+      // used if there was overflow.
+ sizeWithoutCookie = cgf.getBuilder().getConstInt(
+ loc, allocationSize.zextOrTrunc(sizeWidth));
+
+ allocationSize = allocationSize.uadd_ov(cookieSize, overflow);
+ hasAnyOverflow |= overflow;
}
- size = cgf.getBuilder().getConstInt(loc, allocationSize);
+ // On overflow, produce a -1 so operator new will fail
+ if (hasAnyOverflow) {
+ size =
+ cgf.getBuilder().getConstInt(loc, llvm::APInt::getAllOnes(sizeWidth));
+ } else {
+ size = cgf.getBuilder().getConstInt(loc, allocationSize);
+ }
} else {
// TODO: Handle the variable size case
cgf.cgm.errorNYI(e->getSourceRange(),
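For reference, a small standalone illustration of how llvm::APInt::uadd_ov reports the wrap-around that the code above converts into an all-ones size so operator new fails; nothing beyond APInt itself is assumed here:

#include "llvm/ADT/APInt.h"
#include <cstdint>

// Returns true when adding the cookie bytes to the element bytes would wrap
// a 64-bit size type, mirroring the hasAnyOverflow bookkeeping above.
bool allocationOverflows(uint64_t elementBytes, uint64_t cookieBytes) {
  bool overflow = false;
  llvm::APInt size(/*numBits=*/64, elementBytes);
  (void)size.uadd_ov(llvm::APInt(64, cookieBytes), overflow);
  return overflow;
}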
@@ -390,7 +402,50 @@ void CIRGenFunction::emitNewArrayInitializer(
if (!e->hasInitializer())
return;
- cgm.errorNYI(e->getSourceRange(), "emitNewArrayInitializer");
+ unsigned initListElements = 0;
+
+ const Expr *init = e->getInitializer();
+ const InitListExpr *ile = dyn_cast<InitListExpr>(init);
+ if (ile) {
+ cgm.errorNYI(ile->getSourceRange(), "emitNewArrayInitializer: init list");
+ return;
+ }
+
+ // If all elements have already been initialized, skip any further
+ // initialization.
+ auto constOp = mlir::dyn_cast<cir::ConstantOp>(numElements.getDefiningOp());
+ if (constOp) {
+ auto constIntAttr = mlir::dyn_cast<cir::IntAttr>(constOp.getValue());
+ // Just skip out if the constant count is zero.
+ if (constIntAttr && constIntAttr.getUInt() <= initListElements)
+ return;
+ }
+
+ assert(init && "have trailing elements to initialize but no initializer");
+
+ // If this is a constructor call, try to optimize it out, and failing that
+ // emit a single loop to initialize all remaining elements.
+ if (const CXXConstructExpr *cce = dyn_cast<CXXConstructExpr>(init)) {
+ CXXConstructorDecl *ctor = cce->getConstructor();
+ if (ctor->isTrivial()) {
+      // If the new expression did not specify value-initialization, then
+      // there is no initialization.
+ if (!cce->requiresZeroInitialization())
+ return;
+
+ cgm.errorNYI(cce->getSourceRange(),
+ "emitNewArrayInitializer: trivial ctor zero-init");
+ return;
+ }
+
+ cgm.errorNYI(cce->getSourceRange(),
+ "emitNewArrayInitializer: ctor initializer");
+ return;
+ }
+
+ cgm.errorNYI(init->getSourceRange(),
+ "emitNewArrayInitializer: unsupported initializer");
+ return;
}
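An assumed set of source-level examples to anchor the branches above (the exact AST shape of each construct can vary by type and language mode):

struct Pod {
  Pod() = default;
  int x;
};

void samples() {
  new Pod[4];              // no initializer: the early return at the top of
                           // emitNewArrayInitializer fires, nothing to emit
  // new Pod[4]();         // requiresZeroInitialization(): still errorNYI
  // new Pod[4]{{1}, {2}}; // InitListExpr initializer: still errorNYI
}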
static void emitNewInitializer(CIRGenFunction &cgf, const CXXNewExpr *e,
@@ -586,9 +641,6 @@ mlir::Value CIRGenFunction::emitCXXNewExpr(const CXXNewExpr *e) {
// If there is a brace-initializer, cannot allocate fewer elements than inits.
unsigned minElements = 0;
- if (e->isArray() && e->hasInitializer()) {
- cgm.errorNYI(e->getSourceRange(), "emitCXXNewExpr: array initializer");
- }
mlir::Value numElements = nullptr;
mlir::Value allocSizeWithoutCookie = nullptr;
@@ -667,8 +719,11 @@ mlir::Value CIRGenFunction::emitCXXNewExpr(const CXXNewExpr *e) {
!e->getOperatorDelete()->isReservedGlobalPlacementOperator())
cgm.errorNYI(e->getSourceRange(), "emitCXXNewExpr: operator delete");
- if (allocSize != allocSizeWithoutCookie)
- cgm.errorNYI(e->getSourceRange(), "emitCXXNewExpr: array with cookies");
+ if (allocSize != allocSizeWithoutCookie) {
+ assert(e->isArray());
+ allocation = cgm.getCXXABI().initializeArrayCookie(
+ *this, allocation, numElements, e, allocType);
+ }
mlir::Type elementTy;
if (e->isArray()) {
diff --git a/clang/lib/CIR/CodeGen/CIRGenExprScalar.cpp b/clang/lib/CIR/CodeGen/CIRGenExprScalar.cpp
index 138082b..33eb748 100644
--- a/clang/lib/CIR/CodeGen/CIRGenExprScalar.cpp
+++ b/clang/lib/CIR/CodeGen/CIRGenExprScalar.cpp
@@ -2041,8 +2041,9 @@ mlir::Value ScalarExprEmitter::VisitMemberExpr(MemberExpr *e) {
assert(!cir::MissingFeatures::tryEmitAsConstant());
Expr::EvalResult result;
if (e->EvaluateAsInt(result, cgf.getContext(), Expr::SE_AllowSideEffects)) {
- cgf.cgm.errorNYI(e->getSourceRange(), "Constant interger member expr");
- // Fall through to emit this as a non-constant access.
+ llvm::APSInt value = result.Val.getInt();
+ cgf.emitIgnoredExpr(e->getBase());
+ return builder.getConstInt(cgf.getLoc(e->getExprLoc()), value);
}
return emitLoadOfLValue(e);
}
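A small assumed example of the member expressions this now folds: the value is a compile-time constant, but the base may still have side effects, which is why emitIgnoredExpr runs on it before the constant is materialized:

struct Config {
  static const int limit = 64; // integral constant, usable without odr-use
};

Config *current();             // may have side effects

int f() {
  return current()->limit;     // folds to 64; current() is still evaluated
}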
diff --git a/clang/lib/CIR/CodeGen/CIRGenFunction.cpp b/clang/lib/CIR/CodeGen/CIRGenFunction.cpp
index 25a46df..d3c0d9f 100644
--- a/clang/lib/CIR/CodeGen/CIRGenFunction.cpp
+++ b/clang/lib/CIR/CodeGen/CIRGenFunction.cpp
@@ -551,6 +551,49 @@ cir::FuncOp CIRGenFunction::generateCode(clang::GlobalDecl gd, cir::FuncOp fn,
const auto funcDecl = cast<FunctionDecl>(gd.getDecl());
curGD = gd;
+ if (funcDecl->isInlineBuiltinDeclaration()) {
+ // When generating code for a builtin with an inline declaration, use a
+ // mangled name to hold the actual body, while keeping an external
+ // declaration in case the function pointer is referenced somewhere.
+ std::string fdInlineName = (cgm.getMangledName(funcDecl) + ".inline").str();
+ cir::FuncOp clone =
+ mlir::cast_or_null<cir::FuncOp>(cgm.getGlobalValue(fdInlineName));
+ if (!clone) {
+ mlir::OpBuilder::InsertionGuard guard(builder);
+ builder.setInsertionPoint(fn);
+ clone = builder.create<cir::FuncOp>(fn.getLoc(), fdInlineName,
+ fn.getFunctionType());
+ clone.setLinkage(cir::GlobalLinkageKind::InternalLinkage);
+ clone.setSymVisibility("private");
+ clone.setInlineKind(cir::InlineKind::AlwaysInline);
+ }
+ fn.setLinkage(cir::GlobalLinkageKind::ExternalLinkage);
+ fn.setSymVisibility("private");
+ fn = clone;
+ } else {
+ // Detect the unusual situation where an inline version is shadowed by a
+ // non-inline version. In that case we should pick the external one
+ // everywhere. That's GCC behavior too.
+ for (const FunctionDecl *pd = funcDecl->getPreviousDecl(); pd;
+ pd = pd->getPreviousDecl()) {
+ if (LLVM_UNLIKELY(pd->isInlineBuiltinDeclaration())) {
+ std::string inlineName = funcDecl->getName().str() + ".inline";
+ if (auto inlineFn = mlir::cast_or_null<cir::FuncOp>(
+ cgm.getGlobalValue(inlineName))) {
+ // Replace all uses of the .inline function with the regular function
+ // FIXME: This performs a linear walk over the module. Introduce some
+ // caching here.
+ if (inlineFn
+ .replaceAllSymbolUses(fn.getSymNameAttr(), cgm.getModule())
+ .failed())
+ llvm_unreachable("Failed to replace inline builtin symbol uses");
+ inlineFn.erase();
+ }
+ break;
+ }
+ }
+ }
+
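An assumed source-level sketch of the inline-builtin pattern handled here (the gnu_inline plus always_inline wrappers found in fortified libc headers; attribute spelling and signatures vary by platform). The wrapper body is emitted under the mangled ".inline" symbol, while the plain name stays an external declaration for address-taking uses:

// Illustrative only; not copied from any particular libc.
extern "C" inline __attribute__((always_inline, gnu_inline)) void *
memcpy(void *dst, const void *src, unsigned long n) {
  return __builtin_memcpy(dst, src, n);
}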
SourceLocation loc = funcDecl->getLocation();
Stmt *body = funcDecl->getBody();
SourceRange bodyRange =
diff --git a/clang/lib/CIR/CodeGen/CIRGenFunction.h b/clang/lib/CIR/CodeGen/CIRGenFunction.h
index 5a71126..e3b9b6a 100644
--- a/clang/lib/CIR/CodeGen/CIRGenFunction.h
+++ b/clang/lib/CIR/CodeGen/CIRGenFunction.h
@@ -60,11 +60,44 @@ private:
/// is where the next operations will be introduced.
CIRGenBuilderTy &builder;
+ /// A jump destination is an abstract label, branching to which may
+ /// require a jump out through normal cleanups.
+ struct JumpDest {
+ JumpDest() = default;
+ JumpDest(mlir::Block *block, EHScopeStack::stable_iterator depth = {},
+ unsigned index = 0)
+        : block(block), scopeDepth(depth), index(index) {}
+
+ bool isValid() const { return block != nullptr; }
+ mlir::Block *getBlock() const { return block; }
+ EHScopeStack::stable_iterator getScopeDepth() const { return scopeDepth; }
+ unsigned getDestIndex() const { return index; }
+
+ // This should be used cautiously.
+ void setScopeDepth(EHScopeStack::stable_iterator depth) {
+ scopeDepth = depth;
+ }
+
+ private:
+ mlir::Block *block = nullptr;
+ EHScopeStack::stable_iterator scopeDepth;
+    unsigned index = 0;
+ };
+
public:
/// The GlobalDecl for the current function being compiled or the global
/// variable currently being initialized.
clang::GlobalDecl curGD;
+  /// Unified return block.
+  /// In CIR this is a function rather than a plain member because each scope
+  /// might have its own associated return block.
+ JumpDest returnBlock(mlir::Block *retBlock) {
+ return getJumpDestInCurrentScope(retBlock);
+ }
+
+ unsigned nextCleanupDestIndex = 1;
+
/// The compiler-generated variable that holds the return value.
std::optional<mlir::Value> fnRetAlloca;
@@ -574,6 +607,16 @@ public:
}
};
+ /// The given basic block lies in the current EH scope, but may be a
+ /// target of a potentially scope-crossing jump; get a stable handle
+ /// to which we can perform this jump later.
+ /// CIRGen: this mostly tracks state for figuring out the proper scope
+  /// information; no actual branches are emitted.
+ JumpDest getJumpDestInCurrentScope(mlir::Block *target) {
+ return JumpDest(target, ehStack.getInnermostNormalCleanup(),
+ nextCleanupDestIndex++);
+ }
+
/// Perform the usual unary conversions on the specified expression and
/// compare the result against zero, returning an Int1Ty value.
mlir::Value evaluateExprAsBool(const clang::Expr *e);
@@ -954,6 +997,9 @@ public:
LexicalScope *parentScope = nullptr;
+ // Holds the actual value for ScopeKind::Try
+ cir::TryOp tryOp = nullptr;
+
// Only Regular is used at the moment. Support for other kinds will be
// added as the relevant statements/expressions are upstreamed.
enum Kind {
@@ -1013,6 +1059,10 @@ public:
void setAsGlobalInit() { scopeKind = Kind::GlobalInit; }
void setAsSwitch() { scopeKind = Kind::Switch; }
void setAsTernary() { scopeKind = Kind::Ternary; }
+ void setAsTry(cir::TryOp op) {
+ scopeKind = Kind::Try;
+ tryOp = op;
+ }
// Lazy create cleanup block or return what's available.
mlir::Block *getOrCreateCleanupBlock(mlir::OpBuilder &builder) {
@@ -1022,6 +1072,11 @@ public:
return cleanupBlock;
}
+ cir::TryOp getTry() {
+ assert(isTry());
+ return tryOp;
+ }
+
mlir::Block *getCleanupBlock(mlir::OpBuilder &builder) {
return cleanupBlock;
}
@@ -1209,6 +1264,8 @@ public:
LValue emitBinaryOperatorLValue(const BinaryOperator *e);
+ cir::BrOp emitBranchThroughCleanup(mlir::Location loc, JumpDest dest);
+
mlir::LogicalResult emitBreakStmt(const clang::BreakStmt &s);
RValue emitBuiltinExpr(const clang::GlobalDecl &gd, unsigned builtinID,
@@ -1348,6 +1405,13 @@ public:
mlir::LogicalResult emitCXXTryStmt(const clang::CXXTryStmt &s);
+ mlir::LogicalResult emitCXXTryStmtUnderScope(const clang::CXXTryStmt &s);
+
+ void enterCXXTryStmt(const CXXTryStmt &s, cir::TryOp tryOp,
+ bool isFnTryBlock = false);
+
+ void exitCXXTryStmt(const CXXTryStmt &s, bool isFnTryBlock = false);
+
void emitCtorPrologue(const clang::CXXConstructorDecl *ctor,
clang::CXXCtorType ctorType, FunctionArgList &args);
@@ -1595,6 +1659,10 @@ public:
bool buildingTopLevelCase);
mlir::LogicalResult emitSwitchStmt(const clang::SwitchStmt &s);
+ mlir::Value emitTargetBuiltinExpr(unsigned builtinID,
+ const clang::CallExpr *e,
+ ReturnValueSlot &returnValue);
+
/// Given a value and its clang type, returns the value casted to its memory
/// representation.
/// Note: CIR defers most of the special casting to the final lowering passes
@@ -1633,6 +1701,8 @@ public:
mlir::LogicalResult emitWhileStmt(const clang::WhileStmt &s);
+ mlir::Value emitX86BuiltinExpr(unsigned builtinID, const CallExpr *e);
+
/// Given an assignment `*lhs = rhs`, emit a test that checks if \p rhs is
   /// nonnull, if \p lhs is marked _Nonnull.
void emitNullabilityCheck(LValue lhs, mlir::Value rhs,
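A minimal usage sketch of the JumpDest plumbing declared above (the call sites are assumed for illustration): a return inside nested cleanup scopes records the target block plus the innermost normal cleanup, and the branch is routed through emitBranchThroughCleanup instead of a direct cir.br.

    // Sketch, following the declarations above:
    mlir::Block *retBlock = curLexScope->getOrCreateRetBlock(*this, loc);
    JumpDest dest = returnBlock(retBlock);  // block + innermost normal cleanup + index
    emitBranchThroughCleanup(loc, dest);    // threads the jump through active cleanups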
diff --git a/clang/lib/CIR/CodeGen/CIRGenItaniumCXXABI.cpp b/clang/lib/CIR/CodeGen/CIRGenItaniumCXXABI.cpp
index c184d4a..e620310 100644
--- a/clang/lib/CIR/CodeGen/CIRGenItaniumCXXABI.cpp
+++ b/clang/lib/CIR/CodeGen/CIRGenItaniumCXXABI.cpp
@@ -135,8 +135,14 @@ public:
cir::PointerType destCIRTy, bool isRefCast,
Address src) override;
- /**************************** RTTI Uniqueness ******************************/
+ Address initializeArrayCookie(CIRGenFunction &cgf, Address newPtr,
+ mlir::Value numElements, const CXXNewExpr *e,
+ QualType elementType) override;
+
protected:
+ CharUnits getArrayCookieSizeImpl(QualType elementType) override;
+
+ /**************************** RTTI Uniqueness ******************************/
/// Returns true if the ABI requires RTTI type_info objects to be unique
/// across a program.
virtual bool shouldRTTIBeUnique() const { return true; }
@@ -2003,3 +2009,70 @@ mlir::Value CIRGenItaniumCXXABI::emitDynamicCast(CIRGenFunction &cgf,
return cgf.getBuilder().createDynCast(loc, src.getPointer(), destCIRTy,
isRefCast, castInfo);
}
+
+/************************** Array allocation cookies **************************/
+
+CharUnits CIRGenItaniumCXXABI::getArrayCookieSizeImpl(QualType elementType) {
+ // The array cookie is a size_t; pad that up to the element alignment.
+ // The cookie is actually right-justified in that space.
+ return std::max(
+ cgm.getSizeSize(),
+ cgm.getASTContext().getPreferredTypeAlignInChars(elementType));
+}
+
+Address CIRGenItaniumCXXABI::initializeArrayCookie(CIRGenFunction &cgf,
+ Address newPtr,
+ mlir::Value numElements,
+ const CXXNewExpr *e,
+ QualType elementType) {
+ assert(requiresArrayCookie(e));
+
+ // TODO: When sanitizer support is implemented, we'll need to
+ // get the address space from `newPtr`.
+ assert(!cir::MissingFeatures::addressSpace());
+ assert(!cir::MissingFeatures::sanitizers());
+
+ ASTContext &ctx = cgm.getASTContext();
+ CharUnits sizeSize = cgf.getSizeSize();
+ mlir::Location loc = cgf.getLoc(e->getSourceRange());
+
+ // The size of the cookie.
+ CharUnits cookieSize =
+ std::max(sizeSize, ctx.getPreferredTypeAlignInChars(elementType));
+ assert(cookieSize == getArrayCookieSizeImpl(elementType));
+
+ cir::PointerType u8PtrTy = cgf.getBuilder().getUInt8PtrTy();
+ mlir::Value baseBytePtr =
+ cgf.getBuilder().createPtrBitcast(newPtr.getPointer(), u8PtrTy);
+
+ // Compute an offset to the cookie.
+ CharUnits cookieOffset = cookieSize - sizeSize;
+ mlir::Value cookiePtrValue = baseBytePtr;
+ if (!cookieOffset.isZero()) {
+ mlir::Value offsetOp = cgf.getBuilder().getSignedInt(
+ loc, cookieOffset.getQuantity(), /*width=*/32);
+ cookiePtrValue =
+ cgf.getBuilder().createPtrStride(loc, cookiePtrValue, offsetOp);
+ }
+
+ CharUnits baseAlignment = newPtr.getAlignment();
+ CharUnits cookiePtrAlignment = baseAlignment.alignmentAtOffset(cookieOffset);
+ Address cookiePtr(cookiePtrValue, u8PtrTy, cookiePtrAlignment);
+
+ // Write the number of elements into the appropriate slot.
+ Address numElementsPtr =
+ cookiePtr.withElementType(cgf.getBuilder(), cgf.SizeTy);
+ cgf.getBuilder().createStore(loc, numElements, numElementsPtr);
+
+ // Finally, compute a pointer to the actual data buffer by skipping
+ // over the cookie completely.
+ mlir::Value dataOffset =
+ cgf.getBuilder().getSignedInt(loc, cookieSize.getQuantity(),
+ /*width=*/32);
+ mlir::Value dataPtr =
+ cgf.getBuilder().createPtrStride(loc, baseBytePtr, dataOffset);
+ mlir::Value finalPtr =
+ cgf.getBuilder().createPtrBitcast(dataPtr, newPtr.getElementType());
+ CharUnits finalAlignment = baseAlignment.alignmentAtOffset(cookieSize);
+ return Address(finalPtr, newPtr.getElementType(), finalAlignment);
+}
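A worked example of the cookie layout computed above (the element type and the 8-byte size_t are assumptions for illustration):

    struct alignas(16) Elt { double d[2]; ~Elt(); };  // non-trivial dtor => cookie required
    Elt *make(unsigned n) {
      // cookieSize   = max(sizeof(size_t), preferred alignment) = max(8, 16) = 16
      // cookieOffset = cookieSize - sizeof(size_t)              = 16 - 8     = 8
      // Layout: 8 bytes of padding, the 8-byte element count right-justified in
      // the cookie, then the first element at base + cookieSize (still 16-aligned).
      return new Elt[n];
    }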
diff --git a/clang/lib/CIR/CodeGen/CIRGenModule.cpp b/clang/lib/CIR/CodeGen/CIRGenModule.cpp
index 127f763..6b29373 100644
--- a/clang/lib/CIR/CodeGen/CIRGenModule.cpp
+++ b/clang/lib/CIR/CodeGen/CIRGenModule.cpp
@@ -102,7 +102,7 @@ CIRGenModule::CIRGenModule(mlir::MLIRContext &mlirContext,
// TODO(CIR): Should be updated once TypeSizeInfoAttr is upstreamed
const unsigned sizeTypeSize =
astContext.getTypeSize(astContext.getSignedSizeType());
- SizeAlignInBytes = astContext.toCharUnitsFromBits(sizeTypeSize).getQuantity();
+ SizeSizeInBytes = astContext.toCharUnitsFromBits(sizeTypeSize).getQuantity();
// In CIRGenTypeCache, UIntPtrTy and SizeType are fields of the same union
UIntPtrTy =
cir::IntType::get(&getMLIRContext(), sizeTypeSize, /*isSigned=*/false);
@@ -1917,6 +1917,17 @@ void CIRGenModule::setFunctionAttributes(GlobalDecl globalDecl,
const Decl *decl = globalDecl.getDecl();
func.setGlobalVisibilityAttr(getGlobalVisibilityAttrFromDecl(decl));
}
+
+ // If we plan on emitting this inline builtin, we can't treat it as a builtin.
+ const auto *fd = cast<FunctionDecl>(globalDecl.getDecl());
+ if (fd->isInlineBuiltinDeclaration()) {
+ const FunctionDecl *fdBody;
+ bool hasBody = fd->hasBody(fdBody);
+ (void)hasBody;
+ assert(hasBody && "Inline builtin declarations should always have an "
+ "available body!");
+ assert(!cir::MissingFeatures::attributeNoBuiltin());
+ }
}
void CIRGenModule::setCIRFunctionAttributesForDefinition(
diff --git a/clang/lib/CIR/CodeGen/CIRGenStmt.cpp b/clang/lib/CIR/CodeGen/CIRGenStmt.cpp
index ad8c4d0..f486c46 100644
--- a/clang/lib/CIR/CodeGen/CIRGenStmt.cpp
+++ b/clang/lib/CIR/CodeGen/CIRGenStmt.cpp
@@ -446,54 +446,89 @@ mlir::LogicalResult CIRGenFunction::emitReturnStmt(const ReturnStmt &s) {
mlir::Location loc = getLoc(s.getSourceRange());
const Expr *rv = s.getRetValue();
- if (getContext().getLangOpts().ElideConstructors && s.getNRVOCandidate() &&
- s.getNRVOCandidate()->isNRVOVariable()) {
- assert(!cir::MissingFeatures::openMP());
- assert(!cir::MissingFeatures::nrvo());
- } else if (!rv) {
- // No return expression. Do nothing.
- } else if (rv->getType()->isVoidType()) {
- // Make sure not to return anything, but evaluate the expression
- // for side effects.
- if (rv) {
- emitAnyExpr(rv);
+ RunCleanupsScope cleanupScope(*this);
+ bool createNewScope = false;
+ if (const auto *ewc = dyn_cast_or_null<ExprWithCleanups>(rv)) {
+ rv = ewc->getSubExpr();
+ createNewScope = true;
+ }
+
+ auto handleReturnVal = [&]() {
+ if (getContext().getLangOpts().ElideConstructors && s.getNRVOCandidate() &&
+ s.getNRVOCandidate()->isNRVOVariable()) {
+ assert(!cir::MissingFeatures::openMP());
+ assert(!cir::MissingFeatures::nrvo());
+ } else if (!rv) {
+ // No return expression. Do nothing.
+ } else if (rv->getType()->isVoidType()) {
+ // Make sure not to return anything, but evaluate the expression
+ // for side effects.
+ if (rv) {
+ emitAnyExpr(rv);
+ }
+ } else if (cast<FunctionDecl>(curGD.getDecl())
+ ->getReturnType()
+ ->isReferenceType()) {
+ // If this function returns a reference, take the address of the
+ // expression rather than the value.
+ RValue result = emitReferenceBindingToExpr(rv);
+ builder.CIRBaseBuilderTy::createStore(loc, result.getValue(),
+ *fnRetAlloca);
+ } else {
+ mlir::Value value = nullptr;
+ switch (CIRGenFunction::getEvaluationKind(rv->getType())) {
+ case cir::TEK_Scalar:
+ value = emitScalarExpr(rv);
+ if (value) { // Change this to an assert once emitScalarExpr is complete
+ builder.CIRBaseBuilderTy::createStore(loc, value, *fnRetAlloca);
+ }
+ break;
+ case cir::TEK_Complex:
+ emitComplexExprIntoLValue(rv,
+ makeAddrLValue(returnValue, rv->getType()),
+ /*isInit=*/true);
+ break;
+ case cir::TEK_Aggregate:
+ assert(!cir::MissingFeatures::aggValueSlotGC());
+ emitAggExpr(rv, AggValueSlot::forAddr(returnValue, Qualifiers(),
+ AggValueSlot::IsDestructed,
+ AggValueSlot::IsNotAliased,
+ getOverlapForReturnValue()));
+ break;
+ }
}
- } else if (cast<FunctionDecl>(curGD.getDecl())
- ->getReturnType()
- ->isReferenceType()) {
- // If this function returns a reference, take the address of the
- // expression rather than the value.
- RValue result = emitReferenceBindingToExpr(rv);
- builder.CIRBaseBuilderTy::createStore(loc, result.getValue(), *fnRetAlloca);
+ };
+
+ if (!createNewScope) {
+ handleReturnVal();
} else {
- mlir::Value value = nullptr;
- switch (CIRGenFunction::getEvaluationKind(rv->getType())) {
- case cir::TEK_Scalar:
- value = emitScalarExpr(rv);
- if (value) { // Change this to an assert once emitScalarExpr is complete
- builder.CIRBaseBuilderTy::createStore(loc, value, *fnRetAlloca);
- }
- break;
- case cir::TEK_Complex:
- emitComplexExprIntoLValue(rv, makeAddrLValue(returnValue, rv->getType()),
- /*isInit=*/true);
- break;
- case cir::TEK_Aggregate:
- assert(!cir::MissingFeatures::aggValueSlotGC());
- emitAggExpr(rv, AggValueSlot::forAddr(returnValue, Qualifiers(),
- AggValueSlot::IsDestructed,
- AggValueSlot::IsNotAliased,
- getOverlapForReturnValue()));
- break;
+ mlir::Location scopeLoc =
+ getLoc(rv ? rv->getSourceRange() : s.getSourceRange());
+    // First create the cir.scope and emit its body afterwards. Otherwise, any
+    // CIRGen dispatched by `handleReturnVal()` might need to manipulate blocks
+    // and look into parents, which are all unlinked.
+ mlir::OpBuilder::InsertPoint scopeBody;
+ cir::ScopeOp::create(builder, scopeLoc, /*scopeBuilder=*/
+ [&](mlir::OpBuilder &b, mlir::Location loc) {
+ scopeBody = b.saveInsertionPoint();
+ });
+ {
+ mlir::OpBuilder::InsertionGuard guard(builder);
+ builder.restoreInsertionPoint(scopeBody);
+ CIRGenFunction::LexicalScope lexScope{*this, scopeLoc,
+ builder.getInsertionBlock()};
+ handleReturnVal();
}
}
+ cleanupScope.forceCleanup();
+
+  // In CIR we might have returns in different scopes.
+  // FIXME(cir): the cleanup code handles the actual return emission; the logic
+  // should try to match traditional codegen more closely (to the extent
+  // possible).
auto *retBlock = curLexScope->getOrCreateRetBlock(*this, loc);
- // This should emit a branch through the cleanup block if one exists.
- builder.create<cir::BrOp>(loc, retBlock);
- assert(!cir::MissingFeatures::emitBranchThroughCleanup());
- if (ehStack.stable_begin() != currentCleanupStackDepth)
- cgm.errorNYI(s.getSourceRange(), "return with cleanup stack");
+ emitBranchThroughCleanup(loc, returnBlock(retBlock));
// Insert the new block to continue codegen after branch to ret block.
builder.createBlock(builder.getBlock()->getParent());
@@ -1063,5 +1098,5 @@ void CIRGenFunction::emitReturnOfRValue(mlir::Location loc, RValue rv,
assert(!cir::MissingFeatures::emitBranchThroughCleanup());
builder.create<cir::BrOp>(loc, retBlock);
if (ehStack.stable_begin() != currentCleanupStackDepth)
- cgm.errorNYI(loc, "return with cleanup stack");
+ cgm.errorNYI(loc, "return of r-value with cleanup stack");
}
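A small source-level sketch (assumed, not from this patch) of why emitReturnStmt now peels ExprWithCleanups and emits the value inside its own cir.scope: the temporary bound below must be destroyed before branching to the unified return block.

    struct Guard { Guard(); ~Guard(); };     // non-trivial destructor => cleanup
    int consume(const Guard &g = Guard());
    int f() {
      return consume();  // the return value is wrapped in an ExprWithCleanups; the
                         // Guard temporary is cleaned up inside the scope, then the
                         // branch is threaded through cleanups to the return block.
    }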
diff --git a/clang/lib/CIR/CodeGen/CIRGenTypeCache.h b/clang/lib/CIR/CodeGen/CIRGenTypeCache.h
index b5612d9..ff5842c 100644
--- a/clang/lib/CIR/CodeGen/CIRGenTypeCache.h
+++ b/clang/lib/CIR/CodeGen/CIRGenTypeCache.h
@@ -74,11 +74,17 @@ struct CIRGenTypeCache {
unsigned char PointerSizeInBytes;
};
- /// The alignment of size_t.
- unsigned char SizeAlignInBytes;
+ /// The size and alignment of size_t.
+ union {
+ unsigned char SizeSizeInBytes; // sizeof(size_t)
+ unsigned char SizeAlignInBytes;
+ };
cir::TargetAddressSpaceAttr cirAllocaAddressSpace;
+ clang::CharUnits getSizeSize() const {
+ return clang::CharUnits::fromQuantity(SizeSizeInBytes);
+ }
clang::CharUnits getSizeAlign() const {
return clang::CharUnits::fromQuantity(SizeAlignInBytes);
}
diff --git a/clang/lib/CIR/CodeGen/CMakeLists.txt b/clang/lib/CIR/CodeGen/CMakeLists.txt
index 36db4bd..7c31bea 100644
--- a/clang/lib/CIR/CodeGen/CMakeLists.txt
+++ b/clang/lib/CIR/CodeGen/CMakeLists.txt
@@ -11,13 +11,14 @@ add_clang_library(clangCIR
CIRGenAsm.cpp
CIRGenAtomic.cpp
CIRGenBuilder.cpp
+ CIRGenBuiltin.cpp
+ CIRGenBuiltinX86.cpp
CIRGenCall.cpp
CIRGenClass.cpp
CIRGenCleanup.cpp
CIRGenCoroutine.cpp
CIRGenCXX.cpp
CIRGenCXXABI.cpp
- CIRGenBuiltin.cpp
CIRGenDecl.cpp
CIRGenDeclCXX.cpp
CIRGenDeclOpenACC.cpp
diff --git a/clang/lib/CIR/CodeGen/EHScopeStack.h b/clang/lib/CIR/CodeGen/EHScopeStack.h
index 67a72f5..4198c23 100644
--- a/clang/lib/CIR/CodeGen/EHScopeStack.h
+++ b/clang/lib/CIR/CodeGen/EHScopeStack.h
@@ -18,12 +18,38 @@
#ifndef CLANG_LIB_CIR_CODEGEN_EHSCOPESTACK_H
#define CLANG_LIB_CIR_CODEGEN_EHSCOPESTACK_H
+#include "clang/CIR/Dialect/IR/CIRDialect.h"
#include "llvm/ADT/SmallVector.h"
namespace clang::CIRGen {
class CIRGenFunction;
+/// A branch fixup. These are required when emitting a goto to a
+/// label which hasn't been emitted yet. The goto is optimistically
+/// emitted as a branch to the basic block for the label, and (if it
+/// occurs in a scope with non-trivial cleanups) a fixup is added to
+/// the innermost cleanup. When a (normal) cleanup is popped, any
+/// unresolved fixups in that scope are threaded through the cleanup.
+struct BranchFixup {
+ /// The block containing the terminator which needs to be modified
+ /// into a switch if this fixup is resolved into the current scope.
+ /// If null, LatestBranch points directly to the destination.
+ mlir::Block *optimisticBranchBlock = nullptr;
+
+ /// The ultimate destination of the branch.
+ ///
+ /// This can be set to null to indicate that this fixup was
+ /// successfully resolved.
+ mlir::Block *destination = nullptr;
+
+ /// The destination index value.
+ unsigned destinationIndex = 0;
+
+ /// The initial branch of the fixup.
+ cir::BrOp initialBranch = {};
+};
+
enum CleanupKind : unsigned {
/// Denotes a cleanup that should run when a scope is exited using exceptional
  /// control flow (e.g. a throw statement leading to stack unwinding).
@@ -126,9 +152,31 @@ private:
/// The first valid entry in the buffer.
char *startOfData = nullptr;
+ /// The innermost normal cleanup on the stack.
+ stable_iterator innermostNormalCleanup = stable_end();
+
  /// The CGF this stack belongs to.
CIRGenFunction *cgf = nullptr;
+ /// The current set of branch fixups. A branch fixup is a jump to
+ /// an as-yet unemitted label, i.e. a label for which we don't yet
+ /// know the EH stack depth. Whenever we pop a cleanup, we have
+ /// to thread all the current branch fixups through it.
+ ///
+ /// Fixups are recorded as the Use of the respective branch or
+ /// switch statement. The use points to the final destination.
+ /// When popping out of a cleanup, these uses are threaded through
+ /// the cleanup and adjusted to point to the new cleanup.
+ ///
+ /// Note that branches are allowed to jump into protected scopes
+ /// in certain situations; e.g. the following code is legal:
+ /// struct A { ~A(); }; // trivial ctor, non-trivial dtor
+ /// goto foo;
+ /// A a;
+ /// foo:
+ /// bar();
+ llvm::SmallVector<BranchFixup> branchFixups;
+
// This class uses a custom allocator for maximum efficiency because cleanups
// are allocated and freed very frequently. It's basically a bump pointer
// allocator, but we can't use LLVM's BumpPtrAllocator because we use offsets
@@ -155,9 +203,29 @@ public:
/// Pops a cleanup scope off the stack. This is private to CIRGenCleanup.cpp.
void popCleanup();
+ /// Push a set of catch handlers on the stack. The catch is
+ /// uninitialized and will need to have the given number of handlers
+ /// set on it.
+ class EHCatchScope *pushCatch(unsigned numHandlers);
+
+ /// Pops a catch scope off the stack. This is private to CIRGenException.cpp.
+ void popCatch();
+
/// Determines whether the exception-scopes stack is empty.
bool empty() const { return startOfData == endOfBuffer; }
+ /// Determines whether there are any normal cleanups on the stack.
+ bool hasNormalCleanups() const {
+ return innermostNormalCleanup != stable_end();
+ }
+
+ /// Returns the innermost normal cleanup on the stack, or
+ /// stable_end() if there are no normal cleanups.
+ stable_iterator getInnermostNormalCleanup() const {
+ return innermostNormalCleanup;
+ }
+ stable_iterator getInnermostActiveNormalCleanup() const;
+
/// An unstable reference to a scope-stack depth. Invalidated by
/// pushes but not pops.
class iterator;
@@ -172,12 +240,30 @@ public:
return stable_iterator(endOfBuffer - startOfData);
}
+ /// Create a stable reference to the bottom of the EH stack.
+ static stable_iterator stable_end() { return stable_iterator(0); }
+
  /// Turn a stable reference to a scope depth into an unstable pointer
/// to the EH stack.
iterator find(stable_iterator savePoint) const;
- /// Create a stable reference to the bottom of the EH stack.
- static stable_iterator stable_end() { return stable_iterator(0); }
+ /// Add a branch fixup to the current cleanup scope.
+ BranchFixup &addBranchFixup() {
+ assert(hasNormalCleanups() && "adding fixup in scope without cleanups");
+ branchFixups.push_back(BranchFixup());
+ return branchFixups.back();
+ }
+
+ unsigned getNumBranchFixups() const { return branchFixups.size(); }
+ BranchFixup &getBranchFixup(unsigned i) {
+ assert(i < getNumBranchFixups());
+ return branchFixups[i];
+ }
+
+ /// Pops lazily-removed fixups from the end of the list. This
+ /// should only be called by procedures which have just popped a
+ /// cleanup or resolved one or more fixups.
+ void popNullFixups();
};
} // namespace clang::CIRGen
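A sketch (assumed example) of the situation a BranchFixup records: a branch toward a label whose block has not been emitted yet, leaving a scope with a pending cleanup.

    struct A { ~A(); };
    void bar();
    void f(bool c) {
      {
        A a;
        if (c)
          goto done;  // "done" is not emitted yet: the branch is emitted optimistically
                      // and a fixup is attached to the innermost normal cleanup (~A()).
      }
    done:
      bar();
    }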
diff --git a/clang/lib/CIR/Dialect/IR/CIRDialect.cpp b/clang/lib/CIR/Dialect/IR/CIRDialect.cpp
index ed606b7..fa180f5 100644
--- a/clang/lib/CIR/Dialect/IR/CIRDialect.cpp
+++ b/clang/lib/CIR/Dialect/IR/CIRDialect.cpp
@@ -2941,6 +2941,21 @@ mlir::LogicalResult cir::ThrowOp::verify() {
}
//===----------------------------------------------------------------------===//
+// AtomicFetchOp
+//===----------------------------------------------------------------------===//
+
+LogicalResult cir::AtomicFetchOp::verify() {
+ if (getBinop() != cir::AtomicFetchKind::Add &&
+ getBinop() != cir::AtomicFetchKind::Sub &&
+ getBinop() != cir::AtomicFetchKind::Max &&
+ getBinop() != cir::AtomicFetchKind::Min &&
+ !mlir::isa<cir::IntType>(getVal().getType()))
+ return emitError("only atomic add, sub, max, and min operation could "
+ "operate on floating-point values");
+ return success();
+}
+
+//===----------------------------------------------------------------------===//
// TypeInfoAttr
//===----------------------------------------------------------------------===//
diff --git a/clang/lib/CIR/Dialect/Transforms/FlattenCFG.cpp b/clang/lib/CIR/Dialect/Transforms/FlattenCFG.cpp
index 8589a2e..46bd186 100644
--- a/clang/lib/CIR/Dialect/Transforms/FlattenCFG.cpp
+++ b/clang/lib/CIR/Dialect/Transforms/FlattenCFG.cpp
@@ -551,10 +551,100 @@ public:
}
};
+class CIRTryOpFlattening : public mlir::OpRewritePattern<cir::TryOp> {
+public:
+ using OpRewritePattern<cir::TryOp>::OpRewritePattern;
+
+ mlir::Block *buildTryBody(cir::TryOp tryOp,
+ mlir::PatternRewriter &rewriter) const {
+ // Split the current block before the TryOp to create the inlining
+ // point.
+ mlir::Block *beforeTryScopeBlock = rewriter.getInsertionBlock();
+ mlir::Block *afterTry =
+ rewriter.splitBlock(beforeTryScopeBlock, rewriter.getInsertionPoint());
+
+ // Inline body region.
+ mlir::Block *beforeBody = &tryOp.getTryRegion().front();
+ rewriter.inlineRegionBefore(tryOp.getTryRegion(), afterTry);
+
+ // Branch into the body of the region.
+ rewriter.setInsertionPointToEnd(beforeTryScopeBlock);
+ cir::BrOp::create(rewriter, tryOp.getLoc(), mlir::ValueRange(), beforeBody);
+ return afterTry;
+ }
+
+ void buildHandlers(cir::TryOp tryOp, mlir::PatternRewriter &rewriter,
+ mlir::Block *afterBody, mlir::Block *afterTry,
+ SmallVectorImpl<cir::CallOp> &callsToRewrite,
+ SmallVectorImpl<mlir::Block *> &landingPads) const {
+ // Replace the tryOp return with a branch that jumps out of the body.
+ rewriter.setInsertionPointToEnd(afterBody);
+
+ mlir::Block *beforeCatch = rewriter.getInsertionBlock();
+ rewriter.setInsertionPointToEnd(beforeCatch);
+
+ // Check if the terminator is a YieldOp because there could be another
+ // terminator, e.g. unreachable
+ if (auto tryBodyYield = dyn_cast<cir::YieldOp>(afterBody->getTerminator()))
+ rewriter.replaceOpWithNewOp<cir::BrOp>(tryBodyYield, afterTry);
+
+ mlir::ArrayAttr handlers = tryOp.getHandlerTypesAttr();
+ if (!handlers || handlers.empty())
+ return;
+
+ llvm_unreachable("TryOpFlattening buildHandlers with CallsOp is NYI");
+ }
+
+ mlir::LogicalResult
+ matchAndRewrite(cir::TryOp tryOp,
+ mlir::PatternRewriter &rewriter) const override {
+ mlir::OpBuilder::InsertionGuard guard(rewriter);
+ mlir::Block *afterBody = &tryOp.getTryRegion().back();
+
+ // Grab the collection of `cir.call exception`s to rewrite to
+ // `cir.try_call`.
+ llvm::SmallVector<cir::CallOp, 4> callsToRewrite;
+ tryOp.getTryRegion().walk([&](CallOp op) {
+ // Only grab calls within immediate closest TryOp scope.
+ if (op->getParentOfType<cir::TryOp>() != tryOp)
+ return;
+ assert(!cir::MissingFeatures::opCallExceptionAttr());
+ callsToRewrite.push_back(op);
+ });
+
+ if (!callsToRewrite.empty())
+ llvm_unreachable(
+ "TryOpFlattening with try block that contains CallOps is NYI");
+
+ // Build try body.
+ mlir::Block *afterTry = buildTryBody(tryOp, rewriter);
+
+ // Build handlers.
+ llvm::SmallVector<mlir::Block *, 4> landingPads;
+ buildHandlers(tryOp, rewriter, afterBody, afterTry, callsToRewrite,
+ landingPads);
+
+ rewriter.eraseOp(tryOp);
+
+ assert((landingPads.size() == callsToRewrite.size()) &&
+ "expected matching number of entries");
+
+ // Quick block cleanup: no indirection to the post try block.
+ auto brOp = dyn_cast<cir::BrOp>(afterTry->getTerminator());
+ if (brOp && brOp.getDest()->hasNoPredecessors()) {
+ mlir::Block *srcBlock = brOp.getDest();
+ rewriter.eraseOp(brOp);
+ rewriter.mergeBlocks(srcBlock, afterTry);
+ }
+
+ return mlir::success();
+ }
+};
+
void populateFlattenCFGPatterns(RewritePatternSet &patterns) {
patterns
.add<CIRIfFlattening, CIRLoopOpInterfaceFlattening, CIRScopeOpFlattening,
- CIRSwitchOpFlattening, CIRTernaryOpFlattening>(
+ CIRSwitchOpFlattening, CIRTernaryOpFlattening, CIRTryOpFlattening>(
patterns.getContext());
}
@@ -568,7 +658,7 @@ void CIRFlattenCFGPass::runOnOperation() {
assert(!cir::MissingFeatures::ifOp());
assert(!cir::MissingFeatures::switchOp());
assert(!cir::MissingFeatures::tryOp());
- if (isa<IfOp, ScopeOp, SwitchOp, LoopOpInterface, TernaryOp>(op))
+ if (isa<IfOp, ScopeOp, SwitchOp, LoopOpInterface, TernaryOp, TryOp>(op))
ops.push_back(op);
});
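A sketch (assumed input, given that calls inside the body are still NYI above) of a try statement this pattern can already flatten: the body region is inlined, its yield is rewritten into a branch past the try, and the unused handler blocks are dropped.

    int f(bool b) {
      int x = 0;
      try {            // body contains no calls
        x = b ? 1 : 2;
      } catch (...) {
        x = -1;
      }
      return x;
    }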
diff --git a/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp b/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp
index dc26dac..bb75f2d 100644
--- a/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp
+++ b/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp
@@ -730,6 +730,187 @@ mlir::LogicalResult CIRToLLVMAtomicXchgOpLowering::matchAndRewrite(
return mlir::success();
}
+mlir::LogicalResult CIRToLLVMAtomicTestAndSetOpLowering::matchAndRewrite(
+ cir::AtomicTestAndSetOp op, OpAdaptor adaptor,
+ mlir::ConversionPatternRewriter &rewriter) const {
+ assert(!cir::MissingFeatures::atomicSyncScopeID());
+
+ mlir::LLVM::AtomicOrdering llvmOrder = getLLVMMemOrder(op.getMemOrder());
+
+ auto one = mlir::LLVM::ConstantOp::create(rewriter, op.getLoc(),
+ rewriter.getI8Type(), 1);
+ auto rmw = mlir::LLVM::AtomicRMWOp::create(
+ rewriter, op.getLoc(), mlir::LLVM::AtomicBinOp::xchg, adaptor.getPtr(),
+ one, llvmOrder, /*syncscope=*/llvm::StringRef(),
+ adaptor.getAlignment().value_or(0), op.getIsVolatile());
+
+ auto zero = mlir::LLVM::ConstantOp::create(rewriter, op.getLoc(),
+ rewriter.getI8Type(), 0);
+ auto cmp = mlir::LLVM::ICmpOp::create(
+ rewriter, op.getLoc(), mlir::LLVM::ICmpPredicate::ne, rmw, zero);
+
+ rewriter.replaceOp(op, cmp);
+ return mlir::success();
+}
+
+mlir::LogicalResult CIRToLLVMAtomicClearOpLowering::matchAndRewrite(
+ cir::AtomicClearOp op, OpAdaptor adaptor,
+ mlir::ConversionPatternRewriter &rewriter) const {
+ assert(!cir::MissingFeatures::atomicSyncScopeID());
+
+ mlir::LLVM::AtomicOrdering llvmOrder = getLLVMMemOrder(op.getMemOrder());
+ auto zero = mlir::LLVM::ConstantOp::create(rewriter, op.getLoc(),
+ rewriter.getI8Type(), 0);
+ auto store = mlir::LLVM::StoreOp::create(
+ rewriter, op.getLoc(), zero, adaptor.getPtr(),
+ adaptor.getAlignment().value_or(0), op.getIsVolatile(),
+ /*isNonTemporal=*/false, /*isInvariantGroup=*/false, llvmOrder);
+
+ rewriter.replaceOp(op, store);
+ return mlir::success();
+}
+
+static mlir::LLVM::AtomicBinOp
+getLLVMAtomicBinOp(cir::AtomicFetchKind k, bool isInt, bool isSignedInt) {
+ switch (k) {
+ case cir::AtomicFetchKind::Add:
+ return isInt ? mlir::LLVM::AtomicBinOp::add : mlir::LLVM::AtomicBinOp::fadd;
+ case cir::AtomicFetchKind::Sub:
+ return isInt ? mlir::LLVM::AtomicBinOp::sub : mlir::LLVM::AtomicBinOp::fsub;
+ case cir::AtomicFetchKind::And:
+ return mlir::LLVM::AtomicBinOp::_and;
+ case cir::AtomicFetchKind::Xor:
+ return mlir::LLVM::AtomicBinOp::_xor;
+ case cir::AtomicFetchKind::Or:
+ return mlir::LLVM::AtomicBinOp::_or;
+ case cir::AtomicFetchKind::Nand:
+ return mlir::LLVM::AtomicBinOp::nand;
+ case cir::AtomicFetchKind::Max: {
+ if (!isInt)
+ return mlir::LLVM::AtomicBinOp::fmax;
+ return isSignedInt ? mlir::LLVM::AtomicBinOp::max
+ : mlir::LLVM::AtomicBinOp::umax;
+ }
+ case cir::AtomicFetchKind::Min: {
+ if (!isInt)
+ return mlir::LLVM::AtomicBinOp::fmin;
+ return isSignedInt ? mlir::LLVM::AtomicBinOp::min
+ : mlir::LLVM::AtomicBinOp::umin;
+ }
+ }
+ llvm_unreachable("Unknown atomic fetch opcode");
+}
+
+static llvm::StringLiteral getLLVMBinop(cir::AtomicFetchKind k, bool isInt) {
+ switch (k) {
+ case cir::AtomicFetchKind::Add:
+ return isInt ? mlir::LLVM::AddOp::getOperationName()
+ : mlir::LLVM::FAddOp::getOperationName();
+ case cir::AtomicFetchKind::Sub:
+ return isInt ? mlir::LLVM::SubOp::getOperationName()
+ : mlir::LLVM::FSubOp::getOperationName();
+ case cir::AtomicFetchKind::And:
+ return mlir::LLVM::AndOp::getOperationName();
+ case cir::AtomicFetchKind::Xor:
+ return mlir::LLVM::XOrOp::getOperationName();
+ case cir::AtomicFetchKind::Or:
+ return mlir::LLVM::OrOp::getOperationName();
+ case cir::AtomicFetchKind::Nand:
+    // There's no nand binop in LLVM; this is fixed up later with a not.
+ return mlir::LLVM::AndOp::getOperationName();
+ case cir::AtomicFetchKind::Max:
+ case cir::AtomicFetchKind::Min:
+ llvm_unreachable("handled in buildMinMaxPostOp");
+ }
+ llvm_unreachable("Unknown atomic fetch opcode");
+}
+
+mlir::Value CIRToLLVMAtomicFetchOpLowering::buildPostOp(
+ cir::AtomicFetchOp op, OpAdaptor adaptor,
+ mlir::ConversionPatternRewriter &rewriter, mlir::Value rmwVal,
+ bool isInt) const {
+ SmallVector<mlir::Value> atomicOperands = {rmwVal, adaptor.getVal()};
+ SmallVector<mlir::Type> atomicResTys = {rmwVal.getType()};
+ return rewriter
+ .create(op.getLoc(),
+ rewriter.getStringAttr(getLLVMBinop(op.getBinop(), isInt)),
+ atomicOperands, atomicResTys, {})
+ ->getResult(0);
+}
+
+mlir::Value CIRToLLVMAtomicFetchOpLowering::buildMinMaxPostOp(
+ cir::AtomicFetchOp op, OpAdaptor adaptor,
+ mlir::ConversionPatternRewriter &rewriter, mlir::Value rmwVal, bool isInt,
+ bool isSigned) const {
+ mlir::Location loc = op.getLoc();
+
+ if (!isInt) {
+ if (op.getBinop() == cir::AtomicFetchKind::Max)
+ return mlir::LLVM::MaxNumOp::create(rewriter, loc, rmwVal,
+ adaptor.getVal());
+ return mlir::LLVM::MinNumOp::create(rewriter, loc, rmwVal,
+ adaptor.getVal());
+ }
+
+ mlir::LLVM::ICmpPredicate pred;
+ if (op.getBinop() == cir::AtomicFetchKind::Max) {
+ pred = isSigned ? mlir::LLVM::ICmpPredicate::sgt
+ : mlir::LLVM::ICmpPredicate::ugt;
+ } else { // Min
+ pred = isSigned ? mlir::LLVM::ICmpPredicate::slt
+ : mlir::LLVM::ICmpPredicate::ult;
+ }
+ mlir::Value cmp = mlir::LLVM::ICmpOp::create(
+ rewriter, loc,
+ mlir::LLVM::ICmpPredicateAttr::get(rewriter.getContext(), pred), rmwVal,
+ adaptor.getVal());
+ return mlir::LLVM::SelectOp::create(rewriter, loc, cmp, rmwVal,
+ adaptor.getVal());
+}
+
+mlir::LogicalResult CIRToLLVMAtomicFetchOpLowering::matchAndRewrite(
+ cir::AtomicFetchOp op, OpAdaptor adaptor,
+ mlir::ConversionPatternRewriter &rewriter) const {
+ bool isInt = false;
+ bool isSignedInt = false;
+ if (auto intTy = mlir::dyn_cast<cir::IntType>(op.getVal().getType())) {
+ isInt = true;
+ isSignedInt = intTy.isSigned();
+ } else if (mlir::isa<cir::SingleType, cir::DoubleType>(
+ op.getVal().getType())) {
+ isInt = false;
+ } else {
+ return op.emitError() << "Unsupported type: " << op.getVal().getType();
+ }
+
+ mlir::LLVM::AtomicOrdering llvmOrder = getLLVMMemOrder(op.getMemOrder());
+ mlir::LLVM::AtomicBinOp llvmBinOp =
+ getLLVMAtomicBinOp(op.getBinop(), isInt, isSignedInt);
+ auto rmwVal = mlir::LLVM::AtomicRMWOp::create(rewriter, op.getLoc(),
+ llvmBinOp, adaptor.getPtr(),
+ adaptor.getVal(), llvmOrder);
+
+ mlir::Value result = rmwVal.getResult();
+ if (!op.getFetchFirst()) {
+ if (op.getBinop() == cir::AtomicFetchKind::Max ||
+ op.getBinop() == cir::AtomicFetchKind::Min)
+ result = buildMinMaxPostOp(op, adaptor, rewriter, rmwVal.getRes(), isInt,
+ isSignedInt);
+ else
+ result = buildPostOp(op, adaptor, rewriter, rmwVal.getRes(), isInt);
+
+    // Compensate for the lack of a nand binop in LLVM IR.
+ if (op.getBinop() == cir::AtomicFetchKind::Nand) {
+ auto negOne = mlir::LLVM::ConstantOp::create(rewriter, op.getLoc(),
+ result.getType(), -1);
+ result = mlir::LLVM::XOrOp::create(rewriter, op.getLoc(), result, negOne);
+ }
+ }
+
+ rewriter.replaceOp(op, result);
+ return mlir::success();
+}
+
mlir::LogicalResult CIRToLLVMBitClrsbOpLowering::matchAndRewrite(
cir::BitClrsbOp op, OpAdaptor adaptor,
mlir::ConversionPatternRewriter &rewriter) const {
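A scalar model (a sketch, not part of the lowering itself) of the post-ops rebuilt above when the CIR op wants the new value rather than the fetched one: atomicrmw always returns the old value, so nand is rebuilt as and followed by xor with -1, and integer max/min as a compare plus select.

    // Assumed helper names; the arithmetic mirrors buildPostOp/buildMinMaxPostOp.
    int nandFetchModel(int old, int val) { return (old & val) ^ -1; }      // ~(old & val)
    int smaxFetchModel(int old, int val) { return old > val ? old : val; } // signed max
    unsigned uminFetchModel(unsigned old, unsigned val) {
      return old < val ? old : val;                                        // unsigned min
    }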
diff --git a/clang/lib/CodeGen/CGCall.cpp b/clang/lib/CodeGen/CGCall.cpp
index 741fa44..465f3f4 100644
--- a/clang/lib/CodeGen/CGCall.cpp
+++ b/clang/lib/CodeGen/CGCall.cpp
@@ -5937,8 +5937,24 @@ RValue CodeGenFunction::EmitCall(const CGFunctionInfo &CallInfo,
CI->getCalledFunction()->getName().starts_with("_Z4sqrt")) {
SetSqrtFPAccuracy(CI);
}
- if (callOrInvoke)
+ if (callOrInvoke) {
*callOrInvoke = CI;
+ if (CGM.getCodeGenOpts().CallGraphSection) {
+ QualType CST;
+ if (TargetDecl && TargetDecl->getFunctionType())
+ CST = QualType(TargetDecl->getFunctionType(), 0);
+ else if (const auto *FPT =
+ Callee.getAbstractInfo().getCalleeFunctionProtoType())
+ CST = QualType(FPT, 0);
+ else
+ llvm_unreachable(
+ "Cannot find the callee type to generate callee_type metadata.");
+
+ // Set type identifier metadata of indirect calls for call graph section.
+ if (!CST.isNull())
+ CGM.createCalleeTypeMetadataForIcall(CST, *callOrInvoke);
+ }
+ }
// If this is within a function that has the guard(nocf) attribute and is an
// indirect call, add the "guard_nocf" attribute to this call to indicate that
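A sketch (assumed source) of the call site this change targets: with -fcall-graph-section, an indirect call is annotated with !callee_type metadata carrying the generalized type id of the callee prototype.

    int dispatch(int (*fp)(int), int x) {
      return fp(x);  // indirect call: gets !callee_type metadata so the call graph
                     // section can pair it with functions carrying matching type ids.
    }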
diff --git a/clang/lib/CodeGen/CodeGenModule.cpp b/clang/lib/CodeGen/CodeGenModule.cpp
index c5eb14e..e490b1c 100644
--- a/clang/lib/CodeGen/CodeGenModule.cpp
+++ b/clang/lib/CodeGen/CodeGenModule.cpp
@@ -83,6 +83,7 @@ static llvm::cl::opt<bool> LimitedCoverage(
llvm::cl::desc("Emit limited coverage mapping information (experimental)"));
static const char AnnotationSection[] = "llvm.metadata";
+static constexpr auto ErrnoTBAAMDName = "llvm.errno.tbaa";
static CGCXXABI *createCXXABI(CodeGenModule &CGM) {
switch (CGM.getContext().getCXXABIKind()) {
@@ -1583,6 +1584,17 @@ void CodeGenModule::Release() {
}
}
}
+
+  // Emit `!llvm.errno.tbaa`, module-level metadata that specifies the TBAA
+ // for an int access. This allows LLVM to reason about what memory can be
+ // accessed by certain library calls that only touch errno.
+ if (TBAA) {
+ TBAAAccessInfo TBAAInfo = getTBAAAccessInfo(Context.IntTy);
+ if (llvm::MDNode *IntegerNode = getTBAAAccessTagInfo(TBAAInfo)) {
+ auto *ErrnoTBAAMD = TheModule.getOrInsertNamedMetadata(ErrnoTBAAMDName);
+ ErrnoTBAAMD->addOperand(IntegerNode);
+ }
+ }
}
void CodeGenModule::EmitOpenCLMetadata() {
@@ -2851,6 +2863,11 @@ void CodeGenModule::SetLLVMFunctionAttributesForDefinition(const Decl *D,
}
}
+ if (CodeGenOpts.CallGraphSection) {
+ if (auto *FD = dyn_cast<FunctionDecl>(D))
+ createIndirectFunctionTypeMD(FD, F);
+ }
+
// Emit type metadata on member functions for member function pointer checks.
// These are only ever necessary on definitions; we're guaranteed that the
// definition will be present in the LTO unit as a result of LTO visibility.
@@ -3054,6 +3071,26 @@ static void setLinkageForGV(llvm::GlobalValue *GV, const NamedDecl *ND) {
GV->setLinkage(llvm::GlobalValue::ExternalWeakLinkage);
}
+static bool hasExistingGeneralizedTypeMD(llvm::Function *F) {
+ llvm::MDNode *MD = F->getMetadata(llvm::LLVMContext::MD_type);
+ return MD && MD->hasGeneralizedMDString();
+}
+
+void CodeGenModule::createIndirectFunctionTypeMD(const FunctionDecl *FD,
+ llvm::Function *F) {
+ // Return if generalized type metadata is already attached.
+ if (hasExistingGeneralizedTypeMD(F))
+ return;
+
+  // All functions that do not have internal linkage could be indirect targets.
+  // Address-taken functions with internal linkage could also be indirect
+  // targets.
+ if (!F->hasLocalLinkage() ||
+ F->getFunction().hasAddressTaken(nullptr, /*IgnoreCallbackUses=*/true,
+ /*IgnoreAssumeLikeCalls=*/true,
+ /*IgnoreLLVMUsed=*/false))
+ F->addTypeMetadata(0, CreateMetadataIdentifierGeneralized(FD->getType()));
+}
+
void CodeGenModule::createFunctionTypeMetadataForIcall(const FunctionDecl *FD,
llvm::Function *F) {
// Only if we are checking indirect calls.
@@ -3069,10 +3106,12 @@ void CodeGenModule::createFunctionTypeMetadataForIcall(const FunctionDecl *FD,
/*GeneralizePointers=*/false);
llvm::Metadata *MD = CreateMetadataIdentifierForType(FnType);
F->addTypeMetadata(0, MD);
-
- QualType GenPtrFnType = GeneralizeFunctionType(getContext(), FD->getType(),
- /*GeneralizePointers=*/true);
- F->addTypeMetadata(0, CreateMetadataIdentifierGeneralized(GenPtrFnType));
+ // Add the generalized identifier if not added already.
+ if (!hasExistingGeneralizedTypeMD(F)) {
+ QualType GenPtrFnType = GeneralizeFunctionType(getContext(), FD->getType(),
+ /*GeneralizePointers=*/true);
+ F->addTypeMetadata(0, CreateMetadataIdentifierGeneralized(GenPtrFnType));
+ }
// Emit a hash-based bit set entry for cross-DSO calls.
if (CodeGenOpts.SanitizeCfiCrossDso)
@@ -3080,6 +3119,21 @@ void CodeGenModule::createFunctionTypeMetadataForIcall(const FunctionDecl *FD,
F->addTypeMetadata(0, llvm::ConstantAsMetadata::get(CrossDsoTypeId));
}
+void CodeGenModule::createCalleeTypeMetadataForIcall(const QualType &QT,
+ llvm::CallBase *CB) {
+ // Only if needed for call graph section and only for indirect calls.
+ if (!CodeGenOpts.CallGraphSection || !CB->isIndirectCall())
+ return;
+
+ llvm::Metadata *TypeIdMD = CreateMetadataIdentifierGeneralized(QT);
+ llvm::MDTuple *TypeTuple = llvm::MDTuple::get(
+ getLLVMContext(), {llvm::ConstantAsMetadata::get(llvm::ConstantInt::get(
+ llvm::Type::getInt64Ty(getLLVMContext()), 0)),
+ TypeIdMD});
+ llvm::MDTuple *MDN = llvm::MDNode::get(getLLVMContext(), {TypeTuple});
+ CB->setMetadata(llvm::LLVMContext::MD_callee_type, MDN);
+}
+
void CodeGenModule::setKCFIType(const FunctionDecl *FD, llvm::Function *F) {
llvm::LLVMContext &Ctx = F->getContext();
llvm::MDBuilder MDB(Ctx);
@@ -3215,6 +3269,9 @@ void CodeGenModule::SetFunctionAttributes(GlobalDecl GD, llvm::Function *F,
!CodeGenOpts.SanitizeCfiCanonicalJumpTables)
createFunctionTypeMetadataForIcall(FD, F);
+ if (CodeGenOpts.CallGraphSection)
+ createIndirectFunctionTypeMD(FD, F);
+
if (LangOpts.Sanitize.has(SanitizerKind::KCFI))
setKCFIType(FD, F);
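A sketch (assumed example) of which definitions createIndirectFunctionTypeMD annotates: non-internal functions unconditionally, internal-linkage functions only when their address is taken.

    static int helper(int x) { return x + 1; }      // internal linkage
    int (*takeAddr())(int) { return helper; }       // address taken: helper is annotated
    static int localOnly(int x) { return x - 1; }   // internal, only called directly: skipped
    int exported(int x) { return localOnly(x); }    // external linkage: always annotated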
diff --git a/clang/lib/CodeGen/CodeGenModule.h b/clang/lib/CodeGen/CodeGenModule.h
index 3971b29..a253bcd 100644
--- a/clang/lib/CodeGen/CodeGenModule.h
+++ b/clang/lib/CodeGen/CodeGenModule.h
@@ -1644,6 +1644,13 @@ public:
void createFunctionTypeMetadataForIcall(const FunctionDecl *FD,
llvm::Function *F);
+ /// Create and attach type metadata if the function is a potential indirect
+ /// call target to support call graph section.
+ void createIndirectFunctionTypeMD(const FunctionDecl *FD, llvm::Function *F);
+
+ /// Create and attach type metadata to the given call.
+ void createCalleeTypeMetadataForIcall(const QualType &QT, llvm::CallBase *CB);
+
/// Set type metadata to the given function.
void setKCFIType(const FunctionDecl *FD, llvm::Function *F);
diff --git a/clang/lib/CodeGen/TargetBuiltins/AMDGPU.cpp b/clang/lib/CodeGen/TargetBuiltins/AMDGPU.cpp
index 5049a0a..f49a5af 100644
--- a/clang/lib/CodeGen/TargetBuiltins/AMDGPU.cpp
+++ b/clang/lib/CodeGen/TargetBuiltins/AMDGPU.cpp
@@ -12,6 +12,7 @@
#include "CGBuiltin.h"
#include "CodeGenFunction.h"
+#include "clang/Basic/SyncScope.h"
#include "clang/Basic/TargetBuiltins.h"
#include "clang/Frontend/FrontendDiagnostic.h"
#include "llvm/Analysis/ValueTracking.h"
@@ -313,33 +314,33 @@ void CodeGenFunction::ProcessOrderScopeAMDGCN(Value *Order, Value *Scope,
}
// Older builtins had an enum argument for the memory scope.
+ const char *SSN = nullptr;
int scope = cast<llvm::ConstantInt>(Scope)->getZExtValue();
switch (scope) {
- case 0: // __MEMORY_SCOPE_SYSTEM
+ case AtomicScopeGenericModel::System: // __MEMORY_SCOPE_SYSTEM
SSID = llvm::SyncScope::System;
break;
- case 1: // __MEMORY_SCOPE_DEVICE
- if (getTarget().getTriple().isSPIRV())
- SSID = getLLVMContext().getOrInsertSyncScopeID("device");
- else
- SSID = getLLVMContext().getOrInsertSyncScopeID("agent");
+ case AtomicScopeGenericModel::Device: // __MEMORY_SCOPE_DEVICE
+ SSN = getTarget().getTriple().isSPIRV() ? "device" : "agent";
break;
- case 2: // __MEMORY_SCOPE_WRKGRP
- SSID = getLLVMContext().getOrInsertSyncScopeID("workgroup");
+ case AtomicScopeGenericModel::Workgroup: // __MEMORY_SCOPE_WRKGRP
+ SSN = "workgroup";
break;
- case 3: // __MEMORY_SCOPE_WVFRNT
- if (getTarget().getTriple().isSPIRV())
- SSID = getLLVMContext().getOrInsertSyncScopeID("subgroup");
- else
- SSID = getLLVMContext().getOrInsertSyncScopeID("wavefront");
+ case AtomicScopeGenericModel::Cluster: // __MEMORY_SCOPE_CLUSTR
+ SSN = getTarget().getTriple().isSPIRV() ? "workgroup" : "cluster";
+ break;
+ case AtomicScopeGenericModel::Wavefront: // __MEMORY_SCOPE_WVFRNT
+ SSN = getTarget().getTriple().isSPIRV() ? "subgroup" : "wavefront";
break;
- case 4: // __MEMORY_SCOPE_SINGLE
+ case AtomicScopeGenericModel::Single: // __MEMORY_SCOPE_SINGLE
SSID = llvm::SyncScope::SingleThread;
break;
default:
SSID = llvm::SyncScope::System;
break;
}
+ if (SSN)
+ SSID = getLLVMContext().getOrInsertSyncScopeID(SSN);
}
llvm::Value *CodeGenFunction::EmitScalarOrConstFoldImmArg(unsigned ICEArguments,
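A sketch (assumed source; whether a given builtin routes through ProcessOrderScopeAMDGCN or the generic atomic lowering is not claimed here) of the __MEMORY_SCOPE_* values that the AtomicScopeGenericModel cases above correspond to, including the new cluster scope:

    int loadAtDeviceScope(int *p) {
      return __scoped_atomic_load_n(p, __ATOMIC_ACQUIRE, __MEMORY_SCOPE_DEVICE);
    }
    int loadAtClusterScope(int *p) {
      return __scoped_atomic_load_n(p, __ATOMIC_ACQUIRE, __MEMORY_SCOPE_CLUSTR);
    }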
diff --git a/clang/lib/CodeGen/Targets/AMDGPU.cpp b/clang/lib/CodeGen/Targets/AMDGPU.cpp
index 16d5919..0bc4b4b7 100644
--- a/clang/lib/CodeGen/Targets/AMDGPU.cpp
+++ b/clang/lib/CodeGen/Targets/AMDGPU.cpp
@@ -508,6 +508,10 @@ AMDGPUTargetCodeGenInfo::getLLVMSyncScopeID(const LangOptions &LangOpts,
case SyncScope::WavefrontScope:
Name = "wavefront";
break;
+ case SyncScope::HIPCluster:
+ case SyncScope::ClusterScope:
+ Name = "cluster";
+ break;
case SyncScope::HIPWorkgroup:
case SyncScope::OpenCLWorkGroup:
case SyncScope::WorkgroupScope:
diff --git a/clang/lib/CodeGen/Targets/SPIR.cpp b/clang/lib/CodeGen/Targets/SPIR.cpp
index 3f6d4e0..80e096e 100644
--- a/clang/lib/CodeGen/Targets/SPIR.cpp
+++ b/clang/lib/CodeGen/Targets/SPIR.cpp
@@ -93,6 +93,8 @@ inline StringRef mapClangSyncScopeToLLVM(SyncScope Scope) {
case SyncScope::OpenCLSubGroup:
case SyncScope::WavefrontScope:
return "subgroup";
+ case SyncScope::HIPCluster:
+ case SyncScope::ClusterScope:
case SyncScope::HIPWorkgroup:
case SyncScope::OpenCLWorkGroup:
case SyncScope::WorkgroupScope:
diff --git a/clang/lib/Driver/ToolChains/Arch/ARM.cpp b/clang/lib/Driver/ToolChains/Arch/ARM.cpp
index 954ecab..61beb04 100644
--- a/clang/lib/Driver/ToolChains/Arch/ARM.cpp
+++ b/clang/lib/Driver/ToolChains/Arch/ARM.cpp
@@ -290,6 +290,8 @@ void arm::setArchNameInTriple(const Driver &D, const ArgList &Args,
// Thumb2 is the default for V7 on Darwin.
(llvm::ARM::parseArchVersion(Suffix) == 7 &&
Triple.isOSBinFormatMachO()) ||
+ // Thumb2 is the default for Fuchsia.
+ Triple.isOSFuchsia() ||
// FIXME: this is invalid for WindowsCE
Triple.isOSWindows();
@@ -452,6 +454,9 @@ arm::FloatABI arm::getDefaultFloatABI(const llvm::Triple &Triple) {
case llvm::Triple::OpenBSD:
return FloatABI::SoftFP;
+ case llvm::Triple::Fuchsia:
+ return FloatABI::Hard;
+
default:
if (Triple.isOHOSFamily())
return FloatABI::Soft;
diff --git a/clang/lib/Format/ContinuationIndenter.cpp b/clang/lib/Format/ContinuationIndenter.cpp
index 37c10c6..e5abf83 100644
--- a/clang/lib/Format/ContinuationIndenter.cpp
+++ b/clang/lib/Format/ContinuationIndenter.cpp
@@ -798,9 +798,11 @@ void ContinuationIndenter::addTokenOnCurrentLine(LineState &State, bool DryRun,
}
if (!DryRun) {
+ const bool ContinuePPDirective =
+ State.Line->InMacroBody && Current.isNot(TT_LineComment);
Whitespaces.replaceWhitespace(Current, /*Newlines=*/0, Spaces,
State.Column + Spaces + PPColumnCorrection,
- /*IsAligned=*/false, State.Line->InMacroBody);
+ /*IsAligned=*/false, ContinuePPDirective);
}
// If "BreakBeforeInheritanceComma" mode, don't break within the inheritance
@@ -1176,10 +1178,11 @@ unsigned ContinuationIndenter::addTokenOnNewLine(LineState &State,
// about removing empty lines on closing blocks. Special case them here.
MaxEmptyLinesToKeep = 1;
}
- unsigned Newlines =
+ const unsigned Newlines =
std::max(1u, std::min(Current.NewlinesBefore, MaxEmptyLinesToKeep));
- bool ContinuePPDirective =
- State.Line->InPPDirective && State.Line->Type != LT_ImportStatement;
+ const bool ContinuePPDirective = State.Line->InPPDirective &&
+ State.Line->Type != LT_ImportStatement &&
+ Current.isNot(TT_LineComment);
Whitespaces.replaceWhitespace(Current, Newlines, State.Column, State.Column,
CurrentState.IsAligned, ContinuePPDirective);
}
diff --git a/clang/lib/Frontend/InitPreprocessor.cpp b/clang/lib/Frontend/InitPreprocessor.cpp
index baad6317..47f1d5a 100644
--- a/clang/lib/Frontend/InitPreprocessor.cpp
+++ b/clang/lib/Frontend/InitPreprocessor.cpp
@@ -585,6 +585,7 @@ static void InitializeStandardPredefinedMacros(const TargetInfo &TI,
Builder.defineMacro("__HIP_MEMORY_SCOPE_WORKGROUP", "3");
Builder.defineMacro("__HIP_MEMORY_SCOPE_AGENT", "4");
Builder.defineMacro("__HIP_MEMORY_SCOPE_SYSTEM", "5");
+ Builder.defineMacro("__HIP_MEMORY_SCOPE_CLUSTER", "6");
if (LangOpts.HIPStdPar) {
Builder.defineMacro("__HIPSTDPAR__");
if (LangOpts.HIPStdParInterposeAlloc) {
@@ -873,6 +874,7 @@ static void InitializePredefinedMacros(const TargetInfo &TI,
Builder.defineMacro("__MEMORY_SCOPE_WRKGRP", "2");
Builder.defineMacro("__MEMORY_SCOPE_WVFRNT", "3");
Builder.defineMacro("__MEMORY_SCOPE_SINGLE", "4");
+ Builder.defineMacro("__MEMORY_SCOPE_CLUSTR", "5");
// Define macros for the OpenCL memory scope.
// The values should match AtomicScopeOpenCLModel::ID enum.
diff --git a/clang/lib/Sema/Sema.cpp b/clang/lib/Sema/Sema.cpp
index 39fa25f..215ac18 100644
--- a/clang/lib/Sema/Sema.cpp
+++ b/clang/lib/Sema/Sema.cpp
@@ -2214,9 +2214,9 @@ void Sema::checkTypeSupport(QualType Ty, SourceLocation Loc, ValueDecl *D) {
else
PD << "expression";
- if (Diag(Loc, PD, FD)
- << false /*show bit size*/ << 0 << Ty << false /*return*/
- << TI.getTriple().str()) {
+ if (Diag(Loc, PD) << false /*show bit size*/ << 0 << Ty
+ << false /*return*/
+ << TI.getTriple().str()) {
if (D)
D->setInvalidDecl();
}
@@ -2233,9 +2233,8 @@ void Sema::checkTypeSupport(QualType Ty, SourceLocation Loc, ValueDecl *D) {
else
PD << "expression";
- if (Diag(Loc, PD, FD)
- << false /*show bit size*/ << 0 << Ty << true /*return*/
- << TI.getTriple().str()) {
+ if (Diag(Loc, PD) << false /*show bit size*/ << 0 << Ty << true /*return*/
+ << TI.getTriple().str()) {
if (D)
D->setInvalidDecl();
}
diff --git a/clang/lib/Sema/SemaBase.cpp b/clang/lib/Sema/SemaBase.cpp
index 9b677f4..bf32491 100644
--- a/clang/lib/Sema/SemaBase.cpp
+++ b/clang/lib/Sema/SemaBase.cpp
@@ -58,13 +58,13 @@ SemaBase::SemaDiagnosticBuilder::getDeviceDeferredDiags() const {
return S.DeviceDeferredDiags;
}
-Sema::SemaDiagnosticBuilder SemaBase::Diag(SourceLocation Loc, unsigned DiagID,
- bool DeferHint) {
+Sema::SemaDiagnosticBuilder SemaBase::Diag(SourceLocation Loc,
+ unsigned DiagID) {
bool IsError =
getDiagnostics().getDiagnosticIDs()->isDefaultMappingAsError(DiagID);
bool ShouldDefer = getLangOpts().CUDA && getLangOpts().GPUDeferDiag &&
DiagnosticIDs::isDeferrable(DiagID) &&
- (DeferHint || SemaRef.DeferDiags || !IsError);
+ (SemaRef.DeferDiags || !IsError);
auto SetIsLastErrorImmediate = [&](bool Flag) {
if (IsError)
SemaRef.IsLastErrorImmediate = Flag;
@@ -83,16 +83,13 @@ Sema::SemaDiagnosticBuilder SemaBase::Diag(SourceLocation Loc, unsigned DiagID,
}
Sema::SemaDiagnosticBuilder SemaBase::Diag(SourceLocation Loc,
- const PartialDiagnostic &PD,
- bool DeferHint) {
- return Diag(Loc, PD.getDiagID(), DeferHint) << PD;
+ const PartialDiagnostic &PD) {
+ return Diag(Loc, PD.getDiagID()) << PD;
}
SemaBase::SemaDiagnosticBuilder SemaBase::DiagCompat(SourceLocation Loc,
- unsigned CompatDiagId,
- bool DeferHint) {
+ unsigned CompatDiagId) {
return Diag(Loc,
- DiagnosticIDs::getCXXCompatDiagId(getLangOpts(), CompatDiagId),
- DeferHint);
+ DiagnosticIDs::getCXXCompatDiagId(getLangOpts(), CompatDiagId));
}
} // namespace clang
diff --git a/clang/lib/Sema/SemaOverload.cpp b/clang/lib/Sema/SemaOverload.cpp
index 7da09e8..1f25111 100644
--- a/clang/lib/Sema/SemaOverload.cpp
+++ b/clang/lib/Sema/SemaOverload.cpp
@@ -13208,7 +13208,10 @@ void OverloadCandidateSet::NoteCandidates(
auto Cands = CompleteCandidates(S, OCD, Args, OpLoc, Filter);
- S.Diag(PD.first, PD.second, shouldDeferDiags(S, Args, OpLoc));
+ {
+ Sema::DeferDiagsRAII RAII{S, shouldDeferDiags(S, Args, OpLoc)};
+ S.Diag(PD.first, PD.second);
+ }
// In WebAssembly we don't want to emit further diagnostics if a table is
// passed as an argument to a function.
@@ -13271,10 +13274,10 @@ void OverloadCandidateSet::NoteCandidates(Sema &S, ArrayRef<Expr *> Args,
// inform the future value of S.Diags.getNumOverloadCandidatesToShow().
S.Diags.overloadCandidatesShown(CandsShown);
- if (I != E)
- S.Diag(OpLoc, diag::note_ovl_too_many_candidates,
- shouldDeferDiags(S, Args, OpLoc))
- << int(E - I);
+ if (I != E) {
+ Sema::DeferDiagsRAII RAII{S, shouldDeferDiags(S, Args, OpLoc)};
+ S.Diag(OpLoc, diag::note_ovl_too_many_candidates) << int(E - I);
+ }
}
static SourceLocation
diff --git a/clang/lib/Sema/SemaRISCV.cpp b/clang/lib/Sema/SemaRISCV.cpp
index c5ef0d5..b5f91a3 100644
--- a/clang/lib/Sema/SemaRISCV.cpp
+++ b/clang/lib/Sema/SemaRISCV.cpp
@@ -1445,21 +1445,21 @@ void SemaRISCV::checkRVVTypeSupport(QualType Ty, SourceLocation Loc, Decl *D,
if (Info.ElementType->isSpecificBuiltinType(BuiltinType::Double) &&
!FeatureMap.lookup("zve64d"))
- Diag(Loc, diag::err_riscv_type_requires_extension, D) << Ty << "zve64d";
+ Diag(Loc, diag::err_riscv_type_requires_extension) << Ty << "zve64d";
// (ELEN, LMUL) pairs of (8, mf8), (16, mf4), (32, mf2), (64, m1) requires at
// least zve64x
else if (((EltSize == 64 && Info.ElementType->isIntegerType()) ||
MinElts == 1) &&
!FeatureMap.lookup("zve64x"))
- Diag(Loc, diag::err_riscv_type_requires_extension, D) << Ty << "zve64x";
+ Diag(Loc, diag::err_riscv_type_requires_extension) << Ty << "zve64x";
else if (Info.ElementType->isFloat16Type() && !FeatureMap.lookup("zvfh") &&
!FeatureMap.lookup("zvfhmin") &&
!FeatureMap.lookup("xandesvpackfph"))
if (DeclareAndesVectorBuiltins) {
- Diag(Loc, diag::err_riscv_type_requires_extension, D)
+ Diag(Loc, diag::err_riscv_type_requires_extension)
<< Ty << "zvfh, zvfhmin or xandesvpackfph";
} else {
- Diag(Loc, diag::err_riscv_type_requires_extension, D)
+ Diag(Loc, diag::err_riscv_type_requires_extension)
<< Ty << "zvfh or zvfhmin";
}
else if (Info.ElementType->isBFloat16Type() &&
@@ -1467,18 +1467,18 @@ void SemaRISCV::checkRVVTypeSupport(QualType Ty, SourceLocation Loc, Decl *D,
!FeatureMap.lookup("xandesvbfhcvt") &&
!FeatureMap.lookup("experimental-zvfbfa"))
if (DeclareAndesVectorBuiltins) {
- Diag(Loc, diag::err_riscv_type_requires_extension, D)
+ Diag(Loc, diag::err_riscv_type_requires_extension)
<< Ty << "zvfbfmin or xandesvbfhcvt";
} else {
- Diag(Loc, diag::err_riscv_type_requires_extension, D) << Ty << "zvfbfmin";
+ Diag(Loc, diag::err_riscv_type_requires_extension) << Ty << "zvfbfmin";
}
else if (Info.ElementType->isSpecificBuiltinType(BuiltinType::Float) &&
!FeatureMap.lookup("zve32f"))
- Diag(Loc, diag::err_riscv_type_requires_extension, D) << Ty << "zve32f";
+ Diag(Loc, diag::err_riscv_type_requires_extension) << Ty << "zve32f";
// Given that caller already checked isRVVType() before calling this function,
// if we don't have at least zve32x supported, then we need to emit error.
else if (!FeatureMap.lookup("zve32x"))
- Diag(Loc, diag::err_riscv_type_requires_extension, D) << Ty << "zve32x";
+ Diag(Loc, diag::err_riscv_type_requires_extension) << Ty << "zve32x";
}
/// Are the two types RVV-bitcast-compatible types? I.e. is bitcasting from the
diff --git a/clang/lib/StaticAnalyzer/Frontend/AnalysisConsumer.cpp b/clang/lib/StaticAnalyzer/Frontend/AnalysisConsumer.cpp
index 871400e..82b560b 100644
--- a/clang/lib/StaticAnalyzer/Frontend/AnalysisConsumer.cpp
+++ b/clang/lib/StaticAnalyzer/Frontend/AnalysisConsumer.cpp
@@ -760,11 +760,11 @@ void AnalysisConsumer::HandleCode(Decl *D, AnalysisMode Mode,
++NumFunctionsAnalyzedSyntaxOnly;
if (SyntaxCheckTimer) {
SyntaxCheckTimer->stopTimer();
- llvm::TimeRecord CheckerEndTime = SyntaxCheckTimer->getTotalTime();
- CheckerEndTime -= CheckerStartTime;
+ llvm::TimeRecord CheckerDuration =
+ SyntaxCheckTimer->getTotalTime() - CheckerStartTime;
FunctionSummaries.findOrInsertSummary(D)->second.SyntaxRunningTime =
- std::lround(CheckerEndTime.getWallTime() * 1000);
- DisplayTime(CheckerEndTime);
+ std::lround(CheckerDuration.getWallTime() * 1000);
+ DisplayTime(CheckerDuration);
if (AnalyzerTimers && ShouldClearTimersToPreventDisplayingThem) {
AnalyzerTimers->clear();
}
@@ -825,11 +825,11 @@ void AnalysisConsumer::RunPathSensitiveChecks(Decl *D,
Mgr->options.MaxNodesPerTopLevelFunction);
if (ExprEngineTimer) {
ExprEngineTimer->stopTimer();
- llvm::TimeRecord ExprEngineEndTime = ExprEngineTimer->getTotalTime();
- ExprEngineEndTime -= ExprEngineStartTime;
+ llvm::TimeRecord ExprEngineDuration =
+ ExprEngineTimer->getTotalTime() - ExprEngineStartTime;
PathRunningTime.set(static_cast<unsigned>(
- std::lround(ExprEngineEndTime.getWallTime() * 1000)));
- DisplayTime(ExprEngineEndTime);
+ std::lround(ExprEngineDuration.getWallTime() * 1000)));
+ DisplayTime(ExprEngineDuration);
if (AnalyzerTimers && ShouldClearTimersToPreventDisplayingThem) {
AnalyzerTimers->clear();
}