Diffstat (limited to 'clang/lib/CIR')
-rw-r--r--  clang/lib/CIR/CodeGen/CIRGenAtomic.cpp              | 169
-rw-r--r--  clang/lib/CIR/CodeGen/CIRGenBuiltin.cpp             |  95
-rw-r--r--  clang/lib/CIR/CodeGen/CIRGenBuiltinX86.cpp          | 814
-rw-r--r--  clang/lib/CIR/CodeGen/CIRGenCXXABI.cpp              |   7
-rw-r--r--  clang/lib/CIR/CodeGen/CIRGenCXXABI.h                |  25
-rw-r--r--  clang/lib/CIR/CodeGen/CIRGenCleanup.cpp             | 227
-rw-r--r--  clang/lib/CIR/CodeGen/CIRGenCleanup.h               | 112
-rw-r--r--  clang/lib/CIR/CodeGen/CIRGenDecl.cpp                |  10
-rw-r--r--  clang/lib/CIR/CodeGen/CIRGenException.cpp           | 151
-rw-r--r--  clang/lib/CIR/CodeGen/CIRGenExpr.cpp                |  28
-rw-r--r--  clang/lib/CIR/CodeGen/CIRGenExprAggregate.cpp       | 126
-rw-r--r--  clang/lib/CIR/CodeGen/CIRGenExprCXX.cpp             |  73
-rw-r--r--  clang/lib/CIR/CodeGen/CIRGenExprConstant.cpp        |  10
-rw-r--r--  clang/lib/CIR/CodeGen/CIRGenExprScalar.cpp          |   5
-rw-r--r--  clang/lib/CIR/CodeGen/CIRGenFunction.cpp            |  43
-rw-r--r--  clang/lib/CIR/CodeGen/CIRGenFunction.h              |  86
-rw-r--r--  clang/lib/CIR/CodeGen/CIRGenItaniumCXXABI.cpp       |  75
-rw-r--r--  clang/lib/CIR/CodeGen/CIRGenModule.cpp              |  13
-rw-r--r--  clang/lib/CIR/CodeGen/CIRGenOpenACCClause.cpp       |  19
-rw-r--r--  clang/lib/CIR/CodeGen/CIRGenStmt.cpp                | 121
-rw-r--r--  clang/lib/CIR/CodeGen/CIRGenStmtOpenACC.cpp         |  27
-rw-r--r--  clang/lib/CIR/CodeGen/CIRGenTypeCache.h             |  10
-rw-r--r--  clang/lib/CIR/CodeGen/CIRGenValue.h                 |   9
-rw-r--r--  clang/lib/CIR/CodeGen/CMakeLists.txt                |   3
-rw-r--r--  clang/lib/CIR/CodeGen/EHScopeStack.h                |  90
-rw-r--r--  clang/lib/CIR/Dialect/IR/CIRDialect.cpp             |  15
-rw-r--r--  clang/lib/CIR/Dialect/Transforms/FlattenCFG.cpp     |  94
-rw-r--r--  clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp | 183
28 files changed, 2504 insertions, 136 deletions
diff --git a/clang/lib/CIR/CodeGen/CIRGenAtomic.cpp b/clang/lib/CIR/CodeGen/CIRGenAtomic.cpp
index a9983f8..7db6e28 100644
--- a/clang/lib/CIR/CodeGen/CIRGenAtomic.cpp
+++ b/clang/lib/CIR/CodeGen/CIRGenAtomic.cpp
@@ -346,6 +346,8 @@ static void emitAtomicOp(CIRGenFunction &cgf, AtomicExpr *expr, Address dest,
CIRGenBuilderTy &builder = cgf.getBuilder();
mlir::Location loc = cgf.getLoc(expr->getSourceRange());
auto orderAttr = cir::MemOrderAttr::get(builder.getContext(), order);
+ cir::AtomicFetchKindAttr fetchAttr;
+ bool fetchFirst = true;
switch (expr->getOp()) {
case AtomicExpr::AO__c11_atomic_init:
@@ -407,6 +409,103 @@ static void emitAtomicOp(CIRGenFunction &cgf, AtomicExpr *expr, Address dest,
opName = cir::AtomicXchgOp::getOperationName();
break;
+ case AtomicExpr::AO__atomic_add_fetch:
+ fetchFirst = false;
+ [[fallthrough]];
+ case AtomicExpr::AO__c11_atomic_fetch_add:
+ case AtomicExpr::AO__atomic_fetch_add:
+ opName = cir::AtomicFetchOp::getOperationName();
+ fetchAttr = cir::AtomicFetchKindAttr::get(builder.getContext(),
+ cir::AtomicFetchKind::Add);
+ break;
+
+ case AtomicExpr::AO__atomic_sub_fetch:
+ fetchFirst = false;
+ [[fallthrough]];
+ case AtomicExpr::AO__c11_atomic_fetch_sub:
+ case AtomicExpr::AO__atomic_fetch_sub:
+ opName = cir::AtomicFetchOp::getOperationName();
+ fetchAttr = cir::AtomicFetchKindAttr::get(builder.getContext(),
+ cir::AtomicFetchKind::Sub);
+ break;
+
+ case AtomicExpr::AO__atomic_min_fetch:
+ fetchFirst = false;
+ [[fallthrough]];
+ case AtomicExpr::AO__c11_atomic_fetch_min:
+ case AtomicExpr::AO__atomic_fetch_min:
+ opName = cir::AtomicFetchOp::getOperationName();
+ fetchAttr = cir::AtomicFetchKindAttr::get(builder.getContext(),
+ cir::AtomicFetchKind::Min);
+ break;
+
+ case AtomicExpr::AO__atomic_max_fetch:
+ fetchFirst = false;
+ [[fallthrough]];
+ case AtomicExpr::AO__c11_atomic_fetch_max:
+ case AtomicExpr::AO__atomic_fetch_max:
+ opName = cir::AtomicFetchOp::getOperationName();
+ fetchAttr = cir::AtomicFetchKindAttr::get(builder.getContext(),
+ cir::AtomicFetchKind::Max);
+ break;
+
+ case AtomicExpr::AO__atomic_and_fetch:
+ fetchFirst = false;
+ [[fallthrough]];
+ case AtomicExpr::AO__c11_atomic_fetch_and:
+ case AtomicExpr::AO__atomic_fetch_and:
+ opName = cir::AtomicFetchOp::getOperationName();
+ fetchAttr = cir::AtomicFetchKindAttr::get(builder.getContext(),
+ cir::AtomicFetchKind::And);
+ break;
+
+ case AtomicExpr::AO__atomic_or_fetch:
+ fetchFirst = false;
+ [[fallthrough]];
+ case AtomicExpr::AO__c11_atomic_fetch_or:
+ case AtomicExpr::AO__atomic_fetch_or:
+ opName = cir::AtomicFetchOp::getOperationName();
+ fetchAttr = cir::AtomicFetchKindAttr::get(builder.getContext(),
+ cir::AtomicFetchKind::Or);
+ break;
+
+ case AtomicExpr::AO__atomic_xor_fetch:
+ fetchFirst = false;
+ [[fallthrough]];
+ case AtomicExpr::AO__c11_atomic_fetch_xor:
+ case AtomicExpr::AO__atomic_fetch_xor:
+ opName = cir::AtomicFetchOp::getOperationName();
+ fetchAttr = cir::AtomicFetchKindAttr::get(builder.getContext(),
+ cir::AtomicFetchKind::Xor);
+ break;
+
+ case AtomicExpr::AO__atomic_nand_fetch:
+ fetchFirst = false;
+ [[fallthrough]];
+ case AtomicExpr::AO__c11_atomic_fetch_nand:
+ case AtomicExpr::AO__atomic_fetch_nand:
+ opName = cir::AtomicFetchOp::getOperationName();
+ fetchAttr = cir::AtomicFetchKindAttr::get(builder.getContext(),
+ cir::AtomicFetchKind::Nand);
+ break;
+
+ case AtomicExpr::AO__atomic_test_and_set: {
+ auto op = cir::AtomicTestAndSetOp::create(
+ builder, loc, ptr.getPointer(), order,
+ builder.getI64IntegerAttr(ptr.getAlignment().getQuantity()),
+ expr->isVolatile());
+ builder.createStore(loc, op, dest);
+ return;
+ }
+
+ case AtomicExpr::AO__atomic_clear: {
+ cir::AtomicClearOp::create(
+ builder, loc, ptr.getPointer(), order,
+ builder.getI64IntegerAttr(ptr.getAlignment().getQuantity()),
+ expr->isVolatile());
+ return;
+ }
+
case AtomicExpr::AO__opencl_atomic_init:
case AtomicExpr::AO__hip_atomic_compare_exchange_strong:
@@ -433,79 +532,51 @@ static void emitAtomicOp(CIRGenFunction &cgf, AtomicExpr *expr, Address dest,
case AtomicExpr::AO__scoped_atomic_exchange_n:
case AtomicExpr::AO__scoped_atomic_exchange:
- case AtomicExpr::AO__atomic_add_fetch:
case AtomicExpr::AO__scoped_atomic_add_fetch:
- case AtomicExpr::AO__c11_atomic_fetch_add:
case AtomicExpr::AO__hip_atomic_fetch_add:
case AtomicExpr::AO__opencl_atomic_fetch_add:
- case AtomicExpr::AO__atomic_fetch_add:
case AtomicExpr::AO__scoped_atomic_fetch_add:
- case AtomicExpr::AO__atomic_sub_fetch:
case AtomicExpr::AO__scoped_atomic_sub_fetch:
- case AtomicExpr::AO__c11_atomic_fetch_sub:
case AtomicExpr::AO__hip_atomic_fetch_sub:
case AtomicExpr::AO__opencl_atomic_fetch_sub:
- case AtomicExpr::AO__atomic_fetch_sub:
case AtomicExpr::AO__scoped_atomic_fetch_sub:
- case AtomicExpr::AO__atomic_min_fetch:
case AtomicExpr::AO__scoped_atomic_min_fetch:
- case AtomicExpr::AO__c11_atomic_fetch_min:
case AtomicExpr::AO__hip_atomic_fetch_min:
case AtomicExpr::AO__opencl_atomic_fetch_min:
- case AtomicExpr::AO__atomic_fetch_min:
case AtomicExpr::AO__scoped_atomic_fetch_min:
- case AtomicExpr::AO__atomic_max_fetch:
case AtomicExpr::AO__scoped_atomic_max_fetch:
- case AtomicExpr::AO__c11_atomic_fetch_max:
case AtomicExpr::AO__hip_atomic_fetch_max:
case AtomicExpr::AO__opencl_atomic_fetch_max:
- case AtomicExpr::AO__atomic_fetch_max:
case AtomicExpr::AO__scoped_atomic_fetch_max:
- case AtomicExpr::AO__atomic_and_fetch:
case AtomicExpr::AO__scoped_atomic_and_fetch:
- case AtomicExpr::AO__c11_atomic_fetch_and:
case AtomicExpr::AO__hip_atomic_fetch_and:
case AtomicExpr::AO__opencl_atomic_fetch_and:
- case AtomicExpr::AO__atomic_fetch_and:
case AtomicExpr::AO__scoped_atomic_fetch_and:
- case AtomicExpr::AO__atomic_or_fetch:
case AtomicExpr::AO__scoped_atomic_or_fetch:
- case AtomicExpr::AO__c11_atomic_fetch_or:
case AtomicExpr::AO__hip_atomic_fetch_or:
case AtomicExpr::AO__opencl_atomic_fetch_or:
- case AtomicExpr::AO__atomic_fetch_or:
case AtomicExpr::AO__scoped_atomic_fetch_or:
- case AtomicExpr::AO__atomic_xor_fetch:
case AtomicExpr::AO__scoped_atomic_xor_fetch:
- case AtomicExpr::AO__c11_atomic_fetch_xor:
case AtomicExpr::AO__hip_atomic_fetch_xor:
case AtomicExpr::AO__opencl_atomic_fetch_xor:
- case AtomicExpr::AO__atomic_fetch_xor:
case AtomicExpr::AO__scoped_atomic_fetch_xor:
- case AtomicExpr::AO__atomic_nand_fetch:
case AtomicExpr::AO__scoped_atomic_nand_fetch:
- case AtomicExpr::AO__c11_atomic_fetch_nand:
- case AtomicExpr::AO__atomic_fetch_nand:
case AtomicExpr::AO__scoped_atomic_fetch_nand:
-
- case AtomicExpr::AO__atomic_test_and_set:
-
- case AtomicExpr::AO__atomic_clear:
cgf.cgm.errorNYI(expr->getSourceRange(), "emitAtomicOp: expr op NYI");
return;
}
@@ -518,9 +589,13 @@ static void emitAtomicOp(CIRGenFunction &cgf, AtomicExpr *expr, Address dest,
mlir::Operation *rmwOp = builder.create(loc, builder.getStringAttr(opName),
atomicOperands, atomicResTys);
+ if (fetchAttr)
+ rmwOp->setAttr("binop", fetchAttr);
rmwOp->setAttr("mem_order", orderAttr);
if (expr->isVolatile())
rmwOp->setAttr("is_volatile", builder.getUnitAttr());
+ if (fetchFirst && opName == cir::AtomicFetchOp::getOperationName())
+ rmwOp->setAttr("fetch_first", builder.getUnitAttr());
mlir::Value result = rmwOp->getResult(0);
builder.createStore(loc, result, dest);
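[Editor's note: for reference, the fetch_first attribute distinguishes the two GCC-style builtin families, which differ only in which value they return. A minimal C++ sketch; counter and bump are hypothetical names:

int counter = 0;
void bump() {
  // Returns the value *before* the add: fetchFirst stays true above,
  // so the op is lowered with the fetch_first attribute set.
  int old = __atomic_fetch_add(&counter, 1, __ATOMIC_SEQ_CST);
  // Returns the value *after* the add: fetchFirst = false, no attribute.
  int now = __atomic_add_fetch(&counter, 1, __ATOMIC_SEQ_CST);
  (void)old; (void)now;
}]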
@@ -581,6 +656,8 @@ RValue CIRGenFunction::emitAtomicExpr(AtomicExpr *e) {
case AtomicExpr::AO__atomic_load_n:
case AtomicExpr::AO__c11_atomic_load:
+ case AtomicExpr::AO__atomic_test_and_set:
+ case AtomicExpr::AO__atomic_clear:
break;
case AtomicExpr::AO__atomic_load:
@@ -614,8 +691,41 @@ RValue CIRGenFunction::emitAtomicExpr(AtomicExpr *e) {
isWeakExpr = e->getWeak();
break;
+ case AtomicExpr::AO__c11_atomic_fetch_add:
+ case AtomicExpr::AO__c11_atomic_fetch_sub:
+ if (memTy->isPointerType()) {
+ cgm.errorNYI(e->getSourceRange(),
+ "atomic fetch-and-add and fetch-and-sub for pointers");
+ return RValue::get(nullptr);
+ }
+ [[fallthrough]];
+ case AtomicExpr::AO__atomic_fetch_add:
+ case AtomicExpr::AO__atomic_fetch_max:
+ case AtomicExpr::AO__atomic_fetch_min:
+ case AtomicExpr::AO__atomic_fetch_sub:
+ case AtomicExpr::AO__atomic_add_fetch:
+ case AtomicExpr::AO__atomic_max_fetch:
+ case AtomicExpr::AO__atomic_min_fetch:
+ case AtomicExpr::AO__atomic_sub_fetch:
+ case AtomicExpr::AO__c11_atomic_fetch_max:
+ case AtomicExpr::AO__c11_atomic_fetch_min:
+ shouldCastToIntPtrTy = !memTy->isFloatingType();
+ [[fallthrough]];
+
+ case AtomicExpr::AO__atomic_fetch_and:
+ case AtomicExpr::AO__atomic_fetch_nand:
+ case AtomicExpr::AO__atomic_fetch_or:
+ case AtomicExpr::AO__atomic_fetch_xor:
+ case AtomicExpr::AO__atomic_and_fetch:
+ case AtomicExpr::AO__atomic_nand_fetch:
+ case AtomicExpr::AO__atomic_or_fetch:
+ case AtomicExpr::AO__atomic_xor_fetch:
case AtomicExpr::AO__atomic_exchange_n:
case AtomicExpr::AO__atomic_store_n:
+ case AtomicExpr::AO__c11_atomic_fetch_and:
+ case AtomicExpr::AO__c11_atomic_fetch_nand:
+ case AtomicExpr::AO__c11_atomic_fetch_or:
+ case AtomicExpr::AO__c11_atomic_fetch_xor:
case AtomicExpr::AO__c11_atomic_exchange:
case AtomicExpr::AO__c11_atomic_store:
val1 = emitValToTemp(*this, e->getVal1());
@@ -640,6 +750,9 @@ RValue CIRGenFunction::emitAtomicExpr(AtomicExpr *e) {
dest = atomics.castToAtomicIntPointer(dest);
} else if (e->isCmpXChg()) {
dest = createMemTemp(resultTy, getLoc(e->getSourceRange()), "cmpxchg.bool");
+ } else if (e->getOp() == AtomicExpr::AO__atomic_test_and_set) {
+ dest = createMemTemp(resultTy, getLoc(e->getSourceRange()),
+ "test_and_set.bool");
} else if (!resultTy->isVoidType()) {
dest = atomics.createTempAlloca();
if (shouldCastToIntPtrTy)
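[Editor's note: the two new ops, cir::AtomicTestAndSetOp and cir::AtomicClearOp, lower the classic spin-lock builtins. A minimal C++ sketch of the source-level semantics; lock_byte, lock, and unlock are hypothetical names:

static char lock_byte;
void lock() {
  // __atomic_test_and_set returns the previous "set" state;
  // spin until it was clear.
  while (__atomic_test_and_set(&lock_byte, __ATOMIC_ACQUIRE))
    ;
}
void unlock() { __atomic_clear(&lock_byte, __ATOMIC_RELEASE); }]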
diff --git a/clang/lib/CIR/CodeGen/CIRGenBuiltin.cpp b/clang/lib/CIR/CodeGen/CIRGenBuiltin.cpp
index ea31871..798e9d9 100644
--- a/clang/lib/CIR/CodeGen/CIRGenBuiltin.cpp
+++ b/clang/lib/CIR/CodeGen/CIRGenBuiltin.cpp
@@ -463,12 +463,107 @@ RValue CIRGenFunction::emitBuiltinExpr(const GlobalDecl &gd, unsigned builtinID,
return emitLibraryCall(*this, fd, e,
cgm.getBuiltinLibFunction(fd, builtinID));
+ // Some target-specific builtins can have aggregate return values, e.g.
+ // __builtin_arm_mve_vld2q_u32. So if the result is an aggregate, force
+ // returnValue to be non-null, so that the target-specific emission code can
+ // always just emit into it.
+ cir::TypeEvaluationKind evalKind = getEvaluationKind(e->getType());
+ if (evalKind == cir::TEK_Aggregate && returnValue.isNull()) {
+ cgm.errorNYI(e->getSourceRange(), "aggregate return value from builtin");
+ return getUndefRValue(e->getType());
+ }
+
+ // Now see if we can emit a target-specific builtin.
+ if (mlir::Value v = emitTargetBuiltinExpr(builtinID, e, returnValue)) {
+ switch (evalKind) {
+ case cir::TEK_Scalar:
+ if (mlir::isa<cir::VoidType>(v.getType()))
+ return RValue::get(nullptr);
+ return RValue::get(v);
+ case cir::TEK_Aggregate:
+ cgm.errorNYI(e->getSourceRange(), "aggregate return value from builtin");
+ return getUndefRValue(e->getType());
+ case cir::TEK_Complex:
+ llvm_unreachable("No current target builtin returns complex");
+ }
+ llvm_unreachable("Bad evaluation kind in EmitBuiltinExpr");
+ }
+
cgm.errorNYI(e->getSourceRange(),
std::string("unimplemented builtin call: ") +
getContext().BuiltinInfo.getName(builtinID));
return getUndefRValue(e->getType());
}
+static mlir::Value emitTargetArchBuiltinExpr(CIRGenFunction *cgf,
+ unsigned builtinID,
+ const CallExpr *e,
+ ReturnValueSlot &returnValue,
+ llvm::Triple::ArchType arch) {
+ // When compiling in HipStdPar mode we have to be conservative in rejecting
+ // target specific features in the FE, and defer the possible error to the
+ // AcceleratorCodeSelection pass, wherein iff an unsupported target builtin is
+ // referenced by an accelerator executable function, we emit an error.
+ // Returning nullptr here leads to the builtin being handled in
+ // EmitStdParUnsupportedBuiltin.
+ if (cgf->getLangOpts().HIPStdPar && cgf->getLangOpts().CUDAIsDevice &&
+ arch != cgf->getTarget().getTriple().getArch())
+ return {};
+
+ switch (arch) {
+ case llvm::Triple::arm:
+ case llvm::Triple::armeb:
+ case llvm::Triple::thumb:
+ case llvm::Triple::thumbeb:
+ case llvm::Triple::aarch64:
+ case llvm::Triple::aarch64_32:
+ case llvm::Triple::aarch64_be:
+ case llvm::Triple::bpfeb:
+ case llvm::Triple::bpfel:
+ // These are actually NYI, but that will be reported by emitBuiltinExpr.
+ // At this point, we don't even know that the builtin is target-specific.
+ return nullptr;
+
+ case llvm::Triple::x86:
+ case llvm::Triple::x86_64:
+ return cgf->emitX86BuiltinExpr(builtinID, e);
+
+ case llvm::Triple::ppc:
+ case llvm::Triple::ppcle:
+ case llvm::Triple::ppc64:
+ case llvm::Triple::ppc64le:
+ case llvm::Triple::r600:
+ case llvm::Triple::amdgcn:
+ case llvm::Triple::systemz:
+ case llvm::Triple::nvptx:
+ case llvm::Triple::nvptx64:
+ case llvm::Triple::wasm32:
+ case llvm::Triple::wasm64:
+ case llvm::Triple::hexagon:
+ case llvm::Triple::riscv32:
+ case llvm::Triple::riscv64:
+ // These are actually NYI, but that will be reported by emitBuiltinExpr.
+ // At this point, we don't even know that the builtin is target-specific.
+ return {};
+ default:
+ return {};
+ }
+}
+
+mlir::Value
+CIRGenFunction::emitTargetBuiltinExpr(unsigned builtinID, const CallExpr *e,
+ ReturnValueSlot &returnValue) {
+ if (getContext().BuiltinInfo.isAuxBuiltinID(builtinID)) {
+ assert(getContext().getAuxTargetInfo() && "Missing aux target info");
+ return emitTargetArchBuiltinExpr(
+ this, getContext().BuiltinInfo.getAuxBuiltinID(builtinID), e,
+ returnValue, getContext().getAuxTargetInfo()->getTriple().getArch());
+ }
+
+ return emitTargetArchBuiltinExpr(this, builtinID, e, returnValue,
+ getTarget().getTriple().getArch());
+}
+
/// Given a builtin id for a function like "__builtin_fabsf", return a Function*
/// for "fabsf".
cir::FuncOp CIRGenModule::getBuiltinLibFunction(const FunctionDecl *fd,
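[Editor's note: as an illustration of the dispatch path, a hedged C++ sketch. In Clang's headers the _lzcnt_u32 intrinsic wrapper is implemented on top of __builtin_ia32_lzcnt_u32, which emitTargetBuiltinExpr routes to emitX86BuiltinExpr on x86/x86_64 targets (where it is currently reported NYI). Assumes compilation with -mlzcnt; leading_zeros is a hypothetical name:

#include <immintrin.h>
// _lzcnt_u32 expands to __builtin_ia32_lzcnt_u32, which reaches
// emitX86BuiltinExpr via the arch switch above.
unsigned leading_zeros(unsigned x) { return _lzcnt_u32(x); }]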
diff --git a/clang/lib/CIR/CodeGen/CIRGenBuiltinX86.cpp b/clang/lib/CIR/CodeGen/CIRGenBuiltinX86.cpp
new file mode 100644
index 0000000..3c9c7ec
--- /dev/null
+++ b/clang/lib/CIR/CodeGen/CIRGenBuiltinX86.cpp
@@ -0,0 +1,814 @@
+//===----------------------------------------------------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// This file contains code to emit x86/x86_64 builtin calls as CIR or as a
+// function call to be resolved later.
+//
+//===----------------------------------------------------------------------===//
+
+#include "CIRGenFunction.h"
+#include "CIRGenModule.h"
+#include "clang/Basic/Builtins.h"
+#include "clang/Basic/TargetBuiltins.h"
+#include "clang/CIR/MissingFeatures.h"
+#include "llvm/IR/IntrinsicsX86.h"
+
+using namespace clang;
+using namespace clang::CIRGen;
+
+mlir::Value CIRGenFunction::emitX86BuiltinExpr(unsigned builtinID,
+ const CallExpr *e) {
+ if (builtinID == Builtin::BI__builtin_cpu_is) {
+ cgm.errorNYI(e->getSourceRange(), "__builtin_cpu_is");
+ return {};
+ }
+ if (builtinID == Builtin::BI__builtin_cpu_supports) {
+ cgm.errorNYI(e->getSourceRange(), "__builtin_cpu_supports");
+ return {};
+ }
+ if (builtinID == Builtin::BI__builtin_cpu_init) {
+ cgm.errorNYI(e->getSourceRange(), "__builtin_cpu_init");
+ return {};
+ }
+
+ // Handle MSVC intrinsics before argument evaluation to prevent double
+ // evaluation.
+ assert(!cir::MissingFeatures::msvcBuiltins());
+
+ // Find out if any arguments are required to be integer constant expressions.
+ assert(!cir::MissingFeatures::handleBuiltinICEArguments());
+
+ switch (builtinID) {
+ default:
+ return {};
+ case X86::BI_mm_prefetch:
+ case X86::BI_mm_clflush:
+ case X86::BI_mm_lfence:
+ case X86::BI_mm_pause:
+ case X86::BI_mm_mfence:
+ case X86::BI_mm_sfence:
+ case X86::BI__rdtsc:
+ case X86::BI__builtin_ia32_rdtscp:
+ case X86::BI__builtin_ia32_lzcnt_u16:
+ case X86::BI__builtin_ia32_lzcnt_u32:
+ case X86::BI__builtin_ia32_lzcnt_u64:
+ case X86::BI__builtin_ia32_tzcnt_u16:
+ case X86::BI__builtin_ia32_tzcnt_u32:
+ case X86::BI__builtin_ia32_tzcnt_u64:
+ case X86::BI__builtin_ia32_undef128:
+ case X86::BI__builtin_ia32_undef256:
+ case X86::BI__builtin_ia32_undef512:
+ case X86::BI__builtin_ia32_vec_ext_v4hi:
+ case X86::BI__builtin_ia32_vec_ext_v16qi:
+ case X86::BI__builtin_ia32_vec_ext_v8hi:
+ case X86::BI__builtin_ia32_vec_ext_v4si:
+ case X86::BI__builtin_ia32_vec_ext_v4sf:
+ case X86::BI__builtin_ia32_vec_ext_v2di:
+ case X86::BI__builtin_ia32_vec_ext_v32qi:
+ case X86::BI__builtin_ia32_vec_ext_v16hi:
+ case X86::BI__builtin_ia32_vec_ext_v8si:
+ case X86::BI__builtin_ia32_vec_ext_v4di:
+ case X86::BI__builtin_ia32_vec_set_v4hi:
+ case X86::BI__builtin_ia32_vec_set_v16qi:
+ case X86::BI__builtin_ia32_vec_set_v8hi:
+ case X86::BI__builtin_ia32_vec_set_v4si:
+ case X86::BI__builtin_ia32_vec_set_v2di:
+ case X86::BI__builtin_ia32_vec_set_v32qi:
+ case X86::BI__builtin_ia32_vec_set_v16hi:
+ case X86::BI__builtin_ia32_vec_set_v8si:
+ case X86::BI__builtin_ia32_vec_set_v4di:
+ case X86::BI_mm_setcsr:
+ case X86::BI__builtin_ia32_ldmxcsr:
+ case X86::BI_mm_getcsr:
+ case X86::BI__builtin_ia32_stmxcsr:
+ case X86::BI__builtin_ia32_xsave:
+ case X86::BI__builtin_ia32_xsave64:
+ case X86::BI__builtin_ia32_xrstor:
+ case X86::BI__builtin_ia32_xrstor64:
+ case X86::BI__builtin_ia32_xsaveopt:
+ case X86::BI__builtin_ia32_xsaveopt64:
+ case X86::BI__builtin_ia32_xrstors:
+ case X86::BI__builtin_ia32_xrstors64:
+ case X86::BI__builtin_ia32_xsavec:
+ case X86::BI__builtin_ia32_xsavec64:
+ case X86::BI__builtin_ia32_xsaves:
+ case X86::BI__builtin_ia32_xsaves64:
+ case X86::BI__builtin_ia32_xsetbv:
+ case X86::BI_xsetbv:
+ case X86::BI__builtin_ia32_xgetbv:
+ case X86::BI_xgetbv:
+ case X86::BI__builtin_ia32_storedqudi128_mask:
+ case X86::BI__builtin_ia32_storedqusi128_mask:
+ case X86::BI__builtin_ia32_storedquhi128_mask:
+ case X86::BI__builtin_ia32_storedquqi128_mask:
+ case X86::BI__builtin_ia32_storeupd128_mask:
+ case X86::BI__builtin_ia32_storeups128_mask:
+ case X86::BI__builtin_ia32_storedqudi256_mask:
+ case X86::BI__builtin_ia32_storedqusi256_mask:
+ case X86::BI__builtin_ia32_storedquhi256_mask:
+ case X86::BI__builtin_ia32_storedquqi256_mask:
+ case X86::BI__builtin_ia32_storeupd256_mask:
+ case X86::BI__builtin_ia32_storeups256_mask:
+ case X86::BI__builtin_ia32_storedqudi512_mask:
+ case X86::BI__builtin_ia32_storedqusi512_mask:
+ case X86::BI__builtin_ia32_storedquhi512_mask:
+ case X86::BI__builtin_ia32_storedquqi512_mask:
+ case X86::BI__builtin_ia32_storeupd512_mask:
+ case X86::BI__builtin_ia32_storeups512_mask:
+ case X86::BI__builtin_ia32_storesbf16128_mask:
+ case X86::BI__builtin_ia32_storesh128_mask:
+ case X86::BI__builtin_ia32_storess128_mask:
+ case X86::BI__builtin_ia32_storesd128_mask:
+ case X86::BI__builtin_ia32_cvtmask2b128:
+ case X86::BI__builtin_ia32_cvtmask2b256:
+ case X86::BI__builtin_ia32_cvtmask2b512:
+ case X86::BI__builtin_ia32_cvtmask2w128:
+ case X86::BI__builtin_ia32_cvtmask2w256:
+ case X86::BI__builtin_ia32_cvtmask2w512:
+ case X86::BI__builtin_ia32_cvtmask2d128:
+ case X86::BI__builtin_ia32_cvtmask2d256:
+ case X86::BI__builtin_ia32_cvtmask2d512:
+ case X86::BI__builtin_ia32_cvtmask2q128:
+ case X86::BI__builtin_ia32_cvtmask2q256:
+ case X86::BI__builtin_ia32_cvtmask2q512:
+ case X86::BI__builtin_ia32_cvtb2mask128:
+ case X86::BI__builtin_ia32_cvtb2mask256:
+ case X86::BI__builtin_ia32_cvtb2mask512:
+ case X86::BI__builtin_ia32_cvtw2mask128:
+ case X86::BI__builtin_ia32_cvtw2mask256:
+ case X86::BI__builtin_ia32_cvtw2mask512:
+ case X86::BI__builtin_ia32_cvtd2mask128:
+ case X86::BI__builtin_ia32_cvtd2mask256:
+ case X86::BI__builtin_ia32_cvtd2mask512:
+ case X86::BI__builtin_ia32_cvtq2mask128:
+ case X86::BI__builtin_ia32_cvtq2mask256:
+ case X86::BI__builtin_ia32_cvtq2mask512:
+ case X86::BI__builtin_ia32_cvtdq2ps512_mask:
+ case X86::BI__builtin_ia32_cvtqq2ps512_mask:
+ case X86::BI__builtin_ia32_cvtqq2pd512_mask:
+ case X86::BI__builtin_ia32_vcvtw2ph512_mask:
+ case X86::BI__builtin_ia32_vcvtdq2ph512_mask:
+ case X86::BI__builtin_ia32_vcvtqq2ph512_mask:
+ case X86::BI__builtin_ia32_cvtudq2ps512_mask:
+ case X86::BI__builtin_ia32_cvtuqq2ps512_mask:
+ case X86::BI__builtin_ia32_cvtuqq2pd512_mask:
+ case X86::BI__builtin_ia32_vcvtuw2ph512_mask:
+ case X86::BI__builtin_ia32_vcvtudq2ph512_mask:
+ case X86::BI__builtin_ia32_vcvtuqq2ph512_mask:
+ case X86::BI__builtin_ia32_vfmaddss3:
+ case X86::BI__builtin_ia32_vfmaddsd3:
+ case X86::BI__builtin_ia32_vfmaddsh3_mask:
+ case X86::BI__builtin_ia32_vfmaddss3_mask:
+ case X86::BI__builtin_ia32_vfmaddsd3_mask:
+ case X86::BI__builtin_ia32_vfmaddss:
+ case X86::BI__builtin_ia32_vfmaddsd:
+ case X86::BI__builtin_ia32_vfmaddsh3_maskz:
+ case X86::BI__builtin_ia32_vfmaddss3_maskz:
+ case X86::BI__builtin_ia32_vfmaddsd3_maskz:
+ case X86::BI__builtin_ia32_vfmaddsh3_mask3:
+ case X86::BI__builtin_ia32_vfmaddss3_mask3:
+ case X86::BI__builtin_ia32_vfmaddsd3_mask3:
+ case X86::BI__builtin_ia32_vfmsubsh3_mask3:
+ case X86::BI__builtin_ia32_vfmsubss3_mask3:
+ case X86::BI__builtin_ia32_vfmsubsd3_mask3:
+ case X86::BI__builtin_ia32_vfmaddph512_mask:
+ case X86::BI__builtin_ia32_vfmaddph512_maskz:
+ case X86::BI__builtin_ia32_vfmaddph512_mask3:
+ case X86::BI__builtin_ia32_vfmaddps512_mask:
+ case X86::BI__builtin_ia32_vfmaddps512_maskz:
+ case X86::BI__builtin_ia32_vfmaddps512_mask3:
+ case X86::BI__builtin_ia32_vfmsubps512_mask3:
+ case X86::BI__builtin_ia32_vfmaddpd512_mask:
+ case X86::BI__builtin_ia32_vfmaddpd512_maskz:
+ case X86::BI__builtin_ia32_vfmaddpd512_mask3:
+ case X86::BI__builtin_ia32_vfmsubpd512_mask3:
+ case X86::BI__builtin_ia32_vfmsubph512_mask3:
+ case X86::BI__builtin_ia32_vfmaddsubph512_mask:
+ case X86::BI__builtin_ia32_vfmaddsubph512_maskz:
+ case X86::BI__builtin_ia32_vfmaddsubph512_mask3:
+ case X86::BI__builtin_ia32_vfmsubaddph512_mask3:
+ case X86::BI__builtin_ia32_vfmaddsubps512_mask:
+ case X86::BI__builtin_ia32_vfmaddsubps512_maskz:
+ case X86::BI__builtin_ia32_vfmaddsubps512_mask3:
+ case X86::BI__builtin_ia32_vfmsubaddps512_mask3:
+ case X86::BI__builtin_ia32_vfmaddsubpd512_mask:
+ case X86::BI__builtin_ia32_vfmaddsubpd512_maskz:
+ case X86::BI__builtin_ia32_vfmaddsubpd512_mask3:
+ case X86::BI__builtin_ia32_vfmsubaddpd512_mask3:
+ case X86::BI__builtin_ia32_movdqa32store128_mask:
+ case X86::BI__builtin_ia32_movdqa64store128_mask:
+ case X86::BI__builtin_ia32_storeaps128_mask:
+ case X86::BI__builtin_ia32_storeapd128_mask:
+ case X86::BI__builtin_ia32_movdqa32store256_mask:
+ case X86::BI__builtin_ia32_movdqa64store256_mask:
+ case X86::BI__builtin_ia32_storeaps256_mask:
+ case X86::BI__builtin_ia32_storeapd256_mask:
+ case X86::BI__builtin_ia32_movdqa32store512_mask:
+ case X86::BI__builtin_ia32_movdqa64store512_mask:
+ case X86::BI__builtin_ia32_storeaps512_mask:
+ case X86::BI__builtin_ia32_storeapd512_mask:
+ case X86::BI__builtin_ia32_loadups128_mask:
+ case X86::BI__builtin_ia32_loadups256_mask:
+ case X86::BI__builtin_ia32_loadups512_mask:
+ case X86::BI__builtin_ia32_loadupd128_mask:
+ case X86::BI__builtin_ia32_loadupd256_mask:
+ case X86::BI__builtin_ia32_loadupd512_mask:
+ case X86::BI__builtin_ia32_loaddquqi128_mask:
+ case X86::BI__builtin_ia32_loaddquqi256_mask:
+ case X86::BI__builtin_ia32_loaddquqi512_mask:
+ case X86::BI__builtin_ia32_loaddquhi128_mask:
+ case X86::BI__builtin_ia32_loaddquhi256_mask:
+ case X86::BI__builtin_ia32_loaddquhi512_mask:
+ case X86::BI__builtin_ia32_loaddqusi128_mask:
+ case X86::BI__builtin_ia32_loaddqusi256_mask:
+ case X86::BI__builtin_ia32_loaddqusi512_mask:
+ case X86::BI__builtin_ia32_loaddqudi128_mask:
+ case X86::BI__builtin_ia32_loaddqudi256_mask:
+ case X86::BI__builtin_ia32_loaddqudi512_mask:
+ case X86::BI__builtin_ia32_loadsbf16128_mask:
+ case X86::BI__builtin_ia32_loadsh128_mask:
+ case X86::BI__builtin_ia32_loadss128_mask:
+ case X86::BI__builtin_ia32_loadsd128_mask:
+ case X86::BI__builtin_ia32_loadaps128_mask:
+ case X86::BI__builtin_ia32_loadaps256_mask:
+ case X86::BI__builtin_ia32_loadaps512_mask:
+ case X86::BI__builtin_ia32_loadapd128_mask:
+ case X86::BI__builtin_ia32_loadapd256_mask:
+ case X86::BI__builtin_ia32_loadapd512_mask:
+ case X86::BI__builtin_ia32_movdqa32load128_mask:
+ case X86::BI__builtin_ia32_movdqa32load256_mask:
+ case X86::BI__builtin_ia32_movdqa32load512_mask:
+ case X86::BI__builtin_ia32_movdqa64load128_mask:
+ case X86::BI__builtin_ia32_movdqa64load256_mask:
+ case X86::BI__builtin_ia32_movdqa64load512_mask:
+ case X86::BI__builtin_ia32_expandloaddf128_mask:
+ case X86::BI__builtin_ia32_expandloaddf256_mask:
+ case X86::BI__builtin_ia32_expandloaddf512_mask:
+ case X86::BI__builtin_ia32_expandloadsf128_mask:
+ case X86::BI__builtin_ia32_expandloadsf256_mask:
+ case X86::BI__builtin_ia32_expandloadsf512_mask:
+ case X86::BI__builtin_ia32_expandloaddi128_mask:
+ case X86::BI__builtin_ia32_expandloaddi256_mask:
+ case X86::BI__builtin_ia32_expandloaddi512_mask:
+ case X86::BI__builtin_ia32_expandloadsi128_mask:
+ case X86::BI__builtin_ia32_expandloadsi256_mask:
+ case X86::BI__builtin_ia32_expandloadsi512_mask:
+ case X86::BI__builtin_ia32_expandloadhi128_mask:
+ case X86::BI__builtin_ia32_expandloadhi256_mask:
+ case X86::BI__builtin_ia32_expandloadhi512_mask:
+ case X86::BI__builtin_ia32_expandloadqi128_mask:
+ case X86::BI__builtin_ia32_expandloadqi256_mask:
+ case X86::BI__builtin_ia32_expandloadqi512_mask:
+ case X86::BI__builtin_ia32_compressstoredf128_mask:
+ case X86::BI__builtin_ia32_compressstoredf256_mask:
+ case X86::BI__builtin_ia32_compressstoredf512_mask:
+ case X86::BI__builtin_ia32_compressstoresf128_mask:
+ case X86::BI__builtin_ia32_compressstoresf256_mask:
+ case X86::BI__builtin_ia32_compressstoresf512_mask:
+ case X86::BI__builtin_ia32_compressstoredi128_mask:
+ case X86::BI__builtin_ia32_compressstoredi256_mask:
+ case X86::BI__builtin_ia32_compressstoredi512_mask:
+ case X86::BI__builtin_ia32_compressstoresi128_mask:
+ case X86::BI__builtin_ia32_compressstoresi256_mask:
+ case X86::BI__builtin_ia32_compressstoresi512_mask:
+ case X86::BI__builtin_ia32_compressstorehi128_mask:
+ case X86::BI__builtin_ia32_compressstorehi256_mask:
+ case X86::BI__builtin_ia32_compressstorehi512_mask:
+ case X86::BI__builtin_ia32_compressstoreqi128_mask:
+ case X86::BI__builtin_ia32_compressstoreqi256_mask:
+ case X86::BI__builtin_ia32_compressstoreqi512_mask:
+ case X86::BI__builtin_ia32_expanddf128_mask:
+ case X86::BI__builtin_ia32_expanddf256_mask:
+ case X86::BI__builtin_ia32_expanddf512_mask:
+ case X86::BI__builtin_ia32_expandsf128_mask:
+ case X86::BI__builtin_ia32_expandsf256_mask:
+ case X86::BI__builtin_ia32_expandsf512_mask:
+ case X86::BI__builtin_ia32_expanddi128_mask:
+ case X86::BI__builtin_ia32_expanddi256_mask:
+ case X86::BI__builtin_ia32_expanddi512_mask:
+ case X86::BI__builtin_ia32_expandsi128_mask:
+ case X86::BI__builtin_ia32_expandsi256_mask:
+ case X86::BI__builtin_ia32_expandsi512_mask:
+ case X86::BI__builtin_ia32_expandhi128_mask:
+ case X86::BI__builtin_ia32_expandhi256_mask:
+ case X86::BI__builtin_ia32_expandhi512_mask:
+ case X86::BI__builtin_ia32_expandqi128_mask:
+ case X86::BI__builtin_ia32_expandqi256_mask:
+ case X86::BI__builtin_ia32_expandqi512_mask:
+ case X86::BI__builtin_ia32_compressdf128_mask:
+ case X86::BI__builtin_ia32_compressdf256_mask:
+ case X86::BI__builtin_ia32_compressdf512_mask:
+ case X86::BI__builtin_ia32_compresssf128_mask:
+ case X86::BI__builtin_ia32_compresssf256_mask:
+ case X86::BI__builtin_ia32_compresssf512_mask:
+ case X86::BI__builtin_ia32_compressdi128_mask:
+ case X86::BI__builtin_ia32_compressdi256_mask:
+ case X86::BI__builtin_ia32_compressdi512_mask:
+ case X86::BI__builtin_ia32_compresssi128_mask:
+ case X86::BI__builtin_ia32_compresssi256_mask:
+ case X86::BI__builtin_ia32_compresssi512_mask:
+ case X86::BI__builtin_ia32_compresshi128_mask:
+ case X86::BI__builtin_ia32_compresshi256_mask:
+ case X86::BI__builtin_ia32_compresshi512_mask:
+ case X86::BI__builtin_ia32_compressqi128_mask:
+ case X86::BI__builtin_ia32_compressqi256_mask:
+ case X86::BI__builtin_ia32_compressqi512_mask:
+ case X86::BI__builtin_ia32_gather3div2df:
+ case X86::BI__builtin_ia32_gather3div2di:
+ case X86::BI__builtin_ia32_gather3div4df:
+ case X86::BI__builtin_ia32_gather3div4di:
+ case X86::BI__builtin_ia32_gather3div4sf:
+ case X86::BI__builtin_ia32_gather3div4si:
+ case X86::BI__builtin_ia32_gather3div8sf:
+ case X86::BI__builtin_ia32_gather3div8si:
+ case X86::BI__builtin_ia32_gather3siv2df:
+ case X86::BI__builtin_ia32_gather3siv2di:
+ case X86::BI__builtin_ia32_gather3siv4df:
+ case X86::BI__builtin_ia32_gather3siv4di:
+ case X86::BI__builtin_ia32_gather3siv4sf:
+ case X86::BI__builtin_ia32_gather3siv4si:
+ case X86::BI__builtin_ia32_gather3siv8sf:
+ case X86::BI__builtin_ia32_gather3siv8si:
+ case X86::BI__builtin_ia32_gathersiv8df:
+ case X86::BI__builtin_ia32_gathersiv16sf:
+ case X86::BI__builtin_ia32_gatherdiv8df:
+ case X86::BI__builtin_ia32_gatherdiv16sf:
+ case X86::BI__builtin_ia32_gathersiv8di:
+ case X86::BI__builtin_ia32_gathersiv16si:
+ case X86::BI__builtin_ia32_gatherdiv8di:
+ case X86::BI__builtin_ia32_gatherdiv16si:
+ case X86::BI__builtin_ia32_scattersiv8df:
+ case X86::BI__builtin_ia32_scattersiv16sf:
+ case X86::BI__builtin_ia32_scatterdiv8df:
+ case X86::BI__builtin_ia32_scatterdiv16sf:
+ case X86::BI__builtin_ia32_scattersiv8di:
+ case X86::BI__builtin_ia32_scattersiv16si:
+ case X86::BI__builtin_ia32_scatterdiv8di:
+ case X86::BI__builtin_ia32_scatterdiv16si:
+ case X86::BI__builtin_ia32_scatterdiv2df:
+ case X86::BI__builtin_ia32_scatterdiv2di:
+ case X86::BI__builtin_ia32_scatterdiv4df:
+ case X86::BI__builtin_ia32_scatterdiv4di:
+ case X86::BI__builtin_ia32_scatterdiv4sf:
+ case X86::BI__builtin_ia32_scatterdiv4si:
+ case X86::BI__builtin_ia32_scatterdiv8sf:
+ case X86::BI__builtin_ia32_scatterdiv8si:
+ case X86::BI__builtin_ia32_scattersiv2df:
+ case X86::BI__builtin_ia32_scattersiv2di:
+ case X86::BI__builtin_ia32_scattersiv4df:
+ case X86::BI__builtin_ia32_scattersiv4di:
+ case X86::BI__builtin_ia32_scattersiv4sf:
+ case X86::BI__builtin_ia32_scattersiv4si:
+ case X86::BI__builtin_ia32_scattersiv8sf:
+ case X86::BI__builtin_ia32_scattersiv8si:
+ case X86::BI__builtin_ia32_vextractf128_pd256:
+ case X86::BI__builtin_ia32_vextractf128_ps256:
+ case X86::BI__builtin_ia32_vextractf128_si256:
+ case X86::BI__builtin_ia32_extract128i256:
+ case X86::BI__builtin_ia32_extractf64x4_mask:
+ case X86::BI__builtin_ia32_extractf32x4_mask:
+ case X86::BI__builtin_ia32_extracti64x4_mask:
+ case X86::BI__builtin_ia32_extracti32x4_mask:
+ case X86::BI__builtin_ia32_extractf32x8_mask:
+ case X86::BI__builtin_ia32_extracti32x8_mask:
+ case X86::BI__builtin_ia32_extractf32x4_256_mask:
+ case X86::BI__builtin_ia32_extracti32x4_256_mask:
+ case X86::BI__builtin_ia32_extractf64x2_256_mask:
+ case X86::BI__builtin_ia32_extracti64x2_256_mask:
+ case X86::BI__builtin_ia32_extractf64x2_512_mask:
+ case X86::BI__builtin_ia32_extracti64x2_512_mask:
+ case X86::BI__builtin_ia32_vinsertf128_pd256:
+ case X86::BI__builtin_ia32_vinsertf128_ps256:
+ case X86::BI__builtin_ia32_vinsertf128_si256:
+ case X86::BI__builtin_ia32_insert128i256:
+ case X86::BI__builtin_ia32_insertf64x4:
+ case X86::BI__builtin_ia32_insertf32x4:
+ case X86::BI__builtin_ia32_inserti64x4:
+ case X86::BI__builtin_ia32_inserti32x4:
+ case X86::BI__builtin_ia32_insertf32x8:
+ case X86::BI__builtin_ia32_inserti32x8:
+ case X86::BI__builtin_ia32_insertf32x4_256:
+ case X86::BI__builtin_ia32_inserti32x4_256:
+ case X86::BI__builtin_ia32_insertf64x2_256:
+ case X86::BI__builtin_ia32_inserti64x2_256:
+ case X86::BI__builtin_ia32_insertf64x2_512:
+ case X86::BI__builtin_ia32_inserti64x2_512:
+ case X86::BI__builtin_ia32_pmovqd512_mask:
+ case X86::BI__builtin_ia32_pmovwb512_mask:
+ case X86::BI__builtin_ia32_pblendw128:
+ case X86::BI__builtin_ia32_blendpd:
+ case X86::BI__builtin_ia32_blendps:
+ case X86::BI__builtin_ia32_blendpd256:
+ case X86::BI__builtin_ia32_blendps256:
+ case X86::BI__builtin_ia32_pblendw256:
+ case X86::BI__builtin_ia32_pblendd128:
+ case X86::BI__builtin_ia32_pblendd256:
+ case X86::BI__builtin_ia32_pshuflw:
+ case X86::BI__builtin_ia32_pshuflw256:
+ case X86::BI__builtin_ia32_pshuflw512:
+ case X86::BI__builtin_ia32_pshufhw:
+ case X86::BI__builtin_ia32_pshufhw256:
+ case X86::BI__builtin_ia32_pshufhw512:
+ case X86::BI__builtin_ia32_pshufd:
+ case X86::BI__builtin_ia32_pshufd256:
+ case X86::BI__builtin_ia32_pshufd512:
+ case X86::BI__builtin_ia32_vpermilpd:
+ case X86::BI__builtin_ia32_vpermilps:
+ case X86::BI__builtin_ia32_vpermilpd256:
+ case X86::BI__builtin_ia32_vpermilps256:
+ case X86::BI__builtin_ia32_vpermilpd512:
+ case X86::BI__builtin_ia32_vpermilps512:
+ case X86::BI__builtin_ia32_shufpd:
+ case X86::BI__builtin_ia32_shufpd256:
+ case X86::BI__builtin_ia32_shufpd512:
+ case X86::BI__builtin_ia32_shufps:
+ case X86::BI__builtin_ia32_shufps256:
+ case X86::BI__builtin_ia32_shufps512:
+ case X86::BI__builtin_ia32_permdi256:
+ case X86::BI__builtin_ia32_permdf256:
+ case X86::BI__builtin_ia32_permdi512:
+ case X86::BI__builtin_ia32_permdf512:
+ case X86::BI__builtin_ia32_palignr128:
+ case X86::BI__builtin_ia32_palignr256:
+ case X86::BI__builtin_ia32_palignr512:
+ case X86::BI__builtin_ia32_alignd128:
+ case X86::BI__builtin_ia32_alignd256:
+ case X86::BI__builtin_ia32_alignd512:
+ case X86::BI__builtin_ia32_alignq128:
+ case X86::BI__builtin_ia32_alignq256:
+ case X86::BI__builtin_ia32_alignq512:
+ case X86::BI__builtin_ia32_shuf_f32x4_256:
+ case X86::BI__builtin_ia32_shuf_f64x2_256:
+ case X86::BI__builtin_ia32_shuf_i32x4_256:
+ case X86::BI__builtin_ia32_shuf_i64x2_256:
+ case X86::BI__builtin_ia32_shuf_f32x4:
+ case X86::BI__builtin_ia32_shuf_f64x2:
+ case X86::BI__builtin_ia32_shuf_i32x4:
+ case X86::BI__builtin_ia32_shuf_i64x2:
+ case X86::BI__builtin_ia32_vperm2f128_pd256:
+ case X86::BI__builtin_ia32_vperm2f128_ps256:
+ case X86::BI__builtin_ia32_vperm2f128_si256:
+ case X86::BI__builtin_ia32_permti256:
+ case X86::BI__builtin_ia32_pslldqi128_byteshift:
+ case X86::BI__builtin_ia32_pslldqi256_byteshift:
+ case X86::BI__builtin_ia32_pslldqi512_byteshift:
+ case X86::BI__builtin_ia32_psrldqi128_byteshift:
+ case X86::BI__builtin_ia32_psrldqi256_byteshift:
+ case X86::BI__builtin_ia32_psrldqi512_byteshift:
+ case X86::BI__builtin_ia32_kshiftliqi:
+ case X86::BI__builtin_ia32_kshiftlihi:
+ case X86::BI__builtin_ia32_kshiftlisi:
+ case X86::BI__builtin_ia32_kshiftlidi:
+ case X86::BI__builtin_ia32_kshiftriqi:
+ case X86::BI__builtin_ia32_kshiftrihi:
+ case X86::BI__builtin_ia32_kshiftrisi:
+ case X86::BI__builtin_ia32_kshiftridi:
+ case X86::BI__builtin_ia32_vprotbi:
+ case X86::BI__builtin_ia32_vprotwi:
+ case X86::BI__builtin_ia32_vprotdi:
+ case X86::BI__builtin_ia32_vprotqi:
+ case X86::BI__builtin_ia32_prold128:
+ case X86::BI__builtin_ia32_prold256:
+ case X86::BI__builtin_ia32_prold512:
+ case X86::BI__builtin_ia32_prolq128:
+ case X86::BI__builtin_ia32_prolq256:
+ case X86::BI__builtin_ia32_prolq512:
+ case X86::BI__builtin_ia32_prord128:
+ case X86::BI__builtin_ia32_prord256:
+ case X86::BI__builtin_ia32_prord512:
+ case X86::BI__builtin_ia32_prorq128:
+ case X86::BI__builtin_ia32_prorq256:
+ case X86::BI__builtin_ia32_prorq512:
+ case X86::BI__builtin_ia32_selectb_128:
+ case X86::BI__builtin_ia32_selectb_256:
+ case X86::BI__builtin_ia32_selectb_512:
+ case X86::BI__builtin_ia32_selectw_128:
+ case X86::BI__builtin_ia32_selectw_256:
+ case X86::BI__builtin_ia32_selectw_512:
+ case X86::BI__builtin_ia32_selectd_128:
+ case X86::BI__builtin_ia32_selectd_256:
+ case X86::BI__builtin_ia32_selectd_512:
+ case X86::BI__builtin_ia32_selectq_128:
+ case X86::BI__builtin_ia32_selectq_256:
+ case X86::BI__builtin_ia32_selectq_512:
+ case X86::BI__builtin_ia32_selectph_128:
+ case X86::BI__builtin_ia32_selectph_256:
+ case X86::BI__builtin_ia32_selectph_512:
+ case X86::BI__builtin_ia32_selectpbf_128:
+ case X86::BI__builtin_ia32_selectpbf_256:
+ case X86::BI__builtin_ia32_selectpbf_512:
+ case X86::BI__builtin_ia32_selectps_128:
+ case X86::BI__builtin_ia32_selectps_256:
+ case X86::BI__builtin_ia32_selectps_512:
+ case X86::BI__builtin_ia32_selectpd_128:
+ case X86::BI__builtin_ia32_selectpd_256:
+ case X86::BI__builtin_ia32_selectpd_512:
+ case X86::BI__builtin_ia32_selectsh_128:
+ case X86::BI__builtin_ia32_selectsbf_128:
+ case X86::BI__builtin_ia32_selectss_128:
+ case X86::BI__builtin_ia32_selectsd_128:
+ case X86::BI__builtin_ia32_cmpb128_mask:
+ case X86::BI__builtin_ia32_cmpb256_mask:
+ case X86::BI__builtin_ia32_cmpb512_mask:
+ case X86::BI__builtin_ia32_cmpw128_mask:
+ case X86::BI__builtin_ia32_cmpw256_mask:
+ case X86::BI__builtin_ia32_cmpw512_mask:
+ case X86::BI__builtin_ia32_cmpd128_mask:
+ case X86::BI__builtin_ia32_cmpd256_mask:
+ case X86::BI__builtin_ia32_cmpd512_mask:
+ case X86::BI__builtin_ia32_cmpq128_mask:
+ case X86::BI__builtin_ia32_cmpq256_mask:
+ case X86::BI__builtin_ia32_cmpq512_mask:
+ case X86::BI__builtin_ia32_ucmpb128_mask:
+ case X86::BI__builtin_ia32_ucmpb256_mask:
+ case X86::BI__builtin_ia32_ucmpb512_mask:
+ case X86::BI__builtin_ia32_ucmpw128_mask:
+ case X86::BI__builtin_ia32_ucmpw256_mask:
+ case X86::BI__builtin_ia32_ucmpw512_mask:
+ case X86::BI__builtin_ia32_ucmpd128_mask:
+ case X86::BI__builtin_ia32_ucmpd256_mask:
+ case X86::BI__builtin_ia32_ucmpd512_mask:
+ case X86::BI__builtin_ia32_ucmpq128_mask:
+ case X86::BI__builtin_ia32_ucmpq256_mask:
+ case X86::BI__builtin_ia32_ucmpq512_mask:
+ case X86::BI__builtin_ia32_vpcomb:
+ case X86::BI__builtin_ia32_vpcomw:
+ case X86::BI__builtin_ia32_vpcomd:
+ case X86::BI__builtin_ia32_vpcomq:
+ case X86::BI__builtin_ia32_vpcomub:
+ case X86::BI__builtin_ia32_vpcomuw:
+ case X86::BI__builtin_ia32_vpcomud:
+ case X86::BI__builtin_ia32_vpcomuq:
+ case X86::BI__builtin_ia32_kortestcqi:
+ case X86::BI__builtin_ia32_kortestchi:
+ case X86::BI__builtin_ia32_kortestcsi:
+ case X86::BI__builtin_ia32_kortestcdi:
+ case X86::BI__builtin_ia32_kortestzqi:
+ case X86::BI__builtin_ia32_kortestzhi:
+ case X86::BI__builtin_ia32_kortestzsi:
+ case X86::BI__builtin_ia32_kortestzdi:
+ case X86::BI__builtin_ia32_ktestcqi:
+ case X86::BI__builtin_ia32_ktestzqi:
+ case X86::BI__builtin_ia32_ktestchi:
+ case X86::BI__builtin_ia32_ktestzhi:
+ case X86::BI__builtin_ia32_ktestcsi:
+ case X86::BI__builtin_ia32_ktestzsi:
+ case X86::BI__builtin_ia32_ktestcdi:
+ case X86::BI__builtin_ia32_ktestzdi:
+ case X86::BI__builtin_ia32_kaddqi:
+ case X86::BI__builtin_ia32_kaddhi:
+ case X86::BI__builtin_ia32_kaddsi:
+ case X86::BI__builtin_ia32_kadddi:
+ case X86::BI__builtin_ia32_kandqi:
+ case X86::BI__builtin_ia32_kandhi:
+ case X86::BI__builtin_ia32_kandsi:
+ case X86::BI__builtin_ia32_kanddi:
+ case X86::BI__builtin_ia32_kandnqi:
+ case X86::BI__builtin_ia32_kandnhi:
+ case X86::BI__builtin_ia32_kandnsi:
+ case X86::BI__builtin_ia32_kandndi:
+ case X86::BI__builtin_ia32_korqi:
+ case X86::BI__builtin_ia32_korhi:
+ case X86::BI__builtin_ia32_korsi:
+ case X86::BI__builtin_ia32_kordi:
+ case X86::BI__builtin_ia32_kxnorqi:
+ case X86::BI__builtin_ia32_kxnorhi:
+ case X86::BI__builtin_ia32_kxnorsi:
+ case X86::BI__builtin_ia32_kxnordi:
+ case X86::BI__builtin_ia32_kxorqi:
+ case X86::BI__builtin_ia32_kxorhi:
+ case X86::BI__builtin_ia32_kxorsi:
+ case X86::BI__builtin_ia32_kxordi:
+ case X86::BI__builtin_ia32_knotqi:
+ case X86::BI__builtin_ia32_knothi:
+ case X86::BI__builtin_ia32_knotsi:
+ case X86::BI__builtin_ia32_knotdi:
+ case X86::BI__builtin_ia32_kmovb:
+ case X86::BI__builtin_ia32_kmovw:
+ case X86::BI__builtin_ia32_kmovd:
+ case X86::BI__builtin_ia32_kmovq:
+ case X86::BI__builtin_ia32_kunpckdi:
+ case X86::BI__builtin_ia32_kunpcksi:
+ case X86::BI__builtin_ia32_kunpckhi:
+ case X86::BI__builtin_ia32_sqrtsh_round_mask:
+ case X86::BI__builtin_ia32_sqrtsd_round_mask:
+ case X86::BI__builtin_ia32_sqrtss_round_mask:
+ case X86::BI__builtin_ia32_sqrtpd256:
+ case X86::BI__builtin_ia32_sqrtpd:
+ case X86::BI__builtin_ia32_sqrtps256:
+ case X86::BI__builtin_ia32_sqrtps:
+ case X86::BI__builtin_ia32_sqrtph256:
+ case X86::BI__builtin_ia32_sqrtph:
+ case X86::BI__builtin_ia32_sqrtph512:
+ case X86::BI__builtin_ia32_vsqrtbf16256:
+ case X86::BI__builtin_ia32_vsqrtbf16:
+ case X86::BI__builtin_ia32_vsqrtbf16512:
+ case X86::BI__builtin_ia32_sqrtps512:
+ case X86::BI__builtin_ia32_sqrtpd512:
+ case X86::BI__builtin_ia32_pmuludq128:
+ case X86::BI__builtin_ia32_pmuludq256:
+ case X86::BI__builtin_ia32_pmuludq512:
+ case X86::BI__builtin_ia32_pmuldq128:
+ case X86::BI__builtin_ia32_pmuldq256:
+ case X86::BI__builtin_ia32_pmuldq512:
+ case X86::BI__builtin_ia32_pternlogd512_mask:
+ case X86::BI__builtin_ia32_pternlogq512_mask:
+ case X86::BI__builtin_ia32_pternlogd128_mask:
+ case X86::BI__builtin_ia32_pternlogd256_mask:
+ case X86::BI__builtin_ia32_pternlogq128_mask:
+ case X86::BI__builtin_ia32_pternlogq256_mask:
+ case X86::BI__builtin_ia32_pternlogd512_maskz:
+ case X86::BI__builtin_ia32_pternlogq512_maskz:
+ case X86::BI__builtin_ia32_pternlogd128_maskz:
+ case X86::BI__builtin_ia32_pternlogd256_maskz:
+ case X86::BI__builtin_ia32_pternlogq128_maskz:
+ case X86::BI__builtin_ia32_pternlogq256_maskz:
+ case X86::BI__builtin_ia32_vpshldd128:
+ case X86::BI__builtin_ia32_vpshldd256:
+ case X86::BI__builtin_ia32_vpshldd512:
+ case X86::BI__builtin_ia32_vpshldq128:
+ case X86::BI__builtin_ia32_vpshldq256:
+ case X86::BI__builtin_ia32_vpshldq512:
+ case X86::BI__builtin_ia32_vpshldw128:
+ case X86::BI__builtin_ia32_vpshldw256:
+ case X86::BI__builtin_ia32_vpshldw512:
+ case X86::BI__builtin_ia32_vpshrdd128:
+ case X86::BI__builtin_ia32_vpshrdd256:
+ case X86::BI__builtin_ia32_vpshrdd512:
+ case X86::BI__builtin_ia32_vpshrdq128:
+ case X86::BI__builtin_ia32_vpshrdq256:
+ case X86::BI__builtin_ia32_vpshrdq512:
+ case X86::BI__builtin_ia32_vpshrdw128:
+ case X86::BI__builtin_ia32_vpshrdw256:
+ case X86::BI__builtin_ia32_vpshrdw512:
+ case X86::BI__builtin_ia32_reduce_fadd_pd512:
+ case X86::BI__builtin_ia32_reduce_fadd_ps512:
+ case X86::BI__builtin_ia32_reduce_fadd_ph512:
+ case X86::BI__builtin_ia32_reduce_fadd_ph256:
+ case X86::BI__builtin_ia32_reduce_fadd_ph128:
+ case X86::BI__builtin_ia32_reduce_fmul_pd512:
+ case X86::BI__builtin_ia32_reduce_fmul_ps512:
+ case X86::BI__builtin_ia32_reduce_fmul_ph512:
+ case X86::BI__builtin_ia32_reduce_fmul_ph256:
+ case X86::BI__builtin_ia32_reduce_fmul_ph128:
+ case X86::BI__builtin_ia32_reduce_fmax_pd512:
+ case X86::BI__builtin_ia32_reduce_fmax_ps512:
+ case X86::BI__builtin_ia32_reduce_fmax_ph512:
+ case X86::BI__builtin_ia32_reduce_fmax_ph256:
+ case X86::BI__builtin_ia32_reduce_fmax_ph128:
+ case X86::BI__builtin_ia32_reduce_fmin_pd512:
+ case X86::BI__builtin_ia32_reduce_fmin_ps512:
+ case X86::BI__builtin_ia32_reduce_fmin_ph512:
+ case X86::BI__builtin_ia32_reduce_fmin_ph256:
+ case X86::BI__builtin_ia32_reduce_fmin_ph128:
+ case X86::BI__builtin_ia32_rdrand16_step:
+ case X86::BI__builtin_ia32_rdrand32_step:
+ case X86::BI__builtin_ia32_rdrand64_step:
+ case X86::BI__builtin_ia32_rdseed16_step:
+ case X86::BI__builtin_ia32_rdseed32_step:
+ case X86::BI__builtin_ia32_rdseed64_step:
+ case X86::BI__builtin_ia32_addcarryx_u32:
+ case X86::BI__builtin_ia32_addcarryx_u64:
+ case X86::BI__builtin_ia32_subborrow_u32:
+ case X86::BI__builtin_ia32_subborrow_u64:
+ case X86::BI__builtin_ia32_fpclassps128_mask:
+ case X86::BI__builtin_ia32_fpclassps256_mask:
+ case X86::BI__builtin_ia32_fpclassps512_mask:
+ case X86::BI__builtin_ia32_vfpclassbf16128_mask:
+ case X86::BI__builtin_ia32_vfpclassbf16256_mask:
+ case X86::BI__builtin_ia32_vfpclassbf16512_mask:
+ case X86::BI__builtin_ia32_fpclassph128_mask:
+ case X86::BI__builtin_ia32_fpclassph256_mask:
+ case X86::BI__builtin_ia32_fpclassph512_mask:
+ case X86::BI__builtin_ia32_fpclasspd128_mask:
+ case X86::BI__builtin_ia32_fpclasspd256_mask:
+ case X86::BI__builtin_ia32_fpclasspd512_mask:
+ case X86::BI__builtin_ia32_vp2intersect_q_512:
+ case X86::BI__builtin_ia32_vp2intersect_q_256:
+ case X86::BI__builtin_ia32_vp2intersect_q_128:
+ case X86::BI__builtin_ia32_vp2intersect_d_512:
+ case X86::BI__builtin_ia32_vp2intersect_d_256:
+ case X86::BI__builtin_ia32_vp2intersect_d_128:
+ case X86::BI__builtin_ia32_vpmultishiftqb128:
+ case X86::BI__builtin_ia32_vpmultishiftqb256:
+ case X86::BI__builtin_ia32_vpmultishiftqb512:
+ case X86::BI__builtin_ia32_vpshufbitqmb128_mask:
+ case X86::BI__builtin_ia32_vpshufbitqmb256_mask:
+ case X86::BI__builtin_ia32_vpshufbitqmb512_mask:
+ case X86::BI__builtin_ia32_cmpeqps:
+ case X86::BI__builtin_ia32_cmpeqpd:
+ case X86::BI__builtin_ia32_cmpltps:
+ case X86::BI__builtin_ia32_cmpltpd:
+ case X86::BI__builtin_ia32_cmpleps:
+ case X86::BI__builtin_ia32_cmplepd:
+ case X86::BI__builtin_ia32_cmpunordps:
+ case X86::BI__builtin_ia32_cmpunordpd:
+ case X86::BI__builtin_ia32_cmpneqps:
+ case X86::BI__builtin_ia32_cmpneqpd:
+ case X86::BI__builtin_ia32_cmpnltps:
+ case X86::BI__builtin_ia32_cmpnltpd:
+ case X86::BI__builtin_ia32_cmpnleps:
+ case X86::BI__builtin_ia32_cmpnlepd:
+ case X86::BI__builtin_ia32_cmpordps:
+ case X86::BI__builtin_ia32_cmpordpd:
+ case X86::BI__builtin_ia32_cmpph128_mask:
+ case X86::BI__builtin_ia32_cmpph256_mask:
+ case X86::BI__builtin_ia32_cmpph512_mask:
+ case X86::BI__builtin_ia32_cmpps128_mask:
+ case X86::BI__builtin_ia32_cmpps256_mask:
+ case X86::BI__builtin_ia32_cmpps512_mask:
+ case X86::BI__builtin_ia32_cmppd128_mask:
+ case X86::BI__builtin_ia32_cmppd256_mask:
+ case X86::BI__builtin_ia32_cmppd512_mask:
+ case X86::BI__builtin_ia32_vcmpbf16512_mask:
+ case X86::BI__builtin_ia32_vcmpbf16256_mask:
+ case X86::BI__builtin_ia32_vcmpbf16128_mask:
+ case X86::BI__builtin_ia32_cmpps:
+ case X86::BI__builtin_ia32_cmpps256:
+ case X86::BI__builtin_ia32_cmppd:
+ case X86::BI__builtin_ia32_cmppd256:
+ case X86::BI__builtin_ia32_cmpeqss:
+ case X86::BI__builtin_ia32_cmpltss:
+ case X86::BI__builtin_ia32_cmpless:
+ case X86::BI__builtin_ia32_cmpunordss:
+ case X86::BI__builtin_ia32_cmpneqss:
+ case X86::BI__builtin_ia32_cmpnltss:
+ case X86::BI__builtin_ia32_cmpnless:
+ case X86::BI__builtin_ia32_cmpordss:
+ case X86::BI__builtin_ia32_cmpeqsd:
+ case X86::BI__builtin_ia32_cmpltsd:
+ case X86::BI__builtin_ia32_cmplesd:
+ case X86::BI__builtin_ia32_cmpunordsd:
+ case X86::BI__builtin_ia32_cmpneqsd:
+ case X86::BI__builtin_ia32_cmpnltsd:
+ case X86::BI__builtin_ia32_cmpnlesd:
+ case X86::BI__builtin_ia32_cmpordsd:
+ case X86::BI__builtin_ia32_vcvtph2ps_mask:
+ case X86::BI__builtin_ia32_vcvtph2ps256_mask:
+ case X86::BI__builtin_ia32_vcvtph2ps512_mask:
+ case X86::BI__builtin_ia32_cvtneps2bf16_128_mask:
+ case X86::BI__builtin_ia32_cvtsbf162ss_32:
+ case X86::BI__builtin_ia32_cvtneps2bf16_256_mask:
+ case X86::BI__builtin_ia32_cvtneps2bf16_512_mask:
+ case X86::BI__cpuid:
+ case X86::BI__cpuidex:
+ case X86::BI__emul:
+ case X86::BI__emulu:
+ case X86::BI__mulh:
+ case X86::BI__umulh:
+ case X86::BI_mul128:
+ case X86::BI_umul128:
+ case X86::BI__faststorefence:
+ case X86::BI__shiftleft128:
+ case X86::BI__shiftright128:
+ case X86::BI_ReadWriteBarrier:
+ case X86::BI_ReadBarrier:
+ case X86::BI_WriteBarrier:
+ case X86::BI_AddressOfReturnAddress:
+ case X86::BI__stosb:
+ case X86::BI__builtin_ia32_t2rpntlvwz0_internal:
+ case X86::BI__builtin_ia32_t2rpntlvwz0rs_internal:
+ case X86::BI__builtin_ia32_t2rpntlvwz0t1_internal:
+ case X86::BI__builtin_ia32_t2rpntlvwz0rst1_internal:
+ case X86::BI__builtin_ia32_t2rpntlvwz1_internal:
+ case X86::BI__builtin_ia32_t2rpntlvwz1rs_internal:
+ case X86::BI__builtin_ia32_t2rpntlvwz1t1_internal:
+ case X86::BI__builtin_ia32_t2rpntlvwz1rst1_internal:
+ case X86::BI__ud2:
+ case X86::BI__int2c:
+ case X86::BI__readfsbyte:
+ case X86::BI__readfsword:
+ case X86::BI__readfsdword:
+ case X86::BI__readfsqword:
+ case X86::BI__readgsbyte:
+ case X86::BI__readgsword:
+ case X86::BI__readgsdword:
+ case X86::BI__readgsqword:
+ case X86::BI__builtin_ia32_encodekey128_u32:
+ case X86::BI__builtin_ia32_encodekey256_u32:
+ case X86::BI__builtin_ia32_aesenc128kl_u8:
+ case X86::BI__builtin_ia32_aesdec128kl_u8:
+ case X86::BI__builtin_ia32_aesenc256kl_u8:
+ case X86::BI__builtin_ia32_aesdec256kl_u8:
+ case X86::BI__builtin_ia32_aesencwide128kl_u8:
+ case X86::BI__builtin_ia32_aesdecwide128kl_u8:
+ case X86::BI__builtin_ia32_aesencwide256kl_u8:
+ case X86::BI__builtin_ia32_aesdecwide256kl_u8:
+ case X86::BI__builtin_ia32_vfcmaddcph512_mask:
+ case X86::BI__builtin_ia32_vfmaddcph512_mask:
+ case X86::BI__builtin_ia32_vfcmaddcsh_round_mask:
+ case X86::BI__builtin_ia32_vfmaddcsh_round_mask:
+ case X86::BI__builtin_ia32_vfcmaddcsh_round_mask3:
+ case X86::BI__builtin_ia32_vfmaddcsh_round_mask3:
+ case X86::BI__builtin_ia32_prefetchi:
+ cgm.errorNYI(e->getSourceRange(),
+ std::string("unimplemented X86 builtin call: ") +
+ getContext().BuiltinInfo.getName(builtinID));
+ return {};
+ }
+}
diff --git a/clang/lib/CIR/CodeGen/CIRGenCXXABI.cpp b/clang/lib/CIR/CodeGen/CIRGenCXXABI.cpp
index df42af8..eef3739 100644
--- a/clang/lib/CIR/CodeGen/CIRGenCXXABI.cpp
+++ b/clang/lib/CIR/CodeGen/CIRGenCXXABI.cpp
@@ -37,6 +37,10 @@ CIRGenCXXABI::AddedStructorArgCounts CIRGenCXXABI::addImplicitConstructorArgs(
addedArgs.suffix.size());
}
+CatchTypeInfo CIRGenCXXABI::getCatchAllTypeInfo() {
+ return CatchTypeInfo{{}, 0};
+}
+
void CIRGenCXXABI::buildThisParam(CIRGenFunction &cgf,
FunctionArgList &params) {
const auto *md = cast<CXXMethodDecl>(cgf.curGD.getDecl());
@@ -81,8 +85,7 @@ CharUnits CIRGenCXXABI::getArrayCookieSize(const CXXNewExpr *e) {
if (!requiresArrayCookie(e))
return CharUnits::Zero();
- cgm.errorNYI(e->getSourceRange(), "CIRGenCXXABI::getArrayCookieSize");
- return CharUnits::Zero();
+ return getArrayCookieSizeImpl(e->getAllocatedType());
}
bool CIRGenCXXABI::requiresArrayCookie(const CXXNewExpr *e) {
diff --git a/clang/lib/CIR/CodeGen/CIRGenCXXABI.h b/clang/lib/CIR/CodeGen/CIRGenCXXABI.h
index 6d3741c4..c78f9b0 100644
--- a/clang/lib/CIR/CodeGen/CIRGenCXXABI.h
+++ b/clang/lib/CIR/CodeGen/CIRGenCXXABI.h
@@ -15,6 +15,7 @@
#define LLVM_CLANG_LIB_CIR_CIRGENCXXABI_H
#include "CIRGenCall.h"
+#include "CIRGenCleanup.h"
#include "CIRGenFunction.h"
#include "CIRGenModule.h"
@@ -155,6 +156,8 @@ public:
/// Loads the incoming C++ this pointer as it was passed by the caller.
mlir::Value loadIncomingCXXThis(CIRGenFunction &cgf);
+ virtual CatchTypeInfo getCatchAllTypeInfo();
+
/// Get the implicit (second) parameter that comes after the "this" pointer,
/// or nullptr if there isn't one.
virtual mlir::Value getCXXDestructorImplicitParam(CIRGenFunction &cgf,
@@ -299,8 +302,28 @@ public:
/// - non-array allocations never need a cookie
/// - calls to \::operator new(size_t, void*) never need a cookie
///
- /// \param E - the new-expression being allocated.
+ /// \param e - the new-expression being allocated.
virtual CharUnits getArrayCookieSize(const CXXNewExpr *e);
+
+ /// Initialize the array cookie for the given allocation.
+ ///
+ /// \param newPtr - a char* which is the presumed-non-null
+ /// return value of the allocation function
+ /// \param numElements - the computed number of elements,
+ /// potentially collapsed from the multidimensional array case;
+ /// always a size_t
+ /// \param elementType - the base element allocated type,
+ /// i.e. the allocated type after stripping all array types
+ virtual Address initializeArrayCookie(CIRGenFunction &cgf, Address newPtr,
+ mlir::Value numElements,
+ const CXXNewExpr *e,
+ QualType elementType) = 0;
+
+protected:
+ /// Returns the extra size required in order to store the array
+ /// cookie for the given type. Assumes that an array cookie is
+ /// required.
+ virtual CharUnits getArrayCookieSizeImpl(QualType elementType) = 0;
};
/// Creates an Itanium-family ABI
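[Editor's note: for context, a minimal C++ sketch of when an array cookie is required under the Itanium ABI; S and make are hypothetical names:

struct S { ~S(); };
S *make(unsigned long n) {
  // delete[] must call ~S() exactly n times, so the ABI prepends a
  // cookie holding the element count to the allocation;
  // getArrayCookieSize accounts for that extra space.
  return new S[n];
}]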
diff --git a/clang/lib/CIR/CodeGen/CIRGenCleanup.cpp b/clang/lib/CIR/CodeGen/CIRGenCleanup.cpp
index 8700697..851328a 100644
--- a/clang/lib/CIR/CodeGen/CIRGenCleanup.cpp
+++ b/clang/lib/CIR/CodeGen/CIRGenCleanup.cpp
@@ -28,6 +28,46 @@ using namespace clang::CIRGen;
// CIRGenFunction cleanup related
//===----------------------------------------------------------------------===//
+/// Build an unconditional branch to the lexical scope cleanup block,
+/// or to the labeled block if it has already been resolved.
+///
+/// Track, on a per-scope basis, the gotos we need to fix up later.
+cir::BrOp CIRGenFunction::emitBranchThroughCleanup(mlir::Location loc,
+ JumpDest dest) {
+  // Insert a branch: to the cleanup block (unresolved) or to the already
+  // materialized label. Keep track of unresolved gotos.
+ assert(dest.getBlock() && "assumes incoming valid dest");
+ auto brOp = cir::BrOp::create(builder, loc, dest.getBlock());
+
+ // Calculate the innermost active normal cleanup.
+ EHScopeStack::stable_iterator topCleanup =
+ ehStack.getInnermostActiveNormalCleanup();
+
+ // If we're not in an active normal cleanup scope, or if the
+ // destination scope is within the innermost active normal cleanup
+ // scope, we don't need to worry about fixups.
+ if (topCleanup == ehStack.stable_end() ||
+ topCleanup.encloses(dest.getScopeDepth())) { // works for invalid
+ // FIXME(cir): should we clear insertion point here?
+ return brOp;
+ }
+
+ // If we can't resolve the destination cleanup scope, just add this
+ // to the current cleanup scope as a branch fixup.
+ if (!dest.getScopeDepth().isValid()) {
+ BranchFixup &fixup = ehStack.addBranchFixup();
+ fixup.destination = dest.getBlock();
+ fixup.destinationIndex = dest.getDestIndex();
+ fixup.initialBranch = brOp;
+ fixup.optimisticBranchBlock = nullptr;
+ // FIXME(cir): should we clear insertion point here?
+ return brOp;
+ }
+
+ cgm.errorNYI(loc, "emitBranchThroughCleanup: valid destination scope depth");
+ return brOp;
+}
+
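[Editor's note: a minimal C++ sketch of the situation this handles; Guard is a hypothetical type with a nontrivial destructor. A branch out of a scope must first run the scope's cleanups, and if the destination scope cannot yet be resolved, the branch is recorded as a fixup:

struct Guard { ~Guard(); };
void f(bool b) {
  while (true) {
    Guard g;   // pushes a normal cleanup for ~Guard()
    if (b)
      break;   // leaves the scope: emitted via emitBranchThroughCleanup
  }            // so ~Guard() runs before control reaches the exit
}]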
/// Emits all the code to cause the given temporary to be cleaned up.
void CIRGenFunction::emitCXXTemporary(const CXXTemporary *temporary,
QualType tempType, Address ptr) {
@@ -40,6 +80,19 @@ void CIRGenFunction::emitCXXTemporary(const CXXTemporary *temporary,
void EHScopeStack::Cleanup::anchor() {}
+EHScopeStack::stable_iterator
+EHScopeStack::getInnermostActiveNormalCleanup() const {
+ stable_iterator si = getInnermostNormalCleanup();
+ stable_iterator se = stable_end();
+ while (si != se) {
+ EHCleanupScope &cleanup = llvm::cast<EHCleanupScope>(*find(si));
+ if (cleanup.isActive())
+ return si;
+ si = cleanup.getEnclosingNormalCleanup();
+ }
+ return stable_end();
+}
+
/// Push an entry of the given size onto this protected-scope stack.
char *EHScopeStack::allocate(size_t size) {
size = llvm::alignTo(size, ScopeStackAlignment);
@@ -75,14 +128,30 @@ void EHScopeStack::deallocate(size_t size) {
startOfData += llvm::alignTo(size, ScopeStackAlignment);
}
+/// Remove any 'null' fixups on the stack. However, we can't pop more
+/// fixups than the fixup depth on the innermost normal cleanup, or
+/// else fixups that we try to add to that cleanup will end up in the
+/// wrong place. We *could* try to shrink fixup depths, but that's
+/// actually a lot of work for little benefit.
+void EHScopeStack::popNullFixups() {
+ // We expect this to only be called when there's still an innermost
+ // normal cleanup; otherwise there really shouldn't be any fixups.
+ cgf->cgm.errorNYI("popNullFixups");
+}
+
void *EHScopeStack::pushCleanup(CleanupKind kind, size_t size) {
char *buffer = allocate(EHCleanupScope::getSizeForCleanupSize(size));
+ bool isNormalCleanup = kind & NormalCleanup;
bool isEHCleanup = kind & EHCleanup;
bool isLifetimeMarker = kind & LifetimeMarker;
assert(!cir::MissingFeatures::innermostEHScope());
- EHCleanupScope *scope = new (buffer) EHCleanupScope(size);
+ EHCleanupScope *scope = new (buffer)
+ EHCleanupScope(size, branchFixups.size(), innermostNormalCleanup);
+
+ if (isNormalCleanup)
+ innermostNormalCleanup = stable_begin();
if (isLifetimeMarker)
cgf->cgm.errorNYI("push lifetime marker cleanup");
@@ -100,12 +169,30 @@ void EHScopeStack::popCleanup() {
assert(isa<EHCleanupScope>(*begin()));
EHCleanupScope &cleanup = cast<EHCleanupScope>(*begin());
+ innermostNormalCleanup = cleanup.getEnclosingNormalCleanup();
deallocate(cleanup.getAllocatedSize());
// Destroy the cleanup.
cleanup.destroy();
- assert(!cir::MissingFeatures::ehCleanupBranchFixups());
+ // Check whether we can shrink the branch-fixups stack.
+ if (!branchFixups.empty()) {
+ // If we no longer have any normal cleanups, all the fixups are
+ // complete.
+ if (!hasNormalCleanups()) {
+ branchFixups.clear();
+ } else {
+ // Otherwise we can still trim out unnecessary nulls.
+ popNullFixups();
+ }
+ }
+}
+
+EHCatchScope *EHScopeStack::pushCatch(unsigned numHandlers) {
+ char *buffer = allocate(EHCatchScope::getSizeForNumHandlers(numHandlers));
+ assert(!cir::MissingFeatures::innermostEHScope());
+ EHCatchScope *scope = new (buffer) EHCatchScope(numHandlers);
+ return scope;
}
static void emitCleanup(CIRGenFunction &cgf, EHScopeStack::Cleanup *cleanup) {
@@ -116,6 +203,18 @@ static void emitCleanup(CIRGenFunction &cgf, EHScopeStack::Cleanup *cleanup) {
assert(cgf.haveInsertPoint() && "cleanup ended with no insertion point?");
}
+static mlir::Block *createNormalEntry(CIRGenFunction &cgf,
+ EHCleanupScope &scope) {
+ assert(scope.isNormalCleanup());
+ mlir::Block *entry = scope.getNormalBlock();
+ if (!entry) {
+ mlir::OpBuilder::InsertionGuard guard(cgf.getBuilder());
+ entry = cgf.curLexScope->getOrCreateCleanupBlock(cgf.getBuilder());
+ scope.setNormalBlock(entry);
+ }
+ return entry;
+}
+
/// Pops a cleanup block. If the block includes a normal cleanup, the
/// current insertion point is threaded through the cleanup, as are
/// any branch fixups on the cleanup.
@@ -123,17 +222,21 @@ void CIRGenFunction::popCleanupBlock() {
assert(!ehStack.empty() && "cleanup stack is empty!");
assert(isa<EHCleanupScope>(*ehStack.begin()) && "top not a cleanup!");
EHCleanupScope &scope = cast<EHCleanupScope>(*ehStack.begin());
+ assert(scope.getFixupDepth() <= ehStack.getNumBranchFixups());
// Remember activation information.
bool isActive = scope.isActive();
- assert(!cir::MissingFeatures::ehCleanupBranchFixups());
+ // - whether there are branch fix-ups through this cleanup
+ unsigned fixupDepth = scope.getFixupDepth();
+ bool hasFixups = ehStack.getNumBranchFixups() != fixupDepth;
// - whether there's a fallthrough
mlir::Block *fallthroughSource = builder.getInsertionBlock();
bool hasFallthrough = fallthroughSource != nullptr && isActive;
- bool requiresNormalCleanup = scope.isNormalCleanup() && hasFallthrough;
+ bool requiresNormalCleanup =
+ scope.isNormalCleanup() && (hasFixups || hasFallthrough);
// If we don't need the cleanup at all, we're done.
assert(!cir::MissingFeatures::ehCleanupScopeRequiresEHCleanup());
@@ -168,9 +271,119 @@ void CIRGenFunction::popCleanupBlock() {
assert(!cir::MissingFeatures::ehCleanupFlags());
- ehStack.popCleanup();
- scope.markEmitted();
- emitCleanup(*this, cleanup);
+ // If we have a fallthrough and no other need for the cleanup,
+ // emit it directly.
+ if (hasFallthrough && !hasFixups) {
+ assert(!cir::MissingFeatures::ehCleanupScopeRequiresEHCleanup());
+ ehStack.popCleanup();
+ scope.markEmitted();
+ emitCleanup(*this, cleanup);
+ } else {
+ // Otherwise, the best approach is to thread everything through
+ // the cleanup block and then try to clean up after ourselves.
+
+ // Force the entry block to exist.
+ mlir::Block *normalEntry = createNormalEntry(*this, scope);
+
+ // I. Set up the fallthrough edge in.
+ mlir::OpBuilder::InsertPoint savedInactiveFallthroughIP;
+
+ // If there's a fallthrough, we need to store the cleanup
+ // destination index. For fall-throughs this is always zero.
+ if (hasFallthrough) {
+ assert(!cir::MissingFeatures::ehCleanupHasPrebranchedFallthrough());
+
+ } else if (fallthroughSource) {
+ // Otherwise, save and clear the IP if we don't have fallthrough
+ // because the cleanup is inactive.
+ assert(!isActive && "source without fallthrough for active cleanup");
+ savedInactiveFallthroughIP = builder.saveInsertionPoint();
+ }
+
+ // II. Emit the entry block. This implicitly branches to it if
+ // we have fallthrough. All the fixups and existing branches
+ // should already be branched to it.
+ builder.setInsertionPointToEnd(normalEntry);
+
+ // intercept normal cleanup to mark SEH scope end
+ assert(!cir::MissingFeatures::ehCleanupScopeRequiresEHCleanup());
+
+ // III. Figure out where we're going and build the cleanup
+ // epilogue.
+ bool hasEnclosingCleanups =
+ (scope.getEnclosingNormalCleanup() != ehStack.stable_end());
+
+ // Compute the branch-through dest if we need it:
+ // - if there are branch-throughs threaded through the scope
+ // - if fall-through is a branch-through
+ // - if there are fixups that will be optimistically forwarded
+ // to the enclosing cleanup
+ assert(!cir::MissingFeatures::cleanupBranchThrough());
+ if (hasFixups && hasEnclosingCleanups)
+ cgm.errorNYI("cleanup branch-through dest");
+
+ mlir::Block *fallthroughDest = nullptr;
+
+ // If there's exactly one branch-after and no other threads,
+ // we can route it without a switch.
+ // Skip for SEH, since ExitSwitch is used to generate code to indicate
+ // abnormal termination. (SEH: Except _leave and fall-through at
+ // the end, all other exits in a _try (return/goto/continue/break)
+ // are considered as abnormal terminations, using NormalCleanupDestSlot
+ // to indicate abnormal termination)
+ assert(!cir::MissingFeatures::cleanupBranchThrough());
+ assert(!cir::MissingFeatures::ehCleanupScopeRequiresEHCleanup());
+
+ // IV. Pop the cleanup and emit it.
+ scope.markEmitted();
+ ehStack.popCleanup();
+ assert(ehStack.hasNormalCleanups() == hasEnclosingCleanups);
+
+ emitCleanup(*this, cleanup);
+
+ // Append the prepared cleanup prologue from above.
+ assert(!cir::MissingFeatures::cleanupAppendInsts());
+
+ // Optimistically hope that any fixups will continue falling through.
+ if (fixupDepth != ehStack.getNumBranchFixups())
+ cgm.errorNYI("cleanup fixup depth mismatch");
+
+ // V. Set up the fallthrough edge out.
+
+ // Case 1: a fallthrough source exists but doesn't branch to the
+ // cleanup because the cleanup is inactive.
+ if (!hasFallthrough && fallthroughSource) {
+ // Prebranched fallthrough was forwarded earlier.
+ // Non-prebranched fallthrough doesn't need to be forwarded.
+ // Either way, all we need to do is restore the IP we cleared before.
+ assert(!isActive);
+ cgm.errorNYI("cleanup inactive fallthrough");
+
+ // Case 2: a fallthrough source exists and should branch to the
+ // cleanup, but we're not supposed to branch through to the next
+ // cleanup.
+ } else if (hasFallthrough && fallthroughDest) {
+ cgm.errorNYI("cleanup fallthrough destination");
+
+ // Case 3: a fallthrough source exists and should branch to the
+ // cleanup and then through to the next.
+ } else if (hasFallthrough) {
+ // Everything is already set up for this.
+
+ // Case 4: no fallthrough source exists.
+ } else {
+ // FIXME(cir): should we clear insertion point here?
+ }
+
+ // VI. Assorted cleaning.
+
+ // Check whether we can merge NormalEntry into a single predecessor.
+ // This might invalidate (non-IR) pointers to NormalEntry.
+ //
+ // If it did invalidate those pointers, and normalEntry was the same
+ // as NormalExit, go back and patch up the fixups.
+ assert(!cir::MissingFeatures::simplifyCleanupEntry());
+ }
}
/// Pops cleanup blocks until the given savepoint is reached.
diff --git a/clang/lib/CIR/CodeGen/CIRGenCleanup.h b/clang/lib/CIR/CodeGen/CIRGenCleanup.h
index 30f5607..9acf8b1 100644
--- a/clang/lib/CIR/CodeGen/CIRGenCleanup.h
+++ b/clang/lib/CIR/CodeGen/CIRGenCleanup.h
@@ -20,6 +20,13 @@
namespace clang::CIRGen {
+/// The MS C++ ABI needs a pointer to RTTI data plus some flags to describe the
+/// type of a catch handler, so we use this wrapper.
+struct CatchTypeInfo {
+ mlir::TypedAttr rtti;
+ unsigned flags;
+};
+
/// A protected scope for zero-cost EH handling.
class EHScope {
class CommonBitFields {
@@ -29,6 +36,12 @@ class EHScope {
enum { NumCommonBits = 3 };
protected:
+ class CatchBitFields {
+ friend class EHCatchScope;
+ unsigned : NumCommonBits;
+ unsigned numHandlers : 32 - NumCommonBits;
+ };
+
class CleanupBitFields {
friend class EHCleanupScope;
unsigned : NumCommonBits;
@@ -58,6 +71,7 @@ protected:
union {
CommonBitFields commonBits;
+ CatchBitFields catchBits;
CleanupBitFields cleanupBits;
};
@@ -67,11 +81,88 @@ public:
EHScope(Kind kind) { commonBits.kind = kind; }
Kind getKind() const { return static_cast<Kind>(commonBits.kind); }
+
+ bool mayThrow() const {
+ // Traditional LLVM codegen also checks for `!block->use_empty()`, but
+    // in CIRGen the block content is not important; it is just used as a
+    // way to signal `hasEHBranches`.
+ assert(!cir::MissingFeatures::ehstackBranches());
+ return false;
+ }
+};
+
+/// A scope which attempts to handle some, possibly all, types of
+/// exceptions.
+///
+/// Objective C \@finally blocks are represented using a cleanup scope
+/// after the catch scope.
+
+class EHCatchScope : public EHScope {
+ // In effect, we have a flexible array member
+ // Handler Handlers[0];
+ // But that's only standard in C99, not C++, so we have to do
+ // annoying pointer arithmetic instead.
+
+public:
+ struct Handler {
+    /// A type info value, or a null MLIR attribute for a catch-all.
+ CatchTypeInfo type;
+
+ /// The catch handler for this type.
+ mlir::Region *region;
+ };
+
+private:
+ friend class EHScopeStack;
+
+ Handler *getHandlers() { return reinterpret_cast<Handler *>(this + 1); }
+
+public:
+ static size_t getSizeForNumHandlers(unsigned n) {
+ return sizeof(EHCatchScope) + n * sizeof(Handler);
+ }
+
+ EHCatchScope(unsigned numHandlers) : EHScope(Catch) {
+ catchBits.numHandlers = numHandlers;
+ assert(catchBits.numHandlers == numHandlers && "NumHandlers overflow?");
+ }
+
+ unsigned getNumHandlers() const { return catchBits.numHandlers; }
+
+ void setHandler(unsigned i, CatchTypeInfo type, mlir::Region *region) {
+ assert(i < getNumHandlers());
+ getHandlers()[i].type = type;
+ getHandlers()[i].region = region;
+ }
+
+ // Clear all handler blocks.
+ // FIXME: it's better to always call clearHandlerBlocks in DTOR and have a
+ // 'takeHandler' or some such function which removes ownership from the
+ // EHCatchScope object if the handlers should live longer than EHCatchScope.
+ void clearHandlerBlocks() {
+ // The blocks are owned by TryOp, nothing to delete.
+ }
+
+ static bool classof(const EHScope *scope) {
+ return scope->getKind() == Catch;
+ }
};
/// A cleanup scope which generates the cleanup blocks lazily.
class alignas(EHScopeStack::ScopeStackAlignment) EHCleanupScope
: public EHScope {
+ /// The nearest normal cleanup scope enclosing this one.
+ EHScopeStack::stable_iterator enclosingNormal;
+
+ /// The dual entry/exit block along the normal edge. This is lazily
+ /// created if needed before the cleanup is popped.
+ mlir::Block *normalBlock = nullptr;
+
+ /// The number of fixups required by enclosing scopes (not including
+ /// this one). If this is the top cleanup scope, all the fixups
+ /// from this index onwards belong to this scope.
+ unsigned fixupDepth = 0;
+
public:
/// Gets the size required for a lazy cleanup scope with the given
/// cleanup-data requirements.
@@ -83,7 +174,10 @@ public:
return sizeof(EHCleanupScope) + cleanupBits.cleanupSize;
}
- EHCleanupScope(unsigned cleanupSize) : EHScope(EHScope::Cleanup) {
+ EHCleanupScope(unsigned cleanupSize, unsigned fixupDepth,
+ EHScopeStack::stable_iterator enclosingNormal)
+ : EHScope(EHScope::Cleanup), enclosingNormal(enclosingNormal),
+ fixupDepth(fixupDepth) {
// TODO(cir): When exception handling is upstreamed, isNormalCleanup and
// isEHCleanup will be arguments to the constructor.
cleanupBits.isNormalCleanup = true;
@@ -101,11 +195,19 @@ public:
// Objects of EHCleanupScope are not destructed. Use destroy().
~EHCleanupScope() = delete;
+ mlir::Block *getNormalBlock() const { return normalBlock; }
+ void setNormalBlock(mlir::Block *bb) { normalBlock = bb; }
+
bool isNormalCleanup() const { return cleanupBits.isNormalCleanup; }
bool isActive() const { return cleanupBits.isActive; }
void setActive(bool isActive) { cleanupBits.isActive = isActive; }
+ unsigned getFixupDepth() const { return fixupDepth; }
+ EHScopeStack::stable_iterator getEnclosingNormalCleanup() const {
+ return enclosingNormal;
+ }
+
size_t getCleanupSize() const { return cleanupBits.cleanupSize; }
void *getCleanupBuffer() { return this + 1; }
@@ -147,5 +249,13 @@ EHScopeStack::find(stable_iterator savePoint) const {
return iterator(endOfBuffer - savePoint.size);
}
+inline void EHScopeStack::popCatch() {
+ assert(!empty() && "popping exception stack when not empty");
+
+ EHCatchScope &scope = llvm::cast<EHCatchScope>(*begin());
+ assert(!cir::MissingFeatures::innermostEHScope());
+ deallocate(EHCatchScope::getSizeForNumHandlers(scope.getNumHandlers()));
+}
+
} // namespace clang::CIRGen
#endif // CLANG_LIB_CIR_CODEGEN_CIRGENCLEANUP_H
diff --git a/clang/lib/CIR/CodeGen/CIRGenDecl.cpp b/clang/lib/CIR/CodeGen/CIRGenDecl.cpp
index 4a19d91..5667273 100644
--- a/clang/lib/CIR/CodeGen/CIRGenDecl.cpp
+++ b/clang/lib/CIR/CodeGen/CIRGenDecl.cpp
@@ -740,6 +740,16 @@ struct CallStackRestore final : EHScopeStack::Cleanup {
};
} // namespace
+/// Push the standard destructor for the given type as
+/// at least a normal cleanup.
+void CIRGenFunction::pushDestroy(QualType::DestructionKind dtorKind,
+ Address addr, QualType type) {
+ assert(dtorKind && "cannot push destructor for trivial type");
+
+ CleanupKind cleanupKind = getCleanupKind(dtorKind);
+ pushDestroy(cleanupKind, addr, type, getDestroyer(dtorKind));
+}
+
void CIRGenFunction::pushDestroy(CleanupKind cleanupKind, Address addr,
QualType type, Destroyer *destroyer) {
pushFullExprCleanup<DestroyObject>(cleanupKind, addr, type, destroyer);
diff --git a/clang/lib/CIR/CodeGen/CIRGenException.cpp b/clang/lib/CIR/CodeGen/CIRGenException.cpp
index f9ff37b..717a3e0 100644
--- a/clang/lib/CIR/CodeGen/CIRGenException.cpp
+++ b/clang/lib/CIR/CodeGen/CIRGenException.cpp
@@ -69,6 +69,153 @@ mlir::LogicalResult CIRGenFunction::emitCXXTryStmt(const CXXTryStmt &s) {
if (s.getTryBlock()->body_empty())
return mlir::LogicalResult::success();
- cgm.errorNYI("exitCXXTryStmt: CXXTryStmt with non-empty body");
- return mlir::LogicalResult::success();
+ mlir::Location loc = getLoc(s.getSourceRange());
+ // Create a scope to hold try local storage for catch params.
+
+ mlir::OpBuilder::InsertPoint scopeIP;
+ cir::ScopeOp::create(
+ builder, loc,
+ /*scopeBuilder=*/[&](mlir::OpBuilder &b, mlir::Location loc) {
+ scopeIP = builder.saveInsertionPoint();
+ });
+
+ mlir::OpBuilder::InsertionGuard guard(builder);
+ builder.restoreInsertionPoint(scopeIP);
+ mlir::LogicalResult result = emitCXXTryStmtUnderScope(s);
+ cir::YieldOp::create(builder, loc);
+ return result;
+}
+
+mlir::LogicalResult
+CIRGenFunction::emitCXXTryStmtUnderScope(const CXXTryStmt &s) {
+ const llvm::Triple &t = getTarget().getTriple();
+  // If we encounter a try statement in an OpenMP target region offloaded to
+ // a GPU, we treat it as a basic block.
+ const bool isTargetDevice =
+ (cgm.getLangOpts().OpenMPIsTargetDevice && (t.isNVPTX() || t.isAMDGCN()));
+ if (isTargetDevice) {
+ cgm.errorNYI(
+ "emitCXXTryStmtUnderScope: OpenMP target region offloaded to GPU");
+ return mlir::success();
+ }
+
+ unsigned numHandlers = s.getNumHandlers();
+ mlir::Location tryLoc = getLoc(s.getBeginLoc());
+ mlir::OpBuilder::InsertPoint beginInsertTryBody;
+
+ bool hasCatchAll = false;
+ for (unsigned i = 0; i != numHandlers; ++i) {
+ hasCatchAll |= s.getHandler(i)->getExceptionDecl() == nullptr;
+ if (hasCatchAll)
+ break;
+ }
+
+  // Create the scope to represent only the C/C++ `try {}` part. However,
+  // don't populate it right away. Create regions for the catch handlers,
+ // but don't emit the handler bodies yet. For now, only make sure the
+ // scope returns the exception information.
+ auto tryOp = cir::TryOp::create(
+ builder, tryLoc,
+ /*tryBuilder=*/
+ [&](mlir::OpBuilder &b, mlir::Location loc) {
+ beginInsertTryBody = builder.saveInsertionPoint();
+ },
+ /*handlersBuilder=*/
+ [&](mlir::OpBuilder &b, mlir::Location loc,
+ mlir::OperationState &result) {
+ mlir::OpBuilder::InsertionGuard guard(b);
+
+        // We create an extra region for an unwind catch handler in case the
+        // catch-all handler doesn't exist.
+ unsigned numRegionsToCreate =
+ hasCatchAll ? numHandlers : numHandlers + 1;
+
+ for (unsigned i = 0; i != numRegionsToCreate; ++i) {
+ mlir::Region *region = result.addRegion();
+ builder.createBlock(region);
+ }
+ });
+
+ // Finally emit the body for try/catch.
+ {
+ mlir::Location loc = tryOp.getLoc();
+ mlir::OpBuilder::InsertionGuard guard(builder);
+ builder.restoreInsertionPoint(beginInsertTryBody);
+ CIRGenFunction::LexicalScope tryScope{*this, loc,
+ builder.getInsertionBlock()};
+
+ tryScope.setAsTry(tryOp);
+
+ // Attach the basic blocks for the catch regions.
+ enterCXXTryStmt(s, tryOp);
+
+ // Emit the body for the `try {}` part.
+ {
+ mlir::OpBuilder::InsertionGuard guard(builder);
+ CIRGenFunction::LexicalScope tryBodyScope{*this, loc,
+ builder.getInsertionBlock()};
+ if (emitStmt(s.getTryBlock(), /*useCurrentScope=*/true).failed())
+ return mlir::failure();
+ }
+
+ // Emit catch clauses.
+ exitCXXTryStmt(s);
+ }
+
+ return mlir::success();
+}
+
+void CIRGenFunction::enterCXXTryStmt(const CXXTryStmt &s, cir::TryOp tryOp,
+ bool isFnTryBlock) {
+ unsigned numHandlers = s.getNumHandlers();
+ EHCatchScope *catchScope = ehStack.pushCatch(numHandlers);
+ for (unsigned i = 0; i != numHandlers; ++i) {
+ const CXXCatchStmt *catchStmt = s.getHandler(i);
+ if (catchStmt->getExceptionDecl()) {
+ cgm.errorNYI("enterCXXTryStmt: CatchStmt with ExceptionDecl");
+ return;
+ }
+
+ // No exception decl indicates '...', a catch-all.
+ mlir::Region *handler = &tryOp.getHandlerRegions()[i];
+ catchScope->setHandler(i, cgm.getCXXABI().getCatchAllTypeInfo(), handler);
+
+ // Under async exceptions, catch(...) needs to catch HW exception too
+ // Mark scope with SehTryBegin as a SEH __try scope
+ if (getLangOpts().EHAsynch) {
+ cgm.errorNYI("enterCXXTryStmt: EHAsynch");
+ return;
+ }
+ }
+}
+
+void CIRGenFunction::exitCXXTryStmt(const CXXTryStmt &s, bool isFnTryBlock) {
+ unsigned numHandlers = s.getNumHandlers();
+ EHCatchScope &catchScope = cast<EHCatchScope>(*ehStack.begin());
+ assert(catchScope.getNumHandlers() == numHandlers);
+ cir::TryOp tryOp = curLexScope->getTry();
+
+ // If the catch was not required, bail out now.
+ if (!catchScope.mayThrow()) {
+ catchScope.clearHandlerBlocks();
+ ehStack.popCatch();
+
+    // Drop all basic blocks from all catch regions.
+ SmallVector<mlir::Block *> eraseBlocks;
+ for (mlir::Region &handlerRegion : tryOp.getHandlerRegions()) {
+ if (handlerRegion.empty())
+ continue;
+
+ for (mlir::Block &b : handlerRegion.getBlocks())
+ eraseBlocks.push_back(&b);
+ }
+
+ for (mlir::Block *b : eraseBlocks)
+ b->erase();
+
+ tryOp.setHandlerTypesAttr({});
+ return;
+ }
+
+ cgm.errorNYI("exitCXXTryStmt: Required catch");
}
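As a reference point, a hedged example of the only shape this path currently emits end-to-end: a non-empty try body with a catch-all handler and no exception declaration. `g` is a hypothetical function that may throw.

```cpp
// Sketch only: the '...' handler has no exception decl, so
// enterCXXTryStmt takes the catch-all route and no per-type RTTI is
// needed; handlers with an exception decl still hit errorNYI above.
void g();

void f() {
  try {
    g();
  } catch (...) {
  }
}
```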
diff --git a/clang/lib/CIR/CodeGen/CIRGenExpr.cpp b/clang/lib/CIR/CodeGen/CIRGenExpr.cpp
index 0d364e1..52021fc 100644
--- a/clang/lib/CIR/CodeGen/CIRGenExpr.cpp
+++ b/clang/lib/CIR/CodeGen/CIRGenExpr.cpp
@@ -1626,14 +1626,15 @@ LValue CIRGenFunction::emitBinaryOperatorLValue(const BinaryOperator *e) {
/// Emit code to compute the specified expression which
/// can have any type. The result is returned as an RValue struct.
-RValue CIRGenFunction::emitAnyExpr(const Expr *e, AggValueSlot aggSlot) {
+RValue CIRGenFunction::emitAnyExpr(const Expr *e, AggValueSlot aggSlot,
+ bool ignoreResult) {
switch (CIRGenFunction::getEvaluationKind(e->getType())) {
case cir::TEK_Scalar:
return RValue::get(emitScalarExpr(e));
case cir::TEK_Complex:
return RValue::getComplex(emitComplexExpr(e));
case cir::TEK_Aggregate: {
- if (aggSlot.isIgnored())
+ if (!ignoreResult && aggSlot.isIgnored())
aggSlot = createAggTemp(e->getType(), getLoc(e->getSourceRange()),
getCounterAggTmpAsString());
emitAggExpr(e, aggSlot);
@@ -1674,7 +1675,25 @@ CIRGenCallee CIRGenFunction::emitDirectCallee(const GlobalDecl &gd) {
// name to make it clear it's not the actual builtin.
auto fn = cast<cir::FuncOp>(curFn);
if (fn.getName() != fdInlineName && onlyHasInlineBuiltinDeclaration(fd)) {
- cgm.errorNYI("Inline only builtin function calls");
+ cir::FuncOp clone =
+ mlir::cast_or_null<cir::FuncOp>(cgm.getGlobalValue(fdInlineName));
+
+ if (!clone) {
+ // Create a forward declaration - the body will be generated in
+ // generateCode when the function definition is processed
+ cir::FuncOp calleeFunc = emitFunctionDeclPointer(cgm, gd);
+ mlir::OpBuilder::InsertionGuard guard(builder);
+ builder.setInsertionPointToStart(cgm.getModule().getBody());
+
+ clone = builder.create<cir::FuncOp>(calleeFunc.getLoc(), fdInlineName,
+ calleeFunc.getFunctionType());
+ clone.setLinkageAttr(cir::GlobalLinkageKindAttr::get(
+ &cgm.getMLIRContext(), cir::GlobalLinkageKind::InternalLinkage));
+ clone.setSymVisibility("private");
+ clone.setInlineKindAttr(cir::InlineAttr::get(
+ &cgm.getMLIRContext(), cir::InlineKind::AlwaysInline));
+ }
+ return CIRGenCallee::forDirect(clone, gd);
}
// Replaceable builtins provide their own implementation of a builtin. If we
@@ -1869,8 +1888,7 @@ RValue CIRGenFunction::emitCallExpr(const clang::CallExpr *e,
/// Emit code to compute the specified expression, ignoring the result.
void CIRGenFunction::emitIgnoredExpr(const Expr *e) {
if (e->isPRValue()) {
- assert(!cir::MissingFeatures::aggValueSlot());
- emitAnyExpr(e);
+ emitAnyExpr(e, AggValueSlot::ignored(), /*ignoreResult=*/true);
return;
}
diff --git a/clang/lib/CIR/CodeGen/CIRGenExprAggregate.cpp b/clang/lib/CIR/CodeGen/CIRGenExprAggregate.cpp
index ddee000..d6d226b 100644
--- a/clang/lib/CIR/CodeGen/CIRGenExprAggregate.cpp
+++ b/clang/lib/CIR/CodeGen/CIRGenExprAggregate.cpp
@@ -24,6 +24,73 @@ using namespace clang;
using namespace clang::CIRGen;
namespace {
+// FIXME(cir): This should be a common helper between CIRGen
+// and traditional CodeGen
+/// Is the value of the given expression possibly a reference to or
+/// into a __block variable?
+static bool isBlockVarRef(const Expr *e) {
+ // Make sure we look through parens.
+ e = e->IgnoreParens();
+
+ // Check for a direct reference to a __block variable.
+ if (const DeclRefExpr *dre = dyn_cast<DeclRefExpr>(e)) {
+ const VarDecl *var = dyn_cast<VarDecl>(dre->getDecl());
+ return (var && var->hasAttr<BlocksAttr>());
+ }
+
+ // More complicated stuff.
+
+ // Binary operators.
+ if (const BinaryOperator *op = dyn_cast<BinaryOperator>(e)) {
+ // For an assignment or pointer-to-member operation, just care
+ // about the LHS.
+ if (op->isAssignmentOp() || op->isPtrMemOp())
+ return isBlockVarRef(op->getLHS());
+
+ // For a comma, just care about the RHS.
+ if (op->getOpcode() == BO_Comma)
+ return isBlockVarRef(op->getRHS());
+
+ // FIXME: pointer arithmetic?
+ return false;
+
+ // Check both sides of a conditional operator.
+ } else if (const AbstractConditionalOperator *op =
+ dyn_cast<AbstractConditionalOperator>(e)) {
+ return isBlockVarRef(op->getTrueExpr()) ||
+ isBlockVarRef(op->getFalseExpr());
+
+ // OVEs are required to support BinaryConditionalOperators.
+ } else if (const OpaqueValueExpr *op = dyn_cast<OpaqueValueExpr>(e)) {
+ if (const Expr *src = op->getSourceExpr())
+ return isBlockVarRef(src);
+
+ // Casts are necessary to get things like (*(int*)&var) = foo().
+ // We don't really care about the kind of cast here, except
+ // we don't want to look through l2r casts, because it's okay
+ // to get the *value* in a __block variable.
+ } else if (const CastExpr *cast = dyn_cast<CastExpr>(e)) {
+ if (cast->getCastKind() == CK_LValueToRValue)
+ return false;
+ return isBlockVarRef(cast->getSubExpr());
+
+ // Handle unary operators. Again, just aggressively look through
+ // it, ignoring the operation.
+ } else if (const UnaryOperator *uop = dyn_cast<UnaryOperator>(e)) {
+ return isBlockVarRef(uop->getSubExpr());
+
+ // Look into the base of a field access.
+ } else if (const MemberExpr *mem = dyn_cast<MemberExpr>(e)) {
+ return isBlockVarRef(mem->getBase());
+
+ // Look into the base of a subscript.
+ } else if (const ArraySubscriptExpr *sub = dyn_cast<ArraySubscriptExpr>(e)) {
+ return isBlockVarRef(sub->getBase());
+ }
+
+ return false;
+}
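A hedged illustration of what `isBlockVarRef` guards against (requires clang's Blocks extension, so compile with `-fblocks`; names below are hypothetical):

```cpp
// Sketch only: 'agg' is a __block variable. A __block variable can be
// moved to the heap while the RHS runs (e.g. if the RHS copies a block
// capturing it), so an assignment whose LHS references one and whose
// RHS has side effects needs special handling.
typedef struct { int a[8]; } Agg;
Agg makeAgg(void); // hypothetical side-effecting producer

void f(void) {
  __block Agg agg;
  agg = makeAgg(); // LHS is a direct reference to a __block variable
}
```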
+
class AggExprEmitter : public StmtVisitor<AggExprEmitter> {
CIRGenFunction &cgf;
@@ -41,9 +108,7 @@ class AggExprEmitter : public StmtVisitor<AggExprEmitter> {
AggValueSlot ensureSlot(mlir::Location loc, QualType t) {
if (!dest.isIgnored())
return dest;
-
- cgf.cgm.errorNYI(loc, "Slot for ignored address");
- return dest;
+ return cgf.createAggTemp(t, loc, "agg.tmp.ensured");
}
void ensureDest(mlir::Location loc, QualType ty) {
@@ -89,6 +154,47 @@ public:
(void)cgf.emitCompoundStmt(*e->getSubStmt(), &retAlloca, dest);
}
+ void VisitBinAssign(const BinaryOperator *e) {
+ // For an assignment to work, the value on the right has
+ // to be compatible with the value on the left.
+ assert(cgf.getContext().hasSameUnqualifiedType(e->getLHS()->getType(),
+ e->getRHS()->getType()) &&
+ "Invalid assignment");
+
+ if (isBlockVarRef(e->getLHS()) &&
+ e->getRHS()->HasSideEffects(cgf.getContext())) {
+ cgf.cgm.errorNYI(e->getSourceRange(),
+ "block var reference with side effects");
+ return;
+ }
+
+ LValue lhs = cgf.emitLValue(e->getLHS());
+
+ // If we have an atomic type, evaluate into the destination and then
+ // do an atomic copy.
+ assert(!cir::MissingFeatures::atomicTypes());
+
+ // Codegen the RHS so that it stores directly into the LHS.
+ assert(!cir::MissingFeatures::aggValueSlotGC());
+ AggValueSlot lhsSlot = AggValueSlot::forLValue(
+ lhs, AggValueSlot::IsDestructed, AggValueSlot::IsAliased,
+ AggValueSlot::MayOverlap);
+
+    // A non-volatile aggregate destination might have a volatile member.
+ if (!lhsSlot.isVolatile() && cgf.hasVolatileMember(e->getLHS()->getType()))
+ lhsSlot.setVolatile(true);
+
+ cgf.emitAggExpr(e->getRHS(), lhsSlot);
+
+ // Copy into the destination if the assignment isn't ignored.
+ emitFinalDestCopy(e->getType(), lhs);
+
+ if (!dest.isIgnored() && !dest.isExternallyDestructed() &&
+ e->getType().isDestructedType() == QualType::DK_nontrivial_c_struct)
+ cgf.pushDestroy(QualType::DK_nontrivial_c_struct, dest.getAddress(),
+ e->getType());
+ }
+
void VisitDeclRefExpr(DeclRefExpr *e) { emitAggLoadOfLValue(e); }
void VisitInitListExpr(InitListExpr *e);
@@ -174,6 +280,7 @@ public:
void VisitUnaryDeref(UnaryOperator *e) { emitAggLoadOfLValue(e); }
void VisitStringLiteral(StringLiteral *e) { emitAggLoadOfLValue(e); }
void VisitCompoundLiteralExpr(CompoundLiteralExpr *e);
+
void VisitPredefinedExpr(const PredefinedExpr *e) {
cgf.cgm.errorNYI(e->getSourceRange(),
"AggExprEmitter: VisitPredefinedExpr");
@@ -186,9 +293,6 @@ public:
cgf.cgm.errorNYI(e->getSourceRange(),
"AggExprEmitter: VisitPointerToDataMemberBinaryOperator");
}
- void VisitBinAssign(const BinaryOperator *e) {
- cgf.cgm.errorNYI(e->getSourceRange(), "AggExprEmitter: VisitBinAssign");
- }
void VisitBinComma(const BinaryOperator *e) {
cgf.emitIgnoredExpr(e->getLHS());
Visit(e->getRHS());
@@ -503,7 +607,8 @@ void AggExprEmitter::emitCopy(QualType type, const AggValueSlot &dest,
LValue destLV = cgf.makeAddrLValue(dest.getAddress(), type);
LValue srcLV = cgf.makeAddrLValue(src.getAddress(), type);
assert(!cir::MissingFeatures::aggValueSlotVolatile());
- cgf.emitAggregateCopy(destLV, srcLV, type, dest.mayOverlap());
+ cgf.emitAggregateCopy(destLV, srcLV, type, dest.mayOverlap(),
+ dest.isVolatile() || src.isVolatile());
}
void AggExprEmitter::emitInitializationToLValue(Expr *e, LValue lv) {
@@ -566,7 +671,7 @@ void AggExprEmitter::emitNullInitializationToLValue(mlir::Location loc,
return;
}
- cgf.cgm.errorNYI("emitStoreThroughBitfieldLValue");
+ cgf.emitStoreThroughBitfieldLValue(RValue::get(null), lv);
return;
}
@@ -804,7 +909,8 @@ void CIRGenFunction::emitAggExpr(const Expr *e, AggValueSlot slot) {
}
void CIRGenFunction::emitAggregateCopy(LValue dest, LValue src, QualType ty,
- AggValueSlot::Overlap_t mayOverlap) {
+ AggValueSlot::Overlap_t mayOverlap,
+ bool isVolatile) {
// TODO(cir): this function needs improvements, commented code for now since
// this will be touched again soon.
assert(!ty->isAnyComplexType() && "Unexpected copy of complex");
@@ -860,7 +966,7 @@ void CIRGenFunction::emitAggregateCopy(LValue dest, LValue src, QualType ty,
cgm.errorNYI("emitAggregateCopy: GC");
[[maybe_unused]] cir::CopyOp copyOp =
- builder.createCopy(destPtr.getPointer(), srcPtr.getPointer());
+ builder.createCopy(destPtr.getPointer(), srcPtr.getPointer(), isVolatile);
assert(!cir::MissingFeatures::opTBAA());
}
diff --git a/clang/lib/CIR/CodeGen/CIRGenExprCXX.cpp b/clang/lib/CIR/CodeGen/CIRGenExprCXX.cpp
index b1e9e76..fe9e210 100644
--- a/clang/lib/CIR/CodeGen/CIRGenExprCXX.cpp
+++ b/clang/lib/CIR/CodeGen/CIRGenExprCXX.cpp
@@ -306,6 +306,7 @@ static mlir::Value emitCXXNewAllocSize(CIRGenFunction &cgf, const CXXNewExpr *e,
mlir::cast<cir::IntAttr>(constNumElements).getValue();
unsigned numElementsWidth = count.getBitWidth();
+ bool hasAnyOverflow = false;
// The equivalent code in CodeGen/CGExprCXX.cpp handles these cases as
// overflow, but that should never happen. The size argument is implicitly
@@ -336,11 +337,22 @@ static mlir::Value emitCXXNewAllocSize(CIRGenFunction &cgf, const CXXNewExpr *e,
// Add in the cookie, and check whether it's overflowed.
if (cookieSize != 0) {
- cgf.cgm.errorNYI(e->getSourceRange(),
- "emitCXXNewAllocSize: array cookie");
+      // Save the current size without a cookie. This shouldn't be
+      // used if there was overflow.
+ sizeWithoutCookie = cgf.getBuilder().getConstInt(
+ loc, allocationSize.zextOrTrunc(sizeWidth));
+
+ allocationSize = allocationSize.uadd_ov(cookieSize, overflow);
+ hasAnyOverflow |= overflow;
}
- size = cgf.getBuilder().getConstInt(loc, allocationSize);
+ // On overflow, produce a -1 so operator new will fail
+ if (hasAnyOverflow) {
+ size =
+ cgf.getBuilder().getConstInt(loc, llvm::APInt::getAllOnes(sizeWidth));
+ } else {
+ size = cgf.getBuilder().getConstInt(loc, allocationSize);
+ }
} else {
// TODO: Handle the variable size case
cgf.cgm.errorNYI(e->getSourceRange(),
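For intuition, a standalone sketch of the size computation above, using compiler builtins in place of `llvm::APInt` arithmetic (assumptions: a 64-bit `size_t`; the all-ones return value plays the role of the `-1` that makes `operator new` fail):

```cpp
#include <cstddef>
#include <cstdint>

// Sketch only: mirrors count * elemSize + cookieSize with the same
// saturate-to-all-ones behavior on overflow.
std::size_t newArrayAllocSize(std::size_t count, std::size_t elemSize,
                              std::size_t cookieSize) {
  std::size_t bytes = 0;
  bool overflow = __builtin_mul_overflow(count, elemSize, &bytes);
  overflow |= __builtin_add_overflow(bytes, cookieSize, &bytes);
  return overflow ? SIZE_MAX : bytes;
}
```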
@@ -390,7 +402,50 @@ void CIRGenFunction::emitNewArrayInitializer(
if (!e->hasInitializer())
return;
- cgm.errorNYI(e->getSourceRange(), "emitNewArrayInitializer");
+ unsigned initListElements = 0;
+
+ const Expr *init = e->getInitializer();
+ const InitListExpr *ile = dyn_cast<InitListExpr>(init);
+ if (ile) {
+ cgm.errorNYI(ile->getSourceRange(), "emitNewArrayInitializer: init list");
+ return;
+ }
+
+ // If all elements have already been initialized, skip any further
+ // initialization.
+ auto constOp = mlir::dyn_cast<cir::ConstantOp>(numElements.getDefiningOp());
+ if (constOp) {
+ auto constIntAttr = mlir::dyn_cast<cir::IntAttr>(constOp.getValue());
+ // Just skip out if the constant count is zero.
+ if (constIntAttr && constIntAttr.getUInt() <= initListElements)
+ return;
+ }
+
+ assert(init && "have trailing elements to initialize but no initializer");
+
+ // If this is a constructor call, try to optimize it out, and failing that
+ // emit a single loop to initialize all remaining elements.
+ if (const CXXConstructExpr *cce = dyn_cast<CXXConstructExpr>(init)) {
+ CXXConstructorDecl *ctor = cce->getConstructor();
+ if (ctor->isTrivial()) {
+      // If the new expression did not specify value-initialization, then
+      // there is no initialization.
+ if (!cce->requiresZeroInitialization())
+ return;
+
+ cgm.errorNYI(cce->getSourceRange(),
+ "emitNewArrayInitializer: trivial ctor zero-init");
+ return;
+ }
+
+ cgm.errorNYI(cce->getSourceRange(),
+ "emitNewArrayInitializer: ctor initializer");
+ return;
+ }
+
+ cgm.errorNYI(init->getSourceRange(),
+ "emitNewArrayInitializer: unsupported initializer");
+ return;
}
static void emitNewInitializer(CIRGenFunction &cgf, const CXXNewExpr *e,
@@ -586,9 +641,6 @@ mlir::Value CIRGenFunction::emitCXXNewExpr(const CXXNewExpr *e) {
// If there is a brace-initializer, cannot allocate fewer elements than inits.
unsigned minElements = 0;
- if (e->isArray() && e->hasInitializer()) {
- cgm.errorNYI(e->getSourceRange(), "emitCXXNewExpr: array initializer");
- }
mlir::Value numElements = nullptr;
mlir::Value allocSizeWithoutCookie = nullptr;
@@ -667,8 +719,11 @@ mlir::Value CIRGenFunction::emitCXXNewExpr(const CXXNewExpr *e) {
!e->getOperatorDelete()->isReservedGlobalPlacementOperator())
cgm.errorNYI(e->getSourceRange(), "emitCXXNewExpr: operator delete");
- if (allocSize != allocSizeWithoutCookie)
- cgm.errorNYI(e->getSourceRange(), "emitCXXNewExpr: array with cookies");
+ if (allocSize != allocSizeWithoutCookie) {
+ assert(e->isArray());
+ allocation = cgm.getCXXABI().initializeArrayCookie(
+ *this, allocation, numElements, e, allocType);
+ }
mlir::Type elementTy;
if (e->isArray()) {
diff --git a/clang/lib/CIR/CodeGen/CIRGenExprConstant.cpp b/clang/lib/CIR/CodeGen/CIRGenExprConstant.cpp
index 19ed656..800262a 100644
--- a/clang/lib/CIR/CodeGen/CIRGenExprConstant.cpp
+++ b/clang/lib/CIR/CodeGen/CIRGenExprConstant.cpp
@@ -1011,9 +1011,9 @@ public:
}
mlir::Attribute VisitCXXDefaultInitExpr(CXXDefaultInitExpr *die, QualType t) {
- cgm.errorNYI(die->getBeginLoc(),
- "ConstExprEmitter::VisitCXXDefaultInitExpr");
- return {};
+ // No need for a DefaultInitExprScope: we don't handle 'this' in a
+ // constant expression.
+ return Visit(die->getExpr(), t);
}
mlir::Attribute VisitExprWithCleanups(ExprWithCleanups *e, QualType t) {
@@ -1028,9 +1028,7 @@ public:
mlir::Attribute VisitImplicitValueInitExpr(ImplicitValueInitExpr *e,
QualType t) {
- cgm.errorNYI(e->getBeginLoc(),
- "ConstExprEmitter::VisitImplicitValueInitExpr");
- return {};
+ return cgm.getBuilder().getZeroInitAttr(cgm.convertType(t));
}
mlir::Attribute VisitInitListExpr(InitListExpr *ile, QualType t) {
diff --git a/clang/lib/CIR/CodeGen/CIRGenExprScalar.cpp b/clang/lib/CIR/CodeGen/CIRGenExprScalar.cpp
index 138082b..33eb748 100644
--- a/clang/lib/CIR/CodeGen/CIRGenExprScalar.cpp
+++ b/clang/lib/CIR/CodeGen/CIRGenExprScalar.cpp
@@ -2041,8 +2041,9 @@ mlir::Value ScalarExprEmitter::VisitMemberExpr(MemberExpr *e) {
assert(!cir::MissingFeatures::tryEmitAsConstant());
Expr::EvalResult result;
if (e->EvaluateAsInt(result, cgf.getContext(), Expr::SE_AllowSideEffects)) {
- cgf.cgm.errorNYI(e->getSourceRange(), "Constant interger member expr");
- // Fall through to emit this as a non-constant access.
+ llvm::APSInt value = result.Val.getInt();
+ cgf.emitIgnoredExpr(e->getBase());
+ return builder.getConstInt(cgf.getLoc(e->getExprLoc()), value);
}
return emitLoadOfLValue(e);
}
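A hedged example of the fold this enables: the base is still evaluated for its side effects, but the member access itself becomes a constant. `getPt` is a hypothetical side-effecting function.

```cpp
// Sketch only: 'getPt().dims' evaluates the call for side effects and
// then yields the constant 4 directly instead of loading through the
// call's result.
struct Pt {
  static constexpr int dims = 4;
};
Pt getPt();

int dims() { return getPt().dims; }
```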
diff --git a/clang/lib/CIR/CodeGen/CIRGenFunction.cpp b/clang/lib/CIR/CodeGen/CIRGenFunction.cpp
index 25a46df..d3c0d9f 100644
--- a/clang/lib/CIR/CodeGen/CIRGenFunction.cpp
+++ b/clang/lib/CIR/CodeGen/CIRGenFunction.cpp
@@ -551,6 +551,49 @@ cir::FuncOp CIRGenFunction::generateCode(clang::GlobalDecl gd, cir::FuncOp fn,
const auto funcDecl = cast<FunctionDecl>(gd.getDecl());
curGD = gd;
+ if (funcDecl->isInlineBuiltinDeclaration()) {
+ // When generating code for a builtin with an inline declaration, use a
+ // mangled name to hold the actual body, while keeping an external
+ // declaration in case the function pointer is referenced somewhere.
+ std::string fdInlineName = (cgm.getMangledName(funcDecl) + ".inline").str();
+ cir::FuncOp clone =
+ mlir::cast_or_null<cir::FuncOp>(cgm.getGlobalValue(fdInlineName));
+ if (!clone) {
+ mlir::OpBuilder::InsertionGuard guard(builder);
+ builder.setInsertionPoint(fn);
+ clone = builder.create<cir::FuncOp>(fn.getLoc(), fdInlineName,
+ fn.getFunctionType());
+ clone.setLinkage(cir::GlobalLinkageKind::InternalLinkage);
+ clone.setSymVisibility("private");
+ clone.setInlineKind(cir::InlineKind::AlwaysInline);
+ }
+ fn.setLinkage(cir::GlobalLinkageKind::ExternalLinkage);
+ fn.setSymVisibility("private");
+ fn = clone;
+ } else {
+ // Detect the unusual situation where an inline version is shadowed by a
+ // non-inline version. In that case we should pick the external one
+ // everywhere. That's GCC behavior too.
+ for (const FunctionDecl *pd = funcDecl->getPreviousDecl(); pd;
+ pd = pd->getPreviousDecl()) {
+ if (LLVM_UNLIKELY(pd->isInlineBuiltinDeclaration())) {
+ std::string inlineName = funcDecl->getName().str() + ".inline";
+ if (auto inlineFn = mlir::cast_or_null<cir::FuncOp>(
+ cgm.getGlobalValue(inlineName))) {
+ // Replace all uses of the .inline function with the regular function
+ // FIXME: This performs a linear walk over the module. Introduce some
+ // caching here.
+ if (inlineFn
+ .replaceAllSymbolUses(fn.getSymNameAttr(), cgm.getModule())
+ .failed())
+ llvm_unreachable("Failed to replace inline builtin symbol uses");
+ inlineFn.erase();
+ }
+ break;
+ }
+ }
+ }
+
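For context, a hedged sketch of the source pattern that triggers this `.inline` cloning: the GNU `extern inline` plus `always_inline` builtin-redefinition idiom used by fortified libc headers. The custom body is assumed, not taken from the patch.

```cpp
// Sketch only: an always_inline gnu_inline redefinition of a builtin.
// Its body is emitted under 'memcpy.inline' while the 'memcpy' symbol
// stays external, so taking the function's address still works.
extern "C" {
extern inline __attribute__((always_inline, gnu_inline)) void *
memcpy(void *dst, const void *src, unsigned long n) {
  return __builtin_memcpy(dst, src, n); // a custom body would go here
}
}

void *copy(void *dst, const void *src, unsigned long n) {
  return memcpy(dst, src, n); // call resolves to the .inline clone
}
```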
SourceLocation loc = funcDecl->getLocation();
Stmt *body = funcDecl->getBody();
SourceRange bodyRange =
diff --git a/clang/lib/CIR/CodeGen/CIRGenFunction.h b/clang/lib/CIR/CodeGen/CIRGenFunction.h
index b8492a8..e3b9b6a 100644
--- a/clang/lib/CIR/CodeGen/CIRGenFunction.h
+++ b/clang/lib/CIR/CodeGen/CIRGenFunction.h
@@ -60,11 +60,44 @@ private:
/// is where the next operations will be introduced.
CIRGenBuilderTy &builder;
+ /// A jump destination is an abstract label, branching to which may
+ /// require a jump out through normal cleanups.
+ struct JumpDest {
+ JumpDest() = default;
+    JumpDest(mlir::Block *block, EHScopeStack::stable_iterator depth = {},
+             unsigned index = 0)
+        : block(block), scopeDepth(depth), index(index) {}
+
+ bool isValid() const { return block != nullptr; }
+ mlir::Block *getBlock() const { return block; }
+ EHScopeStack::stable_iterator getScopeDepth() const { return scopeDepth; }
+ unsigned getDestIndex() const { return index; }
+
+ // This should be used cautiously.
+ void setScopeDepth(EHScopeStack::stable_iterator depth) {
+ scopeDepth = depth;
+ }
+
+ private:
+ mlir::Block *block = nullptr;
+ EHScopeStack::stable_iterator scopeDepth;
+    unsigned index = 0;
+ };
+
public:
/// The GlobalDecl for the current function being compiled or the global
/// variable currently being initialized.
clang::GlobalDecl curGD;
+  /// Unified return block.
+  /// In CIR this is a function rather than a single cached block because
+  /// each scope may have its own associated return block.
+ JumpDest returnBlock(mlir::Block *retBlock) {
+ return getJumpDestInCurrentScope(retBlock);
+ }
+
+ unsigned nextCleanupDestIndex = 1;
+
/// The compiler-generated variable that holds the return value.
std::optional<mlir::Value> fnRetAlloca;
@@ -574,6 +607,16 @@ public:
}
};
+ /// The given basic block lies in the current EH scope, but may be a
+ /// target of a potentially scope-crossing jump; get a stable handle
+ /// to which we can perform this jump later.
+ /// CIRGen: this mostly tracks state for figuring out the proper scope
+  /// information; no actual branches are emitted.
+ JumpDest getJumpDestInCurrentScope(mlir::Block *target) {
+ return JumpDest(target, ehStack.getInnermostNormalCleanup(),
+ nextCleanupDestIndex++);
+ }
+
/// Perform the usual unary conversions on the specified expression and
/// compare the result against zero, returning an Int1Ty value.
mlir::Value evaluateExprAsBool(const clang::Expr *e);
@@ -858,6 +901,13 @@ public:
FunctionArgList args, clang::SourceLocation loc,
clang::SourceLocation startLoc);
+  /// Returns true if the aggregate type has a volatile member.
+ bool hasVolatileMember(QualType t) {
+ if (const auto *rd = t->getAsRecordDecl())
+ return rd->hasVolatileMember();
+ return false;
+ }
+
/// The cleanup depth enclosing all the cleanups associated with the
/// parameters.
EHScopeStack::stable_iterator prologueCleanupDepth;
@@ -947,6 +997,9 @@ public:
LexicalScope *parentScope = nullptr;
+ // Holds the actual value for ScopeKind::Try
+ cir::TryOp tryOp = nullptr;
+
// Only Regular is used at the moment. Support for other kinds will be
// added as the relevant statements/expressions are upstreamed.
enum Kind {
@@ -1006,6 +1059,10 @@ public:
void setAsGlobalInit() { scopeKind = Kind::GlobalInit; }
void setAsSwitch() { scopeKind = Kind::Switch; }
void setAsTernary() { scopeKind = Kind::Ternary; }
+ void setAsTry(cir::TryOp op) {
+ scopeKind = Kind::Try;
+ tryOp = op;
+ }
// Lazy create cleanup block or return what's available.
mlir::Block *getOrCreateCleanupBlock(mlir::OpBuilder &builder) {
@@ -1015,6 +1072,11 @@ public:
return cleanupBlock;
}
+ cir::TryOp getTry() {
+ assert(isTry());
+ return tryOp;
+ }
+
mlir::Block *getCleanupBlock(mlir::OpBuilder &builder) {
return cleanupBlock;
}
@@ -1082,6 +1144,9 @@ public:
static Destroyer destroyCXXObject;
+ void pushDestroy(QualType::DestructionKind dtorKind, Address addr,
+ QualType type);
+
void pushDestroy(CleanupKind kind, Address addr, QualType type,
Destroyer *destroyer);
@@ -1136,14 +1201,16 @@ public:
/// occupied by some other object. More efficient code can often be
/// generated if not.
void emitAggregateCopy(LValue dest, LValue src, QualType eltTy,
- AggValueSlot::Overlap_t mayOverlap);
+ AggValueSlot::Overlap_t mayOverlap,
+ bool isVolatile = false);
/// Emit code to compute the specified expression which can have any type. The
/// result is returned as an RValue struct. If this is an aggregate
/// expression, the aggloc/agglocvolatile arguments indicate where the result
/// should be returned.
RValue emitAnyExpr(const clang::Expr *e,
- AggValueSlot aggSlot = AggValueSlot::ignored());
+ AggValueSlot aggSlot = AggValueSlot::ignored(),
+ bool ignoreResult = false);
/// Emits the code necessary to evaluate an arbitrary expression into the
/// given memory location.
@@ -1197,6 +1264,8 @@ public:
LValue emitBinaryOperatorLValue(const BinaryOperator *e);
+ cir::BrOp emitBranchThroughCleanup(mlir::Location loc, JumpDest dest);
+
mlir::LogicalResult emitBreakStmt(const clang::BreakStmt &s);
RValue emitBuiltinExpr(const clang::GlobalDecl &gd, unsigned builtinID,
@@ -1336,6 +1405,13 @@ public:
mlir::LogicalResult emitCXXTryStmt(const clang::CXXTryStmt &s);
+ mlir::LogicalResult emitCXXTryStmtUnderScope(const clang::CXXTryStmt &s);
+
+ void enterCXXTryStmt(const CXXTryStmt &s, cir::TryOp tryOp,
+ bool isFnTryBlock = false);
+
+ void exitCXXTryStmt(const CXXTryStmt &s, bool isFnTryBlock = false);
+
void emitCtorPrologue(const clang::CXXConstructorDecl *ctor,
clang::CXXCtorType ctorType, FunctionArgList &args);
@@ -1583,6 +1659,10 @@ public:
bool buildingTopLevelCase);
mlir::LogicalResult emitSwitchStmt(const clang::SwitchStmt &s);
+ mlir::Value emitTargetBuiltinExpr(unsigned builtinID,
+ const clang::CallExpr *e,
+ ReturnValueSlot &returnValue);
+
/// Given a value and its clang type, returns the value casted to its memory
/// representation.
/// Note: CIR defers most of the special casting to the final lowering passes
@@ -1621,6 +1701,8 @@ public:
mlir::LogicalResult emitWhileStmt(const clang::WhileStmt &s);
+ mlir::Value emitX86BuiltinExpr(unsigned builtinID, const CallExpr *e);
+
/// Given an assignment `*lhs = rhs`, emit a test that checks if \p rhs is
/// nonnull, if 1\p LHS is marked _Nonnull.
void emitNullabilityCheck(LValue lhs, mlir::Value rhs,
diff --git a/clang/lib/CIR/CodeGen/CIRGenItaniumCXXABI.cpp b/clang/lib/CIR/CodeGen/CIRGenItaniumCXXABI.cpp
index c184d4a..e620310 100644
--- a/clang/lib/CIR/CodeGen/CIRGenItaniumCXXABI.cpp
+++ b/clang/lib/CIR/CodeGen/CIRGenItaniumCXXABI.cpp
@@ -135,8 +135,14 @@ public:
cir::PointerType destCIRTy, bool isRefCast,
Address src) override;
- /**************************** RTTI Uniqueness ******************************/
+ Address initializeArrayCookie(CIRGenFunction &cgf, Address newPtr,
+ mlir::Value numElements, const CXXNewExpr *e,
+ QualType elementType) override;
+
protected:
+ CharUnits getArrayCookieSizeImpl(QualType elementType) override;
+
+ /**************************** RTTI Uniqueness ******************************/
/// Returns true if the ABI requires RTTI type_info objects to be unique
/// across a program.
virtual bool shouldRTTIBeUnique() const { return true; }
@@ -2003,3 +2009,70 @@ mlir::Value CIRGenItaniumCXXABI::emitDynamicCast(CIRGenFunction &cgf,
return cgf.getBuilder().createDynCast(loc, src.getPointer(), destCIRTy,
isRefCast, castInfo);
}
+
+/************************** Array allocation cookies **************************/
+
+CharUnits CIRGenItaniumCXXABI::getArrayCookieSizeImpl(QualType elementType) {
+ // The array cookie is a size_t; pad that up to the element alignment.
+ // The cookie is actually right-justified in that space.
+ return std::max(
+ cgm.getSizeSize(),
+ cgm.getASTContext().getPreferredTypeAlignInChars(elementType));
+}
+
+Address CIRGenItaniumCXXABI::initializeArrayCookie(CIRGenFunction &cgf,
+ Address newPtr,
+ mlir::Value numElements,
+ const CXXNewExpr *e,
+ QualType elementType) {
+ assert(requiresArrayCookie(e));
+
+ // TODO: When sanitizer support is implemented, we'll need to
+ // get the address space from `newPtr`.
+ assert(!cir::MissingFeatures::addressSpace());
+ assert(!cir::MissingFeatures::sanitizers());
+
+ ASTContext &ctx = cgm.getASTContext();
+ CharUnits sizeSize = cgf.getSizeSize();
+ mlir::Location loc = cgf.getLoc(e->getSourceRange());
+
+ // The size of the cookie.
+ CharUnits cookieSize =
+ std::max(sizeSize, ctx.getPreferredTypeAlignInChars(elementType));
+ assert(cookieSize == getArrayCookieSizeImpl(elementType));
+
+ cir::PointerType u8PtrTy = cgf.getBuilder().getUInt8PtrTy();
+ mlir::Value baseBytePtr =
+ cgf.getBuilder().createPtrBitcast(newPtr.getPointer(), u8PtrTy);
+
+ // Compute an offset to the cookie.
+ CharUnits cookieOffset = cookieSize - sizeSize;
+ mlir::Value cookiePtrValue = baseBytePtr;
+ if (!cookieOffset.isZero()) {
+ mlir::Value offsetOp = cgf.getBuilder().getSignedInt(
+ loc, cookieOffset.getQuantity(), /*width=*/32);
+ cookiePtrValue =
+ cgf.getBuilder().createPtrStride(loc, cookiePtrValue, offsetOp);
+ }
+
+ CharUnits baseAlignment = newPtr.getAlignment();
+ CharUnits cookiePtrAlignment = baseAlignment.alignmentAtOffset(cookieOffset);
+ Address cookiePtr(cookiePtrValue, u8PtrTy, cookiePtrAlignment);
+
+ // Write the number of elements into the appropriate slot.
+ Address numElementsPtr =
+ cookiePtr.withElementType(cgf.getBuilder(), cgf.SizeTy);
+ cgf.getBuilder().createStore(loc, numElements, numElementsPtr);
+
+ // Finally, compute a pointer to the actual data buffer by skipping
+ // over the cookie completely.
+ mlir::Value dataOffset =
+ cgf.getBuilder().getSignedInt(loc, cookieSize.getQuantity(),
+ /*width=*/32);
+ mlir::Value dataPtr =
+ cgf.getBuilder().createPtrStride(loc, baseBytePtr, dataOffset);
+ mlir::Value finalPtr =
+ cgf.getBuilder().createPtrBitcast(dataPtr, newPtr.getElementType());
+ CharUnits finalAlignment = baseAlignment.alignmentAtOffset(cookieSize);
+ return Address(finalPtr, newPtr.getElementType(), finalAlignment);
+}
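A worked example of the layout this produces, under the assumption of an 8-byte `size_t` and an element type whose preferred alignment is 16 bytes:

```cpp
#include <algorithm>
#include <cstdio>

// Sketch only: reproduces the offset arithmetic above.
// cookieSize   = max(sizeof(size_t), preferred element alignment) = 16
// cookieOffset = cookieSize - sizeof(size_t)                      = 8
// => element count stored at [base + 8], data begins at [base + 16].
int main() {
  const unsigned sizeSize = 8;   // sizeof(size_t), assumed
  const unsigned elemAlign = 16; // preferred alignment, assumed
  const unsigned cookieSize = std::max(sizeSize, elemAlign);
  const unsigned cookieOffset = cookieSize - sizeSize;
  std::printf("count at +%u, data at +%u\n", cookieOffset, cookieSize);
  return 0;
}
```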
diff --git a/clang/lib/CIR/CodeGen/CIRGenModule.cpp b/clang/lib/CIR/CodeGen/CIRGenModule.cpp
index 127f763..6b29373 100644
--- a/clang/lib/CIR/CodeGen/CIRGenModule.cpp
+++ b/clang/lib/CIR/CodeGen/CIRGenModule.cpp
@@ -102,7 +102,7 @@ CIRGenModule::CIRGenModule(mlir::MLIRContext &mlirContext,
// TODO(CIR): Should be updated once TypeSizeInfoAttr is upstreamed
const unsigned sizeTypeSize =
astContext.getTypeSize(astContext.getSignedSizeType());
- SizeAlignInBytes = astContext.toCharUnitsFromBits(sizeTypeSize).getQuantity();
+ SizeSizeInBytes = astContext.toCharUnitsFromBits(sizeTypeSize).getQuantity();
// In CIRGenTypeCache, UIntPtrTy and SizeType are fields of the same union
UIntPtrTy =
cir::IntType::get(&getMLIRContext(), sizeTypeSize, /*isSigned=*/false);
@@ -1917,6 +1917,17 @@ void CIRGenModule::setFunctionAttributes(GlobalDecl globalDecl,
const Decl *decl = globalDecl.getDecl();
func.setGlobalVisibilityAttr(getGlobalVisibilityAttrFromDecl(decl));
}
+
+ // If we plan on emitting this inline builtin, we can't treat it as a builtin.
+ const auto *fd = cast<FunctionDecl>(globalDecl.getDecl());
+ if (fd->isInlineBuiltinDeclaration()) {
+ const FunctionDecl *fdBody;
+ bool hasBody = fd->hasBody(fdBody);
+ (void)hasBody;
+ assert(hasBody && "Inline builtin declarations should always have an "
+ "available body!");
+ assert(!cir::MissingFeatures::attributeNoBuiltin());
+ }
}
void CIRGenModule::setCIRFunctionAttributesForDefinition(
diff --git a/clang/lib/CIR/CodeGen/CIRGenOpenACCClause.cpp b/clang/lib/CIR/CodeGen/CIRGenOpenACCClause.cpp
index ce4ae7e..385f89c 100644
--- a/clang/lib/CIR/CodeGen/CIRGenOpenACCClause.cpp
+++ b/clang/lib/CIR/CodeGen/CIRGenOpenACCClause.cpp
@@ -553,12 +553,15 @@ public:
}
void VisitIfClause(const OpenACCIfClause &clause) {
- if constexpr (isOneOfTypes<OpTy, mlir::acc::ParallelOp, mlir::acc::SerialOp,
- mlir::acc::KernelsOp, mlir::acc::InitOp,
- mlir::acc::ShutdownOp, mlir::acc::SetOp,
- mlir::acc::DataOp, mlir::acc::WaitOp,
- mlir::acc::HostDataOp, mlir::acc::EnterDataOp,
- mlir::acc::ExitDataOp, mlir::acc::UpdateOp>) {
+ if constexpr (isOneOfTypes<
+ OpTy, mlir::acc::ParallelOp, mlir::acc::SerialOp,
+ mlir::acc::KernelsOp, mlir::acc::InitOp,
+ mlir::acc::ShutdownOp, mlir::acc::SetOp,
+ mlir::acc::DataOp, mlir::acc::WaitOp,
+ mlir::acc::HostDataOp, mlir::acc::EnterDataOp,
+ mlir::acc::ExitDataOp, mlir::acc::UpdateOp,
+ mlir::acc::AtomicReadOp, mlir::acc::AtomicWriteOp,
+ mlir::acc::AtomicUpdateOp, mlir::acc::AtomicCaptureOp>) {
operation.getIfCondMutable().append(
createCondition(clause.getConditionExpr()));
} else if constexpr (isCombinedType<OpTy>) {
@@ -1144,6 +1147,10 @@ EXPL_SPEC(mlir::acc::HostDataOp)
EXPL_SPEC(mlir::acc::EnterDataOp)
EXPL_SPEC(mlir::acc::ExitDataOp)
EXPL_SPEC(mlir::acc::UpdateOp)
+EXPL_SPEC(mlir::acc::AtomicReadOp)
+EXPL_SPEC(mlir::acc::AtomicWriteOp)
+EXPL_SPEC(mlir::acc::AtomicCaptureOp)
+EXPL_SPEC(mlir::acc::AtomicUpdateOp)
#undef EXPL_SPEC
template <typename ComputeOp, typename LoopOp>
diff --git a/clang/lib/CIR/CodeGen/CIRGenStmt.cpp b/clang/lib/CIR/CodeGen/CIRGenStmt.cpp
index ad8c4d0..f486c46 100644
--- a/clang/lib/CIR/CodeGen/CIRGenStmt.cpp
+++ b/clang/lib/CIR/CodeGen/CIRGenStmt.cpp
@@ -446,54 +446,89 @@ mlir::LogicalResult CIRGenFunction::emitReturnStmt(const ReturnStmt &s) {
mlir::Location loc = getLoc(s.getSourceRange());
const Expr *rv = s.getRetValue();
- if (getContext().getLangOpts().ElideConstructors && s.getNRVOCandidate() &&
- s.getNRVOCandidate()->isNRVOVariable()) {
- assert(!cir::MissingFeatures::openMP());
- assert(!cir::MissingFeatures::nrvo());
- } else if (!rv) {
- // No return expression. Do nothing.
- } else if (rv->getType()->isVoidType()) {
- // Make sure not to return anything, but evaluate the expression
- // for side effects.
- if (rv) {
- emitAnyExpr(rv);
+ RunCleanupsScope cleanupScope(*this);
+ bool createNewScope = false;
+ if (const auto *ewc = dyn_cast_or_null<ExprWithCleanups>(rv)) {
+ rv = ewc->getSubExpr();
+ createNewScope = true;
+ }
+
+ auto handleReturnVal = [&]() {
+ if (getContext().getLangOpts().ElideConstructors && s.getNRVOCandidate() &&
+ s.getNRVOCandidate()->isNRVOVariable()) {
+ assert(!cir::MissingFeatures::openMP());
+ assert(!cir::MissingFeatures::nrvo());
+ } else if (!rv) {
+ // No return expression. Do nothing.
+ } else if (rv->getType()->isVoidType()) {
+ // Make sure not to return anything, but evaluate the expression
+ // for side effects.
+ if (rv) {
+ emitAnyExpr(rv);
+ }
+ } else if (cast<FunctionDecl>(curGD.getDecl())
+ ->getReturnType()
+ ->isReferenceType()) {
+ // If this function returns a reference, take the address of the
+ // expression rather than the value.
+ RValue result = emitReferenceBindingToExpr(rv);
+ builder.CIRBaseBuilderTy::createStore(loc, result.getValue(),
+ *fnRetAlloca);
+ } else {
+ mlir::Value value = nullptr;
+ switch (CIRGenFunction::getEvaluationKind(rv->getType())) {
+ case cir::TEK_Scalar:
+ value = emitScalarExpr(rv);
+ if (value) { // Change this to an assert once emitScalarExpr is complete
+ builder.CIRBaseBuilderTy::createStore(loc, value, *fnRetAlloca);
+ }
+ break;
+ case cir::TEK_Complex:
+ emitComplexExprIntoLValue(rv,
+ makeAddrLValue(returnValue, rv->getType()),
+ /*isInit=*/true);
+ break;
+ case cir::TEK_Aggregate:
+ assert(!cir::MissingFeatures::aggValueSlotGC());
+ emitAggExpr(rv, AggValueSlot::forAddr(returnValue, Qualifiers(),
+ AggValueSlot::IsDestructed,
+ AggValueSlot::IsNotAliased,
+ getOverlapForReturnValue()));
+ break;
+ }
}
- } else if (cast<FunctionDecl>(curGD.getDecl())
- ->getReturnType()
- ->isReferenceType()) {
- // If this function returns a reference, take the address of the
- // expression rather than the value.
- RValue result = emitReferenceBindingToExpr(rv);
- builder.CIRBaseBuilderTy::createStore(loc, result.getValue(), *fnRetAlloca);
+ };
+
+ if (!createNewScope) {
+ handleReturnVal();
} else {
- mlir::Value value = nullptr;
- switch (CIRGenFunction::getEvaluationKind(rv->getType())) {
- case cir::TEK_Scalar:
- value = emitScalarExpr(rv);
- if (value) { // Change this to an assert once emitScalarExpr is complete
- builder.CIRBaseBuilderTy::createStore(loc, value, *fnRetAlloca);
- }
- break;
- case cir::TEK_Complex:
- emitComplexExprIntoLValue(rv, makeAddrLValue(returnValue, rv->getType()),
- /*isInit=*/true);
- break;
- case cir::TEK_Aggregate:
- assert(!cir::MissingFeatures::aggValueSlotGC());
- emitAggExpr(rv, AggValueSlot::forAddr(returnValue, Qualifiers(),
- AggValueSlot::IsDestructed,
- AggValueSlot::IsNotAliased,
- getOverlapForReturnValue()));
- break;
+ mlir::Location scopeLoc =
+ getLoc(rv ? rv->getSourceRange() : s.getSourceRange());
+    // First create cir.scope and later emit its body. Otherwise, any CIRGen
+    // dispatched by `handleReturnVal()` might need to manipulate blocks and
+    // look into parents, which are all unlinked.
+ mlir::OpBuilder::InsertPoint scopeBody;
+ cir::ScopeOp::create(builder, scopeLoc, /*scopeBuilder=*/
+ [&](mlir::OpBuilder &b, mlir::Location loc) {
+ scopeBody = b.saveInsertionPoint();
+ });
+ {
+ mlir::OpBuilder::InsertionGuard guard(builder);
+ builder.restoreInsertionPoint(scopeBody);
+ CIRGenFunction::LexicalScope lexScope{*this, scopeLoc,
+ builder.getInsertionBlock()};
+ handleReturnVal();
}
}
+ cleanupScope.forceCleanup();
+
+ // In CIR we might have returns in different scopes.
+ // FIXME(cir): cleanup code is handling actual return emission, the logic
+ // should try to match traditional codegen more closely (to the extent which
+ // is possible).
auto *retBlock = curLexScope->getOrCreateRetBlock(*this, loc);
- // This should emit a branch through the cleanup block if one exists.
- builder.create<cir::BrOp>(loc, retBlock);
- assert(!cir::MissingFeatures::emitBranchThroughCleanup());
- if (ehStack.stable_begin() != currentCleanupStackDepth)
- cgm.errorNYI(s.getSourceRange(), "return with cleanup stack");
+ emitBranchThroughCleanup(loc, returnBlock(retBlock));
// Insert the new block to continue codegen after branch to ret block.
builder.createBlock(builder.getBlock()->getParent());
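A hedged example of input that takes the `createNewScope` path: the returned full-expression materializes a temporary with a non-trivial destructor, so Sema wraps it in `ExprWithCleanups` and the evaluation runs inside its own `cir.scope`. `S` is a hypothetical type.

```cpp
// Sketch only: evaluating 'S().v' creates a temporary that must be
// destroyed after the return value is computed, before branching to
// the return block.
struct S {
  S();
  ~S();
  int v;
};

int make() { return S().v; }
```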
@@ -1063,5 +1098,5 @@ void CIRGenFunction::emitReturnOfRValue(mlir::Location loc, RValue rv,
assert(!cir::MissingFeatures::emitBranchThroughCleanup());
builder.create<cir::BrOp>(loc, retBlock);
if (ehStack.stable_begin() != currentCleanupStackDepth)
- cgm.errorNYI(loc, "return with cleanup stack");
+ cgm.errorNYI(loc, "return of r-value with cleanup stack");
}
diff --git a/clang/lib/CIR/CodeGen/CIRGenStmtOpenACC.cpp b/clang/lib/CIR/CodeGen/CIRGenStmtOpenACC.cpp
index e89393c..02bb46d 100644
--- a/clang/lib/CIR/CodeGen/CIRGenStmtOpenACC.cpp
+++ b/clang/lib/CIR/CodeGen/CIRGenStmtOpenACC.cpp
@@ -306,6 +306,29 @@ CIRGenFunction::emitOpenACCCacheConstruct(const OpenACCCacheConstruct &s) {
mlir::LogicalResult
CIRGenFunction::emitOpenACCAtomicConstruct(const OpenACCAtomicConstruct &s) {
- cgm.errorNYI(s.getSourceRange(), "OpenACC Atomic Construct");
- return mlir::failure();
+  // For now, we only support 'read', so diagnose. We can switch on the kind
+  // later once we start implementing the other three forms.
+ if (s.getAtomicKind() != OpenACCAtomicKind::Read) {
+ cgm.errorNYI(s.getSourceRange(), "OpenACC Atomic Construct");
+ return mlir::failure();
+ }
+
+ // While atomic is an 'associated statement' construct, it 'steals' the
+ // expression it is associated with rather than emitting it inside its own
+ // region, so it needs custom emit logic.
+ mlir::Location start = getLoc(s.getSourceRange().getBegin());
+ OpenACCAtomicConstruct::StmtInfo inf = s.getAssociatedStmtInfo();
+ // Atomic 'read' only permits 'v = x', where v and x are both scalar
+ // l-values. getAssociatedStmtInfo strips off implicit casts, including
+ // implicit conversions and l-value-to-r-value conversions, so we can just
+ // emit each side as an l-value. The Flang implementation has no problem with
+ // differing types, so it appears the dialect can handle the conversions.
+ mlir::Value v = emitLValue(inf.V).getPointer();
+ mlir::Value x = emitLValue(inf.X).getPointer();
+ mlir::Type resTy = convertType(inf.V->getType());
+ auto op = mlir::acc::AtomicReadOp::create(builder, start, x, v, resTy,
+ /*ifCond=*/{});
+ emitOpenACCClauses(op, s.getDirectiveKind(), s.getDirectiveLoc(),
+ s.clauses());
+ return mlir::success();
}
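As a usage sketch, the only form handled so far is the 'read' capture of one
scalar l-value into another (names illustrative):

    void acc_read(int &x, int &v) {
    #pragma acc atomic read
      v = x; // emitted as acc.atomic.read on the addresses of v and x
    }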
diff --git a/clang/lib/CIR/CodeGen/CIRGenTypeCache.h b/clang/lib/CIR/CodeGen/CIRGenTypeCache.h
index b5612d9..ff5842c 100644
--- a/clang/lib/CIR/CodeGen/CIRGenTypeCache.h
+++ b/clang/lib/CIR/CodeGen/CIRGenTypeCache.h
@@ -74,11 +74,17 @@ struct CIRGenTypeCache {
unsigned char PointerSizeInBytes;
};
- /// The alignment of size_t.
- unsigned char SizeAlignInBytes;
+ /// The size and alignment of size_t.
+ union {
+ unsigned char SizeSizeInBytes; // sizeof(size_t)
+ unsigned char SizeAlignInBytes;
+ };
cir::TargetAddressSpaceAttr cirAllocaAddressSpace;
+ clang::CharUnits getSizeSize() const {
+ return clang::CharUnits::fromQuantity(SizeSizeInBytes);
+ }
clang::CharUnits getSizeAlign() const {
return clang::CharUnits::fromQuantity(SizeAlignInBytes);
}
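The union works because the size and alignment of size_t coincide on the
targets Clang supports; a minimal sketch of the invariant being assumed:

    #include <cstddef>

    // If this ever failed for some target, SizeSizeInBytes and
    // SizeAlignInBytes could no longer share storage in the union.
    static_assert(sizeof(std::size_t) == alignof(std::size_t),
                  "size_t size and alignment are assumed to coincide");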
diff --git a/clang/lib/CIR/CodeGen/CIRGenValue.h b/clang/lib/CIR/CodeGen/CIRGenValue.h
index f83a952..ab245a77 100644
--- a/clang/lib/CIR/CodeGen/CIRGenValue.h
+++ b/clang/lib/CIR/CodeGen/CIRGenValue.h
@@ -380,6 +380,15 @@ public:
clang::Qualifiers getQualifiers() const { return quals; }
+ bool isVolatile() const { return quals.hasVolatile(); }
+
+ void setVolatile(bool flag) {
+ if (flag)
+ quals.addVolatile();
+ else
+ quals.removeVolatile();
+ }
+
Address getAddress() const { return addr; }
bool isIgnored() const { return !addr.isValid(); }
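A short usage sketch for the new accessors (the helper is hypothetical; it
just exercises the Qualifiers round-trip the setter wraps):

    void copyVolatility(AggValueSlot &slot, clang::Qualifiers quals) {
      slot.setVolatile(quals.hasVolatile());
      assert(slot.isVolatile() == quals.hasVolatile());
    }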
diff --git a/clang/lib/CIR/CodeGen/CMakeLists.txt b/clang/lib/CIR/CodeGen/CMakeLists.txt
index 36db4bd..7c31bea 100644
--- a/clang/lib/CIR/CodeGen/CMakeLists.txt
+++ b/clang/lib/CIR/CodeGen/CMakeLists.txt
@@ -11,13 +11,14 @@ add_clang_library(clangCIR
CIRGenAsm.cpp
CIRGenAtomic.cpp
CIRGenBuilder.cpp
+ CIRGenBuiltin.cpp
+ CIRGenBuiltinX86.cpp
CIRGenCall.cpp
CIRGenClass.cpp
CIRGenCleanup.cpp
CIRGenCoroutine.cpp
CIRGenCXX.cpp
CIRGenCXXABI.cpp
- CIRGenBuiltin.cpp
CIRGenDecl.cpp
CIRGenDeclCXX.cpp
CIRGenDeclOpenACC.cpp
diff --git a/clang/lib/CIR/CodeGen/EHScopeStack.h b/clang/lib/CIR/CodeGen/EHScopeStack.h
index 67a72f5..4198c23 100644
--- a/clang/lib/CIR/CodeGen/EHScopeStack.h
+++ b/clang/lib/CIR/CodeGen/EHScopeStack.h
@@ -18,12 +18,38 @@
#ifndef CLANG_LIB_CIR_CODEGEN_EHSCOPESTACK_H
#define CLANG_LIB_CIR_CODEGEN_EHSCOPESTACK_H
+#include "clang/CIR/Dialect/IR/CIRDialect.h"
#include "llvm/ADT/SmallVector.h"
namespace clang::CIRGen {
class CIRGenFunction;
+/// A branch fixup. These are required when emitting a goto to a
+/// label which hasn't been emitted yet. The goto is optimistically
+/// emitted as a branch to the basic block for the label, and (if it
+/// occurs in a scope with non-trivial cleanups) a fixup is added to
+/// the innermost cleanup. When a (normal) cleanup is popped, any
+/// unresolved fixups in that scope are threaded through the cleanup.
+struct BranchFixup {
+ /// The block containing the terminator which needs to be modified
+ /// into a switch if this fixup is resolved into the current scope.
+ /// If null, the initial branch points directly to the destination.
+ mlir::Block *optimisticBranchBlock = nullptr;
+
+ /// The ultimate destination of the branch.
+ ///
+ /// This can be set to null to indicate that this fixup was
+ /// successfully resolved.
+ mlir::Block *destination = nullptr;
+
+ /// The destination index value.
+ unsigned destinationIndex = 0;
+
+ /// The initial branch of the fixup.
+ cir::BrOp initialBranch = {};
+};
+
enum CleanupKind : unsigned {
/// Denotes a cleanup that should run when a scope is exited using exceptional
/// control flow (e.g. a throw statement leading to stack unwinding).
@@ -126,9 +152,31 @@ private:
/// The first valid entry in the buffer.
char *startOfData = nullptr;
+ /// The innermost normal cleanup on the stack.
+ stable_iterator innermostNormalCleanup = stable_end();
+
/// The CGF this stack belongs to.
CIRGenFunction *cgf = nullptr;
+ /// The current set of branch fixups. A branch fixup is a jump to
+ /// an as-yet unemitted label, i.e. a label for which we don't yet
+ /// know the EH stack depth. Whenever we pop a cleanup, we have
+ /// to thread all the current branch fixups through it.
+ ///
+ /// Fixups are recorded as the Use of the respective branch or
+ /// switch statement. The use points to the final destination.
+ /// When popping out of a cleanup, these uses are threaded through
+ /// the cleanup and adjusted to point to the new cleanup.
+ ///
+ /// Note that branches are allowed to jump into protected scopes
+ /// in certain situations; e.g. the following code is legal:
+ /// struct A { ~A(); }; // trivial ctor, non-trivial dtor
+ /// goto foo;
+ /// A a;
+ /// foo:
+ /// bar();
+ llvm::SmallVector<BranchFixup> branchFixups;
+
// This class uses a custom allocator for maximum efficiency because cleanups
// are allocated and freed very frequently. It's basically a bump pointer
// allocator, but we can't use LLVM's BumpPtrAllocator because we use offsets
@@ -155,9 +203,29 @@ public:
/// Pops a cleanup scope off the stack. This is private to CIRGenCleanup.cpp.
void popCleanup();
+ /// Push a set of catch handlers on the stack. The catch is
+ /// uninitialized and will need to have the given number of handlers
+ /// set on it.
+ class EHCatchScope *pushCatch(unsigned numHandlers);
+
+ /// Pops a catch scope off the stack. This is private to CIRGenException.cpp.
+ void popCatch();
+
/// Determines whether the exception-scopes stack is empty.
bool empty() const { return startOfData == endOfBuffer; }
+ /// Determines whether there are any normal cleanups on the stack.
+ bool hasNormalCleanups() const {
+ return innermostNormalCleanup != stable_end();
+ }
+
+ /// Returns the innermost normal cleanup on the stack, or
+ /// stable_end() if there are no normal cleanups.
+ stable_iterator getInnermostNormalCleanup() const {
+ return innermostNormalCleanup;
+ }
+ stable_iterator getInnermostActiveNormalCleanup() const;
+
/// An unstable reference to a scope-stack depth. Invalidated by
/// pushes but not pops.
class iterator;
@@ -172,12 +240,30 @@ public:
return stable_iterator(endOfBuffer - startOfData);
}
+ /// Create a stable reference to the bottom of the EH stack.
+ static stable_iterator stable_end() { return stable_iterator(0); }
+
/// Turn a stable reference to a scope depth into an unstable pointer
/// to the EH stack.
iterator find(stable_iterator savePoint) const;
- /// Create a stable reference to the bottom of the EH stack.
- static stable_iterator stable_end() { return stable_iterator(0); }
+ /// Add a branch fixup to the current cleanup scope.
+ BranchFixup &addBranchFixup() {
+ assert(hasNormalCleanups() && "adding fixup in scope without cleanups");
+ branchFixups.push_back(BranchFixup());
+ return branchFixups.back();
+ }
+
+ unsigned getNumBranchFixups() const { return branchFixups.size(); }
+ BranchFixup &getBranchFixup(unsigned i) {
+ assert(i < getNumBranchFixups());
+ return branchFixups[i];
+ }
+
+ /// Pops lazily-removed fixups from the end of the list. This
+ /// should only be called by procedures which have just popped a
+ /// cleanup or resolved one or more fixups.
+ void popNullFixups();
};
} // namespace clang::CIRGen
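The fixup machinery exists for forward branches whose destination block has
not been emitted yet. A minimal C++ reproducer of the commented scenario
(bar() is assumed declared elsewhere):

    struct A { ~A(); }; // trivial ctor, non-trivial dtor

    void test(bool b) {
      if (b)
        goto foo; // destination unknown here: recorded via addBranchFixup()
                  // and threaded through a's cleanup when it is popped
      A a;
    foo:
      bar();
    }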
diff --git a/clang/lib/CIR/Dialect/IR/CIRDialect.cpp b/clang/lib/CIR/Dialect/IR/CIRDialect.cpp
index ed606b7..fa180f5 100644
--- a/clang/lib/CIR/Dialect/IR/CIRDialect.cpp
+++ b/clang/lib/CIR/Dialect/IR/CIRDialect.cpp
@@ -2941,6 +2941,21 @@ mlir::LogicalResult cir::ThrowOp::verify() {
}
//===----------------------------------------------------------------------===//
+// AtomicFetchOp
+//===----------------------------------------------------------------------===//
+
+LogicalResult cir::AtomicFetchOp::verify() {
+ if (getBinop() != cir::AtomicFetchKind::Add &&
+ getBinop() != cir::AtomicFetchKind::Sub &&
+ getBinop() != cir::AtomicFetchKind::Max &&
+ getBinop() != cir::AtomicFetchKind::Min &&
+ !mlir::isa<cir::IntType>(getVal().getType()))
+ return emitError("only atomic add, sub, max, and min operation could "
+ "operate on floating-point values");
+ return success();
+}
+
+//===----------------------------------------------------------------------===//
// TypeInfoAttr
//===----------------------------------------------------------------------===//
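In source terms the verifier mirrors the atomic builtins: add, sub, max, and
min are also defined for floating-point operands, while the bitwise fetch
kinds are integer-only. A sketch of what passes and what is rejected:

    double ok(double *p, double v) {
      return __atomic_fetch_add(p, v, __ATOMIC_SEQ_CST); // Add on float: valid
    }
    // An And fetch on a non-integer value (binop=And with a floating-point
    // operand) is exactly the combination AtomicFetchOp::verify() diagnoses.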
diff --git a/clang/lib/CIR/Dialect/Transforms/FlattenCFG.cpp b/clang/lib/CIR/Dialect/Transforms/FlattenCFG.cpp
index 8589a2e..46bd186 100644
--- a/clang/lib/CIR/Dialect/Transforms/FlattenCFG.cpp
+++ b/clang/lib/CIR/Dialect/Transforms/FlattenCFG.cpp
@@ -551,10 +551,100 @@ public:
}
};
+class CIRTryOpFlattening : public mlir::OpRewritePattern<cir::TryOp> {
+public:
+ using OpRewritePattern<cir::TryOp>::OpRewritePattern;
+
+ mlir::Block *buildTryBody(cir::TryOp tryOp,
+ mlir::PatternRewriter &rewriter) const {
+ // Split the current block before the TryOp to create the inlining
+ // point.
+ mlir::Block *beforeTryScopeBlock = rewriter.getInsertionBlock();
+ mlir::Block *afterTry =
+ rewriter.splitBlock(beforeTryScopeBlock, rewriter.getInsertionPoint());
+
+ // Inline body region.
+ mlir::Block *beforeBody = &tryOp.getTryRegion().front();
+ rewriter.inlineRegionBefore(tryOp.getTryRegion(), afterTry);
+
+ // Branch into the body of the region.
+ rewriter.setInsertionPointToEnd(beforeTryScopeBlock);
+ cir::BrOp::create(rewriter, tryOp.getLoc(), mlir::ValueRange(), beforeBody);
+ return afterTry;
+ }
+
+ void buildHandlers(cir::TryOp tryOp, mlir::PatternRewriter &rewriter,
+ mlir::Block *afterBody, mlir::Block *afterTry,
+ SmallVectorImpl<cir::CallOp> &callsToRewrite,
+ SmallVectorImpl<mlir::Block *> &landingPads) const {
+ // Replace the tryOp return with a branch that jumps out of the body.
+ rewriter.setInsertionPointToEnd(afterBody);
+
+ mlir::Block *beforeCatch = rewriter.getInsertionBlock();
+ rewriter.setInsertionPointToEnd(beforeCatch);
+
+ // Check whether the terminator is a YieldOp; there could be another
+ // terminator, e.g. an unreachable.
+ if (auto tryBodyYield = dyn_cast<cir::YieldOp>(afterBody->getTerminator()))
+ rewriter.replaceOpWithNewOp<cir::BrOp>(tryBodyYield, afterTry);
+
+ mlir::ArrayAttr handlers = tryOp.getHandlerTypesAttr();
+ if (!handlers || handlers.empty())
+ return;
+
+ llvm_unreachable("TryOpFlattening buildHandlers with CallsOp is NYI");
+ }
+
+ mlir::LogicalResult
+ matchAndRewrite(cir::TryOp tryOp,
+ mlir::PatternRewriter &rewriter) const override {
+ mlir::OpBuilder::InsertionGuard guard(rewriter);
+ mlir::Block *afterBody = &tryOp.getTryRegion().back();
+
+ // Grab the collection of `cir.call exception`s to rewrite to
+ // `cir.try_call`.
+ llvm::SmallVector<cir::CallOp, 4> callsToRewrite;
+ tryOp.getTryRegion().walk([&](CallOp op) {
+ // Only grab calls within immediate closest TryOp scope.
+ if (op->getParentOfType<cir::TryOp>() != tryOp)
+ return;
+ assert(!cir::MissingFeatures::opCallExceptionAttr());
+ callsToRewrite.push_back(op);
+ });
+
+ if (!callsToRewrite.empty())
+ llvm_unreachable(
+ "TryOpFlattening with try block that contains CallOps is NYI");
+
+ // Build try body.
+ mlir::Block *afterTry = buildTryBody(tryOp, rewriter);
+
+ // Build handlers.
+ llvm::SmallVector<mlir::Block *, 4> landingPads;
+ buildHandlers(tryOp, rewriter, afterBody, afterTry, callsToRewrite,
+ landingPads);
+
+ rewriter.eraseOp(tryOp);
+
+ assert((landingPads.size() == callsToRewrite.size()) &&
+ "expected matching number of entries");
+
+ // Quick block cleanup: no indirection to the post-try block.
+ auto brOp = dyn_cast<cir::BrOp>(afterTry->getTerminator());
+ if (brOp && brOp.getDest()->hasNoPredecessors()) {
+ mlir::Block *srcBlock = brOp.getDest();
+ rewriter.eraseOp(brOp);
+ rewriter.mergeBlocks(srcBlock, afterTry);
+ }
+
+ return mlir::success();
+ }
+};
+
void populateFlattenCFGPatterns(RewritePatternSet &patterns) {
patterns
.add<CIRIfFlattening, CIRLoopOpInterfaceFlattening, CIRScopeOpFlattening,
- CIRSwitchOpFlattening, CIRTernaryOpFlattening>(
+ CIRSwitchOpFlattening, CIRTernaryOpFlattening, CIRTryOpFlattening>(
patterns.getContext());
}
@@ -568,7 +658,7 @@ void CIRFlattenCFGPass::runOnOperation() {
assert(!cir::MissingFeatures::ifOp());
assert(!cir::MissingFeatures::switchOp());
assert(!cir::MissingFeatures::tryOp());
- if (isa<IfOp, ScopeOp, SwitchOp, LoopOpInterface, TernaryOp>(op))
+ if (isa<IfOp, ScopeOp, SwitchOp, LoopOpInterface, TernaryOp, TryOp>(op))
ops.push_back(op);
});
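As a rough picture of what CIRTryOpFlattening does for a handler-free try
(pseudo-CIR in the comments; a sketch, not verbatim dialect syntax):

    // Before flattening:           After flattening:
    //   <code before try>            <code before try>
    //   cir.try {                    cir.br ^try_body
    //     <try body>                ^try_body:
    //     cir.yield                   <try body>
    //   }                             cir.br ^after_try  // yield rewritten
    //   <code after try>            ^after_try:
    //                                 <code after try>

The final block-merge step then removes the remaining indirection to the
post-try block when possible.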
diff --git a/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp b/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp
index 0243bf1..bb75f2d 100644
--- a/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp
+++ b/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp
@@ -182,7 +182,7 @@ mlir::LogicalResult CIRToLLVMCopyOpLowering::matchAndRewrite(
rewriter, op.getLoc(), rewriter.getI32Type(), op.getLength(layout));
assert(!cir::MissingFeatures::aggValueSlotVolatile());
rewriter.replaceOpWithNewOp<mlir::LLVM::MemcpyOp>(
- op, adaptor.getDst(), adaptor.getSrc(), length, /*isVolatile=*/false);
+ op, adaptor.getDst(), adaptor.getSrc(), length, op.getIsVolatile());
return mlir::success();
}
@@ -730,6 +730,187 @@ mlir::LogicalResult CIRToLLVMAtomicXchgOpLowering::matchAndRewrite(
return mlir::success();
}
+mlir::LogicalResult CIRToLLVMAtomicTestAndSetOpLowering::matchAndRewrite(
+ cir::AtomicTestAndSetOp op, OpAdaptor adaptor,
+ mlir::ConversionPatternRewriter &rewriter) const {
+ assert(!cir::MissingFeatures::atomicSyncScopeID());
+
+ mlir::LLVM::AtomicOrdering llvmOrder = getLLVMMemOrder(op.getMemOrder());
+
+ auto one = mlir::LLVM::ConstantOp::create(rewriter, op.getLoc(),
+ rewriter.getI8Type(), 1);
+ auto rmw = mlir::LLVM::AtomicRMWOp::create(
+ rewriter, op.getLoc(), mlir::LLVM::AtomicBinOp::xchg, adaptor.getPtr(),
+ one, llvmOrder, /*syncscope=*/llvm::StringRef(),
+ adaptor.getAlignment().value_or(0), op.getIsVolatile());
+
+ auto zero = mlir::LLVM::ConstantOp::create(rewriter, op.getLoc(),
+ rewriter.getI8Type(), 0);
+ auto cmp = mlir::LLVM::ICmpOp::create(
+ rewriter, op.getLoc(), mlir::LLVM::ICmpPredicate::ne, rmw, zero);
+
+ rewriter.replaceOp(op, cmp);
+ return mlir::success();
+}
+
+mlir::LogicalResult CIRToLLVMAtomicClearOpLowering::matchAndRewrite(
+ cir::AtomicClearOp op, OpAdaptor adaptor,
+ mlir::ConversionPatternRewriter &rewriter) const {
+ assert(!cir::MissingFeatures::atomicSyncScopeID());
+
+ mlir::LLVM::AtomicOrdering llvmOrder = getLLVMMemOrder(op.getMemOrder());
+ auto zero = mlir::LLVM::ConstantOp::create(rewriter, op.getLoc(),
+ rewriter.getI8Type(), 0);
+ auto store = mlir::LLVM::StoreOp::create(
+ rewriter, op.getLoc(), zero, adaptor.getPtr(),
+ adaptor.getAlignment().value_or(0), op.getIsVolatile(),
+ /*isNonTemporal=*/false, /*isInvariantGroup=*/false, llvmOrder);
+
+ rewriter.replaceOp(op, store);
+ return mlir::success();
+}
+
+static mlir::LLVM::AtomicBinOp
+getLLVMAtomicBinOp(cir::AtomicFetchKind k, bool isInt, bool isSignedInt) {
+ switch (k) {
+ case cir::AtomicFetchKind::Add:
+ return isInt ? mlir::LLVM::AtomicBinOp::add : mlir::LLVM::AtomicBinOp::fadd;
+ case cir::AtomicFetchKind::Sub:
+ return isInt ? mlir::LLVM::AtomicBinOp::sub : mlir::LLVM::AtomicBinOp::fsub;
+ case cir::AtomicFetchKind::And:
+ return mlir::LLVM::AtomicBinOp::_and;
+ case cir::AtomicFetchKind::Xor:
+ return mlir::LLVM::AtomicBinOp::_xor;
+ case cir::AtomicFetchKind::Or:
+ return mlir::LLVM::AtomicBinOp::_or;
+ case cir::AtomicFetchKind::Nand:
+ return mlir::LLVM::AtomicBinOp::nand;
+ case cir::AtomicFetchKind::Max: {
+ if (!isInt)
+ return mlir::LLVM::AtomicBinOp::fmax;
+ return isSignedInt ? mlir::LLVM::AtomicBinOp::max
+ : mlir::LLVM::AtomicBinOp::umax;
+ }
+ case cir::AtomicFetchKind::Min: {
+ if (!isInt)
+ return mlir::LLVM::AtomicBinOp::fmin;
+ return isSignedInt ? mlir::LLVM::AtomicBinOp::min
+ : mlir::LLVM::AtomicBinOp::umin;
+ }
+ }
+ llvm_unreachable("Unknown atomic fetch opcode");
+}
+
+static llvm::StringLiteral getLLVMBinop(cir::AtomicFetchKind k, bool isInt) {
+ switch (k) {
+ case cir::AtomicFetchKind::Add:
+ return isInt ? mlir::LLVM::AddOp::getOperationName()
+ : mlir::LLVM::FAddOp::getOperationName();
+ case cir::AtomicFetchKind::Sub:
+ return isInt ? mlir::LLVM::SubOp::getOperationName()
+ : mlir::LLVM::FSubOp::getOperationName();
+ case cir::AtomicFetchKind::And:
+ return mlir::LLVM::AndOp::getOperationName();
+ case cir::AtomicFetchKind::Xor:
+ return mlir::LLVM::XOrOp::getOperationName();
+ case cir::AtomicFetchKind::Or:
+ return mlir::LLVM::OrOp::getOperationName();
+ case cir::AtomicFetchKind::Nand:
+ // There's no nand binop in LLVM; this is fixed up later with a not.
+ return mlir::LLVM::AndOp::getOperationName();
+ case cir::AtomicFetchKind::Max:
+ case cir::AtomicFetchKind::Min:
+ llvm_unreachable("handled in buildMinMaxPostOp");
+ }
+ llvm_unreachable("Unknown atomic fetch opcode");
+}
+
+mlir::Value CIRToLLVMAtomicFetchOpLowering::buildPostOp(
+ cir::AtomicFetchOp op, OpAdaptor adaptor,
+ mlir::ConversionPatternRewriter &rewriter, mlir::Value rmwVal,
+ bool isInt) const {
+ SmallVector<mlir::Value> atomicOperands = {rmwVal, adaptor.getVal()};
+ SmallVector<mlir::Type> atomicResTys = {rmwVal.getType()};
+ return rewriter
+ .create(op.getLoc(),
+ rewriter.getStringAttr(getLLVMBinop(op.getBinop(), isInt)),
+ atomicOperands, atomicResTys, {})
+ ->getResult(0);
+}
+
+mlir::Value CIRToLLVMAtomicFetchOpLowering::buildMinMaxPostOp(
+ cir::AtomicFetchOp op, OpAdaptor adaptor,
+ mlir::ConversionPatternRewriter &rewriter, mlir::Value rmwVal, bool isInt,
+ bool isSigned) const {
+ mlir::Location loc = op.getLoc();
+
+ if (!isInt) {
+ if (op.getBinop() == cir::AtomicFetchKind::Max)
+ return mlir::LLVM::MaxNumOp::create(rewriter, loc, rmwVal,
+ adaptor.getVal());
+ return mlir::LLVM::MinNumOp::create(rewriter, loc, rmwVal,
+ adaptor.getVal());
+ }
+
+ mlir::LLVM::ICmpPredicate pred;
+ if (op.getBinop() == cir::AtomicFetchKind::Max) {
+ pred = isSigned ? mlir::LLVM::ICmpPredicate::sgt
+ : mlir::LLVM::ICmpPredicate::ugt;
+ } else { // Min
+ pred = isSigned ? mlir::LLVM::ICmpPredicate::slt
+ : mlir::LLVM::ICmpPredicate::ult;
+ }
+ mlir::Value cmp = mlir::LLVM::ICmpOp::create(
+ rewriter, loc,
+ mlir::LLVM::ICmpPredicateAttr::get(rewriter.getContext(), pred), rmwVal,
+ adaptor.getVal());
+ return mlir::LLVM::SelectOp::create(rewriter, loc, cmp, rmwVal,
+ adaptor.getVal());
+}
+
+mlir::LogicalResult CIRToLLVMAtomicFetchOpLowering::matchAndRewrite(
+ cir::AtomicFetchOp op, OpAdaptor adaptor,
+ mlir::ConversionPatternRewriter &rewriter) const {
+ bool isInt = false;
+ bool isSignedInt = false;
+ if (auto intTy = mlir::dyn_cast<cir::IntType>(op.getVal().getType())) {
+ isInt = true;
+ isSignedInt = intTy.isSigned();
+ } else if (mlir::isa<cir::SingleType, cir::DoubleType>(
+ op.getVal().getType())) {
+ isInt = false;
+ } else {
+ return op.emitError() << "Unsupported type: " << op.getVal().getType();
+ }
+
+ mlir::LLVM::AtomicOrdering llvmOrder = getLLVMMemOrder(op.getMemOrder());
+ mlir::LLVM::AtomicBinOp llvmBinOp =
+ getLLVMAtomicBinOp(op.getBinop(), isInt, isSignedInt);
+ auto rmwVal = mlir::LLVM::AtomicRMWOp::create(rewriter, op.getLoc(),
+ llvmBinOp, adaptor.getPtr(),
+ adaptor.getVal(), llvmOrder);
+
+ mlir::Value result = rmwVal.getResult();
+ if (!op.getFetchFirst()) {
+ if (op.getBinop() == cir::AtomicFetchKind::Max ||
+ op.getBinop() == cir::AtomicFetchKind::Min)
+ result = buildMinMaxPostOp(op, adaptor, rewriter, rmwVal.getRes(), isInt,
+ isSignedInt);
+ else
+ result = buildPostOp(op, adaptor, rewriter, rmwVal.getRes(), isInt);
+
+ // Compensate for the lack of a nand binop in LLVM IR.
+ if (op.getBinop() == cir::AtomicFetchKind::Nand) {
+ auto negOne = mlir::LLVM::ConstantOp::create(rewriter, op.getLoc(),
+ result.getType(), -1);
+ result = mlir::LLVM::XOrOp::create(rewriter, op.getLoc(), result, negOne);
+ }
+ }
+
+ rewriter.replaceOp(op, result);
+ return mlir::success();
+}
+
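The fetch-first logic mirrors the usual lowering of the GNU atomic builtins:
llvm.atomicrmw always yields the old value, so the op_fetch forms re-apply
the operation to recover the new value (a sketch of the correspondence):

    int fetch_then_add(int *p, int v) {
      // old value: a bare atomicrmw add suffices (fetchFirst == true)
      return __atomic_fetch_add(p, v, __ATOMIC_SEQ_CST);
    }
    int add_then_fetch(int *p, int v) {
      // new value: atomicrmw add plus an llvm.add post-op (buildPostOp);
      // nand additionally needs the xor-with--1 fixup shown above
      return __atomic_add_fetch(p, v, __ATOMIC_SEQ_CST);
    }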
mlir::LogicalResult CIRToLLVMBitClrsbOpLowering::matchAndRewrite(
cir::BitClrsbOp op, OpAdaptor adaptor,
mlir::ConversionPatternRewriter &rewriter) const {