path: root/clang/lib/CIR
Diffstat (limited to 'clang/lib/CIR')
-rw-r--r--  clang/lib/CIR/CodeGen/Address.h | 6
-rw-r--r--  clang/lib/CIR/CodeGen/CIRGenAsm.cpp | 136
-rw-r--r--  clang/lib/CIR/CodeGen/CIRGenAtomic.cpp | 569
-rw-r--r--  clang/lib/CIR/CodeGen/CIRGenBuilder.cpp | 64
-rw-r--r--  clang/lib/CIR/CodeGen/CIRGenBuilder.h | 97
-rw-r--r--  clang/lib/CIR/CodeGen/CIRGenBuiltin.cpp | 93
-rw-r--r--  clang/lib/CIR/CodeGen/CIRGenCXXABI.h | 56
-rw-r--r--  clang/lib/CIR/CodeGen/CIRGenCXXExpr.cpp | 53
-rw-r--r--  clang/lib/CIR/CodeGen/CIRGenCall.cpp | 46
-rw-r--r--  clang/lib/CIR/CodeGen/CIRGenCall.h | 67
-rw-r--r--  clang/lib/CIR/CodeGen/CIRGenClass.cpp | 319
-rw-r--r--  clang/lib/CIR/CodeGen/CIRGenCleanup.cpp | 143
-rw-r--r--  clang/lib/CIR/CodeGen/CIRGenCleanup.h | 142
-rw-r--r--  clang/lib/CIR/CodeGen/CIRGenConstantEmitter.h | 6
-rw-r--r--  clang/lib/CIR/CodeGen/CIRGenDecl.cpp | 30
-rw-r--r--  clang/lib/CIR/CodeGen/CIRGenException.cpp | 41
-rw-r--r--  clang/lib/CIR/CodeGen/CIRGenExpr.cpp | 212
-rw-r--r--  clang/lib/CIR/CodeGen/CIRGenExprAggregate.cpp | 286
-rw-r--r--  clang/lib/CIR/CodeGen/CIRGenExprCXX.cpp | 36
-rw-r--r--  clang/lib/CIR/CodeGen/CIRGenExprComplex.cpp | 332
-rw-r--r--  clang/lib/CIR/CodeGen/CIRGenExprConstant.cpp | 161
-rw-r--r--  clang/lib/CIR/CodeGen/CIRGenExprScalar.cpp | 87
-rw-r--r--  clang/lib/CIR/CodeGen/CIRGenFunction.cpp | 73
-rw-r--r--  clang/lib/CIR/CodeGen/CIRGenFunction.h | 225
-rw-r--r--  clang/lib/CIR/CodeGen/CIRGenItaniumCXXABI.cpp | 263
-rw-r--r--  clang/lib/CIR/CodeGen/CIRGenModule.cpp | 153
-rw-r--r--  clang/lib/CIR/CodeGen/CIRGenModule.h | 60
-rw-r--r--  clang/lib/CIR/CodeGen/CIRGenOpenACCClause.cpp | 374
-rw-r--r--  clang/lib/CIR/CodeGen/CIRGenRecordLayout.h | 4
-rw-r--r--  clang/lib/CIR/CodeGen/CIRGenRecordLayoutBuilder.cpp | 131
-rw-r--r--  clang/lib/CIR/CodeGen/CIRGenStmt.cpp | 153
-rw-r--r--  clang/lib/CIR/CodeGen/CIRGenTypeCache.h | 1
-rw-r--r--  clang/lib/CIR/CodeGen/CIRGenTypes.cpp | 63
-rw-r--r--  clang/lib/CIR/CodeGen/CIRGenTypes.h | 7
-rw-r--r--  clang/lib/CIR/CodeGen/CIRGenVTables.cpp | 244
-rw-r--r--  clang/lib/CIR/CodeGen/CIRGenVTables.h | 74
-rw-r--r--  clang/lib/CIR/CodeGen/CIRGenValue.h | 9
-rw-r--r--  clang/lib/CIR/CodeGen/CIRGenerator.cpp | 4
-rw-r--r--  clang/lib/CIR/CodeGen/CMakeLists.txt | 5
-rw-r--r--  clang/lib/CIR/CodeGen/EHScopeStack.h | 101
-rw-r--r--  clang/lib/CIR/CodeGen/TargetInfo.cpp | 14
-rw-r--r--  clang/lib/CIR/Dialect/IR/CIRAttrs.cpp | 101
-rw-r--r--  clang/lib/CIR/Dialect/IR/CIRDataLayout.cpp | 41
-rw-r--r--  clang/lib/CIR/Dialect/IR/CIRDialect.cpp | 341
-rw-r--r--  clang/lib/CIR/Dialect/Transforms/CIRCanonicalize.cpp | 4
-rw-r--r--  clang/lib/CIR/Dialect/Transforms/CMakeLists.txt | 1
-rw-r--r--  clang/lib/CIR/Dialect/Transforms/GotoSolver.cpp | 57
-rw-r--r--  clang/lib/CIR/Dialect/Transforms/LoweringPrepare.cpp | 281
-rw-r--r--  clang/lib/CIR/Lowering/CIRPasses.cpp | 1
-rw-r--r--  clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp | 519
-rw-r--r--  clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.h | 137
51 files changed, 5912 insertions, 511 deletions
diff --git a/clang/lib/CIR/CodeGen/Address.h b/clang/lib/CIR/CodeGen/Address.h
index 6c927e9..a851d06 100644
--- a/clang/lib/CIR/CodeGen/Address.h
+++ b/clang/lib/CIR/CodeGen/Address.h
@@ -68,6 +68,12 @@ public:
return pointerAndKnownNonNull.getPointer() != nullptr;
}
+ /// Return address with different pointer, but same element type and
+ /// alignment.
+ Address withPointer(mlir::Value newPtr) const {
+ return Address(newPtr, getElementType(), getAlignment());
+ }
+
/// Return address with different element type, a bitcast pointer, and
/// the same alignment.
Address withElementType(CIRGenBuilderTy &builder, mlir::Type ElemTy) const;
diff --git a/clang/lib/CIR/CodeGen/CIRGenAsm.cpp b/clang/lib/CIR/CodeGen/CIRGenAsm.cpp
new file mode 100644
index 0000000..17dffb3
--- /dev/null
+++ b/clang/lib/CIR/CodeGen/CIRGenAsm.cpp
@@ -0,0 +1,136 @@
+//===--- CIRGenAsm.cpp - Inline Assembly Support for CIR CodeGen ---------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// This file contains code to emit inline assembly.
+//
+//===----------------------------------------------------------------------===//
+
+#include "CIRGenFunction.h"
+#include "clang/CIR/MissingFeatures.h"
+
+using namespace clang;
+using namespace clang::CIRGen;
+using namespace cir;
+
+static AsmFlavor inferFlavor(const CIRGenModule &cgm, const AsmStmt &s) {
+ AsmFlavor gnuAsmFlavor =
+ cgm.getCodeGenOpts().getInlineAsmDialect() == CodeGenOptions::IAD_ATT
+ ? AsmFlavor::x86_att
+ : AsmFlavor::x86_intel;
+
+ return isa<MSAsmStmt>(&s) ? AsmFlavor::x86_intel : gnuAsmFlavor;
+}
+
+static void collectClobbers(const CIRGenFunction &cgf, const AsmStmt &s,
+ std::string &constraints, bool &hasUnwindClobber,
+ bool &readOnly, bool readNone) {
+
+ hasUnwindClobber = false;
+ const CIRGenModule &cgm = cgf.getCIRGenModule();
+
+ // Clobbers
+ for (unsigned i = 0, e = s.getNumClobbers(); i != e; i++) {
+ std::string clobber = s.getClobber(i);
+ if (clobber == "memory") {
+ readOnly = readNone = false;
+ } else if (clobber == "unwind") {
+ hasUnwindClobber = true;
+ continue;
+ } else if (clobber != "cc") {
+ clobber = cgf.getTarget().getNormalizedGCCRegisterName(clobber);
+ if (cgm.getCodeGenOpts().StackClashProtector &&
+ cgf.getTarget().isSPRegName(clobber))
+ cgm.getDiags().Report(s.getAsmLoc(),
+ diag::warn_stack_clash_protection_inline_asm);
+ }
+
+ if (isa<MSAsmStmt>(&s)) {
+ if (clobber == "eax" || clobber == "edx") {
+ if (constraints.find("=&A") != std::string::npos)
+ continue;
+ std::string::size_type position1 =
+ constraints.find("={" + clobber + "}");
+ if (position1 != std::string::npos) {
+ constraints.insert(position1 + 1, "&");
+ continue;
+ }
+ std::string::size_type position2 = constraints.find("=A");
+ if (position2 != std::string::npos) {
+ constraints.insert(position2 + 1, "&");
+ continue;
+ }
+ }
+ }
+ if (!constraints.empty())
+ constraints += ',';
+
+ constraints += "~{";
+ constraints += clobber;
+ constraints += '}';
+ }
+
+ // Add machine specific clobbers
+ std::string_view machineClobbers = cgf.getTarget().getClobbers();
+ if (!machineClobbers.empty()) {
+ if (!constraints.empty())
+ constraints += ',';
+ constraints += machineClobbers;
+ }
+}
+
+mlir::LogicalResult CIRGenFunction::emitAsmStmt(const AsmStmt &s) {
+ // Assemble the final asm string.
+ std::string asmString = s.generateAsmString(getContext());
+
+ bool isGCCAsmGoto = false;
+
+ std::string constraints;
+ std::vector<mlir::Value> outArgs;
+ std::vector<mlir::Value> inArgs;
+ std::vector<mlir::Value> inOutArgs;
+
+ // An inline asm can be marked readonly if it meets the following conditions:
+ // - it doesn't have any side effects
+ // - it doesn't clobber memory
+ // - it doesn't return a value by-reference
+ // It can be marked readnone if it doesn't have any input memory constraints
+ // in addition to meeting the conditions listed above.
+ bool readOnly = true, readNone = true;
+
+ if (s.getNumInputs() != 0 || s.getNumOutputs() != 0) {
+ assert(!cir::MissingFeatures::asmInputOperands());
+ assert(!cir::MissingFeatures::asmOutputOperands());
+ cgm.errorNYI(s.getAsmLoc(), "asm with operands");
+ }
+
+ bool hasUnwindClobber = false;
+ collectClobbers(*this, s, constraints, hasUnwindClobber, readOnly, readNone);
+
+ std::array<mlir::ValueRange, 3> operands = {outArgs, inArgs, inOutArgs};
+
+ mlir::Type resultType;
+
+ bool hasSideEffect = s.isVolatile() || s.getNumOutputs() == 0;
+
+ cir::InlineAsmOp ia = builder.create<cir::InlineAsmOp>(
+ getLoc(s.getAsmLoc()), resultType, operands, asmString, constraints,
+ hasSideEffect, inferFlavor(cgm, s), mlir::ArrayAttr());
+
+ if (isGCCAsmGoto) {
+ assert(!cir::MissingFeatures::asmGoto());
+ } else if (hasUnwindClobber) {
+ assert(!cir::MissingFeatures::asmUnwindClobber());
+ } else {
+ assert(!cir::MissingFeatures::asmMemoryEffects());
+ }
+
+ llvm::SmallVector<mlir::Attribute> operandAttrs;
+ ia.setOperandAttrsAttr(builder.getArrayAttr(operandAttrs));
+
+ return mlir::success();
+}
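[Editor's note: illustrative sketch, not part of the patch.] The only inline-asm shape the new emitAsmStmt accepts so far is a statement with clobbers but no input or output operands (operands still hit errorNYI above). For a hypothetical statement like the one below, collectClobbers builds a constraint string of the form "~{memory},~{cc}" followed by any target-supplied clobbers from getClobbers(), and because the statement is volatile and has no outputs, hasSideEffect is set on the resulting cir::InlineAsmOp.

    // Clobbers only, no operands: stays on the path emitAsmStmt supports today.
    void compiler_barrier() {
      asm volatile("" ::: "memory", "cc");
    }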
diff --git a/clang/lib/CIR/CodeGen/CIRGenAtomic.cpp b/clang/lib/CIR/CodeGen/CIRGenAtomic.cpp
new file mode 100644
index 0000000..d8981c8
--- /dev/null
+++ b/clang/lib/CIR/CodeGen/CIRGenAtomic.cpp
@@ -0,0 +1,569 @@
+//===--- CIRGenAtomic.cpp - Emit CIR for atomic operations ----------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// This file contains the code for emitting atomic operations.
+//
+//===----------------------------------------------------------------------===//
+
+#include "CIRGenFunction.h"
+#include "clang/CIR/MissingFeatures.h"
+
+using namespace clang;
+using namespace clang::CIRGen;
+using namespace cir;
+
+namespace {
+class AtomicInfo {
+ CIRGenFunction &cgf;
+ QualType atomicTy;
+ QualType valueTy;
+ uint64_t atomicSizeInBits = 0;
+ uint64_t valueSizeInBits = 0;
+ CharUnits atomicAlign;
+ CharUnits valueAlign;
+ TypeEvaluationKind evaluationKind = cir::TEK_Scalar;
+ LValue lvalue;
+ mlir::Location loc;
+
+public:
+ AtomicInfo(CIRGenFunction &cgf, LValue &lvalue, mlir::Location loc)
+ : cgf(cgf), loc(loc) {
+ assert(!lvalue.isGlobalReg());
+ ASTContext &ctx = cgf.getContext();
+ if (lvalue.isSimple()) {
+ atomicTy = lvalue.getType();
+ if (auto *ty = atomicTy->getAs<AtomicType>())
+ valueTy = ty->getValueType();
+ else
+ valueTy = atomicTy;
+ evaluationKind = cgf.getEvaluationKind(valueTy);
+
+ TypeInfo valueTypeInfo = ctx.getTypeInfo(valueTy);
+ TypeInfo atomicTypeInfo = ctx.getTypeInfo(atomicTy);
+ uint64_t valueAlignInBits = valueTypeInfo.Align;
+ uint64_t atomicAlignInBits = atomicTypeInfo.Align;
+ valueSizeInBits = valueTypeInfo.Width;
+ atomicSizeInBits = atomicTypeInfo.Width;
+ assert(valueSizeInBits <= atomicSizeInBits);
+ assert(valueAlignInBits <= atomicAlignInBits);
+
+ atomicAlign = ctx.toCharUnitsFromBits(atomicAlignInBits);
+ valueAlign = ctx.toCharUnitsFromBits(valueAlignInBits);
+ if (lvalue.getAlignment().isZero())
+ lvalue.setAlignment(atomicAlign);
+
+ this->lvalue = lvalue;
+ } else {
+ assert(!cir::MissingFeatures::atomicInfo());
+ cgf.cgm.errorNYI(loc, "AtomicInfo: non-simple lvalue");
+ }
+
+ assert(!cir::MissingFeatures::atomicUseLibCall());
+ }
+
+ QualType getValueType() const { return valueTy; }
+ CharUnits getAtomicAlignment() const { return atomicAlign; }
+ TypeEvaluationKind getEvaluationKind() const { return evaluationKind; }
+ mlir::Value getAtomicPointer() const {
+ if (lvalue.isSimple())
+ return lvalue.getPointer();
+ assert(!cir::MissingFeatures::atomicInfoGetAtomicPointer());
+ return nullptr;
+ }
+ Address getAtomicAddress() const {
+ mlir::Type elemTy;
+ if (lvalue.isSimple()) {
+ elemTy = lvalue.getAddress().getElementType();
+ } else {
+ assert(!cir::MissingFeatures::atomicInfoGetAtomicAddress());
+ cgf.cgm.errorNYI(loc, "AtomicInfo::getAtomicAddress: non-simple lvalue");
+ }
+ return Address(getAtomicPointer(), elemTy, getAtomicAlignment());
+ }
+
+ /// Is the atomic size larger than the underlying value type?
+ ///
+ /// Note that the absence of padding does not mean that atomic
+ /// objects are completely interchangeable with non-atomic
+ /// objects: we might have promoted the alignment of a type
+ /// without making it bigger.
+ bool hasPadding() const { return (valueSizeInBits != atomicSizeInBits); }
+
+ bool emitMemSetZeroIfNecessary() const;
+
+ /// Cast the given pointer to an integer pointer suitable for atomic
+ /// operations on the source.
+ Address castToAtomicIntPointer(Address addr) const;
+
+ /// If addr is compatible with the iN that will be used for an atomic
+ /// operation, bitcast it. Otherwise, create a temporary that is suitable and
+ /// copy the value across.
+ Address convertToAtomicIntPointer(Address addr) const;
+
+ /// Copy an atomic r-value into atomic-layout memory.
+ void emitCopyIntoMemory(RValue rvalue) const;
+
+ /// Project an l-value down to the value field.
+ LValue projectValue() const {
+ assert(lvalue.isSimple());
+ Address addr = getAtomicAddress();
+ if (hasPadding()) {
+ cgf.cgm.errorNYI(loc, "AtomicInfo::projectValue: padding");
+ }
+
+ assert(!cir::MissingFeatures::opTBAA());
+ return LValue::makeAddr(addr, getValueType(), lvalue.getBaseInfo());
+ }
+
+ /// Creates temp alloca for intermediate operations on atomic value.
+ Address createTempAlloca() const;
+
+private:
+ bool requiresMemSetZero(mlir::Type ty) const;
+};
+} // namespace
+
+// This function emits any expression (scalar, complex, or aggregate)
+// into a temporary alloca.
+static Address emitValToTemp(CIRGenFunction &cgf, Expr *e) {
+ Address declPtr = cgf.createMemTemp(
+ e->getType(), cgf.getLoc(e->getSourceRange()), ".atomictmp");
+ cgf.emitAnyExprToMem(e, declPtr, e->getType().getQualifiers(),
+ /*Init*/ true);
+ return declPtr;
+}
+
+/// Does a store of the given IR type modify the full expected width?
+static bool isFullSizeType(CIRGenModule &cgm, mlir::Type ty,
+ uint64_t expectedSize) {
+ return cgm.getDataLayout().getTypeStoreSize(ty) * 8 == expectedSize;
+}
+
+/// Does the atomic type require memsetting to zero before initialization?
+///
+/// The IR type is provided as a way of making certain queries faster.
+bool AtomicInfo::requiresMemSetZero(mlir::Type ty) const {
+ // If the atomic type has size padding, we definitely need a memset.
+ if (hasPadding())
+ return true;
+
+ // Otherwise, do some simple heuristics to try to avoid it:
+ switch (getEvaluationKind()) {
+ // For scalars and complexes, check whether the store size of the
+ // type uses the full size.
+ case cir::TEK_Scalar:
+ return !isFullSizeType(cgf.cgm, ty, atomicSizeInBits);
+ case cir::TEK_Complex:
+ cgf.cgm.errorNYI(loc, "AtomicInfo::requiresMemSetZero: complex type");
+ return false;
+
+ // Padding in structs has an undefined bit pattern. User beware.
+ case cir::TEK_Aggregate:
+ return false;
+ }
+ llvm_unreachable("bad evaluation kind");
+}
+
+Address AtomicInfo::convertToAtomicIntPointer(Address addr) const {
+ mlir::Type ty = addr.getElementType();
+ uint64_t sourceSizeInBits = cgf.cgm.getDataLayout().getTypeSizeInBits(ty);
+ if (sourceSizeInBits != atomicSizeInBits) {
+ cgf.cgm.errorNYI(
+ loc,
+ "AtomicInfo::convertToAtomicIntPointer: convert through temp alloca");
+ }
+
+ return castToAtomicIntPointer(addr);
+}
+
+Address AtomicInfo::createTempAlloca() const {
+ Address tempAlloca = cgf.createMemTemp(
+ (lvalue.isBitField() && valueSizeInBits > atomicSizeInBits) ? valueTy
+ : atomicTy,
+ getAtomicAlignment(), loc, "atomic-temp");
+
+ // Cast to pointer to value type for bitfields.
+ if (lvalue.isBitField()) {
+ cgf.cgm.errorNYI(loc, "AtomicInfo::createTempAlloca: bitfield lvalue");
+ }
+
+ return tempAlloca;
+}
+
+Address AtomicInfo::castToAtomicIntPointer(Address addr) const {
+ auto intTy = mlir::dyn_cast<cir::IntType>(addr.getElementType());
+ // Don't bother with int casts if the integer size is the same.
+ if (intTy && intTy.getWidth() == atomicSizeInBits)
+ return addr;
+ auto ty = cgf.getBuilder().getUIntNTy(atomicSizeInBits);
+ return addr.withElementType(cgf.getBuilder(), ty);
+}
+
+bool AtomicInfo::emitMemSetZeroIfNecessary() const {
+ assert(lvalue.isSimple());
+ Address addr = lvalue.getAddress();
+ if (!requiresMemSetZero(addr.getElementType()))
+ return false;
+
+ cgf.cgm.errorNYI(loc,
+ "AtomicInfo::emitMemSetZeroIfNecessary: emit memset zero");
+ return false;
+}
+
+/// Copy an r-value into memory as part of storing to an atomic type.
+/// This needs to create a bit-pattern suitable for atomic operations.
+void AtomicInfo::emitCopyIntoMemory(RValue rvalue) const {
+ assert(lvalue.isSimple());
+
+ // If we have an r-value, the rvalue should be of the atomic type,
+ // which means that the caller is responsible for having zeroed
+ // any padding. Just do an aggregate copy of that type.
+ if (rvalue.isAggregate()) {
+ cgf.cgm.errorNYI("copying aggregate into atomic lvalue");
+ return;
+ }
+
+ // Okay, otherwise we're copying stuff.
+
+ // Zero out the buffer if necessary.
+ emitMemSetZeroIfNecessary();
+
+ // Drill past the padding if present.
+ LValue tempLValue = projectValue();
+
+ // Okay, store the rvalue in.
+ if (rvalue.isScalar()) {
+ cgf.emitStoreOfScalar(rvalue.getValue(), tempLValue, /*isInit=*/true);
+ } else {
+ cgf.cgm.errorNYI("copying complex into atomic lvalue");
+ }
+}
+
+static void emitAtomicOp(CIRGenFunction &cgf, AtomicExpr *expr, Address dest,
+ Address ptr, Address val1, uint64_t size,
+ cir::MemOrder order) {
+ std::unique_ptr<AtomicScopeModel> scopeModel = expr->getScopeModel();
+ if (scopeModel) {
+ assert(!cir::MissingFeatures::atomicScope());
+ cgf.cgm.errorNYI(expr->getSourceRange(), "emitAtomicOp: atomic scope");
+ return;
+ }
+
+ assert(!cir::MissingFeatures::atomicSyncScopeID());
+
+ CIRGenBuilderTy &builder = cgf.getBuilder();
+ mlir::Location loc = cgf.getLoc(expr->getSourceRange());
+ auto orderAttr = cir::MemOrderAttr::get(builder.getContext(), order);
+
+ switch (expr->getOp()) {
+ case AtomicExpr::AO__c11_atomic_init:
+ llvm_unreachable("already handled!");
+
+ case AtomicExpr::AO__c11_atomic_load:
+ case AtomicExpr::AO__atomic_load_n:
+ case AtomicExpr::AO__atomic_load: {
+ cir::LoadOp load =
+ builder.createLoad(loc, ptr, /*isVolatile=*/expr->isVolatile());
+
+ assert(!cir::MissingFeatures::atomicSyncScopeID());
+
+ load->setAttr("mem_order", orderAttr);
+
+ builder.createStore(loc, load->getResult(0), dest);
+ return;
+ }
+
+ case AtomicExpr::AO__c11_atomic_store:
+ case AtomicExpr::AO__atomic_store_n:
+ case AtomicExpr::AO__atomic_store: {
+ cir::LoadOp loadVal1 = builder.createLoad(loc, val1);
+
+ assert(!cir::MissingFeatures::atomicSyncScopeID());
+
+ builder.createStore(loc, loadVal1, ptr, expr->isVolatile(),
+ /*align=*/mlir::IntegerAttr{}, orderAttr);
+ return;
+ }
+
+ case AtomicExpr::AO__opencl_atomic_init:
+
+ case AtomicExpr::AO__c11_atomic_compare_exchange_strong:
+ case AtomicExpr::AO__hip_atomic_compare_exchange_strong:
+ case AtomicExpr::AO__opencl_atomic_compare_exchange_strong:
+
+ case AtomicExpr::AO__c11_atomic_compare_exchange_weak:
+ case AtomicExpr::AO__opencl_atomic_compare_exchange_weak:
+ case AtomicExpr::AO__hip_atomic_compare_exchange_weak:
+
+ case AtomicExpr::AO__atomic_compare_exchange:
+ case AtomicExpr::AO__atomic_compare_exchange_n:
+ case AtomicExpr::AO__scoped_atomic_compare_exchange:
+ case AtomicExpr::AO__scoped_atomic_compare_exchange_n:
+
+ case AtomicExpr::AO__opencl_atomic_load:
+ case AtomicExpr::AO__hip_atomic_load:
+ case AtomicExpr::AO__scoped_atomic_load_n:
+ case AtomicExpr::AO__scoped_atomic_load:
+
+ case AtomicExpr::AO__opencl_atomic_store:
+ case AtomicExpr::AO__hip_atomic_store:
+ case AtomicExpr::AO__scoped_atomic_store:
+ case AtomicExpr::AO__scoped_atomic_store_n:
+
+ case AtomicExpr::AO__c11_atomic_exchange:
+ case AtomicExpr::AO__hip_atomic_exchange:
+ case AtomicExpr::AO__opencl_atomic_exchange:
+ case AtomicExpr::AO__atomic_exchange_n:
+ case AtomicExpr::AO__atomic_exchange:
+ case AtomicExpr::AO__scoped_atomic_exchange_n:
+ case AtomicExpr::AO__scoped_atomic_exchange:
+
+ case AtomicExpr::AO__atomic_add_fetch:
+ case AtomicExpr::AO__scoped_atomic_add_fetch:
+
+ case AtomicExpr::AO__c11_atomic_fetch_add:
+ case AtomicExpr::AO__hip_atomic_fetch_add:
+ case AtomicExpr::AO__opencl_atomic_fetch_add:
+ case AtomicExpr::AO__atomic_fetch_add:
+ case AtomicExpr::AO__scoped_atomic_fetch_add:
+
+ case AtomicExpr::AO__atomic_sub_fetch:
+ case AtomicExpr::AO__scoped_atomic_sub_fetch:
+
+ case AtomicExpr::AO__c11_atomic_fetch_sub:
+ case AtomicExpr::AO__hip_atomic_fetch_sub:
+ case AtomicExpr::AO__opencl_atomic_fetch_sub:
+ case AtomicExpr::AO__atomic_fetch_sub:
+ case AtomicExpr::AO__scoped_atomic_fetch_sub:
+
+ case AtomicExpr::AO__atomic_min_fetch:
+ case AtomicExpr::AO__scoped_atomic_min_fetch:
+
+ case AtomicExpr::AO__c11_atomic_fetch_min:
+ case AtomicExpr::AO__hip_atomic_fetch_min:
+ case AtomicExpr::AO__opencl_atomic_fetch_min:
+ case AtomicExpr::AO__atomic_fetch_min:
+ case AtomicExpr::AO__scoped_atomic_fetch_min:
+
+ case AtomicExpr::AO__atomic_max_fetch:
+ case AtomicExpr::AO__scoped_atomic_max_fetch:
+
+ case AtomicExpr::AO__c11_atomic_fetch_max:
+ case AtomicExpr::AO__hip_atomic_fetch_max:
+ case AtomicExpr::AO__opencl_atomic_fetch_max:
+ case AtomicExpr::AO__atomic_fetch_max:
+ case AtomicExpr::AO__scoped_atomic_fetch_max:
+
+ case AtomicExpr::AO__atomic_and_fetch:
+ case AtomicExpr::AO__scoped_atomic_and_fetch:
+
+ case AtomicExpr::AO__c11_atomic_fetch_and:
+ case AtomicExpr::AO__hip_atomic_fetch_and:
+ case AtomicExpr::AO__opencl_atomic_fetch_and:
+ case AtomicExpr::AO__atomic_fetch_and:
+ case AtomicExpr::AO__scoped_atomic_fetch_and:
+
+ case AtomicExpr::AO__atomic_or_fetch:
+ case AtomicExpr::AO__scoped_atomic_or_fetch:
+
+ case AtomicExpr::AO__c11_atomic_fetch_or:
+ case AtomicExpr::AO__hip_atomic_fetch_or:
+ case AtomicExpr::AO__opencl_atomic_fetch_or:
+ case AtomicExpr::AO__atomic_fetch_or:
+ case AtomicExpr::AO__scoped_atomic_fetch_or:
+
+ case AtomicExpr::AO__atomic_xor_fetch:
+ case AtomicExpr::AO__scoped_atomic_xor_fetch:
+
+ case AtomicExpr::AO__c11_atomic_fetch_xor:
+ case AtomicExpr::AO__hip_atomic_fetch_xor:
+ case AtomicExpr::AO__opencl_atomic_fetch_xor:
+ case AtomicExpr::AO__atomic_fetch_xor:
+ case AtomicExpr::AO__scoped_atomic_fetch_xor:
+
+ case AtomicExpr::AO__atomic_nand_fetch:
+ case AtomicExpr::AO__scoped_atomic_nand_fetch:
+
+ case AtomicExpr::AO__c11_atomic_fetch_nand:
+ case AtomicExpr::AO__atomic_fetch_nand:
+ case AtomicExpr::AO__scoped_atomic_fetch_nand:
+
+ case AtomicExpr::AO__atomic_test_and_set:
+
+ case AtomicExpr::AO__atomic_clear:
+ cgf.cgm.errorNYI(expr->getSourceRange(), "emitAtomicOp: expr op NYI");
+ break;
+ }
+}
+
+static bool isMemOrderValid(uint64_t order, bool isStore, bool isLoad) {
+ if (!cir::isValidCIRAtomicOrderingCABI(order))
+ return false;
+ auto memOrder = static_cast<cir::MemOrder>(order);
+ if (isStore)
+ return memOrder != cir::MemOrder::Consume &&
+ memOrder != cir::MemOrder::Acquire &&
+ memOrder != cir::MemOrder::AcquireRelease;
+ if (isLoad)
+ return memOrder != cir::MemOrder::Release &&
+ memOrder != cir::MemOrder::AcquireRelease;
+ return true;
+}
+
+RValue CIRGenFunction::emitAtomicExpr(AtomicExpr *e) {
+ QualType atomicTy = e->getPtr()->getType()->getPointeeType();
+ QualType memTy = atomicTy;
+ if (const auto *ty = atomicTy->getAs<AtomicType>())
+ memTy = ty->getValueType();
+
+ Address val1 = Address::invalid();
+ Address dest = Address::invalid();
+ Address ptr = emitPointerWithAlignment(e->getPtr());
+
+ assert(!cir::MissingFeatures::openCL());
+ if (e->getOp() == AtomicExpr::AO__c11_atomic_init) {
+ LValue lvalue = makeAddrLValue(ptr, atomicTy);
+ emitAtomicInit(e->getVal1(), lvalue);
+ return RValue::get(nullptr);
+ }
+
+ TypeInfoChars typeInfo = getContext().getTypeInfoInChars(atomicTy);
+ uint64_t size = typeInfo.Width.getQuantity();
+
+ Expr::EvalResult orderConst;
+ mlir::Value order;
+ if (!e->getOrder()->EvaluateAsInt(orderConst, getContext()))
+ order = emitScalarExpr(e->getOrder());
+
+ bool shouldCastToIntPtrTy = true;
+
+ switch (e->getOp()) {
+ default:
+ cgm.errorNYI(e->getSourceRange(), "atomic op NYI");
+ return RValue::get(nullptr);
+
+ case AtomicExpr::AO__c11_atomic_init:
+ llvm_unreachable("already handled above with emitAtomicInit");
+
+ case AtomicExpr::AO__atomic_load_n:
+ case AtomicExpr::AO__c11_atomic_load:
+ break;
+
+ case AtomicExpr::AO__atomic_load:
+ dest = emitPointerWithAlignment(e->getVal1());
+ break;
+
+ case AtomicExpr::AO__atomic_store:
+ val1 = emitPointerWithAlignment(e->getVal1());
+ break;
+
+ case AtomicExpr::AO__atomic_store_n:
+ case AtomicExpr::AO__c11_atomic_store:
+ val1 = emitValToTemp(*this, e->getVal1());
+ break;
+ }
+
+ QualType resultTy = e->getType().getUnqualifiedType();
+
+ // The inlined atomics only function on iN types, where N is a power of 2. We
+ // need to make sure (via temporaries if necessary) that all incoming values
+ // are compatible.
+ LValue atomicValue = makeAddrLValue(ptr, atomicTy);
+ AtomicInfo atomics(*this, atomicValue, getLoc(e->getSourceRange()));
+
+ if (shouldCastToIntPtrTy) {
+ ptr = atomics.castToAtomicIntPointer(ptr);
+ if (val1.isValid())
+ val1 = atomics.convertToAtomicIntPointer(val1);
+ }
+ if (dest.isValid()) {
+ if (shouldCastToIntPtrTy)
+ dest = atomics.castToAtomicIntPointer(dest);
+ } else if (!resultTy->isVoidType()) {
+ dest = atomics.createTempAlloca();
+ if (shouldCastToIntPtrTy)
+ dest = atomics.castToAtomicIntPointer(dest);
+ }
+
+ bool powerOf2Size = (size & (size - 1)) == 0;
+ bool useLibCall = !powerOf2Size || (size > 16);
+
+ // For atomics larger than 16 bytes, emit a libcall from the frontend. This
+ // avoids the overhead of dealing with excessively-large value types in IR.
+ // Non-power-of-2 values also lower to libcall here, as they are not currently
+ // permitted in IR instructions (although that constraint could be relaxed in
+ // the future). For other cases where a libcall is required on a given
+ // platform, we let the backend handle it (this includes handling for all of
+ // the size-optimized libcall variants, which are only valid up to 16 bytes.)
+ //
+ // See: https://llvm.org/docs/Atomics.html#libcalls-atomic
+ if (useLibCall) {
+ assert(!cir::MissingFeatures::atomicUseLibCall());
+ cgm.errorNYI(e->getSourceRange(), "emitAtomicExpr: emit atomic lib call");
+ return RValue::get(nullptr);
+ }
+
+ bool isStore = e->getOp() == AtomicExpr::AO__c11_atomic_store ||
+ e->getOp() == AtomicExpr::AO__opencl_atomic_store ||
+ e->getOp() == AtomicExpr::AO__hip_atomic_store ||
+ e->getOp() == AtomicExpr::AO__atomic_store ||
+ e->getOp() == AtomicExpr::AO__atomic_store_n ||
+ e->getOp() == AtomicExpr::AO__scoped_atomic_store ||
+ e->getOp() == AtomicExpr::AO__scoped_atomic_store_n ||
+ e->getOp() == AtomicExpr::AO__atomic_clear;
+ bool isLoad = e->getOp() == AtomicExpr::AO__c11_atomic_load ||
+ e->getOp() == AtomicExpr::AO__opencl_atomic_load ||
+ e->getOp() == AtomicExpr::AO__hip_atomic_load ||
+ e->getOp() == AtomicExpr::AO__atomic_load ||
+ e->getOp() == AtomicExpr::AO__atomic_load_n ||
+ e->getOp() == AtomicExpr::AO__scoped_atomic_load ||
+ e->getOp() == AtomicExpr::AO__scoped_atomic_load_n;
+
+ if (!order) {
+ // We have evaluated the memory order as an integer constant in orderConst.
+ // We should not ever get to a case where the ordering isn't a valid CABI
+ // value, but it's hard to enforce that in general.
+ uint64_t ord = orderConst.Val.getInt().getZExtValue();
+ if (isMemOrderValid(ord, isStore, isLoad))
+ emitAtomicOp(*this, e, dest, ptr, val1, size,
+ static_cast<cir::MemOrder>(ord));
+ } else {
+ assert(!cir::MissingFeatures::atomicExpr());
+ cgm.errorNYI(e->getSourceRange(), "emitAtomicExpr: dynamic memory order");
+ return RValue::get(nullptr);
+ }
+
+ if (resultTy->isVoidType())
+ return RValue::get(nullptr);
+
+ return convertTempToRValue(
+ dest.withElementType(builder, convertTypeForMem(resultTy)), resultTy,
+ e->getExprLoc());
+}
+
+void CIRGenFunction::emitAtomicInit(Expr *init, LValue dest) {
+ AtomicInfo atomics(*this, dest, getLoc(init->getSourceRange()));
+
+ switch (atomics.getEvaluationKind()) {
+ case cir::TEK_Scalar: {
+ mlir::Value value = emitScalarExpr(init);
+ atomics.emitCopyIntoMemory(RValue::get(value));
+ return;
+ }
+
+ case cir::TEK_Complex:
+ cgm.errorNYI(init->getSourceRange(), "emitAtomicInit: complex type");
+ return;
+
+ case cir::TEK_Aggregate:
+ cgm.errorNYI(init->getSourceRange(), "emitAtomicInit: aggregate type");
+ return;
+ }
+
+ llvm_unreachable("bad evaluation kind");
+}
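[Editor's note: illustrative sketch, not part of the patch.] Source-level cases the new emitAtomicExpr/emitAtomicOp path covers: atomic loads and stores of power-of-two-sized objects no larger than 16 bytes, with a constant memory order. Read-modify-write builtins, compare-exchange, oversized or non-power-of-two types, and dynamically computed orders all still reach errorNYI or the libcall placeholder above. Function names below are arbitrary.

    // __atomic_load_n / __atomic_store_n hit the AO__atomic_load_n and
    // AO__atomic_store_n cases; the constant order becomes the mem_order
    // attribute on the generated cir.load / cir.store.
    int load_acquire(int *p) {
      return __atomic_load_n(p, __ATOMIC_ACQUIRE);
    }

    void store_release(int *p, int v) {
      __atomic_store_n(p, v, __ATOMIC_RELEASE);
    }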
diff --git a/clang/lib/CIR/CodeGen/CIRGenBuilder.cpp b/clang/lib/CIR/CodeGen/CIRGenBuilder.cpp
index 4a5a1dd5..755c76c 100644
--- a/clang/lib/CIR/CodeGen/CIRGenBuilder.cpp
+++ b/clang/lib/CIR/CodeGen/CIRGenBuilder.cpp
@@ -7,6 +7,7 @@
//===----------------------------------------------------------------------===//
#include "CIRGenBuilder.h"
+#include "llvm/ADT/TypeSwitch.h"
using namespace clang::CIRGen;
@@ -66,6 +67,69 @@ clang::CIRGen::CIRGenBuilderTy::getConstFP(mlir::Location loc, mlir::Type t,
return create<cir::ConstantOp>(loc, cir::FPAttr::get(t, fpVal));
}
+void CIRGenBuilderTy::computeGlobalViewIndicesFromFlatOffset(
+ int64_t offset, mlir::Type ty, cir::CIRDataLayout layout,
+ llvm::SmallVectorImpl<int64_t> &indices) {
+ if (!offset)
+ return;
+
+ auto getIndexAndNewOffset =
+ [](int64_t offset, int64_t eltSize) -> std::pair<int64_t, int64_t> {
+ int64_t divRet = offset / eltSize;
+ if (divRet < 0)
+ divRet -= 1; // make sure offset is positive
+ int64_t modRet = offset - (divRet * eltSize);
+ return {divRet, modRet};
+ };
+
+ mlir::Type subType =
+ llvm::TypeSwitch<mlir::Type, mlir::Type>(ty)
+ .Case<cir::ArrayType>([&](auto arrayTy) {
+ int64_t eltSize = layout.getTypeAllocSize(arrayTy.getElementType());
+ const auto [index, newOffset] =
+ getIndexAndNewOffset(offset, eltSize);
+ indices.push_back(index);
+ offset = newOffset;
+ return arrayTy.getElementType();
+ })
+ .Case<cir::RecordType>([&](auto recordTy) {
+ ArrayRef<mlir::Type> elts = recordTy.getMembers();
+ int64_t pos = 0;
+ for (size_t i = 0; i < elts.size(); ++i) {
+ int64_t eltSize =
+ (int64_t)layout.getTypeAllocSize(elts[i]).getFixedValue();
+ unsigned alignMask = layout.getABITypeAlign(elts[i]).value() - 1;
+ if (recordTy.getPacked())
+ alignMask = 0;
+ // Union's fields have the same offset, so no need to change pos
+ // here, we just need to find eltSize that is greater than the
+ // required offset. The same is true for the similar union type
+ // check below
+ if (!recordTy.isUnion())
+ pos = (pos + alignMask) & ~alignMask;
+ assert(offset >= 0);
+ if (offset < pos + eltSize) {
+ indices.push_back(i);
+ offset -= pos;
+ return elts[i];
+ }
+ // No need to update pos here, see the comment above.
+ if (!recordTy.isUnion())
+ pos += eltSize;
+ }
+ llvm_unreachable("offset was not found within the record");
+ })
+ .Default([](mlir::Type otherTy) {
+ llvm_unreachable("unexpected type");
+ return otherTy; // Even though this is unreachable, we need to
+ // return a type to satisfy the return type of the
+ // lambda.
+ });
+
+ assert(subType);
+ computeGlobalViewIndicesFromFlatOffset(offset, subType, layout, indices);
+}
+
// This can't be defined in Address.h because that file is included by
// CIRGenBuilder.h
Address Address::withElementType(CIRGenBuilderTy &builder,
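[Editor's note: illustrative sketch, not CIR code and not part of the patch.] The array branch of computeGlobalViewIndicesFromFlatOffset above is a div/mod recursion: each level contributes one index and carries the remaining offset into the element type. The standalone program below, with hypothetical names, shows the same computation for a plain nested array.

    #include <cassert>
    #include <cstdint>
    #include <iostream>
    #include <vector>

    // For a nested array with the given per-dimension element sizes (outermost
    // first), turn a flat byte offset into one index per dimension.
    static std::vector<int64_t>
    flatOffsetToIndices(int64_t offset, const std::vector<int64_t> &eltSizes) {
      std::vector<int64_t> indices;
      for (int64_t eltSize : eltSizes) {
        indices.push_back(offset / eltSize); // index at this level
        offset %= eltSize;                   // remainder goes to the next level
      }
      assert(offset == 0 && "offset must land on an element boundary");
      return indices;
    }

    int main() {
      // int a[4][8]: a[2][5] lives at byte offset 2 * 32 + 5 * 4 == 84,
      // which maps back to the index sequence {2, 5}.
      std::vector<int64_t> idx = flatOffsetToIndices(84, {32, 4});
      std::cout << idx[0] << ", " << idx[1] << "\n"; // prints "2, 5"
    }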
diff --git a/clang/lib/CIR/CodeGen/CIRGenBuilder.h b/clang/lib/CIR/CodeGen/CIRGenBuilder.h
index ff8e121..d5cb6d4 100644
--- a/clang/lib/CIR/CodeGen/CIRGenBuilder.h
+++ b/clang/lib/CIR/CodeGen/CIRGenBuilder.h
@@ -12,6 +12,7 @@
#include "Address.h"
#include "CIRGenRecordLayout.h"
#include "CIRGenTypeCache.h"
+#include "clang/CIR/Dialect/IR/CIRDataLayout.h"
#include "clang/CIR/Interfaces/CIRTypeInterfaces.h"
#include "clang/CIR/MissingFeatures.h"
@@ -59,6 +60,23 @@ public:
trailingZerosNum);
}
+ cir::ConstRecordAttr getAnonConstRecord(mlir::ArrayAttr arrayAttr,
+ bool packed = false,
+ bool padded = false,
+ mlir::Type ty = {}) {
+ llvm::SmallVector<mlir::Type, 4> members;
+ for (auto &f : arrayAttr) {
+ auto ta = mlir::cast<mlir::TypedAttr>(f);
+ members.push_back(ta.getType());
+ }
+
+ if (!ty)
+ ty = getAnonRecordTy(members, packed, padded);
+
+ auto sTy = mlir::cast<cir::RecordType>(ty);
+ return cir::ConstRecordAttr::get(sTy, arrayAttr);
+ }
+
std::string getUniqueAnonRecordName() { return getUniqueRecordName("anon"); }
std::string getUniqueRecordName(const std::string &baseName) {
@@ -83,6 +101,10 @@ public:
llvm_unreachable("Unsupported format for long double");
}
+ mlir::Type getPtrToVPtrType() {
+ return getPointerTo(cir::VPtrType::get(getContext()));
+ }
+
/// Get a CIR record kind from a AST declaration tag.
cir::RecordType::RecordKind getRecordKind(const clang::TagTypeKind kind) {
switch (kind) {
@@ -244,6 +266,17 @@ public:
}
bool isInt(mlir::Type i) { return mlir::isa<cir::IntType>(i); }
+ // Fetch the type representing a pointer to unsigned int8 values.
+ cir::PointerType getUInt8PtrTy() { return typeCache.UInt8PtrTy; }
+
+ /// Get a CIR anonymous record type.
+ cir::RecordType getAnonRecordTy(llvm::ArrayRef<mlir::Type> members,
+ bool packed = false, bool padded = false) {
+ assert(!cir::MissingFeatures::astRecordDeclAttr());
+ auto kind = cir::RecordType::RecordKind::Struct;
+ return getType<cir::RecordType>(members, packed, padded, kind);
+ }
+
//
// Constant creation helpers
// -------------------------
@@ -251,11 +284,14 @@ public:
cir::ConstantOp getSInt32(int32_t c, mlir::Location loc) {
return getConstantInt(loc, getSInt32Ty(), c);
}
+ cir::ConstantOp getUInt32(uint32_t c, mlir::Location loc) {
+ return getConstantInt(loc, getUInt32Ty(), c);
+ }
// Creates constant nullptr for pointer type ty.
cir::ConstantOp getNullPtr(mlir::Type ty, mlir::Location loc) {
assert(!cir::MissingFeatures::targetCodeGenInfoGetNullPointer());
- return create<cir::ConstantOp>(loc, getConstPtrAttr(ty, 0));
+ return cir::ConstantOp::create(*this, loc, getConstPtrAttr(ty, 0));
}
mlir::Value createNeg(mlir::Value value) {
@@ -264,7 +300,7 @@ public:
// Source is a unsigned integer: first cast it to signed.
if (intTy.isUnsigned())
value = createIntCast(value, getSIntNTy(intTy.getWidth()));
- return create<cir::UnaryOp>(value.getLoc(), value.getType(),
+ return cir::UnaryOp::create(*this, value.getLoc(), value.getType(),
cir::UnaryOpKind::Minus, value);
}
@@ -276,8 +312,8 @@ public:
mlir::Value createFloatingCast(mlir::Value v, mlir::Type destType) {
assert(!cir::MissingFeatures::fpConstraints());
- return create<cir::CastOp>(v.getLoc(), destType, cir::CastKind::floating,
- v);
+ return cir::CastOp::create(*this, v.getLoc(), destType,
+ cir::CastKind::floating, v);
}
mlir::Value createFSub(mlir::Location loc, mlir::Value lhs, mlir::Value rhs) {
@@ -285,7 +321,7 @@ public:
assert(!cir::MissingFeatures::fpConstraints());
assert(!cir::MissingFeatures::fastMathFlags());
- return create<cir::BinOp>(loc, cir::BinOpKind::Sub, lhs, rhs);
+ return cir::BinOp::create(*this, loc, cir::BinOpKind::Sub, lhs, rhs);
}
mlir::Value createFAdd(mlir::Location loc, mlir::Value lhs, mlir::Value rhs) {
@@ -293,21 +329,21 @@ public:
assert(!cir::MissingFeatures::fpConstraints());
assert(!cir::MissingFeatures::fastMathFlags());
- return create<cir::BinOp>(loc, cir::BinOpKind::Add, lhs, rhs);
+ return cir::BinOp::create(*this, loc, cir::BinOpKind::Add, lhs, rhs);
}
mlir::Value createFMul(mlir::Location loc, mlir::Value lhs, mlir::Value rhs) {
assert(!cir::MissingFeatures::metaDataNode());
assert(!cir::MissingFeatures::fpConstraints());
assert(!cir::MissingFeatures::fastMathFlags());
- return create<cir::BinOp>(loc, cir::BinOpKind::Mul, lhs, rhs);
+ return cir::BinOp::create(*this, loc, cir::BinOpKind::Mul, lhs, rhs);
}
mlir::Value createFDiv(mlir::Location loc, mlir::Value lhs, mlir::Value rhs) {
assert(!cir::MissingFeatures::metaDataNode());
assert(!cir::MissingFeatures::fpConstraints());
assert(!cir::MissingFeatures::fastMathFlags());
- return create<cir::BinOp>(loc, cir::BinOpKind::Div, lhs, rhs);
+ return cir::BinOp::create(*this, loc, cir::BinOpKind::Div, lhs, rhs);
}
Address createBaseClassAddr(mlir::Location loc, Address addr,
@@ -317,8 +353,9 @@ public:
return addr;
auto ptrTy = getPointerTo(destType);
- auto baseAddr = create<cir::BaseClassAddrOp>(
- loc, ptrTy, addr.getPointer(), mlir::APInt(64, offset), assumeNotNull);
+ auto baseAddr =
+ cir::BaseClassAddrOp::create(*this, loc, ptrTy, addr.getPointer(),
+ mlir::APInt(64, offset), assumeNotNull);
return Address(baseAddr, destType, addr.getAlignment());
}
@@ -337,15 +374,19 @@ public:
cir::LoadOp createLoad(mlir::Location loc, Address addr,
bool isVolatile = false) {
mlir::IntegerAttr align = getAlignmentAttr(addr.getAlignment());
- return create<cir::LoadOp>(loc, addr.getPointer(), /*isDeref=*/false,
- align);
+ return cir::LoadOp::create(*this, loc, addr.getPointer(), /*isDeref=*/false,
+ /*alignment=*/align,
+ /*mem_order=*/cir::MemOrderAttr{});
}
cir::StoreOp createStore(mlir::Location loc, mlir::Value val, Address dst,
- mlir::IntegerAttr align = {}) {
+ bool isVolatile = false,
+ mlir::IntegerAttr align = {},
+ cir::MemOrderAttr order = {}) {
if (!align)
align = getAlignmentAttr(dst.getAlignment());
- return CIRBaseBuilderTy::createStore(loc, val, dst.getPointer(), align);
+ return CIRBaseBuilderTy::createStore(loc, val, dst.getPointer(), isVolatile,
+ align, order);
}
/// Create a cir.complex.real_ptr operation that derives a pointer to the real
@@ -353,8 +394,8 @@ public:
mlir::Value createComplexRealPtr(mlir::Location loc, mlir::Value value) {
auto srcPtrTy = mlir::cast<cir::PointerType>(value.getType());
auto srcComplexTy = mlir::cast<cir::ComplexType>(srcPtrTy.getPointee());
- return create<cir::ComplexRealPtrOp>(
- loc, getPointerTo(srcComplexTy.getElementType()), value);
+ return cir::ComplexRealPtrOp::create(
+ *this, loc, getPointerTo(srcComplexTy.getElementType()), value);
}
Address createComplexRealPtr(mlir::Location loc, Address addr) {
@@ -368,8 +409,8 @@ public:
mlir::Value createComplexImagPtr(mlir::Location loc, mlir::Value value) {
auto srcPtrTy = mlir::cast<cir::PointerType>(value.getType());
auto srcComplexTy = mlir::cast<cir::ComplexType>(srcPtrTy.getPointee());
- return create<cir::ComplexImagPtrOp>(
- loc, getPointerTo(srcComplexTy.getElementType()), value);
+ return cir::ComplexImagPtrOp::create(
+ *this, loc, getPointerTo(srcComplexTy.getElementType()), value);
}
Address createComplexImagPtr(mlir::Location loc, Address addr) {
@@ -390,12 +431,20 @@ public:
mlir::Value maybeBuildArrayDecay(mlir::Location loc, mlir::Value arrayPtr,
mlir::Type eltTy);
+ // Convert byte offset to sequence of high-level indices suitable for
+ // GlobalViewAttr. Ideally we shouldn't deal with low-level offsets at all
+ // but currently some parts of Clang AST, which we don't want to touch just
+ // yet, return them.
+ void computeGlobalViewIndicesFromFlatOffset(
+ int64_t offset, mlir::Type ty, cir::CIRDataLayout layout,
+ llvm::SmallVectorImpl<int64_t> &indices);
+
/// Creates a versioned global variable. If the symbol is already taken, an ID
/// will be appended to the symbol. The returned global must always be queried
/// for its name so it can be referenced correctly.
[[nodiscard]] cir::GlobalOp
createVersionedGlobal(mlir::ModuleOp module, mlir::Location loc,
- mlir::StringRef name, mlir::Type type,
+ mlir::StringRef name, mlir::Type type, bool isConstant,
cir::GlobalLinkageKind linkage) {
// Create a unique name if the given name is already taken.
std::string uniqueName;
@@ -404,7 +453,7 @@ public:
else
uniqueName = name.str();
- return createGlobal(module, loc, uniqueName, type, linkage);
+ return createGlobal(module, loc, uniqueName, type, isConstant, linkage);
}
mlir::Value createSetBitfield(mlir::Location loc, mlir::Type resultType,
@@ -419,9 +468,9 @@ public:
useVolatile ? cir::IntType::get(storageType.getContext(),
info.volatileStorageSize, info.isSigned)
: storageType;
- return create<cir::SetBitfieldOp>(
- loc, resultType, dstAddr.getPointer(), storageType, src, info.name,
- info.size, offset, info.isSigned, isLvalueVolatile,
+ return cir::SetBitfieldOp::create(
+ *this, loc, resultType, dstAddr.getPointer(), storageType, src,
+ info.name, info.size, offset, info.isSigned, isLvalueVolatile,
dstAddr.getAlignment().getAsAlign().value());
}
@@ -437,7 +486,7 @@ public:
useVolatile ? cir::IntType::get(storageType.getContext(),
info.volatileStorageSize, info.isSigned)
: storageType;
- return create<cir::GetBitfieldOp>(loc, resultType, addr.getPointer(),
+ return cir::GetBitfieldOp::create(*this, loc, resultType, addr.getPointer(),
storageType, info.name, info.size, offset,
info.isSigned, isLvalueVolatile,
addr.getAlignment().getAsAlign().value());
diff --git a/clang/lib/CIR/CodeGen/CIRGenBuiltin.cpp b/clang/lib/CIR/CodeGen/CIRGenBuiltin.cpp
index 7767bf4..b6a6299 100644
--- a/clang/lib/CIR/CodeGen/CIRGenBuiltin.cpp
+++ b/clang/lib/CIR/CodeGen/CIRGenBuiltin.cpp
@@ -72,6 +72,19 @@ RValue CIRGenFunction::emitRotate(const CallExpr *e, bool isRotateLeft) {
return RValue::get(r);
}
+template <class Operation>
+static RValue emitUnaryMaybeConstrainedFPBuiltin(CIRGenFunction &cgf,
+ const CallExpr &e) {
+ mlir::Value arg = cgf.emitScalarExpr(e.getArg(0));
+
+ assert(!cir::MissingFeatures::cgFPOptionsRAII());
+ assert(!cir::MissingFeatures::fpConstraints());
+
+ auto call =
+ Operation::create(cgf.getBuilder(), arg.getLoc(), arg.getType(), arg);
+ return RValue::get(call->getResult(0));
+}
+
RValue CIRGenFunction::emitBuiltinExpr(const GlobalDecl &gd, unsigned builtinID,
const CallExpr *e,
ReturnValueSlot returnValue) {
@@ -112,6 +125,32 @@ RValue CIRGenFunction::emitBuiltinExpr(const GlobalDecl &gd, unsigned builtinID,
default:
break;
+ // C stdarg builtins.
+ case Builtin::BI__builtin_stdarg_start:
+ case Builtin::BI__builtin_va_start:
+ case Builtin::BI__va_start: {
+ mlir::Value vaList = builtinID == Builtin::BI__va_start
+ ? emitScalarExpr(e->getArg(0))
+ : emitVAListRef(e->getArg(0)).getPointer();
+ mlir::Value count = emitScalarExpr(e->getArg(1));
+ emitVAStart(vaList, count);
+ return {};
+ }
+
+ case Builtin::BI__builtin_va_end:
+ emitVAEnd(emitVAListRef(e->getArg(0)).getPointer());
+ return {};
+
+ case Builtin::BIfabs:
+ case Builtin::BIfabsf:
+ case Builtin::BIfabsl:
+ case Builtin::BI__builtin_fabs:
+ case Builtin::BI__builtin_fabsf:
+ case Builtin::BI__builtin_fabsf16:
+ case Builtin::BI__builtin_fabsl:
+ case Builtin::BI__builtin_fabsf128:
+ return emitUnaryMaybeConstrainedFPBuiltin<cir::FAbsOp>(*this, *e);
+
case Builtin::BI__assume:
case Builtin::BI__builtin_assume: {
if (e->getArg(0)->HasSideEffects(getContext()))
@@ -129,6 +168,24 @@ RValue CIRGenFunction::emitBuiltinExpr(const GlobalDecl &gd, unsigned builtinID,
return RValue::get(nullptr);
}
+ case Builtin::BI__builtin_assume_aligned: {
+ const Expr *ptrExpr = e->getArg(0);
+ mlir::Value ptrValue = emitScalarExpr(ptrExpr);
+ mlir::Value offsetValue =
+ (e->getNumArgs() > 2) ? emitScalarExpr(e->getArg(2)) : nullptr;
+
+ std::optional<llvm::APSInt> alignment =
+ e->getArg(1)->getIntegerConstantExpr(getContext());
+ assert(alignment.has_value() &&
+ "the second argument to __builtin_assume_aligned must be an "
+ "integral constant expression");
+
+ mlir::Value result =
+ emitAlignmentAssumption(ptrValue, ptrExpr, ptrExpr->getExprLoc(),
+ alignment->getSExtValue(), offsetValue);
+ return RValue::get(result);
+ }
+
case Builtin::BI__builtin_complex: {
mlir::Value real = emitScalarExpr(e->getArg(0));
mlir::Value imag = emitScalarExpr(e->getArg(1));
@@ -271,6 +328,20 @@ RValue CIRGenFunction::emitBuiltinExpr(const GlobalDecl &gd, unsigned builtinID,
case Builtin::BI__builtin_rotateright64:
return emitRotate(e, /*isRotateLeft=*/false);
+ case Builtin::BI__builtin_return_address:
+ case Builtin::BI__builtin_frame_address: {
+ mlir::Location loc = getLoc(e->getExprLoc());
+ llvm::APSInt level = e->getArg(0)->EvaluateKnownConstInt(getContext());
+ if (builtinID == Builtin::BI__builtin_return_address) {
+ return RValue::get(cir::ReturnAddrOp::create(
+ builder, loc,
+ builder.getConstAPInt(loc, builder.getUInt32Ty(), level)));
+ }
+ return RValue::get(cir::FrameAddrOp::create(
+ builder, loc,
+ builder.getConstAPInt(loc, builder.getUInt32Ty(), level)));
+ }
+
case Builtin::BI__builtin_trap:
emitTrap(loc, /*createNewBlock=*/true);
return RValue::get(nullptr);
@@ -320,3 +391,25 @@ mlir::Value CIRGenFunction::emitCheckedArgForAssume(const Expr *e) {
"emitCheckedArgForAssume: sanitizers are NYI");
return {};
}
+
+void CIRGenFunction::emitVAStart(mlir::Value vaList, mlir::Value count) {
+ // LLVM codegen casts to *i8, no real gain on doing this for CIRGen this
+ // early, defer to LLVM lowering.
+ cir::VAStartOp::create(builder, vaList.getLoc(), vaList, count);
+}
+
+void CIRGenFunction::emitVAEnd(mlir::Value vaList) {
+ cir::VAEndOp::create(builder, vaList.getLoc(), vaList);
+}
+
+// FIXME(cir): This completely abstracts away the ABI with a generic CIR Op. By
+// default this lowers to llvm.va_arg which is incomplete and not ABI-compliant
+// on most targets so cir.va_arg will need some ABI handling in LoweringPrepare
+mlir::Value CIRGenFunction::emitVAArg(VAArgExpr *ve) {
+ assert(!cir::MissingFeatures::msabi());
+ assert(!cir::MissingFeatures::vlas());
+ mlir::Location loc = cgm.getLoc(ve->getExprLoc());
+ mlir::Type type = convertType(ve->getType());
+ mlir::Value vaList = emitVAListRef(ve->getSubExpr()).getPointer();
+ return cir::VAArgOp::create(builder, loc, type, vaList);
+}
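[Editor's note: illustrative sketch, not part of the patch.] Source code exercising some of the builtins this hunk routes through emitBuiltinExpr: the fabs family lowers via emitUnaryMaybeConstrainedFPBuiltin to cir::FAbsOp, the va_* builtins to cir::VAStartOp/cir::VAArgOp/cir::VAEndOp, and __builtin_return_address to cir::ReturnAddrOp. Function names are illustrative only.

    #include <cstdarg>

    double magnitude(double x) { return __builtin_fabs(x); }

    int sum(int n, ...) {
      va_list ap;
      va_start(ap, n);              // emitVAStart
      int total = 0;
      for (int i = 0; i < n; ++i)
        total += va_arg(ap, int);   // emitVAArg
      va_end(ap);                   // emitVAEnd
      return total;
    }

    void *caller() { return __builtin_return_address(0); }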
diff --git a/clang/lib/CIR/CodeGen/CIRGenCXXABI.h b/clang/lib/CIR/CodeGen/CIRGenCXXABI.h
index 5929568..7c62030 100644
--- a/clang/lib/CIR/CodeGen/CIRGenCXXABI.h
+++ b/clang/lib/CIR/CodeGen/CIRGenCXXABI.h
@@ -37,6 +37,12 @@ public:
void setCXXABIThisValue(CIRGenFunction &cgf, mlir::Value thisPtr);
+ /// Emit the code to initialize hidden members required to handle virtual
+ /// inheritance, if needed by the ABI.
+ virtual void
+ initializeHiddenVirtualInheritanceMembers(CIRGenFunction &cgf,
+ const CXXRecordDecl *rd) {}
+
/// Emit a single constructor/destructor with the gen type from a C++
/// constructor/destructor Decl.
virtual void emitCXXStructor(clang::GlobalDecl gd) = 0;
@@ -47,9 +53,11 @@ public:
}
/// Emit the ABI-specific prolog for the function
- virtual void emitInstanceFunctionProlog(SourceLocation Loc,
+ virtual void emitInstanceFunctionProlog(SourceLocation loc,
CIRGenFunction &cgf) = 0;
+ virtual void emitRethrow(CIRGenFunction &cgf, bool isNoReturn) = 0;
+
/// Get the type of the implicit "this" parameter used by a method. May return
/// zero if no specific type is applicable, e.g. if the ABI expects the "this"
/// parameter to point to some artificial offset in a complete object due to
@@ -63,6 +71,16 @@ public:
/// parameter.
virtual bool needsVTTParameter(clang::GlobalDecl gd) { return false; }
+ /// Perform ABI-specific "this" argument adjustment required prior to
+ /// a call of a virtual function.
+ /// The "VirtualCall" argument is true iff the call itself is virtual.
+ virtual Address adjustThisArgumentForVirtualFunctionCall(CIRGenFunction &cgf,
+ clang::GlobalDecl gd,
+ Address thisPtr,
+ bool virtualCall) {
+ return thisPtr;
+ }
+
/// Build a parameter variable suitable for 'this'.
void buildThisParam(CIRGenFunction &cgf, FunctionArgList &params);
@@ -80,6 +98,15 @@ public:
bool forVirtualBase, bool delegating,
Address thisAddr, QualType thisTy) = 0;
+ /// Checks if ABI requires extra virtual offset for vtable field.
+ virtual bool
+ isVirtualOffsetNeededForVTableField(CIRGenFunction &cgf,
+ CIRGenFunction::VPtr vptr) = 0;
+
+ /// Emits the VTable definitions required for the given record type.
+ virtual void emitVTableDefinitions(CIRGenVTables &cgvt,
+ const CXXRecordDecl *rd) = 0;
+
/// Returns true if the given destructor type should be emitted as a linkonce
/// delegating thunk, regardless of whether the dtor is defined in this TU or
/// not.
@@ -90,6 +117,33 @@ public:
getCXXDestructorLinkage(GVALinkage linkage, const CXXDestructorDecl *dtor,
CXXDtorType dt) const;
+ /// Get the address of the vtable for the given record decl which should be
+ /// used for the vptr at the given offset in RD.
+ virtual cir::GlobalOp getAddrOfVTable(const CXXRecordDecl *rd,
+ CharUnits vptrOffset) = 0;
+
+ /// Build a virtual function pointer in the ABI-specific way.
+ virtual CIRGenCallee getVirtualFunctionPointer(CIRGenFunction &cgf,
+ clang::GlobalDecl gd,
+ Address thisAddr,
+ mlir::Type ty,
+ SourceLocation loc) = 0;
+
+ /// Get the address point of the vtable for the given base subobject.
+ virtual mlir::Value
+ getVTableAddressPoint(BaseSubobject base,
+ const CXXRecordDecl *vtableClass) = 0;
+
+ /// Get the address point of the vtable for the given base subobject while
+ /// building a constructor or a destructor.
+ virtual mlir::Value getVTableAddressPointInStructor(
+ CIRGenFunction &cgf, const CXXRecordDecl *vtableClass, BaseSubobject base,
+ const CXXRecordDecl *nearestVBase) = 0;
+
+ /// Checks if ABI requires to initialize vptrs for given dynamic class.
+ virtual bool
+ doStructorsInitializeVPtrs(const clang::CXXRecordDecl *vtableClass) = 0;
+
/// Returns true if the given constructor or destructor is one of the kinds
/// that the ABI says returns 'this' (only applies when called non-virtually
/// for destructors).
diff --git a/clang/lib/CIR/CodeGen/CIRGenCXXExpr.cpp b/clang/lib/CIR/CodeGen/CIRGenCXXExpr.cpp
index 67d8988..c9e4ed9 100644
--- a/clang/lib/CIR/CodeGen/CIRGenCXXExpr.cpp
+++ b/clang/lib/CIR/CodeGen/CIRGenCXXExpr.cpp
@@ -75,15 +75,14 @@ static MemberCallInfo commonBuildCXXMemberOrOperatorCall(
RValue CIRGenFunction::emitCXXMemberOrOperatorMemberCallExpr(
const CallExpr *ce, const CXXMethodDecl *md, ReturnValueSlot returnValue,
- bool hasQualifier, NestedNameSpecifier *qualifier, bool isArrow,
+ bool hasQualifier, NestedNameSpecifier qualifier, bool isArrow,
const Expr *base) {
assert(isa<CXXMemberCallExpr>(ce) || isa<CXXOperatorCallExpr>(ce));
- if (md->isVirtual()) {
- cgm.errorNYI(ce->getSourceRange(),
- "emitCXXMemberOrOperatorMemberCallExpr: virtual call");
- return RValue::get(nullptr);
- }
+ // Compute the object pointer.
+ bool canUseVirtualCall = md->isVirtual() && !hasQualifier;
+ const CXXMethodDecl *devirtualizedMethod = nullptr;
+ assert(!cir::MissingFeatures::devirtualizeMemberFunction());
// Note on trivial assignment
// --------------------------
@@ -127,7 +126,8 @@ RValue CIRGenFunction::emitCXXMemberOrOperatorMemberCallExpr(
return RValue::get(nullptr);
// Compute the function type we're calling
- const CXXMethodDecl *calleeDecl = md;
+ const CXXMethodDecl *calleeDecl =
+ devirtualizedMethod ? devirtualizedMethod : md;
const CIRGenFunctionInfo *fInfo = nullptr;
if (isa<CXXDestructorDecl>(calleeDecl)) {
cgm.errorNYI(ce->getSourceRange(),
@@ -137,25 +137,46 @@ RValue CIRGenFunction::emitCXXMemberOrOperatorMemberCallExpr(
fInfo = &cgm.getTypes().arrangeCXXMethodDeclaration(calleeDecl);
- mlir::Type ty = cgm.getTypes().getFunctionType(*fInfo);
+ cir::FuncType ty = cgm.getTypes().getFunctionType(*fInfo);
assert(!cir::MissingFeatures::sanitizers());
assert(!cir::MissingFeatures::emitTypeCheck());
+ // C++ [class.virtual]p12:
+ // Explicit qualification with the scope operator (5.1) suppresses the
+ // virtual call mechanism.
+ //
+ // We also don't emit a virtual call if the base expression has a record type
+ // because then we know what the type is.
+ bool useVirtualCall = canUseVirtualCall && !devirtualizedMethod;
+
if (isa<CXXDestructorDecl>(calleeDecl)) {
cgm.errorNYI(ce->getSourceRange(),
"emitCXXMemberOrOperatorMemberCallExpr: destructor call");
return RValue::get(nullptr);
}
- assert(!cir::MissingFeatures::sanitizers());
- if (getLangOpts().AppleKext) {
- cgm.errorNYI(ce->getSourceRange(),
- "emitCXXMemberOrOperatorMemberCallExpr: AppleKext");
- return RValue::get(nullptr);
+ CIRGenCallee callee;
+ if (useVirtualCall) {
+ callee = CIRGenCallee::forVirtual(ce, md, thisPtr.getAddress(), ty);
+ } else {
+ assert(!cir::MissingFeatures::sanitizers());
+ if (getLangOpts().AppleKext) {
+ cgm.errorNYI(ce->getSourceRange(),
+ "emitCXXMemberOrOperatorMemberCallExpr: AppleKext");
+ return RValue::get(nullptr);
+ }
+
+ callee = CIRGenCallee::forDirect(cgm.getAddrOfFunction(calleeDecl, ty),
+ GlobalDecl(calleeDecl));
+ }
+
+ if (md->isVirtual()) {
+ Address newThisAddr =
+ cgm.getCXXABI().adjustThisArgumentForVirtualFunctionCall(
+ *this, calleeDecl, thisPtr.getAddress(), useVirtualCall);
+ thisPtr.setAddress(newThisAddr);
}
- CIRGenCallee callee =
- CIRGenCallee::forDirect(cgm.getAddrOfFunction(md, ty), GlobalDecl(md));
return emitCXXMemberOrOperatorCall(
calleeDecl, callee, returnValue, thisPtr.getPointer(),
@@ -169,7 +190,7 @@ CIRGenFunction::emitCXXOperatorMemberCallExpr(const CXXOperatorCallExpr *e,
assert(md->isInstance() &&
"Trying to emit a member call expr on a static method!");
return emitCXXMemberOrOperatorMemberCallExpr(
- e, md, returnValue, /*HasQualifier=*/false, /*Qualifier=*/nullptr,
+ e, md, returnValue, /*HasQualifier=*/false, /*Qualifier=*/std::nullopt,
/*IsArrow=*/false, e->getArg(0));
}
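[Editor's note: illustrative pair of calls, not from the patch.] The hunk above distinguishes two call shapes: an unqualified call to a virtual method through a pointer takes the new CIRGenCallee::forVirtual path, while explicit qualification suppresses the virtual call mechanism (C++ [class.virtual]p12) and keeps the direct callee.

    struct Base {
      virtual int f() { return 0; }
    };

    int callVirtual(Base *b) { return b->f(); }       // virtual dispatch
    int callDirect(Base *b) { return b->Base::f(); }  // qualified: direct call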
diff --git a/clang/lib/CIR/CodeGen/CIRGenCall.cpp b/clang/lib/CIR/CodeGen/CIRGenCall.cpp
index fc208ff..2585988 100644
--- a/clang/lib/CIR/CodeGen/CIRGenCall.cpp
+++ b/clang/lib/CIR/CodeGen/CIRGenCall.cpp
@@ -42,21 +42,41 @@ CIRGenFunctionInfo::create(CanQualType resultType,
return fi;
}
-cir::FuncType CIRGenTypes::getFunctionType(const CIRGenFunctionInfo &fi) {
- mlir::Type resultType = convertType(fi.getReturnType());
+cir::FuncType CIRGenTypes::getFunctionType(GlobalDecl gd) {
+ const CIRGenFunctionInfo &fi = arrangeGlobalDeclaration(gd);
+ return getFunctionType(fi);
+}
+
+cir::FuncType CIRGenTypes::getFunctionType(const CIRGenFunctionInfo &info) {
+ mlir::Type resultType = convertType(info.getReturnType());
SmallVector<mlir::Type, 8> argTypes;
- argTypes.reserve(fi.getNumRequiredArgs());
+ argTypes.reserve(info.getNumRequiredArgs());
- for (const CanQualType &argType : fi.requiredArguments())
+ for (const CanQualType &argType : info.requiredArguments())
argTypes.push_back(convertType(argType));
return cir::FuncType::get(argTypes,
(resultType ? resultType : builder.getVoidTy()),
- fi.isVariadic());
+ info.isVariadic());
+}
+
+cir::FuncType CIRGenTypes::getFunctionTypeForVTable(GlobalDecl gd) {
+ const CXXMethodDecl *md = cast<CXXMethodDecl>(gd.getDecl());
+ const FunctionProtoType *fpt = md->getType()->getAs<FunctionProtoType>();
+
+ if (!isFuncTypeConvertible(fpt))
+ cgm.errorNYI("getFunctionTypeForVTable: non-convertible function type");
+
+ return getFunctionType(gd);
}
CIRGenCallee CIRGenCallee::prepareConcreteCallee(CIRGenFunction &cgf) const {
- assert(!cir::MissingFeatures::opCallVirtual());
+ if (isVirtual()) {
+ const CallExpr *ce = getVirtualCallExpr();
+ return cgf.cgm.getCXXABI().getVirtualFunctionPointer(
+ cgf, getVirtualMethodDecl(), getThisAddress(), getVirtualFunctionType(),
+ ce ? ce->getBeginLoc() : SourceLocation());
+ }
return *this;
}
@@ -203,9 +223,9 @@ CIRGenTypes::arrangeCXXStructorDeclaration(GlobalDecl gd) {
/// when calling a method pointer.
CanQualType CIRGenTypes::deriveThisType(const CXXRecordDecl *rd,
const CXXMethodDecl *md) {
- QualType recTy;
+ CanQualType recTy;
if (rd) {
- recTy = getASTContext().getTagDeclType(rd)->getCanonicalTypeInternal();
+ recTy = getASTContext().getCanonicalTagType(rd);
} else {
// This can happen with the MS ABI. It shouldn't need anything more than
// setting recTy to VoidTy here, but we're flagging it for now because we
@@ -215,9 +235,9 @@ CanQualType CIRGenTypes::deriveThisType(const CXXRecordDecl *rd,
}
if (md)
- recTy = getASTContext().getAddrSpaceQualType(
- recTy, md->getMethodQualifiers().getAddressSpace());
- return getASTContext().getPointerType(CanQualType::CreateUnsafe(recTy));
+ recTy = CanQualType::CreateUnsafe(getASTContext().getAddrSpaceQualType(
+ recTy, md->getMethodQualifiers().getAddressSpace()));
+ return getASTContext().getPointerType(recTy);
}
/// Arrange the CIR function layout for a value of the given function type, on
@@ -267,7 +287,7 @@ void CIRGenFunction::emitDelegateCallArg(CallArgList &args,
// Deactivate the cleanup for the callee-destructed param that was pushed.
assert(!cir::MissingFeatures::thunks());
if (type->isRecordType() &&
- type->castAs<RecordType>()->getDecl()->isParamDestroyedInCallee() &&
+ type->castAsRecordDecl()->isParamDestroyedInCallee() &&
param->needsDestruction(getContext())) {
cgm.errorNYI(param->getSourceRange(),
"emitDelegateCallArg: callee-destructed param");
@@ -668,7 +688,7 @@ void CIRGenFunction::emitCallArg(CallArgList &args, const clang::Expr *e,
// However, we still have to push an EH-only cleanup in case we unwind before
// we make it to the call.
if (argType->isRecordType() &&
- argType->castAs<RecordType>()->getDecl()->isParamDestroyedInCallee()) {
+ argType->castAsRecordDecl()->isParamDestroyedInCallee()) {
assert(!cir::MissingFeatures::msabi());
cgm.errorNYI(e->getSourceRange(), "emitCallArg: msabi is NYI");
}
diff --git a/clang/lib/CIR/CodeGen/CIRGenCall.h b/clang/lib/CIR/CodeGen/CIRGenCall.h
index 28576a1..81cbb85 100644
--- a/clang/lib/CIR/CodeGen/CIRGenCall.h
+++ b/clang/lib/CIR/CodeGen/CIRGenCall.h
@@ -46,20 +46,33 @@ class CIRGenCallee {
enum class SpecialKind : uintptr_t {
Invalid,
Builtin,
+ PseudoDestructor,
+ Virtual,
- Last = Builtin,
+ Last = Virtual
};
struct BuiltinInfoStorage {
const clang::FunctionDecl *decl;
unsigned id;
};
+ struct PseudoDestructorInfoStorage {
+ const clang::CXXPseudoDestructorExpr *expr;
+ };
+ struct VirtualInfoStorage {
+ const clang::CallExpr *ce;
+ clang::GlobalDecl md;
+ Address addr;
+ cir::FuncType fTy;
+ };
SpecialKind kindOrFunctionPtr;
union {
CIRGenCalleeInfo abstractInfo;
BuiltinInfoStorage builtinInfo;
+ PseudoDestructorInfoStorage pseudoDestructorInfo;
+ VirtualInfoStorage virtualInfo;
};
explicit CIRGenCallee(SpecialKind kind) : kindOrFunctionPtr(kind) {}
@@ -98,6 +111,22 @@ public:
return result;
}
+ static CIRGenCallee
+ forPseudoDestructor(const clang::CXXPseudoDestructorExpr *expr) {
+ CIRGenCallee result(SpecialKind::PseudoDestructor);
+ result.pseudoDestructorInfo.expr = expr;
+ return result;
+ }
+
+ bool isPseudoDestructor() const {
+ return kindOrFunctionPtr == SpecialKind::PseudoDestructor;
+ }
+
+ const CXXPseudoDestructorExpr *getPseudoDestructorExpr() const {
+ assert(isPseudoDestructor());
+ return pseudoDestructorInfo.expr;
+ }
+
bool isOrdinary() const {
return uintptr_t(kindOrFunctionPtr) > uintptr_t(SpecialKind::Last);
}
@@ -107,7 +136,8 @@ public:
CIRGenCallee prepareConcreteCallee(CIRGenFunction &cgf) const;
CIRGenCalleeInfo getAbstractInfo() const {
- assert(!cir::MissingFeatures::opCallVirtual());
+ if (isVirtual())
+ return virtualInfo.md;
assert(isOrdinary());
return abstractInfo;
}
@@ -117,6 +147,39 @@ public:
return reinterpret_cast<mlir::Operation *>(kindOrFunctionPtr);
}
+ bool isVirtual() const { return kindOrFunctionPtr == SpecialKind::Virtual; }
+
+ static CIRGenCallee forVirtual(const clang::CallExpr *ce,
+ clang::GlobalDecl md, Address addr,
+ cir::FuncType fTy) {
+ CIRGenCallee result(SpecialKind::Virtual);
+ result.virtualInfo.ce = ce;
+ result.virtualInfo.md = md;
+ result.virtualInfo.addr = addr;
+ result.virtualInfo.fTy = fTy;
+ return result;
+ }
+
+ const clang::CallExpr *getVirtualCallExpr() const {
+ assert(isVirtual());
+ return virtualInfo.ce;
+ }
+
+ clang::GlobalDecl getVirtualMethodDecl() const {
+ assert(isVirtual());
+ return virtualInfo.md;
+ }
+
+ Address getThisAddress() const {
+ assert(isVirtual());
+ return virtualInfo.addr;
+ }
+
+ cir::FuncType getVirtualFunctionType() const {
+ assert(isVirtual());
+ return virtualInfo.fTy;
+ }
+
void setFunctionPointer(mlir::Operation *functionPtr) {
assert(isOrdinary());
kindOrFunctionPtr = SpecialKind(reinterpret_cast<uintptr_t>(functionPtr));
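
The CIRGenCallee changes above pack either a function pointer or a small SpecialKind tag into the single kindOrFunctionPtr word and tell the two apart by comparing against SpecialKind::Last. A minimal standalone sketch of that tagged-word pattern, with made-up names and the same assumption the real class relies on, namely that no real object lives at an address as small as the last tag value:

#include <cassert>
#include <cstdint>

struct Op {}; // stand-in for whatever an ordinary callee points at

class Callee {
  enum class Kind : std::uintptr_t { Invalid, Builtin, Virtual, Last = Virtual };
  Kind kindOrPtr; // small tag values and real pointers share one word

public:
  explicit Callee(Kind k) : kindOrPtr(k) {}
  explicit Callee(Op *op)
      : kindOrPtr(Kind(reinterpret_cast<std::uintptr_t>(op))) {}

  static Callee forVirtual() { return Callee(Kind::Virtual); }
  bool isVirtual() const { return kindOrPtr == Kind::Virtual; }

  // Any real pointer compares greater than the last tag value, so one
  // comparison distinguishes "ordinary" callees from the special kinds.
  bool isOrdinary() const {
    return static_cast<std::uintptr_t>(kindOrPtr) >
           static_cast<std::uintptr_t>(Kind::Last);
  }

  Op *getOp() const {
    assert(isOrdinary());
    return reinterpret_cast<Op *>(kindOrPtr);
  }
};
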
diff --git a/clang/lib/CIR/CodeGen/CIRGenClass.cpp b/clang/lib/CIR/CodeGen/CIRGenClass.cpp
index 72b9d17..9a27932 100644
--- a/clang/lib/CIR/CodeGen/CIRGenClass.cpp
+++ b/clang/lib/CIR/CodeGen/CIRGenClass.cpp
@@ -14,6 +14,7 @@
#include "CIRGenFunction.h"
#include "CIRGenValue.h"
+#include "clang/AST/EvaluatedExprVisitor.h"
#include "clang/AST/ExprCXX.h"
#include "clang/AST/RecordLayout.h"
#include "clang/AST/Type.h"
@@ -86,7 +87,7 @@ static void emitMemberInitializer(CIRGenFunction &cgf,
QualType fieldType = field->getType();
mlir::Value thisPtr = cgf.loadCXXThis();
- QualType recordTy = cgf.getContext().getTypeDeclType(classDecl);
+ CanQualType recordTy = cgf.getContext().getCanonicalTagType(classDecl);
// If a base constructor is being emitted, create an LValue that has the
// non-virtual alignment.
@@ -120,11 +121,36 @@ static void emitMemberInitializer(CIRGenFunction &cgf,
static bool isInitializerOfDynamicClass(const CXXCtorInitializer *baseInit) {
const Type *baseType = baseInit->getBaseClass();
- const auto *baseClassDecl =
- cast<CXXRecordDecl>(baseType->castAs<RecordType>()->getDecl());
+ const auto *baseClassDecl = baseType->castAsCXXRecordDecl();
return baseClassDecl->isDynamicClass();
}
+namespace {
+/// A visitor which checks whether an initializer uses 'this' in a
+/// way which requires the vtable to be properly set.
+struct DynamicThisUseChecker
+ : ConstEvaluatedExprVisitor<DynamicThisUseChecker> {
+ using super = ConstEvaluatedExprVisitor<DynamicThisUseChecker>;
+
+ bool usesThis = false;
+
+ DynamicThisUseChecker(const ASTContext &c) : super(c) {}
+
+ // Black-list all explicit and implicit references to 'this'.
+ //
+ // Do we need to worry about external references to 'this' derived
+ // from arbitrary code? If so, then anything which runs arbitrary
+ // external code might potentially access the vtable.
+ void VisitCXXThisExpr(const CXXThisExpr *e) { usesThis = true; }
+};
+} // end anonymous namespace
+
+static bool baseInitializerUsesThis(ASTContext &c, const Expr *init) {
+ DynamicThisUseChecker checker(c);
+ checker.Visit(init);
+ return checker.usesThis;
+}
+
/// Gets the address of a direct base class within a complete object.
/// This should only be used for (1) non-virtual bases or (2) virtual bases
/// when the type is known to be complete (e.g. in complete destructors).
@@ -160,18 +186,15 @@ void CIRGenFunction::emitBaseInitializer(mlir::Location loc,
Address thisPtr = loadCXXThisAddress();
const Type *baseType = baseInit->getBaseClass();
- const auto *baseClassDecl =
- cast<CXXRecordDecl>(baseType->castAs<RecordType>()->getDecl());
+ const auto *baseClassDecl = baseType->castAsCXXRecordDecl();
bool isBaseVirtual = baseInit->isBaseVirtual();
// If the initializer for the base (other than the constructor
// itself) accesses 'this' in any way, we need to initialize the
// vtables.
- if (classDecl->isDynamicClass()) {
- cgm.errorNYI(loc, "emitBaseInitializer: dynamic class");
- return;
- }
+ if (baseInitializerUsesThis(getContext(), baseInit->getInit()))
+ initializeVTablePointers(loc, classDecl);
// We can pretend to be a complete class because it only matters for
// virtual bases, and we only do virtual bases for complete ctors.
@@ -197,19 +220,9 @@ void CIRGenFunction::emitCtorPrologue(const CXXConstructorDecl *cd,
return;
}
- // If there are no member initializers, we can just return.
- if (cd->getNumCtorInitializers() == 0)
- return;
-
const CXXRecordDecl *classDecl = cd->getParent();
- // This code doesn't use range-based iteration because we may need to emit
- // code between the virtual base initializers and the non-virtual base or
- // between the non-virtual base initializers and the member initializers.
- CXXConstructorDecl::init_const_iterator b = cd->init_begin(),
- e = cd->init_end();
-
- // Virtual base initializers first, if any. They aren't needed if:
+ // Virtual base initializers aren't needed if:
// - This is a base ctor variant
// - There are no vbases
// - The class is abstract, so a complete object of it cannot be constructed
@@ -219,40 +232,66 @@ void CIRGenFunction::emitCtorPrologue(const CXXConstructorDecl *cd,
bool constructVBases = ctorType != Ctor_Base &&
classDecl->getNumVBases() != 0 &&
!classDecl->isAbstract();
- if (constructVBases) {
- cgm.errorNYI(cd->getSourceRange(), "emitCtorPrologue: virtual base");
- return;
- }
-
- const mlir::Value oldThisValue = cxxThisValue;
- if (!constructVBases && (*b)->isBaseInitializer() && (*b)->isBaseVirtual()) {
+ if (constructVBases &&
+ !cgm.getTarget().getCXXABI().hasConstructorVariants()) {
cgm.errorNYI(cd->getSourceRange(),
- "emitCtorPrologue: virtual base initializer");
+ "emitCtorPrologue: virtual base without variants");
return;
}
- // Handle non-virtual base initializers.
- for (; b != e && (*b)->isBaseInitializer(); b++) {
- assert(!(*b)->isBaseVirtual());
+ // Create three separate ranges for the different types of initializers.
+ auto allInits = cd->inits();
+
+ // Find the boundaries between the three groups.
+ auto virtualBaseEnd = std::find_if(
+ allInits.begin(), allInits.end(), [](const CXXCtorInitializer *Init) {
+ return !(Init->isBaseInitializer() && Init->isBaseVirtual());
+ });
+
+ auto nonVirtualBaseEnd = std::find_if(virtualBaseEnd, allInits.end(),
+ [](const CXXCtorInitializer *Init) {
+ return !Init->isBaseInitializer();
+ });
+
+ // Create the three ranges.
+ auto virtualBaseInits = llvm::make_range(allInits.begin(), virtualBaseEnd);
+ auto nonVirtualBaseInits =
+ llvm::make_range(virtualBaseEnd, nonVirtualBaseEnd);
+ auto memberInits = llvm::make_range(nonVirtualBaseEnd, allInits.end());
+ const mlir::Value oldThisValue = cxxThisValue;
+
+ auto emitInitializer = [&](CXXCtorInitializer *baseInit) {
if (cgm.getCodeGenOpts().StrictVTablePointers &&
cgm.getCodeGenOpts().OptimizationLevel > 0 &&
- isInitializerOfDynamicClass(*b)) {
+ isInitializerOfDynamicClass(baseInit)) {
+ // It's OK to continue after emitting the error here. The missing code
+ // just "launders" the 'this' pointer.
cgm.errorNYI(cd->getSourceRange(),
- "emitCtorPrologue: strict vtable pointers");
- return;
+ "emitCtorPrologue: strict vtable pointers for vbase");
}
- emitBaseInitializer(getLoc(cd->getBeginLoc()), classDecl, *b);
+ emitBaseInitializer(getLoc(cd->getBeginLoc()), classDecl, baseInit);
+ };
+
+ // Process virtual base initializers.
+ for (CXXCtorInitializer *virtualBaseInit : virtualBaseInits) {
+ if (!constructVBases)
+ continue;
+ emitInitializer(virtualBaseInit);
}
- cxxThisValue = oldThisValue;
+ assert(!cir::MissingFeatures::msabi());
- if (classDecl->isDynamicClass()) {
- cgm.errorNYI(cd->getSourceRange(),
- "emitCtorPrologue: initialize vtable pointers");
- return;
+ // Then, non-virtual base initializers.
+ for (CXXCtorInitializer *nonVirtualBaseInit : nonVirtualBaseInits) {
+ assert(!nonVirtualBaseInit->isBaseVirtual());
+ emitInitializer(nonVirtualBaseInit);
}
+ cxxThisValue = oldThisValue;
+
+ initializeVTablePointers(getLoc(cd->getBeginLoc()), classDecl);
+
// Finally, initialize class members.
FieldConstructionScope fcs(*this, loadCXXThisAddress());
// Classic codegen uses a special class to attempt to replace member
@@ -260,8 +299,7 @@ void CIRGenFunction::emitCtorPrologue(const CXXConstructorDecl *cd,
// lowering or optimization phases to keep the memory accesses more
// explicit. For now, we don't insert memcpy at all.
assert(!cir::MissingFeatures::ctorMemcpyizer());
- for (; b != e; b++) {
- CXXCtorInitializer *member = (*b);
+ for (CXXCtorInitializer *member : memberInits) {
assert(!member->isBaseInitializer());
assert(member->isAnyMemberInitializer() &&
"Delegating initializer on non-delegating constructor");
@@ -269,6 +307,167 @@ void CIRGenFunction::emitCtorPrologue(const CXXConstructorDecl *cd,
}
}
+static Address applyNonVirtualAndVirtualOffset(
+ mlir::Location loc, CIRGenFunction &cgf, Address addr,
+ CharUnits nonVirtualOffset, mlir::Value virtualOffset,
+ const CXXRecordDecl *derivedClass, const CXXRecordDecl *nearestVBase,
+ mlir::Type baseValueTy = {}, bool assumeNotNull = true) {
+ // Assert that we have something to do.
+ assert(!nonVirtualOffset.isZero() || virtualOffset != nullptr);
+
+ // Compute the offset from the static and dynamic components.
+ if (!nonVirtualOffset.isZero()) {
+ if (virtualOffset) {
+ cgf.cgm.errorNYI(
+ loc,
+ "applyNonVirtualAndVirtualOffset: virtual and non-virtual offset");
+ return Address::invalid();
+ } else {
+ assert(baseValueTy && "expected base type");
+      // If no virtualOffset is present, this is the final stop.
+ return cgf.getBuilder().createBaseClassAddr(
+ loc, addr, baseValueTy, nonVirtualOffset.getQuantity(),
+ assumeNotNull);
+ }
+ }
+
+ cgf.cgm.errorNYI(loc, "applyNonVirtualAndVirtualOffset: virtual offset");
+ return Address::invalid();
+}
+
+void CIRGenFunction::initializeVTablePointer(mlir::Location loc,
+ const VPtr &vptr) {
+ // Compute the address point.
+ mlir::Value vtableAddressPoint =
+ cgm.getCXXABI().getVTableAddressPointInStructor(
+ *this, vptr.vtableClass, vptr.base, vptr.nearestVBase);
+
+ if (!vtableAddressPoint)
+ return;
+
+ // Compute where to store the address point.
+ mlir::Value virtualOffset{};
+ CharUnits nonVirtualOffset = CharUnits::Zero();
+
+ mlir::Type baseValueTy;
+ if (cgm.getCXXABI().isVirtualOffsetNeededForVTableField(*this, vptr)) {
+ cgm.errorNYI(loc, "initializeVTablePointer: virtual offset for vtable");
+ } else {
+ // We can just use the base offset in the complete class.
+ nonVirtualOffset = vptr.base.getBaseOffset();
+ baseValueTy =
+ convertType(getContext().getCanonicalTagType(vptr.base.getBase()));
+ }
+
+ // Apply the offsets.
+ Address classAddr = loadCXXThisAddress();
+ if (!nonVirtualOffset.isZero() || virtualOffset) {
+ classAddr = applyNonVirtualAndVirtualOffset(
+ loc, *this, classAddr, nonVirtualOffset, virtualOffset,
+ vptr.vtableClass, vptr.nearestVBase, baseValueTy);
+ }
+
+ // Finally, store the address point. Use the same CIR types as the field.
+ //
+  // The vtable field is derived from the `this` pointer, so the two should be
+  // in the same address space.
+ assert(!cir::MissingFeatures::addressSpace());
+ auto vtablePtr = cir::VTableGetVPtrOp::create(
+ builder, loc, builder.getPtrToVPtrType(), classAddr.getPointer());
+ Address vtableField = Address(vtablePtr, classAddr.getAlignment());
+ builder.createStore(loc, vtableAddressPoint, vtableField);
+ assert(!cir::MissingFeatures::opTBAA());
+ assert(!cir::MissingFeatures::createInvariantGroup());
+}
+
+void CIRGenFunction::initializeVTablePointers(mlir::Location loc,
+ const CXXRecordDecl *rd) {
+ // Ignore classes without a vtable.
+ if (!rd->isDynamicClass())
+ return;
+
+ // Initialize the vtable pointers for this class and all of its bases.
+ if (cgm.getCXXABI().doStructorsInitializeVPtrs(rd))
+ for (const auto &vptr : getVTablePointers(rd))
+ initializeVTablePointer(loc, vptr);
+
+ if (rd->getNumVBases())
+ cgm.getCXXABI().initializeHiddenVirtualInheritanceMembers(*this, rd);
+}
+
+CIRGenFunction::VPtrsVector
+CIRGenFunction::getVTablePointers(const CXXRecordDecl *vtableClass) {
+ CIRGenFunction::VPtrsVector vptrsResult;
+ VisitedVirtualBasesSetTy vbases;
+ getVTablePointers(BaseSubobject(vtableClass, CharUnits::Zero()),
+ /*NearestVBase=*/nullptr,
+ /*OffsetFromNearestVBase=*/CharUnits::Zero(),
+ /*BaseIsNonVirtualPrimaryBase=*/false, vtableClass, vbases,
+ vptrsResult);
+ return vptrsResult;
+}
+
+void CIRGenFunction::getVTablePointers(BaseSubobject base,
+ const CXXRecordDecl *nearestVBase,
+ CharUnits offsetFromNearestVBase,
+ bool baseIsNonVirtualPrimaryBase,
+ const CXXRecordDecl *vtableClass,
+ VisitedVirtualBasesSetTy &vbases,
+ VPtrsVector &vptrs) {
+ // If this base is a non-virtual primary base the address point has already
+ // been set.
+ if (!baseIsNonVirtualPrimaryBase) {
+ // Initialize the vtable pointer for this base.
+ VPtr vptr = {base, nearestVBase, offsetFromNearestVBase, vtableClass};
+ vptrs.push_back(vptr);
+ }
+
+ const CXXRecordDecl *rd = base.getBase();
+
+ for (const auto &nextBase : rd->bases()) {
+ const auto *baseDecl =
+ cast<CXXRecordDecl>(
+ nextBase.getType()->castAs<RecordType>()->getOriginalDecl())
+ ->getDefinitionOrSelf();
+
+ // Ignore classes without a vtable.
+ if (!baseDecl->isDynamicClass())
+ continue;
+
+ CharUnits baseOffset;
+ CharUnits baseOffsetFromNearestVBase;
+ bool baseDeclIsNonVirtualPrimaryBase;
+ const CXXRecordDecl *nextBaseDecl;
+
+ if (nextBase.isVirtual()) {
+ // Check if we've visited this virtual base before.
+ if (!vbases.insert(baseDecl).second)
+ continue;
+
+ const ASTRecordLayout &layout =
+ getContext().getASTRecordLayout(vtableClass);
+
+ nextBaseDecl = nearestVBase;
+ baseOffset = layout.getVBaseClassOffset(baseDecl);
+ baseOffsetFromNearestVBase = CharUnits::Zero();
+ baseDeclIsNonVirtualPrimaryBase = false;
+ } else {
+ const ASTRecordLayout &layout = getContext().getASTRecordLayout(rd);
+
+ nextBaseDecl = baseDecl;
+ baseOffset = base.getBaseOffset() + layout.getBaseClassOffset(baseDecl);
+ baseOffsetFromNearestVBase =
+ offsetFromNearestVBase + layout.getBaseClassOffset(baseDecl);
+ baseDeclIsNonVirtualPrimaryBase = layout.getPrimaryBase() == baseDecl;
+ }
+
+ getVTablePointers(BaseSubobject(baseDecl, baseOffset), nextBaseDecl,
+ baseOffsetFromNearestVBase,
+ baseDeclIsNonVirtualPrimaryBase, vtableClass, vbases,
+ vptrs);
+ }
+}
+
Address CIRGenFunction::loadCXXThisAddress() {
assert(curFuncDecl && "loading 'this' without a func declaration?");
assert(isa<CXXMethodDecl>(curFuncDecl));
@@ -377,7 +576,7 @@ void CIRGenFunction::emitCXXAggrConstructorCall(
//
// Note that these are complete objects and so we don't need to
// use the non-virtual size or alignment.
- QualType type = getContext().getTypeDeclType(ctor->getParent());
+ CanQualType type = getContext().getCanonicalTagType(ctor->getParent());
CharUnits eltAlignment = arrayBase.getAlignment().alignmentOfArrayElement(
getContext().getTypeSizeInChars(type));
@@ -483,8 +682,7 @@ void CIRGenFunction::emitImplicitAssignmentOperatorBody(FunctionArgList &args) {
void CIRGenFunction::destroyCXXObject(CIRGenFunction &cgf, Address addr,
QualType type) {
- const RecordType *rtype = type->castAs<RecordType>();
- const CXXRecordDecl *record = cast<CXXRecordDecl>(rtype->getDecl());
+ const auto *record = type->castAsCXXRecordDecl();
const CXXDestructorDecl *dtor = record->getDestructor();
// TODO(cir): Unlike traditional codegen, CIRGen should actually emit trivial
// dtors which shall be removed on later CIR passes. However, only remove this
@@ -571,6 +769,37 @@ Address CIRGenFunction::getAddressOfBaseClass(
return value;
}
+// TODO(cir): this can be shared with LLVM codegen.
+bool CIRGenFunction::shouldEmitVTableTypeCheckedLoad(const CXXRecordDecl *rd) {
+ assert(!cir::MissingFeatures::hiddenVisibility());
+ if (!cgm.getCodeGenOpts().WholeProgramVTables)
+ return false;
+
+ if (cgm.getCodeGenOpts().VirtualFunctionElimination)
+ return true;
+
+ assert(!cir::MissingFeatures::sanitizers());
+
+ return false;
+}
+
+mlir::Value CIRGenFunction::getVTablePtr(mlir::Location loc, Address thisAddr,
+ const CXXRecordDecl *rd) {
+ auto vtablePtr = cir::VTableGetVPtrOp::create(
+ builder, loc, builder.getPtrToVPtrType(), thisAddr.getPointer());
+ Address vtablePtrAddr = Address(vtablePtr, thisAddr.getAlignment());
+
+ auto vtable = builder.createLoad(loc, vtablePtrAddr);
+ assert(!cir::MissingFeatures::opTBAA());
+
+ if (cgm.getCodeGenOpts().OptimizationLevel > 0 &&
+ cgm.getCodeGenOpts().StrictVTablePointers) {
+ assert(!cir::MissingFeatures::createInvariantGroup());
+ }
+
+ return vtable;
+}
+
void CIRGenFunction::emitCXXConstructorCall(const clang::CXXConstructorDecl *d,
clang::CXXCtorType type,
bool forVirtualBase,
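
emitCtorPrologue above splits cd->inits() into three ranges with two std::find_if calls, relying on the AST keeping constructor initializers in execution order: virtual bases, then non-virtual bases, then members. A rough standalone model of that partitioning, over a hypothetical Init record rather than the actual CXXCtorInitializer API:

#include <algorithm>
#include <cassert>
#include <vector>

struct Init {
  bool isBase;
  bool isVirtualBase;
};

int main() {
  // Ordered as the AST stores them: virtual bases, non-virtual bases, members.
  std::vector<Init> inits = {
      {true, true}, {true, false}, {true, false}, {false, false}};

  auto virtualBaseEnd = std::find_if(
      inits.begin(), inits.end(),
      [](const Init &i) { return !(i.isBase && i.isVirtualBase); });
  auto nonVirtualBaseEnd = std::find_if(
      virtualBaseEnd, inits.end(), [](const Init &i) { return !i.isBase; });

  // Three half-open ranges over one underlying sequence.
  assert(virtualBaseEnd - inits.begin() == 1);     // virtual base inits
  assert(nonVirtualBaseEnd - virtualBaseEnd == 2); // non-virtual base inits
  assert(inits.end() - nonVirtualBaseEnd == 1);    // member inits
}
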
diff --git a/clang/lib/CIR/CodeGen/CIRGenCleanup.cpp b/clang/lib/CIR/CodeGen/CIRGenCleanup.cpp
index be21ce9..4d4d10b 100644
--- a/clang/lib/CIR/CodeGen/CIRGenCleanup.cpp
+++ b/clang/lib/CIR/CodeGen/CIRGenCleanup.cpp
@@ -16,6 +16,7 @@
//
//===----------------------------------------------------------------------===//
+#include "CIRGenCleanup.h"
#include "CIRGenFunction.h"
#include "clang/CIR/MissingFeatures.h"
@@ -33,37 +34,147 @@ using namespace clang::CIRGen;
void EHScopeStack::Cleanup::anchor() {}
-static mlir::Block *getCurCleanupBlock(CIRGenFunction &cgf) {
- mlir::OpBuilder::InsertionGuard guard(cgf.getBuilder());
- mlir::Block *cleanup =
- cgf.curLexScope->getOrCreateCleanupBlock(cgf.getBuilder());
- return cleanup;
+/// Push an entry of the given size onto this protected-scope stack.
+char *EHScopeStack::allocate(size_t size) {
+ size = llvm::alignTo(size, ScopeStackAlignment);
+ if (!startOfBuffer) {
+ unsigned capacity = llvm::PowerOf2Ceil(std::max(size, 1024ul));
+ startOfBuffer = std::make_unique<char[]>(capacity);
+ startOfData = endOfBuffer = startOfBuffer.get() + capacity;
+ } else if (static_cast<size_t>(startOfData - startOfBuffer.get()) < size) {
+ unsigned currentCapacity = endOfBuffer - startOfBuffer.get();
+ unsigned usedCapacity =
+ currentCapacity - (startOfData - startOfBuffer.get());
+ unsigned requiredCapacity = usedCapacity + size;
+ // We know from the 'else if' condition that requiredCapacity is greater
+ // than currentCapacity.
+ unsigned newCapacity = llvm::PowerOf2Ceil(requiredCapacity);
+
+ std::unique_ptr<char[]> newStartOfBuffer =
+ std::make_unique<char[]>(newCapacity);
+ char *newEndOfBuffer = newStartOfBuffer.get() + newCapacity;
+ char *newStartOfData = newEndOfBuffer - usedCapacity;
+ memcpy(newStartOfData, startOfData, usedCapacity);
+ startOfBuffer.swap(newStartOfBuffer);
+ endOfBuffer = newEndOfBuffer;
+ startOfData = newStartOfData;
+ }
+
+ assert(startOfBuffer.get() + size <= startOfData);
+ startOfData -= size;
+ return startOfData;
+}
+
+void EHScopeStack::deallocate(size_t size) {
+ startOfData += llvm::alignTo(size, ScopeStackAlignment);
+}
+
+void *EHScopeStack::pushCleanup(CleanupKind kind, size_t size) {
+ char *buffer = allocate(EHCleanupScope::getSizeForCleanupSize(size));
+ bool isEHCleanup = kind & EHCleanup;
+ bool isLifetimeMarker = kind & LifetimeMarker;
+
+ assert(!cir::MissingFeatures::innermostEHScope());
+
+ EHCleanupScope *scope = new (buffer) EHCleanupScope(size);
+
+ if (isLifetimeMarker)
+ cgf->cgm.errorNYI("push lifetime marker cleanup");
+
+ // With Windows -EHa, Invoke llvm.seh.scope.begin() for EHCleanup
+ if (cgf->getLangOpts().EHAsynch && isEHCleanup && !isLifetimeMarker &&
+ cgf->getTarget().getCXXABI().isMicrosoft())
+ cgf->cgm.errorNYI("push seh cleanup");
+
+ return scope->getCleanupBuffer();
+}
+
+void EHScopeStack::popCleanup() {
+ assert(!empty() && "popping exception stack when not empty");
+
+ assert(isa<EHCleanupScope>(*begin()));
+ EHCleanupScope &cleanup = cast<EHCleanupScope>(*begin());
+ deallocate(cleanup.getAllocatedSize());
+
+ // Destroy the cleanup.
+ cleanup.destroy();
+
+ assert(!cir::MissingFeatures::ehCleanupBranchFixups());
+}
+
+static void emitCleanup(CIRGenFunction &cgf, EHScopeStack::Cleanup *cleanup) {
+ // Ask the cleanup to emit itself.
+ assert(cgf.haveInsertPoint() && "expected insertion point");
+ assert(!cir::MissingFeatures::ehCleanupFlags());
+ cleanup->emit(cgf);
+ assert(cgf.haveInsertPoint() && "cleanup ended with no insertion point?");
}
/// Pops a cleanup block. If the block includes a normal cleanup, the
/// current insertion point is threaded through the cleanup, as are
/// any branch fixups on the cleanup.
void CIRGenFunction::popCleanupBlock() {
- assert(!ehStack.cleanupStack.empty() && "cleanup stack is empty!");
- mlir::OpBuilder::InsertionGuard guard(builder);
- std::unique_ptr<EHScopeStack::Cleanup> cleanup =
- ehStack.cleanupStack.pop_back_val();
+ assert(!ehStack.empty() && "cleanup stack is empty!");
+ assert(isa<EHCleanupScope>(*ehStack.begin()) && "top not a cleanup!");
+ EHCleanupScope &scope = cast<EHCleanupScope>(*ehStack.begin());
+
+ // Remember activation information.
+ bool isActive = scope.isActive();
+
+ assert(!cir::MissingFeatures::ehCleanupBranchFixups());
+
+  // Check whether there's a fallthrough into the cleanup.
+ mlir::Block *fallthroughSource = builder.getInsertionBlock();
+ bool hasFallthrough = fallthroughSource != nullptr && isActive;
+
+ bool requiresNormalCleanup = scope.isNormalCleanup() && hasFallthrough;
+
+ // If we don't need the cleanup at all, we're done.
+ assert(!cir::MissingFeatures::ehCleanupScopeRequiresEHCleanup());
+ if (!requiresNormalCleanup) {
+ ehStack.popCleanup();
+ return;
+ }
+
+ // Copy the cleanup emission data out. This uses either a stack
+ // array or malloc'd memory, depending on the size, which is
+ // behavior that SmallVector would provide, if we could use it
+ // here. Unfortunately, if you ask for a SmallVector<char>, the
+ // alignment isn't sufficient.
+ auto *cleanupSource = reinterpret_cast<char *>(scope.getCleanupBuffer());
+ alignas(EHScopeStack::ScopeStackAlignment) char
+ cleanupBufferStack[8 * sizeof(void *)];
+ std::unique_ptr<char[]> cleanupBufferHeap;
+ size_t cleanupSize = scope.getCleanupSize();
+ EHScopeStack::Cleanup *cleanup;
+
+ // This is necessary because we are going to deallocate the cleanup
+ // (in popCleanup) before we emit it.
+ if (cleanupSize <= sizeof(cleanupBufferStack)) {
+ memcpy(cleanupBufferStack, cleanupSource, cleanupSize);
+ cleanup = reinterpret_cast<EHScopeStack::Cleanup *>(cleanupBufferStack);
+ } else {
+ cleanupBufferHeap.reset(new char[cleanupSize]);
+ memcpy(cleanupBufferHeap.get(), cleanupSource, cleanupSize);
+ cleanup =
+ reinterpret_cast<EHScopeStack::Cleanup *>(cleanupBufferHeap.get());
+ }
assert(!cir::MissingFeatures::ehCleanupFlags());
- mlir::Block *cleanupEntry = getCurCleanupBlock(*this);
- builder.setInsertionPointToEnd(cleanupEntry);
- cleanup->emit(*this);
+
+ ehStack.popCleanup();
+ scope.markEmitted();
+ emitCleanup(*this, cleanup);
}
/// Pops cleanup blocks until the given savepoint is reached.
-void CIRGenFunction::popCleanupBlocks(size_t oldCleanupStackDepth) {
+void CIRGenFunction::popCleanupBlocks(
+ EHScopeStack::stable_iterator oldCleanupStackDepth) {
assert(!cir::MissingFeatures::ehstackBranches());
- assert(ehStack.getStackDepth() >= oldCleanupStackDepth);
-
// Pop cleanup blocks until we reach the base stack depth for the
// current scope.
- while (ehStack.getStackDepth() > oldCleanupStackDepth) {
+ while (ehStack.stable_begin() != oldCleanupStackDepth) {
popCleanupBlock();
}
}
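
The new EHScopeStack::allocate hands out space from the high end of a byte buffer downward, so the most recently pushed scope always starts at startOfData and popCleanup only has to move that boundary back up. A much simplified model of the bump-down allocation, with a fixed capacity and none of the reallocation logic:

#include <cassert>
#include <cstddef>
#include <memory>

class DownwardArena {
  static constexpr std::size_t Align = 8;
  std::size_t capacity;
  std::unique_ptr<char[]> buffer;
  char *startOfData; // lowest address currently in use

public:
  explicit DownwardArena(std::size_t cap)
      : capacity(cap), buffer(std::make_unique<char[]>(cap)),
        startOfData(buffer.get() + cap) {}

  char *allocate(std::size_t size) {
    size = (size + Align - 1) / Align * Align; // round up to the alignment
    assert(buffer.get() + size <= startOfData && "arena exhausted");
    startOfData -= size; // the newest entry lives at the lowest used address
    return startOfData;
  }

  void deallocate(std::size_t size) {
    size = (size + Align - 1) / Align * Align;
    startOfData += size; // popping simply moves the boundary back up
  }

  bool empty() const { return startOfData == buffer.get() + capacity; }
};
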
diff --git a/clang/lib/CIR/CodeGen/CIRGenCleanup.h b/clang/lib/CIR/CodeGen/CIRGenCleanup.h
new file mode 100644
index 0000000..a4ec8cc
--- /dev/null
+++ b/clang/lib/CIR/CodeGen/CIRGenCleanup.h
@@ -0,0 +1,142 @@
+//===----------------------------------------------------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// These classes support the generation of CIR for cleanups, initially based
+// on LLVM IR cleanup handling, but ought to change as CIR evolves.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef CLANG_LIB_CIR_CODEGEN_CIRGENCLEANUP_H
+#define CLANG_LIB_CIR_CODEGEN_CIRGENCLEANUP_H
+
+#include "Address.h"
+#include "EHScopeStack.h"
+#include "mlir/IR/Value.h"
+
+namespace clang::CIRGen {
+
+/// A protected scope for zero-cost EH handling.
+class EHScope {
+ class CommonBitFields {
+ friend class EHScope;
+ unsigned kind : 3;
+ };
+ enum { NumCommonBits = 3 };
+
+protected:
+ class CleanupBitFields {
+ friend class EHCleanupScope;
+ unsigned : NumCommonBits;
+
+ /// Whether this cleanup needs to be run along normal edges.
+ unsigned isNormalCleanup : 1;
+
+ /// Whether this cleanup needs to be run along exception edges.
+ unsigned isEHCleanup : 1;
+
+ /// Whether this cleanup is currently active.
+ unsigned isActive : 1;
+
+ /// Whether this cleanup is a lifetime marker
+ unsigned isLifetimeMarker : 1;
+
+ /// Whether the normal cleanup should test the activation flag.
+ unsigned testFlagInNormalCleanup : 1;
+
+ /// Whether the EH cleanup should test the activation flag.
+ unsigned testFlagInEHCleanup : 1;
+
+ /// The amount of extra storage needed by the Cleanup.
+ /// Always a multiple of the scope-stack alignment.
+ unsigned cleanupSize : 12;
+ };
+
+ union {
+ CommonBitFields commonBits;
+ CleanupBitFields cleanupBits;
+ };
+
+public:
+ enum Kind { Cleanup, Catch, Terminate, Filter };
+
+ EHScope(Kind kind) { commonBits.kind = kind; }
+
+ Kind getKind() const { return static_cast<Kind>(commonBits.kind); }
+};
+
+/// A cleanup scope which generates the cleanup blocks lazily.
+class alignas(EHScopeStack::ScopeStackAlignment) EHCleanupScope
+ : public EHScope {
+public:
+ /// Gets the size required for a lazy cleanup scope with the given
+ /// cleanup-data requirements.
+ static size_t getSizeForCleanupSize(size_t size) {
+ return sizeof(EHCleanupScope) + size;
+ }
+
+ size_t getAllocatedSize() const {
+ return sizeof(EHCleanupScope) + cleanupBits.cleanupSize;
+ }
+
+ EHCleanupScope(unsigned cleanupSize) : EHScope(EHScope::Cleanup) {
+ // TODO(cir): When exception handling is upstreamed, isNormalCleanup and
+ // isEHCleanup will be arguments to the constructor.
+ cleanupBits.isNormalCleanup = true;
+ cleanupBits.isEHCleanup = false;
+ cleanupBits.isActive = true;
+ cleanupBits.isLifetimeMarker = false;
+ cleanupBits.testFlagInNormalCleanup = false;
+ cleanupBits.testFlagInEHCleanup = false;
+ cleanupBits.cleanupSize = cleanupSize;
+
+ assert(cleanupBits.cleanupSize == cleanupSize && "cleanup size overflow");
+ }
+
+ void destroy() {}
+ // Objects of EHCleanupScope are not destructed. Use destroy().
+ ~EHCleanupScope() = delete;
+
+ bool isNormalCleanup() const { return cleanupBits.isNormalCleanup; }
+
+ bool isActive() const { return cleanupBits.isActive; }
+
+ size_t getCleanupSize() const { return cleanupBits.cleanupSize; }
+ void *getCleanupBuffer() { return this + 1; }
+
+ EHScopeStack::Cleanup *getCleanup() {
+ return reinterpret_cast<EHScopeStack::Cleanup *>(getCleanupBuffer());
+ }
+
+ static bool classof(const EHScope *scope) {
+ return (scope->getKind() == Cleanup);
+ }
+
+ void markEmitted() {}
+};
+
+/// A non-stable pointer into the scope stack.
+class EHScopeStack::iterator {
+ char *ptr = nullptr;
+
+ friend class EHScopeStack;
+ explicit iterator(char *ptr) : ptr(ptr) {}
+
+public:
+ iterator() = default;
+
+ EHScope *get() const { return reinterpret_cast<EHScope *>(ptr); }
+
+ EHScope &operator*() const { return *get(); }
+};
+
+inline EHScopeStack::iterator EHScopeStack::begin() const {
+ return iterator(startOfData);
+}
+
+} // namespace clang::CIRGen
+#endif // CLANG_LIB_CIR_CODEGEN_CIRGENCLEANUP_H
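
EHCleanupScope keeps the user-supplied Cleanup object in trailing bytes immediately after the scope header, which is why getSizeForCleanupSize adds the two sizes and getCleanupBuffer returns this + 1. A small sketch of that trailing-storage idiom with placement new; Header and Payload are illustrative stand-ins and alignment is handled only coarsely here:

#include <cstddef>
#include <cstdio>
#include <new>

struct Header {
  std::size_t payloadSize;
  // Payload bytes start immediately after the header object.
  void *payload() { return this + 1; }
  static constexpr std::size_t allocationSize(std::size_t payloadSize) {
    return sizeof(Header) + payloadSize;
  }
};

struct Payload {
  int value;
};

int main() {
  // One raw allocation holds the header followed by its variable-size payload.
  alignas(std::max_align_t) char raw[Header::allocationSize(sizeof(Payload))];
  Header *h = new (raw) Header{sizeof(Payload)};
  Payload *p = new (h->payload()) Payload{42};
  std::printf("%d\n", p->value); // prints 42; both objects share one buffer
}
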
diff --git a/clang/lib/CIR/CodeGen/CIRGenConstantEmitter.h b/clang/lib/CIR/CodeGen/CIRGenConstantEmitter.h
index d6dac50..d455f6e 100644
--- a/clang/lib/CIR/CodeGen/CIRGenConstantEmitter.h
+++ b/clang/lib/CIR/CodeGen/CIRGenConstantEmitter.h
@@ -80,7 +80,7 @@ public:
// initializer or to propagate to another context; for example,
// side effects, or emitting an initialization that requires a
// reference to its current location.
- mlir::Attribute emitForMemory(mlir::Attribute c, QualType t);
+ mlir::Attribute emitForMemory(mlir::Attribute c, QualType destType);
/// Try to emit the initializer of the given declaration as an abstract
/// constant.
@@ -90,8 +90,9 @@ public:
/// asserting that it succeeded. This is only safe to do when the
/// expression is known to be a constant expression with either a fairly
/// simple type or a known simple form.
+ mlir::Attribute emitAbstract(const Expr *e, QualType destType);
mlir::Attribute emitAbstract(SourceLocation loc, const APValue &value,
- QualType t);
+ QualType destType);
mlir::Attribute tryEmitConstantExpr(const ConstantExpr *ce);
@@ -101,6 +102,7 @@ public:
mlir::Attribute tryEmitPrivateForVarInit(const VarDecl &d);
+ mlir::TypedAttr tryEmitPrivate(const Expr *e, QualType destType);
mlir::Attribute tryEmitPrivate(const APValue &value, QualType destType);
mlir::Attribute tryEmitPrivateForMemory(const APValue &value, QualType t);
diff --git a/clang/lib/CIR/CodeGen/CIRGenDecl.cpp b/clang/lib/CIR/CodeGen/CIRGenDecl.cpp
index 78d375c..7cc024f 100644
--- a/clang/lib/CIR/CodeGen/CIRGenDecl.cpp
+++ b/clang/lib/CIR/CodeGen/CIRGenDecl.cpp
@@ -154,15 +154,19 @@ void CIRGenFunction::emitAutoVarInit(
initializeWhatIsTechnicallyUninitialized(addr);
LValue lv = makeAddrLValue(addr, type, AlignmentSource::Decl);
emitExprAsInit(init, &d, lv);
- // In case lv has uses it means we indeed initialized something
- // out of it while trying to build the expression, mark it as such.
- mlir::Value val = lv.getAddress().getPointer();
- assert(val && "Should have an address");
- auto allocaOp = val.getDefiningOp<cir::AllocaOp>();
- assert(allocaOp && "Address should come straight out of the alloca");
-
- if (!allocaOp.use_empty())
- allocaOp.setInitAttr(mlir::UnitAttr::get(&getMLIRContext()));
+
+ if (!emission.wasEmittedAsOffloadClause()) {
+ // In case lv has uses it means we indeed initialized something
+ // out of it while trying to build the expression, mark it as such.
+ mlir::Value val = lv.getAddress().getPointer();
+ assert(val && "Should have an address");
+ auto allocaOp = val.getDefiningOp<cir::AllocaOp>();
+ assert(allocaOp && "Address should come straight out of the alloca");
+
+ if (!allocaOp.use_empty())
+ allocaOp.setInitAttr(mlir::UnitAttr::get(&getMLIRContext()));
+ }
+
return;
}
@@ -293,7 +297,7 @@ CIRGenModule::getOrCreateStaticVarDecl(const VarDecl &d,
mlir::Attribute init = builder.getZeroInitAttr(convertType(ty));
cir::GlobalOp gv = builder.createVersionedGlobal(
- getModule(), getLoc(d.getLocation()), name, lty, linkage);
+ getModule(), getLoc(d.getLocation()), name, lty, false, linkage);
// TODO(cir): infer visibility from linkage in global op builder.
gv.setVisibility(getMLIRVisibilityFromCIRLinkage(linkage));
gv.setInitialValueAttr(init);
@@ -667,6 +671,12 @@ struct DestroyObject final : EHScopeStack::Cleanup {
void emit(CIRGenFunction &cgf) override {
cgf.emitDestroy(addr, type, destroyer);
}
+
+ // This is a placeholder until EHCleanupScope is implemented.
+ size_t getSize() const override {
+ assert(!cir::MissingFeatures::ehCleanupScope());
+ return sizeof(DestroyObject);
+ }
};
} // namespace
diff --git a/clang/lib/CIR/CodeGen/CIRGenException.cpp b/clang/lib/CIR/CodeGen/CIRGenException.cpp
new file mode 100644
index 0000000..7fcb39a
--- /dev/null
+++ b/clang/lib/CIR/CodeGen/CIRGenException.cpp
@@ -0,0 +1,41 @@
+//===--- CIRGenException.cpp - Emit CIR Code for C++ exceptions -*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// This contains code dealing with C++ exception related code generation.
+//
+//===----------------------------------------------------------------------===//
+
+#include "CIRGenCXXABI.h"
+#include "CIRGenFunction.h"
+
+#include "clang/AST/StmtVisitor.h"
+
+using namespace clang;
+using namespace clang::CIRGen;
+
+void CIRGenFunction::emitCXXThrowExpr(const CXXThrowExpr *e) {
+ const llvm::Triple &triple = getTarget().getTriple();
+ if (cgm.getLangOpts().OpenMPIsTargetDevice &&
+ (triple.isNVPTX() || triple.isAMDGCN())) {
+ cgm.errorNYI("emitCXXThrowExpr OpenMP with NVPTX or AMDGCN Triples");
+ return;
+ }
+
+ if (const Expr *subExpr = e->getSubExpr()) {
+ QualType throwType = subExpr->getType();
+ if (throwType->isObjCObjectPointerType()) {
+ cgm.errorNYI("emitCXXThrowExpr ObjCObjectPointerType");
+ return;
+ } else {
+ cgm.errorNYI("emitCXXThrowExpr with subExpr");
+ return;
+ }
+ } else {
+ cgm.getCXXABI().emitRethrow(*this, /*isNoReturn=*/true);
+ }
+}
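
With the new emitCXXThrowExpr, only a rethrow with no operand reaches the ABI's emitRethrow; a throw with an operand (or an Objective-C pointer operand) still hits errorNYI. A source-level illustration of the shape that takes the supported path; this is illustrative only, not a test from the patch:

void handler();

void forward_current_exception() {
  try {
    handler();
  } catch (...) {
    throw; // no sub-expression: CXXThrowExpr::getSubExpr() is null, so
           // codegen goes straight to the ABI rethrow entry point.
  }
}

// void reject(int x) { throw x; } // has a sub-expression: still NYI above.
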
diff --git a/clang/lib/CIR/CodeGen/CIRGenExpr.cpp b/clang/lib/CIR/CodeGen/CIRGenExpr.cpp
index a0ff08e..4698793 100644
--- a/clang/lib/CIR/CodeGen/CIRGenExpr.cpp
+++ b/clang/lib/CIR/CodeGen/CIRGenExpr.cpp
@@ -73,28 +73,58 @@ Address CIRGenFunction::emitPointerWithAlignment(const Expr *expr,
// Casts:
if (auto const *ce = dyn_cast<CastExpr>(expr)) {
- if (isa<ExplicitCastExpr>(ce)) {
- cgm.errorNYI(expr->getSourceRange(),
- "emitPointerWithAlignment: explicit cast");
- return Address::invalid();
- }
+ if (const auto *ece = dyn_cast<ExplicitCastExpr>(ce))
+ cgm.emitExplicitCastExprType(ece);
switch (ce->getCastKind()) {
// Non-converting casts (but not C's implicit conversion from void*).
case CK_BitCast:
case CK_NoOp:
case CK_AddressSpaceConversion: {
- cgm.errorNYI(expr->getSourceRange(),
- "emitPointerWithAlignment: noop cast");
- return Address::invalid();
- } break;
+ if (const auto *ptrTy =
+ ce->getSubExpr()->getType()->getAs<PointerType>()) {
+ if (ptrTy->getPointeeType()->isVoidType())
+ break;
+
+ LValueBaseInfo innerBaseInfo;
+ assert(!cir::MissingFeatures::opTBAA());
+ Address addr =
+ emitPointerWithAlignment(ce->getSubExpr(), &innerBaseInfo);
+ if (baseInfo)
+ *baseInfo = innerBaseInfo;
+
+ if (isa<ExplicitCastExpr>(ce)) {
+ LValueBaseInfo targetTypeBaseInfo;
+
+ const QualType pointeeType = expr->getType()->getPointeeType();
+ const CharUnits align =
+ cgm.getNaturalTypeAlignment(pointeeType, &targetTypeBaseInfo);
+
+ // If the source l-value is opaque, honor the alignment of the
+ // casted-to type.
+ if (innerBaseInfo.getAlignmentSource() != AlignmentSource::Decl) {
+ if (baseInfo)
+ baseInfo->mergeForCast(targetTypeBaseInfo);
+ addr = Address(addr.getPointer(), addr.getElementType(), align);
+ }
+ }
+
+ assert(!cir::MissingFeatures::sanitizers());
+
+ const mlir::Type eltTy =
+ convertTypeForMem(expr->getType()->getPointeeType());
+ addr = getBuilder().createElementBitCast(getLoc(expr->getSourceRange()),
+ addr, eltTy);
+ assert(!cir::MissingFeatures::addressSpace());
+
+ return addr;
+ }
+ break;
+ }
// Array-to-pointer decay. TODO(cir): BaseInfo and TBAAInfo.
- case CK_ArrayToPointerDecay: {
- cgm.errorNYI(expr->getSourceRange(),
- "emitPointerWithAlignment: array-to-pointer decay");
- return Address::invalid();
- }
+ case CK_ArrayToPointerDecay:
+ return emitArrayToPointerDecay(ce->getSubExpr(), baseInfo);
case CK_UncheckedDerivedToBase:
case CK_DerivedToBase: {
@@ -184,8 +214,11 @@ Address CIRGenFunction::emitPointerWithAlignment(const Expr *expr,
if (const UnaryOperator *uo = dyn_cast<UnaryOperator>(expr)) {
// TODO(cir): maybe we should use cir.unary for pointers here instead.
if (uo->getOpcode() == UO_AddrOf) {
- cgm.errorNYI(expr->getSourceRange(), "emitPointerWithAlignment: unary &");
- return Address::invalid();
+ LValue lv = emitLValue(uo->getSubExpr());
+ if (baseInfo)
+ *baseInfo = lv.getBaseInfo();
+ assert(!cir::MissingFeatures::opTBAA());
+ return lv.getAddress();
}
}
@@ -369,9 +402,10 @@ Address CIRGenFunction::getAddrOfBitFieldStorage(LValue base,
unsigned index) {
mlir::Location loc = getLoc(field->getLocation());
cir::PointerType fieldPtr = cir::PointerType::get(fieldType);
- cir::GetMemberOp sea = getBuilder().createGetMember(
- loc, fieldPtr, base.getPointer(), field->getName(), index);
auto rec = cast<cir::RecordType>(base.getAddress().getElementType());
+ cir::GetMemberOp sea = getBuilder().createGetMember(
+ loc, fieldPtr, base.getPointer(), field->getName(),
+ rec.isUnion() ? field->getFieldIndex() : index);
CharUnits offset = CharUnits::fromQuantity(
rec.getElementOffset(cgm.getDataLayout().layout, index));
return Address(sea, base.getAlignment().alignmentAtOffset(offset));
@@ -550,6 +584,37 @@ RValue CIRGenFunction::emitLoadOfLValue(LValue lv, SourceLocation loc) {
return RValue::get(nullptr);
}
+static cir::FuncOp emitFunctionDeclPointer(CIRGenModule &cgm, GlobalDecl gd) {
+ assert(!cir::MissingFeatures::weakRefReference());
+ return cgm.getAddrOfFunction(gd);
+}
+
+static LValue emitFunctionDeclLValue(CIRGenFunction &cgf, const Expr *e,
+ GlobalDecl gd) {
+ const FunctionDecl *fd = cast<FunctionDecl>(gd.getDecl());
+ cir::FuncOp funcOp = emitFunctionDeclPointer(cgf.cgm, gd);
+ mlir::Location loc = cgf.getLoc(e->getSourceRange());
+ CharUnits align = cgf.getContext().getDeclAlign(fd);
+
+ assert(!cir::MissingFeatures::sanitizers());
+
+ mlir::Type fnTy = funcOp.getFunctionType();
+ mlir::Type ptrTy = cir::PointerType::get(fnTy);
+ mlir::Value addr = cgf.getBuilder().create<cir::GetGlobalOp>(
+ loc, ptrTy, funcOp.getSymName());
+
+ if (funcOp.getFunctionType() != cgf.convertType(fd->getType())) {
+ fnTy = cgf.convertType(fd->getType());
+ ptrTy = cir::PointerType::get(fnTy);
+
+ addr = cir::CastOp::create(cgf.getBuilder(), addr.getLoc(), ptrTy,
+ cir::CastKind::bitcast, addr);
+ }
+
+ return cgf.makeAddrLValue(Address(addr, fnTy, align), e->getType(),
+ AlignmentSource::Decl);
+}
+
LValue CIRGenFunction::emitDeclRefLValue(const DeclRefExpr *e) {
const NamedDecl *nd = e->getDecl();
QualType ty = e->getType();
@@ -588,6 +653,12 @@ LValue CIRGenFunction::emitDeclRefLValue(const DeclRefExpr *e) {
? emitLoadOfReferenceLValue(addr, getLoc(e->getSourceRange()),
vd->getType(), AlignmentSource::Decl)
: makeAddrLValue(addr, ty, AlignmentSource::Decl);
+
+  // Statics are defined as globals, so they are not included in the function's
+  // symbol table.
+ assert((vd->isStaticLocal() || symbolTable.count(vd)) &&
+ "non-static locals should be already mapped");
+
return lv;
}
@@ -600,6 +671,16 @@ LValue CIRGenFunction::emitDeclRefLValue(const DeclRefExpr *e) {
return emitLValue(bd->getBinding());
}
+ if (const auto *fd = dyn_cast<FunctionDecl>(nd)) {
+ LValue lv = emitFunctionDeclLValue(*this, e, fd);
+
+ // Emit debuginfo for the function declaration if the target wants to.
+ if (getContext().getTargetInfo().allowDebugInfoForExternalRef())
+ assert(!cir::MissingFeatures::generateDebugInfo());
+
+ return lv;
+ }
+
cgm.errorNYI(e->getSourceRange(), "emitDeclRefLValue: unhandled decl type");
return LValue();
}
@@ -1011,9 +1092,7 @@ LValue CIRGenFunction::emitCastLValue(const CastExpr *e) {
case CK_UncheckedDerivedToBase:
case CK_DerivedToBase: {
- const auto *derivedClassTy =
- e->getSubExpr()->getType()->castAs<clang::RecordType>();
- auto *derivedClassDecl = cast<CXXRecordDecl>(derivedClassTy->getDecl());
+ auto *derivedClassDecl = e->getSubExpr()->getType()->castAsCXXRecordDecl();
LValue lv = emitLValue(e->getSubExpr());
Address thisAddr = lv.getAddress();
@@ -1037,10 +1116,22 @@ LValue CIRGenFunction::emitCastLValue(const CastExpr *e) {
llvm_unreachable("Invalid cast kind");
}
+static DeclRefExpr *tryToConvertMemberExprToDeclRefExpr(CIRGenFunction &cgf,
+ const MemberExpr *me) {
+ if (auto *vd = dyn_cast<VarDecl>(me->getMemberDecl())) {
+ // Try to emit static variable member expressions as DREs.
+ return DeclRefExpr::Create(
+ cgf.getContext(), NestedNameSpecifierLoc(), SourceLocation(), vd,
+ /*RefersToEnclosingVariableOrCapture=*/false, me->getExprLoc(),
+ me->getType(), me->getValueKind(), nullptr, nullptr, me->isNonOdrUse());
+ }
+ return nullptr;
+}
+
LValue CIRGenFunction::emitMemberExpr(const MemberExpr *e) {
- if (isa<VarDecl>(e->getMemberDecl())) {
- cgm.errorNYI(e->getSourceRange(), "emitMemberExpr: VarDecl");
- return LValue();
+ if (DeclRefExpr *dre = tryToConvertMemberExprToDeclRefExpr(*this, e)) {
+ emitIgnoredExpr(e->getBase());
+ return emitDeclRefLValue(dre);
}
Expr *baseExpr = e->getBase();
@@ -1162,26 +1253,27 @@ static void pushTemporaryCleanup(CIRGenFunction &cgf,
return;
}
- CXXDestructorDecl *referenceTemporaryDtor = nullptr;
- if (const clang::RecordType *rt = e->getType()
- ->getBaseElementTypeUnsafe()
- ->getAs<clang::RecordType>()) {
- // Get the destructor for the reference temporary.
- auto *classDecl = cast<CXXRecordDecl>(rt->getDecl());
- if (!classDecl->hasTrivialDestructor())
- referenceTemporaryDtor = classDecl->getDestructor();
- }
-
- if (!referenceTemporaryDtor)
+ const QualType::DestructionKind dk = e->getType().isDestructedType();
+ if (dk == QualType::DK_none)
return;
- // Call the destructor for the temporary.
switch (m->getStorageDuration()) {
case SD_Static:
- case SD_Thread:
- cgf.cgm.errorNYI(e->getSourceRange(),
- "pushTemporaryCleanup: static/thread storage duration");
- return;
+ case SD_Thread: {
+ CXXDestructorDecl *referenceTemporaryDtor = nullptr;
+ if (const auto *classDecl =
+ e->getType()->getBaseElementTypeUnsafe()->getAsCXXRecordDecl();
+ classDecl && !classDecl->hasTrivialDestructor())
+ // Get the destructor for the reference temporary.
+ referenceTemporaryDtor = classDecl->getDestructor();
+
+ if (!referenceTemporaryDtor)
+ return;
+
+ cgf.cgm.errorNYI(e->getSourceRange(), "pushTemporaryCleanup: static/thread "
+ "storage duration with destructors");
+ break;
+ }
case SD_FullExpression:
cgf.pushDestroy(NormalAndEHCleanup, referenceTemporary, e->getType(),
@@ -1373,11 +1465,6 @@ RValue CIRGenFunction::emitAnyExpr(const Expr *e, AggValueSlot aggSlot) {
llvm_unreachable("bad evaluation kind");
}
-static cir::FuncOp emitFunctionDeclPointer(CIRGenModule &cgm, GlobalDecl gd) {
- assert(!cir::MissingFeatures::weakRefReference());
- return cgm.getAddrOfFunction(gd);
-}
-
// Detect the unusual situation where an inline version is shadowed by a
// non-inline version. In that case we should pick the external one
// everywhere. That's GCC behavior too.
@@ -1540,10 +1627,10 @@ CIRGenCallee CIRGenFunction::emitCallee(const clang::Expr *e) {
cgm.errorNYI(e->getSourceRange(),
"emitCallee: call to member function is NYI");
return {};
+ } else if (auto *pde = dyn_cast<CXXPseudoDestructorExpr>(e)) {
+ return CIRGenCallee::forPseudoDestructor(pde);
}
- assert(!cir::MissingFeatures::opCallPseudoDtor());
-
// Otherwise, we have an indirect reference.
mlir::Value calleePtr;
QualType functionType;
@@ -1595,10 +1682,8 @@ RValue CIRGenFunction::emitCallExpr(const clang::CallExpr *e,
return emitBuiltinExpr(callee.getBuiltinDecl(), callee.getBuiltinID(), e,
returnValue);
- if (isa<CXXPseudoDestructorExpr>(e->getCallee())) {
- cgm.errorNYI(e->getSourceRange(), "call to pseudo destructor");
- }
- assert(!cir::MissingFeatures::opCallPseudoDtor());
+ if (callee.isPseudoDestructor())
+ return emitCXXPseudoDestructorExpr(callee.getPseudoDestructorExpr());
return emitCall(e->getCallee()->getType(), callee, e, returnValue);
}
@@ -1615,7 +1700,9 @@ void CIRGenFunction::emitIgnoredExpr(const Expr *e) {
emitLValue(e);
}
-Address CIRGenFunction::emitArrayToPointerDecay(const Expr *e) {
+Address CIRGenFunction::emitArrayToPointerDecay(const Expr *e,
+ LValueBaseInfo *baseInfo) {
+ assert(!cir::MissingFeatures::opTBAA());
assert(e->getType()->isArrayType() &&
"Array to pointer decay must have array source type!");
@@ -1831,7 +1918,7 @@ RValue CIRGenFunction::emitCXXMemberCallExpr(const CXXMemberCallExpr *ce,
}
bool hasQualifier = me->hasQualifier();
- NestedNameSpecifier *qualifier = hasQualifier ? me->getQualifier() : nullptr;
+ NestedNameSpecifier qualifier = me->getQualifier();
bool isArrow = me->isArrow();
const Expr *base = me->getBase();
@@ -1885,12 +1972,8 @@ void CIRGenFunction::emitCXXConstructExpr(const CXXConstructExpr *e,
delegating = true;
break;
case CXXConstructionKind::VirtualBase:
- // This should just set 'forVirtualBase' to true and fall through, but
- // virtual base class support is otherwise missing, so this needs to wait
- // until it can be tested.
- cgm.errorNYI(e->getSourceRange(),
- "emitCXXConstructExpr: virtual base constructor");
- return;
+ forVirtualBase = true;
+ [[fallthrough]];
case CXXConstructionKind::NonVirtualBase:
type = Ctor_Base;
break;
@@ -2052,8 +2135,8 @@ cir::AllocaOp CIRGenFunction::createTempAlloca(mlir::Type ty,
///
/// For named members of enums, this is the only way they are emitted.
CIRGenFunction::ConstantEmission
-CIRGenFunction::tryEmitAsConstant(DeclRefExpr *refExpr) {
- ValueDecl *value = refExpr->getDecl();
+CIRGenFunction::tryEmitAsConstant(const DeclRefExpr *refExpr) {
+ const ValueDecl *value = refExpr->getDecl();
// There is a lot more to do here, but for now only EnumConstantDecl is
// supported.
@@ -2086,6 +2169,13 @@ CIRGenFunction::tryEmitAsConstant(DeclRefExpr *refExpr) {
return ConstantEmission::forValue(cstToEmit);
}
+CIRGenFunction::ConstantEmission
+CIRGenFunction::tryEmitAsConstant(const MemberExpr *me) {
+ if (DeclRefExpr *dre = tryToConvertMemberExprToDeclRefExpr(*this, me))
+ return tryEmitAsConstant(dre);
+ return ConstantEmission();
+}
+
mlir::Value CIRGenFunction::emitScalarConstant(
const CIRGenFunction::ConstantEmission &constant, Expr *e) {
assert(constant && "not a constant");
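
tryToConvertMemberExprToDeclRefExpr lets a member access whose member is actually a static data member be emitted as an ordinary global reference: the base expression is evaluated only for its side effects, and the load targets the global. A source-level illustration with invented names:

struct Counter {
  static int total; // a static data member is really a global variable
};
int Counter::total = 0;

Counter &next();

int read_total() {
  // 'next().total' is a MemberExpr whose member decl is a VarDecl. The call
  // to next() is emitted only for its side effects; the load itself goes
  // through a DeclRefExpr to the global Counter::total.
  return next().total;
}
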
diff --git a/clang/lib/CIR/CodeGen/CIRGenExprAggregate.cpp b/clang/lib/CIR/CodeGen/CIRGenExprAggregate.cpp
index 51aab95..113f996 100644
--- a/clang/lib/CIR/CodeGen/CIRGenExprAggregate.cpp
+++ b/clang/lib/CIR/CodeGen/CIRGenExprAggregate.cpp
@@ -69,6 +69,12 @@ public:
void Visit(Expr *e) { StmtVisitor<AggExprEmitter>::Visit(e); }
void VisitCallExpr(const CallExpr *e);
+ void VisitStmtExpr(const StmtExpr *e) {
+ CIRGenFunction::StmtExprEvaluation eval(cgf);
+ Address retAlloca =
+ cgf.createMemTemp(e->getType(), cgf.getLoc(e->getSourceRange()));
+ (void)cgf.emitCompoundStmt(*e->getSubStmt(), &retAlloca, dest);
+ }
void VisitDeclRefExpr(DeclRefExpr *e) { emitAggLoadOfLValue(e); }
@@ -78,6 +84,205 @@ public:
void visitCXXParenListOrInitListExpr(Expr *e, ArrayRef<Expr *> args,
FieldDecl *initializedFieldInUnion,
Expr *arrayFiller);
+
+ void VisitCXXBindTemporaryExpr(CXXBindTemporaryExpr *e) {
+ assert(!cir::MissingFeatures::aggValueSlotDestructedFlag());
+ Visit(e->getSubExpr());
+ }
+
+ // Stubs -- These should be moved up when they are implemented.
+ void VisitCXXFunctionalCastExpr(CXXFunctionalCastExpr *e) {
+ // We shouldn't really get here, but we do because of missing handling for
+ // emitting constant aggregate initializers. If we just ignore this, a
+ // fallback handler will do the right thing.
+ assert(!cir::MissingFeatures::constEmitterAggILE());
+ return;
+ }
+ void VisitCastExpr(CastExpr *e) {
+ switch (e->getCastKind()) {
+ case CK_LValueToRValue:
+ assert(!cir::MissingFeatures::aggValueSlotVolatile());
+ [[fallthrough]];
+ case CK_NoOp:
+ case CK_UserDefinedConversion:
+ case CK_ConstructorConversion:
+ assert(cgf.getContext().hasSameUnqualifiedType(e->getSubExpr()->getType(),
+ e->getType()) &&
+ "Implicit cast types must be compatible");
+ Visit(e->getSubExpr());
+ break;
+ default:
+ cgf.cgm.errorNYI(e->getSourceRange(),
+ std::string("AggExprEmitter: VisitCastExpr: ") +
+ e->getCastKindName());
+ break;
+ }
+ }
+ void VisitStmt(Stmt *s) {
+ cgf.cgm.errorNYI(s->getSourceRange(),
+ std::string("AggExprEmitter::VisitStmt: ") +
+ s->getStmtClassName());
+ }
+ void VisitParenExpr(ParenExpr *pe) {
+ cgf.cgm.errorNYI(pe->getSourceRange(), "AggExprEmitter: VisitParenExpr");
+ }
+ void VisitGenericSelectionExpr(GenericSelectionExpr *ge) {
+ cgf.cgm.errorNYI(ge->getSourceRange(),
+ "AggExprEmitter: VisitGenericSelectionExpr");
+ }
+ void VisitCoawaitExpr(CoawaitExpr *e) {
+ cgf.cgm.errorNYI(e->getSourceRange(), "AggExprEmitter: VisitCoawaitExpr");
+ }
+ void VisitCoyieldExpr(CoyieldExpr *e) {
+ cgf.cgm.errorNYI(e->getSourceRange(), "AggExprEmitter: VisitCoyieldExpr");
+ }
+ void VisitUnaryCoawait(UnaryOperator *e) {
+ cgf.cgm.errorNYI(e->getSourceRange(), "AggExprEmitter: VisitUnaryCoawait");
+ }
+ void VisitUnaryExtension(UnaryOperator *e) {
+ cgf.cgm.errorNYI(e->getSourceRange(),
+ "AggExprEmitter: VisitUnaryExtension");
+ }
+ void VisitSubstNonTypeTemplateParmExpr(SubstNonTypeTemplateParmExpr *e) {
+ cgf.cgm.errorNYI(e->getSourceRange(),
+ "AggExprEmitter: VisitSubstNonTypeTemplateParmExpr");
+ }
+ void VisitConstantExpr(ConstantExpr *e) {
+ cgf.cgm.errorNYI(e->getSourceRange(), "AggExprEmitter: VisitConstantExpr");
+ }
+ void VisitMemberExpr(MemberExpr *e) {
+ cgf.cgm.errorNYI(e->getSourceRange(), "AggExprEmitter: VisitMemberExpr");
+ }
+ void VisitUnaryDeref(UnaryOperator *e) {
+ cgf.cgm.errorNYI(e->getSourceRange(), "AggExprEmitter: VisitUnaryDeref");
+ }
+ void VisitStringLiteral(StringLiteral *e) {
+ cgf.cgm.errorNYI(e->getSourceRange(), "AggExprEmitter: VisitStringLiteral");
+ }
+ void VisitCompoundLiteralExpr(CompoundLiteralExpr *e) {
+ cgf.cgm.errorNYI(e->getSourceRange(),
+ "AggExprEmitter: VisitCompoundLiteralExpr");
+ }
+ void VisitArraySubscriptExpr(ArraySubscriptExpr *e) {
+ cgf.cgm.errorNYI(e->getSourceRange(),
+ "AggExprEmitter: VisitArraySubscriptExpr");
+ }
+ void VisitPredefinedExpr(const PredefinedExpr *e) {
+ cgf.cgm.errorNYI(e->getSourceRange(),
+ "AggExprEmitter: VisitPredefinedExpr");
+ }
+ void VisitBinaryOperator(const BinaryOperator *e) {
+ cgf.cgm.errorNYI(e->getSourceRange(),
+ "AggExprEmitter: VisitBinaryOperator");
+ }
+ void VisitPointerToDataMemberBinaryOperator(const BinaryOperator *e) {
+ cgf.cgm.errorNYI(e->getSourceRange(),
+ "AggExprEmitter: VisitPointerToDataMemberBinaryOperator");
+ }
+ void VisitBinAssign(const BinaryOperator *e) {
+ cgf.cgm.errorNYI(e->getSourceRange(), "AggExprEmitter: VisitBinAssign");
+ }
+ void VisitBinComma(const BinaryOperator *e) {
+ cgf.cgm.errorNYI(e->getSourceRange(), "AggExprEmitter: VisitBinComma");
+ }
+ void VisitBinCmp(const BinaryOperator *e) {
+ cgf.cgm.errorNYI(e->getSourceRange(), "AggExprEmitter: VisitBinCmp");
+ }
+ void VisitCXXRewrittenBinaryOperator(CXXRewrittenBinaryOperator *e) {
+ cgf.cgm.errorNYI(e->getSourceRange(),
+ "AggExprEmitter: VisitCXXRewrittenBinaryOperator");
+ }
+ void VisitObjCMessageExpr(ObjCMessageExpr *e) {
+ cgf.cgm.errorNYI(e->getSourceRange(),
+ "AggExprEmitter: VisitObjCMessageExpr");
+ }
+ void VisitObjCIVarRefExpr(ObjCIvarRefExpr *e) {
+ cgf.cgm.errorNYI(e->getSourceRange(),
+ "AggExprEmitter: VisitObjCIVarRefExpr");
+ }
+
+ void VisitDesignatedInitUpdateExpr(DesignatedInitUpdateExpr *e) {
+ cgf.cgm.errorNYI(e->getSourceRange(),
+ "AggExprEmitter: VisitDesignatedInitUpdateExpr");
+ }
+ void VisitAbstractConditionalOperator(const AbstractConditionalOperator *e) {
+ cgf.cgm.errorNYI(e->getSourceRange(),
+ "AggExprEmitter: VisitAbstractConditionalOperator");
+ }
+ void VisitChooseExpr(const ChooseExpr *e) {
+ cgf.cgm.errorNYI(e->getSourceRange(), "AggExprEmitter: VisitChooseExpr");
+ }
+ void VisitCXXParenListInitExpr(CXXParenListInitExpr *e) {
+ cgf.cgm.errorNYI(e->getSourceRange(),
+ "AggExprEmitter: VisitCXXParenListInitExpr");
+ }
+ void VisitArrayInitLoopExpr(const ArrayInitLoopExpr *e,
+ llvm::Value *outerBegin = nullptr) {
+ cgf.cgm.errorNYI(e->getSourceRange(),
+ "AggExprEmitter: VisitArrayInitLoopExpr");
+ }
+ void VisitImplicitValueInitExpr(ImplicitValueInitExpr *e) {
+ cgf.cgm.errorNYI(e->getSourceRange(),
+ "AggExprEmitter: VisitImplicitValueInitExpr");
+ }
+ void VisitNoInitExpr(NoInitExpr *e) {
+ cgf.cgm.errorNYI(e->getSourceRange(), "AggExprEmitter: VisitNoInitExpr");
+ }
+ void VisitCXXDefaultArgExpr(CXXDefaultArgExpr *dae) {
+ cgf.cgm.errorNYI(dae->getSourceRange(),
+ "AggExprEmitter: VisitCXXDefaultArgExpr");
+ }
+ void VisitCXXDefaultInitExpr(CXXDefaultInitExpr *die) {
+ cgf.cgm.errorNYI(die->getSourceRange(),
+ "AggExprEmitter: VisitCXXDefaultInitExpr");
+ }
+ void VisitCXXInheritedCtorInitExpr(const CXXInheritedCtorInitExpr *e) {
+ cgf.cgm.errorNYI(e->getSourceRange(),
+ "AggExprEmitter: VisitCXXInheritedCtorInitExpr");
+ }
+ void VisitLambdaExpr(LambdaExpr *e) {
+ cgf.cgm.errorNYI(e->getSourceRange(), "AggExprEmitter: VisitLambdaExpr");
+ }
+ void VisitCXXStdInitializerListExpr(CXXStdInitializerListExpr *e) {
+ cgf.cgm.errorNYI(e->getSourceRange(),
+ "AggExprEmitter: VisitCXXStdInitializerListExpr");
+ }
+
+ void VisitExprWithCleanups(ExprWithCleanups *e) {
+ cgf.cgm.errorNYI(e->getSourceRange(),
+ "AggExprEmitter: VisitExprWithCleanups");
+ }
+ void VisitCXXScalarValueInitExpr(CXXScalarValueInitExpr *e) {
+ cgf.cgm.errorNYI(e->getSourceRange(),
+ "AggExprEmitter: VisitCXXScalarValueInitExpr");
+ }
+ void VisitCXXTypeidExpr(CXXTypeidExpr *e) {
+ cgf.cgm.errorNYI(e->getSourceRange(), "AggExprEmitter: VisitCXXTypeidExpr");
+ }
+ void VisitMaterializeTemporaryExpr(MaterializeTemporaryExpr *e) {
+ cgf.cgm.errorNYI(e->getSourceRange(),
+ "AggExprEmitter: VisitMaterializeTemporaryExpr");
+ }
+ void VisitOpaqueValueExpr(OpaqueValueExpr *e) {
+ cgf.cgm.errorNYI(e->getSourceRange(),
+ "AggExprEmitter: VisitOpaqueValueExpr");
+ }
+
+ void VisitPseudoObjectExpr(PseudoObjectExpr *e) {
+ cgf.cgm.errorNYI(e->getSourceRange(),
+ "AggExprEmitter: VisitPseudoObjectExpr");
+ }
+
+ void VisitVAArgExpr(VAArgExpr *e) {
+ cgf.cgm.errorNYI(e->getSourceRange(), "AggExprEmitter: VisitVAArgExpr");
+ }
+
+ void VisitCXXThrowExpr(const CXXThrowExpr *e) {
+ cgf.cgm.errorNYI(e->getSourceRange(), "AggExprEmitter: VisitCXXThrowExpr");
+ }
+ void VisitAtomicExpr(AtomicExpr *e) {
+ cgf.cgm.errorNYI(e->getSourceRange(), "AggExprEmitter: VisitAtomicExpr");
+ }
};
} // namespace
@@ -124,8 +329,8 @@ void AggExprEmitter::emitArrayInit(Address destPtr, cir::ArrayType arrayTy,
const QualType elementType =
cgf.getContext().getAsArrayType(arrayQTy)->getElementType();
- if (elementType.isDestructedType()) {
- cgf.cgm.errorNYI(loc, "dtorKind NYI");
+ if (elementType.isDestructedType() && cgf.cgm.getLangOpts().Exceptions) {
+ cgf.cgm.errorNYI(loc, "initialized array requires destruction");
return;
}
@@ -135,9 +340,9 @@ void AggExprEmitter::emitArrayInit(Address destPtr, cir::ArrayType arrayTy,
const cir::PointerType cirElementPtrType =
builder.getPointerTo(cirElementType);
- auto begin = builder.create<cir::CastOp>(loc, cirElementPtrType,
- cir::CastKind::array_to_ptrdecay,
- destPtr.getPointer());
+ auto begin = cir::CastOp::create(builder, loc, cirElementPtrType,
+ cir::CastKind::array_to_ptrdecay,
+ destPtr.getPointer());
const CharUnits elementSize =
cgf.getContext().getTypeSizeInChars(elementType);
@@ -182,8 +387,8 @@ void AggExprEmitter::emitArrayInit(Address destPtr, cir::ArrayType arrayTy,
// Advance to the start of the rest of the array.
if (numInitElements) {
one = builder.getConstantInt(loc, cgf.PtrDiffTy, 1);
- element = builder.create<cir::PtrStrideOp>(loc, cirElementPtrType,
- element, one);
+ element = cir::PtrStrideOp::create(builder, loc, cirElementPtrType,
+ element, one);
}
// Allocate the temporary variable
@@ -193,25 +398,52 @@ void AggExprEmitter::emitArrayInit(Address destPtr, cir::ArrayType arrayTy,
LValue tmpLV = cgf.makeAddrLValue(tmpAddr, elementPtrType);
cgf.emitStoreThroughLValue(RValue::get(element), tmpLV);
- // TODO(CIR): Replace this part later with cir::DoWhileOp
- for (unsigned i = numInitElements; i != numArrayElements; ++i) {
- cir::LoadOp currentElement = builder.createLoad(loc, tmpAddr);
-
- // Emit the actual filler expression.
- const LValue elementLV = cgf.makeAddrLValue(
- Address(currentElement, cirElementType, elementAlign), elementType);
-
- if (arrayFiller)
- emitInitializationToLValue(arrayFiller, elementLV);
- else
- emitNullInitializationToLValue(loc, elementLV);
-
- // Advance pointer and store them to temporary variable
- one = builder.getConstantInt(loc, cgf.PtrDiffTy, 1);
- cir::PtrStrideOp nextElement =
- builder.createPtrStride(loc, currentElement, one);
- cgf.emitStoreThroughLValue(RValue::get(nextElement), tmpLV);
- }
+  // Compute the end of the array
+ cir::ConstantOp numArrayElementsConst = builder.getConstInt(
+ loc, mlir::cast<cir::IntType>(cgf.PtrDiffTy), numArrayElements);
+ mlir::Value end = cir::PtrStrideOp::create(builder, loc, cirElementPtrType,
+ begin, numArrayElementsConst);
+
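+  // Fill the remaining elements with the array filler (or a null value) using
+  // a do-while loop that advances the temporary element pointer until it
+  // reaches the end of the array.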
+ builder.createDoWhile(
+ loc,
+ /*condBuilder=*/
+ [&](mlir::OpBuilder &b, mlir::Location loc) {
+ cir::LoadOp currentElement = builder.createLoad(loc, tmpAddr);
+ mlir::Type boolTy = cgf.convertType(cgf.getContext().BoolTy);
+ cir::CmpOp cmp = cir::CmpOp::create(
+ builder, loc, boolTy, cir::CmpOpKind::ne, currentElement, end);
+ builder.createCondition(cmp);
+ },
+ /*bodyBuilder=*/
+ [&](mlir::OpBuilder &b, mlir::Location loc) {
+ cir::LoadOp currentElement = builder.createLoad(loc, tmpAddr);
+
+ assert(!cir::MissingFeatures::requiresCleanups());
+
+ // Emit the actual filler expression.
+ LValue elementLV = cgf.makeAddrLValue(
+ Address(currentElement, cirElementType, elementAlign),
+ elementType);
+ if (arrayFiller)
+ emitInitializationToLValue(arrayFiller, elementLV);
+ else
+ emitNullInitializationToLValue(loc, elementLV);
+
+ // Tell the EH cleanup that we finished with the last element.
+ if (cgf.cgm.getLangOpts().Exceptions) {
+ cgf.cgm.errorNYI(loc, "update destructed array element for EH");
+ return;
+ }
+
+        // Advance the pointer and store it in the temporary variable
+ cir::ConstantOp one = builder.getConstInt(
+ loc, mlir::cast<cir::IntType>(cgf.PtrDiffTy), 1);
+ auto nextElement = cir::PtrStrideOp::create(
+ builder, loc, cirElementPtrType, currentElement, one);
+ cgf.emitStoreThroughLValue(RValue::get(nextElement), tmpLV);
+
+ builder.createYield(loc);
+ });
}
}
@@ -376,7 +608,7 @@ void AggExprEmitter::visitCXXParenListOrInitListExpr(
// the disadvantage is that the generated code is more difficult for
// the optimizer, especially with bitfields.
unsigned numInitElements = args.size();
- RecordDecl *record = e->getType()->castAs<RecordType>()->getDecl();
+ auto *record = e->getType()->castAsRecordDecl();
// We'll need to enter cleanup scopes in case any of the element
// initializers throws an exception.
diff --git a/clang/lib/CIR/CodeGen/CIRGenExprCXX.cpp b/clang/lib/CIR/CodeGen/CIRGenExprCXX.cpp
new file mode 100644
index 0000000..a320508
--- /dev/null
+++ b/clang/lib/CIR/CodeGen/CIRGenExprCXX.cpp
@@ -0,0 +1,36 @@
+//===--- CIRGenExprCXX.cpp - Emit CIR Code for C++ expressions ------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// This contains code dealing with code generation of C++ expressions
+//
+//===----------------------------------------------------------------------===//
+
+#include "CIRGenFunction.h"
+#include "clang/AST/ExprCXX.h"
+
+using namespace clang;
+using namespace clang::CIRGen;
+
+RValue CIRGenFunction::emitCXXPseudoDestructorExpr(
+ const CXXPseudoDestructorExpr *expr) {
+ QualType destroyedType = expr->getDestroyedType();
+ if (destroyedType.hasStrongOrWeakObjCLifetime()) {
+ assert(!cir::MissingFeatures::objCLifetime());
+ cgm.errorNYI(expr->getExprLoc(),
+ "emitCXXPseudoDestructorExpr: Objective-C lifetime is NYI");
+ } else {
+ // C++ [expr.pseudo]p1:
+ // The result shall only be used as the operand for the function call
+ // operator (), and the result of such a call has type void. The only
+ // effect is the evaluation of the postfix-expression before the dot or
+ // arrow.
+ emitIgnoredExpr(expr->getBase());
+ }
+
+ return RValue::get(nullptr);
+}
diff --git a/clang/lib/CIR/CodeGen/CIRGenExprComplex.cpp b/clang/lib/CIR/CodeGen/CIRGenExprComplex.cpp
index 3aa170e..cbdd525 100644
--- a/clang/lib/CIR/CodeGen/CIRGenExprComplex.cpp
+++ b/clang/lib/CIR/CodeGen/CIRGenExprComplex.cpp
@@ -62,6 +62,14 @@ public:
mlir::Value VisitImplicitCastExpr(ImplicitCastExpr *e);
mlir::Value VisitInitListExpr(InitListExpr *e);
+ mlir::Value VisitMemberExpr(MemberExpr *me) {
+ if (CIRGenFunction::ConstantEmission constant = cgf.tryEmitAsConstant(me)) {
+ cgf.emitIgnoredExpr(me->getBase());
+ return emitConstant(constant, me);
+ }
+ return emitLoadOfLValue(me);
+ }
+
mlir::Value VisitCompoundLiteralExpr(CompoundLiteralExpr *e) {
return emitLoadOfLValue(e);
}
@@ -116,20 +124,23 @@ public:
mlir::Value emitPromotedComplexOperand(const Expr *e, QualType promotionTy);
+ LValue emitCompoundAssignLValue(
+ const CompoundAssignOperator *e,
+ mlir::Value (ComplexExprEmitter::*func)(const BinOpInfo &),
+ RValue &value);
+
+ mlir::Value emitCompoundAssign(
+ const CompoundAssignOperator *e,
+ mlir::Value (ComplexExprEmitter::*func)(const BinOpInfo &));
+
mlir::Value emitBinAdd(const BinOpInfo &op);
mlir::Value emitBinSub(const BinOpInfo &op);
mlir::Value emitBinMul(const BinOpInfo &op);
+ mlir::Value emitBinDiv(const BinOpInfo &op);
QualType getPromotionType(QualType ty, bool isDivOpCode = false) {
if (auto *complexTy = ty->getAs<ComplexType>()) {
QualType elementTy = complexTy->getElementType();
- if (isDivOpCode && elementTy->isFloatingType() &&
- cgf.getLangOpts().getComplexRange() ==
- LangOptions::ComplexRangeKind::CX_Promoted) {
- cgf.cgm.errorNYI("HigherPrecisionTypeForComplexArithmetic");
- return QualType();
- }
-
if (elementTy.UseExcessPrecision(cgf.getContext()))
return cgf.getContext().getComplexType(cgf.getContext().FloatTy);
}
@@ -145,14 +156,34 @@ public:
e->getType(), e->getOpcode() == BinaryOperatorKind::BO_Div); \
mlir::Value result = emitBin##OP(emitBinOps(e, promotionTy)); \
if (!promotionTy.isNull()) \
- cgf.cgm.errorNYI("Binop emitUnPromotedValue"); \
+ result = cgf.emitUnPromotedValue(result, e->getType()); \
return result; \
}
HANDLEBINOP(Add)
HANDLEBINOP(Sub)
HANDLEBINOP(Mul)
+ HANDLEBINOP(Div)
#undef HANDLEBINOP
+
+ // Compound assignments.
+ mlir::Value VisitBinAddAssign(const CompoundAssignOperator *e) {
+ return emitCompoundAssign(e, &ComplexExprEmitter::emitBinAdd);
+ }
+
+ mlir::Value VisitBinSubAssign(const CompoundAssignOperator *e) {
+ return emitCompoundAssign(e, &ComplexExprEmitter::emitBinSub);
+ }
+
+ mlir::Value VisitBinMulAssign(const CompoundAssignOperator *e) {
+ return emitCompoundAssign(e, &ComplexExprEmitter::emitBinMul);
+ }
+
+ mlir::Value VisitBinDivAssign(const CompoundAssignOperator *e) {
+ return emitCompoundAssign(e, &ComplexExprEmitter::emitBinDiv);
+ }
+
+ mlir::Value VisitVAArgExpr(VAArgExpr *e);
};
} // namespace
@@ -298,10 +329,8 @@ mlir::Value ComplexExprEmitter::emitCast(CastKind ck, Expr *op,
mlir::Value ComplexExprEmitter::VisitUnaryPlus(const UnaryOperator *e) {
QualType promotionTy = getPromotionType(e->getSubExpr()->getType());
mlir::Value result = VisitPlusMinus(e, cir::UnaryOpKind::Plus, promotionTy);
- if (!promotionTy.isNull()) {
- cgf.cgm.errorNYI("ComplexExprEmitter::VisitUnaryPlus emitUnPromotedValue");
- return {};
- }
+ if (!promotionTy.isNull())
+ return cgf.emitUnPromotedValue(result, e->getSubExpr()->getType());
return result;
}
@@ -323,10 +352,8 @@ mlir::Value ComplexExprEmitter::VisitPlusMinus(const UnaryOperator *e,
mlir::Value ComplexExprEmitter::VisitUnaryMinus(const UnaryOperator *e) {
QualType promotionTy = getPromotionType(e->getSubExpr()->getType());
mlir::Value result = VisitPlusMinus(e, cir::UnaryOpKind::Minus, promotionTy);
- if (!promotionTy.isNull()) {
- cgf.cgm.errorNYI("ComplexExprEmitter::VisitUnaryMinus emitUnPromotedValue");
- return {};
- }
+ if (!promotionTy.isNull())
+ return cgf.emitUnPromotedValue(result, e->getSubExpr()->getType());
return result;
}
@@ -572,6 +599,10 @@ mlir::Value ComplexExprEmitter::VisitUnaryNot(const UnaryOperator *e) {
return builder.createNot(op);
}
+mlir::Value ComplexExprEmitter::VisitVAArgExpr(VAArgExpr *e) {
+ return cgf.emitVAArg(e);
+}
+
mlir::Value ComplexExprEmitter::emitPromoted(const Expr *e,
QualType promotionTy) {
e = e->IgnoreParens();
@@ -602,7 +633,7 @@ mlir::Value ComplexExprEmitter::emitPromoted(const Expr *e,
mlir::Value result = Visit(const_cast<Expr *>(e));
if (!promotionTy.isNull())
- cgf.cgm.errorNYI("emitPromoted emitPromotedValue");
+ return cgf.emitPromotedValue(result, promotionTy);
return result;
}
@@ -616,8 +647,12 @@ ComplexExprEmitter::emitPromotedComplexOperand(const Expr *e,
return Visit(const_cast<Expr *>(e));
}
- cgf.cgm.errorNYI("emitPromotedComplexOperand non-complex type");
- return {};
+ if (!promotionTy.isNull()) {
+ QualType complexElementTy =
+ promotionTy->castAs<ComplexType>()->getElementType();
+ return cgf.emitPromotedScalarExpr(e, complexElementTy);
+ }
+ return cgf.emitScalarExpr(e);
}
ComplexExprEmitter::BinOpInfo
@@ -630,16 +665,162 @@ ComplexExprEmitter::emitBinOps(const BinaryOperator *e, QualType promotionTy) {
return binOpInfo;
}
+LValue ComplexExprEmitter::emitCompoundAssignLValue(
+ const CompoundAssignOperator *e,
+ mlir::Value (ComplexExprEmitter::*func)(const BinOpInfo &), RValue &value) {
+ QualType lhsTy = e->getLHS()->getType();
+ QualType rhsTy = e->getRHS()->getType();
+ SourceLocation exprLoc = e->getExprLoc();
+ mlir::Location loc = cgf.getLoc(exprLoc);
+
+ if (lhsTy->getAs<AtomicType>()) {
+    cgf.cgm.errorNYI("emitCompoundAssignLValue AtomicType");
+ return {};
+ }
+
+ BinOpInfo opInfo{loc};
+ opInfo.fpFeatures = e->getFPFeaturesInEffect(cgf.getLangOpts());
+
+ assert(!cir::MissingFeatures::cgFPOptionsRAII());
+
+ // Load the RHS and LHS operands.
+ // __block variables need to have the rhs evaluated first, plus this should
+ // improve codegen a little.
+ QualType promotionTypeCR = getPromotionType(e->getComputationResultType());
+ opInfo.ty = promotionTypeCR.isNull() ? e->getComputationResultType()
+ : promotionTypeCR;
+
+ QualType complexElementTy =
+ opInfo.ty->castAs<ComplexType>()->getElementType();
+ QualType promotionTypeRHS = getPromotionType(rhsTy);
+
+ // The RHS should have been converted to the computation type.
+ if (e->getRHS()->getType()->isRealFloatingType()) {
+ if (!promotionTypeRHS.isNull()) {
+ opInfo.rhs = cgf.emitPromotedScalarExpr(e->getRHS(), promotionTypeRHS);
+ } else {
+ assert(cgf.getContext().hasSameUnqualifiedType(complexElementTy, rhsTy));
+ opInfo.rhs = cgf.emitScalarExpr(e->getRHS());
+ }
+ } else {
+ if (!promotionTypeRHS.isNull()) {
+ opInfo.rhs = cgf.emitPromotedComplexExpr(e->getRHS(), promotionTypeRHS);
+ } else {
+ assert(cgf.getContext().hasSameUnqualifiedType(opInfo.ty, rhsTy));
+ opInfo.rhs = Visit(e->getRHS());
+ }
+ }
+
+ LValue lhs = cgf.emitLValue(e->getLHS());
+
+ // Load from the l-value and convert it.
+ QualType promotionTypeLHS = getPromotionType(e->getComputationLHSType());
+ if (lhsTy->isAnyComplexType()) {
+ mlir::Value lhsValue = emitLoadOfLValue(lhs, exprLoc);
+ QualType destTy = promotionTypeLHS.isNull() ? opInfo.ty : promotionTypeLHS;
+ opInfo.lhs = emitComplexToComplexCast(lhsValue, lhsTy, destTy, exprLoc);
+ } else {
+ mlir::Value lhsVal = cgf.emitLoadOfScalar(lhs, exprLoc);
+ // For floating point real operands we can directly pass the scalar form
+ // to the binary operator emission and potentially get more efficient code.
+ if (lhsTy->isRealFloatingType()) {
+ QualType promotedComplexElementTy;
+ if (!promotionTypeLHS.isNull()) {
+ promotedComplexElementTy =
+ cast<ComplexType>(promotionTypeLHS)->getElementType();
+ if (!cgf.getContext().hasSameUnqualifiedType(promotedComplexElementTy,
+ promotionTypeLHS))
+ lhsVal = cgf.emitScalarConversion(lhsVal, lhsTy,
+ promotedComplexElementTy, exprLoc);
+ } else {
+ if (!cgf.getContext().hasSameUnqualifiedType(complexElementTy, lhsTy))
+ lhsVal = cgf.emitScalarConversion(lhsVal, lhsTy, complexElementTy,
+ exprLoc);
+ }
+ opInfo.lhs = lhsVal;
+ } else {
+ opInfo.lhs = emitScalarToComplexCast(lhsVal, lhsTy, opInfo.ty, exprLoc);
+ }
+ }
+
+ // Expand the binary operator.
+ mlir::Value result = (this->*func)(opInfo);
+
+ // Truncate the result and store it into the LHS lvalue.
+ if (lhsTy->isAnyComplexType()) {
+ mlir::Value resultValue =
+ emitComplexToComplexCast(result, opInfo.ty, lhsTy, exprLoc);
+ emitStoreOfComplex(loc, resultValue, lhs, /*isInit*/ false);
+ value = RValue::getComplex(resultValue);
+ } else {
+ mlir::Value resultValue =
+ cgf.emitComplexToScalarConversion(result, opInfo.ty, lhsTy, exprLoc);
+ cgf.emitStoreOfScalar(resultValue, lhs, /*isInit*/ false);
+ value = RValue::get(resultValue);
+ }
+
+ return lhs;
+}
+
+mlir::Value ComplexExprEmitter::emitCompoundAssign(
+ const CompoundAssignOperator *e,
+ mlir::Value (ComplexExprEmitter::*func)(const BinOpInfo &)) {
+ RValue val;
+ LValue lv = emitCompoundAssignLValue(e, func, val);
+
+ // The result of an assignment in C is the assigned r-value.
+ if (!cgf.getLangOpts().CPlusPlus)
+ return val.getComplexValue();
+
+ // If the lvalue is non-volatile, return the computed value of the assignment.
+ if (!lv.isVolatileQualified())
+ return val.getComplexValue();
+
+ return emitLoadOfLValue(lv, e->getExprLoc());
+}
+
mlir::Value ComplexExprEmitter::emitBinAdd(const BinOpInfo &op) {
assert(!cir::MissingFeatures::fastMathFlags());
assert(!cir::MissingFeatures::cgFPOptionsRAII());
- return builder.create<cir::ComplexAddOp>(op.loc, op.lhs, op.rhs);
+
+ if (mlir::isa<cir::ComplexType>(op.lhs.getType()) &&
+ mlir::isa<cir::ComplexType>(op.rhs.getType()))
+ return builder.create<cir::ComplexAddOp>(op.loc, op.lhs, op.rhs);
+
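+  // When only one operand is complex, add the scalar to the real component
+  // and carry the imaginary component through unchanged.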
+ if (mlir::isa<cir::ComplexType>(op.lhs.getType())) {
+ mlir::Value real = builder.createComplexReal(op.loc, op.lhs);
+ mlir::Value imag = builder.createComplexImag(op.loc, op.lhs);
+ mlir::Value newReal = builder.createAdd(op.loc, real, op.rhs);
+ return builder.createComplexCreate(op.loc, newReal, imag);
+ }
+
+ assert(mlir::isa<cir::ComplexType>(op.rhs.getType()));
+ mlir::Value real = builder.createComplexReal(op.loc, op.rhs);
+ mlir::Value imag = builder.createComplexImag(op.loc, op.rhs);
+ mlir::Value newReal = builder.createAdd(op.loc, op.lhs, real);
+ return builder.createComplexCreate(op.loc, newReal, imag);
}
mlir::Value ComplexExprEmitter::emitBinSub(const BinOpInfo &op) {
assert(!cir::MissingFeatures::fastMathFlags());
assert(!cir::MissingFeatures::cgFPOptionsRAII());
- return builder.create<cir::ComplexSubOp>(op.loc, op.lhs, op.rhs);
+
+ if (mlir::isa<cir::ComplexType>(op.lhs.getType()) &&
+ mlir::isa<cir::ComplexType>(op.rhs.getType()))
+ return builder.create<cir::ComplexSubOp>(op.loc, op.lhs, op.rhs);
+
+ if (mlir::isa<cir::ComplexType>(op.lhs.getType())) {
+ mlir::Value real = builder.createComplexReal(op.loc, op.lhs);
+ mlir::Value imag = builder.createComplexImag(op.loc, op.lhs);
+ mlir::Value newReal = builder.createSub(op.loc, real, op.rhs);
+ return builder.createComplexCreate(op.loc, newReal, imag);
+ }
+
+ assert(mlir::isa<cir::ComplexType>(op.rhs.getType()));
+ mlir::Value real = builder.createComplexReal(op.loc, op.rhs);
+ mlir::Value imag = builder.createComplexImag(op.loc, op.rhs);
+ mlir::Value newReal = builder.createSub(op.loc, op.lhs, real);
+ return builder.createComplexCreate(op.loc, newReal, imag);
}
static cir::ComplexRangeKind
@@ -654,7 +835,7 @@ getComplexRangeAttr(LangOptions::ComplexRangeKind range) {
case LangOptions::CX_Basic:
return cir::ComplexRangeKind::Basic;
case LangOptions::CX_None:
- // The default value for ComplexRangeKind is Full is no option is selected
+ // The default value for ComplexRangeKind is Full if no option is selected
return cir::ComplexRangeKind::Full;
}
}
@@ -662,9 +843,64 @@ getComplexRangeAttr(LangOptions::ComplexRangeKind range) {
mlir::Value ComplexExprEmitter::emitBinMul(const BinOpInfo &op) {
assert(!cir::MissingFeatures::fastMathFlags());
assert(!cir::MissingFeatures::cgFPOptionsRAII());
+
+ if (mlir::isa<cir::ComplexType>(op.lhs.getType()) &&
+ mlir::isa<cir::ComplexType>(op.rhs.getType())) {
+ cir::ComplexRangeKind rangeKind =
+ getComplexRangeAttr(op.fpFeatures.getComplexRange());
+ return builder.create<cir::ComplexMulOp>(op.loc, op.lhs, op.rhs, rangeKind);
+ }
+
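+  // Multiplying a complex value by a real scalar scales both components.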
+ if (mlir::isa<cir::ComplexType>(op.lhs.getType())) {
+ mlir::Value real = builder.createComplexReal(op.loc, op.lhs);
+ mlir::Value imag = builder.createComplexImag(op.loc, op.lhs);
+ mlir::Value newReal = builder.createMul(op.loc, real, op.rhs);
+ mlir::Value newImag = builder.createMul(op.loc, imag, op.rhs);
+ return builder.createComplexCreate(op.loc, newReal, newImag);
+ }
+
+ assert(mlir::isa<cir::ComplexType>(op.rhs.getType()));
+ mlir::Value real = builder.createComplexReal(op.loc, op.rhs);
+ mlir::Value imag = builder.createComplexImag(op.loc, op.rhs);
+ mlir::Value newReal = builder.createMul(op.loc, op.lhs, real);
+ mlir::Value newImag = builder.createMul(op.loc, op.lhs, imag);
+ return builder.createComplexCreate(op.loc, newReal, newImag);
+}
+
+mlir::Value ComplexExprEmitter::emitBinDiv(const BinOpInfo &op) {
+ assert(!cir::MissingFeatures::fastMathFlags());
+ assert(!cir::MissingFeatures::cgFPOptionsRAII());
+
+ // Handle division between two complex values. In the case of complex integer
+ // types mixed with scalar integers, the scalar integer type will always be
+ // promoted to a complex integer value with a zero imaginary component when
+ // the AST is formed.
+ if (mlir::isa<cir::ComplexType>(op.lhs.getType()) &&
+ mlir::isa<cir::ComplexType>(op.rhs.getType())) {
+ cir::ComplexRangeKind rangeKind =
+ getComplexRangeAttr(op.fpFeatures.getComplexRange());
+ return cir::ComplexDivOp::create(builder, op.loc, op.lhs, op.rhs,
+ rangeKind);
+ }
+
+ // The C99 standard (G.5.1) defines division of a complex value by a real
+ // value in the following simplified form.
+ if (mlir::isa<cir::ComplexType>(op.lhs.getType())) {
+ assert(mlir::cast<cir::ComplexType>(op.lhs.getType()).getElementType() ==
+ op.rhs.getType());
+ mlir::Value real = builder.createComplexReal(op.loc, op.lhs);
+ mlir::Value imag = builder.createComplexImag(op.loc, op.lhs);
+ mlir::Value newReal = builder.createFDiv(op.loc, real, op.rhs);
+ mlir::Value newImag = builder.createFDiv(op.loc, imag, op.rhs);
+ return builder.createComplexCreate(op.loc, newReal, newImag);
+ }
+
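+  // Real divided by complex: promote the scalar LHS to a complex value with a
+  // zero imaginary component, then emit a full complex division.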
+ assert(mlir::isa<cir::ComplexType>(op.rhs.getType()));
+ cir::ConstantOp nullValue = builder.getNullValue(op.lhs.getType(), op.loc);
+ mlir::Value lhs = builder.createComplexCreate(op.loc, op.lhs, nullValue);
cir::ComplexRangeKind rangeKind =
getComplexRangeAttr(op.fpFeatures.getComplexRange());
- return builder.create<cir::ComplexMulOp>(op.loc, op.lhs, op.rhs, rangeKind);
+ return cir::ComplexDivOp::create(builder, op.loc, lhs, op.rhs, rangeKind);
}
LValue CIRGenFunction::emitComplexAssignmentLValue(const BinaryOperator *e) {
@@ -685,6 +921,31 @@ mlir::Value CIRGenFunction::emitComplexExpr(const Expr *e) {
return ComplexExprEmitter(*this).Visit(const_cast<Expr *>(e));
}
+using CompoundFunc =
+ mlir::Value (ComplexExprEmitter::*)(const ComplexExprEmitter::BinOpInfo &);
+
+static CompoundFunc getComplexOp(BinaryOperatorKind op) {
+ switch (op) {
+ case BO_MulAssign:
+ return &ComplexExprEmitter::emitBinMul;
+ case BO_DivAssign:
+ return &ComplexExprEmitter::emitBinDiv;
+ case BO_SubAssign:
+ return &ComplexExprEmitter::emitBinSub;
+ case BO_AddAssign:
+ return &ComplexExprEmitter::emitBinAdd;
+ default:
+ llvm_unreachable("unexpected complex compound assignment");
+ }
+}
+
+LValue CIRGenFunction::emitComplexCompoundAssignmentLValue(
+ const CompoundAssignOperator *e) {
+ CompoundFunc op = getComplexOp(e->getOpcode());
+ RValue val;
+ return ComplexExprEmitter(*this).emitCompoundAssignLValue(e, op, val);
+}
+
mlir::Value CIRGenFunction::emitComplexPrePostIncDec(const UnaryOperator *e,
LValue lv,
cir::UnaryOpKind op,
@@ -729,3 +990,28 @@ mlir::Value CIRGenFunction::emitPromotedComplexExpr(const Expr *e,
QualType promotionType) {
return ComplexExprEmitter(*this).emitPromoted(e, promotionType);
}
+
+mlir::Value CIRGenFunction::emitPromotedValue(mlir::Value result,
+ QualType promotionType) {
+ assert(!mlir::cast<cir::ComplexType>(result.getType()).isIntegerComplex() &&
+ "integral complex will never be promoted");
+ return builder.createCast(cir::CastKind::float_complex, result,
+ convertType(promotionType));
+}
+
+mlir::Value CIRGenFunction::emitUnPromotedValue(mlir::Value result,
+ QualType unPromotionType) {
+ assert(!mlir::cast<cir::ComplexType>(result.getType()).isIntegerComplex() &&
+ "integral complex will never be promoted");
+ return builder.createCast(cir::CastKind::float_complex, result,
+ convertType(unPromotionType));
+}
+
+LValue CIRGenFunction::emitScalarCompoundAssignWithComplex(
+ const CompoundAssignOperator *e, mlir::Value &result) {
+ CompoundFunc op = getComplexOp(e->getOpcode());
+ RValue value;
+ LValue ret = ComplexExprEmitter(*this).emitCompoundAssignLValue(e, op, value);
+ result = value.getValue();
+ return ret;
+}
diff --git a/clang/lib/CIR/CodeGen/CIRGenExprConstant.cpp b/clang/lib/CIR/CodeGen/CIRGenExprConstant.cpp
index 5b3bf85..262d2548 100644
--- a/clang/lib/CIR/CodeGen/CIRGenExprConstant.cpp
+++ b/clang/lib/CIR/CodeGen/CIRGenExprConstant.cpp
@@ -285,7 +285,7 @@ emitArrayConstant(CIRGenModule &cgm, mlir::Type desiredType,
mlir::Type commonElementType, unsigned arrayBound,
SmallVectorImpl<mlir::TypedAttr> &elements,
mlir::TypedAttr filler) {
- const CIRGenBuilderTy &builder = cgm.getBuilder();
+ CIRGenBuilderTy &builder = cgm.getBuilder();
unsigned nonzeroLength = arrayBound;
if (elements.size() < nonzeroLength && builder.isNullValue(filler))
@@ -306,6 +306,33 @@ emitArrayConstant(CIRGenModule &cgm, mlir::Type desiredType,
if (trailingZeroes >= 8) {
assert(elements.size() >= nonzeroLength &&
"missing initializer for non-zero element");
+
+ if (commonElementType && nonzeroLength >= 8) {
+ // If all the elements had the same type up to the trailing zeroes and
+ // there are eight or more nonzero elements, emit a struct of two arrays
+ // (the nonzero data and the zeroinitializer).
+ SmallVector<mlir::Attribute, 4> eles;
+ eles.reserve(nonzeroLength);
+ for (const auto &element : elements)
+ eles.push_back(element);
+ auto initial = cir::ConstArrayAttr::get(
+ cir::ArrayType::get(commonElementType, nonzeroLength),
+ mlir::ArrayAttr::get(builder.getContext(), eles));
+ elements.resize(2);
+ elements[0] = initial;
+ } else {
+ // Otherwise, emit a struct with individual elements for each nonzero
+ // initializer, followed by a zeroinitializer array filler.
+ elements.resize(nonzeroLength + 1);
+ }
+
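+    // Emit the trailing zeroes as a single zero-initialized array and clear
+    // the common element type so the mixed-type elements below are emitted as
+    // a record constant.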
+ mlir::Type fillerType =
+ commonElementType
+ ? commonElementType
+ : mlir::cast<cir::ArrayType>(desiredType).getElementType();
+ fillerType = cir::ArrayType::get(fillerType, trailingZeroes);
+ elements.back() = cir::ZeroAttr::get(fillerType);
+ commonElementType = nullptr;
} else if (elements.size() != arrayBound) {
elements.resize(arrayBound, filler);
@@ -325,8 +352,13 @@ emitArrayConstant(CIRGenModule &cgm, mlir::Type desiredType,
mlir::ArrayAttr::get(builder.getContext(), eles));
}
- cgm.errorNYI("array with different type elements");
- return {};
+ SmallVector<mlir::Attribute, 4> eles;
+ eles.reserve(elements.size());
+ for (auto const &element : elements)
+ eles.push_back(element);
+
+ auto arrAttr = mlir::ArrayAttr::get(builder.getContext(), eles);
+ return builder.getAnonConstRecord(arrAttr, /*isPacked=*/true);
}
//===----------------------------------------------------------------------===//
@@ -340,7 +372,11 @@ struct ConstantLValue {
llvm::PointerUnion<mlir::Value, mlir::Attribute> value;
bool hasOffsetApplied;
- ConstantLValue(std::nullptr_t) : value(nullptr), hasOffsetApplied(false) {}
+ /*implicit*/ ConstantLValue(std::nullptr_t)
+ : value(nullptr), hasOffsetApplied(false) {}
+ /*implicit*/ ConstantLValue(cir::GlobalViewAttr address)
+ : value(address), hasOffsetApplied(false) {}
+
ConstantLValue() : value(nullptr), hasOffsetApplied(false) {}
};
@@ -380,6 +416,43 @@ private:
ConstantLValue VisitCXXTypeidExpr(const CXXTypeidExpr *e);
ConstantLValue
VisitMaterializeTemporaryExpr(const MaterializeTemporaryExpr *e);
+
+ /// Return GEP-like value offset
+ mlir::ArrayAttr getOffset(mlir::Type ty) {
+ int64_t offset = value.getLValueOffset().getQuantity();
+ cir::CIRDataLayout layout(cgm.getModule());
+ SmallVector<int64_t, 3> idxVec;
+ cgm.getBuilder().computeGlobalViewIndicesFromFlatOffset(offset, ty, layout,
+ idxVec);
+
+ llvm::SmallVector<mlir::Attribute, 3> indices;
+ for (int64_t i : idxVec) {
+ mlir::IntegerAttr intAttr = cgm.getBuilder().getI32IntegerAttr(i);
+ indices.push_back(intAttr);
+ }
+
+ if (indices.empty())
+ return {};
+ return cgm.getBuilder().getArrayAttr(indices);
+ }
+
+ /// Apply the value offset to the given constant.
+ ConstantLValue applyOffset(ConstantLValue &c) {
+ // Handle attribute constant LValues.
+ if (auto attr = mlir::dyn_cast<mlir::Attribute>(c.value)) {
+ if (auto gv = mlir::dyn_cast<cir::GlobalViewAttr>(attr)) {
+ auto baseTy = mlir::cast<cir::PointerType>(gv.getType()).getPointee();
+ mlir::Type destTy = cgm.getTypes().convertTypeForMem(destType);
+ assert(!gv.getIndices() && "Global view is already indexed");
+ return cir::GlobalViewAttr::get(destTy, gv.getSymbol(),
+ getOffset(baseTy));
+ }
+ llvm_unreachable("Unsupported attribute type to offset");
+ }
+
+ cgm.errorNYI("ConstantLValue: non-attribute offset");
+ return {};
+ }
};
} // namespace
@@ -411,10 +484,8 @@ mlir::Attribute ConstantLValueEmitter::tryEmit() {
return {};
// Apply the offset if necessary and not already done.
- if (!result.hasOffsetApplied) {
- cgm.errorNYI("ConstantLValueEmitter: apply offset");
- return {};
- }
+ if (!result.hasOffsetApplied)
+ value = applyOffset(result).value;
// Convert to the appropriate type; this could be an lvalue for
// an integer. FIXME: performAddrSpaceCast
@@ -453,15 +524,35 @@ ConstantLValueEmitter::tryEmitBase(const APValue::LValueBase &base) {
}
if (auto *fd = dyn_cast<FunctionDecl>(d)) {
- cgm.errorNYI(fd->getSourceRange(),
- "ConstantLValueEmitter: function decl");
- return {};
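+    // The address of a function is a global view of the function symbol,
+    // typed as a pointer to the function type.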
+ cir::FuncOp fop = cgm.getAddrOfFunction(fd);
+ CIRGenBuilderTy &builder = cgm.getBuilder();
+ mlir::MLIRContext *mlirContext = builder.getContext();
+ return cir::GlobalViewAttr::get(
+ builder.getPointerTo(fop.getFunctionType()),
+ mlir::FlatSymbolRefAttr::get(mlirContext, fop.getSymNameAttr()));
}
if (auto *vd = dyn_cast<VarDecl>(d)) {
- cgm.errorNYI(vd->getSourceRange(), "ConstantLValueEmitter: var decl");
- return {};
+ // We can never refer to a variable with local storage.
+ if (!vd->hasLocalStorage()) {
+ if (vd->isFileVarDecl() || vd->hasExternalStorage())
+ return cgm.getAddrOfGlobalVarAttr(vd);
+
+ if (vd->isLocalVarDecl()) {
+ cgm.errorNYI(vd->getSourceRange(),
+ "ConstantLValueEmitter: local var decl");
+ return {};
+ }
+ }
}
+
+  // Classic codegen handles MSGuidDecl, UnnamedGlobalConstantDecl, and
+ // TemplateParamObjectDecl, but it can also fall through from VarDecl,
+ // in which case it silently returns nullptr. For now, let's emit an
+ // error to see what cases we need to handle.
+ cgm.errorNYI(d->getSourceRange(),
+ "ConstantLValueEmitter: unhandled value decl");
+ return {};
}
// Handle typeid(T).
@@ -487,8 +578,7 @@ ConstantLValueEmitter::VisitCompoundLiteralExpr(const CompoundLiteralExpr *e) {
ConstantLValue
ConstantLValueEmitter::VisitStringLiteral(const StringLiteral *e) {
- cgm.errorNYI(e->getSourceRange(), "ConstantLValueEmitter: string literal");
- return {};
+ return cgm.getAddrOfConstantStringFromLiteral(e);
}
ConstantLValue
@@ -590,8 +680,7 @@ mlir::Attribute ConstantEmitter::tryEmitPrivateForVarInit(const VarDecl &d) {
// assignments and whatnots). Since this is for globals shouldn't
// be a problem for the near future.
if (cd->isTrivial() && cd->isDefaultConstructor()) {
- const auto *cxxrd =
- cast<CXXRecordDecl>(ty->getAs<RecordType>()->getDecl());
+ const auto *cxxrd = ty->castAsCXXRecordDecl();
if (cxxrd->getNumBases() != 0) {
// There may not be anything additional to do here, but this will
// force us to pause and test this path when it is supported.
@@ -650,6 +739,16 @@ mlir::Attribute ConstantEmitter::tryEmitPrivateForMemory(const APValue &value,
return (c ? emitForMemory(c, destType) : nullptr);
}
+mlir::Attribute ConstantEmitter::emitAbstract(const Expr *e,
+ QualType destType) {
+ AbstractStateRAII state{*this, true};
+ mlir::Attribute c = mlir::cast<mlir::Attribute>(tryEmitPrivate(e, destType));
+ if (!c)
+ cgm.errorNYI(e->getSourceRange(),
+                 "emitAbstract failed, emit null constant");
+ return c;
+}
+
mlir::Attribute ConstantEmitter::emitAbstract(SourceLocation loc,
const APValue &value,
QualType destType) {
@@ -671,6 +770,32 @@ mlir::Attribute ConstantEmitter::emitForMemory(mlir::Attribute c,
return c;
}
+mlir::TypedAttr ConstantEmitter::tryEmitPrivate(const Expr *e,
+ QualType destType) {
+ assert(!destType->isVoidType() && "can't emit a void constant");
+
+ if (mlir::Attribute c =
+ ConstExprEmitter(*this).Visit(const_cast<Expr *>(e), destType))
+ return llvm::dyn_cast<mlir::TypedAttr>(c);
+
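+  // Fall back to the AST constant evaluator: reference results are evaluated
+  // as lvalues, everything else as rvalues.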
+ Expr::EvalResult result;
+
+ bool success = false;
+
+ if (destType->isReferenceType())
+ success = e->EvaluateAsLValue(result, cgm.getASTContext());
+ else
+ success =
+ e->EvaluateAsRValue(result, cgm.getASTContext(), inConstantContext);
+
+ if (success && !result.hasSideEffects()) {
+ mlir::Attribute c = tryEmitPrivate(result.Val, destType);
+ return llvm::dyn_cast<mlir::TypedAttr>(c);
+ }
+
+ return nullptr;
+}
+
mlir::Attribute ConstantEmitter::tryEmitPrivate(const APValue &value,
QualType destType) {
auto &builder = cgm.getBuilder();
@@ -822,7 +947,7 @@ mlir::Value CIRGenModule::emitNullConstant(QualType t, mlir::Location loc) {
errorNYI("CIRGenModule::emitNullConstant ConstantArrayType");
}
- if (t->getAs<RecordType>())
+ if (t->isRecordType())
errorNYI("CIRGenModule::emitNullConstant RecordType");
assert(t->isMemberDataPointerType() &&
diff --git a/clang/lib/CIR/CodeGen/CIRGenExprScalar.cpp b/clang/lib/CIR/CodeGen/CIRGenExprScalar.cpp
index 32c1c1a..0e000cc 100644
--- a/clang/lib/CIR/CodeGen/CIRGenExprScalar.cpp
+++ b/clang/lib/CIR/CodeGen/CIRGenExprScalar.cpp
@@ -92,6 +92,10 @@ public:
mlir::Value value, CastKind kind,
QualType destTy);
+ mlir::Value emitNullValue(QualType ty, mlir::Location loc) {
+ return cgf.cgm.emitNullConstant(ty, loc);
+ }
+
mlir::Value emitPromotedValue(mlir::Value result, QualType promotionType) {
return builder.createFloatingCast(result, cgf.convertType(promotionType));
}
@@ -182,9 +186,31 @@ public:
return builder.getBool(e->getValue(), cgf.getLoc(e->getExprLoc()));
}
+ mlir::Value VisitCXXScalarValueInitExpr(const CXXScalarValueInitExpr *e) {
+ if (e->getType()->isVoidType())
+ return {};
+
+ return emitNullValue(e->getType(), cgf.getLoc(e->getSourceRange()));
+ }
+
mlir::Value VisitCastExpr(CastExpr *e);
mlir::Value VisitCallExpr(const CallExpr *e);
+ mlir::Value VisitStmtExpr(StmtExpr *e) {
+ CIRGenFunction::StmtExprEvaluation eval(cgf);
+ if (e->getType()->isVoidType()) {
+ (void)cgf.emitCompoundStmt(*e->getSubStmt());
+ return {};
+ }
+
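+    // For a non-void statement expression, materialize the value of the last
+    // statement into a temporary and load the result from it.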
+ Address retAlloca =
+ cgf.createMemTemp(e->getType(), cgf.getLoc(e->getSourceRange()));
+ (void)cgf.emitCompoundStmt(*e->getSubStmt(), &retAlloca);
+
+ return cgf.emitLoadOfScalar(cgf.makeAddrLValue(retAlloca, e->getType()),
+ e->getExprLoc());
+ }
+
mlir::Value VisitArraySubscriptExpr(ArraySubscriptExpr *e) {
if (e->getBase()->getType()->isVectorType()) {
assert(!cir::MissingFeatures::scalableVectors());
@@ -384,6 +410,17 @@ public:
return Visit(e->getReplacement());
}
+ mlir::Value VisitVAArgExpr(VAArgExpr *ve) {
+ QualType ty = ve->getType();
+
+ if (ty->isVariablyModifiedType()) {
+ cgf.cgm.errorNYI(ve->getSourceRange(),
+ "variably modified types in varargs");
+ }
+
+ return cgf.emitVAArg(ve);
+ }
+
mlir::Value VisitUnaryExprOrTypeTraitExpr(const UnaryExprOrTypeTraitExpr *e);
mlir::Value
VisitAbstractConditionalOperator(const AbstractConditionalOperator *e);
@@ -631,6 +668,11 @@ public:
return cgf.emitCXXNewExpr(e);
}
+ mlir::Value VisitCXXThrowExpr(const CXXThrowExpr *e) {
+ cgf.emitCXXThrowExpr(e);
+ return {};
+ }
+
/// Emit a conversion from the specified type to the specified destination
/// type, both of which are CIR scalar types.
/// TODO: do we need ScalarConversionOpts here? Should be done in another
@@ -1060,20 +1102,22 @@ public:
return maybePromoteBoolResult(resOp.getResult(), resTy);
}
+
+ mlir::Value VisitAtomicExpr(AtomicExpr *e) {
+ return cgf.emitAtomicExpr(e).getValue();
+ }
};
LValue ScalarExprEmitter::emitCompoundAssignLValue(
const CompoundAssignOperator *e,
mlir::Value (ScalarExprEmitter::*func)(const BinOpInfo &),
mlir::Value &result) {
+ if (e->getComputationResultType()->isAnyComplexType())
+ return cgf.emitScalarCompoundAssignWithComplex(e, result);
+
QualType lhsTy = e->getLHS()->getType();
BinOpInfo opInfo;
- if (e->getComputationResultType()->isAnyComplexType()) {
- cgf.cgm.errorNYI(result.getLoc(), "complex lvalue assign");
- return LValue();
- }
-
// Emit the RHS first. __block variables need to have the rhs evaluated
// first, plus this should improve codegen a little.
@@ -1877,6 +1921,8 @@ mlir::Value ScalarExprEmitter::VisitCastExpr(CastExpr *ce) {
cgf.getLoc(subExpr->getSourceRange()), cgf.convertType(destTy),
Visit(subExpr));
}
+ case CK_FunctionToPointerDecay:
+ return cgf.emitLValue(subExpr).getPointer();
default:
cgf.getCIRGenModule().errorNYI(subExpr->getSourceRange(),
@@ -1936,11 +1982,9 @@ mlir::Value ScalarExprEmitter::VisitInitListExpr(InitListExpr *e) {
cgf.getLoc(e->getSourceRange()), vectorType, elements);
}
- if (numInitElements == 0) {
- cgf.cgm.errorNYI(e->getSourceRange(),
- "InitListExpr Non VectorType with 0 init elements");
- return {};
- }
+ // C++11 value-initialization for the scalar.
+ if (numInitElements == 0)
+ return emitNullValue(e->getType(), cgf.getLoc(e->getExprLoc()));
return Visit(e->getInit(0));
}
@@ -1955,6 +1999,29 @@ mlir::Value CIRGenFunction::emitScalarConversion(mlir::Value src,
.emitScalarConversion(src, srcTy, dstTy, loc);
}
+mlir::Value CIRGenFunction::emitComplexToScalarConversion(mlir::Value src,
+ QualType srcTy,
+ QualType dstTy,
+ SourceLocation loc) {
+ assert(srcTy->isAnyComplexType() && hasScalarEvaluationKind(dstTy) &&
+ "Invalid complex -> scalar conversion");
+
+ QualType complexElemTy = srcTy->castAs<ComplexType>()->getElementType();
+ if (dstTy->isBooleanType()) {
+ auto kind = complexElemTy->isFloatingType()
+ ? cir::CastKind::float_complex_to_bool
+ : cir::CastKind::int_complex_to_bool;
+ return builder.createCast(getLoc(loc), kind, src, convertType(dstTy));
+ }
+
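+  // Otherwise, extract the real component and convert it to the destination
+  // scalar type.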
+ auto kind = complexElemTy->isFloatingType()
+ ? cir::CastKind::float_complex_to_real
+ : cir::CastKind::int_complex_to_real;
+ mlir::Value real =
+ builder.createCast(getLoc(loc), kind, src, convertType(complexElemTy));
+ return emitScalarConversion(real, complexElemTy, dstTy, loc);
+}
+
mlir::Value ScalarExprEmitter::VisitUnaryLNot(const UnaryOperator *e) {
// Perform vector logical not on comparison with zero vector.
if (e->getType()->isVectorType() &&
diff --git a/clang/lib/CIR/CodeGen/CIRGenFunction.cpp b/clang/lib/CIR/CodeGen/CIRGenFunction.cpp
index eb05c93..deabb94 100644
--- a/clang/lib/CIR/CodeGen/CIRGenFunction.cpp
+++ b/clang/lib/CIR/CodeGen/CIRGenFunction.cpp
@@ -28,8 +28,6 @@ CIRGenFunction::CIRGenFunction(CIRGenModule &cgm, CIRGenBuilderTy &builder,
bool suppressNewContext)
: CIRGenTypeCache(cgm), cgm{cgm}, builder(builder) {
ehStack.setCGF(this);
- currentCleanupStackDepth = 0;
- assert(ehStack.getStackDepth() == 0);
}
CIRGenFunction::~CIRGenFunction() {}
@@ -217,7 +215,7 @@ void CIRGenFunction::declare(mlir::Value addrVal, const Decl *var, QualType ty,
mlir::Location loc, CharUnits alignment,
bool isParam) {
assert(isa<NamedDecl>(var) && "Needs a named decl");
- assert(!cir::MissingFeatures::cgfSymbolTable());
+ assert(!symbolTable.count(var) && "not supposed to be available just yet");
auto allocaOp = addrVal.getDefiningOp<cir::AllocaOp>();
assert(allocaOp && "expected cir::AllocaOp");
@@ -226,6 +224,8 @@ void CIRGenFunction::declare(mlir::Value addrVal, const Decl *var, QualType ty,
allocaOp.setInitAttr(mlir::UnitAttr::get(&getMLIRContext()));
if (ty->isReferenceType() || ty.isConstQualified())
allocaOp.setConstantAttr(mlir::UnitAttr::get(&getMLIRContext()));
+
+ symbolTable.insert(var, allocaOp);
}
void CIRGenFunction::LexicalScope::cleanup() {
@@ -356,11 +356,8 @@ static bool mayDropFunctionReturn(const ASTContext &astContext,
QualType returnType) {
// We can't just discard the return value for a record type with a complex
// destructor or a non-trivially copyable type.
- if (const RecordType *recordType =
- returnType.getCanonicalType()->getAs<RecordType>()) {
- if (const auto *classDecl = dyn_cast<CXXRecordDecl>(recordType->getDecl()))
- return classDecl->hasTrivialDestructor();
- }
+ if (const auto *classDecl = returnType->getAsCXXRecordDecl())
+ return classDecl->hasTrivialDestructor();
return returnType.isTriviallyCopyableType(astContext);
}
@@ -409,6 +406,8 @@ void CIRGenFunction::startFunction(GlobalDecl gd, QualType returnType,
const auto *fd = dyn_cast_or_null<FunctionDecl>(d);
curFuncDecl = d->getNonClosureContext();
+ prologueCleanupDepth = ehStack.stable_begin();
+
mlir::Block *entryBB = &fn.getBlocks().front();
builder.setInsertionPointToStart(entryBB);
@@ -475,22 +474,22 @@ void CIRGenFunction::finishFunction(SourceLocation endLoc) {
// important to do this before we enter the return block or return
// edges will be *really* confused.
// TODO(cir): Use prologueCleanupDepth here.
- bool hasCleanups = ehStack.getStackDepth() != currentCleanupStackDepth;
+ bool hasCleanups = ehStack.stable_begin() != prologueCleanupDepth;
if (hasCleanups) {
assert(!cir::MissingFeatures::generateDebugInfo());
// FIXME(cir): should we clearInsertionPoint? breaks many testcases
- popCleanupBlocks(currentCleanupStackDepth);
+ popCleanupBlocks(prologueCleanupDepth);
}
}
mlir::LogicalResult CIRGenFunction::emitFunctionBody(const clang::Stmt *body) {
- auto result = mlir::LogicalResult::success();
+  // We start with a function-level scope for variables.
+ SymTableScopeTy varScope(symbolTable);
+
if (const CompoundStmt *block = dyn_cast<CompoundStmt>(body))
- emitCompoundStmtWithoutScope(*block);
- else
- result = emitStmt(body, /*useCurrentScope=*/true);
+ return emitCompoundStmtWithoutScope(*block);
- return result;
+ return emitStmt(body, /*useCurrentScope=*/true);
}
static void eraseEmptyAndUnusedBlocks(cir::FuncOp func) {
@@ -530,6 +529,8 @@ cir::FuncOp CIRGenFunction::generateCode(clang::GlobalDecl gd, cir::FuncOp fn,
FunctionArgList args;
QualType retTy = buildFunctionArgList(gd, args);
+ // Create a scope in the symbol table to hold variable declarations.
+ SymTableScopeTy varScope(symbolTable);
{
LexicalScope lexScope(*this, fusedLoc, entryBB);
@@ -553,7 +554,6 @@ cir::FuncOp CIRGenFunction::generateCode(clang::GlobalDecl gd, cir::FuncOp fn,
emitImplicitAssignmentOperatorBody(args);
} else if (body) {
if (mlir::failed(emitFunctionBody(body))) {
- fn.erase();
return nullptr;
}
} else {
@@ -785,9 +785,8 @@ LValue CIRGenFunction::emitLValue(const Expr *e) {
}
if (!ty->isAnyComplexType())
return emitCompoundAssignmentLValue(cast<CompoundAssignOperator>(e));
- cgm.errorNYI(e->getSourceRange(),
- "CompoundAssignOperator with ComplexType");
- return LValue();
+
+ return emitComplexCompoundAssignmentLValue(cast<CompoundAssignOperator>(e));
}
case Expr::CallExprClass:
case Expr::CXXMemberCallExprClass:
@@ -826,12 +825,9 @@ std::string CIRGenFunction::getCounterAggTmpAsString() {
void CIRGenFunction::emitNullInitialization(mlir::Location loc, Address destPtr,
QualType ty) {
// Ignore empty classes in C++.
- if (getLangOpts().CPlusPlus) {
- if (const RecordType *rt = ty->getAs<RecordType>()) {
- if (cast<CXXRecordDecl>(rt->getDecl())->isEmpty())
- return;
- }
- }
+ if (getLangOpts().CPlusPlus)
+ if (const auto *rd = ty->getAsCXXRecordDecl(); rd && rd->isEmpty())
+ return;
// Cast the dest ptr to the appropriate i8 pointer type.
if (builder.isInt8Ty(destPtr.getElementType())) {
@@ -931,6 +927,23 @@ CIRGenFunction::emitArrayLength(const clang::ArrayType *origArrayType,
return builder.getConstInt(*currSrcLoc, SizeTy, countFromCLAs);
}
+mlir::Value CIRGenFunction::emitAlignmentAssumption(
+ mlir::Value ptrValue, QualType ty, SourceLocation loc,
+ SourceLocation assumptionLoc, int64_t alignment, mlir::Value offsetValue) {
+ assert(!cir::MissingFeatures::sanitizers());
+ return cir::AssumeAlignedOp::create(builder, getLoc(assumptionLoc), ptrValue,
+ alignment, offsetValue);
+}
+
+mlir::Value CIRGenFunction::emitAlignmentAssumption(
+ mlir::Value ptrValue, const Expr *expr, SourceLocation assumptionLoc,
+ int64_t alignment, mlir::Value offsetValue) {
+ QualType ty = expr->getType();
+ SourceLocation loc = expr->getExprLoc();
+ return emitAlignmentAssumption(ptrValue, ty, loc, assumptionLoc, alignment,
+ offsetValue);
+}
+
// TODO(cir): Most of this function can be shared between CIRGen
// and traditional LLVM codegen
void CIRGenFunction::emitVariablyModifiedType(QualType type) {
@@ -979,10 +992,6 @@ void CIRGenFunction::emitVariablyModifiedType(QualType type) {
case Type::BitInt:
llvm_unreachable("type class is never variably-modified!");
- case Type::Elaborated:
- type = cast<clang::ElaboratedType>(ty)->getNamedType();
- break;
-
case Type::Adjusted:
type = cast<clang::AdjustedType>(ty)->getAdjustedType();
break;
@@ -1058,4 +1067,10 @@ void CIRGenFunction::emitVariablyModifiedType(QualType type) {
} while (type->isVariablyModifiedType());
}
+Address CIRGenFunction::emitVAListRef(const Expr *e) {
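+  // If the target's va_list is an array type, the expression decays to a
+  // pointer; otherwise use the address of the va_list lvalue.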
+ if (getContext().getBuiltinVaListType()->isArrayType())
+ return emitPointerWithAlignment(e);
+ return emitLValue(e).getAddress();
+}
+
} // namespace clang::CIRGen
diff --git a/clang/lib/CIR/CodeGen/CIRGenFunction.h b/clang/lib/CIR/CodeGen/CIRGenFunction.h
index 3d92545..39bacfb 100644
--- a/clang/lib/CIR/CodeGen/CIRGenFunction.h
+++ b/clang/lib/CIR/CodeGen/CIRGenFunction.h
@@ -23,6 +23,7 @@
#include "Address.h"
#include "clang/AST/ASTContext.h"
+#include "clang/AST/BaseSubobject.h"
#include "clang/AST/CharUnits.h"
#include "clang/AST/Decl.h"
#include "clang/AST/Stmt.h"
@@ -30,6 +31,7 @@
#include "clang/CIR/Dialect/IR/CIRDialect.h"
#include "clang/CIR/MissingFeatures.h"
#include "clang/CIR/TypeEvaluationKind.h"
+#include "llvm/ADT/ScopedHashTable.h"
namespace {
class ScalarExprEmitter;
@@ -102,6 +104,14 @@ public:
/// Sanitizers enabled for this function.
clang::SanitizerSet sanOpts;
+ /// The symbol table maps a variable name to a value in the current scope.
+ /// Entering a function creates a new scope, and the function arguments are
+ /// added to the mapping. When the processing of a function is terminated,
+ /// the scope is destroyed and the mappings created in this scope are
+ /// dropped.
+ using SymTableTy = llvm::ScopedHashTable<const clang::Decl *, mlir::Value>;
+ SymTableTy symbolTable;
+
/// Whether or not a Microsoft-style asm block has been processed within
   /// this function. These can potentially set the return value.
bool sawAsmBlock = false;
@@ -324,6 +334,9 @@ public:
~SourceLocRAIIObject() { restore(); }
};
+ using SymTableScopeTy =
+ llvm::ScopedHashTableScope<const clang::Decl *, mlir::Value>;
+
/// Hold counters for incrementally naming temporaries
unsigned counterRefTmp = 0;
unsigned counterAggTmp = 0;
@@ -337,6 +350,15 @@ public:
const clang::LangOptions &getLangOpts() const { return cgm.getLangOpts(); }
+ /// True if an insertion point is defined. If not, this indicates that the
+ /// current code being emitted is unreachable.
+ /// FIXME(cir): we need to inspect this and perhaps use a cleaner mechanism
+ /// since we don't yet force null insertion point to designate behavior (like
+ /// LLVM's codegen does) and we probably shouldn't.
+ bool haveInsertPoint() const {
+ return builder.getInsertionBlock() != nullptr;
+ }
+
// Wrapper for function prototype sources. Wraps either a FunctionProtoType or
// an ObjCMethodDecl.
struct PrototypeWrapper {
@@ -430,7 +452,8 @@ public:
}
};
- ConstantEmission tryEmitAsConstant(DeclRefExpr *refExpr);
+ ConstantEmission tryEmitAsConstant(const DeclRefExpr *refExpr);
+ ConstantEmission tryEmitAsConstant(const MemberExpr *me);
struct AutoVarEmission {
const clang::VarDecl *Variable;
@@ -448,6 +471,10 @@ public:
/// escaping block.
bool IsEscapingByRef = false;
+ /// True if the variable was emitted as an offload recipe, and thus doesn't
+ /// have the same sort of alloca initialization.
+ bool EmittedAsOffload = false;
+
mlir::Value NRVOFlag{};
struct Invalid {};
@@ -460,11 +487,18 @@ public:
bool wasEmittedAsGlobal() const { return !Addr.isValid(); }
+ bool wasEmittedAsOffloadClause() const { return EmittedAsOffload; }
+
/// Returns the raw, allocated address, which is not necessarily
/// the address of the object itself. It is casted to default
/// address space for address space agnostic languages.
Address getAllocatedAddress() const { return Addr; }
+  // Changes the stored address for the emission. This function should only
+  // be used in extreme cases; it is not needed when modeling normal AST
+  // initialization/variables.
+ void setAllocatedAddress(Address A) { Addr = A; }
+
/// Returns the address of the object within this declaration.
/// Note that this does not chase the forwarding pointer for
/// __block decls.
@@ -489,9 +523,42 @@ public:
void setAddrOfLocalVar(const clang::VarDecl *vd, Address addr) {
assert(!localDeclMap.count(vd) && "Decl already exists in LocalDeclMap!");
localDeclMap.insert({vd, addr});
- // TODO: Add symbol table support
+
+ // Add to the symbol table if not there already.
+ if (symbolTable.count(vd))
+ return;
+ symbolTable.insert(vd, addr.getPointer());
}
+ // A class to allow reverting changes to a var-decl's registration to the
+ // localDeclMap. This is used in cases where things are being inserted into
+ // the variable list but don't follow normal lookup/search rules, like in
+ // OpenACC recipe generation.
+ class DeclMapRevertingRAII {
+ CIRGenFunction &cgf;
+ const VarDecl *vd;
+ bool shouldDelete = false;
+ Address oldAddr = Address::invalid();
+
+ public:
+ DeclMapRevertingRAII(CIRGenFunction &cgf, const VarDecl *vd)
+ : cgf(cgf), vd(vd) {
+ auto mapItr = cgf.localDeclMap.find(vd);
+
+ if (mapItr != cgf.localDeclMap.end())
+ oldAddr = mapItr->second;
+ else
+ shouldDelete = true;
+ }
+
+ ~DeclMapRevertingRAII() {
+ if (shouldDelete)
+ cgf.localDeclMap.erase(vd);
+ else
+ cgf.localDeclMap.insert_or_assign(vd, oldAddr);
+ }
+ };
+
bool shouldNullCheckClassCastValue(const CastExpr *ce);
RValue convertTempToRValue(Address addr, clang::QualType type,
@@ -500,6 +567,33 @@ public:
static bool
isConstructorDelegationValid(const clang::CXXConstructorDecl *ctor);
+ struct VPtr {
+ clang::BaseSubobject base;
+ const clang::CXXRecordDecl *nearestVBase;
+ clang::CharUnits offsetFromNearestVBase;
+ const clang::CXXRecordDecl *vtableClass;
+ };
+
+ using VisitedVirtualBasesSetTy =
+ llvm::SmallPtrSet<const clang::CXXRecordDecl *, 4>;
+
+ using VPtrsVector = llvm::SmallVector<VPtr, 4>;
+ VPtrsVector getVTablePointers(const clang::CXXRecordDecl *vtableClass);
+ void getVTablePointers(clang::BaseSubobject base,
+ const clang::CXXRecordDecl *nearestVBase,
+ clang::CharUnits offsetFromNearestVBase,
+ bool baseIsNonVirtualPrimaryBase,
+ const clang::CXXRecordDecl *vtableClass,
+ VisitedVirtualBasesSetTy &vbases, VPtrsVector &vptrs);
+ /// Return the Value of the vtable pointer member pointed to by thisAddr.
+ mlir::Value getVTablePtr(mlir::Location loc, Address thisAddr,
+ const clang::CXXRecordDecl *vtableClass);
+
+ /// Returns whether we should perform a type checked load when loading a
+ /// virtual function for virtual calls to members of RD. This is generally
+ /// true when both vcall CFI and whole-program-vtables are enabled.
+ bool shouldEmitVTableTypeCheckedLoad(const CXXRecordDecl *rd);
+
/// A scope within which we are constructing the fields of an object which
/// might use a CXXDefaultInitExpr. This stashes away a 'this' value to use if
/// we need to evaluate the CXXDefaultInitExpr within the evaluation.
@@ -548,6 +642,10 @@ public:
return LValue::makeAddr(addr, ty, baseInfo);
}
+ void initializeVTablePointers(mlir::Location loc,
+ const clang::CXXRecordDecl *rd);
+ void initializeVTablePointer(mlir::Location loc, const VPtr &vptr);
+
/// Return the address of a local variable.
Address getAddrOfLocalVar(const clang::VarDecl *vd) {
auto it = localDeclMap.find(vd);
@@ -601,9 +699,13 @@ public:
FunctionArgList args, clang::SourceLocation loc,
clang::SourceLocation startLoc);
+ /// The cleanup depth enclosing all the cleanups associated with the
+ /// parameters.
+ EHScopeStack::stable_iterator prologueCleanupDepth;
+
/// Takes the old cleanup stack size and emits the cleanup blocks
/// that have been added.
- void popCleanupBlocks(size_t oldCleanupStackDepth);
+ void popCleanupBlocks(EHScopeStack::stable_iterator oldCleanupStackDepth);
void popCleanupBlock();
/// Push a cleanup to be run at the end of the current full-expression. Safe
@@ -622,7 +724,7 @@ public:
/// Enters a new scope for capturing cleanups, all of which
/// will be executed once the scope is exited.
class RunCleanupsScope {
- size_t cleanupStackDepth, oldCleanupStackDepth;
+ EHScopeStack::stable_iterator cleanupStackDepth, oldCleanupStackDepth;
protected:
bool performCleanup;
@@ -638,7 +740,7 @@ public:
/// Enter a new cleanup scope.
explicit RunCleanupsScope(CIRGenFunction &cgf)
: performCleanup(true), cgf(cgf) {
- cleanupStackDepth = cgf.ehStack.getStackDepth();
+ cleanupStackDepth = cgf.ehStack.stable_begin();
oldCleanupStackDepth = cgf.currentCleanupStackDepth;
cgf.currentCleanupStackDepth = cleanupStackDepth;
}
@@ -663,7 +765,7 @@ public:
};
// Cleanup stack depth of the RunCleanupsScope that was pushed most recently.
- size_t currentCleanupStackDepth;
+ EHScopeStack::stable_iterator currentCleanupStackDepth = ehStack.stable_end();
public:
/// Represents a scope, including function bodies, compound statements, and
@@ -825,6 +927,18 @@ public:
/// ----------------------
/// CIR emit functions
/// ----------------------
+public:
+ mlir::Value emitAlignmentAssumption(mlir::Value ptrValue, QualType ty,
+ SourceLocation loc,
+ SourceLocation assumptionLoc,
+ int64_t alignment,
+ mlir::Value offsetValue = nullptr);
+
+ mlir::Value emitAlignmentAssumption(mlir::Value ptrValue, const Expr *expr,
+ SourceLocation assumptionLoc,
+ int64_t alignment,
+ mlir::Value offsetValue = nullptr);
+
private:
void emitAndUpdateRetAlloca(clang::QualType type, mlir::Location loc,
clang::CharUnits alignment);
@@ -875,7 +989,13 @@ public:
QualType &baseType, Address &addr);
LValue emitArraySubscriptExpr(const clang::ArraySubscriptExpr *e);
- Address emitArrayToPointerDecay(const Expr *array);
+ Address emitArrayToPointerDecay(const Expr *e,
+ LValueBaseInfo *baseInfo = nullptr);
+
+ mlir::LogicalResult emitAsmStmt(const clang::AsmStmt &s);
+
+ RValue emitAtomicExpr(AtomicExpr *e);
+ void emitAtomicInit(Expr *init, LValue dest);
AutoVarEmission emitAutoVarAlloca(const clang::VarDecl &d,
mlir::OpBuilder::InsertPoint ip = {});
@@ -886,6 +1006,11 @@ public:
void emitAutoVarDecl(const clang::VarDecl &d);
void emitAutoVarCleanups(const AutoVarEmission &emission);
+ /// Emit the initializer for an allocated variable. If this call is not
+ /// associated with the call to emitAutoVarAlloca (as the address of the
+ /// emission is not directly an alloca), the allocatedSeparately parameter can
+ /// be used to suppress the assertions. However, this should only be used in
+ /// extreme cases, as it doesn't properly reflect the language/AST.
void emitAutoVarInit(const AutoVarEmission &emission);
void emitAutoVarTypeCleanup(const AutoVarEmission &emission,
clang::QualType::DestructionKind dtorKind);
@@ -944,6 +1069,11 @@ public:
/// sanitizer is enabled, a runtime check is also emitted.
mlir::Value emitCheckedArgForAssume(const Expr *e);
+ /// Emit a conversion from the specified complex type to the specified
+ /// destination type, where the destination type is an LLVM scalar type.
+ mlir::Value emitComplexToScalarConversion(mlir::Value src, QualType srcTy,
+ QualType dstTy, SourceLocation loc);
+
LValue emitCompoundAssignmentLValue(const clang::CompoundAssignOperator *e);
LValue emitCompoundLiteralLValue(const CompoundLiteralExpr *e);
@@ -1002,7 +1132,7 @@ public:
RValue emitCXXMemberOrOperatorMemberCallExpr(
const clang::CallExpr *ce, const clang::CXXMethodDecl *md,
ReturnValueSlot returnValue, bool hasQualifier,
- clang::NestedNameSpecifier *qualifier, bool isArrow,
+ clang::NestedNameSpecifier qualifier, bool isArrow,
const clang::Expr *base);
mlir::Value emitCXXNewExpr(const CXXNewExpr *e);
@@ -1011,6 +1141,10 @@ public:
const CXXMethodDecl *md,
ReturnValueSlot returnValue);
+ RValue emitCXXPseudoDestructorExpr(const CXXPseudoDestructorExpr *expr);
+
+ void emitCXXThrowExpr(const CXXThrowExpr *e);
+
void emitCtorPrologue(const clang::CXXConstructorDecl *ctor,
clang::CXXCtorType ctorType, FunctionArgList &args);
@@ -1038,6 +1172,8 @@ public:
mlir::LogicalResult emitFunctionBody(const clang::Stmt *body);
+ mlir::LogicalResult emitGotoStmt(const clang::GotoStmt &s);
+
void emitImplicitAssignmentOperatorBody(FunctionArgList &args);
void emitInitializerForField(clang::FieldDecl *field, LValue lhs,
@@ -1047,6 +1183,8 @@ public:
mlir::Value emitPromotedScalarExpr(const Expr *e, QualType promotionType);
+ mlir::Value emitPromotedValue(mlir::Value result, QualType promotionType);
+
/// Emit the computation of the specified expression of scalar type.
mlir::Value emitScalarExpr(const clang::Expr *e);
@@ -1076,10 +1214,18 @@ public:
cir::UnaryOpKind op, bool isPre);
LValue emitComplexAssignmentLValue(const BinaryOperator *e);
+ LValue emitComplexCompoundAssignmentLValue(const CompoundAssignOperator *e);
+ LValue emitScalarCompoundAssignWithComplex(const CompoundAssignOperator *e,
+ mlir::Value &result);
- void emitCompoundStmt(const clang::CompoundStmt &s);
+ mlir::LogicalResult
+ emitCompoundStmt(const clang::CompoundStmt &s, Address *lastValue = nullptr,
+ AggValueSlot slot = AggValueSlot::ignored());
- void emitCompoundStmtWithoutScope(const clang::CompoundStmt &s);
+ mlir::LogicalResult
+ emitCompoundStmtWithoutScope(const clang::CompoundStmt &s,
+ Address *lastValue = nullptr,
+ AggValueSlot slot = AggValueSlot::ignored());
void emitDecl(const clang::Decl &d, bool evaluateConditionDecl = false);
mlir::LogicalResult emitDeclStmt(const clang::DeclStmt &s);
@@ -1117,6 +1263,9 @@ public:
mlir::Value emitOpOnBoolExpr(mlir::Location loc, const clang::Expr *cond);
+ mlir::LogicalResult emitLabel(const clang::LabelDecl &d);
+ mlir::LogicalResult emitLabelStmt(const clang::LabelStmt &s);
+
mlir::LogicalResult emitIfStmt(const clang::IfStmt &s);
/// Emit code to compute the specified expression,
@@ -1173,7 +1322,7 @@ public:
/// reasonable to just ignore the returned alignment when it isn't from an
/// explicit source.
Address emitPointerWithAlignment(const clang::Expr *expr,
- LValueBaseInfo *baseInfo);
+ LValueBaseInfo *baseInfo = nullptr);
/// Emits a reference binding to the passed in expression.
RValue emitReferenceBindingToExpr(const Expr *e);
@@ -1235,6 +1384,8 @@ public:
LValue emitUnaryOpLValue(const clang::UnaryOperator *e);
+ mlir::Value emitUnPromotedValue(mlir::Value result, QualType unPromotionType);
+
/// Emit a reached-unreachable diagnostic if \p loc is valid and runtime
/// checking is enabled. Otherwise, just emit an unreachable instruction.
/// \p createNewBlock indicates whether to create a new block for the IR
@@ -1302,7 +1453,7 @@ public:
mlir::OpBuilder::InsertionGuard guard(builder);
builder.restoreInsertionPoint(outermostConditional->getInsertPoint());
builder.createStore(
- value.getLoc(), value, addr,
+ value.getLoc(), value, addr, /*isVolatile=*/false,
mlir::IntegerAttr::get(
mlir::IntegerType::get(value.getContext(), 64),
(uint64_t)addr.getAlignment().getAsAlign().value()));
@@ -1313,6 +1464,27 @@ public:
// we know if a temporary should be destroyed conditionally.
ConditionalEvaluation *outermostConditional = nullptr;
+ /// An RAII object to record that we're evaluating a statement
+ /// expression.
+ class StmtExprEvaluation {
+ CIRGenFunction &cgf;
+
+ /// We have to save the outermost conditional: cleanups in a
+ /// statement expression aren't conditional just because the
+ /// StmtExpr is.
+ ConditionalEvaluation *savedOutermostConditional;
+
+ public:
+ StmtExprEvaluation(CIRGenFunction &cgf)
+ : cgf(cgf), savedOutermostConditional(cgf.outermostConditional) {
+ cgf.outermostConditional = nullptr;
+ }
+
+ ~StmtExprEvaluation() {
+ cgf.outermostConditional = savedOutermostConditional;
+ }
+ };
+
template <typename FuncTy>
ConditionalInfo emitConditionalBlocks(const AbstractConditionalOperator *e,
const FuncTy &branchGenFunc);
@@ -1321,6 +1493,35 @@ public:
const clang::Stmt *thenS,
const clang::Stmt *elseS);
+ /// Build a "reference" to a va_list; this is either the address or the value
+ /// of the expression, depending on how va_list is defined.
+ Address emitVAListRef(const Expr *e);
+
+ /// Emits the start of a CIR variable-argument operation (`cir.va_start`)
+ ///
+ /// \param vaList A reference to the \c va_list as emitted by either
+ /// \c emitVAListRef or \c emitMSVAListRef.
+ ///
+ /// \param count The number of arguments in \c vaList
+ void emitVAStart(mlir::Value vaList, mlir::Value count);
+
+ /// Emits the end of a CIR variable-argument operation (`cir.va_end`)
+ ///
+ /// \param vaList A reference to the \c va_list as emitted by either
+ /// \c emitVAListRef or \c emitMSVAListRef.
+ void emitVAEnd(mlir::Value vaList);
+
+ /// Generate code to get an argument from the passed in pointer
+ /// and update it accordingly.
+ ///
+ /// \param ve The \c VAArgExpr for which to generate code.
+ ///
+ /// \returns SSA value with the argument.
+ mlir::Value emitVAArg(VAArgExpr *ve);
+
/// ----------------------
/// CIR build helpers
/// -----------------
diff --git a/clang/lib/CIR/CodeGen/CIRGenItaniumCXXABI.cpp b/clang/lib/CIR/CodeGen/CIRGenItaniumCXXABI.cpp
index e5e4c68..ab7a069 100644
--- a/clang/lib/CIR/CodeGen/CIRGenItaniumCXXABI.cpp
+++ b/clang/lib/CIR/CodeGen/CIRGenItaniumCXXABI.cpp
@@ -22,6 +22,7 @@
#include "clang/AST/ExprCXX.h"
#include "clang/AST/GlobalDecl.h"
+#include "clang/AST/VTableBuilder.h"
#include "clang/CIR/MissingFeatures.h"
#include "llvm/Support/ErrorHandling.h"
@@ -31,6 +32,10 @@ using namespace clang::CIRGen;
namespace {
class CIRGenItaniumCXXABI : public CIRGenCXXABI {
+protected:
+ /// All the vtables which have been defined.
+ llvm::DenseMap<const CXXRecordDecl *, cir::GlobalOp> vtables;
+
public:
CIRGenItaniumCXXABI(CIRGenModule &cgm) : CIRGenCXXABI(cgm) {
assert(!cir::MissingFeatures::cxxabiUseARMMethodPtrABI());
@@ -51,6 +56,8 @@ public:
bool delegating, Address thisAddr,
QualType thisTy) override;
+ void emitRethrow(CIRGenFunction &cgf, bool isNoReturn) override;
+
bool useThunkForDtorVariant(const CXXDestructorDecl *dtor,
CXXDtorType dt) const override {
// Itanium does not emit any destructor variant as an inline thunk.
@@ -58,6 +65,30 @@ public:
// emitted with external linkage or as linkonce if they are inline and used.
return false;
}
+
+ bool isVirtualOffsetNeededForVTableField(CIRGenFunction &cgf,
+ CIRGenFunction::VPtr vptr) override;
+
+ cir::GlobalOp getAddrOfVTable(const CXXRecordDecl *rd,
+ CharUnits vptrOffset) override;
+ CIRGenCallee getVirtualFunctionPointer(CIRGenFunction &cgf,
+ clang::GlobalDecl gd, Address thisAddr,
+ mlir::Type ty,
+ SourceLocation loc) override;
+
+ mlir::Value getVTableAddressPoint(BaseSubobject base,
+ const CXXRecordDecl *vtableClass) override;
+
+ mlir::Value getVTableAddressPointInStructor(
+ CIRGenFunction &cgf, const clang::CXXRecordDecl *vtableClass,
+ clang::BaseSubobject base,
+ const clang::CXXRecordDecl *nearestVBase) override;
+ void emitVTableDefinitions(CIRGenVTables &cgvt,
+ const CXXRecordDecl *rd) override;
+
+ bool doStructorsInitializeVPtrs(const CXXRecordDecl *vtableClass) override {
+ return true;
+ }
};
} // namespace
@@ -243,6 +274,67 @@ bool CIRGenItaniumCXXABI::needsVTTParameter(GlobalDecl gd) {
return false;
}
+void CIRGenItaniumCXXABI::emitVTableDefinitions(CIRGenVTables &cgvt,
+ const CXXRecordDecl *rd) {
+ cir::GlobalOp vtable = getAddrOfVTable(rd, CharUnits());
+ if (vtable.hasInitializer())
+ return;
+
+ ItaniumVTableContext &vtContext = cgm.getItaniumVTableContext();
+ const VTableLayout &vtLayout = vtContext.getVTableLayout(rd);
+ cir::GlobalLinkageKind linkage = cgm.getVTableLinkage(rd);
+ mlir::Attribute rtti =
+ cgm.getAddrOfRTTIDescriptor(cgm.getLoc(rd->getBeginLoc()),
+ cgm.getASTContext().getCanonicalTagType(rd));
+
+ // Classic codegen uses ConstantInitBuilder here, which is a very general
+ // and feature-rich class to generate initializers for global values.
+ // For now, this is using a simpler approach to create the initializer in CIR.
+ cgvt.createVTableInitializer(vtable, vtLayout, rtti,
+ cir::isLocalLinkage(linkage));
+
+ // Set the correct linkage.
+ vtable.setLinkage(linkage);
+
+ if (cgm.supportsCOMDAT() && cir::isWeakForLinker(linkage))
+ vtable.setComdat(true);
+
+ // Set the right visibility.
+ cgm.setGVProperties(vtable, rd);
+
+ // If this is the magic class __cxxabiv1::__fundamental_type_info,
+ // we will emit the typeinfo for the fundamental types. This is the
+ // same behaviour as GCC.
+ const DeclContext *DC = rd->getDeclContext();
+ if (rd->getIdentifier() &&
+ rd->getIdentifier()->isStr("__fundamental_type_info") &&
+ isa<NamespaceDecl>(DC) && cast<NamespaceDecl>(DC)->getIdentifier() &&
+ cast<NamespaceDecl>(DC)->getIdentifier()->isStr("__cxxabiv1") &&
+ DC->getParent()->isTranslationUnit()) {
+ cgm.errorNYI(rd->getSourceRange(),
+ "emitVTableDefinitions: __fundamental_type_info");
+ }
+
+ auto vtableAsGlobalValue = dyn_cast<cir::CIRGlobalValueInterface>(*vtable);
+ assert(vtableAsGlobalValue && "VTable must support CIRGlobalValueInterface");
+ // Always emit type metadata on non-available_externally definitions, and on
+ // available_externally definitions if we are performing whole program
+ // devirtualization. For WPD we need the type metadata on all vtable
+ // definitions to ensure we associate derived classes with base classes
+ // defined in headers but with a strong definition only in a shared
+ // library.
+ assert(!cir::MissingFeatures::vtableEmitMetadata());
+ if (cgm.getCodeGenOpts().WholeProgramVTables) {
+ cgm.errorNYI(rd->getSourceRange(),
+ "emitVTableDefinitions: WholeProgramVTables");
+ }
+
+ assert(!cir::MissingFeatures::vtableRelativeLayout());
+ if (vtContext.isRelativeLayout()) {
+ cgm.errorNYI(rd->getSourceRange(), "vtableRelativeLayout");
+ }
+}
+
void CIRGenItaniumCXXABI::emitDestructorCall(
CIRGenFunction &cgf, const CXXDestructorDecl *dd, CXXDtorType type,
bool forVirtualBase, bool delegating, Address thisAddr, QualType thisTy) {
@@ -262,6 +354,44 @@ void CIRGenItaniumCXXABI::emitDestructorCall(
vttTy, nullptr);
}
+// The idea here is to create a separate block for the throw with an
+// `UnreachableOp` as the terminator. So, we branch from the current block
+// to the throw block and create a block for the remaining operations.
+static void insertThrowAndSplit(mlir::OpBuilder &builder, mlir::Location loc,
+ mlir::Value exceptionPtr = {},
+ mlir::FlatSymbolRefAttr typeInfo = {},
+ mlir::FlatSymbolRefAttr dtor = {}) {
+ mlir::Block *currentBlock = builder.getInsertionBlock();
+ mlir::Region *region = currentBlock->getParent();
+
+ if (currentBlock->empty()) {
+ cir::ThrowOp::create(builder, loc, exceptionPtr, typeInfo, dtor);
+ cir::UnreachableOp::create(builder, loc);
+ } else {
+ mlir::Block *throwBlock = builder.createBlock(region);
+
+ cir::ThrowOp::create(builder, loc, exceptionPtr, typeInfo, dtor);
+ cir::UnreachableOp::create(builder, loc);
+
+ builder.setInsertionPointToEnd(currentBlock);
+ cir::BrOp::create(builder, loc, throwBlock);
+ }
+
+ (void)builder.createBlock(region);
+}
+
+void CIRGenItaniumCXXABI::emitRethrow(CIRGenFunction &cgf, bool isNoReturn) {
+ // void __cxa_rethrow();
+ if (isNoReturn) {
+ CIRGenBuilderTy &builder = cgf.getBuilder();
+ assert(cgf.currSrcLoc && "expected source location");
+ mlir::Location loc = *cgf.currSrcLoc;
+ insertThrowAndSplit(builder, loc);
+ } else {
+ cgm.errorNYI("emitRethrow with isNoReturn false");
+ }
+}
+
CIRGenCXXABI *clang::CIRGen::CreateCIRGenItaniumCXXABI(CIRGenModule &cgm) {
switch (cgm.getASTContext().getCXXABIKind()) {
case TargetCXXABI::GenericItanium:
@@ -278,3 +408,136 @@ CIRGenCXXABI *clang::CIRGen::CreateCIRGenItaniumCXXABI(CIRGenModule &cgm) {
llvm_unreachable("bad or NYI ABI kind");
}
}
+
+cir::GlobalOp CIRGenItaniumCXXABI::getAddrOfVTable(const CXXRecordDecl *rd,
+ CharUnits vptrOffset) {
+ assert(vptrOffset.isZero() && "Itanium ABI only supports zero vptr offsets");
+ cir::GlobalOp &vtable = vtables[rd];
+ if (vtable)
+ return vtable;
+
+ // Queue up this vtable for possible deferred emission.
+ assert(!cir::MissingFeatures::deferredVtables());
+
+ SmallString<256> name;
+ llvm::raw_svector_ostream out(name);
+ getMangleContext().mangleCXXVTable(rd, out);
+
+ const VTableLayout &vtLayout =
+ cgm.getItaniumVTableContext().getVTableLayout(rd);
+ mlir::Type vtableType = cgm.getVTables().getVTableType(vtLayout);
+
+ // Use pointer alignment for the vtable. Otherwise we would align them based
+ // on the size of the initializer which doesn't make sense as only single
+ // values are read.
+ unsigned ptrAlign = cgm.getItaniumVTableContext().isRelativeLayout()
+ ? 32
+ : cgm.getTarget().getPointerAlign(LangAS::Default);
+
+ vtable = cgm.createOrReplaceCXXRuntimeVariable(
+ cgm.getLoc(rd->getSourceRange()), name, vtableType,
+ cir::GlobalLinkageKind::ExternalLinkage,
+ cgm.getASTContext().toCharUnitsFromBits(ptrAlign));
+ // LLVM codegen handles unnamedAddr
+ assert(!cir::MissingFeatures::opGlobalUnnamedAddr());
+
+ // In MS C++ if you have a class with virtual functions in which you are using
+ // selective member import/export, then all virtual functions must be exported
+ // unless they are inline, otherwise a link error will result. To match this
+ // behavior, for such classes, we dllimport the vtable if it is defined
+ // externally and all the non-inline virtual methods are marked dllimport, and
+ // we dllexport the vtable if it is defined in this TU and all the non-inline
+ // virtual methods are marked dllexport.
+ if (cgm.getTarget().hasPS4DLLImportExport())
+ cgm.errorNYI(rd->getSourceRange(),
+ "getAddrOfVTable: PS4 DLL import/export");
+
+ cgm.setGVProperties(vtable, rd);
+ return vtable;
+}
+
+CIRGenCallee CIRGenItaniumCXXABI::getVirtualFunctionPointer(
+ CIRGenFunction &cgf, clang::GlobalDecl gd, Address thisAddr, mlir::Type ty,
+ SourceLocation srcLoc) {
+ CIRGenBuilderTy &builder = cgm.getBuilder();
+ mlir::Location loc = cgf.getLoc(srcLoc);
+ cir::PointerType tyPtr = builder.getPointerTo(ty);
+ auto *methodDecl = cast<CXXMethodDecl>(gd.getDecl());
+ mlir::Value vtable = cgf.getVTablePtr(loc, thisAddr, methodDecl->getParent());
+
+ uint64_t vtableIndex = cgm.getItaniumVTableContext().getMethodVTableIndex(gd);
+ mlir::Value vfunc{};
+ if (cgf.shouldEmitVTableTypeCheckedLoad(methodDecl->getParent())) {
+ cgm.errorNYI(loc, "getVirtualFunctionPointer: emitVTableTypeCheckedLoad");
+ } else {
+ assert(!cir::MissingFeatures::emitTypeMetadataCodeForVCall());
+
+ mlir::Value vfuncLoad;
+ if (cgm.getItaniumVTableContext().isRelativeLayout()) {
+ assert(!cir::MissingFeatures::vtableRelativeLayout());
+ cgm.errorNYI(loc, "getVirtualFunctionPointer: isRelativeLayout");
+ } else {
+ auto vtableSlotPtr = cir::VTableGetVirtualFnAddrOp::create(
+ builder, loc, builder.getPointerTo(tyPtr), vtable, vtableIndex);
+ vfuncLoad = builder.createAlignedLoad(
+ loc, vtableSlotPtr, cgf.getPointerAlign().getQuantity());
+ }
+
+ // Add !invariant.load metadata to the virtual function load to indicate
+ // that the function does not change inside the vtable.
+ // It's safe to add it without -fstrict-vtable-pointers, but it would not
+ // help devirtualization, because it only matters when there are two loads
+ // of the same virtual function from the same vtable load, which won't
+ // happen without devirtualization enabled via -fstrict-vtable-pointers.
+ if (cgm.getCodeGenOpts().OptimizationLevel > 0 &&
+ cgm.getCodeGenOpts().StrictVTablePointers) {
+ cgm.errorNYI(loc, "getVirtualFunctionPointer: strictVTablePointers");
+ }
+ vfunc = vfuncLoad;
+ }
+
+ CIRGenCallee callee(gd, vfunc.getDefiningOp());
+ return callee;
+}
+
+mlir::Value
+CIRGenItaniumCXXABI::getVTableAddressPoint(BaseSubobject base,
+ const CXXRecordDecl *vtableClass) {
+ cir::GlobalOp vtable = getAddrOfVTable(vtableClass, CharUnits());
+
+ // Find the appropriate vtable within the vtable group, and the address point
+ // within that vtable.
+ VTableLayout::AddressPointLocation addressPoint =
+ cgm.getItaniumVTableContext()
+ .getVTableLayout(vtableClass)
+ .getAddressPoint(base);
+
+ mlir::OpBuilder &builder = cgm.getBuilder();
+ auto vtablePtrTy = cir::VPtrType::get(builder.getContext());
+
+ return builder.create<cir::VTableAddrPointOp>(
+ cgm.getLoc(vtableClass->getSourceRange()), vtablePtrTy,
+ mlir::FlatSymbolRefAttr::get(vtable.getSymNameAttr()),
+ cir::AddressPointAttr::get(cgm.getBuilder().getContext(),
+ addressPoint.VTableIndex,
+ addressPoint.AddressPointIndex));
+}
+
+mlir::Value CIRGenItaniumCXXABI::getVTableAddressPointInStructor(
+ CIRGenFunction &cgf, const clang::CXXRecordDecl *vtableClass,
+ clang::BaseSubobject base, const clang::CXXRecordDecl *nearestVBase) {
+
+ if ((base.getBase()->getNumVBases() || nearestVBase != nullptr) &&
+ needsVTTParameter(cgf.curGD)) {
+ cgm.errorNYI(cgf.curFuncDecl->getLocation(),
+ "getVTableAddressPointInStructorWithVTT");
+ }
+ return getVTableAddressPoint(base, vtableClass);
+}
+
+bool CIRGenItaniumCXXABI::isVirtualOffsetNeededForVTableField(
+ CIRGenFunction &cgf, CIRGenFunction::VPtr vptr) {
+ if (vptr.nearestVBase == nullptr)
+ return false;
+ return needsVTTParameter(cgf.curGD);
+}
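
For orientation, a small hypothetical C++ hierarchy of the sort this Itanium vtable path handles: defining the key function causes the vtable to be emitted through emitVTableDefinitions, and the virtual call resolves through getVirtualFunctionPointer. The example is a sketch, not code from the patch.

struct Shape {
  virtual void draw();           // key function
  virtual ~Shape() = default;
};

void Shape::draw() {}            // key-function definition triggers vtable emission

void render(Shape *s) {
  s->draw();                     // virtual call loads the slot from the vtable
}
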
diff --git a/clang/lib/CIR/CodeGen/CIRGenModule.cpp b/clang/lib/CIR/CodeGen/CIRGenModule.cpp
index 425250d..c7f5484 100644
--- a/clang/lib/CIR/CodeGen/CIRGenModule.cpp
+++ b/clang/lib/CIR/CodeGen/CIRGenModule.cpp
@@ -64,7 +64,7 @@ CIRGenModule::CIRGenModule(mlir::MLIRContext &mlirContext,
langOpts(astContext.getLangOpts()), codeGenOpts(cgo),
theModule{mlir::ModuleOp::create(mlir::UnknownLoc::get(&mlirContext))},
diags(diags), target(astContext.getTargetInfo()),
- abi(createCXXABI(*this)), genTypes(*this) {
+ abi(createCXXABI(*this)), genTypes(*this), vtables(*this) {
// Initialize cached types
VoidTy = cir::VoidType::get(&getMLIRContext());
@@ -75,6 +75,7 @@ CIRGenModule::CIRGenModule(mlir::MLIRContext &mlirContext,
SInt64Ty = cir::IntType::get(&getMLIRContext(), 64, /*isSigned=*/true);
SInt128Ty = cir::IntType::get(&getMLIRContext(), 128, /*isSigned=*/true);
UInt8Ty = cir::IntType::get(&getMLIRContext(), 8, /*isSigned=*/false);
+ UInt8PtrTy = cir::PointerType::get(UInt8Ty);
UInt16Ty = cir::IntType::get(&getMLIRContext(), 16, /*isSigned=*/false);
UInt32Ty = cir::IntType::get(&getMLIRContext(), 32, /*isSigned=*/false);
UInt64Ty = cir::IntType::get(&getMLIRContext(), 64, /*isSigned=*/false);
@@ -102,6 +103,11 @@ CIRGenModule::CIRGenModule(mlir::MLIRContext &mlirContext,
PtrDiffTy =
cir::IntType::get(&getMLIRContext(), sizeTypeSize, /*isSigned=*/true);
+ std::optional<cir::SourceLanguage> sourceLanguage = getCIRSourceLanguage();
+ if (sourceLanguage)
+ theModule->setAttr(
+ cir::CIRDialect::getSourceLanguageAttrName(),
+ cir::SourceLanguageAttr::get(&mlirContext, *sourceLanguage));
theModule->setAttr(cir::CIRDialect::getTripleAttrName(),
builder.getStringAttr(getTriple().str()));
@@ -437,13 +443,27 @@ void CIRGenModule::emitGlobalFunctionDefinition(clang::GlobalDecl gd,
errorNYI(funcDecl->getSourceRange(), "deferredAnnotations");
}
+void CIRGenModule::handleCXXStaticMemberVarInstantiation(VarDecl *vd) {
+ VarDecl::DefinitionKind dk = vd->isThisDeclarationADefinition();
+ if (dk == VarDecl::Definition && vd->hasAttr<DLLImportAttr>())
+ return;
+
+ TemplateSpecializationKind tsk = vd->getTemplateSpecializationKind();
+ // If we have a definition, this might be a deferred decl. If the
+ // instantiation is explicit, make sure we emit it at the end.
+ if (vd->getDefinition() && tsk == TSK_ExplicitInstantiationDefinition)
+ getAddrOfGlobalVar(vd);
+
+ emitTopLevelDecl(vd);
+}
+
mlir::Operation *CIRGenModule::getGlobalValue(StringRef name) {
return mlir::SymbolTable::lookupSymbolIn(theModule, name);
}
cir::GlobalOp CIRGenModule::createGlobalOp(CIRGenModule &cgm,
mlir::Location loc, StringRef name,
- mlir::Type t,
+ mlir::Type t, bool isConstant,
mlir::Operation *insertPoint) {
cir::GlobalOp g;
CIRGenBuilderTy &builder = cgm.getBuilder();
@@ -464,7 +484,7 @@ cir::GlobalOp CIRGenModule::createGlobalOp(CIRGenModule &cgm,
builder.setInsertionPointToStart(cgm.getModule().getBody());
}
- g = builder.create<cir::GlobalOp>(loc, name, t);
+ g = builder.create<cir::GlobalOp>(loc, name, t, isConstant);
if (!insertPoint)
cgm.lastGlobalOp = g;
@@ -495,6 +515,24 @@ void CIRGenModule::setNonAliasAttributes(GlobalDecl gd, mlir::Operation *op) {
assert(!cir::MissingFeatures::setTargetAttributes());
}
+std::optional<cir::SourceLanguage> CIRGenModule::getCIRSourceLanguage() const {
+ using ClangStd = clang::LangStandard;
+ using CIRLang = cir::SourceLanguage;
+ auto opts = getLangOpts();
+
+ if (opts.CPlusPlus)
+ return CIRLang::CXX;
+ if (opts.C99 || opts.C11 || opts.C17 || opts.C23 || opts.C2y ||
+ opts.LangStd == ClangStd::lang_c89 ||
+ opts.LangStd == ClangStd::lang_gnu89)
+ return CIRLang::C;
+
+ // TODO(cir): support remaining source languages.
+ assert(!cir::MissingFeatures::sourceLanguageCases());
+ errorNYI("CIR does not yet support the given source language");
+ return std::nullopt;
+}
+
static void setLinkageForGV(cir::GlobalOp &gv, const NamedDecl *nd) {
// Set linkage and visibility in case we never see a definition.
LinkageInfo lv = nd->getLinkageAndVisibility();
@@ -566,7 +604,7 @@ CIRGenModule::getOrCreateCIRGlobal(StringRef mangledName, mlir::Type ty,
// mlir::SymbolTable::Visibility::Public is the default, no need to explicitly
// mark it as such.
cir::GlobalOp gv =
- CIRGenModule::createGlobalOp(*this, loc, mangledName, ty,
+ CIRGenModule::createGlobalOp(*this, loc, mangledName, ty, false,
/*insertPoint=*/entry.getOperation());
// This is the first use or definition of a mangled name. If there is a
@@ -654,6 +692,16 @@ mlir::Value CIRGenModule::getAddrOfGlobalVar(const VarDecl *d, mlir::Type ty,
g.getSymName());
}
+cir::GlobalViewAttr CIRGenModule::getAddrOfGlobalVarAttr(const VarDecl *d) {
+ assert(d->hasGlobalStorage() && "Not a global variable");
+ mlir::Type ty = getTypes().convertTypeForMem(d->getType());
+
+ cir::GlobalOp globalOp = getOrCreateCIRGlobal(d, ty, NotForDefinition);
+ assert(!cir::MissingFeatures::addressSpace());
+ cir::PointerType ptrTy = builder.getPointerTo(globalOp.getSymType());
+ return builder.getGlobalViewAttr(ptrTy, globalOp);
+}
+
void CIRGenModule::emitGlobalVarDefinition(const clang::VarDecl *vd,
bool isTentative) {
if (getLangOpts().OpenCL || getLangOpts().OpenMPIsTargetDevice) {
@@ -800,7 +848,7 @@ void CIRGenModule::emitGlobalDefinition(clang::GlobalDecl gd,
emitGlobalFunctionDefinition(gd, op);
if (method->isVirtual())
- errorNYI(method->getSourceRange(), "virtual member function");
+ getVTables().emitThunks(gd);
return;
}
@@ -946,6 +994,39 @@ void CIRGenModule::applyReplacements() {
}
}
+cir::GlobalOp CIRGenModule::createOrReplaceCXXRuntimeVariable(
+ mlir::Location loc, StringRef name, mlir::Type ty,
+ cir::GlobalLinkageKind linkage, clang::CharUnits alignment) {
+ auto gv = mlir::dyn_cast_or_null<cir::GlobalOp>(
+ mlir::SymbolTable::lookupSymbolIn(theModule, name));
+
+ if (gv) {
+ // Handling should be added here to check the type and assert that gv was
+ // a declaration if the type doesn't match, and handling below to replace
+ // the variable if it was a declaration.
+ errorNYI(loc, "createOrReplaceCXXRuntimeVariable: already exists");
+ return gv;
+ }
+
+ // Create a new variable.
+ gv = createGlobalOp(*this, loc, name, ty);
+
+ // Set up extra information and add to the module
+ gv.setLinkageAttr(
+ cir::GlobalLinkageKindAttr::get(&getMLIRContext(), linkage));
+ mlir::SymbolTable::setSymbolVisibility(gv,
+ CIRGenModule::getMLIRVisibility(gv));
+
+ if (supportsCOMDAT() && cir::isWeakForLinker(linkage) &&
+ !gv.hasAvailableExternallyLinkage()) {
+ gv.setComdat(true);
+ }
+
+ gv.setAlignmentAttr(getSize(alignment));
+ setDSOLocal(static_cast<mlir::Operation *>(gv));
+ return gv;
+}
+
// TODO(CIR): this could be a common method between LLVM codegen.
static bool isVarDeclStrongDefinition(const ASTContext &astContext,
CIRGenModule &cgm, const VarDecl *vd,
@@ -996,8 +1077,7 @@ static bool isVarDeclStrongDefinition(const ASTContext &astContext,
if (astContext.isAlignmentRequired(varType))
return true;
- if (const auto *rt = varType->getAs<RecordType>()) {
- const RecordDecl *rd = rt->getDecl();
+ if (const auto *rd = varType->getAsRecordDecl()) {
for (const FieldDecl *fd : rd->fields()) {
if (fd->isBitField())
continue;
@@ -1181,8 +1261,8 @@ generateStringLiteral(mlir::Location loc, mlir::TypedAttr c,
// Create a global variable for this string
// FIXME(cir): check for insertion point in module level.
- cir::GlobalOp gv =
- CIRGenModule::createGlobalOp(cgm, loc, globalName, c.getType());
+ cir::GlobalOp gv = CIRGenModule::createGlobalOp(
+ cgm, loc, globalName, c.getType(), !cgm.getLangOpts().WritableStrings);
// Set up extra information and add to the module
gv.setAlignmentAttr(cgm.getSize(alignment));
@@ -1260,6 +1340,19 @@ cir::GlobalOp CIRGenModule::getGlobalForStringLiteral(const StringLiteral *s,
return gv;
}
+/// Return a pointer to a constant array for the given string literal.
+cir::GlobalViewAttr
+CIRGenModule::getAddrOfConstantStringFromLiteral(const StringLiteral *s,
+ StringRef name) {
+ cir::GlobalOp gv = getGlobalForStringLiteral(s, name);
+ auto arrayTy = mlir::dyn_cast<cir::ArrayType>(gv.getSymType());
+ assert(arrayTy && "String literal must be array");
+ assert(!cir::MissingFeatures::addressSpace());
+ cir::PointerType ptrTy = getBuilder().getPointerTo(arrayTy.getElementType());
+
+ return builder.getGlobalViewAttr(ptrTy, gv);
+}
+
void CIRGenModule::emitExplicitCastExprType(const ExplicitCastExpr *e,
CIRGenFunction *cgf) {
if (cgf && e->getType()->isVariablyModifiedType())
@@ -1365,6 +1458,21 @@ void CIRGenModule::emitTopLevelDecl(Decl *decl) {
assert(!cir::MissingFeatures::generateDebugInfo());
assert(!cir::MissingFeatures::cxxRecordStaticMembers());
break;
+
+ case Decl::FileScopeAsm:
+ // File-scope asm is ignored during device-side CUDA compilation.
+ if (langOpts.CUDA && langOpts.CUDAIsDevice)
+ break;
+ // File-scope asm is ignored during device-side OpenMP compilation.
+ if (langOpts.OpenMPIsTargetDevice)
+ break;
+ // File-scope asm is ignored during device-side SYCL compilation.
+ if (langOpts.SYCLIsDevice)
+ break;
+ auto *file_asm = cast<FileScopeAsmDecl>(decl);
+ std::string line = file_asm->getAsmString();
+ globalScopeAsm.push_back(builder.getStringAttr(line));
+ break;
}
}
@@ -1926,6 +2034,15 @@ CIRGenModule::createCIRFunction(mlir::Location loc, StringRef name,
}
mlir::SymbolTable::Visibility
+CIRGenModule::getMLIRVisibility(cir::GlobalOp op) {
+ // MLIR doesn't accept public symbol declarations (only definitions).
+ if (op.isDeclaration())
+ return mlir::SymbolTable::Visibility::Private;
+ return getMLIRVisibilityFromCIRLinkage(op.getLinkage());
+}
+
+mlir::SymbolTable::Visibility
CIRGenModule::getMLIRVisibilityFromCIRLinkage(cir::GlobalLinkageKind glk) {
switch (glk) {
case cir::GlobalLinkageKind::InternalLinkage:
@@ -1978,6 +2095,9 @@ void CIRGenModule::release() {
emitDeferred();
applyReplacements();
+ theModule->setAttr(cir::CIRDialect::getModuleLevelAsmAttrName(),
+ builder.getArrayAttr(globalScopeAsm));
+
// There's a lot of code that is not implemented yet.
assert(!cir::MissingFeatures::cgmRelease());
}
@@ -2033,6 +2153,18 @@ bool CIRGenModule::verifyModule() const {
return mlir::verify(theModule).succeeded();
}
+mlir::Attribute CIRGenModule::getAddrOfRTTIDescriptor(mlir::Location loc,
+ QualType ty, bool forEh) {
+ // Return a bogus pointer if RTTI is disabled, unless it's for EH.
+ // FIXME: should we even be calling this method if RTTI is disabled
+ // and it's not for EH?
+ if (!shouldEmitRTTI(forEh))
+ return builder.getConstNullPtrAttr(builder.getUInt8PtrTy());
+
+ errorNYI(loc, "getAddrOfRTTIDescriptor");
+ return mlir::Attribute();
+}
+
// TODO(cir): this can be shared with LLVM codegen.
CharUnits CIRGenModule::computeNonVirtualBaseClassOffset(
const CXXRecordDecl *derivedClass,
@@ -2048,8 +2180,7 @@ CharUnits CIRGenModule::computeNonVirtualBaseClassOffset(
// Get the layout.
const ASTRecordLayout &layout = astContext.getASTRecordLayout(rd);
- const auto *baseDecl = cast<CXXRecordDecl>(
- base->getType()->castAs<clang::RecordType>()->getDecl());
+ const auto *baseDecl = base->getType()->castAsCXXRecordDecl();
// Add the offset.
offset += layout.getBaseClassOffset(baseDecl);
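
To illustrate the new module-level asm handling, a hypothetical translation unit with file-scope asm; each such declaration is collected into globalScopeAsm and attached to the module in release(). Not taken from the patch's tests.

// File-scope asm recorded by emitTopLevelDecl (skipped for device-side
// CUDA/OpenMP/SYCL compilations, as in the switch above).
__asm__(".globl illustrative_symbol");

int global_value = 42;
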
diff --git a/clang/lib/CIR/CodeGen/CIRGenModule.h b/clang/lib/CIR/CodeGen/CIRGenModule.h
index 5d07d38..4f5c7f8 100644
--- a/clang/lib/CIR/CodeGen/CIRGenModule.h
+++ b/clang/lib/CIR/CodeGen/CIRGenModule.h
@@ -17,6 +17,7 @@
#include "CIRGenCall.h"
#include "CIRGenTypeCache.h"
#include "CIRGenTypes.h"
+#include "CIRGenVTables.h"
#include "CIRGenValue.h"
#include "clang/AST/CharUnits.h"
@@ -86,16 +87,22 @@ private:
CIRGenTypes genTypes;
+ /// Holds information about C++ vtables.
+ CIRGenVTables vtables;
+
/// Per-function codegen information. Updated everytime emitCIR is called
/// for FunctionDecls's.
CIRGenFunction *curCGF = nullptr;
+ llvm::SmallVector<mlir::Attribute> globalScopeAsm;
+
public:
mlir::ModuleOp getModule() const { return theModule; }
CIRGenBuilderTy &getBuilder() { return builder; }
clang::ASTContext &getASTContext() const { return astContext; }
const clang::TargetInfo &getTarget() const { return target; }
const clang::CodeGenOptions &getCodeGenOpts() const { return codeGenOpts; }
+ clang::DiagnosticsEngine &getDiags() const { return diags; }
CIRGenTypes &getTypes() { return genTypes; }
const clang::LangOptions &getLangOpts() const { return langOpts; }
@@ -114,6 +121,9 @@ public:
mlir::Operation *lastGlobalOp = nullptr;
+ /// Tell the consumer that this variable has been instantiated.
+ void handleCXXStaticMemberVarInstantiation(VarDecl *vd);
+
llvm::DenseMap<const Decl *, cir::GlobalOp> staticLocalDeclMap;
mlir::Operation *getGlobalValue(llvm::StringRef ref);
@@ -140,6 +150,7 @@ public:
static cir::GlobalOp createGlobalOp(CIRGenModule &cgm, mlir::Location loc,
llvm::StringRef name, mlir::Type t,
+ bool isConstant = false,
mlir::Operation *insertPoint = nullptr);
llvm::StringMap<unsigned> cgGlobalNames;
@@ -155,6 +166,9 @@ public:
getAddrOfGlobalVar(const VarDecl *d, mlir::Type ty = {},
ForDefinition_t isForDefinition = NotForDefinition);
+ /// Return the mlir::GlobalViewAttr for the address of the given global.
+ cir::GlobalViewAttr getAddrOfGlobalVarAttr(const VarDecl *d);
+
CharUnits computeNonVirtualBaseClassOffset(
const CXXRecordDecl *derivedClass,
llvm::iterator_range<CastExpr::path_const_iterator> path);
@@ -168,6 +182,24 @@ public:
void constructAttributeList(CIRGenCalleeInfo calleeInfo,
mlir::NamedAttrList &attrs);
+ /// Will return a global variable of the given type. If a variable with a
+ /// different type already exists then a new variable with the right type
+ /// will be created and all uses of the old variable will be replaced with a
+ /// bitcast to the new variable.
+ cir::GlobalOp createOrReplaceCXXRuntimeVariable(
+ mlir::Location loc, llvm::StringRef name, mlir::Type ty,
+ cir::GlobalLinkageKind linkage, clang::CharUnits alignment);
+
+ void emitVTable(const CXXRecordDecl *rd);
+
+ /// Return the appropriate linkage for the vtable, VTT, and type information
+ /// of the given class.
+ cir::GlobalLinkageKind getVTableLinkage(const CXXRecordDecl *rd);
+
+ /// Get the address of the RTTI descriptor for the given type.
+ mlir::Attribute getAddrOfRTTIDescriptor(mlir::Location loc, QualType ty,
+ bool forEH = false);
+
/// Return a constant array for the given string.
mlir::Attribute getConstantArrayFromStringLiteral(const StringLiteral *e);
@@ -176,6 +208,12 @@ public:
cir::GlobalOp getGlobalForStringLiteral(const StringLiteral *s,
llvm::StringRef name = ".str");
+ /// Return a global symbol reference to a constant array for the given string
+ /// literal.
+ cir::GlobalViewAttr
+ getAddrOfConstantStringFromLiteral(const StringLiteral *s,
+ llvm::StringRef name = ".str");
+
/// Set attributes which are common to any form of a global definition (alias,
/// Objective-C method, function, global variable).
///
@@ -213,6 +251,16 @@ public:
cir::FuncType fnType = nullptr, bool dontDefer = false,
ForDefinition_t isForDefinition = NotForDefinition);
+ mlir::Type getVTableComponentType();
+ CIRGenVTables &getVTables() { return vtables; }
+
+ ItaniumVTableContext &getItaniumVTableContext() {
+ return vtables.getItaniumVTableContext();
+ }
+ const ItaniumVTableContext &getItaniumVTableContext() const {
+ return vtables.getItaniumVTableContext();
+ }
+
/// This contains all the decls which have definitions but which are deferred
/// for emission and therefore should only be output if they are actually
/// used. If a decl is in this, then it is known to have not been referenced
@@ -252,6 +300,13 @@ public:
getAddrOfGlobal(clang::GlobalDecl gd,
ForDefinition_t isForDefinition = NotForDefinition);
+ // Return whether RTTI information should be emitted for this target.
+ bool shouldEmitRTTI(bool forEH = false) {
+ return (forEH || getLangOpts().RTTI) && !getLangOpts().CUDAIsDevice &&
+ !(getLangOpts().OpenMP && getLangOpts().OpenMPIsTargetDevice &&
+ getTriple().isNVPTX());
+ }
+
/// Emit type info if type of an expression is a variably modified
/// type. Also emit proper debug info for cast types.
void emitExplicitCastExprType(const ExplicitCastExpr *e,
@@ -355,8 +410,8 @@ public:
static cir::VisibilityKind getGlobalVisibilityKindFromClangVisibility(
clang::VisibilityAttr::VisibilityType visibility);
cir::VisibilityAttr getGlobalVisibilityAttrFromDecl(const Decl *decl);
- static mlir::SymbolTable::Visibility getMLIRVisibility(cir::GlobalOp op);
cir::GlobalLinkageKind getFunctionLinkage(GlobalDecl gd);
+ static mlir::SymbolTable::Visibility getMLIRVisibility(cir::GlobalOp op);
cir::GlobalLinkageKind getCIRLinkageForDeclarator(const DeclaratorDecl *dd,
GVALinkage linkage,
bool isConstantVariable);
@@ -423,6 +478,9 @@ private:
void replacePointerTypeArgs(cir::FuncOp oldF, cir::FuncOp newF);
void setNonAliasAttributes(GlobalDecl gd, mlir::Operation *op);
+
+ /// Map source language used to a CIR attribute.
+ std::optional<cir::SourceLanguage> getCIRSourceLanguage() const;
};
} // namespace CIRGen
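
As a sketch of the path behind handleCXXStaticMemberVarInstantiation declared above, the kind of explicit instantiation it covers; hypothetical example, not from the patch.

template <typename T> struct Holder {
  static T value;                // static data member of a class template
};
template <typename T> T Holder<T>::value = T();

// An explicit instantiation definition makes the member a definition that
// handleCXXStaticMemberVarInstantiation routes through emitTopLevelDecl.
template struct Holder<int>;
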
diff --git a/clang/lib/CIR/CodeGen/CIRGenOpenACCClause.cpp b/clang/lib/CIR/CodeGen/CIRGenOpenACCClause.cpp
index bb9054a..3753336 100644
--- a/clang/lib/CIR/CodeGen/CIRGenOpenACCClause.cpp
+++ b/clang/lib/CIR/CodeGen/CIRGenOpenACCClause.cpp
@@ -357,15 +357,12 @@ class OpenACCClauseCIREmitter final
}
template <typename RecipeTy>
- RecipeTy getOrCreateRecipe(ASTContext &astCtx, const Expr *varRef,
- const VarDecl *varRecipe, DeclContext *dc,
- QualType baseType, mlir::Value mainOp) {
- mlir::ModuleOp mod =
- builder.getBlock()->getParent()->getParentOfType<mlir::ModuleOp>();
-
+ std::string getRecipeName(SourceRange loc, QualType baseType,
+ OpenACCReductionOperator reductionOp) {
std::string recipeName;
{
llvm::raw_string_ostream stream(recipeName);
+
if constexpr (std::is_same_v<RecipeTy, mlir::acc::PrivateRecipeOp>) {
stream << "privatization_";
} else if constexpr (std::is_same_v<RecipeTy,
@@ -375,11 +372,40 @@ class OpenACCClauseCIREmitter final
} else if constexpr (std::is_same_v<RecipeTy,
mlir::acc::ReductionRecipeOp>) {
stream << "reduction_";
- // We don't have the reduction operation here well enough to know how to
- // spell this correctly (+ == 'add', etc), so when we implement
- // 'reduction' we have to do that here.
- cgf.cgm.errorNYI(varRef->getSourceRange(),
- "OpeNACC reduction recipe creation");
+ // The names here are a little unusual (bitwise and/or get an 'i' prefix,
+ // logical ops an 'l' prefix), but they are chosen to match the MLIR
+ // dialect names as well as the Flang versions of these.
+ switch (reductionOp) {
+ case OpenACCReductionOperator::Addition:
+ stream << "add_";
+ break;
+ case OpenACCReductionOperator::Multiplication:
+ stream << "mul_";
+ break;
+ case OpenACCReductionOperator::Max:
+ stream << "max_";
+ break;
+ case OpenACCReductionOperator::Min:
+ stream << "min_";
+ break;
+ case OpenACCReductionOperator::BitwiseAnd:
+ stream << "iand_";
+ break;
+ case OpenACCReductionOperator::BitwiseOr:
+ stream << "ior_";
+ break;
+ case OpenACCReductionOperator::BitwiseXOr:
+ stream << "xor_";
+ break;
+ case OpenACCReductionOperator::And:
+ stream << "land_";
+ break;
+ case OpenACCReductionOperator::Or:
+ stream << "lor_";
+ break;
+ case OpenACCReductionOperator::Invalid:
+ llvm_unreachable("invalid reduction operator");
+ }
} else {
static_assert(!sizeof(RecipeTy), "Unknown Recipe op kind");
}
@@ -387,72 +413,221 @@ class OpenACCClauseCIREmitter final
MangleContext &mc = cgf.cgm.getCXXABI().getMangleContext();
mc.mangleCanonicalTypeName(baseType, stream);
}
+ return recipeName;
+ }
- if (auto recipe = mod.lookupSymbol<RecipeTy>(recipeName))
- return recipe;
-
- mlir::Location loc = cgf.cgm.getLoc(varRef->getBeginLoc());
- mlir::Location locEnd = cgf.cgm.getLoc(varRef->getEndLoc());
+ void createFirstprivateRecipeCopy(
+ mlir::Location loc, mlir::Location locEnd, mlir::Value mainOp,
+ CIRGenFunction::AutoVarEmission tempDeclEmission,
+ mlir::acc::FirstprivateRecipeOp recipe, const VarDecl *varRecipe,
+ const VarDecl *temporary) {
+ mlir::Block *block = builder.createBlock(
+ &recipe.getCopyRegion(), recipe.getCopyRegion().end(),
+ {mainOp.getType(), mainOp.getType()}, {loc, loc});
+ builder.setInsertionPointToEnd(&recipe.getCopyRegion().back());
+ CIRGenFunction::LexicalScope ls(cgf, loc, block);
+
+ mlir::BlockArgument fromArg = block->getArgument(0);
+ mlir::BlockArgument toArg = block->getArgument(1);
+
+ mlir::Type elementTy =
+ mlir::cast<cir::PointerType>(mainOp.getType()).getPointee();
+
+ // Set the address of the emission to be the argument, so that we initialize
+ // that instead of the variable in the other block.
+ tempDeclEmission.setAllocatedAddress(
+ Address{toArg, elementTy, cgf.getContext().getDeclAlign(varRecipe)});
+ tempDeclEmission.EmittedAsOffload = true;
+
+ CIRGenFunction::DeclMapRevertingRAII declMapRAII{cgf, temporary};
+ cgf.setAddrOfLocalVar(
+ temporary,
+ Address{fromArg, elementTy, cgf.getContext().getDeclAlign(varRecipe)});
+
+ cgf.emitAutoVarInit(tempDeclEmission);
+ mlir::acc::YieldOp::create(builder, locEnd);
+ }
- mlir::OpBuilder modBuilder(mod.getBodyRegion());
- auto recipe =
- RecipeTy::create(modBuilder, loc, recipeName, mainOp.getType());
+ // Create the 'init' section of the recipe, including the 'copy' section for
+ // 'firstprivate'. Note that this function is not 'insertion point' clean, in
+ // that it alters the insertion point to be inside of the 'init' (or, for
+ // 'firstprivate', the 'copy') section of the recipe, but doesn't restore it
+ // afterwards.
+ template <typename RecipeTy>
+ void createRecipeInitCopy(mlir::Location loc, mlir::Location locEnd,
+ SourceRange exprRange, mlir::Value mainOp,
+ RecipeTy recipe, const VarDecl *varRecipe,
+ const VarDecl *temporary) {
+ assert(varRecipe && "Required recipe variable not set?");
CIRGenFunction::AutoVarEmission tempDeclEmission{
CIRGenFunction::AutoVarEmission::invalid()};
-
- // Init section.
- {
- llvm::SmallVector<mlir::Type> argsTys{mainOp.getType()};
- llvm::SmallVector<mlir::Location> argsLocs{loc};
- builder.createBlock(&recipe.getInitRegion(), recipe.getInitRegion().end(),
- argsTys, argsLocs);
- builder.setInsertionPointToEnd(&recipe.getInitRegion().back());
-
- if constexpr (!std::is_same_v<RecipeTy, mlir::acc::PrivateRecipeOp>) {
- // We have only implemented 'init' for private, so make this NYI until
- // we have explicitly implemented everything.
- cgf.cgm.errorNYI(varRef->getSourceRange(),
- "OpenACC non-private recipe init");
+ CIRGenFunction::DeclMapRevertingRAII declMapRAII{cgf, varRecipe};
+
+ // Do the 'init' section of the recipe IR, which does an alloca, then the
+ // initialization (except for firstprivate).
+ mlir::Block *block = builder.createBlock(&recipe.getInitRegion(),
+ recipe.getInitRegion().end(),
+ {mainOp.getType()}, {loc});
+ builder.setInsertionPointToEnd(&recipe.getInitRegion().back());
+ CIRGenFunction::LexicalScope ls(cgf, loc, block);
+
+ tempDeclEmission =
+ cgf.emitAutoVarAlloca(*varRecipe, builder.saveInsertionPoint());
+
+ // 'firstprivate' doesn't do its initialization in the 'init' section;
+ // instead it does it in the 'copy' section. So only do init here.
+ // 'reduction' appears to use it too (rather than a 'copy' section), so
+ // we probably have to do it here too, but we can do that when we get to
+ // reduction implementation.
+ if constexpr (std::is_same_v<RecipeTy, mlir::acc::PrivateRecipeOp>) {
+ // We are OK with no init for builtins, arrays of builtins, or pointers,
+ // else we should NYI so we know to go look for these.
+ if (cgf.getContext().getLangOpts().CPlusPlus &&
+ !varRecipe->getType()
+ ->getPointeeOrArrayElementType()
+ ->isBuiltinType() &&
+ !varRecipe->getType()->isPointerType() && !varRecipe->getInit()) {
+ // If we don't have any initialization recipe, we failed during Sema to
+ // initialize this correctly. If we disable the
+ // Sema::TentativeAnalysisScopes in SemaOpenACC::CreateInitRecipe, it'll
+ // emit an error to tell us. However, emitting those errors during
+ // production is a violation of the standard, so we cannot do them.
+ cgf.cgm.errorNYI(exprRange, "private default-init recipe");
}
+ cgf.emitAutoVarInit(tempDeclEmission);
+ } else if constexpr (std::is_same_v<RecipeTy,
+ mlir::acc::ReductionRecipeOp>) {
+ // Unlike Private, the recipe here is always required as it has to do
+ // init, not just 'default' init.
+ if (!varRecipe->getInit())
+ cgf.cgm.errorNYI(exprRange, "reduction init recipe");
+ cgf.emitAutoVarInit(tempDeclEmission);
+ }
- if (varRecipe) {
- tempDeclEmission =
- cgf.emitAutoVarAlloca(*varRecipe, builder.saveInsertionPoint());
- cgf.emitAutoVarInit(tempDeclEmission);
+ mlir::acc::YieldOp::create(builder, locEnd);
+
+ if constexpr (std::is_same_v<RecipeTy, mlir::acc::FirstprivateRecipeOp>) {
+ if (!varRecipe->getInit()) {
+ // If we don't have any initialization recipe, we failed during Sema to
+ // initialize this correctly. If we disable the
+ // Sema::TentativeAnalysisScopes in SemaOpenACC::CreateInitRecipe, it'll
+ // emit an error to tell us. However, emitting those errors during
+ // production is a violation of the standard, so we cannot do them.
+ cgf.cgm.errorNYI(
+ exprRange, "firstprivate copy-init recipe not properly generated");
}
- mlir::acc::YieldOp::create(builder, locEnd);
+ createFirstprivateRecipeCopy(loc, locEnd, mainOp, tempDeclEmission,
+ recipe, varRecipe, temporary);
}
+ }
- // Copy section.
- if constexpr (std::is_same_v<RecipeTy, mlir::acc::FirstprivateRecipeOp> ||
- std::is_same_v<RecipeTy, mlir::acc::ReductionRecipeOp>) {
- // TODO: OpenACC: 'private' doesn't emit this, but for the other two we
- // have to figure out what 'copy' means here.
- cgf.cgm.errorNYI(varRef->getSourceRange(),
- "OpenACC record type privatization copy section");
+ // This function generates the 'combiner' section for a reduction recipe. Note
+ // that this function is not 'insertion point' clean, in that it alters the
+ // insertion point to be inside of the 'combiner' section of the recipe, but
+ // doesn't restore it afterwards.
+ void createReductionRecipeCombiner(mlir::Location loc, mlir::Location locEnd,
+ mlir::Value mainOp,
+ mlir::acc::ReductionRecipeOp recipe) {
+ mlir::Block *block = builder.createBlock(
+ &recipe.getCombinerRegion(), recipe.getCombinerRegion().end(),
+ {mainOp.getType(), mainOp.getType()}, {loc, loc});
+ builder.setInsertionPointToEnd(&recipe.getCombinerRegion().back());
+ CIRGenFunction::LexicalScope ls(cgf, loc, block);
+
+ mlir::BlockArgument lhsArg = block->getArgument(0);
+
+ mlir::acc::YieldOp::create(builder, locEnd, lhsArg);
+ }
+
+ // This function generates the 'destroy' section for a recipe. Note
+ // that this function is not 'insertion point' clean, in that it alters the
+ // insertion point to be inside of the 'destroy' section of the recipe, but
+ // doesn't restore it afterwards.
+ void createRecipeDestroySection(mlir::Location loc, mlir::Location locEnd,
+ mlir::Value mainOp, CharUnits alignment,
+ QualType baseType,
+ mlir::Region &destroyRegion) {
+ mlir::Block *block = builder.createBlock(
+ &destroyRegion, destroyRegion.end(), {mainOp.getType()}, {loc});
+ builder.setInsertionPointToEnd(&destroyRegion.back());
+ CIRGenFunction::LexicalScope ls(cgf, loc, block);
+
+ mlir::Type elementTy =
+ mlir::cast<cir::PointerType>(mainOp.getType()).getPointee();
+ Address addr{block->getArgument(0), elementTy, alignment};
+ cgf.emitDestroy(addr, baseType,
+ cgf.getDestroyer(QualType::DK_cxx_destructor));
+
+ mlir::acc::YieldOp::create(builder, locEnd);
+ }
+
+ mlir::acc::ReductionOperator convertReductionOp(OpenACCReductionOperator op) {
+ switch (op) {
+ case OpenACCReductionOperator::Addition:
+ return mlir::acc::ReductionOperator::AccAdd;
+ case OpenACCReductionOperator::Multiplication:
+ return mlir::acc::ReductionOperator::AccMul;
+ case OpenACCReductionOperator::Max:
+ return mlir::acc::ReductionOperator::AccMax;
+ case OpenACCReductionOperator::Min:
+ return mlir::acc::ReductionOperator::AccMin;
+ case OpenACCReductionOperator::BitwiseAnd:
+ return mlir::acc::ReductionOperator::AccIand;
+ case OpenACCReductionOperator::BitwiseOr:
+ return mlir::acc::ReductionOperator::AccIor;
+ case OpenACCReductionOperator::BitwiseXOr:
+ return mlir::acc::ReductionOperator::AccXor;
+ case OpenACCReductionOperator::And:
+ return mlir::acc::ReductionOperator::AccLand;
+ case OpenACCReductionOperator::Or:
+ return mlir::acc::ReductionOperator::AccLor;
+ case OpenACCReductionOperator::Invalid:
+ llvm_unreachable("invalid reduction operator");
}
- // Destroy section (doesn't currently exist).
- if (varRecipe && varRecipe->needsDestruction(cgf.getContext())) {
- llvm::SmallVector<mlir::Type> argsTys{mainOp.getType()};
- llvm::SmallVector<mlir::Location> argsLocs{loc};
- mlir::Block *block = builder.createBlock(&recipe.getDestroyRegion(),
- recipe.getDestroyRegion().end(),
- argsTys, argsLocs);
- builder.setInsertionPointToEnd(&recipe.getDestroyRegion().back());
-
- mlir::Type elementTy =
- mlir::cast<cir::PointerType>(mainOp.getType()).getPointee();
- Address addr{block->getArgument(0), elementTy,
- cgf.getContext().getDeclAlign(varRecipe)};
- cgf.emitDestroy(addr, baseType,
- cgf.getDestroyer(QualType::DK_cxx_destructor));
-
- mlir::acc::YieldOp::create(builder, locEnd);
+ llvm_unreachable("invalid reduction operator");
+ }
+
+ template <typename RecipeTy>
+ RecipeTy getOrCreateRecipe(ASTContext &astCtx, const Expr *varRef,
+ const VarDecl *varRecipe, const VarDecl *temporary,
+ OpenACCReductionOperator reductionOp,
+ DeclContext *dc, QualType baseType,
+ mlir::Value mainOp) {
+ mlir::ModuleOp mod = builder.getBlock()
+ ->getParent()
+ ->template getParentOfType<mlir::ModuleOp>();
+
+ std::string recipeName = getRecipeName<RecipeTy>(varRef->getSourceRange(),
+ baseType, reductionOp);
+ if (auto recipe = mod.lookupSymbol<RecipeTy>(recipeName))
+ return recipe;
+
+ mlir::Location loc = cgf.cgm.getLoc(varRef->getBeginLoc());
+ mlir::Location locEnd = cgf.cgm.getLoc(varRef->getEndLoc());
+
+ mlir::OpBuilder modBuilder(mod.getBodyRegion());
+ RecipeTy recipe;
+
+ if constexpr (std::is_same_v<RecipeTy, mlir::acc::ReductionRecipeOp>) {
+ recipe = RecipeTy::create(modBuilder, loc, recipeName, mainOp.getType(),
+ convertReductionOp(reductionOp));
+ } else {
+ recipe = RecipeTy::create(modBuilder, loc, recipeName, mainOp.getType());
}
+ createRecipeInitCopy(loc, locEnd, varRef->getSourceRange(), mainOp, recipe,
+ varRecipe, temporary);
+
+ if constexpr (std::is_same_v<RecipeTy, mlir::acc::ReductionRecipeOp>) {
+ createReductionRecipeCombiner(loc, locEnd, mainOp, recipe);
+ }
+
+ if (varRecipe && varRecipe->needsDestruction(cgf.getContext()))
+ createRecipeDestroySection(loc, locEnd, mainOp,
+ cgf.getContext().getDeclAlign(varRecipe),
+ baseType, recipe.getDestroyRegion());
return recipe;
}
@@ -1088,7 +1263,9 @@ public:
{
mlir::OpBuilder::InsertionGuard guardCase(builder);
auto recipe = getOrCreateRecipe<mlir::acc::PrivateRecipeOp>(
- cgf.getContext(), varExpr, varRecipe,
+ cgf.getContext(), varExpr, varRecipe, /*temporary=*/nullptr,
+ OpenACCReductionOperator::Invalid,
Decl::castToDeclContext(cgf.curFuncDecl), opInfo.baseType,
privateOp.getResult());
// TODO: OpenACC: The dialect is going to change in the near future to
@@ -1105,6 +1282,77 @@ public:
llvm_unreachable("Unknown construct kind in VisitPrivateClause");
}
}
+
+ void VisitFirstPrivateClause(const OpenACCFirstPrivateClause &clause) {
+ if constexpr (isOneOfTypes<OpTy, mlir::acc::ParallelOp,
+ mlir::acc::SerialOp>) {
+ for (const auto [varExpr, varRecipe] :
+ llvm::zip_equal(clause.getVarList(), clause.getInitRecipes())) {
+ CIRGenFunction::OpenACCDataOperandInfo opInfo =
+ cgf.getOpenACCDataOperandInfo(varExpr);
+ auto firstPrivateOp = mlir::acc::FirstprivateOp::create(
+ builder, opInfo.beginLoc, opInfo.varValue, /*structured=*/true,
+ /*implicit=*/false, opInfo.name, opInfo.bounds);
+
+ firstPrivateOp.setDataClause(mlir::acc::DataClause::acc_firstprivate);
+
+ {
+ mlir::OpBuilder::InsertionGuard guardCase(builder);
+ auto recipe = getOrCreateRecipe<mlir::acc::FirstprivateRecipeOp>(
+ cgf.getContext(), varExpr, varRecipe.RecipeDecl,
+ varRecipe.InitFromTemporary, OpenACCReductionOperator::Invalid,
+ Decl::castToDeclContext(cgf.curFuncDecl), opInfo.baseType,
+ firstPrivateOp.getResult());
+
+ // TODO: OpenACC: The dialect is going to change in the near future to
+ // have these be on a different operation, so when that changes, we
+ // probably need to change these here.
+ operation.addFirstPrivatization(builder.getContext(), firstPrivateOp,
+ recipe);
+ }
+ }
+ } else if constexpr (isCombinedType<OpTy>) {
+ // Unlike 'private', 'firstprivate' applies to the compute op, not the
+ // loop op.
+ applyToComputeOp(clause);
+ } else {
+ llvm_unreachable("Unknown construct kind in VisitFirstPrivateClause");
+ }
+ }
+
+ void VisitReductionClause(const OpenACCReductionClause &clause) {
+ if constexpr (isOneOfTypes<OpTy, mlir::acc::ParallelOp, mlir::acc::SerialOp,
+ mlir::acc::LoopOp>) {
+ for (const auto [varExpr, varRecipe] :
+ llvm::zip_equal(clause.getVarList(), clause.getRecipes())) {
+ CIRGenFunction::OpenACCDataOperandInfo opInfo =
+ cgf.getOpenACCDataOperandInfo(varExpr);
+
+ auto reductionOp = mlir::acc::ReductionOp::create(
+ builder, opInfo.beginLoc, opInfo.varValue, /*structured=*/true,
+ /*implicit=*/false, opInfo.name, opInfo.bounds);
+ reductionOp.setDataClause(mlir::acc::DataClause::acc_reduction);
+
+ {
+ mlir::OpBuilder::InsertionGuard guardCase(builder);
+
+ auto recipe = getOrCreateRecipe<mlir::acc::ReductionRecipeOp>(
+ cgf.getContext(), varExpr, varRecipe.RecipeDecl,
+ /*temporary=*/nullptr, clause.getReductionOp(),
+ Decl::castToDeclContext(cgf.curFuncDecl), opInfo.baseType,
+ reductionOp.getResult());
+
+ operation.addReduction(builder.getContext(), reductionOp, recipe);
+ }
+ }
+ } else if constexpr (isCombinedType<OpTy>) {
+ // Despite this being valid on ParallelOp or SerialOp, for a combined
+ // construct this applies to the 'loop'.
+ applyToLoopOp(clause);
+ } else {
+ llvm_unreachable("Unknown construct kind in VisitReductionClause");
+ }
+ }
};
template <typename OpTy>
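
For context, a sketch of an OpenACC reduction of the kind the new recipe emission targets; per the naming scheme above, the recipe symbol would start with "reduction_add_". Illustrative only; not drawn from the patch's test files.

float accumulate(const float *in, int n) {
  float sum = 0.0f;
#pragma acc parallel loop reduction(+ : sum)
  for (int i = 0; i < n; ++i)
    sum += in[i];
  return sum;
}
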
diff --git a/clang/lib/CIR/CodeGen/CIRGenRecordLayout.h b/clang/lib/CIR/CodeGen/CIRGenRecordLayout.h
index b28afe4..914ef16 100644
--- a/clang/lib/CIR/CodeGen/CIRGenRecordLayout.h
+++ b/clang/lib/CIR/CodeGen/CIRGenRecordLayout.h
@@ -141,6 +141,10 @@ private:
// for both virtual and non-virtual bases.
llvm::DenseMap<const clang::CXXRecordDecl *, unsigned> nonVirtualBases;
+ /// Map from virtual bases to their field index in the complete object.
+ llvm::DenseMap<const clang::CXXRecordDecl *, unsigned>
+ completeObjectVirtualBases;
+
/// Map from (bit-field) record field to the corresponding CIR record type
/// field no. This info is populated by record builder.
llvm::DenseMap<const clang::FieldDecl *, CIRGenBitFieldInfo> bitFields;
diff --git a/clang/lib/CIR/CodeGen/CIRGenRecordLayoutBuilder.cpp b/clang/lib/CIR/CodeGen/CIRGenRecordLayoutBuilder.cpp
index 1764967..6c7cf75 100644
--- a/clang/lib/CIR/CodeGen/CIRGenRecordLayoutBuilder.cpp
+++ b/clang/lib/CIR/CodeGen/CIRGenRecordLayoutBuilder.cpp
@@ -41,7 +41,7 @@ struct CIRRecordLowering final {
// member type that ensures correct rounding.
struct MemberInfo final {
CharUnits offset;
- enum class InfoKind { VFPtr, Field, Base } kind;
+ enum class InfoKind { VFPtr, Field, Base, VBase } kind;
mlir::Type data;
union {
const FieldDecl *fieldDecl;
@@ -71,17 +71,18 @@ struct CIRRecordLowering final {
void setBitFieldInfo(const FieldDecl *fd, CharUnits startOffset,
mlir::Type storageType);
- void lower();
+ void lower(bool NonVirtualBaseType);
void lowerUnion();
/// Determines if we need a packed llvm struct.
- void determinePacked();
+ void determinePacked(bool nvBaseType);
/// Inserts padding everywhere it's needed.
void insertPadding();
void computeVolatileBitfields();
- void accumulateBases(const CXXRecordDecl *cxxRecordDecl);
+ void accumulateBases();
void accumulateVPtrs();
+ void accumulateVBases();
void accumulateFields();
RecordDecl::field_iterator
accumulateBitFields(RecordDecl::field_iterator field,
@@ -96,6 +97,17 @@ struct CIRRecordLowering final {
/// Helper function to check if the target machine is BigEndian.
bool isBigEndian() const { return astContext.getTargetInfo().isBigEndian(); }
+ // The Itanium base layout rule allows virtual bases to overlap
+ // other bases, which complicates layout in specific ways.
+ //
+ // Note specifically that the ms_struct attribute doesn't change this.
+ bool isOverlappingVBaseABI() {
+ return !astContext.getTargetInfo().getCXXABI().isMicrosoft();
+ }
+ // Recursively searches all of the bases to find out if a vbase is
+ // not the primary vbase of some base class.
+ bool hasOwnStorage(const CXXRecordDecl *decl, const CXXRecordDecl *query);
+
CharUnits bitsToCharUnits(uint64_t bitOffset) {
return astContext.toCharUnitsFromBits(bitOffset);
}
@@ -184,6 +196,7 @@ struct CIRRecordLowering final {
CIRGenBuilderTy &builder;
const ASTContext &astContext;
const RecordDecl *recordDecl;
+ const CXXRecordDecl *cxxRecordDecl;
const ASTRecordLayout &astRecordLayout;
// Helpful intermediate data-structures
std::vector<MemberInfo> members;
@@ -192,6 +205,7 @@ struct CIRRecordLowering final {
llvm::DenseMap<const FieldDecl *, CIRGenBitFieldInfo> bitFields;
llvm::DenseMap<const FieldDecl *, unsigned> fieldIdxMap;
llvm::DenseMap<const CXXRecordDecl *, unsigned> nonVirtualBases;
+ llvm::DenseMap<const CXXRecordDecl *, unsigned> virtualBases;
cir::CIRDataLayout dataLayout;
LLVM_PREFERRED_TYPE(bool)
@@ -211,13 +225,14 @@ private:
CIRRecordLowering::CIRRecordLowering(CIRGenTypes &cirGenTypes,
const RecordDecl *recordDecl, bool packed)
- : cirGenTypes(cirGenTypes), builder(cirGenTypes.getBuilder()),
- astContext(cirGenTypes.getASTContext()), recordDecl(recordDecl),
- astRecordLayout(
- cirGenTypes.getASTContext().getASTRecordLayout(recordDecl)),
- dataLayout(cirGenTypes.getCGModule().getModule()),
- zeroInitializable(true), zeroInitializableAsBase(true), packed(packed),
- padded(false) {}
+ : cirGenTypes{cirGenTypes}, builder{cirGenTypes.getBuilder()},
+ astContext{cirGenTypes.getASTContext()}, recordDecl{recordDecl},
+ cxxRecordDecl{llvm::dyn_cast<CXXRecordDecl>(recordDecl)},
+ astRecordLayout{
+ cirGenTypes.getASTContext().getASTRecordLayout(recordDecl)},
+ dataLayout{cirGenTypes.getCGModule().getModule()},
+ zeroInitializable{true}, zeroInitializableAsBase{true}, packed{packed},
+ padded{false} {}
void CIRRecordLowering::setBitFieldInfo(const FieldDecl *fd,
CharUnits startOffset,
@@ -246,27 +261,28 @@ void CIRRecordLowering::setBitFieldInfo(const FieldDecl *fd,
info.volatileStorageOffset = CharUnits::Zero();
}
-void CIRRecordLowering::lower() {
+void CIRRecordLowering::lower(bool nonVirtualBaseType) {
if (recordDecl->isUnion()) {
lowerUnion();
computeVolatileBitfields();
return;
}
- assert(!cir::MissingFeatures::recordLayoutVirtualBases());
- CharUnits size = astRecordLayout.getSize();
+ CharUnits size = nonVirtualBaseType ? astRecordLayout.getNonVirtualSize()
+ : astRecordLayout.getSize();
accumulateFields();
- if (const auto *cxxRecordDecl = dyn_cast<CXXRecordDecl>(recordDecl)) {
+ if (cxxRecordDecl) {
accumulateVPtrs();
- accumulateBases(cxxRecordDecl);
+ accumulateBases();
if (members.empty()) {
appendPaddingBytes(size);
computeVolatileBitfields();
return;
}
- assert(!cir::MissingFeatures::recordLayoutVirtualBases());
+ if (!nonVirtualBaseType)
+ accumulateVBases();
}
llvm::stable_sort(members);
@@ -275,7 +291,7 @@ void CIRRecordLowering::lower() {
assert(!cir::MissingFeatures::recordZeroInit());
members.push_back(makeStorageInfo(size, getUIntNType(8)));
- determinePacked();
+ determinePacked(nonVirtualBaseType);
insertPadding();
members.pop_back();
@@ -298,8 +314,9 @@ void CIRRecordLowering::fillOutputFields() {
setBitFieldInfo(member.fieldDecl, member.offset, fieldTypes.back());
} else if (member.kind == MemberInfo::InfoKind::Base) {
nonVirtualBases[member.cxxRecordDecl] = fieldTypes.size() - 1;
+ } else if (member.kind == MemberInfo::InfoKind::VBase) {
+ virtualBases[member.cxxRecordDecl] = fieldTypes.size() - 1;
}
- assert(!cir::MissingFeatures::recordLayoutVirtualBases());
}
}
@@ -426,8 +443,9 @@ CIRRecordLowering::accumulateBitFields(RecordDecl::field_iterator field,
limitOffset = bitsToCharUnits(getFieldBitOffset(*probe));
goto FoundLimit;
}
- assert(!cir::MissingFeatures::cxxSupport());
- limitOffset = astRecordLayout.getDataSize();
+ limitOffset = cxxRecordDecl ? astRecordLayout.getNonVirtualSize()
+ : astRecordLayout.getDataSize();
+
FoundLimit:
CharUnits typeSize = getSize(type);
if (beginOffset + typeSize <= limitOffset) {
@@ -524,24 +542,25 @@ void CIRRecordLowering::calculateZeroInit() {
continue;
zeroInitializable = zeroInitializableAsBase = false;
return;
- } else if (member.kind == MemberInfo::InfoKind::Base) {
+ } else if (member.kind == MemberInfo::InfoKind::Base ||
+ member.kind == MemberInfo::InfoKind::VBase) {
if (isZeroInitializable(member.cxxRecordDecl))
continue;
zeroInitializable = false;
if (member.kind == MemberInfo::InfoKind::Base)
zeroInitializableAsBase = false;
}
- assert(!cir::MissingFeatures::recordLayoutVirtualBases());
}
}
-void CIRRecordLowering::determinePacked() {
+void CIRRecordLowering::determinePacked(bool nvBaseType) {
if (packed)
return;
CharUnits alignment = CharUnits::One();
-
- // TODO(cir): handle non-virtual base types
- assert(!cir::MissingFeatures::cxxSupport());
+ CharUnits nvAlignment = CharUnits::One();
+ CharUnits nvSize = !nvBaseType && cxxRecordDecl
+ ? astRecordLayout.getNonVirtualSize()
+ : CharUnits::Zero();
for (const MemberInfo &member : members) {
if (!member.data)
@@ -550,12 +569,19 @@ void CIRRecordLowering::determinePacked() {
// then the entire record must be packed.
if (member.offset % getAlignment(member.data))
packed = true;
+ if (member.offset < nvSize)
+ nvAlignment = std::max(nvAlignment, getAlignment(member.data));
alignment = std::max(alignment, getAlignment(member.data));
}
// If the size of the record (the capstone's offset) is not a multiple of the
// record's alignment, it must be packed.
if (members.back().offset % alignment)
packed = true;
+ // If the non-virtual sub-object is not a multiple of the non-virtual
+ // sub-object's alignment, it must be packed. We cannot have a packed
+ // non-virtual sub-object and an unpacked complete object or vice versa.
+ if (nvSize % nvAlignment)
+ packed = true;
// Update the alignment of the sentinel.
if (!packed)
members.back().data = getUIntNType(astContext.toBits(alignment));
@@ -589,7 +615,7 @@ std::unique_ptr<CIRGenRecordLayout>
CIRGenTypes::computeRecordLayout(const RecordDecl *rd, cir::RecordType *ty) {
CIRRecordLowering lowering(*this, rd, /*packed=*/false);
assert(ty->isIncomplete() && "recomputing record layout?");
- lowering.lower();
+ lowering.lower(/*nonVirtualBaseType=*/false);
// If we're in C++, compute the base subobject type.
cir::RecordType baseTy;
@@ -599,7 +625,7 @@ CIRGenTypes::computeRecordLayout(const RecordDecl *rd, cir::RecordType *ty) {
if (lowering.astRecordLayout.getNonVirtualSize() !=
lowering.astRecordLayout.getSize()) {
CIRRecordLowering baseLowering(*this, rd, /*Packed=*/lowering.packed);
- baseLowering.lower();
+ baseLowering.lower(/*NonVirtualBaseType=*/true);
std::string baseIdentifier = getRecordTypeName(rd, ".base");
baseTy =
builder.getCompleteRecordTy(baseLowering.fieldTypes, baseIdentifier,
@@ -626,8 +652,8 @@ CIRGenTypes::computeRecordLayout(const RecordDecl *rd, cir::RecordType *ty) {
assert(!cir::MissingFeatures::recordZeroInit());
rl->nonVirtualBases.swap(lowering.nonVirtualBases);
+ rl->completeObjectVirtualBases.swap(lowering.virtualBases);
- assert(!cir::MissingFeatures::cxxSupport());
assert(!cir::MissingFeatures::bitfields());
// Add all the field numbers.
@@ -754,6 +780,17 @@ void CIRRecordLowering::lowerUnion() {
packed = true;
}
+bool CIRRecordLowering::hasOwnStorage(const CXXRecordDecl *decl,
+ const CXXRecordDecl *query) {
+ const ASTRecordLayout &declLayout = astContext.getASTRecordLayout(decl);
+ if (declLayout.isPrimaryBaseVirtual() && declLayout.getPrimaryBase() == query)
+ return false;
+ for (const auto &base : decl->bases())
+ if (!hasOwnStorage(base.getType()->getAsCXXRecordDecl(), query))
+ return false;
+ return true;
+}
+
/// The AAPCS defines that, when possible, bit-fields should
/// be accessed using containers of the declared type width:
/// When a volatile bit-field is read, and its container does not overlap with
@@ -873,7 +910,7 @@ void CIRRecordLowering::computeVolatileBitfields() {
}
}
-void CIRRecordLowering::accumulateBases(const CXXRecordDecl *cxxRecordDecl) {
+void CIRRecordLowering::accumulateBases() {
// If we've got a primary virtual base, we need to add it with the bases.
if (astRecordLayout.isPrimaryBaseVirtual()) {
cirGenTypes.getCGModule().errorNYI(recordDecl->getSourceRange(),
@@ -881,12 +918,9 @@ void CIRRecordLowering::accumulateBases(const CXXRecordDecl *cxxRecordDecl) {
}
// Accumulate the non-virtual bases.
- for ([[maybe_unused]] const auto &base : cxxRecordDecl->bases()) {
- if (base.isVirtual()) {
- cirGenTypes.getCGModule().errorNYI(recordDecl->getSourceRange(),
- "accumulateBases: virtual base");
+ for (const auto &base : cxxRecordDecl->bases()) {
+ if (base.isVirtual())
continue;
- }
// Bases can be zero-sized even if not technically empty if they
// contain only a trailing array member.
const CXXRecordDecl *baseDecl = base.getType()->getAsCXXRecordDecl();
@@ -899,6 +933,31 @@ void CIRRecordLowering::accumulateBases(const CXXRecordDecl *cxxRecordDecl) {
}
}
+void CIRRecordLowering::accumulateVBases() {
+ for (const auto &base : cxxRecordDecl->vbases()) {
+ const CXXRecordDecl *baseDecl = base.getType()->getAsCXXRecordDecl();
+ if (isEmptyRecordForLayout(astContext, base.getType()))
+ continue;
+ CharUnits offset = astRecordLayout.getVBaseClassOffset(baseDecl);
+ // If the vbase is a primary virtual base of some base, then it doesn't
+ // get its own storage location but instead lives inside of that base.
+ if (isOverlappingVBaseABI() && astContext.isNearlyEmpty(baseDecl) &&
+ !hasOwnStorage(cxxRecordDecl, baseDecl)) {
+ members.push_back(
+ MemberInfo(offset, MemberInfo::InfoKind::VBase, nullptr, baseDecl));
+ continue;
+ }
+ // If we've got a vtordisp, add it as a storage type.
+ if (astRecordLayout.getVBaseOffsetsMap()
+ .find(baseDecl)
+ ->second.hasVtorDisp())
+ members.push_back(makeStorageInfo(offset - CharUnits::fromQuantity(4),
+ getUIntNType(32)));
+ members.push_back(MemberInfo(offset, MemberInfo::InfoKind::VBase,
+ getStorageType(baseDecl), baseDecl));
+ }
+}
+
void CIRRecordLowering::accumulateVPtrs() {
if (astRecordLayout.hasOwnVFPtr())
members.push_back(MemberInfo(CharUnits::Zero(), MemberInfo::InfoKind::VFPtr,
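A hedged illustration of what the new virtual-base handling covers. The class below is an assumed example, not part of this patch: for its complete-object layout, accumulateVBases() adds a VBase member at getVBaseClassOffset(), while the ".base" record type built with lower(/*nonVirtualBaseType=*/true) stops at getNonVirtualSize() and omits it.

// Illustrative input only; the names here are assumptions, not patch contents.
struct A {
  virtual ~A();
  int a;          // A is not nearly empty, so it keeps its own storage
};

struct B : virtual A {
  int b;          // complete-object layout of B is roughly { vptr, b, A subobject };
                  // the B.base type ends before the virtual A subobject
};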
diff --git a/clang/lib/CIR/CodeGen/CIRGenStmt.cpp b/clang/lib/CIR/CodeGen/CIRGenStmt.cpp
index 50642e7..3b0eabe 100644
--- a/clang/lib/CIR/CodeGen/CIRGenStmt.cpp
+++ b/clang/lib/CIR/CodeGen/CIRGenStmt.cpp
@@ -14,6 +14,8 @@
#include "CIRGenFunction.h"
#include "mlir/IR/Builders.h"
+#include "mlir/IR/Location.h"
+#include "mlir/Support/LLVM.h"
#include "clang/AST/ExprCXX.h"
#include "clang/AST/Stmt.h"
#include "clang/AST/StmtOpenACC.h"
@@ -23,28 +25,80 @@ using namespace clang;
using namespace clang::CIRGen;
using namespace cir;
-void CIRGenFunction::emitCompoundStmtWithoutScope(const CompoundStmt &s) {
- for (auto *curStmt : s.body()) {
- if (emitStmt(curStmt, /*useCurrentScope=*/false).failed())
- getCIRGenModule().errorNYI(curStmt->getSourceRange(),
- std::string("emitCompoundStmtWithoutScope: ") +
- curStmt->getStmtClassName());
+static mlir::LogicalResult emitStmtWithResult(CIRGenFunction &cgf,
+ const Stmt *exprResult,
+ AggValueSlot slot,
+ Address *lastValue) {
+ // We have to special case labels here. They are statements, but when put
+ // at the end of a statement expression, they yield the value of their
+ // subexpression. Handle this by walking through all labels we encounter,
+ // emitting them before we evaluate the subexpr.
+ // Similar issues arise for attributed statements.
+ while (!isa<Expr>(exprResult)) {
+ if (const auto *ls = dyn_cast<LabelStmt>(exprResult)) {
+ if (cgf.emitLabel(*ls->getDecl()).failed())
+ return mlir::failure();
+ exprResult = ls->getSubStmt();
+ } else if (const auto *as = dyn_cast<AttributedStmt>(exprResult)) {
+ // FIXME: Update this if we ever have attributes that affect the
+ // semantics of an expression.
+ exprResult = as->getSubStmt();
+ } else {
+ llvm_unreachable("Unknown value statement");
+ }
}
+
+ const Expr *e = cast<Expr>(exprResult);
+ QualType exprTy = e->getType();
+ if (cgf.hasAggregateEvaluationKind(exprTy)) {
+ cgf.emitAggExpr(e, slot);
+ } else {
+ // We can't return an RValue here because there might be cleanups at
+ // the end of the StmtExpr. Because of that, we have to emit the result
+ // here into a temporary alloca.
+ cgf.emitAnyExprToMem(e, *lastValue, Qualifiers(),
+ /*IsInit*/ false);
+ }
+
+ return mlir::success();
}
-void CIRGenFunction::emitCompoundStmt(const CompoundStmt &s) {
+mlir::LogicalResult CIRGenFunction::emitCompoundStmtWithoutScope(
+ const CompoundStmt &s, Address *lastValue, AggValueSlot slot) {
+ mlir::LogicalResult result = mlir::success();
+ const Stmt *exprResult = s.getStmtExprResult();
+ assert((!lastValue || (lastValue && exprResult)) &&
+ "If lastValue is not null then the CompoundStmt must have a "
+ "StmtExprResult");
+
+ for (const Stmt *curStmt : s.body()) {
+ const bool saveResult = lastValue && exprResult == curStmt;
+ if (saveResult) {
+ if (emitStmtWithResult(*this, exprResult, slot, lastValue).failed())
+ result = mlir::failure();
+ } else {
+ if (emitStmt(curStmt, /*useCurrentScope=*/false).failed())
+ result = mlir::failure();
+ }
+ }
+ return result;
+}
+
+mlir::LogicalResult CIRGenFunction::emitCompoundStmt(const CompoundStmt &s,
+ Address *lastValue,
+ AggValueSlot slot) {
+ // Add local scope to track new declared variables.
+ SymTableScopeTy varScope(symbolTable);
mlir::Location scopeLoc = getLoc(s.getSourceRange());
mlir::OpBuilder::InsertPoint scopeInsPt;
builder.create<cir::ScopeOp>(
scopeLoc, [&](mlir::OpBuilder &b, mlir::Type &type, mlir::Location loc) {
scopeInsPt = b.saveInsertionPoint();
});
- {
- mlir::OpBuilder::InsertionGuard guard(builder);
- builder.restoreInsertionPoint(scopeInsPt);
- LexicalScope lexScope(*this, scopeLoc, builder.getInsertionBlock());
- emitCompoundStmtWithoutScope(s);
- }
+ mlir::OpBuilder::InsertionGuard guard(builder);
+ builder.restoreInsertionPoint(scopeInsPt);
+ LexicalScope lexScope(*this, scopeLoc, builder.getInsertionBlock());
+ return emitCompoundStmtWithoutScope(s, lastValue, slot);
}
void CIRGenFunction::emitStopPoint(const Stmt *s) {
@@ -130,6 +184,9 @@ mlir::LogicalResult CIRGenFunction::emitStmt(const Stmt *s,
return emitOpenACCCacheConstruct(cast<OpenACCCacheConstruct>(*s));
case Stmt::OpenACCAtomicConstructClass:
return emitOpenACCAtomicConstruct(cast<OpenACCAtomicConstruct>(*s));
+ case Stmt::GCCAsmStmtClass:
+ case Stmt::MSAsmStmtClass:
+ return emitAsmStmt(cast<AsmStmt>(*s));
case Stmt::OMPScopeDirectiveClass:
case Stmt::OMPErrorDirectiveClass:
case Stmt::LabelStmtClass:
@@ -143,8 +200,6 @@ mlir::LogicalResult CIRGenFunction::emitStmt(const Stmt *s,
case Stmt::CoreturnStmtClass:
case Stmt::CXXTryStmtClass:
case Stmt::IndirectGotoStmtClass:
- case Stmt::GCCAsmStmtClass:
- case Stmt::MSAsmStmtClass:
case Stmt::OMPParallelDirectiveClass:
case Stmt::OMPTaskwaitDirectiveClass:
case Stmt::OMPTaskyieldDirectiveClass:
@@ -246,16 +301,19 @@ mlir::LogicalResult CIRGenFunction::emitSimpleStmt(const Stmt *s,
return emitDeclStmt(cast<DeclStmt>(*s));
case Stmt::CompoundStmtClass:
if (useCurrentScope)
- emitCompoundStmtWithoutScope(cast<CompoundStmt>(*s));
- else
- emitCompoundStmt(cast<CompoundStmt>(*s));
- break;
+ return emitCompoundStmtWithoutScope(cast<CompoundStmt>(*s));
+ return emitCompoundStmt(cast<CompoundStmt>(*s));
+ case Stmt::GotoStmtClass:
+ return emitGotoStmt(cast<GotoStmt>(*s));
case Stmt::ContinueStmtClass:
return emitContinueStmt(cast<ContinueStmt>(*s));
// NullStmt doesn't need any handling, but we need to say we handled it.
case Stmt::NullStmtClass:
break;
+
+ case Stmt::LabelStmtClass:
+ return emitLabelStmt(cast<LabelStmt>(*s));
case Stmt::CaseStmtClass:
case Stmt::DefaultStmtClass:
// If we reached here, we must not handling a switch case in the top level.
@@ -272,6 +330,17 @@ mlir::LogicalResult CIRGenFunction::emitSimpleStmt(const Stmt *s,
return mlir::success();
}
+mlir::LogicalResult CIRGenFunction::emitLabelStmt(const clang::LabelStmt &s) {
+
+ if (emitLabel(*s.getDecl()).failed())
+ return mlir::failure();
+
+ if (getContext().getLangOpts().EHAsynch && s.isSideEntry())
+ getCIRGenModule().errorNYI(s.getSourceRange(), "IsEHa: not implemented.");
+
+ return emitStmt(s.getSubStmt(), /*useCurrentScope*/ true);
+}
+
// Add a terminating yield on a body region if no other terminators are used.
static void terminateBody(CIRGenBuilderTy &builder, mlir::Region &r,
mlir::Location loc) {
@@ -412,13 +481,31 @@ mlir::LogicalResult CIRGenFunction::emitReturnStmt(const ReturnStmt &s) {
auto *retBlock = curLexScope->getOrCreateRetBlock(*this, loc);
// This should emit a branch through the cleanup block if one exists.
builder.create<cir::BrOp>(loc, retBlock);
- if (ehStack.getStackDepth() != currentCleanupStackDepth)
+ if (ehStack.stable_begin() != currentCleanupStackDepth)
cgm.errorNYI(s.getSourceRange(), "return with cleanup stack");
builder.createBlock(builder.getBlock()->getParent());
return mlir::success();
}
+mlir::LogicalResult CIRGenFunction::emitGotoStmt(const clang::GotoStmt &s) {
+ // FIXME: LLVM codegen emits a stop point here for debug info's sake when
+ // the insertion point is available, but doesn't do anything special when
+ // there isn't. We haven't implemented debug info support just yet; look at
+ // this again once we have it.
+ assert(!cir::MissingFeatures::generateDebugInfo());
+
+ cir::GotoOp::create(builder, getLoc(s.getSourceRange()),
+ s.getLabel()->getName());
+
+ // A goto marks the end of a block; create a new one so that codegen after
+ // emitGotoStmt can resume building in that block.
+ builder.createBlock(builder.getBlock()->getParent());
+
+ return mlir::success();
+}
+
mlir::LogicalResult
CIRGenFunction::emitContinueStmt(const clang::ContinueStmt &s) {
builder.createContinue(getLoc(s.getContinueLoc()));
@@ -429,6 +516,32 @@ CIRGenFunction::emitContinueStmt(const clang::ContinueStmt &s) {
return mlir::success();
}
+mlir::LogicalResult CIRGenFunction::emitLabel(const clang::LabelDecl &d) {
+ // Create a new block to tag with a label and add a branch from
+ // the current one to it. If the current block is empty, just attach
+ // the label to it.
+ mlir::Block *currBlock = builder.getBlock();
+ mlir::Block *labelBlock = currBlock;
+
+ if (!currBlock->empty()) {
+ {
+ mlir::OpBuilder::InsertionGuard guard(builder);
+ labelBlock = builder.createBlock(builder.getBlock()->getParent());
+ }
+ builder.create<cir::BrOp>(getLoc(d.getSourceRange()), labelBlock);
+ }
+
+ builder.setInsertionPointToEnd(labelBlock);
+ builder.create<cir::LabelOp>(getLoc(d.getSourceRange()), d.getName());
+ builder.setInsertionPointToEnd(labelBlock);
+
+ // FIXME: emit debug info for labels, incrementProfileCounter
+ assert(!cir::MissingFeatures::ehstackBranches());
+ assert(!cir::MissingFeatures::incrementProfileCounter());
+ assert(!cir::MissingFeatures::generateDebugInfo());
+ return mlir::success();
+}
+
mlir::LogicalResult CIRGenFunction::emitBreakStmt(const clang::BreakStmt &s) {
builder.createBreak(getLoc(s.getBreakLoc()));
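A sketch of the inputs the reworked compound-statement and label/goto paths are meant to handle; the function below is an assumed example, not part of the patch. The label wrapping the final expression of the statement expression is the case emitStmtWithResult unwraps, and the forward goto exercises emitGotoStmt/emitLabel.

/* Illustrative input only (assumed, not from the patch). */
int f(int n) {
  int r = ({            /* GNU statement expression                     */
    int t = n * 2;
  done:                  /* labels before the result expression are      */
    t + 1;               /* walked through by emitStmtWithResult         */
  });
  if (n < 0)
    goto out;            /* emitted as a GotoOp, later resolved against  */
  r += n;
out:                     /* the LabelOp produced by emitLabel            */
  return r;
}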
diff --git a/clang/lib/CIR/CodeGen/CIRGenTypeCache.h b/clang/lib/CIR/CodeGen/CIRGenTypeCache.h
index 1d081d5..eb8dcd6 100644
--- a/clang/lib/CIR/CodeGen/CIRGenTypeCache.h
+++ b/clang/lib/CIR/CodeGen/CIRGenTypeCache.h
@@ -59,6 +59,7 @@ struct CIRGenTypeCache {
/// void* in address space 0
cir::PointerType VoidPtrTy;
+ cir::PointerType UInt8PtrTy;
/// The size and alignment of a pointer into the generic address space.
union {
diff --git a/clang/lib/CIR/CodeGen/CIRGenTypes.cpp b/clang/lib/CIR/CodeGen/CIRGenTypes.cpp
index 3e07f6d..bb24933 100644
--- a/clang/lib/CIR/CodeGen/CIRGenTypes.cpp
+++ b/clang/lib/CIR/CodeGen/CIRGenTypes.cpp
@@ -103,7 +103,8 @@ std::string CIRGenTypes::getRecordTypeName(const clang::RecordDecl *recordDecl,
policy.SuppressTagKeyword = true;
if (recordDecl->getIdentifier())
- astContext.getRecordType(recordDecl).print(outStream, policy);
+ QualType(astContext.getCanonicalTagType(recordDecl))
+ .print(outStream, policy);
else if (auto *typedefNameDecl = recordDecl->getTypedefNameForAnonDecl())
typedefNameDecl->printQualifiedName(outStream, policy);
else
@@ -138,7 +139,9 @@ isSafeToConvert(const RecordDecl *rd, CIRGenTypes &cgt,
if (!alreadyChecked.insert(rd).second)
return true;
- const Type *key = cgt.getASTContext().getTagDeclType(rd).getTypePtr();
+ assert(rd->isCompleteDefinition() &&
+ "Expect RecordDecl to be CompleteDefinition");
+ const Type *key = cgt.getASTContext().getCanonicalTagType(rd).getTypePtr();
// If this type is already laid out, converting it is a noop.
if (cgt.isRecordLayoutComplete(key))
@@ -152,13 +155,14 @@ isSafeToConvert(const RecordDecl *rd, CIRGenTypes &cgt,
// out, don't do it. This includes virtual base classes which get laid out
// when a class is translated, even though they aren't embedded by-value into
// the class.
- if (auto *crd = dyn_cast<CXXRecordDecl>(rd)) {
- if (crd->getNumBases() > 0) {
- assert(!cir::MissingFeatures::cxxSupport());
- cgt.getCGModule().errorNYI(rd->getSourceRange(),
- "isSafeToConvert: CXXRecordDecl with bases");
- return false;
- }
+ if (const CXXRecordDecl *crd = dyn_cast<CXXRecordDecl>(rd)) {
+ for (const clang::CXXBaseSpecifier &i : crd->bases())
+ if (!isSafeToConvert(i.getType()
+ ->castAs<RecordType>()
+ ->getOriginalDecl()
+ ->getDefinitionOrSelf(),
+ cgt, alreadyChecked))
+ return false;
}
// If this type would require laying out members that are currently being laid
@@ -181,8 +185,8 @@ isSafeToConvert(QualType qt, CIRGenTypes &cgt,
qt = at->getValueType();
// If this is a record, check it.
- if (const auto *rt = qt->getAs<RecordType>())
- return isSafeToConvert(rt->getDecl(), cgt, alreadyChecked);
+ if (const auto *rd = qt->getAsRecordDecl())
+ return isSafeToConvert(rd, cgt, alreadyChecked);
// If this is an array, check the elements, which are embedded inline.
if (const auto *at = cgt.getASTContext().getAsArrayType(qt))
@@ -210,7 +214,7 @@ static bool isSafeToConvert(const RecordDecl *rd, CIRGenTypes &cgt) {
mlir::Type CIRGenTypes::convertRecordDeclType(const clang::RecordDecl *rd) {
// TagDecl's are not necessarily unique, instead use the (clang) type
// connected to the decl.
- const Type *key = astContext.getTagDeclType(rd).getTypePtr();
+ const Type *key = astContext.getCanonicalTagType(rd).getTypePtr();
cir::RecordType entry = recordDeclTypes[key];
// If we don't have an entry for this record yet, create one.
@@ -242,7 +246,7 @@ mlir::Type CIRGenTypes::convertRecordDeclType(const clang::RecordDecl *rd) {
for (const auto &base : cxxRecordDecl->bases()) {
if (base.isVirtual())
continue;
- convertRecordDeclType(base.getType()->castAs<RecordType>()->getDecl());
+ convertRecordDeclType(base.getType()->castAsRecordDecl());
}
}
@@ -275,7 +279,8 @@ mlir::Type CIRGenTypes::convertType(QualType type) {
// Process record types before the type cache lookup.
if (const auto *recordType = dyn_cast<RecordType>(type))
- return convertRecordDeclType(recordType->getDecl());
+ return convertRecordDeclType(
+ recordType->getOriginalDecl()->getDefinitionOrSelf());
// Has the type already been processed?
TypeCacheTy::iterator tci = typeCache.find(ty);
@@ -457,7 +462,7 @@ mlir::Type CIRGenTypes::convertType(QualType type) {
}
case Type::Enum: {
- const EnumDecl *ed = cast<EnumType>(ty)->getDecl();
+ const auto *ed = ty->castAsEnumDecl();
if (auto integerType = ed->getIntegerType(); !integerType.isNull())
return convertType(integerType);
// Return a placeholder 'i32' type. This can be changed later when the
@@ -484,6 +489,20 @@ mlir::Type CIRGenTypes::convertType(QualType type) {
break;
}
+ case Type::Atomic: {
+ QualType valueType = cast<AtomicType>(ty)->getValueType();
+ resultType = convertTypeForMem(valueType);
+
+ // Pad out to the inflated size if necessary.
+ uint64_t valueSize = astContext.getTypeSize(valueType);
+ uint64_t atomicSize = astContext.getTypeSize(ty);
+ if (valueSize != atomicSize) {
+ cgm.errorNYI("convertType: atomic type value size != atomic size");
+ }
+
+ break;
+ }
+
default:
cgm.errorNYI(SourceLocation(), "processing of type",
type->getTypeClassName());
@@ -516,7 +535,7 @@ mlir::Type CIRGenTypes::convertTypeForMem(clang::QualType qualType,
/// Return record layout info for the given record decl.
const CIRGenRecordLayout &
CIRGenTypes::getCIRGenRecordLayout(const RecordDecl *rd) {
- const auto *key = astContext.getTagDeclType(rd).getTypePtr();
+ const auto *key = astContext.getCanonicalTagType(rd).getTypePtr();
// If we have already computed the layout, return it.
auto it = cirGenRecordLayouts.find(key);
@@ -547,10 +566,8 @@ bool CIRGenTypes::isZeroInitializable(clang::QualType t) {
return true;
}
- if (const RecordType *rt = t->getAs<RecordType>()) {
- const RecordDecl *rd = rt->getDecl();
+ if (const auto *rd = t->getAsRecordDecl())
return isZeroInitializable(rd);
- }
if (t->getAs<MemberPointerType>()) {
cgm.errorNYI(SourceLocation(), "isZeroInitializable for MemberPointerType",
@@ -623,8 +640,10 @@ void CIRGenTypes::updateCompletedType(const TagDecl *td) {
// declaration of enums, and C doesn't allow an incomplete forward
// declaration with a non-default type.
assert(
- !typeCache.count(ed->getTypeForDecl()) ||
- (convertType(ed->getIntegerType()) == typeCache[ed->getTypeForDecl()]));
+ !typeCache.count(
+ ed->getASTContext().getCanonicalTagType(ed)->getTypePtr()) ||
+ (convertType(ed->getIntegerType()) ==
+ typeCache[ed->getASTContext().getCanonicalTagType(ed)->getTypePtr()]));
// If necessary, provide the full definition of a type only used with a
// declaration so far.
assert(!cir::MissingFeatures::generateDebugInfo());
@@ -639,7 +658,7 @@ void CIRGenTypes::updateCompletedType(const TagDecl *td) {
// Only complete if we converted it already. If we haven't converted it yet,
// we'll just do it lazily.
- if (recordDeclTypes.count(astContext.getTagDeclType(rd).getTypePtr()))
+ if (recordDeclTypes.count(astContext.getCanonicalTagType(rd).getTypePtr()))
convertRecordDeclType(rd);
// If necessary, provide the full definition of a type only used with a
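A short assumed example (not from the patch) of the new Type::Atomic case: the atomic type converts to the CIR type of its underlying value type, and the errorNYI path only triggers when the ABI inflates the atomic size beyond the value size.

/* Illustrative declarations only (assumed, not from the patch). */
_Atomic(int) counter;               /* valueSize == atomicSize: converts to
                                       the same CIR type as a plain 'int'  */

struct ThreeBytes { char c[3]; };
_Atomic(struct ThreeBytes) padded;  /* typically padded to 4 bytes, so
                                       valueSize != atomicSize and the new
                                       NYI diagnostic fires                */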
diff --git a/clang/lib/CIR/CodeGen/CIRGenTypes.h b/clang/lib/CIR/CodeGen/CIRGenTypes.h
index c2813d7..7af0d95 100644
--- a/clang/lib/CIR/CodeGen/CIRGenTypes.h
+++ b/clang/lib/CIR/CodeGen/CIRGenTypes.h
@@ -130,6 +130,13 @@ public:
/// Get the CIR function type for \arg Info.
cir::FuncType getFunctionType(const CIRGenFunctionInfo &info);
+ cir::FuncType getFunctionType(clang::GlobalDecl gd);
+
+ /// Get the CIR function type for use in a vtable, given a CXXMethodDecl. If
+ /// the method has an incomplete return type, and/or incomplete argument
+ /// types, this will return the opaque type.
+ cir::FuncType getFunctionTypeForVTable(clang::GlobalDecl gd);
+
// The arrangement methods are split into three families:
// - those meant to drive the signature and prologue/epilogue
// of a function declaration or definition,
diff --git a/clang/lib/CIR/CodeGen/CIRGenVTables.cpp b/clang/lib/CIR/CodeGen/CIRGenVTables.cpp
new file mode 100644
index 0000000..aca12aa
--- /dev/null
+++ b/clang/lib/CIR/CodeGen/CIRGenVTables.cpp
@@ -0,0 +1,244 @@
+//===----------------------------------------------------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// This contains code dealing with C++ code generation of virtual tables.
+//
+//===----------------------------------------------------------------------===//
+
+#include "CIRGenVTables.h"
+
+#include "CIRGenCXXABI.h"
+#include "CIRGenModule.h"
+#include "mlir/IR/Types.h"
+#include "clang/AST/VTableBuilder.h"
+#include "llvm/ADT/SmallVector.h"
+
+using namespace llvm;
+using namespace clang;
+using namespace clang::CIRGen;
+
+CIRGenVTables::CIRGenVTables(CIRGenModule &cgm)
+ : cgm(cgm), vtContext(cgm.getASTContext().getVTableContext()) {}
+
+mlir::Type CIRGenModule::getVTableComponentType() {
+ mlir::Type ptrTy = builder.getUInt8PtrTy();
+ assert(!cir::MissingFeatures::vtableRelativeLayout());
+ return ptrTy;
+}
+
+mlir::Type CIRGenVTables::getVTableComponentType() {
+ return cgm.getVTableComponentType();
+}
+
+cir::RecordType CIRGenVTables::getVTableType(const VTableLayout &layout) {
+ SmallVector<mlir::Type, 4> tys;
+ mlir::Type componentType = getVTableComponentType();
+ for (unsigned i = 0, e = layout.getNumVTables(); i != e; ++i)
+ tys.push_back(cir::ArrayType::get(componentType, layout.getVTableSize(i)));
+
+ // FIXME(cir): should VTableLayout be encoded like we do for some
+ // AST nodes?
+ return cgm.getBuilder().getAnonRecordTy(tys, /*incomplete=*/false);
+}
+
+/// This is a callback from Sema to tell us that a particular vtable is
+/// required to be emitted in this translation unit.
+///
+/// This is only called for vtables that _must_ be emitted (mainly due to key
+/// functions). For weak vtables, CodeGen tracks when they are needed and
+/// emits them as-needed.
+void CIRGenModule::emitVTable(const CXXRecordDecl *rd) {
+ vtables.generateClassData(rd);
+}
+
+void CIRGenVTables::generateClassData(const CXXRecordDecl *rd) {
+ assert(!cir::MissingFeatures::generateDebugInfo());
+
+ if (rd->getNumVBases())
+ cgm.errorNYI(rd->getSourceRange(), "emitVirtualInheritanceTables");
+
+ cgm.getCXXABI().emitVTableDefinitions(*this, rd);
+}
+
+mlir::Attribute CIRGenVTables::getVTableComponent(
+ const VTableLayout &layout, unsigned componentIndex, mlir::Attribute rtti,
+ unsigned &nextVTableThunkIndex, unsigned vtableAddressPoint,
+ bool vtableHasLocalLinkage) {
+ const VTableComponent &component = layout.vtable_components()[componentIndex];
+
+ CIRGenBuilderTy builder = cgm.getBuilder();
+
+ assert(!cir::MissingFeatures::vtableRelativeLayout());
+
+ switch (component.getKind()) {
+ case VTableComponent::CK_VCallOffset:
+ cgm.errorNYI("getVTableComponent: VCallOffset");
+ return mlir::Attribute();
+ case VTableComponent::CK_VBaseOffset:
+ cgm.errorNYI("getVTableComponent: VBaseOffset");
+ return mlir::Attribute();
+ case VTableComponent::CK_CompleteDtorPointer:
+ cgm.errorNYI("getVTableComponent: CompleteDtorPointer");
+ return mlir::Attribute();
+ case VTableComponent::CK_DeletingDtorPointer:
+ cgm.errorNYI("getVTableComponent: DeletingDtorPointer");
+ return mlir::Attribute();
+ case VTableComponent::CK_UnusedFunctionPointer:
+ cgm.errorNYI("getVTableComponent: UnusedFunctionPointer");
+ return mlir::Attribute();
+
+ case VTableComponent::CK_OffsetToTop:
+ return builder.getConstPtrAttr(builder.getUInt8PtrTy(),
+ component.getOffsetToTop().getQuantity());
+
+ case VTableComponent::CK_RTTI:
+ assert((mlir::isa<cir::GlobalViewAttr>(rtti) ||
+ mlir::isa<cir::ConstPtrAttr>(rtti)) &&
+ "expected GlobalViewAttr or ConstPtrAttr");
+ return rtti;
+
+ case VTableComponent::CK_FunctionPointer: {
+ GlobalDecl gd = component.getGlobalDecl();
+
+ assert(!cir::MissingFeatures::cudaSupport());
+
+ cir::FuncOp fnPtr;
+ if (cast<CXXMethodDecl>(gd.getDecl())->isPureVirtual()) {
+ cgm.errorNYI("getVTableComponent: CK_FunctionPointer: pure virtual");
+ return mlir::Attribute();
+ } else if (cast<CXXMethodDecl>(gd.getDecl())->isDeleted()) {
+ cgm.errorNYI("getVTableComponent: CK_FunctionPointer: deleted virtual");
+ return mlir::Attribute();
+ } else if (nextVTableThunkIndex < layout.vtable_thunks().size() &&
+ layout.vtable_thunks()[nextVTableThunkIndex].first ==
+ componentIndex) {
+ cgm.errorNYI("getVTableComponent: CK_FunctionPointer: thunk");
+ return mlir::Attribute();
+ } else {
+ // Otherwise we can use the method definition directly.
+ cir::FuncType fnTy = cgm.getTypes().getFunctionTypeForVTable(gd);
+ fnPtr = cgm.getAddrOfFunction(gd, fnTy, /*ForVTable=*/true);
+ }
+
+ return cir::GlobalViewAttr::get(
+ builder.getUInt8PtrTy(),
+ mlir::FlatSymbolRefAttr::get(fnPtr.getSymNameAttr()));
+ }
+ }
+
+ llvm_unreachable("Unexpected vtable component kind");
+}
+
+void CIRGenVTables::createVTableInitializer(cir::GlobalOp &vtableOp,
+ const clang::VTableLayout &layout,
+ mlir::Attribute rtti,
+ bool vtableHasLocalLinkage) {
+ mlir::Type componentType = getVTableComponentType();
+
+ const llvm::SmallVectorImpl<unsigned> &addressPoints =
+ layout.getAddressPointIndices();
+ unsigned nextVTableThunkIndex = 0;
+
+ mlir::MLIRContext *mlirContext = &cgm.getMLIRContext();
+
+ SmallVector<mlir::Attribute> vtables;
+ for (auto [vtableIndex, addressPoint] : llvm::enumerate(addressPoints)) {
+ // Build a ConstArrayAttr of the vtable components.
+ size_t vtableStart = layout.getVTableOffset(vtableIndex);
+ size_t vtableEnd = vtableStart + layout.getVTableSize(vtableIndex);
+ llvm::SmallVector<mlir::Attribute> components;
+ components.reserve(vtableEnd - vtableStart);
+ for (size_t componentIndex : llvm::seq(vtableStart, vtableEnd))
+ components.push_back(
+ getVTableComponent(layout, componentIndex, rtti, nextVTableThunkIndex,
+ addressPoint, vtableHasLocalLinkage));
+ // Create a ConstArrayAttr to hold the components.
+ auto arr = cir::ConstArrayAttr::get(
+ cir::ArrayType::get(componentType, components.size()),
+ mlir::ArrayAttr::get(mlirContext, components));
+ vtables.push_back(arr);
+ }
+
+ // Create a ConstRecordAttr to hold the component array.
+ const auto members = mlir::ArrayAttr::get(mlirContext, vtables);
+ cir::ConstRecordAttr record = cgm.getBuilder().getAnonConstRecord(members);
+
+ // Create a VTableAttr
+ auto vtableAttr = cir::VTableAttr::get(record.getType(), record.getMembers());
+
+ // Add the vtable initializer to the vtable global op.
+ cgm.setInitializer(vtableOp, vtableAttr);
+}
+
+/// Compute the required linkage of the vtable for the given class.
+///
+/// Note that we only call this at the end of the translation unit.
+cir::GlobalLinkageKind CIRGenModule::getVTableLinkage(const CXXRecordDecl *rd) {
+ if (!rd->isExternallyVisible())
+ return cir::GlobalLinkageKind::InternalLinkage;
+
+ // We're at the end of the translation unit, so the current key
+ // function is fully correct.
+ const CXXMethodDecl *keyFunction = astContext.getCurrentKeyFunction(rd);
+ if (keyFunction && !rd->hasAttr<DLLImportAttr>()) {
+ // If this class has a key function, use that to determine the
+ // linkage of the vtable.
+ const FunctionDecl *def = nullptr;
+ if (keyFunction->hasBody(def))
+ keyFunction = cast<CXXMethodDecl>(def);
+
+ // All of the cases below do something different with AppleKext enabled.
+ assert(!cir::MissingFeatures::appleKext());
+ switch (keyFunction->getTemplateSpecializationKind()) {
+ case TSK_Undeclared:
+ case TSK_ExplicitSpecialization:
+ assert(
+ (def || codeGenOpts.OptimizationLevel > 0 ||
+ codeGenOpts.getDebugInfo() != llvm::codegenoptions::NoDebugInfo) &&
+ "Shouldn't query vtable linkage without key function, "
+ "optimizations, or debug info");
+ if (!def && codeGenOpts.OptimizationLevel > 0)
+ return cir::GlobalLinkageKind::AvailableExternallyLinkage;
+
+ if (keyFunction->isInlined())
+ return !astContext.getLangOpts().AppleKext
+ ? cir::GlobalLinkageKind::LinkOnceODRLinkage
+ : cir::GlobalLinkageKind::InternalLinkage;
+ return cir::GlobalLinkageKind::ExternalLinkage;
+
+ case TSK_ImplicitInstantiation:
+ return cir::GlobalLinkageKind::LinkOnceODRLinkage;
+
+ case TSK_ExplicitInstantiationDefinition:
+ return cir::GlobalLinkageKind::WeakODRLinkage;
+
+ case TSK_ExplicitInstantiationDeclaration:
+ llvm_unreachable("Should not have been asked to emit this");
+ }
+ }
+
+ errorNYI(rd->getSourceRange(), "getVTableLinkage: no key function");
+ return cir::GlobalLinkageKind::ExternalLinkage;
+}
+
+void CIRGenVTables::emitThunks(GlobalDecl gd) {
+ const CXXMethodDecl *md =
+ cast<CXXMethodDecl>(gd.getDecl())->getCanonicalDecl();
+
+ // We don't need to generate thunks for the base destructor.
+ if (isa<CXXDestructorDecl>(md) && gd.getDtorType() == Dtor_Base)
+ return;
+
+ const VTableContextBase::ThunkInfoVectorTy *thunkInfoVector =
+ vtContext->getThunkInfo(gd);
+
+ if (!thunkInfoVector)
+ return;
+
+ cgm.errorNYI(md->getSourceRange(), "emitThunks");
+}
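A hedged sketch of the linkage decisions getVTableLinkage makes for the common key-function case; the class below is an assumed example and not part of the patch.

// Illustrative input only (assumed, not from the patch).
struct S {
  virtual void key();       // first non-inline virtual function: the key function
  virtual void other() {}
};

// In the TU that defines the key function, hasBody() succeeds and the
// TSK_Undeclared case returns ExternalLinkage for S's vtable.
void S::key() {}

// In other TUs the vtable is not required by Sema; with optimizations enabled
// (!def && OptimizationLevel > 0) an AvailableExternallyLinkage copy may be
// emitted instead.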
diff --git a/clang/lib/CIR/CodeGen/CIRGenVTables.h b/clang/lib/CIR/CodeGen/CIRGenVTables.h
new file mode 100644
index 0000000..518d7d7
--- /dev/null
+++ b/clang/lib/CIR/CodeGen/CIRGenVTables.h
@@ -0,0 +1,74 @@
+//===----------------------------------------------------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// This contains code dealing with C++ code generation of virtual tables.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef CLANG_LIB_CIR_CODEGEN_CIRGENVTABLES_H
+#define CLANG_LIB_CIR_CODEGEN_CIRGENVTABLES_H
+
+#include "mlir/IR/Types.h"
+#include "clang/AST/GlobalDecl.h"
+#include "clang/AST/VTableBuilder.h"
+#include "clang/CIR/Dialect/IR/CIRDialect.h"
+
+namespace clang {
+class CXXRecordDecl;
+}
+
+namespace clang::CIRGen {
+class CIRGenModule;
+
+class CIRGenVTables {
+ CIRGenModule &cgm;
+
+ clang::VTableContextBase *vtContext;
+
+ mlir::Attribute
+ getVTableComponent(const VTableLayout &layout, unsigned componentIndex,
+ mlir::Attribute rtti, unsigned &nextVTableThunkIndex,
+ unsigned vtableAddressPoint, bool vtableHasLocalLinkage);
+
+ mlir::Type getVTableComponentType();
+
+public:
+ CIRGenVTables(CIRGenModule &cgm);
+
+ /// Add vtable components for the given vtable layout to the given
+ /// global initializer.
+ void createVTableInitializer(cir::GlobalOp &vtable,
+ const clang::VTableLayout &layout,
+ mlir::Attribute rtti,
+ bool vtableHasLocalLinkage);
+
+ clang::ItaniumVTableContext &getItaniumVTableContext() {
+ return *llvm::cast<clang::ItaniumVTableContext>(vtContext);
+ }
+
+ const clang::ItaniumVTableContext &getItaniumVTableContext() const {
+ return *llvm::cast<clang::ItaniumVTableContext>(vtContext);
+ }
+
+ /// Emit the associated thunks for the given global decl.
+ void emitThunks(GlobalDecl gd);
+
+ /// Generate all the class data required to be generated upon definition of a
+ /// KeyFunction. This includes the vtable, the RTTI data structure (if RTTI
+ /// is enabled) and the VTT (if the class has virtual bases).
+ void generateClassData(const CXXRecordDecl *rd);
+
+ /// Returns the type of a vtable with the given layout. Normally a struct of
+ /// arrays of pointers, with one struct element for each vtable in the vtable
+ /// group.
+ cir::RecordType getVTableType(const clang::VTableLayout &layout);
+};
+
+} // namespace clang::CIRGen
+
+#endif // CLANG_LIB_CIR_CODEGEN_CIRGENVTABLES_H
diff --git a/clang/lib/CIR/CodeGen/CIRGenValue.h b/clang/lib/CIR/CodeGen/CIRGenValue.h
index 0832c414..ac7e1cc 100644
--- a/clang/lib/CIR/CodeGen/CIRGenValue.h
+++ b/clang/lib/CIR/CodeGen/CIRGenValue.h
@@ -190,6 +190,7 @@ public:
bool isSimple() const { return lvType == Simple; }
bool isVectorElt() const { return lvType == VectorElt; }
bool isBitField() const { return lvType == BitField; }
+ bool isGlobalReg() const { return lvType == GlobalReg; }
bool isVolatile() const { return quals.hasVolatile(); }
bool isVolatileQualified() const { return quals.hasVolatile(); }
@@ -211,6 +212,14 @@ public:
return Address(getPointer(), elementType, getAlignment());
}
+ void setAddress(Address address) {
+ assert(isSimple());
+ v = address.getPointer();
+ elementType = address.getElementType();
+ alignment = address.getAlignment().getQuantity();
+ assert(!cir::MissingFeatures::addressIsKnownNonNull());
+ }
+
const clang::Qualifiers &getQuals() const { return quals; }
clang::Qualifiers &getQuals() { return quals; }
diff --git a/clang/lib/CIR/CodeGen/CIRGenerator.cpp b/clang/lib/CIR/CodeGen/CIRGenerator.cpp
index b0357d9..aa4d9eb 100644
--- a/clang/lib/CIR/CodeGen/CIRGenerator.cpp
+++ b/clang/lib/CIR/CodeGen/CIRGenerator.cpp
@@ -163,7 +163,7 @@ void CIRGenerator::HandleCXXStaticMemberVarInstantiation(VarDecl *D) {
if (diags.hasErrorOccurred())
return;
- cgm->errorNYI(D->getSourceRange(), "HandleCXXStaticMemberVarInstantiation");
+ cgm->handleCXXStaticMemberVarInstantiation(D);
}
void CIRGenerator::CompleteTentativeDefinition(VarDecl *d) {
@@ -177,5 +177,5 @@ void CIRGenerator::HandleVTable(CXXRecordDecl *rd) {
if (diags.hasErrorOccurred())
return;
- cgm->errorNYI(rd->getSourceRange(), "HandleVTable");
+ cgm->emitVTable(rd);
}
diff --git a/clang/lib/CIR/CodeGen/CMakeLists.txt b/clang/lib/CIR/CodeGen/CMakeLists.txt
index ca3a329..6d7072a 100644
--- a/clang/lib/CIR/CodeGen/CMakeLists.txt
+++ b/clang/lib/CIR/CodeGen/CMakeLists.txt
@@ -8,6 +8,8 @@ get_property(dialect_libs GLOBAL PROPERTY MLIR_DIALECT_LIBS)
add_clang_library(clangCIR
CIRGenerator.cpp
+ CIRGenAsm.cpp
+ CIRGenAtomic.cpp
CIRGenBuilder.cpp
CIRGenCall.cpp
CIRGenClass.cpp
@@ -18,10 +20,12 @@ add_clang_library(clangCIR
CIRGenBuiltin.cpp
CIRGenDecl.cpp
CIRGenDeclOpenACC.cpp
+ CIRGenException.cpp
CIRGenExpr.cpp
CIRGenExprAggregate.cpp
CIRGenExprComplex.cpp
CIRGenExprConstant.cpp
+ CIRGenExprCXX.cpp
CIRGenExprScalar.cpp
CIRGenFunction.cpp
CIRGenItaniumCXXABI.cpp
@@ -33,6 +37,7 @@ add_clang_library(clangCIR
CIRGenStmtOpenACC.cpp
CIRGenStmtOpenACCLoop.cpp
CIRGenTypes.cpp
+ CIRGenVTables.cpp
TargetInfo.cpp
DEPENDS
diff --git a/clang/lib/CIR/CodeGen/EHScopeStack.h b/clang/lib/CIR/CodeGen/EHScopeStack.h
index 22750ac..47478f6 100644
--- a/clang/lib/CIR/CodeGen/EHScopeStack.h
+++ b/clang/lib/CIR/CodeGen/EHScopeStack.h
@@ -42,7 +42,47 @@ enum CleanupKind : unsigned {
/// A stack of scopes which respond to exceptions, including cleanups
/// and catch blocks.
class EHScopeStack {
+ friend class CIRGenFunction;
+
public:
+ // TODO(ogcg): Switch to alignof(uint64_t) instead of 8
+ enum { ScopeStackAlignment = 8 };
+
+ /// A saved depth on the scope stack. This is necessary because
+ /// pushing scopes onto the stack invalidates iterators.
+ class stable_iterator {
+ friend class EHScopeStack;
+
+ /// Offset from startOfData to endOfBuffer.
+ ptrdiff_t size = -1;
+
+ explicit stable_iterator(ptrdiff_t size) : size(size) {}
+
+ public:
+ static stable_iterator invalid() { return stable_iterator(-1); }
+ stable_iterator() = default;
+
+ bool isValid() const { return size >= 0; }
+
+ /// Returns true if this scope encloses I.
+ /// Returns false if I is invalid.
+ /// This scope must be valid.
+ bool encloses(stable_iterator other) const { return size <= other.size; }
+
+ /// Returns true if this scope strictly encloses I: that is,
+ /// if it encloses I and is not I.
+ /// Returns false if I is invalid.
+ /// This scope must be valid.
+ bool strictlyEncloses(stable_iterator I) const { return size < I.size; }
+
+ friend bool operator==(stable_iterator A, stable_iterator B) {
+ return A.size == B.size;
+ }
+ friend bool operator!=(stable_iterator A, stable_iterator B) {
+ return A.size != B.size;
+ }
+ };
+
/// Information for lazily generating a cleanup. Subclasses must be
/// POD-like: cleanups will not be destructed, and they will be
/// allocated on the cleanup stack and freely copied and moved
@@ -68,30 +108,75 @@ public:
///
// \param flags cleanup kind.
virtual void emit(CIRGenFunction &cgf) = 0;
- };
- // Classic codegen has a finely tuned custom allocator and a complex stack
- // management scheme. We'll probably eventually want to find a way to share
- // that implementation. For now, we will use a very simplified implementation
- // to get cleanups working.
- llvm::SmallVector<std::unique_ptr<Cleanup>, 8> cleanupStack;
+ // This is a placeholder until EHScope is implemented.
+ virtual size_t getSize() const = 0;
+ };
private:
+ // The implementation for this class is in CIRGenCleanup.h and
+ // CIRGenCleanup.cpp; the definition is here because it's used as a
+ // member of CIRGenFunction.
+
+ /// The start of the scope-stack buffer, i.e. the allocated pointer
+ /// for the buffer. All of these pointers are either simultaneously
+ /// null or simultaneously valid.
+ std::unique_ptr<char[]> startOfBuffer;
+
+ /// The end of the buffer.
+ char *endOfBuffer = nullptr;
+
+ /// The first valid entry in the buffer.
+ char *startOfData = nullptr;
+
/// The CGF this Stack belongs to
CIRGenFunction *cgf = nullptr;
+ // This class uses a custom allocator for maximum efficiency because cleanups
+ // are allocated and freed very frequently. It's basically a bump pointer
+ // allocator, but we can't use LLVM's BumpPtrAllocator because we use offsets
+ // into the buffer as stable iterators.
+ char *allocate(size_t size);
+ void deallocate(size_t size);
+
+ void *pushCleanup(CleanupKind kind, size_t dataSize);
+
public:
EHScopeStack() = default;
~EHScopeStack() = default;
/// Push a lazily-created cleanup on the stack.
template <class T, class... As> void pushCleanup(CleanupKind kind, As... a) {
- cleanupStack.push_back(std::make_unique<T>(a...));
+ static_assert(alignof(T) <= ScopeStackAlignment,
+ "Cleanup's alignment is too large.");
+ void *buffer = pushCleanup(kind, sizeof(T));
+ [[maybe_unused]] Cleanup *obj = new (buffer) T(a...);
}
void setCGF(CIRGenFunction *inCGF) { cgf = inCGF; }
- size_t getStackDepth() const { return cleanupStack.size(); }
+ /// Pops a cleanup scope off the stack. This is private to CIRGenCleanup.cpp.
+ void popCleanup();
+
+ /// Determines whether the exception-scopes stack is empty.
+ bool empty() const { return startOfData == endOfBuffer; }
+
+ /// An unstable reference to a scope-stack depth. Invalidated by
+ /// pushes but not pops.
+ class iterator;
+
+ /// Returns an iterator pointing to the innermost EH scope.
+ iterator begin() const;
+
+ /// Create a stable reference to the top of the EH stack. The
+ /// returned reference is valid until that scope is popped off the
+ /// stack.
+ stable_iterator stable_begin() const {
+ return stable_iterator(endOfBuffer - startOfData);
+ }
+
+ /// Create a stable reference to the bottom of the EH stack.
+ static stable_iterator stable_end() { return stable_iterator(0); }
};
} // namespace clang::CIRGen
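A minimal standalone sketch (assumed, not the real allocator) of why a depth stored as "bytes of scopes in use" stays valid across pushes, which is what lets emitReturnStmt compare ehStack.stable_begin() against a depth captured earlier.

#include <cstddef>

// Toy model only: the real EHScopeStack stores cleanups in a raw buffer and
// computes stable_iterator as endOfBuffer - startOfData.
struct ToyScopeStack {
  std::size_t bytesInUse = 0;                             // mirrors startOfData

  std::size_t stableBegin() const { return bytesInUse; }  // ~ stable_begin()
  static std::size_t stableEnd() { return 0; }            // ~ stable_end()

  // Pushing moves the buffer pointers but not previously saved depths.
  void pushCleanup(std::size_t size) { bytesInUse += size; }

  // A depth captured earlier (smaller) encloses one captured later (larger).
  static bool encloses(std::size_t outer, std::size_t inner) {
    return outer <= inner;
  }
};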
diff --git a/clang/lib/CIR/CodeGen/TargetInfo.cpp b/clang/lib/CIR/CodeGen/TargetInfo.cpp
index d2d32bb..62a8c59 100644
--- a/clang/lib/CIR/CodeGen/TargetInfo.cpp
+++ b/clang/lib/CIR/CodeGen/TargetInfo.cpp
@@ -6,24 +6,22 @@ using namespace clang::CIRGen;
bool clang::CIRGen::isEmptyRecordForLayout(const ASTContext &context,
QualType t) {
- const RecordType *rt = t->getAs<RecordType>();
- if (!rt)
+ const auto *rd = t->getAsRecordDecl();
+ if (!rd)
return false;
- const RecordDecl *rd = rt->getDecl();
-
// If this is a C++ record, check the bases first.
if (const CXXRecordDecl *cxxrd = dyn_cast<CXXRecordDecl>(rd)) {
if (cxxrd->isDynamicClass())
return false;
- for (const auto &I : cxxrd->bases())
- if (!isEmptyRecordForLayout(context, I.getType()))
+ for (const auto &i : cxxrd->bases())
+ if (!isEmptyRecordForLayout(context, i.getType()))
return false;
}
- for (const auto *I : rd->fields())
- if (!isEmptyFieldForLayout(context, I))
+ for (const auto *i : rd->fields())
+ if (!isEmptyFieldForLayout(context, i))
return false;
return true;
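Assumed examples (not from the patch) of what the rewritten isEmptyRecordForLayout accepts and rejects:

// Illustrative inputs only (assumed, not from the patch).
struct Empty {};                        // no fields: empty for layout
struct StillEmpty : Empty {};           // bases are checked recursively
struct NotEmpty { int x; };             // a field occupies storage
struct Dynamic { virtual void f(); };   // dynamic classes are never empty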
diff --git a/clang/lib/CIR/Dialect/IR/CIRAttrs.cpp b/clang/lib/CIR/Dialect/IR/CIRAttrs.cpp
index c039bdc..95faad6 100644
--- a/clang/lib/CIR/Dialect/IR/CIRAttrs.cpp
+++ b/clang/lib/CIR/Dialect/IR/CIRAttrs.cpp
@@ -16,6 +16,14 @@
#include "llvm/ADT/TypeSwitch.h"
//===-----------------------------------------------------------------===//
+// RecordMembers
+//===-----------------------------------------------------------------===//
+
+static void printRecordMembers(mlir::AsmPrinter &p, mlir::ArrayAttr members);
+static mlir::ParseResult parseRecordMembers(mlir::AsmParser &parser,
+ mlir::ArrayAttr &members);
+
+//===-----------------------------------------------------------------===//
// IntLiteral
//===-----------------------------------------------------------------===//
@@ -68,6 +76,61 @@ void CIRDialect::printAttribute(Attribute attr, DialectAsmPrinter &os) const {
llvm_unreachable("unexpected CIR type kind");
}
+static void printRecordMembers(mlir::AsmPrinter &printer,
+ mlir::ArrayAttr members) {
+ printer << '{';
+ llvm::interleaveComma(members, printer);
+ printer << '}';
+}
+
+static ParseResult parseRecordMembers(mlir::AsmParser &parser,
+ mlir::ArrayAttr &members) {
+ llvm::SmallVector<mlir::Attribute, 4> elts;
+
+ auto delimiter = AsmParser::Delimiter::Braces;
+ auto result = parser.parseCommaSeparatedList(delimiter, [&]() {
+ mlir::TypedAttr attr;
+ if (parser.parseAttribute(attr).failed())
+ return mlir::failure();
+ elts.push_back(attr);
+ return mlir::success();
+ });
+
+ if (result.failed())
+ return mlir::failure();
+
+ members = mlir::ArrayAttr::get(parser.getContext(), elts);
+ return mlir::success();
+}
+
+//===----------------------------------------------------------------------===//
+// ConstRecordAttr definitions
+//===----------------------------------------------------------------------===//
+
+LogicalResult
+ConstRecordAttr::verify(function_ref<InFlightDiagnostic()> emitError,
+ mlir::Type type, ArrayAttr members) {
+ auto sTy = mlir::dyn_cast_if_present<cir::RecordType>(type);
+ if (!sTy)
+ return emitError() << "expected !cir.record type";
+
+ if (sTy.getMembers().size() != members.size())
+ return emitError() << "number of elements must match";
+
+ unsigned attrIdx = 0;
+ for (auto &member : sTy.getMembers()) {
+ auto m = mlir::cast<mlir::TypedAttr>(members[attrIdx]);
+ if (member != m.getType())
+ return emitError() << "element at index " << attrIdx << " has type "
+ << m.getType()
+ << " but the expected type for this element is "
+ << member;
+ attrIdx++;
+ }
+
+ return success();
+}
+
//===----------------------------------------------------------------------===//
// OptInfoAttr definitions
//===----------------------------------------------------------------------===//
@@ -362,6 +425,44 @@ cir::ConstVectorAttr::verify(function_ref<InFlightDiagnostic()> emitError,
}
//===----------------------------------------------------------------------===//
+// CIR VTableAttr
+//===----------------------------------------------------------------------===//
+
+LogicalResult cir::VTableAttr::verify(
+ llvm::function_ref<mlir::InFlightDiagnostic()> emitError, mlir::Type type,
+ mlir::ArrayAttr data) {
+ auto sTy = mlir::dyn_cast_if_present<cir::RecordType>(type);
+ if (!sTy)
+ return emitError() << "expected !cir.record type result";
+ if (sTy.getMembers().empty() || data.empty())
+ return emitError() << "expected record type with one or more subtype";
+
+ if (cir::ConstRecordAttr::verify(emitError, type, data).failed())
+ return failure();
+
+ for (const auto &element : data.getAsRange<mlir::Attribute>()) {
+ const auto &constArrayAttr = mlir::dyn_cast<cir::ConstArrayAttr>(element);
+ if (!constArrayAttr)
+ return emitError() << "expected constant array subtype";
+
+ LogicalResult eltTypeCheck = success();
+ auto arrayElts = mlir::cast<ArrayAttr>(constArrayAttr.getElts());
+ arrayElts.walkImmediateSubElements(
+ [&](mlir::Attribute attr) {
+ if (mlir::isa<ConstPtrAttr, GlobalViewAttr>(attr))
+ return;
+
+ eltTypeCheck = emitError()
+ << "expected GlobalViewAttr or ConstPtrAttr";
+ },
+ [&](mlir::Type type) {});
+ if (eltTypeCheck.failed())
+ return eltTypeCheck;
+ }
+ return success();
+}
+
+//===----------------------------------------------------------------------===//
// CIR Dialect
//===----------------------------------------------------------------------===//
diff --git a/clang/lib/CIR/Dialect/IR/CIRDataLayout.cpp b/clang/lib/CIR/Dialect/IR/CIRDataLayout.cpp
index d835c40..42d4581 100644
--- a/clang/lib/CIR/Dialect/IR/CIRDataLayout.cpp
+++ b/clang/lib/CIR/Dialect/IR/CIRDataLayout.cpp
@@ -1,4 +1,6 @@
#include "clang/CIR/Dialect/IR/CIRDataLayout.h"
+#include "clang/CIR/Dialect/IR/CIRTypes.h"
+#include "clang/CIR/MissingFeatures.h"
using namespace cir;
@@ -20,3 +22,42 @@ void CIRDataLayout::reset(mlir::DataLayoutSpecInterface spec) {
bigEndian = str == mlir::DLTIDialect::kDataLayoutEndiannessBig;
}
}
+
+llvm::Align CIRDataLayout::getAlignment(mlir::Type ty, bool useABIAlign) const {
+ if (auto recTy = llvm::dyn_cast<cir::RecordType>(ty)) {
+ // Packed record types always have an ABI alignment of one.
+ if (recTy && recTy.getPacked() && useABIAlign)
+ return llvm::Align(1);
+
+ // Get the layout annotation... which is lazily created on demand.
+ llvm_unreachable("getAlignment()) for record type is not implemented");
+ }
+
+ // FIXME(cir): This does not account for different address spaces, and relies
+ // on CIR's data layout to give the proper alignment.
+ assert(!cir::MissingFeatures::addressSpace());
+
+ // Fetch type alignment from MLIR's data layout.
+ unsigned align = useABIAlign ? layout.getTypeABIAlignment(ty)
+ : layout.getTypePreferredAlignment(ty);
+ return llvm::Align(align);
+}
+
+// The implementation of this method is provided inline as it is particularly
+// well suited to constant folding when called on a specific Type subclass.
+llvm::TypeSize CIRDataLayout::getTypeSizeInBits(mlir::Type ty) const {
+ assert(cir::isSized(ty) && "Cannot getTypeInfo() on a type that is unsized!");
+
+ if (auto recordTy = llvm::dyn_cast<cir::RecordType>(ty)) {
+ // FIXME(cir): CIR record's data layout implementation doesn't do a good job
+ // of handling union particularities. We should have a separate union type.
+ return recordTy.getTypeSizeInBits(layout, {});
+ }
+
+ // FIXME(cir): This does not account for different address spaces, and relies
+ // on CIR's data layout to give the proper ABI-specific type width.
+ assert(!cir::MissingFeatures::addressSpace());
+
+ // This is calling mlir::DataLayout::getTypeSizeInBits().
+ return llvm::TypeSize::getFixed(layout.getTypeSizeInBits(ty));
+}
diff --git a/clang/lib/CIR/Dialect/IR/CIRDialect.cpp b/clang/lib/CIR/Dialect/IR/CIRDialect.cpp
index d3fcac1..80ca2d3 100644
--- a/clang/lib/CIR/Dialect/IR/CIRDialect.cpp
+++ b/clang/lib/CIR/Dialect/IR/CIRDialect.cpp
@@ -22,6 +22,8 @@
#include "clang/CIR/Dialect/IR/CIROpsDialect.cpp.inc"
#include "clang/CIR/Dialect/IR/CIROpsEnums.cpp.inc"
#include "clang/CIR/MissingFeatures.h"
+#include "llvm/ADT/SetOperations.h"
+#include "llvm/ADT/SmallSet.h"
#include "llvm/Support/LogicalResult.h"
#include <numeric>
@@ -339,7 +341,9 @@ static LogicalResult checkConstantTypes(mlir::Operation *op, mlir::Type opType,
}
if (mlir::isa<cir::ConstArrayAttr, cir::ConstVectorAttr,
- cir::ConstComplexAttr, cir::PoisonAttr>(attrType))
+ cir::ConstComplexAttr, cir::ConstRecordAttr,
+ cir::GlobalViewAttr, cir::PoisonAttr, cir::VTableAttr>(
+ attrType))
return success();
assert(isa<TypedAttr>(attrType) && "What else could we be looking at here?");
@@ -1355,11 +1359,14 @@ mlir::LogicalResult cir::GlobalOp::verify() {
void cir::GlobalOp::build(OpBuilder &odsBuilder, OperationState &odsState,
llvm::StringRef sym_name, mlir::Type sym_type,
- cir::GlobalLinkageKind linkage) {
+ bool isConstant, cir::GlobalLinkageKind linkage) {
odsState.addAttribute(getSymNameAttrName(odsState.name),
odsBuilder.getStringAttr(sym_name));
odsState.addAttribute(getSymTypeAttrName(odsState.name),
mlir::TypeAttr::get(sym_type));
+ if (isConstant)
+ odsState.addAttribute(getConstantAttrName(odsState.name),
+ odsBuilder.getUnitAttr());
cir::GlobalLinkageKindAttr linkageAttr =
cir::GlobalLinkageKindAttr::get(odsBuilder.getContext(), linkage);
@@ -1444,6 +1451,77 @@ cir::GetGlobalOp::verifySymbolUses(SymbolTableCollection &symbolTable) {
}
//===----------------------------------------------------------------------===//
+// VTableAddrPointOp
+//===----------------------------------------------------------------------===//
+
+LogicalResult
+cir::VTableAddrPointOp::verifySymbolUses(SymbolTableCollection &symbolTable) {
+ StringRef name = getName();
+
+ // Verify that the result type's underlying pointer type matches the type of
+ // the referenced cir.global.
+ auto op =
+ symbolTable.lookupNearestSymbolFrom<cir::GlobalOp>(*this, getNameAttr());
+ if (!op)
+ return emitOpError("'")
+ << name << "' does not reference a valid cir.global";
+ std::optional<mlir::Attribute> init = op.getInitialValue();
+ if (!init)
+ return success();
+ if (!isa<cir::VTableAttr>(*init))
+ return emitOpError("Expected #cir.vtable in initializer for global '")
+ << name << "'";
+ return success();
+}
+
+//===----------------------------------------------------------------------===//
+// VTTAddrPointOp
+//===----------------------------------------------------------------------===//
+
+LogicalResult
+cir::VTTAddrPointOp::verifySymbolUses(SymbolTableCollection &symbolTable) {
+ // VTT ptr is not coming from a symbol.
+ if (!getName())
+ return success();
+ StringRef name = *getName();
+
+ // Verify that the result type's underlying pointer type matches the type of
+ // the referenced cir.global op.
+ auto op =
+ symbolTable.lookupNearestSymbolFrom<cir::GlobalOp>(*this, getNameAttr());
+ if (!op)
+ return emitOpError("'")
+ << name << "' does not reference a valid cir.global";
+ std::optional<mlir::Attribute> init = op.getInitialValue();
+ if (!init)
+ return success();
+ if (!isa<cir::ConstArrayAttr>(*init))
+ return emitOpError(
+ "Expected constant array in initializer for global VTT '")
+ << name << "'";
+ return success();
+}
+
+LogicalResult cir::VTTAddrPointOp::verify() {
+ // The operation uses either a symbol or a value to operate, but not both
+ if (getName() && getSymAddr())
+ return emitOpError("should use either a symbol or value, but not both");
+
+ // If not a symbol, stick with the concrete type used for getSymAddr.
+ if (getSymAddr())
+ return success();
+
+ mlir::Type resultType = getAddr().getType();
+ mlir::Type resTy = cir::PointerType::get(
+ cir::PointerType::get(cir::VoidType::get(getContext())));
+
+ if (resultType != resTy)
+ return emitOpError("result type must be ")
+ << resTy << ", but provided result type is " << resultType;
+ return success();
+}
+
+//===----------------------------------------------------------------------===//
// FuncOp
//===----------------------------------------------------------------------===//
@@ -1625,9 +1703,28 @@ void cir::FuncOp::print(OpAsmPrinter &p) {
}
}
-// TODO(CIR): The properties of functions that require verification haven't
-// been implemented yet.
-mlir::LogicalResult cir::FuncOp::verify() { return success(); }
+mlir::LogicalResult cir::FuncOp::verify() {
+
+ llvm::SmallSet<llvm::StringRef, 16> labels;
+ llvm::SmallSet<llvm::StringRef, 16> gotos;
+
+ getOperation()->walk([&](mlir::Operation *op) {
+ if (auto lab = dyn_cast<cir::LabelOp>(op)) {
+ labels.insert(lab.getLabel());
+ } else if (auto goTo = dyn_cast<cir::GotoOp>(op)) {
+ gotos.insert(goTo.getLabel());
+ }
+ });
+
+ if (!labels.empty() || !gotos.empty()) {
+ llvm::SmallSet<llvm::StringRef, 16> mismatched =
+ llvm::set_difference(gotos, labels);
+
+ if (!mismatched.empty())
+ return emitOpError() << "goto/label mismatch";
+ }
+ return success();
+}
//===----------------------------------------------------------------------===//
// BinOp
@@ -1763,6 +1860,19 @@ LogicalResult cir::ShiftOp::verify() {
}
//===----------------------------------------------------------------------===//
+// LabelOp Definitions
+//===----------------------------------------------------------------------===//
+
+LogicalResult cir::LabelOp::verify() {
+ mlir::Operation *op = getOperation();
+ mlir::Block *blk = op->getBlock();
+ if (&blk->front() != op)
+ return emitError() << "must be the first operation in a block";
+
+ return mlir::success();
+}
+
+//===----------------------------------------------------------------------===//
// UnaryOp
//===----------------------------------------------------------------------===//
@@ -2385,6 +2495,227 @@ OpFoldResult RotateOp::fold(FoldAdaptor adaptor) {
}
//===----------------------------------------------------------------------===//
+// InlineAsmOp
+//===----------------------------------------------------------------------===//
+
+void cir::InlineAsmOp::print(OpAsmPrinter &p) {
+ p << '(' << getAsmFlavor() << ", ";
+ p.increaseIndent();
+ p.printNewline();
+
+ llvm::SmallVector<std::string, 3> names{"out", "in", "in_out"};
+ auto *nameIt = names.begin();
+ auto *attrIt = getOperandAttrs().begin();
+
+ for (mlir::OperandRange ops : getAsmOperands()) {
+ p << *nameIt << " = ";
+
+ p << '[';
+ llvm::interleaveComma(llvm::make_range(ops.begin(), ops.end()), p,
+ [&](Value value) {
+ p.printOperand(value);
+ p << " : " << value.getType();
+ if (*attrIt)
+ p << " (maybe_memory)";
+ attrIt++;
+ });
+ p << "],";
+ p.printNewline();
+ ++nameIt;
+ }
+
+ p << "{";
+ p.printString(getAsmString());
+ p << " ";
+ p.printString(getConstraints());
+ p << "}";
+ p.decreaseIndent();
+ p << ')';
+ if (getSideEffects())
+ p << " side_effects";
+
+ std::array elidedAttrs{
+ llvm::StringRef("asm_flavor"), llvm::StringRef("asm_string"),
+ llvm::StringRef("constraints"), llvm::StringRef("operand_attrs"),
+ llvm::StringRef("operands_segments"), llvm::StringRef("side_effects")};
+ p.printOptionalAttrDict(getOperation()->getAttrs(), elidedAttrs);
+
+ if (auto v = getRes())
+ p << " -> " << v.getType();
+}
+
+void cir::InlineAsmOp::build(OpBuilder &odsBuilder, OperationState &odsState,
+ ArrayRef<ValueRange> asmOperands,
+ StringRef asmString, StringRef constraints,
+ bool sideEffects, cir::AsmFlavor asmFlavor,
+ ArrayRef<Attribute> operandAttrs) {
+ // Set up the operands_segments for VariadicOfVariadic
+ SmallVector<int32_t> segments;
+ for (auto operandRange : asmOperands) {
+ segments.push_back(operandRange.size());
+ odsState.addOperands(operandRange);
+ }
+
+ odsState.addAttribute(
+ "operands_segments",
+ DenseI32ArrayAttr::get(odsBuilder.getContext(), segments));
+ odsState.addAttribute("asm_string", odsBuilder.getStringAttr(asmString));
+ odsState.addAttribute("constraints", odsBuilder.getStringAttr(constraints));
+ odsState.addAttribute("asm_flavor",
+ AsmFlavorAttr::get(odsBuilder.getContext(), asmFlavor));
+
+ if (sideEffects)
+ odsState.addAttribute("side_effects", odsBuilder.getUnitAttr());
+
+ odsState.addAttribute("operand_attrs", odsBuilder.getArrayAttr(operandAttrs));
+}
+
+ParseResult cir::InlineAsmOp::parse(OpAsmParser &parser,
+ OperationState &result) {
+ llvm::SmallVector<mlir::Attribute> operandAttrs;
+ llvm::SmallVector<int32_t> operandsGroupSizes;
+ std::string asmString, constraints;
+ Type resType;
+ MLIRContext *ctxt = parser.getBuilder().getContext();
+
+ auto error = [&](const Twine &msg) -> LogicalResult {
+ return parser.emitError(parser.getCurrentLocation(), msg);
+ };
+
+ auto expected = [&](const std::string &c) {
+ return error("expected '" + c + "'");
+ };
+
+ if (parser.parseLParen().failed())
+ return expected("(");
+
+ auto flavor = FieldParser<AsmFlavor, AsmFlavor>::parse(parser);
+ if (failed(flavor))
+ return error("Unknown AsmFlavor");
+
+ if (parser.parseComma().failed())
+ return expected(",");
+
+ auto parseValue = [&](Value &v) {
+ OpAsmParser::UnresolvedOperand op;
+
+ if (parser.parseOperand(op) || parser.parseColon())
+ return error("can't parse operand");
+
+ Type typ;
+ if (parser.parseType(typ).failed())
+ return error("can't parse operand type");
+ llvm::SmallVector<mlir::Value> tmp;
+ if (parser.resolveOperand(op, typ, tmp))
+ return error("can't resolve operand");
+ v = tmp[0];
+ return mlir::success();
+ };
+
+ auto parseOperands = [&](llvm::StringRef name) {
+ if (parser.parseKeyword(name).failed())
+ return error("expected " + name + " operands here");
+ if (parser.parseEqual().failed())
+ return expected("=");
+ if (parser.parseLSquare().failed())
+ return expected("[");
+
+ int size = 0;
+ if (parser.parseOptionalRSquare().succeeded()) {
+ operandsGroupSizes.push_back(size);
+ if (parser.parseComma())
+ return expected(",");
+ return mlir::success();
+ }
+
+ auto parseOperand = [&]() {
+ Value val;
+ if (parseValue(val).succeeded()) {
+ result.operands.push_back(val);
+ size++;
+
+ if (parser.parseOptionalLParen().failed()) {
+ operandAttrs.push_back(mlir::Attribute());
+ return mlir::success();
+ }
+
+ if (parser.parseKeyword("maybe_memory").succeeded()) {
+ operandAttrs.push_back(mlir::UnitAttr::get(ctxt));
+ if (parser.parseRParen())
+ return expected(")");
+ return mlir::success();
+ } else {
+ return expected("maybe_memory");
+ }
+ }
+ return mlir::failure();
+ };
+
+ if (parser.parseCommaSeparatedList(parseOperand).failed())
+ return mlir::failure();
+
+ if (parser.parseRSquare().failed() || parser.parseComma().failed())
+ return expected("]");
+ operandsGroupSizes.push_back(size);
+ return mlir::success();
+ };
+
+ if (parseOperands("out").failed() || parseOperands("in").failed() ||
+ parseOperands("in_out").failed())
+ return error("failed to parse operands");
+
+ if (parser.parseLBrace())
+ return expected("{");
+ if (parser.parseString(&asmString))
+ return error("asm string parsing failed");
+ if (parser.parseString(&constraints))
+ return error("constraints string parsing failed");
+ if (parser.parseRBrace())
+ return expected("}");
+ if (parser.parseRParen())
+ return expected(")");
+
+ if (parser.parseOptionalKeyword("side_effects").succeeded())
+ result.attributes.set("side_effects", UnitAttr::get(ctxt));
+
+ if (parser.parseOptionalArrow().succeeded() &&
+ parser.parseType(resType).failed())
+ return mlir::failure();
+
+ if (parser.parseOptionalAttrDict(result.attributes).failed())
+ return mlir::failure();
+
+ result.attributes.set("asm_flavor", AsmFlavorAttr::get(ctxt, *flavor));
+ result.attributes.set("asm_string", StringAttr::get(ctxt, asmString));
+ result.attributes.set("constraints", StringAttr::get(ctxt, constraints));
+ result.attributes.set("operand_attrs", ArrayAttr::get(ctxt, operandAttrs));
+ result.getOrAddProperties<InlineAsmOp::Properties>().operands_segments =
+ parser.getBuilder().getDenseI32ArrayAttr(operandsGroupSizes);
+ if (resType)
+ result.addTypes(TypeRange{resType});
+
+ return mlir::success();
+}
+
+//===----------------------------------------------------------------------===//
+// ThrowOp
+//===----------------------------------------------------------------------===//
+
+mlir::LogicalResult cir::ThrowOp::verify() {
+  // The rethrow form takes no operands and needs no further checks here.
+  if (rethrows())
+    return success();
+
+  // The non-rethrow form must carry at least the exception pointer, along
+  // with a 'type_info' symbol attribute.
+  if (getNumOperands() != 0) {
+ if (getTypeInfo())
+ return success();
+ return emitOpError() << "'type_info' symbol attribute missing";
+ }
+
+ return failure();
+}
+
+//===----------------------------------------------------------------------===//
// TableGen'd op method definitions
//===----------------------------------------------------------------------===//
diff --git a/clang/lib/CIR/Dialect/Transforms/CIRCanonicalize.cpp b/clang/lib/CIR/Dialect/Transforms/CIRCanonicalize.cpp
index 2eaa60c..d41ea0a 100644
--- a/clang/lib/CIR/Dialect/Transforms/CIRCanonicalize.cpp
+++ b/clang/lib/CIR/Dialect/Transforms/CIRCanonicalize.cpp
@@ -47,8 +47,8 @@ struct RemoveRedundantBranches : public OpRewritePattern<BrOp> {
Block *block = op.getOperation()->getBlock();
Block *dest = op.getDest();
- assert(!cir::MissingFeatures::labelOp());
-
+ if (isa<cir::LabelOp>(dest->front()))
+ return failure();
// Single edge between blocks: merge it.
if (block->getNumSuccessors() == 1 &&
dest->getSinglePredecessor() == block) {
diff --git a/clang/lib/CIR/Dialect/Transforms/CMakeLists.txt b/clang/lib/CIR/Dialect/Transforms/CMakeLists.txt
index 18beca7..df7a1a3 100644
--- a/clang/lib/CIR/Dialect/Transforms/CMakeLists.txt
+++ b/clang/lib/CIR/Dialect/Transforms/CMakeLists.txt
@@ -4,6 +4,7 @@ add_clang_library(MLIRCIRTransforms
FlattenCFG.cpp
HoistAllocas.cpp
LoweringPrepare.cpp
+ GotoSolver.cpp
DEPENDS
MLIRCIRPassIncGen
diff --git a/clang/lib/CIR/Dialect/Transforms/GotoSolver.cpp b/clang/lib/CIR/Dialect/Transforms/GotoSolver.cpp
new file mode 100644
index 0000000..c0db984
--- /dev/null
+++ b/clang/lib/CIR/Dialect/Transforms/GotoSolver.cpp
@@ -0,0 +1,57 @@
+//===- GotoSolver.cpp -----------------------------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+#include "PassDetail.h"
+#include "clang/CIR/Dialect/IR/CIRDialect.h"
+#include "clang/CIR/Dialect/Passes.h"
+#include "llvm/Support/TimeProfiler.h"
+#include <memory>
+
+using namespace mlir;
+using namespace cir;
+
+namespace {
+
+struct GotoSolverPass : public GotoSolverBase<GotoSolverPass> {
+ GotoSolverPass() = default;
+ void runOnOperation() override;
+};
+
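+// In effect (illustrative CIR syntax): the block that starts with
+//   cir.label "retry"
+// is recorded under "retry", and every
+//   cir.goto "retry"
+// is rewritten into a direct cir.br to that block; label ops are erased.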
+static void process(cir::FuncOp func) {
+ mlir::OpBuilder rewriter(func.getContext());
+ llvm::StringMap<Block *> labels;
+ llvm::SmallVector<cir::GotoOp, 4> gotos;
+
+ func.getBody().walk([&](mlir::Operation *op) {
+ if (auto lab = dyn_cast<cir::LabelOp>(op)) {
+      // try_emplace copies the label string into the map, so the label op can
+      // be safely erased afterwards.
+ labels.try_emplace(lab.getLabel(), lab->getBlock());
+ lab.erase();
+ } else if (auto goTo = dyn_cast<cir::GotoOp>(op)) {
+ gotos.push_back(goTo);
+ }
+ });
+
+ for (auto goTo : gotos) {
+ mlir::OpBuilder::InsertionGuard guard(rewriter);
+ rewriter.setInsertionPoint(goTo);
+ Block *dest = labels[goTo.getLabel()];
+ cir::BrOp::create(rewriter, goTo.getLoc(), dest);
+ goTo.erase();
+ }
+}
+
+void GotoSolverPass::runOnOperation() {
+ llvm::TimeTraceScope scope("Goto Solver");
+ getOperation()->walk(&process);
+}
+
+} // namespace
+
+std::unique_ptr<Pass> mlir::createGotoSolverPass() {
+ return std::make_unique<GotoSolverPass>();
+}
diff --git a/clang/lib/CIR/Dialect/Transforms/LoweringPrepare.cpp b/clang/lib/CIR/Dialect/Transforms/LoweringPrepare.cpp
index 66260eb..c15637d 100644
--- a/clang/lib/CIR/Dialect/Transforms/LoweringPrepare.cpp
+++ b/clang/lib/CIR/Dialect/Transforms/LoweringPrepare.cpp
@@ -8,7 +8,7 @@
#include "PassDetail.h"
#include "clang/AST/ASTContext.h"
-#include "clang/AST/CharUnits.h"
+#include "clang/Basic/TargetInfo.h"
#include "clang/CIR/Dialect/Builder/CIRBaseBuilder.h"
#include "clang/CIR/Dialect/IR/CIRDialect.h"
#include "clang/CIR/Dialect/IR/CIROpsEnums.h"
@@ -27,6 +27,7 @@ struct LoweringPreparePass : public LoweringPrepareBase<LoweringPreparePass> {
void runOnOp(mlir::Operation *op);
void lowerCastOp(cir::CastOp op);
+ void lowerComplexDivOp(cir::ComplexDivOp op);
void lowerComplexMulOp(cir::ComplexMulOp op);
void lowerUnaryOp(cir::UnaryOp op);
void lowerArrayDtor(cir::ArrayDtor op);
@@ -182,6 +183,280 @@ static mlir::Value buildComplexBinOpLibCall(
}
static llvm::StringRef
+getComplexDivLibCallName(llvm::APFloat::Semantics semantics) {
+ switch (semantics) {
+ case llvm::APFloat::S_IEEEhalf:
+ return "__divhc3";
+ case llvm::APFloat::S_IEEEsingle:
+ return "__divsc3";
+ case llvm::APFloat::S_IEEEdouble:
+ return "__divdc3";
+ case llvm::APFloat::S_PPCDoubleDouble:
+ return "__divtc3";
+ case llvm::APFloat::S_x87DoubleExtended:
+ return "__divxc3";
+ case llvm::APFloat::S_IEEEquad:
+ return "__divtc3";
+ default:
+ llvm_unreachable("unsupported floating point type");
+ }
+}
+
+static mlir::Value
+buildAlgebraicComplexDiv(CIRBaseBuilderTy &builder, mlir::Location loc,
+ mlir::Value lhsReal, mlir::Value lhsImag,
+ mlir::Value rhsReal, mlir::Value rhsImag) {
+ // (a+bi) / (c+di) = ((ac+bd)/(cc+dd)) + ((bc-ad)/(cc+dd))i
+ mlir::Value &a = lhsReal;
+ mlir::Value &b = lhsImag;
+ mlir::Value &c = rhsReal;
+ mlir::Value &d = rhsImag;
+
+ mlir::Value ac = builder.createBinop(loc, a, cir::BinOpKind::Mul, c); // a*c
+ mlir::Value bd = builder.createBinop(loc, b, cir::BinOpKind::Mul, d); // b*d
+ mlir::Value cc = builder.createBinop(loc, c, cir::BinOpKind::Mul, c); // c*c
+ mlir::Value dd = builder.createBinop(loc, d, cir::BinOpKind::Mul, d); // d*d
+ mlir::Value acbd =
+ builder.createBinop(loc, ac, cir::BinOpKind::Add, bd); // ac+bd
+ mlir::Value ccdd =
+ builder.createBinop(loc, cc, cir::BinOpKind::Add, dd); // cc+dd
+ mlir::Value resultReal =
+ builder.createBinop(loc, acbd, cir::BinOpKind::Div, ccdd);
+
+ mlir::Value bc = builder.createBinop(loc, b, cir::BinOpKind::Mul, c); // b*c
+ mlir::Value ad = builder.createBinop(loc, a, cir::BinOpKind::Mul, d); // a*d
+ mlir::Value bcad =
+ builder.createBinop(loc, bc, cir::BinOpKind::Sub, ad); // bc-ad
+ mlir::Value resultImag =
+ builder.createBinop(loc, bcad, cir::BinOpKind::Div, ccdd);
+ return builder.createComplexCreate(loc, resultReal, resultImag);
+}
+
+static mlir::Value
+buildRangeReductionComplexDiv(CIRBaseBuilderTy &builder, mlir::Location loc,
+ mlir::Value lhsReal, mlir::Value lhsImag,
+ mlir::Value rhsReal, mlir::Value rhsImag) {
+ // Implements Smith's algorithm for complex division.
+ // SMITH, R. L. Algorithm 116: Complex division. Commun. ACM 5, 8 (1962).
+
+ // Let:
+ // - lhs := a+bi
+ // - rhs := c+di
+ // - result := lhs / rhs = e+fi
+ //
+  // In pseudocode, the algorithm is as follows:
+ // if fabs(c) >= fabs(d):
+ // r := d / c
+ // tmp := c + r*d
+ // e = (a + b*r) / tmp
+ // f = (b - a*r) / tmp
+ // else:
+ // r := c / d
+ // tmp := d + r*c
+ // e = (a*r + b) / tmp
+ // f = (b*r - a) / tmp
+
+ mlir::Value &a = lhsReal;
+ mlir::Value &b = lhsImag;
+ mlir::Value &c = rhsReal;
+ mlir::Value &d = rhsImag;
+
+ auto trueBranchBuilder = [&](mlir::OpBuilder &, mlir::Location) {
+ mlir::Value r = builder.createBinop(loc, d, cir::BinOpKind::Div,
+ c); // r := d / c
+ mlir::Value rd = builder.createBinop(loc, r, cir::BinOpKind::Mul, d); // r*d
+ mlir::Value tmp = builder.createBinop(loc, c, cir::BinOpKind::Add,
+ rd); // tmp := c + r*d
+
+ mlir::Value br = builder.createBinop(loc, b, cir::BinOpKind::Mul, r); // b*r
+ mlir::Value abr =
+ builder.createBinop(loc, a, cir::BinOpKind::Add, br); // a + b*r
+ mlir::Value e = builder.createBinop(loc, abr, cir::BinOpKind::Div, tmp);
+
+ mlir::Value ar = builder.createBinop(loc, a, cir::BinOpKind::Mul, r); // a*r
+ mlir::Value bar =
+ builder.createBinop(loc, b, cir::BinOpKind::Sub, ar); // b - a*r
+ mlir::Value f = builder.createBinop(loc, bar, cir::BinOpKind::Div, tmp);
+
+ mlir::Value result = builder.createComplexCreate(loc, e, f);
+ builder.createYield(loc, result);
+ };
+
+ auto falseBranchBuilder = [&](mlir::OpBuilder &, mlir::Location) {
+ mlir::Value r = builder.createBinop(loc, c, cir::BinOpKind::Div,
+ d); // r := c / d
+ mlir::Value rc = builder.createBinop(loc, r, cir::BinOpKind::Mul, c); // r*c
+ mlir::Value tmp = builder.createBinop(loc, d, cir::BinOpKind::Add,
+ rc); // tmp := d + r*c
+
+ mlir::Value ar = builder.createBinop(loc, a, cir::BinOpKind::Mul, r); // a*r
+ mlir::Value arb =
+ builder.createBinop(loc, ar, cir::BinOpKind::Add, b); // a*r + b
+ mlir::Value e = builder.createBinop(loc, arb, cir::BinOpKind::Div, tmp);
+
+ mlir::Value br = builder.createBinop(loc, b, cir::BinOpKind::Mul, r); // b*r
+ mlir::Value bra =
+ builder.createBinop(loc, br, cir::BinOpKind::Sub, a); // b*r - a
+ mlir::Value f = builder.createBinop(loc, bra, cir::BinOpKind::Div, tmp);
+
+ mlir::Value result = builder.createComplexCreate(loc, e, f);
+ builder.createYield(loc, result);
+ };
+
+ auto cFabs = builder.create<cir::FAbsOp>(loc, c);
+ auto dFabs = builder.create<cir::FAbsOp>(loc, d);
+ cir::CmpOp cmpResult =
+ builder.createCompare(loc, cir::CmpOpKind::ge, cFabs, dFabs);
+ auto ternary = builder.create<cir::TernaryOp>(
+ loc, cmpResult, trueBranchBuilder, falseBranchBuilder);
+
+ return ternary.getResult();
+}
+
+static mlir::Type higherPrecisionElementTypeForComplexArithmetic(
+ mlir::MLIRContext &context, clang::ASTContext &cc,
+ CIRBaseBuilderTy &builder, mlir::Type elementType) {
+
+ auto getHigherPrecisionFPType = [&context](mlir::Type type) -> mlir::Type {
+ if (mlir::isa<cir::FP16Type>(type))
+ return cir::SingleType::get(&context);
+
+ if (mlir::isa<cir::SingleType>(type) || mlir::isa<cir::BF16Type>(type))
+ return cir::DoubleType::get(&context);
+
+ if (mlir::isa<cir::DoubleType>(type))
+ return cir::LongDoubleType::get(&context, type);
+
+ return type;
+ };
+
+ auto getFloatTypeSemantics =
+ [&cc](mlir::Type type) -> const llvm::fltSemantics & {
+ const clang::TargetInfo &info = cc.getTargetInfo();
+ if (mlir::isa<cir::FP16Type>(type))
+ return info.getHalfFormat();
+
+ if (mlir::isa<cir::BF16Type>(type))
+ return info.getBFloat16Format();
+
+ if (mlir::isa<cir::SingleType>(type))
+ return info.getFloatFormat();
+
+ if (mlir::isa<cir::DoubleType>(type))
+ return info.getDoubleFormat();
+
+ if (mlir::isa<cir::LongDoubleType>(type)) {
+ if (cc.getLangOpts().OpenMP && cc.getLangOpts().OpenMPIsTargetDevice)
+ llvm_unreachable("NYI Float type semantics with OpenMP");
+ return info.getLongDoubleFormat();
+ }
+
+ if (mlir::isa<cir::FP128Type>(type)) {
+ if (cc.getLangOpts().OpenMP && cc.getLangOpts().OpenMPIsTargetDevice)
+ llvm_unreachable("NYI Float type semantics with OpenMP");
+ return info.getFloat128Format();
+ }
+
+    llvm_unreachable("Unsupported float type semantics");
+ };
+
+ const mlir::Type higherElementType = getHigherPrecisionFPType(elementType);
+ const llvm::fltSemantics &elementTypeSemantics =
+ getFloatTypeSemantics(elementType);
+ const llvm::fltSemantics &higherElementTypeSemantics =
+ getFloatTypeSemantics(higherElementType);
+
+ // Check that the promoted type can handle the intermediate values without
+ // overflowing. This can be interpreted as:
+ // (SmallerType.LargestFiniteVal * SmallerType.LargestFiniteVal) * 2 <=
+ // LargerType.LargestFiniteVal.
+  // In terms of exponents: squaring LargestFiniteVal roughly doubles its
+  // exponent, so the check below requires
+  //     2 * maxExponent(SmallerType) + 1 <= maxExponent(LargerType).
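+  // For example, promoting float to double: semanticsMaxExponent(IEEEsingle)
+  // is 127 and semanticsMaxExponent(IEEEdouble) is 1023, and since
+  // 2 * 127 + 1 = 255 <= 1023, double can hold the float*float intermediates.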
+ if (llvm::APFloat::semanticsMaxExponent(elementTypeSemantics) * 2 + 1 <=
+ llvm::APFloat::semanticsMaxExponent(higherElementTypeSemantics)) {
+ return higherElementType;
+ }
+
+ // The intermediate values can't be represented in the promoted type
+ // without overflowing.
+ return {};
+}
+
+static mlir::Value
+lowerComplexDiv(LoweringPreparePass &pass, CIRBaseBuilderTy &builder,
+ mlir::Location loc, cir::ComplexDivOp op, mlir::Value lhsReal,
+ mlir::Value lhsImag, mlir::Value rhsReal, mlir::Value rhsImag,
+ mlir::MLIRContext &mlirCx, clang::ASTContext &cc) {
+ cir::ComplexType complexTy = op.getType();
+ if (mlir::isa<cir::FPTypeInterface>(complexTy.getElementType())) {
+ cir::ComplexRangeKind range = op.getRange();
+ if (range == cir::ComplexRangeKind::Improved)
+ return buildRangeReductionComplexDiv(builder, loc, lhsReal, lhsImag,
+ rhsReal, rhsImag);
+
+ if (range == cir::ComplexRangeKind::Full)
+ return buildComplexBinOpLibCall(pass, builder, &getComplexDivLibCallName,
+ loc, complexTy, lhsReal, lhsImag, rhsReal,
+ rhsImag);
+
+ if (range == cir::ComplexRangeKind::Promoted) {
+ mlir::Type originalElementType = complexTy.getElementType();
+ mlir::Type higherPrecisionElementType =
+ higherPrecisionElementTypeForComplexArithmetic(mlirCx, cc, builder,
+ originalElementType);
+
+ if (!higherPrecisionElementType)
+ return buildRangeReductionComplexDiv(builder, loc, lhsReal, lhsImag,
+ rhsReal, rhsImag);
+
+ cir::CastKind floatingCastKind = cir::CastKind::floating;
+ lhsReal = builder.createCast(floatingCastKind, lhsReal,
+ higherPrecisionElementType);
+ lhsImag = builder.createCast(floatingCastKind, lhsImag,
+ higherPrecisionElementType);
+ rhsReal = builder.createCast(floatingCastKind, rhsReal,
+ higherPrecisionElementType);
+ rhsImag = builder.createCast(floatingCastKind, rhsImag,
+ higherPrecisionElementType);
+
+ mlir::Value algebraicResult = buildAlgebraicComplexDiv(
+ builder, loc, lhsReal, lhsImag, rhsReal, rhsImag);
+
+ mlir::Value resultReal = builder.createComplexReal(loc, algebraicResult);
+ mlir::Value resultImag = builder.createComplexImag(loc, algebraicResult);
+
+ mlir::Value finalReal =
+ builder.createCast(floatingCastKind, resultReal, originalElementType);
+ mlir::Value finalImag =
+ builder.createCast(floatingCastKind, resultImag, originalElementType);
+ return builder.createComplexCreate(loc, finalReal, finalImag);
+ }
+ }
+
+ return buildAlgebraicComplexDiv(builder, loc, lhsReal, lhsImag, rhsReal,
+ rhsImag);
+}
+
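+// In summary: cir::ComplexDivOp is expanded into cir.complex.real/imag on the
+// operands plus scalar arithmetic. For floating-point element types the range
+// kind picks the strategy: Improved uses Smith's algorithm, Full calls the
+// __div*c3 runtime functions, and Promoted computes in a wider type when one
+// is available; everything else falls back to the plain algebraic form.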
+void LoweringPreparePass::lowerComplexDivOp(cir::ComplexDivOp op) {
+ cir::CIRBaseBuilderTy builder(getContext());
+ builder.setInsertionPointAfter(op);
+ mlir::Location loc = op.getLoc();
+ mlir::TypedValue<cir::ComplexType> lhs = op.getLhs();
+ mlir::TypedValue<cir::ComplexType> rhs = op.getRhs();
+ mlir::Value lhsReal = builder.createComplexReal(loc, lhs);
+ mlir::Value lhsImag = builder.createComplexImag(loc, lhs);
+ mlir::Value rhsReal = builder.createComplexReal(loc, rhs);
+ mlir::Value rhsImag = builder.createComplexImag(loc, rhs);
+
+ mlir::Value loweredResult =
+ lowerComplexDiv(*this, builder, loc, op, lhsReal, lhsImag, rhsReal,
+ rhsImag, getContext(), *astCtx);
+ op.replaceAllUsesWith(loweredResult);
+ op.erase();
+}
+
+static llvm::StringRef
getComplexMulLibCallName(llvm::APFloat::Semantics semantics) {
switch (semantics) {
case llvm::APFloat::S_IEEEhalf:
@@ -412,6 +687,8 @@ void LoweringPreparePass::runOnOp(mlir::Operation *op) {
lowerArrayDtor(arrayDtor);
else if (auto cast = mlir::dyn_cast<cir::CastOp>(op))
lowerCastOp(cast);
+ else if (auto complexDiv = mlir::dyn_cast<cir::ComplexDivOp>(op))
+ lowerComplexDivOp(complexDiv);
else if (auto complexMul = mlir::dyn_cast<cir::ComplexMulOp>(op))
lowerComplexMulOp(complexMul);
else if (auto unary = mlir::dyn_cast<cir::UnaryOp>(op))
@@ -427,7 +704,7 @@ void LoweringPreparePass::runOnOperation() {
op->walk([&](mlir::Operation *op) {
if (mlir::isa<cir::ArrayCtor, cir::ArrayDtor, cir::CastOp,
- cir::ComplexMulOp, cir::UnaryOp>(op))
+ cir::ComplexMulOp, cir::ComplexDivOp, cir::UnaryOp>(op))
opsToTransform.push_back(op);
});
diff --git a/clang/lib/CIR/Lowering/CIRPasses.cpp b/clang/lib/CIR/Lowering/CIRPasses.cpp
index bb9781b..ccc8387 100644
--- a/clang/lib/CIR/Lowering/CIRPasses.cpp
+++ b/clang/lib/CIR/Lowering/CIRPasses.cpp
@@ -45,6 +45,7 @@ namespace mlir {
void populateCIRPreLoweringPasses(OpPassManager &pm) {
pm.addPass(createHoistAllocasPass());
pm.addPass(createCIRFlattenCFGPass());
+ pm.addPass(createGotoSolverPass());
}
} // namespace mlir
diff --git a/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp b/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp
index 7e1c9fb..f1fdfed 100644
--- a/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp
+++ b/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp
@@ -201,7 +201,8 @@ public:
mlir::Value visit(mlir::Attribute attr) {
return llvm::TypeSwitch<mlir::Attribute, mlir::Value>(attr)
.Case<cir::IntAttr, cir::FPAttr, cir::ConstComplexAttr,
- cir::ConstArrayAttr, cir::ConstVectorAttr, cir::ConstPtrAttr,
+ cir::ConstArrayAttr, cir::ConstRecordAttr, cir::ConstVectorAttr,
+ cir::ConstPtrAttr, cir::GlobalViewAttr, cir::VTableAttr,
cir::ZeroAttr>([&](auto attrT) { return visitCirAttr(attrT); })
.Default([&](auto attrT) { return mlir::Value(); });
}
@@ -211,7 +212,10 @@ public:
mlir::Value visitCirAttr(cir::ConstComplexAttr complexAttr);
mlir::Value visitCirAttr(cir::ConstPtrAttr ptrAttr);
mlir::Value visitCirAttr(cir::ConstArrayAttr attr);
+ mlir::Value visitCirAttr(cir::ConstRecordAttr attr);
mlir::Value visitCirAttr(cir::ConstVectorAttr attr);
+ mlir::Value visitCirAttr(cir::GlobalViewAttr attr);
+ mlir::Value visitCirAttr(cir::VTableAttr attr);
mlir::Value visitCirAttr(cir::ZeroAttr attr);
private:
@@ -265,6 +269,26 @@ void convertSideEffectForCall(mlir::Operation *callOp, bool isNothrow,
}
}
+static mlir::LLVM::CallIntrinsicOp
+createCallLLVMIntrinsicOp(mlir::ConversionPatternRewriter &rewriter,
+ mlir::Location loc, const llvm::Twine &intrinsicName,
+ mlir::Type resultTy, mlir::ValueRange operands) {
+ auto intrinsicNameAttr =
+ mlir::StringAttr::get(rewriter.getContext(), intrinsicName);
+ return mlir::LLVM::CallIntrinsicOp::create(rewriter, loc, resultTy,
+ intrinsicNameAttr, operands);
+}
+
+static mlir::LLVM::CallIntrinsicOp replaceOpWithCallLLVMIntrinsicOp(
+ mlir::ConversionPatternRewriter &rewriter, mlir::Operation *op,
+ const llvm::Twine &intrinsicName, mlir::Type resultTy,
+ mlir::ValueRange operands) {
+ mlir::LLVM::CallIntrinsicOp callIntrinOp = createCallLLVMIntrinsicOp(
+ rewriter, op->getLoc(), intrinsicName, resultTy, operands);
+ rewriter.replaceOp(op, callIntrinOp.getOperation());
+ return callIntrinOp;
+}
+
/// IntAttr visitor.
mlir::Value CIRAttrToValue::visitCirAttr(cir::IntAttr intAttr) {
mlir::Location loc = parentOp->getLoc();
@@ -364,6 +388,21 @@ mlir::Value CIRAttrToValue::visitCirAttr(cir::ConstArrayAttr attr) {
return result;
}
+/// ConstRecord visitor.
+mlir::Value CIRAttrToValue::visitCirAttr(cir::ConstRecordAttr constRecord) {
+ const mlir::Type llvmTy = converter->convertType(constRecord.getType());
+ const mlir::Location loc = parentOp->getLoc();
+ mlir::Value result = rewriter.create<mlir::LLVM::UndefOp>(loc, llvmTy);
+
+ // Iteratively lower each constant element of the record.
+ for (auto [idx, elt] : llvm::enumerate(constRecord.getMembers())) {
+ mlir::Value init = visit(elt);
+ result = rewriter.create<mlir::LLVM::InsertValueOp>(loc, result, init, idx);
+ }
+
+ return result;
+}
+
/// ConstVectorAttr visitor.
mlir::Value CIRAttrToValue::visitCirAttr(cir::ConstVectorAttr attr) {
const mlir::Type llvmTy = converter->convertType(attr.getType());
@@ -391,6 +430,92 @@ mlir::Value CIRAttrToValue::visitCirAttr(cir::ConstVectorAttr attr) {
mlirValues));
}
+// GlobalViewAttr visitor.
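+// As a rough sketch (attribute syntax is illustrative), an initializer such as
+//   #cir.global_view<@g, [0, 2]> : !cir.ptr<!s32i>
+// lowers to an llvm.mlir.addressof of @g, a GEP over the given indices, and a
+// bitcast when the pointee type differs from the global's type.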
+mlir::Value CIRAttrToValue::visitCirAttr(cir::GlobalViewAttr globalAttr) {
+ auto moduleOp = parentOp->getParentOfType<mlir::ModuleOp>();
+ mlir::DataLayout dataLayout(moduleOp);
+ mlir::Type sourceType;
+ assert(!cir::MissingFeatures::addressSpace());
+ llvm::StringRef symName;
+ mlir::Operation *sourceSymbol =
+ mlir::SymbolTable::lookupSymbolIn(moduleOp, globalAttr.getSymbol());
+ if (auto llvmSymbol = dyn_cast<mlir::LLVM::GlobalOp>(sourceSymbol)) {
+ sourceType = llvmSymbol.getType();
+ symName = llvmSymbol.getSymName();
+ } else if (auto cirSymbol = dyn_cast<cir::GlobalOp>(sourceSymbol)) {
+ sourceType =
+ convertTypeForMemory(*converter, dataLayout, cirSymbol.getSymType());
+ symName = cirSymbol.getSymName();
+ } else if (auto llvmFun = dyn_cast<mlir::LLVM::LLVMFuncOp>(sourceSymbol)) {
+ sourceType = llvmFun.getFunctionType();
+ symName = llvmFun.getSymName();
+ } else if (auto fun = dyn_cast<cir::FuncOp>(sourceSymbol)) {
+ sourceType = converter->convertType(fun.getFunctionType());
+ symName = fun.getSymName();
+ } else if (auto alias = dyn_cast<mlir::LLVM::AliasOp>(sourceSymbol)) {
+ sourceType = alias.getType();
+ symName = alias.getSymName();
+ } else {
+ llvm_unreachable("Unexpected GlobalOp type");
+ }
+
+ mlir::Location loc = parentOp->getLoc();
+ mlir::Value addrOp = rewriter.create<mlir::LLVM::AddressOfOp>(
+ loc, mlir::LLVM::LLVMPointerType::get(rewriter.getContext()), symName);
+
+ if (globalAttr.getIndices()) {
+ llvm::SmallVector<mlir::LLVM::GEPArg> indices;
+
+ if (mlir::isa<mlir::LLVM::LLVMArrayType, mlir::LLVM::LLVMStructType>(
+ sourceType))
+ indices.push_back(0);
+
+ for (mlir::Attribute idx : globalAttr.getIndices()) {
+ auto intAttr = mlir::cast<mlir::IntegerAttr>(idx);
+ indices.push_back(intAttr.getValue().getSExtValue());
+ }
+ mlir::Type resTy = addrOp.getType();
+ mlir::Type eltTy = converter->convertType(sourceType);
+ addrOp = rewriter.create<mlir::LLVM::GEPOp>(
+ loc, resTy, eltTy, addrOp, indices, mlir::LLVM::GEPNoWrapFlags::none);
+ }
+
+ // The incubator has handling here for the attribute having integer type, but
+ // the only test case I could find that reaches it is a direct CIR-to-LLVM IR
+ // lowering with no clear indication of how the CIR might have been generated.
+ // We'll hit the unreachable below if this happens.
+ assert(!cir::MissingFeatures::globalViewIntLowering());
+
+ if (auto ptrTy = mlir::dyn_cast<cir::PointerType>(globalAttr.getType())) {
+ mlir::Type llvmEltTy =
+ convertTypeForMemory(*converter, dataLayout, ptrTy.getPointee());
+
+ if (llvmEltTy == sourceType)
+ return addrOp;
+
+ mlir::Type llvmDstTy = converter->convertType(globalAttr.getType());
+ return rewriter.create<mlir::LLVM::BitcastOp>(parentOp->getLoc(), llvmDstTy,
+ addrOp);
+ }
+
+ llvm_unreachable("Expecting pointer or integer type for GlobalViewAttr");
+}
+
+// VTableAttr visitor.
+mlir::Value CIRAttrToValue::visitCirAttr(cir::VTableAttr vtableArr) {
+ mlir::Type llvmTy = converter->convertType(vtableArr.getType());
+ mlir::Location loc = parentOp->getLoc();
+ mlir::Value result = mlir::LLVM::UndefOp::create(rewriter, loc, llvmTy);
+
+ for (auto [idx, elt] : llvm::enumerate(vtableArr.getData())) {
+ mlir::Value init = visit(elt);
+ result =
+ mlir::LLVM::InsertValueOp::create(rewriter, loc, result, init, idx);
+ }
+
+ return result;
+}
+
/// ZeroAttr visitor.
mlir::Value CIRAttrToValue::visitCirAttr(cir::ZeroAttr attr) {
mlir::Location loc = parentOp->getLoc();
@@ -460,6 +585,29 @@ mlir::LogicalResult CIRToLLVMAssumeOpLowering::matchAndRewrite(
return mlir::success();
}
+mlir::LogicalResult CIRToLLVMAssumeAlignedOpLowering::matchAndRewrite(
+ cir::AssumeAlignedOp op, OpAdaptor adaptor,
+ mlir::ConversionPatternRewriter &rewriter) const {
+ SmallVector<mlir::Value, 3> opBundleArgs{adaptor.getPointer()};
+
+ auto alignment = mlir::LLVM::ConstantOp::create(rewriter, op.getLoc(),
+ adaptor.getAlignmentAttr());
+ opBundleArgs.push_back(alignment);
+
+ if (mlir::Value offset = adaptor.getOffset())
+ opBundleArgs.push_back(offset);
+
+ auto cond = mlir::LLVM::ConstantOp::create(rewriter, op.getLoc(),
+ rewriter.getI1Type(), 1);
+ mlir::LLVM::AssumeOp::create(rewriter, op.getLoc(), cond, "align",
+ opBundleArgs);
+
+ // The llvm.assume operation does not have a result, so we need to replace
+ // all uses of this cir.assume_aligned operation with the input ptr itself.
+ rewriter.replaceOp(op, adaptor.getPointer());
+ return mlir::success();
+}
+
mlir::LogicalResult CIRToLLVMAssumeSepStorageOpLowering::matchAndRewrite(
cir::AssumeSepStorageOp op, OpAdaptor adaptor,
mlir::ConversionPatternRewriter &rewriter) const {
@@ -989,8 +1137,7 @@ rewriteCallOrInvoke(mlir::Operation *op, mlir::ValueRange callOperands,
auto calleeTy = op->getOperands().front().getType();
auto calleePtrTy = cast<cir::PointerType>(calleeTy);
auto calleeFuncTy = cast<cir::FuncType>(calleePtrTy.getPointee());
- calleeFuncTy.dump();
- converter->convertType(calleeFuncTy).dump();
+ llvm::append_range(adjustedCallOperands, callOperands);
llvmFnTy = cast<mlir::LLVM::LLVMFunctionType>(
converter->convertType(calleeFuncTy));
}
@@ -1016,12 +1163,50 @@ mlir::LogicalResult CIRToLLVMCallOpLowering::matchAndRewrite(
getTypeConverter(), op.getCalleeAttr());
}
+mlir::LogicalResult CIRToLLVMReturnAddrOpLowering::matchAndRewrite(
+ cir::ReturnAddrOp op, OpAdaptor adaptor,
+ mlir::ConversionPatternRewriter &rewriter) const {
+ auto llvmPtrTy = mlir::LLVM::LLVMPointerType::get(rewriter.getContext());
+ replaceOpWithCallLLVMIntrinsicOp(rewriter, op, "llvm.returnaddress",
+ llvmPtrTy, adaptor.getOperands());
+ return mlir::success();
+}
+
+mlir::LogicalResult CIRToLLVMFrameAddrOpLowering::matchAndRewrite(
+ cir::FrameAddrOp op, OpAdaptor adaptor,
+ mlir::ConversionPatternRewriter &rewriter) const {
+ auto llvmPtrTy = mlir::LLVM::LLVMPointerType::get(rewriter.getContext());
+ replaceOpWithCallLLVMIntrinsicOp(rewriter, op, "llvm.frameaddress", llvmPtrTy,
+ adaptor.getOperands());
+ return mlir::success();
+}
+
+static mlir::LLVM::AtomicOrdering
+getLLVMMemOrder(std::optional<cir::MemOrder> memorder) {
+ if (!memorder)
+ return mlir::LLVM::AtomicOrdering::not_atomic;
+ switch (*memorder) {
+ case cir::MemOrder::Relaxed:
+ return mlir::LLVM::AtomicOrdering::monotonic;
+ case cir::MemOrder::Consume:
+ case cir::MemOrder::Acquire:
+ return mlir::LLVM::AtomicOrdering::acquire;
+ case cir::MemOrder::Release:
+ return mlir::LLVM::AtomicOrdering::release;
+ case cir::MemOrder::AcquireRelease:
+ return mlir::LLVM::AtomicOrdering::acq_rel;
+ case cir::MemOrder::SequentiallyConsistent:
+ return mlir::LLVM::AtomicOrdering::seq_cst;
+ }
+ llvm_unreachable("unknown memory order");
+}
+
mlir::LogicalResult CIRToLLVMLoadOpLowering::matchAndRewrite(
cir::LoadOp op, OpAdaptor adaptor,
mlir::ConversionPatternRewriter &rewriter) const {
const mlir::Type llvmTy =
convertTypeForMemory(*getTypeConverter(), dataLayout, op.getType());
- assert(!cir::MissingFeatures::opLoadStoreMemOrder());
+ mlir::LLVM::AtomicOrdering ordering = getLLVMMemOrder(op.getMemOrder());
std::optional<size_t> opAlign = op.getAlignment();
unsigned alignment =
(unsigned)opAlign.value_or(dataLayout.getTypeABIAlignment(llvmTy));
@@ -1030,11 +1215,10 @@ mlir::LogicalResult CIRToLLVMLoadOpLowering::matchAndRewrite(
// TODO: nontemporal, syncscope.
assert(!cir::MissingFeatures::opLoadStoreVolatile());
- mlir::LLVM::LoadOp newLoad = rewriter.create<mlir::LLVM::LoadOp>(
- op->getLoc(), llvmTy, adaptor.getAddr(), alignment,
- /*volatile=*/false, /*nontemporal=*/false,
- /*invariant=*/false, /*invariantGroup=*/false,
- mlir::LLVM::AtomicOrdering::not_atomic);
+ mlir::LLVM::LoadOp newLoad = mlir::LLVM::LoadOp::create(
+ rewriter, op->getLoc(), llvmTy, adaptor.getAddr(), alignment,
+ /*isVolatile=*/false, /*isNonTemporal=*/false,
+ /*isInvariant=*/false, /*isInvariantGroup=*/false, ordering);
// Convert adapted result to its original type if needed.
mlir::Value result =
@@ -1047,7 +1231,7 @@ mlir::LogicalResult CIRToLLVMLoadOpLowering::matchAndRewrite(
mlir::LogicalResult CIRToLLVMStoreOpLowering::matchAndRewrite(
cir::StoreOp op, OpAdaptor adaptor,
mlir::ConversionPatternRewriter &rewriter) const {
- assert(!cir::MissingFeatures::opLoadStoreMemOrder());
+ mlir::LLVM::AtomicOrdering memorder = getLLVMMemOrder(op.getMemOrder());
const mlir::Type llvmTy =
getTypeConverter()->convertType(op.getValue().getType());
std::optional<size_t> opAlign = op.getAlignment();
@@ -1061,10 +1245,10 @@ mlir::LogicalResult CIRToLLVMStoreOpLowering::matchAndRewrite(
op.getValue().getType(), adaptor.getValue());
// TODO: nontemporal, syncscope.
assert(!cir::MissingFeatures::opLoadStoreVolatile());
- mlir::LLVM::StoreOp storeOp = rewriter.create<mlir::LLVM::StoreOp>(
- op->getLoc(), value, adaptor.getAddr(), alignment, /*volatile=*/false,
- /*nontemporal=*/false, /*invariantGroup=*/false,
- mlir::LLVM::AtomicOrdering::not_atomic);
+ mlir::LLVM::StoreOp storeOp = mlir::LLVM::StoreOp::create(
+ rewriter, op->getLoc(), value, adaptor.getAddr(), alignment,
+ /*isVolatile=*/false,
+ /*isNonTemporal=*/false, /*isInvariantGroup=*/false, memorder);
rewriter.replaceOp(op, storeOp);
assert(!cir::MissingFeatures::opLoadStoreTbaa());
return mlir::LogicalResult::success();
@@ -1101,7 +1285,13 @@ mlir::LogicalResult CIRToLLVMConstantOpLowering::matchAndRewrite(
attr = rewriter.getIntegerAttr(typeConverter->convertType(op.getType()),
value);
} else if (mlir::isa<cir::IntType>(op.getType())) {
- assert(!cir::MissingFeatures::opGlobalViewAttr());
+    // A GlobalViewAttr of integer type would lower to llvm.mlir.addressof
+    // followed by llvm.mlir.ptrtoint, but that is not implemented yet.
+ if (auto ga = mlir::dyn_cast<cir::GlobalViewAttr>(op.getValue())) {
+ // See the comment in visitCirAttr for why this isn't implemented.
+ assert(!cir::MissingFeatures::globalViewIntLowering());
+ op.emitError() << "global view with integer type";
+ return mlir::failure();
+ }
attr = rewriter.getIntegerAttr(
typeConverter->convertType(op.getType()),
@@ -1119,7 +1309,12 @@ mlir::LogicalResult CIRToLLVMConstantOpLowering::matchAndRewrite(
return mlir::success();
}
}
- assert(!cir::MissingFeatures::opGlobalViewAttr());
+ // Lower GlobalViewAttr to llvm.mlir.addressof
+ if (auto gv = mlir::dyn_cast<cir::GlobalViewAttr>(op.getValue())) {
+ auto newOp = lowerCirAttrAsValue(op, gv, rewriter, getTypeConverter());
+ rewriter.replaceOp(op, newOp);
+ return mlir::success();
+ }
attr = op.getValue();
} else if (const auto arrTy = mlir::dyn_cast<cir::ArrayType>(op.getType())) {
const auto constArr = mlir::dyn_cast<cir::ConstArrayAttr>(op.getValue());
@@ -1142,6 +1337,11 @@ mlir::LogicalResult CIRToLLVMConstantOpLowering::matchAndRewrite(
rewriter.eraseOp(op);
return mlir::success();
}
+ } else if (const auto recordAttr =
+ mlir::dyn_cast<cir::ConstRecordAttr>(op.getValue())) {
+ auto initVal = lowerCirAttrAsValue(op, recordAttr, rewriter, typeConverter);
+ rewriter.replaceOp(op, initVal);
+ return mlir::success();
} else if (const auto vecTy = mlir::dyn_cast<cir::VectorType>(op.getType())) {
rewriter.replaceOp(op, lowerCirAttrAsValue(op, op.getValue(), rewriter,
getTypeConverter()));
@@ -1204,6 +1404,15 @@ mlir::LogicalResult CIRToLLVMExpectOpLowering::matchAndRewrite(
return mlir::success();
}
+mlir::LogicalResult CIRToLLVMFAbsOpLowering::matchAndRewrite(
+ cir::FAbsOp op, OpAdaptor adaptor,
+ mlir::ConversionPatternRewriter &rewriter) const {
+ mlir::Type resTy = typeConverter->convertType(op.getType());
+ rewriter.replaceOpWithNewOp<mlir::LLVM::FAbsOp>(op, resTy,
+ adaptor.getOperands()[0]);
+ return mlir::success();
+}
+
/// Convert the `cir.func` attributes to `llvm.func` attributes.
/// Only retain those attributes that are not constructed by
/// `LLVMFuncOp::build`. If `filterArgAttrs` is set, also filter out
@@ -1349,7 +1558,7 @@ void CIRToLLVMGlobalOpLowering::setupRegionInitializedLLVMGlobalOp(
// in CIRToLLVMGlobalOpLowering::matchAndRewrite() but that will go
// away when the placeholders are no longer needed.
assert(!cir::MissingFeatures::opGlobalConstant());
- const bool isConst = false;
+ const bool isConst = op.getConstant();
assert(!cir::MissingFeatures::addressSpace());
const unsigned addrSpace = 0;
const bool isDsoLocal = op.getDsoLocal();
@@ -1374,8 +1583,9 @@ CIRToLLVMGlobalOpLowering::matchAndRewriteRegionInitializedGlobal(
cir::GlobalOp op, mlir::Attribute init,
mlir::ConversionPatternRewriter &rewriter) const {
// TODO: Generalize this handling when more types are needed here.
- assert((isa<cir::ConstArrayAttr, cir::ConstVectorAttr, cir::ConstPtrAttr,
- cir::ConstComplexAttr, cir::ZeroAttr>(init)));
+ assert((isa<cir::ConstArrayAttr, cir::ConstRecordAttr, cir::ConstVectorAttr,
+ cir::ConstPtrAttr, cir::ConstComplexAttr, cir::GlobalViewAttr,
+ cir::VTableAttr, cir::ZeroAttr>(init)));
// TODO(cir): once LLVM's dialect has proper equivalent attributes this
// should be updated. For now, we use a custom op to initialize globals
@@ -1428,8 +1638,9 @@ mlir::LogicalResult CIRToLLVMGlobalOpLowering::matchAndRewrite(
return mlir::failure();
}
} else if (mlir::isa<cir::ConstArrayAttr, cir::ConstVectorAttr,
- cir::ConstPtrAttr, cir::ConstComplexAttr,
- cir::ZeroAttr>(init.value())) {
+ cir::ConstRecordAttr, cir::ConstPtrAttr,
+ cir::ConstComplexAttr, cir::GlobalViewAttr,
+ cir::VTableAttr, cir::ZeroAttr>(init.value())) {
// TODO(cir): once LLVM's dialect has proper equivalent attributes this
// should be updated. For now, we use a custom op to initialize globals
// to the appropriate value.
@@ -1981,6 +2192,10 @@ static void prepareTypeConverter(mlir::LLVMTypeConverter &converter,
return mlir::LLVM::LLVMPointerType::get(type.getContext(), targetAS);
});
+ converter.addConversion([&](cir::VPtrType type) -> mlir::Type {
+ assert(!cir::MissingFeatures::addressSpace());
+ return mlir::LLVM::LLVMPointerType::get(type.getContext());
+ });
converter.addConversion([&](cir::ArrayType type) -> mlir::Type {
mlir::Type ty =
convertTypeForMemory(converter, dataLayout, type.getElementType());
@@ -2057,6 +2272,9 @@ static void prepareTypeConverter(mlir::LLVMTypeConverter &converter,
}
break;
}
+ converter.addConversion([&](cir::VoidType type) -> mlir::Type {
+ return mlir::LLVM::LLVMVoidType::get(type.getContext());
+ });
// Record has a name: lower as an identified record.
mlir::LLVM::LLVMStructType llvmStruct;
@@ -2072,6 +2290,9 @@ static void prepareTypeConverter(mlir::LLVMTypeConverter &converter,
return llvmStruct;
});
+ converter.addConversion([&](cir::VoidType type) -> mlir::Type {
+ return mlir::LLVM::LLVMVoidType::get(type.getContext());
+ });
}
// The applyPartialConversion function traverses blocks in the dominance order,
@@ -2143,6 +2364,11 @@ void ConvertCIRToLLVMPass::processCIRAttrs(mlir::ModuleOp module) {
module->getAttr(cir::CIRDialect::getTripleAttrName()))
module->setAttr(mlir::LLVM::LLVMDialect::getTargetTripleAttrName(),
tripleAttr);
+
+ if (mlir::Attribute asmAttr =
+ module->getAttr(cir::CIRDialect::getModuleLevelAsmAttrName()))
+ module->setAttr(mlir::LLVM::LLVMDialect::getModuleLevelAsmAttrName(),
+ asmAttr);
}
void ConvertCIRToLLVMPass::runOnOperation() {
@@ -2165,9 +2391,12 @@ void ConvertCIRToLLVMPass::runOnOperation() {
patterns.add<CIRToLLVMCastOpLowering>(converter, patterns.getContext(), dl);
patterns.add<CIRToLLVMPtrStrideOpLowering>(converter, patterns.getContext(),
dl);
+ patterns.add<CIRToLLVMInlineAsmOpLowering>(converter, patterns.getContext(),
+ dl);
patterns.add<
// clang-format off
CIRToLLVMAssumeOpLowering,
+ CIRToLLVMAssumeAlignedOpLowering,
CIRToLLVMAssumeSepStorageOpLowering,
CIRToLLVMBaseClassAddrOpLowering,
CIRToLLVMBinOpLowering,
@@ -2192,10 +2421,13 @@ void ConvertCIRToLLVMPass::runOnOperation() {
CIRToLLVMComplexSubOpLowering,
CIRToLLVMConstantOpLowering,
CIRToLLVMExpectOpLowering,
+ CIRToLLVMFAbsOpLowering,
+ CIRToLLVMFrameAddrOpLowering,
CIRToLLVMFuncOpLowering,
CIRToLLVMGetBitfieldOpLowering,
CIRToLLVMGetGlobalOpLowering,
CIRToLLVMGetMemberOpLowering,
+ CIRToLLVMReturnAddrOpLowering,
CIRToLLVMRotateOpLowering,
CIRToLLVMSelectOpLowering,
CIRToLLVMSetBitfieldOpLowering,
@@ -2203,8 +2435,13 @@ void ConvertCIRToLLVMPass::runOnOperation() {
CIRToLLVMStackRestoreOpLowering,
CIRToLLVMStackSaveOpLowering,
CIRToLLVMSwitchFlatOpLowering,
+ CIRToLLVMThrowOpLowering,
CIRToLLVMTrapOpLowering,
CIRToLLVMUnaryOpLowering,
+ CIRToLLVMUnreachableOpLowering,
+ CIRToLLVMVAArgOpLowering,
+ CIRToLLVMVAEndOpLowering,
+ CIRToLLVMVAStartOpLowering,
CIRToLLVMVecCmpOpLowering,
CIRToLLVMVecCreateOpLowering,
CIRToLLVMVecExtractOpLowering,
@@ -2213,7 +2450,10 @@ void ConvertCIRToLLVMPass::runOnOperation() {
CIRToLLVMVecShuffleOpLowering,
CIRToLLVMVecSplatOpLowering,
CIRToLLVMVecTernaryOpLowering,
- CIRToLLVMUnreachableOpLowering
+ CIRToLLVMVTableAddrPointOpLowering,
+ CIRToLLVMVTableGetVPtrOpLowering,
+ CIRToLLVMVTableGetVirtualFnAddrOpLowering,
+ CIRToLLVMVTTAddrPointOpLowering
// clang-format on
>(converter, patterns.getContext());
@@ -2276,6 +2516,42 @@ mlir::LogicalResult CIRToLLVMUnreachableOpLowering::matchAndRewrite(
return mlir::success();
}
+void createLLVMFuncOpIfNotExist(mlir::ConversionPatternRewriter &rewriter,
+ mlir::Operation *srcOp, llvm::StringRef fnName,
+ mlir::Type fnTy) {
+ auto modOp = srcOp->getParentOfType<mlir::ModuleOp>();
+ auto enclosingFnOp = srcOp->getParentOfType<mlir::LLVM::LLVMFuncOp>();
+ mlir::Operation *sourceSymbol =
+ mlir::SymbolTable::lookupSymbolIn(modOp, fnName);
+ if (!sourceSymbol) {
+ mlir::OpBuilder::InsertionGuard guard(rewriter);
+ rewriter.setInsertionPoint(enclosingFnOp);
+ rewriter.create<mlir::LLVM::LLVMFuncOp>(srcOp->getLoc(), fnName, fnTy);
+ }
+}
+
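+// A sketch of the rethrow case handled below: a rethrowing cir.throw (no
+// operands) becomes roughly
+//   llvm.call @__cxa_rethrow() : () -> ()
+// after ensuring a declaration of __cxa_rethrow exists in the module.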
+mlir::LogicalResult CIRToLLVMThrowOpLowering::matchAndRewrite(
+ cir::ThrowOp op, OpAdaptor adaptor,
+ mlir::ConversionPatternRewriter &rewriter) const {
+ if (op.rethrows()) {
+ auto voidTy = mlir::LLVM::LLVMVoidType::get(getContext());
+ auto funcTy =
+ mlir::LLVM::LLVMFunctionType::get(getContext(), voidTy, {}, false);
+
+ auto mlirModule = op->getParentOfType<mlir::ModuleOp>();
+ rewriter.setInsertionPointToStart(&mlirModule.getBodyRegion().front());
+
+ const llvm::StringRef functionName = "__cxa_rethrow";
+ createLLVMFuncOpIfNotExist(rewriter, op, functionName, funcTy);
+
+ rewriter.setInsertionPointAfter(op.getOperation());
+ rewriter.replaceOpWithNewOp<mlir::LLVM::CallOp>(
+ op, mlir::TypeRange{}, functionName, mlir::ValueRange{});
+ }
+
+ return mlir::success();
+}
+
mlir::LogicalResult CIRToLLVMTrapOpLowering::matchAndRewrite(
cir::TrapOp op, OpAdaptor adaptor,
mlir::ConversionPatternRewriter &rewriter) const {
@@ -2292,6 +2568,106 @@ mlir::LogicalResult CIRToLLVMTrapOpLowering::matchAndRewrite(
return mlir::success();
}
+static mlir::Value
+getValueForVTableSymbol(mlir::Operation *op,
+ mlir::ConversionPatternRewriter &rewriter,
+ const mlir::TypeConverter *converter,
+ mlir::FlatSymbolRefAttr nameAttr, mlir::Type &eltType) {
+ auto module = op->getParentOfType<mlir::ModuleOp>();
+ mlir::Operation *symbol = mlir::SymbolTable::lookupSymbolIn(module, nameAttr);
+ if (auto llvmSymbol = mlir::dyn_cast<mlir::LLVM::GlobalOp>(symbol)) {
+ eltType = llvmSymbol.getType();
+ } else if (auto cirSymbol = mlir::dyn_cast<cir::GlobalOp>(symbol)) {
+ eltType = converter->convertType(cirSymbol.getSymType());
+ } else {
+ op->emitError() << "unexpected symbol type for " << symbol;
+ return {};
+ }
+
+ return mlir::LLVM::AddressOfOp::create(
+ rewriter, op->getLoc(),
+ mlir::LLVM::LLVMPointerType::get(op->getContext()), nameAttr.getValue());
+}
+
+mlir::LogicalResult CIRToLLVMVTableAddrPointOpLowering::matchAndRewrite(
+ cir::VTableAddrPointOp op, OpAdaptor adaptor,
+ mlir::ConversionPatternRewriter &rewriter) const {
+ const mlir::TypeConverter *converter = getTypeConverter();
+ mlir::Type targetType = converter->convertType(op.getType());
+ llvm::SmallVector<mlir::LLVM::GEPArg> offsets;
+ mlir::Type eltType;
+ mlir::Value symAddr = getValueForVTableSymbol(op, rewriter, converter,
+ op.getNameAttr(), eltType);
+ if (!symAddr)
+ return op.emitError() << "Unable to get value for vtable symbol";
+
+ offsets = llvm::SmallVector<mlir::LLVM::GEPArg>{
+ 0, op.getAddressPointAttr().getIndex(),
+ op.getAddressPointAttr().getOffset()};
+
+ assert(eltType && "Shouldn't ever be missing an eltType here");
+ mlir::LLVM::GEPNoWrapFlags inboundsNuw =
+ mlir::LLVM::GEPNoWrapFlags::inbounds | mlir::LLVM::GEPNoWrapFlags::nuw;
+ rewriter.replaceOpWithNewOp<mlir::LLVM::GEPOp>(op, targetType, eltType,
+ symAddr, offsets, inboundsNuw);
+ return mlir::success();
+}
+
+mlir::LogicalResult CIRToLLVMVTableGetVPtrOpLowering::matchAndRewrite(
+ cir::VTableGetVPtrOp op, OpAdaptor adaptor,
+ mlir::ConversionPatternRewriter &rewriter) const {
+ // cir.vtable.get_vptr is equivalent to a bitcast from the source object
+ // pointer to the vptr type. Since the LLVM dialect uses opaque pointers
+ // we can just replace uses of this operation with the original pointer.
+ mlir::Value srcVal = adaptor.getSrc();
+ rewriter.replaceAllUsesWith(op, srcVal);
+ rewriter.eraseOp(op);
+ return mlir::success();
+}
+
+mlir::LogicalResult CIRToLLVMVTableGetVirtualFnAddrOpLowering::matchAndRewrite(
+ cir::VTableGetVirtualFnAddrOp op, OpAdaptor adaptor,
+ mlir::ConversionPatternRewriter &rewriter) const {
+ mlir::Type targetType = getTypeConverter()->convertType(op.getType());
+ auto eltType = mlir::LLVM::LLVMPointerType::get(rewriter.getContext());
+ llvm::SmallVector<mlir::LLVM::GEPArg> offsets =
+ llvm::SmallVector<mlir::LLVM::GEPArg>{op.getIndex()};
+ rewriter.replaceOpWithNewOp<mlir::LLVM::GEPOp>(
+ op, targetType, eltType, adaptor.getVptr(), offsets,
+ mlir::LLVM::GEPNoWrapFlags::inbounds);
+ return mlir::success();
+}
+
+mlir::LogicalResult CIRToLLVMVTTAddrPointOpLowering::matchAndRewrite(
+ cir::VTTAddrPointOp op, OpAdaptor adaptor,
+ mlir::ConversionPatternRewriter &rewriter) const {
+ const mlir::Type resultType = getTypeConverter()->convertType(op.getType());
+ llvm::SmallVector<mlir::LLVM::GEPArg> offsets;
+ mlir::Type eltType;
+ mlir::Value llvmAddr = adaptor.getSymAddr();
+
+ if (op.getSymAddr()) {
+ if (op.getOffset() == 0) {
+ rewriter.replaceOp(op, {llvmAddr});
+ return mlir::success();
+ }
+
+ offsets.push_back(adaptor.getOffset());
+ eltType = mlir::IntegerType::get(resultType.getContext(), 8,
+ mlir::IntegerType::Signless);
+ } else {
+ llvmAddr = getValueForVTableSymbol(op, rewriter, getTypeConverter(),
+ op.getNameAttr(), eltType);
+ assert(eltType && "Shouldn't ever be missing an eltType here");
+ offsets.push_back(0);
+ offsets.push_back(adaptor.getOffset());
+ }
+ rewriter.replaceOpWithNewOp<mlir::LLVM::GEPOp>(
+ op, resultType, eltType, llvmAddr, offsets,
+ mlir::LLVM::GEPNoWrapFlags::inbounds);
+ return mlir::success();
+}
+
mlir::LogicalResult CIRToLLVMStackSaveOpLowering::matchAndRewrite(
cir::StackSaveOp op, OpAdaptor adaptor,
mlir::ConversionPatternRewriter &rewriter) const {
@@ -2797,6 +3173,105 @@ mlir::LogicalResult CIRToLLVMGetBitfieldOpLowering::matchAndRewrite(
return mlir::success();
}
+mlir::LogicalResult CIRToLLVMInlineAsmOpLowering::matchAndRewrite(
+ cir::InlineAsmOp op, OpAdaptor adaptor,
+ mlir::ConversionPatternRewriter &rewriter) const {
+ mlir::Type llResTy;
+ if (op.getNumResults())
+ llResTy = getTypeConverter()->convertType(op.getType(0));
+
+ cir::AsmFlavor dialect = op.getAsmFlavor();
+ mlir::LLVM::AsmDialect llDialect = dialect == cir::AsmFlavor::x86_att
+ ? mlir::LLVM::AsmDialect::AD_ATT
+ : mlir::LLVM::AsmDialect::AD_Intel;
+
+ SmallVector<mlir::Attribute> opAttrs;
+ StringRef llvmAttrName = mlir::LLVM::InlineAsmOp::getElementTypeAttrName();
+
+  // This placeholder matters for the later translation from the LLVM dialect
+  // to LLVM IR: if the operation has no result (i.e. it returns void), the
+  // element type attribute would otherwise be attached to the whole
+  // instruction rather than to the operand.
+ if (!op.getNumResults())
+ opAttrs.push_back(mlir::Attribute());
+
+ SmallVector<mlir::Value> llvmOperands;
+ SmallVector<mlir::Value> cirOperands;
+ for (auto const &[llvmOp, cirOp] :
+ zip(adaptor.getAsmOperands(), op.getAsmOperands())) {
+ append_range(llvmOperands, llvmOp);
+ append_range(cirOperands, cirOp);
+ }
+
+  // So far, the LLVM dialect element type attribute is inferred from the
+  // CIR operand type.
+ for (auto const &[cirOpAttr, cirOp] :
+ zip(op.getOperandAttrs(), cirOperands)) {
+ if (!cirOpAttr) {
+ opAttrs.push_back(mlir::Attribute());
+ continue;
+ }
+
+ llvm::SmallVector<mlir::NamedAttribute, 1> attrs;
+ cir::PointerType typ = mlir::cast<cir::PointerType>(cirOp.getType());
+ mlir::TypeAttr typAttr = mlir::TypeAttr::get(convertTypeForMemory(
+ *getTypeConverter(), dataLayout, typ.getPointee()));
+
+ attrs.push_back(rewriter.getNamedAttr(llvmAttrName, typAttr));
+ mlir::DictionaryAttr newDict = rewriter.getDictionaryAttr(attrs);
+ opAttrs.push_back(newDict);
+ }
+
+ rewriter.replaceOpWithNewOp<mlir::LLVM::InlineAsmOp>(
+ op, llResTy, llvmOperands, op.getAsmStringAttr(), op.getConstraintsAttr(),
+ op.getSideEffectsAttr(),
+ /*is_align_stack*/ mlir::UnitAttr(),
+ /*tail_call_kind*/
+ mlir::LLVM::TailCallKindAttr::get(
+ getContext(), mlir::LLVM::tailcallkind::TailCallKind::None),
+ mlir::LLVM::AsmDialectAttr::get(getContext(), llDialect),
+ rewriter.getArrayAttr(opAttrs));
+
+ return mlir::success();
+}
+
+mlir::LogicalResult CIRToLLVMVAStartOpLowering::matchAndRewrite(
+ cir::VAStartOp op, OpAdaptor adaptor,
+ mlir::ConversionPatternRewriter &rewriter) const {
+ auto opaquePtr = mlir::LLVM::LLVMPointerType::get(getContext());
+ auto vaList = mlir::LLVM::BitcastOp::create(rewriter, op.getLoc(), opaquePtr,
+ adaptor.getArgList());
+ rewriter.replaceOpWithNewOp<mlir::LLVM::VaStartOp>(op, vaList);
+ return mlir::success();
+}
+
+mlir::LogicalResult CIRToLLVMVAEndOpLowering::matchAndRewrite(
+ cir::VAEndOp op, OpAdaptor adaptor,
+ mlir::ConversionPatternRewriter &rewriter) const {
+ auto opaquePtr = mlir::LLVM::LLVMPointerType::get(getContext());
+ auto vaList = mlir::LLVM::BitcastOp::create(rewriter, op.getLoc(), opaquePtr,
+ adaptor.getArgList());
+ rewriter.replaceOpWithNewOp<mlir::LLVM::VaEndOp>(op, vaList);
+ return mlir::success();
+}
+
+mlir::LogicalResult CIRToLLVMVAArgOpLowering::matchAndRewrite(
+ cir::VAArgOp op, OpAdaptor adaptor,
+ mlir::ConversionPatternRewriter &rewriter) const {
+ assert(!cir::MissingFeatures::vaArgABILowering());
+ auto opaquePtr = mlir::LLVM::LLVMPointerType::get(getContext());
+ auto vaList = mlir::LLVM::BitcastOp::create(rewriter, op.getLoc(), opaquePtr,
+ adaptor.getArgList());
+
+ mlir::Type llvmType =
+ getTypeConverter()->convertType(op->getResultTypes().front());
+ if (!llvmType)
+ return mlir::failure();
+
+ rewriter.replaceOpWithNewOp<mlir::LLVM::VaArgOp>(op, llvmType, vaList);
+ return mlir::success();
+}
+
std::unique_ptr<mlir::Pass> createConvertCIRToLLVMPass() {
return std::make_unique<ConvertCIRToLLVMPass>();
}
diff --git a/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.h b/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.h
index c5106cb..da7df89 100644
--- a/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.h
+++ b/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.h
@@ -44,6 +44,16 @@ public:
mlir::ConversionPatternRewriter &) const override;
};
+class CIRToLLVMAssumeAlignedOpLowering
+ : public mlir::OpConversionPattern<cir::AssumeAlignedOp> {
+public:
+ using mlir::OpConversionPattern<cir::AssumeAlignedOp>::OpConversionPattern;
+
+ mlir::LogicalResult
+ matchAndRewrite(cir::AssumeAlignedOp op, OpAdaptor,
+ mlir::ConversionPatternRewriter &) const override;
+};
+
class CIRToLLVMAssumeSepStorageOpLowering
: public mlir::OpConversionPattern<cir::AssumeSepStorageOp> {
public:
@@ -199,6 +209,26 @@ public:
mlir::ConversionPatternRewriter &rewriter) const override;
};
+class CIRToLLVMReturnAddrOpLowering
+ : public mlir::OpConversionPattern<cir::ReturnAddrOp> {
+public:
+ using mlir::OpConversionPattern<cir::ReturnAddrOp>::OpConversionPattern;
+
+ mlir::LogicalResult
+ matchAndRewrite(cir::ReturnAddrOp op, OpAdaptor,
+ mlir::ConversionPatternRewriter &) const override;
+};
+
+class CIRToLLVMFrameAddrOpLowering
+ : public mlir::OpConversionPattern<cir::FrameAddrOp> {
+public:
+ using mlir::OpConversionPattern<cir::FrameAddrOp>::OpConversionPattern;
+
+ mlir::LogicalResult
+ matchAndRewrite(cir::FrameAddrOp op, OpAdaptor,
+ mlir::ConversionPatternRewriter &) const override;
+};
+
class CIRToLLVMAllocaOpLowering
: public mlir::OpConversionPattern<cir::AllocaOp> {
mlir::DataLayout const &dataLayout;
@@ -447,6 +477,47 @@ public:
mlir::ConversionPatternRewriter &) const override;
};
+class CIRToLLVMVTableAddrPointOpLowering
+ : public mlir::OpConversionPattern<cir::VTableAddrPointOp> {
+public:
+ using mlir::OpConversionPattern<cir::VTableAddrPointOp>::OpConversionPattern;
+
+ mlir::LogicalResult
+ matchAndRewrite(cir::VTableAddrPointOp op, OpAdaptor,
+ mlir::ConversionPatternRewriter &) const override;
+};
+
+class CIRToLLVMVTableGetVPtrOpLowering
+ : public mlir::OpConversionPattern<cir::VTableGetVPtrOp> {
+public:
+ using mlir::OpConversionPattern<cir::VTableGetVPtrOp>::OpConversionPattern;
+
+ mlir::LogicalResult
+ matchAndRewrite(cir::VTableGetVPtrOp op, OpAdaptor,
+ mlir::ConversionPatternRewriter &) const override;
+};
+
+class CIRToLLVMVTableGetVirtualFnAddrOpLowering
+ : public mlir::OpConversionPattern<cir::VTableGetVirtualFnAddrOp> {
+public:
+ using mlir::OpConversionPattern<
+ cir::VTableGetVirtualFnAddrOp>::OpConversionPattern;
+
+ mlir::LogicalResult
+ matchAndRewrite(cir::VTableGetVirtualFnAddrOp op, OpAdaptor,
+ mlir::ConversionPatternRewriter &) const override;
+};
+
+class CIRToLLVMVTTAddrPointOpLowering
+ : public mlir::OpConversionPattern<cir::VTTAddrPointOp> {
+public:
+ using mlir::OpConversionPattern<cir::VTTAddrPointOp>::OpConversionPattern;
+
+ mlir::LogicalResult
+ matchAndRewrite(cir::VTTAddrPointOp op, OpAdaptor,
+ mlir::ConversionPatternRewriter &) const override;
+};
+
class CIRToLLVMStackSaveOpLowering
: public mlir::OpConversionPattern<cir::StackSaveOp> {
public:
@@ -638,6 +709,72 @@ public:
mlir::ConversionPatternRewriter &) const override;
};
+class CIRToLLVMFAbsOpLowering : public mlir::OpConversionPattern<cir::FAbsOp> {
+public:
+ using mlir::OpConversionPattern<cir::FAbsOp>::OpConversionPattern;
+
+ mlir::LogicalResult
+ matchAndRewrite(cir::FAbsOp op, OpAdaptor,
+ mlir::ConversionPatternRewriter &) const override;
+};
+
+class CIRToLLVMInlineAsmOpLowering
+ : public mlir::OpConversionPattern<cir::InlineAsmOp> {
+ mlir::DataLayout const &dataLayout;
+
+public:
+ CIRToLLVMInlineAsmOpLowering(const mlir::TypeConverter &typeConverter,
+ mlir::MLIRContext *context,
+ mlir::DataLayout const &dataLayout)
+ : OpConversionPattern(typeConverter, context), dataLayout(dataLayout) {}
+
+ using mlir::OpConversionPattern<cir::InlineAsmOp>::OpConversionPattern;
+
+ mlir::LogicalResult
+ matchAndRewrite(cir::InlineAsmOp op, OpAdaptor,
+ mlir::ConversionPatternRewriter &) const override;
+};
+
+class CIRToLLVMThrowOpLowering
+ : public mlir::OpConversionPattern<cir::ThrowOp> {
+public:
+ using mlir::OpConversionPattern<cir::ThrowOp>::OpConversionPattern;
+
+ mlir::LogicalResult
+ matchAndRewrite(cir::ThrowOp op, OpAdaptor,
+ mlir::ConversionPatternRewriter &) const override;
+};
+
+class CIRToLLVMVAStartOpLowering
+ : public mlir::OpConversionPattern<cir::VAStartOp> {
+public:
+ using mlir::OpConversionPattern<cir::VAStartOp>::OpConversionPattern;
+
+ mlir::LogicalResult
+ matchAndRewrite(cir::VAStartOp op, OpAdaptor,
+ mlir::ConversionPatternRewriter &) const override;
+};
+
+class CIRToLLVMVAEndOpLowering
+ : public mlir::OpConversionPattern<cir::VAEndOp> {
+public:
+ using mlir::OpConversionPattern<cir::VAEndOp>::OpConversionPattern;
+
+ mlir::LogicalResult
+ matchAndRewrite(cir::VAEndOp op, OpAdaptor,
+ mlir::ConversionPatternRewriter &) const override;
+};
+
+class CIRToLLVMVAArgOpLowering
+ : public mlir::OpConversionPattern<cir::VAArgOp> {
+public:
+ using mlir::OpConversionPattern<cir::VAArgOp>::OpConversionPattern;
+
+ mlir::LogicalResult
+ matchAndRewrite(cir::VAArgOp op, OpAdaptor,
+ mlir::ConversionPatternRewriter &) const override;
+};
+
} // namespace direct
} // namespace cir