Diffstat (limited to 'clang/lib/CIR')
 clang/lib/CIR/CodeGen/CIRGenBuiltin.cpp               |  12
 clang/lib/CIR/CodeGen/CIRGenCXXABI.h                  |   5
 clang/lib/CIR/CodeGen/CIRGenCXXExpr.cpp               |  23
 clang/lib/CIR/CodeGen/CIRGenCall.cpp                  |   8
 clang/lib/CIR/CodeGen/CIRGenCall.h                    |   5
 clang/lib/CIR/CodeGen/CIRGenClass.cpp                 | 132
 clang/lib/CIR/CodeGen/CIRGenCleanup.cpp               |  69
 clang/lib/CIR/CodeGen/CIRGenDecl.cpp                  | 153
 clang/lib/CIR/CodeGen/CIRGenExpr.cpp                  | 115
 clang/lib/CIR/CodeGen/CIRGenExprAggregate.cpp         |  91
 clang/lib/CIR/CodeGen/CIRGenExprComplex.cpp           | 169
 clang/lib/CIR/CodeGen/CIRGenExprScalar.cpp            |  40
 clang/lib/CIR/CodeGen/CIRGenFunction.cpp              | 302
 clang/lib/CIR/CodeGen/CIRGenFunction.h                | 126
 clang/lib/CIR/CodeGen/CIRGenItaniumCXXABI.cpp         |  26
 clang/lib/CIR/CodeGen/CIRGenModule.cpp                | 102
 clang/lib/CIR/CodeGen/CIRGenModule.h                  |   8
 clang/lib/CIR/CodeGen/CIRGenRecordLayoutBuilder.cpp   |   4
 clang/lib/CIR/CodeGen/CIRGenStmt.cpp                  |   3
 clang/lib/CIR/CodeGen/CMakeLists.txt                  |   1
 clang/lib/CIR/CodeGen/EHScopeStack.h                  |  99
 clang/lib/CIR/Dialect/IR/CIRDialect.cpp               | 256
 clang/lib/CIR/Dialect/Transforms/CIRCanonicalize.cpp  |   3
 clang/lib/CIR/Dialect/Transforms/LoweringPrepare.cpp  | 208
 clang/lib/CIR/Lowering/CIRPasses.cpp                  |   2
 clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp   | 118
 clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.h     |  25
 27 files changed, 1999 insertions(+), 106 deletions(-)
diff --git a/clang/lib/CIR/CodeGen/CIRGenBuiltin.cpp b/clang/lib/CIR/CodeGen/CIRGenBuiltin.cpp
index 61d1c54..9049a01 100644
--- a/clang/lib/CIR/CodeGen/CIRGenBuiltin.cpp
+++ b/clang/lib/CIR/CodeGen/CIRGenBuiltin.cpp
@@ -121,6 +121,13 @@ RValue CIRGenFunction::emitBuiltinExpr(const GlobalDecl &gd, unsigned builtinID,
return RValue::get(nullptr);
}
+ case Builtin::BI__builtin_assume_separate_storage: {
+ mlir::Value value0 = emitScalarExpr(e->getArg(0));
+ mlir::Value value1 = emitScalarExpr(e->getArg(1));
+ builder.create<cir::AssumeSepStorageOp>(loc, value0, value1);
+ return RValue::get(nullptr);
+ }
+
case Builtin::BI__builtin_complex: {
mlir::Value real = emitScalarExpr(e->getArg(0));
mlir::Value imag = emitScalarExpr(e->getArg(1));
@@ -183,6 +190,11 @@ RValue CIRGenFunction::emitBuiltinExpr(const GlobalDecl &gd, unsigned builtinID,
assert(!cir::MissingFeatures::builtinCheckKind());
return emitBuiltinBitOp<cir::BitClzOp>(*this, e, /*poisonZero=*/true);
+ case Builtin::BI__builtin_ffs:
+ case Builtin::BI__builtin_ffsl:
+ case Builtin::BI__builtin_ffsll:
+ return emitBuiltinBitOp<cir::BitFfsOp>(*this, e);
+
case Builtin::BI__builtin_parity:
case Builtin::BI__builtin_parityl:
case Builtin::BI__builtin_parityll:
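Note: a minimal sketch (not part of the patch) of C source that exercises the two new builtin paths above; the op names come straight from the hunks (cir::AssumeSepStorageOp, cir::BitFfsOp):

    extern void use(int);

    void demo(void *p, void *q, long x) {
      // Lowered to a cir::AssumeSepStorageOp over the two pointer operands.
      __builtin_assume_separate_storage(p, q);
      // Each ffs variant lowers through emitBuiltinBitOp<cir::BitFfsOp>;
      // ffs(x) is 1 + the index of the least significant set bit, 0 if x == 0.
      use(__builtin_ffs((int)x));
      use(__builtin_ffsl(x));
      use(__builtin_ffsll((long long)x));
    }
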
diff --git a/clang/lib/CIR/CodeGen/CIRGenCXXABI.h b/clang/lib/CIR/CodeGen/CIRGenCXXABI.h
index eb079b8..5929568 100644
--- a/clang/lib/CIR/CodeGen/CIRGenCXXABI.h
+++ b/clang/lib/CIR/CodeGen/CIRGenCXXABI.h
@@ -75,6 +75,11 @@ public:
/// Emit dtor variants required by this ABI.
virtual void emitCXXDestructors(const clang::CXXDestructorDecl *d) = 0;
+ virtual void emitDestructorCall(CIRGenFunction &cgf,
+ const CXXDestructorDecl *dd, CXXDtorType type,
+ bool forVirtualBase, bool delegating,
+ Address thisAddr, QualType thisTy) = 0;
+
/// Returns true if the given destructor type should be emitted as a linkonce
/// delegating thunk, regardless of whether the dtor is defined in this TU or
/// not.
diff --git a/clang/lib/CIR/CodeGen/CIRGenCXXExpr.cpp b/clang/lib/CIR/CodeGen/CIRGenCXXExpr.cpp
index 8da832d..67d8988 100644
--- a/clang/lib/CIR/CodeGen/CIRGenCXXExpr.cpp
+++ b/clang/lib/CIR/CodeGen/CIRGenCXXExpr.cpp
@@ -246,6 +246,29 @@ static void emitNewInitializer(CIRGenFunction &cgf, const CXXNewExpr *e,
}
}
+RValue CIRGenFunction::emitCXXDestructorCall(
+ GlobalDecl dtor, const CIRGenCallee &callee, mlir::Value thisVal,
+ QualType thisTy, mlir::Value implicitParam, QualType implicitParamTy,
+ const CallExpr *ce) {
+ const CXXMethodDecl *dtorDecl = cast<CXXMethodDecl>(dtor.getDecl());
+
+ assert(!thisTy.isNull());
+ assert(thisTy->getAsCXXRecordDecl() == dtorDecl->getParent() &&
+ "Pointer/Object mixup");
+
+ assert(!cir::MissingFeatures::addressSpace());
+
+ CallArgList args;
+ commonBuildCXXMemberOrOperatorCall(*this, dtorDecl, thisVal, implicitParam,
+ implicitParamTy, ce, args, nullptr);
+ assert((ce || dtor.getDecl()) && "expected source location provider");
+ assert(!cir::MissingFeatures::opCallMustTail());
+ return emitCall(cgm.getTypes().arrangeCXXStructorDeclaration(dtor), callee,
+ ReturnValueSlot(), args, nullptr,
+ ce ? getLoc(ce->getExprLoc())
+ : getLoc(dtor.getDecl()->getSourceRange()));
+}
+
/// Emit a call to an operator new or operator delete function, as implicitly
/// created by new-expressions and delete-expressions.
static RValue emitNewDeleteCall(CIRGenFunction &cgf,
diff --git a/clang/lib/CIR/CodeGen/CIRGenCall.cpp b/clang/lib/CIR/CodeGen/CIRGenCall.cpp
index 938d143..fc208ff 100644
--- a/clang/lib/CIR/CodeGen/CIRGenCall.cpp
+++ b/clang/lib/CIR/CodeGen/CIRGenCall.cpp
@@ -582,6 +582,14 @@ RValue CIRGenFunction::emitCall(const CIRGenFunctionInfo &funcInfo,
cir::FuncOp directFuncOp;
if (auto fnOp = dyn_cast<cir::FuncOp>(calleePtr)) {
directFuncOp = fnOp;
+ } else if (auto getGlobalOp = mlir::dyn_cast<cir::GetGlobalOp>(calleePtr)) {
+ // FIXME(cir): This peephole optimization avoids indirect calls for
+ // builtins. This should be fixed in the builtin declaration instead by
+ // not emitting an unnecessary get_global in the first place.
+ // However, this is also used for no-prototype functions.
+ mlir::Operation *globalOp = cgm.getGlobalValue(getGlobalOp.getName());
+ assert(globalOp && "undefined global function");
+ directFuncOp = mlir::cast<cir::FuncOp>(globalOp);
} else {
[[maybe_unused]] mlir::ValueTypeRange<mlir::ResultRange> resultTypes =
calleePtr->getResultTypes();
diff --git a/clang/lib/CIR/CodeGen/CIRGenCall.h b/clang/lib/CIR/CodeGen/CIRGenCall.h
index bd11329..a78956b 100644
--- a/clang/lib/CIR/CodeGen/CIRGenCall.h
+++ b/clang/lib/CIR/CodeGen/CIRGenCall.h
@@ -116,6 +116,11 @@ public:
assert(isOrdinary());
return reinterpret_cast<mlir::Operation *>(kindOrFunctionPtr);
}
+
+ void setFunctionPointer(mlir::Operation *functionPtr) {
+ assert(isOrdinary());
+ kindOrFunctionPtr = SpecialKind(reinterpret_cast<uintptr_t>(functionPtr));
+ }
};
/// Type for representing both the decl and type of parameters to a function.
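Note: the new setter mirrors getFunctionPointer and exists so a caller can retarget an ordinary callee after a bitcast; a commented usage sketch, taken from the FunctionNoProtoType path added in CIRGenExpr.cpp later in this patch:

    // mlir::Operation *fn = callee.getFunctionPointer();
    // ... materialize 'addr' for fn, bitcast to the promoted callee type ...
    // fn = builder.createBitcast(addr, calleePtrTy).getDefiningOp();
    // callee.setFunctionPointer(fn);
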
diff --git a/clang/lib/CIR/CodeGen/CIRGenClass.cpp b/clang/lib/CIR/CodeGen/CIRGenClass.cpp
index 8667bb6..50cca0e 100644
--- a/clang/lib/CIR/CodeGen/CIRGenClass.cpp
+++ b/clang/lib/CIR/CodeGen/CIRGenClass.cpp
@@ -12,6 +12,7 @@
#include "CIRGenCXXABI.h"
#include "CIRGenFunction.h"
+#include "CIRGenValue.h"
#include "clang/AST/ExprCXX.h"
#include "clang/AST/RecordLayout.h"
@@ -311,6 +312,116 @@ void CIRGenFunction::emitInitializerForField(FieldDecl *field, LValue lhs,
assert(!cir::MissingFeatures::requiresCleanups());
}
+/// Emit a loop to call a particular constructor for each of several members
+/// of an array.
+///
+/// \param ctor the constructor to call for each element
+/// \param arrayType the type of the array to initialize
+/// \param arrayBegin an arrayType*
+/// \param zeroInitialize true if each element should be
+/// zero-initialized before it is constructed
+void CIRGenFunction::emitCXXAggrConstructorCall(
+ const CXXConstructorDecl *ctor, const clang::ArrayType *arrayType,
+ Address arrayBegin, const CXXConstructExpr *e, bool newPointerIsChecked,
+ bool zeroInitialize) {
+ QualType elementType;
+ mlir::Value numElements = emitArrayLength(arrayType, elementType, arrayBegin);
+ emitCXXAggrConstructorCall(ctor, numElements, arrayBegin, e,
+ newPointerIsChecked, zeroInitialize);
+}
+
+/// Emit a loop to call a particular constructor for each of several members
+/// of an array.
+///
+/// \param ctor the constructor to call for each element
+/// \param numElements the number of elements in the array;
+/// may be zero
+/// \param arrayBase a T*, where T is the type constructed by ctor
+/// \param zeroInitialize true if each element should be
+/// zero-initialized before it is constructed
+void CIRGenFunction::emitCXXAggrConstructorCall(
+ const CXXConstructorDecl *ctor, mlir::Value numElements, Address arrayBase,
+ const CXXConstructExpr *e, bool newPointerIsChecked, bool zeroInitialize) {
+ // It's legal for numElements to be zero. This can happen both
+ // dynamically, because x can be zero in 'new A[x]', and statically,
+ // because of GCC extensions that permit zero-length arrays. There
+ // are probably legitimate places where we could assume that this
+ // doesn't happen, but it's not clear that it's worth it.
+
+ // Optimize for a constant count.
+ auto constantCount = dyn_cast<cir::ConstantOp>(numElements.getDefiningOp());
+ if (constantCount) {
+ auto constIntAttr = mlir::dyn_cast<cir::IntAttr>(constantCount.getValue());
+ // Just skip out if the constant count is zero.
+ if (constIntAttr && constIntAttr.getUInt() == 0)
+ return;
+ } else {
+ // Otherwise, emit the check.
+ cgm.errorNYI(e->getSourceRange(), "dynamic-length array expression");
+ }
+
+ auto arrayTy = mlir::cast<cir::ArrayType>(arrayBase.getElementType());
+ mlir::Type elementType = arrayTy.getElementType();
+ cir::PointerType ptrToElmType = builder.getPointerTo(elementType);
+
+ // Traditional LLVM codegen emits a loop here. CIR lowers to a loop as part of
+ // LoweringPrepare.
+
+ // The alignment of the base, adjusted by the size of a single element,
+ // provides a conservative estimate of the alignment of every element.
+ // (This assumes we never start tracking offsetted alignments.)
+ //
+ // Note that these are complete objects and so we don't need to
+ // use the non-virtual size or alignment.
+ QualType type = getContext().getTypeDeclType(ctor->getParent());
+ CharUnits eltAlignment = arrayBase.getAlignment().alignmentOfArrayElement(
+ getContext().getTypeSizeInChars(type));
+
+ // Zero initialize the storage, if requested.
+ if (zeroInitialize)
+ emitNullInitialization(*currSrcLoc, arrayBase, type);
+
+ // C++ [class.temporary]p4:
+ // There are two contexts in which temporaries are destroyed at a different
+ // point than the end of the full-expression. The first context is when a
+ // default constructor is called to initialize an element of an array.
+ // If the constructor has one or more default arguments, the destruction of
+ // every temporary created in a default argument expression is sequenced
+ // before the construction of the next array element, if any.
+ {
+ assert(!cir::MissingFeatures::runCleanupsScope());
+
+ // Evaluate the constructor and its arguments in a regular
+ // partial-destroy cleanup.
+ if (getLangOpts().Exceptions &&
+ !ctor->getParent()->hasTrivialDestructor()) {
+ cgm.errorNYI(e->getSourceRange(), "partial array cleanups");
+ }
+
+ // Emit the constructor call that will execute for every array element.
+ mlir::Value arrayOp =
+ builder.createPtrBitcast(arrayBase.getPointer(), arrayTy);
+ builder.create<cir::ArrayCtor>(
+ *currSrcLoc, arrayOp, [&](mlir::OpBuilder &b, mlir::Location loc) {
+ mlir::BlockArgument arg =
+ b.getInsertionBlock()->addArgument(ptrToElmType, loc);
+ Address curAddr = Address(arg, elementType, eltAlignment);
+ assert(!cir::MissingFeatures::sanitizers());
+ auto currAVS = AggValueSlot::forAddr(
+ curAddr, type.getQualifiers(), AggValueSlot::IsDestructed,
+ AggValueSlot::IsNotAliased, AggValueSlot::DoesNotOverlap,
+ AggValueSlot::IsNotZeroed);
+ emitCXXConstructorCall(ctor, Ctor_Complete,
+ /*ForVirtualBase=*/false,
+ /*Delegating=*/false, currAVS, e);
+ builder.create<cir::YieldOp>(loc);
+ });
+ }
+
+ if (constantCount.use_empty())
+ constantCount.erase();
+}
+
void CIRGenFunction::emitDelegateCXXConstructorCall(
const CXXConstructorDecl *ctor, CXXCtorType ctorType,
const FunctionArgList &args, SourceLocation loc) {
@@ -369,6 +480,19 @@ void CIRGenFunction::emitImplicitAssignmentOperatorBody(FunctionArgList &args) {
s->getStmtClassName());
}
+void CIRGenFunction::destroyCXXObject(CIRGenFunction &cgf, Address addr,
+ QualType type) {
+ const RecordType *rtype = type->castAs<RecordType>();
+ const CXXRecordDecl *record = cast<CXXRecordDecl>(rtype->getDecl());
+ const CXXDestructorDecl *dtor = record->getDestructor();
+ // TODO(cir): Unlike traditional codegen, CIRGen should actually emit trivial
+ // dtors, which will be removed by later CIR passes. However, only remove this
+ // assertion after we have a test case to exercise this path.
+ assert(!dtor->isTrivial());
+ cgf.emitCXXDestructorCall(dtor, Dtor_Complete, /*forVirtualBase*/ false,
+ /*delegating=*/false, addr, type);
+}
+
void CIRGenFunction::emitDelegatingCXXConstructorCall(
const CXXConstructorDecl *ctor, const FunctionArgList &args) {
assert(ctor->isDelegatingConstructor());
@@ -392,6 +516,14 @@ void CIRGenFunction::emitDelegatingCXXConstructorCall(
}
}
+void CIRGenFunction::emitCXXDestructorCall(const CXXDestructorDecl *dd,
+ CXXDtorType type,
+ bool forVirtualBase, bool delegating,
+ Address thisAddr, QualType thisTy) {
+ cgm.getCXXABI().emitDestructorCall(*this, dd, type, forVirtualBase,
+ delegating, thisAddr, thisTy);
+}
+
Address CIRGenFunction::getAddressOfBaseClass(
Address value, const CXXRecordDecl *derived,
llvm::iterator_range<CastExpr::path_const_iterator> path,
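Note: a hedged sketch of the C++ this new path handles, assuming a constant-length array of objects with a user-provided default constructor. CIRGen now emits a single cir::ArrayCtor region (one constructor call per element) and defers loop emission to LoweringPrepare:

    struct Widget {
      Widget();              // non-trivial default ctor
    };

    void makeWidgets() {
      Widget ws[4];          // one cir.array.ctor region; each element is
                             // constructed via emitCXXConstructorCall, and
                             // LoweringPrepare later expands it into a loop
    }
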
diff --git a/clang/lib/CIR/CodeGen/CIRGenCleanup.cpp b/clang/lib/CIR/CodeGen/CIRGenCleanup.cpp
new file mode 100644
index 0000000..be21ce9
--- /dev/null
+++ b/clang/lib/CIR/CodeGen/CIRGenCleanup.cpp
@@ -0,0 +1,69 @@
+//===--- CIRGenCleanup.cpp - Bookkeeping and code emission for cleanups ---===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// This file contains code dealing with the IR generation for cleanups
+// and related information.
+//
+// A "cleanup" is a piece of code which needs to be executed whenever
+// control transfers out of a particular scope. This can be
+// conditionalized to occur only on exceptional control flow, only on
+// normal control flow, or both.
+//
+//===----------------------------------------------------------------------===//
+
+#include "CIRGenFunction.h"
+
+#include "clang/CIR/MissingFeatures.h"
+
+using namespace clang;
+using namespace clang::CIRGen;
+
+//===----------------------------------------------------------------------===//
+// CIRGenFunction cleanup related
+//===----------------------------------------------------------------------===//
+
+//===----------------------------------------------------------------------===//
+// EHScopeStack
+//===----------------------------------------------------------------------===//
+
+void EHScopeStack::Cleanup::anchor() {}
+
+static mlir::Block *getCurCleanupBlock(CIRGenFunction &cgf) {
+ mlir::OpBuilder::InsertionGuard guard(cgf.getBuilder());
+ mlir::Block *cleanup =
+ cgf.curLexScope->getOrCreateCleanupBlock(cgf.getBuilder());
+ return cleanup;
+}
+
+/// Pops a cleanup block. If the block includes a normal cleanup, the
+/// current insertion point is threaded through the cleanup, as are
+/// any branch fixups on the cleanup.
+void CIRGenFunction::popCleanupBlock() {
+ assert(!ehStack.cleanupStack.empty() && "cleanup stack is empty!");
+ mlir::OpBuilder::InsertionGuard guard(builder);
+ std::unique_ptr<EHScopeStack::Cleanup> cleanup =
+ ehStack.cleanupStack.pop_back_val();
+
+ assert(!cir::MissingFeatures::ehCleanupFlags());
+ mlir::Block *cleanupEntry = getCurCleanupBlock(*this);
+ builder.setInsertionPointToEnd(cleanupEntry);
+ cleanup->emit(*this);
+}
+
+/// Pops cleanup blocks until the given savepoint is reached.
+void CIRGenFunction::popCleanupBlocks(size_t oldCleanupStackDepth) {
+ assert(!cir::MissingFeatures::ehstackBranches());
+
+ assert(ehStack.getStackDepth() >= oldCleanupStackDepth);
+
+ // Pop cleanup blocks until we reach the base stack depth for the
+ // current scope.
+ while (ehStack.getStackDepth() > oldCleanupStackDepth) {
+ popCleanupBlock();
+ }
+}
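Note: a hypothetical illustration (the cleanup class and its body are invented, not part of the patch) of the contract popCleanupBlock relies on: cleanups are pushed during a scope and emitted, last-in first-out, into that scope's cleanup block:

    // Invented example type; mirrors DestroyObject in CIRGenDecl.cpp.
    struct ExampleCleanup final : EHScopeStack::Cleanup {
      void emit(CIRGenFunction &cgf) override {
        // Runs with the builder positioned at the end of the current
        // lexical scope's cleanup block (see getCurCleanupBlock above).
      }
    };

    // Inside some CIRGen scope:
    //   cgf.ehStack.pushCleanup<ExampleCleanup>(NormalAndEHCleanup);
    //   ... emit the scope body ...
    //   // RunCleanupsScope's destructor calls popCleanupBlocks(oldDepth),
    //   // which pops back to oldDepth, emitting ExampleCleanup::emit.
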
diff --git a/clang/lib/CIR/CodeGen/CIRGenDecl.cpp b/clang/lib/CIR/CodeGen/CIRGenDecl.cpp
index afbe92a..6527fb5 100644
--- a/clang/lib/CIR/CodeGen/CIRGenDecl.cpp
+++ b/clang/lib/CIR/CodeGen/CIRGenDecl.cpp
@@ -183,8 +183,8 @@ void CIRGenFunction::emitAutoVarCleanups(
const VarDecl &d = *emission.Variable;
// Check the type for a cleanup.
- if (d.needsDestruction(getContext()))
- cgm.errorNYI(d.getSourceRange(), "emitAutoVarCleanups: type cleanup");
+ if (QualType::DestructionKind dtorKind = d.needsDestruction(getContext()))
+ emitAutoVarTypeCleanup(emission, dtorKind);
assert(!cir::MissingFeatures::opAllocaPreciseLifetime());
@@ -648,3 +648,152 @@ void CIRGenFunction::emitNullabilityCheck(LValue lhs, mlir::Value rhs,
assert(!cir::MissingFeatures::sanitizers());
}
+
+/// Destroys all the elements of the given array, beginning from last to first.
+/// The array cannot be zero-length.
+///
+/// \param begin - a type* denoting the first element of the array
+/// \param end - a type* denoting one past the end of the array
+/// \param elementType - the element type of the array
+/// \param destroyer - the function to call to destroy elements
+void CIRGenFunction::emitArrayDestroy(mlir::Value begin, mlir::Value end,
+ QualType elementType,
+ CharUnits elementAlign,
+ Destroyer *destroyer) {
+ assert(!elementType->isArrayType());
+
+ // Differently from LLVM traditional codegen, use a higher level
+ // representation instead of lowering directly to a loop.
+ mlir::Type cirElementType = convertTypeForMem(elementType);
+ cir::PointerType ptrToElmType = builder.getPointerTo(cirElementType);
+
+ // Emit the dtor call that will execute for every array element.
+ cir::ArrayDtor::create(
+ builder, *currSrcLoc, begin, [&](mlir::OpBuilder &b, mlir::Location loc) {
+ auto arg = b.getInsertionBlock()->addArgument(ptrToElmType, loc);
+ Address curAddr = Address(arg, cirElementType, elementAlign);
+ assert(!cir::MissingFeatures::dtorCleanups());
+
+ // Perform the actual destruction there.
+ destroyer(*this, curAddr, elementType);
+
+ cir::YieldOp::create(builder, loc);
+ });
+}
+
+/// Immediately perform the destruction of the given object.
+///
+/// \param addr - the address of the object; a type*
+/// \param type - the type of the object; if an array type, all
+/// objects are destroyed in reverse order
+/// \param destroyer - the function to call to destroy individual
+/// elements
+void CIRGenFunction::emitDestroy(Address addr, QualType type,
+ Destroyer *destroyer) {
+ const ArrayType *arrayType = getContext().getAsArrayType(type);
+ if (!arrayType)
+ return destroyer(*this, addr, type);
+
+ mlir::Value length = emitArrayLength(arrayType, type, addr);
+
+ CharUnits elementAlign = addr.getAlignment().alignmentOfArrayElement(
+ getContext().getTypeSizeInChars(type));
+
+ auto constantCount = length.getDefiningOp<cir::ConstantOp>();
+ if (!constantCount) {
+ assert(!cir::MissingFeatures::vlas());
+ cgm.errorNYI("emitDestroy: variable length array");
+ return;
+ }
+
+ auto constIntAttr = mlir::dyn_cast<cir::IntAttr>(constantCount.getValue());
+ // If it's constant zero, we can just skip the entire thing.
+ if (constIntAttr && constIntAttr.getUInt() == 0)
+ return;
+
+ mlir::Value begin = addr.getPointer();
+ mlir::Value end; // This will be used for future non-constant counts.
+ emitArrayDestroy(begin, end, type, elementAlign, destroyer);
+
+ // If the array destroy didn't use the length op, we can erase it.
+ if (constantCount.use_empty())
+ constantCount.erase();
+}
+
+CIRGenFunction::Destroyer *
+CIRGenFunction::getDestroyer(QualType::DestructionKind kind) {
+ switch (kind) {
+ case QualType::DK_none:
+ llvm_unreachable("no destroyer for trivial dtor");
+ case QualType::DK_cxx_destructor:
+ return destroyCXXObject;
+ case QualType::DK_objc_strong_lifetime:
+ case QualType::DK_objc_weak_lifetime:
+ case QualType::DK_nontrivial_c_struct:
+ cgm.errorNYI("getDestroyer: other destruction kind");
+ return nullptr;
+ }
+ llvm_unreachable("Unknown DestructionKind");
+}
+
+namespace {
+struct DestroyObject final : EHScopeStack::Cleanup {
+ DestroyObject(Address addr, QualType type,
+ CIRGenFunction::Destroyer *destroyer)
+ : addr(addr), type(type), destroyer(destroyer) {}
+
+ Address addr;
+ QualType type;
+ CIRGenFunction::Destroyer *destroyer;
+
+ void emit(CIRGenFunction &cgf) override {
+ cgf.emitDestroy(addr, type, destroyer);
+ }
+};
+} // namespace
+
+/// Enter a destroy cleanup for the given local variable.
+void CIRGenFunction::emitAutoVarTypeCleanup(
+ const CIRGenFunction::AutoVarEmission &emission,
+ QualType::DestructionKind dtorKind) {
+ assert(dtorKind != QualType::DK_none);
+
+ // Note that for __block variables, we want to destroy the
+ // original stack object, not the possibly forwarded object.
+ Address addr = emission.getObjectAddress(*this);
+
+ const VarDecl *var = emission.Variable;
+ QualType type = var->getType();
+
+ CleanupKind cleanupKind = NormalAndEHCleanup;
+ CIRGenFunction::Destroyer *destroyer = nullptr;
+
+ switch (dtorKind) {
+ case QualType::DK_none:
+ llvm_unreachable("no cleanup for trivially-destructible variable");
+
+ case QualType::DK_cxx_destructor:
+ // If there's an NRVO flag on the emission, we need a different
+ // cleanup.
+ if (emission.NRVOFlag) {
+ cgm.errorNYI(var->getSourceRange(), "emitAutoVarTypeCleanup: NRVO");
+ return;
+ }
+ // Otherwise, this is handled below.
+ break;
+
+ case QualType::DK_objc_strong_lifetime:
+ case QualType::DK_objc_weak_lifetime:
+ case QualType::DK_nontrivial_c_struct:
+ cgm.errorNYI(var->getSourceRange(),
+ "emitAutoVarTypeCleanup: other dtor kind");
+ return;
+ }
+
+ // If we haven't chosen a more specific destroyer, use the default.
+ if (!destroyer)
+ destroyer = getDestroyer(dtorKind);
+
+ assert(!cir::MissingFeatures::ehCleanupFlags());
+ ehStack.pushCleanup<DestroyObject>(cleanupKind, addr, type, destroyer);
+}
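Note: a sketch of the source this enables. Automatic variables with non-trivial destructors now get a DestroyObject cleanup instead of errorNYI, and constant-length arrays are destroyed through a single cir::ArrayDtor region (VLAs and NRVO-flagged emissions remain errorNYI above):

    struct Guard {
      ~Guard();          // non-trivial dtor: DK_cxx_destructor
    };

    void scoped() {
      Guard g;           // pushes DestroyObject; destroyCXXObject runs at
                         // scope exit via the cleanup block
      Guard gs[3];       // emitDestroy sees a constant count of 3 and emits
                         // one cir.array.dtor region, last element first
    }
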
diff --git a/clang/lib/CIR/CodeGen/CIRGenExpr.cpp b/clang/lib/CIR/CodeGen/CIRGenExpr.cpp
index 1f64801..c18498f 100644
--- a/clang/lib/CIR/CodeGen/CIRGenExpr.cpp
+++ b/clang/lib/CIR/CodeGen/CIRGenExpr.cpp
@@ -949,7 +949,6 @@ LValue CIRGenFunction::emitCastLValue(const CastExpr *e) {
case CK_Dynamic:
case CK_ToUnion:
case CK_BaseToDerived:
- case CK_LValueBitCast:
case CK_AddressSpaceConversion:
case CK_ObjCObjectLValueCast:
case CK_VectorSplat:
@@ -965,6 +964,18 @@ LValue CIRGenFunction::emitCastLValue(const CastExpr *e) {
return {};
}
+ case CK_LValueBitCast: {
+ // This must be a reinterpret_cast (or c-style equivalent).
+ const auto *ce = cast<ExplicitCastExpr>(e);
+
+ cgm.emitExplicitCastExprType(ce, this);
+ LValue LV = emitLValue(e->getSubExpr());
+ Address V = LV.getAddress().withElementType(
+ builder, convertTypeForMem(ce->getTypeAsWritten()->getPointeeType()));
+
+ return makeAddrLValue(V, e->getType(), LV.getBaseInfo());
+ }
+
case CK_NoOp: {
// CK_NoOp can model a qualification conversion, which can remove an array
// bound and change the IR type.
@@ -1269,7 +1280,7 @@ RValue CIRGenFunction::getUndefRValue(QualType ty) {
}
RValue CIRGenFunction::emitCall(clang::QualType calleeTy,
- const CIRGenCallee &callee,
+ const CIRGenCallee &origCallee,
const clang::CallExpr *e,
ReturnValueSlot returnValue) {
// Get the actual function type. The callee type will always be a pointer to
@@ -1280,6 +1291,8 @@ RValue CIRGenFunction::emitCall(clang::QualType calleeTy,
calleeTy = getContext().getCanonicalType(calleeTy);
auto pointeeTy = cast<PointerType>(calleeTy)->getPointeeType();
+ CIRGenCallee callee = origCallee;
+
if (getLangOpts().CPlusPlus)
assert(!cir::MissingFeatures::sanitizers());
@@ -1296,7 +1309,44 @@ RValue CIRGenFunction::emitCall(clang::QualType calleeTy,
const CIRGenFunctionInfo &funcInfo =
cgm.getTypes().arrangeFreeFunctionCall(args, fnType);
- assert(!cir::MissingFeatures::opCallNoPrototypeFunc());
+ // C99 6.5.2.2p6:
+ // If the expression that denotes the called function has a type that does
+ // not include a prototype, [the default argument promotions are performed].
+ // If the number of arguments does not equal the number of parameters, the
+ // behavior is undefined. If the function is defined with a type that
+ // includes a prototype, and either the prototype ends with an ellipsis (,
+ // ...) or the types of the arguments after promotion are not compatible
+ // with the types of the parameters, the behavior is undefined. If the
+ // function is defined with a type that does not include a prototype, and
+ // the types of the arguments after promotion are not compatible with those
+ // of the parameters after promotion, the behavior is undefined [except in
+ // some trivial cases].
+ // That is, in the general case, we should assume that a call through an
+ // unprototyped function type works like a *non-variadic* call. The way we
+ // make this work is to cast to the exact type of the promoted arguments.
+ if (isa<FunctionNoProtoType>(fnType)) {
+ assert(!cir::MissingFeatures::opCallChain());
+ assert(!cir::MissingFeatures::addressSpace());
+ cir::FuncType calleeTy = getTypes().getFunctionType(funcInfo);
+ // Get the non-variadic function type.
+ calleeTy = cir::FuncType::get(calleeTy.getInputs(),
+ calleeTy.getReturnType(), false);
+ auto calleePtrTy = cir::PointerType::get(calleeTy);
+
+ mlir::Operation *fn = callee.getFunctionPointer();
+ mlir::Value addr;
+ if (auto funcOp = mlir::dyn_cast<cir::FuncOp>(fn)) {
+ addr = builder.create<cir::GetGlobalOp>(
+ getLoc(e->getSourceRange()),
+ cir::PointerType::get(funcOp.getFunctionType()), funcOp.getSymName());
+ } else {
+ addr = fn->getResult(0);
+ }
+
+ fn = builder.createBitcast(addr, calleePtrTy).getDefiningOp();
+ callee.setFunctionPointer(fn);
+ }
+
assert(!cir::MissingFeatures::opCallFnInfoOpts());
assert(!cir::MissingFeatures::hip());
assert(!cir::MissingFeatures::opCallMustTail());
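Note: a hedged C sketch (pre-C23, where an empty parameter list still means "no prototype") of the case the new FunctionNoProtoType block handles: the call is treated as non-variadic, and the callee pointer is bitcast to the exact type of the promoted arguments:

    void legacy();             /* K&R-style declaration: no prototype */

    void caller(void) {
      legacy(1, 2.0);          /* the cir.func symbol is materialized via
                                  get_global and bitcast to a non-variadic
                                  (int, double) -> void function pointer */
    }
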
@@ -1657,37 +1707,38 @@ void CIRGenFunction::emitCXXConstructExpr(const CXXConstructExpr *e,
return;
}
- if (getContext().getAsArrayType(e->getType())) {
- cgm.errorNYI(e->getSourceRange(), "emitCXXConstructExpr: array type");
- return;
- }
+ if (const ArrayType *arrayType = getContext().getAsArrayType(e->getType())) {
+ assert(!cir::MissingFeatures::sanitizers());
+ emitCXXAggrConstructorCall(cd, arrayType, dest.getAddress(), e, false);
+ } else {
- clang::CXXCtorType type = Ctor_Complete;
- bool forVirtualBase = false;
- bool delegating = false;
-
- switch (e->getConstructionKind()) {
- case CXXConstructionKind::Complete:
- type = Ctor_Complete;
- break;
- case CXXConstructionKind::Delegating:
- // We should be emitting a constructor; GlobalDecl will assert this
- type = curGD.getCtorType();
- delegating = true;
- break;
- case CXXConstructionKind::VirtualBase:
- // This should just set 'forVirtualBase' to true and fall through, but
- // virtual base class support is otherwise missing, so this needs to wait
- // until it can be tested.
- cgm.errorNYI(e->getSourceRange(),
- "emitCXXConstructExpr: virtual base constructor");
- return;
- case CXXConstructionKind::NonVirtualBase:
- type = Ctor_Base;
- break;
- }
+ clang::CXXCtorType type = Ctor_Complete;
+ bool forVirtualBase = false;
+ bool delegating = false;
- emitCXXConstructorCall(cd, type, forVirtualBase, delegating, dest, e);
+ switch (e->getConstructionKind()) {
+ case CXXConstructionKind::Complete:
+ type = Ctor_Complete;
+ break;
+ case CXXConstructionKind::Delegating:
+ // We should be emitting a constructor; GlobalDecl will assert this
+ type = curGD.getCtorType();
+ delegating = true;
+ break;
+ case CXXConstructionKind::VirtualBase:
+ // This should just set 'forVirtualBase' to true and fall through, but
+ // virtual base class support is otherwise missing, so this needs to wait
+ // until it can be tested.
+ cgm.errorNYI(e->getSourceRange(),
+ "emitCXXConstructExpr: virtual base constructor");
+ return;
+ case CXXConstructionKind::NonVirtualBase:
+ type = Ctor_Base;
+ break;
+ }
+
+ emitCXXConstructorCall(cd, type, forVirtualBase, delegating, dest, e);
+ }
}
RValue CIRGenFunction::emitReferenceBindingToExpr(const Expr *e) {
diff --git a/clang/lib/CIR/CodeGen/CIRGenExprAggregate.cpp b/clang/lib/CIR/CodeGen/CIRGenExprAggregate.cpp
index 0d12c5c..51aab95 100644
--- a/clang/lib/CIR/CodeGen/CIRGenExprAggregate.cpp
+++ b/clang/lib/CIR/CodeGen/CIRGenExprAggregate.cpp
@@ -357,10 +357,97 @@ void AggExprEmitter::visitCXXParenListOrInitListExpr(
emitArrayInit(dest.getAddress(), arrayTy, e->getType(), e, args,
arrayFiller);
return;
+ } else if (e->getType()->isVariableArrayType()) {
+ cgf.cgm.errorNYI(e->getSourceRange(),
+ "visitCXXParenListOrInitListExpr variable array type");
+ return;
+ }
+
+ if (e->getType()->isArrayType()) {
+ cgf.cgm.errorNYI(e->getSourceRange(),
+ "visitCXXParenListOrInitListExpr array type");
+ return;
+ }
+
+ assert(e->getType()->isRecordType() && "Only support structs/unions here!");
+
+ // Do struct initialization; this code just sets each individual member
+ // to the approprate value. This makes bitfield support automatic;
+ // the disadvantage is that the generated code is more difficult for
+ // the optimizer, especially with bitfields.
+ unsigned numInitElements = args.size();
+ RecordDecl *record = e->getType()->castAs<RecordType>()->getDecl();
+
+ // We'll need to enter cleanup scopes in case any of the element
+ // initializers throws an exception.
+ assert(!cir::MissingFeatures::requiresCleanups());
+
+ unsigned curInitIndex = 0;
+
+ // Emit initialization of base classes.
+ if (auto *cxxrd = dyn_cast<CXXRecordDecl>(record)) {
+ assert(numInitElements >= cxxrd->getNumBases() &&
+ "missing initializer for base class");
+ if (cxxrd->getNumBases() > 0) {
+ cgf.cgm.errorNYI(e->getSourceRange(),
+ "visitCXXParenListOrInitListExpr base class init");
+ return;
+ }
+ }
+
+ LValue destLV = cgf.makeAddrLValue(dest.getAddress(), e->getType());
+
+ if (record->isUnion()) {
+ cgf.cgm.errorNYI(e->getSourceRange(),
+ "visitCXXParenListOrInitListExpr union type");
+ return;
}
- cgf.cgm.errorNYI(
- "visitCXXParenListOrInitListExpr Record or VariableSizeArray type");
+ // Here we iterate over the fields; this makes it simpler to both
+ // default-initialize fields and skip over unnamed fields.
+ for (const FieldDecl *field : record->fields()) {
+ // We're done once we hit the flexible array member.
+ if (field->getType()->isIncompleteArrayType())
+ break;
+
+ // Always skip anonymous bitfields.
+ if (field->isUnnamedBitField())
+ continue;
+
+ // We're done if we reach the end of the explicit initializers, we
+ // have a zeroed object, and the rest of the fields are
+ // zero-initializable.
+ if (curInitIndex == numInitElements && dest.isZeroed() &&
+ cgf.getTypes().isZeroInitializable(e->getType()))
+ break;
+ LValue lv =
+ cgf.emitLValueForFieldInitialization(destLV, field, field->getName());
+ // We never generate write-barriers for initialized fields.
+ assert(!cir::MissingFeatures::setNonGC());
+
+ if (curInitIndex < numInitElements) {
+ // Store the initializer into the field.
+ CIRGenFunction::SourceLocRAIIObject loc{
+ cgf, cgf.getLoc(record->getSourceRange())};
+ emitInitializationToLValue(args[curInitIndex++], lv);
+ } else {
+ // We're out of initializers; default-initialize to null
+ emitNullInitializationToLValue(cgf.getLoc(e->getSourceRange()), lv);
+ }
+
+ // Push a destructor if necessary.
+ // FIXME: if we have an array of structures, all explicitly
+ // initialized, we can end up pushing a linear number of cleanups.
+ if (field->getType().isDestructedType()) {
+ cgf.cgm.errorNYI(e->getSourceRange(),
+ "visitCXXParenListOrInitListExpr destructor");
+ return;
+ }
+
+ // From classic codegen, maybe not useful for CIR:
+ // If the GEP didn't get used because of a dead zero init or something
+ // else, clean it up for -O0 builds and general tidiness.
+ }
}
// TODO(cir): This could be shared with classic codegen.
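Note: a sketch of an initializer list that now takes the struct path above. Fields are initialized in declaration order, unnamed bitfields are skipped, and leftover fields are null-initialized (unions, base-class initializers, and fields needing destruction still hit errorNYI):

    struct Point {
      int x;
      int : 4;        // unnamed bitfield: skipped by the field loop
      int y;
    };

    Point origin() { return Point{1}; }  // x <- 1; y <- null-initialized
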
diff --git a/clang/lib/CIR/CodeGen/CIRGenExprComplex.cpp b/clang/lib/CIR/CodeGen/CIRGenExprComplex.cpp
index 6756a7c..a09d739 100644
--- a/clang/lib/CIR/CodeGen/CIRGenExprComplex.cpp
+++ b/clang/lib/CIR/CodeGen/CIRGenExprComplex.cpp
@@ -34,11 +34,20 @@ public:
}
mlir::Value emitLoadOfLValue(LValue lv, SourceLocation loc);
+
/// Store the specified real/imag parts into the
/// specified value pointer.
void emitStoreOfComplex(mlir::Location loc, mlir::Value val, LValue lv,
bool isInit);
+ /// Emit a cast from complex value Val to DestType.
+ mlir::Value emitComplexToComplexCast(mlir::Value value, QualType srcType,
+ QualType destType, SourceLocation loc);
+
+ /// Emit a cast from scalar value Val to DestType.
+ mlir::Value emitScalarToComplexCast(mlir::Value value, QualType srcType,
+ QualType destType, SourceLocation loc);
+
mlir::Value
VisitAbstractConditionalOperator(const AbstractConditionalOperator *e);
mlir::Value VisitArraySubscriptExpr(Expr *e);
@@ -51,7 +60,7 @@ public:
mlir::Value VisitDeclRefExpr(DeclRefExpr *e);
mlir::Value VisitGenericSelectionExpr(GenericSelectionExpr *e);
mlir::Value VisitImplicitCastExpr(ImplicitCastExpr *e);
- mlir::Value VisitInitListExpr(const InitListExpr *e);
+ mlir::Value VisitInitListExpr(InitListExpr *e);
mlir::Value VisitCompoundLiteralExpr(CompoundLiteralExpr *e) {
return emitLoadOfLValue(e);
@@ -164,14 +173,113 @@ LValue ComplexExprEmitter::emitBinAssignLValue(const BinaryOperator *e,
mlir::Value ComplexExprEmitter::emitCast(CastKind ck, Expr *op,
QualType destTy) {
switch (ck) {
+ case CK_Dependent:
+ llvm_unreachable("dependent type must be resolved before the CIR codegen");
+
case CK_NoOp:
case CK_LValueToRValue:
return Visit(op);
- default:
- break;
+
+ case CK_AtomicToNonAtomic:
+ case CK_NonAtomicToAtomic:
+ case CK_UserDefinedConversion: {
+ cgf.cgm.errorNYI(
+ "ComplexExprEmitter::emitCast Atmoic & UserDefinedConversion");
+ return {};
}
- cgf.cgm.errorNYI("ComplexType Cast");
- return {};
+
+ case CK_LValueBitCast: {
+ LValue origLV = cgf.emitLValue(op);
+ Address addr =
+ origLV.getAddress().withElementType(builder, cgf.convertType(destTy));
+ LValue destLV = cgf.makeAddrLValue(addr, destTy);
+ return emitLoadOfLValue(destLV, op->getExprLoc());
+ }
+
+ case CK_LValueToRValueBitCast: {
+ LValue sourceLVal = cgf.emitLValue(op);
+ Address addr = sourceLVal.getAddress().withElementType(
+ builder, cgf.convertTypeForMem(destTy));
+ LValue destLV = cgf.makeAddrLValue(addr, destTy);
+ assert(!cir::MissingFeatures::opTBAA());
+ return emitLoadOfLValue(destLV, op->getExprLoc());
+ }
+
+ case CK_BitCast:
+ case CK_BaseToDerived:
+ case CK_DerivedToBase:
+ case CK_UncheckedDerivedToBase:
+ case CK_Dynamic:
+ case CK_ToUnion:
+ case CK_ArrayToPointerDecay:
+ case CK_FunctionToPointerDecay:
+ case CK_NullToPointer:
+ case CK_NullToMemberPointer:
+ case CK_BaseToDerivedMemberPointer:
+ case CK_DerivedToBaseMemberPointer:
+ case CK_MemberPointerToBoolean:
+ case CK_ReinterpretMemberPointer:
+ case CK_ConstructorConversion:
+ case CK_IntegralToPointer:
+ case CK_PointerToIntegral:
+ case CK_PointerToBoolean:
+ case CK_ToVoid:
+ case CK_VectorSplat:
+ case CK_IntegralCast:
+ case CK_BooleanToSignedIntegral:
+ case CK_IntegralToBoolean:
+ case CK_IntegralToFloating:
+ case CK_FloatingToIntegral:
+ case CK_FloatingToBoolean:
+ case CK_FloatingCast:
+ case CK_CPointerToObjCPointerCast:
+ case CK_BlockPointerToObjCPointerCast:
+ case CK_AnyPointerToBlockPointerCast:
+ case CK_ObjCObjectLValueCast:
+ case CK_FloatingComplexToReal:
+ case CK_FloatingComplexToBoolean:
+ case CK_IntegralComplexToReal:
+ case CK_IntegralComplexToBoolean:
+ case CK_ARCProduceObject:
+ case CK_ARCConsumeObject:
+ case CK_ARCReclaimReturnedObject:
+ case CK_ARCExtendBlockObject:
+ case CK_CopyAndAutoreleaseBlockObject:
+ case CK_BuiltinFnToFnPtr:
+ case CK_ZeroToOCLOpaqueType:
+ case CK_AddressSpaceConversion:
+ case CK_IntToOCLSampler:
+ case CK_FloatingToFixedPoint:
+ case CK_FixedPointToFloating:
+ case CK_FixedPointCast:
+ case CK_FixedPointToBoolean:
+ case CK_FixedPointToIntegral:
+ case CK_IntegralToFixedPoint:
+ case CK_MatrixCast:
+ case CK_HLSLVectorTruncation:
+ case CK_HLSLArrayRValue:
+ case CK_HLSLElementwiseCast:
+ case CK_HLSLAggregateSplatCast:
+ llvm_unreachable("invalid cast kind for complex value");
+
+ case CK_FloatingRealToComplex:
+ case CK_IntegralRealToComplex: {
+ assert(!cir::MissingFeatures::cgFPOptionsRAII());
+ return emitScalarToComplexCast(cgf.emitScalarExpr(op), op->getType(),
+ destTy, op->getExprLoc());
+ }
+
+ case CK_FloatingComplexCast:
+ case CK_FloatingComplexToIntegralComplex:
+ case CK_IntegralComplexCast:
+ case CK_IntegralComplexToFloatingComplex: {
+ assert(!cir::MissingFeatures::cgFPOptionsRAII());
+ return emitComplexToComplexCast(Visit(op), op->getType(), destTy,
+ op->getExprLoc());
+ }
+ }
+
+ llvm_unreachable("unknown cast resulting in complex value");
}
mlir::Value ComplexExprEmitter::emitConstant(
@@ -207,6 +315,49 @@ void ComplexExprEmitter::emitStoreOfComplex(mlir::Location loc, mlir::Value val,
builder.createStore(loc, val, destAddr);
}
+mlir::Value ComplexExprEmitter::emitComplexToComplexCast(mlir::Value val,
+ QualType srcType,
+ QualType destType,
+ SourceLocation loc) {
+ if (srcType == destType)
+ return val;
+
+ // Get the src/dest element type.
+ QualType srcElemTy = srcType->castAs<ComplexType>()->getElementType();
+ QualType destElemTy = destType->castAs<ComplexType>()->getElementType();
+
+ cir::CastKind castOpKind;
+ if (srcElemTy->isFloatingType() && destElemTy->isFloatingType())
+ castOpKind = cir::CastKind::float_complex;
+ else if (srcElemTy->isFloatingType() && destElemTy->isIntegerType())
+ castOpKind = cir::CastKind::float_complex_to_int_complex;
+ else if (srcElemTy->isIntegerType() && destElemTy->isFloatingType())
+ castOpKind = cir::CastKind::int_complex_to_float_complex;
+ else if (srcElemTy->isIntegerType() && destElemTy->isIntegerType())
+ castOpKind = cir::CastKind::int_complex;
+ else
+ llvm_unreachable("unexpected src type or dest type");
+
+ return builder.createCast(cgf.getLoc(loc), castOpKind, val,
+ cgf.convertType(destType));
+}
+
+mlir::Value ComplexExprEmitter::emitScalarToComplexCast(mlir::Value val,
+ QualType srcType,
+ QualType destType,
+ SourceLocation loc) {
+ cir::CastKind castOpKind;
+ if (srcType->isFloatingType())
+ castOpKind = cir::CastKind::float_to_complex;
+ else if (srcType->isIntegerType())
+ castOpKind = cir::CastKind::int_to_complex;
+ else
+ llvm_unreachable("unexpected src type");
+
+ return builder.createCast(cgf.getLoc(loc), castOpKind, val,
+ cgf.convertType(destType));
+}
+
mlir::Value ComplexExprEmitter::VisitAbstractConditionalOperator(
const AbstractConditionalOperator *e) {
mlir::Value condValue = Visit(e->getCond());
@@ -304,7 +455,7 @@ mlir::Value ComplexExprEmitter::VisitImplicitCastExpr(ImplicitCastExpr *e) {
return emitCast(e->getCastKind(), e->getSubExpr(), e->getType());
}
-mlir::Value ComplexExprEmitter::VisitInitListExpr(const InitListExpr *e) {
+mlir::Value ComplexExprEmitter::VisitInitListExpr(InitListExpr *e) {
mlir::Location loc = cgf.getLoc(e->getExprLoc());
if (e->getNumInits() == 2) {
mlir::Value real = cgf.emitScalarExpr(e->getInit(0));
@@ -312,10 +463,8 @@ mlir::Value ComplexExprEmitter::VisitInitListExpr(const InitListExpr *e) {
return builder.createComplexCreate(loc, real, imag);
}
- if (e->getNumInits() == 1) {
- cgf.cgm.errorNYI("Create Complex with InitList with size 1");
- return {};
- }
+ if (e->getNumInits() == 1)
+ return Visit(e->getInit(0));
assert(e->getNumInits() == 0 && "Unexpected number of inits");
mlir::Type complexTy = cgf.convertType(e->getType());
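Note: a hedged sketch (GNU _Complex, compiled as C) of conversions that now flow through the two new helpers instead of the old blanket errorNYI; the one-element InitListExpr case likewise now just visits its operand:

    void demo(int _Complex z, float f) {
      float _Complex a = z;  // CK_IntegralComplexToFloatingComplex ->
                             // int_complex_to_float_complex cast
      float _Complex b = f;  // CK_FloatingRealToComplex -> float_to_complex
      (void)a; (void)b;
    }
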
diff --git a/clang/lib/CIR/CodeGen/CIRGenExprScalar.cpp b/clang/lib/CIR/CodeGen/CIRGenExprScalar.cpp
index eba6bff..2523b0f 100644
--- a/clang/lib/CIR/CodeGen/CIRGenExprScalar.cpp
+++ b/clang/lib/CIR/CodeGen/CIRGenExprScalar.cpp
@@ -88,6 +88,10 @@ public:
// Utilities
//===--------------------------------------------------------------------===//
+ mlir::Value emitComplexToScalarConversion(mlir::Location loc,
+ mlir::Value value, CastKind kind,
+ QualType destTy);
+
mlir::Value emitPromotedValue(mlir::Value result, QualType promotionType) {
return builder.createFloatingCast(result, cgf.convertType(promotionType));
}
@@ -1125,7 +1129,7 @@ LValue ScalarExprEmitter::emitCompoundAssignLValue(
// 'An assignment expression has the value of the left operand after the
// assignment...'.
if (lhsLV.isBitField())
- cgf.cgm.errorNYI(e->getSourceRange(), "store through bitfield lvalue");
+ cgf.emitStoreThroughBitfieldLValue(RValue::get(result), lhsLV);
else
cgf.emitStoreThroughLValue(RValue::get(result), lhsLV);
@@ -1135,6 +1139,31 @@ LValue ScalarExprEmitter::emitCompoundAssignLValue(
return lhsLV;
}
+mlir::Value ScalarExprEmitter::emitComplexToScalarConversion(mlir::Location loc,
+ mlir::Value value,
+ CastKind kind,
+ QualType destTy) {
+ cir::CastKind castOpKind;
+ switch (kind) {
+ case CK_FloatingComplexToReal:
+ castOpKind = cir::CastKind::float_complex_to_real;
+ break;
+ case CK_IntegralComplexToReal:
+ castOpKind = cir::CastKind::int_complex_to_real;
+ break;
+ case CK_FloatingComplexToBoolean:
+ castOpKind = cir::CastKind::float_complex_to_bool;
+ break;
+ case CK_IntegralComplexToBoolean:
+ castOpKind = cir::CastKind::int_complex_to_bool;
+ break;
+ default:
+ llvm_unreachable("invalid complex-to-scalar cast kind");
+ }
+
+ return builder.createCast(loc, castOpKind, value, cgf.convertType(destTy));
+}
+
mlir::Value ScalarExprEmitter::emitPromoted(const Expr *e,
QualType promotionType) {
e = e->IgnoreParens();
@@ -1758,6 +1787,15 @@ mlir::Value ScalarExprEmitter::VisitCastExpr(CastExpr *ce) {
ce->getExprLoc(), opts);
}
+ case CK_FloatingComplexToReal:
+ case CK_IntegralComplexToReal:
+ case CK_FloatingComplexToBoolean:
+ case CK_IntegralComplexToBoolean: {
+ mlir::Value value = cgf.emitComplexExpr(subExpr);
+ return emitComplexToScalarConversion(cgf.getLoc(ce->getExprLoc()), value,
+ kind, destTy);
+ }
+
case CK_FloatingRealToComplex:
case CK_FloatingComplexCast:
case CK_IntegralRealToComplex:
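Note: a sketch (compiled as C) of the complex-to-scalar casts wired up above:

    int demo(float _Complex z) {
      float r = z;     // CK_FloatingComplexToReal -> float_complex_to_real
      if (z)           // CK_FloatingComplexToBoolean -> float_complex_to_bool
        return (int)r;
      return 0;
    }
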
diff --git a/clang/lib/CIR/CodeGen/CIRGenFunction.cpp b/clang/lib/CIR/CodeGen/CIRGenFunction.cpp
index 7e1a44c..c65d025 100644
--- a/clang/lib/CIR/CodeGen/CIRGenFunction.cpp
+++ b/clang/lib/CIR/CodeGen/CIRGenFunction.cpp
@@ -26,7 +26,11 @@ namespace clang::CIRGen {
CIRGenFunction::CIRGenFunction(CIRGenModule &cgm, CIRGenBuilderTy &builder,
bool suppressNewContext)
- : CIRGenTypeCache(cgm), cgm{cgm}, builder(builder) {}
+ : CIRGenTypeCache(cgm), cgm{cgm}, builder(builder) {
+ ehStack.setCGF(this);
+ currentCleanupStackDepth = 0;
+ assert(ehStack.getStackDepth() == 0);
+}
CIRGenFunction::~CIRGenFunction() {}
@@ -227,6 +231,14 @@ void CIRGenFunction::LexicalScope::cleanup() {
CIRGenBuilderTy &builder = cgf.builder;
LexicalScope *localScope = cgf.curLexScope;
+ auto applyCleanup = [&]() {
+ if (performCleanup) {
+ // ApplyDebugLocation
+ assert(!cir::MissingFeatures::generateDebugInfo());
+ forceCleanup();
+ }
+ };
+
if (returnBlock != nullptr) {
// Write out the return block, which loads the value from `__retval` and
// issues the `cir.return`.
@@ -235,32 +247,42 @@ void CIRGenFunction::LexicalScope::cleanup() {
(void)emitReturn(*returnLoc);
}
- mlir::Block *curBlock = builder.getBlock();
- if (isGlobalInit() && !curBlock)
- return;
- if (curBlock->mightHaveTerminator() && curBlock->getTerminator())
- return;
-
- // Get rid of any empty block at the end of the scope.
- bool entryBlock = builder.getInsertionBlock()->isEntryBlock();
- if (!entryBlock && curBlock->empty()) {
- curBlock->erase();
- if (returnBlock != nullptr && returnBlock->getUses().empty())
- returnBlock->erase();
- return;
- }
-
- // Reached the end of the scope.
- {
+ auto insertCleanupAndLeave = [&](mlir::Block *insPt) {
mlir::OpBuilder::InsertionGuard guard(builder);
- builder.setInsertionPointToEnd(curBlock);
+ builder.setInsertionPointToEnd(insPt);
+
+ // If we still don't have a cleanup block, it means that `applyCleanup`
+ // below might be able to get us one.
+ mlir::Block *cleanupBlock = localScope->getCleanupBlock(builder);
+
+ // Leverage and defer to RunCleanupsScope's dtor and scope handling.
+ applyCleanup();
+
+ // If we now have one after `applyCleanup`, hook it up properly.
+ if (!cleanupBlock && localScope->getCleanupBlock(builder)) {
+ cleanupBlock = localScope->getCleanupBlock(builder);
+ builder.create<cir::BrOp>(insPt->back().getLoc(), cleanupBlock);
+ if (!cleanupBlock->mightHaveTerminator()) {
+ mlir::OpBuilder::InsertionGuard guard(builder);
+ builder.setInsertionPointToEnd(cleanupBlock);
+ builder.create<cir::YieldOp>(localScope->endLoc);
+ }
+ }
if (localScope->depth == 0) {
// Reached the end of the function.
if (returnBlock != nullptr) {
- if (returnBlock->getUses().empty())
+ if (returnBlock->getUses().empty()) {
returnBlock->erase();
- else {
+ } else {
+ // Thread return block via cleanup block.
+ if (cleanupBlock) {
+ for (mlir::BlockOperand &blockUse : returnBlock->getUses()) {
+ cir::BrOp brOp = mlir::cast<cir::BrOp>(blockUse.getOwner());
+ brOp.setSuccessor(cleanupBlock);
+ }
+ }
+
builder.create<cir::BrOp>(*returnLoc, returnBlock);
return;
}
@@ -268,13 +290,50 @@ void CIRGenFunction::LexicalScope::cleanup() {
emitImplicitReturn();
return;
}
- // Reached the end of a non-function scope. Some scopes, such as those
- // used with the ?: operator, can return a value.
- if (!localScope->isTernary() && !curBlock->mightHaveTerminator()) {
+
+ // End of any local scope != function
+ // Ternary ops have to deal with matching arms for yielding types,
+ // and since they return a value, they must insert their own cir.yield.
+ if (!localScope->isTernary() && !insPt->mightHaveTerminator()) {
!retVal ? builder.create<cir::YieldOp>(localScope->endLoc)
: builder.create<cir::YieldOp>(localScope->endLoc, retVal);
}
+ };
+
+ // If a cleanup block has been created at some point, branch to it
+ // and set the insertion point to continue at the cleanup block.
+ // Terminators are then inserted either in the cleanup block or
+ // inline in this current block.
+ mlir::Block *cleanupBlock = localScope->getCleanupBlock(builder);
+ if (cleanupBlock)
+ insertCleanupAndLeave(cleanupBlock);
+
+ // Now deal with any pending block wrap up like implicit end of
+ // scope.
+
+ mlir::Block *curBlock = builder.getBlock();
+ if (isGlobalInit() && !curBlock)
+ return;
+ if (curBlock->mightHaveTerminator() && curBlock->getTerminator())
+ return;
+
+ // Get rid of any empty block at the end of the scope.
+ bool entryBlock = builder.getInsertionBlock()->isEntryBlock();
+ if (!entryBlock && curBlock->empty()) {
+ curBlock->erase();
+ if (returnBlock != nullptr && returnBlock->getUses().empty())
+ returnBlock->erase();
+ return;
}
+
+ // If there's a cleanup block, branch to it, nothing else to do.
+ if (cleanupBlock) {
+ builder.create<cir::BrOp>(curBlock->back().getLoc(), cleanupBlock);
+ return;
+ }
+
+ // No pre-existent cleanup block, emit cleanup code and yield/return.
+ insertCleanupAndLeave(curBlock);
}
cir::ReturnOp CIRGenFunction::LexicalScope::emitReturn(mlir::Location loc) {
@@ -408,7 +467,19 @@ void CIRGenFunction::startFunction(GlobalDecl gd, QualType returnType,
}
}
-void CIRGenFunction::finishFunction(SourceLocation endLoc) {}
+void CIRGenFunction::finishFunction(SourceLocation endLoc) {
+ // Pop any cleanups that might have been associated with the
+ // parameters. Do this in whatever block we're currently in; it's
+ // important to do this before we enter the return block or return
+ // edges will be *really* confused.
+ // TODO(cir): Use prologueCleanupDepth here.
+ bool hasCleanups = ehStack.getStackDepth() != currentCleanupStackDepth;
+ if (hasCleanups) {
+ assert(!cir::MissingFeatures::generateDebugInfo());
+ // FIXME(cir): should we clearInsertionPoint? breaks many testcases
+ popCleanupBlocks(currentCleanupStackDepth);
+ }
+}
mlir::LogicalResult CIRGenFunction::emitFunctionBody(const clang::Stmt *body) {
auto result = mlir::LogicalResult::success();
@@ -593,11 +664,12 @@ void CIRGenFunction::emitDestructorBody(FunctionArgList &args) {
assert(!cir::MissingFeatures::dtorCleanups());
- // TODO(cir): A complete destructor is supposed to call the base destructor.
- // Since we have to emit both dtor kinds we just fall through for now and.
- // As long as we don't support virtual bases this should be functionally
- // equivalent.
- assert(!cir::MissingFeatures::completeDtors());
+ if (!isTryBody) {
+ QualType thisTy = dtor->getFunctionObjectParameterType();
+ emitCXXDestructorCall(dtor, Dtor_Base, /*forVirtualBase=*/false,
+ /*delegating=*/false, loadCXXThisAddress(), thisTy);
+ break;
+ }
// Fallthrough: act like we're in the base variant.
[[fallthrough]];
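Note: the effect of this change, sketched for a class with no virtual bases: the complete-object destructor (D1) now emits an explicit call to the base-object variant (D2) rather than falling through into a duplicated body:

    struct S {
      ~S();      // D1 is emitted as roughly: call @_ZN1SD2Ev(%this); return
    };
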
@@ -807,4 +879,174 @@ bool CIRGenFunction::shouldNullCheckClassCastValue(const CastExpr *ce) {
return true;
}
+/// Computes the length of an array in elements, as well as the base
+/// element type and a properly-typed first element pointer.
+mlir::Value
+CIRGenFunction::emitArrayLength(const clang::ArrayType *origArrayType,
+ QualType &baseType, Address &addr) {
+ const clang::ArrayType *arrayType = origArrayType;
+
+ // If it's a VLA, we have to load the stored size. Note that
+ // this is the size of the VLA in bytes, not its size in elements.
+ if (isa<VariableArrayType>(arrayType)) {
+ assert(!cir::MissingFeatures::vlas());
+ cgm.errorNYI(*currSrcLoc, "VLAs");
+ return builder.getConstInt(*currSrcLoc, SizeTy, 0);
+ }
+
+ uint64_t countFromCLAs = 1;
+ QualType eltType;
+
+ auto cirArrayType = mlir::dyn_cast<cir::ArrayType>(addr.getElementType());
+
+ while (cirArrayType) {
+ assert(isa<ConstantArrayType>(arrayType));
+ countFromCLAs *= cirArrayType.getSize();
+ eltType = arrayType->getElementType();
+
+ cirArrayType =
+ mlir::dyn_cast<cir::ArrayType>(cirArrayType.getElementType());
+
+ arrayType = getContext().getAsArrayType(arrayType->getElementType());
+ assert((!cirArrayType || arrayType) &&
+ "CIR and Clang types are out-of-sync");
+ }
+
+ if (arrayType) {
+ // From this point onwards, the Clang array type has been emitted
+ // as some other type (probably a packed struct). Compute the array
+ // size, and just emit the 'begin' expression as a bitcast.
+ cgm.errorNYI(*currSrcLoc, "length for non-array underlying types");
+ }
+
+ baseType = eltType;
+ return builder.getConstInt(*currSrcLoc, SizeTy, countFromCLAs);
+}
+
+// TODO(cir): Most of this function can be shared between CIRGen
+// and traditional LLVM codegen
+void CIRGenFunction::emitVariablyModifiedType(QualType type) {
+ assert(type->isVariablyModifiedType() &&
+ "Must pass variably modified type to EmitVLASizes!");
+
+ // We're going to walk down into the type and look for VLA
+ // expressions.
+ do {
+ assert(type->isVariablyModifiedType());
+
+ const Type *ty = type.getTypePtr();
+ switch (ty->getTypeClass()) {
+ case Type::CountAttributed:
+ case Type::PackIndexing:
+ case Type::ArrayParameter:
+ case Type::HLSLAttributedResource:
+ case Type::HLSLInlineSpirv:
+ case Type::PredefinedSugar:
+ cgm.errorNYI("CIRGenFunction::emitVariablyModifiedType");
+
+#define TYPE(Class, Base)
+#define ABSTRACT_TYPE(Class, Base)
+#define NON_CANONICAL_TYPE(Class, Base)
+#define DEPENDENT_TYPE(Class, Base) case Type::Class:
+#define NON_CANONICAL_UNLESS_DEPENDENT_TYPE(Class, Base)
+#include "clang/AST/TypeNodes.inc"
+ llvm_unreachable(
+ "dependent type must be resolved before the CIR codegen");
+
+ // These types are never variably-modified.
+ case Type::Builtin:
+ case Type::Complex:
+ case Type::Vector:
+ case Type::ExtVector:
+ case Type::ConstantMatrix:
+ case Type::Record:
+ case Type::Enum:
+ case Type::Using:
+ case Type::TemplateSpecialization:
+ case Type::ObjCTypeParam:
+ case Type::ObjCObject:
+ case Type::ObjCInterface:
+ case Type::ObjCObjectPointer:
+ case Type::BitInt:
+ llvm_unreachable("type class is never variably-modified!");
+
+ case Type::Elaborated:
+ type = cast<clang::ElaboratedType>(ty)->getNamedType();
+ break;
+
+ case Type::Adjusted:
+ type = cast<clang::AdjustedType>(ty)->getAdjustedType();
+ break;
+
+ case Type::Decayed:
+ type = cast<clang::DecayedType>(ty)->getPointeeType();
+ break;
+
+ case Type::Pointer:
+ type = cast<clang::PointerType>(ty)->getPointeeType();
+ break;
+
+ case Type::BlockPointer:
+ type = cast<clang::BlockPointerType>(ty)->getPointeeType();
+ break;
+
+ case Type::LValueReference:
+ case Type::RValueReference:
+ type = cast<clang::ReferenceType>(ty)->getPointeeType();
+ break;
+
+ case Type::MemberPointer:
+ type = cast<clang::MemberPointerType>(ty)->getPointeeType();
+ break;
+
+ case Type::ConstantArray:
+ case Type::IncompleteArray:
+ // Losing element qualification here is fine.
+ type = cast<clang::ArrayType>(ty)->getElementType();
+ break;
+
+ case Type::VariableArray: {
+ cgm.errorNYI("CIRGenFunction::emitVariablyModifiedType VLA");
+ break;
+ }
+
+ case Type::FunctionProto:
+ case Type::FunctionNoProto:
+ type = cast<clang::FunctionType>(ty)->getReturnType();
+ break;
+
+ case Type::Paren:
+ case Type::TypeOf:
+ case Type::UnaryTransform:
+ case Type::Attributed:
+ case Type::BTFTagAttributed:
+ case Type::SubstTemplateTypeParm:
+ case Type::MacroQualified:
+ // Keep walking after single level desugaring.
+ type = type.getSingleStepDesugaredType(getContext());
+ break;
+
+ case Type::Typedef:
+ case Type::Decltype:
+ case Type::Auto:
+ case Type::DeducedTemplateSpecialization:
+ // Stop walking: nothing to do.
+ return;
+
+ case Type::TypeOfExpr:
+ // Stop walking: emit typeof expression.
+ emitIgnoredExpr(cast<clang::TypeOfExprType>(ty)->getUnderlyingExpr());
+ return;
+
+ case Type::Atomic:
+ type = cast<clang::AtomicType>(ty)->getValueType();
+ break;
+
+ case Type::Pipe:
+ type = cast<clang::PipeType>(ty)->getElementType();
+ break;
+ }
+ } while (type->isVariablyModifiedType());
+}
+
} // namespace clang::CIRGen
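Note: a sketch of what emitArrayLength computes for nested constant arrays. The loop walks the cir::ArrayType levels and multiplies their sizes, so a 2x3 array yields a SizeTy constant of 6, with baseType set to the scalar element type:

    void demo(void) {
      int m[2][3];     // countFromCLAs = 2 * 3 = 6; baseType = int
      // Constructor/destructor emission over 'm' then treats it as six
      // contiguous ints at the element alignment of the base address.
      (void)m;
    }
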
diff --git a/clang/lib/CIR/CodeGen/CIRGenFunction.h b/clang/lib/CIR/CodeGen/CIRGenFunction.h
index 12484196..603f750 100644
--- a/clang/lib/CIR/CodeGen/CIRGenFunction.h
+++ b/clang/lib/CIR/CodeGen/CIRGenFunction.h
@@ -18,6 +18,7 @@
#include "CIRGenModule.h"
#include "CIRGenTypeCache.h"
#include "CIRGenValue.h"
+#include "EHScopeStack.h"
#include "Address.h"
@@ -61,6 +62,9 @@ public:
/// The compiler-generated variable that holds the return value.
std::optional<mlir::Value> fnRetAlloca;
+ /// Tracks function scope overall cleanup handling.
+ EHScopeStack ehStack;
+
/// CXXThisDecl - When generating code for a C++ member function,
/// this will hold the implicit 'this' declaration.
ImplicitParamDecl *cxxabiThisDecl = nullptr;
@@ -595,14 +599,65 @@ public:
FunctionArgList args, clang::SourceLocation loc,
clang::SourceLocation startLoc);
+ /// Takes the old cleanup stack size and emits the cleanup blocks
+ /// that have been added.
+ void popCleanupBlocks(size_t oldCleanupStackDepth);
+ void popCleanupBlock();
+
+ /// Enters a new scope for capturing cleanups, all of which
+ /// will be executed once the scope is exited.
+ class RunCleanupsScope {
+ size_t cleanupStackDepth, oldCleanupStackDepth;
+
+ protected:
+ bool performCleanup;
+
+ private:
+ RunCleanupsScope(const RunCleanupsScope &) = delete;
+ void operator=(const RunCleanupsScope &) = delete;
+
+ protected:
+ CIRGenFunction &cgf;
+
+ /// Enter a new cleanup scope.
+ explicit RunCleanupsScope(CIRGenFunction &cgf)
+ : performCleanup(true), cgf(cgf) {
+ cleanupStackDepth = cgf.ehStack.getStackDepth();
+ oldCleanupStackDepth = cgf.currentCleanupStackDepth;
+ cgf.currentCleanupStackDepth = cleanupStackDepth;
+ }
+
+ /// Exit this cleanup scope, emitting any accumulated cleanups.
+ ~RunCleanupsScope() {
+ if (performCleanup)
+ forceCleanup();
+ }
+
+ /// Force the emission of cleanups now, instead of waiting
+ /// until this object is destroyed.
+ void forceCleanup() {
+ assert(performCleanup && "Already forced cleanup");
+ {
+ mlir::OpBuilder::InsertionGuard guard(cgf.getBuilder());
+ cgf.popCleanupBlocks(cleanupStackDepth);
+ performCleanup = false;
+ cgf.currentCleanupStackDepth = oldCleanupStackDepth;
+ }
+ }
+ };
+
+ // Cleanup stack depth of the RunCleanupsScope that was pushed most recently.
+ size_t currentCleanupStackDepth;
+
+public:
/// Represents a scope, including function bodies, compound statements, and
/// the substatements of if/while/do/for/switch/try statements. This class
/// handles any automatic cleanup, along with the return value.
- struct LexicalScope {
+ struct LexicalScope : public RunCleanupsScope {
private:
- // TODO(CIR): This will live in the base class RunCleanupScope once that
- // class is upstreamed.
- CIRGenFunction &cgf;
+ // Block containing cleanup code for things initialized in this
+ // lexical context (scope).
+ mlir::Block *cleanupBlock = nullptr;
// Points to the scope entry block. This is useful, for instance, for
// helping to insert allocas before finalizing any recursive CodeGen from
@@ -632,8 +687,8 @@ public:
unsigned depth = 0;
LexicalScope(CIRGenFunction &cgf, mlir::Location loc, mlir::Block *eb)
- : cgf(cgf), entryBlock(eb), parentScope(cgf.curLexScope), beginLoc(loc),
- endLoc(loc) {
+ : RunCleanupsScope(cgf), entryBlock(eb), parentScope(cgf.curLexScope),
+ beginLoc(loc), endLoc(loc) {
assert(entryBlock && "LexicalScope requires an entry block");
cgf.curLexScope = this;
@@ -671,6 +726,27 @@ public:
void setAsSwitch() { scopeKind = Kind::Switch; }
void setAsTernary() { scopeKind = Kind::Ternary; }
+    // Lazily create the cleanup block, or return the existing one.
+ mlir::Block *getOrCreateCleanupBlock(mlir::OpBuilder &builder) {
+ if (cleanupBlock)
+ return cleanupBlock;
+ cleanupBlock = createCleanupBlock(builder);
+ return cleanupBlock;
+ }
+
+ mlir::Block *getCleanupBlock(mlir::OpBuilder &builder) {
+ return cleanupBlock;
+ }
+
+ mlir::Block *createCleanupBlock(mlir::OpBuilder &builder) {
+      // Create the cleanup block, but don't hook it up just yet.
+ mlir::OpBuilder::InsertionGuard guard(builder);
+ mlir::Region *r = builder.getBlock() ? builder.getBlock()->getParent()
+ : &cgf.curFn->getRegion(0);
+ cleanupBlock = builder.createBlock(r);
+ return cleanupBlock;
+ }
+
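A hedged sketch of targeting the lazily created block without disturbing the current insertion point (hypothetical fragment inside a CIRGenFunction emission method, where builder and curLexScope are members):

    mlir::Block *cleanup = curLexScope->getOrCreateCleanupBlock(builder);
    {
      mlir::OpBuilder::InsertionGuard guard(builder); // restored on scope exit
      builder.setInsertionPointToEnd(cleanup);
      // ... emit destructor calls into the cleanup block ...
    }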
// ---
// Return handling.
// ---
@@ -721,6 +797,12 @@ public:
LexicalScope *curLexScope = nullptr;
+ typedef void Destroyer(CIRGenFunction &cgf, Address addr, QualType ty);
+
+ static Destroyer destroyCXXObject;
+
+ Destroyer *getDestroyer(clang::QualType::DestructionKind kind);
+
/// ----------------------
/// CIR emit functions
/// ----------------------
@@ -766,6 +848,12 @@ public:
/// even if no aggregate location is provided.
RValue emitAnyExprToTemp(const clang::Expr *e);
+ void emitArrayDestroy(mlir::Value begin, mlir::Value end,
+ QualType elementType, CharUnits elementAlign,
+ Destroyer *destroyer);
+
+ mlir::Value emitArrayLength(const clang::ArrayType *arrayType,
+ QualType &baseType, Address &addr);
LValue emitArraySubscriptExpr(const clang::ArraySubscriptExpr *e);
Address emitArrayToPointerDecay(const Expr *array);
@@ -779,6 +867,8 @@ public:
void emitAutoVarCleanups(const AutoVarEmission &emission);
void emitAutoVarInit(const AutoVarEmission &emission);
+ void emitAutoVarTypeCleanup(const AutoVarEmission &emission,
+ clang::QualType::DestructionKind dtorKind);
void emitBaseInitializer(mlir::Location loc, const CXXRecordDecl *classDecl,
CXXCtorInitializer *baseInit);
@@ -836,6 +926,9 @@ public:
LValue emitCompoundLiteralLValue(const CompoundLiteralExpr *e);
void emitConstructorBody(FunctionArgList &args);
+
+ void emitDestroy(Address addr, QualType type, Destroyer *destroyer);
+
void emitDestructorBody(FunctionArgList &args);
mlir::LogicalResult emitContinueStmt(const clang::ContinueStmt &s);
@@ -843,6 +936,16 @@ public:
void emitCXXConstructExpr(const clang::CXXConstructExpr *e,
AggValueSlot dest);
+ void emitCXXAggrConstructorCall(const CXXConstructorDecl *ctor,
+ const clang::ArrayType *arrayType,
+ Address arrayBegin, const CXXConstructExpr *e,
+ bool newPointerIsChecked,
+ bool zeroInitialize = false);
+ void emitCXXAggrConstructorCall(const CXXConstructorDecl *ctor,
+ mlir::Value numElements, Address arrayBase,
+ const CXXConstructExpr *e,
+ bool newPointerIsChecked,
+ bool zeroInitialize);
void emitCXXConstructorCall(const clang::CXXConstructorDecl *d,
clang::CXXCtorType type, bool forVirtualBase,
bool delegating, AggValueSlot thisAVS,
@@ -853,6 +956,15 @@ public:
bool delegating, Address thisAddr,
CallArgList &args, clang::SourceLocation loc);
+ void emitCXXDestructorCall(const CXXDestructorDecl *dd, CXXDtorType type,
+ bool forVirtualBase, bool delegating,
+ Address thisAddr, QualType thisTy);
+
+ RValue emitCXXDestructorCall(GlobalDecl dtor, const CIRGenCallee &callee,
+ mlir::Value thisVal, QualType thisTy,
+ mlir::Value implicitParam,
+ QualType implicitParamTy, const CallExpr *e);
+
mlir::LogicalResult emitCXXForRangeStmt(const CXXForRangeStmt &s,
llvm::ArrayRef<const Attr *> attrs);
@@ -1093,6 +1205,8 @@ public:
/// inside a function, including static vars etc.
void emitVarDecl(const clang::VarDecl &d);
+ void emitVariablyModifiedType(QualType ty);
+
mlir::LogicalResult emitWhileStmt(const clang::WhileStmt &s);
/// Given an assignment `*lhs = rhs`, emit a test that checks if \p rhs is
diff --git a/clang/lib/CIR/CodeGen/CIRGenItaniumCXXABI.cpp b/clang/lib/CIR/CodeGen/CIRGenItaniumCXXABI.cpp
index 1496d87..e5e4c68 100644
--- a/clang/lib/CIR/CodeGen/CIRGenItaniumCXXABI.cpp
+++ b/clang/lib/CIR/CodeGen/CIRGenItaniumCXXABI.cpp
@@ -46,6 +46,11 @@ public:
void emitCXXDestructors(const clang::CXXDestructorDecl *d) override;
void emitCXXStructor(clang::GlobalDecl gd) override;
+ void emitDestructorCall(CIRGenFunction &cgf, const CXXDestructorDecl *dd,
+ CXXDtorType type, bool forVirtualBase,
+ bool delegating, Address thisAddr,
+ QualType thisTy) override;
+
bool useThunkForDtorVariant(const CXXDestructorDecl *dtor,
CXXDtorType dt) const override {
// Itanium does not emit any destructor variant as an inline thunk.
@@ -108,8 +113,6 @@ static StructorCIRGen getCIRGenToUse(CIRGenModule &cgm,
GlobalDecl aliasDecl;
if (const auto *dd = dyn_cast<CXXDestructorDecl>(md)) {
- // The assignment is correct here, but other support for this is NYI.
- cgm.errorNYI(md->getSourceRange(), "getCIRGenToUse: dtor");
aliasDecl = GlobalDecl(dd, Dtor_Complete);
} else {
const auto *cd = cast<CXXConstructorDecl>(md);
@@ -240,6 +243,25 @@ bool CIRGenItaniumCXXABI::needsVTTParameter(GlobalDecl gd) {
return false;
}
+void CIRGenItaniumCXXABI::emitDestructorCall(
+ CIRGenFunction &cgf, const CXXDestructorDecl *dd, CXXDtorType type,
+ bool forVirtualBase, bool delegating, Address thisAddr, QualType thisTy) {
+ GlobalDecl gd(dd, type);
+ if (needsVTTParameter(gd)) {
+ cgm.errorNYI(dd->getSourceRange(), "emitDestructorCall: VTT");
+ }
+
+ mlir::Value vtt = nullptr;
+ ASTContext &astContext = cgm.getASTContext();
+ QualType vttTy = astContext.getPointerType(astContext.VoidPtrTy);
+ assert(!cir::MissingFeatures::appleKext());
+ CIRGenCallee callee =
+ CIRGenCallee::forDirect(cgm.getAddrOfCXXStructor(gd), gd);
+
+ cgf.emitCXXDestructorCall(gd, callee, thisAddr.getPointer(), thisTy, vtt,
+ vttTy, nullptr);
+}
+
CIRGenCXXABI *clang::CIRGen::CreateCIRGenItaniumCXXABI(CIRGenModule &cgm) {
switch (cgm.getASTContext().getCXXABIKind()) {
case TargetCXXABI::GenericItanium:
diff --git a/clang/lib/CIR/CodeGen/CIRGenModule.cpp b/clang/lib/CIR/CodeGen/CIRGenModule.cpp
index 3502705..750fe97 100644
--- a/clang/lib/CIR/CodeGen/CIRGenModule.cpp
+++ b/clang/lib/CIR/CodeGen/CIRGenModule.cpp
@@ -1103,6 +1103,60 @@ cir::GlobalLinkageKind CIRGenModule::getCIRLinkageForDeclarator(
return cir::GlobalLinkageKind::ExternalLinkage;
}
+/// This function is called when we implement a function with no prototype, e.g.
+/// "int foo() {}". If there are existing call uses of the old function in the
+/// module, this adjusts them to call the new function directly.
+///
+/// This is not just a cleanup: the always_inline pass requires direct calls to
+/// functions to be able to inline them. If there is a bitcast in the way, it
+/// won't inline them. Instcombine normally deletes these calls, but it isn't
+/// run at -O0.
+void CIRGenModule::replaceUsesOfNonProtoTypeWithRealFunction(
+ mlir::Operation *old, cir::FuncOp newFn) {
+ // If we're redefining a global as a function, don't transform it.
+ auto oldFn = mlir::dyn_cast<cir::FuncOp>(old);
+ if (!oldFn)
+ return;
+
+ // TODO(cir): this RAUW ignores the features below.
+ assert(!cir::MissingFeatures::opFuncExceptions());
+ assert(!cir::MissingFeatures::opFuncParameterAttributes());
+ assert(!cir::MissingFeatures::opFuncOperandBundles());
+ if (oldFn->getAttrs().size() <= 1)
+ errorNYI(old->getLoc(),
+ "replaceUsesOfNonProtoTypeWithRealFunction: Attribute forwarding");
+
+ // Mark new function as originated from a no-proto declaration.
+ newFn.setNoProto(oldFn.getNoProto());
+
+ // Iterate through all calls of the no-proto function.
+ std::optional<mlir::SymbolTable::UseRange> symUses =
+ oldFn.getSymbolUses(oldFn->getParentOp());
+ for (const mlir::SymbolTable::SymbolUse &use : symUses.value()) {
+ mlir::OpBuilder::InsertionGuard guard(builder);
+
+ if (auto noProtoCallOp = mlir::dyn_cast<cir::CallOp>(use.getUser())) {
+ builder.setInsertionPoint(noProtoCallOp);
+
+ // Patch call type with the real function type.
+ cir::CallOp realCallOp = builder.createCallOp(
+ noProtoCallOp.getLoc(), newFn, noProtoCallOp.getOperands());
+
+      // Replace the old no-proto call with the fixed call.
+ noProtoCallOp.replaceAllUsesWith(realCallOp);
+ noProtoCallOp.erase();
+ } else if (auto getGlobalOp =
+ mlir::dyn_cast<cir::GetGlobalOp>(use.getUser())) {
+      // Update the result type to a pointer to the new function type.
+ getGlobalOp.getAddr().setType(
+ cir::PointerType::get(newFn.getFunctionType()));
+ } else {
+ errorNYI(use.getUser()->getLoc(),
+ "replaceUsesOfNonProtoTypeWithRealFunction: unexpected use");
+ }
+ }
+}
+
cir::GlobalLinkageKind
CIRGenModule::getCIRLinkageVarDefinition(const VarDecl *vd, bool isConstant) {
assert(!isConstant && "constant variables NYI");
@@ -1208,6 +1262,15 @@ cir::GlobalOp CIRGenModule::getGlobalForStringLiteral(const StringLiteral *s,
return gv;
}
+void CIRGenModule::emitExplicitCastExprType(const ExplicitCastExpr *e,
+ CIRGenFunction *cgf) {
+ if (cgf && e->getType()->isVariablyModifiedType())
+ cgf->emitVariablyModifiedType(e->getType());
+
+ assert(!cir::MissingFeatures::generateDebugInfo() &&
+ "emitExplicitCastExprType");
+}
+
void CIRGenModule::emitDeclContext(const DeclContext *dc) {
for (Decl *decl : dc->decls()) {
// Unlike other DeclContexts, the contents of an ObjCImplDecl at TU scope
@@ -1530,10 +1593,10 @@ static bool shouldAssumeDSOLocal(const CIRGenModule &cgm,
const llvm::Triple &tt = cgm.getTriple();
const CodeGenOptions &cgOpts = cgm.getCodeGenOpts();
- if (tt.isWindowsGNUEnvironment()) {
- // In MinGW, variables without DLLImport can still be automatically
- // imported from a DLL by the linker; don't mark variables that
- // potentially could come from another DLL as DSO local.
+ if (tt.isOSCygMing()) {
+ // In MinGW and Cygwin, variables without DLLImport can still be
+ // automatically imported from a DLL by the linker; don't mark variables
+ // that potentially could come from another DLL as DSO local.
// With EmulatedTLS, TLS variables can be autoimported from other DLLs
// (and this actually happens in the public interface of libstdc++), so
@@ -1692,8 +1755,7 @@ cir::FuncOp CIRGenModule::getOrCreateCIRFunction(
// Lookup the entry, lazily creating it if necessary.
mlir::Operation *entry = getGlobalValue(mangledName);
if (entry) {
- if (!isa<cir::FuncOp>(entry))
- errorNYI(d->getSourceRange(), "getOrCreateCIRFunction: non-FuncOp");
+ assert(mlir::isa<cir::FuncOp>(entry));
assert(!cir::MissingFeatures::weakRefReference());
@@ -1729,6 +1791,30 @@ cir::FuncOp CIRGenModule::getOrCreateCIRFunction(
invalidLoc ? theModule->getLoc() : getLoc(funcDecl->getSourceRange()),
mangledName, mlir::cast<cir::FuncType>(funcType), funcDecl);
+ // If we already created a function with the same mangled name (but different
+ // type) before, take its name and add it to the list of functions to be
+ // replaced with F at the end of CodeGen.
+ //
+ // This happens if there is a prototype for a function (e.g. "int f()") and
+ // then a definition of a different type (e.g. "int f(int x)").
+ if (entry) {
+ // Fetch a generic symbol-defining operation and its uses.
+ auto symbolOp = mlir::cast<mlir::SymbolOpInterface>(entry);
+
+ // This might be an implementation of a function without a prototype, in
+ // which case, try to do special replacement of calls which match the new
+    // prototype. The key point is that we also potentially drop arguments
+    // from the call site so as to make a direct call, which makes the
+    // inliner happier and suppresses a number of optimizer warnings about
+    // dropping arguments.
+ if (symbolOp.getSymbolUses(symbolOp->getParentOp()))
+ replaceUsesOfNonProtoTypeWithRealFunction(entry, funcOp);
+
+ // Obliterate no-proto declaration.
+ entry->erase();
+ }
+
if (d)
setFunctionAttributes(gd, funcOp, /*isIncompleteFunction=*/false, isThunk);
@@ -1805,7 +1891,9 @@ CIRGenModule::createCIRFunction(mlir::Location loc, StringRef name,
func = builder.create<cir::FuncOp>(loc, name, funcType);
assert(!cir::MissingFeatures::opFuncAstDeclAttr());
- assert(!cir::MissingFeatures::opFuncNoProto());
+
+ if (funcDecl && !funcDecl->hasPrototype())
+ func.setNoProto(true);
assert(func.isDeclaration() && "expected empty body");
diff --git a/clang/lib/CIR/CodeGen/CIRGenModule.h b/clang/lib/CIR/CodeGen/CIRGenModule.h
index 16922b1..5d07d38 100644
--- a/clang/lib/CIR/CodeGen/CIRGenModule.h
+++ b/clang/lib/CIR/CodeGen/CIRGenModule.h
@@ -252,6 +252,11 @@ public:
getAddrOfGlobal(clang::GlobalDecl gd,
ForDefinition_t isForDefinition = NotForDefinition);
+ /// Emit type info if type of an expression is a variably modified
+ /// type. Also emit proper debug info for cast types.
+ void emitExplicitCastExprType(const ExplicitCastExpr *e,
+ CIRGenFunction *cgf = nullptr);
+
/// Emit code for a single global function or variable declaration. Forward
/// declarations are emitted lazily.
void emitGlobal(clang::GlobalDecl gd);
@@ -308,6 +313,9 @@ public:
static void setInitializer(cir::GlobalOp &op, mlir::Attribute value);
+ void replaceUsesOfNonProtoTypeWithRealFunction(mlir::Operation *old,
+ cir::FuncOp newFn);
+
cir::FuncOp
getOrCreateCIRFunction(llvm::StringRef mangledName, mlir::Type funcType,
clang::GlobalDecl gd, bool forVTable,
diff --git a/clang/lib/CIR/CodeGen/CIRGenRecordLayoutBuilder.cpp b/clang/lib/CIR/CodeGen/CIRGenRecordLayoutBuilder.cpp
index 05e8848..e4ec380 100644
--- a/clang/lib/CIR/CodeGen/CIRGenRecordLayoutBuilder.cpp
+++ b/clang/lib/CIR/CodeGen/CIRGenRecordLayoutBuilder.cpp
@@ -438,9 +438,7 @@ CIRRecordLowering::accumulateBitFields(RecordDecl::field_iterator field,
} else if (cirGenTypes.getCGModule()
.getCodeGenOpts()
.FineGrainedBitfieldAccesses) {
- assert(!cir::MissingFeatures::nonFineGrainedBitfields());
- cirGenTypes.getCGModule().errorNYI(field->getSourceRange(),
- "NYI FineGrainedBitfield");
+ installBest = true;
} else {
// Otherwise, we're not installing. Update the bit size
// of the current span to go all the way to limitOffset, which is
diff --git a/clang/lib/CIR/CodeGen/CIRGenStmt.cpp b/clang/lib/CIR/CodeGen/CIRGenStmt.cpp
index 9193f6f..21bee33 100644
--- a/clang/lib/CIR/CodeGen/CIRGenStmt.cpp
+++ b/clang/lib/CIR/CodeGen/CIRGenStmt.cpp
@@ -409,7 +409,10 @@ mlir::LogicalResult CIRGenFunction::emitReturnStmt(const ReturnStmt &s) {
}
auto *retBlock = curLexScope->getOrCreateRetBlock(*this, loc);
+ // This should emit a branch through the cleanup block if one exists.
builder.create<cir::BrOp>(loc, retBlock);
+ if (ehStack.getStackDepth() != currentCleanupStackDepth)
+ cgm.errorNYI(s.getSourceRange(), "return with cleanup stack");
builder.createBlock(builder.getBlock()->getParent());
return mlir::success();
diff --git a/clang/lib/CIR/CodeGen/CMakeLists.txt b/clang/lib/CIR/CodeGen/CMakeLists.txt
index 03ea60c..ca3a329 100644
--- a/clang/lib/CIR/CodeGen/CMakeLists.txt
+++ b/clang/lib/CIR/CodeGen/CMakeLists.txt
@@ -11,6 +11,7 @@ add_clang_library(clangCIR
CIRGenBuilder.cpp
CIRGenCall.cpp
CIRGenClass.cpp
+ CIRGenCleanup.cpp
CIRGenCXX.cpp
CIRGenCXXABI.cpp
CIRGenCXXExpr.cpp
diff --git a/clang/lib/CIR/CodeGen/EHScopeStack.h b/clang/lib/CIR/CodeGen/EHScopeStack.h
new file mode 100644
index 0000000..22750ac
--- /dev/null
+++ b/clang/lib/CIR/CodeGen/EHScopeStack.h
@@ -0,0 +1,99 @@
+//===-- EHScopeStack.h - Stack for cleanup CIR generation -------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// These classes should be the minimum interface required for other parts of
+// CIR CodeGen to emit cleanups. The implementation is in CIRGenCleanup.cpp and
+// other implementation details that are not widely needed are in
+// CIRGenCleanup.h.
+//
+// TODO(cir): this header should be shared between LLVM and CIR codegen.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef CLANG_LIB_CIR_CODEGEN_EHSCOPESTACK_H
+#define CLANG_LIB_CIR_CODEGEN_EHSCOPESTACK_H
+
+#include "llvm/ADT/SmallVector.h"
+
+namespace clang::CIRGen {
+
+class CIRGenFunction;
+
+enum CleanupKind : unsigned {
+ /// Denotes a cleanup that should run when a scope is exited using exceptional
+  /// control flow (e.g. a throw statement leading to stack unwinding).
+ EHCleanup = 0x1,
+
+ /// Denotes a cleanup that should run when a scope is exited using normal
+ /// control flow (falling off the end of the scope, return, goto, ...).
+ NormalCleanup = 0x2,
+
+ NormalAndEHCleanup = EHCleanup | NormalCleanup,
+
+ LifetimeMarker = 0x8,
+ NormalEHLifetimeMarker = LifetimeMarker | NormalAndEHCleanup,
+};
+
+/// A stack of scopes which respond to exceptions, including cleanups
+/// and catch blocks.
+class EHScopeStack {
+public:
+ /// Information for lazily generating a cleanup. Subclasses must be
+ /// POD-like: cleanups will not be destructed, and they will be
+ /// allocated on the cleanup stack and freely copied and moved
+ /// around.
+ ///
+ /// Cleanup implementations should generally be declared in an
+ /// anonymous namespace.
+ class Cleanup {
+ // Anchor the construction vtable.
+ virtual void anchor();
+
+ public:
+ Cleanup(const Cleanup &) = default;
+ Cleanup(Cleanup &&) {}
+ Cleanup() = default;
+
+ virtual ~Cleanup() = default;
+
+ /// Emit the cleanup. For normal cleanups, this is run in the
+ /// same EH context as when the cleanup was pushed, i.e. the
+ /// immediately-enclosing context of the cleanup scope. For
+ /// EH cleanups, this is run in a terminate context.
+ virtual void emit(CIRGenFunction &cgf) = 0;
+ };
+
+ // Classic codegen has a finely tuned custom allocator and a complex stack
+ // management scheme. We'll probably eventually want to find a way to share
+ // that implementation. For now, we will use a very simplified implementation
+ // to get cleanups working.
+ llvm::SmallVector<std::unique_ptr<Cleanup>, 8> cleanupStack;
+
+private:
+  /// The CGF this stack belongs to.
+ CIRGenFunction *cgf = nullptr;
+
+public:
+ EHScopeStack() = default;
+ ~EHScopeStack() = default;
+
+ /// Push a lazily-created cleanup on the stack.
+ template <class T, class... As> void pushCleanup(CleanupKind kind, As... a) {
+ cleanupStack.push_back(std::make_unique<T>(a...));
+ }
+
+ void setCGF(CIRGenFunction *inCGF) { cgf = inCGF; }
+
+ size_t getStackDepth() const { return cleanupStack.size(); }
+};
+
+} // namespace clang::CIRGen
+
+#endif // CLANG_LIB_CIR_CODEGEN_EHSCOPESTACK_H
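A hedged sketch of the intended usage (hypothetical cleanup type and call site; real cleanups are declared in anonymous namespaces in the CIRGen*.cpp files, and Address/QualType come from the CIRGen headers):

    namespace {
    struct DestroyObject final : EHScopeStack::Cleanup {
      Address addr;
      clang::QualType ty;
      DestroyObject(Address addr, clang::QualType ty) : addr(addr), ty(ty) {}
      void emit(CIRGenFunction &cgf) override {
        // ... emit the destructor call for the object at `addr` ...
      }
    };
    } // namespace

    // At the point the object becomes live:
    //   cgf.ehStack.pushCleanup<DestroyObject>(NormalAndEHCleanup, addr, ty);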
diff --git a/clang/lib/CIR/Dialect/IR/CIRDialect.cpp b/clang/lib/CIR/Dialect/IR/CIRDialect.cpp
index f0416b6..1c3a310 100644
--- a/clang/lib/CIR/Dialect/IR/CIRDialect.cpp
+++ b/clang/lib/CIR/Dialect/IR/CIRDialect.cpp
@@ -17,6 +17,7 @@
#include "mlir/Interfaces/ControlFlowInterfaces.h"
#include "mlir/Interfaces/FunctionImplementation.h"
+#include "mlir/Support/LLVM.h"
#include "clang/CIR/Dialect/IR/CIROpsDialect.cpp.inc"
#include "clang/CIR/Dialect/IR/CIROpsEnums.cpp.inc"
@@ -338,7 +339,7 @@ static LogicalResult checkConstantTypes(mlir::Operation *op, mlir::Type opType,
}
if (mlir::isa<cir::ConstArrayAttr, cir::ConstVectorAttr,
- cir::ConstComplexAttr>(attrType))
+ cir::ConstComplexAttr, cir::PoisonAttr>(attrType))
return success();
assert(isa<TypedAttr>(attrType) && "What else could we be looking at here?");
@@ -489,6 +490,104 @@ LogicalResult cir::CastOp::verify() {
return emitOpError() << "requires two types differ in addrspace only";
return success();
}
+ case cir::CastKind::float_to_complex: {
+ if (!mlir::isa<cir::FPTypeInterface>(srcType))
+ return emitOpError() << "requires !cir.float type for source";
+ auto resComplexTy = mlir::dyn_cast<cir::ComplexType>(resType);
+ if (!resComplexTy)
+ return emitOpError() << "requires !cir.complex type for result";
+ if (srcType != resComplexTy.getElementType())
+ return emitOpError() << "requires source type match result element type";
+ return success();
+ }
+ case cir::CastKind::int_to_complex: {
+ if (!mlir::isa<cir::IntType>(srcType))
+ return emitOpError() << "requires !cir.int type for source";
+ auto resComplexTy = mlir::dyn_cast<cir::ComplexType>(resType);
+ if (!resComplexTy)
+ return emitOpError() << "requires !cir.complex type for result";
+ if (srcType != resComplexTy.getElementType())
+ return emitOpError() << "requires source type match result element type";
+ return success();
+ }
+ case cir::CastKind::float_complex_to_real: {
+ auto srcComplexTy = mlir::dyn_cast<cir::ComplexType>(srcType);
+ if (!srcComplexTy)
+ return emitOpError() << "requires !cir.complex type for source";
+ if (!mlir::isa<cir::FPTypeInterface>(resType))
+ return emitOpError() << "requires !cir.float type for result";
+ if (srcComplexTy.getElementType() != resType)
+ return emitOpError() << "requires source element type match result type";
+ return success();
+ }
+ case cir::CastKind::int_complex_to_real: {
+ auto srcComplexTy = mlir::dyn_cast<cir::ComplexType>(srcType);
+ if (!srcComplexTy)
+ return emitOpError() << "requires !cir.complex type for source";
+ if (!mlir::isa<cir::IntType>(resType))
+ return emitOpError() << "requires !cir.int type for result";
+ if (srcComplexTy.getElementType() != resType)
+ return emitOpError() << "requires source element type match result type";
+ return success();
+ }
+ case cir::CastKind::float_complex_to_bool: {
+ auto srcComplexTy = mlir::dyn_cast<cir::ComplexType>(srcType);
+ if (!srcComplexTy || !srcComplexTy.isFloatingPointComplex())
+ return emitOpError()
+ << "requires floating point !cir.complex type for source";
+ if (!mlir::isa<cir::BoolType>(resType))
+ return emitOpError() << "requires !cir.bool type for result";
+ return success();
+ }
+ case cir::CastKind::int_complex_to_bool: {
+ auto srcComplexTy = mlir::dyn_cast<cir::ComplexType>(srcType);
+ if (!srcComplexTy || !srcComplexTy.isIntegerComplex())
+      return emitOpError()
+             << "requires integer !cir.complex type for source";
+ if (!mlir::isa<cir::BoolType>(resType))
+ return emitOpError() << "requires !cir.bool type for result";
+ return success();
+ }
+ case cir::CastKind::float_complex: {
+ auto srcComplexTy = mlir::dyn_cast<cir::ComplexType>(srcType);
+ if (!srcComplexTy || !srcComplexTy.isFloatingPointComplex())
+ return emitOpError()
+ << "requires floating point !cir.complex type for source";
+ auto resComplexTy = mlir::dyn_cast<cir::ComplexType>(resType);
+ if (!resComplexTy || !resComplexTy.isFloatingPointComplex())
+ return emitOpError()
+ << "requires floating point !cir.complex type for result";
+ return success();
+ }
+ case cir::CastKind::float_complex_to_int_complex: {
+ auto srcComplexTy = mlir::dyn_cast<cir::ComplexType>(srcType);
+ if (!srcComplexTy || !srcComplexTy.isFloatingPointComplex())
+ return emitOpError()
+ << "requires floating point !cir.complex type for source";
+ auto resComplexTy = mlir::dyn_cast<cir::ComplexType>(resType);
+ if (!resComplexTy || !resComplexTy.isIntegerComplex())
+ return emitOpError() << "requires integer !cir.complex type for result";
+ return success();
+ }
+ case cir::CastKind::int_complex: {
+ auto srcComplexTy = mlir::dyn_cast<cir::ComplexType>(srcType);
+ if (!srcComplexTy || !srcComplexTy.isIntegerComplex())
+ return emitOpError() << "requires integer !cir.complex type for source";
+ auto resComplexTy = mlir::dyn_cast<cir::ComplexType>(resType);
+ if (!resComplexTy || !resComplexTy.isIntegerComplex())
+ return emitOpError() << "requires integer !cir.complex type for result";
+ return success();
+ }
+ case cir::CastKind::int_complex_to_float_complex: {
+ auto srcComplexTy = mlir::dyn_cast<cir::ComplexType>(srcType);
+ if (!srcComplexTy || !srcComplexTy.isIntegerComplex())
+ return emitOpError() << "requires integer !cir.complex type for source";
+ auto resComplexTy = mlir::dyn_cast<cir::ComplexType>(resType);
+ if (!resComplexTy || !resComplexTy.isFloatingPointComplex())
+ return emitOpError()
+ << "requires floating point !cir.complex type for result";
+ return success();
+ }
default:
llvm_unreachable("Unknown CastOp kind?");
}
@@ -530,6 +629,11 @@ static Value tryFoldCastChain(cir::CastOp op) {
}
OpFoldResult cir::CastOp::fold(FoldAdaptor adaptor) {
+ if (mlir::isa_and_present<cir::PoisonAttr>(adaptor.getSrc())) {
+ // Propagate poison value
+ return cir::PoisonAttr::get(getContext(), getType());
+ }
+
if (getSrc().getType() == getType()) {
switch (getKind()) {
case cir::CastKind::integral: {
@@ -1366,10 +1470,14 @@ ParseResult cir::FuncOp::parse(OpAsmParser &parser, OperationState &state) {
llvm::SMLoc loc = parser.getCurrentLocation();
mlir::Builder &builder = parser.getBuilder();
+ mlir::StringAttr noProtoNameAttr = getNoProtoAttrName(state.name);
mlir::StringAttr visNameAttr = getSymVisibilityAttrName(state.name);
mlir::StringAttr visibilityNameAttr = getGlobalVisibilityAttrName(state.name);
mlir::StringAttr dsoLocalNameAttr = getDsoLocalAttrName(state.name);
+ if (parser.parseOptionalKeyword(noProtoNameAttr).succeeded())
+ state.addAttribute(noProtoNameAttr, parser.getBuilder().getUnitAttr());
+
// Default to external linkage if no keyword is provided.
state.addAttribute(getLinkageAttrNameString(),
GlobalLinkageKindAttr::get(
@@ -1474,6 +1582,9 @@ mlir::Region *cir::FuncOp::getCallableRegion() {
}
void cir::FuncOp::print(OpAsmPrinter &p) {
+ if (getNoProto())
+ p << " no_proto";
+
if (getComdat())
p << " comdat";
@@ -1684,6 +1795,12 @@ static bool isBoolNot(cir::UnaryOp op) {
//
// and the argument of the first one (%0) will be used instead.
OpFoldResult cir::UnaryOp::fold(FoldAdaptor adaptor) {
+ if (auto poison =
+ mlir::dyn_cast_if_present<cir::PoisonAttr>(adaptor.getInput())) {
+ // Propagate poison values
+ return poison;
+ }
+
if (isBoolNot(*this))
if (auto previous = dyn_cast_or_null<UnaryOp>(getInput().getDefiningOp()))
if (isBoolNot(previous))
@@ -2133,6 +2250,143 @@ LogicalResult cir::ComplexImagPtrOp::verify() {
}
//===----------------------------------------------------------------------===//
+// Bit manipulation operations
+//===----------------------------------------------------------------------===//
+
+static OpFoldResult
+foldUnaryBitOp(mlir::Attribute inputAttr,
+ llvm::function_ref<llvm::APInt(const llvm::APInt &)> func,
+ bool poisonZero = false) {
+ if (mlir::isa_and_present<cir::PoisonAttr>(inputAttr)) {
+ // Propagate poison value
+ return inputAttr;
+ }
+
+ auto input = mlir::dyn_cast_if_present<IntAttr>(inputAttr);
+ if (!input)
+ return nullptr;
+
+ llvm::APInt inputValue = input.getValue();
+ if (poisonZero && inputValue.isZero())
+ return cir::PoisonAttr::get(input.getType());
+
+ llvm::APInt resultValue = func(inputValue);
+ return IntAttr::get(input.getType(), resultValue);
+}
+
+OpFoldResult BitClrsbOp::fold(FoldAdaptor adaptor) {
+ return foldUnaryBitOp(adaptor.getInput(), [](const llvm::APInt &inputValue) {
+ unsigned resultValue =
+ inputValue.getBitWidth() - inputValue.getSignificantBits();
+ return llvm::APInt(inputValue.getBitWidth(), resultValue);
+ });
+}
+
+OpFoldResult BitClzOp::fold(FoldAdaptor adaptor) {
+ return foldUnaryBitOp(
+ adaptor.getInput(),
+ [](const llvm::APInt &inputValue) {
+ unsigned resultValue = inputValue.countLeadingZeros();
+ return llvm::APInt(inputValue.getBitWidth(), resultValue);
+ },
+ getPoisonZero());
+}
+
+OpFoldResult BitCtzOp::fold(FoldAdaptor adaptor) {
+ return foldUnaryBitOp(
+ adaptor.getInput(),
+ [](const llvm::APInt &inputValue) {
+ return llvm::APInt(inputValue.getBitWidth(),
+ inputValue.countTrailingZeros());
+ },
+ getPoisonZero());
+}
+
+OpFoldResult BitFfsOp::fold(FoldAdaptor adaptor) {
+ return foldUnaryBitOp(adaptor.getInput(), [](const llvm::APInt &inputValue) {
+ unsigned trailingZeros = inputValue.countTrailingZeros();
+ unsigned result =
+ trailingZeros == inputValue.getBitWidth() ? 0 : trailingZeros + 1;
+ return llvm::APInt(inputValue.getBitWidth(), result);
+ });
+}
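A small self-contained check of the folding rule above (ffs(0) == 0, otherwise cttz + 1), mirroring the lambda with llvm::APInt directly; the assertions are illustrative:

    #include "llvm/ADT/APInt.h"
    #include <cassert>

    static unsigned ffsOf(const llvm::APInt &v) {
      unsigned tz = v.countTrailingZeros();
      return tz == v.getBitWidth() ? 0 : tz + 1;
    }

    void checkFfs() {
      assert(ffsOf(llvm::APInt(32, 0)) == 0);  // all zeros: ffs is 0
      assert(ffsOf(llvm::APInt(32, 8)) == 4);  // 0b1000: lowest set bit is #4
    }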
+
+OpFoldResult BitParityOp::fold(FoldAdaptor adaptor) {
+ return foldUnaryBitOp(adaptor.getInput(), [](const llvm::APInt &inputValue) {
+ return llvm::APInt(inputValue.getBitWidth(), inputValue.popcount() % 2);
+ });
+}
+
+OpFoldResult BitPopcountOp::fold(FoldAdaptor adaptor) {
+ return foldUnaryBitOp(adaptor.getInput(), [](const llvm::APInt &inputValue) {
+ return llvm::APInt(inputValue.getBitWidth(), inputValue.popcount());
+ });
+}
+
+OpFoldResult BitReverseOp::fold(FoldAdaptor adaptor) {
+ return foldUnaryBitOp(adaptor.getInput(), [](const llvm::APInt &inputValue) {
+ return inputValue.reverseBits();
+ });
+}
+
+OpFoldResult ByteSwapOp::fold(FoldAdaptor adaptor) {
+ return foldUnaryBitOp(adaptor.getInput(), [](const llvm::APInt &inputValue) {
+ return inputValue.byteSwap();
+ });
+}
+
+OpFoldResult RotateOp::fold(FoldAdaptor adaptor) {
+ if (mlir::isa_and_present<cir::PoisonAttr>(adaptor.getInput()) ||
+ mlir::isa_and_present<cir::PoisonAttr>(adaptor.getAmount())) {
+ // Propagate poison values
+ return cir::PoisonAttr::get(getType());
+ }
+
+ auto input = mlir::dyn_cast_if_present<IntAttr>(adaptor.getInput());
+ auto amount = mlir::dyn_cast_if_present<IntAttr>(adaptor.getAmount());
+ if (!input && !amount)
+ return nullptr;
+
+ // We could fold cir.rotate even if one of its two operands is not a constant:
+ // - `cir.rotate left/right %0, 0` could be folded into just %0 even if %0
+ // is not a constant.
+ // - `cir.rotate left/right 0/0b111...111, %0` could be folded into 0 or
+ // 0b111...111 even if %0 is not a constant.
+
+ llvm::APInt inputValue;
+ if (input) {
+ inputValue = input.getValue();
+ if (inputValue.isZero() || inputValue.isAllOnes()) {
+ // An input value of all 0s or all 1s will not change after rotation
+ return input;
+ }
+ }
+
+ uint64_t amountValue;
+ if (amount) {
+ amountValue = amount.getValue().urem(getInput().getType().getWidth());
+ if (amountValue == 0) {
+ // A shift amount of 0 will not change the input value
+ return getInput();
+ }
+ }
+
+ if (!input || !amount)
+ return nullptr;
+
+ assert(inputValue.getBitWidth() == getInput().getType().getWidth() &&
+ "input value must have the same bit width as the input type");
+
+ llvm::APInt resultValue;
+ if (isRotateLeft())
+ resultValue = inputValue.rotl(amountValue);
+ else
+ resultValue = inputValue.rotr(amountValue);
+
+ return IntAttr::get(input.getContext(), input.getType(), resultValue);
+}
+
+//===----------------------------------------------------------------------===//
// TableGen'd op method definitions
//===----------------------------------------------------------------------===//
diff --git a/clang/lib/CIR/Dialect/Transforms/CIRCanonicalize.cpp b/clang/lib/CIR/Dialect/Transforms/CIRCanonicalize.cpp
index e505db5..2eaa60c 100644
--- a/clang/lib/CIR/Dialect/Transforms/CIRCanonicalize.cpp
+++ b/clang/lib/CIR/Dialect/Transforms/CIRCanonicalize.cpp
@@ -143,7 +143,8 @@ void CIRCanonicalizePass::runOnOperation() {
if (isa<BrOp, BrCondOp, CastOp, ScopeOp, SwitchOp, SelectOp, UnaryOp,
ComplexCreateOp, ComplexImagOp, ComplexRealOp, VecCmpOp,
VecCreateOp, VecExtractOp, VecShuffleOp, VecShuffleDynamicOp,
- VecTernaryOp>(op))
+ VecTernaryOp, BitClrsbOp, BitClzOp, BitCtzOp, BitFfsOp, BitParityOp,
+ BitPopcountOp, BitReverseOp, ByteSwapOp, RotateOp>(op))
ops.push_back(op);
});
diff --git a/clang/lib/CIR/Dialect/Transforms/LoweringPrepare.cpp b/clang/lib/CIR/Dialect/Transforms/LoweringPrepare.cpp
index 8f848c7..ce3b30d 100644
--- a/clang/lib/CIR/Dialect/Transforms/LoweringPrepare.cpp
+++ b/clang/lib/CIR/Dialect/Transforms/LoweringPrepare.cpp
@@ -8,11 +8,14 @@
#include "PassDetail.h"
#include "clang/AST/ASTContext.h"
+#include "clang/AST/CharUnits.h"
#include "clang/CIR/Dialect/Builder/CIRBaseBuilder.h"
#include "clang/CIR/Dialect/IR/CIRDialect.h"
#include "clang/CIR/Dialect/IR/CIROpsEnums.h"
#include "clang/CIR/Dialect/Passes.h"
+#include "clang/CIR/MissingFeatures.h"
#include <memory>
using namespace mlir;
@@ -24,11 +27,107 @@ struct LoweringPreparePass : public LoweringPrepareBase<LoweringPreparePass> {
void runOnOperation() override;
void runOnOp(mlir::Operation *op);
+ void lowerCastOp(cir::CastOp op);
void lowerUnaryOp(cir::UnaryOp op);
+ void lowerArrayDtor(cir::ArrayDtor op);
+ void lowerArrayCtor(cir::ArrayCtor op);
+
+ ///
+ /// AST related
+ /// -----------
+
+  clang::ASTContext *astCtx = nullptr;
+
+ void setASTContext(clang::ASTContext *c) { astCtx = c; }
};
} // namespace
+static mlir::Value lowerScalarToComplexCast(mlir::MLIRContext &ctx,
+ cir::CastOp op) {
+ cir::CIRBaseBuilderTy builder(ctx);
+ builder.setInsertionPoint(op);
+
+ mlir::Value src = op.getSrc();
+ mlir::Value imag = builder.getNullValue(src.getType(), op.getLoc());
+ return builder.createComplexCreate(op.getLoc(), src, imag);
+}
+
+static mlir::Value lowerComplexToScalarCast(mlir::MLIRContext &ctx,
+ cir::CastOp op,
+ cir::CastKind elemToBoolKind) {
+ cir::CIRBaseBuilderTy builder(ctx);
+ builder.setInsertionPoint(op);
+
+ mlir::Value src = op.getSrc();
+ if (!mlir::isa<cir::BoolType>(op.getType()))
+ return builder.createComplexReal(op.getLoc(), src);
+
+ // Complex cast to bool: (bool)(a+bi) => (bool)a || (bool)b
+ mlir::Value srcReal = builder.createComplexReal(op.getLoc(), src);
+ mlir::Value srcImag = builder.createComplexImag(op.getLoc(), src);
+
+ cir::BoolType boolTy = builder.getBoolTy();
+ mlir::Value srcRealToBool =
+ builder.createCast(op.getLoc(), elemToBoolKind, srcReal, boolTy);
+ mlir::Value srcImagToBool =
+ builder.createCast(op.getLoc(), elemToBoolKind, srcImag, boolTy);
+ return builder.createLogicalOr(op.getLoc(), srcRealToBool, srcImagToBool);
+}
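The bool path above follows the usual C semantics: a complex value is truthy when either component is nonzero. A scalar model of what the emitted CIR computes (hypothetical helper, illustration only):

    // (bool)(re + im*i)  ==>  (re != 0) || (im != 0)
    bool complexToBool(double re, double im) {
      return re != 0.0 || im != 0.0;
    }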
+
+static mlir::Value lowerComplexToComplexCast(mlir::MLIRContext &ctx,
+ cir::CastOp op,
+ cir::CastKind scalarCastKind) {
+ CIRBaseBuilderTy builder(ctx);
+ builder.setInsertionPoint(op);
+
+ mlir::Value src = op.getSrc();
+ auto dstComplexElemTy =
+ mlir::cast<cir::ComplexType>(op.getType()).getElementType();
+
+ mlir::Value srcReal = builder.createComplexReal(op.getLoc(), src);
+ mlir::Value srcImag = builder.createComplexImag(op.getLoc(), src);
+
+ mlir::Value dstReal = builder.createCast(op.getLoc(), scalarCastKind, srcReal,
+ dstComplexElemTy);
+ mlir::Value dstImag = builder.createCast(op.getLoc(), scalarCastKind, srcImag,
+ dstComplexElemTy);
+ return builder.createComplexCreate(op.getLoc(), dstReal, dstImag);
+}
+
+void LoweringPreparePass::lowerCastOp(cir::CastOp op) {
+ mlir::MLIRContext &ctx = getContext();
+ mlir::Value loweredValue = [&]() -> mlir::Value {
+ switch (op.getKind()) {
+ case cir::CastKind::float_to_complex:
+ case cir::CastKind::int_to_complex:
+ return lowerScalarToComplexCast(ctx, op);
+ case cir::CastKind::float_complex_to_real:
+ case cir::CastKind::int_complex_to_real:
+ return lowerComplexToScalarCast(ctx, op, op.getKind());
+ case cir::CastKind::float_complex_to_bool:
+ return lowerComplexToScalarCast(ctx, op, cir::CastKind::float_to_bool);
+ case cir::CastKind::int_complex_to_bool:
+ return lowerComplexToScalarCast(ctx, op, cir::CastKind::int_to_bool);
+ case cir::CastKind::float_complex:
+ return lowerComplexToComplexCast(ctx, op, cir::CastKind::floating);
+ case cir::CastKind::float_complex_to_int_complex:
+ return lowerComplexToComplexCast(ctx, op, cir::CastKind::float_to_int);
+ case cir::CastKind::int_complex:
+ return lowerComplexToComplexCast(ctx, op, cir::CastKind::integral);
+ case cir::CastKind::int_complex_to_float_complex:
+ return lowerComplexToComplexCast(ctx, op, cir::CastKind::int_to_float);
+ default:
+ return nullptr;
+ }
+ }();
+
+ if (loweredValue) {
+ op.replaceAllUsesWith(loweredValue);
+ op.erase();
+ }
+}
+
void LoweringPreparePass::lowerUnaryOp(cir::UnaryOp op) {
mlir::Type ty = op.getType();
if (!mlir::isa<cir::ComplexType>(ty))
@@ -71,8 +170,105 @@ void LoweringPreparePass::lowerUnaryOp(cir::UnaryOp op) {
op.erase();
}
+static void lowerArrayDtorCtorIntoLoop(cir::CIRBaseBuilderTy &builder,
+ clang::ASTContext *astCtx,
+ mlir::Operation *op, mlir::Type eltTy,
+ mlir::Value arrayAddr, uint64_t arrayLen,
+ bool isCtor) {
+ // Generate loop to call into ctor/dtor for every element.
+ mlir::Location loc = op->getLoc();
+
+ // TODO: instead of getting the size from the AST context, create alias for
+ // PtrDiffTy and unify with CIRGen stuff.
+ const unsigned sizeTypeSize =
+ astCtx->getTypeSize(astCtx->getSignedSizeType());
+ uint64_t endOffset = isCtor ? arrayLen : arrayLen - 1;
+ mlir::Value endOffsetVal =
+ builder.getUnsignedInt(loc, endOffset, sizeTypeSize);
+
+ auto begin = cir::CastOp::create(builder, loc, eltTy,
+ cir::CastKind::array_to_ptrdecay, arrayAddr);
+ mlir::Value end =
+ cir::PtrStrideOp::create(builder, loc, eltTy, begin, endOffsetVal);
+ mlir::Value start = isCtor ? begin : end;
+ mlir::Value stop = isCtor ? end : begin;
+
+ mlir::Value tmpAddr = builder.createAlloca(
+ loc, /*addr type*/ builder.getPointerTo(eltTy),
+ /*var type*/ eltTy, "__array_idx", builder.getAlignmentAttr(1));
+ builder.createStore(loc, start, tmpAddr);
+
+ cir::DoWhileOp loop = builder.createDoWhile(
+ loc,
+ /*condBuilder=*/
+ [&](mlir::OpBuilder &b, mlir::Location loc) {
+ auto currentElement = b.create<cir::LoadOp>(loc, eltTy, tmpAddr);
+ mlir::Type boolTy = cir::BoolType::get(b.getContext());
+ auto cmp = builder.create<cir::CmpOp>(loc, boolTy, cir::CmpOpKind::ne,
+ currentElement, stop);
+ builder.createCondition(cmp);
+ },
+ /*bodyBuilder=*/
+ [&](mlir::OpBuilder &b, mlir::Location loc) {
+ auto currentElement = b.create<cir::LoadOp>(loc, eltTy, tmpAddr);
+
+ cir::CallOp ctorCall;
+ op->walk([&](cir::CallOp c) { ctorCall = c; });
+ assert(ctorCall && "expected ctor call");
+
+ // Array elements get constructed in order but destructed in reverse.
+ mlir::Value stride;
+ if (isCtor)
+ stride = builder.getUnsignedInt(loc, 1, sizeTypeSize);
+ else
+ stride = builder.getSignedInt(loc, -1, sizeTypeSize);
+
+ ctorCall->moveBefore(stride.getDefiningOp());
+ ctorCall->setOperand(0, currentElement);
+ auto nextElement = cir::PtrStrideOp::create(builder, loc, eltTy,
+ currentElement, stride);
+
+ // Store the element pointer to the temporary variable
+ builder.createStore(loc, nextElement, tmpAddr);
+ builder.createYield(loc);
+ });
+
+ op->replaceAllUsesWith(loop);
+ op->erase();
+}
+
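A hedged C++ model of the loop generated above, shown for the ctor direction (begin = array_to_ptrdecay, end = begin + arrayLen, stride = +1); T is a hypothetical element type and the do-while mirrors the generated loop, so len >= 1 is assumed:

    #include <new>

    struct T { T() {} };  // hypothetical element type with a constructor

    void constructArray(T *begin, unsigned long len) {
      T *end = begin + len;  // endOffset == arrayLen in the ctor case
      T *cur = begin;
      do {
        new (cur) T();       // the call op rewritten into the loop body
        ++cur;               // stride of +1; destruction walks backwards
      } while (cur != end);  // the generated cir.cmp ne condition
    }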
+void LoweringPreparePass::lowerArrayDtor(cir::ArrayDtor op) {
+ CIRBaseBuilderTy builder(getContext());
+ builder.setInsertionPointAfter(op.getOperation());
+
+ mlir::Type eltTy = op->getRegion(0).getArgument(0).getType();
+ assert(!cir::MissingFeatures::vlas());
+ auto arrayLen =
+ mlir::cast<cir::ArrayType>(op.getAddr().getType().getPointee()).getSize();
+ lowerArrayDtorCtorIntoLoop(builder, astCtx, op, eltTy, op.getAddr(), arrayLen,
+                             /*isCtor=*/false);
+}
+
+void LoweringPreparePass::lowerArrayCtor(cir::ArrayCtor op) {
+ cir::CIRBaseBuilderTy builder(getContext());
+ builder.setInsertionPointAfter(op.getOperation());
+
+ mlir::Type eltTy = op->getRegion(0).getArgument(0).getType();
+ assert(!cir::MissingFeatures::vlas());
+ auto arrayLen =
+ mlir::cast<cir::ArrayType>(op.getAddr().getType().getPointee()).getSize();
+ lowerArrayDtorCtorIntoLoop(builder, astCtx, op, eltTy, op.getAddr(), arrayLen,
+                             /*isCtor=*/true);
+}
+
void LoweringPreparePass::runOnOp(mlir::Operation *op) {
- if (auto unary = dyn_cast<cir::UnaryOp>(op))
+  if (auto arrayCtor = mlir::dyn_cast<cir::ArrayCtor>(op))
+ lowerArrayCtor(arrayCtor);
+ else if (auto arrayDtor = dyn_cast<cir::ArrayDtor>(op))
+ lowerArrayDtor(arrayDtor);
+ else if (auto cast = mlir::dyn_cast<cir::CastOp>(op))
+ lowerCastOp(cast);
+ else if (auto unary = mlir::dyn_cast<cir::UnaryOp>(op))
lowerUnaryOp(unary);
}
@@ -82,7 +278,8 @@ void LoweringPreparePass::runOnOperation() {
llvm::SmallVector<mlir::Operation *> opsToTransform;
op->walk([&](mlir::Operation *op) {
- if (mlir::isa<cir::UnaryOp>(op))
+ if (mlir::isa<cir::ArrayCtor, cir::ArrayDtor, cir::CastOp, cir::UnaryOp>(
+ op))
opsToTransform.push_back(op);
});
@@ -93,3 +290,10 @@ void LoweringPreparePass::runOnOperation() {
std::unique_ptr<Pass> mlir::createLoweringPreparePass() {
return std::make_unique<LoweringPreparePass>();
}
+
+std::unique_ptr<Pass>
+mlir::createLoweringPreparePass(clang::ASTContext *astCtx) {
+ auto pass = std::make_unique<LoweringPreparePass>();
+ pass->setASTContext(astCtx);
+ return std::move(pass);
+}
diff --git a/clang/lib/CIR/Lowering/CIRPasses.cpp b/clang/lib/CIR/Lowering/CIRPasses.cpp
index 5607abc..bb9781b 100644
--- a/clang/lib/CIR/Lowering/CIRPasses.cpp
+++ b/clang/lib/CIR/Lowering/CIRPasses.cpp
@@ -31,7 +31,7 @@ mlir::LogicalResult runCIRToCIRPasses(mlir::ModuleOp theModule,
if (enableCIRSimplify)
pm.addPass(mlir::createCIRSimplifyPass());
- pm.addPass(mlir::createLoweringPreparePass());
+ pm.addPass(mlir::createLoweringPreparePass(&astContext));
pm.enableVerifier(enableVerifier);
(void)mlir::applyPassManagerCLOptions(pm);
diff --git a/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp b/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp
index f075be8..957a51a 100644
--- a/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp
+++ b/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp
@@ -460,6 +460,17 @@ mlir::LogicalResult CIRToLLVMAssumeOpLowering::matchAndRewrite(
return mlir::success();
}
+mlir::LogicalResult CIRToLLVMAssumeSepStorageOpLowering::matchAndRewrite(
+ cir::AssumeSepStorageOp op, OpAdaptor adaptor,
+ mlir::ConversionPatternRewriter &rewriter) const {
+ auto cond = rewriter.create<mlir::LLVM::ConstantOp>(op.getLoc(),
+ rewriter.getI1Type(), 1);
+ rewriter.replaceOpWithNewOp<mlir::LLVM::AssumeOp>(
+ op, cond, mlir::LLVM::AssumeSeparateStorageTag{}, adaptor.getPtr1(),
+ adaptor.getPtr2());
+ return mlir::success();
+}
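For reference, a hedged example of source that reaches this lowering; the builtin becomes an llvm.assume carrying a separate_storage operand bundle on the two pointers:

    void writeBoth(int *a, int *b) {
      __builtin_assume_separate_storage(a, b);
      *a = 1;  // the optimizer may now assume a and b never alias
      *b = 2;
    }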
+
mlir::LogicalResult CIRToLLVMBitClrsbOpLowering::matchAndRewrite(
cir::BitClrsbOp op, OpAdaptor adaptor,
mlir::ConversionPatternRewriter &rewriter) const {
@@ -510,6 +521,32 @@ mlir::LogicalResult CIRToLLVMBitCtzOpLowering::matchAndRewrite(
return mlir::LogicalResult::success();
}
+mlir::LogicalResult CIRToLLVMBitFfsOpLowering::matchAndRewrite(
+ cir::BitFfsOp op, OpAdaptor adaptor,
+ mlir::ConversionPatternRewriter &rewriter) const {
+ auto resTy = getTypeConverter()->convertType(op.getType());
+ auto ctz = rewriter.create<mlir::LLVM::CountTrailingZerosOp>(
+ op.getLoc(), resTy, adaptor.getInput(), /*is_zero_poison=*/true);
+
+ auto one = rewriter.create<mlir::LLVM::ConstantOp>(op.getLoc(), resTy, 1);
+ auto ctzAddOne = rewriter.create<mlir::LLVM::AddOp>(op.getLoc(), ctz, one);
+
+ auto zeroInputTy = rewriter.create<mlir::LLVM::ConstantOp>(
+ op.getLoc(), adaptor.getInput().getType(), 0);
+ auto isZero = rewriter.create<mlir::LLVM::ICmpOp>(
+ op.getLoc(),
+ mlir::LLVM::ICmpPredicateAttr::get(rewriter.getContext(),
+ mlir::LLVM::ICmpPredicate::eq),
+ adaptor.getInput(), zeroInputTy);
+
+ auto zero = rewriter.create<mlir::LLVM::ConstantOp>(op.getLoc(), resTy, 0);
+ auto res = rewriter.create<mlir::LLVM::SelectOp>(op.getLoc(), isZero, zero,
+ ctzAddOne);
+ rewriter.replaceOp(op, res);
+
+ return mlir::LogicalResult::success();
+}
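A scalar model of the select-based expansion above (illustration only; matches the semantics of __builtin_ffs for a 32-bit input):

    unsigned ffsModel(unsigned x) {
      // cttz is poison at 0 in the expansion, so the zero case is selected out.
      return x == 0 ? 0u : static_cast<unsigned>(__builtin_ctz(x)) + 1u;
    }
    // e.g. ffsModel(0) == 0, ffsModel(8) == 4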
+
mlir::LogicalResult CIRToLLVMBitParityOpLowering::matchAndRewrite(
cir::BitParityOp op, OpAdaptor adaptor,
mlir::ConversionPatternRewriter &rewriter) const {
@@ -908,13 +945,45 @@ rewriteCallOrInvoke(mlir::Operation *op, mlir::ValueRange callOperands,
memoryEffects, noUnwind, willReturn);
mlir::LLVM::LLVMFunctionType llvmFnTy;
+
+ // Temporary to handle the case where we need to prepend an operand if the
+ // callee is an alias.
+ SmallVector<mlir::Value> adjustedCallOperands;
+
if (calleeAttr) { // direct call
- mlir::FunctionOpInterface fn =
- mlir::SymbolTable::lookupNearestSymbolFrom<mlir::FunctionOpInterface>(
- op, calleeAttr);
- assert(fn && "Did not find function for call");
- llvmFnTy = cast<mlir::LLVM::LLVMFunctionType>(
- converter->convertType(fn.getFunctionType()));
+ mlir::Operation *callee =
+ mlir::SymbolTable::lookupNearestSymbolFrom(op, calleeAttr);
+ if (auto fn = mlir::dyn_cast<mlir::FunctionOpInterface>(callee)) {
+ llvmFnTy = converter->convertType<mlir::LLVM::LLVMFunctionType>(
+ fn.getFunctionType());
+ assert(llvmFnTy && "Failed to convert function type");
+    } else if (auto alias = mlir::dyn_cast<mlir::LLVM::AliasOp>(callee)) {
+      // The callee is an alias. In that case, we need to prepend the
+      // address of the alias to the operands. The
+ // way aliases work in the LLVM dialect is a little counter-intuitive.
+ // The AliasOp itself is a pseudo-function that returns the address of
+ // the global value being aliased, but when we generate the call we
+ // need to insert an operation that gets the address of the AliasOp.
+ // This all gets sorted out when the LLVM dialect is lowered to LLVM IR.
+ auto symAttr = mlir::cast<mlir::FlatSymbolRefAttr>(calleeAttr);
+ auto addrOfAlias =
+ mlir::LLVM::AddressOfOp::create(
+ rewriter, op->getLoc(),
+ mlir::LLVM::LLVMPointerType::get(rewriter.getContext()), symAttr)
+ .getResult();
+ adjustedCallOperands.push_back(addrOfAlias);
+
+ // Now add the regular operands and assign this to the range value.
+ llvm::append_range(adjustedCallOperands, callOperands);
+ callOperands = adjustedCallOperands;
+
+ // Clear the callee attribute because we're calling an alias.
+ calleeAttr = {};
+ llvmFnTy = mlir::cast<mlir::LLVM::LLVMFunctionType>(alias.getType());
+ } else {
+ // Was this an ifunc?
+ return op->emitError("Unexpected callee type!");
+ }
} else { // indirect call
assert(!op->getOperands().empty() &&
"operands list must no be empty for the indirect call");
@@ -1016,6 +1085,12 @@ mlir::LogicalResult CIRToLLVMConstantOpLowering::matchAndRewrite(
mlir::ConversionPatternRewriter &rewriter) const {
mlir::Attribute attr = op.getValue();
+ if (mlir::isa<cir::PoisonAttr>(attr)) {
+ rewriter.replaceOpWithNewOp<mlir::LLVM::PoisonOp>(
+ op, getTypeConverter()->convertType(op.getType()));
+ return mlir::success();
+ }
+
if (mlir::isa<mlir::IntegerType>(op.getType())) {
// Verified cir.const operations cannot actually be of these types, but the
// lowering pass may generate temporary cir.const operations with these
@@ -1155,6 +1230,30 @@ void CIRToLLVMFuncOpLowering::lowerFuncAttributes(
}
}
+mlir::LogicalResult CIRToLLVMFuncOpLowering::matchAndRewriteAlias(
+ cir::FuncOp op, llvm::StringRef aliasee, mlir::Type ty, OpAdaptor adaptor,
+ mlir::ConversionPatternRewriter &rewriter) const {
+ SmallVector<mlir::NamedAttribute, 4> attributes;
+ lowerFuncAttributes(op, /*filterArgAndResAttrs=*/false, attributes);
+
+ mlir::Location loc = op.getLoc();
+ auto aliasOp = rewriter.replaceOpWithNewOp<mlir::LLVM::AliasOp>(
+ op, ty, convertLinkage(op.getLinkage()), op.getName(), op.getDsoLocal(),
+ /*threadLocal=*/false, attributes);
+
+ // Create the alias body
+ mlir::OpBuilder builder(op.getContext());
+ mlir::Block *block = builder.createBlock(&aliasOp.getInitializerRegion());
+ builder.setInsertionPointToStart(block);
+ // The type of AddressOfOp is always a pointer.
+ assert(!cir::MissingFeatures::addressSpace());
+ mlir::Type ptrTy = mlir::LLVM::LLVMPointerType::get(ty.getContext());
+ auto addrOp = mlir::LLVM::AddressOfOp::create(builder, loc, ptrTy, aliasee);
+ mlir::LLVM::ReturnOp::create(builder, loc, addrOp);
+
+ return mlir::success();
+}
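A hedged example of C++ source that yields a cir.func with an aliasee and so exercises this path (GNU alias attribute, ELF targets); the implicit destructor aliases produced by getCIRGenToUse take the same route:

    extern "C" void impl() {}
    extern "C" void api() __attribute__((alias("impl")));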
+
mlir::LogicalResult CIRToLLVMFuncOpLowering::matchAndRewrite(
cir::FuncOp op, OpAdaptor adaptor,
mlir::ConversionPatternRewriter &rewriter) const {
@@ -1179,6 +1278,11 @@ mlir::LogicalResult CIRToLLVMFuncOpLowering::matchAndRewrite(
resultType ? resultType : mlir::LLVM::LLVMVoidType::get(getContext()),
signatureConversion.getConvertedTypes(),
/*isVarArg=*/fnType.isVarArg());
+
+ // If this is an alias, it needs to be lowered to llvm::AliasOp.
+ if (std::optional<llvm::StringRef> aliasee = op.getAliasee())
+ return matchAndRewriteAlias(op, *aliasee, llvmFnTy, adaptor, rewriter);
+
// LLVMFuncOp expects a single FileLine Location instead of a fused
// location.
mlir::Location loc = op.getLoc();
@@ -2066,11 +2170,13 @@ void ConvertCIRToLLVMPass::runOnOperation() {
patterns.add<
// clang-format off
CIRToLLVMAssumeOpLowering,
+ CIRToLLVMAssumeSepStorageOpLowering,
CIRToLLVMBaseClassAddrOpLowering,
CIRToLLVMBinOpLowering,
CIRToLLVMBitClrsbOpLowering,
CIRToLLVMBitClzOpLowering,
CIRToLLVMBitCtzOpLowering,
+ CIRToLLVMBitFfsOpLowering,
CIRToLLVMBitParityOpLowering,
CIRToLLVMBitPopcountOpLowering,
CIRToLLVMBitReverseOpLowering,
diff --git a/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.h b/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.h
index 3faf1e9..f339d43 100644
--- a/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.h
+++ b/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.h
@@ -44,6 +44,16 @@ public:
mlir::ConversionPatternRewriter &) const override;
};
+class CIRToLLVMAssumeSepStorageOpLowering
+ : public mlir::OpConversionPattern<cir::AssumeSepStorageOp> {
+public:
+ using mlir::OpConversionPattern<cir::AssumeSepStorageOp>::OpConversionPattern;
+
+ mlir::LogicalResult
+ matchAndRewrite(cir::AssumeSepStorageOp op, OpAdaptor,
+ mlir::ConversionPatternRewriter &) const override;
+};
+
class CIRToLLVMBitClrsbOpLowering
: public mlir::OpConversionPattern<cir::BitClrsbOp> {
public:
@@ -74,6 +84,16 @@ public:
mlir::ConversionPatternRewriter &) const override;
};
+class CIRToLLVMBitFfsOpLowering
+ : public mlir::OpConversionPattern<cir::BitFfsOp> {
+public:
+ using mlir::OpConversionPattern<cir::BitFfsOp>::OpConversionPattern;
+
+ mlir::LogicalResult
+ matchAndRewrite(cir::BitFfsOp op, OpAdaptor,
+ mlir::ConversionPatternRewriter &) const override;
+};
+
class CIRToLLVMBitParityOpLowering
: public mlir::OpConversionPattern<cir::BitParityOp> {
public:
@@ -247,6 +267,11 @@ class CIRToLLVMFuncOpLowering : public mlir::OpConversionPattern<cir::FuncOp> {
cir::FuncOp func, bool filterArgAndResAttrs,
mlir::SmallVectorImpl<mlir::NamedAttribute> &result) const;
+ mlir::LogicalResult
+ matchAndRewriteAlias(cir::FuncOp op, llvm::StringRef aliasee, mlir::Type ty,
+ OpAdaptor adaptor,
+ mlir::ConversionPatternRewriter &rewriter) const;
+
public:
using mlir::OpConversionPattern<cir::FuncOp>::OpConversionPattern;