path: root/clang/lib/CIR
Diffstat (limited to 'clang/lib/CIR')
-rw-r--r--  clang/lib/CIR/CodeGen/CIRGenBuiltin.cpp              |   5
-rw-r--r--  clang/lib/CIR/CodeGen/CIRGenCall.cpp                 |   8
-rw-r--r--  clang/lib/CIR/CodeGen/CIRGenCall.h                   |   5
-rw-r--r--  clang/lib/CIR/CodeGen/CIRGenDecl.cpp                 |  62
-rw-r--r--  clang/lib/CIR/CodeGen/CIRGenExpr.cpp                 |  56
-rw-r--r--  clang/lib/CIR/CodeGen/CIRGenExprComplex.cpp          |   7
-rw-r--r--  clang/lib/CIR/CodeGen/CIRGenFunction.cpp             | 126
-rw-r--r--  clang/lib/CIR/CodeGen/CIRGenFunction.h               |   6
-rw-r--r--  clang/lib/CIR/CodeGen/CIRGenModule.cpp               | 102
-rw-r--r--  clang/lib/CIR/CodeGen/CIRGenModule.h                 |   8
-rw-r--r--  clang/lib/CIR/CodeGen/CIRGenRecordLayoutBuilder.cpp  |   4
-rw-r--r--  clang/lib/CIR/Dialect/IR/CIRDialect.cpp              |  16
-rw-r--r--  clang/lib/CIR/Dialect/Transforms/CIRCanonicalize.cpp |   2
-rw-r--r--  clang/lib/CIR/Dialect/Transforms/LoweringPrepare.cpp |  68
-rw-r--r--  clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp  | 100
-rw-r--r--  clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.h    |  15
16 files changed, 542 insertions, 48 deletions
diff --git a/clang/lib/CIR/CodeGen/CIRGenBuiltin.cpp b/clang/lib/CIR/CodeGen/CIRGenBuiltin.cpp
index ef136f8..9049a01 100644
--- a/clang/lib/CIR/CodeGen/CIRGenBuiltin.cpp
+++ b/clang/lib/CIR/CodeGen/CIRGenBuiltin.cpp
@@ -190,6 +190,11 @@ RValue CIRGenFunction::emitBuiltinExpr(const GlobalDecl &gd, unsigned builtinID,
assert(!cir::MissingFeatures::builtinCheckKind());
return emitBuiltinBitOp<cir::BitClzOp>(*this, e, /*poisonZero=*/true);
+ case Builtin::BI__builtin_ffs:
+ case Builtin::BI__builtin_ffsl:
+ case Builtin::BI__builtin_ffsll:
+ return emitBuiltinBitOp<cir::BitFfsOp>(*this, e);
+
case Builtin::BI__builtin_parity:
case Builtin::BI__builtin_parityl:
case Builtin::BI__builtin_parityll:
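For reference, a minimal C input (hypothetical, not part of the patch) that exercises the new BitFfsOp path. Per the fold and LLVM lowering added later in this diff, __builtin_ffs(x) is the one-based index of the least significant set bit, and __builtin_ffs(0) is 0.

/* Hypothetical test case for the new __builtin_ffs -> cir.ffs lowering. */
int first_set(int x) {
  /* Examples: __builtin_ffs(8) == 4, __builtin_ffs(1) == 1,
     __builtin_ffs(0) == 0. */
  return __builtin_ffs(x);
}
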
diff --git a/clang/lib/CIR/CodeGen/CIRGenCall.cpp b/clang/lib/CIR/CodeGen/CIRGenCall.cpp
index 938d143..fc208ff 100644
--- a/clang/lib/CIR/CodeGen/CIRGenCall.cpp
+++ b/clang/lib/CIR/CodeGen/CIRGenCall.cpp
@@ -582,6 +582,14 @@ RValue CIRGenFunction::emitCall(const CIRGenFunctionInfo &funcInfo,
cir::FuncOp directFuncOp;
if (auto fnOp = dyn_cast<cir::FuncOp>(calleePtr)) {
directFuncOp = fnOp;
+ } else if (auto getGlobalOp = mlir::dyn_cast<cir::GetGlobalOp>(calleePtr)) {
+ // FIXME(cir): This peephole optimization avoids indirect calls for
+ // builtins. This should be fixed in the builtin declaration instead by
+ // not emitting an unnecessary get_global in the first place.
+ // However, this is also used for no-prototype functions.
+ mlir::Operation *globalOp = cgm.getGlobalValue(getGlobalOp.getName());
+ assert(globalOp && "undefined global function");
+ directFuncOp = mlir::cast<cir::FuncOp>(globalOp);
} else {
[[maybe_unused]] mlir::ValueTypeRange<mlir::ResultRange> resultTypes =
calleePtr->getResultTypes();
diff --git a/clang/lib/CIR/CodeGen/CIRGenCall.h b/clang/lib/CIR/CodeGen/CIRGenCall.h
index bd11329..a78956b 100644
--- a/clang/lib/CIR/CodeGen/CIRGenCall.h
+++ b/clang/lib/CIR/CodeGen/CIRGenCall.h
@@ -116,6 +116,11 @@ public:
assert(isOrdinary());
return reinterpret_cast<mlir::Operation *>(kindOrFunctionPtr);
}
+
+ void setFunctionPointer(mlir::Operation *functionPtr) {
+ assert(isOrdinary());
+ kindOrFunctionPtr = SpecialKind(reinterpret_cast<uintptr_t>(functionPtr));
+ }
};
/// Type for representing both the decl and type of parameters to a function.
diff --git a/clang/lib/CIR/CodeGen/CIRGenDecl.cpp b/clang/lib/CIR/CodeGen/CIRGenDecl.cpp
index a28ac3c..6527fb5 100644
--- a/clang/lib/CIR/CodeGen/CIRGenDecl.cpp
+++ b/clang/lib/CIR/CodeGen/CIRGenDecl.cpp
@@ -649,6 +649,38 @@ void CIRGenFunction::emitNullabilityCheck(LValue lhs, mlir::Value rhs,
assert(!cir::MissingFeatures::sanitizers());
}
+/// Destroys all the elements of the given array, beginning from last to first.
+/// The array cannot be zero-length.
+///
+/// \param begin - a type* denoting the first element of the array
+/// \param end - a type* denoting one past the end of the array
+/// \param elementType - the element type of the array
+/// \param destroyer - the function to call to destroy elements
+void CIRGenFunction::emitArrayDestroy(mlir::Value begin, mlir::Value end,
+ QualType elementType,
+ CharUnits elementAlign,
+ Destroyer *destroyer) {
+ assert(!elementType->isArrayType());
+
+ // Differently from LLVM traditional codegen, use a higher level
+ // representation instead of lowering directly to a loop.
+ mlir::Type cirElementType = convertTypeForMem(elementType);
+ cir::PointerType ptrToElmType = builder.getPointerTo(cirElementType);
+
+ // Emit the dtor call that will execute for every array element.
+ cir::ArrayDtor::create(
+ builder, *currSrcLoc, begin, [&](mlir::OpBuilder &b, mlir::Location loc) {
+ auto arg = b.getInsertionBlock()->addArgument(ptrToElmType, loc);
+ Address curAddr = Address(arg, cirElementType, elementAlign);
+ assert(!cir::MissingFeatures::dtorCleanups());
+
+ // Perform the actual destruction there.
+ destroyer(*this, curAddr, elementType);
+
+ cir::YieldOp::create(builder, loc);
+ });
+}
+
/// Immediately perform the destruction of the given object.
///
/// \param addr - the address of the object; a type*
@@ -658,10 +690,34 @@ void CIRGenFunction::emitNullabilityCheck(LValue lhs, mlir::Value rhs,
/// elements
void CIRGenFunction::emitDestroy(Address addr, QualType type,
Destroyer *destroyer) {
- if (getContext().getAsArrayType(type))
- cgm.errorNYI("emitDestroy: array type");
+ const ArrayType *arrayType = getContext().getAsArrayType(type);
+ if (!arrayType)
+ return destroyer(*this, addr, type);
+
+ mlir::Value length = emitArrayLength(arrayType, type, addr);
+
+ CharUnits elementAlign = addr.getAlignment().alignmentOfArrayElement(
+ getContext().getTypeSizeInChars(type));
+
+ auto constantCount = length.getDefiningOp<cir::ConstantOp>();
+ if (!constantCount) {
+ assert(!cir::MissingFeatures::vlas());
+ cgm.errorNYI("emitDestroy: variable length array");
+ return;
+ }
+
+ auto constIntAttr = mlir::dyn_cast<cir::IntAttr>(constantCount.getValue());
+ // If it's constant zero, we can just skip the entire thing.
+ if (constIntAttr && constIntAttr.getUInt() == 0)
+ return;
+
+ mlir::Value begin = addr.getPointer();
+ mlir::Value end; // This will be used for future non-constant counts.
+ emitArrayDestroy(begin, end, type, elementAlign, destroyer);
- return destroyer(*this, addr, type);
+ // If the array destroy didn't use the length op, we can erase it.
+ if (constantCount.use_empty())
+ constantCount.erase();
}
CIRGenFunction::Destroyer *
diff --git a/clang/lib/CIR/CodeGen/CIRGenExpr.cpp b/clang/lib/CIR/CodeGen/CIRGenExpr.cpp
index 7ff5f26..c18498f 100644
--- a/clang/lib/CIR/CodeGen/CIRGenExpr.cpp
+++ b/clang/lib/CIR/CodeGen/CIRGenExpr.cpp
@@ -949,7 +949,6 @@ LValue CIRGenFunction::emitCastLValue(const CastExpr *e) {
case CK_Dynamic:
case CK_ToUnion:
case CK_BaseToDerived:
- case CK_LValueBitCast:
case CK_AddressSpaceConversion:
case CK_ObjCObjectLValueCast:
case CK_VectorSplat:
@@ -965,6 +964,18 @@ LValue CIRGenFunction::emitCastLValue(const CastExpr *e) {
return {};
}
+ case CK_LValueBitCast: {
+ // This must be a reinterpret_cast (or c-style equivalent).
+ const auto *ce = cast<ExplicitCastExpr>(e);
+
+ cgm.emitExplicitCastExprType(ce, this);
+ LValue LV = emitLValue(e->getSubExpr());
+ Address V = LV.getAddress().withElementType(
+ builder, convertTypeForMem(ce->getTypeAsWritten()->getPointeeType()));
+
+ return makeAddrLValue(V, e->getType(), LV.getBaseInfo());
+ }
+
case CK_NoOp: {
// CK_NoOp can model a qualification conversion, which can remove an array
// bound and change the IR type.
@@ -1269,7 +1280,7 @@ RValue CIRGenFunction::getUndefRValue(QualType ty) {
}
RValue CIRGenFunction::emitCall(clang::QualType calleeTy,
- const CIRGenCallee &callee,
+ const CIRGenCallee &origCallee,
const clang::CallExpr *e,
ReturnValueSlot returnValue) {
// Get the actual function type. The callee type will always be a pointer to
@@ -1280,6 +1291,8 @@ RValue CIRGenFunction::emitCall(clang::QualType calleeTy,
calleeTy = getContext().getCanonicalType(calleeTy);
auto pointeeTy = cast<PointerType>(calleeTy)->getPointeeType();
+ CIRGenCallee callee = origCallee;
+
if (getLangOpts().CPlusPlus)
assert(!cir::MissingFeatures::sanitizers());
@@ -1296,7 +1309,44 @@ RValue CIRGenFunction::emitCall(clang::QualType calleeTy,
const CIRGenFunctionInfo &funcInfo =
cgm.getTypes().arrangeFreeFunctionCall(args, fnType);
- assert(!cir::MissingFeatures::opCallNoPrototypeFunc());
+ // C99 6.5.2.2p6:
+ // If the expression that denotes the called function has a type that does
+ // not include a prototype, [the default argument promotions are performed].
+ // If the number of arguments does not equal the number of parameters, the
+ // behavior is undefined. If the function is defined with a type that
+ // includes a prototype, and either the prototype ends with an ellipsis (,
+ // ...) or the types of the arguments after promotion are not compatible
+ // with the types of the parameters, the behavior is undefined. If the
+ // function is defined with a type that does not include a prototype, and
+ // the types of the arguments after promotion are not compatible with those
+ // of the parameters after promotion, the behavior is undefined [except in
+ // some trivial cases].
+ // That is, in the general case, we should assume that a call through an
+ // unprototyped function type works like a *non-variadic* call. The way we
+ // make this work is to cast to the exact type of the promoted arguments.
+ if (isa<FunctionNoProtoType>(fnType)) {
+ assert(!cir::MissingFeatures::opCallChain());
+ assert(!cir::MissingFeatures::addressSpace());
+ cir::FuncType calleeTy = getTypes().getFunctionType(funcInfo);
+ // get non-variadic function type
+ calleeTy = cir::FuncType::get(calleeTy.getInputs(),
+ calleeTy.getReturnType(), false);
+ auto calleePtrTy = cir::PointerType::get(calleeTy);
+
+ mlir::Operation *fn = callee.getFunctionPointer();
+ mlir::Value addr;
+ if (auto funcOp = mlir::dyn_cast<cir::FuncOp>(fn)) {
+ addr = builder.create<cir::GetGlobalOp>(
+ getLoc(e->getSourceRange()),
+ cir::PointerType::get(funcOp.getFunctionType()), funcOp.getSymName());
+ } else {
+ addr = fn->getResult(0);
+ }
+
+ fn = builder.createBitcast(addr, calleePtrTy).getDefiningOp();
+ callee.setFunctionPointer(fn);
+ }
+
assert(!cir::MissingFeatures::opCallFnInfoOpts());
assert(!cir::MissingFeatures::hip());
assert(!cir::MissingFeatures::opCallMustTail());
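A small C example (hypothetical, not from the patch) of the case described in the C99 6.5.2.2p6 comment above: a call through a declaration with no prototype is emitted as a non-variadic direct call, with the callee bitcast to the exact type of the promoted arguments.

/* f is declared without a prototype. */
void f();

void caller(void) {
  /* Emitted as if f had type void(int, double): the callee pointer is
     bitcast to that exact, non-variadic function type before the call. */
  f(1, 2.0);
}
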
diff --git a/clang/lib/CIR/CodeGen/CIRGenExprComplex.cpp b/clang/lib/CIR/CodeGen/CIRGenExprComplex.cpp
index 02685a3..a09d739 100644
--- a/clang/lib/CIR/CodeGen/CIRGenExprComplex.cpp
+++ b/clang/lib/CIR/CodeGen/CIRGenExprComplex.cpp
@@ -189,8 +189,11 @@ mlir::Value ComplexExprEmitter::emitCast(CastKind ck, Expr *op,
}
case CK_LValueBitCast: {
- cgf.cgm.errorNYI("ComplexExprEmitter::emitCast CK_LValueBitCast");
- return {};
+ LValue origLV = cgf.emitLValue(op);
+ Address addr =
+ origLV.getAddress().withElementType(builder, cgf.convertType(destTy));
+ LValue destLV = cgf.makeAddrLValue(addr, destTy);
+ return emitLoadOfLValue(destLV, op->getExprLoc());
}
case CK_LValueToRValueBitCast: {
diff --git a/clang/lib/CIR/CodeGen/CIRGenFunction.cpp b/clang/lib/CIR/CodeGen/CIRGenFunction.cpp
index b4b95d6..c65d025 100644
--- a/clang/lib/CIR/CodeGen/CIRGenFunction.cpp
+++ b/clang/lib/CIR/CodeGen/CIRGenFunction.cpp
@@ -923,4 +923,130 @@ CIRGenFunction::emitArrayLength(const clang::ArrayType *origArrayType,
return builder.getConstInt(*currSrcLoc, SizeTy, countFromCLAs);
}
+// TODO(cir): Most of this function can be shared between CIRGen
+// and traditional LLVM codegen
+void CIRGenFunction::emitVariablyModifiedType(QualType type) {
+ assert(type->isVariablyModifiedType() &&
+ "Must pass variably modified type to EmitVLASizes!");
+
+ // We're going to walk down into the type and look for VLA
+ // expressions.
+ do {
+ assert(type->isVariablyModifiedType());
+
+ const Type *ty = type.getTypePtr();
+ switch (ty->getTypeClass()) {
+ case Type::CountAttributed:
+ case Type::PackIndexing:
+ case Type::ArrayParameter:
+ case Type::HLSLAttributedResource:
+ case Type::HLSLInlineSpirv:
+ case Type::PredefinedSugar:
+ cgm.errorNYI("CIRGenFunction::emitVariablyModifiedType");
+
+#define TYPE(Class, Base)
+#define ABSTRACT_TYPE(Class, Base)
+#define NON_CANONICAL_TYPE(Class, Base)
+#define DEPENDENT_TYPE(Class, Base) case Type::Class:
+#define NON_CANONICAL_UNLESS_DEPENDENT_TYPE(Class, Base)
+#include "clang/AST/TypeNodes.inc"
+ llvm_unreachable(
+ "dependent type must be resolved before the CIR codegen");
+
+ // These types are never variably-modified.
+ case Type::Builtin:
+ case Type::Complex:
+ case Type::Vector:
+ case Type::ExtVector:
+ case Type::ConstantMatrix:
+ case Type::Record:
+ case Type::Enum:
+ case Type::Using:
+ case Type::TemplateSpecialization:
+ case Type::ObjCTypeParam:
+ case Type::ObjCObject:
+ case Type::ObjCInterface:
+ case Type::ObjCObjectPointer:
+ case Type::BitInt:
+ llvm_unreachable("type class is never variably-modified!");
+
+ case Type::Elaborated:
+ type = cast<clang::ElaboratedType>(ty)->getNamedType();
+ break;
+
+ case Type::Adjusted:
+ type = cast<clang::AdjustedType>(ty)->getAdjustedType();
+ break;
+
+ case Type::Decayed:
+ type = cast<clang::DecayedType>(ty)->getPointeeType();
+ break;
+
+ case Type::Pointer:
+ type = cast<clang::PointerType>(ty)->getPointeeType();
+ break;
+
+ case Type::BlockPointer:
+ type = cast<clang::BlockPointerType>(ty)->getPointeeType();
+ break;
+
+ case Type::LValueReference:
+ case Type::RValueReference:
+ type = cast<clang::ReferenceType>(ty)->getPointeeType();
+ break;
+
+ case Type::MemberPointer:
+ type = cast<clang::MemberPointerType>(ty)->getPointeeType();
+ break;
+
+ case Type::ConstantArray:
+ case Type::IncompleteArray:
+ // Losing element qualification here is fine.
+ type = cast<clang::ArrayType>(ty)->getElementType();
+ break;
+
+ case Type::VariableArray: {
+ cgm.errorNYI("CIRGenFunction::emitVariablyModifiedType VLA");
+ break;
+ }
+
+ case Type::FunctionProto:
+ case Type::FunctionNoProto:
+ type = cast<clang::FunctionType>(ty)->getReturnType();
+ break;
+
+ case Type::Paren:
+ case Type::TypeOf:
+ case Type::UnaryTransform:
+ case Type::Attributed:
+ case Type::BTFTagAttributed:
+ case Type::SubstTemplateTypeParm:
+ case Type::MacroQualified:
+ // Keep walking after single level desugaring.
+ type = type.getSingleStepDesugaredType(getContext());
+ break;
+
+ case Type::Typedef:
+ case Type::Decltype:
+ case Type::Auto:
+ case Type::DeducedTemplateSpecialization:
+ // Stop walking: nothing to do.
+ return;
+
+ case Type::TypeOfExpr:
+ // Stop walking: emit typeof expression.
+ emitIgnoredExpr(cast<clang::TypeOfExprType>(ty)->getUnderlyingExpr());
+ return;
+
+ case Type::Atomic:
+ type = cast<clang::AtomicType>(ty)->getValueType();
+ break;
+
+ case Type::Pipe:
+ type = cast<clang::PipeType>(ty)->getElementType();
+ break;
+ }
+ } while (type->isVariablyModifiedType());
+}
+
} // namespace clang::CIRGen
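As a sketch of when the new emitVariablyModifiedType walker runs (hypothetical input; note that the VariableArray case in the switch above is still errorNYI, so full VLA support is not claimed here): emitExplicitCastExprType calls it when the written cast type is variably modified, for example a pointer to a VLA.

void view(void *p, int n) {
  /* The cast type int (*)[n] is variably modified; emitting the cast
     requires walking the type and evaluating the size expression n. */
  int (*rows)[n] = (int (*)[n])p;
  (void)rows;
}
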
diff --git a/clang/lib/CIR/CodeGen/CIRGenFunction.h b/clang/lib/CIR/CodeGen/CIRGenFunction.h
index 4891c74..603f750 100644
--- a/clang/lib/CIR/CodeGen/CIRGenFunction.h
+++ b/clang/lib/CIR/CodeGen/CIRGenFunction.h
@@ -848,6 +848,10 @@ public:
/// even if no aggregate location is provided.
RValue emitAnyExprToTemp(const clang::Expr *e);
+ void emitArrayDestroy(mlir::Value begin, mlir::Value end,
+ QualType elementType, CharUnits elementAlign,
+ Destroyer *destroyer);
+
mlir::Value emitArrayLength(const clang::ArrayType *arrayType,
QualType &baseType, Address &addr);
LValue emitArraySubscriptExpr(const clang::ArraySubscriptExpr *e);
@@ -1201,6 +1205,8 @@ public:
/// inside a function, including static vars etc.
void emitVarDecl(const clang::VarDecl &d);
+ void emitVariablyModifiedType(QualType ty);
+
mlir::LogicalResult emitWhileStmt(const clang::WhileStmt &s);
/// Given an assignment `*lhs = rhs`, emit a test that checks if \p rhs is
diff --git a/clang/lib/CIR/CodeGen/CIRGenModule.cpp b/clang/lib/CIR/CodeGen/CIRGenModule.cpp
index 3502705..750fe97 100644
--- a/clang/lib/CIR/CodeGen/CIRGenModule.cpp
+++ b/clang/lib/CIR/CodeGen/CIRGenModule.cpp
@@ -1103,6 +1103,60 @@ cir::GlobalLinkageKind CIRGenModule::getCIRLinkageForDeclarator(
return cir::GlobalLinkageKind::ExternalLinkage;
}
+/// This function is called when we implement a function with no prototype, e.g.
+/// "int foo() {}". If there are existing call uses of the old function in the
+/// module, this adjusts them to call the new function directly.
+///
+/// This is not just a cleanup: the always_inline pass requires direct calls to
+/// functions to be able to inline them. If there is a bitcast in the way, it
+/// won't inline them. Instcombine normally deletes these calls, but it isn't
+/// run at -O0.
+void CIRGenModule::replaceUsesOfNonProtoTypeWithRealFunction(
+ mlir::Operation *old, cir::FuncOp newFn) {
+ // If we're redefining a global as a function, don't transform it.
+ auto oldFn = mlir::dyn_cast<cir::FuncOp>(old);
+ if (!oldFn)
+ return;
+
+ // TODO(cir): this RAUW ignores the features below.
+ assert(!cir::MissingFeatures::opFuncExceptions());
+ assert(!cir::MissingFeatures::opFuncParameterAttributes());
+ assert(!cir::MissingFeatures::opFuncOperandBundles());
+ if (oldFn->getAttrs().size() <= 1)
+ errorNYI(old->getLoc(),
+ "replaceUsesOfNonProtoTypeWithRealFunction: Attribute forwarding");
+
+ // Mark new function as originated from a no-proto declaration.
+ newFn.setNoProto(oldFn.getNoProto());
+
+ // Iterate through all calls of the no-proto function.
+ std::optional<mlir::SymbolTable::UseRange> symUses =
+ oldFn.getSymbolUses(oldFn->getParentOp());
+ for (const mlir::SymbolTable::SymbolUse &use : symUses.value()) {
+ mlir::OpBuilder::InsertionGuard guard(builder);
+
+ if (auto noProtoCallOp = mlir::dyn_cast<cir::CallOp>(use.getUser())) {
+ builder.setInsertionPoint(noProtoCallOp);
+
+ // Patch call type with the real function type.
+ cir::CallOp realCallOp = builder.createCallOp(
+ noProtoCallOp.getLoc(), newFn, noProtoCallOp.getOperands());
+
+ // Replace old no proto call with fixed call.
+ noProtoCallOp.replaceAllUsesWith(realCallOp);
+ noProtoCallOp.erase();
+ } else if (auto getGlobalOp =
+ mlir::dyn_cast<cir::GetGlobalOp>(use.getUser())) {
+ // Replace type
+ getGlobalOp.getAddr().setType(
+ cir::PointerType::get(newFn.getFunctionType()));
+ } else {
+ errorNYI(use.getUser()->getLoc(),
+ "replaceUsesOfNonProtoTypeWithRealFunction: unexpected use");
+ }
+ }
+}
+
cir::GlobalLinkageKind
CIRGenModule::getCIRLinkageVarDefinition(const VarDecl *vd, bool isConstant) {
assert(!isConstant && "constant variables NYI");
@@ -1208,6 +1262,15 @@ cir::GlobalOp CIRGenModule::getGlobalForStringLiteral(const StringLiteral *s,
return gv;
}
+void CIRGenModule::emitExplicitCastExprType(const ExplicitCastExpr *e,
+ CIRGenFunction *cgf) {
+ if (cgf && e->getType()->isVariablyModifiedType())
+ cgf->emitVariablyModifiedType(e->getType());
+
+ assert(!cir::MissingFeatures::generateDebugInfo() &&
+ "emitExplicitCastExprType");
+}
+
void CIRGenModule::emitDeclContext(const DeclContext *dc) {
for (Decl *decl : dc->decls()) {
// Unlike other DeclContexts, the contents of an ObjCImplDecl at TU scope
@@ -1530,10 +1593,10 @@ static bool shouldAssumeDSOLocal(const CIRGenModule &cgm,
const llvm::Triple &tt = cgm.getTriple();
const CodeGenOptions &cgOpts = cgm.getCodeGenOpts();
- if (tt.isWindowsGNUEnvironment()) {
- // In MinGW, variables without DLLImport can still be automatically
- // imported from a DLL by the linker; don't mark variables that
- // potentially could come from another DLL as DSO local.
+ if (tt.isOSCygMing()) {
+ // In MinGW and Cygwin, variables without DLLImport can still be
+ // automatically imported from a DLL by the linker; don't mark variables
+ // that potentially could come from another DLL as DSO local.
// With EmulatedTLS, TLS variables can be autoimported from other DLLs
// (and this actually happens in the public interface of libstdc++), so
@@ -1692,8 +1755,7 @@ cir::FuncOp CIRGenModule::getOrCreateCIRFunction(
// Lookup the entry, lazily creating it if necessary.
mlir::Operation *entry = getGlobalValue(mangledName);
if (entry) {
- if (!isa<cir::FuncOp>(entry))
- errorNYI(d->getSourceRange(), "getOrCreateCIRFunction: non-FuncOp");
+ assert(mlir::isa<cir::FuncOp>(entry));
assert(!cir::MissingFeatures::weakRefReference());
@@ -1729,6 +1791,30 @@ cir::FuncOp CIRGenModule::getOrCreateCIRFunction(
invalidLoc ? theModule->getLoc() : getLoc(funcDecl->getSourceRange()),
mangledName, mlir::cast<cir::FuncType>(funcType), funcDecl);
+ // If we already created a function with the same mangled name (but different
+ // type) before, take its name and add it to the list of functions to be
+ // replaced with F at the end of CodeGen.
+ //
+ // This happens if there is a prototype for a function (e.g. "int f()") and
+ // then a definition of a different type (e.g. "int f(int x)").
+ if (entry) {
+
+ // Fetch a generic symbol-defining operation and its uses.
+ auto symbolOp = mlir::cast<mlir::SymbolOpInterface>(entry);
+
+ // This might be an implementation of a function without a prototype, in
+ // which case, try to do special replacement of calls which match the new
+ // prototype. The really key thing here is that we also potentially drop
+ // arguments from the call site so as to make a direct call, which makes the
+ // inliner happier and suppresses a number of optimizer warnings (!) about
+ // dropping arguments.
+ if (symbolOp.getSymbolUses(symbolOp->getParentOp()))
+ replaceUsesOfNonProtoTypeWithRealFunction(entry, funcOp);
+
+ // Obliterate no-proto declaration.
+ entry->erase();
+ }
+
if (d)
setFunctionAttributes(gd, funcOp, /*isIncompleteFunction=*/false, isThunk);
@@ -1805,7 +1891,9 @@ CIRGenModule::createCIRFunction(mlir::Location loc, StringRef name,
func = builder.create<cir::FuncOp>(loc, name, funcType);
assert(!cir::MissingFeatures::opFuncAstDeclAttr());
- assert(!cir::MissingFeatures::opFuncNoProto());
+
+ if (funcDecl && !funcDecl->hasPrototype())
+ func.setNoProto(true);
assert(func.isDeclaration() && "expected empty body");
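A hypothetical C translation unit that reaches replaceUsesOfNonProtoTypeWithRealFunction: a call is first emitted against a no-prototype declaration, and the later definition carries a different (prototyped) function type, so the existing call site is rewritten to call the new cir.func directly.

int f();                          /* declaration without a prototype */

int caller(void) { return f(); }  /* call emitted against the no-proto type */

int f(void) { return 42; }        /* definition; the earlier call is patched
                                     to target this function directly */
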
diff --git a/clang/lib/CIR/CodeGen/CIRGenModule.h b/clang/lib/CIR/CodeGen/CIRGenModule.h
index 16922b1..5d07d38 100644
--- a/clang/lib/CIR/CodeGen/CIRGenModule.h
+++ b/clang/lib/CIR/CodeGen/CIRGenModule.h
@@ -252,6 +252,11 @@ public:
getAddrOfGlobal(clang::GlobalDecl gd,
ForDefinition_t isForDefinition = NotForDefinition);
+ /// Emit type info if type of an expression is a variably modified
+ /// type. Also emit proper debug info for cast types.
+ void emitExplicitCastExprType(const ExplicitCastExpr *e,
+ CIRGenFunction *cgf = nullptr);
+
/// Emit code for a single global function or variable declaration. Forward
/// declarations are emitted lazily.
void emitGlobal(clang::GlobalDecl gd);
@@ -308,6 +313,9 @@ public:
static void setInitializer(cir::GlobalOp &op, mlir::Attribute value);
+ void replaceUsesOfNonProtoTypeWithRealFunction(mlir::Operation *old,
+ cir::FuncOp newFn);
+
cir::FuncOp
getOrCreateCIRFunction(llvm::StringRef mangledName, mlir::Type funcType,
clang::GlobalDecl gd, bool forVTable,
diff --git a/clang/lib/CIR/CodeGen/CIRGenRecordLayoutBuilder.cpp b/clang/lib/CIR/CodeGen/CIRGenRecordLayoutBuilder.cpp
index 05e8848..e4ec380 100644
--- a/clang/lib/CIR/CodeGen/CIRGenRecordLayoutBuilder.cpp
+++ b/clang/lib/CIR/CodeGen/CIRGenRecordLayoutBuilder.cpp
@@ -438,9 +438,7 @@ CIRRecordLowering::accumulateBitFields(RecordDecl::field_iterator field,
} else if (cirGenTypes.getCGModule()
.getCodeGenOpts()
.FineGrainedBitfieldAccesses) {
- assert(!cir::MissingFeatures::nonFineGrainedBitfields());
- cirGenTypes.getCGModule().errorNYI(field->getSourceRange(),
- "NYI FineGrainedBitfield");
+ installBest = true;
} else {
// Otherwise, we're not installing. Update the bit size
// of the current span to go all the way to limitOffset, which is
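The change above makes -ffine-grained-bitfield-accesses take the fine-grained path instead of reporting NYI. A hypothetical input where the flag is relevant; with the flag, clang is expected to use separate, narrow access units for bitfields with legal widths instead of merging them into one wide access unit.

/* Hypothetical example struct; compile with -ffine-grained-bitfield-accesses. */
struct flags {
  unsigned ready : 1;
  unsigned error : 1;
  unsigned code  : 6;
  unsigned count : 8;
};
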
diff --git a/clang/lib/CIR/Dialect/IR/CIRDialect.cpp b/clang/lib/CIR/Dialect/IR/CIRDialect.cpp
index 2213c75..1c3a310 100644
--- a/clang/lib/CIR/Dialect/IR/CIRDialect.cpp
+++ b/clang/lib/CIR/Dialect/IR/CIRDialect.cpp
@@ -1470,10 +1470,14 @@ ParseResult cir::FuncOp::parse(OpAsmParser &parser, OperationState &state) {
llvm::SMLoc loc = parser.getCurrentLocation();
mlir::Builder &builder = parser.getBuilder();
+ mlir::StringAttr noProtoNameAttr = getNoProtoAttrName(state.name);
mlir::StringAttr visNameAttr = getSymVisibilityAttrName(state.name);
mlir::StringAttr visibilityNameAttr = getGlobalVisibilityAttrName(state.name);
mlir::StringAttr dsoLocalNameAttr = getDsoLocalAttrName(state.name);
+ if (parser.parseOptionalKeyword(noProtoNameAttr).succeeded())
+ state.addAttribute(noProtoNameAttr, parser.getBuilder().getUnitAttr());
+
// Default to external linkage if no keyword is provided.
state.addAttribute(getLinkageAttrNameString(),
GlobalLinkageKindAttr::get(
@@ -1578,6 +1582,9 @@ mlir::Region *cir::FuncOp::getCallableRegion() {
}
void cir::FuncOp::print(OpAsmPrinter &p) {
+ if (getNoProto())
+ p << " no_proto";
+
if (getComdat())
p << " comdat";
@@ -2295,6 +2302,15 @@ OpFoldResult BitCtzOp::fold(FoldAdaptor adaptor) {
getPoisonZero());
}
+OpFoldResult BitFfsOp::fold(FoldAdaptor adaptor) {
+ return foldUnaryBitOp(adaptor.getInput(), [](const llvm::APInt &inputValue) {
+ unsigned trailingZeros = inputValue.countTrailingZeros();
+ unsigned result =
+ trailingZeros == inputValue.getBitWidth() ? 0 : trailingZeros + 1;
+ return llvm::APInt(inputValue.getBitWidth(), result);
+ });
+}
+
OpFoldResult BitParityOp::fold(FoldAdaptor adaptor) {
return foldUnaryBitOp(adaptor.getInput(), [](const llvm::APInt &inputValue) {
return llvm::APInt(inputValue.getBitWidth(), inputValue.popcount() % 2);
diff --git a/clang/lib/CIR/Dialect/Transforms/CIRCanonicalize.cpp b/clang/lib/CIR/Dialect/Transforms/CIRCanonicalize.cpp
index 2143f16..2eaa60c 100644
--- a/clang/lib/CIR/Dialect/Transforms/CIRCanonicalize.cpp
+++ b/clang/lib/CIR/Dialect/Transforms/CIRCanonicalize.cpp
@@ -143,7 +143,7 @@ void CIRCanonicalizePass::runOnOperation() {
if (isa<BrOp, BrCondOp, CastOp, ScopeOp, SwitchOp, SelectOp, UnaryOp,
ComplexCreateOp, ComplexImagOp, ComplexRealOp, VecCmpOp,
VecCreateOp, VecExtractOp, VecShuffleOp, VecShuffleDynamicOp,
- VecTernaryOp, BitClrsbOp, BitClzOp, BitCtzOp, BitParityOp,
+ VecTernaryOp, BitClrsbOp, BitClzOp, BitCtzOp, BitFfsOp, BitParityOp,
BitPopcountOp, BitReverseOp, ByteSwapOp, RotateOp>(op))
ops.push_back(op);
});
diff --git a/clang/lib/CIR/Dialect/Transforms/LoweringPrepare.cpp b/clang/lib/CIR/Dialect/Transforms/LoweringPrepare.cpp
index cef83ea..ce3b30d 100644
--- a/clang/lib/CIR/Dialect/Transforms/LoweringPrepare.cpp
+++ b/clang/lib/CIR/Dialect/Transforms/LoweringPrepare.cpp
@@ -29,7 +29,8 @@ struct LoweringPreparePass : public LoweringPrepareBase<LoweringPreparePass> {
void runOnOp(mlir::Operation *op);
void lowerCastOp(cir::CastOp op);
void lowerUnaryOp(cir::UnaryOp op);
- void lowerArrayCtor(ArrayCtor op);
+ void lowerArrayDtor(cir::ArrayDtor op);
+ void lowerArrayCtor(cir::ArrayCtor op);
///
/// AST related
@@ -172,28 +173,30 @@ void LoweringPreparePass::lowerUnaryOp(cir::UnaryOp op) {
static void lowerArrayDtorCtorIntoLoop(cir::CIRBaseBuilderTy &builder,
clang::ASTContext *astCtx,
mlir::Operation *op, mlir::Type eltTy,
- mlir::Value arrayAddr,
- uint64_t arrayLen) {
+ mlir::Value arrayAddr, uint64_t arrayLen,
+ bool isCtor) {
// Generate loop to call into ctor/dtor for every element.
mlir::Location loc = op->getLoc();
- // TODO: instead of fixed integer size, create alias for PtrDiffTy and unify
- // with CIRGen stuff.
+ // TODO: instead of getting the size from the AST context, create alias for
+ // PtrDiffTy and unify with CIRGen stuff.
const unsigned sizeTypeSize =
astCtx->getTypeSize(astCtx->getSignedSizeType());
- auto ptrDiffTy =
- cir::IntType::get(builder.getContext(), sizeTypeSize, /*isSigned=*/false);
- mlir::Value numArrayElementsConst = builder.getUnsignedInt(loc, arrayLen, 64);
+ uint64_t endOffset = isCtor ? arrayLen : arrayLen - 1;
+ mlir::Value endOffsetVal =
+ builder.getUnsignedInt(loc, endOffset, sizeTypeSize);
- auto begin = builder.create<cir::CastOp>(
- loc, eltTy, cir::CastKind::array_to_ptrdecay, arrayAddr);
- mlir::Value end = builder.create<cir::PtrStrideOp>(loc, eltTy, begin,
- numArrayElementsConst);
+ auto begin = cir::CastOp::create(builder, loc, eltTy,
+ cir::CastKind::array_to_ptrdecay, arrayAddr);
+ mlir::Value end =
+ cir::PtrStrideOp::create(builder, loc, eltTy, begin, endOffsetVal);
+ mlir::Value start = isCtor ? begin : end;
+ mlir::Value stop = isCtor ? end : begin;
mlir::Value tmpAddr = builder.createAlloca(
loc, /*addr type*/ builder.getPointerTo(eltTy),
/*var type*/ eltTy, "__array_idx", builder.getAlignmentAttr(1));
- builder.createStore(loc, begin, tmpAddr);
+ builder.createStore(loc, start, tmpAddr);
cir::DoWhileOp loop = builder.createDoWhile(
loc,
@@ -202,7 +205,7 @@ static void lowerArrayDtorCtorIntoLoop(cir::CIRBaseBuilderTy &builder,
auto currentElement = b.create<cir::LoadOp>(loc, eltTy, tmpAddr);
mlir::Type boolTy = cir::BoolType::get(b.getContext());
auto cmp = builder.create<cir::CmpOp>(loc, boolTy, cir::CmpOpKind::ne,
- currentElement, end);
+ currentElement, stop);
builder.createCondition(cmp);
},
/*bodyBuilder=*/
@@ -213,15 +216,19 @@ static void lowerArrayDtorCtorIntoLoop(cir::CIRBaseBuilderTy &builder,
op->walk([&](cir::CallOp c) { ctorCall = c; });
assert(ctorCall && "expected ctor call");
- auto one = builder.create<cir::ConstantOp>(
- loc, ptrDiffTy, cir::IntAttr::get(ptrDiffTy, 1));
+ // Array elements get constructed in order but destructed in reverse.
+ mlir::Value stride;
+ if (isCtor)
+ stride = builder.getUnsignedInt(loc, 1, sizeTypeSize);
+ else
+ stride = builder.getSignedInt(loc, -1, sizeTypeSize);
- ctorCall->moveAfter(one);
+ ctorCall->moveBefore(stride.getDefiningOp());
ctorCall->setOperand(0, currentElement);
+ auto nextElement = cir::PtrStrideOp::create(builder, loc, eltTy,
+ currentElement, stride);
- // Advance pointer and store them to temporary variable
- auto nextElement =
- builder.create<cir::PtrStrideOp>(loc, eltTy, currentElement, one);
+ // Store the element pointer to the temporary variable
builder.createStore(loc, nextElement, tmpAddr);
builder.createYield(loc);
});
@@ -230,6 +237,18 @@ static void lowerArrayDtorCtorIntoLoop(cir::CIRBaseBuilderTy &builder,
op->erase();
}
+void LoweringPreparePass::lowerArrayDtor(cir::ArrayDtor op) {
+ CIRBaseBuilderTy builder(getContext());
+ builder.setInsertionPointAfter(op.getOperation());
+
+ mlir::Type eltTy = op->getRegion(0).getArgument(0).getType();
+ assert(!cir::MissingFeatures::vlas());
+ auto arrayLen =
+ mlir::cast<cir::ArrayType>(op.getAddr().getType().getPointee()).getSize();
+ lowerArrayDtorCtorIntoLoop(builder, astCtx, op, eltTy, op.getAddr(), arrayLen,
+ false);
+}
+
void LoweringPreparePass::lowerArrayCtor(cir::ArrayCtor op) {
cir::CIRBaseBuilderTy builder(getContext());
builder.setInsertionPointAfter(op.getOperation());
@@ -238,13 +257,15 @@ void LoweringPreparePass::lowerArrayCtor(cir::ArrayCtor op) {
assert(!cir::MissingFeatures::vlas());
auto arrayLen =
mlir::cast<cir::ArrayType>(op.getAddr().getType().getPointee()).getSize();
- lowerArrayDtorCtorIntoLoop(builder, astCtx, op, eltTy, op.getAddr(),
- arrayLen);
+ lowerArrayDtorCtorIntoLoop(builder, astCtx, op, eltTy, op.getAddr(), arrayLen,
+ true);
}
void LoweringPreparePass::runOnOp(mlir::Operation *op) {
if (auto arrayCtor = dyn_cast<ArrayCtor>(op))
lowerArrayCtor(arrayCtor);
+ else if (auto arrayDtor = dyn_cast<cir::ArrayDtor>(op))
+ lowerArrayDtor(arrayDtor);
else if (auto cast = mlir::dyn_cast<cir::CastOp>(op))
lowerCastOp(cast);
else if (auto unary = mlir::dyn_cast<cir::UnaryOp>(op))
@@ -257,7 +278,8 @@ void LoweringPreparePass::runOnOperation() {
llvm::SmallVector<mlir::Operation *> opsToTransform;
op->walk([&](mlir::Operation *op) {
- if (mlir::isa<cir::ArrayCtor, cir::CastOp, cir::UnaryOp>(op))
+ if (mlir::isa<cir::ArrayCtor, cir::ArrayDtor, cir::CastOp, cir::UnaryOp>(
+ op))
opsToTransform.push_back(op);
});
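Conceptually (a plain-C sketch, not the literal CIR the pass emits), the shared lowering above walks forward over the elements for cir.array.ctor and backward for cir.array.dtor, advancing a cursor pointer by a +1 or -1 stride per iteration:

/* Hypothetical sketch; elem_t, construct and destroy stand in for the real
   element type and the ctor/dtor calls that the pass wires up. */
typedef struct { int v; } elem_t;
static void construct(elem_t *p) { p->v = 0; }
static void destroy(elem_t *p)   { p->v = -1; }

static void construct_all(elem_t *base, unsigned long n) {
  for (elem_t *cur = base; cur != base + n; ++cur)
    construct(cur);               /* in order, stride +1 */
}

static void destroy_all(elem_t *base, unsigned long n) {
  for (elem_t *cur = base + n; cur != base; )
    destroy(--cur);               /* in reverse, stride -1 */
}
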
diff --git a/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp b/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp
index c27b889..957a51a 100644
--- a/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp
+++ b/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp
@@ -521,6 +521,32 @@ mlir::LogicalResult CIRToLLVMBitCtzOpLowering::matchAndRewrite(
return mlir::LogicalResult::success();
}
+mlir::LogicalResult CIRToLLVMBitFfsOpLowering::matchAndRewrite(
+ cir::BitFfsOp op, OpAdaptor adaptor,
+ mlir::ConversionPatternRewriter &rewriter) const {
+ auto resTy = getTypeConverter()->convertType(op.getType());
+ auto ctz = rewriter.create<mlir::LLVM::CountTrailingZerosOp>(
+ op.getLoc(), resTy, adaptor.getInput(), /*is_zero_poison=*/true);
+
+ auto one = rewriter.create<mlir::LLVM::ConstantOp>(op.getLoc(), resTy, 1);
+ auto ctzAddOne = rewriter.create<mlir::LLVM::AddOp>(op.getLoc(), ctz, one);
+
+ auto zeroInputTy = rewriter.create<mlir::LLVM::ConstantOp>(
+ op.getLoc(), adaptor.getInput().getType(), 0);
+ auto isZero = rewriter.create<mlir::LLVM::ICmpOp>(
+ op.getLoc(),
+ mlir::LLVM::ICmpPredicateAttr::get(rewriter.getContext(),
+ mlir::LLVM::ICmpPredicate::eq),
+ adaptor.getInput(), zeroInputTy);
+
+ auto zero = rewriter.create<mlir::LLVM::ConstantOp>(op.getLoc(), resTy, 0);
+ auto res = rewriter.create<mlir::LLVM::SelectOp>(op.getLoc(), isZero, zero,
+ ctzAddOne);
+ rewriter.replaceOp(op, res);
+
+ return mlir::LogicalResult::success();
+}
+
mlir::LogicalResult CIRToLLVMBitParityOpLowering::matchAndRewrite(
cir::BitParityOp op, OpAdaptor adaptor,
mlir::ConversionPatternRewriter &rewriter) const {
@@ -919,13 +945,45 @@ rewriteCallOrInvoke(mlir::Operation *op, mlir::ValueRange callOperands,
memoryEffects, noUnwind, willReturn);
mlir::LLVM::LLVMFunctionType llvmFnTy;
+
+ // Temporary to handle the case where we need to prepend an operand if the
+ // callee is an alias.
+ SmallVector<mlir::Value> adjustedCallOperands;
+
if (calleeAttr) { // direct call
- mlir::FunctionOpInterface fn =
- mlir::SymbolTable::lookupNearestSymbolFrom<mlir::FunctionOpInterface>(
- op, calleeAttr);
- assert(fn && "Did not find function for call");
- llvmFnTy = cast<mlir::LLVM::LLVMFunctionType>(
- converter->convertType(fn.getFunctionType()));
+ mlir::Operation *callee =
+ mlir::SymbolTable::lookupNearestSymbolFrom(op, calleeAttr);
+ if (auto fn = mlir::dyn_cast<mlir::FunctionOpInterface>(callee)) {
+ llvmFnTy = converter->convertType<mlir::LLVM::LLVMFunctionType>(
+ fn.getFunctionType());
+ assert(llvmFnTy && "Failed to convert function type");
+ } else if (auto alias = mlir::dyn_cast<mlir::LLVM::AliasOp>(callee)) {
+ // The callee is an alias. In that case, we need to prepend the
+ // address of the alias to the operands. The
+ // way aliases work in the LLVM dialect is a little counter-intuitive.
+ // The AliasOp itself is a pseudo-function that returns the address of
+ // the global value being aliased, but when we generate the call we
+ // need to insert an operation that gets the address of the AliasOp.
+ // This all gets sorted out when the LLVM dialect is lowered to LLVM IR.
+ auto symAttr = mlir::cast<mlir::FlatSymbolRefAttr>(calleeAttr);
+ auto addrOfAlias =
+ mlir::LLVM::AddressOfOp::create(
+ rewriter, op->getLoc(),
+ mlir::LLVM::LLVMPointerType::get(rewriter.getContext()), symAttr)
+ .getResult();
+ adjustedCallOperands.push_back(addrOfAlias);
+
+ // Now add the regular operands and assign this to the range value.
+ llvm::append_range(adjustedCallOperands, callOperands);
+ callOperands = adjustedCallOperands;
+
+ // Clear the callee attribute because we're calling an alias.
+ calleeAttr = {};
+ llvmFnTy = mlir::cast<mlir::LLVM::LLVMFunctionType>(alias.getType());
+ } else {
+ // Was this an ifunc?
+ return op->emitError("Unexpected callee type!");
+ }
} else { // indirect call
assert(!op->getOperands().empty() &&
"operands list must no be empty for the indirect call");
@@ -1172,6 +1230,30 @@ void CIRToLLVMFuncOpLowering::lowerFuncAttributes(
}
}
+mlir::LogicalResult CIRToLLVMFuncOpLowering::matchAndRewriteAlias(
+ cir::FuncOp op, llvm::StringRef aliasee, mlir::Type ty, OpAdaptor adaptor,
+ mlir::ConversionPatternRewriter &rewriter) const {
+ SmallVector<mlir::NamedAttribute, 4> attributes;
+ lowerFuncAttributes(op, /*filterArgAndResAttrs=*/false, attributes);
+
+ mlir::Location loc = op.getLoc();
+ auto aliasOp = rewriter.replaceOpWithNewOp<mlir::LLVM::AliasOp>(
+ op, ty, convertLinkage(op.getLinkage()), op.getName(), op.getDsoLocal(),
+ /*threadLocal=*/false, attributes);
+
+ // Create the alias body
+ mlir::OpBuilder builder(op.getContext());
+ mlir::Block *block = builder.createBlock(&aliasOp.getInitializerRegion());
+ builder.setInsertionPointToStart(block);
+ // The type of AddressOfOp is always a pointer.
+ assert(!cir::MissingFeatures::addressSpace());
+ mlir::Type ptrTy = mlir::LLVM::LLVMPointerType::get(ty.getContext());
+ auto addrOp = mlir::LLVM::AddressOfOp::create(builder, loc, ptrTy, aliasee);
+ mlir::LLVM::ReturnOp::create(builder, loc, addrOp);
+
+ return mlir::success();
+}
+
mlir::LogicalResult CIRToLLVMFuncOpLowering::matchAndRewrite(
cir::FuncOp op, OpAdaptor adaptor,
mlir::ConversionPatternRewriter &rewriter) const {
@@ -1196,6 +1278,11 @@ mlir::LogicalResult CIRToLLVMFuncOpLowering::matchAndRewrite(
resultType ? resultType : mlir::LLVM::LLVMVoidType::get(getContext()),
signatureConversion.getConvertedTypes(),
/*isVarArg=*/fnType.isVarArg());
+
+ // If this is an alias, it needs to be lowered to llvm::AliasOp.
+ if (std::optional<llvm::StringRef> aliasee = op.getAliasee())
+ return matchAndRewriteAlias(op, *aliasee, llvmFnTy, adaptor, rewriter);
+
// LLVMFuncOp expects a single FileLine Location instead of a fused
// location.
mlir::Location loc = op.getLoc();
@@ -2089,6 +2176,7 @@ void ConvertCIRToLLVMPass::runOnOperation() {
CIRToLLVMBitClrsbOpLowering,
CIRToLLVMBitClzOpLowering,
CIRToLLVMBitCtzOpLowering,
+ CIRToLLVMBitFfsOpLowering,
CIRToLLVMBitParityOpLowering,
CIRToLLVMBitPopcountOpLowering,
CIRToLLVMBitReverseOpLowering,
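A hypothetical C input that yields a cir.func with an aliasee, which the new matchAndRewriteAlias turns into an llvm.mlir.alias; direct calls to the alias are then emitted through the prepended llvm.mlir.addressof operand described in the call-lowering change above.

static int impl(int x) { return x + 1; }

/* GNU alias attribute: new_name aliases impl in the same TU. */
int new_name(int x) __attribute__((alias("impl")));

int use(void) { return new_name(41); }
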
diff --git a/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.h b/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.h
index 2911ced..f339d43 100644
--- a/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.h
+++ b/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.h
@@ -84,6 +84,16 @@ public:
mlir::ConversionPatternRewriter &) const override;
};
+class CIRToLLVMBitFfsOpLowering
+ : public mlir::OpConversionPattern<cir::BitFfsOp> {
+public:
+ using mlir::OpConversionPattern<cir::BitFfsOp>::OpConversionPattern;
+
+ mlir::LogicalResult
+ matchAndRewrite(cir::BitFfsOp op, OpAdaptor,
+ mlir::ConversionPatternRewriter &) const override;
+};
+
class CIRToLLVMBitParityOpLowering
: public mlir::OpConversionPattern<cir::BitParityOp> {
public:
@@ -257,6 +267,11 @@ class CIRToLLVMFuncOpLowering : public mlir::OpConversionPattern<cir::FuncOp> {
cir::FuncOp func, bool filterArgAndResAttrs,
mlir::SmallVectorImpl<mlir::NamedAttribute> &result) const;
+ mlir::LogicalResult
+ matchAndRewriteAlias(cir::FuncOp op, llvm::StringRef aliasee, mlir::Type ty,
+ OpAdaptor adaptor,
+ mlir::ConversionPatternRewriter &rewriter) const;
+
public:
using mlir::OpConversionPattern<cir::FuncOp>::OpConversionPattern;