Diffstat (limited to 'clang/lib/CIR/CodeGen')
-rw-r--r--  clang/lib/CIR/CodeGen/CIRGenBuiltin.cpp             |   5
-rw-r--r--  clang/lib/CIR/CodeGen/CIRGenCall.cpp                |   8
-rw-r--r--  clang/lib/CIR/CodeGen/CIRGenCall.h                  |   7
-rw-r--r--  clang/lib/CIR/CodeGen/CIRGenDecl.cpp                |  78
-rw-r--r--  clang/lib/CIR/CodeGen/CIRGenExpr.cpp                |  57
-rw-r--r--  clang/lib/CIR/CodeGen/CIRGenExprComplex.cpp         |  88
-rw-r--r--  clang/lib/CIR/CodeGen/CIRGenExprScalar.cpp          |   2
-rw-r--r--  clang/lib/CIR/CodeGen/CIRGenFunction.cpp            |   4
-rw-r--r--  clang/lib/CIR/CodeGen/CIRGenFunction.h              |   8
-rw-r--r--  clang/lib/CIR/CodeGen/CIRGenModule.cpp              | 113
-rw-r--r--  clang/lib/CIR/CodeGen/CIRGenModule.h                |   3
-rw-r--r--  clang/lib/CIR/CodeGen/CIRGenRecordLayoutBuilder.cpp | 106
-rw-r--r--  clang/lib/CIR/CodeGen/CIRGenStmt.cpp                |  17
13 files changed, 454 insertions(+), 42 deletions(-)
diff --git a/clang/lib/CIR/CodeGen/CIRGenBuiltin.cpp b/clang/lib/CIR/CodeGen/CIRGenBuiltin.cpp
index ef136f8..9049a01 100644
--- a/clang/lib/CIR/CodeGen/CIRGenBuiltin.cpp
+++ b/clang/lib/CIR/CodeGen/CIRGenBuiltin.cpp
@@ -190,6 +190,11 @@ RValue CIRGenFunction::emitBuiltinExpr(const GlobalDecl &gd, unsigned builtinID,
assert(!cir::MissingFeatures::builtinCheckKind());
return emitBuiltinBitOp<cir::BitClzOp>(*this, e, /*poisonZero=*/true);
+ case Builtin::BI__builtin_ffs:
+ case Builtin::BI__builtin_ffsl:
+ case Builtin::BI__builtin_ffsll:
+ return emitBuiltinBitOp<cir::BitFfsOp>(*this, e);
+
case Builtin::BI__builtin_parity:
case Builtin::BI__builtin_parityl:
case Builtin::BI__builtin_parityll:
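
A minimal C example (hypothetical, not taken from this commit's tests) of the builtins the new cases handle; each __builtin_ffs* call should now lower through emitBuiltinBitOp to a cir.bit.ffs operation instead of falling through as not yet implemented:

    /* ffs returns one plus the index of the least significant set bit,
       or zero if the argument is zero. */
    int lowest_set(int x) { return __builtin_ffs(x); }
    int lowest_set_l(long x) { return __builtin_ffsl(x); }
    int lowest_set_ll(long long x) { return __builtin_ffsll(x); }
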
diff --git a/clang/lib/CIR/CodeGen/CIRGenCall.cpp b/clang/lib/CIR/CodeGen/CIRGenCall.cpp
index 938d143..fc208ff 100644
--- a/clang/lib/CIR/CodeGen/CIRGenCall.cpp
+++ b/clang/lib/CIR/CodeGen/CIRGenCall.cpp
@@ -582,6 +582,14 @@ RValue CIRGenFunction::emitCall(const CIRGenFunctionInfo &funcInfo,
cir::FuncOp directFuncOp;
if (auto fnOp = dyn_cast<cir::FuncOp>(calleePtr)) {
directFuncOp = fnOp;
+ } else if (auto getGlobalOp = mlir::dyn_cast<cir::GetGlobalOp>(calleePtr)) {
+ // FIXME(cir): This peephole optimization avoids indirect calls for
+ // builtins. This should be fixed in the builtin declaration instead by
+ // not emitting an unnecessary get_global in the first place.
+ // However, this is also used for no-prototype functions.
+ mlir::Operation *globalOp = cgm.getGlobalValue(getGlobalOp.getName());
+ assert(globalOp && "undefined global function");
+ directFuncOp = mlir::cast<cir::FuncOp>(globalOp);
} else {
[[maybe_unused]] mlir::ValueTypeRange<mlir::ResultRange> resultTypes =
calleePtr->getResultTypes();
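
A sketch of the case the new branch covers (hypothetical C input): when the callee value is produced by a cir.get_global of a function symbol, as happens for builtin declarations and no-prototype functions, the symbol is resolved back to its cir.func so a direct call is emitted rather than an indirect one:

    int f();        /* no prototype: the callee may be materialized as a
                       cir.get_global of @f */
    int g(void) {
      return f();   /* the peephole resolves @f and emits a direct call */
    }
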
diff --git a/clang/lib/CIR/CodeGen/CIRGenCall.h b/clang/lib/CIR/CodeGen/CIRGenCall.h
index bd11329..28576a1 100644
--- a/clang/lib/CIR/CodeGen/CIRGenCall.h
+++ b/clang/lib/CIR/CodeGen/CIRGenCall.h
@@ -116,6 +116,11 @@ public:
assert(isOrdinary());
return reinterpret_cast<mlir::Operation *>(kindOrFunctionPtr);
}
+
+ void setFunctionPointer(mlir::Operation *functionPtr) {
+ assert(isOrdinary());
+ kindOrFunctionPtr = SpecialKind(reinterpret_cast<uintptr_t>(functionPtr));
+ }
};
/// Type for representing both the decl and type of parameters to a function.
@@ -132,7 +137,7 @@ private:
/// A data-flow flag to make sure getRValue and/or copyInto are not
/// called twice for duplicated IR emission.
- mutable bool isUsed;
+ [[maybe_unused]] mutable bool isUsed;
public:
clang::QualType ty;
diff --git a/clang/lib/CIR/CodeGen/CIRGenDecl.cpp b/clang/lib/CIR/CodeGen/CIRGenDecl.cpp
index a28ac3c..9e8eaa5 100644
--- a/clang/lib/CIR/CodeGen/CIRGenDecl.cpp
+++ b/clang/lib/CIR/CodeGen/CIRGenDecl.cpp
@@ -520,7 +520,7 @@ void CIRGenFunction::emitExprAsInit(const Expr *init, const ValueDecl *d,
llvm_unreachable("bad evaluation kind");
}
-void CIRGenFunction::emitDecl(const Decl &d) {
+void CIRGenFunction::emitDecl(const Decl &d, bool evaluateConditionDecl) {
switch (d.getKind()) {
case Decl::BuiltinTemplate:
case Decl::TranslationUnit:
@@ -608,11 +608,14 @@ void CIRGenFunction::emitDecl(const Decl &d) {
case Decl::UsingDirective: // using namespace X; [C++]
assert(!cir::MissingFeatures::generateDebugInfo());
return;
- case Decl::Var: {
+ case Decl::Var:
+ case Decl::Decomposition: {
const VarDecl &vd = cast<VarDecl>(d);
assert(vd.isLocalVarDecl() &&
"Should not see file-scope variables inside a function!");
emitVarDecl(vd);
+ if (evaluateConditionDecl)
+ maybeEmitDeferredVarDeclInit(&vd);
return;
}
case Decl::OpenACCDeclare:
@@ -632,7 +635,6 @@ void CIRGenFunction::emitDecl(const Decl &d) {
case Decl::ImplicitConceptSpecialization:
case Decl::TopLevelStmt:
case Decl::UsingPack:
- case Decl::Decomposition: // This could be moved to join Decl::Var
case Decl::OMPDeclareReduction:
case Decl::OMPDeclareMapper:
cgm.errorNYI(d.getSourceRange(),
@@ -649,6 +651,38 @@ void CIRGenFunction::emitNullabilityCheck(LValue lhs, mlir::Value rhs,
assert(!cir::MissingFeatures::sanitizers());
}
+/// Destroys all the elements of the given array, from the last to the first.
+/// The array cannot be zero-length.
+///
+/// \param begin - a type* denoting the first element of the array
+/// \param end - a type* denoting one past the end of the array
+/// \param elementType - the element type of the array
+/// \param destroyer - the function to call to destroy elements
+void CIRGenFunction::emitArrayDestroy(mlir::Value begin, mlir::Value end,
+ QualType elementType,
+ CharUnits elementAlign,
+ Destroyer *destroyer) {
+ assert(!elementType->isArrayType());
+
+ // Differently from LLVM traditional codegen, use a higher level
+ // representation instead of lowering directly to a loop.
+ mlir::Type cirElementType = convertTypeForMem(elementType);
+ cir::PointerType ptrToElmType = builder.getPointerTo(cirElementType);
+
+ // Emit the dtor call that will execute for every array element.
+ cir::ArrayDtor::create(
+ builder, *currSrcLoc, begin, [&](mlir::OpBuilder &b, mlir::Location loc) {
+ auto arg = b.getInsertionBlock()->addArgument(ptrToElmType, loc);
+ Address curAddr = Address(arg, cirElementType, elementAlign);
+ assert(!cir::MissingFeatures::dtorCleanups());
+
+ // Perform the actual destruction there.
+ destroyer(*this, curAddr, elementType);
+
+ cir::YieldOp::create(builder, loc);
+ });
+}
+
/// Immediately perform the destruction of the given object.
///
/// \param addr - the address of the object; a type*
@@ -658,10 +692,34 @@ void CIRGenFunction::emitNullabilityCheck(LValue lhs, mlir::Value rhs,
/// elements
void CIRGenFunction::emitDestroy(Address addr, QualType type,
Destroyer *destroyer) {
- if (getContext().getAsArrayType(type))
- cgm.errorNYI("emitDestroy: array type");
+ const ArrayType *arrayType = getContext().getAsArrayType(type);
+ if (!arrayType)
+ return destroyer(*this, addr, type);
+
+ mlir::Value length = emitArrayLength(arrayType, type, addr);
+
+ CharUnits elementAlign = addr.getAlignment().alignmentOfArrayElement(
+ getContext().getTypeSizeInChars(type));
+
+ auto constantCount = length.getDefiningOp<cir::ConstantOp>();
+ if (!constantCount) {
+ assert(!cir::MissingFeatures::vlas());
+ cgm.errorNYI("emitDestroy: variable length array");
+ return;
+ }
+
+ auto constIntAttr = mlir::dyn_cast<cir::IntAttr>(constantCount.getValue());
+ // If it's constant zero, we can just skip the entire thing.
+ if (constIntAttr && constIntAttr.getUInt() == 0)
+ return;
+
+ mlir::Value begin = addr.getPointer();
+ mlir::Value end; // This will be used for future non-constant counts.
+ emitArrayDestroy(begin, end, type, elementAlign, destroyer);
- return destroyer(*this, addr, type);
+ // If the array destroy didn't use the length op, we can erase it.
+ if (constantCount.use_empty())
+ constantCount.erase();
}
CIRGenFunction::Destroyer *
@@ -741,3 +799,11 @@ void CIRGenFunction::emitAutoVarTypeCleanup(
assert(!cir::MissingFeatures::ehCleanupFlags());
ehStack.pushCleanup<DestroyObject>(cleanupKind, addr, type, destroyer);
}
+
+void CIRGenFunction::maybeEmitDeferredVarDeclInit(const VarDecl *vd) {
+ if (auto *dd = dyn_cast_if_present<DecompositionDecl>(vd)) {
+ for (auto *b : dd->flat_bindings())
+ if (auto *hd = b->getHoldingVar())
+ emitVarDecl(*hd);
+ }
+}
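
A minimal C++ illustration (hypothetical, not from the commit's tests) of the path this enables: a fixed-size local array of objects with a non-trivial destructor, which previously hit the "emitDestroy: array type" NYI and now emits a cir.array.dtor region:

    struct Guard {
      ~Guard(); // non-trivial destructor, defined elsewhere
    };

    void scope() {
      Guard guards[4];
      // On scope exit, emitDestroy sees the constant length 4 and emits a
      // single high-level cir.array.dtor region around the per-element
      // destructor call; later lowering expands it into a loop, unlike
      // classic codegen, which emits the loop directly.
    }
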
diff --git a/clang/lib/CIR/CodeGen/CIRGenExpr.cpp b/clang/lib/CIR/CodeGen/CIRGenExpr.cpp
index 64dc1ce..d267504 100644
--- a/clang/lib/CIR/CodeGen/CIRGenExpr.cpp
+++ b/clang/lib/CIR/CodeGen/CIRGenExpr.cpp
@@ -584,6 +584,15 @@ LValue CIRGenFunction::emitDeclRefLValue(const DeclRefExpr *e) {
return lv;
}
+ if (const auto *bd = dyn_cast<BindingDecl>(nd)) {
+ if (e->refersToEnclosingVariableOrCapture()) {
+ assert(!cir::MissingFeatures::lambdaCaptures());
+ cgm.errorNYI(e->getSourceRange(), "emitDeclRefLValue: lambda captures");
+ return LValue();
+ }
+ return emitLValue(bd->getBinding());
+ }
+
cgm.errorNYI(e->getSourceRange(), "emitDeclRefLValue: unhandled decl type");
return LValue();
}
@@ -1280,7 +1289,7 @@ RValue CIRGenFunction::getUndefRValue(QualType ty) {
}
RValue CIRGenFunction::emitCall(clang::QualType calleeTy,
- const CIRGenCallee &callee,
+ const CIRGenCallee &origCallee,
const clang::CallExpr *e,
ReturnValueSlot returnValue) {
// Get the actual function type. The callee type will always be a pointer to
@@ -1291,6 +1300,8 @@ RValue CIRGenFunction::emitCall(clang::QualType calleeTy,
calleeTy = getContext().getCanonicalType(calleeTy);
auto pointeeTy = cast<PointerType>(calleeTy)->getPointeeType();
+ CIRGenCallee callee = origCallee;
+
if (getLangOpts().CPlusPlus)
assert(!cir::MissingFeatures::sanitizers());
@@ -1307,7 +1318,44 @@ RValue CIRGenFunction::emitCall(clang::QualType calleeTy,
const CIRGenFunctionInfo &funcInfo =
cgm.getTypes().arrangeFreeFunctionCall(args, fnType);
- assert(!cir::MissingFeatures::opCallNoPrototypeFunc());
+ // C99 6.5.2.2p6:
+ // If the expression that denotes the called function has a type that does
+ // not include a prototype, [the default argument promotions are performed].
+ // If the number of arguments does not equal the number of parameters, the
+ // behavior is undefined. If the function is defined with a type that
+ // includes a prototype, and either the prototype ends with an ellipsis (,
+ // ...) or the types of the arguments after promotion are not compatible
+ // with the types of the parameters, the behavior is undefined. If the
+ // function is defined with a type that does not include a prototype, and
+ // the types of the arguments after promotion are not compatible with those
+ // of the parameters after promotion, the behavior is undefined [except in
+ // some trivial cases].
+ // That is, in the general case, we should assume that a call through an
+ // unprototyped function type works like a *non-variadic* call. The way we
+ // make this work is to cast to the exact type of the promoted arguments.
+ if (isa<FunctionNoProtoType>(fnType)) {
+ assert(!cir::MissingFeatures::opCallChain());
+ assert(!cir::MissingFeatures::addressSpace());
+ cir::FuncType calleeTy = getTypes().getFunctionType(funcInfo);
+ // Get the non-variadic function type.
+ calleeTy = cir::FuncType::get(calleeTy.getInputs(),
+ calleeTy.getReturnType(), false);
+ auto calleePtrTy = cir::PointerType::get(calleeTy);
+
+ mlir::Operation *fn = callee.getFunctionPointer();
+ mlir::Value addr;
+ if (auto funcOp = mlir::dyn_cast<cir::FuncOp>(fn)) {
+ addr = builder.create<cir::GetGlobalOp>(
+ getLoc(e->getSourceRange()),
+ cir::PointerType::get(funcOp.getFunctionType()), funcOp.getSymName());
+ } else {
+ addr = fn->getResult(0);
+ }
+
+ fn = builder.createBitcast(addr, calleePtrTy).getDefiningOp();
+ callee.setFunctionPointer(fn);
+ }
+
assert(!cir::MissingFeatures::opCallFnInfoOpts());
assert(!cir::MissingFeatures::hip());
assert(!cir::MissingFeatures::opCallMustTail());
@@ -1433,9 +1481,10 @@ Address CIRGenFunction::emitArrayToPointerDecay(const Expr *e) {
if (e->getType()->isVariableArrayType())
return addr;
- auto pointeeTy = mlir::cast<cir::ArrayType>(lvalueAddrTy.getPointee());
+ [[maybe_unused]] auto pointeeTy =
+ mlir::cast<cir::ArrayType>(lvalueAddrTy.getPointee());
- mlir::Type arrayTy = convertType(e->getType());
+ [[maybe_unused]] mlir::Type arrayTy = convertType(e->getType());
assert(mlir::isa<cir::ArrayType>(arrayTy) && "expected array");
assert(pointeeTy == arrayTy);
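
A small C sketch (hypothetical) of the unprototyped-call handling added above: per C99 6.5.2.2p6, the arguments undergo the default promotions and the callee is cast to the exact promoted function type, so the call is emitted as non-variadic:

    int add();                 /* declaration without a prototype */

    int call_add(void) {
      /* The callee is bitcast to the promoted type, here
         int (*)(int, int), and a non-variadic direct call is emitted. */
      return add(1, 2);
    }
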
diff --git a/clang/lib/CIR/CodeGen/CIRGenExprComplex.cpp b/clang/lib/CIR/CodeGen/CIRGenExprComplex.cpp
index a09d739..3aa170e 100644
--- a/clang/lib/CIR/CodeGen/CIRGenExprComplex.cpp
+++ b/clang/lib/CIR/CodeGen/CIRGenExprComplex.cpp
@@ -91,6 +91,14 @@ public:
}
mlir::Value VisitUnaryDeref(const Expr *e);
+
+ mlir::Value VisitUnaryPlus(const UnaryOperator *e);
+
+ mlir::Value VisitPlusMinus(const UnaryOperator *e, cir::UnaryOpKind kind,
+ QualType promotionType);
+
+ mlir::Value VisitUnaryMinus(const UnaryOperator *e);
+
mlir::Value VisitUnaryNot(const UnaryOperator *e);
struct BinOpInfo {
@@ -110,6 +118,7 @@ public:
mlir::Value emitBinAdd(const BinOpInfo &op);
mlir::Value emitBinSub(const BinOpInfo &op);
+ mlir::Value emitBinMul(const BinOpInfo &op);
QualType getPromotionType(QualType ty, bool isDivOpCode = false) {
if (auto *complexTy = ty->getAs<ComplexType>()) {
@@ -142,16 +151,20 @@ public:
HANDLEBINOP(Add)
HANDLEBINOP(Sub)
+ HANDLEBINOP(Mul)
#undef HANDLEBINOP
};
} // namespace
+#ifndef NDEBUG
+// Only used in asserts
static const ComplexType *getComplexType(QualType type) {
type = type.getCanonicalType();
if (const ComplexType *comp = dyn_cast<ComplexType>(type))
return comp;
return cast<ComplexType>(cast<AtomicType>(type)->getValueType());
}
+#endif // NDEBUG
LValue ComplexExprEmitter::emitBinAssignLValue(const BinaryOperator *e,
mlir::Value &value) {
@@ -282,6 +295,41 @@ mlir::Value ComplexExprEmitter::emitCast(CastKind ck, Expr *op,
llvm_unreachable("unknown cast resulting in complex value");
}
+mlir::Value ComplexExprEmitter::VisitUnaryPlus(const UnaryOperator *e) {
+ QualType promotionTy = getPromotionType(e->getSubExpr()->getType());
+ mlir::Value result = VisitPlusMinus(e, cir::UnaryOpKind::Plus, promotionTy);
+ if (!promotionTy.isNull()) {
+ cgf.cgm.errorNYI("ComplexExprEmitter::VisitUnaryPlus emitUnPromotedValue");
+ return {};
+ }
+ return result;
+}
+
+mlir::Value ComplexExprEmitter::VisitPlusMinus(const UnaryOperator *e,
+ cir::UnaryOpKind kind,
+ QualType promotionType) {
+ assert((kind == cir::UnaryOpKind::Plus ||
+         kind == cir::UnaryOpKind::Minus) &&
+        "Invalid UnaryOp kind for ComplexType Plus or Minus");
+
+ mlir::Value op;
+ if (!promotionType.isNull())
+ op = cgf.emitPromotedComplexExpr(e->getSubExpr(), promotionType);
+ else
+ op = Visit(e->getSubExpr());
+ return builder.createUnaryOp(cgf.getLoc(e->getExprLoc()), kind, op);
+}
+
+mlir::Value ComplexExprEmitter::VisitUnaryMinus(const UnaryOperator *e) {
+ QualType promotionTy = getPromotionType(e->getSubExpr()->getType());
+ mlir::Value result = VisitPlusMinus(e, cir::UnaryOpKind::Minus, promotionTy);
+ if (!promotionTy.isNull()) {
+ cgf.cgm.errorNYI("ComplexExprEmitter::VisitUnaryMinus emitUnPromotedValue");
+ return {};
+ }
+ return result;
+}
+
mlir::Value ComplexExprEmitter::emitConstant(
const CIRGenFunction::ConstantEmission &constant, Expr *e) {
assert(constant && "not a constant");
@@ -534,13 +582,22 @@ mlir::Value ComplexExprEmitter::emitPromoted(const Expr *e,
return emitBin##OP(emitBinOps(bo, promotionTy));
HANDLE_BINOP(Add)
HANDLE_BINOP(Sub)
+ HANDLE_BINOP(Mul)
#undef HANDLE_BINOP
default:
break;
}
- } else if (isa<UnaryOperator>(e)) {
- cgf.cgm.errorNYI("emitPromoted UnaryOperator");
- return {};
+ } else if (const auto *unaryOp = dyn_cast<UnaryOperator>(e)) {
+ switch (unaryOp->getOpcode()) {
+ case UO_Minus:
+ case UO_Plus: {
+ auto kind = unaryOp->getOpcode() == UO_Plus ? cir::UnaryOpKind::Plus
+ : cir::UnaryOpKind::Minus;
+ return VisitPlusMinus(unaryOp, kind, promotionTy);
+ }
+ default:
+ break;
+ }
}
mlir::Value result = Visit(const_cast<Expr *>(e));
@@ -585,6 +642,31 @@ mlir::Value ComplexExprEmitter::emitBinSub(const BinOpInfo &op) {
return builder.create<cir::ComplexSubOp>(op.loc, op.lhs, op.rhs);
}
+static cir::ComplexRangeKind
+getComplexRangeAttr(LangOptions::ComplexRangeKind range) {
+ switch (range) {
+ case LangOptions::CX_Full:
+ return cir::ComplexRangeKind::Full;
+ case LangOptions::CX_Improved:
+ return cir::ComplexRangeKind::Improved;
+ case LangOptions::CX_Promoted:
+ return cir::ComplexRangeKind::Promoted;
+ case LangOptions::CX_Basic:
+ return cir::ComplexRangeKind::Basic;
+ case LangOptions::CX_None:
+ // The default value for ComplexRangeKind is Full if no option is selected.
+ return cir::ComplexRangeKind::Full;
+ }
+}
+
+mlir::Value ComplexExprEmitter::emitBinMul(const BinOpInfo &op) {
+ assert(!cir::MissingFeatures::fastMathFlags());
+ assert(!cir::MissingFeatures::cgFPOptionsRAII());
+ cir::ComplexRangeKind rangeKind =
+ getComplexRangeAttr(op.fpFeatures.getComplexRange());
+ return builder.create<cir::ComplexMulOp>(op.loc, op.lhs, op.rhs, rangeKind);
+}
+
LValue CIRGenFunction::emitComplexAssignmentLValue(const BinaryOperator *e) {
assert(e->getOpcode() == BO_Assign && "Expected assign op");
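
An illustrative C snippet (hypothetical) of the complex-number operations this file now emits: unary plus and minus become cir.unary on the complex value, and multiplication becomes cir.complex.mul with a range attribute derived from LangOptions (for example via -fcomplex-arithmetic=basic):

    _Complex double negate(_Complex double z) {
      return -z;    /* VisitUnaryMinus -> cir.unary(minus) */
    }

    _Complex double mul(_Complex double a, _Complex double b) {
      return a * b; /* emitBinMul -> cir.complex.mul with the range kind */
    }
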
diff --git a/clang/lib/CIR/CodeGen/CIRGenExprScalar.cpp b/clang/lib/CIR/CodeGen/CIRGenExprScalar.cpp
index 2523b0f..f62be49 100644
--- a/clang/lib/CIR/CodeGen/CIRGenExprScalar.cpp
+++ b/clang/lib/CIR/CodeGen/CIRGenExprScalar.cpp
@@ -439,7 +439,7 @@ public:
value = builder.getTrue(cgf.getLoc(e->getExprLoc()));
} else if (type->isIntegerType()) {
QualType promotedType;
- bool canPerformLossyDemotionCheck = false;
+ [[maybe_unused]] bool canPerformLossyDemotionCheck = false;
if (cgf.getContext().isPromotableIntegerType(type)) {
promotedType = cgf.getContext().getPromotedIntegerType(type);
assert(promotedType != type && "Shouldn't promote to the same type.");
diff --git a/clang/lib/CIR/CodeGen/CIRGenFunction.cpp b/clang/lib/CIR/CodeGen/CIRGenFunction.cpp
index c65d025..0c9bc38 100644
--- a/clang/lib/CIR/CodeGen/CIRGenFunction.cpp
+++ b/clang/lib/CIR/CodeGen/CIRGenFunction.cpp
@@ -216,8 +216,7 @@ void CIRGenFunction::emitAndUpdateRetAlloca(QualType type, mlir::Location loc,
void CIRGenFunction::declare(mlir::Value addrVal, const Decl *var, QualType ty,
mlir::Location loc, CharUnits alignment,
bool isParam) {
- const auto *namedVar = dyn_cast_or_null<NamedDecl>(var);
- assert(namedVar && "Needs a named decl");
+ assert(isa<NamedDecl>(var) && "Needs a named decl");
assert(!cir::MissingFeatures::cgfSymbolTable());
auto allocaOp = cast<cir::AllocaOp>(addrVal.getDefiningOp());
@@ -943,6 +942,7 @@ void CIRGenFunction::emitVariablyModifiedType(QualType type) {
case Type::HLSLInlineSpirv:
case Type::PredefinedSugar:
cgm.errorNYI("CIRGenFunction::emitVariablyModifiedType");
+ break;
#define TYPE(Class, Base)
#define ABSTRACT_TYPE(Class, Base)
diff --git a/clang/lib/CIR/CodeGen/CIRGenFunction.h b/clang/lib/CIR/CodeGen/CIRGenFunction.h
index 77539d7..f9c8636 100644
--- a/clang/lib/CIR/CodeGen/CIRGenFunction.h
+++ b/clang/lib/CIR/CodeGen/CIRGenFunction.h
@@ -848,6 +848,10 @@ public:
/// even if no aggregate location is provided.
RValue emitAnyExprToTemp(const clang::Expr *e);
+ void emitArrayDestroy(mlir::Value begin, mlir::Value end,
+ QualType elementType, CharUnits elementAlign,
+ Destroyer *destroyer);
+
mlir::Value emitArrayLength(const clang::ArrayType *arrayType,
QualType &baseType, Address &addr);
LValue emitArraySubscriptExpr(const clang::ArraySubscriptExpr *e);
@@ -866,6 +870,8 @@ public:
void emitAutoVarTypeCleanup(const AutoVarEmission &emission,
clang::QualType::DestructionKind dtorKind);
+ void maybeEmitDeferredVarDeclInit(const VarDecl *vd);
+
void emitBaseInitializer(mlir::Location loc, const CXXRecordDecl *classDecl,
CXXCtorInitializer *baseInit);
@@ -1055,7 +1061,7 @@ public:
void emitCompoundStmtWithoutScope(const clang::CompoundStmt &s);
- void emitDecl(const clang::Decl &d);
+ void emitDecl(const clang::Decl &d, bool evaluateConditionDecl = false);
mlir::LogicalResult emitDeclStmt(const clang::DeclStmt &s);
LValue emitDeclRefLValue(const clang::DeclRefExpr *e);
diff --git a/clang/lib/CIR/CodeGen/CIRGenModule.cpp b/clang/lib/CIR/CodeGen/CIRGenModule.cpp
index 0724cb1..b143682 100644
--- a/clang/lib/CIR/CodeGen/CIRGenModule.cpp
+++ b/clang/lib/CIR/CodeGen/CIRGenModule.cpp
@@ -656,8 +656,6 @@ mlir::Value CIRGenModule::getAddrOfGlobalVar(const VarDecl *d, mlir::Type ty,
void CIRGenModule::emitGlobalVarDefinition(const clang::VarDecl *vd,
bool isTentative) {
- const QualType astTy = vd->getType();
-
if (getLangOpts().OpenCL || getLangOpts().OpenMPIsTargetDevice) {
errorNYI(vd->getSourceRange(), "emit OpenCL/OpenMP global variable");
return;
@@ -701,7 +699,7 @@ void CIRGenModule::emitGlobalVarDefinition(const clang::VarDecl *vd,
// never attempt to emit a tentative definition if a real one
exists. A use may still exist, however, so we may still need
// to do a RAUW.
- assert(!astTy->isIncompleteType() && "Unexpected incomplete type");
+ assert(!vd->getType()->isIncompleteType() && "Unexpected incomplete type");
init = builder.getZeroInitAttr(convertType(vd->getType()));
} else {
emitter.emplace(*this);
@@ -1103,6 +1101,60 @@ cir::GlobalLinkageKind CIRGenModule::getCIRLinkageForDeclarator(
return cir::GlobalLinkageKind::ExternalLinkage;
}
+/// This function is called when we implement a function with no prototype, e.g.
+/// "int foo() {}". If there are existing call uses of the old function in the
+/// module, this adjusts them to call the new function directly.
+///
+/// This is not just a cleanup: the always_inline pass requires direct calls to
+/// functions to be able to inline them. If there is a bitcast in the way, it
+/// won't inline them. Instcombine normally deletes these calls, but it isn't
+/// run at -O0.
+void CIRGenModule::replaceUsesOfNonProtoTypeWithRealFunction(
+ mlir::Operation *old, cir::FuncOp newFn) {
+ // If we're redefining a global as a function, don't transform it.
+ auto oldFn = mlir::dyn_cast<cir::FuncOp>(old);
+ if (!oldFn)
+ return;
+
+ // TODO(cir): this RAUW ignores the features below.
+ assert(!cir::MissingFeatures::opFuncExceptions());
+ assert(!cir::MissingFeatures::opFuncParameterAttributes());
+ assert(!cir::MissingFeatures::opFuncOperandBundles());
+ if (oldFn->getAttrs().size() <= 1)
+ errorNYI(old->getLoc(),
+ "replaceUsesOfNonProtoTypeWithRealFunction: Attribute forwarding");
+
+ // Mark new function as originated from a no-proto declaration.
+ newFn.setNoProto(oldFn.getNoProto());
+
+ // Iterate through all calls of the no-proto function.
+ std::optional<mlir::SymbolTable::UseRange> symUses =
+ oldFn.getSymbolUses(oldFn->getParentOp());
+ for (const mlir::SymbolTable::SymbolUse &use : symUses.value()) {
+ mlir::OpBuilder::InsertionGuard guard(builder);
+
+ if (auto noProtoCallOp = mlir::dyn_cast<cir::CallOp>(use.getUser())) {
+ builder.setInsertionPoint(noProtoCallOp);
+
+ // Patch call type with the real function type.
+ cir::CallOp realCallOp = builder.createCallOp(
+ noProtoCallOp.getLoc(), newFn, noProtoCallOp.getOperands());
+
+ // Replace old no proto call with fixed call.
+ noProtoCallOp.replaceAllUsesWith(realCallOp);
+ noProtoCallOp.erase();
+ } else if (auto getGlobalOp =
+ mlir::dyn_cast<cir::GetGlobalOp>(use.getUser())) {
+ // Replace type
+ getGlobalOp.getAddr().setType(
+ cir::PointerType::get(newFn.getFunctionType()));
+ } else {
+ errorNYI(use.getUser()->getLoc(),
+ "replaceUsesOfNonProtoTypeWithRealFunction: unexpected use");
+ }
+ }
+}
+
cir::GlobalLinkageKind
CIRGenModule::getCIRLinkageVarDefinition(const VarDecl *vd, bool isConstant) {
assert(!isConstant && "constant variables NYI");
@@ -1244,6 +1296,7 @@ void CIRGenModule::emitTopLevelDecl(Decl *decl) {
decl->getDeclKindName());
break;
+ case Decl::CXXConversion:
case Decl::CXXMethod:
case Decl::Function: {
auto *fd = cast<FunctionDecl>(decl);
@@ -1253,8 +1306,13 @@ void CIRGenModule::emitTopLevelDecl(Decl *decl) {
break;
}
- case Decl::Var: {
+ case Decl::Var:
+ case Decl::Decomposition: {
auto *vd = cast<VarDecl>(decl);
+ if (isa<DecompositionDecl>(decl)) {
+ errorNYI(decl->getSourceRange(), "global variable decompositions");
+ break;
+ }
emitGlobal(vd);
break;
}
@@ -1276,8 +1334,14 @@ void CIRGenModule::emitTopLevelDecl(Decl *decl) {
break;
// No code generation needed.
- case Decl::UsingShadow:
+ case Decl::ClassTemplate:
+ case Decl::Concept:
+ case Decl::CXXDeductionGuide:
case Decl::Empty:
+ case Decl::FunctionTemplate:
+ case Decl::StaticAssert:
+ case Decl::TypeAliasTemplate:
+ case Decl::UsingShadow:
break;
case Decl::CXXConstructor:
@@ -1539,10 +1603,10 @@ static bool shouldAssumeDSOLocal(const CIRGenModule &cgm,
const llvm::Triple &tt = cgm.getTriple();
const CodeGenOptions &cgOpts = cgm.getCodeGenOpts();
- if (tt.isWindowsGNUEnvironment()) {
- // In MinGW, variables without DLLImport can still be automatically
- // imported from a DLL by the linker; don't mark variables that
- // potentially could come from another DLL as DSO local.
+ if (tt.isOSCygMing()) {
+ // In MinGW and Cygwin, variables without DLLImport can still be
+ // automatically imported from a DLL by the linker; don't mark variables
+ // that potentially could come from another DLL as DSO local.
// With EmulatedTLS, TLS variables can be autoimported from other DLLs
// (and this actually happens in the public interface of libstdc++), so
@@ -1701,8 +1765,7 @@ cir::FuncOp CIRGenModule::getOrCreateCIRFunction(
// Lookup the entry, lazily creating it if necessary.
mlir::Operation *entry = getGlobalValue(mangledName);
if (entry) {
- if (!isa<cir::FuncOp>(entry))
- errorNYI(d->getSourceRange(), "getOrCreateCIRFunction: non-FuncOp");
+ assert(mlir::isa<cir::FuncOp>(entry));
assert(!cir::MissingFeatures::weakRefReference());
@@ -1738,6 +1801,30 @@ cir::FuncOp CIRGenModule::getOrCreateCIRFunction(
invalidLoc ? theModule->getLoc() : getLoc(funcDecl->getSourceRange()),
mangledName, mlir::cast<cir::FuncType>(funcType), funcDecl);
+ // If we already created a function with the same mangled name (but different
+ // type) before, take its name and add it to the list of functions to be
+ // replaced with the new function at the end of CodeGen.
+ //
+ // This happens if there is a prototype for a function (e.g. "int f()") and
+ // then a definition of a different type (e.g. "int f(int x)").
+ if (entry) {
+
+ // Fetch a generic symbol-defining operation and its uses.
+ auto symbolOp = mlir::cast<mlir::SymbolOpInterface>(entry);
+
+ // This might be an implementation of a function without a prototype, in
+ // which case, try to do special replacement of calls which match the new
+ // prototype. The key point here is that we also potentially drop
+ // arguments from the call site so as to make a direct call, which makes the
+ // inliner happier and suppresses a number of optimizer warnings (!) about
+ // dropping arguments.
+ if (symbolOp.getSymbolUses(symbolOp->getParentOp()))
+ replaceUsesOfNonProtoTypeWithRealFunction(entry, funcOp);
+
+ // Obliterate no-proto declaration.
+ entry->erase();
+ }
+
if (d)
setFunctionAttributes(gd, funcOp, /*isIncompleteFunction=*/false, isThunk);
@@ -1814,7 +1901,9 @@ CIRGenModule::createCIRFunction(mlir::Location loc, StringRef name,
func = builder.create<cir::FuncOp>(loc, name, funcType);
assert(!cir::MissingFeatures::opFuncAstDeclAttr());
- assert(!cir::MissingFeatures::opFuncNoProto());
+
+ if (funcDecl && !funcDecl->hasPrototype())
+ func.setNoProto(true);
assert(func.isDeclaration() && "expected empty body");
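
A classic C pattern (hypothetical example) exercising both the new no_proto flag and replaceUsesOfNonProtoTypeWithRealFunction: a call is first emitted against a prototype-less declaration, and when the definition later supplies the real type, the existing uses are rewritten to direct calls and the stale declaration is erased:

    void report();    /* no prototype: the cir.func is marked no_proto */

    void use(void) {
      report(42);     /* call emitted against the no-proto symbol */
    }

    /* Definition with a real type: getOrCreateCIRFunction finds the old
       entry, patches the call in use() into a direct call to this
       function, and erases the no-proto declaration. */
    void report(int v) { (void)v; }
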
diff --git a/clang/lib/CIR/CodeGen/CIRGenModule.h b/clang/lib/CIR/CodeGen/CIRGenModule.h
index 22519ff..5d07d38 100644
--- a/clang/lib/CIR/CodeGen/CIRGenModule.h
+++ b/clang/lib/CIR/CodeGen/CIRGenModule.h
@@ -313,6 +313,9 @@ public:
static void setInitializer(cir::GlobalOp &op, mlir::Attribute value);
+ void replaceUsesOfNonProtoTypeWithRealFunction(mlir::Operation *old,
+ cir::FuncOp newFn);
+
cir::FuncOp
getOrCreateCIRFunction(llvm::StringRef mangledName, mlir::Type funcType,
clang::GlobalDecl gd, bool forVTable,
diff --git a/clang/lib/CIR/CodeGen/CIRGenRecordLayoutBuilder.cpp b/clang/lib/CIR/CodeGen/CIRGenRecordLayoutBuilder.cpp
index 05e8848..0c8ff4bd 100644
--- a/clang/lib/CIR/CodeGen/CIRGenRecordLayoutBuilder.cpp
+++ b/clang/lib/CIR/CodeGen/CIRGenRecordLayoutBuilder.cpp
@@ -91,6 +91,9 @@ struct CIRRecordLowering final {
return astContext.getTargetInfo().getABI().starts_with("aapcs");
}
+ /// Helper function to check if the target machine is BigEndian.
+ bool isBigEndian() const { return astContext.getTargetInfo().isBigEndian(); }
+
CharUnits bitsToCharUnits(uint64_t bitOffset) {
return astContext.toCharUnitsFromBits(bitOffset);
}
@@ -438,9 +441,7 @@ CIRRecordLowering::accumulateBitFields(RecordDecl::field_iterator field,
} else if (cirGenTypes.getCGModule()
.getCodeGenOpts()
.FineGrainedBitfieldAccesses) {
- assert(!cir::MissingFeatures::nonFineGrainedBitfields());
- cirGenTypes.getCGModule().errorNYI(field->getSourceRange(),
- "NYI FineGrainedBitfield");
+ installBest = true;
} else {
// Otherwise, we're not installing. Update the bit size
// of the current span to go all the way to limitOffset, which is
@@ -773,7 +774,104 @@ void CIRRecordLowering::computeVolatileBitfields() {
!cirGenTypes.getCGModule().getCodeGenOpts().AAPCSBitfieldWidth)
return;
- assert(!cir::MissingFeatures::armComputeVolatileBitfields());
+ for (auto &[field, info] : bitFields) {
+ mlir::Type resLTy = cirGenTypes.convertTypeForMem(field->getType());
+
+ if (astContext.toBits(astRecordLayout.getAlignment()) <
+ getSizeInBits(resLTy).getQuantity())
+ continue;
+
+ // CIRRecordLowering::setBitFieldInfo() pre-adjusts the bit-field offsets
+ // for big-endian targets, but it assumes a container of width
+ // info.storageSize. Since AAPCS uses a different container size (width
+ // of the type), we first undo that calculation here and redo it once
+ // the bit-field offset within the new container is calculated.
+ const unsigned oldOffset =
+ isBigEndian() ? info.storageSize - (info.offset + info.size)
+ : info.offset;
+ // Offset to the bit-field from the beginning of the struct.
+ const unsigned absoluteOffset =
+ astContext.toBits(info.storageOffset) + oldOffset;
+
+ // Container size is the width of the bit-field type.
+ const unsigned storageSize = getSizeInBits(resLTy).getQuantity();
+ // Nothing to do if the access uses the desired
+ // container width and is naturally aligned.
+ if (info.storageSize == storageSize && (oldOffset % storageSize == 0))
+ continue;
+
+ // Offset within the container.
+ unsigned offset = absoluteOffset & (storageSize - 1);
+ // Bail out if an aligned load of the container cannot cover the entire
+ // bit-field. This can happen, for example, if the bit-field is part of a
+ // packed struct. AAPCS does not define access rules for such cases; we
+ // let clang follow its own rules.
+ if (offset + info.size > storageSize)
+ continue;
+
+ // Re-adjust offsets for big-endian targets.
+ if (isBigEndian())
+ offset = storageSize - (offset + info.size);
+
+ const CharUnits storageOffset =
+ astContext.toCharUnitsFromBits(absoluteOffset & ~(storageSize - 1));
+ const CharUnits end = storageOffset +
+ astContext.toCharUnitsFromBits(storageSize) -
+ CharUnits::One();
+
+ const ASTRecordLayout &layout =
+ astContext.getASTRecordLayout(field->getParent());
+ // If the access would touch memory outside the record, bail out.
+ const CharUnits recordSize = layout.getSize();
+ if (end >= recordSize)
+ continue;
+
+ // Bail out if performing this load would access non-bit-fields members.
+ bool conflict = false;
+ for (const auto *f : recordDecl->fields()) {
+ // Allow overlap with sized bit-fields.
+ if (f->isBitField() && !f->isZeroLengthBitField())
+ continue;
+
+ const CharUnits fOffset = astContext.toCharUnitsFromBits(
+ layout.getFieldOffset(f->getFieldIndex()));
+
+ // As C11 defines it, a zero-sized bit-field acts as a barrier, so the
+ // fields before and after it should be free of race conditions.
+ // The AAPCS acknowledges this and imposes no restrictions when the
+ // natural container overlaps a zero-length bit-field.
+ if (f->isZeroLengthBitField()) {
+ if (end > fOffset && storageOffset < fOffset) {
+ conflict = true;
+ break;
+ }
+ }
+
+ const CharUnits fEnd =
+ fOffset +
+ astContext.toCharUnitsFromBits(astContext.toBits(
+ getSizeInBits(cirGenTypes.convertTypeForMem(f->getType())))) -
+ CharUnits::One();
+ // If no overlap, continue.
+ if (end < fOffset || fEnd < storageOffset)
+ continue;
+
+ // The desired load overlaps a non-bit-field member, bail out.
+ conflict = true;
+ break;
+ }
+
+ if (conflict)
+ continue;
+ // Write the new bit-field access parameters.
+ // As the storage offset is now defined as the number of elements from the
+ // start of the structure, divide the offset by the element size.
+ info.volatileStorageOffset =
+ storageOffset /
+ astContext.toCharUnitsFromBits(storageSize).getQuantity();
+ info.volatileStorageSize = storageSize;
+ info.volatileOffset = offset;
+ }
}
void CIRRecordLowering::accumulateBases(const CXXRecordDecl *cxxRecordDecl) {
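
An illustrative (hypothetical) input for the new computeVolatileBitfields logic on AAPCS targets: a volatile bit-field must be accessed through a container as wide as its declared type, so the access parameters are recomputed against an int-sized container when such a load stays inside the record and overlaps no non-bit-field member:

    struct Status {
      volatile int ready : 1;  /* AAPCS: accessed via a 32-bit container */
      volatile int error : 3;
      char tag;                /* a widened load that overlapped this
                                  non-bit-field member would bail out */
    };
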
diff --git a/clang/lib/CIR/CodeGen/CIRGenStmt.cpp b/clang/lib/CIR/CodeGen/CIRGenStmt.cpp
index 21bee33..50642e7 100644
--- a/clang/lib/CIR/CodeGen/CIRGenStmt.cpp
+++ b/clang/lib/CIR/CodeGen/CIRGenStmt.cpp
@@ -79,14 +79,15 @@ mlir::LogicalResult CIRGenFunction::emitStmt(const Stmt *s,
#define EXPR(Type, Base) case Stmt::Type##Class:
#include "clang/AST/StmtNodes.inc"
{
- // Remember the block we came in on.
- mlir::Block *incoming = builder.getInsertionBlock();
- assert(incoming && "expression emission must have an insertion point");
+ assert(builder.getInsertionBlock() &&
+ "expression emission must have an insertion point");
emitIgnoredExpr(cast<Expr>(s));
- mlir::Block *outgoing = builder.getInsertionBlock();
- assert(outgoing && "expression emission cleared block!");
+ // Classic codegen checks here whether the emitter created a new block
+ // that is never used (by comparing the incoming and outgoing insertion
+ // points) and deletes it if unused. In CIR, we handle that during the
+ // cir.canonicalize pass.
return mlir::success();
}
case Stmt::IfStmtClass:
@@ -363,8 +364,8 @@ mlir::LogicalResult CIRGenFunction::emitIfStmt(const IfStmt &s) {
mlir::LogicalResult CIRGenFunction::emitDeclStmt(const DeclStmt &s) {
assert(builder.getInsertionBlock() && "expected valid insertion point");
- for (const Decl *I : s.decls())
- emitDecl(*I);
+ for (const Decl *i : s.decls())
+ emitDecl(*i, /*evaluateConditionDecl=*/true);
return mlir::success();
}
@@ -875,7 +876,7 @@ mlir::LogicalResult CIRGenFunction::emitSwitchStmt(const clang::SwitchStmt &s) {
return mlir::failure();
if (s.getConditionVariable())
- emitDecl(*s.getConditionVariable());
+ emitDecl(*s.getConditionVariable(), /*evaluateConditionDecl=*/true);
mlir::Value condV = emitScalarExpr(s.getCond());
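
Two short C++ examples (hypothetical) of what evaluateConditionDecl now enables through emitDeclStmt and emitSwitchStmt: tuple-like structured bindings, whose holding variables are emitted by maybeEmitDeferredVarDeclInit, and switch condition variables:

    #include <tuple>

    int demo(int (*get)(void)) {
      auto [a, b] = std::make_tuple(1, 2); // Decl::Decomposition now takes
                                           // the Decl::Var path; holding
                                           // vars are emitted afterwards
      switch (int v = get()) {             // condition variable emitted with
      case 0:                              // evaluateConditionDecl = true
        return a + b;
      default:
        return v;
      }
    }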