Diffstat (limited to 'clang/lib/CIR/CodeGen/CIRGenFunction.h'):
 clang/lib/CIR/CodeGen/CIRGenFunction.h | 95 +++++++++++++++++++++++++++++++--
 1 file changed, 93 insertions(+), 2 deletions(-)
diff --git a/clang/lib/CIR/CodeGen/CIRGenFunction.h b/clang/lib/CIR/CodeGen/CIRGenFunction.h
index 3c36f5c..e3b9b6a 100644
--- a/clang/lib/CIR/CodeGen/CIRGenFunction.h
+++ b/clang/lib/CIR/CodeGen/CIRGenFunction.h
@@ -60,11 +60,44 @@ private:
/// is where the next operations will be introduced.
CIRGenBuilderTy &builder;
+ /// A jump destination is an abstract label, branching to which may
+ /// require a jump out through normal cleanups.
+ struct JumpDest {
+ JumpDest() = default;
+ JumpDest(mlir::Block *block, EHScopeStack::stable_iterator depth = {},
+ unsigned index = 0)
+ : block(block), scopeDepth(depth), index(index) {}
+
+ bool isValid() const { return block != nullptr; }
+ mlir::Block *getBlock() const { return block; }
+ EHScopeStack::stable_iterator getScopeDepth() const { return scopeDepth; }
+ unsigned getDestIndex() const { return index; }
+
+ // This should be used cautiously.
+ void setScopeDepth(EHScopeStack::stable_iterator depth) {
+ scopeDepth = depth;
+ }
+
+ private:
+ mlir::Block *block = nullptr;
+ EHScopeStack::stable_iterator scopeDepth;
+ unsigned index = 0;
+ };
+
public:
/// The GlobalDecl for the current function being compiled or the global
/// variable currently being initialized.
clang::GlobalDecl curGD;
+ /// Unified return block.
+ /// In CIR this is a function because each scope might have
+ /// its own associated return block.
+ JumpDest returnBlock(mlir::Block *retBlock) {
+ return getJumpDestInCurrentScope(retBlock);
+ }
+
+ unsigned nextCleanupDestIndex = 1;
+
/// The compiler-generated variable that holds the return value.
std::optional<mlir::Value> fnRetAlloca;
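A JumpDest pairs a target block with the cleanup depth captured at creation time, so a later branch can detect whether it crosses cleanups. A minimal sketch of the intended use from inside CIRGenFunction, relying only on members introduced in this patch (retBlock and loc are assumed to be in scope; emitBranchThroughCleanup is declared further below):

    // Hypothetical call site: wrap a scope's return block so a later
    // `return` can branch to it through any pending cleanups.
    JumpDest retDest = returnBlock(retBlock);
    emitBranchThroughCleanup(loc, retDest);

    // A default-constructed JumpDest is invalid until a block is attached.
    JumpDest empty;
    assert(!empty.isValid());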
@@ -574,6 +607,16 @@ public:
}
};
+ /// The given basic block lies in the current EH scope, but may be a
+ /// target of a potentially scope-crossing jump; get a stable handle
+ /// to which we can perform this jump later.
+ /// CIRGen: this mostly tracks state for figuring out the proper scope
+ /// information; no actual branches are emitted.
+ JumpDest getJumpDestInCurrentScope(mlir::Block *target) {
+ return JumpDest(target, ehStack.getInnermostNormalCleanup(),
+ nextCleanupDestIndex++);
+ }
+
/// Perform the usual unary conversions on the specified expression and
/// compare the result against zero, returning an Int1Ty value.
mlir::Value evaluateExprAsBool(const clang::Expr *e);
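In classic CodeGen the analogous API is used to create, for example, a loop's break destination before the body is emitted; a hedged sketch under that assumption (loopRegion is a hypothetical mlir::Region):

    // Capture the loop's exit block as a jump destination up front, so a
    // nested `break` can leave through cleanups pushed inside the body.
    mlir::Block *exitBlock = builder.createBlock(&loopRegion);
    JumpDest breakDest = getJumpDestInCurrentScope(exitBlock);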
@@ -733,6 +776,11 @@ public:
SourceLocExprScopeGuard sourceLocScope;
};
+ struct CXXDefaultArgExprScope : SourceLocExprScopeGuard {
+ CXXDefaultArgExprScope(CIRGenFunction &cgf, const CXXDefaultArgExpr *e)
+ : SourceLocExprScopeGuard(e, cgf.curSourceLocExprScope) {}
+ };
+
LValue makeNaturalAlignPointeeAddrLValue(mlir::Value v, clang::QualType t);
LValue makeNaturalAlignAddrLValue(mlir::Value val, QualType ty);
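The guard exists so that source-location expressions inside a defaulted argument resolve at the point of use rather than at the parameter's declaration. A sketch, assuming `e` is the CXXDefaultArgExpr currently being emitted:

    // Hypothetical: keep the guard alive while emitting the wrapped
    // expression so a nested SourceLocExpr sees the call site.
    CXXDefaultArgExprScope argScope(*this, e);
    // ... emit e->getExpr() here ...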
@@ -853,6 +901,13 @@ public:
FunctionArgList args, clang::SourceLocation loc,
clang::SourceLocation startLoc);
+ /// Returns true if the aggregate type has a volatile member.
+ bool hasVolatileMember(QualType t) {
+ if (const auto *rd = t->getAsRecordDecl())
+ return rd->hasVolatileMember();
+ return false;
+ }
+
/// The cleanup depth enclosing all the cleanups associated with the
/// parameters.
EHScopeStack::stable_iterator prologueCleanupDepth;
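hasVolatileMember pairs with the isVolatile parameter added to emitAggregateCopy later in this patch. A sketch of the expected call pattern (destLV and srcLV are assumed lvalues for an aggregate assignment expression `e`):

    // Hypothetical: lower a struct assignment as a volatile copy when the
    // record type declares a volatile member.
    bool isVolatile = hasVolatileMember(e->getType());
    emitAggregateCopy(destLV, srcLV, e->getType(),
                      AggValueSlot::MayOverlap, isVolatile);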
@@ -942,6 +997,9 @@ public:
LexicalScope *parentScope = nullptr;
+ // Holds the cir.try operation for scopes of Kind::Try.
+ cir::TryOp tryOp = nullptr;
+
// Only Regular is used at the moment. Support for other kinds will be
// added as the relevant statements/expressions are upstreamed.
enum Kind {
@@ -1001,6 +1059,10 @@ public:
void setAsGlobalInit() { scopeKind = Kind::GlobalInit; }
void setAsSwitch() { scopeKind = Kind::Switch; }
void setAsTernary() { scopeKind = Kind::Ternary; }
+ void setAsTry(cir::TryOp op) {
+ scopeKind = Kind::Try;
+ tryOp = op;
+ }
// Lazily create the cleanup block, or return it if already available.
mlir::Block *getOrCreateCleanupBlock(mlir::OpBuilder &builder) {
@@ -1010,6 +1072,11 @@ public:
return cleanupBlock;
}
+ cir::TryOp getTry() {
+ assert(isTry());
+ return tryOp;
+ }
+
mlir::Block *getCleanupBlock(mlir::OpBuilder &builder) {
return cleanupBlock;
}
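Kind::Try is the first scope kind that carries a payload. A sketch of the tag-and-query pattern (creation of the cir.try operation is elided; isTry() is the predicate implied by the assert in getTry):

    // Hypothetical: while emitting a CXXTryStmt, tag the current lexical
    // scope so nested emission can recover the enclosing cir.try.
    lexScope.setAsTry(tryOp);
    // ... later, from nested code:
    if (lexScope.isTry())
      cir::TryOp enclosing = lexScope.getTry();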
@@ -1077,6 +1144,9 @@ public:
static Destroyer destroyCXXObject;
+ void pushDestroy(QualType::DestructionKind dtorKind, Address addr,
+ QualType type);
+
void pushDestroy(CleanupKind kind, Address addr, QualType type,
Destroyer *destroyer);
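The new overload presumably derives both the cleanup kind and the destroyer from the destruction kind, as the classic CodeGen version does; a hedged sketch (getCleanupKind and getDestroyer are assumed helpers, not part of this patch):

    // Hypothetical implementation, mirroring clang CodeGen.
    void CIRGenFunction::pushDestroy(QualType::DestructionKind dtorKind,
                                     Address addr, QualType type) {
      assert(dtorKind && "cannot push destroy for trivial type");
      pushDestroy(getCleanupKind(dtorKind), addr, type,
                  getDestroyer(dtorKind));
    }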
@@ -1131,14 +1201,16 @@ public:
/// occupied by some other object. More efficient code can often be
/// generated if not.
void emitAggregateCopy(LValue dest, LValue src, QualType eltTy,
- AggValueSlot::Overlap_t mayOverlap);
+ AggValueSlot::Overlap_t mayOverlap,
+ bool isVolatile = false);
/// Emit code to compute the specified expression which can have any type. The
/// result is returned as an RValue struct. If this is an aggregate
/// expression, the aggSlot argument indicates where the result
/// should be returned.
RValue emitAnyExpr(const clang::Expr *e,
- AggValueSlot aggSlot = AggValueSlot::ignored());
+ AggValueSlot aggSlot = AggValueSlot::ignored(),
+ bool ignoreResult = false);
/// Emits the code necessary to evaluate an arbitrary expression into the
/// given memory location.
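Both new parameters are caller hints: isVolatile forces a volatile aggregate copy, and ignoreResult lets an expression be evaluated for its side effects only. For example (hypothetical call site, with `e` a comma BinaryOperator):

    // Evaluate the LHS of a comma operator, discarding its value but
    // keeping its side effects.
    emitAnyExpr(e->getLHS(), AggValueSlot::ignored(),
                /*ignoreResult=*/true);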
@@ -1192,6 +1264,8 @@ public:
LValue emitBinaryOperatorLValue(const BinaryOperator *e);
+ cir::BrOp emitBranchThroughCleanup(mlir::Location loc, JumpDest dest);
+
mlir::LogicalResult emitBreakStmt(const clang::BreakStmt &s);
RValue emitBuiltinExpr(const clang::GlobalDecl &gd, unsigned builtinID,
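emitBranchThroughCleanup is the consumer of JumpDest: it is expected to emit any cleanups sitting between the current EH depth and dest.getScopeDepth() before branching, degenerating to a plain cir.br when nothing intervenes. Sketch (loc and dest assumed to be in scope):

    // Hypothetical scope-crossing jump; the returned op is the final
    // branch into dest.getBlock().
    cir::BrOp br = emitBranchThroughCleanup(loc, dest);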
@@ -1331,6 +1405,13 @@ public:
mlir::LogicalResult emitCXXTryStmt(const clang::CXXTryStmt &s);
+ mlir::LogicalResult emitCXXTryStmtUnderScope(const clang::CXXTryStmt &s);
+
+ void enterCXXTryStmt(const CXXTryStmt &s, cir::TryOp tryOp,
+ bool isFnTryBlock = false);
+
+ void exitCXXTryStmt(const CXXTryStmt &s, bool isFnTryBlock = false);
+
void emitCtorPrologue(const clang::CXXConstructorDecl *ctor,
clang::CXXCtorType ctorType, FunctionArgList &args);
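enterCXXTryStmt and exitCXXTryStmt are expected to bracket emission of the guarded body, pushing catch handlers on entry and popping them on exit. A sketch of the pairing (emitStmt is assumed to be the usual statement-emission entry point):

    // Hypothetical bracketing inside emitCXXTryStmt:
    enterCXXTryStmt(s, tryOp);
    mlir::LogicalResult res =
        emitStmt(s.getTryBlock(), /*useCurrentScope=*/true);
    exitCXXTryStmt(s);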
@@ -1518,6 +1599,10 @@ public:
LValue emitMemberExpr(const MemberExpr *e);
+ LValue emitOpaqueValueLValue(const OpaqueValueExpr *e);
+
+ LValue emitConditionalOperatorLValue(const AbstractConditionalOperator *expr);
+
/// Given an expression with a pointer type, emit the value and compute our
/// best estimate of the alignment of the pointee.
///
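emitConditionalOperatorLValue covers the C++ case where the conditional operator itself yields an lvalue, for example:

    // C++ source whose assignment needs an lvalue for the conditional
    // (cond is assumed to be a bool in scope):
    int a = 0, b = 0;
    (cond ? a : b) = 42; // assigns through whichever operand is selected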
@@ -1574,6 +1659,10 @@ public:
bool buildingTopLevelCase);
mlir::LogicalResult emitSwitchStmt(const clang::SwitchStmt &s);
+ mlir::Value emitTargetBuiltinExpr(unsigned builtinID,
+ const clang::CallExpr *e,
+ ReturnValueSlot &returnValue);
+
/// Given a value and its clang type, returns the value cast to its memory
/// representation.
/// Note: CIR defers most of the special casting to the final lowering passes
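emitTargetBuiltinExpr presumably fans out per target architecture; with only the x86 hook declared in this patch (below), a plausible shape, assuming a getTarget() accessor like classic CodeGen's:

    // Hypothetical dispatch, mirroring clang CodeGen's EmitTargetBuiltinExpr.
    switch (getTarget().getTriple().getArch()) {
    case llvm::Triple::x86:
    case llvm::Triple::x86_64:
      return emitX86BuiltinExpr(builtinID, e);
    default:
      return {};
    }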
@@ -1612,6 +1701,8 @@ public:
mlir::LogicalResult emitWhileStmt(const clang::WhileStmt &s);
+ mlir::Value emitX86BuiltinExpr(unsigned builtinID, const CallExpr *e);
+
/// Given an assignment `*lhs = rhs`, emit a test that checks if \p rhs is
/// nonnull, if \p lhs is marked _Nonnull.
void emitNullabilityCheck(LValue lhs, mlir::Value rhs,