Diffstat (limited to 'clang')
-rw-r--r--  clang/bindings/python/clang/cindex.py | 2
-rw-r--r--  clang/cmake/caches/Release.cmake | 39
-rw-r--r--  clang/include/clang/CIR/Dialect/IR/CIROps.td | 73
-rw-r--r--  clang/include/clang/CIR/MissingFeatures.h | 7
-rw-r--r--  clang/include/clang/Sema/Sema.h | 2
-rw-r--r--  clang/include/clang/Sema/SemaBase.h | 9
-rw-r--r--  clang/lib/AST/ByteCode/Compiler.cpp | 102
-rw-r--r--  clang/lib/Analysis/FlowSensitive/Models/UncheckedStatusOrAccessModel.cpp | 74
-rw-r--r--  clang/lib/Basic/Targets.cpp | 4
-rw-r--r--  clang/lib/Basic/Targets/ARM.cpp | 3
-rw-r--r--  clang/lib/Basic/Targets/RISCV.cpp | 5
-rw-r--r--  clang/lib/Basic/Targets/RISCV.h | 6
-rw-r--r--  clang/lib/CIR/CodeGen/CIRGenAtomic.cpp | 143
-rw-r--r--  clang/lib/CIR/CodeGen/CIRGenBuiltin.cpp | 95
-rw-r--r--  clang/lib/CIR/CodeGen/CIRGenBuiltinX86.cpp | 814
-rw-r--r--  clang/lib/CIR/CodeGen/CIRGenCXXABI.cpp | 7
-rw-r--r--  clang/lib/CIR/CodeGen/CIRGenCXXABI.h | 25
-rw-r--r--  clang/lib/CIR/CodeGen/CIRGenCleanup.cpp | 227
-rw-r--r--  clang/lib/CIR/CodeGen/CIRGenCleanup.h | 112
-rw-r--r--  clang/lib/CIR/CodeGen/CIRGenException.cpp | 151
-rw-r--r--  clang/lib/CIR/CodeGen/CIRGenExpr.cpp | 20
-rw-r--r--  clang/lib/CIR/CodeGen/CIRGenExprAggregate.cpp | 3
-rw-r--r--  clang/lib/CIR/CodeGen/CIRGenExprCXX.cpp | 73
-rw-r--r--  clang/lib/CIR/CodeGen/CIRGenExprConstant.cpp | 10
-rw-r--r--  clang/lib/CIR/CodeGen/CIRGenExprScalar.cpp | 5
-rw-r--r--  clang/lib/CIR/CodeGen/CIRGenFunction.cpp | 43
-rw-r--r--  clang/lib/CIR/CodeGen/CIRGenFunction.h | 70
-rw-r--r--  clang/lib/CIR/CodeGen/CIRGenItaniumCXXABI.cpp | 75
-rw-r--r--  clang/lib/CIR/CodeGen/CIRGenModule.cpp | 13
-rw-r--r--  clang/lib/CIR/CodeGen/CIRGenStmt.cpp | 121
-rw-r--r--  clang/lib/CIR/CodeGen/CIRGenTypeCache.h | 10
-rw-r--r--  clang/lib/CIR/CodeGen/CMakeLists.txt | 3
-rw-r--r--  clang/lib/CIR/CodeGen/EHScopeStack.h | 90
-rw-r--r--  clang/lib/CIR/Dialect/IR/CIRDialect.cpp | 15
-rw-r--r--  clang/lib/CIR/Dialect/Transforms/FlattenCFG.cpp | 94
-rw-r--r--  clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp | 141
-rw-r--r--  clang/lib/CodeGen/BackendUtil.cpp | 6
-rw-r--r--  clang/lib/CodeGen/CodeGenModule.cpp | 19
-rw-r--r--  clang/lib/Driver/ToolChains/Arch/ARM.cpp | 5
-rw-r--r--  clang/lib/Format/ContinuationIndenter.cpp | 11
-rw-r--r--  clang/lib/Sema/HeuristicResolver.cpp | 5
-rw-r--r--  clang/lib/Sema/Sema.cpp | 11
-rw-r--r--  clang/lib/Sema/SemaBase.cpp | 17
-rw-r--r--  clang/lib/Sema/SemaOverload.cpp | 13
-rw-r--r--  clang/lib/Sema/SemaRISCV.cpp | 16
-rw-r--r--  clang/test/CIR/CodeGen/atomic.c | 523
-rw-r--r--  clang/test/CIR/CodeGen/builtin_inline.c | 91
-rw-r--r--  clang/test/CIR/CodeGen/dtors.cpp | 12
-rw-r--r--  clang/test/CIR/CodeGen/lambda.cpp | 24
-rw-r--r--  clang/test/CIR/CodeGen/new.cpp | 121
-rw-r--r--  clang/test/CIR/CodeGen/statement-exprs.c | 10
-rw-r--r--  clang/test/CIR/CodeGen/struct-init.cpp | 16
-rw-r--r--  clang/test/CIR/CodeGen/struct.cpp | 64
-rw-r--r--  clang/test/CIR/CodeGen/try-catch.cpp | 87
-rw-r--r--  clang/test/CIR/CodeGen/vla.c | 59
-rw-r--r--  clang/test/CIR/IR/invalid-atomic.cir | 7
-rw-r--r--  clang/test/CodeGen/AArch64/sign-return-address.c | 12
-rw-r--r--  clang/test/CodeGen/arm-branch-protection-attr-2.c | 8
-rw-r--r--  clang/test/Driver/arm-abi.c | 2
-rw-r--r--  clang/test/Driver/fuchsia.c | 9
-rw-r--r--  clang/test/Driver/print-supported-extensions-riscv.c | 5
-rw-r--r--  clang/test/Frontend/arm-ignore-branch-protection-option.c | 2
-rw-r--r--  clang/test/Preprocessor/riscv-atomics.c | 24
-rw-r--r--  clang/test/Preprocessor/riscv-target-features-sifive.c | 45
-rw-r--r--  clang/test/lit.cfg.py | 13
-rw-r--r--  clang/unittests/Analysis/FlowSensitive/UncheckedStatusOrAccessModelTestFixture.cpp | 161
-rw-r--r--  clang/unittests/Format/FormatTestComments.cpp | 19
-rw-r--r--  clang/unittests/Sema/HeuristicResolverTest.cpp | 22
68 files changed, 3857 insertions, 268 deletions
diff --git a/clang/bindings/python/clang/cindex.py b/clang/bindings/python/clang/cindex.py
index da97bd3..2786add 100644
--- a/clang/bindings/python/clang/cindex.py
+++ b/clang/bindings/python/clang/cindex.py
@@ -3015,7 +3015,7 @@ class _CXUnsavedFile(Structure):
# Functions calls through the python interface are rather slow. Fortunately,
-# for most symboles, we do not need to perform a function call. Their spelling
+# for most symbols, we do not need to perform a function call. Their spelling
# never changes and is consequently provided by this spelling cache.
SPELLING_CACHE = {
# 0: CompletionChunk.Kind("Optional"),
diff --git a/clang/cmake/caches/Release.cmake b/clang/cmake/caches/Release.cmake
index a523cc5..82bfdc0 100644
--- a/clang/cmake/caches/Release.cmake
+++ b/clang/cmake/caches/Release.cmake
@@ -44,6 +44,19 @@ set(LLVM_RELEASE_ENABLE_LTO THIN CACHE STRING "")
set(LLVM_RELEASE_ENABLE_PGO ON CACHE BOOL "")
set(LLVM_RELEASE_ENABLE_RUNTIMES ${DEFAULT_RUNTIMES} CACHE STRING "")
set(LLVM_RELEASE_ENABLE_PROJECTS ${DEFAULT_PROJECTS} CACHE STRING "")
+
+# This option enables linking stage2 clang statically with the runtimes
+# (libc++ and compiler-rt) from stage1. In theory this will give the
+# binaries better performance and make them more portable. However,
+# this configuration is not well tested and causes build failures with
+# the flang-rt test cases, since the -stdlib=libc++ flag does not
+# get propagated to the runtimes build. There is also a separate
+# issue on Darwin where clang will use the local libc++ headers, but
+# link with system libc++ which can cause some incompatibilities.
+# See https://github.com/llvm/llvm-project/issues/77653
+# Because of these problems, this option will default to OFF.
+set(LLVM_RELEASE_ENABLE_LINK_LOCAL_RUNTIMES OFF CACHE BOOL "")
+
# Note we don't need to add install here, since it is one of the pre-defined
# steps.
set(LLVM_RELEASE_FINAL_STAGE_TARGETS "clang;package;check-all;check-llvm;check-clang" CACHE STRING "")
@@ -55,8 +68,12 @@ set(CLANG_ENABLE_BOOTSTRAP ON CACHE BOOL "")
set(STAGE1_PROJECTS "clang")
+# Need to build compiler-rt in order to use PGO for later stages.
+set(STAGE1_RUNTIMES "compiler-rt")
# Build all runtimes so we can statically link them into the stage2 compiler.
-set(STAGE1_RUNTIMES "compiler-rt;libcxx;libcxxabi;libunwind")
+if(LLVM_RELEASE_ENABLE_LINK_LOCAL_RUNTIMES)
+ list(APPEND STAGE1_RUNTIMES "libcxx;libcxxabi;libunwind")
+endif()
if (LLVM_RELEASE_ENABLE_PGO)
list(APPEND STAGE1_PROJECTS "lld")
@@ -118,11 +135,13 @@ set_instrument_and_final_stage_var(LLVM_ENABLE_LTO "${LLVM_RELEASE_ENABLE_LTO}"
if (LLVM_RELEASE_ENABLE_LTO)
set_instrument_and_final_stage_var(LLVM_ENABLE_LLD "ON" BOOL)
endif()
-set_instrument_and_final_stage_var(LLVM_ENABLE_LIBCXX "ON" BOOL)
-set_instrument_and_final_stage_var(LLVM_STATIC_LINK_CXX_STDLIB "ON" BOOL)
-set(RELEASE_LINKER_FLAGS "-rtlib=compiler-rt --unwindlib=libunwind")
-if(NOT ${CMAKE_HOST_SYSTEM_NAME} MATCHES "Darwin")
- set(RELEASE_LINKER_FLAGS "${RELEASE_LINKER_FLAGS} -static-libgcc")
+if(LLVM_RELEASE_ENABLE_LINK_LOCAL_RUNTIMES)
+ set_instrument_and_final_stage_var(LLVM_ENABLE_LIBCXX "ON" BOOL)
+ set_instrument_and_final_stage_var(LLVM_STATIC_LINK_CXX_STDLIB "ON" BOOL)
+ set(RELEASE_LINKER_FLAGS "-rtlib=compiler-rt --unwindlib=libunwind")
+ if(NOT ${CMAKE_HOST_SYSTEM_NAME} MATCHES "Darwin")
+ set(RELEASE_LINKER_FLAGS "${RELEASE_LINKER_FLAGS} -static-libgcc")
+ endif()
endif()
# Set flags for bolt
@@ -130,9 +149,11 @@ if (${CMAKE_HOST_SYSTEM_NAME} MATCHES "Linux")
set(RELEASE_LINKER_FLAGS "${RELEASE_LINKER_FLAGS} -Wl,--emit-relocs,-znow")
endif()
-set_instrument_and_final_stage_var(CMAKE_EXE_LINKER_FLAGS ${RELEASE_LINKER_FLAGS} STRING)
-set_instrument_and_final_stage_var(CMAKE_SHARED_LINKER_FLAGS ${RELEASE_LINKER_FLAGS} STRING)
-set_instrument_and_final_stage_var(CMAKE_MODULE_LINKER_FLAGS ${RELEASE_LINKER_FLAGS} STRING)
+if (RELEASE_LINKER_FLAGS)
+ set_instrument_and_final_stage_var(CMAKE_EXE_LINKER_FLAGS ${RELEASE_LINKER_FLAGS} STRING)
+ set_instrument_and_final_stage_var(CMAKE_SHARED_LINKER_FLAGS ${RELEASE_LINKER_FLAGS} STRING)
+ set_instrument_and_final_stage_var(CMAKE_MODULE_LINKER_FLAGS ${RELEASE_LINKER_FLAGS} STRING)
+endif()
# Final Stage Config (stage2)
set_final_stage_var(LLVM_ENABLE_RUNTIMES "${LLVM_RELEASE_ENABLE_RUNTIMES}" STRING)
diff --git a/clang/include/clang/CIR/Dialect/IR/CIROps.td b/clang/include/clang/CIR/Dialect/IR/CIROps.td
index b1e6ba2..86d09d7 100644
--- a/clang/include/clang/CIR/Dialect/IR/CIROps.td
+++ b/clang/include/clang/CIR/Dialect/IR/CIROps.td
@@ -4457,6 +4457,79 @@ def CIR_TryOp : CIR_Op<"try",[
// Atomic operations
//===----------------------------------------------------------------------===//
+def CIR_AtomicFetchKind : CIR_I32EnumAttr<
+ "AtomicFetchKind", "Binary opcode for atomic fetch-and-update operations", [
+ I32EnumAttrCase<"Add", 0, "add">,
+ I32EnumAttrCase<"Sub", 1, "sub">,
+ I32EnumAttrCase<"And", 2, "and">,
+ I32EnumAttrCase<"Xor", 3, "xor">,
+ I32EnumAttrCase<"Or", 4, "or">,
+ I32EnumAttrCase<"Nand", 5, "nand">,
+ I32EnumAttrCase<"Max", 6, "max">,
+ I32EnumAttrCase<"Min", 7, "min">
+]>;
+
+def CIR_AtomicFetchOp : CIR_Op<"atomic.fetch", [
+ AllTypesMatch<["result", "val"]>,
+ TypesMatchWith<"type of 'val' must match the pointee type of 'ptr'",
+ "ptr", "val", "mlir::cast<cir::PointerType>($_self).getPointee()">
+]> {
+ let summary = "Atomic fetch-and-update operation";
+ let description = [{
+ C/C++ atomic fetch-and-update operation. This operation implements the C/C++
+ builtin functions `__atomic_<binop>_fetch`, `__atomic_fetch_<binop>`, and
+ `__c11_atomic_fetch_<binop>`, where `<binop>` is one of the following binary
+ opcodes: `add`, `sub`, `and`, `xor`, `or`, `nand`, `max`, and `min`.
+
+ This operation takes 2 arguments: a pointer `ptr` and a value `val`. The
+ type of `val` must match the pointee type of `ptr`. If the binary operation
+ is `add`, `sub`, `max`, or `min`, the type of `val` may either be an integer
+ type or a floating-point type. Otherwise, `val` must be an integer.
+
+ This operation atomically loads the value from `ptr`, performs the binary
+ operation as indicated by `binop` on the loaded value and `val`, and stores
+ the result back to `ptr`. If the `fetch_first` flag is present, the result
+ of this operation is the old value loaded from `ptr` before the binary
+ operation. Otherwise, the result of this operation is the result of the
+ binary operation.
+
+ Example:
+ %res = cir.atomic.fetch add seq_cst %ptr, %val
+ : (!cir.ptr<!s32i>, !s32i) -> !s32i
+ }];
+ let results = (outs CIR_AnyIntOrFloatType:$result);
+ let arguments = (ins
+ Arg<CIR_PtrToIntOrFloatType, "", [MemRead, MemWrite]>:$ptr,
+ CIR_AnyIntOrFloatType:$val,
+ CIR_AtomicFetchKind:$binop,
+ Arg<CIR_MemOrder, "memory order">:$mem_order,
+ UnitAttr:$is_volatile,
+ UnitAttr:$fetch_first
+ );
+
+ let assemblyFormat = [{
+ $binop $mem_order
+ (`fetch_first` $fetch_first^)?
+ $ptr `,` $val
+ (`volatile` $is_volatile^)?
+ `:` `(` qualified(type($ptr)) `,` qualified(type($val)) `)`
+ `->` type($result) attr-dict
+ }];
+
+ let hasVerifier = 1;
+
+ let extraLLVMLoweringPatternDecl = [{
+ mlir::Value buildPostOp(cir::AtomicFetchOp op, OpAdaptor adaptor,
+ mlir::ConversionPatternRewriter &rewriter,
+ mlir::Value rmwVal, bool isInt) const;
+
+ mlir::Value buildMinMaxPostOp(cir::AtomicFetchOp op, OpAdaptor adaptor,
+ mlir::ConversionPatternRewriter &rewriter,
+ mlir::Value rmwVal, bool isInt,
+ bool isSigned) const;
+ }];
+}
+
def CIR_AtomicXchgOp : CIR_Op<"atomic.xchg", [
AllTypesMatch<["result", "val"]>,
TypesMatchWith<"type of 'val' must match the pointee type of 'ptr'",
diff --git a/clang/include/clang/CIR/MissingFeatures.h b/clang/include/clang/CIR/MissingFeatures.h
index 01da626..598e826a 100644
--- a/clang/include/clang/CIR/MissingFeatures.h
+++ b/clang/include/clang/CIR/MissingFeatures.h
@@ -119,6 +119,7 @@ struct MissingFeatures {
static bool opCallLandingPad() { return false; }
static bool opCallContinueBlock() { return false; }
static bool opCallChain() { return false; }
+ static bool opCallExceptionAttr() { return false; }
// CXXNewExpr
static bool exprNewNullCheck() { return false; }
@@ -218,6 +219,9 @@ struct MissingFeatures {
static bool checkBitfieldClipping() { return false; }
static bool cirgenABIInfo() { return false; }
static bool cleanupAfterErrorDiags() { return false; }
+ static bool cleanupAppendInsts() { return false; }
+ static bool cleanupBranchThrough() { return false; }
+ static bool cleanupIndexAndBIAdjustment() { return false; }
static bool cleanupsToDeactivate() { return false; }
static bool constEmitterAggILE() { return false; }
static bool constEmitterArrayILE() { return false; }
@@ -238,6 +242,7 @@ struct MissingFeatures {
static bool deleteArray() { return false; }
static bool devirtualizeMemberFunction() { return false; }
static bool ehCleanupFlags() { return false; }
+ static bool ehCleanupHasPrebranchedFallthrough() { return false; }
static bool ehCleanupScope() { return false; }
static bool ehCleanupScopeRequiresEHCleanup() { return false; }
static bool ehCleanupBranchFixups() { return false; }
@@ -256,6 +261,7 @@ struct MissingFeatures {
static bool generateDebugInfo() { return false; }
static bool globalViewIndices() { return false; }
static bool globalViewIntLowering() { return false; }
+ static bool handleBuiltinICEArguments() { return false; }
static bool hip() { return false; }
static bool incrementProfileCounter() { return false; }
static bool innermostEHScope() { return false; }
@@ -294,6 +300,7 @@ struct MissingFeatures {
static bool setNonGC() { return false; }
static bool setObjCGCLValueClass() { return false; }
static bool setTargetAttributes() { return false; }
+ static bool simplifyCleanupEntry() { return false; }
static bool sourceLanguageCases() { return false; }
static bool stackBase() { return false; }
static bool stackSaveOp() { return false; }
diff --git a/clang/include/clang/Sema/Sema.h b/clang/include/clang/Sema/Sema.h
index cb21335..87b96c2 100644
--- a/clang/include/clang/Sema/Sema.h
+++ b/clang/include/clang/Sema/Sema.h
@@ -10021,7 +10021,7 @@ public:
public:
DeferDiagsRAII(Sema &S, bool DeferDiags)
: S(S), SavedDeferDiags(S.DeferDiags) {
- S.DeferDiags = DeferDiags;
+ S.DeferDiags = SavedDeferDiags || DeferDiags;
}
~DeferDiagsRAII() { S.DeferDiags = SavedDeferDiags; }
};
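Note: with this change, diagnostic deferral becomes sticky across nested scopes; an inner DeferDiagsRAII can enable deferral but can no longer clear it while an outer scope has it enabled. A self-contained C++ sketch of the pattern, using stand-in types rather than Sema itself:

  #include <cassert>

  struct State { bool DeferDiags = false; }; // stand-in for Sema's flag

  class DeferDiagsRAII {                     // mirrors the ctor/dtor above
    State &S;
    bool Saved;

  public:
    DeferDiagsRAII(State &S, bool Defer)
        : S(S), Saved(S.DeferDiags) { S.DeferDiags = Saved || Defer; }
    ~DeferDiagsRAII() { S.DeferDiags = Saved; }
  };

  int main() {
    State S;
    {
      DeferDiagsRAII Outer(S, /*Defer=*/true);
      DeferDiagsRAII Inner(S, /*Defer=*/false);
      assert(S.DeferDiags); // the inner scope no longer clears deferral
    }
    assert(!S.DeferDiags);  // restored on scope exit
    return 0;
  }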
diff --git a/clang/include/clang/Sema/SemaBase.h b/clang/include/clang/Sema/SemaBase.h
index 550f530..8e43b0b 100644
--- a/clang/include/clang/Sema/SemaBase.h
+++ b/clang/include/clang/Sema/SemaBase.h
@@ -212,16 +212,13 @@ public:
};
/// Emit a diagnostic.
- SemaDiagnosticBuilder Diag(SourceLocation Loc, unsigned DiagID,
- bool DeferHint = false);
+ SemaDiagnosticBuilder Diag(SourceLocation Loc, unsigned DiagID);
/// Emit a partial diagnostic.
- SemaDiagnosticBuilder Diag(SourceLocation Loc, const PartialDiagnostic &PD,
- bool DeferHint = false);
+ SemaDiagnosticBuilder Diag(SourceLocation Loc, const PartialDiagnostic &PD);
/// Emit a compatibility diagnostic.
- SemaDiagnosticBuilder DiagCompat(SourceLocation Loc, unsigned CompatDiagId,
- bool DeferHint = false);
+ SemaDiagnosticBuilder DiagCompat(SourceLocation Loc, unsigned CompatDiagId);
/// Build a partial diagnostic.
PartialDiagnostic PDiag(unsigned DiagID = 0);
diff --git a/clang/lib/AST/ByteCode/Compiler.cpp b/clang/lib/AST/ByteCode/Compiler.cpp
index f7731f0..6b98927 100644
--- a/clang/lib/AST/ByteCode/Compiler.cpp
+++ b/clang/lib/AST/ByteCode/Compiler.cpp
@@ -4841,46 +4841,39 @@ Compiler<Emitter>::visitVarDecl(const VarDecl *VD, const Expr *Init,
return !NeedsOp || this->emitCheckDecl(VD, VD);
};
- auto initGlobal = [&](unsigned GlobalIndex) -> bool {
- assert(Init);
-
- if (VarT) {
- if (!this->visit(Init))
- return checkDecl() && false;
-
- return checkDecl() && this->emitInitGlobal(*VarT, GlobalIndex, VD);
- }
-
- if (!checkDecl())
- return false;
-
- if (!this->emitGetPtrGlobal(GlobalIndex, Init))
- return false;
-
- if (!visitInitializer(Init))
- return false;
-
- return this->emitFinishInitGlobal(Init);
- };
-
DeclScope<Emitter> LocalScope(this, VD);
- // We've already seen and initialized this global.
- if (UnsignedOrNone GlobalIndex = P.getGlobal(VD)) {
+ UnsignedOrNone GlobalIndex = P.getGlobal(VD);
+ if (GlobalIndex) {
+ // We've already seen and initialized this global.
if (P.getPtrGlobal(*GlobalIndex).isInitialized())
return checkDecl();
-
// The previous attempt at initialization might've been unsuccessful,
// so let's try this one.
- return !Init || (checkDecl() && initGlobal(*GlobalIndex));
+ } else if ((GlobalIndex = P.createGlobal(VD, Init))) {
+ } else {
+ return false;
}
+ if (!Init)
+ return true;
- UnsignedOrNone GlobalIndex = P.createGlobal(VD, Init);
+ if (!checkDecl())
+ return false;
- if (!GlobalIndex)
+ if (VarT) {
+ if (!this->visit(Init))
+ return false;
+
+ return this->emitInitGlobal(*VarT, *GlobalIndex, VD);
+ }
+
+ if (!this->emitGetPtrGlobal(*GlobalIndex, Init))
+ return false;
+
+ if (!visitInitializer(Init))
return false;
- return !Init || (checkDecl() && initGlobal(*GlobalIndex));
+ return this->emitFinishInitGlobal(Init);
}
// Local variables.
InitLinkScope<Emitter> ILS(this, InitLink::Decl(VD));
@@ -4890,36 +4883,37 @@ Compiler<Emitter>::visitVarDecl(const VarDecl *VD, const Expr *Init,
VD, *VarT, VD->getType().isConstQualified(),
VD->getType().isVolatileQualified(), nullptr, ScopeKind::Block,
IsConstexprUnknown);
- if (Init) {
- // If this is a toplevel declaration, create a scope for the
- // initializer.
- if (Toplevel) {
- LocalScope<Emitter> Scope(this);
- if (!this->visit(Init))
- return false;
- return this->emitSetLocal(*VarT, Offset, VD) && Scope.destroyLocals();
- }
- if (!this->visit(Init))
- return false;
- return this->emitSetLocal(*VarT, Offset, VD);
- }
- } else {
- if (UnsignedOrNone Offset = this->allocateLocal(
- VD, VD->getType(), nullptr, ScopeKind::Block, IsConstexprUnknown)) {
- if (!Init)
- return true;
- if (!this->emitGetPtrLocal(*Offset, Init))
- return false;
+ if (!Init)
+ return true;
- if (!visitInitializer(Init))
+ // If this is a toplevel declaration, create a scope for the
+ // initializer.
+ if (Toplevel) {
+ LocalScope<Emitter> Scope(this);
+ if (!this->visit(Init))
return false;
-
- return this->emitFinishInitPop(Init);
+ return this->emitSetLocal(*VarT, Offset, VD) && Scope.destroyLocals();
}
- return false;
+ if (!this->visit(Init))
+ return false;
+ return this->emitSetLocal(*VarT, Offset, VD);
}
- return true;
+ // Local composite variables.
+ if (UnsignedOrNone Offset = this->allocateLocal(
+ VD, VD->getType(), nullptr, ScopeKind::Block, IsConstexprUnknown)) {
+ if (!Init)
+ return true;
+
+ if (!this->emitGetPtrLocal(*Offset, Init))
+ return false;
+
+ if (!visitInitializer(Init))
+ return false;
+
+ return this->emitFinishInitPop(Init);
+ }
+ return false;
}
template <class Emitter>
diff --git a/clang/lib/Analysis/FlowSensitive/Models/UncheckedStatusOrAccessModel.cpp b/clang/lib/Analysis/FlowSensitive/Models/UncheckedStatusOrAccessModel.cpp
index c88a470..f068be5 100644
--- a/clang/lib/Analysis/FlowSensitive/Models/UncheckedStatusOrAccessModel.cpp
+++ b/clang/lib/Analysis/FlowSensitive/Models/UncheckedStatusOrAccessModel.cpp
@@ -24,6 +24,7 @@
#include "clang/Analysis/FlowSensitive/DataflowAnalysis.h"
#include "clang/Analysis/FlowSensitive/DataflowEnvironment.h"
#include "clang/Analysis/FlowSensitive/MatchSwitch.h"
+#include "clang/Analysis/FlowSensitive/RecordOps.h"
#include "clang/Analysis/FlowSensitive/StorageLocation.h"
#include "clang/Analysis/FlowSensitive/Value.h"
#include "clang/Basic/LLVM.h"
@@ -95,6 +96,18 @@ static QualType getStatusOrValueType(ClassTemplateSpecializationDecl *TRD) {
return TRD->getTemplateArgs().get(0).getAsType();
}
+static auto ofClassStatus() {
+ using namespace ::clang::ast_matchers; // NOLINT: Too many names
+ return ofClass(hasName("::absl::Status"));
+}
+
+static auto isStatusMemberCallWithName(llvm::StringRef member_name) {
+ using namespace ::clang::ast_matchers; // NOLINT: Too many names
+ return cxxMemberCallExpr(
+ on(expr(unless(cxxThisExpr()))),
+ callee(cxxMethodDecl(hasName(member_name), ofClassStatus())));
+}
+
static auto isStatusOrMemberCallWithName(llvm::StringRef member_name) {
using namespace ::clang::ast_matchers; // NOLINT: Too many names
return cxxMemberCallExpr(
@@ -244,6 +257,61 @@ static void transferStatusOrOkCall(const CXXMemberCallExpr *Expr,
State.Env.setValue(*Expr, OkVal);
}
+static void transferStatusCall(const CXXMemberCallExpr *Expr,
+ const MatchFinder::MatchResult &,
+ LatticeTransferState &State) {
+ RecordStorageLocation *StatusOrLoc =
+ getImplicitObjectLocation(*Expr, State.Env);
+ if (StatusOrLoc == nullptr)
+ return;
+
+ RecordStorageLocation &StatusLoc = locForStatus(*StatusOrLoc);
+
+ if (State.Env.getValue(locForOk(StatusLoc)) == nullptr)
+ initializeStatusOr(*StatusOrLoc, State.Env);
+
+ if (Expr->isPRValue())
+ copyRecord(StatusLoc, State.Env.getResultObjectLocation(*Expr), State.Env);
+ else
+ State.Env.setStorageLocation(*Expr, StatusLoc);
+}
+
+static void transferStatusOkCall(const CXXMemberCallExpr *Expr,
+ const MatchFinder::MatchResult &,
+ LatticeTransferState &State) {
+ RecordStorageLocation *StatusLoc =
+ getImplicitObjectLocation(*Expr, State.Env);
+ if (StatusLoc == nullptr)
+ return;
+
+ if (Value *Val = State.Env.getValue(locForOk(*StatusLoc)))
+ State.Env.setValue(*Expr, *Val);
+}
+
+static void transferStatusUpdateCall(const CXXMemberCallExpr *Expr,
+ const MatchFinder::MatchResult &,
+ LatticeTransferState &State) {
+  // S.Update(OtherS) sets S to the error code of OtherS if S is currently
+  // OK, and otherwise does nothing.
+ assert(Expr->getNumArgs() == 1);
+ auto *Arg = Expr->getArg(0);
+ RecordStorageLocation *ArgRecord =
+ Arg->isPRValue() ? &State.Env.getResultObjectLocation(*Arg)
+ : State.Env.get<RecordStorageLocation>(*Arg);
+ RecordStorageLocation *ThisLoc = getImplicitObjectLocation(*Expr, State.Env);
+ if (ThisLoc == nullptr || ArgRecord == nullptr)
+ return;
+
+ auto &ThisOkVal = valForOk(*ThisLoc, State.Env);
+ auto &ArgOkVal = valForOk(*ArgRecord, State.Env);
+ auto &A = State.Env.arena();
+ auto &NewVal = State.Env.makeAtomicBoolValue();
+ State.Env.assume(A.makeImplies(A.makeNot(ThisOkVal.formula()),
+ A.makeNot(NewVal.formula())));
+ State.Env.assume(A.makeImplies(NewVal.formula(), ArgOkVal.formula()));
+ State.Env.setValue(locForOk(*ThisLoc), NewVal);
+}
+
CFGMatchSwitch<LatticeTransferState>
buildTransferMatchSwitch(ASTContext &Ctx,
CFGMatchSwitchBuilder<LatticeTransferState> Builder) {
@@ -251,6 +319,12 @@ buildTransferMatchSwitch(ASTContext &Ctx,
return std::move(Builder)
.CaseOfCFGStmt<CXXMemberCallExpr>(isStatusOrMemberCallWithName("ok"),
transferStatusOrOkCall)
+ .CaseOfCFGStmt<CXXMemberCallExpr>(isStatusOrMemberCallWithName("status"),
+ transferStatusCall)
+ .CaseOfCFGStmt<CXXMemberCallExpr>(isStatusMemberCallWithName("ok"),
+ transferStatusOkCall)
+ .CaseOfCFGStmt<CXXMemberCallExpr>(isStatusMemberCallWithName("Update"),
+ transferStatusUpdateCall)
.Build();
}
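Note: taken together, the new transfer functions cover code that inspects or merges the wrapped absl::Status instead of calling StatusOr::ok() directly. A hedged C++ sketch of the kind of code they are meant to model (assumes the Abseil status headers; not part of this patch):

  #include "absl/status/status.h"
  #include "absl/status/statusor.h"

  int Use(const absl::StatusOr<int> &SOr, const absl::Status &Other) {
    // SOr.status().ok() is now tied to the same "ok" property as SOr.ok().
    if (SOr.status().ok())
      return *SOr;

    // S.Update(Other): if S is currently OK it takes Other's status,
    // otherwise it keeps its own error, so afterwards S.ok() implies
    // Other.ok().
    absl::Status S = absl::OkStatus();
    S.Update(Other);
    if (S.ok()) {
      // On this path the model can conclude that Other was OK.
    }
    return 0;
  }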
diff --git a/clang/lib/Basic/Targets.cpp b/clang/lib/Basic/Targets.cpp
index b7e8bad..f39c698 100644
--- a/clang/lib/Basic/Targets.cpp
+++ b/clang/lib/Basic/Targets.cpp
@@ -222,6 +222,8 @@ std::unique_ptr<TargetInfo> AllocateTarget(const llvm::Triple &Triple,
return std::make_unique<OHOSTargetInfo<ARMleTargetInfo>>(Triple, Opts);
case llvm::Triple::FreeBSD:
return std::make_unique<FreeBSDTargetInfo<ARMleTargetInfo>>(Triple, Opts);
+ case llvm::Triple::Fuchsia:
+ return std::make_unique<FuchsiaTargetInfo<ARMleTargetInfo>>(Triple, Opts);
case llvm::Triple::NetBSD:
return std::make_unique<NetBSDTargetInfo<ARMleTargetInfo>>(Triple, Opts);
case llvm::Triple::OpenBSD:
@@ -254,6 +256,8 @@ std::unique_ptr<TargetInfo> AllocateTarget(const llvm::Triple &Triple,
return std::make_unique<AppleMachOARMTargetInfo>(Triple, Opts);
switch (os) {
+ case llvm::Triple::Fuchsia:
+ return std::make_unique<FuchsiaTargetInfo<ARMbeTargetInfo>>(Triple, Opts);
case llvm::Triple::Linux:
return std::make_unique<LinuxTargetInfo<ARMbeTargetInfo>>(Triple, Opts);
case llvm::Triple::NetBSD:
diff --git a/clang/lib/Basic/Targets/ARM.cpp b/clang/lib/Basic/Targets/ARM.cpp
index 3de17d2..d00a3a4 100644
--- a/clang/lib/Basic/Targets/ARM.cpp
+++ b/clang/lib/Basic/Targets/ARM.cpp
@@ -260,6 +260,7 @@ ARMTargetInfo::ARMTargetInfo(const llvm::Triple &Triple,
: TargetInfo(Triple), FPMath(FP_Default), IsAAPCS(true), LDREX(0),
HW_FP(0) {
bool IsFreeBSD = Triple.isOSFreeBSD();
+ bool IsFuchsia = Triple.isOSFuchsia();
bool IsOpenBSD = Triple.isOSOpenBSD();
bool IsNetBSD = Triple.isOSNetBSD();
bool IsHaiku = Triple.isOSHaiku();
@@ -332,7 +333,7 @@ ARMTargetInfo::ARMTargetInfo(const llvm::Triple &Triple,
default:
if (IsNetBSD)
setABI("apcs-gnu");
- else if (IsFreeBSD || IsOpenBSD || IsHaiku || IsOHOS)
+ else if (IsFreeBSD || IsFuchsia || IsOpenBSD || IsHaiku || IsOHOS)
setABI("aapcs-linux");
else
setABI("aapcs");
diff --git a/clang/lib/Basic/Targets/RISCV.cpp b/clang/lib/Basic/Targets/RISCV.cpp
index 04da4e6..685925b 100644
--- a/clang/lib/Basic/Targets/RISCV.cpp
+++ b/clang/lib/Basic/Targets/RISCV.cpp
@@ -192,8 +192,11 @@ void RISCVTargetInfo::getTargetDefines(const LangOptions &Opts,
Builder.defineMacro("__riscv_muldiv");
}
- if (ISAInfo->hasExtension("a")) {
+ // The "a" extension is composed of "zalrsc" and "zaamo"
+ if (ISAInfo->hasExtension("a"))
Builder.defineMacro("__riscv_atomic");
+
+ if (ISAInfo->hasExtension("zalrsc")) {
Builder.defineMacro("__GCC_HAVE_SYNC_COMPARE_AND_SWAP_1");
Builder.defineMacro("__GCC_HAVE_SYNC_COMPARE_AND_SWAP_2");
Builder.defineMacro("__GCC_HAVE_SYNC_COMPARE_AND_SWAP_4");
diff --git a/clang/lib/Basic/Targets/RISCV.h b/clang/lib/Basic/Targets/RISCV.h
index d8b0e64..85fa4cc 100644
--- a/clang/lib/Basic/Targets/RISCV.h
+++ b/clang/lib/Basic/Targets/RISCV.h
@@ -195,7 +195,8 @@ public:
void setMaxAtomicWidth() override {
MaxAtomicPromoteWidth = 128;
- if (ISAInfo->hasExtension("a"))
+ // "a" implies "zalrsc" which is sufficient to inline atomics
+ if (ISAInfo->hasExtension("zalrsc"))
MaxAtomicInlineWidth = 32;
}
};
@@ -225,7 +226,8 @@ public:
void setMaxAtomicWidth() override {
MaxAtomicPromoteWidth = 128;
- if (ISAInfo->hasExtension("a"))
+ // "a" implies "zalrsc" which is sufficient to inline atomics
+ if (ISAInfo->hasExtension("zalrsc"))
MaxAtomicInlineWidth = 64;
}
};
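Note: the practical effect is that the lock-free __GCC_HAVE_SYNC_COMPARE_AND_SWAP_* macros and the inline-atomics width now key off Zalrsc (which the full A extension implies) rather than off A itself. An illustrative C++ sketch of code gated on these macros, using only the standard __sync builtins:

  #if defined(__riscv_atomic)
  // The A extension (Zalrsc + Zaamo) is available.
  #endif

  static int try_claim(int *p) {
  #if defined(__GCC_HAVE_SYNC_COMPARE_AND_SWAP_4)
    // LR/SC-based compare-and-swap can be emitted inline.
    return __sync_bool_compare_and_swap(p, 0, 1);
  #else
    return 0; // fall back to a library or lock-based path
  #endif
  }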
diff --git a/clang/lib/CIR/CodeGen/CIRGenAtomic.cpp b/clang/lib/CIR/CodeGen/CIRGenAtomic.cpp
index 67ca60c..7db6e28 100644
--- a/clang/lib/CIR/CodeGen/CIRGenAtomic.cpp
+++ b/clang/lib/CIR/CodeGen/CIRGenAtomic.cpp
@@ -346,6 +346,8 @@ static void emitAtomicOp(CIRGenFunction &cgf, AtomicExpr *expr, Address dest,
CIRGenBuilderTy &builder = cgf.getBuilder();
mlir::Location loc = cgf.getLoc(expr->getSourceRange());
auto orderAttr = cir::MemOrderAttr::get(builder.getContext(), order);
+ cir::AtomicFetchKindAttr fetchAttr;
+ bool fetchFirst = true;
switch (expr->getOp()) {
case AtomicExpr::AO__c11_atomic_init:
@@ -407,6 +409,86 @@ static void emitAtomicOp(CIRGenFunction &cgf, AtomicExpr *expr, Address dest,
opName = cir::AtomicXchgOp::getOperationName();
break;
+ case AtomicExpr::AO__atomic_add_fetch:
+ fetchFirst = false;
+ [[fallthrough]];
+ case AtomicExpr::AO__c11_atomic_fetch_add:
+ case AtomicExpr::AO__atomic_fetch_add:
+ opName = cir::AtomicFetchOp::getOperationName();
+ fetchAttr = cir::AtomicFetchKindAttr::get(builder.getContext(),
+ cir::AtomicFetchKind::Add);
+ break;
+
+ case AtomicExpr::AO__atomic_sub_fetch:
+ fetchFirst = false;
+ [[fallthrough]];
+ case AtomicExpr::AO__c11_atomic_fetch_sub:
+ case AtomicExpr::AO__atomic_fetch_sub:
+ opName = cir::AtomicFetchOp::getOperationName();
+ fetchAttr = cir::AtomicFetchKindAttr::get(builder.getContext(),
+ cir::AtomicFetchKind::Sub);
+ break;
+
+ case AtomicExpr::AO__atomic_min_fetch:
+ fetchFirst = false;
+ [[fallthrough]];
+ case AtomicExpr::AO__c11_atomic_fetch_min:
+ case AtomicExpr::AO__atomic_fetch_min:
+ opName = cir::AtomicFetchOp::getOperationName();
+ fetchAttr = cir::AtomicFetchKindAttr::get(builder.getContext(),
+ cir::AtomicFetchKind::Min);
+ break;
+
+ case AtomicExpr::AO__atomic_max_fetch:
+ fetchFirst = false;
+ [[fallthrough]];
+ case AtomicExpr::AO__c11_atomic_fetch_max:
+ case AtomicExpr::AO__atomic_fetch_max:
+ opName = cir::AtomicFetchOp::getOperationName();
+ fetchAttr = cir::AtomicFetchKindAttr::get(builder.getContext(),
+ cir::AtomicFetchKind::Max);
+ break;
+
+ case AtomicExpr::AO__atomic_and_fetch:
+ fetchFirst = false;
+ [[fallthrough]];
+ case AtomicExpr::AO__c11_atomic_fetch_and:
+ case AtomicExpr::AO__atomic_fetch_and:
+ opName = cir::AtomicFetchOp::getOperationName();
+ fetchAttr = cir::AtomicFetchKindAttr::get(builder.getContext(),
+ cir::AtomicFetchKind::And);
+ break;
+
+ case AtomicExpr::AO__atomic_or_fetch:
+ fetchFirst = false;
+ [[fallthrough]];
+ case AtomicExpr::AO__c11_atomic_fetch_or:
+ case AtomicExpr::AO__atomic_fetch_or:
+ opName = cir::AtomicFetchOp::getOperationName();
+ fetchAttr = cir::AtomicFetchKindAttr::get(builder.getContext(),
+ cir::AtomicFetchKind::Or);
+ break;
+
+ case AtomicExpr::AO__atomic_xor_fetch:
+ fetchFirst = false;
+ [[fallthrough]];
+ case AtomicExpr::AO__c11_atomic_fetch_xor:
+ case AtomicExpr::AO__atomic_fetch_xor:
+ opName = cir::AtomicFetchOp::getOperationName();
+ fetchAttr = cir::AtomicFetchKindAttr::get(builder.getContext(),
+ cir::AtomicFetchKind::Xor);
+ break;
+
+ case AtomicExpr::AO__atomic_nand_fetch:
+ fetchFirst = false;
+ [[fallthrough]];
+ case AtomicExpr::AO__c11_atomic_fetch_nand:
+ case AtomicExpr::AO__atomic_fetch_nand:
+ opName = cir::AtomicFetchOp::getOperationName();
+ fetchAttr = cir::AtomicFetchKindAttr::get(builder.getContext(),
+ cir::AtomicFetchKind::Nand);
+ break;
+
case AtomicExpr::AO__atomic_test_and_set: {
auto op = cir::AtomicTestAndSetOp::create(
builder, loc, ptr.getPointer(), order,
@@ -450,74 +532,50 @@ static void emitAtomicOp(CIRGenFunction &cgf, AtomicExpr *expr, Address dest,
case AtomicExpr::AO__scoped_atomic_exchange_n:
case AtomicExpr::AO__scoped_atomic_exchange:
- case AtomicExpr::AO__atomic_add_fetch:
case AtomicExpr::AO__scoped_atomic_add_fetch:
- case AtomicExpr::AO__c11_atomic_fetch_add:
case AtomicExpr::AO__hip_atomic_fetch_add:
case AtomicExpr::AO__opencl_atomic_fetch_add:
- case AtomicExpr::AO__atomic_fetch_add:
case AtomicExpr::AO__scoped_atomic_fetch_add:
- case AtomicExpr::AO__atomic_sub_fetch:
case AtomicExpr::AO__scoped_atomic_sub_fetch:
- case AtomicExpr::AO__c11_atomic_fetch_sub:
case AtomicExpr::AO__hip_atomic_fetch_sub:
case AtomicExpr::AO__opencl_atomic_fetch_sub:
- case AtomicExpr::AO__atomic_fetch_sub:
case AtomicExpr::AO__scoped_atomic_fetch_sub:
- case AtomicExpr::AO__atomic_min_fetch:
case AtomicExpr::AO__scoped_atomic_min_fetch:
- case AtomicExpr::AO__c11_atomic_fetch_min:
case AtomicExpr::AO__hip_atomic_fetch_min:
case AtomicExpr::AO__opencl_atomic_fetch_min:
- case AtomicExpr::AO__atomic_fetch_min:
case AtomicExpr::AO__scoped_atomic_fetch_min:
- case AtomicExpr::AO__atomic_max_fetch:
case AtomicExpr::AO__scoped_atomic_max_fetch:
- case AtomicExpr::AO__c11_atomic_fetch_max:
case AtomicExpr::AO__hip_atomic_fetch_max:
case AtomicExpr::AO__opencl_atomic_fetch_max:
- case AtomicExpr::AO__atomic_fetch_max:
case AtomicExpr::AO__scoped_atomic_fetch_max:
- case AtomicExpr::AO__atomic_and_fetch:
case AtomicExpr::AO__scoped_atomic_and_fetch:
- case AtomicExpr::AO__c11_atomic_fetch_and:
case AtomicExpr::AO__hip_atomic_fetch_and:
case AtomicExpr::AO__opencl_atomic_fetch_and:
- case AtomicExpr::AO__atomic_fetch_and:
case AtomicExpr::AO__scoped_atomic_fetch_and:
- case AtomicExpr::AO__atomic_or_fetch:
case AtomicExpr::AO__scoped_atomic_or_fetch:
- case AtomicExpr::AO__c11_atomic_fetch_or:
case AtomicExpr::AO__hip_atomic_fetch_or:
case AtomicExpr::AO__opencl_atomic_fetch_or:
- case AtomicExpr::AO__atomic_fetch_or:
case AtomicExpr::AO__scoped_atomic_fetch_or:
- case AtomicExpr::AO__atomic_xor_fetch:
case AtomicExpr::AO__scoped_atomic_xor_fetch:
- case AtomicExpr::AO__c11_atomic_fetch_xor:
case AtomicExpr::AO__hip_atomic_fetch_xor:
case AtomicExpr::AO__opencl_atomic_fetch_xor:
- case AtomicExpr::AO__atomic_fetch_xor:
case AtomicExpr::AO__scoped_atomic_fetch_xor:
- case AtomicExpr::AO__atomic_nand_fetch:
case AtomicExpr::AO__scoped_atomic_nand_fetch:
- case AtomicExpr::AO__c11_atomic_fetch_nand:
- case AtomicExpr::AO__atomic_fetch_nand:
case AtomicExpr::AO__scoped_atomic_fetch_nand:
cgf.cgm.errorNYI(expr->getSourceRange(), "emitAtomicOp: expr op NYI");
return;
@@ -531,9 +589,13 @@ static void emitAtomicOp(CIRGenFunction &cgf, AtomicExpr *expr, Address dest,
mlir::Operation *rmwOp = builder.create(loc, builder.getStringAttr(opName),
atomicOperands, atomicResTys);
+ if (fetchAttr)
+ rmwOp->setAttr("binop", fetchAttr);
rmwOp->setAttr("mem_order", orderAttr);
if (expr->isVolatile())
rmwOp->setAttr("is_volatile", builder.getUnitAttr());
+ if (fetchFirst && opName == cir::AtomicFetchOp::getOperationName())
+ rmwOp->setAttr("fetch_first", builder.getUnitAttr());
mlir::Value result = rmwOp->getResult(0);
builder.createStore(loc, result, dest);
@@ -629,8 +691,41 @@ RValue CIRGenFunction::emitAtomicExpr(AtomicExpr *e) {
isWeakExpr = e->getWeak();
break;
+ case AtomicExpr::AO__c11_atomic_fetch_add:
+ case AtomicExpr::AO__c11_atomic_fetch_sub:
+ if (memTy->isPointerType()) {
+ cgm.errorNYI(e->getSourceRange(),
+ "atomic fetch-and-add and fetch-and-sub for pointers");
+ return RValue::get(nullptr);
+ }
+ [[fallthrough]];
+ case AtomicExpr::AO__atomic_fetch_add:
+ case AtomicExpr::AO__atomic_fetch_max:
+ case AtomicExpr::AO__atomic_fetch_min:
+ case AtomicExpr::AO__atomic_fetch_sub:
+ case AtomicExpr::AO__atomic_add_fetch:
+ case AtomicExpr::AO__atomic_max_fetch:
+ case AtomicExpr::AO__atomic_min_fetch:
+ case AtomicExpr::AO__atomic_sub_fetch:
+ case AtomicExpr::AO__c11_atomic_fetch_max:
+ case AtomicExpr::AO__c11_atomic_fetch_min:
+ shouldCastToIntPtrTy = !memTy->isFloatingType();
+ [[fallthrough]];
+
+ case AtomicExpr::AO__atomic_fetch_and:
+ case AtomicExpr::AO__atomic_fetch_nand:
+ case AtomicExpr::AO__atomic_fetch_or:
+ case AtomicExpr::AO__atomic_fetch_xor:
+ case AtomicExpr::AO__atomic_and_fetch:
+ case AtomicExpr::AO__atomic_nand_fetch:
+ case AtomicExpr::AO__atomic_or_fetch:
+ case AtomicExpr::AO__atomic_xor_fetch:
case AtomicExpr::AO__atomic_exchange_n:
case AtomicExpr::AO__atomic_store_n:
+ case AtomicExpr::AO__c11_atomic_fetch_and:
+ case AtomicExpr::AO__c11_atomic_fetch_nand:
+ case AtomicExpr::AO__c11_atomic_fetch_or:
+ case AtomicExpr::AO__c11_atomic_fetch_xor:
case AtomicExpr::AO__c11_atomic_exchange:
case AtomicExpr::AO__c11_atomic_store:
val1 = emitValToTemp(*this, e->getVal1());
diff --git a/clang/lib/CIR/CodeGen/CIRGenBuiltin.cpp b/clang/lib/CIR/CodeGen/CIRGenBuiltin.cpp
index ea31871..798e9d9 100644
--- a/clang/lib/CIR/CodeGen/CIRGenBuiltin.cpp
+++ b/clang/lib/CIR/CodeGen/CIRGenBuiltin.cpp
@@ -463,12 +463,107 @@ RValue CIRGenFunction::emitBuiltinExpr(const GlobalDecl &gd, unsigned builtinID,
return emitLibraryCall(*this, fd, e,
cgm.getBuiltinLibFunction(fd, builtinID));
+ // Some target-specific builtins can have aggregate return values, e.g.
+ // __builtin_arm_mve_vld2q_u32. So if the result is an aggregate, force
+ // returnValue to be non-null, so that the target-specific emission code can
+ // always just emit into it.
+ cir::TypeEvaluationKind evalKind = getEvaluationKind(e->getType());
+ if (evalKind == cir::TEK_Aggregate && returnValue.isNull()) {
+ cgm.errorNYI(e->getSourceRange(), "aggregate return value from builtin");
+ return getUndefRValue(e->getType());
+ }
+
+ // Now see if we can emit a target-specific builtin.
+ if (mlir::Value v = emitTargetBuiltinExpr(builtinID, e, returnValue)) {
+ switch (evalKind) {
+ case cir::TEK_Scalar:
+ if (mlir::isa<cir::VoidType>(v.getType()))
+ return RValue::get(nullptr);
+ return RValue::get(v);
+ case cir::TEK_Aggregate:
+ cgm.errorNYI(e->getSourceRange(), "aggregate return value from builtin");
+ return getUndefRValue(e->getType());
+ case cir::TEK_Complex:
+ llvm_unreachable("No current target builtin returns complex");
+ }
+ llvm_unreachable("Bad evaluation kind in EmitBuiltinExpr");
+ }
+
cgm.errorNYI(e->getSourceRange(),
std::string("unimplemented builtin call: ") +
getContext().BuiltinInfo.getName(builtinID));
return getUndefRValue(e->getType());
}
+static mlir::Value emitTargetArchBuiltinExpr(CIRGenFunction *cgf,
+ unsigned builtinID,
+ const CallExpr *e,
+ ReturnValueSlot &returnValue,
+ llvm::Triple::ArchType arch) {
+ // When compiling in HipStdPar mode we have to be conservative in rejecting
+ // target specific features in the FE, and defer the possible error to the
+ // AcceleratorCodeSelection pass, wherein iff an unsupported target builtin is
+ // referenced by an accelerator executable function, we emit an error.
+ // Returning nullptr here leads to the builtin being handled in
+ // EmitStdParUnsupportedBuiltin.
+ if (cgf->getLangOpts().HIPStdPar && cgf->getLangOpts().CUDAIsDevice &&
+ arch != cgf->getTarget().getTriple().getArch())
+ return {};
+
+ switch (arch) {
+ case llvm::Triple::arm:
+ case llvm::Triple::armeb:
+ case llvm::Triple::thumb:
+ case llvm::Triple::thumbeb:
+ case llvm::Triple::aarch64:
+ case llvm::Triple::aarch64_32:
+ case llvm::Triple::aarch64_be:
+ case llvm::Triple::bpfeb:
+ case llvm::Triple::bpfel:
+ // These are actually NYI, but that will be reported by emitBuiltinExpr.
+ // At this point, we don't even know that the builtin is target-specific.
+ return nullptr;
+
+ case llvm::Triple::x86:
+ case llvm::Triple::x86_64:
+ return cgf->emitX86BuiltinExpr(builtinID, e);
+
+ case llvm::Triple::ppc:
+ case llvm::Triple::ppcle:
+ case llvm::Triple::ppc64:
+ case llvm::Triple::ppc64le:
+ case llvm::Triple::r600:
+ case llvm::Triple::amdgcn:
+ case llvm::Triple::systemz:
+ case llvm::Triple::nvptx:
+ case llvm::Triple::nvptx64:
+ case llvm::Triple::wasm32:
+ case llvm::Triple::wasm64:
+ case llvm::Triple::hexagon:
+ case llvm::Triple::riscv32:
+ case llvm::Triple::riscv64:
+ // These are actually NYI, but that will be reported by emitBuiltinExpr.
+ // At this point, we don't even know that the builtin is target-specific.
+ return {};
+ default:
+ return {};
+ }
+}
+
+mlir::Value
+CIRGenFunction::emitTargetBuiltinExpr(unsigned builtinID, const CallExpr *e,
+ ReturnValueSlot &returnValue) {
+ if (getContext().BuiltinInfo.isAuxBuiltinID(builtinID)) {
+ assert(getContext().getAuxTargetInfo() && "Missing aux target info");
+ return emitTargetArchBuiltinExpr(
+ this, getContext().BuiltinInfo.getAuxBuiltinID(builtinID), e,
+ returnValue, getContext().getAuxTargetInfo()->getTriple().getArch());
+ }
+
+ return emitTargetArchBuiltinExpr(this, builtinID, e, returnValue,
+ getTarget().getTriple().getArch());
+}
+
/// Given a builtin id for a function like "__builtin_fabsf", return a Function*
/// for "fabsf".
cir::FuncOp CIRGenModule::getBuiltinLibFunction(const FunctionDecl *fd,
diff --git a/clang/lib/CIR/CodeGen/CIRGenBuiltinX86.cpp b/clang/lib/CIR/CodeGen/CIRGenBuiltinX86.cpp
new file mode 100644
index 0000000..3c9c7ec
--- /dev/null
+++ b/clang/lib/CIR/CodeGen/CIRGenBuiltinX86.cpp
@@ -0,0 +1,814 @@
+//===----------------------------------------------------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// This contains code to emit x86/x86_64 Builtin calls as CIR or a function
+// call to be later resolved.
+//
+//===----------------------------------------------------------------------===//
+
+#include "CIRGenFunction.h"
+#include "CIRGenModule.h"
+#include "clang/Basic/Builtins.h"
+#include "clang/Basic/TargetBuiltins.h"
+#include "clang/CIR/MissingFeatures.h"
+#include "llvm/IR/IntrinsicsX86.h"
+
+using namespace clang;
+using namespace clang::CIRGen;
+
+mlir::Value CIRGenFunction::emitX86BuiltinExpr(unsigned builtinID,
+ const CallExpr *e) {
+ if (builtinID == Builtin::BI__builtin_cpu_is) {
+ cgm.errorNYI(e->getSourceRange(), "__builtin_cpu_is");
+ return {};
+ }
+ if (builtinID == Builtin::BI__builtin_cpu_supports) {
+ cgm.errorNYI(e->getSourceRange(), "__builtin_cpu_supports");
+ return {};
+ }
+ if (builtinID == Builtin::BI__builtin_cpu_init) {
+ cgm.errorNYI(e->getSourceRange(), "__builtin_cpu_init");
+ return {};
+ }
+
+ // Handle MSVC intrinsics before argument evaluation to prevent double
+ // evaluation.
+ assert(!cir::MissingFeatures::msvcBuiltins());
+
+ // Find out if any arguments are required to be integer constant expressions.
+ assert(!cir::MissingFeatures::handleBuiltinICEArguments());
+
+ switch (builtinID) {
+ default:
+ return {};
+ case X86::BI_mm_prefetch:
+ case X86::BI_mm_clflush:
+ case X86::BI_mm_lfence:
+ case X86::BI_mm_pause:
+ case X86::BI_mm_mfence:
+ case X86::BI_mm_sfence:
+ case X86::BI__rdtsc:
+ case X86::BI__builtin_ia32_rdtscp:
+ case X86::BI__builtin_ia32_lzcnt_u16:
+ case X86::BI__builtin_ia32_lzcnt_u32:
+ case X86::BI__builtin_ia32_lzcnt_u64:
+ case X86::BI__builtin_ia32_tzcnt_u16:
+ case X86::BI__builtin_ia32_tzcnt_u32:
+ case X86::BI__builtin_ia32_tzcnt_u64:
+ case X86::BI__builtin_ia32_undef128:
+ case X86::BI__builtin_ia32_undef256:
+ case X86::BI__builtin_ia32_undef512:
+ case X86::BI__builtin_ia32_vec_ext_v4hi:
+ case X86::BI__builtin_ia32_vec_ext_v16qi:
+ case X86::BI__builtin_ia32_vec_ext_v8hi:
+ case X86::BI__builtin_ia32_vec_ext_v4si:
+ case X86::BI__builtin_ia32_vec_ext_v4sf:
+ case X86::BI__builtin_ia32_vec_ext_v2di:
+ case X86::BI__builtin_ia32_vec_ext_v32qi:
+ case X86::BI__builtin_ia32_vec_ext_v16hi:
+ case X86::BI__builtin_ia32_vec_ext_v8si:
+ case X86::BI__builtin_ia32_vec_ext_v4di:
+ case X86::BI__builtin_ia32_vec_set_v4hi:
+ case X86::BI__builtin_ia32_vec_set_v16qi:
+ case X86::BI__builtin_ia32_vec_set_v8hi:
+ case X86::BI__builtin_ia32_vec_set_v4si:
+ case X86::BI__builtin_ia32_vec_set_v2di:
+ case X86::BI__builtin_ia32_vec_set_v32qi:
+ case X86::BI__builtin_ia32_vec_set_v16hi:
+ case X86::BI__builtin_ia32_vec_set_v8si:
+ case X86::BI__builtin_ia32_vec_set_v4di:
+ case X86::BI_mm_setcsr:
+ case X86::BI__builtin_ia32_ldmxcsr:
+ case X86::BI_mm_getcsr:
+ case X86::BI__builtin_ia32_stmxcsr:
+ case X86::BI__builtin_ia32_xsave:
+ case X86::BI__builtin_ia32_xsave64:
+ case X86::BI__builtin_ia32_xrstor:
+ case X86::BI__builtin_ia32_xrstor64:
+ case X86::BI__builtin_ia32_xsaveopt:
+ case X86::BI__builtin_ia32_xsaveopt64:
+ case X86::BI__builtin_ia32_xrstors:
+ case X86::BI__builtin_ia32_xrstors64:
+ case X86::BI__builtin_ia32_xsavec:
+ case X86::BI__builtin_ia32_xsavec64:
+ case X86::BI__builtin_ia32_xsaves:
+ case X86::BI__builtin_ia32_xsaves64:
+ case X86::BI__builtin_ia32_xsetbv:
+ case X86::BI_xsetbv:
+ case X86::BI__builtin_ia32_xgetbv:
+ case X86::BI_xgetbv:
+ case X86::BI__builtin_ia32_storedqudi128_mask:
+ case X86::BI__builtin_ia32_storedqusi128_mask:
+ case X86::BI__builtin_ia32_storedquhi128_mask:
+ case X86::BI__builtin_ia32_storedquqi128_mask:
+ case X86::BI__builtin_ia32_storeupd128_mask:
+ case X86::BI__builtin_ia32_storeups128_mask:
+ case X86::BI__builtin_ia32_storedqudi256_mask:
+ case X86::BI__builtin_ia32_storedqusi256_mask:
+ case X86::BI__builtin_ia32_storedquhi256_mask:
+ case X86::BI__builtin_ia32_storedquqi256_mask:
+ case X86::BI__builtin_ia32_storeupd256_mask:
+ case X86::BI__builtin_ia32_storeups256_mask:
+ case X86::BI__builtin_ia32_storedqudi512_mask:
+ case X86::BI__builtin_ia32_storedqusi512_mask:
+ case X86::BI__builtin_ia32_storedquhi512_mask:
+ case X86::BI__builtin_ia32_storedquqi512_mask:
+ case X86::BI__builtin_ia32_storeupd512_mask:
+ case X86::BI__builtin_ia32_storeups512_mask:
+ case X86::BI__builtin_ia32_storesbf16128_mask:
+ case X86::BI__builtin_ia32_storesh128_mask:
+ case X86::BI__builtin_ia32_storess128_mask:
+ case X86::BI__builtin_ia32_storesd128_mask:
+ case X86::BI__builtin_ia32_cvtmask2b128:
+ case X86::BI__builtin_ia32_cvtmask2b256:
+ case X86::BI__builtin_ia32_cvtmask2b512:
+ case X86::BI__builtin_ia32_cvtmask2w128:
+ case X86::BI__builtin_ia32_cvtmask2w256:
+ case X86::BI__builtin_ia32_cvtmask2w512:
+ case X86::BI__builtin_ia32_cvtmask2d128:
+ case X86::BI__builtin_ia32_cvtmask2d256:
+ case X86::BI__builtin_ia32_cvtmask2d512:
+ case X86::BI__builtin_ia32_cvtmask2q128:
+ case X86::BI__builtin_ia32_cvtmask2q256:
+ case X86::BI__builtin_ia32_cvtmask2q512:
+ case X86::BI__builtin_ia32_cvtb2mask128:
+ case X86::BI__builtin_ia32_cvtb2mask256:
+ case X86::BI__builtin_ia32_cvtb2mask512:
+ case X86::BI__builtin_ia32_cvtw2mask128:
+ case X86::BI__builtin_ia32_cvtw2mask256:
+ case X86::BI__builtin_ia32_cvtw2mask512:
+ case X86::BI__builtin_ia32_cvtd2mask128:
+ case X86::BI__builtin_ia32_cvtd2mask256:
+ case X86::BI__builtin_ia32_cvtd2mask512:
+ case X86::BI__builtin_ia32_cvtq2mask128:
+ case X86::BI__builtin_ia32_cvtq2mask256:
+ case X86::BI__builtin_ia32_cvtq2mask512:
+ case X86::BI__builtin_ia32_cvtdq2ps512_mask:
+ case X86::BI__builtin_ia32_cvtqq2ps512_mask:
+ case X86::BI__builtin_ia32_cvtqq2pd512_mask:
+ case X86::BI__builtin_ia32_vcvtw2ph512_mask:
+ case X86::BI__builtin_ia32_vcvtdq2ph512_mask:
+ case X86::BI__builtin_ia32_vcvtqq2ph512_mask:
+ case X86::BI__builtin_ia32_cvtudq2ps512_mask:
+ case X86::BI__builtin_ia32_cvtuqq2ps512_mask:
+ case X86::BI__builtin_ia32_cvtuqq2pd512_mask:
+ case X86::BI__builtin_ia32_vcvtuw2ph512_mask:
+ case X86::BI__builtin_ia32_vcvtudq2ph512_mask:
+ case X86::BI__builtin_ia32_vcvtuqq2ph512_mask:
+ case X86::BI__builtin_ia32_vfmaddss3:
+ case X86::BI__builtin_ia32_vfmaddsd3:
+ case X86::BI__builtin_ia32_vfmaddsh3_mask:
+ case X86::BI__builtin_ia32_vfmaddss3_mask:
+ case X86::BI__builtin_ia32_vfmaddsd3_mask:
+ case X86::BI__builtin_ia32_vfmaddss:
+ case X86::BI__builtin_ia32_vfmaddsd:
+ case X86::BI__builtin_ia32_vfmaddsh3_maskz:
+ case X86::BI__builtin_ia32_vfmaddss3_maskz:
+ case X86::BI__builtin_ia32_vfmaddsd3_maskz:
+ case X86::BI__builtin_ia32_vfmaddsh3_mask3:
+ case X86::BI__builtin_ia32_vfmaddss3_mask3:
+ case X86::BI__builtin_ia32_vfmaddsd3_mask3:
+ case X86::BI__builtin_ia32_vfmsubsh3_mask3:
+ case X86::BI__builtin_ia32_vfmsubss3_mask3:
+ case X86::BI__builtin_ia32_vfmsubsd3_mask3:
+ case X86::BI__builtin_ia32_vfmaddph512_mask:
+ case X86::BI__builtin_ia32_vfmaddph512_maskz:
+ case X86::BI__builtin_ia32_vfmaddph512_mask3:
+ case X86::BI__builtin_ia32_vfmaddps512_mask:
+ case X86::BI__builtin_ia32_vfmaddps512_maskz:
+ case X86::BI__builtin_ia32_vfmaddps512_mask3:
+ case X86::BI__builtin_ia32_vfmsubps512_mask3:
+ case X86::BI__builtin_ia32_vfmaddpd512_mask:
+ case X86::BI__builtin_ia32_vfmaddpd512_maskz:
+ case X86::BI__builtin_ia32_vfmaddpd512_mask3:
+ case X86::BI__builtin_ia32_vfmsubpd512_mask3:
+ case X86::BI__builtin_ia32_vfmsubph512_mask3:
+ case X86::BI__builtin_ia32_vfmaddsubph512_mask:
+ case X86::BI__builtin_ia32_vfmaddsubph512_maskz:
+ case X86::BI__builtin_ia32_vfmaddsubph512_mask3:
+ case X86::BI__builtin_ia32_vfmsubaddph512_mask3:
+ case X86::BI__builtin_ia32_vfmaddsubps512_mask:
+ case X86::BI__builtin_ia32_vfmaddsubps512_maskz:
+ case X86::BI__builtin_ia32_vfmaddsubps512_mask3:
+ case X86::BI__builtin_ia32_vfmsubaddps512_mask3:
+ case X86::BI__builtin_ia32_vfmaddsubpd512_mask:
+ case X86::BI__builtin_ia32_vfmaddsubpd512_maskz:
+ case X86::BI__builtin_ia32_vfmaddsubpd512_mask3:
+ case X86::BI__builtin_ia32_vfmsubaddpd512_mask3:
+ case X86::BI__builtin_ia32_movdqa32store128_mask:
+ case X86::BI__builtin_ia32_movdqa64store128_mask:
+ case X86::BI__builtin_ia32_storeaps128_mask:
+ case X86::BI__builtin_ia32_storeapd128_mask:
+ case X86::BI__builtin_ia32_movdqa32store256_mask:
+ case X86::BI__builtin_ia32_movdqa64store256_mask:
+ case X86::BI__builtin_ia32_storeaps256_mask:
+ case X86::BI__builtin_ia32_storeapd256_mask:
+ case X86::BI__builtin_ia32_movdqa32store512_mask:
+ case X86::BI__builtin_ia32_movdqa64store512_mask:
+ case X86::BI__builtin_ia32_storeaps512_mask:
+ case X86::BI__builtin_ia32_storeapd512_mask:
+ case X86::BI__builtin_ia32_loadups128_mask:
+ case X86::BI__builtin_ia32_loadups256_mask:
+ case X86::BI__builtin_ia32_loadups512_mask:
+ case X86::BI__builtin_ia32_loadupd128_mask:
+ case X86::BI__builtin_ia32_loadupd256_mask:
+ case X86::BI__builtin_ia32_loadupd512_mask:
+ case X86::BI__builtin_ia32_loaddquqi128_mask:
+ case X86::BI__builtin_ia32_loaddquqi256_mask:
+ case X86::BI__builtin_ia32_loaddquqi512_mask:
+ case X86::BI__builtin_ia32_loaddquhi128_mask:
+ case X86::BI__builtin_ia32_loaddquhi256_mask:
+ case X86::BI__builtin_ia32_loaddquhi512_mask:
+ case X86::BI__builtin_ia32_loaddqusi128_mask:
+ case X86::BI__builtin_ia32_loaddqusi256_mask:
+ case X86::BI__builtin_ia32_loaddqusi512_mask:
+ case X86::BI__builtin_ia32_loaddqudi128_mask:
+ case X86::BI__builtin_ia32_loaddqudi256_mask:
+ case X86::BI__builtin_ia32_loaddqudi512_mask:
+ case X86::BI__builtin_ia32_loadsbf16128_mask:
+ case X86::BI__builtin_ia32_loadsh128_mask:
+ case X86::BI__builtin_ia32_loadss128_mask:
+ case X86::BI__builtin_ia32_loadsd128_mask:
+ case X86::BI__builtin_ia32_loadaps128_mask:
+ case X86::BI__builtin_ia32_loadaps256_mask:
+ case X86::BI__builtin_ia32_loadaps512_mask:
+ case X86::BI__builtin_ia32_loadapd128_mask:
+ case X86::BI__builtin_ia32_loadapd256_mask:
+ case X86::BI__builtin_ia32_loadapd512_mask:
+ case X86::BI__builtin_ia32_movdqa32load128_mask:
+ case X86::BI__builtin_ia32_movdqa32load256_mask:
+ case X86::BI__builtin_ia32_movdqa32load512_mask:
+ case X86::BI__builtin_ia32_movdqa64load128_mask:
+ case X86::BI__builtin_ia32_movdqa64load256_mask:
+ case X86::BI__builtin_ia32_movdqa64load512_mask:
+ case X86::BI__builtin_ia32_expandloaddf128_mask:
+ case X86::BI__builtin_ia32_expandloaddf256_mask:
+ case X86::BI__builtin_ia32_expandloaddf512_mask:
+ case X86::BI__builtin_ia32_expandloadsf128_mask:
+ case X86::BI__builtin_ia32_expandloadsf256_mask:
+ case X86::BI__builtin_ia32_expandloadsf512_mask:
+ case X86::BI__builtin_ia32_expandloaddi128_mask:
+ case X86::BI__builtin_ia32_expandloaddi256_mask:
+ case X86::BI__builtin_ia32_expandloaddi512_mask:
+ case X86::BI__builtin_ia32_expandloadsi128_mask:
+ case X86::BI__builtin_ia32_expandloadsi256_mask:
+ case X86::BI__builtin_ia32_expandloadsi512_mask:
+ case X86::BI__builtin_ia32_expandloadhi128_mask:
+ case X86::BI__builtin_ia32_expandloadhi256_mask:
+ case X86::BI__builtin_ia32_expandloadhi512_mask:
+ case X86::BI__builtin_ia32_expandloadqi128_mask:
+ case X86::BI__builtin_ia32_expandloadqi256_mask:
+ case X86::BI__builtin_ia32_expandloadqi512_mask:
+ case X86::BI__builtin_ia32_compressstoredf128_mask:
+ case X86::BI__builtin_ia32_compressstoredf256_mask:
+ case X86::BI__builtin_ia32_compressstoredf512_mask:
+ case X86::BI__builtin_ia32_compressstoresf128_mask:
+ case X86::BI__builtin_ia32_compressstoresf256_mask:
+ case X86::BI__builtin_ia32_compressstoresf512_mask:
+ case X86::BI__builtin_ia32_compressstoredi128_mask:
+ case X86::BI__builtin_ia32_compressstoredi256_mask:
+ case X86::BI__builtin_ia32_compressstoredi512_mask:
+ case X86::BI__builtin_ia32_compressstoresi128_mask:
+ case X86::BI__builtin_ia32_compressstoresi256_mask:
+ case X86::BI__builtin_ia32_compressstoresi512_mask:
+ case X86::BI__builtin_ia32_compressstorehi128_mask:
+ case X86::BI__builtin_ia32_compressstorehi256_mask:
+ case X86::BI__builtin_ia32_compressstorehi512_mask:
+ case X86::BI__builtin_ia32_compressstoreqi128_mask:
+ case X86::BI__builtin_ia32_compressstoreqi256_mask:
+ case X86::BI__builtin_ia32_compressstoreqi512_mask:
+ case X86::BI__builtin_ia32_expanddf128_mask:
+ case X86::BI__builtin_ia32_expanddf256_mask:
+ case X86::BI__builtin_ia32_expanddf512_mask:
+ case X86::BI__builtin_ia32_expandsf128_mask:
+ case X86::BI__builtin_ia32_expandsf256_mask:
+ case X86::BI__builtin_ia32_expandsf512_mask:
+ case X86::BI__builtin_ia32_expanddi128_mask:
+ case X86::BI__builtin_ia32_expanddi256_mask:
+ case X86::BI__builtin_ia32_expanddi512_mask:
+ case X86::BI__builtin_ia32_expandsi128_mask:
+ case X86::BI__builtin_ia32_expandsi256_mask:
+ case X86::BI__builtin_ia32_expandsi512_mask:
+ case X86::BI__builtin_ia32_expandhi128_mask:
+ case X86::BI__builtin_ia32_expandhi256_mask:
+ case X86::BI__builtin_ia32_expandhi512_mask:
+ case X86::BI__builtin_ia32_expandqi128_mask:
+ case X86::BI__builtin_ia32_expandqi256_mask:
+ case X86::BI__builtin_ia32_expandqi512_mask:
+ case X86::BI__builtin_ia32_compressdf128_mask:
+ case X86::BI__builtin_ia32_compressdf256_mask:
+ case X86::BI__builtin_ia32_compressdf512_mask:
+ case X86::BI__builtin_ia32_compresssf128_mask:
+ case X86::BI__builtin_ia32_compresssf256_mask:
+ case X86::BI__builtin_ia32_compresssf512_mask:
+ case X86::BI__builtin_ia32_compressdi128_mask:
+ case X86::BI__builtin_ia32_compressdi256_mask:
+ case X86::BI__builtin_ia32_compressdi512_mask:
+ case X86::BI__builtin_ia32_compresssi128_mask:
+ case X86::BI__builtin_ia32_compresssi256_mask:
+ case X86::BI__builtin_ia32_compresssi512_mask:
+ case X86::BI__builtin_ia32_compresshi128_mask:
+ case X86::BI__builtin_ia32_compresshi256_mask:
+ case X86::BI__builtin_ia32_compresshi512_mask:
+ case X86::BI__builtin_ia32_compressqi128_mask:
+ case X86::BI__builtin_ia32_compressqi256_mask:
+ case X86::BI__builtin_ia32_compressqi512_mask:
+ case X86::BI__builtin_ia32_gather3div2df:
+ case X86::BI__builtin_ia32_gather3div2di:
+ case X86::BI__builtin_ia32_gather3div4df:
+ case X86::BI__builtin_ia32_gather3div4di:
+ case X86::BI__builtin_ia32_gather3div4sf:
+ case X86::BI__builtin_ia32_gather3div4si:
+ case X86::BI__builtin_ia32_gather3div8sf:
+ case X86::BI__builtin_ia32_gather3div8si:
+ case X86::BI__builtin_ia32_gather3siv2df:
+ case X86::BI__builtin_ia32_gather3siv2di:
+ case X86::BI__builtin_ia32_gather3siv4df:
+ case X86::BI__builtin_ia32_gather3siv4di:
+ case X86::BI__builtin_ia32_gather3siv4sf:
+ case X86::BI__builtin_ia32_gather3siv4si:
+ case X86::BI__builtin_ia32_gather3siv8sf:
+ case X86::BI__builtin_ia32_gather3siv8si:
+ case X86::BI__builtin_ia32_gathersiv8df:
+ case X86::BI__builtin_ia32_gathersiv16sf:
+ case X86::BI__builtin_ia32_gatherdiv8df:
+ case X86::BI__builtin_ia32_gatherdiv16sf:
+ case X86::BI__builtin_ia32_gathersiv8di:
+ case X86::BI__builtin_ia32_gathersiv16si:
+ case X86::BI__builtin_ia32_gatherdiv8di:
+ case X86::BI__builtin_ia32_gatherdiv16si:
+ case X86::BI__builtin_ia32_scattersiv8df:
+ case X86::BI__builtin_ia32_scattersiv16sf:
+ case X86::BI__builtin_ia32_scatterdiv8df:
+ case X86::BI__builtin_ia32_scatterdiv16sf:
+ case X86::BI__builtin_ia32_scattersiv8di:
+ case X86::BI__builtin_ia32_scattersiv16si:
+ case X86::BI__builtin_ia32_scatterdiv8di:
+ case X86::BI__builtin_ia32_scatterdiv16si:
+ case X86::BI__builtin_ia32_scatterdiv2df:
+ case X86::BI__builtin_ia32_scatterdiv2di:
+ case X86::BI__builtin_ia32_scatterdiv4df:
+ case X86::BI__builtin_ia32_scatterdiv4di:
+ case X86::BI__builtin_ia32_scatterdiv4sf:
+ case X86::BI__builtin_ia32_scatterdiv4si:
+ case X86::BI__builtin_ia32_scatterdiv8sf:
+ case X86::BI__builtin_ia32_scatterdiv8si:
+ case X86::BI__builtin_ia32_scattersiv2df:
+ case X86::BI__builtin_ia32_scattersiv2di:
+ case X86::BI__builtin_ia32_scattersiv4df:
+ case X86::BI__builtin_ia32_scattersiv4di:
+ case X86::BI__builtin_ia32_scattersiv4sf:
+ case X86::BI__builtin_ia32_scattersiv4si:
+ case X86::BI__builtin_ia32_scattersiv8sf:
+ case X86::BI__builtin_ia32_scattersiv8si:
+ case X86::BI__builtin_ia32_vextractf128_pd256:
+ case X86::BI__builtin_ia32_vextractf128_ps256:
+ case X86::BI__builtin_ia32_vextractf128_si256:
+ case X86::BI__builtin_ia32_extract128i256:
+ case X86::BI__builtin_ia32_extractf64x4_mask:
+ case X86::BI__builtin_ia32_extractf32x4_mask:
+ case X86::BI__builtin_ia32_extracti64x4_mask:
+ case X86::BI__builtin_ia32_extracti32x4_mask:
+ case X86::BI__builtin_ia32_extractf32x8_mask:
+ case X86::BI__builtin_ia32_extracti32x8_mask:
+ case X86::BI__builtin_ia32_extractf32x4_256_mask:
+ case X86::BI__builtin_ia32_extracti32x4_256_mask:
+ case X86::BI__builtin_ia32_extractf64x2_256_mask:
+ case X86::BI__builtin_ia32_extracti64x2_256_mask:
+ case X86::BI__builtin_ia32_extractf64x2_512_mask:
+ case X86::BI__builtin_ia32_extracti64x2_512_mask:
+ case X86::BI__builtin_ia32_vinsertf128_pd256:
+ case X86::BI__builtin_ia32_vinsertf128_ps256:
+ case X86::BI__builtin_ia32_vinsertf128_si256:
+ case X86::BI__builtin_ia32_insert128i256:
+ case X86::BI__builtin_ia32_insertf64x4:
+ case X86::BI__builtin_ia32_insertf32x4:
+ case X86::BI__builtin_ia32_inserti64x4:
+ case X86::BI__builtin_ia32_inserti32x4:
+ case X86::BI__builtin_ia32_insertf32x8:
+ case X86::BI__builtin_ia32_inserti32x8:
+ case X86::BI__builtin_ia32_insertf32x4_256:
+ case X86::BI__builtin_ia32_inserti32x4_256:
+ case X86::BI__builtin_ia32_insertf64x2_256:
+ case X86::BI__builtin_ia32_inserti64x2_256:
+ case X86::BI__builtin_ia32_insertf64x2_512:
+ case X86::BI__builtin_ia32_inserti64x2_512:
+ case X86::BI__builtin_ia32_pmovqd512_mask:
+ case X86::BI__builtin_ia32_pmovwb512_mask:
+ case X86::BI__builtin_ia32_pblendw128:
+ case X86::BI__builtin_ia32_blendpd:
+ case X86::BI__builtin_ia32_blendps:
+ case X86::BI__builtin_ia32_blendpd256:
+ case X86::BI__builtin_ia32_blendps256:
+ case X86::BI__builtin_ia32_pblendw256:
+ case X86::BI__builtin_ia32_pblendd128:
+ case X86::BI__builtin_ia32_pblendd256:
+ case X86::BI__builtin_ia32_pshuflw:
+ case X86::BI__builtin_ia32_pshuflw256:
+ case X86::BI__builtin_ia32_pshuflw512:
+ case X86::BI__builtin_ia32_pshufhw:
+ case X86::BI__builtin_ia32_pshufhw256:
+ case X86::BI__builtin_ia32_pshufhw512:
+ case X86::BI__builtin_ia32_pshufd:
+ case X86::BI__builtin_ia32_pshufd256:
+ case X86::BI__builtin_ia32_pshufd512:
+ case X86::BI__builtin_ia32_vpermilpd:
+ case X86::BI__builtin_ia32_vpermilps:
+ case X86::BI__builtin_ia32_vpermilpd256:
+ case X86::BI__builtin_ia32_vpermilps256:
+ case X86::BI__builtin_ia32_vpermilpd512:
+ case X86::BI__builtin_ia32_vpermilps512:
+ case X86::BI__builtin_ia32_shufpd:
+ case X86::BI__builtin_ia32_shufpd256:
+ case X86::BI__builtin_ia32_shufpd512:
+ case X86::BI__builtin_ia32_shufps:
+ case X86::BI__builtin_ia32_shufps256:
+ case X86::BI__builtin_ia32_shufps512:
+ case X86::BI__builtin_ia32_permdi256:
+ case X86::BI__builtin_ia32_permdf256:
+ case X86::BI__builtin_ia32_permdi512:
+ case X86::BI__builtin_ia32_permdf512:
+ case X86::BI__builtin_ia32_palignr128:
+ case X86::BI__builtin_ia32_palignr256:
+ case X86::BI__builtin_ia32_palignr512:
+ case X86::BI__builtin_ia32_alignd128:
+ case X86::BI__builtin_ia32_alignd256:
+ case X86::BI__builtin_ia32_alignd512:
+ case X86::BI__builtin_ia32_alignq128:
+ case X86::BI__builtin_ia32_alignq256:
+ case X86::BI__builtin_ia32_alignq512:
+ case X86::BI__builtin_ia32_shuf_f32x4_256:
+ case X86::BI__builtin_ia32_shuf_f64x2_256:
+ case X86::BI__builtin_ia32_shuf_i32x4_256:
+ case X86::BI__builtin_ia32_shuf_i64x2_256:
+ case X86::BI__builtin_ia32_shuf_f32x4:
+ case X86::BI__builtin_ia32_shuf_f64x2:
+ case X86::BI__builtin_ia32_shuf_i32x4:
+ case X86::BI__builtin_ia32_shuf_i64x2:
+ case X86::BI__builtin_ia32_vperm2f128_pd256:
+ case X86::BI__builtin_ia32_vperm2f128_ps256:
+ case X86::BI__builtin_ia32_vperm2f128_si256:
+ case X86::BI__builtin_ia32_permti256:
+ case X86::BI__builtin_ia32_pslldqi128_byteshift:
+ case X86::BI__builtin_ia32_pslldqi256_byteshift:
+ case X86::BI__builtin_ia32_pslldqi512_byteshift:
+ case X86::BI__builtin_ia32_psrldqi128_byteshift:
+ case X86::BI__builtin_ia32_psrldqi256_byteshift:
+ case X86::BI__builtin_ia32_psrldqi512_byteshift:
+ case X86::BI__builtin_ia32_kshiftliqi:
+ case X86::BI__builtin_ia32_kshiftlihi:
+ case X86::BI__builtin_ia32_kshiftlisi:
+ case X86::BI__builtin_ia32_kshiftlidi:
+ case X86::BI__builtin_ia32_kshiftriqi:
+ case X86::BI__builtin_ia32_kshiftrihi:
+ case X86::BI__builtin_ia32_kshiftrisi:
+ case X86::BI__builtin_ia32_kshiftridi:
+ case X86::BI__builtin_ia32_vprotbi:
+ case X86::BI__builtin_ia32_vprotwi:
+ case X86::BI__builtin_ia32_vprotdi:
+ case X86::BI__builtin_ia32_vprotqi:
+ case X86::BI__builtin_ia32_prold128:
+ case X86::BI__builtin_ia32_prold256:
+ case X86::BI__builtin_ia32_prold512:
+ case X86::BI__builtin_ia32_prolq128:
+ case X86::BI__builtin_ia32_prolq256:
+ case X86::BI__builtin_ia32_prolq512:
+ case X86::BI__builtin_ia32_prord128:
+ case X86::BI__builtin_ia32_prord256:
+ case X86::BI__builtin_ia32_prord512:
+ case X86::BI__builtin_ia32_prorq128:
+ case X86::BI__builtin_ia32_prorq256:
+ case X86::BI__builtin_ia32_prorq512:
+ case X86::BI__builtin_ia32_selectb_128:
+ case X86::BI__builtin_ia32_selectb_256:
+ case X86::BI__builtin_ia32_selectb_512:
+ case X86::BI__builtin_ia32_selectw_128:
+ case X86::BI__builtin_ia32_selectw_256:
+ case X86::BI__builtin_ia32_selectw_512:
+ case X86::BI__builtin_ia32_selectd_128:
+ case X86::BI__builtin_ia32_selectd_256:
+ case X86::BI__builtin_ia32_selectd_512:
+ case X86::BI__builtin_ia32_selectq_128:
+ case X86::BI__builtin_ia32_selectq_256:
+ case X86::BI__builtin_ia32_selectq_512:
+ case X86::BI__builtin_ia32_selectph_128:
+ case X86::BI__builtin_ia32_selectph_256:
+ case X86::BI__builtin_ia32_selectph_512:
+ case X86::BI__builtin_ia32_selectpbf_128:
+ case X86::BI__builtin_ia32_selectpbf_256:
+ case X86::BI__builtin_ia32_selectpbf_512:
+ case X86::BI__builtin_ia32_selectps_128:
+ case X86::BI__builtin_ia32_selectps_256:
+ case X86::BI__builtin_ia32_selectps_512:
+ case X86::BI__builtin_ia32_selectpd_128:
+ case X86::BI__builtin_ia32_selectpd_256:
+ case X86::BI__builtin_ia32_selectpd_512:
+ case X86::BI__builtin_ia32_selectsh_128:
+ case X86::BI__builtin_ia32_selectsbf_128:
+ case X86::BI__builtin_ia32_selectss_128:
+ case X86::BI__builtin_ia32_selectsd_128:
+ case X86::BI__builtin_ia32_cmpb128_mask:
+ case X86::BI__builtin_ia32_cmpb256_mask:
+ case X86::BI__builtin_ia32_cmpb512_mask:
+ case X86::BI__builtin_ia32_cmpw128_mask:
+ case X86::BI__builtin_ia32_cmpw256_mask:
+ case X86::BI__builtin_ia32_cmpw512_mask:
+ case X86::BI__builtin_ia32_cmpd128_mask:
+ case X86::BI__builtin_ia32_cmpd256_mask:
+ case X86::BI__builtin_ia32_cmpd512_mask:
+ case X86::BI__builtin_ia32_cmpq128_mask:
+ case X86::BI__builtin_ia32_cmpq256_mask:
+ case X86::BI__builtin_ia32_cmpq512_mask:
+ case X86::BI__builtin_ia32_ucmpb128_mask:
+ case X86::BI__builtin_ia32_ucmpb256_mask:
+ case X86::BI__builtin_ia32_ucmpb512_mask:
+ case X86::BI__builtin_ia32_ucmpw128_mask:
+ case X86::BI__builtin_ia32_ucmpw256_mask:
+ case X86::BI__builtin_ia32_ucmpw512_mask:
+ case X86::BI__builtin_ia32_ucmpd128_mask:
+ case X86::BI__builtin_ia32_ucmpd256_mask:
+ case X86::BI__builtin_ia32_ucmpd512_mask:
+ case X86::BI__builtin_ia32_ucmpq128_mask:
+ case X86::BI__builtin_ia32_ucmpq256_mask:
+ case X86::BI__builtin_ia32_ucmpq512_mask:
+ case X86::BI__builtin_ia32_vpcomb:
+ case X86::BI__builtin_ia32_vpcomw:
+ case X86::BI__builtin_ia32_vpcomd:
+ case X86::BI__builtin_ia32_vpcomq:
+ case X86::BI__builtin_ia32_vpcomub:
+ case X86::BI__builtin_ia32_vpcomuw:
+ case X86::BI__builtin_ia32_vpcomud:
+ case X86::BI__builtin_ia32_vpcomuq:
+ case X86::BI__builtin_ia32_kortestcqi:
+ case X86::BI__builtin_ia32_kortestchi:
+ case X86::BI__builtin_ia32_kortestcsi:
+ case X86::BI__builtin_ia32_kortestcdi:
+ case X86::BI__builtin_ia32_kortestzqi:
+ case X86::BI__builtin_ia32_kortestzhi:
+ case X86::BI__builtin_ia32_kortestzsi:
+ case X86::BI__builtin_ia32_kortestzdi:
+ case X86::BI__builtin_ia32_ktestcqi:
+ case X86::BI__builtin_ia32_ktestzqi:
+ case X86::BI__builtin_ia32_ktestchi:
+ case X86::BI__builtin_ia32_ktestzhi:
+ case X86::BI__builtin_ia32_ktestcsi:
+ case X86::BI__builtin_ia32_ktestzsi:
+ case X86::BI__builtin_ia32_ktestcdi:
+ case X86::BI__builtin_ia32_ktestzdi:
+ case X86::BI__builtin_ia32_kaddqi:
+ case X86::BI__builtin_ia32_kaddhi:
+ case X86::BI__builtin_ia32_kaddsi:
+ case X86::BI__builtin_ia32_kadddi:
+ case X86::BI__builtin_ia32_kandqi:
+ case X86::BI__builtin_ia32_kandhi:
+ case X86::BI__builtin_ia32_kandsi:
+ case X86::BI__builtin_ia32_kanddi:
+ case X86::BI__builtin_ia32_kandnqi:
+ case X86::BI__builtin_ia32_kandnhi:
+ case X86::BI__builtin_ia32_kandnsi:
+ case X86::BI__builtin_ia32_kandndi:
+ case X86::BI__builtin_ia32_korqi:
+ case X86::BI__builtin_ia32_korhi:
+ case X86::BI__builtin_ia32_korsi:
+ case X86::BI__builtin_ia32_kordi:
+ case X86::BI__builtin_ia32_kxnorqi:
+ case X86::BI__builtin_ia32_kxnorhi:
+ case X86::BI__builtin_ia32_kxnorsi:
+ case X86::BI__builtin_ia32_kxnordi:
+ case X86::BI__builtin_ia32_kxorqi:
+ case X86::BI__builtin_ia32_kxorhi:
+ case X86::BI__builtin_ia32_kxorsi:
+ case X86::BI__builtin_ia32_kxordi:
+ case X86::BI__builtin_ia32_knotqi:
+ case X86::BI__builtin_ia32_knothi:
+ case X86::BI__builtin_ia32_knotsi:
+ case X86::BI__builtin_ia32_knotdi:
+ case X86::BI__builtin_ia32_kmovb:
+ case X86::BI__builtin_ia32_kmovw:
+ case X86::BI__builtin_ia32_kmovd:
+ case X86::BI__builtin_ia32_kmovq:
+ case X86::BI__builtin_ia32_kunpckdi:
+ case X86::BI__builtin_ia32_kunpcksi:
+ case X86::BI__builtin_ia32_kunpckhi:
+ case X86::BI__builtin_ia32_sqrtsh_round_mask:
+ case X86::BI__builtin_ia32_sqrtsd_round_mask:
+ case X86::BI__builtin_ia32_sqrtss_round_mask:
+ case X86::BI__builtin_ia32_sqrtpd256:
+ case X86::BI__builtin_ia32_sqrtpd:
+ case X86::BI__builtin_ia32_sqrtps256:
+ case X86::BI__builtin_ia32_sqrtps:
+ case X86::BI__builtin_ia32_sqrtph256:
+ case X86::BI__builtin_ia32_sqrtph:
+ case X86::BI__builtin_ia32_sqrtph512:
+ case X86::BI__builtin_ia32_vsqrtbf16256:
+ case X86::BI__builtin_ia32_vsqrtbf16:
+ case X86::BI__builtin_ia32_vsqrtbf16512:
+ case X86::BI__builtin_ia32_sqrtps512:
+ case X86::BI__builtin_ia32_sqrtpd512:
+ case X86::BI__builtin_ia32_pmuludq128:
+ case X86::BI__builtin_ia32_pmuludq256:
+ case X86::BI__builtin_ia32_pmuludq512:
+ case X86::BI__builtin_ia32_pmuldq128:
+ case X86::BI__builtin_ia32_pmuldq256:
+ case X86::BI__builtin_ia32_pmuldq512:
+ case X86::BI__builtin_ia32_pternlogd512_mask:
+ case X86::BI__builtin_ia32_pternlogq512_mask:
+ case X86::BI__builtin_ia32_pternlogd128_mask:
+ case X86::BI__builtin_ia32_pternlogd256_mask:
+ case X86::BI__builtin_ia32_pternlogq128_mask:
+ case X86::BI__builtin_ia32_pternlogq256_mask:
+ case X86::BI__builtin_ia32_pternlogd512_maskz:
+ case X86::BI__builtin_ia32_pternlogq512_maskz:
+ case X86::BI__builtin_ia32_pternlogd128_maskz:
+ case X86::BI__builtin_ia32_pternlogd256_maskz:
+ case X86::BI__builtin_ia32_pternlogq128_maskz:
+ case X86::BI__builtin_ia32_pternlogq256_maskz:
+ case X86::BI__builtin_ia32_vpshldd128:
+ case X86::BI__builtin_ia32_vpshldd256:
+ case X86::BI__builtin_ia32_vpshldd512:
+ case X86::BI__builtin_ia32_vpshldq128:
+ case X86::BI__builtin_ia32_vpshldq256:
+ case X86::BI__builtin_ia32_vpshldq512:
+ case X86::BI__builtin_ia32_vpshldw128:
+ case X86::BI__builtin_ia32_vpshldw256:
+ case X86::BI__builtin_ia32_vpshldw512:
+ case X86::BI__builtin_ia32_vpshrdd128:
+ case X86::BI__builtin_ia32_vpshrdd256:
+ case X86::BI__builtin_ia32_vpshrdd512:
+ case X86::BI__builtin_ia32_vpshrdq128:
+ case X86::BI__builtin_ia32_vpshrdq256:
+ case X86::BI__builtin_ia32_vpshrdq512:
+ case X86::BI__builtin_ia32_vpshrdw128:
+ case X86::BI__builtin_ia32_vpshrdw256:
+ case X86::BI__builtin_ia32_vpshrdw512:
+ case X86::BI__builtin_ia32_reduce_fadd_pd512:
+ case X86::BI__builtin_ia32_reduce_fadd_ps512:
+ case X86::BI__builtin_ia32_reduce_fadd_ph512:
+ case X86::BI__builtin_ia32_reduce_fadd_ph256:
+ case X86::BI__builtin_ia32_reduce_fadd_ph128:
+ case X86::BI__builtin_ia32_reduce_fmul_pd512:
+ case X86::BI__builtin_ia32_reduce_fmul_ps512:
+ case X86::BI__builtin_ia32_reduce_fmul_ph512:
+ case X86::BI__builtin_ia32_reduce_fmul_ph256:
+ case X86::BI__builtin_ia32_reduce_fmul_ph128:
+ case X86::BI__builtin_ia32_reduce_fmax_pd512:
+ case X86::BI__builtin_ia32_reduce_fmax_ps512:
+ case X86::BI__builtin_ia32_reduce_fmax_ph512:
+ case X86::BI__builtin_ia32_reduce_fmax_ph256:
+ case X86::BI__builtin_ia32_reduce_fmax_ph128:
+ case X86::BI__builtin_ia32_reduce_fmin_pd512:
+ case X86::BI__builtin_ia32_reduce_fmin_ps512:
+ case X86::BI__builtin_ia32_reduce_fmin_ph512:
+ case X86::BI__builtin_ia32_reduce_fmin_ph256:
+ case X86::BI__builtin_ia32_reduce_fmin_ph128:
+ case X86::BI__builtin_ia32_rdrand16_step:
+ case X86::BI__builtin_ia32_rdrand32_step:
+ case X86::BI__builtin_ia32_rdrand64_step:
+ case X86::BI__builtin_ia32_rdseed16_step:
+ case X86::BI__builtin_ia32_rdseed32_step:
+ case X86::BI__builtin_ia32_rdseed64_step:
+ case X86::BI__builtin_ia32_addcarryx_u32:
+ case X86::BI__builtin_ia32_addcarryx_u64:
+ case X86::BI__builtin_ia32_subborrow_u32:
+ case X86::BI__builtin_ia32_subborrow_u64:
+ case X86::BI__builtin_ia32_fpclassps128_mask:
+ case X86::BI__builtin_ia32_fpclassps256_mask:
+ case X86::BI__builtin_ia32_fpclassps512_mask:
+ case X86::BI__builtin_ia32_vfpclassbf16128_mask:
+ case X86::BI__builtin_ia32_vfpclassbf16256_mask:
+ case X86::BI__builtin_ia32_vfpclassbf16512_mask:
+ case X86::BI__builtin_ia32_fpclassph128_mask:
+ case X86::BI__builtin_ia32_fpclassph256_mask:
+ case X86::BI__builtin_ia32_fpclassph512_mask:
+ case X86::BI__builtin_ia32_fpclasspd128_mask:
+ case X86::BI__builtin_ia32_fpclasspd256_mask:
+ case X86::BI__builtin_ia32_fpclasspd512_mask:
+ case X86::BI__builtin_ia32_vp2intersect_q_512:
+ case X86::BI__builtin_ia32_vp2intersect_q_256:
+ case X86::BI__builtin_ia32_vp2intersect_q_128:
+ case X86::BI__builtin_ia32_vp2intersect_d_512:
+ case X86::BI__builtin_ia32_vp2intersect_d_256:
+ case X86::BI__builtin_ia32_vp2intersect_d_128:
+ case X86::BI__builtin_ia32_vpmultishiftqb128:
+ case X86::BI__builtin_ia32_vpmultishiftqb256:
+ case X86::BI__builtin_ia32_vpmultishiftqb512:
+ case X86::BI__builtin_ia32_vpshufbitqmb128_mask:
+ case X86::BI__builtin_ia32_vpshufbitqmb256_mask:
+ case X86::BI__builtin_ia32_vpshufbitqmb512_mask:
+ case X86::BI__builtin_ia32_cmpeqps:
+ case X86::BI__builtin_ia32_cmpeqpd:
+ case X86::BI__builtin_ia32_cmpltps:
+ case X86::BI__builtin_ia32_cmpltpd:
+ case X86::BI__builtin_ia32_cmpleps:
+ case X86::BI__builtin_ia32_cmplepd:
+ case X86::BI__builtin_ia32_cmpunordps:
+ case X86::BI__builtin_ia32_cmpunordpd:
+ case X86::BI__builtin_ia32_cmpneqps:
+ case X86::BI__builtin_ia32_cmpneqpd:
+ case X86::BI__builtin_ia32_cmpnltps:
+ case X86::BI__builtin_ia32_cmpnltpd:
+ case X86::BI__builtin_ia32_cmpnleps:
+ case X86::BI__builtin_ia32_cmpnlepd:
+ case X86::BI__builtin_ia32_cmpordps:
+ case X86::BI__builtin_ia32_cmpordpd:
+ case X86::BI__builtin_ia32_cmpph128_mask:
+ case X86::BI__builtin_ia32_cmpph256_mask:
+ case X86::BI__builtin_ia32_cmpph512_mask:
+ case X86::BI__builtin_ia32_cmpps128_mask:
+ case X86::BI__builtin_ia32_cmpps256_mask:
+ case X86::BI__builtin_ia32_cmpps512_mask:
+ case X86::BI__builtin_ia32_cmppd128_mask:
+ case X86::BI__builtin_ia32_cmppd256_mask:
+ case X86::BI__builtin_ia32_cmppd512_mask:
+ case X86::BI__builtin_ia32_vcmpbf16512_mask:
+ case X86::BI__builtin_ia32_vcmpbf16256_mask:
+ case X86::BI__builtin_ia32_vcmpbf16128_mask:
+ case X86::BI__builtin_ia32_cmpps:
+ case X86::BI__builtin_ia32_cmpps256:
+ case X86::BI__builtin_ia32_cmppd:
+ case X86::BI__builtin_ia32_cmppd256:
+ case X86::BI__builtin_ia32_cmpeqss:
+ case X86::BI__builtin_ia32_cmpltss:
+ case X86::BI__builtin_ia32_cmpless:
+ case X86::BI__builtin_ia32_cmpunordss:
+ case X86::BI__builtin_ia32_cmpneqss:
+ case X86::BI__builtin_ia32_cmpnltss:
+ case X86::BI__builtin_ia32_cmpnless:
+ case X86::BI__builtin_ia32_cmpordss:
+ case X86::BI__builtin_ia32_cmpeqsd:
+ case X86::BI__builtin_ia32_cmpltsd:
+ case X86::BI__builtin_ia32_cmplesd:
+ case X86::BI__builtin_ia32_cmpunordsd:
+ case X86::BI__builtin_ia32_cmpneqsd:
+ case X86::BI__builtin_ia32_cmpnltsd:
+ case X86::BI__builtin_ia32_cmpnlesd:
+ case X86::BI__builtin_ia32_cmpordsd:
+ case X86::BI__builtin_ia32_vcvtph2ps_mask:
+ case X86::BI__builtin_ia32_vcvtph2ps256_mask:
+ case X86::BI__builtin_ia32_vcvtph2ps512_mask:
+ case X86::BI__builtin_ia32_cvtneps2bf16_128_mask:
+ case X86::BI__builtin_ia32_cvtsbf162ss_32:
+ case X86::BI__builtin_ia32_cvtneps2bf16_256_mask:
+ case X86::BI__builtin_ia32_cvtneps2bf16_512_mask:
+ case X86::BI__cpuid:
+ case X86::BI__cpuidex:
+ case X86::BI__emul:
+ case X86::BI__emulu:
+ case X86::BI__mulh:
+ case X86::BI__umulh:
+ case X86::BI_mul128:
+ case X86::BI_umul128:
+ case X86::BI__faststorefence:
+ case X86::BI__shiftleft128:
+ case X86::BI__shiftright128:
+ case X86::BI_ReadWriteBarrier:
+ case X86::BI_ReadBarrier:
+ case X86::BI_WriteBarrier:
+ case X86::BI_AddressOfReturnAddress:
+ case X86::BI__stosb:
+ case X86::BI__builtin_ia32_t2rpntlvwz0_internal:
+ case X86::BI__builtin_ia32_t2rpntlvwz0rs_internal:
+ case X86::BI__builtin_ia32_t2rpntlvwz0t1_internal:
+ case X86::BI__builtin_ia32_t2rpntlvwz0rst1_internal:
+ case X86::BI__builtin_ia32_t2rpntlvwz1_internal:
+ case X86::BI__builtin_ia32_t2rpntlvwz1rs_internal:
+ case X86::BI__builtin_ia32_t2rpntlvwz1t1_internal:
+ case X86::BI__builtin_ia32_t2rpntlvwz1rst1_internal:
+ case X86::BI__ud2:
+ case X86::BI__int2c:
+ case X86::BI__readfsbyte:
+ case X86::BI__readfsword:
+ case X86::BI__readfsdword:
+ case X86::BI__readfsqword:
+ case X86::BI__readgsbyte:
+ case X86::BI__readgsword:
+ case X86::BI__readgsdword:
+ case X86::BI__readgsqword:
+ case X86::BI__builtin_ia32_encodekey128_u32:
+ case X86::BI__builtin_ia32_encodekey256_u32:
+ case X86::BI__builtin_ia32_aesenc128kl_u8:
+ case X86::BI__builtin_ia32_aesdec128kl_u8:
+ case X86::BI__builtin_ia32_aesenc256kl_u8:
+ case X86::BI__builtin_ia32_aesdec256kl_u8:
+ case X86::BI__builtin_ia32_aesencwide128kl_u8:
+ case X86::BI__builtin_ia32_aesdecwide128kl_u8:
+ case X86::BI__builtin_ia32_aesencwide256kl_u8:
+ case X86::BI__builtin_ia32_aesdecwide256kl_u8:
+ case X86::BI__builtin_ia32_vfcmaddcph512_mask:
+ case X86::BI__builtin_ia32_vfmaddcph512_mask:
+ case X86::BI__builtin_ia32_vfcmaddcsh_round_mask:
+ case X86::BI__builtin_ia32_vfmaddcsh_round_mask:
+ case X86::BI__builtin_ia32_vfcmaddcsh_round_mask3:
+ case X86::BI__builtin_ia32_vfmaddcsh_round_mask3:
+ case X86::BI__builtin_ia32_prefetchi:
+ cgm.errorNYI(e->getSourceRange(),
+ std::string("unimplemented X86 builtin call: ") +
+ getContext().BuiltinInfo.getName(builtinID));
+ return {};
+ }
+}
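As a rough illustration (not part of the patch), any intrinsic that expands to one of the builtin IDs listed above still falls through to the errorNYI diagnostic under ClangIR code generation. A minimal sketch, assuming the standard SSE intrinsic header:

    #include <xmmintrin.h>

    // _mm_sqrt_ps expands to __builtin_ia32_sqrtps, one of the cases listed
    // above, so CIR code generation currently reports it as an unimplemented
    // X86 builtin call instead of lowering it.
    __m128 sqrt4(__m128 v) { return _mm_sqrt_ps(v); }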
diff --git a/clang/lib/CIR/CodeGen/CIRGenCXXABI.cpp b/clang/lib/CIR/CodeGen/CIRGenCXXABI.cpp
index df42af8..eef3739 100644
--- a/clang/lib/CIR/CodeGen/CIRGenCXXABI.cpp
+++ b/clang/lib/CIR/CodeGen/CIRGenCXXABI.cpp
@@ -37,6 +37,10 @@ CIRGenCXXABI::AddedStructorArgCounts CIRGenCXXABI::addImplicitConstructorArgs(
addedArgs.suffix.size());
}
+CatchTypeInfo CIRGenCXXABI::getCatchAllTypeInfo() {
+ return CatchTypeInfo{{}, 0};
+}
+
void CIRGenCXXABI::buildThisParam(CIRGenFunction &cgf,
FunctionArgList &params) {
const auto *md = cast<CXXMethodDecl>(cgf.curGD.getDecl());
@@ -81,8 +85,7 @@ CharUnits CIRGenCXXABI::getArrayCookieSize(const CXXNewExpr *e) {
if (!requiresArrayCookie(e))
return CharUnits::Zero();
- cgm.errorNYI(e->getSourceRange(), "CIRGenCXXABI::getArrayCookieSize");
- return CharUnits::Zero();
+ return getArrayCookieSizeImpl(e->getAllocatedType());
}
bool CIRGenCXXABI::requiresArrayCookie(const CXXNewExpr *e) {
diff --git a/clang/lib/CIR/CodeGen/CIRGenCXXABI.h b/clang/lib/CIR/CodeGen/CIRGenCXXABI.h
index 6d3741c4..c78f9b0 100644
--- a/clang/lib/CIR/CodeGen/CIRGenCXXABI.h
+++ b/clang/lib/CIR/CodeGen/CIRGenCXXABI.h
@@ -15,6 +15,7 @@
#define LLVM_CLANG_LIB_CIR_CIRGENCXXABI_H
#include "CIRGenCall.h"
+#include "CIRGenCleanup.h"
#include "CIRGenFunction.h"
#include "CIRGenModule.h"
@@ -155,6 +156,8 @@ public:
/// Loads the incoming C++ this pointer as it was passed by the caller.
mlir::Value loadIncomingCXXThis(CIRGenFunction &cgf);
+ virtual CatchTypeInfo getCatchAllTypeInfo();
+
/// Get the implicit (second) parameter that comes after the "this" pointer,
   /// or nullptr if there isn't one.

virtual mlir::Value getCXXDestructorImplicitParam(CIRGenFunction &cgf,
@@ -299,8 +302,28 @@ public:
/// - non-array allocations never need a cookie
/// - calls to \::operator new(size_t, void*) never need a cookie
///
- /// \param E - the new-expression being allocated.
+ /// \param e - the new-expression being allocated.
virtual CharUnits getArrayCookieSize(const CXXNewExpr *e);
+
+ /// Initialize the array cookie for the given allocation.
+ ///
+ /// \param newPtr - a char* which is the presumed-non-null
+ /// return value of the allocation function
+ /// \param numElements - the computed number of elements,
+ /// potentially collapsed from the multidimensional array case;
+ /// always a size_t
+ /// \param elementType - the base element allocated type,
+ /// i.e. the allocated type after stripping all array types
+ virtual Address initializeArrayCookie(CIRGenFunction &cgf, Address newPtr,
+ mlir::Value numElements,
+ const CXXNewExpr *e,
+ QualType elementType) = 0;
+
+protected:
+ /// Returns the extra size required in order to store the array
+ /// cookie for the given type. Assumes that an array cookie is
+ /// required.
+ virtual CharUnits getArrayCookieSizeImpl(QualType elementType) = 0;
};
 /// Creates an Itanium-family ABI
diff --git a/clang/lib/CIR/CodeGen/CIRGenCleanup.cpp b/clang/lib/CIR/CodeGen/CIRGenCleanup.cpp
index 8700697..851328a 100644
--- a/clang/lib/CIR/CodeGen/CIRGenCleanup.cpp
+++ b/clang/lib/CIR/CodeGen/CIRGenCleanup.cpp
@@ -28,6 +28,46 @@ using namespace clang::CIRGen;
// CIRGenFunction cleanup related
//===----------------------------------------------------------------------===//
+/// Build an unconditional branch to the lexical scope cleanup block
+/// or to the labeled block if it has already been resolved.
+///
+/// Track, on a per-scope basis, the goto's we still need to fix up later.
+cir::BrOp CIRGenFunction::emitBranchThroughCleanup(mlir::Location loc,
+ JumpDest dest) {
+  // Insert a branch: to the cleanup block (unresolved) or to the already
+  // materialized label. Keep track of unresolved goto's.
+ assert(dest.getBlock() && "assumes incoming valid dest");
+ auto brOp = cir::BrOp::create(builder, loc, dest.getBlock());
+
+ // Calculate the innermost active normal cleanup.
+ EHScopeStack::stable_iterator topCleanup =
+ ehStack.getInnermostActiveNormalCleanup();
+
+ // If we're not in an active normal cleanup scope, or if the
+ // destination scope is within the innermost active normal cleanup
+ // scope, we don't need to worry about fixups.
+ if (topCleanup == ehStack.stable_end() ||
+ topCleanup.encloses(dest.getScopeDepth())) { // works for invalid
+ // FIXME(cir): should we clear insertion point here?
+ return brOp;
+ }
+
+ // If we can't resolve the destination cleanup scope, just add this
+ // to the current cleanup scope as a branch fixup.
+ if (!dest.getScopeDepth().isValid()) {
+ BranchFixup &fixup = ehStack.addBranchFixup();
+ fixup.destination = dest.getBlock();
+ fixup.destinationIndex = dest.getDestIndex();
+ fixup.initialBranch = brOp;
+ fixup.optimisticBranchBlock = nullptr;
+ // FIXME(cir): should we clear insertion point here?
+ return brOp;
+ }
+
+ cgm.errorNYI(loc, "emitBranchThroughCleanup: valid destination scope depth");
+ return brOp;
+}
+
/// Emits all the code to cause the given temporary to be cleaned up.
void CIRGenFunction::emitCXXTemporary(const CXXTemporary *temporary,
QualType tempType, Address ptr) {
@@ -40,6 +80,19 @@ void CIRGenFunction::emitCXXTemporary(const CXXTemporary *temporary,
void EHScopeStack::Cleanup::anchor() {}
+EHScopeStack::stable_iterator
+EHScopeStack::getInnermostActiveNormalCleanup() const {
+ stable_iterator si = getInnermostNormalCleanup();
+ stable_iterator se = stable_end();
+ while (si != se) {
+ EHCleanupScope &cleanup = llvm::cast<EHCleanupScope>(*find(si));
+ if (cleanup.isActive())
+ return si;
+ si = cleanup.getEnclosingNormalCleanup();
+ }
+ return stable_end();
+}
+
/// Push an entry of the given size onto this protected-scope stack.
char *EHScopeStack::allocate(size_t size) {
size = llvm::alignTo(size, ScopeStackAlignment);
@@ -75,14 +128,30 @@ void EHScopeStack::deallocate(size_t size) {
startOfData += llvm::alignTo(size, ScopeStackAlignment);
}
+/// Remove any 'null' fixups on the stack. However, we can't pop more
+/// fixups than the fixup depth on the innermost normal cleanup, or
+/// else fixups that we try to add to that cleanup will end up in the
+/// wrong place. We *could* try to shrink fixup depths, but that's
+/// actually a lot of work for little benefit.
+void EHScopeStack::popNullFixups() {
+ // We expect this to only be called when there's still an innermost
+ // normal cleanup; otherwise there really shouldn't be any fixups.
+ cgf->cgm.errorNYI("popNullFixups");
+}
+
void *EHScopeStack::pushCleanup(CleanupKind kind, size_t size) {
char *buffer = allocate(EHCleanupScope::getSizeForCleanupSize(size));
+ bool isNormalCleanup = kind & NormalCleanup;
bool isEHCleanup = kind & EHCleanup;
bool isLifetimeMarker = kind & LifetimeMarker;
assert(!cir::MissingFeatures::innermostEHScope());
- EHCleanupScope *scope = new (buffer) EHCleanupScope(size);
+ EHCleanupScope *scope = new (buffer)
+ EHCleanupScope(size, branchFixups.size(), innermostNormalCleanup);
+
+ if (isNormalCleanup)
+ innermostNormalCleanup = stable_begin();
if (isLifetimeMarker)
cgf->cgm.errorNYI("push lifetime marker cleanup");
@@ -100,12 +169,30 @@ void EHScopeStack::popCleanup() {
assert(isa<EHCleanupScope>(*begin()));
EHCleanupScope &cleanup = cast<EHCleanupScope>(*begin());
+ innermostNormalCleanup = cleanup.getEnclosingNormalCleanup();
deallocate(cleanup.getAllocatedSize());
// Destroy the cleanup.
cleanup.destroy();
- assert(!cir::MissingFeatures::ehCleanupBranchFixups());
+ // Check whether we can shrink the branch-fixups stack.
+ if (!branchFixups.empty()) {
+ // If we no longer have any normal cleanups, all the fixups are
+ // complete.
+ if (!hasNormalCleanups()) {
+ branchFixups.clear();
+ } else {
+ // Otherwise we can still trim out unnecessary nulls.
+ popNullFixups();
+ }
+ }
+}
+
+EHCatchScope *EHScopeStack::pushCatch(unsigned numHandlers) {
+ char *buffer = allocate(EHCatchScope::getSizeForNumHandlers(numHandlers));
+ assert(!cir::MissingFeatures::innermostEHScope());
+ EHCatchScope *scope = new (buffer) EHCatchScope(numHandlers);
+ return scope;
}
static void emitCleanup(CIRGenFunction &cgf, EHScopeStack::Cleanup *cleanup) {
@@ -116,6 +203,18 @@ static void emitCleanup(CIRGenFunction &cgf, EHScopeStack::Cleanup *cleanup) {
assert(cgf.haveInsertPoint() && "cleanup ended with no insertion point?");
}
+static mlir::Block *createNormalEntry(CIRGenFunction &cgf,
+ EHCleanupScope &scope) {
+ assert(scope.isNormalCleanup());
+ mlir::Block *entry = scope.getNormalBlock();
+ if (!entry) {
+ mlir::OpBuilder::InsertionGuard guard(cgf.getBuilder());
+ entry = cgf.curLexScope->getOrCreateCleanupBlock(cgf.getBuilder());
+ scope.setNormalBlock(entry);
+ }
+ return entry;
+}
+
/// Pops a cleanup block. If the block includes a normal cleanup, the
/// current insertion point is threaded through the cleanup, as are
/// any branch fixups on the cleanup.
@@ -123,17 +222,21 @@ void CIRGenFunction::popCleanupBlock() {
assert(!ehStack.empty() && "cleanup stack is empty!");
assert(isa<EHCleanupScope>(*ehStack.begin()) && "top not a cleanup!");
EHCleanupScope &scope = cast<EHCleanupScope>(*ehStack.begin());
+ assert(scope.getFixupDepth() <= ehStack.getNumBranchFixups());
// Remember activation information.
bool isActive = scope.isActive();
- assert(!cir::MissingFeatures::ehCleanupBranchFixups());
+ // - whether there are branch fix-ups through this cleanup
+ unsigned fixupDepth = scope.getFixupDepth();
+ bool hasFixups = ehStack.getNumBranchFixups() != fixupDepth;
// - whether there's a fallthrough
mlir::Block *fallthroughSource = builder.getInsertionBlock();
bool hasFallthrough = fallthroughSource != nullptr && isActive;
- bool requiresNormalCleanup = scope.isNormalCleanup() && hasFallthrough;
+ bool requiresNormalCleanup =
+ scope.isNormalCleanup() && (hasFixups || hasFallthrough);
// If we don't need the cleanup at all, we're done.
assert(!cir::MissingFeatures::ehCleanupScopeRequiresEHCleanup());
@@ -168,9 +271,119 @@ void CIRGenFunction::popCleanupBlock() {
assert(!cir::MissingFeatures::ehCleanupFlags());
- ehStack.popCleanup();
- scope.markEmitted();
- emitCleanup(*this, cleanup);
+ // If we have a fallthrough and no other need for the cleanup,
+ // emit it directly.
+ if (hasFallthrough && !hasFixups) {
+ assert(!cir::MissingFeatures::ehCleanupScopeRequiresEHCleanup());
+ ehStack.popCleanup();
+ scope.markEmitted();
+ emitCleanup(*this, cleanup);
+ } else {
+ // Otherwise, the best approach is to thread everything through
+ // the cleanup block and then try to clean up after ourselves.
+
+ // Force the entry block to exist.
+ mlir::Block *normalEntry = createNormalEntry(*this, scope);
+
+ // I. Set up the fallthrough edge in.
+ mlir::OpBuilder::InsertPoint savedInactiveFallthroughIP;
+
+ // If there's a fallthrough, we need to store the cleanup
+ // destination index. For fall-throughs this is always zero.
+ if (hasFallthrough) {
+ assert(!cir::MissingFeatures::ehCleanupHasPrebranchedFallthrough());
+
+ } else if (fallthroughSource) {
+ // Otherwise, save and clear the IP if we don't have fallthrough
+ // because the cleanup is inactive.
+ assert(!isActive && "source without fallthrough for active cleanup");
+ savedInactiveFallthroughIP = builder.saveInsertionPoint();
+ }
+
+ // II. Emit the entry block. This implicitly branches to it if
+ // we have fallthrough. All the fixups and existing branches
+ // should already be branched to it.
+ builder.setInsertionPointToEnd(normalEntry);
+
+ // intercept normal cleanup to mark SEH scope end
+ assert(!cir::MissingFeatures::ehCleanupScopeRequiresEHCleanup());
+
+ // III. Figure out where we're going and build the cleanup
+ // epilogue.
+ bool hasEnclosingCleanups =
+ (scope.getEnclosingNormalCleanup() != ehStack.stable_end());
+
+ // Compute the branch-through dest if we need it:
+ // - if there are branch-throughs threaded through the scope
+ // - if fall-through is a branch-through
+ // - if there are fixups that will be optimistically forwarded
+ // to the enclosing cleanup
+ assert(!cir::MissingFeatures::cleanupBranchThrough());
+ if (hasFixups && hasEnclosingCleanups)
+ cgm.errorNYI("cleanup branch-through dest");
+
+ mlir::Block *fallthroughDest = nullptr;
+
+ // If there's exactly one branch-after and no other threads,
+ // we can route it without a switch.
+ // Skip for SEH, since ExitSwitch is used to generate code to indicate
+ // abnormal termination. (SEH: Except _leave and fall-through at
+ // the end, all other exits in a _try (return/goto/continue/break)
+ // are considered as abnormal terminations, using NormalCleanupDestSlot
+ // to indicate abnormal termination)
+ assert(!cir::MissingFeatures::cleanupBranchThrough());
+ assert(!cir::MissingFeatures::ehCleanupScopeRequiresEHCleanup());
+
+ // IV. Pop the cleanup and emit it.
+ scope.markEmitted();
+ ehStack.popCleanup();
+ assert(ehStack.hasNormalCleanups() == hasEnclosingCleanups);
+
+ emitCleanup(*this, cleanup);
+
+ // Append the prepared cleanup prologue from above.
+ assert(!cir::MissingFeatures::cleanupAppendInsts());
+
+ // Optimistically hope that any fixups will continue falling through.
+ if (fixupDepth != ehStack.getNumBranchFixups())
+ cgm.errorNYI("cleanup fixup depth mismatch");
+
+ // V. Set up the fallthrough edge out.
+
+ // Case 1: a fallthrough source exists but doesn't branch to the
+ // cleanup because the cleanup is inactive.
+ if (!hasFallthrough && fallthroughSource) {
+ // Prebranched fallthrough was forwarded earlier.
+ // Non-prebranched fallthrough doesn't need to be forwarded.
+ // Either way, all we need to do is restore the IP we cleared before.
+ assert(!isActive);
+ cgm.errorNYI("cleanup inactive fallthrough");
+
+ // Case 2: a fallthrough source exists and should branch to the
+ // cleanup, but we're not supposed to branch through to the next
+ // cleanup.
+ } else if (hasFallthrough && fallthroughDest) {
+ cgm.errorNYI("cleanup fallthrough destination");
+
+ // Case 3: a fallthrough source exists and should branch to the
+ // cleanup and then through to the next.
+ } else if (hasFallthrough) {
+ // Everything is already set up for this.
+
+ // Case 4: no fallthrough source exists.
+ } else {
+ // FIXME(cir): should we clear insertion point here?
+ }
+
+ // VI. Assorted cleaning.
+
+ // Check whether we can merge NormalEntry into a single predecessor.
+ // This might invalidate (non-IR) pointers to NormalEntry.
+ //
+ // If it did invalidate those pointers, and normalEntry was the same
+ // as NormalExit, go back and patch up the fixups.
+ assert(!cir::MissingFeatures::simplifyCleanupEntry());
+ }
}
/// Pops cleanup blocks until the given savepoint is reached.
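For orientation (illustrative only, not part of the patch), the sort of C++ that needs the branch-fixup machinery added above is a jump that leaves a scope owning a normal cleanup, for example an early return past a local with a destructor:

    struct Guard { ~Guard(); };

    int f(bool early) {
      Guard g;      // pushes a normal cleanup for ~Guard()
      if (early)
        return 1;   // the branch to the return block must be threaded through
                    // the cleanup, or recorded as a fixup until it is popped
      return 0;
    }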
diff --git a/clang/lib/CIR/CodeGen/CIRGenCleanup.h b/clang/lib/CIR/CodeGen/CIRGenCleanup.h
index 30f5607..9acf8b1 100644
--- a/clang/lib/CIR/CodeGen/CIRGenCleanup.h
+++ b/clang/lib/CIR/CodeGen/CIRGenCleanup.h
@@ -20,6 +20,13 @@
namespace clang::CIRGen {
+/// The MS C++ ABI needs a pointer to RTTI data plus some flags to describe the
+/// type of a catch handler, so we use this wrapper.
+struct CatchTypeInfo {
+ mlir::TypedAttr rtti;
+ unsigned flags;
+};
+
/// A protected scope for zero-cost EH handling.
class EHScope {
class CommonBitFields {
@@ -29,6 +36,12 @@ class EHScope {
enum { NumCommonBits = 3 };
protected:
+ class CatchBitFields {
+ friend class EHCatchScope;
+ unsigned : NumCommonBits;
+ unsigned numHandlers : 32 - NumCommonBits;
+ };
+
class CleanupBitFields {
friend class EHCleanupScope;
unsigned : NumCommonBits;
@@ -58,6 +71,7 @@ protected:
union {
CommonBitFields commonBits;
+ CatchBitFields catchBits;
CleanupBitFields cleanupBits;
};
@@ -67,11 +81,88 @@ public:
EHScope(Kind kind) { commonBits.kind = kind; }
Kind getKind() const { return static_cast<Kind>(commonBits.kind); }
+
+ bool mayThrow() const {
+ // Traditional LLVM codegen also checks for `!block->use_empty()`, but
+ // in CIRGen the block content is not important, just used as a way to
+ // signal `hasEHBranches`.
+ assert(!cir::MissingFeatures::ehstackBranches());
+ return false;
+ }
+};
+
+/// A scope which attempts to handle some, possibly all, types of
+/// exceptions.
+///
+/// Objective C \@finally blocks are represented using a cleanup scope
+/// after the catch scope.
+
+class EHCatchScope : public EHScope {
+ // In effect, we have a flexible array member
+ // Handler Handlers[0];
+ // But that's only standard in C99, not C++, so we have to do
+ // annoying pointer arithmetic instead.
+
+public:
+ struct Handler {
+    /// A type info value, or a null MLIR attribute for a catch-all.
+ CatchTypeInfo type;
+
+ /// The catch handler for this type.
+ mlir::Region *region;
+ };
+
+private:
+ friend class EHScopeStack;
+
+ Handler *getHandlers() { return reinterpret_cast<Handler *>(this + 1); }
+
+public:
+ static size_t getSizeForNumHandlers(unsigned n) {
+ return sizeof(EHCatchScope) + n * sizeof(Handler);
+ }
+
+ EHCatchScope(unsigned numHandlers) : EHScope(Catch) {
+ catchBits.numHandlers = numHandlers;
+ assert(catchBits.numHandlers == numHandlers && "NumHandlers overflow?");
+ }
+
+ unsigned getNumHandlers() const { return catchBits.numHandlers; }
+
+ void setHandler(unsigned i, CatchTypeInfo type, mlir::Region *region) {
+ assert(i < getNumHandlers());
+ getHandlers()[i].type = type;
+ getHandlers()[i].region = region;
+ }
+
+ // Clear all handler blocks.
+ // FIXME: it's better to always call clearHandlerBlocks in DTOR and have a
+ // 'takeHandler' or some such function which removes ownership from the
+ // EHCatchScope object if the handlers should live longer than EHCatchScope.
+ void clearHandlerBlocks() {
+ // The blocks are owned by TryOp, nothing to delete.
+ }
+
+ static bool classof(const EHScope *scope) {
+ return scope->getKind() == Catch;
+ }
};
/// A cleanup scope which generates the cleanup blocks lazily.
class alignas(EHScopeStack::ScopeStackAlignment) EHCleanupScope
: public EHScope {
+ /// The nearest normal cleanup scope enclosing this one.
+ EHScopeStack::stable_iterator enclosingNormal;
+
+ /// The dual entry/exit block along the normal edge. This is lazily
+ /// created if needed before the cleanup is popped.
+ mlir::Block *normalBlock = nullptr;
+
+ /// The number of fixups required by enclosing scopes (not including
+ /// this one). If this is the top cleanup scope, all the fixups
+ /// from this index onwards belong to this scope.
+ unsigned fixupDepth = 0;
+
public:
/// Gets the size required for a lazy cleanup scope with the given
/// cleanup-data requirements.
@@ -83,7 +174,10 @@ public:
return sizeof(EHCleanupScope) + cleanupBits.cleanupSize;
}
- EHCleanupScope(unsigned cleanupSize) : EHScope(EHScope::Cleanup) {
+ EHCleanupScope(unsigned cleanupSize, unsigned fixupDepth,
+ EHScopeStack::stable_iterator enclosingNormal)
+ : EHScope(EHScope::Cleanup), enclosingNormal(enclosingNormal),
+ fixupDepth(fixupDepth) {
// TODO(cir): When exception handling is upstreamed, isNormalCleanup and
// isEHCleanup will be arguments to the constructor.
cleanupBits.isNormalCleanup = true;
@@ -101,11 +195,19 @@ public:
// Objects of EHCleanupScope are not destructed. Use destroy().
~EHCleanupScope() = delete;
+ mlir::Block *getNormalBlock() const { return normalBlock; }
+ void setNormalBlock(mlir::Block *bb) { normalBlock = bb; }
+
bool isNormalCleanup() const { return cleanupBits.isNormalCleanup; }
bool isActive() const { return cleanupBits.isActive; }
void setActive(bool isActive) { cleanupBits.isActive = isActive; }
+ unsigned getFixupDepth() const { return fixupDepth; }
+ EHScopeStack::stable_iterator getEnclosingNormalCleanup() const {
+ return enclosingNormal;
+ }
+
size_t getCleanupSize() const { return cleanupBits.cleanupSize; }
void *getCleanupBuffer() { return this + 1; }
@@ -147,5 +249,13 @@ EHScopeStack::find(stable_iterator savePoint) const {
return iterator(endOfBuffer - savePoint.size);
}
+inline void EHScopeStack::popCatch() {
+ assert(!empty() && "popping exception stack when not empty");
+
+ EHCatchScope &scope = llvm::cast<EHCatchScope>(*begin());
+ assert(!cir::MissingFeatures::innermostEHScope());
+ deallocate(EHCatchScope::getSizeForNumHandlers(scope.getNumHandlers()));
+}
+
} // namespace clang::CIRGen
#endif // CLANG_LIB_CIR_CODEGEN_CIRGENCLEANUP_H
diff --git a/clang/lib/CIR/CodeGen/CIRGenException.cpp b/clang/lib/CIR/CodeGen/CIRGenException.cpp
index f9ff37b..717a3e0 100644
--- a/clang/lib/CIR/CodeGen/CIRGenException.cpp
+++ b/clang/lib/CIR/CodeGen/CIRGenException.cpp
@@ -69,6 +69,153 @@ mlir::LogicalResult CIRGenFunction::emitCXXTryStmt(const CXXTryStmt &s) {
if (s.getTryBlock()->body_empty())
return mlir::LogicalResult::success();
- cgm.errorNYI("exitCXXTryStmt: CXXTryStmt with non-empty body");
- return mlir::LogicalResult::success();
+ mlir::Location loc = getLoc(s.getSourceRange());
+ // Create a scope to hold try local storage for catch params.
+
+ mlir::OpBuilder::InsertPoint scopeIP;
+ cir::ScopeOp::create(
+ builder, loc,
+ /*scopeBuilder=*/[&](mlir::OpBuilder &b, mlir::Location loc) {
+ scopeIP = builder.saveInsertionPoint();
+ });
+
+ mlir::OpBuilder::InsertionGuard guard(builder);
+ builder.restoreInsertionPoint(scopeIP);
+ mlir::LogicalResult result = emitCXXTryStmtUnderScope(s);
+ cir::YieldOp::create(builder, loc);
+ return result;
+}
+
+mlir::LogicalResult
+CIRGenFunction::emitCXXTryStmtUnderScope(const CXXTryStmt &s) {
+ const llvm::Triple &t = getTarget().getTriple();
+  // If we encounter a try statement in an OpenMP target region offloaded to
+  // a GPU, we treat it as a basic block.
+ const bool isTargetDevice =
+ (cgm.getLangOpts().OpenMPIsTargetDevice && (t.isNVPTX() || t.isAMDGCN()));
+ if (isTargetDevice) {
+ cgm.errorNYI(
+ "emitCXXTryStmtUnderScope: OpenMP target region offloaded to GPU");
+ return mlir::success();
+ }
+
+ unsigned numHandlers = s.getNumHandlers();
+ mlir::Location tryLoc = getLoc(s.getBeginLoc());
+ mlir::OpBuilder::InsertPoint beginInsertTryBody;
+
+ bool hasCatchAll = false;
+ for (unsigned i = 0; i != numHandlers; ++i) {
+ hasCatchAll |= s.getHandler(i)->getExceptionDecl() == nullptr;
+ if (hasCatchAll)
+ break;
+ }
+
+ // Create the scope to represent only the C/C++ `try {}` part. However,
+ // don't populate right away. Create regions for the catch handlers,
+ // but don't emit the handler bodies yet. For now, only make sure the
+ // scope returns the exception information.
+ auto tryOp = cir::TryOp::create(
+ builder, tryLoc,
+ /*tryBuilder=*/
+ [&](mlir::OpBuilder &b, mlir::Location loc) {
+ beginInsertTryBody = builder.saveInsertionPoint();
+ },
+ /*handlersBuilder=*/
+ [&](mlir::OpBuilder &b, mlir::Location loc,
+ mlir::OperationState &result) {
+ mlir::OpBuilder::InsertionGuard guard(b);
+
+        // We create an extra region for an unwind catch handler in case the
+        // catch-all handler doesn't exist.
+ unsigned numRegionsToCreate =
+ hasCatchAll ? numHandlers : numHandlers + 1;
+
+ for (unsigned i = 0; i != numRegionsToCreate; ++i) {
+ mlir::Region *region = result.addRegion();
+ builder.createBlock(region);
+ }
+ });
+
+ // Finally emit the body for try/catch.
+ {
+ mlir::Location loc = tryOp.getLoc();
+ mlir::OpBuilder::InsertionGuard guard(builder);
+ builder.restoreInsertionPoint(beginInsertTryBody);
+ CIRGenFunction::LexicalScope tryScope{*this, loc,
+ builder.getInsertionBlock()};
+
+ tryScope.setAsTry(tryOp);
+
+ // Attach the basic blocks for the catch regions.
+ enterCXXTryStmt(s, tryOp);
+
+ // Emit the body for the `try {}` part.
+ {
+ mlir::OpBuilder::InsertionGuard guard(builder);
+ CIRGenFunction::LexicalScope tryBodyScope{*this, loc,
+ builder.getInsertionBlock()};
+ if (emitStmt(s.getTryBlock(), /*useCurrentScope=*/true).failed())
+ return mlir::failure();
+ }
+
+ // Emit catch clauses.
+ exitCXXTryStmt(s);
+ }
+
+ return mlir::success();
+}
+
+void CIRGenFunction::enterCXXTryStmt(const CXXTryStmt &s, cir::TryOp tryOp,
+ bool isFnTryBlock) {
+ unsigned numHandlers = s.getNumHandlers();
+ EHCatchScope *catchScope = ehStack.pushCatch(numHandlers);
+ for (unsigned i = 0; i != numHandlers; ++i) {
+ const CXXCatchStmt *catchStmt = s.getHandler(i);
+ if (catchStmt->getExceptionDecl()) {
+ cgm.errorNYI("enterCXXTryStmt: CatchStmt with ExceptionDecl");
+ return;
+ }
+
+ // No exception decl indicates '...', a catch-all.
+ mlir::Region *handler = &tryOp.getHandlerRegions()[i];
+ catchScope->setHandler(i, cgm.getCXXABI().getCatchAllTypeInfo(), handler);
+
+    // Under async exceptions, catch(...) needs to catch HW exceptions too.
+    // Mark the scope with SehTryBegin as a SEH __try scope.
+ if (getLangOpts().EHAsynch) {
+ cgm.errorNYI("enterCXXTryStmt: EHAsynch");
+ return;
+ }
+ }
+}
+
+void CIRGenFunction::exitCXXTryStmt(const CXXTryStmt &s, bool isFnTryBlock) {
+ unsigned numHandlers = s.getNumHandlers();
+ EHCatchScope &catchScope = cast<EHCatchScope>(*ehStack.begin());
+ assert(catchScope.getNumHandlers() == numHandlers);
+ cir::TryOp tryOp = curLexScope->getTry();
+
+ // If the catch was not required, bail out now.
+ if (!catchScope.mayThrow()) {
+ catchScope.clearHandlerBlocks();
+ ehStack.popCatch();
+
+      // Drop all basic blocks from all catch regions.
+ SmallVector<mlir::Block *> eraseBlocks;
+ for (mlir::Region &handlerRegion : tryOp.getHandlerRegions()) {
+ if (handlerRegion.empty())
+ continue;
+
+ for (mlir::Block &b : handlerRegion.getBlocks())
+ eraseBlocks.push_back(&b);
+ }
+
+ for (mlir::Block *b : eraseBlocks)
+ b->erase();
+
+ tryOp.setHandlerTypesAttr({});
+ return;
+ }
+
+ cgm.errorNYI("exitCXXTryStmt: Required catch");
}
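For orientation (illustrative only, not part of the patch), the try statements this change can lower are those whose handlers are all catch-all clauses; a typed handler still reaches the errorNYI in enterCXXTryStmt:

    void mayThrow();

    void handled() {
      try {
        mayThrow();
      } catch (...) {   // catch-all: no ExceptionDecl, accepted by this patch
      }
    }

    void notYet() {
      try {
        mayThrow();
      } catch (int e) { // typed handler: still "CatchStmt with ExceptionDecl" NYI
      }
    }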
diff --git a/clang/lib/CIR/CodeGen/CIRGenExpr.cpp b/clang/lib/CIR/CodeGen/CIRGenExpr.cpp
index 9732c9c..52021fc 100644
--- a/clang/lib/CIR/CodeGen/CIRGenExpr.cpp
+++ b/clang/lib/CIR/CodeGen/CIRGenExpr.cpp
@@ -1675,7 +1675,25 @@ CIRGenCallee CIRGenFunction::emitDirectCallee(const GlobalDecl &gd) {
// name to make it clear it's not the actual builtin.
auto fn = cast<cir::FuncOp>(curFn);
if (fn.getName() != fdInlineName && onlyHasInlineBuiltinDeclaration(fd)) {
- cgm.errorNYI("Inline only builtin function calls");
+ cir::FuncOp clone =
+ mlir::cast_or_null<cir::FuncOp>(cgm.getGlobalValue(fdInlineName));
+
+ if (!clone) {
+      // Create a forward declaration; the body will be generated in
+      // generateCode when the function definition is processed.
+ cir::FuncOp calleeFunc = emitFunctionDeclPointer(cgm, gd);
+ mlir::OpBuilder::InsertionGuard guard(builder);
+ builder.setInsertionPointToStart(cgm.getModule().getBody());
+
+ clone = builder.create<cir::FuncOp>(calleeFunc.getLoc(), fdInlineName,
+ calleeFunc.getFunctionType());
+ clone.setLinkageAttr(cir::GlobalLinkageKindAttr::get(
+ &cgm.getMLIRContext(), cir::GlobalLinkageKind::InternalLinkage));
+ clone.setSymVisibility("private");
+ clone.setInlineKindAttr(cir::InlineAttr::get(
+ &cgm.getMLIRContext(), cir::InlineKind::AlwaysInline));
+ }
+ return CIRGenCallee::forDirect(clone, gd);
}
// Replaceable builtins provide their own implementation of a builtin. If we
diff --git a/clang/lib/CIR/CodeGen/CIRGenExprAggregate.cpp b/clang/lib/CIR/CodeGen/CIRGenExprAggregate.cpp
index 568cbdb..d6d226b 100644
--- a/clang/lib/CIR/CodeGen/CIRGenExprAggregate.cpp
+++ b/clang/lib/CIR/CodeGen/CIRGenExprAggregate.cpp
@@ -280,6 +280,7 @@ public:
void VisitUnaryDeref(UnaryOperator *e) { emitAggLoadOfLValue(e); }
void VisitStringLiteral(StringLiteral *e) { emitAggLoadOfLValue(e); }
void VisitCompoundLiteralExpr(CompoundLiteralExpr *e);
+
void VisitPredefinedExpr(const PredefinedExpr *e) {
cgf.cgm.errorNYI(e->getSourceRange(),
"AggExprEmitter: VisitPredefinedExpr");
@@ -670,7 +671,7 @@ void AggExprEmitter::emitNullInitializationToLValue(mlir::Location loc,
return;
}
- cgf.cgm.errorNYI("emitStoreThroughBitfieldLValue");
+ cgf.emitStoreThroughBitfieldLValue(RValue::get(null), lv);
return;
}
diff --git a/clang/lib/CIR/CodeGen/CIRGenExprCXX.cpp b/clang/lib/CIR/CodeGen/CIRGenExprCXX.cpp
index b1e9e76..fe9e210 100644
--- a/clang/lib/CIR/CodeGen/CIRGenExprCXX.cpp
+++ b/clang/lib/CIR/CodeGen/CIRGenExprCXX.cpp
@@ -306,6 +306,7 @@ static mlir::Value emitCXXNewAllocSize(CIRGenFunction &cgf, const CXXNewExpr *e,
mlir::cast<cir::IntAttr>(constNumElements).getValue();
unsigned numElementsWidth = count.getBitWidth();
+ bool hasAnyOverflow = false;
// The equivalent code in CodeGen/CGExprCXX.cpp handles these cases as
// overflow, but that should never happen. The size argument is implicitly
@@ -336,11 +337,22 @@ static mlir::Value emitCXXNewAllocSize(CIRGenFunction &cgf, const CXXNewExpr *e,
// Add in the cookie, and check whether it's overflowed.
if (cookieSize != 0) {
- cgf.cgm.errorNYI(e->getSourceRange(),
- "emitCXXNewAllocSize: array cookie");
+      // Save the current size without a cookie. This shouldn't be
+      // used if there was overflow.
+ sizeWithoutCookie = cgf.getBuilder().getConstInt(
+ loc, allocationSize.zextOrTrunc(sizeWidth));
+
+ allocationSize = allocationSize.uadd_ov(cookieSize, overflow);
+ hasAnyOverflow |= overflow;
}
- size = cgf.getBuilder().getConstInt(loc, allocationSize);
+ // On overflow, produce a -1 so operator new will fail
+ if (hasAnyOverflow) {
+ size =
+ cgf.getBuilder().getConstInt(loc, llvm::APInt::getAllOnes(sizeWidth));
+ } else {
+ size = cgf.getBuilder().getConstInt(loc, allocationSize);
+ }
} else {
// TODO: Handle the variable size case
cgf.cgm.errorNYI(e->getSourceRange(),
@@ -390,7 +402,50 @@ void CIRGenFunction::emitNewArrayInitializer(
if (!e->hasInitializer())
return;
- cgm.errorNYI(e->getSourceRange(), "emitNewArrayInitializer");
+ unsigned initListElements = 0;
+
+ const Expr *init = e->getInitializer();
+ const InitListExpr *ile = dyn_cast<InitListExpr>(init);
+ if (ile) {
+ cgm.errorNYI(ile->getSourceRange(), "emitNewArrayInitializer: init list");
+ return;
+ }
+
+ // If all elements have already been initialized, skip any further
+ // initialization.
+ auto constOp = mlir::dyn_cast<cir::ConstantOp>(numElements.getDefiningOp());
+ if (constOp) {
+ auto constIntAttr = mlir::dyn_cast<cir::IntAttr>(constOp.getValue());
+ // Just skip out if the constant count is zero.
+ if (constIntAttr && constIntAttr.getUInt() <= initListElements)
+ return;
+ }
+
+ assert(init && "have trailing elements to initialize but no initializer");
+
+ // If this is a constructor call, try to optimize it out, and failing that
+ // emit a single loop to initialize all remaining elements.
+ if (const CXXConstructExpr *cce = dyn_cast<CXXConstructExpr>(init)) {
+ CXXConstructorDecl *ctor = cce->getConstructor();
+ if (ctor->isTrivial()) {
+ // If new expression did not specify value-initialization, then there
+ // is no initialization.
+ if (!cce->requiresZeroInitialization())
+ return;
+
+ cgm.errorNYI(cce->getSourceRange(),
+ "emitNewArrayInitializer: trivial ctor zero-init");
+ return;
+ }
+
+ cgm.errorNYI(cce->getSourceRange(),
+ "emitNewArrayInitializer: ctor initializer");
+ return;
+ }
+
+ cgm.errorNYI(init->getSourceRange(),
+ "emitNewArrayInitializer: unsupported initializer");
+ return;
}
static void emitNewInitializer(CIRGenFunction &cgf, const CXXNewExpr *e,
@@ -586,9 +641,6 @@ mlir::Value CIRGenFunction::emitCXXNewExpr(const CXXNewExpr *e) {
// If there is a brace-initializer, cannot allocate fewer elements than inits.
unsigned minElements = 0;
- if (e->isArray() && e->hasInitializer()) {
- cgm.errorNYI(e->getSourceRange(), "emitCXXNewExpr: array initializer");
- }
mlir::Value numElements = nullptr;
mlir::Value allocSizeWithoutCookie = nullptr;
@@ -667,8 +719,11 @@ mlir::Value CIRGenFunction::emitCXXNewExpr(const CXXNewExpr *e) {
!e->getOperatorDelete()->isReservedGlobalPlacementOperator())
cgm.errorNYI(e->getSourceRange(), "emitCXXNewExpr: operator delete");
- if (allocSize != allocSizeWithoutCookie)
- cgm.errorNYI(e->getSourceRange(), "emitCXXNewExpr: array with cookies");
+ if (allocSize != allocSizeWithoutCookie) {
+ assert(e->isArray());
+ allocation = cgm.getCXXABI().initializeArrayCookie(
+ *this, allocation, numElements, e, allocType);
+ }
mlir::Type elementTy;
if (e->isArray()) {
diff --git a/clang/lib/CIR/CodeGen/CIRGenExprConstant.cpp b/clang/lib/CIR/CodeGen/CIRGenExprConstant.cpp
index 19ed656..800262a 100644
--- a/clang/lib/CIR/CodeGen/CIRGenExprConstant.cpp
+++ b/clang/lib/CIR/CodeGen/CIRGenExprConstant.cpp
@@ -1011,9 +1011,9 @@ public:
}
mlir::Attribute VisitCXXDefaultInitExpr(CXXDefaultInitExpr *die, QualType t) {
- cgm.errorNYI(die->getBeginLoc(),
- "ConstExprEmitter::VisitCXXDefaultInitExpr");
- return {};
+ // No need for a DefaultInitExprScope: we don't handle 'this' in a
+ // constant expression.
+ return Visit(die->getExpr(), t);
}
mlir::Attribute VisitExprWithCleanups(ExprWithCleanups *e, QualType t) {
@@ -1028,9 +1028,7 @@ public:
mlir::Attribute VisitImplicitValueInitExpr(ImplicitValueInitExpr *e,
QualType t) {
- cgm.errorNYI(e->getBeginLoc(),
- "ConstExprEmitter::VisitImplicitValueInitExpr");
- return {};
+ return cgm.getBuilder().getZeroInitAttr(cgm.convertType(t));
}
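A small, hypothetical example (not part of the patch) of constant emission that would reach the two visitors above:

    struct Point {
      int x = 7; // the default member initializer is a CXXDefaultInitExpr;
                 // it is now folded by visiting the initializer expression
      int y;     // value-initialization produces an ImplicitValueInitExpr,
                 // which now yields a zero-init attribute
    };

    Point origin{}; // emitting this global constant exercises both paths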
mlir::Attribute VisitInitListExpr(InitListExpr *ile, QualType t) {
diff --git a/clang/lib/CIR/CodeGen/CIRGenExprScalar.cpp b/clang/lib/CIR/CodeGen/CIRGenExprScalar.cpp
index 138082b..33eb748 100644
--- a/clang/lib/CIR/CodeGen/CIRGenExprScalar.cpp
+++ b/clang/lib/CIR/CodeGen/CIRGenExprScalar.cpp
@@ -2041,8 +2041,9 @@ mlir::Value ScalarExprEmitter::VisitMemberExpr(MemberExpr *e) {
assert(!cir::MissingFeatures::tryEmitAsConstant());
Expr::EvalResult result;
if (e->EvaluateAsInt(result, cgf.getContext(), Expr::SE_AllowSideEffects)) {
- cgf.cgm.errorNYI(e->getSourceRange(), "Constant interger member expr");
- // Fall through to emit this as a non-constant access.
+ llvm::APSInt value = result.Val.getInt();
+ cgf.emitIgnoredExpr(e->getBase());
+ return builder.getConstInt(cgf.getLoc(e->getExprLoc()), value);
}
return emitLoadOfLValue(e);
}
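A hedged example (not part of the patch) of a member access that evaluates to an integer constant and now folds directly, with the base emitted only for its side effects:

    struct Limits {
      static const int max = 64;
    };

    int cap(Limits &l) {
      return l.max; // MemberExpr folds to 64; `l` is emitted as an ignored expr
    }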
diff --git a/clang/lib/CIR/CodeGen/CIRGenFunction.cpp b/clang/lib/CIR/CodeGen/CIRGenFunction.cpp
index 25a46df..d3c0d9f 100644
--- a/clang/lib/CIR/CodeGen/CIRGenFunction.cpp
+++ b/clang/lib/CIR/CodeGen/CIRGenFunction.cpp
@@ -551,6 +551,49 @@ cir::FuncOp CIRGenFunction::generateCode(clang::GlobalDecl gd, cir::FuncOp fn,
const auto funcDecl = cast<FunctionDecl>(gd.getDecl());
curGD = gd;
+ if (funcDecl->isInlineBuiltinDeclaration()) {
+ // When generating code for a builtin with an inline declaration, use a
+ // mangled name to hold the actual body, while keeping an external
+ // declaration in case the function pointer is referenced somewhere.
+ std::string fdInlineName = (cgm.getMangledName(funcDecl) + ".inline").str();
+ cir::FuncOp clone =
+ mlir::cast_or_null<cir::FuncOp>(cgm.getGlobalValue(fdInlineName));
+ if (!clone) {
+ mlir::OpBuilder::InsertionGuard guard(builder);
+ builder.setInsertionPoint(fn);
+ clone = builder.create<cir::FuncOp>(fn.getLoc(), fdInlineName,
+ fn.getFunctionType());
+ clone.setLinkage(cir::GlobalLinkageKind::InternalLinkage);
+ clone.setSymVisibility("private");
+ clone.setInlineKind(cir::InlineKind::AlwaysInline);
+ }
+ fn.setLinkage(cir::GlobalLinkageKind::ExternalLinkage);
+ fn.setSymVisibility("private");
+ fn = clone;
+ } else {
+ // Detect the unusual situation where an inline version is shadowed by a
+ // non-inline version. In that case we should pick the external one
+ // everywhere. That's GCC behavior too.
+ for (const FunctionDecl *pd = funcDecl->getPreviousDecl(); pd;
+ pd = pd->getPreviousDecl()) {
+ if (LLVM_UNLIKELY(pd->isInlineBuiltinDeclaration())) {
+ std::string inlineName = funcDecl->getName().str() + ".inline";
+ if (auto inlineFn = mlir::cast_or_null<cir::FuncOp>(
+ cgm.getGlobalValue(inlineName))) {
+          // Replace all uses of the .inline function with the regular function.
+ // FIXME: This performs a linear walk over the module. Introduce some
+ // caching here.
+ if (inlineFn
+ .replaceAllSymbolUses(fn.getSymNameAttr(), cgm.getModule())
+ .failed())
+ llvm_unreachable("Failed to replace inline builtin symbol uses");
+ inlineFn.erase();
+ }
+ break;
+ }
+ }
+ }
+
SourceLocation loc = funcDecl->getLocation();
Stmt *body = funcDecl->getBody();
SourceRange bodyRange =
diff --git a/clang/lib/CIR/CodeGen/CIRGenFunction.h b/clang/lib/CIR/CodeGen/CIRGenFunction.h
index 5a71126..e3b9b6a 100644
--- a/clang/lib/CIR/CodeGen/CIRGenFunction.h
+++ b/clang/lib/CIR/CodeGen/CIRGenFunction.h
@@ -60,11 +60,44 @@ private:
/// is where the next operations will be introduced.
CIRGenBuilderTy &builder;
+ /// A jump destination is an abstract label, branching to which may
+ /// require a jump out through normal cleanups.
+ struct JumpDest {
+ JumpDest() = default;
+ JumpDest(mlir::Block *block, EHScopeStack::stable_iterator depth = {},
+ unsigned index = 0)
+        : block(block), scopeDepth(depth), index(index) {}
+
+ bool isValid() const { return block != nullptr; }
+ mlir::Block *getBlock() const { return block; }
+ EHScopeStack::stable_iterator getScopeDepth() const { return scopeDepth; }
+ unsigned getDestIndex() const { return index; }
+
+ // This should be used cautiously.
+ void setScopeDepth(EHScopeStack::stable_iterator depth) {
+ scopeDepth = depth;
+ }
+
+ private:
+ mlir::Block *block = nullptr;
+ EHScopeStack::stable_iterator scopeDepth;
+    unsigned index = 0;
+ };
+
public:
/// The GlobalDecl for the current function being compiled or the global
/// variable currently being initialized.
clang::GlobalDecl curGD;
+ /// Unified return block.
+  /// In CIR this is a function because each scope may have
+  /// its own associated return block.
+ JumpDest returnBlock(mlir::Block *retBlock) {
+ return getJumpDestInCurrentScope(retBlock);
+ }
+
+ unsigned nextCleanupDestIndex = 1;
+
/// The compiler-generated variable that holds the return value.
std::optional<mlir::Value> fnRetAlloca;
@@ -574,6 +607,16 @@ public:
}
};
+ /// The given basic block lies in the current EH scope, but may be a
+ /// target of a potentially scope-crossing jump; get a stable handle
+ /// to which we can perform this jump later.
+ /// CIRGen: this mostly tracks state for figuring out the proper scope
+ /// information, no actual branches are emitted.
+ JumpDest getJumpDestInCurrentScope(mlir::Block *target) {
+ return JumpDest(target, ehStack.getInnermostNormalCleanup(),
+ nextCleanupDestIndex++);
+ }
+
/// Perform the usual unary conversions on the specified expression and
/// compare the result against zero, returning an Int1Ty value.
mlir::Value evaluateExprAsBool(const clang::Expr *e);
@@ -954,6 +997,9 @@ public:
LexicalScope *parentScope = nullptr;
+ // Holds the actual value for ScopeKind::Try
+ cir::TryOp tryOp = nullptr;
+
// Only Regular is used at the moment. Support for other kinds will be
// added as the relevant statements/expressions are upstreamed.
enum Kind {
@@ -1013,6 +1059,10 @@ public:
void setAsGlobalInit() { scopeKind = Kind::GlobalInit; }
void setAsSwitch() { scopeKind = Kind::Switch; }
void setAsTernary() { scopeKind = Kind::Ternary; }
+ void setAsTry(cir::TryOp op) {
+ scopeKind = Kind::Try;
+ tryOp = op;
+ }
// Lazy create cleanup block or return what's available.
mlir::Block *getOrCreateCleanupBlock(mlir::OpBuilder &builder) {
@@ -1022,6 +1072,11 @@ public:
return cleanupBlock;
}
+ cir::TryOp getTry() {
+ assert(isTry());
+ return tryOp;
+ }
+
mlir::Block *getCleanupBlock(mlir::OpBuilder &builder) {
return cleanupBlock;
}
@@ -1209,6 +1264,8 @@ public:
LValue emitBinaryOperatorLValue(const BinaryOperator *e);
+ cir::BrOp emitBranchThroughCleanup(mlir::Location loc, JumpDest dest);
+
mlir::LogicalResult emitBreakStmt(const clang::BreakStmt &s);
RValue emitBuiltinExpr(const clang::GlobalDecl &gd, unsigned builtinID,
@@ -1348,6 +1405,13 @@ public:
mlir::LogicalResult emitCXXTryStmt(const clang::CXXTryStmt &s);
+ mlir::LogicalResult emitCXXTryStmtUnderScope(const clang::CXXTryStmt &s);
+
+ void enterCXXTryStmt(const CXXTryStmt &s, cir::TryOp tryOp,
+ bool isFnTryBlock = false);
+
+ void exitCXXTryStmt(const CXXTryStmt &s, bool isFnTryBlock = false);
+
void emitCtorPrologue(const clang::CXXConstructorDecl *ctor,
clang::CXXCtorType ctorType, FunctionArgList &args);
@@ -1595,6 +1659,10 @@ public:
bool buildingTopLevelCase);
mlir::LogicalResult emitSwitchStmt(const clang::SwitchStmt &s);
+ mlir::Value emitTargetBuiltinExpr(unsigned builtinID,
+ const clang::CallExpr *e,
+ ReturnValueSlot &returnValue);
+
/// Given a value and its clang type, returns the value casted to its memory
/// representation.
/// Note: CIR defers most of the special casting to the final lowering passes
@@ -1633,6 +1701,8 @@ public:
mlir::LogicalResult emitWhileStmt(const clang::WhileStmt &s);
+ mlir::Value emitX86BuiltinExpr(unsigned builtinID, const CallExpr *e);
+
/// Given an assignment `*lhs = rhs`, emit a test that checks if \p rhs is
/// nonnull, if \p lhs is marked _Nonnull.
void emitNullabilityCheck(LValue lhs, mlir::Value rhs,
diff --git a/clang/lib/CIR/CodeGen/CIRGenItaniumCXXABI.cpp b/clang/lib/CIR/CodeGen/CIRGenItaniumCXXABI.cpp
index c184d4a..e620310 100644
--- a/clang/lib/CIR/CodeGen/CIRGenItaniumCXXABI.cpp
+++ b/clang/lib/CIR/CodeGen/CIRGenItaniumCXXABI.cpp
@@ -135,8 +135,14 @@ public:
cir::PointerType destCIRTy, bool isRefCast,
Address src) override;
- /**************************** RTTI Uniqueness ******************************/
+ Address initializeArrayCookie(CIRGenFunction &cgf, Address newPtr,
+ mlir::Value numElements, const CXXNewExpr *e,
+ QualType elementType) override;
+
protected:
+ CharUnits getArrayCookieSizeImpl(QualType elementType) override;
+
+ /**************************** RTTI Uniqueness ******************************/
/// Returns true if the ABI requires RTTI type_info objects to be unique
/// across a program.
virtual bool shouldRTTIBeUnique() const { return true; }
@@ -2003,3 +2009,70 @@ mlir::Value CIRGenItaniumCXXABI::emitDynamicCast(CIRGenFunction &cgf,
return cgf.getBuilder().createDynCast(loc, src.getPointer(), destCIRTy,
isRefCast, castInfo);
}
+
+/************************** Array allocation cookies **************************/
+
+CharUnits CIRGenItaniumCXXABI::getArrayCookieSizeImpl(QualType elementType) {
+ // The array cookie is a size_t; pad that up to the element alignment.
+ // The cookie is actually right-justified in that space.
+ return std::max(
+ cgm.getSizeSize(),
+ cgm.getASTContext().getPreferredTypeAlignInChars(elementType));
+}
+
+Address CIRGenItaniumCXXABI::initializeArrayCookie(CIRGenFunction &cgf,
+ Address newPtr,
+ mlir::Value numElements,
+ const CXXNewExpr *e,
+ QualType elementType) {
+ assert(requiresArrayCookie(e));
+
+ // TODO: When sanitizer support is implemented, we'll need to
+ // get the address space from `newPtr`.
+ assert(!cir::MissingFeatures::addressSpace());
+ assert(!cir::MissingFeatures::sanitizers());
+
+ ASTContext &ctx = cgm.getASTContext();
+ CharUnits sizeSize = cgf.getSizeSize();
+ mlir::Location loc = cgf.getLoc(e->getSourceRange());
+
+ // The size of the cookie.
+ CharUnits cookieSize =
+ std::max(sizeSize, ctx.getPreferredTypeAlignInChars(elementType));
+ assert(cookieSize == getArrayCookieSizeImpl(elementType));
+
+ cir::PointerType u8PtrTy = cgf.getBuilder().getUInt8PtrTy();
+ mlir::Value baseBytePtr =
+ cgf.getBuilder().createPtrBitcast(newPtr.getPointer(), u8PtrTy);
+
+ // Compute an offset to the cookie.
+ CharUnits cookieOffset = cookieSize - sizeSize;
+ mlir::Value cookiePtrValue = baseBytePtr;
+ if (!cookieOffset.isZero()) {
+ mlir::Value offsetOp = cgf.getBuilder().getSignedInt(
+ loc, cookieOffset.getQuantity(), /*width=*/32);
+ cookiePtrValue =
+ cgf.getBuilder().createPtrStride(loc, cookiePtrValue, offsetOp);
+ }
+
+ CharUnits baseAlignment = newPtr.getAlignment();
+ CharUnits cookiePtrAlignment = baseAlignment.alignmentAtOffset(cookieOffset);
+ Address cookiePtr(cookiePtrValue, u8PtrTy, cookiePtrAlignment);
+
+ // Write the number of elements into the appropriate slot.
+ Address numElementsPtr =
+ cookiePtr.withElementType(cgf.getBuilder(), cgf.SizeTy);
+ cgf.getBuilder().createStore(loc, numElements, numElementsPtr);
+
+ // Finally, compute a pointer to the actual data buffer by skipping
+ // over the cookie completely.
+ mlir::Value dataOffset =
+ cgf.getBuilder().getSignedInt(loc, cookieSize.getQuantity(),
+ /*width=*/32);
+ mlir::Value dataPtr =
+ cgf.getBuilder().createPtrStride(loc, baseBytePtr, dataOffset);
+ mlir::Value finalPtr =
+ cgf.getBuilder().createPtrBitcast(dataPtr, newPtr.getElementType());
+ CharUnits finalAlignment = baseAlignment.alignmentAtOffset(cookieSize);
+ return Address(finalPtr, newPtr.getElementType(), finalAlignment);
+}
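For illustration, the cookie layout computed above can be spelled out in plain C++: the cookie occupies max(sizeof(size_t), preferred element alignment) bytes, and the element count is right-justified within it. This is a standalone sketch, not CIRGen API; all names below are made up for the example:

  #include <algorithm>
  #include <cstddef>
  #include <cstring>

  // Writes an Itanium-style array cookie at the start of `allocation` and
  // returns the address where the array elements begin.
  void *writeArrayCookie(void *allocation, std::size_t numElements,
                         std::size_t preferredElementAlign) {
    const std::size_t cookieSize =
        std::max(sizeof(std::size_t), preferredElementAlign);
    char *base = static_cast<char *>(allocation);
    // The element-count slot sits at the end of the cookie.
    std::memcpy(base + cookieSize - sizeof(std::size_t), &numElements,
                sizeof(numElements));
    // The array data starts immediately after the cookie.
    return base + cookieSize;
  }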
diff --git a/clang/lib/CIR/CodeGen/CIRGenModule.cpp b/clang/lib/CIR/CodeGen/CIRGenModule.cpp
index 127f763..6b29373 100644
--- a/clang/lib/CIR/CodeGen/CIRGenModule.cpp
+++ b/clang/lib/CIR/CodeGen/CIRGenModule.cpp
@@ -102,7 +102,7 @@ CIRGenModule::CIRGenModule(mlir::MLIRContext &mlirContext,
// TODO(CIR): Should be updated once TypeSizeInfoAttr is upstreamed
const unsigned sizeTypeSize =
astContext.getTypeSize(astContext.getSignedSizeType());
- SizeAlignInBytes = astContext.toCharUnitsFromBits(sizeTypeSize).getQuantity();
+ SizeSizeInBytes = astContext.toCharUnitsFromBits(sizeTypeSize).getQuantity();
// In CIRGenTypeCache, UIntPtrTy and SizeType are fields of the same union
UIntPtrTy =
cir::IntType::get(&getMLIRContext(), sizeTypeSize, /*isSigned=*/false);
@@ -1917,6 +1917,17 @@ void CIRGenModule::setFunctionAttributes(GlobalDecl globalDecl,
const Decl *decl = globalDecl.getDecl();
func.setGlobalVisibilityAttr(getGlobalVisibilityAttrFromDecl(decl));
}
+
+ // If we plan on emitting this inline builtin, we can't treat it as a builtin.
+ const auto *fd = cast<FunctionDecl>(globalDecl.getDecl());
+ if (fd->isInlineBuiltinDeclaration()) {
+ const FunctionDecl *fdBody;
+ bool hasBody = fd->hasBody(fdBody);
+ (void)hasBody;
+ assert(hasBody && "Inline builtin declarations should always have an "
+ "available body!");
+ assert(!cir::MissingFeatures::attributeNoBuiltin());
+ }
}
void CIRGenModule::setCIRFunctionAttributesForDefinition(
diff --git a/clang/lib/CIR/CodeGen/CIRGenStmt.cpp b/clang/lib/CIR/CodeGen/CIRGenStmt.cpp
index ad8c4d0..f486c46 100644
--- a/clang/lib/CIR/CodeGen/CIRGenStmt.cpp
+++ b/clang/lib/CIR/CodeGen/CIRGenStmt.cpp
@@ -446,54 +446,89 @@ mlir::LogicalResult CIRGenFunction::emitReturnStmt(const ReturnStmt &s) {
mlir::Location loc = getLoc(s.getSourceRange());
const Expr *rv = s.getRetValue();
- if (getContext().getLangOpts().ElideConstructors && s.getNRVOCandidate() &&
- s.getNRVOCandidate()->isNRVOVariable()) {
- assert(!cir::MissingFeatures::openMP());
- assert(!cir::MissingFeatures::nrvo());
- } else if (!rv) {
- // No return expression. Do nothing.
- } else if (rv->getType()->isVoidType()) {
- // Make sure not to return anything, but evaluate the expression
- // for side effects.
- if (rv) {
- emitAnyExpr(rv);
+ RunCleanupsScope cleanupScope(*this);
+ bool createNewScope = false;
+ if (const auto *ewc = dyn_cast_or_null<ExprWithCleanups>(rv)) {
+ rv = ewc->getSubExpr();
+ createNewScope = true;
+ }
+
+ auto handleReturnVal = [&]() {
+ if (getContext().getLangOpts().ElideConstructors && s.getNRVOCandidate() &&
+ s.getNRVOCandidate()->isNRVOVariable()) {
+ assert(!cir::MissingFeatures::openMP());
+ assert(!cir::MissingFeatures::nrvo());
+ } else if (!rv) {
+ // No return expression. Do nothing.
+ } else if (rv->getType()->isVoidType()) {
+ // Make sure not to return anything, but evaluate the expression
+ // for side effects.
+ if (rv) {
+ emitAnyExpr(rv);
+ }
+ } else if (cast<FunctionDecl>(curGD.getDecl())
+ ->getReturnType()
+ ->isReferenceType()) {
+ // If this function returns a reference, take the address of the
+ // expression rather than the value.
+ RValue result = emitReferenceBindingToExpr(rv);
+ builder.CIRBaseBuilderTy::createStore(loc, result.getValue(),
+ *fnRetAlloca);
+ } else {
+ mlir::Value value = nullptr;
+ switch (CIRGenFunction::getEvaluationKind(rv->getType())) {
+ case cir::TEK_Scalar:
+ value = emitScalarExpr(rv);
+ if (value) { // Change this to an assert once emitScalarExpr is complete
+ builder.CIRBaseBuilderTy::createStore(loc, value, *fnRetAlloca);
+ }
+ break;
+ case cir::TEK_Complex:
+ emitComplexExprIntoLValue(rv,
+ makeAddrLValue(returnValue, rv->getType()),
+ /*isInit=*/true);
+ break;
+ case cir::TEK_Aggregate:
+ assert(!cir::MissingFeatures::aggValueSlotGC());
+ emitAggExpr(rv, AggValueSlot::forAddr(returnValue, Qualifiers(),
+ AggValueSlot::IsDestructed,
+ AggValueSlot::IsNotAliased,
+ getOverlapForReturnValue()));
+ break;
+ }
}
- } else if (cast<FunctionDecl>(curGD.getDecl())
- ->getReturnType()
- ->isReferenceType()) {
- // If this function returns a reference, take the address of the
- // expression rather than the value.
- RValue result = emitReferenceBindingToExpr(rv);
- builder.CIRBaseBuilderTy::createStore(loc, result.getValue(), *fnRetAlloca);
+ };
+
+ if (!createNewScope) {
+ handleReturnVal();
} else {
- mlir::Value value = nullptr;
- switch (CIRGenFunction::getEvaluationKind(rv->getType())) {
- case cir::TEK_Scalar:
- value = emitScalarExpr(rv);
- if (value) { // Change this to an assert once emitScalarExpr is complete
- builder.CIRBaseBuilderTy::createStore(loc, value, *fnRetAlloca);
- }
- break;
- case cir::TEK_Complex:
- emitComplexExprIntoLValue(rv, makeAddrLValue(returnValue, rv->getType()),
- /*isInit=*/true);
- break;
- case cir::TEK_Aggregate:
- assert(!cir::MissingFeatures::aggValueSlotGC());
- emitAggExpr(rv, AggValueSlot::forAddr(returnValue, Qualifiers(),
- AggValueSlot::IsDestructed,
- AggValueSlot::IsNotAliased,
- getOverlapForReturnValue()));
- break;
+ mlir::Location scopeLoc =
+ getLoc(rv ? rv->getSourceRange() : s.getSourceRange());
+    // First create the cir.scope and emit its body later. Otherwise, all
+    // CIRGen dispatched by `handleReturnVal()` might need to manipulate
+    // blocks and look into parents, which are all unlinked.
+ mlir::OpBuilder::InsertPoint scopeBody;
+ cir::ScopeOp::create(builder, scopeLoc, /*scopeBuilder=*/
+ [&](mlir::OpBuilder &b, mlir::Location loc) {
+ scopeBody = b.saveInsertionPoint();
+ });
+ {
+ mlir::OpBuilder::InsertionGuard guard(builder);
+ builder.restoreInsertionPoint(scopeBody);
+ CIRGenFunction::LexicalScope lexScope{*this, scopeLoc,
+ builder.getInsertionBlock()};
+ handleReturnVal();
}
}
+ cleanupScope.forceCleanup();
+
+  // In CIR we might have returns in different scopes.
+  // FIXME(cir): cleanup code is handling the actual return emission; the
+  // logic should try to match traditional codegen more closely (to the
+  // extent possible).
auto *retBlock = curLexScope->getOrCreateRetBlock(*this, loc);
- // This should emit a branch through the cleanup block if one exists.
- builder.create<cir::BrOp>(loc, retBlock);
- assert(!cir::MissingFeatures::emitBranchThroughCleanup());
- if (ehStack.stable_begin() != currentCleanupStackDepth)
- cgm.errorNYI(s.getSourceRange(), "return with cleanup stack");
+ emitBranchThroughCleanup(loc, returnBlock(retBlock));
// Insert the new block to continue codegen after branch to ret block.
builder.createBlock(builder.getBlock()->getParent());
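A source-level illustration (not part of the patch) of the case the new ExprWithCleanups handling covers: the temporary's destructor must run after the returned value is produced, so the evaluation is now wrapped in a cir.scope and the function is left through emitBranchThroughCleanup instead of a bare cir.br:

  struct Guard {
    ~Guard(); // non-trivial destructor forces a cleanup
  };
  int consume(const Guard &g);

  int f() {
    // The return value expression is an ExprWithCleanups: the Guard temporary
    // must be destroyed after consume() produces the value being returned.
    return consume(Guard{});
  }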
@@ -1063,5 +1098,5 @@ void CIRGenFunction::emitReturnOfRValue(mlir::Location loc, RValue rv,
assert(!cir::MissingFeatures::emitBranchThroughCleanup());
builder.create<cir::BrOp>(loc, retBlock);
if (ehStack.stable_begin() != currentCleanupStackDepth)
- cgm.errorNYI(loc, "return with cleanup stack");
+ cgm.errorNYI(loc, "return of r-value with cleanup stack");
}
diff --git a/clang/lib/CIR/CodeGen/CIRGenTypeCache.h b/clang/lib/CIR/CodeGen/CIRGenTypeCache.h
index b5612d9..ff5842c 100644
--- a/clang/lib/CIR/CodeGen/CIRGenTypeCache.h
+++ b/clang/lib/CIR/CodeGen/CIRGenTypeCache.h
@@ -74,11 +74,17 @@ struct CIRGenTypeCache {
unsigned char PointerSizeInBytes;
};
- /// The alignment of size_t.
- unsigned char SizeAlignInBytes;
+ /// The size and alignment of size_t.
+ union {
+ unsigned char SizeSizeInBytes; // sizeof(size_t)
+ unsigned char SizeAlignInBytes;
+ };
cir::TargetAddressSpaceAttr cirAllocaAddressSpace;
+ clang::CharUnits getSizeSize() const {
+ return clang::CharUnits::fromQuantity(SizeSizeInBytes);
+ }
clang::CharUnits getSizeAlign() const {
return clang::CharUnits::fromQuantity(SizeAlignInBytes);
}
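The union above relies on size_t having the same size and alignment, which holds on the targets this code is commonly exercised on; that equality is an assumption of the example below, not something the patch itself checks. An illustrative compile-time check, not part of the patch:

  #include <cstddef>

  static_assert(sizeof(std::size_t) == alignof(std::size_t),
                "size_t's size and alignment coincide on this target");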
diff --git a/clang/lib/CIR/CodeGen/CMakeLists.txt b/clang/lib/CIR/CodeGen/CMakeLists.txt
index 36db4bd..7c31bea 100644
--- a/clang/lib/CIR/CodeGen/CMakeLists.txt
+++ b/clang/lib/CIR/CodeGen/CMakeLists.txt
@@ -11,13 +11,14 @@ add_clang_library(clangCIR
CIRGenAsm.cpp
CIRGenAtomic.cpp
CIRGenBuilder.cpp
+ CIRGenBuiltin.cpp
+ CIRGenBuiltinX86.cpp
CIRGenCall.cpp
CIRGenClass.cpp
CIRGenCleanup.cpp
CIRGenCoroutine.cpp
CIRGenCXX.cpp
CIRGenCXXABI.cpp
- CIRGenBuiltin.cpp
CIRGenDecl.cpp
CIRGenDeclCXX.cpp
CIRGenDeclOpenACC.cpp
diff --git a/clang/lib/CIR/CodeGen/EHScopeStack.h b/clang/lib/CIR/CodeGen/EHScopeStack.h
index 67a72f5..4198c23 100644
--- a/clang/lib/CIR/CodeGen/EHScopeStack.h
+++ b/clang/lib/CIR/CodeGen/EHScopeStack.h
@@ -18,12 +18,38 @@
#ifndef CLANG_LIB_CIR_CODEGEN_EHSCOPESTACK_H
#define CLANG_LIB_CIR_CODEGEN_EHSCOPESTACK_H
+#include "clang/CIR/Dialect/IR/CIRDialect.h"
#include "llvm/ADT/SmallVector.h"
namespace clang::CIRGen {
class CIRGenFunction;
+/// A branch fixup. These are required when emitting a goto to a
+/// label which hasn't been emitted yet. The goto is optimistically
+/// emitted as a branch to the basic block for the label, and (if it
+/// occurs in a scope with non-trivial cleanups) a fixup is added to
+/// the innermost cleanup. When a (normal) cleanup is popped, any
+/// unresolved fixups in that scope are threaded through the cleanup.
+struct BranchFixup {
+ /// The block containing the terminator which needs to be modified
+ /// into a switch if this fixup is resolved into the current scope.
+ /// If null, LatestBranch points directly to the destination.
+ mlir::Block *optimisticBranchBlock = nullptr;
+
+ /// The ultimate destination of the branch.
+ ///
+ /// This can be set to null to indicate that this fixup was
+ /// successfully resolved.
+ mlir::Block *destination = nullptr;
+
+ /// The destination index value.
+ unsigned destinationIndex = 0;
+
+ /// The initial branch of the fixup.
+ cir::BrOp initialBranch = {};
+};
+
enum CleanupKind : unsigned {
/// Denotes a cleanup that should run when a scope is exited using exceptional
  /// control flow (a throw statement leading to stack unwinding).
@@ -126,9 +152,31 @@ private:
/// The first valid entry in the buffer.
char *startOfData = nullptr;
+ /// The innermost normal cleanup on the stack.
+ stable_iterator innermostNormalCleanup = stable_end();
+
/// The CGF this Stack belong to
CIRGenFunction *cgf = nullptr;
+ /// The current set of branch fixups. A branch fixup is a jump to
+ /// an as-yet unemitted label, i.e. a label for which we don't yet
+ /// know the EH stack depth. Whenever we pop a cleanup, we have
+ /// to thread all the current branch fixups through it.
+ ///
+ /// Fixups are recorded as the Use of the respective branch or
+ /// switch statement. The use points to the final destination.
+ /// When popping out of a cleanup, these uses are threaded through
+ /// the cleanup and adjusted to point to the new cleanup.
+ ///
+ /// Note that branches are allowed to jump into protected scopes
+ /// in certain situations; e.g. the following code is legal:
+ /// struct A { ~A(); }; // trivial ctor, non-trivial dtor
+ /// goto foo;
+ /// A a;
+ /// foo:
+ /// bar();
+ llvm::SmallVector<BranchFixup> branchFixups;
+
// This class uses a custom allocator for maximum efficiency because cleanups
// are allocated and freed very frequently. It's basically a bump pointer
// allocator, but we can't use LLVM's BumpPtrAllocator because we use offsets
@@ -155,9 +203,29 @@ public:
/// Pops a cleanup scope off the stack. This is private to CIRGenCleanup.cpp.
void popCleanup();
+ /// Push a set of catch handlers on the stack. The catch is
+ /// uninitialized and will need to have the given number of handlers
+ /// set on it.
+ class EHCatchScope *pushCatch(unsigned numHandlers);
+
+ /// Pops a catch scope off the stack. This is private to CIRGenException.cpp.
+ void popCatch();
+
/// Determines whether the exception-scopes stack is empty.
bool empty() const { return startOfData == endOfBuffer; }
+ /// Determines whether there are any normal cleanups on the stack.
+ bool hasNormalCleanups() const {
+ return innermostNormalCleanup != stable_end();
+ }
+
+ /// Returns the innermost normal cleanup on the stack, or
+ /// stable_end() if there are no normal cleanups.
+ stable_iterator getInnermostNormalCleanup() const {
+ return innermostNormalCleanup;
+ }
+ stable_iterator getInnermostActiveNormalCleanup() const;
+
/// An unstable reference to a scope-stack depth. Invalidated by
/// pushes but not pops.
class iterator;
@@ -172,12 +240,30 @@ public:
return stable_iterator(endOfBuffer - startOfData);
}
+ /// Create a stable reference to the bottom of the EH stack.
+ static stable_iterator stable_end() { return stable_iterator(0); }
+
/// Turn a stable reference to a scope depth into a unstable pointer
/// to the EH stack.
iterator find(stable_iterator savePoint) const;
- /// Create a stable reference to the bottom of the EH stack.
- static stable_iterator stable_end() { return stable_iterator(0); }
+ /// Add a branch fixup to the current cleanup scope.
+ BranchFixup &addBranchFixup() {
+ assert(hasNormalCleanups() && "adding fixup in scope without cleanups");
+ branchFixups.push_back(BranchFixup());
+ return branchFixups.back();
+ }
+
+ unsigned getNumBranchFixups() const { return branchFixups.size(); }
+ BranchFixup &getBranchFixup(unsigned i) {
+ assert(i < getNumBranchFixups());
+ return branchFixups[i];
+ }
+
+ /// Pops lazily-removed fixups from the end of the list. This
+ /// should only be called by procedures which have just popped a
+ /// cleanup or resolved one or more fixups.
+ void popNullFixups();
};
} // namespace clang::CIRGen
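An illustrative source pattern (not taken from the patch) that exercises the branch-fixup machinery declared above: the goto targets a label whose block has not been emitted yet and leaves a scope with a non-trivial cleanup, so the branch gets recorded as a BranchFixup and is threaded through ~A() once that cleanup is popped:

  struct A {
    ~A(); // non-trivial cleanup
  };
  void g();

  void f(bool cond) {
    {
      A a;
      if (cond)
        goto done; // forward jump out of a's scope to a not-yet-emitted label
      g();
    }
  done:
    g();
  }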
diff --git a/clang/lib/CIR/Dialect/IR/CIRDialect.cpp b/clang/lib/CIR/Dialect/IR/CIRDialect.cpp
index ed606b7..fa180f5 100644
--- a/clang/lib/CIR/Dialect/IR/CIRDialect.cpp
+++ b/clang/lib/CIR/Dialect/IR/CIRDialect.cpp
@@ -2941,6 +2941,21 @@ mlir::LogicalResult cir::ThrowOp::verify() {
}
//===----------------------------------------------------------------------===//
+// AtomicFetchOp
+//===----------------------------------------------------------------------===//
+
+LogicalResult cir::AtomicFetchOp::verify() {
+ if (getBinop() != cir::AtomicFetchKind::Add &&
+ getBinop() != cir::AtomicFetchKind::Sub &&
+ getBinop() != cir::AtomicFetchKind::Max &&
+ getBinop() != cir::AtomicFetchKind::Min &&
+ !mlir::isa<cir::IntType>(getVal().getType()))
+    return emitError("only atomic add, sub, max, and min operations can "
+                     "operate on floating-point values");
+ return success();
+}
+
+//===----------------------------------------------------------------------===//
// TypeInfoAttr
//===----------------------------------------------------------------------===//
diff --git a/clang/lib/CIR/Dialect/Transforms/FlattenCFG.cpp b/clang/lib/CIR/Dialect/Transforms/FlattenCFG.cpp
index 8589a2e..46bd186 100644
--- a/clang/lib/CIR/Dialect/Transforms/FlattenCFG.cpp
+++ b/clang/lib/CIR/Dialect/Transforms/FlattenCFG.cpp
@@ -551,10 +551,100 @@ public:
}
};
+class CIRTryOpFlattening : public mlir::OpRewritePattern<cir::TryOp> {
+public:
+ using OpRewritePattern<cir::TryOp>::OpRewritePattern;
+
+ mlir::Block *buildTryBody(cir::TryOp tryOp,
+ mlir::PatternRewriter &rewriter) const {
+ // Split the current block before the TryOp to create the inlining
+ // point.
+ mlir::Block *beforeTryScopeBlock = rewriter.getInsertionBlock();
+ mlir::Block *afterTry =
+ rewriter.splitBlock(beforeTryScopeBlock, rewriter.getInsertionPoint());
+
+ // Inline body region.
+ mlir::Block *beforeBody = &tryOp.getTryRegion().front();
+ rewriter.inlineRegionBefore(tryOp.getTryRegion(), afterTry);
+
+ // Branch into the body of the region.
+ rewriter.setInsertionPointToEnd(beforeTryScopeBlock);
+ cir::BrOp::create(rewriter, tryOp.getLoc(), mlir::ValueRange(), beforeBody);
+ return afterTry;
+ }
+
+ void buildHandlers(cir::TryOp tryOp, mlir::PatternRewriter &rewriter,
+ mlir::Block *afterBody, mlir::Block *afterTry,
+ SmallVectorImpl<cir::CallOp> &callsToRewrite,
+ SmallVectorImpl<mlir::Block *> &landingPads) const {
+ // Replace the tryOp return with a branch that jumps out of the body.
+ rewriter.setInsertionPointToEnd(afterBody);
+
+ mlir::Block *beforeCatch = rewriter.getInsertionBlock();
+ rewriter.setInsertionPointToEnd(beforeCatch);
+
+    // Check whether the terminator is a YieldOp, because there could be
+    // another terminator, e.g. unreachable.
+ if (auto tryBodyYield = dyn_cast<cir::YieldOp>(afterBody->getTerminator()))
+ rewriter.replaceOpWithNewOp<cir::BrOp>(tryBodyYield, afterTry);
+
+ mlir::ArrayAttr handlers = tryOp.getHandlerTypesAttr();
+ if (!handlers || handlers.empty())
+ return;
+
+ llvm_unreachable("TryOpFlattening buildHandlers with CallsOp is NYI");
+ }
+
+ mlir::LogicalResult
+ matchAndRewrite(cir::TryOp tryOp,
+ mlir::PatternRewriter &rewriter) const override {
+ mlir::OpBuilder::InsertionGuard guard(rewriter);
+ mlir::Block *afterBody = &tryOp.getTryRegion().back();
+
+ // Grab the collection of `cir.call exception`s to rewrite to
+ // `cir.try_call`.
+ llvm::SmallVector<cir::CallOp, 4> callsToRewrite;
+ tryOp.getTryRegion().walk([&](CallOp op) {
+      // Only grab calls within the immediately enclosing TryOp scope.
+ if (op->getParentOfType<cir::TryOp>() != tryOp)
+ return;
+ assert(!cir::MissingFeatures::opCallExceptionAttr());
+ callsToRewrite.push_back(op);
+ });
+
+ if (!callsToRewrite.empty())
+ llvm_unreachable(
+ "TryOpFlattening with try block that contains CallOps is NYI");
+
+ // Build try body.
+ mlir::Block *afterTry = buildTryBody(tryOp, rewriter);
+
+ // Build handlers.
+ llvm::SmallVector<mlir::Block *, 4> landingPads;
+ buildHandlers(tryOp, rewriter, afterBody, afterTry, callsToRewrite,
+ landingPads);
+
+ rewriter.eraseOp(tryOp);
+
+ assert((landingPads.size() == callsToRewrite.size()) &&
+ "expected matching number of entries");
+
+    // Quick block cleanup: no indirection to the post-try block.
+ auto brOp = dyn_cast<cir::BrOp>(afterTry->getTerminator());
+ if (brOp && brOp.getDest()->hasNoPredecessors()) {
+ mlir::Block *srcBlock = brOp.getDest();
+ rewriter.eraseOp(brOp);
+ rewriter.mergeBlocks(srcBlock, afterTry);
+ }
+
+ return mlir::success();
+ }
+};
+
void populateFlattenCFGPatterns(RewritePatternSet &patterns) {
patterns
.add<CIRIfFlattening, CIRLoopOpInterfaceFlattening, CIRScopeOpFlattening,
- CIRSwitchOpFlattening, CIRTernaryOpFlattening>(
+ CIRSwitchOpFlattening, CIRTernaryOpFlattening, CIRTryOpFlattening>(
patterns.getContext());
}
@@ -568,7 +658,7 @@ void CIRFlattenCFGPass::runOnOperation() {
assert(!cir::MissingFeatures::ifOp());
assert(!cir::MissingFeatures::switchOp());
assert(!cir::MissingFeatures::tryOp());
- if (isa<IfOp, ScopeOp, SwitchOp, LoopOpInterface, TernaryOp>(op))
+ if (isa<IfOp, ScopeOp, SwitchOp, LoopOpInterface, TernaryOp, TryOp>(op))
ops.push_back(op);
});
diff --git a/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp b/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp
index 3fc94eb..bb75f2d 100644
--- a/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp
+++ b/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp
@@ -770,6 +770,147 @@ mlir::LogicalResult CIRToLLVMAtomicClearOpLowering::matchAndRewrite(
return mlir::success();
}
+static mlir::LLVM::AtomicBinOp
+getLLVMAtomicBinOp(cir::AtomicFetchKind k, bool isInt, bool isSignedInt) {
+ switch (k) {
+ case cir::AtomicFetchKind::Add:
+ return isInt ? mlir::LLVM::AtomicBinOp::add : mlir::LLVM::AtomicBinOp::fadd;
+ case cir::AtomicFetchKind::Sub:
+ return isInt ? mlir::LLVM::AtomicBinOp::sub : mlir::LLVM::AtomicBinOp::fsub;
+ case cir::AtomicFetchKind::And:
+ return mlir::LLVM::AtomicBinOp::_and;
+ case cir::AtomicFetchKind::Xor:
+ return mlir::LLVM::AtomicBinOp::_xor;
+ case cir::AtomicFetchKind::Or:
+ return mlir::LLVM::AtomicBinOp::_or;
+ case cir::AtomicFetchKind::Nand:
+ return mlir::LLVM::AtomicBinOp::nand;
+ case cir::AtomicFetchKind::Max: {
+ if (!isInt)
+ return mlir::LLVM::AtomicBinOp::fmax;
+ return isSignedInt ? mlir::LLVM::AtomicBinOp::max
+ : mlir::LLVM::AtomicBinOp::umax;
+ }
+ case cir::AtomicFetchKind::Min: {
+ if (!isInt)
+ return mlir::LLVM::AtomicBinOp::fmin;
+ return isSignedInt ? mlir::LLVM::AtomicBinOp::min
+ : mlir::LLVM::AtomicBinOp::umin;
+ }
+ }
+ llvm_unreachable("Unknown atomic fetch opcode");
+}
+
+static llvm::StringLiteral getLLVMBinop(cir::AtomicFetchKind k, bool isInt) {
+ switch (k) {
+ case cir::AtomicFetchKind::Add:
+ return isInt ? mlir::LLVM::AddOp::getOperationName()
+ : mlir::LLVM::FAddOp::getOperationName();
+ case cir::AtomicFetchKind::Sub:
+ return isInt ? mlir::LLVM::SubOp::getOperationName()
+ : mlir::LLVM::FSubOp::getOperationName();
+ case cir::AtomicFetchKind::And:
+ return mlir::LLVM::AndOp::getOperationName();
+ case cir::AtomicFetchKind::Xor:
+ return mlir::LLVM::XOrOp::getOperationName();
+ case cir::AtomicFetchKind::Or:
+ return mlir::LLVM::OrOp::getOperationName();
+ case cir::AtomicFetchKind::Nand:
+    // There's no nand binop in LLVM; this is fixed up later with a not.
+ return mlir::LLVM::AndOp::getOperationName();
+ case cir::AtomicFetchKind::Max:
+ case cir::AtomicFetchKind::Min:
+ llvm_unreachable("handled in buildMinMaxPostOp");
+ }
+ llvm_unreachable("Unknown atomic fetch opcode");
+}
+
+mlir::Value CIRToLLVMAtomicFetchOpLowering::buildPostOp(
+ cir::AtomicFetchOp op, OpAdaptor adaptor,
+ mlir::ConversionPatternRewriter &rewriter, mlir::Value rmwVal,
+ bool isInt) const {
+ SmallVector<mlir::Value> atomicOperands = {rmwVal, adaptor.getVal()};
+ SmallVector<mlir::Type> atomicResTys = {rmwVal.getType()};
+ return rewriter
+ .create(op.getLoc(),
+ rewriter.getStringAttr(getLLVMBinop(op.getBinop(), isInt)),
+ atomicOperands, atomicResTys, {})
+ ->getResult(0);
+}
+
+mlir::Value CIRToLLVMAtomicFetchOpLowering::buildMinMaxPostOp(
+ cir::AtomicFetchOp op, OpAdaptor adaptor,
+ mlir::ConversionPatternRewriter &rewriter, mlir::Value rmwVal, bool isInt,
+ bool isSigned) const {
+ mlir::Location loc = op.getLoc();
+
+ if (!isInt) {
+ if (op.getBinop() == cir::AtomicFetchKind::Max)
+ return mlir::LLVM::MaxNumOp::create(rewriter, loc, rmwVal,
+ adaptor.getVal());
+ return mlir::LLVM::MinNumOp::create(rewriter, loc, rmwVal,
+ adaptor.getVal());
+ }
+
+ mlir::LLVM::ICmpPredicate pred;
+ if (op.getBinop() == cir::AtomicFetchKind::Max) {
+ pred = isSigned ? mlir::LLVM::ICmpPredicate::sgt
+ : mlir::LLVM::ICmpPredicate::ugt;
+ } else { // Min
+ pred = isSigned ? mlir::LLVM::ICmpPredicate::slt
+ : mlir::LLVM::ICmpPredicate::ult;
+ }
+ mlir::Value cmp = mlir::LLVM::ICmpOp::create(
+ rewriter, loc,
+ mlir::LLVM::ICmpPredicateAttr::get(rewriter.getContext(), pred), rmwVal,
+ adaptor.getVal());
+ return mlir::LLVM::SelectOp::create(rewriter, loc, cmp, rmwVal,
+ adaptor.getVal());
+}
+
+mlir::LogicalResult CIRToLLVMAtomicFetchOpLowering::matchAndRewrite(
+ cir::AtomicFetchOp op, OpAdaptor adaptor,
+ mlir::ConversionPatternRewriter &rewriter) const {
+ bool isInt = false;
+ bool isSignedInt = false;
+ if (auto intTy = mlir::dyn_cast<cir::IntType>(op.getVal().getType())) {
+ isInt = true;
+ isSignedInt = intTy.isSigned();
+ } else if (mlir::isa<cir::SingleType, cir::DoubleType>(
+ op.getVal().getType())) {
+ isInt = false;
+ } else {
+ return op.emitError() << "Unsupported type: " << op.getVal().getType();
+ }
+
+ mlir::LLVM::AtomicOrdering llvmOrder = getLLVMMemOrder(op.getMemOrder());
+ mlir::LLVM::AtomicBinOp llvmBinOp =
+ getLLVMAtomicBinOp(op.getBinop(), isInt, isSignedInt);
+ auto rmwVal = mlir::LLVM::AtomicRMWOp::create(rewriter, op.getLoc(),
+ llvmBinOp, adaptor.getPtr(),
+ adaptor.getVal(), llvmOrder);
+
+ mlir::Value result = rmwVal.getResult();
+ if (!op.getFetchFirst()) {
+ if (op.getBinop() == cir::AtomicFetchKind::Max ||
+ op.getBinop() == cir::AtomicFetchKind::Min)
+ result = buildMinMaxPostOp(op, adaptor, rewriter, rmwVal.getRes(), isInt,
+ isSignedInt);
+ else
+ result = buildPostOp(op, adaptor, rewriter, rmwVal.getRes(), isInt);
+
+    // Compensate for the lack of a nand binop in LLVM IR.
+ if (op.getBinop() == cir::AtomicFetchKind::Nand) {
+ auto negOne = mlir::LLVM::ConstantOp::create(rewriter, op.getLoc(),
+ result.getType(), -1);
+ result = mlir::LLVM::XOrOp::create(rewriter, op.getLoc(), result, negOne);
+ }
+ }
+
+ rewriter.replaceOp(op, result);
+ return mlir::success();
+}
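Expressed with Clang's __atomic builtins, the post-ops above recover the "<op>_fetch" result from the fetch-first atomicrmw value roughly like this (illustrative only; __atomic_fetch_min is exercised by the atomic.c tests later in this diff, while the nand case is shown purely for comparison):

  // min_fetch: atomicrmw min returns the old value; the new value is
  // re-derived with a signed compare plus select (buildMinMaxPostOp).
  int minFetch(int *p, int v) {
    int old = __atomic_fetch_min(p, v, __ATOMIC_SEQ_CST);
    return old < v ? old : v;
  }

  // nand_fetch: there is no nand post-op binop, so it is rebuilt as
  // (old & v) ^ -1, i.e. ~(old & v).
  int nandFetch(int *p, int v) {
    int old = __atomic_fetch_nand(p, v, __ATOMIC_SEQ_CST);
    return ~(old & v);
  }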
+
mlir::LogicalResult CIRToLLVMBitClrsbOpLowering::matchAndRewrite(
cir::BitClrsbOp op, OpAdaptor adaptor,
mlir::ConversionPatternRewriter &rewriter) const {
diff --git a/clang/lib/CodeGen/BackendUtil.cpp b/clang/lib/CodeGen/BackendUtil.cpp
index 6020684..ecfbcb5 100644
--- a/clang/lib/CodeGen/BackendUtil.cpp
+++ b/clang/lib/CodeGen/BackendUtil.cpp
@@ -430,12 +430,6 @@ static bool initTargetOptions(const CompilerInstance &CI,
Options.NoInfsFPMath = LangOpts.NoHonorInfs;
Options.NoNaNsFPMath = LangOpts.NoHonorNaNs;
Options.NoZerosInBSS = CodeGenOpts.NoZeroInitializedInBSS;
- Options.UnsafeFPMath = LangOpts.AllowFPReassoc && LangOpts.AllowRecip &&
- LangOpts.NoSignedZero && LangOpts.ApproxFunc &&
- (LangOpts.getDefaultFPContractMode() ==
- LangOptions::FPModeKind::FPM_Fast ||
- LangOpts.getDefaultFPContractMode() ==
- LangOptions::FPModeKind::FPM_FastHonorPragmas);
Options.BBAddrMap = CodeGenOpts.BBAddrMap;
Options.BBSections =
diff --git a/clang/lib/CodeGen/CodeGenModule.cpp b/clang/lib/CodeGen/CodeGenModule.cpp
index e490b1c..3746bc04 100644
--- a/clang/lib/CodeGen/CodeGenModule.cpp
+++ b/clang/lib/CodeGen/CodeGenModule.cpp
@@ -1325,22 +1325,29 @@ void CodeGenModule::Release() {
"tag-stack-memory-buildattr", 1);
if (T.isARM() || T.isThumb() || T.isAArch64()) {
+    // Previously, a value of 1 was used and meant that the backend should
+    // derive the function attribute from it. A value of 2 now means the
+    // function attributes are already set for all functions in this module,
+    // so there is no need to propagate them from the module flag. The value
+    // only matters in the case of an LTO module merge, because the backend
+    // will already see all required function attributes set; it is used
+    // before modules get merged. Any positive value means the feature is
+    // active and the required binary markings need to be emitted accordingly.
if (LangOpts.BranchTargetEnforcement)
getModule().addModuleFlag(llvm::Module::Min, "branch-target-enforcement",
- 1);
+ 2);
if (LangOpts.BranchProtectionPAuthLR)
getModule().addModuleFlag(llvm::Module::Min, "branch-protection-pauth-lr",
- 1);
+ 2);
if (LangOpts.GuardedControlStack)
- getModule().addModuleFlag(llvm::Module::Min, "guarded-control-stack", 1);
+ getModule().addModuleFlag(llvm::Module::Min, "guarded-control-stack", 2);
if (LangOpts.hasSignReturnAddress())
- getModule().addModuleFlag(llvm::Module::Min, "sign-return-address", 1);
+ getModule().addModuleFlag(llvm::Module::Min, "sign-return-address", 2);
if (LangOpts.isSignReturnAddressScopeAll())
getModule().addModuleFlag(llvm::Module::Min, "sign-return-address-all",
- 1);
+ 2);
if (!LangOpts.isSignReturnAddressWithAKey())
getModule().addModuleFlag(llvm::Module::Min,
- "sign-return-address-with-bkey", 1);
+ "sign-return-address-with-bkey", 2);
if (LangOpts.PointerAuthELFGOT)
getModule().addModuleFlag(llvm::Module::Min, "ptrauth-elf-got", 1);
diff --git a/clang/lib/Driver/ToolChains/Arch/ARM.cpp b/clang/lib/Driver/ToolChains/Arch/ARM.cpp
index 954ecab..61beb04 100644
--- a/clang/lib/Driver/ToolChains/Arch/ARM.cpp
+++ b/clang/lib/Driver/ToolChains/Arch/ARM.cpp
@@ -290,6 +290,8 @@ void arm::setArchNameInTriple(const Driver &D, const ArgList &Args,
// Thumb2 is the default for V7 on Darwin.
(llvm::ARM::parseArchVersion(Suffix) == 7 &&
Triple.isOSBinFormatMachO()) ||
+ // Thumb2 is the default for Fuchsia.
+ Triple.isOSFuchsia() ||
// FIXME: this is invalid for WindowsCE
Triple.isOSWindows();
@@ -452,6 +454,9 @@ arm::FloatABI arm::getDefaultFloatABI(const llvm::Triple &Triple) {
case llvm::Triple::OpenBSD:
return FloatABI::SoftFP;
+ case llvm::Triple::Fuchsia:
+ return FloatABI::Hard;
+
default:
if (Triple.isOHOSFamily())
return FloatABI::Soft;
diff --git a/clang/lib/Format/ContinuationIndenter.cpp b/clang/lib/Format/ContinuationIndenter.cpp
index 37c10c6..e5abf83 100644
--- a/clang/lib/Format/ContinuationIndenter.cpp
+++ b/clang/lib/Format/ContinuationIndenter.cpp
@@ -798,9 +798,11 @@ void ContinuationIndenter::addTokenOnCurrentLine(LineState &State, bool DryRun,
}
if (!DryRun) {
+ const bool ContinuePPDirective =
+ State.Line->InMacroBody && Current.isNot(TT_LineComment);
Whitespaces.replaceWhitespace(Current, /*Newlines=*/0, Spaces,
State.Column + Spaces + PPColumnCorrection,
- /*IsAligned=*/false, State.Line->InMacroBody);
+ /*IsAligned=*/false, ContinuePPDirective);
}
// If "BreakBeforeInheritanceComma" mode, don't break within the inheritance
@@ -1176,10 +1178,11 @@ unsigned ContinuationIndenter::addTokenOnNewLine(LineState &State,
// about removing empty lines on closing blocks. Special case them here.
MaxEmptyLinesToKeep = 1;
}
- unsigned Newlines =
+ const unsigned Newlines =
std::max(1u, std::min(Current.NewlinesBefore, MaxEmptyLinesToKeep));
- bool ContinuePPDirective =
- State.Line->InPPDirective && State.Line->Type != LT_ImportStatement;
+ const bool ContinuePPDirective = State.Line->InPPDirective &&
+ State.Line->Type != LT_ImportStatement &&
+ Current.isNot(TT_LineComment);
Whitespaces.replaceWhitespace(Current, Newlines, State.Column, State.Column,
CurrentState.IsAligned, ContinuePPDirective);
}
diff --git a/clang/lib/Sema/HeuristicResolver.cpp b/clang/lib/Sema/HeuristicResolver.cpp
index cbdefaa..056e133 100644
--- a/clang/lib/Sema/HeuristicResolver.cpp
+++ b/clang/lib/Sema/HeuristicResolver.cpp
@@ -450,7 +450,12 @@ QualType HeuristicResolverImpl::resolveExprToType(const Expr *E) {
if (const auto *CE = dyn_cast<CallExpr>(E)) {
if (QualType Resolved = resolveTypeOfCallExpr(CE); !Resolved.isNull())
return Resolved;
+
+    // Don't proceed to try resolveExprToDecls(); it would just call
+    // resolveTypeOfCallExpr() again.
+ return E->getType();
}
+
// Similarly, unwrapping a unary dereference operation does not work via
// resolveExprToDecls.
if (const auto *UO = dyn_cast<UnaryOperator>(E->IgnoreParenCasts())) {
diff --git a/clang/lib/Sema/Sema.cpp b/clang/lib/Sema/Sema.cpp
index 39fa25f..215ac18 100644
--- a/clang/lib/Sema/Sema.cpp
+++ b/clang/lib/Sema/Sema.cpp
@@ -2214,9 +2214,9 @@ void Sema::checkTypeSupport(QualType Ty, SourceLocation Loc, ValueDecl *D) {
else
PD << "expression";
- if (Diag(Loc, PD, FD)
- << false /*show bit size*/ << 0 << Ty << false /*return*/
- << TI.getTriple().str()) {
+ if (Diag(Loc, PD) << false /*show bit size*/ << 0 << Ty
+ << false /*return*/
+ << TI.getTriple().str()) {
if (D)
D->setInvalidDecl();
}
@@ -2233,9 +2233,8 @@ void Sema::checkTypeSupport(QualType Ty, SourceLocation Loc, ValueDecl *D) {
else
PD << "expression";
- if (Diag(Loc, PD, FD)
- << false /*show bit size*/ << 0 << Ty << true /*return*/
- << TI.getTriple().str()) {
+ if (Diag(Loc, PD) << false /*show bit size*/ << 0 << Ty << true /*return*/
+ << TI.getTriple().str()) {
if (D)
D->setInvalidDecl();
}
diff --git a/clang/lib/Sema/SemaBase.cpp b/clang/lib/Sema/SemaBase.cpp
index 9b677f4..bf32491 100644
--- a/clang/lib/Sema/SemaBase.cpp
+++ b/clang/lib/Sema/SemaBase.cpp
@@ -58,13 +58,13 @@ SemaBase::SemaDiagnosticBuilder::getDeviceDeferredDiags() const {
return S.DeviceDeferredDiags;
}
-Sema::SemaDiagnosticBuilder SemaBase::Diag(SourceLocation Loc, unsigned DiagID,
- bool DeferHint) {
+Sema::SemaDiagnosticBuilder SemaBase::Diag(SourceLocation Loc,
+ unsigned DiagID) {
bool IsError =
getDiagnostics().getDiagnosticIDs()->isDefaultMappingAsError(DiagID);
bool ShouldDefer = getLangOpts().CUDA && getLangOpts().GPUDeferDiag &&
DiagnosticIDs::isDeferrable(DiagID) &&
- (DeferHint || SemaRef.DeferDiags || !IsError);
+ (SemaRef.DeferDiags || !IsError);
auto SetIsLastErrorImmediate = [&](bool Flag) {
if (IsError)
SemaRef.IsLastErrorImmediate = Flag;
@@ -83,16 +83,13 @@ Sema::SemaDiagnosticBuilder SemaBase::Diag(SourceLocation Loc, unsigned DiagID,
}
Sema::SemaDiagnosticBuilder SemaBase::Diag(SourceLocation Loc,
- const PartialDiagnostic &PD,
- bool DeferHint) {
- return Diag(Loc, PD.getDiagID(), DeferHint) << PD;
+ const PartialDiagnostic &PD) {
+ return Diag(Loc, PD.getDiagID()) << PD;
}
SemaBase::SemaDiagnosticBuilder SemaBase::DiagCompat(SourceLocation Loc,
- unsigned CompatDiagId,
- bool DeferHint) {
+ unsigned CompatDiagId) {
return Diag(Loc,
- DiagnosticIDs::getCXXCompatDiagId(getLangOpts(), CompatDiagId),
- DeferHint);
+ DiagnosticIDs::getCXXCompatDiagId(getLangOpts(), CompatDiagId));
}
} // namespace clang
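With the DeferHint parameter removed, call sites that previously passed a hint now scope the diagnostic under a DeferDiagsRAII, as the SemaOverload.cpp hunks below do. A minimal sketch, assuming a Sema member context; DiagID, Loc, and Arg are placeholders:

  {
    // SemaRef.DeferDiags now drives deferral instead of a per-call hint.
    Sema::DeferDiagsRAII DeferRAII{SemaRef, /*DeferHint=*/true};
    Diag(Loc, DiagID) << Arg;
  }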
diff --git a/clang/lib/Sema/SemaOverload.cpp b/clang/lib/Sema/SemaOverload.cpp
index 7da09e8..1f25111 100644
--- a/clang/lib/Sema/SemaOverload.cpp
+++ b/clang/lib/Sema/SemaOverload.cpp
@@ -13208,7 +13208,10 @@ void OverloadCandidateSet::NoteCandidates(
auto Cands = CompleteCandidates(S, OCD, Args, OpLoc, Filter);
- S.Diag(PD.first, PD.second, shouldDeferDiags(S, Args, OpLoc));
+ {
+ Sema::DeferDiagsRAII RAII{S, shouldDeferDiags(S, Args, OpLoc)};
+ S.Diag(PD.first, PD.second);
+ }
// In WebAssembly we don't want to emit further diagnostics if a table is
// passed as an argument to a function.
@@ -13271,10 +13274,10 @@ void OverloadCandidateSet::NoteCandidates(Sema &S, ArrayRef<Expr *> Args,
// inform the future value of S.Diags.getNumOverloadCandidatesToShow().
S.Diags.overloadCandidatesShown(CandsShown);
- if (I != E)
- S.Diag(OpLoc, diag::note_ovl_too_many_candidates,
- shouldDeferDiags(S, Args, OpLoc))
- << int(E - I);
+ if (I != E) {
+ Sema::DeferDiagsRAII RAII{S, shouldDeferDiags(S, Args, OpLoc)};
+ S.Diag(OpLoc, diag::note_ovl_too_many_candidates) << int(E - I);
+ }
}
static SourceLocation
diff --git a/clang/lib/Sema/SemaRISCV.cpp b/clang/lib/Sema/SemaRISCV.cpp
index c5ef0d5..b5f91a3 100644
--- a/clang/lib/Sema/SemaRISCV.cpp
+++ b/clang/lib/Sema/SemaRISCV.cpp
@@ -1445,21 +1445,21 @@ void SemaRISCV::checkRVVTypeSupport(QualType Ty, SourceLocation Loc, Decl *D,
if (Info.ElementType->isSpecificBuiltinType(BuiltinType::Double) &&
!FeatureMap.lookup("zve64d"))
- Diag(Loc, diag::err_riscv_type_requires_extension, D) << Ty << "zve64d";
+ Diag(Loc, diag::err_riscv_type_requires_extension) << Ty << "zve64d";
// (ELEN, LMUL) pairs of (8, mf8), (16, mf4), (32, mf2), (64, m1) requires at
// least zve64x
else if (((EltSize == 64 && Info.ElementType->isIntegerType()) ||
MinElts == 1) &&
!FeatureMap.lookup("zve64x"))
- Diag(Loc, diag::err_riscv_type_requires_extension, D) << Ty << "zve64x";
+ Diag(Loc, diag::err_riscv_type_requires_extension) << Ty << "zve64x";
else if (Info.ElementType->isFloat16Type() && !FeatureMap.lookup("zvfh") &&
!FeatureMap.lookup("zvfhmin") &&
!FeatureMap.lookup("xandesvpackfph"))
if (DeclareAndesVectorBuiltins) {
- Diag(Loc, diag::err_riscv_type_requires_extension, D)
+ Diag(Loc, diag::err_riscv_type_requires_extension)
<< Ty << "zvfh, zvfhmin or xandesvpackfph";
} else {
- Diag(Loc, diag::err_riscv_type_requires_extension, D)
+ Diag(Loc, diag::err_riscv_type_requires_extension)
<< Ty << "zvfh or zvfhmin";
}
else if (Info.ElementType->isBFloat16Type() &&
@@ -1467,18 +1467,18 @@ void SemaRISCV::checkRVVTypeSupport(QualType Ty, SourceLocation Loc, Decl *D,
!FeatureMap.lookup("xandesvbfhcvt") &&
!FeatureMap.lookup("experimental-zvfbfa"))
if (DeclareAndesVectorBuiltins) {
- Diag(Loc, diag::err_riscv_type_requires_extension, D)
+ Diag(Loc, diag::err_riscv_type_requires_extension)
<< Ty << "zvfbfmin or xandesvbfhcvt";
} else {
- Diag(Loc, diag::err_riscv_type_requires_extension, D) << Ty << "zvfbfmin";
+ Diag(Loc, diag::err_riscv_type_requires_extension) << Ty << "zvfbfmin";
}
else if (Info.ElementType->isSpecificBuiltinType(BuiltinType::Float) &&
!FeatureMap.lookup("zve32f"))
- Diag(Loc, diag::err_riscv_type_requires_extension, D) << Ty << "zve32f";
+ Diag(Loc, diag::err_riscv_type_requires_extension) << Ty << "zve32f";
// Given that caller already checked isRVVType() before calling this function,
// if we don't have at least zve32x supported, then we need to emit error.
else if (!FeatureMap.lookup("zve32x"))
- Diag(Loc, diag::err_riscv_type_requires_extension, D) << Ty << "zve32x";
+ Diag(Loc, diag::err_riscv_type_requires_extension) << Ty << "zve32x";
}
/// Are the two types RVV-bitcast-compatible types? I.e. is bitcasting from the
diff --git a/clang/test/CIR/CodeGen/atomic.c b/clang/test/CIR/CodeGen/atomic.c
index cf20226..6579988 100644
--- a/clang/test/CIR/CodeGen/atomic.c
+++ b/clang/test/CIR/CodeGen/atomic.c
@@ -584,3 +584,526 @@ void clear_volatile(volatile void *p) {
// OGCG: store atomic volatile i8 0, ptr %{{.+}} seq_cst, align 1
}
+
+int atomic_fetch_add(int *ptr, int value) {
+ // CIR-LABEL: @atomic_fetch_add
+ // LLVM-LABEL: @atomic_fetch_add
+ // OGCG-LABEL: @atomic_fetch_add
+
+ return __atomic_fetch_add(ptr, value, __ATOMIC_SEQ_CST);
+ // CIR: %{{.+}} = cir.atomic.fetch add seq_cst fetch_first %{{.+}}, %{{.+}} : (!cir.ptr<!s32i>, !s32i) -> !s32i
+
+ // LLVM: %[[RES:.+]] = atomicrmw add ptr %{{.+}}, i32 %{{.+}} seq_cst, align 4
+ // LLVM-NEXT: store i32 %[[RES]], ptr %{{.+}}, align 4
+
+ // OGCG: %[[RES:.+]] = atomicrmw add ptr %{{.+}}, i32 %{{.+}} seq_cst, align 4
+ // OGCG-NEXT: store i32 %[[RES]], ptr %{{.+}}, align 4
+}
+
+int atomic_add_fetch(int *ptr, int value) {
+ // CIR-LABEL: @atomic_add_fetch
+ // LLVM-LABEL: @atomic_add_fetch
+ // OGCG-LABEL: @atomic_add_fetch
+
+ return __atomic_add_fetch(ptr, value, __ATOMIC_SEQ_CST);
+ // CIR: %{{.+}} = cir.atomic.fetch add seq_cst %{{.+}}, %{{.+}} : (!cir.ptr<!s32i>, !s32i) -> !s32i
+
+ // LLVM: %[[OLD:.+]] = atomicrmw add ptr %{{.+}}, i32 %[[VAL:.+]] seq_cst, align 4
+ // LLVM-NEXT: %[[RES:.+]] = add i32 %[[OLD]], %[[VAL]]
+ // LLVM-NEXT: store i32 %[[RES]], ptr %{{.+}}, align 4
+
+ // OGCG: %[[OLD:.+]] = atomicrmw add ptr %{{.+}}, i32 %[[VAL:.+]] seq_cst, align 4
+ // OGCG-NEXT: %[[RES:.+]] = add i32 %[[OLD]], %[[VAL]]
+ // OGCG-NEXT: store i32 %[[RES]], ptr %{{.+}}, align 4
+}
+
+int c11_atomic_fetch_add(_Atomic(int) *ptr, int value) {
+ // CIR-LABEL: @c11_atomic_fetch_add
+ // LLVM-LABEL: @c11_atomic_fetch_add
+ // OGCG-LABEL: @c11_atomic_fetch_add
+
+ return __c11_atomic_fetch_add(ptr, value, __ATOMIC_SEQ_CST);
+ // CIR: %{{.+}} = cir.atomic.fetch add seq_cst fetch_first %{{.+}}, %{{.+}} : (!cir.ptr<!s32i>, !s32i) -> !s32i
+
+ // LLVM: %[[RES:.+]] = atomicrmw add ptr %{{.+}}, i32 %{{.+}} seq_cst, align 4
+ // LLVM-NEXT: store i32 %[[RES]], ptr %{{.+}}, align 4
+
+ // OGCG: %[[RES:.+]] = atomicrmw add ptr %{{.+}}, i32 %{{.+}} seq_cst, align 4
+ // OGCG-NEXT: store i32 %[[RES]], ptr %{{.+}}, align 4
+}
+
+int atomic_fetch_sub(int *ptr, int value) {
+ // CIR-LABEL: @atomic_fetch_sub
+ // LLVM-LABEL: @atomic_fetch_sub
+ // OGCG-LABEL: @atomic_fetch_sub
+
+ return __atomic_fetch_sub(ptr, value, __ATOMIC_SEQ_CST);
+ // CIR: %{{.+}} = cir.atomic.fetch sub seq_cst fetch_first %{{.+}}, %{{.+}} : (!cir.ptr<!s32i>, !s32i) -> !s32i
+
+ // LLVM: %[[RES:.+]] = atomicrmw sub ptr %{{.+}}, i32 %{{.+}} seq_cst, align 4
+ // LLVM-NEXT: store i32 %[[RES]], ptr %{{.+}}, align 4
+
+ // OGCG: %[[RES:.+]] = atomicrmw sub ptr %{{.+}}, i32 %{{.+}} seq_cst, align 4
+ // OGCG-NEXT: store i32 %[[RES]], ptr %{{.+}}, align 4
+}
+
+int atomic_sub_fetch(int *ptr, int value) {
+ // CIR-LABEL: @atomic_sub_fetch
+ // LLVM-LABEL: @atomic_sub_fetch
+ // OGCG-LABEL: @atomic_sub_fetch
+
+ return __atomic_sub_fetch(ptr, value, __ATOMIC_SEQ_CST);
+ // CIR: %{{.+}} = cir.atomic.fetch sub seq_cst %{{.+}}, %{{.+}} : (!cir.ptr<!s32i>, !s32i) -> !s32i
+
+ // LLVM: %[[OLD:.+]] = atomicrmw sub ptr %{{.+}}, i32 %[[VAL:.+]] seq_cst, align 4
+ // LLVM-NEXT: %[[RES:.+]] = sub i32 %[[OLD]], %[[VAL]]
+ // LLVM-NEXT: store i32 %[[RES]], ptr %{{.+}}, align 4
+
+ // OGCG: %[[OLD:.+]] = atomicrmw sub ptr %{{.+}}, i32 %[[VAL:.+]] seq_cst, align 4
+ // OGCG-NEXT: %[[RES:.+]] = sub i32 %[[OLD]], %[[VAL]]
+ // OGCG-NEXT: store i32 %[[RES]], ptr %{{.+}}, align 4
+}
+
+int c11_atomic_fetch_sub(_Atomic(int) *ptr, int value) {
+ // CIR-LABEL: @c11_atomic_fetch_sub
+ // LLVM-LABEL: @c11_atomic_fetch_sub
+ // OGCG-LABEL: @c11_atomic_fetch_sub
+
+ return __c11_atomic_fetch_sub(ptr, value, __ATOMIC_SEQ_CST);
+ // CIR: %{{.+}} = cir.atomic.fetch sub seq_cst fetch_first %{{.+}}, %{{.+}} : (!cir.ptr<!s32i>, !s32i) -> !s32i
+
+ // LLVM: %[[RES:.+]] = atomicrmw sub ptr %{{.+}}, i32 %{{.+}} seq_cst, align 4
+ // LLVM-NEXT: store i32 %[[RES]], ptr %{{.+}}, align 4
+
+ // OGCG: %[[RES:.+]] = atomicrmw sub ptr %{{.+}}, i32 %{{.+}} seq_cst, align 4
+ // OGCG-NEXT: store i32 %[[RES]], ptr %{{.+}}, align 4
+}
+
+float atomic_fetch_add_fp(float *ptr, float value) {
+ // CIR-LABEL: @atomic_fetch_add_fp
+ // LLVM-LABEL: @atomic_fetch_add_fp
+ // OGCG-LABEL: @atomic_fetch_add_fp
+
+ return __atomic_fetch_add(ptr, value, __ATOMIC_SEQ_CST);
+ // CIR: %{{.+}} = cir.atomic.fetch add seq_cst fetch_first %{{.+}}, %{{.+}} : (!cir.ptr<!cir.float>, !cir.float) -> !cir.float
+
+ // LLVM: %[[RES:.+]] = atomicrmw fadd ptr %{{.+}}, float %{{.+}} seq_cst, align 4
+ // LLVM-NEXT: store float %[[RES]], ptr %{{.+}}, align 4
+
+ // OGCG: %[[RES:.+]] = atomicrmw fadd ptr %{{.+}}, float %{{.+}} seq_cst, align 4
+ // OGCG-NEXT: store float %[[RES]], ptr %{{.+}}, align 4
+}
+
+float atomic_add_fetch_fp(float *ptr, float value) {
+ // CIR-LABEL: @atomic_add_fetch_fp
+ // LLVM-LABEL: @atomic_add_fetch_fp
+ // OGCG-LABEL: @atomic_add_fetch_fp
+
+ return __atomic_add_fetch(ptr, value, __ATOMIC_SEQ_CST);
+ // CIR: %{{.+}} = cir.atomic.fetch add seq_cst %{{.+}}, %{{.+}} : (!cir.ptr<!cir.float>, !cir.float) -> !cir.float
+
+ // LLVM: %[[OLD:.+]] = atomicrmw fadd ptr %{{.+}}, float %[[VAL:.+]] seq_cst, align 4
+ // LLVM-NEXT: %[[RES:.+]] = fadd float %[[OLD]], %[[VAL]]
+ // LLVM-NEXT: store float %[[RES]], ptr %{{.+}}, align 4
+
+ // OGCG: %[[OLD:.+]] = atomicrmw fadd ptr %{{.+}}, float %[[VAL:.+]] seq_cst, align 4
+ // OGCG-NEXT: %[[RES:.+]] = fadd float %[[OLD]], %[[VAL]]
+ // OGCG-NEXT: store float %[[RES]], ptr %{{.+}}, align 4
+}
+
+float c11_atomic_fetch_sub_fp(_Atomic(float) *ptr, float value) {
+ // CIR-LABEL: @c11_atomic_fetch_sub_fp
+ // LLVM-LABEL: @c11_atomic_fetch_sub_fp
+ // OGCG-LABEL: @c11_atomic_fetch_sub_fp
+
+ return __c11_atomic_fetch_sub(ptr, value, __ATOMIC_SEQ_CST);
+ // CIR: %{{.+}} = cir.atomic.fetch sub seq_cst fetch_first %{{.+}}, %{{.+}} : (!cir.ptr<!cir.float>, !cir.float) -> !cir.float
+
+ // LLVM: %[[RES:.+]] = atomicrmw fsub ptr %{{.+}}, float %{{.+}} seq_cst, align 4
+ // LLVM-NEXT: store float %[[RES]], ptr %{{.+}}, align 4
+
+ // OGCG: %[[RES:.+]] = atomicrmw fsub ptr %{{.+}}, float %{{.+}} seq_cst, align 4
+ // OGCG-NEXT: store float %[[RES]], ptr %{{.+}}, align 4
+}
+
+int atomic_fetch_min(int *ptr, int value) {
+ // CIR-LABEL: @atomic_fetch_min
+ // LLVM-LABEL: @atomic_fetch_min
+ // OGCG-LABEL: @atomic_fetch_min
+
+ return __atomic_fetch_min(ptr, value, __ATOMIC_SEQ_CST);
+ // CIR: %{{.+}} = cir.atomic.fetch min seq_cst fetch_first %{{.+}}, %{{.+}} : (!cir.ptr<!s32i>, !s32i) -> !s32i
+
+ // LLVM: %[[RES:.+]] = atomicrmw min ptr %{{.+}}, i32 %{{.+}} seq_cst, align 4
+ // LLVM-NEXT: store i32 %[[RES]], ptr %{{.+}}, align 4
+
+ // OGCG: %[[RES:.+]] = atomicrmw min ptr %{{.+}}, i32 %{{.+}} seq_cst, align 4
+ // OGCG-NEXT: store i32 %[[RES]], ptr %{{.+}}, align 4
+}
+
+int atomic_min_fetch(int *ptr, int value) {
+ // CIR-LABEL: @atomic_min_fetch
+ // LLVM-LABEL: @atomic_min_fetch
+ // OGCG-LABEL: @atomic_min_fetch
+
+ return __atomic_min_fetch(ptr, value, __ATOMIC_SEQ_CST);
+ // CIR: %{{.+}} = cir.atomic.fetch min seq_cst %{{.+}}, %{{.+}} : (!cir.ptr<!s32i>, !s32i) -> !s32i
+
+ // LLVM: %[[OLD:.+]] = atomicrmw min ptr %{{.+}}, i32 %[[VAL:.+]] seq_cst, align 4
+ // LLVM-NEXT: %[[OLD_LESS:.+]] = icmp slt i32 %[[OLD]], %[[VAL]]
+ // LLVM-NEXT: %[[RES:.+]] = select i1 %[[OLD_LESS]], i32 %[[OLD]], i32 %[[VAL]]
+ // LLVM-NEXT: store i32 %[[RES]], ptr %{{.+}}, align 4
+
+ // OGCG: %[[OLD:.+]] = atomicrmw min ptr %{{.+}}, i32 %[[VAL:.+]] seq_cst, align 4
+ // OGCG-NEXT: %[[OLD_LESS:.+]] = icmp slt i32 %[[OLD]], %[[VAL]]
+ // OGCG-NEXT: %[[RES:.+]] = select i1 %[[OLD_LESS]], i32 %[[OLD]], i32 %[[VAL]]
+ // OGCG-NEXT: store i32 %[[RES]], ptr %{{.+}}, align 4
+}
+
+int c11_atomic_fetch_min(_Atomic(int) *ptr, int value) {
+ // CIR-LABEL: @c11_atomic_fetch_min
+ // LLVM-LABEL: @c11_atomic_fetch_min
+ // OGCG-LABEL: @c11_atomic_fetch_min
+
+ return __c11_atomic_fetch_min(ptr, value, __ATOMIC_SEQ_CST);
+ // CIR: %{{.+}} = cir.atomic.fetch min seq_cst fetch_first %{{.+}}, %{{.+}} : (!cir.ptr<!s32i>, !s32i) -> !s32i
+
+ // LLVM: %[[RES:.+]] = atomicrmw min ptr %{{.+}}, i32 %{{.+}} seq_cst, align 4
+ // LLVM-NEXT: store i32 %[[RES]], ptr %{{.+}}, align 4
+
+ // OGCG: %[[RES:.+]] = atomicrmw min ptr %{{.+}}, i32 %{{.+}} seq_cst, align 4
+ // OGCG-NEXT: store i32 %[[RES]], ptr %{{.+}}, align 4
+}
+
+float atomic_fetch_min_fp(float *ptr, float value) {
+ // CIR-LABEL: @atomic_fetch_min_fp
+ // LLVM-LABEL: @atomic_fetch_min_fp
+ // OGCG-LABEL: @atomic_fetch_min_fp
+
+ return __atomic_fetch_min(ptr, value, __ATOMIC_SEQ_CST);
+ // CIR: %{{.+}} = cir.atomic.fetch min seq_cst fetch_first %{{.+}}, %{{.+}} : (!cir.ptr<!cir.float>, !cir.float) -> !cir.float
+
+ // LLVM: %[[RES:.+]] = atomicrmw fmin ptr %{{.+}}, float %{{.+}} seq_cst, align 4
+ // LLVM-NEXT: store float %[[RES]], ptr %{{.+}}, align 4
+
+ // OGCG: %[[RES:.+]] = atomicrmw fmin ptr %{{.+}}, float %{{.+}} seq_cst, align 4
+ // OGCG-NEXT: store float %[[RES]], ptr %{{.+}}, align 4
+}
+
+float atomic_min_fetch_fp(float *ptr, float value) {
+ // CIR-LABEL: @atomic_min_fetch_fp
+ // LLVM-LABEL: @atomic_min_fetch_fp
+ // OGCG-LABEL: @atomic_min_fetch_fp
+
+ return __atomic_min_fetch(ptr, value, __ATOMIC_SEQ_CST);
+ // CIR: %{{.+}} = cir.atomic.fetch min seq_cst %{{.+}}, %{{.+}} : (!cir.ptr<!cir.float>, !cir.float) -> !cir.float
+
+ // LLVM: %[[OLD:.+]] = atomicrmw fmin ptr %{{.+}}, float %[[VAL:.+]] seq_cst, align 4
+ // LLVM-NEXT: %[[RES:.+]] = call float @llvm.minnum.f32(float %[[OLD]], float %[[VAL]])
+ // LLVM-NEXT: store float %[[RES]], ptr %{{.+}}, align 4
+
+ // OGCG: %[[OLD:.+]] = atomicrmw fmin ptr %{{.+}}, float %[[VAL:.+]] seq_cst, align 4
+ // OGCG-NEXT: %[[RES:.+]] = call float @llvm.minnum.f32(float %[[OLD]], float %[[VAL]])
+ // OGCG-NEXT: store float %[[RES]], ptr %{{.+}}, align 4
+}
+
+float c11_atomic_fetch_min_fp(_Atomic(float) *ptr, float value) {
+ // CIR-LABEL: @c11_atomic_fetch_min_fp
+ // LLVM-LABEL: @c11_atomic_fetch_min_fp
+ // OGCG-LABEL: @c11_atomic_fetch_min_fp
+
+ return __c11_atomic_fetch_min(ptr, value, __ATOMIC_SEQ_CST);
+ // CIR: %{{.+}} = cir.atomic.fetch min seq_cst fetch_first %{{.+}}, %{{.+}} : (!cir.ptr<!cir.float>, !cir.float) -> !cir.float
+
+ // LLVM: %[[RES:.+]] = atomicrmw fmin ptr %{{.+}}, float %{{.+}} seq_cst, align 4
+ // LLVM-NEXT: store float %[[RES]], ptr %{{.+}}, align 4
+
+ // OGCG: %[[RES:.+]] = atomicrmw fmin ptr %{{.+}}, float %{{.+}} seq_cst, align 4
+ // OGCG-NEXT: store float %[[RES]], ptr %{{.+}}, align 4
+}
+
+int atomic_fetch_max(int *ptr, int value) {
+ // CIR-LABEL: @atomic_fetch_max
+ // LLVM-LABEL: @atomic_fetch_max
+ // OGCG-LABEL: @atomic_fetch_max
+
+ return __atomic_fetch_max(ptr, value, __ATOMIC_SEQ_CST);
+ // CIR: %{{.+}} = cir.atomic.fetch max seq_cst fetch_first %{{.+}}, %{{.+}} : (!cir.ptr<!s32i>, !s32i) -> !s32i
+
+ // LLVM: %[[RES:.+]] = atomicrmw max ptr %{{.+}}, i32 %{{.+}} seq_cst, align 4
+ // LLVM-NEXT: store i32 %[[RES]], ptr %{{.+}}, align 4
+
+ // OGCG: %[[RES:.+]] = atomicrmw max ptr %{{.+}}, i32 %{{.+}} seq_cst, align 4
+ // OGCG-NEXT: store i32 %[[RES]], ptr %{{.+}}, align 4
+}
+
+int atomic_max_fetch(int *ptr, int value) {
+ // CIR-LABEL: @atomic_max_fetch
+ // LLVM-LABEL: @atomic_max_fetch
+ // OGCG-LABEL: @atomic_max_fetch
+
+ return __atomic_max_fetch(ptr, value, __ATOMIC_SEQ_CST);
+ // CIR: %{{.+}} = cir.atomic.fetch max seq_cst %{{.+}}, %{{.+}} : (!cir.ptr<!s32i>, !s32i) -> !s32i
+
+ // LLVM: %[[OLD:.+]] = atomicrmw max ptr %{{.+}}, i32 %[[VAL:.+]] seq_cst, align 4
+ // LLVM-NEXT: %[[OLD_GREATER:.+]] = icmp sgt i32 %[[OLD]], %[[VAL]]
+ // LLVM-NEXT: %[[RES:.+]] = select i1 %[[OLD_GREATER]], i32 %[[OLD]], i32 %[[VAL]]
+ // LLVM-NEXT: store i32 %[[RES]], ptr %{{.+}}, align 4
+
+ // OGCG: %[[OLD:.+]] = atomicrmw max ptr %{{.+}}, i32 %[[VAL:.+]] seq_cst, align 4
+ // OGCG-NEXT: %[[OLD_GREATER:.+]] = icmp sgt i32 %[[OLD]], %[[VAL]]
+ // OGCG-NEXT: %[[RES:.+]] = select i1 %[[OLD_GREATER]], i32 %[[OLD]], i32 %[[VAL]]
+ // OGCG-NEXT: store i32 %[[RES]], ptr %{{.+}}, align 4
+}
+
+int c11_atomic_fetch_max(_Atomic(int) *ptr, int value) {
+ // CIR-LABEL: @c11_atomic_fetch_max
+ // LLVM-LABEL: @c11_atomic_fetch_max
+ // OGCG-LABEL: @c11_atomic_fetch_max
+
+ return __c11_atomic_fetch_max(ptr, value, __ATOMIC_SEQ_CST);
+ // CIR: %{{.+}} = cir.atomic.fetch max seq_cst fetch_first %{{.+}}, %{{.+}} : (!cir.ptr<!s32i>, !s32i) -> !s32i
+
+ // LLVM: %[[RES:.+]] = atomicrmw max ptr %{{.+}}, i32 %{{.+}} seq_cst, align 4
+ // LLVM-NEXT: store i32 %[[RES]], ptr %{{.+}}, align 4
+
+ // OGCG: %[[RES:.+]] = atomicrmw max ptr %{{.+}}, i32 %{{.+}} seq_cst, align 4
+ // OGCG-NEXT: store i32 %[[RES]], ptr %{{.+}}, align 4
+}
+
+float atomic_fetch_max_fp(float *ptr, float value) {
+ // CIR-LABEL: @atomic_fetch_max_fp
+ // LLVM-LABEL: @atomic_fetch_max_fp
+ // OGCG-LABEL: @atomic_fetch_max_fp
+
+ return __atomic_fetch_max(ptr, value, __ATOMIC_SEQ_CST);
+ // CIR: %{{.+}} = cir.atomic.fetch max seq_cst fetch_first %{{.+}}, %{{.+}} : (!cir.ptr<!cir.float>, !cir.float) -> !cir.float
+
+ // LLVM: %[[RES:.+]] = atomicrmw fmax ptr %{{.+}}, float %{{.+}} seq_cst, align 4
+ // LLVM-NEXT: store float %[[RES]], ptr %{{.+}}, align 4
+
+ // OGCG: %[[RES:.+]] = atomicrmw fmax ptr %{{.+}}, float %{{.+}} seq_cst, align 4
+ // OGCG-NEXT: store float %[[RES]], ptr %{{.+}}, align 4
+}
+
+float atomic_max_fetch_fp(float *ptr, float value) {
+ // CIR-LABEL: @atomic_max_fetch_fp
+ // LLVM-LABEL: @atomic_max_fetch_fp
+ // OGCG-LABEL: @atomic_max_fetch_fp
+
+ return __atomic_max_fetch(ptr, value, __ATOMIC_SEQ_CST);
+ // CIR: %{{.+}} = cir.atomic.fetch max seq_cst %{{.+}}, %{{.+}} : (!cir.ptr<!cir.float>, !cir.float) -> !cir.float
+
+ // LLVM: %[[OLD:.+]] = atomicrmw fmax ptr %{{.+}}, float %[[VAL:.+]] seq_cst, align 4
+ // LLVM-NEXT: %[[RES:.+]] = call float @llvm.maxnum.f32(float %[[OLD]], float %[[VAL]])
+ // LLVM-NEXT: store float %[[RES]], ptr %{{.+}}, align 4
+
+ // OGCG: %[[OLD:.+]] = atomicrmw fmax ptr %{{.+}}, float %[[VAL:.+]] seq_cst, align 4
+ // OGCG-NEXT: %[[RES:.+]] = call float @llvm.maxnum.f32(float %[[OLD]], float %[[VAL]])
+ // OGCG-NEXT: store float %[[RES]], ptr %{{.+}}, align 4
+}
+
+float c11_atomic_fetch_max_fp(_Atomic(float) *ptr, float value) {
+ // CIR-LABEL: @c11_atomic_fetch_max_fp
+ // LLVM-LABEL: @c11_atomic_fetch_max_fp
+ // OGCG-LABEL: @c11_atomic_fetch_max_fp
+
+ return __c11_atomic_fetch_max(ptr, value, __ATOMIC_SEQ_CST);
+ // CIR: %{{.+}} = cir.atomic.fetch max seq_cst fetch_first %{{.+}}, %{{.+}} : (!cir.ptr<!cir.float>, !cir.float) -> !cir.float
+
+ // LLVM: %[[RES:.+]] = atomicrmw fmax ptr %{{.+}}, float %{{.+}} seq_cst, align 4
+ // LLVM-NEXT: store float %[[RES]], ptr %{{.+}}, align 4
+
+ // OGCG: %[[RES:.+]] = atomicrmw fmax ptr %{{.+}}, float %{{.+}} seq_cst, align 4
+ // OGCG-NEXT: store float %[[RES]], ptr %{{.+}}, align 4
+}
+
+int atomic_fetch_and(int *ptr, int value) {
+ // CIR-LABEL: @atomic_fetch_and
+ // LLVM-LABEL: @atomic_fetch_and
+ // OGCG-LABEL: @atomic_fetch_and
+
+ return __atomic_fetch_and(ptr, value, __ATOMIC_SEQ_CST);
+ // CIR: %{{.+}} = cir.atomic.fetch and seq_cst fetch_first %{{.+}}, %{{.+}} : (!cir.ptr<!s32i>, !s32i) -> !s32i
+
+ // LLVM: %[[RES:.+]] = atomicrmw and ptr %{{.+}}, i32 %{{.+}} seq_cst, align 4
+ // LLVM-NEXT: store i32 %[[RES]], ptr %{{.+}}, align 4
+
+ // OGCG: %[[RES:.+]] = atomicrmw and ptr %{{.+}}, i32 %{{.+}} seq_cst, align 4
+ // OGCG-NEXT: store i32 %[[RES]], ptr %{{.+}}, align 4
+}
+
+int atomic_and_fetch(int *ptr, int value) {
+ // CIR-LABEL: @atomic_and_fetch
+ // LLVM-LABEL: @atomic_and_fetch
+ // OGCG-LABEL: @atomic_and_fetch
+
+ return __atomic_and_fetch(ptr, value, __ATOMIC_SEQ_CST);
+ // CIR: %{{.+}} = cir.atomic.fetch and seq_cst %{{.+}}, %{{.+}} : (!cir.ptr<!s32i>, !s32i) -> !s32i
+
+ // LLVM: %[[OLD:.+]] = atomicrmw and ptr %{{.+}}, i32 %[[VAL:.+]] seq_cst, align 4
+ // LLVM-NEXT: %[[RES:.+]] = and i32 %[[OLD]], %[[VAL]]
+ // LLVM-NEXT: store i32 %[[RES]], ptr %{{.+}}, align 4
+
+ // OGCG: %[[OLD:.+]] = atomicrmw and ptr %{{.+}}, i32 %[[VAL:.+]] seq_cst, align 4
+ // OGCG-NEXT: %[[RES:.+]] = and i32 %[[OLD]], %[[VAL]]
+ // OGCG-NEXT: store i32 %[[RES]], ptr %{{.+}}, align 4
+}
+
+int c11_atomic_fetch_and(_Atomic(int) *ptr, int value) {
+ // CIR-LABEL: @c11_atomic_fetch_and
+ // LLVM-LABEL: @c11_atomic_fetch_and
+ // OGCG-LABEL: @c11_atomic_fetch_and
+
+ return __c11_atomic_fetch_and(ptr, value, __ATOMIC_SEQ_CST);
+ // CIR: %{{.+}} = cir.atomic.fetch and seq_cst fetch_first %{{.+}}, %{{.+}} : (!cir.ptr<!s32i>, !s32i) -> !s32i
+
+ // LLVM: %[[RES:.+]] = atomicrmw and ptr %{{.+}}, i32 %{{.+}} seq_cst, align 4
+ // LLVM-NEXT: store i32 %[[RES]], ptr %{{.+}}, align 4
+
+ // OGCG: %[[RES:.+]] = atomicrmw and ptr %{{.+}}, i32 %{{.+}} seq_cst, align 4
+ // OGCG-NEXT: store i32 %[[RES]], ptr %{{.+}}, align 4
+}
+
+int atomic_fetch_or(int *ptr, int value) {
+ // CIR-LABEL: @atomic_fetch_or
+ // LLVM-LABEL: @atomic_fetch_or
+ // OGCG-LABEL: @atomic_fetch_or
+
+ return __atomic_fetch_or(ptr, value, __ATOMIC_SEQ_CST);
+ // CIR: %{{.+}} = cir.atomic.fetch or seq_cst fetch_first %{{.+}}, %{{.+}} : (!cir.ptr<!s32i>, !s32i) -> !s32i
+
+ // LLVM: %[[RES:.+]] = atomicrmw or ptr %{{.+}}, i32 %{{.+}} seq_cst, align 4
+ // LLVM-NEXT: store i32 %[[RES]], ptr %{{.+}}, align 4
+
+ // OGCG: %[[RES:.+]] = atomicrmw or ptr %{{.+}}, i32 %{{.+}} seq_cst, align 4
+ // OGCG-NEXT: store i32 %[[RES]], ptr %{{.+}}, align 4
+}
+
+int atomic_or_fetch(int *ptr, int value) {
+ // CIR-LABEL: @atomic_or_fetch
+ // LLVM-LABEL: @atomic_or_fetch
+ // OGCG-LABEL: @atomic_or_fetch
+
+ return __atomic_or_fetch(ptr, value, __ATOMIC_SEQ_CST);
+ // CIR: %{{.+}} = cir.atomic.fetch or seq_cst %{{.+}}, %{{.+}} : (!cir.ptr<!s32i>, !s32i) -> !s32i
+
+ // LLVM: %[[OLD:.+]] = atomicrmw or ptr %{{.+}}, i32 %[[VAL:.+]] seq_cst, align 4
+ // LLVM-NEXT: %[[RES:.+]] = or i32 %[[OLD]], %[[VAL]]
+ // LLVM-NEXT: store i32 %[[RES]], ptr %{{.+}}, align 4
+
+ // OGCG: %[[OLD:.+]] = atomicrmw or ptr %{{.+}}, i32 %[[VAL:.+]] seq_cst, align 4
+ // OGCG-NEXT: %[[RES:.+]] = or i32 %[[OLD]], %[[VAL]]
+ // OGCG-NEXT: store i32 %[[RES]], ptr %{{.+}}, align 4
+}
+
+int c11_atomic_fetch_or(_Atomic(int) *ptr, int value) {
+ // CIR-LABEL: @c11_atomic_fetch_or
+ // LLVM-LABEL: @c11_atomic_fetch_or
+ // OGCG-LABEL: @c11_atomic_fetch_or
+
+ return __c11_atomic_fetch_or(ptr, value, __ATOMIC_SEQ_CST);
+ // CIR: %{{.+}} = cir.atomic.fetch or seq_cst fetch_first %{{.+}}, %{{.+}} : (!cir.ptr<!s32i>, !s32i) -> !s32i
+
+ // LLVM: %[[RES:.+]] = atomicrmw or ptr %{{.+}}, i32 %{{.+}} seq_cst, align 4
+ // LLVM-NEXT: store i32 %[[RES]], ptr %{{.+}}, align 4
+
+ // OGCG: %[[RES:.+]] = atomicrmw or ptr %{{.+}}, i32 %{{.+}} seq_cst, align 4
+ // OGCG-NEXT: store i32 %[[RES]], ptr %{{.+}}, align 4
+}
+
+int atomic_fetch_xor(int *ptr, int value) {
+ // CIR-LABEL: @atomic_fetch_xor
+ // LLVM-LABEL: @atomic_fetch_xor
+ // OGCG-LABEL: @atomic_fetch_xor
+
+ return __atomic_fetch_xor(ptr, value, __ATOMIC_SEQ_CST);
+ // CIR: %{{.+}} = cir.atomic.fetch xor seq_cst fetch_first %{{.+}}, %{{.+}} : (!cir.ptr<!s32i>, !s32i) -> !s32i
+
+ // LLVM: %[[RES:.+]] = atomicrmw xor ptr %{{.+}}, i32 %{{.+}} seq_cst, align 4
+ // LLVM-NEXT: store i32 %[[RES]], ptr %{{.+}}, align 4
+
+ // OGCG: %[[RES:.+]] = atomicrmw xor ptr %{{.+}}, i32 %{{.+}} seq_cst, align 4
+ // OGCG-NEXT: store i32 %[[RES]], ptr %{{.+}}, align 4
+}
+
+int atomic_xor_fetch(int *ptr, int value) {
+ // CIR-LABEL: @atomic_xor_fetch
+ // LLVM-LABEL: @atomic_xor_fetch
+ // OGCG-LABEL: @atomic_xor_fetch
+
+ return __atomic_xor_fetch(ptr, value, __ATOMIC_SEQ_CST);
+ // CIR: %{{.+}} = cir.atomic.fetch xor seq_cst %{{.+}}, %{{.+}} : (!cir.ptr<!s32i>, !s32i) -> !s32i
+
+ // LLVM: %[[OLD:.+]] = atomicrmw xor ptr %{{.+}}, i32 %[[VAL:.+]] seq_cst, align 4
+ // LLVM-NEXT: %[[RES:.+]] = xor i32 %[[OLD]], %[[VAL]]
+ // LLVM-NEXT: store i32 %[[RES]], ptr %{{.+}}, align 4
+
+ // OGCG: %[[OLD:.+]] = atomicrmw xor ptr %{{.+}}, i32 %[[VAL:.+]] seq_cst, align 4
+ // OGCG-NEXT: %[[RES:.+]] = xor i32 %[[OLD]], %[[VAL]]
+ // OGCG-NEXT: store i32 %[[RES]], ptr %{{.+}}, align 4
+}
+
+int c11_atomic_fetch_xor(_Atomic(int) *ptr, int value) {
+ // CIR-LABEL: @c11_atomic_fetch_xor
+ // LLVM-LABEL: @c11_atomic_fetch_xor
+ // OGCG-LABEL: @c11_atomic_fetch_xor
+
+ return __c11_atomic_fetch_xor(ptr, value, __ATOMIC_SEQ_CST);
+ // CIR: %{{.+}} = cir.atomic.fetch xor seq_cst fetch_first %{{.+}}, %{{.+}} : (!cir.ptr<!s32i>, !s32i) -> !s32i
+
+ // LLVM: %[[RES:.+]] = atomicrmw xor ptr %{{.+}}, i32 %{{.+}} seq_cst, align 4
+ // LLVM-NEXT: store i32 %[[RES]], ptr %{{.+}}, align 4
+
+ // OGCG: %[[RES:.+]] = atomicrmw xor ptr %{{.+}}, i32 %{{.+}} seq_cst, align 4
+ // OGCG-NEXT: store i32 %[[RES]], ptr %{{.+}}, align 4
+}
+
+int atomic_fetch_nand(int *ptr, int value) {
+ // CIR-LABEL: @atomic_fetch_nand
+ // LLVM-LABEL: @atomic_fetch_nand
+ // OGCG-LABEL: @atomic_fetch_nand
+
+ return __atomic_fetch_nand(ptr, value, __ATOMIC_SEQ_CST);
+ // CIR: %{{.+}} = cir.atomic.fetch nand seq_cst fetch_first %{{.+}}, %{{.+}} : (!cir.ptr<!s32i>, !s32i) -> !s32i
+
+ // LLVM: %[[RES:.+]] = atomicrmw nand ptr %{{.+}}, i32 %{{.+}} seq_cst, align 4
+ // LLVM-NEXT: store i32 %[[RES]], ptr %{{.+}}, align 4
+
+ // OGCG: %[[RES:.+]] = atomicrmw nand ptr %{{.+}}, i32 %{{.+}} seq_cst, align 4
+ // OGCG-NEXT: store i32 %[[RES]], ptr %{{.+}}, align 4
+}
+
+int atomic_nand_fetch(int *ptr, int value) {
+ // CIR-LABEL: @atomic_nand_fetch
+ // LLVM-LABEL: @atomic_nand_fetch
+ // OGCG-LABEL: @atomic_nand_fetch
+
+ return __atomic_nand_fetch(ptr, value, __ATOMIC_SEQ_CST);
+ // CIR: %{{.+}} = cir.atomic.fetch nand seq_cst %{{.+}}, %{{.+}} : (!cir.ptr<!s32i>, !s32i) -> !s32i
+
+ // LLVM: %[[OLD:.+]] = atomicrmw nand ptr %{{.+}}, i32 %[[VAL:.+]] seq_cst, align 4
+ // LLVM-NEXT: %[[TMP:.+]] = and i32 %[[OLD]], %[[VAL]]
+ // LLVM-NEXT: %[[RES:.+]] = xor i32 %[[TMP]], -1
+ // LLVM-NEXT: store i32 %[[RES]], ptr %{{.+}}, align 4
+
+ // OGCG: %[[OLD:.+]] = atomicrmw nand ptr %{{.+}}, i32 %[[VAL:.+]] seq_cst, align 4
+ // OGCG-NEXT: %[[TMP:.+]] = and i32 %[[OLD]], %[[VAL]]
+ // OGCG-NEXT: %[[RES:.+]] = xor i32 %[[TMP]], -1
+ // OGCG-NEXT: store i32 %[[RES]], ptr %{{.+}}, align 4
+}
+
+int c11_atomic_fetch_nand(_Atomic(int) *ptr, int value) {
+ // CIR-LABEL: @c11_atomic_fetch_nand
+ // LLVM-LABEL: @c11_atomic_fetch_nand
+ // OGCG-LABEL: @c11_atomic_fetch_nand
+
+ return __c11_atomic_fetch_nand(ptr, value, __ATOMIC_SEQ_CST);
+ // CIR: %{{.+}} = cir.atomic.fetch nand seq_cst fetch_first %{{.+}}, %{{.+}} : (!cir.ptr<!s32i>, !s32i) -> !s32i
+
+ // LLVM: %[[RES:.+]] = atomicrmw nand ptr %{{.+}}, i32 %{{.+}} seq_cst, align 4
+ // LLVM-NEXT: store i32 %[[RES]], ptr %{{.+}}, align 4
+
+ // OGCG: %[[RES:.+]] = atomicrmw nand ptr %{{.+}}, i32 %{{.+}} seq_cst, align 4
+ // OGCG-NEXT: store i32 %[[RES]], ptr %{{.+}}, align 4
+}
diff --git a/clang/test/CIR/CodeGen/builtin_inline.c b/clang/test/CIR/CodeGen/builtin_inline.c
new file mode 100644
index 0000000..83a3ba6
--- /dev/null
+++ b/clang/test/CIR/CodeGen/builtin_inline.c
@@ -0,0 +1,91 @@
+// RUN: %clang_cc1 -triple x86_64-unknown-linux-gnu -fclangir -emit-cir %s -o %t.cir
+// RUN: FileCheck --input-file=%t.cir %s -check-prefix=CIR
+// RUN: %clang_cc1 -triple x86_64-unknown-linux-gnu -fclangir -emit-llvm -disable-llvm-passes %s -o %t-cir.ll
+// RUN: FileCheck --input-file=%t-cir.ll %s -check-prefix=LLVM
+// RUN: %clang_cc1 -triple x86_64-unknown-linux-gnu -emit-llvm -disable-llvm-passes %s -o %t.ll
+// RUN: FileCheck --input-file=%t.ll %s -check-prefix=OGCG
+
+typedef unsigned long size_t;
+
+// Normal inline builtin declaration
+// When a builtin is redefined with extern inline + always_inline attributes,
+// the compiler creates a .inline version to avoid conflicts with the builtin.
+
+extern inline __attribute__((always_inline)) __attribute__((gnu_inline))
+void *memcpy(void *a, const void *b, size_t c) {
+ return __builtin_memcpy(a, b, c);
+}
+
+void *test_inline_builtin_memcpy(void *a, const void *b, size_t c) {
+ return memcpy(a, b, c);
+}
+
+// CIR: cir.func internal private{{.*}}@memcpy.inline({{.*}}) -> !cir.ptr<!void> inline(always)
+
+// CIR-LABEL: @test_inline_builtin_memcpy(
+// CIR: cir.call @memcpy.inline(
+// CIR: }
+
+// LLVM: define internal ptr @memcpy.inline(ptr{{.*}}, ptr{{.*}}, i64{{.*}}) #{{[0-9]+}}
+
+// LLVM-LABEL: @test_inline_builtin_memcpy(
+// LLVM: call ptr @memcpy.inline(
+
+// OGCG-LABEL: @test_inline_builtin_memcpy(
+// OGCG: call ptr @memcpy.inline(
+
+// OGCG: define internal ptr @memcpy.inline(ptr{{.*}} %a, ptr{{.*}} %b, i64{{.*}} %c) #{{[0-9]+}}
+
+// Shadowing case
+// When a non-inline function definition shadows an inline builtin declaration,
+// the .inline version should be replaced with the regular function and removed.
+
+extern inline __attribute__((always_inline)) __attribute__((gnu_inline))
+void *memmove(void *a, const void *b, size_t c) {
+ return __builtin_memmove(a, b, c);
+}
+
+void *memmove(void *a, const void *b, size_t c) {
+ char *dst = (char *)a;
+ const char *src = (const char *)b;
+ if (dst < src) {
+ for (size_t i = 0; i < c; i++) {
+ dst[i] = src[i];
+ }
+ } else {
+ for (size_t i = c; i > 0; i--) {
+ dst[i-1] = src[i-1];
+ }
+ }
+ return a;
+}
+
+void *test_shadowed_memmove(void *a, const void *b, size_t c) {
+ return memmove(a, b, c);
+}
+
+// CIR: cir.func{{.*}}@memmove({{.*}}) -> !cir.ptr<!void>{{.*}}{
+// CIR-NOT: @memmove.inline
+
+// CIR-LABEL: @test_shadowed_memmove(
+// CIR: cir.call @memmove(
+// CIR-NOT: @memmove.inline
+// CIR: }
+
+// LLVM: define dso_local ptr @memmove(ptr{{.*}}, ptr{{.*}}, i64{{.*}}) #{{[0-9]+}}
+// LLVM-NOT: @memmove.inline
+
+// LLVM-LABEL: @test_shadowed_memmove(
+// TODO - this deviation from OGCG is expected until we implement the nobuiltin
+// attribute. See CIRGenFunction::emitDirectCallee.
+// LLVM: call ptr @memmove(
+// LLVM-NOT: @memmove.inline
+// LLVM: }
+
+// OGCG: define dso_local ptr @memmove(ptr{{.*}} %a, ptr{{.*}} %b, i64{{.*}} %c) #{{[0-9]+}}
+// OGCG-NOT: @memmove.inline
+
+// OGCG-LABEL: @test_shadowed_memmove(
+// OGCG: call void @llvm.memmove.p0.p0.i64(
+// OGCG-NOT: @memmove.inline
+// OGCG: }
diff --git a/clang/test/CIR/CodeGen/dtors.cpp b/clang/test/CIR/CodeGen/dtors.cpp
index f2c80a5..1fe048b7 100644
--- a/clang/test/CIR/CodeGen/dtors.cpp
+++ b/clang/test/CIR/CodeGen/dtors.cpp
@@ -35,7 +35,7 @@ bool make_temp(const B &) { return false; }
bool test_temp_or() { return make_temp(1) || make_temp(2); }
// CIR: cir.func{{.*}} @_Z12test_temp_orv()
-// CIR: %[[SCOPE:.*]] = cir.scope {
+// CIR: cir.scope {
// CIR: %[[REF_TMP0:.*]] = cir.alloca !rec_B, !cir.ptr<!rec_B>, ["ref.tmp0"]
// CIR: %[[ONE:.*]] = cir.const #cir.int<1>
// CIR: cir.call @_ZN1BC2Ei(%[[REF_TMP0]], %[[ONE]])
@@ -51,9 +51,9 @@ bool test_temp_or() { return make_temp(1) || make_temp(2); }
// CIR: cir.call @_ZN1BD2Ev(%[[REF_TMP1]])
// CIR: cir.yield %[[MAKE_TEMP1]] : !cir.bool
// CIR: })
+// CIR: cir.store{{.*}} %[[TERNARY]], %[[RETVAL:.*]]
// CIR: cir.call @_ZN1BD2Ev(%[[REF_TMP0]])
-// CIR: cir.yield %[[TERNARY]] : !cir.bool
-// CIR: } : !cir.bool
+// CIR: }
// LLVM: define{{.*}} i1 @_Z12test_temp_orv(){{.*}} {
// LLVM: %[[REF_TMP0:.*]] = alloca %struct.B
@@ -105,7 +105,7 @@ bool test_temp_or() { return make_temp(1) || make_temp(2); }
bool test_temp_and() { return make_temp(1) && make_temp(2); }
// CIR: cir.func{{.*}} @_Z13test_temp_andv()
-// CIR: %[[SCOPE:.*]] = cir.scope {
+// CIR: cir.scope {
// CIR: %[[REF_TMP0:.*]] = cir.alloca !rec_B, !cir.ptr<!rec_B>, ["ref.tmp0"]
// CIR: %[[ONE:.*]] = cir.const #cir.int<1>
// CIR: cir.call @_ZN1BC2Ei(%[[REF_TMP0]], %[[ONE]])
@@ -121,9 +121,9 @@ bool test_temp_and() { return make_temp(1) && make_temp(2); }
// CIR: %[[FALSE:.*]] = cir.const #false
// CIR: cir.yield %[[FALSE]] : !cir.bool
// CIR: })
+// CIR: cir.store{{.*}} %[[TERNARY]], %[[RETVAL:.*]]
// CIR: cir.call @_ZN1BD2Ev(%[[REF_TMP0]])
-// CIR: cir.yield %[[TERNARY]] : !cir.bool
-// CIR: } : !cir.bool
+// CIR: }
// LLVM: define{{.*}} i1 @_Z13test_temp_andv(){{.*}} {
// LLVM: %[[REF_TMP0:.*]] = alloca %struct.B
diff --git a/clang/test/CIR/CodeGen/lambda.cpp b/clang/test/CIR/CodeGen/lambda.cpp
index 0c32ceb1..91380b9 100644
--- a/clang/test/CIR/CodeGen/lambda.cpp
+++ b/clang/test/CIR/CodeGen/lambda.cpp
@@ -219,14 +219,13 @@ int f() {
// CIR: cir.func dso_local @_Z1fv() -> !s32i {{.*}} {
// CIR: %[[RETVAL:.*]] = cir.alloca !s32i, !cir.ptr<!s32i>, ["__retval"]
-// CIR: %[[SCOPE_RET:.*]] = cir.scope {
+// CIR: cir.scope {
// CIR: %[[TMP:.*]] = cir.alloca ![[REC_LAM_G2]], !cir.ptr<![[REC_LAM_G2]]>, ["ref.tmp0"]
// CIR: %[[G2:.*]] = cir.call @_Z2g2v() : () -> ![[REC_LAM_G2]]
// CIR: cir.store{{.*}} %[[G2]], %[[TMP]]
// CIR: %[[RESULT:.*]] = cir.call @_ZZ2g2vENK3$_0clEv(%[[TMP]])
-// CIR: cir.yield %[[RESULT]]
+// CIR: cir.store{{.*}} %[[RESULT]], %[[RETVAL]]
// CIR: }
-// CIR: cir.store{{.*}} %[[SCOPE_RET]], %[[RETVAL]]
// CIR: %[[RET:.*]] = cir.load{{.*}} %[[RETVAL]]
// CIR: cir.return %[[RET]]
@@ -255,10 +254,9 @@ int f() {
// LLVM: %[[G2:.*]] = call %[[REC_LAM_G2]] @_Z2g2v()
// LLVM: store %[[REC_LAM_G2]] %[[G2]], ptr %[[TMP]]
// LLVM: %[[RESULT:.*]] = call i32 @"_ZZ2g2vENK3$_0clEv"(ptr %[[TMP]])
+// LLVM: store i32 %[[RESULT]], ptr %[[RETVAL]]
// LLVM: br label %[[RET_BB:.*]]
// LLVM: [[RET_BB]]:
-// LLVM: %[[RETPHI:.*]] = phi i32 [ %[[RESULT]], %[[SCOPE_BB]] ]
-// LLVM: store i32 %[[RETPHI]], ptr %[[RETVAL]]
// LLVM: %[[RET:.*]] = load i32, ptr %[[RETVAL]]
// LLVM: ret i32 %[[RET]]
@@ -333,14 +331,13 @@ struct A {
// CIR: %[[RETVAL:.*]] = cir.alloca !s32i, !cir.ptr<!s32i>, ["__retval"]
// CIR: cir.store %[[THIS_ARG]], %[[THIS_ADDR]]
// CIR: %[[THIS]] = cir.load deref %[[THIS_ADDR]] : !cir.ptr<!cir.ptr<!rec_A>>, !cir.ptr<!rec_A>
-// CIR: %[[SCOPE_RET:.*]] = cir.scope {
+// CIR: cir.scope {
// CIR: %[[LAM_ADDR:.*]] = cir.alloca ![[REC_LAM_A]], !cir.ptr<![[REC_LAM_A]]>, ["ref.tmp0"]
// CIR: %[[STRUCT_A:.*]] = cir.get_member %[[LAM_ADDR]][0] {name = "this"} : !cir.ptr<![[REC_LAM_A]]> -> !cir.ptr<!rec_A>
// CIR: cir.call @_ZN1AC1ERKS_(%[[STRUCT_A]], %[[THIS]]){{.*}} : (!cir.ptr<!rec_A>, !cir.ptr<!rec_A>){{.*}} -> ()
// CIR: %[[LAM_RET:.*]] = cir.call @_ZZN1A3fooEvENKUlvE_clEv(%[[LAM_ADDR]])
-// CIR: cir.yield %[[LAM_RET]]
+// CIR: cir.store{{.*}} %[[LAM_RET]], %[[RETVAL]]
// CIR: }
-// CIR: cir.store{{.*}} %[[SCOPE_RET]], %[[RETVAL]]
// CIR: %[[RET:.*]] = cir.load{{.*}} %[[RETVAL]]
// CIR: cir.return %[[RET]]
@@ -355,10 +352,9 @@ struct A {
// LLVM: %[[STRUCT_A:.*]] = getelementptr %[[REC_LAM_A]], ptr %[[LAM_ALLOCA]], i32 0, i32 0
// LLVM: call void @_ZN1AC1ERKS_(ptr %[[STRUCT_A]], ptr %[[THIS]])
// LLVM: %[[LAM_RET:.*]] = call i32 @_ZZN1A3fooEvENKUlvE_clEv(ptr %[[LAM_ALLOCA]])
+// LLVM: store i32 %[[LAM_RET]], ptr %[[RETVAL]]
// LLVM: br label %[[RET_BB:.*]]
// LLVM: [[RET_BB]]:
-// LLVM: %[[RETPHI:.*]] = phi i32 [ %[[LAM_RET]], %[[SCOPE_BB]] ]
-// LLVM: store i32 %[[RETPHI]], ptr %[[RETVAL]]
// LLVM: %[[RET:.*]] = load i32, ptr %[[RETVAL]]
// LLVM: ret i32 %[[RET]]
@@ -407,14 +403,13 @@ struct A {
// CIR: %[[RETVAL:.*]] = cir.alloca !s32i, !cir.ptr<!s32i>, ["__retval"]
// CIR: cir.store %[[THIS_ARG]], %[[THIS_ADDR]]
// CIR: %[[THIS]] = cir.load %[[THIS_ADDR]] : !cir.ptr<!cir.ptr<!rec_A>>, !cir.ptr<!rec_A>
-// CIR: %[[SCOPE_RET:.*]] = cir.scope {
+// CIR: cir.scope {
// CIR: %[[LAM_ADDR:.*]] = cir.alloca ![[REC_LAM_PTR_A]], !cir.ptr<![[REC_LAM_PTR_A]]>, ["ref.tmp0"]
// CIR: %[[A_ADDR_ADDR:.*]] = cir.get_member %[[LAM_ADDR]][0] {name = "this"} : !cir.ptr<![[REC_LAM_PTR_A]]> -> !cir.ptr<!cir.ptr<!rec_A>>
// CIR: cir.store{{.*}} %[[THIS]], %[[A_ADDR_ADDR]]
// CIR: %[[LAM_RET:.*]] = cir.call @_ZZN1A3barEvENKUlvE_clEv(%[[LAM_ADDR]])
-// CIR: cir.yield %[[LAM_RET]]
+// CIR: cir.store{{.*}} %[[LAM_RET]], %[[RETVAL]]
// CIR: }
-// CIR: cir.store{{.*}} %[[SCOPE_RET]], %[[RETVAL]]
// CIR: %[[RET:.*]] = cir.load{{.*}} %[[RETVAL]]
// CIR: cir.return %[[RET]]
@@ -429,10 +424,9 @@ struct A {
// LLVM: %[[A_ADDR_ADDR:.*]] = getelementptr %[[REC_LAM_PTR_A]], ptr %[[LAM_ALLOCA]], i32 0, i32 0
// LLVM: store ptr %[[THIS]], ptr %[[A_ADDR_ADDR]]
// LLVM: %[[LAM_RET:.*]] = call i32 @_ZZN1A3barEvENKUlvE_clEv(ptr %[[LAM_ALLOCA]])
+// LLVM: store i32 %[[LAM_RET]], ptr %[[RETVAL]]
// LLVM: br label %[[RET_BB:.*]]
// LLVM: [[RET_BB]]:
-// LLVM: %[[RETPHI:.*]] = phi i32 [ %[[LAM_RET]], %[[SCOPE_BB]] ]
-// LLVM: store i32 %[[RETPHI]], ptr %[[RETVAL]]
// LLVM: %[[RET:.*]] = load i32, ptr %[[RETVAL]]
// LLVM: ret i32 %[[RET]]
diff --git a/clang/test/CIR/CodeGen/new.cpp b/clang/test/CIR/CodeGen/new.cpp
index 000ea5b..2efad10 100644
--- a/clang/test/CIR/CodeGen/new.cpp
+++ b/clang/test/CIR/CodeGen/new.cpp
@@ -208,6 +208,127 @@ void t_new_constant_size() {
// OGCG: %[[CALL:.*]] = call noalias noundef nonnull ptr @_Znam(i64 noundef 128)
// OGCG: store ptr %[[CALL]], ptr %[[P_ADDR]], align 8
+class C {
+ public:
+ ~C();
+};
+
+void t_constant_size_nontrivial() {
+ auto p = new C[3];
+}
+
+// CHECK: cir.func{{.*}} @_Z26t_constant_size_nontrivialv()
+// CHECK: %[[P_ADDR:.*]] = cir.alloca !cir.ptr<!rec_C>, !cir.ptr<!cir.ptr<!rec_C>>, ["p", init] {alignment = 8 : i64}
+// CHECK: %[[#NUM_ELEMENTS:]] = cir.const #cir.int<3> : !u64i
+// CHECK: %[[#SIZE_WITHOUT_COOKIE:]] = cir.const #cir.int<3> : !u64i
+// CHECK: %[[#ALLOCATION_SIZE:]] = cir.const #cir.int<11> : !u64i
+// CHECK: %[[RAW_PTR:.*]] = cir.call @_Znam(%[[#ALLOCATION_SIZE]]) : (!u64i) -> !cir.ptr<!void>
+// CHECK: %[[COOKIE_PTR_BASE:.*]] = cir.cast bitcast %[[RAW_PTR]] : !cir.ptr<!void> -> !cir.ptr<!cir.ptr<!u8i>>
+// CHECK: %[[COOKIE_PTR:.*]] = cir.cast bitcast %[[COOKIE_PTR_BASE]] : !cir.ptr<!cir.ptr<!u8i>> -> !cir.ptr<!u64i>
+// CHECK: cir.store align(8) %[[#NUM_ELEMENTS]], %[[COOKIE_PTR]] : !u64i, !cir.ptr<!u64i>
+// CHECK: %[[#COOKIE_SIZE:]] = cir.const #cir.int<8> : !s32i
+// CHECK: %[[DATA_PTR_RAW:.*]] = cir.ptr_stride %[[COOKIE_PTR_BASE]], %[[#COOKIE_SIZE]] : (!cir.ptr<!cir.ptr<!u8i>>, !s32i) -> !cir.ptr<!cir.ptr<!u8i>>
+// CHECK: %[[DATA_PTR_VOID:.*]] = cir.cast bitcast %[[DATA_PTR_RAW]] : !cir.ptr<!cir.ptr<!u8i>> -> !cir.ptr<!void>
+// CHECK: %[[DATA_PTR:.*]] = cir.cast bitcast %[[DATA_PTR_VOID]] : !cir.ptr<!void> -> !cir.ptr<!rec_C>
+// CHECK: cir.store align(8) %[[DATA_PTR]], %[[P_ADDR]] : !cir.ptr<!rec_C>, !cir.ptr<!cir.ptr<!rec_C>>
+// CHECK: cir.return
+// CHECK: }
+
+// LLVM: @_Z26t_constant_size_nontrivialv()
+// LLVM: %[[ALLOCA:.*]] = alloca ptr, i64 1, align 8
+// LLVM: %[[COOKIE_PTR:.*]] = call ptr @_Znam(i64 11)
+// LLVM: store i64 3, ptr %[[COOKIE_PTR]], align 8
+// LLVM: %[[ALLOCATED_PTR:.*]] = getelementptr ptr, ptr %[[COOKIE_PTR]], i64 8
+// LLVM: store ptr %[[ALLOCATED_PTR]], ptr %[[ALLOCA]], align 8
+
+// OGCG: @_Z26t_constant_size_nontrivialv()
+// OGCG: %[[ALLOCA:.*]] = alloca ptr, align 8
+// OGCG: %[[COOKIE_PTR:.*]] = call noalias noundef nonnull ptr @_Znam(i64 noundef 11)
+// OGCG: store i64 3, ptr %[[COOKIE_PTR]], align 8
+// OGCG: %[[ALLOCATED_PTR:.*]] = getelementptr inbounds i8, ptr %[[COOKIE_PTR]], i64 8
+// OGCG: store ptr %[[ALLOCATED_PTR]], ptr %[[ALLOCA]], align 8
+
+class D {
+ public:
+ int x;
+ ~D();
+};
+
+void t_constant_size_nontrivial2() {
+ auto p = new D[3];
+}
+
+// In this test SIZE_WITHOUT_COOKIE isn't used, but it would be if there were
+// an initializer.
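+//
+// A rough sketch of the arithmetic behind the constants checked below, assuming
+// the Itanium ABI array cookie of max(sizeof(size_t), alignof(D)) bytes:
+// sizeof(D) == 4, so SIZE_WITHOUT_COOKIE == 3 * 4 == 12 and
+// ALLOCATION_SIZE == 12 + 8 == 20.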
+
+// CHECK: cir.func{{.*}} @_Z27t_constant_size_nontrivial2v()
+// CHECK: %[[P_ADDR:.*]] = cir.alloca !cir.ptr<!rec_D>, !cir.ptr<!cir.ptr<!rec_D>>, ["p", init] {alignment = 8 : i64}
+// CHECK: %[[#NUM_ELEMENTS:]] = cir.const #cir.int<3> : !u64i
+// CHECK: %[[#SIZE_WITHOUT_COOKIE:]] = cir.const #cir.int<12> : !u64i
+// CHECK: %[[#ALLOCATION_SIZE:]] = cir.const #cir.int<20> : !u64i
+// CHECK: %[[RAW_PTR:.*]] = cir.call @_Znam(%[[#ALLOCATION_SIZE]]) : (!u64i) -> !cir.ptr<!void>
+// CHECK: %[[COOKIE_PTR_BASE:.*]] = cir.cast bitcast %[[RAW_PTR]] : !cir.ptr<!void> -> !cir.ptr<!cir.ptr<!u8i>>
+// CHECK: %[[COOKIE_PTR:.*]] = cir.cast bitcast %[[COOKIE_PTR_BASE]] : !cir.ptr<!cir.ptr<!u8i>> -> !cir.ptr<!u64i>
+// CHECK: cir.store align(8) %[[#NUM_ELEMENTS]], %[[COOKIE_PTR]] : !u64i, !cir.ptr<!u64i>
+// CHECK: %[[#COOKIE_SIZE:]] = cir.const #cir.int<8> : !s32i
+// CHECK: %[[DATA_PTR_RAW:.*]] = cir.ptr_stride %[[COOKIE_PTR_BASE]], %[[#COOKIE_SIZE]] : (!cir.ptr<!cir.ptr<!u8i>>, !s32i) -> !cir.ptr<!cir.ptr<!u8i>>
+// CHECK: %[[DATA_PTR_VOID:.*]] = cir.cast bitcast %[[DATA_PTR_RAW]] : !cir.ptr<!cir.ptr<!u8i>> -> !cir.ptr<!void>
+// CHECK: %[[DATA_PTR:.*]] = cir.cast bitcast %[[DATA_PTR_VOID]] : !cir.ptr<!void> -> !cir.ptr<!rec_D>
+// CHECK: cir.store align(8) %[[DATA_PTR]], %[[P_ADDR]] : !cir.ptr<!rec_D>, !cir.ptr<!cir.ptr<!rec_D>>
+// CHECK: cir.return
+// CHECK: }
+
+// LLVM: @_Z27t_constant_size_nontrivial2v()
+// LLVM: %[[ALLOCA:.*]] = alloca ptr, i64 1, align 8
+// LLVM: %[[COOKIE_PTR:.*]] = call ptr @_Znam(i64 20)
+// LLVM: store i64 3, ptr %[[COOKIE_PTR]], align 8
+// LLVM: %[[ALLOCATED_PTR:.*]] = getelementptr ptr, ptr %[[COOKIE_PTR]], i64 8
+// LLVM: store ptr %[[ALLOCATED_PTR]], ptr %[[ALLOCA]], align 8
+
+struct alignas(16) E {
+ int x;
+ ~E();
+};
+
+void t_align16_nontrivial() {
+ auto p = new E[2];
+}
+
+// CHECK: cir.func{{.*}} @_Z20t_align16_nontrivialv()
+// CHECK: %[[P_ADDR:.*]] = cir.alloca !cir.ptr<!rec_E>, !cir.ptr<!cir.ptr<!rec_E>>, ["p", init] {alignment = 8 : i64}
+// CHECK: %[[#NUM_ELEMENTS:]] = cir.const #cir.int<2> : !u64i
+// CHECK: %[[#SIZE_WITHOUT_COOKIE:]] = cir.const #cir.int<32> : !u64i
+// CHECK: %[[#ALLOCATION_SIZE:]] = cir.const #cir.int<48> : !u64i
+// CHECK: %[[RAW_PTR:.*]] = cir.call @_Znam(%[[#ALLOCATION_SIZE]]) : (!u64i) -> !cir.ptr<!void>
+// CHECK: %[[COOKIE_PTR_BASE:.*]] = cir.cast bitcast %[[RAW_PTR]] : !cir.ptr<!void> -> !cir.ptr<!cir.ptr<!u8i>>
+// CHECK: %[[COOKIE_OFFSET:.*]] = cir.const #cir.int<8> : !s32i
+// CHECK: %[[COOKIE_PTR_RAW:.*]] = cir.ptr_stride %[[COOKIE_PTR_BASE]], %[[COOKIE_OFFSET]] : (!cir.ptr<!cir.ptr<!u8i>>, !s32i) -> !cir.ptr<!cir.ptr<!u8i>>
+// CHECK: %[[COOKIE_PTR:.*]] = cir.cast bitcast %[[COOKIE_PTR_RAW]] : !cir.ptr<!cir.ptr<!u8i>> -> !cir.ptr<!u64i>
+// CHECK: cir.store align(8) %[[#NUM_ELEMENTS]], %[[COOKIE_PTR]] : !u64i, !cir.ptr<!u64i>
+// CHECK: %[[#COOKIE_SIZE:]] = cir.const #cir.int<16> : !s32i
+// CHECK: %[[DATA_PTR_RAW:.*]] = cir.ptr_stride %[[COOKIE_PTR_BASE]], %[[#COOKIE_SIZE]] : (!cir.ptr<!cir.ptr<!u8i>>, !s32i) -> !cir.ptr<!cir.ptr<!u8i>>
+// CHECK: %[[DATA_PTR_VOID:.*]] = cir.cast bitcast %[[DATA_PTR_RAW]] : !cir.ptr<!cir.ptr<!u8i>> -> !cir.ptr<!void>
+// CHECK: %[[DATA_PTR:.*]] = cir.cast bitcast %[[DATA_PTR_VOID]] : !cir.ptr<!void> -> !cir.ptr<!rec_E>
+// CHECK: cir.store align(8) %[[DATA_PTR]], %[[P_ADDR]] : !cir.ptr<!rec_E>, !cir.ptr<!cir.ptr<!rec_E>>
+// CHECK: cir.return
+// CHECK: }
+
+// LLVM: @_Z20t_align16_nontrivialv()
+// LLVM: %[[ALLOCA:.*]] = alloca ptr, i64 1, align 8
+// LLVM: %[[RAW_PTR:.*]] = call ptr @_Znam(i64 48)
+// LLVM: %[[COOKIE_PTR:.*]] = getelementptr ptr, ptr %[[RAW_PTR]], i64 8
+// LLVM: store i64 2, ptr %[[COOKIE_PTR]], align 8
+// LLVM: %[[ALLOCATED_PTR:.*]] = getelementptr ptr, ptr %[[RAW_PTR]], i64 16
+// LLVM: store ptr %[[ALLOCATED_PTR]], ptr %[[ALLOCA]], align 8
+
+// OGCG: define{{.*}} void @_Z20t_align16_nontrivialv
+// OGCG: %[[ALLOCA:.*]] = alloca ptr, align 8
+// OGCG: %[[RAW_PTR:.*]] = call noalias noundef nonnull ptr @_Znam(i64 noundef 48)
+// OGCG: %[[COOKIE_PTR:.*]] = getelementptr inbounds i8, ptr %[[RAW_PTR]], i64 8
+// OGCG: store i64 2, ptr %[[COOKIE_PTR]], align 8
+// OGCG: %[[ALLOCATED_PTR:.*]] = getelementptr inbounds i8, ptr %[[RAW_PTR]], i64 16
+// OGCG: store ptr %[[ALLOCATED_PTR]], ptr %[[ALLOCA]], align 8
+// OGCG: ret void
void t_new_multidim_constant_size() {
auto p = new double[2][3][4];
diff --git a/clang/test/CIR/CodeGen/statement-exprs.c b/clang/test/CIR/CodeGen/statement-exprs.c
index f6ec9ec..c784ec9 100644
--- a/clang/test/CIR/CodeGen/statement-exprs.c
+++ b/clang/test/CIR/CodeGen/statement-exprs.c
@@ -218,7 +218,7 @@ struct S { int x; };
int test3() { return ({ struct S s = {1}; s; }).x; }
// CIR: cir.func no_proto dso_local @test3() -> !s32i
// CIR: %[[RETVAL:.+]] = cir.alloca !s32i, !cir.ptr<!s32i>, ["__retval"]
-// CIR: %[[YIELDVAL:.+]] = cir.scope {
+// CIR: cir.scope {
// CIR: %[[REF_TMP0:.+]] = cir.alloca !rec_S, !cir.ptr<!rec_S>, ["ref.tmp0"]
// CIR: %[[TMP:.+]] = cir.alloca !rec_S, !cir.ptr<!rec_S>, ["tmp"]
// CIR: cir.scope {
@@ -230,9 +230,8 @@ int test3() { return ({ struct S s = {1}; s; }).x; }
// CIR: }
// CIR: %[[GEP_X_TMP:.+]] = cir.get_member %[[REF_TMP0]][0] {name = "x"} : !cir.ptr<!rec_S> -> !cir.ptr<!s32i>
// CIR: %[[XVAL:.+]] = cir.load {{.*}} %[[GEP_X_TMP]] : !cir.ptr<!s32i>, !s32i
-// CIR: cir.yield %[[XVAL]] : !s32i
-// CIR: } : !s32i
-// CIR: cir.store %[[YIELDVAL]], %[[RETVAL]] : !s32i, !cir.ptr<!s32i>
+// CIR: cir.store{{.*}} %[[XVAL]], %[[RETVAL]] : !s32i, !cir.ptr<!s32i>
+// CIR: }
// CIR: %[[RES:.+]] = cir.load %[[RETVAL]] : !cir.ptr<!s32i>, !s32i
// CIR: cir.return %[[RES]] : !s32i
@@ -252,10 +251,9 @@ int test3() { return ({ struct S s = {1}; s; }).x; }
// LLVM: [[LBL8]]:
// LLVM: %[[GEP_VAR1:.+]] = getelementptr %struct.S, ptr %[[VAR1]], i32 0, i32 0
// LLVM: %[[LOAD_X:.+]] = load i32, ptr %[[GEP_VAR1]]
+// LLVM: store i32 %[[LOAD_X]], ptr %[[VAR4]]
// LLVM: br label %[[LBL11:.+]]
// LLVM: [[LBL11]]:
-// LLVM: %[[PHI:.+]] = phi i32 [ %[[LOAD_X]], %[[LBL8]] ]
-// LLVM: store i32 %[[PHI]], ptr %[[VAR4]]
// LLVM: %[[RES:.+]] = load i32, ptr %[[VAR4]]
// LLVM: ret i32 %[[RES]]
diff --git a/clang/test/CIR/CodeGen/struct-init.cpp b/clang/test/CIR/CodeGen/struct-init.cpp
index a47ef53..cb50999 100644
--- a/clang/test/CIR/CodeGen/struct-init.cpp
+++ b/clang/test/CIR/CodeGen/struct-init.cpp
@@ -9,6 +9,22 @@ struct S {
int a, b, c;
};
+S partial_init = { 1 };
+
+// CIR: cir.global external @partial_init = #cir.const_record<{#cir.int<1> : !s32i, #cir.int<0> : !s32i, #cir.int<0> : !s32i}> : !rec_S
+// LLVM: @partial_init = global %struct.S { i32 1, i32 0, i32 0 }
+// OGCG: @partial_init = global %struct.S { i32 1, i32 0, i32 0 }
+
+struct StructWithDefaultInit {
+ int a = 2;
+};
+
+StructWithDefaultInit swdi = {};
+
+// CIR: cir.global external @swdi = #cir.const_record<{#cir.int<2> : !s32i}> : !rec_StructWithDefaultInit
+// LLVM: @swdi = global %struct.StructWithDefaultInit { i32 2 }, align 4
+// OGCG: @swdi = global %struct.StructWithDefaultInit { i32 2 }, align 4
+
void init() {
S s1 = {1, 2, 3};
S s2 = {4, 5};
diff --git a/clang/test/CIR/CodeGen/struct.cpp b/clang/test/CIR/CodeGen/struct.cpp
index 6d362c7..c8db714 100644
--- a/clang/test/CIR/CodeGen/struct.cpp
+++ b/clang/test/CIR/CodeGen/struct.cpp
@@ -280,3 +280,67 @@ void bin_comma() {
// OGCG: define{{.*}} void @_Z9bin_commav()
// OGCG: %[[A_ADDR:.*]] = alloca %struct.CompleteS, align 4
// OGCG: call void @llvm.memset.p0.i64(ptr align 4 %[[A_ADDR]], i8 0, i64 8, i1 false)
+
+void compound_literal_expr() { CompleteS a = (CompleteS){}; }
+
+// CIR: %[[A_ADDR:.*]] = cir.alloca !rec_CompleteS, !cir.ptr<!rec_CompleteS>, ["a", init]
+// CIR: %[[A_ELEM_0_PTR:.*]] = cir.get_member %[[A_ADDR]][0] {name = "a"} : !cir.ptr<!rec_CompleteS> -> !cir.ptr<!s32i>
+// CIR: %[[CONST_0:.*]] = cir.const #cir.int<0> : !s32i
+// CIR: cir.store{{.*}} %[[CONST_0]], %[[A_ELEM_0_PTR]] : !s32i, !cir.ptr<!s32i>
+// CIR: %[[A_ELEM_1_PTR:.*]] = cir.get_member %[[A_ADDR]][1] {name = "b"} : !cir.ptr<!rec_CompleteS> -> !cir.ptr<!s8i>
+// CIR: %[[CONST_0:.*]] = cir.const #cir.int<0> : !s8i
+// CIR: cir.store{{.*}} %[[CONST_0]], %[[A_ELEM_1_PTR]] : !s8i, !cir.ptr<!s8i>
+
+// TODO(cir): zero-initialize the padding
+
+// LLVM: %[[A_ADDR:.*]] = alloca %struct.CompleteS, i64 1, align 4
+// LLVM: %[[A_ELEM_0_PTR:.*]] = getelementptr %struct.CompleteS, ptr %[[A_ADDR]], i32 0, i32 0
+// LLVM: store i32 0, ptr %[[A_ELEM_0_PTR]], align 4
+// LLVM: %[[A_ELEM_1_PTR:.*]] = getelementptr %struct.CompleteS, ptr %[[A_ADDR]], i32 0, i32 1
+// LLVM: store i8 0, ptr %[[A_ELEM_1_PTR]], align 4
+
+// OGCG: %[[A_ADDR:.*]] = alloca %struct.CompleteS, align 4
+// OGCG: call void @llvm.memset.p0.i64(ptr align 4 %[[A_ADDR]], i8 0, i64 8, i1 false)
+
+struct StructWithConstMember {
+ int a : 1;
+};
+
+void struct_with_const_member_expr() {
+ int a = (StructWithConstMember){}.a;
+}
+
+// CIR: %[[A_ADDR:.*]] = cir.alloca !s32i, !cir.ptr<!s32i>, ["a", init]
+// CIR: %[[RESULT:.*]] = cir.scope {
+// CIR: %[[REF_ADDR:.*]] = cir.alloca !rec_StructWithConstMember, !cir.ptr<!rec_StructWithConstMember>, ["ref.tmp0"]
+// CIR: %[[ELEM_0_PTR:.*]] = cir.get_member %[[REF_ADDR]][0] {name = "a"} : !cir.ptr<!rec_StructWithConstMember> -> !cir.ptr<!u8i>
+// CIR: %[[CONST_0:.*]] = cir.const #cir.int<0> : !s32i
+// CIR: %[[SET_BF:.*]] = cir.set_bitfield{{.*}} (#bfi_a, %[[ELEM_0_PTR]] : !cir.ptr<!u8i>, %[[CONST_0]] : !s32i) -> !s32i
+// CIR: %[[CONST_0:.*]] = cir.const #cir.int<0> : !s32i
+// CIR: cir.yield %[[CONST_0]] : !s32i
+// CIR: } : !s32i
+// CIR: cir.store{{.*}} %[[RESULT]], %[[A_ADDR]] : !s32i, !cir.ptr<!s32i>
+
+// TODO(cir): zero-initialize the padding
+
+// LLVM: %[[REF_ADDR:.*]] = alloca %struct.StructWithConstMember, i64 1, align 4
+// LLVM: %[[A_ADDR:.*]] = alloca i32, i64 1, align 4
+// LLVM: br label %[[BF_LABEL:.*]]
+// LLVM: [[BF_LABEL]]:
+// LLVM: %[[ELEM_0_PTR:.*]] = getelementptr %struct.StructWithConstMember, ptr %[[REF_ADDR]], i32 0, i32 0
+// LLVM: %[[TMP_REF:.*]] = load i8, ptr %[[ELEM_0_PTR]], align 4
+// LLVM: %[[BF_CLEAR:.*]] = and i8 %[[TMP_REF]], -2
+// LLVM: %[[BF_SET:.*]] = or i8 %[[BF_CLEAR]], 0
+// LLVM: store i8 %[[BF_SET]], ptr %[[ELEM_0_PTR]], align 4
+// LLVM: br label %[[RESULT_LABEL:.*]]
+// LLVM: [[RESULT_LABEL]]:
+// LLVM: %[[RESULT:.*]] = phi i32 [ 0, %[[BF_LABEL]] ]
+// LLVM: store i32 %[[RESULT]], ptr %[[A_ADDR]], align 4
+
+// OGCG: %[[A_ADDR:.*]] = alloca i32, align 4
+// OGCG: %[[REF_ADDR:.*]] = alloca %struct.StructWithConstMember, align 4
+// OGCG: %[[TMP_REF:.*]] = load i8, ptr %[[REF_ADDR]], align 4
+// OGCG: %[[BF_CLEAR:.*]] = and i8 %[[TMP_REF]], -2
+// OGCG: %[[BF_SET:.*]] = or i8 %[[BF_CLEAR]], 0
+// OGCG: store i8 %[[BF_SET]], ptr %[[REF_ADDR]], align 4
+// OGCG: store i32 0, ptr %[[A_ADDR]], align 4
diff --git a/clang/test/CIR/CodeGen/try-catch.cpp b/clang/test/CIR/CodeGen/try-catch.cpp
index 8f0b3c4..5a50310 100644
--- a/clang/test/CIR/CodeGen/try-catch.cpp
+++ b/clang/test/CIR/CodeGen/try-catch.cpp
@@ -30,3 +30,90 @@ void empty_try_block_with_catch_with_int_exception() {
// OGCG: define{{.*}} void @_Z45empty_try_block_with_catch_with_int_exceptionv()
// OGCG: ret void
+
+void try_catch_with_empty_catch_all() {
+ int a = 1;
+ try {
+ return;
+ ++a;
+ } catch (...) {
+ }
+}
+
+// CIR: %[[A_ADDR:.*]] = cir.alloca !s32i, !cir.ptr<!s32i>, ["a", init]
+// CIR: %[[CONST_1:.*]] = cir.const #cir.int<1> : !s32i
+// CIR: cir.store{{.*}} %[[CONST_1]], %[[A_ADDR]] : !s32i, !cir.ptr<!s32i>
+// CIR: cir.scope {
+// CIR: cir.try {
+// CIR: cir.return
+// CIR: ^bb1: // no predecessors
+// CIR: %[[TMP_A:.*]] = cir.load{{.*}} %[[A_ADDR]] : !cir.ptr<!s32i>, !s32i
+// CIR: %[[RESULT:.*]] = cir.unary(inc, %[[TMP_A]]) nsw : !s32i, !s32i
+// CIR: cir.store{{.*}} %[[RESULT]], %[[A_ADDR]] : !s32i, !cir.ptr<!s32i>
+// CIR: cir.yield
+// CIR: }
+// CIR: }
+
+// LLVM: %[[A_ADDR:.*]] = alloca i32, i64 1, align 4
+// LLVM: store i32 1, ptr %[[A_ADDR]], align 4
+// LLVM: br label %[[BB_2:.*]]
+// LLVM: [[BB_2]]:
+// LLVM: br label %[[BB_3:.*]]
+// LLVM: [[BB_3]]:
+// LLVM: ret void
+// LLVM: [[BB_4:.*]]:
+// LLVM: %[[TMP_A:.*]] = load i32, ptr %[[A_ADDR]], align 4
+// LLVM: %[[RESULT:.*]] = add nsw i32 %[[TMP_A]], 1
+// LLVM: store i32 %[[RESULT]], ptr %[[A_ADDR]], align 4
+// LLVM: br label %[[BB_7:.*]]
+// LLVM: [[BB_7]]:
+// LLVM: br label %[[BB_8:.*]]
+// LLVM: [[BB_8]]:
+// LLVM: ret void
+
+// OGCG: %[[A_ADDR:.*]] = alloca i32, align 4
+// OGCG: store i32 1, ptr %[[A_ADDR]], align 4
+// OGCG: ret void
+
+void try_catch_with_empty_catch_all_2() {
+ int a = 1;
+ try {
+ ++a;
+ return;
+ } catch (...) {
+ }
+}
+
+// CIR: %[[A_ADDR:.*]] = cir.alloca !s32i, !cir.ptr<!s32i>, ["a", init]
+// CIR: %[[CONST_1:.*]] = cir.const #cir.int<1> : !s32i
+// CIR: cir.store{{.*}} %[[CONST_1]], %[[A_ADDR]] : !s32i, !cir.ptr<!s32i>
+// CIR: cir.scope {
+// CIR: cir.try {
+// CIR: %[[TMP_A:.*]] = cir.load{{.*}} %[[A_ADDR]] : !cir.ptr<!s32i>, !s32i
+// CIR: %[[RESULT:.*]] = cir.unary(inc, %[[TMP_A]]) nsw : !s32i, !s32i
+// CIR: cir.store{{.*}} %[[RESULT]], %[[A_ADDR]] : !s32i, !cir.ptr<!s32i>
+// CIR: cir.return
+// CIR: }
+// CIR: }
+
+// LLVM: %[[A_ADDR:.*]] = alloca i32, i64 1, align 4
+// LLVM: store i32 1, ptr %[[A_ADDR]], align 4
+// LLVM: br label %[[BB_2:.*]]
+// LLVM: [[BB_2]]:
+// LLVM: br label %[[BB_3:.*]]
+// LLVM: [[BB_3]]:
+// LLVM: %[[TMP_A:.*]] = load i32, ptr %[[A_ADDR]], align 4
+// LLVM: %[[RESULT:.*]] = add nsw i32 %[[TMP_A]], 1
+// LLVM: store i32 %[[RESULT]], ptr %[[A_ADDR]], align 4
+// LLVM: ret void
+// LLVM: [[BB_6:.*]]:
+// LLVM: br label %[[BB_7:.*]]
+// LLVM: [[BB_7]]:
+// LLVM: ret void
+
+// OGCG: %[[A_ADDR:.*]] = alloca i32, align 4
+// OGCG: store i32 1, ptr %[[A_ADDR]], align 4
+// OGCG: %[[TMP_A:.*]] = load i32, ptr %[[A_ADDR]], align 4
+// OGCG: %[[RESULT:.*]] = add nsw i32 %[[TMP_A]], 1
+// OGCG: store i32 %[[RESULT]], ptr %[[A_ADDR]], align 4
+// OGCG: ret void
diff --git a/clang/test/CIR/CodeGen/vla.c b/clang/test/CIR/CodeGen/vla.c
index b22c704..0af4f83 100644
--- a/clang/test/CIR/CodeGen/vla.c
+++ b/clang/test/CIR/CodeGen/vla.c
@@ -282,4 +282,61 @@ void f3(unsigned len) {
// break;
// }
// }
- \ No newline at end of file
+
+int f5(unsigned long len) {
+ int arr[len];
+ return arr[2];
+}
+
+// CIR: cir.func{{.*}} @f5(%[[LEN_ARG:.*]]: !u64i {{.*}}) -> !s32i
+// CIR: %[[LEN_ADDR:.*]] = cir.alloca !u64i, !cir.ptr<!u64i>, ["len", init]
+// CIR: %[[RET_ADDR:.*]] = cir.alloca !s32i, !cir.ptr<!s32i>, ["__retval"]
+// CIR: %[[SAVED_STACK:.*]] = cir.alloca !cir.ptr<!u8i>, !cir.ptr<!cir.ptr<!u8i>>, ["saved_stack"]
+// CIR: cir.store{{.*}} %[[LEN_ARG]], %[[LEN_ADDR]]
+// CIR: %[[LEN:.*]] = cir.load{{.*}} %[[LEN_ADDR]]
+// CIR: %[[STACK_PTR:.*]] = cir.stacksave
+// CIR: cir.store{{.*}} %[[STACK_PTR]], %[[SAVED_STACK]]
+// CIR: %[[ARR:.*]] = cir.alloca !s32i, !cir.ptr<!s32i>, %[[LEN]] : !u64i, ["arr"]
+// CIR: %[[TWO:.*]] = cir.const #cir.int<2> : !s32i
+// CIR: %[[ARR_2:.*]] = cir.ptr_stride %[[ARR]], %[[TWO]]
+// CIR: %[[ARR_VAL:.*]] = cir.load{{.*}} %[[ARR_2]] : !cir.ptr<!s32i>, !s32i
+// CIR: cir.store{{.*}} %[[ARR_VAL]], %[[RET_ADDR]] : !s32i, !cir.ptr<!s32i>
+// CIR: %[[STACK_RESTORE_PTR:.*]] = cir.load{{.*}} %[[SAVED_STACK]]
+// CIR: cir.stackrestore %[[STACK_RESTORE_PTR]]
+// CIR: %[[RET_VAL:.*]] = cir.load{{.*}} %[[RET_ADDR]]
+// CIR: cir.return %[[RET_VAL]] : !s32i
+
+// LLVM: define{{.*}} i32 @f5(i64 %[[LEN_ARG:.*]])
+// LLVM: %[[LEN_ADDR:.*]] = alloca i64
+// LLVM: %[[RET_ADDR:.*]] = alloca i32
+// LLVM: %[[SAVED_STACK:.*]] = alloca ptr
+// LLVM: store i64 %[[LEN_ARG]], ptr %[[LEN_ADDR]]
+// LLVM: %[[LEN:.*]] = load i64, ptr %[[LEN_ADDR]]
+// LLVM: %[[STACK_PTR:.*]] = call ptr @llvm.stacksave.p0()
+// LLVM: store ptr %[[STACK_PTR]], ptr %[[SAVED_STACK]]
+// LLVM: %[[ARR:.*]] = alloca i32, i64 %[[LEN]]
+// LLVM: %[[ARR_2:.*]] = getelementptr i32, ptr %[[ARR]], i64 2
+// LLVM: %[[ARR_VAL:.*]] = load i32, ptr %[[ARR_2]]
+// LLVM: store i32 %[[ARR_VAL]], ptr %[[RET_ADDR]]
+// LLVM: %[[STACK_RESTORE_PTR:.*]] = load ptr, ptr %[[SAVED_STACK]]
+// LLVM: call void @llvm.stackrestore.p0(ptr %[[STACK_RESTORE_PTR]])
+// LLVM: %[[RET_VAL:.*]] = load i32, ptr %[[RET_ADDR]]
+// LLVM: ret i32 %[[RET_VAL]]
+
+// Note: VLA_EXPR0 below is emitted to capture debug info.
+
+// OGCG: define{{.*}} i32 @f5(i64 {{.*}} %[[LEN_ARG:.*]])
+// OGCG: %[[LEN_ADDR:.*]] = alloca i64
+// OGCG: %[[SAVED_STACK:.*]] = alloca ptr
+// OGCG: %[[VLA_EXPR0:.*]] = alloca i64
+// OGCG: store i64 %[[LEN_ARG]], ptr %[[LEN_ADDR]]
+// OGCG: %[[LEN:.*]] = load i64, ptr %[[LEN_ADDR]]
+// OGCG: %[[STACK_PTR:.*]] = call ptr @llvm.stacksave.p0()
+// OGCG: store ptr %[[STACK_PTR]], ptr %[[SAVED_STACK]]
+// OGCG: %[[ARR:.*]] = alloca i32, i64 %[[LEN]]
+// OGCG: store i64 %[[LEN]], ptr %[[VLA_EXPR0]]
+// OGCG: %[[ARR_2:.*]] = getelementptr inbounds i32, ptr %[[ARR]], i64 2
+// OGCG: %[[ARR_VAL:.*]] = load i32, ptr %[[ARR_2]]
+// OGCG: %[[STACK_RESTORE_PTR:.*]] = load ptr, ptr %[[SAVED_STACK]]
+// OGCG: call void @llvm.stackrestore.p0(ptr %[[STACK_RESTORE_PTR]])
+// OGCG: ret i32 %[[ARR_VAL]]
diff --git a/clang/test/CIR/IR/invalid-atomic.cir b/clang/test/CIR/IR/invalid-atomic.cir
new file mode 100644
index 0000000..a124e43
--- /dev/null
+++ b/clang/test/CIR/IR/invalid-atomic.cir
@@ -0,0 +1,7 @@
+// RUN: cir-opt %s -verify-diagnostics -split-input-file
+
+cir.func @f1(%arg0: !cir.ptr<!cir.float>, %arg1: !cir.float) {
+ // expected-error @below {{only atomic add, sub, max, and min operation could operate on floating-point values}}
+ %0 = cir.atomic.fetch and seq_cst %arg0, %arg1 : (!cir.ptr<!cir.float>, !cir.float) -> !cir.float
+ cir.return
+}
diff --git a/clang/test/CodeGen/AArch64/sign-return-address.c b/clang/test/CodeGen/AArch64/sign-return-address.c
index 11dd683..2b505de 100644
--- a/clang/test/CodeGen/AArch64/sign-return-address.c
+++ b/clang/test/CodeGen/AArch64/sign-return-address.c
@@ -28,17 +28,17 @@
// NONE-NOT: !"branch-target-enforcement"
// ALL-NOT: !"branch-target-enforcement"
// PART-NOT: !"branch-target-enforcement"
-// BTE: !{i32 8, !"branch-target-enforcement", i32 1}
+// BTE: !{i32 8, !"branch-target-enforcement", i32 2}
// B-KEY-NOT: !"branch-target-enforcement"
// NONE-NOT: !"sign-return-address"
-// ALL: !{i32 8, !"sign-return-address", i32 1}
-// PART: !{i32 8, !"sign-return-address", i32 1}
+// ALL: !{i32 8, !"sign-return-address", i32 2}
+// PART: !{i32 8, !"sign-return-address", i32 2}
// BTE-NOT: !"sign-return-address"
-// B-KEY: !{i32 8, !"sign-return-address", i32 1}
+// B-KEY: !{i32 8, !"sign-return-address", i32 2}
// NONE-NOT: !"sign-return-address-all"
-// ALL: !{i32 8, !"sign-return-address-all", i32 1}
+// ALL: !{i32 8, !"sign-return-address-all", i32 2}
// PART-NOT: !"sign-return-address-all"
// BTE-NOT: !"sign-return-address-all"
// B-KEY-NOT: !"sign-return-address-all"
@@ -47,6 +47,6 @@
// ALL-NOT: !"sign-return-address-with-bkey"
// PART-NOT: !"sign-return-address-with-bkey"
// BTE-NOT: !"sign-return-address-with-bkey"
-// B-KEY: !{i32 8, !"sign-return-address-with-bkey", i32 1}
+// B-KEY: !{i32 8, !"sign-return-address-with-bkey", i32 2}
void foo() {}
diff --git a/clang/test/CodeGen/arm-branch-protection-attr-2.c b/clang/test/CodeGen/arm-branch-protection-attr-2.c
index fad5dc0..5391537 100644
--- a/clang/test/CodeGen/arm-branch-protection-attr-2.c
+++ b/clang/test/CodeGen/arm-branch-protection-attr-2.c
@@ -23,16 +23,16 @@
// NONE-NOT: !"branch-target-enforcement"
// PART-NOT: !"branch-target-enforcement"
// ALL-NOT: !"branch-target-enforcement"
-// BTE: !{i32 8, !"branch-target-enforcement", i32 1}
+// BTE: !{i32 8, !"branch-target-enforcement", i32 2}
// NONE-NOT: !"sign-return-address"
-// PART: !{i32 8, !"sign-return-address", i32 1}
-// ALL: !{i32 8, !"sign-return-address", i32 1}
+// PART: !{i32 8, !"sign-return-address", i32 2}
+// ALL: !{i32 8, !"sign-return-address", i32 2}
// BTE-NOT: !"sign-return-address"
// NONE-NOT: !"sign-return-address-all", i32 0}
// PART-NOT: !"sign-return-address-all", i32 0}
-// ALL: !{i32 8, !"sign-return-address-all", i32 1}
+// ALL: !{i32 8, !"sign-return-address-all", i32 2}
// BTE-NOT: !"sign-return-address-all", i32 0}
void foo() {}
diff --git a/clang/test/Driver/arm-abi.c b/clang/test/Driver/arm-abi.c
index 139456c..b89b969 100644
--- a/clang/test/Driver/arm-abi.c
+++ b/clang/test/Driver/arm-abi.c
@@ -31,6 +31,8 @@
// FreeBSD / OpenBSD default to aapcs-linux
// RUN: %clang -target arm--freebsd- %s -### -o %t.o 2>&1 \
// RUN: | FileCheck -check-prefix=CHECK-AAPCS-LINUX %s
+// RUN: %clang -target arm--fuchsia- %s -### -o %t.o 2>&1 \
+// RUN: | FileCheck -check-prefix=CHECK-AAPCS-LINUX %s
// RUN: %clang -target arm--openbsd- %s -### -o %t.o 2>&1 \
// RUN: | FileCheck -check-prefix=CHECK-AAPCS-LINUX %s
// RUN: %clang -target arm--haiku- %s -### -o %t.o 2>&1 \
diff --git a/clang/test/Driver/fuchsia.c b/clang/test/Driver/fuchsia.c
index cf92f85..3fb2a94 100644
--- a/clang/test/Driver/fuchsia.c
+++ b/clang/test/Driver/fuchsia.c
@@ -2,6 +2,10 @@
// RUN: -resource-dir=%S/Inputs/resource_dir_with_per_target_subdir \
// RUN: --sysroot=%S/platform -fuse-ld=ld 2>&1 \
// RUN: | FileCheck -check-prefixes=CHECK,CHECK-X86_64 %s
+// RUN: %clang -### %s --target=arm-unknown-fuchsia \
+// RUN: -resource-dir=%S/Inputs/resource_dir_with_per_target_subdir \
+// RUN: --sysroot=%S/platform -fuse-ld=ld 2>&1 \
+// RUN: | FileCheck -check-prefixes=CHECK,CHECK-ARMV8A %s
// RUN: %clang -### %s --target=aarch64-unknown-fuchsia \
// RUN: -resource-dir=%S/Inputs/resource_dir_with_per_target_subdir \
// RUN: --sysroot=%S/platform -fuse-ld=ld 2>&1 \
@@ -14,6 +18,10 @@
// RUN: -resource-dir=%S/Inputs/resource_dir_with_per_target_subdir \
// RUN: --sysroot=%S/platform -fuse-ld=ld 2>&1 \
// RUN: | FileCheck -check-prefixes=CHECK,CHECK-X86_64 %s
+// RUN: %clang -### %s --target=arm-fuchsia \
+// RUN: -resource-dir=%S/Inputs/resource_dir_with_per_target_subdir \
+// RUN: --sysroot=%S/platform -fuse-ld=ld 2>&1 \
+// RUN: | FileCheck -check-prefixes=CHECK,CHECK-ARMV8A %s
// RUN: %clang -### %s --target=aarch64-fuchsia \
// RUN: -resource-dir=%S/Inputs/resource_dir_with_per_target_subdir \
// RUN: --sysroot=%S/platform -fuse-ld=ld 2>&1 \
@@ -24,6 +32,7 @@
// RUN: | FileCheck -check-prefixes=CHECK,CHECK-RISCV64 %s
// CHECK: "-cc1"
// CHECK-X86_64: "-triple" "x86_64-unknown-fuchsia"
+// CHECK-ARMV8A: "-triple" "thumbv8a-unknown-fuchsia"
// CHECK-AARCH64: "-triple" "aarch64-unknown-fuchsia"
// CHECK-RISCV64: "-triple" "riscv64-unknown-fuchsia"
// CHECK: "-funwind-tables=2"
diff --git a/clang/test/Driver/print-supported-extensions-riscv.c b/clang/test/Driver/print-supported-extensions-riscv.c
index 7972b9e..cb81273 100644
--- a/clang/test/Driver/print-supported-extensions-riscv.c
+++ b/clang/test/Driver/print-supported-extensions-riscv.c
@@ -190,6 +190,11 @@
// CHECK-NEXT: xsfmm64t 0.6 'XSfmm64t' (TE=64 configuration)
// CHECK-NEXT: xsfmmbase 0.6 'XSfmmbase' (All non arithmetic instructions for all TEWs and sf.vtzero)
// CHECK-NEXT: xsfvcp 1.0 'XSfvcp' (SiFive Custom Vector Coprocessor Interface Instructions)
+// CHECK-NEXT: xsfvfbfexp16e 0.5 'XSfvfbfexp16e' (SiFive Vector Floating-Point Exponential Function Instruction, BFloat16)
+// CHECK-NEXT: xsfvfexp16e 0.5 'XSfvfexp16e' (SiFive Vector Floating-Point Exponential Function Instruction, Half Precision)
+// CHECK-NEXT: xsfvfexp32e 0.5 'XSfvfexp32e' (SiFive Vector Floating-Point Exponential Function Instruction, Single Precision)
+// CHECK-NEXT: xsfvfexpa 0.2 'XSfvfexpa' (SiFive Vector Floating-Point Exponential Approximation Instruction)
+// CHECK-NEXT: xsfvfexpa64e 0.2 'XSfvfexpa64e' (SiFive Vector Floating-Point Exponential Approximation Instruction with Double-Precision)
// CHECK-NEXT: xsfvfnrclipxfqf 1.0 'XSfvfnrclipxfqf' (SiFive FP32-to-int8 Ranged Clip Instructions)
// CHECK-NEXT: xsfvfwmaccqqq 1.0 'XSfvfwmaccqqq' (SiFive Matrix Multiply Accumulate Instruction (4-by-4))
// CHECK-NEXT: xsfvqmaccdod 1.0 'XSfvqmaccdod' (SiFive Int8 Matrix Multiplication Instructions (2-by-8 and 8-by-2))
diff --git a/clang/test/Frontend/arm-ignore-branch-protection-option.c b/clang/test/Frontend/arm-ignore-branch-protection-option.c
index 99a2acc..45bdb37 100644
--- a/clang/test/Frontend/arm-ignore-branch-protection-option.c
+++ b/clang/test/Frontend/arm-ignore-branch-protection-option.c
@@ -15,4 +15,4 @@ __attribute__((target("arch=cortex-m0"))) void f() {}
// CHECK-NOT: attributes { {{.*}} "branch-target-enforcement"
/// Check that there are branch protection module attributes despite the warning.
-// CHECK: !{i32 8, !"branch-target-enforcement", i32 1}
+// CHECK: !{i32 8, !"branch-target-enforcement", i32 2}
diff --git a/clang/test/Preprocessor/riscv-atomics.c b/clang/test/Preprocessor/riscv-atomics.c
new file mode 100644
index 0000000..6e02173
--- /dev/null
+++ b/clang/test/Preprocessor/riscv-atomics.c
@@ -0,0 +1,24 @@
+// RUN: %clang --target=riscv32-unknown-linux-gnu -march=rv32ia -x c -E -dM %s \
+// RUN: -o - | FileCheck %s
+// RUN: %clang --target=riscv32-unknown-linux-gnu -march=rv32i_zalrsc -x c -E \
+// RUN: -dM %s -o - | FileCheck %s
+// RUN: %clang --target=riscv64-unknown-linux-gnu -march=rv64ia -x c -E -dM %s \
+// RUN: -o - | FileCheck %s --check-prefixes=CHECK,CHECK-RV64
+// RUN: %clang --target=riscv64-unknown-linux-gnu -march=rv64i_zalrsc -x c -E \
+// RUN: -dM %s -o - | FileCheck %s --check-prefixes=CHECK,CHECK-RV64
+
+// CHECK: #define __GCC_ATOMIC_BOOL_LOCK_FREE 2
+// CHECK: #define __GCC_ATOMIC_CHAR16_T_LOCK_FREE 2
+// CHECK: #define __GCC_ATOMIC_CHAR32_T_LOCK_FREE 2
+// CHECK: #define __GCC_ATOMIC_CHAR_LOCK_FREE 2
+// CHECK: #define __GCC_ATOMIC_INT_LOCK_FREE 2
+// CHECK-RV64: #define __GCC_ATOMIC_LLONG_LOCK_FREE 2
+// CHECK: #define __GCC_ATOMIC_LONG_LOCK_FREE 2
+// CHECK: #define __GCC_ATOMIC_POINTER_LOCK_FREE 2
+// CHECK: #define __GCC_ATOMIC_SHORT_LOCK_FREE 2
+// CHECK: #define __GCC_ATOMIC_TEST_AND_SET_TRUEVAL 1
+// CHECK: #define __GCC_ATOMIC_WCHAR_T_LOCK_FREE 2
+// CHECK: #define __GCC_HAVE_SYNC_COMPARE_AND_SWAP_1 1
+// CHECK: #define __GCC_HAVE_SYNC_COMPARE_AND_SWAP_2 1
+// CHECK: #define __GCC_HAVE_SYNC_COMPARE_AND_SWAP_4 1
+// CHECK-RV64: #define __GCC_HAVE_SYNC_COMPARE_AND_SWAP_8 1
diff --git a/clang/test/Preprocessor/riscv-target-features-sifive.c b/clang/test/Preprocessor/riscv-target-features-sifive.c
index 1c49b55..5002f62 100644
--- a/clang/test/Preprocessor/riscv-target-features-sifive.c
+++ b/clang/test/Preprocessor/riscv-target-features-sifive.c
@@ -5,6 +5,11 @@
// CHECK-NOT: __riscv_xsfcease {{.*$}}
// CHECK-NOT: __riscv_xsfvcp {{.*$}}
+// CHECK-NOT: __riscv_xsfvfbfexp16e {{.*$}}
+// CHECK-NOT: __riscv_xsfvfexp16e {{.*$}}
+// CHECK-NOT: __riscv_xsfvfexp32e {{.*$}}
+// CHECK-NOT: __riscv_xsfvfexpa {{.*$}}
+// CHECK-NOT: __riscv_xsfvfexpa64e {{.*$}}
// CHECK-NOT: __riscv_xsfvfnrclipxfqf {{.*$}}
// CHECK-NOT: __riscv_xsfvfwmaccqqq {{.*$}}
// CHECK-NOT: __riscv_xsfqmaccdod {{.*$}}
@@ -39,6 +44,46 @@
// CHECK-XSFVCP-EXT: __riscv_xsfvcp 1000000{{$}}
// RUN: %clang --target=riscv32-unknown-linux-gnu \
+// RUN: -march=rv32ixsfvfbfexp16e_zvfbfmin -E -dM %s \
+// RUN: -o - | FileCheck --check-prefix=CHECK-XSFVFBFEXP16E-EXT %s
+// RUN: %clang --target=riscv64-unknown-linux-gnu \
+// RUN: -march=rv64ixsfvfbfexp16e_zvfbfmin -E -dM %s \
+// RUN: -o - | FileCheck --check-prefix=CHECK-XSFVFBFEXP16E-EXT %s
+// CHECK-XSFVFBFEXP16E-EXT: __riscv_xsfvfbfexp16e 5000{{$}}
+
+// RUN: %clang --target=riscv32-unknown-linux-gnu \
+// RUN: -march=rv32ixsfvfexp16e -E -dM %s \
+// RUN: -o - | FileCheck --check-prefix=CHECK-XSFVFEXP16E-EXT %s
+// RUN: %clang --target=riscv64-unknown-linux-gnu \
+// RUN: -march=rv64ixsfvfexp16e -E -dM %s \
+// RUN: -o - | FileCheck --check-prefix=CHECK-XSFVFEXP16E-EXT %s
+// CHECK-XSFVFEXP16E-EXT: __riscv_xsfvfexp16e 5000{{$}}
+
+// RUN: %clang --target=riscv32-unknown-linux-gnu \
+// RUN: -march=rv32ixsfvfexp32e -E -dM %s \
+// RUN: -o - | FileCheck --check-prefix=CHECK-XSFVFEXP32E-EXT %s
+// RUN: %clang --target=riscv64-unknown-linux-gnu \
+// RUN: -march=rv64ixsfvfexp32e -E -dM %s \
+// RUN: -o - | FileCheck --check-prefix=CHECK-XSFVFEXP32E-EXT %s
+// CHECK-XSFVFEXP32E-EXT: __riscv_xsfvfexp32e 5000{{$}}
+
+// RUN: %clang --target=riscv32-unknown-linux-gnu \
+// RUN: -march=rv32ixsfvfexpa -E -dM %s \
+// RUN: -o - | FileCheck --check-prefix=CHECK-XSFVFEXPA-EXT %s
+// RUN: %clang --target=riscv64-unknown-linux-gnu \
+// RUN: -march=rv64ixsfvfexpa -E -dM %s \
+// RUN: -o - | FileCheck --check-prefix=CHECK-XSFVFEXPA-EXT %s
+// CHECK-XSFVFEXPA-EXT: __riscv_xsfvfexpa 2000{{$}}
+
+// RUN: %clang --target=riscv32-unknown-linux-gnu \
+// RUN: -march=rv32ixsfvfexpa64e -E -dM %s \
+// RUN: -o - | FileCheck --check-prefix=CHECK-XSFVFEXPA64E-EXT %s
+// RUN: %clang --target=riscv64-unknown-linux-gnu \
+// RUN: -march=rv64ixsfvfexpa64e -E -dM %s \
+// RUN: -o - | FileCheck --check-prefix=CHECK-XSFVFEXPA64E-EXT %s
+// CHECK-XSFVFEXPA64E-EXT: __riscv_xsfvfexpa64e 2000{{$}}
+
+// RUN: %clang --target=riscv32-unknown-linux-gnu \
// RUN: -march=rv32ixsfvfnrclipxfqf -E -dM %s \
// RUN: -o - | FileCheck --check-prefix=CHECK-XSFVFNRCLIPXFQF-EXT %s
// RUN: %clang --target=riscv64-unknown-linux-gnu \
diff --git a/clang/test/lit.cfg.py b/clang/test/lit.cfg.py
index 29088ef..52b275c 100644
--- a/clang/test/lit.cfg.py
+++ b/clang/test/lit.cfg.py
@@ -18,11 +18,22 @@ from lit.llvm.subst import FindTool
# name: The name of this test suite.
config.name = "Clang"
+# TODO: Consolidate the logic for turning on the internal shell by default for all LLVM test suites.
+# See https://github.com/llvm/llvm-project/issues/106636 for more details.
+#
+# We prefer the lit internal shell, which provides a better user experience on
+# failures and is faster, unless the user explicitly disables it with the
+# LIT_USE_INTERNAL_SHELL=0 env var.
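+#
+# For example (hypothetical build-tree invocation):
+#   LIT_USE_INTERNAL_SHELL=0 ./bin/llvm-lit -sv clang/test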
+use_lit_shell = True
+lit_shell_env = os.environ.get("LIT_USE_INTERNAL_SHELL")
+if lit_shell_env:
+ use_lit_shell = lit.util.pythonize_bool(lit_shell_env)
+
# testFormat: The test format to use to interpret tests.
#
# For now we require '&&' between commands, until they get globally killed and
# the test runner updated.
-config.test_format = lit.formats.ShTest(not llvm_config.use_lit_shell)
+config.test_format = lit.formats.ShTest(execute_external=not use_lit_shell)
# suffixes: A list of file extensions to treat as test files.
config.suffixes = [
diff --git a/clang/unittests/Analysis/FlowSensitive/UncheckedStatusOrAccessModelTestFixture.cpp b/clang/unittests/Analysis/FlowSensitive/UncheckedStatusOrAccessModelTestFixture.cpp
index 4827cc1..cae9265 100644
--- a/clang/unittests/Analysis/FlowSensitive/UncheckedStatusOrAccessModelTestFixture.cpp
+++ b/clang/unittests/Analysis/FlowSensitive/UncheckedStatusOrAccessModelTestFixture.cpp
@@ -2453,6 +2453,167 @@ TEST_P(UncheckedStatusOrAccessModelTest, SubclassOperator) {
)cc");
}
+TEST_P(UncheckedStatusOrAccessModelTest, UnwrapValueWithStatusCheck) {
+ ExpectDiagnosticsFor(R"cc(
+#include "unchecked_statusor_access_test_defs.h"
+
+ void target(STATUSOR_INT sor) {
+ if (sor.status().ok())
+ sor.value();
+ else
+ sor.value(); // [[unsafe]]
+ }
+ )cc");
+}
+
+TEST_P(UncheckedStatusOrAccessModelTest, UnwrapValueWithStatusRefCheck) {
+ ExpectDiagnosticsFor(R"cc(
+#include "unchecked_statusor_access_test_defs.h"
+
+ void target(STATUSOR_INT sor) {
+ const STATUS& s = sor.status();
+ if (s.ok())
+ sor.value();
+ else
+ sor.value(); // [[unsafe]]
+ }
+ )cc");
+}
+
+TEST_P(UncheckedStatusOrAccessModelTest, UnwrapValueWithStatusPtrCheck) {
+ ExpectDiagnosticsFor(R"cc(
+#include "unchecked_statusor_access_test_defs.h"
+
+ void target(STATUSOR_INT sor) {
+ const STATUS* s = &sor.status();
+ if (s->ok())
+ sor.value();
+ else
+ sor.value(); // [[unsafe]]
+ }
+ )cc");
+}
+
+TEST_P(UncheckedStatusOrAccessModelTest, UnwrapValueWithMovedStatus) {
+ ExpectDiagnosticsFor(R"cc(
+#include "unchecked_statusor_access_test_defs.h"
+
+ void target(STATUSOR_INT sor) {
+ if (std::move(sor.status()).ok())
+ sor.value();
+ else
+ sor.value(); // [[unsafe]]
+ }
+ )cc");
+}
+
+TEST_P(UncheckedStatusOrAccessModelTest, MembersUsedInsideStatus) {
+ ExpectDiagnosticsFor(R"cc(
+ namespace absl {
+
+ class Status {
+ public:
+ bool ok() const;
+
+ void target() const { ok(); }
+ };
+
+ } // namespace absl
+ )cc");
+}
+
+TEST_P(UncheckedStatusOrAccessModelTest, StatusUpdate) {
+ ExpectDiagnosticsFor(R"cc(
+#include "unchecked_statusor_access_test_defs.h"
+
+ void target(STATUSOR_INT sor) {
+ STATUS s;
+ s.Update(sor.status());
+ if (s.ok())
+ sor.value();
+ else
+ sor.value(); // [[unsafe]]
+ }
+ )cc");
+
+ ExpectDiagnosticsFor(R"cc(
+#include "unchecked_statusor_access_test_defs.h"
+
+ void target(STATUSOR_INT sor1, STATUSOR_INT sor2) {
+ STATUS s;
+ s.Update(sor1.status());
+ s.Update(sor2.status());
+ if (s.ok()) {
+ sor1.value();
+ sor2.value();
+ }
+ }
+ )cc");
+
+ ExpectDiagnosticsFor(R"cc(
+#include "unchecked_statusor_access_test_defs.h"
+
+ void target(STATUSOR_INT sor1, STATUSOR_INT sor2) {
+ STATUS s;
+ s.Update(sor1.status());
+ CHECK(s.ok());
+ s.Update(sor2.status());
+ sor1.value();
+ sor2.value(); // [[unsafe]]
+ }
+ )cc");
+
+ ExpectDiagnosticsFor(R"cc(
+#include "unchecked_statusor_access_test_defs.h"
+
+ void target(STATUSOR_INT sor1, STATUSOR_INT sor2) {
+ STATUS s;
+ s.Update(sor1.status());
+ CHECK(s.ok());
+ sor1.value();
+ sor2.value(); // [[unsafe]]
+ }
+ )cc");
+
+ ExpectDiagnosticsFor(R"cc(
+#include "unchecked_statusor_access_test_defs.h"
+
+ void target(STATUSOR_INT sor1, STATUSOR_INT sor2) {
+ STATUS s;
+ STATUS sor1_status = sor1.status();
+ s.Update(std::move(sor1_status));
+ CHECK(s.ok());
+ sor1.value();
+ sor2.value(); // [[unsafe]]
+ }
+ )cc");
+
+ ExpectDiagnosticsFor(R"cc(
+#include "unchecked_statusor_access_test_defs.h"
+
+ void target(STATUSOR_INT sor1, STATUSOR_INT sor2) {
+ STATUS s;
+ STATUS sor1_status = sor1.status();
+ sor1_status.Update(sor2.status());
+ s.Update(std::move(sor1_status));
+ CHECK(s.ok());
+ sor1.value();
+ sor2.value();
+ }
+ )cc");
+ ExpectDiagnosticsFor(R"cc(
+#include "unchecked_statusor_access_test_defs.h"
+
+ const STATUS& OptStatus();
+
+ void target(STATUSOR_INT sor) {
+ auto s = sor.status();
+ s.Update(OptStatus());
+ if (s.ok()) sor.value();
+ }
+ )cc");
+}
+
} // namespace
std::string
diff --git a/clang/unittests/Format/FormatTestComments.cpp b/clang/unittests/Format/FormatTestComments.cpp
index fc80bf4..6b433bb 100644
--- a/clang/unittests/Format/FormatTestComments.cpp
+++ b/clang/unittests/Format/FormatTestComments.cpp
@@ -839,6 +839,25 @@ TEST_F(FormatTestComments, MultiLineCommentsInDefines) {
getLLVMStyleWithColumns(17)));
}
+TEST_F(FormatTestComments, LineCommentsInMacrosDoNotGetEscapedNewlines) {
+ FormatStyle Style = getLLVMStyleWithColumns(0);
+ Style.ReflowComments = FormatStyle::RCS_Never;
+ verifyFormat("#define FOO (1U) // comment\n"
+ " // comment",
+ Style);
+
+ Style.ColumnLimit = 32;
+ verifyFormat("#define SOME_MACRO(x) x\n"
+ "#define FOO \\\n"
+ " SOME_MACRO(1) + \\\n"
+ " SOME_MACRO(2) // comment\n"
+ " // comment",
+ "#define SOME_MACRO(x) x\n"
+ "#define FOO SOME_MACRO(1) + SOME_MACRO(2) // comment\n"
+ " // comment",
+ Style);
+}
+
TEST_F(FormatTestComments, ParsesCommentsAdjacentToPPDirectives) {
EXPECT_EQ("namespace {}\n// Test\n#define A",
format("namespace {}\n // Test\n#define A"));
diff --git a/clang/unittests/Sema/HeuristicResolverTest.cpp b/clang/unittests/Sema/HeuristicResolverTest.cpp
index a00632f..c592e74 100644
--- a/clang/unittests/Sema/HeuristicResolverTest.cpp
+++ b/clang/unittests/Sema/HeuristicResolverTest.cpp
@@ -524,6 +524,28 @@ TEST(HeuristicResolver, MemberExpr_HangIssue126536) {
cxxDependentScopeMemberExpr(hasMemberName("foo")).bind("input"));
}
+TEST(HeuristicResolver, MemberExpr_HangOnLongCallChain) {
+ const size_t CallChainLength = 50;
+ std::string Code = R"cpp(
+ template <typename T>
+ void foo(T t) {
+ t
+ )cpp";
+ for (size_t I = 0; I < CallChainLength; ++I)
+ Code.append(".method()\n");
+ Code.append(R"cpp(
+ .lastMethod();
+ }
+ )cpp");
+ // Test that resolution of a name whose base is a long call chain
+ // does not hang. Note that the hang for which this is a regression
+ // test is finite (exponential runtime in the length of the chain),
+ // so a "failure" here manifests as abnormally long runtime.
+ expectResolution(
+ Code, &HeuristicResolver::resolveMemberExpr,
+ cxxDependentScopeMemberExpr(hasMemberName("lastMethod")).bind("input"));
+}
+
TEST(HeuristicResolver, MemberExpr_DefaultTemplateArgument) {
std::string Code = R"cpp(
struct Default {