Diffstat (limited to 'clang/test')
-rw-r--r--  clang/test/CIR/CodeGen/atomic.c | 523
-rw-r--r--  clang/test/CIR/CodeGen/builtin_inline.c | 91
-rw-r--r--  clang/test/CIR/CodeGen/dtors.cpp | 12
-rw-r--r--  clang/test/CIR/CodeGen/lambda.cpp | 24
-rw-r--r--  clang/test/CIR/CodeGen/new.cpp | 121
-rw-r--r--  clang/test/CIR/CodeGen/statement-exprs.c | 10
-rw-r--r--  clang/test/CIR/CodeGen/struct-init.cpp | 16
-rw-r--r--  clang/test/CIR/CodeGen/struct.cpp | 64
-rw-r--r--  clang/test/CIR/CodeGen/try-catch.cpp | 87
-rw-r--r--  clang/test/CIR/CodeGen/vla.c | 59
-rw-r--r--  clang/test/CIR/IR/invalid-atomic.cir | 7
-rw-r--r--  clang/test/CodeGen/AArch64/sign-return-address.c | 12
-rw-r--r--  clang/test/CodeGen/arm-branch-protection-attr-2.c | 8
-rw-r--r--  clang/test/Driver/arm-abi.c | 2
-rw-r--r--  clang/test/Driver/fuchsia.c | 9
-rw-r--r--  clang/test/Driver/print-supported-extensions-riscv.c | 5
-rw-r--r--  clang/test/Frontend/arm-ignore-branch-protection-option.c | 2
-rw-r--r--  clang/test/Preprocessor/riscv-atomics.c | 24
-rw-r--r--  clang/test/Preprocessor/riscv-target-features-sifive.c | 45
-rw-r--r--  clang/test/lit.cfg.py | 13
20 files changed, 1094 insertions, 40 deletions
diff --git a/clang/test/CIR/CodeGen/atomic.c b/clang/test/CIR/CodeGen/atomic.c
index cf20226..6579988 100644
--- a/clang/test/CIR/CodeGen/atomic.c
+++ b/clang/test/CIR/CodeGen/atomic.c
@@ -584,3 +584,526 @@ void clear_volatile(volatile void *p) {
// OGCG: store atomic volatile i8 0, ptr %{{.+}} seq_cst, align 1
}
+
+int atomic_fetch_add(int *ptr, int value) {
+ // CIR-LABEL: @atomic_fetch_add
+ // LLVM-LABEL: @atomic_fetch_add
+ // OGCG-LABEL: @atomic_fetch_add
+
+ return __atomic_fetch_add(ptr, value, __ATOMIC_SEQ_CST);
+ // CIR: %{{.+}} = cir.atomic.fetch add seq_cst fetch_first %{{.+}}, %{{.+}} : (!cir.ptr<!s32i>, !s32i) -> !s32i
+
+ // LLVM: %[[RES:.+]] = atomicrmw add ptr %{{.+}}, i32 %{{.+}} seq_cst, align 4
+ // LLVM-NEXT: store i32 %[[RES]], ptr %{{.+}}, align 4
+
+ // OGCG: %[[RES:.+]] = atomicrmw add ptr %{{.+}}, i32 %{{.+}} seq_cst, align 4
+ // OGCG-NEXT: store i32 %[[RES]], ptr %{{.+}}, align 4
+}
+
+int atomic_add_fetch(int *ptr, int value) {
+ // CIR-LABEL: @atomic_add_fetch
+ // LLVM-LABEL: @atomic_add_fetch
+ // OGCG-LABEL: @atomic_add_fetch
+
+ return __atomic_add_fetch(ptr, value, __ATOMIC_SEQ_CST);
+ // CIR: %{{.+}} = cir.atomic.fetch add seq_cst %{{.+}}, %{{.+}} : (!cir.ptr<!s32i>, !s32i) -> !s32i
+
+ // LLVM: %[[OLD:.+]] = atomicrmw add ptr %{{.+}}, i32 %[[VAL:.+]] seq_cst, align 4
+ // LLVM-NEXT: %[[RES:.+]] = add i32 %[[OLD]], %[[VAL]]
+ // LLVM-NEXT: store i32 %[[RES]], ptr %{{.+}}, align 4
+
+ // OGCG: %[[OLD:.+]] = atomicrmw add ptr %{{.+}}, i32 %[[VAL:.+]] seq_cst, align 4
+ // OGCG-NEXT: %[[RES:.+]] = add i32 %[[OLD]], %[[VAL]]
+ // OGCG-NEXT: store i32 %[[RES]], ptr %{{.+}}, align 4
+}
+
+int c11_atomic_fetch_add(_Atomic(int) *ptr, int value) {
+ // CIR-LABEL: @c11_atomic_fetch_add
+ // LLVM-LABEL: @c11_atomic_fetch_add
+ // OGCG-LABEL: @c11_atomic_fetch_add
+
+ return __c11_atomic_fetch_add(ptr, value, __ATOMIC_SEQ_CST);
+ // CIR: %{{.+}} = cir.atomic.fetch add seq_cst fetch_first %{{.+}}, %{{.+}} : (!cir.ptr<!s32i>, !s32i) -> !s32i
+
+ // LLVM: %[[RES:.+]] = atomicrmw add ptr %{{.+}}, i32 %{{.+}} seq_cst, align 4
+ // LLVM-NEXT: store i32 %[[RES]], ptr %{{.+}}, align 4
+
+ // OGCG: %[[RES:.+]] = atomicrmw add ptr %{{.+}}, i32 %{{.+}} seq_cst, align 4
+ // OGCG-NEXT: store i32 %[[RES]], ptr %{{.+}}, align 4
+}
+
+int atomic_fetch_sub(int *ptr, int value) {
+ // CIR-LABEL: @atomic_fetch_sub
+ // LLVM-LABEL: @atomic_fetch_sub
+ // OGCG-LABEL: @atomic_fetch_sub
+
+ return __atomic_fetch_sub(ptr, value, __ATOMIC_SEQ_CST);
+ // CIR: %{{.+}} = cir.atomic.fetch sub seq_cst fetch_first %{{.+}}, %{{.+}} : (!cir.ptr<!s32i>, !s32i) -> !s32i
+
+ // LLVM: %[[RES:.+]] = atomicrmw sub ptr %{{.+}}, i32 %{{.+}} seq_cst, align 4
+ // LLVM-NEXT: store i32 %[[RES]], ptr %{{.+}}, align 4
+
+ // OGCG: %[[RES:.+]] = atomicrmw sub ptr %{{.+}}, i32 %{{.+}} seq_cst, align 4
+ // OGCG-NEXT: store i32 %[[RES]], ptr %{{.+}}, align 4
+}
+
+int atomic_sub_fetch(int *ptr, int value) {
+ // CIR-LABEL: @atomic_sub_fetch
+ // LLVM-LABEL: @atomic_sub_fetch
+ // OGCG-LABEL: @atomic_sub_fetch
+
+ return __atomic_sub_fetch(ptr, value, __ATOMIC_SEQ_CST);
+ // CIR: %{{.+}} = cir.atomic.fetch sub seq_cst %{{.+}}, %{{.+}} : (!cir.ptr<!s32i>, !s32i) -> !s32i
+
+ // LLVM: %[[OLD:.+]] = atomicrmw sub ptr %{{.+}}, i32 %[[VAL:.+]] seq_cst, align 4
+ // LLVM-NEXT: %[[RES:.+]] = sub i32 %[[OLD]], %[[VAL]]
+ // LLVM-NEXT: store i32 %[[RES]], ptr %{{.+}}, align 4
+
+ // OGCG: %[[OLD:.+]] = atomicrmw sub ptr %{{.+}}, i32 %[[VAL:.+]] seq_cst, align 4
+ // OGCG-NEXT: %[[RES:.+]] = sub i32 %[[OLD]], %[[VAL]]
+ // OGCG-NEXT: store i32 %[[RES]], ptr %{{.+}}, align 4
+}
+
+int c11_atomic_fetch_sub(_Atomic(int) *ptr, int value) {
+ // CIR-LABEL: @c11_atomic_fetch_sub
+ // LLVM-LABEL: @c11_atomic_fetch_sub
+ // OGCG-LABEL: @c11_atomic_fetch_sub
+
+ return __c11_atomic_fetch_sub(ptr, value, __ATOMIC_SEQ_CST);
+ // CIR: %{{.+}} = cir.atomic.fetch sub seq_cst fetch_first %{{.+}}, %{{.+}} : (!cir.ptr<!s32i>, !s32i) -> !s32i
+
+ // LLVM: %[[RES:.+]] = atomicrmw sub ptr %{{.+}}, i32 %{{.+}} seq_cst, align 4
+ // LLVM-NEXT: store i32 %[[RES]], ptr %{{.+}}, align 4
+
+ // OGCG: %[[RES:.+]] = atomicrmw sub ptr %{{.+}}, i32 %{{.+}} seq_cst, align 4
+ // OGCG-NEXT: store i32 %[[RES]], ptr %{{.+}}, align 4
+}
+
+float atomic_fetch_add_fp(float *ptr, float value) {
+ // CIR-LABEL: @atomic_fetch_add_fp
+ // LLVM-LABEL: @atomic_fetch_add_fp
+ // OGCG-LABEL: @atomic_fetch_add_fp
+
+ return __atomic_fetch_add(ptr, value, __ATOMIC_SEQ_CST);
+ // CIR: %{{.+}} = cir.atomic.fetch add seq_cst fetch_first %{{.+}}, %{{.+}} : (!cir.ptr<!cir.float>, !cir.float) -> !cir.float
+
+ // LLVM: %[[RES:.+]] = atomicrmw fadd ptr %{{.+}}, float %{{.+}} seq_cst, align 4
+ // LLVM-NEXT: store float %[[RES]], ptr %{{.+}}, align 4
+
+ // OGCG: %[[RES:.+]] = atomicrmw fadd ptr %{{.+}}, float %{{.+}} seq_cst, align 4
+ // OGCG-NEXT: store float %[[RES]], ptr %{{.+}}, align 4
+}
+
+float atomic_add_fetch_fp(float *ptr, float value) {
+ // CIR-LABEL: @atomic_add_fetch_fp
+ // LLVM-LABEL: @atomic_add_fetch_fp
+ // OGCG-LABEL: @atomic_add_fetch_fp
+
+ return __atomic_add_fetch(ptr, value, __ATOMIC_SEQ_CST);
+ // CIR: %{{.+}} = cir.atomic.fetch add seq_cst %{{.+}}, %{{.+}} : (!cir.ptr<!cir.float>, !cir.float) -> !cir.float
+
+ // LLVM: %[[OLD:.+]] = atomicrmw fadd ptr %{{.+}}, float %[[VAL:.+]] seq_cst, align 4
+ // LLVM-NEXT: %[[RES:.+]] = fadd float %[[OLD]], %[[VAL]]
+ // LLVM-NEXT: store float %[[RES]], ptr %{{.+}}, align 4
+
+ // OGCG: %[[OLD:.+]] = atomicrmw fadd ptr %{{.+}}, float %[[VAL:.+]] seq_cst, align 4
+ // OGCG-NEXT: %[[RES:.+]] = fadd float %[[OLD]], %[[VAL]]
+ // OGCG-NEXT: store float %[[RES]], ptr %{{.+}}, align 4
+}
+
+float c11_atomic_fetch_sub_fp(_Atomic(float) *ptr, float value) {
+ // CIR-LABEL: @c11_atomic_fetch_sub_fp
+ // LLVM-LABEL: @c11_atomic_fetch_sub_fp
+ // OGCG-LABEL: @c11_atomic_fetch_sub_fp
+
+ return __c11_atomic_fetch_sub(ptr, value, __ATOMIC_SEQ_CST);
+ // CIR: %{{.+}} = cir.atomic.fetch sub seq_cst fetch_first %{{.+}}, %{{.+}} : (!cir.ptr<!cir.float>, !cir.float) -> !cir.float
+
+ // LLVM: %[[RES:.+]] = atomicrmw fsub ptr %{{.+}}, float %{{.+}} seq_cst, align 4
+ // LLVM-NEXT: store float %[[RES]], ptr %{{.+}}, align 4
+
+ // OGCG: %[[RES:.+]] = atomicrmw fsub ptr %{{.+}}, float %{{.+}} seq_cst, align 4
+ // OGCG-NEXT: store float %[[RES]], ptr %{{.+}}, align 4
+}
+
+int atomic_fetch_min(int *ptr, int value) {
+ // CIR-LABEL: @atomic_fetch_min
+ // LLVM-LABEL: @atomic_fetch_min
+ // OGCG-LABEL: @atomic_fetch_min
+
+ return __atomic_fetch_min(ptr, value, __ATOMIC_SEQ_CST);
+ // CIR: %{{.+}} = cir.atomic.fetch min seq_cst fetch_first %{{.+}}, %{{.+}} : (!cir.ptr<!s32i>, !s32i) -> !s32i
+
+ // LLVM: %[[RES:.+]] = atomicrmw min ptr %{{.+}}, i32 %{{.+}} seq_cst, align 4
+ // LLVM-NEXT: store i32 %[[RES]], ptr %{{.+}}, align 4
+
+ // OGCG: %[[RES:.+]] = atomicrmw min ptr %{{.+}}, i32 %{{.+}} seq_cst, align 4
+ // OGCG-NEXT: store i32 %[[RES]], ptr %{{.+}}, align 4
+}
+
+int atomic_min_fetch(int *ptr, int value) {
+ // CIR-LABEL: @atomic_min_fetch
+ // LLVM-LABEL: @atomic_min_fetch
+ // OGCG-LABEL: @atomic_min_fetch
+
+ return __atomic_min_fetch(ptr, value, __ATOMIC_SEQ_CST);
+ // CIR: %{{.+}} = cir.atomic.fetch min seq_cst %{{.+}}, %{{.+}} : (!cir.ptr<!s32i>, !s32i) -> !s32i
+
+ // LLVM: %[[OLD:.+]] = atomicrmw min ptr %{{.+}}, i32 %[[VAL:.+]] seq_cst, align 4
+ // LLVM-NEXT: %[[OLD_LESS:.+]] = icmp slt i32 %[[OLD]], %[[VAL]]
+ // LLVM-NEXT: %[[RES:.+]] = select i1 %[[OLD_LESS]], i32 %[[OLD]], i32 %[[VAL]]
+ // LLVM-NEXT: store i32 %[[RES]], ptr %{{.+}}, align 4
+
+ // OGCG: %[[OLD:.+]] = atomicrmw min ptr %{{.+}}, i32 %[[VAL:.+]] seq_cst, align 4
+ // OGCG-NEXT: %[[OLD_LESS:.+]] = icmp slt i32 %[[OLD]], %[[VAL]]
+ // OGCG-NEXT: %[[RES:.+]] = select i1 %[[OLD_LESS]], i32 %[[OLD]], i32 %[[VAL]]
+ // OGCG-NEXT: store i32 %[[RES]], ptr %{{.+}}, align 4
+}
+
+int c11_atomic_fetch_min(_Atomic(int) *ptr, int value) {
+ // CIR-LABEL: @c11_atomic_fetch_min
+ // LLVM-LABEL: @c11_atomic_fetch_min
+ // OGCG-LABEL: @c11_atomic_fetch_min
+
+ return __c11_atomic_fetch_min(ptr, value, __ATOMIC_SEQ_CST);
+ // CIR: %{{.+}} = cir.atomic.fetch min seq_cst fetch_first %{{.+}}, %{{.+}} : (!cir.ptr<!s32i>, !s32i) -> !s32i
+
+ // LLVM: %[[RES:.+]] = atomicrmw min ptr %{{.+}}, i32 %{{.+}} seq_cst, align 4
+ // LLVM-NEXT: store i32 %[[RES]], ptr %{{.+}}, align 4
+
+ // OGCG: %[[RES:.+]] = atomicrmw min ptr %{{.+}}, i32 %{{.+}} seq_cst, align 4
+ // OGCG-NEXT: store i32 %[[RES]], ptr %{{.+}}, align 4
+}
+
+float atomic_fetch_min_fp(float *ptr, float value) {
+ // CIR-LABEL: @atomic_fetch_min_fp
+ // LLVM-LABEL: @atomic_fetch_min_fp
+ // OGCG-LABEL: @atomic_fetch_min_fp
+
+ return __atomic_fetch_min(ptr, value, __ATOMIC_SEQ_CST);
+ // CIR: %{{.+}} = cir.atomic.fetch min seq_cst fetch_first %{{.+}}, %{{.+}} : (!cir.ptr<!cir.float>, !cir.float) -> !cir.float
+
+ // LLVM: %[[RES:.+]] = atomicrmw fmin ptr %{{.+}}, float %{{.+}} seq_cst, align 4
+ // LLVM-NEXT: store float %[[RES]], ptr %{{.+}}, align 4
+
+ // OGCG: %[[RES:.+]] = atomicrmw fmin ptr %{{.+}}, float %{{.+}} seq_cst, align 4
+ // OGCG-NEXT: store float %[[RES]], ptr %{{.+}}, align 4
+}
+
+float atomic_min_fetch_fp(float *ptr, float value) {
+ // CIR-LABEL: @atomic_min_fetch_fp
+ // LLVM-LABEL: @atomic_min_fetch_fp
+ // OGCG-LABEL: @atomic_min_fetch_fp
+
+ return __atomic_min_fetch(ptr, value, __ATOMIC_SEQ_CST);
+ // CIR: %{{.+}} = cir.atomic.fetch min seq_cst %{{.+}}, %{{.+}} : (!cir.ptr<!cir.float>, !cir.float) -> !cir.float
+
+ // LLVM: %[[OLD:.+]] = atomicrmw fmin ptr %{{.+}}, float %[[VAL:.+]] seq_cst, align 4
+ // LLVM-NEXT: %[[RES:.+]] = call float @llvm.minnum.f32(float %[[OLD]], float %[[VAL]])
+ // LLVM-NEXT: store float %[[RES]], ptr %{{.+}}, align 4
+
+ // OGCG: %[[OLD:.+]] = atomicrmw fmin ptr %{{.+}}, float %[[VAL:.+]] seq_cst, align 4
+ // OGCG-NEXT: %[[RES:.+]] = call float @llvm.minnum.f32(float %[[OLD]], float %[[VAL]])
+ // OGCG-NEXT: store float %[[RES]], ptr %{{.+}}, align 4
+}
+
+float c11_atomic_fetch_min_fp(_Atomic(float) *ptr, float value) {
+ // CIR-LABEL: @c11_atomic_fetch_min_fp
+ // LLVM-LABEL: @c11_atomic_fetch_min_fp
+ // OGCG-LABEL: @c11_atomic_fetch_min_fp
+
+ return __c11_atomic_fetch_min(ptr, value, __ATOMIC_SEQ_CST);
+ // CIR: %{{.+}} = cir.atomic.fetch min seq_cst fetch_first %{{.+}}, %{{.+}} : (!cir.ptr<!cir.float>, !cir.float) -> !cir.float
+
+ // LLVM: %[[RES:.+]] = atomicrmw fmin ptr %{{.+}}, float %{{.+}} seq_cst, align 4
+ // LLVM-NEXT: store float %[[RES]], ptr %{{.+}}, align 4
+
+ // OGCG: %[[RES:.+]] = atomicrmw fmin ptr %{{.+}}, float %{{.+}} seq_cst, align 4
+ // OGCG-NEXT: store float %[[RES]], ptr %{{.+}}, align 4
+}
+
+int atomic_fetch_max(int *ptr, int value) {
+ // CIR-LABEL: @atomic_fetch_max
+ // LLVM-LABEL: @atomic_fetch_max
+ // OGCG-LABEL: @atomic_fetch_max
+
+ return __atomic_fetch_max(ptr, value, __ATOMIC_SEQ_CST);
+ // CIR: %{{.+}} = cir.atomic.fetch max seq_cst fetch_first %{{.+}}, %{{.+}} : (!cir.ptr<!s32i>, !s32i) -> !s32i
+
+ // LLVM: %[[RES:.+]] = atomicrmw max ptr %{{.+}}, i32 %{{.+}} seq_cst, align 4
+ // LLVM-NEXT: store i32 %[[RES]], ptr %{{.+}}, align 4
+
+ // OGCG: %[[RES:.+]] = atomicrmw max ptr %{{.+}}, i32 %{{.+}} seq_cst, align 4
+ // OGCG-NEXT: store i32 %[[RES]], ptr %{{.+}}, align 4
+}
+
+int atomic_max_fetch(int *ptr, int value) {
+ // CIR-LABEL: @atomic_max_fetch
+ // LLVM-LABEL: @atomic_max_fetch
+ // OGCG-LABEL: @atomic_max_fetch
+
+ return __atomic_max_fetch(ptr, value, __ATOMIC_SEQ_CST);
+ // CIR: %{{.+}} = cir.atomic.fetch max seq_cst %{{.+}}, %{{.+}} : (!cir.ptr<!s32i>, !s32i) -> !s32i
+
+ // LLVM: %[[OLD:.+]] = atomicrmw max ptr %{{.+}}, i32 %[[VAL:.+]] seq_cst, align 4
+ // LLVM-NEXT: %[[OLD_GREATER:.+]] = icmp sgt i32 %[[OLD]], %[[VAL]]
+ // LLVM-NEXT: %[[RES:.+]] = select i1 %[[OLD_GREATER]], i32 %[[OLD]], i32 %[[VAL]]
+ // LLVM-NEXT: store i32 %[[RES]], ptr %{{.+}}, align 4
+
+ // OGCG: %[[OLD:.+]] = atomicrmw max ptr %{{.+}}, i32 %[[VAL:.+]] seq_cst, align 4
+ // OGCG-NEXT: %[[OLD_GREATER:.+]] = icmp sgt i32 %[[OLD]], %[[VAL]]
+ // OGCG-NEXT: %[[RES:.+]] = select i1 %[[OLD_GREATER]], i32 %[[OLD]], i32 %[[VAL]]
+ // OGCG-NEXT: store i32 %[[RES]], ptr %{{.+}}, align 4
+}
+
+int c11_atomic_fetch_max(_Atomic(int) *ptr, int value) {
+ // CIR-LABEL: @c11_atomic_fetch_max
+ // LLVM-LABEL: @c11_atomic_fetch_max
+ // OGCG-LABEL: @c11_atomic_fetch_max
+
+ return __c11_atomic_fetch_max(ptr, value, __ATOMIC_SEQ_CST);
+ // CIR: %{{.+}} = cir.atomic.fetch max seq_cst fetch_first %{{.+}}, %{{.+}} : (!cir.ptr<!s32i>, !s32i) -> !s32i
+
+ // LLVM: %[[RES:.+]] = atomicrmw max ptr %{{.+}}, i32 %{{.+}} seq_cst, align 4
+ // LLVM-NEXT: store i32 %[[RES]], ptr %{{.+}}, align 4
+
+ // OGCG: %[[RES:.+]] = atomicrmw max ptr %{{.+}}, i32 %{{.+}} seq_cst, align 4
+ // OGCG-NEXT: store i32 %[[RES]], ptr %{{.+}}, align 4
+}
+
+float atomic_fetch_max_fp(float *ptr, float value) {
+ // CIR-LABEL: @atomic_fetch_max_fp
+ // LLVM-LABEL: @atomic_fetch_max_fp
+ // OGCG-LABEL: @atomic_fetch_max_fp
+
+ return __atomic_fetch_max(ptr, value, __ATOMIC_SEQ_CST);
+ // CIR: %{{.+}} = cir.atomic.fetch max seq_cst fetch_first %{{.+}}, %{{.+}} : (!cir.ptr<!cir.float>, !cir.float) -> !cir.float
+
+ // LLVM: %[[RES:.+]] = atomicrmw fmax ptr %{{.+}}, float %{{.+}} seq_cst, align 4
+ // LLVM-NEXT: store float %[[RES]], ptr %{{.+}}, align 4
+
+ // OGCG: %[[RES:.+]] = atomicrmw fmax ptr %{{.+}}, float %{{.+}} seq_cst, align 4
+ // OGCG-NEXT: store float %[[RES]], ptr %{{.+}}, align 4
+}
+
+float atomic_max_fetch_fp(float *ptr, float value) {
+ // CIR-LABEL: @atomic_max_fetch_fp
+ // LLVM-LABEL: @atomic_max_fetch_fp
+ // OGCG-LABEL: @atomic_max_fetch_fp
+
+ return __atomic_max_fetch(ptr, value, __ATOMIC_SEQ_CST);
+ // CIR: %{{.+}} = cir.atomic.fetch max seq_cst %{{.+}}, %{{.+}} : (!cir.ptr<!cir.float>, !cir.float) -> !cir.float
+
+ // LLVM: %[[OLD:.+]] = atomicrmw fmax ptr %{{.+}}, float %[[VAL:.+]] seq_cst, align 4
+ // LLVM-NEXT: %[[RES:.+]] = call float @llvm.maxnum.f32(float %[[OLD]], float %[[VAL]])
+ // LLVM-NEXT: store float %[[RES]], ptr %{{.+}}, align 4
+
+ // OGCG: %[[OLD:.+]] = atomicrmw fmax ptr %{{.+}}, float %[[VAL:.+]] seq_cst, align 4
+ // OGCG-NEXT: %[[RES:.+]] = call float @llvm.maxnum.f32(float %[[OLD]], float %[[VAL]])
+ // OGCG-NEXT: store float %[[RES]], ptr %{{.+}}, align 4
+}
+
+float c11_atomic_fetch_max_fp(_Atomic(float) *ptr, float value) {
+ // CIR-LABEL: @c11_atomic_fetch_max_fp
+ // LLVM-LABEL: @c11_atomic_fetch_max_fp
+ // OGCG-LABEL: @c11_atomic_fetch_max_fp
+
+ return __c11_atomic_fetch_max(ptr, value, __ATOMIC_SEQ_CST);
+ // CIR: %{{.+}} = cir.atomic.fetch max seq_cst fetch_first %{{.+}}, %{{.+}} : (!cir.ptr<!cir.float>, !cir.float) -> !cir.float
+
+ // LLVM: %[[RES:.+]] = atomicrmw fmax ptr %{{.+}}, float %{{.+}} seq_cst, align 4
+ // LLVM-NEXT: store float %[[RES]], ptr %{{.+}}, align 4
+
+ // OGCG: %[[RES:.+]] = atomicrmw fmax ptr %{{.+}}, float %{{.+}} seq_cst, align 4
+ // OGCG-NEXT: store float %[[RES]], ptr %{{.+}}, align 4
+}
+
+int atomic_fetch_and(int *ptr, int value) {
+ // CIR-LABEL: @atomic_fetch_and
+ // LLVM-LABEL: @atomic_fetch_and
+ // OGCG-LABEL: @atomic_fetch_and
+
+ return __atomic_fetch_and(ptr, value, __ATOMIC_SEQ_CST);
+ // CIR: %{{.+}} = cir.atomic.fetch and seq_cst fetch_first %{{.+}}, %{{.+}} : (!cir.ptr<!s32i>, !s32i) -> !s32i
+
+ // LLVM: %[[RES:.+]] = atomicrmw and ptr %{{.+}}, i32 %{{.+}} seq_cst, align 4
+ // LLVM-NEXT: store i32 %[[RES]], ptr %{{.+}}, align 4
+
+ // OGCG: %[[RES:.+]] = atomicrmw and ptr %{{.+}}, i32 %{{.+}} seq_cst, align 4
+ // OGCG-NEXT: store i32 %[[RES]], ptr %{{.+}}, align 4
+}
+
+int atomic_and_fetch(int *ptr, int value) {
+ // CIR-LABEL: @atomic_and_fetch
+ // LLVM-LABEL: @atomic_and_fetch
+ // OGCG-LABEL: @atomic_and_fetch
+
+ return __atomic_and_fetch(ptr, value, __ATOMIC_SEQ_CST);
+ // CIR: %{{.+}} = cir.atomic.fetch and seq_cst %{{.+}}, %{{.+}} : (!cir.ptr<!s32i>, !s32i) -> !s32i
+
+ // LLVM: %[[OLD:.+]] = atomicrmw and ptr %{{.+}}, i32 %[[VAL:.+]] seq_cst, align 4
+ // LLVM-NEXT: %[[RES:.+]] = and i32 %[[OLD]], %[[VAL]]
+ // LLVM-NEXT: store i32 %[[RES]], ptr %{{.+}}, align 4
+
+ // OGCG: %[[OLD:.+]] = atomicrmw and ptr %{{.+}}, i32 %[[VAL:.+]] seq_cst, align 4
+ // OGCG-NEXT: %[[RES:.+]] = and i32 %[[OLD]], %[[VAL]]
+ // OGCG-NEXT: store i32 %[[RES]], ptr %{{.+}}, align 4
+}
+
+int c11_atomic_fetch_and(_Atomic(int) *ptr, int value) {
+ // CIR-LABEL: @c11_atomic_fetch_and
+ // LLVM-LABEL: @c11_atomic_fetch_and
+ // OGCG-LABEL: @c11_atomic_fetch_and
+
+ return __c11_atomic_fetch_and(ptr, value, __ATOMIC_SEQ_CST);
+ // CIR: %{{.+}} = cir.atomic.fetch and seq_cst fetch_first %{{.+}}, %{{.+}} : (!cir.ptr<!s32i>, !s32i) -> !s32i
+
+ // LLVM: %[[RES:.+]] = atomicrmw and ptr %{{.+}}, i32 %{{.+}} seq_cst, align 4
+ // LLVM-NEXT: store i32 %[[RES]], ptr %{{.+}}, align 4
+
+ // OGCG: %[[RES:.+]] = atomicrmw and ptr %{{.+}}, i32 %{{.+}} seq_cst, align 4
+ // OGCG-NEXT: store i32 %[[RES]], ptr %{{.+}}, align 4
+}
+
+int atomic_fetch_or(int *ptr, int value) {
+ // CIR-LABEL: @atomic_fetch_or
+ // LLVM-LABEL: @atomic_fetch_or
+ // OGCG-LABEL: @atomic_fetch_or
+
+ return __atomic_fetch_or(ptr, value, __ATOMIC_SEQ_CST);
+ // CIR: %{{.+}} = cir.atomic.fetch or seq_cst fetch_first %{{.+}}, %{{.+}} : (!cir.ptr<!s32i>, !s32i) -> !s32i
+
+ // LLVM: %[[RES:.+]] = atomicrmw or ptr %{{.+}}, i32 %{{.+}} seq_cst, align 4
+ // LLVM-NEXT: store i32 %[[RES]], ptr %{{.+}}, align 4
+
+ // OGCG: %[[RES:.+]] = atomicrmw or ptr %{{.+}}, i32 %{{.+}} seq_cst, align 4
+ // OGCG-NEXT: store i32 %[[RES]], ptr %{{.+}}, align 4
+}
+
+int atomic_or_fetch(int *ptr, int value) {
+ // CIR-LABEL: @atomic_or_fetch
+ // LLVM-LABEL: @atomic_or_fetch
+ // OGCG-LABEL: @atomic_or_fetch
+
+ return __atomic_or_fetch(ptr, value, __ATOMIC_SEQ_CST);
+ // CIR: %{{.+}} = cir.atomic.fetch or seq_cst %{{.+}}, %{{.+}} : (!cir.ptr<!s32i>, !s32i) -> !s32i
+
+ // LLVM: %[[OLD:.+]] = atomicrmw or ptr %{{.+}}, i32 %[[VAL:.+]] seq_cst, align 4
+ // LLVM-NEXT: %[[RES:.+]] = or i32 %[[OLD]], %[[VAL]]
+ // LLVM-NEXT: store i32 %[[RES]], ptr %{{.+}}, align 4
+
+ // OGCG: %[[OLD:.+]] = atomicrmw or ptr %{{.+}}, i32 %[[VAL:.+]] seq_cst, align 4
+ // OGCG-NEXT: %[[RES:.+]] = or i32 %[[OLD]], %[[VAL]]
+ // OGCG-NEXT: store i32 %[[RES]], ptr %{{.+}}, align 4
+}
+
+int c11_atomic_fetch_or(_Atomic(int) *ptr, int value) {
+ // CIR-LABEL: @c11_atomic_fetch_or
+ // LLVM-LABEL: @c11_atomic_fetch_or
+ // OGCG-LABEL: @c11_atomic_fetch_or
+
+ return __c11_atomic_fetch_or(ptr, value, __ATOMIC_SEQ_CST);
+ // CIR: %{{.+}} = cir.atomic.fetch or seq_cst fetch_first %{{.+}}, %{{.+}} : (!cir.ptr<!s32i>, !s32i) -> !s32i
+
+ // LLVM: %[[RES:.+]] = atomicrmw or ptr %{{.+}}, i32 %{{.+}} seq_cst, align 4
+ // LLVM-NEXT: store i32 %[[RES]], ptr %{{.+}}, align 4
+
+ // OGCG: %[[RES:.+]] = atomicrmw or ptr %{{.+}}, i32 %{{.+}} seq_cst, align 4
+ // OGCG-NEXT: store i32 %[[RES]], ptr %{{.+}}, align 4
+}
+
+int atomic_fetch_xor(int *ptr, int value) {
+ // CIR-LABEL: @atomic_fetch_xor
+ // LLVM-LABEL: @atomic_fetch_xor
+ // OGCG-LABEL: @atomic_fetch_xor
+
+ return __atomic_fetch_xor(ptr, value, __ATOMIC_SEQ_CST);
+ // CIR: %{{.+}} = cir.atomic.fetch xor seq_cst fetch_first %{{.+}}, %{{.+}} : (!cir.ptr<!s32i>, !s32i) -> !s32i
+
+ // LLVM: %[[RES:.+]] = atomicrmw xor ptr %{{.+}}, i32 %{{.+}} seq_cst, align 4
+ // LLVM-NEXT: store i32 %[[RES]], ptr %{{.+}}, align 4
+
+ // OGCG: %[[RES:.+]] = atomicrmw xor ptr %{{.+}}, i32 %{{.+}} seq_cst, align 4
+ // OGCG-NEXT: store i32 %[[RES]], ptr %{{.+}}, align 4
+}
+
+int atomic_xor_fetch(int *ptr, int value) {
+ // CIR-LABEL: @atomic_xor_fetch
+ // LLVM-LABEL: @atomic_xor_fetch
+ // OGCG-LABEL: @atomic_xor_fetch
+
+ return __atomic_xor_fetch(ptr, value, __ATOMIC_SEQ_CST);
+ // CIR: %{{.+}} = cir.atomic.fetch xor seq_cst %{{.+}}, %{{.+}} : (!cir.ptr<!s32i>, !s32i) -> !s32i
+
+ // LLVM: %[[OLD:.+]] = atomicrmw xor ptr %{{.+}}, i32 %[[VAL:.+]] seq_cst, align 4
+ // LLVM-NEXT: %[[RES:.+]] = xor i32 %[[OLD]], %[[VAL]]
+ // LLVM-NEXT: store i32 %[[RES]], ptr %{{.+}}, align 4
+
+ // OGCG: %[[OLD:.+]] = atomicrmw xor ptr %{{.+}}, i32 %[[VAL:.+]] seq_cst, align 4
+ // OGCG-NEXT: %[[RES:.+]] = xor i32 %[[OLD]], %[[VAL]]
+ // OGCG-NEXT: store i32 %[[RES]], ptr %{{.+}}, align 4
+}
+
+int c11_atomic_fetch_xor(_Atomic(int) *ptr, int value) {
+ // CIR-LABEL: @c11_atomic_fetch_xor
+ // LLVM-LABEL: @c11_atomic_fetch_xor
+ // OGCG-LABEL: @c11_atomic_fetch_xor
+
+ return __c11_atomic_fetch_xor(ptr, value, __ATOMIC_SEQ_CST);
+ // CIR: %{{.+}} = cir.atomic.fetch xor seq_cst fetch_first %{{.+}}, %{{.+}} : (!cir.ptr<!s32i>, !s32i) -> !s32i
+
+ // LLVM: %[[RES:.+]] = atomicrmw xor ptr %{{.+}}, i32 %{{.+}} seq_cst, align 4
+ // LLVM-NEXT: store i32 %[[RES]], ptr %{{.+}}, align 4
+
+ // OGCG: %[[RES:.+]] = atomicrmw xor ptr %{{.+}}, i32 %{{.+}} seq_cst, align 4
+ // OGCG-NEXT: store i32 %[[RES]], ptr %{{.+}}, align 4
+}
+
+int atomic_fetch_nand(int *ptr, int value) {
+ // CIR-LABEL: @atomic_fetch_nand
+ // LLVM-LABEL: @atomic_fetch_nand
+ // OGCG-LABEL: @atomic_fetch_nand
+
+ return __atomic_fetch_nand(ptr, value, __ATOMIC_SEQ_CST);
+ // CIR: %{{.+}} = cir.atomic.fetch nand seq_cst fetch_first %{{.+}}, %{{.+}} : (!cir.ptr<!s32i>, !s32i) -> !s32i
+
+ // LLVM: %[[RES:.+]] = atomicrmw nand ptr %{{.+}}, i32 %{{.+}} seq_cst, align 4
+ // LLVM-NEXT: store i32 %[[RES]], ptr %{{.+}}, align 4
+
+ // OGCG: %[[RES:.+]] = atomicrmw nand ptr %{{.+}}, i32 %{{.+}} seq_cst, align 4
+ // OGCG-NEXT: store i32 %[[RES]], ptr %{{.+}}, align 4
+}
+
+int atomic_nand_fetch(int *ptr, int value) {
+ // CIR-LABEL: @atomic_nand_fetch
+ // LLVM-LABEL: @atomic_nand_fetch
+ // OGCG-LABEL: @atomic_nand_fetch
+
+ return __atomic_nand_fetch(ptr, value, __ATOMIC_SEQ_CST);
+ // CIR: %{{.+}} = cir.atomic.fetch nand seq_cst %{{.+}}, %{{.+}} : (!cir.ptr<!s32i>, !s32i) -> !s32i
+
+ // LLVM: %[[OLD:.+]] = atomicrmw nand ptr %{{.+}}, i32 %[[VAL:.+]] seq_cst, align 4
+ // LLVM-NEXT: %[[TMP:.+]] = and i32 %[[OLD]], %[[VAL]]
+ // LLVM-NEXT: %[[RES:.+]] = xor i32 %[[TMP]], -1
+ // LLVM-NEXT: store i32 %[[RES]], ptr %{{.+}}, align 4
+
+ // OGCG: %[[OLD:.+]] = atomicrmw nand ptr %{{.+}}, i32 %[[VAL:.+]] seq_cst, align 4
+ // OGCG-NEXT: %[[TMP:.+]] = and i32 %[[OLD]], %[[VAL]]
+ // OGCG-NEXT: %[[RES:.+]] = xor i32 %[[TMP]], -1
+ // OGCG-NEXT: store i32 %[[RES]], ptr %{{.+}}, align 4
+}
+
+int c11_atomic_fetch_nand(_Atomic(int) *ptr, int value) {
+ // CIR-LABEL: @c11_atomic_fetch_nand
+ // LLVM-LABEL: @c11_atomic_fetch_nand
+ // OGCG-LABEL: @c11_atomic_fetch_nand
+
+ return __c11_atomic_fetch_nand(ptr, value, __ATOMIC_SEQ_CST);
+ // CIR: %{{.+}} = cir.atomic.fetch nand seq_cst fetch_first %{{.+}}, %{{.+}} : (!cir.ptr<!s32i>, !s32i) -> !s32i
+
+ // LLVM: %[[RES:.+]] = atomicrmw nand ptr %{{.+}}, i32 %{{.+}} seq_cst, align 4
+ // LLVM-NEXT: store i32 %[[RES]], ptr %{{.+}}, align 4
+
+ // OGCG: %[[RES:.+]] = atomicrmw nand ptr %{{.+}}, i32 %{{.+}} seq_cst, align 4
+ // OGCG-NEXT: store i32 %[[RES]], ptr %{{.+}}, align 4
+}
diff --git a/clang/test/CIR/CodeGen/builtin_inline.c b/clang/test/CIR/CodeGen/builtin_inline.c
new file mode 100644
index 0000000..83a3ba6
--- /dev/null
+++ b/clang/test/CIR/CodeGen/builtin_inline.c
@@ -0,0 +1,91 @@
+// RUN: %clang_cc1 -triple x86_64-unknown-linux-gnu -fclangir -emit-cir %s -o %t.cir
+// RUN: FileCheck --input-file=%t.cir %s -check-prefix=CIR
+// RUN: %clang_cc1 -triple x86_64-unknown-linux-gnu -fclangir -emit-llvm -disable-llvm-passes %s -o %t-cir.ll
+// RUN: FileCheck --input-file=%t-cir.ll %s -check-prefix=LLVM
+// RUN: %clang_cc1 -triple x86_64-unknown-linux-gnu -emit-llvm -disable-llvm-passes %s -o %t.ll
+// RUN: FileCheck --input-file=%t.ll %s -check-prefix=OGCG
+
+typedef unsigned long size_t;
+
+// Normal inline builtin declaration
+// When a builtin is redefined with extern inline + always_inline attributes,
+// the compiler creates a .inline version to avoid conflicts with the builtin
+
+extern inline __attribute__((always_inline)) __attribute__((gnu_inline))
+void *memcpy(void *a, const void *b, size_t c) {
+ return __builtin_memcpy(a, b, c);
+}
+
+void *test_inline_builtin_memcpy(void *a, const void *b, size_t c) {
+ return memcpy(a, b, c);
+}
+
+// CIR: cir.func internal private{{.*}}@memcpy.inline({{.*}}) -> !cir.ptr<!void> inline(always)
+
+// CIR-LABEL: @test_inline_builtin_memcpy(
+// CIR: cir.call @memcpy.inline(
+// CIR: }
+
+// LLVM: define internal ptr @memcpy.inline(ptr{{.*}}, ptr{{.*}}, i64{{.*}}) #{{[0-9]+}}
+
+// LLVM-LABEL: @test_inline_builtin_memcpy(
+// LLVM: call ptr @memcpy.inline(
+
+// OGCG-LABEL: @test_inline_builtin_memcpy(
+// OGCG: call ptr @memcpy.inline(
+
+// OGCG: define internal ptr @memcpy.inline(ptr{{.*}} %a, ptr{{.*}} %b, i64{{.*}} %c) #{{[0-9]+}}
+
+// Shadowing case
+// When a non-inline function definition shadows an inline builtin declaration,
+// the .inline version should be replaced with the regular function and removed.
+
+extern inline __attribute__((always_inline)) __attribute__((gnu_inline))
+void *memmove(void *a, const void *b, size_t c) {
+ return __builtin_memmove(a, b, c);
+}
+
+void *memmove(void *a, const void *b, size_t c) {
+ char *dst = (char *)a;
+ const char *src = (const char *)b;
+ if (dst < src) {
+ for (size_t i = 0; i < c; i++) {
+ dst[i] = src[i];
+ }
+ } else {
+ for (size_t i = c; i > 0; i--) {
+ dst[i-1] = src[i-1];
+ }
+ }
+ return a;
+}
+
+void *test_shadowed_memmove(void *a, const void *b, size_t c) {
+ return memmove(a, b, c);
+}
+
+// CIR: cir.func{{.*}}@memmove({{.*}}) -> !cir.ptr<!void>{{.*}}{
+// CIR-NOT: @memmove.inline
+
+// CIR-LABEL: @test_shadowed_memmove(
+// CIR: cir.call @memmove(
+// CIR-NOT: @memmove.inline
+// CIR: }
+
+// LLVM: define dso_local ptr @memmove(ptr{{.*}}, ptr{{.*}}, i64{{.*}}) #{{[0-9]+}}
+// LLVM-NOT: @memmove.inline
+
+// LLVM-LABEL: @test_shadowed_memmove(
+// TODO - this deviation from OGCG is expected until we implement the nobuiltin
+// attribute. See CIRGenFunction::emitDirectCallee
+// LLVM: call ptr @memmove(
+// LLVM-NOT: @memmove.inline
+// LLVM: }
+
+// OGCG: define dso_local ptr @memmove(ptr{{.*}} %a, ptr{{.*}} %b, i64{{.*}} %c) #{{[0-9]+}}
+// OGCG-NOT: @memmove.inline
+
+// OGCG-LABEL: @test_shadowed_memmove(
+// OGCG: call void @llvm.memmove.p0.p0.i64(
+// OGCG-NOT: @memmove.inline
+// OGCG: }
diff --git a/clang/test/CIR/CodeGen/dtors.cpp b/clang/test/CIR/CodeGen/dtors.cpp
index f2c80a5..1fe048b7 100644
--- a/clang/test/CIR/CodeGen/dtors.cpp
+++ b/clang/test/CIR/CodeGen/dtors.cpp
@@ -35,7 +35,7 @@ bool make_temp(const B &) { return false; }
bool test_temp_or() { return make_temp(1) || make_temp(2); }
// CIR: cir.func{{.*}} @_Z12test_temp_orv()
-// CIR: %[[SCOPE:.*]] = cir.scope {
+// CIR: cir.scope {
// CIR: %[[REF_TMP0:.*]] = cir.alloca !rec_B, !cir.ptr<!rec_B>, ["ref.tmp0"]
// CIR: %[[ONE:.*]] = cir.const #cir.int<1>
// CIR: cir.call @_ZN1BC2Ei(%[[REF_TMP0]], %[[ONE]])
@@ -51,9 +51,9 @@ bool test_temp_or() { return make_temp(1) || make_temp(2); }
// CIR: cir.call @_ZN1BD2Ev(%[[REF_TMP1]])
// CIR: cir.yield %[[MAKE_TEMP1]] : !cir.bool
// CIR: })
+// CIR: cir.store{{.*}} %[[TERNARY]], %[[RETVAL:.*]]
// CIR: cir.call @_ZN1BD2Ev(%[[REF_TMP0]])
-// CIR: cir.yield %[[TERNARY]] : !cir.bool
-// CIR: } : !cir.bool
+// CIR: }
// LLVM: define{{.*}} i1 @_Z12test_temp_orv(){{.*}} {
// LLVM: %[[REF_TMP0:.*]] = alloca %struct.B
@@ -105,7 +105,7 @@ bool test_temp_or() { return make_temp(1) || make_temp(2); }
bool test_temp_and() { return make_temp(1) && make_temp(2); }
// CIR: cir.func{{.*}} @_Z13test_temp_andv()
-// CIR: %[[SCOPE:.*]] = cir.scope {
+// CIR: cir.scope {
// CIR: %[[REF_TMP0:.*]] = cir.alloca !rec_B, !cir.ptr<!rec_B>, ["ref.tmp0"]
// CIR: %[[ONE:.*]] = cir.const #cir.int<1>
// CIR: cir.call @_ZN1BC2Ei(%[[REF_TMP0]], %[[ONE]])
@@ -121,9 +121,9 @@ bool test_temp_and() { return make_temp(1) && make_temp(2); }
// CIR: %[[FALSE:.*]] = cir.const #false
// CIR: cir.yield %[[FALSE]] : !cir.bool
// CIR: })
+// CIR: cir.store{{.*}} %[[TERNARY]], %[[RETVAL:.*]]
// CIR: cir.call @_ZN1BD2Ev(%[[REF_TMP0]])
-// CIR: cir.yield %[[TERNARY]] : !cir.bool
-// CIR: } : !cir.bool
+// CIR: }
// LLVM: define{{.*}} i1 @_Z13test_temp_andv(){{.*}} {
// LLVM: %[[REF_TMP0:.*]] = alloca %struct.B
diff --git a/clang/test/CIR/CodeGen/lambda.cpp b/clang/test/CIR/CodeGen/lambda.cpp
index 0c32ceb1..91380b9 100644
--- a/clang/test/CIR/CodeGen/lambda.cpp
+++ b/clang/test/CIR/CodeGen/lambda.cpp
@@ -219,14 +219,13 @@ int f() {
// CIR: cir.func dso_local @_Z1fv() -> !s32i {{.*}} {
// CIR: %[[RETVAL:.*]] = cir.alloca !s32i, !cir.ptr<!s32i>, ["__retval"]
-// CIR: %[[SCOPE_RET:.*]] = cir.scope {
+// CIR: cir.scope {
// CIR: %[[TMP:.*]] = cir.alloca ![[REC_LAM_G2]], !cir.ptr<![[REC_LAM_G2]]>, ["ref.tmp0"]
// CIR: %[[G2:.*]] = cir.call @_Z2g2v() : () -> ![[REC_LAM_G2]]
// CIR: cir.store{{.*}} %[[G2]], %[[TMP]]
// CIR: %[[RESULT:.*]] = cir.call @_ZZ2g2vENK3$_0clEv(%[[TMP]])
-// CIR: cir.yield %[[RESULT]]
+// CIR: cir.store{{.*}} %[[RESULT]], %[[RETVAL]]
// CIR: }
-// CIR: cir.store{{.*}} %[[SCOPE_RET]], %[[RETVAL]]
// CIR: %[[RET:.*]] = cir.load{{.*}} %[[RETVAL]]
// CIR: cir.return %[[RET]]
@@ -255,10 +254,9 @@ int f() {
// LLVM: %[[G2:.*]] = call %[[REC_LAM_G2]] @_Z2g2v()
// LLVM: store %[[REC_LAM_G2]] %[[G2]], ptr %[[TMP]]
// LLVM: %[[RESULT:.*]] = call i32 @"_ZZ2g2vENK3$_0clEv"(ptr %[[TMP]])
+// LLVM: store i32 %[[RESULT]], ptr %[[RETVAL]]
// LLVM: br label %[[RET_BB:.*]]
// LLVM: [[RET_BB]]:
-// LLVM: %[[RETPHI:.*]] = phi i32 [ %[[RESULT]], %[[SCOPE_BB]] ]
-// LLVM: store i32 %[[RETPHI]], ptr %[[RETVAL]]
// LLVM: %[[RET:.*]] = load i32, ptr %[[RETVAL]]
// LLVM: ret i32 %[[RET]]
@@ -333,14 +331,13 @@ struct A {
// CIR: %[[RETVAL:.*]] = cir.alloca !s32i, !cir.ptr<!s32i>, ["__retval"]
// CIR: cir.store %[[THIS_ARG]], %[[THIS_ADDR]]
// CIR: %[[THIS]] = cir.load deref %[[THIS_ADDR]] : !cir.ptr<!cir.ptr<!rec_A>>, !cir.ptr<!rec_A>
-// CIR: %[[SCOPE_RET:.*]] = cir.scope {
+// CIR: cir.scope {
// CIR: %[[LAM_ADDR:.*]] = cir.alloca ![[REC_LAM_A]], !cir.ptr<![[REC_LAM_A]]>, ["ref.tmp0"]
// CIR: %[[STRUCT_A:.*]] = cir.get_member %[[LAM_ADDR]][0] {name = "this"} : !cir.ptr<![[REC_LAM_A]]> -> !cir.ptr<!rec_A>
// CIR: cir.call @_ZN1AC1ERKS_(%[[STRUCT_A]], %[[THIS]]){{.*}} : (!cir.ptr<!rec_A>, !cir.ptr<!rec_A>){{.*}} -> ()
// CIR: %[[LAM_RET:.*]] = cir.call @_ZZN1A3fooEvENKUlvE_clEv(%[[LAM_ADDR]])
-// CIR: cir.yield %[[LAM_RET]]
+// CIR: cir.store{{.*}} %[[LAM_RET]], %[[RETVAL]]
// CIR: }
-// CIR: cir.store{{.*}} %[[SCOPE_RET]], %[[RETVAL]]
// CIR: %[[RET:.*]] = cir.load{{.*}} %[[RETVAL]]
// CIR: cir.return %[[RET]]
@@ -355,10 +352,9 @@ struct A {
// LLVM: %[[STRUCT_A:.*]] = getelementptr %[[REC_LAM_A]], ptr %[[LAM_ALLOCA]], i32 0, i32 0
// LLVM: call void @_ZN1AC1ERKS_(ptr %[[STRUCT_A]], ptr %[[THIS]])
// LLVM: %[[LAM_RET:.*]] = call i32 @_ZZN1A3fooEvENKUlvE_clEv(ptr %[[LAM_ALLOCA]])
+// LLVM: store i32 %[[LAM_RET]], ptr %[[RETVAL]]
// LLVM: br label %[[RET_BB:.*]]
// LLVM: [[RET_BB]]:
-// LLVM: %[[RETPHI:.*]] = phi i32 [ %[[LAM_RET]], %[[SCOPE_BB]] ]
-// LLVM: store i32 %[[RETPHI]], ptr %[[RETVAL]]
// LLVM: %[[RET:.*]] = load i32, ptr %[[RETVAL]]
// LLVM: ret i32 %[[RET]]
@@ -407,14 +403,13 @@ struct A {
// CIR: %[[RETVAL:.*]] = cir.alloca !s32i, !cir.ptr<!s32i>, ["__retval"]
// CIR: cir.store %[[THIS_ARG]], %[[THIS_ADDR]]
// CIR: %[[THIS]] = cir.load %[[THIS_ADDR]] : !cir.ptr<!cir.ptr<!rec_A>>, !cir.ptr<!rec_A>
-// CIR: %[[SCOPE_RET:.*]] = cir.scope {
+// CIR: cir.scope {
// CIR: %[[LAM_ADDR:.*]] = cir.alloca ![[REC_LAM_PTR_A]], !cir.ptr<![[REC_LAM_PTR_A]]>, ["ref.tmp0"]
// CIR: %[[A_ADDR_ADDR:.*]] = cir.get_member %[[LAM_ADDR]][0] {name = "this"} : !cir.ptr<![[REC_LAM_PTR_A]]> -> !cir.ptr<!cir.ptr<!rec_A>>
// CIR: cir.store{{.*}} %[[THIS]], %[[A_ADDR_ADDR]]
// CIR: %[[LAM_RET:.*]] = cir.call @_ZZN1A3barEvENKUlvE_clEv(%[[LAM_ADDR]])
-// CIR: cir.yield %[[LAM_RET]]
+// CIR: cir.store{{.*}} %[[LAM_RET]], %[[RETVAL]]
// CIR: }
-// CIR: cir.store{{.*}} %[[SCOPE_RET]], %[[RETVAL]]
// CIR: %[[RET:.*]] = cir.load{{.*}} %[[RETVAL]]
// CIR: cir.return %[[RET]]
@@ -429,10 +424,9 @@ struct A {
// LLVM: %[[A_ADDR_ADDR:.*]] = getelementptr %[[REC_LAM_PTR_A]], ptr %[[LAM_ALLOCA]], i32 0, i32 0
// LLVM: store ptr %[[THIS]], ptr %[[A_ADDR_ADDR]]
// LLVM: %[[LAM_RET:.*]] = call i32 @_ZZN1A3barEvENKUlvE_clEv(ptr %[[LAM_ALLOCA]])
+// LLVM: store i32 %[[LAM_RET]], ptr %[[RETVAL]]
// LLVM: br label %[[RET_BB:.*]]
// LLVM: [[RET_BB]]:
-// LLVM: %[[RETPHI:.*]] = phi i32 [ %[[LAM_RET]], %[[SCOPE_BB]] ]
-// LLVM: store i32 %[[RETPHI]], ptr %[[RETVAL]]
// LLVM: %[[RET:.*]] = load i32, ptr %[[RETVAL]]
// LLVM: ret i32 %[[RET]]
diff --git a/clang/test/CIR/CodeGen/new.cpp b/clang/test/CIR/CodeGen/new.cpp
index 000ea5b..2efad10 100644
--- a/clang/test/CIR/CodeGen/new.cpp
+++ b/clang/test/CIR/CodeGen/new.cpp
@@ -208,6 +208,127 @@ void t_new_constant_size() {
// OGCG: %[[CALL:.*]] = call noalias noundef nonnull ptr @_Znam(i64 noundef 128)
// OGCG: store ptr %[[CALL]], ptr %[[P_ADDR]], align 8
+class C {
+ public:
+ ~C();
+};
+
+void t_constant_size_nontrivial() {
+ auto p = new C[3];
+}
+
+// CHECK: cir.func{{.*}} @_Z26t_constant_size_nontrivialv()
+// CHECK: %[[P_ADDR:.*]] = cir.alloca !cir.ptr<!rec_C>, !cir.ptr<!cir.ptr<!rec_C>>, ["p", init] {alignment = 8 : i64}
+// CHECK: %[[#NUM_ELEMENTS:]] = cir.const #cir.int<3> : !u64i
+// CHECK: %[[#SIZE_WITHOUT_COOKIE:]] = cir.const #cir.int<3> : !u64i
+// CHECK: %[[#ALLOCATION_SIZE:]] = cir.const #cir.int<11> : !u64i
+// CHECK: %[[RAW_PTR:.*]] = cir.call @_Znam(%[[#ALLOCATION_SIZE]]) : (!u64i) -> !cir.ptr<!void>
+// CHECK: %[[COOKIE_PTR_BASE:.*]] = cir.cast bitcast %[[RAW_PTR]] : !cir.ptr<!void> -> !cir.ptr<!cir.ptr<!u8i>>
+// CHECK: %[[COOKIE_PTR:.*]] = cir.cast bitcast %[[COOKIE_PTR_BASE]] : !cir.ptr<!cir.ptr<!u8i>> -> !cir.ptr<!u64i>
+// CHECK: cir.store align(8) %[[#NUM_ELEMENTS]], %[[COOKIE_PTR]] : !u64i, !cir.ptr<!u64i>
+// CHECK: %[[#COOKIE_SIZE:]] = cir.const #cir.int<8> : !s32i
+// CHECK: %[[DATA_PTR_RAW:.*]] = cir.ptr_stride %[[COOKIE_PTR_BASE]], %[[#COOKIE_SIZE]] : (!cir.ptr<!cir.ptr<!u8i>>, !s32i) -> !cir.ptr<!cir.ptr<!u8i>>
+// CHECK: %[[DATA_PTR_VOID:.*]] = cir.cast bitcast %[[DATA_PTR_RAW]] : !cir.ptr<!cir.ptr<!u8i>> -> !cir.ptr<!void>
+// CHECK: %[[DATA_PTR:.*]] = cir.cast bitcast %[[DATA_PTR_VOID]] : !cir.ptr<!void> -> !cir.ptr<!rec_C>
+// CHECK: cir.store align(8) %[[DATA_PTR]], %[[P_ADDR]] : !cir.ptr<!rec_C>, !cir.ptr<!cir.ptr<!rec_C>>
+// CHECK: cir.return
+// CHECK: }
+
+// LLVM: @_Z26t_constant_size_nontrivialv()
+// LLVM: %[[ALLOCA:.*]] = alloca ptr, i64 1, align 8
+// LLVM: %[[COOKIE_PTR:.*]] = call ptr @_Znam(i64 11)
+// LLVM: store i64 3, ptr %[[COOKIE_PTR]], align 8
+// LLVM: %[[ALLOCATED_PTR:.*]] = getelementptr ptr, ptr %[[COOKIE_PTR]], i64 8
+// LLVM: store ptr %[[ALLOCATED_PTR]], ptr %[[ALLOCA]], align 8
+
+// OGCG: @_Z26t_constant_size_nontrivialv()
+// OGCG: %[[ALLOCA:.*]] = alloca ptr, align 8
+// OGCG: %[[COOKIE_PTR:.*]] = call noalias noundef nonnull ptr @_Znam(i64 noundef 11)
+// OGCG: store i64 3, ptr %[[COOKIE_PTR]], align 8
+// OGCG: %[[ALLOCATED_PTR:.*]] = getelementptr inbounds i8, ptr %[[COOKIE_PTR]], i64 8
+// OGCG: store ptr %[[ALLOCATED_PTR]], ptr %[[ALLOCA]], align 8
+
+class D {
+ public:
+ int x;
+ ~D();
+};
+
+void t_constant_size_nontrivial2() {
+ auto p = new D[3];
+}
+
+// In this test SIZE_WITHOUT_COOKIE isn't used, but it would be if there were
+// an initializer.
+
+// CHECK: cir.func{{.*}} @_Z27t_constant_size_nontrivial2v()
+// CHECK: %[[P_ADDR:.*]] = cir.alloca !cir.ptr<!rec_D>, !cir.ptr<!cir.ptr<!rec_D>>, ["p", init] {alignment = 8 : i64}
+// CHECK: %[[#NUM_ELEMENTS:]] = cir.const #cir.int<3> : !u64i
+// CHECK: %[[#SIZE_WITHOUT_COOKIE:]] = cir.const #cir.int<12> : !u64i
+// CHECK: %[[#ALLOCATION_SIZE:]] = cir.const #cir.int<20> : !u64i
+// CHECK: %[[RAW_PTR:.*]] = cir.call @_Znam(%[[#ALLOCATION_SIZE]]) : (!u64i) -> !cir.ptr<!void>
+// CHECK: %[[COOKIE_PTR_BASE:.*]] = cir.cast bitcast %[[RAW_PTR]] : !cir.ptr<!void> -> !cir.ptr<!cir.ptr<!u8i>>
+// CHECK: %[[COOKIE_PTR:.*]] = cir.cast bitcast %[[COOKIE_PTR_BASE]] : !cir.ptr<!cir.ptr<!u8i>> -> !cir.ptr<!u64i>
+// CHECK: cir.store align(8) %[[#NUM_ELEMENTS]], %[[COOKIE_PTR]] : !u64i, !cir.ptr<!u64i>
+// CHECK: %[[#COOKIE_SIZE:]] = cir.const #cir.int<8> : !s32i
+// CHECK: %[[DATA_PTR_RAW:.*]] = cir.ptr_stride %[[COOKIE_PTR_BASE]], %[[#COOKIE_SIZE]] : (!cir.ptr<!cir.ptr<!u8i>>, !s32i) -> !cir.ptr<!cir.ptr<!u8i>>
+// CHECK: %[[DATA_PTR_VOID:.*]] = cir.cast bitcast %[[DATA_PTR_RAW]] : !cir.ptr<!cir.ptr<!u8i>> -> !cir.ptr<!void>
+// CHECK: %[[DATA_PTR:.*]] = cir.cast bitcast %[[DATA_PTR_VOID]] : !cir.ptr<!void> -> !cir.ptr<!rec_D>
+// CHECK: cir.store align(8) %[[DATA_PTR]], %[[P_ADDR]] : !cir.ptr<!rec_D>, !cir.ptr<!cir.ptr<!rec_D>>
+// CHECK: cir.return
+// CHECK: }
+
+// LLVM: @_Z27t_constant_size_nontrivial2v()
+// LLVM: %[[ALLOCA:.*]] = alloca ptr, i64 1, align 8
+// LLVM: %[[COOKIE_PTR:.*]] = call ptr @_Znam(i64 20)
+// LLVM: store i64 3, ptr %[[COOKIE_PTR]], align 8
+// LLVM: %[[ALLOCATED_PTR:.*]] = getelementptr ptr, ptr %[[COOKIE_PTR]], i64 8
+// LLVM: store ptr %[[ALLOCATED_PTR]], ptr %[[ALLOCA]], align 8
+
+struct alignas(16) E {
+ int x;
+ ~E();
+};
+
+void t_align16_nontrivial() {
+ auto p = new E[2];
+}
+
+// CHECK: cir.func{{.*}} @_Z20t_align16_nontrivialv()
+// CHECK: %[[P_ADDR:.*]] = cir.alloca !cir.ptr<!rec_E>, !cir.ptr<!cir.ptr<!rec_E>>, ["p", init] {alignment = 8 : i64}
+// CHECK: %[[#NUM_ELEMENTS:]] = cir.const #cir.int<2> : !u64i
+// CHECK: %[[#SIZE_WITHOUT_COOKIE:]] = cir.const #cir.int<32> : !u64i
+// CHECK: %[[#ALLOCATION_SIZE:]] = cir.const #cir.int<48> : !u64i
+// CHECK: %[[RAW_PTR:.*]] = cir.call @_Znam(%[[#ALLOCATION_SIZE]]) : (!u64i) -> !cir.ptr<!void>
+// CHECK: %[[COOKIE_PTR_BASE:.*]] = cir.cast bitcast %[[RAW_PTR]] : !cir.ptr<!void> -> !cir.ptr<!cir.ptr<!u8i>>
+// CHECK: %[[COOKIE_OFFSET:.*]] = cir.const #cir.int<8> : !s32i
+// CHECK: %[[COOKIE_PTR_RAW:.*]] = cir.ptr_stride %[[COOKIE_PTR_BASE]], %[[COOKIE_OFFSET]] : (!cir.ptr<!cir.ptr<!u8i>>, !s32i) -> !cir.ptr<!cir.ptr<!u8i>>
+// CHECK: %[[COOKIE_PTR:.*]] = cir.cast bitcast %[[COOKIE_PTR_RAW]] : !cir.ptr<!cir.ptr<!u8i>> -> !cir.ptr<!u64i>
+// CHECK: cir.store align(8) %[[#NUM_ELEMENTS]], %[[COOKIE_PTR]] : !u64i, !cir.ptr<!u64i>
+// CHECK: %[[#COOKIE_SIZE:]] = cir.const #cir.int<16> : !s32i
+// CHECK: %[[DATA_PTR_RAW:.*]] = cir.ptr_stride %[[COOKIE_PTR_BASE]], %[[#COOKIE_SIZE]] : (!cir.ptr<!cir.ptr<!u8i>>, !s32i) -> !cir.ptr<!cir.ptr<!u8i>>
+// CHECK: %[[DATA_PTR_VOID:.*]] = cir.cast bitcast %[[DATA_PTR_RAW]] : !cir.ptr<!cir.ptr<!u8i>> -> !cir.ptr<!void>
+// CHECK: %[[DATA_PTR:.*]] = cir.cast bitcast %[[DATA_PTR_VOID]] : !cir.ptr<!void> -> !cir.ptr<!rec_E>
+// CHECK: cir.store align(8) %[[DATA_PTR]], %[[P_ADDR]] : !cir.ptr<!rec_E>, !cir.ptr<!cir.ptr<!rec_E>>
+// CHECK: cir.return
+// CHECK: }
+
+// LLVM: @_Z20t_align16_nontrivialv()
+// LLVM: %[[ALLOCA:.*]] = alloca ptr, i64 1, align 8
+// LLVM: %[[RAW_PTR:.*]] = call ptr @_Znam(i64 48)
+// LLVM: %[[COOKIE_PTR:.*]] = getelementptr ptr, ptr %[[RAW_PTR]], i64 8
+// LLVM: store i64 2, ptr %[[COOKIE_PTR]], align 8
+// LLVM: %[[ALLOCATED_PTR:.*]] = getelementptr ptr, ptr %[[RAW_PTR]], i64 16
+// LLVM: store ptr %[[ALLOCATED_PTR]], ptr %[[ALLOCA]], align 8
+
+// OGCG: define{{.*}} void @_Z20t_align16_nontrivialv
+// OGCG: %[[ALLOCA:.*]] = alloca ptr, align 8
+// OGCG: %[[RAW_PTR:.*]] = call noalias noundef nonnull ptr @_Znam(i64 noundef 48)
+// OGCG: %[[COOKIE_PTR:.*]] = getelementptr inbounds i8, ptr %[[RAW_PTR]], i64 8
+// OGCG: store i64 2, ptr %[[COOKIE_PTR]], align 8
+// OGCG: %[[ALLOCATED_PTR:.*]] = getelementptr inbounds i8, ptr %[[RAW_PTR]], i64 16
+// OGCG: store ptr %[[ALLOCATED_PTR]], ptr %[[ALLOCA]], align 8
+// OGCG: ret void
void t_new_multidim_constant_size() {
auto p = new double[2][3][4];
diff --git a/clang/test/CIR/CodeGen/statement-exprs.c b/clang/test/CIR/CodeGen/statement-exprs.c
index f6ec9ec..c784ec9 100644
--- a/clang/test/CIR/CodeGen/statement-exprs.c
+++ b/clang/test/CIR/CodeGen/statement-exprs.c
@@ -218,7 +218,7 @@ struct S { int x; };
int test3() { return ({ struct S s = {1}; s; }).x; }
// CIR: cir.func no_proto dso_local @test3() -> !s32i
// CIR: %[[RETVAL:.+]] = cir.alloca !s32i, !cir.ptr<!s32i>, ["__retval"]
-// CIR: %[[YIELDVAL:.+]] = cir.scope {
+// CIR: cir.scope {
// CIR: %[[REF_TMP0:.+]] = cir.alloca !rec_S, !cir.ptr<!rec_S>, ["ref.tmp0"]
// CIR: %[[TMP:.+]] = cir.alloca !rec_S, !cir.ptr<!rec_S>, ["tmp"]
// CIR: cir.scope {
@@ -230,9 +230,8 @@ int test3() { return ({ struct S s = {1}; s; }).x; }
// CIR: }
// CIR: %[[GEP_X_TMP:.+]] = cir.get_member %[[REF_TMP0]][0] {name = "x"} : !cir.ptr<!rec_S> -> !cir.ptr<!s32i>
// CIR: %[[XVAL:.+]] = cir.load {{.*}} %[[GEP_X_TMP]] : !cir.ptr<!s32i>, !s32i
-// CIR: cir.yield %[[XVAL]] : !s32i
-// CIR: } : !s32i
-// CIR: cir.store %[[YIELDVAL]], %[[RETVAL]] : !s32i, !cir.ptr<!s32i>
+// CIR: cir.store{{.*}} %[[XVAL]], %[[RETVAL]] : !s32i, !cir.ptr<!s32i>
+// CIR: }
// CIR: %[[RES:.+]] = cir.load %[[RETVAL]] : !cir.ptr<!s32i>, !s32i
// CIR: cir.return %[[RES]] : !s32i
@@ -252,10 +251,9 @@ int test3() { return ({ struct S s = {1}; s; }).x; }
// LLVM: [[LBL8]]:
// LLVM: %[[GEP_VAR1:.+]] = getelementptr %struct.S, ptr %[[VAR1]], i32 0, i32 0
// LLVM: %[[LOAD_X:.+]] = load i32, ptr %[[GEP_VAR1]]
+// LLVM: store i32 %[[LOAD_X]], ptr %[[VAR4]]
// LLVM: br label %[[LBL11:.+]]
// LLVM: [[LBL11]]:
-// LLVM: %[[PHI:.+]] = phi i32 [ %[[LOAD_X]], %[[LBL8]] ]
-// LLVM: store i32 %[[PHI]], ptr %[[VAR4]]
// LLVM: %[[RES:.+]] = load i32, ptr %[[VAR4]]
// LLVM: ret i32 %[[RES]]
diff --git a/clang/test/CIR/CodeGen/struct-init.cpp b/clang/test/CIR/CodeGen/struct-init.cpp
index a47ef53..cb50999 100644
--- a/clang/test/CIR/CodeGen/struct-init.cpp
+++ b/clang/test/CIR/CodeGen/struct-init.cpp
@@ -9,6 +9,22 @@ struct S {
int a, b, c;
};
+S partial_init = { 1 };
+
+// CIR: cir.global external @partial_init = #cir.const_record<{#cir.int<1> : !s32i, #cir.int<0> : !s32i, #cir.int<0> : !s32i}> : !rec_S
+// LLVM: @partial_init = global %struct.S { i32 1, i32 0, i32 0 }
+// OGCG: @partial_init = global %struct.S { i32 1, i32 0, i32 0 }
+
+struct StructWithDefaultInit {
+ int a = 2;
+};
+
+StructWithDefaultInit swdi = {};
+
+// CIR: cir.global external @swdi = #cir.const_record<{#cir.int<2> : !s32i}> : !rec_StructWithDefaultInit
+// LLVM: @swdi = global %struct.StructWithDefaultInit { i32 2 }, align 4
+// OGCG: @swdi = global %struct.StructWithDefaultInit { i32 2 }, align 4
+
void init() {
S s1 = {1, 2, 3};
S s2 = {4, 5};
diff --git a/clang/test/CIR/CodeGen/struct.cpp b/clang/test/CIR/CodeGen/struct.cpp
index 6d362c7..c8db714 100644
--- a/clang/test/CIR/CodeGen/struct.cpp
+++ b/clang/test/CIR/CodeGen/struct.cpp
@@ -280,3 +280,67 @@ void bin_comma() {
// OGCG: define{{.*}} void @_Z9bin_commav()
// OGCG: %[[A_ADDR:.*]] = alloca %struct.CompleteS, align 4
// OGCG: call void @llvm.memset.p0.i64(ptr align 4 %[[A_ADDR]], i8 0, i64 8, i1 false)
+
+void compound_literal_expr() { CompleteS a = (CompleteS){}; }
+
+// CIR: %[[A_ADDR:.*]] = cir.alloca !rec_CompleteS, !cir.ptr<!rec_CompleteS>, ["a", init]
+// CIR: %[[A_ELEM_0_PTR:.*]] = cir.get_member %[[A_ADDR]][0] {name = "a"} : !cir.ptr<!rec_CompleteS> -> !cir.ptr<!s32i>
+// CIR: %[[CONST_0:.*]] = cir.const #cir.int<0> : !s32i
+// CIR: cir.store{{.*}} %[[CONST_0]], %[[A_ELEM_0_PTR]] : !s32i, !cir.ptr<!s32i>
+// CIR: %[[A_ELEM_1_PTR:.*]] = cir.get_member %[[A_ADDR]][1] {name = "b"} : !cir.ptr<!rec_CompleteS> -> !cir.ptr<!s8i>
+// CIR: %[[CONST_0:.*]] = cir.const #cir.int<0> : !s8i
+// CIR: cir.store{{.*}} %[[CONST_0]], %[[A_ELEM_1_PTR]] : !s8i, !cir.ptr<!s8i>
+
+// TODO(cir): zero-initialize the padding
+
+// LLVM: %[[A_ADDR:.*]] = alloca %struct.CompleteS, i64 1, align 4
+// LLVM: %[[A_ELEM_0_PTR:.*]] = getelementptr %struct.CompleteS, ptr %[[A_ADDR]], i32 0, i32 0
+// LLVM: store i32 0, ptr %[[A_ELEM_0_PTR]], align 4
+// LLVM: %[[A_ELEM_1_PTR:.*]] = getelementptr %struct.CompleteS, ptr %[[A_ADDR]], i32 0, i32 1
+// LLVM: store i8 0, ptr %[[A_ELEM_1_PTR]], align 4
+
+// OGCG: %[[A_ADDR:.*]] = alloca %struct.CompleteS, align 4
+// OGCG: call void @llvm.memset.p0.i64(ptr align 4 %[[A_ADDR]], i8 0, i64 8, i1 false)
+
+struct StructWithConstMember {
+ int a : 1;
+};
+
+void struct_with_const_member_expr() {
+ int a = (StructWithConstMember){}.a;
+}
+
+// CIR: %[[A_ADDR:.*]] = cir.alloca !s32i, !cir.ptr<!s32i>, ["a", init]
+// CIR: %[[RESULT:.*]] = cir.scope {
+// CIR: %[[REF_ADDR:.*]] = cir.alloca !rec_StructWithConstMember, !cir.ptr<!rec_StructWithConstMember>, ["ref.tmp0"]
+// CIR: %[[ELEM_0_PTR:.*]] = cir.get_member %[[REF_ADDR]][0] {name = "a"} : !cir.ptr<!rec_StructWithConstMember> -> !cir.ptr<!u8i>
+// CIR: %[[CONST_0:.*]] = cir.const #cir.int<0> : !s32i
+// CIR: %[[SET_BF:.*]] = cir.set_bitfield{{.*}} (#bfi_a, %[[ELEM_0_PTR]] : !cir.ptr<!u8i>, %[[CONST_0]] : !s32i) -> !s32i
+// CIR: %[[CONST_0:.*]] = cir.const #cir.int<0> : !s32i
+// CIR: cir.yield %[[CONST_0]] : !s32i
+// CIR: } : !s32i
+// CIR: cir.store{{.*}} %[[RESULT]], %[[A_ADDR]] : !s32i, !cir.ptr<!s32i>
+
+// TODO(cir): zero-initialize the padding
+
+// LLVM: %[[REF_ADDR:.*]] = alloca %struct.StructWithConstMember, i64 1, align 4
+// LLVM: %[[A_ADDR:.*]] = alloca i32, i64 1, align 4
+// LLVM: br label %[[BF_LABEL:.*]]
+// LLVM: [[BF_LABEL]]:
+// LLVM: %[[ELEM_0_PTR:.*]] = getelementptr %struct.StructWithConstMember, ptr %[[REF_ADDR]], i32 0, i32 0
+// LLVM: %[[TMP_REF:.*]] = load i8, ptr %[[ELEM_0_PTR]], align 4
+// LLVM: %[[BF_CLEAR:.*]] = and i8 %[[TMP_REF]], -2
+// LLVM: %[[BF_SET:.*]] = or i8 %[[BF_CLEAR]], 0
+// LLVM: store i8 %[[BF_SET]], ptr %[[ELEM_0_PTR]], align 4
+// LLVM: br label %[[RESULT_LABEL:.*]]
+// LLVM: [[RESULT_LABEL]]:
+// LLVM: %[[RESULT:.*]] = phi i32 [ 0, %[[BF_LABEL]] ]
+// LLVM: store i32 %[[RESULT]], ptr %[[A_ADDR]], align 4
+
+// OGCG: %[[A_ADDR:.*]] = alloca i32, align 4
+// OGCG: %[[REF_ADDR:.*]] = alloca %struct.StructWithConstMember, align 4
+// OGCG: %[[TMP_REF:.*]] = load i8, ptr %[[REF_ADDR]], align 4
+// OGCG: %[[BF_CLEAR:.*]] = and i8 %[[TMP_REF]], -2
+// OGCG: %[[BF_SET:.*]] = or i8 %[[BF_CLEAR]], 0
+// OGCG: store i8 %[[BF_SET]], ptr %[[REF_ADDR]], align 4
+// OGCG: store i32 0, ptr %[[A_ADDR]], align 4
diff --git a/clang/test/CIR/CodeGen/try-catch.cpp b/clang/test/CIR/CodeGen/try-catch.cpp
index 8f0b3c4..5a50310 100644
--- a/clang/test/CIR/CodeGen/try-catch.cpp
+++ b/clang/test/CIR/CodeGen/try-catch.cpp
@@ -30,3 +30,90 @@ void empty_try_block_with_catch_with_int_exception() {
// OGCG: define{{.*}} void @_Z45empty_try_block_with_catch_with_int_exceptionv()
// OGCG: ret void
+
+void try_catch_with_empty_catch_all() {
+ int a = 1;
+ try {
+ return;
+ ++a;
+ } catch (...) {
+ }
+}
+
+// CIR: %[[A_ADDR:.*]] = cir.alloca !s32i, !cir.ptr<!s32i>, ["a", init]
+// CIR: %[[CONST_1:.*]] = cir.const #cir.int<1> : !s32i
+// CIR: cir.store{{.*}} %[[CONST_1]], %[[A_ADDR]] : !s32i, !cir.ptr<!s32i>
+// CIR: cir.scope {
+// CIR: cir.try {
+// CIR: cir.return
+// CIR: ^bb1: // no predecessors
+// CIR: %[[TMP_A:.*]] = cir.load{{.*}} %[[A_ADDR]] : !cir.ptr<!s32i>, !s32i
+// CIR: %[[RESULT:.*]] = cir.unary(inc, %[[TMP_A]]) nsw : !s32i, !s32i
+// CIR: cir.store{{.*}} %[[RESULT]], %[[A_ADDR]] : !s32i, !cir.ptr<!s32i>
+// CIR: cir.yield
+// CIR: }
+// CIR: }
+
+// LLVM: %[[A_ADDR:.*]] = alloca i32, i64 1, align 4
+// LLVM: store i32 1, ptr %[[A_ADDR]], align 4
+// LLVM: br label %[[BB_2:.*]]
+// LLVM: [[BB_2]]:
+// LLVM: br label %[[BB_3:.*]]
+// LLVM: [[BB_3]]:
+// LLVM: ret void
+// LLVM: [[BB_4:.*]]:
+// LLVM: %[[TMP_A:.*]] = load i32, ptr %[[A_ADDR]], align 4
+// LLVM: %[[RESULT:.*]] = add nsw i32 %[[TMP_A]], 1
+// LLVM: store i32 %[[RESULT]], ptr %[[A_ADDR]], align 4
+// LLVM: br label %[[BB_7:.*]]
+// LLVM: [[BB_7]]:
+// LLVM: br label %[[BB_8:.*]]
+// LLVM: [[BB_8]]:
+// LLVM: ret void
+
+// OGCG: %[[A_ADDR:.*]] = alloca i32, align 4
+// OGCG: store i32 1, ptr %[[A_ADDR]], align 4
+// OGCG: ret void
+
+void try_catch_with_empty_catch_all_2() {
+ int a = 1;
+ try {
+ ++a;
+ return;
+ } catch (...) {
+ }
+}
+
+// CIR: %[[A_ADDR:.*]] = cir.alloca !s32i, !cir.ptr<!s32i>, ["a", init]
+// CIR: %[[CONST_1:.*]] = cir.const #cir.int<1> : !s32i
+// CIR: cir.store{{.*}} %[[CONST_1]], %[[A_ADDR]] : !s32i, !cir.ptr<!s32i>
+// CIR: cir.scope {
+// CIR: cir.try {
+// CIR: %[[TMP_A:.*]] = cir.load{{.*}} %[[A_ADDR]] : !cir.ptr<!s32i>, !s32i
+// CIR: %[[RESULT:.*]] = cir.unary(inc, %[[TMP_A]]) nsw : !s32i, !s32i
+// CIR: cir.store{{.*}} %[[RESULT]], %[[A_ADDR]] : !s32i, !cir.ptr<!s32i>
+// CIR: cir.return
+// CIR: }
+// CIR: }
+
+// LLVM: %[[A_ADDR]] = alloca i32, i64 1, align 4
+// LLVM: store i32 1, ptr %[[A_ADDR]], align 4
+// LLVM: br label %[[BB_2:.*]]
+// LLVM: [[BB_2]]:
+// LLVM: br label %[[BB_3:.*]]
+// LLVM: [[BB_3]]:
+// LLVM: %[[TMP_A:.*]] = load i32, ptr %[[A_ADDR]], align 4
+// LLVM: %[[RESULT:.*]] = add nsw i32 %[[TMP_A:.*]], 1
+// LLVM: store i32 %[[RESULT]], ptr %[[A_ADDR]], align 4
+// LLVM: ret void
+// LLVM: [[BB_6:.*]]:
+// LLVM: br label %[[BB_7:.*]]
+// LLVM: [[BB_7]]:
+// LLVM: ret void
+
+// OGCG: %[[A_ADDR:.*]] = alloca i32, align 4
+// OGCG: store i32 1, ptr %[[A_ADDR]], align 4
+// OGCG: %[[TMP_A:.*]] = load i32, ptr %[[A_ADDR]], align 4
+// OGCG: %[[RESULT:.*]] = add nsw i32 %[[TMP_A]], 1
+// OGCG: store i32 %[[RESULT]], ptr %[[A_ADDR]], align 4
+// OGCG: ret void
diff --git a/clang/test/CIR/CodeGen/vla.c b/clang/test/CIR/CodeGen/vla.c
index b22c704..0af4f83 100644
--- a/clang/test/CIR/CodeGen/vla.c
+++ b/clang/test/CIR/CodeGen/vla.c
@@ -282,4 +282,61 @@ void f3(unsigned len) {
// break;
// }
// }
-
\ No newline at end of file
+
+int f5(unsigned long len) {
+ int arr[len];
+ return arr[2];
+}
+
+// CIR: cir.func{{.*}} @f5(%[[LEN_ARG:.*]]: !u64i {{.*}}) -> !s32i
+// CIR: %[[LEN_ADDR:.*]] = cir.alloca !u64i, !cir.ptr<!u64i>, ["len", init]
+// CIR: %[[RET_ADDR:.*]] = cir.alloca !s32i, !cir.ptr<!s32i>, ["__retval"]
+// CIR: %[[SAVED_STACK:.*]] = cir.alloca !cir.ptr<!u8i>, !cir.ptr<!cir.ptr<!u8i>>, ["saved_stack"]
+// CIR: cir.store{{.*}} %[[LEN_ARG]], %[[LEN_ADDR]]
+// CIR: %[[LEN:.*]] = cir.load{{.*}} %[[LEN_ADDR]]
+// CIR: %[[STACK_PTR:.*]] = cir.stacksave
+// CIR: cir.store{{.*}} %[[STACK_PTR]], %[[SAVED_STACK]]
+// CIR: %[[ARR:.*]] = cir.alloca !s32i, !cir.ptr<!s32i>, %[[LEN]] : !u64i, ["arr"]
+// CIR: %[[TWO:.*]] = cir.const #cir.int<2> : !s32i
+// CIR: %[[ARR_2:.*]] = cir.ptr_stride %[[ARR]], %[[TWO]]
+// CIR: %[[ARR_VAL:.*]] = cir.load{{.*}} %[[ARR_2]] : !cir.ptr<!s32i>, !s32i
+// CIR: cir.store{{.*}} %[[ARR_VAL]], %[[RET_ADDR]] : !s32i, !cir.ptr<!s32i>
+// CIR: %[[STACK_RESTORE_PTR:.*]] = cir.load{{.*}} %[[SAVED_STACK]]
+// CIR: cir.stackrestore %[[STACK_RESTORE_PTR]]
+// CIR: %[[RET_VAL:.*]] = cir.load{{.*}} %[[RET_ADDR]]
+// CIR: cir.return %[[RET_VAL]] : !s32i
+
+// LLVM: define{{.*}} i32 @f5(i64 %[[LEN_ARG:.*]])
+// LLVM: %[[LEN_ADDR:.*]] = alloca i64
+// LLVM: %[[RET_ADDR:.*]] = alloca i32
+// LLVM: %[[SAVED_STACK:.*]] = alloca ptr
+// LLVM: store i64 %[[LEN_ARG]], ptr %[[LEN_ADDR]]
+// LLVM: %[[LEN:.*]] = load i64, ptr %[[LEN_ADDR]]
+// LLVM: %[[STACK_PTR:.*]] = call ptr @llvm.stacksave.p0()
+// LLVM: store ptr %[[STACK_PTR]], ptr %[[SAVED_STACK]]
+// LLVM: %[[ARR:.*]] = alloca i32, i64 %[[LEN]]
+// LLVM: %[[ARR_2:.*]] = getelementptr i32, ptr %[[ARR]], i64 2
+// LLVM: %[[ARR_VAL:.*]] = load i32, ptr %[[ARR_2]]
+// LLVM: store i32 %[[ARR_VAL]], ptr %[[RET_ADDR]]
+// LLVM: %[[STACK_RESTORE_PTR:.*]] = load ptr, ptr %[[SAVED_STACK]]
+// LLVM: call void @llvm.stackrestore.p0(ptr %[[STACK_RESTORE_PTR]])
+// LLVM: %[[RET_VAL:.*]] = load i32, ptr %[[RET_ADDR]]
+// LLVM: ret i32 %[[RET_VAL]]
+
+// Note: VLA_EXPR0 below is emitted to capture debug info.
+
+// OGCG: define{{.*}} i32 @f5(i64 {{.*}} %[[LEN_ARG:.*]])
+// OGCG: %[[LEN_ADDR:.*]] = alloca i64
+// OGCG: %[[SAVED_STACK:.*]] = alloca ptr
+// OGCG: %[[VLA_EXPR0:.*]] = alloca i64
+// OGCG: store i64 %[[LEN_ARG]], ptr %[[LEN_ADDR]]
+// OGCG: %[[LEN:.*]] = load i64, ptr %[[LEN_ADDR]]
+// OGCG: %[[STACK_PTR:.*]] = call ptr @llvm.stacksave.p0()
+// OGCG: store ptr %[[STACK_PTR]], ptr %[[SAVED_STACK]]
+// OGCG: %[[ARR:.*]] = alloca i32, i64 %[[LEN]]
+// OGCG: store i64 %[[LEN]], ptr %[[VLA_EXPR0]]
+// OGCG: %[[ARR_2:.*]] = getelementptr inbounds i32, ptr %[[ARR]], i64 2
+// OGCG: %[[ARR_VAL:.*]] = load i32, ptr %[[ARR_2]]
+// OGCG: %[[STACK_RESTORE_PTR:.*]] = load ptr, ptr %[[SAVED_STACK]]
+// OGCG: call void @llvm.stackrestore.p0(ptr %[[STACK_RESTORE_PTR]])
+// OGCG: ret i32 %[[ARR_VAL]]
diff --git a/clang/test/CIR/IR/invalid-atomic.cir b/clang/test/CIR/IR/invalid-atomic.cir
new file mode 100644
index 0000000..a124e43
--- /dev/null
+++ b/clang/test/CIR/IR/invalid-atomic.cir
@@ -0,0 +1,7 @@
+// RUN: cir-opt %s -verify-diagnostics -split-input-file
+
+cir.func @f1(%arg0: !cir.ptr<!cir.float>, %arg1: !cir.float) {
+ // expected-error @below {{only atomic add, sub, max, and min operation could operate on floating-point values}}
+ %0 = cir.atomic.fetch and seq_cst %arg0, %arg1 : (!cir.ptr<!cir.float>, !cir.float) -> !cir.float
+ cir.return
+}
diff --git a/clang/test/CodeGen/AArch64/sign-return-address.c b/clang/test/CodeGen/AArch64/sign-return-address.c
index 11dd683..2b505de 100644
--- a/clang/test/CodeGen/AArch64/sign-return-address.c
+++ b/clang/test/CodeGen/AArch64/sign-return-address.c
@@ -28,17 +28,17 @@
// NONE-NOT: !"branch-target-enforcement"
// ALL-NOT: !"branch-target-enforcement"
// PART-NOT: !"branch-target-enforcement"
-// BTE: !{i32 8, !"branch-target-enforcement", i32 1}
+// BTE: !{i32 8, !"branch-target-enforcement", i32 2}
// B-KEY-NOT: !"branch-target-enforcement"
// NONE-NOT: !"sign-return-address"
-// ALL: !{i32 8, !"sign-return-address", i32 1}
-// PART: !{i32 8, !"sign-return-address", i32 1}
+// ALL: !{i32 8, !"sign-return-address", i32 2}
+// PART: !{i32 8, !"sign-return-address", i32 2}
// BTE-NOT: !"sign-return-address"
-// B-KEY: !{i32 8, !"sign-return-address", i32 1}
+// B-KEY: !{i32 8, !"sign-return-address", i32 2}
// NONE-NOT: !"sign-return-address-all"
-// ALL: !{i32 8, !"sign-return-address-all", i32 1}
+// ALL: !{i32 8, !"sign-return-address-all", i32 2}
// PART-NOT: !"sign-return-address-all"
// BTE-NOT: !"sign-return-address-all"
// B-KEY-NOT: !"sign-return-address-all"
@@ -47,6 +47,6 @@
// ALL-NOT: !"sign-return-address-with-bkey"
// PART-NOT: !"sign-return-address-with-bkey"
// BTE-NOT: !"sign-return-address-with-bkey"
-// B-KEY: !{i32 8, !"sign-return-address-with-bkey", i32 1}
+// B-KEY: !{i32 8, !"sign-return-address-with-bkey", i32 2}
void foo() {}
diff --git a/clang/test/CodeGen/arm-branch-protection-attr-2.c b/clang/test/CodeGen/arm-branch-protection-attr-2.c
index fad5dc0..5391537 100644
--- a/clang/test/CodeGen/arm-branch-protection-attr-2.c
+++ b/clang/test/CodeGen/arm-branch-protection-attr-2.c
@@ -23,16 +23,16 @@
// NONE-NOT: !"branch-target-enforcement"
// PART-NOT: !"branch-target-enforcement"
// ALL-NOT: !"branch-target-enforcement"
-// BTE: !{i32 8, !"branch-target-enforcement", i32 1}
+// BTE: !{i32 8, !"branch-target-enforcement", i32 2}
// NONE-NOT: !"sign-return-address"
-// PART: !{i32 8, !"sign-return-address", i32 1}
-// ALL: !{i32 8, !"sign-return-address", i32 1}
+// PART: !{i32 8, !"sign-return-address", i32 2}
+// ALL: !{i32 8, !"sign-return-address", i32 2}
// BTE-NOT: !"sign-return-address"
// NONE-NOT: !"sign-return-address-all", i32 0}
// PART-NOT: !"sign-return-address-all", i32 0}
-// ALL: !{i32 8, !"sign-return-address-all", i32 1}
+// ALL: !{i32 8, !"sign-return-address-all", i32 2}
// BTE-NOT: !"sign-return-address-all", i32 0}
void foo() {}
diff --git a/clang/test/Driver/arm-abi.c b/clang/test/Driver/arm-abi.c
index 139456c..b89b969 100644
--- a/clang/test/Driver/arm-abi.c
+++ b/clang/test/Driver/arm-abi.c
@@ -31,6 +31,8 @@
// FreeBSD / OpenBSD default to aapcs-linux
// RUN: %clang -target arm--freebsd- %s -### -o %t.o 2>&1 \
// RUN: | FileCheck -check-prefix=CHECK-AAPCS-LINUX %s
+// RUN: %clang -target arm--fuchsia- %s -### -o %t.o 2>&1 \
+// RUN: | FileCheck -check-prefix=CHECK-AAPCS-LINUX %s
// RUN: %clang -target arm--openbsd- %s -### -o %t.o 2>&1 \
// RUN: | FileCheck -check-prefix=CHECK-AAPCS-LINUX %s
// RUN: %clang -target arm--haiku- %s -### -o %t.o 2>&1 \
diff --git a/clang/test/Driver/fuchsia.c b/clang/test/Driver/fuchsia.c
index cf92f85..3fb2a94 100644
--- a/clang/test/Driver/fuchsia.c
+++ b/clang/test/Driver/fuchsia.c
@@ -2,6 +2,10 @@
// RUN: -resource-dir=%S/Inputs/resource_dir_with_per_target_subdir \
// RUN: --sysroot=%S/platform -fuse-ld=ld 2>&1 \
// RUN: | FileCheck -check-prefixes=CHECK,CHECK-X86_64 %s
+// RUN: %clang -### %s --target=arm-unknown-fuchsia \
+// RUN: -resource-dir=%S/Inputs/resource_dir_with_per_target_subdir \
+// RUN: --sysroot=%S/platform -fuse-ld=ld 2>&1 \
+// RUN: | FileCheck -check-prefixes=CHECK,CHECK-ARMV8A %s
// RUN: %clang -### %s --target=aarch64-unknown-fuchsia \
// RUN: -resource-dir=%S/Inputs/resource_dir_with_per_target_subdir \
// RUN: --sysroot=%S/platform -fuse-ld=ld 2>&1 \
@@ -14,6 +18,10 @@
// RUN: -resource-dir=%S/Inputs/resource_dir_with_per_target_subdir \
// RUN: --sysroot=%S/platform -fuse-ld=ld 2>&1 \
// RUN: | FileCheck -check-prefixes=CHECK,CHECK-X86_64 %s
+// RUN: %clang -### %s --target=arm-fuchsia \
+// RUN: -resource-dir=%S/Inputs/resource_dir_with_per_target_subdir \
+// RUN: --sysroot=%S/platform -fuse-ld=ld 2>&1 \
+// RUN: | FileCheck -check-prefixes=CHECK,CHECK-ARMV8A %s
// RUN: %clang -### %s --target=aarch64-fuchsia \
// RUN: -resource-dir=%S/Inputs/resource_dir_with_per_target_subdir \
// RUN: --sysroot=%S/platform -fuse-ld=ld 2>&1 \
@@ -24,6 +32,7 @@
// RUN: | FileCheck -check-prefixes=CHECK,CHECK-RISCV64 %s
// CHECK: "-cc1"
// CHECK-X86_64: "-triple" "x86_64-unknown-fuchsia"
+// CHECK-ARMV8A: "-triple" "thumbv8a-unknown-fuchsia"
// CHECK-AARCH64: "-triple" "aarch64-unknown-fuchsia"
// CHECK-RISCV64: "-triple" "riscv64-unknown-fuchsia"
// CHECK: "-funwind-tables=2"
diff --git a/clang/test/Driver/print-supported-extensions-riscv.c b/clang/test/Driver/print-supported-extensions-riscv.c
index 7972b9e..cb81273 100644
--- a/clang/test/Driver/print-supported-extensions-riscv.c
+++ b/clang/test/Driver/print-supported-extensions-riscv.c
@@ -190,6 +190,11 @@
// CHECK-NEXT: xsfmm64t 0.6 'XSfmm64t' (TE=64 configuration)
// CHECK-NEXT: xsfmmbase 0.6 'XSfmmbase' (All non arithmetic instructions for all TEWs and sf.vtzero)
// CHECK-NEXT: xsfvcp 1.0 'XSfvcp' (SiFive Custom Vector Coprocessor Interface Instructions)
+// CHECK-NEXT: xsfvfbfexp16e 0.5 'XSfvfbfexp16e' (SiFive Vector Floating-Point Exponential Function Instruction, BFloat16)
+// CHECK-NEXT: xsfvfexp16e 0.5 'XSfvfexp16e' (SiFive Vector Floating-Point Exponential Function Instruction, Half Precision)
+// CHECK-NEXT: xsfvfexp32e 0.5 'XSfvfexp32e' (SiFive Vector Floating-Point Exponential Function Instruction, Single Precision)
+// CHECK-NEXT: xsfvfexpa 0.2 'XSfvfexpa' (SiFive Vector Floating-Point Exponential Approximation Instruction)
+// CHECK-NEXT: xsfvfexpa64e 0.2 'XSfvfexpa64e' (SiFive Vector Floating-Point Exponential Approximation Instruction with Double-Precision)
// CHECK-NEXT: xsfvfnrclipxfqf 1.0 'XSfvfnrclipxfqf' (SiFive FP32-to-int8 Ranged Clip Instructions)
// CHECK-NEXT: xsfvfwmaccqqq 1.0 'XSfvfwmaccqqq' (SiFive Matrix Multiply Accumulate Instruction (4-by-4))
// CHECK-NEXT: xsfvqmaccdod 1.0 'XSfvqmaccdod' (SiFive Int8 Matrix Multiplication Instructions (2-by-8 and 8-by-2))
diff --git a/clang/test/Frontend/arm-ignore-branch-protection-option.c b/clang/test/Frontend/arm-ignore-branch-protection-option.c
index 99a2acc..45bdb37 100644
--- a/clang/test/Frontend/arm-ignore-branch-protection-option.c
+++ b/clang/test/Frontend/arm-ignore-branch-protection-option.c
@@ -15,4 +15,4 @@ __attribute__((target("arch=cortex-m0"))) void f() {}
// CHECK-NOT: attributes { {{.*}} "branch-target-enforcement"
/// Check that there are branch protection module attributes despite the warning.
-// CHECK: !{i32 8, !"branch-target-enforcement", i32 1}
+// CHECK: !{i32 8, !"branch-target-enforcement", i32 2}
diff --git a/clang/test/Preprocessor/riscv-atomics.c b/clang/test/Preprocessor/riscv-atomics.c
new file mode 100644
index 0000000..6e02173
--- /dev/null
+++ b/clang/test/Preprocessor/riscv-atomics.c
@@ -0,0 +1,24 @@
+// RUN: %clang --target=riscv32-unknown-linux-gnu -march=rv32ia -x c -E -dM %s \
+// RUN: -o - | FileCheck %s
+// RUN: %clang --target=riscv32-unknown-linux-gnu -march=rv32i_zalrsc -x c -E \
+// RUN: -dM %s -o - | FileCheck %s
+// RUN: %clang --target=riscv64-unknown-linux-gnu -march=rv64ia -x c -E -dM %s \
+// RUN: -o - | FileCheck %s --check-prefixes=CHECK,CHECK-RV64
+// RUN: %clang --target=riscv64-unknown-linux-gnu -march=rv64i_zalrsc -x c -E \
+// RUN: -dM %s -o - | FileCheck %s --check-prefixes=CHECK,CHECK-RV64
+
+// CHECK: #define __GCC_ATOMIC_BOOL_LOCK_FREE 2
+// CHECK: #define __GCC_ATOMIC_CHAR16_T_LOCK_FREE 2
+// CHECK: #define __GCC_ATOMIC_CHAR32_T_LOCK_FREE 2
+// CHECK: #define __GCC_ATOMIC_CHAR_LOCK_FREE 2
+// CHECK: #define __GCC_ATOMIC_INT_LOCK_FREE 2
+// CHECK-RV64: #define __GCC_ATOMIC_LLONG_LOCK_FREE 2
+// CHECK: #define __GCC_ATOMIC_LONG_LOCK_FREE 2
+// CHECK: #define __GCC_ATOMIC_POINTER_LOCK_FREE 2
+// CHECK: #define __GCC_ATOMIC_SHORT_LOCK_FREE 2
+// CHECK: #define __GCC_ATOMIC_TEST_AND_SET_TRUEVAL 1
+// CHECK: #define __GCC_ATOMIC_WCHAR_T_LOCK_FREE 2
+// CHECK: #define __GCC_HAVE_SYNC_COMPARE_AND_SWAP_1 1
+// CHECK: #define __GCC_HAVE_SYNC_COMPARE_AND_SWAP_2 1
+// CHECK: #define __GCC_HAVE_SYNC_COMPARE_AND_SWAP_4 1
+// CHECK-RV64: #define __GCC_HAVE_SYNC_COMPARE_AND_SWAP_8 1
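The checks above rely on the standard meaning of these predefines: a __GCC_ATOMIC_*_LOCK_FREE value of 2 means the corresponding width is always lock-free, so with the A or Zalrsc extension plain atomics lower to native LR/SC or AMO sequences rather than libatomic calls. A minimal hedged C sketch of code keying on that:

#include <stdatomic.h>

/* Minimal sketch, not part of the test: on rv32ia/rv64ia (or with Zalrsc) the
 * macro below is 2, so this fetch-add compiles to a native atomic sequence. */
int increment(_Atomic int *counter) {
  _Static_assert(__GCC_ATOMIC_INT_LOCK_FREE == 2,
                 "int atomics expected to be lock-free with the A/Zalrsc extension");
  return atomic_fetch_add_explicit(counter, 1, memory_order_seq_cst);
}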
diff --git a/clang/test/Preprocessor/riscv-target-features-sifive.c b/clang/test/Preprocessor/riscv-target-features-sifive.c
index 1c49b55..5002f62 100644
--- a/clang/test/Preprocessor/riscv-target-features-sifive.c
+++ b/clang/test/Preprocessor/riscv-target-features-sifive.c
@@ -5,6 +5,11 @@
// CHECK-NOT: __riscv_xsfcease {{.*$}}
// CHECK-NOT: __riscv_xsfvcp {{.*$}}
+// CHECK-NOT: __riscv_xsfvfbfexp16e {{.*$}}
+// CHECK-NOT: __riscv_xsfvfexp16e {{.*$}}
+// CHECK-NOT: __riscv_xsfvfexp32e {{.*$}}
+// CHECK-NOT: __riscv_xsfvfexpa {{.*$}}
+// CHECK-NOT: __riscv_xsfvfexpa64e {{.*$}}
// CHECK-NOT: __riscv_xsfvfnrclipxfqf {{.*$}}
// CHECK-NOT: __riscv_xsfvfwmaccqqq {{.*$}}
// CHECK-NOT: __riscv_xsfqmaccdod {{.*$}}
@@ -39,6 +44,46 @@
// CHECK-XSFVCP-EXT: __riscv_xsfvcp 1000000{{$}}
// RUN: %clang --target=riscv32-unknown-linux-gnu \
+// RUN: -march=rv32ixsfvfbfexp16e_zvfbfmin -E -dM %s \
+// RUN: -o - | FileCheck --check-prefix=CHECK-XSFVFBFEXP16E-EXT %s
+// RUN: %clang --target=riscv64-unknown-linux-gnu \
+// RUN: -march=rv64ixsfvfbfexp16e_zvfbfmin -E -dM %s \
+// RUN: -o - | FileCheck --check-prefix=CHECK-XSFVFBFEXP16E-EXT %s
+// CHECK-XSFVFBFEXP16E-EXT: __riscv_xsfvfbfexp16e 5000{{$}}
+
+// RUN: %clang --target=riscv32-unknown-linux-gnu \
+// RUN: -march=rv32ixsfvfexp16e -E -dM %s \
+// RUN: -o - | FileCheck --check-prefix=CHECK-XSFVFEXP16E-EXT %s
+// RUN: %clang --target=riscv64-unknown-linux-gnu \
+// RUN: -march=rv64ixsfvfexp16e -E -dM %s \
+// RUN: -o - | FileCheck --check-prefix=CHECK-XSFVFEXP16E-EXT %s
+// CHECK-XSFVFEXP16E-EXT: __riscv_xsfvfexp16e 5000{{$}}
+
+// RUN: %clang --target=riscv32-unknown-linux-gnu \
+// RUN: -march=rv32ixsfvfexp32e -E -dM %s \
+// RUN: -o - | FileCheck --check-prefix=CHECK-XSFVFEXP32E-EXT %s
+// RUN: %clang --target=riscv64-unknown-linux-gnu \
+// RUN: -march=rv64ixsfvfexp32e -E -dM %s \
+// RUN: -o - | FileCheck --check-prefix=CHECK-XSFVFEXP32E-EXT %s
+// CHECK-XSFVFEXP32E-EXT: __riscv_xsfvfexp32e 5000{{$}}
+
+// RUN: %clang --target=riscv32-unknown-linux-gnu \
+// RUN: -march=rv32ixsfvfexpa -E -dM %s \
+// RUN: -o - | FileCheck --check-prefix=CHECK-XSFVFEXPA-EXT %s
+// RUN: %clang --target=riscv64-unknown-linux-gnu \
+// RUN: -march=rv64ixsfvfexpa -E -dM %s \
+// RUN: -o - | FileCheck --check-prefix=CHECK-XSFVFEXPA-EXT %s
+// CHECK-XSFVFEXPA-EXT: __riscv_xsfvfexpa 2000{{$}}
+
+// RUN: %clang --target=riscv32-unknown-linux-gnu \
+// RUN: -march=rv32ixsfvfexpa64e -E -dM %s \
+// RUN: -o - | FileCheck --check-prefix=CHECK-XSFVFEXPA64E-EXT %s
+// RUN: %clang --target=riscv64-unknown-linux-gnu \
+// RUN: -march=rv64ixsfvfexpa64e -E -dM %s \
+// RUN: -o - | FileCheck --check-prefix=CHECK-XSFVFEXPA64E-EXT %s
+// CHECK-XSFVFEXPA64E-EXT: __riscv_xsfvfexpa64e 2000{{$}}
+
+// RUN: %clang --target=riscv32-unknown-linux-gnu \
// RUN: -march=rv32ixsfvfnrclipxfqf -E -dM %s \
// RUN: -o - | FileCheck --check-prefix=CHECK-XSFVFNRCLIPXFQF-EXT %s
// RUN: %clang --target=riscv64-unknown-linux-gnu \
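The expected values above follow the usual RISC-V extension-macro encoding of major * 1000000 + minor * 1000, so version 0.5 appears as 5000 and 0.2 as 2000 (compare __riscv_xsfvcp at 1000000 for version 1.0). A minimal hedged C sketch of gating on one of these macros:

/* Minimal sketch, not part of the test: HAVE_SIFIVE_VFEXPA is a hypothetical
 * feature macro used only for illustration. */
#if defined(__riscv_xsfvfexpa) && __riscv_xsfvfexpa >= 2000 /* version >= 0.2 */
#  define HAVE_SIFIVE_VFEXPA 1
#else
#  define HAVE_SIFIVE_VFEXPA 0
#endif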
diff --git a/clang/test/lit.cfg.py b/clang/test/lit.cfg.py
index 29088ef..52b275c 100644
--- a/clang/test/lit.cfg.py
+++ b/clang/test/lit.cfg.py
@@ -18,11 +18,22 @@ from lit.llvm.subst import FindTool
# name: The name of this test suite.
config.name = "Clang"
+# TODO: Consolidate the logic for turning on the internal shell by default for all LLVM test suites.
+# See https://github.com/llvm/llvm-project/issues/106636 for more details.
+#
+# We prefer the lit internal shell, which provides a better user experience on
+# failures and is faster, unless the user explicitly disables it by setting the
+# LIT_USE_INTERNAL_SHELL=0 environment variable.
+use_lit_shell = True
+lit_shell_env = os.environ.get("LIT_USE_INTERNAL_SHELL")
+if lit_shell_env:
+ use_lit_shell = lit.util.pythonize_bool(lit_shell_env)
+
# testFormat: The test format to use to interpret tests.
#
# For now we require '&&' between commands, until they get globally killed and
# the test runner updated.
-config.test_format = lit.formats.ShTest(not llvm_config.use_lit_shell)
+config.test_format = lit.formats.ShTest(execute_external=not use_lit_shell)
# suffixes: A list of file extensions to treat as test files.
config.suffixes = [