Diffstat (limited to 'clang/test/CIR')
-rw-r--r--  clang/test/CIR/CodeGen/bitfields.c          |  94
-rw-r--r--  clang/test/CIR/CodeGen/bitfields.cpp        |  22
-rw-r--r--  clang/test/CIR/CodeGen/bitfields_be.c       |  26
-rw-r--r--  clang/test/CIR/CodeGen/builtin_call.cpp     |  16
-rw-r--r--  clang/test/CIR/CodeGen/complex-cast.cpp     | 328
-rw-r--r--  clang/test/CIR/CodeGen/compound_literal.cpp |  99
-rw-r--r--  clang/test/CIR/CodeGen/destructors.cpp      |   4
7 files changed, 540 insertions, 49 deletions
diff --git a/clang/test/CIR/CodeGen/bitfields.c b/clang/test/CIR/CodeGen/bitfields.c
index 896acbf..869a7c9 100644
--- a/clang/test/CIR/CodeGen/bitfields.c
+++ b/clang/test/CIR/CodeGen/bitfields.c
@@ -87,14 +87,14 @@ int load_field(S* s) {
// CIR: [[TMP0:%.*]] = cir.alloca !cir.ptr<!rec_S>, !cir.ptr<!cir.ptr<!rec_S>>, ["s", init]
// CIR: [[TMP1:%.*]] = cir.load{{.*}} [[TMP0]] : !cir.ptr<!cir.ptr<!rec_S>>, !cir.ptr<!rec_S>
// CIR: [[TMP2:%.*]] = cir.get_member [[TMP1]][0] {name = "c"} : !cir.ptr<!rec_S> -> !cir.ptr<!u64i>
-// CIR: [[TMP3:%.*]] = cir.get_bitfield(#bfi_c, [[TMP2]] : !cir.ptr<!u64i>) -> !s32i
+// CIR: [[TMP3:%.*]] = cir.get_bitfield align(4) (#bfi_c, [[TMP2]] : !cir.ptr<!u64i>) -> !s32i
// LLVM: define dso_local i32 @load_field
// LLVM: [[TMP0:%.*]] = alloca ptr, i64 1, align 8
// LLVM: [[TMP1:%.*]] = alloca i32, i64 1, align 4
// LLVM: [[TMP2:%.*]] = load ptr, ptr [[TMP0]], align 8
// LLVM: [[TMP3:%.*]] = getelementptr %struct.S, ptr [[TMP2]], i32 0, i32 0
-// LLVM: [[TMP4:%.*]] = load i64, ptr [[TMP3]], align 8
+// LLVM: [[TMP4:%.*]] = load i64, ptr [[TMP3]], align 4
// LLVM: [[TMP5:%.*]] = shl i64 [[TMP4]], 15
// LLVM: [[TMP6:%.*]] = ashr i64 [[TMP5]], 47
// LLVM: [[TMP7:%.*]] = trunc i64 [[TMP6]] to i32
@@ -115,13 +115,13 @@ unsigned int load_field_unsigned(A* s) {
//CIR: [[TMP0:%.*]] = cir.alloca !cir.ptr<!rec_A>, !cir.ptr<!cir.ptr<!rec_A>>, ["s", init] {alignment = 8 : i64}
//CIR: [[TMP1:%.*]] = cir.load align(8) [[TMP0]] : !cir.ptr<!cir.ptr<!rec_A>>, !cir.ptr<!rec_A>
//CIR: [[TMP2:%.*]] = cir.get_member [[TMP1]][3] {name = "more_bits"} : !cir.ptr<!rec_A> -> !cir.ptr<!u16i>
-//CIR: [[TMP3:%.*]] = cir.get_bitfield(#bfi_more_bits, [[TMP2]] : !cir.ptr<!u16i>) -> !u32i
+//CIR: [[TMP3:%.*]] = cir.get_bitfield align(1) (#bfi_more_bits, [[TMP2]] : !cir.ptr<!u16i>) -> !u32i
//LLVM: define dso_local i32 @load_field_unsigned
//LLVM: [[TMP0:%.*]] = alloca ptr, i64 1, align 8
//LLVM: [[TMP1:%.*]] = load ptr, ptr [[TMP0]], align 8
//LLVM: [[TMP2:%.*]] = getelementptr %struct.A, ptr [[TMP1]], i32 0, i32 3
-//LLVM: [[TMP3:%.*]] = load i16, ptr [[TMP2]], align 2
+//LLVM: [[TMP3:%.*]] = load i16, ptr [[TMP2]], align 1
//LLVM: [[TMP4:%.*]] = lshr i16 [[TMP3]], 3
//LLVM: [[TMP5:%.*]] = and i16 [[TMP4]], 15
//LLVM: [[TMP6:%.*]] = zext i16 [[TMP5]] to i32
@@ -143,15 +143,15 @@ void store_field() {
// CIR: [[TMP0:%.*]] = cir.alloca !rec_S, !cir.ptr<!rec_S>
// CIR: [[TMP1:%.*]] = cir.const #cir.int<3> : !s32i
// CIR: [[TMP2:%.*]] = cir.get_member [[TMP0]][1] {name = "e"} : !cir.ptr<!rec_S> -> !cir.ptr<!u16i>
-// CIR: cir.set_bitfield(#bfi_e, [[TMP2]] : !cir.ptr<!u16i>, [[TMP1]] : !s32i)
+// CIR: cir.set_bitfield align(4) (#bfi_e, [[TMP2]] : !cir.ptr<!u16i>, [[TMP1]] : !s32i)
// LLVM: define dso_local void @store_field()
// LLVM: [[TMP0:%.*]] = alloca %struct.S, i64 1, align 4
// LLVM: [[TMP1:%.*]] = getelementptr %struct.S, ptr [[TMP0]], i32 0, i32 1
-// LLVM: [[TMP2:%.*]] = load i16, ptr [[TMP1]], align 2
+// LLVM: [[TMP2:%.*]] = load i16, ptr [[TMP1]], align 4
// LLVM: [[TMP3:%.*]] = and i16 [[TMP2]], -32768
// LLVM: [[TMP4:%.*]] = or i16 [[TMP3]], 3
-// LLVM: store i16 [[TMP4]], ptr [[TMP1]], align 2
+// LLVM: store i16 [[TMP4]], ptr [[TMP1]], align 4
// OGCG: define dso_local void @store_field()
// OGCG: [[TMP0:%.*]] = alloca %struct.S, align 4
@@ -169,24 +169,24 @@ void store_bitfield_to_bitfield() {
// CIR: cir.func {{.*@store_bitfield_to_bitfield}}
// CIR: [[TMP0:%.*]] = cir.alloca !rec_S, !cir.ptr<!rec_S>, ["s"] {alignment = 4 : i64}
// CIR: [[TMP1:%.*]] = cir.get_member [[TMP0]][0] {name = "c"} : !cir.ptr<!rec_S> -> !cir.ptr<!u64i>
-// CIR: [[TMP2:%.*]] = cir.get_bitfield(#bfi_c, [[TMP1]] : !cir.ptr<!u64i>) -> !s32i
+// CIR: [[TMP2:%.*]] = cir.get_bitfield align(4) (#bfi_c, [[TMP1]] : !cir.ptr<!u64i>) -> !s32i
// CIR: [[TMP3:%.*]] = cir.get_member [[TMP0]][0] {name = "a"} : !cir.ptr<!rec_S> -> !cir.ptr<!u64i>
-// CIR: [[TMP4:%.*]] = cir.set_bitfield(#bfi_a, [[TMP3]] : !cir.ptr<!u64i>, [[TMP2]] : !s32i) -> !s32i
+// CIR: [[TMP4:%.*]] = cir.set_bitfield align(4) (#bfi_a, [[TMP3]] : !cir.ptr<!u64i>, [[TMP2]] : !s32i) -> !s32i
// LLVM: define dso_local void @store_bitfield_to_bitfield()
// LLVM: [[TMP0:%.*]] = alloca %struct.S, i64 1, align 4
// LLVM: [[TMP1:%.*]] = getelementptr %struct.S, ptr [[TMP0]], i32 0, i32 0
-// LLVM: [[TMP2:%.*]] = load i64, ptr [[TMP1]], align 8
+// LLVM: [[TMP2:%.*]] = load i64, ptr [[TMP1]], align 4
// LLVM: [[TMP3:%.*]] = shl i64 [[TMP2]], 15
// LLVM: [[TMP4:%.*]] = ashr i64 [[TMP3]], 47
// LLVM: [[TMP5:%.*]] = trunc i64 [[TMP4]] to i32
// LLVM: [[TMP6:%.*]] = getelementptr %struct.S, ptr [[TMP0]], i32 0, i32 0
// LLVM: [[TMP7:%.*]] = zext i32 [[TMP5]] to i64
-// LLVM: [[TMP8:%.*]] = load i64, ptr [[TMP6]], align 8
+// LLVM: [[TMP8:%.*]] = load i64, ptr [[TMP6]], align 4
// LLVM: [[TMP9:%.*]] = and i64 [[TMP7]], 15
// LLVM: [[TMP10:%.*]] = and i64 [[TMP8]], -16
// LLVM: [[TMP11:%.*]] = or i64 [[TMP10]], [[TMP9]]
-// LLVM: store i64 [[TMP11]], ptr [[TMP6]], align 8
+// LLVM: store i64 [[TMP11]], ptr [[TMP6]], align 4
// LLVM: [[TMP12:%.*]] = shl i64 [[TMP9]], 60
// LLVM: [[TMP13:%.*]] = ashr i64 [[TMP12]], 60
// LLVM: [[TMP15:%.*]] = trunc i64 [[TMP13]] to i32
@@ -222,16 +222,16 @@ void get_volatile(V* v) {
// CIR: [[TMP1:%.*]] = cir.const #cir.int<3> : !s32i
// CIR: [[TMP2:%.*]] = cir.load align(8) [[TMP0]] : !cir.ptr<!cir.ptr<!rec_V>>, !cir.ptr<!rec_V>
// CIR: [[TMP3:%.*]] = cir.get_member [[TMP2]][0] {name = "b"} : !cir.ptr<!rec_V> -> !cir.ptr<!u64i>
-// CIR: [[TMP4:%.*]] = cir.set_bitfield(#bfi_b, [[TMP3]] : !cir.ptr<!u64i>, [[TMP1]] : !s32i) {is_volatile} -> !s32i
+// CIR: [[TMP4:%.*]] = cir.set_bitfield align(4) (#bfi_b, [[TMP3]] : !cir.ptr<!u64i>, [[TMP1]] : !s32i) {is_volatile} -> !s32i
// LLVM: define dso_local void @get_volatile
// LLVM: [[TMP0:%.*]] = alloca ptr, i64 1, align 8
// LLVM: [[TMP1:%.*]] = load ptr, ptr [[TMP0]], align 8
// LLVM: [[TMP2:%.*]] = getelementptr %struct.V, ptr [[TMP1]], i32 0, i32 0
-// LLVM: [[TMP3:%.*]] = load volatile i64, ptr [[TMP2]], align 8
+// LLVM: [[TMP3:%.*]] = load volatile i64, ptr [[TMP2]], align 4
// LLVM: [[TMP4:%.*]] = and i64 [[TMP3]], -1095216660481
// LLVM: [[TMP5:%.*]] = or i64 [[TMP4]], 12884901888
-// LLVM: store volatile i64 [[TMP5]], ptr [[TMP2]], align 8
+// LLVM: store volatile i64 [[TMP5]], ptr [[TMP2]], align 4
// OGCG: define dso_local void @get_volatile
// OGCG: [[TMP0:%.*]] = alloca ptr, align 8
@@ -249,16 +249,16 @@ void set_volatile(V* v) {
//CIR: [[TMP1:%.*]] = cir.const #cir.int<3> : !s32i
//CIR: [[TMP2:%.*]] = cir.load align(8) [[TMP0]] : !cir.ptr<!cir.ptr<!rec_V>>, !cir.ptr<!rec_V>
//CIR: [[TMP3:%.*]] = cir.get_member [[TMP2]][0] {name = "b"} : !cir.ptr<!rec_V> -> !cir.ptr<!u64i>
-//CIR: [[TMP4:%.*]] = cir.set_bitfield(#bfi_b, [[TMP3]] : !cir.ptr<!u64i>, [[TMP1]] : !s32i) {is_volatile} -> !s32i
+//CIR: [[TMP4:%.*]] = cir.set_bitfield align(4) (#bfi_b, [[TMP3]] : !cir.ptr<!u64i>, [[TMP1]] : !s32i) {is_volatile} -> !s32i
// LLVM: define dso_local void @set_volatile
// LLVM: [[TMP0:%.*]] = alloca ptr, i64 1, align 8
// LLVM: [[TMP1:%.*]] = load ptr, ptr [[TMP0]], align 8
// LLVM: [[TMP2:%.*]] = getelementptr %struct.V, ptr [[TMP1]], i32 0, i32 0
-// LLVM: [[TMP3:%.*]] = load volatile i64, ptr [[TMP2]], align 8
+// LLVM: [[TMP3:%.*]] = load volatile i64, ptr [[TMP2]], align 4
// LLVM: [[TMP4:%.*]] = and i64 [[TMP3]], -1095216660481
// LLVM: [[TMP5:%.*]] = or i64 [[TMP4]], 12884901888
-// LLVM: store volatile i64 [[TMP5]], ptr [[TMP2]], align 8
+// LLVM: store volatile i64 [[TMP5]], ptr [[TMP2]], align 4
// OGCG: define dso_local void @set_volatile
// OGCG: [[TMP0:%.*]] = alloca ptr, align 8
@@ -276,24 +276,24 @@ void unOp(S* s) {
// CIR: [[TMP0:%.*]] = cir.alloca !cir.ptr<!rec_S>, !cir.ptr<!cir.ptr<!rec_S>>, ["s", init] {alignment = 8 : i64}
// CIR: [[TMP1:%.*]] = cir.load align(8) [[TMP0]] : !cir.ptr<!cir.ptr<!rec_S>>, !cir.ptr<!rec_S>
// CIR: [[TMP2:%.*]] = cir.get_member [[TMP1]][0] {name = "d"} : !cir.ptr<!rec_S> -> !cir.ptr<!u64i>
-// CIR: [[TMP3:%.*]] = cir.get_bitfield(#bfi_d, [[TMP2]] : !cir.ptr<!u64i>) -> !s32i
+// CIR: [[TMP3:%.*]] = cir.get_bitfield align(4) (#bfi_d, [[TMP2]] : !cir.ptr<!u64i>) -> !s32i
// CIR: [[TMP4:%.*]] = cir.unary(inc, [[TMP3]]) nsw : !s32i, !s32i
-// CIR: cir.set_bitfield(#bfi_d, [[TMP2]] : !cir.ptr<!u64i>, [[TMP4]] : !s32i)
+// CIR: cir.set_bitfield align(4) (#bfi_d, [[TMP2]] : !cir.ptr<!u64i>, [[TMP4]] : !s32i)
// LLVM: define {{.*@unOp}}
// LLVM: [[TMP0:%.*]] = getelementptr %struct.S, ptr [[LOAD0:%.*]], i32 0, i32 0
-// LLVM: [[TMP1:%.*]] = load i64, ptr [[TMP0]], align 8
+// LLVM: [[TMP1:%.*]] = load i64, ptr [[TMP0]], align 4
// LLVM: [[TMP2:%.*]] = shl i64 [[TMP1]], 13
// LLVM: [[TMP3:%.*]] = ashr i64 [[TMP2]], 62
// LLVM: [[TMP4:%.*]] = trunc i64 [[TMP3]] to i32
// LLVM: [[TMP5:%.*]] = add nsw i32 [[TMP4]], 1
// LLVM: [[TMP6:%.*]] = zext i32 [[TMP5]] to i64
-// LLVM: [[TMP7:%.*]] = load i64, ptr [[TMP0]], align 8
+// LLVM: [[TMP7:%.*]] = load i64, ptr [[TMP0]], align 4
// LLVM: [[TMP8:%.*]] = and i64 [[TMP6]], 3
// LLVM: [[TMP9:%.*]] = shl i64 [[TMP8]], 49
// LLVM: [[TMP10:%.*]] = and i64 [[TMP7]], -1688849860263937
// LLVM: [[TMP11:%.*]] = or i64 [[TMP10]], [[TMP9]]
-// LLVM: store i64 [[TMP11]], ptr [[TMP0]], align 8
+// LLVM: store i64 [[TMP11]], ptr [[TMP0]], align 4
// LLVM: [[TMP12:%.*]] = shl i64 [[TMP8]], 62
// LLVM: [[TMP13:%.*]] = ashr i64 [[TMP12]], 62
// LLVM: [[TMP14:%.*]] = trunc i64 [[TMP13]] to i32
@@ -315,3 +315,51 @@ void unOp(S* s) {
// OGCG: [[TMP12:%.*]] = shl i64 [[TMP8]], 62
// OGCG: [[TMP13:%.*]] = ashr i64 [[TMP12]], 62
// OGCG: [[TMP14:%.*]] = trunc i64 [[TMP13]] to i32
+
+void binOp(S* s) {
+ s->d |= 42;
+}
+
+// CIR: cir.func {{.*@binOp}}
+// CIR: [[TMP0:%.*]] = cir.const #cir.int<42> : !s32i
+// CIR: [[TMP1:%.*]] = cir.get_member {{.*}}[0] {name = "d"} : !cir.ptr<!rec_S> -> !cir.ptr<!u64i>
+// CIR: [[TMP2:%.*]] = cir.get_bitfield align(4) (#bfi_d, [[TMP1]] : !cir.ptr<!u64i>) -> !s32i
+// CIR: [[TMP3:%.*]] = cir.binop(or, [[TMP2]], [[TMP0]]) : !s32i
+// CIR: cir.set_bitfield align(4) (#bfi_d, [[TMP1]] : !cir.ptr<!u64i>, [[TMP3]] : !s32i)
+
+// LLVM: define {{.*@binOp}}
+// LLVM: [[TMP0:%.*]] = load ptr, ptr {{.*}}, align 8
+// LLVM: [[TMP1:%.*]] = getelementptr %struct.S, ptr [[TMP0]], i32 0, i32 0
+// LLVM: [[TMP2:%.*]] = load i64, ptr [[TMP1]], align 4
+// LLVM: [[TMP3:%.*]] = shl i64 [[TMP2]], 13
+// LLVM: [[TMP4:%.*]] = ashr i64 [[TMP3]], 62
+// LLVM: [[TMP5:%.*]] = trunc i64 [[TMP4]] to i32
+// LLVM: [[TMP6:%.*]] = or i32 [[TMP5]], 42
+// LLVM: [[TMP7:%.*]] = zext i32 [[TMP6]] to i64
+// LLVM: [[TMP8:%.*]] = load i64, ptr [[TMP1]], align 4
+// LLVM: [[TMP9:%.*]] = and i64 [[TMP7]], 3
+// LLVM: [[TMP10:%.*]] = shl i64 [[TMP9]], 49
+// LLVM: [[TMP11:%.*]] = and i64 [[TMP8]], -1688849860263937
+// LLVM: [[TMP12:%.*]] = or i64 [[TMP11]], [[TMP10]]
+// LLVM: store i64 [[TMP12]], ptr [[TMP1]], align 4
+// LLVM: [[TMP13:%.*]] = shl i64 [[TMP9]], 62
+// LLVM: [[TMP14:%.*]] = ashr i64 [[TMP13]], 62
+// LLVM: [[TMP15:%.*]] = trunc i64 [[TMP14]] to i32
+
+// OGCG: define {{.*@binOp}}
+// OGCG: [[TMP0:%.*]] = load ptr, ptr %s.addr, align 8
+// OGCG: [[TMP1:%.*]] = load i64, ptr [[TMP0]], align 4
+// OGCG: [[TMP2:%.*]] = shl i64 [[TMP1]], 13
+// OGCG: [[TMP3:%.*]] = ashr i64 [[TMP2]], 62
+// OGCG: [[TMP4:%.*]] = trunc i64 [[TMP3]] to i32
+// OGCG: [[TMP5:%.*]] = or i32 [[TMP4]], 42
+// OGCG: [[TMP6:%.*]] = zext i32 [[TMP5]] to i64
+// OGCG: [[TMP7:%.*]] = load i64, ptr [[TMP0]], align 4
+// OGCG: [[TMP8:%.*]] = and i64 [[TMP6]], 3
+// OGCG: [[TMP9:%.*]] = shl i64 [[TMP8]], 49
+// OGCG: [[TMP10:%.*]] = and i64 [[TMP7]], -1688849860263937
+// OGCG: [[TMP11:%.*]] = or i64 [[TMP10]], [[TMP9]]
+// OGCG: store i64 [[TMP11]], ptr [[TMP0]], align 4
+// OGCG: [[TMP12:%.*]] = shl i64 [[TMP8]], 62
+// OGCG: [[TMP13:%.*]] = ashr i64 [[TMP12]], 62
+// OGCG: [[TMP14:%.*]] = trunc i64 [[TMP13]] to i32
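
The new binOp checks exercise the same read-modify-write sequence as unOp, just with an or in the middle. As a plain C++ illustration of the pattern the LLVM lines verify (a hedged sketch, not the test's actual struct S; it assumes a 2-bit signed field at bit offset 49 of a 64-bit unit, which is the layout implied by the shift amounts and the ~(3 << 49) = -1688849860263937 mask):

#include <cstdint>

// Sketch only: manual lowering of `s->d |= 42` for a hypothetical 2-bit
// signed field 'd' at bit offset 49 of a 64-bit storage unit.
static int32_t bitfield_or_d(uint64_t *storage, int32_t rhs) {
  // get_bitfield: shl 13 then ashr 62 sign-extends the 2-bit field
  // (relies on arithmetic right shift, as on all mainstream targets).
  int32_t old = (int32_t)((int64_t)(*storage << 13) >> 62);
  uint64_t bits = (uint64_t)(uint32_t)(old | rhs) & 3u;  // and i64 ..., 3
  // set_bitfield: clear the field, then insert the shifted new bits.
  *storage = (*storage & ~(3ull << 49)) | (bits << 49);
  // Value of the assignment expression: the stored field, re-extended.
  return (int32_t)((int64_t)(bits << 62) >> 62);
}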
diff --git a/clang/test/CIR/CodeGen/bitfields.cpp b/clang/test/CIR/CodeGen/bitfields.cpp
index 6715ebf..7650e0b 100644
--- a/clang/test/CIR/CodeGen/bitfields.cpp
+++ b/clang/test/CIR/CodeGen/bitfields.cpp
@@ -39,14 +39,14 @@ int load_field(S* s) {
// CIR: [[TMP0:%.*]] = cir.alloca !cir.ptr<!rec_S>, !cir.ptr<!cir.ptr<!rec_S>>, ["s", init]
// CIR: [[TMP1:%.*]] = cir.load{{.*}} [[TMP0]] : !cir.ptr<!cir.ptr<!rec_S>>, !cir.ptr<!rec_S>
// CIR: [[TMP2:%.*]] = cir.get_member [[TMP1]][0] {name = "c"} : !cir.ptr<!rec_S> -> !cir.ptr<!u64i>
-// CIR: [[TMP3:%.*]] = cir.get_bitfield(#bfi_c, [[TMP2]] : !cir.ptr<!u64i>) -> !s32i
+// CIR: [[TMP3:%.*]] = cir.get_bitfield align(4) (#bfi_c, [[TMP2]] : !cir.ptr<!u64i>) -> !s32i
// LLVM: define dso_local i32 @_Z10load_fieldP1S
// LLVM: [[TMP0:%.*]] = alloca ptr, i64 1, align 8
// LLVM: [[TMP1:%.*]] = alloca i32, i64 1, align 4
// LLVM: [[TMP2:%.*]] = load ptr, ptr [[TMP0]], align 8
// LLVM: [[TMP3:%.*]] = getelementptr %struct.S, ptr [[TMP2]], i32 0, i32 0
-// LLVM: [[TMP4:%.*]] = load i64, ptr [[TMP3]], align 8
+// LLVM: [[TMP4:%.*]] = load i64, ptr [[TMP3]], align 4
// LLVM: [[TMP5:%.*]] = shl i64 [[TMP4]], 15
// LLVM: [[TMP6:%.*]] = ashr i64 [[TMP5]], 47
// LLVM: [[TMP7:%.*]] = trunc i64 [[TMP6]] to i32
@@ -67,15 +67,15 @@ void store_field() {
// CIR: [[TMP0:%.*]] = cir.alloca !rec_S, !cir.ptr<!rec_S>
// CIR: [[TMP1:%.*]] = cir.const #cir.int<3> : !s32i
// CIR: [[TMP2:%.*]] = cir.get_member [[TMP0]][0] {name = "a"} : !cir.ptr<!rec_S> -> !cir.ptr<!u64i>
-// CIR: cir.set_bitfield(#bfi_a, [[TMP2]] : !cir.ptr<!u64i>, [[TMP1]] : !s32i)
+// CIR: cir.set_bitfield align(4) (#bfi_a, [[TMP2]] : !cir.ptr<!u64i>, [[TMP1]] : !s32i)
// LLVM: define dso_local void @_Z11store_fieldv
// LLVM: [[TMP0:%.*]] = alloca %struct.S, i64 1, align 4
// LLVM: [[TMP1:%.*]] = getelementptr %struct.S, ptr [[TMP0]], i32 0, i32 0
-// LLVM: [[TMP2:%.*]] = load i64, ptr [[TMP1]], align 8
+// LLVM: [[TMP2:%.*]] = load i64, ptr [[TMP1]], align 4
// LLVM: [[TMP3:%.*]] = and i64 [[TMP2]], -16
// LLVM: [[TMP4:%.*]] = or i64 [[TMP3]], 3
-// LLVM: store i64 [[TMP4]], ptr [[TMP1]], align 8
+// LLVM: store i64 [[TMP4]], ptr [[TMP1]], align 4
// OGCG: define dso_local void @_Z11store_fieldv()
// OGCG: [[TMP0:%.*]] = alloca %struct.S, align 4
@@ -93,25 +93,25 @@ void store_bitfield_to_bitfield(S* s) {
// CIR: [[TMP1:%.*]] = cir.const #cir.int<3> : !s32i
// CIR: [[TMP2:%.*]] = cir.load align(8) [[TMP0]] : !cir.ptr<!cir.ptr<!rec_S>>, !cir.ptr<!rec_S>
// CIR: [[TMP3:%.*]] = cir.get_member [[TMP2]][0] {name = "b"} : !cir.ptr<!rec_S> -> !cir.ptr<!u64i>
-// CIR: [[TMP4:%.*]] = cir.set_bitfield(#bfi_b, [[TMP3]] : !cir.ptr<!u64i>, [[TMP1]] : !s32i) -> !s32i
+// CIR: [[TMP4:%.*]] = cir.set_bitfield align(4) (#bfi_b, [[TMP3]] : !cir.ptr<!u64i>, [[TMP1]] : !s32i) -> !s32i
// CIR: [[TMP5:%.*]] = cir.load align(8) [[TMP0]] : !cir.ptr<!cir.ptr<!rec_S>>, !cir.ptr<!rec_S>
// CIR: [[TMP6:%.*]] = cir.get_member [[TMP5]][0] {name = "a"} : !cir.ptr<!rec_S> -> !cir.ptr<!u64i>
-// CIR: [[TMP7:%.*]] = cir.set_bitfield(#bfi_a, [[TMP6]] : !cir.ptr<!u64i>, [[TMP4]] : !s32i) -> !s32i
+// CIR: [[TMP7:%.*]] = cir.set_bitfield align(4) (#bfi_a, [[TMP6]] : !cir.ptr<!u64i>, [[TMP4]] : !s32i) -> !s32i
// LLVM: define dso_local void @_Z26store_bitfield_to_bitfieldP1S
// LLVM: [[TMP0:%.*]] = alloca ptr, i64 1, align 8
// LLVM: [[TMP1:%.*]] = load ptr, ptr [[TMP0]], align 8
// LLVM: [[TMP2:%.*]] = getelementptr %struct.S, ptr [[TMP1]], i32 0, i32 0
-// LLVM: [[TMP3:%.*]] = load i64, ptr [[TMP2]], align 8
+// LLVM: [[TMP3:%.*]] = load i64, ptr [[TMP2]], align 4
// LLVM: [[TMP4:%.*]] = and i64 [[TMP3]], -2147483633
// LLVM: [[TMP5:%.*]] = or i64 [[TMP4]], 48
-// LLVM: store i64 [[TMP5]], ptr [[TMP2]], align 8
+// LLVM: store i64 [[TMP5]], ptr [[TMP2]], align 4
// LLVM: [[TMP6:%.*]] = load ptr, ptr [[TMP0]], align 8
// LLVM: [[TMP7:%.*]] = getelementptr %struct.S, ptr [[TMP6]], i32 0, i32 0
-// LLVM: [[TMP8:%.*]] = load i64, ptr [[TMP7]], align 8
+// LLVM: [[TMP8:%.*]] = load i64, ptr [[TMP7]], align 4
// LLVM: [[TMP9:%.*]] = and i64 [[TMP8]], -16
// LLVM: [[TMP10:%.*]] = or i64 [[TMP9]], 3
-// LLVM: store i64 [[TMP10]], ptr [[TMP7]], align 8
+// LLVM: store i64 [[TMP10]], ptr [[TMP7]], align 4
// OGCG: define dso_local void @_Z26store_bitfield_to_bitfieldP1S
// OGCG: [[TMP0:%.*]] = alloca ptr, align 8
diff --git a/clang/test/CIR/CodeGen/bitfields_be.c b/clang/test/CIR/CodeGen/bitfields_be.c
index 6133927..77741ba 100644
--- a/clang/test/CIR/CodeGen/bitfields_be.c
+++ b/clang/test/CIR/CodeGen/bitfields_be.c
@@ -25,7 +25,7 @@ int init(S* s) {
//CIR: [[TMP0:%.*]] = cir.alloca !cir.ptr<!rec_S>, !cir.ptr<!cir.ptr<!rec_S>>, ["s", init] {alignment = 8 : i64}
//CIR: [[TMP1:%.*]] = cir.load align(8) [[TMP0]] : !cir.ptr<!cir.ptr<!rec_S>>, !cir.ptr<!rec_S>
//CIR: [[TMP2:%.*]] = cir.get_member [[TMP1]][0] {name = "c"} : !cir.ptr<!rec_S> -> !cir.ptr<!u32i>
-//CIR: [[TMP3:%.*]] = cir.get_bitfield(#bfi_c, [[TMP2]] : !cir.ptr<!u32i>) -> !s32i
+//CIR: [[TMP3:%.*]] = cir.get_bitfield align(4) (#bfi_c, [[TMP2]] : !cir.ptr<!u32i>) -> !s32i
//LLVM: define dso_local i32 @init(ptr %0) {
//LLVM: [[TMP0:%.*]] = alloca ptr, i64 1, align 8
@@ -57,7 +57,7 @@ void load(S* s) {
// CIR: %[[MIN1:.*]] = cir.unary(minus, %[[CONST1]]) nsw : !s32i, !s32i
// CIR: %[[VAL0:.*]] = cir.load align(8) %[[PTR0]] : !cir.ptr<!cir.ptr<!rec_S>>, !cir.ptr<!rec_S>
// CIR: %[[GET0:.*]] = cir.get_member %[[VAL0]][0] {name = "a"} : !cir.ptr<!rec_S> -> !cir.ptr<!u32i>
-// CIR: %[[SET0:.*]] = cir.set_bitfield(#bfi_a, %[[GET0]] : !cir.ptr<!u32i>, %[[MIN1]] : !s32i) -> !s32i
+// CIR: %[[SET0:.*]] = cir.set_bitfield align(4) (#bfi_a, %[[GET0]] : !cir.ptr<!u32i>, %[[MIN1]] : !s32i) -> !s32i
// LLVM: define dso_local void @load
// LLVM: %[[PTR0:.*]] = load ptr
@@ -65,50 +65,50 @@ void load(S* s) {
// LLVM: %[[VAL0:.*]] = load i32, ptr %[[GET0]], align 4
// LLVM: %[[AND0:.*]] = and i32 %[[VAL0]], 268435455
// LLVM: %[[OR0:.*]] = or i32 %[[AND0]], -1073741824
-// LLVM: store i32 %[[OR0]], ptr %[[GET0]]
+// LLVM: store i32 %[[OR0]], ptr %[[GET0]], align 4
// OGCG: define dso_local void @load
// OGCG: %[[PTR0:.*]] = load ptr
-// OGCG: %[[VAL0:.*]] = load i32, ptr %[[PTR0]]
+// OGCG: %[[VAL0:.*]] = load i32, ptr %[[PTR0]], align 4
// OGCG: %[[AND0:.*]] = and i32 %[[VAL0]], 268435455
// OGCG: %[[OR0:.*]] = or i32 %[[AND0]], -1073741824
-// OGCG: store i32 %[[OR0]], ptr %[[PTR0]]
+// OGCG: store i32 %[[OR0]], ptr %[[PTR0]], align 4
// field 'b'
// CIR: %[[CONST2:.*]] = cir.const #cir.int<42> : !s32i
// CIR: %[[VAL1:.*]] = cir.load align(8) %[[PTR0]] : !cir.ptr<!cir.ptr<!rec_S>>, !cir.ptr<!rec_S>
// CIR: %[[GET1:.*]] = cir.get_member %[[VAL1]][0] {name = "b"} : !cir.ptr<!rec_S> -> !cir.ptr<!u32i>
-// CIR: %[[SET1:.*]] = cir.set_bitfield(#bfi_b, %[[GET1]] : !cir.ptr<!u32i>, %[[CONST2]] : !s32i) -> !s32i
+// CIR: %[[SET1:.*]] = cir.set_bitfield align(4) (#bfi_b, %[[GET1]] : !cir.ptr<!u32i>, %[[CONST2]] : !s32i) -> !s32i
// LLVM: %[[PTR1:.*]] = load ptr
// LLVM: %[[GET1:.*]] = getelementptr %struct.S, ptr %[[PTR1]], i32 0, i32 0
// LLVM: %[[VAL1:.*]] = load i32, ptr %[[GET1]], align 4
// LLVM: %[[AND1:.*]] = and i32 %[[VAL1]], -268304385
// LLVM: %[[OR1:.*]] = or i32 %[[AND1]], 5505024
-// LLVM: store i32 %[[OR1]], ptr %[[GET1]]
+// LLVM: store i32 %[[OR1]], ptr %[[GET1]], align 4
// OGCG: %[[PTR1:.*]] = load ptr
-// OGCG: %[[VAL1:.*]] = load i32, ptr %[[PTR1]]
+// OGCG: %[[VAL1:.*]] = load i32, ptr %[[PTR1]], align 4
// OGCG: %[[AND1:.*]] = and i32 %[[VAL1]], -268304385
// OGCG: %[[OR1:.*]] = or i32 %[[AND1]], 5505024
-// OGCG: store i32 %[[OR1]], ptr %[[PTR1]]
+// OGCG: store i32 %[[OR1]], ptr %[[PTR1]], align 4
// field 'c'
// CIR: %[[CONST3:.*]] = cir.const #cir.int<12345> : !s32i
// CIR: %[[MIN2:.*]] = cir.unary(minus, %[[CONST3]]) nsw : !s32i, !s32i
// CIR: %[[VAL2:.*]] = cir.load align(8) %[[PTR0]] : !cir.ptr<!cir.ptr<!rec_S>>, !cir.ptr<!rec_S>
// CIR: %[[GET2:.*]] = cir.get_member %[[VAL2]][0] {name = "c"} : !cir.ptr<!rec_S> -> !cir.ptr<!u32i>
-// CIR: %[[SET2:.*]] = cir.set_bitfield(#bfi_c, %[[GET2]] : !cir.ptr<!u32i>, %[[MIN2]] : !s32i) -> !s32i
+// CIR: %[[SET2:.*]] = cir.set_bitfield align(4) (#bfi_c, %[[GET2]] : !cir.ptr<!u32i>, %[[MIN2]] : !s32i) -> !s32i
// LLVM: %[[PTR2:.*]] = load ptr
// LLVM: %[[GET2:.*]] = getelementptr %struct.S, ptr %[[PTR2]], i32 0, i32 0
// LLVM: %[[VAL2:.*]] = load i32, ptr %[[GET2]], align 4
// LLVM: %[[AND2:.*]] = and i32 %[[VAL2]], -131072
// LLVM: %[[OR2:.*]] = or i32 %[[AND2]], 118727
-// LLVM: store i32 %[[OR2]], ptr %[[GET2]]
+// LLVM: store i32 %[[OR2]], ptr %[[GET2]], align 4
// OGCG: %[[PTR2:.*]] = load ptr
-// OGCG: %[[VAL2:.*]] = load i32, ptr %[[PTR2]]
+// OGCG: %[[VAL2:.*]] = load i32, ptr %[[PTR2]], align 4
// OGCG: %[[AND2:.*]] = and i32 %[[VAL2]], -131072
// OGCG: %[[OR2:.*]] = or i32 %[[AND2]], 118727
-// OGCG: store i32 %[[OR2]], ptr %[[PTR2]]
+// OGCG: store i32 %[[OR2]], ptr %[[PTR2]], align 4
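
On big-endian targets the allocation order flips: the first declared field lands in the most significant bits of its storage unit, which is why the clear mask above keeps the low 28 bits. A hedged sketch consistent with those constants (hypothetical 4-bit field at bits 28..31; storing -4 yields the 0xC0000000 insert, i.e. or i32 ..., -1073741824):

#include <cstdint>

// Sketch only: the big-endian store to field 'a' checked above.
static void set_a_be(uint32_t *storage, int32_t value) {
  uint32_t bits = (uint32_t)value & 0xFu;  // truncate to the 4-bit field
  *storage = (*storage & 0x0FFFFFFFu)      // and i32 ..., 268435455
           | (bits << 28);                 // -4 becomes 0xC0000000 here
}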
diff --git a/clang/test/CIR/CodeGen/builtin_call.cpp b/clang/test/CIR/CodeGen/builtin_call.cpp
index ad0e478..d9a7068 100644
--- a/clang/test/CIR/CodeGen/builtin_call.cpp
+++ b/clang/test/CIR/CodeGen/builtin_call.cpp
@@ -111,6 +111,22 @@ void assume(bool arg) {
// OGCG: call void @llvm.assume(i1 %{{.+}})
// OGCG: }
+void assume_separate_storage(void *p1, void *p2) {
+ __builtin_assume_separate_storage(p1, p2);
+}
+
+// CIR: cir.func{{.*}} @_Z23assume_separate_storagePvS_
+// CIR: cir.assume_separate_storage %{{.+}}, %{{.+}} : !cir.ptr<!void>
+// CIR: }
+
+// LLVM: define {{.*}}void @_Z23assume_separate_storagePvS_
+// LLVM: call void @llvm.assume(i1 true) [ "separate_storage"(ptr %{{.+}}, ptr %{{.+}}) ]
+// LLVM: }
+
+// OGCG: define {{.*}}void @_Z23assume_separate_storagePvS_
+// OGCG: call void @llvm.assume(i1 true) [ "separate_storage"(ptr %{{.+}}, ptr %{{.+}}) ]
+// OGCG: }
+
void expect(int x, int y) {
__builtin_expect(x, y);
}
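
__builtin_assume_separate_storage asserts to the optimizer that its two pointer arguments point into disjoint allocations; as the checks show, it lowers to an llvm.assume with a "separate_storage" operand bundle. A minimal usage sketch (hypothetical function, not part of the test):

// With the no-alias promise, the loads from src can be reordered or
// vectorized across the stores to dst.
void copy4(int *dst, int *src) {
  __builtin_assume_separate_storage(dst, src);
  for (int i = 0; i < 4; ++i)
    dst[i] = src[i];
}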
diff --git a/clang/test/CIR/CodeGen/complex-cast.cpp b/clang/test/CIR/CodeGen/complex-cast.cpp
new file mode 100644
index 0000000..94f47e4
--- /dev/null
+++ b/clang/test/CIR/CodeGen/complex-cast.cpp
@@ -0,0 +1,328 @@
+// RUN: %clang_cc1 -triple x86_64-unknown-linux-gnu -fclangir -emit-cir -mmlir --mlir-print-ir-before=cir-canonicalize -o %t.cir %s 2>&1 | FileCheck --check-prefix=CIR-BEFORE %s
+// RUN: %clang_cc1 -triple x86_64-unknown-linux-gnu -fclangir -emit-cir -mmlir --mlir-print-ir-after=cir-lowering-prepare -o %t.cir %s 2>&1 | FileCheck --check-prefixes=CIR-AFTER %s
+// RUN: %clang_cc1 -triple x86_64-unknown-linux-gnu -Wno-unused-value -fclangir -emit-llvm %s -o %t-cir.ll
+// RUN: FileCheck --input-file=%t-cir.ll %s -check-prefix=LLVM
+// RUN: %clang_cc1 -triple x86_64-unknown-linux-gnu -Wno-unused-value -emit-llvm %s -o %t.ll
+// RUN: FileCheck --input-file=%t.ll %s -check-prefix=OGCG
+
+double _Complex cd;
+float _Complex cf;
+int _Complex ci;
+short _Complex cs;
+double sd;
+int si;
+bool b;
+
+void scalar_to_complex() {
+ cd = sd;
+ ci = si;
+ cd = si;
+ ci = sd;
+}
+
+// CIR-BEFORE: %[[FP_TO_COMPLEX:.*]] = cir.cast(float_to_complex, %{{.*}} : !cir.double), !cir.complex<!cir.double>
+
+// CIR-AFTER: %[[REAL:.*]] = cir.load{{.*}} %{{.*}} : !cir.ptr<!cir.double>, !cir.double
+// CIR-AFTER-NEXT: %[[IMAG:.*]] = cir.const #cir.fp<0.000000e+00> : !cir.double
+// CIR-AFTER-NEXT: %{{.*}} = cir.complex.create %[[REAL]], %[[IMAG]] : !cir.double -> !cir.complex<!cir.double>
+
+// LLVM: %[[REAL:.*]] = load double, ptr {{.*}}, align 8
+// LLVM-NEXT: %[[TMP:.*]] = insertvalue { double, double } undef, double %[[REAL]], 0
+// LLVM-NEXT: %[[COMPLEX:.*]] = insertvalue { double, double } %[[TMP]], double 0.000000e+00, 1
+// LLVM-NEXT: store { double, double } %[[COMPLEX]], ptr {{.*}}, align 8
+
+// OGCG: %[[REAL:.*]] = load double, ptr {{.*}}, align 8
+// OGCG: store double %[[REAL]], ptr {{.*}}, align 8
+// OGCG: store double 0.000000e+00, ptr getelementptr inbounds nuw ({ double, double }, ptr @cd, i32 0, i32 1), align 8
+
+// CIR-BEFORE: %[[INT_TO_COMPLEX:.*]] = cir.cast(int_to_complex, %{{.*}} : !s32i), !cir.complex<!s32i>
+
+// CIR-AFTER: %[[REAL:.*]] = cir.load{{.*}} %{{.*}} : !cir.ptr<!s32i>, !s32i
+// CIR-AFTER-NEXT: %[[IMAG:.*]] = cir.const #cir.int<0> : !s32i
+// CIR-AFTER-NEXT: %{{.*}} = cir.complex.create %[[REAL]], %[[IMAG]] : !s32i -> !cir.complex<!s32i>
+
+// LLVM: %[[REAL:.*]] = load i32, ptr {{.*}}, align 4
+// LLVM-NEXT: %[[TMP:.*]] = insertvalue { i32, i32 } undef, i32 %[[REAL]], 0
+// LLVM-NEXT: %[[COMPLEX:.*]] = insertvalue { i32, i32 } %[[TMP]], i32 0, 1
+// LLVM-NEXT: store { i32, i32 } %[[COMPLEX]], ptr {{.*}}, align 4
+
+// OGCG: %[[REAL:.*]] = load i32, ptr {{.*}}, align 4
+// OGCG: store i32 %[[REAL]], ptr {{.*}}, align 4
+// OGCG: store i32 0, ptr getelementptr inbounds nuw ({ i32, i32 }, ptr @ci, i32 0, i32 1), align 4
+
+// CIR-BEFORE: %[[INT_TO_FP:.*]] = cir.cast(int_to_float, %{{.*}} : !s32i), !cir.double
+// CIR-BEFORE: %[[FP_TO_COMPLEX:.*]] = cir.cast(float_to_complex, %[[INT_TO_FP]] : !cir.double), !cir.complex<!cir.double>
+
+// CIR-AFTER: %[[TMP:.*]] = cir.load{{.*}} %{{.*}} : !cir.ptr<!s32i>, !s32i
+// CIR-AFTER-NEXT: %[[REAL:.*]] = cir.cast(int_to_float, %[[TMP]] : !s32i), !cir.double
+// CIR-AFTER-NEXT: %[[IMAG:.*]] = cir.const #cir.fp<0.000000e+00> : !cir.double
+// CIR-AFTER-NEXT: %{{.*}} = cir.complex.create %[[REAL]], %[[IMAG]] : !cir.double -> !cir.complex<!cir.double>
+
+// LLVM: %[[TMP:.*]] = load i32, ptr {{.*}}, align 4
+// LLVM-NEXT: %[[REAL:.*]] = sitofp i32 %[[TMP]] to double
+// LLVM-NEXT: %[[TMP_2:.*]] = insertvalue { double, double } undef, double %[[REAL]], 0
+// LLVM-NEXT: %[[COMPLEX:.*]] = insertvalue { double, double } %[[TMP_2]], double 0.000000e+00, 1
+// LLVM-NEXT: store { double, double } %[[COMPLEX]], ptr {{.*}}, align 8
+
+// OGCG: %[[TMP:.*]] = load i32, ptr {{.*}}, align 4
+// OGCG: %[[REAL:.*]] = sitofp i32 %[[TMP]] to double
+// OGCG: store double %[[REAL]], ptr {{.*}}, align 8
+// OGCG: store double 0.000000e+00, ptr getelementptr inbounds nuw ({ double, double }, ptr {{.*}}, i32 0, i32 1), align 8
+
+// CIR-BEFORE: %[[FP_TO_INT:.*]] = cir.cast(float_to_int, %{{.*}} : !cir.double), !s32i
+// CIR-BEFORE: %[[INT_TO_COMPLEX:.*]] = cir.cast(int_to_complex, %[[FP_TO_INT]] : !s32i), !cir.complex<!s32i>
+
+// CIR-AFTER: %[[TMP:.*]] = cir.load{{.*}} %{{.*}} : !cir.ptr<!cir.double>, !cir.double
+// CIR-AFTER-NEXT: %[[REAL:.*]] = cir.cast(float_to_int, %[[TMP]] : !cir.double), !s32i
+// CIR-AFTER-NEXT: %[[IMAG:.*]] = cir.const #cir.int<0> : !s32i
+// CIR-AFTER-NEXT: %{{.*}} = cir.complex.create %[[REAL]], %[[IMAG]] : !s32i -> !cir.complex<!s32i>
+
+// LLVM: %[[TMP:.*]] = load double, ptr {{.*}}, align 8
+// LLVM-NEXT: %[[REAL:.*]] = fptosi double %[[TMP]] to i32
+// LLVM-NEXT: %[[TMP_2:.*]] = insertvalue { i32, i32 } undef, i32 %[[REAL]], 0
+// LLVM-NEXT: %[[COMPLEX:.*]] = insertvalue { i32, i32 } %[[TMP_2]], i32 0, 1
+// LLVM-NEXT: store { i32, i32 } %[[COMPLEX]], ptr {{.*}}, align 4
+
+// OGCG: %[[TMP:.*]] = load double, ptr {{.*}}, align 8
+// OGCG: %[[REAL:.*]] = fptosi double %[[TMP]] to i32
+// OGCG: store i32 %[[REAL]], ptr {{.*}}, align 4
+// OGCG: store i32 0, ptr getelementptr inbounds nuw ({ i32, i32 }, ptr {{.*}}, i32 0, i32 1), align 4
+
+void scalar_to_complex_explicit() {
+ cd = (double _Complex)sd;
+ ci = (int _Complex)si;
+ cd = (double _Complex)si;
+ ci = (int _Complex)sd;
+}
+
+// CIR-BEFORE: %[[FP_TO_COMPLEX:.*]] = cir.cast(float_to_complex, %{{.*}} : !cir.double), !cir.complex<!cir.double>
+
+// CIR-AFTER: %[[REAL:.*]] = cir.load{{.*}} %{{.*}} : !cir.ptr<!cir.double>, !cir.double
+// CIR-AFTER-NEXT: %[[IMAG:.*]] = cir.const #cir.fp<0.000000e+00> : !cir.double
+// CIR-AFTER-NEXT: %{{.*}} = cir.complex.create %[[REAL]], %[[IMAG]] : !cir.double -> !cir.complex<!cir.double>
+
+// LLVM: %[[REAL:.*]] = load double, ptr {{.*}}, align 8
+// LLVM-NEXT: %[[TMP:.*]] = insertvalue { double, double } undef, double %[[REAL]], 0
+// LLVM-NEXT: %[[COMPLEX:.*]] = insertvalue { double, double } %[[TMP]], double 0.000000e+00, 1
+// LLVM-NEXT: store { double, double } %[[COMPLEX]], ptr {{.*}}, align 8
+
+// OGCG: %[[REAL:.*]] = load double, ptr {{.*}}, align 8
+// OGCG: store double %[[REAL]], ptr {{.*}}, align 8
+// OGCG: store double 0.000000e+00, ptr getelementptr inbounds nuw ({ double, double }, ptr @cd, i32 0, i32 1), align 8
+
+// CIR-BEFORE: %[[INT_TO_COMPLEX:.*]] = cir.cast(int_to_complex, %{{.*}} : !s32i), !cir.complex<!s32i>
+
+// CIR-AFTER: %[[REAL:.*]] = cir.load{{.*}} %{{.*}} : !cir.ptr<!s32i>, !s32i
+// CIR-AFTER-NEXT: %[[IMAG:.*]] = cir.const #cir.int<0> : !s32i
+// CIR-AFTER-NEXT: %{{.*}} = cir.complex.create %[[REAL]], %[[IMAG]] : !s32i -> !cir.complex<!s32i>
+
+// LLVM: %[[REAL:.*]] = load i32, ptr {{.*}}, align 4
+// LLVM-NEXT: %[[TMP:.*]] = insertvalue { i32, i32 } undef, i32 %[[REAL]], 0
+// LLVM-NEXT: %[[COMPLEX:.*]] = insertvalue { i32, i32 } %[[TMP]], i32 0, 1
+// LLVM-NEXT: store { i32, i32 } %[[COMPLEX]], ptr {{.*}}, align 4
+
+// OGCG: %[[REAL:.*]] = load i32, ptr {{.*}}, align 4
+// OGCG: store i32 %[[REAL]], ptr {{.*}}, align 4
+// OGCG: store i32 0, ptr getelementptr inbounds nuw ({ i32, i32 }, ptr @ci, i32 0, i32 1), align 4
+
+// CIR-BEFORE: %[[INT_TO_FP:.*]] = cir.cast(int_to_float, %{{.*}} : !s32i), !cir.double
+// CIR-BEFORE: %[[FP_TO_COMPLEX:.*]] = cir.cast(float_to_complex, %[[INT_TO_FP]] : !cir.double), !cir.complex<!cir.double>
+
+// CIR-AFTER: %[[TMP:.*]] = cir.load{{.*}} %{{.*}} : !cir.ptr<!s32i>, !s32i
+// CIR-AFTER-NEXT: %[[REAL:.*]] = cir.cast(int_to_float, %[[TMP]] : !s32i), !cir.double
+// CIR-AFTER-NEXT: %[[IMAG:.*]] = cir.const #cir.fp<0.000000e+00> : !cir.double
+// CIR-AFTER-NEXT: %{{.*}} = cir.complex.create %[[REAL]], %[[IMAG]] : !cir.double -> !cir.complex<!cir.double>
+
+// LLVM: %[[TMP:.*]] = load i32, ptr {{.*}}, align 4
+// LLVM-NEXT: %[[REAL:.*]] = sitofp i32 %[[TMP]] to double
+// LLVM-NEXT: %[[TMP_2:.*]] = insertvalue { double, double } undef, double %[[REAL]], 0
+// LLVM-NEXT: %[[COMPLEX:.*]] = insertvalue { double, double } %[[TMP_2]], double 0.000000e+00, 1
+// LLVM-NEXT: store { double, double } %[[COMPLEX]], ptr {{.*}}, align 8
+
+// OGCG: %[[TMP:.*]] = load i32, ptr {{.*}}, align 4
+// OGCG: %[[REAL:.*]] = sitofp i32 %[[TMP]] to double
+// OGCG: store double %[[REAL]], ptr {{.*}}, align 8
+// OGCG: store double 0.000000e+00, ptr getelementptr inbounds nuw ({ double, double }, ptr {{.*}}, i32 0, i32 1), align 8
+
+// CIR-BEFORE: %[[FP_TO_INT:.*]] = cir.cast(float_to_int, %{{.*}} : !cir.double), !s32i
+// CIR-BEFORE: %[[INT_TO_COMPLEX:.*]] = cir.cast(int_to_complex, %[[FP_TO_INT]] : !s32i), !cir.complex<!s32i>
+
+// CIR-AFTER: %[[TMP:.*]] = cir.load{{.*}} %{{.*}} : !cir.ptr<!cir.double>, !cir.double
+// CIR-AFTER-NEXT: %[[REAL:.*]] = cir.cast(float_to_int, %[[TMP]] : !cir.double), !s32i
+// CIR-AFTER-NEXT: %[[IMAG:.*]] = cir.const #cir.int<0> : !s32i
+// CIR-AFTER-NEXT: %{{.*}} = cir.complex.create %[[REAL]], %[[IMAG]] : !s32i -> !cir.complex<!s32i>
+
+// LLVM: %[[TMP:.*]] = load double, ptr {{.*}}, align 8
+// LLVM-NEXT: %[[REAL:.*]] = fptosi double %[[TMP]] to i32
+// LLVM-NEXT: %[[TMP_2:.*]] = insertvalue { i32, i32 } undef, i32 %[[REAL]], 0
+// LLVM-NEXT: %[[COMPLEX:.*]] = insertvalue { i32, i32 } %[[TMP_2]], i32 0, 1
+// LLVM-NEXT: store { i32, i32 } %[[COMPLEX]], ptr {{.*}}, align 4
+
+// OGCG: %[[TMP:.*]] = load double, ptr {{.*}}, align 8
+// OGCG: %[[REAL:.*]] = fptosi double %[[TMP]] to i32
+// OGCG: store i32 %[[REAL]], ptr {{.*}}, align 4
+// OGCG: store i32 0, ptr getelementptr inbounds nuw ({ i32, i32 }, ptr {{.*}}, i32 0, i32 1), align 4
+
+void complex_to_scalar() {
+ sd = (double)cd;
+ si = (int)ci;
+ sd = (double)ci;
+ si = (int)cd;
+}
+
+// CIR-BEFORE: %[[FP_TO_COMPLEX_REAL:.*]] = cir.cast(float_complex_to_real, %{{.*}} : !cir.complex<!cir.double>), !cir.double
+
+// CIR-AFTER: %{{.*}} = cir.complex.real %{{.*}} : !cir.complex<!cir.double> -> !cir.double
+
+// LLVM: %[[REAL:.*]] = extractvalue { double, double } %{{.*}}, 0
+// LLVM: store double %[[REAL]], ptr {{.*}}, align 8
+
+// OGCG: %[[REAL:.*]] = load double, ptr {{.*}}, align 8
+// OGCG: store double %[[REAL]], ptr {{.*}}, align 8
+
+// CIR-BEFORE: %[[INT_COMPLEX_TO_REAL:.*]] = cir.cast(int_complex_to_real, %{{.*}} : !cir.complex<!s32i>), !s32i
+
+// CIR-AFTER: %{{.*}} = cir.complex.real %{{.*}} : !cir.complex<!s32i> -> !s32i
+
+// LLVM: %[[REAL:.*]] = extractvalue { i32, i32 } %{{.*}}, 0
+// LLVM: store i32 %[[REAL]], ptr {{.*}}, align 4
+
+// OGCG: %[[REAL:.*]] = load i32, ptr {{.*}}, align 4
+// OGCG: store i32 %[[REAL]], ptr {{.*}}, align 4
+
+// CIR-BEFORE: %[[INT_COMPLEX_TO_REAL:.*]] = cir.cast(int_complex_to_real, %{{.*}} : !cir.complex<!s32i>), !s32i
+// CIR-BEFORE: %[[INT_TO_FP:.*]] = cir.cast(int_to_float, %[[INT_COMPLEX_TO_REAL]] : !s32i), !cir.double
+
+// CIR-AFTER: %[[REAL:.*]] = cir.complex.real %{{.*}} : !cir.complex<!s32i> -> !s32i
+// CIR-AFTER-NEXT: %{{.*}} = cir.cast(int_to_float, %[[REAL]] : !s32i), !cir.double
+
+// LLVM: %[[REAL:.*]] = extractvalue { i32, i32 } %{{.+}}, 0
+// LLVM-NEXT: %[[REAL_TO_DOUBLE:.*]] = sitofp i32 %[[REAL]] to double
+// LLVM-NEXT: store double %[[REAL_TO_DOUBLE]], ptr {{.*}}, align 8
+
+// OGCG: %[[REAL:.*]] = load i32, ptr {{.*}}, align 4
+// OGCG: %[[INT_TO_FP:.*]] = sitofp i32 %[[REAL]] to double
+// OGCG: store double %[[INT_TO_FP]], ptr {{.*}}, align 8
+
+// CIR-BEFORE: %[[FP_TO_COMPLEX_REAL:.*]] = cir.cast(float_complex_to_real, %{{.*}} : !cir.complex<!cir.double>), !cir.double
+// CIR-BEFORE: %[[FP_TO_INT:.*]] = cir.cast(float_to_int, %[[FP_TO_COMPLEX_REAL]] : !cir.double), !s32i
+
+// CIR-AFTER: %[[REAL:.*]] = cir.complex.real %{{.*}} : !cir.complex<!cir.double> -> !cir.double
+// CIR-AFTER-NEXT: %{{.*}} = cir.cast(float_to_int, %[[REAL]] : !cir.double), !s32i
+
+// LLVM: %[[REAL:.*]] = extractvalue { double, double } %{{.+}}, 0
+// LLVM-NEXT: %[[REAL_TO_INT:.*]] = fptosi double %[[REAL]] to i32
+// LLVM-NEXT: store i32 %[[REAL_TO_INT]], ptr {{.*}}, align 4
+
+// OGCG: %[[REAL:.*]] = load double, ptr {{.*}}, align 8
+// OGCG: %[[FP_TO_INT:.*]] = fptosi double %[[REAL]] to i32
+// OGCG: store i32 %[[FP_TO_INT]], ptr {{.*}}, align 4
+
+void complex_to_bool() {
+ b = (bool)cd;
+ b = (bool)ci;
+}
+
+// CIR-BEFORE: %[[FP_COMPLEX_TO_BOOL:.*]] = cir.cast(float_complex_to_bool, %{{.*}} : !cir.complex<!cir.double>), !cir.bool
+
+// CIR-AFTER: %[[REAL:.*]] = cir.complex.real %{{.*}} : !cir.complex<!cir.double> -> !cir.double
+// CIR-AFTER-NEXT: %[[IMAG:.*]] = cir.complex.imag %{{.*}} : !cir.complex<!cir.double> -> !cir.double
+// CIR-AFTER-NEXT: %[[REAL_TO_BOOL:.*]] = cir.cast(float_to_bool, %[[REAL]] : !cir.double), !cir.bool
+// CIR-AFTER-NEXT: %[[IMAG_TO_BOOL:.*]] = cir.cast(float_to_bool, %[[IMAG]] : !cir.double), !cir.bool
+// CIR-AFTER-NEXT: %[[CONST_TRUE:.*]] = cir.const #true
+// CIR-AFTER-NEXT: %{{.*}} = cir.select if %[[REAL_TO_BOOL]] then %[[CONST_TRUE]] else %[[IMAG_TO_BOOL]] : (!cir.bool, !cir.bool, !cir.bool) -> !cir.bool
+
+// LLVM: %[[REAL:.*]] = extractvalue { double, double } %{{.*}}, 0
+// LLVM-NEXT: %[[IMAG:.*]] = extractvalue { double, double } %{{.*}}, 1
+// LLVM-NEXT: %[[REAL_TO_BOOL:.*]] = fcmp une double %[[REAL]], 0.000000e+00
+// LLVM-NEXT: %[[IMAG_TO_BOOL:.*]] = fcmp une double %[[IMAG]], 0.000000e+00
+// LLVM-NEXT: %[[OR:.*]] = or i1 %[[REAL_TO_BOOL]], %[[IMAG_TO_BOOL]]
+// LLVM-NEXT: %[[RESULT:.*]] = zext i1 %[[OR]] to i8
+// LLVM-NEXT: store i8 %[[RESULT]], ptr {{.*}}, align 1
+
+// OGCG: %[[REAL:.*]] = load double, ptr {{.*}}, align 8
+// OGCG: %[[IMAG:.*]] = load double, ptr getelementptr inbounds nuw ({ double, double }, ptr {{.*}}, i32 0, i32 1), align 8
+// OGCG: %[[REAL_TO_BOOL:.*]] = fcmp une double %[[REAL]], 0.000000e+00
+// OGCG: %[[IMAG_TO_BOOL:.*]] = fcmp une double %[[IMAG]], 0.000000e+00
+// OGCG: %[[COMPLEX_TO_BOOL:.*]] = or i1 %[[REAL_TO_BOOL]], %[[IMAG_TO_BOOL]]
+// OGCG: %[[BOOL_TO_INT:.*]] = zext i1 %[[COMPLEX_TO_BOOL]] to i8
+// OGCG: store i8 %[[BOOL_TO_INT]], ptr {{.*}}, align 1
+
+// CIR-BEFORE: %[[INT_COMPLEX_TO_BOOL:.*]] = cir.cast(int_complex_to_bool, %{{.*}} : !cir.complex<!s32i>), !cir.bool
+
+// CIR-AFTER: %[[REAL:.*]] = cir.complex.real %{{.*}} : !cir.complex<!s32i> -> !s32i
+// CIR-AFTER-NEXT: %[[IMAG:.*]] = cir.complex.imag %{{.*}} : !cir.complex<!s32i> -> !s32i
+// CIR-AFTER-NEXT: %[[REAL_TO_BOOL:.*]] = cir.cast(int_to_bool, %[[REAL]] : !s32i), !cir.bool
+// CIR-AFTER-NEXT: %[[IMAG_TO_BOOL:.*]] = cir.cast(int_to_bool, %[[IMAG]] : !s32i), !cir.bool
+// CIR-AFTER-NEXT: %[[CONST_TRUE:.*]] = cir.const #true
+// CIR-AFTER-NEXT: %{{.+}} = cir.select if %[[REAL_TO_BOOL]] then %[[CONST_TRUE]] else %[[IMAG_TO_BOOL]] : (!cir.bool, !cir.bool, !cir.bool) -> !cir.bool
+
+// LLVM: %[[REAL:.*]] = extractvalue { i32, i32 } %{{.*}}, 0
+// LLVM-NEXT: %[[IMAG:.*]] = extractvalue { i32, i32 } %{{.*}}, 1
+// LLVM-NEXT: %[[REAL_TO_BOOL:.*]] = icmp ne i32 %[[REAL]], 0
+// LLVM-NEXT: %[[IMAG_TO_BOOL:.*]] = icmp ne i32 %[[IMAG]], 0
+// LLVM-NEXT: %[[OR:.*]] = or i1 %[[REAL_TO_BOOL]], %[[IMAG_TO_BOOL]]
+// LLVM-NEXT: %[[RESULT:.*]] = zext i1 %[[OR]] to i8
+// LLVM-NEXT: store i8 %[[RESULT]], ptr {{.*}}, align 1
+
+// OGCG: %[[REAL:.*]] = load i32, ptr {{.*}}, align 4
+// OGCG: %[[IMAG:.*]] = load i32, ptr getelementptr inbounds nuw ({ i32, i32 }, ptr {{.*}}, i32 0, i32 1), align 4
+// OGCG: %[[REAL_TO_BOOL:.*]] = icmp ne i32 %[[REAL]], 0
+// OGCG: %[[IMAG_TO_BOOL:.*]] = icmp ne i32 %[[IMAG]], 0
+// OGCG: %[[COMPLEX_TO_BOOL:.*]] = or i1 %[[REAL_TO_BOOL]], %[[IMAG_TO_BOOL]]
+// OGCG: %[[BOOL_TO_INT:.*]] = zext i1 %[[COMPLEX_TO_BOOL]] to i8
+// OGCG: store i8 %[[BOOL_TO_INT]], ptr {{.*}}, align 1
+
+void complex_to_complex_cast() {
+ cd = cf;
+ ci = cs;
+}
+
+// CIR-BEFORE: %[[TMP:.*]] = cir.load{{.*}} %{{.*}} : !cir.ptr<!cir.complex<!cir.float>>, !cir.complex<!cir.float>
+// CIR-BEFORE: %[[FP_COMPLEX:.*]] = cir.cast(float_complex, %[[TMP]] : !cir.complex<!cir.float>), !cir.complex<!cir.double>
+
+// CIR-AFTER: %[[REAL:.*]] = cir.complex.real %{{.*}} : !cir.complex<!cir.float> -> !cir.float
+// CIR-AFTER: %[[IMAG:.*]] = cir.complex.imag %{{.*}} : !cir.complex<!cir.float> -> !cir.float
+// CIR-AFTER: %[[REAL_FP_CAST:.*]] = cir.cast(floating, %[[REAL]] : !cir.float), !cir.double
+// CIR-AFTER: %[[IMAG_FP_CAST:.*]] = cir.cast(floating, %[[IMAG]] : !cir.float), !cir.double
+// CIR-AFTER: %{{.*}} = cir.complex.create %[[REAL_FP_CAST]], %[[IMAG_FP_CAST]] : !cir.double -> !cir.complex<!cir.double>
+
+// LLVM: %[[REAL:.*]] = extractvalue { float, float } %{{.*}}, 0
+// LLVM: %[[IMAG:.*]] = extractvalue { float, float } %{{.*}}, 1
+// LLVM: %[[REAL_FP_CAST:.*]] = fpext float %[[REAL]] to double
+// LLVM: %[[IMAG_FP_CAST:.*]] = fpext float %[[IMAG]] to double
+// LLVM: %[[TMP:.*]] = insertvalue { double, double } undef, double %[[REAL_FP_CAST]], 0
+// LLVM: %[[COMPLEX:.*]] = insertvalue { double, double } %[[TMP]], double %[[IMAG_FP_CAST]], 1
+// LLVM: store { double, double } %[[COMPLEX]], ptr {{.*}}, align 8
+
+// OGCG: %[[REAL:.*]] = load float, ptr {{.*}}, align 4
+// OGCG: %[[IMAG:.*]] = load float, ptr getelementptr inbounds nuw ({ float, float }, ptr {{.*}}, i32 0, i32 1), align 4
+// OGCG: %[[REAL_FP_CAST:.*]] = fpext float %[[REAL]] to double
+// OGCG: %[[IMAG_FP_CAST:.*]] = fpext float %[[IMAG]] to double
+// OGCG: store double %[[REAL_FP_CAST]], ptr {{.*}}, align 8
+// OGCG: store double %[[IMAG_FP_CAST]], ptr getelementptr inbounds nuw ({ double, double }, ptr {{.*}}, i32 0, i32 1), align 8
+
+// CIR-BEFORE: %[[TMP:.*]] = cir.load{{.*}} %{{.*}} : !cir.ptr<!cir.complex<!s16i>>, !cir.complex<!s16i>
+// CIR-BEFORE: %[[INT_COMPLEX:.*]] = cir.cast(int_complex, %[[TMP]] : !cir.complex<!s16i>), !cir.complex<!s32i>
+
+// CIR-AFTER: %[[REAL:.*]] = cir.complex.real %{{.*}} : !cir.complex<!s16i> -> !s16i
+// CIR-AFTER: %[[IMAG:.*]] = cir.complex.imag %{{.*}} : !cir.complex<!s16i> -> !s16i
+// CIR-AFTER: %[[REAL_INT_CAST:.*]] = cir.cast(integral, %[[REAL]] : !s16i), !s32i
+// CIR-AFTER: %[[IMAG_INT_CAST:.*]] = cir.cast(integral, %[[IMAG]] : !s16i), !s32i
+// CIR-AFTER: %{{.*}} = cir.complex.create %[[REAL_INT_CAST]], %[[IMAG_INT_CAST]] : !s32i -> !cir.complex<!s32i>
+
+// LLVM: %[[REAL:.*]] = extractvalue { i16, i16 } %{{.*}}, 0
+// LLVM: %[[IMAG:.*]] = extractvalue { i16, i16 } %{{.*}}, 1
+// LLVM: %[[REAL_INT_CAST:.*]] = sext i16 %[[REAL]] to i32
+// LLVM: %[[IMAG_INT_CAST:.*]] = sext i16 %[[IMAG]] to i32
+// LLVM: %[[TMP:.*]] = insertvalue { i32, i32 } undef, i32 %[[REAL_INT_CAST]], 0
+// LLVM: %[[COMPLEX:.*]] = insertvalue { i32, i32 } %[[TMP]], i32 %[[IMAG_INT_CAST]], 1
+// LLVM: store { i32, i32 } %[[COMPLEX]], ptr {{.*}}, align 4
+
+// OGCG: %[[REAL:.*]] = load i16, ptr {{.*}}, align 2
+// OGCG: %[[IMAG:.*]] = load i16, ptr getelementptr inbounds nuw ({ i16, i16 }, ptr {{.*}}, i32 0, i32 1), align 2
+// OGCG: %[[REAL_INT_CAST:.*]] = sext i16 %[[REAL]] to i32
+// OGCG: %[[IMAG_INT_CAST:.*]] = sext i16 %[[IMAG]] to i32
+// OGCG: store i32 %[[REAL_INT_CAST]], ptr {{.*}}, align 4
+// OGCG: store i32 %[[IMAG_INT_CAST]], ptr getelementptr inbounds nuw ({ i32, i32 }, ptr {{.*}}, i32 0, i32 1), align 4
+
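The complex-to-bool conversions above implement the C/C++ rule that a complex value is truthy when either component is nonzero, hence the pair of compares joined by an or. In scalar terms (illustrative sketch only):

// What the fcmp une / icmp ne + or sequences compute: nonzero real OR
// nonzero imaginary part. A bitwise | mirrors the non-short-circuit 'or'.
static bool complex_nonzero(double re, double im) {
  return (re != 0.0) | (im != 0.0);
}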
diff --git a/clang/test/CIR/CodeGen/compound_literal.cpp b/clang/test/CIR/CodeGen/compound_literal.cpp
new file mode 100644
index 0000000..a92af95
--- /dev/null
+++ b/clang/test/CIR/CodeGen/compound_literal.cpp
@@ -0,0 +1,99 @@
+// RUN: %clang_cc1 -triple x86_64-unknown-linux-gnu -fclangir -emit-cir %s -o %t.cir
+// RUN: FileCheck --input-file=%t.cir %s -check-prefix=CIR
+// RUN: %clang_cc1 -triple x86_64-unknown-linux-gnu -Wno-unused-value -fclangir -emit-llvm %s -o %t-cir.ll
+// RUN: FileCheck --input-file=%t-cir.ll %s -check-prefix=LLVM
+// RUN: %clang_cc1 -triple x86_64-unknown-linux-gnu -Wno-unused-value -emit-llvm %s -o %t.ll
+// RUN: FileCheck --input-file=%t.ll %s -check-prefix=OGCG
+
+int foo() {
+ int e = (int){1};
+ return e;
+}
+
+// CIR: %[[RET:.*]] = cir.alloca !s32i, !cir.ptr<!s32i>, ["__retval"]
+// CIR: %[[INIT:.*]] = cir.alloca !s32i, !cir.ptr<!s32i>, ["e", init]
+// CIR: %[[COMPOUND:.*]] = cir.alloca !s32i, !cir.ptr<!s32i>, [".compoundliteral", init]
+// CIR: %[[VALUE:.*]] = cir.const #cir.int<1> : !s32i
+// CIR: cir.store{{.*}} %[[VALUE]], %[[COMPOUND]] : !s32i, !cir.ptr<!s32i>
+// CIR: %[[TMP:.*]] = cir.load{{.*}} %[[COMPOUND]] : !cir.ptr<!s32i>, !s32i
+// CIR: cir.store{{.*}} %[[TMP]], %[[INIT]] : !s32i, !cir.ptr<!s32i>
+// CIR: %[[TMP_2:.*]] = cir.load{{.*}} %[[INIT]] : !cir.ptr<!s32i>, !s32i
+// CIR: cir.store %[[TMP_2]], %[[RET]] : !s32i, !cir.ptr<!s32i>
+// CIR: %[[TMP_3:.*]] = cir.load %[[RET]] : !cir.ptr<!s32i>, !s32i
+// CIR: cir.return %[[TMP_3]] : !s32i
+
+// LLVM: %[[RET:.*]] = alloca i32, i64 1, align 4
+// LLVM: %[[INIT:.*]] = alloca i32, i64 1, align 4
+// LLVM: %[[COMPOUND:.*]] = alloca i32, i64 1, align 4
+// LLVM: store i32 1, ptr %[[COMPOUND]], align 4
+// LLVM: %[[TMP:.*]] = load i32, ptr %[[COMPOUND]], align 4
+// LLVM: store i32 %[[TMP]], ptr %[[INIT]], align 4
+// LLVM: %[[TMP_2:.*]] = load i32, ptr %[[INIT]], align 4
+// LLVM: store i32 %[[TMP_2]], ptr %[[RET]], align 4
+// LLVM: %[[TMP_3:.*]] = load i32, ptr %[[RET]], align 4
+// LLVM: ret i32 %[[TMP_3]]
+
+// OGCG: %[[INIT:.*]] = alloca i32, align 4
+// OGCG: %[[COMPOUND:.*]] = alloca i32, align 4
+// OGCG: store i32 1, ptr %[[COMPOUND]], align 4
+// OGCG: %[[TMP:.*]] = load i32, ptr %[[COMPOUND]], align 4
+// OGCG: store i32 %[[TMP]], ptr %[[INIT]], align 4
+// OGCG: %[[TMP_2:.*]] = load i32, ptr %[[INIT]], align 4
+// OGCG: ret i32 %[[TMP_2]]
+
+void foo2() {
+ int _Complex a = (int _Complex) { 1, 2};
+}
+
+// CIR: %[[A_ADDR:.*]] = cir.alloca !cir.complex<!s32i>, !cir.ptr<!cir.complex<!s32i>>, ["a", init]
+// CIR: %[[CL_ADDR:.*]] = cir.alloca !cir.complex<!s32i>, !cir.ptr<!cir.complex<!s32i>>, [".compoundliteral"]
+// CIR: %[[COMPLEX:.*]] = cir.const #cir.const_complex<#cir.int<1> : !s32i, #cir.int<2> : !s32i> : !cir.complex<!s32i>
+// CIR: cir.store{{.*}} %[[COMPLEX]], %[[CL_ADDR]] : !cir.complex<!s32i>, !cir.ptr<!cir.complex<!s32i>>
+// CIR: %[[TMP:.*]] = cir.load{{.*}} %[[CL_ADDR]] : !cir.ptr<!cir.complex<!s32i>>, !cir.complex<!s32i>
+// CIR: cir.store{{.*}} %[[TMP]], %[[A_ADDR]] : !cir.complex<!s32i>, !cir.ptr<!cir.complex<!s32i>>
+
+// LLVM: %[[A_ADDR:.*]] = alloca { i32, i32 }, i64 1, align 4
+// LLVM: %[[CL_ADDR:.*]] = alloca { i32, i32 }, i64 1, align 4
+// LLVM: store { i32, i32 } { i32 1, i32 2 }, ptr %[[CL_ADDR]], align 4
+// LLVM: %[[TMP:.*]] = load { i32, i32 }, ptr %[[CL_ADDR]], align 4
+// LLVM: store { i32, i32 } %[[TMP]], ptr %[[A_ADDR]], align 4
+
+// OGCG: %[[A_ADDR:.*]] = alloca { i32, i32 }, align 4
+// OGCG: %[[CL_ADDR:.*]] = alloca { i32, i32 }, align 4
+// OGCG: %[[CL_REAL_PTR:.*]] = getelementptr inbounds nuw { i32, i32 }, ptr %[[CL_ADDR]], i32 0, i32 0
+// OGCG: %[[CL_IMAG_PTR:.*]] = getelementptr inbounds nuw { i32, i32 }, ptr %[[CL_ADDR]], i32 0, i32 1
+// OGCG: store i32 1, ptr %[[CL_REAL_PTR]], align 4
+// OGCG: store i32 2, ptr %[[CL_IMAG_PTR]], align 4
+// OGCG: %[[CL_REAL_PTR:.*]] = getelementptr inbounds nuw { i32, i32 }, ptr %[[CL_ADDR]], i32 0, i32 0
+// OGCG: %[[CL_REAL:.*]] = load i32, ptr %[[CL_REAL_PTR]], align 4
+// OGCG: %[[CL_IMAG_PTR:.*]] = getelementptr inbounds nuw { i32, i32 }, ptr %[[CL_ADDR]], i32 0, i32 1
+// OGCG: %[[CL_IMAG:.*]] = load i32, ptr %[[CL_IMAG_PTR]], align 4
+// OGCG: %[[A_REAL_PTR:.*]] = getelementptr inbounds nuw { i32, i32 }, ptr %[[A_ADDR]], i32 0, i32 0
+// OGCG: %[[A_IMAG_PTR:.*]] = getelementptr inbounds nuw { i32, i32 }, ptr %[[A_ADDR]], i32 0, i32 1
+// OGCG: store i32 %[[CL_REAL]], ptr %[[A_REAL_PTR]], align 4
+// OGCG: store i32 %[[CL_IMAG]], ptr %[[A_IMAG_PTR]], align 4
+
+void foo3() {
+ typedef int vi4 __attribute__((vector_size(16)));
+ auto a = (vi4){10, 20, 30, 40};
+}
+
+// CIR: %[[A_ADDR:.*]] = cir.alloca !cir.vector<4 x !s32i>, !cir.ptr<!cir.vector<4 x !s32i>>, ["a", init]
+// CIR: %[[CL_ADDR:.*]] = cir.alloca !cir.vector<4 x !s32i>, !cir.ptr<!cir.vector<4 x !s32i>>, [".compoundliteral", init]
+// CIR: %[[VEC:.*]] = cir.const #cir.const_vector<[#cir.int<10> : !s32i, #cir.int<20> : !s32i, #cir.int<30> : !s32i, #cir.int<40> : !s32i]> : !cir.vector<4 x !s32i>
+// CIR: cir.store{{.*}} %[[VEC]], %[[CL_ADDR]] : !cir.vector<4 x !s32i>, !cir.ptr<!cir.vector<4 x !s32i>>
+// CIR: %[[TMP:.*]] = cir.load{{.*}} %[[CL_ADDR]] : !cir.ptr<!cir.vector<4 x !s32i>>, !cir.vector<4 x !s32i>
+// CIR: cir.store{{.*}} %[[TMP]], %[[A_ADDR]] : !cir.vector<4 x !s32i>, !cir.ptr<!cir.vector<4 x !s32i>>
+
+// LLVM: %[[A_ADDR:.*]] = alloca <4 x i32>, i64 1, align 16
+// LLVM: %[[CL_ADDR:.*]] = alloca <4 x i32>, i64 1, align 16
+// LLVM: store <4 x i32> <i32 10, i32 20, i32 30, i32 40>, ptr %[[CL_ADDR]], align 16
+// LLVM: %[[TMP:.*]] = load <4 x i32>, ptr %[[CL_ADDR]], align 16
+// LLVM: store <4 x i32> %[[TMP]], ptr %[[A_ADDR]], align 16
+
+// OGCG: %[[A_ADDR:.*]] = alloca <4 x i32>, align 16
+// OGCG: %[[CL_ADDR:.*]] = alloca <4 x i32>, align 16
+// OGCG: store <4 x i32> <i32 10, i32 20, i32 30, i32 40>, ptr %[[CL_ADDR]], align 16
+// OGCG: %[[TMP:.*]] = load <4 x i32>, ptr %[[CL_ADDR]], align 16
+// OGCG: store <4 x i32> %[[TMP]], ptr %[[A_ADDR]], align 16
+
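A compound literal materializes an unnamed object, the ".compoundliteral" alloca above, which is then copied into the declared variable. A source-level paraphrase of the IR for foo() (hypothetical names):

int foo_equivalent() {
  int compound_literal_slot = 1;  // the ".compoundliteral" alloca
  int e = compound_literal_slot;  // load from the slot, store into "e"
  return e;                       // store to "__retval", then load + ret
}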
diff --git a/clang/test/CIR/CodeGen/destructors.cpp b/clang/test/CIR/CodeGen/destructors.cpp
index d8f9f23..de7718f 100644
--- a/clang/test/CIR/CodeGen/destructors.cpp
+++ b/clang/test/CIR/CodeGen/destructors.cpp
@@ -31,11 +31,11 @@ out_of_line_destructor::~out_of_line_destructor() {
// OGCG: ret void
// CIR: cir.func dso_local @_ZN22out_of_line_destructorD1Ev(%{{.+}}: !cir.ptr<!rec_out_of_line_destructor>
-// CIR: cir.call @_Z13some_functionv() nothrow : () -> ()
+// CIR: cir.call @_ZN22out_of_line_destructorD2Ev(%{{.*}}) nothrow : (!cir.ptr<!rec_out_of_line_destructor>)
// CIR: cir.return
// LLVM: define dso_local void @_ZN22out_of_line_destructorD1Ev(ptr %{{.+}})
-// LLVM: call void @_Z13some_functionv()
+// LLVM: call void @_ZN22out_of_line_destructorD2Ev
// LLVM: ret void
// OGCG: define dso_local void @_ZN22out_of_line_destructorD1Ev(ptr {{.*}}%{{.+}})
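
The updated checks expect the complete-object destructor (D1) to delegate to the base-object destructor (D2) instead of re-emitting its body, matching the Itanium C++ ABI structure that classic codegen uses. Schematically (hypothetical free-function names):

// D2 (base-object dtor) owns the body, here the call to some_function();
// D1 (complete-object dtor) just forwards when there are no virtual bases.
void out_of_line_destructor_D2(void *this_ptr);

void out_of_line_destructor_D1(void *this_ptr) {
  out_of_line_destructor_D2(this_ptr);
}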