Diffstat (limited to 'clang/test/CodeGen')
-rw-r--r--  clang/test/CodeGen/AArch64/atomic-ops-float-check-minmax.c | 117
-rw-r--r--  clang/test/CodeGen/RISCV/bitint.c | 342
-rw-r--r--  clang/test/CodeGen/X86/avx-builtins.c | 57
-rw-r--r--  clang/test/CodeGen/X86/avx2-builtins.c | 26
-rw-r--r--  clang/test/CodeGen/X86/avx512bw-builtins.c | 9
-rw-r--r--  clang/test/CodeGen/X86/avx512cd-builtins.c | 32
-rw-r--r--  clang/test/CodeGen/X86/avx512dq-builtins.c | 13
-rw-r--r--  clang/test/CodeGen/X86/avx512f-builtins.c | 14
-rw-r--r--  clang/test/CodeGen/X86/avx512ifma-builtins.c | 199
-rw-r--r--  clang/test/CodeGen/X86/avx512ifmavl-builtins.c | 182
-rw-r--r--  clang/test/CodeGen/X86/avx512vl-builtins.c | 44
-rw-r--r--  clang/test/CodeGen/X86/avx512vlbw-builtins.c | 13
-rw-r--r--  clang/test/CodeGen/X86/avx512vlcd-builtins.c | 59
-rw-r--r--  clang/test/CodeGen/X86/avx512vldq-builtins.c | 18
-rw-r--r--  clang/test/CodeGen/X86/avxifma-builtins.c | 182
-rw-r--r--  clang/test/CodeGen/X86/mmx-builtins.c | 8
-rw-r--r--  clang/test/CodeGen/X86/sse3-builtins.c | 4
-rw-r--r--  clang/test/CodeGen/X86/sse41-builtins.c | 20
-rw-r--r--  clang/test/CodeGen/X86/ssse3-builtins.c | 9
-rw-r--r--  clang/test/CodeGen/arm-mve-intrinsics/load-store.c | 420
-rw-r--r--  clang/test/CodeGen/attr-target-mv.c | 10
-rw-r--r--  clang/test/CodeGen/distributed-thin-lto/supports-hot-cold-new.ll | 9
-rw-r--r--  clang/test/CodeGen/ext-int-cc.c | 8
-rw-r--r--  clang/test/CodeGen/inline-asm-systemz-flag-output.c | 57
-rw-r--r--  clang/test/CodeGen/target-builtin-noerror.c | 2
-rw-r--r--  clang/test/CodeGen/target-data.c | 8
-rw-r--r--  clang/test/CodeGen/unified-lto-module-flag.ll | 11
27 files changed, 1606 insertions(+), 267 deletions(-)
diff --git a/clang/test/CodeGen/AArch64/atomic-ops-float-check-minmax.c b/clang/test/CodeGen/AArch64/atomic-ops-float-check-minmax.c
new file mode 100644
index 0000000..4d9b29b
--- /dev/null
+++ b/clang/test/CodeGen/AArch64/atomic-ops-float-check-minmax.c
@@ -0,0 +1,117 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --version 6
+// RUN: %clang_cc1 %s -emit-llvm -o - -ffreestanding -triple=aarch64-linux-gnu -pthread | FileCheck %s
+
+#include <stdint.h>
+#include <stdatomic.h>
+
+// CHECK-LABEL: define dso_local void @test_minmax_postop(
+// CHECK-SAME: ptr noundef [[F32:%.*]], ptr noundef [[F16:%.*]], ptr noundef [[BF16:%.*]], ptr noundef [[F64:%.*]]) #[[ATTR0:[0-9]+]] {
+// CHECK-NEXT: [[ENTRY:.*:]]
+// CHECK-NEXT: [[F32_ADDR:%.*]] = alloca ptr, align 8
+// CHECK-NEXT: [[F16_ADDR:%.*]] = alloca ptr, align 8
+// CHECK-NEXT: [[BF16_ADDR:%.*]] = alloca ptr, align 8
+// CHECK-NEXT: [[F64_ADDR:%.*]] = alloca ptr, align 8
+// CHECK-NEXT: [[DOTATOMICTMP:%.*]] = alloca double, align 8
+// CHECK-NEXT: [[ATOMIC_TEMP:%.*]] = alloca double, align 8
+// CHECK-NEXT: [[DOTATOMICTMP1:%.*]] = alloca float, align 4
+// CHECK-NEXT: [[ATOMIC_TEMP2:%.*]] = alloca float, align 4
+// CHECK-NEXT: [[DOTATOMICTMP4:%.*]] = alloca half, align 2
+// CHECK-NEXT: [[ATOMIC_TEMP5:%.*]] = alloca half, align 2
+// CHECK-NEXT: [[DOTATOMICTMP7:%.*]] = alloca bfloat, align 2
+// CHECK-NEXT: [[ATOMIC_TEMP8:%.*]] = alloca bfloat, align 2
+// CHECK-NEXT: [[DOTATOMICTMP10:%.*]] = alloca double, align 8
+// CHECK-NEXT: [[ATOMIC_TEMP11:%.*]] = alloca double, align 8
+// CHECK-NEXT: [[DOTATOMICTMP13:%.*]] = alloca float, align 4
+// CHECK-NEXT: [[ATOMIC_TEMP14:%.*]] = alloca float, align 4
+// CHECK-NEXT: [[DOTATOMICTMP16:%.*]] = alloca half, align 2
+// CHECK-NEXT: [[ATOMIC_TEMP17:%.*]] = alloca half, align 2
+// CHECK-NEXT: [[DOTATOMICTMP19:%.*]] = alloca bfloat, align 2
+// CHECK-NEXT: [[ATOMIC_TEMP20:%.*]] = alloca bfloat, align 2
+// CHECK-NEXT: store ptr [[F32]], ptr [[F32_ADDR]], align 8
+// CHECK-NEXT: store ptr [[F16]], ptr [[F16_ADDR]], align 8
+// CHECK-NEXT: store ptr [[BF16]], ptr [[BF16_ADDR]], align 8
+// CHECK-NEXT: store ptr [[F64]], ptr [[F64_ADDR]], align 8
+// CHECK-NEXT: [[TMP0:%.*]] = load ptr, ptr [[F64_ADDR]], align 8
+// CHECK-NEXT: store double 4.210000e+01, ptr [[DOTATOMICTMP]], align 8
+// CHECK-NEXT: [[TMP1:%.*]] = load double, ptr [[DOTATOMICTMP]], align 8
+// CHECK-NEXT: [[TMP2:%.*]] = atomicrmw fmax ptr [[TMP0]], double [[TMP1]] release, align 8
+// CHECK-NEXT: [[NEWVAL:%.*]] = call double @llvm.maxnum.f64(double [[TMP2]], double [[TMP1]])
+// CHECK-NEXT: store double [[NEWVAL]], ptr [[ATOMIC_TEMP]], align 8
+// CHECK-NEXT: [[TMP3:%.*]] = load double, ptr [[ATOMIC_TEMP]], align 8
+// CHECK-NEXT: [[TMP4:%.*]] = load ptr, ptr [[F64_ADDR]], align 8
+// CHECK-NEXT: store double [[TMP3]], ptr [[TMP4]], align 8
+// CHECK-NEXT: [[TMP5:%.*]] = load ptr, ptr [[F32_ADDR]], align 8
+// CHECK-NEXT: store float 0x40450CCCC0000000, ptr [[DOTATOMICTMP1]], align 4
+// CHECK-NEXT: [[TMP6:%.*]] = load float, ptr [[DOTATOMICTMP1]], align 4
+// CHECK-NEXT: [[TMP7:%.*]] = atomicrmw fmax ptr [[TMP5]], float [[TMP6]] release, align 4
+// CHECK-NEXT: [[NEWVAL3:%.*]] = call float @llvm.maxnum.f32(float [[TMP7]], float [[TMP6]])
+// CHECK-NEXT: store float [[NEWVAL3]], ptr [[ATOMIC_TEMP2]], align 4
+// CHECK-NEXT: [[TMP8:%.*]] = load float, ptr [[ATOMIC_TEMP2]], align 4
+// CHECK-NEXT: [[TMP9:%.*]] = load ptr, ptr [[F32_ADDR]], align 8
+// CHECK-NEXT: store float [[TMP8]], ptr [[TMP9]], align 4
+// CHECK-NEXT: [[TMP10:%.*]] = load ptr, ptr [[F16_ADDR]], align 8
+// CHECK-NEXT: store half 0xH5143, ptr [[DOTATOMICTMP4]], align 2
+// CHECK-NEXT: [[TMP11:%.*]] = load half, ptr [[DOTATOMICTMP4]], align 2
+// CHECK-NEXT: [[TMP12:%.*]] = atomicrmw fmax ptr [[TMP10]], half [[TMP11]] release, align 2
+// CHECK-NEXT: [[NEWVAL6:%.*]] = call half @llvm.maxnum.f16(half [[TMP12]], half [[TMP11]])
+// CHECK-NEXT: store half [[NEWVAL6]], ptr [[ATOMIC_TEMP5]], align 2
+// CHECK-NEXT: [[TMP13:%.*]] = load half, ptr [[ATOMIC_TEMP5]], align 2
+// CHECK-NEXT: [[TMP14:%.*]] = load ptr, ptr [[F16_ADDR]], align 8
+// CHECK-NEXT: store half [[TMP13]], ptr [[TMP14]], align 2
+// CHECK-NEXT: [[TMP15:%.*]] = load ptr, ptr [[BF16_ADDR]], align 8
+// CHECK-NEXT: store bfloat 0xR4228, ptr [[DOTATOMICTMP7]], align 2
+// CHECK-NEXT: [[TMP16:%.*]] = load bfloat, ptr [[DOTATOMICTMP7]], align 2
+// CHECK-NEXT: [[TMP17:%.*]] = atomicrmw fmax ptr [[TMP15]], bfloat [[TMP16]] release, align 2
+// CHECK-NEXT: [[NEWVAL9:%.*]] = call bfloat @llvm.maxnum.bf16(bfloat [[TMP17]], bfloat [[TMP16]])
+// CHECK-NEXT: store bfloat [[NEWVAL9]], ptr [[ATOMIC_TEMP8]], align 2
+// CHECK-NEXT: [[TMP18:%.*]] = load bfloat, ptr [[ATOMIC_TEMP8]], align 2
+// CHECK-NEXT: [[TMP19:%.*]] = load ptr, ptr [[BF16_ADDR]], align 8
+// CHECK-NEXT: store bfloat [[TMP18]], ptr [[TMP19]], align 2
+// CHECK-NEXT: [[TMP20:%.*]] = load ptr, ptr [[F64_ADDR]], align 8
+// CHECK-NEXT: store double 4.210000e+01, ptr [[DOTATOMICTMP10]], align 8
+// CHECK-NEXT: [[TMP21:%.*]] = load double, ptr [[DOTATOMICTMP10]], align 8
+// CHECK-NEXT: [[TMP22:%.*]] = atomicrmw fmin ptr [[TMP20]], double [[TMP21]] release, align 8
+// CHECK-NEXT: [[NEWVAL12:%.*]] = call double @llvm.minnum.f64(double [[TMP22]], double [[TMP21]])
+// CHECK-NEXT: store double [[NEWVAL12]], ptr [[ATOMIC_TEMP11]], align 8
+// CHECK-NEXT: [[TMP23:%.*]] = load double, ptr [[ATOMIC_TEMP11]], align 8
+// CHECK-NEXT: [[TMP24:%.*]] = load ptr, ptr [[F64_ADDR]], align 8
+// CHECK-NEXT: store double [[TMP23]], ptr [[TMP24]], align 8
+// CHECK-NEXT: [[TMP25:%.*]] = load ptr, ptr [[F32_ADDR]], align 8
+// CHECK-NEXT: store float 0x40450CCCC0000000, ptr [[DOTATOMICTMP13]], align 4
+// CHECK-NEXT: [[TMP26:%.*]] = load float, ptr [[DOTATOMICTMP13]], align 4
+// CHECK-NEXT: [[TMP27:%.*]] = atomicrmw fmin ptr [[TMP25]], float [[TMP26]] release, align 4
+// CHECK-NEXT: [[NEWVAL15:%.*]] = call float @llvm.minnum.f32(float [[TMP27]], float [[TMP26]])
+// CHECK-NEXT: store float [[NEWVAL15]], ptr [[ATOMIC_TEMP14]], align 4
+// CHECK-NEXT: [[TMP28:%.*]] = load float, ptr [[ATOMIC_TEMP14]], align 4
+// CHECK-NEXT: [[TMP29:%.*]] = load ptr, ptr [[F32_ADDR]], align 8
+// CHECK-NEXT: store float [[TMP28]], ptr [[TMP29]], align 4
+// CHECK-NEXT: [[TMP30:%.*]] = load ptr, ptr [[F16_ADDR]], align 8
+// CHECK-NEXT: store half 0xH5143, ptr [[DOTATOMICTMP16]], align 2
+// CHECK-NEXT: [[TMP31:%.*]] = load half, ptr [[DOTATOMICTMP16]], align 2
+// CHECK-NEXT: [[TMP32:%.*]] = atomicrmw fmin ptr [[TMP30]], half [[TMP31]] release, align 2
+// CHECK-NEXT: [[NEWVAL18:%.*]] = call half @llvm.minnum.f16(half [[TMP32]], half [[TMP31]])
+// CHECK-NEXT: store half [[NEWVAL18]], ptr [[ATOMIC_TEMP17]], align 2
+// CHECK-NEXT: [[TMP33:%.*]] = load half, ptr [[ATOMIC_TEMP17]], align 2
+// CHECK-NEXT: [[TMP34:%.*]] = load ptr, ptr [[F16_ADDR]], align 8
+// CHECK-NEXT: store half [[TMP33]], ptr [[TMP34]], align 2
+// CHECK-NEXT: [[TMP35:%.*]] = load ptr, ptr [[BF16_ADDR]], align 8
+// CHECK-NEXT: store bfloat 0xR4228, ptr [[DOTATOMICTMP19]], align 2
+// CHECK-NEXT: [[TMP36:%.*]] = load bfloat, ptr [[DOTATOMICTMP19]], align 2
+// CHECK-NEXT: [[TMP37:%.*]] = atomicrmw fmin ptr [[TMP35]], bfloat [[TMP36]] release, align 2
+// CHECK-NEXT: [[NEWVAL21:%.*]] = call bfloat @llvm.minnum.bf16(bfloat [[TMP37]], bfloat [[TMP36]])
+// CHECK-NEXT: store bfloat [[NEWVAL21]], ptr [[ATOMIC_TEMP20]], align 2
+// CHECK-NEXT: [[TMP38:%.*]] = load bfloat, ptr [[ATOMIC_TEMP20]], align 2
+// CHECK-NEXT: [[TMP39:%.*]] = load ptr, ptr [[BF16_ADDR]], align 8
+// CHECK-NEXT: store bfloat [[TMP38]], ptr [[TMP39]], align 2
+// CHECK-NEXT: ret void
+//
+void test_minmax_postop(float *f32, _Float16 *f16, __bf16 *bf16, double *f64) {
+ *f64 = __atomic_max_fetch(f64, 42.1, memory_order_release);
+ *f32 = __atomic_max_fetch(f32, 42.1, memory_order_release);
+ *f16 = __atomic_max_fetch(f16, 42.1, memory_order_release);
+ *bf16 = __atomic_max_fetch(bf16, 42.1, memory_order_release);
+ *f64 = __atomic_min_fetch(f64, 42.1, memory_order_release);
+ *f32 = __atomic_min_fetch(f32, 42.1, memory_order_release);
+ *f16 = __atomic_min_fetch(f16, 42.1, memory_order_release);
+ *bf16 = __atomic_min_fetch(bf16, 42.1, memory_order_release);
+}
diff --git a/clang/test/CodeGen/RISCV/bitint.c b/clang/test/CodeGen/RISCV/bitint.c
new file mode 100644
index 0000000..1ad43af
--- /dev/null
+++ b/clang/test/CodeGen/RISCV/bitint.c
@@ -0,0 +1,342 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --function-signature
+// RUN: %clang_cc1 -triple riscv64 -O2 -emit-llvm -o - %s | FileCheck %s --check-prefix=RISCV64
+// RUN: %clang_cc1 -triple riscv32 -O2 -emit-llvm -o - %s | FileCheck %s --check-prefix=RISCV32
+// RUN: %clang_cc1 -triple riscv32 -fforce-enable-int128 -O2 -emit-llvm -o - %s | FileCheck %s --check-prefix=RISCV32_INT128
+
+// RISCV64-LABEL: define {{[^@]+}}@test_bitint_17_add_unsigned
+// RISCV64-SAME: (i17 noundef zeroext [[A:%.*]], i17 noundef zeroext [[B:%.*]]) local_unnamed_addr #[[ATTR0:[0-9]+]] {
+// RISCV64-NEXT: entry:
+// RISCV64-NEXT: [[ADD:%.*]] = add i17 [[B]], [[A]]
+// RISCV64-NEXT: ret i17 [[ADD]]
+//
+// RISCV32-LABEL: define {{[^@]+}}@test_bitint_17_add_unsigned
+// RISCV32-SAME: (i17 noundef zeroext [[A:%.*]], i17 noundef zeroext [[B:%.*]]) local_unnamed_addr #[[ATTR0:[0-9]+]] {
+// RISCV32-NEXT: entry:
+// RISCV32-NEXT: [[ADD:%.*]] = add i17 [[B]], [[A]]
+// RISCV32-NEXT: ret i17 [[ADD]]
+//
+// RISCV32_INT128-LABEL: define {{[^@]+}}@test_bitint_17_add_unsigned
+// RISCV32_INT128-SAME: (i17 noundef zeroext [[A:%.*]], i17 noundef zeroext [[B:%.*]]) local_unnamed_addr #[[ATTR0:[0-9]+]] {
+// RISCV32_INT128-NEXT: entry:
+// RISCV32_INT128-NEXT: [[ADD:%.*]] = add i17 [[B]], [[A]]
+// RISCV32_INT128-NEXT: ret i17 [[ADD]]
+//
+unsigned _BitInt(17) test_bitint_17_add_unsigned(unsigned _BitInt(17) a, unsigned _BitInt(17) b) {
+ return a + b;
+}
+
+// RISCV64-LABEL: define {{[^@]+}}@test_bitint_17_add_signed
+// RISCV64-SAME: (i17 noundef signext [[A:%.*]], i17 noundef signext [[B:%.*]]) local_unnamed_addr #[[ATTR0]] {
+// RISCV64-NEXT: entry:
+// RISCV64-NEXT: [[ADD:%.*]] = add nsw i17 [[B]], [[A]]
+// RISCV64-NEXT: ret i17 [[ADD]]
+//
+// RISCV32-LABEL: define {{[^@]+}}@test_bitint_17_add_signed
+// RISCV32-SAME: (i17 noundef signext [[A:%.*]], i17 noundef signext [[B:%.*]]) local_unnamed_addr #[[ATTR0]] {
+// RISCV32-NEXT: entry:
+// RISCV32-NEXT: [[ADD:%.*]] = add nsw i17 [[B]], [[A]]
+// RISCV32-NEXT: ret i17 [[ADD]]
+//
+// RISCV32_INT128-LABEL: define {{[^@]+}}@test_bitint_17_add_signed
+// RISCV32_INT128-SAME: (i17 noundef signext [[A:%.*]], i17 noundef signext [[B:%.*]]) local_unnamed_addr #[[ATTR0]] {
+// RISCV32_INT128-NEXT: entry:
+// RISCV32_INT128-NEXT: [[ADD:%.*]] = add nsw i17 [[B]], [[A]]
+// RISCV32_INT128-NEXT: ret i17 [[ADD]]
+//
+signed _BitInt(17) test_bitint_17_add_signed(signed _BitInt(17) a, signed _BitInt(17) b) {
+ return a + b;
+}
+
+// RISCV64-LABEL: define {{[^@]+}}@test_bitint_17_add_default
+// RISCV64-SAME: (i17 noundef signext [[A:%.*]], i17 noundef signext [[B:%.*]]) local_unnamed_addr #[[ATTR0]] {
+// RISCV64-NEXT: entry:
+// RISCV64-NEXT: [[ADD:%.*]] = add nsw i17 [[B]], [[A]]
+// RISCV64-NEXT: ret i17 [[ADD]]
+//
+// RISCV32-LABEL: define {{[^@]+}}@test_bitint_17_add_default
+// RISCV32-SAME: (i17 noundef signext [[A:%.*]], i17 noundef signext [[B:%.*]]) local_unnamed_addr #[[ATTR0]] {
+// RISCV32-NEXT: entry:
+// RISCV32-NEXT: [[ADD:%.*]] = add nsw i17 [[B]], [[A]]
+// RISCV32-NEXT: ret i17 [[ADD]]
+//
+// RISCV32_INT128-LABEL: define {{[^@]+}}@test_bitint_17_add_default
+// RISCV32_INT128-SAME: (i17 noundef signext [[A:%.*]], i17 noundef signext [[B:%.*]]) local_unnamed_addr #[[ATTR0]] {
+// RISCV32_INT128-NEXT: entry:
+// RISCV32_INT128-NEXT: [[ADD:%.*]] = add nsw i17 [[B]], [[A]]
+// RISCV32_INT128-NEXT: ret i17 [[ADD]]
+//
+_BitInt(17) test_bitint_17_add_default(_BitInt(17) a, _BitInt(17) b) {
+ return a + b;
+}
+
+// RISCV64-LABEL: define {{[^@]+}}@test_bitint_32_add_unsigned
+// RISCV64-SAME: (i32 noundef signext [[A:%.*]], i32 noundef signext [[B:%.*]]) local_unnamed_addr #[[ATTR0]] {
+// RISCV64-NEXT: entry:
+// RISCV64-NEXT: [[ADD:%.*]] = add i32 [[B]], [[A]]
+// RISCV64-NEXT: ret i32 [[ADD]]
+//
+// RISCV32-LABEL: define {{[^@]+}}@test_bitint_32_add_unsigned
+// RISCV32-SAME: (i32 noundef zeroext [[A:%.*]], i32 noundef zeroext [[B:%.*]]) local_unnamed_addr #[[ATTR0]] {
+// RISCV32-NEXT: entry:
+// RISCV32-NEXT: [[ADD:%.*]] = add i32 [[B]], [[A]]
+// RISCV32-NEXT: ret i32 [[ADD]]
+//
+// RISCV32_INT128-LABEL: define {{[^@]+}}@test_bitint_32_add_unsigned
+// RISCV32_INT128-SAME: (i32 noundef zeroext [[A:%.*]], i32 noundef zeroext [[B:%.*]]) local_unnamed_addr #[[ATTR0]] {
+// RISCV32_INT128-NEXT: entry:
+// RISCV32_INT128-NEXT: [[ADD:%.*]] = add i32 [[B]], [[A]]
+// RISCV32_INT128-NEXT: ret i32 [[ADD]]
+//
+unsigned _BitInt(32) test_bitint_32_add_unsigned(unsigned _BitInt(32) a, unsigned _BitInt(32) b) {
+ return a + b;
+}
+
+// RISCV64-LABEL: define {{[^@]+}}@test_bitint_32_add_signed
+// RISCV64-SAME: (i32 noundef signext [[A:%.*]], i32 noundef signext [[B:%.*]]) local_unnamed_addr #[[ATTR0]] {
+// RISCV64-NEXT: entry:
+// RISCV64-NEXT: [[ADD:%.*]] = add nsw i32 [[B]], [[A]]
+// RISCV64-NEXT: ret i32 [[ADD]]
+//
+// RISCV32-LABEL: define {{[^@]+}}@test_bitint_32_add_signed
+// RISCV32-SAME: (i32 noundef signext [[A:%.*]], i32 noundef signext [[B:%.*]]) local_unnamed_addr #[[ATTR0]] {
+// RISCV32-NEXT: entry:
+// RISCV32-NEXT: [[ADD:%.*]] = add nsw i32 [[B]], [[A]]
+// RISCV32-NEXT: ret i32 [[ADD]]
+//
+// RISCV32_INT128-LABEL: define {{[^@]+}}@test_bitint_32_add_signed
+// RISCV32_INT128-SAME: (i32 noundef signext [[A:%.*]], i32 noundef signext [[B:%.*]]) local_unnamed_addr #[[ATTR0]] {
+// RISCV32_INT128-NEXT: entry:
+// RISCV32_INT128-NEXT: [[ADD:%.*]] = add nsw i32 [[B]], [[A]]
+// RISCV32_INT128-NEXT: ret i32 [[ADD]]
+//
+signed _BitInt(32) test_bitint_32_add_signed(signed _BitInt(32) a, signed _BitInt(32) b) {
+ return a + b;
+}
+
+// RISCV64-LABEL: define {{[^@]+}}@test_bitint_32_add_default
+// RISCV64-SAME: (i32 noundef signext [[A:%.*]], i32 noundef signext [[B:%.*]]) local_unnamed_addr #[[ATTR0]] {
+// RISCV64-NEXT: entry:
+// RISCV64-NEXT: [[ADD:%.*]] = add nsw i32 [[B]], [[A]]
+// RISCV64-NEXT: ret i32 [[ADD]]
+//
+// RISCV32-LABEL: define {{[^@]+}}@test_bitint_32_add_default
+// RISCV32-SAME: (i32 noundef signext [[A:%.*]], i32 noundef signext [[B:%.*]]) local_unnamed_addr #[[ATTR0]] {
+// RISCV32-NEXT: entry:
+// RISCV32-NEXT: [[ADD:%.*]] = add nsw i32 [[B]], [[A]]
+// RISCV32-NEXT: ret i32 [[ADD]]
+//
+// RISCV32_INT128-LABEL: define {{[^@]+}}@test_bitint_32_add_default
+// RISCV32_INT128-SAME: (i32 noundef signext [[A:%.*]], i32 noundef signext [[B:%.*]]) local_unnamed_addr #[[ATTR0]] {
+// RISCV32_INT128-NEXT: entry:
+// RISCV32_INT128-NEXT: [[ADD:%.*]] = add nsw i32 [[B]], [[A]]
+// RISCV32_INT128-NEXT: ret i32 [[ADD]]
+//
+_BitInt(32) test_bitint_32_add_default(_BitInt(32) a, _BitInt(32) b) {
+ return a + b;
+}
+
+
+// RISCV64-LABEL: define {{[^@]+}}@test_bitint_65_add_unsigned
+// RISCV64-SAME: (i65 noundef zeroext [[A:%.*]], i65 noundef zeroext [[B:%.*]]) local_unnamed_addr #[[ATTR0]] {
+// RISCV64-NEXT: entry:
+// RISCV64-NEXT: [[ADD:%.*]] = add i65 [[B]], [[A]]
+// RISCV64-NEXT: ret i65 [[ADD]]
+//
+// RISCV32-LABEL: define {{[^@]+}}@test_bitint_65_add_unsigned
+// RISCV32-SAME: (ptr dead_on_unwind noalias writable writeonly sret(i128) align 8 captures(none) initializes((0, 16)) [[AGG_RESULT:%.*]], ptr dead_on_return noundef readonly captures(none) [[TMP0:%.*]], ptr dead_on_return noundef readonly captures(none) [[TMP1:%.*]]) local_unnamed_addr #[[ATTR1:[0-9]+]] {
+// RISCV32-NEXT: entry:
+// RISCV32-NEXT: [[TMP2:%.*]] = load i128, ptr [[TMP0]], align 8, !tbaa [[TBAA6:![0-9]+]]
+// RISCV32-NEXT: [[A:%.*]] = trunc i128 [[TMP2]] to i65
+// RISCV32-NEXT: [[TMP3:%.*]] = load i128, ptr [[TMP1]], align 8, !tbaa [[TBAA6]]
+// RISCV32-NEXT: [[B:%.*]] = trunc i128 [[TMP3]] to i65
+// RISCV32-NEXT: [[ADD:%.*]] = add i65 [[B]], [[A]]
+// RISCV32-NEXT: [[STOREDV4:%.*]] = zext i65 [[ADD]] to i128
+// RISCV32-NEXT: store i128 [[STOREDV4]], ptr [[AGG_RESULT]], align 8, !tbaa [[TBAA6]]
+// RISCV32-NEXT: ret void
+//
+// RISCV32_INT128-LABEL: define {{[^@]+}}@test_bitint_65_add_unsigned
+// RISCV32_INT128-SAME: (ptr dead_on_unwind noalias writable writeonly sret(i128) align 8 captures(none) initializes((0, 16)) [[AGG_RESULT:%.*]], ptr dead_on_return noundef readonly captures(none) [[TMP0:%.*]], ptr dead_on_return noundef readonly captures(none) [[TMP1:%.*]]) local_unnamed_addr #[[ATTR1:[0-9]+]] {
+// RISCV32_INT128-NEXT: entry:
+// RISCV32_INT128-NEXT: [[TMP2:%.*]] = load i128, ptr [[TMP0]], align 8, !tbaa [[TBAA6:![0-9]+]]
+// RISCV32_INT128-NEXT: [[A:%.*]] = trunc i128 [[TMP2]] to i65
+// RISCV32_INT128-NEXT: [[TMP3:%.*]] = load i128, ptr [[TMP1]], align 8, !tbaa [[TBAA6]]
+// RISCV32_INT128-NEXT: [[B:%.*]] = trunc i128 [[TMP3]] to i65
+// RISCV32_INT128-NEXT: [[ADD:%.*]] = add i65 [[B]], [[A]]
+// RISCV32_INT128-NEXT: [[STOREDV4:%.*]] = zext i65 [[ADD]] to i128
+// RISCV32_INT128-NEXT: store i128 [[STOREDV4]], ptr [[AGG_RESULT]], align 8, !tbaa [[TBAA6]]
+// RISCV32_INT128-NEXT: ret void
+//
+unsigned _BitInt(65) test_bitint_65_add_unsigned(unsigned _BitInt(65) a, unsigned _BitInt(65) b) {
+ return a + b;
+}
+
+// RISCV64-LABEL: define {{[^@]+}}@test_bitint_65_add_signed
+// RISCV64-SAME: (i65 noundef signext [[A:%.*]], i65 noundef signext [[B:%.*]]) local_unnamed_addr #[[ATTR0]] {
+// RISCV64-NEXT: entry:
+// RISCV64-NEXT: [[ADD:%.*]] = add nsw i65 [[B]], [[A]]
+// RISCV64-NEXT: ret i65 [[ADD]]
+//
+// RISCV32-LABEL: define {{[^@]+}}@test_bitint_65_add_signed
+// RISCV32-SAME: (ptr dead_on_unwind noalias writable writeonly sret(i128) align 8 captures(none) initializes((0, 16)) [[AGG_RESULT:%.*]], ptr dead_on_return noundef readonly captures(none) [[TMP0:%.*]], ptr dead_on_return noundef readonly captures(none) [[TMP1:%.*]]) local_unnamed_addr #[[ATTR1]] {
+// RISCV32-NEXT: entry:
+// RISCV32-NEXT: [[TMP2:%.*]] = load i128, ptr [[TMP0]], align 8, !tbaa [[TBAA6]]
+// RISCV32-NEXT: [[A:%.*]] = trunc i128 [[TMP2]] to i65
+// RISCV32-NEXT: [[TMP3:%.*]] = load i128, ptr [[TMP1]], align 8, !tbaa [[TBAA6]]
+// RISCV32-NEXT: [[B:%.*]] = trunc i128 [[TMP3]] to i65
+// RISCV32-NEXT: [[ADD:%.*]] = add nsw i65 [[B]], [[A]]
+// RISCV32-NEXT: [[STOREDV4:%.*]] = sext i65 [[ADD]] to i128
+// RISCV32-NEXT: store i128 [[STOREDV4]], ptr [[AGG_RESULT]], align 8, !tbaa [[TBAA6]]
+// RISCV32-NEXT: ret void
+//
+// RISCV32_INT128-LABEL: define {{[^@]+}}@test_bitint_65_add_signed
+// RISCV32_INT128-SAME: (ptr dead_on_unwind noalias writable writeonly sret(i128) align 8 captures(none) initializes((0, 16)) [[AGG_RESULT:%.*]], ptr dead_on_return noundef readonly captures(none) [[TMP0:%.*]], ptr dead_on_return noundef readonly captures(none) [[TMP1:%.*]]) local_unnamed_addr #[[ATTR1]] {
+// RISCV32_INT128-NEXT: entry:
+// RISCV32_INT128-NEXT: [[TMP2:%.*]] = load i128, ptr [[TMP0]], align 8, !tbaa [[TBAA6]]
+// RISCV32_INT128-NEXT: [[A:%.*]] = trunc i128 [[TMP2]] to i65
+// RISCV32_INT128-NEXT: [[TMP3:%.*]] = load i128, ptr [[TMP1]], align 8, !tbaa [[TBAA6]]
+// RISCV32_INT128-NEXT: [[B:%.*]] = trunc i128 [[TMP3]] to i65
+// RISCV32_INT128-NEXT: [[ADD:%.*]] = add nsw i65 [[B]], [[A]]
+// RISCV32_INT128-NEXT: [[STOREDV4:%.*]] = sext i65 [[ADD]] to i128
+// RISCV32_INT128-NEXT: store i128 [[STOREDV4]], ptr [[AGG_RESULT]], align 8, !tbaa [[TBAA6]]
+// RISCV32_INT128-NEXT: ret void
+//
+signed _BitInt(65) test_bitint_65_add_signed(signed _BitInt(65) a, signed _BitInt(65) b) {
+ return a + b;
+}
+
+// RISCV64-LABEL: define {{[^@]+}}@test_bitint_65_add_default
+// RISCV64-SAME: (i65 noundef signext [[A:%.*]], i65 noundef signext [[B:%.*]]) local_unnamed_addr #[[ATTR0]] {
+// RISCV64-NEXT: entry:
+// RISCV64-NEXT: [[ADD:%.*]] = add nsw i65 [[B]], [[A]]
+// RISCV64-NEXT: ret i65 [[ADD]]
+//
+// RISCV32-LABEL: define {{[^@]+}}@test_bitint_65_add_default
+// RISCV32-SAME: (ptr dead_on_unwind noalias writable writeonly sret(i128) align 8 captures(none) initializes((0, 16)) [[AGG_RESULT:%.*]], ptr dead_on_return noundef readonly captures(none) [[TMP0:%.*]], ptr dead_on_return noundef readonly captures(none) [[TMP1:%.*]]) local_unnamed_addr #[[ATTR1]] {
+// RISCV32-NEXT: entry:
+// RISCV32-NEXT: [[TMP2:%.*]] = load i128, ptr [[TMP0]], align 8, !tbaa [[TBAA6]]
+// RISCV32-NEXT: [[A:%.*]] = trunc i128 [[TMP2]] to i65
+// RISCV32-NEXT: [[TMP3:%.*]] = load i128, ptr [[TMP1]], align 8, !tbaa [[TBAA6]]
+// RISCV32-NEXT: [[B:%.*]] = trunc i128 [[TMP3]] to i65
+// RISCV32-NEXT: [[ADD:%.*]] = add nsw i65 [[B]], [[A]]
+// RISCV32-NEXT: [[STOREDV4:%.*]] = sext i65 [[ADD]] to i128
+// RISCV32-NEXT: store i128 [[STOREDV4]], ptr [[AGG_RESULT]], align 8, !tbaa [[TBAA6]]
+// RISCV32-NEXT: ret void
+//
+// RISCV32_INT128-LABEL: define {{[^@]+}}@test_bitint_65_add_default
+// RISCV32_INT128-SAME: (ptr dead_on_unwind noalias writable writeonly sret(i128) align 8 captures(none) initializes((0, 16)) [[AGG_RESULT:%.*]], ptr dead_on_return noundef readonly captures(none) [[TMP0:%.*]], ptr dead_on_return noundef readonly captures(none) [[TMP1:%.*]]) local_unnamed_addr #[[ATTR1]] {
+// RISCV32_INT128-NEXT: entry:
+// RISCV32_INT128-NEXT: [[TMP2:%.*]] = load i128, ptr [[TMP0]], align 8, !tbaa [[TBAA6]]
+// RISCV32_INT128-NEXT: [[A:%.*]] = trunc i128 [[TMP2]] to i65
+// RISCV32_INT128-NEXT: [[TMP3:%.*]] = load i128, ptr [[TMP1]], align 8, !tbaa [[TBAA6]]
+// RISCV32_INT128-NEXT: [[B:%.*]] = trunc i128 [[TMP3]] to i65
+// RISCV32_INT128-NEXT: [[ADD:%.*]] = add nsw i65 [[B]], [[A]]
+// RISCV32_INT128-NEXT: [[STOREDV4:%.*]] = sext i65 [[ADD]] to i128
+// RISCV32_INT128-NEXT: store i128 [[STOREDV4]], ptr [[AGG_RESULT]], align 8, !tbaa [[TBAA6]]
+// RISCV32_INT128-NEXT: ret void
+//
+_BitInt(65) test_bitint_65_add_default(_BitInt(65) a, _BitInt(65) b) {
+ return a + b;
+}
+
+
+// RISCV64-LABEL: define {{[^@]+}}@test_bitint_77_add_unsigned
+// RISCV64-SAME: (i77 noundef zeroext [[A:%.*]], i77 noundef zeroext [[B:%.*]]) local_unnamed_addr #[[ATTR0]] {
+// RISCV64-NEXT: entry:
+// RISCV64-NEXT: [[ADD:%.*]] = add i77 [[B]], [[A]]
+// RISCV64-NEXT: ret i77 [[ADD]]
+//
+// RISCV32-LABEL: define {{[^@]+}}@test_bitint_77_add_unsigned
+// RISCV32-SAME: (ptr dead_on_unwind noalias writable writeonly sret(i128) align 8 captures(none) initializes((0, 16)) [[AGG_RESULT:%.*]], ptr dead_on_return noundef readonly captures(none) [[TMP0:%.*]], ptr dead_on_return noundef readonly captures(none) [[TMP1:%.*]]) local_unnamed_addr #[[ATTR1]] {
+// RISCV32-NEXT: entry:
+// RISCV32-NEXT: [[TMP2:%.*]] = load i128, ptr [[TMP0]], align 8, !tbaa [[TBAA10:![0-9]+]]
+// RISCV32-NEXT: [[A:%.*]] = trunc i128 [[TMP2]] to i77
+// RISCV32-NEXT: [[TMP3:%.*]] = load i128, ptr [[TMP1]], align 8, !tbaa [[TBAA10]]
+// RISCV32-NEXT: [[B:%.*]] = trunc i128 [[TMP3]] to i77
+// RISCV32-NEXT: [[ADD:%.*]] = add i77 [[B]], [[A]]
+// RISCV32-NEXT: [[STOREDV4:%.*]] = zext i77 [[ADD]] to i128
+// RISCV32-NEXT: store i128 [[STOREDV4]], ptr [[AGG_RESULT]], align 8, !tbaa [[TBAA10]]
+// RISCV32-NEXT: ret void
+//
+// RISCV32_INT128-LABEL: define {{[^@]+}}@test_bitint_77_add_unsigned
+// RISCV32_INT128-SAME: (ptr dead_on_unwind noalias writable writeonly sret(i128) align 8 captures(none) initializes((0, 16)) [[AGG_RESULT:%.*]], ptr dead_on_return noundef readonly captures(none) [[TMP0:%.*]], ptr dead_on_return noundef readonly captures(none) [[TMP1:%.*]]) local_unnamed_addr #[[ATTR1]] {
+// RISCV32_INT128-NEXT: entry:
+// RISCV32_INT128-NEXT: [[TMP2:%.*]] = load i128, ptr [[TMP0]], align 8, !tbaa [[TBAA10:![0-9]+]]
+// RISCV32_INT128-NEXT: [[A:%.*]] = trunc i128 [[TMP2]] to i77
+// RISCV32_INT128-NEXT: [[TMP3:%.*]] = load i128, ptr [[TMP1]], align 8, !tbaa [[TBAA10]]
+// RISCV32_INT128-NEXT: [[B:%.*]] = trunc i128 [[TMP3]] to i77
+// RISCV32_INT128-NEXT: [[ADD:%.*]] = add i77 [[B]], [[A]]
+// RISCV32_INT128-NEXT: [[STOREDV4:%.*]] = zext i77 [[ADD]] to i128
+// RISCV32_INT128-NEXT: store i128 [[STOREDV4]], ptr [[AGG_RESULT]], align 8, !tbaa [[TBAA10]]
+// RISCV32_INT128-NEXT: ret void
+//
+unsigned _BitInt(77) test_bitint_77_add_unsigned(unsigned _BitInt(77) a, unsigned _BitInt(77) b) {
+ return a + b;
+}
+
+// RISCV64-LABEL: define {{[^@]+}}@test_bitint_77_add_signed
+// RISCV64-SAME: (i77 noundef signext [[A:%.*]], i77 noundef signext [[B:%.*]]) local_unnamed_addr #[[ATTR0]] {
+// RISCV64-NEXT: entry:
+// RISCV64-NEXT: [[ADD:%.*]] = add nsw i77 [[B]], [[A]]
+// RISCV64-NEXT: ret i77 [[ADD]]
+//
+// RISCV32-LABEL: define {{[^@]+}}@test_bitint_77_add_signed
+// RISCV32-SAME: (ptr dead_on_unwind noalias writable writeonly sret(i128) align 8 captures(none) initializes((0, 16)) [[AGG_RESULT:%.*]], ptr dead_on_return noundef readonly captures(none) [[TMP0:%.*]], ptr dead_on_return noundef readonly captures(none) [[TMP1:%.*]]) local_unnamed_addr #[[ATTR1]] {
+// RISCV32-NEXT: entry:
+// RISCV32-NEXT: [[TMP2:%.*]] = load i128, ptr [[TMP0]], align 8, !tbaa [[TBAA10]]
+// RISCV32-NEXT: [[A:%.*]] = trunc i128 [[TMP2]] to i77
+// RISCV32-NEXT: [[TMP3:%.*]] = load i128, ptr [[TMP1]], align 8, !tbaa [[TBAA10]]
+// RISCV32-NEXT: [[B:%.*]] = trunc i128 [[TMP3]] to i77
+// RISCV32-NEXT: [[ADD:%.*]] = add nsw i77 [[B]], [[A]]
+// RISCV32-NEXT: [[STOREDV4:%.*]] = sext i77 [[ADD]] to i128
+// RISCV32-NEXT: store i128 [[STOREDV4]], ptr [[AGG_RESULT]], align 8, !tbaa [[TBAA10]]
+// RISCV32-NEXT: ret void
+//
+// RISCV32_INT128-LABEL: define {{[^@]+}}@test_bitint_77_add_signed
+// RISCV32_INT128-SAME: (ptr dead_on_unwind noalias writable writeonly sret(i128) align 8 captures(none) initializes((0, 16)) [[AGG_RESULT:%.*]], ptr dead_on_return noundef readonly captures(none) [[TMP0:%.*]], ptr dead_on_return noundef readonly captures(none) [[TMP1:%.*]]) local_unnamed_addr #[[ATTR1]] {
+// RISCV32_INT128-NEXT: entry:
+// RISCV32_INT128-NEXT: [[TMP2:%.*]] = load i128, ptr [[TMP0]], align 8, !tbaa [[TBAA10]]
+// RISCV32_INT128-NEXT: [[A:%.*]] = trunc i128 [[TMP2]] to i77
+// RISCV32_INT128-NEXT: [[TMP3:%.*]] = load i128, ptr [[TMP1]], align 8, !tbaa [[TBAA10]]
+// RISCV32_INT128-NEXT: [[B:%.*]] = trunc i128 [[TMP3]] to i77
+// RISCV32_INT128-NEXT: [[ADD:%.*]] = add nsw i77 [[B]], [[A]]
+// RISCV32_INT128-NEXT: [[STOREDV4:%.*]] = sext i77 [[ADD]] to i128
+// RISCV32_INT128-NEXT: store i128 [[STOREDV4]], ptr [[AGG_RESULT]], align 8, !tbaa [[TBAA10]]
+// RISCV32_INT128-NEXT: ret void
+//
+signed _BitInt(77) test_bitint_77_add_signed(signed _BitInt(77) a, signed _BitInt(77) b) {
+ return a + b;
+}
+
+// RISCV64-LABEL: define {{[^@]+}}@test_bitint_77_add_default
+// RISCV64-SAME: (i77 noundef signext [[A:%.*]], i77 noundef signext [[B:%.*]]) local_unnamed_addr #[[ATTR0]] {
+// RISCV64-NEXT: entry:
+// RISCV64-NEXT: [[ADD:%.*]] = add nsw i77 [[B]], [[A]]
+// RISCV64-NEXT: ret i77 [[ADD]]
+//
+// RISCV32-LABEL: define {{[^@]+}}@test_bitint_77_add_default
+// RISCV32-SAME: (ptr dead_on_unwind noalias writable writeonly sret(i128) align 8 captures(none) initializes((0, 16)) [[AGG_RESULT:%.*]], ptr dead_on_return noundef readonly captures(none) [[TMP0:%.*]], ptr dead_on_return noundef readonly captures(none) [[TMP1:%.*]]) local_unnamed_addr #[[ATTR1]] {
+// RISCV32-NEXT: entry:
+// RISCV32-NEXT: [[TMP2:%.*]] = load i128, ptr [[TMP0]], align 8, !tbaa [[TBAA10]]
+// RISCV32-NEXT: [[A:%.*]] = trunc i128 [[TMP2]] to i77
+// RISCV32-NEXT: [[TMP3:%.*]] = load i128, ptr [[TMP1]], align 8, !tbaa [[TBAA10]]
+// RISCV32-NEXT: [[B:%.*]] = trunc i128 [[TMP3]] to i77
+// RISCV32-NEXT: [[ADD:%.*]] = add nsw i77 [[B]], [[A]]
+// RISCV32-NEXT: [[STOREDV4:%.*]] = sext i77 [[ADD]] to i128
+// RISCV32-NEXT: store i128 [[STOREDV4]], ptr [[AGG_RESULT]], align 8, !tbaa [[TBAA10]]
+// RISCV32-NEXT: ret void
+//
+// RISCV32_INT128-LABEL: define {{[^@]+}}@test_bitint_77_add_default
+// RISCV32_INT128-SAME: (ptr dead_on_unwind noalias writable writeonly sret(i128) align 8 captures(none) initializes((0, 16)) [[AGG_RESULT:%.*]], ptr dead_on_return noundef readonly captures(none) [[TMP0:%.*]], ptr dead_on_return noundef readonly captures(none) [[TMP1:%.*]]) local_unnamed_addr #[[ATTR1]] {
+// RISCV32_INT128-NEXT: entry:
+// RISCV32_INT128-NEXT: [[TMP2:%.*]] = load i128, ptr [[TMP0]], align 8, !tbaa [[TBAA10]]
+// RISCV32_INT128-NEXT: [[A:%.*]] = trunc i128 [[TMP2]] to i77
+// RISCV32_INT128-NEXT: [[TMP3:%.*]] = load i128, ptr [[TMP1]], align 8, !tbaa [[TBAA10]]
+// RISCV32_INT128-NEXT: [[B:%.*]] = trunc i128 [[TMP3]] to i77
+// RISCV32_INT128-NEXT: [[ADD:%.*]] = add nsw i77 [[B]], [[A]]
+// RISCV32_INT128-NEXT: [[STOREDV4:%.*]] = sext i77 [[ADD]] to i128
+// RISCV32_INT128-NEXT: store i128 [[STOREDV4]], ptr [[AGG_RESULT]], align 8, !tbaa [[TBAA10]]
+// RISCV32_INT128-NEXT: ret void
+//
+_BitInt(77) test_bitint_77_add_default(_BitInt(77) a, _BitInt(77) b) {
+ return a + b;
+}
diff --git a/clang/test/CodeGen/X86/avx-builtins.c b/clang/test/CodeGen/X86/avx-builtins.c
index 5f08b6b..bcffd861 100644
--- a/clang/test/CodeGen/X86/avx-builtins.c
+++ b/clang/test/CodeGen/X86/avx-builtins.c
@@ -1100,6 +1100,7 @@ __m256d test_mm256_hadd_pd(__m256d A, __m256d B) {
// CHECK: call {{.*}}<4 x double> @llvm.x86.avx.hadd.pd.256(<4 x double> %{{.*}}, <4 x double> %{{.*}})
return _mm256_hadd_pd(A, B);
}
+TEST_CONSTEXPR(match_m256d(_mm256_hadd_pd((__m256d){+1.0, +2.0, +3.0, +4.0}, (__m256d){+5.0, +6.0, +7.0, +8.0}), +3.0, +11.0, +7.0, +15.0));
__m256 test_mm256_hadd_ps(__m256 A, __m256 B) {
// CHECK-LABEL: test_mm256_hadd_ps
@@ -1107,17 +1108,27 @@ __m256 test_mm256_hadd_ps(__m256 A, __m256 B) {
return _mm256_hadd_ps(A, B);
}
+TEST_CONSTEXPR(match_m256(_mm256_hadd_ps(
+ (__m256){+1.0f, +2.0f, +3.0f, +4.0f, +5.0f, +6.0f, +7.0f, +8.0f},
+ (__m256){+9.0f, +10.0f, +11.0f, +12.0f, +13.0f, +14.0f, +15.0f, +16.0f}),
+ +3.0f, +7.0f, +19.0f, +23.0f, +11.0f, +15.0f, +27.0f, +31.0f));
+
__m256d test_mm256_hsub_pd(__m256d A, __m256d B) {
// CHECK-LABEL: test_mm256_hsub_pd
// CHECK: call {{.*}}<4 x double> @llvm.x86.avx.hsub.pd.256(<4 x double> %{{.*}}, <4 x double> %{{.*}})
return _mm256_hsub_pd(A, B);
}
+TEST_CONSTEXPR(match_m256d(_mm256_hsub_pd((__m256d){+1.0, +2.0, +4.0, +3.0}, (__m256d){+10.0, +6.0, +16.0, +8.0}), -1.0,+4.0,+1.0,+8.0));
__m256 test_mm256_hsub_ps(__m256 A, __m256 B) {
// CHECK-LABEL: test_mm256_hsub_ps
// CHECK: call {{.*}}<8 x float> @llvm.x86.avx.hsub.ps.256(<8 x float> %{{.*}}, <8 x float> %{{.*}})
return _mm256_hsub_ps(A, B);
}
+TEST_CONSTEXPR(match_m256(_mm256_hsub_ps(
+ (__m256){1.0f, 2.0f, 4.0f, 3.0f, 5.0f, 7.0f, 7.0f, 5.0f},
+ (__m256){6.0f, 9.0f, 11.0f, 8.0f, 13.0f, 17.0f, 15.0f, 11.0f}),
+ -1.0f, 1.0f, -3.0f, 3.0f, -2.0f, 2.0f, -4.0f, 4.0f));
__m256i test_mm256_insert_epi8(__m256i x, char b) {
// CHECK-LABEL: test_mm256_insert_epi8
@@ -2009,90 +2020,136 @@ int test_mm_testc_pd(__m128d A, __m128d B) {
// CHECK: call {{.*}}i32 @llvm.x86.avx.vtestc.pd(<2 x double> %{{.*}}, <2 x double> %{{.*}})
return _mm_testc_pd(A, B);
}
+TEST_CONSTEXPR(_mm_testc_pd((__m128d)(__v2df){-1.0, -2.0},(__m128d)(__v2df){-3.0, 4.0}) == 1);
+TEST_CONSTEXPR(_mm_testc_pd((__m128d)(__v2df){ 1.0, -2.0},(__m128d)(__v2df){-3.0, 4.0}) == 0);
+TEST_CONSTEXPR(_mm_testc_pd((__m128d)(__v2df){ 1.0, -2.0},(__m128d)(__v2df){ 0.0, 5.0}) == 1);
int test_mm256_testc_pd(__m256d A, __m256d B) {
// CHECK-LABEL: test_mm256_testc_pd
// CHECK: call {{.*}}i32 @llvm.x86.avx.vtestc.pd.256(<4 x double> %{{.*}}, <4 x double> %{{.*}})
return _mm256_testc_pd(A, B);
}
+TEST_CONSTEXPR(_mm256_testc_pd((__m256d)(__v4df){-1.0, 2.0, -3.0, 4.0},(__m256d)(__v4df){-5.0, 6.0, 7.0, 8.0}) == 1);
+TEST_CONSTEXPR(_mm256_testc_pd((__m256d)(__v4df){ 1.0, 2.0, -3.0, 4.0},(__m256d)(__v4df){-5.0, 6.0, 7.0, 8.0}) == 0);
+TEST_CONSTEXPR(_mm256_testc_pd((__m256d)(__v4df){-1.0, -2.0, -3.0, -4.0},(__m256d)(__v4df){ 5.0, 6.0, 7.0, 8.0}) == 1);
int test_mm_testc_ps(__m128 A, __m128 B) {
// CHECK-LABEL: test_mm_testc_ps
// CHECK: call {{.*}}i32 @llvm.x86.avx.vtestc.ps(<4 x float> %{{.*}}, <4 x float> %{{.*}})
return _mm_testc_ps(A, B);
}
+TEST_CONSTEXPR(_mm_testc_ps((__m128)(__v4sf){-1,-1,-9001.1009,0x0FA1L}, (__m128)(__v4sf){-1.0,-9001,9001,9000}) == 1);
+TEST_CONSTEXPR(_mm_testc_ps((__m128)(__v4sf){-1,2384.23,-9001.1009,0x0FA1L}, (__m128)(__v4sf){-1.0,-9001,9001,9000}) == 0);
+TEST_CONSTEXPR(_mm_testc_ps((__m128)(__v4sf){-1,-2,-9001.1009,-93}, (__m128)(__v4sf){-1.0,-9001,-0.9001,-1000}) == 1);
int test_mm256_testc_ps(__m256 A, __m256 B) {
// CHECK-LABEL: test_mm256_testc_ps
// CHECK: call {{.*}}i32 @llvm.x86.avx.vtestc.ps.256(<8 x float> %{{.*}}, <8 x float> %{{.*}})
return _mm256_testc_ps(A, B);
}
+TEST_CONSTEXPR(_mm256_testc_ps((__m256)(__v8sf){-1, -2, -3, -4, -5, -6, -7, 8},(__m256)(__v8sf){1, 2, 3, 4, 5, 6, 7, -8}) == 0);
+TEST_CONSTEXPR(_mm256_testc_ps((__m256)(__v8sf){0,0,0,0,0,0,0,-1.00001},(__m256)(__v8sf){1, 2, 3, 4, 5, 6, 7, -8}) == 1);
+TEST_CONSTEXPR(_mm256_testc_ps((__m256)(__v8sf){0,-0.00002,0,0,0,0,0,-1.00001},(__m256)(__v8sf){1, 2, 3, -4, 5, -6, -7, 8}) == 0);
int test_mm256_testc_si256(__m256i A, __m256i B) {
// CHECK-LABEL: test_mm256_testc_si256
// CHECK: call {{.*}}i32 @llvm.x86.avx.ptestc.256(<4 x i64> %{{.*}}, <4 x i64> %{{.*}})
return _mm256_testc_si256(A, B);
}
+TEST_CONSTEXPR(_mm256_testc_si256((__m256i)(__v4di){0,0,0,0}, (__m256i)(__v4di){0,0,0,0}) == 1);
+TEST_CONSTEXPR(_mm256_testc_si256((__m256i)(__v4di){0,0,-1,0}, (__m256i)(__v4di){0,0,1,0}) == 1);
+TEST_CONSTEXPR(_mm256_testc_si256((__m256i)(__v4di){-1,-2,1,3}, (__m256i)(__v4di){0,-1,1,1}) == 0);
int test_mm_testnzc_pd(__m128d A, __m128d B) {
// CHECK-LABEL: test_mm_testnzc_pd
// CHECK: call {{.*}}i32 @llvm.x86.avx.vtestnzc.pd(<2 x double> %{{.*}}, <2 x double> %{{.*}})
return _mm_testnzc_pd(A, B);
}
+TEST_CONSTEXPR(_mm_testnzc_pd((__m128d)(__v2df){-1.0, +2.0},(__m128d)(__v2df){-3.0, -4.0}) == 1);
+TEST_CONSTEXPR(_mm_testnzc_pd((__m128d)(__v2df){+1.0, +2.0},(__m128d)(__v2df){+3.0, -4.0}) == 0);
+TEST_CONSTEXPR(_mm_testnzc_pd((__m128d)(__v2df){-1.0, -2.0},(__m128d)(__v2df){-3.0, +4.0}) == 0);
int test_mm256_testnzc_pd(__m256d A, __m256d B) {
// CHECK-LABEL: test_mm256_testnzc_pd
// CHECK: call {{.*}}i32 @llvm.x86.avx.vtestnzc.pd.256(<4 x double> %{{.*}}, <4 x double> %{{.*}})
return _mm256_testnzc_pd(A, B);
}
+TEST_CONSTEXPR(_mm256_testnzc_pd((__m256d)(__v4df){-1.0, 2.0, 3.0, -4.0}, (__m256d)(__v4df){-5.0, -6.0, 7.0, 8.0}) == 1);
+TEST_CONSTEXPR(_mm256_testnzc_pd((__m256d)(__v4df){1.0, 2.0, 3.0, 4.0}, (__m256d)(__v4df){-1.0, 6.0, 7.0, 8.0}) == 0);
+TEST_CONSTEXPR(_mm256_testnzc_pd((__m256d)(__v4df){-1.0, -2.0, -3.0, -4.0}, (__m256d)(__v4df){-5.0, 6.0, 7.0, 8.0}) == 0);
int test_mm_testnzc_ps(__m128 A, __m128 B) {
// CHECK-LABEL: test_mm_testnzc_ps
// CHECK: call {{.*}}i32 @llvm.x86.avx.vtestnzc.ps(<4 x float> %{{.*}}, <4 x float> %{{.*}})
return _mm_testnzc_ps(A, B);
}
+TEST_CONSTEXPR(_mm_testnzc_ps((__m128)(__v4sf){-9.9,987,-67,0}, (__m128)(__v4sf){10.0,-1.12,-29.29,0}) == 1);
+TEST_CONSTEXPR(_mm_testnzc_ps((__m128)(__v4sf){-810.0,-1.0,-1.0,-3.0}, (__m128)(__v4sf){-10.0,-1.0,-1.0,-2.0}) == 0);
+TEST_CONSTEXPR(_mm_testnzc_ps((__m128)(__v4sf){0,0,0,0}, (__m128)(__v4sf){0,-1,0,-1}) == 0);
int test_mm256_testnzc_ps(__m256 A, __m256 B) {
// CHECK-LABEL: test_mm256_testnzc_ps
// CHECK: call {{.*}}i32 @llvm.x86.avx.vtestnzc.ps.256(<8 x float> %{{.*}}, <8 x float> %{{.*}})
return _mm256_testnzc_ps(A, B);
}
+TEST_CONSTEXPR(_mm256_testnzc_ps((__m256)(__v8sf){-1, -2, -3, -4, -5, -6, -7, 8},(__m256)(__v8sf){1, -2, 3, 4, 5, 6, 7, -8}) == 1);
+TEST_CONSTEXPR(_mm256_testnzc_ps((__m256)(__v8sf){0,0,0,0,0,0,0,-1.00001},(__m256)(__v8sf){1, 2, 3, 4, 5, 6, 7, -8}) == 0);
+TEST_CONSTEXPR(_mm256_testnzc_ps((__m256)(__v8sf){0,-0.00002,0,0,0,0,0,-1.00001},(__m256)(__v8sf){1, 2, 3, -4, 5, -6, -7, 8}) == 0);
int test_mm256_testnzc_si256(__m256i A, __m256i B) {
// CHECK-LABEL: test_mm256_testnzc_si256
// CHECK: call {{.*}}i32 @llvm.x86.avx.ptestnzc.256(<4 x i64> %{{.*}}, <4 x i64> %{{.*}})
return _mm256_testnzc_si256(A, B);
}
+TEST_CONSTEXPR(_mm256_testnzc_si256((__m256i)(__v4di){-1,0,0,0}, (__m256i)(__v4di){478329848,23438,2343,-3483}) == 1);
+TEST_CONSTEXPR(_mm256_testnzc_si256((__m256i)(__v4di){1,0,0,0}, (__m256i)(__v4di){3,0,0,0}) == 1);
+TEST_CONSTEXPR(_mm256_testnzc_si256((__m256i)(__v4di){-1,239483,-1,0}, (__m256i)(__v4di){3849234,0,-2,0}) == 0);
+TEST_CONSTEXPR(_mm256_testnzc_si256((__m256i)(__v4di){-1,0,-1,3}, (__m256i)(__v4di){1,0,9999999,1}) == 0);
int test_mm_testz_pd(__m128d A, __m128d B) {
// CHECK-LABEL: test_mm_testz_pd
// CHECK: call {{.*}}i32 @llvm.x86.avx.vtestz.pd(<2 x double> %{{.*}}, <2 x double> %{{.*}})
return _mm_testz_pd(A, B);
}
+TEST_CONSTEXPR(_mm_testz_pd((__m128d)(__v2df){-1,0}, (__m128d)(__v2df){0,-1}) == 1);
+TEST_CONSTEXPR(_mm_testz_pd((__m128d)(__v2df){0,-13.13}, (__m128d)(__v2df){0,-11.1}) == 0);
+TEST_CONSTEXPR(_mm_testz_pd((__m128d)(__v2df){0,5.13}, (__m128d)(__v2df){0,-113.1324823}) == 1);
int test_mm256_testz_pd(__m256d A, __m256d B) {
// CHECK-LABEL: test_mm256_testz_pd
// CHECK: call {{.*}}i32 @llvm.x86.avx.vtestz.pd.256(<4 x double> %{{.*}}, <4 x double> %{{.*}})
return _mm256_testz_pd(A, B);
}
+TEST_CONSTEXPR(_mm256_testz_pd((__m256d)(__v4df){-1,0,-47.47,0.00002}, (__m256d)(__v4df){0,-1,74.0101,-1}) == 1);
+TEST_CONSTEXPR(_mm256_testz_pd((__m256d)(__v4df){-1,3249.9,-47.47,-0.00002}, (__m256d)(__v4df){0,-1,74.0101,-9999900}) == 0);
+TEST_CONSTEXPR(_mm256_testz_pd((__m256d)(__v4df){0,0,-8,0}, (__m256d)(__v4df){0,-1,-101,-123}) == 0);
int test_mm_testz_ps(__m128 A, __m128 B) {
// CHECK-LABEL: test_mm_testz_ps
// CHECK: call {{.*}}i32 @llvm.x86.avx.vtestz.ps(<4 x float> %{{.*}}, <4 x float> %{{.*}})
return _mm_testz_ps(A, B);
}
+TEST_CONSTEXPR(_mm_testz_ps((__m128)(__v4sf){-9.9,987,67,0}, (__m128)(__v4sf){10.0,-1.12,-29.29,0}) == 1);
+TEST_CONSTEXPR(_mm_testz_ps((__m128)(__v4sf){10.0,1.0,-1.0,-3.0}, (__m128)(__v4sf){-10.0,-1.0,-1.0,-2.0}) == 0);
+TEST_CONSTEXPR(_mm_testz_ps((__m128)(__v4sf){0,0,0,0}, (__m128)(__v4sf){0,-1,0,-1}) == 1);
int test_mm256_testz_ps(__m256 A, __m256 B) {
// CHECK-LABEL: test_mm256_testz_ps
// CHECK: call {{.*}}i32 @llvm.x86.avx.vtestz.ps.256(<8 x float> %{{.*}}, <8 x float> %{{.*}})
return _mm256_testz_ps(A, B);
}
+TEST_CONSTEXPR(_mm256_testz_ps((__m256)(__v8sf){-1, -2, -3, -4, -5, -6, -7, 8},(__m256)(__v8sf){1, 2, 3, 4, 5, 6, 7, -8}) == 1);
+TEST_CONSTEXPR(_mm256_testz_ps((__m256)(__v8sf){0,0,0,0,0,0,0,-1.00001},(__m256)(__v8sf){1, 2, 3, 4, 5, 6, 7, -8}) == 0);
+TEST_CONSTEXPR(_mm256_testz_ps((__m256)(__v8sf){0,-0.00002,0,0,0,0,0,-1.00001},(__m256)(__v8sf){1, 2, 3, -4, 5, -6, -7, 8}) == 1);
int test_mm256_testz_si256(__m256i A, __m256i B) {
// CHECK-LABEL: test_mm256_testz_si256
// CHECK: call {{.*}}i32 @llvm.x86.avx.ptestz.256(<4 x i64> %{{.*}}, <4 x i64> %{{.*}})
return _mm256_testz_si256(A, B);
}
+TEST_CONSTEXPR(_mm256_testz_si256((__m256i)(__v4di){0,0,0,0}, (__m256i)(__v4di){0,0,0,0}) == 1);
+TEST_CONSTEXPR(_mm256_testz_si256((__m256i)(__v4di){0,0,-1,0}, (__m256i)(__v4di){0,0,-1,0}) == 0);
+TEST_CONSTEXPR(_mm256_testz_si256((__m256i)(__v4di){-1,0,1,0}, (__m256i)(__v4di){0,-1,0,1}) == 1);
__m256 test_mm256_undefined_ps(void) {
// X64-LABEL: test_mm256_undefined_ps
diff --git a/clang/test/CodeGen/X86/avx2-builtins.c b/clang/test/CodeGen/X86/avx2-builtins.c
index 55f18f9..b798618 100644
--- a/clang/test/CodeGen/X86/avx2-builtins.c
+++ b/clang/test/CodeGen/X86/avx2-builtins.c
@@ -485,36 +485,60 @@ __m256i test_mm256_hadd_epi16(__m256i a, __m256i b) {
// CHECK: call <16 x i16> @llvm.x86.avx2.phadd.w(<16 x i16> %{{.*}}, <16 x i16> %{{.*}})
return _mm256_hadd_epi16(a, b);
}
+TEST_CONSTEXPR(match_v16hi(_mm256_hadd_epi16(
+ (__m256i)(__v16hi){1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16},
+ (__m256i)(__v16hi){17,18,19,20,21,22,23,24,25,26,27,28,29,30,31,32}),
+ 3,7,11,15,35,39,43,47,19,23,27,31,51,55,59,63));
__m256i test_mm256_hadd_epi32(__m256i a, __m256i b) {
// CHECK-LABEL: test_mm256_hadd_epi32
// CHECK: call <8 x i32> @llvm.x86.avx2.phadd.d(<8 x i32> %{{.*}}, <8 x i32> %{{.*}})
return _mm256_hadd_epi32(a, b);
}
+TEST_CONSTEXPR(match_v8si(_mm256_hadd_epi32(
+ (__m256i)(__v8si){10, 20, 30, 40, 50, 60, 70, 80},
+ (__m256i)(__v8si){5, 15, 25, 35, 45, 55, 65, 75}),
+ 30,70,20,60,110,150,100,140));
__m256i test_mm256_hadds_epi16(__m256i a, __m256i b) {
// CHECK-LABEL: test_mm256_hadds_epi16
// CHECK:call <16 x i16> @llvm.x86.avx2.phadd.sw(<16 x i16> %{{.*}}, <16 x i16> %{{.*}})
return _mm256_hadds_epi16(a, b);
}
+TEST_CONSTEXPR(match_v16hi( _mm256_hadds_epi16(
+ (__m256i)(__v16hi){32767, 32767, 1,2,3,4,5,6,7,8,9,10,11,12,13,14},
+ (__m256i)(__v16hi){19,20,21,22,23,24,25,26,27,28,29,30,31,32, 32767, 5}),
+ 32767, 3,7,11, 39,43,47,51,15,19,23,27, 55,59,63, 32767));
__m256i test_mm256_hsub_epi16(__m256i a, __m256i b) {
// CHECK-LABEL: test_mm256_hsub_epi16
// CHECK: call <16 x i16> @llvm.x86.avx2.phsub.w(<16 x i16> %{{.*}}, <16 x i16> %{{.*}})
return _mm256_hsub_epi16(a, b);
}
+TEST_CONSTEXPR(match_v16hi(_mm256_hsub_epi16(
+ (__m256i)(__v16hi){2,1,1,2,5,3,3,5,7,4,4,7,9,5,5,9},
+ (__m256i)(__v16hi){10,5,5,10,12,6,6,12,21,14,14,21,24,16,16,24}),
+ 1,-1,2,-2,5,-5,6,-6,3,-3,4,-4, 7,-7,8,-8));
__m256i test_mm256_hsub_epi32(__m256i a, __m256i b) {
// CHECK-LABEL: test_mm256_hsub_epi32
// CHECK: call <8 x i32> @llvm.x86.avx2.phsub.d(<8 x i32> %{{.*}}, <8 x i32> %{{.*}})
return _mm256_hsub_epi32(a, b);
}
+TEST_CONSTEXPR(match_v8si(_mm256_hsub_epi32(
+ (__m256i)(__v8si){10, 20, 30,50,60,90,100,140},
+ (__m256i)(__v8si){200,150,260,200,420,350,800,720}),
+ -10,-20,50,60, -30,-40, 70,80));
__m256i test_mm256_hsubs_epi16(__m256i a, __m256i b) {
// CHECK-LABEL: test_mm256_hsubs_epi16
// CHECK:call <16 x i16> @llvm.x86.avx2.phsub.sw(<16 x i16> %{{.*}}, <16 x i16> %{{.*}})
return _mm256_hsubs_epi16(a, b);
}
+TEST_CONSTEXPR(match_v16hi(_mm256_hsubs_epi16(
+ (__m256i)(__v16hi){32726, -100, 3, 2, 6, 4, 8, 5,15,10 ,21, 14, 27, 18, 100, 90},
+ (__m256i)(__v16hi){40, 20, 100, 70, 200,150, 100,40, 1000,900,300,150, 500,300, 1, 1}),
+ 32767, 1, 2, 3, 20, 30, 50, 60, 5, 7, 9, 10, 100, 150, 200, 0));
__m128i test_mm_i32gather_epi32(int const *b, __m128i c) {
// CHECK-LABEL: test_mm_i32gather_epi32
@@ -1106,6 +1130,8 @@ __m256i test_mm256_shuffle_epi8(__m256i a, __m256i b) {
return _mm256_shuffle_epi8(a, b);
}
+TEST_CONSTEXPR(match_v32qi(_mm256_shuffle_epi8((__m256i)(__v32qi){0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20,21,22,23,24,25,26,27,28,29,30,31}, (__m256i)(__v32qs){0,33,2,35,4,37,6,-39,8,41,10,43,12,45,14,-47,16,49,18,51,20,53,22,-55,24,57,26,59,28,61,30,-63}), 0,1,2,3,4,5,6,0,8,9,10,11,12,13,14,0,16,17,18,19,20,21,22,0,24,25,26,27,28,29,30,0));
+
__m256i test_mm256_shuffle_epi32(__m256i a) {
// CHECK-LABEL: test_mm256_shuffle_epi32
// CHECK: shufflevector <8 x i32> %{{.*}}, <8 x i32> poison, <8 x i32> <i32 3, i32 3, i32 0, i32 0, i32 7, i32 7, i32 4, i32 4>
diff --git a/clang/test/CodeGen/X86/avx512bw-builtins.c b/clang/test/CodeGen/X86/avx512bw-builtins.c
index af1c904..fddf17d 100644
--- a/clang/test/CodeGen/X86/avx512bw-builtins.c
+++ b/clang/test/CodeGen/X86/avx512bw-builtins.c
@@ -1466,18 +1466,27 @@ __m512i test_mm512_shuffle_epi8(__m512i __A, __m512i __B) {
// CHECK: @llvm.x86.avx512.pshuf.b.512
return _mm512_shuffle_epi8(__A,__B);
}
+
+TEST_CONSTEXPR(match_v64qi(_mm512_shuffle_epi8((__m512i)(__v64qi){0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20,21,22,23,24,25,26,27,28,29,30,31,32,33,34,35,36,37,38,39,40,41,42,43,44,45,46,47,48,49,50,51,52,53,54,55,56,57,58,59,60,61,62,63}, (__m512i)(__v64qs){0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,-15,0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,-15,64,65,66,67,68,69,70,71,72,73,74,75,76,77,78,-79,80,81,82,83,84,85,86,87,88,89,90,91,92,93,94,-95}), 0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,0,16,17,18,19,20,21,22,23,24,25,26,27,28,29,30,0,32,33,34,35,36,37,38,39,40,41,42,43,44,45,46,0,48,49,50,51,52,53,54,55,56,57,58,59,60,61,62,0));
+
__m512i test_mm512_mask_shuffle_epi8(__m512i __W, __mmask64 __U, __m512i __A, __m512i __B) {
// CHECK-LABEL: test_mm512_mask_shuffle_epi8
// CHECK: @llvm.x86.avx512.pshuf.b.512
// CHECK: select <64 x i1> %{{.*}}, <64 x i8> %{{.*}}, <64 x i8> %{{.*}}
return _mm512_mask_shuffle_epi8(__W,__U,__A,__B);
}
+
+TEST_CONSTEXPR(match_v64qi(_mm512_mask_shuffle_epi8((__m512i)(__v64qi){1,1,1,1,1,1,1,1,2,2,2,2,2,2,2,2,3,3,3,3,3,3,3,3,4,4,4,4,4,4,4,4,5,5,5,5,5,5,5,5,6,6,6,6,6,6,6,6,7,7,7,7,7,7,7,7,8,8,8,8,8,8,8,8}, 0xFFFFFFFF00000000, (__m512i)(__v64qi){0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20,21,22,23,24,25,26,27,28,29,30,31,32,33,34,35,36,37,38,39,40,41,42,43,44,45,46,47,48,49,50,51,52,53,54,55,56,57,58,59,60,61,62,63}, (__m512i)(__v64qi){63,62,61,60,59,58,57,56,55,54,53,52,51,50,49,48,47,46,45,44,43,42,41,40,39,38,37,36,35,34,33,32,31,30,29,28,27,26,25,24,23,22,21,20,19,18,17,16,15,14,13,12,11,10,9,8,7,6,5,4,3,2,1,0}), 1,1,1,1,1,1,1,1,2,2,2,2,2,2,2,2,3,3,3,3,3,3,3,3,4,4,4,4,4,4,4,4,47,46,45,44,43,42,41,40,39,38,37,36,35,34,33,32,63,62,61,60,59,58,57,56,55,54,53,52,51,50,49,48));
+
__m512i test_mm512_maskz_shuffle_epi8(__mmask64 __U, __m512i __A, __m512i __B) {
// CHECK-LABEL: test_mm512_maskz_shuffle_epi8
// CHECK: @llvm.x86.avx512.pshuf.b.512
// CHECK: select <64 x i1> %{{.*}}, <64 x i8> %{{.*}}, <64 x i8> %{{.*}}
return _mm512_maskz_shuffle_epi8(__U,__A,__B);
}
+
+TEST_CONSTEXPR(match_v64qi(_mm512_maskz_shuffle_epi8(0x8888888888888888,(__m512i)(__v64qi){0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20,21,22,23,24,25,26,27,28,29,30,31,32,33,34,35,36,37,38,39,40,41,42,43,44,45,46,47,48,49,50,51,52,53,54,55,56,57,58,59,60,61,62,63}, (__m512i)(__v64qi){127,126,125,124,123,122,121,120,119,118,117,116,115,114,113,112,111,110,109,108,107,106,105,104,103,102,101,100,99,98,97,96,95,94,93,92,91,90,89,88,87,86,85,84,83,82,81,80,79,78,77,76,75,74,73,72,71,70,69,68,67,66,65,64}), 0,0,0,12,0,0,0,8,0,0,0,4,0,0,0,0,0,0,0,28,0,0,0,24,0,0,0,20,0,0,0,16,0,0,0,44,0,0,0,40,0,0,0,36,0,0,0,32,0,0,0,60,0,0,0,56,0,0,0,52,0,0,0,48));
+
__m512i test_mm512_subs_epi8(__m512i __A, __m512i __B) {
// CHECK-LABEL: test_mm512_subs_epi8
// CHECK: @llvm.ssub.sat.v64i8
diff --git a/clang/test/CodeGen/X86/avx512cd-builtins.c b/clang/test/CodeGen/X86/avx512cd-builtins.c
index b9d42b7..80a20b1 100644
--- a/clang/test/CodeGen/X86/avx512cd-builtins.c
+++ b/clang/test/CodeGen/X86/avx512cd-builtins.c
@@ -14,37 +14,53 @@
__m512i test_mm512_conflict_epi64(__m512i __A) {
// CHECK-LABEL: test_mm512_conflict_epi64
// CHECK: call {{.*}}<8 x i64> @llvm.x86.avx512.conflict.q.512(<8 x i64> %{{.*}})
- return _mm512_conflict_epi64(__A);
+ return _mm512_conflict_epi64(__A);
}
+
+TEST_CONSTEXPR(match_v8di(_mm512_conflict_epi64((__m512i)(__v8di){1, 2, 1, 3, 2, 4, 1, 5}), 0, 0, 1, 0, 2, 0, 5, 0));
+TEST_CONSTEXPR(match_v8di(_mm512_conflict_epi64((__m512i)(__v8di){5, 5, 5, 5, 5, 5, 5, 5}), 0, 1, 3, 7, 15, 31, 63, 127));
+TEST_CONSTEXPR(match_v8di(_mm512_conflict_epi64((__m512i)(__v8di){1, 2, 3, 4, 5, 6, 7, 8}), 0, 0, 0, 0, 0, 0, 0, 0));
__m512i test_mm512_mask_conflict_epi64(__m512i __W, __mmask8 __U, __m512i __A) {
// CHECK-LABEL: test_mm512_mask_conflict_epi64
// CHECK: call {{.*}}<8 x i64> @llvm.x86.avx512.conflict.q.512(<8 x i64> %{{.*}})
// CHECK: select <8 x i1> %{{.*}}, <8 x i64> %{{.*}}, <8 x i64> %{{.*}}
- return _mm512_mask_conflict_epi64(__W,__U,__A);
+ return _mm512_mask_conflict_epi64(__W,__U,__A);
}
+
+TEST_CONSTEXPR(match_v8di(_mm512_mask_conflict_epi64((__m512i)(__v8di){0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF}, 0x55, (__m512i)(__v8di){1, 2, 1, 3, 2, 4, 1, 5}), 0, 0xFF, 1, 0xFF, 2, 0xFF, 5, 0xFF));
__m512i test_mm512_maskz_conflict_epi64(__mmask8 __U, __m512i __A) {
// CHECK-LABEL: test_mm512_maskz_conflict_epi64
// CHECK: call {{.*}}<8 x i64> @llvm.x86.avx512.conflict.q.512(<8 x i64> %{{.*}})
// CHECK: select <8 x i1> %{{.*}}, <8 x i64> %{{.*}}, <8 x i64> %{{.*}}
- return _mm512_maskz_conflict_epi64(__U,__A);
+ return _mm512_maskz_conflict_epi64(__U,__A);
}
+
+TEST_CONSTEXPR(match_v8di(_mm512_maskz_conflict_epi64(0x55, (__m512i)(__v8di){1, 2, 1, 3, 2, 4, 1, 5}), 0, 0, 1, 0, 2, 0, 5, 0));
__m512i test_mm512_conflict_epi32(__m512i __A) {
// CHECK-LABEL: test_mm512_conflict_epi32
// CHECK: call <16 x i32> @llvm.x86.avx512.conflict.d.512(<16 x i32> %{{.*}})
- return _mm512_conflict_epi32(__A);
+ return _mm512_conflict_epi32(__A);
}
+
+TEST_CONSTEXPR(match_v16si(_mm512_conflict_epi32((__m512i)(__v16si){1, 2, 1, 3, 2, 4, 1, 5, 6, 7, 6, 8, 7, 9, 6, 10}), 0, 0, 1, 0, 2, 0, 5, 0, 0, 0, 256, 0, 512, 0, 1280, 0));
+TEST_CONSTEXPR(match_v16si(_mm512_conflict_epi32((__m512i)(__v16si){9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9}), 0, 1, 3, 7, 15, 31, 63, 127, 255, 511, 1023, 2047, 4095, 8191, 16383, 32767));
+TEST_CONSTEXPR(match_v16si(_mm512_conflict_epi32((__m512i)(__v16si){1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16}), 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0));
__m512i test_mm512_mask_conflict_epi32(__m512i __W, __mmask16 __U, __m512i __A) {
// CHECK-LABEL: test_mm512_mask_conflict_epi32
// CHECK: call <16 x i32> @llvm.x86.avx512.conflict.d.512(<16 x i32> %{{.*}})
// CHECK: select <16 x i1> %{{.*}}, <16 x i32> %{{.*}}, <16 x i32> %{{.*}}
- return _mm512_mask_conflict_epi32(__W,__U,__A);
+ return _mm512_mask_conflict_epi32(__W,__U,__A);
}
+
+TEST_CONSTEXPR(match_v16si(_mm512_mask_conflict_epi32((__m512i)(__v16si){0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF}, 0x5555, (__m512i)(__v16si){1, 2, 1, 3, 2, 4, 1, 5, 6, 7, 6, 8, 7, 9, 6, 10}), 0, 0xFF, 1, 0xFF, 2, 0xFF, 5, 0xFF, 0, 0xFF, 256, 0xFF, 512, 0xFF, 1280, 0xFF));
__m512i test_mm512_maskz_conflict_epi32(__mmask16 __U, __m512i __A) {
// CHECK-LABEL: test_mm512_maskz_conflict_epi32
// CHECK: call <16 x i32> @llvm.x86.avx512.conflict.d.512(<16 x i32> %{{.*}})
// CHECK: select <16 x i1> %{{.*}}, <16 x i32> %{{.*}}, <16 x i32> %{{.*}}
- return _mm512_maskz_conflict_epi32(__U,__A);
+ return _mm512_maskz_conflict_epi32(__U,__A);
}
+
+TEST_CONSTEXPR(match_v16si(_mm512_maskz_conflict_epi32(0x5555, (__m512i)(__v16si){1, 2, 1, 3, 2, 4, 1, 5, 6, 7, 6, 8, 7, 9, 6, 10}), 0, 0, 1, 0, 2, 0, 5, 0, 0, 0, 256, 0, 512, 0, 1280, 0));
__m512i test_mm512_lzcnt_epi32(__m512i __A) {
// CHECK-LABEL: test_mm512_lzcnt_epi32
// CHECK: call <16 x i32> @llvm.ctlz.v16i32(<16 x i32> %{{.*}}, i1 true)
@@ -125,6 +141,8 @@ __m512i test_mm512_broadcastmb_epi64(__m512i a, __m512i b) {
// CHECK: insertelement <8 x i64> %{{.*}}, i64 %{{.*}}, i32 7
return _mm512_broadcastmb_epi64(_mm512_cmpeq_epu64_mask ( a, b));
}
+TEST_CONSTEXPR(match_v8di(_mm512_broadcastmb_epi64((__mmask8)(0)), 0,0,0,0, 0,0,0,0));
+TEST_CONSTEXPR(match_v8di(_mm512_broadcastmb_epi64((__mmask8)(0xab)), 0xab,0xab,0xab,0xab, 0xab,0xab,0xab,0xab));
__m512i test_mm512_broadcastmw_epi32(__m512i a, __m512i b) {
// CHECK-LABEL: test_mm512_broadcastmw_epi32
@@ -148,3 +166,5 @@ __m512i test_mm512_broadcastmw_epi32(__m512i a, __m512i b) {
// CHECK: insertelement <16 x i32> %{{.*}}, i32 %{{.*}}
return _mm512_broadcastmw_epi32(_mm512_cmpeq_epi32_mask ( a, b));
}
+TEST_CONSTEXPR(match_v16si(_mm512_broadcastmw_epi32((__mmask16)(0xff)), 0xff,0xff,0xff,0xff, 0xff,0xff,0xff,0xff, 0xff,0xff,0xff,0xff, 0xff,0xff,0xff,0xff));
+TEST_CONSTEXPR(match_v16si(_mm512_broadcastmw_epi32((__mmask16)(0x0FA1L)), 0x0FA1L,0x0FA1L,0x0FA1L,0x0FA1L, 0x0FA1L,0x0FA1L,0x0FA1L,0x0FA1L, 0x0FA1L,0x0FA1L,0x0FA1L,0x0FA1L, 0x0FA1L,0x0FA1L,0x0FA1L,0x0FA1L));
diff --git a/clang/test/CodeGen/X86/avx512dq-builtins.c b/clang/test/CodeGen/X86/avx512dq-builtins.c
index 4112561..1b09959 100644
--- a/clang/test/CodeGen/X86/avx512dq-builtins.c
+++ b/clang/test/CodeGen/X86/avx512dq-builtins.c
@@ -1305,6 +1305,7 @@ __m512 test_mm512_mask_broadcast_f32x8(__m512 __O, __mmask16 __M, float const* _
// CHECK: select <16 x i1> %{{.*}}, <16 x float> %{{.*}}, <16 x float> %{{.*}}
return _mm512_mask_broadcast_f32x8(__O, __M, _mm256_loadu_ps(__A));
}
+TEST_CONSTEXPR(match_m512(_mm512_mask_broadcast_f32x8(_mm512_setzero_ps(), 0xAAAA, (__m256)(__v8sf){5.0f,5.0f,5.0f,5.0f,5.0f,5.0f,5.0f,5.0f}), 0,5,0,5,0,5,0,5,0,5,0,5,0,5,0,5));
__m512 test_mm512_maskz_broadcast_f32x8(__mmask16 __M, float const* __A) {
// CHECK-LABEL: test_mm512_maskz_broadcast_f32x8
@@ -1312,6 +1313,7 @@ __m512 test_mm512_maskz_broadcast_f32x8(__mmask16 __M, float const* __A) {
// CHECK: select <16 x i1> %{{.*}}, <16 x float> %{{.*}}, <16 x float> %{{.*}}
return _mm512_maskz_broadcast_f32x8(__M, _mm256_loadu_ps(__A));
}
+TEST_CONSTEXPR(match_m512(_mm512_maskz_broadcast_f32x8(0xAAAA, _mm256_set1_ps(7.0f)), 0,7,0,7,0,7,0,7,0,7,0,7,0,7,0,7));
__m512d test_mm512_broadcast_f64x2(double const* __A) {
// CHECK-LABEL: test_mm512_broadcast_f64x2
@@ -1327,6 +1329,8 @@ __m512d test_mm512_mask_broadcast_f64x2(__m512d __O, __mmask8 __M, double const*
return _mm512_mask_broadcast_f64x2(__O, __M, _mm_loadu_pd(__A));
}
+TEST_CONSTEXPR(match_m512d(_mm512_mask_broadcast_f64x2(_mm512_setzero_pd(), 0xAA, (__m128d)(__v2df){1,2}), 0,2,0,2,0,2,0,2));
+
__m512d test_mm512_maskz_broadcast_f64x2(__mmask8 __M, double const* __A) {
// CHECK-LABEL: test_mm512_maskz_broadcast_f64x2
// CHECK: shufflevector <2 x double> %{{.*}}, <2 x double> %{{.*}}, <8 x i32> <i32 0, i32 1, i32 0, i32 1, i32 0, i32 1, i32 0, i32 1>
@@ -1334,6 +1338,8 @@ __m512d test_mm512_maskz_broadcast_f64x2(__mmask8 __M, double const* __A) {
return _mm512_maskz_broadcast_f64x2(__M, _mm_loadu_pd(__A));
}
+TEST_CONSTEXPR(match_m512d(_mm512_maskz_broadcast_f64x2(0xAA, (__m128d)(__v2df){1,2}), 0,2,0,2,0,2,0,2));
+
__m512i test_mm512_broadcast_i32x2(__m128i __A) {
// CHECK-LABEL: test_mm512_broadcast_i32x2
// CHECK: shufflevector <4 x i32> %{{.*}}, <4 x i32> %{{.*}}, <16 x i32> <i32 0, i32 1, i32 0, i32 1, i32 0, i32 1, i32 0, i32 1, i32 0, i32 1, i32 0, i32 1, i32 0, i32 1, i32 0, i32 1>
@@ -1348,6 +1354,8 @@ __m512i test_mm512_mask_broadcast_i32x2(__m512i __O, __mmask16 __M, __m128i __A)
return _mm512_mask_broadcast_i32x2(__O, __M, __A);
}
+TEST_CONSTEXPR(match_v16si(_mm512_mask_broadcast_i32x2(_mm512_setzero_si512(), 0xAAAA, (__m128i)(__v4si){0,1,2,3}), 0,1,0,1,0,1,0,1,0,1,0,1,0,1,0,1));
+
__m512i test_mm512_maskz_broadcast_i32x2(__mmask16 __M, __m128i __A) {
// CHECK-LABEL: test_mm512_maskz_broadcast_i32x2
// CHECK: shufflevector <4 x i32> %{{.*}}, <4 x i32> %{{.*}}, <16 x i32> <i32 0, i32 1, i32 0, i32 1, i32 0, i32 1, i32 0, i32 1, i32 0, i32 1, i32 0, i32 1, i32 0, i32 1, i32 0, i32 1>
@@ -1355,6 +1363,8 @@ __m512i test_mm512_maskz_broadcast_i32x2(__mmask16 __M, __m128i __A) {
return _mm512_maskz_broadcast_i32x2(__M, __A);
}
+TEST_CONSTEXPR(match_v16si(_mm512_maskz_broadcast_i32x2(0xAAAA, (__m128i)(__v4si){0,1,2,3}), 0,1,0,1,0,1,0,1,0,1,0,1,0,1,0,1));
+
__m512i test_mm512_broadcast_i32x8(__m256i const* __A) {
// CHECK-LABEL: test_mm512_broadcast_i32x8
// CHECK: shufflevector <8 x i32> %{{.*}}, <8 x i32> %{{.*}}, <16 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7>
@@ -1368,6 +1378,7 @@ __m512i test_mm512_mask_broadcast_i32x8(__m512i __O, __mmask16 __M, __m256i cons
// CHECK: select <16 x i1> %{{.*}}, <16 x i32> %{{.*}}, <16 x i32> %{{.*}}
return _mm512_mask_broadcast_i32x8(__O, __M, _mm256_loadu_si256(__A));
}
+TEST_CONSTEXPR(match_v16si(_mm512_mask_broadcast_i32x8(_mm512_setzero_si512(), 0xAAAA, _mm256_set1_epi32(8)), 0,8,0,8,0,8,0,8,0,8,0,8,0,8,0,8));
__m512i test_mm512_maskz_broadcast_i32x8(__mmask16 __M, __m256i const* __A) {
// CHECK-LABEL: test_mm512_maskz_broadcast_i32x8
@@ -1376,6 +1387,8 @@ __m512i test_mm512_maskz_broadcast_i32x8(__mmask16 __M, __m256i const* __A) {
return _mm512_maskz_broadcast_i32x8(__M, _mm256_loadu_si256(__A));
}
+TEST_CONSTEXPR(match_v16si(_mm512_maskz_broadcast_i32x8(0xAAAA, _mm256_set1_epi32(9)), 0,9,0,9,0,9,0,9,0,9,0,9,0,9,0,9));
+
__m512i test_mm512_broadcast_i64x2(__m128i const* __A) {
// CHECK-LABEL: test_mm512_broadcast_i64x2
// CHECK: shufflevector <2 x i64> %{{.*}}, <2 x i64> %{{.*}}, <8 x i32> <i32 0, i32 1, i32 0, i32 1, i32 0, i32 1, i32 0, i32 1>
diff --git a/clang/test/CodeGen/X86/avx512f-builtins.c b/clang/test/CodeGen/X86/avx512f-builtins.c
index 7756f0d..3deaf8e 100644
--- a/clang/test/CodeGen/X86/avx512f-builtins.c
+++ b/clang/test/CodeGen/X86/avx512f-builtins.c
@@ -6836,6 +6836,8 @@ __m512 test_mm512_mask_broadcast_f32x4(__m512 __O, __mmask16 __M, float const* _
return _mm512_mask_broadcast_f32x4(__O, __M, _mm_loadu_ps(__A));
}
+TEST_CONSTEXPR(match_m512(_mm512_mask_broadcast_f32x4(_mm512_setzero_ps(), 0xAAAA, (__m128)(__v4sf){1,2,3,4}), 0,2,0,4,0,2,0,4,0,2,0,4,0,2,0,4));
+
__m512 test_mm512_maskz_broadcast_f32x4(__mmask16 __M, float const* __A) {
// CHECK-LABEL: test_mm512_maskz_broadcast_f32x4
// CHECK: shufflevector <4 x float> %{{.*}}, <4 x float> %{{.*}}, <16 x i32> <i32 0, i32 1, i32 2, i32 3, i32 0, i32 1, i32 2, i32 3, i32 0, i32 1, i32 2, i32 3, i32 0, i32 1, i32 2, i32 3>
@@ -6843,6 +6845,8 @@ __m512 test_mm512_maskz_broadcast_f32x4(__mmask16 __M, float const* __A) {
return _mm512_maskz_broadcast_f32x4(__M, _mm_loadu_ps(__A));
}
+TEST_CONSTEXPR(match_m512(_mm512_maskz_broadcast_f32x4(0xAAAA, (__m128)(__v4sf){1,2,3,4}), 0,2,0,4,0,2,0,4,0,2,0,4,0,2,0,4));
+
__m512d test_mm512_broadcast_f64x4(double const* __A) {
// CHECK-LABEL: test_mm512_broadcast_f64x4
// CHECK: shufflevector <4 x double> %{{.*}}, <4 x double> %{{.*}}, <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 0, i32 1, i32 2, i32 3>
@@ -6885,6 +6889,8 @@ __m512i test_mm512_maskz_broadcast_i32x4(__mmask16 __M, __m128i const* __A) {
return _mm512_maskz_broadcast_i32x4(__M, _mm_loadu_si128(__A));
}
+TEST_CONSTEXPR(match_v16si(_mm512_maskz_broadcast_i32x4(0xAAAA, (__m128i)(__v4si){0,1,2,3}), 0,1,0,3,0,1,0,3,0,1,0,3,0,1,0,3));
+
__m512i test_mm512_broadcast_i64x4(__m256i const* __A) {
// CHECK-LABEL: test_mm512_broadcast_i64x4
// CHECK: shufflevector <4 x i64> %{{.*}}, <4 x i64> %{{.*}}, <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 0, i32 1, i32 2, i32 3>
@@ -10903,6 +10909,8 @@ __m512i test_mm512_mask_set1_epi32 (__m512i __O, __mmask16 __M, int __A)
return _mm512_mask_set1_epi32 ( __O, __M, __A);
}
+TEST_CONSTEXPR(match_v16si(_mm512_mask_set1_epi32(_mm512_setzero_si512(), 0xAAAA, 13), 0,13,0,13,0,13,0,13,0,13,0,13,0,13,0,13));
+
__m512i test_mm512_maskz_set1_epi32(__mmask16 __M, int __A)
{
// CHECK-LABEL: test_mm512_maskz_set1_epi32
@@ -10926,6 +10934,8 @@ __m512i test_mm512_maskz_set1_epi32(__mmask16 __M, int __A)
return _mm512_maskz_set1_epi32(__M, __A);
}
+TEST_CONSTEXPR(match_v16si(_mm512_maskz_set1_epi32(0xAAAA, 19), 0,19,0,19,0,19,0,19,0,19,0,19,0,19,0,19));
+
__m512i test_mm512_set_epi8(char e63, char e62, char e61, char e60, char e59,
char e58, char e57, char e56, char e55, char e54, char e53, char e52,
@@ -11145,6 +11155,8 @@ __m512i test_mm512_mask_set1_epi64 (__m512i __O, __mmask8 __M, long long __A)
return _mm512_mask_set1_epi64 (__O, __M, __A);
}
+TEST_CONSTEXPR(match_v8di(_mm512_mask_set1_epi64(_mm512_setzero_si512(), 0xAA, 21), 0,21,0,21,0,21,0,21));
+
__m512i test_mm512_maskz_set1_epi64 (__mmask8 __M, long long __A)
{
// CHECK-LABEL: test_mm512_maskz_set1_epi64
@@ -11160,6 +11172,8 @@ __m512i test_mm512_maskz_set1_epi64 (__mmask8 __M, long long __A)
return _mm512_maskz_set1_epi64 (__M, __A);
}
+TEST_CONSTEXPR(match_v8di(_mm512_maskz_set1_epi64(0xAA, 23), 0, 23, 0, 23, 0, 23, 0, 23));
+
__m512i test_mm512_set_epi64 (long long __A, long long __B, long long __C,
long long __D, long long __E, long long __F,
diff --git a/clang/test/CodeGen/X86/avx512ifma-builtins.c b/clang/test/CodeGen/X86/avx512ifma-builtins.c
index eebefb0..f90697e 100644
--- a/clang/test/CodeGen/X86/avx512ifma-builtins.c
+++ b/clang/test/CodeGen/X86/avx512ifma-builtins.c
@@ -8,45 +8,230 @@
// RUN: %clang_cc1 -x c++ -flax-vector-conversions=none -ffreestanding %s -triple=x86_64-apple-darwin -target-feature +avx512ifma -emit-llvm -o - -Wall -Werror -fexperimental-new-constant-interpreter | FileCheck %s
// RUN: %clang_cc1 -x c++ -flax-vector-conversions=none -ffreestanding %s -triple=i386-apple-darwin -target-feature +avx512ifma -emit-llvm -o - -Wall -Werror -fexperimental-new-constant-interpreter | FileCheck %s
-
#include <immintrin.h>
+#include "builtin_test_helpers.h"
__m512i test_mm512_madd52hi_epu64(__m512i __X, __m512i __Y, __m512i __Z) {
// CHECK-LABEL: test_mm512_madd52hi_epu64
// CHECK: call {{.*}}<8 x i64> @llvm.x86.avx512.vpmadd52h.uq.512(<8 x i64> %{{.*}}, <8 x i64> %{{.*}}, <8 x i64> %{{.*}})
- return _mm512_madd52hi_epu64(__X, __Y, __Z);
+ return _mm512_madd52hi_epu64(__X, __Y, __Z);
}
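+// vpmadd52huq semantics: per 64-bit lane, result = X + (high 52 bits of (Y[51:0] * Z[51:0])).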
+TEST_CONSTEXPR(match_v8di(_mm512_madd52hi_epu64(
+ (__m512i)(__v8du){100, 0, 0, 0, 0, 0, 0, 0},
+ (__m512i)(__v8du){10, 0, 0, 0, 0, 0, 0, 0},
+ (__m512i)(__v8du){5, 0, 0, 0, 0, 0, 0, 0}),
+ 100, 0, 0, 0, 0, 0, 0, 0));
+
+TEST_CONSTEXPR(match_v8di(_mm512_madd52hi_epu64(
+ (__m512i)(__v8du){0, 0, 0, 0, 0, 0, 0, 0},
+ (__m512i)(__v8du){0xFFFFFFFFFFFFFull, 0, 0, 0,
+ 0, 0, 0, 0},
+ (__m512i)(__v8du){0xFFFFFFFFFFFFFull, 0, 0, 0,
+ 0, 0, 0, 0}),
+ 0xFFFFFFFFFFFFEull, 0, 0, 0, 0, 0, 0, 0));
+
+TEST_CONSTEXPR(match_v8di(_mm512_madd52hi_epu64(
+ (__m512i)(__v8du){1, 2, 3, 4, 5, 6, 7, 8},
+ (__m512i)(__v8du){0xFFFFFFFFFFFFFull,
+ 0xFFFFFFFFFFFFFull,
+ 0xFFFFFFFFFFFFFull,
+ 0xFFFFFFFFFFFFFull,
+ 0xFFFFFFFFFFFFFull,
+ 0xFFFFFFFFFFFFFull,
+ 0xFFFFFFFFFFFFFull,
+ 0xFFFFFFFFFFFFFull},
+ (__m512i)(__v8du){0xFFFFFFFFFFFFFull,
+ 0xFFFFFFFFFFFFFull,
+ 0xFFFFFFFFFFFFFull,
+ 0xFFFFFFFFFFFFFull,
+ 0xFFFFFFFFFFFFFull,
+ 0xFFFFFFFFFFFFFull,
+ 0xFFFFFFFFFFFFFull,
+ 0xFFFFFFFFFFFFFull}),
+ 4503599627370495ull, 4503599627370496ull,
+ 4503599627370497ull, 4503599627370498ull,
+ 4503599627370499ull, 4503599627370500ull,
+ 4503599627370501ull, 4503599627370502ull));
+
__m512i test_mm512_mask_madd52hi_epu64(__m512i __W, __mmask8 __M, __m512i __X, __m512i __Y) {
// CHECK-LABEL: test_mm512_mask_madd52hi_epu64
// CHECK: call {{.*}}<8 x i64> @llvm.x86.avx512.vpmadd52h.uq.512(<8 x i64> %{{.*}}, <8 x i64> %{{.*}}, <8 x i64> %{{.*}})
// CHECK: select <8 x i1> %{{.*}}, <8 x i64> %{{.*}}, <8 x i64> %{{.*}}
- return _mm512_mask_madd52hi_epu64(__W, __M, __X, __Y);
+ return _mm512_mask_madd52hi_epu64(__W, __M, __X, __Y);
}
+TEST_CONSTEXPR(match_v8di(_mm512_mask_madd52hi_epu64(
+ (__m512i)(__v8du){111, 222, 333, 444, 555, 666,
+ 777, 888},
+ 0x00,
+ (__m512i)(__v8du){1, 2, 3, 4, 5, 6, 7, 8},
+ (__m512i)(__v8du){10, 20, 30, 40, 50, 60, 70,
+ 80}),
+ 111, 222, 333, 444, 555, 666, 777, 888));
+
+TEST_CONSTEXPR(match_v8di(_mm512_mask_madd52hi_epu64(
+ (__m512i)(__v8du){10, 20, 30, 40, 50, 60, 70,
+ 80},
+ 0xFF,
+ (__m512i)(__v8du){100, 200, 300, 400, 500, 600,
+ 700, 800},
+ (__m512i)(__v8du){10, 20, 30, 40, 50, 60, 70,
+ 80}),
+ 10, 20, 30, 40, 50, 60, 70, 80));
+
__m512i test_mm512_maskz_madd52hi_epu64(__mmask8 __M, __m512i __X, __m512i __Y, __m512i __Z) {
// CHECK-LABEL: test_mm512_maskz_madd52hi_epu64
// CHECK: call {{.*}}<8 x i64> @llvm.x86.avx512.vpmadd52h.uq.512(<8 x i64> %{{.*}}, <8 x i64> %{{.*}}, <8 x i64> %{{.*}})
// CHECK: select <8 x i1> %{{.*}}, <8 x i64> %{{.*}}, <8 x i64> %{{.*}}
- return _mm512_maskz_madd52hi_epu64(__M, __X, __Y, __Z);
+ return _mm512_maskz_madd52hi_epu64(__M, __X, __Y, __Z);
}
+TEST_CONSTEXPR(match_v8di(_mm512_maskz_madd52hi_epu64(
+ 0x00,
+ (__m512i)(__v8du){1, 2, 3, 4, 5, 6, 7, 8},
+ (__m512i)(__v8du){10, 20, 30, 40, 50, 60, 70,
+ 80},
+ (__m512i)(__v8du){100, 200, 300, 400, 500, 600,
+ 700, 800}),
+ 0, 0, 0, 0, 0, 0, 0, 0));
+
+TEST_CONSTEXPR(match_v8di(_mm512_maskz_madd52hi_epu64(
+ 0xFF,
+ (__m512i)(__v8du){1, 2, 3, 4, 5, 6, 7, 8},
+ (__m512i)(__v8du){10, 20, 30, 40, 50, 60, 70,
+ 80},
+ (__m512i)(__v8du){100, 200, 300, 400, 500, 600,
+ 700, 800}),
+ 1, 2, 3, 4, 5, 6, 7, 8));
+
__m512i test_mm512_madd52lo_epu64(__m512i __X, __m512i __Y, __m512i __Z) {
// CHECK-LABEL: test_mm512_madd52lo_epu64
// CHECK: call {{.*}}<8 x i64> @llvm.x86.avx512.vpmadd52l.uq.512(<8 x i64> %{{.*}}, <8 x i64> %{{.*}}, <8 x i64> %{{.*}})
- return _mm512_madd52lo_epu64(__X, __Y, __Z);
+ return _mm512_madd52lo_epu64(__X, __Y, __Z);
}
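+// vpmadd52luq semantics: per 64-bit lane, result = X + (low 52 bits of (Y[51:0] * Z[51:0])).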
+TEST_CONSTEXPR(match_v8di(_mm512_madd52lo_epu64(
+ (__m512i)(__v8du){0, 0, 0, 0, 0, 0, 0, 0},
+ (__m512i)(__v8du){10, 0, 0, 0, 0, 0, 0, 0},
+ (__m512i)(__v8du){5, 0, 0, 0, 0, 0, 0, 0}),
+ 50, 0, 0, 0, 0, 0, 0, 0));
+
+TEST_CONSTEXPR(match_v8di(_mm512_madd52lo_epu64(
+ (__m512i)(__v8du){100, 0, 0, 0, 0, 0, 0, 0},
+ (__m512i)(__v8du){20, 0, 0, 0, 0, 0, 0, 0},
+ (__m512i)(__v8du){30, 0, 0, 0, 0, 0, 0, 0}),
+ 700, 0, 0, 0, 0, 0, 0, 0));
+
+TEST_CONSTEXPR(match_v8di(_mm512_madd52lo_epu64(
+ (__m512i)(__v8du){0, 0, 0, 0, 0, 0, 0, 0},
+ (__m512i)(__v8du){0xFFFFFFFFFFFFFull, 0, 0, 0,
+ 0, 0, 0, 0},
+ (__m512i)(__v8du){1, 0, 0, 0, 0, 0, 0, 0}),
+ 0xFFFFFFFFFFFFFull, 0, 0, 0, 0, 0, 0, 0));
+
+TEST_CONSTEXPR(match_v8di(_mm512_madd52lo_epu64(
+ (__m512i)(__v8du){0, 0, 0, 0, 0, 0, 0, 0},
+ (__m512i)(__v8du){0x1F000000000000ull, 0, 0, 0,
+ 0, 0, 0, 0},
+ (__m512i)(__v8du){2, 0, 0, 0, 0, 0, 0, 0}),
+ 0xE000000000000ull, 0, 0, 0, 0, 0, 0, 0));
+
+TEST_CONSTEXPR(match_v8di(_mm512_madd52lo_epu64(
+ (__m512i)(__v8du){1, 2, 3, 4, 5, 6, 7, 8},
+ (__m512i)(__v8du){10, 20, 30, 40, 50, 60, 70,
+ 80},
+ (__m512i)(__v8du){2, 3, 4, 5, 6, 7, 8, 9}),
+ 21, 62, 123, 204, 305, 426, 567, 728));
+
+TEST_CONSTEXPR(match_v8di(_mm512_madd52lo_epu64(
+ (__m512i)(__v8du){0xFFFFFFFFFFFFFull, 0, 0, 0,
+ 0, 0, 0, 0},
+ (__m512i)(__v8du){10, 0, 0, 0, 0, 0, 0, 0},
+ (__m512i)(__v8du){5, 0, 0, 0, 0, 0, 0, 0}),
+ 4503599627370545ull, 0, 0, 0, 0, 0, 0, 0));
+
+TEST_CONSTEXPR(match_v8di(_mm512_madd52lo_epu64(
+ (__m512i)(__v8du){10, 20, 30, 40, 50, 60, 70,
+ 80},
+ (__m512i)(__v8du){100, 200, 300, 400, 500, 600,
+ 700, 800},
+ (__m512i)(__v8du){2, 3, 4, 5, 6, 7, 8, 9}),
+ 210, 620, 1230, 2040, 3050, 4260, 5670, 7280));
+
+TEST_CONSTEXPR(match_v8di(_mm512_madd52lo_epu64(
+ (__m512i)(__v8du){0, 0, 0, 0, 0, 0, 0, 0},
+ (__m512i)(__v8du){0x1F000000000000ull,
+ 0x1F000000000000ull, 0, 0, 0,
+ 0, 0, 0},
+ (__m512i)(__v8du){2, 3, 0, 0, 0, 0, 0, 0}),
+ 0xE000000000000ull, 0xD000000000000ull, 0, 0, 0, 0,
+ 0, 0));
+
+TEST_CONSTEXPR(match_v8di(_mm512_madd52lo_epu64(
+ (__m512i)(__v8du){0, 0, 0, 0, 0, 0, 0, 0},
+ (__m512i)(__v8du){0xFFFFFFFFFFFFFull,
+ 0xFFFFFFFFFFFFFull,
+ 0xFFFFFFFFFFFFFull,
+ 0xFFFFFFFFFFFFFull,
+ 0xFFFFFFFFFFFFFull,
+ 0xFFFFFFFFFFFFFull,
+ 0xFFFFFFFFFFFFFull,
+ 0xFFFFFFFFFFFFFull},
+ (__m512i)(__v8du){1, 1, 1, 1, 1, 1, 1, 1}),
+ 0xFFFFFFFFFFFFFull, 0xFFFFFFFFFFFFFull,
+ 0xFFFFFFFFFFFFFull, 0xFFFFFFFFFFFFFull,
+ 0xFFFFFFFFFFFFFull, 0xFFFFFFFFFFFFFull,
+ 0xFFFFFFFFFFFFFull, 0xFFFFFFFFFFFFFull));
+
__m512i test_mm512_mask_madd52lo_epu64(__m512i __W, __mmask8 __M, __m512i __X, __m512i __Y) {
// CHECK-LABEL: test_mm512_mask_madd52lo_epu64
// CHECK: call {{.*}}<8 x i64> @llvm.x86.avx512.vpmadd52l.uq.512(<8 x i64> %{{.*}}, <8 x i64> %{{.*}}, <8 x i64> %{{.*}})
// CHECK: select <8 x i1> %{{.*}}, <8 x i64> %{{.*}}, <8 x i64> %{{.*}}
- return _mm512_mask_madd52lo_epu64(__W, __M, __X, __Y);
+ return _mm512_mask_madd52lo_epu64(__W, __M, __X, __Y);
}
+TEST_CONSTEXPR(match_v8di(_mm512_mask_madd52lo_epu64(
+ (__m512i)(__v8du){111, 222, 333, 444, 555, 666,
+ 777, 888},
+ 0x00,
+ (__m512i)(__v8du){1, 2, 3, 4, 5, 6, 7, 8},
+ (__m512i)(__v8du){10, 20, 30, 40, 50, 60, 70,
+ 80}),
+ 111, 222, 333, 444, 555, 666, 777, 888));
+
+TEST_CONSTEXPR(match_v8di(_mm512_mask_madd52lo_epu64(
+ (__m512i)(__v8du){1000, 2000, 3000, 4000, 5000,
+ 6000, 7000, 8000},
+ 0xFF,
+ (__m512i)(__v8du){100, 200, 300, 400, 500, 600,
+ 700, 800},
+ (__m512i)(__v8du){20, 30, 40, 50, 60, 70, 80,
+ 90}),
+ 3000, 8000, 15000, 24000, 35000, 48000, 63000,
+ 80000));
+
__m512i test_mm512_maskz_madd52lo_epu64(__mmask8 __M, __m512i __X, __m512i __Y, __m512i __Z) {
// CHECK-LABEL: test_mm512_maskz_madd52lo_epu64
// CHECK: call {{.*}}<8 x i64> @llvm.x86.avx512.vpmadd52l.uq.512(<8 x i64> %{{.*}}, <8 x i64> %{{.*}}, <8 x i64> %{{.*}})
// CHECK: select <8 x i1> %{{.*}}, <8 x i64> %{{.*}}, <8 x i64> %{{.*}}
- return _mm512_maskz_madd52lo_epu64(__M, __X, __Y, __Z);
+ return _mm512_maskz_madd52lo_epu64(__M, __X, __Y, __Z);
}
+
+TEST_CONSTEXPR(match_v8di(_mm512_maskz_madd52lo_epu64(
+ 0x00,
+ (__m512i)(__v8du){1, 2, 3, 4, 5, 6, 7, 8},
+ (__m512i)(__v8du){10, 20, 30, 40, 50, 60, 70,
+ 80},
+ (__m512i)(__v8du){2, 3, 4, 5, 6, 7, 8, 9}),
+ 0, 0, 0, 0, 0, 0, 0, 0));
+
+TEST_CONSTEXPR(match_v8di(_mm512_maskz_madd52lo_epu64(
+ 0xFF,
+ (__m512i)(__v8du){100, 200, 300, 400, 500, 600,
+ 700, 800},
+ (__m512i)(__v8du){20, 30, 40, 50, 60, 70, 80,
+ 90},
+ (__m512i)(__v8du){30, 40, 50, 60, 70, 80, 90,
+ 100}),
+ 700, 1400, 2300, 3400, 4700, 6200, 7900, 9800));
diff --git a/clang/test/CodeGen/X86/avx512ifmavl-builtins.c b/clang/test/CodeGen/X86/avx512ifmavl-builtins.c
index 89108fc..1cbb580 100644
--- a/clang/test/CodeGen/X86/avx512ifmavl-builtins.c
+++ b/clang/test/CodeGen/X86/avx512ifmavl-builtins.c
@@ -8,85 +8,241 @@
// RUN: %clang_cc1 -x c++ %s -flax-vector-conversions=none -ffreestanding -triple=x86_64-apple-darwin -target-feature +avx512ifma -target-feature +avx512vl -emit-llvm -o - -Wall -Werror -fexperimental-new-constant-interpreter | FileCheck %s
// RUN: %clang_cc1 -x c++ %s -flax-vector-conversions=none -ffreestanding -triple=i386-apple-darwin -target-feature +avx512ifma -target-feature +avx512vl -emit-llvm -o - -Wall -Werror -fexperimental-new-constant-interpreter | FileCheck %s
-
#include <immintrin.h>
+#include "builtin_test_helpers.h"
__m128i test_mm_madd52hi_epu64(__m128i __X, __m128i __Y, __m128i __Z) {
// CHECK-LABEL: test_mm_madd52hi_epu64
// CHECK: call {{.*}}<2 x i64> @llvm.x86.avx512.vpmadd52h.uq.128(<2 x i64> %{{.*}}, <2 x i64> %{{.*}}, <2 x i64> %{{.*}})
- return _mm_madd52hi_epu64(__X, __Y, __Z);
+ return _mm_madd52hi_epu64(__X, __Y, __Z);
}
+TEST_CONSTEXPR(match_v2di(_mm_madd52hi_epu64(
+ (__m128i)((__v2du){100, 0}),
+ (__m128i)((__v2du){10, 0}),
+ (__m128i)((__v2du){5, 0})),
+ 100, 0));
+
+TEST_CONSTEXPR(match_v2di(_mm_madd52hi_epu64(
+ (__m128i)((__v2du){0, 0}),
+ (__m128i)((__v2du){0xFFFFFFFFFFFFFull, 0}),
+ (__m128i)((__v2du){0xFFFFFFFFFFFFFull, 0})),
+ 0xFFFFFFFFFFFFEull, 0));
+
__m128i test_mm_mask_madd52hi_epu64(__m128i __W, __mmask8 __M, __m128i __X, __m128i __Y) {
// CHECK-LABEL: test_mm_mask_madd52hi_epu64
// CHECK: call {{.*}}<2 x i64> @llvm.x86.avx512.vpmadd52h.uq.128(<2 x i64> %{{.*}}, <2 x i64> %{{.*}}, <2 x i64> %{{.*}})
// CHECK: select <2 x i1> %{{.*}}, <2 x i64> %{{.*}}, <2 x i64> %{{.*}}
- return _mm_mask_madd52hi_epu64(__W, __M, __X, __Y);
+ return _mm_mask_madd52hi_epu64(__W, __M, __X, __Y);
}
+TEST_CONSTEXPR(match_v2di(_mm_mask_madd52hi_epu64((__m128i)((__v2du){111, 222}),
+ 0x0,
+ (__m128i)((__v2du){1, 2}),
+ (__m128i)((__v2du){10, 20})),
+ 111, 222));
+
+TEST_CONSTEXPR(match_v2di(_mm_mask_madd52hi_epu64((__m128i)((__v2du){10, 20}),
+ 0x2,
+ (__m128i)((__v2du){0x1000000000000ULL, 0x1000000000000ULL}),
+ (__m128i)((__v2du){0x1000000000000ULL, 0x1000000000000ULL})),
+ 10, 0x100000000014ULL));
+
__m128i test_mm_maskz_madd52hi_epu64(__mmask8 __M, __m128i __X, __m128i __Y, __m128i __Z) {
// CHECK-LABEL: test_mm_maskz_madd52hi_epu64
// CHECK: call {{.*}}<2 x i64> @llvm.x86.avx512.vpmadd52h.uq.128(<2 x i64> %{{.*}}, <2 x i64> %{{.*}}, <2 x i64> %{{.*}})
// CHECK: select <2 x i1> %{{.*}}, <2 x i64> %{{.*}}, <2 x i64> %{{.*}}
- return _mm_maskz_madd52hi_epu64(__M, __X, __Y, __Z);
+ return _mm_maskz_madd52hi_epu64(__M, __X, __Y, __Z);
}
+TEST_CONSTEXPR(match_v2di(_mm_maskz_madd52hi_epu64(0x3,
+ (__m128i)((__v2du){1, 2}),
+ (__m128i)((__v2du){10, 20}),
+ (__m128i)((__v2du){100, 200})),
+ 1, 2));
+
+TEST_CONSTEXPR(match_v2di(_mm_maskz_madd52hi_epu64(0x1,
+ (__m128i)((__v2du){0x1000000000000ULL, 0x1000000000000ULL}),
+ (__m128i)((__v2du){0x1000000000000ULL, 0x1000000000000ULL}),
+ (__m128i)((__v2du){0, 0})),
+ 0x1000000000000ULL, 0));
+
__m256i test_mm256_madd52hi_epu64(__m256i __X, __m256i __Y, __m256i __Z) {
// CHECK-LABEL: test_mm256_madd52hi_epu64
// CHECK: call {{.*}}<4 x i64> @llvm.x86.avx512.vpmadd52h.uq.256(<4 x i64> %{{.*}}, <4 x i64> %{{.*}}, <4 x i64> %{{.*}})
- return _mm256_madd52hi_epu64(__X, __Y, __Z);
+ return _mm256_madd52hi_epu64(__X, __Y, __Z);
}
+TEST_CONSTEXPR(match_v4di(_mm256_madd52hi_epu64(
+ (__m256i)((__v4du){100, 200, 300, 400}),
+ (__m256i)((__v4du){10, 20, 30, 40}),
+ (__m256i)((__v4du){5, 6, 7, 8})),
+ 100, 200, 300, 400));
+
+TEST_CONSTEXPR(match_v4di(_mm256_madd52hi_epu64(
+ (__m256i)((__v4du){0, 0, 0, 0}),
+ (__m256i)((__v4du){0xFFFFFFFFFFFFFull, 0, 0,
+ 0}),
+ (__m256i)((__v4du){0xFFFFFFFFFFFFFull, 0, 0,
+ 0})),
+ 0xFFFFFFFFFFFFEull, 0, 0, 0));
+
__m256i test_mm256_mask_madd52hi_epu64(__m256i __W, __mmask8 __M, __m256i __X, __m256i __Y) {
// CHECK-LABEL: test_mm256_mask_madd52hi_epu64
// CHECK: call {{.*}}<4 x i64> @llvm.x86.avx512.vpmadd52h.uq.256(<4 x i64> %{{.*}}, <4 x i64> %{{.*}}, <4 x i64> %{{.*}})
// CHECK: select <4 x i1> %{{.*}}, <4 x i64> %{{.*}}, <4 x i64> %{{.*}}
- return _mm256_mask_madd52hi_epu64(__W, __M, __X, __Y);
+ return _mm256_mask_madd52hi_epu64(__W, __M, __X, __Y);
}
+TEST_CONSTEXPR(match_v4di(_mm256_mask_madd52hi_epu64((__m256i)((__v4du){111, 222, 333, 444}),
+ 0x0,
+ (__m256i)((__v4du){1, 2, 3, 4}),
+ (__m256i)((__v4du){10, 20, 30, 40})),
+ 111, 222, 333, 444));
+
+TEST_CONSTEXPR(match_v4di(_mm256_mask_madd52hi_epu64((__m256i)((__v4du){10, 20, 30, 40}),
+ 0xA,
+ (__m256i)((__v4du){0x1000000000000ULL, 0x1000000000000ULL,
+ 0x1000000000000ULL, 0x1000000000000ULL}),
+ (__m256i)((__v4du){0x1000000000000ULL, 0x1000000000000ULL,
+ 0x1000000000000ULL, 0x1000000000000ULL})),
+ 10, 0x100000000014ULL, 30, 0x100000000028ULL));
+
__m256i test_mm256_maskz_madd52hi_epu64(__mmask8 __M, __m256i __X, __m256i __Y, __m256i __Z) {
// CHECK-LABEL: test_mm256_maskz_madd52hi_epu64
// CHECK: call {{.*}}<4 x i64> @llvm.x86.avx512.vpmadd52h.uq.256(<4 x i64> %{{.*}}, <4 x i64> %{{.*}}, <4 x i64> %{{.*}})
// CHECK: select <4 x i1> %{{.*}}, <4 x i64> %{{.*}}, <4 x i64> %{{.*}}
- return _mm256_maskz_madd52hi_epu64(__M, __X, __Y, __Z);
+ return _mm256_maskz_madd52hi_epu64(__M, __X, __Y, __Z);
}
+TEST_CONSTEXPR(match_v4di(_mm256_maskz_madd52hi_epu64(0xF,
+ (__m256i)((__v4du){1, 2, 3, 4}),
+ (__m256i)((__v4du){10, 20, 30, 40}),
+ (__m256i)((__v4du){100, 200, 300, 400})),
+ 1, 2, 3, 4));
+
+TEST_CONSTEXPR(match_v4di(_mm256_maskz_madd52hi_epu64(0x5,
+ (__m256i)((__v4du){0x1000000000000ULL, 0x1000000000000ULL,
+ 0x1000000000000ULL, 0x1000000000000ULL}),
+ (__m256i)((__v4du){0x1000000000000ULL, 0x1000000000000ULL,
+ 0x1000000000000ULL, 0x1000000000000ULL}),
+ (__m256i)((__v4du){0, 0, 0, 0})),
+ 0x1000000000000ULL, 0, 0x1000000000000ULL, 0));
+
__m128i test_mm_madd52lo_epu64(__m128i __X, __m128i __Y, __m128i __Z) {
// CHECK-LABEL: test_mm_madd52lo_epu64
// CHECK: call {{.*}}<2 x i64> @llvm.x86.avx512.vpmadd52l.uq.128(<2 x i64> %{{.*}}, <2 x i64> %{{.*}}, <2 x i64> %{{.*}})
- return _mm_madd52lo_epu64(__X, __Y, __Z);
+ return _mm_madd52lo_epu64(__X, __Y, __Z);
}
+TEST_CONSTEXPR(match_v2di(_mm_madd52lo_epu64(
+ (__m128i)((__v2du){0, 0}),
+ (__m128i)((__v2du){10, 0}),
+ (__m128i)((__v2du){5, 0})),
+ 50, 0));
+
+TEST_CONSTEXPR(match_v2di(_mm_madd52lo_epu64(
+ (__m128i)((__v2du){100, 0}),
+ (__m128i)((__v2du){20, 0}),
+ (__m128i)((__v2du){30, 0})),
+ 700, 0));
+
+TEST_CONSTEXPR(match_v2di(_mm_madd52lo_epu64(
+ (__m128i)((__v2du){1, 2}),
+ (__m128i)((__v2du){10, 20}),
+ (__m128i)((__v2du){2, 3})),
+ 21, 62));
+
__m128i test_mm_mask_madd52lo_epu64(__m128i __W, __mmask8 __M, __m128i __X, __m128i __Y) {
// CHECK-LABEL: test_mm_mask_madd52lo_epu64
// CHECK: call {{.*}}<2 x i64> @llvm.x86.avx512.vpmadd52l.uq.128(<2 x i64> %{{.*}}, <2 x i64> %{{.*}}, <2 x i64> %{{.*}})
// CHECK: select <2 x i1> %{{.*}}, <2 x i64> %{{.*}}, <2 x i64> %{{.*}}
- return _mm_mask_madd52lo_epu64(__W, __M, __X, __Y);
+ return _mm_mask_madd52lo_epu64(__W, __M, __X, __Y);
}
+TEST_CONSTEXPR(match_v2di(_mm_mask_madd52lo_epu64((__m128i)((__v2du){1000, 2000}),
+ 0x3,
+ (__m128i)((__v2du){100, 200}),
+ (__m128i)((__v2du){20, 30})),
+ 3000, 8000));
+
+TEST_CONSTEXPR(match_v2di(_mm_mask_madd52lo_epu64((__m128i)((__v2du){111, 222}),
+ 0x0,
+ (__m128i)((__v2du){1, 2}),
+ (__m128i)((__v2du){10, 20})),
+ 111, 222));
+
__m128i test_mm_maskz_madd52lo_epu64(__mmask8 __M, __m128i __X, __m128i __Y, __m128i __Z) {
// CHECK-LABEL: test_mm_maskz_madd52lo_epu64
// CHECK: call {{.*}}<2 x i64> @llvm.x86.avx512.vpmadd52l.uq.128(<2 x i64> %{{.*}}, <2 x i64> %{{.*}}, <2 x i64> %{{.*}})
// CHECK: select <2 x i1> %{{.*}}, <2 x i64> %{{.*}}, <2 x i64> %{{.*}}
- return _mm_maskz_madd52lo_epu64(__M, __X, __Y, __Z);
+ return _mm_maskz_madd52lo_epu64(__M, __X, __Y, __Z);
}
+TEST_CONSTEXPR(match_v2di(_mm_maskz_madd52lo_epu64(0x3,
+ (__m128i)((__v2du){100, 200}),
+ (__m128i)((__v2du){20, 30}),
+ (__m128i)((__v2du){30, 40})),
+ 700, 1400));
+
+TEST_CONSTEXPR(match_v2di(_mm_maskz_madd52lo_epu64(0x1,
+ (__m128i)((__v2du){100, 0}),
+ (__m128i)((__v2du){20, 0}),
+ (__m128i)((__v2du){30, 0})),
+ 700, 0));
+
__m256i test_mm256_madd52lo_epu64(__m256i __X, __m256i __Y, __m256i __Z) {
// CHECK-LABEL: test_mm256_madd52lo_epu64
// CHECK: call {{.*}}<4 x i64> @llvm.x86.avx512.vpmadd52l.uq.256(<4 x i64> %{{.*}}, <4 x i64> %{{.*}}, <4 x i64> %{{.*}})
- return _mm256_madd52lo_epu64(__X, __Y, __Z);
+ return _mm256_madd52lo_epu64(__X, __Y, __Z);
}
+TEST_CONSTEXPR(match_v4di(_mm256_madd52lo_epu64(
+ (__m256i)((__v4du){1, 2, 3, 4}),
+ (__m256i)((__v4du){10, 20, 30, 40}),
+ (__m256i)((__v4du){2, 3, 4, 5})),
+ 21, 62, 123, 204));
+
__m256i test_mm256_mask_madd52lo_epu64(__m256i __W, __mmask8 __M, __m256i __X, __m256i __Y) {
// CHECK-LABEL: test_mm256_mask_madd52lo_epu64
// CHECK: call {{.*}}<4 x i64> @llvm.x86.avx512.vpmadd52l.uq.256(<4 x i64> %{{.*}}, <4 x i64> %{{.*}}, <4 x i64> %{{.*}})
// CHECK: select <4 x i1> %{{.*}}, <4 x i64> %{{.*}}, <4 x i64> %{{.*}}
- return _mm256_mask_madd52lo_epu64(__W, __M, __X, __Y);
+ return _mm256_mask_madd52lo_epu64(__W, __M, __X, __Y);
}
+TEST_CONSTEXPR(match_v4di(_mm256_mask_madd52lo_epu64((__m256i)((__v4du){1000, 2000, 3000, 4000}),
+ 0xF,
+ (__m256i)((__v4du){100, 200, 300, 400}),
+ (__m256i)((__v4du){20, 30, 40, 50})),
+ 3000, 8000, 15000, 24000));
+
+TEST_CONSTEXPR(match_v4di(_mm256_mask_madd52lo_epu64((__m256i)((__v4du){111, 222, 333, 444}),
+ 0x0,
+ (__m256i)((__v4du){1, 2, 3, 4}),
+ (__m256i)((__v4du){10, 20, 30, 40})),
+ 111, 222, 333, 444));
+
+TEST_CONSTEXPR(match_v4di(_mm256_mask_madd52lo_epu64((__m256i)((__v4du){11, 22, 33, 44}),
+ 0x5,
+ (__m256i)((__v4du){100, 200, 300, 400}),
+ (__m256i)((__v4du){10, 20, 30, 40})),
+ 1011, 22, 9033, 44));
+
__m256i test_mm256_maskz_madd52lo_epu64(__mmask8 __M, __m256i __X, __m256i __Y, __m256i __Z) {
// CHECK-LABEL: test_mm256_maskz_madd52lo_epu64
// CHECK: call {{.*}}<4 x i64> @llvm.x86.avx512.vpmadd52l.uq.256(<4 x i64> %{{.*}}, <4 x i64> %{{.*}}, <4 x i64> %{{.*}})
// CHECK: select <4 x i1> %{{.*}}, <4 x i64> %{{.*}}, <4 x i64> %{{.*}}
- return _mm256_maskz_madd52lo_epu64(__M, __X, __Y, __Z);
+ return _mm256_maskz_madd52lo_epu64(__M, __X, __Y, __Z);
}
+
+TEST_CONSTEXPR(match_v4di(_mm256_maskz_madd52lo_epu64(0xF,
+ (__m256i)((__v4du){100, 200, 300, 400}),
+ (__m256i)((__v4du){20, 30, 40, 50}),
+ (__m256i)((__v4du){30, 40, 50, 60})),
+ 700, 1400, 2300, 3400));
+
+TEST_CONSTEXPR(match_v4di(_mm256_maskz_madd52lo_epu64(0x9,
+ (__m256i)((__v4du){100, 200, 300, 400}),
+ (__m256i)((__v4du){10, 20, 30, 40}),
+ (__m256i)((__v4du){5, 10, 15, 20})),
+ 150, 0, 0, 1200));
diff --git a/clang/test/CodeGen/X86/avx512vl-builtins.c b/clang/test/CodeGen/X86/avx512vl-builtins.c
index 51385d5..9b6bfea9 100644
--- a/clang/test/CodeGen/X86/avx512vl-builtins.c
+++ b/clang/test/CodeGen/X86/avx512vl-builtins.c
@@ -7201,6 +7201,8 @@ __m128i test_mm_mask_set1_epi32(__m128i __O, __mmask8 __M) {
return _mm_mask_set1_epi32(__O, __M, 5);
}
+TEST_CONSTEXPR(match_v4si(_mm_mask_set1_epi32(_mm_setzero_si128(), 0xF, 7), 7, 7, 7, 7));
+
__m128i test_mm_maskz_set1_epi32(__mmask8 __M) {
// CHECK-LABEL: test_mm_maskz_set1_epi32
// CHECK: insertelement <4 x i32> poison, i32 %{{.*}}, i32 0
@@ -7212,6 +7214,8 @@ __m128i test_mm_maskz_set1_epi32(__mmask8 __M) {
return _mm_maskz_set1_epi32(__M, 5);
}
+TEST_CONSTEXPR(match_v4si(_mm_maskz_set1_epi32(0xA, 11), 0, 11, 0, 11));
+
__m256i test_mm256_mask_set1_epi32(__m256i __O, __mmask8 __M) {
// CHECK-LABEL: test_mm256_mask_set1_epi32
// CHECK: insertelement <8 x i32> poison, i32 %{{.*}}, i32 0
@@ -7226,6 +7230,8 @@ __m256i test_mm256_mask_set1_epi32(__m256i __O, __mmask8 __M) {
return _mm256_mask_set1_epi32(__O, __M, 5);
}
+TEST_CONSTEXPR(match_v8si(_mm256_mask_set1_epi32(_mm256_setzero_si256(), 0xAA, 5), 0, 5, 0, 5, 0, 5, 0, 5));
+
__m256i test_mm256_maskz_set1_epi32(__mmask8 __M) {
// CHECK-LABEL: test_mm256_maskz_set1_epi32
// CHECK: insertelement <8 x i32> poison, i32 %{{.*}}, i32 0
@@ -7240,6 +7246,8 @@ __m256i test_mm256_maskz_set1_epi32(__mmask8 __M) {
return _mm256_maskz_set1_epi32(__M, 5);
}
+TEST_CONSTEXPR(match_v8si(_mm256_maskz_set1_epi32(0xAA, 13), 0, 13, 0, 13, 0, 13, 0, 13));
+
__m128i test_mm_mask_set1_epi64(__m128i __O, __mmask8 __M, long long __A) {
// CHECK-LABEL: test_mm_mask_set1_epi64
// CHECK: insertelement <2 x i64> poison, i64 %{{.*}}, i32 0
@@ -7249,6 +7257,8 @@ __m128i test_mm_mask_set1_epi64(__m128i __O, __mmask8 __M, long long __A) {
return _mm_mask_set1_epi64(__O, __M, __A);
}
+TEST_CONSTEXPR(match_v2di(_mm_mask_set1_epi64(_mm_setzero_si128(), 0x3, 9), 9, 9));
+
__m128i test_mm_maskz_set1_epi64(__mmask8 __M, long long __A) {
// CHECK-LABEL: test_mm_maskz_set1_epi64
// CHECK: insertelement <2 x i64> poison, i64 %{{.*}}, i32 0
@@ -7258,6 +7268,8 @@ __m128i test_mm_maskz_set1_epi64(__mmask8 __M, long long __A) {
return _mm_maskz_set1_epi64(__M, __A);
}
+TEST_CONSTEXPR(match_v2di(_mm_maskz_set1_epi64(0x2, 15), 0, 15));
+
__m256i test_mm256_mask_set1_epi64(__m256i __O, __mmask8 __M, long long __A) {
// CHECK-LABEL: test_mm256_mask_set1_epi64
// CHECK: insertelement <4 x i64> poison, i64 %{{.*}}, i32 0
@@ -7269,6 +7281,8 @@ __m256i test_mm256_mask_set1_epi64(__m256i __O, __mmask8 __M, long long __A) {
return _mm256_mask_set1_epi64(__O, __M, __A);
}
+TEST_CONSTEXPR(match_v4di(_mm256_mask_set1_epi64(_mm256_setzero_si256(), 0xF, 11), 11, 11, 11, 11));
+
__m256i test_mm256_maskz_set1_epi64(__mmask8 __M, long long __A) {
// CHECK-LABEL: test_mm256_maskz_set1_epi64
// CHECK: insertelement <4 x i64> poison, i64 %{{.*}}, i32 0
@@ -7280,6 +7294,8 @@ __m256i test_mm256_maskz_set1_epi64(__mmask8 __M, long long __A) {
return _mm256_maskz_set1_epi64(__M, __A);
}
+TEST_CONSTEXPR(match_v4di(_mm256_maskz_set1_epi64(0xA, 17), 0, 17, 0, 17));
+
__m128d test_mm_fixupimm_pd(__m128d __A, __m128d __B, __m128i __C) {
// CHECK-LABEL: test_mm_fixupimm_pd
// CHECK: @llvm.x86.avx512.mask.fixupimm.pd.128
@@ -7623,6 +7639,8 @@ __m128d test_mm_mask_unpackhi_pd(__m128d __W, __mmask8 __U, __m128d __A, __m128d
return _mm_mask_unpackhi_pd(__W, __U, __A, __B);
}
+TEST_CONSTEXPR(match_m128d(_mm_mask_unpackhi_pd(_mm_setzero_pd(), 0x3, (__m128d)(__v2df){1.0,2.0}, (__m128d)(__v2df){3.0,4.0}), 2.0,4.0));
+
__m128d test_mm_maskz_unpackhi_pd(__mmask8 __U, __m128d __A, __m128d __B) {
// CHECK-LABEL: test_mm_maskz_unpackhi_pd
// CHECK: shufflevector <2 x double> %{{.*}}, <2 x double> %{{.*}}, <2 x i32> <i32 1, i32 3>
@@ -7637,6 +7655,8 @@ __m256d test_mm256_mask_unpackhi_pd(__m256d __W, __mmask8 __U, __m256d __A, __m2
return _mm256_mask_unpackhi_pd(__W, __U, __A, __B);
}
+TEST_CONSTEXPR(match_m256d(_mm256_mask_unpackhi_pd(_mm256_setzero_pd(), 0xAA, (__m256d)(__v4df){1.0,2.0,3.0,4.0}, (__m256d)(__v4df){5.0,6.0,7.0,8.0}), 0,6.0,0,8.0));
+
__m256d test_mm256_maskz_unpackhi_pd(__mmask8 __U, __m256d __A, __m256d __B) {
// CHECK-LABEL: test_mm256_maskz_unpackhi_pd
// CHECK: shufflevector <8 x i1> %{{.*}}, <8 x i1> %{{.*}}, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
@@ -7679,6 +7699,8 @@ __m128d test_mm_mask_unpacklo_pd(__m128d __W, __mmask8 __U, __m128d __A, __m128d
return _mm_mask_unpacklo_pd(__W, __U, __A, __B);
}
+TEST_CONSTEXPR(match_m128d(_mm_mask_unpacklo_pd(_mm_setzero_pd(), 0x3, (__m128d)(__v2df){1.0,2.0}, (__m128d)(__v2df){3.0,4.0}), 1.0,3.0));
+
__m128d test_mm_maskz_unpacklo_pd(__mmask8 __U, __m128d __A, __m128d __B) {
// CHECK-LABEL: test_mm_maskz_unpacklo_pd
// CHECK: shufflevector <2 x double> %{{.*}}, <2 x double> %{{.*}}, <2 x i32> <i32 0, i32 2>
@@ -7686,6 +7708,8 @@ __m128d test_mm_maskz_unpacklo_pd(__mmask8 __U, __m128d __A, __m128d __B) {
return _mm_maskz_unpacklo_pd(__U, __A, __B);
}
+TEST_CONSTEXPR(match_m128d(_mm_maskz_unpacklo_pd(0x2, (__m128d)(__v2df){1.0,2.0}, (__m128d)(__v2df){3.0,4.0}), 0.0,3.0));
+
__m256d test_mm256_mask_unpacklo_pd(__m256d __W, __mmask8 __U, __m256d __A, __m256d __B) {
// CHECK-LABEL: test_mm256_mask_unpacklo_pd
// CHECK: shufflevector <4 x double> %{{.*}}, <4 x double> %{{.*}}, <4 x i32> <i32 0, i32 4, i32 2, i32 6>
@@ -7693,6 +7717,8 @@ __m256d test_mm256_mask_unpacklo_pd(__m256d __W, __mmask8 __U, __m256d __A, __m2
return _mm256_mask_unpacklo_pd(__W, __U, __A, __B);
}
+TEST_CONSTEXPR(match_m256d(_mm256_mask_unpacklo_pd(_mm256_setzero_pd(), 0xAA, (__m256d)(__v4df){1.0,2.0,3.0,4.0}, (__m256d)(__v4df){5.0,6.0,7.0,8.0}), 0,5.0,0,7.0));
+
__m256d test_mm256_maskz_unpacklo_pd(__mmask8 __U, __m256d __A, __m256d __B) {
// CHECK-LABEL: test_mm256_maskz_unpacklo_pd
// CHECK: shufflevector <4 x double> %{{.*}}, <4 x double> %{{.*}}, <4 x i32> <i32 0, i32 4, i32 2, i32 6>
@@ -7700,6 +7726,8 @@ __m256d test_mm256_maskz_unpacklo_pd(__mmask8 __U, __m256d __A, __m256d __B) {
return _mm256_maskz_unpacklo_pd(__U, __A, __B);
}
+TEST_CONSTEXPR(match_m256d(_mm256_maskz_unpacklo_pd(0x0A, (__m256d)(__v4df){1.0,2.0,3.0,4.0}, (__m256d)(__v4df){5.0,6.0,7.0,8.0}), 0.0,5.0,0.0,7.0));
+
__m128 test_mm_mask_unpacklo_ps(__m128 __W, __mmask8 __U, __m128 __A, __m128 __B) {
// CHECK-LABEL: test_mm_mask_unpacklo_ps
// CHECK: shufflevector <4 x float> %{{.*}}, <4 x float> %{{.*}}, <4 x i32> <i32 0, i32 4, i32 1, i32 5>
@@ -8039,6 +8067,8 @@ __m128i test_mm_mask_unpackhi_epi32(__m128i __W, __mmask8 __U, __m128i __A, __m1
return _mm_mask_unpackhi_epi32(__W, __U, __A, __B);
}
+TEST_CONSTEXPR(match_v4si(_mm_mask_unpackhi_epi32(_mm_setzero_si128(), 0xA, (__m128i)(__v4si){0,1,2,3}, (__m128i)(__v4si){4,5,6,7}), 0,6,0,7));
+
__m128i test_mm_maskz_unpackhi_epi32(__mmask8 __U, __m128i __A, __m128i __B) {
// CHECK-LABEL: test_mm_maskz_unpackhi_epi32
// CHECK: shufflevector <4 x i32> %{{.*}}, <4 x i32> %{{.*}}, <4 x i32> <i32 2, i32 6, i32 3, i32 7>
@@ -8046,6 +8076,8 @@ __m128i test_mm_maskz_unpackhi_epi32(__mmask8 __U, __m128i __A, __m128i __B) {
return _mm_maskz_unpackhi_epi32(__U, __A, __B);
}
+TEST_CONSTEXPR(match_v4si(_mm_maskz_unpackhi_epi32(0x5, (__m128i)(__v4si){0,1,2,3}, (__m128i)(__v4si){4,5,6,7}), 2,0,3,0));
+
__m256i test_mm256_mask_unpackhi_epi32(__m256i __W, __mmask8 __U, __m256i __A, __m256i __B) {
// CHECK-LABEL: test_mm256_mask_unpackhi_epi32
// CHECK: shufflevector <8 x i32> %{{.*}}, <8 x i32> %{{.*}}, <8 x i32> <i32 2, i32 10, i32 3, i32 11, i32 6, i32 14, i32 7, i32 15>
@@ -8997,6 +9029,8 @@ __m256 test_mm256_maskz_broadcast_f32x4(__mmask8 __M, __m128 __A) {
return _mm256_maskz_broadcast_f32x4(__M, __A);
}
+TEST_CONSTEXPR(match_m256(_mm256_maskz_broadcast_f32x4(0xAA, (__m128)(__v4sf){0,1,2,3}), 0,1,0,3,0,1,0,3));
+
__m256i test_mm256_broadcast_i32x4(__m128i const* __A) {
// CHECK-LABEL: test_mm256_broadcast_i32x4
// CHECK: shufflevector <4 x i32> %{{.*}}, <4 x i32> %{{.*}}, <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 0, i32 1, i32 2, i32 3>
@@ -9018,6 +9052,8 @@ __m256i test_mm256_maskz_broadcast_i32x4(__mmask8 __M, __m128i const* __A) {
return _mm256_maskz_broadcast_i32x4(__M, _mm_loadu_si128(__A));
}
+TEST_CONSTEXPR(match_v8si(_mm256_maskz_broadcast_i32x4(0xAA, (__m128i)(__v4si){0,1,2,3}), 0,1,0,3,0,1,0,3));
+
__m256d test_mm256_mask_broadcastsd_pd(__m256d __O, __mmask8 __M, __m128d __A) {
// CHECK-LABEL: test_mm256_mask_broadcastsd_pd
// CHECK: shufflevector <2 x double> %{{.*}}, <2 x double> %{{.*}}, <4 x i32> zeroinitializer
@@ -10324,6 +10360,8 @@ __m128 test_mm_mask_movehdup_ps(__m128 __W, __mmask8 __U, __m128 __A) {
return _mm_mask_movehdup_ps(__W, __U, __A);
}
+TEST_CONSTEXPR(match_m128(_mm_mask_movehdup_ps(_mm_setzero_ps(), 0xF, (__m128)(__v4sf){1.f,2.f,3.f,4.f}), 2.f,2.f,4.f,4.f));
+
__m128 test_mm_maskz_movehdup_ps(__mmask8 __U, __m128 __A) {
// CHECK-LABEL: test_mm_maskz_movehdup_ps
// CHECK: shufflevector <4 x float> %{{.*}}, <4 x float> %{{.*}}, <4 x i32> <i32 1, i32 1, i32 3, i32 3>
@@ -10338,6 +10376,8 @@ __m256 test_mm256_mask_movehdup_ps(__m256 __W, __mmask8 __U, __m256 __A) {
return _mm256_mask_movehdup_ps(__W, __U, __A);
}
+TEST_CONSTEXPR(match_m256(_mm256_mask_movehdup_ps(_mm256_setzero_ps(), 0xAA, (__m256)(__v8sf){1,2,3,4,5,6,7,8}), 0,2,0,4,0,6,0,8));
+
__m256 test_mm256_maskz_movehdup_ps(__mmask8 __U, __m256 __A) {
// CHECK-LABEL: test_mm256_maskz_movehdup_ps
// CHECK: shufflevector <8 x float> %{{.*}}, <8 x float> %{{.*}}, <8 x i32> <i32 1, i32 1, i32 3, i32 3, i32 5, i32 5, i32 7, i32 7>
@@ -10352,6 +10392,8 @@ __m128 test_mm_mask_moveldup_ps(__m128 __W, __mmask8 __U, __m128 __A) {
return _mm_mask_moveldup_ps(__W, __U, __A);
}
+TEST_CONSTEXPR(match_m128(_mm_mask_moveldup_ps(_mm_setzero_ps(), 0xF, (__m128)(__v4sf){1.f,2.f,3.f,4.f}), 1.f,1.f,3.f,3.f));
+
__m128 test_mm_maskz_moveldup_ps(__mmask8 __U, __m128 __A) {
// CHECK-LABEL: test_mm_maskz_moveldup_ps
// CHECK: shufflevector <4 x float> %{{.*}}, <4 x float> %{{.*}}, <4 x i32> <i32 0, i32 0, i32 2, i32 2>
@@ -10366,6 +10408,8 @@ __m256 test_mm256_mask_moveldup_ps(__m256 __W, __mmask8 __U, __m256 __A) {
return _mm256_mask_moveldup_ps(__W, __U, __A);
}
+TEST_CONSTEXPR(match_m256(_mm256_mask_moveldup_ps(_mm256_setzero_ps(), 0xAA, (__m256)(__v8sf){1,2,3,4,5,6,7,8}), 0,1,0,3,0,5,0,7));
+
__m256 test_mm256_maskz_moveldup_ps(__mmask8 __U, __m256 __A) {
// CHECK-LABEL: test_mm256_maskz_moveldup_ps
// CHECK: shufflevector <8 x float> %{{.*}}, <8 x float> %{{.*}}, <8 x i32> <i32 0, i32 0, i32 2, i32 2, i32 4, i32 4, i32 6, i32 6>
diff --git a/clang/test/CodeGen/X86/avx512vlbw-builtins.c b/clang/test/CodeGen/X86/avx512vlbw-builtins.c
index c0e46de..d569283 100644
--- a/clang/test/CodeGen/X86/avx512vlbw-builtins.c
+++ b/clang/test/CodeGen/X86/avx512vlbw-builtins.c
@@ -1688,24 +1688,37 @@ __m128i test_mm_mask_shuffle_epi8(__m128i __W, __mmask16 __U, __m128i __A, __m12
// CHECK: select <16 x i1> %{{.*}}, <16 x i8> %{{.*}}, <16 x i8> %{{.*}}
return _mm_mask_shuffle_epi8(__W,__U,__A,__B);
}
+
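+// pshufb: within each 128-bit lane, result byte i is A[B[i] & 15], or 0 when bit 7 of B[i] is set;
+// the write mask then selects between that result and __W (or zero for the maskz form).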
+TEST_CONSTEXPR(match_v16qi(_mm_mask_shuffle_epi8((__m128i)(__v16qi){1,1,1,1,1,1,1,1,2,2,4,4,6,6,8,8}, 0x00FF, (__m128i)(__v16qi){0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15}, (__m128i)(__v16qi){15,14,13,12,11,10,9,8,7,6,5,4,3,2,1,0}), 15,14,13,12,11,10,9,8,2,2,4,4,6,6,8,8));
+
__m128i test_mm_maskz_shuffle_epi8(__mmask16 __U, __m128i __A, __m128i __B) {
// CHECK-LABEL: test_mm_maskz_shuffle_epi8
// CHECK: @llvm.x86.ssse3.pshuf.b
// CHECK: select <16 x i1> %{{.*}}, <16 x i8> %{{.*}}, <16 x i8> %{{.*}}
return _mm_maskz_shuffle_epi8(__U,__A,__B);
}
+
+TEST_CONSTEXPR(match_v16qi(_mm_maskz_shuffle_epi8(0xAAAA, (__m128i)(__v16qi){0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15}, (__m128i)(__v16qi){15,14,13,12,11,10,9,8,7,6,5,4,3,2,1,0}), 0,14,0,12,0,10,0,8,0,6,0,4,0,2,0,0));
+
__m256i test_mm256_mask_shuffle_epi8(__m256i __W, __mmask32 __U, __m256i __A, __m256i __B) {
// CHECK-LABEL: test_mm256_mask_shuffle_epi8
// CHECK: @llvm.x86.avx2.pshuf.b
// CHECK: select <32 x i1> %{{.*}}, <32 x i8> %{{.*}}, <32 x i8> %{{.*}}
return _mm256_mask_shuffle_epi8(__W,__U,__A,__B);
}
+
+TEST_CONSTEXPR(match_v32qi(_mm256_mask_shuffle_epi8((__m256i)(__v32qi){1,1,1,1,1,1,1,1,2,2,2,2,2,2,2,2,3,3,3,3,3,3,3,3,4,4,4,4,4,4,4,4}, 0x80808080, (__m256i)(__v32qi){0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20,21,22,23,24,25,26,27,28,29,30,31}, (__m256i)(__v32qi){31,30,29,28,27,26,25,24,23,22,21,20,19,18,17,16,15,14,13,12,11,10,9,8,7,6,5,4,3,2,1,0}), 1,1,1,1,1,1,1,8,2,2,2,2,2,2,2,0,3,3,3,3,3,3,3,24,4,4,4,4,4,4,4,16));
+
__m256i test_mm256_maskz_shuffle_epi8(__mmask32 __U, __m256i __A, __m256i __B) {
// CHECK-LABEL: test_mm256_maskz_shuffle_epi8
// CHECK: @llvm.x86.avx2.pshuf.b
// CHECK: select <32 x i1> %{{.*}}, <32 x i8> %{{.*}}, <32 x i8> %{{.*}}
return _mm256_maskz_shuffle_epi8(__U,__A,__B);
}
+
+TEST_CONSTEXPR(match_v32qi(_mm256_maskz_shuffle_epi8(0x0000FFFF, (__m256i)(__v32qi){0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20,21,22,23,24,25,26,27,28,29,30,31}, (__m256i)(__v32qi){31,30,29,28,27,26,25,24,23,22,21,20,19,18,17,16,15,14,13,12,11,10,9,8,7,6,5,4,3,2,1,0}), 15,14,13,12,11,10,9,8,7,6,5,4,3,2,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0));
+
__m128i test_mm_mask_subs_epi8(__m128i __W, __mmask16 __U, __m128i __A, __m128i __B) {
// CHECK-LABEL: test_mm_mask_subs_epi8
// CHECK: @llvm.ssub.sat.v16i8
diff --git a/clang/test/CodeGen/X86/avx512vlcd-builtins.c b/clang/test/CodeGen/X86/avx512vlcd-builtins.c
index 1619305..29fc6fd 100644
--- a/clang/test/CodeGen/X86/avx512vlcd-builtins.c
+++ b/clang/test/CodeGen/X86/avx512vlcd-builtins.c
@@ -20,6 +20,7 @@ __m128i test_mm_broadcastmb_epi64(__m128i a,__m128i b) {
// CHECK: insertelement <2 x i64> %{{.*}}, i64 %{{.*}}, i32 1
return _mm_broadcastmb_epi64(_mm_cmpeq_epi32_mask (a, b));
}
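+// broadcastmb: the 8-bit mask is zero-extended and copied into every 64-bit element.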
+TEST_CONSTEXPR(match_v2du(_mm_broadcastmb_epi64((__mmask8)(76)), 76, 76));
__m256i test_mm256_broadcastmb_epi64(__m256i a, __m256i b) {
// CHECK-LABEL: test_mm256_broadcastmb_epi64
@@ -32,6 +33,7 @@ __m256i test_mm256_broadcastmb_epi64(__m256i a, __m256i b) {
// CHECK: insertelement <4 x i64> %{{.*}}, i64 %{{.*}}, i32 3
return _mm256_broadcastmb_epi64(_mm256_cmpeq_epi64_mask ( a, b));
}
+TEST_CONSTEXPR(match_v4di(_mm256_broadcastmb_epi64((__mmask8)(67)), 67, 67, 67, 67));
__m128i test_mm_broadcastmw_epi32(__m512i a, __m512i b) {
// CHECK-LABEL: test_mm_broadcastmw_epi32
@@ -43,6 +45,7 @@ __m128i test_mm_broadcastmw_epi32(__m512i a, __m512i b) {
// CHECK: insertelement <4 x i32> %{{.*}}, i32 %{{.*}}, i32 3
return _mm_broadcastmw_epi32(_mm512_cmpeq_epi32_mask ( a, b));
}
+TEST_CONSTEXPR(match_v4su(_mm_broadcastmw_epi32((__mmask16)(0xbabe)), 0xbabe, 0xbabe, 0xbabe, 0xbabe));
__m256i test_mm256_broadcastmw_epi32(__m512i a, __m512i b) {
// CHECK-LABEL: test_mm256_broadcastmw_epi32
@@ -58,87 +61,119 @@ __m256i test_mm256_broadcastmw_epi32(__m512i a, __m512i b) {
// CHECK: insertelement <8 x i32> %{{.*}}, i32 %{{.*}}, i32 7
return _mm256_broadcastmw_epi32(_mm512_cmpeq_epi32_mask ( a, b));
}
+TEST_CONSTEXPR(match_v8si(_mm256_broadcastmw_epi32((__mmask16)(0xcafe)), 0xcafe,0xcafe,0xcafe,0xcafe, 0xcafe,0xcafe,0xcafe,0xcafe));
__m128i test_mm_conflict_epi64(__m128i __A) {
// CHECK-LABEL: test_mm_conflict_epi64
// CHECK: call {{.*}}<2 x i64> @llvm.x86.avx512.conflict.q.128(<2 x i64> %{{.*}})
- return _mm_conflict_epi64(__A);
+ return _mm_conflict_epi64(__A);
}
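+// vpconflict: each element receives a bitmask of the lower-indexed elements that compare equal to it.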
+TEST_CONSTEXPR(match_v2di(_mm_conflict_epi64((__m128i)(__v2di){1, 2}), 0, 0));
+TEST_CONSTEXPR(match_v2di(_mm_conflict_epi64((__m128i)(__v2di){5, 5}), 0, 1));
+
__m128i test_mm_mask_conflict_epi64(__m128i __W, __mmask8 __U, __m128i __A) {
// CHECK-LABEL: test_mm_mask_conflict_epi64
// CHECK: call {{.*}}<2 x i64> @llvm.x86.avx512.conflict.q.128(<2 x i64> %{{.*}})
// CHECK: select <2 x i1> %{{.*}}, <2 x i64> %{{.*}}, <2 x i64> %{{.*}}
- return _mm_mask_conflict_epi64(__W, __U, __A);
+ return _mm_mask_conflict_epi64(__W, __U, __A);
}
+TEST_CONSTEXPR(match_v2di(_mm_mask_conflict_epi64((__m128i)(__v2di){0xFF, 0xFF}, 0x2, (__m128i)(__v2di){5, 5}), 0xFF, 1));
+
__m128i test_mm_maskz_conflict_epi64(__mmask8 __U, __m128i __A) {
// CHECK-LABEL: test_mm_maskz_conflict_epi64
// CHECK: call {{.*}}<2 x i64> @llvm.x86.avx512.conflict.q.128(<2 x i64> %{{.*}})
// CHECK: select <2 x i1> %{{.*}}, <2 x i64> %{{.*}}, <2 x i64> %{{.*}}
- return _mm_maskz_conflict_epi64(__U, __A);
+ return _mm_maskz_conflict_epi64(__U, __A);
}
+TEST_CONSTEXPR(match_v2di(_mm_maskz_conflict_epi64(0x2, (__m128i)(__v2di){5, 5}), 0, 1));
+
__m256i test_mm256_conflict_epi64(__m256i __A) {
// CHECK-LABEL: test_mm256_conflict_epi64
// CHECK: call {{.*}}<4 x i64> @llvm.x86.avx512.conflict.q.256(<4 x i64> %{{.*}})
- return _mm256_conflict_epi64(__A);
+ return _mm256_conflict_epi64(__A);
}
+TEST_CONSTEXPR(match_v4di(_mm256_conflict_epi64((__m256i)(__v4di){1, 2, 1, 3}), 0, 0, 1, 0));
+TEST_CONSTEXPR(match_v4di(_mm256_conflict_epi64((__m256i)(__v4di){7, 7, 7, 7}), 0, 1, 3, 7));
+TEST_CONSTEXPR(match_v4di(_mm256_conflict_epi64((__m256i)(__v4di){1, 2, 3, 4}), 0, 0, 0, 0));
+
__m256i test_mm256_mask_conflict_epi64(__m256i __W, __mmask8 __U, __m256i __A) {
// CHECK-LABEL: test_mm256_mask_conflict_epi64
// CHECK: call {{.*}}<4 x i64> @llvm.x86.avx512.conflict.q.256(<4 x i64> %{{.*}})
// CHECK: select <4 x i1> %{{.*}}, <4 x i64> %{{.*}}, <4 x i64> %{{.*}}
- return _mm256_mask_conflict_epi64(__W, __U, __A);
+ return _mm256_mask_conflict_epi64(__W, __U, __A);
}
+TEST_CONSTEXPR(match_v4di(_mm256_mask_conflict_epi64((__m256i)(__v4di){0xFF, 0xFF, 0xFF, 0xFF}, 0x5, (__m256i)(__v4di){1, 2, 1, 3}), 0, 0xFF, 1, 0xFF));
+
__m256i test_mm256_maskz_conflict_epi64(__mmask8 __U, __m256i __A) {
// CHECK-LABEL: test_mm256_maskz_conflict_epi64
// CHECK: call {{.*}}<4 x i64> @llvm.x86.avx512.conflict.q.256(<4 x i64> %{{.*}})
// CHECK: select <4 x i1> %{{.*}}, <4 x i64> %{{.*}}, <4 x i64> %{{.*}}
- return _mm256_maskz_conflict_epi64(__U, __A);
+ return _mm256_maskz_conflict_epi64(__U, __A);
}
+TEST_CONSTEXPR(match_v4di(_mm256_maskz_conflict_epi64(0x5, (__m256i)(__v4di){1, 2, 1, 3}), 0, 0, 1, 0));
+
__m128i test_mm_conflict_epi32(__m128i __A) {
// CHECK-LABEL: test_mm_conflict_epi32
// CHECK: call <4 x i32> @llvm.x86.avx512.conflict.d.128(<4 x i32> %{{.*}})
- return _mm_conflict_epi32(__A);
+ return _mm_conflict_epi32(__A);
}
+TEST_CONSTEXPR(match_v4si(_mm_conflict_epi32((__m128i)(__v4si){1, 2, 1, 3}), 0, 0, 1, 0));
+TEST_CONSTEXPR(match_v4si(_mm_conflict_epi32((__m128i)(__v4si){3, 3, 3, 3}), 0, 1, 3, 7));
+TEST_CONSTEXPR(match_v4si(_mm_conflict_epi32((__m128i)(__v4si){1, 2, 3, 4}), 0, 0, 0, 0));
+
__m128i test_mm_mask_conflict_epi32(__m128i __W, __mmask8 __U, __m128i __A) {
// CHECK-LABEL: test_mm_mask_conflict_epi32
// CHECK: call <4 x i32> @llvm.x86.avx512.conflict.d.128(<4 x i32> %{{.*}})
// CHECK: select <4 x i1> %{{.*}}, <4 x i32> %{{.*}}, <4 x i32> %{{.*}}
- return _mm_mask_conflict_epi32(__W, __U, __A);
+ return _mm_mask_conflict_epi32(__W, __U, __A);
}
+TEST_CONSTEXPR(match_v4si(_mm_mask_conflict_epi32((__m128i)(__v4si){0xFF, 0xFF, 0xFF, 0xFF}, 0x5, (__m128i)(__v4si){1, 2, 1, 3}), 0, 0xFF, 1, 0xFF));
+
__m128i test_mm_maskz_conflict_epi32(__mmask8 __U, __m128i __A) {
// CHECK-LABEL: test_mm_maskz_conflict_epi32
// CHECK: call <4 x i32> @llvm.x86.avx512.conflict.d.128(<4 x i32> %{{.*}})
// CHECK: select <4 x i1> %{{.*}}, <4 x i32> %{{.*}}, <4 x i32> %{{.*}}
- return _mm_maskz_conflict_epi32(__U, __A);
+ return _mm_maskz_conflict_epi32(__U, __A);
}
+TEST_CONSTEXPR(match_v4si(_mm_maskz_conflict_epi32(0x5, (__m128i)(__v4si){1, 2, 1, 3}), 0, 0, 1, 0));
+
__m256i test_mm256_conflict_epi32(__m256i __A) {
// CHECK-LABEL: test_mm256_conflict_epi32
// CHECK: call <8 x i32> @llvm.x86.avx512.conflict.d.256(<8 x i32> %{{.*}})
- return _mm256_conflict_epi32(__A);
+ return _mm256_conflict_epi32(__A);
}
+TEST_CONSTEXPR(match_v8si(_mm256_conflict_epi32((__m256i)(__v8si){1, 2, 1, 3, 2, 4, 1, 5}), 0, 0, 1, 0, 2, 0, 5, 0));
+TEST_CONSTEXPR(match_v8si(_mm256_conflict_epi32((__m256i)(__v8si){4, 4, 4, 4, 4, 4, 4, 4}), 0, 1, 3, 7, 15, 31, 63, 127));
+TEST_CONSTEXPR(match_v8si(_mm256_conflict_epi32((__m256i)(__v8si){1, 2, 3, 4, 5, 6, 7, 8}), 0, 0, 0, 0, 0, 0, 0, 0));
+
__m256i test_mm256_mask_conflict_epi32(__m256i __W, __mmask8 __U, __m256i __A) {
// CHECK-LABEL: test_mm256_mask_conflict_epi32
// CHECK: call <8 x i32> @llvm.x86.avx512.conflict.d.256(<8 x i32> %{{.*}})
// CHECK: select <8 x i1> %{{.*}}, <8 x i32> %{{.*}}, <8 x i32> %{{.*}}
- return _mm256_mask_conflict_epi32(__W, __U, __A);
+ return _mm256_mask_conflict_epi32(__W, __U, __A);
}
+TEST_CONSTEXPR(match_v8si(_mm256_mask_conflict_epi32((__m256i)(__v8si){0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF}, /*0101 0101=*/0x55, (__m256i)(__v8si){1, 2, 1, 3, 2, 4, 1, 5}), 0, 0xFF, 1, 0xFF, 2, 0xFF, 5, 0xFF));
+
__m256i test_mm256_maskz_conflict_epi32(__mmask8 __U, __m256i __A) {
// CHECK-LABEL: test_mm256_maskz_conflict_epi32
// CHECK: call <8 x i32> @llvm.x86.avx512.conflict.d.256(<8 x i32> %{{.*}})
// CHECK: select <8 x i1> %{{.*}}, <8 x i32> %{{.*}}, <8 x i32> %{{.*}}
- return _mm256_maskz_conflict_epi32(__U, __A);
+ return _mm256_maskz_conflict_epi32(__U, __A);
}
+TEST_CONSTEXPR(match_v8si(_mm256_maskz_conflict_epi32(0x55, (__m256i)(__v8si){1, 2, 1, 3, 2, 4, 1, 5}), 0, 0, 1, 0, 2, 0, 5, 0));
+
__m128i test_mm_lzcnt_epi32(__m128i __A) {
// CHECK-LABEL: test_mm_lzcnt_epi32
// CHECK: call <4 x i32> @llvm.ctlz.v4i32(<4 x i32> %{{.*}}, i1 true)
diff --git a/clang/test/CodeGen/X86/avx512vldq-builtins.c b/clang/test/CodeGen/X86/avx512vldq-builtins.c
index 9388457..4773b60 100644
--- a/clang/test/CodeGen/X86/avx512vldq-builtins.c
+++ b/clang/test/CodeGen/X86/avx512vldq-builtins.c
@@ -987,6 +987,8 @@ __m256 test_mm256_mask_broadcast_f32x2(__m256 __O, __mmask8 __M, __m128 __A) {
return _mm256_mask_broadcast_f32x2(__O, __M, __A);
}
+TEST_CONSTEXPR(match_m256(_mm256_mask_broadcast_f32x2(_mm256_setzero_ps(), 0xAA, (__m128)(__v4sf){1.f,2.f,3.f,4.f}), 0,2.f,0,2.f,0,2.f,0,2.f));
+
__m256 test_mm256_maskz_broadcast_f32x2(__mmask8 __M, __m128 __A) {
// CHECK-LABEL: test_mm256_maskz_broadcast_f32x2
// CHECK: shufflevector <4 x float> %{{.*}}, <4 x float> %{{.*}}, <8 x i32> <i32 0, i32 1, i32 0, i32 1, i32 0, i32 1, i32 0, i32 1>
@@ -994,6 +996,8 @@ __m256 test_mm256_maskz_broadcast_f32x2(__mmask8 __M, __m128 __A) {
return _mm256_maskz_broadcast_f32x2(__M, __A);
}
+TEST_CONSTEXPR(match_m256(_mm256_maskz_broadcast_f32x2(0xAA, (__m128)(__v4sf){1.f,2.f,3.f,4.f}), 0,2.f,0,2.f,0,2.f,0,2.f));
+
__m256d test_mm256_broadcast_f64x2(double const* __A) {
// CHECK-LABEL: test_mm256_broadcast_f64x2
// CHECK: shufflevector <2 x double> %{{.*}}, <2 x double> %{{.*}}, <4 x i32> <i32 0, i32 1, i32 0, i32 1>
@@ -1008,6 +1012,8 @@ __m256d test_mm256_mask_broadcast_f64x2(__m256d __O, __mmask8 __M, double const*
return _mm256_mask_broadcast_f64x2(__O, __M, _mm_loadu_pd(__A));
}
+TEST_CONSTEXPR(match_m256d(_mm256_mask_broadcast_f64x2(_mm256_setzero_pd(), 0xA, (__m128d)(__v2df){1.0,2.0}), 0,2.0,0,2.0));
+
__m256d test_mm256_maskz_broadcast_f64x2(__mmask8 __M, double const* __A) {
// CHECK-LABEL: test_mm256_maskz_broadcast_f64x2
// CHECK: shufflevector <2 x double> %{{.*}}, <2 x double> %{{.*}}, <4 x i32> <i32 0, i32 1, i32 0, i32 1>
@@ -1015,6 +1021,8 @@ __m256d test_mm256_maskz_broadcast_f64x2(__mmask8 __M, double const* __A) {
return _mm256_maskz_broadcast_f64x2(__M, _mm_loadu_pd(__A));
}
+TEST_CONSTEXPR(match_m256d(_mm256_maskz_broadcast_f64x2(0xA, (__m128d)(__v2df){1.0,2.0}), 0,2.0,0,2.0));
+
__m128i test_mm_broadcast_i32x2(__m128i __A) {
// CHECK-LABEL: test_mm_broadcast_i32x2
// CHECK: shufflevector <4 x i32> %{{.*}}, <4 x i32> %{{.*}}, <4 x i32> <i32 0, i32 1, i32 0, i32 1>
@@ -1029,6 +1037,8 @@ __m128i test_mm_mask_broadcast_i32x2(__m128i __O, __mmask8 __M, __m128i __A) {
return _mm_mask_broadcast_i32x2(__O, __M, __A);
}
+TEST_CONSTEXPR(match_v4si(_mm_mask_broadcast_i32x2(_mm_setzero_si128(), 0xF, (__m128i)(__v4si){0,1,2,3}), 0,1,0,1));
+
__m128i test_mm_maskz_broadcast_i32x2(__mmask8 __M, __m128i __A) {
// CHECK-LABEL: test_mm_maskz_broadcast_i32x2
// CHECK: shufflevector <4 x i32> %{{.*}}, <4 x i32> %{{.*}}, <4 x i32> <i32 0, i32 1, i32 0, i32 1>
@@ -1036,6 +1046,8 @@ __m128i test_mm_maskz_broadcast_i32x2(__mmask8 __M, __m128i __A) {
return _mm_maskz_broadcast_i32x2(__M, __A);
}
+TEST_CONSTEXPR(match_v4si(_mm_maskz_broadcast_i32x2(0xF, (__m128i)(__v4si){0,1,2,3}), 0,1,0,1));
+
__m256i test_mm256_broadcast_i32x2(__m128i __A) {
// CHECK-LABEL: test_mm256_broadcast_i32x2
// CHECK: shufflevector <4 x i32> %{{.*}}, <4 x i32> %{{.*}}, <8 x i32> <i32 0, i32 1, i32 0, i32 1, i32 0, i32 1, i32 0, i32 1>
@@ -1050,6 +1062,8 @@ __m256i test_mm256_mask_broadcast_i32x2(__m256i __O, __mmask8 __M, __m128i __A)
return _mm256_mask_broadcast_i32x2(__O, __M, __A);
}
+TEST_CONSTEXPR(match_v8si(_mm256_mask_broadcast_i32x2(_mm256_setzero_si256(), 0xAA, (__m128i)(__v4si){0,1,2,3}), 0,1,0,1,0,1,0,1));
+
__m256i test_mm256_maskz_broadcast_i32x2(__mmask8 __M, __m128i __A) {
// CHECK-LABEL: test_mm256_maskz_broadcast_i32x2
// CHECK: shufflevector <4 x i32> %{{.*}}, <4 x i32> %{{.*}}, <8 x i32> <i32 0, i32 1, i32 0, i32 1, i32 0, i32 1, i32 0, i32 1>
@@ -1057,6 +1071,8 @@ __m256i test_mm256_maskz_broadcast_i32x2(__mmask8 __M, __m128i __A) {
return _mm256_maskz_broadcast_i32x2(__M, __A);
}
+TEST_CONSTEXPR(match_v8si(_mm256_maskz_broadcast_i32x2(0xAA, (__m128i)(__v4si){0,1,2,3}), 0,1,0,1,0,1,0,1));
+
__m256i test_mm256_broadcast_i64x2(__m128i const* __A) {
// CHECK-LABEL: test_mm256_broadcast_i64x2
// CHECK: shufflevector <2 x i64> %{{.*}}, <2 x i64> %{{.*}}, <4 x i32> <i32 0, i32 1, i32 0, i32 1>
@@ -1078,6 +1094,8 @@ __m256i test_mm256_maskz_broadcast_i64x2(__mmask8 __M, __m128i const* __A) {
return _mm256_maskz_broadcast_i64x2(__M, _mm_loadu_si128(__A));
}
+TEST_CONSTEXPR(match_v4di(_mm256_maskz_broadcast_i64x2(0xF, (__m128i)(__v2di){1,2}), 1,2,1,2));
+
__m128d test_mm256_extractf64x2_pd(__m256d __A) {
// CHECK-LABEL: test_mm256_extractf64x2_pd
// CHECK: shufflevector <4 x double> %{{.*}}, <4 x double> poison, <2 x i32> <i32 2, i32 3>
diff --git a/clang/test/CodeGen/X86/avxifma-builtins.c b/clang/test/CodeGen/X86/avxifma-builtins.c
index aa15159..70531da 100644
--- a/clang/test/CodeGen/X86/avxifma-builtins.c
+++ b/clang/test/CodeGen/X86/avxifma-builtins.c
@@ -8,8 +8,9 @@
// RUN: %clang_cc1 -x c++ -ffreestanding %s -triple=x86_64-apple-darwin -target-feature +avxifma -emit-llvm -o - -Wall -Werror -fexperimental-new-constant-interpreter | FileCheck %s
// RUN: %clang_cc1 -x c++ -ffreestanding %s -triple=i386-apple-darwin -target-feature +avxifma -emit-llvm -o - -Wall -Werror -fexperimental-new-constant-interpreter | FileCheck %s
-
#include <immintrin.h>
+#include "builtin_test_helpers.h"
+
__m128i test_mm_madd52hi_epu64(__m128i __X, __m128i __Y, __m128i __Z) {
// CHECK-LABEL: test_mm_madd52hi_epu64
@@ -17,44 +18,207 @@ __m128i test_mm_madd52hi_epu64(__m128i __X, __m128i __Y, __m128i __Z) {
return _mm_madd52hi_epu64(__X, __Y, __Z);
}
+TEST_CONSTEXPR(match_v2di(_mm_madd52hi_epu64(
+ (__m128i)((__v2du){50, 100}),
+ (__m128i)((__v2du){10, 20}),
+ (__m128i)((__v2du){5, 6})),
+ 50, 100));
+
+TEST_CONSTEXPR(match_v2di(_mm_madd52hi_epu64(
+ (__m128i)((__v2du){0, 0}),
+ (__m128i)((__v2du){0xFFFFFFFFFFFFFull, 0}),
+ (__m128i)((__v2du){0xFFFFFFFFFFFFFull, 0})),
+ 0xFFFFFFFFFFFFEull, 0));
+
__m256i test_mm256_madd52hi_epu64(__m256i __X, __m256i __Y, __m256i __Z) {
// CHECK-LABEL: test_mm256_madd52hi_epu64
// CHECK: call {{.*}}<4 x i64> @llvm.x86.avx512.vpmadd52h.uq.256(<4 x i64> %{{.*}}, <4 x i64> %{{.*}}, <4 x i64> %{{.*}})
return _mm256_madd52hi_epu64(__X, __Y, __Z);
}
+TEST_CONSTEXPR(match_v4di(_mm256_madd52hi_epu64(
+ (__m256i)((__v4du){100, 200, 300, 400}),
+ (__m256i)((__v4du){10, 20, 30, 40}),
+ (__m256i)((__v4du){5, 6, 7, 8})),
+ 100, 200, 300, 400));
+
+TEST_CONSTEXPR(match_v4di(_mm256_madd52hi_epu64(
+ (__m256i)((__v4du){0, 0, 0, 0}),
+ (__m256i)((__v4du){0xFFFFFFFFFFFFFull,
+ 0xFFFFFFFFFFFFFull, 0, 0}),
+ (__m256i)((__v4du){0xFFFFFFFFFFFFFull,
+ 0xFFFFFFFFFFFFFull, 0, 0})),
+ 0xFFFFFFFFFFFFEull, 0xFFFFFFFFFFFFEull, 0, 0));
+
__m128i test_mm_madd52lo_epu64(__m128i __X, __m128i __Y, __m128i __Z) {
// CHECK-LABEL: test_mm_madd52lo_epu64
// CHECK: call {{.*}}<2 x i64> @llvm.x86.avx512.vpmadd52l.uq.128(<2 x i64> %{{.*}}, <2 x i64> %{{.*}}, <2 x i64> %{{.*}})
return _mm_madd52lo_epu64(__X, __Y, __Z);
}
+TEST_CONSTEXPR(match_v2di(_mm_madd52lo_epu64(
+ (__m128i)((__v2du){0, 0}),
+ (__m128i)((__v2du){10, 0}),
+ (__m128i)((__v2du){5, 0})),
+ 50, 0));
+
+TEST_CONSTEXPR(match_v2di(_mm_madd52lo_epu64(
+ (__m128i)((__v2du){1, 2}),
+ (__m128i)((__v2du){10, 20}),
+ (__m128i)((__v2du){2, 3})),
+ 21, 62));
+
+TEST_CONSTEXPR(match_v2di(_mm_madd52lo_epu64(
+ (__m128i)((__v2du){0, 0}),
+ (__m128i)((__v2du){0xFFFFFFFFFFFFFull, 0}),
+ (__m128i)((__v2du){1, 0})),
+ 0xFFFFFFFFFFFFFull, 0));
+
__m256i test_mm256_madd52lo_epu64(__m256i __X, __m256i __Y, __m256i __Z) {
// CHECK-LABEL: test_mm256_madd52lo_epu64
// CHECK: call {{.*}}<4 x i64> @llvm.x86.avx512.vpmadd52l.uq.256(<4 x i64> %{{.*}}, <4 x i64> %{{.*}}, <4 x i64> %{{.*}})
return _mm256_madd52lo_epu64(__X, __Y, __Z);
}
+TEST_CONSTEXPR(match_v4di(_mm256_madd52lo_epu64(
+ (__m256i)((__v4du){1, 2, 3, 4}),
+ (__m256i)((__v4du){10, 20, 30, 40}),
+ (__m256i)((__v4du){2, 3, 4, 5})),
+ 21, 62, 123, 204));
+
+TEST_CONSTEXPR(match_v4di(_mm256_madd52lo_epu64(
+ (__m256i)((__v4du){0, 0, 0, 0}),
+ (__m256i)((__v4du){0xFFFFFFFFFFFFFull, 0, 0,
+ 0}),
+ (__m256i)((__v4du){1, 0, 0, 0})),
+ 0xFFFFFFFFFFFFFull, 0, 0, 0));
+
+TEST_CONSTEXPR(match_v4di(_mm256_madd52lo_epu64(
+ (__m256i)((__v4du){0, 0, 0, 0}),
+ (__m256i)((__v4du){0x1F000000000000ull, 0, 0,
+ 0}),
+ (__m256i)((__v4du){2, 0, 0, 0})),
+ 0xE000000000000ull, 0, 0, 0));
+
__m128i test_mm_madd52hi_avx_epu64(__m128i __X, __m128i __Y, __m128i __Z) {
-// CHECK-LABEL: test_mm_madd52hi_avx_epu64
-// CHECK: call {{.*}}<2 x i64> @llvm.x86.avx512.vpmadd52h.uq.128(<2 x i64> %{{.*}}, <2 x i64> %{{.*}}, <2 x i64> %{{.*}})
+ // CHECK-LABEL: test_mm_madd52hi_avx_epu64
+ // CHECK: call {{.*}}<2 x i64> @llvm.x86.avx512.vpmadd52h.uq.128(<2 x i64> %{{.*}}, <2 x i64> %{{.*}}, <2 x i64> %{{.*}})
return _mm_madd52hi_avx_epu64(__X, __Y, __Z);
}
+TEST_CONSTEXPR(match_v2di(_mm_madd52hi_avx_epu64(
+ (__m128i)((__v2du){50, 100}),
+ (__m128i)((__v2du){10, 20}),
+ (__m128i)((__v2du){5, 6})),
+ 50, 100));
+
+TEST_CONSTEXPR(match_v2di(_mm_madd52hi_avx_epu64(
+ (__m128i)((__v2du){100, 0}),
+ (__m128i)((__v2du){10, 0}),
+ (__m128i)((__v2du){5, 0})),
+ 100, 0));
+
+TEST_CONSTEXPR(match_v2di(_mm_madd52hi_avx_epu64(
+ (__m128i)((__v2du){0, 0}),
+ (__m128i)((__v2du){0xFFFFFFFFFFFFFull, 0}),
+ (__m128i)((__v2du){0xFFFFFFFFFFFFFull, 0})),
+ 0xFFFFFFFFFFFFEull, 0));
+
__m256i test_mm256_madd52hi_avx_epu64(__m256i __X, __m256i __Y, __m256i __Z) {
-// CHECK-LABEL: test_mm256_madd52hi_avx_epu64
-// CHECK: call {{.*}}<4 x i64> @llvm.x86.avx512.vpmadd52h.uq.256(<4 x i64> %{{.*}}, <4 x i64> %{{.*}}, <4 x i64> %{{.*}})
+ // CHECK-LABEL: test_mm256_madd52hi_avx_epu64
+ // CHECK: call {{.*}}<4 x i64> @llvm.x86.avx512.vpmadd52h.uq.256(<4 x i64> %{{.*}}, <4 x i64> %{{.*}}, <4 x i64> %{{.*}})
return _mm256_madd52hi_avx_epu64(__X, __Y, __Z);
}
+TEST_CONSTEXPR(match_v4di(_mm256_madd52hi_avx_epu64(
+ (__m256i)((__v4du){0, 0, 0, 0}),
+ (__m256i)((__v4du){0xFFFFFFFFFFFFFull,
+ 0xFFFFFFFFFFFFFull, 0, 0}),
+ (__m256i)((__v4du){0xFFFFFFFFFFFFFull,
+ 0xFFFFFFFFFFFFFull, 0, 0})),
+ 0xFFFFFFFFFFFFEull, 0xFFFFFFFFFFFFEull, 0, 0));
+
+TEST_CONSTEXPR(match_v4di(_mm256_madd52hi_avx_epu64(
+ (__m256i)((__v4du){100, 200, 300, 400}),
+ (__m256i)((__v4du){10, 20, 30, 40}),
+ (__m256i)((__v4du){5, 6, 7, 8})),
+ 100, 200, 300, 400));
+
+TEST_CONSTEXPR(match_v4di(_mm256_madd52hi_avx_epu64(
+ (__m256i)((__v4du){0, 0, 0, 0}),
+ (__m256i)((__v4du){0xFFFFFFFFFFFFFull, 0, 0,
+ 0}),
+ (__m256i)((__v4du){0xFFFFFFFFFFFFFull, 0, 0,
+ 0})),
+ 0xFFFFFFFFFFFFEull, 0, 0, 0));
+
__m128i test_mm_madd52lo_avx_epu64(__m128i __X, __m128i __Y, __m128i __Z) {
-// CHECK-LABEL: test_mm_madd52lo_avx_epu64
-// CHECK: call {{.*}}<2 x i64> @llvm.x86.avx512.vpmadd52l.uq.128(<2 x i64> %{{.*}}, <2 x i64> %{{.*}}, <2 x i64> %{{.*}})
+ // CHECK-LABEL: test_mm_madd52lo_avx_epu64
+ // CHECK: call {{.*}}<2 x i64> @llvm.x86.avx512.vpmadd52l.uq.128(<2 x i64> %{{.*}}, <2 x i64> %{{.*}}, <2 x i64> %{{.*}})
return _mm_madd52lo_avx_epu64(__X, __Y, __Z);
}
+TEST_CONSTEXPR(match_v2di(_mm_madd52lo_avx_epu64(
+ (__m128i)((__v2du){0, 0}),
+ (__m128i)((__v2du){10, 0}),
+ (__m128i)((__v2du){5, 0})),
+ 50, 0));
+
+TEST_CONSTEXPR(match_v2di(_mm_madd52lo_avx_epu64(
+ (__m128i)((__v2du){100, 0}),
+ (__m128i)((__v2du){20, 0}),
+ (__m128i)((__v2du){30, 0})),
+ 700, 0));
+
+TEST_CONSTEXPR(match_v2di(_mm_madd52lo_avx_epu64(
+ (__m128i)((__v2du){1, 2}),
+ (__m128i)((__v2du){10, 20}),
+ (__m128i)((__v2du){2, 3})),
+ 21, 62));
+
+TEST_CONSTEXPR(match_v2di(_mm_madd52lo_avx_epu64(
+ (__m128i)((__v2du){0, 0}),
+ (__m128i)((__v2du){0xFFFFFFFFFFFFFull, 0}),
+ (__m128i)((__v2du){1, 0})),
+ 0xFFFFFFFFFFFFFull, 0));
+
__m256i test_mm256_madd52lo_avx_epu64(__m256i __X, __m256i __Y, __m256i __Z) {
-// CHECK-LABEL: test_mm256_madd52lo_avx_epu64
-// CHECK: call {{.*}}<4 x i64> @llvm.x86.avx512.vpmadd52l.uq.256(<4 x i64> %{{.*}}, <4 x i64> %{{.*}}, <4 x i64> %{{.*}})
+ // CHECK-LABEL: test_mm256_madd52lo_avx_epu64
+ // CHECK: call {{.*}}<4 x i64> @llvm.x86.avx512.vpmadd52l.uq.256(<4 x i64> %{{.*}}, <4 x i64> %{{.*}}, <4 x i64> %{{.*}})
return _mm256_madd52lo_avx_epu64(__X, __Y, __Z);
}
+
+TEST_CONSTEXPR(match_v4di(_mm256_madd52lo_avx_epu64(
+ (__m256i)((__v4du){1, 2, 3, 4}),
+ (__m256i)((__v4du){10, 20, 30, 40}),
+ (__m256i)((__v4du){2, 3, 4, 5})),
+ 21, 62, 123, 204));
+
+TEST_CONSTEXPR(match_v4di(_mm256_madd52lo_avx_epu64(
+ (__m256i)((__v4du){0, 0, 0, 0}),
+ (__m256i)((__v4du){0xFFFFFFFFFFFFFull, 0, 0,
+ 0}),
+ (__m256i)((__v4du){1, 0, 0, 0})),
+ 0xFFFFFFFFFFFFFull, 0, 0, 0));
+
+TEST_CONSTEXPR(match_v4di(_mm256_madd52lo_avx_epu64(
+ (__m256i)((__v4du){0, 0, 0, 0}),
+ (__m256i)((__v4du){0x1F000000000000ull, 0, 0,
+ 0}),
+ (__m256i)((__v4du){2, 0, 0, 0})),
+ 0xE000000000000ull, 0, 0, 0));
+
+TEST_CONSTEXPR(match_v2di(_mm_madd52lo_avx_epu64(
+ (__m128i)((__v2du){5, 10}),
+ (__m128i)((__v2du){100, 200}),
+ (__m128i)((__v2du){7, 8})),
+ 705, 1610));
+
diff --git a/clang/test/CodeGen/X86/mmx-builtins.c b/clang/test/CodeGen/X86/mmx-builtins.c
index 2b45b92..d9041d4 100644
--- a/clang/test/CodeGen/X86/mmx-builtins.c
+++ b/clang/test/CodeGen/X86/mmx-builtins.c
@@ -312,36 +312,42 @@ __m64 test_mm_hadd_pi16(__m64 a, __m64 b) {
// CHECK: call <8 x i16> @llvm.x86.ssse3.phadd.w.128(
return _mm_hadd_pi16(a, b);
}
+TEST_CONSTEXPR(match_v4hi(_mm_hadd_pi16((__m64)(__v4hi){1,2,3,4},(__m64)(__v4hi){5,6,7,8}),3,7,11,15));
__m64 test_mm_hadd_pi32(__m64 a, __m64 b) {
// CHECK-LABEL: test_mm_hadd_pi32
// CHECK: call <4 x i32> @llvm.x86.ssse3.phadd.d.128(
return _mm_hadd_pi32(a, b);
}
+TEST_CONSTEXPR(match_v2si(_mm_hadd_pi32((__m64)(__v2si){1,2},(__m64)(__v2si){3,4}),3,7));
__m64 test_mm_hadds_pi16(__m64 a, __m64 b) {
// CHECK-LABEL: test_mm_hadds_pi16
// CHECK: call <8 x i16> @llvm.x86.ssse3.phadd.sw.128(
return _mm_hadds_pi16(a, b);
}
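+// Horizontal add with signed 16-bit saturation of each adjacent pair.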
+TEST_CONSTEXPR(match_v4hi(_mm_hadds_pi16((__m64)(__v4hi){32767, 32767, 1,3},(__m64)(__v4hi){-1,3, 40, 60}),32767, 4, 2,100));
__m64 test_mm_hsub_pi16(__m64 a, __m64 b) {
// CHECK-LABEL: test_mm_hsub_pi16
// CHECK: call <8 x i16> @llvm.x86.ssse3.phsub.w.128(
return _mm_hsub_pi16(a, b);
}
+TEST_CONSTEXPR(match_v4hi(_mm_hsub_pi16((__m64)(__v4hi){1,2,4,3},(__m64)(__v4hi){10,5,0,-10}),-1,1,5,10));
__m64 test_mm_hsub_pi32(__m64 a, __m64 b) {
// CHECK-LABEL: test_mm_hsub_pi32
// CHECK: call <4 x i32> @llvm.x86.ssse3.phsub.d.128(
return _mm_hsub_pi32(a, b);
}
+TEST_CONSTEXPR(match_v2si(_mm_hsub_pi32((__m64)(__v2si){1,2},(__m64)(__v2si){4,3}),-1,1));
__m64 test_mm_hsubs_pi16(__m64 a, __m64 b) {
// CHECK-LABEL: test_mm_hsubs_pi16
// CHECK: call <8 x i16> @llvm.x86.ssse3.phsub.sw.128(
return _mm_hsubs_pi16(a, b);
}
+TEST_CONSTEXPR(match_v4hi(_mm_hsubs_pi16((__m64)(__v4hi){32767, 32767, 5, -32767},(__m64)(__v4hi){4,5,10,5}),0,32767,-1,5));
__m64 test_mm_insert_pi16(__m64 a, int d) {
// CHECK-LABEL: test_mm_insert_pi16
@@ -583,6 +589,8 @@ __m64 test_mm_shuffle_pi8(__m64 a, __m64 b) {
return _mm_shuffle_pi8(a, b);
}
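+// Each result byte is 0 if the control byte's top bit is set, otherwise a byte of a selected by the control's low bits.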
+TEST_CONSTEXPR(match_v8qi(_mm_shuffle_pi8((__m64)(__v8qi){0,1,2,3,4,5,6,7}, (__m64)(__v8qi){10,20,30,40,50,60,70,80}), 2,4,6,0,2,4,6,0));
+
__m64 test_mm_shuffle_pi16(__m64 a) {
// CHECK-LABEL: test_mm_shuffle_pi16
// CHECK: shufflevector <4 x i16> {{%.*}}, <4 x i16> {{%.*}}, <4 x i32> <i32 3, i32 0, i32 0, i32 0>
diff --git a/clang/test/CodeGen/X86/sse3-builtins.c b/clang/test/CodeGen/X86/sse3-builtins.c
index c53afc5..a82dd40 100644
--- a/clang/test/CodeGen/X86/sse3-builtins.c
+++ b/clang/test/CodeGen/X86/sse3-builtins.c
@@ -31,24 +31,28 @@ __m128d test_mm_hadd_pd(__m128d A, __m128d B) {
// CHECK: call {{.*}}<2 x double> @llvm.x86.sse3.hadd.pd(<2 x double> %{{.*}}, <2 x double> %{{.*}})
return _mm_hadd_pd(A, B);
}
+TEST_CONSTEXPR(match_m128d(_mm_hadd_pd((__m128d){+1.0, +2.0}, (__m128d){+3.0, +4.0}), +3.0, +7.0));
__m128 test_mm_hadd_ps(__m128 A, __m128 B) {
// CHECK-LABEL: test_mm_hadd_ps
// CHECK: call {{.*}}<4 x float> @llvm.x86.sse3.hadd.ps(<4 x float> %{{.*}}, <4 x float> %{{.*}})
return _mm_hadd_ps(A, B);
}
+TEST_CONSTEXPR(match_m128(_mm_hadd_ps((__m128){+1.0f, +2.0f, +3.0f, +4.0f}, (__m128){+5.0f,+6.0f,+7.0f,+8.0f}), +3.0f, +7.0f, +11.0f, +15.0f));
__m128d test_mm_hsub_pd(__m128d A, __m128d B) {
// CHECK-LABEL: test_mm_hsub_pd
// CHECK: call {{.*}}<2 x double> @llvm.x86.sse3.hsub.pd(<2 x double> %{{.*}}, <2 x double> %{{.*}})
return _mm_hsub_pd(A, B);
}
+TEST_CONSTEXPR(match_m128d(_mm_hsub_pd((__m128d){+1.0, +2.0}, (__m128d){+4.0, +3.0}), -1.0, +1.0));
__m128 test_mm_hsub_ps(__m128 A, __m128 B) {
// CHECK-LABEL: test_mm_hsub_ps
// CHECK: call {{.*}}<4 x float> @llvm.x86.sse3.hsub.ps(<4 x float> %{{.*}}, <4 x float> %{{.*}})
return _mm_hsub_ps(A, B);
}
+TEST_CONSTEXPR(match_m128(_mm_hsub_ps((__m128){+1.0f, +2.0f, +4.0f, +3.0f}, (__m128){+5.0f,+7.0f,+10.0f,+8.0f}), -1.0f, +1.0f, -2.0f, +2.0f));
__m128i test_mm_lddqu_si128(__m128i const* P) {
// CHECK-LABEL: test_mm_lddqu_si128
diff --git a/clang/test/CodeGen/X86/sse41-builtins.c b/clang/test/CodeGen/X86/sse41-builtins.c
index eee479a..89a7ac2 100644
--- a/clang/test/CodeGen/X86/sse41-builtins.c
+++ b/clang/test/CodeGen/X86/sse41-builtins.c
@@ -449,33 +449,53 @@ int test_mm_test_all_ones(__m128i x) {
// CHECK: call {{.*}}i32 @llvm.x86.sse41.ptestc(<2 x i64> %{{.*}}, <2 x i64> %{{.*}})
return _mm_test_all_ones(x);
}
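+// Returns 1 iff every bit of x is set.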
+TEST_CONSTEXPR(_mm_test_all_ones(((__m128i)(__v2di){-1, -1})) == 1);
+TEST_CONSTEXPR(_mm_test_all_ones(((__m128i)(__v2di){-1, 0})) == 0);
+TEST_CONSTEXPR(_mm_test_all_ones(((__m128i)(__v4si){-1, -1, -1, 0x7FFFFFFF})) == 0);
int test_mm_test_all_zeros(__m128i x, __m128i y) {
// CHECK-LABEL: test_mm_test_all_zeros
// CHECK: call {{.*}}i32 @llvm.x86.sse41.ptestz(<2 x i64> %{{.*}}, <2 x i64> %{{.*}})
return _mm_test_all_zeros(x, y);
}
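+// Returns 1 iff (x & y) == 0.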
+TEST_CONSTEXPR(_mm_test_all_zeros(((__m128i)(__v2di){0,0}), ((__m128i)(__v2di){0,0})) == 1);
+TEST_CONSTEXPR(_mm_test_all_zeros(((__m128i)(__v2di){0xFF00,0}), ((__m128i)(__v2di){0x00FF,0})) == 1);
+TEST_CONSTEXPR(_mm_test_all_zeros(((__m128i)(__v2di){1,0}), ((__m128i)(__v2di){-1,0})) == 0);
+TEST_CONSTEXPR(_mm_test_all_zeros(((__m128i)(__v2di){0,1}), ((__m128i)(__v2di){0,-1})) == 0);
int test_mm_test_mix_ones_zeros(__m128i x, __m128i y) {
// CHECK-LABEL: test_mm_test_mix_ones_zeros
// CHECK: call {{.*}}i32 @llvm.x86.sse41.ptestnzc(<2 x i64> %{{.*}}, <2 x i64> %{{.*}})
return _mm_test_mix_ones_zeros(x, y);
}
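+// Returns 1 iff (x & y) != 0 and (~x & y) != 0.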
+TEST_CONSTEXPR(_mm_test_mix_ones_zeros(((__m128i)(__v2di){0xFF, 0}), ((__m128i)(__v2di){0xF0, 1})) == 1);
+TEST_CONSTEXPR(_mm_test_mix_ones_zeros(((__m128i)(__v2di){0xF0, 0}), ((__m128i)(__v2di){0x0F, 0})) == 0);
+TEST_CONSTEXPR(_mm_test_mix_ones_zeros(((__m128i)(__v2di){-1, -1}), ((__m128i)(__v2di){1, 0})) == 0);
+TEST_CONSTEXPR(_mm_test_mix_ones_zeros(((__m128i)(__v2di){0, 0}), ((__m128i)(__v2di){0, 0})) == 0);
int test_mm_testc_si128(__m128i x, __m128i y) {
// CHECK-LABEL: test_mm_testc_si128
// CHECK: call {{.*}}i32 @llvm.x86.sse41.ptestc(<2 x i64> %{{.*}}, <2 x i64> %{{.*}})
return _mm_testc_si128(x, y);
}
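+// Returns 1 iff (~x & y) == 0, i.e. every set bit of y is also set in x.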
+TEST_CONSTEXPR(_mm_testc_si128((__m128i)(__v2di){0,0}, (__m128i)(__v2di){0,0}) == 1);
+TEST_CONSTEXPR(_mm_testc_si128((__m128i)(__v2di){1,0}, (__m128i)(__v2di){-1,0}) == 0);
+TEST_CONSTEXPR(_mm_testc_si128((__m128i)(__v2di){0,-1}, (__m128i)(__v2di){0,1}) == 1);
int test_mm_testnzc_si128(__m128i x, __m128i y) {
// CHECK-LABEL: test_mm_testnzc_si128
// CHECK: call {{.*}}i32 @llvm.x86.sse41.ptestnzc(<2 x i64> %{{.*}}, <2 x i64> %{{.*}})
return _mm_testnzc_si128(x, y);
}
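+// Returns 1 iff (x & y) != 0 and (~x & y) != 0.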
+TEST_CONSTEXPR(_mm_testnzc_si128((__m128i)(__v2di){3,0}, (__m128i)(__v2di){1,1}) == 1);
+TEST_CONSTEXPR(_mm_testnzc_si128((__m128i)(__v2di){32,-1}, (__m128i)(__v2di){15,0}) == 0);
+TEST_CONSTEXPR(_mm_testnzc_si128((__m128i)(__v2di){0,999}, (__m128i)(__v2di){0,999}) == 0);
int test_mm_testz_si128(__m128i x, __m128i y) {
// CHECK-LABEL: test_mm_testz_si128
// CHECK: call {{.*}}i32 @llvm.x86.sse41.ptestz(<2 x i64> %{{.*}}, <2 x i64> %{{.*}})
return _mm_testz_si128(x, y);
}
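+// Returns 1 iff (x & y) == 0.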
+TEST_CONSTEXPR(_mm_testz_si128((__m128i)(__v2di){0,0}, (__m128i)(__v2di){0,0}) == 1);
+TEST_CONSTEXPR(_mm_testz_si128((__m128i)(__v2di){1,0}, (__m128i)(__v2di){-1,0}) == 0);
+TEST_CONSTEXPR(_mm_testz_si128((__m128i)(__v2di){1,0}, (__m128i)(__v2di){0,1}) == 1);
diff --git a/clang/test/CodeGen/X86/ssse3-builtins.c b/clang/test/CodeGen/X86/ssse3-builtins.c
index 5885768..32abd9d 100644
--- a/clang/test/CodeGen/X86/ssse3-builtins.c
+++ b/clang/test/CodeGen/X86/ssse3-builtins.c
@@ -60,36 +60,43 @@ __m128i test_mm_hadd_epi16(__m128i a, __m128i b) {
// CHECK: call <8 x i16> @llvm.x86.ssse3.phadd.w.128(<8 x i16> %{{.*}}, <8 x i16> %{{.*}})
return _mm_hadd_epi16(a, b);
}
+TEST_CONSTEXPR(match_v8hi(_mm_hadd_epi16((__m128i)(__v8hi){1,2,3,4,5,6,7,8}, (__m128i)(__v8hi){17,18,19,20,21,22,23,24}), 3,7,11,15,35,39,43,47));
__m128i test_mm_hadd_epi32(__m128i a, __m128i b) {
// CHECK-LABEL: test_mm_hadd_epi32
// CHECK: call <4 x i32> @llvm.x86.ssse3.phadd.d.128(<4 x i32> %{{.*}}, <4 x i32> %{{.*}})
return _mm_hadd_epi32(a, b);
}
+TEST_CONSTEXPR(match_v4si(_mm_hadd_epi32((__m128i)(__v4si){1,2,3,4}, (__m128i)(__v4si){5,6,7,8}), 3,7,11,15));
__m128i test_mm_hadds_epi16(__m128i a, __m128i b) {
// CHECK-LABEL: test_mm_hadds_epi16
// CHECK: call <8 x i16> @llvm.x86.ssse3.phadd.sw.128(<8 x i16> %{{.*}}, <8 x i16> %{{.*}})
return _mm_hadds_epi16(a, b);
}
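+// Horizontal add with signed 16-bit saturation of each adjacent pair.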
+TEST_CONSTEXPR(match_v8hi(_mm_hadds_epi16((__m128i)(__v8hi){30000,30000,-1,2,-3,3,1,4}, (__m128i)(__v8hi){2,6,1,9,-4,16,7,8}), 32767, 1,0,5,8,10,12,15));
+
__m128i test_mm_hsub_epi16(__m128i a, __m128i b) {
// CHECK-LABEL: test_mm_hsub_epi16
// CHECK: call <8 x i16> @llvm.x86.ssse3.phsub.w.128(<8 x i16> %{{.*}}, <8 x i16> %{{.*}})
return _mm_hsub_epi16(a, b);
}
+TEST_CONSTEXPR(match_v8hi(_mm_hsub_epi16((__m128i)(__v8hi){20,15,16,12,9,6,4,2}, (__m128i)(__v8hi){3,2,1,1,4,5,0,2}), 5,4,3,2,1,0,-1,-2));
__m128i test_mm_hsub_epi32(__m128i a, __m128i b) {
// CHECK-LABEL: test_mm_hsub_epi32
// CHECK: call <4 x i32> @llvm.x86.ssse3.phsub.d.128(<4 x i32> %{{.*}}, <4 x i32> %{{.*}})
return _mm_hsub_epi32(a, b);
}
+TEST_CONSTEXPR(match_v4si(_mm_hsub_epi32((__m128i)(__v4si){4,3,1,1}, (__m128i)(__v4si){7,5,10,5}), 1,0,2,5));
__m128i test_mm_hsubs_epi16(__m128i a, __m128i b) {
// CHECK-LABEL: test_mm_hsubs_epi16
// CHECK: call <8 x i16> @llvm.x86.ssse3.phsub.sw.128(<8 x i16> %{{.*}}, <8 x i16> %{{.*}})
return _mm_hsubs_epi16(a, b);
}
+TEST_CONSTEXPR(match_v8hi(_mm_hsubs_epi16((__m128i)(__v8hi){32767, -15,16,12,9,6,4,2},(__m128i)(__v8hi){3,2,1,1,4,5,0,2}), 32767,4,3,2,1,0,-1,-2));
__m128i test_mm_maddubs_epi16(__m128i a, __m128i b) {
// CHECK-LABEL: test_mm_maddubs_epi16
@@ -110,6 +117,8 @@ __m128i test_mm_shuffle_epi8(__m128i a, __m128i b) {
return _mm_shuffle_epi8(a, b);
}
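+// Each result byte is 0 if the control byte's top bit is set, otherwise a[control & 15].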
+TEST_CONSTEXPR(match_v16qi(_mm_shuffle_epi8((__m128i)(__v16qs){0,-1,-2,-3,-4,-5,-6,-7,-8,-9,-10,-11,-12,-13,-14,-15}, (__m128i)(__v16qs){15,-14,13,-12,11,-10,9,-8,7,-6,5,-4,3,-2,1,0}), -15,0,-13,0,-11,0,-9,0,-7,0,-5,0,-3,0,-1,0));
+
__m128i test_mm_sign_epi8(__m128i a, __m128i b) {
// CHECK-LABEL: test_mm_sign_epi8
// CHECK: call <16 x i8> @llvm.x86.ssse3.psign.b.128(<16 x i8> %{{.*}}, <16 x i8> %{{.*}})
diff --git a/clang/test/CodeGen/arm-mve-intrinsics/load-store.c b/clang/test/CodeGen/arm-mve-intrinsics/load-store.c
index 2dde75f..50f7011 100644
--- a/clang/test/CodeGen/arm-mve-intrinsics/load-store.c
+++ b/clang/test/CodeGen/arm-mve-intrinsics/load-store.c
@@ -8,8 +8,8 @@
// CHECK-LABEL: @test_vld1q_f16(
// CHECK-NEXT: entry:
-// CHECK-NEXT: [[TMP1:%.*]] = load <8 x half>, ptr [[BASE:%.*]], align 2
-// CHECK-NEXT: ret <8 x half> [[TMP1]]
+// CHECK-NEXT: [[TMP0:%.*]] = load <8 x half>, ptr [[BASE:%.*]], align 2
+// CHECK-NEXT: ret <8 x half> [[TMP0]]
//
float16x8_t test_vld1q_f16(const float16_t *base)
{
@@ -22,8 +22,8 @@ float16x8_t test_vld1q_f16(const float16_t *base)
// CHECK-LABEL: @test_vld1q_f32(
// CHECK-NEXT: entry:
-// CHECK-NEXT: [[TMP1:%.*]] = load <4 x float>, ptr [[BASE:%.*]], align 4
-// CHECK-NEXT: ret <4 x float> [[TMP1]]
+// CHECK-NEXT: [[TMP0:%.*]] = load <4 x float>, ptr [[BASE:%.*]], align 4
+// CHECK-NEXT: ret <4 x float> [[TMP0]]
//
float32x4_t test_vld1q_f32(const float32_t *base)
{
@@ -36,8 +36,8 @@ float32x4_t test_vld1q_f32(const float32_t *base)
// CHECK-LABEL: @test_vld1q_s8(
// CHECK-NEXT: entry:
-// CHECK-NEXT: [[TMP1:%.*]] = load <16 x i8>, ptr [[BASE:%.*]], align 1
-// CHECK-NEXT: ret <16 x i8> [[TMP1]]
+// CHECK-NEXT: [[TMP0:%.*]] = load <16 x i8>, ptr [[BASE:%.*]], align 1
+// CHECK-NEXT: ret <16 x i8> [[TMP0]]
//
int8x16_t test_vld1q_s8(const int8_t *base)
{
@@ -50,8 +50,8 @@ int8x16_t test_vld1q_s8(const int8_t *base)
// CHECK-LABEL: @test_vld1q_s16(
// CHECK-NEXT: entry:
-// CHECK-NEXT: [[TMP1:%.*]] = load <8 x i16>, ptr [[BASE:%.*]], align 2
-// CHECK-NEXT: ret <8 x i16> [[TMP1]]
+// CHECK-NEXT: [[TMP0:%.*]] = load <8 x i16>, ptr [[BASE:%.*]], align 2
+// CHECK-NEXT: ret <8 x i16> [[TMP0]]
//
int16x8_t test_vld1q_s16(const int16_t *base)
{
@@ -64,8 +64,8 @@ int16x8_t test_vld1q_s16(const int16_t *base)
// CHECK-LABEL: @test_vld1q_s32(
// CHECK-NEXT: entry:
-// CHECK-NEXT: [[TMP1:%.*]] = load <4 x i32>, ptr [[BASE:%.*]], align 4
-// CHECK-NEXT: ret <4 x i32> [[TMP1]]
+// CHECK-NEXT: [[TMP0:%.*]] = load <4 x i32>, ptr [[BASE:%.*]], align 4
+// CHECK-NEXT: ret <4 x i32> [[TMP0]]
//
int32x4_t test_vld1q_s32(const int32_t *base)
{
@@ -78,8 +78,8 @@ int32x4_t test_vld1q_s32(const int32_t *base)
// CHECK-LABEL: @test_vld1q_u8(
// CHECK-NEXT: entry:
-// CHECK-NEXT: [[TMP1:%.*]] = load <16 x i8>, ptr [[BASE:%.*]], align 1
-// CHECK-NEXT: ret <16 x i8> [[TMP1]]
+// CHECK-NEXT: [[TMP0:%.*]] = load <16 x i8>, ptr [[BASE:%.*]], align 1
+// CHECK-NEXT: ret <16 x i8> [[TMP0]]
//
uint8x16_t test_vld1q_u8(const uint8_t *base)
{
@@ -92,8 +92,8 @@ uint8x16_t test_vld1q_u8(const uint8_t *base)
// CHECK-LABEL: @test_vld1q_u16(
// CHECK-NEXT: entry:
-// CHECK-NEXT: [[TMP1:%.*]] = load <8 x i16>, ptr [[BASE:%.*]], align 2
-// CHECK-NEXT: ret <8 x i16> [[TMP1]]
+// CHECK-NEXT: [[TMP0:%.*]] = load <8 x i16>, ptr [[BASE:%.*]], align 2
+// CHECK-NEXT: ret <8 x i16> [[TMP0]]
//
uint16x8_t test_vld1q_u16(const uint16_t *base)
{
@@ -106,8 +106,8 @@ uint16x8_t test_vld1q_u16(const uint16_t *base)
// CHECK-LABEL: @test_vld1q_u32(
// CHECK-NEXT: entry:
-// CHECK-NEXT: [[TMP1:%.*]] = load <4 x i32>, ptr [[BASE:%.*]], align 4
-// CHECK-NEXT: ret <4 x i32> [[TMP1]]
+// CHECK-NEXT: [[TMP0:%.*]] = load <4 x i32>, ptr [[BASE:%.*]], align 4
+// CHECK-NEXT: ret <4 x i32> [[TMP0]]
//
uint32x4_t test_vld1q_u32(const uint32_t *base)
{
@@ -120,10 +120,10 @@ uint32x4_t test_vld1q_u32(const uint32_t *base)
// CHECK-LABEL: @test_vld1q_z_f16(
// CHECK-NEXT: entry:
-// CHECK-NEXT: [[TMP1:%.*]] = zext i16 [[P:%.*]] to i32
-// CHECK-NEXT: [[TMP2:%.*]] = call <8 x i1> @llvm.arm.mve.pred.i2v.v8i1(i32 [[TMP1]])
-// CHECK-NEXT: [[TMP3:%.*]] = call <8 x half> @llvm.masked.load.v8f16.p0(ptr [[BASE:%.*]], i32 2, <8 x i1> [[TMP2]], <8 x half> zeroinitializer)
-// CHECK-NEXT: ret <8 x half> [[TMP3]]
+// CHECK-NEXT: [[TMP0:%.*]] = zext i16 [[P:%.*]] to i32
+// CHECK-NEXT: [[TMP1:%.*]] = call <8 x i1> @llvm.arm.mve.pred.i2v.v8i1(i32 [[TMP0]])
+// CHECK-NEXT: [[TMP2:%.*]] = call <8 x half> @llvm.masked.load.v8f16.p0(ptr [[BASE:%.*]], i32 2, <8 x i1> [[TMP1]], <8 x half> zeroinitializer)
+// CHECK-NEXT: ret <8 x half> [[TMP2]]
//
float16x8_t test_vld1q_z_f16(const float16_t *base, mve_pred16_t p)
{
@@ -136,10 +136,10 @@ float16x8_t test_vld1q_z_f16(const float16_t *base, mve_pred16_t p)
// CHECK-LABEL: @test_vld1q_z_f32(
// CHECK-NEXT: entry:
-// CHECK-NEXT: [[TMP1:%.*]] = zext i16 [[P:%.*]] to i32
-// CHECK-NEXT: [[TMP2:%.*]] = call <4 x i1> @llvm.arm.mve.pred.i2v.v4i1(i32 [[TMP1]])
-// CHECK-NEXT: [[TMP3:%.*]] = call <4 x float> @llvm.masked.load.v4f32.p0(ptr [[BASE:%.*]], i32 4, <4 x i1> [[TMP2]], <4 x float> zeroinitializer)
-// CHECK-NEXT: ret <4 x float> [[TMP3]]
+// CHECK-NEXT: [[TMP0:%.*]] = zext i16 [[P:%.*]] to i32
+// CHECK-NEXT: [[TMP1:%.*]] = call <4 x i1> @llvm.arm.mve.pred.i2v.v4i1(i32 [[TMP0]])
+// CHECK-NEXT: [[TMP2:%.*]] = call <4 x float> @llvm.masked.load.v4f32.p0(ptr [[BASE:%.*]], i32 4, <4 x i1> [[TMP1]], <4 x float> zeroinitializer)
+// CHECK-NEXT: ret <4 x float> [[TMP2]]
//
float32x4_t test_vld1q_z_f32(const float32_t *base, mve_pred16_t p)
{
@@ -152,10 +152,10 @@ float32x4_t test_vld1q_z_f32(const float32_t *base, mve_pred16_t p)
// CHECK-LABEL: @test_vld1q_z_s8(
// CHECK-NEXT: entry:
-// CHECK-NEXT: [[TMP1:%.*]] = zext i16 [[P:%.*]] to i32
-// CHECK-NEXT: [[TMP2:%.*]] = call <16 x i1> @llvm.arm.mve.pred.i2v.v16i1(i32 [[TMP1]])
-// CHECK-NEXT: [[TMP3:%.*]] = call <16 x i8> @llvm.masked.load.v16i8.p0(ptr [[BASE:%.*]], i32 1, <16 x i1> [[TMP2]], <16 x i8> zeroinitializer)
-// CHECK-NEXT: ret <16 x i8> [[TMP3]]
+// CHECK-NEXT: [[TMP0:%.*]] = zext i16 [[P:%.*]] to i32
+// CHECK-NEXT: [[TMP1:%.*]] = call <16 x i1> @llvm.arm.mve.pred.i2v.v16i1(i32 [[TMP0]])
+// CHECK-NEXT: [[TMP2:%.*]] = call <16 x i8> @llvm.masked.load.v16i8.p0(ptr [[BASE:%.*]], i32 1, <16 x i1> [[TMP1]], <16 x i8> zeroinitializer)
+// CHECK-NEXT: ret <16 x i8> [[TMP2]]
//
int8x16_t test_vld1q_z_s8(const int8_t *base, mve_pred16_t p)
{
@@ -168,10 +168,10 @@ int8x16_t test_vld1q_z_s8(const int8_t *base, mve_pred16_t p)
// CHECK-LABEL: @test_vld1q_z_s16(
// CHECK-NEXT: entry:
-// CHECK-NEXT: [[TMP1:%.*]] = zext i16 [[P:%.*]] to i32
-// CHECK-NEXT: [[TMP2:%.*]] = call <8 x i1> @llvm.arm.mve.pred.i2v.v8i1(i32 [[TMP1]])
-// CHECK-NEXT: [[TMP3:%.*]] = call <8 x i16> @llvm.masked.load.v8i16.p0(ptr [[BASE:%.*]], i32 2, <8 x i1> [[TMP2]], <8 x i16> zeroinitializer)
-// CHECK-NEXT: ret <8 x i16> [[TMP3]]
+// CHECK-NEXT: [[TMP0:%.*]] = zext i16 [[P:%.*]] to i32
+// CHECK-NEXT: [[TMP1:%.*]] = call <8 x i1> @llvm.arm.mve.pred.i2v.v8i1(i32 [[TMP0]])
+// CHECK-NEXT: [[TMP2:%.*]] = call <8 x i16> @llvm.masked.load.v8i16.p0(ptr [[BASE:%.*]], i32 2, <8 x i1> [[TMP1]], <8 x i16> zeroinitializer)
+// CHECK-NEXT: ret <8 x i16> [[TMP2]]
//
int16x8_t test_vld1q_z_s16(const int16_t *base, mve_pred16_t p)
{
@@ -184,10 +184,10 @@ int16x8_t test_vld1q_z_s16(const int16_t *base, mve_pred16_t p)
// CHECK-LABEL: @test_vld1q_z_s32(
// CHECK-NEXT: entry:
-// CHECK-NEXT: [[TMP1:%.*]] = zext i16 [[P:%.*]] to i32
-// CHECK-NEXT: [[TMP2:%.*]] = call <4 x i1> @llvm.arm.mve.pred.i2v.v4i1(i32 [[TMP1]])
-// CHECK-NEXT: [[TMP3:%.*]] = call <4 x i32> @llvm.masked.load.v4i32.p0(ptr [[BASE:%.*]], i32 4, <4 x i1> [[TMP2]], <4 x i32> zeroinitializer)
-// CHECK-NEXT: ret <4 x i32> [[TMP3]]
+// CHECK-NEXT: [[TMP0:%.*]] = zext i16 [[P:%.*]] to i32
+// CHECK-NEXT: [[TMP1:%.*]] = call <4 x i1> @llvm.arm.mve.pred.i2v.v4i1(i32 [[TMP0]])
+// CHECK-NEXT: [[TMP2:%.*]] = call <4 x i32> @llvm.masked.load.v4i32.p0(ptr [[BASE:%.*]], i32 4, <4 x i1> [[TMP1]], <4 x i32> zeroinitializer)
+// CHECK-NEXT: ret <4 x i32> [[TMP2]]
//
int32x4_t test_vld1q_z_s32(const int32_t *base, mve_pred16_t p)
{
@@ -200,10 +200,10 @@ int32x4_t test_vld1q_z_s32(const int32_t *base, mve_pred16_t p)
// CHECK-LABEL: @test_vld1q_z_u8(
// CHECK-NEXT: entry:
-// CHECK-NEXT: [[TMP1:%.*]] = zext i16 [[P:%.*]] to i32
-// CHECK-NEXT: [[TMP2:%.*]] = call <16 x i1> @llvm.arm.mve.pred.i2v.v16i1(i32 [[TMP1]])
-// CHECK-NEXT: [[TMP3:%.*]] = call <16 x i8> @llvm.masked.load.v16i8.p0(ptr [[BASE:%.*]], i32 1, <16 x i1> [[TMP2]], <16 x i8> zeroinitializer)
-// CHECK-NEXT: ret <16 x i8> [[TMP3]]
+// CHECK-NEXT: [[TMP0:%.*]] = zext i16 [[P:%.*]] to i32
+// CHECK-NEXT: [[TMP1:%.*]] = call <16 x i1> @llvm.arm.mve.pred.i2v.v16i1(i32 [[TMP0]])
+// CHECK-NEXT: [[TMP2:%.*]] = call <16 x i8> @llvm.masked.load.v16i8.p0(ptr [[BASE:%.*]], i32 1, <16 x i1> [[TMP1]], <16 x i8> zeroinitializer)
+// CHECK-NEXT: ret <16 x i8> [[TMP2]]
//
uint8x16_t test_vld1q_z_u8(const uint8_t *base, mve_pred16_t p)
{
@@ -216,10 +216,10 @@ uint8x16_t test_vld1q_z_u8(const uint8_t *base, mve_pred16_t p)
// CHECK-LABEL: @test_vld1q_z_u16(
// CHECK-NEXT: entry:
-// CHECK-NEXT: [[TMP1:%.*]] = zext i16 [[P:%.*]] to i32
-// CHECK-NEXT: [[TMP2:%.*]] = call <8 x i1> @llvm.arm.mve.pred.i2v.v8i1(i32 [[TMP1]])
-// CHECK-NEXT: [[TMP3:%.*]] = call <8 x i16> @llvm.masked.load.v8i16.p0(ptr [[BASE:%.*]], i32 2, <8 x i1> [[TMP2]], <8 x i16> zeroinitializer)
-// CHECK-NEXT: ret <8 x i16> [[TMP3]]
+// CHECK-NEXT: [[TMP0:%.*]] = zext i16 [[P:%.*]] to i32
+// CHECK-NEXT: [[TMP1:%.*]] = call <8 x i1> @llvm.arm.mve.pred.i2v.v8i1(i32 [[TMP0]])
+// CHECK-NEXT: [[TMP2:%.*]] = call <8 x i16> @llvm.masked.load.v8i16.p0(ptr [[BASE:%.*]], i32 2, <8 x i1> [[TMP1]], <8 x i16> zeroinitializer)
+// CHECK-NEXT: ret <8 x i16> [[TMP2]]
//
uint16x8_t test_vld1q_z_u16(const uint16_t *base, mve_pred16_t p)
{
@@ -232,10 +232,10 @@ uint16x8_t test_vld1q_z_u16(const uint16_t *base, mve_pred16_t p)
// CHECK-LABEL: @test_vld1q_z_u32(
// CHECK-NEXT: entry:
-// CHECK-NEXT: [[TMP1:%.*]] = zext i16 [[P:%.*]] to i32
-// CHECK-NEXT: [[TMP2:%.*]] = call <4 x i1> @llvm.arm.mve.pred.i2v.v4i1(i32 [[TMP1]])
-// CHECK-NEXT: [[TMP3:%.*]] = call <4 x i32> @llvm.masked.load.v4i32.p0(ptr [[BASE:%.*]], i32 4, <4 x i1> [[TMP2]], <4 x i32> zeroinitializer)
-// CHECK-NEXT: ret <4 x i32> [[TMP3]]
+// CHECK-NEXT: [[TMP0:%.*]] = zext i16 [[P:%.*]] to i32
+// CHECK-NEXT: [[TMP1:%.*]] = call <4 x i1> @llvm.arm.mve.pred.i2v.v4i1(i32 [[TMP0]])
+// CHECK-NEXT: [[TMP2:%.*]] = call <4 x i32> @llvm.masked.load.v4i32.p0(ptr [[BASE:%.*]], i32 4, <4 x i1> [[TMP1]], <4 x i32> zeroinitializer)
+// CHECK-NEXT: ret <4 x i32> [[TMP2]]
//
uint32x4_t test_vld1q_z_u32(const uint32_t *base, mve_pred16_t p)
{
@@ -248,8 +248,8 @@ uint32x4_t test_vld1q_z_u32(const uint32_t *base, mve_pred16_t p)
// CHECK-LABEL: @test_vldrbq_s8(
// CHECK-NEXT: entry:
-// CHECK-NEXT: [[TMP1:%.*]] = load <16 x i8>, ptr [[BASE:%.*]], align 1
-// CHECK-NEXT: ret <16 x i8> [[TMP1]]
+// CHECK-NEXT: [[TMP0:%.*]] = load <16 x i8>, ptr [[BASE:%.*]], align 1
+// CHECK-NEXT: ret <16 x i8> [[TMP0]]
//
int8x16_t test_vldrbq_s8(const int8_t *base)
{
@@ -258,9 +258,9 @@ int8x16_t test_vldrbq_s8(const int8_t *base)
// CHECK-LABEL: @test_vldrbq_s16(
// CHECK-NEXT: entry:
-// CHECK-NEXT: [[TMP1:%.*]] = load <8 x i8>, ptr [[BASE:%.*]], align 1
-// CHECK-NEXT: [[TMP2:%.*]] = sext <8 x i8> [[TMP1]] to <8 x i16>
-// CHECK-NEXT: ret <8 x i16> [[TMP2]]
+// CHECK-NEXT: [[TMP0:%.*]] = load <8 x i8>, ptr [[BASE:%.*]], align 1
+// CHECK-NEXT: [[TMP1:%.*]] = sext <8 x i8> [[TMP0]] to <8 x i16>
+// CHECK-NEXT: ret <8 x i16> [[TMP1]]
//
int16x8_t test_vldrbq_s16(const int8_t *base)
{
@@ -269,9 +269,9 @@ int16x8_t test_vldrbq_s16(const int8_t *base)
// CHECK-LABEL: @test_vldrbq_s32(
// CHECK-NEXT: entry:
-// CHECK-NEXT: [[TMP1:%.*]] = load <4 x i8>, ptr [[BASE:%.*]], align 1
-// CHECK-NEXT: [[TMP2:%.*]] = sext <4 x i8> [[TMP1]] to <4 x i32>
-// CHECK-NEXT: ret <4 x i32> [[TMP2]]
+// CHECK-NEXT: [[TMP0:%.*]] = load <4 x i8>, ptr [[BASE:%.*]], align 1
+// CHECK-NEXT: [[TMP1:%.*]] = sext <4 x i8> [[TMP0]] to <4 x i32>
+// CHECK-NEXT: ret <4 x i32> [[TMP1]]
//
int32x4_t test_vldrbq_s32(const int8_t *base)
{
@@ -280,8 +280,8 @@ int32x4_t test_vldrbq_s32(const int8_t *base)
// CHECK-LABEL: @test_vldrbq_u8(
// CHECK-NEXT: entry:
-// CHECK-NEXT: [[TMP1:%.*]] = load <16 x i8>, ptr [[BASE:%.*]], align 1
-// CHECK-NEXT: ret <16 x i8> [[TMP1]]
+// CHECK-NEXT: [[TMP0:%.*]] = load <16 x i8>, ptr [[BASE:%.*]], align 1
+// CHECK-NEXT: ret <16 x i8> [[TMP0]]
//
uint8x16_t test_vldrbq_u8(const uint8_t *base)
{
@@ -290,9 +290,9 @@ uint8x16_t test_vldrbq_u8(const uint8_t *base)
// CHECK-LABEL: @test_vldrbq_u16(
// CHECK-NEXT: entry:
-// CHECK-NEXT: [[TMP1:%.*]] = load <8 x i8>, ptr [[BASE:%.*]], align 1
-// CHECK-NEXT: [[TMP2:%.*]] = zext <8 x i8> [[TMP1]] to <8 x i16>
-// CHECK-NEXT: ret <8 x i16> [[TMP2]]
+// CHECK-NEXT: [[TMP0:%.*]] = load <8 x i8>, ptr [[BASE:%.*]], align 1
+// CHECK-NEXT: [[TMP1:%.*]] = zext <8 x i8> [[TMP0]] to <8 x i16>
+// CHECK-NEXT: ret <8 x i16> [[TMP1]]
//
uint16x8_t test_vldrbq_u16(const uint8_t *base)
{
@@ -301,9 +301,9 @@ uint16x8_t test_vldrbq_u16(const uint8_t *base)
// CHECK-LABEL: @test_vldrbq_u32(
// CHECK-NEXT: entry:
-// CHECK-NEXT: [[TMP1:%.*]] = load <4 x i8>, ptr [[BASE:%.*]], align 1
-// CHECK-NEXT: [[TMP2:%.*]] = zext <4 x i8> [[TMP1]] to <4 x i32>
-// CHECK-NEXT: ret <4 x i32> [[TMP2]]
+// CHECK-NEXT: [[TMP0:%.*]] = load <4 x i8>, ptr [[BASE:%.*]], align 1
+// CHECK-NEXT: [[TMP1:%.*]] = zext <4 x i8> [[TMP0]] to <4 x i32>
+// CHECK-NEXT: ret <4 x i32> [[TMP1]]
//
uint32x4_t test_vldrbq_u32(const uint8_t *base)
{
@@ -312,10 +312,10 @@ uint32x4_t test_vldrbq_u32(const uint8_t *base)
// CHECK-LABEL: @test_vldrbq_z_s8(
// CHECK-NEXT: entry:
-// CHECK-NEXT: [[TMP1:%.*]] = zext i16 [[P:%.*]] to i32
-// CHECK-NEXT: [[TMP2:%.*]] = call <16 x i1> @llvm.arm.mve.pred.i2v.v16i1(i32 [[TMP1]])
-// CHECK-NEXT: [[TMP3:%.*]] = call <16 x i8> @llvm.masked.load.v16i8.p0(ptr [[BASE:%.*]], i32 1, <16 x i1> [[TMP2]], <16 x i8> zeroinitializer)
-// CHECK-NEXT: ret <16 x i8> [[TMP3]]
+// CHECK-NEXT: [[TMP0:%.*]] = zext i16 [[P:%.*]] to i32
+// CHECK-NEXT: [[TMP1:%.*]] = call <16 x i1> @llvm.arm.mve.pred.i2v.v16i1(i32 [[TMP0]])
+// CHECK-NEXT: [[TMP2:%.*]] = call <16 x i8> @llvm.masked.load.v16i8.p0(ptr [[BASE:%.*]], i32 1, <16 x i1> [[TMP1]], <16 x i8> zeroinitializer)
+// CHECK-NEXT: ret <16 x i8> [[TMP2]]
//
int8x16_t test_vldrbq_z_s8(const int8_t *base, mve_pred16_t p)
{
@@ -324,11 +324,11 @@ int8x16_t test_vldrbq_z_s8(const int8_t *base, mve_pred16_t p)
// CHECK-LABEL: @test_vldrbq_z_s16(
// CHECK-NEXT: entry:
-// CHECK-NEXT: [[TMP1:%.*]] = zext i16 [[P:%.*]] to i32
-// CHECK-NEXT: [[TMP2:%.*]] = call <8 x i1> @llvm.arm.mve.pred.i2v.v8i1(i32 [[TMP1]])
-// CHECK-NEXT: [[TMP3:%.*]] = call <8 x i8> @llvm.masked.load.v8i8.p0(ptr [[BASE:%.*]], i32 1, <8 x i1> [[TMP2]], <8 x i8> zeroinitializer)
-// CHECK-NEXT: [[TMP4:%.*]] = sext <8 x i8> [[TMP3]] to <8 x i16>
-// CHECK-NEXT: ret <8 x i16> [[TMP4]]
+// CHECK-NEXT: [[TMP0:%.*]] = zext i16 [[P:%.*]] to i32
+// CHECK-NEXT: [[TMP1:%.*]] = call <8 x i1> @llvm.arm.mve.pred.i2v.v8i1(i32 [[TMP0]])
+// CHECK-NEXT: [[TMP2:%.*]] = call <8 x i8> @llvm.masked.load.v8i8.p0(ptr [[BASE:%.*]], i32 1, <8 x i1> [[TMP1]], <8 x i8> zeroinitializer)
+// CHECK-NEXT: [[TMP3:%.*]] = sext <8 x i8> [[TMP2]] to <8 x i16>
+// CHECK-NEXT: ret <8 x i16> [[TMP3]]
//
int16x8_t test_vldrbq_z_s16(const int8_t *base, mve_pred16_t p)
{
@@ -337,11 +337,11 @@ int16x8_t test_vldrbq_z_s16(const int8_t *base, mve_pred16_t p)
// CHECK-LABEL: @test_vldrbq_z_s32(
// CHECK-NEXT: entry:
-// CHECK-NEXT: [[TMP1:%.*]] = zext i16 [[P:%.*]] to i32
-// CHECK-NEXT: [[TMP2:%.*]] = call <4 x i1> @llvm.arm.mve.pred.i2v.v4i1(i32 [[TMP1]])
-// CHECK-NEXT: [[TMP3:%.*]] = call <4 x i8> @llvm.masked.load.v4i8.p0(ptr [[BASE:%.*]], i32 1, <4 x i1> [[TMP2]], <4 x i8> zeroinitializer)
-// CHECK-NEXT: [[TMP4:%.*]] = sext <4 x i8> [[TMP3]] to <4 x i32>
-// CHECK-NEXT: ret <4 x i32> [[TMP4]]
+// CHECK-NEXT: [[TMP0:%.*]] = zext i16 [[P:%.*]] to i32
+// CHECK-NEXT: [[TMP1:%.*]] = call <4 x i1> @llvm.arm.mve.pred.i2v.v4i1(i32 [[TMP0]])
+// CHECK-NEXT: [[TMP2:%.*]] = call <4 x i8> @llvm.masked.load.v4i8.p0(ptr [[BASE:%.*]], i32 1, <4 x i1> [[TMP1]], <4 x i8> zeroinitializer)
+// CHECK-NEXT: [[TMP3:%.*]] = sext <4 x i8> [[TMP2]] to <4 x i32>
+// CHECK-NEXT: ret <4 x i32> [[TMP3]]
//
int32x4_t test_vldrbq_z_s32(const int8_t *base, mve_pred16_t p)
{
@@ -350,10 +350,10 @@ int32x4_t test_vldrbq_z_s32(const int8_t *base, mve_pred16_t p)
// CHECK-LABEL: @test_vldrbq_z_u8(
// CHECK-NEXT: entry:
-// CHECK-NEXT: [[TMP1:%.*]] = zext i16 [[P:%.*]] to i32
-// CHECK-NEXT: [[TMP2:%.*]] = call <16 x i1> @llvm.arm.mve.pred.i2v.v16i1(i32 [[TMP1]])
-// CHECK-NEXT: [[TMP3:%.*]] = call <16 x i8> @llvm.masked.load.v16i8.p0(ptr [[BASE:%.*]], i32 1, <16 x i1> [[TMP2]], <16 x i8> zeroinitializer)
-// CHECK-NEXT: ret <16 x i8> [[TMP3]]
+// CHECK-NEXT: [[TMP0:%.*]] = zext i16 [[P:%.*]] to i32
+// CHECK-NEXT: [[TMP1:%.*]] = call <16 x i1> @llvm.arm.mve.pred.i2v.v16i1(i32 [[TMP0]])
+// CHECK-NEXT: [[TMP2:%.*]] = call <16 x i8> @llvm.masked.load.v16i8.p0(ptr [[BASE:%.*]], i32 1, <16 x i1> [[TMP1]], <16 x i8> zeroinitializer)
+// CHECK-NEXT: ret <16 x i8> [[TMP2]]
//
uint8x16_t test_vldrbq_z_u8(const uint8_t *base, mve_pred16_t p)
{
@@ -362,11 +362,11 @@ uint8x16_t test_vldrbq_z_u8(const uint8_t *base, mve_pred16_t p)
// CHECK-LABEL: @test_vldrbq_z_u16(
// CHECK-NEXT: entry:
-// CHECK-NEXT: [[TMP1:%.*]] = zext i16 [[P:%.*]] to i32
-// CHECK-NEXT: [[TMP2:%.*]] = call <8 x i1> @llvm.arm.mve.pred.i2v.v8i1(i32 [[TMP1]])
-// CHECK-NEXT: [[TMP3:%.*]] = call <8 x i8> @llvm.masked.load.v8i8.p0(ptr [[BASE:%.*]], i32 1, <8 x i1> [[TMP2]], <8 x i8> zeroinitializer)
-// CHECK-NEXT: [[TMP4:%.*]] = zext <8 x i8> [[TMP3]] to <8 x i16>
-// CHECK-NEXT: ret <8 x i16> [[TMP4]]
+// CHECK-NEXT: [[TMP0:%.*]] = zext i16 [[P:%.*]] to i32
+// CHECK-NEXT: [[TMP1:%.*]] = call <8 x i1> @llvm.arm.mve.pred.i2v.v8i1(i32 [[TMP0]])
+// CHECK-NEXT: [[TMP2:%.*]] = call <8 x i8> @llvm.masked.load.v8i8.p0(ptr [[BASE:%.*]], i32 1, <8 x i1> [[TMP1]], <8 x i8> zeroinitializer)
+// CHECK-NEXT: [[TMP3:%.*]] = zext <8 x i8> [[TMP2]] to <8 x i16>
+// CHECK-NEXT: ret <8 x i16> [[TMP3]]
//
uint16x8_t test_vldrbq_z_u16(const uint8_t *base, mve_pred16_t p)
{
@@ -375,11 +375,11 @@ uint16x8_t test_vldrbq_z_u16(const uint8_t *base, mve_pred16_t p)
// CHECK-LABEL: @test_vldrbq_z_u32(
// CHECK-NEXT: entry:
-// CHECK-NEXT: [[TMP1:%.*]] = zext i16 [[P:%.*]] to i32
-// CHECK-NEXT: [[TMP2:%.*]] = call <4 x i1> @llvm.arm.mve.pred.i2v.v4i1(i32 [[TMP1]])
-// CHECK-NEXT: [[TMP3:%.*]] = call <4 x i8> @llvm.masked.load.v4i8.p0(ptr [[BASE:%.*]], i32 1, <4 x i1> [[TMP2]], <4 x i8> zeroinitializer)
-// CHECK-NEXT: [[TMP4:%.*]] = zext <4 x i8> [[TMP3]] to <4 x i32>
-// CHECK-NEXT: ret <4 x i32> [[TMP4]]
+// CHECK-NEXT: [[TMP0:%.*]] = zext i16 [[P:%.*]] to i32
+// CHECK-NEXT: [[TMP1:%.*]] = call <4 x i1> @llvm.arm.mve.pred.i2v.v4i1(i32 [[TMP0]])
+// CHECK-NEXT: [[TMP2:%.*]] = call <4 x i8> @llvm.masked.load.v4i8.p0(ptr [[BASE:%.*]], i32 1, <4 x i1> [[TMP1]], <4 x i8> zeroinitializer)
+// CHECK-NEXT: [[TMP3:%.*]] = zext <4 x i8> [[TMP2]] to <4 x i32>
+// CHECK-NEXT: ret <4 x i32> [[TMP3]]
//
uint32x4_t test_vldrbq_z_u32(const uint8_t *base, mve_pred16_t p)
{
@@ -388,8 +388,8 @@ uint32x4_t test_vldrbq_z_u32(const uint8_t *base, mve_pred16_t p)
// CHECK-LABEL: @test_vldrhq_f16(
// CHECK-NEXT: entry:
-// CHECK-NEXT: [[TMP1:%.*]] = load <8 x half>, ptr [[BASE:%.*]], align 2
-// CHECK-NEXT: ret <8 x half> [[TMP1]]
+// CHECK-NEXT: [[TMP0:%.*]] = load <8 x half>, ptr [[BASE:%.*]], align 2
+// CHECK-NEXT: ret <8 x half> [[TMP0]]
//
float16x8_t test_vldrhq_f16(const float16_t *base)
{
@@ -398,8 +398,8 @@ float16x8_t test_vldrhq_f16(const float16_t *base)
// CHECK-LABEL: @test_vldrhq_s16(
// CHECK-NEXT: entry:
-// CHECK-NEXT: [[TMP1:%.*]] = load <8 x i16>, ptr [[BASE:%.*]], align 2
-// CHECK-NEXT: ret <8 x i16> [[TMP1]]
+// CHECK-NEXT: [[TMP0:%.*]] = load <8 x i16>, ptr [[BASE:%.*]], align 2
+// CHECK-NEXT: ret <8 x i16> [[TMP0]]
//
int16x8_t test_vldrhq_s16(const int16_t *base)
{
@@ -408,9 +408,9 @@ int16x8_t test_vldrhq_s16(const int16_t *base)
// CHECK-LABEL: @test_vldrhq_s32(
// CHECK-NEXT: entry:
-// CHECK-NEXT: [[TMP1:%.*]] = load <4 x i16>, ptr [[BASE:%.*]], align 2
-// CHECK-NEXT: [[TMP2:%.*]] = sext <4 x i16> [[TMP1]] to <4 x i32>
-// CHECK-NEXT: ret <4 x i32> [[TMP2]]
+// CHECK-NEXT: [[TMP0:%.*]] = load <4 x i16>, ptr [[BASE:%.*]], align 2
+// CHECK-NEXT: [[TMP1:%.*]] = sext <4 x i16> [[TMP0]] to <4 x i32>
+// CHECK-NEXT: ret <4 x i32> [[TMP1]]
//
int32x4_t test_vldrhq_s32(const int16_t *base)
{
@@ -419,8 +419,8 @@ int32x4_t test_vldrhq_s32(const int16_t *base)
// CHECK-LABEL: @test_vldrhq_u16(
// CHECK-NEXT: entry:
-// CHECK-NEXT: [[TMP1:%.*]] = load <8 x i16>, ptr [[BASE:%.*]], align 2
-// CHECK-NEXT: ret <8 x i16> [[TMP1]]
+// CHECK-NEXT: [[TMP0:%.*]] = load <8 x i16>, ptr [[BASE:%.*]], align 2
+// CHECK-NEXT: ret <8 x i16> [[TMP0]]
//
uint16x8_t test_vldrhq_u16(const uint16_t *base)
{
@@ -429,9 +429,9 @@ uint16x8_t test_vldrhq_u16(const uint16_t *base)
// CHECK-LABEL: @test_vldrhq_u32(
// CHECK-NEXT: entry:
-// CHECK-NEXT: [[TMP1:%.*]] = load <4 x i16>, ptr [[BASE:%.*]], align 2
-// CHECK-NEXT: [[TMP2:%.*]] = zext <4 x i16> [[TMP1]] to <4 x i32>
-// CHECK-NEXT: ret <4 x i32> [[TMP2]]
+// CHECK-NEXT: [[TMP0:%.*]] = load <4 x i16>, ptr [[BASE:%.*]], align 2
+// CHECK-NEXT: [[TMP1:%.*]] = zext <4 x i16> [[TMP0]] to <4 x i32>
+// CHECK-NEXT: ret <4 x i32> [[TMP1]]
//
uint32x4_t test_vldrhq_u32(const uint16_t *base)
{
@@ -440,10 +440,10 @@ uint32x4_t test_vldrhq_u32(const uint16_t *base)
// CHECK-LABEL: @test_vldrhq_z_f16(
// CHECK-NEXT: entry:
-// CHECK-NEXT: [[TMP1:%.*]] = zext i16 [[P:%.*]] to i32
-// CHECK-NEXT: [[TMP2:%.*]] = call <8 x i1> @llvm.arm.mve.pred.i2v.v8i1(i32 [[TMP1]])
-// CHECK-NEXT: [[TMP3:%.*]] = call <8 x half> @llvm.masked.load.v8f16.p0(ptr [[BASE:%.*]], i32 2, <8 x i1> [[TMP2]], <8 x half> zeroinitializer)
-// CHECK-NEXT: ret <8 x half> [[TMP3]]
+// CHECK-NEXT: [[TMP0:%.*]] = zext i16 [[P:%.*]] to i32
+// CHECK-NEXT: [[TMP1:%.*]] = call <8 x i1> @llvm.arm.mve.pred.i2v.v8i1(i32 [[TMP0]])
+// CHECK-NEXT: [[TMP2:%.*]] = call <8 x half> @llvm.masked.load.v8f16.p0(ptr [[BASE:%.*]], i32 2, <8 x i1> [[TMP1]], <8 x half> zeroinitializer)
+// CHECK-NEXT: ret <8 x half> [[TMP2]]
//
float16x8_t test_vldrhq_z_f16(const float16_t *base, mve_pred16_t p)
{
@@ -452,10 +452,10 @@ float16x8_t test_vldrhq_z_f16(const float16_t *base, mve_pred16_t p)
// CHECK-LABEL: @test_vldrhq_z_s16(
// CHECK-NEXT: entry:
-// CHECK-NEXT: [[TMP1:%.*]] = zext i16 [[P:%.*]] to i32
-// CHECK-NEXT: [[TMP2:%.*]] = call <8 x i1> @llvm.arm.mve.pred.i2v.v8i1(i32 [[TMP1]])
-// CHECK-NEXT: [[TMP3:%.*]] = call <8 x i16> @llvm.masked.load.v8i16.p0(ptr [[BASE:%.*]], i32 2, <8 x i1> [[TMP2]], <8 x i16> zeroinitializer)
-// CHECK-NEXT: ret <8 x i16> [[TMP3]]
+// CHECK-NEXT: [[TMP0:%.*]] = zext i16 [[P:%.*]] to i32
+// CHECK-NEXT: [[TMP1:%.*]] = call <8 x i1> @llvm.arm.mve.pred.i2v.v8i1(i32 [[TMP0]])
+// CHECK-NEXT: [[TMP2:%.*]] = call <8 x i16> @llvm.masked.load.v8i16.p0(ptr [[BASE:%.*]], i32 2, <8 x i1> [[TMP1]], <8 x i16> zeroinitializer)
+// CHECK-NEXT: ret <8 x i16> [[TMP2]]
//
int16x8_t test_vldrhq_z_s16(const int16_t *base, mve_pred16_t p)
{
@@ -464,11 +464,11 @@ int16x8_t test_vldrhq_z_s16(const int16_t *base, mve_pred16_t p)
// CHECK-LABEL: @test_vldrhq_z_s32(
// CHECK-NEXT: entry:
-// CHECK-NEXT: [[TMP1:%.*]] = zext i16 [[P:%.*]] to i32
-// CHECK-NEXT: [[TMP2:%.*]] = call <4 x i1> @llvm.arm.mve.pred.i2v.v4i1(i32 [[TMP1]])
-// CHECK-NEXT: [[TMP3:%.*]] = call <4 x i16> @llvm.masked.load.v4i16.p0(ptr [[BASE:%.*]], i32 2, <4 x i1> [[TMP2]], <4 x i16> zeroinitializer)
-// CHECK-NEXT: [[TMP4:%.*]] = sext <4 x i16> [[TMP3]] to <4 x i32>
-// CHECK-NEXT: ret <4 x i32> [[TMP4]]
+// CHECK-NEXT: [[TMP0:%.*]] = zext i16 [[P:%.*]] to i32
+// CHECK-NEXT: [[TMP1:%.*]] = call <4 x i1> @llvm.arm.mve.pred.i2v.v4i1(i32 [[TMP0]])
+// CHECK-NEXT: [[TMP2:%.*]] = call <4 x i16> @llvm.masked.load.v4i16.p0(ptr [[BASE:%.*]], i32 2, <4 x i1> [[TMP1]], <4 x i16> zeroinitializer)
+// CHECK-NEXT: [[TMP3:%.*]] = sext <4 x i16> [[TMP2]] to <4 x i32>
+// CHECK-NEXT: ret <4 x i32> [[TMP3]]
//
int32x4_t test_vldrhq_z_s32(const int16_t *base, mve_pred16_t p)
{
@@ -477,10 +477,10 @@ int32x4_t test_vldrhq_z_s32(const int16_t *base, mve_pred16_t p)
// CHECK-LABEL: @test_vldrhq_z_u16(
// CHECK-NEXT: entry:
-// CHECK-NEXT: [[TMP1:%.*]] = zext i16 [[P:%.*]] to i32
-// CHECK-NEXT: [[TMP2:%.*]] = call <8 x i1> @llvm.arm.mve.pred.i2v.v8i1(i32 [[TMP1]])
-// CHECK-NEXT: [[TMP3:%.*]] = call <8 x i16> @llvm.masked.load.v8i16.p0(ptr [[BASE:%.*]], i32 2, <8 x i1> [[TMP2]], <8 x i16> zeroinitializer)
-// CHECK-NEXT: ret <8 x i16> [[TMP3]]
+// CHECK-NEXT: [[TMP0:%.*]] = zext i16 [[P:%.*]] to i32
+// CHECK-NEXT: [[TMP1:%.*]] = call <8 x i1> @llvm.arm.mve.pred.i2v.v8i1(i32 [[TMP0]])
+// CHECK-NEXT: [[TMP2:%.*]] = call <8 x i16> @llvm.masked.load.v8i16.p0(ptr [[BASE:%.*]], i32 2, <8 x i1> [[TMP1]], <8 x i16> zeroinitializer)
+// CHECK-NEXT: ret <8 x i16> [[TMP2]]
//
uint16x8_t test_vldrhq_z_u16(const uint16_t *base, mve_pred16_t p)
{
@@ -489,11 +489,11 @@ uint16x8_t test_vldrhq_z_u16(const uint16_t *base, mve_pred16_t p)
// CHECK-LABEL: @test_vldrhq_z_u32(
// CHECK-NEXT: entry:
-// CHECK-NEXT: [[TMP1:%.*]] = zext i16 [[P:%.*]] to i32
-// CHECK-NEXT: [[TMP2:%.*]] = call <4 x i1> @llvm.arm.mve.pred.i2v.v4i1(i32 [[TMP1]])
-// CHECK-NEXT: [[TMP3:%.*]] = call <4 x i16> @llvm.masked.load.v4i16.p0(ptr [[BASE:%.*]], i32 2, <4 x i1> [[TMP2]], <4 x i16> zeroinitializer)
-// CHECK-NEXT: [[TMP4:%.*]] = zext <4 x i16> [[TMP3]] to <4 x i32>
-// CHECK-NEXT: ret <4 x i32> [[TMP4]]
+// CHECK-NEXT: [[TMP0:%.*]] = zext i16 [[P:%.*]] to i32
+// CHECK-NEXT: [[TMP1:%.*]] = call <4 x i1> @llvm.arm.mve.pred.i2v.v4i1(i32 [[TMP0]])
+// CHECK-NEXT: [[TMP2:%.*]] = call <4 x i16> @llvm.masked.load.v4i16.p0(ptr [[BASE:%.*]], i32 2, <4 x i1> [[TMP1]], <4 x i16> zeroinitializer)
+// CHECK-NEXT: [[TMP3:%.*]] = zext <4 x i16> [[TMP2]] to <4 x i32>
+// CHECK-NEXT: ret <4 x i32> [[TMP3]]
//
uint32x4_t test_vldrhq_z_u32(const uint16_t *base, mve_pred16_t p)
{
@@ -502,8 +502,8 @@ uint32x4_t test_vldrhq_z_u32(const uint16_t *base, mve_pred16_t p)
// CHECK-LABEL: @test_vldrwq_f32(
// CHECK-NEXT: entry:
-// CHECK-NEXT: [[TMP1:%.*]] = load <4 x float>, ptr [[BASE:%.*]], align 4
-// CHECK-NEXT: ret <4 x float> [[TMP1]]
+// CHECK-NEXT: [[TMP0:%.*]] = load <4 x float>, ptr [[BASE:%.*]], align 4
+// CHECK-NEXT: ret <4 x float> [[TMP0]]
//
float32x4_t test_vldrwq_f32(const float32_t *base)
{
@@ -512,8 +512,8 @@ float32x4_t test_vldrwq_f32(const float32_t *base)
// CHECK-LABEL: @test_vldrwq_s32(
// CHECK-NEXT: entry:
-// CHECK-NEXT: [[TMP1:%.*]] = load <4 x i32>, ptr [[BASE:%.*]], align 4
-// CHECK-NEXT: ret <4 x i32> [[TMP1]]
+// CHECK-NEXT: [[TMP0:%.*]] = load <4 x i32>, ptr [[BASE:%.*]], align 4
+// CHECK-NEXT: ret <4 x i32> [[TMP0]]
//
int32x4_t test_vldrwq_s32(const int32_t *base)
{
@@ -522,8 +522,8 @@ int32x4_t test_vldrwq_s32(const int32_t *base)
// CHECK-LABEL: @test_vldrwq_u32(
// CHECK-NEXT: entry:
-// CHECK-NEXT: [[TMP1:%.*]] = load <4 x i32>, ptr [[BASE:%.*]], align 4
-// CHECK-NEXT: ret <4 x i32> [[TMP1]]
+// CHECK-NEXT: [[TMP0:%.*]] = load <4 x i32>, ptr [[BASE:%.*]], align 4
+// CHECK-NEXT: ret <4 x i32> [[TMP0]]
//
uint32x4_t test_vldrwq_u32(const uint32_t *base)
{
@@ -532,10 +532,10 @@ uint32x4_t test_vldrwq_u32(const uint32_t *base)
// CHECK-LABEL: @test_vldrwq_z_f32(
// CHECK-NEXT: entry:
-// CHECK-NEXT: [[TMP1:%.*]] = zext i16 [[P:%.*]] to i32
-// CHECK-NEXT: [[TMP2:%.*]] = call <4 x i1> @llvm.arm.mve.pred.i2v.v4i1(i32 [[TMP1]])
-// CHECK-NEXT: [[TMP3:%.*]] = call <4 x float> @llvm.masked.load.v4f32.p0(ptr [[BASE:%.*]], i32 4, <4 x i1> [[TMP2]], <4 x float> zeroinitializer)
-// CHECK-NEXT: ret <4 x float> [[TMP3]]
+// CHECK-NEXT: [[TMP0:%.*]] = zext i16 [[P:%.*]] to i32
+// CHECK-NEXT: [[TMP1:%.*]] = call <4 x i1> @llvm.arm.mve.pred.i2v.v4i1(i32 [[TMP0]])
+// CHECK-NEXT: [[TMP2:%.*]] = call <4 x float> @llvm.masked.load.v4f32.p0(ptr [[BASE:%.*]], i32 4, <4 x i1> [[TMP1]], <4 x float> zeroinitializer)
+// CHECK-NEXT: ret <4 x float> [[TMP2]]
//
float32x4_t test_vldrwq_z_f32(const float32_t *base, mve_pred16_t p)
{
@@ -544,10 +544,10 @@ float32x4_t test_vldrwq_z_f32(const float32_t *base, mve_pred16_t p)
// CHECK-LABEL: @test_vldrwq_z_s32(
// CHECK-NEXT: entry:
-// CHECK-NEXT: [[TMP1:%.*]] = zext i16 [[P:%.*]] to i32
-// CHECK-NEXT: [[TMP2:%.*]] = call <4 x i1> @llvm.arm.mve.pred.i2v.v4i1(i32 [[TMP1]])
-// CHECK-NEXT: [[TMP3:%.*]] = call <4 x i32> @llvm.masked.load.v4i32.p0(ptr [[BASE:%.*]], i32 4, <4 x i1> [[TMP2]], <4 x i32> zeroinitializer)
-// CHECK-NEXT: ret <4 x i32> [[TMP3]]
+// CHECK-NEXT: [[TMP0:%.*]] = zext i16 [[P:%.*]] to i32
+// CHECK-NEXT: [[TMP1:%.*]] = call <4 x i1> @llvm.arm.mve.pred.i2v.v4i1(i32 [[TMP0]])
+// CHECK-NEXT: [[TMP2:%.*]] = call <4 x i32> @llvm.masked.load.v4i32.p0(ptr [[BASE:%.*]], i32 4, <4 x i1> [[TMP1]], <4 x i32> zeroinitializer)
+// CHECK-NEXT: ret <4 x i32> [[TMP2]]
//
int32x4_t test_vldrwq_z_s32(const int32_t *base, mve_pred16_t p)
{
@@ -556,10 +556,10 @@ int32x4_t test_vldrwq_z_s32(const int32_t *base, mve_pred16_t p)
// CHECK-LABEL: @test_vldrwq_z_u32(
// CHECK-NEXT: entry:
-// CHECK-NEXT: [[TMP1:%.*]] = zext i16 [[P:%.*]] to i32
-// CHECK-NEXT: [[TMP2:%.*]] = call <4 x i1> @llvm.arm.mve.pred.i2v.v4i1(i32 [[TMP1]])
-// CHECK-NEXT: [[TMP3:%.*]] = call <4 x i32> @llvm.masked.load.v4i32.p0(ptr [[BASE:%.*]], i32 4, <4 x i1> [[TMP2]], <4 x i32> zeroinitializer)
-// CHECK-NEXT: ret <4 x i32> [[TMP3]]
+// CHECK-NEXT: [[TMP0:%.*]] = zext i16 [[P:%.*]] to i32
+// CHECK-NEXT: [[TMP1:%.*]] = call <4 x i1> @llvm.arm.mve.pred.i2v.v4i1(i32 [[TMP0]])
+// CHECK-NEXT: [[TMP2:%.*]] = call <4 x i32> @llvm.masked.load.v4i32.p0(ptr [[BASE:%.*]], i32 4, <4 x i1> [[TMP1]], <4 x i32> zeroinitializer)
+// CHECK-NEXT: ret <4 x i32> [[TMP2]]
//
uint32x4_t test_vldrwq_z_u32(const uint32_t *base, mve_pred16_t p)
{
@@ -680,9 +680,9 @@ void test_vst1q_u32(uint32_t *base, uint32x4_t value)
// CHECK-LABEL: @test_vst1q_p_f16(
// CHECK-NEXT: entry:
-// CHECK-NEXT: [[TMP1:%.*]] = zext i16 [[P:%.*]] to i32
-// CHECK-NEXT: [[TMP2:%.*]] = call <8 x i1> @llvm.arm.mve.pred.i2v.v8i1(i32 [[TMP1]])
-// CHECK-NEXT: call void @llvm.masked.store.v8f16.p0(<8 x half> [[VALUE:%.*]], ptr [[BASE:%.*]], i32 2, <8 x i1> [[TMP2]])
+// CHECK-NEXT: [[TMP0:%.*]] = zext i16 [[P:%.*]] to i32
+// CHECK-NEXT: [[TMP1:%.*]] = call <8 x i1> @llvm.arm.mve.pred.i2v.v8i1(i32 [[TMP0]])
+// CHECK-NEXT: call void @llvm.masked.store.v8f16.p0(<8 x half> [[VALUE:%.*]], ptr [[BASE:%.*]], i32 2, <8 x i1> [[TMP1]])
// CHECK-NEXT: ret void
//
void test_vst1q_p_f16(float16_t *base, float16x8_t value, mve_pred16_t p)
@@ -696,9 +696,9 @@ void test_vst1q_p_f16(float16_t *base, float16x8_t value, mve_pred16_t p)
// CHECK-LABEL: @test_vst1q_p_f32(
// CHECK-NEXT: entry:
-// CHECK-NEXT: [[TMP1:%.*]] = zext i16 [[P:%.*]] to i32
-// CHECK-NEXT: [[TMP2:%.*]] = call <4 x i1> @llvm.arm.mve.pred.i2v.v4i1(i32 [[TMP1]])
-// CHECK-NEXT: call void @llvm.masked.store.v4f32.p0(<4 x float> [[VALUE:%.*]], ptr [[BASE:%.*]], i32 4, <4 x i1> [[TMP2]])
+// CHECK-NEXT: [[TMP0:%.*]] = zext i16 [[P:%.*]] to i32
+// CHECK-NEXT: [[TMP1:%.*]] = call <4 x i1> @llvm.arm.mve.pred.i2v.v4i1(i32 [[TMP0]])
+// CHECK-NEXT: call void @llvm.masked.store.v4f32.p0(<4 x float> [[VALUE:%.*]], ptr [[BASE:%.*]], i32 4, <4 x i1> [[TMP1]])
// CHECK-NEXT: ret void
//
void test_vst1q_p_f32(float32_t *base, float32x4_t value, mve_pred16_t p)
@@ -712,9 +712,9 @@ void test_vst1q_p_f32(float32_t *base, float32x4_t value, mve_pred16_t p)
// CHECK-LABEL: @test_vst1q_p_s8(
// CHECK-NEXT: entry:
-// CHECK-NEXT: [[TMP1:%.*]] = zext i16 [[P:%.*]] to i32
-// CHECK-NEXT: [[TMP2:%.*]] = call <16 x i1> @llvm.arm.mve.pred.i2v.v16i1(i32 [[TMP1]])
-// CHECK-NEXT: call void @llvm.masked.store.v16i8.p0(<16 x i8> [[VALUE:%.*]], ptr [[BASE:%.*]], i32 1, <16 x i1> [[TMP2]])
+// CHECK-NEXT: [[TMP0:%.*]] = zext i16 [[P:%.*]] to i32
+// CHECK-NEXT: [[TMP1:%.*]] = call <16 x i1> @llvm.arm.mve.pred.i2v.v16i1(i32 [[TMP0]])
+// CHECK-NEXT: call void @llvm.masked.store.v16i8.p0(<16 x i8> [[VALUE:%.*]], ptr [[BASE:%.*]], i32 1, <16 x i1> [[TMP1]])
// CHECK-NEXT: ret void
//
void test_vst1q_p_s8(int8_t *base, int8x16_t value, mve_pred16_t p)
@@ -728,9 +728,9 @@ void test_vst1q_p_s8(int8_t *base, int8x16_t value, mve_pred16_t p)
// CHECK-LABEL: @test_vst1q_p_s16(
// CHECK-NEXT: entry:
-// CHECK-NEXT: [[TMP1:%.*]] = zext i16 [[P:%.*]] to i32
-// CHECK-NEXT: [[TMP2:%.*]] = call <8 x i1> @llvm.arm.mve.pred.i2v.v8i1(i32 [[TMP1]])
-// CHECK-NEXT: call void @llvm.masked.store.v8i16.p0(<8 x i16> [[VALUE:%.*]], ptr [[BASE:%.*]], i32 2, <8 x i1> [[TMP2]])
+// CHECK-NEXT: [[TMP0:%.*]] = zext i16 [[P:%.*]] to i32
+// CHECK-NEXT: [[TMP1:%.*]] = call <8 x i1> @llvm.arm.mve.pred.i2v.v8i1(i32 [[TMP0]])
+// CHECK-NEXT: call void @llvm.masked.store.v8i16.p0(<8 x i16> [[VALUE:%.*]], ptr [[BASE:%.*]], i32 2, <8 x i1> [[TMP1]])
// CHECK-NEXT: ret void
//
void test_vst1q_p_s16(int16_t *base, int16x8_t value, mve_pred16_t p)
@@ -744,9 +744,9 @@ void test_vst1q_p_s16(int16_t *base, int16x8_t value, mve_pred16_t p)
// CHECK-LABEL: @test_vst1q_p_s32(
// CHECK-NEXT: entry:
-// CHECK-NEXT: [[TMP1:%.*]] = zext i16 [[P:%.*]] to i32
-// CHECK-NEXT: [[TMP2:%.*]] = call <4 x i1> @llvm.arm.mve.pred.i2v.v4i1(i32 [[TMP1]])
-// CHECK-NEXT: call void @llvm.masked.store.v4i32.p0(<4 x i32> [[VALUE:%.*]], ptr [[BASE:%.*]], i32 4, <4 x i1> [[TMP2]])
+// CHECK-NEXT: [[TMP0:%.*]] = zext i16 [[P:%.*]] to i32
+// CHECK-NEXT: [[TMP1:%.*]] = call <4 x i1> @llvm.arm.mve.pred.i2v.v4i1(i32 [[TMP0]])
+// CHECK-NEXT: call void @llvm.masked.store.v4i32.p0(<4 x i32> [[VALUE:%.*]], ptr [[BASE:%.*]], i32 4, <4 x i1> [[TMP1]])
// CHECK-NEXT: ret void
//
void test_vst1q_p_s32(int32_t *base, int32x4_t value, mve_pred16_t p)
@@ -760,9 +760,9 @@ void test_vst1q_p_s32(int32_t *base, int32x4_t value, mve_pred16_t p)
// CHECK-LABEL: @test_vst1q_p_u8(
// CHECK-NEXT: entry:
-// CHECK-NEXT: [[TMP1:%.*]] = zext i16 [[P:%.*]] to i32
-// CHECK-NEXT: [[TMP2:%.*]] = call <16 x i1> @llvm.arm.mve.pred.i2v.v16i1(i32 [[TMP1]])
-// CHECK-NEXT: call void @llvm.masked.store.v16i8.p0(<16 x i8> [[VALUE:%.*]], ptr [[BASE:%.*]], i32 1, <16 x i1> [[TMP2]])
+// CHECK-NEXT: [[TMP0:%.*]] = zext i16 [[P:%.*]] to i32
+// CHECK-NEXT: [[TMP1:%.*]] = call <16 x i1> @llvm.arm.mve.pred.i2v.v16i1(i32 [[TMP0]])
+// CHECK-NEXT: call void @llvm.masked.store.v16i8.p0(<16 x i8> [[VALUE:%.*]], ptr [[BASE:%.*]], i32 1, <16 x i1> [[TMP1]])
// CHECK-NEXT: ret void
//
void test_vst1q_p_u8(uint8_t *base, uint8x16_t value, mve_pred16_t p)
@@ -776,9 +776,9 @@ void test_vst1q_p_u8(uint8_t *base, uint8x16_t value, mve_pred16_t p)
// CHECK-LABEL: @test_vst1q_p_u16(
// CHECK-NEXT: entry:
-// CHECK-NEXT: [[TMP1:%.*]] = zext i16 [[P:%.*]] to i32
-// CHECK-NEXT: [[TMP2:%.*]] = call <8 x i1> @llvm.arm.mve.pred.i2v.v8i1(i32 [[TMP1]])
-// CHECK-NEXT: call void @llvm.masked.store.v8i16.p0(<8 x i16> [[VALUE:%.*]], ptr [[BASE:%.*]], i32 2, <8 x i1> [[TMP2]])
+// CHECK-NEXT: [[TMP0:%.*]] = zext i16 [[P:%.*]] to i32
+// CHECK-NEXT: [[TMP1:%.*]] = call <8 x i1> @llvm.arm.mve.pred.i2v.v8i1(i32 [[TMP0]])
+// CHECK-NEXT: call void @llvm.masked.store.v8i16.p0(<8 x i16> [[VALUE:%.*]], ptr [[BASE:%.*]], i32 2, <8 x i1> [[TMP1]])
// CHECK-NEXT: ret void
//
void test_vst1q_p_u16(uint16_t *base, uint16x8_t value, mve_pred16_t p)
@@ -792,9 +792,9 @@ void test_vst1q_p_u16(uint16_t *base, uint16x8_t value, mve_pred16_t p)
// CHECK-LABEL: @test_vst1q_p_u32(
// CHECK-NEXT: entry:
-// CHECK-NEXT: [[TMP1:%.*]] = zext i16 [[P:%.*]] to i32
-// CHECK-NEXT: [[TMP2:%.*]] = call <4 x i1> @llvm.arm.mve.pred.i2v.v4i1(i32 [[TMP1]])
-// CHECK-NEXT: call void @llvm.masked.store.v4i32.p0(<4 x i32> [[VALUE:%.*]], ptr [[BASE:%.*]], i32 4, <4 x i1> [[TMP2]])
+// CHECK-NEXT: [[TMP0:%.*]] = zext i16 [[P:%.*]] to i32
+// CHECK-NEXT: [[TMP1:%.*]] = call <4 x i1> @llvm.arm.mve.pred.i2v.v4i1(i32 [[TMP0]])
+// CHECK-NEXT: call void @llvm.masked.store.v4i32.p0(<4 x i32> [[VALUE:%.*]], ptr [[BASE:%.*]], i32 4, <4 x i1> [[TMP1]])
// CHECK-NEXT: ret void
//
void test_vst1q_p_u32(uint32_t *base, uint32x4_t value, mve_pred16_t p)
@@ -896,9 +896,9 @@ void test_vstrbq_u32(uint8_t *base, uint32x4_t value)
// CHECK-LABEL: @test_vstrbq_p_s8(
// CHECK-NEXT: entry:
-// CHECK-NEXT: [[TMP1:%.*]] = zext i16 [[P:%.*]] to i32
-// CHECK-NEXT: [[TMP2:%.*]] = call <16 x i1> @llvm.arm.mve.pred.i2v.v16i1(i32 [[TMP1]])
-// CHECK-NEXT: call void @llvm.masked.store.v16i8.p0(<16 x i8> [[VALUE:%.*]], ptr [[BASE:%.*]], i32 1, <16 x i1> [[TMP2]])
+// CHECK-NEXT: [[TMP0:%.*]] = zext i16 [[P:%.*]] to i32
+// CHECK-NEXT: [[TMP1:%.*]] = call <16 x i1> @llvm.arm.mve.pred.i2v.v16i1(i32 [[TMP0]])
+// CHECK-NEXT: call void @llvm.masked.store.v16i8.p0(<16 x i8> [[VALUE:%.*]], ptr [[BASE:%.*]], i32 1, <16 x i1> [[TMP1]])
// CHECK-NEXT: ret void
//
void test_vstrbq_p_s8(int8_t *base, int8x16_t value, mve_pred16_t p)
@@ -913,9 +913,9 @@ void test_vstrbq_p_s8(int8_t *base, int8x16_t value, mve_pred16_t p)
// CHECK-LABEL: @test_vstrbq_p_s16(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = trunc <8 x i16> [[VALUE:%.*]] to <8 x i8>
-// CHECK-NEXT: [[TMP2:%.*]] = zext i16 [[P:%.*]] to i32
-// CHECK-NEXT: [[TMP3:%.*]] = call <8 x i1> @llvm.arm.mve.pred.i2v.v8i1(i32 [[TMP2]])
-// CHECK-NEXT: call void @llvm.masked.store.v8i8.p0(<8 x i8> [[TMP0]], ptr [[BASE:%.*]], i32 1, <8 x i1> [[TMP3]])
+// CHECK-NEXT: [[TMP1:%.*]] = zext i16 [[P:%.*]] to i32
+// CHECK-NEXT: [[TMP2:%.*]] = call <8 x i1> @llvm.arm.mve.pred.i2v.v8i1(i32 [[TMP1]])
+// CHECK-NEXT: call void @llvm.masked.store.v8i8.p0(<8 x i8> [[TMP0]], ptr [[BASE:%.*]], i32 1, <8 x i1> [[TMP2]])
// CHECK-NEXT: ret void
//
void test_vstrbq_p_s16(int8_t *base, int16x8_t value, mve_pred16_t p)
@@ -930,9 +930,9 @@ void test_vstrbq_p_s16(int8_t *base, int16x8_t value, mve_pred16_t p)
// CHECK-LABEL: @test_vstrbq_p_s32(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = trunc <4 x i32> [[VALUE:%.*]] to <4 x i8>
-// CHECK-NEXT: [[TMP2:%.*]] = zext i16 [[P:%.*]] to i32
-// CHECK-NEXT: [[TMP3:%.*]] = call <4 x i1> @llvm.arm.mve.pred.i2v.v4i1(i32 [[TMP2]])
-// CHECK-NEXT: call void @llvm.masked.store.v4i8.p0(<4 x i8> [[TMP0]], ptr [[BASE:%.*]], i32 1, <4 x i1> [[TMP3]])
+// CHECK-NEXT: [[TMP1:%.*]] = zext i16 [[P:%.*]] to i32
+// CHECK-NEXT: [[TMP2:%.*]] = call <4 x i1> @llvm.arm.mve.pred.i2v.v4i1(i32 [[TMP1]])
+// CHECK-NEXT: call void @llvm.masked.store.v4i8.p0(<4 x i8> [[TMP0]], ptr [[BASE:%.*]], i32 1, <4 x i1> [[TMP2]])
// CHECK-NEXT: ret void
//
void test_vstrbq_p_s32(int8_t *base, int32x4_t value, mve_pred16_t p)
@@ -946,9 +946,9 @@ void test_vstrbq_p_s32(int8_t *base, int32x4_t value, mve_pred16_t p)
// CHECK-LABEL: @test_vstrbq_p_u8(
// CHECK-NEXT: entry:
-// CHECK-NEXT: [[TMP1:%.*]] = zext i16 [[P:%.*]] to i32
-// CHECK-NEXT: [[TMP2:%.*]] = call <16 x i1> @llvm.arm.mve.pred.i2v.v16i1(i32 [[TMP1]])
-// CHECK-NEXT: call void @llvm.masked.store.v16i8.p0(<16 x i8> [[VALUE:%.*]], ptr [[BASE:%.*]], i32 1, <16 x i1> [[TMP2]])
+// CHECK-NEXT: [[TMP0:%.*]] = zext i16 [[P:%.*]] to i32
+// CHECK-NEXT: [[TMP1:%.*]] = call <16 x i1> @llvm.arm.mve.pred.i2v.v16i1(i32 [[TMP0]])
+// CHECK-NEXT: call void @llvm.masked.store.v16i8.p0(<16 x i8> [[VALUE:%.*]], ptr [[BASE:%.*]], i32 1, <16 x i1> [[TMP1]])
// CHECK-NEXT: ret void
//
void test_vstrbq_p_u8(uint8_t *base, uint8x16_t value, mve_pred16_t p)
@@ -963,9 +963,9 @@ void test_vstrbq_p_u8(uint8_t *base, uint8x16_t value, mve_pred16_t p)
// CHECK-LABEL: @test_vstrbq_p_u16(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = trunc <8 x i16> [[VALUE:%.*]] to <8 x i8>
-// CHECK-NEXT: [[TMP2:%.*]] = zext i16 [[P:%.*]] to i32
-// CHECK-NEXT: [[TMP3:%.*]] = call <8 x i1> @llvm.arm.mve.pred.i2v.v8i1(i32 [[TMP2]])
-// CHECK-NEXT: call void @llvm.masked.store.v8i8.p0(<8 x i8> [[TMP0]], ptr [[BASE:%.*]], i32 1, <8 x i1> [[TMP3]])
+// CHECK-NEXT: [[TMP1:%.*]] = zext i16 [[P:%.*]] to i32
+// CHECK-NEXT: [[TMP2:%.*]] = call <8 x i1> @llvm.arm.mve.pred.i2v.v8i1(i32 [[TMP1]])
+// CHECK-NEXT: call void @llvm.masked.store.v8i8.p0(<8 x i8> [[TMP0]], ptr [[BASE:%.*]], i32 1, <8 x i1> [[TMP2]])
// CHECK-NEXT: ret void
//
void test_vstrbq_p_u16(uint8_t *base, uint16x8_t value, mve_pred16_t p)
@@ -980,9 +980,9 @@ void test_vstrbq_p_u16(uint8_t *base, uint16x8_t value, mve_pred16_t p)
// CHECK-LABEL: @test_vstrbq_p_u32(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = trunc <4 x i32> [[VALUE:%.*]] to <4 x i8>
-// CHECK-NEXT: [[TMP2:%.*]] = zext i16 [[P:%.*]] to i32
-// CHECK-NEXT: [[TMP3:%.*]] = call <4 x i1> @llvm.arm.mve.pred.i2v.v4i1(i32 [[TMP2]])
-// CHECK-NEXT: call void @llvm.masked.store.v4i8.p0(<4 x i8> [[TMP0]], ptr [[BASE:%.*]], i32 1, <4 x i1> [[TMP3]])
+// CHECK-NEXT: [[TMP1:%.*]] = zext i16 [[P:%.*]] to i32
+// CHECK-NEXT: [[TMP2:%.*]] = call <4 x i1> @llvm.arm.mve.pred.i2v.v4i1(i32 [[TMP1]])
+// CHECK-NEXT: call void @llvm.masked.store.v4i8.p0(<4 x i8> [[TMP0]], ptr [[BASE:%.*]], i32 1, <4 x i1> [[TMP2]])
// CHECK-NEXT: ret void
//
void test_vstrbq_p_u32(uint8_t *base, uint32x4_t value, mve_pred16_t p)
@@ -1068,9 +1068,9 @@ void test_vstrhq_u32(uint16_t *base, uint32x4_t value)
// CHECK-LABEL: @test_vstrhq_p_f16(
// CHECK-NEXT: entry:
-// CHECK-NEXT: [[TMP1:%.*]] = zext i16 [[P:%.*]] to i32
-// CHECK-NEXT: [[TMP2:%.*]] = call <8 x i1> @llvm.arm.mve.pred.i2v.v8i1(i32 [[TMP1]])
-// CHECK-NEXT: call void @llvm.masked.store.v8f16.p0(<8 x half> [[VALUE:%.*]], ptr [[BASE:%.*]], i32 2, <8 x i1> [[TMP2]])
+// CHECK-NEXT: [[TMP0:%.*]] = zext i16 [[P:%.*]] to i32
+// CHECK-NEXT: [[TMP1:%.*]] = call <8 x i1> @llvm.arm.mve.pred.i2v.v8i1(i32 [[TMP0]])
+// CHECK-NEXT: call void @llvm.masked.store.v8f16.p0(<8 x half> [[VALUE:%.*]], ptr [[BASE:%.*]], i32 2, <8 x i1> [[TMP1]])
// CHECK-NEXT: ret void
//
void test_vstrhq_p_f16(float16_t *base, float16x8_t value, mve_pred16_t p)
@@ -1084,9 +1084,9 @@ void test_vstrhq_p_f16(float16_t *base, float16x8_t value, mve_pred16_t p)
// CHECK-LABEL: @test_vstrhq_p_s16(
// CHECK-NEXT: entry:
-// CHECK-NEXT: [[TMP1:%.*]] = zext i16 [[P:%.*]] to i32
-// CHECK-NEXT: [[TMP2:%.*]] = call <8 x i1> @llvm.arm.mve.pred.i2v.v8i1(i32 [[TMP1]])
-// CHECK-NEXT: call void @llvm.masked.store.v8i16.p0(<8 x i16> [[VALUE:%.*]], ptr [[BASE:%.*]], i32 2, <8 x i1> [[TMP2]])
+// CHECK-NEXT: [[TMP0:%.*]] = zext i16 [[P:%.*]] to i32
+// CHECK-NEXT: [[TMP1:%.*]] = call <8 x i1> @llvm.arm.mve.pred.i2v.v8i1(i32 [[TMP0]])
+// CHECK-NEXT: call void @llvm.masked.store.v8i16.p0(<8 x i16> [[VALUE:%.*]], ptr [[BASE:%.*]], i32 2, <8 x i1> [[TMP1]])
// CHECK-NEXT: ret void
//
void test_vstrhq_p_s16(int16_t *base, int16x8_t value, mve_pred16_t p)
@@ -1101,9 +1101,9 @@ void test_vstrhq_p_s16(int16_t *base, int16x8_t value, mve_pred16_t p)
// CHECK-LABEL: @test_vstrhq_p_s32(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = trunc <4 x i32> [[VALUE:%.*]] to <4 x i16>
-// CHECK-NEXT: [[TMP2:%.*]] = zext i16 [[P:%.*]] to i32
-// CHECK-NEXT: [[TMP3:%.*]] = call <4 x i1> @llvm.arm.mve.pred.i2v.v4i1(i32 [[TMP2]])
-// CHECK-NEXT: call void @llvm.masked.store.v4i16.p0(<4 x i16> [[TMP0]], ptr [[BASE:%.*]], i32 2, <4 x i1> [[TMP3]])
+// CHECK-NEXT: [[TMP1:%.*]] = zext i16 [[P:%.*]] to i32
+// CHECK-NEXT: [[TMP2:%.*]] = call <4 x i1> @llvm.arm.mve.pred.i2v.v4i1(i32 [[TMP1]])
+// CHECK-NEXT: call void @llvm.masked.store.v4i16.p0(<4 x i16> [[TMP0]], ptr [[BASE:%.*]], i32 2, <4 x i1> [[TMP2]])
// CHECK-NEXT: ret void
//
void test_vstrhq_p_s32(int16_t *base, int32x4_t value, mve_pred16_t p)
@@ -1117,9 +1117,9 @@ void test_vstrhq_p_s32(int16_t *base, int32x4_t value, mve_pred16_t p)
// CHECK-LABEL: @test_vstrhq_p_u16(
// CHECK-NEXT: entry:
-// CHECK-NEXT: [[TMP1:%.*]] = zext i16 [[P:%.*]] to i32
-// CHECK-NEXT: [[TMP2:%.*]] = call <8 x i1> @llvm.arm.mve.pred.i2v.v8i1(i32 [[TMP1]])
-// CHECK-NEXT: call void @llvm.masked.store.v8i16.p0(<8 x i16> [[VALUE:%.*]], ptr [[BASE:%.*]], i32 2, <8 x i1> [[TMP2]])
+// CHECK-NEXT: [[TMP0:%.*]] = zext i16 [[P:%.*]] to i32
+// CHECK-NEXT: [[TMP1:%.*]] = call <8 x i1> @llvm.arm.mve.pred.i2v.v8i1(i32 [[TMP0]])
+// CHECK-NEXT: call void @llvm.masked.store.v8i16.p0(<8 x i16> [[VALUE:%.*]], ptr [[BASE:%.*]], i32 2, <8 x i1> [[TMP1]])
// CHECK-NEXT: ret void
//
void test_vstrhq_p_u16(uint16_t *base, uint16x8_t value, mve_pred16_t p)
@@ -1134,9 +1134,9 @@ void test_vstrhq_p_u16(uint16_t *base, uint16x8_t value, mve_pred16_t p)
// CHECK-LABEL: @test_vstrhq_p_u32(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = trunc <4 x i32> [[VALUE:%.*]] to <4 x i16>
-// CHECK-NEXT: [[TMP2:%.*]] = zext i16 [[P:%.*]] to i32
-// CHECK-NEXT: [[TMP3:%.*]] = call <4 x i1> @llvm.arm.mve.pred.i2v.v4i1(i32 [[TMP2]])
-// CHECK-NEXT: call void @llvm.masked.store.v4i16.p0(<4 x i16> [[TMP0]], ptr [[BASE:%.*]], i32 2, <4 x i1> [[TMP3]])
+// CHECK-NEXT: [[TMP1:%.*]] = zext i16 [[P:%.*]] to i32
+// CHECK-NEXT: [[TMP2:%.*]] = call <4 x i1> @llvm.arm.mve.pred.i2v.v4i1(i32 [[TMP1]])
+// CHECK-NEXT: call void @llvm.masked.store.v4i16.p0(<4 x i16> [[TMP0]], ptr [[BASE:%.*]], i32 2, <4 x i1> [[TMP2]])
// CHECK-NEXT: ret void
//
void test_vstrhq_p_u32(uint16_t *base, uint32x4_t value, mve_pred16_t p)
@@ -1192,9 +1192,9 @@ void test_vstrwq_u32(uint32_t *base, uint32x4_t value)
// CHECK-LABEL: @test_vstrwq_p_f32(
// CHECK-NEXT: entry:
-// CHECK-NEXT: [[TMP1:%.*]] = zext i16 [[P:%.*]] to i32
-// CHECK-NEXT: [[TMP2:%.*]] = call <4 x i1> @llvm.arm.mve.pred.i2v.v4i1(i32 [[TMP1]])
-// CHECK-NEXT: call void @llvm.masked.store.v4f32.p0(<4 x float> [[VALUE:%.*]], ptr [[BASE:%.*]], i32 4, <4 x i1> [[TMP2]])
+// CHECK-NEXT: [[TMP0:%.*]] = zext i16 [[P:%.*]] to i32
+// CHECK-NEXT: [[TMP1:%.*]] = call <4 x i1> @llvm.arm.mve.pred.i2v.v4i1(i32 [[TMP0]])
+// CHECK-NEXT: call void @llvm.masked.store.v4f32.p0(<4 x float> [[VALUE:%.*]], ptr [[BASE:%.*]], i32 4, <4 x i1> [[TMP1]])
// CHECK-NEXT: ret void
//
void test_vstrwq_p_f32(float32_t *base, float32x4_t value, mve_pred16_t p)
@@ -1208,9 +1208,9 @@ void test_vstrwq_p_f32(float32_t *base, float32x4_t value, mve_pred16_t p)

// CHECK-LABEL: @test_vstrwq_p_s32(
// CHECK-NEXT: entry:
-// CHECK-NEXT: [[TMP1:%.*]] = zext i16 [[P:%.*]] to i32
-// CHECK-NEXT: [[TMP2:%.*]] = call <4 x i1> @llvm.arm.mve.pred.i2v.v4i1(i32 [[TMP1]])
-// CHECK-NEXT: call void @llvm.masked.store.v4i32.p0(<4 x i32> [[VALUE:%.*]], ptr [[BASE:%.*]], i32 4, <4 x i1> [[TMP2]])
+// CHECK-NEXT: [[TMP0:%.*]] = zext i16 [[P:%.*]] to i32
+// CHECK-NEXT: [[TMP1:%.*]] = call <4 x i1> @llvm.arm.mve.pred.i2v.v4i1(i32 [[TMP0]])
+// CHECK-NEXT: call void @llvm.masked.store.v4i32.p0(<4 x i32> [[VALUE:%.*]], ptr [[BASE:%.*]], i32 4, <4 x i1> [[TMP1]])
// CHECK-NEXT: ret void
//
void test_vstrwq_p_s32(int32_t *base, int32x4_t value, mve_pred16_t p)
@@ -1224,9 +1224,9 @@ void test_vstrwq_p_s32(int32_t *base, int32x4_t value, mve_pred16_t p)

// CHECK-LABEL: @test_vstrwq_p_u32(
// CHECK-NEXT: entry:
-// CHECK-NEXT: [[TMP1:%.*]] = zext i16 [[P:%.*]] to i32
-// CHECK-NEXT: [[TMP2:%.*]] = call <4 x i1> @llvm.arm.mve.pred.i2v.v4i1(i32 [[TMP1]])
-// CHECK-NEXT: call void @llvm.masked.store.v4i32.p0(<4 x i32> [[VALUE:%.*]], ptr [[BASE:%.*]], i32 4, <4 x i1> [[TMP2]])
+// CHECK-NEXT: [[TMP0:%.*]] = zext i16 [[P:%.*]] to i32
+// CHECK-NEXT: [[TMP1:%.*]] = call <4 x i1> @llvm.arm.mve.pred.i2v.v4i1(i32 [[TMP0]])
+// CHECK-NEXT: call void @llvm.masked.store.v4i32.p0(<4 x i32> [[VALUE:%.*]], ptr [[BASE:%.*]], i32 4, <4 x i1> [[TMP1]])
// CHECK-NEXT: ret void
//
void test_vstrwq_p_u32(uint32_t *base, uint32x4_t value, mve_pred16_t p)
diff --git a/clang/test/CodeGen/attr-target-mv.c b/clang/test/CodeGen/attr-target-mv.c
index 07f47d9..b8807dd 100644
--- a/clang/test/CodeGen/attr-target-mv.c
+++ b/clang/test/CodeGen/attr-target-mv.c
@@ -30,6 +30,8 @@ int __attribute__((target("arch=gracemont"))) foo(void) {return 24;}
int __attribute__((target("arch=pantherlake"))) foo(void) {return 25;}
int __attribute__((target("arch=clearwaterforest"))) foo(void) {return 26;}
int __attribute__((target("arch=diamondrapids"))) foo(void) {return 27;}
+int __attribute__((target("arch=wildcatlake"))) foo(void) {return 28;}
+int __attribute__((target("arch=novalake"))) foo(void) {return 29;}
int __attribute__((target("default"))) foo(void) { return 2; }
int bar(void) {
@@ -203,6 +205,10 @@ void calls_pr50025c(void) { pr50025c(); }
// ITANIUM: ret i32 26
// ITANIUM: define{{.*}} i32 @foo.arch_diamondrapids()
// ITANIUM: ret i32 27
+// ITANIUM: define{{.*}} i32 @foo.arch_wildcatlake()
+// ITANIUM: ret i32 28
+// ITANIUM: define{{.*}} i32 @foo.arch_novalake()
+// ITANIUM: ret i32 29
// ITANIUM: define{{.*}} i32 @foo()
// ITANIUM: ret i32 2
// ITANIUM: define{{.*}} i32 @bar()
@@ -262,6 +268,10 @@ void calls_pr50025c(void) { pr50025c(); }
// WINDOWS: ret i32 26
// WINDOWS: define dso_local i32 @foo.arch_diamondrapids()
// WINDOWS: ret i32 27
+// WINDOWS: define dso_local i32 @foo.arch_wildcatlake()
+// WINDOWS: ret i32 28
+// WINDOWS: define dso_local i32 @foo.arch_novalake()
+// WINDOWS: ret i32 29
// WINDOWS: define dso_local i32 @foo()
// WINDOWS: ret i32 2
// WINDOWS: define dso_local i32 @bar()
diff --git a/clang/test/CodeGen/distributed-thin-lto/supports-hot-cold-new.ll b/clang/test/CodeGen/distributed-thin-lto/supports-hot-cold-new.ll
index 08c1a29..90cda3e 100644
--- a/clang/test/CodeGen/distributed-thin-lto/supports-hot-cold-new.ll
+++ b/clang/test/CodeGen/distributed-thin-lto/supports-hot-cold-new.ll
@@ -22,7 +22,7 @@
; RUN: %clang -target x86_64-unknown-linux-gnu -O2 -o %t1.o -x ir %t.o -c -fthinlto-index=%t.o.thinlto.bc -save-temps=obj
-; RUN: llvm-dis %t.s.3.import.bc -o - | FileCheck %s --check-prefix=CHECK-IR
+; RUN: llvm-dis %t.s.4.opt.bc -o - | FileCheck %s --check-prefix=CHECK-IR
; CHECK-IR: !memprof {{.*}} !callsite
; CHECK-IR: "memprof"="cold"
@@ -42,10 +42,15 @@
; RUN: %clang -target x86_64-unknown-linux-gnu -O2 -o %t1.o -x ir %t.o -c -fthinlto-index=%t.o.thinlto.bc -save-temps=obj
-; RUN: llvm-dis %t.s.3.import.bc -o - | FileCheck %s \
+; RUN: llvm-dis %t.s.4.opt.bc -o - | FileCheck %s \
; RUN: --implicit-check-not "!memprof" --implicit-check-not "!callsite" \
; RUN: --implicit-check-not "memprof"="cold"
+;; Ensure the attributes and metadata are stripped when running a non-LTO pipeline.
+; RUN: %clang -target x86_64-unknown-linux-gnu -O2 -x ir %t.o -S -emit-llvm -o - | FileCheck %s \
+; RUN: --implicit-check-not "!memprof" --implicit-check-not "!callsite" \
+; RUN: --implicit-check-not "memprof"="cold"
+
source_filename = "thinlto-distributed-supports-hot-cold-new.ll"
target datalayout = "e-m:e-p270:32:32-p271:32:32-p272:64:64-i64:64-f80:128-n8:16:32:64-S128"
target triple = "x86_64-unknown-linux-gnu"
diff --git a/clang/test/CodeGen/ext-int-cc.c b/clang/test/CodeGen/ext-int-cc.c
index 7cfd992..f845afc 100644
--- a/clang/test/CodeGen/ext-int-cc.c
+++ b/clang/test/CodeGen/ext-int-cc.c
@@ -49,8 +49,8 @@ void ParamPassing(_BitInt(128) b, _BitInt(64) c) {}
// R600: define{{.*}} void @ParamPassing(ptr addrspace(5) byval(i128) align 8 %{{.+}}, i64 %{{.+}})
// ARC: define{{.*}} void @ParamPassing(ptr byval(i128) align 4 %{{.+}}, i64 inreg %{{.+}})
// XCORE: define{{.*}} void @ParamPassing(ptr byval(i128) align 4 %{{.+}}, i64 %{{.+}})
-// RISCV64: define{{.*}} void @ParamPassing(i128 %{{.+}}, i64 %{{.+}})
-// RISCV32: define{{.*}} void @ParamPassing(ptr dead_on_return %{{.+}}, i64 %{{.+}})
+// RISCV64: define{{.*}} void @ParamPassing(i128 signext %{{.+}}, i64 signext %{{.+}})
+// RISCV32: define{{.*}} void @ParamPassing(ptr dead_on_return %{{.+}}, i64 signext %{{.+}})
// WASM: define{{.*}} void @ParamPassing(i128 %{{.+}}, i64 %{{.+}})
// SYSTEMZ: define{{.*}} void @ParamPassing(ptr dead_on_return %{{.+}}, i64 %{{.+}})
// PPC64: define{{.*}} void @ParamPassing(i128 %{{.+}}, i64 %{{.+}})
@@ -79,8 +79,8 @@ void ParamPassing2(_BitInt(127) b, _BitInt(63) c) {}
// R600: define{{.*}} void @ParamPassing2(ptr addrspace(5) byval(i128) align 8 %{{.+}}, i63 %{{.+}})
// ARC: define{{.*}} void @ParamPassing2(ptr byval(i128) align 4 %{{.+}}, i63 inreg %{{.+}})
// XCORE: define{{.*}} void @ParamPassing2(ptr byval(i128) align 4 %{{.+}}, i63 %{{.+}})
-// RISCV64: define{{.*}} void @ParamPassing2(i127 %{{.+}}, i63 signext %{{.+}})
-// RISCV32: define{{.*}} void @ParamPassing2(ptr dead_on_return %{{.+}}, i63 %{{.+}})
+// RISCV64: define{{.*}} void @ParamPassing2(i127 signext %{{.+}}, i63 signext %{{.+}})
+// RISCV32: define{{.*}} void @ParamPassing2(ptr dead_on_return %{{.+}}, i63 signext %{{.+}})
// WASM: define{{.*}} void @ParamPassing2(i127 %{{.+}}, i63 %{{.+}})
// SYSTEMZ: define{{.*}} void @ParamPassing2(ptr dead_on_return %{{.+}}, i63 signext %{{.+}})
// PPC64: define{{.*}} void @ParamPassing2(i127 %{{.+}}, i63 signext %{{.+}})
diff --git a/clang/test/CodeGen/inline-asm-systemz-flag-output.c b/clang/test/CodeGen/inline-asm-systemz-flag-output.c
new file mode 100644
index 0000000..041797b
--- /dev/null
+++ b/clang/test/CodeGen/inline-asm-systemz-flag-output.c
@@ -0,0 +1,57 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --version 6
+// RUN: %clang_cc1 -O2 -triple s390x-linux -emit-llvm -o - %s | FileCheck %s
+
+// CHECK-LABEL: define dso_local signext range(i32 0, 4) i32 @test(
+// CHECK-SAME: i32 noundef signext [[X:%.*]]) local_unnamed_addr #[[ATTR0:[0-9]+]] {
+// CHECK-NEXT: [[ENTRY:.*:]]
+// CHECK-NEXT: [[TMP0:%.*]] = tail call { i32, i32 } asm "ahi $0,42\0A", "=d,={@cc},0"(i32 [[X]]) #[[ATTR2:[0-9]+]], !srcloc [[META2:![0-9]+]]
+// CHECK-NEXT: [[ASMRESULT1:%.*]] = extractvalue { i32, i32 } [[TMP0]], 1
+// CHECK-NEXT: [[TMP1:%.*]] = icmp ult i32 [[ASMRESULT1]], 4
+// CHECK-NEXT: tail call void @llvm.assume(i1 [[TMP1]])
+// CHECK-NEXT: ret i32 [[ASMRESULT1]]
+//
+int test(int x) {
+ int cc;
+ asm ("ahi %[x],42\n" : [x] "+d"(x), "=@cc" (cc));
+ return cc;
+}
+
+// CHECK-LABEL: define dso_local signext range(i32 0, 2) i32 @test_low_high_transformation(
+// CHECK-SAME: i32 noundef signext [[X:%.*]]) local_unnamed_addr #[[ATTR0]] {
+// CHECK-NEXT: [[ENTRY:.*:]]
+// CHECK-NEXT: [[TMP0:%.*]] = tail call { i32, i32 } asm "ahi $0,42\0A", "=d,={@cc},0"(i32 [[X]]) #[[ATTR2]], !srcloc [[META3:![0-9]+]]
+// CHECK-NEXT: [[ASMRESULT1:%.*]] = extractvalue { i32, i32 } [[TMP0]], 1
+// CHECK-NEXT: [[TMP1:%.*]] = icmp ult i32 [[ASMRESULT1]], 4
+// CHECK-NEXT: tail call void @llvm.assume(i1 [[TMP1]])
+// CHECK-NEXT: [[TMP2:%.*]] = add nsw i32 [[ASMRESULT1]], -1
+// CHECK-NEXT: [[TMP3:%.*]] = icmp ult i32 [[TMP2]], 2
+// CHECK-NEXT: [[LOR_EXT:%.*]] = zext i1 [[TMP3]] to i32
+// CHECK-NEXT: ret i32 [[LOR_EXT]]
+//
+int test_low_high_transformation(int x) {
+ int cc;
+ asm ("ahi %[x],42\n" : [x] "+d"(x), "=@cc" (cc));
+ return cc == 1 || cc == 2;
+}
+
+// CHECK-LABEL: define dso_local signext range(i32 0, 2) i32 @test_equal_high_transformation(
+// CHECK-SAME: i32 noundef signext [[X:%.*]]) local_unnamed_addr #[[ATTR0]] {
+// CHECK-NEXT: [[ENTRY:.*:]]
+// CHECK-NEXT: [[TMP0:%.*]] = tail call { i32, i32 } asm "ahi $0,42\0A", "=d,={@cc},0"(i32 [[X]]) #[[ATTR2]], !srcloc [[META4:![0-9]+]]
+// CHECK-NEXT: [[ASMRESULT1:%.*]] = extractvalue { i32, i32 } [[TMP0]], 1
+// CHECK-NEXT: [[TMP1:%.*]] = icmp ult i32 [[ASMRESULT1]], 4
+// CHECK-NEXT: tail call void @llvm.assume(i1 [[TMP1]])
+// CHECK-NEXT: [[TMP2:%.*]] = and i32 [[ASMRESULT1]], 1
+// CHECK-NEXT: [[LOR_EXT:%.*]] = xor i32 [[TMP2]], 1
+// CHECK-NEXT: ret i32 [[LOR_EXT]]
+//
+int test_equal_high_transformation(int x) {
+ int cc;
+ asm ("ahi %[x],42\n" : [x] "+d"(x), "=@cc" (cc));
+ return cc == 0 || cc == 2;
+}
+//.
+// CHECK: [[META2]] = !{i64 788}
+// CHECK: [[META3]] = !{i64 1670}
+// CHECK: [[META4]] = !{i64 2505}
+//.
diff --git a/clang/test/CodeGen/target-builtin-noerror.c b/clang/test/CodeGen/target-builtin-noerror.c
index 120f1a5..47d5ae5 100644
--- a/clang/test/CodeGen/target-builtin-noerror.c
+++ b/clang/test/CodeGen/target-builtin-noerror.c
@@ -178,6 +178,8 @@ void verifycpustrings(void) {
(void)__builtin_cpu_is("lunarlake");
(void)__builtin_cpu_is("clearwaterforest");
(void)__builtin_cpu_is("pantherlake");
+ (void)__builtin_cpu_is("wildcatlake");
+ (void)__builtin_cpu_is("novalake");
(void)__builtin_cpu_is("haswell");
(void)__builtin_cpu_is("icelake-client");
(void)__builtin_cpu_is("icelake-server");
diff --git a/clang/test/CodeGen/target-data.c b/clang/test/CodeGen/target-data.c
index eecee69..e950794 100644
--- a/clang/test/CodeGen/target-data.c
+++ b/clang/test/CodeGen/target-data.c
@@ -152,20 +152,20 @@
// RUN: %clang_cc1 -triple r600-unknown -o - -emit-llvm %s | \
// RUN: FileCheck %s -check-prefix=R600
-// R600: target datalayout = "e-p:32:32-i64:64-v16:16-v24:32-v32:32-v48:64-v96:128-v192:256-v256:256-v512:512-v1024:1024-v2048:2048-n32:64-S32-A5-G1"
+// R600: target datalayout = "e-m:e-p:32:32-i64:64-v16:16-v24:32-v32:32-v48:64-v96:128-v192:256-v256:256-v512:512-v1024:1024-v2048:2048-n32:64-S32-A5-G1"
// RUN: %clang_cc1 -triple r600-unknown -target-cpu cayman -o - -emit-llvm %s \
// RUN: | FileCheck %s -check-prefix=R600D
-// R600D: target datalayout = "e-p:32:32-i64:64-v16:16-v24:32-v32:32-v48:64-v96:128-v192:256-v256:256-v512:512-v1024:1024-v2048:2048-n32:64-S32-A5-G1"
+// R600D: target datalayout = "e-m:e-p:32:32-i64:64-v16:16-v24:32-v32:32-v48:64-v96:128-v192:256-v256:256-v512:512-v1024:1024-v2048:2048-n32:64-S32-A5-G1"
// RUN: %clang_cc1 -triple amdgcn-unknown -target-cpu hawaii -o - -emit-llvm %s \
// RUN: | FileCheck %s -check-prefix=R600SI
-// R600SI: target datalayout = "e-p:64:64-p1:64:64-p2:32:32-p3:32:32-p4:64:64-p5:32:32-p6:32:32-p7:160:256:256:32-p8:128:128:128:48-p9:192:256:256:32-i64:64-v16:16-v24:32-v32:32-v48:64-v96:128-v192:256-v256:256-v512:512-v1024:1024-v2048:2048-n32:64-S32-A5-G1-ni:7:8:9"
+// R600SI: target datalayout = "e-m:e-p:64:64-p1:64:64-p2:32:32-p3:32:32-p4:64:64-p5:32:32-p6:32:32-p7:160:256:256:32-p8:128:128:128:48-p9:192:256:256:32-i64:64-v16:16-v24:32-v32:32-v48:64-v96:128-v192:256-v256:256-v512:512-v1024:1024-v2048:2048-n32:64-S32-A5-G1-ni:7:8:9"
// Test default -target-cpu
// RUN: %clang_cc1 -triple amdgcn-unknown -o - -emit-llvm %s \
// RUN: | FileCheck %s -check-prefix=R600SIDefault
-// R600SIDefault: target datalayout = "e-p:64:64-p1:64:64-p2:32:32-p3:32:32-p4:64:64-p5:32:32-p6:32:32-p7:160:256:256:32-p8:128:128:128:48-p9:192:256:256:32-i64:64-v16:16-v24:32-v32:32-v48:64-v96:128-v192:256-v256:256-v512:512-v1024:1024-v2048:2048-n32:64-S32-A5-G1-ni:7:8:9"
+// R600SIDefault: target datalayout = "e-m:e-p:64:64-p1:64:64-p2:32:32-p3:32:32-p4:64:64-p5:32:32-p6:32:32-p7:160:256:256:32-p8:128:128:128:48-p9:192:256:256:32-i64:64-v16:16-v24:32-v32:32-v48:64-v96:128-v192:256-v256:256-v512:512-v1024:1024-v2048:2048-n32:64-S32-A5-G1-ni:7:8:9"
// RUN: %clang_cc1 -triple arm64-unknown -o - -emit-llvm %s | \
// RUN: FileCheck %s -check-prefix=AARCH64
diff --git a/clang/test/CodeGen/unified-lto-module-flag.ll b/clang/test/CodeGen/unified-lto-module-flag.ll
new file mode 100644
index 0000000..deefe82
--- /dev/null
+++ b/clang/test/CodeGen/unified-lto-module-flag.ll
@@ -0,0 +1,11 @@
+; Test that we do not duplicate the UnifiedLTO module flag.
+;
+; RUN: %clang_cc1 -emit-llvm -flto=full -funified-lto -o - %s | FileCheck %s
+
+; CHECK: !llvm.module.flags = !{!0, !1, !2, !3}
+!llvm.module.flags = !{!0, !1, !2, !3}
+
+!0 = !{i32 1, !"wchar_size", i32 2}
+!1 = !{i32 7, !"frame-pointer", i32 2}
+!2 = !{i32 1, !"EnableSplitLTOUnit", i32 1}
+!3 = !{i32 1, !"UnifiedLTO", i32 1}