about summary refs log tree commit diff
diff options
context:
space:
mode:
authorWANG Rui <wangrui@loongson.cn>2026-04-23 21:22:29 +0800
committerWANG Rui <wangrui@loongson.cn>2026-04-23 21:22:56 +0800
commit56ccc12a59947e19375c7a84a912a8ebed3f3496 (patch)
tree796fcd77101392cb09bbd0c2c8a61e2a9c245cac
parentb9a2e843d9b23ac34698a6315bf1803f4a09954a (diff)
downloadllvm-users/hev/vbitsel-tests.tar.gz
llvm-users/hev/vbitsel-tests.tar.bz2
llvm-users/hev/vbitsel-tests.zip
[LoongArch][NFC] Add tests for vector bitwise selection (branch: users/hev/vbitsel-tests)
-rw-r--r-- llvm/test/CodeGen/LoongArch/lasx/bitsel.ll | 78
-rw-r--r-- llvm/test/CodeGen/LoongArch/lsx/bitsel.ll  | 78
2 files changed, 156 insertions, 0 deletions
diff --git a/llvm/test/CodeGen/LoongArch/lasx/bitsel.ll b/llvm/test/CodeGen/LoongArch/lasx/bitsel.ll
new file mode 100644
index 000000000000..c1a67a2dcf1d
--- /dev/null
+++ b/llvm/test/CodeGen/LoongArch/lasx/bitsel.ll
@@ -0,0 +1,78 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 3
+; RUN: llc --mtriple=loongarch32 --mattr=+32s,+lasx < %s | FileCheck %s
+; RUN: llc --mtriple=loongarch64 --mattr=+lasx < %s | FileCheck %s
+
+define <32 x i8> @bitsel_v32i8(<32 x i8> %a, <32 x i8> %b, <32 x i8> %c) nounwind {
+; CHECK-LABEL: bitsel_v32i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: xvand.v $xr1, $xr2, $xr1
+; CHECK-NEXT: xvandn.v $xr0, $xr2, $xr0
+; CHECK-NEXT: xvor.v $xr0, $xr1, $xr0
+; CHECK-NEXT: ret
+entry:
+ %0 = and <32 x i8> %c, %b
+ %1 = xor <32 x i8> %c, splat (i8 -1)
+ %2 = and <32 x i8> %a, %1
+ %3 = or <32 x i8> %0, %2
+ ret <32 x i8> %3
+}
+
+define <16 x i16> @bitsel_v16i16(<16 x i16> %a, <16 x i16> %b, <16 x i16> %c) nounwind {
+; CHECK-LABEL: bitsel_v16i16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: xvand.v $xr1, $xr2, $xr1
+; CHECK-NEXT: xvandn.v $xr0, $xr2, $xr0
+; CHECK-NEXT: xvor.v $xr0, $xr1, $xr0
+; CHECK-NEXT: ret
+entry:
+ %0 = and <16 x i16> %c, %b
+ %1 = xor <16 x i16> %c, splat (i16 -1)
+ %2 = and <16 x i16> %a, %1
+ %3 = or <16 x i16> %0, %2
+ ret <16 x i16> %3
+}
+
+define <8 x i32> @bitsel_v8i32(<8 x i32> %a, <8 x i32> %b, <8 x i32> %c) nounwind {
+; CHECK-LABEL: bitsel_v8i32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: xvand.v $xr1, $xr2, $xr1
+; CHECK-NEXT: xvandn.v $xr0, $xr2, $xr0
+; CHECK-NEXT: xvor.v $xr0, $xr1, $xr0
+; CHECK-NEXT: ret
+entry:
+ %0 = and <8 x i32> %c, %b
+ %1 = xor <8 x i32> %c, splat (i32 -1)
+ %2 = and <8 x i32> %a, %1
+ %3 = or <8 x i32> %0, %2
+ ret <8 x i32> %3
+}
+
+define <4 x i64> @bitsel_v4i64(<4 x i64> %a, <4 x i64> %b, <4 x i64> %c) nounwind {
+; CHECK-LABEL: bitsel_v4i64:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: xvand.v $xr1, $xr2, $xr1
+; CHECK-NEXT: xvandn.v $xr0, $xr2, $xr0
+; CHECK-NEXT: xvor.v $xr0, $xr1, $xr0
+; CHECK-NEXT: ret
+entry:
+ %0 = and <4 x i64> %c, %b
+ %1 = xor <4 x i64> %c, splat (i64 -1)
+ %2 = and <4 x i64> %a, %1
+ %3 = or <4 x i64> %0, %2
+ ret <4 x i64> %3
+}
+
+define <32 x i8> @bitsel_v32i8_1(<32 x i8> %a, <32 x i8> %b) nounwind {
+; CHECK-LABEL: bitsel_v32i8_1:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: xvandi.b $xr2, $xr0, 1
+; CHECK-NEXT: xvandn.v $xr0, $xr0, $xr1
+; CHECK-NEXT: xvor.v $xr0, $xr2, $xr0
+; CHECK-NEXT: ret
+entry:
+ %0 = and <32 x i8> %a, splat (i8 1)
+ %1 = xor <32 x i8> %a, splat (i8 -1)
+ %2 = and <32 x i8> %b, %1
+ %3 = or <32 x i8> %0, %2
+ ret <32 x i8> %3
+}
diff --git a/llvm/test/CodeGen/LoongArch/lsx/bitsel.ll b/llvm/test/CodeGen/LoongArch/lsx/bitsel.ll
new file mode 100644
index 000000000000..eb39012eeaa8
--- /dev/null
+++ b/llvm/test/CodeGen/LoongArch/lsx/bitsel.ll
@@ -0,0 +1,78 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 3
+; RUN: llc --mtriple=loongarch32 --mattr=+32s,+lsx < %s | FileCheck %s
+; RUN: llc --mtriple=loongarch64 --mattr=+lsx < %s | FileCheck %s
+
+define <16 x i8> @bitsel_v16i8(<16 x i8> %a, <16 x i8> %b, <16 x i8> %c) nounwind {
+; CHECK-LABEL: bitsel_v16i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vand.v $vr1, $vr2, $vr1
+; CHECK-NEXT: vandn.v $vr0, $vr2, $vr0
+; CHECK-NEXT: vor.v $vr0, $vr1, $vr0
+; CHECK-NEXT: ret
+entry:
+ %0 = and <16 x i8> %c, %b
+ %1 = xor <16 x i8> %c, splat (i8 -1)
+ %2 = and <16 x i8> %a, %1
+ %3 = or <16 x i8> %0, %2
+ ret <16 x i8> %3
+}
+
+define <8 x i16> @bitsel_v8i16(<8 x i16> %a, <8 x i16> %b, <8 x i16> %c) nounwind {
+; CHECK-LABEL: bitsel_v8i16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vand.v $vr1, $vr2, $vr1
+; CHECK-NEXT: vandn.v $vr0, $vr2, $vr0
+; CHECK-NEXT: vor.v $vr0, $vr1, $vr0
+; CHECK-NEXT: ret
+entry:
+ %0 = and <8 x i16> %c, %b
+ %1 = xor <8 x i16> %c, splat (i16 -1)
+ %2 = and <8 x i16> %a, %1
+ %3 = or <8 x i16> %0, %2
+ ret <8 x i16> %3
+}
+
+define <4 x i32> @bitsel_v4i32(<4 x i32> %a, <4 x i32> %b, <4 x i32> %c) nounwind {
+; CHECK-LABEL: bitsel_v4i32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vand.v $vr1, $vr2, $vr1
+; CHECK-NEXT: vandn.v $vr0, $vr2, $vr0
+; CHECK-NEXT: vor.v $vr0, $vr1, $vr0
+; CHECK-NEXT: ret
+entry:
+ %0 = and <4 x i32> %c, %b
+ %1 = xor <4 x i32> %c, splat (i32 -1)
+ %2 = and <4 x i32> %a, %1
+ %3 = or <4 x i32> %0, %2
+ ret <4 x i32> %3
+}
+
+define <2 x i64> @bitsel_v2i64(<2 x i64> %a, <2 x i64> %b, <2 x i64> %c) nounwind {
+; CHECK-LABEL: bitsel_v2i64:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vand.v $vr1, $vr2, $vr1
+; CHECK-NEXT: vandn.v $vr0, $vr2, $vr0
+; CHECK-NEXT: vor.v $vr0, $vr1, $vr0
+; CHECK-NEXT: ret
+entry:
+ %0 = and <2 x i64> %c, %b
+ %1 = xor <2 x i64> %c, splat (i64 -1)
+ %2 = and <2 x i64> %a, %1
+ %3 = or <2 x i64> %0, %2
+ ret <2 x i64> %3
+}
+
+define <16 x i8> @bitsel_v16i8_1(<16 x i8> %a, <16 x i8> %b) nounwind {
+; CHECK-LABEL: bitsel_v16i8_1:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vandi.b $vr2, $vr0, 1
+; CHECK-NEXT: vandn.v $vr0, $vr0, $vr1
+; CHECK-NEXT: vor.v $vr0, $vr2, $vr0
+; CHECK-NEXT: ret
+entry:
+ %0 = and <16 x i8> %a, splat (i8 1)
+ %1 = xor <16 x i8> %a, splat (i8 -1)
+ %2 = and <16 x i8> %b, %1
+ %3 = or <16 x i8> %0, %2
+ ret <16 x i8> %3
+}