aboutsummaryrefslogtreecommitdiff
diff options
context:
space:
mode:
authorCraig Topper <craig.topper@sifive.com>2024-06-24 09:46:35 -0700
committerCraig Topper <craig.topper@sifive.com>2024-06-24 10:01:52 -0700
commit7601ae125de673c5a5f13a1d97f16cdd49e26e29 (patch)
tree94f7e4208ad1076b16e04363b3c5825f9fa2fdc0
parentd75f9dd1d29b332bdc51346de63cbc04646354d7 (diff)
downloadllvm-7601ae125de673c5a5f13a1d97f16cdd49e26e29.zip
llvm-7601ae125de673c5a5f13a1d97f16cdd49e26e29.tar.gz
llvm-7601ae125de673c5a5f13a1d97f16cdd49e26e29.tar.bz2
[RISCV] Add back some test cases I inadvertently deleted. NFC
These tests were accidentally removed in a7a1195f01037e5019f671c96ef4bca9af9bb9a7. I only meant to remove bfloat tests, but I accidentally removed f32 and f64 as well.
-rw-r--r--llvm/test/CodeGen/RISCV/rvv/fixed-vectors-select-fp.ll224
1 file changed, 224 insertions, 0 deletions
diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-select-fp.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-select-fp.ll
index e75591c..d945cf5 100644
--- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-select-fp.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-select-fp.ll
@@ -119,3 +119,227 @@ define <16 x half> @selectcc_v16f16(half %a, half %b, <16 x half> %c, <16 x half
%v = select i1 %cmp, <16 x half> %c, <16 x half> %d
ret <16 x half> %v
}
+
+define <2 x float> @select_v2f32(i1 zeroext %c, <2 x float> %a, <2 x float> %b) {
+; CHECK-LABEL: select_v2f32:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetivli zero, 2, e8, mf8, ta, ma
+; CHECK-NEXT: vmv.v.x v10, a0
+; CHECK-NEXT: vmsne.vi v0, v10, 0
+; CHECK-NEXT: vsetvli zero, zero, e32, mf2, ta, ma
+; CHECK-NEXT: vmerge.vvm v8, v9, v8, v0
+; CHECK-NEXT: ret
+ %v = select i1 %c, <2 x float> %a, <2 x float> %b
+ ret <2 x float> %v
+}
+
+define <2 x float> @selectcc_v2f32(float %a, float %b, <2 x float> %c, <2 x float> %d) {
+; CHECK-LABEL: selectcc_v2f32:
+; CHECK: # %bb.0:
+; CHECK-NEXT: feq.s a0, fa0, fa1
+; CHECK-NEXT: vsetivli zero, 2, e8, mf8, ta, ma
+; CHECK-NEXT: vmv.v.x v10, a0
+; CHECK-NEXT: vmsne.vi v0, v10, 0
+; CHECK-NEXT: vsetvli zero, zero, e32, mf2, ta, ma
+; CHECK-NEXT: vmerge.vvm v8, v9, v8, v0
+; CHECK-NEXT: ret
+ %cmp = fcmp oeq float %a, %b
+ %v = select i1 %cmp, <2 x float> %c, <2 x float> %d
+ ret <2 x float> %v
+}
+
+define <4 x float> @select_v4f32(i1 zeroext %c, <4 x float> %a, <4 x float> %b) {
+; CHECK-LABEL: select_v4f32:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetivli zero, 4, e8, mf4, ta, ma
+; CHECK-NEXT: vmv.v.x v10, a0
+; CHECK-NEXT: vmsne.vi v0, v10, 0
+; CHECK-NEXT: vsetvli zero, zero, e32, m1, ta, ma
+; CHECK-NEXT: vmerge.vvm v8, v9, v8, v0
+; CHECK-NEXT: ret
+ %v = select i1 %c, <4 x float> %a, <4 x float> %b
+ ret <4 x float> %v
+}
+
+define <4 x float> @selectcc_v4f32(float %a, float %b, <4 x float> %c, <4 x float> %d) {
+; CHECK-LABEL: selectcc_v4f32:
+; CHECK: # %bb.0:
+; CHECK-NEXT: feq.s a0, fa0, fa1
+; CHECK-NEXT: vsetivli zero, 4, e8, mf4, ta, ma
+; CHECK-NEXT: vmv.v.x v10, a0
+; CHECK-NEXT: vmsne.vi v0, v10, 0
+; CHECK-NEXT: vsetvli zero, zero, e32, m1, ta, ma
+; CHECK-NEXT: vmerge.vvm v8, v9, v8, v0
+; CHECK-NEXT: ret
+ %cmp = fcmp oeq float %a, %b
+ %v = select i1 %cmp, <4 x float> %c, <4 x float> %d
+ ret <4 x float> %v
+}
+
+define <8 x float> @select_v8f32(i1 zeroext %c, <8 x float> %a, <8 x float> %b) {
+; CHECK-LABEL: select_v8f32:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetivli zero, 8, e8, mf2, ta, ma
+; CHECK-NEXT: vmv.v.x v12, a0
+; CHECK-NEXT: vmsne.vi v0, v12, 0
+; CHECK-NEXT: vsetvli zero, zero, e32, m2, ta, ma
+; CHECK-NEXT: vmerge.vvm v8, v10, v8, v0
+; CHECK-NEXT: ret
+ %v = select i1 %c, <8 x float> %a, <8 x float> %b
+ ret <8 x float> %v
+}
+
+define <8 x float> @selectcc_v8f32(float %a, float %b, <8 x float> %c, <8 x float> %d) {
+; CHECK-LABEL: selectcc_v8f32:
+; CHECK: # %bb.0:
+; CHECK-NEXT: feq.s a0, fa0, fa1
+; CHECK-NEXT: vsetivli zero, 8, e8, mf2, ta, ma
+; CHECK-NEXT: vmv.v.x v12, a0
+; CHECK-NEXT: vmsne.vi v0, v12, 0
+; CHECK-NEXT: vsetvli zero, zero, e32, m2, ta, ma
+; CHECK-NEXT: vmerge.vvm v8, v10, v8, v0
+; CHECK-NEXT: ret
+ %cmp = fcmp oeq float %a, %b
+ %v = select i1 %cmp, <8 x float> %c, <8 x float> %d
+ ret <8 x float> %v
+}
+
+define <16 x float> @select_v16f32(i1 zeroext %c, <16 x float> %a, <16 x float> %b) {
+; CHECK-LABEL: select_v16f32:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetivli zero, 16, e8, m1, ta, ma
+; CHECK-NEXT: vmv.v.x v16, a0
+; CHECK-NEXT: vmsne.vi v0, v16, 0
+; CHECK-NEXT: vsetvli zero, zero, e32, m4, ta, ma
+; CHECK-NEXT: vmerge.vvm v8, v12, v8, v0
+; CHECK-NEXT: ret
+ %v = select i1 %c, <16 x float> %a, <16 x float> %b
+ ret <16 x float> %v
+}
+
+define <16 x float> @selectcc_v16f32(float %a, float %b, <16 x float> %c, <16 x float> %d) {
+; CHECK-LABEL: selectcc_v16f32:
+; CHECK: # %bb.0:
+; CHECK-NEXT: feq.s a0, fa0, fa1
+; CHECK-NEXT: vsetivli zero, 16, e8, m1, ta, ma
+; CHECK-NEXT: vmv.v.x v16, a0
+; CHECK-NEXT: vmsne.vi v0, v16, 0
+; CHECK-NEXT: vsetvli zero, zero, e32, m4, ta, ma
+; CHECK-NEXT: vmerge.vvm v8, v12, v8, v0
+; CHECK-NEXT: ret
+ %cmp = fcmp oeq float %a, %b
+ %v = select i1 %cmp, <16 x float> %c, <16 x float> %d
+ ret <16 x float> %v
+}
+
+define <2 x double> @select_v2f64(i1 zeroext %c, <2 x double> %a, <2 x double> %b) {
+; CHECK-LABEL: select_v2f64:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetivli zero, 2, e8, mf8, ta, ma
+; CHECK-NEXT: vmv.v.x v10, a0
+; CHECK-NEXT: vmsne.vi v0, v10, 0
+; CHECK-NEXT: vsetvli zero, zero, e64, m1, ta, ma
+; CHECK-NEXT: vmerge.vvm v8, v9, v8, v0
+; CHECK-NEXT: ret
+ %v = select i1 %c, <2 x double> %a, <2 x double> %b
+ ret <2 x double> %v
+}
+
+define <2 x double> @selectcc_v2f64(double %a, double %b, <2 x double> %c, <2 x double> %d) {
+; CHECK-LABEL: selectcc_v2f64:
+; CHECK: # %bb.0:
+; CHECK-NEXT: feq.d a0, fa0, fa1
+; CHECK-NEXT: vsetivli zero, 2, e8, mf8, ta, ma
+; CHECK-NEXT: vmv.v.x v10, a0
+; CHECK-NEXT: vmsne.vi v0, v10, 0
+; CHECK-NEXT: vsetvli zero, zero, e64, m1, ta, ma
+; CHECK-NEXT: vmerge.vvm v8, v9, v8, v0
+; CHECK-NEXT: ret
+ %cmp = fcmp oeq double %a, %b
+ %v = select i1 %cmp, <2 x double> %c, <2 x double> %d
+ ret <2 x double> %v
+}
+
+define <4 x double> @select_v4f64(i1 zeroext %c, <4 x double> %a, <4 x double> %b) {
+; CHECK-LABEL: select_v4f64:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetivli zero, 4, e8, mf4, ta, ma
+; CHECK-NEXT: vmv.v.x v12, a0
+; CHECK-NEXT: vmsne.vi v0, v12, 0
+; CHECK-NEXT: vsetvli zero, zero, e64, m2, ta, ma
+; CHECK-NEXT: vmerge.vvm v8, v10, v8, v0
+; CHECK-NEXT: ret
+ %v = select i1 %c, <4 x double> %a, <4 x double> %b
+ ret <4 x double> %v
+}
+
+define <4 x double> @selectcc_v4f64(double %a, double %b, <4 x double> %c, <4 x double> %d) {
+; CHECK-LABEL: selectcc_v4f64:
+; CHECK: # %bb.0:
+; CHECK-NEXT: feq.d a0, fa0, fa1
+; CHECK-NEXT: vsetivli zero, 4, e8, mf4, ta, ma
+; CHECK-NEXT: vmv.v.x v12, a0
+; CHECK-NEXT: vmsne.vi v0, v12, 0
+; CHECK-NEXT: vsetvli zero, zero, e64, m2, ta, ma
+; CHECK-NEXT: vmerge.vvm v8, v10, v8, v0
+; CHECK-NEXT: ret
+ %cmp = fcmp oeq double %a, %b
+ %v = select i1 %cmp, <4 x double> %c, <4 x double> %d
+ ret <4 x double> %v
+}
+
+define <8 x double> @select_v8f64(i1 zeroext %c, <8 x double> %a, <8 x double> %b) {
+; CHECK-LABEL: select_v8f64:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetivli zero, 8, e8, mf2, ta, ma
+; CHECK-NEXT: vmv.v.x v16, a0
+; CHECK-NEXT: vmsne.vi v0, v16, 0
+; CHECK-NEXT: vsetvli zero, zero, e64, m4, ta, ma
+; CHECK-NEXT: vmerge.vvm v8, v12, v8, v0
+; CHECK-NEXT: ret
+ %v = select i1 %c, <8 x double> %a, <8 x double> %b
+ ret <8 x double> %v
+}
+
+define <8 x double> @selectcc_v8f64(double %a, double %b, <8 x double> %c, <8 x double> %d) {
+; CHECK-LABEL: selectcc_v8f64:
+; CHECK: # %bb.0:
+; CHECK-NEXT: feq.d a0, fa0, fa1
+; CHECK-NEXT: vsetivli zero, 8, e8, mf2, ta, ma
+; CHECK-NEXT: vmv.v.x v16, a0
+; CHECK-NEXT: vmsne.vi v0, v16, 0
+; CHECK-NEXT: vsetvli zero, zero, e64, m4, ta, ma
+; CHECK-NEXT: vmerge.vvm v8, v12, v8, v0
+; CHECK-NEXT: ret
+ %cmp = fcmp oeq double %a, %b
+ %v = select i1 %cmp, <8 x double> %c, <8 x double> %d
+ ret <8 x double> %v
+}
+
+define <16 x double> @select_v16f64(i1 zeroext %c, <16 x double> %a, <16 x double> %b) {
+; CHECK-LABEL: select_v16f64:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetivli zero, 16, e8, m1, ta, ma
+; CHECK-NEXT: vmv.v.x v24, a0
+; CHECK-NEXT: vmsne.vi v0, v24, 0
+; CHECK-NEXT: vsetvli zero, zero, e64, m8, ta, ma
+; CHECK-NEXT: vmerge.vvm v8, v16, v8, v0
+; CHECK-NEXT: ret
+ %v = select i1 %c, <16 x double> %a, <16 x double> %b
+ ret <16 x double> %v
+}
+
+define <16 x double> @selectcc_v16f64(double %a, double %b, <16 x double> %c, <16 x double> %d) {
+; CHECK-LABEL: selectcc_v16f64:
+; CHECK: # %bb.0:
+; CHECK-NEXT: feq.d a0, fa0, fa1
+; CHECK-NEXT: vsetivli zero, 16, e8, m1, ta, ma
+; CHECK-NEXT: vmv.v.x v24, a0
+; CHECK-NEXT: vmsne.vi v0, v24, 0
+; CHECK-NEXT: vsetvli zero, zero, e64, m8, ta, ma
+; CHECK-NEXT: vmerge.vvm v8, v16, v8, v0
+; CHECK-NEXT: ret
+ %cmp = fcmp oeq double %a, %b
+ %v = select i1 %cmp, <16 x double> %c, <16 x double> %d
+ ret <16 x double> %v
+}