Diffstat (limited to 'llvm/test/CodeGen/RISCV/rvv/vfneg-sdnode.ll')
-rw-r--r--  llvm/test/CodeGen/RISCV/rvv/vfneg-sdnode.ll | 198
1 file changed, 162 insertions(+), 36 deletions(-)
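
The diff below adds riscv32 and riscv64 RUN lines with +experimental-zvfbfa and splits the shared CHECK prefix of the bf16 tests into ZVFH, ZVFHMIN, and ZVFBFA. Under ZVFH and ZVFHMIN the bf16 fneg keeps the integer lowering (vxor.vx against the sign-bit mask), while ZVFBFA selects a native vfneg.v with the alternate element type e16alt. The f16 tests gain ZVFBFA check lines identical to the ZVFHMIN lowering, presumably because Zvfbfa only adds bf16 arithmetic. The check lines follow the update_llc_test_checks.py format; a plausible regeneration command (assuming a standard LLVM checkout, not shown in this page) is:

  llvm/utils/update_llc_test_checks.py llvm/test/CodeGen/RISCV/rvv/vfneg-sdnode.ll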
diff --git a/llvm/test/CodeGen/RISCV/rvv/vfneg-sdnode.ll b/llvm/test/CodeGen/RISCV/rvv/vfneg-sdnode.ll
index 9f456e9..c0b4916 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vfneg-sdnode.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vfneg-sdnode.ll
@@ -11,69 +11,153 @@
; RUN: llc -mtriple=riscv64 -mattr=+d,+zfhmin,+zvfhmin,+zvfbfmin,+v \
; RUN: -target-abi=lp64d -verify-machineinstrs < %s | FileCheck %s \
; RUN: --check-prefixes=CHECK,ZVFHMIN
+; RUN: llc -mtriple=riscv32 -mattr=+d,+zfhmin,+zvfhmin,+experimental-zvfbfa,+v \
+; RUN: -target-abi=ilp32d -verify-machineinstrs < %s | FileCheck %s \
+; RUN: --check-prefixes=CHECK,ZVFBFA
+; RUN: llc -mtriple=riscv64 -mattr=+d,+zfhmin,+zvfhmin,+experimental-zvfbfa,+v \
+; RUN: -target-abi=lp64d -verify-machineinstrs < %s | FileCheck %s \
+; RUN: --check-prefixes=CHECK,ZVFBFA
define <vscale x 1 x bfloat> @nxv1bf16(<vscale x 1 x bfloat> %va) {
-; CHECK-LABEL: nxv1bf16:
-; CHECK: # %bb.0:
-; CHECK-NEXT: lui a0, 8
-; CHECK-NEXT: vsetvli a1, zero, e16, mf4, ta, ma
-; CHECK-NEXT: vxor.vx v8, v8, a0
-; CHECK-NEXT: ret
+; ZVFH-LABEL: nxv1bf16:
+; ZVFH: # %bb.0:
+; ZVFH-NEXT: lui a0, 8
+; ZVFH-NEXT: vsetvli a1, zero, e16, mf4, ta, ma
+; ZVFH-NEXT: vxor.vx v8, v8, a0
+; ZVFH-NEXT: ret
+;
+; ZVFHMIN-LABEL: nxv1bf16:
+; ZVFHMIN: # %bb.0:
+; ZVFHMIN-NEXT: lui a0, 8
+; ZVFHMIN-NEXT: vsetvli a1, zero, e16, mf4, ta, ma
+; ZVFHMIN-NEXT: vxor.vx v8, v8, a0
+; ZVFHMIN-NEXT: ret
+;
+; ZVFBFA-LABEL: nxv1bf16:
+; ZVFBFA: # %bb.0:
+; ZVFBFA-NEXT: vsetvli a0, zero, e16alt, mf4, ta, ma
+; ZVFBFA-NEXT: vfneg.v v8, v8
+; ZVFBFA-NEXT: ret
%vb = fneg <vscale x 1 x bfloat> %va
ret <vscale x 1 x bfloat> %vb
}
define <vscale x 2 x bfloat> @nxv2bf16(<vscale x 2 x bfloat> %va) {
-; CHECK-LABEL: nxv2bf16:
-; CHECK: # %bb.0:
-; CHECK-NEXT: lui a0, 8
-; CHECK-NEXT: vsetvli a1, zero, e16, mf2, ta, ma
-; CHECK-NEXT: vxor.vx v8, v8, a0
-; CHECK-NEXT: ret
+; ZVFH-LABEL: nxv2bf16:
+; ZVFH: # %bb.0:
+; ZVFH-NEXT: lui a0, 8
+; ZVFH-NEXT: vsetvli a1, zero, e16, mf2, ta, ma
+; ZVFH-NEXT: vxor.vx v8, v8, a0
+; ZVFH-NEXT: ret
+;
+; ZVFHMIN-LABEL: nxv2bf16:
+; ZVFHMIN: # %bb.0:
+; ZVFHMIN-NEXT: lui a0, 8
+; ZVFHMIN-NEXT: vsetvli a1, zero, e16, mf2, ta, ma
+; ZVFHMIN-NEXT: vxor.vx v8, v8, a0
+; ZVFHMIN-NEXT: ret
+;
+; ZVFBFA-LABEL: nxv2bf16:
+; ZVFBFA: # %bb.0:
+; ZVFBFA-NEXT: vsetvli a0, zero, e16alt, mf2, ta, ma
+; ZVFBFA-NEXT: vfneg.v v8, v8
+; ZVFBFA-NEXT: ret
%vb = fneg <vscale x 2 x bfloat> %va
ret <vscale x 2 x bfloat> %vb
}
define <vscale x 4 x bfloat> @nxv4bf16(<vscale x 4 x bfloat> %va) {
-; CHECK-LABEL: nxv4bf16:
-; CHECK: # %bb.0:
-; CHECK-NEXT: lui a0, 8
-; CHECK-NEXT: vsetvli a1, zero, e16, m1, ta, ma
-; CHECK-NEXT: vxor.vx v8, v8, a0
-; CHECK-NEXT: ret
+; ZVFH-LABEL: nxv4bf16:
+; ZVFH: # %bb.0:
+; ZVFH-NEXT: lui a0, 8
+; ZVFH-NEXT: vsetvli a1, zero, e16, m1, ta, ma
+; ZVFH-NEXT: vxor.vx v8, v8, a0
+; ZVFH-NEXT: ret
+;
+; ZVFHMIN-LABEL: nxv4bf16:
+; ZVFHMIN: # %bb.0:
+; ZVFHMIN-NEXT: lui a0, 8
+; ZVFHMIN-NEXT: vsetvli a1, zero, e16, m1, ta, ma
+; ZVFHMIN-NEXT: vxor.vx v8, v8, a0
+; ZVFHMIN-NEXT: ret
+;
+; ZVFBFA-LABEL: nxv4bf16:
+; ZVFBFA: # %bb.0:
+; ZVFBFA-NEXT: vsetvli a0, zero, e16alt, m1, ta, ma
+; ZVFBFA-NEXT: vfneg.v v8, v8
+; ZVFBFA-NEXT: ret
%vb = fneg <vscale x 4 x bfloat> %va
ret <vscale x 4 x bfloat> %vb
}
define <vscale x 8 x bfloat> @nxv8bf16(<vscale x 8 x bfloat> %va) {
-; CHECK-LABEL: nxv8bf16:
-; CHECK: # %bb.0:
-; CHECK-NEXT: lui a0, 8
-; CHECK-NEXT: vsetvli a1, zero, e16, m2, ta, ma
-; CHECK-NEXT: vxor.vx v8, v8, a0
-; CHECK-NEXT: ret
+; ZVFH-LABEL: nxv8bf16:
+; ZVFH: # %bb.0:
+; ZVFH-NEXT: lui a0, 8
+; ZVFH-NEXT: vsetvli a1, zero, e16, m2, ta, ma
+; ZVFH-NEXT: vxor.vx v8, v8, a0
+; ZVFH-NEXT: ret
+;
+; ZVFHMIN-LABEL: nxv8bf16:
+; ZVFHMIN: # %bb.0:
+; ZVFHMIN-NEXT: lui a0, 8
+; ZVFHMIN-NEXT: vsetvli a1, zero, e16, m2, ta, ma
+; ZVFHMIN-NEXT: vxor.vx v8, v8, a0
+; ZVFHMIN-NEXT: ret
+;
+; ZVFBFA-LABEL: nxv8bf16:
+; ZVFBFA: # %bb.0:
+; ZVFBFA-NEXT: vsetvli a0, zero, e16alt, m2, ta, ma
+; ZVFBFA-NEXT: vfneg.v v8, v8
+; ZVFBFA-NEXT: ret
%vb = fneg <vscale x 8 x bfloat> %va
ret <vscale x 8 x bfloat> %vb
}
define <vscale x 16 x bfloat> @nxv16bf16(<vscale x 16 x bfloat> %va) {
-; CHECK-LABEL: nxv16bf16:
-; CHECK: # %bb.0:
-; CHECK-NEXT: lui a0, 8
-; CHECK-NEXT: vsetvli a1, zero, e16, m4, ta, ma
-; CHECK-NEXT: vxor.vx v8, v8, a0
-; CHECK-NEXT: ret
+; ZVFH-LABEL: nxv16bf16:
+; ZVFH: # %bb.0:
+; ZVFH-NEXT: lui a0, 8
+; ZVFH-NEXT: vsetvli a1, zero, e16, m4, ta, ma
+; ZVFH-NEXT: vxor.vx v8, v8, a0
+; ZVFH-NEXT: ret
+;
+; ZVFHMIN-LABEL: nxv16bf16:
+; ZVFHMIN: # %bb.0:
+; ZVFHMIN-NEXT: lui a0, 8
+; ZVFHMIN-NEXT: vsetvli a1, zero, e16, m4, ta, ma
+; ZVFHMIN-NEXT: vxor.vx v8, v8, a0
+; ZVFHMIN-NEXT: ret
+;
+; ZVFBFA-LABEL: nxv16bf16:
+; ZVFBFA: # %bb.0:
+; ZVFBFA-NEXT: vsetvli a0, zero, e16alt, m4, ta, ma
+; ZVFBFA-NEXT: vfneg.v v8, v8
+; ZVFBFA-NEXT: ret
%vb = fneg <vscale x 16 x bfloat> %va
ret <vscale x 16 x bfloat> %vb
}
define <vscale x 32 x bfloat> @nxv32bf16(<vscale x 32 x bfloat> %va) {
-; CHECK-LABEL: nxv32bf16:
-; CHECK: # %bb.0:
-; CHECK-NEXT: lui a0, 8
-; CHECK-NEXT: vsetvli a1, zero, e16, m8, ta, ma
-; CHECK-NEXT: vxor.vx v8, v8, a0
-; CHECK-NEXT: ret
+; ZVFH-LABEL: nxv32bf16:
+; ZVFH: # %bb.0:
+; ZVFH-NEXT: lui a0, 8
+; ZVFH-NEXT: vsetvli a1, zero, e16, m8, ta, ma
+; ZVFH-NEXT: vxor.vx v8, v8, a0
+; ZVFH-NEXT: ret
+;
+; ZVFHMIN-LABEL: nxv32bf16:
+; ZVFHMIN: # %bb.0:
+; ZVFHMIN-NEXT: lui a0, 8
+; ZVFHMIN-NEXT: vsetvli a1, zero, e16, m8, ta, ma
+; ZVFHMIN-NEXT: vxor.vx v8, v8, a0
+; ZVFHMIN-NEXT: ret
+;
+; ZVFBFA-LABEL: nxv32bf16:
+; ZVFBFA: # %bb.0:
+; ZVFBFA-NEXT: vsetvli a0, zero, e16alt, m8, ta, ma
+; ZVFBFA-NEXT: vfneg.v v8, v8
+; ZVFBFA-NEXT: ret
%vb = fneg <vscale x 32 x bfloat> %va
ret <vscale x 32 x bfloat> %vb
}
@@ -91,6 +175,13 @@ define <vscale x 1 x half> @vfneg_vv_nxv1f16(<vscale x 1 x half> %va) {
; ZVFHMIN-NEXT: vsetvli a1, zero, e16, mf4, ta, ma
; ZVFHMIN-NEXT: vxor.vx v8, v8, a0
; ZVFHMIN-NEXT: ret
+;
+; ZVFBFA-LABEL: vfneg_vv_nxv1f16:
+; ZVFBFA: # %bb.0:
+; ZVFBFA-NEXT: lui a0, 8
+; ZVFBFA-NEXT: vsetvli a1, zero, e16, mf4, ta, ma
+; ZVFBFA-NEXT: vxor.vx v8, v8, a0
+; ZVFBFA-NEXT: ret
%vb = fneg <vscale x 1 x half> %va
ret <vscale x 1 x half> %vb
}
@@ -108,6 +199,13 @@ define <vscale x 2 x half> @vfneg_vv_nxv2f16(<vscale x 2 x half> %va) {
; ZVFHMIN-NEXT: vsetvli a1, zero, e16, mf2, ta, ma
; ZVFHMIN-NEXT: vxor.vx v8, v8, a0
; ZVFHMIN-NEXT: ret
+;
+; ZVFBFA-LABEL: vfneg_vv_nxv2f16:
+; ZVFBFA: # %bb.0:
+; ZVFBFA-NEXT: lui a0, 8
+; ZVFBFA-NEXT: vsetvli a1, zero, e16, mf2, ta, ma
+; ZVFBFA-NEXT: vxor.vx v8, v8, a0
+; ZVFBFA-NEXT: ret
%vb = fneg <vscale x 2 x half> %va
ret <vscale x 2 x half> %vb
}
@@ -125,6 +223,13 @@ define <vscale x 4 x half> @vfneg_vv_nxv4f16(<vscale x 4 x half> %va) {
; ZVFHMIN-NEXT: vsetvli a1, zero, e16, m1, ta, ma
; ZVFHMIN-NEXT: vxor.vx v8, v8, a0
; ZVFHMIN-NEXT: ret
+;
+; ZVFBFA-LABEL: vfneg_vv_nxv4f16:
+; ZVFBFA: # %bb.0:
+; ZVFBFA-NEXT: lui a0, 8
+; ZVFBFA-NEXT: vsetvli a1, zero, e16, m1, ta, ma
+; ZVFBFA-NEXT: vxor.vx v8, v8, a0
+; ZVFBFA-NEXT: ret
%vb = fneg <vscale x 4 x half> %va
ret <vscale x 4 x half> %vb
}
@@ -142,6 +247,13 @@ define <vscale x 8 x half> @vfneg_vv_nxv8f16(<vscale x 8 x half> %va) {
; ZVFHMIN-NEXT: vsetvli a1, zero, e16, m2, ta, ma
; ZVFHMIN-NEXT: vxor.vx v8, v8, a0
; ZVFHMIN-NEXT: ret
+;
+; ZVFBFA-LABEL: vfneg_vv_nxv8f16:
+; ZVFBFA: # %bb.0:
+; ZVFBFA-NEXT: lui a0, 8
+; ZVFBFA-NEXT: vsetvli a1, zero, e16, m2, ta, ma
+; ZVFBFA-NEXT: vxor.vx v8, v8, a0
+; ZVFBFA-NEXT: ret
%vb = fneg <vscale x 8 x half> %va
ret <vscale x 8 x half> %vb
}
@@ -159,6 +271,13 @@ define <vscale x 16 x half> @vfneg_vv_nxv16f16(<vscale x 16 x half> %va) {
; ZVFHMIN-NEXT: vsetvli a1, zero, e16, m4, ta, ma
; ZVFHMIN-NEXT: vxor.vx v8, v8, a0
; ZVFHMIN-NEXT: ret
+;
+; ZVFBFA-LABEL: vfneg_vv_nxv16f16:
+; ZVFBFA: # %bb.0:
+; ZVFBFA-NEXT: lui a0, 8
+; ZVFBFA-NEXT: vsetvli a1, zero, e16, m4, ta, ma
+; ZVFBFA-NEXT: vxor.vx v8, v8, a0
+; ZVFBFA-NEXT: ret
%vb = fneg <vscale x 16 x half> %va
ret <vscale x 16 x half> %vb
}
@@ -176,6 +295,13 @@ define <vscale x 32 x half> @vfneg_vv_nxv32f16(<vscale x 32 x half> %va) {
; ZVFHMIN-NEXT: vsetvli a1, zero, e16, m8, ta, ma
; ZVFHMIN-NEXT: vxor.vx v8, v8, a0
; ZVFHMIN-NEXT: ret
+;
+; ZVFBFA-LABEL: vfneg_vv_nxv32f16:
+; ZVFBFA: # %bb.0:
+; ZVFBFA-NEXT: lui a0, 8
+; ZVFBFA-NEXT: vsetvli a1, zero, e16, m8, ta, ma
+; ZVFBFA-NEXT: vxor.vx v8, v8, a0
+; ZVFBFA-NEXT: ret
%vb = fneg <vscale x 32 x half> %va
ret <vscale x 32 x half> %vb
}
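
For reference, the two lowerings checked above negate a 16-bit float in equivalent ways: fneg only flips the sign bit (bit 15), so without native bf16 support the backend materializes the 0x8000 mask (lui a0, 8 yields 8 << 12 = 0x8000) and applies an integer vxor.vx, whereas with Zvfbfa the negate is a single vfneg.v executed in the alternate bf16 element format. A minimal side-by-side sketch, taken from the nxv1bf16 checks above:

  ; ZVFH/ZVFHMIN: integer sign-bit flip
  lui     a0, 8                          ; a0 = 0x8000, the 16-bit sign-bit mask
  vsetvli a1, zero, e16, mf4, ta, ma
  vxor.vx v8, v8, a0                     ; flip bit 15 of each element

  ; ZVFBFA: native negate in the alternate (bf16) element format
  vsetvli a0, zero, e16alt, mf4, ta, ma
  vfneg.v v8, v8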