aboutsummaryrefslogtreecommitdiff
diff options
context:
space:
mode:
authorChih-Min Chao <chihmin.chao@sifive.com>2020-04-01 00:45:20 -0700
committerChih-Min Chao <chihmin.chao@sifive.com>2020-05-04 09:50:05 -0700
commitb216e03dd1c10d17693053a1eb83a20fff3242ea (patch)
treee143204aa5fa83b6bac321026e10f791444d39a2
parentd78999f0c7c44317b0c3fc2d34217418b5dbe086 (diff)
downloadspike-b216e03dd1c10d17693053a1eb83a20fff3242ea.zip
spike-b216e03dd1c10d17693053a1eb83a20fff3242ea.tar.gz
spike-b216e03dd1c10d17693053a1eb83a20fff3242ea.tar.bz2
rvv: fp16: support .vv instructions
Signed-off-by: Chih-Min Chao <chihmin.chao@sifive.com>
-rw-r--r--riscv/decode.h14
-rw-r--r--riscv/insns/vfadd_vv.h3
-rw-r--r--riscv/insns/vfclass_v.h3
-rw-r--r--riscv/insns/vfcvt_xu_f_v.h3
-rw-r--r--riscv/insns/vfdiv_vv.h3
-rw-r--r--riscv/insns/vfdot_vv.h3
-rw-r--r--riscv/insns/vfmacc_vv.h3
-rw-r--r--riscv/insns/vfmadd_vv.h3
-rw-r--r--riscv/insns/vfmax_vv.h3
-rw-r--r--riscv/insns/vfmin_vv.h3
-rw-r--r--riscv/insns/vfmsac_vv.h3
-rw-r--r--riscv/insns/vfmsub_vv.h3
-rw-r--r--riscv/insns/vfmul_vv.h3
-rw-r--r--riscv/insns/vfnmacc_vv.h3
-rw-r--r--riscv/insns/vfnmadd_vv.h3
-rw-r--r--riscv/insns/vfnmsac_vv.h3
-rw-r--r--riscv/insns/vfnmsub_vv.h3
-rw-r--r--riscv/insns/vfsgnj_vv.h3
-rw-r--r--riscv/insns/vfsgnjn_vv.h3
-rw-r--r--riscv/insns/vfsgnjx_vv.h3
-rw-r--r--riscv/insns/vfsqrt_v.h3
-rw-r--r--riscv/insns/vfsub_vv.h3
22 files changed, 74 insertions, 3 deletions
diff --git a/riscv/decode.h b/riscv/decode.h
index 6fa42a0..f455759 100644
--- a/riscv/decode.h
+++ b/riscv/decode.h
@@ -1662,7 +1662,8 @@ for (reg_t i = 0; i < vlmax && P.VU.vl != 0; ++i) { \
//
#define VI_VFP_COMMON \
require_fp; \
- require((P.VU.vsew == e32 && p->supports_extension('F')) || \
+ require((P.VU.vsew == e16 && p->supports_extension(EXT_ZFH)) || \
+ (P.VU.vsew == e32 && p->supports_extension('F')) || \
(P.VU.vsew == e64 && p->supports_extension('D'))); \
require_vector;\
reg_t vl = P.VU.vl; \
@@ -1729,10 +1730,18 @@ for (reg_t i = 0; i < vlmax && P.VU.vl != 0; ++i) { \
P.VU.vstart = 0; \
set_fp_exceptions;
-#define VI_VFP_VV_LOOP(BODY32, BODY64) \
+#define VI_VFP_VV_LOOP(BODY16, BODY32, BODY64) \
VI_CHECK_SSS(true); \
VI_VFP_LOOP_BASE \
switch(P.VU.vsew) { \
+ case e16: {\
+ float16_t &vd = P.VU.elt<float16_t>(rd_num, i, true); \
+ float16_t vs1 = P.VU.elt<float16_t>(rs1_num, i); \
+ float16_t vs2 = P.VU.elt<float16_t>(rs2_num, i); \
+ BODY16; \
+ set_fp_exceptions; \
+ break; \
+ }\
case e32: {\
float32_t &vd = P.VU.elt<float32_t>(rd_num, i, true); \
float32_t vs1 = P.VU.elt<float32_t>(rs1_num, i); \
@@ -1749,7 +1758,6 @@ for (reg_t i = 0; i < vlmax && P.VU.vl != 0; ++i) { \
set_fp_exceptions; \
break; \
}\
- case e16: \
default: \
require(0); \
break; \
diff --git a/riscv/insns/vfadd_vv.h b/riscv/insns/vfadd_vv.h
index b333a8a..ce94921 100644
--- a/riscv/insns/vfadd_vv.h
+++ b/riscv/insns/vfadd_vv.h
@@ -1,6 +1,9 @@
// vfadd.vv vd, vs2, vs1
VI_VFP_VV_LOOP
({
+ vd = f16_add(vs1, vs2);
+},
+{
vd = f32_add(vs1, vs2);
},
{
diff --git a/riscv/insns/vfclass_v.h b/riscv/insns/vfclass_v.h
index 8ee092f..1bd5f5f 100644
--- a/riscv/insns/vfclass_v.h
+++ b/riscv/insns/vfclass_v.h
@@ -1,6 +1,9 @@
// vfclass.v vd, vs2, vm
VI_VFP_VV_LOOP
({
+ vd.v = f16_classify(vs2);
+},
+{
vd.v = f32_classify(vs2);
},
{
diff --git a/riscv/insns/vfcvt_xu_f_v.h b/riscv/insns/vfcvt_xu_f_v.h
index 5f19f90..febe8e2 100644
--- a/riscv/insns/vfcvt_xu_f_v.h
+++ b/riscv/insns/vfcvt_xu_f_v.h
@@ -1,6 +1,9 @@
// vfcvt.xu.f.v vd, vs2, vm
VI_VFP_VV_LOOP
({
+ P.VU.elt<uint32_t>(rd_num, i) = f16_to_ui32(vs2, STATE.frm, true);
+},
+{
P.VU.elt<uint32_t>(rd_num, i) = f32_to_ui32(vs2, STATE.frm, true);
},
{
diff --git a/riscv/insns/vfdiv_vv.h b/riscv/insns/vfdiv_vv.h
index 8a49a91..c66d751 100644
--- a/riscv/insns/vfdiv_vv.h
+++ b/riscv/insns/vfdiv_vv.h
@@ -1,6 +1,9 @@
// vfdiv.vv vd, vs2, vs1
VI_VFP_VV_LOOP
({
+ vd = f16_div(vs2, vs1);
+},
+{
vd = f32_div(vs2, vs1);
},
{
diff --git a/riscv/insns/vfdot_vv.h b/riscv/insns/vfdot_vv.h
index 85d0b8a..8f5225a 100644
--- a/riscv/insns/vfdot_vv.h
+++ b/riscv/insns/vfdot_vv.h
@@ -1,6 +1,9 @@
// vfdot.vv vd, vs2, vs1
VI_VFP_VV_LOOP
({
+ vd = f16_add(vd, f16_mul(vs2, vs1));
+},
+{
vd = f32_add(vd, f32_mul(vs2, vs1));
},
{
diff --git a/riscv/insns/vfmacc_vv.h b/riscv/insns/vfmacc_vv.h
index f1caf33..499b1d4 100644
--- a/riscv/insns/vfmacc_vv.h
+++ b/riscv/insns/vfmacc_vv.h
@@ -1,6 +1,9 @@
// vfmacc.vv vd, rs1, vs2, vm # vd[i] = +(vs2[i] * vs1[i]) + vd[i]
VI_VFP_VV_LOOP
({
+ vd = f16_mulAdd(vs1, vs2, vd);
+},
+{
vd = f32_mulAdd(vs1, vs2, vd);
},
{
diff --git a/riscv/insns/vfmadd_vv.h b/riscv/insns/vfmadd_vv.h
index a095c38..7ef734f 100644
--- a/riscv/insns/vfmadd_vv.h
+++ b/riscv/insns/vfmadd_vv.h
@@ -1,6 +1,9 @@
// vfmadd: vd[i] = +(vd[i] * vs1[i]) + vs2[i]
VI_VFP_VV_LOOP
({
+ vd = f16_mulAdd(vd, vs1, vs2);
+},
+{
vd = f32_mulAdd(vd, vs1, vs2);
},
{
diff --git a/riscv/insns/vfmax_vv.h b/riscv/insns/vfmax_vv.h
index 2329e74..6439c89 100644
--- a/riscv/insns/vfmax_vv.h
+++ b/riscv/insns/vfmax_vv.h
@@ -1,6 +1,9 @@
// vfmax
VI_VFP_VV_LOOP
({
+ vd = f16_max(vs2, vs1);
+},
+{
vd = f32_max(vs2, vs1);
},
{
diff --git a/riscv/insns/vfmin_vv.h b/riscv/insns/vfmin_vv.h
index 399b563..882a774 100644
--- a/riscv/insns/vfmin_vv.h
+++ b/riscv/insns/vfmin_vv.h
@@ -1,6 +1,9 @@
// vfmin vd, vs2, vs1
VI_VFP_VV_LOOP
({
+ vd = f16_min(vs2, vs1);
+},
+{
vd = f32_min(vs2, vs1);
},
{
diff --git a/riscv/insns/vfmsac_vv.h b/riscv/insns/vfmsac_vv.h
index 9b4ed9f..3bb50e5 100644
--- a/riscv/insns/vfmsac_vv.h
+++ b/riscv/insns/vfmsac_vv.h
@@ -1,6 +1,9 @@
// vfmsac: vd[i] = +(vs1[i] * vs2[i]) - vd[i]
VI_VFP_VV_LOOP
({
+ vd = f16_mulAdd(vs1, vs2, f16(vd.v ^ F16_SIGN));
+},
+{
vd = f32_mulAdd(vs1, vs2, f32(vd.v ^ F32_SIGN));
},
{
diff --git a/riscv/insns/vfmsub_vv.h b/riscv/insns/vfmsub_vv.h
index f8e0b3d..3cac937 100644
--- a/riscv/insns/vfmsub_vv.h
+++ b/riscv/insns/vfmsub_vv.h
@@ -1,6 +1,9 @@
// vfmsub: vd[i] = +(vd[i] * vs1[i]) - vs2[i]
VI_VFP_VV_LOOP
({
+ vd = f16_mulAdd(vd, vs1, f16(vs2.v ^ F16_SIGN));
+},
+{
vd = f32_mulAdd(vd, vs1, f32(vs2.v ^ F32_SIGN));
},
{
diff --git a/riscv/insns/vfmul_vv.h b/riscv/insns/vfmul_vv.h
index 0e4d499..7930fd0 100644
--- a/riscv/insns/vfmul_vv.h
+++ b/riscv/insns/vfmul_vv.h
@@ -1,6 +1,9 @@
// vfmul.vv vd, vs1, vs2, vm
VI_VFP_VV_LOOP
({
+ vd = f16_mul(vs1, vs2);
+},
+{
vd = f32_mul(vs1, vs2);
},
{
diff --git a/riscv/insns/vfnmacc_vv.h b/riscv/insns/vfnmacc_vv.h
index 62a1486..7200e06 100644
--- a/riscv/insns/vfnmacc_vv.h
+++ b/riscv/insns/vfnmacc_vv.h
@@ -1,6 +1,9 @@
// vfnmacc: vd[i] = -(vs1[i] * vs2[i]) - vd[i]
VI_VFP_VV_LOOP
({
+ vd = f16_mulAdd(f16(vs2.v ^ F16_SIGN), vs1, f16(vd.v ^ F16_SIGN));
+},
+{
vd = f32_mulAdd(f32(vs2.v ^ F32_SIGN), vs1, f32(vd.v ^ F32_SIGN));
},
{
diff --git a/riscv/insns/vfnmadd_vv.h b/riscv/insns/vfnmadd_vv.h
index fc70574..7160ed7 100644
--- a/riscv/insns/vfnmadd_vv.h
+++ b/riscv/insns/vfnmadd_vv.h
@@ -1,6 +1,9 @@
// vfnmadd: vd[i] = -(vd[i] * vs1[i]) - vs2[i]
VI_VFP_VV_LOOP
({
+ vd = f16_mulAdd(f16(vd.v ^ F16_SIGN), vs1, f16(vs2.v ^ F16_SIGN));
+},
+{
vd = f32_mulAdd(f32(vd.v ^ F32_SIGN), vs1, f32(vs2.v ^ F32_SIGN));
},
{
diff --git a/riscv/insns/vfnmsac_vv.h b/riscv/insns/vfnmsac_vv.h
index 795dc38..47db61d 100644
--- a/riscv/insns/vfnmsac_vv.h
+++ b/riscv/insns/vfnmsac_vv.h
@@ -1,6 +1,9 @@
// vfnmsac.vv vd, vs1, vs2, vm # vd[i] = -(vs2[i] * vs1[i]) + vd[i]
VI_VFP_VV_LOOP
({
+ vd = f16_mulAdd(f16(vs1.v ^ F16_SIGN), vs2, vd);
+},
+{
vd = f32_mulAdd(f32(vs1.v ^ F32_SIGN), vs2, vd);
},
{
diff --git a/riscv/insns/vfnmsub_vv.h b/riscv/insns/vfnmsub_vv.h
index ff4a9b5..2a45c8f 100644
--- a/riscv/insns/vfnmsub_vv.h
+++ b/riscv/insns/vfnmsub_vv.h
@@ -1,6 +1,9 @@
// vfnmsub: vd[i] = -(vd[i] * vs1[i]) + vs2[i]
VI_VFP_VV_LOOP
({
+ vd = f16_mulAdd(f16(vd.v ^ F16_SIGN), vs1, vs2);
+},
+{
vd = f32_mulAdd(f32(vd.v ^ F32_SIGN), vs1, vs2);
},
{
diff --git a/riscv/insns/vfsgnj_vv.h b/riscv/insns/vfsgnj_vv.h
index 12d3d43..722cb29 100644
--- a/riscv/insns/vfsgnj_vv.h
+++ b/riscv/insns/vfsgnj_vv.h
@@ -1,6 +1,9 @@
// vfsgnj
VI_VFP_VV_LOOP
({
+ vd = fsgnj16(vs2.v, vs1.v, false, false);
+},
+{
vd = fsgnj32(vs2.v, vs1.v, false, false);
},
{
diff --git a/riscv/insns/vfsgnjn_vv.h b/riscv/insns/vfsgnjn_vv.h
index a16acf7..1d91f69 100644
--- a/riscv/insns/vfsgnjn_vv.h
+++ b/riscv/insns/vfsgnjn_vv.h
@@ -1,6 +1,9 @@
// vfsgnjn
VI_VFP_VV_LOOP
({
+ vd = fsgnj16(vs2.v, vs1.v, true, false);
+},
+{
vd = fsgnj32(vs2.v, vs1.v, true, false);
},
{
diff --git a/riscv/insns/vfsgnjx_vv.h b/riscv/insns/vfsgnjx_vv.h
index 9dbe078..b04b845 100644
--- a/riscv/insns/vfsgnjx_vv.h
+++ b/riscv/insns/vfsgnjx_vv.h
@@ -1,6 +1,9 @@
// vfsgnjx
VI_VFP_VV_LOOP
({
+ vd = fsgnj16(vs2.v, vs1.v, false, true);
+},
+{
vd = fsgnj32(vs2.v, vs1.v, false, true);
},
{
diff --git a/riscv/insns/vfsqrt_v.h b/riscv/insns/vfsqrt_v.h
index 4a36932..f121308 100644
--- a/riscv/insns/vfsqrt_v.h
+++ b/riscv/insns/vfsqrt_v.h
@@ -1,6 +1,9 @@
// vfsqrt.v vd, vs2, vm
VI_VFP_VV_LOOP
({
+ vd = f16_sqrt(vs2);
+},
+{
vd = f32_sqrt(vs2);
},
{
diff --git a/riscv/insns/vfsub_vv.h b/riscv/insns/vfsub_vv.h
index 40545fb..b0403f1 100644
--- a/riscv/insns/vfsub_vv.h
+++ b/riscv/insns/vfsub_vv.h
@@ -1,6 +1,9 @@
// vfsub.vv vd, vs2, vs1
VI_VFP_VV_LOOP
({
+ vd = f16_sub(vs2, vs1);
+},
+{
vd = f32_sub(vs2, vs1);
},
{