about summary refs log tree commit diff
path: root/riscv
diff options
context:
space:
mode:
Diffstat (limited to 'riscv')
-rw-r--r--riscv/decode.h17
-rw-r--r--riscv/insns/vfadd_vv.h3
-rw-r--r--riscv/insns/vfclass_v.h3
-rw-r--r--riscv/insns/vfcvt_xu_f_v.h3
-rw-r--r--riscv/insns/vfdiv_vv.h3
-rw-r--r--riscv/insns/vfdot_vv.h3
-rw-r--r--riscv/insns/vfmacc_vv.h3
-rw-r--r--riscv/insns/vfmadd_vv.h3
-rw-r--r--riscv/insns/vfmax_vv.h3
-rw-r--r--riscv/insns/vfmin_vv.h3
-rw-r--r--riscv/insns/vfmsac_vv.h3
-rw-r--r--riscv/insns/vfmsub_vv.h3
-rw-r--r--riscv/insns/vfmul_vv.h3
-rw-r--r--riscv/insns/vfnmacc_vv.h3
-rw-r--r--riscv/insns/vfnmadd_vv.h3
-rw-r--r--riscv/insns/vfnmsac_vv.h3
-rw-r--r--riscv/insns/vfnmsub_vv.h3
-rw-r--r--riscv/insns/vfsgnj_vv.h3
-rw-r--r--riscv/insns/vfsgnjn_vv.h3
-rw-r--r--riscv/insns/vfsgnjx_vv.h3
-rw-r--r--riscv/insns/vfsqrt_v.h3
-rw-r--r--riscv/insns/vfsub_vv.h3
22 files changed, 75 insertions, 5 deletions
diff --git a/riscv/decode.h b/riscv/decode.h
index 495ffc4..bdee837 100644
--- a/riscv/decode.h
+++ b/riscv/decode.h
@@ -1595,9 +1595,9 @@ for (reg_t i = 0; i < vlmax; ++i) { \
// vector: vfp helper
//
#define VI_VFP_COMMON \
- require_extension('F'); \
require_fp; \
- require(P.VU.vsew == 32); \
+ require((P.VU.vsew == e32 && p->supports_extension('F')) || \
+ (P.VU.vsew == e64 && p->supports_extension('D'))); \
require_vector;\
reg_t vl = P.VU.vl; \
reg_t rd_num = insn.rd(); \
@@ -1669,7 +1669,7 @@ for (reg_t i = 0; i < vlmax; ++i) { \
P.VU.vstart = 0; \
set_fp_exceptions;
-#define VI_VFP_VV_LOOP(BODY) \
+#define VI_VFP_VV_LOOP(BODY32, BODY64) \
VI_CHECK_SSS(true); \
VI_VFP_LOOP_BASE \
switch(P.VU.vsew) { \
@@ -1677,12 +1677,19 @@ for (reg_t i = 0; i < vlmax; ++i) { \
float32_t &vd = P.VU.elt<float32_t>(rd_num, i); \
float32_t vs1 = P.VU.elt<float32_t>(rs1_num, i); \
float32_t vs2 = P.VU.elt<float32_t>(rs2_num, i); \
- BODY; \
+ BODY32; \
+ set_fp_exceptions; \
+ break; \
+ }\
+ case e64: {\
+ float64_t &vd = P.VU.elt<float64_t>(rd_num, i); \
+ float64_t vs1 = P.VU.elt<float64_t>(rs1_num, i); \
+ float64_t vs2 = P.VU.elt<float64_t>(rs2_num, i); \
+ BODY64; \
set_fp_exceptions; \
break; \
}\
case e16: \
- case e8: \
default: \
require(0); \
break; \
diff --git a/riscv/insns/vfadd_vv.h b/riscv/insns/vfadd_vv.h
index de0ae53..b333a8a 100644
--- a/riscv/insns/vfadd_vv.h
+++ b/riscv/insns/vfadd_vv.h
@@ -2,4 +2,7 @@
VI_VFP_VV_LOOP
({
vd = f32_add(vs1, vs2);
+},
+{
+ vd = f64_add(vs1, vs2);
})
diff --git a/riscv/insns/vfclass_v.h b/riscv/insns/vfclass_v.h
index 75f29a2..8ee092f 100644
--- a/riscv/insns/vfclass_v.h
+++ b/riscv/insns/vfclass_v.h
@@ -2,4 +2,7 @@
VI_VFP_VV_LOOP
({
vd.v = f32_classify(vs2);
+},
+{
+ vd.v = f64_classify(vs2);
})
diff --git a/riscv/insns/vfcvt_xu_f_v.h b/riscv/insns/vfcvt_xu_f_v.h
index 76c7735..5f19f90 100644
--- a/riscv/insns/vfcvt_xu_f_v.h
+++ b/riscv/insns/vfcvt_xu_f_v.h
@@ -2,4 +2,7 @@
VI_VFP_VV_LOOP
({
P.VU.elt<uint32_t>(rd_num, i) = f32_to_ui32(vs2, STATE.frm, true);
+},
+{
+ P.VU.elt<uint64_t>(rd_num, i) = f64_to_ui64(vs2, STATE.frm, true);
})
diff --git a/riscv/insns/vfdiv_vv.h b/riscv/insns/vfdiv_vv.h
index c20ff1d..8a49a91 100644
--- a/riscv/insns/vfdiv_vv.h
+++ b/riscv/insns/vfdiv_vv.h
@@ -2,4 +2,7 @@
VI_VFP_VV_LOOP
({
vd = f32_div(vs2, vs1);
+},
+{
+ vd = f64_div(vs2, vs1);
})
diff --git a/riscv/insns/vfdot_vv.h b/riscv/insns/vfdot_vv.h
index 11c8bce..85d0b8a 100644
--- a/riscv/insns/vfdot_vv.h
+++ b/riscv/insns/vfdot_vv.h
@@ -2,4 +2,7 @@
VI_VFP_VV_LOOP
({
vd = f32_add(vd, f32_mul(vs2, vs1));
+},
+{
+ vd = f64_add(vd, f64_mul(vs2, vs1));
})
diff --git a/riscv/insns/vfmacc_vv.h b/riscv/insns/vfmacc_vv.h
index 663a648..f1caf33 100644
--- a/riscv/insns/vfmacc_vv.h
+++ b/riscv/insns/vfmacc_vv.h
@@ -2,4 +2,7 @@
VI_VFP_VV_LOOP
({
vd = f32_mulAdd(vs1, vs2, vd);
+},
+{
+ vd = f64_mulAdd(vs1, vs2, vd);
})
diff --git a/riscv/insns/vfmadd_vv.h b/riscv/insns/vfmadd_vv.h
index c967ec3..a095c38 100644
--- a/riscv/insns/vfmadd_vv.h
+++ b/riscv/insns/vfmadd_vv.h
@@ -2,4 +2,7 @@
VI_VFP_VV_LOOP
({
vd = f32_mulAdd(vd, vs1, vs2);
+},
+{
+ vd = f64_mulAdd(vd, vs1, vs2);
})
diff --git a/riscv/insns/vfmax_vv.h b/riscv/insns/vfmax_vv.h
index 6d12f08..2329e74 100644
--- a/riscv/insns/vfmax_vv.h
+++ b/riscv/insns/vfmax_vv.h
@@ -2,4 +2,7 @@
VI_VFP_VV_LOOP
({
vd = f32_max(vs2, vs1);
+},
+{
+ vd = f64_max(vs2, vs1);
})
diff --git a/riscv/insns/vfmin_vv.h b/riscv/insns/vfmin_vv.h
index 65d20ff..399b563 100644
--- a/riscv/insns/vfmin_vv.h
+++ b/riscv/insns/vfmin_vv.h
@@ -2,4 +2,7 @@
VI_VFP_VV_LOOP
({
vd = f32_min(vs2, vs1);
+},
+{
+ vd = f64_min(vs2, vs1);
})
diff --git a/riscv/insns/vfmsac_vv.h b/riscv/insns/vfmsac_vv.h
index 952c12e..9b4ed9f 100644
--- a/riscv/insns/vfmsac_vv.h
+++ b/riscv/insns/vfmsac_vv.h
@@ -2,4 +2,7 @@
VI_VFP_VV_LOOP
({
vd = f32_mulAdd(vs1, vs2, f32(vd.v ^ F32_SIGN));
+},
+{
+ vd = f64_mulAdd(vs1, vs2, f64(vd.v ^ F64_SIGN));
})
diff --git a/riscv/insns/vfmsub_vv.h b/riscv/insns/vfmsub_vv.h
index a58f1e3..f8e0b3d 100644
--- a/riscv/insns/vfmsub_vv.h
+++ b/riscv/insns/vfmsub_vv.h
@@ -2,4 +2,7 @@
VI_VFP_VV_LOOP
({
vd = f32_mulAdd(vd, vs1, f32(vs2.v ^ F32_SIGN));
+},
+{
+ vd = f64_mulAdd(vd, vs1, f64(vs2.v ^ F64_SIGN));
})
diff --git a/riscv/insns/vfmul_vv.h b/riscv/insns/vfmul_vv.h
index 259dc01..0e4d499 100644
--- a/riscv/insns/vfmul_vv.h
+++ b/riscv/insns/vfmul_vv.h
@@ -2,4 +2,7 @@
VI_VFP_VV_LOOP
({
vd = f32_mul(vs1, vs2);
+},
+{
+ vd = f64_mul(vs1, vs2);
})
diff --git a/riscv/insns/vfnmacc_vv.h b/riscv/insns/vfnmacc_vv.h
index b950df9..62a1486 100644
--- a/riscv/insns/vfnmacc_vv.h
+++ b/riscv/insns/vfnmacc_vv.h
@@ -2,4 +2,7 @@
VI_VFP_VV_LOOP
({
vd = f32_mulAdd(f32(vs2.v ^ F32_SIGN), vs1, f32(vd.v ^ F32_SIGN));
+},
+{
+ vd = f64_mulAdd(f64(vs2.v ^ F64_SIGN), vs1, f64(vd.v ^ F64_SIGN));
})
diff --git a/riscv/insns/vfnmadd_vv.h b/riscv/insns/vfnmadd_vv.h
index f96d102..fc70574 100644
--- a/riscv/insns/vfnmadd_vv.h
+++ b/riscv/insns/vfnmadd_vv.h
@@ -2,4 +2,7 @@
VI_VFP_VV_LOOP
({
vd = f32_mulAdd(f32(vd.v ^ F32_SIGN), vs1, f32(vs2.v ^ F32_SIGN));
+},
+{
+ vd = f64_mulAdd(f64(vd.v ^ F64_SIGN), vs1, f64(vs2.v ^ F64_SIGN));
})
diff --git a/riscv/insns/vfnmsac_vv.h b/riscv/insns/vfnmsac_vv.h
index 0ecd648..795dc38 100644
--- a/riscv/insns/vfnmsac_vv.h
+++ b/riscv/insns/vfnmsac_vv.h
@@ -2,4 +2,7 @@
VI_VFP_VV_LOOP
({
vd = f32_mulAdd(f32(vs1.v ^ F32_SIGN), vs2, vd);
+},
+{
+ vd = f64_mulAdd(f64(vs1.v ^ F64_SIGN), vs2, vd);
})
diff --git a/riscv/insns/vfnmsub_vv.h b/riscv/insns/vfnmsub_vv.h
index da9f59c..ff4a9b5 100644
--- a/riscv/insns/vfnmsub_vv.h
+++ b/riscv/insns/vfnmsub_vv.h
@@ -2,4 +2,7 @@
VI_VFP_VV_LOOP
({
vd = f32_mulAdd(f32(vd.v ^ F32_SIGN), vs1, vs2);
+},
+{
+ vd = f64_mulAdd(f64(vd.v ^ F64_SIGN), vs1, vs2);
})
diff --git a/riscv/insns/vfsgnj_vv.h b/riscv/insns/vfsgnj_vv.h
index 050dd9c..12d3d43 100644
--- a/riscv/insns/vfsgnj_vv.h
+++ b/riscv/insns/vfsgnj_vv.h
@@ -2,4 +2,7 @@
VI_VFP_VV_LOOP
({
vd = fsgnj32(vs2.v, vs1.v, false, false);
+},
+{
+ vd = fsgnj64(vs2.v, vs1.v, false, false);
})
diff --git a/riscv/insns/vfsgnjn_vv.h b/riscv/insns/vfsgnjn_vv.h
index 6603352..a16acf7 100644
--- a/riscv/insns/vfsgnjn_vv.h
+++ b/riscv/insns/vfsgnjn_vv.h
@@ -2,4 +2,7 @@
VI_VFP_VV_LOOP
({
vd = fsgnj32(vs2.v, vs1.v, true, false);
+},
+{
+ vd = fsgnj64(vs2.v, vs1.v, true, false);
})
diff --git a/riscv/insns/vfsgnjx_vv.h b/riscv/insns/vfsgnjx_vv.h
index 9cc12dc..9dbe078 100644
--- a/riscv/insns/vfsgnjx_vv.h
+++ b/riscv/insns/vfsgnjx_vv.h
@@ -2,4 +2,7 @@
VI_VFP_VV_LOOP
({
vd = fsgnj32(vs2.v, vs1.v, false, true);
+},
+{
+ vd = fsgnj64(vs2.v, vs1.v, false, true);
})
diff --git a/riscv/insns/vfsqrt_v.h b/riscv/insns/vfsqrt_v.h
index 4931037..4a36932 100644
--- a/riscv/insns/vfsqrt_v.h
+++ b/riscv/insns/vfsqrt_v.h
@@ -2,4 +2,7 @@
VI_VFP_VV_LOOP
({
vd = f32_sqrt(vs2);
+},
+{
+ vd = f64_sqrt(vs2);
})
diff --git a/riscv/insns/vfsub_vv.h b/riscv/insns/vfsub_vv.h
index 71e7a43..40545fb 100644
--- a/riscv/insns/vfsub_vv.h
+++ b/riscv/insns/vfsub_vv.h
@@ -2,4 +2,7 @@
VI_VFP_VV_LOOP
({
vd = f32_sub(vs2, vs1);
+},
+{
+ vd = f64_sub(vs2, vs1);
})