-rw-r--r--  riscv/decode.h           31
-rw-r--r--  riscv/insns/vmfeq_vf.h    8
-rw-r--r--  riscv/insns/vmfeq_vv.h    8
-rw-r--r--  riscv/insns/vmfge_vf.h    8
-rw-r--r--  riscv/insns/vmfgt_vf.h    8
-rw-r--r--  riscv/insns/vmfle_vf.h    8
-rw-r--r--  riscv/insns/vmfle_vv.h    8
-rw-r--r--  riscv/insns/vmflt_vf.h    8
-rw-r--r--  riscv/insns/vmflt_vv.h    8
-rw-r--r--  riscv/insns/vmfne_vf.h    8
-rw-r--r--  riscv/insns/vmfne_vv.h    8
11 files changed, 85 insertions(+), 26 deletions(-)
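
This change teaches the vector floating-point compare macros about SEW=64: VI_VFP_LOOP_CMP now takes separate 32-bit and 64-bit bodies and selects between them on P.VU.vsew, VI_VFP_LOOP_CMP_END accepts e64 when writing the mask result, and all ten vmf* instruction headers pass f64_* comparison bodies alongside the existing f32_* ones (their comments are also corrected from vf* to vmf*). As a rough standalone sketch of what the e64 path does for one element, assuming Berkeley SoftFloat-3's float64_t and f64_lt and reusing the mask-update expression from VI_VFP_LOOP_CMP_END (write_mask_bit is a made-up helper for illustration, not part of the patch):

/* Hypothetical standalone sketch: one element of an e64 vmflt comparison,
 * mirroring the per-element work of the expanded macro. */
#include <stdbool.h>
#include <stdint.h>
#include "softfloat.h"

/* Same update expression as VI_VFP_LOOP_CMP_END:
 *   vdi = (vdi & ~mmask) | (((res) << mpos) & mmask); */
static uint64_t write_mask_bit(uint64_t vdi, uint64_t mpos, bool res)
{
  uint64_t mmask = UINT64_C(1) << mpos;
  return (vdi & ~mmask) | (((uint64_t)res << mpos) & mmask);
}

int main(void)
{
  float64_t vs2 = { .v = UINT64_C(0x3FF0000000000000) };  /* 1.0 */
  float64_t vs1 = { .v = UINT64_C(0x4000000000000000) };  /* 2.0 */
  uint64_t vdi = 0;

  /* vmflt body for e64: res = f64_lt(vs2, vs1); 1.0 < 2.0, so mask bit 0 is set. */
  vdi = write_mask_bit(vdi, /*mpos=*/0, f64_lt(vs2, vs1));
  return (int)vdi;  /* 1 */
}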
diff --git a/riscv/decode.h b/riscv/decode.h
index 62e798e..acb0c4c 100644
--- a/riscv/decode.h
+++ b/riscv/decode.h
@@ -1655,12 +1655,12 @@ for (reg_t i = 0; i < vlmax; ++i) { \
#define VI_VFP_LOOP_CMP_END \
switch(P.VU.vsew) { \
- case e32: { \
+ case e32: \
+ case e64: { \
vdi = (vdi & ~mmask) | (((res) << mpos) & mmask); \
break; \
} \
case e16: \
- case e8: \
default: \
require(0); \
break; \
@@ -1741,12 +1741,31 @@ for (reg_t i = 0; i < vlmax; ++i) { \
DEBUG_RVV_FP_VF; \
VI_VFP_LOOP_END
-#define VI_VFP_LOOP_CMP(BODY, is_vs1) \
+#define VI_VFP_LOOP_CMP(BODY32, BODY64, is_vs1) \
VI_CHECK_MSS(is_vs1); \
VI_VFP_LOOP_CMP_BASE \
- BODY; \
- set_fp_exceptions; \
- DEBUG_RVV_FP_VV; \
+ switch(P.VU.vsew) { \
+ case e32: {\
+ float32_t vs2 = P.VU.elt<float32_t>(rs2_num, i); \
+ float32_t vs1 = P.VU.elt<float32_t>(rs1_num, i); \
+ float32_t rs1 = f32(READ_FREG(rs1_num)); \
+ BODY32; \
+ set_fp_exceptions; \
+ break; \
+ }\
+ case e64: {\
+ float64_t vs2 = P.VU.elt<float64_t>(rs2_num, i); \
+ float64_t vs1 = P.VU.elt<float64_t>(rs1_num, i); \
+ float64_t rs1 = f64(READ_FREG(rs1_num)); \
+ BODY64; \
+ set_fp_exceptions; \
+ break; \
+ }\
+ case e16: \
+ default: \
+ require(0); \
+ break; \
+ }; \
VI_VFP_LOOP_CMP_END \
#define VI_VFP_VF_LOOP_WIDE(BODY) \
diff --git a/riscv/insns/vmfeq_vf.h b/riscv/insns/vmfeq_vf.h
index f0e7109..766f0ab 100644
--- a/riscv/insns/vmfeq_vf.h
+++ b/riscv/insns/vmfeq_vf.h
@@ -1,5 +1,9 @@
-// vfeq.vf vd, vs2, fs1
+// vmfeq.vf vd, vs2, fs1
VI_VFP_LOOP_CMP
({
res = f32_eq(vs2, rs1);
-}, false)
+},
+{
+ res = f64_eq(vs2, rs1);
+},
+false)
diff --git a/riscv/insns/vmfeq_vv.h b/riscv/insns/vmfeq_vv.h
index 1be3a69..19117fc 100644
--- a/riscv/insns/vmfeq_vv.h
+++ b/riscv/insns/vmfeq_vv.h
@@ -1,5 +1,9 @@
-// vfeq.vv vd, vs2, vs1
+// vmfeq.vv vd, vs2, vs1
VI_VFP_LOOP_CMP
({
res = f32_eq(vs2, vs1);
-}, true)
+},
+{
+ res = f64_eq(vs2, vs1);
+},
+true)
diff --git a/riscv/insns/vmfge_vf.h b/riscv/insns/vmfge_vf.h
index 1c68366..c5f4c83 100644
--- a/riscv/insns/vmfge_vf.h
+++ b/riscv/insns/vmfge_vf.h
@@ -1,5 +1,9 @@
-// vfge.vf vd, vs2, rs1
+// vmfge.vf vd, vs2, rs1
VI_VFP_LOOP_CMP
({
res = f32_le(rs1, vs2);
-}, false)
+},
+{
+ res = f64_le(rs1, vs2);
+},
+false)
diff --git a/riscv/insns/vmfgt_vf.h b/riscv/insns/vmfgt_vf.h
index 0979185..5387300 100644
--- a/riscv/insns/vmfgt_vf.h
+++ b/riscv/insns/vmfgt_vf.h
@@ -1,5 +1,9 @@
-// vfgt.vf vd, vs2, rs1
+// vmfgt.vf vd, vs2, rs1
VI_VFP_LOOP_CMP
({
res = f32_lt(rs1, vs2);
-}, false)
+},
+{
+ res = f64_lt(rs1, vs2);
+},
+false)
diff --git a/riscv/insns/vmfle_vf.h b/riscv/insns/vmfle_vf.h
index 90607ec..1a3a7c4 100644
--- a/riscv/insns/vmfle_vf.h
+++ b/riscv/insns/vmfle_vf.h
@@ -1,5 +1,9 @@
-// vfle.vf vd, vs2, rs1
+// vmfle.vf vd, vs2, rs1
VI_VFP_LOOP_CMP
({
res = f32_le(vs2, rs1);
-}, false)
+},
+{
+ res = f64_le(vs2, rs1);
+},
+false)
diff --git a/riscv/insns/vmfle_vv.h b/riscv/insns/vmfle_vv.h
index 6ccdfec..067f1a9 100644
--- a/riscv/insns/vmfle_vv.h
+++ b/riscv/insns/vmfle_vv.h
@@ -1,5 +1,9 @@
-// vfle.vv vd, vs2, rs1
+// vmfle.vv vd, vs2, rs1
VI_VFP_LOOP_CMP
({
res = f32_le(vs2, vs1);
-}, true)
+},
+{
+ res = f64_le(vs2, vs1);
+},
+true)
diff --git a/riscv/insns/vmflt_vf.h b/riscv/insns/vmflt_vf.h
index 6b71a4a..248071d 100644
--- a/riscv/insns/vmflt_vf.h
+++ b/riscv/insns/vmflt_vf.h
@@ -1,5 +1,9 @@
-// vflt.vf vd, vs2, rs1
+// vmflt.vf vd, vs2, rs1
VI_VFP_LOOP_CMP
({
res = f32_lt(vs2, rs1);
-}, false)
+},
+{
+ res = f64_lt(vs2, rs1);
+},
+false)
diff --git a/riscv/insns/vmflt_vv.h b/riscv/insns/vmflt_vv.h
index a2ed8e3..71895df 100644
--- a/riscv/insns/vmflt_vv.h
+++ b/riscv/insns/vmflt_vv.h
@@ -1,5 +1,9 @@
-// vflt.vv vd, vs2, vs1
+// vmflt.vv vd, vs2, vs1
VI_VFP_LOOP_CMP
({
res = f32_lt(vs2, vs1);
-}, true)
+},
+{
+ res = f64_lt(vs2, vs1);
+},
+true)
diff --git a/riscv/insns/vmfne_vf.h b/riscv/insns/vmfne_vf.h
index ef63678..afccbcb 100644
--- a/riscv/insns/vmfne_vf.h
+++ b/riscv/insns/vmfne_vf.h
@@ -1,5 +1,9 @@
-// vfne.vf vd, vs2, rs1
+// vmfne.vf vd, vs2, rs1
VI_VFP_LOOP_CMP
({
res = !f32_eq(vs2, rs1);
-}, false)
+},
+{
+ res = !f64_eq(vs2, rs1);
+},
+false)
diff --git a/riscv/insns/vmfne_vv.h b/riscv/insns/vmfne_vv.h
index 8378a23..d5df60c 100644
--- a/riscv/insns/vmfne_vv.h
+++ b/riscv/insns/vmfne_vv.h
@@ -1,5 +1,9 @@
-// vfne.vv vd, vs2, rs1
+// vmfne.vv vd, vs2, rs1
VI_VFP_LOOP_CMP
({
res = !f32_eq(vs2, vs1);
-}, true)
+},
+{
+ res = !f64_eq(vs2, vs1);
+},
+true)