author     Chih-Min Chao <chihmin.chao@sifive.com>   2020-01-06 20:16:16 -0800
committer  Chih-Min Chao <chihmin.chao@sifive.com>   2020-01-22 07:54:18 -0800
commit     9413a45196a968d42bbc2cc6046a23d819293dc9 (patch)
tree       15dad61a512e2eab9e3259df2350fd9371a842d4
parent     e1cb87f7d7626d749c687020c095f17c31858592 (diff)
commitlog: rvv: add commitlog support to float instructions
Signed-off-by: Chih-Min Chao <chihmin.chao@sifive.com>
-rw-r--r--  riscv/decode.h                  22
-rw-r--r--  riscv/insns/vfmerge_vfm.h        5
-rw-r--r--  riscv/insns/vfmv_s_f.h           8
-rw-r--r--  riscv/insns/vfmv_v_f.h           4
-rw-r--r--  riscv/insns/vfncvt_f_f_w.h       2
-rw-r--r--  riscv/insns/vfncvt_f_x_w.h       2
-rw-r--r--  riscv/insns/vfncvt_f_xu_w.h      2
-rw-r--r--  riscv/insns/vfncvt_rod_f_f_w.h   2
-rw-r--r--  riscv/insns/vfncvt_x_f_w.h       2
-rw-r--r--  riscv/insns/vfncvt_xu_f_w.h      2
-rw-r--r--  riscv/insns/vfwcvt_f_f_v.h       2
-rw-r--r--  riscv/insns/vfwcvt_f_x_v.h       2
-rw-r--r--  riscv/insns/vfwcvt_f_xu_v.h      2
-rw-r--r--  riscv/insns/vfwcvt_x_f_v.h       2
-rw-r--r--  riscv/insns/vfwcvt_xu_f_v.h      2
15 files changed, 30 insertions, 31 deletions
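
The diff below threads a third boolean argument through every destination-element access: source reads keep the two-argument form elt<T>(vreg, n), while write sites become elt<T>(vreg, n, true) so the simulator can record which vector elements an instruction actually modified in its commit log. As a rough illustration only, here is a minimal C++ sketch of such a write-tracking accessor. The vector_unit_sketch type, its commit_log member, and main() are invented for this note and are not Spike's actual code (the real accessor lives in riscv/processor.h); only the three-argument call shape comes from the diff itself.

#include <cstdint>
#include <utility>
#include <vector>

using reg_t = uint64_t;

// Simplified stand-in for a vector register file with write tracking.
struct vector_unit_sketch {
  std::vector<uint8_t> reg_file;                    // backing store, vlenb bytes per vreg
  size_t vlenb;                                     // bytes per vector register
  std::vector<std::pair<reg_t, reg_t>> commit_log;  // (vreg, element) pairs written

  vector_unit_sketch(size_t nregs, size_t vlenb_)
      : reg_file(nregs * vlenb_, 0), vlenb(vlenb_) {}

  // Element accessor: write sites pass is_write = true so the destination
  // element is logged before the caller stores through the reference.
  template <class T>
  T& elt(reg_t vreg, reg_t n, bool is_write = false) {
    if (is_write)
      commit_log.emplace_back(vreg, n);
    return *reinterpret_cast<T*>(&reg_file[vreg * vlenb + n * sizeof(T)]);
  }
};

int main() {
  vector_unit_sketch vu(32, 16);      // 32 vregs, VLEN = 128 bits
  vu.elt<float>(8, 0, true) = 1.5f;   // destination write: recorded
  float x = vu.elt<float>(9, 0);      // source read: not recorded
  (void)x;                            // vu.commit_log now holds {8, 0}
}

Defaulting is_write to false keeps every existing read-only call site valid, which is why the patch only has to touch the lines that take a destination reference.
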
diff --git a/riscv/decode.h b/riscv/decode.h
index 8999d93..9b81fd4 100644
--- a/riscv/decode.h
+++ b/riscv/decode.h
@@ -1630,7 +1630,7 @@ for (reg_t i = 0; i < vlmax; ++i) { \
float32_t rs1 = f32(READ_FREG(rs1_num)); \
VI_LOOP_ELEMENT_SKIP(); \
uint64_t mmask = (UINT64_MAX << (64 - mlen)) >> (64 - mlen - mpos); \
- uint64_t &vdi = P.VU.elt<uint64_t>(rd_num, midx); \
+ uint64_t &vdi = P.VU.elt<uint64_t>(rd_num, midx, true); \
uint64_t res = 0;
#define VI_VFP_LOOP_REDUCTION_BASE(width) \
@@ -1639,7 +1639,7 @@ for (reg_t i = 0; i < vlmax; ++i) { \
vd_0 = vs1_0;\
for (reg_t i=P.VU.vstart; i<vl; ++i){ \
VI_LOOP_ELEMENT_SKIP(); \
- int##width##_t &vd = P.VU.elt<int##width##_t>(rd_num, i); \
+ int##width##_t &vd = P.VU.elt<int##width##_t>(rd_num, i, true); \
float##width##_t vs2 = P.VU.elt<float##width##_t>(rs2_num, i); \
#define VI_VFP_LOOP_WIDE_REDUCTION_BASE \
@@ -1661,7 +1661,7 @@ for (reg_t i = 0; i < vlmax; ++i) { \
} \
P.VU.vstart = 0; \
if (vl > 0) { \
- P.VU.elt<type_sew_t<x>::type>(rd_num, 0) = vd_0.v; \
+ P.VU.elt<type_sew_t<x>::type>(rd_num, 0, true) = vd_0.v; \
}
#define VI_VFP_LOOP_CMP_END \
@@ -1685,7 +1685,7 @@ for (reg_t i = 0; i < vlmax; ++i) { \
VI_VFP_LOOP_BASE \
switch(P.VU.vsew) { \
case e32: {\
- float32_t &vd = P.VU.elt<float32_t>(rd_num, i); \
+ float32_t &vd = P.VU.elt<float32_t>(rd_num, i, true); \
float32_t vs1 = P.VU.elt<float32_t>(rs1_num, i); \
float32_t vs2 = P.VU.elt<float32_t>(rs2_num, i); \
BODY32; \
@@ -1693,7 +1693,7 @@ for (reg_t i = 0; i < vlmax; ++i) { \
break; \
}\
case e64: {\
- float64_t &vd = P.VU.elt<float64_t>(rd_num, i); \
+ float64_t &vd = P.VU.elt<float64_t>(rd_num, i, true); \
float64_t vs1 = P.VU.elt<float64_t>(rs1_num, i); \
float64_t vs2 = P.VU.elt<float64_t>(rs2_num, i); \
BODY64; \
@@ -1745,7 +1745,7 @@ for (reg_t i = 0; i < vlmax; ++i) { \
VI_VFP_LOOP_BASE \
switch(P.VU.vsew) { \
case e32: {\
- float32_t &vd = P.VU.elt<float32_t>(rd_num, i); \
+ float32_t &vd = P.VU.elt<float32_t>(rd_num, i, true); \
float32_t rs1 = f32(READ_FREG(rs1_num)); \
float32_t vs2 = P.VU.elt<float32_t>(rs2_num, i); \
BODY32; \
@@ -1753,7 +1753,7 @@ for (reg_t i = 0; i < vlmax; ++i) { \
break; \
}\
case e64: {\
- float64_t &vd = P.VU.elt<float64_t>(rd_num, i); \
+ float64_t &vd = P.VU.elt<float64_t>(rd_num, i, true); \
float64_t rs1 = f64(READ_FREG(rs1_num)); \
float64_t vs2 = P.VU.elt<float64_t>(rs2_num, i); \
BODY64; \
@@ -1801,7 +1801,7 @@ for (reg_t i = 0; i < vlmax; ++i) { \
VI_VFP_LOOP_BASE \
switch(P.VU.vsew) { \
case e32: {\
- float64_t &vd = P.VU.elt<float64_t>(rd_num, i); \
+ float64_t &vd = P.VU.elt<float64_t>(rd_num, i, true); \
float64_t vs2 = f32_to_f64(P.VU.elt<float32_t>(rs2_num, i)); \
float64_t rs1 = f32_to_f64(f32(READ_FREG(rs1_num))); \
BODY; \
@@ -1823,7 +1823,7 @@ for (reg_t i = 0; i < vlmax; ++i) { \
VI_VFP_LOOP_BASE \
switch(P.VU.vsew) { \
case e32: {\
- float64_t &vd = P.VU.elt<float64_t>(rd_num, i); \
+ float64_t &vd = P.VU.elt<float64_t>(rd_num, i, true); \
float64_t vs2 = f32_to_f64(P.VU.elt<float32_t>(rs2_num, i)); \
float64_t vs1 = f32_to_f64(P.VU.elt<float32_t>(rs1_num, i)); \
BODY; \
@@ -1844,7 +1844,7 @@ for (reg_t i = 0; i < vlmax; ++i) { \
VI_VFP_LOOP_BASE \
switch(P.VU.vsew) { \
case e32: {\
- float64_t &vd = P.VU.elt<float64_t>(rd_num, i); \
+ float64_t &vd = P.VU.elt<float64_t>(rd_num, i, true); \
float64_t vs2 = P.VU.elt<float64_t>(rs2_num, i); \
float64_t rs1 = f32_to_f64(f32(READ_FREG(rs1_num))); \
BODY; \
@@ -1864,7 +1864,7 @@ for (reg_t i = 0; i < vlmax; ++i) { \
VI_VFP_LOOP_BASE \
switch(P.VU.vsew) { \
case e32: {\
- float64_t &vd = P.VU.elt<float64_t>(rd_num, i); \
+ float64_t &vd = P.VU.elt<float64_t>(rd_num, i, true); \
float64_t vs2 = P.VU.elt<float64_t>(rs2_num, i); \
float64_t vs1 = f32_to_f64(P.VU.elt<float32_t>(rs1_num, i)); \
BODY; \
diff --git a/riscv/insns/vfmerge_vfm.h b/riscv/insns/vfmerge_vfm.h
index 639809d..b213e06 100644
--- a/riscv/insns/vfmerge_vfm.h
+++ b/riscv/insns/vfmerge_vfm.h
@@ -2,12 +2,11 @@
require(insn.rd() != 0);
VI_CHECK_SSS(false);
VI_VFP_COMMON;
-reg_t sew = P.VU.vsew;
switch(P.VU.vsew) {
case 32:
for (reg_t i=P.VU.vstart; i<vl; ++i) {
- auto &vd = P.VU.elt<float32_t>(rd_num, i);
+ auto &vd = P.VU.elt<float32_t>(rd_num, i, true);
auto rs1 = f32(READ_FREG(rs1_num));
auto vs2 = P.VU.elt<float32_t>(rs2_num, i);
@@ -20,7 +19,7 @@ switch(P.VU.vsew) {
break;
case 64:
for (reg_t i=P.VU.vstart; i<vl; ++i) {
- auto &vd = P.VU.elt<float64_t>(rd_num, i);
+ auto &vd = P.VU.elt<float64_t>(rd_num, i, true);
auto rs1 = f64(READ_FREG(rs1_num));
auto vs2 = P.VU.elt<float64_t>(rs2_num, i);
diff --git a/riscv/insns/vfmv_s_f.h b/riscv/insns/vfmv_s_f.h
index 44e9e2e..1f319b5 100644
--- a/riscv/insns/vfmv_s_f.h
+++ b/riscv/insns/vfmv_s_f.h
@@ -11,15 +11,15 @@ if (vl > 0) {
switch(P.VU.vsew) {
case 32:
if (FLEN == 64)
- P.VU.elt<uint32_t>(rd_num, 0) = f64(FRS1).v;
+ P.VU.elt<uint32_t>(rd_num, 0, true) = f64(FRS1).v;
else
- P.VU.elt<uint32_t>(rd_num, 0) = f32(FRS1).v;
+ P.VU.elt<uint32_t>(rd_num, 0, true) = f32(FRS1).v;
break;
case 64:
if (FLEN == 64)
- P.VU.elt<uint64_t>(rd_num, 0) = f64(FRS1).v;
+ P.VU.elt<uint64_t>(rd_num, 0, true) = f64(FRS1).v;
else
- P.VU.elt<uint64_t>(rd_num, 0) = f32(FRS1).v;
+ P.VU.elt<uint64_t>(rd_num, 0, true) = f32(FRS1).v;
break;
}
}
diff --git a/riscv/insns/vfmv_v_f.h b/riscv/insns/vfmv_v_f.h
index 75832f9..f85a26a 100644
--- a/riscv/insns/vfmv_v_f.h
+++ b/riscv/insns/vfmv_v_f.h
@@ -4,7 +4,7 @@ VI_VFP_COMMON
switch(P.VU.vsew) {
case e32:
for (reg_t i=P.VU.vstart; i<vl; ++i) {
- auto &vd = P.VU.elt<float32_t>(rd_num, i);
+ auto &vd = P.VU.elt<float32_t>(rd_num, i, true);
auto rs1 = f32(READ_FREG(rs1_num));
vd = rs1;
@@ -12,7 +12,7 @@ switch(P.VU.vsew) {
break;
case e64:
for (reg_t i=P.VU.vstart; i<vl; ++i) {
- auto &vd = P.VU.elt<float64_t>(rd_num, i);
+ auto &vd = P.VU.elt<float64_t>(rd_num, i, true);
auto rs1 = f64(READ_FREG(rs1_num));
vd = rs1;
diff --git a/riscv/insns/vfncvt_f_f_w.h b/riscv/insns/vfncvt_f_f_w.h
index 55a8eac..3a9ead3 100644
--- a/riscv/insns/vfncvt_f_f_w.h
+++ b/riscv/insns/vfncvt_f_f_w.h
@@ -5,6 +5,6 @@ if (P.VU.vsew == e32)
VI_VFP_LOOP_BASE
auto vs2 = P.VU.elt<float64_t>(rs2_num, i);
- P.VU.elt<float32_t>(rd_num, i) = f64_to_f32(vs2);
+ P.VU.elt<float32_t>(rd_num, i, true) = f64_to_f32(vs2);
set_fp_exceptions;
VI_VFP_LOOP_END
diff --git a/riscv/insns/vfncvt_f_x_w.h b/riscv/insns/vfncvt_f_x_w.h
index daf2274..c328395 100644
--- a/riscv/insns/vfncvt_f_x_w.h
+++ b/riscv/insns/vfncvt_f_x_w.h
@@ -5,6 +5,6 @@ if (P.VU.vsew == e32)
VI_VFP_LOOP_BASE
auto vs2 = P.VU.elt<int64_t>(rs2_num, i);
- P.VU.elt<float32_t>(rd_num, i) = i64_to_f32(vs2);
+ P.VU.elt<float32_t>(rd_num, i, true) = i64_to_f32(vs2);
set_fp_exceptions;
VI_VFP_LOOP_END
diff --git a/riscv/insns/vfncvt_f_xu_w.h b/riscv/insns/vfncvt_f_xu_w.h
index 7f57ec5..c674644 100644
--- a/riscv/insns/vfncvt_f_xu_w.h
+++ b/riscv/insns/vfncvt_f_xu_w.h
@@ -5,6 +5,6 @@ if (P.VU.vsew == e32)
VI_VFP_LOOP_BASE
auto vs2 = P.VU.elt<uint64_t>(rs2_num, i);
- P.VU.elt<float32_t>(rd_num, i) = ui64_to_f32(vs2);
+ P.VU.elt<float32_t>(rd_num, i, true) = ui64_to_f32(vs2);
set_fp_exceptions;
VI_VFP_LOOP_END
diff --git a/riscv/insns/vfncvt_rod_f_f_w.h b/riscv/insns/vfncvt_rod_f_f_w.h
index 130c5b5..0eae343 100644
--- a/riscv/insns/vfncvt_rod_f_f_w.h
+++ b/riscv/insns/vfncvt_rod_f_f_w.h
@@ -6,6 +6,6 @@ if (P.VU.vsew == e32)
VI_VFP_LOOP_BASE
softfloat_roundingMode = softfloat_round_odd;
auto vs2 = P.VU.elt<float64_t>(rs2_num, i);
- P.VU.elt<float32_t>(rd_num, i) = f64_to_f32(vs2);
+ P.VU.elt<float32_t>(rd_num, i, true) = f64_to_f32(vs2);
set_fp_exceptions;
VI_VFP_LOOP_END
diff --git a/riscv/insns/vfncvt_x_f_w.h b/riscv/insns/vfncvt_x_f_w.h
index cda2fe2..d6728bd 100644
--- a/riscv/insns/vfncvt_x_f_w.h
+++ b/riscv/insns/vfncvt_x_f_w.h
@@ -5,6 +5,6 @@ if (P.VU.vsew == e32)
VI_VFP_LOOP_BASE
auto vs2 = P.VU.elt<float64_t>(rs2_num, i);
- P.VU.elt<int32_t>(rd_num, i) = f64_to_i32(vs2, STATE.frm, true);
+ P.VU.elt<int32_t>(rd_num, i, true) = f64_to_i32(vs2, STATE.frm, true);
set_fp_exceptions;
VI_VFP_LOOP_END
diff --git a/riscv/insns/vfncvt_xu_f_w.h b/riscv/insns/vfncvt_xu_f_w.h
index a009105..f2cce24 100644
--- a/riscv/insns/vfncvt_xu_f_w.h
+++ b/riscv/insns/vfncvt_xu_f_w.h
@@ -5,6 +5,6 @@ if (P.VU.vsew == e32)
VI_VFP_LOOP_BASE
auto vs2 = P.VU.elt<float64_t>(rs2_num, i);
- P.VU.elt<uint32_t>(rd_num, i) = f64_to_ui32(vs2, STATE.frm, true);
+ P.VU.elt<uint32_t>(rd_num, i, true) = f64_to_ui32(vs2, STATE.frm, true);
set_fp_exceptions;
VI_VFP_LOOP_END
diff --git a/riscv/insns/vfwcvt_f_f_v.h b/riscv/insns/vfwcvt_f_f_v.h
index 4bda2bc..424f0f4 100644
--- a/riscv/insns/vfwcvt_f_f_v.h
+++ b/riscv/insns/vfwcvt_f_f_v.h
@@ -5,6 +5,6 @@ if (P.VU.vsew == e32)
VI_VFP_LOOP_BASE
auto vs2 = P.VU.elt<float32_t>(rs2_num, i);
- P.VU.elt<float64_t>(rd_num, i) = f32_to_f64(vs2);
+ P.VU.elt<float64_t>(rd_num, i, true) = f32_to_f64(vs2);
set_fp_exceptions;
VI_VFP_LOOP_WIDE_END
diff --git a/riscv/insns/vfwcvt_f_x_v.h b/riscv/insns/vfwcvt_f_x_v.h
index 346db32..1c05ab7 100644
--- a/riscv/insns/vfwcvt_f_x_v.h
+++ b/riscv/insns/vfwcvt_f_x_v.h
@@ -5,6 +5,6 @@ if (P.VU.vsew == e32)
VI_VFP_LOOP_BASE
auto vs2 = P.VU.elt<int32_t>(rs2_num, i);
- P.VU.elt<float64_t>(rd_num, i) = i32_to_f64(vs2);
+ P.VU.elt<float64_t>(rd_num, i, true) = i32_to_f64(vs2);
set_fp_exceptions;
VI_VFP_LOOP_WIDE_END
diff --git a/riscv/insns/vfwcvt_f_xu_v.h b/riscv/insns/vfwcvt_f_xu_v.h
index c963abb..fcb8c0c 100644
--- a/riscv/insns/vfwcvt_f_xu_v.h
+++ b/riscv/insns/vfwcvt_f_xu_v.h
@@ -5,6 +5,6 @@ if (P.VU.vsew == e32)
VI_VFP_LOOP_BASE
auto vs2 = P.VU.elt<uint32_t>(rs2_num, i);
- P.VU.elt<float64_t>(rd_num, i) = ui32_to_f64(vs2);
+ P.VU.elt<float64_t>(rd_num, i, true) = ui32_to_f64(vs2);
set_fp_exceptions;
VI_VFP_LOOP_WIDE_END
diff --git a/riscv/insns/vfwcvt_x_f_v.h b/riscv/insns/vfwcvt_x_f_v.h
index 9088a79..3df8256 100644
--- a/riscv/insns/vfwcvt_x_f_v.h
+++ b/riscv/insns/vfwcvt_x_f_v.h
@@ -5,6 +5,6 @@ if (P.VU.vsew == e32)
VI_VFP_LOOP_BASE
auto vs2 = P.VU.elt<float32_t>(rs2_num, i);
- P.VU.elt<int64_t>(rd_num, i) = f32_to_i64(vs2, STATE.frm, true);
+ P.VU.elt<int64_t>(rd_num, i, true) = f32_to_i64(vs2, STATE.frm, true);
set_fp_exceptions;
VI_VFP_LOOP_WIDE_END
diff --git a/riscv/insns/vfwcvt_xu_f_v.h b/riscv/insns/vfwcvt_xu_f_v.h
index 266cbca..6e39b7d 100644
--- a/riscv/insns/vfwcvt_xu_f_v.h
+++ b/riscv/insns/vfwcvt_xu_f_v.h
@@ -5,6 +5,6 @@ if (P.VU.vsew == e32)
VI_VFP_LOOP_BASE
auto vs2 = P.VU.elt<float32_t>(rs2_num, i);
- P.VU.elt<uint64_t>(rd_num, i) = f32_to_ui64(vs2, STATE.frm, true);
+ P.VU.elt<uint64_t>(rd_num, i, true) = f32_to_ui64(vs2, STATE.frm, true);
set_fp_exceptions;
VI_VFP_LOOP_WIDE_END
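
One detail worth noting in the macros above: VI_LOOP_ELEMENT_SKIP runs before the destination reference is taken, so masked-off elements never reach the three-argument elt call and therefore never enter the commit log. Below is a hedged sketch of that ordering, reusing the illustrative vector_unit_sketch from the note above; vfadd_vf_sketch, mask_bit, and all parameter names are invented for this example, not Spike's.

// v0 holds the mask in this simplified model, one bit per element.
bool mask_bit(vector_unit_sketch& vu, reg_t i) {
  return (vu.elt<uint8_t>(0, i / 8) >> (i % 8)) & 1;
}

// Per-element loop in the shape of VI_VFP_LOOP_BASE + VI_LOOP_ELEMENT_SKIP.
void vfadd_vf_sketch(vector_unit_sketch& vu, reg_t rd, reg_t rs2, float rs1,
                     reg_t vstart, reg_t vl, bool masked) {
  for (reg_t i = vstart; i < vl; ++i) {
    if (masked && !mask_bit(vu, i))
      continue;                               // skipped: nothing logged
    float& vd = vu.elt<float>(rd, i, true);   // write site: logged
    float vs2 = vu.elt<float>(rs2, i);        // read site: two-arg form
    vd = vs2 + rs1;                           // real code uses softfloat ops
  }
}

Logging at the accessor rather than at instruction retirement means masking and vstart handling fall out for free: only elements whose write reference is actually taken show up in the log.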