about summary refs log tree commit diff
diff options
context:
space:
mode:
authorAndrew Waterman <andrew@sifive.com>2021-12-17 11:27:19 -0800
committerGitHub <noreply@github.com>2021-12-17 11:27:19 -0800
commit6751b49ac173676c927143d6822910ac2d06a548 (patch)
tree83ce944a0166b1a823efcdfcefc345e62125c8b9
parentf1a6fb9695cffcdea9d8219c14b288c33e046716 (diff)
parentf25933a48392d2141bf557fe16b3e3c56957b2d2 (diff)
downloadspike-6751b49ac173676c927143d6822910ac2d06a548.zip
spike-6751b49ac173676c927143d6822910ac2d06a548.tar.gz
spike-6751b49ac173676c927143d6822910ac2d06a548.tar.bz2
Merge pull request #881 from eopXD/simplify-float-convert
Simplify float convert instructions
-rw-r--r--riscv/decode.h175
-rw-r--r--riscv/insns/vfclass_v.h6
-rw-r--r--riscv/insns/vfcvt_f_x_v.h19
-rw-r--r--riscv/insns/vfcvt_f_xu_v.h19
-rw-r--r--riscv/insns/vfcvt_rtz_x_f_v.h16
-rw-r--r--riscv/insns/vfcvt_rtz_xu_f_v.h16
-rw-r--r--riscv/insns/vfcvt_x_f_v.h16
-rw-r--r--riscv/insns/vfcvt_xu_f_v.h16
-rw-r--r--riscv/insns/vfncvt_f_f_w.h30
-rw-r--r--riscv/insns/vfncvt_f_x_w.h31
-rw-r--r--riscv/insns/vfncvt_f_xu_w.h31
-rw-r--r--riscv/insns/vfncvt_rod_f_f_w.h38
-rw-r--r--riscv/insns/vfncvt_rtz_x_f_w.h32
-rw-r--r--riscv/insns/vfncvt_rtz_xu_f_w.h32
-rw-r--r--riscv/insns/vfncvt_x_f_w.h32
-rw-r--r--riscv/insns/vfncvt_xu_f_w.h32
-rw-r--r--riscv/insns/vfwcvt_f_f_v.h30
-rw-r--r--riscv/insns/vfwcvt_f_x_v.h32
-rw-r--r--riscv/insns/vfwcvt_f_xu_v.h32
-rw-r--r--riscv/insns/vfwcvt_rtz_x_f_v.h31
-rw-r--r--riscv/insns/vfwcvt_rtz_xu_f_v.h31
-rw-r--r--riscv/insns/vfwcvt_x_f_v.h31
-rw-r--r--riscv/insns/vfwcvt_xu_f_v.h31
23 files changed, 322 insertions(+), 437 deletions(-)
diff --git a/riscv/decode.h b/riscv/decode.h
index 90025f3..02d0569 100644
--- a/riscv/decode.h
+++ b/riscv/decode.h
@@ -852,6 +852,18 @@ static inline bool is_aligned(const unsigned val, const unsigned pos)
float##width##_t rs1 = f##width(READ_FREG(rs1_num)); \
float##width##_t vs2 = P.VU.elt<float##width##_t>(rs2_num, i);
+#define CVT_FP_TO_FP_PARAMS(from_width, to_width) \
+ auto vs2 = P.VU.elt<float##from_width##_t>(rs2_num, i); \
+ auto &vd = P.VU.elt<float##to_width##_t>(rd_num, i, true);
+
+#define CVT_INT_TO_FP_PARAMS(from_width, to_width, sign) \
+ auto vs2 = P.VU.elt<sign##from_width##_t>(rs2_num, i); \
+ auto &vd = P.VU.elt<float##to_width##_t>(rd_num, i, true);
+
+#define CVT_FP_TO_INT_PARAMS(from_width, to_width, sign) \
+ auto vs2 = P.VU.elt<float##from_width##_t>(rs2_num, i); \
+ auto &vd = P.VU.elt<sign##to_width##_t>(rd_num, i, true);
+
//
// vector: integer and masking operation loop
//
@@ -2369,39 +2381,142 @@ reg_t index[P.VU.vlmax]; \
for (reg_t i=P.VU.vstart->read(); i<vl; ++i){ \
VI_LOOP_ELEMENT_SKIP();
-#define VI_VFP_CVT_SCALE(BODY8, BODY16, BODY32, \
- CHECK8, CHECK16, CHECK32, \
- is_widen, eew_check) \
- if (is_widen) { \
- VI_CHECK_DSS(false);\
- } else { \
- VI_CHECK_SDS(false); \
- } \
- require(eew_check); \
+#define VI_VFP_CVT_LOOP(CVT_PARAMS, CHECK, BODY) \
+ CHECK \
+ VI_VFP_LOOP_SCALE_BASE \
+ CVT_PARAMS \
+ BODY \
+ set_fp_exceptions; \
+ VI_VFP_LOOP_END
+
+#define VI_VFP_CVT_INT_TO_FP(BODY16, BODY32, BODY64, sign) \
+ VI_CHECK_SDS(false); \
switch(P.VU.vsew) { \
- case e8: {\
- CHECK8 \
- VI_VFP_LOOP_SCALE_BASE \
- BODY8 \
- set_fp_exceptions; \
- VI_VFP_LOOP_END \
- } \
+ case e16: \
+ { VI_VFP_CVT_LOOP(CVT_INT_TO_FP_PARAMS(16, 16, sign), {}, BODY16); } \
break; \
- case e16: {\
- CHECK16 \
- VI_VFP_LOOP_SCALE_BASE \
- BODY16 \
- set_fp_exceptions; \
- VI_VFP_LOOP_END \
- } \
+ case e32: \
+ { VI_VFP_CVT_LOOP(CVT_INT_TO_FP_PARAMS(32, 32, sign), {}, BODY32); } \
break; \
- case e32: {\
- CHECK32 \
- VI_VFP_LOOP_SCALE_BASE \
- BODY32 \
- set_fp_exceptions; \
- VI_VFP_LOOP_END \
- } \
+ case e64: \
+ { VI_VFP_CVT_LOOP(CVT_INT_TO_FP_PARAMS(64, 64, sign), {}, BODY64); } \
+ break; \
+ default: \
+ require(0); \
+ break; \
+ }
+
+#define VI_VFP_CVT_FP_TO_INT(BODY16, BODY32, BODY64, sign) \
+ VI_CHECK_SDS(false); \
+ switch(P.VU.vsew) { \
+ case e16: \
+ { VI_VFP_CVT_LOOP(CVT_FP_TO_INT_PARAMS(16, 16, sign), {}, BODY16); } \
+ break; \
+ case e32: \
+ { VI_VFP_CVT_LOOP(CVT_FP_TO_INT_PARAMS(32, 32, sign), {}, BODY32); } \
+ break; \
+ case e64: \
+ { VI_VFP_CVT_LOOP(CVT_FP_TO_INT_PARAMS(64, 64, sign), {}, BODY64); } \
+ break; \
+ default: \
+ require(0); \
+ break; \
+ }
+
+#define VI_VFP_WCVT_FP_TO_FP(BODY8, BODY16, BODY32, \
+ CHECK8, CHECK16, CHECK32) \
+ VI_CHECK_DSS(false); \
+ switch(P.VU.vsew) { \
+ case e16: \
+ { VI_VFP_CVT_LOOP(CVT_FP_TO_FP_PARAMS(16, 32), CHECK16, BODY16); } \
+ break; \
+ case e32: \
+ { VI_VFP_CVT_LOOP(CVT_FP_TO_FP_PARAMS(32, 64), CHECK32, BODY32); } \
+ break; \
+ default: \
+ require(0); \
+ break; \
+ }
+
+#define VI_VFP_WCVT_INT_TO_FP(BODY8, BODY16, BODY32, \
+ CHECK8, CHECK16, CHECK32, \
+ sign) \
+ VI_CHECK_DSS(false); \
+ switch(P.VU.vsew) { \
+ case e8: \
+ { VI_VFP_CVT_LOOP(CVT_INT_TO_FP_PARAMS(8, 16, sign), CHECK8, BODY8); } \
+ break; \
+ case e16: \
+ { VI_VFP_CVT_LOOP(CVT_INT_TO_FP_PARAMS(16, 32, sign), CHECK16, BODY16); } \
+ break; \
+ case e32: \
+ { VI_VFP_CVT_LOOP(CVT_INT_TO_FP_PARAMS(32, 64, sign), CHECK32, BODY32); } \
+ break; \
+ default: \
+ require(0); \
+ break; \
+ }
+
+#define VI_VFP_WCVT_FP_TO_INT(BODY8, BODY16, BODY32, \
+ CHECK8, CHECK16, CHECK32, \
+ sign) \
+ VI_CHECK_DSS(false); \
+ switch(P.VU.vsew) { \
+ case e16: \
+ { VI_VFP_CVT_LOOP(CVT_FP_TO_INT_PARAMS(16, 32, sign), CHECK16, BODY16); } \
+ break; \
+ case e32: \
+ { VI_VFP_CVT_LOOP(CVT_FP_TO_INT_PARAMS(32, 64, sign), CHECK32, BODY32); } \
+ break; \
+ default: \
+ require(0); \
+ break; \
+ }
+
+#define VI_VFP_NCVT_FP_TO_FP(BODY8, BODY16, BODY32, \
+ CHECK8, CHECK16, CHECK32) \
+ VI_CHECK_SDS(false); \
+ switch(P.VU.vsew) { \
+ case e16: \
+ { VI_VFP_CVT_LOOP(CVT_FP_TO_FP_PARAMS(32, 16), CHECK16, BODY16); } \
+ break; \
+ case e32: \
+ { VI_VFP_CVT_LOOP(CVT_FP_TO_FP_PARAMS(64, 32), CHECK32, BODY32); } \
+ break; \
+ default: \
+ require(0); \
+ break; \
+ }
+
+#define VI_VFP_NCVT_INT_TO_FP(BODY8, BODY16, BODY32, \
+ CHECK8, CHECK16, CHECK32, \
+ sign) \
+ VI_CHECK_SDS(false); \
+ switch(P.VU.vsew) { \
+ case e16: \
+ { VI_VFP_CVT_LOOP(CVT_INT_TO_FP_PARAMS(32, 16, sign), CHECK16, BODY16); } \
+ break; \
+ case e32: \
+ { VI_VFP_CVT_LOOP(CVT_INT_TO_FP_PARAMS(64, 32, sign), CHECK32, BODY32); } \
+ break; \
+ default: \
+ require(0); \
+ break; \
+ }
+
+#define VI_VFP_NCVT_FP_TO_INT(BODY8, BODY16, BODY32, \
+ CHECK8, CHECK16, CHECK32, \
+ sign) \
+ VI_CHECK_SDS(false); \
+ switch(P.VU.vsew) { \
+ case e8: \
+ { VI_VFP_CVT_LOOP(CVT_FP_TO_INT_PARAMS(16, 8, sign), CHECK8, BODY8); } \
+ break; \
+ case e16: \
+ { VI_VFP_CVT_LOOP(CVT_FP_TO_INT_PARAMS(32, 16, sign), CHECK16, BODY16); } \
+ break; \
+ case e32: \
+ { VI_VFP_CVT_LOOP(CVT_FP_TO_INT_PARAMS(64, 32, sign), CHECK32, BODY32); } \
break; \
default: \
require(0); \
diff --git a/riscv/insns/vfclass_v.h b/riscv/insns/vfclass_v.h
index 658f28a..a307d2d 100644
--- a/riscv/insns/vfclass_v.h
+++ b/riscv/insns/vfclass_v.h
@@ -1,11 +1,11 @@
// vfclass.v vd, vs2, vm
VI_VFP_V_LOOP
({
- vd.v = f16_classify(vs2);
+ vd = f16(f16_classify(vs2));
},
{
- vd.v = f32_classify(vs2);
+ vd = f32(f32_classify(vs2));
},
{
- vd.v = f64_classify(vs2);
+ vd = f64(f64_classify(vs2));
})
diff --git a/riscv/insns/vfcvt_f_x_v.h b/riscv/insns/vfcvt_f_x_v.h
index c53b0e1..d094c14 100644
--- a/riscv/insns/vfcvt_f_x_v.h
+++ b/riscv/insns/vfcvt_f_x_v.h
@@ -1,14 +1,7 @@
// vfcvt.f.x.v vd, vd2, vm
-VI_VFP_VF_LOOP
-({
- auto vs2_i = P.VU.elt<int16_t>(rs2_num, i);
- vd = i32_to_f16(vs2_i);
-},
-{
- auto vs2_i = P.VU.elt<int32_t>(rs2_num, i);
- vd = i32_to_f32(vs2_i);
-},
-{
- auto vs2_i = P.VU.elt<int64_t>(rs2_num, i);
- vd = i64_to_f64(vs2_i);
-})
+VI_VFP_CVT_INT_TO_FP(
+ { vd = i32_to_f16(vs2); }, // BODY16
+ { vd = i32_to_f32(vs2); }, // BODY32
+ { vd = i64_to_f64(vs2); }, // BODY64
+ int // sign
+)
diff --git a/riscv/insns/vfcvt_f_xu_v.h b/riscv/insns/vfcvt_f_xu_v.h
index bd03768..64dbb1c 100644
--- a/riscv/insns/vfcvt_f_xu_v.h
+++ b/riscv/insns/vfcvt_f_xu_v.h
@@ -1,14 +1,7 @@
// vfcvt.f.xu.v vd, vd2, vm
-VI_VFP_VF_LOOP
-({
- auto vs2_u = P.VU.elt<uint16_t>(rs2_num, i);
- vd = ui32_to_f16(vs2_u);
-},
-{
- auto vs2_u = P.VU.elt<uint32_t>(rs2_num, i);
- vd = ui32_to_f32(vs2_u);
-},
-{
- auto vs2_u = P.VU.elt<uint64_t>(rs2_num, i);
- vd = ui64_to_f64(vs2_u);
-})
+VI_VFP_CVT_INT_TO_FP(
+ { vd = ui32_to_f16(vs2); }, // BODY16
+ { vd = ui32_to_f32(vs2); }, // BODY32
+ { vd = ui64_to_f64(vs2); }, // BODY64
+ uint // sign
+)
diff --git a/riscv/insns/vfcvt_rtz_x_f_v.h b/riscv/insns/vfcvt_rtz_x_f_v.h
index e7241bd..ecdfa22 100644
--- a/riscv/insns/vfcvt_rtz_x_f_v.h
+++ b/riscv/insns/vfcvt_rtz_x_f_v.h
@@ -1,11 +1,7 @@
// vfcvt.rtz.x.f.v vd, vd2, vm
-VI_VFP_VF_LOOP
-({
- P.VU.elt<int16_t>(rd_num, i) = f16_to_i16(vs2, softfloat_round_minMag, true);
-},
-{
- P.VU.elt<int32_t>(rd_num, i) = f32_to_i32(vs2, softfloat_round_minMag, true);
-},
-{
- P.VU.elt<int64_t>(rd_num, i) = f64_to_i64(vs2, softfloat_round_minMag, true);
-})
+VI_VFP_CVT_FP_TO_INT(
+ { vd = f16_to_i16(vs2, softfloat_round_minMag, true); }, // BODY16
+ { vd = f32_to_i32(vs2, softfloat_round_minMag, true); }, // BODY32
+ { vd = f64_to_i64(vs2, softfloat_round_minMag, true); }, // BODY64
+ int // sign
+)
diff --git a/riscv/insns/vfcvt_rtz_xu_f_v.h b/riscv/insns/vfcvt_rtz_xu_f_v.h
index d3d266d..87585d2 100644
--- a/riscv/insns/vfcvt_rtz_xu_f_v.h
+++ b/riscv/insns/vfcvt_rtz_xu_f_v.h
@@ -1,11 +1,7 @@
// vfcvt.rtz.xu.f.v vd, vd2, vm
-VI_VFP_VF_LOOP
-({
- P.VU.elt<uint16_t>(rd_num, i) = f16_to_ui16(vs2, softfloat_round_minMag, true);
-},
-{
- P.VU.elt<uint32_t>(rd_num, i) = f32_to_ui32(vs2, softfloat_round_minMag, true);
-},
-{
- P.VU.elt<uint64_t>(rd_num, i) = f64_to_ui64(vs2, softfloat_round_minMag, true);
-})
+VI_VFP_CVT_FP_TO_INT(
+ { vd = f16_to_ui16(vs2, softfloat_round_minMag, true); }, // BODY16
+ { vd = f32_to_ui32(vs2, softfloat_round_minMag, true); }, // BODY32
+ { vd = f64_to_ui64(vs2, softfloat_round_minMag, true); }, // BODY64
+ uint // sign
+)
diff --git a/riscv/insns/vfcvt_x_f_v.h b/riscv/insns/vfcvt_x_f_v.h
index 7e507a3..4f21b52 100644
--- a/riscv/insns/vfcvt_x_f_v.h
+++ b/riscv/insns/vfcvt_x_f_v.h
@@ -1,11 +1,7 @@
// vfcvt.x.f.v vd, vd2, vm
-VI_VFP_VF_LOOP
-({
- P.VU.elt<int16_t>(rd_num, i) = f16_to_i16(vs2, STATE.frm->read(), true);
-},
-{
- P.VU.elt<int32_t>(rd_num, i) = f32_to_i32(vs2, STATE.frm->read(), true);
-},
-{
- P.VU.elt<int64_t>(rd_num, i) = f64_to_i64(vs2, STATE.frm->read(), true);
-})
+VI_VFP_CVT_FP_TO_INT(
+ { vd = f16_to_i16(vs2, softfloat_roundingMode, true); }, // BODY16
+ { vd = f32_to_i32(vs2, softfloat_roundingMode, true); }, // BODY32
+ { vd = f64_to_i64(vs2, softfloat_roundingMode, true); }, // BODY64
+ int // sign
+)
diff --git a/riscv/insns/vfcvt_xu_f_v.h b/riscv/insns/vfcvt_xu_f_v.h
index 51c00ca..ba50fff 100644
--- a/riscv/insns/vfcvt_xu_f_v.h
+++ b/riscv/insns/vfcvt_xu_f_v.h
@@ -1,11 +1,7 @@
// vfcvt.xu.f.v vd, vd2, vm
-VI_VFP_VV_LOOP
-({
- P.VU.elt<uint16_t>(rd_num, i) = f16_to_ui16(vs2, STATE.frm->read(), true);
-},
-{
- P.VU.elt<uint32_t>(rd_num, i) = f32_to_ui32(vs2, STATE.frm->read(), true);
-},
-{
- P.VU.elt<uint64_t>(rd_num, i) = f64_to_ui64(vs2, STATE.frm->read(), true);
-})
+VI_VFP_CVT_FP_TO_INT(
+ { vd = f16_to_ui16(vs2, softfloat_roundingMode, true); }, // BODY16
+ { vd = f32_to_ui32(vs2, softfloat_roundingMode, true); }, // BODY32
+ { vd = f64_to_ui64(vs2, softfloat_roundingMode, true); }, // BODY64
+ uint // sign
+)
diff --git a/riscv/insns/vfncvt_f_f_w.h b/riscv/insns/vfncvt_f_f_w.h
index 5448eb5..f4996f5 100644
--- a/riscv/insns/vfncvt_f_f_w.h
+++ b/riscv/insns/vfncvt_f_f_w.h
@@ -1,23 +1,9 @@
// vfncvt.f.f.v vd, vs2, vm
-VI_VFP_CVT_SCALE
-({
- ;
-},
-{
- auto vs2 = P.VU.elt<float32_t>(rs2_num, i);
- P.VU.elt<float16_t>(rd_num, i, true) = f32_to_f16(vs2);
-},
-{
- auto vs2 = P.VU.elt<float64_t>(rs2_num, i);
- P.VU.elt<float32_t>(rd_num, i, true) = f64_to_f32(vs2);
-},
-{
- ;
-},
-{
- require(p->extension_enabled(EXT_ZFH));
-},
-{
- require(p->extension_enabled('D'));
-},
-false, (P.VU.vsew >= 16))
+VI_VFP_NCVT_FP_TO_FP(
+ {;}, // BODY16
+ { vd = f32_to_f16(vs2); }, // BODY32
+ { vd = f64_to_f32(vs2); }, // BODY64
+ {;}, // CHECK16
+ { require_extension(EXT_ZFH); }, // CHECK32
+ { require_extension('D'); } // CHECK64
+)
diff --git a/riscv/insns/vfncvt_f_x_w.h b/riscv/insns/vfncvt_f_x_w.h
index 10a6f7b..d587be2 100644
--- a/riscv/insns/vfncvt_f_x_w.h
+++ b/riscv/insns/vfncvt_f_x_w.h
@@ -1,23 +1,10 @@
// vfncvt.f.x.v vd, vs2, vm
-VI_VFP_CVT_SCALE
-({
- ;
-},
-{
- auto vs2 = P.VU.elt<int32_t>(rs2_num, i);
- P.VU.elt<float16_t>(rd_num, i, true) = i32_to_f16(vs2);
-},
-{
- auto vs2 = P.VU.elt<int64_t>(rs2_num, i);
- P.VU.elt<float32_t>(rd_num, i, true) = i64_to_f32(vs2);
-},
-{
- ;
-},
-{
- require(p->extension_enabled(EXT_ZFH));
-},
-{
- require(p->extension_enabled('F'));
-},
-false, (P.VU.vsew >= 16))
+VI_VFP_NCVT_INT_TO_FP(
+ {;}, // BODY16
+ { vd = i32_to_f16(vs2); }, // BODY32
+ { vd = i64_to_f32(vs2); }, // BODY64
+ {;}, // CHECK16
+ { require_extension(EXT_ZFH); }, // CHECK32
+ { require_extension('F'); }, // CHECK64
+ int // sign
+)
diff --git a/riscv/insns/vfncvt_f_xu_w.h b/riscv/insns/vfncvt_f_xu_w.h
index 32b4b02..5e0e34f 100644
--- a/riscv/insns/vfncvt_f_xu_w.h
+++ b/riscv/insns/vfncvt_f_xu_w.h
@@ -1,23 +1,10 @@
// vfncvt.f.xu.v vd, vs2, vm
-VI_VFP_CVT_SCALE
-({
- ;
-},
-{
- auto vs2 = P.VU.elt<uint32_t>(rs2_num, i);
- P.VU.elt<float16_t>(rd_num, i, true) = ui32_to_f16(vs2);
-},
-{
- auto vs2 = P.VU.elt<uint64_t>(rs2_num, i);
- P.VU.elt<float32_t>(rd_num, i, true) = ui64_to_f32(vs2);
-},
-{
- ;
-},
-{
- require(p->extension_enabled(EXT_ZFH));
-},
-{
- require(p->extension_enabled('F'));
-},
-false, (P.VU.vsew >= 16))
+VI_VFP_NCVT_INT_TO_FP(
+ {;}, // BODY16
+ { vd = ui32_to_f16(vs2); }, // BODY32
+ { vd = ui64_to_f32(vs2); }, // BODY64
+ {;}, // CHECK16
+ { require_extension(EXT_ZFH); }, // CHECK32
+ { require_extension('F'); }, // CHECK64
+ uint // sign
+)
diff --git a/riscv/insns/vfncvt_rod_f_f_w.h b/riscv/insns/vfncvt_rod_f_f_w.h
index 20a14c9..89bdc05 100644
--- a/riscv/insns/vfncvt_rod_f_f_w.h
+++ b/riscv/insns/vfncvt_rod_f_f_w.h
@@ -1,25 +1,15 @@
// vfncvt.rod.f.f.v vd, vs2, vm
-VI_VFP_CVT_SCALE
-({
- ;
-},
-{
- softfloat_roundingMode = softfloat_round_odd;
- auto vs2 = P.VU.elt<float32_t>(rs2_num, i);
- P.VU.elt<float16_t>(rd_num, i, true) = f32_to_f16(vs2);
-},
-{
- softfloat_roundingMode = softfloat_round_odd;
- auto vs2 = P.VU.elt<float64_t>(rs2_num, i);
- P.VU.elt<float32_t>(rd_num, i, true) = f64_to_f32(vs2);
-},
-{
- ;
-},
-{
- require(p->extension_enabled(EXT_ZFH));
-},
-{
- require(p->extension_enabled('F'));
-},
-false, (P.VU.vsew >= 16))
+VI_VFP_NCVT_FP_TO_FP(
+ {;}, // BODY16
+ { // BODY32
+ softfloat_roundingMode = softfloat_round_odd;
+ vd = f32_to_f16(vs2);
+ },
+ { // BODY64
+ softfloat_roundingMode = softfloat_round_odd;
+ vd = f64_to_f32(vs2);
+ },
+ {;}, // CHECK16
+ { require_extension(EXT_ZFH); }, // CHECK32
+ { require_extension('F'); } // CHECK64
+)
diff --git a/riscv/insns/vfncvt_rtz_x_f_w.h b/riscv/insns/vfncvt_rtz_x_f_w.h
index 0629b8d..23b4d5e 100644
--- a/riscv/insns/vfncvt_rtz_x_f_w.h
+++ b/riscv/insns/vfncvt_rtz_x_f_w.h
@@ -1,24 +1,10 @@
// vfncvt.rtz.x.f.w vd, vs2, vm
-VI_VFP_CVT_SCALE
-({
- auto vs2 = P.VU.elt<float16_t>(rs2_num, i);
- P.VU.elt<int8_t>(rd_num, i, true) = f16_to_i8(vs2, softfloat_round_minMag, true);
-},
-{
- auto vs2 = P.VU.elt<float32_t>(rs2_num, i);
- P.VU.elt<int16_t>(rd_num, i, true) = f32_to_i16(vs2, softfloat_round_minMag, true);
-},
-{
- auto vs2 = P.VU.elt<float64_t>(rs2_num, i);
- P.VU.elt<int32_t>(rd_num, i, true) = f64_to_i32(vs2, softfloat_round_minMag, true);
-},
-{
- require(p->extension_enabled(EXT_ZFH));
-},
-{
- require(p->extension_enabled('F'));
-},
-{
- require(p->extension_enabled('D'));
-},
-false, (P.VU.vsew <= 32))
+VI_VFP_NCVT_FP_TO_INT(
+ { vd = f16_to_i8(vs2, softfloat_round_minMag, true); }, // BODY16
+ { vd = f32_to_i16(vs2, softfloat_round_minMag, true); }, // BODY32
+ { vd = f64_to_i32(vs2, softfloat_round_minMag, true); }, // BODY64
+ { require_extension(EXT_ZFH); }, // CHECK16
+ { require(p->extension_enabled('F')); }, // CHECK32
+ { require(p->extension_enabled('D')); }, // CHECK64
+ int // sign
+)
diff --git a/riscv/insns/vfncvt_rtz_xu_f_w.h b/riscv/insns/vfncvt_rtz_xu_f_w.h
index 82aa63e..f55c680 100644
--- a/riscv/insns/vfncvt_rtz_xu_f_w.h
+++ b/riscv/insns/vfncvt_rtz_xu_f_w.h
@@ -1,24 +1,10 @@
// vfncvt.rtz.xu.f.w vd, vs2, vm
-VI_VFP_CVT_SCALE
-({
- auto vs2 = P.VU.elt<float16_t>(rs2_num, i);
- P.VU.elt<uint8_t>(rd_num, i, true) = f16_to_ui8(vs2, softfloat_round_minMag, true);
-},
-{
- auto vs2 = P.VU.elt<float32_t>(rs2_num, i);
- P.VU.elt<uint16_t>(rd_num, i, true) = f32_to_ui16(vs2, softfloat_round_minMag, true);
-},
-{
- auto vs2 = P.VU.elt<float64_t>(rs2_num, i);
- P.VU.elt<uint32_t>(rd_num, i, true) = f64_to_ui32(vs2, softfloat_round_minMag, true);
-},
-{
- require(p->extension_enabled(EXT_ZFH));
-},
-{
- require(p->extension_enabled('F'));
-},
-{
- require(p->extension_enabled('D'));
-},
-false, (P.VU.vsew <= 32))
+VI_VFP_NCVT_FP_TO_INT(
+ { vd = f16_to_ui8(vs2, softfloat_round_minMag, true); }, // BODY16
+ { vd = f32_to_ui16(vs2, softfloat_round_minMag, true); }, // BODY32
+ { vd = f64_to_ui32(vs2, softfloat_round_minMag, true); }, // BODY64
+ { require_extension(EXT_ZFH); }, // CHECK16
+ { require(p->extension_enabled('F')); }, // CHECK32
+ { require(p->extension_enabled('D')); }, // CHECK64
+ uint // sign
+)
diff --git a/riscv/insns/vfncvt_x_f_w.h b/riscv/insns/vfncvt_x_f_w.h
index a8a6dfb..a7f3c33 100644
--- a/riscv/insns/vfncvt_x_f_w.h
+++ b/riscv/insns/vfncvt_x_f_w.h
@@ -1,24 +1,10 @@
// vfncvt.x.f.w vd, vs2, vm
-VI_VFP_CVT_SCALE
-({
- auto vs2 = P.VU.elt<float16_t>(rs2_num, i);
- P.VU.elt<int8_t>(rd_num, i, true) = f16_to_i8(vs2, STATE.frm->read(), true);
-},
-{
- auto vs2 = P.VU.elt<float32_t>(rs2_num, i);
- P.VU.elt<int16_t>(rd_num, i, true) = f32_to_i16(vs2, STATE.frm->read(), true);
-},
-{
- auto vs2 = P.VU.elt<float64_t>(rs2_num, i);
- P.VU.elt<int32_t>(rd_num, i, true) = f64_to_i32(vs2, STATE.frm->read(), true);
-},
-{
- require(p->extension_enabled(EXT_ZFH));
-},
-{
- require(p->extension_enabled('F'));
-},
-{
- require(p->extension_enabled('D'));
-},
-false, (P.VU.vsew <= 32))
+VI_VFP_NCVT_FP_TO_INT(
+ { vd = f16_to_i8(vs2, softfloat_roundingMode, true); }, // BODY16
+ { vd = f32_to_i16(vs2, softfloat_roundingMode, true); }, // BODY32
+ { vd = f64_to_i32(vs2, softfloat_roundingMode, true); }, // BODY64
+ { require_extension(EXT_ZFH); }, // CHECK16
+ { require(p->extension_enabled('F')); }, // CHECK32
+ { require(p->extension_enabled('D')); }, // CHECK64
+ int // sign
+)
diff --git a/riscv/insns/vfncvt_xu_f_w.h b/riscv/insns/vfncvt_xu_f_w.h
index bff733e..02046e8 100644
--- a/riscv/insns/vfncvt_xu_f_w.h
+++ b/riscv/insns/vfncvt_xu_f_w.h
@@ -1,24 +1,10 @@
// vfncvt.xu.f.w vd, vs2, vm
-VI_VFP_CVT_SCALE
-({
- auto vs2 = P.VU.elt<float16_t>(rs2_num, i);
- P.VU.elt<uint8_t>(rd_num, i, true) = f16_to_ui8(vs2, STATE.frm->read(), true);
-},
-{
- auto vs2 = P.VU.elt<float32_t>(rs2_num, i);
- P.VU.elt<uint16_t>(rd_num, i, true) = f32_to_ui16(vs2, STATE.frm->read(), true);
-},
-{
- auto vs2 = P.VU.elt<float64_t>(rs2_num, i);
- P.VU.elt<uint32_t>(rd_num, i, true) = f64_to_ui32(vs2, STATE.frm->read(), true);
-},
-{
- require(p->extension_enabled(EXT_ZFH));
-},
-{
- require(p->extension_enabled('F'));
-},
-{
- require(p->extension_enabled('D'));
-},
-false, (P.VU.vsew <= 32))
+VI_VFP_NCVT_FP_TO_INT(
+ { vd = f16_to_ui8(vs2, softfloat_roundingMode, true); }, // BODY16
+ { vd = f32_to_ui16(vs2, softfloat_roundingMode, true); }, // BODY32
+ { vd = f64_to_ui32(vs2, softfloat_roundingMode, true); }, // BODY64
+ { require_extension(EXT_ZFH); }, // CHECK16
+ { require(p->extension_enabled('F')); }, // CHECK32
+ { require(p->extension_enabled('D')); }, // CHECK64
+ uint // sign
+)
diff --git a/riscv/insns/vfwcvt_f_f_v.h b/riscv/insns/vfwcvt_f_f_v.h
index fcaf65c..0700070 100644
--- a/riscv/insns/vfwcvt_f_f_v.h
+++ b/riscv/insns/vfwcvt_f_f_v.h
@@ -1,23 +1,9 @@
// vfwcvt.f.f.v vd, vs2, vm
-VI_VFP_CVT_SCALE
-({
- ;
-},
-{
- auto vs2 = P.VU.elt<float16_t>(rs2_num, i);
- P.VU.elt<float32_t>(rd_num, i, true) = f16_to_f32(vs2);
-},
-{
- auto vs2 = P.VU.elt<float32_t>(rs2_num, i);
- P.VU.elt<float64_t>(rd_num, i, true) = f32_to_f64(vs2);
-},
-{
- ;
-},
-{
- require(p->extension_enabled(EXT_ZFH));
-},
-{
- require(p->extension_enabled('D'));
-},
-true, (P.VU.vsew >= 16))
+VI_VFP_WCVT_FP_TO_FP(
+ {;}, // BODY8
+ { vd = f16_to_f32(vs2); }, // BODY16
+ { vd = f32_to_f64(vs2); }, // BODY32
+ {;}, // CHECK8
+ { require_extension(EXT_ZFH); }, // CHECK16
+ { require_extension('D'); } // CHECK32
+)
diff --git a/riscv/insns/vfwcvt_f_x_v.h b/riscv/insns/vfwcvt_f_x_v.h
index 8d8283c..f51e8e3 100644
--- a/riscv/insns/vfwcvt_f_x_v.h
+++ b/riscv/insns/vfwcvt_f_x_v.h
@@ -1,24 +1,10 @@
// vfwcvt.f.x.v vd, vs2, vm
-VI_VFP_CVT_SCALE
-({
- auto vs2 = P.VU.elt<int8_t>(rs2_num, i);
- P.VU.elt<float16_t>(rd_num, i, true) = i32_to_f16(vs2);
-},
-{
- auto vs2 = P.VU.elt<int16_t>(rs2_num, i);
- P.VU.elt<float32_t>(rd_num, i, true) = i32_to_f32(vs2);
-},
-{
- auto vs2 = P.VU.elt<int32_t>(rs2_num, i);
- P.VU.elt<float64_t>(rd_num, i, true) = i32_to_f64(vs2);
-},
-{
- require(p->extension_enabled(EXT_ZFH));
-},
-{
- require(p->extension_enabled('F'));
-},
-{
- require(p->extension_enabled('D'));
-},
-true, (P.VU.vsew >= 8))
+VI_VFP_WCVT_INT_TO_FP(
+ { vd = i32_to_f16(vs2); }, // BODY8
+ { vd = i32_to_f32(vs2); }, // BODY16
+ { vd = i32_to_f64(vs2); }, // BODY32
+ { require(p->extension_enabled(EXT_ZFH)); }, // CHECK8
+ { require_extension('F'); }, // CHECK16
+ { require_extension('D'); }, // CHECK32
+ int // sign
+)
diff --git a/riscv/insns/vfwcvt_f_xu_v.h b/riscv/insns/vfwcvt_f_xu_v.h
index e8036ce..7dd4972 100644
--- a/riscv/insns/vfwcvt_f_xu_v.h
+++ b/riscv/insns/vfwcvt_f_xu_v.h
@@ -1,24 +1,10 @@
// vfwcvt.f.xu.v vd, vs2, vm
-VI_VFP_CVT_SCALE
-({
- auto vs2 = P.VU.elt<uint8_t>(rs2_num, i);
- P.VU.elt<float16_t>(rd_num, i, true) = ui32_to_f16(vs2);
-},
-{
- auto vs2 = P.VU.elt<uint16_t>(rs2_num, i);
- P.VU.elt<float32_t>(rd_num, i, true) = ui32_to_f32(vs2);
-},
-{
- auto vs2 = P.VU.elt<uint32_t>(rs2_num, i);
- P.VU.elt<float64_t>(rd_num, i, true) = ui32_to_f64(vs2);
-},
-{
- require(p->extension_enabled(EXT_ZFH));
-},
-{
- require(p->extension_enabled('F'));
-},
-{
- require(p->extension_enabled('D'));
-},
-true, (P.VU.vsew >= 8))
+VI_VFP_WCVT_INT_TO_FP(
+ { vd = ui32_to_f16(vs2); }, // BODY8
+ { vd = ui32_to_f32(vs2); }, // BODY16
+ { vd = ui32_to_f64(vs2); }, // BODY32
+ { require(p->extension_enabled(EXT_ZFH)); }, // CHECK8
+ { require_extension('F'); }, // CHECK16
+ { require_extension('D'); }, // CHECK32
+ uint // sign
+)
diff --git a/riscv/insns/vfwcvt_rtz_x_f_v.h b/riscv/insns/vfwcvt_rtz_x_f_v.h
index 83fa764..74e5b9a 100644
--- a/riscv/insns/vfwcvt_rtz_x_f_v.h
+++ b/riscv/insns/vfwcvt_rtz_x_f_v.h
@@ -1,23 +1,10 @@
// vfwcvt.rtz.x.f.v vd, vs2, vm
-VI_VFP_CVT_SCALE
-({
- ;
-},
-{
- auto vs2 = P.VU.elt<float16_t>(rs2_num, i);
- P.VU.elt<int32_t>(rd_num, i, true) = f16_to_i32(vs2, softfloat_round_minMag, true);
-},
-{
- auto vs2 = P.VU.elt<float32_t>(rs2_num, i);
- P.VU.elt<int64_t>(rd_num, i, true) = f32_to_i64(vs2, softfloat_round_minMag, true);
-},
-{
- ;
-},
-{
- require(p->extension_enabled(EXT_ZFH));
-},
-{
- require(p->extension_enabled('F'));
-},
-true, (P.VU.vsew >= 16))
+VI_VFP_WCVT_FP_TO_INT(
+ {;}, // BODY8
+ { vd = f16_to_i32(vs2, softfloat_round_minMag, true); }, // BODY16
+ { vd = f32_to_i64(vs2, softfloat_round_minMag, true); }, // BODY32
+ {;}, // CHECK8
+ { require_extension(EXT_ZFH); }, // CHECK16
+ { require_extension('F'); }, // CHECK32
+ int // sign
+)
diff --git a/riscv/insns/vfwcvt_rtz_xu_f_v.h b/riscv/insns/vfwcvt_rtz_xu_f_v.h
index 43d1979..72b8c6e 100644
--- a/riscv/insns/vfwcvt_rtz_xu_f_v.h
+++ b/riscv/insns/vfwcvt_rtz_xu_f_v.h
@@ -1,23 +1,10 @@
// vfwcvt.rtz,xu.f.v vd, vs2, vm
-VI_VFP_CVT_SCALE
-({
- ;
-},
-{
- auto vs2 = P.VU.elt<float16_t>(rs2_num, i);
- P.VU.elt<uint32_t>(rd_num, i, true) = f16_to_ui32(vs2, softfloat_round_minMag, true);
-},
-{
- auto vs2 = P.VU.elt<float32_t>(rs2_num, i);
- P.VU.elt<uint64_t>(rd_num, i, true) = f32_to_ui64(vs2, softfloat_round_minMag, true);
-},
-{
- ;
-},
-{
- require(p->extension_enabled(EXT_ZFH));
-},
-{
- require(p->extension_enabled('F'));
-},
-true, (P.VU.vsew >= 16))
+VI_VFP_WCVT_FP_TO_INT(
+ {;}, // BODY8
+ { vd = f16_to_ui32(vs2, softfloat_round_minMag, true); }, // BODY16
+ { vd = f32_to_ui64(vs2, softfloat_round_minMag, true); }, // BODY32
+ {;}, // CHECK8
+ { require_extension(EXT_ZFH); }, // CHECK16
+ { require_extension('F'); }, // CHECK32
+ uint // sign
+)
diff --git a/riscv/insns/vfwcvt_x_f_v.h b/riscv/insns/vfwcvt_x_f_v.h
index 5e0c064..74497f4 100644
--- a/riscv/insns/vfwcvt_x_f_v.h
+++ b/riscv/insns/vfwcvt_x_f_v.h
@@ -1,23 +1,10 @@
// vfwcvt.x.f.v vd, vs2, vm
-VI_VFP_CVT_SCALE
-({
- ;
-},
-{
- auto vs2 = P.VU.elt<float16_t>(rs2_num, i);
- P.VU.elt<int32_t>(rd_num, i, true) = f16_to_i32(vs2, STATE.frm->read(), true);
-},
-{
- auto vs2 = P.VU.elt<float32_t>(rs2_num, i);
- P.VU.elt<int64_t>(rd_num, i, true) = f32_to_i64(vs2, STATE.frm->read(), true);
-},
-{
- ;
-},
-{
- require(p->extension_enabled(EXT_ZFH));
-},
-{
- require(p->extension_enabled('F'));
-},
-true, (P.VU.vsew >= 16))
+VI_VFP_WCVT_FP_TO_INT(
+ {;}, // BODY8
+ { vd = f16_to_i32(vs2, softfloat_roundingMode, true); }, // BODY16
+ { vd = f32_to_i64(vs2, softfloat_roundingMode, true); }, // BODY32
+ {;}, // CHECK8
+ { require_extension(EXT_ZFH); }, // CHECK16
+ { require_extension('F'); }, // CHECK32
+ int // sign
+)
diff --git a/riscv/insns/vfwcvt_xu_f_v.h b/riscv/insns/vfwcvt_xu_f_v.h
index f3243c8..ad96c9c 100644
--- a/riscv/insns/vfwcvt_xu_f_v.h
+++ b/riscv/insns/vfwcvt_xu_f_v.h
@@ -1,23 +1,10 @@
// vfwcvt.xu.f.v vd, vs2, vm
-VI_VFP_CVT_SCALE
-({
- ;
-},
-{
- auto vs2 = P.VU.elt<float16_t>(rs2_num, i);
- P.VU.elt<uint32_t>(rd_num, i, true) = f16_to_ui32(vs2, STATE.frm->read(), true);
-},
-{
- auto vs2 = P.VU.elt<float32_t>(rs2_num, i);
- P.VU.elt<uint64_t>(rd_num, i, true) = f32_to_ui64(vs2, STATE.frm->read(), true);
-},
-{
- ;
-},
-{
- require(p->extension_enabled(EXT_ZFH));
-},
-{
- require(p->extension_enabled('F'));
-},
-true, (P.VU.vsew >= 16))
+VI_VFP_WCVT_FP_TO_INT(
+ {;}, // BODY8
+ { vd = f16_to_ui32(vs2, softfloat_roundingMode, true); }, // BODY16
+ { vd = f32_to_ui64(vs2, softfloat_roundingMode, true); }, // BODY32
+ {;}, // CHECK8
+ { require_extension(EXT_ZFH); }, // CHECK16
+ { require_extension('F'); }, // CHECK32
+ uint // sign
+)