Diffstat (limited to 'riscv')
-rw-r--r--  riscv/cachesim.cc | 6
-rw-r--r--  riscv/cachesim.h | 5
-rw-r--r--  riscv/cfg.h | 19
-rw-r--r--  riscv/clint.cc | 1
-rw-r--r--  riscv/common.h | 4
-rw-r--r--  riscv/csrs.cc | 519
-rw-r--r--  riscv/csrs.h | 161
-rw-r--r--  riscv/debug_module.cc | 64
-rw-r--r--  riscv/debug_module.h | 33
-rw-r--r--  riscv/decode.h | 2641
-rw-r--r--  riscv/devices.cc | 13
-rw-r--r--  riscv/devices.h | 3
-rw-r--r--  riscv/dts.cc | 5
-rw-r--r--  riscv/dts.h | 1
-rw-r--r--  riscv/encoding.h | 6825
-rw-r--r--  riscv/entropy_source.h | 48
-rw-r--r--  riscv/execute.cc | 48
-rw-r--r--  riscv/extension.h | 2
-rw-r--r--  riscv/insn_macros.h | 2
-rw-r--r--  riscv/insns/aes64ks1i.h | 7
-rw-r--r--  riscv/insns/aes_common.h | 4
-rw-r--r--  riscv/insns/amoswap_d.h | 2
-rw-r--r--  riscv/insns/amoswap_w.h | 2
-rw-r--r--  riscv/insns/beq.h | 2
-rw-r--r--  riscv/insns/bge.h | 2
-rw-r--r--  riscv/insns/bgeu.h | 2
-rw-r--r--  riscv/insns/blt.h | 2
-rw-r--r--  riscv/insns/bltu.h | 2
-rw-r--r--  riscv/insns/bne.h | 2
-rw-r--r--  riscv/insns/c_ebreak.h | 9
-rw-r--r--  riscv/insns/div.h | 4
-rw-r--r--  riscv/insns/divu.h | 2
-rw-r--r--  riscv/insns/divuw.h | 2
-rw-r--r--  riscv/insns/divw.h | 2
-rw-r--r--  riscv/insns/ebreak.h | 9
-rw-r--r--  riscv/insns/fadd_d.h | 4
-rw-r--r--  riscv/insns/fadd_h.h | 4
-rw-r--r--  riscv/insns/fadd_s.h | 4
-rw-r--r--  riscv/insns/fclass_d.h | 4
-rw-r--r--  riscv/insns/fclass_h.h | 4
-rw-r--r--  riscv/insns/fclass_s.h | 4
-rw-r--r--  riscv/insns/fcvt_d_h.h | 6
-rw-r--r--  riscv/insns/fcvt_d_l.h | 4
-rw-r--r--  riscv/insns/fcvt_d_lu.h | 4
-rw-r--r--  riscv/insns/fcvt_d_s.h | 4
-rw-r--r--  riscv/insns/fcvt_d_w.h | 4
-rw-r--r--  riscv/insns/fcvt_d_wu.h | 4
-rw-r--r--  riscv/insns/fcvt_h_d.h | 6
-rw-r--r--  riscv/insns/fcvt_h_l.h | 4
-rw-r--r--  riscv/insns/fcvt_h_lu.h | 4
-rw-r--r--  riscv/insns/fcvt_h_s.h | 4
-rw-r--r--  riscv/insns/fcvt_h_w.h | 4
-rw-r--r--  riscv/insns/fcvt_h_wu.h | 4
-rw-r--r--  riscv/insns/fcvt_l_d.h | 4
-rw-r--r--  riscv/insns/fcvt_l_h.h | 4
-rw-r--r--  riscv/insns/fcvt_l_s.h | 4
-rw-r--r--  riscv/insns/fcvt_lu_d.h | 4
-rw-r--r--  riscv/insns/fcvt_lu_h.h | 4
-rw-r--r--  riscv/insns/fcvt_lu_s.h | 4
-rw-r--r--  riscv/insns/fcvt_s_d.h | 4
-rw-r--r--  riscv/insns/fcvt_s_h.h | 4
-rw-r--r--  riscv/insns/fcvt_s_l.h | 4
-rw-r--r--  riscv/insns/fcvt_s_lu.h | 4
-rw-r--r--  riscv/insns/fcvt_s_w.h | 4
-rw-r--r--  riscv/insns/fcvt_s_wu.h | 4
-rw-r--r--  riscv/insns/fcvt_w_d.h | 4
-rw-r--r--  riscv/insns/fcvt_w_h.h | 4
-rw-r--r--  riscv/insns/fcvt_w_s.h | 4
-rw-r--r--  riscv/insns/fcvt_wu_d.h | 4
-rw-r--r--  riscv/insns/fcvt_wu_h.h | 4
-rw-r--r--  riscv/insns/fcvt_wu_s.h | 4
-rw-r--r--  riscv/insns/fdiv_d.h | 4
-rw-r--r--  riscv/insns/fdiv_h.h | 4
-rw-r--r--  riscv/insns/fdiv_s.h | 4
-rw-r--r--  riscv/insns/feq_d.h | 4
-rw-r--r--  riscv/insns/feq_h.h | 4
-rw-r--r--  riscv/insns/feq_s.h | 4
-rw-r--r--  riscv/insns/fle_d.h | 4
-rw-r--r--  riscv/insns/fle_h.h | 4
-rw-r--r--  riscv/insns/fle_s.h | 4
-rw-r--r--  riscv/insns/flt_d.h | 4
-rw-r--r--  riscv/insns/flt_h.h | 4
-rw-r--r--  riscv/insns/flt_s.h | 4
-rw-r--r--  riscv/insns/fmadd_d.h | 4
-rw-r--r--  riscv/insns/fmadd_h.h | 4
-rw-r--r--  riscv/insns/fmadd_s.h | 4
-rw-r--r--  riscv/insns/fmax_d.h | 12
-rw-r--r--  riscv/insns/fmax_h.h | 4
-rw-r--r--  riscv/insns/fmax_s.h | 12
-rw-r--r--  riscv/insns/fmin_d.h | 12
-rw-r--r--  riscv/insns/fmin_h.h | 4
-rw-r--r--  riscv/insns/fmin_s.h | 12
-rw-r--r--  riscv/insns/fmsub_d.h | 4
-rw-r--r--  riscv/insns/fmsub_h.h | 4
-rw-r--r--  riscv/insns/fmsub_s.h | 4
-rw-r--r--  riscv/insns/fmul_d.h | 4
-rw-r--r--  riscv/insns/fmul_h.h | 4
-rw-r--r--  riscv/insns/fmul_s.h | 4
-rw-r--r--  riscv/insns/fnmadd_d.h | 4
-rw-r--r--  riscv/insns/fnmadd_h.h | 4
-rw-r--r--  riscv/insns/fnmadd_s.h | 4
-rw-r--r--  riscv/insns/fnmsub_d.h | 4
-rw-r--r--  riscv/insns/fnmsub_h.h | 4
-rw-r--r--  riscv/insns/fnmsub_s.h | 4
-rw-r--r--  riscv/insns/fsgnj_d.h | 4
-rw-r--r--  riscv/insns/fsgnj_h.h | 4
-rw-r--r--  riscv/insns/fsgnj_s.h | 4
-rw-r--r--  riscv/insns/fsgnjn_d.h | 4
-rw-r--r--  riscv/insns/fsgnjn_h.h | 4
-rw-r--r--  riscv/insns/fsgnjn_q.h | 2
-rw-r--r--  riscv/insns/fsgnjn_s.h | 4
-rw-r--r--  riscv/insns/fsgnjx_d.h | 4
-rw-r--r--  riscv/insns/fsgnjx_h.h | 4
-rw-r--r--  riscv/insns/fsgnjx_s.h | 4
-rw-r--r--  riscv/insns/fsqrt_d.h | 4
-rw-r--r--  riscv/insns/fsqrt_h.h | 4
-rw-r--r--  riscv/insns/fsqrt_s.h | 4
-rw-r--r--  riscv/insns/fsub_d.h | 4
-rw-r--r--  riscv/insns/fsub_h.h | 4
-rw-r--r--  riscv/insns/fsub_s.h | 4
-rw-r--r--  riscv/insns/kmar64.h | 1
-rw-r--r--  riscv/insns/kmmawb2.h | 2
-rw-r--r--  riscv/insns/kmmawb2_u.h | 2
-rw-r--r--  riscv/insns/kmmawt2.h | 2
-rw-r--r--  riscv/insns/kmmawt2_u.h | 2
-rw-r--r--  riscv/insns/kmmwb2.h | 2
-rw-r--r--  riscv/insns/kmmwb2_u.h | 2
-rw-r--r--  riscv/insns/kmmwt2.h | 2
-rw-r--r--  riscv/insns/kmmwt2_u.h | 2
-rw-r--r--  riscv/insns/kslra16_u.h | 2
-rw-r--r--  riscv/insns/kslra32_u.h | 2
-rw-r--r--  riscv/insns/kslra8_u.h | 2
-rw-r--r--  riscv/insns/kwmmul.h | 2
-rw-r--r--  riscv/insns/kwmmul_u.h | 2
-rw-r--r--  riscv/insns/rem.h | 4
-rw-r--r--  riscv/insns/remu.h | 2
-rw-r--r--  riscv/insns/remuw.h | 2
-rw-r--r--  riscv/insns/remw.h | 2
-rw-r--r--  riscv/insns/rsub64.h | 2
-rw-r--r--  riscv/insns/smul16.h | 2
-rw-r--r--  riscv/insns/smul8.h | 2
-rw-r--r--  riscv/insns/smulx16.h | 2
-rw-r--r--  riscv/insns/smulx8.h | 2
-rw-r--r--  riscv/insns/sra16_u.h | 2
-rw-r--r--  riscv/insns/sra32_u.h | 2
-rw-r--r--  riscv/insns/sra8_u.h | 2
-rw-r--r--  riscv/insns/umul16.h | 2
-rw-r--r--  riscv/insns/umul8.h | 2
-rw-r--r--  riscv/insns/umulx16.h | 2
-rw-r--r--  riscv/insns/umulx8.h | 2
-rw-r--r--  riscv/insns/vcpop_m.h | 2
-rw-r--r--  riscv/insns/vdiv_vx.h | 4
-rw-r--r--  riscv/insns/vdivu_vv.h | 2
-rw-r--r--  riscv/insns/vdivu_vx.h | 2
-rw-r--r--  riscv/insns/vfirst_m.h | 2
-rw-r--r--  riscv/insns/vfmv_f_s.h | 2
-rw-r--r--  riscv/insns/vfmv_s_f.h | 2
-rw-r--r--  riscv/insns/vfslide1down_vf.h | 6
-rw-r--r--  riscv/insns/vfslide1up_vf.h | 6
-rw-r--r--  riscv/insns/vid_v.h | 3
-rw-r--r--  riscv/insns/viota_m.h | 1
-rw-r--r--  riscv/insns/vmsbf_m.h | 2
-rw-r--r--  riscv/insns/vmsif_m.h | 2
-rw-r--r--  riscv/insns/vmsof_m.h | 2
-rw-r--r--  riscv/insns/vmv_s_x.h | 2
-rw-r--r--  riscv/insns/vmv_x_s.h | 20
-rw-r--r--  riscv/insns/vmvnfr_v.h | 5
-rw-r--r--  riscv/insns/vrem_vv.h | 2
-rw-r--r--  riscv/insns/vrgather_vi.h | 6
-rw-r--r--  riscv/insns/vsadd_vi.h | 2
-rw-r--r--  riscv/insns/vsadd_vv.h | 2
-rw-r--r--  riscv/insns/vsadd_vx.h | 2
-rw-r--r--  riscv/insns/vsetivli.h | 2
-rw-r--r--  riscv/insns/vsetvl.h | 2
-rw-r--r--  riscv/insns/vsetvli.h | 2
-rw-r--r--  riscv/insns/vslide1up_vx.h | 12
-rw-r--r--  riscv/insns/vsmul_vv.h | 12
-rw-r--r--  riscv/insns/vsmul_vx.h | 9
-rw-r--r--  riscv/insns/wfi.h | 4
-rw-r--r--  riscv/interactive.cc | 336
-rw-r--r--  riscv/isa_parser.cc | 29
-rw-r--r--  riscv/isa_parser.h | 6
-rw-r--r--  riscv/mmu.cc | 141
-rw-r--r--  riscv/mmu.h | 163
-rw-r--r--  riscv/p_ext_macros.h | 506
-rw-r--r--  riscv/processor.cc | 188
-rw-r--r--  riscv/processor.h | 34
-rw-r--r--  riscv/riscv.mk.in | 35
-rw-r--r--  riscv/rocc.cc | 10
-rw-r--r--  riscv/rom.cc | 2
-rw-r--r--  riscv/sim.cc | 7
-rw-r--r--  riscv/sim.h | 5
-rw-r--r--  riscv/simif.h | 2
-rw-r--r--  riscv/tracer.h | 2
-rw-r--r--  riscv/trap.h | 7
-rw-r--r--  riscv/triggers.cc | 38
-rw-r--r--  riscv/triggers.h | 46
-rw-r--r--  riscv/v_ext_macros.h | 2065
198 files changed, 7749 insertions, 6885 deletions
diff --git a/riscv/cachesim.cc b/riscv/cachesim.cc
index 48840cb..498d407 100644
--- a/riscv/cachesim.cc
+++ b/riscv/cachesim.cc
@@ -39,9 +39,9 @@ cache_sim_t* cache_sim_t::construct(const char* config, const char* name)
void cache_sim_t::init()
{
- if(sets == 0 || (sets & (sets-1)))
+ if (sets == 0 || (sets & (sets-1)))
help();
- if(linesz < 8 || (linesz & (linesz-1)))
+ if (linesz < 8 || (linesz & (linesz-1)))
help();
idx_shift = 0;
@@ -76,7 +76,7 @@ cache_sim_t::~cache_sim_t()
void cache_sim_t::print_stats()
{
- if(read_accesses + write_accesses == 0)
+ if (read_accesses + write_accesses == 0)
return;
float mr = 100.0f*(read_misses+write_misses)/(read_accesses+write_accesses);
diff --git a/riscv/cachesim.h b/riscv/cachesim.h
index b7f9014..d7046f9 100644
--- a/riscv/cachesim.h
+++ b/riscv/cachesim.h
@@ -4,6 +4,7 @@
#define _RISCV_CACHE_SIM_H
#include "memtracer.h"
+#include "common.h"
#include <cstring>
#include <string>
#include <map>
@@ -108,7 +109,7 @@ class icache_sim_t : public cache_memtracer_t
{
public:
icache_sim_t(const char* config) : cache_memtracer_t(config, "I$") {}
- bool interested_in_range(uint64_t begin, uint64_t end, access_type type)
+ bool interested_in_range(uint64_t UNUSED begin, uint64_t UNUSED end, access_type type)
{
return type == FETCH;
}
@@ -122,7 +123,7 @@ class dcache_sim_t : public cache_memtracer_t
{
public:
dcache_sim_t(const char* config) : cache_memtracer_t(config, "D$") {}
- bool interested_in_range(uint64_t begin, uint64_t end, access_type type)
+ bool interested_in_range(uint64_t UNUSED begin, uint64_t UNUSED end, access_type type)
{
return type == LOAD || type == STORE;
}
diff --git a/riscv/cfg.h b/riscv/cfg.h
index 6369bd8..dbdb58b 100644
--- a/riscv/cfg.h
+++ b/riscv/cfg.h
@@ -32,18 +32,22 @@ private:
class mem_cfg_t
{
public:
- mem_cfg_t(reg_t base, reg_t size)
- : base(base), size(size)
- {
- // The truth of these assertions should be ensured by whatever is creating
+ static bool check_if_supported(reg_t base, reg_t size) {
+ // The truth of these conditions should be ensured by whatever is creating
// the regions in the first place, but we have them here to make sure that
// we can't end up describing memory regions that don't make sense. They
// ask that the page size is a multiple of the minimum page size, that the
// page is aligned to the minimum page size, that the page is non-empty and
// that the top address is still representable in a reg_t.
- assert((size % PGSIZE == 0) &&
+ return (size % PGSIZE == 0) &&
(base % PGSIZE == 0) &&
- (base + size > base));
+ (base + size > base);
+ }
+
+ mem_cfg_t(reg_t base, reg_t size)
+ : base(base), size(size)
+ {
+ assert(mem_cfg_t::check_if_supported(base, size));
}
reg_t base;
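
The factored-out predicate is easy to exercise on its own. A minimal standalone sketch of the same checks, assuming the usual 4 KiB PGSIZE (not Spike's actual headers):

    #include <cassert>
    #include <cstdint>

    static constexpr uint64_t PGSIZE = 4096; // assumed minimum page size

    static bool check_if_supported(uint64_t base, uint64_t size) {
      return (size % PGSIZE == 0) &&  // whole number of pages
             (base % PGSIZE == 0) &&  // page-aligned base
             (base + size > base);    // non-empty, and the top address doesn't wrap
    }

    int main() {
      assert(check_if_supported(0x80000000, 0x10000000)); // typical DRAM region
      assert(!check_if_supported(0x1000, 0x800));         // sub-page size rejected
      assert(!check_if_supported(~0xfffULL, 0x2000));     // wraps past 2^64
    }
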
@@ -57,6 +61,7 @@ public:
const char *default_bootargs,
const char *default_isa, const char *default_priv,
const char *default_varch,
+ const reg_t default_pmpregions,
const std::vector<mem_cfg_t> &default_mem_layout,
const std::vector<int> default_hartids,
bool default_real_time_clint)
@@ -65,6 +70,7 @@ public:
isa(default_isa),
priv(default_priv),
varch(default_varch),
+ pmpregions(default_pmpregions),
mem_layout(default_mem_layout),
hartids(default_hartids),
explicit_hartids(false),
@@ -76,6 +82,7 @@ public:
cfg_arg_t<const char *> isa;
cfg_arg_t<const char *> priv;
cfg_arg_t<const char *> varch;
+ reg_t pmpregions;
cfg_arg_t<std::vector<mem_cfg_t>> mem_layout;
std::optional<reg_t> start_pc;
cfg_arg_t<std::vector<int>> hartids;
diff --git a/riscv/clint.cc b/riscv/clint.cc
index 72d1bbe..3f2d4d7 100644
--- a/riscv/clint.cc
+++ b/riscv/clint.cc
@@ -82,6 +82,7 @@ void clint_t::increment(reg_t inc)
mtime += inc;
}
for (size_t i = 0; i < procs.size(); i++) {
+ procs[i]->state.time->sync(mtime);
procs[i]->state.mip->backdoor_write_with_mask(MIP_MTIP, 0);
if (mtime >= mtimecmp[i])
procs[i]->state.mip->backdoor_write_with_mask(MIP_MTIP, MIP_MTIP);
diff --git a/riscv/common.h b/riscv/common.h
index 002a83f..a354ced 100644
--- a/riscv/common.h
+++ b/riscv/common.h
@@ -8,11 +8,15 @@
# define unlikely(x) __builtin_expect(x, 0)
# define NOINLINE __attribute__ ((noinline))
# define NORETURN __attribute__ ((noreturn))
+# define ALWAYS_INLINE __attribute__ ((always_inline))
+# define UNUSED __attribute__ ((unused))
#else
# define likely(x) (x)
# define unlikely(x) (x)
# define NOINLINE
# define NORETURN
+# define ALWAYS_INLINE
+# define UNUSED
#endif
#endif
diff --git a/riscv/csrs.cc b/riscv/csrs.cc
index 98edacf..93b0bae 100644
--- a/riscv/csrs.cc
+++ b/riscv/csrs.cc
@@ -1,5 +1,8 @@
// See LICENSE for license details.
+// For std::any_of
+#include <algorithm>
+
#include "csrs.h"
// For processor_t:
#include "processor.h"
@@ -15,7 +18,6 @@
#undef STATE
#define STATE (*state)
-
// implement class csr_t
csr_t::csr_t(processor_t* const proc, const reg_t addr):
proc(proc),
@@ -44,7 +46,6 @@ void csr_t::verify_permissions(insn_t insn, bool write) const {
}
}
-
csr_t::~csr_t() {
}
@@ -59,7 +60,7 @@ void csr_t::log_write() const noexcept {
log_special_write(address, written_value());
}
-void csr_t::log_special_write(const reg_t address, const reg_t val) const noexcept {
+void csr_t::log_special_write(const reg_t UNUSED address, const reg_t UNUSED val) const noexcept {
#if defined(RISCV_ENABLE_COMMITLOG)
proc->get_state()->log_reg_write[((address) << 4) | 4] = {val, 0};
#endif
@@ -80,7 +81,6 @@ bool basic_csr_t::unlogged_write(const reg_t val) noexcept {
return true;
}
-
// implement class pmpaddr_csr_t
pmpaddr_csr_t::pmpaddr_csr_t(processor_t* const proc, const reg_t addr):
csr_t(proc, addr),
@@ -89,7 +89,6 @@ pmpaddr_csr_t::pmpaddr_csr_t(processor_t* const proc, const reg_t addr):
pmpidx(address - CSR_PMPADDR0) {
}
-
void pmpaddr_csr_t::verify_permissions(insn_t insn, bool write) const {
csr_t::verify_permissions(insn, write);
// If n_pmp is zero, that means pmp is not implemented, hence raise a
// trap if it tries to access the csr. I would prefer to implement
throw trap_illegal_instruction(insn.bits());
}
-
reg_t pmpaddr_csr_t::read() const noexcept {
if ((cfg & PMP_A) >= PMP_NAPOT)
return val | (~proc->pmp_tor_mask() >> 1);
return val & proc->pmp_tor_mask();
}
-
bool pmpaddr_csr_t::unlogged_write(const reg_t val) noexcept {
// If no PMPs are configured, disallow access to all. Otherwise,
// allow access to all, but unimplemented ones are hardwired to
@@ -117,7 +114,9 @@ bool pmpaddr_csr_t::unlogged_write(const reg_t val) noexcept {
if (proc->n_pmp == 0)
return false;
- bool locked = cfg & PMP_L;
+ const bool lock_bypass = state->mseccfg->get_rlb();
+ const bool locked = !lock_bypass && (cfg & PMP_L);
+
if (pmpidx < proc->n_pmp && !locked && !next_locked_and_tor()) {
this->val = val & ((reg_t(1) << (MAX_PADDR_BITS - PMP_SHIFT)) - 1);
}
@@ -129,30 +128,27 @@ bool pmpaddr_csr_t::unlogged_write(const reg_t val) noexcept {
bool pmpaddr_csr_t::next_locked_and_tor() const noexcept {
if (pmpidx+1 >= state->max_pmp) return false; // this is the last entry
- bool next_locked = state->pmpaddr[pmpidx+1]->cfg & PMP_L;
- bool next_tor = (state->pmpaddr[pmpidx+1]->cfg & PMP_A) == PMP_TOR;
+ const bool lock_bypass = state->mseccfg->get_rlb();
+ const bool next_locked = !lock_bypass && (state->pmpaddr[pmpidx+1]->cfg & PMP_L);
+ const bool next_tor = (state->pmpaddr[pmpidx+1]->cfg & PMP_A) == PMP_TOR;
return next_locked && next_tor;
}
-
reg_t pmpaddr_csr_t::tor_paddr() const noexcept {
return (val & proc->pmp_tor_mask()) << PMP_SHIFT;
}
-
reg_t pmpaddr_csr_t::tor_base_paddr() const noexcept {
if (pmpidx == 0) return 0; // entry 0 always uses 0 as base
return state->pmpaddr[pmpidx-1]->tor_paddr();
}
-
reg_t pmpaddr_csr_t::napot_mask() const noexcept {
bool is_na4 = (cfg & PMP_A) == PMP_NA4;
reg_t mask = (val << 1) | (!is_na4) | ~proc->pmp_tor_mask();
return ~(mask & ~(mask + 1)) << PMP_SHIFT;
}
-
bool pmpaddr_csr_t::match4(reg_t addr) const noexcept {
if ((cfg & PMP_A) == 0) return false;
bool is_tor = (cfg & PMP_A) == PMP_TOR;
@@ -161,7 +157,6 @@ bool pmpaddr_csr_t::match4(reg_t addr) const noexcept {
return ((addr ^ tor_paddr()) & napot_mask()) == 0;
}
-
bool pmpaddr_csr_t::subset_match(reg_t addr, reg_t len) const noexcept {
if ((addr | len) & (len - 1))
abort();
@@ -184,21 +179,55 @@ bool pmpaddr_csr_t::subset_match(reg_t addr, reg_t len) const noexcept {
return !(is_tor ? tor_homogeneous : napot_homogeneous);
}
-
bool pmpaddr_csr_t::access_ok(access_type type, reg_t mode) const noexcept {
- return
- (mode == PRV_M && !(cfg & PMP_L)) ||
- (type == LOAD && (cfg & PMP_R)) ||
- (type == STORE && (cfg & PMP_W)) ||
- (type == FETCH && (cfg & PMP_X));
+ const bool cfgx = cfg & PMP_X;
+ const bool cfgw = cfg & PMP_W;
+ const bool cfgr = cfg & PMP_R;
+ const bool cfgl = cfg & PMP_L;
+
+ const bool prvm = mode == PRV_M;
+
+ const bool typer = type == LOAD;
+ const bool typex = type == FETCH;
+ const bool typew = type == STORE;
+ const bool normal_rwx = (typer && cfgr) || (typew && cfgw) || (typex && cfgx);
+ const bool mseccfg_mml = state->mseccfg->get_mml();
+
+ if (mseccfg_mml) {
+ if (cfgx && cfgw && cfgr && cfgl) {
+ // Locked Shared data region: Read only on both M and S/U mode.
+ return typer;
+ } else {
+ const bool mml_shared_region = !cfgr && cfgw;
+ const bool mml_chk_normal = (prvm == cfgl) && normal_rwx;
+ const bool mml_chk_shared =
+ (!cfgl && cfgx && (typer || typew)) ||
+ (!cfgl && !cfgx && (typer || (typew && prvm))) ||
+ (cfgl && typex) ||
+ (cfgl && typer && cfgx && prvm);
+ return mml_shared_region ? mml_chk_shared : mml_chk_normal;
+ }
+ } else {
+ const bool m_bypass = (prvm && !cfgl);
+ return m_bypass || normal_rwx;
+ }
}
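
Since the Smepmp rules above are easy to misread, here is a self-contained model of the same decision (illustrative names, not Spike's API) that mirrors the branches in access_ok() one for one:

    #include <cstdint>

    enum access_type { LOAD, STORE, FETCH };

    // pmpcfg bit layout per the privileged spec
    static constexpr uint8_t PMP_R = 0x01, PMP_W = 0x02, PMP_X = 0x04, PMP_L = 0x80;

    bool access_ok(uint8_t cfg, access_type type, bool prv_m, bool mml) {
      const bool r = cfg & PMP_R, w = cfg & PMP_W, x = cfg & PMP_X, l = cfg & PMP_L;
      const bool typer = type == LOAD, typew = type == STORE, typex = type == FETCH;
      const bool normal_rwx = (typer && r) || (typew && w) || (typex && x);
      if (!mml)
        return (prv_m && !l) || normal_rwx;   // legacy PMP: M-mode bypasses unlocked entries
      if (r && w && x && l)
        return typer;                         // locked shared data region: read-only everywhere
      const bool shared = !r && w;            // R=0 W=1 encodes shared regions under MML
      if (!shared)
        return (prv_m == l) && normal_rwx;    // M-mode-only (L=1) vs S/U-only (L=0) regions
      return (!l && x && (typer || typew)) ||              // shared data: RW for M and S/U
             (!l && !x && (typer || (typew && prv_m))) ||  // shared data: S/U read, M read/write
             (l && typex) ||                               // shared code: execute for both
             (l && typer && x && prv_m);                   // shared code: plus M-mode read
    }
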
-
// implement class pmpcfg_csr_t
pmpcfg_csr_t::pmpcfg_csr_t(processor_t* const proc, const reg_t addr):
csr_t(proc, addr) {
}
+void pmpcfg_csr_t::verify_permissions(insn_t insn, bool write) const {
+ csr_t::verify_permissions(insn, write);
+  // If n_pmp is zero, that means pmp is not implemented, hence raise a
+  // trap if it tries to access the csr. I would prefer to implement
+ // this by not instantiating any pmpcfg_csr_t for these regs, but
+ // n_pmp can change after reset() is run.
+ if (proc->n_pmp == 0)
+ throw trap_illegal_instruction(insn.bits());
+}
+
reg_t pmpcfg_csr_t::read() const noexcept {
reg_t cfg_res = 0;
for (size_t i0 = (address - CSR_PMPCFG0) * 4, i = i0; i < i0 + proc->get_xlen() / 8 && i < state->max_pmp; i++)
@@ -211,14 +240,35 @@ bool pmpcfg_csr_t::unlogged_write(const reg_t val) noexcept {
return false;
bool write_success = false;
+ const bool rlb = state->mseccfg->get_rlb();
+ const bool mml = state->mseccfg->get_mml();
for (size_t i0 = (address - CSR_PMPCFG0) * 4, i = i0; i < i0 + proc->get_xlen() / 8; i++) {
if (i < proc->n_pmp) {
- if (!(state->pmpaddr[i]->cfg & PMP_L)) {
+ const bool locked = (state->pmpaddr[i]->cfg & PMP_L);
+ if (rlb || !locked) {
uint8_t cfg = (val >> (8 * (i - i0))) & (PMP_R | PMP_W | PMP_X | PMP_A | PMP_L);
- cfg &= ~PMP_W | ((cfg & PMP_R) ? PMP_W : 0); // Disallow R=0 W=1
+ // Drop R=0 W=1 when MML = 0
+ // Remove the restriction when MML = 1
+ if (!mml) {
+ cfg &= ~PMP_W | ((cfg & PMP_R) ? PMP_W : 0);
+ }
+ // Disallow A=NA4 when granularity > 4
if (proc->lg_pmp_granularity != PMP_SHIFT && (cfg & PMP_A) == PMP_NA4)
- cfg |= PMP_NAPOT; // Disallow A=NA4 when granularity > 4
- state->pmpaddr[i]->cfg = cfg;
+ cfg |= PMP_NAPOT;
+ /*
+ * Adding a rule with executable privileges that either is M-mode-only or a locked Shared-Region
+ * is not possible and such pmpcfg writes are ignored, leaving pmpcfg unchanged.
+ * This restriction can be temporarily lifted e.g. during the boot process, by setting mseccfg.RLB.
+ */
+ const bool cfgx = cfg & PMP_X;
+ const bool cfgw = cfg & PMP_W;
+ const bool cfgr = cfg & PMP_R;
+ if (rlb || !(mml && ((cfg & PMP_L) // M-mode-only or a locked Shared-Region
+ && !(cfgx && cfgw && cfgr) // RWX = 111 is allowed
+ && (cfgx || (cfgw && !cfgr)) // X=1 or RW=01 is not allowed
+ ))) {
+ state->pmpaddr[i]->cfg = cfg;
+ }
}
write_success = true;
}
@@ -227,6 +277,52 @@ bool pmpcfg_csr_t::unlogged_write(const reg_t val) noexcept {
return write_success;
}
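
The new write-legality rule below reduces to a small predicate. A standalone model (again with illustrative naming, and the pmpcfg bit constants repeated for self-containment):

    #include <cstdint>

    static constexpr uint8_t PMP_R = 0x01, PMP_W = 0x02, PMP_X = 0x04, PMP_L = 0x80;

    // With mseccfg.MML=1 and mseccfg.RLB=0, a write that would create an
    // executable M-mode-only or locked shared rule is silently ignored.
    bool pmpcfg_write_allowed(uint8_t cfg, bool mml, bool rlb) {
      if (rlb || !mml)
        return true;
      const bool r = cfg & PMP_R, w = cfg & PMP_W, x = cfg & PMP_X, l = cfg & PMP_L;
      const bool rwx = r && w && x;            // RWX=111 (locked shared data) stays allowed
      const bool exec_like = x || (w && !r);   // X=1, or the RW=01 shared encoding
      return !(l && !rwx && exec_like);
    }
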
+// implement class mseccfg_csr_t
+mseccfg_csr_t::mseccfg_csr_t(processor_t* const proc, const reg_t addr):
+ basic_csr_t(proc, addr, 0) {
+}
+
+void mseccfg_csr_t::verify_permissions(insn_t insn, bool write) const {
+ basic_csr_t::verify_permissions(insn, write);
+ if (!proc->extension_enabled(EXT_SMEPMP))
+ throw trap_illegal_instruction(insn.bits());
+}
+
+bool mseccfg_csr_t::get_mml() const noexcept {
+ return (read() & MSECCFG_MML);
+}
+
+bool mseccfg_csr_t::get_mmwp() const noexcept {
+ return (read() & MSECCFG_MMWP);
+}
+
+bool mseccfg_csr_t::get_rlb() const noexcept {
+ return (read() & MSECCFG_RLB);
+}
+
+bool mseccfg_csr_t::unlogged_write(const reg_t val) noexcept {
+ if (proc->n_pmp == 0)
+ return false;
+
+  // Whether pmpcfg.L is set in any rule or entry (including disabled entries)
+ const bool pmplock_recorded = std::any_of(state->pmpaddr, state->pmpaddr + proc->n_pmp,
+ [](const pmpaddr_csr_t_p & c) { return c->is_locked(); } );
+ reg_t new_val = read();
+
+  // When RLB is 0 and pmplock_recorded, RLB is locked to 0.
+  // Otherwise, set the RLB bit according to val.
+ if (!(pmplock_recorded && (read() & MSECCFG_RLB) == 0)) {
+ new_val &= ~MSECCFG_RLB;
+ new_val |= (val & MSECCFG_RLB);
+ }
+
+  new_val |= (val & MSECCFG_MMWP); // MMWP is sticky
+  new_val |= (val & MSECCFG_MML); // MML is sticky
+
+ proc->get_mmu()->flush_tlb();
+
+ return basic_csr_t::unlogged_write(new_val);
+}
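
In other words, the write behaves like this standalone model (bit values per the Smepmp spec; Spike additionally flushes the TLB on every write):

    #include <cstdint>

    static constexpr uint64_t MSECCFG_MML = 1, MSECCFG_MMWP = 2, MSECCFG_RLB = 4;

    uint64_t mseccfg_next(uint64_t old, uint64_t wval, bool any_pmp_locked) {
      uint64_t next = old;
      // RLB is locked to 0 once any PMP entry is locked while RLB is clear.
      const bool rlb_locked_to_zero = any_pmp_locked && !(old & MSECCFG_RLB);
      if (!rlb_locked_to_zero)
        next = (next & ~MSECCFG_RLB) | (wval & MSECCFG_RLB);
      // MML and MMWP are sticky: they can be set but never cleared.
      next |= wval & (MSECCFG_MML | MSECCFG_MMWP);
      return next;
    }
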
// implement class virtualized_csr_t
virtualized_csr_t::virtualized_csr_t(processor_t* const proc, csr_t_p orig, csr_t_p virt):
@@ -235,7 +331,6 @@ virtualized_csr_t::virtualized_csr_t(processor_t* const proc, csr_t_p orig, csr_
virt_csr(virt) {
}
-
reg_t virtualized_csr_t::read() const noexcept {
return readvirt(state->v);
}
@@ -252,49 +347,41 @@ bool virtualized_csr_t::unlogged_write(const reg_t val) noexcept {
return false; // virt_csr or orig_csr has already logged
}
-
// implement class epc_csr_t
epc_csr_t::epc_csr_t(processor_t* const proc, const reg_t addr):
csr_t(proc, addr),
val(0) {
}
-
reg_t epc_csr_t::read() const noexcept {
return val & proc->pc_alignment_mask();
}
-
bool epc_csr_t::unlogged_write(const reg_t val) noexcept {
this->val = val & ~(reg_t)1;
return true;
}
-
// implement class tvec_csr_t
tvec_csr_t::tvec_csr_t(processor_t* const proc, const reg_t addr):
csr_t(proc, addr),
val(0) {
}
-
reg_t tvec_csr_t::read() const noexcept {
return val;
}
-
bool tvec_csr_t::unlogged_write(const reg_t val) noexcept {
this->val = val & ~(reg_t)2;
return true;
}
-
// implement class cause_csr_t
cause_csr_t::cause_csr_t(processor_t* const proc, const reg_t addr):
basic_csr_t(proc, addr, 0) {
}
-
reg_t cause_csr_t::read() const noexcept {
reg_t val = basic_csr_t::read();
// When reading, the interrupt bit needs to adjust to xlen. Spike does
@@ -305,7 +392,6 @@ reg_t cause_csr_t::read() const noexcept {
return val;
}
-
// implement class base_status_csr_t
base_status_csr_t::base_status_csr_t(processor_t* const proc, const reg_t addr):
csr_t(proc, addr),
@@ -315,12 +401,11 @@ base_status_csr_t::base_status_csr_t(processor_t* const proc, const reg_t addr):
| (proc->get_const_xlen() == 32 ? SSTATUS32_SD : SSTATUS64_SD)) {
}
-
reg_t base_status_csr_t::compute_sstatus_write_mask() const noexcept {
// If a configuration has FS bits, they will always be accessible no
// matter the state of misa.
- const bool has_fs = proc->extension_enabled('S') || proc->extension_enabled('F')
- || proc->extension_enabled('V');
+ const bool has_fs = (proc->extension_enabled('S') || proc->extension_enabled('F')
+ || proc->extension_enabled('V')) && !proc->extension_enabled(EXT_ZFINX);
const bool has_vs = proc->extension_enabled('V');
return 0
| (proc->extension_enabled('S') ? (SSTATUS_SIE | SSTATUS_SPIE | SSTATUS_SPP) : 0)
@@ -331,7 +416,6 @@ reg_t base_status_csr_t::compute_sstatus_write_mask() const noexcept {
;
}
-
reg_t base_status_csr_t::adjust_sd(const reg_t val) const noexcept {
// This uses get_const_xlen() instead of get_xlen() not only because
// the variable is static, so it's only called once, but also
@@ -347,7 +431,6 @@ reg_t base_status_csr_t::adjust_sd(const reg_t val) const noexcept {
return val & ~sd_bit;
}
-
void base_status_csr_t::maybe_flush_tlb(const reg_t newval) noexcept {
if ((newval ^ read()) &
(MSTATUS_MPP | MSTATUS_MPRV
@@ -356,7 +439,6 @@ void base_status_csr_t::maybe_flush_tlb(const reg_t newval) noexcept {
proc->get_mmu()->flush_tlb();
}
-
namespace {
int xlen_to_uxl(int xlen) {
if (xlen == 32)
@@ -367,7 +449,6 @@ namespace {
}
}
-
// implement class vsstatus_csr_t
vsstatus_csr_t::vsstatus_csr_t(processor_t* const proc, const reg_t addr):
base_status_csr_t(proc, addr),
@@ -381,7 +462,6 @@ bool vsstatus_csr_t::unlogged_write(const reg_t val) noexcept {
return true;
}
-
// implement class sstatus_proxy_csr_t
sstatus_proxy_csr_t::sstatus_proxy_csr_t(processor_t* const proc, const reg_t addr, mstatus_csr_t_p mstatus):
base_status_csr_t(proc, addr),
@@ -391,32 +471,27 @@ sstatus_proxy_csr_t::sstatus_proxy_csr_t(processor_t* const proc, const reg_t ad
bool sstatus_proxy_csr_t::unlogged_write(const reg_t val) noexcept {
const reg_t new_mstatus = (mstatus->read() & ~sstatus_write_mask) | (val & sstatus_write_mask);
+ // On RV32 this will only log the low 32 bits, so make sure we're
+ // not modifying anything in the upper 32 bits.
+ assert((sstatus_write_mask & 0xffffffffU) == sstatus_write_mask);
+
mstatus->write(new_mstatus);
return false; // avoid double logging: already logged by mstatus->write()
}
-
// implement class mstatus_csr_t
mstatus_csr_t::mstatus_csr_t(processor_t* const proc, const reg_t addr):
base_status_csr_t(proc, addr),
- val(0
- | (proc->extension_enabled_const('U') && (proc->get_const_xlen() != 32) ? set_field((reg_t)0, MSTATUS_UXL, xlen_to_uxl(proc->get_const_xlen())) : 0)
- | (proc->extension_enabled_const('S') && (proc->get_const_xlen() != 32) ? set_field((reg_t)0, MSTATUS_SXL, xlen_to_uxl(proc->get_const_xlen())) : 0)
-
-#ifdef RISCV_ENABLE_DUAL_ENDIAN
- | (proc->get_mmu()->is_target_big_endian() ? MSTATUS_UBE | MSTATUS_SBE | MSTATUS_MBE : 0)
-#endif
- | 0 // initial value for mstatus
- ) {
+ val(compute_mstatus_initial_value()) {
}
-
bool mstatus_csr_t::unlogged_write(const reg_t val) noexcept {
- const bool has_mpv = proc->extension_enabled('S') && proc->extension_enabled('H');
+ const bool has_mpv = proc->extension_enabled('H');
const bool has_gva = has_mpv;
const reg_t mask = sstatus_write_mask
- | MSTATUS_MIE | MSTATUS_MPIE | MSTATUS_MPRV
+ | MSTATUS_MIE | MSTATUS_MPIE
+ | (proc->extension_enabled('U') ? MSTATUS_MPRV : 0)
| MSTATUS_MPP | MSTATUS_TW
| (proc->extension_enabled('S') ? MSTATUS_TSR : 0)
| (has_page ? MSTATUS_TVM : 0)
@@ -431,19 +506,59 @@ bool mstatus_csr_t::unlogged_write(const reg_t val) noexcept {
return true;
}
-// implement class mstatush_csr_t
-mstatush_csr_t::mstatush_csr_t(processor_t* const proc, const reg_t addr, mstatus_csr_t_p mstatus):
+reg_t mstatus_csr_t::compute_mstatus_initial_value() const noexcept {
+ const reg_t big_endian_bits = (proc->extension_enabled_const('U') ? MSTATUS_UBE : 0)
+ | (proc->extension_enabled_const('S') ? MSTATUS_SBE : 0)
+ | MSTATUS_MBE;
+ return 0
+ | (proc->extension_enabled_const('U') && (proc->get_const_xlen() != 32) ? set_field((reg_t)0, MSTATUS_UXL, xlen_to_uxl(proc->get_const_xlen())) : 0)
+ | (proc->extension_enabled_const('S') && (proc->get_const_xlen() != 32) ? set_field((reg_t)0, MSTATUS_SXL, xlen_to_uxl(proc->get_const_xlen())) : 0)
+ | (proc->get_mmu()->is_target_big_endian() ? big_endian_bits : 0)
+ | 0; // initial value for mstatus
+}
+
+// implement class rv32_low_csr_t
+rv32_low_csr_t::rv32_low_csr_t(processor_t* const proc, const reg_t addr, csr_t_p orig):
+ csr_t(proc, addr),
+ orig(orig) {
+}
+
+reg_t rv32_low_csr_t::read() const noexcept {
+ return orig->read() & 0xffffffffU;
+}
+
+void rv32_low_csr_t::verify_permissions(insn_t insn, bool write) const {
+ orig->verify_permissions(insn, write);
+}
+
+bool rv32_low_csr_t::unlogged_write(const reg_t val) noexcept {
+ return orig->unlogged_write((orig->written_value() >> 32 << 32) | (val & 0xffffffffU));
+}
+
+reg_t rv32_low_csr_t::written_value() const noexcept {
+ return orig->written_value() & 0xffffffffU;
+}
+
+// implement class rv32_high_csr_t
+rv32_high_csr_t::rv32_high_csr_t(processor_t* const proc, const reg_t addr, csr_t_p orig):
csr_t(proc, addr),
- mstatus(mstatus),
- mask(MSTATUSH_MPV | MSTATUSH_GVA | MSTATUSH_SBE | MSTATUSH_MBE) {
+ orig(orig) {
+}
+
+reg_t rv32_high_csr_t::read() const noexcept {
+ return (orig->read() >> 32) & 0xffffffffU;
}
-reg_t mstatush_csr_t::read() const noexcept {
- return (mstatus->read() >> 32) & mask;
+void rv32_high_csr_t::verify_permissions(insn_t insn, bool write) const {
+ orig->verify_permissions(insn, write);
}
-bool mstatush_csr_t::unlogged_write(const reg_t val) noexcept {
- return mstatus->unlogged_write((mstatus->written_value() & ~(mask << 32)) | ((val & mask) << 32));
+bool rv32_high_csr_t::unlogged_write(const reg_t val) noexcept {
+ return orig->unlogged_write((orig->written_value() << 32 >> 32) | ((val & 0xffffffffU) << 32));
+}
+
+reg_t rv32_high_csr_t::written_value() const noexcept {
+ return (orig->written_value() >> 32) & 0xffffffffU;
}
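
The low/high pairing boils down to two merge operations on the 64-bit backing value; a minimal standalone sketch:

    #include <cassert>
    #include <cstdint>

    // Each 32-bit write merges into the 64-bit backing value, leaving the
    // other half intact, exactly as the unlogged_write overrides above do.
    uint64_t write_low (uint64_t orig, uint32_t val) { return (orig >> 32 << 32) | val; }
    uint64_t write_high(uint64_t orig, uint32_t val) { return (orig << 32 >> 32) | ((uint64_t)val << 32); }

    int main() {
      uint64_t mstatus = 0x1111222233334444;
      assert(write_low (mstatus, 0xaaaabbbb) == 0x11112222aaaabbbb);
      assert(write_high(mstatus, 0xccccdddd) == 0xccccdddd33334444);
    }
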
// implement class sstatus_csr_t
@@ -483,7 +598,6 @@ bool sstatus_csr_t::enabled(const reg_t which) {
return false;
}
-
// implement class misa_csr_t
misa_csr_t::misa_csr_t(processor_t* const proc, const reg_t addr, const reg_t max_isa):
basic_csr_t(proc, addr, max_isa),
@@ -501,7 +615,7 @@ misa_csr_t::misa_csr_t(processor_t* const proc, const reg_t addr, const reg_t ma
) {
}
-const reg_t misa_csr_t::dependency(const reg_t val, const char feature, const char depends_on) const noexcept {
+reg_t misa_csr_t::dependency(const reg_t val, const char feature, const char depends_on) const noexcept {
return (val & (1L << (depends_on - 'A'))) ? val : (val & ~(1L << (feature - 'A')));
}
@@ -530,7 +644,9 @@ bool misa_csr_t::unlogged_write(const reg_t val) noexcept {
| (1 << CAUSE_STORE_GUEST_PAGE_FAULT)
;
state->medeleg->write(state->medeleg->read() & ~hypervisor_exceptions);
- state->mstatus->write(state->mstatus->read() & ~(MSTATUS_GVA | MSTATUS_MPV));
+ const reg_t new_mstatus = state->mstatus->read() & ~(MSTATUS_GVA | MSTATUS_MPV);
+ state->mstatus->write(new_mstatus);
+ if (state->mstatush) state->mstatush->write(new_mstatus >> 32); // log mstatush change
state->mie->write_with_mask(MIP_HS_MASK, 0); // also takes care of hie, sie
state->mip->write_with_mask(MIP_HS_MASK, 0); // also takes care of hip, sip, hvip
state->hstatus->write(0);
@@ -544,7 +660,6 @@ bool misa_csr_t::extension_enabled_const(unsigned char ext) const noexcept {
return extension_enabled(ext);
}
-
// implement class mip_or_mie_csr_t
mip_or_mie_csr_t::mip_or_mie_csr_t(processor_t* const proc, const reg_t addr):
csr_t(proc, addr),
@@ -565,7 +680,6 @@ bool mip_or_mie_csr_t::unlogged_write(const reg_t val) noexcept {
return false; // avoid double logging: already logged by write_with_mask()
}
-
mip_csr_t::mip_csr_t(processor_t* const proc, const reg_t addr):
mip_or_mie_csr_t(proc, addr) {
}
@@ -575,7 +689,9 @@ void mip_csr_t::backdoor_write_with_mask(const reg_t mask, const reg_t val) noex
}
reg_t mip_csr_t::write_mask() const noexcept {
- const reg_t supervisor_ints = proc->extension_enabled('S') ? MIP_SSIP | MIP_STIP | MIP_SEIP : 0;
+ // MIP_STIP is writable unless SSTC exists and STCE is set in MENVCFG
+ const reg_t supervisor_ints = proc->extension_enabled('S') ? MIP_SSIP | ((state->menvcfg->read() & MENVCFG_STCE) ? 0 : MIP_STIP) | MIP_SEIP : 0;
+ const reg_t lscof_int = proc->extension_enabled(EXT_SSCOFPMF) ? MIP_LCOFIP : 0;
const reg_t vssip_int = proc->extension_enabled('H') ? MIP_VSSIP : 0;
const reg_t hypervisor_ints = proc->extension_enabled('H') ? MIP_HS_MASK : 0;
// We must mask off sgeip, vstip, and vseip. All three of these
@@ -583,26 +699,24 @@ reg_t mip_csr_t::write_mask() const noexcept {
// * sgeip is read-only -- write hgeip instead
// * vseip is read-only -- write hvip instead
// * vstip is read-only -- write hvip instead
- return (supervisor_ints | hypervisor_ints) &
- (MIP_SEIP | MIP_SSIP | MIP_STIP | vssip_int);
+ return (supervisor_ints | hypervisor_ints | lscof_int) &
+ (MIP_SEIP | MIP_SSIP | MIP_STIP | MIP_LCOFIP | vssip_int);
}
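
As a worked example of the mask above, a sketch for a hart without the H extension (bit positions per the privileged spec):

    #include <cstdint>

    static constexpr uint64_t MIP_SSIP = 1ULL << 1, MIP_STIP = 1ULL << 5,
                              MIP_SEIP = 1ULL << 9, MIP_LCOFIP = 1ULL << 13;

    uint64_t mip_write_mask(bool smode, bool sscofpmf, bool stce) {
      const uint64_t sup = smode ? MIP_SSIP | (stce ? 0 : MIP_STIP) | MIP_SEIP : 0;
      const uint64_t lscof = sscofpmf ? MIP_LCOFIP : 0;
      return (sup | lscof) & (MIP_SEIP | MIP_SSIP | MIP_STIP | MIP_LCOFIP);
    }
    // e.g. mip_write_mask(true, true, true) == MIP_SSIP | MIP_SEIP | MIP_LCOFIP:
    // once menvcfg.STCE is set, STIP becomes read-only and is driven by stimecmp.
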
-
mie_csr_t::mie_csr_t(processor_t* const proc, const reg_t addr):
mip_or_mie_csr_t(proc, addr) {
}
-
reg_t mie_csr_t::write_mask() const noexcept {
const reg_t supervisor_ints = proc->extension_enabled('S') ? MIP_SSIP | MIP_STIP | MIP_SEIP : 0;
+ const reg_t lscof_int = proc->extension_enabled(EXT_SSCOFPMF) ? MIP_LCOFIP : 0;
const reg_t hypervisor_ints = proc->extension_enabled('H') ? MIP_HS_MASK : 0;
const reg_t coprocessor_ints = (reg_t)proc->any_custom_extensions() << IRQ_COP;
- const reg_t delegable_ints = supervisor_ints | coprocessor_ints;
+ const reg_t delegable_ints = supervisor_ints | coprocessor_ints | lscof_int;
const reg_t all_ints = delegable_ints | hypervisor_ints | MIP_MSIP | MIP_MTIP | MIP_MEIP;
return all_ints;
}
-
// implement class generic_int_accessor_t
generic_int_accessor_t::generic_int_accessor_t(state_t* const state,
const reg_t read_mask,
@@ -643,7 +757,6 @@ reg_t generic_int_accessor_t::deleg_mask() const {
return hideleg_mask & mideleg_mask;
}
-
// implement class mip_proxy_csr_t
mip_proxy_csr_t::mip_proxy_csr_t(processor_t* const proc, const reg_t addr, generic_int_accessor_t_p accr):
csr_t(proc, addr),
@@ -674,7 +787,6 @@ bool mie_proxy_csr_t::unlogged_write(const reg_t val) noexcept {
return false; // accr has already logged
}
-
// implement class mideleg_csr_t
mideleg_csr_t::mideleg_csr_t(processor_t* const proc, const reg_t addr):
basic_csr_t(proc, addr, 0) {
@@ -696,13 +808,13 @@ void mideleg_csr_t::verify_permissions(insn_t insn, bool write) const {
bool mideleg_csr_t::unlogged_write(const reg_t val) noexcept {
const reg_t supervisor_ints = proc->extension_enabled('S') ? MIP_SSIP | MIP_STIP | MIP_SEIP : 0;
+ const reg_t lscof_int = proc->extension_enabled(EXT_SSCOFPMF) ? MIP_LCOFIP : 0;
const reg_t coprocessor_ints = (reg_t)proc->any_custom_extensions() << IRQ_COP;
- const reg_t delegable_ints = supervisor_ints | coprocessor_ints;
+ const reg_t delegable_ints = supervisor_ints | coprocessor_ints | lscof_int;
return basic_csr_t::unlogged_write(val & delegable_ints);
}
-
// implement class medeleg_csr_t
medeleg_csr_t::medeleg_csr_t(processor_t* const proc, const reg_t addr):
basic_csr_t(proc, addr, 0),
@@ -724,7 +836,13 @@ void medeleg_csr_t::verify_permissions(insn_t insn, bool write) const {
bool medeleg_csr_t::unlogged_write(const reg_t val) noexcept {
const reg_t mask = 0
| (1 << CAUSE_MISALIGNED_FETCH)
+ | (1 << CAUSE_FETCH_ACCESS)
+ | (1 << CAUSE_ILLEGAL_INSTRUCTION)
| (1 << CAUSE_BREAKPOINT)
+ | (1 << CAUSE_MISALIGNED_LOAD)
+ | (1 << CAUSE_LOAD_ACCESS)
+ | (1 << CAUSE_MISALIGNED_STORE)
+ | (1 << CAUSE_STORE_ACCESS)
| (1 << CAUSE_USER_ECALL)
| (1 << CAUSE_SUPERVISOR_ECALL)
| (1 << CAUSE_FETCH_PAGE_FAULT)
@@ -735,7 +853,6 @@ bool medeleg_csr_t::unlogged_write(const reg_t val) noexcept {
return basic_csr_t::unlogged_write((read() & ~mask) | (val & mask));
}
-
// implement class masked_csr_t
masked_csr_t::masked_csr_t(processor_t* const proc, const reg_t addr, const reg_t mask, const reg_t init):
basic_csr_t(proc, addr, init),
@@ -746,13 +863,17 @@ bool masked_csr_t::unlogged_write(const reg_t val) noexcept {
return basic_csr_t::unlogged_write((read() & ~mask) | (val & mask));
}
+// implement class henvcfg_csr_t
+henvcfg_csr_t::henvcfg_csr_t(processor_t* const proc, const reg_t addr, const reg_t mask, const reg_t init, csr_t_p menvcfg):
+ masked_csr_t(proc, addr, mask, init),
+ menvcfg(menvcfg) {
+}
// implement class base_atp_csr_t and family
base_atp_csr_t::base_atp_csr_t(processor_t* const proc, const reg_t addr):
basic_csr_t(proc, addr, 0) {
}
-
bool base_atp_csr_t::unlogged_write(const reg_t val) noexcept {
const reg_t newval = proc->supports_impl(IMPL_MMU) ? compute_new_satp(val) : 0;
if (newval != read())
@@ -798,7 +919,7 @@ satp_csr_t::satp_csr_t(processor_t* const proc, const reg_t addr):
void satp_csr_t::verify_permissions(insn_t insn, bool write) const {
base_atp_csr_t::verify_permissions(insn, write);
if (get_field(state->mstatus->read(), MSTATUS_TVM))
- require(state->prv >= PRV_M);
+ require(state->prv == PRV_M);
}
virtualized_satp_csr_t::virtualized_satp_csr_t(processor_t* const proc, satp_csr_t_p orig, csr_t_p virt):
@@ -826,7 +947,6 @@ bool virtualized_satp_csr_t::unlogged_write(const reg_t val) noexcept {
return virtualized_csr_t::unlogged_write(newval);
}
-
// implement class wide_counter_csr_t
wide_counter_csr_t::wide_counter_csr_t(processor_t* const proc, const reg_t addr):
csr_t(proc, addr),
@@ -842,10 +962,7 @@ void wide_counter_csr_t::bump(const reg_t howmuch) noexcept {
}
bool wide_counter_csr_t::unlogged_write(const reg_t val) noexcept {
- if (proc->get_xlen() == 32)
- this->val = (this->val >> 32 << 32) | (val & 0xffffffffU);
- else
- this->val = val;
+ this->val = val;
// The ISA mandates that if an instruction writes instret, the write
// takes precedence over the increment to instret. However, Spike
// unconditionally increments instret after executing an instruction.
@@ -859,29 +976,30 @@ reg_t wide_counter_csr_t::written_value() const noexcept {
return this->val + 1;
}
-void wide_counter_csr_t::write_upper_half(const reg_t val) noexcept {
- this->val = (val << 32) | (this->val << 32 >> 32);
- this->val--; // See comment above.
- // Log upper half only.
- log_special_write(address + (CSR_MINSTRETH - CSR_MINSTRET), written_value() >> 32);
-}
-
-
-counter_top_csr_t::counter_top_csr_t(processor_t* const proc, const reg_t addr, wide_counter_csr_t_p parent):
+// implement class time_counter_csr_t
+time_counter_csr_t::time_counter_csr_t(processor_t* const proc, const reg_t addr):
csr_t(proc, addr),
- parent(parent) {
+ shadow_val(0) {
}
-reg_t counter_top_csr_t::read() const noexcept {
- return parent->read() >> 32;
+reg_t time_counter_csr_t::read() const noexcept {
+ // reading the time CSR in VS or VU mode returns the sum of the contents of
+ // htimedelta and the actual value of time.
+ if (state->v)
+ return shadow_val + state->htimedelta->read();
+ else
+ return shadow_val;
}
-bool counter_top_csr_t::unlogged_write(const reg_t val) noexcept {
- parent->write_upper_half(val);
- return true;
+void time_counter_csr_t::sync(const reg_t val) noexcept {
+ shadow_val = val;
+ if (proc->extension_enabled(EXT_SSTC)) {
+ const reg_t mip_val = (shadow_val >= state->stimecmp->read() ? MIP_STIP : 0) |
+ (shadow_val + state->htimedelta->read() >= state->vstimecmp->read() ? MIP_VSTIP : 0);
+ state->mip->backdoor_write_with_mask(MIP_STIP | MIP_VSTIP, mip_val);
+ }
}
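
To see how a timer device is expected to drive this, here is a rough standalone model of the sync() flow (illustrative types; in Spike the clint calls sync() and the pending bits go through mip's backdoor write):

    #include <cstdint>

    struct hart_timer_model {
      uint64_t time, htimedelta, stimecmp, vstimecmp;
      bool stip, vstip; // pending-bit outputs, normally mip.STIP/mip.VSTIP
    };

    void sync(hart_timer_model &h, uint64_t mtime) {
      h.time  = mtime;                                  // shadow of the real time counter
      h.stip  = h.time >= h.stimecmp;                   // Sstc: STIP follows the comparison
      h.vstip = h.time + h.htimedelta >= h.vstimecmp;   // VS time includes htimedelta
    }
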
-
proxy_csr_t::proxy_csr_t(processor_t* const proc, const reg_t addr, csr_t_p delegate):
csr_t(proc, addr),
delegate(delegate) {
@@ -896,7 +1014,6 @@ bool proxy_csr_t::unlogged_write(const reg_t val) noexcept {
return false;
}
-
const_csr_t::const_csr_t(processor_t* const proc, const reg_t addr, reg_t val):
csr_t(proc, addr),
val(val) {
@@ -906,11 +1023,10 @@ reg_t const_csr_t::read() const noexcept {
return val;
}
-bool const_csr_t::unlogged_write(const reg_t val) noexcept {
+bool const_csr_t::unlogged_write(const reg_t UNUSED val) noexcept {
return false;
}
-
counter_proxy_csr_t::counter_proxy_csr_t(processor_t* const proc, const reg_t addr, csr_t_p delegate):
proxy_csr_t(proc, addr, delegate) {
}
@@ -920,11 +1036,13 @@ bool counter_proxy_csr_t::myenable(csr_t_p counteren) const noexcept {
}
void counter_proxy_csr_t::verify_permissions(insn_t insn, bool write) const {
+ proxy_csr_t::verify_permissions(insn, write);
+
const bool mctr_ok = (state->prv < PRV_M) ? myenable(state->mcounteren) : true;
const bool hctr_ok = state->v ? myenable(state->hcounteren) : true;
const bool sctr_ok = (proc->extension_enabled('S') && state->prv < PRV_S) ? myenable(state->scounteren) : true;
- if (write || !mctr_ok)
+ if (!mctr_ok)
throw trap_illegal_instruction(insn.bits());
if (!hctr_ok)
throw trap_virtual_instruction(insn.bits());
@@ -936,7 +1054,6 @@ void counter_proxy_csr_t::verify_permissions(insn_t insn, bool write) const {
}
}
-
hypervisor_csr_t::hypervisor_csr_t(processor_t* const proc, const reg_t addr):
basic_csr_t(proc, addr, 0) {
}
@@ -947,7 +1064,6 @@ void hypervisor_csr_t::verify_permissions(insn_t insn, bool write) const {
throw trap_illegal_instruction(insn.bits());
}
-
hideleg_csr_t::hideleg_csr_t(processor_t* const proc, const reg_t addr, csr_t_p mideleg):
masked_csr_t(proc, addr, MIP_VS_MASK, 0),
mideleg(mideleg) {
@@ -957,7 +1073,6 @@ reg_t hideleg_csr_t::read() const noexcept {
return masked_csr_t::read() & mideleg->read();
};
-
hgatp_csr_t::hgatp_csr_t(processor_t* const proc, const reg_t addr):
basic_csr_t(proc, addr, 0) {
}
@@ -975,7 +1090,7 @@ bool hgatp_csr_t::unlogged_write(const reg_t val) noexcept {
if (proc->get_const_xlen() == 32) {
mask = HGATP32_PPN |
HGATP32_MODE |
- proc->supports_impl(IMPL_MMU_VMID) ? HGATP32_VMID : 0;
+ (proc->supports_impl(IMPL_MMU_VMID) ? HGATP32_VMID : 0);
} else {
mask = (HGATP64_PPN & ((reg_t(1) << (MAX_PADDR_BITS - PGSHIFT)) - 1)) |
(proc->supports_impl(IMPL_MMU_VMID) ? HGATP64_VMID : 0);
@@ -990,7 +1105,6 @@ bool hgatp_csr_t::unlogged_write(const reg_t val) noexcept {
return basic_csr_t::unlogged_write((read() & ~mask) | (val & mask));
}
-
tselect_csr_t::tselect_csr_t(processor_t* const proc, const reg_t addr):
basic_csr_t(proc, addr, 0) {
}
@@ -999,7 +1113,6 @@ bool tselect_csr_t::unlogged_write(const reg_t val) noexcept {
return basic_csr_t::unlogged_write((val < proc->TM.count()) ? val : read());
}
-
tdata1_csr_t::tdata1_csr_t(processor_t* const proc, const reg_t addr):
csr_t(proc, addr) {
}
@@ -1012,7 +1125,6 @@ bool tdata1_csr_t::unlogged_write(const reg_t val) noexcept {
return proc->TM.tdata1_write(proc, state->tselect->read(), val);
}
-
tdata2_csr_t::tdata2_csr_t(processor_t* const proc, const reg_t addr):
csr_t(proc, addr) {
}
@@ -1025,7 +1137,6 @@ bool tdata2_csr_t::unlogged_write(const reg_t val) noexcept {
return proc->TM.tdata2_write(proc, state->tselect->read(), val);
}
-
debug_mode_csr_t::debug_mode_csr_t(processor_t* const proc, const reg_t addr):
basic_csr_t(proc, addr, 0) {
}
@@ -1046,7 +1157,6 @@ void dpc_csr_t::verify_permissions(insn_t insn, bool write) const {
throw trap_illegal_instruction(insn.bits());
}
-
dcsr_csr_t::dcsr_csr_t(processor_t* const proc, const reg_t addr):
csr_t(proc, addr),
prv(0),
@@ -1098,16 +1208,30 @@ void dcsr_csr_t::write_cause_and_prv(uint8_t cause, reg_t prv) noexcept {
log_write();
}
-
float_csr_t::float_csr_t(processor_t* const proc, const reg_t addr, const reg_t mask, const reg_t init):
masked_csr_t(proc, addr, mask, init) {
}
void float_csr_t::verify_permissions(insn_t insn, bool write) const {
masked_csr_t::verify_permissions(insn, write);
- require_fp;
- if (!proc->extension_enabled('F'))
+ require_fs;
+ if (!proc->extension_enabled('F') && !proc->extension_enabled(EXT_ZFINX))
throw trap_illegal_instruction(insn.bits());
+
+ if (proc->extension_enabled(EXT_SMSTATEEN) && proc->extension_enabled(EXT_ZFINX)) {
+ if ((state->prv < PRV_M) && !(state->mstateen[0]->read() & MSTATEEN0_FCSR))
+ throw trap_illegal_instruction(insn.bits());
+
+ if (state->v && !(state->hstateen[0]->read() & HSTATEEN0_FCSR))
+ throw trap_virtual_instruction(insn.bits());
+
+ if ((proc->extension_enabled('S') && state->prv < PRV_S) && !(state->sstateen[0]->read() & SSTATEEN0_FCSR)) {
+ if (state->v)
+ throw trap_virtual_instruction(insn.bits());
+ else
+ throw trap_illegal_instruction(insn.bits());
+ }
+ }
}
bool float_csr_t::unlogged_write(const reg_t val) noexcept {
@@ -1115,7 +1239,6 @@ bool float_csr_t::unlogged_write(const reg_t val) noexcept {
return masked_csr_t::unlogged_write(val);
}
-
composite_csr_t::composite_csr_t(processor_t* const proc, const reg_t addr, csr_t_p upper_csr, csr_t_p lower_csr, const unsigned upper_lsb):
csr_t(proc, addr),
upper_csr(upper_csr),
@@ -1139,7 +1262,6 @@ bool composite_csr_t::unlogged_write(const reg_t val) noexcept {
return false; // logging is done only by the underlying CSRs
}
-
seed_csr_t::seed_csr_t(processor_t* const proc, const reg_t addr):
csr_t(proc, addr) {
}
@@ -1161,8 +1283,6 @@ bool seed_csr_t::unlogged_write(const reg_t val) noexcept {
return true;
}
-
-
vector_csr_t::vector_csr_t(processor_t* const proc, const reg_t addr, const reg_t mask, const reg_t init):
basic_csr_t(proc, addr, init),
mask(mask) {
@@ -1187,7 +1307,6 @@ bool vector_csr_t::unlogged_write(const reg_t val) noexcept {
return basic_csr_t::unlogged_write(val & mask);
}
-
vxsat_csr_t::vxsat_csr_t(processor_t* const proc, const reg_t addr):
masked_csr_t(proc, addr, /*mask*/ 1, /*init*/ 0) {
}
@@ -1203,3 +1322,151 @@ bool vxsat_csr_t::unlogged_write(const reg_t val) noexcept {
dirty_vs_state;
return masked_csr_t::unlogged_write(val);
}
+
+// implement class hstateen_csr_t
+hstateen_csr_t::hstateen_csr_t(processor_t* const proc, const reg_t addr, const reg_t mask,
+ const reg_t init, uint8_t index):
+ masked_csr_t(proc, addr, mask, init),
+ index(index) {
+}
+
+reg_t hstateen_csr_t::read() const noexcept {
+ // For every bit in an mstateen CSR that is zero (whether read-only zero or set to zero),
+ // the same bit appears as read-only zero in the matching hstateen and sstateen CSRs
+ return masked_csr_t::read() & state->mstateen[index]->read();
+}
+
+bool hstateen_csr_t::unlogged_write(const reg_t val) noexcept {
+ // For every bit in an mstateen CSR that is zero (whether read-only zero or set to zero),
+ // the same bit appears as read-only zero in the matching hstateen and sstateen CSRs
+ return masked_csr_t::unlogged_write(val & state->mstateen[index]->read());
+}
+
+void hstateen_csr_t::verify_permissions(insn_t insn, bool write) const {
+ if ((state->prv < PRV_M) && !(state->mstateen[index]->read() & MSTATEEN_HSTATEEN))
+ throw trap_illegal_instruction(insn.bits());
+ masked_csr_t::verify_permissions(insn, write);
+}
+
+// implement class sstateen_csr_t
+sstateen_csr_t::sstateen_csr_t(processor_t* const proc, const reg_t addr, const reg_t mask,
+ const reg_t init, uint8_t index):
+ hstateen_csr_t(proc, addr, mask, init, index) {
+}
+
+reg_t sstateen_csr_t::read() const noexcept {
+ // For every bit in an mstateen CSR that is zero (whether read-only zero or set to zero),
+ // the same bit appears as read-only zero in the matching hstateen and sstateen CSRs
+ // For every bit in an hstateen CSR that is zero (whether read-only zero or set to zero),
+ // the same bit appears as read-only zero in sstateen when accessed in VS-mode
+ if (state->v)
+ return hstateen_csr_t::read() & state->hstateen[index]->read();
+ else
+ return hstateen_csr_t::read();
+}
+
+bool sstateen_csr_t::unlogged_write(const reg_t val) noexcept {
+ // For every bit in an mstateen CSR that is zero (whether read-only zero or set to zero),
+ // the same bit appears as read-only zero in the matching hstateen and sstateen CSRs
+ // For every bit in an hstateen CSR that is zero (whether read-only zero or set to zero),
+ // the same bit appears as read-only zero in sstateen when accessed in VS-mode
+ if (state->v)
+ return hstateen_csr_t::unlogged_write(val & state->hstateen[index]->read());
+ else
+ return hstateen_csr_t::unlogged_write(val);
+}
+
+void sstateen_csr_t::verify_permissions(insn_t insn, bool write) const {
+ hstateen_csr_t::verify_permissions(insn, write);
+
+ if (state->v && !(state->hstateen[index]->read() & HSTATEEN_SSTATEEN))
+ throw trap_virtual_instruction(insn.bits());
+}
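
The cascade of read-only-zero bits can be summarized with a pair of pure functions (a sketch, not Spike's API):

    #include <cstdint>

    // A bit reads as zero in hstateen unless it is set in mstateen.
    uint64_t hstateen_read(uint64_t h, uint64_t m) { return h & m; }

    // In VS-mode, a bit reads as zero in sstateen unless it is set in both
    // mstateen and hstateen; outside VS-mode only mstateen gates it.
    uint64_t sstateen_read(uint64_t s, uint64_t h, uint64_t m, bool virt) {
      return virt ? (s & h & m) : (s & m);
    }
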
+
+// implement class senvcfg_csr_t
+senvcfg_csr_t::senvcfg_csr_t(processor_t* const proc, const reg_t addr, const reg_t mask,
+ const reg_t init):
+ masked_csr_t(proc, addr, mask, init) {
+}
+
+void senvcfg_csr_t::verify_permissions(insn_t insn, bool write) const {
+ if (proc->extension_enabled(EXT_SMSTATEEN)) {
+ if ((state->prv < PRV_M) && !(state->mstateen[0]->read() & MSTATEEN0_HENVCFG))
+ throw trap_illegal_instruction(insn.bits());
+
+ if (state->v && !(state->hstateen[0]->read() & HSTATEEN0_SENVCFG))
+ throw trap_virtual_instruction(insn.bits());
+ }
+
+ masked_csr_t::verify_permissions(insn, write);
+}
+
+void henvcfg_csr_t::verify_permissions(insn_t insn, bool write) const {
+ if (proc->extension_enabled(EXT_SMSTATEEN)) {
+ if ((state->prv < PRV_M) && !(state->mstateen[0]->read() & MSTATEEN0_HENVCFG))
+ throw trap_illegal_instruction(insn.bits());
+ }
+
+ masked_csr_t::verify_permissions(insn, write);
+}
+
+stimecmp_csr_t::stimecmp_csr_t(processor_t* const proc, const reg_t addr, const reg_t imask):
+ basic_csr_t(proc, addr, 0), intr_mask(imask) {
+}
+
+bool stimecmp_csr_t::unlogged_write(const reg_t val) noexcept {
+ state->mip->backdoor_write_with_mask(intr_mask, state->time->read() >= val ? intr_mask : 0);
+ return basic_csr_t::unlogged_write(val);
+}
+
+virtualized_stimecmp_csr_t::virtualized_stimecmp_csr_t(processor_t* const proc, csr_t_p orig, csr_t_p virt):
+ virtualized_csr_t(proc, orig, virt) {
+}
+
+void virtualized_stimecmp_csr_t::verify_permissions(insn_t insn, bool write) const {
+ if (!(state->menvcfg->read() & MENVCFG_STCE)) {
+ // access to (v)stimecmp with MENVCFG.STCE = 0
+ if (state->prv < PRV_M)
+ throw trap_illegal_instruction(insn.bits());
+ }
+
+ state->time_proxy->verify_permissions(insn, false);
+
+ if (state->v && !(state->henvcfg->read() & HENVCFG_STCE)) {
+ // access to vstimecmp with MENVCFG.STCE = 1 and HENVCFG.STCE = 0 when V = 1
+ throw trap_virtual_instruction(insn.bits());
+ }
+
+ virtualized_csr_t::verify_permissions(insn, write);
+}
+
+scountovf_csr_t::scountovf_csr_t(processor_t* const proc, const reg_t addr):
+ csr_t(proc, addr) {
+}
+
+void scountovf_csr_t::verify_permissions(insn_t insn, bool write) const {
+ if (!proc->extension_enabled(EXT_SSCOFPMF))
+ throw trap_illegal_instruction(insn.bits());
+ csr_t::verify_permissions(insn, write);
+}
+
+reg_t scountovf_csr_t::read() const noexcept {
+ reg_t val = 0;
+ for (reg_t i = 3; i <= 31; ++i) {
+ bool of = state->mevent[i - 3]->read() & MHPMEVENT_OF;
+ val |= of << i;
+ }
+
+ /* In M and S modes, scountovf bit X is readable when mcounteren bit X is set, */
+ /* and otherwise reads as zero. Similarly, in VS mode, scountovf bit X is readable */
+ /* when mcounteren bit X and hcounteren bit X are both set, and otherwise reads as zero. */
+ val &= state->mcounteren->read();
+ if (state->v)
+ val &= state->hcounteren->read();
+ return val;
+}
+
+bool scountovf_csr_t::unlogged_write(const reg_t UNUSED val) noexcept {
+ /* this function is unused */
+ return false;
+}
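
The visibility rule for scountovf reduces to the following standalone model (mevent_of[i] here stands in for mhpmevent(i+3) & MHPMEVENT_OF):

    #include <cstdint>

    uint64_t scountovf_read(const bool mevent_of[29], uint64_t mcounteren,
                            uint64_t hcounteren, bool virt) {
      uint64_t val = 0;
      for (int i = 3; i <= 31; i++)
        val |= (uint64_t)mevent_of[i - 3] << i;   // OF bits land at positions 3..31
      val &= mcounteren;                          // readable only where mcounteren is set
      if (virt)
        val &= hcounteren;                        // VS-mode also requires hcounteren
      return val;
    }
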
diff --git a/riscv/csrs.h b/riscv/csrs.h
index fb27ae6..70fa2f4 100644
--- a/riscv/csrs.h
+++ b/riscv/csrs.h
@@ -53,11 +53,14 @@ class csr_t {
private:
const unsigned csr_priv;
const bool csr_read_only;
+
+ // For access to written_value() and unlogged_write():
+ friend class rv32_high_csr_t;
+ friend class rv32_low_csr_t;
};
typedef std::shared_ptr<csr_t> csr_t_p;
-
// Basic CSRs, with XLEN bits fully readable and writable.
class basic_csr_t: public csr_t {
public:
@@ -73,7 +76,6 @@ class basic_csr_t: public csr_t {
reg_t val;
};
-
class pmpaddr_csr_t: public csr_t {
public:
pmpaddr_csr_t(processor_t* const proc, const reg_t addr);
@@ -89,6 +91,11 @@ class pmpaddr_csr_t: public csr_t {
// Is the specified access allowed given the pmpcfg privileges?
bool access_ok(access_type type, reg_t mode) const noexcept;
+  // Expose the lock bit so that outside code such as mseccfg can check it
+ bool is_locked() const noexcept {
+ return cfg & PMP_L;
+ }
+
protected:
virtual bool unlogged_write(const reg_t val) noexcept override;
private:
@@ -117,11 +124,24 @@ typedef std::shared_ptr<pmpaddr_csr_t> pmpaddr_csr_t_p;
class pmpcfg_csr_t: public csr_t {
public:
pmpcfg_csr_t(processor_t* const proc, const reg_t addr);
+ virtual void verify_permissions(insn_t insn, bool write) const override;
virtual reg_t read() const noexcept override;
protected:
virtual bool unlogged_write(const reg_t val) noexcept override;
};
+class mseccfg_csr_t: public basic_csr_t {
+ public:
+ mseccfg_csr_t(processor_t* const proc, const reg_t addr);
+ virtual void verify_permissions(insn_t insn, bool write) const override;
+ bool get_mml() const noexcept;
+ bool get_mmwp() const noexcept;
+ bool get_rlb() const noexcept;
+ protected:
+ virtual bool unlogged_write(const reg_t val) noexcept override;
+};
+
+typedef std::shared_ptr<mseccfg_csr_t> mseccfg_csr_t_p;
// For CSRs that have a virtualized copy under another name. Each
// instance of virtualized_csr_t will read/write one of two CSRs,
@@ -158,7 +178,6 @@ class epc_csr_t: public csr_t {
reg_t val;
};
-
// For mtvec, stvec, and vstvec
class tvec_csr_t: public csr_t {
public:
@@ -171,7 +190,6 @@ class tvec_csr_t: public csr_t {
reg_t val;
};
-
// For mcause, scause, and vscause
class cause_csr_t: public basic_csr_t {
public:
@@ -180,7 +198,6 @@ class cause_csr_t: public basic_csr_t {
virtual reg_t read() const noexcept override;
};
-
// For *status family of CSRs
class base_status_csr_t: public csr_t {
public:
@@ -202,7 +219,6 @@ class base_status_csr_t: public csr_t {
typedef std::shared_ptr<base_status_csr_t> base_status_csr_t_p;
-
// For vsstatus, which is its own separate architectural register
// (unlike sstatus)
class vsstatus_csr_t final: public base_status_csr_t {
@@ -221,7 +237,6 @@ class vsstatus_csr_t final: public base_status_csr_t {
typedef std::shared_ptr<vsstatus_csr_t> vsstatus_csr_t_p;
-
class mstatus_csr_t final: public base_status_csr_t {
public:
mstatus_csr_t(processor_t* const proc, const reg_t addr);
@@ -233,24 +248,37 @@ class mstatus_csr_t final: public base_status_csr_t {
protected:
virtual bool unlogged_write(const reg_t val) noexcept override;
private:
+ reg_t compute_mstatus_initial_value() const noexcept;
reg_t val;
- friend class mstatush_csr_t;
};
typedef std::shared_ptr<mstatus_csr_t> mstatus_csr_t_p;
-
-class mstatush_csr_t: public csr_t {
+// For RV32 CSRs that are split into two, e.g. mstatus/mstatush
+// CSRW should only modify the lower half
+class rv32_low_csr_t: public csr_t {
public:
- mstatush_csr_t(processor_t* const proc, const reg_t addr, mstatus_csr_t_p mstatus);
+ rv32_low_csr_t(processor_t* const proc, const reg_t addr, csr_t_p orig);
virtual reg_t read() const noexcept override;
+ virtual void verify_permissions(insn_t insn, bool write) const override;
protected:
virtual bool unlogged_write(const reg_t val) noexcept override;
+ virtual reg_t written_value() const noexcept override;
private:
- mstatus_csr_t_p mstatus;
- const reg_t mask;
+ csr_t_p orig;
};
+class rv32_high_csr_t: public csr_t {
+ public:
+ rv32_high_csr_t(processor_t* const proc, const reg_t addr, csr_t_p orig);
+ virtual reg_t read() const noexcept override;
+ virtual void verify_permissions(insn_t insn, bool write) const override;
+ protected:
+ virtual bool unlogged_write(const reg_t val) noexcept override;
+ virtual reg_t written_value() const noexcept override;
+ private:
+ csr_t_p orig;
+};
class sstatus_proxy_csr_t final: public base_status_csr_t {
public:
@@ -283,7 +311,6 @@ class sstatus_csr_t: public virtualized_csr_t {
typedef std::shared_ptr<sstatus_csr_t> sstatus_csr_t_p;
-
class misa_csr_t final: public basic_csr_t {
public:
misa_csr_t(processor_t* const proc, const reg_t addr, const reg_t max_isa);
@@ -299,12 +326,11 @@ class misa_csr_t final: public basic_csr_t {
private:
const reg_t max_isa;
const reg_t write_mask;
- const reg_t dependency(const reg_t val, const char feature, const char depends_on) const noexcept;
+ reg_t dependency(const reg_t val, const char feature, const char depends_on) const noexcept;
};
typedef std::shared_ptr<misa_csr_t> misa_csr_t_p;
-
class mip_or_mie_csr_t: public csr_t {
public:
mip_or_mie_csr_t(processor_t* const proc, const reg_t addr);
@@ -319,7 +345,6 @@ class mip_or_mie_csr_t: public csr_t {
virtual reg_t write_mask() const noexcept = 0;
};
-
// mip is special because some of the bits are driven by hardware pins
class mip_csr_t: public mip_or_mie_csr_t {
public:
@@ -333,7 +358,6 @@ class mip_csr_t: public mip_or_mie_csr_t {
typedef std::shared_ptr<mip_csr_t> mip_csr_t_p;
-
class mie_csr_t: public mip_or_mie_csr_t {
public:
mie_csr_t(processor_t* const proc, const reg_t addr);
@@ -343,7 +367,6 @@ class mie_csr_t: public mip_or_mie_csr_t {
typedef std::shared_ptr<mie_csr_t> mie_csr_t_p;
-
// For sip, hip, hvip, vsip, sie, hie, vsie which are all just (masked
// & shifted) views into mip or mie. Each pair will have one of these
// objects describing the view, e.g. one for sip+sie, one for hip+hie,
@@ -375,7 +398,6 @@ class generic_int_accessor_t {
typedef std::shared_ptr<generic_int_accessor_t> generic_int_accessor_t_p;
-
// For all CSRs that are simply (masked & shifted) views into mip
class mip_proxy_csr_t: public csr_t {
public:
@@ -398,8 +420,6 @@ class mie_proxy_csr_t: public csr_t {
generic_int_accessor_t_p accr;
};
-
-
class mideleg_csr_t: public basic_csr_t {
public:
mideleg_csr_t(processor_t* const proc, const reg_t addr);
@@ -409,7 +429,6 @@ class mideleg_csr_t: public basic_csr_t {
virtual bool unlogged_write(const reg_t val) noexcept override;
};
-
class medeleg_csr_t: public basic_csr_t {
public:
medeleg_csr_t(processor_t* const proc, const reg_t addr);
@@ -420,7 +439,6 @@ class medeleg_csr_t: public basic_csr_t {
const reg_t hypervisor_exceptions;
};
-
// For CSRs with certain bits hardwired
class masked_csr_t: public basic_csr_t {
public:
@@ -431,6 +449,21 @@ class masked_csr_t: public basic_csr_t {
const reg_t mask;
};
+// henvcfg.pbmte is read-only 0 when menvcfg.pbmte = 0
+// henvcfg.stce is read-only 0 when menvcfg.stce = 0
+class henvcfg_csr_t final: public masked_csr_t {
+ public:
+ henvcfg_csr_t(processor_t* const proc, const reg_t addr, const reg_t mask, const reg_t init, csr_t_p menvcfg);
+
+ reg_t read() const noexcept override {
+ return (menvcfg->read() | ~(MENVCFG_PBMTE | MENVCFG_STCE)) & masked_csr_t::read();
+ }
+
+ virtual void verify_permissions(insn_t insn, bool write) const override;
+
+ private:
+ csr_t_p menvcfg;
+};
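
The inline read() above gates only the PBMTE and STCE bits; as a standalone model (bit positions per the privileged spec):

    #include <cstdint>

    static constexpr uint64_t MENVCFG_PBMTE = 1ULL << 62, MENVCFG_STCE = 1ULL << 63;

    uint64_t henvcfg_read(uint64_t henvcfg_val, uint64_t menvcfg_val) {
      // For PBMTE/STCE, the henvcfg bit reads as zero unless the matching
      // menvcfg bit is set; all other bits pass through unchanged.
      return (menvcfg_val | ~(MENVCFG_PBMTE | MENVCFG_STCE)) & henvcfg_val;
    }
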
// For satp and vsatp
// These are three classes in order to handle the [V]TVM bits permission checks
@@ -462,7 +495,6 @@ class virtualized_satp_csr_t: public virtualized_csr_t {
satp_csr_t_p orig_satp;
};
-
// For minstret and mcycle, which are always 64 bits, but in RV32 are
// split into high and low halves. The first class always holds the
// full 64-bit value.
@@ -472,7 +504,6 @@ class wide_counter_csr_t: public csr_t {
// Always returns full 64-bit value
virtual reg_t read() const noexcept override;
void bump(const reg_t howmuch) noexcept;
- void write_upper_half(const reg_t val) noexcept;
protected:
virtual bool unlogged_write(const reg_t val) noexcept override;
virtual reg_t written_value() const noexcept override;
@@ -482,20 +513,20 @@ class wide_counter_csr_t: public csr_t {
typedef std::shared_ptr<wide_counter_csr_t> wide_counter_csr_t_p;
-
-// A simple proxy to read/write the upper half of minstret/mcycle
-class counter_top_csr_t: public csr_t {
+class time_counter_csr_t: public csr_t {
public:
- counter_top_csr_t(processor_t* const proc, const reg_t addr, wide_counter_csr_t_p parent);
+ time_counter_csr_t(processor_t* const proc, const reg_t addr);
virtual reg_t read() const noexcept override;
+
+ void sync(const reg_t val) noexcept;
+
protected:
- virtual bool unlogged_write(const reg_t val) noexcept override;
+ virtual bool unlogged_write(const reg_t UNUSED val) noexcept override { return false; };
private:
- wide_counter_csr_t_p parent;
+ reg_t shadow_val;
};
-typedef std::shared_ptr<counter_top_csr_t> counter_top_csr_t_p;
-
+typedef std::shared_ptr<time_counter_csr_t> time_counter_csr_t_p;
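  // time_counter_csr_t replaces the old upper-half proxy: reads return a
  // shadow value that the simulator pushes in via sync(), and software writes
  // are dropped (unlogged_write returns false). A toy sketch of that
  // read-only, externally-synced pattern:
  #include <cassert>
  #include <cstdint>
  class toy_time_csr {
   public:
    uint64_t read() const noexcept { return shadow_val; }
    bool unlogged_write(uint64_t) noexcept { return false; }  // CSR writes ignored
    void sync(uint64_t val) noexcept { shadow_val = val; }    // driven by the sim
   private:
    uint64_t shadow_val = 0;
  };
  int main() {
    toy_time_csr t;
    t.sync(100);
    t.unlogged_write(42);      // no effect, matching the override above
    assert(t.read() == 100);
    return 0;
  }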
// For a CSR that is an alias of another
class proxy_csr_t: public csr_t {
@@ -508,7 +539,6 @@ class proxy_csr_t: public csr_t {
csr_t_p delegate;
};
-
// For a CSR with a fixed, unchanging value
class const_csr_t: public csr_t {
public:
@@ -520,7 +550,6 @@ class const_csr_t: public csr_t {
const reg_t val;
};
-
// For a CSR that is an unprivileged accessor of a privileged counter
class counter_proxy_csr_t: public proxy_csr_t {
public:
@@ -530,7 +559,6 @@ class counter_proxy_csr_t: public proxy_csr_t {
bool myenable(csr_t_p counteren) const noexcept;
};
-
// For machine-level CSRs that only exist with Hypervisor
class hypervisor_csr_t: public basic_csr_t {
public:
@@ -538,7 +566,6 @@ class hypervisor_csr_t: public basic_csr_t {
virtual void verify_permissions(insn_t insn, bool write) const override;
};
-
class hideleg_csr_t: public masked_csr_t {
public:
hideleg_csr_t(processor_t* const proc, const reg_t addr, csr_t_p mideleg);
@@ -547,7 +574,6 @@ class hideleg_csr_t: public masked_csr_t {
csr_t_p mideleg;
};
-
class hgatp_csr_t: public basic_csr_t {
public:
hgatp_csr_t(processor_t* const proc, const reg_t addr);
@@ -556,7 +582,6 @@ class hgatp_csr_t: public basic_csr_t {
virtual bool unlogged_write(const reg_t val) noexcept override;
};
-
class tselect_csr_t: public basic_csr_t {
public:
tselect_csr_t(processor_t* const proc, const reg_t addr);
@@ -564,7 +589,6 @@ class tselect_csr_t: public basic_csr_t {
virtual bool unlogged_write(const reg_t val) noexcept override;
};
-
class tdata1_csr_t: public csr_t {
public:
tdata1_csr_t(processor_t* const proc, const reg_t addr);
@@ -590,7 +614,6 @@ class debug_mode_csr_t: public basic_csr_t {
typedef std::shared_ptr<tdata2_csr_t> tdata2_csr_t_p;
-
class dpc_csr_t: public epc_csr_t {
public:
dpc_csr_t(processor_t* const proc, const reg_t addr);
@@ -618,7 +641,6 @@ class dcsr_csr_t: public csr_t {
typedef std::shared_ptr<dcsr_csr_t> dcsr_csr_t_p;
-
class float_csr_t final: public masked_csr_t {
public:
float_csr_t(processor_t* const proc, const reg_t addr, const reg_t mask, const reg_t init);
@@ -629,7 +651,6 @@ class float_csr_t final: public masked_csr_t {
typedef std::shared_ptr<float_csr_t> float_csr_t_p;
-
// For a CSR like FCSR, that is actually a view into multiple
// underlying registers.
class composite_csr_t: public csr_t {
@@ -646,7 +667,6 @@ class composite_csr_t: public csr_t {
const unsigned upper_lsb;
};
-
class seed_csr_t: public csr_t {
public:
seed_csr_t(processor_t* const proc, const reg_t addr);
@@ -656,7 +676,6 @@ class seed_csr_t: public csr_t {
virtual bool unlogged_write(const reg_t val) noexcept override;
};
-
class vector_csr_t: public basic_csr_t {
public:
vector_csr_t(processor_t* const proc, const reg_t addr, const reg_t mask, const reg_t init=0);
@@ -671,7 +690,6 @@ class vector_csr_t: public basic_csr_t {
typedef std::shared_ptr<vector_csr_t> vector_csr_t_p;
-
// For CSRs shared between Vector and P extensions (vxsat)
class vxsat_csr_t: public masked_csr_t {
public:
@@ -681,4 +699,53 @@ class vxsat_csr_t: public masked_csr_t {
virtual bool unlogged_write(const reg_t val) noexcept override;
};
+class hstateen_csr_t: public masked_csr_t {
+ public:
+ hstateen_csr_t(processor_t* const proc, const reg_t addr, const reg_t mask, const reg_t init, uint8_t index);
+ virtual reg_t read() const noexcept override;
+ virtual void verify_permissions(insn_t insn, bool write) const override;
+ protected:
+ virtual bool unlogged_write(const reg_t val) noexcept override;
+ uint8_t index;
+};
+
+class sstateen_csr_t: public hstateen_csr_t {
+ public:
+ sstateen_csr_t(processor_t* const proc, const reg_t addr, const reg_t mask, const reg_t init, uint8_t index);
+ virtual reg_t read() const noexcept override;
+ virtual void verify_permissions(insn_t insn, bool write) const override;
+ protected:
+ virtual bool unlogged_write(const reg_t val) noexcept override;
+};
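  // The read() overrides live in csrs.cc, not this hunk, but the usual
  // Smstateen arrangement (an assumption here) is a cascade: each lower
  // level's *stateen bits read as zero wherever the level above disallows
  // them, which the index field lets both classes look up.
  #include <cassert>
  #include <cstdint>
  int main() {
    uint64_t mstateen0 = 0x3;               // M grants bits 0 and 1
    uint64_t hstateen0 = 0x7;               // HS requests bits 0..2
    uint64_t sstateen0 = 0xf;               // S requests bits 0..3
    uint64_t h_eff = hstateen0 & mstateen0; // bit 2 reads as zero
    uint64_t s_eff = sstateen0 & h_eff;
    assert(h_eff == 0x3 && s_eff == 0x3);
    return 0;
  }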
+
+class senvcfg_csr_t final: public masked_csr_t {
+ public:
+ senvcfg_csr_t(processor_t* const proc, const reg_t addr, const reg_t mask, const reg_t init);
+ virtual void verify_permissions(insn_t insn, bool write) const override;
+};
+
+class stimecmp_csr_t: public basic_csr_t {
+ public:
+ stimecmp_csr_t(processor_t* const proc, const reg_t addr, const reg_t imask);
+ protected:
+ virtual bool unlogged_write(const reg_t val) noexcept override;
+ private:
+ reg_t intr_mask;
+};
+
+class virtualized_stimecmp_csr_t: public virtualized_csr_t {
+ public:
+ virtualized_stimecmp_csr_t(processor_t* const proc, csr_t_p orig, csr_t_p virt);
+ virtual void verify_permissions(insn_t insn, bool write) const override;
+};
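  // stimecmp_csr_t's intr_mask suggests the Sstc behavior: the supervisor
  // timer-pending bit tracks the comparison time >= stimecmp. A sketch of
  // that comparison (MIP_STIP is bit 5; the real update is in csrs.cc, not
  // shown in this hunk):
  #include <cassert>
  #include <cstdint>
  int main() {
    const uint64_t MIP_STIP = 1ULL << 5;
    uint64_t stimecmp = 1000, mip = 0;
    auto update = [&](uint64_t time) {
      if (time >= stimecmp) mip |= MIP_STIP; else mip &= ~MIP_STIP;
    };
    update(999);  assert(!(mip & MIP_STIP));
    update(1000); assert(mip & MIP_STIP);
    return 0;
  }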
+
+class scountovf_csr_t: public csr_t {
+ public:
+ scountovf_csr_t(processor_t* const proc, const reg_t addr);
+ virtual void verify_permissions(insn_t insn, bool write) const override;
+ virtual reg_t read() const noexcept override;
+ protected:
+ virtual bool unlogged_write(const reg_t val) noexcept override;
+};
#endif
diff --git a/riscv/debug_module.cc b/riscv/debug_module.cc
index 0eac842..f5c0c73 100644
--- a/riscv/debug_module.cc
+++ b/riscv/debug_module.cc
@@ -165,18 +165,18 @@ bool debug_module_t::load(reg_t addr, size_t len, uint8_t* bytes)
bool debug_module_t::store(reg_t addr, size_t len, const uint8_t* bytes)
{
D(
- switch (len) {
- case 4:
- fprintf(stderr, "store(addr=0x%lx, len=%d, bytes=0x%08x); "
- "hartsel=0x%x\n", addr, (unsigned) len, *(uint32_t *) bytes,
- dmcontrol.hartsel);
- break;
- default:
- fprintf(stderr, "store(addr=0x%lx, len=%d, bytes=...); "
- "hartsel=0x%x\n", addr, (unsigned) len, dmcontrol.hartsel);
- break;
- }
- );
+ switch (len) {
+ case 4:
+ fprintf(stderr, "store(addr=0x%lx, len=%d, bytes=0x%08x); "
+ "hartsel=0x%x\n", addr, (unsigned) len, *(uint32_t *) bytes,
+ dmcontrol.hartsel);
+ break;
+ default:
+ fprintf(stderr, "store(addr=0x%lx, len=%d, bytes=...); "
+ "hartsel=0x%x\n", addr, (unsigned) len, dmcontrol.hartsel);
+ break;
+ }
+ );
uint8_t id_bytes[4];
uint32_t id = 0;
@@ -215,11 +215,11 @@ bool debug_module_t::store(reg_t addr, size_t len, const uint8_t* bytes)
}
}
if (dmcontrol.hartsel == id) {
- if (0 == (debug_rom_flags[id] & (1 << DEBUG_ROM_FLAG_GO))){
- if (dmcontrol.hartsel == id) {
- abstract_command_completed = true;
- }
+ if (0 == (debug_rom_flags[id] & (1 << DEBUG_ROM_FLAG_GO))) {
+ if (dmcontrol.hartsel == id) {
+ abstract_command_completed = true;
}
+ }
}
return true;
}
@@ -394,15 +394,15 @@ bool debug_module_t::dmi_read(unsigned address, uint32_t *value)
result = set_field(result, DM_DMCONTROL_HASEL, dmcontrol.hasel);
result = set_field(result, DM_DMCONTROL_HARTSELLO, dmcontrol.hartsel);
result = set_field(result, DM_DMCONTROL_HARTRESET, dmcontrol.hartreset);
- result = set_field(result, DM_DMCONTROL_NDMRESET, dmcontrol.ndmreset);
+ result = set_field(result, DM_DMCONTROL_NDMRESET, dmcontrol.ndmreset);
result = set_field(result, DM_DMCONTROL_DMACTIVE, dmcontrol.dmactive);
}
break;
case DM_DMSTATUS:
{
- dmstatus.allhalted = true;
+ dmstatus.allhalted = true;
dmstatus.anyhalted = false;
- dmstatus.allrunning = true;
+ dmstatus.allrunning = true;
dmstatus.anyrunning = false;
dmstatus.allnonexistant = true;
dmstatus.allresumeack = true;
@@ -430,8 +430,8 @@ bool debug_module_t::dmi_read(unsigned address, uint32_t *value)
// non-existent hartsel.
dmstatus.anynonexistant = (dmcontrol.hartsel >= nprocs);
- dmstatus.allunavail = false;
- dmstatus.anyunavail = false;
+ dmstatus.allunavail = false;
+ dmstatus.anyunavail = false;
result = set_field(result, DM_DMSTATUS_IMPEBREAK,
dmstatus.impebreak);
@@ -439,15 +439,15 @@ bool debug_module_t::dmi_read(unsigned address, uint32_t *value)
hart_state[dmcontrol.hartsel].havereset);
result = set_field(result, DM_DMSTATUS_ANYHAVERESET,
hart_state[dmcontrol.hartsel].havereset);
- result = set_field(result, DM_DMSTATUS_ALLNONEXISTENT, dmstatus.allnonexistant);
- result = set_field(result, DM_DMSTATUS_ALLUNAVAIL, dmstatus.allunavail);
- result = set_field(result, DM_DMSTATUS_ALLRUNNING, dmstatus.allrunning);
- result = set_field(result, DM_DMSTATUS_ALLHALTED, dmstatus.allhalted);
+ result = set_field(result, DM_DMSTATUS_ALLNONEXISTENT, dmstatus.allnonexistant);
+ result = set_field(result, DM_DMSTATUS_ALLUNAVAIL, dmstatus.allunavail);
+ result = set_field(result, DM_DMSTATUS_ALLRUNNING, dmstatus.allrunning);
+ result = set_field(result, DM_DMSTATUS_ALLHALTED, dmstatus.allhalted);
result = set_field(result, DM_DMSTATUS_ALLRESUMEACK, dmstatus.allresumeack);
- result = set_field(result, DM_DMSTATUS_ANYNONEXISTENT, dmstatus.anynonexistant);
- result = set_field(result, DM_DMSTATUS_ANYUNAVAIL, dmstatus.anyunavail);
- result = set_field(result, DM_DMSTATUS_ANYRUNNING, dmstatus.anyrunning);
- result = set_field(result, DM_DMSTATUS_ANYHALTED, dmstatus.anyhalted);
+ result = set_field(result, DM_DMSTATUS_ANYNONEXISTENT, dmstatus.anynonexistant);
+ result = set_field(result, DM_DMSTATUS_ANYUNAVAIL, dmstatus.anyunavail);
+ result = set_field(result, DM_DMSTATUS_ANYRUNNING, dmstatus.anyrunning);
+ result = set_field(result, DM_DMSTATUS_ANYHALTED, dmstatus.anyhalted);
result = set_field(result, DM_DMSTATUS_ANYRESUMEACK, dmstatus.anyresumeack);
result = set_field(result, DM_DMSTATUS_AUTHENTICATED, dmstatus.authenticated);
result = set_field(result, DM_DMSTATUS_AUTHBUSY, dmstatus.authbusy);
@@ -672,7 +672,7 @@ bool debug_module_t::perform_abstract_command()
write32(debug_abstract, i++, csrw(S0, CSR_DSCRATCH0));
}
- } else if (regno >= 0x1020 && regno < 0x1040) {
+ } else if (regno >= 0x1020 && regno < 0x1040 && config.support_abstract_fpr_access) {
unsigned fprnum = regno - 0x1020;
if (write) {
@@ -920,7 +920,9 @@ bool debug_module_t::dmi_write(unsigned address, uint32_t value)
}
return true;
case DM_DMCS2:
- if (config.support_haltgroups && get_field(value, DM_DMCS2_HGWRITE)) {
+ if (config.support_haltgroups &&
+ get_field(value, DM_DMCS2_HGWRITE) &&
+ get_field(value, DM_DMCS2_GROUPTYPE) == 0) {
hart_state[dmcontrol.hartsel].haltgroup = get_field(value,
DM_DMCS2_GROUP);
}
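Taken together, this file's two functional changes are opt-in abstract FPR access (register numbers 0x1020..0x103f now also require config.support_abstract_fpr_access) and stricter DMCS2 handling (haltgroup writes take effect only when GROUPTYPE is 0, i.e. halt groups rather than resume groups). A sketch of how a front end might fill in the extended config struct declared in debug_module.h below; the field values are illustrative only:

  debug_module_config_t cfg = {};
  cfg.progbufsize = 2;
  cfg.support_abstract_csr_access = true;
  cfg.support_abstract_fpr_access = true;  // new field: allows abstract access to f0..f31
  cfg.support_haltgroups = true;           // DMCS2 writes also need GROUPTYPE == 0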
diff --git a/riscv/debug_module.h b/riscv/debug_module.h
index d79ce7d..8fb335d 100644
--- a/riscv/debug_module.h
+++ b/riscv/debug_module.h
@@ -11,16 +11,17 @@ class sim_t;
class bus_t;
typedef struct {
- // Size of program_buffer in 32-bit words, as exposed to the rest of the
- // world.
- unsigned progbufsize;
- unsigned max_sba_data_width;
- bool require_authentication;
- unsigned abstract_rti;
- bool support_hasel;
- bool support_abstract_csr_access;
- bool support_haltgroups;
- bool support_impebreak;
+ // Size of program_buffer in 32-bit words, as exposed to the rest of the
+ // world.
+ unsigned progbufsize;
+ unsigned max_sba_data_width;
+ bool require_authentication;
+ unsigned abstract_rti;
+ bool support_hasel;
+ bool support_abstract_csr_access;
+ bool support_abstract_fpr_access;
+ bool support_haltgroups;
+ bool support_impebreak;
} debug_module_config_t;
typedef struct {
@@ -54,12 +55,12 @@ typedef struct {
} dmstatus_t;
typedef enum cmderr {
- CMDERR_NONE = 0,
- CMDERR_BUSY = 1,
- CMDERR_NOTSUP = 2,
- CMDERR_EXCEPTION = 3,
- CMDERR_HALTRESUME = 4,
- CMDERR_OTHER = 7
+ CMDERR_NONE = 0,
+ CMDERR_BUSY = 1,
+ CMDERR_NOTSUP = 2,
+ CMDERR_EXCEPTION = 3,
+ CMDERR_HALTRESUME = 4,
+ CMDERR_OTHER = 7
} cmderr_t;
typedef struct {
diff --git a/riscv/decode.h b/riscv/decode.h
index 611c910..2bf9ddf 100644
--- a/riscv/decode.h
+++ b/riscv/decode.h
@@ -16,7 +16,10 @@
#include "common.h"
#include "softfloat_types.h"
#include "specialize.h"
+#include "p_ext_macros.h"
+#include "v_ext_macros.h"
#include <cinttypes>
+#include <type_traits>
typedef int64_t sreg_t;
typedef uint64_t reg_t;
@@ -67,7 +70,6 @@ const int NCSR = 4096;
(((x) & 0x03) < 0x03 ? 2 : \
((x) & 0x1f) < 0x1f ? 4 : \
((x) & 0x3f) < 0x3f ? 6 : \
- ((x) & 0x7f) == 0x7f ? 4 : \
8)
#define MAX_INSN_LENGTH 8
#define PC_ALIGN 2
@@ -78,13 +80,13 @@ class insn_t
public:
insn_t() = default;
insn_t(insn_bits_t bits) : b(bits) {}
- insn_bits_t bits() { return b & ~((UINT64_MAX) << (length() * 8)); }
+ insn_bits_t bits() { return b; }
int length() { return insn_length(b); }
- int64_t i_imm() { return int64_t(b) >> 20; }
+ int64_t i_imm() { return xs(20, 12); }
int64_t shamt() { return x(20, 6); }
int64_t s_imm() { return x(7, 5) + (xs(25, 7) << 5); }
int64_t sb_imm() { return (x(8, 4) << 1) + (x(25, 6) << 5) + (x(7, 1) << 11) + (imm_sign() << 12); }
- int64_t u_imm() { return int64_t(b) >> 12 << 12; }
+ int64_t u_imm() { return xs(12, 20) << 12; }
int64_t uj_imm() { return (x(21, 10) << 1) + (x(20, 1) << 11) + (x(12, 8) << 12) + (imm_sign() << 20); }
uint64_t rd() { return x(7, 5); }
uint64_t rs1() { return x(15, 5); }
@@ -143,7 +145,7 @@ private:
insn_bits_t b;
uint64_t x(int lo, int len) { return (b >> lo) & ((insn_bits_t(1) << len) - 1); }
uint64_t xs(int lo, int len) { return int64_t(b) << (64 - lo - len) >> (64 - len); }
- uint64_t imm_sign() { return xs(63, 1); }
+ uint64_t imm_sign() { return xs(31, 1); }
};
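  // Worked example of the reworked immediate helpers: i_imm() and imm_sign()
  // now extract from bit 31 down via xs(), so they no longer depend on bits
  // the container may hold above the 32-bit instruction (which is why bits()
  // can return b unmasked). Free-function copies of x()/xs() for the check:
  #include <cassert>
  #include <cstdint>
  static uint64_t x(uint64_t b, int lo, int len) { return (b >> lo) & ((uint64_t(1) << len) - 1); }
  static uint64_t xs(uint64_t b, int lo, int len) { return uint64_t(int64_t(b) << (64 - lo - len) >> (64 - len)); }
  int main() {
    uint64_t b = 0xfff00093;                 // addi x1, x0, -1
    assert((int64_t)xs(b, 20, 12) == -1);    // i_imm: bits 31..20, sign-extended
    assert(xs(b, 31, 1) == UINT64_MAX);      // imm_sign: bit 31 replicated
    assert(x(b, 7, 5) == 1);                 // rd
    return 0;
  }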
template <class T, size_t N, bool zero_reg>
@@ -176,7 +178,7 @@ private:
#define STATE (*p->get_state())
#define FLEN (p->get_flen())
#define CHECK_REG(reg) ((void) 0)
-#define READ_REG(reg) ({ CHECK_REG(reg); STATE.XPR[reg]; })
+#define READ_REG(reg) (CHECK_REG(reg), STATE.XPR[reg])
#define READ_FREG(reg) STATE.FPR[reg]
#define RD READ_REG(insn.rd())
#define RS1 READ_REG(insn.rs1())
@@ -222,14 +224,56 @@ private:
#define RVC_SP READ_REG(X_SP)
// FPU macros
+#define READ_ZDINX_REG(reg) (xlen == 32 ? f64(READ_REG_PAIR(reg)) : f64(STATE.XPR[reg] & (uint64_t)-1))
+#define READ_FREG_H(reg) (p->extension_enabled(EXT_ZFINX) ? f16(STATE.XPR[reg] & (uint16_t)-1) : f16(READ_FREG(reg)))
+#define READ_FREG_F(reg) (p->extension_enabled(EXT_ZFINX) ? f32(STATE.XPR[reg] & (uint32_t)-1) : f32(READ_FREG(reg)))
+#define READ_FREG_D(reg) (p->extension_enabled(EXT_ZFINX) ? READ_ZDINX_REG(reg) : f64(READ_FREG(reg)))
#define FRS1 READ_FREG(insn.rs1())
#define FRS2 READ_FREG(insn.rs2())
#define FRS3 READ_FREG(insn.rs3())
+#define FRS1_H READ_FREG_H(insn.rs1())
+#define FRS1_F READ_FREG_F(insn.rs1())
+#define FRS1_D READ_FREG_D(insn.rs1())
+#define FRS2_H READ_FREG_H(insn.rs2())
+#define FRS2_F READ_FREG_F(insn.rs2())
+#define FRS2_D READ_FREG_D(insn.rs2())
+#define FRS3_H READ_FREG_H(insn.rs3())
+#define FRS3_F READ_FREG_F(insn.rs3())
+#define FRS3_D READ_FREG_D(insn.rs3())
#define dirty_fp_state STATE.sstatus->dirty(SSTATUS_FS)
#define dirty_ext_state STATE.sstatus->dirty(SSTATUS_XS)
#define dirty_vs_state STATE.sstatus->dirty(SSTATUS_VS)
#define DO_WRITE_FREG(reg, value) (STATE.FPR.write(reg, value), dirty_fp_state)
#define WRITE_FRD(value) WRITE_FREG(insn.rd(), value)
+#define WRITE_FRD_H(value) \
+do { \
+ if (p->extension_enabled(EXT_ZFINX)) \
+ WRITE_REG(insn.rd(), sext_xlen((int16_t)((value).v))); \
+ else { \
+ WRITE_FRD(value); \
+ } \
+} while (0)
+#define WRITE_FRD_F(value) \
+do { \
+ if (p->extension_enabled(EXT_ZFINX)) \
+ WRITE_REG(insn.rd(), sext_xlen((value).v)); \
+ else { \
+ WRITE_FRD(value); \
+ } \
+} while (0)
+#define WRITE_FRD_D(value) \
+do { \
+ if (p->extension_enabled(EXT_ZFINX)) { \
+ if (xlen == 32) { \
+ uint64_t val = (value).v; \
+ WRITE_RD_PAIR(val); \
+ } else { \
+ WRITE_REG(insn.rd(), (value).v); \
+ } \
+ } else { \
+ WRITE_FRD(value); \
+ } \
+} while (0)
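  // Under Zfinx/Zhinx/Zdinx the FP operands live in the integer register
  // file, so the READ_FREG_{H,F,D} macros above just mask the x-register down
  // to the operand width (pairing registers for FP64 on RV32). A quick check
  // of the half-precision masking idiom:
  #include <cassert>
  #include <cstdint>
  int main() {
    uint64_t x5 = 0xdeadbeefcafe3c00ULL;   // x-register holding an FP16 value
    uint16_t h = x5 & (uint16_t)-1;        // low 16 bits, as READ_FREG_H does
    assert(h == 0x3c00);                   // 1.0 in IEEE half precision
    return 0;
  }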
#define SHAMT (insn.i_imm() & 0x3F)
#define BRANCH_TARGET (pc + insn.sb_imm())
@@ -239,8 +283,11 @@ private:
if (rm > 4) throw trap_illegal_instruction(insn.bits()); \
rm; })
-#define get_field(reg, mask) (((reg) & (decltype(reg))(mask)) / ((mask) & ~((mask) << 1)))
-#define set_field(reg, mask, val) (((reg) & ~(decltype(reg))(mask)) | (((decltype(reg))(val) * ((mask) & ~((mask) << 1))) & (decltype(reg))(mask)))
+#define get_field(reg, mask) \
+ (((reg) & (std::remove_cv<decltype(reg)>::type)(mask)) / ((mask) & ~((mask) << 1)))
+
+#define set_field(reg, mask, val) \
+ (((reg) & ~(std::remove_cv<decltype(reg)>::type)(mask)) | (((std::remove_cv<decltype(reg)>::type)(val) * ((mask) & ~((mask) << 1))) & (std::remove_cv<decltype(reg)>::type)(mask)))
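  // ((mask) & ~((mask) << 1)) isolates the mask's lowest set bit, so
  // get_field's division shifts the field down and set_field's multiplication
  // shifts val back up; std::remove_cv just lets the macros accept
  // const-qualified lvalues. Worked example:
  #include <cassert>
  #include <cstdint>
  int main() {
    const uint64_t MASK = 0x00f0;              // 4-bit field at bit 4
    uint64_t lsb = MASK & ~(MASK << 1);        // the lowest set bit of MASK
    uint64_t reg = 0xabcd;
    assert(lsb == 0x10);
    assert((reg & MASK) / lsb == 0xc);         // get_field(reg, MASK)
    assert(((reg & ~MASK) | ((0x5 * lsb) & MASK)) == 0xab5d);  // set_field(reg, MASK, 0x5)
    return 0;
  }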
#define require_privilege(p) require(STATE.prv >= (p))
#define require_novirt() if (unlikely(STATE.v)) throw trap_virtual_instruction(insn.bits())
@@ -249,7 +296,8 @@ private:
#define require_extension(s) require(p->extension_enabled(s))
#define require_either_extension(A,B) require(p->extension_enabled(A) || p->extension_enabled(B));
#define require_impl(s) require(p->supports_impl(s))
-#define require_fp require(STATE.sstatus->enabled(SSTATUS_FS))
+#define require_fs require(STATE.sstatus->enabled(SSTATUS_FS))
+#define require_fp STATE.fflags->verify_permissions(insn, false)
#define require_accelerator require(STATE.sstatus->enabled(SSTATUS_XS))
#define require_vector_vs require(STATE.sstatus->enabled(SSTATUS_VS))
#define require_vector(alu) \
@@ -262,12 +310,10 @@ private:
WRITE_VSTATUS; \
dirty_vs_state; \
} while (0);
-#define require_vector_novtype(is_log, alu) \
+#define require_vector_novtype(is_log) \
do { \
require_vector_vs; \
require_extension('V'); \
- if (alu && !P.VU.vstart_alu) \
- require(P.VU.vstart->read() == 0); \
if (is_log) \
WRITE_VSTATUS; \
dirty_vs_state; \
@@ -314,7 +360,6 @@ class wait_for_interrupt_t {};
#define wfi() \
do { set_pc_and_serialize(npc); \
- npc = PC_SERIALIZE_WFI; \
throw wait_for_interrupt_t(); \
} while (0)
@@ -323,7 +368,6 @@ class wait_for_interrupt_t {};
/* Sentinel PC values to serialize simulator pipeline */
#define PC_SERIALIZE_BEFORE 3
#define PC_SERIALIZE_AFTER 5
-#define PC_SERIALIZE_WFI 7
#define invalid_pc(pc) ((pc) & 1)
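  // The sentinels are deliberately odd values: real PCs are 2-byte aligned
  // (PC_ALIGN == 2), so invalid_pc() separates them from any architectural PC
  // with a single bit test.
  #include <cassert>
  int main() {
    auto invalid_pc = [](unsigned long pc) { return (pc & 1) != 0; };
    assert(invalid_pc(3) && invalid_pc(5));   // PC_SERIALIZE_BEFORE / _AFTER
    assert(!invalid_pc(0x80000000UL));        // an ordinary aligned PC
    return 0;
  }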
/* Convenience wrappers to simplify softfloat code sequences */
@@ -416,2575 +460,6 @@ inline long double to_f(float128_t f) { long double r; memcpy(&r, &f, sizeof(r))
#define DEBUG_RVV_FMA_VF 0
#endif
-//
-// vector: masking skip helper
-//
-#define VI_MASK_VARS \
- const int midx = i / 64; \
- const int mpos = i % 64;
-
-#define VI_LOOP_ELEMENT_SKIP(BODY) \
- VI_MASK_VARS \
- if (insn.v_vm() == 0) { \
- BODY; \
- bool skip = ((P.VU.elt<uint64_t>(0, midx) >> mpos) & 0x1) == 0; \
- if (skip) { \
- continue; \
- } \
- }
-
-#define VI_ELEMENT_SKIP(inx) \
- if (inx >= vl) { \
- continue; \
- } else if (inx < P.VU.vstart->read()) { \
- continue; \
- } else { \
- VI_LOOP_ELEMENT_SKIP(); \
- }
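  // Element i's mask bit sits in 64-bit chunk i/64 of mask register v0, at
  // bit position i%64 -- exactly the midx/mpos pair VI_MASK_VARS computes.
  #include <cassert>
  #include <cstdint>
  int main() {
    uint64_t v0[4] = {0, 0, 1ULL << 2, 0};    // hypothetical mask register
    int i = 130;                              // element index: 2*64 + 2
    int midx = i / 64, mpos = i % 64;
    assert(((v0[midx] >> mpos) & 1) == 1);    // element 130 is active
    return 0;
  }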
-
-//
-// vector: operation and register access check helper
-//
-static inline bool is_overlapped(const int astart, int asize,
- const int bstart, int bsize)
-{
- asize = asize == 0 ? 1 : asize;
- bsize = bsize == 0 ? 1 : bsize;
-
- const int aend = astart + asize;
- const int bend = bstart + bsize;
-
- return std::max(aend, bend) - std::min(astart, bstart) < asize + bsize;
-}
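  // The criterion above reads: two ranges overlap exactly when the span they
  // jointly cover is smaller than the sum of their sizes. A quick check:
  #include <algorithm>
  #include <cassert>
  int main() {
    // [8,12) vs [10,14): span 14-8 = 6 < 4+4 -> overlapped
    assert(std::max(12, 14) - std::min(8, 10) < 4 + 4);
    // [8,12) vs [12,16): span 16-8 = 8, not < 8 -> disjoint
    assert(!(std::max(12, 16) - std::min(8, 12) < 4 + 4));
    return 0;
  }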
-
-static inline bool is_overlapped_widen(const int astart, int asize,
- const int bstart, int bsize)
-{
- asize = asize == 0 ? 1 : asize;
- bsize = bsize == 0 ? 1 : bsize;
-
- const int aend = astart + asize;
- const int bend = bstart + bsize;
-
- if (astart < bstart &&
- is_overlapped(astart, asize, bstart, bsize) &&
- !is_overlapped(astart, asize, bstart + bsize, bsize)) {
- return false;
- } else {
- return std::max(aend, bend) - std::min(astart, bstart) < asize + bsize;
- }
-}
-
-static inline bool is_aligned(const unsigned val, const unsigned pos)
-{
- return pos ? (val & (pos - 1)) == 0 : true;
-}
-
-#define VI_NARROW_CHECK_COMMON \
- require_vector(true); \
- require(P.VU.vflmul <= 4); \
- require(P.VU.vsew * 2 <= P.VU.ELEN); \
- require_align(insn.rs2(), P.VU.vflmul * 2); \
- require_align(insn.rd(), P.VU.vflmul); \
- require_vm; \
-
-#define VI_WIDE_CHECK_COMMON \
- require_vector(true); \
- require(P.VU.vflmul <= 4); \
- require(P.VU.vsew * 2 <= P.VU.ELEN); \
- require_align(insn.rd(), P.VU.vflmul * 2); \
- require_vm; \
-
-#define VI_CHECK_ST_INDEX(elt_width) \
- require_vector(false); \
- float vemul = ((float)elt_width / P.VU.vsew * P.VU.vflmul); \
- require(vemul >= 0.125 && vemul <= 8); \
- reg_t emul = vemul < 1 ? 1 : vemul; \
- reg_t flmul = P.VU.vflmul < 1 ? 1 : P.VU.vflmul; \
- require_align(insn.rd(), P.VU.vflmul); \
- require_align(insn.rs2(), vemul); \
- require((nf * flmul) <= (NVPR / 4) && \
- (insn.rd() + nf * flmul) <= NVPR); \
-
-#define VI_CHECK_LD_INDEX(elt_width) \
- VI_CHECK_ST_INDEX(elt_width); \
- for (reg_t idx = 0; idx < nf; ++idx) { \
- reg_t flmul = P.VU.vflmul < 1 ? 1 : P.VU.vflmul; \
- reg_t seg_vd = insn.rd() + flmul * idx; \
- if (elt_width > P.VU.vsew) { \
- if (seg_vd != insn.rs2()) \
- require_noover(seg_vd, P.VU.vflmul, insn.rs2(), vemul); \
- } else if (elt_width < P.VU.vsew) { \
- if (vemul < 1) { \
- require_noover(seg_vd, P.VU.vflmul, insn.rs2(), vemul); \
- } else { \
- require_noover_widen(seg_vd, P.VU.vflmul, insn.rs2(), vemul); \
- } \
- } \
- if (nf >= 2) { \
- require_noover(seg_vd, P.VU.vflmul, insn.rs2(), vemul); \
- } \
- } \
- require_vm; \
-
-#define VI_CHECK_MSS(is_vs1) \
- if (insn.rd() != insn.rs2()) \
- require_noover(insn.rd(), 1, insn.rs2(), P.VU.vflmul); \
- require_align(insn.rs2(), P.VU.vflmul); \
- if (is_vs1) { \
- if (insn.rd() != insn.rs1()) \
- require_noover(insn.rd(), 1, insn.rs1(), P.VU.vflmul); \
- require_align(insn.rs1(), P.VU.vflmul); \
- } \
-
-#define VI_CHECK_SSS(is_vs1) \
- require_vm; \
- if (P.VU.vflmul > 1) { \
- require_align(insn.rd(), P.VU.vflmul); \
- require_align(insn.rs2(), P.VU.vflmul); \
- if (is_vs1) { \
- require_align(insn.rs1(), P.VU.vflmul); \
- } \
- }
-
-#define VI_CHECK_STORE(elt_width, is_mask_ldst) \
- require_vector(false); \
- reg_t veew = is_mask_ldst ? 1 : sizeof(elt_width##_t) * 8; \
- float vemul = is_mask_ldst ? 1 : ((float)veew / P.VU.vsew * P.VU.vflmul); \
- reg_t emul = vemul < 1 ? 1 : vemul; \
- require(vemul >= 0.125 && vemul <= 8); \
- require_align(insn.rd(), vemul); \
- require((nf * emul) <= (NVPR / 4) && \
- (insn.rd() + nf * emul) <= NVPR); \
- require(veew <= P.VU.ELEN); \
-
-#define VI_CHECK_LOAD(elt_width, is_mask_ldst) \
- VI_CHECK_STORE(elt_width, is_mask_ldst); \
- require_vm; \
-
-#define VI_CHECK_DSS(is_vs1) \
- VI_WIDE_CHECK_COMMON; \
- require_align(insn.rs2(), P.VU.vflmul); \
- if (P.VU.vflmul < 1) { \
- require_noover(insn.rd(), P.VU.vflmul * 2, insn.rs2(), P.VU.vflmul); \
- } else { \
- require_noover_widen(insn.rd(), P.VU.vflmul * 2, insn.rs2(), P.VU.vflmul); \
- } \
- if (is_vs1) { \
- require_align(insn.rs1(), P.VU.vflmul); \
- if (P.VU.vflmul < 1) { \
- require_noover(insn.rd(), P.VU.vflmul * 2, insn.rs1(), P.VU.vflmul); \
- } else { \
- require_noover_widen(insn.rd(), P.VU.vflmul * 2, insn.rs1(), P.VU.vflmul); \
- } \
- }
-
-#define VI_CHECK_DDS(is_rs) \
- VI_WIDE_CHECK_COMMON; \
- require_align(insn.rs2(), P.VU.vflmul * 2); \
- if (is_rs) { \
- require_align(insn.rs1(), P.VU.vflmul); \
- if (P.VU.vflmul < 1) { \
- require_noover(insn.rd(), P.VU.vflmul * 2, insn.rs1(), P.VU.vflmul); \
- } else { \
- require_noover_widen(insn.rd(), P.VU.vflmul * 2, insn.rs1(), P.VU.vflmul); \
- } \
- }
-
-#define VI_CHECK_SDS(is_vs1) \
- VI_NARROW_CHECK_COMMON; \
- if (insn.rd() != insn.rs2()) \
- require_noover(insn.rd(), P.VU.vflmul, insn.rs2(), P.VU.vflmul * 2); \
- if (is_vs1) \
- require_align(insn.rs1(), P.VU.vflmul); \
-
-#define VI_CHECK_REDUCTION(is_wide) \
- require_vector(true); \
- if (is_wide) { \
- require(P.VU.vsew * 2 <= P.VU.ELEN); \
- } \
- require_align(insn.rs2(), P.VU.vflmul); \
- require(P.VU.vstart->read() == 0); \
-
-#define VI_CHECK_SLIDE(is_over) \
- require_align(insn.rs2(), P.VU.vflmul); \
- require_align(insn.rd(), P.VU.vflmul); \
- require_vm; \
- if (is_over) \
- require(insn.rd() != insn.rs2()); \
-
-
-//
-// vector: loop header and end helper
-//
-#define VI_GENERAL_LOOP_BASE \
- require(P.VU.vsew >= e8 && P.VU.vsew <= e64); \
- require_vector(true); \
- reg_t vl = P.VU.vl->read(); \
- reg_t sew = P.VU.vsew; \
- reg_t rd_num = insn.rd(); \
- reg_t rs1_num = insn.rs1(); \
- reg_t rs2_num = insn.rs2(); \
- for (reg_t i = P.VU.vstart->read(); i < vl; ++i) {
-
-#define VI_LOOP_BASE \
- VI_GENERAL_LOOP_BASE \
- VI_LOOP_ELEMENT_SKIP();
-
-#define VI_LOOP_END \
- } \
- P.VU.vstart->write(0);
-
-#define VI_LOOP_REDUCTION_END(x) \
- } \
- if (vl > 0) { \
- vd_0_des = vd_0_res; \
- } \
- P.VU.vstart->write(0);
-
-#define VI_LOOP_CARRY_BASE \
- VI_GENERAL_LOOP_BASE \
- VI_MASK_VARS \
- auto v0 = P.VU.elt<uint64_t>(0, midx); \
- const uint64_t mmask = UINT64_C(1) << mpos; \
- const uint128_t op_mask = (UINT64_MAX >> (64 - sew)); \
- uint64_t carry = insn.v_vm() == 0 ? (v0 >> mpos) & 0x1 : 0; \
- uint128_t res = 0; \
- auto &vd = P.VU.elt<uint64_t>(rd_num, midx, true);
-
-#define VI_LOOP_CARRY_END \
- vd = (vd & ~mmask) | (((res) << mpos) & mmask); \
- } \
- P.VU.vstart->write(0);
-#define VI_LOOP_WITH_CARRY_BASE \
- VI_GENERAL_LOOP_BASE \
- VI_MASK_VARS \
- auto &v0 = P.VU.elt<uint64_t>(0, midx); \
- const uint128_t op_mask = (UINT64_MAX >> (64 - sew)); \
- uint64_t carry = (v0 >> mpos) & 0x1;
-
-#define VI_LOOP_CMP_BASE \
- require(P.VU.vsew >= e8 && P.VU.vsew <= e64); \
- require_vector(true); \
- reg_t vl = P.VU.vl->read(); \
- reg_t sew = P.VU.vsew; \
- reg_t rd_num = insn.rd(); \
- reg_t rs1_num = insn.rs1(); \
- reg_t rs2_num = insn.rs2(); \
- for (reg_t i = P.VU.vstart->read(); i < vl; ++i) { \
- VI_LOOP_ELEMENT_SKIP(); \
- uint64_t mmask = UINT64_C(1) << mpos; \
- uint64_t &vdi = P.VU.elt<uint64_t>(insn.rd(), midx, true); \
- uint64_t res = 0;
-
-#define VI_LOOP_CMP_END \
- vdi = (vdi & ~mmask) | (((res) << mpos) & mmask); \
- } \
- P.VU.vstart->write(0);
-
-#define VI_LOOP_MASK(op) \
- require(P.VU.vsew <= e64); \
- require_vector(true); \
- reg_t vl = P.VU.vl->read(); \
- for (reg_t i = P.VU.vstart->read(); i < vl; ++i) { \
- int midx = i / 64; \
- int mpos = i % 64; \
- uint64_t mmask = UINT64_C(1) << mpos; \
- uint64_t vs2 = P.VU.elt<uint64_t>(insn.rs2(), midx); \
- uint64_t vs1 = P.VU.elt<uint64_t>(insn.rs1(), midx); \
- uint64_t &res = P.VU.elt<uint64_t>(insn.rd(), midx, true); \
- res = (res & ~mmask) | ((op) & (1ULL << mpos)); \
- } \
- P.VU.vstart->write(0);
-
-#define VI_LOOP_NSHIFT_BASE \
- VI_GENERAL_LOOP_BASE; \
- VI_LOOP_ELEMENT_SKIP({ \
- require(!(insn.rd() == 0 && P.VU.vflmul > 1)); \
- });
-
-
-#define INT_ROUNDING(result, xrm, gb) \
- do { \
- const uint64_t lsb = 1UL << (gb); \
- const uint64_t lsb_half = lsb >> 1; \
- switch (xrm) { \
- case VRM::RNU: \
- result += lsb_half; \
- break; \
- case VRM::RNE: \
- if ((result & lsb_half) && ((result & (lsb_half - 1)) || (result & lsb))) \
- result += lsb; \
- break; \
- case VRM::RDN: \
- break; \
- case VRM::ROD: \
- if (result & (lsb - 1)) \
- result |= lsb; \
- break; \
- case VRM::INVALID_RM: \
- assert(true); \
- } \
- } while (0)
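  // INT_ROUNDING with gb = 1 rounds one guard bit off before the caller
  // shifts right. Tracing the RNE (round-to-nearest-even) branch above:
  #include <cassert>
  #include <cstdint>
  int main() {
    const uint64_t gb = 1, lsb = 1ULL << gb, lsb_half = lsb >> 1;
    auto rne = [&](uint64_t result) {
      if ((result & lsb_half) && ((result & (lsb_half - 1)) || (result & lsb)))
        result += lsb;
      return result >> gb;
    };
    assert(rne(5) == 2);   // 2.5: tie rounds to even -> 2
    assert(rne(7) == 4);   // 3.5: tie rounds to even -> 4
    assert(rne(6) == 3);   // 3.0: exact, unchanged
    return 0;
  }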
-
-//
-// vector: integer and masking operand access helper
-//
-#define VXI_PARAMS(x) \
- type_sew_t<x>::type &vd = P.VU.elt<type_sew_t<x>::type>(rd_num, i, true); \
- type_sew_t<x>::type vs1 = P.VU.elt<type_sew_t<x>::type>(rs1_num, i); \
- type_sew_t<x>::type vs2 = P.VU.elt<type_sew_t<x>::type>(rs2_num, i); \
- type_sew_t<x>::type rs1 = (type_sew_t<x>::type)RS1; \
- type_sew_t<x>::type simm5 = (type_sew_t<x>::type)insn.v_simm5();
-
-#define VV_U_PARAMS(x) \
- type_usew_t<x>::type &vd = P.VU.elt<type_usew_t<x>::type>(rd_num, i, true); \
- type_usew_t<x>::type vs1 = P.VU.elt<type_usew_t<x>::type>(rs1_num, i); \
- type_usew_t<x>::type vs2 = P.VU.elt<type_usew_t<x>::type>(rs2_num, i);
-
-#define VX_U_PARAMS(x) \
- type_usew_t<x>::type &vd = P.VU.elt<type_usew_t<x>::type>(rd_num, i, true); \
- type_usew_t<x>::type rs1 = (type_usew_t<x>::type)RS1; \
- type_usew_t<x>::type vs2 = P.VU.elt<type_usew_t<x>::type>(rs2_num, i);
-
-#define VI_U_PARAMS(x) \
- type_usew_t<x>::type &vd = P.VU.elt<type_usew_t<x>::type>(rd_num, i, true); \
- type_usew_t<x>::type zimm5 = (type_usew_t<x>::type)insn.v_zimm5(); \
- type_usew_t<x>::type vs2 = P.VU.elt<type_usew_t<x>::type>(rs2_num, i);
-
-#define VV_PARAMS(x) \
- type_sew_t<x>::type &vd = P.VU.elt<type_sew_t<x>::type>(rd_num, i, true); \
- type_sew_t<x>::type vs1 = P.VU.elt<type_sew_t<x>::type>(rs1_num, i); \
- type_sew_t<x>::type vs2 = P.VU.elt<type_sew_t<x>::type>(rs2_num, i);
-
-#define VX_PARAMS(x) \
- type_sew_t<x>::type &vd = P.VU.elt<type_sew_t<x>::type>(rd_num, i, true); \
- type_sew_t<x>::type rs1 = (type_sew_t<x>::type)RS1; \
- type_sew_t<x>::type vs2 = P.VU.elt<type_sew_t<x>::type>(rs2_num, i);
-
-#define VI_PARAMS(x) \
- type_sew_t<x>::type &vd = P.VU.elt<type_sew_t<x>::type>(rd_num, i, true); \
- type_sew_t<x>::type simm5 = (type_sew_t<x>::type)insn.v_simm5(); \
- type_sew_t<x>::type vs2 = P.VU.elt<type_sew_t<x>::type>(rs2_num, i);
-
-#define XV_PARAMS(x) \
- type_sew_t<x>::type &vd = P.VU.elt<type_sew_t<x>::type>(rd_num, i, true); \
- type_usew_t<x>::type vs2 = P.VU.elt<type_usew_t<x>::type>(rs2_num, RS1);
-
-#define VV_SU_PARAMS(x) \
- type_sew_t<x>::type &vd = P.VU.elt<type_sew_t<x>::type>(rd_num, i, true); \
- type_usew_t<x>::type vs1 = P.VU.elt<type_usew_t<x>::type>(rs1_num, i); \
- type_sew_t<x>::type vs2 = P.VU.elt<type_sew_t<x>::type>(rs2_num, i);
-
-#define VX_SU_PARAMS(x) \
- type_sew_t<x>::type &vd = P.VU.elt<type_sew_t<x>::type>(rd_num, i, true); \
- type_usew_t<x>::type rs1 = (type_usew_t<x>::type)RS1; \
- type_sew_t<x>::type vs2 = P.VU.elt<type_sew_t<x>::type>(rs2_num, i);
-
-#define VV_UCMP_PARAMS(x) \
- type_usew_t<x>::type vs1 = P.VU.elt<type_usew_t<x>::type>(rs1_num, i); \
- type_usew_t<x>::type vs2 = P.VU.elt<type_usew_t<x>::type>(rs2_num, i);
-
-#define VX_UCMP_PARAMS(x) \
- type_usew_t<x>::type rs1 = (type_usew_t<x>::type)RS1; \
- type_usew_t<x>::type vs2 = P.VU.elt<type_usew_t<x>::type>(rs2_num, i);
-
-#define VI_UCMP_PARAMS(x) \
- type_usew_t<x>::type vs2 = P.VU.elt<type_usew_t<x>::type>(rs2_num, i);
-
-#define VV_CMP_PARAMS(x) \
- type_sew_t<x>::type vs1 = P.VU.elt<type_sew_t<x>::type>(rs1_num, i); \
- type_sew_t<x>::type vs2 = P.VU.elt<type_sew_t<x>::type>(rs2_num, i);
-
-#define VX_CMP_PARAMS(x) \
- type_sew_t<x>::type rs1 = (type_sew_t<x>::type)RS1; \
- type_sew_t<x>::type vs2 = P.VU.elt<type_sew_t<x>::type>(rs2_num, i);
-
-#define VI_CMP_PARAMS(x) \
- type_sew_t<x>::type simm5 = (type_sew_t<x>::type)insn.v_simm5(); \
- type_sew_t<x>::type vs2 = P.VU.elt<type_sew_t<x>::type>(rs2_num, i);
-
-#define VI_XI_SLIDEDOWN_PARAMS(x, off) \
- auto &vd = P.VU.elt<type_sew_t<x>::type>(rd_num, i, true); \
- auto vs2 = P.VU.elt<type_sew_t<x>::type>(rs2_num, i + off);
-
-#define VI_XI_SLIDEUP_PARAMS(x, offset) \
- auto &vd = P.VU.elt<type_sew_t<x>::type>(rd_num, i, true); \
- auto vs2 = P.VU.elt<type_sew_t<x>::type>(rs2_num, i - offset);
-
-#define VI_NARROW_PARAMS(sew1, sew2) \
- auto &vd = P.VU.elt<type_usew_t<sew1>::type>(rd_num, i, true); \
- auto vs2_u = P.VU.elt<type_usew_t<sew2>::type>(rs2_num, i); \
- auto vs2 = P.VU.elt<type_sew_t<sew2>::type>(rs2_num, i); \
- auto zimm5 = (type_usew_t<sew1>::type)insn.v_zimm5();
-
-#define VX_NARROW_PARAMS(sew1, sew2) \
- auto &vd = P.VU.elt<type_usew_t<sew1>::type>(rd_num, i, true); \
- auto vs2_u = P.VU.elt<type_usew_t<sew2>::type>(rs2_num, i); \
- auto vs2 = P.VU.elt<type_sew_t<sew2>::type>(rs2_num, i); \
- auto rs1 = (type_sew_t<sew1>::type)RS1;
-
-#define VV_NARROW_PARAMS(sew1, sew2) \
- auto &vd = P.VU.elt<type_usew_t<sew1>::type>(rd_num, i, true); \
- auto vs2_u = P.VU.elt<type_usew_t<sew2>::type>(rs2_num, i); \
- auto vs2 = P.VU.elt<type_sew_t<sew2>::type>(rs2_num, i); \
- auto vs1 = P.VU.elt<type_sew_t<sew1>::type>(rs1_num, i);
-
-#define XI_CARRY_PARAMS(x) \
- auto vs2 = P.VU.elt<type_sew_t<x>::type>(rs2_num, i); \
- auto rs1 = (type_sew_t<x>::type)RS1; \
- auto simm5 = (type_sew_t<x>::type)insn.v_simm5(); \
-
-#define VV_CARRY_PARAMS(x) \
- auto vs2 = P.VU.elt<type_sew_t<x>::type>(rs2_num, i); \
- auto vs1 = P.VU.elt<type_sew_t<x>::type>(rs1_num, i); \
-
-#define XI_WITH_CARRY_PARAMS(x) \
- auto vs2 = P.VU.elt<type_sew_t<x>::type>(rs2_num, i); \
- auto rs1 = (type_sew_t<x>::type)RS1; \
- auto simm5 = (type_sew_t<x>::type)insn.v_simm5(); \
- auto &vd = P.VU.elt<type_sew_t<x>::type>(rd_num, i, true);
-
-#define VV_WITH_CARRY_PARAMS(x) \
- auto vs2 = P.VU.elt<type_sew_t<x>::type>(rs2_num, i); \
- auto vs1 = P.VU.elt<type_sew_t<x>::type>(rs1_num, i); \
- auto &vd = P.VU.elt<type_sew_t<x>::type>(rd_num, i, true);
-
-#define VFP_V_PARAMS(width) \
- float##width##_t &vd = P.VU.elt<float##width##_t>(rd_num, i, true); \
- float##width##_t vs2 = P.VU.elt<float##width##_t>(rs2_num, i);
-
-#define VFP_VV_PARAMS(width) \
- float##width##_t &vd = P.VU.elt<float##width##_t>(rd_num, i, true); \
- float##width##_t vs1 = P.VU.elt<float##width##_t>(rs1_num, i); \
- float##width##_t vs2 = P.VU.elt<float##width##_t>(rs2_num, i);
-
-#define VFP_VF_PARAMS(width) \
- float##width##_t &vd = P.VU.elt<float##width##_t>(rd_num, i, true); \
- float##width##_t rs1 = f##width(READ_FREG(rs1_num)); \
- float##width##_t vs2 = P.VU.elt<float##width##_t>(rs2_num, i);
-
-#define CVT_FP_TO_FP_PARAMS(from_width, to_width) \
- auto vs2 = P.VU.elt<float##from_width##_t>(rs2_num, i); \
- auto &vd = P.VU.elt<float##to_width##_t>(rd_num, i, true);
-
-#define CVT_INT_TO_FP_PARAMS(from_width, to_width, sign) \
- auto vs2 = P.VU.elt<sign##from_width##_t>(rs2_num, i); \
- auto &vd = P.VU.elt<float##to_width##_t>(rd_num, i, true);
-
-#define CVT_FP_TO_INT_PARAMS(from_width, to_width, sign) \
- auto vs2 = P.VU.elt<float##from_width##_t>(rs2_num, i); \
- auto &vd = P.VU.elt<sign##to_width##_t>(rd_num, i, true);
-
-//
-// vector: integer and masking operation loop
-//
-
-#define INSNS_BASE(PARAMS, BODY) \
- if (sew == e8) { \
- PARAMS(e8); \
- BODY; \
- } else if (sew == e16) { \
- PARAMS(e16); \
- BODY; \
- } else if (sew == e32) { \
- PARAMS(e32); \
- BODY; \
- } else if (sew == e64) { \
- PARAMS(e64); \
- BODY; \
- }
-
-// comparison result to masking register
-#define VI_LOOP_CMP_BODY(PARAMS, BODY) \
- VI_LOOP_CMP_BASE \
- INSNS_BASE(PARAMS, BODY) \
- VI_LOOP_CMP_END
-
-#define VI_VV_LOOP_CMP(BODY) \
- VI_CHECK_MSS(true); \
- VI_LOOP_CMP_BODY(VV_CMP_PARAMS, BODY)
-
-#define VI_VX_LOOP_CMP(BODY) \
- VI_CHECK_MSS(false); \
- VI_LOOP_CMP_BODY(VX_CMP_PARAMS, BODY)
-
-#define VI_VI_LOOP_CMP(BODY) \
- VI_CHECK_MSS(false); \
- VI_LOOP_CMP_BODY(VI_CMP_PARAMS, BODY)
-
-#define VI_VV_ULOOP_CMP(BODY) \
- VI_CHECK_MSS(true); \
- VI_LOOP_CMP_BODY(VV_UCMP_PARAMS, BODY)
-
-#define VI_VX_ULOOP_CMP(BODY) \
- VI_CHECK_MSS(false); \
- VI_LOOP_CMP_BODY(VX_UCMP_PARAMS, BODY)
-
-#define VI_VI_ULOOP_CMP(BODY) \
- VI_CHECK_MSS(false); \
- VI_LOOP_CMP_BODY(VI_UCMP_PARAMS, BODY)
-
-// merge and copy loop
-#define VI_MERGE_VARS \
- VI_MASK_VARS \
- bool use_first = (P.VU.elt<uint64_t>(0, midx) >> mpos) & 0x1;
-
-#define VI_MERGE_LOOP_BASE \
- require_vector(true); \
- VI_GENERAL_LOOP_BASE \
- VI_MERGE_VARS
-
-#define VI_VV_MERGE_LOOP(BODY) \
- VI_CHECK_SSS(true); \
- VI_MERGE_LOOP_BASE \
- if (sew == e8) { \
- VV_PARAMS(e8); \
- BODY; \
- } else if (sew == e16) { \
- VV_PARAMS(e16); \
- BODY; \
- } else if (sew == e32) { \
- VV_PARAMS(e32); \
- BODY; \
- } else if (sew == e64) { \
- VV_PARAMS(e64); \
- BODY; \
- } \
- VI_LOOP_END
-
-#define VI_VX_MERGE_LOOP(BODY) \
- VI_CHECK_SSS(false); \
- VI_MERGE_LOOP_BASE \
- if (sew == e8) { \
- VX_PARAMS(e8); \
- BODY; \
- } else if (sew == e16) { \
- VX_PARAMS(e16); \
- BODY; \
- } else if (sew == e32) { \
- VX_PARAMS(e32); \
- BODY; \
- } else if (sew == e64) { \
- VX_PARAMS(e64); \
- BODY; \
- } \
- VI_LOOP_END
-
-#define VI_VI_MERGE_LOOP(BODY) \
- VI_CHECK_SSS(false); \
- VI_MERGE_LOOP_BASE \
- if (sew == e8) { \
- VI_PARAMS(e8); \
- BODY; \
- } else if (sew == e16) { \
- VI_PARAMS(e16); \
- BODY; \
- } else if (sew == e32) { \
- VI_PARAMS(e32); \
- BODY; \
- } else if (sew == e64) { \
- VI_PARAMS(e64); \
- BODY; \
- } \
- VI_LOOP_END
-
-#define VI_VF_MERGE_LOOP(BODY) \
- VI_CHECK_SSS(false); \
- VI_VFP_COMMON \
- for (reg_t i = P.VU.vstart->read(); i < vl; ++i) { \
- VI_MERGE_VARS \
- if (P.VU.vsew == e16) { \
- VFP_VF_PARAMS(16); \
- BODY; \
- } else if (P.VU.vsew == e32) { \
- VFP_VF_PARAMS(32); \
- BODY; \
- } else if (P.VU.vsew == e64) { \
- VFP_VF_PARAMS(64); \
- BODY; \
- } \
- VI_LOOP_END
-
-// reduction loop - signed
-#define VI_LOOP_REDUCTION_BASE(x) \
- require(x >= e8 && x <= e64); \
- reg_t vl = P.VU.vl->read(); \
- reg_t rd_num = insn.rd(); \
- reg_t rs1_num = insn.rs1(); \
- reg_t rs2_num = insn.rs2(); \
- auto &vd_0_des = P.VU.elt<type_sew_t<x>::type>(rd_num, 0, true); \
- auto vd_0_res = P.VU.elt<type_sew_t<x>::type>(rs1_num, 0); \
- for (reg_t i = P.VU.vstart->read(); i < vl; ++i) { \
- VI_LOOP_ELEMENT_SKIP(); \
- auto vs2 = P.VU.elt<type_sew_t<x>::type>(rs2_num, i); \
-
-#define REDUCTION_LOOP(x, BODY) \
- VI_LOOP_REDUCTION_BASE(x) \
- BODY; \
- VI_LOOP_REDUCTION_END(x)
-
-#define VI_VV_LOOP_REDUCTION(BODY) \
- VI_CHECK_REDUCTION(false); \
- reg_t sew = P.VU.vsew; \
- if (sew == e8) { \
- REDUCTION_LOOP(e8, BODY) \
- } else if (sew == e16) { \
- REDUCTION_LOOP(e16, BODY) \
- } else if (sew == e32) { \
- REDUCTION_LOOP(e32, BODY) \
- } else if (sew == e64) { \
- REDUCTION_LOOP(e64, BODY) \
- }
-
-// reduction loop - unsigned
-#define VI_ULOOP_REDUCTION_BASE(x) \
- require(x >= e8 && x <= e64); \
- reg_t vl = P.VU.vl->read(); \
- reg_t rd_num = insn.rd(); \
- reg_t rs1_num = insn.rs1(); \
- reg_t rs2_num = insn.rs2(); \
- auto &vd_0_des = P.VU.elt<type_usew_t<x>::type>(rd_num, 0, true); \
- auto vd_0_res = P.VU.elt<type_usew_t<x>::type>(rs1_num, 0); \
- for (reg_t i = P.VU.vstart->read(); i < vl; ++i) { \
- VI_LOOP_ELEMENT_SKIP(); \
- auto vs2 = P.VU.elt<type_usew_t<x>::type>(rs2_num, i);
-
-#define REDUCTION_ULOOP(x, BODY) \
- VI_ULOOP_REDUCTION_BASE(x) \
- BODY; \
- VI_LOOP_REDUCTION_END(x)
-
-#define VI_VV_ULOOP_REDUCTION(BODY) \
- VI_CHECK_REDUCTION(false); \
- reg_t sew = P.VU.vsew; \
- if (sew == e8) { \
- REDUCTION_ULOOP(e8, BODY) \
- } else if (sew == e16) { \
- REDUCTION_ULOOP(e16, BODY) \
- } else if (sew == e32) { \
- REDUCTION_ULOOP(e32, BODY) \
- } else if (sew == e64) { \
- REDUCTION_ULOOP(e64, BODY) \
- }
-
-
-// general VXI signed/unsigned loop
-#define VI_VV_ULOOP(BODY) \
- VI_CHECK_SSS(true) \
- VI_LOOP_BASE \
- if (sew == e8) { \
- VV_U_PARAMS(e8); \
- BODY; \
- } else if (sew == e16) { \
- VV_U_PARAMS(e16); \
- BODY; \
- } else if (sew == e32) { \
- VV_U_PARAMS(e32); \
- BODY; \
- } else if (sew == e64) { \
- VV_U_PARAMS(e64); \
- BODY; \
- } \
- VI_LOOP_END
-
-#define VI_VV_LOOP(BODY) \
- VI_CHECK_SSS(true) \
- VI_LOOP_BASE \
- if (sew == e8) { \
- VV_PARAMS(e8); \
- BODY; \
- } else if (sew == e16) { \
- VV_PARAMS(e16); \
- BODY; \
- } else if (sew == e32) { \
- VV_PARAMS(e32); \
- BODY; \
- } else if (sew == e64) { \
- VV_PARAMS(e64); \
- BODY; \
- } \
- VI_LOOP_END
-
-#define VI_VX_ULOOP(BODY) \
- VI_CHECK_SSS(false) \
- VI_LOOP_BASE \
- if (sew == e8) { \
- VX_U_PARAMS(e8); \
- BODY; \
- } else if (sew == e16) { \
- VX_U_PARAMS(e16); \
- BODY; \
- } else if (sew == e32) { \
- VX_U_PARAMS(e32); \
- BODY; \
- } else if (sew == e64) { \
- VX_U_PARAMS(e64); \
- BODY; \
- } \
- VI_LOOP_END
-
-#define VI_VX_LOOP(BODY) \
- VI_CHECK_SSS(false) \
- VI_LOOP_BASE \
- if (sew == e8) { \
- VX_PARAMS(e8); \
- BODY; \
- } else if (sew == e16) { \
- VX_PARAMS(e16); \
- BODY; \
- } else if (sew == e32) { \
- VX_PARAMS(e32); \
- BODY; \
- } else if (sew == e64) { \
- VX_PARAMS(e64); \
- BODY; \
- } \
- VI_LOOP_END
-
-#define VI_VI_ULOOP(BODY) \
- VI_CHECK_SSS(false) \
- VI_LOOP_BASE \
- if (sew == e8) { \
- VI_U_PARAMS(e8); \
- BODY; \
- } else if (sew == e16) { \
- VI_U_PARAMS(e16); \
- BODY; \
- } else if (sew == e32) { \
- VI_U_PARAMS(e32); \
- BODY; \
- } else if (sew == e64) { \
- VI_U_PARAMS(e64); \
- BODY; \
- } \
- VI_LOOP_END
-
-#define VI_VI_LOOP(BODY) \
- VI_CHECK_SSS(false) \
- VI_LOOP_BASE \
- if (sew == e8) { \
- VI_PARAMS(e8); \
- BODY; \
- } else if (sew == e16) { \
- VI_PARAMS(e16); \
- BODY; \
- } else if (sew == e32) { \
- VI_PARAMS(e32); \
- BODY; \
- } else if (sew == e64) { \
- VI_PARAMS(e64); \
- BODY; \
- } \
- VI_LOOP_END
-
-// signed unsigned operation loop (e.g. mulhsu)
-#define VI_VV_SU_LOOP(BODY) \
- VI_CHECK_SSS(true) \
- VI_LOOP_BASE \
- if (sew == e8) { \
- VV_SU_PARAMS(e8); \
- BODY; \
- } else if (sew == e16) { \
- VV_SU_PARAMS(e16); \
- BODY; \
- } else if (sew == e32) { \
- VV_SU_PARAMS(e32); \
- BODY; \
- } else if (sew == e64) { \
- VV_SU_PARAMS(e64); \
- BODY; \
- } \
- VI_LOOP_END
-
-#define VI_VX_SU_LOOP(BODY) \
- VI_CHECK_SSS(false) \
- VI_LOOP_BASE \
- if (sew == e8) { \
- VX_SU_PARAMS(e8); \
- BODY; \
- } else if (sew == e16) { \
- VX_SU_PARAMS(e16); \
- BODY; \
- } else if (sew == e32) { \
- VX_SU_PARAMS(e32); \
- BODY; \
- } else if (sew == e64) { \
- VX_SU_PARAMS(e64); \
- BODY; \
- } \
- VI_LOOP_END
-
-// narrow operation loop
-#define VI_VV_LOOP_NARROW(BODY) \
- VI_CHECK_SDS(true); \
- VI_LOOP_BASE \
- if (sew == e8) { \
- VV_NARROW_PARAMS(e8, e16) \
- BODY; \
- } else if (sew == e16) { \
- VV_NARROW_PARAMS(e16, e32) \
- BODY; \
- } else if (sew == e32) { \
- VV_NARROW_PARAMS(e32, e64) \
- BODY; \
- } \
- VI_LOOP_END
-
-#define VI_VX_LOOP_NARROW(BODY) \
- VI_CHECK_SDS(false); \
- VI_LOOP_BASE \
- if (sew == e8) { \
- VX_NARROW_PARAMS(e8, e16) \
- BODY; \
- } else if (sew == e16) { \
- VX_NARROW_PARAMS(e16, e32) \
- BODY; \
- } else if (sew == e32) { \
- VX_NARROW_PARAMS(e32, e64) \
- BODY; \
- } \
- VI_LOOP_END
-
-#define VI_VI_LOOP_NARROW(BODY) \
- VI_CHECK_SDS(false); \
- VI_LOOP_BASE \
- if (sew == e8) { \
- VI_NARROW_PARAMS(e8, e16) \
- BODY; \
- } else if (sew == e16) { \
- VI_NARROW_PARAMS(e16, e32) \
- BODY; \
- } else if (sew == e32) { \
- VI_NARROW_PARAMS(e32, e64) \
- BODY; \
- } \
- VI_LOOP_END
-
-#define VI_VI_LOOP_NSHIFT(BODY) \
- VI_CHECK_SDS(false); \
- VI_LOOP_NSHIFT_BASE \
- if (sew == e8) { \
- VI_NARROW_PARAMS(e8, e16) \
- BODY; \
- } else if (sew == e16) { \
- VI_NARROW_PARAMS(e16, e32) \
- BODY; \
- } else if (sew == e32) { \
- VI_NARROW_PARAMS(e32, e64) \
- BODY; \
- } \
- VI_LOOP_END
-
-#define VI_VX_LOOP_NSHIFT(BODY) \
- VI_CHECK_SDS(false); \
- VI_LOOP_NSHIFT_BASE \
- if (sew == e8) { \
- VX_NARROW_PARAMS(e8, e16) \
- BODY; \
- } else if (sew == e16) { \
- VX_NARROW_PARAMS(e16, e32) \
- BODY; \
- } else if (sew == e32) { \
- VX_NARROW_PARAMS(e32, e64) \
- BODY; \
- } \
- VI_LOOP_END
-
-#define VI_VV_LOOP_NSHIFT(BODY) \
- VI_CHECK_SDS(true); \
- VI_LOOP_NSHIFT_BASE \
- if (sew == e8) { \
- VV_NARROW_PARAMS(e8, e16) \
- BODY; \
- } else if (sew == e16) { \
- VV_NARROW_PARAMS(e16, e32) \
- BODY; \
- } else if (sew == e32) { \
- VV_NARROW_PARAMS(e32, e64) \
- BODY; \
- } \
- VI_LOOP_END
-
-// widen operation loop
-#define VI_VV_LOOP_WIDEN(BODY) \
- VI_LOOP_BASE \
- if (sew == e8) { \
- VV_PARAMS(e8); \
- BODY; \
- } else if (sew == e16) { \
- VV_PARAMS(e16); \
- BODY; \
- } else if (sew == e32) { \
- VV_PARAMS(e32); \
- BODY; \
- } \
- VI_LOOP_END
-
-#define VI_VX_LOOP_WIDEN(BODY) \
- VI_LOOP_BASE \
- if (sew == e8) { \
- VX_PARAMS(e8); \
- BODY; \
- } else if (sew == e16) { \
- VX_PARAMS(e16); \
- BODY; \
- } else if (sew == e32) { \
- VX_PARAMS(e32); \
- BODY; \
- } \
- VI_LOOP_END
-
-#define VI_WIDE_OP_AND_ASSIGN(var0, var1, var2, op0, op1, sign) \
- switch (P.VU.vsew) { \
- case e8: { \
- sign##16_t vd_w = P.VU.elt<sign##16_t>(rd_num, i); \
- P.VU.elt<uint16_t>(rd_num, i, true) = \
- op1((sign##16_t)(sign##8_t)var0 op0 (sign##16_t)(sign##8_t)var1) + var2; \
- } \
- break; \
- case e16: { \
- sign##32_t vd_w = P.VU.elt<sign##32_t>(rd_num, i); \
- P.VU.elt<uint32_t>(rd_num, i, true) = \
- op1((sign##32_t)(sign##16_t)var0 op0 (sign##32_t)(sign##16_t)var1) + var2; \
- } \
- break; \
- default: { \
- sign##64_t vd_w = P.VU.elt<sign##64_t>(rd_num, i); \
- P.VU.elt<uint64_t>(rd_num, i, true) = \
- op1((sign##64_t)(sign##32_t)var0 op0 (sign##64_t)(sign##32_t)var1) + var2; \
- } \
- break; \
- }
-
-#define VI_WIDE_OP_AND_ASSIGN_MIX(var0, var1, var2, op0, op1, sign_d, sign_1, sign_2) \
- switch (P.VU.vsew) { \
- case e8: { \
- sign_d##16_t vd_w = P.VU.elt<sign_d##16_t>(rd_num, i); \
- P.VU.elt<uint16_t>(rd_num, i, true) = \
- op1((sign_1##16_t)(sign_1##8_t)var0 op0 (sign_2##16_t)(sign_2##8_t)var1) + var2; \
- } \
- break; \
- case e16: { \
- sign_d##32_t vd_w = P.VU.elt<sign_d##32_t>(rd_num, i); \
- P.VU.elt<uint32_t>(rd_num, i, true) = \
- op1((sign_1##32_t)(sign_1##16_t)var0 op0 (sign_2##32_t)(sign_2##16_t)var1) + var2; \
- } \
- break; \
- default: { \
- sign_d##64_t vd_w = P.VU.elt<sign_d##64_t>(rd_num, i); \
- P.VU.elt<uint64_t>(rd_num, i, true) = \
- op1((sign_1##64_t)(sign_1##32_t)var0 op0 (sign_2##64_t)(sign_2##32_t)var1) + var2; \
- } \
- break; \
- }
-
-#define VI_WIDE_WVX_OP(var0, op0, sign) \
- switch (P.VU.vsew) { \
- case e8: { \
- sign##16_t &vd_w = P.VU.elt<sign##16_t>(rd_num, i, true); \
- sign##16_t vs2_w = P.VU.elt<sign##16_t>(rs2_num, i); \
- vd_w = vs2_w op0 (sign##16_t)(sign##8_t)var0; \
- } \
- break; \
- case e16: { \
- sign##32_t &vd_w = P.VU.elt<sign##32_t>(rd_num, i, true); \
- sign##32_t vs2_w = P.VU.elt<sign##32_t>(rs2_num, i); \
- vd_w = vs2_w op0 (sign##32_t)(sign##16_t)var0; \
- } \
- break; \
- default: { \
- sign##64_t &vd_w = P.VU.elt<sign##64_t>(rd_num, i, true); \
- sign##64_t vs2_w = P.VU.elt<sign##64_t>(rs2_num, i); \
- vd_w = vs2_w op0 (sign##64_t)(sign##32_t)var0; \
- } \
- break; \
- }
-
-// wide reduction loop - signed
-#define VI_LOOP_WIDE_REDUCTION_BASE(sew1, sew2) \
- reg_t vl = P.VU.vl->read(); \
- reg_t rd_num = insn.rd(); \
- reg_t rs1_num = insn.rs1(); \
- reg_t rs2_num = insn.rs2(); \
- auto &vd_0_des = P.VU.elt<type_sew_t<sew2>::type>(rd_num, 0, true); \
- auto vd_0_res = P.VU.elt<type_sew_t<sew2>::type>(rs1_num, 0); \
- for (reg_t i = P.VU.vstart->read(); i < vl; ++i) { \
- VI_LOOP_ELEMENT_SKIP(); \
- auto vs2 = P.VU.elt<type_sew_t<sew1>::type>(rs2_num, i);
-
-#define WIDE_REDUCTION_LOOP(sew1, sew2, BODY) \
- VI_LOOP_WIDE_REDUCTION_BASE(sew1, sew2) \
- BODY; \
- VI_LOOP_REDUCTION_END(sew2)
-
-#define VI_VV_LOOP_WIDE_REDUCTION(BODY) \
- VI_CHECK_REDUCTION(true); \
- reg_t sew = P.VU.vsew; \
- if (sew == e8) { \
- WIDE_REDUCTION_LOOP(e8, e16, BODY) \
- } else if (sew == e16) { \
- WIDE_REDUCTION_LOOP(e16, e32, BODY) \
- } else if (sew == e32) { \
- WIDE_REDUCTION_LOOP(e32, e64, BODY) \
- }
-
-// wide reduction loop - unsigned
-#define VI_ULOOP_WIDE_REDUCTION_BASE(sew1, sew2) \
- reg_t vl = P.VU.vl->read(); \
- reg_t rd_num = insn.rd(); \
- reg_t rs1_num = insn.rs1(); \
- reg_t rs2_num = insn.rs2(); \
- auto &vd_0_des = P.VU.elt<type_usew_t<sew2>::type>(rd_num, 0, true); \
- auto vd_0_res = P.VU.elt<type_usew_t<sew2>::type>(rs1_num, 0); \
- for (reg_t i = P.VU.vstart->read(); i < vl; ++i) { \
- VI_LOOP_ELEMENT_SKIP(); \
- auto vs2 = P.VU.elt<type_usew_t<sew1>::type>(rs2_num, i);
-
-#define WIDE_REDUCTION_ULOOP(sew1, sew2, BODY) \
- VI_ULOOP_WIDE_REDUCTION_BASE(sew1, sew2) \
- BODY; \
- VI_LOOP_REDUCTION_END(sew2)
-
-#define VI_VV_ULOOP_WIDE_REDUCTION(BODY) \
- VI_CHECK_REDUCTION(true); \
- reg_t sew = P.VU.vsew; \
- if (sew == e8) { \
- WIDE_REDUCTION_ULOOP(e8, e16, BODY) \
- } else if (sew == e16) { \
- WIDE_REDUCTION_ULOOP(e16, e32, BODY) \
- } else if (sew == e32) { \
- WIDE_REDUCTION_ULOOP(e32, e64, BODY) \
- }
-
-// carry/borrow bit loop
-#define VI_VV_LOOP_CARRY(BODY) \
- VI_CHECK_MSS(true); \
- VI_LOOP_CARRY_BASE \
- if (sew == e8) { \
- VV_CARRY_PARAMS(e8) \
- BODY; \
- } else if (sew == e16) { \
- VV_CARRY_PARAMS(e16) \
- BODY; \
- } else if (sew == e32) { \
- VV_CARRY_PARAMS(e32) \
- BODY; \
- } else if (sew == e64) { \
- VV_CARRY_PARAMS(e64) \
- BODY; \
- } \
- VI_LOOP_CARRY_END
-
-#define VI_XI_LOOP_CARRY(BODY) \
- VI_CHECK_MSS(false); \
- VI_LOOP_CARRY_BASE \
- if (sew == e8) { \
- XI_CARRY_PARAMS(e8) \
- BODY; \
- } else if (sew == e16) { \
- XI_CARRY_PARAMS(e16) \
- BODY; \
- } else if (sew == e32) { \
- XI_CARRY_PARAMS(e32) \
- BODY; \
- } else if (sew == e64) { \
- XI_CARRY_PARAMS(e64) \
- BODY; \
- } \
- VI_LOOP_CARRY_END
-
-#define VI_VV_LOOP_WITH_CARRY(BODY) \
- require_vm; \
- VI_CHECK_SSS(true); \
- VI_LOOP_WITH_CARRY_BASE \
- if (sew == e8) { \
- VV_WITH_CARRY_PARAMS(e8) \
- BODY; \
- } else if (sew == e16) { \
- VV_WITH_CARRY_PARAMS(e16) \
- BODY; \
- } else if (sew == e32) { \
- VV_WITH_CARRY_PARAMS(e32) \
- BODY; \
- } else if (sew == e64) { \
- VV_WITH_CARRY_PARAMS(e64) \
- BODY; \
- } \
- VI_LOOP_END
-
-#define VI_XI_LOOP_WITH_CARRY(BODY) \
- require_vm; \
- VI_CHECK_SSS(false); \
- VI_LOOP_WITH_CARRY_BASE \
- if (sew == e8) { \
- XI_WITH_CARRY_PARAMS(e8) \
- BODY; \
- } else if (sew == e16) { \
- XI_WITH_CARRY_PARAMS(e16) \
- BODY; \
- } else if (sew == e32) { \
- XI_WITH_CARRY_PARAMS(e32) \
- BODY; \
- } else if (sew == e64) { \
- XI_WITH_CARRY_PARAMS(e64) \
- BODY; \
- } \
- VI_LOOP_END
-
-// average loop
-#define VI_VV_LOOP_AVG(op) \
-VRM xrm = p->VU.get_vround_mode(); \
-VI_VV_LOOP({ \
- uint128_t res = ((uint128_t)vs2) op vs1; \
- INT_ROUNDING(res, xrm, 1); \
- vd = res >> 1; \
-})
-
-#define VI_VX_LOOP_AVG(op) \
-VRM xrm = p->VU.get_vround_mode(); \
-VI_VX_LOOP({ \
- uint128_t res = ((uint128_t)vs2) op rs1; \
- INT_ROUNDING(res, xrm, 1); \
- vd = res >> 1; \
-})
-
-#define VI_VV_ULOOP_AVG(op) \
-VRM xrm = p->VU.get_vround_mode(); \
-VI_VV_ULOOP({ \
- uint128_t res = ((uint128_t)vs2) op vs1; \
- INT_ROUNDING(res, xrm, 1); \
- vd = res >> 1; \
-})
-
-#define VI_VX_ULOOP_AVG(op) \
-VRM xrm = p->VU.get_vround_mode(); \
-VI_VX_ULOOP({ \
- uint128_t res = ((uint128_t)vs2) op rs1; \
- INT_ROUNDING(res, xrm, 1); \
- vd = res >> 1; \
-})
-
-//
-// vector: load/store helper
-//
-#define VI_STRIP(inx) \
- reg_t vreg_inx = inx;
-
-#define VI_DUPLICATE_VREG(reg_num, idx_sew) \
-reg_t index[P.VU.vlmax]; \
- for (reg_t i = 0; i < P.VU.vlmax && P.VU.vl->read() != 0; ++i) { \
- switch (idx_sew) { \
- case e8: \
- index[i] = P.VU.elt<uint8_t>(reg_num, i); \
- break; \
- case e16: \
- index[i] = P.VU.elt<uint16_t>(reg_num, i); \
- break; \
- case e32: \
- index[i] = P.VU.elt<uint32_t>(reg_num, i); \
- break; \
- case e64: \
- index[i] = P.VU.elt<uint64_t>(reg_num, i); \
- break; \
- } \
-}
-
-#define VI_LD(stride, offset, elt_width, is_mask_ldst) \
- const reg_t nf = insn.v_nf() + 1; \
- const reg_t vl = is_mask_ldst ? ((P.VU.vl->read() + 7) / 8) : P.VU.vl->read(); \
- const reg_t baseAddr = RS1; \
- const reg_t vd = insn.rd(); \
- VI_CHECK_LOAD(elt_width, is_mask_ldst); \
- for (reg_t i = 0; i < vl; ++i) { \
- VI_ELEMENT_SKIP(i); \
- VI_STRIP(i); \
- P.VU.vstart->write(i); \
- for (reg_t fn = 0; fn < nf; ++fn) { \
- elt_width##_t val = MMU.load_##elt_width( \
- baseAddr + (stride) + (offset) * sizeof(elt_width##_t)); \
- P.VU.elt<elt_width##_t>(vd + fn * emul, vreg_inx, true) = val; \
- } \
- } \
- P.VU.vstart->write(0);
-
-#define VI_LD_INDEX(elt_width, is_seg) \
- const reg_t nf = insn.v_nf() + 1; \
- const reg_t vl = P.VU.vl->read(); \
- const reg_t baseAddr = RS1; \
- const reg_t vd = insn.rd(); \
- if (!is_seg) \
- require(nf == 1); \
- VI_CHECK_LD_INDEX(elt_width); \
- VI_DUPLICATE_VREG(insn.rs2(), elt_width); \
- for (reg_t i = 0; i < vl; ++i) { \
- VI_ELEMENT_SKIP(i); \
- VI_STRIP(i); \
- P.VU.vstart->write(i); \
- for (reg_t fn = 0; fn < nf; ++fn) { \
- switch (P.VU.vsew) { \
- case e8: \
- P.VU.elt<uint8_t>(vd + fn * flmul, vreg_inx, true) = \
- MMU.load_uint8(baseAddr + index[i] + fn * 1); \
- break; \
- case e16: \
- P.VU.elt<uint16_t>(vd + fn * flmul, vreg_inx, true) = \
- MMU.load_uint16(baseAddr + index[i] + fn * 2); \
- break; \
- case e32: \
- P.VU.elt<uint32_t>(vd + fn * flmul, vreg_inx, true) = \
- MMU.load_uint32(baseAddr + index[i] + fn * 4); \
- break; \
- default: \
- P.VU.elt<uint64_t>(vd + fn * flmul, vreg_inx, true) = \
- MMU.load_uint64(baseAddr + index[i] + fn * 8); \
- break; \
- } \
- } \
- } \
- P.VU.vstart->write(0);
-
-#define VI_ST(stride, offset, elt_width, is_mask_ldst) \
- const reg_t nf = insn.v_nf() + 1; \
- const reg_t vl = is_mask_ldst ? ((P.VU.vl->read() + 7) / 8) : P.VU.vl->read(); \
- const reg_t baseAddr = RS1; \
- const reg_t vs3 = insn.rd(); \
- VI_CHECK_STORE(elt_width, is_mask_ldst); \
- for (reg_t i = 0; i < vl; ++i) { \
- VI_STRIP(i) \
- VI_ELEMENT_SKIP(i); \
- P.VU.vstart->write(i); \
- for (reg_t fn = 0; fn < nf; ++fn) { \
- elt_width##_t val = P.VU.elt<elt_width##_t>(vs3 + fn * emul, vreg_inx); \
- MMU.store_##elt_width( \
- baseAddr + (stride) + (offset) * sizeof(elt_width##_t), val); \
- } \
- } \
- P.VU.vstart->write(0);
-
-#define VI_ST_INDEX(elt_width, is_seg) \
- const reg_t nf = insn.v_nf() + 1; \
- const reg_t vl = P.VU.vl->read(); \
- const reg_t baseAddr = RS1; \
- const reg_t vs3 = insn.rd(); \
- if (!is_seg) \
- require(nf == 1); \
- VI_CHECK_ST_INDEX(elt_width); \
- VI_DUPLICATE_VREG(insn.rs2(), elt_width); \
- for (reg_t i = 0; i < vl; ++i) { \
- VI_STRIP(i) \
- VI_ELEMENT_SKIP(i); \
- P.VU.vstart->write(i); \
- for (reg_t fn = 0; fn < nf; ++fn) { \
- switch (P.VU.vsew) { \
- case e8: \
- MMU.store_uint8(baseAddr + index[i] + fn * 1, \
- P.VU.elt<uint8_t>(vs3 + fn * flmul, vreg_inx)); \
- break; \
- case e16: \
- MMU.store_uint16(baseAddr + index[i] + fn * 2, \
- P.VU.elt<uint16_t>(vs3 + fn * flmul, vreg_inx)); \
- break; \
- case e32: \
- MMU.store_uint32(baseAddr + index[i] + fn * 4, \
- P.VU.elt<uint32_t>(vs3 + fn * flmul, vreg_inx)); \
- break; \
- default: \
- MMU.store_uint64(baseAddr + index[i] + fn * 8, \
- P.VU.elt<uint64_t>(vs3 + fn * flmul, vreg_inx)); \
- break; \
- } \
- } \
- } \
- P.VU.vstart->write(0);
-
-#define VI_LDST_FF(elt_width) \
- const reg_t nf = insn.v_nf() + 1; \
- const reg_t sew = p->VU.vsew; \
- const reg_t vl = p->VU.vl->read(); \
- const reg_t baseAddr = RS1; \
- const reg_t rd_num = insn.rd(); \
- VI_CHECK_LOAD(elt_width, false); \
- bool early_stop = false; \
- for (reg_t i = p->VU.vstart->read(); i < vl; ++i) { \
- VI_STRIP(i); \
- VI_ELEMENT_SKIP(i); \
- \
- for (reg_t fn = 0; fn < nf; ++fn) { \
- uint64_t val; \
- try { \
- val = MMU.load_##elt_width( \
- baseAddr + (i * nf + fn) * sizeof(elt_width##_t)); \
- } catch (trap_t& t) { \
- if (i == 0) \
- throw; /* Only take exception on zeroth element */ \
- /* Reduce VL if an exception occurs on a later element */ \
- early_stop = true; \
- P.VU.vl->write_raw(i); \
- break; \
- } \
- p->VU.elt<elt_width##_t>(rd_num + fn * emul, vreg_inx, true) = val; \
- } \
- \
- if (early_stop) { \
- break; \
- } \
- } \
- p->VU.vstart->write(0);
-
-#define VI_LD_WHOLE(elt_width) \
- require_vector_novtype(true, false); \
- require(sizeof(elt_width ## _t) * 8 <= P.VU.ELEN); \
- const reg_t baseAddr = RS1; \
- const reg_t vd = insn.rd(); \
- const reg_t len = insn.v_nf() + 1; \
- require_align(vd, len); \
- const reg_t elt_per_reg = P.VU.vlenb / sizeof(elt_width ## _t); \
- const reg_t size = len * elt_per_reg; \
- if (P.VU.vstart->read() < size) { \
- reg_t i = P.VU.vstart->read() / elt_per_reg; \
- reg_t off = P.VU.vstart->read() % elt_per_reg; \
- if (off) { \
- for (reg_t pos = off; pos < elt_per_reg; ++pos) { \
- auto val = MMU.load_## elt_width(baseAddr + \
- P.VU.vstart->read() * sizeof(elt_width ## _t)); \
- P.VU.elt<elt_width ## _t>(vd + i, pos, true) = val; \
- P.VU.vstart->write(P.VU.vstart->read() + 1); \
- } \
- ++i; \
- } \
- for (; i < len; ++i) { \
- for (reg_t pos = 0; pos < elt_per_reg; ++pos) { \
- auto val = MMU.load_## elt_width(baseAddr + \
- P.VU.vstart->read() * sizeof(elt_width ## _t)); \
- P.VU.elt<elt_width ## _t>(vd + i, pos, true) = val; \
- P.VU.vstart->write(P.VU.vstart->read() + 1); \
- } \
- } \
- } \
- P.VU.vstart->write(0);
-
-#define VI_ST_WHOLE \
- require_vector_novtype(true, false); \
- const reg_t baseAddr = RS1; \
- const reg_t vs3 = insn.rd(); \
- const reg_t len = insn.v_nf() + 1; \
- require_align(vs3, len); \
- const reg_t size = len * P.VU.vlenb; \
- \
- if (P.VU.vstart->read() < size) { \
- reg_t i = P.VU.vstart->read() / P.VU.vlenb; \
- reg_t off = P.VU.vstart->read() % P.VU.vlenb; \
- if (off) { \
- for (reg_t pos = off; pos < P.VU.vlenb; ++pos) { \
- auto val = P.VU.elt<uint8_t>(vs3 + i, pos); \
- MMU.store_uint8(baseAddr + P.VU.vstart->read(), val); \
- P.VU.vstart->write(P.VU.vstart->read() + 1); \
- } \
- i++; \
- } \
- for (; i < len; ++i) { \
- for (reg_t pos = 0; pos < P.VU.vlenb; ++pos) { \
- auto val = P.VU.elt<uint8_t>(vs3 + i, pos); \
- MMU.store_uint8(baseAddr + P.VU.vstart->read(), val); \
- P.VU.vstart->write(P.VU.vstart->read() + 1); \
- } \
- } \
- } \
- P.VU.vstart->write(0);
-
-//
-// vector: amo
-//
-#define VI_AMO(op, type, idx_type) \
- require_vector(false); \
- require_align(insn.rd(), P.VU.vflmul); \
- require(P.VU.vsew <= P.get_xlen() && P.VU.vsew >= 32); \
- require_align(insn.rd(), P.VU.vflmul); \
- float vemul = ((float)idx_type / P.VU.vsew * P.VU.vflmul); \
- require(vemul >= 0.125 && vemul <= 8); \
- require_align(insn.rs2(), vemul); \
- if (insn.v_wd()) { \
- require_vm; \
- if (idx_type > P.VU.vsew) { \
- if (insn.rd() != insn.rs2()) \
- require_noover(insn.rd(), P.VU.vflmul, insn.rs2(), vemul); \
- } else if (idx_type < P.VU.vsew) { \
- if (vemul < 1) { \
- require_noover(insn.rd(), P.VU.vflmul, insn.rs2(), vemul); \
- } else { \
- require_noover_widen(insn.rd(), P.VU.vflmul, insn.rs2(), vemul); \
- } \
- } \
- } \
- VI_DUPLICATE_VREG(insn.rs2(), idx_type); \
- const reg_t vl = P.VU.vl->read(); \
- const reg_t baseAddr = RS1; \
- const reg_t vd = insn.rd(); \
- for (reg_t i = P.VU.vstart->read(); i < vl; ++i) { \
- VI_ELEMENT_SKIP(i); \
- VI_STRIP(i); \
- P.VU.vstart->write(i); \
- switch (P.VU.vsew) { \
- case e32: { \
- auto vs3 = P.VU.elt< type ## 32_t>(vd, vreg_inx); \
- auto val = MMU.amo_uint32(baseAddr + index[i], [&](type ## 32_t lhs) { op }); \
- if (insn.v_wd()) \
- P.VU.elt< type ## 32_t>(vd, vreg_inx, true) = val; \
- } \
- break; \
- case e64: { \
- auto vs3 = P.VU.elt< type ## 64_t>(vd, vreg_inx); \
- auto val = MMU.amo_uint64(baseAddr + index[i], [&](type ## 64_t lhs) { op }); \
- if (insn.v_wd()) \
- P.VU.elt< type ## 64_t>(vd, vreg_inx, true) = val; \
- } \
- break; \
- default: \
- require(0); \
- break; \
- } \
- } \
- P.VU.vstart->write(0);
-
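In VI_AMO above, every active element does a read-modify-write through MMU.amo_uint32 / MMU.amo_uint64: the lambda receives the old memory value as lhs (with vs3 captured from the destination register), computes the new value, and the old value is returned so it can be written to vd when insn.v_wd() is set. A rough model of that shape, not Spike's MMU interface:

#include <cstdint>
#include <functional>

// Rough model of the amo_uint32 read-modify-write used by VI_AMO:
// 'op' maps the old memory value to the new one; the old value is
// returned, as RISC-V AMOs hand back the pre-modification value.
uint32_t amo_uint32_model(uint32_t* mem,
                          const std::function<uint32_t(uint32_t)>& op) {
  uint32_t old = *mem;  // value before the operation
  *mem = op(old);       // real hardware performs this atomically
  return old;
}

For one element, something like amo_uint32_model(&m, [&](uint32_t lhs) { return lhs + vs3; }) then models a vamoadd step.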
-// vector: sign/unsigned extension
-#define VI_VV_EXT(div, type) \
- require(insn.rd() != insn.rs2()); \
- require_vm; \
- reg_t from = P.VU.vsew / div; \
- require(from >= e8 && from <= e64); \
-  require(((float)P.VU.vflmul / div) >= 0.125 && ((float)P.VU.vflmul / div) <= 8); \
- require_align(insn.rd(), P.VU.vflmul); \
- require_align(insn.rs2(), P.VU.vflmul / div); \
- if ((P.VU.vflmul / div) < 1) { \
- require_noover(insn.rd(), P.VU.vflmul, insn.rs2(), P.VU.vflmul / div); \
- } else { \
- require_noover_widen(insn.rd(), P.VU.vflmul, insn.rs2(), P.VU.vflmul / div); \
- } \
- reg_t pat = (((P.VU.vsew >> 3) << 4) | from >> 3); \
- VI_GENERAL_LOOP_BASE \
- VI_LOOP_ELEMENT_SKIP(); \
- switch (pat) { \
- case 0x21: \
- P.VU.elt<type##16_t>(rd_num, i, true) = P.VU.elt<type##8_t>(rs2_num, i); \
- break; \
- case 0x41: \
- P.VU.elt<type##32_t>(rd_num, i, true) = P.VU.elt<type##8_t>(rs2_num, i); \
- break; \
- case 0x81: \
- P.VU.elt<type##64_t>(rd_num, i, true) = P.VU.elt<type##8_t>(rs2_num, i); \
- break; \
- case 0x42: \
- P.VU.elt<type##32_t>(rd_num, i, true) = P.VU.elt<type##16_t>(rs2_num, i); \
- break; \
- case 0x82: \
- P.VU.elt<type##64_t>(rd_num, i, true) = P.VU.elt<type##16_t>(rs2_num, i); \
- break; \
- case 0x84: \
- P.VU.elt<type##64_t>(rd_num, i, true) = P.VU.elt<type##32_t>(rs2_num, i); \
- break; \
- case 0x88: \
- P.VU.elt<type##64_t>(rd_num, i, true) = P.VU.elt<type##32_t>(rs2_num, i); \
- break; \
- default: \
- break; \
- } \
- VI_LOOP_END
-
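VI_VV_EXT above dispatches on pat, which packs both element widths in bytes: the high nibble is the destination width SEW/8 and the low nibble the source width (SEW/div)/8, so 0x41 means an 8-bit source extended to a 32-bit destination (vzext.vf4 / vsext.vf4 at SEW=32). A small sketch of the encoding:

#include <cassert>

// Sketch of VI_VV_EXT's width pattern: high nibble = destination
// element bytes, low nibble = source element bytes.
constexpr unsigned ext_pat(unsigned sew_bits, unsigned div) {
  return ((sew_bits / 8) << 4) | ((sew_bits / div) / 8);
}

int main() {
  assert(ext_pat(16, 2) == 0x21);  // 8 -> 16 bits (vf2 at SEW=16)
  assert(ext_pat(32, 4) == 0x41);  // 8 -> 32 bits (vf4 at SEW=32)
  assert(ext_pat(64, 2) == 0x84);  // 32 -> 64 bits (vf2 at SEW=64)
}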
-//
-// vector: vfp helper
-//
-#define VI_VFP_COMMON \
- require_fp; \
- require((P.VU.vsew == e16 && p->extension_enabled(EXT_ZFH)) || \
- (P.VU.vsew == e32 && p->extension_enabled('F')) || \
- (P.VU.vsew == e64 && p->extension_enabled('D'))); \
- require_vector(true); \
- require(STATE.frm->read() < 0x5); \
- reg_t vl = P.VU.vl->read(); \
- reg_t rd_num = insn.rd(); \
- reg_t rs1_num = insn.rs1(); \
- reg_t rs2_num = insn.rs2(); \
- softfloat_roundingMode = STATE.frm->read();
-
-#define VI_VFP_LOOP_BASE \
- VI_VFP_COMMON \
- for (reg_t i = P.VU.vstart->read(); i < vl; ++i) { \
- VI_LOOP_ELEMENT_SKIP();
-
-#define VI_VFP_LOOP_CMP_BASE \
- VI_VFP_COMMON \
- for (reg_t i = P.VU.vstart->read(); i < vl; ++i) { \
- VI_LOOP_ELEMENT_SKIP(); \
- uint64_t mmask = UINT64_C(1) << mpos; \
- uint64_t &vd = P.VU.elt<uint64_t>(rd_num, midx, true); \
- uint64_t res = 0;
-
-#define VI_VFP_LOOP_REDUCTION_BASE(width) \
- float##width##_t vd_0 = P.VU.elt<float##width##_t>(rd_num, 0); \
- float##width##_t vs1_0 = P.VU.elt<float##width##_t>(rs1_num, 0); \
- vd_0 = vs1_0; \
- bool is_active = false; \
- for (reg_t i = P.VU.vstart->read(); i < vl; ++i) { \
- VI_LOOP_ELEMENT_SKIP(); \
- float##width##_t vs2 = P.VU.elt<float##width##_t>(rs2_num, i); \
- is_active = true; \
-
-#define VI_VFP_LOOP_WIDE_REDUCTION_BASE \
- VI_VFP_COMMON \
- float64_t vd_0 = f64(P.VU.elt<float64_t>(rs1_num, 0).v); \
- for (reg_t i = P.VU.vstart->read(); i < vl; ++i) { \
- VI_LOOP_ELEMENT_SKIP();
-
-#define VI_VFP_LOOP_END \
- } \
- P.VU.vstart->write(0); \
-
-#define VI_VFP_LOOP_REDUCTION_END(x) \
- } \
- P.VU.vstart->write(0); \
- if (vl > 0) { \
- if (is_propagate && !is_active) { \
- switch (x) { \
- case e16: { \
- auto ret = f16_classify(f16(vd_0.v)); \
- if (ret & 0x300) { \
- if (ret & 0x100) { \
- softfloat_exceptionFlags |= softfloat_flag_invalid; \
- set_fp_exceptions; \
- } \
- P.VU.elt<uint16_t>(rd_num, 0, true) = defaultNaNF16UI; \
- } else { \
- P.VU.elt<uint16_t>(rd_num, 0, true) = vd_0.v; \
- } \
- } \
- break; \
- case e32: { \
- auto ret = f32_classify(f32(vd_0.v)); \
- if (ret & 0x300) { \
- if (ret & 0x100) { \
- softfloat_exceptionFlags |= softfloat_flag_invalid; \
- set_fp_exceptions; \
- } \
- P.VU.elt<uint32_t>(rd_num, 0, true) = defaultNaNF32UI; \
- } else { \
- P.VU.elt<uint32_t>(rd_num, 0, true) = vd_0.v; \
- } \
- } \
- break; \
- case e64: { \
- auto ret = f64_classify(f64(vd_0.v)); \
- if (ret & 0x300) { \
- if (ret & 0x100) { \
- softfloat_exceptionFlags |= softfloat_flag_invalid; \
- set_fp_exceptions; \
- } \
- P.VU.elt<uint64_t>(rd_num, 0, true) = defaultNaNF64UI; \
- } else { \
- P.VU.elt<uint64_t>(rd_num, 0, true) = vd_0.v; \
- } \
- } \
- break; \
- } \
- } else { \
- P.VU.elt<type_sew_t<x>::type>(rd_num, 0, true) = vd_0.v; \
- } \
- }
-
-#define VI_VFP_LOOP_CMP_END \
- switch (P.VU.vsew) { \
- case e16: \
- case e32: \
- case e64: { \
- vd = (vd & ~mmask) | (((res) << mpos) & mmask); \
- break; \
- } \
- default: \
- require(0); \
- break; \
- }; \
- } \
- P.VU.vstart->write(0);
-
-#define VI_VFP_VV_LOOP(BODY16, BODY32, BODY64) \
- VI_CHECK_SSS(true); \
- VI_VFP_LOOP_BASE \
- switch (P.VU.vsew) { \
- case e16: { \
- VFP_VV_PARAMS(16); \
- BODY16; \
- set_fp_exceptions; \
- break; \
- } \
- case e32: { \
- VFP_VV_PARAMS(32); \
- BODY32; \
- set_fp_exceptions; \
- break; \
- } \
- case e64: { \
- VFP_VV_PARAMS(64); \
- BODY64; \
- set_fp_exceptions; \
- break; \
- } \
- default: \
- require(0); \
- break; \
- }; \
- DEBUG_RVV_FP_VV; \
- VI_VFP_LOOP_END
-
-#define VI_VFP_V_LOOP(BODY16, BODY32, BODY64) \
- VI_CHECK_SSS(false); \
- VI_VFP_LOOP_BASE \
- switch (P.VU.vsew) { \
- case e16: { \
- VFP_V_PARAMS(16); \
- BODY16; \
- break; \
- } \
- case e32: { \
- VFP_V_PARAMS(32); \
- BODY32; \
- break; \
- } \
- case e64: { \
- VFP_V_PARAMS(64); \
- BODY64; \
- break; \
- } \
- default: \
- require(0); \
- break; \
- }; \
- set_fp_exceptions; \
- VI_VFP_LOOP_END
-
-#define VI_VFP_VV_LOOP_REDUCTION(BODY16, BODY32, BODY64) \
- VI_CHECK_REDUCTION(false) \
- VI_VFP_COMMON \
- switch (P.VU.vsew) { \
- case e16: { \
- VI_VFP_LOOP_REDUCTION_BASE(16) \
- BODY16; \
- set_fp_exceptions; \
- VI_VFP_LOOP_REDUCTION_END(e16) \
- break; \
- } \
- case e32: { \
- VI_VFP_LOOP_REDUCTION_BASE(32) \
- BODY32; \
- set_fp_exceptions; \
- VI_VFP_LOOP_REDUCTION_END(e32) \
- break; \
- } \
- case e64: { \
- VI_VFP_LOOP_REDUCTION_BASE(64) \
- BODY64; \
- set_fp_exceptions; \
- VI_VFP_LOOP_REDUCTION_END(e64) \
- break; \
- } \
- default: \
- require(0); \
- break; \
- }; \
-
-#define VI_VFP_VV_LOOP_WIDE_REDUCTION(BODY16, BODY32) \
- VI_CHECK_REDUCTION(true) \
- VI_VFP_COMMON \
- require((P.VU.vsew == e16 && p->extension_enabled('F')) || \
- (P.VU.vsew == e32 && p->extension_enabled('D'))); \
- bool is_active = false; \
- switch (P.VU.vsew) { \
- case e16: { \
- float32_t vd_0 = P.VU.elt<float32_t>(rs1_num, 0); \
- for (reg_t i = P.VU.vstart->read(); i < vl; ++i) { \
- VI_LOOP_ELEMENT_SKIP(); \
- is_active = true; \
- float32_t vs2 = f16_to_f32(P.VU.elt<float16_t>(rs2_num, i)); \
- BODY16; \
- set_fp_exceptions; \
- VI_VFP_LOOP_REDUCTION_END(e32) \
- break; \
- } \
- case e32: { \
- float64_t vd_0 = P.VU.elt<float64_t>(rs1_num, 0); \
- for (reg_t i = P.VU.vstart->read(); i < vl; ++i) { \
- VI_LOOP_ELEMENT_SKIP(); \
- is_active = true; \
- float64_t vs2 = f32_to_f64(P.VU.elt<float32_t>(rs2_num, i)); \
- BODY32; \
- set_fp_exceptions; \
- VI_VFP_LOOP_REDUCTION_END(e64) \
- break; \
- } \
- default: \
- require(0); \
- break; \
- }; \
-
-#define VI_VFP_VF_LOOP(BODY16, BODY32, BODY64) \
- VI_CHECK_SSS(false); \
- VI_VFP_LOOP_BASE \
- switch (P.VU.vsew) { \
- case e16: { \
- VFP_VF_PARAMS(16); \
- BODY16; \
- set_fp_exceptions; \
- break; \
- } \
- case e32: { \
- VFP_VF_PARAMS(32); \
- BODY32; \
- set_fp_exceptions; \
- break; \
- } \
- case e64: { \
- VFP_VF_PARAMS(64); \
- BODY64; \
- set_fp_exceptions; \
- break; \
- } \
- default: \
- require(0); \
- break; \
- }; \
- DEBUG_RVV_FP_VF; \
- VI_VFP_LOOP_END
-
-#define VI_VFP_VV_LOOP_CMP(BODY16, BODY32, BODY64) \
- VI_CHECK_MSS(true); \
- VI_VFP_LOOP_CMP_BASE \
- switch (P.VU.vsew) { \
- case e16: { \
- VFP_VV_PARAMS(16); \
- BODY16; \
- set_fp_exceptions; \
- break; \
- } \
- case e32: { \
- VFP_VV_PARAMS(32); \
- BODY32; \
- set_fp_exceptions; \
- break; \
- } \
- case e64: { \
- VFP_VV_PARAMS(64); \
- BODY64; \
- set_fp_exceptions; \
- break; \
- } \
- default: \
- require(0); \
- break; \
- }; \
- VI_VFP_LOOP_CMP_END \
-
-#define VI_VFP_VF_LOOP_CMP(BODY16, BODY32, BODY64) \
- VI_CHECK_MSS(false); \
- VI_VFP_LOOP_CMP_BASE \
- switch (P.VU.vsew) { \
- case e16: { \
- VFP_VF_PARAMS(16); \
- BODY16; \
- set_fp_exceptions; \
- break; \
- } \
- case e32: { \
- VFP_VF_PARAMS(32); \
- BODY32; \
- set_fp_exceptions; \
- break; \
- } \
- case e64: { \
- VFP_VF_PARAMS(64); \
- BODY64; \
- set_fp_exceptions; \
- break; \
- } \
- default: \
- require(0); \
- break; \
- }; \
- VI_VFP_LOOP_CMP_END \
-
-#define VI_VFP_VF_LOOP_WIDE(BODY16, BODY32) \
- VI_CHECK_DSS(false); \
- VI_VFP_LOOP_BASE \
- switch (P.VU.vsew) { \
- case e16: { \
- float32_t &vd = P.VU.elt<float32_t>(rd_num, i, true); \
- float32_t vs2 = f16_to_f32(P.VU.elt<float16_t>(rs2_num, i)); \
- float32_t rs1 = f16_to_f32(f16(READ_FREG(rs1_num))); \
- BODY16; \
- set_fp_exceptions; \
- break; \
- } \
- case e32: { \
- float64_t &vd = P.VU.elt<float64_t>(rd_num, i, true); \
- float64_t vs2 = f32_to_f64(P.VU.elt<float32_t>(rs2_num, i)); \
- float64_t rs1 = f32_to_f64(f32(READ_FREG(rs1_num))); \
- BODY32; \
- set_fp_exceptions; \
- break; \
- } \
- default: \
- require(0); \
- break; \
- }; \
- DEBUG_RVV_FP_VV; \
- VI_VFP_LOOP_END
-
-
-#define VI_VFP_VV_LOOP_WIDE(BODY16, BODY32) \
- VI_CHECK_DSS(true); \
- VI_VFP_LOOP_BASE \
- switch (P.VU.vsew) { \
- case e16: { \
- float32_t &vd = P.VU.elt<float32_t>(rd_num, i, true); \
- float32_t vs2 = f16_to_f32(P.VU.elt<float16_t>(rs2_num, i)); \
- float32_t vs1 = f16_to_f32(P.VU.elt<float16_t>(rs1_num, i)); \
- BODY16; \
- set_fp_exceptions; \
- break; \
- } \
- case e32: { \
- float64_t &vd = P.VU.elt<float64_t>(rd_num, i, true); \
- float64_t vs2 = f32_to_f64(P.VU.elt<float32_t>(rs2_num, i)); \
- float64_t vs1 = f32_to_f64(P.VU.elt<float32_t>(rs1_num, i)); \
- BODY32; \
- set_fp_exceptions; \
- break; \
- } \
- default: \
- require(0); \
- break; \
- }; \
- DEBUG_RVV_FP_VV; \
- VI_VFP_LOOP_END
-
-#define VI_VFP_WF_LOOP_WIDE(BODY16, BODY32) \
- VI_CHECK_DDS(false); \
- VI_VFP_LOOP_BASE \
- switch (P.VU.vsew) { \
- case e16: { \
- float32_t &vd = P.VU.elt<float32_t>(rd_num, i, true); \
- float32_t vs2 = P.VU.elt<float32_t>(rs2_num, i); \
- float32_t rs1 = f16_to_f32(f16(READ_FREG(rs1_num))); \
- BODY16; \
- set_fp_exceptions; \
- break; \
- } \
- case e32: { \
- float64_t &vd = P.VU.elt<float64_t>(rd_num, i, true); \
- float64_t vs2 = P.VU.elt<float64_t>(rs2_num, i); \
- float64_t rs1 = f32_to_f64(f32(READ_FREG(rs1_num))); \
- BODY32; \
- set_fp_exceptions; \
- break; \
- } \
- default: \
- require(0); \
- }; \
- DEBUG_RVV_FP_VV; \
- VI_VFP_LOOP_END
-
-#define VI_VFP_WV_LOOP_WIDE(BODY16, BODY32) \
- VI_CHECK_DDS(true); \
- VI_VFP_LOOP_BASE \
- switch (P.VU.vsew) { \
- case e16: { \
- float32_t &vd = P.VU.elt<float32_t>(rd_num, i, true); \
- float32_t vs2 = P.VU.elt<float32_t>(rs2_num, i); \
- float32_t vs1 = f16_to_f32(P.VU.elt<float16_t>(rs1_num, i)); \
- BODY16; \
- set_fp_exceptions; \
- break; \
- } \
- case e32: { \
- float64_t &vd = P.VU.elt<float64_t>(rd_num, i, true); \
- float64_t vs2 = P.VU.elt<float64_t>(rs2_num, i); \
- float64_t vs1 = f32_to_f64(P.VU.elt<float32_t>(rs1_num, i)); \
- BODY32; \
- set_fp_exceptions; \
- break; \
- } \
- default: \
- require(0); \
- }; \
- DEBUG_RVV_FP_VV; \
- VI_VFP_LOOP_END
-
-#define VI_VFP_LOOP_SCALE_BASE \
- require_fp; \
- require_vector(true); \
- require(STATE.frm->read() < 0x5); \
- reg_t vl = P.VU.vl->read(); \
- reg_t rd_num = insn.rd(); \
- reg_t rs1_num = insn.rs1(); \
- reg_t rs2_num = insn.rs2(); \
- softfloat_roundingMode = STATE.frm->read(); \
- for (reg_t i = P.VU.vstart->read(); i < vl; ++i) { \
- VI_LOOP_ELEMENT_SKIP();
-
-#define VI_VFP_CVT_LOOP(CVT_PARAMS, CHECK, BODY) \
- CHECK \
- VI_VFP_LOOP_SCALE_BASE \
- CVT_PARAMS \
- BODY \
- set_fp_exceptions; \
- VI_VFP_LOOP_END
-
-#define VI_VFP_CVT_INT_TO_FP(BODY16, BODY32, BODY64, sign) \
- VI_CHECK_SSS(false); \
- VI_VFP_COMMON \
- switch (P.VU.vsew) { \
- case e16: \
- { VI_VFP_CVT_LOOP(CVT_INT_TO_FP_PARAMS(16, 16, sign), \
- { p->extension_enabled(EXT_ZFH); }, \
- BODY16); } \
- break; \
- case e32: \
- { VI_VFP_CVT_LOOP(CVT_INT_TO_FP_PARAMS(32, 32, sign), \
- { p->extension_enabled('F'); }, \
- BODY32); } \
- break; \
- case e64: \
- { VI_VFP_CVT_LOOP(CVT_INT_TO_FP_PARAMS(64, 64, sign), \
- { p->extension_enabled('D'); }, \
- BODY64); } \
- break; \
- default: \
- require(0); \
- break; \
- }
-
-#define VI_VFP_CVT_FP_TO_INT(BODY16, BODY32, BODY64, sign) \
- VI_CHECK_SSS(false); \
- VI_VFP_COMMON \
- switch (P.VU.vsew) { \
- case e16: \
- { VI_VFP_CVT_LOOP(CVT_FP_TO_INT_PARAMS(16, 16, sign), \
- { p->extension_enabled(EXT_ZFH); }, \
- BODY16); } \
- break; \
- case e32: \
- { VI_VFP_CVT_LOOP(CVT_FP_TO_INT_PARAMS(32, 32, sign), \
- { p->extension_enabled('F'); }, \
- BODY32); } \
- break; \
- case e64: \
- { VI_VFP_CVT_LOOP(CVT_FP_TO_INT_PARAMS(64, 64, sign), \
- { p->extension_enabled('D'); }, \
- BODY64); } \
- break; \
- default: \
- require(0); \
- break; \
- }
-
-#define VI_VFP_WCVT_FP_TO_FP(BODY8, BODY16, BODY32, \
- CHECK8, CHECK16, CHECK32) \
- VI_CHECK_DSS(false); \
- switch (P.VU.vsew) { \
- case e16: \
- { VI_VFP_CVT_LOOP(CVT_FP_TO_FP_PARAMS(16, 32), CHECK16, BODY16); } \
- break; \
- case e32: \
- { VI_VFP_CVT_LOOP(CVT_FP_TO_FP_PARAMS(32, 64), CHECK32, BODY32); } \
- break; \
- default: \
- require(0); \
- break; \
- }
-
-#define VI_VFP_WCVT_INT_TO_FP(BODY8, BODY16, BODY32, \
- CHECK8, CHECK16, CHECK32, \
- sign) \
- VI_CHECK_DSS(false); \
- switch (P.VU.vsew) { \
- case e8: \
- { VI_VFP_CVT_LOOP(CVT_INT_TO_FP_PARAMS(8, 16, sign), CHECK8, BODY8); } \
- break; \
- case e16: \
- { VI_VFP_CVT_LOOP(CVT_INT_TO_FP_PARAMS(16, 32, sign), CHECK16, BODY16); } \
- break; \
- case e32: \
- { VI_VFP_CVT_LOOP(CVT_INT_TO_FP_PARAMS(32, 64, sign), CHECK32, BODY32); } \
- break; \
- default: \
- require(0); \
- break; \
- }
-
-#define VI_VFP_WCVT_FP_TO_INT(BODY8, BODY16, BODY32, \
- CHECK8, CHECK16, CHECK32, \
- sign) \
- VI_CHECK_DSS(false); \
- switch (P.VU.vsew) { \
- case e16: \
- { VI_VFP_CVT_LOOP(CVT_FP_TO_INT_PARAMS(16, 32, sign), CHECK16, BODY16); } \
- break; \
- case e32: \
- { VI_VFP_CVT_LOOP(CVT_FP_TO_INT_PARAMS(32, 64, sign), CHECK32, BODY32); } \
- break; \
- default: \
- require(0); \
- break; \
- }
-
-#define VI_VFP_NCVT_FP_TO_FP(BODY8, BODY16, BODY32, \
- CHECK8, CHECK16, CHECK32) \
- VI_CHECK_SDS(false); \
- switch (P.VU.vsew) { \
- case e16: \
- { VI_VFP_CVT_LOOP(CVT_FP_TO_FP_PARAMS(32, 16), CHECK16, BODY16); } \
- break; \
- case e32: \
- { VI_VFP_CVT_LOOP(CVT_FP_TO_FP_PARAMS(64, 32), CHECK32, BODY32); } \
- break; \
- default: \
- require(0); \
- break; \
- }
-
-#define VI_VFP_NCVT_INT_TO_FP(BODY8, BODY16, BODY32, \
- CHECK8, CHECK16, CHECK32, \
- sign) \
- VI_CHECK_SDS(false); \
- switch (P.VU.vsew) { \
- case e16: \
- { VI_VFP_CVT_LOOP(CVT_INT_TO_FP_PARAMS(32, 16, sign), CHECK16, BODY16); } \
- break; \
- case e32: \
- { VI_VFP_CVT_LOOP(CVT_INT_TO_FP_PARAMS(64, 32, sign), CHECK32, BODY32); } \
- break; \
- default: \
- require(0); \
- break; \
- }
-
-#define VI_VFP_NCVT_FP_TO_INT(BODY8, BODY16, BODY32, \
- CHECK8, CHECK16, CHECK32, \
- sign) \
- VI_CHECK_SDS(false); \
- switch (P.VU.vsew) { \
- case e8: \
- { VI_VFP_CVT_LOOP(CVT_FP_TO_INT_PARAMS(16, 8, sign), CHECK8, BODY8); } \
- break; \
- case e16: \
- { VI_VFP_CVT_LOOP(CVT_FP_TO_INT_PARAMS(32, 16, sign), CHECK16, BODY16); } \
- break; \
- case e32: \
- { VI_VFP_CVT_LOOP(CVT_FP_TO_INT_PARAMS(64, 32, sign), CHECK32, BODY32); } \
- break; \
- default: \
- require(0); \
- break; \
- }
-
-// The P-extension support is contributed by
-// Programming Language Lab, Department of Computer Science, National Tsing-Hua University, Taiwan
-
-#define P_FIELD(R, INDEX, SIZE) \
- (type_sew_t<SIZE>::type)get_field(R, make_mask64(((INDEX) * SIZE), SIZE))
-
-#define P_UFIELD(R, INDEX, SIZE) \
- (type_usew_t<SIZE>::type)get_field(R, make_mask64(((INDEX) * SIZE), SIZE))
-
-#define P_B(R, INDEX) P_UFIELD(R, INDEX, 8)
-#define P_H(R, INDEX) P_UFIELD(R, INDEX, 16)
-#define P_W(R, INDEX) P_UFIELD(R, INDEX, 32)
-#define P_SB(R, INDEX) P_FIELD(R, INDEX, 8)
-#define P_SH(R, INDEX) P_FIELD(R, INDEX, 16)
-#define P_SW(R, INDEX) P_FIELD(R, INDEX, 32)
-
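P_FIELD and P_UFIELD slice the INDEX-th SIZE-bit lane out of a packed register, sign- or zero-extending it into the element type; the P_B/P_H/P_W and P_SB/P_SH/P_SW wrappers just pin SIZE to 8, 16, or 32. The shift-and-mask arithmetic behind get_field/make_mask64 boils down to the following sketch (assuming lane widths below 64 bits):

#include <cassert>
#include <cstdint>

// Zero-extending lane extract, the P_UFIELD shape (size < 64 assumed).
constexpr uint64_t ufield(uint64_t r, unsigned index, unsigned size) {
  return (r >> (index * size)) & ((1ULL << size) - 1);
}

// Sign-extending lane extract, the P_FIELD shape: shift the lane up to
// the top of the word, then arithmetic-shift it back down.
constexpr int64_t sfield(uint64_t r, unsigned index, unsigned size) {
  return (int64_t)(r << (64 - size - index * size)) >> (64 - size);
}

int main() {
  uint64_t r = 0x80017fff0002fffeULL;  // four 16-bit lanes
  assert(ufield(r, 0, 16) == 0xfffe);
  assert(sfield(r, 0, 16) == -2);      // lane 0 sign-extends to -2
  assert(sfield(r, 3, 16) == -32767);  // 0x8001 read as int16_t
}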
-#define READ_REG_PAIR(reg) ({ \
- require((reg) % 2 == 0); \
- (reg) == 0 ? reg_t(0) : \
- (READ_REG((reg) + 1) << 32) + zext32(READ_REG(reg)); })
-
-#define RS1_PAIR READ_REG_PAIR(insn.rs1())
-#define RS2_PAIR READ_REG_PAIR(insn.rs2())
-#define RD_PAIR READ_REG_PAIR(insn.rd())
-
-#define WRITE_PD() \
- rd_tmp = set_field(rd_tmp, make_mask64((i * sizeof(pd) * 8), sizeof(pd) * 8), pd);
-
-#define WRITE_RD_PAIR(value) \
- if (insn.rd() != 0) { \
- require(insn.rd() % 2 == 0); \
- WRITE_REG(insn.rd(), sext32(value)); \
- WRITE_REG(insn.rd() + 1, (sreg_t(value)) >> 32); \
- }
-
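On RV32, the 64-bit operands of the Zpsfoperand instructions live in even/odd register pairs: the even register holds the low word, the next odd register the high word, and the x0 pair reads as zero and discards writes. A compact model of READ_REG_PAIR / WRITE_RD_PAIR (the real macros additionally require an even register number):

#include <cassert>
#include <cstdint>

// Sketch of RV32 register pairing: even register = low 32 bits,
// odd register = high 32 bits; the x0 pair is hardwired to zero.
uint64_t read_pair(const uint32_t x[32], unsigned reg) {
  return reg == 0 ? 0 : ((uint64_t)x[reg + 1] << 32) | x[reg];
}

void write_pair(uint32_t x[32], unsigned reg, uint64_t v) {
  if (reg == 0) return;              // writes to the x0 pair vanish
  x[reg] = (uint32_t)v;              // low half to the even register
  x[reg + 1] = (uint32_t)(v >> 32);  // high half to the odd register
}

int main() {
  uint32_t x[32] = {};
  write_pair(x, 2, 0x0123456789abcdefULL);
  assert(read_pair(x, 2) == 0x0123456789abcdefULL);
}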
-#define P_SET_OV(ov) \
- if (ov) P.VU.vxsat->write(1);
-
-#define P_SAT(R, BIT) \
- if (R > INT##BIT##_MAX) { \
- R = INT##BIT##_MAX; \
- P_SET_OV(1); \
- } else if (R < INT##BIT##_MIN) { \
- R = INT##BIT##_MIN; \
- P_SET_OV(1); \
- }
-
-#define P_SATU(R, BIT) \
- if (R > UINT##BIT##_MAX) { \
- R = UINT##BIT##_MAX; \
- P_SET_OV(1); \
- } else if (R < 0) { \
- P_SET_OV(1); \
- R = 0; \
- }
-
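P_SAT and P_SATU clamp an out-of-range intermediate result to the signed or unsigned limits of the element width and latch the overflow into vxsat through P_SET_OV. At 16 bits the signed case looks like this sketch:

#include <cassert>
#include <cstdint>

// Signed 16-bit saturation in the P_SAT style: clamp and report
// overflow (the flag models what P_SET_OV sticks into vxsat).
int16_t sat16(int32_t v, bool* ov) {
  if (v > INT16_MAX) { *ov = true; return INT16_MAX; }
  if (v < INT16_MIN) { *ov = true; return INT16_MIN; }
  return (int16_t)v;
}

int main() {
  bool ov = false;
  assert(sat16(40000, &ov) == INT16_MAX && ov);  // clamps to 32767, sets flag
  ov = false;
  assert(sat16(-123, &ov) == -123 && !ov);       // in-range passes through
}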
-#define P_LOOP_BASE(BIT) \
- require_extension(EXT_ZPN); \
- require(BIT == e8 || BIT == e16 || BIT == e32); \
- reg_t rd_tmp = RD; \
- reg_t rs1 = RS1; \
- reg_t rs2 = RS2; \
- sreg_t len = xlen / BIT; \
- for (sreg_t i = len - 1; i >= 0; --i) {
-
-#define P_ONE_LOOP_BASE(BIT) \
- require_extension(EXT_ZPN); \
- require(BIT == e8 || BIT == e16 || BIT == e32); \
- reg_t rd_tmp = RD; \
- reg_t rs1 = RS1; \
- sreg_t len = xlen / BIT; \
- for (sreg_t i = len - 1; i >= 0; --i) {
-
-#define P_I_LOOP_BASE(BIT, IMMBIT) \
- require_extension(EXT_ZPN); \
- require(BIT == e8 || BIT == e16 || BIT == e32); \
- reg_t rd_tmp = RD; \
- reg_t rs1 = RS1; \
- type_usew_t<BIT>::type imm##IMMBIT##u = insn.p_imm##IMMBIT(); \
- sreg_t len = xlen / BIT; \
- for (sreg_t i = len - 1; i >= 0; --i) {
-
-#define P_X_LOOP_BASE(BIT, LOWBIT) \
- require_extension(EXT_ZPN); \
- require(BIT == e8 || BIT == e16 || BIT == e32); \
- reg_t rd_tmp = RD; \
- reg_t rs1 = RS1; \
- type_usew_t<BIT>::type sa = RS2 & ((uint64_t(1) << LOWBIT) - 1); \
- type_sew_t<BIT>::type ssa = int64_t(RS2) << (64 - LOWBIT) >> (64 - LOWBIT); \
- sreg_t len = xlen / BIT; \
- for (sreg_t i = len - 1; i >= 0; --i) {
-
-#define P_MUL_LOOP_BASE(BIT) \
- require_extension(EXT_ZPN); \
- require(BIT == e8 || BIT == e16 || BIT == e32); \
- reg_t rd_tmp = RD; \
- reg_t rs1 = RS1; \
- reg_t rs2 = RS2; \
- sreg_t len = 32 / BIT; \
- for (sreg_t i = len - 1; i >= 0; --i) {
-
-#define P_REDUCTION_LOOP_BASE(BIT, BIT_INNER, USE_RD) \
- require_extension(EXT_ZPN); \
- require(BIT == e16 || BIT == e32 || BIT == e64); \
- reg_t rd_tmp = USE_RD ? zext_xlen(RD) : 0; \
- reg_t rs1 = zext_xlen(RS1); \
- reg_t rs2 = zext_xlen(RS2); \
- sreg_t len = 64 / BIT; \
- sreg_t len_inner = BIT / BIT_INNER; \
- for (sreg_t i = len - 1; i >= 0; --i) { \
- sreg_t pd_res = P_FIELD(rd_tmp, i, BIT); \
- for (sreg_t j = i * len_inner; j < (i + 1) * len_inner; ++j) {
-
-#define P_REDUCTION_ULOOP_BASE(BIT, BIT_INNER, USE_RD) \
- require_extension(EXT_ZPN); \
- require(BIT == e16 || BIT == e32 || BIT == e64); \
- reg_t rd_tmp = USE_RD ? zext_xlen(RD) : 0; \
- reg_t rs1 = zext_xlen(RS1); \
- reg_t rs2 = zext_xlen(RS2); \
- sreg_t len = 64 / BIT; \
- sreg_t len_inner = BIT / BIT_INNER; \
-  for (sreg_t i = len - 1; i >= 0; --i) { \
- reg_t pd_res = P_UFIELD(rd_tmp, i, BIT); \
- for (sreg_t j = i * len_inner; j < (i + 1) * len_inner; ++j) {
-
-#define P_PARAMS(BIT) \
- auto pd = P_FIELD(rd_tmp, i, BIT); \
- auto ps1 = P_FIELD(rs1, i, BIT); \
- auto ps2 = P_FIELD(rs2, i, BIT);
-
-#define P_UPARAMS(BIT) \
- auto pd = P_UFIELD(rd_tmp, i, BIT); \
- auto ps1 = P_UFIELD(rs1, i, BIT); \
- auto ps2 = P_UFIELD(rs2, i, BIT);
-
-#define P_CORSS_PARAMS(BIT) \
- auto pd = P_FIELD(rd_tmp, i, BIT); \
- auto ps1 = P_FIELD(rs1, i, BIT); \
- auto ps2 = P_FIELD(rs2, (i ^ 1), BIT);
-
-#define P_CORSS_UPARAMS(BIT) \
- auto pd = P_UFIELD(rd_tmp, i, BIT); \
- auto ps1 = P_UFIELD(rs1, i, BIT); \
- auto ps2 = P_UFIELD(rs2, (i ^ 1), BIT);
-
-#define P_ONE_PARAMS(BIT) \
- auto pd = P_FIELD(rd_tmp, i, BIT); \
- auto ps1 = P_FIELD(rs1, i, BIT);
-
-#define P_ONE_UPARAMS(BIT) \
- auto pd = P_UFIELD(rd_tmp, i, BIT); \
- auto ps1 = P_UFIELD(rs1, i, BIT);
-
-#define P_ONE_SUPARAMS(BIT) \
- auto pd = P_UFIELD(rd_tmp, i, BIT); \
- auto ps1 = P_FIELD(rs1, i, BIT);
-
-#define P_MUL_PARAMS(BIT) \
- auto pd = P_FIELD(rd_tmp, i, BIT * 2); \
- auto ps1 = P_FIELD(rs1, i, BIT); \
- auto ps2 = P_FIELD(rs2, i, BIT);
-
-#define P_MUL_UPARAMS(BIT) \
- auto pd = P_UFIELD(rd_tmp, i, BIT * 2); \
- auto ps1 = P_UFIELD(rs1, i, BIT); \
- auto ps2 = P_UFIELD(rs2, i, BIT);
-
-#define P_MUL_CROSS_PARAMS(BIT) \
- auto pd = P_FIELD(rd_tmp, i, BIT * 2); \
- auto ps1 = P_FIELD(rs1, i, BIT); \
- auto ps2 = P_FIELD(rs2, (i ^ 1), BIT);
-
-#define P_MUL_CROSS_UPARAMS(BIT) \
-  auto pd = P_UFIELD(rd_tmp, i, BIT * 2); \
- auto ps1 = P_UFIELD(rs1, i, BIT); \
- auto ps2 = P_UFIELD(rs2, (i ^ 1), BIT);
-
-#define P_REDUCTION_PARAMS(BIT_INNER) \
- auto ps1 = P_FIELD(rs1, j, BIT_INNER); \
- auto ps2 = P_FIELD(rs2, j, BIT_INNER);
-
-#define P_REDUCTION_UPARAMS(BIT_INNER) \
- auto ps1 = P_UFIELD(rs1, j, BIT_INNER); \
- auto ps2 = P_UFIELD(rs2, j, BIT_INNER);
-
-#define P_REDUCTION_SUPARAMS(BIT_INNER) \
- auto ps1 = P_FIELD(rs1, j, BIT_INNER); \
- auto ps2 = P_UFIELD(rs2, j, BIT_INNER);
-
-#define P_REDUCTION_CROSS_PARAMS(BIT_INNER) \
- auto ps1 = P_FIELD(rs1, j, BIT_INNER); \
- auto ps2 = P_FIELD(rs2, (j ^ 1), BIT_INNER);
-
-#define P_LOOP_BODY(BIT, BODY) { \
- P_PARAMS(BIT) \
- BODY \
- WRITE_PD(); \
-}
-
-#define P_ULOOP_BODY(BIT, BODY) { \
- P_UPARAMS(BIT) \
- BODY \
- WRITE_PD(); \
-}
-
-#define P_ONE_LOOP_BODY(BIT, BODY) { \
- P_ONE_PARAMS(BIT) \
- BODY \
- WRITE_PD(); \
-}
-
-#define P_CROSS_LOOP_BODY(BIT, BODY) { \
- P_CORSS_PARAMS(BIT) \
- BODY \
- WRITE_PD(); \
-}
-
-#define P_CROSS_ULOOP_BODY(BIT, BODY) { \
- P_CORSS_UPARAMS(BIT) \
- BODY \
- WRITE_PD(); \
-}
-
-#define P_ONE_ULOOP_BODY(BIT, BODY) { \
- P_ONE_UPARAMS(BIT) \
- BODY \
- WRITE_PD(); \
-}
-
-#define P_MUL_LOOP_BODY(BIT, BODY) { \
- P_MUL_PARAMS(BIT) \
- BODY \
- WRITE_PD(); \
-}
-
-#define P_MUL_ULOOP_BODY(BIT, BODY) { \
- P_MUL_UPARAMS(BIT) \
- BODY \
- WRITE_PD(); \
-}
-
-#define P_MUL_CROSS_LOOP_BODY(BIT, BODY) { \
- P_MUL_CROSS_PARAMS(BIT) \
- BODY \
- WRITE_PD(); \
-}
-
-#define P_MUL_CROSS_ULOOP_BODY(BIT, BODY) { \
- P_MUL_CROSS_UPARAMS(BIT) \
- BODY \
- WRITE_PD(); \
-}
-
-#define P_LOOP(BIT, BODY) \
- P_LOOP_BASE(BIT) \
- P_LOOP_BODY(BIT, BODY) \
- P_LOOP_END()
-
-#define P_ONE_LOOP(BIT, BODY) \
- P_ONE_LOOP_BASE(BIT) \
- P_ONE_LOOP_BODY(BIT, BODY) \
- P_LOOP_END()
-
-#define P_ULOOP(BIT, BODY) \
- P_LOOP_BASE(BIT) \
- P_ULOOP_BODY(BIT, BODY) \
- P_LOOP_END()
-
-#define P_CROSS_LOOP(BIT, BODY1, BODY2) \
- P_LOOP_BASE(BIT) \
- P_CROSS_LOOP_BODY(BIT, BODY1) \
- --i; \
- if (sizeof(#BODY2) == 1) { \
- P_CROSS_LOOP_BODY(BIT, BODY1) \
- } \
- else { \
- P_CROSS_LOOP_BODY(BIT, BODY2) \
- } \
- P_LOOP_END()
-
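The sizeof(#BODY2) == 1 test in P_CROSS_LOOP above is a preprocessor idiom for detecting an omitted macro argument: stringizing an empty parameter yields "", whose sizeof is 1 (just the NUL terminator), so the macro reuses BODY1 for the second element whenever no distinct second body is supplied. A two-assert demonstration of the idiom:

#include <cassert>

// Stringizing an empty macro argument produces "", so sizeof is 1.
#define IS_EMPTY_ARG(a) (sizeof(#a) == 1)

int main() {
  assert(IS_EMPTY_ARG());       // empty -> "" -> sizeof 1
  assert(!IS_EMPTY_ARG(x + 1)); // "x + 1" -> sizeof 6
}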
-#define P_CROSS_ULOOP(BIT, BODY1, BODY2) \
- P_LOOP_BASE(BIT) \
- P_CROSS_ULOOP_BODY(BIT, BODY1) \
- --i; \
- P_CROSS_ULOOP_BODY(BIT, BODY2) \
- P_LOOP_END()
-
-#define P_STRAIGHT_LOOP(BIT, BODY1, BODY2) \
- P_LOOP_BASE(BIT) \
- P_LOOP_BODY(BIT, BODY1) \
- --i; \
- P_LOOP_BODY(BIT, BODY2) \
- P_LOOP_END()
-
-#define P_STRAIGHT_ULOOP(BIT, BODY1, BODY2) \
- P_LOOP_BASE(BIT) \
- P_ULOOP_BODY(BIT, BODY1) \
- --i; \
- P_ULOOP_BODY(BIT, BODY2) \
- P_LOOP_END()
-
-#define P_X_LOOP(BIT, RS2_LOW_BIT, BODY) \
- P_X_LOOP_BASE(BIT, RS2_LOW_BIT) \
- P_ONE_LOOP_BODY(BIT, BODY) \
- P_LOOP_END()
-
-#define P_X_ULOOP(BIT, RS2_LOW_BIT, BODY) \
- P_X_LOOP_BASE(BIT, RS2_LOW_BIT) \
- P_ONE_ULOOP_BODY(BIT, BODY) \
- P_LOOP_END()
-
-#define P_I_LOOP(BIT, IMMBIT, BODY) \
- P_I_LOOP_BASE(BIT, IMMBIT) \
- P_ONE_LOOP_BODY(BIT, BODY) \
- P_LOOP_END()
-
-#define P_I_ULOOP(BIT, IMMBIT, BODY) \
- P_I_LOOP_BASE(BIT, IMMBIT) \
- P_ONE_ULOOP_BODY(BIT, BODY) \
- P_LOOP_END()
-
-#define P_MUL_LOOP(BIT, BODY) \
- P_MUL_LOOP_BASE(BIT) \
- P_MUL_LOOP_BODY(BIT, BODY) \
- P_PAIR_LOOP_END()
-
-#define P_MUL_ULOOP(BIT, BODY) \
- P_MUL_LOOP_BASE(BIT) \
- P_MUL_ULOOP_BODY(BIT, BODY) \
- P_PAIR_LOOP_END()
-
-#define P_MUL_CROSS_LOOP(BIT, BODY) \
- P_MUL_LOOP_BASE(BIT) \
- P_MUL_CROSS_LOOP_BODY(BIT, BODY) \
- P_PAIR_LOOP_END()
-
-#define P_MUL_CROSS_ULOOP(BIT, BODY) \
- P_MUL_LOOP_BASE(BIT) \
- P_MUL_CROSS_ULOOP_BODY(BIT, BODY) \
- P_PAIR_LOOP_END()
-
-#define P_REDUCTION_LOOP(BIT, BIT_INNER, USE_RD, IS_SAT, BODY) \
- P_REDUCTION_LOOP_BASE(BIT, BIT_INNER, USE_RD) \
- P_REDUCTION_PARAMS(BIT_INNER) \
- BODY \
- P_REDUCTION_LOOP_END(BIT, IS_SAT)
-
-#define P_REDUCTION_ULOOP(BIT, BIT_INNER, USE_RD, IS_SAT, BODY) \
- P_REDUCTION_ULOOP_BASE(BIT, BIT_INNER, USE_RD) \
- P_REDUCTION_UPARAMS(BIT_INNER) \
- BODY \
- P_REDUCTION_ULOOP_END(BIT, IS_SAT)
-
-#define P_REDUCTION_SULOOP(BIT, BIT_INNER, USE_RD, IS_SAT, BODY) \
- P_REDUCTION_LOOP_BASE(BIT, BIT_INNER, USE_RD) \
- P_REDUCTION_SUPARAMS(BIT_INNER) \
- BODY \
- P_REDUCTION_LOOP_END(BIT, IS_SAT)
-
-#define P_REDUCTION_CROSS_LOOP(BIT, BIT_INNER, USE_RD, IS_SAT, BODY) \
- P_REDUCTION_LOOP_BASE(BIT, BIT_INNER, USE_RD) \
- P_REDUCTION_CROSS_PARAMS(BIT_INNER) \
- BODY \
- P_REDUCTION_LOOP_END(BIT, IS_SAT)
-
-#define P_LOOP_END() \
- } \
- WRITE_RD(sext_xlen(rd_tmp));
-
-#define P_PAIR_LOOP_END() \
- } \
- if (xlen == 32) { \
- WRITE_RD_PAIR(rd_tmp); \
- } \
- else { \
- WRITE_RD(sext_xlen(rd_tmp)); \
- }
-
-#define P_REDUCTION_LOOP_END(BIT, IS_SAT) \
- } \
- if (IS_SAT) { \
- P_SAT(pd_res, BIT); \
- } \
- type_usew_t<BIT>::type pd = pd_res; \
- WRITE_PD(); \
- } \
- WRITE_RD(sext_xlen(rd_tmp));
-
-#define P_REDUCTION_ULOOP_END(BIT, IS_SAT) \
- } \
- if (IS_SAT) { \
- P_SATU(pd_res, BIT); \
- } \
- type_usew_t<BIT>::type pd = pd_res; \
- WRITE_PD(); \
- } \
- WRITE_RD(sext_xlen(rd_tmp));
-
-#define P_SUNPKD8(X, Y) \
- require_extension(EXT_ZPN); \
- reg_t rd_tmp = 0; \
- int16_t pd[4] = { \
- P_SB(RS1, Y), \
- P_SB(RS1, X), \
- P_SB(RS1, Y + 4), \
- P_SB(RS1, X + 4), \
- }; \
- if (xlen == 64) { \
- memcpy(&rd_tmp, pd, 8); \
- } else { \
- memcpy(&rd_tmp, pd, 4); \
- } \
- WRITE_RD(sext_xlen(rd_tmp));
-
-#define P_ZUNPKD8(X, Y) \
- require_extension(EXT_ZPN); \
- reg_t rd_tmp = 0; \
- uint16_t pd[4] = { \
- P_B(RS1, Y), \
- P_B(RS1, X), \
- P_B(RS1, Y + 4), \
- P_B(RS1, X + 4), \
- }; \
- if (xlen == 64) { \
- memcpy(&rd_tmp, pd, 8); \
- } else { \
- memcpy(&rd_tmp, pd, 4); \
- } \
- WRITE_RD(sext_xlen(rd_tmp));
-
-#define P_PK(BIT, X, Y) \
- require_extension(EXT_ZPN); \
- require(BIT == e16 || BIT == e32); \
- reg_t rd_tmp = 0, rs1 = RS1, rs2 = RS2; \
- for (sreg_t i = 0; i < xlen / BIT / 2; i++) { \
- rd_tmp = set_field(rd_tmp, make_mask64(i * 2 * BIT, BIT), \
- P_UFIELD(RS2, i * 2 + Y, BIT)); \
- rd_tmp = set_field(rd_tmp, make_mask64((i * 2 + 1) * BIT, BIT), \
- P_UFIELD(RS1, i * 2 + X, BIT)); \
- } \
- WRITE_RD(sext_xlen(rd_tmp));
-
-#define P_64_PROFILE_BASE() \
- require_extension(EXT_ZPSFOPERAND); \
- sreg_t rd, rs1, rs2;
-
-#define P_64_UPROFILE_BASE() \
- require_extension(EXT_ZPSFOPERAND); \
- reg_t rd, rs1, rs2;
-
-#define P_64_PROFILE_PARAM(USE_RD, INPUT_PAIR) \
- if (xlen == 32) { \
- rs1 = INPUT_PAIR ? RS1_PAIR : RS1; \
- rs2 = INPUT_PAIR ? RS2_PAIR : RS2; \
- rd = USE_RD ? RD_PAIR : 0; \
- } else { \
- rs1 = RS1; \
- rs2 = RS2; \
- rd = USE_RD ? RD : 0; \
- }
-
-#define P_64_PROFILE(BODY) \
- P_64_PROFILE_BASE() \
- P_64_PROFILE_PARAM(false, true) \
- BODY \
- P_64_PROFILE_END() \
-
-#define P_64_UPROFILE(BODY) \
- P_64_UPROFILE_BASE() \
- P_64_PROFILE_PARAM(false, true) \
- BODY \
- P_64_PROFILE_END() \
-
-#define P_64_PROFILE_REDUCTION(BIT, BODY) \
- P_64_PROFILE_BASE() \
- P_64_PROFILE_PARAM(true, false) \
- for (sreg_t i = 0; i < xlen / BIT; i++) { \
- sreg_t ps1 = P_FIELD(rs1, i, BIT); \
- sreg_t ps2 = P_FIELD(rs2, i, BIT); \
- BODY \
- } \
- P_64_PROFILE_END() \
-
-#define P_64_UPROFILE_REDUCTION(BIT, BODY) \
- P_64_UPROFILE_BASE() \
- P_64_PROFILE_PARAM(true, false) \
- for (sreg_t i = 0; i < xlen / BIT; i++) { \
- reg_t ps1 = P_UFIELD(rs1, i, BIT); \
- reg_t ps2 = P_UFIELD(rs2, i, BIT); \
- BODY \
- } \
- P_64_PROFILE_END() \
-
-#define P_64_PROFILE_END() \
- if (xlen == 32) { \
- WRITE_RD_PAIR(rd); \
- } else { \
- WRITE_RD(sext_xlen(rd)); \
- }
-
#define DECLARE_XENVCFG_VARS(field) \
reg_t m##field = get_field(STATE.menvcfg->read(), MENVCFG_##field); \
reg_t s##field = get_field(STATE.senvcfg->read(), SENVCFG_##field); \
diff --git a/riscv/devices.cc b/riscv/devices.cc
index eb677a5..81b232d 100644
--- a/riscv/devices.cc
+++ b/riscv/devices.cc
@@ -137,3 +137,16 @@ char* mem_t::contents(reg_t addr) {
}
return search->second + pgoff;
}
+
+void mem_t::dump(std::ostream& o) {
+ const char empty[PGSIZE] = {0};
+ for (reg_t i = 0; i < sz; i += PGSIZE) {
+ reg_t ppn = i >> PGSHIFT;
+ auto search = sparse_memory_map.find(ppn);
+ if (search == sparse_memory_map.end()) {
+ o.write(empty, PGSIZE);
+ } else {
+ o.write(sparse_memory_map[ppn], PGSIZE);
+ }
+ }
+}
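The new mem_t::dump walks the memory in page-sized steps and, for pages never allocated in the sparse map, emits a zero page instead, so the result is a flat, gap-free image of the whole address range. The same idea in miniature, detached from mem_t (names and page size here are assumptions):

#include <cstddef>
#include <cstdint>
#include <map>
#include <ostream>

constexpr std::size_t PG_SKETCH = 4096;  // assumed page size

// Sparse dump sketch: pages absent from the map come out as zeros.
void dump_sparse(std::ostream& o, uint64_t total_size,
                 const std::map<uint64_t, const char*>& pages) {
  static const char zeros[PG_SKETCH] = {0};
  for (uint64_t addr = 0; addr < total_size; addr += PG_SKETCH) {
    auto it = pages.find(addr / PG_SKETCH);  // key is the page number
    o.write(it == pages.end() ? zeros : it->second, PG_SKETCH);
  }
}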
diff --git a/riscv/devices.h b/riscv/devices.h
index df7b289..bccda34 100644
--- a/riscv/devices.h
+++ b/riscv/devices.h
@@ -45,6 +45,7 @@ class mem_t : public abstract_device_t {
bool store(reg_t addr, size_t len, const uint8_t* bytes) { return load_store(addr, len, const_cast<uint8_t*>(bytes), true); }
char* contents(reg_t addr);
reg_t size() { return sz; }
+ void dump(std::ostream& o);
private:
bool load_store(reg_t addr, size_t len, uint8_t* bytes, bool store);
@@ -60,6 +61,8 @@ class clint_t : public abstract_device_t {
bool store(reg_t addr, size_t len, const uint8_t* bytes);
size_t size() { return CLINT_SIZE; }
void increment(reg_t inc);
+ uint64_t get_mtimecmp(reg_t hartid) { return mtimecmp[hartid]; }
+ uint64_t get_mtime() { return mtime; }
private:
typedef uint64_t mtime_t;
typedef uint64_t mtimecmp_t;
diff --git a/riscv/dts.cc b/riscv/dts.cc
index e6b810d..6a40565 100644
--- a/riscv/dts.cc
+++ b/riscv/dts.cc
@@ -14,6 +14,7 @@
std::string make_dts(size_t insns_per_rtc_tick, size_t cpu_hz,
reg_t initrd_start, reg_t initrd_end,
const char* bootargs,
+ size_t pmpregions,
std::vector<processor_t*> procs,
std::vector<std::pair<reg_t, mem_t*>> mems)
{
@@ -58,7 +59,7 @@ std::string make_dts(size_t insns_per_rtc_tick, size_t cpu_hz,
" compatible = \"riscv\";\n"
" riscv,isa = \"" << procs[i]->get_isa().get_isa_string() << "\";\n"
" mmu-type = \"riscv," << (procs[i]->get_isa().get_max_xlen() <= 32 ? "sv32" : "sv57") << "\";\n"
- " riscv,pmpregions = <16>;\n"
+ " riscv,pmpregions = <" << pmpregions << ">;\n"
" riscv,pmpgranularity = <4>;\n"
" clock-frequency = <" << cpu_hz << ">;\n"
" CPU" << i << "_intc: interrupt-controller {\n"
@@ -212,7 +213,6 @@ std::string dts_compile(const std::string& dts)
return dtb.str();
}
-
static int fdt_get_node_addr_size(void *fdt, int node, reg_t *addr,
unsigned long *size, const char *field)
{
@@ -274,7 +274,6 @@ static int check_cpu_node(void *fdt, int cpu_offset)
return 0;
}
-
int fdt_get_offset(void *fdt, const char *field)
{
return fdt_path_offset(fdt, field);
diff --git a/riscv/dts.h b/riscv/dts.h
index a02f594..7a64d7b 100644
--- a/riscv/dts.h
+++ b/riscv/dts.h
@@ -10,6 +10,7 @@
std::string make_dts(size_t insns_per_rtc_tick, size_t cpu_hz,
reg_t initrd_start, reg_t initrd_end,
const char* bootargs,
+ size_t pmpregions,
std::vector<processor_t*> procs,
std::vector<std::pair<reg_t, mem_t*>> mems);
diff --git a/riscv/encoding.h b/riscv/encoding.h
index e6dbd7c..1af7f89 100644
--- a/riscv/encoding.h
+++ b/riscv/encoding.h
@@ -1,9 +1,9 @@
-/*
- * This file is auto-generated by running 'make' in
- * https://github.com/riscv/riscv-opcodes (d2b9aea)
- */
-/* See LICENSE for license details. */
+/*
+* This file is auto-generated by running 'make' in
+* https://github.com/riscv/riscv-opcodes (3db008d)
+*/
+/* SPDX-License-Identifier: BSD-3-Clause-Clear */
#ifndef RISCV_CSR_ENCODING_H
#define RISCV_CSR_ENCODING_H
@@ -139,6 +139,7 @@
#define MIP_VSEIP (1 << IRQ_VS_EXT)
#define MIP_MEIP (1 << IRQ_M_EXT)
#define MIP_SGEIP (1 << IRQ_S_GEXT)
+#define MIP_LCOFIP (1 << IRQ_LCOF)
#define MIP_S_MASK (MIP_SSIP | MIP_STIP | MIP_SEIP)
#define MIP_VS_MASK (MIP_VSSIP | MIP_VSTIP | MIP_VSEIP)
@@ -159,6 +160,30 @@
#define MENVCFGH_PBMTE 0x40000000
#define MENVCFGH_STCE 0x80000000
+#define MSTATEEN0_CS 0x00000001
+#define MSTATEEN0_FCSR 0x00000002
+#define MSTATEEN0_HCONTEXT 0x0200000000000000
+#define MSTATEEN0_HENVCFG 0x4000000000000000
+#define MSTATEEN_HSTATEEN 0x8000000000000000
+
+#define MSTATEEN0H_HCONTEXT 0x02000000
+#define MSTATEEN0H_HENVCFG 0x40000000
+#define MSTATEENH_HSTATEEN 0x80000000
+
+#define MHPMEVENT_VUINH 0x0400000000000000
+#define MHPMEVENT_VSINH 0x0800000000000000
+#define MHPMEVENT_UINH 0x1000000000000000
+#define MHPMEVENT_SINH 0x2000000000000000
+#define MHPMEVENT_MINH 0x4000000000000000
+#define MHPMEVENT_OF 0x8000000000000000
+
+#define MHPMEVENTH_VUINH 0x04000000
+#define MHPMEVENTH_VSINH 0x08000000
+#define MHPMEVENTH_UINH 0x10000000
+#define MHPMEVENTH_SINH 0x20000000
+#define MHPMEVENTH_MINH 0x40000000
+#define MHPMEVENTH_OF 0x80000000
+
#define HENVCFG_FIOM 0x00000001
#define HENVCFG_CBIE 0x00000030
#define HENVCFG_CBCFE 0x00000040
@@ -169,11 +194,24 @@
#define HENVCFGH_PBMTE 0x40000000
#define HENVCFGH_STCE 0x80000000
+#define HSTATEEN0_CS 0x00000001
+#define HSTATEEN0_FCSR 0x00000002
+#define HSTATEEN0_SCONTEXT 0x0200000000000000
+#define HSTATEEN0_SENVCFG 0x4000000000000000
+#define HSTATEEN_SSTATEEN 0x8000000000000000
+
+#define HSTATEEN0H_SCONTEXT 0x02000000
+#define HSTATEEN0H_SENVCFG 0x40000000
+#define HSTATEENH_SSTATEEN 0x80000000
+
#define SENVCFG_FIOM 0x00000001
#define SENVCFG_CBIE 0x00000030
#define SENVCFG_CBCFE 0x00000040
#define SENVCFG_CBZE 0x00000080
+#define SSTATEEN0_CS 0x00000001
+#define SSTATEEN0_FCSR 0x00000002
+
#define MSECCFG_MML 0x00000001
#define MSECCFG_MMWP 0x00000002
#define MSECCFG_RLB 0x00000004
@@ -239,7 +277,7 @@
#define IRQ_M_EXT 11
#define IRQ_S_GEXT 12
#define IRQ_COP 12
-#define IRQ_HOST 13
+#define IRQ_LCOF 13
/* page table entry (PTE) fields */
#define PTE_V 0x001 /* Valid */
@@ -310,2525 +348,2409 @@
#endif
#endif
-/* Automatically generated by parse_opcodes. */
+
+/* Automatically generated by parse_opcodes. */
#ifndef RISCV_ENCODING_H
#define RISCV_ENCODING_H
-#define MATCH_SLLI_RV32 0x1013
-#define MASK_SLLI_RV32 0xfe00707f
-#define MATCH_SRLI_RV32 0x5013
-#define MASK_SRLI_RV32 0xfe00707f
-#define MATCH_SRAI_RV32 0x40005013
-#define MASK_SRAI_RV32 0xfe00707f
-#define MATCH_FRFLAGS 0x102073
-#define MASK_FRFLAGS 0xfffff07f
-#define MATCH_FSFLAGS 0x101073
-#define MASK_FSFLAGS 0xfff0707f
-#define MATCH_FSFLAGSI 0x105073
-#define MASK_FSFLAGSI 0xfff0707f
-#define MATCH_FRRM 0x202073
-#define MASK_FRRM 0xfffff07f
-#define MATCH_FSRM 0x201073
-#define MASK_FSRM 0xfff0707f
-#define MATCH_FSRMI 0x205073
-#define MASK_FSRMI 0xfff0707f
-#define MATCH_FSCSR 0x301073
-#define MASK_FSCSR 0xfff0707f
-#define MATCH_FRCSR 0x302073
-#define MASK_FRCSR 0xfffff07f
-#define MATCH_RDCYCLE 0xc0002073
-#define MASK_RDCYCLE 0xfffff07f
-#define MATCH_RDTIME 0xc0102073
-#define MASK_RDTIME 0xfffff07f
-#define MATCH_RDINSTRET 0xc0202073
-#define MASK_RDINSTRET 0xfffff07f
-#define MATCH_RDCYCLEH 0xc8002073
-#define MASK_RDCYCLEH 0xfffff07f
-#define MATCH_RDTIMEH 0xc8102073
-#define MASK_RDTIMEH 0xfffff07f
-#define MATCH_RDINSTRETH 0xc8202073
-#define MASK_RDINSTRETH 0xfffff07f
-#define MATCH_SCALL 0x73
-#define MASK_SCALL 0xffffffff
-#define MATCH_SBREAK 0x100073
-#define MASK_SBREAK 0xffffffff
-#define MATCH_FMV_X_S 0xe0000053
-#define MASK_FMV_X_S 0xfff0707f
-#define MATCH_FMV_S_X 0xf0000053
-#define MASK_FMV_S_X 0xfff0707f
-#define MATCH_FENCE_TSO 0x8330000f
-#define MASK_FENCE_TSO 0xfff0707f
-#define MATCH_PAUSE 0x100000f
-#define MASK_PAUSE 0xffffffff
-#define MATCH_BEQ 0x63
-#define MASK_BEQ 0x707f
-#define MATCH_BNE 0x1063
-#define MASK_BNE 0x707f
-#define MATCH_BLT 0x4063
-#define MASK_BLT 0x707f
-#define MATCH_BGE 0x5063
-#define MASK_BGE 0x707f
-#define MATCH_BLTU 0x6063
-#define MASK_BLTU 0x707f
-#define MATCH_BGEU 0x7063
-#define MASK_BGEU 0x707f
-#define MATCH_JALR 0x67
-#define MASK_JALR 0x707f
-#define MATCH_JAL 0x6f
-#define MASK_JAL 0x7f
-#define MATCH_LUI 0x37
-#define MASK_LUI 0x7f
-#define MATCH_AUIPC 0x17
-#define MASK_AUIPC 0x7f
-#define MATCH_ADDI 0x13
-#define MASK_ADDI 0x707f
-#define MATCH_SLTI 0x2013
-#define MASK_SLTI 0x707f
-#define MATCH_SLTIU 0x3013
-#define MASK_SLTIU 0x707f
-#define MATCH_XORI 0x4013
-#define MASK_XORI 0x707f
-#define MATCH_ORI 0x6013
-#define MASK_ORI 0x707f
-#define MATCH_ANDI 0x7013
-#define MASK_ANDI 0x707f
#define MATCH_ADD 0x33
-#define MASK_ADD 0xfe00707f
-#define MATCH_SUB 0x40000033
-#define MASK_SUB 0xfe00707f
-#define MATCH_SLL 0x1033
-#define MASK_SLL 0xfe00707f
-#define MATCH_SLT 0x2033
-#define MASK_SLT 0xfe00707f
-#define MATCH_SLTU 0x3033
-#define MASK_SLTU 0xfe00707f
-#define MATCH_XOR 0x4033
-#define MASK_XOR 0xfe00707f
-#define MATCH_SRL 0x5033
-#define MASK_SRL 0xfe00707f
-#define MATCH_SRA 0x40005033
-#define MASK_SRA 0xfe00707f
-#define MATCH_OR 0x6033
-#define MASK_OR 0xfe00707f
-#define MATCH_AND 0x7033
-#define MASK_AND 0xfe00707f
-#define MATCH_LB 0x3
-#define MASK_LB 0x707f
-#define MATCH_LH 0x1003
-#define MASK_LH 0x707f
-#define MATCH_LW 0x2003
-#define MASK_LW 0x707f
-#define MATCH_LBU 0x4003
-#define MASK_LBU 0x707f
-#define MATCH_LHU 0x5003
-#define MASK_LHU 0x707f
-#define MATCH_SB 0x23
-#define MASK_SB 0x707f
-#define MATCH_SH 0x1023
-#define MASK_SH 0x707f
-#define MATCH_SW 0x2023
-#define MASK_SW 0x707f
-#define MATCH_FENCE 0xf
-#define MASK_FENCE 0x707f
-#define MATCH_FENCE_I 0x100f
-#define MASK_FENCE_I 0x707f
+#define MASK_ADD 0xfe00707f
+#define MATCH_ADD16 0x40000077
+#define MASK_ADD16 0xfe00707f
+#define MATCH_ADD32 0x40002077
+#define MASK_ADD32 0xfe00707f
+#define MATCH_ADD64 0xc0001077
+#define MASK_ADD64 0xfe00707f
+#define MATCH_ADD8 0x48000077
+#define MASK_ADD8 0xfe00707f
+#define MATCH_ADD_UW 0x800003b
+#define MASK_ADD_UW 0xfe00707f
+#define MATCH_ADDI 0x13
+#define MASK_ADDI 0x707f
#define MATCH_ADDIW 0x1b
-#define MASK_ADDIW 0x707f
-#define MATCH_SLLIW 0x101b
-#define MASK_SLLIW 0xfe00707f
-#define MATCH_SRLIW 0x501b
-#define MASK_SRLIW 0xfe00707f
-#define MATCH_SRAIW 0x4000501b
-#define MASK_SRAIW 0xfe00707f
+#define MASK_ADDIW 0x707f
#define MATCH_ADDW 0x3b
-#define MASK_ADDW 0xfe00707f
-#define MATCH_SUBW 0x4000003b
-#define MASK_SUBW 0xfe00707f
-#define MATCH_SLLW 0x103b
-#define MASK_SLLW 0xfe00707f
-#define MATCH_SRLW 0x503b
-#define MASK_SRLW 0xfe00707f
-#define MATCH_SRAW 0x4000503b
-#define MASK_SRAW 0xfe00707f
-#define MATCH_LD 0x3003
-#define MASK_LD 0x707f
-#define MATCH_LWU 0x6003
-#define MASK_LWU 0x707f
-#define MATCH_SD 0x3023
-#define MASK_SD 0x707f
-#define MATCH_SLLI 0x1013
-#define MASK_SLLI 0xfc00707f
-#define MATCH_SRLI 0x5013
-#define MASK_SRLI 0xfc00707f
-#define MATCH_SRAI 0x40005013
-#define MASK_SRAI 0xfc00707f
-#define MATCH_MUL 0x2000033
-#define MASK_MUL 0xfe00707f
-#define MATCH_MULH 0x2001033
-#define MASK_MULH 0xfe00707f
-#define MATCH_MULHSU 0x2002033
-#define MASK_MULHSU 0xfe00707f
-#define MATCH_MULHU 0x2003033
-#define MASK_MULHU 0xfe00707f
-#define MATCH_DIV 0x2004033
-#define MASK_DIV 0xfe00707f
-#define MATCH_DIVU 0x2005033
-#define MASK_DIVU 0xfe00707f
-#define MATCH_REM 0x2006033
-#define MASK_REM 0xfe00707f
-#define MATCH_REMU 0x2007033
-#define MASK_REMU 0xfe00707f
-#define MATCH_MULW 0x200003b
-#define MASK_MULW 0xfe00707f
-#define MATCH_DIVW 0x200403b
-#define MASK_DIVW 0xfe00707f
-#define MATCH_DIVUW 0x200503b
-#define MASK_DIVUW 0xfe00707f
-#define MATCH_REMW 0x200603b
-#define MASK_REMW 0xfe00707f
-#define MATCH_REMUW 0x200703b
-#define MASK_REMUW 0xfe00707f
+#define MASK_ADDW 0xfe00707f
+#define MATCH_AES32DSI 0x2a000033
+#define MASK_AES32DSI 0x3e00707f
+#define MATCH_AES32DSMI 0x2e000033
+#define MASK_AES32DSMI 0x3e00707f
+#define MATCH_AES32ESI 0x22000033
+#define MASK_AES32ESI 0x3e00707f
+#define MATCH_AES32ESMI 0x26000033
+#define MASK_AES32ESMI 0x3e00707f
+#define MATCH_AES64DS 0x3a000033
+#define MASK_AES64DS 0xfe00707f
+#define MATCH_AES64DSM 0x3e000033
+#define MASK_AES64DSM 0xfe00707f
+#define MATCH_AES64ES 0x32000033
+#define MASK_AES64ES 0xfe00707f
+#define MATCH_AES64ESM 0x36000033
+#define MASK_AES64ESM 0xfe00707f
+#define MATCH_AES64IM 0x30001013
+#define MASK_AES64IM 0xfff0707f
+#define MATCH_AES64KS1I 0x31001013
+#define MASK_AES64KS1I 0xff00707f
+#define MATCH_AES64KS2 0x7e000033
+#define MASK_AES64KS2 0xfe00707f
+#define MATCH_AMOADD_D 0x302f
+#define MASK_AMOADD_D 0xf800707f
#define MATCH_AMOADD_W 0x202f
-#define MASK_AMOADD_W 0xf800707f
-#define MATCH_AMOXOR_W 0x2000202f
-#define MASK_AMOXOR_W 0xf800707f
-#define MATCH_AMOOR_W 0x4000202f
-#define MASK_AMOOR_W 0xf800707f
+#define MASK_AMOADD_W 0xf800707f
+#define MATCH_AMOAND_D 0x6000302f
+#define MASK_AMOAND_D 0xf800707f
#define MATCH_AMOAND_W 0x6000202f
-#define MASK_AMOAND_W 0xf800707f
-#define MATCH_AMOMIN_W 0x8000202f
-#define MASK_AMOMIN_W 0xf800707f
+#define MASK_AMOAND_W 0xf800707f
+#define MATCH_AMOMAX_D 0xa000302f
+#define MASK_AMOMAX_D 0xf800707f
#define MATCH_AMOMAX_W 0xa000202f
-#define MASK_AMOMAX_W 0xf800707f
-#define MATCH_AMOMINU_W 0xc000202f
-#define MASK_AMOMINU_W 0xf800707f
+#define MASK_AMOMAX_W 0xf800707f
+#define MATCH_AMOMAXU_D 0xe000302f
+#define MASK_AMOMAXU_D 0xf800707f
#define MATCH_AMOMAXU_W 0xe000202f
-#define MASK_AMOMAXU_W 0xf800707f
-#define MATCH_AMOSWAP_W 0x800202f
-#define MASK_AMOSWAP_W 0xf800707f
-#define MATCH_LR_W 0x1000202f
-#define MASK_LR_W 0xf9f0707f
-#define MATCH_SC_W 0x1800202f
-#define MASK_SC_W 0xf800707f
-#define MATCH_AMOADD_D 0x302f
-#define MASK_AMOADD_D 0xf800707f
-#define MATCH_AMOXOR_D 0x2000302f
-#define MASK_AMOXOR_D 0xf800707f
-#define MATCH_AMOOR_D 0x4000302f
-#define MASK_AMOOR_D 0xf800707f
-#define MATCH_AMOAND_D 0x6000302f
-#define MASK_AMOAND_D 0xf800707f
+#define MASK_AMOMAXU_W 0xf800707f
#define MATCH_AMOMIN_D 0x8000302f
-#define MASK_AMOMIN_D 0xf800707f
-#define MATCH_AMOMAX_D 0xa000302f
-#define MASK_AMOMAX_D 0xf800707f
+#define MASK_AMOMIN_D 0xf800707f
+#define MATCH_AMOMIN_W 0x8000202f
+#define MASK_AMOMIN_W 0xf800707f
#define MATCH_AMOMINU_D 0xc000302f
-#define MASK_AMOMINU_D 0xf800707f
-#define MATCH_AMOMAXU_D 0xe000302f
-#define MASK_AMOMAXU_D 0xf800707f
+#define MASK_AMOMINU_D 0xf800707f
+#define MATCH_AMOMINU_W 0xc000202f
+#define MASK_AMOMINU_W 0xf800707f
+#define MATCH_AMOOR_D 0x4000302f
+#define MASK_AMOOR_D 0xf800707f
+#define MATCH_AMOOR_W 0x4000202f
+#define MASK_AMOOR_W 0xf800707f
#define MATCH_AMOSWAP_D 0x800302f
-#define MASK_AMOSWAP_D 0xf800707f
-#define MATCH_LR_D 0x1000302f
-#define MASK_LR_D 0xf9f0707f
-#define MATCH_SC_D 0x1800302f
-#define MASK_SC_D 0xf800707f
-#define MATCH_HFENCE_VVMA 0x22000073
-#define MASK_HFENCE_VVMA 0xfe007fff
-#define MATCH_HFENCE_GVMA 0x62000073
-#define MASK_HFENCE_GVMA 0xfe007fff
-#define MATCH_HLV_B 0x60004073
-#define MASK_HLV_B 0xfff0707f
-#define MATCH_HLV_BU 0x60104073
-#define MASK_HLV_BU 0xfff0707f
-#define MATCH_HLV_H 0x64004073
-#define MASK_HLV_H 0xfff0707f
-#define MATCH_HLV_HU 0x64104073
-#define MASK_HLV_HU 0xfff0707f
-#define MATCH_HLVX_HU 0x64304073
-#define MASK_HLVX_HU 0xfff0707f
-#define MATCH_HLV_W 0x68004073
-#define MASK_HLV_W 0xfff0707f
-#define MATCH_HLVX_WU 0x68304073
-#define MASK_HLVX_WU 0xfff0707f
-#define MATCH_HSV_B 0x62004073
-#define MASK_HSV_B 0xfe007fff
-#define MATCH_HSV_H 0x66004073
-#define MASK_HSV_H 0xfe007fff
-#define MATCH_HSV_W 0x6a004073
-#define MASK_HSV_W 0xfe007fff
-#define MATCH_HLV_WU 0x68104073
-#define MASK_HLV_WU 0xfff0707f
-#define MATCH_HLV_D 0x6c004073
-#define MASK_HLV_D 0xfff0707f
-#define MATCH_HSV_D 0x6e004073
-#define MASK_HSV_D 0xfe007fff
-#define MATCH_FADD_S 0x53
-#define MASK_FADD_S 0xfe00007f
-#define MATCH_FSUB_S 0x8000053
-#define MASK_FSUB_S 0xfe00007f
-#define MATCH_FMUL_S 0x10000053
-#define MASK_FMUL_S 0xfe00007f
-#define MATCH_FDIV_S 0x18000053
-#define MASK_FDIV_S 0xfe00007f
-#define MATCH_FSGNJ_S 0x20000053
-#define MASK_FSGNJ_S 0xfe00707f
-#define MATCH_FSGNJN_S 0x20001053
-#define MASK_FSGNJN_S 0xfe00707f
-#define MATCH_FSGNJX_S 0x20002053
-#define MASK_FSGNJX_S 0xfe00707f
-#define MATCH_FMIN_S 0x28000053
-#define MASK_FMIN_S 0xfe00707f
-#define MATCH_FMAX_S 0x28001053
-#define MASK_FMAX_S 0xfe00707f
-#define MATCH_FSQRT_S 0x58000053
-#define MASK_FSQRT_S 0xfff0007f
-#define MATCH_FLE_S 0xa0000053
-#define MASK_FLE_S 0xfe00707f
-#define MATCH_FLT_S 0xa0001053
-#define MASK_FLT_S 0xfe00707f
-#define MATCH_FEQ_S 0xa0002053
-#define MASK_FEQ_S 0xfe00707f
-#define MATCH_FCVT_W_S 0xc0000053
-#define MASK_FCVT_W_S 0xfff0007f
-#define MATCH_FCVT_WU_S 0xc0100053
-#define MASK_FCVT_WU_S 0xfff0007f
-#define MATCH_FMV_X_W 0xe0000053
-#define MASK_FMV_X_W 0xfff0707f
-#define MATCH_FCLASS_S 0xe0001053
-#define MASK_FCLASS_S 0xfff0707f
-#define MATCH_FCVT_S_W 0xd0000053
-#define MASK_FCVT_S_W 0xfff0007f
-#define MATCH_FCVT_S_WU 0xd0100053
-#define MASK_FCVT_S_WU 0xfff0007f
-#define MATCH_FMV_W_X 0xf0000053
-#define MASK_FMV_W_X 0xfff0707f
-#define MATCH_FLW 0x2007
-#define MASK_FLW 0x707f
-#define MATCH_FSW 0x2027
-#define MASK_FSW 0x707f
-#define MATCH_FMADD_S 0x43
-#define MASK_FMADD_S 0x600007f
-#define MATCH_FMSUB_S 0x47
-#define MASK_FMSUB_S 0x600007f
-#define MATCH_FNMSUB_S 0x4b
-#define MASK_FNMSUB_S 0x600007f
-#define MATCH_FNMADD_S 0x4f
-#define MASK_FNMADD_S 0x600007f
-#define MATCH_FCVT_L_S 0xc0200053
-#define MASK_FCVT_L_S 0xfff0007f
-#define MATCH_FCVT_LU_S 0xc0300053
-#define MASK_FCVT_LU_S 0xfff0007f
-#define MATCH_FCVT_S_L 0xd0200053
-#define MASK_FCVT_S_L 0xfff0007f
-#define MATCH_FCVT_S_LU 0xd0300053
-#define MASK_FCVT_S_LU 0xfff0007f
-#define MATCH_FADD_D 0x2000053
-#define MASK_FADD_D 0xfe00007f
-#define MATCH_FSUB_D 0xa000053
-#define MASK_FSUB_D 0xfe00007f
-#define MATCH_FMUL_D 0x12000053
-#define MASK_FMUL_D 0xfe00007f
-#define MATCH_FDIV_D 0x1a000053
-#define MASK_FDIV_D 0xfe00007f
-#define MATCH_FSGNJ_D 0x22000053
-#define MASK_FSGNJ_D 0xfe00707f
-#define MATCH_FSGNJN_D 0x22001053
-#define MASK_FSGNJN_D 0xfe00707f
-#define MATCH_FSGNJX_D 0x22002053
-#define MASK_FSGNJX_D 0xfe00707f
-#define MATCH_FMIN_D 0x2a000053
-#define MASK_FMIN_D 0xfe00707f
-#define MATCH_FMAX_D 0x2a001053
-#define MASK_FMAX_D 0xfe00707f
-#define MATCH_FCVT_S_D 0x40100053
-#define MASK_FCVT_S_D 0xfff0007f
-#define MATCH_FCVT_D_S 0x42000053
-#define MASK_FCVT_D_S 0xfff0007f
-#define MATCH_FSQRT_D 0x5a000053
-#define MASK_FSQRT_D 0xfff0007f
-#define MATCH_FLE_D 0xa2000053
-#define MASK_FLE_D 0xfe00707f
-#define MATCH_FLT_D 0xa2001053
-#define MASK_FLT_D 0xfe00707f
-#define MATCH_FEQ_D 0xa2002053
-#define MASK_FEQ_D 0xfe00707f
-#define MATCH_FCVT_W_D 0xc2000053
-#define MASK_FCVT_W_D 0xfff0007f
-#define MATCH_FCVT_WU_D 0xc2100053
-#define MASK_FCVT_WU_D 0xfff0007f
-#define MATCH_FCLASS_D 0xe2001053
-#define MASK_FCLASS_D 0xfff0707f
-#define MATCH_FCVT_D_W 0xd2000053
-#define MASK_FCVT_D_W 0xfff0007f
-#define MATCH_FCVT_D_WU 0xd2100053
-#define MASK_FCVT_D_WU 0xfff0007f
-#define MATCH_FLD 0x3007
-#define MASK_FLD 0x707f
-#define MATCH_FSD 0x3027
-#define MASK_FSD 0x707f
-#define MATCH_FMADD_D 0x2000043
-#define MASK_FMADD_D 0x600007f
-#define MATCH_FMSUB_D 0x2000047
-#define MASK_FMSUB_D 0x600007f
-#define MATCH_FNMSUB_D 0x200004b
-#define MASK_FNMSUB_D 0x600007f
-#define MATCH_FNMADD_D 0x200004f
-#define MASK_FNMADD_D 0x600007f
-#define MATCH_FCVT_L_D 0xc2200053
-#define MASK_FCVT_L_D 0xfff0007f
-#define MATCH_FCVT_LU_D 0xc2300053
-#define MASK_FCVT_LU_D 0xfff0007f
-#define MATCH_FMV_X_D 0xe2000053
-#define MASK_FMV_X_D 0xfff0707f
-#define MATCH_FCVT_D_L 0xd2200053
-#define MASK_FCVT_D_L 0xfff0007f
-#define MATCH_FCVT_D_LU 0xd2300053
-#define MASK_FCVT_D_LU 0xfff0007f
-#define MATCH_FMV_D_X 0xf2000053
-#define MASK_FMV_D_X 0xfff0707f
-#define MATCH_FADD_Q 0x6000053
-#define MASK_FADD_Q 0xfe00007f
-#define MATCH_FSUB_Q 0xe000053
-#define MASK_FSUB_Q 0xfe00007f
-#define MATCH_FMUL_Q 0x16000053
-#define MASK_FMUL_Q 0xfe00007f
-#define MATCH_FDIV_Q 0x1e000053
-#define MASK_FDIV_Q 0xfe00007f
-#define MATCH_FSGNJ_Q 0x26000053
-#define MASK_FSGNJ_Q 0xfe00707f
-#define MATCH_FSGNJN_Q 0x26001053
-#define MASK_FSGNJN_Q 0xfe00707f
-#define MATCH_FSGNJX_Q 0x26002053
-#define MASK_FSGNJX_Q 0xfe00707f
-#define MATCH_FMIN_Q 0x2e000053
-#define MASK_FMIN_Q 0xfe00707f
-#define MATCH_FMAX_Q 0x2e001053
-#define MASK_FMAX_Q 0xfe00707f
-#define MATCH_FCVT_S_Q 0x40300053
-#define MASK_FCVT_S_Q 0xfff0007f
-#define MATCH_FCVT_Q_S 0x46000053
-#define MASK_FCVT_Q_S 0xfff0007f
-#define MATCH_FCVT_D_Q 0x42300053
-#define MASK_FCVT_D_Q 0xfff0007f
-#define MATCH_FCVT_Q_D 0x46100053
-#define MASK_FCVT_Q_D 0xfff0007f
-#define MATCH_FSQRT_Q 0x5e000053
-#define MASK_FSQRT_Q 0xfff0007f
-#define MATCH_FLE_Q 0xa6000053
-#define MASK_FLE_Q 0xfe00707f
-#define MATCH_FLT_Q 0xa6001053
-#define MASK_FLT_Q 0xfe00707f
-#define MATCH_FEQ_Q 0xa6002053
-#define MASK_FEQ_Q 0xfe00707f
-#define MATCH_FCVT_W_Q 0xc6000053
-#define MASK_FCVT_W_Q 0xfff0007f
-#define MATCH_FCVT_WU_Q 0xc6100053
-#define MASK_FCVT_WU_Q 0xfff0007f
-#define MATCH_FCLASS_Q 0xe6001053
-#define MASK_FCLASS_Q 0xfff0707f
-#define MATCH_FCVT_Q_W 0xd6000053
-#define MASK_FCVT_Q_W 0xfff0007f
-#define MATCH_FCVT_Q_WU 0xd6100053
-#define MASK_FCVT_Q_WU 0xfff0007f
-#define MATCH_FLQ 0x4007
-#define MASK_FLQ 0x707f
-#define MATCH_FSQ 0x4027
-#define MASK_FSQ 0x707f
-#define MATCH_FMADD_Q 0x6000043
-#define MASK_FMADD_Q 0x600007f
-#define MATCH_FMSUB_Q 0x6000047
-#define MASK_FMSUB_Q 0x600007f
-#define MATCH_FNMSUB_Q 0x600004b
-#define MASK_FNMSUB_Q 0x600007f
-#define MATCH_FNMADD_Q 0x600004f
-#define MASK_FNMADD_Q 0x600007f
-#define MATCH_FCVT_L_Q 0xc6200053
-#define MASK_FCVT_L_Q 0xfff0007f
-#define MATCH_FCVT_LU_Q 0xc6300053
-#define MASK_FCVT_LU_Q 0xfff0007f
-#define MATCH_FCVT_Q_L 0xd6200053
-#define MASK_FCVT_Q_L 0xfff0007f
-#define MATCH_FCVT_Q_LU 0xd6300053
-#define MASK_FCVT_Q_LU 0xfff0007f
+#define MASK_AMOSWAP_D 0xf800707f
+#define MATCH_AMOSWAP_W 0x800202f
+#define MASK_AMOSWAP_W 0xf800707f
+#define MATCH_AMOXOR_D 0x2000302f
+#define MASK_AMOXOR_D 0xf800707f
+#define MATCH_AMOXOR_W 0x2000202f
+#define MASK_AMOXOR_W 0xf800707f
+#define MATCH_AND 0x7033
+#define MASK_AND 0xfe00707f
+#define MATCH_ANDI 0x7013
+#define MASK_ANDI 0x707f
#define MATCH_ANDN 0x40007033
-#define MASK_ANDN 0xfe00707f
-#define MATCH_ORN 0x40006033
-#define MASK_ORN 0xfe00707f
-#define MATCH_XNOR 0x40004033
-#define MASK_XNOR 0xfe00707f
-#define MATCH_SLO 0x20001033
-#define MASK_SLO 0xfe00707f
-#define MATCH_SRO 0x20005033
-#define MASK_SRO 0xfe00707f
-#define MATCH_ROL 0x60001033
-#define MASK_ROL 0xfe00707f
-#define MATCH_ROR 0x60005033
-#define MASK_ROR 0xfe00707f
+#define MASK_ANDN 0xfe00707f
+#define MATCH_AUIPC 0x17
+#define MASK_AUIPC 0x7f
+#define MATCH_AVE 0xe0000077
+#define MASK_AVE 0xfe00707f
#define MATCH_BCLR 0x48001033
-#define MASK_BCLR 0xfe00707f
-#define MATCH_BSET 0x28001033
-#define MASK_BSET 0xfe00707f
-#define MATCH_BINV 0x68001033
-#define MASK_BINV 0xfe00707f
-#define MATCH_BEXT 0x48005033
-#define MASK_BEXT 0xfe00707f
-#define MATCH_GORC 0x28005033
-#define MASK_GORC 0xfe00707f
-#define MATCH_GREV 0x68005033
-#define MASK_GREV 0xfe00707f
-#define MATCH_SLOI 0x20001013
-#define MASK_SLOI 0xfc00707f
-#define MATCH_SROI 0x20005013
-#define MASK_SROI 0xfc00707f
-#define MATCH_RORI 0x60005013
-#define MASK_RORI 0xfc00707f
+#define MASK_BCLR 0xfe00707f
#define MATCH_BCLRI 0x48001013
-#define MASK_BCLRI 0xfc00707f
-#define MATCH_BSETI 0x28001013
-#define MASK_BSETI 0xfc00707f
-#define MATCH_BINVI 0x68001013
-#define MASK_BINVI 0xfc00707f
-#define MATCH_BEXTI 0x48005013
-#define MASK_BEXTI 0xfc00707f
-#define MATCH_GORCI 0x28005013
-#define MASK_GORCI 0xfc00707f
-#define MATCH_GREVI 0x68005013
-#define MASK_GREVI 0xfc00707f
-#define MATCH_CMIX 0x6001033
-#define MASK_CMIX 0x600707f
-#define MATCH_CMOV 0x6005033
-#define MASK_CMOV 0x600707f
-#define MATCH_FSL 0x4001033
-#define MASK_FSL 0x600707f
-#define MATCH_FSR 0x4005033
-#define MASK_FSR 0x600707f
-#define MATCH_FSRI 0x4005013
-#define MASK_FSRI 0x400707f
-#define MATCH_CLZ 0x60001013
-#define MASK_CLZ 0xfff0707f
-#define MATCH_CTZ 0x60101013
-#define MASK_CTZ 0xfff0707f
-#define MATCH_CPOP 0x60201013
-#define MASK_CPOP 0xfff0707f
-#define MATCH_SEXT_B 0x60401013
-#define MASK_SEXT_B 0xfff0707f
-#define MATCH_SEXT_H 0x60501013
-#define MASK_SEXT_H 0xfff0707f
-#define MATCH_CRC32_B 0x61001013
-#define MASK_CRC32_B 0xfff0707f
-#define MATCH_CRC32_H 0x61101013
-#define MASK_CRC32_H 0xfff0707f
-#define MATCH_CRC32_W 0x61201013
-#define MASK_CRC32_W 0xfff0707f
-#define MATCH_CRC32C_B 0x61801013
-#define MASK_CRC32C_B 0xfff0707f
-#define MATCH_CRC32C_H 0x61901013
-#define MASK_CRC32C_H 0xfff0707f
-#define MATCH_CRC32C_W 0x61a01013
-#define MASK_CRC32C_W 0xfff0707f
-#define MATCH_SH1ADD 0x20002033
-#define MASK_SH1ADD 0xfe00707f
-#define MATCH_SH2ADD 0x20004033
-#define MASK_SH2ADD 0xfe00707f
-#define MATCH_SH3ADD 0x20006033
-#define MASK_SH3ADD 0xfe00707f
-#define MATCH_CLMUL 0xa001033
-#define MASK_CLMUL 0xfe00707f
-#define MATCH_CLMULR 0xa002033
-#define MASK_CLMULR 0xfe00707f
-#define MATCH_CLMULH 0xa003033
-#define MASK_CLMULH 0xfe00707f
-#define MATCH_MIN 0xa004033
-#define MASK_MIN 0xfe00707f
-#define MATCH_MINU 0xa005033
-#define MASK_MINU 0xfe00707f
-#define MATCH_MAX 0xa006033
-#define MASK_MAX 0xfe00707f
-#define MATCH_MAXU 0xa007033
-#define MASK_MAXU 0xfe00707f
-#define MATCH_SHFL 0x8001033
-#define MASK_SHFL 0xfe00707f
-#define MATCH_UNSHFL 0x8005033
-#define MASK_UNSHFL 0xfe00707f
+#define MASK_BCLRI 0xfc00707f
#define MATCH_BCOMPRESS 0x8006033
-#define MASK_BCOMPRESS 0xfe00707f
+#define MASK_BCOMPRESS 0xfe00707f
+#define MATCH_BCOMPRESSW 0x800603b
+#define MASK_BCOMPRESSW 0xfe00707f
#define MATCH_BDECOMPRESS 0x48006033
-#define MASK_BDECOMPRESS 0xfe00707f
-#define MATCH_PACK 0x8004033
-#define MASK_PACK 0xfe00707f
-#define MATCH_PACKU 0x48004033
-#define MASK_PACKU 0xfe00707f
-#define MATCH_PACKH 0x8007033
-#define MASK_PACKH 0xfe00707f
+#define MASK_BDECOMPRESS 0xfe00707f
+#define MATCH_BDECOMPRESSW 0x4800603b
+#define MASK_BDECOMPRESSW 0xfe00707f
+#define MATCH_BEQ 0x63
+#define MASK_BEQ 0x707f
+#define MATCH_BEXT 0x48005033
+#define MASK_BEXT 0xfe00707f
+#define MATCH_BEXTI 0x48005013
+#define MASK_BEXTI 0xfc00707f
#define MATCH_BFP 0x48007033
-#define MASK_BFP 0xfe00707f
-#define MATCH_SHFLI 0x8001013
-#define MASK_SHFLI 0xfe00707f
-#define MATCH_UNSHFLI 0x8005013
-#define MASK_UNSHFLI 0xfe00707f
-#define MATCH_XPERM4 0x28002033
-#define MASK_XPERM4 0xfe00707f
-#define MATCH_XPERM8 0x28004033
-#define MASK_XPERM8 0xfe00707f
-#define MATCH_XPERM16 0x28006033
-#define MASK_XPERM16 0xfe00707f
+#define MASK_BFP 0xfe00707f
+#define MATCH_BFPW 0x4800703b
+#define MASK_BFPW 0xfe00707f
+#define MATCH_BGE 0x5063
+#define MASK_BGE 0x707f
+#define MATCH_BGEU 0x7063
+#define MASK_BGEU 0x707f
+#define MATCH_BINV 0x68001033
+#define MASK_BINV 0xfe00707f
+#define MATCH_BINVI 0x68001013
+#define MASK_BINVI 0xfc00707f
+#define MATCH_BITREV 0xe6000077
+#define MASK_BITREV 0xfe00707f
+#define MATCH_BITREVI 0xe8000077
+#define MASK_BITREVI 0xfc00707f
+#define MATCH_BLT 0x4063
+#define MASK_BLT 0x707f
+#define MATCH_BLTU 0x6063
+#define MASK_BLTU 0x707f
#define MATCH_BMATFLIP 0x60301013
-#define MASK_BMATFLIP 0xfff0707f
-#define MATCH_CRC32_D 0x61301013
-#define MASK_CRC32_D 0xfff0707f
-#define MATCH_CRC32C_D 0x61b01013
-#define MASK_CRC32C_D 0xfff0707f
+#define MASK_BMATFLIP 0xfff0707f
#define MATCH_BMATOR 0x8003033
-#define MASK_BMATOR 0xfe00707f
+#define MASK_BMATOR 0xfe00707f
#define MATCH_BMATXOR 0x48003033
-#define MASK_BMATXOR 0xfe00707f
-#define MATCH_SLLI_UW 0x800101b
-#define MASK_SLLI_UW 0xfc00707f
-#define MATCH_ADD_UW 0x800003b
-#define MASK_ADD_UW 0xfe00707f
-#define MATCH_SLOW 0x2000103b
-#define MASK_SLOW 0xfe00707f
-#define MATCH_SROW 0x2000503b
-#define MASK_SROW 0xfe00707f
-#define MATCH_ROLW 0x6000103b
-#define MASK_ROLW 0xfe00707f
-#define MATCH_RORW 0x6000503b
-#define MASK_RORW 0xfe00707f
-#define MATCH_GORCW 0x2800503b
-#define MASK_GORCW 0xfe00707f
-#define MATCH_GREVW 0x6800503b
-#define MASK_GREVW 0xfe00707f
-#define MATCH_SLOIW 0x2000101b
-#define MASK_SLOIW 0xfe00707f
-#define MATCH_SROIW 0x2000501b
-#define MASK_SROIW 0xfe00707f
-#define MATCH_RORIW 0x6000501b
-#define MASK_RORIW 0xfe00707f
-#define MATCH_GORCIW 0x2800501b
-#define MASK_GORCIW 0xfe00707f
-#define MATCH_GREVIW 0x6800501b
-#define MASK_GREVIW 0xfe00707f
-#define MATCH_FSLW 0x400103b
-#define MASK_FSLW 0x600707f
-#define MATCH_FSRW 0x400503b
-#define MASK_FSRW 0x600707f
-#define MATCH_FSRIW 0x400501b
-#define MASK_FSRIW 0x600707f
-#define MATCH_CLZW 0x6000101b
-#define MASK_CLZW 0xfff0707f
-#define MATCH_CTZW 0x6010101b
-#define MASK_CTZW 0xfff0707f
-#define MATCH_CPOPW 0x6020101b
-#define MASK_CPOPW 0xfff0707f
-#define MATCH_SH1ADD_UW 0x2000203b
-#define MASK_SH1ADD_UW 0xfe00707f
-#define MATCH_SH2ADD_UW 0x2000403b
-#define MASK_SH2ADD_UW 0xfe00707f
-#define MATCH_SH3ADD_UW 0x2000603b
-#define MASK_SH3ADD_UW 0xfe00707f
-#define MATCH_SHFLW 0x800103b
-#define MASK_SHFLW 0xfe00707f
-#define MATCH_UNSHFLW 0x800503b
-#define MASK_UNSHFLW 0xfe00707f
-#define MATCH_BCOMPRESSW 0x800603b
-#define MASK_BCOMPRESSW 0xfe00707f
-#define MATCH_BDECOMPRESSW 0x4800603b
-#define MASK_BDECOMPRESSW 0xfe00707f
-#define MATCH_PACKW 0x800403b
-#define MASK_PACKW 0xfe00707f
-#define MATCH_PACKUW 0x4800403b
-#define MASK_PACKUW 0xfe00707f
-#define MATCH_BFPW 0x4800703b
-#define MASK_BFPW 0xfe00707f
-#define MATCH_XPERM32 0x28000033
-#define MASK_XPERM32 0xfe00707f
-#define MATCH_ECALL 0x73
-#define MASK_ECALL 0xffffffff
-#define MATCH_EBREAK 0x100073
-#define MASK_EBREAK 0xffffffff
-#define MATCH_SRET 0x10200073
-#define MASK_SRET 0xffffffff
-#define MATCH_MRET 0x30200073
-#define MASK_MRET 0xffffffff
-#define MATCH_DRET 0x7b200073
-#define MASK_DRET 0xffffffff
-#define MATCH_SFENCE_VMA 0x12000073
-#define MASK_SFENCE_VMA 0xfe007fff
-#define MATCH_WFI 0x10500073
-#define MASK_WFI 0xffffffff
-#define MATCH_CSRRW 0x1073
-#define MASK_CSRRW 0x707f
-#define MATCH_CSRRS 0x2073
-#define MASK_CSRRS 0x707f
-#define MATCH_CSRRC 0x3073
-#define MASK_CSRRC 0x707f
-#define MATCH_CSRRWI 0x5073
-#define MASK_CSRRWI 0x707f
-#define MATCH_CSRRSI 0x6073
-#define MASK_CSRRSI 0x707f
-#define MATCH_CSRRCI 0x7073
-#define MASK_CSRRCI 0x707f
-#define MATCH_SINVAL_VMA 0x16000073
-#define MASK_SINVAL_VMA 0xfe007fff
-#define MATCH_SFENCE_W_INVAL 0x18000073
-#define MASK_SFENCE_W_INVAL 0xffffffff
-#define MATCH_SFENCE_INVAL_IR 0x18100073
-#define MASK_SFENCE_INVAL_IR 0xffffffff
-#define MATCH_HINVAL_VVMA 0x26000073
-#define MASK_HINVAL_VVMA 0xfe007fff
-#define MATCH_HINVAL_GVMA 0x66000073
-#define MASK_HINVAL_GVMA 0xfe007fff
-#define MATCH_FADD_H 0x4000053
-#define MASK_FADD_H 0xfe00007f
-#define MATCH_FSUB_H 0xc000053
-#define MASK_FSUB_H 0xfe00007f
-#define MATCH_FMUL_H 0x14000053
-#define MASK_FMUL_H 0xfe00007f
-#define MATCH_FDIV_H 0x1c000053
-#define MASK_FDIV_H 0xfe00007f
-#define MATCH_FSGNJ_H 0x24000053
-#define MASK_FSGNJ_H 0xfe00707f
-#define MATCH_FSGNJN_H 0x24001053
-#define MASK_FSGNJN_H 0xfe00707f
-#define MATCH_FSGNJX_H 0x24002053
-#define MASK_FSGNJX_H 0xfe00707f
-#define MATCH_FMIN_H 0x2c000053
-#define MASK_FMIN_H 0xfe00707f
-#define MATCH_FMAX_H 0x2c001053
-#define MASK_FMAX_H 0xfe00707f
-#define MATCH_FCVT_H_S 0x44000053
-#define MASK_FCVT_H_S 0xfff0007f
-#define MATCH_FCVT_S_H 0x40200053
-#define MASK_FCVT_S_H 0xfff0007f
-#define MATCH_FSQRT_H 0x5c000053
-#define MASK_FSQRT_H 0xfff0007f
-#define MATCH_FLE_H 0xa4000053
-#define MASK_FLE_H 0xfe00707f
-#define MATCH_FLT_H 0xa4001053
-#define MASK_FLT_H 0xfe00707f
-#define MATCH_FEQ_H 0xa4002053
-#define MASK_FEQ_H 0xfe00707f
-#define MATCH_FCVT_W_H 0xc4000053
-#define MASK_FCVT_W_H 0xfff0007f
-#define MATCH_FCVT_WU_H 0xc4100053
-#define MASK_FCVT_WU_H 0xfff0007f
-#define MATCH_FMV_X_H 0xe4000053
-#define MASK_FMV_X_H 0xfff0707f
-#define MATCH_FCLASS_H 0xe4001053
-#define MASK_FCLASS_H 0xfff0707f
-#define MATCH_FCVT_H_W 0xd4000053
-#define MASK_FCVT_H_W 0xfff0007f
-#define MATCH_FCVT_H_WU 0xd4100053
-#define MASK_FCVT_H_WU 0xfff0007f
-#define MATCH_FMV_H_X 0xf4000053
-#define MASK_FMV_H_X 0xfff0707f
-#define MATCH_FLH 0x1007
-#define MASK_FLH 0x707f
-#define MATCH_FSH 0x1027
-#define MASK_FSH 0x707f
-#define MATCH_FMADD_H 0x4000043
-#define MASK_FMADD_H 0x600007f
-#define MATCH_FMSUB_H 0x4000047
-#define MASK_FMSUB_H 0x600007f
-#define MATCH_FNMSUB_H 0x400004b
-#define MASK_FNMSUB_H 0x600007f
-#define MATCH_FNMADD_H 0x400004f
-#define MASK_FNMADD_H 0x600007f
-#define MATCH_FCVT_H_D 0x44100053
-#define MASK_FCVT_H_D 0xfff0007f
-#define MATCH_FCVT_D_H 0x42200053
-#define MASK_FCVT_D_H 0xfff0007f
-#define MATCH_FCVT_H_Q 0x44300053
-#define MASK_FCVT_H_Q 0xfff0007f
-#define MATCH_FCVT_Q_H 0x46200053
-#define MASK_FCVT_Q_H 0xfff0007f
-#define MATCH_FCVT_L_H 0xc4200053
-#define MASK_FCVT_L_H 0xfff0007f
-#define MATCH_FCVT_LU_H 0xc4300053
-#define MASK_FCVT_LU_H 0xfff0007f
-#define MATCH_FCVT_H_L 0xd4200053
-#define MASK_FCVT_H_L 0xfff0007f
-#define MATCH_FCVT_H_LU 0xd4300053
-#define MASK_FCVT_H_LU 0xfff0007f
-#define MATCH_SM4ED 0x30000033
-#define MASK_SM4ED 0x3e00707f
-#define MATCH_SM4KS 0x34000033
-#define MASK_SM4KS 0x3e00707f
-#define MATCH_SM3P0 0x10801013
-#define MASK_SM3P0 0xfff0707f
-#define MATCH_SM3P1 0x10901013
-#define MASK_SM3P1 0xfff0707f
-#define MATCH_SHA256SUM0 0x10001013
-#define MASK_SHA256SUM0 0xfff0707f
-#define MATCH_SHA256SUM1 0x10101013
-#define MASK_SHA256SUM1 0xfff0707f
-#define MATCH_SHA256SIG0 0x10201013
-#define MASK_SHA256SIG0 0xfff0707f
-#define MATCH_SHA256SIG1 0x10301013
-#define MASK_SHA256SIG1 0xfff0707f
-#define MATCH_AES32ESMI 0x26000033
-#define MASK_AES32ESMI 0x3e00707f
-#define MATCH_AES32ESI 0x22000033
-#define MASK_AES32ESI 0x3e00707f
-#define MATCH_AES32DSMI 0x2e000033
-#define MASK_AES32DSMI 0x3e00707f
-#define MATCH_AES32DSI 0x2a000033
-#define MASK_AES32DSI 0x3e00707f
-#define MATCH_SHA512SUM0R 0x50000033
-#define MASK_SHA512SUM0R 0xfe00707f
-#define MATCH_SHA512SUM1R 0x52000033
-#define MASK_SHA512SUM1R 0xfe00707f
-#define MATCH_SHA512SIG0L 0x54000033
-#define MASK_SHA512SIG0L 0xfe00707f
-#define MATCH_SHA512SIG0H 0x5c000033
-#define MASK_SHA512SIG0H 0xfe00707f
-#define MATCH_SHA512SIG1L 0x56000033
-#define MASK_SHA512SIG1L 0xfe00707f
-#define MATCH_SHA512SIG1H 0x5e000033
-#define MASK_SHA512SIG1H 0xfe00707f
-#define MATCH_AES64KS1I 0x31001013
-#define MASK_AES64KS1I 0xff00707f
-#define MATCH_AES64IM 0x30001013
-#define MASK_AES64IM 0xfff0707f
-#define MATCH_AES64KS2 0x7e000033
-#define MASK_AES64KS2 0xfe00707f
-#define MATCH_AES64ESM 0x36000033
-#define MASK_AES64ESM 0xfe00707f
-#define MATCH_AES64ES 0x32000033
-#define MASK_AES64ES 0xfe00707f
-#define MATCH_AES64DSM 0x3e000033
-#define MASK_AES64DSM 0xfe00707f
-#define MATCH_AES64DS 0x3a000033
-#define MASK_AES64DS 0xfe00707f
-#define MATCH_SHA512SUM0 0x10401013
-#define MASK_SHA512SUM0 0xfff0707f
-#define MATCH_SHA512SUM1 0x10501013
-#define MASK_SHA512SUM1 0xfff0707f
-#define MATCH_SHA512SIG0 0x10601013
-#define MASK_SHA512SIG0 0xfff0707f
-#define MATCH_SHA512SIG1 0x10701013
-#define MASK_SHA512SIG1 0xfff0707f
-#define MATCH_CBO_CLEAN 0x10200f
-#define MASK_CBO_CLEAN 0xfff07fff
-#define MATCH_CBO_FLUSH 0x20200f
-#define MASK_CBO_FLUSH 0xfff07fff
-#define MATCH_CBO_INVAL 0x200f
-#define MASK_CBO_INVAL 0xfff07fff
-#define MATCH_CBO_ZERO 0x40200f
-#define MASK_CBO_ZERO 0xfff07fff
-#define MATCH_PREFETCH_I 0x6013
-#define MASK_PREFETCH_I 0x1f07fff
-#define MATCH_PREFETCH_R 0x106013
-#define MASK_PREFETCH_R 0x1f07fff
-#define MATCH_PREFETCH_W 0x306013
-#define MASK_PREFETCH_W 0x1f07fff
-#define MATCH_C_NOP 0x1
-#define MASK_C_NOP 0xffff
+#define MASK_BMATXOR 0xfe00707f
+#define MATCH_BNE 0x1063
+#define MASK_BNE 0x707f
+#define MATCH_BPICK 0x3077
+#define MASK_BPICK 0x600707f
+#define MATCH_BSET 0x28001033
+#define MASK_BSET 0xfe00707f
+#define MATCH_BSETI 0x28001013
+#define MASK_BSETI 0xfc00707f
+#define MATCH_C_ADD 0x9002
+#define MASK_C_ADD 0xf003
+#define MATCH_C_ADDI 0x1
+#define MASK_C_ADDI 0xe003
#define MATCH_C_ADDI16SP 0x6101
-#define MASK_C_ADDI16SP 0xef83
-#define MATCH_C_JR 0x8002
-#define MASK_C_JR 0xf07f
-#define MATCH_C_JALR 0x9002
-#define MASK_C_JALR 0xf07f
-#define MATCH_C_EBREAK 0x9002
-#define MASK_C_EBREAK 0xffff
+#define MASK_C_ADDI16SP 0xef83
#define MATCH_C_ADDI4SPN 0x0
-#define MASK_C_ADDI4SPN 0xe003
+#define MASK_C_ADDI4SPN 0xe003
+#define MATCH_C_ADDIW 0x2001
+#define MASK_C_ADDIW 0xe003
+#define MATCH_C_ADDW 0x9c21
+#define MASK_C_ADDW 0xfc63
+#define MATCH_C_AND 0x8c61
+#define MASK_C_AND 0xfc63
+#define MATCH_C_ANDI 0x8801
+#define MASK_C_ANDI 0xec03
+#define MATCH_C_BEQZ 0xc001
+#define MASK_C_BEQZ 0xe003
+#define MATCH_C_BNEZ 0xe001
+#define MASK_C_BNEZ 0xe003
+#define MATCH_C_EBREAK 0x9002
+#define MASK_C_EBREAK 0xffff
#define MATCH_C_FLD 0x2000
-#define MASK_C_FLD 0xe003
-#define MATCH_C_LW 0x4000
-#define MASK_C_LW 0xe003
+#define MASK_C_FLD 0xe003
+#define MATCH_C_FLDSP 0x2002
+#define MASK_C_FLDSP 0xe003
#define MATCH_C_FLW 0x6000
-#define MASK_C_FLW 0xe003
+#define MASK_C_FLW 0xe003
+#define MATCH_C_FLWSP 0x6002
+#define MASK_C_FLWSP 0xe003
#define MATCH_C_FSD 0xa000
-#define MASK_C_FSD 0xe003
-#define MATCH_C_SW 0xc000
-#define MASK_C_SW 0xe003
+#define MASK_C_FSD 0xe003
+#define MATCH_C_FSDSP 0xa002
+#define MASK_C_FSDSP 0xe003
#define MATCH_C_FSW 0xe000
-#define MASK_C_FSW 0xe003
-#define MATCH_C_ADDI 0x1
-#define MASK_C_ADDI 0xe003
+#define MASK_C_FSW 0xe003
+#define MATCH_C_FSWSP 0xe002
+#define MASK_C_FSWSP 0xe003
+#define MATCH_C_J 0xa001
+#define MASK_C_J 0xe003
#define MATCH_C_JAL 0x2001
-#define MASK_C_JAL 0xe003
+#define MASK_C_JAL 0xe003
+#define MATCH_C_JALR 0x9002
+#define MASK_C_JALR 0xf07f
+#define MATCH_C_JR 0x8002
+#define MASK_C_JR 0xf07f
+#define MATCH_C_LD 0x6000
+#define MASK_C_LD 0xe003
+#define MATCH_C_LDSP 0x6002
+#define MASK_C_LDSP 0xe003
#define MATCH_C_LI 0x4001
-#define MASK_C_LI 0xe003
+#define MASK_C_LI 0xe003
#define MATCH_C_LUI 0x6001
-#define MASK_C_LUI 0xe003
-#define MATCH_C_SRLI 0x8001
-#define MASK_C_SRLI 0xec03
-#define MATCH_C_SRAI 0x8401
-#define MASK_C_SRAI 0xec03
-#define MATCH_C_ANDI 0x8801
-#define MASK_C_ANDI 0xec03
-#define MATCH_C_SUB 0x8c01
-#define MASK_C_SUB 0xfc63
-#define MATCH_C_XOR 0x8c21
-#define MASK_C_XOR 0xfc63
-#define MATCH_C_OR 0x8c41
-#define MASK_C_OR 0xfc63
-#define MATCH_C_AND 0x8c61
-#define MASK_C_AND 0xfc63
-#define MATCH_C_J 0xa001
-#define MASK_C_J 0xe003
-#define MATCH_C_BEQZ 0xc001
-#define MASK_C_BEQZ 0xe003
-#define MATCH_C_BNEZ 0xe001
-#define MASK_C_BNEZ 0xe003
-#define MATCH_C_SLLI 0x2
-#define MASK_C_SLLI 0xe003
-#define MATCH_C_FLDSP 0x2002
-#define MASK_C_FLDSP 0xe003
+#define MASK_C_LUI 0xe003
+#define MATCH_C_LW 0x4000
+#define MASK_C_LW 0xe003
#define MATCH_C_LWSP 0x4002
-#define MASK_C_LWSP 0xe003
-#define MATCH_C_FLWSP 0x6002
-#define MASK_C_FLWSP 0xe003
+#define MASK_C_LWSP 0xe003
#define MATCH_C_MV 0x8002
-#define MASK_C_MV 0xf003
-#define MATCH_C_ADD 0x9002
-#define MASK_C_ADD 0xf003
-#define MATCH_C_FSDSP 0xa002
-#define MASK_C_FSDSP 0xe003
-#define MATCH_C_SWSP 0xc002
-#define MASK_C_SWSP 0xe003
-#define MATCH_C_FSWSP 0xe002
-#define MASK_C_FSWSP 0xe003
-#define MATCH_C_SRLI_RV32 0x8001
-#define MASK_C_SRLI_RV32 0xfc03
-#define MATCH_C_SRAI_RV32 0x8401
-#define MASK_C_SRAI_RV32 0xfc03
-#define MATCH_C_SLLI_RV32 0x2
-#define MASK_C_SLLI_RV32 0xf003
-#define MATCH_C_LD 0x6000
-#define MASK_C_LD 0xe003
+#define MASK_C_MV 0xf003
+#define MATCH_C_NOP 0x1
+#define MASK_C_NOP 0xef83
+#define MATCH_C_OR 0x8c41
+#define MASK_C_OR 0xfc63
#define MATCH_C_SD 0xe000
-#define MASK_C_SD 0xe003
-#define MATCH_C_SUBW 0x9c01
-#define MASK_C_SUBW 0xfc63
-#define MATCH_C_ADDW 0x9c21
-#define MASK_C_ADDW 0xfc63
-#define MATCH_C_ADDIW 0x2001
-#define MASK_C_ADDIW 0xe003
-#define MATCH_C_LDSP 0x6002
-#define MASK_C_LDSP 0xe003
+#define MASK_C_SD 0xe003
#define MATCH_C_SDSP 0xe002
-#define MASK_C_SDSP 0xe003
-#define MATCH_CUSTOM0 0xb
-#define MASK_CUSTOM0 0x707f
-#define MATCH_CUSTOM0_RS1 0x200b
-#define MASK_CUSTOM0_RS1 0x707f
-#define MATCH_CUSTOM0_RS1_RS2 0x300b
-#define MASK_CUSTOM0_RS1_RS2 0x707f
-#define MATCH_CUSTOM0_RD 0x400b
-#define MASK_CUSTOM0_RD 0x707f
-#define MATCH_CUSTOM0_RD_RS1 0x600b
-#define MASK_CUSTOM0_RD_RS1 0x707f
-#define MATCH_CUSTOM0_RD_RS1_RS2 0x700b
-#define MASK_CUSTOM0_RD_RS1_RS2 0x707f
-#define MATCH_CUSTOM1 0x2b
-#define MASK_CUSTOM1 0x707f
-#define MATCH_CUSTOM1_RS1 0x202b
-#define MASK_CUSTOM1_RS1 0x707f
-#define MATCH_CUSTOM1_RS1_RS2 0x302b
-#define MASK_CUSTOM1_RS1_RS2 0x707f
-#define MATCH_CUSTOM1_RD 0x402b
-#define MASK_CUSTOM1_RD 0x707f
-#define MATCH_CUSTOM1_RD_RS1 0x602b
-#define MASK_CUSTOM1_RD_RS1 0x707f
-#define MATCH_CUSTOM1_RD_RS1_RS2 0x702b
-#define MASK_CUSTOM1_RD_RS1_RS2 0x707f
-#define MATCH_CUSTOM2 0x5b
-#define MASK_CUSTOM2 0x707f
-#define MATCH_CUSTOM2_RS1 0x205b
-#define MASK_CUSTOM2_RS1 0x707f
-#define MATCH_CUSTOM2_RS1_RS2 0x305b
-#define MASK_CUSTOM2_RS1_RS2 0x707f
-#define MATCH_CUSTOM2_RD 0x405b
-#define MASK_CUSTOM2_RD 0x707f
-#define MATCH_CUSTOM2_RD_RS1 0x605b
-#define MASK_CUSTOM2_RD_RS1 0x707f
-#define MATCH_CUSTOM2_RD_RS1_RS2 0x705b
-#define MASK_CUSTOM2_RD_RS1_RS2 0x707f
-#define MATCH_CUSTOM3 0x7b
-#define MASK_CUSTOM3 0x707f
-#define MATCH_CUSTOM3_RS1 0x207b
-#define MASK_CUSTOM3_RS1 0x707f
-#define MATCH_CUSTOM3_RS1_RS2 0x307b
-#define MASK_CUSTOM3_RS1_RS2 0x707f
-#define MATCH_CUSTOM3_RD 0x407b
-#define MASK_CUSTOM3_RD 0x707f
-#define MATCH_CUSTOM3_RD_RS1 0x607b
-#define MASK_CUSTOM3_RD_RS1 0x707f
-#define MATCH_CUSTOM3_RD_RS1_RS2 0x707b
-#define MASK_CUSTOM3_RD_RS1_RS2 0x707f
-#define MATCH_VSETIVLI 0xc0007057
-#define MASK_VSETIVLI 0xc000707f
-#define MATCH_VSETVLI 0x7057
-#define MASK_VSETVLI 0x8000707f
-#define MATCH_VSETVL 0x80007057
-#define MASK_VSETVL 0xfe00707f
-#define MATCH_VLM_V 0x2b00007
-#define MASK_VLM_V 0xfff0707f
-#define MATCH_VSM_V 0x2b00027
-#define MASK_VSM_V 0xfff0707f
-#define MATCH_VLE8_V 0x7
-#define MASK_VLE8_V 0x1df0707f
-#define MATCH_VLE16_V 0x5007
-#define MASK_VLE16_V 0x1df0707f
-#define MATCH_VLE32_V 0x6007
-#define MASK_VLE32_V 0x1df0707f
-#define MATCH_VLE64_V 0x7007
-#define MASK_VLE64_V 0x1df0707f
-#define MATCH_VLE128_V 0x10000007
-#define MASK_VLE128_V 0x1df0707f
-#define MATCH_VLE256_V 0x10005007
-#define MASK_VLE256_V 0x1df0707f
-#define MATCH_VLE512_V 0x10006007
-#define MASK_VLE512_V 0x1df0707f
-#define MATCH_VLE1024_V 0x10007007
-#define MASK_VLE1024_V 0x1df0707f
-#define MATCH_VSE8_V 0x27
-#define MASK_VSE8_V 0x1df0707f
-#define MATCH_VSE16_V 0x5027
-#define MASK_VSE16_V 0x1df0707f
-#define MATCH_VSE32_V 0x6027
-#define MASK_VSE32_V 0x1df0707f
-#define MATCH_VSE64_V 0x7027
-#define MASK_VSE64_V 0x1df0707f
-#define MATCH_VSE128_V 0x10000027
-#define MASK_VSE128_V 0x1df0707f
-#define MATCH_VSE256_V 0x10005027
-#define MASK_VSE256_V 0x1df0707f
-#define MATCH_VSE512_V 0x10006027
-#define MASK_VSE512_V 0x1df0707f
-#define MATCH_VSE1024_V 0x10007027
-#define MASK_VSE1024_V 0x1df0707f
-#define MATCH_VLUXEI8_V 0x4000007
-#define MASK_VLUXEI8_V 0x1c00707f
-#define MATCH_VLUXEI16_V 0x4005007
-#define MASK_VLUXEI16_V 0x1c00707f
-#define MATCH_VLUXEI32_V 0x4006007
-#define MASK_VLUXEI32_V 0x1c00707f
-#define MATCH_VLUXEI64_V 0x4007007
-#define MASK_VLUXEI64_V 0x1c00707f
-#define MATCH_VLUXEI128_V 0x14000007
-#define MASK_VLUXEI128_V 0x1c00707f
-#define MATCH_VLUXEI256_V 0x14005007
-#define MASK_VLUXEI256_V 0x1c00707f
-#define MATCH_VLUXEI512_V 0x14006007
-#define MASK_VLUXEI512_V 0x1c00707f
-#define MATCH_VLUXEI1024_V 0x14007007
-#define MASK_VLUXEI1024_V 0x1c00707f
-#define MATCH_VSUXEI8_V 0x4000027
-#define MASK_VSUXEI8_V 0x1c00707f
-#define MATCH_VSUXEI16_V 0x4005027
-#define MASK_VSUXEI16_V 0x1c00707f
-#define MATCH_VSUXEI32_V 0x4006027
-#define MASK_VSUXEI32_V 0x1c00707f
-#define MATCH_VSUXEI64_V 0x4007027
-#define MASK_VSUXEI64_V 0x1c00707f
-#define MATCH_VSUXEI128_V 0x14000027
-#define MASK_VSUXEI128_V 0x1c00707f
-#define MATCH_VSUXEI256_V 0x14005027
-#define MASK_VSUXEI256_V 0x1c00707f
-#define MATCH_VSUXEI512_V 0x14006027
-#define MASK_VSUXEI512_V 0x1c00707f
-#define MATCH_VSUXEI1024_V 0x14007027
-#define MASK_VSUXEI1024_V 0x1c00707f
-#define MATCH_VLSE8_V 0x8000007
-#define MASK_VLSE8_V 0x1c00707f
-#define MATCH_VLSE16_V 0x8005007
-#define MASK_VLSE16_V 0x1c00707f
-#define MATCH_VLSE32_V 0x8006007
-#define MASK_VLSE32_V 0x1c00707f
-#define MATCH_VLSE64_V 0x8007007
-#define MASK_VLSE64_V 0x1c00707f
-#define MATCH_VLSE128_V 0x18000007
-#define MASK_VLSE128_V 0x1c00707f
-#define MATCH_VLSE256_V 0x18005007
-#define MASK_VLSE256_V 0x1c00707f
-#define MATCH_VLSE512_V 0x18006007
-#define MASK_VLSE512_V 0x1c00707f
-#define MATCH_VLSE1024_V 0x18007007
-#define MASK_VLSE1024_V 0x1c00707f
-#define MATCH_VSSE8_V 0x8000027
-#define MASK_VSSE8_V 0x1c00707f
-#define MATCH_VSSE16_V 0x8005027
-#define MASK_VSSE16_V 0x1c00707f
-#define MATCH_VSSE32_V 0x8006027
-#define MASK_VSSE32_V 0x1c00707f
-#define MATCH_VSSE64_V 0x8007027
-#define MASK_VSSE64_V 0x1c00707f
-#define MATCH_VSSE128_V 0x18000027
-#define MASK_VSSE128_V 0x1c00707f
-#define MATCH_VSSE256_V 0x18005027
-#define MASK_VSSE256_V 0x1c00707f
-#define MATCH_VSSE512_V 0x18006027
-#define MASK_VSSE512_V 0x1c00707f
-#define MATCH_VSSE1024_V 0x18007027
-#define MASK_VSSE1024_V 0x1c00707f
-#define MATCH_VLOXEI8_V 0xc000007
-#define MASK_VLOXEI8_V 0x1c00707f
-#define MATCH_VLOXEI16_V 0xc005007
-#define MASK_VLOXEI16_V 0x1c00707f
-#define MATCH_VLOXEI32_V 0xc006007
-#define MASK_VLOXEI32_V 0x1c00707f
-#define MATCH_VLOXEI64_V 0xc007007
-#define MASK_VLOXEI64_V 0x1c00707f
-#define MATCH_VLOXEI128_V 0x1c000007
-#define MASK_VLOXEI128_V 0x1c00707f
-#define MATCH_VLOXEI256_V 0x1c005007
-#define MASK_VLOXEI256_V 0x1c00707f
-#define MATCH_VLOXEI512_V 0x1c006007
-#define MASK_VLOXEI512_V 0x1c00707f
-#define MATCH_VLOXEI1024_V 0x1c007007
-#define MASK_VLOXEI1024_V 0x1c00707f
-#define MATCH_VSOXEI8_V 0xc000027
-#define MASK_VSOXEI8_V 0x1c00707f
-#define MATCH_VSOXEI16_V 0xc005027
-#define MASK_VSOXEI16_V 0x1c00707f
-#define MATCH_VSOXEI32_V 0xc006027
-#define MASK_VSOXEI32_V 0x1c00707f
-#define MATCH_VSOXEI64_V 0xc007027
-#define MASK_VSOXEI64_V 0x1c00707f
-#define MATCH_VSOXEI128_V 0x1c000027
-#define MASK_VSOXEI128_V 0x1c00707f
-#define MATCH_VSOXEI256_V 0x1c005027
-#define MASK_VSOXEI256_V 0x1c00707f
-#define MATCH_VSOXEI512_V 0x1c006027
-#define MASK_VSOXEI512_V 0x1c00707f
-#define MATCH_VSOXEI1024_V 0x1c007027
-#define MASK_VSOXEI1024_V 0x1c00707f
-#define MATCH_VLE8FF_V 0x1000007
-#define MASK_VLE8FF_V 0x1df0707f
-#define MATCH_VLE16FF_V 0x1005007
-#define MASK_VLE16FF_V 0x1df0707f
-#define MATCH_VLE32FF_V 0x1006007
-#define MASK_VLE32FF_V 0x1df0707f
-#define MATCH_VLE64FF_V 0x1007007
-#define MASK_VLE64FF_V 0x1df0707f
-#define MATCH_VLE128FF_V 0x11000007
-#define MASK_VLE128FF_V 0x1df0707f
-#define MATCH_VLE256FF_V 0x11005007
-#define MASK_VLE256FF_V 0x1df0707f
-#define MATCH_VLE512FF_V 0x11006007
-#define MASK_VLE512FF_V 0x1df0707f
-#define MATCH_VLE1024FF_V 0x11007007
-#define MASK_VLE1024FF_V 0x1df0707f
-#define MATCH_VL1RE8_V 0x2800007
-#define MASK_VL1RE8_V 0xfff0707f
-#define MATCH_VL1RE16_V 0x2805007
-#define MASK_VL1RE16_V 0xfff0707f
-#define MATCH_VL1RE32_V 0x2806007
-#define MASK_VL1RE32_V 0xfff0707f
-#define MATCH_VL1RE64_V 0x2807007
-#define MASK_VL1RE64_V 0xfff0707f
-#define MATCH_VL2RE8_V 0x22800007
-#define MASK_VL2RE8_V 0xfff0707f
-#define MATCH_VL2RE16_V 0x22805007
-#define MASK_VL2RE16_V 0xfff0707f
-#define MATCH_VL2RE32_V 0x22806007
-#define MASK_VL2RE32_V 0xfff0707f
-#define MATCH_VL2RE64_V 0x22807007
-#define MASK_VL2RE64_V 0xfff0707f
-#define MATCH_VL4RE8_V 0x62800007
-#define MASK_VL4RE8_V 0xfff0707f
-#define MATCH_VL4RE16_V 0x62805007
-#define MASK_VL4RE16_V 0xfff0707f
-#define MATCH_VL4RE32_V 0x62806007
-#define MASK_VL4RE32_V 0xfff0707f
-#define MATCH_VL4RE64_V 0x62807007
-#define MASK_VL4RE64_V 0xfff0707f
-#define MATCH_VL8RE8_V 0xe2800007
-#define MASK_VL8RE8_V 0xfff0707f
-#define MATCH_VL8RE16_V 0xe2805007
-#define MASK_VL8RE16_V 0xfff0707f
-#define MATCH_VL8RE32_V 0xe2806007
-#define MASK_VL8RE32_V 0xfff0707f
-#define MATCH_VL8RE64_V 0xe2807007
-#define MASK_VL8RE64_V 0xfff0707f
-#define MATCH_VS1R_V 0x2800027
-#define MASK_VS1R_V 0xfff0707f
-#define MATCH_VS2R_V 0x22800027
-#define MASK_VS2R_V 0xfff0707f
-#define MATCH_VS4R_V 0x62800027
-#define MASK_VS4R_V 0xfff0707f
-#define MATCH_VS8R_V 0xe2800027
-#define MASK_VS8R_V 0xfff0707f
-#define MATCH_VFADD_VF 0x5057
-#define MASK_VFADD_VF 0xfc00707f
-#define MATCH_VFSUB_VF 0x8005057
-#define MASK_VFSUB_VF 0xfc00707f
-#define MATCH_VFMIN_VF 0x10005057
-#define MASK_VFMIN_VF 0xfc00707f
-#define MATCH_VFMAX_VF 0x18005057
-#define MASK_VFMAX_VF 0xfc00707f
-#define MATCH_VFSGNJ_VF 0x20005057
-#define MASK_VFSGNJ_VF 0xfc00707f
-#define MATCH_VFSGNJN_VF 0x24005057
-#define MASK_VFSGNJN_VF 0xfc00707f
-#define MATCH_VFSGNJX_VF 0x28005057
-#define MASK_VFSGNJX_VF 0xfc00707f
-#define MATCH_VFSLIDE1UP_VF 0x38005057
-#define MASK_VFSLIDE1UP_VF 0xfc00707f
-#define MATCH_VFSLIDE1DOWN_VF 0x3c005057
-#define MASK_VFSLIDE1DOWN_VF 0xfc00707f
-#define MATCH_VFMV_S_F 0x42005057
-#define MASK_VFMV_S_F 0xfff0707f
-#define MATCH_VFMERGE_VFM 0x5c005057
-#define MASK_VFMERGE_VFM 0xfe00707f
-#define MATCH_VFMV_V_F 0x5e005057
-#define MASK_VFMV_V_F 0xfff0707f
-#define MATCH_VMFEQ_VF 0x60005057
-#define MASK_VMFEQ_VF 0xfc00707f
-#define MATCH_VMFLE_VF 0x64005057
-#define MASK_VMFLE_VF 0xfc00707f
-#define MATCH_VMFLT_VF 0x6c005057
-#define MASK_VMFLT_VF 0xfc00707f
-#define MATCH_VMFNE_VF 0x70005057
-#define MASK_VMFNE_VF 0xfc00707f
-#define MATCH_VMFGT_VF 0x74005057
-#define MASK_VMFGT_VF 0xfc00707f
-#define MATCH_VMFGE_VF 0x7c005057
-#define MASK_VMFGE_VF 0xfc00707f
-#define MATCH_VFDIV_VF 0x80005057
-#define MASK_VFDIV_VF 0xfc00707f
-#define MATCH_VFRDIV_VF 0x84005057
-#define MASK_VFRDIV_VF 0xfc00707f
-#define MATCH_VFMUL_VF 0x90005057
-#define MASK_VFMUL_VF 0xfc00707f
-#define MATCH_VFRSUB_VF 0x9c005057
-#define MASK_VFRSUB_VF 0xfc00707f
-#define MATCH_VFMADD_VF 0xa0005057
-#define MASK_VFMADD_VF 0xfc00707f
-#define MATCH_VFNMADD_VF 0xa4005057
-#define MASK_VFNMADD_VF 0xfc00707f
-#define MATCH_VFMSUB_VF 0xa8005057
-#define MASK_VFMSUB_VF 0xfc00707f
-#define MATCH_VFNMSUB_VF 0xac005057
-#define MASK_VFNMSUB_VF 0xfc00707f
-#define MATCH_VFMACC_VF 0xb0005057
-#define MASK_VFMACC_VF 0xfc00707f
-#define MATCH_VFNMACC_VF 0xb4005057
-#define MASK_VFNMACC_VF 0xfc00707f
-#define MATCH_VFMSAC_VF 0xb8005057
-#define MASK_VFMSAC_VF 0xfc00707f
-#define MATCH_VFNMSAC_VF 0xbc005057
-#define MASK_VFNMSAC_VF 0xfc00707f
-#define MATCH_VFWADD_VF 0xc0005057
-#define MASK_VFWADD_VF 0xfc00707f
-#define MATCH_VFWSUB_VF 0xc8005057
-#define MASK_VFWSUB_VF 0xfc00707f
-#define MATCH_VFWADD_WF 0xd0005057
-#define MASK_VFWADD_WF 0xfc00707f
-#define MATCH_VFWSUB_WF 0xd8005057
-#define MASK_VFWSUB_WF 0xfc00707f
-#define MATCH_VFWMUL_VF 0xe0005057
-#define MASK_VFWMUL_VF 0xfc00707f
-#define MATCH_VFWMACC_VF 0xf0005057
-#define MASK_VFWMACC_VF 0xfc00707f
-#define MATCH_VFWNMACC_VF 0xf4005057
-#define MASK_VFWNMACC_VF 0xfc00707f
-#define MATCH_VFWMSAC_VF 0xf8005057
-#define MASK_VFWMSAC_VF 0xfc00707f
-#define MATCH_VFWNMSAC_VF 0xfc005057
-#define MASK_VFWNMSAC_VF 0xfc00707f
-#define MATCH_VFADD_VV 0x1057
-#define MASK_VFADD_VV 0xfc00707f
-#define MATCH_VFREDUSUM_VS 0x4001057
-#define MASK_VFREDUSUM_VS 0xfc00707f
-#define MATCH_VFSUB_VV 0x8001057
-#define MASK_VFSUB_VV 0xfc00707f
-#define MATCH_VFREDOSUM_VS 0xc001057
-#define MASK_VFREDOSUM_VS 0xfc00707f
-#define MATCH_VFMIN_VV 0x10001057
-#define MASK_VFMIN_VV 0xfc00707f
-#define MATCH_VFREDMIN_VS 0x14001057
-#define MASK_VFREDMIN_VS 0xfc00707f
-#define MATCH_VFMAX_VV 0x18001057
-#define MASK_VFMAX_VV 0xfc00707f
-#define MATCH_VFREDMAX_VS 0x1c001057
-#define MASK_VFREDMAX_VS 0xfc00707f
-#define MATCH_VFSGNJ_VV 0x20001057
-#define MASK_VFSGNJ_VV 0xfc00707f
-#define MATCH_VFSGNJN_VV 0x24001057
-#define MASK_VFSGNJN_VV 0xfc00707f
-#define MATCH_VFSGNJX_VV 0x28001057
-#define MASK_VFSGNJX_VV 0xfc00707f
-#define MATCH_VFMV_F_S 0x42001057
-#define MASK_VFMV_F_S 0xfe0ff07f
-#define MATCH_VMFEQ_VV 0x60001057
-#define MASK_VMFEQ_VV 0xfc00707f
-#define MATCH_VMFLE_VV 0x64001057
-#define MASK_VMFLE_VV 0xfc00707f
-#define MATCH_VMFLT_VV 0x6c001057
-#define MASK_VMFLT_VV 0xfc00707f
-#define MATCH_VMFNE_VV 0x70001057
-#define MASK_VMFNE_VV 0xfc00707f
-#define MATCH_VFDIV_VV 0x80001057
-#define MASK_VFDIV_VV 0xfc00707f
-#define MATCH_VFMUL_VV 0x90001057
-#define MASK_VFMUL_VV 0xfc00707f
-#define MATCH_VFMADD_VV 0xa0001057
-#define MASK_VFMADD_VV 0xfc00707f
-#define MATCH_VFNMADD_VV 0xa4001057
-#define MASK_VFNMADD_VV 0xfc00707f
-#define MATCH_VFMSUB_VV 0xa8001057
-#define MASK_VFMSUB_VV 0xfc00707f
-#define MATCH_VFNMSUB_VV 0xac001057
-#define MASK_VFNMSUB_VV 0xfc00707f
-#define MATCH_VFMACC_VV 0xb0001057
-#define MASK_VFMACC_VV 0xfc00707f
-#define MATCH_VFNMACC_VV 0xb4001057
-#define MASK_VFNMACC_VV 0xfc00707f
-#define MATCH_VFMSAC_VV 0xb8001057
-#define MASK_VFMSAC_VV 0xfc00707f
-#define MATCH_VFNMSAC_VV 0xbc001057
-#define MASK_VFNMSAC_VV 0xfc00707f
-#define MATCH_VFCVT_XU_F_V 0x48001057
-#define MASK_VFCVT_XU_F_V 0xfc0ff07f
-#define MATCH_VFCVT_X_F_V 0x48009057
-#define MASK_VFCVT_X_F_V 0xfc0ff07f
-#define MATCH_VFCVT_F_XU_V 0x48011057
-#define MASK_VFCVT_F_XU_V 0xfc0ff07f
-#define MATCH_VFCVT_F_X_V 0x48019057
-#define MASK_VFCVT_F_X_V 0xfc0ff07f
-#define MATCH_VFCVT_RTZ_XU_F_V 0x48031057
-#define MASK_VFCVT_RTZ_XU_F_V 0xfc0ff07f
-#define MATCH_VFCVT_RTZ_X_F_V 0x48039057
-#define MASK_VFCVT_RTZ_X_F_V 0xfc0ff07f
-#define MATCH_VFWCVT_XU_F_V 0x48041057
-#define MASK_VFWCVT_XU_F_V 0xfc0ff07f
-#define MATCH_VFWCVT_X_F_V 0x48049057
-#define MASK_VFWCVT_X_F_V 0xfc0ff07f
-#define MATCH_VFWCVT_F_XU_V 0x48051057
-#define MASK_VFWCVT_F_XU_V 0xfc0ff07f
-#define MATCH_VFWCVT_F_X_V 0x48059057
-#define MASK_VFWCVT_F_X_V 0xfc0ff07f
-#define MATCH_VFWCVT_F_F_V 0x48061057
-#define MASK_VFWCVT_F_F_V 0xfc0ff07f
-#define MATCH_VFWCVT_RTZ_XU_F_V 0x48071057
-#define MASK_VFWCVT_RTZ_XU_F_V 0xfc0ff07f
-#define MATCH_VFWCVT_RTZ_X_F_V 0x48079057
-#define MASK_VFWCVT_RTZ_X_F_V 0xfc0ff07f
-#define MATCH_VFNCVT_XU_F_W 0x48081057
-#define MASK_VFNCVT_XU_F_W 0xfc0ff07f
-#define MATCH_VFNCVT_X_F_W 0x48089057
-#define MASK_VFNCVT_X_F_W 0xfc0ff07f
-#define MATCH_VFNCVT_F_XU_W 0x48091057
-#define MASK_VFNCVT_F_XU_W 0xfc0ff07f
-#define MATCH_VFNCVT_F_X_W 0x48099057
-#define MASK_VFNCVT_F_X_W 0xfc0ff07f
-#define MATCH_VFNCVT_F_F_W 0x480a1057
-#define MASK_VFNCVT_F_F_W 0xfc0ff07f
-#define MATCH_VFNCVT_ROD_F_F_W 0x480a9057
-#define MASK_VFNCVT_ROD_F_F_W 0xfc0ff07f
-#define MATCH_VFNCVT_RTZ_XU_F_W 0x480b1057
-#define MASK_VFNCVT_RTZ_XU_F_W 0xfc0ff07f
-#define MATCH_VFNCVT_RTZ_X_F_W 0x480b9057
-#define MASK_VFNCVT_RTZ_X_F_W 0xfc0ff07f
-#define MATCH_VFSQRT_V 0x4c001057
-#define MASK_VFSQRT_V 0xfc0ff07f
-#define MATCH_VFRSQRT7_V 0x4c021057
-#define MASK_VFRSQRT7_V 0xfc0ff07f
-#define MATCH_VFREC7_V 0x4c029057
-#define MASK_VFREC7_V 0xfc0ff07f
-#define MATCH_VFCLASS_V 0x4c081057
-#define MASK_VFCLASS_V 0xfc0ff07f
-#define MATCH_VFWADD_VV 0xc0001057
-#define MASK_VFWADD_VV 0xfc00707f
-#define MATCH_VFWREDUSUM_VS 0xc4001057
-#define MASK_VFWREDUSUM_VS 0xfc00707f
-#define MATCH_VFWSUB_VV 0xc8001057
-#define MASK_VFWSUB_VV 0xfc00707f
-#define MATCH_VFWREDOSUM_VS 0xcc001057
-#define MASK_VFWREDOSUM_VS 0xfc00707f
-#define MATCH_VFWADD_WV 0xd0001057
-#define MASK_VFWADD_WV 0xfc00707f
-#define MATCH_VFWSUB_WV 0xd8001057
-#define MASK_VFWSUB_WV 0xfc00707f
-#define MATCH_VFWMUL_VV 0xe0001057
-#define MASK_VFWMUL_VV 0xfc00707f
-#define MATCH_VFWMACC_VV 0xf0001057
-#define MASK_VFWMACC_VV 0xfc00707f
-#define MATCH_VFWNMACC_VV 0xf4001057
-#define MASK_VFWNMACC_VV 0xfc00707f
-#define MATCH_VFWMSAC_VV 0xf8001057
-#define MASK_VFWMSAC_VV 0xfc00707f
-#define MATCH_VFWNMSAC_VV 0xfc001057
-#define MASK_VFWNMSAC_VV 0xfc00707f
-#define MATCH_VADD_VX 0x4057
-#define MASK_VADD_VX 0xfc00707f
-#define MATCH_VSUB_VX 0x8004057
-#define MASK_VSUB_VX 0xfc00707f
-#define MATCH_VRSUB_VX 0xc004057
-#define MASK_VRSUB_VX 0xfc00707f
-#define MATCH_VMINU_VX 0x10004057
-#define MASK_VMINU_VX 0xfc00707f
-#define MATCH_VMIN_VX 0x14004057
-#define MASK_VMIN_VX 0xfc00707f
-#define MATCH_VMAXU_VX 0x18004057
-#define MASK_VMAXU_VX 0xfc00707f
-#define MATCH_VMAX_VX 0x1c004057
-#define MASK_VMAX_VX 0xfc00707f
-#define MATCH_VAND_VX 0x24004057
-#define MASK_VAND_VX 0xfc00707f
-#define MATCH_VOR_VX 0x28004057
-#define MASK_VOR_VX 0xfc00707f
-#define MATCH_VXOR_VX 0x2c004057
-#define MASK_VXOR_VX 0xfc00707f
-#define MATCH_VRGATHER_VX 0x30004057
-#define MASK_VRGATHER_VX 0xfc00707f
-#define MATCH_VSLIDEUP_VX 0x38004057
-#define MASK_VSLIDEUP_VX 0xfc00707f
-#define MATCH_VSLIDEDOWN_VX 0x3c004057
-#define MASK_VSLIDEDOWN_VX 0xfc00707f
-#define MATCH_VADC_VXM 0x40004057
-#define MASK_VADC_VXM 0xfe00707f
-#define MATCH_VMADC_VXM 0x44004057
-#define MASK_VMADC_VXM 0xfe00707f
-#define MATCH_VMADC_VX 0x46004057
-#define MASK_VMADC_VX 0xfe00707f
-#define MATCH_VSBC_VXM 0x48004057
-#define MASK_VSBC_VXM 0xfe00707f
-#define MATCH_VMSBC_VXM 0x4c004057
-#define MASK_VMSBC_VXM 0xfe00707f
-#define MATCH_VMSBC_VX 0x4e004057
-#define MASK_VMSBC_VX 0xfe00707f
-#define MATCH_VMERGE_VXM 0x5c004057
-#define MASK_VMERGE_VXM 0xfe00707f
-#define MATCH_VMV_V_X 0x5e004057
-#define MASK_VMV_V_X 0xfff0707f
-#define MATCH_VMSEQ_VX 0x60004057
-#define MASK_VMSEQ_VX 0xfc00707f
-#define MATCH_VMSNE_VX 0x64004057
-#define MASK_VMSNE_VX 0xfc00707f
-#define MATCH_VMSLTU_VX 0x68004057
-#define MASK_VMSLTU_VX 0xfc00707f
-#define MATCH_VMSLT_VX 0x6c004057
-#define MASK_VMSLT_VX 0xfc00707f
-#define MATCH_VMSLEU_VX 0x70004057
-#define MASK_VMSLEU_VX 0xfc00707f
-#define MATCH_VMSLE_VX 0x74004057
-#define MASK_VMSLE_VX 0xfc00707f
-#define MATCH_VMSGTU_VX 0x78004057
-#define MASK_VMSGTU_VX 0xfc00707f
-#define MATCH_VMSGT_VX 0x7c004057
-#define MASK_VMSGT_VX 0xfc00707f
-#define MATCH_VSADDU_VX 0x80004057
-#define MASK_VSADDU_VX 0xfc00707f
-#define MATCH_VSADD_VX 0x84004057
-#define MASK_VSADD_VX 0xfc00707f
-#define MATCH_VSSUBU_VX 0x88004057
-#define MASK_VSSUBU_VX 0xfc00707f
-#define MATCH_VSSUB_VX 0x8c004057
-#define MASK_VSSUB_VX 0xfc00707f
-#define MATCH_VSLL_VX 0x94004057
-#define MASK_VSLL_VX 0xfc00707f
-#define MATCH_VSMUL_VX 0x9c004057
-#define MASK_VSMUL_VX 0xfc00707f
-#define MATCH_VSRL_VX 0xa0004057
-#define MASK_VSRL_VX 0xfc00707f
-#define MATCH_VSRA_VX 0xa4004057
-#define MASK_VSRA_VX 0xfc00707f
-#define MATCH_VSSRL_VX 0xa8004057
-#define MASK_VSSRL_VX 0xfc00707f
-#define MATCH_VSSRA_VX 0xac004057
-#define MASK_VSSRA_VX 0xfc00707f
-#define MATCH_VNSRL_WX 0xb0004057
-#define MASK_VNSRL_WX 0xfc00707f
-#define MATCH_VNSRA_WX 0xb4004057
-#define MASK_VNSRA_WX 0xfc00707f
-#define MATCH_VNCLIPU_WX 0xb8004057
-#define MASK_VNCLIPU_WX 0xfc00707f
-#define MATCH_VNCLIP_WX 0xbc004057
-#define MASK_VNCLIP_WX 0xfc00707f
-#define MATCH_VADD_VV 0x57
-#define MASK_VADD_VV 0xfc00707f
-#define MATCH_VSUB_VV 0x8000057
-#define MASK_VSUB_VV 0xfc00707f
-#define MATCH_VMINU_VV 0x10000057
-#define MASK_VMINU_VV 0xfc00707f
-#define MATCH_VMIN_VV 0x14000057
-#define MASK_VMIN_VV 0xfc00707f
-#define MATCH_VMAXU_VV 0x18000057
-#define MASK_VMAXU_VV 0xfc00707f
-#define MATCH_VMAX_VV 0x1c000057
-#define MASK_VMAX_VV 0xfc00707f
-#define MATCH_VAND_VV 0x24000057
-#define MASK_VAND_VV 0xfc00707f
-#define MATCH_VOR_VV 0x28000057
-#define MASK_VOR_VV 0xfc00707f
-#define MATCH_VXOR_VV 0x2c000057
-#define MASK_VXOR_VV 0xfc00707f
-#define MATCH_VRGATHER_VV 0x30000057
-#define MASK_VRGATHER_VV 0xfc00707f
-#define MATCH_VRGATHEREI16_VV 0x38000057
-#define MASK_VRGATHEREI16_VV 0xfc00707f
-#define MATCH_VADC_VVM 0x40000057
-#define MASK_VADC_VVM 0xfe00707f
-#define MATCH_VMADC_VVM 0x44000057
-#define MASK_VMADC_VVM 0xfe00707f
-#define MATCH_VMADC_VV 0x46000057
-#define MASK_VMADC_VV 0xfe00707f
-#define MATCH_VSBC_VVM 0x48000057
-#define MASK_VSBC_VVM 0xfe00707f
-#define MATCH_VMSBC_VVM 0x4c000057
-#define MASK_VMSBC_VVM 0xfe00707f
-#define MATCH_VMSBC_VV 0x4e000057
-#define MASK_VMSBC_VV 0xfe00707f
-#define MATCH_VMERGE_VVM 0x5c000057
-#define MASK_VMERGE_VVM 0xfe00707f
-#define MATCH_VMV_V_V 0x5e000057
-#define MASK_VMV_V_V 0xfff0707f
-#define MATCH_VMSEQ_VV 0x60000057
-#define MASK_VMSEQ_VV 0xfc00707f
-#define MATCH_VMSNE_VV 0x64000057
-#define MASK_VMSNE_VV 0xfc00707f
-#define MATCH_VMSLTU_VV 0x68000057
-#define MASK_VMSLTU_VV 0xfc00707f
-#define MATCH_VMSLT_VV 0x6c000057
-#define MASK_VMSLT_VV 0xfc00707f
-#define MATCH_VMSLEU_VV 0x70000057
-#define MASK_VMSLEU_VV 0xfc00707f
-#define MATCH_VMSLE_VV 0x74000057
-#define MASK_VMSLE_VV 0xfc00707f
-#define MATCH_VSADDU_VV 0x80000057
-#define MASK_VSADDU_VV 0xfc00707f
-#define MATCH_VSADD_VV 0x84000057
-#define MASK_VSADD_VV 0xfc00707f
-#define MATCH_VSSUBU_VV 0x88000057
-#define MASK_VSSUBU_VV 0xfc00707f
-#define MATCH_VSSUB_VV 0x8c000057
-#define MASK_VSSUB_VV 0xfc00707f
-#define MATCH_VSLL_VV 0x94000057
-#define MASK_VSLL_VV 0xfc00707f
-#define MATCH_VSMUL_VV 0x9c000057
-#define MASK_VSMUL_VV 0xfc00707f
-#define MATCH_VSRL_VV 0xa0000057
-#define MASK_VSRL_VV 0xfc00707f
-#define MATCH_VSRA_VV 0xa4000057
-#define MASK_VSRA_VV 0xfc00707f
-#define MATCH_VSSRL_VV 0xa8000057
-#define MASK_VSSRL_VV 0xfc00707f
-#define MATCH_VSSRA_VV 0xac000057
-#define MASK_VSSRA_VV 0xfc00707f
-#define MATCH_VNSRL_WV 0xb0000057
-#define MASK_VNSRL_WV 0xfc00707f
-#define MATCH_VNSRA_WV 0xb4000057
-#define MASK_VNSRA_WV 0xfc00707f
-#define MATCH_VNCLIPU_WV 0xb8000057
-#define MASK_VNCLIPU_WV 0xfc00707f
-#define MATCH_VNCLIP_WV 0xbc000057
-#define MASK_VNCLIP_WV 0xfc00707f
-#define MATCH_VWREDSUMU_VS 0xc0000057
-#define MASK_VWREDSUMU_VS 0xfc00707f
-#define MATCH_VWREDSUM_VS 0xc4000057
-#define MASK_VWREDSUM_VS 0xfc00707f
-#define MATCH_VADD_VI 0x3057
-#define MASK_VADD_VI 0xfc00707f
-#define MATCH_VRSUB_VI 0xc003057
-#define MASK_VRSUB_VI 0xfc00707f
-#define MATCH_VAND_VI 0x24003057
-#define MASK_VAND_VI 0xfc00707f
-#define MATCH_VOR_VI 0x28003057
-#define MASK_VOR_VI 0xfc00707f
-#define MATCH_VXOR_VI 0x2c003057
-#define MASK_VXOR_VI 0xfc00707f
-#define MATCH_VRGATHER_VI 0x30003057
-#define MASK_VRGATHER_VI 0xfc00707f
-#define MATCH_VSLIDEUP_VI 0x38003057
-#define MASK_VSLIDEUP_VI 0xfc00707f
-#define MATCH_VSLIDEDOWN_VI 0x3c003057
-#define MASK_VSLIDEDOWN_VI 0xfc00707f
-#define MATCH_VADC_VIM 0x40003057
-#define MASK_VADC_VIM 0xfe00707f
-#define MATCH_VMADC_VIM 0x44003057
-#define MASK_VMADC_VIM 0xfe00707f
-#define MATCH_VMADC_VI 0x46003057
-#define MASK_VMADC_VI 0xfe00707f
-#define MATCH_VMERGE_VIM 0x5c003057
-#define MASK_VMERGE_VIM 0xfe00707f
-#define MATCH_VMV_V_I 0x5e003057
-#define MASK_VMV_V_I 0xfff0707f
-#define MATCH_VMSEQ_VI 0x60003057
-#define MASK_VMSEQ_VI 0xfc00707f
-#define MATCH_VMSNE_VI 0x64003057
-#define MASK_VMSNE_VI 0xfc00707f
-#define MATCH_VMSLEU_VI 0x70003057
-#define MASK_VMSLEU_VI 0xfc00707f
-#define MATCH_VMSLE_VI 0x74003057
-#define MASK_VMSLE_VI 0xfc00707f
-#define MATCH_VMSGTU_VI 0x78003057
-#define MASK_VMSGTU_VI 0xfc00707f
-#define MATCH_VMSGT_VI 0x7c003057
-#define MASK_VMSGT_VI 0xfc00707f
-#define MATCH_VSADDU_VI 0x80003057
-#define MASK_VSADDU_VI 0xfc00707f
-#define MATCH_VSADD_VI 0x84003057
-#define MASK_VSADD_VI 0xfc00707f
-#define MATCH_VSLL_VI 0x94003057
-#define MASK_VSLL_VI 0xfc00707f
-#define MATCH_VMV1R_V 0x9e003057
-#define MASK_VMV1R_V 0xfe0ff07f
-#define MATCH_VMV2R_V 0x9e00b057
-#define MASK_VMV2R_V 0xfe0ff07f
-#define MATCH_VMV4R_V 0x9e01b057
-#define MASK_VMV4R_V 0xfe0ff07f
-#define MATCH_VMV8R_V 0x9e03b057
-#define MASK_VMV8R_V 0xfe0ff07f
-#define MATCH_VSRL_VI 0xa0003057
-#define MASK_VSRL_VI 0xfc00707f
-#define MATCH_VSRA_VI 0xa4003057
-#define MASK_VSRA_VI 0xfc00707f
-#define MATCH_VSSRL_VI 0xa8003057
-#define MASK_VSSRL_VI 0xfc00707f
-#define MATCH_VSSRA_VI 0xac003057
-#define MASK_VSSRA_VI 0xfc00707f
-#define MATCH_VNSRL_WI 0xb0003057
-#define MASK_VNSRL_WI 0xfc00707f
-#define MATCH_VNSRA_WI 0xb4003057
-#define MASK_VNSRA_WI 0xfc00707f
-#define MATCH_VNCLIPU_WI 0xb8003057
-#define MASK_VNCLIPU_WI 0xfc00707f
-#define MATCH_VNCLIP_WI 0xbc003057
-#define MASK_VNCLIP_WI 0xfc00707f
-#define MATCH_VREDSUM_VS 0x2057
-#define MASK_VREDSUM_VS 0xfc00707f
-#define MATCH_VREDAND_VS 0x4002057
-#define MASK_VREDAND_VS 0xfc00707f
-#define MATCH_VREDOR_VS 0x8002057
-#define MASK_VREDOR_VS 0xfc00707f
-#define MATCH_VREDXOR_VS 0xc002057
-#define MASK_VREDXOR_VS 0xfc00707f
-#define MATCH_VREDMINU_VS 0x10002057
-#define MASK_VREDMINU_VS 0xfc00707f
-#define MATCH_VREDMIN_VS 0x14002057
-#define MASK_VREDMIN_VS 0xfc00707f
-#define MATCH_VREDMAXU_VS 0x18002057
-#define MASK_VREDMAXU_VS 0xfc00707f
-#define MATCH_VREDMAX_VS 0x1c002057
-#define MASK_VREDMAX_VS 0xfc00707f
-#define MATCH_VAADDU_VV 0x20002057
-#define MASK_VAADDU_VV 0xfc00707f
-#define MATCH_VAADD_VV 0x24002057
-#define MASK_VAADD_VV 0xfc00707f
-#define MATCH_VASUBU_VV 0x28002057
-#define MASK_VASUBU_VV 0xfc00707f
-#define MATCH_VASUB_VV 0x2c002057
-#define MASK_VASUB_VV 0xfc00707f
-#define MATCH_VMV_X_S 0x42002057
-#define MASK_VMV_X_S 0xfe0ff07f
-#define MATCH_VZEXT_VF8 0x48012057
-#define MASK_VZEXT_VF8 0xfc0ff07f
-#define MATCH_VSEXT_VF8 0x4801a057
-#define MASK_VSEXT_VF8 0xfc0ff07f
-#define MATCH_VZEXT_VF4 0x48022057
-#define MASK_VZEXT_VF4 0xfc0ff07f
-#define MATCH_VSEXT_VF4 0x4802a057
-#define MASK_VSEXT_VF4 0xfc0ff07f
-#define MATCH_VZEXT_VF2 0x48032057
-#define MASK_VZEXT_VF2 0xfc0ff07f
-#define MATCH_VSEXT_VF2 0x4803a057
-#define MASK_VSEXT_VF2 0xfc0ff07f
-#define MATCH_VCOMPRESS_VM 0x5e002057
-#define MASK_VCOMPRESS_VM 0xfe00707f
-#define MATCH_VMANDN_MM 0x60002057
-#define MASK_VMANDN_MM 0xfc00707f
-#define MATCH_VMAND_MM 0x64002057
-#define MASK_VMAND_MM 0xfc00707f
-#define MATCH_VMOR_MM 0x68002057
-#define MASK_VMOR_MM 0xfc00707f
-#define MATCH_VMXOR_MM 0x6c002057
-#define MASK_VMXOR_MM 0xfc00707f
-#define MATCH_VMORN_MM 0x70002057
-#define MASK_VMORN_MM 0xfc00707f
-#define MATCH_VMNAND_MM 0x74002057
-#define MASK_VMNAND_MM 0xfc00707f
-#define MATCH_VMNOR_MM 0x78002057
-#define MASK_VMNOR_MM 0xfc00707f
-#define MATCH_VMXNOR_MM 0x7c002057
-#define MASK_VMXNOR_MM 0xfc00707f
-#define MATCH_VMSBF_M 0x5000a057
-#define MASK_VMSBF_M 0xfc0ff07f
-#define MATCH_VMSOF_M 0x50012057
-#define MASK_VMSOF_M 0xfc0ff07f
-#define MATCH_VMSIF_M 0x5001a057
-#define MASK_VMSIF_M 0xfc0ff07f
-#define MATCH_VIOTA_M 0x50082057
-#define MASK_VIOTA_M 0xfc0ff07f
-#define MATCH_VID_V 0x5008a057
-#define MASK_VID_V 0xfdfff07f
-#define MATCH_VCPOP_M 0x40082057
-#define MASK_VCPOP_M 0xfc0ff07f
-#define MATCH_VFIRST_M 0x4008a057
-#define MASK_VFIRST_M 0xfc0ff07f
-#define MATCH_VDIVU_VV 0x80002057
-#define MASK_VDIVU_VV 0xfc00707f
-#define MATCH_VDIV_VV 0x84002057
-#define MASK_VDIV_VV 0xfc00707f
-#define MATCH_VREMU_VV 0x88002057
-#define MASK_VREMU_VV 0xfc00707f
-#define MATCH_VREM_VV 0x8c002057
-#define MASK_VREM_VV 0xfc00707f
-#define MATCH_VMULHU_VV 0x90002057
-#define MASK_VMULHU_VV 0xfc00707f
-#define MATCH_VMUL_VV 0x94002057
-#define MASK_VMUL_VV 0xfc00707f
-#define MATCH_VMULHSU_VV 0x98002057
-#define MASK_VMULHSU_VV 0xfc00707f
-#define MATCH_VMULH_VV 0x9c002057
-#define MASK_VMULH_VV 0xfc00707f
-#define MATCH_VMADD_VV 0xa4002057
-#define MASK_VMADD_VV 0xfc00707f
-#define MATCH_VNMSUB_VV 0xac002057
-#define MASK_VNMSUB_VV 0xfc00707f
-#define MATCH_VMACC_VV 0xb4002057
-#define MASK_VMACC_VV 0xfc00707f
-#define MATCH_VNMSAC_VV 0xbc002057
-#define MASK_VNMSAC_VV 0xfc00707f
-#define MATCH_VWADDU_VV 0xc0002057
-#define MASK_VWADDU_VV 0xfc00707f
-#define MATCH_VWADD_VV 0xc4002057
-#define MASK_VWADD_VV 0xfc00707f
-#define MATCH_VWSUBU_VV 0xc8002057
-#define MASK_VWSUBU_VV 0xfc00707f
-#define MATCH_VWSUB_VV 0xcc002057
-#define MASK_VWSUB_VV 0xfc00707f
-#define MATCH_VWADDU_WV 0xd0002057
-#define MASK_VWADDU_WV 0xfc00707f
-#define MATCH_VWADD_WV 0xd4002057
-#define MASK_VWADD_WV 0xfc00707f
-#define MATCH_VWSUBU_WV 0xd8002057
-#define MASK_VWSUBU_WV 0xfc00707f
-#define MATCH_VWSUB_WV 0xdc002057
-#define MASK_VWSUB_WV 0xfc00707f
-#define MATCH_VWMULU_VV 0xe0002057
-#define MASK_VWMULU_VV 0xfc00707f
-#define MATCH_VWMULSU_VV 0xe8002057
-#define MASK_VWMULSU_VV 0xfc00707f
-#define MATCH_VWMUL_VV 0xec002057
-#define MASK_VWMUL_VV 0xfc00707f
-#define MATCH_VWMACCU_VV 0xf0002057
-#define MASK_VWMACCU_VV 0xfc00707f
-#define MATCH_VWMACC_VV 0xf4002057
-#define MASK_VWMACC_VV 0xfc00707f
-#define MATCH_VWMACCSU_VV 0xfc002057
-#define MASK_VWMACCSU_VV 0xfc00707f
-#define MATCH_VAADDU_VX 0x20006057
-#define MASK_VAADDU_VX 0xfc00707f
-#define MATCH_VAADD_VX 0x24006057
-#define MASK_VAADD_VX 0xfc00707f
-#define MATCH_VASUBU_VX 0x28006057
-#define MASK_VASUBU_VX 0xfc00707f
-#define MATCH_VASUB_VX 0x2c006057
-#define MASK_VASUB_VX 0xfc00707f
-#define MATCH_VMV_S_X 0x42006057
-#define MASK_VMV_S_X 0xfff0707f
-#define MATCH_VSLIDE1UP_VX 0x38006057
-#define MASK_VSLIDE1UP_VX 0xfc00707f
-#define MATCH_VSLIDE1DOWN_VX 0x3c006057
-#define MASK_VSLIDE1DOWN_VX 0xfc00707f
-#define MATCH_VDIVU_VX 0x80006057
-#define MASK_VDIVU_VX 0xfc00707f
-#define MATCH_VDIV_VX 0x84006057
-#define MASK_VDIV_VX 0xfc00707f
-#define MATCH_VREMU_VX 0x88006057
-#define MASK_VREMU_VX 0xfc00707f
-#define MATCH_VREM_VX 0x8c006057
-#define MASK_VREM_VX 0xfc00707f
-#define MATCH_VMULHU_VX 0x90006057
-#define MASK_VMULHU_VX 0xfc00707f
-#define MATCH_VMUL_VX 0x94006057
-#define MASK_VMUL_VX 0xfc00707f
-#define MATCH_VMULHSU_VX 0x98006057
-#define MASK_VMULHSU_VX 0xfc00707f
-#define MATCH_VMULH_VX 0x9c006057
-#define MASK_VMULH_VX 0xfc00707f
-#define MATCH_VMADD_VX 0xa4006057
-#define MASK_VMADD_VX 0xfc00707f
-#define MATCH_VNMSUB_VX 0xac006057
-#define MASK_VNMSUB_VX 0xfc00707f
-#define MATCH_VMACC_VX 0xb4006057
-#define MASK_VMACC_VX 0xfc00707f
-#define MATCH_VNMSAC_VX 0xbc006057
-#define MASK_VNMSAC_VX 0xfc00707f
-#define MATCH_VWADDU_VX 0xc0006057
-#define MASK_VWADDU_VX 0xfc00707f
-#define MATCH_VWADD_VX 0xc4006057
-#define MASK_VWADD_VX 0xfc00707f
-#define MATCH_VWSUBU_VX 0xc8006057
-#define MASK_VWSUBU_VX 0xfc00707f
-#define MATCH_VWSUB_VX 0xcc006057
-#define MASK_VWSUB_VX 0xfc00707f
-#define MATCH_VWADDU_WX 0xd0006057
-#define MASK_VWADDU_WX 0xfc00707f
-#define MATCH_VWADD_WX 0xd4006057
-#define MASK_VWADD_WX 0xfc00707f
-#define MATCH_VWSUBU_WX 0xd8006057
-#define MASK_VWSUBU_WX 0xfc00707f
-#define MATCH_VWSUB_WX 0xdc006057
-#define MASK_VWSUB_WX 0xfc00707f
-#define MATCH_VWMULU_VX 0xe0006057
-#define MASK_VWMULU_VX 0xfc00707f
-#define MATCH_VWMULSU_VX 0xe8006057
-#define MASK_VWMULSU_VX 0xfc00707f
-#define MATCH_VWMUL_VX 0xec006057
-#define MASK_VWMUL_VX 0xfc00707f
-#define MATCH_VWMACCU_VX 0xf0006057
-#define MASK_VWMACCU_VX 0xfc00707f
-#define MATCH_VWMACC_VX 0xf4006057
-#define MASK_VWMACC_VX 0xfc00707f
-#define MATCH_VWMACCUS_VX 0xf8006057
-#define MASK_VWMACCUS_VX 0xfc00707f
-#define MATCH_VWMACCSU_VX 0xfc006057
-#define MASK_VWMACCSU_VX 0xfc00707f
-#define MATCH_VAMOSWAPEI8_V 0x800002f
-#define MASK_VAMOSWAPEI8_V 0xf800707f
-#define MATCH_VAMOADDEI8_V 0x2f
-#define MASK_VAMOADDEI8_V 0xf800707f
-#define MATCH_VAMOXOREI8_V 0x2000002f
-#define MASK_VAMOXOREI8_V 0xf800707f
-#define MATCH_VAMOANDEI8_V 0x6000002f
-#define MASK_VAMOANDEI8_V 0xf800707f
-#define MATCH_VAMOOREI8_V 0x4000002f
-#define MASK_VAMOOREI8_V 0xf800707f
-#define MATCH_VAMOMINEI8_V 0x8000002f
-#define MASK_VAMOMINEI8_V 0xf800707f
-#define MATCH_VAMOMAXEI8_V 0xa000002f
-#define MASK_VAMOMAXEI8_V 0xf800707f
-#define MATCH_VAMOMINUEI8_V 0xc000002f
-#define MASK_VAMOMINUEI8_V 0xf800707f
-#define MATCH_VAMOMAXUEI8_V 0xe000002f
-#define MASK_VAMOMAXUEI8_V 0xf800707f
-#define MATCH_VAMOSWAPEI16_V 0x800502f
-#define MASK_VAMOSWAPEI16_V 0xf800707f
-#define MATCH_VAMOADDEI16_V 0x502f
-#define MASK_VAMOADDEI16_V 0xf800707f
-#define MATCH_VAMOXOREI16_V 0x2000502f
-#define MASK_VAMOXOREI16_V 0xf800707f
-#define MATCH_VAMOANDEI16_V 0x6000502f
-#define MASK_VAMOANDEI16_V 0xf800707f
-#define MATCH_VAMOOREI16_V 0x4000502f
-#define MASK_VAMOOREI16_V 0xf800707f
-#define MATCH_VAMOMINEI16_V 0x8000502f
-#define MASK_VAMOMINEI16_V 0xf800707f
-#define MATCH_VAMOMAXEI16_V 0xa000502f
-#define MASK_VAMOMAXEI16_V 0xf800707f
-#define MATCH_VAMOMINUEI16_V 0xc000502f
-#define MASK_VAMOMINUEI16_V 0xf800707f
-#define MATCH_VAMOMAXUEI16_V 0xe000502f
-#define MASK_VAMOMAXUEI16_V 0xf800707f
-#define MATCH_VAMOSWAPEI32_V 0x800602f
-#define MASK_VAMOSWAPEI32_V 0xf800707f
-#define MATCH_VAMOADDEI32_V 0x602f
-#define MASK_VAMOADDEI32_V 0xf800707f
-#define MATCH_VAMOXOREI32_V 0x2000602f
-#define MASK_VAMOXOREI32_V 0xf800707f
-#define MATCH_VAMOANDEI32_V 0x6000602f
-#define MASK_VAMOANDEI32_V 0xf800707f
-#define MATCH_VAMOOREI32_V 0x4000602f
-#define MASK_VAMOOREI32_V 0xf800707f
-#define MATCH_VAMOMINEI32_V 0x8000602f
-#define MASK_VAMOMINEI32_V 0xf800707f
-#define MATCH_VAMOMAXEI32_V 0xa000602f
-#define MASK_VAMOMAXEI32_V 0xf800707f
-#define MATCH_VAMOMINUEI32_V 0xc000602f
-#define MASK_VAMOMINUEI32_V 0xf800707f
-#define MATCH_VAMOMAXUEI32_V 0xe000602f
-#define MASK_VAMOMAXUEI32_V 0xf800707f
-#define MATCH_VAMOSWAPEI64_V 0x800702f
-#define MASK_VAMOSWAPEI64_V 0xf800707f
-#define MATCH_VAMOADDEI64_V 0x702f
-#define MASK_VAMOADDEI64_V 0xf800707f
-#define MATCH_VAMOXOREI64_V 0x2000702f
-#define MASK_VAMOXOREI64_V 0xf800707f
-#define MATCH_VAMOANDEI64_V 0x6000702f
-#define MASK_VAMOANDEI64_V 0xf800707f
-#define MATCH_VAMOOREI64_V 0x4000702f
-#define MASK_VAMOOREI64_V 0xf800707f
-#define MATCH_VAMOMINEI64_V 0x8000702f
-#define MASK_VAMOMINEI64_V 0xf800707f
-#define MATCH_VAMOMAXEI64_V 0xa000702f
-#define MASK_VAMOMAXEI64_V 0xf800707f
-#define MATCH_VAMOMINUEI64_V 0xc000702f
-#define MASK_VAMOMINUEI64_V 0xf800707f
-#define MATCH_VAMOMAXUEI64_V 0xe000702f
-#define MASK_VAMOMAXUEI64_V 0xf800707f
-#define MATCH_ADD8 0x48000077
-#define MASK_ADD8 0xfe00707f
-#define MATCH_ADD16 0x40000077
-#define MASK_ADD16 0xfe00707f
-#define MATCH_ADD64 0xc0001077
-#define MASK_ADD64 0xfe00707f
-#define MATCH_AVE 0xe0000077
-#define MASK_AVE 0xfe00707f
-#define MATCH_BITREV 0xe6000077
-#define MASK_BITREV 0xfe00707f
-#define MATCH_BITREVI 0xe8000077
-#define MASK_BITREVI 0xfc00707f
-#define MATCH_BPICK 0x3077
-#define MASK_BPICK 0x600707f
-#define MATCH_CLRS8 0xae000077
-#define MASK_CLRS8 0xfff0707f
-#define MATCH_CLRS16 0xae800077
-#define MASK_CLRS16 0xfff0707f
-#define MATCH_CLRS32 0xaf800077
-#define MASK_CLRS32 0xfff0707f
-#define MATCH_CLO8 0xae300077
-#define MASK_CLO8 0xfff0707f
+#define MASK_C_SDSP 0xe003
+#define MATCH_C_SLLI 0x2
+#define MASK_C_SLLI 0xe003
+#define MATCH_C_SRAI 0x8401
+#define MASK_C_SRAI 0xec03
+#define MATCH_C_SRLI 0x8001
+#define MASK_C_SRLI 0xec03
+#define MATCH_C_SUB 0x8c01
+#define MASK_C_SUB 0xfc63
+#define MATCH_C_SUBW 0x9c01
+#define MASK_C_SUBW 0xfc63
+#define MATCH_C_SW 0xc000
+#define MASK_C_SW 0xe003
+#define MATCH_C_SWSP 0xc002
+#define MASK_C_SWSP 0xe003
+#define MATCH_C_XOR 0x8c21
+#define MASK_C_XOR 0xfc63
+#define MATCH_CBO_CLEAN 0x10200f
+#define MASK_CBO_CLEAN 0xfff07fff
+#define MATCH_CBO_FLUSH 0x20200f
+#define MASK_CBO_FLUSH 0xfff07fff
+#define MATCH_CBO_INVAL 0x200f
+#define MASK_CBO_INVAL 0xfff07fff
+#define MATCH_CBO_ZERO 0x40200f
+#define MASK_CBO_ZERO 0xfff07fff
+#define MATCH_CLMUL 0xa001033
+#define MASK_CLMUL 0xfe00707f
+#define MATCH_CLMULH 0xa003033
+#define MASK_CLMULH 0xfe00707f
+#define MATCH_CLMULR 0xa002033
+#define MASK_CLMULR 0xfe00707f
#define MATCH_CLO16 0xaeb00077
-#define MASK_CLO16 0xfff0707f
+#define MASK_CLO16 0xfff0707f
#define MATCH_CLO32 0xafb00077
-#define MASK_CLO32 0xfff0707f
-#define MATCH_CLZ8 0xae100077
-#define MASK_CLZ8 0xfff0707f
+#define MASK_CLO32 0xfff0707f
+#define MATCH_CLO8 0xae300077
+#define MASK_CLO8 0xfff0707f
+#define MATCH_CLRS16 0xae800077
+#define MASK_CLRS16 0xfff0707f
+#define MATCH_CLRS32 0xaf800077
+#define MASK_CLRS32 0xfff0707f
+#define MATCH_CLRS8 0xae000077
+#define MASK_CLRS8 0xfff0707f
+#define MATCH_CLZ 0x60001013
+#define MASK_CLZ 0xfff0707f
#define MATCH_CLZ16 0xae900077
-#define MASK_CLZ16 0xfff0707f
+#define MASK_CLZ16 0xfff0707f
#define MATCH_CLZ32 0xaf900077
-#define MASK_CLZ32 0xfff0707f
-#define MATCH_CMPEQ8 0x4e000077
-#define MASK_CMPEQ8 0xfe00707f
+#define MASK_CLZ32 0xfff0707f
+#define MATCH_CLZ8 0xae100077
+#define MASK_CLZ8 0xfff0707f
+#define MATCH_CLZW 0x6000101b
+#define MASK_CLZW 0xfff0707f
+#define MATCH_CMIX 0x6001033
+#define MASK_CMIX 0x600707f
+#define MATCH_CMOV 0x6005033
+#define MASK_CMOV 0x600707f
#define MATCH_CMPEQ16 0x4c000077
-#define MASK_CMPEQ16 0xfe00707f
+#define MASK_CMPEQ16 0xfe00707f
+#define MATCH_CMPEQ8 0x4e000077
+#define MASK_CMPEQ8 0xfe00707f
+#define MATCH_CPOP 0x60201013
+#define MASK_CPOP 0xfff0707f
+#define MATCH_CPOPW 0x6020101b
+#define MASK_CPOPW 0xfff0707f
#define MATCH_CRAS16 0x44000077
-#define MASK_CRAS16 0xfe00707f
+#define MASK_CRAS16 0xfe00707f
+#define MATCH_CRAS32 0x44002077
+#define MASK_CRAS32 0xfe00707f
+#define MATCH_CRC32_B 0x61001013
+#define MASK_CRC32_B 0xfff0707f
+#define MATCH_CRC32_D 0x61301013
+#define MASK_CRC32_D 0xfff0707f
+#define MATCH_CRC32_H 0x61101013
+#define MASK_CRC32_H 0xfff0707f
+#define MATCH_CRC32_W 0x61201013
+#define MASK_CRC32_W 0xfff0707f
+#define MATCH_CRC32C_B 0x61801013
+#define MASK_CRC32C_B 0xfff0707f
+#define MATCH_CRC32C_D 0x61b01013
+#define MASK_CRC32C_D 0xfff0707f
+#define MATCH_CRC32C_H 0x61901013
+#define MASK_CRC32C_H 0xfff0707f
+#define MATCH_CRC32C_W 0x61a01013
+#define MASK_CRC32C_W 0xfff0707f
#define MATCH_CRSA16 0x46000077
-#define MASK_CRSA16 0xfe00707f
+#define MASK_CRSA16 0xfe00707f
+#define MATCH_CRSA32 0x46002077
+#define MASK_CRSA32 0xfe00707f
+#define MATCH_CSRRC 0x3073
+#define MASK_CSRRC 0x707f
+#define MATCH_CSRRCI 0x7073
+#define MASK_CSRRCI 0x707f
+#define MATCH_CSRRS 0x2073
+#define MASK_CSRRS 0x707f
+#define MATCH_CSRRSI 0x6073
+#define MASK_CSRRSI 0x707f
+#define MATCH_CSRRW 0x1073
+#define MASK_CSRRW 0x707f
+#define MATCH_CSRRWI 0x5073
+#define MASK_CSRRWI 0x707f
+#define MATCH_CTZ 0x60101013
+#define MASK_CTZ 0xfff0707f
+#define MATCH_CTZW 0x6010101b
+#define MASK_CTZW 0xfff0707f
+#define MATCH_DIV 0x2004033
+#define MASK_DIV 0xfe00707f
+#define MATCH_DIVU 0x2005033
+#define MASK_DIVU 0xfe00707f
+#define MATCH_DIVUW 0x200503b
+#define MASK_DIVUW 0xfe00707f
+#define MATCH_DIVW 0x200403b
+#define MASK_DIVW 0xfe00707f
+#define MATCH_DRET 0x7b200073
+#define MASK_DRET 0xffffffff
+#define MATCH_EBREAK 0x100073
+#define MASK_EBREAK 0xffffffff
+#define MATCH_ECALL 0x73
+#define MASK_ECALL 0xffffffff
+#define MATCH_FADD_D 0x2000053
+#define MASK_FADD_D 0xfe00007f
+#define MATCH_FADD_H 0x4000053
+#define MASK_FADD_H 0xfe00007f
+#define MATCH_FADD_Q 0x6000053
+#define MASK_FADD_Q 0xfe00007f
+#define MATCH_FADD_S 0x53
+#define MASK_FADD_S 0xfe00007f
+#define MATCH_FCLASS_D 0xe2001053
+#define MASK_FCLASS_D 0xfff0707f
+#define MATCH_FCLASS_H 0xe4001053
+#define MASK_FCLASS_H 0xfff0707f
+#define MATCH_FCLASS_Q 0xe6001053
+#define MASK_FCLASS_Q 0xfff0707f
+#define MATCH_FCLASS_S 0xe0001053
+#define MASK_FCLASS_S 0xfff0707f
+#define MATCH_FCVT_D_H 0x42200053
+#define MASK_FCVT_D_H 0xfff0007f
+#define MATCH_FCVT_D_L 0xd2200053
+#define MASK_FCVT_D_L 0xfff0007f
+#define MATCH_FCVT_D_LU 0xd2300053
+#define MASK_FCVT_D_LU 0xfff0007f
+#define MATCH_FCVT_D_Q 0x42300053
+#define MASK_FCVT_D_Q 0xfff0007f
+#define MATCH_FCVT_D_S 0x42000053
+#define MASK_FCVT_D_S 0xfff0007f
+#define MATCH_FCVT_D_W 0xd2000053
+#define MASK_FCVT_D_W 0xfff0007f
+#define MATCH_FCVT_D_WU 0xd2100053
+#define MASK_FCVT_D_WU 0xfff0007f
+#define MATCH_FCVT_H_D 0x44100053
+#define MASK_FCVT_H_D 0xfff0007f
+#define MATCH_FCVT_H_L 0xd4200053
+#define MASK_FCVT_H_L 0xfff0007f
+#define MATCH_FCVT_H_LU 0xd4300053
+#define MASK_FCVT_H_LU 0xfff0007f
+#define MATCH_FCVT_H_Q 0x44300053
+#define MASK_FCVT_H_Q 0xfff0007f
+#define MATCH_FCVT_H_S 0x44000053
+#define MASK_FCVT_H_S 0xfff0007f
+#define MATCH_FCVT_H_W 0xd4000053
+#define MASK_FCVT_H_W 0xfff0007f
+#define MATCH_FCVT_H_WU 0xd4100053
+#define MASK_FCVT_H_WU 0xfff0007f
+#define MATCH_FCVT_L_D 0xc2200053
+#define MASK_FCVT_L_D 0xfff0007f
+#define MATCH_FCVT_L_H 0xc4200053
+#define MASK_FCVT_L_H 0xfff0007f
+#define MATCH_FCVT_L_Q 0xc6200053
+#define MASK_FCVT_L_Q 0xfff0007f
+#define MATCH_FCVT_L_S 0xc0200053
+#define MASK_FCVT_L_S 0xfff0007f
+#define MATCH_FCVT_LU_D 0xc2300053
+#define MASK_FCVT_LU_D 0xfff0007f
+#define MATCH_FCVT_LU_H 0xc4300053
+#define MASK_FCVT_LU_H 0xfff0007f
+#define MATCH_FCVT_LU_Q 0xc6300053
+#define MASK_FCVT_LU_Q 0xfff0007f
+#define MATCH_FCVT_LU_S 0xc0300053
+#define MASK_FCVT_LU_S 0xfff0007f
+#define MATCH_FCVT_Q_D 0x46100053
+#define MASK_FCVT_Q_D 0xfff0007f
+#define MATCH_FCVT_Q_H 0x46200053
+#define MASK_FCVT_Q_H 0xfff0007f
+#define MATCH_FCVT_Q_L 0xd6200053
+#define MASK_FCVT_Q_L 0xfff0007f
+#define MATCH_FCVT_Q_LU 0xd6300053
+#define MASK_FCVT_Q_LU 0xfff0007f
+#define MATCH_FCVT_Q_S 0x46000053
+#define MASK_FCVT_Q_S 0xfff0007f
+#define MATCH_FCVT_Q_W 0xd6000053
+#define MASK_FCVT_Q_W 0xfff0007f
+#define MATCH_FCVT_Q_WU 0xd6100053
+#define MASK_FCVT_Q_WU 0xfff0007f
+#define MATCH_FCVT_S_D 0x40100053
+#define MASK_FCVT_S_D 0xfff0007f
+#define MATCH_FCVT_S_H 0x40200053
+#define MASK_FCVT_S_H 0xfff0007f
+#define MATCH_FCVT_S_L 0xd0200053
+#define MASK_FCVT_S_L 0xfff0007f
+#define MATCH_FCVT_S_LU 0xd0300053
+#define MASK_FCVT_S_LU 0xfff0007f
+#define MATCH_FCVT_S_Q 0x40300053
+#define MASK_FCVT_S_Q 0xfff0007f
+#define MATCH_FCVT_S_W 0xd0000053
+#define MASK_FCVT_S_W 0xfff0007f
+#define MATCH_FCVT_S_WU 0xd0100053
+#define MASK_FCVT_S_WU 0xfff0007f
+#define MATCH_FCVT_W_D 0xc2000053
+#define MASK_FCVT_W_D 0xfff0007f
+#define MATCH_FCVT_W_H 0xc4000053
+#define MASK_FCVT_W_H 0xfff0007f
+#define MATCH_FCVT_W_Q 0xc6000053
+#define MASK_FCVT_W_Q 0xfff0007f
+#define MATCH_FCVT_W_S 0xc0000053
+#define MASK_FCVT_W_S 0xfff0007f
+#define MATCH_FCVT_WU_D 0xc2100053
+#define MASK_FCVT_WU_D 0xfff0007f
+#define MATCH_FCVT_WU_H 0xc4100053
+#define MASK_FCVT_WU_H 0xfff0007f
+#define MATCH_FCVT_WU_Q 0xc6100053
+#define MASK_FCVT_WU_Q 0xfff0007f
+#define MATCH_FCVT_WU_S 0xc0100053
+#define MASK_FCVT_WU_S 0xfff0007f
+#define MATCH_FDIV_D 0x1a000053
+#define MASK_FDIV_D 0xfe00007f
+#define MATCH_FDIV_H 0x1c000053
+#define MASK_FDIV_H 0xfe00007f
+#define MATCH_FDIV_Q 0x1e000053
+#define MASK_FDIV_Q 0xfe00007f
+#define MATCH_FDIV_S 0x18000053
+#define MASK_FDIV_S 0xfe00007f
+#define MATCH_FENCE 0xf
+#define MASK_FENCE 0x707f
+#define MATCH_FENCE_I 0x100f
+#define MASK_FENCE_I 0x707f
+#define MATCH_FEQ_D 0xa2002053
+#define MASK_FEQ_D 0xfe00707f
+#define MATCH_FEQ_H 0xa4002053
+#define MASK_FEQ_H 0xfe00707f
+#define MATCH_FEQ_Q 0xa6002053
+#define MASK_FEQ_Q 0xfe00707f
+#define MATCH_FEQ_S 0xa0002053
+#define MASK_FEQ_S 0xfe00707f
+#define MATCH_FLD 0x3007
+#define MASK_FLD 0x707f
+#define MATCH_FLE_D 0xa2000053
+#define MASK_FLE_D 0xfe00707f
+#define MATCH_FLE_H 0xa4000053
+#define MASK_FLE_H 0xfe00707f
+#define MATCH_FLE_Q 0xa6000053
+#define MASK_FLE_Q 0xfe00707f
+#define MATCH_FLE_S 0xa0000053
+#define MASK_FLE_S 0xfe00707f
+#define MATCH_FLH 0x1007
+#define MASK_FLH 0x707f
+#define MATCH_FLQ 0x4007
+#define MASK_FLQ 0x707f
+#define MATCH_FLT_D 0xa2001053
+#define MASK_FLT_D 0xfe00707f
+#define MATCH_FLT_H 0xa4001053
+#define MASK_FLT_H 0xfe00707f
+#define MATCH_FLT_Q 0xa6001053
+#define MASK_FLT_Q 0xfe00707f
+#define MATCH_FLT_S 0xa0001053
+#define MASK_FLT_S 0xfe00707f
+#define MATCH_FLW 0x2007
+#define MASK_FLW 0x707f
+#define MATCH_FMADD_D 0x2000043
+#define MASK_FMADD_D 0x600007f
+#define MATCH_FMADD_H 0x4000043
+#define MASK_FMADD_H 0x600007f
+#define MATCH_FMADD_Q 0x6000043
+#define MASK_FMADD_Q 0x600007f
+#define MATCH_FMADD_S 0x43
+#define MASK_FMADD_S 0x600007f
+#define MATCH_FMAX_D 0x2a001053
+#define MASK_FMAX_D 0xfe00707f
+#define MATCH_FMAX_H 0x2c001053
+#define MASK_FMAX_H 0xfe00707f
+#define MATCH_FMAX_Q 0x2e001053
+#define MASK_FMAX_Q 0xfe00707f
+#define MATCH_FMAX_S 0x28001053
+#define MASK_FMAX_S 0xfe00707f
+#define MATCH_FMIN_D 0x2a000053
+#define MASK_FMIN_D 0xfe00707f
+#define MATCH_FMIN_H 0x2c000053
+#define MASK_FMIN_H 0xfe00707f
+#define MATCH_FMIN_Q 0x2e000053
+#define MASK_FMIN_Q 0xfe00707f
+#define MATCH_FMIN_S 0x28000053
+#define MASK_FMIN_S 0xfe00707f
+#define MATCH_FMSUB_D 0x2000047
+#define MASK_FMSUB_D 0x600007f
+#define MATCH_FMSUB_H 0x4000047
+#define MASK_FMSUB_H 0x600007f
+#define MATCH_FMSUB_Q 0x6000047
+#define MASK_FMSUB_Q 0x600007f
+#define MATCH_FMSUB_S 0x47
+#define MASK_FMSUB_S 0x600007f
+#define MATCH_FMUL_D 0x12000053
+#define MASK_FMUL_D 0xfe00007f
+#define MATCH_FMUL_H 0x14000053
+#define MASK_FMUL_H 0xfe00007f
+#define MATCH_FMUL_Q 0x16000053
+#define MASK_FMUL_Q 0xfe00007f
+#define MATCH_FMUL_S 0x10000053
+#define MASK_FMUL_S 0xfe00007f
+#define MATCH_FMV_D_X 0xf2000053
+#define MASK_FMV_D_X 0xfff0707f
+#define MATCH_FMV_H_X 0xf4000053
+#define MASK_FMV_H_X 0xfff0707f
+#define MATCH_FMV_W_X 0xf0000053
+#define MASK_FMV_W_X 0xfff0707f
+#define MATCH_FMV_X_D 0xe2000053
+#define MASK_FMV_X_D 0xfff0707f
+#define MATCH_FMV_X_H 0xe4000053
+#define MASK_FMV_X_H 0xfff0707f
+#define MATCH_FMV_X_W 0xe0000053
+#define MASK_FMV_X_W 0xfff0707f
+#define MATCH_FNMADD_D 0x200004f
+#define MASK_FNMADD_D 0x600007f
+#define MATCH_FNMADD_H 0x400004f
+#define MASK_FNMADD_H 0x600007f
+#define MATCH_FNMADD_Q 0x600004f
+#define MASK_FNMADD_Q 0x600007f
+#define MATCH_FNMADD_S 0x4f
+#define MASK_FNMADD_S 0x600007f
+#define MATCH_FNMSUB_D 0x200004b
+#define MASK_FNMSUB_D 0x600007f
+#define MATCH_FNMSUB_H 0x400004b
+#define MASK_FNMSUB_H 0x600007f
+#define MATCH_FNMSUB_Q 0x600004b
+#define MASK_FNMSUB_Q 0x600007f
+#define MATCH_FNMSUB_S 0x4b
+#define MASK_FNMSUB_S 0x600007f
+#define MATCH_FSD 0x3027
+#define MASK_FSD 0x707f
+#define MATCH_FSGNJ_D 0x22000053
+#define MASK_FSGNJ_D 0xfe00707f
+#define MATCH_FSGNJ_H 0x24000053
+#define MASK_FSGNJ_H 0xfe00707f
+#define MATCH_FSGNJ_Q 0x26000053
+#define MASK_FSGNJ_Q 0xfe00707f
+#define MATCH_FSGNJ_S 0x20000053
+#define MASK_FSGNJ_S 0xfe00707f
+#define MATCH_FSGNJN_D 0x22001053
+#define MASK_FSGNJN_D 0xfe00707f
+#define MATCH_FSGNJN_H 0x24001053
+#define MASK_FSGNJN_H 0xfe00707f
+#define MATCH_FSGNJN_Q 0x26001053
+#define MASK_FSGNJN_Q 0xfe00707f
+#define MATCH_FSGNJN_S 0x20001053
+#define MASK_FSGNJN_S 0xfe00707f
+#define MATCH_FSGNJX_D 0x22002053
+#define MASK_FSGNJX_D 0xfe00707f
+#define MATCH_FSGNJX_H 0x24002053
+#define MASK_FSGNJX_H 0xfe00707f
+#define MATCH_FSGNJX_Q 0x26002053
+#define MASK_FSGNJX_Q 0xfe00707f
+#define MATCH_FSGNJX_S 0x20002053
+#define MASK_FSGNJX_S 0xfe00707f
+#define MATCH_FSH 0x1027
+#define MASK_FSH 0x707f
+#define MATCH_FSL 0x4001033
+#define MASK_FSL 0x600707f
+#define MATCH_FSLW 0x400103b
+#define MASK_FSLW 0x600707f
+#define MATCH_FSQ 0x4027
+#define MASK_FSQ 0x707f
+#define MATCH_FSQRT_D 0x5a000053
+#define MASK_FSQRT_D 0xfff0007f
+#define MATCH_FSQRT_H 0x5c000053
+#define MASK_FSQRT_H 0xfff0007f
+#define MATCH_FSQRT_Q 0x5e000053
+#define MASK_FSQRT_Q 0xfff0007f
+#define MATCH_FSQRT_S 0x58000053
+#define MASK_FSQRT_S 0xfff0007f
+#define MATCH_FSR 0x4005033
+#define MASK_FSR 0x600707f
+#define MATCH_FSRI 0x4005013
+#define MASK_FSRI 0x400707f
+#define MATCH_FSRIW 0x400501b
+#define MASK_FSRIW 0x600707f
+#define MATCH_FSRW 0x400503b
+#define MASK_FSRW 0x600707f
+#define MATCH_FSUB_D 0xa000053
+#define MASK_FSUB_D 0xfe00007f
+#define MATCH_FSUB_H 0xc000053
+#define MASK_FSUB_H 0xfe00007f
+#define MATCH_FSUB_Q 0xe000053
+#define MASK_FSUB_Q 0xfe00007f
+#define MATCH_FSUB_S 0x8000053
+#define MASK_FSUB_S 0xfe00007f
+#define MATCH_FSW 0x2027
+#define MASK_FSW 0x707f
+#define MATCH_GORC 0x28005033
+#define MASK_GORC 0xfe00707f
+#define MATCH_GORCI 0x28005013
+#define MASK_GORCI 0xfc00707f
+#define MATCH_GORCIW 0x2800501b
+#define MASK_GORCIW 0xfe00707f
+#define MATCH_GORCW 0x2800503b
+#define MASK_GORCW 0xfe00707f
+#define MATCH_GREV 0x68005033
+#define MASK_GREV 0xfe00707f
+#define MATCH_GREVI 0x68005013
+#define MASK_GREVI 0xfc00707f
+#define MATCH_GREVIW 0x6800501b
+#define MASK_GREVIW 0xfe00707f
+#define MATCH_GREVW 0x6800503b
+#define MASK_GREVW 0xfe00707f
+#define MATCH_HFENCE_GVMA 0x62000073
+#define MASK_HFENCE_GVMA 0xfe007fff
+#define MATCH_HFENCE_VVMA 0x22000073
+#define MASK_HFENCE_VVMA 0xfe007fff
+#define MATCH_HINVAL_GVMA 0x66000073
+#define MASK_HINVAL_GVMA 0xfe007fff
+#define MATCH_HINVAL_VVMA 0x26000073
+#define MASK_HINVAL_VVMA 0xfe007fff
+#define MATCH_HLV_B 0x60004073
+#define MASK_HLV_B 0xfff0707f
+#define MATCH_HLV_BU 0x60104073
+#define MASK_HLV_BU 0xfff0707f
+#define MATCH_HLV_D 0x6c004073
+#define MASK_HLV_D 0xfff0707f
+#define MATCH_HLV_H 0x64004073
+#define MASK_HLV_H 0xfff0707f
+#define MATCH_HLV_HU 0x64104073
+#define MASK_HLV_HU 0xfff0707f
+#define MATCH_HLV_W 0x68004073
+#define MASK_HLV_W 0xfff0707f
+#define MATCH_HLV_WU 0x68104073
+#define MASK_HLV_WU 0xfff0707f
+#define MATCH_HLVX_HU 0x64304073
+#define MASK_HLVX_HU 0xfff0707f
+#define MATCH_HLVX_WU 0x68304073
+#define MASK_HLVX_WU 0xfff0707f
+#define MATCH_HSV_B 0x62004073
+#define MASK_HSV_B 0xfe007fff
+#define MATCH_HSV_D 0x6e004073
+#define MASK_HSV_D 0xfe007fff
+#define MATCH_HSV_H 0x66004073
+#define MASK_HSV_H 0xfe007fff
+#define MATCH_HSV_W 0x6a004073
+#define MASK_HSV_W 0xfe007fff
#define MATCH_INSB 0xac000077
-#define MASK_INSB 0xff80707f
-#define MATCH_KABS8 0xad000077
-#define MASK_KABS8 0xfff0707f
+#define MASK_INSB 0xff80707f
+#define MATCH_JAL 0x6f
+#define MASK_JAL 0x7f
+#define MATCH_JALR 0x67
+#define MASK_JALR 0x707f
#define MATCH_KABS16 0xad100077
-#define MASK_KABS16 0xfff0707f
+#define MASK_KABS16 0xfff0707f
+#define MATCH_KABS32 0xad200077
+#define MASK_KABS32 0xfff0707f
+#define MATCH_KABS8 0xad000077
+#define MASK_KABS8 0xfff0707f
#define MATCH_KABSW 0xad400077
-#define MASK_KABSW 0xfff0707f
-#define MATCH_KADD8 0x18000077
-#define MASK_KADD8 0xfe00707f
+#define MASK_KABSW 0xfff0707f
#define MATCH_KADD16 0x10000077
-#define MASK_KADD16 0xfe00707f
+#define MASK_KADD16 0xfe00707f
+#define MATCH_KADD32 0x10002077
+#define MASK_KADD32 0xfe00707f
#define MATCH_KADD64 0x90001077
-#define MASK_KADD64 0xfe00707f
+#define MASK_KADD64 0xfe00707f
+#define MATCH_KADD8 0x18000077
+#define MASK_KADD8 0xfe00707f
#define MATCH_KADDH 0x4001077
-#define MASK_KADDH 0xfe00707f
+#define MASK_KADDH 0xfe00707f
#define MATCH_KADDW 0x1077
-#define MASK_KADDW 0xfe00707f
+#define MASK_KADDW 0xfe00707f
#define MATCH_KCRAS16 0x14000077
-#define MASK_KCRAS16 0xfe00707f
+#define MASK_KCRAS16 0xfe00707f
+#define MATCH_KCRAS32 0x14002077
+#define MASK_KCRAS32 0xfe00707f
#define MATCH_KCRSA16 0x16000077
-#define MASK_KCRSA16 0xfe00707f
-#define MATCH_KDMBB 0xa001077
-#define MASK_KDMBB 0xfe00707f
-#define MATCH_KDMBT 0x1a001077
-#define MASK_KDMBT 0xfe00707f
-#define MATCH_KDMTT 0x2a001077
-#define MASK_KDMTT 0xfe00707f
+#define MASK_KCRSA16 0xfe00707f
+#define MATCH_KCRSA32 0x16002077
+#define MASK_KCRSA32 0xfe00707f
#define MATCH_KDMABB 0xd2001077
-#define MASK_KDMABB 0xfe00707f
+#define MASK_KDMABB 0xfe00707f
+#define MATCH_KDMABB16 0xd8001077
+#define MASK_KDMABB16 0xfe00707f
#define MATCH_KDMABT 0xe2001077
-#define MASK_KDMABT 0xfe00707f
+#define MASK_KDMABT 0xfe00707f
+#define MATCH_KDMABT16 0xe8001077
+#define MASK_KDMABT16 0xfe00707f
#define MATCH_KDMATT 0xf2001077
-#define MASK_KDMATT 0xfe00707f
-#define MATCH_KHM8 0x8e000077
-#define MASK_KHM8 0xfe00707f
-#define MATCH_KHMX8 0x9e000077
-#define MASK_KHMX8 0xfe00707f
+#define MASK_KDMATT 0xfe00707f
+#define MATCH_KDMATT16 0xf8001077
+#define MASK_KDMATT16 0xfe00707f
+#define MATCH_KDMBB 0xa001077
+#define MASK_KDMBB 0xfe00707f
+#define MATCH_KDMBB16 0xda001077
+#define MASK_KDMBB16 0xfe00707f
+#define MATCH_KDMBT 0x1a001077
+#define MASK_KDMBT 0xfe00707f
+#define MATCH_KDMBT16 0xea001077
+#define MASK_KDMBT16 0xfe00707f
+#define MATCH_KDMTT 0x2a001077
+#define MASK_KDMTT 0xfe00707f
+#define MATCH_KDMTT16 0xfa001077
+#define MASK_KDMTT16 0xfe00707f
#define MATCH_KHM16 0x86000077
-#define MASK_KHM16 0xfe00707f
-#define MATCH_KHMX16 0x96000077
-#define MASK_KHMX16 0xfe00707f
+#define MASK_KHM16 0xfe00707f
+#define MATCH_KHM8 0x8e000077
+#define MASK_KHM8 0xfe00707f
#define MATCH_KHMBB 0xc001077
-#define MASK_KHMBB 0xfe00707f
+#define MASK_KHMBB 0xfe00707f
+#define MATCH_KHMBB16 0xdc001077
+#define MASK_KHMBB16 0xfe00707f
#define MATCH_KHMBT 0x1c001077
-#define MASK_KHMBT 0xfe00707f
+#define MASK_KHMBT 0xfe00707f
+#define MATCH_KHMBT16 0xec001077
+#define MASK_KHMBT16 0xfe00707f
#define MATCH_KHMTT 0x2c001077
-#define MASK_KHMTT 0xfe00707f
+#define MASK_KHMTT 0xfe00707f
+#define MATCH_KHMTT16 0xfc001077
+#define MASK_KHMTT16 0xfe00707f
+#define MATCH_KHMX16 0x96000077
+#define MASK_KHMX16 0xfe00707f
+#define MATCH_KHMX8 0x9e000077
+#define MASK_KHMX8 0xfe00707f
#define MATCH_KMABB 0x5a001077
-#define MASK_KMABB 0xfe00707f
+#define MASK_KMABB 0xfe00707f
+#define MATCH_KMABB32 0x5a002077
+#define MASK_KMABB32 0xfe00707f
#define MATCH_KMABT 0x6a001077
-#define MASK_KMABT 0xfe00707f
-#define MATCH_KMATT 0x7a001077
-#define MASK_KMATT 0xfe00707f
+#define MASK_KMABT 0xfe00707f
+#define MATCH_KMABT32 0x6a002077
+#define MASK_KMABT32 0xfe00707f
#define MATCH_KMADA 0x48001077
-#define MASK_KMADA 0xfe00707f
-#define MATCH_KMAXDA 0x4a001077
-#define MASK_KMAXDA 0xfe00707f
-#define MATCH_KMADS 0x5c001077
-#define MASK_KMADS 0xfe00707f
+#define MASK_KMADA 0xfe00707f
#define MATCH_KMADRS 0x6c001077
-#define MASK_KMADRS 0xfe00707f
-#define MATCH_KMAXDS 0x7c001077
-#define MASK_KMAXDS 0xfe00707f
+#define MASK_KMADRS 0xfe00707f
+#define MATCH_KMADRS32 0x6c002077
+#define MASK_KMADRS32 0xfe00707f
+#define MATCH_KMADS 0x5c001077
+#define MASK_KMADS 0xfe00707f
+#define MATCH_KMADS32 0x5c002077
+#define MASK_KMADS32 0xfe00707f
#define MATCH_KMAR64 0x94001077
-#define MASK_KMAR64 0xfe00707f
+#define MASK_KMAR64 0xfe00707f
+#define MATCH_KMATT 0x7a001077
+#define MASK_KMATT 0xfe00707f
+#define MATCH_KMATT32 0x7a002077
+#define MASK_KMATT32 0xfe00707f
+#define MATCH_KMAXDA 0x4a001077
+#define MASK_KMAXDA 0xfe00707f
+#define MATCH_KMAXDA32 0x4a002077
+#define MASK_KMAXDA32 0xfe00707f
+#define MATCH_KMAXDS 0x7c001077
+#define MASK_KMAXDS 0xfe00707f
+#define MATCH_KMAXDS32 0x7c002077
+#define MASK_KMAXDS32 0xfe00707f
#define MATCH_KMDA 0x38001077
-#define MASK_KMDA 0xfe00707f
-#define MATCH_KMXDA 0x3a001077
-#define MASK_KMXDA 0xfe00707f
+#define MASK_KMDA 0xfe00707f
+#define MATCH_KMDA32 0x38002077
+#define MASK_KMDA32 0xfe00707f
#define MATCH_KMMAC 0x60001077
-#define MASK_KMMAC 0xfe00707f
+#define MASK_KMMAC 0xfe00707f
#define MATCH_KMMAC_U 0x70001077
-#define MASK_KMMAC_U 0xfe00707f
+#define MASK_KMMAC_U 0xfe00707f
#define MATCH_KMMAWB 0x46001077
-#define MASK_KMMAWB 0xfe00707f
-#define MATCH_KMMAWB_U 0x56001077
-#define MASK_KMMAWB_U 0xfe00707f
+#define MASK_KMMAWB 0xfe00707f
#define MATCH_KMMAWB2 0xce001077
-#define MASK_KMMAWB2 0xfe00707f
+#define MASK_KMMAWB2 0xfe00707f
#define MATCH_KMMAWB2_U 0xde001077
-#define MASK_KMMAWB2_U 0xfe00707f
+#define MASK_KMMAWB2_U 0xfe00707f
+#define MATCH_KMMAWB_U 0x56001077
+#define MASK_KMMAWB_U 0xfe00707f
#define MATCH_KMMAWT 0x66001077
-#define MASK_KMMAWT 0xfe00707f
-#define MATCH_KMMAWT_U 0x76001077
-#define MASK_KMMAWT_U 0xfe00707f
+#define MASK_KMMAWT 0xfe00707f
#define MATCH_KMMAWT2 0xee001077
-#define MASK_KMMAWT2 0xfe00707f
+#define MASK_KMMAWT2 0xfe00707f
#define MATCH_KMMAWT2_U 0xfe001077
-#define MASK_KMMAWT2_U 0xfe00707f
+#define MASK_KMMAWT2_U 0xfe00707f
+#define MATCH_KMMAWT_U 0x76001077
+#define MASK_KMMAWT_U 0xfe00707f
#define MATCH_KMMSB 0x42001077
-#define MASK_KMMSB 0xfe00707f
+#define MASK_KMMSB 0xfe00707f
#define MATCH_KMMSB_U 0x52001077
-#define MASK_KMMSB_U 0xfe00707f
+#define MASK_KMMSB_U 0xfe00707f
#define MATCH_KMMWB2 0x8e001077
-#define MASK_KMMWB2 0xfe00707f
+#define MASK_KMMWB2 0xfe00707f
#define MATCH_KMMWB2_U 0x9e001077
-#define MASK_KMMWB2_U 0xfe00707f
+#define MASK_KMMWB2_U 0xfe00707f
#define MATCH_KMMWT2 0xae001077
-#define MASK_KMMWT2 0xfe00707f
+#define MASK_KMMWT2 0xfe00707f
#define MATCH_KMMWT2_U 0xbe001077
-#define MASK_KMMWT2_U 0xfe00707f
+#define MASK_KMMWT2_U 0xfe00707f
#define MATCH_KMSDA 0x4c001077
-#define MASK_KMSDA 0xfe00707f
-#define MATCH_KMSXDA 0x4e001077
-#define MASK_KMSXDA 0xfe00707f
+#define MASK_KMSDA 0xfe00707f
+#define MATCH_KMSDA32 0x4c002077
+#define MASK_KMSDA32 0xfe00707f
#define MATCH_KMSR64 0x96001077
-#define MASK_KMSR64 0xfe00707f
-#define MATCH_KSLLW 0x26001077
-#define MASK_KSLLW 0xfe00707f
-#define MATCH_KSLLIW 0x36001077
-#define MASK_KSLLIW 0xfe00707f
-#define MATCH_KSLL8 0x6c000077
-#define MASK_KSLL8 0xfe00707f
-#define MATCH_KSLLI8 0x7c800077
-#define MASK_KSLLI8 0xff80707f
+#define MASK_KMSR64 0xfe00707f
+#define MATCH_KMSXDA 0x4e001077
+#define MASK_KMSXDA 0xfe00707f
+#define MATCH_KMSXDA32 0x4e002077
+#define MASK_KMSXDA32 0xfe00707f
+#define MATCH_KMXDA 0x3a001077
+#define MASK_KMXDA 0xfe00707f
+#define MATCH_KMXDA32 0x3a002077
+#define MASK_KMXDA32 0xfe00707f
#define MATCH_KSLL16 0x64000077
-#define MASK_KSLL16 0xfe00707f
+#define MASK_KSLL16 0xfe00707f
+#define MATCH_KSLL32 0x64002077
+#define MASK_KSLL32 0xfe00707f
+#define MATCH_KSLL8 0x6c000077
+#define MASK_KSLL8 0xfe00707f
#define MATCH_KSLLI16 0x75000077
-#define MASK_KSLLI16 0xff00707f
-#define MATCH_KSLRA8 0x5e000077
-#define MASK_KSLRA8 0xfe00707f
-#define MATCH_KSLRA8_U 0x6e000077
-#define MASK_KSLRA8_U 0xfe00707f
+#define MASK_KSLLI16 0xff00707f
+#define MATCH_KSLLI32 0x84002077
+#define MASK_KSLLI32 0xfe00707f
+#define MATCH_KSLLI8 0x7c800077
+#define MASK_KSLLI8 0xff80707f
+#define MATCH_KSLLIW 0x36001077
+#define MASK_KSLLIW 0xfe00707f
+#define MATCH_KSLLW 0x26001077
+#define MASK_KSLLW 0xfe00707f
#define MATCH_KSLRA16 0x56000077
-#define MASK_KSLRA16 0xfe00707f
+#define MASK_KSLRA16 0xfe00707f
#define MATCH_KSLRA16_U 0x66000077
-#define MASK_KSLRA16_U 0xfe00707f
+#define MASK_KSLRA16_U 0xfe00707f
+#define MATCH_KSLRA32 0x56002077
+#define MASK_KSLRA32 0xfe00707f
+#define MATCH_KSLRA32_U 0x66002077
+#define MASK_KSLRA32_U 0xfe00707f
+#define MATCH_KSLRA8 0x5e000077
+#define MASK_KSLRA8 0xfe00707f
+#define MATCH_KSLRA8_U 0x6e000077
+#define MASK_KSLRA8_U 0xfe00707f
#define MATCH_KSLRAW 0x6e001077
-#define MASK_KSLRAW 0xfe00707f
+#define MASK_KSLRAW 0xfe00707f
#define MATCH_KSLRAW_U 0x7e001077
-#define MASK_KSLRAW_U 0xfe00707f
+#define MASK_KSLRAW_U 0xfe00707f
#define MATCH_KSTAS16 0xc4002077
-#define MASK_KSTAS16 0xfe00707f
+#define MASK_KSTAS16 0xfe00707f
+#define MATCH_KSTAS32 0xc0002077
+#define MASK_KSTAS32 0xfe00707f
#define MATCH_KSTSA16 0xc6002077
-#define MASK_KSTSA16 0xfe00707f
-#define MATCH_KSUB8 0x1a000077
-#define MASK_KSUB8 0xfe00707f
+#define MASK_KSTSA16 0xfe00707f
+#define MATCH_KSTSA32 0xc2002077
+#define MASK_KSTSA32 0xfe00707f
#define MATCH_KSUB16 0x12000077
-#define MASK_KSUB16 0xfe00707f
+#define MASK_KSUB16 0xfe00707f
+#define MATCH_KSUB32 0x12002077
+#define MASK_KSUB32 0xfe00707f
#define MATCH_KSUB64 0x92001077
-#define MASK_KSUB64 0xfe00707f
+#define MASK_KSUB64 0xfe00707f
+#define MATCH_KSUB8 0x1a000077
+#define MASK_KSUB8 0xfe00707f
#define MATCH_KSUBH 0x6001077
-#define MASK_KSUBH 0xfe00707f
+#define MASK_KSUBH 0xfe00707f
#define MATCH_KSUBW 0x2001077
-#define MASK_KSUBW 0xfe00707f
+#define MASK_KSUBW 0xfe00707f
#define MATCH_KWMMUL 0x62001077
-#define MASK_KWMMUL 0xfe00707f
+#define MASK_KWMMUL 0xfe00707f
#define MATCH_KWMMUL_U 0x72001077
-#define MASK_KWMMUL_U 0xfe00707f
+#define MASK_KWMMUL_U 0xfe00707f
+#define MATCH_LB 0x3
+#define MASK_LB 0x707f
+#define MATCH_LBU 0x4003
+#define MASK_LBU 0x707f
+#define MATCH_LD 0x3003
+#define MASK_LD 0x707f
+#define MATCH_LH 0x1003
+#define MASK_LH 0x707f
+#define MATCH_LHU 0x5003
+#define MASK_LHU 0x707f
+#define MATCH_LR_D 0x1000302f
+#define MASK_LR_D 0xf9f0707f
+#define MATCH_LR_W 0x1000202f
+#define MASK_LR_W 0xf9f0707f
+#define MATCH_LUI 0x37
+#define MASK_LUI 0x7f
+#define MATCH_LW 0x2003
+#define MASK_LW 0x707f
+#define MATCH_LWU 0x6003
+#define MASK_LWU 0x707f
#define MATCH_MADDR32 0xc4001077
-#define MASK_MADDR32 0xfe00707f
+#define MASK_MADDR32 0xfe00707f
+#define MATCH_MAX 0xa006033
+#define MASK_MAX 0xfe00707f
+#define MATCH_MAXU 0xa007033
+#define MASK_MAXU 0xfe00707f
#define MATCH_MAXW 0xf2000077
-#define MASK_MAXW 0xfe00707f
+#define MASK_MAXW 0xfe00707f
+#define MATCH_MIN 0xa004033
+#define MASK_MIN 0xfe00707f
+#define MATCH_MINU 0xa005033
+#define MASK_MINU 0xfe00707f
#define MATCH_MINW 0xf0000077
-#define MASK_MINW 0xfe00707f
+#define MASK_MINW 0xfe00707f
+#define MATCH_MRET 0x30200073
+#define MASK_MRET 0xffffffff
#define MATCH_MSUBR32 0xc6001077
-#define MASK_MSUBR32 0xfe00707f
+#define MASK_MSUBR32 0xfe00707f
+#define MATCH_MUL 0x2000033
+#define MASK_MUL 0xfe00707f
+#define MATCH_MULH 0x2001033
+#define MASK_MULH 0xfe00707f
+#define MATCH_MULHSU 0x2002033
+#define MASK_MULHSU 0xfe00707f
+#define MATCH_MULHU 0x2003033
+#define MASK_MULHU 0xfe00707f
#define MATCH_MULR64 0xf0001077
-#define MASK_MULR64 0xfe00707f
+#define MASK_MULR64 0xfe00707f
#define MATCH_MULSR64 0xe0001077
-#define MASK_MULSR64 0xfe00707f
+#define MASK_MULSR64 0xfe00707f
+#define MATCH_MULW 0x200003b
+#define MASK_MULW 0xfe00707f
+#define MATCH_OR 0x6033
+#define MASK_OR 0xfe00707f
+#define MATCH_ORI 0x6013
+#define MASK_ORI 0x707f
+#define MATCH_ORN 0x40006033
+#define MASK_ORN 0xfe00707f
+#define MATCH_PACK 0x8004033
+#define MASK_PACK 0xfe00707f
+#define MATCH_PACKH 0x8007033
+#define MASK_PACKH 0xfe00707f
+#define MATCH_PACKU 0x48004033
+#define MASK_PACKU 0xfe00707f
+#define MATCH_PACKUW 0x4800403b
+#define MASK_PACKUW 0xfe00707f
+#define MATCH_PACKW 0x800403b
+#define MASK_PACKW 0xfe00707f
+#define MATCH_PAUSE 0x100000f
+#define MASK_PAUSE 0xffffffff
#define MATCH_PBSAD 0xfc000077
-#define MASK_PBSAD 0xfe00707f
+#define MASK_PBSAD 0xfe00707f
#define MATCH_PBSADA 0xfe000077
-#define MASK_PBSADA 0xfe00707f
+#define MASK_PBSADA 0xfe00707f
#define MATCH_PKBB16 0xe001077
-#define MASK_PKBB16 0xfe00707f
+#define MASK_PKBB16 0xfe00707f
+#define MATCH_PKBB32 0xe002077
+#define MASK_PKBB32 0xfe00707f
#define MATCH_PKBT16 0x1e001077
-#define MASK_PKBT16 0xfe00707f
-#define MATCH_PKTT16 0x2e001077
-#define MASK_PKTT16 0xfe00707f
+#define MASK_PKBT16 0xfe00707f
+#define MATCH_PKBT32 0x1e002077
+#define MASK_PKBT32 0xfe00707f
#define MATCH_PKTB16 0x3e001077
-#define MASK_PKTB16 0xfe00707f
-#define MATCH_RADD8 0x8000077
-#define MASK_RADD8 0xfe00707f
+#define MASK_PKTB16 0xfe00707f
+#define MATCH_PKTB32 0x3e002077
+#define MASK_PKTB32 0xfe00707f
+#define MATCH_PKTT16 0x2e001077
+#define MASK_PKTT16 0xfe00707f
+#define MATCH_PKTT32 0x2e002077
+#define MASK_PKTT32 0xfe00707f
+#define MATCH_PREFETCH_I 0x6013
+#define MASK_PREFETCH_I 0x1f07fff
+#define MATCH_PREFETCH_R 0x106013
+#define MASK_PREFETCH_R 0x1f07fff
+#define MATCH_PREFETCH_W 0x306013
+#define MASK_PREFETCH_W 0x1f07fff
#define MATCH_RADD16 0x77
-#define MASK_RADD16 0xfe00707f
+#define MASK_RADD16 0xfe00707f
+#define MATCH_RADD32 0x2077
+#define MASK_RADD32 0xfe00707f
#define MATCH_RADD64 0x80001077
-#define MASK_RADD64 0xfe00707f
+#define MASK_RADD64 0xfe00707f
+#define MATCH_RADD8 0x8000077
+#define MASK_RADD8 0xfe00707f
#define MATCH_RADDW 0x20001077
-#define MASK_RADDW 0xfe00707f
+#define MASK_RADDW 0xfe00707f
#define MATCH_RCRAS16 0x4000077
-#define MASK_RCRAS16 0xfe00707f
+#define MASK_RCRAS16 0xfe00707f
+#define MATCH_RCRAS32 0x4002077
+#define MASK_RCRAS32 0xfe00707f
#define MATCH_RCRSA16 0x6000077
-#define MASK_RCRSA16 0xfe00707f
+#define MASK_RCRSA16 0xfe00707f
+#define MATCH_RCRSA32 0x6002077
+#define MASK_RCRSA32 0xfe00707f
+#define MATCH_REM 0x2006033
+#define MASK_REM 0xfe00707f
+#define MATCH_REMU 0x2007033
+#define MASK_REMU 0xfe00707f
+#define MATCH_REMUW 0x200703b
+#define MASK_REMUW 0xfe00707f
+#define MATCH_REMW 0x200603b
+#define MASK_REMW 0xfe00707f
+#define MATCH_ROL 0x60001033
+#define MASK_ROL 0xfe00707f
+#define MATCH_ROLW 0x6000103b
+#define MASK_ROLW 0xfe00707f
+#define MATCH_ROR 0x60005033
+#define MASK_ROR 0xfe00707f
+#define MATCH_RORI 0x60005013
+#define MASK_RORI 0xfc00707f
+#define MATCH_RORIW 0x6000501b
+#define MASK_RORIW 0xfe00707f
+#define MATCH_RORW 0x6000503b
+#define MASK_RORW 0xfe00707f
#define MATCH_RSTAS16 0xb4002077
-#define MASK_RSTAS16 0xfe00707f
+#define MASK_RSTAS16 0xfe00707f
+#define MATCH_RSTAS32 0xb0002077
+#define MASK_RSTAS32 0xfe00707f
#define MATCH_RSTSA16 0xb6002077
-#define MASK_RSTSA16 0xfe00707f
-#define MATCH_RSUB8 0xa000077
-#define MASK_RSUB8 0xfe00707f
+#define MASK_RSTSA16 0xfe00707f
+#define MATCH_RSTSA32 0xb2002077
+#define MASK_RSTSA32 0xfe00707f
#define MATCH_RSUB16 0x2000077
-#define MASK_RSUB16 0xfe00707f
+#define MASK_RSUB16 0xfe00707f
+#define MATCH_RSUB32 0x2002077
+#define MASK_RSUB32 0xfe00707f
#define MATCH_RSUB64 0x82001077
-#define MASK_RSUB64 0xfe00707f
+#define MASK_RSUB64 0xfe00707f
+#define MATCH_RSUB8 0xa000077
+#define MASK_RSUB8 0xfe00707f
#define MATCH_RSUBW 0x22001077
-#define MASK_RSUBW 0xfe00707f
-#define MATCH_SCLIP8 0x8c000077
-#define MASK_SCLIP8 0xff80707f
+#define MASK_RSUBW 0xfe00707f
+#define MATCH_SB 0x23
+#define MASK_SB 0x707f
+#define MATCH_SC_D 0x1800302f
+#define MASK_SC_D 0xf800707f
+#define MATCH_SC_W 0x1800202f
+#define MASK_SC_W 0xf800707f
#define MATCH_SCLIP16 0x84000077
-#define MASK_SCLIP16 0xff00707f
+#define MASK_SCLIP16 0xff00707f
#define MATCH_SCLIP32 0xe4000077
-#define MASK_SCLIP32 0xfe00707f
-#define MATCH_SCMPLE8 0x1e000077
-#define MASK_SCMPLE8 0xfe00707f
+#define MASK_SCLIP32 0xfe00707f
+#define MATCH_SCLIP8 0x8c000077
+#define MASK_SCLIP8 0xff80707f
#define MATCH_SCMPLE16 0x1c000077
-#define MASK_SCMPLE16 0xfe00707f
-#define MATCH_SCMPLT8 0xe000077
-#define MASK_SCMPLT8 0xfe00707f
+#define MASK_SCMPLE16 0xfe00707f
+#define MATCH_SCMPLE8 0x1e000077
+#define MASK_SCMPLE8 0xfe00707f
#define MATCH_SCMPLT16 0xc000077
-#define MASK_SCMPLT16 0xfe00707f
-#define MATCH_SLL8 0x5c000077
-#define MASK_SLL8 0xfe00707f
-#define MATCH_SLLI8 0x7c000077
-#define MASK_SLLI8 0xff80707f
+#define MASK_SCMPLT16 0xfe00707f
+#define MATCH_SCMPLT8 0xe000077
+#define MASK_SCMPLT8 0xfe00707f
+#define MATCH_SD 0x3023
+#define MASK_SD 0x707f
+#define MATCH_SEXT_B 0x60401013
+#define MASK_SEXT_B 0xfff0707f
+#define MATCH_SEXT_H 0x60501013
+#define MASK_SEXT_H 0xfff0707f
+#define MATCH_SFENCE_INVAL_IR 0x18100073
+#define MASK_SFENCE_INVAL_IR 0xffffffff
+#define MATCH_SFENCE_VMA 0x12000073
+#define MASK_SFENCE_VMA 0xfe007fff
+#define MATCH_SFENCE_W_INVAL 0x18000073
+#define MASK_SFENCE_W_INVAL 0xffffffff
+#define MATCH_SH 0x1023
+#define MASK_SH 0x707f
+#define MATCH_SH1ADD 0x20002033
+#define MASK_SH1ADD 0xfe00707f
+#define MATCH_SH1ADD_UW 0x2000203b
+#define MASK_SH1ADD_UW 0xfe00707f
+#define MATCH_SH2ADD 0x20004033
+#define MASK_SH2ADD 0xfe00707f
+#define MATCH_SH2ADD_UW 0x2000403b
+#define MASK_SH2ADD_UW 0xfe00707f
+#define MATCH_SH3ADD 0x20006033
+#define MASK_SH3ADD 0xfe00707f
+#define MATCH_SH3ADD_UW 0x2000603b
+#define MASK_SH3ADD_UW 0xfe00707f
+#define MATCH_SHA256SIG0 0x10201013
+#define MASK_SHA256SIG0 0xfff0707f
+#define MATCH_SHA256SIG1 0x10301013
+#define MASK_SHA256SIG1 0xfff0707f
+#define MATCH_SHA256SUM0 0x10001013
+#define MASK_SHA256SUM0 0xfff0707f
+#define MATCH_SHA256SUM1 0x10101013
+#define MASK_SHA256SUM1 0xfff0707f
+#define MATCH_SHA512SIG0 0x10601013
+#define MASK_SHA512SIG0 0xfff0707f
+#define MATCH_SHA512SIG0H 0x5c000033
+#define MASK_SHA512SIG0H 0xfe00707f
+#define MATCH_SHA512SIG0L 0x54000033
+#define MASK_SHA512SIG0L 0xfe00707f
+#define MATCH_SHA512SIG1 0x10701013
+#define MASK_SHA512SIG1 0xfff0707f
+#define MATCH_SHA512SIG1H 0x5e000033
+#define MASK_SHA512SIG1H 0xfe00707f
+#define MATCH_SHA512SIG1L 0x56000033
+#define MASK_SHA512SIG1L 0xfe00707f
+#define MATCH_SHA512SUM0 0x10401013
+#define MASK_SHA512SUM0 0xfff0707f
+#define MATCH_SHA512SUM0R 0x50000033
+#define MASK_SHA512SUM0R 0xfe00707f
+#define MATCH_SHA512SUM1 0x10501013
+#define MASK_SHA512SUM1 0xfff0707f
+#define MATCH_SHA512SUM1R 0x52000033
+#define MASK_SHA512SUM1R 0xfe00707f
+#define MATCH_SHFL 0x8001033
+#define MASK_SHFL 0xfe00707f
+#define MATCH_SHFLI 0x8001013
+#define MASK_SHFLI 0xfe00707f
+#define MATCH_SHFLW 0x800103b
+#define MASK_SHFLW 0xfe00707f
+#define MATCH_SINVAL_VMA 0x16000073
+#define MASK_SINVAL_VMA 0xfe007fff
+#define MATCH_SLL 0x1033
+#define MASK_SLL 0xfe00707f
#define MATCH_SLL16 0x54000077
-#define MASK_SLL16 0xfe00707f
+#define MASK_SLL16 0xfe00707f
+#define MATCH_SLL32 0x54002077
+#define MASK_SLL32 0xfe00707f
+#define MATCH_SLL8 0x5c000077
+#define MASK_SLL8 0xfe00707f
+#define MATCH_SLLI 0x1013
+#define MASK_SLLI 0xfc00707f
#define MATCH_SLLI16 0x74000077
-#define MASK_SLLI16 0xff00707f
+#define MASK_SLLI16 0xff00707f
+#define MATCH_SLLI32 0x74002077
+#define MASK_SLLI32 0xfe00707f
+#define MATCH_SLLI8 0x7c000077
+#define MASK_SLLI8 0xff80707f
+#define MATCH_SLLI_UW 0x800101b
+#define MASK_SLLI_UW 0xfc00707f
+#define MATCH_SLLIW 0x101b
+#define MASK_SLLIW 0xfe00707f
+#define MATCH_SLLW 0x103b
+#define MASK_SLLW 0xfe00707f
+#define MATCH_SLO 0x20001033
+#define MASK_SLO 0xfe00707f
+#define MATCH_SLOI 0x20001013
+#define MASK_SLOI 0xfc00707f
+#define MATCH_SLOIW 0x2000101b
+#define MASK_SLOIW 0xfe00707f
+#define MATCH_SLOW 0x2000103b
+#define MASK_SLOW 0xfe00707f
+#define MATCH_SLT 0x2033
+#define MASK_SLT 0xfe00707f
+#define MATCH_SLTI 0x2013
+#define MASK_SLTI 0x707f
+#define MATCH_SLTIU 0x3013
+#define MASK_SLTIU 0x707f
+#define MATCH_SLTU 0x3033
+#define MASK_SLTU 0xfe00707f
+#define MATCH_SM3P0 0x10801013
+#define MASK_SM3P0 0xfff0707f
+#define MATCH_SM3P1 0x10901013
+#define MASK_SM3P1 0xfff0707f
+#define MATCH_SM4ED 0x30000033
+#define MASK_SM4ED 0x3e00707f
+#define MATCH_SM4KS 0x34000033
+#define MASK_SM4KS 0x3e00707f
#define MATCH_SMAL 0x5e001077
-#define MASK_SMAL 0xfe00707f
+#define MASK_SMAL 0xfe00707f
#define MATCH_SMALBB 0x88001077
-#define MASK_SMALBB 0xfe00707f
+#define MASK_SMALBB 0xfe00707f
#define MATCH_SMALBT 0x98001077
-#define MASK_SMALBT 0xfe00707f
-#define MATCH_SMALTT 0xa8001077
-#define MASK_SMALTT 0xfe00707f
+#define MASK_SMALBT 0xfe00707f
#define MATCH_SMALDA 0x8c001077
-#define MASK_SMALDA 0xfe00707f
-#define MATCH_SMALXDA 0x9c001077
-#define MASK_SMALXDA 0xfe00707f
-#define MATCH_SMALDS 0x8a001077
-#define MASK_SMALDS 0xfe00707f
+#define MASK_SMALDA 0xfe00707f
#define MATCH_SMALDRS 0x9a001077
-#define MASK_SMALDRS 0xfe00707f
+#define MASK_SMALDRS 0xfe00707f
+#define MATCH_SMALDS 0x8a001077
+#define MASK_SMALDS 0xfe00707f
+#define MATCH_SMALTT 0xa8001077
+#define MASK_SMALTT 0xfe00707f
+#define MATCH_SMALXDA 0x9c001077
+#define MASK_SMALXDA 0xfe00707f
#define MATCH_SMALXDS 0xaa001077
-#define MASK_SMALXDS 0xfe00707f
-#define MATCH_SMAR64 0x84001077
-#define MASK_SMAR64 0xfe00707f
+#define MASK_SMALXDS 0xfe00707f
#define MATCH_SMAQA 0xc8000077
-#define MASK_SMAQA 0xfe00707f
+#define MASK_SMAQA 0xfe00707f
#define MATCH_SMAQA_SU 0xca000077
-#define MASK_SMAQA_SU 0xfe00707f
-#define MATCH_SMAX8 0x8a000077
-#define MASK_SMAX8 0xfe00707f
+#define MASK_SMAQA_SU 0xfe00707f
+#define MATCH_SMAR64 0x84001077
+#define MASK_SMAR64 0xfe00707f
#define MATCH_SMAX16 0x82000077
-#define MASK_SMAX16 0xfe00707f
+#define MASK_SMAX16 0xfe00707f
+#define MATCH_SMAX32 0x92002077
+#define MASK_SMAX32 0xfe00707f
+#define MATCH_SMAX8 0x8a000077
+#define MASK_SMAX8 0xfe00707f
#define MATCH_SMBB16 0x8001077
-#define MASK_SMBB16 0xfe00707f
+#define MASK_SMBB16 0xfe00707f
#define MATCH_SMBT16 0x18001077
-#define MASK_SMBT16 0xfe00707f
-#define MATCH_SMTT16 0x28001077
-#define MASK_SMTT16 0xfe00707f
-#define MATCH_SMDS 0x58001077
-#define MASK_SMDS 0xfe00707f
+#define MASK_SMBT16 0xfe00707f
+#define MATCH_SMBT32 0x18002077
+#define MASK_SMBT32 0xfe00707f
#define MATCH_SMDRS 0x68001077
-#define MASK_SMDRS 0xfe00707f
-#define MATCH_SMXDS 0x78001077
-#define MASK_SMXDS 0xfe00707f
-#define MATCH_SMIN8 0x88000077
-#define MASK_SMIN8 0xfe00707f
+#define MASK_SMDRS 0xfe00707f
+#define MATCH_SMDRS32 0x68002077
+#define MASK_SMDRS32 0xfe00707f
+#define MATCH_SMDS 0x58001077
+#define MASK_SMDS 0xfe00707f
+#define MATCH_SMDS32 0x58002077
+#define MASK_SMDS32 0xfe00707f
#define MATCH_SMIN16 0x80000077
-#define MASK_SMIN16 0xfe00707f
+#define MASK_SMIN16 0xfe00707f
+#define MATCH_SMIN32 0x90002077
+#define MASK_SMIN32 0xfe00707f
+#define MATCH_SMIN8 0x88000077
+#define MASK_SMIN8 0xfe00707f
#define MATCH_SMMUL 0x40001077
-#define MASK_SMMUL 0xfe00707f
+#define MASK_SMMUL 0xfe00707f
#define MATCH_SMMUL_U 0x50001077
-#define MASK_SMMUL_U 0xfe00707f
+#define MASK_SMMUL_U 0xfe00707f
#define MATCH_SMMWB 0x44001077
-#define MASK_SMMWB 0xfe00707f
+#define MASK_SMMWB 0xfe00707f
#define MATCH_SMMWB_U 0x54001077
-#define MASK_SMMWB_U 0xfe00707f
+#define MASK_SMMWB_U 0xfe00707f
#define MATCH_SMMWT 0x64001077
-#define MASK_SMMWT 0xfe00707f
+#define MASK_SMMWT 0xfe00707f
#define MATCH_SMMWT_U 0x74001077
-#define MASK_SMMWT_U 0xfe00707f
+#define MASK_SMMWT_U 0xfe00707f
#define MATCH_SMSLDA 0xac001077
-#define MASK_SMSLDA 0xfe00707f
+#define MASK_SMSLDA 0xfe00707f
#define MATCH_SMSLXDA 0xbc001077
-#define MASK_SMSLXDA 0xfe00707f
+#define MASK_SMSLXDA 0xfe00707f
#define MATCH_SMSR64 0x86001077
-#define MASK_SMSR64 0xfe00707f
-#define MATCH_SMUL8 0xa8000077
-#define MASK_SMUL8 0xfe00707f
-#define MATCH_SMULX8 0xaa000077
-#define MASK_SMULX8 0xfe00707f
+#define MASK_SMSR64 0xfe00707f
+#define MATCH_SMTT16 0x28001077
+#define MASK_SMTT16 0xfe00707f
+#define MATCH_SMTT32 0x28002077
+#define MASK_SMTT32 0xfe00707f
#define MATCH_SMUL16 0xa0000077
-#define MASK_SMUL16 0xfe00707f
+#define MASK_SMUL16 0xfe00707f
+#define MATCH_SMUL8 0xa8000077
+#define MASK_SMUL8 0xfe00707f
#define MATCH_SMULX16 0xa2000077
-#define MASK_SMULX16 0xfe00707f
-#define MATCH_SRA_U 0x24001077
-#define MASK_SRA_U 0xfe00707f
-#define MATCH_SRAI_U 0xd4001077
-#define MASK_SRAI_U 0xfc00707f
-#define MATCH_SRA8 0x58000077
-#define MASK_SRA8 0xfe00707f
-#define MATCH_SRA8_U 0x68000077
-#define MASK_SRA8_U 0xfe00707f
-#define MATCH_SRAI8 0x78000077
-#define MASK_SRAI8 0xff80707f
-#define MATCH_SRAI8_U 0x78800077
-#define MASK_SRAI8_U 0xff80707f
+#define MASK_SMULX16 0xfe00707f
+#define MATCH_SMULX8 0xaa000077
+#define MASK_SMULX8 0xfe00707f
+#define MATCH_SMXDS 0x78001077
+#define MASK_SMXDS 0xfe00707f
+#define MATCH_SMXDS32 0x78002077
+#define MASK_SMXDS32 0xfe00707f
+#define MATCH_SRA 0x40005033
+#define MASK_SRA 0xfe00707f
#define MATCH_SRA16 0x50000077
-#define MASK_SRA16 0xfe00707f
+#define MASK_SRA16 0xfe00707f
#define MATCH_SRA16_U 0x60000077
-#define MASK_SRA16_U 0xfe00707f
+#define MASK_SRA16_U 0xfe00707f
+#define MATCH_SRA32 0x50002077
+#define MASK_SRA32 0xfe00707f
+#define MATCH_SRA32_U 0x60002077
+#define MASK_SRA32_U 0xfe00707f
+#define MATCH_SRA8 0x58000077
+#define MASK_SRA8 0xfe00707f
+#define MATCH_SRA8_U 0x68000077
+#define MASK_SRA8_U 0xfe00707f
+#define MATCH_SRA_U 0x24001077
+#define MASK_SRA_U 0xfe00707f
+#define MATCH_SRAI 0x40005013
+#define MASK_SRAI 0xfc00707f
#define MATCH_SRAI16 0x70000077
-#define MASK_SRAI16 0xff00707f
+#define MASK_SRAI16 0xff00707f
#define MATCH_SRAI16_U 0x71000077
-#define MASK_SRAI16_U 0xff00707f
-#define MATCH_SRL8 0x5a000077
-#define MASK_SRL8 0xfe00707f
-#define MATCH_SRL8_U 0x6a000077
-#define MASK_SRL8_U 0xfe00707f
-#define MATCH_SRLI8 0x7a000077
-#define MASK_SRLI8 0xff80707f
-#define MATCH_SRLI8_U 0x7a800077
-#define MASK_SRLI8_U 0xff80707f
+#define MASK_SRAI16_U 0xff00707f
+#define MATCH_SRAI32 0x70002077
+#define MASK_SRAI32 0xfe00707f
+#define MATCH_SRAI32_U 0x80002077
+#define MASK_SRAI32_U 0xfe00707f
+#define MATCH_SRAI8 0x78000077
+#define MASK_SRAI8 0xff80707f
+#define MATCH_SRAI8_U 0x78800077
+#define MASK_SRAI8_U 0xff80707f
+#define MATCH_SRAI_U 0xd4001077
+#define MASK_SRAI_U 0xfc00707f
+#define MATCH_SRAIW 0x4000501b
+#define MASK_SRAIW 0xfe00707f
+#define MATCH_SRAIW_U 0x34001077
+#define MASK_SRAIW_U 0xfe00707f
+#define MATCH_SRAW 0x4000503b
+#define MASK_SRAW 0xfe00707f
+#define MATCH_SRET 0x10200073
+#define MASK_SRET 0xffffffff
+#define MATCH_SRL 0x5033
+#define MASK_SRL 0xfe00707f
#define MATCH_SRL16 0x52000077
-#define MASK_SRL16 0xfe00707f
+#define MASK_SRL16 0xfe00707f
#define MATCH_SRL16_U 0x62000077
-#define MASK_SRL16_U 0xfe00707f
+#define MASK_SRL16_U 0xfe00707f
+#define MATCH_SRL32 0x52002077
+#define MASK_SRL32 0xfe00707f
+#define MATCH_SRL32_U 0x62002077
+#define MASK_SRL32_U 0xfe00707f
+#define MATCH_SRL8 0x5a000077
+#define MASK_SRL8 0xfe00707f
+#define MATCH_SRL8_U 0x6a000077
+#define MASK_SRL8_U 0xfe00707f
+#define MATCH_SRLI 0x5013
+#define MASK_SRLI 0xfc00707f
#define MATCH_SRLI16 0x72000077
-#define MASK_SRLI16 0xff00707f
+#define MASK_SRLI16 0xff00707f
#define MATCH_SRLI16_U 0x73000077
-#define MASK_SRLI16_U 0xff00707f
+#define MASK_SRLI16_U 0xff00707f
+#define MATCH_SRLI32 0x72002077
+#define MASK_SRLI32 0xfe00707f
+#define MATCH_SRLI32_U 0x82002077
+#define MASK_SRLI32_U 0xfe00707f
+#define MATCH_SRLI8 0x7a000077
+#define MASK_SRLI8 0xff80707f
+#define MATCH_SRLI8_U 0x7a800077
+#define MASK_SRLI8_U 0xff80707f
+#define MATCH_SRLIW 0x501b
+#define MASK_SRLIW 0xfe00707f
+#define MATCH_SRLW 0x503b
+#define MASK_SRLW 0xfe00707f
+#define MATCH_SRO 0x20005033
+#define MASK_SRO 0xfe00707f
+#define MATCH_SROI 0x20005013
+#define MASK_SROI 0xfc00707f
+#define MATCH_SROIW 0x2000501b
+#define MASK_SROIW 0xfe00707f
+#define MATCH_SROW 0x2000503b
+#define MASK_SROW 0xfe00707f
#define MATCH_STAS16 0xf4002077
-#define MASK_STAS16 0xfe00707f
+#define MASK_STAS16 0xfe00707f
+#define MATCH_STAS32 0xf0002077
+#define MASK_STAS32 0xfe00707f
#define MATCH_STSA16 0xf6002077
-#define MASK_STSA16 0xfe00707f
-#define MATCH_SUB8 0x4a000077
-#define MASK_SUB8 0xfe00707f
+#define MASK_STSA16 0xfe00707f
+#define MATCH_STSA32 0xf2002077
+#define MASK_STSA32 0xfe00707f
+#define MATCH_SUB 0x40000033
+#define MASK_SUB 0xfe00707f
#define MATCH_SUB16 0x42000077
-#define MASK_SUB16 0xfe00707f
+#define MASK_SUB16 0xfe00707f
+#define MATCH_SUB32 0x42002077
+#define MASK_SUB32 0xfe00707f
#define MATCH_SUB64 0xc2001077
-#define MASK_SUB64 0xfe00707f
+#define MASK_SUB64 0xfe00707f
+#define MATCH_SUB8 0x4a000077
+#define MASK_SUB8 0xfe00707f
+#define MATCH_SUBW 0x4000003b
+#define MASK_SUBW 0xfe00707f
#define MATCH_SUNPKD810 0xac800077
-#define MASK_SUNPKD810 0xfff0707f
+#define MASK_SUNPKD810 0xfff0707f
#define MATCH_SUNPKD820 0xac900077
-#define MASK_SUNPKD820 0xfff0707f
+#define MASK_SUNPKD820 0xfff0707f
#define MATCH_SUNPKD830 0xaca00077
-#define MASK_SUNPKD830 0xfff0707f
+#define MASK_SUNPKD830 0xfff0707f
#define MATCH_SUNPKD831 0xacb00077
-#define MASK_SUNPKD831 0xfff0707f
+#define MASK_SUNPKD831 0xfff0707f
#define MATCH_SUNPKD832 0xad300077
-#define MASK_SUNPKD832 0xfff0707f
+#define MASK_SUNPKD832 0xfff0707f
+#define MATCH_SW 0x2023
+#define MASK_SW 0x707f
#define MATCH_SWAP8 0xad800077
-#define MASK_SWAP8 0xfff0707f
-#define MATCH_UCLIP8 0x8d000077
-#define MASK_UCLIP8 0xff80707f
+#define MASK_SWAP8 0xfff0707f
#define MATCH_UCLIP16 0x85000077
-#define MASK_UCLIP16 0xff00707f
+#define MASK_UCLIP16 0xff00707f
#define MATCH_UCLIP32 0xf4000077
-#define MASK_UCLIP32 0xfe00707f
-#define MATCH_UCMPLE8 0x3e000077
-#define MASK_UCMPLE8 0xfe00707f
+#define MASK_UCLIP32 0xfe00707f
+#define MATCH_UCLIP8 0x8d000077
+#define MASK_UCLIP8 0xff80707f
#define MATCH_UCMPLE16 0x3c000077
-#define MASK_UCMPLE16 0xfe00707f
-#define MATCH_UCMPLT8 0x2e000077
-#define MASK_UCMPLT8 0xfe00707f
+#define MASK_UCMPLE16 0xfe00707f
+#define MATCH_UCMPLE8 0x3e000077
+#define MASK_UCMPLE8 0xfe00707f
#define MATCH_UCMPLT16 0x2c000077
-#define MASK_UCMPLT16 0xfe00707f
-#define MATCH_UKADD8 0x38000077
-#define MASK_UKADD8 0xfe00707f
+#define MASK_UCMPLT16 0xfe00707f
+#define MATCH_UCMPLT8 0x2e000077
+#define MASK_UCMPLT8 0xfe00707f
#define MATCH_UKADD16 0x30000077
-#define MASK_UKADD16 0xfe00707f
+#define MASK_UKADD16 0xfe00707f
+#define MATCH_UKADD32 0x30002077
+#define MASK_UKADD32 0xfe00707f
#define MATCH_UKADD64 0xb0001077
-#define MASK_UKADD64 0xfe00707f
+#define MASK_UKADD64 0xfe00707f
+#define MATCH_UKADD8 0x38000077
+#define MASK_UKADD8 0xfe00707f
#define MATCH_UKADDH 0x14001077
-#define MASK_UKADDH 0xfe00707f
+#define MASK_UKADDH 0xfe00707f
#define MATCH_UKADDW 0x10001077
-#define MASK_UKADDW 0xfe00707f
+#define MASK_UKADDW 0xfe00707f
#define MATCH_UKCRAS16 0x34000077
-#define MASK_UKCRAS16 0xfe00707f
+#define MASK_UKCRAS16 0xfe00707f
+#define MATCH_UKCRAS32 0x34002077
+#define MASK_UKCRAS32 0xfe00707f
#define MATCH_UKCRSA16 0x36000077
-#define MASK_UKCRSA16 0xfe00707f
+#define MASK_UKCRSA16 0xfe00707f
+#define MATCH_UKCRSA32 0x36002077
+#define MASK_UKCRSA32 0xfe00707f
#define MATCH_UKMAR64 0xb4001077
-#define MASK_UKMAR64 0xfe00707f
+#define MASK_UKMAR64 0xfe00707f
#define MATCH_UKMSR64 0xb6001077
-#define MASK_UKMSR64 0xfe00707f
+#define MASK_UKMSR64 0xfe00707f
#define MATCH_UKSTAS16 0xe4002077
-#define MASK_UKSTAS16 0xfe00707f
+#define MASK_UKSTAS16 0xfe00707f
+#define MATCH_UKSTAS32 0xe0002077
+#define MASK_UKSTAS32 0xfe00707f
#define MATCH_UKSTSA16 0xe6002077
-#define MASK_UKSTSA16 0xfe00707f
-#define MATCH_UKSUB8 0x3a000077
-#define MASK_UKSUB8 0xfe00707f
+#define MASK_UKSTSA16 0xfe00707f
+#define MATCH_UKSTSA32 0xe2002077
+#define MASK_UKSTSA32 0xfe00707f
#define MATCH_UKSUB16 0x32000077
-#define MASK_UKSUB16 0xfe00707f
+#define MASK_UKSUB16 0xfe00707f
+#define MATCH_UKSUB32 0x32002077
+#define MASK_UKSUB32 0xfe00707f
#define MATCH_UKSUB64 0xb2001077
-#define MASK_UKSUB64 0xfe00707f
+#define MASK_UKSUB64 0xfe00707f
+#define MATCH_UKSUB8 0x3a000077
+#define MASK_UKSUB8 0xfe00707f
#define MATCH_UKSUBH 0x16001077
-#define MASK_UKSUBH 0xfe00707f
+#define MASK_UKSUBH 0xfe00707f
#define MATCH_UKSUBW 0x12001077
-#define MASK_UKSUBW 0xfe00707f
-#define MATCH_UMAR64 0xa4001077
-#define MASK_UMAR64 0xfe00707f
+#define MASK_UKSUBW 0xfe00707f
#define MATCH_UMAQA 0xcc000077
-#define MASK_UMAQA 0xfe00707f
-#define MATCH_UMAX8 0x9a000077
-#define MASK_UMAX8 0xfe00707f
+#define MASK_UMAQA 0xfe00707f
+#define MATCH_UMAR64 0xa4001077
+#define MASK_UMAR64 0xfe00707f
#define MATCH_UMAX16 0x92000077
-#define MASK_UMAX16 0xfe00707f
-#define MATCH_UMIN8 0x98000077
-#define MASK_UMIN8 0xfe00707f
+#define MASK_UMAX16 0xfe00707f
+#define MATCH_UMAX32 0xa2002077
+#define MASK_UMAX32 0xfe00707f
+#define MATCH_UMAX8 0x9a000077
+#define MASK_UMAX8 0xfe00707f
#define MATCH_UMIN16 0x90000077
-#define MASK_UMIN16 0xfe00707f
+#define MASK_UMIN16 0xfe00707f
+#define MATCH_UMIN32 0xa0002077
+#define MASK_UMIN32 0xfe00707f
+#define MATCH_UMIN8 0x98000077
+#define MASK_UMIN8 0xfe00707f
#define MATCH_UMSR64 0xa6001077
-#define MASK_UMSR64 0xfe00707f
-#define MATCH_UMUL8 0xb8000077
-#define MASK_UMUL8 0xfe00707f
-#define MATCH_UMULX8 0xba000077
-#define MASK_UMULX8 0xfe00707f
+#define MASK_UMSR64 0xfe00707f
#define MATCH_UMUL16 0xb0000077
-#define MASK_UMUL16 0xfe00707f
+#define MASK_UMUL16 0xfe00707f
+#define MATCH_UMUL8 0xb8000077
+#define MASK_UMUL8 0xfe00707f
#define MATCH_UMULX16 0xb2000077
-#define MASK_UMULX16 0xfe00707f
-#define MATCH_URADD8 0x28000077
-#define MASK_URADD8 0xfe00707f
+#define MASK_UMULX16 0xfe00707f
+#define MATCH_UMULX8 0xba000077
+#define MASK_UMULX8 0xfe00707f
+#define MATCH_UNSHFL 0x8005033
+#define MASK_UNSHFL 0xfe00707f
+#define MATCH_UNSHFLI 0x8005013
+#define MASK_UNSHFLI 0xfe00707f
+#define MATCH_UNSHFLW 0x800503b
+#define MASK_UNSHFLW 0xfe00707f
#define MATCH_URADD16 0x20000077
-#define MASK_URADD16 0xfe00707f
+#define MASK_URADD16 0xfe00707f
+#define MATCH_URADD32 0x20002077
+#define MASK_URADD32 0xfe00707f
#define MATCH_URADD64 0xa0001077
-#define MASK_URADD64 0xfe00707f
+#define MASK_URADD64 0xfe00707f
+#define MATCH_URADD8 0x28000077
+#define MASK_URADD8 0xfe00707f
#define MATCH_URADDW 0x30001077
-#define MASK_URADDW 0xfe00707f
+#define MASK_URADDW 0xfe00707f
#define MATCH_URCRAS16 0x24000077
-#define MASK_URCRAS16 0xfe00707f
+#define MASK_URCRAS16 0xfe00707f
+#define MATCH_URCRAS32 0x24002077
+#define MASK_URCRAS32 0xfe00707f
#define MATCH_URCRSA16 0x26000077
-#define MASK_URCRSA16 0xfe00707f
+#define MASK_URCRSA16 0xfe00707f
+#define MATCH_URCRSA32 0x26002077
+#define MASK_URCRSA32 0xfe00707f
#define MATCH_URSTAS16 0xd4002077
-#define MASK_URSTAS16 0xfe00707f
+#define MASK_URSTAS16 0xfe00707f
+#define MATCH_URSTAS32 0xd0002077
+#define MASK_URSTAS32 0xfe00707f
#define MATCH_URSTSA16 0xd6002077
-#define MASK_URSTSA16 0xfe00707f
-#define MATCH_URSUB8 0x2a000077
-#define MASK_URSUB8 0xfe00707f
+#define MASK_URSTSA16 0xfe00707f
+#define MATCH_URSTSA32 0xd2002077
+#define MASK_URSTSA32 0xfe00707f
#define MATCH_URSUB16 0x22000077
-#define MASK_URSUB16 0xfe00707f
+#define MASK_URSUB16 0xfe00707f
+#define MATCH_URSUB32 0x22002077
+#define MASK_URSUB32 0xfe00707f
#define MATCH_URSUB64 0xa2001077
-#define MASK_URSUB64 0xfe00707f
+#define MASK_URSUB64 0xfe00707f
+#define MATCH_URSUB8 0x2a000077
+#define MASK_URSUB8 0xfe00707f
#define MATCH_URSUBW 0x32001077
-#define MASK_URSUBW 0xfe00707f
-#define MATCH_WEXTI 0xde000077
-#define MASK_WEXTI 0xfe00707f
+#define MASK_URSUBW 0xfe00707f
+#define MATCH_VAADD_VV 0x24002057
+#define MASK_VAADD_VV 0xfc00707f
+#define MATCH_VAADD_VX 0x24006057
+#define MASK_VAADD_VX 0xfc00707f
+#define MATCH_VAADDU_VV 0x20002057
+#define MASK_VAADDU_VV 0xfc00707f
+#define MATCH_VAADDU_VX 0x20006057
+#define MASK_VAADDU_VX 0xfc00707f
+#define MATCH_VADC_VIM 0x40003057
+#define MASK_VADC_VIM 0xfe00707f
+#define MATCH_VADC_VVM 0x40000057
+#define MASK_VADC_VVM 0xfe00707f
+#define MATCH_VADC_VXM 0x40004057
+#define MASK_VADC_VXM 0xfe00707f
+#define MATCH_VADD_VI 0x3057
+#define MASK_VADD_VI 0xfc00707f
+#define MATCH_VADD_VV 0x57
+#define MASK_VADD_VV 0xfc00707f
+#define MATCH_VADD_VX 0x4057
+#define MASK_VADD_VX 0xfc00707f
+#define MATCH_VAMOADDEI16_V 0x502f
+#define MASK_VAMOADDEI16_V 0xf800707f
+#define MATCH_VAMOADDEI32_V 0x602f
+#define MASK_VAMOADDEI32_V 0xf800707f
+#define MATCH_VAMOADDEI64_V 0x702f
+#define MASK_VAMOADDEI64_V 0xf800707f
+#define MATCH_VAMOADDEI8_V 0x2f
+#define MASK_VAMOADDEI8_V 0xf800707f
+#define MATCH_VAMOANDEI16_V 0x6000502f
+#define MASK_VAMOANDEI16_V 0xf800707f
+#define MATCH_VAMOANDEI32_V 0x6000602f
+#define MASK_VAMOANDEI32_V 0xf800707f
+#define MATCH_VAMOANDEI64_V 0x6000702f
+#define MASK_VAMOANDEI64_V 0xf800707f
+#define MATCH_VAMOANDEI8_V 0x6000002f
+#define MASK_VAMOANDEI8_V 0xf800707f
+#define MATCH_VAMOMAXEI16_V 0xa000502f
+#define MASK_VAMOMAXEI16_V 0xf800707f
+#define MATCH_VAMOMAXEI32_V 0xa000602f
+#define MASK_VAMOMAXEI32_V 0xf800707f
+#define MATCH_VAMOMAXEI64_V 0xa000702f
+#define MASK_VAMOMAXEI64_V 0xf800707f
+#define MATCH_VAMOMAXEI8_V 0xa000002f
+#define MASK_VAMOMAXEI8_V 0xf800707f
+#define MATCH_VAMOMAXUEI16_V 0xe000502f
+#define MASK_VAMOMAXUEI16_V 0xf800707f
+#define MATCH_VAMOMAXUEI32_V 0xe000602f
+#define MASK_VAMOMAXUEI32_V 0xf800707f
+#define MATCH_VAMOMAXUEI64_V 0xe000702f
+#define MASK_VAMOMAXUEI64_V 0xf800707f
+#define MATCH_VAMOMAXUEI8_V 0xe000002f
+#define MASK_VAMOMAXUEI8_V 0xf800707f
+#define MATCH_VAMOMINEI16_V 0x8000502f
+#define MASK_VAMOMINEI16_V 0xf800707f
+#define MATCH_VAMOMINEI32_V 0x8000602f
+#define MASK_VAMOMINEI32_V 0xf800707f
+#define MATCH_VAMOMINEI64_V 0x8000702f
+#define MASK_VAMOMINEI64_V 0xf800707f
+#define MATCH_VAMOMINEI8_V 0x8000002f
+#define MASK_VAMOMINEI8_V 0xf800707f
+#define MATCH_VAMOMINUEI16_V 0xc000502f
+#define MASK_VAMOMINUEI16_V 0xf800707f
+#define MATCH_VAMOMINUEI32_V 0xc000602f
+#define MASK_VAMOMINUEI32_V 0xf800707f
+#define MATCH_VAMOMINUEI64_V 0xc000702f
+#define MASK_VAMOMINUEI64_V 0xf800707f
+#define MATCH_VAMOMINUEI8_V 0xc000002f
+#define MASK_VAMOMINUEI8_V 0xf800707f
+#define MATCH_VAMOOREI16_V 0x4000502f
+#define MASK_VAMOOREI16_V 0xf800707f
+#define MATCH_VAMOOREI32_V 0x4000602f
+#define MASK_VAMOOREI32_V 0xf800707f
+#define MATCH_VAMOOREI64_V 0x4000702f
+#define MASK_VAMOOREI64_V 0xf800707f
+#define MATCH_VAMOOREI8_V 0x4000002f
+#define MASK_VAMOOREI8_V 0xf800707f
+#define MATCH_VAMOSWAPEI16_V 0x800502f
+#define MASK_VAMOSWAPEI16_V 0xf800707f
+#define MATCH_VAMOSWAPEI32_V 0x800602f
+#define MASK_VAMOSWAPEI32_V 0xf800707f
+#define MATCH_VAMOSWAPEI64_V 0x800702f
+#define MASK_VAMOSWAPEI64_V 0xf800707f
+#define MATCH_VAMOSWAPEI8_V 0x800002f
+#define MASK_VAMOSWAPEI8_V 0xf800707f
+#define MATCH_VAMOXOREI16_V 0x2000502f
+#define MASK_VAMOXOREI16_V 0xf800707f
+#define MATCH_VAMOXOREI32_V 0x2000602f
+#define MASK_VAMOXOREI32_V 0xf800707f
+#define MATCH_VAMOXOREI64_V 0x2000702f
+#define MASK_VAMOXOREI64_V 0xf800707f
+#define MATCH_VAMOXOREI8_V 0x2000002f
+#define MASK_VAMOXOREI8_V 0xf800707f
+#define MATCH_VAND_VI 0x24003057
+#define MASK_VAND_VI 0xfc00707f
+#define MATCH_VAND_VV 0x24000057
+#define MASK_VAND_VV 0xfc00707f
+#define MATCH_VAND_VX 0x24004057
+#define MASK_VAND_VX 0xfc00707f
+#define MATCH_VASUB_VV 0x2c002057
+#define MASK_VASUB_VV 0xfc00707f
+#define MATCH_VASUB_VX 0x2c006057
+#define MASK_VASUB_VX 0xfc00707f
+#define MATCH_VASUBU_VV 0x28002057
+#define MASK_VASUBU_VV 0xfc00707f
+#define MATCH_VASUBU_VX 0x28006057
+#define MASK_VASUBU_VX 0xfc00707f
+#define MATCH_VCOMPRESS_VM 0x5e002057
+#define MASK_VCOMPRESS_VM 0xfe00707f
+#define MATCH_VCPOP_M 0x40082057
+#define MASK_VCPOP_M 0xfc0ff07f
+#define MATCH_VDIV_VV 0x84002057
+#define MASK_VDIV_VV 0xfc00707f
+#define MATCH_VDIV_VX 0x84006057
+#define MASK_VDIV_VX 0xfc00707f
+#define MATCH_VDIVU_VV 0x80002057
+#define MASK_VDIVU_VV 0xfc00707f
+#define MATCH_VDIVU_VX 0x80006057
+#define MASK_VDIVU_VX 0xfc00707f
+#define MATCH_VFADD_VF 0x5057
+#define MASK_VFADD_VF 0xfc00707f
+#define MATCH_VFADD_VV 0x1057
+#define MASK_VFADD_VV 0xfc00707f
+#define MATCH_VFCLASS_V 0x4c081057
+#define MASK_VFCLASS_V 0xfc0ff07f
+#define MATCH_VFCVT_F_X_V 0x48019057
+#define MASK_VFCVT_F_X_V 0xfc0ff07f
+#define MATCH_VFCVT_F_XU_V 0x48011057
+#define MASK_VFCVT_F_XU_V 0xfc0ff07f
+#define MATCH_VFCVT_RTZ_X_F_V 0x48039057
+#define MASK_VFCVT_RTZ_X_F_V 0xfc0ff07f
+#define MATCH_VFCVT_RTZ_XU_F_V 0x48031057
+#define MASK_VFCVT_RTZ_XU_F_V 0xfc0ff07f
+#define MATCH_VFCVT_X_F_V 0x48009057
+#define MASK_VFCVT_X_F_V 0xfc0ff07f
+#define MATCH_VFCVT_XU_F_V 0x48001057
+#define MASK_VFCVT_XU_F_V 0xfc0ff07f
+#define MATCH_VFDIV_VF 0x80005057
+#define MASK_VFDIV_VF 0xfc00707f
+#define MATCH_VFDIV_VV 0x80001057
+#define MASK_VFDIV_VV 0xfc00707f
+#define MATCH_VFIRST_M 0x4008a057
+#define MASK_VFIRST_M 0xfc0ff07f
+#define MATCH_VFMACC_VF 0xb0005057
+#define MASK_VFMACC_VF 0xfc00707f
+#define MATCH_VFMACC_VV 0xb0001057
+#define MASK_VFMACC_VV 0xfc00707f
+#define MATCH_VFMADD_VF 0xa0005057
+#define MASK_VFMADD_VF 0xfc00707f
+#define MATCH_VFMADD_VV 0xa0001057
+#define MASK_VFMADD_VV 0xfc00707f
+#define MATCH_VFMAX_VF 0x18005057
+#define MASK_VFMAX_VF 0xfc00707f
+#define MATCH_VFMAX_VV 0x18001057
+#define MASK_VFMAX_VV 0xfc00707f
+#define MATCH_VFMERGE_VFM 0x5c005057
+#define MASK_VFMERGE_VFM 0xfe00707f
+#define MATCH_VFMIN_VF 0x10005057
+#define MASK_VFMIN_VF 0xfc00707f
+#define MATCH_VFMIN_VV 0x10001057
+#define MASK_VFMIN_VV 0xfc00707f
+#define MATCH_VFMSAC_VF 0xb8005057
+#define MASK_VFMSAC_VF 0xfc00707f
+#define MATCH_VFMSAC_VV 0xb8001057
+#define MASK_VFMSAC_VV 0xfc00707f
+#define MATCH_VFMSUB_VF 0xa8005057
+#define MASK_VFMSUB_VF 0xfc00707f
+#define MATCH_VFMSUB_VV 0xa8001057
+#define MASK_VFMSUB_VV 0xfc00707f
+#define MATCH_VFMUL_VF 0x90005057
+#define MASK_VFMUL_VF 0xfc00707f
+#define MATCH_VFMUL_VV 0x90001057
+#define MASK_VFMUL_VV 0xfc00707f
+#define MATCH_VFMV_F_S 0x42001057
+#define MASK_VFMV_F_S 0xfe0ff07f
+#define MATCH_VFMV_S_F 0x42005057
+#define MASK_VFMV_S_F 0xfff0707f
+#define MATCH_VFMV_V_F 0x5e005057
+#define MASK_VFMV_V_F 0xfff0707f
+#define MATCH_VFNCVT_F_F_W 0x480a1057
+#define MASK_VFNCVT_F_F_W 0xfc0ff07f
+#define MATCH_VFNCVT_F_X_W 0x48099057
+#define MASK_VFNCVT_F_X_W 0xfc0ff07f
+#define MATCH_VFNCVT_F_XU_W 0x48091057
+#define MASK_VFNCVT_F_XU_W 0xfc0ff07f
+#define MATCH_VFNCVT_ROD_F_F_W 0x480a9057
+#define MASK_VFNCVT_ROD_F_F_W 0xfc0ff07f
+#define MATCH_VFNCVT_RTZ_X_F_W 0x480b9057
+#define MASK_VFNCVT_RTZ_X_F_W 0xfc0ff07f
+#define MATCH_VFNCVT_RTZ_XU_F_W 0x480b1057
+#define MASK_VFNCVT_RTZ_XU_F_W 0xfc0ff07f
+#define MATCH_VFNCVT_X_F_W 0x48089057
+#define MASK_VFNCVT_X_F_W 0xfc0ff07f
+#define MATCH_VFNCVT_XU_F_W 0x48081057
+#define MASK_VFNCVT_XU_F_W 0xfc0ff07f
+#define MATCH_VFNMACC_VF 0xb4005057
+#define MASK_VFNMACC_VF 0xfc00707f
+#define MATCH_VFNMACC_VV 0xb4001057
+#define MASK_VFNMACC_VV 0xfc00707f
+#define MATCH_VFNMADD_VF 0xa4005057
+#define MASK_VFNMADD_VF 0xfc00707f
+#define MATCH_VFNMADD_VV 0xa4001057
+#define MASK_VFNMADD_VV 0xfc00707f
+#define MATCH_VFNMSAC_VF 0xbc005057
+#define MASK_VFNMSAC_VF 0xfc00707f
+#define MATCH_VFNMSAC_VV 0xbc001057
+#define MASK_VFNMSAC_VV 0xfc00707f
+#define MATCH_VFNMSUB_VF 0xac005057
+#define MASK_VFNMSUB_VF 0xfc00707f
+#define MATCH_VFNMSUB_VV 0xac001057
+#define MASK_VFNMSUB_VV 0xfc00707f
+#define MATCH_VFRDIV_VF 0x84005057
+#define MASK_VFRDIV_VF 0xfc00707f
+#define MATCH_VFREC7_V 0x4c029057
+#define MASK_VFREC7_V 0xfc0ff07f
+#define MATCH_VFREDMAX_VS 0x1c001057
+#define MASK_VFREDMAX_VS 0xfc00707f
+#define MATCH_VFREDMIN_VS 0x14001057
+#define MASK_VFREDMIN_VS 0xfc00707f
+#define MATCH_VFREDOSUM_VS 0xc001057
+#define MASK_VFREDOSUM_VS 0xfc00707f
+#define MATCH_VFREDUSUM_VS 0x4001057
+#define MASK_VFREDUSUM_VS 0xfc00707f
+#define MATCH_VFRSQRT7_V 0x4c021057
+#define MASK_VFRSQRT7_V 0xfc0ff07f
+#define MATCH_VFRSUB_VF 0x9c005057
+#define MASK_VFRSUB_VF 0xfc00707f
+#define MATCH_VFSGNJ_VF 0x20005057
+#define MASK_VFSGNJ_VF 0xfc00707f
+#define MATCH_VFSGNJ_VV 0x20001057
+#define MASK_VFSGNJ_VV 0xfc00707f
+#define MATCH_VFSGNJN_VF 0x24005057
+#define MASK_VFSGNJN_VF 0xfc00707f
+#define MATCH_VFSGNJN_VV 0x24001057
+#define MASK_VFSGNJN_VV 0xfc00707f
+#define MATCH_VFSGNJX_VF 0x28005057
+#define MASK_VFSGNJX_VF 0xfc00707f
+#define MATCH_VFSGNJX_VV 0x28001057
+#define MASK_VFSGNJX_VV 0xfc00707f
+#define MATCH_VFSLIDE1DOWN_VF 0x3c005057
+#define MASK_VFSLIDE1DOWN_VF 0xfc00707f
+#define MATCH_VFSLIDE1UP_VF 0x38005057
+#define MASK_VFSLIDE1UP_VF 0xfc00707f
+#define MATCH_VFSQRT_V 0x4c001057
+#define MASK_VFSQRT_V 0xfc0ff07f
+#define MATCH_VFSUB_VF 0x8005057
+#define MASK_VFSUB_VF 0xfc00707f
+#define MATCH_VFSUB_VV 0x8001057
+#define MASK_VFSUB_VV 0xfc00707f
+#define MATCH_VFWADD_VF 0xc0005057
+#define MASK_VFWADD_VF 0xfc00707f
+#define MATCH_VFWADD_VV 0xc0001057
+#define MASK_VFWADD_VV 0xfc00707f
+#define MATCH_VFWADD_WF 0xd0005057
+#define MASK_VFWADD_WF 0xfc00707f
+#define MATCH_VFWADD_WV 0xd0001057
+#define MASK_VFWADD_WV 0xfc00707f
+#define MATCH_VFWCVT_F_F_V 0x48061057
+#define MASK_VFWCVT_F_F_V 0xfc0ff07f
+#define MATCH_VFWCVT_F_X_V 0x48059057
+#define MASK_VFWCVT_F_X_V 0xfc0ff07f
+#define MATCH_VFWCVT_F_XU_V 0x48051057
+#define MASK_VFWCVT_F_XU_V 0xfc0ff07f
+#define MATCH_VFWCVT_RTZ_X_F_V 0x48079057
+#define MASK_VFWCVT_RTZ_X_F_V 0xfc0ff07f
+#define MATCH_VFWCVT_RTZ_XU_F_V 0x48071057
+#define MASK_VFWCVT_RTZ_XU_F_V 0xfc0ff07f
+#define MATCH_VFWCVT_X_F_V 0x48049057
+#define MASK_VFWCVT_X_F_V 0xfc0ff07f
+#define MATCH_VFWCVT_XU_F_V 0x48041057
+#define MASK_VFWCVT_XU_F_V 0xfc0ff07f
+#define MATCH_VFWMACC_VF 0xf0005057
+#define MASK_VFWMACC_VF 0xfc00707f
+#define MATCH_VFWMACC_VV 0xf0001057
+#define MASK_VFWMACC_VV 0xfc00707f
+#define MATCH_VFWMSAC_VF 0xf8005057
+#define MASK_VFWMSAC_VF 0xfc00707f
+#define MATCH_VFWMSAC_VV 0xf8001057
+#define MASK_VFWMSAC_VV 0xfc00707f
+#define MATCH_VFWMUL_VF 0xe0005057
+#define MASK_VFWMUL_VF 0xfc00707f
+#define MATCH_VFWMUL_VV 0xe0001057
+#define MASK_VFWMUL_VV 0xfc00707f
+#define MATCH_VFWNMACC_VF 0xf4005057
+#define MASK_VFWNMACC_VF 0xfc00707f
+#define MATCH_VFWNMACC_VV 0xf4001057
+#define MASK_VFWNMACC_VV 0xfc00707f
+#define MATCH_VFWNMSAC_VF 0xfc005057
+#define MASK_VFWNMSAC_VF 0xfc00707f
+#define MATCH_VFWNMSAC_VV 0xfc001057
+#define MASK_VFWNMSAC_VV 0xfc00707f
+#define MATCH_VFWREDOSUM_VS 0xcc001057
+#define MASK_VFWREDOSUM_VS 0xfc00707f
+#define MATCH_VFWREDUSUM_VS 0xc4001057
+#define MASK_VFWREDUSUM_VS 0xfc00707f
+#define MATCH_VFWSUB_VF 0xc8005057
+#define MASK_VFWSUB_VF 0xfc00707f
+#define MATCH_VFWSUB_VV 0xc8001057
+#define MASK_VFWSUB_VV 0xfc00707f
+#define MATCH_VFWSUB_WF 0xd8005057
+#define MASK_VFWSUB_WF 0xfc00707f
+#define MATCH_VFWSUB_WV 0xd8001057
+#define MASK_VFWSUB_WV 0xfc00707f
+#define MATCH_VID_V 0x5008a057
+#define MASK_VID_V 0xfdfff07f
+#define MATCH_VIOTA_M 0x50082057
+#define MASK_VIOTA_M 0xfc0ff07f
+#define MATCH_VL1RE16_V 0x2805007
+#define MASK_VL1RE16_V 0xfff0707f
+#define MATCH_VL1RE32_V 0x2806007
+#define MASK_VL1RE32_V 0xfff0707f
+#define MATCH_VL1RE64_V 0x2807007
+#define MASK_VL1RE64_V 0xfff0707f
+#define MATCH_VL1RE8_V 0x2800007
+#define MASK_VL1RE8_V 0xfff0707f
+#define MATCH_VL2RE16_V 0x22805007
+#define MASK_VL2RE16_V 0xfff0707f
+#define MATCH_VL2RE32_V 0x22806007
+#define MASK_VL2RE32_V 0xfff0707f
+#define MATCH_VL2RE64_V 0x22807007
+#define MASK_VL2RE64_V 0xfff0707f
+#define MATCH_VL2RE8_V 0x22800007
+#define MASK_VL2RE8_V 0xfff0707f
+#define MATCH_VL4RE16_V 0x62805007
+#define MASK_VL4RE16_V 0xfff0707f
+#define MATCH_VL4RE32_V 0x62806007
+#define MASK_VL4RE32_V 0xfff0707f
+#define MATCH_VL4RE64_V 0x62807007
+#define MASK_VL4RE64_V 0xfff0707f
+#define MATCH_VL4RE8_V 0x62800007
+#define MASK_VL4RE8_V 0xfff0707f
+#define MATCH_VL8RE16_V 0xe2805007
+#define MASK_VL8RE16_V 0xfff0707f
+#define MATCH_VL8RE32_V 0xe2806007
+#define MASK_VL8RE32_V 0xfff0707f
+#define MATCH_VL8RE64_V 0xe2807007
+#define MASK_VL8RE64_V 0xfff0707f
+#define MATCH_VL8RE8_V 0xe2800007
+#define MASK_VL8RE8_V 0xfff0707f
+#define MATCH_VLE1024_V 0x10007007
+#define MASK_VLE1024_V 0x1df0707f
+#define MATCH_VLE1024FF_V 0x11007007
+#define MASK_VLE1024FF_V 0x1df0707f
+#define MATCH_VLE128_V 0x10000007
+#define MASK_VLE128_V 0x1df0707f
+#define MATCH_VLE128FF_V 0x11000007
+#define MASK_VLE128FF_V 0x1df0707f
+#define MATCH_VLE16_V 0x5007
+#define MASK_VLE16_V 0x1df0707f
+#define MATCH_VLE16FF_V 0x1005007
+#define MASK_VLE16FF_V 0x1df0707f
+#define MATCH_VLE256_V 0x10005007
+#define MASK_VLE256_V 0x1df0707f
+#define MATCH_VLE256FF_V 0x11005007
+#define MASK_VLE256FF_V 0x1df0707f
+#define MATCH_VLE32_V 0x6007
+#define MASK_VLE32_V 0x1df0707f
+#define MATCH_VLE32FF_V 0x1006007
+#define MASK_VLE32FF_V 0x1df0707f
+#define MATCH_VLE512_V 0x10006007
+#define MASK_VLE512_V 0x1df0707f
+#define MATCH_VLE512FF_V 0x11006007
+#define MASK_VLE512FF_V 0x1df0707f
+#define MATCH_VLE64_V 0x7007
+#define MASK_VLE64_V 0x1df0707f
+#define MATCH_VLE64FF_V 0x1007007
+#define MASK_VLE64FF_V 0x1df0707f
+#define MATCH_VLE8_V 0x7
+#define MASK_VLE8_V 0x1df0707f
+#define MATCH_VLE8FF_V 0x1000007
+#define MASK_VLE8FF_V 0x1df0707f
+#define MATCH_VLM_V 0x2b00007
+#define MASK_VLM_V 0xfff0707f
+#define MATCH_VLOXEI1024_V 0x1c007007
+#define MASK_VLOXEI1024_V 0x1c00707f
+#define MATCH_VLOXEI128_V 0x1c000007
+#define MASK_VLOXEI128_V 0x1c00707f
+#define MATCH_VLOXEI16_V 0xc005007
+#define MASK_VLOXEI16_V 0x1c00707f
+#define MATCH_VLOXEI256_V 0x1c005007
+#define MASK_VLOXEI256_V 0x1c00707f
+#define MATCH_VLOXEI32_V 0xc006007
+#define MASK_VLOXEI32_V 0x1c00707f
+#define MATCH_VLOXEI512_V 0x1c006007
+#define MASK_VLOXEI512_V 0x1c00707f
+#define MATCH_VLOXEI64_V 0xc007007
+#define MASK_VLOXEI64_V 0x1c00707f
+#define MATCH_VLOXEI8_V 0xc000007
+#define MASK_VLOXEI8_V 0x1c00707f
+#define MATCH_VLSE1024_V 0x18007007
+#define MASK_VLSE1024_V 0x1c00707f
+#define MATCH_VLSE128_V 0x18000007
+#define MASK_VLSE128_V 0x1c00707f
+#define MATCH_VLSE16_V 0x8005007
+#define MASK_VLSE16_V 0x1c00707f
+#define MATCH_VLSE256_V 0x18005007
+#define MASK_VLSE256_V 0x1c00707f
+#define MATCH_VLSE32_V 0x8006007
+#define MASK_VLSE32_V 0x1c00707f
+#define MATCH_VLSE512_V 0x18006007
+#define MASK_VLSE512_V 0x1c00707f
+#define MATCH_VLSE64_V 0x8007007
+#define MASK_VLSE64_V 0x1c00707f
+#define MATCH_VLSE8_V 0x8000007
+#define MASK_VLSE8_V 0x1c00707f
+#define MATCH_VLUXEI1024_V 0x14007007
+#define MASK_VLUXEI1024_V 0x1c00707f
+#define MATCH_VLUXEI128_V 0x14000007
+#define MASK_VLUXEI128_V 0x1c00707f
+#define MATCH_VLUXEI16_V 0x4005007
+#define MASK_VLUXEI16_V 0x1c00707f
+#define MATCH_VLUXEI256_V 0x14005007
+#define MASK_VLUXEI256_V 0x1c00707f
+#define MATCH_VLUXEI32_V 0x4006007
+#define MASK_VLUXEI32_V 0x1c00707f
+#define MATCH_VLUXEI512_V 0x14006007
+#define MASK_VLUXEI512_V 0x1c00707f
+#define MATCH_VLUXEI64_V 0x4007007
+#define MASK_VLUXEI64_V 0x1c00707f
+#define MATCH_VLUXEI8_V 0x4000007
+#define MASK_VLUXEI8_V 0x1c00707f
+#define MATCH_VMACC_VV 0xb4002057
+#define MASK_VMACC_VV 0xfc00707f
+#define MATCH_VMACC_VX 0xb4006057
+#define MASK_VMACC_VX 0xfc00707f
+#define MATCH_VMADC_VI 0x46003057
+#define MASK_VMADC_VI 0xfe00707f
+#define MATCH_VMADC_VIM 0x44003057
+#define MASK_VMADC_VIM 0xfe00707f
+#define MATCH_VMADC_VV 0x46000057
+#define MASK_VMADC_VV 0xfe00707f
+#define MATCH_VMADC_VVM 0x44000057
+#define MASK_VMADC_VVM 0xfe00707f
+#define MATCH_VMADC_VX 0x46004057
+#define MASK_VMADC_VX 0xfe00707f
+#define MATCH_VMADC_VXM 0x44004057
+#define MASK_VMADC_VXM 0xfe00707f
+#define MATCH_VMADD_VV 0xa4002057
+#define MASK_VMADD_VV 0xfc00707f
+#define MATCH_VMADD_VX 0xa4006057
+#define MASK_VMADD_VX 0xfc00707f
+#define MATCH_VMAND_MM 0x64002057
+#define MASK_VMAND_MM 0xfc00707f
+#define MATCH_VMANDN_MM 0x60002057
+#define MASK_VMANDN_MM 0xfc00707f
+#define MATCH_VMAX_VV 0x1c000057
+#define MASK_VMAX_VV 0xfc00707f
+#define MATCH_VMAX_VX 0x1c004057
+#define MASK_VMAX_VX 0xfc00707f
+#define MATCH_VMAXU_VV 0x18000057
+#define MASK_VMAXU_VV 0xfc00707f
+#define MATCH_VMAXU_VX 0x18004057
+#define MASK_VMAXU_VX 0xfc00707f
+#define MATCH_VMERGE_VIM 0x5c003057
+#define MASK_VMERGE_VIM 0xfe00707f
+#define MATCH_VMERGE_VVM 0x5c000057
+#define MASK_VMERGE_VVM 0xfe00707f
+#define MATCH_VMERGE_VXM 0x5c004057
+#define MASK_VMERGE_VXM 0xfe00707f
+#define MATCH_VMFEQ_VF 0x60005057
+#define MASK_VMFEQ_VF 0xfc00707f
+#define MATCH_VMFEQ_VV 0x60001057
+#define MASK_VMFEQ_VV 0xfc00707f
+#define MATCH_VMFGE_VF 0x7c005057
+#define MASK_VMFGE_VF 0xfc00707f
+#define MATCH_VMFGT_VF 0x74005057
+#define MASK_VMFGT_VF 0xfc00707f
+#define MATCH_VMFLE_VF 0x64005057
+#define MASK_VMFLE_VF 0xfc00707f
+#define MATCH_VMFLE_VV 0x64001057
+#define MASK_VMFLE_VV 0xfc00707f
+#define MATCH_VMFLT_VF 0x6c005057
+#define MASK_VMFLT_VF 0xfc00707f
+#define MATCH_VMFLT_VV 0x6c001057
+#define MASK_VMFLT_VV 0xfc00707f
+#define MATCH_VMFNE_VF 0x70005057
+#define MASK_VMFNE_VF 0xfc00707f
+#define MATCH_VMFNE_VV 0x70001057
+#define MASK_VMFNE_VV 0xfc00707f
+#define MATCH_VMIN_VV 0x14000057
+#define MASK_VMIN_VV 0xfc00707f
+#define MATCH_VMIN_VX 0x14004057
+#define MASK_VMIN_VX 0xfc00707f
+#define MATCH_VMINU_VV 0x10000057
+#define MASK_VMINU_VV 0xfc00707f
+#define MATCH_VMINU_VX 0x10004057
+#define MASK_VMINU_VX 0xfc00707f
+#define MATCH_VMNAND_MM 0x74002057
+#define MASK_VMNAND_MM 0xfc00707f
+#define MATCH_VMNOR_MM 0x78002057
+#define MASK_VMNOR_MM 0xfc00707f
+#define MATCH_VMOR_MM 0x68002057
+#define MASK_VMOR_MM 0xfc00707f
+#define MATCH_VMORN_MM 0x70002057
+#define MASK_VMORN_MM 0xfc00707f
+#define MATCH_VMSBC_VV 0x4e000057
+#define MASK_VMSBC_VV 0xfe00707f
+#define MATCH_VMSBC_VVM 0x4c000057
+#define MASK_VMSBC_VVM 0xfe00707f
+#define MATCH_VMSBC_VX 0x4e004057
+#define MASK_VMSBC_VX 0xfe00707f
+#define MATCH_VMSBC_VXM 0x4c004057
+#define MASK_VMSBC_VXM 0xfe00707f
+#define MATCH_VMSBF_M 0x5000a057
+#define MASK_VMSBF_M 0xfc0ff07f
+#define MATCH_VMSEQ_VI 0x60003057
+#define MASK_VMSEQ_VI 0xfc00707f
+#define MATCH_VMSEQ_VV 0x60000057
+#define MASK_VMSEQ_VV 0xfc00707f
+#define MATCH_VMSEQ_VX 0x60004057
+#define MASK_VMSEQ_VX 0xfc00707f
+#define MATCH_VMSGT_VI 0x7c003057
+#define MASK_VMSGT_VI 0xfc00707f
+#define MATCH_VMSGT_VX 0x7c004057
+#define MASK_VMSGT_VX 0xfc00707f
+#define MATCH_VMSGTU_VI 0x78003057
+#define MASK_VMSGTU_VI 0xfc00707f
+#define MATCH_VMSGTU_VX 0x78004057
+#define MASK_VMSGTU_VX 0xfc00707f
+#define MATCH_VMSIF_M 0x5001a057
+#define MASK_VMSIF_M 0xfc0ff07f
+#define MATCH_VMSLE_VI 0x74003057
+#define MASK_VMSLE_VI 0xfc00707f
+#define MATCH_VMSLE_VV 0x74000057
+#define MASK_VMSLE_VV 0xfc00707f
+#define MATCH_VMSLE_VX 0x74004057
+#define MASK_VMSLE_VX 0xfc00707f
+#define MATCH_VMSLEU_VI 0x70003057
+#define MASK_VMSLEU_VI 0xfc00707f
+#define MATCH_VMSLEU_VV 0x70000057
+#define MASK_VMSLEU_VV 0xfc00707f
+#define MATCH_VMSLEU_VX 0x70004057
+#define MASK_VMSLEU_VX 0xfc00707f
+#define MATCH_VMSLT_VV 0x6c000057
+#define MASK_VMSLT_VV 0xfc00707f
+#define MATCH_VMSLT_VX 0x6c004057
+#define MASK_VMSLT_VX 0xfc00707f
+#define MATCH_VMSLTU_VV 0x68000057
+#define MASK_VMSLTU_VV 0xfc00707f
+#define MATCH_VMSLTU_VX 0x68004057
+#define MASK_VMSLTU_VX 0xfc00707f
+#define MATCH_VMSNE_VI 0x64003057
+#define MASK_VMSNE_VI 0xfc00707f
+#define MATCH_VMSNE_VV 0x64000057
+#define MASK_VMSNE_VV 0xfc00707f
+#define MATCH_VMSNE_VX 0x64004057
+#define MASK_VMSNE_VX 0xfc00707f
+#define MATCH_VMSOF_M 0x50012057
+#define MASK_VMSOF_M 0xfc0ff07f
+#define MATCH_VMUL_VV 0x94002057
+#define MASK_VMUL_VV 0xfc00707f
+#define MATCH_VMUL_VX 0x94006057
+#define MASK_VMUL_VX 0xfc00707f
+#define MATCH_VMULH_VV 0x9c002057
+#define MASK_VMULH_VV 0xfc00707f
+#define MATCH_VMULH_VX 0x9c006057
+#define MASK_VMULH_VX 0xfc00707f
+#define MATCH_VMULHSU_VV 0x98002057
+#define MASK_VMULHSU_VV 0xfc00707f
+#define MATCH_VMULHSU_VX 0x98006057
+#define MASK_VMULHSU_VX 0xfc00707f
+#define MATCH_VMULHU_VV 0x90002057
+#define MASK_VMULHU_VV 0xfc00707f
+#define MATCH_VMULHU_VX 0x90006057
+#define MASK_VMULHU_VX 0xfc00707f
+#define MATCH_VMV1R_V 0x9e003057
+#define MASK_VMV1R_V 0xfe0ff07f
+#define MATCH_VMV2R_V 0x9e00b057
+#define MASK_VMV2R_V 0xfe0ff07f
+#define MATCH_VMV4R_V 0x9e01b057
+#define MASK_VMV4R_V 0xfe0ff07f
+#define MATCH_VMV8R_V 0x9e03b057
+#define MASK_VMV8R_V 0xfe0ff07f
+#define MATCH_VMV_S_X 0x42006057
+#define MASK_VMV_S_X 0xfff0707f
+#define MATCH_VMV_V_I 0x5e003057
+#define MASK_VMV_V_I 0xfff0707f
+#define MATCH_VMV_V_V 0x5e000057
+#define MASK_VMV_V_V 0xfff0707f
+#define MATCH_VMV_V_X 0x5e004057
+#define MASK_VMV_V_X 0xfff0707f
+#define MATCH_VMV_X_S 0x42002057
+#define MASK_VMV_X_S 0xfe0ff07f
+#define MATCH_VMXNOR_MM 0x7c002057
+#define MASK_VMXNOR_MM 0xfc00707f
+#define MATCH_VMXOR_MM 0x6c002057
+#define MASK_VMXOR_MM 0xfc00707f
+#define MATCH_VNCLIP_WI 0xbc003057
+#define MASK_VNCLIP_WI 0xfc00707f
+#define MATCH_VNCLIP_WV 0xbc000057
+#define MASK_VNCLIP_WV 0xfc00707f
+#define MATCH_VNCLIP_WX 0xbc004057
+#define MASK_VNCLIP_WX 0xfc00707f
+#define MATCH_VNCLIPU_WI 0xb8003057
+#define MASK_VNCLIPU_WI 0xfc00707f
+#define MATCH_VNCLIPU_WV 0xb8000057
+#define MASK_VNCLIPU_WV 0xfc00707f
+#define MATCH_VNCLIPU_WX 0xb8004057
+#define MASK_VNCLIPU_WX 0xfc00707f
+#define MATCH_VNMSAC_VV 0xbc002057
+#define MASK_VNMSAC_VV 0xfc00707f
+#define MATCH_VNMSAC_VX 0xbc006057
+#define MASK_VNMSAC_VX 0xfc00707f
+#define MATCH_VNMSUB_VV 0xac002057
+#define MASK_VNMSUB_VV 0xfc00707f
+#define MATCH_VNMSUB_VX 0xac006057
+#define MASK_VNMSUB_VX 0xfc00707f
+#define MATCH_VNSRA_WI 0xb4003057
+#define MASK_VNSRA_WI 0xfc00707f
+#define MATCH_VNSRA_WV 0xb4000057
+#define MASK_VNSRA_WV 0xfc00707f
+#define MATCH_VNSRA_WX 0xb4004057
+#define MASK_VNSRA_WX 0xfc00707f
+#define MATCH_VNSRL_WI 0xb0003057
+#define MASK_VNSRL_WI 0xfc00707f
+#define MATCH_VNSRL_WV 0xb0000057
+#define MASK_VNSRL_WV 0xfc00707f
+#define MATCH_VNSRL_WX 0xb0004057
+#define MASK_VNSRL_WX 0xfc00707f
+#define MATCH_VOR_VI 0x28003057
+#define MASK_VOR_VI 0xfc00707f
+#define MATCH_VOR_VV 0x28000057
+#define MASK_VOR_VV 0xfc00707f
+#define MATCH_VOR_VX 0x28004057
+#define MASK_VOR_VX 0xfc00707f
+#define MATCH_VREDAND_VS 0x4002057
+#define MASK_VREDAND_VS 0xfc00707f
+#define MATCH_VREDMAX_VS 0x1c002057
+#define MASK_VREDMAX_VS 0xfc00707f
+#define MATCH_VREDMAXU_VS 0x18002057
+#define MASK_VREDMAXU_VS 0xfc00707f
+#define MATCH_VREDMIN_VS 0x14002057
+#define MASK_VREDMIN_VS 0xfc00707f
+#define MATCH_VREDMINU_VS 0x10002057
+#define MASK_VREDMINU_VS 0xfc00707f
+#define MATCH_VREDOR_VS 0x8002057
+#define MASK_VREDOR_VS 0xfc00707f
+#define MATCH_VREDSUM_VS 0x2057
+#define MASK_VREDSUM_VS 0xfc00707f
+#define MATCH_VREDXOR_VS 0xc002057
+#define MASK_VREDXOR_VS 0xfc00707f
+#define MATCH_VREM_VV 0x8c002057
+#define MASK_VREM_VV 0xfc00707f
+#define MATCH_VREM_VX 0x8c006057
+#define MASK_VREM_VX 0xfc00707f
+#define MATCH_VREMU_VV 0x88002057
+#define MASK_VREMU_VV 0xfc00707f
+#define MATCH_VREMU_VX 0x88006057
+#define MASK_VREMU_VX 0xfc00707f
+#define MATCH_VRGATHER_VI 0x30003057
+#define MASK_VRGATHER_VI 0xfc00707f
+#define MATCH_VRGATHER_VV 0x30000057
+#define MASK_VRGATHER_VV 0xfc00707f
+#define MATCH_VRGATHER_VX 0x30004057
+#define MASK_VRGATHER_VX 0xfc00707f
+#define MATCH_VRGATHEREI16_VV 0x38000057
+#define MASK_VRGATHEREI16_VV 0xfc00707f
+#define MATCH_VRSUB_VI 0xc003057
+#define MASK_VRSUB_VI 0xfc00707f
+#define MATCH_VRSUB_VX 0xc004057
+#define MASK_VRSUB_VX 0xfc00707f
+#define MATCH_VS1R_V 0x2800027
+#define MASK_VS1R_V 0xfff0707f
+#define MATCH_VS2R_V 0x22800027
+#define MASK_VS2R_V 0xfff0707f
+#define MATCH_VS4R_V 0x62800027
+#define MASK_VS4R_V 0xfff0707f
+#define MATCH_VS8R_V 0xe2800027
+#define MASK_VS8R_V 0xfff0707f
+#define MATCH_VSADD_VI 0x84003057
+#define MASK_VSADD_VI 0xfc00707f
+#define MATCH_VSADD_VV 0x84000057
+#define MASK_VSADD_VV 0xfc00707f
+#define MATCH_VSADD_VX 0x84004057
+#define MASK_VSADD_VX 0xfc00707f
+#define MATCH_VSADDU_VI 0x80003057
+#define MASK_VSADDU_VI 0xfc00707f
+#define MATCH_VSADDU_VV 0x80000057
+#define MASK_VSADDU_VV 0xfc00707f
+#define MATCH_VSADDU_VX 0x80004057
+#define MASK_VSADDU_VX 0xfc00707f
+#define MATCH_VSBC_VVM 0x48000057
+#define MASK_VSBC_VVM 0xfe00707f
+#define MATCH_VSBC_VXM 0x48004057
+#define MASK_VSBC_VXM 0xfe00707f
+#define MATCH_VSE1024_V 0x10007027
+#define MASK_VSE1024_V 0x1df0707f
+#define MATCH_VSE128_V 0x10000027
+#define MASK_VSE128_V 0x1df0707f
+#define MATCH_VSE16_V 0x5027
+#define MASK_VSE16_V 0x1df0707f
+#define MATCH_VSE256_V 0x10005027
+#define MASK_VSE256_V 0x1df0707f
+#define MATCH_VSE32_V 0x6027
+#define MASK_VSE32_V 0x1df0707f
+#define MATCH_VSE512_V 0x10006027
+#define MASK_VSE512_V 0x1df0707f
+#define MATCH_VSE64_V 0x7027
+#define MASK_VSE64_V 0x1df0707f
+#define MATCH_VSE8_V 0x27
+#define MASK_VSE8_V 0x1df0707f
+#define MATCH_VSETIVLI 0xc0007057
+#define MASK_VSETIVLI 0xc000707f
+#define MATCH_VSETVL 0x80007057
+#define MASK_VSETVL 0xfe00707f
+#define MATCH_VSETVLI 0x7057
+#define MASK_VSETVLI 0x8000707f
+#define MATCH_VSEXT_VF2 0x4803a057
+#define MASK_VSEXT_VF2 0xfc0ff07f
+#define MATCH_VSEXT_VF4 0x4802a057
+#define MASK_VSEXT_VF4 0xfc0ff07f
+#define MATCH_VSEXT_VF8 0x4801a057
+#define MASK_VSEXT_VF8 0xfc0ff07f
+#define MATCH_VSLIDE1DOWN_VX 0x3c006057
+#define MASK_VSLIDE1DOWN_VX 0xfc00707f
+#define MATCH_VSLIDE1UP_VX 0x38006057
+#define MASK_VSLIDE1UP_VX 0xfc00707f
+#define MATCH_VSLIDEDOWN_VI 0x3c003057
+#define MASK_VSLIDEDOWN_VI 0xfc00707f
+#define MATCH_VSLIDEDOWN_VX 0x3c004057
+#define MASK_VSLIDEDOWN_VX 0xfc00707f
+#define MATCH_VSLIDEUP_VI 0x38003057
+#define MASK_VSLIDEUP_VI 0xfc00707f
+#define MATCH_VSLIDEUP_VX 0x38004057
+#define MASK_VSLIDEUP_VX 0xfc00707f
+#define MATCH_VSLL_VI 0x94003057
+#define MASK_VSLL_VI 0xfc00707f
+#define MATCH_VSLL_VV 0x94000057
+#define MASK_VSLL_VV 0xfc00707f
+#define MATCH_VSLL_VX 0x94004057
+#define MASK_VSLL_VX 0xfc00707f
+#define MATCH_VSM_V 0x2b00027
+#define MASK_VSM_V 0xfff0707f
+#define MATCH_VSMUL_VV 0x9c000057
+#define MASK_VSMUL_VV 0xfc00707f
+#define MATCH_VSMUL_VX 0x9c004057
+#define MASK_VSMUL_VX 0xfc00707f
+#define MATCH_VSOXEI1024_V 0x1c007027
+#define MASK_VSOXEI1024_V 0x1c00707f
+#define MATCH_VSOXEI128_V 0x1c000027
+#define MASK_VSOXEI128_V 0x1c00707f
+#define MATCH_VSOXEI16_V 0xc005027
+#define MASK_VSOXEI16_V 0x1c00707f
+#define MATCH_VSOXEI256_V 0x1c005027
+#define MASK_VSOXEI256_V 0x1c00707f
+#define MATCH_VSOXEI32_V 0xc006027
+#define MASK_VSOXEI32_V 0x1c00707f
+#define MATCH_VSOXEI512_V 0x1c006027
+#define MASK_VSOXEI512_V 0x1c00707f
+#define MATCH_VSOXEI64_V 0xc007027
+#define MASK_VSOXEI64_V 0x1c00707f
+#define MATCH_VSOXEI8_V 0xc000027
+#define MASK_VSOXEI8_V 0x1c00707f
+#define MATCH_VSRA_VI 0xa4003057
+#define MASK_VSRA_VI 0xfc00707f
+#define MATCH_VSRA_VV 0xa4000057
+#define MASK_VSRA_VV 0xfc00707f
+#define MATCH_VSRA_VX 0xa4004057
+#define MASK_VSRA_VX 0xfc00707f
+#define MATCH_VSRL_VI 0xa0003057
+#define MASK_VSRL_VI 0xfc00707f
+#define MATCH_VSRL_VV 0xa0000057
+#define MASK_VSRL_VV 0xfc00707f
+#define MATCH_VSRL_VX 0xa0004057
+#define MASK_VSRL_VX 0xfc00707f
+#define MATCH_VSSE1024_V 0x18007027
+#define MASK_VSSE1024_V 0x1c00707f
+#define MATCH_VSSE128_V 0x18000027
+#define MASK_VSSE128_V 0x1c00707f
+#define MATCH_VSSE16_V 0x8005027
+#define MASK_VSSE16_V 0x1c00707f
+#define MATCH_VSSE256_V 0x18005027
+#define MASK_VSSE256_V 0x1c00707f
+#define MATCH_VSSE32_V 0x8006027
+#define MASK_VSSE32_V 0x1c00707f
+#define MATCH_VSSE512_V 0x18006027
+#define MASK_VSSE512_V 0x1c00707f
+#define MATCH_VSSE64_V 0x8007027
+#define MASK_VSSE64_V 0x1c00707f
+#define MATCH_VSSE8_V 0x8000027
+#define MASK_VSSE8_V 0x1c00707f
+#define MATCH_VSSRA_VI 0xac003057
+#define MASK_VSSRA_VI 0xfc00707f
+#define MATCH_VSSRA_VV 0xac000057
+#define MASK_VSSRA_VV 0xfc00707f
+#define MATCH_VSSRA_VX 0xac004057
+#define MASK_VSSRA_VX 0xfc00707f
+#define MATCH_VSSRL_VI 0xa8003057
+#define MASK_VSSRL_VI 0xfc00707f
+#define MATCH_VSSRL_VV 0xa8000057
+#define MASK_VSSRL_VV 0xfc00707f
+#define MATCH_VSSRL_VX 0xa8004057
+#define MASK_VSSRL_VX 0xfc00707f
+#define MATCH_VSSUB_VV 0x8c000057
+#define MASK_VSSUB_VV 0xfc00707f
+#define MATCH_VSSUB_VX 0x8c004057
+#define MASK_VSSUB_VX 0xfc00707f
+#define MATCH_VSSUBU_VV 0x88000057
+#define MASK_VSSUBU_VV 0xfc00707f
+#define MATCH_VSSUBU_VX 0x88004057
+#define MASK_VSSUBU_VX 0xfc00707f
+#define MATCH_VSUB_VV 0x8000057
+#define MASK_VSUB_VV 0xfc00707f
+#define MATCH_VSUB_VX 0x8004057
+#define MASK_VSUB_VX 0xfc00707f
+#define MATCH_VSUXEI1024_V 0x14007027
+#define MASK_VSUXEI1024_V 0x1c00707f
+#define MATCH_VSUXEI128_V 0x14000027
+#define MASK_VSUXEI128_V 0x1c00707f
+#define MATCH_VSUXEI16_V 0x4005027
+#define MASK_VSUXEI16_V 0x1c00707f
+#define MATCH_VSUXEI256_V 0x14005027
+#define MASK_VSUXEI256_V 0x1c00707f
+#define MATCH_VSUXEI32_V 0x4006027
+#define MASK_VSUXEI32_V 0x1c00707f
+#define MATCH_VSUXEI512_V 0x14006027
+#define MASK_VSUXEI512_V 0x1c00707f
+#define MATCH_VSUXEI64_V 0x4007027
+#define MASK_VSUXEI64_V 0x1c00707f
+#define MATCH_VSUXEI8_V 0x4000027
+#define MASK_VSUXEI8_V 0x1c00707f
+#define MATCH_VWADD_VV 0xc4002057
+#define MASK_VWADD_VV 0xfc00707f
+#define MATCH_VWADD_VX 0xc4006057
+#define MASK_VWADD_VX 0xfc00707f
+#define MATCH_VWADD_WV 0xd4002057
+#define MASK_VWADD_WV 0xfc00707f
+#define MATCH_VWADD_WX 0xd4006057
+#define MASK_VWADD_WX 0xfc00707f
+#define MATCH_VWADDU_VV 0xc0002057
+#define MASK_VWADDU_VV 0xfc00707f
+#define MATCH_VWADDU_VX 0xc0006057
+#define MASK_VWADDU_VX 0xfc00707f
+#define MATCH_VWADDU_WV 0xd0002057
+#define MASK_VWADDU_WV 0xfc00707f
+#define MATCH_VWADDU_WX 0xd0006057
+#define MASK_VWADDU_WX 0xfc00707f
+#define MATCH_VWMACC_VV 0xf4002057
+#define MASK_VWMACC_VV 0xfc00707f
+#define MATCH_VWMACC_VX 0xf4006057
+#define MASK_VWMACC_VX 0xfc00707f
+#define MATCH_VWMACCSU_VV 0xfc002057
+#define MASK_VWMACCSU_VV 0xfc00707f
+#define MATCH_VWMACCSU_VX 0xfc006057
+#define MASK_VWMACCSU_VX 0xfc00707f
+#define MATCH_VWMACCU_VV 0xf0002057
+#define MASK_VWMACCU_VV 0xfc00707f
+#define MATCH_VWMACCU_VX 0xf0006057
+#define MASK_VWMACCU_VX 0xfc00707f
+#define MATCH_VWMACCUS_VX 0xf8006057
+#define MASK_VWMACCUS_VX 0xfc00707f
+#define MATCH_VWMUL_VV 0xec002057
+#define MASK_VWMUL_VV 0xfc00707f
+#define MATCH_VWMUL_VX 0xec006057
+#define MASK_VWMUL_VX 0xfc00707f
+#define MATCH_VWMULSU_VV 0xe8002057
+#define MASK_VWMULSU_VV 0xfc00707f
+#define MATCH_VWMULSU_VX 0xe8006057
+#define MASK_VWMULSU_VX 0xfc00707f
+#define MATCH_VWMULU_VV 0xe0002057
+#define MASK_VWMULU_VV 0xfc00707f
+#define MATCH_VWMULU_VX 0xe0006057
+#define MASK_VWMULU_VX 0xfc00707f
+#define MATCH_VWREDSUM_VS 0xc4000057
+#define MASK_VWREDSUM_VS 0xfc00707f
+#define MATCH_VWREDSUMU_VS 0xc0000057
+#define MASK_VWREDSUMU_VS 0xfc00707f
+#define MATCH_VWSUB_VV 0xcc002057
+#define MASK_VWSUB_VV 0xfc00707f
+#define MATCH_VWSUB_VX 0xcc006057
+#define MASK_VWSUB_VX 0xfc00707f
+#define MATCH_VWSUB_WV 0xdc002057
+#define MASK_VWSUB_WV 0xfc00707f
+#define MATCH_VWSUB_WX 0xdc006057
+#define MASK_VWSUB_WX 0xfc00707f
+#define MATCH_VWSUBU_VV 0xc8002057
+#define MASK_VWSUBU_VV 0xfc00707f
+#define MATCH_VWSUBU_VX 0xc8006057
+#define MASK_VWSUBU_VX 0xfc00707f
+#define MATCH_VWSUBU_WV 0xd8002057
+#define MASK_VWSUBU_WV 0xfc00707f
+#define MATCH_VWSUBU_WX 0xd8006057
+#define MASK_VWSUBU_WX 0xfc00707f
+#define MATCH_VXOR_VI 0x2c003057
+#define MASK_VXOR_VI 0xfc00707f
+#define MATCH_VXOR_VV 0x2c000057
+#define MASK_VXOR_VV 0xfc00707f
+#define MATCH_VXOR_VX 0x2c004057
+#define MASK_VXOR_VX 0xfc00707f
+#define MATCH_VZEXT_VF2 0x48032057
+#define MASK_VZEXT_VF2 0xfc0ff07f
+#define MATCH_VZEXT_VF4 0x48022057
+#define MASK_VZEXT_VF4 0xfc0ff07f
+#define MATCH_VZEXT_VF8 0x48012057
+#define MASK_VZEXT_VF8 0xfc0ff07f
#define MATCH_WEXT 0xce000077
-#define MASK_WEXT 0xfe00707f
+#define MASK_WEXT 0xfe00707f
+#define MATCH_WEXTI 0xde000077
+#define MASK_WEXTI 0xfe00707f
+#define MATCH_WFI 0x10500073
+#define MASK_WFI 0xffffffff
+#define MATCH_WRS_NTO 0xd00073
+#define MASK_WRS_NTO 0xffffffff
+#define MATCH_WRS_STO 0x1d00073
+#define MASK_WRS_STO 0xffffffff
+#define MATCH_XNOR 0x40004033
+#define MASK_XNOR 0xfe00707f
+#define MATCH_XOR 0x4033
+#define MASK_XOR 0xfe00707f
+#define MATCH_XORI 0x4013
+#define MASK_XORI 0x707f
+#define MATCH_XPERM16 0x28006033
+#define MASK_XPERM16 0xfe00707f
+#define MATCH_XPERM32 0x28000033
+#define MASK_XPERM32 0xfe00707f
+#define MATCH_XPERM4 0x28002033
+#define MASK_XPERM4 0xfe00707f
+#define MATCH_XPERM8 0x28004033
+#define MASK_XPERM8 0xfe00707f
#define MATCH_ZUNPKD810 0xacc00077
-#define MASK_ZUNPKD810 0xfff0707f
+#define MASK_ZUNPKD810 0xfff0707f
#define MATCH_ZUNPKD820 0xacd00077
-#define MASK_ZUNPKD820 0xfff0707f
+#define MASK_ZUNPKD820 0xfff0707f
#define MATCH_ZUNPKD830 0xace00077
-#define MASK_ZUNPKD830 0xfff0707f
+#define MASK_ZUNPKD830 0xfff0707f
#define MATCH_ZUNPKD831 0xacf00077
-#define MASK_ZUNPKD831 0xfff0707f
+#define MASK_ZUNPKD831 0xfff0707f
#define MATCH_ZUNPKD832 0xad700077
-#define MASK_ZUNPKD832 0xfff0707f
-#define MATCH_ADD32 0x40002077
-#define MASK_ADD32 0xfe00707f
-#define MATCH_CRAS32 0x44002077
-#define MASK_CRAS32 0xfe00707f
-#define MATCH_CRSA32 0x46002077
-#define MASK_CRSA32 0xfe00707f
-#define MATCH_KABS32 0xad200077
-#define MASK_KABS32 0xfff0707f
-#define MATCH_KADD32 0x10002077
-#define MASK_KADD32 0xfe00707f
-#define MATCH_KCRAS32 0x14002077
-#define MASK_KCRAS32 0xfe00707f
-#define MATCH_KCRSA32 0x16002077
-#define MASK_KCRSA32 0xfe00707f
-#define MATCH_KDMBB16 0xda001077
-#define MASK_KDMBB16 0xfe00707f
-#define MATCH_KDMBT16 0xea001077
-#define MASK_KDMBT16 0xfe00707f
-#define MATCH_KDMTT16 0xfa001077
-#define MASK_KDMTT16 0xfe00707f
-#define MATCH_KDMABB16 0xd8001077
-#define MASK_KDMABB16 0xfe00707f
-#define MATCH_KDMABT16 0xe8001077
-#define MASK_KDMABT16 0xfe00707f
-#define MATCH_KDMATT16 0xf8001077
-#define MASK_KDMATT16 0xfe00707f
-#define MATCH_KHMBB16 0xdc001077
-#define MASK_KHMBB16 0xfe00707f
-#define MATCH_KHMBT16 0xec001077
-#define MASK_KHMBT16 0xfe00707f
-#define MATCH_KHMTT16 0xfc001077
-#define MASK_KHMTT16 0xfe00707f
-#define MATCH_KMABB32 0x5a002077
-#define MASK_KMABB32 0xfe00707f
-#define MATCH_KMABT32 0x6a002077
-#define MASK_KMABT32 0xfe00707f
-#define MATCH_KMATT32 0x7a002077
-#define MASK_KMATT32 0xfe00707f
-#define MATCH_KMAXDA32 0x4a002077
-#define MASK_KMAXDA32 0xfe00707f
-#define MATCH_KMDA32 0x38002077
-#define MASK_KMDA32 0xfe00707f
-#define MATCH_KMXDA32 0x3a002077
-#define MASK_KMXDA32 0xfe00707f
-#define MATCH_KMADS32 0x5c002077
-#define MASK_KMADS32 0xfe00707f
-#define MATCH_KMADRS32 0x6c002077
-#define MASK_KMADRS32 0xfe00707f
-#define MATCH_KMAXDS32 0x7c002077
-#define MASK_KMAXDS32 0xfe00707f
-#define MATCH_KMSDA32 0x4c002077
-#define MASK_KMSDA32 0xfe00707f
-#define MATCH_KMSXDA32 0x4e002077
-#define MASK_KMSXDA32 0xfe00707f
-#define MATCH_KSLL32 0x64002077
-#define MASK_KSLL32 0xfe00707f
-#define MATCH_KSLLI32 0x84002077
-#define MASK_KSLLI32 0xfe00707f
-#define MATCH_KSLRA32 0x56002077
-#define MASK_KSLRA32 0xfe00707f
-#define MATCH_KSLRA32_U 0x66002077
-#define MASK_KSLRA32_U 0xfe00707f
-#define MATCH_KSTAS32 0xc0002077
-#define MASK_KSTAS32 0xfe00707f
-#define MATCH_KSTSA32 0xc2002077
-#define MASK_KSTSA32 0xfe00707f
-#define MATCH_KSUB32 0x12002077
-#define MASK_KSUB32 0xfe00707f
-#define MATCH_PKBB32 0xe002077
-#define MASK_PKBB32 0xfe00707f
-#define MATCH_PKBT32 0x1e002077
-#define MASK_PKBT32 0xfe00707f
-#define MATCH_PKTT32 0x2e002077
-#define MASK_PKTT32 0xfe00707f
-#define MATCH_PKTB32 0x3e002077
-#define MASK_PKTB32 0xfe00707f
-#define MATCH_RADD32 0x2077
-#define MASK_RADD32 0xfe00707f
-#define MATCH_RCRAS32 0x4002077
-#define MASK_RCRAS32 0xfe00707f
-#define MATCH_RCRSA32 0x6002077
-#define MASK_RCRSA32 0xfe00707f
-#define MATCH_RSTAS32 0xb0002077
-#define MASK_RSTAS32 0xfe00707f
-#define MATCH_RSTSA32 0xb2002077
-#define MASK_RSTSA32 0xfe00707f
-#define MATCH_RSUB32 0x2002077
-#define MASK_RSUB32 0xfe00707f
-#define MATCH_SLL32 0x54002077
-#define MASK_SLL32 0xfe00707f
-#define MATCH_SLLI32 0x74002077
-#define MASK_SLLI32 0xfe00707f
-#define MATCH_SMAX32 0x92002077
-#define MASK_SMAX32 0xfe00707f
-#define MATCH_SMBT32 0x18002077
-#define MASK_SMBT32 0xfe00707f
-#define MATCH_SMTT32 0x28002077
-#define MASK_SMTT32 0xfe00707f
-#define MATCH_SMDS32 0x58002077
-#define MASK_SMDS32 0xfe00707f
-#define MATCH_SMDRS32 0x68002077
-#define MASK_SMDRS32 0xfe00707f
-#define MATCH_SMXDS32 0x78002077
-#define MASK_SMXDS32 0xfe00707f
-#define MATCH_SMIN32 0x90002077
-#define MASK_SMIN32 0xfe00707f
-#define MATCH_SRA32 0x50002077
-#define MASK_SRA32 0xfe00707f
-#define MATCH_SRA32_U 0x60002077
-#define MASK_SRA32_U 0xfe00707f
-#define MATCH_SRAI32 0x70002077
-#define MASK_SRAI32 0xfe00707f
-#define MATCH_SRAI32_U 0x80002077
-#define MASK_SRAI32_U 0xfe00707f
-#define MATCH_SRAIW_U 0x34001077
-#define MASK_SRAIW_U 0xfe00707f
-#define MATCH_SRL32 0x52002077
-#define MASK_SRL32 0xfe00707f
-#define MATCH_SRL32_U 0x62002077
-#define MASK_SRL32_U 0xfe00707f
-#define MATCH_SRLI32 0x72002077
-#define MASK_SRLI32 0xfe00707f
-#define MATCH_SRLI32_U 0x82002077
-#define MASK_SRLI32_U 0xfe00707f
-#define MATCH_STAS32 0xf0002077
-#define MASK_STAS32 0xfe00707f
-#define MATCH_STSA32 0xf2002077
-#define MASK_STSA32 0xfe00707f
-#define MATCH_SUB32 0x42002077
-#define MASK_SUB32 0xfe00707f
-#define MATCH_UKADD32 0x30002077
-#define MASK_UKADD32 0xfe00707f
-#define MATCH_UKCRAS32 0x34002077
-#define MASK_UKCRAS32 0xfe00707f
-#define MATCH_UKCRSA32 0x36002077
-#define MASK_UKCRSA32 0xfe00707f
-#define MATCH_UKSTAS32 0xe0002077
-#define MASK_UKSTAS32 0xfe00707f
-#define MATCH_UKSTSA32 0xe2002077
-#define MASK_UKSTSA32 0xfe00707f
-#define MATCH_UKSUB32 0x32002077
-#define MASK_UKSUB32 0xfe00707f
-#define MATCH_UMAX32 0xa2002077
-#define MASK_UMAX32 0xfe00707f
-#define MATCH_UMIN32 0xa0002077
-#define MASK_UMIN32 0xfe00707f
-#define MATCH_URADD32 0x20002077
-#define MASK_URADD32 0xfe00707f
-#define MATCH_URCRAS32 0x24002077
-#define MASK_URCRAS32 0xfe00707f
-#define MATCH_URCRSA32 0x26002077
-#define MASK_URCRSA32 0xfe00707f
-#define MATCH_URSTAS32 0xd0002077
-#define MASK_URSTAS32 0xfe00707f
-#define MATCH_URSTSA32 0xd2002077
-#define MASK_URSTSA32 0xfe00707f
-#define MATCH_URSUB32 0x22002077
-#define MASK_URSUB32 0xfe00707f
-#define MATCH_VMVNFR_V 0x9e003057
-#define MASK_VMVNFR_V 0xfe00707f
-#define MATCH_VL1R_V 0x2800007
-#define MASK_VL1R_V 0xfff0707f
-#define MATCH_VL2R_V 0x6805007
-#define MASK_VL2R_V 0xfff0707f
-#define MATCH_VL4R_V 0xe806007
-#define MASK_VL4R_V 0xfff0707f
-#define MATCH_VL8R_V 0x1e807007
-#define MASK_VL8R_V 0xfff0707f
-#define MATCH_VLE1_V 0x2b00007
-#define MASK_VLE1_V 0xfff0707f
-#define MATCH_VSE1_V 0x2b00027
-#define MASK_VSE1_V 0xfff0707f
-#define MATCH_VFREDSUM_VS 0x4001057
-#define MASK_VFREDSUM_VS 0xfc00707f
-#define MATCH_VFWREDSUM_VS 0xc4001057
-#define MASK_VFWREDSUM_VS 0xfc00707f
-#define MATCH_VPOPC_M 0x40082057
-#define MASK_VPOPC_M 0xfc0ff07f
-#define MATCH_VMORNOT_MM 0x70002057
-#define MASK_VMORNOT_MM 0xfc00707f
-#define MATCH_VMANDNOT_MM 0x60002057
-#define MASK_VMANDNOT_MM 0xfc00707f
+#define MASK_ZUNPKD832 0xfff0707f
+
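/* Editorial sketch: every MATCH/MASK pair above follows the standard
 * decode convention -- an instruction word x is an instance of opcode X
 * exactly when (x & MASK_X) == MATCH_X. The mask selects the fixed bits
 * of the encoding and the match gives their required values. A minimal,
 * hypothetical helper (is_vsetvli is not part of this header; uint32_t
 * assumes <stdint.h>): */
static inline int is_vsetvli(uint32_t insn)
{
    /* vsetvli: bit 31 clear, funct3 = 0b111, opcode = 0x57 */
    return (insn & MASK_VSETVLI) == MATCH_VSETVLI;
}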
#define CSR_FFLAGS 0x1
#define CSR_FRM 0x2
#define CSR_FCSR 0x3
@@ -2879,11 +2801,16 @@
#define CSR_STVEC 0x105
#define CSR_SCOUNTEREN 0x106
#define CSR_SENVCFG 0x10a
+#define CSR_SSTATEEN0 0x10c
+#define CSR_SSTATEEN1 0x10d
+#define CSR_SSTATEEN2 0x10e
+#define CSR_SSTATEEN3 0x10f
#define CSR_SSCRATCH 0x140
#define CSR_SEPC 0x141
#define CSR_SCAUSE 0x142
#define CSR_STVAL 0x143
#define CSR_SIP 0x144
+#define CSR_STIMECMP 0x14d
#define CSR_SATP 0x180
#define CSR_SCONTEXT 0x5a8
#define CSR_VSSTATUS 0x200
@@ -2894,6 +2821,7 @@
#define CSR_VSCAUSE 0x242
#define CSR_VSTVAL 0x243
#define CSR_VSIP 0x244
+#define CSR_VSTIMECMP 0x24d
#define CSR_VSATP 0x280
#define CSR_HSTATUS 0x600
#define CSR_HEDELEG 0x602
@@ -2903,6 +2831,10 @@
#define CSR_HCOUNTEREN 0x606
#define CSR_HGEIE 0x607
#define CSR_HENVCFG 0x60a
+#define CSR_HSTATEEN0 0x60c
+#define CSR_HSTATEEN1 0x60d
+#define CSR_HSTATEEN2 0x60e
+#define CSR_HSTATEEN3 0x60f
#define CSR_HTVAL 0x643
#define CSR_HIP 0x644
#define CSR_HVIP 0x645
@@ -2910,6 +2842,7 @@
#define CSR_HGATP 0x680
#define CSR_HCONTEXT 0x6a8
#define CSR_HGEIP 0xe12
+#define CSR_SCOUNTOVF 0xda0
#define CSR_UTVT 0x7
#define CSR_UNXTI 0x45
#define CSR_UINTSTATUS 0x46
@@ -2933,6 +2866,10 @@
#define CSR_MTVEC 0x305
#define CSR_MCOUNTEREN 0x306
#define CSR_MENVCFG 0x30a
+#define CSR_MSTATEEN0 0x30c
+#define CSR_MSTATEEN1 0x30d
+#define CSR_MSTATEEN2 0x30e
+#define CSR_MSTATEEN3 0x30f
#define CSR_MCOUNTINHIBIT 0x320
#define CSR_MSCRATCH 0x340
#define CSR_MEPC 0x341
@@ -3099,8 +3036,14 @@
#define CSR_MIMPID 0xf13
#define CSR_MHARTID 0xf14
#define CSR_MCONFIGPTR 0xf15
+#define CSR_STIMECMPH 0x15d
+#define CSR_VSTIMECMPH 0x25d
#define CSR_HTIMEDELTAH 0x615
#define CSR_HENVCFGH 0x61a
+#define CSR_HSTATEEN0H 0x61c
+#define CSR_HSTATEEN1H 0x61d
+#define CSR_HSTATEEN2H 0x61e
+#define CSR_HSTATEEN3H 0x61f
#define CSR_CYCLEH 0xc80
#define CSR_TIMEH 0xc81
#define CSR_INSTRETH 0xc82
@@ -3135,6 +3078,39 @@
#define CSR_HPMCOUNTER31H 0xc9f
#define CSR_MSTATUSH 0x310
#define CSR_MENVCFGH 0x31a
+#define CSR_MSTATEEN0H 0x31c
+#define CSR_MSTATEEN1H 0x31d
+#define CSR_MSTATEEN2H 0x31e
+#define CSR_MSTATEEN3H 0x31f
+#define CSR_MHPMEVENT3H 0x723
+#define CSR_MHPMEVENT4H 0x724
+#define CSR_MHPMEVENT5H 0x725
+#define CSR_MHPMEVENT6H 0x726
+#define CSR_MHPMEVENT7H 0x727
+#define CSR_MHPMEVENT8H 0x728
+#define CSR_MHPMEVENT9H 0x729
+#define CSR_MHPMEVENT10H 0x72a
+#define CSR_MHPMEVENT11H 0x72b
+#define CSR_MHPMEVENT12H 0x72c
+#define CSR_MHPMEVENT13H 0x72d
+#define CSR_MHPMEVENT14H 0x72e
+#define CSR_MHPMEVENT15H 0x72f
+#define CSR_MHPMEVENT16H 0x730
+#define CSR_MHPMEVENT17H 0x731
+#define CSR_MHPMEVENT18H 0x732
+#define CSR_MHPMEVENT19H 0x733
+#define CSR_MHPMEVENT20H 0x734
+#define CSR_MHPMEVENT21H 0x735
+#define CSR_MHPMEVENT22H 0x736
+#define CSR_MHPMEVENT23H 0x737
+#define CSR_MHPMEVENT24H 0x738
+#define CSR_MHPMEVENT25H 0x739
+#define CSR_MHPMEVENT26H 0x73a
+#define CSR_MHPMEVENT27H 0x73b
+#define CSR_MHPMEVENT28H 0x73c
+#define CSR_MHPMEVENT29H 0x73d
+#define CSR_MHPMEVENT30H 0x73e
+#define CSR_MHPMEVENT31H 0x73f
#define CSR_MSECCFGH 0x757
#define CSR_MCYCLEH 0xb80
#define CSR_MINSTRETH 0xb82
@@ -3167,6 +3143,7 @@
#define CSR_MHPMCOUNTER29H 0xb9d
#define CSR_MHPMCOUNTER30H 0xb9e
#define CSR_MHPMCOUNTER31H 0xb9f
+
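/* Editorial note (from the RISC-V privileged spec, not this header):
 * a CSR's 12-bit address encodes its access class. Bits [11:10] equal
 * to 0b11 mark the register read-only, and bits [9:8] give the lowest
 * privilege level that may access it (0 = U, 1 = S, 2 = H/VS, 3 = M).
 * Hypothetical helpers (uint16_t assumes <stdint.h>): */
static inline int csr_is_read_only(uint16_t addr) { return ((addr >> 10) & 0x3) == 0x3; }
static inline int csr_min_priv(uint16_t addr)     { return (addr >> 8) & 0x3; }
/* Example: CSR_SCOUNTOVF (0xda0) decodes as read-only, supervisor-level,
 * while CSR_MSTATEEN0 (0x30c) decodes as read/write, machine-level. */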
#define CAUSE_MISALIGNED_FETCH 0x0
#define CAUSE_FETCH_ACCESS 0x1
#define CAUSE_ILLEGAL_INSTRUCTION 0x2
@@ -3186,993 +3163,491 @@
#define CAUSE_LOAD_GUEST_PAGE_FAULT 0x15
#define CAUSE_VIRTUAL_INSTRUCTION 0x16
#define CAUSE_STORE_GUEST_PAGE_FAULT 0x17
+
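/* Editorial sketch: the CAUSE_* values above are the synchronous
 * exception codes written to mcause/scause when the interrupt bit is
 * clear. A hypothetical helper mapping a few of them to names: */
static inline const char *trap_name(unsigned long cause)
{
    switch (cause) {
    case CAUSE_MISALIGNED_FETCH:       return "instruction address misaligned";
    case CAUSE_ILLEGAL_INSTRUCTION:    return "illegal instruction";
    case CAUSE_VIRTUAL_INSTRUCTION:    return "virtual instruction";
    case CAUSE_STORE_GUEST_PAGE_FAULT: return "store/AMO guest-page fault";
    default:                           return "unknown cause";
    }
}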
+#define INSN_FIELD_RD 0xf80
+#define INSN_FIELD_RT 0xf8000
+#define INSN_FIELD_RS1 0xf8000
+#define INSN_FIELD_RS2 0x1f00000
+#define INSN_FIELD_RS3 0xf8000000
+#define INSN_FIELD_AQRL 0x6000000
+#define INSN_FIELD_AQ 0x4000000
+#define INSN_FIELD_RL 0x2000000
+#define INSN_FIELD_FM 0xf0000000
+#define INSN_FIELD_PRED 0xf000000
+#define INSN_FIELD_SUCC 0xf00000
+#define INSN_FIELD_RM 0x7000
+#define INSN_FIELD_FUNCT3 0x7000
+#define INSN_FIELD_FUNCT2 0x6000000
+#define INSN_FIELD_IMM20 0xfffff000
+#define INSN_FIELD_JIMM20 0xfffff000
+#define INSN_FIELD_IMM12 0xfff00000
+#define INSN_FIELD_CSR 0xfff00000
+#define INSN_FIELD_IMM12HI 0xfe000000
+#define INSN_FIELD_BIMM12HI 0xfe000000
+#define INSN_FIELD_IMM12LO 0xf80
+#define INSN_FIELD_BIMM12LO 0xf80
+#define INSN_FIELD_ZIMM 0xf8000
+#define INSN_FIELD_SHAMT 0x7f00000
+#define INSN_FIELD_SHAMTW 0x1f00000
+#define INSN_FIELD_SHAMTW4 0xf00000
+#define INSN_FIELD_SHAMTD 0x3f00000
+#define INSN_FIELD_BS 0xc0000000
+#define INSN_FIELD_RNUM 0xf00000
+#define INSN_FIELD_RC 0x3e000000
+#define INSN_FIELD_IMM2 0x300000
+#define INSN_FIELD_IMM3 0x700000
+#define INSN_FIELD_IMM4 0xf00000
+#define INSN_FIELD_IMM5 0x1f00000
+#define INSN_FIELD_IMM6 0x3f00000
+#define INSN_FIELD_OPCODE 0x7f
+#define INSN_FIELD_FUNCT7 0xfe000000
+#define INSN_FIELD_VD 0xf80
+#define INSN_FIELD_VS3 0xf80
+#define INSN_FIELD_VS1 0xf8000
+#define INSN_FIELD_VS2 0x1f00000
+#define INSN_FIELD_VM 0x2000000
+#define INSN_FIELD_WD 0x4000000
+#define INSN_FIELD_AMOOP 0xf8000000
+#define INSN_FIELD_NF 0xe0000000
+#define INSN_FIELD_SIMM5 0xf8000
+#define INSN_FIELD_ZIMM10 0x3ff00000
+#define INSN_FIELD_ZIMM11 0x7ff00000
+#define INSN_FIELD_C_NZUIMM10 0x1fe0
+#define INSN_FIELD_C_UIMM7LO 0x60
+#define INSN_FIELD_C_UIMM7HI 0x1c00
+#define INSN_FIELD_C_UIMM8LO 0x60
+#define INSN_FIELD_C_UIMM8HI 0x1c00
+#define INSN_FIELD_C_UIMM9LO 0x60
+#define INSN_FIELD_C_UIMM9HI 0x1c00
+#define INSN_FIELD_C_NZIMM6LO 0x7c
+#define INSN_FIELD_C_NZIMM6HI 0x1000
+#define INSN_FIELD_C_IMM6LO 0x7c
+#define INSN_FIELD_C_IMM6HI 0x1000
+#define INSN_FIELD_C_NZIMM10HI 0x1000
+#define INSN_FIELD_C_NZIMM10LO 0x7c
+#define INSN_FIELD_C_NZIMM18HI 0x1000
+#define INSN_FIELD_C_NZIMM18LO 0x7c
+#define INSN_FIELD_C_IMM12 0x1ffc
+#define INSN_FIELD_C_BIMM9LO 0x7c
+#define INSN_FIELD_C_BIMM9HI 0x1c00
+#define INSN_FIELD_C_NZUIMM5 0x7c
+#define INSN_FIELD_C_NZUIMM6LO 0x7c
+#define INSN_FIELD_C_NZUIMM6HI 0x1000
+#define INSN_FIELD_C_UIMM8SPLO 0x7c
+#define INSN_FIELD_C_UIMM8SPHI 0x1000
+#define INSN_FIELD_C_UIMM8SP_S 0x1f80
+#define INSN_FIELD_C_UIMM10SPLO 0x7c
+#define INSN_FIELD_C_UIMM10SPHI 0x1000
+#define INSN_FIELD_C_UIMM9SPLO 0x7c
+#define INSN_FIELD_C_UIMM9SPHI 0x1000
+#define INSN_FIELD_C_UIMM10SP_S 0x1f80
+#define INSN_FIELD_C_UIMM9SP_S 0x1f80
+#define INSN_FIELD_RS1_P 0x380
+#define INSN_FIELD_RS2_P 0x1c
+#define INSN_FIELD_RD_P 0x1c
+#define INSN_FIELD_RD_RS1_N0 0xf80
+#define INSN_FIELD_RD_RS1_P 0x380
+#define INSN_FIELD_RD_RS1 0xf80
+#define INSN_FIELD_RD_N2 0xf80
+#define INSN_FIELD_RD_N0 0xf80
+#define INSN_FIELD_RS1_N0 0xf80
+#define INSN_FIELD_C_RS2_N0 0x7c
+#define INSN_FIELD_C_RS1_N0 0xf80
+#define INSN_FIELD_C_RS2 0x7c
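/* Editorial sketch: each INSN_FIELD_* constant above is a contiguous
 * bit mask selecting one field of an instruction word; a field's value
 * is recovered by masking and shifting right by the mask's trailing-zero
 * count. Hypothetical helper for rd (bits [11:7], INSN_FIELD_RD == 0xf80;
 * uint32_t assumes <stdint.h>): */
static inline uint32_t insn_rd(uint32_t insn)
{
    return (insn & INSN_FIELD_RD) >> 7;
}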
#endif
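/* Editorial note: the #endif above closes the header's main include
 * guard, so the DECLARE_INSN block below sits outside it and can be
 * expanded on a repeat inclusion. It is an X-macro hook: a consumer
 * defines DECLARE_INSN(name, match, mask) before including encoding.h
 * and gets one expansion per opcode. One plausible consumer-side use
 * (a hypothetical sketch, shown commented out because it belongs in a
 * .c file, not in this header) is building a flat decode table:
 *
 *   struct insn_desc { const char *name; uint32_t match, mask; };
 *   static const struct insn_desc insn_table[] = {
 *   #define DECLARE_INSN(name, match, mask) { #name, match, mask },
 *   #include "encoding.h"
 *   #undef DECLARE_INSN
 *   };
 */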
#ifdef DECLARE_INSN
-DECLARE_INSN(slli_rv32, MATCH_SLLI_RV32, MASK_SLLI_RV32)
-DECLARE_INSN(srli_rv32, MATCH_SRLI_RV32, MASK_SRLI_RV32)
-DECLARE_INSN(srai_rv32, MATCH_SRAI_RV32, MASK_SRAI_RV32)
-DECLARE_INSN(frflags, MATCH_FRFLAGS, MASK_FRFLAGS)
-DECLARE_INSN(fsflags, MATCH_FSFLAGS, MASK_FSFLAGS)
-DECLARE_INSN(fsflagsi, MATCH_FSFLAGSI, MASK_FSFLAGSI)
-DECLARE_INSN(frrm, MATCH_FRRM, MASK_FRRM)
-DECLARE_INSN(fsrm, MATCH_FSRM, MASK_FSRM)
-DECLARE_INSN(fsrmi, MATCH_FSRMI, MASK_FSRMI)
-DECLARE_INSN(fscsr, MATCH_FSCSR, MASK_FSCSR)
-DECLARE_INSN(frcsr, MATCH_FRCSR, MASK_FRCSR)
-DECLARE_INSN(rdcycle, MATCH_RDCYCLE, MASK_RDCYCLE)
-DECLARE_INSN(rdtime, MATCH_RDTIME, MASK_RDTIME)
-DECLARE_INSN(rdinstret, MATCH_RDINSTRET, MASK_RDINSTRET)
-DECLARE_INSN(rdcycleh, MATCH_RDCYCLEH, MASK_RDCYCLEH)
-DECLARE_INSN(rdtimeh, MATCH_RDTIMEH, MASK_RDTIMEH)
-DECLARE_INSN(rdinstreth, MATCH_RDINSTRETH, MASK_RDINSTRETH)
-DECLARE_INSN(scall, MATCH_SCALL, MASK_SCALL)
-DECLARE_INSN(sbreak, MATCH_SBREAK, MASK_SBREAK)
-DECLARE_INSN(fmv_x_s, MATCH_FMV_X_S, MASK_FMV_X_S)
-DECLARE_INSN(fmv_s_x, MATCH_FMV_S_X, MASK_FMV_S_X)
-DECLARE_INSN(fence_tso, MATCH_FENCE_TSO, MASK_FENCE_TSO)
-DECLARE_INSN(pause, MATCH_PAUSE, MASK_PAUSE)
-DECLARE_INSN(beq, MATCH_BEQ, MASK_BEQ)
-DECLARE_INSN(bne, MATCH_BNE, MASK_BNE)
-DECLARE_INSN(blt, MATCH_BLT, MASK_BLT)
-DECLARE_INSN(bge, MATCH_BGE, MASK_BGE)
-DECLARE_INSN(bltu, MATCH_BLTU, MASK_BLTU)
-DECLARE_INSN(bgeu, MATCH_BGEU, MASK_BGEU)
-DECLARE_INSN(jalr, MATCH_JALR, MASK_JALR)
-DECLARE_INSN(jal, MATCH_JAL, MASK_JAL)
-DECLARE_INSN(lui, MATCH_LUI, MASK_LUI)
-DECLARE_INSN(auipc, MATCH_AUIPC, MASK_AUIPC)
-DECLARE_INSN(addi, MATCH_ADDI, MASK_ADDI)
-DECLARE_INSN(slti, MATCH_SLTI, MASK_SLTI)
-DECLARE_INSN(sltiu, MATCH_SLTIU, MASK_SLTIU)
-DECLARE_INSN(xori, MATCH_XORI, MASK_XORI)
-DECLARE_INSN(ori, MATCH_ORI, MASK_ORI)
-DECLARE_INSN(andi, MATCH_ANDI, MASK_ANDI)
DECLARE_INSN(add, MATCH_ADD, MASK_ADD)
-DECLARE_INSN(sub, MATCH_SUB, MASK_SUB)
-DECLARE_INSN(sll, MATCH_SLL, MASK_SLL)
-DECLARE_INSN(slt, MATCH_SLT, MASK_SLT)
-DECLARE_INSN(sltu, MATCH_SLTU, MASK_SLTU)
-DECLARE_INSN(xor, MATCH_XOR, MASK_XOR)
-DECLARE_INSN(srl, MATCH_SRL, MASK_SRL)
-DECLARE_INSN(sra, MATCH_SRA, MASK_SRA)
-DECLARE_INSN(or, MATCH_OR, MASK_OR)
-DECLARE_INSN(and, MATCH_AND, MASK_AND)
-DECLARE_INSN(lb, MATCH_LB, MASK_LB)
-DECLARE_INSN(lh, MATCH_LH, MASK_LH)
-DECLARE_INSN(lw, MATCH_LW, MASK_LW)
-DECLARE_INSN(lbu, MATCH_LBU, MASK_LBU)
-DECLARE_INSN(lhu, MATCH_LHU, MASK_LHU)
-DECLARE_INSN(sb, MATCH_SB, MASK_SB)
-DECLARE_INSN(sh, MATCH_SH, MASK_SH)
-DECLARE_INSN(sw, MATCH_SW, MASK_SW)
-DECLARE_INSN(fence, MATCH_FENCE, MASK_FENCE)
-DECLARE_INSN(fence_i, MATCH_FENCE_I, MASK_FENCE_I)
+DECLARE_INSN(add16, MATCH_ADD16, MASK_ADD16)
+DECLARE_INSN(add32, MATCH_ADD32, MASK_ADD32)
+DECLARE_INSN(add64, MATCH_ADD64, MASK_ADD64)
+DECLARE_INSN(add8, MATCH_ADD8, MASK_ADD8)
+DECLARE_INSN(add_uw, MATCH_ADD_UW, MASK_ADD_UW)
+DECLARE_INSN(addi, MATCH_ADDI, MASK_ADDI)
DECLARE_INSN(addiw, MATCH_ADDIW, MASK_ADDIW)
-DECLARE_INSN(slliw, MATCH_SLLIW, MASK_SLLIW)
-DECLARE_INSN(srliw, MATCH_SRLIW, MASK_SRLIW)
-DECLARE_INSN(sraiw, MATCH_SRAIW, MASK_SRAIW)
DECLARE_INSN(addw, MATCH_ADDW, MASK_ADDW)
-DECLARE_INSN(subw, MATCH_SUBW, MASK_SUBW)
-DECLARE_INSN(sllw, MATCH_SLLW, MASK_SLLW)
-DECLARE_INSN(srlw, MATCH_SRLW, MASK_SRLW)
-DECLARE_INSN(sraw, MATCH_SRAW, MASK_SRAW)
-DECLARE_INSN(ld, MATCH_LD, MASK_LD)
-DECLARE_INSN(lwu, MATCH_LWU, MASK_LWU)
-DECLARE_INSN(sd, MATCH_SD, MASK_SD)
-DECLARE_INSN(slli, MATCH_SLLI, MASK_SLLI)
-DECLARE_INSN(srli, MATCH_SRLI, MASK_SRLI)
-DECLARE_INSN(srai, MATCH_SRAI, MASK_SRAI)
-DECLARE_INSN(mul, MATCH_MUL, MASK_MUL)
-DECLARE_INSN(mulh, MATCH_MULH, MASK_MULH)
-DECLARE_INSN(mulhsu, MATCH_MULHSU, MASK_MULHSU)
-DECLARE_INSN(mulhu, MATCH_MULHU, MASK_MULHU)
-DECLARE_INSN(div, MATCH_DIV, MASK_DIV)
-DECLARE_INSN(divu, MATCH_DIVU, MASK_DIVU)
-DECLARE_INSN(rem, MATCH_REM, MASK_REM)
-DECLARE_INSN(remu, MATCH_REMU, MASK_REMU)
-DECLARE_INSN(mulw, MATCH_MULW, MASK_MULW)
-DECLARE_INSN(divw, MATCH_DIVW, MASK_DIVW)
-DECLARE_INSN(divuw, MATCH_DIVUW, MASK_DIVUW)
-DECLARE_INSN(remw, MATCH_REMW, MASK_REMW)
-DECLARE_INSN(remuw, MATCH_REMUW, MASK_REMUW)
+DECLARE_INSN(aes32dsi, MATCH_AES32DSI, MASK_AES32DSI)
+DECLARE_INSN(aes32dsmi, MATCH_AES32DSMI, MASK_AES32DSMI)
+DECLARE_INSN(aes32esi, MATCH_AES32ESI, MASK_AES32ESI)
+DECLARE_INSN(aes32esmi, MATCH_AES32ESMI, MASK_AES32ESMI)
+DECLARE_INSN(aes64ds, MATCH_AES64DS, MASK_AES64DS)
+DECLARE_INSN(aes64dsm, MATCH_AES64DSM, MASK_AES64DSM)
+DECLARE_INSN(aes64es, MATCH_AES64ES, MASK_AES64ES)
+DECLARE_INSN(aes64esm, MATCH_AES64ESM, MASK_AES64ESM)
+DECLARE_INSN(aes64im, MATCH_AES64IM, MASK_AES64IM)
+DECLARE_INSN(aes64ks1i, MATCH_AES64KS1I, MASK_AES64KS1I)
+DECLARE_INSN(aes64ks2, MATCH_AES64KS2, MASK_AES64KS2)
+DECLARE_INSN(amoadd_d, MATCH_AMOADD_D, MASK_AMOADD_D)
DECLARE_INSN(amoadd_w, MATCH_AMOADD_W, MASK_AMOADD_W)
-DECLARE_INSN(amoxor_w, MATCH_AMOXOR_W, MASK_AMOXOR_W)
-DECLARE_INSN(amoor_w, MATCH_AMOOR_W, MASK_AMOOR_W)
+DECLARE_INSN(amoand_d, MATCH_AMOAND_D, MASK_AMOAND_D)
DECLARE_INSN(amoand_w, MATCH_AMOAND_W, MASK_AMOAND_W)
-DECLARE_INSN(amomin_w, MATCH_AMOMIN_W, MASK_AMOMIN_W)
+DECLARE_INSN(amomax_d, MATCH_AMOMAX_D, MASK_AMOMAX_D)
DECLARE_INSN(amomax_w, MATCH_AMOMAX_W, MASK_AMOMAX_W)
-DECLARE_INSN(amominu_w, MATCH_AMOMINU_W, MASK_AMOMINU_W)
+DECLARE_INSN(amomaxu_d, MATCH_AMOMAXU_D, MASK_AMOMAXU_D)
DECLARE_INSN(amomaxu_w, MATCH_AMOMAXU_W, MASK_AMOMAXU_W)
-DECLARE_INSN(amoswap_w, MATCH_AMOSWAP_W, MASK_AMOSWAP_W)
-DECLARE_INSN(lr_w, MATCH_LR_W, MASK_LR_W)
-DECLARE_INSN(sc_w, MATCH_SC_W, MASK_SC_W)
-DECLARE_INSN(amoadd_d, MATCH_AMOADD_D, MASK_AMOADD_D)
-DECLARE_INSN(amoxor_d, MATCH_AMOXOR_D, MASK_AMOXOR_D)
-DECLARE_INSN(amoor_d, MATCH_AMOOR_D, MASK_AMOOR_D)
-DECLARE_INSN(amoand_d, MATCH_AMOAND_D, MASK_AMOAND_D)
DECLARE_INSN(amomin_d, MATCH_AMOMIN_D, MASK_AMOMIN_D)
-DECLARE_INSN(amomax_d, MATCH_AMOMAX_D, MASK_AMOMAX_D)
+DECLARE_INSN(amomin_w, MATCH_AMOMIN_W, MASK_AMOMIN_W)
DECLARE_INSN(amominu_d, MATCH_AMOMINU_D, MASK_AMOMINU_D)
-DECLARE_INSN(amomaxu_d, MATCH_AMOMAXU_D, MASK_AMOMAXU_D)
+DECLARE_INSN(amominu_w, MATCH_AMOMINU_W, MASK_AMOMINU_W)
+DECLARE_INSN(amoor_d, MATCH_AMOOR_D, MASK_AMOOR_D)
+DECLARE_INSN(amoor_w, MATCH_AMOOR_W, MASK_AMOOR_W)
DECLARE_INSN(amoswap_d, MATCH_AMOSWAP_D, MASK_AMOSWAP_D)
-DECLARE_INSN(lr_d, MATCH_LR_D, MASK_LR_D)
-DECLARE_INSN(sc_d, MATCH_SC_D, MASK_SC_D)
-DECLARE_INSN(hfence_vvma, MATCH_HFENCE_VVMA, MASK_HFENCE_VVMA)
-DECLARE_INSN(hfence_gvma, MATCH_HFENCE_GVMA, MASK_HFENCE_GVMA)
-DECLARE_INSN(hlv_b, MATCH_HLV_B, MASK_HLV_B)
-DECLARE_INSN(hlv_bu, MATCH_HLV_BU, MASK_HLV_BU)
-DECLARE_INSN(hlv_h, MATCH_HLV_H, MASK_HLV_H)
-DECLARE_INSN(hlv_hu, MATCH_HLV_HU, MASK_HLV_HU)
-DECLARE_INSN(hlvx_hu, MATCH_HLVX_HU, MASK_HLVX_HU)
-DECLARE_INSN(hlv_w, MATCH_HLV_W, MASK_HLV_W)
-DECLARE_INSN(hlvx_wu, MATCH_HLVX_WU, MASK_HLVX_WU)
-DECLARE_INSN(hsv_b, MATCH_HSV_B, MASK_HSV_B)
-DECLARE_INSN(hsv_h, MATCH_HSV_H, MASK_HSV_H)
-DECLARE_INSN(hsv_w, MATCH_HSV_W, MASK_HSV_W)
-DECLARE_INSN(hlv_wu, MATCH_HLV_WU, MASK_HLV_WU)
-DECLARE_INSN(hlv_d, MATCH_HLV_D, MASK_HLV_D)
-DECLARE_INSN(hsv_d, MATCH_HSV_D, MASK_HSV_D)
-DECLARE_INSN(fadd_s, MATCH_FADD_S, MASK_FADD_S)
-DECLARE_INSN(fsub_s, MATCH_FSUB_S, MASK_FSUB_S)
-DECLARE_INSN(fmul_s, MATCH_FMUL_S, MASK_FMUL_S)
-DECLARE_INSN(fdiv_s, MATCH_FDIV_S, MASK_FDIV_S)
-DECLARE_INSN(fsgnj_s, MATCH_FSGNJ_S, MASK_FSGNJ_S)
-DECLARE_INSN(fsgnjn_s, MATCH_FSGNJN_S, MASK_FSGNJN_S)
-DECLARE_INSN(fsgnjx_s, MATCH_FSGNJX_S, MASK_FSGNJX_S)
-DECLARE_INSN(fmin_s, MATCH_FMIN_S, MASK_FMIN_S)
-DECLARE_INSN(fmax_s, MATCH_FMAX_S, MASK_FMAX_S)
-DECLARE_INSN(fsqrt_s, MATCH_FSQRT_S, MASK_FSQRT_S)
-DECLARE_INSN(fle_s, MATCH_FLE_S, MASK_FLE_S)
-DECLARE_INSN(flt_s, MATCH_FLT_S, MASK_FLT_S)
-DECLARE_INSN(feq_s, MATCH_FEQ_S, MASK_FEQ_S)
-DECLARE_INSN(fcvt_w_s, MATCH_FCVT_W_S, MASK_FCVT_W_S)
-DECLARE_INSN(fcvt_wu_s, MATCH_FCVT_WU_S, MASK_FCVT_WU_S)
-DECLARE_INSN(fmv_x_w, MATCH_FMV_X_W, MASK_FMV_X_W)
-DECLARE_INSN(fclass_s, MATCH_FCLASS_S, MASK_FCLASS_S)
-DECLARE_INSN(fcvt_s_w, MATCH_FCVT_S_W, MASK_FCVT_S_W)
-DECLARE_INSN(fcvt_s_wu, MATCH_FCVT_S_WU, MASK_FCVT_S_WU)
-DECLARE_INSN(fmv_w_x, MATCH_FMV_W_X, MASK_FMV_W_X)
-DECLARE_INSN(flw, MATCH_FLW, MASK_FLW)
-DECLARE_INSN(fsw, MATCH_FSW, MASK_FSW)
-DECLARE_INSN(fmadd_s, MATCH_FMADD_S, MASK_FMADD_S)
-DECLARE_INSN(fmsub_s, MATCH_FMSUB_S, MASK_FMSUB_S)
-DECLARE_INSN(fnmsub_s, MATCH_FNMSUB_S, MASK_FNMSUB_S)
-DECLARE_INSN(fnmadd_s, MATCH_FNMADD_S, MASK_FNMADD_S)
-DECLARE_INSN(fcvt_l_s, MATCH_FCVT_L_S, MASK_FCVT_L_S)
-DECLARE_INSN(fcvt_lu_s, MATCH_FCVT_LU_S, MASK_FCVT_LU_S)
-DECLARE_INSN(fcvt_s_l, MATCH_FCVT_S_L, MASK_FCVT_S_L)
-DECLARE_INSN(fcvt_s_lu, MATCH_FCVT_S_LU, MASK_FCVT_S_LU)
-DECLARE_INSN(fadd_d, MATCH_FADD_D, MASK_FADD_D)
-DECLARE_INSN(fsub_d, MATCH_FSUB_D, MASK_FSUB_D)
-DECLARE_INSN(fmul_d, MATCH_FMUL_D, MASK_FMUL_D)
-DECLARE_INSN(fdiv_d, MATCH_FDIV_D, MASK_FDIV_D)
-DECLARE_INSN(fsgnj_d, MATCH_FSGNJ_D, MASK_FSGNJ_D)
-DECLARE_INSN(fsgnjn_d, MATCH_FSGNJN_D, MASK_FSGNJN_D)
-DECLARE_INSN(fsgnjx_d, MATCH_FSGNJX_D, MASK_FSGNJX_D)
-DECLARE_INSN(fmin_d, MATCH_FMIN_D, MASK_FMIN_D)
-DECLARE_INSN(fmax_d, MATCH_FMAX_D, MASK_FMAX_D)
-DECLARE_INSN(fcvt_s_d, MATCH_FCVT_S_D, MASK_FCVT_S_D)
-DECLARE_INSN(fcvt_d_s, MATCH_FCVT_D_S, MASK_FCVT_D_S)
-DECLARE_INSN(fsqrt_d, MATCH_FSQRT_D, MASK_FSQRT_D)
-DECLARE_INSN(fle_d, MATCH_FLE_D, MASK_FLE_D)
-DECLARE_INSN(flt_d, MATCH_FLT_D, MASK_FLT_D)
-DECLARE_INSN(feq_d, MATCH_FEQ_D, MASK_FEQ_D)
-DECLARE_INSN(fcvt_w_d, MATCH_FCVT_W_D, MASK_FCVT_W_D)
-DECLARE_INSN(fcvt_wu_d, MATCH_FCVT_WU_D, MASK_FCVT_WU_D)
-DECLARE_INSN(fclass_d, MATCH_FCLASS_D, MASK_FCLASS_D)
-DECLARE_INSN(fcvt_d_w, MATCH_FCVT_D_W, MASK_FCVT_D_W)
-DECLARE_INSN(fcvt_d_wu, MATCH_FCVT_D_WU, MASK_FCVT_D_WU)
-DECLARE_INSN(fld, MATCH_FLD, MASK_FLD)
-DECLARE_INSN(fsd, MATCH_FSD, MASK_FSD)
-DECLARE_INSN(fmadd_d, MATCH_FMADD_D, MASK_FMADD_D)
-DECLARE_INSN(fmsub_d, MATCH_FMSUB_D, MASK_FMSUB_D)
-DECLARE_INSN(fnmsub_d, MATCH_FNMSUB_D, MASK_FNMSUB_D)
-DECLARE_INSN(fnmadd_d, MATCH_FNMADD_D, MASK_FNMADD_D)
-DECLARE_INSN(fcvt_l_d, MATCH_FCVT_L_D, MASK_FCVT_L_D)
-DECLARE_INSN(fcvt_lu_d, MATCH_FCVT_LU_D, MASK_FCVT_LU_D)
-DECLARE_INSN(fmv_x_d, MATCH_FMV_X_D, MASK_FMV_X_D)
-DECLARE_INSN(fcvt_d_l, MATCH_FCVT_D_L, MASK_FCVT_D_L)
-DECLARE_INSN(fcvt_d_lu, MATCH_FCVT_D_LU, MASK_FCVT_D_LU)
-DECLARE_INSN(fmv_d_x, MATCH_FMV_D_X, MASK_FMV_D_X)
-DECLARE_INSN(fadd_q, MATCH_FADD_Q, MASK_FADD_Q)
-DECLARE_INSN(fsub_q, MATCH_FSUB_Q, MASK_FSUB_Q)
-DECLARE_INSN(fmul_q, MATCH_FMUL_Q, MASK_FMUL_Q)
-DECLARE_INSN(fdiv_q, MATCH_FDIV_Q, MASK_FDIV_Q)
-DECLARE_INSN(fsgnj_q, MATCH_FSGNJ_Q, MASK_FSGNJ_Q)
-DECLARE_INSN(fsgnjn_q, MATCH_FSGNJN_Q, MASK_FSGNJN_Q)
-DECLARE_INSN(fsgnjx_q, MATCH_FSGNJX_Q, MASK_FSGNJX_Q)
-DECLARE_INSN(fmin_q, MATCH_FMIN_Q, MASK_FMIN_Q)
-DECLARE_INSN(fmax_q, MATCH_FMAX_Q, MASK_FMAX_Q)
-DECLARE_INSN(fcvt_s_q, MATCH_FCVT_S_Q, MASK_FCVT_S_Q)
-DECLARE_INSN(fcvt_q_s, MATCH_FCVT_Q_S, MASK_FCVT_Q_S)
-DECLARE_INSN(fcvt_d_q, MATCH_FCVT_D_Q, MASK_FCVT_D_Q)
-DECLARE_INSN(fcvt_q_d, MATCH_FCVT_Q_D, MASK_FCVT_Q_D)
-DECLARE_INSN(fsqrt_q, MATCH_FSQRT_Q, MASK_FSQRT_Q)
-DECLARE_INSN(fle_q, MATCH_FLE_Q, MASK_FLE_Q)
-DECLARE_INSN(flt_q, MATCH_FLT_Q, MASK_FLT_Q)
-DECLARE_INSN(feq_q, MATCH_FEQ_Q, MASK_FEQ_Q)
-DECLARE_INSN(fcvt_w_q, MATCH_FCVT_W_Q, MASK_FCVT_W_Q)
-DECLARE_INSN(fcvt_wu_q, MATCH_FCVT_WU_Q, MASK_FCVT_WU_Q)
-DECLARE_INSN(fclass_q, MATCH_FCLASS_Q, MASK_FCLASS_Q)
-DECLARE_INSN(fcvt_q_w, MATCH_FCVT_Q_W, MASK_FCVT_Q_W)
-DECLARE_INSN(fcvt_q_wu, MATCH_FCVT_Q_WU, MASK_FCVT_Q_WU)
-DECLARE_INSN(flq, MATCH_FLQ, MASK_FLQ)
-DECLARE_INSN(fsq, MATCH_FSQ, MASK_FSQ)
-DECLARE_INSN(fmadd_q, MATCH_FMADD_Q, MASK_FMADD_Q)
-DECLARE_INSN(fmsub_q, MATCH_FMSUB_Q, MASK_FMSUB_Q)
-DECLARE_INSN(fnmsub_q, MATCH_FNMSUB_Q, MASK_FNMSUB_Q)
-DECLARE_INSN(fnmadd_q, MATCH_FNMADD_Q, MASK_FNMADD_Q)
-DECLARE_INSN(fcvt_l_q, MATCH_FCVT_L_Q, MASK_FCVT_L_Q)
-DECLARE_INSN(fcvt_lu_q, MATCH_FCVT_LU_Q, MASK_FCVT_LU_Q)
-DECLARE_INSN(fcvt_q_l, MATCH_FCVT_Q_L, MASK_FCVT_Q_L)
-DECLARE_INSN(fcvt_q_lu, MATCH_FCVT_Q_LU, MASK_FCVT_Q_LU)
+DECLARE_INSN(amoswap_w, MATCH_AMOSWAP_W, MASK_AMOSWAP_W)
+DECLARE_INSN(amoxor_d, MATCH_AMOXOR_D, MASK_AMOXOR_D)
+DECLARE_INSN(amoxor_w, MATCH_AMOXOR_W, MASK_AMOXOR_W)
+DECLARE_INSN(and, MATCH_AND, MASK_AND)
+DECLARE_INSN(andi, MATCH_ANDI, MASK_ANDI)
DECLARE_INSN(andn, MATCH_ANDN, MASK_ANDN)
-DECLARE_INSN(orn, MATCH_ORN, MASK_ORN)
-DECLARE_INSN(xnor, MATCH_XNOR, MASK_XNOR)
-DECLARE_INSN(slo, MATCH_SLO, MASK_SLO)
-DECLARE_INSN(sro, MATCH_SRO, MASK_SRO)
-DECLARE_INSN(rol, MATCH_ROL, MASK_ROL)
-DECLARE_INSN(ror, MATCH_ROR, MASK_ROR)
+DECLARE_INSN(auipc, MATCH_AUIPC, MASK_AUIPC)
+DECLARE_INSN(ave, MATCH_AVE, MASK_AVE)
DECLARE_INSN(bclr, MATCH_BCLR, MASK_BCLR)
-DECLARE_INSN(bset, MATCH_BSET, MASK_BSET)
-DECLARE_INSN(binv, MATCH_BINV, MASK_BINV)
-DECLARE_INSN(bext, MATCH_BEXT, MASK_BEXT)
-DECLARE_INSN(gorc, MATCH_GORC, MASK_GORC)
-DECLARE_INSN(grev, MATCH_GREV, MASK_GREV)
-DECLARE_INSN(sloi, MATCH_SLOI, MASK_SLOI)
-DECLARE_INSN(sroi, MATCH_SROI, MASK_SROI)
-DECLARE_INSN(rori, MATCH_RORI, MASK_RORI)
DECLARE_INSN(bclri, MATCH_BCLRI, MASK_BCLRI)
-DECLARE_INSN(bseti, MATCH_BSETI, MASK_BSETI)
-DECLARE_INSN(binvi, MATCH_BINVI, MASK_BINVI)
-DECLARE_INSN(bexti, MATCH_BEXTI, MASK_BEXTI)
-DECLARE_INSN(gorci, MATCH_GORCI, MASK_GORCI)
-DECLARE_INSN(grevi, MATCH_GREVI, MASK_GREVI)
-DECLARE_INSN(cmix, MATCH_CMIX, MASK_CMIX)
-DECLARE_INSN(cmov, MATCH_CMOV, MASK_CMOV)
-DECLARE_INSN(fsl, MATCH_FSL, MASK_FSL)
-DECLARE_INSN(fsr, MATCH_FSR, MASK_FSR)
-DECLARE_INSN(fsri, MATCH_FSRI, MASK_FSRI)
-DECLARE_INSN(clz, MATCH_CLZ, MASK_CLZ)
-DECLARE_INSN(ctz, MATCH_CTZ, MASK_CTZ)
-DECLARE_INSN(cpop, MATCH_CPOP, MASK_CPOP)
-DECLARE_INSN(sext_b, MATCH_SEXT_B, MASK_SEXT_B)
-DECLARE_INSN(sext_h, MATCH_SEXT_H, MASK_SEXT_H)
-DECLARE_INSN(crc32_b, MATCH_CRC32_B, MASK_CRC32_B)
-DECLARE_INSN(crc32_h, MATCH_CRC32_H, MASK_CRC32_H)
-DECLARE_INSN(crc32_w, MATCH_CRC32_W, MASK_CRC32_W)
-DECLARE_INSN(crc32c_b, MATCH_CRC32C_B, MASK_CRC32C_B)
-DECLARE_INSN(crc32c_h, MATCH_CRC32C_H, MASK_CRC32C_H)
-DECLARE_INSN(crc32c_w, MATCH_CRC32C_W, MASK_CRC32C_W)
-DECLARE_INSN(sh1add, MATCH_SH1ADD, MASK_SH1ADD)
-DECLARE_INSN(sh2add, MATCH_SH2ADD, MASK_SH2ADD)
-DECLARE_INSN(sh3add, MATCH_SH3ADD, MASK_SH3ADD)
-DECLARE_INSN(clmul, MATCH_CLMUL, MASK_CLMUL)
-DECLARE_INSN(clmulr, MATCH_CLMULR, MASK_CLMULR)
-DECLARE_INSN(clmulh, MATCH_CLMULH, MASK_CLMULH)
-DECLARE_INSN(min, MATCH_MIN, MASK_MIN)
-DECLARE_INSN(minu, MATCH_MINU, MASK_MINU)
-DECLARE_INSN(max, MATCH_MAX, MASK_MAX)
-DECLARE_INSN(maxu, MATCH_MAXU, MASK_MAXU)
-DECLARE_INSN(shfl, MATCH_SHFL, MASK_SHFL)
-DECLARE_INSN(unshfl, MATCH_UNSHFL, MASK_UNSHFL)
DECLARE_INSN(bcompress, MATCH_BCOMPRESS, MASK_BCOMPRESS)
+DECLARE_INSN(bcompressw, MATCH_BCOMPRESSW, MASK_BCOMPRESSW)
DECLARE_INSN(bdecompress, MATCH_BDECOMPRESS, MASK_BDECOMPRESS)
-DECLARE_INSN(pack, MATCH_PACK, MASK_PACK)
-DECLARE_INSN(packu, MATCH_PACKU, MASK_PACKU)
-DECLARE_INSN(packh, MATCH_PACKH, MASK_PACKH)
+DECLARE_INSN(bdecompressw, MATCH_BDECOMPRESSW, MASK_BDECOMPRESSW)
+DECLARE_INSN(beq, MATCH_BEQ, MASK_BEQ)
+DECLARE_INSN(bext, MATCH_BEXT, MASK_BEXT)
+DECLARE_INSN(bexti, MATCH_BEXTI, MASK_BEXTI)
DECLARE_INSN(bfp, MATCH_BFP, MASK_BFP)
-DECLARE_INSN(shfli, MATCH_SHFLI, MASK_SHFLI)
-DECLARE_INSN(unshfli, MATCH_UNSHFLI, MASK_UNSHFLI)
-DECLARE_INSN(xperm4, MATCH_XPERM4, MASK_XPERM4)
-DECLARE_INSN(xperm8, MATCH_XPERM8, MASK_XPERM8)
-DECLARE_INSN(xperm16, MATCH_XPERM16, MASK_XPERM16)
+DECLARE_INSN(bfpw, MATCH_BFPW, MASK_BFPW)
+DECLARE_INSN(bge, MATCH_BGE, MASK_BGE)
+DECLARE_INSN(bgeu, MATCH_BGEU, MASK_BGEU)
+DECLARE_INSN(binv, MATCH_BINV, MASK_BINV)
+DECLARE_INSN(binvi, MATCH_BINVI, MASK_BINVI)
+DECLARE_INSN(bitrev, MATCH_BITREV, MASK_BITREV)
+DECLARE_INSN(bitrevi, MATCH_BITREVI, MASK_BITREVI)
+DECLARE_INSN(blt, MATCH_BLT, MASK_BLT)
+DECLARE_INSN(bltu, MATCH_BLTU, MASK_BLTU)
DECLARE_INSN(bmatflip, MATCH_BMATFLIP, MASK_BMATFLIP)
-DECLARE_INSN(crc32_d, MATCH_CRC32_D, MASK_CRC32_D)
-DECLARE_INSN(crc32c_d, MATCH_CRC32C_D, MASK_CRC32C_D)
DECLARE_INSN(bmator, MATCH_BMATOR, MASK_BMATOR)
DECLARE_INSN(bmatxor, MATCH_BMATXOR, MASK_BMATXOR)
-DECLARE_INSN(slli_uw, MATCH_SLLI_UW, MASK_SLLI_UW)
-DECLARE_INSN(add_uw, MATCH_ADD_UW, MASK_ADD_UW)
-DECLARE_INSN(slow, MATCH_SLOW, MASK_SLOW)
-DECLARE_INSN(srow, MATCH_SROW, MASK_SROW)
-DECLARE_INSN(rolw, MATCH_ROLW, MASK_ROLW)
-DECLARE_INSN(rorw, MATCH_RORW, MASK_RORW)
-DECLARE_INSN(gorcw, MATCH_GORCW, MASK_GORCW)
-DECLARE_INSN(grevw, MATCH_GREVW, MASK_GREVW)
-DECLARE_INSN(sloiw, MATCH_SLOIW, MASK_SLOIW)
-DECLARE_INSN(sroiw, MATCH_SROIW, MASK_SROIW)
-DECLARE_INSN(roriw, MATCH_RORIW, MASK_RORIW)
-DECLARE_INSN(gorciw, MATCH_GORCIW, MASK_GORCIW)
-DECLARE_INSN(greviw, MATCH_GREVIW, MASK_GREVIW)
-DECLARE_INSN(fslw, MATCH_FSLW, MASK_FSLW)
-DECLARE_INSN(fsrw, MATCH_FSRW, MASK_FSRW)
-DECLARE_INSN(fsriw, MATCH_FSRIW, MASK_FSRIW)
-DECLARE_INSN(clzw, MATCH_CLZW, MASK_CLZW)
-DECLARE_INSN(ctzw, MATCH_CTZW, MASK_CTZW)
-DECLARE_INSN(cpopw, MATCH_CPOPW, MASK_CPOPW)
-DECLARE_INSN(sh1add_uw, MATCH_SH1ADD_UW, MASK_SH1ADD_UW)
-DECLARE_INSN(sh2add_uw, MATCH_SH2ADD_UW, MASK_SH2ADD_UW)
-DECLARE_INSN(sh3add_uw, MATCH_SH3ADD_UW, MASK_SH3ADD_UW)
-DECLARE_INSN(shflw, MATCH_SHFLW, MASK_SHFLW)
-DECLARE_INSN(unshflw, MATCH_UNSHFLW, MASK_UNSHFLW)
-DECLARE_INSN(bcompressw, MATCH_BCOMPRESSW, MASK_BCOMPRESSW)
-DECLARE_INSN(bdecompressw, MATCH_BDECOMPRESSW, MASK_BDECOMPRESSW)
-DECLARE_INSN(packw, MATCH_PACKW, MASK_PACKW)
-DECLARE_INSN(packuw, MATCH_PACKUW, MASK_PACKUW)
-DECLARE_INSN(bfpw, MATCH_BFPW, MASK_BFPW)
-DECLARE_INSN(xperm32, MATCH_XPERM32, MASK_XPERM32)
-DECLARE_INSN(ecall, MATCH_ECALL, MASK_ECALL)
-DECLARE_INSN(ebreak, MATCH_EBREAK, MASK_EBREAK)
-DECLARE_INSN(sret, MATCH_SRET, MASK_SRET)
-DECLARE_INSN(mret, MATCH_MRET, MASK_MRET)
-DECLARE_INSN(dret, MATCH_DRET, MASK_DRET)
-DECLARE_INSN(sfence_vma, MATCH_SFENCE_VMA, MASK_SFENCE_VMA)
-DECLARE_INSN(wfi, MATCH_WFI, MASK_WFI)
-DECLARE_INSN(csrrw, MATCH_CSRRW, MASK_CSRRW)
-DECLARE_INSN(csrrs, MATCH_CSRRS, MASK_CSRRS)
-DECLARE_INSN(csrrc, MATCH_CSRRC, MASK_CSRRC)
-DECLARE_INSN(csrrwi, MATCH_CSRRWI, MASK_CSRRWI)
-DECLARE_INSN(csrrsi, MATCH_CSRRSI, MASK_CSRRSI)
-DECLARE_INSN(csrrci, MATCH_CSRRCI, MASK_CSRRCI)
-DECLARE_INSN(sinval_vma, MATCH_SINVAL_VMA, MASK_SINVAL_VMA)
-DECLARE_INSN(sfence_w_inval, MATCH_SFENCE_W_INVAL, MASK_SFENCE_W_INVAL)
-DECLARE_INSN(sfence_inval_ir, MATCH_SFENCE_INVAL_IR, MASK_SFENCE_INVAL_IR)
-DECLARE_INSN(hinval_vvma, MATCH_HINVAL_VVMA, MASK_HINVAL_VVMA)
-DECLARE_INSN(hinval_gvma, MATCH_HINVAL_GVMA, MASK_HINVAL_GVMA)
-DECLARE_INSN(fadd_h, MATCH_FADD_H, MASK_FADD_H)
-DECLARE_INSN(fsub_h, MATCH_FSUB_H, MASK_FSUB_H)
-DECLARE_INSN(fmul_h, MATCH_FMUL_H, MASK_FMUL_H)
-DECLARE_INSN(fdiv_h, MATCH_FDIV_H, MASK_FDIV_H)
-DECLARE_INSN(fsgnj_h, MATCH_FSGNJ_H, MASK_FSGNJ_H)
-DECLARE_INSN(fsgnjn_h, MATCH_FSGNJN_H, MASK_FSGNJN_H)
-DECLARE_INSN(fsgnjx_h, MATCH_FSGNJX_H, MASK_FSGNJX_H)
-DECLARE_INSN(fmin_h, MATCH_FMIN_H, MASK_FMIN_H)
-DECLARE_INSN(fmax_h, MATCH_FMAX_H, MASK_FMAX_H)
-DECLARE_INSN(fcvt_h_s, MATCH_FCVT_H_S, MASK_FCVT_H_S)
-DECLARE_INSN(fcvt_s_h, MATCH_FCVT_S_H, MASK_FCVT_S_H)
-DECLARE_INSN(fsqrt_h, MATCH_FSQRT_H, MASK_FSQRT_H)
-DECLARE_INSN(fle_h, MATCH_FLE_H, MASK_FLE_H)
-DECLARE_INSN(flt_h, MATCH_FLT_H, MASK_FLT_H)
-DECLARE_INSN(feq_h, MATCH_FEQ_H, MASK_FEQ_H)
-DECLARE_INSN(fcvt_w_h, MATCH_FCVT_W_H, MASK_FCVT_W_H)
-DECLARE_INSN(fcvt_wu_h, MATCH_FCVT_WU_H, MASK_FCVT_WU_H)
-DECLARE_INSN(fmv_x_h, MATCH_FMV_X_H, MASK_FMV_X_H)
-DECLARE_INSN(fclass_h, MATCH_FCLASS_H, MASK_FCLASS_H)
-DECLARE_INSN(fcvt_h_w, MATCH_FCVT_H_W, MASK_FCVT_H_W)
-DECLARE_INSN(fcvt_h_wu, MATCH_FCVT_H_WU, MASK_FCVT_H_WU)
-DECLARE_INSN(fmv_h_x, MATCH_FMV_H_X, MASK_FMV_H_X)
-DECLARE_INSN(flh, MATCH_FLH, MASK_FLH)
-DECLARE_INSN(fsh, MATCH_FSH, MASK_FSH)
-DECLARE_INSN(fmadd_h, MATCH_FMADD_H, MASK_FMADD_H)
-DECLARE_INSN(fmsub_h, MATCH_FMSUB_H, MASK_FMSUB_H)
-DECLARE_INSN(fnmsub_h, MATCH_FNMSUB_H, MASK_FNMSUB_H)
-DECLARE_INSN(fnmadd_h, MATCH_FNMADD_H, MASK_FNMADD_H)
-DECLARE_INSN(fcvt_h_d, MATCH_FCVT_H_D, MASK_FCVT_H_D)
-DECLARE_INSN(fcvt_d_h, MATCH_FCVT_D_H, MASK_FCVT_D_H)
-DECLARE_INSN(fcvt_h_q, MATCH_FCVT_H_Q, MASK_FCVT_H_Q)
-DECLARE_INSN(fcvt_q_h, MATCH_FCVT_Q_H, MASK_FCVT_Q_H)
-DECLARE_INSN(fcvt_l_h, MATCH_FCVT_L_H, MASK_FCVT_L_H)
-DECLARE_INSN(fcvt_lu_h, MATCH_FCVT_LU_H, MASK_FCVT_LU_H)
-DECLARE_INSN(fcvt_h_l, MATCH_FCVT_H_L, MASK_FCVT_H_L)
-DECLARE_INSN(fcvt_h_lu, MATCH_FCVT_H_LU, MASK_FCVT_H_LU)
-DECLARE_INSN(sm4ed, MATCH_SM4ED, MASK_SM4ED)
-DECLARE_INSN(sm4ks, MATCH_SM4KS, MASK_SM4KS)
-DECLARE_INSN(sm3p0, MATCH_SM3P0, MASK_SM3P0)
-DECLARE_INSN(sm3p1, MATCH_SM3P1, MASK_SM3P1)
-DECLARE_INSN(sha256sum0, MATCH_SHA256SUM0, MASK_SHA256SUM0)
-DECLARE_INSN(sha256sum1, MATCH_SHA256SUM1, MASK_SHA256SUM1)
-DECLARE_INSN(sha256sig0, MATCH_SHA256SIG0, MASK_SHA256SIG0)
-DECLARE_INSN(sha256sig1, MATCH_SHA256SIG1, MASK_SHA256SIG1)
-DECLARE_INSN(aes32esmi, MATCH_AES32ESMI, MASK_AES32ESMI)
-DECLARE_INSN(aes32esi, MATCH_AES32ESI, MASK_AES32ESI)
-DECLARE_INSN(aes32dsmi, MATCH_AES32DSMI, MASK_AES32DSMI)
-DECLARE_INSN(aes32dsi, MATCH_AES32DSI, MASK_AES32DSI)
-DECLARE_INSN(sha512sum0r, MATCH_SHA512SUM0R, MASK_SHA512SUM0R)
-DECLARE_INSN(sha512sum1r, MATCH_SHA512SUM1R, MASK_SHA512SUM1R)
-DECLARE_INSN(sha512sig0l, MATCH_SHA512SIG0L, MASK_SHA512SIG0L)
-DECLARE_INSN(sha512sig0h, MATCH_SHA512SIG0H, MASK_SHA512SIG0H)
-DECLARE_INSN(sha512sig1l, MATCH_SHA512SIG1L, MASK_SHA512SIG1L)
-DECLARE_INSN(sha512sig1h, MATCH_SHA512SIG1H, MASK_SHA512SIG1H)
-DECLARE_INSN(aes64ks1i, MATCH_AES64KS1I, MASK_AES64KS1I)
-DECLARE_INSN(aes64im, MATCH_AES64IM, MASK_AES64IM)
-DECLARE_INSN(aes64ks2, MATCH_AES64KS2, MASK_AES64KS2)
-DECLARE_INSN(aes64esm, MATCH_AES64ESM, MASK_AES64ESM)
-DECLARE_INSN(aes64es, MATCH_AES64ES, MASK_AES64ES)
-DECLARE_INSN(aes64dsm, MATCH_AES64DSM, MASK_AES64DSM)
-DECLARE_INSN(aes64ds, MATCH_AES64DS, MASK_AES64DS)
-DECLARE_INSN(sha512sum0, MATCH_SHA512SUM0, MASK_SHA512SUM0)
-DECLARE_INSN(sha512sum1, MATCH_SHA512SUM1, MASK_SHA512SUM1)
-DECLARE_INSN(sha512sig0, MATCH_SHA512SIG0, MASK_SHA512SIG0)
-DECLARE_INSN(sha512sig1, MATCH_SHA512SIG1, MASK_SHA512SIG1)
-DECLARE_INSN(cbo_clean, MATCH_CBO_CLEAN, MASK_CBO_CLEAN)
-DECLARE_INSN(cbo_flush, MATCH_CBO_FLUSH, MASK_CBO_FLUSH)
-DECLARE_INSN(cbo_inval, MATCH_CBO_INVAL, MASK_CBO_INVAL)
-DECLARE_INSN(cbo_zero, MATCH_CBO_ZERO, MASK_CBO_ZERO)
-DECLARE_INSN(prefetch_i, MATCH_PREFETCH_I, MASK_PREFETCH_I)
-DECLARE_INSN(prefetch_r, MATCH_PREFETCH_R, MASK_PREFETCH_R)
-DECLARE_INSN(prefetch_w, MATCH_PREFETCH_W, MASK_PREFETCH_W)
-DECLARE_INSN(c_nop, MATCH_C_NOP, MASK_C_NOP)
+DECLARE_INSN(bne, MATCH_BNE, MASK_BNE)
+DECLARE_INSN(bpick, MATCH_BPICK, MASK_BPICK)
+DECLARE_INSN(bset, MATCH_BSET, MASK_BSET)
+DECLARE_INSN(bseti, MATCH_BSETI, MASK_BSETI)
+DECLARE_INSN(c_add, MATCH_C_ADD, MASK_C_ADD)
+DECLARE_INSN(c_addi, MATCH_C_ADDI, MASK_C_ADDI)
DECLARE_INSN(c_addi16sp, MATCH_C_ADDI16SP, MASK_C_ADDI16SP)
-DECLARE_INSN(c_jr, MATCH_C_JR, MASK_C_JR)
-DECLARE_INSN(c_jalr, MATCH_C_JALR, MASK_C_JALR)
-DECLARE_INSN(c_ebreak, MATCH_C_EBREAK, MASK_C_EBREAK)
DECLARE_INSN(c_addi4spn, MATCH_C_ADDI4SPN, MASK_C_ADDI4SPN)
+DECLARE_INSN(c_addiw, MATCH_C_ADDIW, MASK_C_ADDIW)
+DECLARE_INSN(c_addw, MATCH_C_ADDW, MASK_C_ADDW)
+DECLARE_INSN(c_and, MATCH_C_AND, MASK_C_AND)
+DECLARE_INSN(c_andi, MATCH_C_ANDI, MASK_C_ANDI)
+DECLARE_INSN(c_beqz, MATCH_C_BEQZ, MASK_C_BEQZ)
+DECLARE_INSN(c_bnez, MATCH_C_BNEZ, MASK_C_BNEZ)
+DECLARE_INSN(c_ebreak, MATCH_C_EBREAK, MASK_C_EBREAK)
DECLARE_INSN(c_fld, MATCH_C_FLD, MASK_C_FLD)
-DECLARE_INSN(c_lw, MATCH_C_LW, MASK_C_LW)
+DECLARE_INSN(c_fldsp, MATCH_C_FLDSP, MASK_C_FLDSP)
DECLARE_INSN(c_flw, MATCH_C_FLW, MASK_C_FLW)
+DECLARE_INSN(c_flwsp, MATCH_C_FLWSP, MASK_C_FLWSP)
DECLARE_INSN(c_fsd, MATCH_C_FSD, MASK_C_FSD)
-DECLARE_INSN(c_sw, MATCH_C_SW, MASK_C_SW)
+DECLARE_INSN(c_fsdsp, MATCH_C_FSDSP, MASK_C_FSDSP)
DECLARE_INSN(c_fsw, MATCH_C_FSW, MASK_C_FSW)
-DECLARE_INSN(c_addi, MATCH_C_ADDI, MASK_C_ADDI)
+DECLARE_INSN(c_fswsp, MATCH_C_FSWSP, MASK_C_FSWSP)
+DECLARE_INSN(c_j, MATCH_C_J, MASK_C_J)
DECLARE_INSN(c_jal, MATCH_C_JAL, MASK_C_JAL)
+DECLARE_INSN(c_jalr, MATCH_C_JALR, MASK_C_JALR)
+DECLARE_INSN(c_jr, MATCH_C_JR, MASK_C_JR)
+DECLARE_INSN(c_ld, MATCH_C_LD, MASK_C_LD)
+DECLARE_INSN(c_ldsp, MATCH_C_LDSP, MASK_C_LDSP)
DECLARE_INSN(c_li, MATCH_C_LI, MASK_C_LI)
DECLARE_INSN(c_lui, MATCH_C_LUI, MASK_C_LUI)
-DECLARE_INSN(c_srli, MATCH_C_SRLI, MASK_C_SRLI)
-DECLARE_INSN(c_srai, MATCH_C_SRAI, MASK_C_SRAI)
-DECLARE_INSN(c_andi, MATCH_C_ANDI, MASK_C_ANDI)
-DECLARE_INSN(c_sub, MATCH_C_SUB, MASK_C_SUB)
-DECLARE_INSN(c_xor, MATCH_C_XOR, MASK_C_XOR)
-DECLARE_INSN(c_or, MATCH_C_OR, MASK_C_OR)
-DECLARE_INSN(c_and, MATCH_C_AND, MASK_C_AND)
-DECLARE_INSN(c_j, MATCH_C_J, MASK_C_J)
-DECLARE_INSN(c_beqz, MATCH_C_BEQZ, MASK_C_BEQZ)
-DECLARE_INSN(c_bnez, MATCH_C_BNEZ, MASK_C_BNEZ)
-DECLARE_INSN(c_slli, MATCH_C_SLLI, MASK_C_SLLI)
-DECLARE_INSN(c_fldsp, MATCH_C_FLDSP, MASK_C_FLDSP)
+DECLARE_INSN(c_lw, MATCH_C_LW, MASK_C_LW)
DECLARE_INSN(c_lwsp, MATCH_C_LWSP, MASK_C_LWSP)
-DECLARE_INSN(c_flwsp, MATCH_C_FLWSP, MASK_C_FLWSP)
DECLARE_INSN(c_mv, MATCH_C_MV, MASK_C_MV)
-DECLARE_INSN(c_add, MATCH_C_ADD, MASK_C_ADD)
-DECLARE_INSN(c_fsdsp, MATCH_C_FSDSP, MASK_C_FSDSP)
-DECLARE_INSN(c_swsp, MATCH_C_SWSP, MASK_C_SWSP)
-DECLARE_INSN(c_fswsp, MATCH_C_FSWSP, MASK_C_FSWSP)
-DECLARE_INSN(c_srli_rv32, MATCH_C_SRLI_RV32, MASK_C_SRLI_RV32)
-DECLARE_INSN(c_srai_rv32, MATCH_C_SRAI_RV32, MASK_C_SRAI_RV32)
-DECLARE_INSN(c_slli_rv32, MATCH_C_SLLI_RV32, MASK_C_SLLI_RV32)
-DECLARE_INSN(c_ld, MATCH_C_LD, MASK_C_LD)
+DECLARE_INSN(c_nop, MATCH_C_NOP, MASK_C_NOP)
+DECLARE_INSN(c_or, MATCH_C_OR, MASK_C_OR)
DECLARE_INSN(c_sd, MATCH_C_SD, MASK_C_SD)
-DECLARE_INSN(c_subw, MATCH_C_SUBW, MASK_C_SUBW)
-DECLARE_INSN(c_addw, MATCH_C_ADDW, MASK_C_ADDW)
-DECLARE_INSN(c_addiw, MATCH_C_ADDIW, MASK_C_ADDIW)
-DECLARE_INSN(c_ldsp, MATCH_C_LDSP, MASK_C_LDSP)
DECLARE_INSN(c_sdsp, MATCH_C_SDSP, MASK_C_SDSP)
-DECLARE_INSN(custom0, MATCH_CUSTOM0, MASK_CUSTOM0)
-DECLARE_INSN(custom0_rs1, MATCH_CUSTOM0_RS1, MASK_CUSTOM0_RS1)
-DECLARE_INSN(custom0_rs1_rs2, MATCH_CUSTOM0_RS1_RS2, MASK_CUSTOM0_RS1_RS2)
-DECLARE_INSN(custom0_rd, MATCH_CUSTOM0_RD, MASK_CUSTOM0_RD)
-DECLARE_INSN(custom0_rd_rs1, MATCH_CUSTOM0_RD_RS1, MASK_CUSTOM0_RD_RS1)
-DECLARE_INSN(custom0_rd_rs1_rs2, MATCH_CUSTOM0_RD_RS1_RS2, MASK_CUSTOM0_RD_RS1_RS2)
-DECLARE_INSN(custom1, MATCH_CUSTOM1, MASK_CUSTOM1)
-DECLARE_INSN(custom1_rs1, MATCH_CUSTOM1_RS1, MASK_CUSTOM1_RS1)
-DECLARE_INSN(custom1_rs1_rs2, MATCH_CUSTOM1_RS1_RS2, MASK_CUSTOM1_RS1_RS2)
-DECLARE_INSN(custom1_rd, MATCH_CUSTOM1_RD, MASK_CUSTOM1_RD)
-DECLARE_INSN(custom1_rd_rs1, MATCH_CUSTOM1_RD_RS1, MASK_CUSTOM1_RD_RS1)
-DECLARE_INSN(custom1_rd_rs1_rs2, MATCH_CUSTOM1_RD_RS1_RS2, MASK_CUSTOM1_RD_RS1_RS2)
-DECLARE_INSN(custom2, MATCH_CUSTOM2, MASK_CUSTOM2)
-DECLARE_INSN(custom2_rs1, MATCH_CUSTOM2_RS1, MASK_CUSTOM2_RS1)
-DECLARE_INSN(custom2_rs1_rs2, MATCH_CUSTOM2_RS1_RS2, MASK_CUSTOM2_RS1_RS2)
-DECLARE_INSN(custom2_rd, MATCH_CUSTOM2_RD, MASK_CUSTOM2_RD)
-DECLARE_INSN(custom2_rd_rs1, MATCH_CUSTOM2_RD_RS1, MASK_CUSTOM2_RD_RS1)
-DECLARE_INSN(custom2_rd_rs1_rs2, MATCH_CUSTOM2_RD_RS1_RS2, MASK_CUSTOM2_RD_RS1_RS2)
-DECLARE_INSN(custom3, MATCH_CUSTOM3, MASK_CUSTOM3)
-DECLARE_INSN(custom3_rs1, MATCH_CUSTOM3_RS1, MASK_CUSTOM3_RS1)
-DECLARE_INSN(custom3_rs1_rs2, MATCH_CUSTOM3_RS1_RS2, MASK_CUSTOM3_RS1_RS2)
-DECLARE_INSN(custom3_rd, MATCH_CUSTOM3_RD, MASK_CUSTOM3_RD)
-DECLARE_INSN(custom3_rd_rs1, MATCH_CUSTOM3_RD_RS1, MASK_CUSTOM3_RD_RS1)
-DECLARE_INSN(custom3_rd_rs1_rs2, MATCH_CUSTOM3_RD_RS1_RS2, MASK_CUSTOM3_RD_RS1_RS2)
-DECLARE_INSN(vsetivli, MATCH_VSETIVLI, MASK_VSETIVLI)
-DECLARE_INSN(vsetvli, MATCH_VSETVLI, MASK_VSETVLI)
-DECLARE_INSN(vsetvl, MATCH_VSETVL, MASK_VSETVL)
-DECLARE_INSN(vlm_v, MATCH_VLM_V, MASK_VLM_V)
-DECLARE_INSN(vsm_v, MATCH_VSM_V, MASK_VSM_V)
-DECLARE_INSN(vle8_v, MATCH_VLE8_V, MASK_VLE8_V)
-DECLARE_INSN(vle16_v, MATCH_VLE16_V, MASK_VLE16_V)
-DECLARE_INSN(vle32_v, MATCH_VLE32_V, MASK_VLE32_V)
-DECLARE_INSN(vle64_v, MATCH_VLE64_V, MASK_VLE64_V)
-DECLARE_INSN(vle128_v, MATCH_VLE128_V, MASK_VLE128_V)
-DECLARE_INSN(vle256_v, MATCH_VLE256_V, MASK_VLE256_V)
-DECLARE_INSN(vle512_v, MATCH_VLE512_V, MASK_VLE512_V)
-DECLARE_INSN(vle1024_v, MATCH_VLE1024_V, MASK_VLE1024_V)
-DECLARE_INSN(vse8_v, MATCH_VSE8_V, MASK_VSE8_V)
-DECLARE_INSN(vse16_v, MATCH_VSE16_V, MASK_VSE16_V)
-DECLARE_INSN(vse32_v, MATCH_VSE32_V, MASK_VSE32_V)
-DECLARE_INSN(vse64_v, MATCH_VSE64_V, MASK_VSE64_V)
-DECLARE_INSN(vse128_v, MATCH_VSE128_V, MASK_VSE128_V)
-DECLARE_INSN(vse256_v, MATCH_VSE256_V, MASK_VSE256_V)
-DECLARE_INSN(vse512_v, MATCH_VSE512_V, MASK_VSE512_V)
-DECLARE_INSN(vse1024_v, MATCH_VSE1024_V, MASK_VSE1024_V)
-DECLARE_INSN(vluxei8_v, MATCH_VLUXEI8_V, MASK_VLUXEI8_V)
-DECLARE_INSN(vluxei16_v, MATCH_VLUXEI16_V, MASK_VLUXEI16_V)
-DECLARE_INSN(vluxei32_v, MATCH_VLUXEI32_V, MASK_VLUXEI32_V)
-DECLARE_INSN(vluxei64_v, MATCH_VLUXEI64_V, MASK_VLUXEI64_V)
-DECLARE_INSN(vluxei128_v, MATCH_VLUXEI128_V, MASK_VLUXEI128_V)
-DECLARE_INSN(vluxei256_v, MATCH_VLUXEI256_V, MASK_VLUXEI256_V)
-DECLARE_INSN(vluxei512_v, MATCH_VLUXEI512_V, MASK_VLUXEI512_V)
-DECLARE_INSN(vluxei1024_v, MATCH_VLUXEI1024_V, MASK_VLUXEI1024_V)
-DECLARE_INSN(vsuxei8_v, MATCH_VSUXEI8_V, MASK_VSUXEI8_V)
-DECLARE_INSN(vsuxei16_v, MATCH_VSUXEI16_V, MASK_VSUXEI16_V)
-DECLARE_INSN(vsuxei32_v, MATCH_VSUXEI32_V, MASK_VSUXEI32_V)
-DECLARE_INSN(vsuxei64_v, MATCH_VSUXEI64_V, MASK_VSUXEI64_V)
-DECLARE_INSN(vsuxei128_v, MATCH_VSUXEI128_V, MASK_VSUXEI128_V)
-DECLARE_INSN(vsuxei256_v, MATCH_VSUXEI256_V, MASK_VSUXEI256_V)
-DECLARE_INSN(vsuxei512_v, MATCH_VSUXEI512_V, MASK_VSUXEI512_V)
-DECLARE_INSN(vsuxei1024_v, MATCH_VSUXEI1024_V, MASK_VSUXEI1024_V)
-DECLARE_INSN(vlse8_v, MATCH_VLSE8_V, MASK_VLSE8_V)
-DECLARE_INSN(vlse16_v, MATCH_VLSE16_V, MASK_VLSE16_V)
-DECLARE_INSN(vlse32_v, MATCH_VLSE32_V, MASK_VLSE32_V)
-DECLARE_INSN(vlse64_v, MATCH_VLSE64_V, MASK_VLSE64_V)
-DECLARE_INSN(vlse128_v, MATCH_VLSE128_V, MASK_VLSE128_V)
-DECLARE_INSN(vlse256_v, MATCH_VLSE256_V, MASK_VLSE256_V)
-DECLARE_INSN(vlse512_v, MATCH_VLSE512_V, MASK_VLSE512_V)
-DECLARE_INSN(vlse1024_v, MATCH_VLSE1024_V, MASK_VLSE1024_V)
-DECLARE_INSN(vsse8_v, MATCH_VSSE8_V, MASK_VSSE8_V)
-DECLARE_INSN(vsse16_v, MATCH_VSSE16_V, MASK_VSSE16_V)
-DECLARE_INSN(vsse32_v, MATCH_VSSE32_V, MASK_VSSE32_V)
-DECLARE_INSN(vsse64_v, MATCH_VSSE64_V, MASK_VSSE64_V)
-DECLARE_INSN(vsse128_v, MATCH_VSSE128_V, MASK_VSSE128_V)
-DECLARE_INSN(vsse256_v, MATCH_VSSE256_V, MASK_VSSE256_V)
-DECLARE_INSN(vsse512_v, MATCH_VSSE512_V, MASK_VSSE512_V)
-DECLARE_INSN(vsse1024_v, MATCH_VSSE1024_V, MASK_VSSE1024_V)
-DECLARE_INSN(vloxei8_v, MATCH_VLOXEI8_V, MASK_VLOXEI8_V)
-DECLARE_INSN(vloxei16_v, MATCH_VLOXEI16_V, MASK_VLOXEI16_V)
-DECLARE_INSN(vloxei32_v, MATCH_VLOXEI32_V, MASK_VLOXEI32_V)
-DECLARE_INSN(vloxei64_v, MATCH_VLOXEI64_V, MASK_VLOXEI64_V)
-DECLARE_INSN(vloxei128_v, MATCH_VLOXEI128_V, MASK_VLOXEI128_V)
-DECLARE_INSN(vloxei256_v, MATCH_VLOXEI256_V, MASK_VLOXEI256_V)
-DECLARE_INSN(vloxei512_v, MATCH_VLOXEI512_V, MASK_VLOXEI512_V)
-DECLARE_INSN(vloxei1024_v, MATCH_VLOXEI1024_V, MASK_VLOXEI1024_V)
-DECLARE_INSN(vsoxei8_v, MATCH_VSOXEI8_V, MASK_VSOXEI8_V)
-DECLARE_INSN(vsoxei16_v, MATCH_VSOXEI16_V, MASK_VSOXEI16_V)
-DECLARE_INSN(vsoxei32_v, MATCH_VSOXEI32_V, MASK_VSOXEI32_V)
-DECLARE_INSN(vsoxei64_v, MATCH_VSOXEI64_V, MASK_VSOXEI64_V)
-DECLARE_INSN(vsoxei128_v, MATCH_VSOXEI128_V, MASK_VSOXEI128_V)
-DECLARE_INSN(vsoxei256_v, MATCH_VSOXEI256_V, MASK_VSOXEI256_V)
-DECLARE_INSN(vsoxei512_v, MATCH_VSOXEI512_V, MASK_VSOXEI512_V)
-DECLARE_INSN(vsoxei1024_v, MATCH_VSOXEI1024_V, MASK_VSOXEI1024_V)
-DECLARE_INSN(vle8ff_v, MATCH_VLE8FF_V, MASK_VLE8FF_V)
-DECLARE_INSN(vle16ff_v, MATCH_VLE16FF_V, MASK_VLE16FF_V)
-DECLARE_INSN(vle32ff_v, MATCH_VLE32FF_V, MASK_VLE32FF_V)
-DECLARE_INSN(vle64ff_v, MATCH_VLE64FF_V, MASK_VLE64FF_V)
-DECLARE_INSN(vle128ff_v, MATCH_VLE128FF_V, MASK_VLE128FF_V)
-DECLARE_INSN(vle256ff_v, MATCH_VLE256FF_V, MASK_VLE256FF_V)
-DECLARE_INSN(vle512ff_v, MATCH_VLE512FF_V, MASK_VLE512FF_V)
-DECLARE_INSN(vle1024ff_v, MATCH_VLE1024FF_V, MASK_VLE1024FF_V)
-DECLARE_INSN(vl1re8_v, MATCH_VL1RE8_V, MASK_VL1RE8_V)
-DECLARE_INSN(vl1re16_v, MATCH_VL1RE16_V, MASK_VL1RE16_V)
-DECLARE_INSN(vl1re32_v, MATCH_VL1RE32_V, MASK_VL1RE32_V)
-DECLARE_INSN(vl1re64_v, MATCH_VL1RE64_V, MASK_VL1RE64_V)
-DECLARE_INSN(vl2re8_v, MATCH_VL2RE8_V, MASK_VL2RE8_V)
-DECLARE_INSN(vl2re16_v, MATCH_VL2RE16_V, MASK_VL2RE16_V)
-DECLARE_INSN(vl2re32_v, MATCH_VL2RE32_V, MASK_VL2RE32_V)
-DECLARE_INSN(vl2re64_v, MATCH_VL2RE64_V, MASK_VL2RE64_V)
-DECLARE_INSN(vl4re8_v, MATCH_VL4RE8_V, MASK_VL4RE8_V)
-DECLARE_INSN(vl4re16_v, MATCH_VL4RE16_V, MASK_VL4RE16_V)
-DECLARE_INSN(vl4re32_v, MATCH_VL4RE32_V, MASK_VL4RE32_V)
-DECLARE_INSN(vl4re64_v, MATCH_VL4RE64_V, MASK_VL4RE64_V)
-DECLARE_INSN(vl8re8_v, MATCH_VL8RE8_V, MASK_VL8RE8_V)
-DECLARE_INSN(vl8re16_v, MATCH_VL8RE16_V, MASK_VL8RE16_V)
-DECLARE_INSN(vl8re32_v, MATCH_VL8RE32_V, MASK_VL8RE32_V)
-DECLARE_INSN(vl8re64_v, MATCH_VL8RE64_V, MASK_VL8RE64_V)
-DECLARE_INSN(vs1r_v, MATCH_VS1R_V, MASK_VS1R_V)
-DECLARE_INSN(vs2r_v, MATCH_VS2R_V, MASK_VS2R_V)
-DECLARE_INSN(vs4r_v, MATCH_VS4R_V, MASK_VS4R_V)
-DECLARE_INSN(vs8r_v, MATCH_VS8R_V, MASK_VS8R_V)
-DECLARE_INSN(vfadd_vf, MATCH_VFADD_VF, MASK_VFADD_VF)
-DECLARE_INSN(vfsub_vf, MATCH_VFSUB_VF, MASK_VFSUB_VF)
-DECLARE_INSN(vfmin_vf, MATCH_VFMIN_VF, MASK_VFMIN_VF)
-DECLARE_INSN(vfmax_vf, MATCH_VFMAX_VF, MASK_VFMAX_VF)
-DECLARE_INSN(vfsgnj_vf, MATCH_VFSGNJ_VF, MASK_VFSGNJ_VF)
-DECLARE_INSN(vfsgnjn_vf, MATCH_VFSGNJN_VF, MASK_VFSGNJN_VF)
-DECLARE_INSN(vfsgnjx_vf, MATCH_VFSGNJX_VF, MASK_VFSGNJX_VF)
-DECLARE_INSN(vfslide1up_vf, MATCH_VFSLIDE1UP_VF, MASK_VFSLIDE1UP_VF)
-DECLARE_INSN(vfslide1down_vf, MATCH_VFSLIDE1DOWN_VF, MASK_VFSLIDE1DOWN_VF)
-DECLARE_INSN(vfmv_s_f, MATCH_VFMV_S_F, MASK_VFMV_S_F)
-DECLARE_INSN(vfmerge_vfm, MATCH_VFMERGE_VFM, MASK_VFMERGE_VFM)
-DECLARE_INSN(vfmv_v_f, MATCH_VFMV_V_F, MASK_VFMV_V_F)
-DECLARE_INSN(vmfeq_vf, MATCH_VMFEQ_VF, MASK_VMFEQ_VF)
-DECLARE_INSN(vmfle_vf, MATCH_VMFLE_VF, MASK_VMFLE_VF)
-DECLARE_INSN(vmflt_vf, MATCH_VMFLT_VF, MASK_VMFLT_VF)
-DECLARE_INSN(vmfne_vf, MATCH_VMFNE_VF, MASK_VMFNE_VF)
-DECLARE_INSN(vmfgt_vf, MATCH_VMFGT_VF, MASK_VMFGT_VF)
-DECLARE_INSN(vmfge_vf, MATCH_VMFGE_VF, MASK_VMFGE_VF)
-DECLARE_INSN(vfdiv_vf, MATCH_VFDIV_VF, MASK_VFDIV_VF)
-DECLARE_INSN(vfrdiv_vf, MATCH_VFRDIV_VF, MASK_VFRDIV_VF)
-DECLARE_INSN(vfmul_vf, MATCH_VFMUL_VF, MASK_VFMUL_VF)
-DECLARE_INSN(vfrsub_vf, MATCH_VFRSUB_VF, MASK_VFRSUB_VF)
-DECLARE_INSN(vfmadd_vf, MATCH_VFMADD_VF, MASK_VFMADD_VF)
-DECLARE_INSN(vfnmadd_vf, MATCH_VFNMADD_VF, MASK_VFNMADD_VF)
-DECLARE_INSN(vfmsub_vf, MATCH_VFMSUB_VF, MASK_VFMSUB_VF)
-DECLARE_INSN(vfnmsub_vf, MATCH_VFNMSUB_VF, MASK_VFNMSUB_VF)
-DECLARE_INSN(vfmacc_vf, MATCH_VFMACC_VF, MASK_VFMACC_VF)
-DECLARE_INSN(vfnmacc_vf, MATCH_VFNMACC_VF, MASK_VFNMACC_VF)
-DECLARE_INSN(vfmsac_vf, MATCH_VFMSAC_VF, MASK_VFMSAC_VF)
-DECLARE_INSN(vfnmsac_vf, MATCH_VFNMSAC_VF, MASK_VFNMSAC_VF)
-DECLARE_INSN(vfwadd_vf, MATCH_VFWADD_VF, MASK_VFWADD_VF)
-DECLARE_INSN(vfwsub_vf, MATCH_VFWSUB_VF, MASK_VFWSUB_VF)
-DECLARE_INSN(vfwadd_wf, MATCH_VFWADD_WF, MASK_VFWADD_WF)
-DECLARE_INSN(vfwsub_wf, MATCH_VFWSUB_WF, MASK_VFWSUB_WF)
-DECLARE_INSN(vfwmul_vf, MATCH_VFWMUL_VF, MASK_VFWMUL_VF)
-DECLARE_INSN(vfwmacc_vf, MATCH_VFWMACC_VF, MASK_VFWMACC_VF)
-DECLARE_INSN(vfwnmacc_vf, MATCH_VFWNMACC_VF, MASK_VFWNMACC_VF)
-DECLARE_INSN(vfwmsac_vf, MATCH_VFWMSAC_VF, MASK_VFWMSAC_VF)
-DECLARE_INSN(vfwnmsac_vf, MATCH_VFWNMSAC_VF, MASK_VFWNMSAC_VF)
-DECLARE_INSN(vfadd_vv, MATCH_VFADD_VV, MASK_VFADD_VV)
-DECLARE_INSN(vfredusum_vs, MATCH_VFREDUSUM_VS, MASK_VFREDUSUM_VS)
-DECLARE_INSN(vfsub_vv, MATCH_VFSUB_VV, MASK_VFSUB_VV)
-DECLARE_INSN(vfredosum_vs, MATCH_VFREDOSUM_VS, MASK_VFREDOSUM_VS)
-DECLARE_INSN(vfmin_vv, MATCH_VFMIN_VV, MASK_VFMIN_VV)
-DECLARE_INSN(vfredmin_vs, MATCH_VFREDMIN_VS, MASK_VFREDMIN_VS)
-DECLARE_INSN(vfmax_vv, MATCH_VFMAX_VV, MASK_VFMAX_VV)
-DECLARE_INSN(vfredmax_vs, MATCH_VFREDMAX_VS, MASK_VFREDMAX_VS)
-DECLARE_INSN(vfsgnj_vv, MATCH_VFSGNJ_VV, MASK_VFSGNJ_VV)
-DECLARE_INSN(vfsgnjn_vv, MATCH_VFSGNJN_VV, MASK_VFSGNJN_VV)
-DECLARE_INSN(vfsgnjx_vv, MATCH_VFSGNJX_VV, MASK_VFSGNJX_VV)
-DECLARE_INSN(vfmv_f_s, MATCH_VFMV_F_S, MASK_VFMV_F_S)
-DECLARE_INSN(vmfeq_vv, MATCH_VMFEQ_VV, MASK_VMFEQ_VV)
-DECLARE_INSN(vmfle_vv, MATCH_VMFLE_VV, MASK_VMFLE_VV)
-DECLARE_INSN(vmflt_vv, MATCH_VMFLT_VV, MASK_VMFLT_VV)
-DECLARE_INSN(vmfne_vv, MATCH_VMFNE_VV, MASK_VMFNE_VV)
-DECLARE_INSN(vfdiv_vv, MATCH_VFDIV_VV, MASK_VFDIV_VV)
-DECLARE_INSN(vfmul_vv, MATCH_VFMUL_VV, MASK_VFMUL_VV)
-DECLARE_INSN(vfmadd_vv, MATCH_VFMADD_VV, MASK_VFMADD_VV)
-DECLARE_INSN(vfnmadd_vv, MATCH_VFNMADD_VV, MASK_VFNMADD_VV)
-DECLARE_INSN(vfmsub_vv, MATCH_VFMSUB_VV, MASK_VFMSUB_VV)
-DECLARE_INSN(vfnmsub_vv, MATCH_VFNMSUB_VV, MASK_VFNMSUB_VV)
-DECLARE_INSN(vfmacc_vv, MATCH_VFMACC_VV, MASK_VFMACC_VV)
-DECLARE_INSN(vfnmacc_vv, MATCH_VFNMACC_VV, MASK_VFNMACC_VV)
-DECLARE_INSN(vfmsac_vv, MATCH_VFMSAC_VV, MASK_VFMSAC_VV)
-DECLARE_INSN(vfnmsac_vv, MATCH_VFNMSAC_VV, MASK_VFNMSAC_VV)
-DECLARE_INSN(vfcvt_xu_f_v, MATCH_VFCVT_XU_F_V, MASK_VFCVT_XU_F_V)
-DECLARE_INSN(vfcvt_x_f_v, MATCH_VFCVT_X_F_V, MASK_VFCVT_X_F_V)
-DECLARE_INSN(vfcvt_f_xu_v, MATCH_VFCVT_F_XU_V, MASK_VFCVT_F_XU_V)
-DECLARE_INSN(vfcvt_f_x_v, MATCH_VFCVT_F_X_V, MASK_VFCVT_F_X_V)
-DECLARE_INSN(vfcvt_rtz_xu_f_v, MATCH_VFCVT_RTZ_XU_F_V, MASK_VFCVT_RTZ_XU_F_V)
-DECLARE_INSN(vfcvt_rtz_x_f_v, MATCH_VFCVT_RTZ_X_F_V, MASK_VFCVT_RTZ_X_F_V)
-DECLARE_INSN(vfwcvt_xu_f_v, MATCH_VFWCVT_XU_F_V, MASK_VFWCVT_XU_F_V)
-DECLARE_INSN(vfwcvt_x_f_v, MATCH_VFWCVT_X_F_V, MASK_VFWCVT_X_F_V)
-DECLARE_INSN(vfwcvt_f_xu_v, MATCH_VFWCVT_F_XU_V, MASK_VFWCVT_F_XU_V)
-DECLARE_INSN(vfwcvt_f_x_v, MATCH_VFWCVT_F_X_V, MASK_VFWCVT_F_X_V)
-DECLARE_INSN(vfwcvt_f_f_v, MATCH_VFWCVT_F_F_V, MASK_VFWCVT_F_F_V)
-DECLARE_INSN(vfwcvt_rtz_xu_f_v, MATCH_VFWCVT_RTZ_XU_F_V, MASK_VFWCVT_RTZ_XU_F_V)
-DECLARE_INSN(vfwcvt_rtz_x_f_v, MATCH_VFWCVT_RTZ_X_F_V, MASK_VFWCVT_RTZ_X_F_V)
-DECLARE_INSN(vfncvt_xu_f_w, MATCH_VFNCVT_XU_F_W, MASK_VFNCVT_XU_F_W)
-DECLARE_INSN(vfncvt_x_f_w, MATCH_VFNCVT_X_F_W, MASK_VFNCVT_X_F_W)
-DECLARE_INSN(vfncvt_f_xu_w, MATCH_VFNCVT_F_XU_W, MASK_VFNCVT_F_XU_W)
-DECLARE_INSN(vfncvt_f_x_w, MATCH_VFNCVT_F_X_W, MASK_VFNCVT_F_X_W)
-DECLARE_INSN(vfncvt_f_f_w, MATCH_VFNCVT_F_F_W, MASK_VFNCVT_F_F_W)
-DECLARE_INSN(vfncvt_rod_f_f_w, MATCH_VFNCVT_ROD_F_F_W, MASK_VFNCVT_ROD_F_F_W)
-DECLARE_INSN(vfncvt_rtz_xu_f_w, MATCH_VFNCVT_RTZ_XU_F_W, MASK_VFNCVT_RTZ_XU_F_W)
-DECLARE_INSN(vfncvt_rtz_x_f_w, MATCH_VFNCVT_RTZ_X_F_W, MASK_VFNCVT_RTZ_X_F_W)
-DECLARE_INSN(vfsqrt_v, MATCH_VFSQRT_V, MASK_VFSQRT_V)
-DECLARE_INSN(vfrsqrt7_v, MATCH_VFRSQRT7_V, MASK_VFRSQRT7_V)
-DECLARE_INSN(vfrec7_v, MATCH_VFREC7_V, MASK_VFREC7_V)
-DECLARE_INSN(vfclass_v, MATCH_VFCLASS_V, MASK_VFCLASS_V)
-DECLARE_INSN(vfwadd_vv, MATCH_VFWADD_VV, MASK_VFWADD_VV)
-DECLARE_INSN(vfwredusum_vs, MATCH_VFWREDUSUM_VS, MASK_VFWREDUSUM_VS)
-DECLARE_INSN(vfwsub_vv, MATCH_VFWSUB_VV, MASK_VFWSUB_VV)
-DECLARE_INSN(vfwredosum_vs, MATCH_VFWREDOSUM_VS, MASK_VFWREDOSUM_VS)
-DECLARE_INSN(vfwadd_wv, MATCH_VFWADD_WV, MASK_VFWADD_WV)
-DECLARE_INSN(vfwsub_wv, MATCH_VFWSUB_WV, MASK_VFWSUB_WV)
-DECLARE_INSN(vfwmul_vv, MATCH_VFWMUL_VV, MASK_VFWMUL_VV)
-DECLARE_INSN(vfwmacc_vv, MATCH_VFWMACC_VV, MASK_VFWMACC_VV)
-DECLARE_INSN(vfwnmacc_vv, MATCH_VFWNMACC_VV, MASK_VFWNMACC_VV)
-DECLARE_INSN(vfwmsac_vv, MATCH_VFWMSAC_VV, MASK_VFWMSAC_VV)
-DECLARE_INSN(vfwnmsac_vv, MATCH_VFWNMSAC_VV, MASK_VFWNMSAC_VV)
-DECLARE_INSN(vadd_vx, MATCH_VADD_VX, MASK_VADD_VX)
-DECLARE_INSN(vsub_vx, MATCH_VSUB_VX, MASK_VSUB_VX)
-DECLARE_INSN(vrsub_vx, MATCH_VRSUB_VX, MASK_VRSUB_VX)
-DECLARE_INSN(vminu_vx, MATCH_VMINU_VX, MASK_VMINU_VX)
-DECLARE_INSN(vmin_vx, MATCH_VMIN_VX, MASK_VMIN_VX)
-DECLARE_INSN(vmaxu_vx, MATCH_VMAXU_VX, MASK_VMAXU_VX)
-DECLARE_INSN(vmax_vx, MATCH_VMAX_VX, MASK_VMAX_VX)
-DECLARE_INSN(vand_vx, MATCH_VAND_VX, MASK_VAND_VX)
-DECLARE_INSN(vor_vx, MATCH_VOR_VX, MASK_VOR_VX)
-DECLARE_INSN(vxor_vx, MATCH_VXOR_VX, MASK_VXOR_VX)
-DECLARE_INSN(vrgather_vx, MATCH_VRGATHER_VX, MASK_VRGATHER_VX)
-DECLARE_INSN(vslideup_vx, MATCH_VSLIDEUP_VX, MASK_VSLIDEUP_VX)
-DECLARE_INSN(vslidedown_vx, MATCH_VSLIDEDOWN_VX, MASK_VSLIDEDOWN_VX)
-DECLARE_INSN(vadc_vxm, MATCH_VADC_VXM, MASK_VADC_VXM)
-DECLARE_INSN(vmadc_vxm, MATCH_VMADC_VXM, MASK_VMADC_VXM)
-DECLARE_INSN(vmadc_vx, MATCH_VMADC_VX, MASK_VMADC_VX)
-DECLARE_INSN(vsbc_vxm, MATCH_VSBC_VXM, MASK_VSBC_VXM)
-DECLARE_INSN(vmsbc_vxm, MATCH_VMSBC_VXM, MASK_VMSBC_VXM)
-DECLARE_INSN(vmsbc_vx, MATCH_VMSBC_VX, MASK_VMSBC_VX)
-DECLARE_INSN(vmerge_vxm, MATCH_VMERGE_VXM, MASK_VMERGE_VXM)
-DECLARE_INSN(vmv_v_x, MATCH_VMV_V_X, MASK_VMV_V_X)
-DECLARE_INSN(vmseq_vx, MATCH_VMSEQ_VX, MASK_VMSEQ_VX)
-DECLARE_INSN(vmsne_vx, MATCH_VMSNE_VX, MASK_VMSNE_VX)
-DECLARE_INSN(vmsltu_vx, MATCH_VMSLTU_VX, MASK_VMSLTU_VX)
-DECLARE_INSN(vmslt_vx, MATCH_VMSLT_VX, MASK_VMSLT_VX)
-DECLARE_INSN(vmsleu_vx, MATCH_VMSLEU_VX, MASK_VMSLEU_VX)
-DECLARE_INSN(vmsle_vx, MATCH_VMSLE_VX, MASK_VMSLE_VX)
-DECLARE_INSN(vmsgtu_vx, MATCH_VMSGTU_VX, MASK_VMSGTU_VX)
-DECLARE_INSN(vmsgt_vx, MATCH_VMSGT_VX, MASK_VMSGT_VX)
-DECLARE_INSN(vsaddu_vx, MATCH_VSADDU_VX, MASK_VSADDU_VX)
-DECLARE_INSN(vsadd_vx, MATCH_VSADD_VX, MASK_VSADD_VX)
-DECLARE_INSN(vssubu_vx, MATCH_VSSUBU_VX, MASK_VSSUBU_VX)
-DECLARE_INSN(vssub_vx, MATCH_VSSUB_VX, MASK_VSSUB_VX)
-DECLARE_INSN(vsll_vx, MATCH_VSLL_VX, MASK_VSLL_VX)
-DECLARE_INSN(vsmul_vx, MATCH_VSMUL_VX, MASK_VSMUL_VX)
-DECLARE_INSN(vsrl_vx, MATCH_VSRL_VX, MASK_VSRL_VX)
-DECLARE_INSN(vsra_vx, MATCH_VSRA_VX, MASK_VSRA_VX)
-DECLARE_INSN(vssrl_vx, MATCH_VSSRL_VX, MASK_VSSRL_VX)
-DECLARE_INSN(vssra_vx, MATCH_VSSRA_VX, MASK_VSSRA_VX)
-DECLARE_INSN(vnsrl_wx, MATCH_VNSRL_WX, MASK_VNSRL_WX)
-DECLARE_INSN(vnsra_wx, MATCH_VNSRA_WX, MASK_VNSRA_WX)
-DECLARE_INSN(vnclipu_wx, MATCH_VNCLIPU_WX, MASK_VNCLIPU_WX)
-DECLARE_INSN(vnclip_wx, MATCH_VNCLIP_WX, MASK_VNCLIP_WX)
-DECLARE_INSN(vadd_vv, MATCH_VADD_VV, MASK_VADD_VV)
-DECLARE_INSN(vsub_vv, MATCH_VSUB_VV, MASK_VSUB_VV)
-DECLARE_INSN(vminu_vv, MATCH_VMINU_VV, MASK_VMINU_VV)
-DECLARE_INSN(vmin_vv, MATCH_VMIN_VV, MASK_VMIN_VV)
-DECLARE_INSN(vmaxu_vv, MATCH_VMAXU_VV, MASK_VMAXU_VV)
-DECLARE_INSN(vmax_vv, MATCH_VMAX_VV, MASK_VMAX_VV)
-DECLARE_INSN(vand_vv, MATCH_VAND_VV, MASK_VAND_VV)
-DECLARE_INSN(vor_vv, MATCH_VOR_VV, MASK_VOR_VV)
-DECLARE_INSN(vxor_vv, MATCH_VXOR_VV, MASK_VXOR_VV)
-DECLARE_INSN(vrgather_vv, MATCH_VRGATHER_VV, MASK_VRGATHER_VV)
-DECLARE_INSN(vrgatherei16_vv, MATCH_VRGATHEREI16_VV, MASK_VRGATHEREI16_VV)
-DECLARE_INSN(vadc_vvm, MATCH_VADC_VVM, MASK_VADC_VVM)
-DECLARE_INSN(vmadc_vvm, MATCH_VMADC_VVM, MASK_VMADC_VVM)
-DECLARE_INSN(vmadc_vv, MATCH_VMADC_VV, MASK_VMADC_VV)
-DECLARE_INSN(vsbc_vvm, MATCH_VSBC_VVM, MASK_VSBC_VVM)
-DECLARE_INSN(vmsbc_vvm, MATCH_VMSBC_VVM, MASK_VMSBC_VVM)
-DECLARE_INSN(vmsbc_vv, MATCH_VMSBC_VV, MASK_VMSBC_VV)
-DECLARE_INSN(vmerge_vvm, MATCH_VMERGE_VVM, MASK_VMERGE_VVM)
-DECLARE_INSN(vmv_v_v, MATCH_VMV_V_V, MASK_VMV_V_V)
-DECLARE_INSN(vmseq_vv, MATCH_VMSEQ_VV, MASK_VMSEQ_VV)
-DECLARE_INSN(vmsne_vv, MATCH_VMSNE_VV, MASK_VMSNE_VV)
-DECLARE_INSN(vmsltu_vv, MATCH_VMSLTU_VV, MASK_VMSLTU_VV)
-DECLARE_INSN(vmslt_vv, MATCH_VMSLT_VV, MASK_VMSLT_VV)
-DECLARE_INSN(vmsleu_vv, MATCH_VMSLEU_VV, MASK_VMSLEU_VV)
-DECLARE_INSN(vmsle_vv, MATCH_VMSLE_VV, MASK_VMSLE_VV)
-DECLARE_INSN(vsaddu_vv, MATCH_VSADDU_VV, MASK_VSADDU_VV)
-DECLARE_INSN(vsadd_vv, MATCH_VSADD_VV, MASK_VSADD_VV)
-DECLARE_INSN(vssubu_vv, MATCH_VSSUBU_VV, MASK_VSSUBU_VV)
-DECLARE_INSN(vssub_vv, MATCH_VSSUB_VV, MASK_VSSUB_VV)
-DECLARE_INSN(vsll_vv, MATCH_VSLL_VV, MASK_VSLL_VV)
-DECLARE_INSN(vsmul_vv, MATCH_VSMUL_VV, MASK_VSMUL_VV)
-DECLARE_INSN(vsrl_vv, MATCH_VSRL_VV, MASK_VSRL_VV)
-DECLARE_INSN(vsra_vv, MATCH_VSRA_VV, MASK_VSRA_VV)
-DECLARE_INSN(vssrl_vv, MATCH_VSSRL_VV, MASK_VSSRL_VV)
-DECLARE_INSN(vssra_vv, MATCH_VSSRA_VV, MASK_VSSRA_VV)
-DECLARE_INSN(vnsrl_wv, MATCH_VNSRL_WV, MASK_VNSRL_WV)
-DECLARE_INSN(vnsra_wv, MATCH_VNSRA_WV, MASK_VNSRA_WV)
-DECLARE_INSN(vnclipu_wv, MATCH_VNCLIPU_WV, MASK_VNCLIPU_WV)
-DECLARE_INSN(vnclip_wv, MATCH_VNCLIP_WV, MASK_VNCLIP_WV)
-DECLARE_INSN(vwredsumu_vs, MATCH_VWREDSUMU_VS, MASK_VWREDSUMU_VS)
-DECLARE_INSN(vwredsum_vs, MATCH_VWREDSUM_VS, MASK_VWREDSUM_VS)
-DECLARE_INSN(vadd_vi, MATCH_VADD_VI, MASK_VADD_VI)
-DECLARE_INSN(vrsub_vi, MATCH_VRSUB_VI, MASK_VRSUB_VI)
-DECLARE_INSN(vand_vi, MATCH_VAND_VI, MASK_VAND_VI)
-DECLARE_INSN(vor_vi, MATCH_VOR_VI, MASK_VOR_VI)
-DECLARE_INSN(vxor_vi, MATCH_VXOR_VI, MASK_VXOR_VI)
-DECLARE_INSN(vrgather_vi, MATCH_VRGATHER_VI, MASK_VRGATHER_VI)
-DECLARE_INSN(vslideup_vi, MATCH_VSLIDEUP_VI, MASK_VSLIDEUP_VI)
-DECLARE_INSN(vslidedown_vi, MATCH_VSLIDEDOWN_VI, MASK_VSLIDEDOWN_VI)
-DECLARE_INSN(vadc_vim, MATCH_VADC_VIM, MASK_VADC_VIM)
-DECLARE_INSN(vmadc_vim, MATCH_VMADC_VIM, MASK_VMADC_VIM)
-DECLARE_INSN(vmadc_vi, MATCH_VMADC_VI, MASK_VMADC_VI)
-DECLARE_INSN(vmerge_vim, MATCH_VMERGE_VIM, MASK_VMERGE_VIM)
-DECLARE_INSN(vmv_v_i, MATCH_VMV_V_I, MASK_VMV_V_I)
-DECLARE_INSN(vmseq_vi, MATCH_VMSEQ_VI, MASK_VMSEQ_VI)
-DECLARE_INSN(vmsne_vi, MATCH_VMSNE_VI, MASK_VMSNE_VI)
-DECLARE_INSN(vmsleu_vi, MATCH_VMSLEU_VI, MASK_VMSLEU_VI)
-DECLARE_INSN(vmsle_vi, MATCH_VMSLE_VI, MASK_VMSLE_VI)
-DECLARE_INSN(vmsgtu_vi, MATCH_VMSGTU_VI, MASK_VMSGTU_VI)
-DECLARE_INSN(vmsgt_vi, MATCH_VMSGT_VI, MASK_VMSGT_VI)
-DECLARE_INSN(vsaddu_vi, MATCH_VSADDU_VI, MASK_VSADDU_VI)
-DECLARE_INSN(vsadd_vi, MATCH_VSADD_VI, MASK_VSADD_VI)
-DECLARE_INSN(vsll_vi, MATCH_VSLL_VI, MASK_VSLL_VI)
-DECLARE_INSN(vmv1r_v, MATCH_VMV1R_V, MASK_VMV1R_V)
-DECLARE_INSN(vmv2r_v, MATCH_VMV2R_V, MASK_VMV2R_V)
-DECLARE_INSN(vmv4r_v, MATCH_VMV4R_V, MASK_VMV4R_V)
-DECLARE_INSN(vmv8r_v, MATCH_VMV8R_V, MASK_VMV8R_V)
-DECLARE_INSN(vsrl_vi, MATCH_VSRL_VI, MASK_VSRL_VI)
-DECLARE_INSN(vsra_vi, MATCH_VSRA_VI, MASK_VSRA_VI)
-DECLARE_INSN(vssrl_vi, MATCH_VSSRL_VI, MASK_VSSRL_VI)
-DECLARE_INSN(vssra_vi, MATCH_VSSRA_VI, MASK_VSSRA_VI)
-DECLARE_INSN(vnsrl_wi, MATCH_VNSRL_WI, MASK_VNSRL_WI)
-DECLARE_INSN(vnsra_wi, MATCH_VNSRA_WI, MASK_VNSRA_WI)
-DECLARE_INSN(vnclipu_wi, MATCH_VNCLIPU_WI, MASK_VNCLIPU_WI)
-DECLARE_INSN(vnclip_wi, MATCH_VNCLIP_WI, MASK_VNCLIP_WI)
-DECLARE_INSN(vredsum_vs, MATCH_VREDSUM_VS, MASK_VREDSUM_VS)
-DECLARE_INSN(vredand_vs, MATCH_VREDAND_VS, MASK_VREDAND_VS)
-DECLARE_INSN(vredor_vs, MATCH_VREDOR_VS, MASK_VREDOR_VS)
-DECLARE_INSN(vredxor_vs, MATCH_VREDXOR_VS, MASK_VREDXOR_VS)
-DECLARE_INSN(vredminu_vs, MATCH_VREDMINU_VS, MASK_VREDMINU_VS)
-DECLARE_INSN(vredmin_vs, MATCH_VREDMIN_VS, MASK_VREDMIN_VS)
-DECLARE_INSN(vredmaxu_vs, MATCH_VREDMAXU_VS, MASK_VREDMAXU_VS)
-DECLARE_INSN(vredmax_vs, MATCH_VREDMAX_VS, MASK_VREDMAX_VS)
-DECLARE_INSN(vaaddu_vv, MATCH_VAADDU_VV, MASK_VAADDU_VV)
-DECLARE_INSN(vaadd_vv, MATCH_VAADD_VV, MASK_VAADD_VV)
-DECLARE_INSN(vasubu_vv, MATCH_VASUBU_VV, MASK_VASUBU_VV)
-DECLARE_INSN(vasub_vv, MATCH_VASUB_VV, MASK_VASUB_VV)
-DECLARE_INSN(vmv_x_s, MATCH_VMV_X_S, MASK_VMV_X_S)
-DECLARE_INSN(vzext_vf8, MATCH_VZEXT_VF8, MASK_VZEXT_VF8)
-DECLARE_INSN(vsext_vf8, MATCH_VSEXT_VF8, MASK_VSEXT_VF8)
-DECLARE_INSN(vzext_vf4, MATCH_VZEXT_VF4, MASK_VZEXT_VF4)
-DECLARE_INSN(vsext_vf4, MATCH_VSEXT_VF4, MASK_VSEXT_VF4)
-DECLARE_INSN(vzext_vf2, MATCH_VZEXT_VF2, MASK_VZEXT_VF2)
-DECLARE_INSN(vsext_vf2, MATCH_VSEXT_VF2, MASK_VSEXT_VF2)
-DECLARE_INSN(vcompress_vm, MATCH_VCOMPRESS_VM, MASK_VCOMPRESS_VM)
-DECLARE_INSN(vmandn_mm, MATCH_VMANDN_MM, MASK_VMANDN_MM)
-DECLARE_INSN(vmand_mm, MATCH_VMAND_MM, MASK_VMAND_MM)
-DECLARE_INSN(vmor_mm, MATCH_VMOR_MM, MASK_VMOR_MM)
-DECLARE_INSN(vmxor_mm, MATCH_VMXOR_MM, MASK_VMXOR_MM)
-DECLARE_INSN(vmorn_mm, MATCH_VMORN_MM, MASK_VMORN_MM)
-DECLARE_INSN(vmnand_mm, MATCH_VMNAND_MM, MASK_VMNAND_MM)
-DECLARE_INSN(vmnor_mm, MATCH_VMNOR_MM, MASK_VMNOR_MM)
-DECLARE_INSN(vmxnor_mm, MATCH_VMXNOR_MM, MASK_VMXNOR_MM)
-DECLARE_INSN(vmsbf_m, MATCH_VMSBF_M, MASK_VMSBF_M)
-DECLARE_INSN(vmsof_m, MATCH_VMSOF_M, MASK_VMSOF_M)
-DECLARE_INSN(vmsif_m, MATCH_VMSIF_M, MASK_VMSIF_M)
-DECLARE_INSN(viota_m, MATCH_VIOTA_M, MASK_VIOTA_M)
-DECLARE_INSN(vid_v, MATCH_VID_V, MASK_VID_V)
-DECLARE_INSN(vcpop_m, MATCH_VCPOP_M, MASK_VCPOP_M)
-DECLARE_INSN(vfirst_m, MATCH_VFIRST_M, MASK_VFIRST_M)
-DECLARE_INSN(vdivu_vv, MATCH_VDIVU_VV, MASK_VDIVU_VV)
-DECLARE_INSN(vdiv_vv, MATCH_VDIV_VV, MASK_VDIV_VV)
-DECLARE_INSN(vremu_vv, MATCH_VREMU_VV, MASK_VREMU_VV)
-DECLARE_INSN(vrem_vv, MATCH_VREM_VV, MASK_VREM_VV)
-DECLARE_INSN(vmulhu_vv, MATCH_VMULHU_VV, MASK_VMULHU_VV)
-DECLARE_INSN(vmul_vv, MATCH_VMUL_VV, MASK_VMUL_VV)
-DECLARE_INSN(vmulhsu_vv, MATCH_VMULHSU_VV, MASK_VMULHSU_VV)
-DECLARE_INSN(vmulh_vv, MATCH_VMULH_VV, MASK_VMULH_VV)
-DECLARE_INSN(vmadd_vv, MATCH_VMADD_VV, MASK_VMADD_VV)
-DECLARE_INSN(vnmsub_vv, MATCH_VNMSUB_VV, MASK_VNMSUB_VV)
-DECLARE_INSN(vmacc_vv, MATCH_VMACC_VV, MASK_VMACC_VV)
-DECLARE_INSN(vnmsac_vv, MATCH_VNMSAC_VV, MASK_VNMSAC_VV)
-DECLARE_INSN(vwaddu_vv, MATCH_VWADDU_VV, MASK_VWADDU_VV)
-DECLARE_INSN(vwadd_vv, MATCH_VWADD_VV, MASK_VWADD_VV)
-DECLARE_INSN(vwsubu_vv, MATCH_VWSUBU_VV, MASK_VWSUBU_VV)
-DECLARE_INSN(vwsub_vv, MATCH_VWSUB_VV, MASK_VWSUB_VV)
-DECLARE_INSN(vwaddu_wv, MATCH_VWADDU_WV, MASK_VWADDU_WV)
-DECLARE_INSN(vwadd_wv, MATCH_VWADD_WV, MASK_VWADD_WV)
-DECLARE_INSN(vwsubu_wv, MATCH_VWSUBU_WV, MASK_VWSUBU_WV)
-DECLARE_INSN(vwsub_wv, MATCH_VWSUB_WV, MASK_VWSUB_WV)
-DECLARE_INSN(vwmulu_vv, MATCH_VWMULU_VV, MASK_VWMULU_VV)
-DECLARE_INSN(vwmulsu_vv, MATCH_VWMULSU_VV, MASK_VWMULSU_VV)
-DECLARE_INSN(vwmul_vv, MATCH_VWMUL_VV, MASK_VWMUL_VV)
-DECLARE_INSN(vwmaccu_vv, MATCH_VWMACCU_VV, MASK_VWMACCU_VV)
-DECLARE_INSN(vwmacc_vv, MATCH_VWMACC_VV, MASK_VWMACC_VV)
-DECLARE_INSN(vwmaccsu_vv, MATCH_VWMACCSU_VV, MASK_VWMACCSU_VV)
-DECLARE_INSN(vaaddu_vx, MATCH_VAADDU_VX, MASK_VAADDU_VX)
-DECLARE_INSN(vaadd_vx, MATCH_VAADD_VX, MASK_VAADD_VX)
-DECLARE_INSN(vasubu_vx, MATCH_VASUBU_VX, MASK_VASUBU_VX)
-DECLARE_INSN(vasub_vx, MATCH_VASUB_VX, MASK_VASUB_VX)
-DECLARE_INSN(vmv_s_x, MATCH_VMV_S_X, MASK_VMV_S_X)
-DECLARE_INSN(vslide1up_vx, MATCH_VSLIDE1UP_VX, MASK_VSLIDE1UP_VX)
-DECLARE_INSN(vslide1down_vx, MATCH_VSLIDE1DOWN_VX, MASK_VSLIDE1DOWN_VX)
-DECLARE_INSN(vdivu_vx, MATCH_VDIVU_VX, MASK_VDIVU_VX)
-DECLARE_INSN(vdiv_vx, MATCH_VDIV_VX, MASK_VDIV_VX)
-DECLARE_INSN(vremu_vx, MATCH_VREMU_VX, MASK_VREMU_VX)
-DECLARE_INSN(vrem_vx, MATCH_VREM_VX, MASK_VREM_VX)
-DECLARE_INSN(vmulhu_vx, MATCH_VMULHU_VX, MASK_VMULHU_VX)
-DECLARE_INSN(vmul_vx, MATCH_VMUL_VX, MASK_VMUL_VX)
-DECLARE_INSN(vmulhsu_vx, MATCH_VMULHSU_VX, MASK_VMULHSU_VX)
-DECLARE_INSN(vmulh_vx, MATCH_VMULH_VX, MASK_VMULH_VX)
-DECLARE_INSN(vmadd_vx, MATCH_VMADD_VX, MASK_VMADD_VX)
-DECLARE_INSN(vnmsub_vx, MATCH_VNMSUB_VX, MASK_VNMSUB_VX)
-DECLARE_INSN(vmacc_vx, MATCH_VMACC_VX, MASK_VMACC_VX)
-DECLARE_INSN(vnmsac_vx, MATCH_VNMSAC_VX, MASK_VNMSAC_VX)
-DECLARE_INSN(vwaddu_vx, MATCH_VWADDU_VX, MASK_VWADDU_VX)
-DECLARE_INSN(vwadd_vx, MATCH_VWADD_VX, MASK_VWADD_VX)
-DECLARE_INSN(vwsubu_vx, MATCH_VWSUBU_VX, MASK_VWSUBU_VX)
-DECLARE_INSN(vwsub_vx, MATCH_VWSUB_VX, MASK_VWSUB_VX)
-DECLARE_INSN(vwaddu_wx, MATCH_VWADDU_WX, MASK_VWADDU_WX)
-DECLARE_INSN(vwadd_wx, MATCH_VWADD_WX, MASK_VWADD_WX)
-DECLARE_INSN(vwsubu_wx, MATCH_VWSUBU_WX, MASK_VWSUBU_WX)
-DECLARE_INSN(vwsub_wx, MATCH_VWSUB_WX, MASK_VWSUB_WX)
-DECLARE_INSN(vwmulu_vx, MATCH_VWMULU_VX, MASK_VWMULU_VX)
-DECLARE_INSN(vwmulsu_vx, MATCH_VWMULSU_VX, MASK_VWMULSU_VX)
-DECLARE_INSN(vwmul_vx, MATCH_VWMUL_VX, MASK_VWMUL_VX)
-DECLARE_INSN(vwmaccu_vx, MATCH_VWMACCU_VX, MASK_VWMACCU_VX)
-DECLARE_INSN(vwmacc_vx, MATCH_VWMACC_VX, MASK_VWMACC_VX)
-DECLARE_INSN(vwmaccus_vx, MATCH_VWMACCUS_VX, MASK_VWMACCUS_VX)
-DECLARE_INSN(vwmaccsu_vx, MATCH_VWMACCSU_VX, MASK_VWMACCSU_VX)
-DECLARE_INSN(vamoswapei8_v, MATCH_VAMOSWAPEI8_V, MASK_VAMOSWAPEI8_V)
-DECLARE_INSN(vamoaddei8_v, MATCH_VAMOADDEI8_V, MASK_VAMOADDEI8_V)
-DECLARE_INSN(vamoxorei8_v, MATCH_VAMOXOREI8_V, MASK_VAMOXOREI8_V)
-DECLARE_INSN(vamoandei8_v, MATCH_VAMOANDEI8_V, MASK_VAMOANDEI8_V)
-DECLARE_INSN(vamoorei8_v, MATCH_VAMOOREI8_V, MASK_VAMOOREI8_V)
-DECLARE_INSN(vamominei8_v, MATCH_VAMOMINEI8_V, MASK_VAMOMINEI8_V)
-DECLARE_INSN(vamomaxei8_v, MATCH_VAMOMAXEI8_V, MASK_VAMOMAXEI8_V)
-DECLARE_INSN(vamominuei8_v, MATCH_VAMOMINUEI8_V, MASK_VAMOMINUEI8_V)
-DECLARE_INSN(vamomaxuei8_v, MATCH_VAMOMAXUEI8_V, MASK_VAMOMAXUEI8_V)
-DECLARE_INSN(vamoswapei16_v, MATCH_VAMOSWAPEI16_V, MASK_VAMOSWAPEI16_V)
-DECLARE_INSN(vamoaddei16_v, MATCH_VAMOADDEI16_V, MASK_VAMOADDEI16_V)
-DECLARE_INSN(vamoxorei16_v, MATCH_VAMOXOREI16_V, MASK_VAMOXOREI16_V)
-DECLARE_INSN(vamoandei16_v, MATCH_VAMOANDEI16_V, MASK_VAMOANDEI16_V)
-DECLARE_INSN(vamoorei16_v, MATCH_VAMOOREI16_V, MASK_VAMOOREI16_V)
-DECLARE_INSN(vamominei16_v, MATCH_VAMOMINEI16_V, MASK_VAMOMINEI16_V)
-DECLARE_INSN(vamomaxei16_v, MATCH_VAMOMAXEI16_V, MASK_VAMOMAXEI16_V)
-DECLARE_INSN(vamominuei16_v, MATCH_VAMOMINUEI16_V, MASK_VAMOMINUEI16_V)
-DECLARE_INSN(vamomaxuei16_v, MATCH_VAMOMAXUEI16_V, MASK_VAMOMAXUEI16_V)
-DECLARE_INSN(vamoswapei32_v, MATCH_VAMOSWAPEI32_V, MASK_VAMOSWAPEI32_V)
-DECLARE_INSN(vamoaddei32_v, MATCH_VAMOADDEI32_V, MASK_VAMOADDEI32_V)
-DECLARE_INSN(vamoxorei32_v, MATCH_VAMOXOREI32_V, MASK_VAMOXOREI32_V)
-DECLARE_INSN(vamoandei32_v, MATCH_VAMOANDEI32_V, MASK_VAMOANDEI32_V)
-DECLARE_INSN(vamoorei32_v, MATCH_VAMOOREI32_V, MASK_VAMOOREI32_V)
-DECLARE_INSN(vamominei32_v, MATCH_VAMOMINEI32_V, MASK_VAMOMINEI32_V)
-DECLARE_INSN(vamomaxei32_v, MATCH_VAMOMAXEI32_V, MASK_VAMOMAXEI32_V)
-DECLARE_INSN(vamominuei32_v, MATCH_VAMOMINUEI32_V, MASK_VAMOMINUEI32_V)
-DECLARE_INSN(vamomaxuei32_v, MATCH_VAMOMAXUEI32_V, MASK_VAMOMAXUEI32_V)
-DECLARE_INSN(vamoswapei64_v, MATCH_VAMOSWAPEI64_V, MASK_VAMOSWAPEI64_V)
-DECLARE_INSN(vamoaddei64_v, MATCH_VAMOADDEI64_V, MASK_VAMOADDEI64_V)
-DECLARE_INSN(vamoxorei64_v, MATCH_VAMOXOREI64_V, MASK_VAMOXOREI64_V)
-DECLARE_INSN(vamoandei64_v, MATCH_VAMOANDEI64_V, MASK_VAMOANDEI64_V)
-DECLARE_INSN(vamoorei64_v, MATCH_VAMOOREI64_V, MASK_VAMOOREI64_V)
-DECLARE_INSN(vamominei64_v, MATCH_VAMOMINEI64_V, MASK_VAMOMINEI64_V)
-DECLARE_INSN(vamomaxei64_v, MATCH_VAMOMAXEI64_V, MASK_VAMOMAXEI64_V)
-DECLARE_INSN(vamominuei64_v, MATCH_VAMOMINUEI64_V, MASK_VAMOMINUEI64_V)
-DECLARE_INSN(vamomaxuei64_v, MATCH_VAMOMAXUEI64_V, MASK_VAMOMAXUEI64_V)
-DECLARE_INSN(add8, MATCH_ADD8, MASK_ADD8)
-DECLARE_INSN(add16, MATCH_ADD16, MASK_ADD16)
-DECLARE_INSN(add64, MATCH_ADD64, MASK_ADD64)
-DECLARE_INSN(ave, MATCH_AVE, MASK_AVE)
-DECLARE_INSN(bitrev, MATCH_BITREV, MASK_BITREV)
-DECLARE_INSN(bitrevi, MATCH_BITREVI, MASK_BITREVI)
-DECLARE_INSN(bpick, MATCH_BPICK, MASK_BPICK)
-DECLARE_INSN(clrs8, MATCH_CLRS8, MASK_CLRS8)
-DECLARE_INSN(clrs16, MATCH_CLRS16, MASK_CLRS16)
-DECLARE_INSN(clrs32, MATCH_CLRS32, MASK_CLRS32)
-DECLARE_INSN(clo8, MATCH_CLO8, MASK_CLO8)
+DECLARE_INSN(c_slli, MATCH_C_SLLI, MASK_C_SLLI)
+DECLARE_INSN(c_srai, MATCH_C_SRAI, MASK_C_SRAI)
+DECLARE_INSN(c_srli, MATCH_C_SRLI, MASK_C_SRLI)
+DECLARE_INSN(c_sub, MATCH_C_SUB, MASK_C_SUB)
+DECLARE_INSN(c_subw, MATCH_C_SUBW, MASK_C_SUBW)
+DECLARE_INSN(c_sw, MATCH_C_SW, MASK_C_SW)
+DECLARE_INSN(c_swsp, MATCH_C_SWSP, MASK_C_SWSP)
+DECLARE_INSN(c_xor, MATCH_C_XOR, MASK_C_XOR)
+DECLARE_INSN(cbo_clean, MATCH_CBO_CLEAN, MASK_CBO_CLEAN)
+DECLARE_INSN(cbo_flush, MATCH_CBO_FLUSH, MASK_CBO_FLUSH)
+DECLARE_INSN(cbo_inval, MATCH_CBO_INVAL, MASK_CBO_INVAL)
+DECLARE_INSN(cbo_zero, MATCH_CBO_ZERO, MASK_CBO_ZERO)
+DECLARE_INSN(clmul, MATCH_CLMUL, MASK_CLMUL)
+DECLARE_INSN(clmulh, MATCH_CLMULH, MASK_CLMULH)
+DECLARE_INSN(clmulr, MATCH_CLMULR, MASK_CLMULR)
DECLARE_INSN(clo16, MATCH_CLO16, MASK_CLO16)
DECLARE_INSN(clo32, MATCH_CLO32, MASK_CLO32)
-DECLARE_INSN(clz8, MATCH_CLZ8, MASK_CLZ8)
+DECLARE_INSN(clo8, MATCH_CLO8, MASK_CLO8)
+DECLARE_INSN(clrs16, MATCH_CLRS16, MASK_CLRS16)
+DECLARE_INSN(clrs32, MATCH_CLRS32, MASK_CLRS32)
+DECLARE_INSN(clrs8, MATCH_CLRS8, MASK_CLRS8)
+DECLARE_INSN(clz, MATCH_CLZ, MASK_CLZ)
DECLARE_INSN(clz16, MATCH_CLZ16, MASK_CLZ16)
DECLARE_INSN(clz32, MATCH_CLZ32, MASK_CLZ32)
-DECLARE_INSN(cmpeq8, MATCH_CMPEQ8, MASK_CMPEQ8)
+DECLARE_INSN(clz8, MATCH_CLZ8, MASK_CLZ8)
+DECLARE_INSN(clzw, MATCH_CLZW, MASK_CLZW)
+DECLARE_INSN(cmix, MATCH_CMIX, MASK_CMIX)
+DECLARE_INSN(cmov, MATCH_CMOV, MASK_CMOV)
DECLARE_INSN(cmpeq16, MATCH_CMPEQ16, MASK_CMPEQ16)
+DECLARE_INSN(cmpeq8, MATCH_CMPEQ8, MASK_CMPEQ8)
+DECLARE_INSN(cpop, MATCH_CPOP, MASK_CPOP)
+DECLARE_INSN(cpopw, MATCH_CPOPW, MASK_CPOPW)
DECLARE_INSN(cras16, MATCH_CRAS16, MASK_CRAS16)
+DECLARE_INSN(cras32, MATCH_CRAS32, MASK_CRAS32)
+DECLARE_INSN(crc32_b, MATCH_CRC32_B, MASK_CRC32_B)
+DECLARE_INSN(crc32_d, MATCH_CRC32_D, MASK_CRC32_D)
+DECLARE_INSN(crc32_h, MATCH_CRC32_H, MASK_CRC32_H)
+DECLARE_INSN(crc32_w, MATCH_CRC32_W, MASK_CRC32_W)
+DECLARE_INSN(crc32c_b, MATCH_CRC32C_B, MASK_CRC32C_B)
+DECLARE_INSN(crc32c_d, MATCH_CRC32C_D, MASK_CRC32C_D)
+DECLARE_INSN(crc32c_h, MATCH_CRC32C_H, MASK_CRC32C_H)
+DECLARE_INSN(crc32c_w, MATCH_CRC32C_W, MASK_CRC32C_W)
DECLARE_INSN(crsa16, MATCH_CRSA16, MASK_CRSA16)
+DECLARE_INSN(crsa32, MATCH_CRSA32, MASK_CRSA32)
+DECLARE_INSN(csrrc, MATCH_CSRRC, MASK_CSRRC)
+DECLARE_INSN(csrrci, MATCH_CSRRCI, MASK_CSRRCI)
+DECLARE_INSN(csrrs, MATCH_CSRRS, MASK_CSRRS)
+DECLARE_INSN(csrrsi, MATCH_CSRRSI, MASK_CSRRSI)
+DECLARE_INSN(csrrw, MATCH_CSRRW, MASK_CSRRW)
+DECLARE_INSN(csrrwi, MATCH_CSRRWI, MASK_CSRRWI)
+DECLARE_INSN(ctz, MATCH_CTZ, MASK_CTZ)
+DECLARE_INSN(ctzw, MATCH_CTZW, MASK_CTZW)
+DECLARE_INSN(div, MATCH_DIV, MASK_DIV)
+DECLARE_INSN(divu, MATCH_DIVU, MASK_DIVU)
+DECLARE_INSN(divuw, MATCH_DIVUW, MASK_DIVUW)
+DECLARE_INSN(divw, MATCH_DIVW, MASK_DIVW)
+DECLARE_INSN(dret, MATCH_DRET, MASK_DRET)
+DECLARE_INSN(ebreak, MATCH_EBREAK, MASK_EBREAK)
+DECLARE_INSN(ecall, MATCH_ECALL, MASK_ECALL)
+DECLARE_INSN(fadd_d, MATCH_FADD_D, MASK_FADD_D)
+DECLARE_INSN(fadd_h, MATCH_FADD_H, MASK_FADD_H)
+DECLARE_INSN(fadd_q, MATCH_FADD_Q, MASK_FADD_Q)
+DECLARE_INSN(fadd_s, MATCH_FADD_S, MASK_FADD_S)
+DECLARE_INSN(fclass_d, MATCH_FCLASS_D, MASK_FCLASS_D)
+DECLARE_INSN(fclass_h, MATCH_FCLASS_H, MASK_FCLASS_H)
+DECLARE_INSN(fclass_q, MATCH_FCLASS_Q, MASK_FCLASS_Q)
+DECLARE_INSN(fclass_s, MATCH_FCLASS_S, MASK_FCLASS_S)
+DECLARE_INSN(fcvt_d_h, MATCH_FCVT_D_H, MASK_FCVT_D_H)
+DECLARE_INSN(fcvt_d_l, MATCH_FCVT_D_L, MASK_FCVT_D_L)
+DECLARE_INSN(fcvt_d_lu, MATCH_FCVT_D_LU, MASK_FCVT_D_LU)
+DECLARE_INSN(fcvt_d_q, MATCH_FCVT_D_Q, MASK_FCVT_D_Q)
+DECLARE_INSN(fcvt_d_s, MATCH_FCVT_D_S, MASK_FCVT_D_S)
+DECLARE_INSN(fcvt_d_w, MATCH_FCVT_D_W, MASK_FCVT_D_W)
+DECLARE_INSN(fcvt_d_wu, MATCH_FCVT_D_WU, MASK_FCVT_D_WU)
+DECLARE_INSN(fcvt_h_d, MATCH_FCVT_H_D, MASK_FCVT_H_D)
+DECLARE_INSN(fcvt_h_l, MATCH_FCVT_H_L, MASK_FCVT_H_L)
+DECLARE_INSN(fcvt_h_lu, MATCH_FCVT_H_LU, MASK_FCVT_H_LU)
+DECLARE_INSN(fcvt_h_q, MATCH_FCVT_H_Q, MASK_FCVT_H_Q)
+DECLARE_INSN(fcvt_h_s, MATCH_FCVT_H_S, MASK_FCVT_H_S)
+DECLARE_INSN(fcvt_h_w, MATCH_FCVT_H_W, MASK_FCVT_H_W)
+DECLARE_INSN(fcvt_h_wu, MATCH_FCVT_H_WU, MASK_FCVT_H_WU)
+DECLARE_INSN(fcvt_l_d, MATCH_FCVT_L_D, MASK_FCVT_L_D)
+DECLARE_INSN(fcvt_l_h, MATCH_FCVT_L_H, MASK_FCVT_L_H)
+DECLARE_INSN(fcvt_l_q, MATCH_FCVT_L_Q, MASK_FCVT_L_Q)
+DECLARE_INSN(fcvt_l_s, MATCH_FCVT_L_S, MASK_FCVT_L_S)
+DECLARE_INSN(fcvt_lu_d, MATCH_FCVT_LU_D, MASK_FCVT_LU_D)
+DECLARE_INSN(fcvt_lu_h, MATCH_FCVT_LU_H, MASK_FCVT_LU_H)
+DECLARE_INSN(fcvt_lu_q, MATCH_FCVT_LU_Q, MASK_FCVT_LU_Q)
+DECLARE_INSN(fcvt_lu_s, MATCH_FCVT_LU_S, MASK_FCVT_LU_S)
+DECLARE_INSN(fcvt_q_d, MATCH_FCVT_Q_D, MASK_FCVT_Q_D)
+DECLARE_INSN(fcvt_q_h, MATCH_FCVT_Q_H, MASK_FCVT_Q_H)
+DECLARE_INSN(fcvt_q_l, MATCH_FCVT_Q_L, MASK_FCVT_Q_L)
+DECLARE_INSN(fcvt_q_lu, MATCH_FCVT_Q_LU, MASK_FCVT_Q_LU)
+DECLARE_INSN(fcvt_q_s, MATCH_FCVT_Q_S, MASK_FCVT_Q_S)
+DECLARE_INSN(fcvt_q_w, MATCH_FCVT_Q_W, MASK_FCVT_Q_W)
+DECLARE_INSN(fcvt_q_wu, MATCH_FCVT_Q_WU, MASK_FCVT_Q_WU)
+DECLARE_INSN(fcvt_s_d, MATCH_FCVT_S_D, MASK_FCVT_S_D)
+DECLARE_INSN(fcvt_s_h, MATCH_FCVT_S_H, MASK_FCVT_S_H)
+DECLARE_INSN(fcvt_s_l, MATCH_FCVT_S_L, MASK_FCVT_S_L)
+DECLARE_INSN(fcvt_s_lu, MATCH_FCVT_S_LU, MASK_FCVT_S_LU)
+DECLARE_INSN(fcvt_s_q, MATCH_FCVT_S_Q, MASK_FCVT_S_Q)
+DECLARE_INSN(fcvt_s_w, MATCH_FCVT_S_W, MASK_FCVT_S_W)
+DECLARE_INSN(fcvt_s_wu, MATCH_FCVT_S_WU, MASK_FCVT_S_WU)
+DECLARE_INSN(fcvt_w_d, MATCH_FCVT_W_D, MASK_FCVT_W_D)
+DECLARE_INSN(fcvt_w_h, MATCH_FCVT_W_H, MASK_FCVT_W_H)
+DECLARE_INSN(fcvt_w_q, MATCH_FCVT_W_Q, MASK_FCVT_W_Q)
+DECLARE_INSN(fcvt_w_s, MATCH_FCVT_W_S, MASK_FCVT_W_S)
+DECLARE_INSN(fcvt_wu_d, MATCH_FCVT_WU_D, MASK_FCVT_WU_D)
+DECLARE_INSN(fcvt_wu_h, MATCH_FCVT_WU_H, MASK_FCVT_WU_H)
+DECLARE_INSN(fcvt_wu_q, MATCH_FCVT_WU_Q, MASK_FCVT_WU_Q)
+DECLARE_INSN(fcvt_wu_s, MATCH_FCVT_WU_S, MASK_FCVT_WU_S)
+DECLARE_INSN(fdiv_d, MATCH_FDIV_D, MASK_FDIV_D)
+DECLARE_INSN(fdiv_h, MATCH_FDIV_H, MASK_FDIV_H)
+DECLARE_INSN(fdiv_q, MATCH_FDIV_Q, MASK_FDIV_Q)
+DECLARE_INSN(fdiv_s, MATCH_FDIV_S, MASK_FDIV_S)
+DECLARE_INSN(fence, MATCH_FENCE, MASK_FENCE)
+DECLARE_INSN(fence_i, MATCH_FENCE_I, MASK_FENCE_I)
+DECLARE_INSN(feq_d, MATCH_FEQ_D, MASK_FEQ_D)
+DECLARE_INSN(feq_h, MATCH_FEQ_H, MASK_FEQ_H)
+DECLARE_INSN(feq_q, MATCH_FEQ_Q, MASK_FEQ_Q)
+DECLARE_INSN(feq_s, MATCH_FEQ_S, MASK_FEQ_S)
+DECLARE_INSN(fld, MATCH_FLD, MASK_FLD)
+DECLARE_INSN(fle_d, MATCH_FLE_D, MASK_FLE_D)
+DECLARE_INSN(fle_h, MATCH_FLE_H, MASK_FLE_H)
+DECLARE_INSN(fle_q, MATCH_FLE_Q, MASK_FLE_Q)
+DECLARE_INSN(fle_s, MATCH_FLE_S, MASK_FLE_S)
+DECLARE_INSN(flh, MATCH_FLH, MASK_FLH)
+DECLARE_INSN(flq, MATCH_FLQ, MASK_FLQ)
+DECLARE_INSN(flt_d, MATCH_FLT_D, MASK_FLT_D)
+DECLARE_INSN(flt_h, MATCH_FLT_H, MASK_FLT_H)
+DECLARE_INSN(flt_q, MATCH_FLT_Q, MASK_FLT_Q)
+DECLARE_INSN(flt_s, MATCH_FLT_S, MASK_FLT_S)
+DECLARE_INSN(flw, MATCH_FLW, MASK_FLW)
+DECLARE_INSN(fmadd_d, MATCH_FMADD_D, MASK_FMADD_D)
+DECLARE_INSN(fmadd_h, MATCH_FMADD_H, MASK_FMADD_H)
+DECLARE_INSN(fmadd_q, MATCH_FMADD_Q, MASK_FMADD_Q)
+DECLARE_INSN(fmadd_s, MATCH_FMADD_S, MASK_FMADD_S)
+DECLARE_INSN(fmax_d, MATCH_FMAX_D, MASK_FMAX_D)
+DECLARE_INSN(fmax_h, MATCH_FMAX_H, MASK_FMAX_H)
+DECLARE_INSN(fmax_q, MATCH_FMAX_Q, MASK_FMAX_Q)
+DECLARE_INSN(fmax_s, MATCH_FMAX_S, MASK_FMAX_S)
+DECLARE_INSN(fmin_d, MATCH_FMIN_D, MASK_FMIN_D)
+DECLARE_INSN(fmin_h, MATCH_FMIN_H, MASK_FMIN_H)
+DECLARE_INSN(fmin_q, MATCH_FMIN_Q, MASK_FMIN_Q)
+DECLARE_INSN(fmin_s, MATCH_FMIN_S, MASK_FMIN_S)
+DECLARE_INSN(fmsub_d, MATCH_FMSUB_D, MASK_FMSUB_D)
+DECLARE_INSN(fmsub_h, MATCH_FMSUB_H, MASK_FMSUB_H)
+DECLARE_INSN(fmsub_q, MATCH_FMSUB_Q, MASK_FMSUB_Q)
+DECLARE_INSN(fmsub_s, MATCH_FMSUB_S, MASK_FMSUB_S)
+DECLARE_INSN(fmul_d, MATCH_FMUL_D, MASK_FMUL_D)
+DECLARE_INSN(fmul_h, MATCH_FMUL_H, MASK_FMUL_H)
+DECLARE_INSN(fmul_q, MATCH_FMUL_Q, MASK_FMUL_Q)
+DECLARE_INSN(fmul_s, MATCH_FMUL_S, MASK_FMUL_S)
+DECLARE_INSN(fmv_d_x, MATCH_FMV_D_X, MASK_FMV_D_X)
+DECLARE_INSN(fmv_h_x, MATCH_FMV_H_X, MASK_FMV_H_X)
+DECLARE_INSN(fmv_w_x, MATCH_FMV_W_X, MASK_FMV_W_X)
+DECLARE_INSN(fmv_x_d, MATCH_FMV_X_D, MASK_FMV_X_D)
+DECLARE_INSN(fmv_x_h, MATCH_FMV_X_H, MASK_FMV_X_H)
+DECLARE_INSN(fmv_x_w, MATCH_FMV_X_W, MASK_FMV_X_W)
+DECLARE_INSN(fnmadd_d, MATCH_FNMADD_D, MASK_FNMADD_D)
+DECLARE_INSN(fnmadd_h, MATCH_FNMADD_H, MASK_FNMADD_H)
+DECLARE_INSN(fnmadd_q, MATCH_FNMADD_Q, MASK_FNMADD_Q)
+DECLARE_INSN(fnmadd_s, MATCH_FNMADD_S, MASK_FNMADD_S)
+DECLARE_INSN(fnmsub_d, MATCH_FNMSUB_D, MASK_FNMSUB_D)
+DECLARE_INSN(fnmsub_h, MATCH_FNMSUB_H, MASK_FNMSUB_H)
+DECLARE_INSN(fnmsub_q, MATCH_FNMSUB_Q, MASK_FNMSUB_Q)
+DECLARE_INSN(fnmsub_s, MATCH_FNMSUB_S, MASK_FNMSUB_S)
+DECLARE_INSN(fsd, MATCH_FSD, MASK_FSD)
+DECLARE_INSN(fsgnj_d, MATCH_FSGNJ_D, MASK_FSGNJ_D)
+DECLARE_INSN(fsgnj_h, MATCH_FSGNJ_H, MASK_FSGNJ_H)
+DECLARE_INSN(fsgnj_q, MATCH_FSGNJ_Q, MASK_FSGNJ_Q)
+DECLARE_INSN(fsgnj_s, MATCH_FSGNJ_S, MASK_FSGNJ_S)
+DECLARE_INSN(fsgnjn_d, MATCH_FSGNJN_D, MASK_FSGNJN_D)
+DECLARE_INSN(fsgnjn_h, MATCH_FSGNJN_H, MASK_FSGNJN_H)
+DECLARE_INSN(fsgnjn_q, MATCH_FSGNJN_Q, MASK_FSGNJN_Q)
+DECLARE_INSN(fsgnjn_s, MATCH_FSGNJN_S, MASK_FSGNJN_S)
+DECLARE_INSN(fsgnjx_d, MATCH_FSGNJX_D, MASK_FSGNJX_D)
+DECLARE_INSN(fsgnjx_h, MATCH_FSGNJX_H, MASK_FSGNJX_H)
+DECLARE_INSN(fsgnjx_q, MATCH_FSGNJX_Q, MASK_FSGNJX_Q)
+DECLARE_INSN(fsgnjx_s, MATCH_FSGNJX_S, MASK_FSGNJX_S)
+DECLARE_INSN(fsh, MATCH_FSH, MASK_FSH)
+DECLARE_INSN(fsl, MATCH_FSL, MASK_FSL)
+DECLARE_INSN(fslw, MATCH_FSLW, MASK_FSLW)
+DECLARE_INSN(fsq, MATCH_FSQ, MASK_FSQ)
+DECLARE_INSN(fsqrt_d, MATCH_FSQRT_D, MASK_FSQRT_D)
+DECLARE_INSN(fsqrt_h, MATCH_FSQRT_H, MASK_FSQRT_H)
+DECLARE_INSN(fsqrt_q, MATCH_FSQRT_Q, MASK_FSQRT_Q)
+DECLARE_INSN(fsqrt_s, MATCH_FSQRT_S, MASK_FSQRT_S)
+DECLARE_INSN(fsr, MATCH_FSR, MASK_FSR)
+DECLARE_INSN(fsri, MATCH_FSRI, MASK_FSRI)
+DECLARE_INSN(fsriw, MATCH_FSRIW, MASK_FSRIW)
+DECLARE_INSN(fsrw, MATCH_FSRW, MASK_FSRW)
+DECLARE_INSN(fsub_d, MATCH_FSUB_D, MASK_FSUB_D)
+DECLARE_INSN(fsub_h, MATCH_FSUB_H, MASK_FSUB_H)
+DECLARE_INSN(fsub_q, MATCH_FSUB_Q, MASK_FSUB_Q)
+DECLARE_INSN(fsub_s, MATCH_FSUB_S, MASK_FSUB_S)
+DECLARE_INSN(fsw, MATCH_FSW, MASK_FSW)
+DECLARE_INSN(gorc, MATCH_GORC, MASK_GORC)
+DECLARE_INSN(gorci, MATCH_GORCI, MASK_GORCI)
+DECLARE_INSN(gorciw, MATCH_GORCIW, MASK_GORCIW)
+DECLARE_INSN(gorcw, MATCH_GORCW, MASK_GORCW)
+DECLARE_INSN(grev, MATCH_GREV, MASK_GREV)
+DECLARE_INSN(grevi, MATCH_GREVI, MASK_GREVI)
+DECLARE_INSN(greviw, MATCH_GREVIW, MASK_GREVIW)
+DECLARE_INSN(grevw, MATCH_GREVW, MASK_GREVW)
+DECLARE_INSN(hfence_gvma, MATCH_HFENCE_GVMA, MASK_HFENCE_GVMA)
+DECLARE_INSN(hfence_vvma, MATCH_HFENCE_VVMA, MASK_HFENCE_VVMA)
+DECLARE_INSN(hinval_gvma, MATCH_HINVAL_GVMA, MASK_HINVAL_GVMA)
+DECLARE_INSN(hinval_vvma, MATCH_HINVAL_VVMA, MASK_HINVAL_VVMA)
+DECLARE_INSN(hlv_b, MATCH_HLV_B, MASK_HLV_B)
+DECLARE_INSN(hlv_bu, MATCH_HLV_BU, MASK_HLV_BU)
+DECLARE_INSN(hlv_d, MATCH_HLV_D, MASK_HLV_D)
+DECLARE_INSN(hlv_h, MATCH_HLV_H, MASK_HLV_H)
+DECLARE_INSN(hlv_hu, MATCH_HLV_HU, MASK_HLV_HU)
+DECLARE_INSN(hlv_w, MATCH_HLV_W, MASK_HLV_W)
+DECLARE_INSN(hlv_wu, MATCH_HLV_WU, MASK_HLV_WU)
+DECLARE_INSN(hlvx_hu, MATCH_HLVX_HU, MASK_HLVX_HU)
+DECLARE_INSN(hlvx_wu, MATCH_HLVX_WU, MASK_HLVX_WU)
+DECLARE_INSN(hsv_b, MATCH_HSV_B, MASK_HSV_B)
+DECLARE_INSN(hsv_d, MATCH_HSV_D, MASK_HSV_D)
+DECLARE_INSN(hsv_h, MATCH_HSV_H, MASK_HSV_H)
+DECLARE_INSN(hsv_w, MATCH_HSV_W, MASK_HSV_W)
DECLARE_INSN(insb, MATCH_INSB, MASK_INSB)
-DECLARE_INSN(kabs8, MATCH_KABS8, MASK_KABS8)
+DECLARE_INSN(jal, MATCH_JAL, MASK_JAL)
+DECLARE_INSN(jalr, MATCH_JALR, MASK_JALR)
DECLARE_INSN(kabs16, MATCH_KABS16, MASK_KABS16)
+DECLARE_INSN(kabs32, MATCH_KABS32, MASK_KABS32)
+DECLARE_INSN(kabs8, MATCH_KABS8, MASK_KABS8)
DECLARE_INSN(kabsw, MATCH_KABSW, MASK_KABSW)
-DECLARE_INSN(kadd8, MATCH_KADD8, MASK_KADD8)
DECLARE_INSN(kadd16, MATCH_KADD16, MASK_KADD16)
+DECLARE_INSN(kadd32, MATCH_KADD32, MASK_KADD32)
DECLARE_INSN(kadd64, MATCH_KADD64, MASK_KADD64)
+DECLARE_INSN(kadd8, MATCH_KADD8, MASK_KADD8)
DECLARE_INSN(kaddh, MATCH_KADDH, MASK_KADDH)
DECLARE_INSN(kaddw, MATCH_KADDW, MASK_KADDW)
DECLARE_INSN(kcras16, MATCH_KCRAS16, MASK_KCRAS16)
+DECLARE_INSN(kcras32, MATCH_KCRAS32, MASK_KCRAS32)
DECLARE_INSN(kcrsa16, MATCH_KCRSA16, MASK_KCRSA16)
-DECLARE_INSN(kdmbb, MATCH_KDMBB, MASK_KDMBB)
-DECLARE_INSN(kdmbt, MATCH_KDMBT, MASK_KDMBT)
-DECLARE_INSN(kdmtt, MATCH_KDMTT, MASK_KDMTT)
+DECLARE_INSN(kcrsa32, MATCH_KCRSA32, MASK_KCRSA32)
DECLARE_INSN(kdmabb, MATCH_KDMABB, MASK_KDMABB)
+DECLARE_INSN(kdmabb16, MATCH_KDMABB16, MASK_KDMABB16)
DECLARE_INSN(kdmabt, MATCH_KDMABT, MASK_KDMABT)
+DECLARE_INSN(kdmabt16, MATCH_KDMABT16, MASK_KDMABT16)
DECLARE_INSN(kdmatt, MATCH_KDMATT, MASK_KDMATT)
-DECLARE_INSN(khm8, MATCH_KHM8, MASK_KHM8)
-DECLARE_INSN(khmx8, MATCH_KHMX8, MASK_KHMX8)
+DECLARE_INSN(kdmatt16, MATCH_KDMATT16, MASK_KDMATT16)
+DECLARE_INSN(kdmbb, MATCH_KDMBB, MASK_KDMBB)
+DECLARE_INSN(kdmbb16, MATCH_KDMBB16, MASK_KDMBB16)
+DECLARE_INSN(kdmbt, MATCH_KDMBT, MASK_KDMBT)
+DECLARE_INSN(kdmbt16, MATCH_KDMBT16, MASK_KDMBT16)
+DECLARE_INSN(kdmtt, MATCH_KDMTT, MASK_KDMTT)
+DECLARE_INSN(kdmtt16, MATCH_KDMTT16, MASK_KDMTT16)
DECLARE_INSN(khm16, MATCH_KHM16, MASK_KHM16)
-DECLARE_INSN(khmx16, MATCH_KHMX16, MASK_KHMX16)
+DECLARE_INSN(khm8, MATCH_KHM8, MASK_KHM8)
DECLARE_INSN(khmbb, MATCH_KHMBB, MASK_KHMBB)
+DECLARE_INSN(khmbb16, MATCH_KHMBB16, MASK_KHMBB16)
DECLARE_INSN(khmbt, MATCH_KHMBT, MASK_KHMBT)
+DECLARE_INSN(khmbt16, MATCH_KHMBT16, MASK_KHMBT16)
DECLARE_INSN(khmtt, MATCH_KHMTT, MASK_KHMTT)
+DECLARE_INSN(khmtt16, MATCH_KHMTT16, MASK_KHMTT16)
+DECLARE_INSN(khmx16, MATCH_KHMX16, MASK_KHMX16)
+DECLARE_INSN(khmx8, MATCH_KHMX8, MASK_KHMX8)
DECLARE_INSN(kmabb, MATCH_KMABB, MASK_KMABB)
+DECLARE_INSN(kmabb32, MATCH_KMABB32, MASK_KMABB32)
DECLARE_INSN(kmabt, MATCH_KMABT, MASK_KMABT)
-DECLARE_INSN(kmatt, MATCH_KMATT, MASK_KMATT)
+DECLARE_INSN(kmabt32, MATCH_KMABT32, MASK_KMABT32)
DECLARE_INSN(kmada, MATCH_KMADA, MASK_KMADA)
-DECLARE_INSN(kmaxda, MATCH_KMAXDA, MASK_KMAXDA)
-DECLARE_INSN(kmads, MATCH_KMADS, MASK_KMADS)
DECLARE_INSN(kmadrs, MATCH_KMADRS, MASK_KMADRS)
-DECLARE_INSN(kmaxds, MATCH_KMAXDS, MASK_KMAXDS)
+DECLARE_INSN(kmadrs32, MATCH_KMADRS32, MASK_KMADRS32)
+DECLARE_INSN(kmads, MATCH_KMADS, MASK_KMADS)
+DECLARE_INSN(kmads32, MATCH_KMADS32, MASK_KMADS32)
DECLARE_INSN(kmar64, MATCH_KMAR64, MASK_KMAR64)
+DECLARE_INSN(kmatt, MATCH_KMATT, MASK_KMATT)
+DECLARE_INSN(kmatt32, MATCH_KMATT32, MASK_KMATT32)
+DECLARE_INSN(kmaxda, MATCH_KMAXDA, MASK_KMAXDA)
+DECLARE_INSN(kmaxda32, MATCH_KMAXDA32, MASK_KMAXDA32)
+DECLARE_INSN(kmaxds, MATCH_KMAXDS, MASK_KMAXDS)
+DECLARE_INSN(kmaxds32, MATCH_KMAXDS32, MASK_KMAXDS32)
DECLARE_INSN(kmda, MATCH_KMDA, MASK_KMDA)
-DECLARE_INSN(kmxda, MATCH_KMXDA, MASK_KMXDA)
+DECLARE_INSN(kmda32, MATCH_KMDA32, MASK_KMDA32)
DECLARE_INSN(kmmac, MATCH_KMMAC, MASK_KMMAC)
DECLARE_INSN(kmmac_u, MATCH_KMMAC_U, MASK_KMMAC_U)
DECLARE_INSN(kmmawb, MATCH_KMMAWB, MASK_KMMAWB)
-DECLARE_INSN(kmmawb_u, MATCH_KMMAWB_U, MASK_KMMAWB_U)
DECLARE_INSN(kmmawb2, MATCH_KMMAWB2, MASK_KMMAWB2)
DECLARE_INSN(kmmawb2_u, MATCH_KMMAWB2_U, MASK_KMMAWB2_U)
+DECLARE_INSN(kmmawb_u, MATCH_KMMAWB_U, MASK_KMMAWB_U)
DECLARE_INSN(kmmawt, MATCH_KMMAWT, MASK_KMMAWT)
-DECLARE_INSN(kmmawt_u, MATCH_KMMAWT_U, MASK_KMMAWT_U)
DECLARE_INSN(kmmawt2, MATCH_KMMAWT2, MASK_KMMAWT2)
DECLARE_INSN(kmmawt2_u, MATCH_KMMAWT2_U, MASK_KMMAWT2_U)
+DECLARE_INSN(kmmawt_u, MATCH_KMMAWT_U, MASK_KMMAWT_U)
DECLARE_INSN(kmmsb, MATCH_KMMSB, MASK_KMMSB)
DECLARE_INSN(kmmsb_u, MATCH_KMMSB_U, MASK_KMMSB_U)
DECLARE_INSN(kmmwb2, MATCH_KMMWB2, MASK_KMMWB2)
@@ -4180,86 +3655,205 @@ DECLARE_INSN(kmmwb2_u, MATCH_KMMWB2_U, MASK_KMMWB2_U)
DECLARE_INSN(kmmwt2, MATCH_KMMWT2, MASK_KMMWT2)
DECLARE_INSN(kmmwt2_u, MATCH_KMMWT2_U, MASK_KMMWT2_U)
DECLARE_INSN(kmsda, MATCH_KMSDA, MASK_KMSDA)
-DECLARE_INSN(kmsxda, MATCH_KMSXDA, MASK_KMSXDA)
+DECLARE_INSN(kmsda32, MATCH_KMSDA32, MASK_KMSDA32)
DECLARE_INSN(kmsr64, MATCH_KMSR64, MASK_KMSR64)
-DECLARE_INSN(ksllw, MATCH_KSLLW, MASK_KSLLW)
-DECLARE_INSN(kslliw, MATCH_KSLLIW, MASK_KSLLIW)
-DECLARE_INSN(ksll8, MATCH_KSLL8, MASK_KSLL8)
-DECLARE_INSN(kslli8, MATCH_KSLLI8, MASK_KSLLI8)
+DECLARE_INSN(kmsxda, MATCH_KMSXDA, MASK_KMSXDA)
+DECLARE_INSN(kmsxda32, MATCH_KMSXDA32, MASK_KMSXDA32)
+DECLARE_INSN(kmxda, MATCH_KMXDA, MASK_KMXDA)
+DECLARE_INSN(kmxda32, MATCH_KMXDA32, MASK_KMXDA32)
DECLARE_INSN(ksll16, MATCH_KSLL16, MASK_KSLL16)
+DECLARE_INSN(ksll32, MATCH_KSLL32, MASK_KSLL32)
+DECLARE_INSN(ksll8, MATCH_KSLL8, MASK_KSLL8)
DECLARE_INSN(kslli16, MATCH_KSLLI16, MASK_KSLLI16)
-DECLARE_INSN(kslra8, MATCH_KSLRA8, MASK_KSLRA8)
-DECLARE_INSN(kslra8_u, MATCH_KSLRA8_U, MASK_KSLRA8_U)
+DECLARE_INSN(kslli32, MATCH_KSLLI32, MASK_KSLLI32)
+DECLARE_INSN(kslli8, MATCH_KSLLI8, MASK_KSLLI8)
+DECLARE_INSN(kslliw, MATCH_KSLLIW, MASK_KSLLIW)
+DECLARE_INSN(ksllw, MATCH_KSLLW, MASK_KSLLW)
DECLARE_INSN(kslra16, MATCH_KSLRA16, MASK_KSLRA16)
DECLARE_INSN(kslra16_u, MATCH_KSLRA16_U, MASK_KSLRA16_U)
+DECLARE_INSN(kslra32, MATCH_KSLRA32, MASK_KSLRA32)
+DECLARE_INSN(kslra32_u, MATCH_KSLRA32_U, MASK_KSLRA32_U)
+DECLARE_INSN(kslra8, MATCH_KSLRA8, MASK_KSLRA8)
+DECLARE_INSN(kslra8_u, MATCH_KSLRA8_U, MASK_KSLRA8_U)
DECLARE_INSN(kslraw, MATCH_KSLRAW, MASK_KSLRAW)
DECLARE_INSN(kslraw_u, MATCH_KSLRAW_U, MASK_KSLRAW_U)
DECLARE_INSN(kstas16, MATCH_KSTAS16, MASK_KSTAS16)
+DECLARE_INSN(kstas32, MATCH_KSTAS32, MASK_KSTAS32)
DECLARE_INSN(kstsa16, MATCH_KSTSA16, MASK_KSTSA16)
-DECLARE_INSN(ksub8, MATCH_KSUB8, MASK_KSUB8)
+DECLARE_INSN(kstsa32, MATCH_KSTSA32, MASK_KSTSA32)
DECLARE_INSN(ksub16, MATCH_KSUB16, MASK_KSUB16)
+DECLARE_INSN(ksub32, MATCH_KSUB32, MASK_KSUB32)
DECLARE_INSN(ksub64, MATCH_KSUB64, MASK_KSUB64)
+DECLARE_INSN(ksub8, MATCH_KSUB8, MASK_KSUB8)
DECLARE_INSN(ksubh, MATCH_KSUBH, MASK_KSUBH)
DECLARE_INSN(ksubw, MATCH_KSUBW, MASK_KSUBW)
DECLARE_INSN(kwmmul, MATCH_KWMMUL, MASK_KWMMUL)
DECLARE_INSN(kwmmul_u, MATCH_KWMMUL_U, MASK_KWMMUL_U)
+DECLARE_INSN(lb, MATCH_LB, MASK_LB)
+DECLARE_INSN(lbu, MATCH_LBU, MASK_LBU)
+DECLARE_INSN(ld, MATCH_LD, MASK_LD)
+DECLARE_INSN(lh, MATCH_LH, MASK_LH)
+DECLARE_INSN(lhu, MATCH_LHU, MASK_LHU)
+DECLARE_INSN(lr_d, MATCH_LR_D, MASK_LR_D)
+DECLARE_INSN(lr_w, MATCH_LR_W, MASK_LR_W)
+DECLARE_INSN(lui, MATCH_LUI, MASK_LUI)
+DECLARE_INSN(lw, MATCH_LW, MASK_LW)
+DECLARE_INSN(lwu, MATCH_LWU, MASK_LWU)
DECLARE_INSN(maddr32, MATCH_MADDR32, MASK_MADDR32)
+DECLARE_INSN(max, MATCH_MAX, MASK_MAX)
+DECLARE_INSN(maxu, MATCH_MAXU, MASK_MAXU)
DECLARE_INSN(maxw, MATCH_MAXW, MASK_MAXW)
+DECLARE_INSN(min, MATCH_MIN, MASK_MIN)
+DECLARE_INSN(minu, MATCH_MINU, MASK_MINU)
DECLARE_INSN(minw, MATCH_MINW, MASK_MINW)
+DECLARE_INSN(mret, MATCH_MRET, MASK_MRET)
DECLARE_INSN(msubr32, MATCH_MSUBR32, MASK_MSUBR32)
+DECLARE_INSN(mul, MATCH_MUL, MASK_MUL)
+DECLARE_INSN(mulh, MATCH_MULH, MASK_MULH)
+DECLARE_INSN(mulhsu, MATCH_MULHSU, MASK_MULHSU)
+DECLARE_INSN(mulhu, MATCH_MULHU, MASK_MULHU)
DECLARE_INSN(mulr64, MATCH_MULR64, MASK_MULR64)
DECLARE_INSN(mulsr64, MATCH_MULSR64, MASK_MULSR64)
+DECLARE_INSN(mulw, MATCH_MULW, MASK_MULW)
+DECLARE_INSN(or, MATCH_OR, MASK_OR)
+DECLARE_INSN(ori, MATCH_ORI, MASK_ORI)
+DECLARE_INSN(orn, MATCH_ORN, MASK_ORN)
+DECLARE_INSN(pack, MATCH_PACK, MASK_PACK)
+DECLARE_INSN(packh, MATCH_PACKH, MASK_PACKH)
+DECLARE_INSN(packu, MATCH_PACKU, MASK_PACKU)
+DECLARE_INSN(packuw, MATCH_PACKUW, MASK_PACKUW)
+DECLARE_INSN(packw, MATCH_PACKW, MASK_PACKW)
+DECLARE_INSN(pause, MATCH_PAUSE, MASK_PAUSE)
DECLARE_INSN(pbsad, MATCH_PBSAD, MASK_PBSAD)
DECLARE_INSN(pbsada, MATCH_PBSADA, MASK_PBSADA)
DECLARE_INSN(pkbb16, MATCH_PKBB16, MASK_PKBB16)
+DECLARE_INSN(pkbb32, MATCH_PKBB32, MASK_PKBB32)
DECLARE_INSN(pkbt16, MATCH_PKBT16, MASK_PKBT16)
-DECLARE_INSN(pktt16, MATCH_PKTT16, MASK_PKTT16)
+DECLARE_INSN(pkbt32, MATCH_PKBT32, MASK_PKBT32)
DECLARE_INSN(pktb16, MATCH_PKTB16, MASK_PKTB16)
-DECLARE_INSN(radd8, MATCH_RADD8, MASK_RADD8)
+DECLARE_INSN(pktb32, MATCH_PKTB32, MASK_PKTB32)
+DECLARE_INSN(pktt16, MATCH_PKTT16, MASK_PKTT16)
+DECLARE_INSN(pktt32, MATCH_PKTT32, MASK_PKTT32)
+DECLARE_INSN(prefetch_i, MATCH_PREFETCH_I, MASK_PREFETCH_I)
+DECLARE_INSN(prefetch_r, MATCH_PREFETCH_R, MASK_PREFETCH_R)
+DECLARE_INSN(prefetch_w, MATCH_PREFETCH_W, MASK_PREFETCH_W)
DECLARE_INSN(radd16, MATCH_RADD16, MASK_RADD16)
+DECLARE_INSN(radd32, MATCH_RADD32, MASK_RADD32)
DECLARE_INSN(radd64, MATCH_RADD64, MASK_RADD64)
+DECLARE_INSN(radd8, MATCH_RADD8, MASK_RADD8)
DECLARE_INSN(raddw, MATCH_RADDW, MASK_RADDW)
DECLARE_INSN(rcras16, MATCH_RCRAS16, MASK_RCRAS16)
+DECLARE_INSN(rcras32, MATCH_RCRAS32, MASK_RCRAS32)
DECLARE_INSN(rcrsa16, MATCH_RCRSA16, MASK_RCRSA16)
+DECLARE_INSN(rcrsa32, MATCH_RCRSA32, MASK_RCRSA32)
+DECLARE_INSN(rem, MATCH_REM, MASK_REM)
+DECLARE_INSN(remu, MATCH_REMU, MASK_REMU)
+DECLARE_INSN(remuw, MATCH_REMUW, MASK_REMUW)
+DECLARE_INSN(remw, MATCH_REMW, MASK_REMW)
+DECLARE_INSN(rol, MATCH_ROL, MASK_ROL)
+DECLARE_INSN(rolw, MATCH_ROLW, MASK_ROLW)
+DECLARE_INSN(ror, MATCH_ROR, MASK_ROR)
+DECLARE_INSN(rori, MATCH_RORI, MASK_RORI)
+DECLARE_INSN(roriw, MATCH_RORIW, MASK_RORIW)
+DECLARE_INSN(rorw, MATCH_RORW, MASK_RORW)
DECLARE_INSN(rstas16, MATCH_RSTAS16, MASK_RSTAS16)
+DECLARE_INSN(rstas32, MATCH_RSTAS32, MASK_RSTAS32)
DECLARE_INSN(rstsa16, MATCH_RSTSA16, MASK_RSTSA16)
-DECLARE_INSN(rsub8, MATCH_RSUB8, MASK_RSUB8)
+DECLARE_INSN(rstsa32, MATCH_RSTSA32, MASK_RSTSA32)
DECLARE_INSN(rsub16, MATCH_RSUB16, MASK_RSUB16)
+DECLARE_INSN(rsub32, MATCH_RSUB32, MASK_RSUB32)
DECLARE_INSN(rsub64, MATCH_RSUB64, MASK_RSUB64)
+DECLARE_INSN(rsub8, MATCH_RSUB8, MASK_RSUB8)
DECLARE_INSN(rsubw, MATCH_RSUBW, MASK_RSUBW)
-DECLARE_INSN(sclip8, MATCH_SCLIP8, MASK_SCLIP8)
+DECLARE_INSN(sb, MATCH_SB, MASK_SB)
+DECLARE_INSN(sc_d, MATCH_SC_D, MASK_SC_D)
+DECLARE_INSN(sc_w, MATCH_SC_W, MASK_SC_W)
DECLARE_INSN(sclip16, MATCH_SCLIP16, MASK_SCLIP16)
DECLARE_INSN(sclip32, MATCH_SCLIP32, MASK_SCLIP32)
-DECLARE_INSN(scmple8, MATCH_SCMPLE8, MASK_SCMPLE8)
+DECLARE_INSN(sclip8, MATCH_SCLIP8, MASK_SCLIP8)
DECLARE_INSN(scmple16, MATCH_SCMPLE16, MASK_SCMPLE16)
-DECLARE_INSN(scmplt8, MATCH_SCMPLT8, MASK_SCMPLT8)
+DECLARE_INSN(scmple8, MATCH_SCMPLE8, MASK_SCMPLE8)
DECLARE_INSN(scmplt16, MATCH_SCMPLT16, MASK_SCMPLT16)
-DECLARE_INSN(sll8, MATCH_SLL8, MASK_SLL8)
-DECLARE_INSN(slli8, MATCH_SLLI8, MASK_SLLI8)
+DECLARE_INSN(scmplt8, MATCH_SCMPLT8, MASK_SCMPLT8)
+DECLARE_INSN(sd, MATCH_SD, MASK_SD)
+DECLARE_INSN(sext_b, MATCH_SEXT_B, MASK_SEXT_B)
+DECLARE_INSN(sext_h, MATCH_SEXT_H, MASK_SEXT_H)
+DECLARE_INSN(sfence_inval_ir, MATCH_SFENCE_INVAL_IR, MASK_SFENCE_INVAL_IR)
+DECLARE_INSN(sfence_vma, MATCH_SFENCE_VMA, MASK_SFENCE_VMA)
+DECLARE_INSN(sfence_w_inval, MATCH_SFENCE_W_INVAL, MASK_SFENCE_W_INVAL)
+DECLARE_INSN(sh, MATCH_SH, MASK_SH)
+DECLARE_INSN(sh1add, MATCH_SH1ADD, MASK_SH1ADD)
+DECLARE_INSN(sh1add_uw, MATCH_SH1ADD_UW, MASK_SH1ADD_UW)
+DECLARE_INSN(sh2add, MATCH_SH2ADD, MASK_SH2ADD)
+DECLARE_INSN(sh2add_uw, MATCH_SH2ADD_UW, MASK_SH2ADD_UW)
+DECLARE_INSN(sh3add, MATCH_SH3ADD, MASK_SH3ADD)
+DECLARE_INSN(sh3add_uw, MATCH_SH3ADD_UW, MASK_SH3ADD_UW)
+DECLARE_INSN(sha256sig0, MATCH_SHA256SIG0, MASK_SHA256SIG0)
+DECLARE_INSN(sha256sig1, MATCH_SHA256SIG1, MASK_SHA256SIG1)
+DECLARE_INSN(sha256sum0, MATCH_SHA256SUM0, MASK_SHA256SUM0)
+DECLARE_INSN(sha256sum1, MATCH_SHA256SUM1, MASK_SHA256SUM1)
+DECLARE_INSN(sha512sig0, MATCH_SHA512SIG0, MASK_SHA512SIG0)
+DECLARE_INSN(sha512sig0h, MATCH_SHA512SIG0H, MASK_SHA512SIG0H)
+DECLARE_INSN(sha512sig0l, MATCH_SHA512SIG0L, MASK_SHA512SIG0L)
+DECLARE_INSN(sha512sig1, MATCH_SHA512SIG1, MASK_SHA512SIG1)
+DECLARE_INSN(sha512sig1h, MATCH_SHA512SIG1H, MASK_SHA512SIG1H)
+DECLARE_INSN(sha512sig1l, MATCH_SHA512SIG1L, MASK_SHA512SIG1L)
+DECLARE_INSN(sha512sum0, MATCH_SHA512SUM0, MASK_SHA512SUM0)
+DECLARE_INSN(sha512sum0r, MATCH_SHA512SUM0R, MASK_SHA512SUM0R)
+DECLARE_INSN(sha512sum1, MATCH_SHA512SUM1, MASK_SHA512SUM1)
+DECLARE_INSN(sha512sum1r, MATCH_SHA512SUM1R, MASK_SHA512SUM1R)
+DECLARE_INSN(shfl, MATCH_SHFL, MASK_SHFL)
+DECLARE_INSN(shfli, MATCH_SHFLI, MASK_SHFLI)
+DECLARE_INSN(shflw, MATCH_SHFLW, MASK_SHFLW)
+DECLARE_INSN(sinval_vma, MATCH_SINVAL_VMA, MASK_SINVAL_VMA)
+DECLARE_INSN(sll, MATCH_SLL, MASK_SLL)
DECLARE_INSN(sll16, MATCH_SLL16, MASK_SLL16)
+DECLARE_INSN(sll32, MATCH_SLL32, MASK_SLL32)
+DECLARE_INSN(sll8, MATCH_SLL8, MASK_SLL8)
+DECLARE_INSN(slli, MATCH_SLLI, MASK_SLLI)
DECLARE_INSN(slli16, MATCH_SLLI16, MASK_SLLI16)
+DECLARE_INSN(slli32, MATCH_SLLI32, MASK_SLLI32)
+DECLARE_INSN(slli8, MATCH_SLLI8, MASK_SLLI8)
+DECLARE_INSN(slli_uw, MATCH_SLLI_UW, MASK_SLLI_UW)
+DECLARE_INSN(slliw, MATCH_SLLIW, MASK_SLLIW)
+DECLARE_INSN(sllw, MATCH_SLLW, MASK_SLLW)
+DECLARE_INSN(slo, MATCH_SLO, MASK_SLO)
+DECLARE_INSN(sloi, MATCH_SLOI, MASK_SLOI)
+DECLARE_INSN(sloiw, MATCH_SLOIW, MASK_SLOIW)
+DECLARE_INSN(slow, MATCH_SLOW, MASK_SLOW)
+DECLARE_INSN(slt, MATCH_SLT, MASK_SLT)
+DECLARE_INSN(slti, MATCH_SLTI, MASK_SLTI)
+DECLARE_INSN(sltiu, MATCH_SLTIU, MASK_SLTIU)
+DECLARE_INSN(sltu, MATCH_SLTU, MASK_SLTU)
+DECLARE_INSN(sm3p0, MATCH_SM3P0, MASK_SM3P0)
+DECLARE_INSN(sm3p1, MATCH_SM3P1, MASK_SM3P1)
+DECLARE_INSN(sm4ed, MATCH_SM4ED, MASK_SM4ED)
+DECLARE_INSN(sm4ks, MATCH_SM4KS, MASK_SM4KS)
DECLARE_INSN(smal, MATCH_SMAL, MASK_SMAL)
DECLARE_INSN(smalbb, MATCH_SMALBB, MASK_SMALBB)
DECLARE_INSN(smalbt, MATCH_SMALBT, MASK_SMALBT)
-DECLARE_INSN(smaltt, MATCH_SMALTT, MASK_SMALTT)
DECLARE_INSN(smalda, MATCH_SMALDA, MASK_SMALDA)
-DECLARE_INSN(smalxda, MATCH_SMALXDA, MASK_SMALXDA)
-DECLARE_INSN(smalds, MATCH_SMALDS, MASK_SMALDS)
DECLARE_INSN(smaldrs, MATCH_SMALDRS, MASK_SMALDRS)
+DECLARE_INSN(smalds, MATCH_SMALDS, MASK_SMALDS)
+DECLARE_INSN(smaltt, MATCH_SMALTT, MASK_SMALTT)
+DECLARE_INSN(smalxda, MATCH_SMALXDA, MASK_SMALXDA)
DECLARE_INSN(smalxds, MATCH_SMALXDS, MASK_SMALXDS)
-DECLARE_INSN(smar64, MATCH_SMAR64, MASK_SMAR64)
DECLARE_INSN(smaqa, MATCH_SMAQA, MASK_SMAQA)
DECLARE_INSN(smaqa_su, MATCH_SMAQA_SU, MASK_SMAQA_SU)
-DECLARE_INSN(smax8, MATCH_SMAX8, MASK_SMAX8)
+DECLARE_INSN(smar64, MATCH_SMAR64, MASK_SMAR64)
DECLARE_INSN(smax16, MATCH_SMAX16, MASK_SMAX16)
+DECLARE_INSN(smax32, MATCH_SMAX32, MASK_SMAX32)
+DECLARE_INSN(smax8, MATCH_SMAX8, MASK_SMAX8)
DECLARE_INSN(smbb16, MATCH_SMBB16, MASK_SMBB16)
DECLARE_INSN(smbt16, MATCH_SMBT16, MASK_SMBT16)
-DECLARE_INSN(smtt16, MATCH_SMTT16, MASK_SMTT16)
-DECLARE_INSN(smds, MATCH_SMDS, MASK_SMDS)
+DECLARE_INSN(smbt32, MATCH_SMBT32, MASK_SMBT32)
DECLARE_INSN(smdrs, MATCH_SMDRS, MASK_SMDRS)
-DECLARE_INSN(smxds, MATCH_SMXDS, MASK_SMXDS)
-DECLARE_INSN(smin8, MATCH_SMIN8, MASK_SMIN8)
+DECLARE_INSN(smdrs32, MATCH_SMDRS32, MASK_SMDRS32)
+DECLARE_INSN(smds, MATCH_SMDS, MASK_SMDS)
+DECLARE_INSN(smds32, MATCH_SMDS32, MASK_SMDS32)
DECLARE_INSN(smin16, MATCH_SMIN16, MASK_SMIN16)
+DECLARE_INSN(smin32, MATCH_SMIN32, MASK_SMIN32)
+DECLARE_INSN(smin8, MATCH_SMIN8, MASK_SMIN8)
DECLARE_INSN(smmul, MATCH_SMMUL, MASK_SMMUL)
DECLARE_INSN(smmul_u, MATCH_SMMUL_U, MASK_SMMUL_U)
DECLARE_INSN(smmwb, MATCH_SMMWB, MASK_SMMWB)
@@ -4269,183 +3863,598 @@ DECLARE_INSN(smmwt_u, MATCH_SMMWT_U, MASK_SMMWT_U)
DECLARE_INSN(smslda, MATCH_SMSLDA, MASK_SMSLDA)
DECLARE_INSN(smslxda, MATCH_SMSLXDA, MASK_SMSLXDA)
DECLARE_INSN(smsr64, MATCH_SMSR64, MASK_SMSR64)
-DECLARE_INSN(smul8, MATCH_SMUL8, MASK_SMUL8)
-DECLARE_INSN(smulx8, MATCH_SMULX8, MASK_SMULX8)
+DECLARE_INSN(smtt16, MATCH_SMTT16, MASK_SMTT16)
+DECLARE_INSN(smtt32, MATCH_SMTT32, MASK_SMTT32)
DECLARE_INSN(smul16, MATCH_SMUL16, MASK_SMUL16)
+DECLARE_INSN(smul8, MATCH_SMUL8, MASK_SMUL8)
DECLARE_INSN(smulx16, MATCH_SMULX16, MASK_SMULX16)
-DECLARE_INSN(sra_u, MATCH_SRA_U, MASK_SRA_U)
-DECLARE_INSN(srai_u, MATCH_SRAI_U, MASK_SRAI_U)
-DECLARE_INSN(sra8, MATCH_SRA8, MASK_SRA8)
-DECLARE_INSN(sra8_u, MATCH_SRA8_U, MASK_SRA8_U)
-DECLARE_INSN(srai8, MATCH_SRAI8, MASK_SRAI8)
-DECLARE_INSN(srai8_u, MATCH_SRAI8_U, MASK_SRAI8_U)
+DECLARE_INSN(smulx8, MATCH_SMULX8, MASK_SMULX8)
+DECLARE_INSN(smxds, MATCH_SMXDS, MASK_SMXDS)
+DECLARE_INSN(smxds32, MATCH_SMXDS32, MASK_SMXDS32)
+DECLARE_INSN(sra, MATCH_SRA, MASK_SRA)
DECLARE_INSN(sra16, MATCH_SRA16, MASK_SRA16)
DECLARE_INSN(sra16_u, MATCH_SRA16_U, MASK_SRA16_U)
+DECLARE_INSN(sra32, MATCH_SRA32, MASK_SRA32)
+DECLARE_INSN(sra32_u, MATCH_SRA32_U, MASK_SRA32_U)
+DECLARE_INSN(sra8, MATCH_SRA8, MASK_SRA8)
+DECLARE_INSN(sra8_u, MATCH_SRA8_U, MASK_SRA8_U)
+DECLARE_INSN(sra_u, MATCH_SRA_U, MASK_SRA_U)
+DECLARE_INSN(srai, MATCH_SRAI, MASK_SRAI)
DECLARE_INSN(srai16, MATCH_SRAI16, MASK_SRAI16)
DECLARE_INSN(srai16_u, MATCH_SRAI16_U, MASK_SRAI16_U)
-DECLARE_INSN(srl8, MATCH_SRL8, MASK_SRL8)
-DECLARE_INSN(srl8_u, MATCH_SRL8_U, MASK_SRL8_U)
-DECLARE_INSN(srli8, MATCH_SRLI8, MASK_SRLI8)
-DECLARE_INSN(srli8_u, MATCH_SRLI8_U, MASK_SRLI8_U)
+DECLARE_INSN(srai32, MATCH_SRAI32, MASK_SRAI32)
+DECLARE_INSN(srai32_u, MATCH_SRAI32_U, MASK_SRAI32_U)
+DECLARE_INSN(srai8, MATCH_SRAI8, MASK_SRAI8)
+DECLARE_INSN(srai8_u, MATCH_SRAI8_U, MASK_SRAI8_U)
+DECLARE_INSN(srai_u, MATCH_SRAI_U, MASK_SRAI_U)
+DECLARE_INSN(sraiw, MATCH_SRAIW, MASK_SRAIW)
+DECLARE_INSN(sraiw_u, MATCH_SRAIW_U, MASK_SRAIW_U)
+DECLARE_INSN(sraw, MATCH_SRAW, MASK_SRAW)
+DECLARE_INSN(sret, MATCH_SRET, MASK_SRET)
+DECLARE_INSN(srl, MATCH_SRL, MASK_SRL)
DECLARE_INSN(srl16, MATCH_SRL16, MASK_SRL16)
DECLARE_INSN(srl16_u, MATCH_SRL16_U, MASK_SRL16_U)
+DECLARE_INSN(srl32, MATCH_SRL32, MASK_SRL32)
+DECLARE_INSN(srl32_u, MATCH_SRL32_U, MASK_SRL32_U)
+DECLARE_INSN(srl8, MATCH_SRL8, MASK_SRL8)
+DECLARE_INSN(srl8_u, MATCH_SRL8_U, MASK_SRL8_U)
+DECLARE_INSN(srli, MATCH_SRLI, MASK_SRLI)
DECLARE_INSN(srli16, MATCH_SRLI16, MASK_SRLI16)
DECLARE_INSN(srli16_u, MATCH_SRLI16_U, MASK_SRLI16_U)
+DECLARE_INSN(srli32, MATCH_SRLI32, MASK_SRLI32)
+DECLARE_INSN(srli32_u, MATCH_SRLI32_U, MASK_SRLI32_U)
+DECLARE_INSN(srli8, MATCH_SRLI8, MASK_SRLI8)
+DECLARE_INSN(srli8_u, MATCH_SRLI8_U, MASK_SRLI8_U)
+DECLARE_INSN(srliw, MATCH_SRLIW, MASK_SRLIW)
+DECLARE_INSN(srlw, MATCH_SRLW, MASK_SRLW)
+DECLARE_INSN(sro, MATCH_SRO, MASK_SRO)
+DECLARE_INSN(sroi, MATCH_SROI, MASK_SROI)
+DECLARE_INSN(sroiw, MATCH_SROIW, MASK_SROIW)
+DECLARE_INSN(srow, MATCH_SROW, MASK_SROW)
DECLARE_INSN(stas16, MATCH_STAS16, MASK_STAS16)
+DECLARE_INSN(stas32, MATCH_STAS32, MASK_STAS32)
DECLARE_INSN(stsa16, MATCH_STSA16, MASK_STSA16)
-DECLARE_INSN(sub8, MATCH_SUB8, MASK_SUB8)
+DECLARE_INSN(stsa32, MATCH_STSA32, MASK_STSA32)
+DECLARE_INSN(sub, MATCH_SUB, MASK_SUB)
DECLARE_INSN(sub16, MATCH_SUB16, MASK_SUB16)
+DECLARE_INSN(sub32, MATCH_SUB32, MASK_SUB32)
DECLARE_INSN(sub64, MATCH_SUB64, MASK_SUB64)
+DECLARE_INSN(sub8, MATCH_SUB8, MASK_SUB8)
+DECLARE_INSN(subw, MATCH_SUBW, MASK_SUBW)
DECLARE_INSN(sunpkd810, MATCH_SUNPKD810, MASK_SUNPKD810)
DECLARE_INSN(sunpkd820, MATCH_SUNPKD820, MASK_SUNPKD820)
DECLARE_INSN(sunpkd830, MATCH_SUNPKD830, MASK_SUNPKD830)
DECLARE_INSN(sunpkd831, MATCH_SUNPKD831, MASK_SUNPKD831)
DECLARE_INSN(sunpkd832, MATCH_SUNPKD832, MASK_SUNPKD832)
+DECLARE_INSN(sw, MATCH_SW, MASK_SW)
DECLARE_INSN(swap8, MATCH_SWAP8, MASK_SWAP8)
-DECLARE_INSN(uclip8, MATCH_UCLIP8, MASK_UCLIP8)
DECLARE_INSN(uclip16, MATCH_UCLIP16, MASK_UCLIP16)
DECLARE_INSN(uclip32, MATCH_UCLIP32, MASK_UCLIP32)
-DECLARE_INSN(ucmple8, MATCH_UCMPLE8, MASK_UCMPLE8)
+DECLARE_INSN(uclip8, MATCH_UCLIP8, MASK_UCLIP8)
DECLARE_INSN(ucmple16, MATCH_UCMPLE16, MASK_UCMPLE16)
-DECLARE_INSN(ucmplt8, MATCH_UCMPLT8, MASK_UCMPLT8)
+DECLARE_INSN(ucmple8, MATCH_UCMPLE8, MASK_UCMPLE8)
DECLARE_INSN(ucmplt16, MATCH_UCMPLT16, MASK_UCMPLT16)
-DECLARE_INSN(ukadd8, MATCH_UKADD8, MASK_UKADD8)
+DECLARE_INSN(ucmplt8, MATCH_UCMPLT8, MASK_UCMPLT8)
DECLARE_INSN(ukadd16, MATCH_UKADD16, MASK_UKADD16)
+DECLARE_INSN(ukadd32, MATCH_UKADD32, MASK_UKADD32)
DECLARE_INSN(ukadd64, MATCH_UKADD64, MASK_UKADD64)
+DECLARE_INSN(ukadd8, MATCH_UKADD8, MASK_UKADD8)
DECLARE_INSN(ukaddh, MATCH_UKADDH, MASK_UKADDH)
DECLARE_INSN(ukaddw, MATCH_UKADDW, MASK_UKADDW)
DECLARE_INSN(ukcras16, MATCH_UKCRAS16, MASK_UKCRAS16)
+DECLARE_INSN(ukcras32, MATCH_UKCRAS32, MASK_UKCRAS32)
DECLARE_INSN(ukcrsa16, MATCH_UKCRSA16, MASK_UKCRSA16)
+DECLARE_INSN(ukcrsa32, MATCH_UKCRSA32, MASK_UKCRSA32)
DECLARE_INSN(ukmar64, MATCH_UKMAR64, MASK_UKMAR64)
DECLARE_INSN(ukmsr64, MATCH_UKMSR64, MASK_UKMSR64)
DECLARE_INSN(ukstas16, MATCH_UKSTAS16, MASK_UKSTAS16)
+DECLARE_INSN(ukstas32, MATCH_UKSTAS32, MASK_UKSTAS32)
DECLARE_INSN(ukstsa16, MATCH_UKSTSA16, MASK_UKSTSA16)
-DECLARE_INSN(uksub8, MATCH_UKSUB8, MASK_UKSUB8)
+DECLARE_INSN(ukstsa32, MATCH_UKSTSA32, MASK_UKSTSA32)
DECLARE_INSN(uksub16, MATCH_UKSUB16, MASK_UKSUB16)
+DECLARE_INSN(uksub32, MATCH_UKSUB32, MASK_UKSUB32)
DECLARE_INSN(uksub64, MATCH_UKSUB64, MASK_UKSUB64)
+DECLARE_INSN(uksub8, MATCH_UKSUB8, MASK_UKSUB8)
DECLARE_INSN(uksubh, MATCH_UKSUBH, MASK_UKSUBH)
DECLARE_INSN(uksubw, MATCH_UKSUBW, MASK_UKSUBW)
-DECLARE_INSN(umar64, MATCH_UMAR64, MASK_UMAR64)
DECLARE_INSN(umaqa, MATCH_UMAQA, MASK_UMAQA)
-DECLARE_INSN(umax8, MATCH_UMAX8, MASK_UMAX8)
+DECLARE_INSN(umar64, MATCH_UMAR64, MASK_UMAR64)
DECLARE_INSN(umax16, MATCH_UMAX16, MASK_UMAX16)
-DECLARE_INSN(umin8, MATCH_UMIN8, MASK_UMIN8)
+DECLARE_INSN(umax32, MATCH_UMAX32, MASK_UMAX32)
+DECLARE_INSN(umax8, MATCH_UMAX8, MASK_UMAX8)
DECLARE_INSN(umin16, MATCH_UMIN16, MASK_UMIN16)
+DECLARE_INSN(umin32, MATCH_UMIN32, MASK_UMIN32)
+DECLARE_INSN(umin8, MATCH_UMIN8, MASK_UMIN8)
DECLARE_INSN(umsr64, MATCH_UMSR64, MASK_UMSR64)
-DECLARE_INSN(umul8, MATCH_UMUL8, MASK_UMUL8)
-DECLARE_INSN(umulx8, MATCH_UMULX8, MASK_UMULX8)
DECLARE_INSN(umul16, MATCH_UMUL16, MASK_UMUL16)
+DECLARE_INSN(umul8, MATCH_UMUL8, MASK_UMUL8)
DECLARE_INSN(umulx16, MATCH_UMULX16, MASK_UMULX16)
-DECLARE_INSN(uradd8, MATCH_URADD8, MASK_URADD8)
+DECLARE_INSN(umulx8, MATCH_UMULX8, MASK_UMULX8)
+DECLARE_INSN(unshfl, MATCH_UNSHFL, MASK_UNSHFL)
+DECLARE_INSN(unshfli, MATCH_UNSHFLI, MASK_UNSHFLI)
+DECLARE_INSN(unshflw, MATCH_UNSHFLW, MASK_UNSHFLW)
DECLARE_INSN(uradd16, MATCH_URADD16, MASK_URADD16)
+DECLARE_INSN(uradd32, MATCH_URADD32, MASK_URADD32)
DECLARE_INSN(uradd64, MATCH_URADD64, MASK_URADD64)
+DECLARE_INSN(uradd8, MATCH_URADD8, MASK_URADD8)
DECLARE_INSN(uraddw, MATCH_URADDW, MASK_URADDW)
DECLARE_INSN(urcras16, MATCH_URCRAS16, MASK_URCRAS16)
+DECLARE_INSN(urcras32, MATCH_URCRAS32, MASK_URCRAS32)
DECLARE_INSN(urcrsa16, MATCH_URCRSA16, MASK_URCRSA16)
+DECLARE_INSN(urcrsa32, MATCH_URCRSA32, MASK_URCRSA32)
DECLARE_INSN(urstas16, MATCH_URSTAS16, MASK_URSTAS16)
+DECLARE_INSN(urstas32, MATCH_URSTAS32, MASK_URSTAS32)
DECLARE_INSN(urstsa16, MATCH_URSTSA16, MASK_URSTSA16)
-DECLARE_INSN(ursub8, MATCH_URSUB8, MASK_URSUB8)
+DECLARE_INSN(urstsa32, MATCH_URSTSA32, MASK_URSTSA32)
DECLARE_INSN(ursub16, MATCH_URSUB16, MASK_URSUB16)
+DECLARE_INSN(ursub32, MATCH_URSUB32, MASK_URSUB32)
DECLARE_INSN(ursub64, MATCH_URSUB64, MASK_URSUB64)
+DECLARE_INSN(ursub8, MATCH_URSUB8, MASK_URSUB8)
DECLARE_INSN(ursubw, MATCH_URSUBW, MASK_URSUBW)
-DECLARE_INSN(wexti, MATCH_WEXTI, MASK_WEXTI)
+DECLARE_INSN(vaadd_vv, MATCH_VAADD_VV, MASK_VAADD_VV)
+DECLARE_INSN(vaadd_vx, MATCH_VAADD_VX, MASK_VAADD_VX)
+DECLARE_INSN(vaaddu_vv, MATCH_VAADDU_VV, MASK_VAADDU_VV)
+DECLARE_INSN(vaaddu_vx, MATCH_VAADDU_VX, MASK_VAADDU_VX)
+DECLARE_INSN(vadc_vim, MATCH_VADC_VIM, MASK_VADC_VIM)
+DECLARE_INSN(vadc_vvm, MATCH_VADC_VVM, MASK_VADC_VVM)
+DECLARE_INSN(vadc_vxm, MATCH_VADC_VXM, MASK_VADC_VXM)
+DECLARE_INSN(vadd_vi, MATCH_VADD_VI, MASK_VADD_VI)
+DECLARE_INSN(vadd_vv, MATCH_VADD_VV, MASK_VADD_VV)
+DECLARE_INSN(vadd_vx, MATCH_VADD_VX, MASK_VADD_VX)
+DECLARE_INSN(vamoaddei16_v, MATCH_VAMOADDEI16_V, MASK_VAMOADDEI16_V)
+DECLARE_INSN(vamoaddei32_v, MATCH_VAMOADDEI32_V, MASK_VAMOADDEI32_V)
+DECLARE_INSN(vamoaddei64_v, MATCH_VAMOADDEI64_V, MASK_VAMOADDEI64_V)
+DECLARE_INSN(vamoaddei8_v, MATCH_VAMOADDEI8_V, MASK_VAMOADDEI8_V)
+DECLARE_INSN(vamoandei16_v, MATCH_VAMOANDEI16_V, MASK_VAMOANDEI16_V)
+DECLARE_INSN(vamoandei32_v, MATCH_VAMOANDEI32_V, MASK_VAMOANDEI32_V)
+DECLARE_INSN(vamoandei64_v, MATCH_VAMOANDEI64_V, MASK_VAMOANDEI64_V)
+DECLARE_INSN(vamoandei8_v, MATCH_VAMOANDEI8_V, MASK_VAMOANDEI8_V)
+DECLARE_INSN(vamomaxei16_v, MATCH_VAMOMAXEI16_V, MASK_VAMOMAXEI16_V)
+DECLARE_INSN(vamomaxei32_v, MATCH_VAMOMAXEI32_V, MASK_VAMOMAXEI32_V)
+DECLARE_INSN(vamomaxei64_v, MATCH_VAMOMAXEI64_V, MASK_VAMOMAXEI64_V)
+DECLARE_INSN(vamomaxei8_v, MATCH_VAMOMAXEI8_V, MASK_VAMOMAXEI8_V)
+DECLARE_INSN(vamomaxuei16_v, MATCH_VAMOMAXUEI16_V, MASK_VAMOMAXUEI16_V)
+DECLARE_INSN(vamomaxuei32_v, MATCH_VAMOMAXUEI32_V, MASK_VAMOMAXUEI32_V)
+DECLARE_INSN(vamomaxuei64_v, MATCH_VAMOMAXUEI64_V, MASK_VAMOMAXUEI64_V)
+DECLARE_INSN(vamomaxuei8_v, MATCH_VAMOMAXUEI8_V, MASK_VAMOMAXUEI8_V)
+DECLARE_INSN(vamominei16_v, MATCH_VAMOMINEI16_V, MASK_VAMOMINEI16_V)
+DECLARE_INSN(vamominei32_v, MATCH_VAMOMINEI32_V, MASK_VAMOMINEI32_V)
+DECLARE_INSN(vamominei64_v, MATCH_VAMOMINEI64_V, MASK_VAMOMINEI64_V)
+DECLARE_INSN(vamominei8_v, MATCH_VAMOMINEI8_V, MASK_VAMOMINEI8_V)
+DECLARE_INSN(vamominuei16_v, MATCH_VAMOMINUEI16_V, MASK_VAMOMINUEI16_V)
+DECLARE_INSN(vamominuei32_v, MATCH_VAMOMINUEI32_V, MASK_VAMOMINUEI32_V)
+DECLARE_INSN(vamominuei64_v, MATCH_VAMOMINUEI64_V, MASK_VAMOMINUEI64_V)
+DECLARE_INSN(vamominuei8_v, MATCH_VAMOMINUEI8_V, MASK_VAMOMINUEI8_V)
+DECLARE_INSN(vamoorei16_v, MATCH_VAMOOREI16_V, MASK_VAMOOREI16_V)
+DECLARE_INSN(vamoorei32_v, MATCH_VAMOOREI32_V, MASK_VAMOOREI32_V)
+DECLARE_INSN(vamoorei64_v, MATCH_VAMOOREI64_V, MASK_VAMOOREI64_V)
+DECLARE_INSN(vamoorei8_v, MATCH_VAMOOREI8_V, MASK_VAMOOREI8_V)
+DECLARE_INSN(vamoswapei16_v, MATCH_VAMOSWAPEI16_V, MASK_VAMOSWAPEI16_V)
+DECLARE_INSN(vamoswapei32_v, MATCH_VAMOSWAPEI32_V, MASK_VAMOSWAPEI32_V)
+DECLARE_INSN(vamoswapei64_v, MATCH_VAMOSWAPEI64_V, MASK_VAMOSWAPEI64_V)
+DECLARE_INSN(vamoswapei8_v, MATCH_VAMOSWAPEI8_V, MASK_VAMOSWAPEI8_V)
+DECLARE_INSN(vamoxorei16_v, MATCH_VAMOXOREI16_V, MASK_VAMOXOREI16_V)
+DECLARE_INSN(vamoxorei32_v, MATCH_VAMOXOREI32_V, MASK_VAMOXOREI32_V)
+DECLARE_INSN(vamoxorei64_v, MATCH_VAMOXOREI64_V, MASK_VAMOXOREI64_V)
+DECLARE_INSN(vamoxorei8_v, MATCH_VAMOXOREI8_V, MASK_VAMOXOREI8_V)
+DECLARE_INSN(vand_vi, MATCH_VAND_VI, MASK_VAND_VI)
+DECLARE_INSN(vand_vv, MATCH_VAND_VV, MASK_VAND_VV)
+DECLARE_INSN(vand_vx, MATCH_VAND_VX, MASK_VAND_VX)
+DECLARE_INSN(vasub_vv, MATCH_VASUB_VV, MASK_VASUB_VV)
+DECLARE_INSN(vasub_vx, MATCH_VASUB_VX, MASK_VASUB_VX)
+DECLARE_INSN(vasubu_vv, MATCH_VASUBU_VV, MASK_VASUBU_VV)
+DECLARE_INSN(vasubu_vx, MATCH_VASUBU_VX, MASK_VASUBU_VX)
+DECLARE_INSN(vcompress_vm, MATCH_VCOMPRESS_VM, MASK_VCOMPRESS_VM)
+DECLARE_INSN(vcpop_m, MATCH_VCPOP_M, MASK_VCPOP_M)
+DECLARE_INSN(vdiv_vv, MATCH_VDIV_VV, MASK_VDIV_VV)
+DECLARE_INSN(vdiv_vx, MATCH_VDIV_VX, MASK_VDIV_VX)
+DECLARE_INSN(vdivu_vv, MATCH_VDIVU_VV, MASK_VDIVU_VV)
+DECLARE_INSN(vdivu_vx, MATCH_VDIVU_VX, MASK_VDIVU_VX)
+DECLARE_INSN(vfadd_vf, MATCH_VFADD_VF, MASK_VFADD_VF)
+DECLARE_INSN(vfadd_vv, MATCH_VFADD_VV, MASK_VFADD_VV)
+DECLARE_INSN(vfclass_v, MATCH_VFCLASS_V, MASK_VFCLASS_V)
+DECLARE_INSN(vfcvt_f_x_v, MATCH_VFCVT_F_X_V, MASK_VFCVT_F_X_V)
+DECLARE_INSN(vfcvt_f_xu_v, MATCH_VFCVT_F_XU_V, MASK_VFCVT_F_XU_V)
+DECLARE_INSN(vfcvt_rtz_x_f_v, MATCH_VFCVT_RTZ_X_F_V, MASK_VFCVT_RTZ_X_F_V)
+DECLARE_INSN(vfcvt_rtz_xu_f_v, MATCH_VFCVT_RTZ_XU_F_V, MASK_VFCVT_RTZ_XU_F_V)
+DECLARE_INSN(vfcvt_x_f_v, MATCH_VFCVT_X_F_V, MASK_VFCVT_X_F_V)
+DECLARE_INSN(vfcvt_xu_f_v, MATCH_VFCVT_XU_F_V, MASK_VFCVT_XU_F_V)
+DECLARE_INSN(vfdiv_vf, MATCH_VFDIV_VF, MASK_VFDIV_VF)
+DECLARE_INSN(vfdiv_vv, MATCH_VFDIV_VV, MASK_VFDIV_VV)
+DECLARE_INSN(vfirst_m, MATCH_VFIRST_M, MASK_VFIRST_M)
+DECLARE_INSN(vfmacc_vf, MATCH_VFMACC_VF, MASK_VFMACC_VF)
+DECLARE_INSN(vfmacc_vv, MATCH_VFMACC_VV, MASK_VFMACC_VV)
+DECLARE_INSN(vfmadd_vf, MATCH_VFMADD_VF, MASK_VFMADD_VF)
+DECLARE_INSN(vfmadd_vv, MATCH_VFMADD_VV, MASK_VFMADD_VV)
+DECLARE_INSN(vfmax_vf, MATCH_VFMAX_VF, MASK_VFMAX_VF)
+DECLARE_INSN(vfmax_vv, MATCH_VFMAX_VV, MASK_VFMAX_VV)
+DECLARE_INSN(vfmerge_vfm, MATCH_VFMERGE_VFM, MASK_VFMERGE_VFM)
+DECLARE_INSN(vfmin_vf, MATCH_VFMIN_VF, MASK_VFMIN_VF)
+DECLARE_INSN(vfmin_vv, MATCH_VFMIN_VV, MASK_VFMIN_VV)
+DECLARE_INSN(vfmsac_vf, MATCH_VFMSAC_VF, MASK_VFMSAC_VF)
+DECLARE_INSN(vfmsac_vv, MATCH_VFMSAC_VV, MASK_VFMSAC_VV)
+DECLARE_INSN(vfmsub_vf, MATCH_VFMSUB_VF, MASK_VFMSUB_VF)
+DECLARE_INSN(vfmsub_vv, MATCH_VFMSUB_VV, MASK_VFMSUB_VV)
+DECLARE_INSN(vfmul_vf, MATCH_VFMUL_VF, MASK_VFMUL_VF)
+DECLARE_INSN(vfmul_vv, MATCH_VFMUL_VV, MASK_VFMUL_VV)
+DECLARE_INSN(vfmv_f_s, MATCH_VFMV_F_S, MASK_VFMV_F_S)
+DECLARE_INSN(vfmv_s_f, MATCH_VFMV_S_F, MASK_VFMV_S_F)
+DECLARE_INSN(vfmv_v_f, MATCH_VFMV_V_F, MASK_VFMV_V_F)
+DECLARE_INSN(vfncvt_f_f_w, MATCH_VFNCVT_F_F_W, MASK_VFNCVT_F_F_W)
+DECLARE_INSN(vfncvt_f_x_w, MATCH_VFNCVT_F_X_W, MASK_VFNCVT_F_X_W)
+DECLARE_INSN(vfncvt_f_xu_w, MATCH_VFNCVT_F_XU_W, MASK_VFNCVT_F_XU_W)
+DECLARE_INSN(vfncvt_rod_f_f_w, MATCH_VFNCVT_ROD_F_F_W, MASK_VFNCVT_ROD_F_F_W)
+DECLARE_INSN(vfncvt_rtz_x_f_w, MATCH_VFNCVT_RTZ_X_F_W, MASK_VFNCVT_RTZ_X_F_W)
+DECLARE_INSN(vfncvt_rtz_xu_f_w, MATCH_VFNCVT_RTZ_XU_F_W, MASK_VFNCVT_RTZ_XU_F_W)
+DECLARE_INSN(vfncvt_x_f_w, MATCH_VFNCVT_X_F_W, MASK_VFNCVT_X_F_W)
+DECLARE_INSN(vfncvt_xu_f_w, MATCH_VFNCVT_XU_F_W, MASK_VFNCVT_XU_F_W)
+DECLARE_INSN(vfnmacc_vf, MATCH_VFNMACC_VF, MASK_VFNMACC_VF)
+DECLARE_INSN(vfnmacc_vv, MATCH_VFNMACC_VV, MASK_VFNMACC_VV)
+DECLARE_INSN(vfnmadd_vf, MATCH_VFNMADD_VF, MASK_VFNMADD_VF)
+DECLARE_INSN(vfnmadd_vv, MATCH_VFNMADD_VV, MASK_VFNMADD_VV)
+DECLARE_INSN(vfnmsac_vf, MATCH_VFNMSAC_VF, MASK_VFNMSAC_VF)
+DECLARE_INSN(vfnmsac_vv, MATCH_VFNMSAC_VV, MASK_VFNMSAC_VV)
+DECLARE_INSN(vfnmsub_vf, MATCH_VFNMSUB_VF, MASK_VFNMSUB_VF)
+DECLARE_INSN(vfnmsub_vv, MATCH_VFNMSUB_VV, MASK_VFNMSUB_VV)
+DECLARE_INSN(vfrdiv_vf, MATCH_VFRDIV_VF, MASK_VFRDIV_VF)
+DECLARE_INSN(vfrec7_v, MATCH_VFREC7_V, MASK_VFREC7_V)
+DECLARE_INSN(vfredmax_vs, MATCH_VFREDMAX_VS, MASK_VFREDMAX_VS)
+DECLARE_INSN(vfredmin_vs, MATCH_VFREDMIN_VS, MASK_VFREDMIN_VS)
+DECLARE_INSN(vfredosum_vs, MATCH_VFREDOSUM_VS, MASK_VFREDOSUM_VS)
+DECLARE_INSN(vfredusum_vs, MATCH_VFREDUSUM_VS, MASK_VFREDUSUM_VS)
+DECLARE_INSN(vfrsqrt7_v, MATCH_VFRSQRT7_V, MASK_VFRSQRT7_V)
+DECLARE_INSN(vfrsub_vf, MATCH_VFRSUB_VF, MASK_VFRSUB_VF)
+DECLARE_INSN(vfsgnj_vf, MATCH_VFSGNJ_VF, MASK_VFSGNJ_VF)
+DECLARE_INSN(vfsgnj_vv, MATCH_VFSGNJ_VV, MASK_VFSGNJ_VV)
+DECLARE_INSN(vfsgnjn_vf, MATCH_VFSGNJN_VF, MASK_VFSGNJN_VF)
+DECLARE_INSN(vfsgnjn_vv, MATCH_VFSGNJN_VV, MASK_VFSGNJN_VV)
+DECLARE_INSN(vfsgnjx_vf, MATCH_VFSGNJX_VF, MASK_VFSGNJX_VF)
+DECLARE_INSN(vfsgnjx_vv, MATCH_VFSGNJX_VV, MASK_VFSGNJX_VV)
+DECLARE_INSN(vfslide1down_vf, MATCH_VFSLIDE1DOWN_VF, MASK_VFSLIDE1DOWN_VF)
+DECLARE_INSN(vfslide1up_vf, MATCH_VFSLIDE1UP_VF, MASK_VFSLIDE1UP_VF)
+DECLARE_INSN(vfsqrt_v, MATCH_VFSQRT_V, MASK_VFSQRT_V)
+DECLARE_INSN(vfsub_vf, MATCH_VFSUB_VF, MASK_VFSUB_VF)
+DECLARE_INSN(vfsub_vv, MATCH_VFSUB_VV, MASK_VFSUB_VV)
+DECLARE_INSN(vfwadd_vf, MATCH_VFWADD_VF, MASK_VFWADD_VF)
+DECLARE_INSN(vfwadd_vv, MATCH_VFWADD_VV, MASK_VFWADD_VV)
+DECLARE_INSN(vfwadd_wf, MATCH_VFWADD_WF, MASK_VFWADD_WF)
+DECLARE_INSN(vfwadd_wv, MATCH_VFWADD_WV, MASK_VFWADD_WV)
+DECLARE_INSN(vfwcvt_f_f_v, MATCH_VFWCVT_F_F_V, MASK_VFWCVT_F_F_V)
+DECLARE_INSN(vfwcvt_f_x_v, MATCH_VFWCVT_F_X_V, MASK_VFWCVT_F_X_V)
+DECLARE_INSN(vfwcvt_f_xu_v, MATCH_VFWCVT_F_XU_V, MASK_VFWCVT_F_XU_V)
+DECLARE_INSN(vfwcvt_rtz_x_f_v, MATCH_VFWCVT_RTZ_X_F_V, MASK_VFWCVT_RTZ_X_F_V)
+DECLARE_INSN(vfwcvt_rtz_xu_f_v, MATCH_VFWCVT_RTZ_XU_F_V, MASK_VFWCVT_RTZ_XU_F_V)
+DECLARE_INSN(vfwcvt_x_f_v, MATCH_VFWCVT_X_F_V, MASK_VFWCVT_X_F_V)
+DECLARE_INSN(vfwcvt_xu_f_v, MATCH_VFWCVT_XU_F_V, MASK_VFWCVT_XU_F_V)
+DECLARE_INSN(vfwmacc_vf, MATCH_VFWMACC_VF, MASK_VFWMACC_VF)
+DECLARE_INSN(vfwmacc_vv, MATCH_VFWMACC_VV, MASK_VFWMACC_VV)
+DECLARE_INSN(vfwmsac_vf, MATCH_VFWMSAC_VF, MASK_VFWMSAC_VF)
+DECLARE_INSN(vfwmsac_vv, MATCH_VFWMSAC_VV, MASK_VFWMSAC_VV)
+DECLARE_INSN(vfwmul_vf, MATCH_VFWMUL_VF, MASK_VFWMUL_VF)
+DECLARE_INSN(vfwmul_vv, MATCH_VFWMUL_VV, MASK_VFWMUL_VV)
+DECLARE_INSN(vfwnmacc_vf, MATCH_VFWNMACC_VF, MASK_VFWNMACC_VF)
+DECLARE_INSN(vfwnmacc_vv, MATCH_VFWNMACC_VV, MASK_VFWNMACC_VV)
+DECLARE_INSN(vfwnmsac_vf, MATCH_VFWNMSAC_VF, MASK_VFWNMSAC_VF)
+DECLARE_INSN(vfwnmsac_vv, MATCH_VFWNMSAC_VV, MASK_VFWNMSAC_VV)
+DECLARE_INSN(vfwredosum_vs, MATCH_VFWREDOSUM_VS, MASK_VFWREDOSUM_VS)
+DECLARE_INSN(vfwredusum_vs, MATCH_VFWREDUSUM_VS, MASK_VFWREDUSUM_VS)
+DECLARE_INSN(vfwsub_vf, MATCH_VFWSUB_VF, MASK_VFWSUB_VF)
+DECLARE_INSN(vfwsub_vv, MATCH_VFWSUB_VV, MASK_VFWSUB_VV)
+DECLARE_INSN(vfwsub_wf, MATCH_VFWSUB_WF, MASK_VFWSUB_WF)
+DECLARE_INSN(vfwsub_wv, MATCH_VFWSUB_WV, MASK_VFWSUB_WV)
+DECLARE_INSN(vid_v, MATCH_VID_V, MASK_VID_V)
+DECLARE_INSN(viota_m, MATCH_VIOTA_M, MASK_VIOTA_M)
+DECLARE_INSN(vl1re16_v, MATCH_VL1RE16_V, MASK_VL1RE16_V)
+DECLARE_INSN(vl1re32_v, MATCH_VL1RE32_V, MASK_VL1RE32_V)
+DECLARE_INSN(vl1re64_v, MATCH_VL1RE64_V, MASK_VL1RE64_V)
+DECLARE_INSN(vl1re8_v, MATCH_VL1RE8_V, MASK_VL1RE8_V)
+DECLARE_INSN(vl2re16_v, MATCH_VL2RE16_V, MASK_VL2RE16_V)
+DECLARE_INSN(vl2re32_v, MATCH_VL2RE32_V, MASK_VL2RE32_V)
+DECLARE_INSN(vl2re64_v, MATCH_VL2RE64_V, MASK_VL2RE64_V)
+DECLARE_INSN(vl2re8_v, MATCH_VL2RE8_V, MASK_VL2RE8_V)
+DECLARE_INSN(vl4re16_v, MATCH_VL4RE16_V, MASK_VL4RE16_V)
+DECLARE_INSN(vl4re32_v, MATCH_VL4RE32_V, MASK_VL4RE32_V)
+DECLARE_INSN(vl4re64_v, MATCH_VL4RE64_V, MASK_VL4RE64_V)
+DECLARE_INSN(vl4re8_v, MATCH_VL4RE8_V, MASK_VL4RE8_V)
+DECLARE_INSN(vl8re16_v, MATCH_VL8RE16_V, MASK_VL8RE16_V)
+DECLARE_INSN(vl8re32_v, MATCH_VL8RE32_V, MASK_VL8RE32_V)
+DECLARE_INSN(vl8re64_v, MATCH_VL8RE64_V, MASK_VL8RE64_V)
+DECLARE_INSN(vl8re8_v, MATCH_VL8RE8_V, MASK_VL8RE8_V)
+DECLARE_INSN(vle1024_v, MATCH_VLE1024_V, MASK_VLE1024_V)
+DECLARE_INSN(vle1024ff_v, MATCH_VLE1024FF_V, MASK_VLE1024FF_V)
+DECLARE_INSN(vle128_v, MATCH_VLE128_V, MASK_VLE128_V)
+DECLARE_INSN(vle128ff_v, MATCH_VLE128FF_V, MASK_VLE128FF_V)
+DECLARE_INSN(vle16_v, MATCH_VLE16_V, MASK_VLE16_V)
+DECLARE_INSN(vle16ff_v, MATCH_VLE16FF_V, MASK_VLE16FF_V)
+DECLARE_INSN(vle256_v, MATCH_VLE256_V, MASK_VLE256_V)
+DECLARE_INSN(vle256ff_v, MATCH_VLE256FF_V, MASK_VLE256FF_V)
+DECLARE_INSN(vle32_v, MATCH_VLE32_V, MASK_VLE32_V)
+DECLARE_INSN(vle32ff_v, MATCH_VLE32FF_V, MASK_VLE32FF_V)
+DECLARE_INSN(vle512_v, MATCH_VLE512_V, MASK_VLE512_V)
+DECLARE_INSN(vle512ff_v, MATCH_VLE512FF_V, MASK_VLE512FF_V)
+DECLARE_INSN(vle64_v, MATCH_VLE64_V, MASK_VLE64_V)
+DECLARE_INSN(vle64ff_v, MATCH_VLE64FF_V, MASK_VLE64FF_V)
+DECLARE_INSN(vle8_v, MATCH_VLE8_V, MASK_VLE8_V)
+DECLARE_INSN(vle8ff_v, MATCH_VLE8FF_V, MASK_VLE8FF_V)
+DECLARE_INSN(vlm_v, MATCH_VLM_V, MASK_VLM_V)
+DECLARE_INSN(vloxei1024_v, MATCH_VLOXEI1024_V, MASK_VLOXEI1024_V)
+DECLARE_INSN(vloxei128_v, MATCH_VLOXEI128_V, MASK_VLOXEI128_V)
+DECLARE_INSN(vloxei16_v, MATCH_VLOXEI16_V, MASK_VLOXEI16_V)
+DECLARE_INSN(vloxei256_v, MATCH_VLOXEI256_V, MASK_VLOXEI256_V)
+DECLARE_INSN(vloxei32_v, MATCH_VLOXEI32_V, MASK_VLOXEI32_V)
+DECLARE_INSN(vloxei512_v, MATCH_VLOXEI512_V, MASK_VLOXEI512_V)
+DECLARE_INSN(vloxei64_v, MATCH_VLOXEI64_V, MASK_VLOXEI64_V)
+DECLARE_INSN(vloxei8_v, MATCH_VLOXEI8_V, MASK_VLOXEI8_V)
+DECLARE_INSN(vlse1024_v, MATCH_VLSE1024_V, MASK_VLSE1024_V)
+DECLARE_INSN(vlse128_v, MATCH_VLSE128_V, MASK_VLSE128_V)
+DECLARE_INSN(vlse16_v, MATCH_VLSE16_V, MASK_VLSE16_V)
+DECLARE_INSN(vlse256_v, MATCH_VLSE256_V, MASK_VLSE256_V)
+DECLARE_INSN(vlse32_v, MATCH_VLSE32_V, MASK_VLSE32_V)
+DECLARE_INSN(vlse512_v, MATCH_VLSE512_V, MASK_VLSE512_V)
+DECLARE_INSN(vlse64_v, MATCH_VLSE64_V, MASK_VLSE64_V)
+DECLARE_INSN(vlse8_v, MATCH_VLSE8_V, MASK_VLSE8_V)
+DECLARE_INSN(vluxei1024_v, MATCH_VLUXEI1024_V, MASK_VLUXEI1024_V)
+DECLARE_INSN(vluxei128_v, MATCH_VLUXEI128_V, MASK_VLUXEI128_V)
+DECLARE_INSN(vluxei16_v, MATCH_VLUXEI16_V, MASK_VLUXEI16_V)
+DECLARE_INSN(vluxei256_v, MATCH_VLUXEI256_V, MASK_VLUXEI256_V)
+DECLARE_INSN(vluxei32_v, MATCH_VLUXEI32_V, MASK_VLUXEI32_V)
+DECLARE_INSN(vluxei512_v, MATCH_VLUXEI512_V, MASK_VLUXEI512_V)
+DECLARE_INSN(vluxei64_v, MATCH_VLUXEI64_V, MASK_VLUXEI64_V)
+DECLARE_INSN(vluxei8_v, MATCH_VLUXEI8_V, MASK_VLUXEI8_V)
+DECLARE_INSN(vmacc_vv, MATCH_VMACC_VV, MASK_VMACC_VV)
+DECLARE_INSN(vmacc_vx, MATCH_VMACC_VX, MASK_VMACC_VX)
+DECLARE_INSN(vmadc_vi, MATCH_VMADC_VI, MASK_VMADC_VI)
+DECLARE_INSN(vmadc_vim, MATCH_VMADC_VIM, MASK_VMADC_VIM)
+DECLARE_INSN(vmadc_vv, MATCH_VMADC_VV, MASK_VMADC_VV)
+DECLARE_INSN(vmadc_vvm, MATCH_VMADC_VVM, MASK_VMADC_VVM)
+DECLARE_INSN(vmadc_vx, MATCH_VMADC_VX, MASK_VMADC_VX)
+DECLARE_INSN(vmadc_vxm, MATCH_VMADC_VXM, MASK_VMADC_VXM)
+DECLARE_INSN(vmadd_vv, MATCH_VMADD_VV, MASK_VMADD_VV)
+DECLARE_INSN(vmadd_vx, MATCH_VMADD_VX, MASK_VMADD_VX)
+DECLARE_INSN(vmand_mm, MATCH_VMAND_MM, MASK_VMAND_MM)
+DECLARE_INSN(vmandn_mm, MATCH_VMANDN_MM, MASK_VMANDN_MM)
+DECLARE_INSN(vmax_vv, MATCH_VMAX_VV, MASK_VMAX_VV)
+DECLARE_INSN(vmax_vx, MATCH_VMAX_VX, MASK_VMAX_VX)
+DECLARE_INSN(vmaxu_vv, MATCH_VMAXU_VV, MASK_VMAXU_VV)
+DECLARE_INSN(vmaxu_vx, MATCH_VMAXU_VX, MASK_VMAXU_VX)
+DECLARE_INSN(vmerge_vim, MATCH_VMERGE_VIM, MASK_VMERGE_VIM)
+DECLARE_INSN(vmerge_vvm, MATCH_VMERGE_VVM, MASK_VMERGE_VVM)
+DECLARE_INSN(vmerge_vxm, MATCH_VMERGE_VXM, MASK_VMERGE_VXM)
+DECLARE_INSN(vmfeq_vf, MATCH_VMFEQ_VF, MASK_VMFEQ_VF)
+DECLARE_INSN(vmfeq_vv, MATCH_VMFEQ_VV, MASK_VMFEQ_VV)
+DECLARE_INSN(vmfge_vf, MATCH_VMFGE_VF, MASK_VMFGE_VF)
+DECLARE_INSN(vmfgt_vf, MATCH_VMFGT_VF, MASK_VMFGT_VF)
+DECLARE_INSN(vmfle_vf, MATCH_VMFLE_VF, MASK_VMFLE_VF)
+DECLARE_INSN(vmfle_vv, MATCH_VMFLE_VV, MASK_VMFLE_VV)
+DECLARE_INSN(vmflt_vf, MATCH_VMFLT_VF, MASK_VMFLT_VF)
+DECLARE_INSN(vmflt_vv, MATCH_VMFLT_VV, MASK_VMFLT_VV)
+DECLARE_INSN(vmfne_vf, MATCH_VMFNE_VF, MASK_VMFNE_VF)
+DECLARE_INSN(vmfne_vv, MATCH_VMFNE_VV, MASK_VMFNE_VV)
+DECLARE_INSN(vmin_vv, MATCH_VMIN_VV, MASK_VMIN_VV)
+DECLARE_INSN(vmin_vx, MATCH_VMIN_VX, MASK_VMIN_VX)
+DECLARE_INSN(vminu_vv, MATCH_VMINU_VV, MASK_VMINU_VV)
+DECLARE_INSN(vminu_vx, MATCH_VMINU_VX, MASK_VMINU_VX)
+DECLARE_INSN(vmnand_mm, MATCH_VMNAND_MM, MASK_VMNAND_MM)
+DECLARE_INSN(vmnor_mm, MATCH_VMNOR_MM, MASK_VMNOR_MM)
+DECLARE_INSN(vmor_mm, MATCH_VMOR_MM, MASK_VMOR_MM)
+DECLARE_INSN(vmorn_mm, MATCH_VMORN_MM, MASK_VMORN_MM)
+DECLARE_INSN(vmsbc_vv, MATCH_VMSBC_VV, MASK_VMSBC_VV)
+DECLARE_INSN(vmsbc_vvm, MATCH_VMSBC_VVM, MASK_VMSBC_VVM)
+DECLARE_INSN(vmsbc_vx, MATCH_VMSBC_VX, MASK_VMSBC_VX)
+DECLARE_INSN(vmsbc_vxm, MATCH_VMSBC_VXM, MASK_VMSBC_VXM)
+DECLARE_INSN(vmsbf_m, MATCH_VMSBF_M, MASK_VMSBF_M)
+DECLARE_INSN(vmseq_vi, MATCH_VMSEQ_VI, MASK_VMSEQ_VI)
+DECLARE_INSN(vmseq_vv, MATCH_VMSEQ_VV, MASK_VMSEQ_VV)
+DECLARE_INSN(vmseq_vx, MATCH_VMSEQ_VX, MASK_VMSEQ_VX)
+DECLARE_INSN(vmsgt_vi, MATCH_VMSGT_VI, MASK_VMSGT_VI)
+DECLARE_INSN(vmsgt_vx, MATCH_VMSGT_VX, MASK_VMSGT_VX)
+DECLARE_INSN(vmsgtu_vi, MATCH_VMSGTU_VI, MASK_VMSGTU_VI)
+DECLARE_INSN(vmsgtu_vx, MATCH_VMSGTU_VX, MASK_VMSGTU_VX)
+DECLARE_INSN(vmsif_m, MATCH_VMSIF_M, MASK_VMSIF_M)
+DECLARE_INSN(vmsle_vi, MATCH_VMSLE_VI, MASK_VMSLE_VI)
+DECLARE_INSN(vmsle_vv, MATCH_VMSLE_VV, MASK_VMSLE_VV)
+DECLARE_INSN(vmsle_vx, MATCH_VMSLE_VX, MASK_VMSLE_VX)
+DECLARE_INSN(vmsleu_vi, MATCH_VMSLEU_VI, MASK_VMSLEU_VI)
+DECLARE_INSN(vmsleu_vv, MATCH_VMSLEU_VV, MASK_VMSLEU_VV)
+DECLARE_INSN(vmsleu_vx, MATCH_VMSLEU_VX, MASK_VMSLEU_VX)
+DECLARE_INSN(vmslt_vv, MATCH_VMSLT_VV, MASK_VMSLT_VV)
+DECLARE_INSN(vmslt_vx, MATCH_VMSLT_VX, MASK_VMSLT_VX)
+DECLARE_INSN(vmsltu_vv, MATCH_VMSLTU_VV, MASK_VMSLTU_VV)
+DECLARE_INSN(vmsltu_vx, MATCH_VMSLTU_VX, MASK_VMSLTU_VX)
+DECLARE_INSN(vmsne_vi, MATCH_VMSNE_VI, MASK_VMSNE_VI)
+DECLARE_INSN(vmsne_vv, MATCH_VMSNE_VV, MASK_VMSNE_VV)
+DECLARE_INSN(vmsne_vx, MATCH_VMSNE_VX, MASK_VMSNE_VX)
+DECLARE_INSN(vmsof_m, MATCH_VMSOF_M, MASK_VMSOF_M)
+DECLARE_INSN(vmul_vv, MATCH_VMUL_VV, MASK_VMUL_VV)
+DECLARE_INSN(vmul_vx, MATCH_VMUL_VX, MASK_VMUL_VX)
+DECLARE_INSN(vmulh_vv, MATCH_VMULH_VV, MASK_VMULH_VV)
+DECLARE_INSN(vmulh_vx, MATCH_VMULH_VX, MASK_VMULH_VX)
+DECLARE_INSN(vmulhsu_vv, MATCH_VMULHSU_VV, MASK_VMULHSU_VV)
+DECLARE_INSN(vmulhsu_vx, MATCH_VMULHSU_VX, MASK_VMULHSU_VX)
+DECLARE_INSN(vmulhu_vv, MATCH_VMULHU_VV, MASK_VMULHU_VV)
+DECLARE_INSN(vmulhu_vx, MATCH_VMULHU_VX, MASK_VMULHU_VX)
+DECLARE_INSN(vmv1r_v, MATCH_VMV1R_V, MASK_VMV1R_V)
+DECLARE_INSN(vmv2r_v, MATCH_VMV2R_V, MASK_VMV2R_V)
+DECLARE_INSN(vmv4r_v, MATCH_VMV4R_V, MASK_VMV4R_V)
+DECLARE_INSN(vmv8r_v, MATCH_VMV8R_V, MASK_VMV8R_V)
+DECLARE_INSN(vmv_s_x, MATCH_VMV_S_X, MASK_VMV_S_X)
+DECLARE_INSN(vmv_v_i, MATCH_VMV_V_I, MASK_VMV_V_I)
+DECLARE_INSN(vmv_v_v, MATCH_VMV_V_V, MASK_VMV_V_V)
+DECLARE_INSN(vmv_v_x, MATCH_VMV_V_X, MASK_VMV_V_X)
+DECLARE_INSN(vmv_x_s, MATCH_VMV_X_S, MASK_VMV_X_S)
+DECLARE_INSN(vmxnor_mm, MATCH_VMXNOR_MM, MASK_VMXNOR_MM)
+DECLARE_INSN(vmxor_mm, MATCH_VMXOR_MM, MASK_VMXOR_MM)
+DECLARE_INSN(vnclip_wi, MATCH_VNCLIP_WI, MASK_VNCLIP_WI)
+DECLARE_INSN(vnclip_wv, MATCH_VNCLIP_WV, MASK_VNCLIP_WV)
+DECLARE_INSN(vnclip_wx, MATCH_VNCLIP_WX, MASK_VNCLIP_WX)
+DECLARE_INSN(vnclipu_wi, MATCH_VNCLIPU_WI, MASK_VNCLIPU_WI)
+DECLARE_INSN(vnclipu_wv, MATCH_VNCLIPU_WV, MASK_VNCLIPU_WV)
+DECLARE_INSN(vnclipu_wx, MATCH_VNCLIPU_WX, MASK_VNCLIPU_WX)
+DECLARE_INSN(vnmsac_vv, MATCH_VNMSAC_VV, MASK_VNMSAC_VV)
+DECLARE_INSN(vnmsac_vx, MATCH_VNMSAC_VX, MASK_VNMSAC_VX)
+DECLARE_INSN(vnmsub_vv, MATCH_VNMSUB_VV, MASK_VNMSUB_VV)
+DECLARE_INSN(vnmsub_vx, MATCH_VNMSUB_VX, MASK_VNMSUB_VX)
+DECLARE_INSN(vnsra_wi, MATCH_VNSRA_WI, MASK_VNSRA_WI)
+DECLARE_INSN(vnsra_wv, MATCH_VNSRA_WV, MASK_VNSRA_WV)
+DECLARE_INSN(vnsra_wx, MATCH_VNSRA_WX, MASK_VNSRA_WX)
+DECLARE_INSN(vnsrl_wi, MATCH_VNSRL_WI, MASK_VNSRL_WI)
+DECLARE_INSN(vnsrl_wv, MATCH_VNSRL_WV, MASK_VNSRL_WV)
+DECLARE_INSN(vnsrl_wx, MATCH_VNSRL_WX, MASK_VNSRL_WX)
+DECLARE_INSN(vor_vi, MATCH_VOR_VI, MASK_VOR_VI)
+DECLARE_INSN(vor_vv, MATCH_VOR_VV, MASK_VOR_VV)
+DECLARE_INSN(vor_vx, MATCH_VOR_VX, MASK_VOR_VX)
+DECLARE_INSN(vredand_vs, MATCH_VREDAND_VS, MASK_VREDAND_VS)
+DECLARE_INSN(vredmax_vs, MATCH_VREDMAX_VS, MASK_VREDMAX_VS)
+DECLARE_INSN(vredmaxu_vs, MATCH_VREDMAXU_VS, MASK_VREDMAXU_VS)
+DECLARE_INSN(vredmin_vs, MATCH_VREDMIN_VS, MASK_VREDMIN_VS)
+DECLARE_INSN(vredminu_vs, MATCH_VREDMINU_VS, MASK_VREDMINU_VS)
+DECLARE_INSN(vredor_vs, MATCH_VREDOR_VS, MASK_VREDOR_VS)
+DECLARE_INSN(vredsum_vs, MATCH_VREDSUM_VS, MASK_VREDSUM_VS)
+DECLARE_INSN(vredxor_vs, MATCH_VREDXOR_VS, MASK_VREDXOR_VS)
+DECLARE_INSN(vrem_vv, MATCH_VREM_VV, MASK_VREM_VV)
+DECLARE_INSN(vrem_vx, MATCH_VREM_VX, MASK_VREM_VX)
+DECLARE_INSN(vremu_vv, MATCH_VREMU_VV, MASK_VREMU_VV)
+DECLARE_INSN(vremu_vx, MATCH_VREMU_VX, MASK_VREMU_VX)
+DECLARE_INSN(vrgather_vi, MATCH_VRGATHER_VI, MASK_VRGATHER_VI)
+DECLARE_INSN(vrgather_vv, MATCH_VRGATHER_VV, MASK_VRGATHER_VV)
+DECLARE_INSN(vrgather_vx, MATCH_VRGATHER_VX, MASK_VRGATHER_VX)
+DECLARE_INSN(vrgatherei16_vv, MATCH_VRGATHEREI16_VV, MASK_VRGATHEREI16_VV)
+DECLARE_INSN(vrsub_vi, MATCH_VRSUB_VI, MASK_VRSUB_VI)
+DECLARE_INSN(vrsub_vx, MATCH_VRSUB_VX, MASK_VRSUB_VX)
+DECLARE_INSN(vs1r_v, MATCH_VS1R_V, MASK_VS1R_V)
+DECLARE_INSN(vs2r_v, MATCH_VS2R_V, MASK_VS2R_V)
+DECLARE_INSN(vs4r_v, MATCH_VS4R_V, MASK_VS4R_V)
+DECLARE_INSN(vs8r_v, MATCH_VS8R_V, MASK_VS8R_V)
+DECLARE_INSN(vsadd_vi, MATCH_VSADD_VI, MASK_VSADD_VI)
+DECLARE_INSN(vsadd_vv, MATCH_VSADD_VV, MASK_VSADD_VV)
+DECLARE_INSN(vsadd_vx, MATCH_VSADD_VX, MASK_VSADD_VX)
+DECLARE_INSN(vsaddu_vi, MATCH_VSADDU_VI, MASK_VSADDU_VI)
+DECLARE_INSN(vsaddu_vv, MATCH_VSADDU_VV, MASK_VSADDU_VV)
+DECLARE_INSN(vsaddu_vx, MATCH_VSADDU_VX, MASK_VSADDU_VX)
+DECLARE_INSN(vsbc_vvm, MATCH_VSBC_VVM, MASK_VSBC_VVM)
+DECLARE_INSN(vsbc_vxm, MATCH_VSBC_VXM, MASK_VSBC_VXM)
+DECLARE_INSN(vse1024_v, MATCH_VSE1024_V, MASK_VSE1024_V)
+DECLARE_INSN(vse128_v, MATCH_VSE128_V, MASK_VSE128_V)
+DECLARE_INSN(vse16_v, MATCH_VSE16_V, MASK_VSE16_V)
+DECLARE_INSN(vse256_v, MATCH_VSE256_V, MASK_VSE256_V)
+DECLARE_INSN(vse32_v, MATCH_VSE32_V, MASK_VSE32_V)
+DECLARE_INSN(vse512_v, MATCH_VSE512_V, MASK_VSE512_V)
+DECLARE_INSN(vse64_v, MATCH_VSE64_V, MASK_VSE64_V)
+DECLARE_INSN(vse8_v, MATCH_VSE8_V, MASK_VSE8_V)
+DECLARE_INSN(vsetivli, MATCH_VSETIVLI, MASK_VSETIVLI)
+DECLARE_INSN(vsetvl, MATCH_VSETVL, MASK_VSETVL)
+DECLARE_INSN(vsetvli, MATCH_VSETVLI, MASK_VSETVLI)
+DECLARE_INSN(vsext_vf2, MATCH_VSEXT_VF2, MASK_VSEXT_VF2)
+DECLARE_INSN(vsext_vf4, MATCH_VSEXT_VF4, MASK_VSEXT_VF4)
+DECLARE_INSN(vsext_vf8, MATCH_VSEXT_VF8, MASK_VSEXT_VF8)
+DECLARE_INSN(vslide1down_vx, MATCH_VSLIDE1DOWN_VX, MASK_VSLIDE1DOWN_VX)
+DECLARE_INSN(vslide1up_vx, MATCH_VSLIDE1UP_VX, MASK_VSLIDE1UP_VX)
+DECLARE_INSN(vslidedown_vi, MATCH_VSLIDEDOWN_VI, MASK_VSLIDEDOWN_VI)
+DECLARE_INSN(vslidedown_vx, MATCH_VSLIDEDOWN_VX, MASK_VSLIDEDOWN_VX)
+DECLARE_INSN(vslideup_vi, MATCH_VSLIDEUP_VI, MASK_VSLIDEUP_VI)
+DECLARE_INSN(vslideup_vx, MATCH_VSLIDEUP_VX, MASK_VSLIDEUP_VX)
+DECLARE_INSN(vsll_vi, MATCH_VSLL_VI, MASK_VSLL_VI)
+DECLARE_INSN(vsll_vv, MATCH_VSLL_VV, MASK_VSLL_VV)
+DECLARE_INSN(vsll_vx, MATCH_VSLL_VX, MASK_VSLL_VX)
+DECLARE_INSN(vsm_v, MATCH_VSM_V, MASK_VSM_V)
+DECLARE_INSN(vsmul_vv, MATCH_VSMUL_VV, MASK_VSMUL_VV)
+DECLARE_INSN(vsmul_vx, MATCH_VSMUL_VX, MASK_VSMUL_VX)
+DECLARE_INSN(vsoxei1024_v, MATCH_VSOXEI1024_V, MASK_VSOXEI1024_V)
+DECLARE_INSN(vsoxei128_v, MATCH_VSOXEI128_V, MASK_VSOXEI128_V)
+DECLARE_INSN(vsoxei16_v, MATCH_VSOXEI16_V, MASK_VSOXEI16_V)
+DECLARE_INSN(vsoxei256_v, MATCH_VSOXEI256_V, MASK_VSOXEI256_V)
+DECLARE_INSN(vsoxei32_v, MATCH_VSOXEI32_V, MASK_VSOXEI32_V)
+DECLARE_INSN(vsoxei512_v, MATCH_VSOXEI512_V, MASK_VSOXEI512_V)
+DECLARE_INSN(vsoxei64_v, MATCH_VSOXEI64_V, MASK_VSOXEI64_V)
+DECLARE_INSN(vsoxei8_v, MATCH_VSOXEI8_V, MASK_VSOXEI8_V)
+DECLARE_INSN(vsra_vi, MATCH_VSRA_VI, MASK_VSRA_VI)
+DECLARE_INSN(vsra_vv, MATCH_VSRA_VV, MASK_VSRA_VV)
+DECLARE_INSN(vsra_vx, MATCH_VSRA_VX, MASK_VSRA_VX)
+DECLARE_INSN(vsrl_vi, MATCH_VSRL_VI, MASK_VSRL_VI)
+DECLARE_INSN(vsrl_vv, MATCH_VSRL_VV, MASK_VSRL_VV)
+DECLARE_INSN(vsrl_vx, MATCH_VSRL_VX, MASK_VSRL_VX)
+DECLARE_INSN(vsse1024_v, MATCH_VSSE1024_V, MASK_VSSE1024_V)
+DECLARE_INSN(vsse128_v, MATCH_VSSE128_V, MASK_VSSE128_V)
+DECLARE_INSN(vsse16_v, MATCH_VSSE16_V, MASK_VSSE16_V)
+DECLARE_INSN(vsse256_v, MATCH_VSSE256_V, MASK_VSSE256_V)
+DECLARE_INSN(vsse32_v, MATCH_VSSE32_V, MASK_VSSE32_V)
+DECLARE_INSN(vsse512_v, MATCH_VSSE512_V, MASK_VSSE512_V)
+DECLARE_INSN(vsse64_v, MATCH_VSSE64_V, MASK_VSSE64_V)
+DECLARE_INSN(vsse8_v, MATCH_VSSE8_V, MASK_VSSE8_V)
+DECLARE_INSN(vssra_vi, MATCH_VSSRA_VI, MASK_VSSRA_VI)
+DECLARE_INSN(vssra_vv, MATCH_VSSRA_VV, MASK_VSSRA_VV)
+DECLARE_INSN(vssra_vx, MATCH_VSSRA_VX, MASK_VSSRA_VX)
+DECLARE_INSN(vssrl_vi, MATCH_VSSRL_VI, MASK_VSSRL_VI)
+DECLARE_INSN(vssrl_vv, MATCH_VSSRL_VV, MASK_VSSRL_VV)
+DECLARE_INSN(vssrl_vx, MATCH_VSSRL_VX, MASK_VSSRL_VX)
+DECLARE_INSN(vssub_vv, MATCH_VSSUB_VV, MASK_VSSUB_VV)
+DECLARE_INSN(vssub_vx, MATCH_VSSUB_VX, MASK_VSSUB_VX)
+DECLARE_INSN(vssubu_vv, MATCH_VSSUBU_VV, MASK_VSSUBU_VV)
+DECLARE_INSN(vssubu_vx, MATCH_VSSUBU_VX, MASK_VSSUBU_VX)
+DECLARE_INSN(vsub_vv, MATCH_VSUB_VV, MASK_VSUB_VV)
+DECLARE_INSN(vsub_vx, MATCH_VSUB_VX, MASK_VSUB_VX)
+DECLARE_INSN(vsuxei1024_v, MATCH_VSUXEI1024_V, MASK_VSUXEI1024_V)
+DECLARE_INSN(vsuxei128_v, MATCH_VSUXEI128_V, MASK_VSUXEI128_V)
+DECLARE_INSN(vsuxei16_v, MATCH_VSUXEI16_V, MASK_VSUXEI16_V)
+DECLARE_INSN(vsuxei256_v, MATCH_VSUXEI256_V, MASK_VSUXEI256_V)
+DECLARE_INSN(vsuxei32_v, MATCH_VSUXEI32_V, MASK_VSUXEI32_V)
+DECLARE_INSN(vsuxei512_v, MATCH_VSUXEI512_V, MASK_VSUXEI512_V)
+DECLARE_INSN(vsuxei64_v, MATCH_VSUXEI64_V, MASK_VSUXEI64_V)
+DECLARE_INSN(vsuxei8_v, MATCH_VSUXEI8_V, MASK_VSUXEI8_V)
+DECLARE_INSN(vwadd_vv, MATCH_VWADD_VV, MASK_VWADD_VV)
+DECLARE_INSN(vwadd_vx, MATCH_VWADD_VX, MASK_VWADD_VX)
+DECLARE_INSN(vwadd_wv, MATCH_VWADD_WV, MASK_VWADD_WV)
+DECLARE_INSN(vwadd_wx, MATCH_VWADD_WX, MASK_VWADD_WX)
+DECLARE_INSN(vwaddu_vv, MATCH_VWADDU_VV, MASK_VWADDU_VV)
+DECLARE_INSN(vwaddu_vx, MATCH_VWADDU_VX, MASK_VWADDU_VX)
+DECLARE_INSN(vwaddu_wv, MATCH_VWADDU_WV, MASK_VWADDU_WV)
+DECLARE_INSN(vwaddu_wx, MATCH_VWADDU_WX, MASK_VWADDU_WX)
+DECLARE_INSN(vwmacc_vv, MATCH_VWMACC_VV, MASK_VWMACC_VV)
+DECLARE_INSN(vwmacc_vx, MATCH_VWMACC_VX, MASK_VWMACC_VX)
+DECLARE_INSN(vwmaccsu_vv, MATCH_VWMACCSU_VV, MASK_VWMACCSU_VV)
+DECLARE_INSN(vwmaccsu_vx, MATCH_VWMACCSU_VX, MASK_VWMACCSU_VX)
+DECLARE_INSN(vwmaccu_vv, MATCH_VWMACCU_VV, MASK_VWMACCU_VV)
+DECLARE_INSN(vwmaccu_vx, MATCH_VWMACCU_VX, MASK_VWMACCU_VX)
+DECLARE_INSN(vwmaccus_vx, MATCH_VWMACCUS_VX, MASK_VWMACCUS_VX)
+DECLARE_INSN(vwmul_vv, MATCH_VWMUL_VV, MASK_VWMUL_VV)
+DECLARE_INSN(vwmul_vx, MATCH_VWMUL_VX, MASK_VWMUL_VX)
+DECLARE_INSN(vwmulsu_vv, MATCH_VWMULSU_VV, MASK_VWMULSU_VV)
+DECLARE_INSN(vwmulsu_vx, MATCH_VWMULSU_VX, MASK_VWMULSU_VX)
+DECLARE_INSN(vwmulu_vv, MATCH_VWMULU_VV, MASK_VWMULU_VV)
+DECLARE_INSN(vwmulu_vx, MATCH_VWMULU_VX, MASK_VWMULU_VX)
+DECLARE_INSN(vwredsum_vs, MATCH_VWREDSUM_VS, MASK_VWREDSUM_VS)
+DECLARE_INSN(vwredsumu_vs, MATCH_VWREDSUMU_VS, MASK_VWREDSUMU_VS)
+DECLARE_INSN(vwsub_vv, MATCH_VWSUB_VV, MASK_VWSUB_VV)
+DECLARE_INSN(vwsub_vx, MATCH_VWSUB_VX, MASK_VWSUB_VX)
+DECLARE_INSN(vwsub_wv, MATCH_VWSUB_WV, MASK_VWSUB_WV)
+DECLARE_INSN(vwsub_wx, MATCH_VWSUB_WX, MASK_VWSUB_WX)
+DECLARE_INSN(vwsubu_vv, MATCH_VWSUBU_VV, MASK_VWSUBU_VV)
+DECLARE_INSN(vwsubu_vx, MATCH_VWSUBU_VX, MASK_VWSUBU_VX)
+DECLARE_INSN(vwsubu_wv, MATCH_VWSUBU_WV, MASK_VWSUBU_WV)
+DECLARE_INSN(vwsubu_wx, MATCH_VWSUBU_WX, MASK_VWSUBU_WX)
+DECLARE_INSN(vxor_vi, MATCH_VXOR_VI, MASK_VXOR_VI)
+DECLARE_INSN(vxor_vv, MATCH_VXOR_VV, MASK_VXOR_VV)
+DECLARE_INSN(vxor_vx, MATCH_VXOR_VX, MASK_VXOR_VX)
+DECLARE_INSN(vzext_vf2, MATCH_VZEXT_VF2, MASK_VZEXT_VF2)
+DECLARE_INSN(vzext_vf4, MATCH_VZEXT_VF4, MASK_VZEXT_VF4)
+DECLARE_INSN(vzext_vf8, MATCH_VZEXT_VF8, MASK_VZEXT_VF8)
DECLARE_INSN(wext, MATCH_WEXT, MASK_WEXT)
+DECLARE_INSN(wexti, MATCH_WEXTI, MASK_WEXTI)
+DECLARE_INSN(wfi, MATCH_WFI, MASK_WFI)
+DECLARE_INSN(wrs_nto, MATCH_WRS_NTO, MASK_WRS_NTO)
+DECLARE_INSN(wrs_sto, MATCH_WRS_STO, MASK_WRS_STO)
+DECLARE_INSN(xnor, MATCH_XNOR, MASK_XNOR)
+DECLARE_INSN(xor, MATCH_XOR, MASK_XOR)
+DECLARE_INSN(xori, MATCH_XORI, MASK_XORI)
+DECLARE_INSN(xperm16, MATCH_XPERM16, MASK_XPERM16)
+DECLARE_INSN(xperm32, MATCH_XPERM32, MASK_XPERM32)
+DECLARE_INSN(xperm4, MATCH_XPERM4, MASK_XPERM4)
+DECLARE_INSN(xperm8, MATCH_XPERM8, MASK_XPERM8)
DECLARE_INSN(zunpkd810, MATCH_ZUNPKD810, MASK_ZUNPKD810)
DECLARE_INSN(zunpkd820, MATCH_ZUNPKD820, MASK_ZUNPKD820)
DECLARE_INSN(zunpkd830, MATCH_ZUNPKD830, MASK_ZUNPKD830)
DECLARE_INSN(zunpkd831, MATCH_ZUNPKD831, MASK_ZUNPKD831)
DECLARE_INSN(zunpkd832, MATCH_ZUNPKD832, MASK_ZUNPKD832)
-DECLARE_INSN(add32, MATCH_ADD32, MASK_ADD32)
-DECLARE_INSN(cras32, MATCH_CRAS32, MASK_CRAS32)
-DECLARE_INSN(crsa32, MATCH_CRSA32, MASK_CRSA32)
-DECLARE_INSN(kabs32, MATCH_KABS32, MASK_KABS32)
-DECLARE_INSN(kadd32, MATCH_KADD32, MASK_KADD32)
-DECLARE_INSN(kcras32, MATCH_KCRAS32, MASK_KCRAS32)
-DECLARE_INSN(kcrsa32, MATCH_KCRSA32, MASK_KCRSA32)
-DECLARE_INSN(kdmbb16, MATCH_KDMBB16, MASK_KDMBB16)
-DECLARE_INSN(kdmbt16, MATCH_KDMBT16, MASK_KDMBT16)
-DECLARE_INSN(kdmtt16, MATCH_KDMTT16, MASK_KDMTT16)
-DECLARE_INSN(kdmabb16, MATCH_KDMABB16, MASK_KDMABB16)
-DECLARE_INSN(kdmabt16, MATCH_KDMABT16, MASK_KDMABT16)
-DECLARE_INSN(kdmatt16, MATCH_KDMATT16, MASK_KDMATT16)
-DECLARE_INSN(khmbb16, MATCH_KHMBB16, MASK_KHMBB16)
-DECLARE_INSN(khmbt16, MATCH_KHMBT16, MASK_KHMBT16)
-DECLARE_INSN(khmtt16, MATCH_KHMTT16, MASK_KHMTT16)
-DECLARE_INSN(kmabb32, MATCH_KMABB32, MASK_KMABB32)
-DECLARE_INSN(kmabt32, MATCH_KMABT32, MASK_KMABT32)
-DECLARE_INSN(kmatt32, MATCH_KMATT32, MASK_KMATT32)
-DECLARE_INSN(kmaxda32, MATCH_KMAXDA32, MASK_KMAXDA32)
-DECLARE_INSN(kmda32, MATCH_KMDA32, MASK_KMDA32)
-DECLARE_INSN(kmxda32, MATCH_KMXDA32, MASK_KMXDA32)
-DECLARE_INSN(kmads32, MATCH_KMADS32, MASK_KMADS32)
-DECLARE_INSN(kmadrs32, MATCH_KMADRS32, MASK_KMADRS32)
-DECLARE_INSN(kmaxds32, MATCH_KMAXDS32, MASK_KMAXDS32)
-DECLARE_INSN(kmsda32, MATCH_KMSDA32, MASK_KMSDA32)
-DECLARE_INSN(kmsxda32, MATCH_KMSXDA32, MASK_KMSXDA32)
-DECLARE_INSN(ksll32, MATCH_KSLL32, MASK_KSLL32)
-DECLARE_INSN(kslli32, MATCH_KSLLI32, MASK_KSLLI32)
-DECLARE_INSN(kslra32, MATCH_KSLRA32, MASK_KSLRA32)
-DECLARE_INSN(kslra32_u, MATCH_KSLRA32_U, MASK_KSLRA32_U)
-DECLARE_INSN(kstas32, MATCH_KSTAS32, MASK_KSTAS32)
-DECLARE_INSN(kstsa32, MATCH_KSTSA32, MASK_KSTSA32)
-DECLARE_INSN(ksub32, MATCH_KSUB32, MASK_KSUB32)
-DECLARE_INSN(pkbb32, MATCH_PKBB32, MASK_PKBB32)
-DECLARE_INSN(pkbt32, MATCH_PKBT32, MASK_PKBT32)
-DECLARE_INSN(pktt32, MATCH_PKTT32, MASK_PKTT32)
-DECLARE_INSN(pktb32, MATCH_PKTB32, MASK_PKTB32)
-DECLARE_INSN(radd32, MATCH_RADD32, MASK_RADD32)
-DECLARE_INSN(rcras32, MATCH_RCRAS32, MASK_RCRAS32)
-DECLARE_INSN(rcrsa32, MATCH_RCRSA32, MASK_RCRSA32)
-DECLARE_INSN(rstas32, MATCH_RSTAS32, MASK_RSTAS32)
-DECLARE_INSN(rstsa32, MATCH_RSTSA32, MASK_RSTSA32)
-DECLARE_INSN(rsub32, MATCH_RSUB32, MASK_RSUB32)
-DECLARE_INSN(sll32, MATCH_SLL32, MASK_SLL32)
-DECLARE_INSN(slli32, MATCH_SLLI32, MASK_SLLI32)
-DECLARE_INSN(smax32, MATCH_SMAX32, MASK_SMAX32)
-DECLARE_INSN(smbt32, MATCH_SMBT32, MASK_SMBT32)
-DECLARE_INSN(smtt32, MATCH_SMTT32, MASK_SMTT32)
-DECLARE_INSN(smds32, MATCH_SMDS32, MASK_SMDS32)
-DECLARE_INSN(smdrs32, MATCH_SMDRS32, MASK_SMDRS32)
-DECLARE_INSN(smxds32, MATCH_SMXDS32, MASK_SMXDS32)
-DECLARE_INSN(smin32, MATCH_SMIN32, MASK_SMIN32)
-DECLARE_INSN(sra32, MATCH_SRA32, MASK_SRA32)
-DECLARE_INSN(sra32_u, MATCH_SRA32_U, MASK_SRA32_U)
-DECLARE_INSN(srai32, MATCH_SRAI32, MASK_SRAI32)
-DECLARE_INSN(srai32_u, MATCH_SRAI32_U, MASK_SRAI32_U)
-DECLARE_INSN(sraiw_u, MATCH_SRAIW_U, MASK_SRAIW_U)
-DECLARE_INSN(srl32, MATCH_SRL32, MASK_SRL32)
-DECLARE_INSN(srl32_u, MATCH_SRL32_U, MASK_SRL32_U)
-DECLARE_INSN(srli32, MATCH_SRLI32, MASK_SRLI32)
-DECLARE_INSN(srli32_u, MATCH_SRLI32_U, MASK_SRLI32_U)
-DECLARE_INSN(stas32, MATCH_STAS32, MASK_STAS32)
-DECLARE_INSN(stsa32, MATCH_STSA32, MASK_STSA32)
-DECLARE_INSN(sub32, MATCH_SUB32, MASK_SUB32)
-DECLARE_INSN(ukadd32, MATCH_UKADD32, MASK_UKADD32)
-DECLARE_INSN(ukcras32, MATCH_UKCRAS32, MASK_UKCRAS32)
-DECLARE_INSN(ukcrsa32, MATCH_UKCRSA32, MASK_UKCRSA32)
-DECLARE_INSN(ukstas32, MATCH_UKSTAS32, MASK_UKSTAS32)
-DECLARE_INSN(ukstsa32, MATCH_UKSTSA32, MASK_UKSTSA32)
-DECLARE_INSN(uksub32, MATCH_UKSUB32, MASK_UKSUB32)
-DECLARE_INSN(umax32, MATCH_UMAX32, MASK_UMAX32)
-DECLARE_INSN(umin32, MATCH_UMIN32, MASK_UMIN32)
-DECLARE_INSN(uradd32, MATCH_URADD32, MASK_URADD32)
-DECLARE_INSN(urcras32, MATCH_URCRAS32, MASK_URCRAS32)
-DECLARE_INSN(urcrsa32, MATCH_URCRSA32, MASK_URCRSA32)
-DECLARE_INSN(urstas32, MATCH_URSTAS32, MASK_URSTAS32)
-DECLARE_INSN(urstsa32, MATCH_URSTSA32, MASK_URSTSA32)
-DECLARE_INSN(ursub32, MATCH_URSUB32, MASK_URSUB32)
-DECLARE_INSN(vmvnfr_v, MATCH_VMVNFR_V, MASK_VMVNFR_V)
-DECLARE_INSN(vl1r_v, MATCH_VL1R_V, MASK_VL1R_V)
-DECLARE_INSN(vl2r_v, MATCH_VL2R_V, MASK_VL2R_V)
-DECLARE_INSN(vl4r_v, MATCH_VL4R_V, MASK_VL4R_V)
-DECLARE_INSN(vl8r_v, MATCH_VL8R_V, MASK_VL8R_V)
-DECLARE_INSN(vle1_v, MATCH_VLE1_V, MASK_VLE1_V)
-DECLARE_INSN(vse1_v, MATCH_VSE1_V, MASK_VSE1_V)
-DECLARE_INSN(vfredsum_vs, MATCH_VFREDSUM_VS, MASK_VFREDSUM_VS)
-DECLARE_INSN(vfwredsum_vs, MATCH_VFWREDSUM_VS, MASK_VFWREDSUM_VS)
-DECLARE_INSN(vpopc_m, MATCH_VPOPC_M, MASK_VPOPC_M)
-DECLARE_INSN(vmornot_mm, MATCH_VMORNOT_MM, MASK_VMORNOT_MM)
-DECLARE_INSN(vmandnot_mm, MATCH_VMANDNOT_MM, MASK_VMANDNOT_MM)
#endif
#ifdef DECLARE_CSR
DECLARE_CSR(fflags, CSR_FFLAGS)
@@ -4498,11 +4507,16 @@ DECLARE_CSR(sie, CSR_SIE)
DECLARE_CSR(stvec, CSR_STVEC)
DECLARE_CSR(scounteren, CSR_SCOUNTEREN)
DECLARE_CSR(senvcfg, CSR_SENVCFG)
+DECLARE_CSR(sstateen0, CSR_SSTATEEN0)
+DECLARE_CSR(sstateen1, CSR_SSTATEEN1)
+DECLARE_CSR(sstateen2, CSR_SSTATEEN2)
+DECLARE_CSR(sstateen3, CSR_SSTATEEN3)
DECLARE_CSR(sscratch, CSR_SSCRATCH)
DECLARE_CSR(sepc, CSR_SEPC)
DECLARE_CSR(scause, CSR_SCAUSE)
DECLARE_CSR(stval, CSR_STVAL)
DECLARE_CSR(sip, CSR_SIP)
+DECLARE_CSR(stimecmp, CSR_STIMECMP)
DECLARE_CSR(satp, CSR_SATP)
DECLARE_CSR(scontext, CSR_SCONTEXT)
DECLARE_CSR(vsstatus, CSR_VSSTATUS)
@@ -4513,6 +4527,7 @@ DECLARE_CSR(vsepc, CSR_VSEPC)
DECLARE_CSR(vscause, CSR_VSCAUSE)
DECLARE_CSR(vstval, CSR_VSTVAL)
DECLARE_CSR(vsip, CSR_VSIP)
+DECLARE_CSR(vstimecmp, CSR_VSTIMECMP)
DECLARE_CSR(vsatp, CSR_VSATP)
DECLARE_CSR(hstatus, CSR_HSTATUS)
DECLARE_CSR(hedeleg, CSR_HEDELEG)
@@ -4522,6 +4537,10 @@ DECLARE_CSR(htimedelta, CSR_HTIMEDELTA)
DECLARE_CSR(hcounteren, CSR_HCOUNTEREN)
DECLARE_CSR(hgeie, CSR_HGEIE)
DECLARE_CSR(henvcfg, CSR_HENVCFG)
+DECLARE_CSR(hstateen0, CSR_HSTATEEN0)
+DECLARE_CSR(hstateen1, CSR_HSTATEEN1)
+DECLARE_CSR(hstateen2, CSR_HSTATEEN2)
+DECLARE_CSR(hstateen3, CSR_HSTATEEN3)
DECLARE_CSR(htval, CSR_HTVAL)
DECLARE_CSR(hip, CSR_HIP)
DECLARE_CSR(hvip, CSR_HVIP)
@@ -4529,6 +4548,7 @@ DECLARE_CSR(htinst, CSR_HTINST)
DECLARE_CSR(hgatp, CSR_HGATP)
DECLARE_CSR(hcontext, CSR_HCONTEXT)
DECLARE_CSR(hgeip, CSR_HGEIP)
+DECLARE_CSR(scountovf, CSR_SCOUNTOVF)
DECLARE_CSR(utvt, CSR_UTVT)
DECLARE_CSR(unxti, CSR_UNXTI)
DECLARE_CSR(uintstatus, CSR_UINTSTATUS)
@@ -4552,6 +4572,10 @@ DECLARE_CSR(mie, CSR_MIE)
DECLARE_CSR(mtvec, CSR_MTVEC)
DECLARE_CSR(mcounteren, CSR_MCOUNTEREN)
DECLARE_CSR(menvcfg, CSR_MENVCFG)
+DECLARE_CSR(mstateen0, CSR_MSTATEEN0)
+DECLARE_CSR(mstateen1, CSR_MSTATEEN1)
+DECLARE_CSR(mstateen2, CSR_MSTATEEN2)
+DECLARE_CSR(mstateen3, CSR_MSTATEEN3)
DECLARE_CSR(mcountinhibit, CSR_MCOUNTINHIBIT)
DECLARE_CSR(mscratch, CSR_MSCRATCH)
DECLARE_CSR(mepc, CSR_MEPC)
@@ -4718,8 +4742,14 @@ DECLARE_CSR(marchid, CSR_MARCHID)
DECLARE_CSR(mimpid, CSR_MIMPID)
DECLARE_CSR(mhartid, CSR_MHARTID)
DECLARE_CSR(mconfigptr, CSR_MCONFIGPTR)
+DECLARE_CSR(stimecmph, CSR_STIMECMPH)
+DECLARE_CSR(vstimecmph, CSR_VSTIMECMPH)
DECLARE_CSR(htimedeltah, CSR_HTIMEDELTAH)
DECLARE_CSR(henvcfgh, CSR_HENVCFGH)
+DECLARE_CSR(hstateen0h, CSR_HSTATEEN0H)
+DECLARE_CSR(hstateen1h, CSR_HSTATEEN1H)
+DECLARE_CSR(hstateen2h, CSR_HSTATEEN2H)
+DECLARE_CSR(hstateen3h, CSR_HSTATEEN3H)
DECLARE_CSR(cycleh, CSR_CYCLEH)
DECLARE_CSR(timeh, CSR_TIMEH)
DECLARE_CSR(instreth, CSR_INSTRETH)
@@ -4754,6 +4784,39 @@ DECLARE_CSR(hpmcounter30h, CSR_HPMCOUNTER30H)
DECLARE_CSR(hpmcounter31h, CSR_HPMCOUNTER31H)
DECLARE_CSR(mstatush, CSR_MSTATUSH)
DECLARE_CSR(menvcfgh, CSR_MENVCFGH)
+DECLARE_CSR(mstateen0h, CSR_MSTATEEN0H)
+DECLARE_CSR(mstateen1h, CSR_MSTATEEN1H)
+DECLARE_CSR(mstateen2h, CSR_MSTATEEN2H)
+DECLARE_CSR(mstateen3h, CSR_MSTATEEN3H)
+DECLARE_CSR(mhpmevent3h, CSR_MHPMEVENT3H)
+DECLARE_CSR(mhpmevent4h, CSR_MHPMEVENT4H)
+DECLARE_CSR(mhpmevent5h, CSR_MHPMEVENT5H)
+DECLARE_CSR(mhpmevent6h, CSR_MHPMEVENT6H)
+DECLARE_CSR(mhpmevent7h, CSR_MHPMEVENT7H)
+DECLARE_CSR(mhpmevent8h, CSR_MHPMEVENT8H)
+DECLARE_CSR(mhpmevent9h, CSR_MHPMEVENT9H)
+DECLARE_CSR(mhpmevent10h, CSR_MHPMEVENT10H)
+DECLARE_CSR(mhpmevent11h, CSR_MHPMEVENT11H)
+DECLARE_CSR(mhpmevent12h, CSR_MHPMEVENT12H)
+DECLARE_CSR(mhpmevent13h, CSR_MHPMEVENT13H)
+DECLARE_CSR(mhpmevent14h, CSR_MHPMEVENT14H)
+DECLARE_CSR(mhpmevent15h, CSR_MHPMEVENT15H)
+DECLARE_CSR(mhpmevent16h, CSR_MHPMEVENT16H)
+DECLARE_CSR(mhpmevent17h, CSR_MHPMEVENT17H)
+DECLARE_CSR(mhpmevent18h, CSR_MHPMEVENT18H)
+DECLARE_CSR(mhpmevent19h, CSR_MHPMEVENT19H)
+DECLARE_CSR(mhpmevent20h, CSR_MHPMEVENT20H)
+DECLARE_CSR(mhpmevent21h, CSR_MHPMEVENT21H)
+DECLARE_CSR(mhpmevent22h, CSR_MHPMEVENT22H)
+DECLARE_CSR(mhpmevent23h, CSR_MHPMEVENT23H)
+DECLARE_CSR(mhpmevent24h, CSR_MHPMEVENT24H)
+DECLARE_CSR(mhpmevent25h, CSR_MHPMEVENT25H)
+DECLARE_CSR(mhpmevent26h, CSR_MHPMEVENT26H)
+DECLARE_CSR(mhpmevent27h, CSR_MHPMEVENT27H)
+DECLARE_CSR(mhpmevent28h, CSR_MHPMEVENT28H)
+DECLARE_CSR(mhpmevent29h, CSR_MHPMEVENT29H)
+DECLARE_CSR(mhpmevent30h, CSR_MHPMEVENT30H)
+DECLARE_CSR(mhpmevent31h, CSR_MHPMEVENT31H)
DECLARE_CSR(mseccfgh, CSR_MSECCFGH)
DECLARE_CSR(mcycleh, CSR_MCYCLEH)
DECLARE_CSR(minstreth, CSR_MINSTRETH)
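
The DECLARE_INSN and DECLARE_CSR runs above are X-macro lists: encoding.h supplies only the invocations, and a client defines the macro before including the header to stamp out a table, a switch, or whatever it needs. A minimal sketch of that consumption pattern (the name-table use here is illustrative, not something this commit adds):

#include <map>
#include <string>

// Build a CSR-number -> name table by defining the hook and including the
// header; the DECLARE_CSR block sits outside encoding.h's include guard
// precisely so repeated inclusion like this works.
static std::map<int, std::string> build_csr_names() {
  std::map<int, std::string> names;
#define DECLARE_CSR(name, num) names[num] = #name;
#include "encoding.h"
#undef DECLARE_CSR
  return names;
}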
diff --git a/riscv/entropy_source.h b/riscv/entropy_source.h
index 47823ff..3a3c8e6 100644
--- a/riscv/entropy_source.h
+++ b/riscv/entropy_source.h
@@ -3,6 +3,7 @@
#include <iostream>
#include "internals.h"
+#include "common.h"
//
// Used to model the cryptography extension entropy source.
@@ -30,13 +31,12 @@ public:
// seed register
// ------------------------------------------------------------
- void set_seed(reg_t val) {
+ void set_seed(reg_t UNUSED val) {
// Always ignore writes to seed.
// This CSR is strictly read only. It occupies a RW CSR address
// to handle the side-effect of the changing seed value on a read.
}
-
//
// The format of seed is described in Section 4.1 of
// the scalar cryptography specification.
@@ -50,27 +50,27 @@ public:
// the bare minimum.
uint32_t return_status = OPST_ES16;
- if(return_status == OPST_ES16) {
+ if (return_status == OPST_ES16) {
- // Add some sampled entropy into the low 16 bits
- uint16_t entropy = this -> get_two_random_bytes();
- result |= entropy;
+ // Add some sampled entropy into the low 16 bits
+        uint16_t entropy = this->get_two_random_bytes();
+ result |= entropy;
- } else if(return_status == OPST_BIST) {
+ } else if (return_status == OPST_BIST) {
- // Do nothing.
+ // Do nothing.
- } else if(return_status == OPST_WAIT) {
+ } else if (return_status == OPST_WAIT) {
- // Do nothing.
+ // Do nothing.
- } else if(return_status == OPST_DEAD) {
+ } else if (return_status == OPST_DEAD) {
- // Do nothing. Stay dead.
+ // Do nothing. Stay dead.
} else {
- // Unreachable.
+ // Unreachable.
}
@@ -93,25 +93,25 @@ public:
// Read two random bytes from the entropy source file.
uint16_t get_two_random_bytes() {
- std::ifstream fh(this -> randomness_source, std::ios::binary);
+    std::ifstream fh(this->randomness_source, std::ios::binary);
- if(fh.is_open()) {
+ if (fh.is_open()) {
- uint16_t random_bytes;
+ uint16_t random_bytes;
- fh.read((char*)(&random_bytes), 2);
+ fh.read((char*)(&random_bytes), 2);
- fh.close();
+ fh.close();
- return random_bytes;
+ return random_bytes;
- } else {
+ } else {
- fprintf(stderr, "Could not open randomness source file:\n\t");
- fprintf(stderr, "%s", randomness_source.c_str());
- abort();
+ fprintf(stderr, "Could not open randomness source file:\n\t");
+ fprintf(stderr, "%s", randomness_source.c_str());
+ abort();
- }
+ }
}
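
For context on get_seed's status handling above: the seed CSR packs an opcode-status field in its top two bits and, when that field reads ES16, sixteen entropy bits in its low half. A hedged sketch of the software-side polling loop this models (field encodings assumed from Section 4.1 of the scalar cryptography specification; not simulator code):

#include <cstdint>

enum : uint32_t {
  OPST_BIST = 0u << 30,  // assumed encodings for seed[31:30]
  OPST_WAIT = 1u << 30,
  OPST_ES16 = 2u << 30,
  OPST_DEAD = 3u << 30,
};

uint16_t poll_seed(uint32_t (*read_seed)()) {
  for (;;) {
    uint32_t s = read_seed();          // one CSR read, modeled by get_seed()
    if ((s & (3u << 30)) == OPST_ES16)
      return (uint16_t)(s & 0xFFFF);   // low 16 bits hold fresh entropy
    // BIST/WAIT: try again; DEAD would need real error handling.
  }
}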
diff --git a/riscv/execute.cc b/riscv/execute.cc
index 98e3cdb..f0bb946 100644
--- a/riscv/execute.cc
+++ b/riscv/execute.cc
@@ -133,7 +133,7 @@ static void commit_log_print_insn(processor_t *p, reg_t pc, insn_t insn)
if (prefix == 'c')
fprintf(log_file, " c%d_%s ", rd, csr_name(rd));
else
- fprintf(log_file, " %c%2d ", prefix, rd);
+ fprintf(log_file, " %c%-2d ", prefix, rd);
if (is_vreg)
commit_log_print_value(log_file, size, &p->VU.elt<uint8_t>(rd, 0));
else
@@ -154,13 +154,9 @@ static void commit_log_print_insn(processor_t *p, reg_t pc, insn_t insn)
}
fprintf(log_file, "\n");
}
-#else
-static void commit_log_reset(processor_t* p) {}
-static void commit_log_stash_privilege(processor_t* p) {}
-static void commit_log_print_insn(processor_t* p, reg_t pc, insn_t insn) {}
#endif
-inline void processor_t::update_histogram(reg_t pc)
+inline void processor_t::update_histogram(reg_t UNUSED pc)
{
#ifdef RISCV_ENABLE_HISTOGRAM
pc_histogram[pc]++;
@@ -172,8 +168,10 @@ inline void processor_t::update_histogram(reg_t pc)
// function calls.
static inline reg_t execute_insn(processor_t* p, reg_t pc, insn_fetch_t fetch)
{
+#ifdef RISCV_ENABLE_COMMITLOG
commit_log_reset(p);
commit_log_stash_privilege(p);
+#endif
reg_t npc;
try {
@@ -238,19 +236,18 @@ void processor_t::step(size_t n)
mmu_t* _mmu = mmu;
#define advance_pc() \
- if (unlikely(invalid_pc(pc))) { \
- switch (pc) { \
- case PC_SERIALIZE_BEFORE: state.serialized = true; break; \
- case PC_SERIALIZE_AFTER: ++instret; break; \
- case PC_SERIALIZE_WFI: n = ++instret; break; \
- default: abort(); \
- } \
- pc = state.pc; \
- break; \
- } else { \
- state.pc = pc; \
- instret++; \
- }
+ if (unlikely(invalid_pc(pc))) { \
+ switch (pc) { \
+ case PC_SERIALIZE_BEFORE: state.serialized = true; break; \
+ case PC_SERIALIZE_AFTER: ++instret; break; \
+ default: abort(); \
+ } \
+ pc = state.pc; \
+ break; \
+ } else { \
+ state.pc = pc; \
+ instret++; \
+ }
try
{
@@ -312,15 +309,6 @@ void processor_t::step(size_t n)
catch (triggers::matched_t& t)
{
if (mmu->matched_trigger) {
- // This exception came from the MMU. That means the instruction hasn't
- // fully executed yet. We start it again, but this time it won't throw
- // an exception because matched_trigger is already set. (All memory
- // instructions are idempotent so restarting is safe.)
-
- insn_fetch_t fetch = mmu->load_insn(pc);
- pc = execute_insn(this, pc, fetch);
- advance_pc();
-
delete mmu->matched_trigger;
mmu->matched_trigger = NULL;
}
@@ -337,6 +325,10 @@ void processor_t::step(size_t n)
abort();
}
}
+  catch (trap_debug_mode&)
+ {
+ enter_debug_mode(DCSR_CAUSE_SWBP);
+ }
catch (wait_for_interrupt_t &t)
{
// Return to the outer simulation loop, which gives other devices/harts a
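
Two notes on the execute.cc hunks above. The commit-log stubs are gone, so the calls in execute_insn are now guarded by the same RISCV_ENABLE_COMMITLOG ifdef that guards their definitions. And advance_pc() loses its PC_SERIALIZE_WFI case because WFI is now reported through the wait_for_interrupt_t exception caught here instead of a sentinel PC. The sentinel scheme itself survives; a minimal sketch of the idea (the concrete values are assumptions, not quoted from this diff):

#include <cstdint>

// Instruction addresses are at least 2-byte aligned, so odd values can never
// be real PCs and are free to serve as control-flow flags.
constexpr uint64_t PC_SERIALIZE_BEFORE = 3;  // assumed values
constexpr uint64_t PC_SERIALIZE_AFTER  = 5;
constexpr bool invalid_pc(uint64_t pc) { return pc & 1; }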
diff --git a/riscv/extension.h b/riscv/extension.h
index d1e847d..de6ece3 100644
--- a/riscv/extension.h
+++ b/riscv/extension.h
@@ -15,7 +15,7 @@ class extension_t
virtual std::vector<disasm_insn_t*> get_disasms() = 0;
virtual const char* name() = 0;
virtual void reset() {};
- virtual void set_debug(bool value) {};
+ virtual void set_debug(bool UNUSED value) {}
virtual ~extension_t();
void set_processor(processor_t* _p) { p = _p; }
diff --git a/riscv/insn_macros.h b/riscv/insn_macros.h
index 2fdfced..8ec8f76 100644
--- a/riscv/insn_macros.h
+++ b/riscv/insn_macros.h
@@ -4,6 +4,6 @@
// These conflict with Boost headers so can't be included from insn_template.h
#define P (*p)
-#define require(x) do { if (unlikely(!(x))) throw trap_illegal_instruction(insn.bits()); } while (0)
+#define require(x) (unlikely(!(x)) ? throw trap_illegal_instruction(insn.bits()) : (void) 0)
#endif
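
Rewriting require() from a do/while statement into a conditional expression keeps every existing statement use compiling unchanged while also making the macro legal in expression position; a throw is itself a valid expression in C++, so the ternary form is well-formed and still has type void. A hedged sketch of the difference (the comma-expression use is illustrative; the aes64ks1i.h hunk below shows the real statement use):

// Statement form: usable only where a statement may appear.
#define require_stmt(x) do { if (!(x)) throw trap_illegal_instruction(insn.bits()); } while (0)

// Expression form: also composable inside larger expressions.
#define require_expr(x) (!(x) ? throw trap_illegal_instruction(insn.bits()) : (void)0)

// e.g. (illustrative): reg_t v = (require_expr(idx < 32), STATE.XPR[idx]);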
diff --git a/riscv/insns/aes64ks1i.h b/riscv/insns/aes64ks1i.h
index fff7109..c7354d6 100644
--- a/riscv/insns/aes64ks1i.h
+++ b/riscv/insns/aes64ks1i.h
@@ -10,16 +10,13 @@ uint8_t round_consts [10] = {
uint8_t enc_rcon = insn.rcon() ;
-if(enc_rcon > 0xA) {
- // Invalid opcode.
- throw trap_illegal_instruction(0);
-}
+require(enc_rcon <= 0xA);
uint32_t temp = (RS1 >> 32) & 0xFFFFFFFF ;
uint8_t rcon = 0 ;
uint64_t result ;
-if(enc_rcon != 0xA) {
+if (enc_rcon != 0xA) {
temp = (temp >> 8) | (temp << 24); // Rotate right by 8
rcon = round_consts[enc_rcon];
}
diff --git a/riscv/insns/aes_common.h b/riscv/insns/aes_common.h
index 9cc353c..4f3f618 100644
--- a/riscv/insns/aes_common.h
+++ b/riscv/insns/aes_common.h
@@ -1,5 +1,5 @@
-uint8_t AES_ENC_SBOX[]= {
+static uint8_t UNUSED AES_ENC_SBOX[] = {
0x63, 0x7C, 0x77, 0x7B, 0xF2, 0x6B, 0x6F, 0xC5,
0x30, 0x01, 0x67, 0x2B, 0xFE, 0xD7, 0xAB, 0x76,
0xCA, 0x82, 0xC9, 0x7D, 0xFA, 0x59, 0x47, 0xF0,
@@ -34,7 +34,7 @@ uint8_t AES_ENC_SBOX[]= {
0x41, 0x99, 0x2D, 0x0F, 0xB0, 0x54, 0xBB, 0x16
};
-uint8_t AES_DEC_SBOX[] = {
+static uint8_t UNUSED AES_DEC_SBOX[] = {
0x52, 0x09, 0x6A, 0xD5, 0x30, 0x36, 0xA5, 0x38,
0xBF, 0x40, 0xA3, 0x9E, 0x81, 0xF3, 0xD7, 0xFB,
0x7C, 0xE3, 0x39, 0x82, 0x9B, 0x2F, 0xFF, 0x87,
diff --git a/riscv/insns/amoswap_d.h b/riscv/insns/amoswap_d.h
index e1bffde..f9188ea 100644
--- a/riscv/insns/amoswap_d.h
+++ b/riscv/insns/amoswap_d.h
@@ -1,3 +1,3 @@
require_extension('A');
require_rv64;
-WRITE_RD(MMU.amo_uint64(RS1, [&](uint64_t lhs) { return RS2; }));
+WRITE_RD(MMU.amo_uint64(RS1, [&](uint64_t UNUSED lhs) { return RS2; }));
diff --git a/riscv/insns/amoswap_w.h b/riscv/insns/amoswap_w.h
index 0f78369..151f095 100644
--- a/riscv/insns/amoswap_w.h
+++ b/riscv/insns/amoswap_w.h
@@ -1,2 +1,2 @@
require_extension('A');
-WRITE_RD(sext32(MMU.amo_uint32(RS1, [&](uint32_t lhs) { return RS2; })));
+WRITE_RD(sext32(MMU.amo_uint32(RS1, [&](uint32_t UNUSED lhs) { return RS2; })));
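
In both amoswap hunks the lambda's parameter is genuinely unused, since a swap stores RS2 regardless of the loaded value; UNUSED just keeps the build warning-clean. The read-modify-write shape in which MMU.amo_uint32/amo_uint64 apply the lambda is roughly this (a sketch of the pattern, not the mmu_t source):

// Sketch: atomically load the old value, store f(old), return old.
template <typename T, typename F>
static T amo_apply(T* addr, F f) {
  T old = *addr;      // modeled as one atomic step in the simulator
  *addr = f(old);     // amoswap's f ignores old and returns the new value
  return old;
}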
diff --git a/riscv/insns/beq.h b/riscv/insns/beq.h
index fd7e061..3d2c975 100644
--- a/riscv/insns/beq.h
+++ b/riscv/insns/beq.h
@@ -1,2 +1,2 @@
-if(RS1 == RS2)
+if (RS1 == RS2)
set_pc(BRANCH_TARGET);
diff --git a/riscv/insns/bge.h b/riscv/insns/bge.h
index da0c68e..b2421c2 100644
--- a/riscv/insns/bge.h
+++ b/riscv/insns/bge.h
@@ -1,2 +1,2 @@
-if(sreg_t(RS1) >= sreg_t(RS2))
+if (sreg_t(RS1) >= sreg_t(RS2))
set_pc(BRANCH_TARGET);
diff --git a/riscv/insns/bgeu.h b/riscv/insns/bgeu.h
index d764a34..f09b7f4 100644
--- a/riscv/insns/bgeu.h
+++ b/riscv/insns/bgeu.h
@@ -1,2 +1,2 @@
-if(RS1 >= RS2)
+if (RS1 >= RS2)
set_pc(BRANCH_TARGET);
diff --git a/riscv/insns/blt.h b/riscv/insns/blt.h
index c54fb76..cad064b 100644
--- a/riscv/insns/blt.h
+++ b/riscv/insns/blt.h
@@ -1,2 +1,2 @@
-if(sreg_t(RS1) < sreg_t(RS2))
+if (sreg_t(RS1) < sreg_t(RS2))
set_pc(BRANCH_TARGET);
diff --git a/riscv/insns/bltu.h b/riscv/insns/bltu.h
index ff75e8a..b7c3300 100644
--- a/riscv/insns/bltu.h
+++ b/riscv/insns/bltu.h
@@ -1,2 +1,2 @@
-if(RS1 < RS2)
+if (RS1 < RS2)
set_pc(BRANCH_TARGET);
diff --git a/riscv/insns/bne.h b/riscv/insns/bne.h
index 1e6cb7c..e832fa1 100644
--- a/riscv/insns/bne.h
+++ b/riscv/insns/bne.h
@@ -1,2 +1,2 @@
-if(RS1 != RS2)
+if (RS1 != RS2)
set_pc(BRANCH_TARGET);
diff --git a/riscv/insns/c_ebreak.h b/riscv/insns/c_ebreak.h
index 7d04f46..c8cc1f5 100644
--- a/riscv/insns/c_ebreak.h
+++ b/riscv/insns/c_ebreak.h
@@ -1,2 +1,9 @@
require_extension('C');
-throw trap_breakpoint(STATE.v, pc);
+if (!STATE.debug_mode &&
+ ((STATE.prv == PRV_M && STATE.dcsr->ebreakm) ||
+ (STATE.prv == PRV_S && STATE.dcsr->ebreaks) ||
+ (STATE.prv == PRV_U && STATE.dcsr->ebreaku))) {
+ throw trap_debug_mode();
+} else {
+ throw trap_breakpoint(STATE.v, pc);
+}
diff --git a/riscv/insns/div.h b/riscv/insns/div.h
index 9cbe8d6..fb62437 100644
--- a/riscv/insns/div.h
+++ b/riscv/insns/div.h
@@ -1,9 +1,9 @@
require_extension('M');
sreg_t lhs = sext_xlen(RS1);
sreg_t rhs = sext_xlen(RS2);
-if(rhs == 0)
+if (rhs == 0)
WRITE_RD(UINT64_MAX);
-else if(lhs == INT64_MIN && rhs == -1)
+else if (lhs == INT64_MIN && rhs == -1)
WRITE_RD(lhs);
else
WRITE_RD(sext_xlen(lhs / rhs));
diff --git a/riscv/insns/divu.h b/riscv/insns/divu.h
index 31d7585..ed05818 100644
--- a/riscv/insns/divu.h
+++ b/riscv/insns/divu.h
@@ -1,7 +1,7 @@
require_extension('M');
reg_t lhs = zext_xlen(RS1);
reg_t rhs = zext_xlen(RS2);
-if(rhs == 0)
+if (rhs == 0)
WRITE_RD(UINT64_MAX);
else
WRITE_RD(sext_xlen(lhs / rhs));
diff --git a/riscv/insns/divuw.h b/riscv/insns/divuw.h
index e127619..bc7e9d2 100644
--- a/riscv/insns/divuw.h
+++ b/riscv/insns/divuw.h
@@ -2,7 +2,7 @@ require_extension('M');
require_rv64;
reg_t lhs = zext32(RS1);
reg_t rhs = zext32(RS2);
-if(rhs == 0)
+if (rhs == 0)
WRITE_RD(UINT64_MAX);
else
WRITE_RD(sext32(lhs / rhs));
diff --git a/riscv/insns/divw.h b/riscv/insns/divw.h
index 11be17e..54409b0 100644
--- a/riscv/insns/divw.h
+++ b/riscv/insns/divw.h
@@ -2,7 +2,7 @@ require_extension('M');
require_rv64;
sreg_t lhs = sext32(RS1);
sreg_t rhs = sext32(RS2);
-if(rhs == 0)
+if (rhs == 0)
WRITE_RD(UINT64_MAX);
else
WRITE_RD(sext32(lhs / rhs));
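
The div/divu/divuw/divw hunks are whitespace-only, but the semantics they encode are worth spelling out: RISC-V M-extension division never traps; dividing by zero returns all ones, and the one signed overflow case (most-negative dividend by -1) returns the dividend. A standalone illustration of the RV64 DIV rule (not simulator code):

#include <cstdint>

int64_t rv_div(int64_t lhs, int64_t rhs) {
  if (rhs == 0) return -1;                        // all bits set, per spec
  if (lhs == INT64_MIN && rhs == -1) return lhs;  // overflow: return dividend
  return lhs / rhs;                               // ordinary case
}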
diff --git a/riscv/insns/ebreak.h b/riscv/insns/ebreak.h
index 9f3d44d..227ab93 100644
--- a/riscv/insns/ebreak.h
+++ b/riscv/insns/ebreak.h
@@ -1 +1,8 @@
-throw trap_breakpoint(STATE.v, pc);
+if (!STATE.debug_mode &&
+ ((STATE.prv == PRV_M && STATE.dcsr->ebreakm) ||
+ (STATE.prv == PRV_S && STATE.dcsr->ebreaks) ||
+ (STATE.prv == PRV_U && STATE.dcsr->ebreaku))) {
+ throw trap_debug_mode();
+} else {
+ throw trap_breakpoint(STATE.v, pc);
+}
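
ebreak and c.ebreak now share one gate: outside debug mode, dcsr's per-privilege enable bits decide whether a breakpoint enters debug mode (trap_debug_mode, caught in execute.cc above) or raises the ordinary breakpoint trap. Factored out, the predicate both hunks implement looks like this (a hypothetical helper, not part of the commit):

// True when ebreak should enter debug mode rather than raise
// trap_breakpoint, per dcsr.ebreakm/ebreaks/ebreaku.
static bool ebreak_enters_debug(const state_t& s) {
  return !s.debug_mode &&
         ((s.prv == PRV_M && s.dcsr->ebreakm) ||
          (s.prv == PRV_S && s.dcsr->ebreaks) ||
          (s.prv == PRV_U && s.dcsr->ebreaku));
}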
diff --git a/riscv/insns/fadd_d.h b/riscv/insns/fadd_d.h
index 4a436e2..9bfff5f 100644
--- a/riscv/insns/fadd_d.h
+++ b/riscv/insns/fadd_d.h
@@ -1,5 +1,5 @@
-require_extension('D');
+require_either_extension('D', EXT_ZDINX);
require_fp;
softfloat_roundingMode = RM;
-WRITE_FRD(f64_add(f64(FRS1), f64(FRS2)));
+WRITE_FRD_D(f64_add(FRS1_D, FRS2_D));
set_fp_exceptions;
diff --git a/riscv/insns/fadd_h.h b/riscv/insns/fadd_h.h
index 2b646ae..f57e5fa 100644
--- a/riscv/insns/fadd_h.h
+++ b/riscv/insns/fadd_h.h
@@ -1,5 +1,5 @@
-require_extension(EXT_ZFH);
+require_either_extension(EXT_ZFH, EXT_ZHINX);
require_fp;
softfloat_roundingMode = RM;
-WRITE_FRD(f16_add(f16(FRS1), f16(FRS2)));
+WRITE_FRD_H(f16_add(FRS1_H, FRS2_H));
set_fp_exceptions;
diff --git a/riscv/insns/fadd_s.h b/riscv/insns/fadd_s.h
index cc18d58..7a40b1b 100644
--- a/riscv/insns/fadd_s.h
+++ b/riscv/insns/fadd_s.h
@@ -1,5 +1,5 @@
-require_extension('F');
+require_either_extension('F', EXT_ZFINX);
require_fp;
softfloat_roundingMode = RM;
-WRITE_FRD(f32_add(f32(FRS1), f32(FRS2)));
+WRITE_FRD_F(f32_add(FRS1_F, FRS2_F));
set_fp_exceptions;
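
The floating-point hunks from here to the end of the section all follow the fadd pattern above: require_either_extension admits the Zfinx-family twin of each extension (Zfinx, Zdinx, and Zhinx keep FP operands in the integer register file), and width-suffixed accessors (FRS1_F, FRS1_D, FRS1_H, WRITE_FRD_F, ...) fetch and write operands from whichever register file the active mode uses. A sketch of what the new require presumably expands to, by analogy with spike's existing require_extension (an assumption, not quoted from this diff):

// Accept either the classic extension or its x-register (Zfinx-style) twin.
#define require_either_extension(A, B) \
  require(p->extension_enabled(A) || p->extension_enabled(B))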
diff --git a/riscv/insns/fclass_d.h b/riscv/insns/fclass_d.h
index 9456123..a355062 100644
--- a/riscv/insns/fclass_d.h
+++ b/riscv/insns/fclass_d.h
@@ -1,3 +1,3 @@
-require_extension('D');
+require_either_extension('D', EXT_ZDINX);
require_fp;
-WRITE_RD(f64_classify(f64(FRS1)));
+WRITE_RD(f64_classify(FRS1_D));
diff --git a/riscv/insns/fclass_h.h b/riscv/insns/fclass_h.h
index 066a2d2..2638ac8 100644
--- a/riscv/insns/fclass_h.h
+++ b/riscv/insns/fclass_h.h
@@ -1,3 +1,3 @@
-require_extension(EXT_ZFH);
+require_either_extension(EXT_ZFH, EXT_ZHINX);
require_fp;
-WRITE_RD(f16_classify(f16(FRS1)));
+WRITE_RD(f16_classify(FRS1_H));
diff --git a/riscv/insns/fclass_s.h b/riscv/insns/fclass_s.h
index a392db8..3d529ad 100644
--- a/riscv/insns/fclass_s.h
+++ b/riscv/insns/fclass_s.h
@@ -1,3 +1,3 @@
-require_extension('F');
+require_either_extension('F', EXT_ZFINX);
require_fp;
-WRITE_RD(f32_classify(f32(FRS1)));
+WRITE_RD(f32_classify(FRS1_F));
diff --git a/riscv/insns/fcvt_d_h.h b/riscv/insns/fcvt_d_h.h
index 04e9ff4..061a271 100644
--- a/riscv/insns/fcvt_d_h.h
+++ b/riscv/insns/fcvt_d_h.h
@@ -1,6 +1,6 @@
-require_extension(EXT_ZFHMIN);
-require_extension('D');
+require_either_extension(EXT_ZFHMIN, EXT_ZHINXMIN);
+require_either_extension('D', EXT_ZDINX);
require_fp;
softfloat_roundingMode = RM;
-WRITE_FRD(f16_to_f64(f16(FRS1)));
+WRITE_FRD_D(f16_to_f64(FRS1_H));
set_fp_exceptions;
diff --git a/riscv/insns/fcvt_d_l.h b/riscv/insns/fcvt_d_l.h
index 08716cf..7788f1f 100644
--- a/riscv/insns/fcvt_d_l.h
+++ b/riscv/insns/fcvt_d_l.h
@@ -1,6 +1,6 @@
-require_extension('D');
+require_either_extension('D', EXT_ZDINX);
require_rv64;
require_fp;
softfloat_roundingMode = RM;
-WRITE_FRD(i64_to_f64(RS1));
+WRITE_FRD_D(i64_to_f64(RS1));
set_fp_exceptions;
diff --git a/riscv/insns/fcvt_d_lu.h b/riscv/insns/fcvt_d_lu.h
index 306d7fe..edb694f 100644
--- a/riscv/insns/fcvt_d_lu.h
+++ b/riscv/insns/fcvt_d_lu.h
@@ -1,6 +1,6 @@
-require_extension('D');
+require_either_extension('D', EXT_ZDINX);
require_rv64;
require_fp;
softfloat_roundingMode = RM;
-WRITE_FRD(ui64_to_f64(RS1));
+WRITE_FRD_D(ui64_to_f64(RS1));
set_fp_exceptions;
diff --git a/riscv/insns/fcvt_d_s.h b/riscv/insns/fcvt_d_s.h
index 5f805b0..8039e94 100644
--- a/riscv/insns/fcvt_d_s.h
+++ b/riscv/insns/fcvt_d_s.h
@@ -1,5 +1,5 @@
-require_extension('D');
+require_either_extension('D', EXT_ZDINX);
require_fp;
softfloat_roundingMode = RM;
-WRITE_FRD(f32_to_f64(f32(FRS1)));
+WRITE_FRD_D(f32_to_f64(FRS1_F));
set_fp_exceptions;
diff --git a/riscv/insns/fcvt_d_w.h b/riscv/insns/fcvt_d_w.h
index 4c4861c..e3375fa 100644
--- a/riscv/insns/fcvt_d_w.h
+++ b/riscv/insns/fcvt_d_w.h
@@ -1,5 +1,5 @@
-require_extension('D');
+require_either_extension('D', EXT_ZDINX);
require_fp;
softfloat_roundingMode = RM;
-WRITE_FRD(i32_to_f64((int32_t)RS1));
+WRITE_FRD_D(i32_to_f64((int32_t)RS1));
set_fp_exceptions;
diff --git a/riscv/insns/fcvt_d_wu.h b/riscv/insns/fcvt_d_wu.h
index 1dbf218..d903561 100644
--- a/riscv/insns/fcvt_d_wu.h
+++ b/riscv/insns/fcvt_d_wu.h
@@ -1,5 +1,5 @@
-require_extension('D');
+require_either_extension('D', EXT_ZDINX);
require_fp;
softfloat_roundingMode = RM;
-WRITE_FRD(ui32_to_f64((uint32_t)RS1));
+WRITE_FRD_D(ui32_to_f64((uint32_t)RS1));
set_fp_exceptions;
diff --git a/riscv/insns/fcvt_h_d.h b/riscv/insns/fcvt_h_d.h
index e9987b7..e06b1a5 100644
--- a/riscv/insns/fcvt_h_d.h
+++ b/riscv/insns/fcvt_h_d.h
@@ -1,6 +1,6 @@
-require_extension(EXT_ZFHMIN);
-require_extension('D');
+require_either_extension(EXT_ZFHMIN, EXT_ZHINXMIN);
+require_either_extension('D', EXT_ZDINX);
require_fp;
softfloat_roundingMode = RM;
-WRITE_FRD(f64_to_f16(f64(FRS1)));
+WRITE_FRD_H(f64_to_f16(FRS1_D));
set_fp_exceptions;
diff --git a/riscv/insns/fcvt_h_l.h b/riscv/insns/fcvt_h_l.h
index 39178c2..31e8a1e 100644
--- a/riscv/insns/fcvt_h_l.h
+++ b/riscv/insns/fcvt_h_l.h
@@ -1,6 +1,6 @@
-require_extension(EXT_ZFH);
+require_either_extension(EXT_ZFH, EXT_ZHINX);
require_rv64;
require_fp;
softfloat_roundingMode = RM;
-WRITE_FRD(i64_to_f16(RS1));
+WRITE_FRD_H(i64_to_f16(RS1));
set_fp_exceptions;
diff --git a/riscv/insns/fcvt_h_lu.h b/riscv/insns/fcvt_h_lu.h
index a872c48..189b160 100644
--- a/riscv/insns/fcvt_h_lu.h
+++ b/riscv/insns/fcvt_h_lu.h
@@ -1,6 +1,6 @@
-require_extension(EXT_ZFH);
+require_either_extension(EXT_ZFH, EXT_ZHINX);
require_rv64;
require_fp;
softfloat_roundingMode = RM;
-WRITE_FRD(ui64_to_f16(RS1));
+WRITE_FRD_H(ui64_to_f16(RS1));
set_fp_exceptions;
diff --git a/riscv/insns/fcvt_h_s.h b/riscv/insns/fcvt_h_s.h
index ce39d81..57ba005 100644
--- a/riscv/insns/fcvt_h_s.h
+++ b/riscv/insns/fcvt_h_s.h
@@ -1,5 +1,5 @@
-require_extension(EXT_ZFHMIN);
+require_either_extension(EXT_ZFHMIN, EXT_ZHINXMIN);
require_fp;
softfloat_roundingMode = RM;
-WRITE_FRD(f32_to_f16(f32(FRS1)));
+WRITE_FRD_H(f32_to_f16(FRS1_F));
set_fp_exceptions;
diff --git a/riscv/insns/fcvt_h_w.h b/riscv/insns/fcvt_h_w.h
index c082454..de4cbe5 100644
--- a/riscv/insns/fcvt_h_w.h
+++ b/riscv/insns/fcvt_h_w.h
@@ -1,5 +1,5 @@
-require_extension(EXT_ZFH);
+require_either_extension(EXT_ZFH, EXT_ZHINX);
require_fp;
softfloat_roundingMode = RM;
-WRITE_FRD(i32_to_f16((int32_t)RS1));
+WRITE_FRD_H(i32_to_f16((int32_t)RS1));
set_fp_exceptions;
diff --git a/riscv/insns/fcvt_h_wu.h b/riscv/insns/fcvt_h_wu.h
index 9f2f5f6..230c354 100644
--- a/riscv/insns/fcvt_h_wu.h
+++ b/riscv/insns/fcvt_h_wu.h
@@ -1,5 +1,5 @@
-require_extension(EXT_ZFH);
+require_either_extension(EXT_ZFH, EXT_ZHINX);
require_fp;
softfloat_roundingMode = RM;
-WRITE_FRD(ui32_to_f16((uint32_t)RS1));
+WRITE_FRD_H(ui32_to_f16((uint32_t)RS1));
set_fp_exceptions;
diff --git a/riscv/insns/fcvt_l_d.h b/riscv/insns/fcvt_l_d.h
index c09e6c4..f2374d2 100644
--- a/riscv/insns/fcvt_l_d.h
+++ b/riscv/insns/fcvt_l_d.h
@@ -1,6 +1,6 @@
-require_extension('D');
+require_either_extension('D', EXT_ZDINX);
require_rv64;
require_fp;
softfloat_roundingMode = RM;
-WRITE_RD(f64_to_i64(f64(FRS1), RM, true));
+WRITE_RD(f64_to_i64(FRS1_D, RM, true));
set_fp_exceptions;
diff --git a/riscv/insns/fcvt_l_h.h b/riscv/insns/fcvt_l_h.h
index 5a1fea8..3b63027 100644
--- a/riscv/insns/fcvt_l_h.h
+++ b/riscv/insns/fcvt_l_h.h
@@ -1,6 +1,6 @@
-require_extension(EXT_ZFH);
+require_either_extension(EXT_ZFH, EXT_ZHINX);
require_rv64;
require_fp;
softfloat_roundingMode = RM;
-WRITE_RD(f16_to_i64(f16(FRS1), RM, true));
+WRITE_RD(f16_to_i64(FRS1_H, RM, true));
set_fp_exceptions;
diff --git a/riscv/insns/fcvt_l_s.h b/riscv/insns/fcvt_l_s.h
index 267e0eb..d121a65 100644
--- a/riscv/insns/fcvt_l_s.h
+++ b/riscv/insns/fcvt_l_s.h
@@ -1,6 +1,6 @@
-require_extension('F');
+require_either_extension('F', EXT_ZFINX);
require_rv64;
require_fp;
softfloat_roundingMode = RM;
-WRITE_RD(f32_to_i64(f32(FRS1), RM, true));
+WRITE_RD(f32_to_i64(FRS1_F, RM, true));
set_fp_exceptions;
diff --git a/riscv/insns/fcvt_lu_d.h b/riscv/insns/fcvt_lu_d.h
index 3a02120..939bc0e 100644
--- a/riscv/insns/fcvt_lu_d.h
+++ b/riscv/insns/fcvt_lu_d.h
@@ -1,6 +1,6 @@
-require_extension('D');
+require_either_extension('D', EXT_ZDINX);
require_rv64;
require_fp;
softfloat_roundingMode = RM;
-WRITE_RD(f64_to_ui64(f64(FRS1), RM, true));
+WRITE_RD(f64_to_ui64(FRS1_D, RM, true));
set_fp_exceptions;
diff --git a/riscv/insns/fcvt_lu_h.h b/riscv/insns/fcvt_lu_h.h
index f1454c3..d27f175 100644
--- a/riscv/insns/fcvt_lu_h.h
+++ b/riscv/insns/fcvt_lu_h.h
@@ -1,6 +1,6 @@
-require_extension(EXT_ZFH);
+require_either_extension(EXT_ZFH, EXT_ZHINX);
require_rv64;
require_fp;
softfloat_roundingMode = RM;
-WRITE_RD(f16_to_ui64(f16(FRS1), RM, true));
+WRITE_RD(f16_to_ui64(FRS1_H, RM, true));
set_fp_exceptions;
diff --git a/riscv/insns/fcvt_lu_s.h b/riscv/insns/fcvt_lu_s.h
index 94115a3..69c95ef 100644
--- a/riscv/insns/fcvt_lu_s.h
+++ b/riscv/insns/fcvt_lu_s.h
@@ -1,6 +1,6 @@
-require_extension('F');
+require_either_extension('F', EXT_ZFINX);
require_rv64;
require_fp;
softfloat_roundingMode = RM;
-WRITE_RD(f32_to_ui64(f32(FRS1), RM, true));
+WRITE_RD(f32_to_ui64(FRS1_F, RM, true));
set_fp_exceptions;
diff --git a/riscv/insns/fcvt_s_d.h b/riscv/insns/fcvt_s_d.h
index 4033335..f3cd26e 100644
--- a/riscv/insns/fcvt_s_d.h
+++ b/riscv/insns/fcvt_s_d.h
@@ -1,5 +1,5 @@
-require_extension('D');
+require_either_extension('D', EXT_ZDINX);
require_fp;
softfloat_roundingMode = RM;
-WRITE_FRD(f64_to_f32(f64(FRS1)));
+WRITE_FRD_F(f64_to_f32(FRS1_D));
set_fp_exceptions;
diff --git a/riscv/insns/fcvt_s_h.h b/riscv/insns/fcvt_s_h.h
index 22cdd72..346440a 100644
--- a/riscv/insns/fcvt_s_h.h
+++ b/riscv/insns/fcvt_s_h.h
@@ -1,5 +1,5 @@
-require_extension(EXT_ZFHMIN);
+require_either_extension(EXT_ZFHMIN, EXT_ZHINXMIN);
require_fp;
softfloat_roundingMode = RM;
-WRITE_FRD(f16_to_f32(f16(FRS1)));
+WRITE_FRD_F(f16_to_f32(FRS1_H));
set_fp_exceptions;
diff --git a/riscv/insns/fcvt_s_l.h b/riscv/insns/fcvt_s_l.h
index 9abcc80..1d096d2 100644
--- a/riscv/insns/fcvt_s_l.h
+++ b/riscv/insns/fcvt_s_l.h
@@ -1,6 +1,6 @@
-require_extension('F');
+require_either_extension('F', EXT_ZFINX);
require_rv64;
require_fp;
softfloat_roundingMode = RM;
-WRITE_FRD(i64_to_f32(RS1));
+WRITE_FRD_F(i64_to_f32(RS1));
set_fp_exceptions;
diff --git a/riscv/insns/fcvt_s_lu.h b/riscv/insns/fcvt_s_lu.h
index 70c676e..e4e84cf 100644
--- a/riscv/insns/fcvt_s_lu.h
+++ b/riscv/insns/fcvt_s_lu.h
@@ -1,6 +1,6 @@
-require_extension('F');
+require_either_extension('F', EXT_ZFINX);
require_rv64;
require_fp;
softfloat_roundingMode = RM;
-WRITE_FRD(ui64_to_f32(RS1));
+WRITE_FRD_F(ui64_to_f32(RS1));
set_fp_exceptions;
diff --git a/riscv/insns/fcvt_s_w.h b/riscv/insns/fcvt_s_w.h
index 1ddabd8..75c87db 100644
--- a/riscv/insns/fcvt_s_w.h
+++ b/riscv/insns/fcvt_s_w.h
@@ -1,5 +1,5 @@
-require_extension('F');
+require_either_extension('F', EXT_ZFINX);
require_fp;
softfloat_roundingMode = RM;
-WRITE_FRD(i32_to_f32((int32_t)RS1));
+WRITE_FRD_F(i32_to_f32((int32_t)RS1));
set_fp_exceptions;
diff --git a/riscv/insns/fcvt_s_wu.h b/riscv/insns/fcvt_s_wu.h
index c1394c3..ec90fad 100644
--- a/riscv/insns/fcvt_s_wu.h
+++ b/riscv/insns/fcvt_s_wu.h
@@ -1,5 +1,5 @@
-require_extension('F');
+require_either_extension('F', EXT_ZFINX);
require_fp;
softfloat_roundingMode = RM;
-WRITE_FRD(ui32_to_f32((uint32_t)RS1));
+WRITE_FRD_F(ui32_to_f32((uint32_t)RS1));
set_fp_exceptions;
diff --git a/riscv/insns/fcvt_w_d.h b/riscv/insns/fcvt_w_d.h
index 28eb245..a839f4b 100644
--- a/riscv/insns/fcvt_w_d.h
+++ b/riscv/insns/fcvt_w_d.h
@@ -1,5 +1,5 @@
-require_extension('D');
+require_either_extension('D', EXT_ZDINX);
require_fp;
softfloat_roundingMode = RM;
-WRITE_RD(sext32(f64_to_i32(f64(FRS1), RM, true)));
+WRITE_RD(sext32(f64_to_i32(FRS1_D, RM, true)));
set_fp_exceptions;
diff --git a/riscv/insns/fcvt_w_h.h b/riscv/insns/fcvt_w_h.h
index fe8bb48..97e49a5 100644
--- a/riscv/insns/fcvt_w_h.h
+++ b/riscv/insns/fcvt_w_h.h
@@ -1,5 +1,5 @@
-require_extension(EXT_ZFH);
+require_either_extension(EXT_ZFH, EXT_ZHINX);
require_fp;
softfloat_roundingMode = RM;
-WRITE_RD(sext32(f16_to_i32(f16(FRS1), RM, true)));
+WRITE_RD(sext32(f16_to_i32(FRS1_H, RM, true)));
set_fp_exceptions;
diff --git a/riscv/insns/fcvt_w_s.h b/riscv/insns/fcvt_w_s.h
index d30f1b4..6aeb510 100644
--- a/riscv/insns/fcvt_w_s.h
+++ b/riscv/insns/fcvt_w_s.h
@@ -1,5 +1,5 @@
-require_extension('F');
+require_either_extension('F', EXT_ZFINX);
require_fp;
softfloat_roundingMode = RM;
-WRITE_RD(sext32(f32_to_i32(f32(FRS1), RM, true)));
+WRITE_RD(sext32(f32_to_i32(FRS1_F, RM, true)));
set_fp_exceptions;
diff --git a/riscv/insns/fcvt_wu_d.h b/riscv/insns/fcvt_wu_d.h
index 5cdc004..906f003 100644
--- a/riscv/insns/fcvt_wu_d.h
+++ b/riscv/insns/fcvt_wu_d.h
@@ -1,5 +1,5 @@
-require_extension('D');
+require_either_extension('D', EXT_ZDINX);
require_fp;
softfloat_roundingMode = RM;
-WRITE_RD(sext32(f64_to_ui32(f64(FRS1), RM, true)));
+WRITE_RD(sext32(f64_to_ui32(FRS1_D, RM, true)));
set_fp_exceptions;
diff --git a/riscv/insns/fcvt_wu_h.h b/riscv/insns/fcvt_wu_h.h
index bf6648d..ce11143 100644
--- a/riscv/insns/fcvt_wu_h.h
+++ b/riscv/insns/fcvt_wu_h.h
@@ -1,5 +1,5 @@
-require_extension(EXT_ZFH);
+require_either_extension(EXT_ZFH, EXT_ZHINX);
require_fp;
softfloat_roundingMode = RM;
-WRITE_RD(sext32(f16_to_ui32(f16(FRS1), RM, true)));
+WRITE_RD(sext32(f16_to_ui32(FRS1_H, RM, true)));
set_fp_exceptions;
diff --git a/riscv/insns/fcvt_wu_s.h b/riscv/insns/fcvt_wu_s.h
index 034d681..a8b8455 100644
--- a/riscv/insns/fcvt_wu_s.h
+++ b/riscv/insns/fcvt_wu_s.h
@@ -1,5 +1,5 @@
-require_extension('F');
+require_either_extension('F', EXT_ZFINX);
require_fp;
softfloat_roundingMode = RM;
-WRITE_RD(sext32(f32_to_ui32(f32(FRS1), RM, true)));
+WRITE_RD(sext32(f32_to_ui32(FRS1_F, RM, true)));
set_fp_exceptions;
diff --git a/riscv/insns/fdiv_d.h b/riscv/insns/fdiv_d.h
index ae7911a..990afca 100644
--- a/riscv/insns/fdiv_d.h
+++ b/riscv/insns/fdiv_d.h
@@ -1,5 +1,5 @@
-require_extension('D');
+require_either_extension('D', EXT_ZDINX);
require_fp;
softfloat_roundingMode = RM;
-WRITE_FRD(f64_div(f64(FRS1), f64(FRS2)));
+WRITE_FRD_D(f64_div(FRS1_D, FRS2_D));
set_fp_exceptions;
diff --git a/riscv/insns/fdiv_h.h b/riscv/insns/fdiv_h.h
index a169eae..91c518b 100644
--- a/riscv/insns/fdiv_h.h
+++ b/riscv/insns/fdiv_h.h
@@ -1,5 +1,5 @@
-require_extension(EXT_ZFH);
+require_either_extension(EXT_ZFH, EXT_ZHINX);
require_fp;
softfloat_roundingMode = RM;
-WRITE_FRD(f16_div(f16(FRS1), f16(FRS2)));
+WRITE_FRD_H(f16_div(FRS1_H, FRS2_H));
set_fp_exceptions;
diff --git a/riscv/insns/fdiv_s.h b/riscv/insns/fdiv_s.h
index c74ff04..180b41d 100644
--- a/riscv/insns/fdiv_s.h
+++ b/riscv/insns/fdiv_s.h
@@ -1,5 +1,5 @@
-require_extension('F');
+require_either_extension('F', EXT_ZFINX);
require_fp;
softfloat_roundingMode = RM;
-WRITE_FRD(f32_div(f32(FRS1), f32(FRS2)));
+WRITE_FRD_F(f32_div(FRS1_F, FRS2_F));
set_fp_exceptions;
diff --git a/riscv/insns/feq_d.h b/riscv/insns/feq_d.h
index 541ed5b..9585bad 100644
--- a/riscv/insns/feq_d.h
+++ b/riscv/insns/feq_d.h
@@ -1,4 +1,4 @@
-require_extension('D');
+require_either_extension('D', EXT_ZDINX);
require_fp;
-WRITE_RD(f64_eq(f64(FRS1), f64(FRS2)));
+WRITE_RD(f64_eq(FRS1_D, FRS2_D));
set_fp_exceptions;
diff --git a/riscv/insns/feq_h.h b/riscv/insns/feq_h.h
index 47e75a5..5988db9 100644
--- a/riscv/insns/feq_h.h
+++ b/riscv/insns/feq_h.h
@@ -1,4 +1,4 @@
-require_extension(EXT_ZFH);
+require_either_extension(EXT_ZFH, EXT_ZHINX);
require_fp;
-WRITE_RD(f16_eq(f16(FRS1), f16(FRS2)));
+WRITE_RD(f16_eq(FRS1_H, FRS2_H));
set_fp_exceptions;
diff --git a/riscv/insns/feq_s.h b/riscv/insns/feq_s.h
index 489bea6..97b57c2 100644
--- a/riscv/insns/feq_s.h
+++ b/riscv/insns/feq_s.h
@@ -1,4 +1,4 @@
-require_extension('F');
+require_either_extension('F', EXT_ZFINX);
require_fp;
-WRITE_RD(f32_eq(f32(FRS1), f32(FRS2)));
+WRITE_RD(f32_eq(FRS1_F, FRS2_F));
set_fp_exceptions;
diff --git a/riscv/insns/fle_d.h b/riscv/insns/fle_d.h
index 419a36f..17b4932 100644
--- a/riscv/insns/fle_d.h
+++ b/riscv/insns/fle_d.h
@@ -1,4 +1,4 @@
-require_extension('D');
+require_either_extension('D', EXT_ZDINX);
require_fp;
-WRITE_RD(f64_le(f64(FRS1), f64(FRS2)));
+WRITE_RD(f64_le(FRS1_D, FRS2_D));
set_fp_exceptions;
diff --git a/riscv/insns/fle_h.h b/riscv/insns/fle_h.h
index 9fc5968..31ed8a7 100644
--- a/riscv/insns/fle_h.h
+++ b/riscv/insns/fle_h.h
@@ -1,4 +1,4 @@
-require_extension(EXT_ZFH);
+require_either_extension(EXT_ZFH, EXT_ZHINX);
require_fp;
-WRITE_RD(f16_le(f16(FRS1), f16(FRS2)));
+WRITE_RD(f16_le(FRS1_H, FRS2_H));
set_fp_exceptions;
diff --git a/riscv/insns/fle_s.h b/riscv/insns/fle_s.h
index 5c0124e..e26f055 100644
--- a/riscv/insns/fle_s.h
+++ b/riscv/insns/fle_s.h
@@ -1,4 +1,4 @@
-require_extension('F');
+require_either_extension('F', EXT_ZFINX);
require_fp;
-WRITE_RD(f32_le(f32(FRS1), f32(FRS2)));
+WRITE_RD(f32_le(FRS1_F, FRS2_F));
set_fp_exceptions;
diff --git a/riscv/insns/flt_d.h b/riscv/insns/flt_d.h
index 7176a96..5fb0572 100644
--- a/riscv/insns/flt_d.h
+++ b/riscv/insns/flt_d.h
@@ -1,4 +1,4 @@
-require_extension('D');
+require_either_extension('D', EXT_ZDINX);
require_fp;
-WRITE_RD(f64_lt(f64(FRS1), f64(FRS2)));
+WRITE_RD(f64_lt(FRS1_D, FRS2_D));
set_fp_exceptions;
diff --git a/riscv/insns/flt_h.h b/riscv/insns/flt_h.h
index f516a38..dd6bc79 100644
--- a/riscv/insns/flt_h.h
+++ b/riscv/insns/flt_h.h
@@ -1,4 +1,4 @@
-require_extension(EXT_ZFH);
+require_either_extension(EXT_ZFH, EXT_ZHINX);
require_fp;
-WRITE_RD(f16_lt(f16(FRS1), f16(FRS2)));
+WRITE_RD(f16_lt(FRS1_H, FRS2_H));
set_fp_exceptions;
diff --git a/riscv/insns/flt_s.h b/riscv/insns/flt_s.h
index 40acc34..2f50ed6 100644
--- a/riscv/insns/flt_s.h
+++ b/riscv/insns/flt_s.h
@@ -1,4 +1,4 @@
-require_extension('F');
+require_either_extension('F', EXT_ZFINX);
require_fp;
-WRITE_RD(f32_lt(f32(FRS1), f32(FRS2)));
+WRITE_RD(f32_lt(FRS1_F, FRS2_F));
set_fp_exceptions;
diff --git a/riscv/insns/fmadd_d.h b/riscv/insns/fmadd_d.h
index ab22beb..07a8b25 100644
--- a/riscv/insns/fmadd_d.h
+++ b/riscv/insns/fmadd_d.h
@@ -1,5 +1,5 @@
-require_extension('D');
+require_either_extension('D', EXT_ZDINX);
require_fp;
softfloat_roundingMode = RM;
-WRITE_FRD(f64_mulAdd(f64(FRS1), f64(FRS2), f64(FRS3)));
+WRITE_FRD_D(f64_mulAdd(FRS1_D, FRS2_D, FRS3_D));
set_fp_exceptions;
diff --git a/riscv/insns/fmadd_h.h b/riscv/insns/fmadd_h.h
index 6551de5..5428897 100644
--- a/riscv/insns/fmadd_h.h
+++ b/riscv/insns/fmadd_h.h
@@ -1,5 +1,5 @@
-require_extension(EXT_ZFH);
+require_either_extension(EXT_ZFH, EXT_ZHINX);
require_fp;
softfloat_roundingMode = RM;
-WRITE_FRD(f16_mulAdd(f16(FRS1), f16(FRS2), f16(FRS3)));
+WRITE_FRD_H(f16_mulAdd(FRS1_H, FRS2_H, FRS3_H));
set_fp_exceptions;
diff --git a/riscv/insns/fmadd_s.h b/riscv/insns/fmadd_s.h
index e919190..5a72cf8 100644
--- a/riscv/insns/fmadd_s.h
+++ b/riscv/insns/fmadd_s.h
@@ -1,5 +1,5 @@
-require_extension('F');
+require_either_extension('F', EXT_ZFINX);
require_fp;
softfloat_roundingMode = RM;
-WRITE_FRD(f32_mulAdd(f32(FRS1), f32(FRS2), f32(FRS3)));
+WRITE_FRD_F(f32_mulAdd(FRS1_F, FRS2_F, FRS3_F));
set_fp_exceptions;
diff --git a/riscv/insns/fmax_d.h b/riscv/insns/fmax_d.h
index 11491f5..3e05b7e 100644
--- a/riscv/insns/fmax_d.h
+++ b/riscv/insns/fmax_d.h
@@ -1,9 +1,9 @@
-require_extension('D');
+require_either_extension('D', EXT_ZDINX);
require_fp;
-bool greater = f64_lt_quiet(f64(FRS2), f64(FRS1)) ||
- (f64_eq(f64(FRS2), f64(FRS1)) && (f64(FRS2).v & F64_SIGN));
-if (isNaNF64UI(f64(FRS1).v) && isNaNF64UI(f64(FRS2).v))
- WRITE_FRD(f64(defaultNaNF64UI));
+bool greater = f64_lt_quiet(FRS2_D, FRS1_D) ||
+ (f64_eq(FRS2_D, FRS1_D) && (FRS2_D.v & F64_SIGN));
+if (isNaNF64UI(FRS1_D.v) && isNaNF64UI(FRS2_D.v))
+ WRITE_FRD_D(f64(defaultNaNF64UI));
else
- WRITE_FRD(greater || isNaNF64UI(f64(FRS2).v) ? FRS1 : FRS2);
+ WRITE_FRD_D((greater || isNaNF64UI(FRS2_D.v) ? FRS1_D : FRS2_D));
set_fp_exceptions;
diff --git a/riscv/insns/fmax_h.h b/riscv/insns/fmax_h.h
index 3d4c40e..c864258 100644
--- a/riscv/insns/fmax_h.h
+++ b/riscv/insns/fmax_h.h
@@ -1,4 +1,4 @@
-require_extension(EXT_ZFH);
+require_either_extension(EXT_ZFH, EXT_ZHINX);
require_fp;
-WRITE_FRD(f16_max(f16(FRS1), f16(FRS2)));
+WRITE_FRD_H(f16_max(FRS1_H, FRS2_H));
set_fp_exceptions;
diff --git a/riscv/insns/fmax_s.h b/riscv/insns/fmax_s.h
index 41d8f92..17d8b3c 100644
--- a/riscv/insns/fmax_s.h
+++ b/riscv/insns/fmax_s.h
@@ -1,9 +1,9 @@
-require_extension('F');
+require_either_extension('F', EXT_ZFINX);
require_fp;
-bool greater = f32_lt_quiet(f32(FRS2), f32(FRS1)) ||
- (f32_eq(f32(FRS2), f32(FRS1)) && (f32(FRS2).v & F32_SIGN));
-if (isNaNF32UI(f32(FRS1).v) && isNaNF32UI(f32(FRS2).v))
- WRITE_FRD(f32(defaultNaNF32UI));
+bool greater = f32_lt_quiet(FRS2_F, FRS1_F) ||
+ (f32_eq(FRS2_F, FRS1_F) && (FRS2_F.v & F32_SIGN));
+if (isNaNF32UI(FRS1_F.v) && isNaNF32UI(FRS2_F.v))
+ WRITE_FRD_F(f32(defaultNaNF32UI));
else
- WRITE_FRD(greater || isNaNF32UI(f32(FRS2).v) ? FRS1 : FRS2);
+ WRITE_FRD_F((greater || isNaNF32UI(FRS2_F.v) ? FRS1_F : FRS2_F));
set_fp_exceptions;
diff --git a/riscv/insns/fmin_d.h b/riscv/insns/fmin_d.h
index 5cf349d..f60a73e 100644
--- a/riscv/insns/fmin_d.h
+++ b/riscv/insns/fmin_d.h
@@ -1,9 +1,9 @@
-require_extension('D');
+require_either_extension('D', EXT_ZDINX);
require_fp;
-bool less = f64_lt_quiet(f64(FRS1), f64(FRS2)) ||
- (f64_eq(f64(FRS1), f64(FRS2)) && (f64(FRS1).v & F64_SIGN));
-if (isNaNF64UI(f64(FRS1).v) && isNaNF64UI(f64(FRS2).v))
- WRITE_FRD(f64(defaultNaNF64UI));
+bool less = f64_lt_quiet(FRS1_D, FRS2_D) ||
+ (f64_eq(FRS1_D, FRS2_D) && (FRS1_D.v & F64_SIGN));
+if (isNaNF64UI(FRS1_D.v) && isNaNF64UI(FRS2_D.v))
+ WRITE_FRD_D(f64(defaultNaNF64UI));
else
- WRITE_FRD(less || isNaNF64UI(f64(FRS2).v) ? FRS1 : FRS2);
+ WRITE_FRD_D((less || isNaNF64UI(FRS2_D.v) ? FRS1_D : FRS2_D));
set_fp_exceptions;
diff --git a/riscv/insns/fmin_h.h b/riscv/insns/fmin_h.h
index 5fb1404..cd02f20 100644
--- a/riscv/insns/fmin_h.h
+++ b/riscv/insns/fmin_h.h
@@ -1,4 +1,4 @@
-require_extension(EXT_ZFH);
+require_either_extension(EXT_ZFH, EXT_ZHINX);
require_fp;
-WRITE_FRD(f16_min(f16(FRS1), f16(FRS2)));
+WRITE_FRD_H(f16_min(FRS1_H, FRS2_H));
set_fp_exceptions;
diff --git a/riscv/insns/fmin_s.h b/riscv/insns/fmin_s.h
index 19e1193..476a586 100644
--- a/riscv/insns/fmin_s.h
+++ b/riscv/insns/fmin_s.h
@@ -1,9 +1,9 @@
-require_extension('F');
+require_either_extension('F', EXT_ZFINX);
require_fp;
-bool less = f32_lt_quiet(f32(FRS1), f32(FRS2)) ||
- (f32_eq(f32(FRS1), f32(FRS2)) && (f32(FRS1).v & F32_SIGN));
-if (isNaNF32UI(f32(FRS1).v) && isNaNF32UI(f32(FRS2).v))
- WRITE_FRD(f32(defaultNaNF32UI));
+bool less = f32_lt_quiet(FRS1_F, FRS2_F) ||
+ (f32_eq(FRS1_F, FRS2_F) && (FRS1_F.v & F32_SIGN));
+if (isNaNF32UI(FRS1_F.v) && isNaNF32UI(FRS2_F.v))
+ WRITE_FRD_F(f32(defaultNaNF32UI));
else
- WRITE_FRD(less || isNaNF32UI(f32(FRS2).v) ? FRS1 : FRS2);
+ WRITE_FRD_F((less || isNaNF32UI(FRS2_F.v) ? FRS1_F : FRS2_F));
set_fp_exceptions;
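The open-coded comparisons in fmin/fmax exist because RISC-V pins down two cases IEEE leaves loose: -0.0 orders strictly below +0.0 (hence the sign-bit test when the operands compare equal), and the canonical NaN is produced only when both inputs are NaN; a single NaN input loses to the numeric operand. An illustrative version on plain floats, not the simulator's softfloat types:

    #include <cmath>
    #include <limits>

    // Illustrative only: FMIN with RISC-V tie and NaN rules.
    static float riscv_fmin(float a, float b)
    {
      if (std::isnan(a) && std::isnan(b))
        return std::numeric_limits<float>::quiet_NaN();  // canonical NaN
      if (std::isnan(a)) return b;
      if (std::isnan(b)) return a;
      if (a == b)                        // +0.0 == -0.0: prefer the negative
        return std::signbit(a) ? a : b;
      return a < b ? a : b;
    }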
diff --git a/riscv/insns/fmsub_d.h b/riscv/insns/fmsub_d.h
index 5b5bc0f..1a7d784 100644
--- a/riscv/insns/fmsub_d.h
+++ b/riscv/insns/fmsub_d.h
@@ -1,5 +1,5 @@
-require_extension('D');
+require_either_extension('D', EXT_ZDINX);
require_fp;
softfloat_roundingMode = RM;
-WRITE_FRD(f64_mulAdd(f64(FRS1), f64(FRS2), f64(f64(FRS3).v ^ F64_SIGN)));
+WRITE_FRD_D(f64_mulAdd(FRS1_D, FRS2_D, f64(FRS3_D.v ^ F64_SIGN)));
set_fp_exceptions;
diff --git a/riscv/insns/fmsub_h.h b/riscv/insns/fmsub_h.h
index 934291f..dc6a8e6 100644
--- a/riscv/insns/fmsub_h.h
+++ b/riscv/insns/fmsub_h.h
@@ -1,5 +1,5 @@
-require_extension(EXT_ZFH);
+require_either_extension(EXT_ZFH, EXT_ZHINX);
require_fp;
softfloat_roundingMode = RM;
-WRITE_FRD(f16_mulAdd(f16(FRS1), f16(FRS2), f16(f16(FRS3).v ^ F16_SIGN)));
+WRITE_FRD_H(f16_mulAdd(FRS1_H, FRS2_H, f16(FRS3_H.v ^ F16_SIGN)));
set_fp_exceptions;
diff --git a/riscv/insns/fmsub_s.h b/riscv/insns/fmsub_s.h
index d46c887..179cc2f 100644
--- a/riscv/insns/fmsub_s.h
+++ b/riscv/insns/fmsub_s.h
@@ -1,5 +1,5 @@
-require_extension('F');
+require_either_extension('F', EXT_ZFINX);
require_fp;
softfloat_roundingMode = RM;
-WRITE_FRD(f32_mulAdd(f32(FRS1), f32(FRS2), f32(f32(FRS3).v ^ F32_SIGN)));
+WRITE_FRD_F(f32_mulAdd(FRS1_F, FRS2_F, f32(FRS3_F.v ^ F32_SIGN)));
set_fp_exceptions;
diff --git a/riscv/insns/fmul_d.h b/riscv/insns/fmul_d.h
index 9189d8d..e5caa34 100644
--- a/riscv/insns/fmul_d.h
+++ b/riscv/insns/fmul_d.h
@@ -1,5 +1,5 @@
-require_extension('D');
+require_either_extension('D', EXT_ZDINX);
require_fp;
softfloat_roundingMode = RM;
-WRITE_FRD(f64_mul(f64(FRS1), f64(FRS2)));
+WRITE_FRD_D(f64_mul(FRS1_D, FRS2_D));
set_fp_exceptions;
diff --git a/riscv/insns/fmul_h.h b/riscv/insns/fmul_h.h
index 0152df8..dc7f9c4 100644
--- a/riscv/insns/fmul_h.h
+++ b/riscv/insns/fmul_h.h
@@ -1,5 +1,5 @@
-require_extension(EXT_ZFH);
+require_either_extension(EXT_ZFH, EXT_ZHINX);
require_fp;
softfloat_roundingMode = RM;
-WRITE_FRD(f16_mul(f16(FRS1), f16(FRS2)));
+WRITE_FRD_H(f16_mul(FRS1_H, FRS2_H));
set_fp_exceptions;
diff --git a/riscv/insns/fmul_s.h b/riscv/insns/fmul_s.h
index 145d5ce..9cf30b4 100644
--- a/riscv/insns/fmul_s.h
+++ b/riscv/insns/fmul_s.h
@@ -1,5 +1,5 @@
-require_extension('F');
+require_either_extension('F', EXT_ZFINX);
require_fp;
softfloat_roundingMode = RM;
-WRITE_FRD(f32_mul(f32(FRS1), f32(FRS2)));
+WRITE_FRD_F(f32_mul(FRS1_F, FRS2_F));
set_fp_exceptions;
diff --git a/riscv/insns/fnmadd_d.h b/riscv/insns/fnmadd_d.h
index e8dd743..a2a14e9 100644
--- a/riscv/insns/fnmadd_d.h
+++ b/riscv/insns/fnmadd_d.h
@@ -1,5 +1,5 @@
-require_extension('D');
+require_either_extension('D', EXT_ZDINX);
require_fp;
softfloat_roundingMode = RM;
-WRITE_FRD(f64_mulAdd(f64(f64(FRS1).v ^ F64_SIGN), f64(FRS2), f64(f64(FRS3).v ^ F64_SIGN)));
+WRITE_FRD_D(f64_mulAdd(f64(FRS1_D.v ^ F64_SIGN), FRS2_D, f64(FRS3_D.v ^ F64_SIGN)));
set_fp_exceptions;
diff --git a/riscv/insns/fnmadd_h.h b/riscv/insns/fnmadd_h.h
index e4c619e..b1ca283 100644
--- a/riscv/insns/fnmadd_h.h
+++ b/riscv/insns/fnmadd_h.h
@@ -1,5 +1,5 @@
-require_extension(EXT_ZFH);
+require_either_extension(EXT_ZFH, EXT_ZHINX);
require_fp;
softfloat_roundingMode = RM;
-WRITE_FRD(f16_mulAdd(f16(f16(FRS1).v ^ F16_SIGN), f16(FRS2), f16(f16(FRS3).v ^ F16_SIGN)));
+WRITE_FRD_H(f16_mulAdd(f16(FRS1_H.v ^ F16_SIGN), FRS2_H, f16(FRS3_H.v ^ F16_SIGN)));
set_fp_exceptions;
diff --git a/riscv/insns/fnmadd_s.h b/riscv/insns/fnmadd_s.h
index 1c2996e..683257a 100644
--- a/riscv/insns/fnmadd_s.h
+++ b/riscv/insns/fnmadd_s.h
@@ -1,5 +1,5 @@
-require_extension('F');
+require_either_extension('F', EXT_ZFINX);
require_fp;
softfloat_roundingMode = RM;
-WRITE_FRD(f32_mulAdd(f32(f32(FRS1).v ^ F32_SIGN), f32(FRS2), f32(f32(FRS3).v ^ F32_SIGN)));
+WRITE_FRD_F(f32_mulAdd(f32(FRS1_F.v ^ F32_SIGN), FRS2_F, f32(FRS3_F.v ^ F32_SIGN)));
set_fp_exceptions;
diff --git a/riscv/insns/fnmsub_d.h b/riscv/insns/fnmsub_d.h
index c29a0b9..9352c3f 100644
--- a/riscv/insns/fnmsub_d.h
+++ b/riscv/insns/fnmsub_d.h
@@ -1,5 +1,5 @@
-require_extension('D');
+require_either_extension('D', EXT_ZDINX);
require_fp;
softfloat_roundingMode = RM;
-WRITE_FRD(f64_mulAdd(f64(f64(FRS1).v ^ F64_SIGN), f64(FRS2), f64(FRS3)));
+WRITE_FRD_D(f64_mulAdd(f64(FRS1_D.v ^ F64_SIGN), FRS2_D, FRS3_D));
set_fp_exceptions;
diff --git a/riscv/insns/fnmsub_h.h b/riscv/insns/fnmsub_h.h
index 0410c3b..e05fcd1 100644
--- a/riscv/insns/fnmsub_h.h
+++ b/riscv/insns/fnmsub_h.h
@@ -1,5 +1,5 @@
-require_extension(EXT_ZFH);
+require_either_extension(EXT_ZFH, EXT_ZHINX);
require_fp;
softfloat_roundingMode = RM;
-WRITE_FRD(f16_mulAdd(f16(f16(FRS1).v ^ F16_SIGN), f16(FRS2), f16(FRS3)));
+WRITE_FRD_H(f16_mulAdd(f16(FRS1_H.v ^ F16_SIGN), FRS2_H, FRS3_H));
set_fp_exceptions;
diff --git a/riscv/insns/fnmsub_s.h b/riscv/insns/fnmsub_s.h
index 4c61fc7..b22b3db 100644
--- a/riscv/insns/fnmsub_s.h
+++ b/riscv/insns/fnmsub_s.h
@@ -1,5 +1,5 @@
-require_extension('F');
+require_either_extension('F', EXT_ZFINX);
require_fp;
softfloat_roundingMode = RM;
-WRITE_FRD(f32_mulAdd(f32(f32(FRS1).v ^ F32_SIGN), f32(FRS2), f32(FRS3)));
+WRITE_FRD_F(f32_mulAdd(f32(FRS1_F.v ^ F32_SIGN), FRS2_F, FRS3_F));
set_fp_exceptions;
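In the fused-multiply variants the operand negations stay as raw sign-bit XORs (FRS3_D.v ^ F64_SIGN and so on): flipping the top bit negates every value class, including NaNs and infinities, without rounding or raising flags, which is exactly what FMSUB/FNMADD/FNMSUB need. A sketch of the idiom on a binary32 bit pattern:

    #include <cstdint>

    // Negate a binary32 value by flipping its sign bit; no FP flags raised.
    static inline uint32_t f32_negate_bits(uint32_t bits)
    {
      return bits ^ (UINT32_C(1) << 31);
    }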
diff --git a/riscv/insns/fsgnj_d.h b/riscv/insns/fsgnj_d.h
index 78f9ce7..8f02fd1 100644
--- a/riscv/insns/fsgnj_d.h
+++ b/riscv/insns/fsgnj_d.h
@@ -1,3 +1,3 @@
-require_extension('D');
+require_either_extension('D', EXT_ZDINX);
require_fp;
-WRITE_FRD(fsgnj64(FRS1, FRS2, false, false));
+WRITE_FRD_D(fsgnj64(freg(FRS1_D), freg(FRS2_D), false, false));
diff --git a/riscv/insns/fsgnj_h.h b/riscv/insns/fsgnj_h.h
index 79d50f5..080f27d 100644
--- a/riscv/insns/fsgnj_h.h
+++ b/riscv/insns/fsgnj_h.h
@@ -1,3 +1,3 @@
-require_extension(EXT_ZFH);
+require_either_extension(EXT_ZFH, EXT_ZHINX);
require_fp;
-WRITE_FRD(fsgnj16(FRS1, FRS2, false, false));
+WRITE_FRD_H(fsgnj16(freg(FRS1_H), freg(FRS2_H), false, false));
diff --git a/riscv/insns/fsgnj_s.h b/riscv/insns/fsgnj_s.h
index c1a70cb..ea511b8 100644
--- a/riscv/insns/fsgnj_s.h
+++ b/riscv/insns/fsgnj_s.h
@@ -1,3 +1,3 @@
-require_extension('F');
+require_either_extension('F', EXT_ZFINX);
require_fp;
-WRITE_FRD(fsgnj32(FRS1, FRS2, false, false));
+WRITE_FRD_F(fsgnj32(freg(FRS1_F), freg(FRS2_F), false, false));
diff --git a/riscv/insns/fsgnjn_d.h b/riscv/insns/fsgnjn_d.h
index f02c311..870a979 100644
--- a/riscv/insns/fsgnjn_d.h
+++ b/riscv/insns/fsgnjn_d.h
@@ -1,3 +1,3 @@
-require_extension('D');
+require_either_extension('D', EXT_ZDINX);
require_fp;
-WRITE_FRD(fsgnj64(FRS1, FRS2, true, false));
+WRITE_FRD_D(fsgnj64(freg(FRS1_D), freg(FRS2_D), true, false));
diff --git a/riscv/insns/fsgnjn_h.h b/riscv/insns/fsgnjn_h.h
index ebb4ac9..1d7bf03 100644
--- a/riscv/insns/fsgnjn_h.h
+++ b/riscv/insns/fsgnjn_h.h
@@ -1,3 +1,3 @@
-require_extension(EXT_ZFH);
+require_either_extension(EXT_ZFH, EXT_ZHINX);
require_fp;
-WRITE_FRD(fsgnj16(FRS1, FRS2, true, false));
+WRITE_FRD_H(fsgnj16(freg(FRS1_H), freg(FRS2_H), true, false));
\ No newline at end of file
diff --git a/riscv/insns/fsgnjn_q.h b/riscv/insns/fsgnjn_q.h
index 38c7bbf..dcf7235 100644
--- a/riscv/insns/fsgnjn_q.h
+++ b/riscv/insns/fsgnjn_q.h
@@ -1,3 +1,3 @@
require_extension('Q');
require_fp;
-WRITE_FRD(fsgnj128(FRS1, FRS2, true, false));
+WRITE_FRD(fsgnj128(FRS1, FRS2, true, false));
\ No newline at end of file
diff --git a/riscv/insns/fsgnjn_s.h b/riscv/insns/fsgnjn_s.h
index 35906d6..a0994b4 100644
--- a/riscv/insns/fsgnjn_s.h
+++ b/riscv/insns/fsgnjn_s.h
@@ -1,3 +1,3 @@
-require_extension('F');
+require_either_extension('F', EXT_ZFINX);
require_fp;
-WRITE_FRD(fsgnj32(FRS1, FRS2, true, false));
+WRITE_FRD_F(fsgnj32(freg(FRS1_F), freg(FRS2_F), true, false));
diff --git a/riscv/insns/fsgnjx_d.h b/riscv/insns/fsgnjx_d.h
index c121737..25906f0 100644
--- a/riscv/insns/fsgnjx_d.h
+++ b/riscv/insns/fsgnjx_d.h
@@ -1,3 +1,3 @@
-require_extension('D');
+require_either_extension('D', EXT_ZDINX);
require_fp;
-WRITE_FRD(fsgnj64(FRS1, FRS2, false, true));
+WRITE_FRD_D(fsgnj64(freg(FRS1_D), freg(FRS2_D), false, true));
diff --git a/riscv/insns/fsgnjx_h.h b/riscv/insns/fsgnjx_h.h
index 9310269..1d29bb1 100644
--- a/riscv/insns/fsgnjx_h.h
+++ b/riscv/insns/fsgnjx_h.h
@@ -1,3 +1,3 @@
-require_extension(EXT_ZFH);
+require_either_extension(EXT_ZFH, EXT_ZHINX);
require_fp;
-WRITE_FRD(fsgnj16(FRS1, FRS2, false, true));
+WRITE_FRD_H(fsgnj16(freg(FRS1_H), freg(FRS2_H), false, true));
diff --git a/riscv/insns/fsgnjx_s.h b/riscv/insns/fsgnjx_s.h
index 4d5c624..9bc0798 100644
--- a/riscv/insns/fsgnjx_s.h
+++ b/riscv/insns/fsgnjx_s.h
@@ -1,3 +1,3 @@
-require_extension('F');
+require_either_extension('F', EXT_ZFINX);
require_fp;
-WRITE_FRD(fsgnj32(FRS1, FRS2, false, true));
+WRITE_FRD_F(fsgnj32(freg(FRS1_F), freg(FRS2_F), false, true));
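The two boolean arguments to fsgnj32/fsgnj64 select the three sign-injection flavors: (false, false) copies rs2's sign, (true, false) copies its inverse, and (false, true) XORs the two source signs. A bit-level sketch under that reading (fsgnj32_bits is a hypothetical stand-in for the real helper):

    #include <cstdint>

    // Sketch of 32-bit sign injection; `n` inverts rs2's sign and `x`
    // XORs it with rs1's sign, matching the two flags in the diff.
    static inline uint32_t fsgnj32_bits(uint32_t rs1, uint32_t rs2, bool n, bool x)
    {
      const uint32_t SIGN = UINT32_C(1) << 31;
      uint32_t sign = rs2 & SIGN;
      if (n) sign ^= SIGN;
      if (x) sign ^= rs1 & SIGN;
      return (rs1 & ~SIGN) | sign;
    }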
diff --git a/riscv/insns/fsqrt_d.h b/riscv/insns/fsqrt_d.h
index da138ba..363b457 100644
--- a/riscv/insns/fsqrt_d.h
+++ b/riscv/insns/fsqrt_d.h
@@ -1,5 +1,5 @@
-require_extension('D');
+require_either_extension('D', EXT_ZDINX);
require_fp;
softfloat_roundingMode = RM;
-WRITE_FRD(f64_sqrt(f64(FRS1)));
+WRITE_FRD_D(f64_sqrt(FRS1_D));
set_fp_exceptions;
diff --git a/riscv/insns/fsqrt_h.h b/riscv/insns/fsqrt_h.h
index 138d572..fea429b 100644
--- a/riscv/insns/fsqrt_h.h
+++ b/riscv/insns/fsqrt_h.h
@@ -1,5 +1,5 @@
-require_extension(EXT_ZFH);
+require_either_extension(EXT_ZFH, EXT_ZHINX);
require_fp;
softfloat_roundingMode = RM;
-WRITE_FRD(f16_sqrt(f16(FRS1)));
+WRITE_FRD_H(f16_sqrt(FRS1_H));
set_fp_exceptions;
diff --git a/riscv/insns/fsqrt_s.h b/riscv/insns/fsqrt_s.h
index 7476846..d44503a 100644
--- a/riscv/insns/fsqrt_s.h
+++ b/riscv/insns/fsqrt_s.h
@@ -1,5 +1,5 @@
-require_extension('F');
+require_either_extension('F', EXT_ZFINX);
require_fp;
softfloat_roundingMode = RM;
-WRITE_FRD(f32_sqrt(f32(FRS1)));
+WRITE_FRD_F(f32_sqrt(FRS1_F));
set_fp_exceptions;
diff --git a/riscv/insns/fsub_d.h b/riscv/insns/fsub_d.h
index 1418a06..4f8bf50 100644
--- a/riscv/insns/fsub_d.h
+++ b/riscv/insns/fsub_d.h
@@ -1,5 +1,5 @@
-require_extension('D');
+require_either_extension('D', EXT_ZDINX);
require_fp;
softfloat_roundingMode = RM;
-WRITE_FRD(f64_sub(f64(FRS1), f64(FRS2)));
+WRITE_FRD_D(f64_sub(FRS1_D, FRS2_D));
set_fp_exceptions;
diff --git a/riscv/insns/fsub_h.h b/riscv/insns/fsub_h.h
index 43b51cc..f7006fb 100644
--- a/riscv/insns/fsub_h.h
+++ b/riscv/insns/fsub_h.h
@@ -1,5 +1,5 @@
-require_extension(EXT_ZFH);
+require_either_extension(EXT_ZFH, EXT_ZHINX);
require_fp;
softfloat_roundingMode = RM;
-WRITE_FRD(f16_sub(f16(FRS1), f16(FRS2)));
+WRITE_FRD_H(f16_sub(FRS1_H, FRS2_H));
set_fp_exceptions;
diff --git a/riscv/insns/fsub_s.h b/riscv/insns/fsub_s.h
index f6183ea..1a33ffd 100644
--- a/riscv/insns/fsub_s.h
+++ b/riscv/insns/fsub_s.h
@@ -1,5 +1,5 @@
-require_extension('F');
+require_either_extension('F', EXT_ZFINX);
require_fp;
softfloat_roundingMode = RM;
-WRITE_FRD(f32_sub(f32(FRS1), f32(FRS2)));
+WRITE_FRD_F(f32_sub(FRS1_F, FRS2_F));
set_fp_exceptions;
diff --git a/riscv/insns/kmar64.h b/riscv/insns/kmar64.h
index 49f4482..a4d332b 100644
--- a/riscv/insns/kmar64.h
+++ b/riscv/insns/kmar64.h
@@ -5,7 +5,6 @@ P_64_PROFILE_PARAM(true, false)
bool sat = false;
sreg_t mres0 = (sreg_t)P_SW(rs1, 0) * P_SW(rs2, 0);
sreg_t mres1 = (sreg_t)P_SW(rs1, 1) * P_SW(rs2, 1);
-sreg_t res;
if (xlen == 32) {
rd = (sat_add<int64_t, uint64_t>(rd, mres0, sat));
diff --git a/riscv/insns/kmmawb2.h b/riscv/insns/kmmawb2.h
index 6b3aa0d..274f9dd 100644
--- a/riscv/insns/kmmawb2.h
+++ b/riscv/insns/kmmawb2.h
@@ -3,7 +3,7 @@ P_LOOP(32, {
int64_t addop = 0;
int64_t mres = 0;
bool sat = false;
- if((INT32_MIN != ps1) | (INT16_MIN != P_SH(ps2, 0))) {
+ if ((INT32_MIN != ps1) | (INT16_MIN != P_SH(ps2, 0))) {
mres = ((int64_t) ps1 * P_SH(ps2, 0)) << 1;
addop = mres >> 16;
} else {
diff --git a/riscv/insns/kmmawb2_u.h b/riscv/insns/kmmawb2_u.h
index f44346e..447a3f4 100644
--- a/riscv/insns/kmmawb2_u.h
+++ b/riscv/insns/kmmawb2_u.h
@@ -3,7 +3,7 @@ P_LOOP(32, {
int64_t addop = 0;
int64_t mres = 0;
bool sat = false;
- if((INT32_MIN != ps1) | (INT16_MIN != P_SH(ps2, 0))) {
+ if ((INT32_MIN != ps1) | (INT16_MIN != P_SH(ps2, 0))) {
mres = ((int64_t) ps1 * P_SH(ps2, 0)) << 1;
addop = ((mres >> 15) + 1) >> 1;
} else {
diff --git a/riscv/insns/kmmawt2.h b/riscv/insns/kmmawt2.h
index 3cd72de..6eb22ac 100644
--- a/riscv/insns/kmmawt2.h
+++ b/riscv/insns/kmmawt2.h
@@ -3,7 +3,7 @@ P_LOOP(32, {
int64_t addop = 0;
int64_t mres = 0;
bool sat = false;
- if((INT32_MIN != ps1) | (INT16_MIN != P_SH(ps2, 1))) {
+ if ((INT32_MIN != ps1) | (INT16_MIN != P_SH(ps2, 1))) {
mres = ((int64_t) ps1 * P_SH(ps2, 1)) << 1;
addop = mres >> 16;
} else {
diff --git a/riscv/insns/kmmawt2_u.h b/riscv/insns/kmmawt2_u.h
index 7fe378c..b82e090 100644
--- a/riscv/insns/kmmawt2_u.h
+++ b/riscv/insns/kmmawt2_u.h
@@ -3,7 +3,7 @@ P_LOOP(32, {
int64_t addop = 0;
int64_t mres = 0;
bool sat = false;
- if((INT32_MIN != ps1) | (INT16_MIN != P_SH(ps2, 1))) {
+ if ((INT32_MIN != ps1) | (INT16_MIN != P_SH(ps2, 1))) {
mres = ((int64_t) ps1 * P_SH(ps2, 1)) << 1;
addop = ((mres >> 15) + 1) >> 1;
} else {
diff --git a/riscv/insns/kmmwb2.h b/riscv/insns/kmmwb2.h
index 272f738..d08b0ef 100644
--- a/riscv/insns/kmmwb2.h
+++ b/riscv/insns/kmmwb2.h
@@ -1,6 +1,6 @@
require_vector_vs;
P_LOOP(32, {
- if((INT32_MIN != ps1) | (INT16_MIN != P_SH(ps2, 0))) {
+ if ((INT32_MIN != ps1) | (INT16_MIN != P_SH(ps2, 0))) {
int64_t mres = ((int64_t) ps1 * P_SH(ps2, 0)) << 1;
pd = mres >> 16;
} else {
diff --git a/riscv/insns/kmmwb2_u.h b/riscv/insns/kmmwb2_u.h
index b5a5006..d308bf3 100644
--- a/riscv/insns/kmmwb2_u.h
+++ b/riscv/insns/kmmwb2_u.h
@@ -1,6 +1,6 @@
require_vector_vs;
P_LOOP(32, {
- if((INT32_MIN != ps1) | (INT16_MIN != P_SH(ps2, 0))) {
+ if ((INT32_MIN != ps1) | (INT16_MIN != P_SH(ps2, 0))) {
int64_t mres = ((int64_t) ps1 * P_SH(ps2, 0)) << 1;
pd = ((mres >> 15) + 1) >> 1;
} else {
diff --git a/riscv/insns/kmmwt2.h b/riscv/insns/kmmwt2.h
index 73d3dc8..38ba9b1 100644
--- a/riscv/insns/kmmwt2.h
+++ b/riscv/insns/kmmwt2.h
@@ -1,6 +1,6 @@
require_vector_vs;
P_LOOP(32, {
- if((INT32_MIN != ps1) | (INT16_MIN != P_SH(ps2, 1))) {
+ if ((INT32_MIN != ps1) | (INT16_MIN != P_SH(ps2, 1))) {
int64_t mres = ((int64_t) ps1 * P_SH(ps2, 1)) << 1;
pd = mres >> 16;
} else {
diff --git a/riscv/insns/kmmwt2_u.h b/riscv/insns/kmmwt2_u.h
index 1f525a8..e855786 100644
--- a/riscv/insns/kmmwt2_u.h
+++ b/riscv/insns/kmmwt2_u.h
@@ -1,6 +1,6 @@
require_vector_vs;
P_LOOP(32, {
- if((INT32_MIN != ps1) | (INT16_MIN != P_SH(ps2, 1))) {
+ if ((INT32_MIN != ps1) | (INT16_MIN != P_SH(ps2, 1))) {
int64_t mres = ((int64_t) ps1 * P_SH(ps2, 1)) << 1;
pd = ((mres >> 15) + 1) >> 1;
} else {
diff --git a/riscv/insns/kslra16_u.h b/riscv/insns/kslra16_u.h
index 8335f3e..27bb77c 100644
--- a/riscv/insns/kslra16_u.h
+++ b/riscv/insns/kslra16_u.h
@@ -3,7 +3,7 @@ P_X_LOOP(16, 5, {
if (ssa < 0) {
sa = -ssa;
sa = (sa == 16) ? 15 : sa;
- if(sa != 0)
+ if (sa != 0)
pd = ((ps1 >> (sa - 1)) + 1) >> 1;
else
pd = ps1;
diff --git a/riscv/insns/kslra32_u.h b/riscv/insns/kslra32_u.h
index d53c8fe..b9c06cf 100644
--- a/riscv/insns/kslra32_u.h
+++ b/riscv/insns/kslra32_u.h
@@ -4,7 +4,7 @@ P_X_LOOP(32, 6, {
if (ssa < 0) {
sa = -ssa;
sa = (sa == 32) ? 31 : sa;
- if(sa != 0)
+ if (sa != 0)
pd = ((ps1 >> (sa - 1)) + 1) >> 1;
else
pd = ps1;
diff --git a/riscv/insns/kslra8_u.h b/riscv/insns/kslra8_u.h
index 620f3bd..340283f 100644
--- a/riscv/insns/kslra8_u.h
+++ b/riscv/insns/kslra8_u.h
@@ -3,7 +3,7 @@ P_X_LOOP(8, 4, {
if (ssa < 0) {
sa = -ssa;
sa = (sa == 8) ? 7 : sa;
- if(sa != 0)
+ if (sa != 0)
pd = ((ps1 >> (sa - 1)) + 1) >> 1;
else
pd = ps1;
diff --git a/riscv/insns/kwmmul.h b/riscv/insns/kwmmul.h
index b0ab8d4..ca654f2 100644
--- a/riscv/insns/kwmmul.h
+++ b/riscv/insns/kwmmul.h
@@ -1,6 +1,6 @@
require_vector_vs;
P_LOOP(32, {
- if((INT32_MIN != ps1) | (INT32_MIN != ps2)) {
+ if ((INT32_MIN != ps1) | (INT32_MIN != ps2)) {
int64_t mres = ((int64_t) ps1 * (int64_t) ps2) << 1;
pd = mres >> 32;
} else {
diff --git a/riscv/insns/kwmmul_u.h b/riscv/insns/kwmmul_u.h
index c2045e1..b435561 100644
--- a/riscv/insns/kwmmul_u.h
+++ b/riscv/insns/kwmmul_u.h
@@ -1,6 +1,6 @@
require_vector_vs;
P_LOOP(32, {
- if((INT32_MIN != ps1) | (INT32_MIN != ps2)) {
+ if ((INT32_MIN != ps1) | (INT32_MIN != ps2)) {
int64_t mres = ((int64_t) ps1 * (int64_t) ps2) << 1;
pd = ((mres >> 31) + 1) >> 1;
} else {
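The guard `(INT32_MIN != ps1) | (INT32_MIN != ps2)` in the kwmmul/kmmw* family singles out the one input pair whose doubled product cannot be represented: (-2^31) * (-2^31) = 2^62, and doubling that needs 2^63, one past INT64_MAX. Every other product doubles safely inside int64_t. A scalar sketch of the element operation, with the guard written the equivalent way around:

    #include <cstdint>
    #include <climits>

    // Q31 doubling multiply-high: saturate only for INT32_MIN * INT32_MIN.
    static int32_t kwmmul_elem(int32_t a, int32_t b, bool &ov)
    {
      if (a == INT32_MIN && b == INT32_MIN) {
        ov = true;                           // overflow flag (OV)
        return INT32_MAX;
      }
      int64_t mres = ((int64_t)a * b) << 1;  // fits: |a * b| < 2^62 here
      return (int32_t)(mres >> 32);
    }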
diff --git a/riscv/insns/rem.h b/riscv/insns/rem.h
index 8587995..d2ee066 100644
--- a/riscv/insns/rem.h
+++ b/riscv/insns/rem.h
@@ -1,9 +1,9 @@
require_extension('M');
sreg_t lhs = sext_xlen(RS1);
sreg_t rhs = sext_xlen(RS2);
-if(rhs == 0)
+if (rhs == 0)
WRITE_RD(lhs);
-else if(lhs == INT64_MIN && rhs == -1)
+else if (lhs == INT64_MIN && rhs == -1)
WRITE_RD(0);
else
WRITE_RD(sext_xlen(lhs % rhs));
diff --git a/riscv/insns/remu.h b/riscv/insns/remu.h
index e74774c..676747a 100644
--- a/riscv/insns/remu.h
+++ b/riscv/insns/remu.h
@@ -1,7 +1,7 @@
require_extension('M');
reg_t lhs = zext_xlen(RS1);
reg_t rhs = zext_xlen(RS2);
-if(rhs == 0)
+if (rhs == 0)
WRITE_RD(sext_xlen(RS1));
else
WRITE_RD(sext_xlen(lhs % rhs));
diff --git a/riscv/insns/remuw.h b/riscv/insns/remuw.h
index b239c8f..caa1583 100644
--- a/riscv/insns/remuw.h
+++ b/riscv/insns/remuw.h
@@ -2,7 +2,7 @@ require_extension('M');
require_rv64;
reg_t lhs = zext32(RS1);
reg_t rhs = zext32(RS2);
-if(rhs == 0)
+if (rhs == 0)
WRITE_RD(sext32(lhs));
else
WRITE_RD(sext32(lhs % rhs));
diff --git a/riscv/insns/remw.h b/riscv/insns/remw.h
index 56221cc..076096c 100644
--- a/riscv/insns/remw.h
+++ b/riscv/insns/remw.h
@@ -2,7 +2,7 @@ require_extension('M');
require_rv64;
sreg_t lhs = sext32(RS1);
sreg_t rhs = sext32(RS2);
-if(rhs == 0)
+if (rhs == 0)
WRITE_RD(lhs);
else
WRITE_RD(sext32(lhs % rhs));
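These branches encode RISC-V's trap-free division rules: a remainder by zero returns the dividend unchanged, and the one signed-overflow case, INT_MIN rem -1, returns 0 (it is the quotient, INT_MIN / -1, that overflows; its remainder is exactly zero). Both are cases where a bare C % would be undefined behavior:

    #include <cstdint>
    #include <climits>

    // RISC-V REM on a 64-bit register: no traps, fixed edge-case results.
    static int64_t riscv_rem(int64_t lhs, int64_t rhs)
    {
      if (rhs == 0) return lhs;                    // remainder by zero
      if (lhs == INT64_MIN && rhs == -1) return 0; // signed-overflow case
      return lhs % rhs;
    }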
diff --git a/riscv/insns/rsub64.h b/riscv/insns/rsub64.h
index 397c973..2a58485 100644
--- a/riscv/insns/rsub64.h
+++ b/riscv/insns/rsub64.h
@@ -2,7 +2,7 @@ P_64_PROFILE({
rd = (rs1 - rs2) >> 1;
if (rs1 > 0 && rs2 < 0) {
rd &= ~((reg_t)1 << 63);
- } else if(rs1 < 0 && rs2 > 0) {
+ } else if (rs1 < 0 && rs2 > 0) {
rd |= ((reg_t)1 << 63);
}
})
diff --git a/riscv/insns/smul16.h b/riscv/insns/smul16.h
index 8f87612..7e0f08a 100644
--- a/riscv/insns/smul16.h
+++ b/riscv/insns/smul16.h
@@ -1,3 +1,3 @@
P_MUL_LOOP(16, {
- pd = ps1 * ps2;
+ pd = (int32_t)ps1 * (int32_t)ps2;
})
diff --git a/riscv/insns/smul8.h b/riscv/insns/smul8.h
index 155e50e..a4a3ed9 100644
--- a/riscv/insns/smul8.h
+++ b/riscv/insns/smul8.h
@@ -1,3 +1,3 @@
P_MUL_LOOP(8, {
- pd = ps1 * ps2;
+ pd = (int16_t)ps1 * (int16_t)ps2;
})
diff --git a/riscv/insns/smulx16.h b/riscv/insns/smulx16.h
index 14ae047..58e9a08 100644
--- a/riscv/insns/smulx16.h
+++ b/riscv/insns/smulx16.h
@@ -1,3 +1,3 @@
P_MUL_CROSS_LOOP(16, {
- pd = ps1 * ps2;
+ pd = (int32_t)ps1 * (int32_t)ps2;
})
diff --git a/riscv/insns/smulx8.h b/riscv/insns/smulx8.h
index b5ae41c..9270ce3 100644
--- a/riscv/insns/smulx8.h
+++ b/riscv/insns/smulx8.h
@@ -1,3 +1,3 @@
P_MUL_CROSS_LOOP(8, {
- pd = ps1 * ps2;
+ pd = (int16_t)ps1 * (int16_t)ps2;
})
diff --git a/riscv/insns/sra16_u.h b/riscv/insns/sra16_u.h
index c28178e..6fcc398 100644
--- a/riscv/insns/sra16_u.h
+++ b/riscv/insns/sra16_u.h
@@ -1,5 +1,5 @@
P_X_LOOP(16, 4, {
- if(sa > 0)
+ if (sa > 0)
pd = ((ps1 >> (sa - 1)) + 1) >> 1;
else
pd = ps1;
diff --git a/riscv/insns/sra32_u.h b/riscv/insns/sra32_u.h
index e062a88..1a4488c 100644
--- a/riscv/insns/sra32_u.h
+++ b/riscv/insns/sra32_u.h
@@ -1,6 +1,6 @@
require_rv64;
P_X_LOOP(32, 5, {
- if(sa > 0)
+ if (sa > 0)
pd = (((uint64_t)(ps1 >> (sa - 1))) + 1) >> 1;
else
pd = ps1;
diff --git a/riscv/insns/sra8_u.h b/riscv/insns/sra8_u.h
index 7061fc4..1f47623 100644
--- a/riscv/insns/sra8_u.h
+++ b/riscv/insns/sra8_u.h
@@ -1,5 +1,5 @@
P_X_LOOP(8, 3, {
- if(sa > 0)
+ if (sa > 0)
pd = ((ps1 >> (sa - 1)) + 1) >> 1;
else
pd = ps1;
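The `_u` shift variants round to nearest rather than truncate: shift by one bit less so the last discarded bit survives, add 1 so a set half-bit carries into the result, then drop it with the final shift. The `sa > 0` guard is load-bearing, since `sa - 1` would be an invalid shift count for a zero shift amount. For instance, with x = 5 and sa = 1, ((5 >> 0) + 1) >> 1 = 3, whereas plain truncation gives 2:

    #include <cstdint>

    // Rounding arithmetic right shift: keep one guard bit, round, drop it.
    static int32_t sra_round(int32_t x, unsigned sa)
    {
      return sa > 0 ? ((x >> (sa - 1)) + 1) >> 1 : x;
    }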
diff --git a/riscv/insns/umul16.h b/riscv/insns/umul16.h
index 860f942..09b839c 100644
--- a/riscv/insns/umul16.h
+++ b/riscv/insns/umul16.h
@@ -1,3 +1,3 @@
P_MUL_ULOOP(16, {
- pd = ps1 * ps2;
+ pd = (uint32_t)ps1 * (uint32_t)ps2;
})
diff --git a/riscv/insns/umul8.h b/riscv/insns/umul8.h
index 04d7a6e..29cae88 100644
--- a/riscv/insns/umul8.h
+++ b/riscv/insns/umul8.h
@@ -1,3 +1,3 @@
P_MUL_ULOOP(8, {
- pd = ps1 * ps2;
+ pd = (uint16_t)ps1 * (uint16_t)ps2;
})
diff --git a/riscv/insns/umulx16.h b/riscv/insns/umulx16.h
index 5abe9cf..3f0cce8 100644
--- a/riscv/insns/umulx16.h
+++ b/riscv/insns/umulx16.h
@@ -1,3 +1,3 @@
P_MUL_CROSS_ULOOP(16, {
- pd = ps1 * ps2;
+ pd = (uint32_t)ps1 * (uint32_t)ps2;
})
diff --git a/riscv/insns/umulx8.h b/riscv/insns/umulx8.h
index a2b073d..848b5d5 100644
--- a/riscv/insns/umulx8.h
+++ b/riscv/insns/umulx8.h
@@ -1,3 +1,3 @@
P_MUL_CROSS_ULOOP(8, {
- pd = ps1 * ps2;
+ pd = (uint16_t)ps1 * (uint16_t)ps2;
})
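For the unsigned 16-bit multiplies the added casts fix real undefined behavior, not just style: uint16_t operands promote to signed int, so 0xFFFF * 0xFFFF is evaluated as a signed 32-bit multiply that overflows. Casting one operand (here both, for symmetry) keeps the arithmetic unsigned. Assuming the usual 32-bit int:

    #include <cstdint>

    // uint16_t promotes to int, so the raw product can overflow: UB.
    static uint32_t widening_umul16(uint16_t a, uint16_t b)
    {
      // uint32_t bad = a * b;            // 65535 * 65535 > INT_MAX
      return (uint32_t)a * (uint32_t)b;   // well-defined: 0xFFFE0001
    }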
diff --git a/riscv/insns/vcpop_m.h b/riscv/insns/vcpop_m.h
index cbe45a4..671362f 100644
--- a/riscv/insns/vcpop_m.h
+++ b/riscv/insns/vcpop_m.h
@@ -2,8 +2,6 @@
require(P.VU.vsew >= e8 && P.VU.vsew <= e64);
require_vector(true);
reg_t vl = P.VU.vl->read();
-reg_t sew = P.VU.vsew;
-reg_t rd_num = insn.rd();
reg_t rs2_num = insn.rs2();
require(P.VU.vstart->read() == 0);
reg_t popcount = 0;
diff --git a/riscv/insns/vdiv_vx.h b/riscv/insns/vdiv_vx.h
index 4052952..2b93eac 100644
--- a/riscv/insns/vdiv_vx.h
+++ b/riscv/insns/vdiv_vx.h
@@ -1,9 +1,9 @@
// vdiv.vx vd, vs2, rs1
VI_VX_LOOP
({
- if(rs1 == 0)
+ if (rs1 == 0)
vd = -1;
- else if(vs2 == (INT64_MIN >> (64 - sew)) && rs1 == -1)
+ else if (vs2 == (INT64_MIN >> (64 - sew)) && rs1 == -1)
vd = vs2;
else
vd = vs2 / rs1;
diff --git a/riscv/insns/vdivu_vv.h b/riscv/insns/vdivu_vv.h
index ef6e777..89aeed6 100644
--- a/riscv/insns/vdivu_vv.h
+++ b/riscv/insns/vdivu_vv.h
@@ -1,7 +1,7 @@
// vdivu.vv vd, vs2, vs1
VI_VV_ULOOP
({
- if(vs1 == 0)
+ if (vs1 == 0)
vd = -1;
else
vd = vs2 / vs1;
diff --git a/riscv/insns/vdivu_vx.h b/riscv/insns/vdivu_vx.h
index 7ffe1c6..ce3e964 100644
--- a/riscv/insns/vdivu_vx.h
+++ b/riscv/insns/vdivu_vx.h
@@ -1,7 +1,7 @@
// vdivu.vx vd, vs2, rs1
VI_VX_ULOOP
({
- if(rs1 == 0)
+ if (rs1 == 0)
vd = -1;
else
vd = vs2 / rs1;
diff --git a/riscv/insns/vfirst_m.h b/riscv/insns/vfirst_m.h
index 5b768ed..9ddc82b 100644
--- a/riscv/insns/vfirst_m.h
+++ b/riscv/insns/vfirst_m.h
@@ -2,8 +2,6 @@
require(P.VU.vsew >= e8 && P.VU.vsew <= e64);
require_vector(true);
reg_t vl = P.VU.vl->read();
-reg_t sew = P.VU.vsew;
-reg_t rd_num = insn.rd();
reg_t rs2_num = insn.rs2();
require(P.VU.vstart->read() == 0);
reg_t pos = -1;
diff --git a/riscv/insns/vfmv_f_s.h b/riscv/insns/vfmv_f_s.h
index 81605ea..0f3cf8c 100644
--- a/riscv/insns/vfmv_f_s.h
+++ b/riscv/insns/vfmv_f_s.h
@@ -9,7 +9,7 @@ require(STATE.frm->read() < 0x5);
reg_t rs2_num = insn.rs2();
uint64_t vs2_0 = 0;
const reg_t sew = P.VU.vsew;
-switch(sew) {
+switch (sew) {
case e16:
vs2_0 = P.VU.elt<uint16_t>(rs2_num, 0);
break;
diff --git a/riscv/insns/vfmv_s_f.h b/riscv/insns/vfmv_s_f.h
index edc376e..e50ad41 100644
--- a/riscv/insns/vfmv_s_f.h
+++ b/riscv/insns/vfmv_s_f.h
@@ -11,7 +11,7 @@ reg_t vl = P.VU.vl->read();
if (vl > 0 && P.VU.vstart->read() < vl) {
reg_t rd_num = insn.rd();
- switch(P.VU.vsew) {
+ switch (P.VU.vsew) {
case e16:
P.VU.elt<uint16_t>(rd_num, 0, true) = f16(FRS1).v;
break;
diff --git a/riscv/insns/vfslide1down_vf.h b/riscv/insns/vfslide1down_vf.h
index 66eeacc..40f3c18 100644
--- a/riscv/insns/vfslide1down_vf.h
+++ b/riscv/insns/vfslide1down_vf.h
@@ -23,13 +23,13 @@ if (i != vl - 1) {
} else {
switch (P.VU.vsew) {
case e16:
- P.VU.elt<float16_t>(rd_num, vl - 1, true) = f16(FRS1);
+ P.VU.elt<float16_t>(rd_num, vl - 1, true) = FRS1_H;
break;
case e32:
- P.VU.elt<float32_t>(rd_num, vl - 1, true) = f32(FRS1);
+ P.VU.elt<float32_t>(rd_num, vl - 1, true) = FRS1_F;
break;
case e64:
- P.VU.elt<float64_t>(rd_num, vl - 1, true) = f64(FRS1);
+ P.VU.elt<float64_t>(rd_num, vl - 1, true) = FRS1_D;
break;
}
}
diff --git a/riscv/insns/vfslide1up_vf.h b/riscv/insns/vfslide1up_vf.h
index b9c2817..4e4e499 100644
--- a/riscv/insns/vfslide1up_vf.h
+++ b/riscv/insns/vfslide1up_vf.h
@@ -23,13 +23,13 @@ if (i != 0) {
} else {
switch (P.VU.vsew) {
case e16:
- P.VU.elt<float16_t>(rd_num, 0, true) = f16(FRS1);
+ P.VU.elt<float16_t>(rd_num, 0, true) = FRS1_H;
break;
case e32:
- P.VU.elt<float32_t>(rd_num, 0, true) = f32(FRS1);
+ P.VU.elt<float32_t>(rd_num, 0, true) = FRS1_F;
break;
case e64:
- P.VU.elt<float64_t>(rd_num, 0, true) = f64(FRS1);
+ P.VU.elt<float64_t>(rd_num, 0, true) = FRS1_D;
break;
}
}
diff --git a/riscv/insns/vid_v.h b/riscv/insns/vid_v.h
index c316291..510132d 100644
--- a/riscv/insns/vid_v.h
+++ b/riscv/insns/vid_v.h
@@ -1,11 +1,8 @@
// vmpopc rd, vs2, vm
require(P.VU.vsew >= e8 && P.VU.vsew <= e64);
require_vector(true);
-reg_t vl = P.VU.vl->read();
reg_t sew = P.VU.vsew;
reg_t rd_num = insn.rd();
-reg_t rs1_num = insn.rs1();
-reg_t rs2_num = insn.rs2();
require_align(rd_num, P.VU.vflmul);
require_vm;
diff --git a/riscv/insns/viota_m.h b/riscv/insns/viota_m.h
index f74f2c2..1ee9229 100644
--- a/riscv/insns/viota_m.h
+++ b/riscv/insns/viota_m.h
@@ -4,7 +4,6 @@ require_vector(true);
reg_t vl = P.VU.vl->read();
reg_t sew = P.VU.vsew;
reg_t rd_num = insn.rd();
-reg_t rs1_num = insn.rs1();
reg_t rs2_num = insn.rs2();
require(P.VU.vstart->read() == 0);
require_vm;
diff --git a/riscv/insns/vmsbf_m.h b/riscv/insns/vmsbf_m.h
index 6147f6d..1275872 100644
--- a/riscv/insns/vmsbf_m.h
+++ b/riscv/insns/vmsbf_m.h
@@ -24,7 +24,7 @@ for (reg_t i = P.VU.vstart->read(); i < vl; ++i) {
uint64_t res = 0;
if (!has_one && !vs2_lsb) {
res = 1;
- } else if(!has_one && vs2_lsb) {
+ } else if (!has_one && vs2_lsb) {
has_one = true;
}
vd = (vd & ~mmask) | ((res << mpos) & mmask);
diff --git a/riscv/insns/vmsif_m.h b/riscv/insns/vmsif_m.h
index 447813f..cbcbc2a 100644
--- a/riscv/insns/vmsif_m.h
+++ b/riscv/insns/vmsif_m.h
@@ -23,7 +23,7 @@ for (reg_t i = P.VU.vstart->read(); i < vl; ++i) {
uint64_t res = 0;
if (!has_one && !vs2_lsb) {
res = 1;
- } else if(!has_one && vs2_lsb) {
+ } else if (!has_one && vs2_lsb) {
has_one = true;
res = 1;
}
diff --git a/riscv/insns/vmsof_m.h b/riscv/insns/vmsof_m.h
index b9edcf3..9bd4f0c 100644
--- a/riscv/insns/vmsof_m.h
+++ b/riscv/insns/vmsof_m.h
@@ -21,7 +21,7 @@ for (reg_t i = P.VU.vstart->read() ; i < vl; ++i) {
if (insn.v_vm() == 1 || (insn.v_vm() == 0 && do_mask)) {
uint64_t &vd = P.VU.elt<uint64_t>(rd_num, midx, true);
uint64_t res = 0;
- if(!has_one && vs2_lsb) {
+ if (!has_one && vs2_lsb) {
has_one = true;
res = 1;
}
diff --git a/riscv/insns/vmv_s_x.h b/riscv/insns/vmv_s_x.h
index b66855b..23a6b56 100644
--- a/riscv/insns/vmv_s_x.h
+++ b/riscv/insns/vmv_s_x.h
@@ -8,7 +8,7 @@ if (vl > 0 && P.VU.vstart->read() < vl) {
reg_t rd_num = insn.rd();
reg_t sew = P.VU.vsew;
- switch(sew) {
+ switch (sew) {
case e8:
P.VU.elt<uint8_t>(rd_num, 0, true) = RS1;
break;
diff --git a/riscv/insns/vmv_x_s.h b/riscv/insns/vmv_x_s.h
index d33c3e5..57a9e1a 100644
--- a/riscv/insns/vmv_x_s.h
+++ b/riscv/insns/vmv_x_s.h
@@ -1,27 +1,27 @@
// vmv_x_s: rd = vs2[0]
require_vector(true);
require(insn.v_vm() == 1);
-uint64_t xmask = UINT64_MAX >> (64 - P.get_isa().get_max_xlen());
-reg_t rs1 = RS1;
reg_t sew = P.VU.vsew;
reg_t rs2_num = insn.rs2();
-switch(sew) {
+reg_t res;
+switch (sew) {
case e8:
- WRITE_RD(P.VU.elt<int8_t>(rs2_num, 0));
+ res = P.VU.elt<int8_t>(rs2_num, 0);
break;
case e16:
- WRITE_RD(P.VU.elt<int16_t>(rs2_num, 0));
+ res = P.VU.elt<int16_t>(rs2_num, 0);
break;
case e32:
- WRITE_RD(P.VU.elt<int32_t>(rs2_num, 0));
+ res = P.VU.elt<int32_t>(rs2_num, 0);
break;
case e64:
- if (P.get_isa().get_max_xlen() <= sew)
- WRITE_RD(P.VU.elt<uint64_t>(rs2_num, 0) & xmask);
- else
- WRITE_RD(P.VU.elt<uint64_t>(rs2_num, 0));
+ res = P.VU.elt<uint64_t>(rs2_num, 0);
break;
+default:
+ abort();
}
+WRITE_RD(sext_xlen(res));
+
P.VU.vstart->write(0);
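Routing every element width through a single `res` and finishing with WRITE_RD(sext_xlen(res)) removes the per-case masking: the signed element types handle the first sign extension, and sext_xlen truncates-and-extends the result to the current XLEN, which also covers the old e64-on-RV32 special case. A sketch of the helper's presumed effect (the name is from the diff; the body is the conventional definition, stated as an assumption):

    #include <cstdint>

    // Assumed shape of sext_xlen: park the value at the top of a signed
    // 64-bit word, then arithmetic-shift back to replicate bit xlen-1.
    static int64_t sext_xlen_sketch(int64_t x, unsigned xlen)
    {
      return (x << (64 - xlen)) >> (64 - xlen);
    }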
diff --git a/riscv/insns/vmvnfr_v.h b/riscv/insns/vmvnfr_v.h
index f6dc2c0..9c52810 100644
--- a/riscv/insns/vmvnfr_v.h
+++ b/riscv/insns/vmvnfr_v.h
@@ -1,6 +1,5 @@
-// vmv1r.v vd, vs2
-require_vector_novtype(true, true);
-const reg_t baseAddr = RS1;
+// vmv<nf>r.v vd, vs2
+require_vector(true);
const reg_t vd = insn.rd();
const reg_t vs2 = insn.rs2();
const reg_t len = insn.rs1() + 1;
diff --git a/riscv/insns/vrem_vv.h b/riscv/insns/vrem_vv.h
index 260716a..5c58fa4 100644
--- a/riscv/insns/vrem_vv.h
+++ b/riscv/insns/vrem_vv.h
@@ -3,7 +3,7 @@ VI_VV_LOOP
({
if (vs1 == 0)
vd = vs2;
- else if(vs2 == -(((intmax_t)1) << (sew - 1)) && vs1 == -1)
+ else if (vs2 == -(((intmax_t)1) << (sew - 1)) && vs1 == -1)
vd = 0;
else {
vd = vs2 % vs1;
diff --git a/riscv/insns/vrgather_vi.h b/riscv/insns/vrgather_vi.h
index 56e11e1..85ba621 100644
--- a/riscv/insns/vrgather_vi.h
+++ b/riscv/insns/vrgather_vi.h
@@ -7,10 +7,6 @@ require_vm;
reg_t zimm5 = insn.v_zimm5();
VI_LOOP_BASE
-
-for (reg_t i = P.VU.vstart->read(); i < vl; ++i) {
- VI_LOOP_ELEMENT_SKIP();
-
switch (sew) {
case e8:
P.VU.elt<uint8_t>(rd_num, i, true) = zimm5 >= P.VU.vlmax ? 0 : P.VU.elt<uint8_t>(rs2_num, zimm5);
@@ -25,6 +21,4 @@ for (reg_t i = P.VU.vstart->read(); i < vl; ++i) {
P.VU.elt<uint64_t>(rd_num, i, true) = zimm5 >= P.VU.vlmax ? 0 : P.VU.elt<uint64_t>(rs2_num, zimm5);
break;
}
-}
-
VI_LOOP_END;
diff --git a/riscv/insns/vsadd_vi.h b/riscv/insns/vsadd_vi.h
index 7e3b652..3a8b1d4 100644
--- a/riscv/insns/vsadd_vi.h
+++ b/riscv/insns/vsadd_vi.h
@@ -2,7 +2,7 @@
VI_CHECK_SSS(false);
VI_LOOP_BASE
bool sat = false;
-switch(sew) {
+switch (sew) {
case e8: {
VI_PARAMS(e8);
vd = sat_add<int8_t, uint8_t>(vs2, vsext(simm5, sew), sat);
diff --git a/riscv/insns/vsadd_vv.h b/riscv/insns/vsadd_vv.h
index 60ad5f3..d4cfe78 100644
--- a/riscv/insns/vsadd_vv.h
+++ b/riscv/insns/vsadd_vv.h
@@ -2,7 +2,7 @@
VI_CHECK_SSS(true);
VI_LOOP_BASE
bool sat = false;
-switch(sew) {
+switch (sew) {
case e8: {
VV_PARAMS(e8);
vd = sat_add<int8_t, uint8_t>(vs2, vs1, sat);
diff --git a/riscv/insns/vsadd_vx.h b/riscv/insns/vsadd_vx.h
index bf68f15..e5e6c40 100644
--- a/riscv/insns/vsadd_vx.h
+++ b/riscv/insns/vsadd_vx.h
@@ -2,7 +2,7 @@
VI_CHECK_SSS(false);
VI_LOOP_BASE
bool sat = false;
-switch(sew) {
+switch (sew) {
case e8: {
VX_PARAMS(e8);
vd = sat_add<int8_t, uint8_t>(vs2, rs1, sat);
diff --git a/riscv/insns/vsetivli.h b/riscv/insns/vsetivli.h
index 04900a2..f880e96 100644
--- a/riscv/insns/vsetivli.h
+++ b/riscv/insns/vsetivli.h
@@ -1,2 +1,2 @@
-require_vector_novtype(false, false);
+require_vector_novtype(false);
WRITE_RD(P.VU.set_vl(insn.rd(), -1, insn.rs1(), insn.v_zimm10()));
diff --git a/riscv/insns/vsetvl.h b/riscv/insns/vsetvl.h
index 2969edc..4d03542 100644
--- a/riscv/insns/vsetvl.h
+++ b/riscv/insns/vsetvl.h
@@ -1,2 +1,2 @@
-require_vector_novtype(false, false);
+require_vector_novtype(false);
WRITE_RD(P.VU.set_vl(insn.rd(), insn.rs1(), RS1, RS2));
diff --git a/riscv/insns/vsetvli.h b/riscv/insns/vsetvli.h
index 7b1f1d7..d1f43b5 100644
--- a/riscv/insns/vsetvli.h
+++ b/riscv/insns/vsetvli.h
@@ -1,2 +1,2 @@
-require_vector_novtype(false, false);
+require_vector_novtype(false);
WRITE_RD(P.VU.set_vl(insn.rd(), insn.rs1(), RS1, insn.v_zimm11()));
diff --git a/riscv/insns/vslide1up_vx.h b/riscv/insns/vslide1up_vx.h
index 33cb9ed..256419e 100644
--- a/riscv/insns/vslide1up_vx.h
+++ b/riscv/insns/vslide1up_vx.h
@@ -6,24 +6,24 @@ if (i != 0) {
if (sew == e8) {
VI_XI_SLIDEUP_PARAMS(e8, 1);
vd = vs2;
- } else if(sew == e16) {
+ } else if (sew == e16) {
VI_XI_SLIDEUP_PARAMS(e16, 1);
vd = vs2;
- } else if(sew == e32) {
+ } else if (sew == e32) {
VI_XI_SLIDEUP_PARAMS(e32, 1);
vd = vs2;
- } else if(sew == e64) {
+ } else if (sew == e64) {
VI_XI_SLIDEUP_PARAMS(e64, 1);
vd = vs2;
}
} else {
if (sew == e8) {
P.VU.elt<uint8_t>(rd_num, 0, true) = RS1;
- } else if(sew == e16) {
+ } else if (sew == e16) {
P.VU.elt<uint16_t>(rd_num, 0, true) = RS1;
- } else if(sew == e32) {
+ } else if (sew == e32) {
P.VU.elt<uint32_t>(rd_num, 0, true) = RS1;
- } else if(sew == e64) {
+ } else if (sew == e64) {
P.VU.elt<uint64_t>(rd_num, 0, true) = RS1;
}
}
diff --git a/riscv/insns/vsmul_vv.h b/riscv/insns/vsmul_vv.h
index 413981c..49e42c1 100644
--- a/riscv/insns/vsmul_vv.h
+++ b/riscv/insns/vsmul_vv.h
@@ -2,27 +2,19 @@
VRM xrm = P.VU.get_vround_mode();
int64_t int_max = INT64_MAX >> (64 - P.VU.vsew);
int64_t int_min = INT64_MIN >> (64 - P.VU.vsew);
-int64_t sign_mask = uint64_t(1) << (P.VU.vsew - 1);
VI_VV_LOOP
({
- int64_t vs1_sign;
- int64_t vs2_sign;
- int64_t result_sign;
-
- vs1_sign = vs1 & sign_mask;
- vs2_sign = vs2 & sign_mask;
bool overflow = vs1 == vs2 && vs1 == int_min;
-
int128_t result = (int128_t)vs1 * (int128_t)vs2;
- result_sign = (vs1_sign ^ vs2_sign) & sign_mask;
// rounding
INT_ROUNDING(result, xrm, sew - 1);
+
// remove guard bits
result = result >> (sew - 1);
- // saturation
+ // max saturation
if (overflow) {
result = int_max;
P_SET_OV(1);
diff --git a/riscv/insns/vsmul_vx.h b/riscv/insns/vsmul_vx.h
index 2e25670..d2724ee 100644
--- a/riscv/insns/vsmul_vx.h
+++ b/riscv/insns/vsmul_vx.h
@@ -2,20 +2,11 @@
VRM xrm = P.VU.get_vround_mode();
int64_t int_max = INT64_MAX >> (64 - P.VU.vsew);
int64_t int_min = INT64_MIN >> (64 - P.VU.vsew);
-int64_t sign_mask = uint64_t(1) << (P.VU.vsew - 1);
VI_VX_LOOP
({
- int64_t rs1_sign;
- int64_t vs2_sign;
- int64_t result_sign;
-
- rs1_sign = rs1 & sign_mask;
- vs2_sign = vs2 & sign_mask;
bool overflow = rs1 == vs2 && rs1 == int_min;
-
int128_t result = (int128_t)rs1 * (int128_t)vs2;
- result_sign = (rs1_sign ^ vs2_sign) & sign_mask;
// rounding
INT_ROUNDING(result, xrm, sew - 1);
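The deleted sign bookkeeping in vsmul was dead weight: result_sign was computed and never consumed, since the int128 multiply already carries the sign. What remains is the core shape of the instruction: widen, round at the guard bits, shift them out, and saturate only for the single overflowing pair int_min x int_min. A scalar sketch for SEW=32, assuming the round-to-nearest-up (rnu) mode:

    #include <cstdint>
    #include <climits>

    typedef __int128 int128_t;  // GCC/Clang extension, as in the simulator

    // Q31 x Q31 -> Q31 saturating multiply with rnu rounding.
    static int32_t vsmul32_rnu(int32_t a, int32_t b, bool &sat)
    {
      if (a == INT32_MIN && b == INT32_MIN) { sat = true; return INT32_MAX; }
      int128_t r = (int128_t)a * b;
      r += (int128_t)1 << 30;       // rnu: round half up at bit 31
      return (int32_t)(r >> 31);    // discard the sew-1 guard bits
    }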
diff --git a/riscv/insns/wfi.h b/riscv/insns/wfi.h
index 299cb01..3411da0 100644
--- a/riscv/insns/wfi.h
+++ b/riscv/insns/wfi.h
@@ -5,7 +5,9 @@ if (STATE.v && STATE.prv == PRV_U) {
} else if (STATE.v) { // VS-mode
if (get_field(STATE.hstatus->read(), HSTATUS_VTW))
require_novirt();
-} else {
+} else if (p->extension_enabled('S')) {
+ // When S-mode is implemented, then executing WFI in
+ // U-mode causes an illegal instruction exception.
require_privilege(PRV_S);
}
wfi();
diff --git a/riscv/interactive.cc b/riscv/interactive.cc
index 88eb86b..cf95bf6 100644
--- a/riscv/interactive.cc
+++ b/riscv/interactive.cc
@@ -20,13 +20,45 @@
#include <algorithm>
#include <math.h>
+#ifdef __GNUC__
+# pragma GCC diagnostic ignored "-Wunused-parameter"
+#endif
+
#define MAX_CMD_STR 40 // maximum possible size of a command line
+#define BITS_PER_CHAR 8
#define STR_(X) #X // these definitions allow to use a macro as a string
#define STR(X) STR_(X)
DECLARE_TRAP(-1, interactive)
+static std::vector<std::string> history_commands;
+
+// an arrow/home key arrives as a 3- or 4-byte escape sequence,
+// so we buffer the partial sequence in a uint32_t
+typedef uint32_t keybuffer_t;
+
+enum KEYCODE
+{
+ KEYCODE_HEADER0 = 0x1b,
+ KEYCODE_HEADER1 = 0x1b5b,
+ KEYCODE_LEFT = 0x1b5b44,
+ KEYCODE_RIGHT = 0x1b5b43,
+ KEYCODE_UP = 0x1b5b41,
+ KEYCODE_DOWN = 0x1b5b42,
+ KEYCODE_HOME0 = 0x1b5b48,
+ KEYCODE_HOME1_0 = 0x1b5b31,
+ KEYCODE_HOME1_1 = 0x1b5b317e,
+ KEYCODE_END0 = 0x1b5b46,
+ KEYCODE_END1_0 = 0x1b5b34,
+ KEYCODE_END1_1 = 0x1b5b347e,
+ KEYCODE_BACKSPACE0 = 0x8,
+ KEYCODE_BACKSPACE1_0 = 0x1b5b33,
+ KEYCODE_BACKSPACE1_1 = 0x1b5b337e,
+ KEYCODE_BACKSPACE2 = 0x7f,
+ KEYCODE_ENTER = '\n',
+};
+
processor_t *sim_t::get_core(const std::string& i)
{
char *ptr;
@@ -36,30 +68,182 @@ processor_t *sim_t::get_core(const std::string& i)
return get_core(p);
}
+static void clear_str(bool noncanonical, int fd, std::string target_str)
+{
+ if (noncanonical)
+ {
+ std::string clear_motion;
+ clear_motion += '\r';
+ for (unsigned i = 0; i < target_str.size(); i++)
+ {
+ clear_motion += ' ';
+ }
+ clear_motion += '\r';
+ if (write(fd, clear_motion.c_str(), clear_motion.size() + 1))
+ ; // shut up gcc
+ }
+}
+
+static void send_key(bool noncanonical, int fd, keybuffer_t key_code, const int len)
+{
+ if (noncanonical)
+ {
+ std::string key_motion;
+ for (int i = len - 1; i >= 0; i--)
+ {
+ key_motion += (char) ((key_code >> (i * BITS_PER_CHAR)) & 0xff);
+ }
+ if (write(fd, key_motion.c_str(), len) != len)
+ ; // shut up gcc
+ }
+}
+
static std::string readline(int fd)
{
struct termios tios;
+ // try to put the terminal into noncanonical, non-echoing mode
+ if (tcgetattr(fd, &tios) == 0)
+ {
+ tios.c_lflag &= (~ICANON);
+ tios.c_lflag &= (~ECHO);
+ tcsetattr(fd, TCSANOW, &tios);
+ }
bool noncanonical = tcgetattr(fd, &tios) == 0 && (tios.c_lflag & ICANON) == 0;
- std::string s;
+ std::string s_head = std::string("(spike) ");
+ std::string s = s_head;
+ keybuffer_t key_buffer = 0;
+ // index for up/down arrow
+ size_t history_index = 0;
+ // position for left/right arrow
+ size_t cursor_pos = s.size();
+ const size_t initial_s_len = cursor_pos;
+ std::cerr << s << std::flush;
for (char ch; read(fd, &ch, 1) == 1; )
{
- if (ch == '\x7f')
+ uint32_t keycode = key_buffer << BITS_PER_CHAR | ch;
+ switch (keycode)
{
- if (s.empty())
- continue;
- s.erase(s.end()-1);
-
- if (noncanonical && write(fd, "\b \b", 3) != 3) {}
+ // the partial keycode, add to the key_buffer
+ case KEYCODE_HEADER0:
+ case KEYCODE_HEADER1:
+ case KEYCODE_HOME1_0:
+ case KEYCODE_END1_0:
+ case KEYCODE_BACKSPACE1_0:
+ key_buffer = keycode;
+ break;
+ // for backspace key
+ case KEYCODE_BACKSPACE0:
+ case KEYCODE_BACKSPACE1_1:
+ case KEYCODE_BACKSPACE2:
+ if (cursor_pos <= initial_s_len)
+ continue;
+ clear_str(noncanonical, fd, s);
+ cursor_pos--;
+ s.erase(cursor_pos, 1);
+ if (noncanonical && write(fd, s.c_str(), s.size() + 1) != 1)
+ ; // shut up gcc
+ // move cursor by left arrow key
+ for (unsigned i = 0; i < s.size() - cursor_pos; i++) {
+ send_key(noncanonical, fd, KEYCODE_LEFT, 3);
+ }
+ key_buffer = 0;
+ break;
+ case KEYCODE_HOME0:
+ case KEYCODE_HOME1_1:
+ // move cursor by left arrow key
+ for (unsigned i = 0; i < cursor_pos - initial_s_len; i++) {
+ send_key(noncanonical, fd, KEYCODE_LEFT, 3);
+ }
+ cursor_pos = initial_s_len;
+ key_buffer = 0;
+ break;
+ case KEYCODE_END0:
+ case KEYCODE_END1_1:
+ // move cursor by right arrow key
+ for (unsigned i = 0; i < s.size() - cursor_pos; i++) {
+ send_key(noncanonical, fd, KEYCODE_RIGHT, 3);
+ }
+ cursor_pos = s.size();
+ key_buffer = 0;
+ break;
+ case KEYCODE_UP:
+ // up arrow
+ if (history_commands.size() > 0) {
+ clear_str(noncanonical, fd, s);
+ history_index = std::min(history_commands.size(), history_index + 1);
+ s = history_commands[history_commands.size() - history_index];
+ if (noncanonical && write(fd, s.c_str(), s.size() + 1))
+ ; // shut up gcc
+ cursor_pos = s.size();
+ }
+ key_buffer = 0;
+ break;
+ case KEYCODE_DOWN:
+ // down arrow
+ if (history_commands.size() > 0) {
+ clear_str(noncanonical, fd, s);
+ history_index = std::max(0, (int)history_index - 1);
+ if (history_index == 0) {
+ s = s_head;
+ } else {
+ s = history_commands[history_commands.size() - history_index];
+ }
+ if (noncanonical && write(fd, s.c_str(), s.size() + 1))
+ ; // shut up gcc
+ cursor_pos = s.size();
+ }
+ key_buffer = 0;
+ break;
+ case KEYCODE_LEFT:
+ if (s.size() > initial_s_len) {
+ cursor_pos = cursor_pos - 1;
+ if ((int)cursor_pos < (int)initial_s_len) {
+ cursor_pos = initial_s_len;
+ } else {
+ send_key(noncanonical, fd, KEYCODE_LEFT, 3);
+ }
+ }
+ key_buffer = 0;
+ break;
+ case KEYCODE_RIGHT:
+ if (s.size() > initial_s_len) {
+ cursor_pos = cursor_pos + 1;
+ if (cursor_pos > s.size()) {
+ cursor_pos = s.size();
+ } else {
+ send_key(noncanonical, fd, KEYCODE_RIGHT, 3);
+ }
+ }
+ key_buffer = 0;
+ break;
+ case KEYCODE_ENTER:
+ if (noncanonical && write(fd, &ch, 1) != 1)
+ ; // shut up gcc
+ if (s.size() > initial_s_len && (history_commands.size() == 0 || s != history_commands[history_commands.size() - 1])) {
+ history_commands.push_back(s);
+ }
+ return s.substr(initial_s_len);
+ default:
+ // unknown buffered key, do nothing
+ if (key_buffer != 0) {
+ key_buffer = 0;
+ break;
+ }
+ clear_str(noncanonical, fd, s);
+ s.insert(cursor_pos, 1, ch);
+ cursor_pos++;
+ if (noncanonical && write(fd, s.c_str(), s.size()) != (ssize_t)s.size())
+ ; // shut up gcc
+ // send left arrow key to move cursor
+ for (unsigned i = 0; i < s.size() - cursor_pos; i++) {
+ send_key(noncanonical, fd, KEYCODE_LEFT, 3);
+ }
+ break;
}
- else if (noncanonical && write(fd, &ch, 1) != 1) {}
-
- if (ch == '\n')
- break;
- if (ch != '\x7f')
- s += ch;
}
- return s;
+ return s.substr(initial_s_len);
}
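For context, the arrow-key handling in readline() above accumulates multi-byte terminal escape sequences into key_buffer one byte at a time; the KEYCODE_* constants are defined earlier in interactive.cc and are not visible in this hunk. A minimal sketch of the assumed packing scheme (values illustrative, assuming 8-bit chars and xterm-style sequences):

    // Sketch only: the real KEYCODE_* values live elsewhere in interactive.cc.
    // Up-arrow arrives as ESC '[' 'A' (0x1b 0x5b 0x41) and accumulates as:
    //   key_buffer = 0x1b       -> partial (a KEYCODE_HEADER* case)
    //   key_buffer = 0x1b5b     -> still partial
    //   keycode    = 0x1b5b41   -> complete, matches the KEYCODE_UP case
    static keybuffer_t accumulate(keybuffer_t buf, char ch) {
      return (buf << BITS_PER_CHAR) | (unsigned char)ch;
    }

Once a complete sequence matches a case label, its handler runs and key_buffer is reset to zero; unrecognized sequences are silently discarded by the default case.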
#ifdef HAVE_BOOST_ASIO
@@ -90,7 +274,6 @@ std::string sim_t::rin(boost::asio::streambuf *bout_ptr) {
// output goes to socket
sout_.rdbuf(bout_ptr);
} else { // if we are not listening on a socket, get commands from terminal
- std::cerr << ": " << std::flush;
s = readline(2); // 2 is stderr, but when doing reads it reverts to stdin
// output goes to stderr
sout_.rdbuf(std::cerr.rdbuf());
@@ -132,9 +315,12 @@ void sim_t::interactive()
funcs["pc"] = &sim_t::interactive_pc;
funcs["mem"] = &sim_t::interactive_mem;
funcs["str"] = &sim_t::interactive_str;
+ funcs["mtime"] = &sim_t::interactive_mtime;
+ funcs["mtimecmp"] = &sim_t::interactive_mtimecmp;
funcs["until"] = &sim_t::interactive_until_silent;
funcs["untiln"] = &sim_t::interactive_until_noisy;
funcs["while"] = &sim_t::interactive_until_silent;
+ funcs["dump"] = &sim_t::interactive_dumpmems;
funcs["quit"] = &sim_t::interactive_quit;
funcs["q"] = funcs["quit"];
funcs["help"] = &sim_t::interactive_help;
@@ -150,17 +336,16 @@ void sim_t::interactive()
// first get commands from file, if cmd_file has been set
if (cmd_file && !feof(cmd_file) && fscanf(cmd_file,"%" STR(MAX_CMD_STR) "[^\n]\n", cmd_str)==1) {
// up to MAX_CMD_STR characters before \n, skipping \n
- s = cmd_str;
- // while we get input from file, output goes to stderr
- sout_.rdbuf(std::cerr.rdbuf());
+ s = cmd_str;
+ // while we get input from file, output goes to stderr
+ sout_.rdbuf(std::cerr.rdbuf());
} else {
- // when there are no commands left from file or if there was no file from the beginning
- cmd_file = NULL; // mark file pointer as being not valid, so any method can test this easily
+ // when there are no commands left from file or if there was no file from the beginning
+ cmd_file = NULL; // mark file pointer as being not valid, so any method can test this easily
#ifdef HAVE_BOOST_ASIO
- s = rin(&bout); // get command string from socket or terminal
+ s = rin(&bout); // get command string from socket or terminal
#else
- std::cerr << ": " << std::flush;
- s = readline(2); // 2 is stderr, but when doing reads it reverts to stdin
+ s = readline(2); // 2 is stderr, but when doing reads it reverts to stdin
#endif
}
@@ -211,15 +396,20 @@ void sim_t::interactive_help(const std::string& cmd, const std::vector<std::stri
"fregd <core> <reg> # Display double precision <reg> in <core>\n"
"vreg <core> [reg] # Display vector [reg] (all if omitted) in <core>\n"
"pc <core> # Show current PC in <core>\n"
- "mem <hex addr> # Show contents of physical memory\n"
- "str <core> <hex addr> # Show NUL-terminated C string at <hex addr> in core <core>\n"
+ "mem [core] <hex addr> # Show contents of virtual memory <hex addr> in [core] (physical memory <hex addr> if omitted)\n"
+ "str [core] <hex addr> # Show NUL-terminated C string at virtual address <hex addr> in [core] (physical address <hex addr> if omitted)\n"
+ "dump # Dump physical memory to binary files\n"
+ "mtime # Show mtime\n"
+ "mtimecmp <core> # Show mtimecmp for <core>\n"
"until reg <core> <reg> <val> # Stop when <reg> in <core> hits <val>\n"
+ "untiln reg <core> <reg> <val> # Run noisy and stop when <reg> in <core> hits <val>\n"
"until pc <core> <val> # Stop when PC in <core> hits <val>\n"
"untiln pc <core> <val> # Run noisy and stop when PC in <core> hits <val>\n"
- "until mem <addr> <val> # Stop when memory <addr> becomes <val>\n"
+ "until mem [core] <addr> <val> # Stop when virtual memory <addr> in [core] (physical address <addr> if omitted) becomes <val>\n"
+ "untiln mem [core] <addr> <val> # Run noisy and stop when virtual memory <addr> in [core] (physical address <addr> if omitted) becomes <val>\n"
"while reg <core> <reg> <val> # Run while <reg> in <core> is <val>\n"
"while pc <core> <val> # Run while PC in <core> is <val>\n"
- "while mem <addr> <val> # Run while memory <addr> is <val>\n"
+ "while mem [core] <addr> <val> # Run while virtual memory <addr> in [core] (physical memory <addr> if omitted) is <val>\n"
"run [count] # Resume noisy execution (until CTRL+C, or [count] insns)\n"
"r [count] Alias for run\n"
"rs [count] # Resume silent execution (until CTRL+C, or [count] insns)\n"
@@ -269,7 +459,7 @@ reg_t sim_t::get_pc(const std::vector<std::string>& args)
void sim_t::interactive_pc(const std::string& cmd, const std::vector<std::string>& args)
{
- if(args.size() != 1)
+ if (args.size() != 1)
throw trap_interactive();
processor_t *p = get_core(args[0]);
@@ -305,19 +495,33 @@ reg_t sim_t::get_reg(const std::vector<std::string>& args)
return p->get_state()->XPR[r];
}
-freg_t sim_t::get_freg(const std::vector<std::string>& args)
+freg_t sim_t::get_freg(const std::vector<std::string>& args, int size)
{
- if(args.size() != 2)
+ if (args.size() != 2)
throw trap_interactive();
processor_t *p = get_core(args[0]);
- int r = std::find(fpr_name, fpr_name + NFPR, args[1]) - fpr_name;
- if (r == NFPR)
- r = atoi(args[1].c_str());
- if (r >= NFPR)
- throw trap_interactive();
-
- return p->get_state()->FPR[r];
+ if (p->extension_enabled(EXT_ZFINX)) {
+ int r = std::find(xpr_name, xpr_name + NXPR, args[1]) - xpr_name;
+ if (r == NXPR)
+ r = atoi(args[1].c_str());
+ if (r >= NXPR)
+ throw trap_interactive();
+ if ((p->get_xlen() == 32) && (size == 64)) {
+ if (r % 2 != 0)
+ throw trap_interactive();
+ return freg(f64(r == 0 ? reg_t(0) : (READ_REG(r + 1) << 32) + zext32(READ_REG(r))));
+ } else { // xlen >= size
+ return {p->get_state()->XPR[r] | ~(((uint64_t)-1) >> (64 - size)), (uint64_t)-1};
+ }
+ } else {
+ int r = std::find(fpr_name, fpr_name + NFPR, args[1]) - fpr_name;
+ if (r == NFPR)
+ r = atoi(args[1].c_str());
+ if (r >= NFPR)
+ throw trap_interactive();
+ return p->get_state()->FPR[r];
+ }
}
void sim_t::interactive_vreg(const std::string& cmd, const std::vector<std::string>& args)
@@ -347,9 +551,9 @@ void sim_t::interactive_vreg(const std::string& cmd, const std::vector<std::stri
for (int r = rstart; r < rend; ++r) {
out << std::setfill (' ') << std::left << std::setw(4) << vr_name[r] << std::right << ": ";
- for (int e = num_elem-1; e >= 0; --e){
+ for (int e = num_elem-1; e >= 0; --e) {
uint64_t val;
- switch(elen){
+ switch (elen) {
case 8:
val = p->VU.elt<uint64_t>(r, e);
out << std::dec << "[" << e << "]: 0x" << std::hex << std::setfill ('0') << std::setw(16) << val << " ";
@@ -372,11 +576,10 @@ void sim_t::interactive_vreg(const std::string& cmd, const std::vector<std::stri
}
}
-
void sim_t::interactive_reg(const std::string& cmd, const std::vector<std::string>& args)
{
if (args.size() < 1)
- throw trap_interactive();
+ throw trap_interactive();
processor_t *p = get_core(args[0]);
int max_xlen = p->get_isa().get_max_xlen();
@@ -389,14 +592,14 @@ void sim_t::interactive_reg(const std::string& cmd, const std::vector<std::strin
for (int r = 0; r < NXPR; ++r) {
out << std::setfill(' ') << std::setw(4) << xpr_name[r]
- << ": 0x" << std::setfill('0') << std::setw(max_xlen/4)
- << zext(p->get_state()->XPR[r], max_xlen);
+ << ": 0x" << std::setfill('0') << std::setw(max_xlen/4)
+ << zext(p->get_state()->XPR[r], max_xlen);
if ((r + 1) % 4 == 0)
out << std::endl;
}
} else {
- out << "0x" << std::setfill('0') << std::setw(max_xlen/4)
- << zext(get_reg(args), max_xlen) << std::endl;
+ out << "0x" << std::setfill('0') << std::setw(max_xlen/4)
+ << zext(get_reg(args), max_xlen) << std::endl;
}
}
@@ -409,7 +612,7 @@ union fpr
void sim_t::interactive_freg(const std::string& cmd, const std::vector<std::string>& args)
{
- freg_t r = get_freg(args);
+ freg_t r = get_freg(args, 64);
std::ostream out(sout_.rdbuf());
out << std::hex << "0x" << std::setfill ('0') << std::setw(16) << r.v[1] << std::setw(16) << r.v[0] << std::endl;
@@ -418,7 +621,7 @@ void sim_t::interactive_freg(const std::string& cmd, const std::vector<std::stri
void sim_t::interactive_fregh(const std::string& cmd, const std::vector<std::string>& args)
{
fpr f;
- f.r = freg(f16_to_f32(f16(get_freg(args))));
+ f.r = freg(f16_to_f32(f16(get_freg(args, 16))));
std::ostream out(sout_.rdbuf());
out << (isBoxedF32(f.r) ? (double)f.s : NAN) << std::endl;
@@ -427,7 +630,7 @@ void sim_t::interactive_fregh(const std::string& cmd, const std::vector<std::str
void sim_t::interactive_fregs(const std::string& cmd, const std::vector<std::string>& args)
{
fpr f;
- f.r = get_freg(args);
+ f.r = get_freg(args, 32);
std::ostream out(sout_.rdbuf());
out << (isBoxedF32(f.r) ? (double)f.s : NAN) << std::endl;
@@ -436,7 +639,7 @@ void sim_t::interactive_fregs(const std::string& cmd, const std::vector<std::str
void sim_t::interactive_fregd(const std::string& cmd, const std::vector<std::string>& args)
{
fpr f;
- f.r = get_freg(args);
+ f.r = get_freg(args, 64);
std::ostream out(sout_.rdbuf());
out << (isBoxedF64(f.r) ? f.d : NAN) << std::endl;
@@ -460,7 +663,7 @@ reg_t sim_t::get_mem(const std::vector<std::string>& args)
if (addr == LONG_MAX)
addr = strtoul(addr_str.c_str(),NULL,16);
- switch(addr % 8)
+ switch (addr % 8)
{
case 0:
val = mmu->load_uint64(addr);
@@ -507,7 +710,7 @@ void sim_t::interactive_str(const std::string& cmd, const std::vector<std::strin
std::ostream out(sout_.rdbuf());
char ch;
- while((ch = mmu->load_uint8(addr++)))
+ while ((ch = mmu->load_uint8(addr++)))
out << ch;
out << std::endl;
@@ -577,3 +780,34 @@ void sim_t::interactive_until(const std::string& cmd, const std::vector<std::str
step(1);
}
}
+
+void sim_t::interactive_dumpmems(const std::string& cmd, const std::vector<std::string>& args)
+{
+ for (unsigned i = 0; i < mems.size(); i++) {
+ std::stringstream mem_fname;
+ mem_fname << "mem.0x" << std::hex << mems[i].first << ".bin";
+
+ std::ofstream mem_file(mem_fname.str());
+ mems[i].second->dump(mem_file);
+ mem_file.close();
+ }
+}
+
+void sim_t::interactive_mtime(const std::string& cmd, const std::vector<std::string>& args)
+{
+ std::ostream out(sout_.rdbuf());
+ out << std::hex << std::setfill('0') << "0x" << std::setw(16)
+ << clint->get_mtime() << std::endl;
+}
+
+void sim_t::interactive_mtimecmp(const std::string& cmd, const std::vector<std::string>& args)
+{
+ if (args.size() != 1)
+ throw trap_interactive();
+
+ processor_t *p = get_core(args[0]);
+ std::ostream out(sout_.rdbuf());
+ out << std::hex << std::setfill('0') << "0x" << std::setw(16)
+ << clint->get_mtimecmp(p->get_id()) << std::endl;
+}
+
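The three new commands are exercised from the interactive prompt; a hypothetical session (addresses and values illustrative only):

    (spike) mtime
    0x00000000000003e8
    (spike) mtimecmp 0
    0xffffffffffffffff
    (spike) dump

dump writes one mem.0x<base>.bin file per configured memory region into the current working directory, as implemented in interactive_dumpmems() above.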
diff --git a/riscv/isa_parser.cc b/riscv/isa_parser.cc
index 0adec2c..f0cf274 100644
--- a/riscv/isa_parser.cc
+++ b/riscv/isa_parser.cc
@@ -114,6 +114,8 @@ isa_parser_t::isa_parser_t(const char* str, const char *priv)
// unconditionally include FENCE.I, so Zifencei adds nothing more.
} else if (ext_str == "zihintpause") {
// HINTs encoded in base-ISA instructions are always present.
+ } else if (ext_str == "zihintntl") {
+ // HINTs encoded in base-ISA instructions are always present.
} else if (ext_str == "zmmul") {
extension_table[EXT_ZMMUL] = true;
} else if (ext_str == "zba") {
@@ -130,6 +132,18 @@ isa_parser_t::isa_parser_t(const char* str, const char *priv)
extension_table[EXT_ZBKC] = true;
} else if (ext_str == "zbkx") {
extension_table[EXT_ZBKX] = true;
+ } else if (ext_str == "zdinx") {
+ extension_table[EXT_ZFINX] = true;
+ extension_table[EXT_ZDINX] = true;
+ } else if (ext_str == "zfinx") {
+ extension_table[EXT_ZFINX] = true;
+ } else if (ext_str == "zhinx") {
+ extension_table[EXT_ZFINX] = true;
+ extension_table[EXT_ZHINX] = true;
+ extension_table[EXT_ZHINXMIN] = true;
+ } else if (ext_str == "zhinxmin") {
+ extension_table[EXT_ZFINX] = true;
+ extension_table[EXT_ZHINXMIN] = true;
} else if (ext_str == "zk") {
extension_table[EXT_ZBKB] = true;
extension_table[EXT_ZBKC] = true;
@@ -164,6 +178,12 @@ isa_parser_t::isa_parser_t(const char* str, const char *priv)
} else if (ext_str == "zkr") {
extension_table[EXT_ZKR] = true;
} else if (ext_str == "zkt") {
+ } else if (ext_str == "smepmp") {
+ extension_table[EXT_SMEPMP] = true;
+ } else if (ext_str == "smstateen") {
+ extension_table[EXT_SMSTATEEN] = true;
+ } else if (ext_str == "sscofpmf") {
+ extension_table[EXT_SSCOFPMF] = true;
} else if (ext_str == "svnapot") {
extension_table[EXT_SVNAPOT] = true;
} else if (ext_str == "svpbmt") {
@@ -177,6 +197,8 @@ isa_parser_t::isa_parser_t(const char* str, const char *priv)
} else if (ext_str == "zicbop") {
} else if (ext_str == "zicntr") {
} else if (ext_str == "zihpm") {
+ } else if (ext_str == "sstc") {
+ extension_table[EXT_SSTC] = true;
} else if (ext_str[0] == 'x') {
max_isa |= 1L << ('x' - 'a');
extension_table[toupper('x')] = true;
@@ -223,6 +245,10 @@ isa_parser_t::isa_parser_t(const char* str, const char *priv)
bad_isa_string(str, ("can't parse: " + std::string(p)).c_str());
}
+ if (extension_table[EXT_ZFINX] && ((max_isa >> ('f' - 'a')) & 1)) {
+ bad_isa_string(str, ("Zfinx/ZDinx/Zhinx{min} extensions conflict with Base 'F/D/Q/Zfh{min}' extensions"));
+ }
+
std::string lowercase = strtolower(priv);
bool user = false, supervisor = false;
@@ -244,4 +270,7 @@ isa_parser_t::isa_parser_t(const char* str, const char *priv)
max_isa |= reg_t(supervisor) << ('s' - 'a');
extension_table['S'] = true;
}
+
+ if (((max_isa >> ('h' - 'a')) & 1) && !supervisor)
+ bad_isa_string(str, "'H' extension requires S mode");
}
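To see the new checks in action: an ISA string combining Zfinx with F/D, or H without S-mode, is now rejected during construction. A sketch using the constructor exactly as modified above (bad_isa_string() reports the error):

    isa_parser_t ok("rv32im_zfinx", "MSU");     // accepted: no F/D present
    isa_parser_t bad("rv32imafd_zfinx", "MSU"); // rejected: Zfinx conflicts with F/D
    isa_parser_t hyp("rv64imah", "MU");         // rejected: 'H' requires S mode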
diff --git a/riscv/isa_parser.h b/riscv/isa_parser.h
index 3cefe12..f7a8bc8 100644
--- a/riscv/isa_parser.h
+++ b/riscv/isa_parser.h
@@ -31,6 +31,9 @@ typedef enum {
EXT_ZBPBO,
EXT_ZPN,
EXT_ZPSFOPERAND,
+ EXT_SMEPMP,
+ EXT_SMSTATEEN,
+ EXT_SSCOFPMF,
EXT_SVNAPOT,
EXT_SVPBMT,
EXT_SVINVAL,
@@ -50,6 +53,7 @@ typedef enum {
EXT_XZBM,
EXT_XZBR,
EXT_XZBT,
+ EXT_SSTC,
} isa_extension_t;
typedef enum {
@@ -66,7 +70,7 @@ typedef enum {
class isa_parser_t {
public:
isa_parser_t(const char* str, const char *priv);
- ~isa_parser_t(){};
+ ~isa_parser_t() {};
unsigned get_max_xlen() const { return max_xlen; }
reg_t get_max_isa() const { return max_isa; }
std::string get_isa_string() const { return isa_string; }
diff --git a/riscv/mmu.cc b/riscv/mmu.cc
index 1ef81cf..c77b6b1 100644
--- a/riscv/mmu.cc
+++ b/riscv/mmu.cc
@@ -76,16 +76,26 @@ reg_t mmu_t::translate(reg_t addr, reg_t len, access_type type, uint32_t xlate_f
tlb_entry_t mmu_t::fetch_slow_path(reg_t vaddr)
{
- reg_t paddr = translate(vaddr, sizeof(fetch_temp), FETCH, 0);
+ check_triggers(triggers::OPERATION_EXECUTE, vaddr);
- if (auto host_addr = sim->addr_to_mem(paddr)) {
- return refill_tlb(vaddr, paddr, host_addr, FETCH);
+ tlb_entry_t result;
+ reg_t vpn = vaddr >> PGSHIFT;
+ if (unlikely(tlb_insn_tag[vpn % TLB_ENTRIES] != (vpn | TLB_CHECK_TRIGGERS))) {
+ reg_t paddr = translate(vaddr, sizeof(fetch_temp), FETCH, 0);
+ if (auto host_addr = sim->addr_to_mem(paddr)) {
+ result = refill_tlb(vaddr, paddr, host_addr, FETCH);
+ } else {
+ if (!mmio_load(paddr, sizeof fetch_temp, (uint8_t*)&fetch_temp))
+ throw trap_instruction_access_fault(proc->state.v, vaddr, 0, 0);
+ result = {(char*)&fetch_temp - vaddr, paddr - vaddr};
+ }
} else {
- if (!mmio_load(paddr, sizeof fetch_temp, (uint8_t*)&fetch_temp))
- throw trap_instruction_access_fault(proc->state.v, vaddr, 0, 0);
- tlb_entry_t entry = {(char*)&fetch_temp - vaddr, paddr - vaddr};
- return entry;
+ result = tlb_data[vpn % TLB_ENTRIES];
}
+
+ check_triggers(triggers::OPERATION_EXECUTE, vaddr, from_le(*(const uint16_t*)(result.host_offset + vaddr)));
+
+ return result;
}
reg_t reg_from_bytes(size_t len, const uint8_t* bytes)
@@ -114,7 +124,7 @@ reg_t reg_from_bytes(size_t len, const uint8_t* bytes)
abort();
}
-bool mmu_t::mmio_ok(reg_t addr, access_type type)
+bool mmu_t::mmio_ok(reg_t addr, access_type UNUSED type)
{
// Disallow access to debug region when not in debug mode
if (addr >= DEBUG_START && addr <= DEBUG_END && proc && !proc->state.debug_mode)
@@ -139,8 +149,39 @@ bool mmu_t::mmio_store(reg_t addr, size_t len, const uint8_t* bytes)
return sim->mmio_store(addr, len, bytes);
}
-void mmu_t::load_slow_path(reg_t addr, reg_t len, uint8_t* bytes, uint32_t xlate_flags)
+void mmu_t::check_triggers(triggers::operation_t operation, reg_t address, std::optional<reg_t> data)
{
+ if (matched_trigger || !proc)
+ return;
+
+ triggers::action_t action;
+ auto match = proc->TM.memory_access_match(&action, operation, address, data);
+
+ switch (match) {
+ case triggers::MATCH_NONE:
+ return;
+
+ case triggers::MATCH_FIRE_BEFORE:
+ throw triggers::matched_t(operation, address, action);
+
+ case triggers::MATCH_FIRE_AFTER:
+ // We want to take this exception on the next instruction. We check
+ // whether to do so in the I$ refill path, so flush the I$.
+ flush_icache();
+ matched_trigger = new triggers::matched_t(operation, address, action);
+ return;
+ }
+}
+
+void mmu_t::load_slow_path_intrapage(reg_t addr, reg_t len, uint8_t* bytes, uint32_t xlate_flags)
+{
+ reg_t vpn = addr >> PGSHIFT;
+ if (xlate_flags == 0 && vpn == (tlb_load_tag[vpn % TLB_ENTRIES] & ~TLB_CHECK_TRIGGERS)) {
+ auto host_addr = tlb_data[vpn % TLB_ENTRIES].host_offset + addr;
+ memcpy(bytes, host_addr, len);
+ return;
+ }
+
reg_t paddr = translate(addr, len, LOAD, xlate_flags);
if (auto host_addr = sim->addr_to_mem(paddr)) {
@@ -152,26 +193,43 @@ void mmu_t::load_slow_path(reg_t addr, reg_t len, uint8_t* bytes, uint32_t xlate
} else if (!mmio_load(paddr, len, bytes)) {
throw trap_load_access_fault((proc) ? proc->state.v : false, addr, 0, 0);
}
+}
+
+void mmu_t::load_slow_path(reg_t addr, reg_t len, uint8_t* bytes, uint32_t xlate_flags, bool UNUSED require_alignment)
+{
+ check_triggers(triggers::OPERATION_LOAD, addr);
- if (!matched_trigger) {
- reg_t data = reg_from_bytes(len, bytes);
- matched_trigger = trigger_exception(triggers::OPERATION_LOAD, addr, data);
- if (matched_trigger)
- throw *matched_trigger;
+ if ((addr & (len - 1)) == 0) {
+ load_slow_path_intrapage(addr, len, bytes, xlate_flags);
+ } else {
+ bool gva = ((proc) ? proc->state.v : false) || (RISCV_XLATE_VIRT & xlate_flags);
+#ifndef RISCV_ENABLE_MISALIGNED
+ throw trap_load_address_misaligned(gva, addr, 0, 0);
+#else
+ if (require_alignment)
+ throw trap_load_access_fault(gva, addr, 0, 0);
+
+ reg_t len_page0 = std::min(len, PGSIZE - addr % PGSIZE);
+ load_slow_path_intrapage(addr, len_page0, bytes, xlate_flags);
+ if (len_page0 != len)
+ load_slow_path_intrapage(addr + len_page0, len - len_page0, bytes + len_page0, xlate_flags);
+#endif
}
+
+ check_triggers(triggers::OPERATION_LOAD, addr, reg_from_bytes(len, bytes));
}
-void mmu_t::store_slow_path(reg_t addr, reg_t len, const uint8_t* bytes, uint32_t xlate_flags, bool actually_store)
+void mmu_t::store_slow_path_intrapage(reg_t addr, reg_t len, const uint8_t* bytes, uint32_t xlate_flags, bool actually_store)
{
- reg_t paddr = translate(addr, len, STORE, xlate_flags);
-
- if (!matched_trigger) {
- reg_t data = reg_from_bytes(len, bytes);
- matched_trigger = trigger_exception(triggers::OPERATION_STORE, addr, data);
- if (matched_trigger)
- throw *matched_trigger;
+ reg_t vpn = addr >> PGSHIFT;
+ if (xlate_flags == 0 && vpn == (tlb_store_tag[vpn % TLB_ENTRIES] & ~TLB_CHECK_TRIGGERS)) {
+ auto host_addr = tlb_data[vpn % TLB_ENTRIES].host_offset + addr;
+ memcpy(host_addr, bytes, len);
+ return;
}
+ reg_t paddr = translate(addr, len, STORE, xlate_flags);
+
if (actually_store) {
if (auto host_addr = sim->addr_to_mem(paddr)) {
memcpy(host_addr, bytes, len);
@@ -185,6 +243,29 @@ void mmu_t::store_slow_path(reg_t addr, reg_t len, const uint8_t* bytes, uint32_
}
}
+void mmu_t::store_slow_path(reg_t addr, reg_t len, const uint8_t* bytes, uint32_t xlate_flags, bool actually_store, bool UNUSED require_alignment)
+{
+ if (actually_store)
+ check_triggers(triggers::OPERATION_STORE, addr, reg_from_bytes(len, bytes));
+
+ if (addr & (len - 1)) {
+ bool gva = ((proc) ? proc->state.v : false) || (RISCV_XLATE_VIRT & xlate_flags);
+#ifndef RISCV_ENABLE_MISALIGNED
+ throw trap_store_address_misaligned(gva, addr, 0, 0);
+#else
+ if (require_alignment)
+ throw trap_store_access_fault(gva, addr, 0, 0);
+
+ reg_t len_page0 = std::min(len, PGSIZE - addr % PGSIZE);
+ store_slow_path_intrapage(addr, len_page0, bytes, xlate_flags, actually_store);
+ if (len_page0 != len)
+ store_slow_path_intrapage(addr + len_page0, len - len_page0, bytes + len_page0, xlate_flags, actually_store);
+#endif
+ } else {
+ store_slow_path_intrapage(addr, len, bytes, xlate_flags, actually_store);
+ }
+}
+
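The page-split arithmetic above is easiest to verify with a concrete case; a minimal standalone check (assuming Spike's usual 4 KiB PGSIZE):

    #include <algorithm>
    #include <cassert>
    #include <cstdint>

    int main() {
      const uint64_t PGSIZE = 4096;
      uint64_t addr = 0x1ffe, len = 8;  // misaligned access crossing a page
      uint64_t len_page0 = std::min(len, PGSIZE - addr % PGSIZE);
      assert(len_page0 == 2);           // bytes 0x1ffe..0x1fff on page 0
      assert(len - len_page0 == 6);     // bytes 0x2000..0x2005 on page 1
    }

Each half is then handled by the *_slow_path_intrapage() helpers, which by construction never cross a page boundary.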
tlb_entry_t mmu_t::refill_tlb(reg_t vaddr, reg_t paddr, char* host_addr, access_type type)
{
reg_t idx = (vaddr >> PGSHIFT) % TLB_ENTRIES;
@@ -242,7 +323,11 @@ bool mmu_t::pmp_ok(reg_t addr, reg_t len, access_type type, reg_t mode)
}
}
- return mode == PRV_M;
+ // in case matching region is not found
+ const bool mseccfg_mml = proc->state.mseccfg->get_mml();
+ const bool mseccfg_mmwp = proc->state.mseccfg->get_mmwp();
+ return ((mode == PRV_M) && !mseccfg_mmwp
+ && (!mseccfg_mml || ((type == LOAD) || (type == STORE))));
}
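With this change, an M-mode access that matches no PMP region is no longer unconditionally allowed. The new return expression is equivalent to the following sketch (function name hypothetical; behavior per the Smepmp rules):

    // Default decision when no PMP region matches:
    //   non-M mode                -> deny
    //   M-mode with mseccfg.MMWP  -> deny (machine-mode whitelist policy)
    //   M-mode with mseccfg.MML   -> allow loads/stores only, never fetches
    //   M-mode, neither bit set   -> allow (legacy behavior)
    bool pmp_default_allow(bool m_mode, bool mml, bool mmwp, bool is_fetch) {
      return m_mode && !mmwp && (!mml || !is_fetch);
    }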
reg_t mmu_t::pmp_homogeneous(reg_t addr, reg_t len)
@@ -290,12 +375,15 @@ reg_t mmu_t::s2xlate(reg_t gva, reg_t gpa, access_type type, access_type trap_ty
reg_t pte = vm.ptesize == 4 ? from_target(*(target_endian<uint32_t>*)ppte) : from_target(*(target_endian<uint64_t>*)ppte);
reg_t ppn = (pte & ~reg_t(PTE_ATTR)) >> PTE_PPN_SHIFT;
+ bool pbmte = proc->get_state()->menvcfg->read() & MENVCFG_PBMTE;
if (pte & PTE_RSVD) {
break;
} else if (!proc->extension_enabled(EXT_SVNAPOT) && (pte & PTE_N)) {
break;
- } else if (!proc->extension_enabled(EXT_SVPBMT) && (pte & PTE_PBMT)) {
+ } else if (!pbmte && (pte & PTE_PBMT)) {
+ break;
+ } else if ((pte & PTE_PBMT) == PTE_PBMT) { // PBMT=3 is a reserved encoding
break;
} else if (PTE_TABLE(pte)) { // next level of page table
if (pte & (PTE_D | PTE_A | PTE_U | PTE_N | PTE_PBMT))
@@ -380,12 +468,15 @@ reg_t mmu_t::walk(reg_t addr, access_type type, reg_t mode, bool virt, bool hlvx
reg_t pte = vm.ptesize == 4 ? from_target(*(target_endian<uint32_t>*)ppte) : from_target(*(target_endian<uint64_t>*)ppte);
reg_t ppn = (pte & ~reg_t(PTE_ATTR)) >> PTE_PPN_SHIFT;
+ bool pbmte = virt ? (proc->get_state()->henvcfg->read() & HENVCFG_PBMTE) : (proc->get_state()->menvcfg->read() & MENVCFG_PBMTE);
if (pte & PTE_RSVD) {
break;
} else if (!proc->extension_enabled(EXT_SVNAPOT) && (pte & PTE_N)) {
break;
- } else if (!proc->extension_enabled(EXT_SVPBMT) && (pte & PTE_PBMT)) {
+ } else if (!pbmte && (pte & PTE_PBMT)) {
+ break;
+ } else if ((pte & PTE_PBMT) == PTE_PBMT) { // PBMT=3 is a reserved encoding
break;
} else if (PTE_TABLE(pte)) { // next level of page table
if (pte & (PTE_D | PTE_A | PTE_U | PTE_N | PTE_PBMT))
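One subtlety in the trigger rework in mmu.cc: MATCH_FIRE_BEFORE triggers throw immediately, while MATCH_FIRE_AFTER triggers are deferred by arming matched_trigger and flushing the I$, so the exception only surfaces on the next instruction fetch. A sketch of the deferred path (condensed from check_triggers() above and refill_icache() in the mmu.h hunk below; not the real signatures):

    // In check_triggers(), on MATCH_FIRE_AFTER:
    //   flush_icache();  // force the next fetch down the slow path
    //   matched_trigger = new triggers::matched_t(operation, address, action);
    // In refill_icache(), before the next instruction is decoded:
    //   if (matched_trigger) throw *matched_trigger;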
diff --git a/riscv/mmu.h b/riscv/mmu.h
index dcf338f..da84adc 100644
--- a/riscv/mmu.h
+++ b/riscv/mmu.h
@@ -52,32 +52,8 @@ public:
#define RISCV_XLATE_VIRT (1U << 0)
#define RISCV_XLATE_VIRT_HLVX (1U << 1)
- inline reg_t misaligned_load(reg_t addr, size_t size, uint32_t xlate_flags)
- {
-#ifdef RISCV_ENABLE_MISALIGNED
- reg_t res = 0;
- for (size_t i = 0; i < size; i++)
- res += (reg_t)load_uint8(addr + (target_big_endian? size-1-i : i)) << (i * 8);
- return res;
-#else
- bool gva = ((proc) ? proc->state.v : false) || (RISCV_XLATE_VIRT & xlate_flags);
- throw trap_load_address_misaligned(gva, addr, 0, 0);
-#endif
- }
-
- inline void misaligned_store(reg_t addr, reg_t data, size_t size, uint32_t xlate_flags)
- {
-#ifdef RISCV_ENABLE_MISALIGNED
- for (size_t i = 0; i < size; i++)
- store_uint8(addr + (target_big_endian? size-1-i : i), data >> (i * 8));
-#else
- bool gva = ((proc) ? proc->state.v : false) || (RISCV_XLATE_VIRT & xlate_flags);
- throw trap_store_address_misaligned(gva, addr, 0, 0);
-#endif
- }
-
#ifndef RISCV_ENABLE_COMMITLOG
-# define READ_MEM(addr, size) ({})
+# define READ_MEM(addr, size) ((void)(addr), (void)(size))
#else
# define READ_MEM(addr, size) \
proc->state.log_mem_read.push_back(std::make_tuple(addr, 0, size));
@@ -85,31 +61,20 @@ public:
// template for functions that load an aligned value from memory
#define load_func(type, prefix, xlate_flags) \
- inline type##_t prefix##_##type(reg_t addr, bool require_alignment = false) { \
- if (unlikely(addr & (sizeof(type##_t)-1))) { \
- if (require_alignment) load_reserved_address_misaligned(addr); \
- else return misaligned_load(addr, sizeof(type##_t), xlate_flags); \
- } \
+ type##_t ALWAYS_INLINE prefix##_##type(reg_t addr, bool require_alignment = false) { \
reg_t vpn = addr >> PGSHIFT; \
size_t size = sizeof(type##_t); \
- if ((xlate_flags) == 0 && likely(tlb_load_tag[vpn % TLB_ENTRIES] == vpn)) { \
+ bool aligned = (addr & (size - 1)) == 0; \
+ bool tlb_hit = tlb_load_tag[vpn % TLB_ENTRIES] == vpn; \
+ if (likely((xlate_flags) == 0 && aligned && tlb_hit)) { \
if (proc) READ_MEM(addr, size); \
return from_target(*(target_endian<type##_t>*)(tlb_data[vpn % TLB_ENTRIES].host_offset + addr)); \
- } \
- if ((xlate_flags) == 0 && unlikely(tlb_load_tag[vpn % TLB_ENTRIES] == (vpn | TLB_CHECK_TRIGGERS))) { \
- type##_t data = from_target(*(target_endian<type##_t>*)(tlb_data[vpn % TLB_ENTRIES].host_offset + addr)); \
- if (!matched_trigger) { \
- matched_trigger = trigger_exception(triggers::OPERATION_LOAD, addr, data); \
- if (matched_trigger) \
- throw *matched_trigger; \
- } \
+ } else { \
+ target_endian<type##_t> res; \
+ load_slow_path(addr, sizeof(type##_t), (uint8_t*)&res, (xlate_flags), require_alignment); \
if (proc) READ_MEM(addr, size); \
- return data; \
+ return from_target(res); \
} \
- target_endian<type##_t> res; \
- load_slow_path(addr, sizeof(type##_t), (uint8_t*)&res, (xlate_flags)); \
- if (proc) READ_MEM(addr, size); \
- return from_target(res); \
}
// load value from memory at aligned address; zero extend to register width
@@ -139,7 +104,7 @@ public:
load_func(int64, guest_load, RISCV_XLATE_VIRT)
#ifndef RISCV_ENABLE_COMMITLOG
-# define WRITE_MEM(addr, value, size) ({})
+# define WRITE_MEM(addr, value, size) ((void)(addr), (void)(value), (void)(size))
#else
# define WRITE_MEM(addr, val, size) \
proc->state.log_mem_write.push_back(std::make_tuple(addr, val, size));
@@ -147,31 +112,19 @@ public:
// template for functions that store an aligned value to memory
#define store_func(type, prefix, xlate_flags) \
- void prefix##_##type(reg_t addr, type##_t val, bool actually_store=true) { \
- if (unlikely(addr & (sizeof(type##_t)-1))) \
- return misaligned_store(addr, val, sizeof(type##_t), xlate_flags); \
+ void ALWAYS_INLINE prefix##_##type(reg_t addr, type##_t val, bool actually_store=true, bool require_alignment=false) { \
reg_t vpn = addr >> PGSHIFT; \
size_t size = sizeof(type##_t); \
- if ((xlate_flags) == 0 && likely(tlb_store_tag[vpn % TLB_ENTRIES] == vpn)) { \
+ bool aligned = (addr & (size - 1)) == 0; \
+ bool tlb_hit = tlb_store_tag[vpn % TLB_ENTRIES] == vpn; \
+ if ((xlate_flags) == 0 && likely(aligned && tlb_hit)) { \
if (actually_store) { \
if (proc) WRITE_MEM(addr, val, size); \
*(target_endian<type##_t>*)(tlb_data[vpn % TLB_ENTRIES].host_offset + addr) = to_target(val); \
} \
- } \
- else if ((xlate_flags) == 0 && unlikely(tlb_store_tag[vpn % TLB_ENTRIES] == (vpn | TLB_CHECK_TRIGGERS))) { \
- if (actually_store) { \
- if (!matched_trigger) { \
- matched_trigger = trigger_exception(triggers::OPERATION_STORE, addr, val); \
- if (matched_trigger) \
- throw *matched_trigger; \
- } \
- if (proc) WRITE_MEM(addr, val, size); \
- *(target_endian<type##_t>*)(tlb_data[vpn % TLB_ENTRIES].host_offset + addr) = to_target(val); \
- } \
- } \
- else { \
+ } else { \
target_endian<type##_t> target_val = to_target(val); \
- store_slow_path(addr, sizeof(type##_t), (const uint8_t*)&target_val, (xlate_flags), actually_store); \
+ store_slow_path(addr, sizeof(type##_t), (const uint8_t*)&target_val, (xlate_flags), actually_store, require_alignment); \
if (actually_store && proc) WRITE_MEM(addr, val, size); \
} \
}
@@ -196,7 +149,7 @@ public:
template<typename op> \
type##_t amo_##type(reg_t addr, op f) { \
convert_load_traps_to_store_traps({ \
- store_##type(addr, 0, false); \
+ store_##type(addr, 0, false, true); \
auto lhs = load_##type(addr, true); \
store_##type(addr, f(lhs)); \
return lhs; \
@@ -246,9 +199,9 @@ public:
void clean_inval(reg_t addr, bool clean, bool inval) {
convert_load_traps_to_store_traps({
- reg_t paddr = addr & ~(blocksz - 1);
- paddr = translate(paddr, blocksz, LOAD, 0);
- if (auto host_addr = sim->addr_to_mem(paddr)) {
+ const reg_t vaddr = addr & ~(blocksz - 1);
+ const reg_t paddr = translate(vaddr, blocksz, LOAD, 0);
+ if (sim->addr_to_mem(paddr)) {
if (tracer.interested_in_range(paddr, paddr + PGSIZE, LOAD))
tracer.clean_invalidate(paddr, blocksz, clean, inval);
} else {
@@ -271,30 +224,12 @@ public:
throw trap_load_access_fault((proc) ? proc->state.v : false, vaddr, 0, 0); // disallow LR to I/O space
}
- inline void load_reserved_address_misaligned(reg_t vaddr)
- {
- bool gva = proc ? proc->state.v : false;
-#ifdef RISCV_ENABLE_MISALIGNED
- throw trap_load_access_fault(gva, vaddr, 0, 0);
-#else
- throw trap_load_address_misaligned(gva, vaddr, 0, 0);
-#endif
- }
-
- inline void store_conditional_address_misaligned(reg_t vaddr)
- {
- bool gva = proc ? proc->state.v : false;
-#ifdef RISCV_ENABLE_MISALIGNED
- throw trap_store_access_fault(gva, vaddr, 0, 0);
-#else
- throw trap_store_address_misaligned(gva, vaddr, 0, 0);
-#endif
- }
-
inline bool check_load_reservation(reg_t vaddr, size_t size)
{
- if (vaddr & (size-1))
- store_conditional_address_misaligned(vaddr);
+ if (vaddr & (size-1)) {
+ // Raise either access fault or misaligned exception
+ store_slow_path(vaddr, size, nullptr, 0, false, true);
+ }
reg_t paddr = translate(vaddr, 1, STORE, 0);
if (auto host_addr = sim->addr_to_mem(paddr))
@@ -312,20 +247,23 @@ public:
inline icache_entry_t* refill_icache(reg_t addr, icache_entry_t* entry)
{
+ if (matched_trigger)
+ throw *matched_trigger;
+
auto tlb_entry = translate_insn_addr(addr);
insn_bits_t insn = from_le(*(uint16_t*)(tlb_entry.host_offset + addr));
int length = insn_length(insn);
if (likely(length == 4)) {
- insn |= (insn_bits_t)from_le(*(const int16_t*)translate_insn_addr_to_host(addr + 2)) << 16;
+ insn |= (insn_bits_t)from_le(*(const uint16_t*)translate_insn_addr_to_host(addr + 2)) << 16;
} else if (length == 2) {
- insn = (int16_t)insn;
+ // entire instruction already fetched
} else if (length == 6) {
- insn |= (insn_bits_t)from_le(*(const int16_t*)translate_insn_addr_to_host(addr + 4)) << 32;
+ insn |= (insn_bits_t)from_le(*(const uint16_t*)translate_insn_addr_to_host(addr + 4)) << 32;
insn |= (insn_bits_t)from_le(*(const uint16_t*)translate_insn_addr_to_host(addr + 2)) << 16;
} else {
static_assert(sizeof(insn_bits_t) == 8, "insn_bits_t must be uint64_t");
- insn |= (insn_bits_t)from_le(*(const int16_t*)translate_insn_addr_to_host(addr + 6)) << 48;
+ insn |= (insn_bits_t)from_le(*(const uint16_t*)translate_insn_addr_to_host(addr + 6)) << 48;
insn |= (insn_bits_t)from_le(*(const uint16_t*)translate_insn_addr_to_host(addr + 4)) << 32;
insn |= (insn_bits_t)from_le(*(const uint16_t*)translate_insn_addr_to_host(addr + 2)) << 16;
}
@@ -442,11 +380,14 @@ private:
// handle uncommon cases: TLB misses, page faults, MMIO
tlb_entry_t fetch_slow_path(reg_t addr);
- void load_slow_path(reg_t addr, reg_t len, uint8_t* bytes, uint32_t xlate_flags);
- void store_slow_path(reg_t addr, reg_t len, const uint8_t* bytes, uint32_t xlate_flags, bool actually_store);
+ void load_slow_path(reg_t addr, reg_t len, uint8_t* bytes, uint32_t xlate_flags, bool require_alignment);
+ void load_slow_path_intrapage(reg_t addr, reg_t len, uint8_t* bytes, uint32_t xlate_flags);
+ void store_slow_path(reg_t addr, reg_t len, const uint8_t* bytes, uint32_t xlate_flags, bool actually_store, bool require_alignment);
+ void store_slow_path_intrapage(reg_t addr, reg_t len, const uint8_t* bytes, uint32_t xlate_flags, bool actually_store);
bool mmio_load(reg_t addr, size_t len, uint8_t* bytes);
bool mmio_store(reg_t addr, size_t len, const uint8_t* bytes);
bool mmio_ok(reg_t addr, access_type type);
+ void check_triggers(triggers::operation_t operation, reg_t address, std::optional<reg_t> data = std::nullopt);
reg_t translate(reg_t addr, reg_t len, access_type type, uint32_t xlate_flags);
// ITLB lookup
@@ -454,43 +395,13 @@ private:
reg_t vpn = addr >> PGSHIFT;
if (likely(tlb_insn_tag[vpn % TLB_ENTRIES] == vpn))
return tlb_data[vpn % TLB_ENTRIES];
- tlb_entry_t result;
- if (unlikely(tlb_insn_tag[vpn % TLB_ENTRIES] != (vpn | TLB_CHECK_TRIGGERS))) {
- result = fetch_slow_path(addr);
- } else {
- result = tlb_data[vpn % TLB_ENTRIES];
- }
- if (unlikely(tlb_insn_tag[vpn % TLB_ENTRIES] == (vpn | TLB_CHECK_TRIGGERS))) {
- target_endian<uint16_t>* ptr = (target_endian<uint16_t>*)(tlb_data[vpn % TLB_ENTRIES].host_offset + addr);
- triggers::action_t action;
- auto match = proc->TM.memory_access_match(&action, triggers::OPERATION_EXECUTE, addr, from_target(*ptr));
- if (match != triggers::MATCH_NONE) {
- throw triggers::matched_t(triggers::OPERATION_EXECUTE, addr, from_target(*ptr), action);
- }
- }
- return result;
+ return fetch_slow_path(addr);
}
inline const uint16_t* translate_insn_addr_to_host(reg_t addr) {
return (uint16_t*)(translate_insn_addr(addr).host_offset + addr);
}
- inline triggers::matched_t *trigger_exception(triggers::operation_t operation,
- reg_t address, reg_t data)
- {
- if (!proc) {
- return NULL;
- }
- triggers::action_t action;
- auto match = proc->TM.memory_access_match(&action, operation, address, data);
- if (match == triggers::MATCH_NONE)
- return NULL;
- if (match == triggers::MATCH_FIRE_BEFORE) {
- throw triggers::matched_t(operation, address, data, action);
- }
- return new triggers::matched_t(operation, address, data, action);
- }
-
reg_t pmp_homogeneous(reg_t addr, reg_t len);
bool pmp_ok(reg_t addr, reg_t len, access_type type, reg_t mode);
diff --git a/riscv/p_ext_macros.h b/riscv/p_ext_macros.h
new file mode 100644
index 0000000..29437ad
--- /dev/null
+++ b/riscv/p_ext_macros.h
@@ -0,0 +1,506 @@
+// See LICENSE for license details.
+
+#ifndef _RISCV_P_EXT_MACROS_H
+#define _RISCV_P_EXT_MACROS_H
+
+// The p-extension support is contributed by
+// Programming Language Lab, Department of Computer Science, National Tsing-Hua University, Taiwan
+
+#define P_FIELD(R, INDEX, SIZE) \
+ (type_sew_t<SIZE>::type)get_field(R, make_mask64(((INDEX) * SIZE), SIZE))
+
+#define P_UFIELD(R, INDEX, SIZE) \
+ (type_usew_t<SIZE>::type)get_field(R, make_mask64(((INDEX) * SIZE), SIZE))
+
+#define P_B(R, INDEX) P_UFIELD(R, INDEX, 8)
+#define P_H(R, INDEX) P_UFIELD(R, INDEX, 16)
+#define P_W(R, INDEX) P_UFIELD(R, INDEX, 32)
+#define P_SB(R, INDEX) P_FIELD(R, INDEX, 8)
+#define P_SH(R, INDEX) P_FIELD(R, INDEX, 16)
+#define P_SW(R, INDEX) P_FIELD(R, INDEX, 32)
+
+#define READ_REG_PAIR(reg) ({ \
+ require((reg) % 2 == 0); \
+ (reg) == 0 ? reg_t(0) : \
+ (READ_REG((reg) + 1) << 32) + zext32(READ_REG(reg)); })
+
+#define RS1_PAIR READ_REG_PAIR(insn.rs1())
+#define RS2_PAIR READ_REG_PAIR(insn.rs2())
+#define RD_PAIR READ_REG_PAIR(insn.rd())
+
+#define WRITE_PD() \
+ rd_tmp = set_field(rd_tmp, make_mask64((i * sizeof(pd) * 8), sizeof(pd) * 8), pd);
+
+#define WRITE_RD_PAIR(value) \
+ if (insn.rd() != 0) { \
+ require(insn.rd() % 2 == 0); \
+ WRITE_REG(insn.rd(), sext32(value)); \
+ WRITE_REG(insn.rd() + 1, (sreg_t(value)) >> 32); \
+ }
+
+#define P_SET_OV(ov) \
+ if (ov) P.VU.vxsat->write(1);
+
+#define P_SAT(R, BIT) \
+ if (R > INT##BIT##_MAX) { \
+ R = INT##BIT##_MAX; \
+ P_SET_OV(1); \
+ } else if (R < INT##BIT##_MIN) { \
+ R = INT##BIT##_MIN; \
+ P_SET_OV(1); \
+ }
+
+#define P_SATU(R, BIT) \
+ if (R > UINT##BIT##_MAX) { \
+ R = UINT##BIT##_MAX; \
+ P_SET_OV(1); \
+ } else if (R < 0) { \
+ P_SET_OV(1); \
+ R = 0; \
+ }
+
+#define P_LOOP_BASE(BIT) \
+ require_extension(EXT_ZPN); \
+ require(BIT == e8 || BIT == e16 || BIT == e32); \
+ reg_t rd_tmp = RD; \
+ reg_t rs1 = RS1; \
+ reg_t rs2 = RS2; \
+ sreg_t len = xlen / BIT; \
+ for (sreg_t i = len - 1; i >= 0; --i) {
+
+#define P_ONE_LOOP_BASE(BIT) \
+ require_extension(EXT_ZPN); \
+ require(BIT == e8 || BIT == e16 || BIT == e32); \
+ reg_t rd_tmp = RD; \
+ reg_t rs1 = RS1; \
+ sreg_t len = xlen / BIT; \
+ for (sreg_t i = len - 1; i >= 0; --i) {
+
+#define P_I_LOOP_BASE(BIT, IMMBIT) \
+ require_extension(EXT_ZPN); \
+ require(BIT == e8 || BIT == e16 || BIT == e32); \
+ reg_t rd_tmp = RD; \
+ reg_t rs1 = RS1; \
+ type_usew_t<BIT>::type imm##IMMBIT##u = insn.p_imm##IMMBIT(); \
+ sreg_t len = xlen / BIT; \
+ for (sreg_t i = len - 1; i >= 0; --i) {
+
+#define P_X_LOOP_BASE(BIT, LOWBIT) \
+ require_extension(EXT_ZPN); \
+ require(BIT == e8 || BIT == e16 || BIT == e32); \
+ reg_t rd_tmp = RD; \
+ reg_t rs1 = RS1; \
+ type_usew_t<BIT>::type sa = RS2 & ((uint64_t(1) << LOWBIT) - 1); \
+ type_sew_t<BIT>::type UNUSED ssa = int64_t(RS2) << (64 - LOWBIT) >> (64 - LOWBIT); \
+ sreg_t len = xlen / BIT; \
+ for (sreg_t i = len - 1; i >= 0; --i) {
+
+#define P_MUL_LOOP_BASE(BIT) \
+ require_extension(EXT_ZPN); \
+ require(BIT == e8 || BIT == e16 || BIT == e32); \
+ reg_t rd_tmp = RD; \
+ reg_t rs1 = RS1; \
+ reg_t rs2 = RS2; \
+ sreg_t len = 32 / BIT; \
+ for (sreg_t i = len - 1; i >= 0; --i) {
+
+#define P_REDUCTION_LOOP_BASE(BIT, BIT_INNER, USE_RD) \
+ require_extension(EXT_ZPN); \
+ require(BIT == e16 || BIT == e32 || BIT == e64); \
+ reg_t rd_tmp = USE_RD ? zext_xlen(RD) : 0; \
+ reg_t rs1 = zext_xlen(RS1); \
+ reg_t rs2 = zext_xlen(RS2); \
+ sreg_t len = 64 / BIT; \
+ sreg_t len_inner = BIT / BIT_INNER; \
+ for (sreg_t i = len - 1; i >= 0; --i) { \
+ sreg_t pd_res = P_FIELD(rd_tmp, i, BIT); \
+ for (sreg_t j = i * len_inner; j < (i + 1) * len_inner; ++j) {
+
+#define P_REDUCTION_ULOOP_BASE(BIT, BIT_INNER, USE_RD) \
+ require_extension(EXT_ZPN); \
+ require(BIT == e16 || BIT == e32 || BIT == e64); \
+ reg_t rd_tmp = USE_RD ? zext_xlen(RD) : 0; \
+ reg_t rs1 = zext_xlen(RS1); \
+ reg_t rs2 = zext_xlen(RS2); \
+ sreg_t len = 64 / BIT; \
+ sreg_t len_inner = BIT / BIT_INNER; \
+ for (sreg_t i = len - 1; i >= 0; --i) { \
+ reg_t pd_res = P_UFIELD(rd_tmp, i, BIT); \
+ for (sreg_t j = i * len_inner; j < (i + 1) * len_inner; ++j) {
+
+#define P_PARAMS(BIT) \
+ auto pd = P_FIELD(rd_tmp, i, BIT); \
+ auto ps1 = P_FIELD(rs1, i, BIT); \
+ auto ps2 = P_FIELD(rs2, i, BIT);
+
+#define P_UPARAMS(BIT) \
+ auto pd = P_UFIELD(rd_tmp, i, BIT); \
+ auto ps1 = P_UFIELD(rs1, i, BIT); \
+ auto ps2 = P_UFIELD(rs2, i, BIT);
+
+#define P_CROSS_PARAMS(BIT) \
+ auto pd = P_FIELD(rd_tmp, i, BIT); \
+ auto UNUSED ps1 = P_FIELD(rs1, i, BIT); \
+ auto UNUSED ps2 = P_FIELD(rs2, (i ^ 1), BIT);
+
+#define P_CROSS_UPARAMS(BIT) \
+ auto pd = P_UFIELD(rd_tmp, i, BIT); \
+ auto ps1 = P_UFIELD(rs1, i, BIT); \
+ auto ps2 = P_UFIELD(rs2, (i ^ 1), BIT);
+
+#define P_ONE_PARAMS(BIT) \
+ auto pd = P_FIELD(rd_tmp, i, BIT); \
+ auto ps1 = P_FIELD(rs1, i, BIT);
+
+#define P_ONE_UPARAMS(BIT) \
+ auto pd = P_UFIELD(rd_tmp, i, BIT); \
+ auto ps1 = P_UFIELD(rs1, i, BIT);
+
+#define P_ONE_SUPARAMS(BIT) \
+ auto pd = P_UFIELD(rd_tmp, i, BIT); \
+ auto ps1 = P_FIELD(rs1, i, BIT);
+
+#define P_MUL_PARAMS(BIT) \
+ auto pd = P_FIELD(rd_tmp, i, BIT * 2); \
+ auto ps1 = P_FIELD(rs1, i, BIT); \
+ auto ps2 = P_FIELD(rs2, i, BIT);
+
+#define P_MUL_UPARAMS(BIT) \
+ auto pd = P_UFIELD(rd_tmp, i, BIT * 2); \
+ auto ps1 = P_UFIELD(rs1, i, BIT); \
+ auto ps2 = P_UFIELD(rs2, i, BIT);
+
+#define P_MUL_CROSS_PARAMS(BIT) \
+ auto pd = P_FIELD(rd_tmp, i, BIT * 2); \
+ auto ps1 = P_FIELD(rs1, i, BIT); \
+ auto ps2 = P_FIELD(rs2, (i ^ 1), BIT);
+
+#define P_MUL_CROSS_UPARAMS(BIT) \
+ auto pd = P_UFIELD(rd_tmp, i, BIT*2); \
+ auto ps1 = P_UFIELD(rs1, i, BIT); \
+ auto ps2 = P_UFIELD(rs2, (i ^ 1), BIT);
+
+#define P_REDUCTION_PARAMS(BIT_INNER) \
+ auto ps1 = P_FIELD(rs1, j, BIT_INNER); \
+ auto ps2 = P_FIELD(rs2, j, BIT_INNER);
+
+#define P_REDUCTION_UPARAMS(BIT_INNER) \
+ auto ps1 = P_UFIELD(rs1, j, BIT_INNER); \
+ auto ps2 = P_UFIELD(rs2, j, BIT_INNER);
+
+#define P_REDUCTION_SUPARAMS(BIT_INNER) \
+ auto ps1 = P_FIELD(rs1, j, BIT_INNER); \
+ auto ps2 = P_UFIELD(rs2, j, BIT_INNER);
+
+#define P_REDUCTION_CROSS_PARAMS(BIT_INNER) \
+ auto ps1 = P_FIELD(rs1, j, BIT_INNER); \
+ auto ps2 = P_FIELD(rs2, (j ^ 1), BIT_INNER);
+
+#define P_LOOP_BODY(BIT, BODY) { \
+ P_PARAMS(BIT) \
+ BODY \
+ WRITE_PD(); \
+}
+
+#define P_ULOOP_BODY(BIT, BODY) { \
+ P_UPARAMS(BIT) \
+ BODY \
+ WRITE_PD(); \
+}
+
+#define P_ONE_LOOP_BODY(BIT, BODY) { \
+ P_ONE_PARAMS(BIT) \
+ BODY \
+ WRITE_PD(); \
+}
+
+#define P_CROSS_LOOP_BODY(BIT, BODY) { \
+ P_CROSS_PARAMS(BIT) \
+ BODY \
+ WRITE_PD(); \
+}
+
+#define P_CROSS_ULOOP_BODY(BIT, BODY) { \
+ P_CROSS_UPARAMS(BIT) \
+ BODY \
+ WRITE_PD(); \
+}
+
+#define P_ONE_ULOOP_BODY(BIT, BODY) { \
+ P_ONE_UPARAMS(BIT) \
+ BODY \
+ WRITE_PD(); \
+}
+
+#define P_MUL_LOOP_BODY(BIT, BODY) { \
+ P_MUL_PARAMS(BIT) \
+ BODY \
+ WRITE_PD(); \
+}
+
+#define P_MUL_ULOOP_BODY(BIT, BODY) { \
+ P_MUL_UPARAMS(BIT) \
+ BODY \
+ WRITE_PD(); \
+}
+
+#define P_MUL_CROSS_LOOP_BODY(BIT, BODY) { \
+ P_MUL_CROSS_PARAMS(BIT) \
+ BODY \
+ WRITE_PD(); \
+}
+
+#define P_MUL_CROSS_ULOOP_BODY(BIT, BODY) { \
+ P_MUL_CROSS_UPARAMS(BIT) \
+ BODY \
+ WRITE_PD(); \
+}
+
+#define P_LOOP(BIT, BODY) \
+ P_LOOP_BASE(BIT) \
+ P_LOOP_BODY(BIT, BODY) \
+ P_LOOP_END()
+
+#define P_ONE_LOOP(BIT, BODY) \
+ P_ONE_LOOP_BASE(BIT) \
+ P_ONE_LOOP_BODY(BIT, BODY) \
+ P_LOOP_END()
+
+#define P_ULOOP(BIT, BODY) \
+ P_LOOP_BASE(BIT) \
+ P_ULOOP_BODY(BIT, BODY) \
+ P_LOOP_END()
+
+#define P_CROSS_LOOP(BIT, BODY1, BODY2) \
+ P_LOOP_BASE(BIT) \
+ P_CROSS_LOOP_BODY(BIT, BODY1) \
+ --i; \
+ if (sizeof(#BODY2) == 1) { /* empty BODY2: apply BODY1 to both halves */ \
+ P_CROSS_LOOP_BODY(BIT, BODY1) \
+ } \
+ else { \
+ P_CROSS_LOOP_BODY(BIT, BODY2) \
+ } \
+ P_LOOP_END()
+
+#define P_CROSS_ULOOP(BIT, BODY1, BODY2) \
+ P_LOOP_BASE(BIT) \
+ P_CROSS_ULOOP_BODY(BIT, BODY1) \
+ --i; \
+ P_CROSS_ULOOP_BODY(BIT, BODY2) \
+ P_LOOP_END()
+
+#define P_STRAIGHT_LOOP(BIT, BODY1, BODY2) \
+ P_LOOP_BASE(BIT) \
+ P_LOOP_BODY(BIT, BODY1) \
+ --i; \
+ P_LOOP_BODY(BIT, BODY2) \
+ P_LOOP_END()
+
+#define P_STRAIGHT_ULOOP(BIT, BODY1, BODY2) \
+ P_LOOP_BASE(BIT) \
+ P_ULOOP_BODY(BIT, BODY1) \
+ --i; \
+ P_ULOOP_BODY(BIT, BODY2) \
+ P_LOOP_END()
+
+#define P_X_LOOP(BIT, RS2_LOW_BIT, BODY) \
+ P_X_LOOP_BASE(BIT, RS2_LOW_BIT) \
+ P_ONE_LOOP_BODY(BIT, BODY) \
+ P_LOOP_END()
+
+#define P_X_ULOOP(BIT, RS2_LOW_BIT, BODY) \
+ P_X_LOOP_BASE(BIT, RS2_LOW_BIT) \
+ P_ONE_ULOOP_BODY(BIT, BODY) \
+ P_LOOP_END()
+
+#define P_I_LOOP(BIT, IMMBIT, BODY) \
+ P_I_LOOP_BASE(BIT, IMMBIT) \
+ P_ONE_LOOP_BODY(BIT, BODY) \
+ P_LOOP_END()
+
+#define P_I_ULOOP(BIT, IMMBIT, BODY) \
+ P_I_LOOP_BASE(BIT, IMMBIT) \
+ P_ONE_ULOOP_BODY(BIT, BODY) \
+ P_LOOP_END()
+
+#define P_MUL_LOOP(BIT, BODY) \
+ P_MUL_LOOP_BASE(BIT) \
+ P_MUL_LOOP_BODY(BIT, BODY) \
+ P_PAIR_LOOP_END()
+
+#define P_MUL_ULOOP(BIT, BODY) \
+ P_MUL_LOOP_BASE(BIT) \
+ P_MUL_ULOOP_BODY(BIT, BODY) \
+ P_PAIR_LOOP_END()
+
+#define P_MUL_CROSS_LOOP(BIT, BODY) \
+ P_MUL_LOOP_BASE(BIT) \
+ P_MUL_CROSS_LOOP_BODY(BIT, BODY) \
+ P_PAIR_LOOP_END()
+
+#define P_MUL_CROSS_ULOOP(BIT, BODY) \
+ P_MUL_LOOP_BASE(BIT) \
+ P_MUL_CROSS_ULOOP_BODY(BIT, BODY) \
+ P_PAIR_LOOP_END()
+
+#define P_REDUCTION_LOOP(BIT, BIT_INNER, USE_RD, IS_SAT, BODY) \
+ P_REDUCTION_LOOP_BASE(BIT, BIT_INNER, USE_RD) \
+ P_REDUCTION_PARAMS(BIT_INNER) \
+ BODY \
+ P_REDUCTION_LOOP_END(BIT, IS_SAT)
+
+#define P_REDUCTION_ULOOP(BIT, BIT_INNER, USE_RD, IS_SAT, BODY) \
+ P_REDUCTION_ULOOP_BASE(BIT, BIT_INNER, USE_RD) \
+ P_REDUCTION_UPARAMS(BIT_INNER) \
+ BODY \
+ P_REDUCTION_ULOOP_END(BIT, IS_SAT)
+
+#define P_REDUCTION_SULOOP(BIT, BIT_INNER, USE_RD, IS_SAT, BODY) \
+ P_REDUCTION_LOOP_BASE(BIT, BIT_INNER, USE_RD) \
+ P_REDUCTION_SUPARAMS(BIT_INNER) \
+ BODY \
+ P_REDUCTION_LOOP_END(BIT, IS_SAT)
+
+#define P_REDUCTION_CROSS_LOOP(BIT, BIT_INNER, USE_RD, IS_SAT, BODY) \
+ P_REDUCTION_LOOP_BASE(BIT, BIT_INNER, USE_RD) \
+ P_REDUCTION_CROSS_PARAMS(BIT_INNER) \
+ BODY \
+ P_REDUCTION_LOOP_END(BIT, IS_SAT)
+
+#define P_LOOP_END() \
+ } \
+ WRITE_RD(sext_xlen(rd_tmp));
+
+#define P_PAIR_LOOP_END() \
+ } \
+ if (xlen == 32) { \
+ WRITE_RD_PAIR(rd_tmp); \
+ } \
+ else { \
+ WRITE_RD(sext_xlen(rd_tmp)); \
+ }
+
+#define P_REDUCTION_LOOP_END(BIT, IS_SAT) \
+ } \
+ if (IS_SAT) { \
+ P_SAT(pd_res, BIT); \
+ } \
+ type_usew_t<BIT>::type pd = pd_res; \
+ WRITE_PD(); \
+ } \
+ WRITE_RD(sext_xlen(rd_tmp));
+
+#define P_REDUCTION_ULOOP_END(BIT, IS_SAT) \
+ } \
+ if (IS_SAT) { \
+ P_SATU(pd_res, BIT); \
+ } \
+ type_usew_t<BIT>::type pd = pd_res; \
+ WRITE_PD(); \
+ } \
+ WRITE_RD(sext_xlen(rd_tmp));
+
+#define P_SUNPKD8(X, Y) \
+ require_extension(EXT_ZPN); \
+ reg_t rd_tmp = 0; \
+ int16_t pd[4] = { \
+ P_SB(RS1, Y), \
+ P_SB(RS1, X), \
+ P_SB(RS1, Y + 4), \
+ P_SB(RS1, X + 4), \
+ }; \
+ if (xlen == 64) { \
+ memcpy(&rd_tmp, pd, 8); \
+ } else { \
+ memcpy(&rd_tmp, pd, 4); \
+ } \
+ WRITE_RD(sext_xlen(rd_tmp));
+
+#define P_ZUNPKD8(X, Y) \
+ require_extension(EXT_ZPN); \
+ reg_t rd_tmp = 0; \
+ uint16_t pd[4] = { \
+ P_B(RS1, Y), \
+ P_B(RS1, X), \
+ P_B(RS1, Y + 4), \
+ P_B(RS1, X + 4), \
+ }; \
+ if (xlen == 64) { \
+ memcpy(&rd_tmp, pd, 8); \
+ } else { \
+ memcpy(&rd_tmp, pd, 4); \
+ } \
+ WRITE_RD(sext_xlen(rd_tmp));
+
+#define P_PK(BIT, X, Y) \
+ require_extension(EXT_ZPN); \
+ require(BIT == e16 || BIT == e32); \
+ reg_t rd_tmp = 0, UNUSED rs1 = RS1, UNUSED rs2 = RS2; \
+ for (sreg_t i = 0; i < xlen / BIT / 2; i++) { \
+ rd_tmp = set_field(rd_tmp, make_mask64(i * 2 * BIT, BIT), \
+ P_UFIELD(RS2, i * 2 + Y, BIT)); \
+ rd_tmp = set_field(rd_tmp, make_mask64((i * 2 + 1) * BIT, BIT), \
+ P_UFIELD(RS1, i * 2 + X, BIT)); \
+ } \
+ WRITE_RD(sext_xlen(rd_tmp));
+
+#define P_64_PROFILE_BASE() \
+ require_extension(EXT_ZPSFOPERAND); \
+ sreg_t rd, rs1, rs2;
+
+#define P_64_UPROFILE_BASE() \
+ require_extension(EXT_ZPSFOPERAND); \
+ reg_t rd, rs1, rs2;
+
+#define P_64_PROFILE_PARAM(USE_RD, INPUT_PAIR) \
+ if (xlen == 32) { \
+ rs1 = INPUT_PAIR ? RS1_PAIR : RS1; \
+ rs2 = INPUT_PAIR ? RS2_PAIR : RS2; \
+ rd = USE_RD ? RD_PAIR : 0; \
+ } else { \
+ rs1 = RS1; \
+ rs2 = RS2; \
+ rd = USE_RD ? RD : 0; \
+ }
+
+#define P_64_PROFILE(BODY) \
+ P_64_PROFILE_BASE() \
+ P_64_PROFILE_PARAM(false, true) \
+ BODY \
+ P_64_PROFILE_END() \
+
+#define P_64_UPROFILE(BODY) \
+ P_64_UPROFILE_BASE() \
+ P_64_PROFILE_PARAM(false, true) \
+ BODY \
+ P_64_PROFILE_END() \
+
+#define P_64_PROFILE_REDUCTION(BIT, BODY) \
+ P_64_PROFILE_BASE() \
+ P_64_PROFILE_PARAM(true, false) \
+ for (sreg_t i = 0; i < xlen / BIT; i++) { \
+ sreg_t ps1 = P_FIELD(rs1, i, BIT); \
+ sreg_t ps2 = P_FIELD(rs2, i, BIT); \
+ BODY \
+ } \
+ P_64_PROFILE_END() \
+
+#define P_64_UPROFILE_REDUCTION(BIT, BODY) \
+ P_64_UPROFILE_BASE() \
+ P_64_PROFILE_PARAM(true, false) \
+ for (sreg_t i = 0; i < xlen / BIT; i++) { \
+ reg_t ps1 = P_UFIELD(rs1, i, BIT); \
+ reg_t ps2 = P_UFIELD(rs2, i, BIT); \
+ BODY \
+ } \
+ P_64_PROFILE_END() \
+
+#define P_64_PROFILE_END() \
+ if (xlen == 32) { \
+ WRITE_RD_PAIR(rd); \
+ } else { \
+ WRITE_RD(sext_xlen(rd)); \
+ }
+
+#endif
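As a concrete illustration of the lane-extraction macros defined in this header: P_H(R, i) reads the i-th unsigned 16-bit lane, P_SH(R, i) the signed one, and on RV32 the 64-bit operands use even/odd register pairs via READ_REG_PAIR. A minimal standalone check of the lane semantics (values assumed, xlen = 32):

    #include <cassert>
    #include <cstdint>

    int main() {
      uint32_t R = 0x80017fff;
      uint16_t lane0 = R & 0xffff;         // as P_H(R, 0)
      int16_t  lane1 = (int16_t)(R >> 16); // as P_SH(R, 1)
      assert(lane0 == 0x7fff);             // unsigned low halfword
      assert(lane1 == -0x7fff);            // 0x8001 sign-extends to -32767
    }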
diff --git a/riscv/processor.cc b/riscv/processor.cc
index ad9944e..217d49d 100644
--- a/riscv/processor.cc
+++ b/riscv/processor.cc
@@ -20,6 +20,10 @@
#include <string>
#include <algorithm>
+#ifdef __GNUC__
+# pragma GCC diagnostic ignored "-Wunused-variable"
+#endif
+
#undef STATE
#define STATE state
@@ -189,37 +193,54 @@ void state_t::reset(processor_t* const proc, reg_t max_isa)
prv = PRV_M;
v = false;
csrmap[CSR_MISA] = misa = std::make_shared<misa_csr_t>(proc, CSR_MISA, max_isa);
- csrmap[CSR_MSTATUS] = mstatus = std::make_shared<mstatus_csr_t>(proc, CSR_MSTATUS);
- if (xlen == 32) csrmap[CSR_MSTATUSH] = std::make_shared<mstatush_csr_t>(proc, CSR_MSTATUSH, mstatus);
+ mstatus = std::make_shared<mstatus_csr_t>(proc, CSR_MSTATUS);
+
+ if (xlen == 32) {
+ csrmap[CSR_MSTATUS] = std::make_shared<rv32_low_csr_t>(proc, CSR_MSTATUS, mstatus);
+ csrmap[CSR_MSTATUSH] = mstatush = std::make_shared<rv32_high_csr_t>(proc, CSR_MSTATUSH, mstatus);
+ } else {
+ csrmap[CSR_MSTATUS] = mstatus;
+ }
csrmap[CSR_MEPC] = mepc = std::make_shared<epc_csr_t>(proc, CSR_MEPC);
csrmap[CSR_MTVAL] = mtval = std::make_shared<basic_csr_t>(proc, CSR_MTVAL, 0);
csrmap[CSR_MSCRATCH] = std::make_shared<basic_csr_t>(proc, CSR_MSCRATCH, 0);
csrmap[CSR_MTVEC] = mtvec = std::make_shared<tvec_csr_t>(proc, CSR_MTVEC);
csrmap[CSR_MCAUSE] = mcause = std::make_shared<cause_csr_t>(proc, CSR_MCAUSE);
- csrmap[CSR_MINSTRET] = minstret = std::make_shared<wide_counter_csr_t>(proc, CSR_MINSTRET);
- csrmap[CSR_MCYCLE] = mcycle = std::make_shared<wide_counter_csr_t>(proc, CSR_MCYCLE);
+ minstret = std::make_shared<wide_counter_csr_t>(proc, CSR_MINSTRET);
+ mcycle = std::make_shared<wide_counter_csr_t>(proc, CSR_MCYCLE);
+ time = std::make_shared<time_counter_csr_t>(proc, CSR_TIME);
if (proc->extension_enabled_const(EXT_ZICNTR)) {
csrmap[CSR_INSTRET] = std::make_shared<counter_proxy_csr_t>(proc, CSR_INSTRET, minstret);
csrmap[CSR_CYCLE] = std::make_shared<counter_proxy_csr_t>(proc, CSR_CYCLE, mcycle);
+ csrmap[CSR_TIME] = time_proxy = std::make_shared<counter_proxy_csr_t>(proc, CSR_TIME, time);
}
if (xlen == 32) {
- counter_top_csr_t_p minstreth, mcycleh;
- csrmap[CSR_MINSTRETH] = minstreth = std::make_shared<counter_top_csr_t>(proc, CSR_MINSTRETH, minstret);
- csrmap[CSR_MCYCLEH] = mcycleh = std::make_shared<counter_top_csr_t>(proc, CSR_MCYCLEH, mcycle);
+ csr_t_p minstreth, mcycleh;
+ csrmap[CSR_MINSTRET] = std::make_shared<rv32_low_csr_t>(proc, CSR_MINSTRET, minstret);
+ csrmap[CSR_MINSTRETH] = minstreth = std::make_shared<rv32_high_csr_t>(proc, CSR_MINSTRETH, minstret);
+ csrmap[CSR_MCYCLE] = std::make_shared<rv32_low_csr_t>(proc, CSR_MCYCLE, mcycle);
+ csrmap[CSR_MCYCLEH] = mcycleh = std::make_shared<rv32_high_csr_t>(proc, CSR_MCYCLEH, mcycle);
if (proc->extension_enabled_const(EXT_ZICNTR)) {
+ auto timeh = std::make_shared<rv32_high_csr_t>(proc, CSR_TIMEH, time);
csrmap[CSR_INSTRETH] = std::make_shared<counter_proxy_csr_t>(proc, CSR_INSTRETH, minstreth);
csrmap[CSR_CYCLEH] = std::make_shared<counter_proxy_csr_t>(proc, CSR_CYCLEH, mcycleh);
+ csrmap[CSR_TIMEH] = std::make_shared<counter_proxy_csr_t>(proc, CSR_TIMEH, timeh);
}
+ } else {
+ csrmap[CSR_MINSTRET] = minstret;
+ csrmap[CSR_MCYCLE] = mcycle;
}
for (reg_t i = 3; i <= 31; ++i) {
const reg_t which_mevent = CSR_MHPMEVENT3 + i - 3;
+ const reg_t which_meventh = CSR_MHPMEVENT3H + i - 3;
const reg_t which_mcounter = CSR_MHPMCOUNTER3 + i - 3;
const reg_t which_mcounterh = CSR_MHPMCOUNTER3H + i - 3;
const reg_t which_counter = CSR_HPMCOUNTER3 + i - 3;
const reg_t which_counterh = CSR_HPMCOUNTER3H + i - 3;
- auto mevent = std::make_shared<const_csr_t>(proc, which_mevent, 0);
+ const reg_t mevent_mask = proc->extension_enabled_const(EXT_SSCOFPMF) ? MHPMEVENT_VUINH | MHPMEVENT_VSINH | MHPMEVENTH_UINH |
+ MHPMEVENT_UINH | MHPMEVENT_MINH | MHPMEVENT_OF : 0;
+ mevent[i - 3] = std::make_shared<masked_csr_t>(proc, which_mevent, mevent_mask, 0);
auto mcounter = std::make_shared<const_csr_t>(proc, which_mcounter, 0);
- csrmap[which_mevent] = mevent;
csrmap[which_mcounter] = mcounter;
if (proc->extension_enabled_const(EXT_ZICNTR) && proc->extension_enabled_const(EXT_ZIHPM)) {
@@ -227,21 +248,30 @@ void state_t::reset(processor_t* const proc, reg_t max_isa)
csrmap[which_counter] = counter;
}
if (xlen == 32) {
+ csrmap[which_mevent] = std::make_shared<rv32_low_csr_t>(proc, which_mevent, mevent[i - 3]);
auto mcounterh = std::make_shared<const_csr_t>(proc, which_mcounterh, 0);
csrmap[which_mcounterh] = mcounterh;
if (proc->extension_enabled_const(EXT_ZICNTR) && proc->extension_enabled_const(EXT_ZIHPM)) {
auto counterh = std::make_shared<counter_proxy_csr_t>(proc, which_counterh, mcounterh);
csrmap[which_counterh] = counterh;
}
+ if (proc->extension_enabled_const(EXT_SSCOFPMF)) {
+ auto meventh = std::make_shared<rv32_high_csr_t>(proc, which_meventh, mevent[i - 3]);
+ csrmap[which_meventh] = meventh;
+ }
+ } else {
+ csrmap[which_mevent] = mevent[i - 3];
}
}
csrmap[CSR_MCOUNTINHIBIT] = std::make_shared<const_csr_t>(proc, CSR_MCOUNTINHIBIT, 0);
+ if (proc->extension_enabled_const(EXT_SSCOFPMF))
+ csrmap[CSR_SCOUNTOVF] = std::make_shared<scountovf_csr_t>(proc, CSR_SCOUNTOVF);
csrmap[CSR_MIE] = mie = std::make_shared<mie_csr_t>(proc, CSR_MIE);
csrmap[CSR_MIP] = mip = std::make_shared<mip_csr_t>(proc, CSR_MIP);
auto sip_sie_accr = std::make_shared<generic_int_accessor_t>(
this,
~MIP_HS_MASK, // read_mask
- MIP_SSIP, // ip_write_mask
+ MIP_SSIP | MIP_LCOFIP, // ip_write_mask
~MIP_HS_MASK, // ie_write_mask
generic_int_accessor_t::mask_mode_t::MIDELEG,
0 // shiftamt
@@ -338,6 +368,13 @@ void state_t::reset(processor_t* const proc, reg_t max_isa)
(1 << CAUSE_STORE_PAGE_FAULT);
csrmap[CSR_HEDELEG] = hedeleg = std::make_shared<masked_csr_t>(proc, CSR_HEDELEG, hedeleg_mask, 0);
csrmap[CSR_HCOUNTEREN] = hcounteren = std::make_shared<masked_csr_t>(proc, CSR_HCOUNTEREN, counteren_mask, 0);
+ htimedelta = std::make_shared<basic_csr_t>(proc, CSR_HTIMEDELTA, 0);
+ if (xlen == 32) {
+ csrmap[CSR_HTIMEDELTA] = std::make_shared<rv32_low_csr_t>(proc, CSR_HTIMEDELTA, htimedelta);
+ csrmap[CSR_HTIMEDELTAH] = std::make_shared<rv32_high_csr_t>(proc, CSR_HTIMEDELTAH, htimedelta);
+ } else {
+ csrmap[CSR_HTIMEDELTA] = htimedelta;
+ }
csrmap[CSR_HTVAL] = htval = std::make_shared<basic_csr_t>(proc, CSR_HTVAL, 0);
csrmap[CSR_HTINST] = htinst = std::make_shared<basic_csr_t>(proc, CSR_HTINST, 0);
csrmap[CSR_HGATP] = hgatp = std::make_shared<hgatp_csr_t>(proc, CSR_HGATP);
@@ -358,6 +395,8 @@ void state_t::reset(processor_t* const proc, reg_t max_isa)
debug_mode = false;
single_step = STEP_NONE;
+ csrmap[CSR_MSECCFG] = mseccfg = std::make_shared<mseccfg_csr_t>(proc, CSR_MSECCFG);
+
for (int i = 0; i < max_pmp; ++i) {
csrmap[CSR_PMPADDR0 + i] = pmpaddr[i] = std::make_shared<pmpaddr_csr_t>(proc, CSR_PMPADDR0 + i);
}
@@ -377,15 +416,78 @@ void state_t::reset(processor_t* const proc, reg_t max_isa)
csrmap[CSR_MIMPID] = std::make_shared<const_csr_t>(proc, CSR_MIMPID, 0);
csrmap[CSR_MVENDORID] = std::make_shared<const_csr_t>(proc, CSR_MVENDORID, 0);
csrmap[CSR_MHARTID] = std::make_shared<const_csr_t>(proc, CSR_MHARTID, proc->get_id());
- const reg_t menvcfg_mask = (proc->extension_enabled(EXT_ZICBOM) ? MENVCFG_CBCFE | MENVCFG_CBIE : 0) |
- (proc->extension_enabled(EXT_ZICBOZ) ? MENVCFG_CBZE : 0);
- csrmap[CSR_MENVCFG] = menvcfg = std::make_shared<masked_csr_t>(proc, CSR_MENVCFG, menvcfg_mask, 0);
- const reg_t senvcfg_mask = (proc->extension_enabled(EXT_ZICBOM) ? SENVCFG_CBCFE | SENVCFG_CBIE : 0) |
- (proc->extension_enabled(EXT_ZICBOZ) ? SENVCFG_CBZE : 0);
- csrmap[CSR_SENVCFG] = senvcfg = std::make_shared<masked_csr_t>(proc, CSR_SENVCFG, senvcfg_mask, 0);
- const reg_t henvcfg_mask = (proc->extension_enabled(EXT_ZICBOM) ? HENVCFG_CBCFE | HENVCFG_CBIE : 0) |
- (proc->extension_enabled(EXT_ZICBOZ) ? HENVCFG_CBZE : 0);
- csrmap[CSR_HENVCFG] = henvcfg = std::make_shared<masked_csr_t>(proc, CSR_HENVCFG, henvcfg_mask, 0);
+ csrmap[CSR_MCONFIGPTR] = std::make_shared<const_csr_t>(proc, CSR_MCONFIGPTR, 0);
+ if (proc->extension_enabled_const('U')) {
+ const reg_t menvcfg_mask = (proc->extension_enabled(EXT_ZICBOM) ? MENVCFG_CBCFE | MENVCFG_CBIE : 0) |
+ (proc->extension_enabled(EXT_ZICBOZ) ? MENVCFG_CBZE : 0) |
+ (proc->extension_enabled(EXT_SVPBMT) ? MENVCFG_PBMTE : 0) |
+ (proc->extension_enabled(EXT_SSTC) ? MENVCFG_STCE : 0);
+ const reg_t menvcfg_init = (proc->extension_enabled(EXT_SVPBMT) ? MENVCFG_PBMTE : 0);
+ menvcfg = std::make_shared<masked_csr_t>(proc, CSR_MENVCFG, menvcfg_mask, menvcfg_init);
+ if (xlen == 32) {
+ csrmap[CSR_MENVCFG] = std::make_shared<rv32_low_csr_t>(proc, CSR_MENVCFG, menvcfg);
+ csrmap[CSR_MENVCFGH] = std::make_shared<rv32_high_csr_t>(proc, CSR_MENVCFGH, menvcfg);
+ } else {
+ csrmap[CSR_MENVCFG] = menvcfg;
+ }
+ const reg_t senvcfg_mask = (proc->extension_enabled(EXT_ZICBOM) ? SENVCFG_CBCFE | SENVCFG_CBIE : 0) |
+ (proc->extension_enabled(EXT_ZICBOZ) ? SENVCFG_CBZE : 0);
+ csrmap[CSR_SENVCFG] = senvcfg = std::make_shared<senvcfg_csr_t>(proc, CSR_SENVCFG, senvcfg_mask, 0);
+ const reg_t henvcfg_mask = (proc->extension_enabled(EXT_ZICBOM) ? HENVCFG_CBCFE | HENVCFG_CBIE : 0) |
+ (proc->extension_enabled(EXT_ZICBOZ) ? HENVCFG_CBZE : 0) |
+ (proc->extension_enabled(EXT_SVPBMT) ? HENVCFG_PBMTE : 0) |
+ (proc->extension_enabled(EXT_SSTC) ? HENVCFG_STCE : 0);
+ const reg_t henvcfg_init = (proc->extension_enabled(EXT_SVPBMT) ? HENVCFG_PBMTE : 0);
+ henvcfg = std::make_shared<henvcfg_csr_t>(proc, CSR_HENVCFG, henvcfg_mask, henvcfg_init, menvcfg);
+ if (xlen == 32) {
+ csrmap[CSR_HENVCFG] = std::make_shared<rv32_low_csr_t>(proc, CSR_HENVCFG, henvcfg);
+ csrmap[CSR_HENVCFGH] = std::make_shared<rv32_high_csr_t>(proc, CSR_HENVCFGH, henvcfg);
+ } else {
+ csrmap[CSR_HENVCFG] = henvcfg;
+ }
+ }
+ if (proc->extension_enabled_const(EXT_SMSTATEEN)) {
+ const reg_t sstateen0_mask = (proc->extension_enabled(EXT_ZFINX) ? SSTATEEN0_FCSR : 0) | SSTATEEN0_CS;
+ const reg_t hstateen0_mask = sstateen0_mask | HSTATEEN0_SENVCFG | HSTATEEN_SSTATEEN;
+ const reg_t mstateen0_mask = hstateen0_mask;
+ for (int i = 0; i < 4; i++) {
+ const reg_t mstateen_mask = i == 0 ? mstateen0_mask : MSTATEEN_HSTATEEN;
+ mstateen[i] = std::make_shared<masked_csr_t>(proc, CSR_MSTATEEN0 + i, mstateen_mask, 0);
+ if (xlen == 32) {
+ csrmap[CSR_MSTATEEN0 + i] = std::make_shared<rv32_low_csr_t>(proc, CSR_MSTATEEN0 + i, mstateen[i]);
+ csrmap[CSR_MSTATEEN0H + i] = std::make_shared<rv32_high_csr_t>(proc, CSR_MSTATEEN0H + i, mstateen[i]);
+ } else {
+ csrmap[CSR_MSTATEEN0 + i] = mstateen[i];
+ }
+
+ const reg_t hstateen_mask = i == 0 ? hstateen0_mask : HSTATEEN_SSTATEEN;
+ hstateen[i] = std::make_shared<hstateen_csr_t>(proc, CSR_HSTATEEN0 + i, hstateen_mask, 0, i);
+ if (xlen == 32) {
+ csrmap[CSR_HSTATEEN0 + i] = std::make_shared<rv32_low_csr_t>(proc, CSR_HSTATEEN0 + i, hstateen[i]);
+ csrmap[CSR_HSTATEEN0H + i] = std::make_shared<rv32_high_csr_t>(proc, CSR_HSTATEEN0H + i, hstateen[i]);
+ } else {
+ csrmap[CSR_HSTATEEN0 + i] = hstateen[i];
+ }
+
+ const reg_t sstateen_mask = i == 0 ? sstateen0_mask : 0;
+ csrmap[CSR_SSTATEEN0 + i] = sstateen[i] = std::make_shared<sstateen_csr_t>(proc, CSR_SSTATEEN0 + i, sstateen_mask, 0, i);
+ }
+ }
+
+ if (proc->extension_enabled_const(EXT_SSTC)) {
+ stimecmp = std::make_shared<stimecmp_csr_t>(proc, CSR_STIMECMP, MIP_STIP);
+ vstimecmp = std::make_shared<stimecmp_csr_t>(proc, CSR_VSTIMECMP, MIP_VSTIP);
+ auto virtualized_stimecmp = std::make_shared<virtualized_stimecmp_csr_t>(proc, stimecmp, vstimecmp);
+ if (xlen == 32) {
+ csrmap[CSR_STIMECMP] = std::make_shared<rv32_low_csr_t>(proc, CSR_STIMECMP, virtualized_stimecmp);
+ csrmap[CSR_STIMECMPH] = std::make_shared<rv32_high_csr_t>(proc, CSR_STIMECMPH, virtualized_stimecmp);
+ csrmap[CSR_VSTIMECMP] = std::make_shared<rv32_low_csr_t>(proc, CSR_VSTIMECMP, vstimecmp);
+ csrmap[CSR_VSTIMECMPH] = std::make_shared<rv32_high_csr_t>(proc, CSR_VSTIMECMPH, vstimecmp);
+ } else {
+ csrmap[CSR_STIMECMP] = virtualized_stimecmp;
+ csrmap[CSR_VSTIMECMP] = vstimecmp;
+ }
+ }
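+
+ // Note: each 64-bit CSR above is mapped twice on RV32. rv32_low_csr_t
+ // presumably exposes bits [31:0] of the shared backing csr_t and
+ // rv32_high_csr_t bits [63:32], so a read of, say, CSR_MENVCFGH would
+ // return menvcfg->read() >> 32 (a sketch of assumed behavior; the classes
+ // are defined in csrs.h, not in this hunk).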
serialized = false;
@@ -425,7 +527,6 @@ reg_t processor_t::vectorUnit_t::set_vl(int rd, int rs1, reg_t reqVL, reg_t newT
{
int new_vlmul = 0;
if (vtype->read() != newType) {
- vtype->write_raw(newType);
vsew = 1 << (extract64(newType, 3, 3) + 3);
new_vlmul = int8_t(extract64(newType, 0, 3) << 5) >> 5;
vflmul = new_vlmul >= 0 ? 1 << new_vlmul : 1.0 / (1 << -new_vlmul);
@@ -440,6 +541,8 @@ reg_t processor_t::vectorUnit_t::set_vl(int rd, int rs1, reg_t reqVL, reg_t newT
if (vill) {
vlmax = 0;
vtype->write_raw(UINT64_MAX << (p->get_xlen() - 1));
+ } else {
+ vtype->write_raw(newType);
}
}
@@ -501,7 +604,7 @@ void processor_t::reset()
put_csr(CSR_PMPCFG0, PMP_R | PMP_W | PMP_X | PMP_NAPOT);
}
- for (auto e : custom_extensions) // reset any extensions
+ for (auto e : custom_extensions) // reset any extensions
e.second->reset();
if (sim)
@@ -618,6 +721,8 @@ void processor_t::take_interrupt(reg_t pending_interrupts)
enabled_interrupts = MIP_SSIP;
else if (enabled_interrupts & MIP_STIP)
enabled_interrupts = MIP_STIP;
+ else if (enabled_interrupts & MIP_LCOFIP)
+ enabled_interrupts = MIP_LCOFIP;
else if (enabled_interrupts & MIP_VSEIP)
enabled_interrupts = MIP_VSEIP;
else if (enabled_interrupts & MIP_VSSIP)
@@ -714,14 +819,6 @@ void processor_t::take_trap(trap_t& t, reg_t epc)
return;
}
- if (t.cause() == CAUSE_BREAKPOINT && (
- (state.prv == PRV_M && state.dcsr->ebreakm) ||
- (state.prv == PRV_S && state.dcsr->ebreaks) ||
- (state.prv == PRV_U && state.dcsr->ebreaku))) {
- enter_debug_mode(DCSR_CAUSE_SWBP);
- return;
- }
-
// By default, trap to M-mode, unless delegated to HS-mode or VS-mode
reg_t vsdeleg, hsdeleg;
reg_t bit = t.cause();
@@ -792,13 +889,14 @@ void processor_t::take_trap(trap_t& t, reg_t epc)
s = set_field(s, MSTATUS_MPV, curr_virt);
s = set_field(s, MSTATUS_GVA, t.has_gva());
state.mstatus->write(s);
+ if (state.mstatush) state.mstatush->write(s >> 32); // log mstatush change
set_privilege(PRV_M);
}
}
void processor_t::disasm(insn_t insn)
{
- uint64_t bits = insn.bits() & ((1ULL << (8 * insn_length(insn.bits()))) - 1);
+ uint64_t bits = insn.bits();
if (last_pc != state.pc || last_bits != bits) {
std::stringstream s; // first put everything in a string, later send it to output
@@ -866,9 +964,12 @@ reg_t processor_t::get_csr(int which, insn_t insn, bool write, bool peek)
throw trap_illegal_instruction(insn.bits());
}
-reg_t illegal_instruction(processor_t* p, insn_t insn, reg_t pc)
+reg_t illegal_instruction(processor_t UNUSED *p, insn_t insn, reg_t UNUSED pc)
{
- throw trap_illegal_instruction(insn.bits());
+ // An illegal instruction can be longer than ILEN bits, in which case tval
+ // contains only the first ILEN bits of the faulting instruction. We
+ // hard-code ILEN to 32 bits, since all official instructions are at most 32 bits.
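+ // (Example: for a hypothetical 64-bit encoding 0x0123456789abcdef, tval
+ // would report 0x89abcdef, the first two 16-bit parcels.)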
+ throw trap_illegal_instruction(insn.bits() & 0xffffffffULL);
}
insn_func_t processor_t::decode_insn(insn_t insn)
@@ -879,11 +980,11 @@ insn_func_t processor_t::decode_insn(insn_t insn)
bool rve = extension_enabled('E');
- if (unlikely(insn.bits() != desc.match || !desc.func(xlen, rve))) {
+ if (unlikely(insn.bits() != desc.match)) {
// fall back to linear search
int cnt = 0;
insn_desc_t* p = &instructions[0];
- while ((insn.bits() & p->mask) != p->match || !desc.func(xlen, rve))
+ while ((insn.bits() & p->mask) != p->match)
p++, cnt++;
desc = *p;
@@ -905,6 +1006,8 @@ insn_func_t processor_t::decode_insn(insn_t insn)
void processor_t::register_insn(insn_desc_t desc)
{
+ assert(desc.rv32i && desc.rv64i && desc.rv32e && desc.rv64e);
+
instructions.push_back(desc);
}
@@ -957,14 +1060,15 @@ void processor_t::register_base_instructions()
extern reg_t rv64i_##name(processor_t*, insn_t, reg_t); \
extern reg_t rv32e_##name(processor_t*, insn_t, reg_t); \
extern reg_t rv64e_##name(processor_t*, insn_t, reg_t); \
- register_insn((insn_desc_t) { \
- name##_supported, \
- name##_match, \
- name##_mask, \
- rv32i_##name, \
- rv64i_##name, \
- rv32e_##name, \
- rv64e_##name});
+ if (name##_supported) { \
+ register_insn((insn_desc_t) { \
+ name##_match, \
+ name##_mask, \
+ rv32i_##name, \
+ rv64i_##name, \
+ rv32e_##name, \
+ rv64e_##name}); \
+ }
#include "insn_list.h"
#undef DEFINE_INSN
diff --git a/riscv/processor.h b/riscv/processor.h
index 5c14a36..8194046 100644
--- a/riscv/processor.h
+++ b/riscv/processor.h
@@ -29,7 +29,6 @@ reg_t illegal_instruction(processor_t* p, insn_t insn, reg_t pc);
struct insn_desc_t
{
- bool supported;
insn_bits_t match;
insn_bits_t mask;
insn_func_t rv32i;
@@ -39,9 +38,6 @@ struct insn_desc_t
insn_func_t func(int xlen, bool rve)
{
- if (!supported)
- return NULL;
-
if (rve)
return xlen == 64 ? rv64e : rv32e;
else
@@ -50,7 +46,7 @@ struct insn_desc_t
static insn_desc_t illegal()
{
- return {true, 0, 0, &illegal_instruction, &illegal_instruction, &illegal_instruction, &illegal_instruction};
+ return {0, 0, &illegal_instruction, &illegal_instruction, &illegal_instruction, &illegal_instruction};
}
};
@@ -122,7 +118,6 @@ struct type_sew_t<64>
using type=int64_t;
};
-
// architectural state of a RISC-V hart
struct state_t
{
@@ -138,6 +133,7 @@ struct state_t
bool v;
misa_csr_t_p misa;
mstatus_csr_t_p mstatus;
+ csr_t_p mstatush;
csr_t_p mepc;
csr_t_p mtval;
csr_t_p mtvec;
@@ -149,6 +145,7 @@ struct state_t
csr_t_p medeleg;
csr_t_p mideleg;
csr_t_p mcounteren;
+ csr_t_p mevent[29];
csr_t_p scounteren;
csr_t_p sepc;
csr_t_p stval;
@@ -179,6 +176,8 @@ struct state_t
tdata2_csr_t_p tdata2;
bool debug_mode;
+ mseccfg_csr_t_p mseccfg;
+
static const int max_pmp = 16;
pmpaddr_csr_t_p pmpaddr[max_pmp];
@@ -189,6 +188,17 @@ struct state_t
csr_t_p senvcfg;
csr_t_p henvcfg;
+ csr_t_p mstateen[4];
+ csr_t_p sstateen[4];
+ csr_t_p hstateen[4];
+
+ csr_t_p htimedelta;
+ time_counter_csr_t_p time;
+ csr_t_p time_proxy;
+
+ csr_t_p stimecmp;
+ csr_t_p vstimecmp;
+
bool serialized; // whether timer CSRs are in a well-defined state
// When true, execute a single instruction and then enter debug mode. This
@@ -209,14 +219,8 @@ struct state_t
#endif
};
-typedef enum {
- OPERATION_EXECUTE,
- OPERATION_STORE,
- OPERATION_LOAD,
-} trigger_operation_t;
-
// Count number of contiguous 1 bits starting from the LSB.
-static int cto(reg_t val)
+static inline int cto(reg_t val)
{
int res = 0;
while ((val & 1) == 1)
@@ -398,7 +402,7 @@ public:
// vector element for varies SEW
template<class T>
- T& elt(reg_t vReg, reg_t n, bool is_write = false){
+ T& elt(reg_t vReg, reg_t n, bool UNUSED is_write = false) {
assert(vsew != 0);
assert((VLEN >> 3)/sizeof(T) > 0);
reg_t elts_per_reg = (VLEN >> 3) / (sizeof(T));
@@ -445,7 +449,7 @@ public:
vstart_alu(false) {
}
- ~vectorUnit_t(){
+ ~vectorUnit_t() {
free(reg_file);
reg_file = 0;
}
diff --git a/riscv/riscv.mk.in b/riscv/riscv.mk.in
index 708cead..91f7a5f 100644
--- a/riscv/riscv.mk.in
+++ b/riscv/riscv.mk.in
@@ -3,8 +3,14 @@ get_opcode = $(shell grep ^DECLARE_INSN.*\\\<$(2)\\\> $(1) | sed 's/DECLARE_INSN
riscv_subproject_deps = \
fdt \
+ disasm \
+ fesvr \
softfloat \
+riscv_CFLAGS = -fPIC
+
+riscv_install_shared_lib = yes
+
riscv_install_prog_srcs = \
riscv_hdrs = \
@@ -13,9 +19,12 @@ riscv_hdrs = \
decode.h \
devices.h \
dts.h \
+ isa_parser.h \
mmu.h \
cfg.h \
processor.h \
+ p_ext_macros.h \
+ v_ext_macros.h \
sim.h \
simif.h \
trap.h \
@@ -34,7 +43,31 @@ riscv_hdrs = \
csrs.h \
triggers.h \
-riscv_install_hdrs = mmio_plugin.h
+riscv_install_hdrs = \
+ abstract_device.h \
+ cachesim.h \
+ cfg.h \
+ common.h \
+ csrs.h \
+ debug_module.h \
+ debug_rom_defines.h \
+ decode.h \
+ devices.h \
+ encoding.h \
+ entropy_source.h \
+ isa_parser.h \
+ log_file.h \
+ memtracer.h \
+ mmio_plugin.h \
+ mmu.h \
+ p_ext_macros.h \
+ platform.h \
+ processor.h \
+ sim.h \
+ simif.h \
+ trap.h \
+ triggers.h \
+ v_ext_macros.h \
riscv_precompiled_hdrs = \
insn_template.h \
diff --git a/riscv/rocc.cc b/riscv/rocc.cc
index 2d09095..f0dd0b2 100644
--- a/riscv/rocc.cc
+++ b/riscv/rocc.cc
@@ -18,7 +18,7 @@
return pc+4; \
} \
\
- reg_t rocc_t::custom##n(rocc_insn_t insn, reg_t xs1, reg_t xs2) \
+ reg_t rocc_t::custom##n(rocc_insn_t UNUSED insn, reg_t UNUSED xs1, reg_t UNUSED xs2) \
{ \
illegal_instruction(); \
return 0; \
@@ -32,10 +32,10 @@ customX(3)
std::vector<insn_desc_t> rocc_t::get_instructions()
{
std::vector<insn_desc_t> insns;
- insns.push_back((insn_desc_t){true, 0x0b, 0x7f, &::illegal_instruction, c0, &::illegal_instruction, c0});
- insns.push_back((insn_desc_t){true, 0x2b, 0x7f, &::illegal_instruction, c1, &::illegal_instruction, c1});
- insns.push_back((insn_desc_t){true, 0x5b, 0x7f, &::illegal_instruction, c2, &::illegal_instruction, c2});
- insns.push_back((insn_desc_t){true, 0x7b, 0x7f, &::illegal_instruction, c3, &::illegal_instruction, c3});
+ insns.push_back((insn_desc_t){0x0b, 0x7f, &::illegal_instruction, c0, &::illegal_instruction, c0});
+ insns.push_back((insn_desc_t){0x2b, 0x7f, &::illegal_instruction, c1, &::illegal_instruction, c1});
+ insns.push_back((insn_desc_t){0x5b, 0x7f, &::illegal_instruction, c2, &::illegal_instruction, c2});
+ insns.push_back((insn_desc_t){0x7b, 0x7f, &::illegal_instruction, c3, &::illegal_instruction, c3});
return insns;
}
diff --git a/riscv/rom.cc b/riscv/rom.cc
index b852862..2d10e91 100644
--- a/riscv/rom.cc
+++ b/riscv/rom.cc
@@ -13,7 +13,7 @@ bool rom_device_t::load(reg_t addr, size_t len, uint8_t* bytes)
return true;
}
-bool rom_device_t::store(reg_t addr, size_t len, const uint8_t* bytes)
+bool rom_device_t::store(reg_t UNUSED addr, size_t UNUSED len, const uint8_t UNUSED *bytes)
{
return false;
}
diff --git a/riscv/sim.cc b/riscv/sim.cc
index e909009..5ce7d21 100644
--- a/riscv/sim.cc
+++ b/riscv/sim.cc
@@ -225,6 +225,7 @@ int sim_t::run()
{
host = context_t::current();
target.init(sim_thread_main, this);
+ htif_t::set_expected_xlen(isa.get_max_xlen());
return htif_t::run();
}
@@ -326,7 +327,7 @@ void sim_t::make_dtb()
std::pair<reg_t, reg_t> initrd_bounds = cfg->initrd_bounds();
dts = make_dts(INSNS_PER_RTC_TICK, CPU_HZ,
initrd_bounds.first, initrd_bounds.second,
- cfg->bootargs(), procs, mems);
+ cfg->bootargs(), cfg->pmpregions, procs, mems);
dtb = dts_compile(dts);
}
@@ -449,11 +450,7 @@ void sim_t::set_target_endianness(memif_endianness_t endianness)
memif_endianness_t sim_t::get_target_endianness() const
{
-#ifdef RISCV_ENABLE_DUAL_ENDIAN
return debug_mmu->is_target_big_endian()? memif_endianness_big : memif_endianness_little;
-#else
- return memif_endianness_little;
-#endif
}
void sim_t::proc_reset(unsigned id)
diff --git a/riscv/sim.h b/riscv/sim.h
index a09c6e5..7816b87 100644
--- a/riscv/sim.h
+++ b/riscv/sim.h
@@ -138,11 +138,14 @@ private:
void interactive_pc(const std::string& cmd, const std::vector<std::string>& args);
void interactive_mem(const std::string& cmd, const std::vector<std::string>& args);
void interactive_str(const std::string& cmd, const std::vector<std::string>& args);
+ void interactive_dumpmems(const std::string& cmd, const std::vector<std::string>& args);
+ void interactive_mtime(const std::string& cmd, const std::vector<std::string>& args);
+ void interactive_mtimecmp(const std::string& cmd, const std::vector<std::string>& args);
void interactive_until(const std::string& cmd, const std::vector<std::string>& args, bool noisy);
void interactive_until_silent(const std::string& cmd, const std::vector<std::string>& args);
void interactive_until_noisy(const std::string& cmd, const std::vector<std::string>& args);
reg_t get_reg(const std::vector<std::string>& args);
- freg_t get_freg(const std::vector<std::string>& args);
+ freg_t get_freg(const std::vector<std::string>& args, int size);
reg_t get_mem(const std::vector<std::string>& args);
reg_t get_pc(const std::vector<std::string>& args);
diff --git a/riscv/simif.h b/riscv/simif.h
index 0e75d45..75e865e 100644
--- a/riscv/simif.h
+++ b/riscv/simif.h
@@ -19,6 +19,8 @@ public:
virtual const char* get_symbol(uint64_t addr) = 0;
+ virtual ~simif_t() = default;
+
};
#endif
diff --git a/riscv/tracer.h b/riscv/tracer.h
index 9f1bc78..d74edae 100644
--- a/riscv/tracer.h
+++ b/riscv/tracer.h
@@ -5,7 +5,7 @@
#include "processor.h"
-static inline void trace_opcode(processor_t* p, insn_bits_t opc, insn_t insn) {
+static inline void trace_opcode(processor_t UNUSED *p, insn_bits_t UNUSED opc, insn_t UNUSED insn) {
}
#endif
diff --git a/riscv/trap.h b/riscv/trap.h
index 1cd62e1..b5afce9 100644
--- a/riscv/trap.h
+++ b/riscv/trap.h
@@ -8,6 +8,11 @@
struct state_t;
+class trap_debug_mode
+{
+ /* Used to enter debug mode, which isn't quite a normal trap. */
+};
+
class trap_t
{
public:
@@ -28,6 +33,8 @@ class trap_t
return _name;
}
+ virtual ~trap_t() = default;
+
private:
char _name[16];
reg_t which;
diff --git a/riscv/triggers.cc b/riscv/triggers.cc
index 69888bf..90d9d54 100644
--- a/riscv/triggers.cc
+++ b/riscv/triggers.cc
@@ -1,28 +1,22 @@
+#include "debug_defines.h"
#include "processor.h"
#include "triggers.h"
namespace triggers {
-mcontrol_t::mcontrol_t() :
- type(2), maskmax(0), select(false), timing(false), chain_bit(false),
- match(MATCH_EQUAL), m(false), h(false), s(false), u(false),
- execute_bit(false), store_bit(false), load_bit(false)
-{
-}
-
reg_t mcontrol_t::tdata1_read(const processor_t * const proc) const noexcept {
reg_t v = 0;
auto xlen = proc->get_xlen();
- v = set_field(v, MCONTROL_TYPE(xlen), type);
+ v = set_field(v, MCONTROL_TYPE(xlen), MCONTROL_TYPE_MATCH);
v = set_field(v, MCONTROL_DMODE(xlen), dmode);
- v = set_field(v, MCONTROL_MASKMAX(xlen), maskmax);
+ v = set_field(v, MCONTROL_MASKMAX(xlen), 0);
+ v = set_field(v, CSR_MCONTROL_HIT, hit);
v = set_field(v, MCONTROL_SELECT, select);
v = set_field(v, MCONTROL_TIMING, timing);
v = set_field(v, MCONTROL_ACTION, action);
v = set_field(v, MCONTROL_CHAIN, chain_bit);
v = set_field(v, MCONTROL_MATCH, match);
v = set_field(v, MCONTROL_M, m);
- v = set_field(v, MCONTROL_H, h);
v = set_field(v, MCONTROL_S, s);
v = set_field(v, MCONTROL_U, u);
v = set_field(v, MCONTROL_EXECUTE, execute_bit);
@@ -37,6 +31,7 @@ bool mcontrol_t::tdata1_write(processor_t * const proc, const reg_t val) noexcep
}
auto xlen = proc->get_xlen();
dmode = get_field(val, MCONTROL_DMODE(xlen));
+ hit = get_field(val, CSR_MCONTROL_HIT);
select = get_field(val, MCONTROL_SELECT);
timing = get_field(val, MCONTROL_TIMING);
action = (triggers::action_t) get_field(val, MCONTROL_ACTION);
@@ -56,7 +51,6 @@ bool mcontrol_t::tdata1_write(processor_t * const proc, const reg_t val) noexcep
break;
}
m = get_field(val, MCONTROL_M);
- h = get_field(val, MCONTROL_H);
s = get_field(val, MCONTROL_S);
u = get_field(val, MCONTROL_U);
execute_bit = get_field(val, MCONTROL_EXECUTE);
@@ -68,7 +62,7 @@ bool mcontrol_t::tdata1_write(processor_t * const proc, const reg_t val) noexcep
return true;
}
-reg_t mcontrol_t::tdata2_read(const processor_t * const proc) const noexcept {
+reg_t mcontrol_t::tdata2_read(const processor_t UNUSED * const proc) const noexcept {
return tdata2;
}
@@ -107,7 +101,7 @@ bool mcontrol_t::simple_match(unsigned xlen, reg_t value) const {
assert(0);
}
-match_result_t mcontrol_t::memory_access_match(processor_t * const proc, operation_t operation, reg_t address, reg_t data) {
+match_result_t mcontrol_t::memory_access_match(processor_t * const proc, operation_t operation, reg_t address, std::optional<reg_t> data) {
state_t * const state = proc->get_state();
if ((operation == triggers::OPERATION_EXECUTE && !execute_bit) ||
(operation == triggers::OPERATION_STORE && !store_bit) ||
@@ -120,7 +114,9 @@ match_result_t mcontrol_t::memory_access_match(processor_t * const proc, operati
reg_t value;
if (select) {
- value = data;
+ if (!data.has_value())
+ return MATCH_NONE;
+ value = *data;
} else {
value = address;
}
@@ -133,6 +129,9 @@ match_result_t mcontrol_t::memory_access_match(processor_t * const proc, operati
}
if (simple_match(xlen, value)) {
+ /* This is OK because this function is only called if the trigger was not
+ * inhibited by the previous trigger in the chain. */
+ hit = true;
if (timing)
return MATCH_FIRE_AFTER;
else
@@ -153,7 +152,7 @@ module_t::~module_t() {
}
}
-match_result_t module_t::memory_access_match(action_t * const action, operation_t operation, reg_t address, reg_t data)
+match_result_t module_t::memory_access_match(action_t * const action, operation_t operation, reg_t address, std::optional<reg_t> data)
{
state_t * const state = proc->get_state();
if (state->debug_mode)
@@ -167,13 +166,19 @@ match_result_t module_t::memory_access_match(action_t * const action, operation_
continue;
}
+ /* Note: We call memory_access_match for each trigger in a chain as long as
+ * the triggers are matching. This results in "thermometer coding" so that
+ * `hit` is set on each of the consecutive triggers that matched, even if the
+ * entire chain did not match. This is allowed by the spec, because the final
+ * trigger in the chain will never get `hit` set unless the entire chain
+ * matches. */
match_result_t result = triggers[i]->memory_access_match(proc, operation, address, data);
if (result != MATCH_NONE && !triggers[i]->chain()) {
*action = triggers[i]->action;
return result;
}
- chain_ok = true;
+ chain_ok = result != MATCH_NONE || !triggers[i]->chain();
}
return MATCH_NONE;
}
@@ -202,5 +207,4 @@ bool module_t::tdata2_write(processor_t * const proc, unsigned index, const reg_
return result;
}
-
};
diff --git a/riscv/triggers.h b/riscv/triggers.h
index ad294c8..7b40b8f 100644
--- a/riscv/triggers.h
+++ b/riscv/triggers.h
@@ -2,6 +2,7 @@
#define _RISCV_TRIGGERS_H
#include <vector>
+#include <optional>
#include "decode.h"
@@ -31,19 +32,18 @@ typedef enum {
class matched_t
{
public:
- matched_t(triggers::operation_t operation, reg_t address, reg_t data, action_t action) :
- operation(operation), address(address), data(data), action(action) {}
+ matched_t(triggers::operation_t operation, reg_t address, action_t action) :
+ operation(operation), address(address), action(action) {}
triggers::operation_t operation;
reg_t address;
- reg_t data;
action_t action;
};
class trigger_t {
public:
virtual match_result_t memory_access_match(processor_t * const proc,
- operation_t operation, reg_t address, reg_t data) = 0;
+ operation_t operation, reg_t address, std::optional<reg_t> data) = 0;
virtual reg_t tdata1_read(const processor_t * const proc) const noexcept = 0;
virtual bool tdata1_write(processor_t * const proc, const reg_t val) noexcept = 0;
@@ -55,14 +55,14 @@ public:
virtual bool store() const { return false; }
virtual bool load() const { return false; }
-public:
- bool dmode;
- action_t action;
+ bool dmode = false;
+ action_t action = ACTION_DEBUG_EXCEPTION;
+ bool hit = false;
virtual ~trigger_t() {};
protected:
- trigger_t() : dmode(false), action(ACTION_DEBUG_EXCEPTION) {};
+ trigger_t() {}
};
class mcontrol_t : public trigger_t {
@@ -77,8 +77,6 @@ public:
MATCH_MASK_HIGH = MCONTROL_MATCH_MASK_HIGH
} match_t;
- mcontrol_t();
-
virtual reg_t tdata1_read(const processor_t * const proc) const noexcept override;
virtual bool tdata1_write(processor_t * const proc, const reg_t val) noexcept override;
virtual reg_t tdata2_read(const processor_t * const proc) const noexcept override;
@@ -90,27 +88,23 @@ public:
virtual bool load() const override { return load_bit; }
virtual match_result_t memory_access_match(processor_t * const proc,
- operation_t operation, reg_t address, reg_t data) override;
+ operation_t operation, reg_t address, std::optional<reg_t> data) override;
private:
bool simple_match(unsigned xlen, reg_t value) const;
public:
- uint8_t type;
- uint8_t maskmax;
- bool select;
- bool timing;
- bool chain_bit;
- match_t match;
- bool m;
- bool h;
- bool s;
- bool u;
- bool execute_bit;
- bool store_bit;
- bool load_bit;
+ bool select = false;
+ bool timing = false;
+ bool chain_bit = false;
+ match_t match = MATCH_EQUAL;
+ bool m = false;
+ bool s = false;
+ bool u = false;
+ bool execute_bit = false;
+ bool store_bit = false;
+ bool load_bit = false;
reg_t tdata2;
-
};
class module_t {
@@ -121,7 +115,7 @@ public:
unsigned count() const { return triggers.size(); }
match_result_t memory_access_match(action_t * const action,
- operation_t operation, reg_t address, reg_t data);
+ operation_t operation, reg_t address, std::optional<reg_t> data);
reg_t tdata1_read(const processor_t * const proc, unsigned index) const noexcept;
bool tdata1_write(processor_t * const proc, unsigned index, const reg_t val) noexcept;
diff --git a/riscv/v_ext_macros.h b/riscv/v_ext_macros.h
new file mode 100644
index 0000000..ad31938
--- /dev/null
+++ b/riscv/v_ext_macros.h
@@ -0,0 +1,2065 @@
+// See LICENSE for license details.
+
+#ifndef _RISCV_V_EXT_MACROS_H
+#define _RISCV_V_EXT_MACROS_H
+
+//
+// vector: masking skip helper
+//
+#define VI_MASK_VARS \
+ const int midx = i / 64; \
+ const int mpos = i % 64;
+
+#define VI_LOOP_ELEMENT_SKIP(BODY) \
+ VI_MASK_VARS \
+ if (insn.v_vm() == 0) { \
+ BODY; \
+ bool skip = ((P.VU.elt<uint64_t>(0, midx) >> mpos) & 0x1) == 0; \
+ if (skip) { \
+ continue; \
+ } \
+ }
+
+#define VI_ELEMENT_SKIP(inx) \
+ if (inx >= vl) { \
+ continue; \
+ } else if (inx < P.VU.vstart->read()) { \
+ continue; \
+ } else { \
+ VI_LOOP_ELEMENT_SKIP(); \
+ }
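+
+// Example: for element i = 70, VI_MASK_VARS gives midx = 1 and mpos = 6, so
+// (when insn.v_vm() == 0) the element is skipped iff bit 6 of mask word 1
+// (bit 70 of v0) is clear.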
+
+//
+// vector: operation and register access check helper
+//
+static inline bool is_overlapped(const int astart, int asize,
+ const int bstart, int bsize)
+{
+ asize = asize == 0 ? 1 : asize;
+ bsize = bsize == 0 ? 1 : bsize;
+
+ const int aend = astart + asize;
+ const int bend = bstart + bsize;
+
+ return std::max(aend, bend) - std::min(astart, bstart) < asize + bsize;
+}
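+
+// Example: is_overlapped(0, 4, 2, 8) is true ([0,4) and [2,10) intersect),
+// while is_overlapped(0, 4, 4, 8) is false; a size of 0 (fractional LMUL)
+// counts as one register.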
+
+static inline bool is_overlapped_widen(const int astart, int asize,
+ const int bstart, int bsize)
+{
+ asize = asize == 0 ? 1 : asize;
+ bsize = bsize == 0 ? 1 : bsize;
+
+ const int aend = astart + asize;
+ const int bend = bstart + bsize;
+
+ if (astart < bstart &&
+ is_overlapped(astart, asize, bstart, bsize) &&
+ !is_overlapped(astart, asize, bstart + bsize, bsize)) {
+ return false;
+ } else {
+ return std::max(aend, bend) - std::min(astart, bstart) < asize + bsize;
+ }
+}
+
+static inline bool is_aligned(const unsigned val, const unsigned pos)
+{
+ return pos ? (val & (pos - 1)) == 0 : true;
+}
+
+#define VI_NARROW_CHECK_COMMON \
+ require_vector(true); \
+ require(P.VU.vflmul <= 4); \
+ require(P.VU.vsew * 2 <= P.VU.ELEN); \
+ require_align(insn.rs2(), P.VU.vflmul * 2); \
+ require_align(insn.rd(), P.VU.vflmul); \
+ require_vm; \
+
+#define VI_WIDE_CHECK_COMMON \
+ require_vector(true); \
+ require(P.VU.vflmul <= 4); \
+ require(P.VU.vsew * 2 <= P.VU.ELEN); \
+ require_align(insn.rd(), P.VU.vflmul * 2); \
+ require_vm; \
+
+#define VI_CHECK_ST_INDEX(elt_width) \
+ require_vector(false); \
+ require(elt_width <= P.VU.ELEN); \
+ float vemul = ((float)elt_width / P.VU.vsew * P.VU.vflmul); \
+ require(vemul >= 0.125 && vemul <= 8); \
+ reg_t UNUSED emul = vemul < 1 ? 1 : vemul; \
+ reg_t flmul = P.VU.vflmul < 1 ? 1 : P.VU.vflmul; \
+ require_align(insn.rd(), P.VU.vflmul); \
+ require_align(insn.rs2(), vemul); \
+ require((nf * flmul) <= (NVPR / 4) && \
+ (insn.rd() + nf * flmul) <= NVPR); \
+
+#define VI_CHECK_LD_INDEX(elt_width) \
+ VI_CHECK_ST_INDEX(elt_width); \
+ for (reg_t idx = 0; idx < nf; ++idx) { \
+ reg_t flmul = P.VU.vflmul < 1 ? 1 : P.VU.vflmul; \
+ reg_t seg_vd = insn.rd() + flmul * idx; \
+ if (elt_width > P.VU.vsew) { \
+ if (seg_vd != insn.rs2()) \
+ require_noover(seg_vd, P.VU.vflmul, insn.rs2(), vemul); \
+ } else if (elt_width < P.VU.vsew) { \
+ if (vemul < 1) { \
+ require_noover(seg_vd, P.VU.vflmul, insn.rs2(), vemul); \
+ } else { \
+ require_noover_widen(seg_vd, P.VU.vflmul, insn.rs2(), vemul); \
+ } \
+ } \
+ if (nf >= 2) { \
+ require_noover(seg_vd, P.VU.vflmul, insn.rs2(), vemul); \
+ } \
+ } \
+ require_vm; \
+
+#define VI_CHECK_MSS(is_vs1) \
+ if (insn.rd() != insn.rs2()) \
+ require_noover(insn.rd(), 1, insn.rs2(), P.VU.vflmul); \
+ require_align(insn.rs2(), P.VU.vflmul); \
+ if (is_vs1) { \
+ if (insn.rd() != insn.rs1()) \
+ require_noover(insn.rd(), 1, insn.rs1(), P.VU.vflmul); \
+ require_align(insn.rs1(), P.VU.vflmul); \
+ } \
+
+#define VI_CHECK_SSS(is_vs1) \
+ require_vm; \
+ if (P.VU.vflmul > 1) { \
+ require_align(insn.rd(), P.VU.vflmul); \
+ require_align(insn.rs2(), P.VU.vflmul); \
+ if (is_vs1) { \
+ require_align(insn.rs1(), P.VU.vflmul); \
+ } \
+ }
+
+#define VI_CHECK_STORE(elt_width, is_mask_ldst) \
+ require_vector(false); \
+ reg_t veew = is_mask_ldst ? 1 : sizeof(elt_width##_t) * 8; \
+ float vemul = is_mask_ldst ? 1 : ((float)veew / P.VU.vsew * P.VU.vflmul); \
+ reg_t emul = vemul < 1 ? 1 : vemul; \
+ require(vemul >= 0.125 && vemul <= 8); \
+ require_align(insn.rd(), vemul); \
+ require((nf * emul) <= (NVPR / 4) && \
+ (insn.rd() + nf * emul) <= NVPR); \
+ require(veew <= P.VU.ELEN); \
+
+#define VI_CHECK_LOAD(elt_width, is_mask_ldst) \
+ VI_CHECK_STORE(elt_width, is_mask_ldst); \
+ require_vm; \
+
+#define VI_CHECK_DSS(is_vs1) \
+ VI_WIDE_CHECK_COMMON; \
+ require_align(insn.rs2(), P.VU.vflmul); \
+ if (P.VU.vflmul < 1) { \
+ require_noover(insn.rd(), P.VU.vflmul * 2, insn.rs2(), P.VU.vflmul); \
+ } else { \
+ require_noover_widen(insn.rd(), P.VU.vflmul * 2, insn.rs2(), P.VU.vflmul); \
+ } \
+ if (is_vs1) { \
+ require_align(insn.rs1(), P.VU.vflmul); \
+ if (P.VU.vflmul < 1) { \
+ require_noover(insn.rd(), P.VU.vflmul * 2, insn.rs1(), P.VU.vflmul); \
+ } else { \
+ require_noover_widen(insn.rd(), P.VU.vflmul * 2, insn.rs1(), P.VU.vflmul); \
+ } \
+ }
+
+#define VI_CHECK_DDS(is_rs) \
+ VI_WIDE_CHECK_COMMON; \
+ require_align(insn.rs2(), P.VU.vflmul * 2); \
+ if (is_rs) { \
+ require_align(insn.rs1(), P.VU.vflmul); \
+ if (P.VU.vflmul < 1) { \
+ require_noover(insn.rd(), P.VU.vflmul * 2, insn.rs1(), P.VU.vflmul); \
+ } else { \
+ require_noover_widen(insn.rd(), P.VU.vflmul * 2, insn.rs1(), P.VU.vflmul); \
+ } \
+ }
+
+#define VI_CHECK_SDS(is_vs1) \
+ VI_NARROW_CHECK_COMMON; \
+ if (insn.rd() != insn.rs2()) \
+ require_noover(insn.rd(), P.VU.vflmul, insn.rs2(), P.VU.vflmul * 2); \
+ if (is_vs1) \
+ require_align(insn.rs1(), P.VU.vflmul); \
+
+#define VI_CHECK_REDUCTION(is_wide) \
+ require_vector(true); \
+ if (is_wide) { \
+ require(P.VU.vsew * 2 <= P.VU.ELEN); \
+ } \
+ require_align(insn.rs2(), P.VU.vflmul); \
+ require(P.VU.vstart->read() == 0); \
+
+#define VI_CHECK_SLIDE(is_over) \
+ require_align(insn.rs2(), P.VU.vflmul); \
+ require_align(insn.rd(), P.VU.vflmul); \
+ require_vm; \
+ if (is_over) \
+ require(insn.rd() != insn.rs2()); \
+
+//
+// vector: loop header and end helper
+//
+#define VI_GENERAL_LOOP_BASE \
+ require(P.VU.vsew >= e8 && P.VU.vsew <= e64); \
+ require_vector(true); \
+ reg_t vl = P.VU.vl->read(); \
+ reg_t UNUSED sew = P.VU.vsew; \
+ reg_t rd_num = insn.rd(); \
+ reg_t UNUSED rs1_num = insn.rs1(); \
+ reg_t rs2_num = insn.rs2(); \
+ for (reg_t i = P.VU.vstart->read(); i < vl; ++i) {
+
+#define VI_LOOP_BASE \
+ VI_GENERAL_LOOP_BASE \
+ VI_LOOP_ELEMENT_SKIP();
+
+#define VI_LOOP_END \
+ } \
+ P.VU.vstart->write(0);
+
+#define VI_LOOP_REDUCTION_END(x) \
+ } \
+ if (vl > 0) { \
+ vd_0_des = vd_0_res; \
+ } \
+ P.VU.vstart->write(0);
+
+#define VI_LOOP_CARRY_BASE \
+ VI_GENERAL_LOOP_BASE \
+ VI_MASK_VARS \
+ auto v0 = P.VU.elt<uint64_t>(0, midx); \
+ const uint64_t mmask = UINT64_C(1) << mpos; \
+ const uint128_t op_mask = (UINT64_MAX >> (64 - sew)); \
+ uint64_t carry = insn.v_vm() == 0 ? (v0 >> mpos) & 0x1 : 0; \
+ uint128_t res = 0; \
+ auto &vd = P.VU.elt<uint64_t>(rd_num, midx, true);
+
+#define VI_LOOP_CARRY_END \
+ vd = (vd & ~mmask) | (((res) << mpos) & mmask); \
+ } \
+ P.VU.vstart->write(0);
+#define VI_LOOP_WITH_CARRY_BASE \
+ VI_GENERAL_LOOP_BASE \
+ VI_MASK_VARS \
+ auto &v0 = P.VU.elt<uint64_t>(0, midx); \
+ const uint128_t op_mask = (UINT64_MAX >> (64 - sew)); \
+ uint64_t carry = (v0 >> mpos) & 0x1;
+
+#define VI_LOOP_CMP_BASE \
+ require(P.VU.vsew >= e8 && P.VU.vsew <= e64); \
+ require_vector(true); \
+ reg_t vl = P.VU.vl->read(); \
+ reg_t sew = P.VU.vsew; \
+ reg_t UNUSED rd_num = insn.rd(); \
+ reg_t UNUSED rs1_num = insn.rs1(); \
+ reg_t rs2_num = insn.rs2(); \
+ for (reg_t i = P.VU.vstart->read(); i < vl; ++i) { \
+ VI_LOOP_ELEMENT_SKIP(); \
+ uint64_t mmask = UINT64_C(1) << mpos; \
+ uint64_t &vdi = P.VU.elt<uint64_t>(insn.rd(), midx, true); \
+ uint64_t res = 0;
+
+#define VI_LOOP_CMP_END \
+ vdi = (vdi & ~mmask) | (((res) << mpos) & mmask); \
+ } \
+ P.VU.vstart->write(0);
+
+#define VI_LOOP_MASK(op) \
+ require(P.VU.vsew <= e64); \
+ require_vector(true); \
+ reg_t vl = P.VU.vl->read(); \
+ for (reg_t i = P.VU.vstart->read(); i < vl; ++i) { \
+ int midx = i / 64; \
+ int mpos = i % 64; \
+ uint64_t mmask = UINT64_C(1) << mpos; \
+ uint64_t vs2 = P.VU.elt<uint64_t>(insn.rs2(), midx); \
+ uint64_t vs1 = P.VU.elt<uint64_t>(insn.rs1(), midx); \
+ uint64_t &res = P.VU.elt<uint64_t>(insn.rd(), midx, true); \
+ res = (res & ~mmask) | ((op) & (1ULL << mpos)); \
+ } \
+ P.VU.vstart->write(0);
+
+#define VI_LOOP_NSHIFT_BASE \
+ VI_GENERAL_LOOP_BASE; \
+ VI_LOOP_ELEMENT_SKIP({ \
+ require(!(insn.rd() == 0 && P.VU.vflmul > 1)); \
+ });
+
+#define INT_ROUNDING(result, xrm, gb) \
+ do { \
+ const uint64_t lsb = 1UL << (gb); \
+ const uint64_t lsb_half = lsb >> 1; \
+ switch (xrm) { \
+ case VRM::RNU: \
+ result += lsb_half; \
+ break; \
+ case VRM::RNE: \
+ if ((result & lsb_half) && ((result & (lsb_half - 1)) || (result & lsb))) \
+ result += lsb; \
+ break; \
+ case VRM::RDN: \
+ break; \
+ case VRM::ROD: \
+ if (result & (lsb - 1)) \
+ result |= lsb; \
+ break; \
+ case VRM::INVALID_RM: \
+ assert(false); /* invalid rounding mode must not reach here */ \
+ } \
+ } while (0)
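+
+// Example: averaging 1 and 2 yields result = 3 with one guard bit (gb = 1,
+// so lsb = 2, lsb_half = 1). Under VRM::RNU the macro adds lsb_half (3 -> 4)
+// and the caller's final >> 1 gives 2; under VRM::RDN nothing is added and
+// 3 >> 1 gives 1.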
+
+//
+// vector: integer and masking operand access helper
+//
+#define VXI_PARAMS(x) \
+ type_sew_t<x>::type &vd = P.VU.elt<type_sew_t<x>::type>(rd_num, i, true); \
+ type_sew_t<x>::type vs1 = P.VU.elt<type_sew_t<x>::type>(rs1_num, i); \
+ type_sew_t<x>::type vs2 = P.VU.elt<type_sew_t<x>::type>(rs2_num, i); \
+ type_sew_t<x>::type rs1 = (type_sew_t<x>::type)RS1; \
+ type_sew_t<x>::type simm5 = (type_sew_t<x>::type)insn.v_simm5();
+
+#define VV_U_PARAMS(x) \
+ type_usew_t<x>::type &vd = P.VU.elt<type_usew_t<x>::type>(rd_num, i, true); \
+ type_usew_t<x>::type vs1 = P.VU.elt<type_usew_t<x>::type>(rs1_num, i); \
+ type_usew_t<x>::type vs2 = P.VU.elt<type_usew_t<x>::type>(rs2_num, i);
+
+#define VX_U_PARAMS(x) \
+ type_usew_t<x>::type &vd = P.VU.elt<type_usew_t<x>::type>(rd_num, i, true); \
+ type_usew_t<x>::type rs1 = (type_usew_t<x>::type)RS1; \
+ type_usew_t<x>::type vs2 = P.VU.elt<type_usew_t<x>::type>(rs2_num, i);
+
+#define VI_U_PARAMS(x) \
+ type_usew_t<x>::type &vd = P.VU.elt<type_usew_t<x>::type>(rd_num, i, true); \
+ type_usew_t<x>::type UNUSED zimm5 = (type_usew_t<x>::type)insn.v_zimm5(); \
+ type_usew_t<x>::type vs2 = P.VU.elt<type_usew_t<x>::type>(rs2_num, i);
+
+#define VV_PARAMS(x) \
+ type_sew_t<x>::type UNUSED &vd = P.VU.elt<type_sew_t<x>::type>(rd_num, i, true); \
+ type_sew_t<x>::type vs1 = P.VU.elt<type_sew_t<x>::type>(rs1_num, i); \
+ type_sew_t<x>::type UNUSED vs2 = P.VU.elt<type_sew_t<x>::type>(rs2_num, i);
+
+#define VX_PARAMS(x) \
+ type_sew_t<x>::type UNUSED &vd = P.VU.elt<type_sew_t<x>::type>(rd_num, i, true); \
+ type_sew_t<x>::type rs1 = (type_sew_t<x>::type)RS1; \
+ type_sew_t<x>::type UNUSED vs2 = P.VU.elt<type_sew_t<x>::type>(rs2_num, i);
+
+#define VI_PARAMS(x) \
+ type_sew_t<x>::type &vd = P.VU.elt<type_sew_t<x>::type>(rd_num, i, true); \
+ type_sew_t<x>::type simm5 = (type_sew_t<x>::type)insn.v_simm5(); \
+ type_sew_t<x>::type UNUSED vs2 = P.VU.elt<type_sew_t<x>::type>(rs2_num, i);
+
+#define XV_PARAMS(x) \
+ type_sew_t<x>::type &vd = P.VU.elt<type_sew_t<x>::type>(rd_num, i, true); \
+ type_usew_t<x>::type vs2 = P.VU.elt<type_usew_t<x>::type>(rs2_num, RS1);
+
+#define VV_SU_PARAMS(x) \
+ type_sew_t<x>::type &vd = P.VU.elt<type_sew_t<x>::type>(rd_num, i, true); \
+ type_usew_t<x>::type vs1 = P.VU.elt<type_usew_t<x>::type>(rs1_num, i); \
+ type_sew_t<x>::type vs2 = P.VU.elt<type_sew_t<x>::type>(rs2_num, i);
+
+#define VX_SU_PARAMS(x) \
+ type_sew_t<x>::type &vd = P.VU.elt<type_sew_t<x>::type>(rd_num, i, true); \
+ type_usew_t<x>::type rs1 = (type_usew_t<x>::type)RS1; \
+ type_sew_t<x>::type vs2 = P.VU.elt<type_sew_t<x>::type>(rs2_num, i);
+
+#define VV_UCMP_PARAMS(x) \
+ type_usew_t<x>::type vs1 = P.VU.elt<type_usew_t<x>::type>(rs1_num, i); \
+ type_usew_t<x>::type vs2 = P.VU.elt<type_usew_t<x>::type>(rs2_num, i);
+
+#define VX_UCMP_PARAMS(x) \
+ type_usew_t<x>::type rs1 = (type_usew_t<x>::type)RS1; \
+ type_usew_t<x>::type vs2 = P.VU.elt<type_usew_t<x>::type>(rs2_num, i);
+
+#define VI_UCMP_PARAMS(x) \
+ type_usew_t<x>::type vs2 = P.VU.elt<type_usew_t<x>::type>(rs2_num, i);
+
+#define VV_CMP_PARAMS(x) \
+ type_sew_t<x>::type vs1 = P.VU.elt<type_sew_t<x>::type>(rs1_num, i); \
+ type_sew_t<x>::type vs2 = P.VU.elt<type_sew_t<x>::type>(rs2_num, i);
+
+#define VX_CMP_PARAMS(x) \
+ type_sew_t<x>::type rs1 = (type_sew_t<x>::type)RS1; \
+ type_sew_t<x>::type vs2 = P.VU.elt<type_sew_t<x>::type>(rs2_num, i);
+
+#define VI_CMP_PARAMS(x) \
+ type_sew_t<x>::type simm5 = (type_sew_t<x>::type)insn.v_simm5(); \
+ type_sew_t<x>::type vs2 = P.VU.elt<type_sew_t<x>::type>(rs2_num, i);
+
+#define VI_XI_SLIDEDOWN_PARAMS(x, off) \
+ auto &vd = P.VU.elt<type_sew_t<x>::type>(rd_num, i, true); \
+ auto vs2 = P.VU.elt<type_sew_t<x>::type>(rs2_num, i + off);
+
+#define VI_XI_SLIDEUP_PARAMS(x, offset) \
+ auto &vd = P.VU.elt<type_sew_t<x>::type>(rd_num, i, true); \
+ auto vs2 = P.VU.elt<type_sew_t<x>::type>(rs2_num, i - offset);
+
+#define VI_NARROW_PARAMS(sew1, sew2) \
+ auto &vd = P.VU.elt<type_usew_t<sew1>::type>(rd_num, i, true); \
+ auto UNUSED vs2_u = P.VU.elt<type_usew_t<sew2>::type>(rs2_num, i); \
+ auto UNUSED vs2 = P.VU.elt<type_sew_t<sew2>::type>(rs2_num, i); \
+ auto zimm5 = (type_usew_t<sew1>::type)insn.v_zimm5();
+
+#define VX_NARROW_PARAMS(sew1, sew2) \
+ auto &vd = P.VU.elt<type_usew_t<sew1>::type>(rd_num, i, true); \
+ auto UNUSED vs2_u = P.VU.elt<type_usew_t<sew2>::type>(rs2_num, i); \
+ auto UNUSED vs2 = P.VU.elt<type_sew_t<sew2>::type>(rs2_num, i); \
+ auto rs1 = (type_sew_t<sew1>::type)RS1;
+
+#define VV_NARROW_PARAMS(sew1, sew2) \
+ auto &vd = P.VU.elt<type_usew_t<sew1>::type>(rd_num, i, true); \
+ auto UNUSED vs2_u = P.VU.elt<type_usew_t<sew2>::type>(rs2_num, i); \
+ auto UNUSED vs2 = P.VU.elt<type_sew_t<sew2>::type>(rs2_num, i); \
+ auto vs1 = P.VU.elt<type_sew_t<sew1>::type>(rs1_num, i);
+
+#define XI_CARRY_PARAMS(x) \
+ auto vs2 = P.VU.elt<type_sew_t<x>::type>(rs2_num, i); \
+ auto UNUSED rs1 = (type_sew_t<x>::type)RS1; \
+ auto UNUSED simm5 = (type_sew_t<x>::type)insn.v_simm5(); \
+
+#define VV_CARRY_PARAMS(x) \
+ auto vs2 = P.VU.elt<type_sew_t<x>::type>(rs2_num, i); \
+ auto vs1 = P.VU.elt<type_sew_t<x>::type>(rs1_num, i); \
+
+#define XI_WITH_CARRY_PARAMS(x) \
+ auto vs2 = P.VU.elt<type_sew_t<x>::type>(rs2_num, i); \
+ auto UNUSED rs1 = (type_sew_t<x>::type)RS1; \
+ auto UNUSED simm5 = (type_sew_t<x>::type)insn.v_simm5(); \
+ auto &vd = P.VU.elt<type_sew_t<x>::type>(rd_num, i, true);
+
+#define VV_WITH_CARRY_PARAMS(x) \
+ auto vs2 = P.VU.elt<type_sew_t<x>::type>(rs2_num, i); \
+ auto vs1 = P.VU.elt<type_sew_t<x>::type>(rs1_num, i); \
+ auto &vd = P.VU.elt<type_sew_t<x>::type>(rd_num, i, true);
+
+#define VFP_V_PARAMS(width) \
+ float##width##_t &vd = P.VU.elt<float##width##_t>(rd_num, i, true); \
+ float##width##_t vs2 = P.VU.elt<float##width##_t>(rs2_num, i);
+
+#define VFP_VV_PARAMS(width) \
+ float##width##_t UNUSED &vd = P.VU.elt<float##width##_t>(rd_num, i, true); \
+ float##width##_t vs1 = P.VU.elt<float##width##_t>(rs1_num, i); \
+ float##width##_t vs2 = P.VU.elt<float##width##_t>(rs2_num, i);
+
+#define VFP_VF_PARAMS(width) \
+ float##width##_t UNUSED &vd = P.VU.elt<float##width##_t>(rd_num, i, true); \
+ float##width##_t rs1 = f##width(READ_FREG(rs1_num)); \
+ float##width##_t vs2 = P.VU.elt<float##width##_t>(rs2_num, i);
+
+#define CVT_FP_TO_FP_PARAMS(from_width, to_width) \
+ auto vs2 = P.VU.elt<float##from_width##_t>(rs2_num, i); \
+ auto &vd = P.VU.elt<float##to_width##_t>(rd_num, i, true);
+
+#define CVT_INT_TO_FP_PARAMS(from_width, to_width, sign) \
+ auto vs2 = P.VU.elt<sign##from_width##_t>(rs2_num, i); \
+ auto &vd = P.VU.elt<float##to_width##_t>(rd_num, i, true);
+
+#define CVT_FP_TO_INT_PARAMS(from_width, to_width, sign) \
+ auto vs2 = P.VU.elt<float##from_width##_t>(rs2_num, i); \
+ auto &vd = P.VU.elt<sign##to_width##_t>(rd_num, i, true);
+
+//
+// vector: integer and masking operation loop
+//
+
+#define INSNS_BASE(PARAMS, BODY) \
+ if (sew == e8) { \
+ PARAMS(e8); \
+ BODY; \
+ } else if (sew == e16) { \
+ PARAMS(e16); \
+ BODY; \
+ } else if (sew == e32) { \
+ PARAMS(e32); \
+ BODY; \
+ } else if (sew == e64) { \
+ PARAMS(e64); \
+ BODY; \
+ }
+
+// comparison result to masking register
+#define VI_LOOP_CMP_BODY(PARAMS, BODY) \
+ VI_LOOP_CMP_BASE \
+ INSNS_BASE(PARAMS, BODY) \
+ VI_LOOP_CMP_END
+
+#define VI_VV_LOOP_CMP(BODY) \
+ VI_CHECK_MSS(true); \
+ VI_LOOP_CMP_BODY(VV_CMP_PARAMS, BODY)
+
+#define VI_VX_LOOP_CMP(BODY) \
+ VI_CHECK_MSS(false); \
+ VI_LOOP_CMP_BODY(VX_CMP_PARAMS, BODY)
+
+#define VI_VI_LOOP_CMP(BODY) \
+ VI_CHECK_MSS(false); \
+ VI_LOOP_CMP_BODY(VI_CMP_PARAMS, BODY)
+
+#define VI_VV_ULOOP_CMP(BODY) \
+ VI_CHECK_MSS(true); \
+ VI_LOOP_CMP_BODY(VV_UCMP_PARAMS, BODY)
+
+#define VI_VX_ULOOP_CMP(BODY) \
+ VI_CHECK_MSS(false); \
+ VI_LOOP_CMP_BODY(VX_UCMP_PARAMS, BODY)
+
+#define VI_VI_ULOOP_CMP(BODY) \
+ VI_CHECK_MSS(false); \
+ VI_LOOP_CMP_BODY(VI_UCMP_PARAMS, BODY)
+
+// merge and copy loop
+#define VI_MERGE_VARS \
+ VI_MASK_VARS \
+ bool UNUSED use_first = (P.VU.elt<uint64_t>(0, midx) >> mpos) & 0x1;
+
+#define VI_MERGE_LOOP_BASE \
+ VI_GENERAL_LOOP_BASE \
+ VI_MERGE_VARS
+
+#define VI_VV_MERGE_LOOP(BODY) \
+ VI_CHECK_SSS(true); \
+ VI_MERGE_LOOP_BASE \
+ if (sew == e8) { \
+ VV_PARAMS(e8); \
+ BODY; \
+ } else if (sew == e16) { \
+ VV_PARAMS(e16); \
+ BODY; \
+ } else if (sew == e32) { \
+ VV_PARAMS(e32); \
+ BODY; \
+ } else if (sew == e64) { \
+ VV_PARAMS(e64); \
+ BODY; \
+ } \
+ VI_LOOP_END
+
+#define VI_VX_MERGE_LOOP(BODY) \
+ VI_CHECK_SSS(false); \
+ VI_MERGE_LOOP_BASE \
+ if (sew == e8) { \
+ VX_PARAMS(e8); \
+ BODY; \
+ } else if (sew == e16) { \
+ VX_PARAMS(e16); \
+ BODY; \
+ } else if (sew == e32) { \
+ VX_PARAMS(e32); \
+ BODY; \
+ } else if (sew == e64) { \
+ VX_PARAMS(e64); \
+ BODY; \
+ } \
+ VI_LOOP_END
+
+#define VI_VI_MERGE_LOOP(BODY) \
+ VI_CHECK_SSS(false); \
+ VI_MERGE_LOOP_BASE \
+ if (sew == e8) { \
+ VI_PARAMS(e8); \
+ BODY; \
+ } else if (sew == e16) { \
+ VI_PARAMS(e16); \
+ BODY; \
+ } else if (sew == e32) { \
+ VI_PARAMS(e32); \
+ BODY; \
+ } else if (sew == e64) { \
+ VI_PARAMS(e64); \
+ BODY; \
+ } \
+ VI_LOOP_END
+
+#define VI_VF_MERGE_LOOP(BODY) \
+ VI_CHECK_SSS(false); \
+ VI_VFP_COMMON \
+ for (reg_t i = P.VU.vstart->read(); i < vl; ++i) { \
+ VI_MERGE_VARS \
+ if (P.VU.vsew == e16) { \
+ VFP_VF_PARAMS(16); \
+ BODY; \
+ } else if (P.VU.vsew == e32) { \
+ VFP_VF_PARAMS(32); \
+ BODY; \
+ } else if (P.VU.vsew == e64) { \
+ VFP_VF_PARAMS(64); \
+ BODY; \
+ } \
+ VI_LOOP_END
+
+// reduction loop - signed
+#define VI_LOOP_REDUCTION_BASE(x) \
+ require(x >= e8 && x <= e64); \
+ reg_t vl = P.VU.vl->read(); \
+ reg_t rd_num = insn.rd(); \
+ reg_t rs1_num = insn.rs1(); \
+ reg_t rs2_num = insn.rs2(); \
+ auto &vd_0_des = P.VU.elt<type_sew_t<x>::type>(rd_num, 0, true); \
+ auto vd_0_res = P.VU.elt<type_sew_t<x>::type>(rs1_num, 0); \
+ for (reg_t i = P.VU.vstart->read(); i < vl; ++i) { \
+ VI_LOOP_ELEMENT_SKIP(); \
+ auto vs2 = P.VU.elt<type_sew_t<x>::type>(rs2_num, i); \
+
+#define REDUCTION_LOOP(x, BODY) \
+ VI_LOOP_REDUCTION_BASE(x) \
+ BODY; \
+ VI_LOOP_REDUCTION_END(x)
+
+#define VI_VV_LOOP_REDUCTION(BODY) \
+ VI_CHECK_REDUCTION(false); \
+ reg_t sew = P.VU.vsew; \
+ if (sew == e8) { \
+ REDUCTION_LOOP(e8, BODY) \
+ } else if (sew == e16) { \
+ REDUCTION_LOOP(e16, BODY) \
+ } else if (sew == e32) { \
+ REDUCTION_LOOP(e32, BODY) \
+ } else if (sew == e64) { \
+ REDUCTION_LOOP(e64, BODY) \
+ }
+
+// reduction loop - unsigned
+#define VI_ULOOP_REDUCTION_BASE(x) \
+ require(x >= e8 && x <= e64); \
+ reg_t vl = P.VU.vl->read(); \
+ reg_t rd_num = insn.rd(); \
+ reg_t rs1_num = insn.rs1(); \
+ reg_t rs2_num = insn.rs2(); \
+ auto &vd_0_des = P.VU.elt<type_usew_t<x>::type>(rd_num, 0, true); \
+ auto vd_0_res = P.VU.elt<type_usew_t<x>::type>(rs1_num, 0); \
+ for (reg_t i = P.VU.vstart->read(); i < vl; ++i) { \
+ VI_LOOP_ELEMENT_SKIP(); \
+ auto vs2 = P.VU.elt<type_usew_t<x>::type>(rs2_num, i);
+
+#define REDUCTION_ULOOP(x, BODY) \
+ VI_ULOOP_REDUCTION_BASE(x) \
+ BODY; \
+ VI_LOOP_REDUCTION_END(x)
+
+#define VI_VV_ULOOP_REDUCTION(BODY) \
+ VI_CHECK_REDUCTION(false); \
+ reg_t sew = P.VU.vsew; \
+ if (sew == e8) { \
+ REDUCTION_ULOOP(e8, BODY) \
+ } else if (sew == e16) { \
+ REDUCTION_ULOOP(e16, BODY) \
+ } else if (sew == e32) { \
+ REDUCTION_ULOOP(e32, BODY) \
+ } else if (sew == e64) { \
+ REDUCTION_ULOOP(e64, BODY) \
+ }
+
+// general VXI signed/unsigned loop
+#define VI_VV_ULOOP(BODY) \
+ VI_CHECK_SSS(true) \
+ VI_LOOP_BASE \
+ if (sew == e8) { \
+ VV_U_PARAMS(e8); \
+ BODY; \
+ } else if (sew == e16) { \
+ VV_U_PARAMS(e16); \
+ BODY; \
+ } else if (sew == e32) { \
+ VV_U_PARAMS(e32); \
+ BODY; \
+ } else if (sew == e64) { \
+ VV_U_PARAMS(e64); \
+ BODY; \
+ } \
+ VI_LOOP_END
+
+#define VI_VV_LOOP(BODY) \
+ VI_CHECK_SSS(true) \
+ VI_LOOP_BASE \
+ if (sew == e8) { \
+ VV_PARAMS(e8); \
+ BODY; \
+ } else if (sew == e16) { \
+ VV_PARAMS(e16); \
+ BODY; \
+ } else if (sew == e32) { \
+ VV_PARAMS(e32); \
+ BODY; \
+ } else if (sew == e64) { \
+ VV_PARAMS(e64); \
+ BODY; \
+ } \
+ VI_LOOP_END
+
+#define VI_VX_ULOOP(BODY) \
+ VI_CHECK_SSS(false) \
+ VI_LOOP_BASE \
+ if (sew == e8) { \
+ VX_U_PARAMS(e8); \
+ BODY; \
+ } else if (sew == e16) { \
+ VX_U_PARAMS(e16); \
+ BODY; \
+ } else if (sew == e32) { \
+ VX_U_PARAMS(e32); \
+ BODY; \
+ } else if (sew == e64) { \
+ VX_U_PARAMS(e64); \
+ BODY; \
+ } \
+ VI_LOOP_END
+
+#define VI_VX_LOOP(BODY) \
+ VI_CHECK_SSS(false) \
+ VI_LOOP_BASE \
+ if (sew == e8) { \
+ VX_PARAMS(e8); \
+ BODY; \
+ } else if (sew == e16) { \
+ VX_PARAMS(e16); \
+ BODY; \
+ } else if (sew == e32) { \
+ VX_PARAMS(e32); \
+ BODY; \
+ } else if (sew == e64) { \
+ VX_PARAMS(e64); \
+ BODY; \
+ } \
+ VI_LOOP_END
+
+#define VI_VI_ULOOP(BODY) \
+ VI_CHECK_SSS(false) \
+ VI_LOOP_BASE \
+ if (sew == e8) { \
+ VI_U_PARAMS(e8); \
+ BODY; \
+ } else if (sew == e16) { \
+ VI_U_PARAMS(e16); \
+ BODY; \
+ } else if (sew == e32) { \
+ VI_U_PARAMS(e32); \
+ BODY; \
+ } else if (sew == e64) { \
+ VI_U_PARAMS(e64); \
+ BODY; \
+ } \
+ VI_LOOP_END
+
+#define VI_VI_LOOP(BODY) \
+ VI_CHECK_SSS(false) \
+ VI_LOOP_BASE \
+ if (sew == e8) { \
+ VI_PARAMS(e8); \
+ BODY; \
+ } else if (sew == e16) { \
+ VI_PARAMS(e16); \
+ BODY; \
+ } else if (sew == e32) { \
+ VI_PARAMS(e32); \
+ BODY; \
+ } else if (sew == e64) { \
+ VI_PARAMS(e64); \
+ BODY; \
+ } \
+ VI_LOOP_END
+
+// signed unsigned operation loop (e.g. mulhsu)
+#define VI_VV_SU_LOOP(BODY) \
+ VI_CHECK_SSS(true) \
+ VI_LOOP_BASE \
+ if (sew == e8) { \
+ VV_SU_PARAMS(e8); \
+ BODY; \
+ } else if (sew == e16) { \
+ VV_SU_PARAMS(e16); \
+ BODY; \
+ } else if (sew == e32) { \
+ VV_SU_PARAMS(e32); \
+ BODY; \
+ } else if (sew == e64) { \
+ VV_SU_PARAMS(e64); \
+ BODY; \
+ } \
+ VI_LOOP_END
+
+#define VI_VX_SU_LOOP(BODY) \
+ VI_CHECK_SSS(false) \
+ VI_LOOP_BASE \
+ if (sew == e8) { \
+ VX_SU_PARAMS(e8); \
+ BODY; \
+ } else if (sew == e16) { \
+ VX_SU_PARAMS(e16); \
+ BODY; \
+ } else if (sew == e32) { \
+ VX_SU_PARAMS(e32); \
+ BODY; \
+ } else if (sew == e64) { \
+ VX_SU_PARAMS(e64); \
+ BODY; \
+ } \
+ VI_LOOP_END
+
+// narrow operation loop
+#define VI_VV_LOOP_NARROW(BODY) \
+ VI_CHECK_SDS(true); \
+ VI_LOOP_BASE \
+ if (sew == e8) { \
+ VV_NARROW_PARAMS(e8, e16) \
+ BODY; \
+ } else if (sew == e16) { \
+ VV_NARROW_PARAMS(e16, e32) \
+ BODY; \
+ } else if (sew == e32) { \
+ VV_NARROW_PARAMS(e32, e64) \
+ BODY; \
+ } \
+ VI_LOOP_END
+
+#define VI_VX_LOOP_NARROW(BODY) \
+ VI_CHECK_SDS(false); \
+ VI_LOOP_BASE \
+ if (sew == e8) { \
+ VX_NARROW_PARAMS(e8, e16) \
+ BODY; \
+ } else if (sew == e16) { \
+ VX_NARROW_PARAMS(e16, e32) \
+ BODY; \
+ } else if (sew == e32) { \
+ VX_NARROW_PARAMS(e32, e64) \
+ BODY; \
+ } \
+ VI_LOOP_END
+
+#define VI_VI_LOOP_NARROW(BODY) \
+ VI_CHECK_SDS(false); \
+ VI_LOOP_BASE \
+ if (sew == e8) { \
+ VI_NARROW_PARAMS(e8, e16) \
+ BODY; \
+ } else if (sew == e16) { \
+ VI_NARROW_PARAMS(e16, e32) \
+ BODY; \
+ } else if (sew == e32) { \
+ VI_NARROW_PARAMS(e32, e64) \
+ BODY; \
+ } \
+ VI_LOOP_END
+
+#define VI_VI_LOOP_NSHIFT(BODY) \
+ VI_CHECK_SDS(false); \
+ VI_LOOP_NSHIFT_BASE \
+ if (sew == e8) { \
+ VI_NARROW_PARAMS(e8, e16) \
+ BODY; \
+ } else if (sew == e16) { \
+ VI_NARROW_PARAMS(e16, e32) \
+ BODY; \
+ } else if (sew == e32) { \
+ VI_NARROW_PARAMS(e32, e64) \
+ BODY; \
+ } \
+ VI_LOOP_END
+
+#define VI_VX_LOOP_NSHIFT(BODY) \
+ VI_CHECK_SDS(false); \
+ VI_LOOP_NSHIFT_BASE \
+ if (sew == e8) { \
+ VX_NARROW_PARAMS(e8, e16) \
+ BODY; \
+ } else if (sew == e16) { \
+ VX_NARROW_PARAMS(e16, e32) \
+ BODY; \
+ } else if (sew == e32) { \
+ VX_NARROW_PARAMS(e32, e64) \
+ BODY; \
+ } \
+ VI_LOOP_END
+
+#define VI_VV_LOOP_NSHIFT(BODY) \
+ VI_CHECK_SDS(true); \
+ VI_LOOP_NSHIFT_BASE \
+ if (sew == e8) { \
+ VV_NARROW_PARAMS(e8, e16) \
+ BODY; \
+ } else if (sew == e16) { \
+ VV_NARROW_PARAMS(e16, e32) \
+ BODY; \
+ } else if (sew == e32) { \
+ VV_NARROW_PARAMS(e32, e64) \
+ BODY; \
+ } \
+ VI_LOOP_END
+
+// widen operation loop
+#define VI_VV_LOOP_WIDEN(BODY) \
+ VI_LOOP_BASE \
+ if (sew == e8) { \
+ VV_PARAMS(e8); \
+ BODY; \
+ } else if (sew == e16) { \
+ VV_PARAMS(e16); \
+ BODY; \
+ } else if (sew == e32) { \
+ VV_PARAMS(e32); \
+ BODY; \
+ } \
+ VI_LOOP_END
+
+#define VI_VX_LOOP_WIDEN(BODY) \
+ VI_LOOP_BASE \
+ if (sew == e8) { \
+ VX_PARAMS(e8); \
+ BODY; \
+ } else if (sew == e16) { \
+ VX_PARAMS(e16); \
+ BODY; \
+ } else if (sew == e32) { \
+ VX_PARAMS(e32); \
+ BODY; \
+ } \
+ VI_LOOP_END
+
+#define VI_WIDE_OP_AND_ASSIGN(var0, var1, var2, op0, op1, sign) \
+ switch (P.VU.vsew) { \
+ case e8: { \
+ sign##16_t UNUSED vd_w = P.VU.elt<sign##16_t>(rd_num, i); \
+ P.VU.elt<uint16_t>(rd_num, i, true) = \
+ op1((sign##16_t)(sign##8_t)var0 op0 (sign##16_t)(sign##8_t)var1) + var2; \
+ } \
+ break; \
+ case e16: { \
+ sign##32_t UNUSED vd_w = P.VU.elt<sign##32_t>(rd_num, i); \
+ P.VU.elt<uint32_t>(rd_num, i, true) = \
+ op1((sign##32_t)(sign##16_t)var0 op0 (sign##32_t)(sign##16_t)var1) + var2; \
+ } \
+ break; \
+ default: { \
+ sign##64_t UNUSED vd_w = P.VU.elt<sign##64_t>(rd_num, i); \
+ P.VU.elt<uint64_t>(rd_num, i, true) = \
+ op1((sign##64_t)(sign##32_t)var0 op0 (sign##64_t)(sign##32_t)var1) + var2; \
+ } \
+ break; \
+ }
+
+#define VI_WIDE_OP_AND_ASSIGN_MIX(var0, var1, var2, op0, op1, sign_d, sign_1, sign_2) \
+ switch (P.VU.vsew) { \
+ case e8: { \
+ sign_d##16_t UNUSED vd_w = P.VU.elt<sign_d##16_t>(rd_num, i); \
+ P.VU.elt<uint16_t>(rd_num, i, true) = \
+ op1((sign_1##16_t)(sign_1##8_t)var0 op0 (sign_2##16_t)(sign_2##8_t)var1) + var2; \
+ } \
+ break; \
+ case e16: { \
+ sign_d##32_t UNUSED vd_w = P.VU.elt<sign_d##32_t>(rd_num, i); \
+ P.VU.elt<uint32_t>(rd_num, i, true) = \
+ op1((sign_1##32_t)(sign_1##16_t)var0 op0 (sign_2##32_t)(sign_2##16_t)var1) + var2; \
+ } \
+ break; \
+ default: { \
+ sign_d##64_t UNUSED vd_w = P.VU.elt<sign_d##64_t>(rd_num, i); \
+ P.VU.elt<uint64_t>(rd_num, i, true) = \
+ op1((sign_1##64_t)(sign_1##32_t)var0 op0 (sign_2##64_t)(sign_2##32_t)var1) + var2; \
+ } \
+ break; \
+ }
+
+#define VI_WIDE_WVX_OP(var0, op0, sign) \
+ switch (P.VU.vsew) { \
+ case e8: { \
+ sign##16_t &vd_w = P.VU.elt<sign##16_t>(rd_num, i, true); \
+ sign##16_t vs2_w = P.VU.elt<sign##16_t>(rs2_num, i); \
+ vd_w = vs2_w op0 (sign##16_t)(sign##8_t)var0; \
+ } \
+ break; \
+ case e16: { \
+ sign##32_t &vd_w = P.VU.elt<sign##32_t>(rd_num, i, true); \
+ sign##32_t vs2_w = P.VU.elt<sign##32_t>(rs2_num, i); \
+ vd_w = vs2_w op0 (sign##32_t)(sign##16_t)var0; \
+ } \
+ break; \
+ default: { \
+ sign##64_t &vd_w = P.VU.elt<sign##64_t>(rd_num, i, true); \
+ sign##64_t vs2_w = P.VU.elt<sign##64_t>(rs2_num, i); \
+ vd_w = vs2_w op0 (sign##64_t)(sign##32_t)var0; \
+ } \
+ break; \
+ }
+
+// wide reduction loop - signed
+#define VI_LOOP_WIDE_REDUCTION_BASE(sew1, sew2) \
+ reg_t vl = P.VU.vl->read(); \
+ reg_t rd_num = insn.rd(); \
+ reg_t rs1_num = insn.rs1(); \
+ reg_t rs2_num = insn.rs2(); \
+ auto &vd_0_des = P.VU.elt<type_sew_t<sew2>::type>(rd_num, 0, true); \
+ auto vd_0_res = P.VU.elt<type_sew_t<sew2>::type>(rs1_num, 0); \
+ for (reg_t i = P.VU.vstart->read(); i < vl; ++i) { \
+ VI_LOOP_ELEMENT_SKIP(); \
+ auto vs2 = P.VU.elt<type_sew_t<sew1>::type>(rs2_num, i);
+
+#define WIDE_REDUCTION_LOOP(sew1, sew2, BODY) \
+ VI_LOOP_WIDE_REDUCTION_BASE(sew1, sew2) \
+ BODY; \
+ VI_LOOP_REDUCTION_END(sew2)
+
+#define VI_VV_LOOP_WIDE_REDUCTION(BODY) \
+ VI_CHECK_REDUCTION(true); \
+ reg_t sew = P.VU.vsew; \
+ if (sew == e8) { \
+ WIDE_REDUCTION_LOOP(e8, e16, BODY) \
+ } else if (sew == e16) { \
+ WIDE_REDUCTION_LOOP(e16, e32, BODY) \
+ } else if (sew == e32) { \
+ WIDE_REDUCTION_LOOP(e32, e64, BODY) \
+ }
+
+// wide reduction loop - unsigned
+#define VI_ULOOP_WIDE_REDUCTION_BASE(sew1, sew2) \
+ reg_t vl = P.VU.vl->read(); \
+ reg_t rd_num = insn.rd(); \
+ reg_t rs1_num = insn.rs1(); \
+ reg_t rs2_num = insn.rs2(); \
+ auto &vd_0_des = P.VU.elt<type_usew_t<sew2>::type>(rd_num, 0, true); \
+ auto vd_0_res = P.VU.elt<type_usew_t<sew2>::type>(rs1_num, 0); \
+ for (reg_t i = P.VU.vstart->read(); i < vl; ++i) { \
+ VI_LOOP_ELEMENT_SKIP(); \
+ auto vs2 = P.VU.elt<type_usew_t<sew1>::type>(rs2_num, i);
+
+#define WIDE_REDUCTION_ULOOP(sew1, sew2, BODY) \
+ VI_ULOOP_WIDE_REDUCTION_BASE(sew1, sew2) \
+ BODY; \
+ VI_LOOP_REDUCTION_END(sew2)
+
+#define VI_VV_ULOOP_WIDE_REDUCTION(BODY) \
+ VI_CHECK_REDUCTION(true); \
+ reg_t sew = P.VU.vsew; \
+ if (sew == e8) { \
+ WIDE_REDUCTION_ULOOP(e8, e16, BODY) \
+ } else if (sew == e16) { \
+ WIDE_REDUCTION_ULOOP(e16, e32, BODY) \
+ } else if (sew == e32) { \
+ WIDE_REDUCTION_ULOOP(e32, e64, BODY) \
+ }
+
+// carry/borrow bit loop
+#define VI_VV_LOOP_CARRY(BODY) \
+ VI_CHECK_MSS(true); \
+ VI_LOOP_CARRY_BASE \
+ if (sew == e8) { \
+ VV_CARRY_PARAMS(e8) \
+ BODY; \
+ } else if (sew == e16) { \
+ VV_CARRY_PARAMS(e16) \
+ BODY; \
+ } else if (sew == e32) { \
+ VV_CARRY_PARAMS(e32) \
+ BODY; \
+ } else if (sew == e64) { \
+ VV_CARRY_PARAMS(e64) \
+ BODY; \
+ } \
+ VI_LOOP_CARRY_END
+
+#define VI_XI_LOOP_CARRY(BODY) \
+ VI_CHECK_MSS(false); \
+ VI_LOOP_CARRY_BASE \
+ if (sew == e8) { \
+ XI_CARRY_PARAMS(e8) \
+ BODY; \
+ } else if (sew == e16) { \
+ XI_CARRY_PARAMS(e16) \
+ BODY; \
+ } else if (sew == e32) { \
+ XI_CARRY_PARAMS(e32) \
+ BODY; \
+ } else if (sew == e64) { \
+ XI_CARRY_PARAMS(e64) \
+ BODY; \
+ } \
+ VI_LOOP_CARRY_END
+
+#define VI_VV_LOOP_WITH_CARRY(BODY) \
+ VI_CHECK_SSS(true); \
+ VI_LOOP_WITH_CARRY_BASE \
+ if (sew == e8) { \
+ VV_WITH_CARRY_PARAMS(e8) \
+ BODY; \
+ } else if (sew == e16) { \
+ VV_WITH_CARRY_PARAMS(e16) \
+ BODY; \
+ } else if (sew == e32) { \
+ VV_WITH_CARRY_PARAMS(e32) \
+ BODY; \
+ } else if (sew == e64) { \
+ VV_WITH_CARRY_PARAMS(e64) \
+ BODY; \
+ } \
+ VI_LOOP_END
+
+#define VI_XI_LOOP_WITH_CARRY(BODY) \
+ VI_CHECK_SSS(false); \
+ VI_LOOP_WITH_CARRY_BASE \
+ if (sew == e8) { \
+ XI_WITH_CARRY_PARAMS(e8) \
+ BODY; \
+ } else if (sew == e16) { \
+ XI_WITH_CARRY_PARAMS(e16) \
+ BODY; \
+ } else if (sew == e32) { \
+ XI_WITH_CARRY_PARAMS(e32) \
+ BODY; \
+ } else if (sew == e64) { \
+ XI_WITH_CARRY_PARAMS(e64) \
+ BODY; \
+ } \
+ VI_LOOP_END
+
+// average loop
+#define VI_VV_LOOP_AVG(op) \
+VRM xrm = p->VU.get_vround_mode(); \
+VI_VV_LOOP({ \
+ uint128_t res = ((uint128_t)vs2) op vs1; \
+ INT_ROUNDING(res, xrm, 1); \
+ vd = res >> 1; \
+})
+
+#define VI_VX_LOOP_AVG(op) \
+VRM xrm = p->VU.get_vround_mode(); \
+VI_VX_LOOP({ \
+ uint128_t res = ((uint128_t)vs2) op rs1; \
+ INT_ROUNDING(res, xrm, 1); \
+ vd = res >> 1; \
+})
+
+#define VI_VV_ULOOP_AVG(op) \
+VRM xrm = p->VU.get_vround_mode(); \
+VI_VV_ULOOP({ \
+ uint128_t res = ((uint128_t)vs2) op vs1; \
+ INT_ROUNDING(res, xrm, 1); \
+ vd = res >> 1; \
+})
+
+#define VI_VX_ULOOP_AVG(op) \
+VRM xrm = p->VU.get_vround_mode(); \
+VI_VX_ULOOP({ \
+ uint128_t res = ((uint128_t)vs2) op rs1; \
+ INT_ROUNDING(res, xrm, 1); \
+ vd = res >> 1; \
+})
+
+//
+// vector: load/store helper
+//
+#define VI_STRIP(inx) \
+ reg_t vreg_inx = inx;
+
+#define VI_DUPLICATE_VREG(reg_num, idx_sew) \
+reg_t index[P.VU.vlmax]; \
+ for (reg_t i = 0; i < P.VU.vlmax && P.VU.vl->read() != 0; ++i) { \
+ switch (idx_sew) { \
+ case e8: \
+ index[i] = P.VU.elt<uint8_t>(reg_num, i); \
+ break; \
+ case e16: \
+ index[i] = P.VU.elt<uint16_t>(reg_num, i); \
+ break; \
+ case e32: \
+ index[i] = P.VU.elt<uint32_t>(reg_num, i); \
+ break; \
+ case e64: \
+ index[i] = P.VU.elt<uint64_t>(reg_num, i); \
+ break; \
+ } \
+}
+
+#define VI_LD(stride, offset, elt_width, is_mask_ldst) \
+ const reg_t nf = insn.v_nf() + 1; \
+ const reg_t vl = is_mask_ldst ? ((P.VU.vl->read() + 7) / 8) : P.VU.vl->read(); \
+ const reg_t baseAddr = RS1; \
+ const reg_t vd = insn.rd(); \
+ VI_CHECK_LOAD(elt_width, is_mask_ldst); \
+ for (reg_t i = 0; i < vl; ++i) { \
+ VI_ELEMENT_SKIP(i); \
+ VI_STRIP(i); \
+ P.VU.vstart->write(i); \
+ for (reg_t fn = 0; fn < nf; ++fn) { \
+ elt_width##_t val = MMU.load_##elt_width( \
+ baseAddr + (stride) + (offset) * sizeof(elt_width##_t)); \
+ P.VU.elt<elt_width##_t>(vd + fn * emul, vreg_inx, true) = val; \
+ } \
+ } \
+ P.VU.vstart->write(0);
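+
+// Usage sketch (assumed; the actual expansions live in riscv/insns/, not in
+// this file): a unit-stride load such as vle32.v would invoke roughly
+// VI_LD(0, (i * nf + fn), int32, false), while a strided vlse32.v would pass
+// the byte stride in the first argument, e.g. VI_LD(i * RS2, fn, int32, false).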
+
+#define VI_LD_INDEX(elt_width, is_seg) \
+ const reg_t nf = insn.v_nf() + 1; \
+ const reg_t vl = P.VU.vl->read(); \
+ const reg_t baseAddr = RS1; \
+ const reg_t vd = insn.rd(); \
+ if (!is_seg) \
+ require(nf == 1); \
+ VI_CHECK_LD_INDEX(elt_width); \
+ VI_DUPLICATE_VREG(insn.rs2(), elt_width); \
+ for (reg_t i = 0; i < vl; ++i) { \
+ VI_ELEMENT_SKIP(i); \
+ VI_STRIP(i); \
+ P.VU.vstart->write(i); \
+ for (reg_t fn = 0; fn < nf; ++fn) { \
+ switch (P.VU.vsew) { \
+ case e8: \
+ P.VU.elt<uint8_t>(vd + fn * flmul, vreg_inx, true) = \
+ MMU.load_uint8(baseAddr + index[i] + fn * 1); \
+ break; \
+ case e16: \
+ P.VU.elt<uint16_t>(vd + fn * flmul, vreg_inx, true) = \
+ MMU.load_uint16(baseAddr + index[i] + fn * 2); \
+ break; \
+ case e32: \
+ P.VU.elt<uint32_t>(vd + fn * flmul, vreg_inx, true) = \
+ MMU.load_uint32(baseAddr + index[i] + fn * 4); \
+ break; \
+ default: \
+ P.VU.elt<uint64_t>(vd + fn * flmul, vreg_inx, true) = \
+ MMU.load_uint64(baseAddr + index[i] + fn * 8); \
+ break; \
+ } \
+ } \
+ } \
+ P.VU.vstart->write(0);
+
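+// Usage sketch (illustrative): indexed loads pass only the index EEW and a
+// segment flag, e.g. vluxei32.v is roughly VI_LD_INDEX(e32, true); the data
+// width comes from vsew inside the loop, and index[i] supplies the byte
+// offsets gathered by VI_DUPLICATE_VREG.
+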
+#define VI_ST(stride, offset, elt_width, is_mask_ldst) \
+ const reg_t nf = insn.v_nf() + 1; \
+ const reg_t vl = is_mask_ldst ? ((P.VU.vl->read() + 7) / 8) : P.VU.vl->read(); \
+ const reg_t baseAddr = RS1; \
+ const reg_t vs3 = insn.rd(); \
+ VI_CHECK_STORE(elt_width, is_mask_ldst); \
+ for (reg_t i = 0; i < vl; ++i) { \
+ VI_STRIP(i) \
+ VI_ELEMENT_SKIP(i); \
+ P.VU.vstart->write(i); \
+ for (reg_t fn = 0; fn < nf; ++fn) { \
+ elt_width##_t val = P.VU.elt<elt_width##_t>(vs3 + fn * emul, vreg_inx); \
+ MMU.store_##elt_width( \
+ baseAddr + (stride) + (offset) * sizeof(elt_width##_t), val); \
+ } \
+ } \
+ P.VU.vstart->write(0);
+
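+// Usage sketch (illustrative): stores mirror the loads, e.g. roughly
+//
+//   VI_ST(0, (i * nf + fn), uint32, false);   // vse32.v
+//   VI_ST_INDEX(e32, true);                   // vsuxei32.v (defined below)
+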
+#define VI_ST_INDEX(elt_width, is_seg) \
+ const reg_t nf = insn.v_nf() + 1; \
+ const reg_t vl = P.VU.vl->read(); \
+ const reg_t baseAddr = RS1; \
+ const reg_t vs3 = insn.rd(); \
+ if (!is_seg) \
+ require(nf == 1); \
+ VI_CHECK_ST_INDEX(elt_width); \
+ VI_DUPLICATE_VREG(insn.rs2(), elt_width); \
+ for (reg_t i = 0; i < vl; ++i) { \
+ VI_STRIP(i) \
+ VI_ELEMENT_SKIP(i); \
+ P.VU.vstart->write(i); \
+ for (reg_t fn = 0; fn < nf; ++fn) { \
+ switch (P.VU.vsew) { \
+ case e8: \
+ MMU.store_uint8(baseAddr + index[i] + fn * 1, \
+ P.VU.elt<uint8_t>(vs3 + fn * flmul, vreg_inx)); \
+ break; \
+ case e16: \
+ MMU.store_uint16(baseAddr + index[i] + fn * 2, \
+ P.VU.elt<uint16_t>(vs3 + fn * flmul, vreg_inx)); \
+ break; \
+ case e32: \
+ MMU.store_uint32(baseAddr + index[i] + fn * 4, \
+ P.VU.elt<uint32_t>(vs3 + fn * flmul, vreg_inx)); \
+ break; \
+ default: \
+ MMU.store_uint64(baseAddr + index[i] + fn * 8, \
+ P.VU.elt<uint64_t>(vs3 + fn * flmul, vreg_inx)); \
+ break; \
+ } \
+ } \
+ } \
+ P.VU.vstart->write(0);
+
+#define VI_LDST_FF(elt_width) \
+ const reg_t nf = insn.v_nf() + 1; \
+  const reg_t vl = P.VU.vl->read(); \
+ const reg_t baseAddr = RS1; \
+ const reg_t rd_num = insn.rd(); \
+ VI_CHECK_LOAD(elt_width, false); \
+ bool early_stop = false; \
+  for (reg_t i = P.VU.vstart->read(); i < vl; ++i) { \
+ VI_STRIP(i); \
+ VI_ELEMENT_SKIP(i); \
+ \
+ for (reg_t fn = 0; fn < nf; ++fn) { \
+ uint64_t val; \
+ try { \
+ val = MMU.load_##elt_width( \
+ baseAddr + (i * nf + fn) * sizeof(elt_width##_t)); \
+ } catch (trap_t& t) { \
+ if (i == 0) \
+ throw; /* Only take exception on zeroth element */ \
+ /* Reduce VL if an exception occurs on a later element */ \
+ early_stop = true; \
+ P.VU.vl->write_raw(i); \
+ break; \
+ } \
+      P.VU.elt<elt_width##_t>(rd_num + fn * emul, vreg_inx, true) = val; \
+ } \
+ \
+ if (early_stop) { \
+ break; \
+ } \
+ } \
+  P.VU.vstart->write(0);
+
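+// Usage sketch (illustrative): fault-only-first loads such as vle32ff.v are
+// roughly VI_LDST_FF(uint32). A trap on element 0 is re-raised as usual; a
+// trap on any later element instead truncates vl to that index, letting
+// software resume with the shortened vector.
+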
+#define VI_LD_WHOLE(elt_width) \
+ require_vector_novtype(true); \
+ require(sizeof(elt_width ## _t) * 8 <= P.VU.ELEN); \
+ const reg_t baseAddr = RS1; \
+ const reg_t vd = insn.rd(); \
+ const reg_t len = insn.v_nf() + 1; \
+ require_align(vd, len); \
+ const reg_t elt_per_reg = P.VU.vlenb / sizeof(elt_width ## _t); \
+ const reg_t size = len * elt_per_reg; \
+ if (P.VU.vstart->read() < size) { \
+ reg_t i = P.VU.vstart->read() / elt_per_reg; \
+ reg_t off = P.VU.vstart->read() % elt_per_reg; \
+ if (off) { \
+ for (reg_t pos = off; pos < elt_per_reg; ++pos) { \
+ auto val = MMU.load_## elt_width(baseAddr + \
+ P.VU.vstart->read() * sizeof(elt_width ## _t)); \
+ P.VU.elt<elt_width ## _t>(vd + i, pos, true) = val; \
+ P.VU.vstart->write(P.VU.vstart->read() + 1); \
+ } \
+ ++i; \
+ } \
+ for (; i < len; ++i) { \
+ for (reg_t pos = 0; pos < elt_per_reg; ++pos) { \
+ auto val = MMU.load_## elt_width(baseAddr + \
+ P.VU.vstart->read() * sizeof(elt_width ## _t)); \
+ P.VU.elt<elt_width ## _t>(vd + i, pos, true) = val; \
+ P.VU.vstart->write(P.VU.vstart->read() + 1); \
+ } \
+ } \
+ } \
+ P.VU.vstart->write(0);
+
+#define VI_ST_WHOLE \
+ require_vector_novtype(true); \
+ const reg_t baseAddr = RS1; \
+ const reg_t vs3 = insn.rd(); \
+ const reg_t len = insn.v_nf() + 1; \
+ require_align(vs3, len); \
+ const reg_t size = len * P.VU.vlenb; \
+ \
+ if (P.VU.vstart->read() < size) { \
+ reg_t i = P.VU.vstart->read() / P.VU.vlenb; \
+ reg_t off = P.VU.vstart->read() % P.VU.vlenb; \
+ if (off) { \
+ for (reg_t pos = off; pos < P.VU.vlenb; ++pos) { \
+ auto val = P.VU.elt<uint8_t>(vs3 + i, pos); \
+ MMU.store_uint8(baseAddr + P.VU.vstart->read(), val); \
+ P.VU.vstart->write(P.VU.vstart->read() + 1); \
+ } \
+ i++; \
+ } \
+ for (; i < len; ++i) { \
+ for (reg_t pos = 0; pos < P.VU.vlenb; ++pos) { \
+ auto val = P.VU.elt<uint8_t>(vs3 + i, pos); \
+ MMU.store_uint8(baseAddr + P.VU.vstart->read(), val); \
+ P.VU.vstart->write(P.VU.vstart->read() + 1); \
+ } \
+ } \
+ } \
+ P.VU.vstart->write(0);
+
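+// Usage sketch (illustrative): the whole-register moves ignore vtype/vl and
+// transfer len = nf + 1 registers, e.g. roughly VI_LD_WHOLE(uint8) for
+// vl1re8.v and VI_ST_WHOLE for vs1r.v (stores are always byte-granular).
+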
+//
+// vector: amo
+//
+#define VI_AMO(op, type, idx_type) \
+ require_vector(false); \
+ require_align(insn.rd(), P.VU.vflmul); \
+ require(P.VU.vsew <= P.get_xlen() && P.VU.vsew >= 32); \
+ float vemul = ((float)idx_type / P.VU.vsew * P.VU.vflmul); \
+ require(vemul >= 0.125 && vemul <= 8); \
+ require_align(insn.rs2(), vemul); \
+ if (insn.v_wd()) { \
+ require_vm; \
+ if (idx_type > P.VU.vsew) { \
+ if (insn.rd() != insn.rs2()) \
+ require_noover(insn.rd(), P.VU.vflmul, insn.rs2(), vemul); \
+ } else if (idx_type < P.VU.vsew) { \
+ if (vemul < 1) { \
+ require_noover(insn.rd(), P.VU.vflmul, insn.rs2(), vemul); \
+ } else { \
+ require_noover_widen(insn.rd(), P.VU.vflmul, insn.rs2(), vemul); \
+ } \
+ } \
+ } \
+ VI_DUPLICATE_VREG(insn.rs2(), idx_type); \
+ const reg_t vl = P.VU.vl->read(); \
+ const reg_t baseAddr = RS1; \
+ const reg_t vd = insn.rd(); \
+ for (reg_t i = P.VU.vstart->read(); i < vl; ++i) { \
+ VI_ELEMENT_SKIP(i); \
+ VI_STRIP(i); \
+ P.VU.vstart->write(i); \
+ switch (P.VU.vsew) { \
+ case e32: { \
+ auto vs3 = P.VU.elt< type ## 32_t>(vd, vreg_inx); \
+ auto val = MMU.amo_uint32(baseAddr + index[i], [&](type ## 32_t UNUSED lhs) { op }); \
+ if (insn.v_wd()) \
+ P.VU.elt< type ## 32_t>(vd, vreg_inx, true) = val; \
+ } \
+ break; \
+ case e64: { \
+ auto vs3 = P.VU.elt< type ## 64_t>(vd, vreg_inx); \
+ auto val = MMU.amo_uint64(baseAddr + index[i], [&](type ## 64_t UNUSED lhs) { op }); \
+ if (insn.v_wd()) \
+ P.VU.elt< type ## 64_t>(vd, vreg_inx, true) = val; \
+ } \
+ break; \
+ default: \
+ require(0); \
+ break; \
+ } \
+ } \
+ P.VU.vstart->write(0);
+
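+// Usage sketch (illustrative): a vector AMO supplies the update body, the
+// signedness prefix, and the index EEW, e.g. vamoaddei32.v is roughly
+//
+//   VI_AMO({ return lhs + vs3; }, uint, e32);
+//
+// where lhs is the prior memory value and vs3 the vector operand; the prior
+// value is written back to vd only when the wd bit is set.
+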
+//
+// vector: sign/unsigned extension
+//
+#define VI_VV_EXT(div, type) \
+ require(insn.rd() != insn.rs2()); \
+ require_vm; \
+ reg_t from = P.VU.vsew / div; \
+ require(from >= e8 && from <= e64); \
+ require(((float)P.VU.vflmul / div) >= 0.125 && ((float)P.VU.vflmul / div) <= 8 ); \
+ require_align(insn.rd(), P.VU.vflmul); \
+ require_align(insn.rs2(), P.VU.vflmul / div); \
+ if ((P.VU.vflmul / div) < 1) { \
+ require_noover(insn.rd(), P.VU.vflmul, insn.rs2(), P.VU.vflmul / div); \
+ } else { \
+ require_noover_widen(insn.rd(), P.VU.vflmul, insn.rs2(), P.VU.vflmul / div); \
+ } \
+ reg_t pat = (((P.VU.vsew >> 3) << 4) | from >> 3); \
+ VI_GENERAL_LOOP_BASE \
+ VI_LOOP_ELEMENT_SKIP(); \
+ switch (pat) { \
+ case 0x21: \
+ P.VU.elt<type##16_t>(rd_num, i, true) = P.VU.elt<type##8_t>(rs2_num, i); \
+ break; \
+ case 0x41: \
+ P.VU.elt<type##32_t>(rd_num, i, true) = P.VU.elt<type##8_t>(rs2_num, i); \
+ break; \
+ case 0x81: \
+ P.VU.elt<type##64_t>(rd_num, i, true) = P.VU.elt<type##8_t>(rs2_num, i); \
+ break; \
+ case 0x42: \
+ P.VU.elt<type##32_t>(rd_num, i, true) = P.VU.elt<type##16_t>(rs2_num, i); \
+ break; \
+ case 0x82: \
+ P.VU.elt<type##64_t>(rd_num, i, true) = P.VU.elt<type##16_t>(rs2_num, i); \
+ break; \
+ case 0x84: \
+ P.VU.elt<type##64_t>(rd_num, i, true) = P.VU.elt<type##32_t>(rs2_num, i); \
+ break; \
+ default: \
+ break; \
+ } \
+ VI_LOOP_END
+
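+// The pat byte above packs (dest width in bytes) << 4 | (source width in
+// bytes), so 0x41 means a 32-bit destination from an 8-bit source, i.e. a
+// vf4 extension. Usage sketch (illustrative):
+//
+//   VI_VV_EXT(2, uint);   // vzext.vf2
+//   VI_VV_EXT(4, int);    // vsext.vf4
+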
+//
+// vector: vfp helper
+//
+#define VI_VFP_COMMON \
+ require_fp; \
+ require((P.VU.vsew == e16 && p->extension_enabled(EXT_ZFH)) || \
+ (P.VU.vsew == e32 && p->extension_enabled('F')) || \
+ (P.VU.vsew == e64 && p->extension_enabled('D'))); \
+ require_vector(true); \
+ require(STATE.frm->read() < 0x5); \
+ reg_t UNUSED vl = P.VU.vl->read(); \
+ reg_t UNUSED rd_num = insn.rd(); \
+ reg_t UNUSED rs1_num = insn.rs1(); \
+ reg_t UNUSED rs2_num = insn.rs2(); \
+ softfloat_roundingMode = STATE.frm->read();
+
+#define VI_VFP_LOOP_BASE \
+ VI_VFP_COMMON \
+ for (reg_t i = P.VU.vstart->read(); i < vl; ++i) { \
+ VI_LOOP_ELEMENT_SKIP();
+
+#define VI_VFP_LOOP_CMP_BASE \
+ VI_VFP_COMMON \
+ for (reg_t i = P.VU.vstart->read(); i < vl; ++i) { \
+ VI_LOOP_ELEMENT_SKIP(); \
+ uint64_t mmask = UINT64_C(1) << mpos; \
+ uint64_t &vd = P.VU.elt<uint64_t>(rd_num, midx, true); \
+ uint64_t res = 0;
+
+#define VI_VFP_LOOP_REDUCTION_BASE(width) \
+  float##width##_t vd_0 = P.VU.elt<float##width##_t>(rs1_num, 0); \
+ bool is_active = false; \
+ for (reg_t i = P.VU.vstart->read(); i < vl; ++i) { \
+ VI_LOOP_ELEMENT_SKIP(); \
+ float##width##_t vs2 = P.VU.elt<float##width##_t>(rs2_num, i); \
+    is_active = true;
+
+#define VI_VFP_LOOP_WIDE_REDUCTION_BASE \
+ VI_VFP_COMMON \
+ float64_t vd_0 = f64(P.VU.elt<float64_t>(rs1_num, 0).v); \
+ for (reg_t i = P.VU.vstart->read(); i < vl; ++i) { \
+ VI_LOOP_ELEMENT_SKIP();
+
+#define VI_VFP_LOOP_END \
+ } \
+  P.VU.vstart->write(0);
+
+#define VI_VFP_LOOP_REDUCTION_END(x) \
+ } \
+ P.VU.vstart->write(0); \
+ if (vl > 0) { \
+ if (is_propagate && !is_active) { \
+ switch (x) { \
+ case e16: { \
+ auto ret = f16_classify(f16(vd_0.v)); \
+ if (ret & 0x300) { \
+ if (ret & 0x100) { \
+ softfloat_exceptionFlags |= softfloat_flag_invalid; \
+ set_fp_exceptions; \
+ } \
+ P.VU.elt<uint16_t>(rd_num, 0, true) = defaultNaNF16UI; \
+ } else { \
+ P.VU.elt<uint16_t>(rd_num, 0, true) = vd_0.v; \
+ } \
+ } \
+ break; \
+ case e32: { \
+ auto ret = f32_classify(f32(vd_0.v)); \
+ if (ret & 0x300) { \
+ if (ret & 0x100) { \
+ softfloat_exceptionFlags |= softfloat_flag_invalid; \
+ set_fp_exceptions; \
+ } \
+ P.VU.elt<uint32_t>(rd_num, 0, true) = defaultNaNF32UI; \
+ } else { \
+ P.VU.elt<uint32_t>(rd_num, 0, true) = vd_0.v; \
+ } \
+ } \
+ break; \
+ case e64: { \
+ auto ret = f64_classify(f64(vd_0.v)); \
+ if (ret & 0x300) { \
+ if (ret & 0x100) { \
+ softfloat_exceptionFlags |= softfloat_flag_invalid; \
+ set_fp_exceptions; \
+ } \
+ P.VU.elt<uint64_t>(rd_num, 0, true) = defaultNaNF64UI; \
+ } else { \
+ P.VU.elt<uint64_t>(rd_num, 0, true) = vd_0.v; \
+ } \
+ } \
+ break; \
+ } \
+ } else { \
+ P.VU.elt<type_sew_t<x>::type>(rd_num, 0, true) = vd_0.v; \
+ } \
+ }
+
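+// Note on the propagate path above: the f*_classify results use the fclass
+// encoding, where bit 8 marks a signaling NaN and bit 9 a quiet NaN, so
+// (ret & 0x300) detects any NaN and (ret & 0x100) the signaling case that
+// must additionally raise the invalid flag; a NaN accumulator is then
+// canonicalized to the default NaN encoding.
+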
+#define VI_VFP_LOOP_CMP_END \
+ switch (P.VU.vsew) { \
+ case e16: \
+ case e32: \
+ case e64: { \
+ vd = (vd & ~mmask) | (((res) << mpos) & mmask); \
+ break; \
+ } \
+ default: \
+ require(0); \
+ break; \
+ }; \
+ } \
+ P.VU.vstart->write(0);
+
+#define VI_VFP_VV_LOOP(BODY16, BODY32, BODY64) \
+ VI_CHECK_SSS(true); \
+ VI_VFP_LOOP_BASE \
+ switch (P.VU.vsew) { \
+ case e16: { \
+ VFP_VV_PARAMS(16); \
+ BODY16; \
+ set_fp_exceptions; \
+ break; \
+ } \
+ case e32: { \
+ VFP_VV_PARAMS(32); \
+ BODY32; \
+ set_fp_exceptions; \
+ break; \
+ } \
+ case e64: { \
+ VFP_VV_PARAMS(64); \
+ BODY64; \
+ set_fp_exceptions; \
+ break; \
+ } \
+ default: \
+ require(0); \
+ break; \
+ }; \
+ DEBUG_RVV_FP_VV; \
+ VI_VFP_LOOP_END
+
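+// Usage sketch (illustrative): FP arithmetic supplies one body per SEW,
+// e.g. vfadd.vv is roughly
+//
+//   VI_VFP_VV_LOOP
+//   ({ vd = f16_add(vs2, vs1); },
+//    { vd = f32_add(vs2, vs1); },
+//    { vd = f64_add(vs2, vs1); })
+//
+// The .vf forms use VI_VFP_VF_LOOP below, with rs1 taken from the scalar FP
+// register file.
+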
+#define VI_VFP_V_LOOP(BODY16, BODY32, BODY64) \
+ VI_CHECK_SSS(false); \
+ VI_VFP_LOOP_BASE \
+ switch (P.VU.vsew) { \
+ case e16: { \
+ VFP_V_PARAMS(16); \
+ BODY16; \
+ break; \
+ } \
+ case e32: { \
+ VFP_V_PARAMS(32); \
+ BODY32; \
+ break; \
+ } \
+ case e64: { \
+ VFP_V_PARAMS(64); \
+ BODY64; \
+ break; \
+ } \
+ default: \
+ require(0); \
+ break; \
+ }; \
+ set_fp_exceptions; \
+ VI_VFP_LOOP_END
+
+#define VI_VFP_VV_LOOP_REDUCTION(BODY16, BODY32, BODY64) \
+ VI_CHECK_REDUCTION(false) \
+ VI_VFP_COMMON \
+ switch (P.VU.vsew) { \
+ case e16: { \
+ VI_VFP_LOOP_REDUCTION_BASE(16) \
+ BODY16; \
+ set_fp_exceptions; \
+ VI_VFP_LOOP_REDUCTION_END(e16) \
+ break; \
+ } \
+ case e32: { \
+ VI_VFP_LOOP_REDUCTION_BASE(32) \
+ BODY32; \
+ set_fp_exceptions; \
+ VI_VFP_LOOP_REDUCTION_END(e32) \
+ break; \
+ } \
+ case e64: { \
+ VI_VFP_LOOP_REDUCTION_BASE(64) \
+ BODY64; \
+ set_fp_exceptions; \
+ VI_VFP_LOOP_REDUCTION_END(e64) \
+ break; \
+ } \
+ default: \
+ require(0); \
+ break; \
+  };
+
+#define VI_VFP_VV_LOOP_WIDE_REDUCTION(BODY16, BODY32) \
+ VI_CHECK_REDUCTION(true) \
+ VI_VFP_COMMON \
+ require((P.VU.vsew == e16 && p->extension_enabled('F')) || \
+ (P.VU.vsew == e32 && p->extension_enabled('D'))); \
+ bool is_active = false; \
+ switch (P.VU.vsew) { \
+ case e16: { \
+ float32_t vd_0 = P.VU.elt<float32_t>(rs1_num, 0); \
+ for (reg_t i = P.VU.vstart->read(); i < vl; ++i) { \
+ VI_LOOP_ELEMENT_SKIP(); \
+ is_active = true; \
+ float32_t vs2 = f16_to_f32(P.VU.elt<float16_t>(rs2_num, i)); \
+ BODY16; \
+ set_fp_exceptions; \
+ VI_VFP_LOOP_REDUCTION_END(e32) \
+ break; \
+ } \
+ case e32: { \
+ float64_t vd_0 = P.VU.elt<float64_t>(rs1_num, 0); \
+ for (reg_t i = P.VU.vstart->read(); i < vl; ++i) { \
+ VI_LOOP_ELEMENT_SKIP(); \
+ is_active = true; \
+ float64_t vs2 = f32_to_f64(P.VU.elt<float32_t>(rs2_num, i)); \
+ BODY32; \
+ set_fp_exceptions; \
+ VI_VFP_LOOP_REDUCTION_END(e64) \
+ break; \
+ } \
+ default: \
+ require(0); \
+ break; \
+  };
+
+#define VI_VFP_VF_LOOP(BODY16, BODY32, BODY64) \
+ VI_CHECK_SSS(false); \
+ VI_VFP_LOOP_BASE \
+ switch (P.VU.vsew) { \
+ case e16: { \
+ VFP_VF_PARAMS(16); \
+ BODY16; \
+ set_fp_exceptions; \
+ break; \
+ } \
+ case e32: { \
+ VFP_VF_PARAMS(32); \
+ BODY32; \
+ set_fp_exceptions; \
+ break; \
+ } \
+ case e64: { \
+ VFP_VF_PARAMS(64); \
+ BODY64; \
+ set_fp_exceptions; \
+ break; \
+ } \
+ default: \
+ require(0); \
+ break; \
+ }; \
+ DEBUG_RVV_FP_VF; \
+ VI_VFP_LOOP_END
+
+#define VI_VFP_VV_LOOP_CMP(BODY16, BODY32, BODY64) \
+ VI_CHECK_MSS(true); \
+ VI_VFP_LOOP_CMP_BASE \
+ switch (P.VU.vsew) { \
+ case e16: { \
+ VFP_VV_PARAMS(16); \
+ BODY16; \
+ set_fp_exceptions; \
+ break; \
+ } \
+ case e32: { \
+ VFP_VV_PARAMS(32); \
+ BODY32; \
+ set_fp_exceptions; \
+ break; \
+ } \
+ case e64: { \
+ VFP_VV_PARAMS(64); \
+ BODY64; \
+ set_fp_exceptions; \
+ break; \
+ } \
+ default: \
+ require(0); \
+ break; \
+ }; \
+  VI_VFP_LOOP_CMP_END
+
+#define VI_VFP_VF_LOOP_CMP(BODY16, BODY32, BODY64) \
+ VI_CHECK_MSS(false); \
+ VI_VFP_LOOP_CMP_BASE \
+ switch (P.VU.vsew) { \
+ case e16: { \
+ VFP_VF_PARAMS(16); \
+ BODY16; \
+ set_fp_exceptions; \
+ break; \
+ } \
+ case e32: { \
+ VFP_VF_PARAMS(32); \
+ BODY32; \
+ set_fp_exceptions; \
+ break; \
+ } \
+ case e64: { \
+ VFP_VF_PARAMS(64); \
+ BODY64; \
+ set_fp_exceptions; \
+ break; \
+ } \
+ default: \
+ require(0); \
+ break; \
+ }; \
+  VI_VFP_LOOP_CMP_END
+
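+// Usage sketch (illustrative): mask-producing FP compares set res per
+// element, e.g. vmfeq.vv is roughly
+//
+//   VI_VFP_VV_LOOP_CMP
+//   ({ res = f16_eq(vs2, vs1); },
+//    { res = f32_eq(vs2, vs1); },
+//    { res = f64_eq(vs2, vs1); })
+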
+#define VI_VFP_VF_LOOP_WIDE(BODY16, BODY32) \
+ VI_CHECK_DSS(false); \
+ VI_VFP_LOOP_BASE \
+ switch (P.VU.vsew) { \
+ case e16: { \
+ float32_t &vd = P.VU.elt<float32_t>(rd_num, i, true); \
+ float32_t vs2 = f16_to_f32(P.VU.elt<float16_t>(rs2_num, i)); \
+ float32_t rs1 = f16_to_f32(FRS1_H); \
+ BODY16; \
+ set_fp_exceptions; \
+ break; \
+ } \
+ case e32: { \
+ float64_t &vd = P.VU.elt<float64_t>(rd_num, i, true); \
+ float64_t vs2 = f32_to_f64(P.VU.elt<float32_t>(rs2_num, i)); \
+ float64_t rs1 = f32_to_f64(FRS1_F); \
+ BODY32; \
+ set_fp_exceptions; \
+ break; \
+ } \
+ default: \
+ require(0); \
+ break; \
+ }; \
+ DEBUG_RVV_FP_VV; \
+ VI_VFP_LOOP_END
+
+#define VI_VFP_VV_LOOP_WIDE(BODY16, BODY32) \
+ VI_CHECK_DSS(true); \
+ VI_VFP_LOOP_BASE \
+ switch (P.VU.vsew) { \
+ case e16: { \
+ float32_t &vd = P.VU.elt<float32_t>(rd_num, i, true); \
+ float32_t vs2 = f16_to_f32(P.VU.elt<float16_t>(rs2_num, i)); \
+ float32_t vs1 = f16_to_f32(P.VU.elt<float16_t>(rs1_num, i)); \
+ BODY16; \
+ set_fp_exceptions; \
+ break; \
+ } \
+ case e32: { \
+ float64_t &vd = P.VU.elt<float64_t>(rd_num, i, true); \
+ float64_t vs2 = f32_to_f64(P.VU.elt<float32_t>(rs2_num, i)); \
+ float64_t vs1 = f32_to_f64(P.VU.elt<float32_t>(rs1_num, i)); \
+ BODY32; \
+ set_fp_exceptions; \
+ break; \
+ } \
+ default: \
+ require(0); \
+ break; \
+ }; \
+ DEBUG_RVV_FP_VV; \
+ VI_VFP_LOOP_END
+
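+// Usage sketch (illustrative): the widening forms convert both sources up
+// one width so the body runs at 2*SEW, e.g. vfwadd.vv is roughly
+//
+//   VI_VFP_VV_LOOP_WIDE
+//   ({ vd = f32_add(vs2, vs1); },
+//    { vd = f64_add(vs2, vs1); })
+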
+#define VI_VFP_WF_LOOP_WIDE(BODY16, BODY32) \
+ VI_CHECK_DDS(false); \
+ VI_VFP_LOOP_BASE \
+ switch (P.VU.vsew) { \
+ case e16: { \
+ float32_t &vd = P.VU.elt<float32_t>(rd_num, i, true); \
+ float32_t vs2 = P.VU.elt<float32_t>(rs2_num, i); \
+ float32_t rs1 = f16_to_f32(FRS1_H); \
+ BODY16; \
+ set_fp_exceptions; \
+ break; \
+ } \
+ case e32: { \
+ float64_t &vd = P.VU.elt<float64_t>(rd_num, i, true); \
+ float64_t vs2 = P.VU.elt<float64_t>(rs2_num, i); \
+ float64_t rs1 = f32_to_f64(FRS1_F); \
+ BODY32; \
+ set_fp_exceptions; \
+ break; \
+ } \
+ default: \
+ require(0); \
+ }; \
+ DEBUG_RVV_FP_VV; \
+ VI_VFP_LOOP_END
+
+#define VI_VFP_WV_LOOP_WIDE(BODY16, BODY32) \
+ VI_CHECK_DDS(true); \
+ VI_VFP_LOOP_BASE \
+ switch (P.VU.vsew) { \
+ case e16: { \
+ float32_t &vd = P.VU.elt<float32_t>(rd_num, i, true); \
+ float32_t vs2 = P.VU.elt<float32_t>(rs2_num, i); \
+ float32_t vs1 = f16_to_f32(P.VU.elt<float16_t>(rs1_num, i)); \
+ BODY16; \
+ set_fp_exceptions; \
+ break; \
+ } \
+ case e32: { \
+ float64_t &vd = P.VU.elt<float64_t>(rd_num, i, true); \
+ float64_t vs2 = P.VU.elt<float64_t>(rs2_num, i); \
+ float64_t vs1 = f32_to_f64(P.VU.elt<float32_t>(rs1_num, i)); \
+ BODY32; \
+ set_fp_exceptions; \
+ break; \
+ } \
+ default: \
+ require(0); \
+ }; \
+ DEBUG_RVV_FP_VV; \
+ VI_VFP_LOOP_END
+
+#define VI_VFP_LOOP_SCALE_BASE \
+ require_fp; \
+ require_vector(true); \
+ require(STATE.frm->read() < 0x5); \
+ reg_t vl = P.VU.vl->read(); \
+ reg_t rd_num = insn.rd(); \
+ reg_t UNUSED rs1_num = insn.rs1(); \
+ reg_t rs2_num = insn.rs2(); \
+ softfloat_roundingMode = STATE.frm->read(); \
+ for (reg_t i = P.VU.vstart->read(); i < vl; ++i) { \
+ VI_LOOP_ELEMENT_SKIP();
+
+#define VI_VFP_CVT_LOOP(CVT_PARAMS, CHECK, BODY) \
+ CHECK \
+ VI_VFP_LOOP_SCALE_BASE \
+ CVT_PARAMS \
+ BODY \
+ set_fp_exceptions; \
+ VI_VFP_LOOP_END
+
+#define VI_VFP_CVT_INT_TO_FP(BODY16, BODY32, BODY64, sign) \
+ VI_CHECK_SSS(false); \
+ VI_VFP_COMMON \
+ switch (P.VU.vsew) { \
+ case e16: \
+ { VI_VFP_CVT_LOOP(CVT_INT_TO_FP_PARAMS(16, 16, sign), \
+                      { require(p->extension_enabled(EXT_ZFH)); }, \
+ BODY16); } \
+ break; \
+ case e32: \
+ { VI_VFP_CVT_LOOP(CVT_INT_TO_FP_PARAMS(32, 32, sign), \
+                      { require(p->extension_enabled('F')); }, \
+ BODY32); } \
+ break; \
+ case e64: \
+ { VI_VFP_CVT_LOOP(CVT_INT_TO_FP_PARAMS(64, 64, sign), \
+                      { require(p->extension_enabled('D')); }, \
+ BODY64); } \
+ break; \
+ default: \
+ require(0); \
+ break; \
+ }
+
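+// Usage sketch (illustrative): same-width converts plug per-SEW softfloat
+// calls into the CVT loop along with the signedness tag, e.g. vfcvt.f.x.v
+// is roughly
+//
+//   VI_VFP_CVT_INT_TO_FP(
+//     { vd = i32_to_f16(vs2); },   // BODY16
+//     { vd = i32_to_f32(vs2); },   // BODY32
+//     { vd = i64_to_f64(vs2); },   // BODY64
+//     int)
+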
+#define VI_VFP_CVT_FP_TO_INT(BODY16, BODY32, BODY64, sign) \
+ VI_CHECK_SSS(false); \
+ VI_VFP_COMMON \
+ switch (P.VU.vsew) { \
+ case e16: \
+ { VI_VFP_CVT_LOOP(CVT_FP_TO_INT_PARAMS(16, 16, sign), \
+                      { require(p->extension_enabled(EXT_ZFH)); }, \
+ BODY16); } \
+ break; \
+ case e32: \
+ { VI_VFP_CVT_LOOP(CVT_FP_TO_INT_PARAMS(32, 32, sign), \
+                      { require(p->extension_enabled('F')); }, \
+ BODY32); } \
+ break; \
+ case e64: \
+ { VI_VFP_CVT_LOOP(CVT_FP_TO_INT_PARAMS(64, 64, sign), \
+                      { require(p->extension_enabled('D')); }, \
+ BODY64); } \
+ break; \
+ default: \
+ require(0); \
+ break; \
+ }
+
+#define VI_VFP_WCVT_FP_TO_FP(BODY8, BODY16, BODY32, \
+ CHECK8, CHECK16, CHECK32) \
+ VI_CHECK_DSS(false); \
+ switch (P.VU.vsew) { \
+ case e16: \
+ { VI_VFP_CVT_LOOP(CVT_FP_TO_FP_PARAMS(16, 32), CHECK16, BODY16); } \
+ break; \
+ case e32: \
+ { VI_VFP_CVT_LOOP(CVT_FP_TO_FP_PARAMS(32, 64), CHECK32, BODY32); } \
+ break; \
+ default: \
+ require(0); \
+ break; \
+ }
+
+#define VI_VFP_WCVT_INT_TO_FP(BODY8, BODY16, BODY32, \
+ CHECK8, CHECK16, CHECK32, \
+ sign) \
+ VI_CHECK_DSS(false); \
+ switch (P.VU.vsew) { \
+ case e8: \
+ { VI_VFP_CVT_LOOP(CVT_INT_TO_FP_PARAMS(8, 16, sign), CHECK8, BODY8); } \
+ break; \
+ case e16: \
+ { VI_VFP_CVT_LOOP(CVT_INT_TO_FP_PARAMS(16, 32, sign), CHECK16, BODY16); } \
+ break; \
+ case e32: \
+ { VI_VFP_CVT_LOOP(CVT_INT_TO_FP_PARAMS(32, 64, sign), CHECK32, BODY32); } \
+ break; \
+ default: \
+ require(0); \
+ break; \
+ }
+
+#define VI_VFP_WCVT_FP_TO_INT(BODY8, BODY16, BODY32, \
+ CHECK8, CHECK16, CHECK32, \
+ sign) \
+ VI_CHECK_DSS(false); \
+ switch (P.VU.vsew) { \
+ case e16: \
+ { VI_VFP_CVT_LOOP(CVT_FP_TO_INT_PARAMS(16, 32, sign), CHECK16, BODY16); } \
+ break; \
+ case e32: \
+ { VI_VFP_CVT_LOOP(CVT_FP_TO_INT_PARAMS(32, 64, sign), CHECK32, BODY32); } \
+ break; \
+ default: \
+ require(0); \
+ break; \
+ }
+
+#define VI_VFP_NCVT_FP_TO_FP(BODY8, BODY16, BODY32, \
+ CHECK8, CHECK16, CHECK32) \
+ VI_CHECK_SDS(false); \
+ switch (P.VU.vsew) { \
+ case e16: \
+ { VI_VFP_CVT_LOOP(CVT_FP_TO_FP_PARAMS(32, 16), CHECK16, BODY16); } \
+ break; \
+ case e32: \
+ { VI_VFP_CVT_LOOP(CVT_FP_TO_FP_PARAMS(64, 32), CHECK32, BODY32); } \
+ break; \
+ default: \
+ require(0); \
+ break; \
+ }
+
+#define VI_VFP_NCVT_INT_TO_FP(BODY8, BODY16, BODY32, \
+ CHECK8, CHECK16, CHECK32, \
+ sign) \
+ VI_CHECK_SDS(false); \
+ switch (P.VU.vsew) { \
+ case e16: \
+ { VI_VFP_CVT_LOOP(CVT_INT_TO_FP_PARAMS(32, 16, sign), CHECK16, BODY16); } \
+ break; \
+ case e32: \
+ { VI_VFP_CVT_LOOP(CVT_INT_TO_FP_PARAMS(64, 32, sign), CHECK32, BODY32); } \
+ break; \
+ default: \
+ require(0); \
+ break; \
+ }
+
+#define VI_VFP_NCVT_FP_TO_INT(BODY8, BODY16, BODY32, \
+ CHECK8, CHECK16, CHECK32, \
+ sign) \
+ VI_CHECK_SDS(false); \
+ switch (P.VU.vsew) { \
+ case e8: \
+ { VI_VFP_CVT_LOOP(CVT_FP_TO_INT_PARAMS(16, 8, sign), CHECK8, BODY8); } \
+ break; \
+ case e16: \
+ { VI_VFP_CVT_LOOP(CVT_FP_TO_INT_PARAMS(32, 16, sign), CHECK16, BODY16); } \
+ break; \
+ case e32: \
+ { VI_VFP_CVT_LOOP(CVT_FP_TO_INT_PARAMS(64, 32, sign), CHECK32, BODY32); } \
+ break; \
+ default: \
+ require(0); \
+ break; \
+ }
+
+#endif