From c52ae38779ae027db7034abd9b0a61970df8dd45 Mon Sep 17 00:00:00 2001 From: Ryan Buchner Date: Thu, 21 Apr 2022 11:19:54 -0700 Subject: Add require_alignment tag to store_func Will be used similarly as in load_func. --- riscv/mmu.h | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/riscv/mmu.h b/riscv/mmu.h index dcf338f..03a2d39 100644 --- a/riscv/mmu.h +++ b/riscv/mmu.h @@ -147,7 +147,7 @@ public: // template for functions that store an aligned value to memory #define store_func(type, prefix, xlate_flags) \ - void prefix##_##type(reg_t addr, type##_t val, bool actually_store=true) { \ + void prefix##_##type(reg_t addr, type##_t val, bool actually_store=true, bool require_alignment=false) { \ if (unlikely(addr & (sizeof(type##_t)-1))) \ return misaligned_store(addr, val, sizeof(type##_t), xlate_flags); \ reg_t vpn = addr >> PGSHIFT; \ -- cgit v1.1 From 004bdc492710e0abd23771fdba33097a9ae1f792 Mon Sep 17 00:00:00 2001 From: Ryan Buchner Date: Thu, 21 Apr 2022 11:21:27 -0700 Subject: Set require alignment to true on the 'fake' store in amo_func. --- riscv/mmu.h | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/riscv/mmu.h b/riscv/mmu.h index 03a2d39..c08acd3 100644 --- a/riscv/mmu.h +++ b/riscv/mmu.h @@ -196,7 +196,7 @@ public: template \ type##_t amo_##type(reg_t addr, op f) { \ convert_load_traps_to_store_traps({ \ - store_##type(addr, 0, false); \ + store_##type(addr, 0, false, true); \ auto lhs = load_##type(addr, true); \ store_##type(addr, f(lhs)); \ return lhs; \ -- cgit v1.1 From 61eba540e15d4f27d8c3a78b605c614096b3e552 Mon Sep 17 00:00:00 2001 From: Ryan Buchner Date: Thu, 21 Apr 2022 11:23:35 -0700 Subject: Modify store_func to throw fault if misaligned and require_alignment=true --- riscv/mmu.h | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) diff --git a/riscv/mmu.h b/riscv/mmu.h index c08acd3..415cf1a 100644 --- a/riscv/mmu.h +++ b/riscv/mmu.h @@ -148,8 +148,10 @@ public: // template for functions that store an aligned value to memory #define store_func(type, prefix, xlate_flags) \ void prefix##_##type(reg_t addr, type##_t val, bool actually_store=true, bool require_alignment=false) { \ - if (unlikely(addr & (sizeof(type##_t)-1))) \ - return misaligned_store(addr, val, sizeof(type##_t), xlate_flags); \ + if (unlikely(addr & (sizeof(type##_t)-1))) { \ + if (require_alignment) store_conditional_address_misaligned(addr); \ + else return misaligned_store(addr, val, sizeof(type##_t), xlate_flags); \ + } \ reg_t vpn = addr >> PGSHIFT; \ size_t size = sizeof(type##_t); \ if ((xlate_flags) == 0 && likely(tlb_store_tag[vpn % TLB_ENTRIES] == vpn)) { \ -- cgit v1.1 From 8948aef6dcad90fd80d6b8267e2fc2eeb4163a64 Mon Sep 17 00:00:00 2001 From: Ryan Buchner Date: Thu, 21 Apr 2022 11:35:26 -0700 Subject: Add actually_store tag to misaligned_store function Is passed along to the contained store_func. --- riscv/mmu.h | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/riscv/mmu.h b/riscv/mmu.h index 415cf1a..bb4c27e 100644 --- a/riscv/mmu.h +++ b/riscv/mmu.h @@ -65,11 +65,11 @@ public: #endif } - inline void misaligned_store(reg_t addr, reg_t data, size_t size, uint32_t xlate_flags) + inline void misaligned_store(reg_t addr, reg_t data, size_t size, uint32_t xlate_flags, bool actually_store=true) { #ifdef RISCV_ENABLE_MISALIGNED for (size_t i = 0; i < size; i++) - store_uint8(addr + (target_big_endian? size-1-i : i), data >> (i * 8)); + store_uint8(addr + (target_big_endian? 
size-1-i : i), data >> (i * 8), actually_store); #else bool gva = ((proc) ? proc->state.v : false) || (RISCV_XLATE_VIRT & xlate_flags); throw trap_store_address_misaligned(gva, addr, 0, 0); -- cgit v1.1 From 01b88b06693d91ee3d2e5b80020e0b934828d47d Mon Sep 17 00:00:00 2001 From: Ryan Buchner Date: Thu, 21 Apr 2022 11:40:32 -0700 Subject: Pass acutally_store from store_func to misaligned_store In future, someone may expect this functionality. --- riscv/mmu.h | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/riscv/mmu.h b/riscv/mmu.h index bb4c27e..8964e29 100644 --- a/riscv/mmu.h +++ b/riscv/mmu.h @@ -150,7 +150,7 @@ public: void prefix##_##type(reg_t addr, type##_t val, bool actually_store=true, bool require_alignment=false) { \ if (unlikely(addr & (sizeof(type##_t)-1))) { \ if (require_alignment) store_conditional_address_misaligned(addr); \ - else return misaligned_store(addr, val, sizeof(type##_t), xlate_flags); \ + else return misaligned_store(addr, val, sizeof(type##_t), xlate_flags, actually_store); \ } \ reg_t vpn = addr >> PGSHIFT; \ size_t size = sizeof(type##_t); \ -- cgit v1.1 From 8e6cf2916b9cb809744ac8dbcc04d2a2b108c5b5 Mon Sep 17 00:00:00 2001 From: Tim Newsome Date: Fri, 22 Apr 2022 10:28:35 -0700 Subject: Whitespace fix. --- riscv/triggers.h | 1 - 1 file changed, 1 deletion(-) diff --git a/riscv/triggers.h b/riscv/triggers.h index ad294c8..3a9c34b 100644 --- a/riscv/triggers.h +++ b/riscv/triggers.h @@ -110,7 +110,6 @@ public: bool store_bit; bool load_bit; reg_t tdata2; - }; class module_t { -- cgit v1.1 From d9131e3b1dc571a657f9615903d1b7220c7c7d7f Mon Sep 17 00:00:00 2001 From: Tim Newsome Date: Fri, 22 Apr 2022 10:40:01 -0700 Subject: Remove mcontrol_t.type. It's not writable anyway. --- riscv/triggers.cc | 4 ++-- riscv/triggers.h | 1 - 2 files changed, 2 insertions(+), 3 deletions(-) diff --git a/riscv/triggers.cc b/riscv/triggers.cc index 69888bf..1bcda2a 100644 --- a/riscv/triggers.cc +++ b/riscv/triggers.cc @@ -4,7 +4,7 @@ namespace triggers { mcontrol_t::mcontrol_t() : - type(2), maskmax(0), select(false), timing(false), chain_bit(false), + maskmax(0), select(false), timing(false), chain_bit(false), match(MATCH_EQUAL), m(false), h(false), s(false), u(false), execute_bit(false), store_bit(false), load_bit(false) { @@ -13,7 +13,7 @@ mcontrol_t::mcontrol_t() : reg_t mcontrol_t::tdata1_read(const processor_t * const proc) const noexcept { reg_t v = 0; auto xlen = proc->get_xlen(); - v = set_field(v, MCONTROL_TYPE(xlen), type); + v = set_field(v, MCONTROL_TYPE(xlen), 2); v = set_field(v, MCONTROL_DMODE(xlen), dmode); v = set_field(v, MCONTROL_MASKMAX(xlen), maskmax); v = set_field(v, MCONTROL_SELECT, select); diff --git a/riscv/triggers.h b/riscv/triggers.h index 3a9c34b..c21b638 100644 --- a/riscv/triggers.h +++ b/riscv/triggers.h @@ -96,7 +96,6 @@ private: bool simple_match(unsigned xlen, reg_t value) const; public: - uint8_t type; uint8_t maskmax; bool select; bool timing; -- cgit v1.1 From 16413646bb44d8752c25f0f36745b6b74a2a1025 Mon Sep 17 00:00:00 2001 From: Tim Newsome Date: Fri, 22 Apr 2022 10:48:00 -0700 Subject: Remove maskmax as a variable. 
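As with the type field removed in the previous commit, maskmax has no backing behaviour in this model, so tdata1_read can simply report the architectural constant instead of carrying a member that is only ever 0. Purely for illustration (the helper name constant_tdata1_fields is hypothetical; the authoritative change is the diff below, and the state-backed fields are still merged in exactly as before):

    // Sketch: tdata1 fields with no backing state are emitted as constants
    // on every read, using the same set_field/MCONTROL_* helpers as the code.
    static reg_t constant_tdata1_fields(unsigned xlen) {
      reg_t v = 0;
      v = set_field(v, MCONTROL_TYPE(xlen), 2);    // always an address/data match trigger
      v = set_field(v, MCONTROL_MASKMAX(xlen), 0); // maskmax is not modelled
      return v;
    }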
--- riscv/triggers.cc | 4 ++-- riscv/triggers.h | 1 - 2 files changed, 2 insertions(+), 3 deletions(-) diff --git a/riscv/triggers.cc b/riscv/triggers.cc index 1bcda2a..99e3597 100644 --- a/riscv/triggers.cc +++ b/riscv/triggers.cc @@ -4,7 +4,7 @@ namespace triggers { mcontrol_t::mcontrol_t() : - maskmax(0), select(false), timing(false), chain_bit(false), + select(false), timing(false), chain_bit(false), match(MATCH_EQUAL), m(false), h(false), s(false), u(false), execute_bit(false), store_bit(false), load_bit(false) { @@ -15,7 +15,7 @@ reg_t mcontrol_t::tdata1_read(const processor_t * const proc) const noexcept { auto xlen = proc->get_xlen(); v = set_field(v, MCONTROL_TYPE(xlen), 2); v = set_field(v, MCONTROL_DMODE(xlen), dmode); - v = set_field(v, MCONTROL_MASKMAX(xlen), maskmax); + v = set_field(v, MCONTROL_MASKMAX(xlen), 0); v = set_field(v, MCONTROL_SELECT, select); v = set_field(v, MCONTROL_TIMING, timing); v = set_field(v, MCONTROL_ACTION, action); diff --git a/riscv/triggers.h b/riscv/triggers.h index c21b638..1e74c91 100644 --- a/riscv/triggers.h +++ b/riscv/triggers.h @@ -96,7 +96,6 @@ private: bool simple_match(unsigned xlen, reg_t value) const; public: - uint8_t maskmax; bool select; bool timing; bool chain_bit; -- cgit v1.1 From f2f6037fea9313f43f7e94872304ab18ead0181d Mon Sep 17 00:00:00 2001 From: Tim Newsome Date: Fri, 22 Apr 2022 10:57:31 -0700 Subject: Remove mcontrol_t.h It was removed from the spec a long time ago. --- riscv/triggers.cc | 4 +--- riscv/triggers.h | 1 - 2 files changed, 1 insertion(+), 4 deletions(-) diff --git a/riscv/triggers.cc b/riscv/triggers.cc index 99e3597..7f6be44 100644 --- a/riscv/triggers.cc +++ b/riscv/triggers.cc @@ -5,7 +5,7 @@ namespace triggers { mcontrol_t::mcontrol_t() : select(false), timing(false), chain_bit(false), - match(MATCH_EQUAL), m(false), h(false), s(false), u(false), + match(MATCH_EQUAL), m(false), s(false), u(false), execute_bit(false), store_bit(false), load_bit(false) { } @@ -22,7 +22,6 @@ reg_t mcontrol_t::tdata1_read(const processor_t * const proc) const noexcept { v = set_field(v, MCONTROL_CHAIN, chain_bit); v = set_field(v, MCONTROL_MATCH, match); v = set_field(v, MCONTROL_M, m); - v = set_field(v, MCONTROL_H, h); v = set_field(v, MCONTROL_S, s); v = set_field(v, MCONTROL_U, u); v = set_field(v, MCONTROL_EXECUTE, execute_bit); @@ -56,7 +55,6 @@ bool mcontrol_t::tdata1_write(processor_t * const proc, const reg_t val) noexcep break; } m = get_field(val, MCONTROL_M); - h = get_field(val, MCONTROL_H); s = get_field(val, MCONTROL_S); u = get_field(val, MCONTROL_U); execute_bit = get_field(val, MCONTROL_EXECUTE); diff --git a/riscv/triggers.h b/riscv/triggers.h index 1e74c91..75ee405 100644 --- a/riscv/triggers.h +++ b/riscv/triggers.h @@ -101,7 +101,6 @@ public: bool chain_bit; match_t match; bool m; - bool h; bool s; bool u; bool execute_bit; -- cgit v1.1 From 1cfffeda1e8323729e584c904b2ce78681ba0283 Mon Sep 17 00:00:00 2001 From: Yan Date: Sat, 23 Apr 2022 13:46:07 +0800 Subject: Add zknd zkne zknh zksed zksh disassembly support (#979) --- disasm/disasm.cc | 70 ++++++++++++++++++++++++++++++++++++++++++++++++++++++++ 1 file changed, 70 insertions(+) diff --git a/disasm/disasm.cc b/disasm/disasm.cc index 52dede6..d18f089 100644 --- a/disasm/disasm.cc +++ b/disasm/disasm.cc @@ -415,6 +415,18 @@ struct : public arg_t { } } p_imm6; +struct : public arg_t { + std::string to_string(insn_t insn) const { + return std::to_string((int)insn.bs()); + } +} bs; + +struct : public arg_t { + std::string to_string(insn_t insn) const { + 
return std::to_string((int)insn.rcon()); + } +} rcon; + typedef struct { reg_t match; reg_t mask; @@ -2022,6 +2034,64 @@ void disassembler_t::add_instructions(const isa_parser_t* isa) if (isa->extension_enabled(EXT_ZICBOZ)) { DISASM_INSN("cbo.zero", cbo_zero, 0, {&xrs1}); } + + if (isa->extension_enabled(EXT_ZKND) || + isa->extension_enabled(EXT_ZKNE)) { + DISASM_INSN("aes64ks1i", aes64ks1i, 0, {&xrd, &xrs1, &rcon}); + DEFINE_RTYPE(aes64ks2); + } + + if (isa->extension_enabled(EXT_ZKND)) { + if(isa->get_max_xlen() == 64) { + DEFINE_RTYPE(aes64ds); + DEFINE_RTYPE(aes64dsm); + DEFINE_R1TYPE(aes64im); + } else if (isa->get_max_xlen() == 32) { + DISASM_INSN("aes32dsi", aes32dsi, 0, {&xrd, &xrs1, &xrs2, &bs}); + DISASM_INSN("aes32dsmi", aes32dsmi, 0, {&xrd, &xrs1, &xrs2, &bs}); + } + } + + if (isa->extension_enabled(EXT_ZKNE)) { + if(isa->get_max_xlen() == 64) { + DEFINE_RTYPE(aes64es); + DEFINE_RTYPE(aes64esm); + } else if (isa->get_max_xlen() == 32) { + DISASM_INSN("aes32esi", aes32esi, 0, {&xrd, &xrs1, &xrs2, &bs}); + DISASM_INSN("aes32esmi", aes32esmi, 0, {&xrd, &xrs1, &xrs2, &bs}); + } + } + + if (isa->extension_enabled(EXT_ZKNH)) { + DEFINE_R1TYPE(sha256sig0); + DEFINE_R1TYPE(sha256sig1); + DEFINE_R1TYPE(sha256sum0); + DEFINE_R1TYPE(sha256sum1); + if(isa->get_max_xlen() == 64) { + DEFINE_R1TYPE(sha512sig0); + DEFINE_R1TYPE(sha512sig1); + DEFINE_R1TYPE(sha512sum0); + DEFINE_R1TYPE(sha512sum1); + } else if (isa->get_max_xlen() == 32) { + DEFINE_RTYPE(sha512sig0h); + DEFINE_RTYPE(sha512sig0l); + DEFINE_RTYPE(sha512sig1h); + DEFINE_RTYPE(sha512sig1l); + DEFINE_RTYPE(sha512sum0r); + DEFINE_RTYPE(sha512sum1r); + } + } + + if (isa->extension_enabled(EXT_ZKSED)) { + DISASM_INSN("sm4ed", sm4ed, 0, {&xrd, &xrs1, &xrs2, &bs}); + DISASM_INSN("sm4ks", sm4ks, 0, {&xrd, &xrs1, &xrs2, &bs}); + } + + if (isa->extension_enabled(EXT_ZKSH)) { + DEFINE_R1TYPE(sm3p0); + DEFINE_R1TYPE(sm3p1); + } + } disassembler_t::disassembler_t(const isa_parser_t *isa) -- cgit v1.1 From 1df65613df9970dc7f5c2f3d1bf343dbb0497828 Mon Sep 17 00:00:00 2001 From: Andrew Waterman Date: Sat, 30 Apr 2022 16:59:07 -0700 Subject: Add missing description of --dtb in --help message --- spike_main/spike.cc | 1 + 1 file changed, 1 insertion(+) diff --git a/spike_main/spike.cc b/spike_main/spike.cc index 5529045..3629e35 100644 --- a/spike_main/spike.cc +++ b/spike_main/spike.cc @@ -56,6 +56,7 @@ static void help(int exit_code = 1) fprintf(stderr, " This flag can be used multiple times.\n"); fprintf(stderr, " --rbb-port= Listen on for remote bitbang connection\n"); fprintf(stderr, " --dump-dts Print device tree string and exit\n"); + fprintf(stderr, " --dtb= Use specified device tree blob [default: auto-generate]\n"); fprintf(stderr, " --disable-dtb Don't write the device tree blob into memory\n"); fprintf(stderr, " --kernel= Load kernel flat image into memory\n"); fprintf(stderr, " --initrd= Load kernel initrd into memory\n"); -- cgit v1.1 From 85fbd75d44c369ccb358d70bb4adc3a82a23c495 Mon Sep 17 00:00:00 2001 From: Tim Newsome Date: Thu, 28 Apr 2022 10:27:42 -0700 Subject: Implement mcontrol trigger hit bit. 
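The hit bit lets debug software work out, after the hart enters debug mode, which of the configured triggers actually matched: the model sets it on a match and keeps it writable so the debugger can clear it again. A minimal debugger-side sketch of that flow, in which csr_read(), csr_write(), num_triggers and report_trigger_fired() are hypothetical helpers and only CSR_MCONTROL_HIT (from debug_defines.h) plus the standard tselect/tdata1 CSR numbers are taken as given:

    // After a debug-mode entry caused by a trigger, scan tdata1.hit to see
    // which trigger(s) fired, then clear the bit for the next occurrence.
    for (unsigned t = 0; t < num_triggers; t++) {
      csr_write(CSR_TSELECT, t);
      uint64_t tdata1 = csr_read(CSR_TDATA1);
      if (tdata1 & CSR_MCONTROL_HIT) {
        report_trigger_fired(t);
        csr_write(CSR_TDATA1, tdata1 & ~CSR_MCONTROL_HIT); // hit is writable
      }
    }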
--- riscv/triggers.cc | 12 ++++++++++++ riscv/triggers.h | 3 ++- 2 files changed, 14 insertions(+), 1 deletion(-) diff --git a/riscv/triggers.cc b/riscv/triggers.cc index 7f6be44..ed9105e 100644 --- a/riscv/triggers.cc +++ b/riscv/triggers.cc @@ -1,3 +1,4 @@ +#include "debug_defines.h" #include "processor.h" #include "triggers.h" @@ -16,6 +17,7 @@ reg_t mcontrol_t::tdata1_read(const processor_t * const proc) const noexcept { v = set_field(v, MCONTROL_TYPE(xlen), 2); v = set_field(v, MCONTROL_DMODE(xlen), dmode); v = set_field(v, MCONTROL_MASKMAX(xlen), 0); + v = set_field(v, CSR_MCONTROL_HIT, hit); v = set_field(v, MCONTROL_SELECT, select); v = set_field(v, MCONTROL_TIMING, timing); v = set_field(v, MCONTROL_ACTION, action); @@ -36,6 +38,7 @@ bool mcontrol_t::tdata1_write(processor_t * const proc, const reg_t val) noexcep } auto xlen = proc->get_xlen(); dmode = get_field(val, MCONTROL_DMODE(xlen)); + hit = get_field(val, CSR_MCONTROL_HIT); select = get_field(val, MCONTROL_SELECT); timing = get_field(val, MCONTROL_TIMING); action = (triggers::action_t) get_field(val, MCONTROL_ACTION); @@ -131,6 +134,9 @@ match_result_t mcontrol_t::memory_access_match(processor_t * const proc, operati } if (simple_match(xlen, value)) { + /* This is OK because this function is only called if the trigger was not + * inhibited by the previous trigger in the chain. */ + hit = true; if (timing) return MATCH_FIRE_AFTER; else @@ -165,6 +171,12 @@ match_result_t module_t::memory_access_match(action_t * const action, operation_ continue; } + /* Note: We call memory_access_match for each trigger in a chain as long as + * the triggers are matching. This results in "temperature coding" so that + * `hit` is set on each of the consecutive triggers that matched, even if the + * entire chain did not match. This is allowed by the spec, because the final + * trigger in the chain will never get `hit` set unless the entire chain + * matches. 
*/ match_result_t result = triggers[i]->memory_access_match(proc, operation, address, data); if (result != MATCH_NONE && !triggers[i]->chain()) { *action = triggers[i]->action; diff --git a/riscv/triggers.h b/riscv/triggers.h index 75ee405..19fa437 100644 --- a/riscv/triggers.h +++ b/riscv/triggers.h @@ -58,11 +58,12 @@ public: public: bool dmode; action_t action; + bool hit; virtual ~trigger_t() {}; protected: - trigger_t() : dmode(false), action(ACTION_DEBUG_EXCEPTION) {}; + trigger_t() : dmode(false), action(ACTION_DEBUG_EXCEPTION), hit(false) {}; }; class mcontrol_t : public trigger_t { -- cgit v1.1 From 62ecca6f8a8fdceb0e2bf54bc21ea5899e100ea8 Mon Sep 17 00:00:00 2001 From: Tim Newsome Date: Mon, 2 May 2022 09:26:46 -0700 Subject: Use MCONTROL_TYPE_MATCH macro instead of 2 --- riscv/triggers.cc | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/riscv/triggers.cc b/riscv/triggers.cc index ed9105e..4d16a58 100644 --- a/riscv/triggers.cc +++ b/riscv/triggers.cc @@ -14,7 +14,7 @@ mcontrol_t::mcontrol_t() : reg_t mcontrol_t::tdata1_read(const processor_t * const proc) const noexcept { reg_t v = 0; auto xlen = proc->get_xlen(); - v = set_field(v, MCONTROL_TYPE(xlen), 2); + v = set_field(v, MCONTROL_TYPE(xlen), MCONTROL_TYPE_MATCH); v = set_field(v, MCONTROL_DMODE(xlen), dmode); v = set_field(v, MCONTROL_MASKMAX(xlen), 0); v = set_field(v, CSR_MCONTROL_HIT, hit); -- cgit v1.1 From 8f46a28436f8874a23c721fc336bbd0285642643 Mon Sep 17 00:00:00 2001 From: jmonesti Date: Wed, 4 May 2022 12:25:57 +0200 Subject: Linking spike_dasm misses libriscv.a dependance (#986) Whereas spike-dasm.cc now instanciates an isa_parser_t, the dependance on libriscv.a has become unconditional. --- spike_dasm/spike_dasm.mk.in | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/spike_dasm/spike_dasm.mk.in b/spike_dasm/spike_dasm.mk.in index 0233e62..1003a79 100644 --- a/spike_dasm/spike_dasm.mk.in +++ b/spike_dasm/spike_dasm.mk.in @@ -1,7 +1,7 @@ spike_dasm_subproject_deps = \ disasm \ softfloat \ - $(if $(HAVE_DLOPEN),riscv,) \ + riscv \ spike_dasm_srcs = \ spike_dasm_option_parser.cc \ -- cgit v1.1 From a8245e92fb80023cead1a9c6efb00ff98deb5a09 Mon Sep 17 00:00:00 2001 From: Shaked Flur Date: Wed, 4 May 2022 19:43:38 +0100 Subject: Fix the padding of register names in the log (#987) This fix print x5 as "x5 ", instead of "x 5". --- riscv/execute.cc | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/riscv/execute.cc b/riscv/execute.cc index 98e3cdb..a6ea7a4 100644 --- a/riscv/execute.cc +++ b/riscv/execute.cc @@ -133,7 +133,7 @@ static void commit_log_print_insn(processor_t *p, reg_t pc, insn_t insn) if (prefix == 'c') fprintf(log_file, " c%d_%s ", rd, csr_name(rd)); else - fprintf(log_file, " %c%2d ", prefix, rd); + fprintf(log_file, " %c%-2d ", prefix, rd); if (is_vreg) commit_log_print_value(log_file, size, &p->VU.elt(rd, 0)); else -- cgit v1.1 From 84a98f6f718cd482710238042eac3d2b855c6768 Mon Sep 17 00:00:00 2001 From: "soberl@nvidia.com" Date: Tue, 3 May 2022 19:38:07 -0700 Subject: Implement the new csr mseccfg for ePMP as dummy --- riscv/csrs.cc | 43 +++++++++++++++++++++++++++++++++++++++++++ riscv/csrs.h | 16 ++++++++++++++++ riscv/processor.cc | 2 ++ riscv/processor.h | 2 ++ 4 files changed, 63 insertions(+) diff --git a/riscv/csrs.cc b/riscv/csrs.cc index 98edacf..c887c4e 100644 --- a/riscv/csrs.cc +++ b/riscv/csrs.cc @@ -1,5 +1,8 @@ // See LICENSE for license details. 
+// For std::any_of +#include + #include "csrs.h" // For processor_t: #include "processor.h" @@ -227,6 +230,46 @@ bool pmpcfg_csr_t::unlogged_write(const reg_t val) noexcept { return write_success; } +// implement class mseccfg_csr_t +mseccfg_csr_t::mseccfg_csr_t(processor_t* const proc, const reg_t addr): + basic_csr_t(proc, addr, 0) { +} + +bool mseccfg_csr_t::get_mml() const noexcept { + return (read() & MSECCFG_MML); +} + +bool mseccfg_csr_t::get_mmwp() const noexcept { + return (read() & MSECCFG_MMWP); +} + +bool mseccfg_csr_t::get_rlb() const noexcept { + return (read() & MSECCFG_RLB); +} + +bool mseccfg_csr_t::unlogged_write(const reg_t val) noexcept { + if (proc->n_pmp == 0) + return false; + + // pmpcfg.L is 1 in any rule or entry (including disabled entries) + const bool pmplock_recorded = std::any_of(state->pmpaddr, state->pmpaddr + proc->n_pmp, + [](const pmpaddr_csr_t_p & c) { return c->is_locked(); } ); + reg_t new_val = read(); + + // When RLB is 0 and pmplock_recorded, RLB is locked to 0. + // Otherwise set the RLB bit according val + if (!(pmplock_recorded && (read() & MSECCFG_RLB) == 0)) { + new_val &= ~MSECCFG_RLB; + new_val |= (val & MSECCFG_RLB); + } + + new_val |= (val & MSECCFG_MMWP); //MMWP is sticky + new_val |= (val & MSECCFG_MML); //MML is sticky + + proc->get_mmu()->flush_tlb(); + + return basic_csr_t::unlogged_write(new_val); +} // implement class virtualized_csr_t virtualized_csr_t::virtualized_csr_t(processor_t* const proc, csr_t_p orig, csr_t_p virt): diff --git a/riscv/csrs.h b/riscv/csrs.h index fb27ae6..660ddd1 100644 --- a/riscv/csrs.h +++ b/riscv/csrs.h @@ -89,6 +89,11 @@ class pmpaddr_csr_t: public csr_t { // Is the specified access allowed given the pmpcfg privileges? bool access_ok(access_type type, reg_t mode) const noexcept; + // To check lock bit status from outside like mseccfg + bool is_locked() const noexcept { + return cfg & PMP_L; + } + protected: virtual bool unlogged_write(const reg_t val) noexcept override; private: @@ -122,6 +127,17 @@ class pmpcfg_csr_t: public csr_t { virtual bool unlogged_write(const reg_t val) noexcept override; }; +class mseccfg_csr_t: public basic_csr_t { + public: + mseccfg_csr_t(processor_t* const proc, const reg_t addr); + bool get_mml() const noexcept; + bool get_mmwp() const noexcept; + bool get_rlb() const noexcept; + protected: + virtual bool unlogged_write(const reg_t val) noexcept override; +}; + +typedef std::shared_ptr mseccfg_csr_t_p; // For CSRs that have a virtualized copy under another name. 
Each // instance of virtualized_csr_t will read/write one of two CSRs, diff --git a/riscv/processor.cc b/riscv/processor.cc index ad9944e..9ce9287 100644 --- a/riscv/processor.cc +++ b/riscv/processor.cc @@ -358,6 +358,8 @@ void state_t::reset(processor_t* const proc, reg_t max_isa) debug_mode = false; single_step = STEP_NONE; + csrmap[CSR_MSECCFG] = mseccfg = std::make_shared(proc, CSR_MSECCFG); + for (int i = 0; i < max_pmp; ++i) { csrmap[CSR_PMPADDR0 + i] = pmpaddr[i] = std::make_shared(proc, CSR_PMPADDR0 + i); } diff --git a/riscv/processor.h b/riscv/processor.h index 8797ab1..96fdc54 100644 --- a/riscv/processor.h +++ b/riscv/processor.h @@ -179,6 +179,8 @@ struct state_t tdata2_csr_t_p tdata2; bool debug_mode; + mseccfg_csr_t_p mseccfg; + static const int max_pmp = 16; pmpaddr_csr_t_p pmpaddr[max_pmp]; -- cgit v1.1 From af500657c3ecde57fb88ed027dbdaa5da72a1c4b Mon Sep 17 00:00:00 2001 From: "soberl@nvidia.com" Date: Tue, 3 May 2022 19:49:21 -0700 Subject: Update csr access rules for ePMP on pmpaddr and pmpcfg --- riscv/csrs.cc | 38 +++++++++++++++++++++++++++++++------- 1 file changed, 31 insertions(+), 7 deletions(-) diff --git a/riscv/csrs.cc b/riscv/csrs.cc index c887c4e..1e9b544 100644 --- a/riscv/csrs.cc +++ b/riscv/csrs.cc @@ -120,7 +120,9 @@ bool pmpaddr_csr_t::unlogged_write(const reg_t val) noexcept { if (proc->n_pmp == 0) return false; - bool locked = cfg & PMP_L; + const bool lock_bypass = state->mseccfg->get_rlb(); + const bool locked = !lock_bypass && (cfg & PMP_L); + if (pmpidx < proc->n_pmp && !locked && !next_locked_and_tor()) { this->val = val & ((reg_t(1) << (MAX_PADDR_BITS - PMP_SHIFT)) - 1); } @@ -132,8 +134,9 @@ bool pmpaddr_csr_t::unlogged_write(const reg_t val) noexcept { bool pmpaddr_csr_t::next_locked_and_tor() const noexcept { if (pmpidx+1 >= state->max_pmp) return false; // this is the last entry - bool next_locked = state->pmpaddr[pmpidx+1]->cfg & PMP_L; - bool next_tor = (state->pmpaddr[pmpidx+1]->cfg & PMP_A) == PMP_TOR; + const bool lock_bypass = state->mseccfg->get_rlb(); + const bool next_locked = !lock_bypass && (state->pmpaddr[pmpidx+1]->cfg & PMP_L); + const bool next_tor = (state->pmpaddr[pmpidx+1]->cfg & PMP_A) == PMP_TOR; return next_locked && next_tor; } @@ -214,14 +217,35 @@ bool pmpcfg_csr_t::unlogged_write(const reg_t val) noexcept { return false; bool write_success = false; + const bool rlb = state->mseccfg->get_rlb(); + const bool mml = state->mseccfg->get_mml(); for (size_t i0 = (address - CSR_PMPCFG0) * 4, i = i0; i < i0 + proc->get_xlen() / 8; i++) { if (i < proc->n_pmp) { - if (!(state->pmpaddr[i]->cfg & PMP_L)) { + const bool locked = (state->pmpaddr[i]->cfg & PMP_L); + if (rlb || (!locked && !state->pmpaddr[i]->next_locked_and_tor())) { uint8_t cfg = (val >> (8 * (i - i0))) & (PMP_R | PMP_W | PMP_X | PMP_A | PMP_L); - cfg &= ~PMP_W | ((cfg & PMP_R) ? PMP_W : 0); // Disallow R=0 W=1 + // Drop R=0 W=1 when MML = 0 + // Remove the restriction when MML = 1 + if (!mml) { + cfg &= ~PMP_W | ((cfg & PMP_R) ? PMP_W : 0); + } + // Disallow A=NA4 when granularity > 4 if (proc->lg_pmp_granularity != PMP_SHIFT && (cfg & PMP_A) == PMP_NA4) - cfg |= PMP_NAPOT; // Disallow A=NA4 when granularity > 4 - state->pmpaddr[i]->cfg = cfg; + cfg |= PMP_NAPOT; + /* + * Adding a rule with executable privileges that either is M-mode-only or a locked Shared-Region + * is not possible and such pmpcfg writes are ignored, leaving pmpcfg unchanged. + * This restriction can be temporarily lifted e.g. during the boot process, by setting mseccfg.RLB. 
+ */ + const bool cfgx = cfg & PMP_X; + const bool cfgw = cfg & PMP_W; + const bool cfgr = cfg & PMP_R; + if (rlb || !(mml && ((cfg & PMP_L) // M-mode-only or a locked Shared-Region + && !(cfgx && cfgw && cfgr) // RWX = 111 is allowed + && (cfgx || (cfgw && !cfgr)) // X=1 or RW=01 is not allowed + ))) { + state->pmpaddr[i]->cfg = cfg; + } } write_success = true; } -- cgit v1.1 From 115a9b3dc21aa001275ce898bc5bc73e43fccab5 Mon Sep 17 00:00:00 2001 From: "soberl@nvidia.com" Date: Tue, 3 May 2022 19:51:33 -0700 Subject: Update mmu_t::pmp_ok() for ePMP in case matching region is not found --- riscv/mmu.cc | 6 +++++- 1 file changed, 5 insertions(+), 1 deletion(-) diff --git a/riscv/mmu.cc b/riscv/mmu.cc index 1ef81cf..2918f0b 100644 --- a/riscv/mmu.cc +++ b/riscv/mmu.cc @@ -242,7 +242,11 @@ bool mmu_t::pmp_ok(reg_t addr, reg_t len, access_type type, reg_t mode) } } - return mode == PRV_M; + // in case matching region is not found + const bool mseccfg_mml = proc->state.mseccfg->get_mml(); + const bool mseccfg_mmwp = proc->state.mseccfg->get_mmwp(); + return ((mode == PRV_M) && !mseccfg_mmwp + && (!mseccfg_mml || ((type == LOAD) || (type == STORE)))); } reg_t mmu_t::pmp_homogeneous(reg_t addr, reg_t len) -- cgit v1.1 From b0fdd88d26ef4b20023e1d2a79cfcb1e9df047de Mon Sep 17 00:00:00 2001 From: "soberl@nvidia.com" Date: Tue, 3 May 2022 19:56:01 -0700 Subject: Update pmpaddr_csr_t::access_ok() for ePMP on matching regions --- riscv/csrs.cc | 36 +++++++++++++++++++++++++++++++----- 1 file changed, 31 insertions(+), 5 deletions(-) diff --git a/riscv/csrs.cc b/riscv/csrs.cc index 1e9b544..8366d8a 100644 --- a/riscv/csrs.cc +++ b/riscv/csrs.cc @@ -192,11 +192,37 @@ bool pmpaddr_csr_t::subset_match(reg_t addr, reg_t len) const noexcept { bool pmpaddr_csr_t::access_ok(access_type type, reg_t mode) const noexcept { - return - (mode == PRV_M && !(cfg & PMP_L)) || - (type == LOAD && (cfg & PMP_R)) || - (type == STORE && (cfg & PMP_W)) || - (type == FETCH && (cfg & PMP_X)); + const bool cfgx = cfg & PMP_X; + const bool cfgw = cfg & PMP_W; + const bool cfgr = cfg & PMP_R; + const bool cfgl = cfg & PMP_L; + + const bool prvm = mode == PRV_M; + + const bool typer = type == LOAD; + const bool typex = type == FETCH; + const bool typew = type == STORE; + const bool normal_rwx = (typer && cfgr) || (typew && cfgw) || (typex && cfgx); + const bool mseccfg_mml = state->mseccfg->get_mml(); + + if (mseccfg_mml) { + if (cfgx && cfgw && cfgr && cfgl) { + // Locked Shared data region: Read only on both M and S/U mode. + return typer; + } else { + const bool mml_shared_region = !cfgr && cfgw; + const bool mml_chk_normal = (prvm == cfgl) && normal_rwx; + const bool mml_chk_shared = + (!cfgl && cfgx && (typer || typew)) || + (!cfgl && !cfgx && (typer || (typew && prvm))) || + (cfgl && typex) || + (cfgl && typer && cfgx && prvm); + return mml_shared_region ? 
mml_chk_shared : mml_chk_normal; + } + } else { + const bool m_bypass = (prvm && !cfgl); + return m_bypass || normal_rwx; + } } -- cgit v1.1 From 6d2549d2ad54aa3bbe183ca091490422047acdca Mon Sep 17 00:00:00 2001 From: soberl Date: Thu, 5 May 2022 09:43:29 +0800 Subject: Append smepmp extension 1.0 to the feature list --- README.md | 1 + 1 file changed, 1 insertion(+) diff --git a/README.md b/README.md index 6da9fab..7d348f7 100644 --- a/README.md +++ b/README.md @@ -35,6 +35,7 @@ Spike supports the following RISC-V ISA features: - Svinval extension, v1.0 - CMO extension, v1.0 - Debug v0.14 + - Smepmp extension v1.0 As a Spike extension, the remainder of the proposed [Bit-Manipulation Extensions](https://github.com/riscv/riscv-bitmanip) -- cgit v1.1 From 15a370bde22aacbaa7495ec93ff1f09b9361fb9d Mon Sep 17 00:00:00 2001 From: Andrew Waterman Date: Thu, 5 May 2022 13:39:09 -0700 Subject: Factor out V extension macros into their own header No functional change. --- riscv/decode.h | 2070 +------------------------------------------------ riscv/v_ext_macros.h | 2075 ++++++++++++++++++++++++++++++++++++++++++++++++++ 2 files changed, 2076 insertions(+), 2069 deletions(-) create mode 100644 riscv/v_ext_macros.h diff --git a/riscv/decode.h b/riscv/decode.h index 611c910..70f146d 100644 --- a/riscv/decode.h +++ b/riscv/decode.h @@ -16,6 +16,7 @@ #include "common.h" #include "softfloat_types.h" #include "specialize.h" +#include "v_ext_macros.h" #include typedef int64_t sreg_t; @@ -416,2075 +417,6 @@ inline long double to_f(float128_t f) { long double r; memcpy(&r, &f, sizeof(r)) #define DEBUG_RVV_FMA_VF 0 #endif -// -// vector: masking skip helper -// -#define VI_MASK_VARS \ - const int midx = i / 64; \ - const int mpos = i % 64; - -#define VI_LOOP_ELEMENT_SKIP(BODY) \ - VI_MASK_VARS \ - if (insn.v_vm() == 0) { \ - BODY; \ - bool skip = ((P.VU.elt(0, midx) >> mpos) & 0x1) == 0; \ - if (skip) { \ - continue; \ - } \ - } - -#define VI_ELEMENT_SKIP(inx) \ - if (inx >= vl) { \ - continue; \ - } else if (inx < P.VU.vstart->read()) { \ - continue; \ - } else { \ - VI_LOOP_ELEMENT_SKIP(); \ - } - -// -// vector: operation and register acccess check helper -// -static inline bool is_overlapped(const int astart, int asize, - const int bstart, int bsize) -{ - asize = asize == 0 ? 1 : asize; - bsize = bsize == 0 ? 1 : bsize; - - const int aend = astart + asize; - const int bend = bstart + bsize; - - return std::max(aend, bend) - std::min(astart, bstart) < asize + bsize; -} - -static inline bool is_overlapped_widen(const int astart, int asize, - const int bstart, int bsize) -{ - asize = asize == 0 ? 1 : asize; - bsize = bsize == 0 ? 1 : bsize; - - const int aend = astart + asize; - const int bend = bstart + bsize; - - if (astart < bstart && - is_overlapped(astart, asize, bstart, bsize) && - !is_overlapped(astart, asize, bstart + bsize, bsize)) { - return false; - } else { - return std::max(aend, bend) - std::min(astart, bstart) < asize + bsize; - } -} - -static inline bool is_aligned(const unsigned val, const unsigned pos) -{ - return pos ? 
(val & (pos - 1)) == 0 : true; -} - -#define VI_NARROW_CHECK_COMMON \ - require_vector(true); \ - require(P.VU.vflmul <= 4); \ - require(P.VU.vsew * 2 <= P.VU.ELEN); \ - require_align(insn.rs2(), P.VU.vflmul * 2); \ - require_align(insn.rd(), P.VU.vflmul); \ - require_vm; \ - -#define VI_WIDE_CHECK_COMMON \ - require_vector(true); \ - require(P.VU.vflmul <= 4); \ - require(P.VU.vsew * 2 <= P.VU.ELEN); \ - require_align(insn.rd(), P.VU.vflmul * 2); \ - require_vm; \ - -#define VI_CHECK_ST_INDEX(elt_width) \ - require_vector(false); \ - float vemul = ((float)elt_width / P.VU.vsew * P.VU.vflmul); \ - require(vemul >= 0.125 && vemul <= 8); \ - reg_t emul = vemul < 1 ? 1 : vemul; \ - reg_t flmul = P.VU.vflmul < 1 ? 1 : P.VU.vflmul; \ - require_align(insn.rd(), P.VU.vflmul); \ - require_align(insn.rs2(), vemul); \ - require((nf * flmul) <= (NVPR / 4) && \ - (insn.rd() + nf * flmul) <= NVPR); \ - -#define VI_CHECK_LD_INDEX(elt_width) \ - VI_CHECK_ST_INDEX(elt_width); \ - for (reg_t idx = 0; idx < nf; ++idx) { \ - reg_t flmul = P.VU.vflmul < 1 ? 1 : P.VU.vflmul; \ - reg_t seg_vd = insn.rd() + flmul * idx; \ - if (elt_width > P.VU.vsew) { \ - if (seg_vd != insn.rs2()) \ - require_noover(seg_vd, P.VU.vflmul, insn.rs2(), vemul); \ - } else if (elt_width < P.VU.vsew) { \ - if (vemul < 1) { \ - require_noover(seg_vd, P.VU.vflmul, insn.rs2(), vemul); \ - } else { \ - require_noover_widen(seg_vd, P.VU.vflmul, insn.rs2(), vemul); \ - } \ - } \ - if (nf >= 2) { \ - require_noover(seg_vd, P.VU.vflmul, insn.rs2(), vemul); \ - } \ - } \ - require_vm; \ - -#define VI_CHECK_MSS(is_vs1) \ - if (insn.rd() != insn.rs2()) \ - require_noover(insn.rd(), 1, insn.rs2(), P.VU.vflmul); \ - require_align(insn.rs2(), P.VU.vflmul); \ - if (is_vs1) { \ - if (insn.rd() != insn.rs1()) \ - require_noover(insn.rd(), 1, insn.rs1(), P.VU.vflmul); \ - require_align(insn.rs1(), P.VU.vflmul); \ - } \ - -#define VI_CHECK_SSS(is_vs1) \ - require_vm; \ - if (P.VU.vflmul > 1) { \ - require_align(insn.rd(), P.VU.vflmul); \ - require_align(insn.rs2(), P.VU.vflmul); \ - if (is_vs1) { \ - require_align(insn.rs1(), P.VU.vflmul); \ - } \ - } - -#define VI_CHECK_STORE(elt_width, is_mask_ldst) \ - require_vector(false); \ - reg_t veew = is_mask_ldst ? 1 : sizeof(elt_width##_t) * 8; \ - float vemul = is_mask_ldst ? 1 : ((float)veew / P.VU.vsew * P.VU.vflmul); \ - reg_t emul = vemul < 1 ? 
1 : vemul; \ - require(vemul >= 0.125 && vemul <= 8); \ - require_align(insn.rd(), vemul); \ - require((nf * emul) <= (NVPR / 4) && \ - (insn.rd() + nf * emul) <= NVPR); \ - require(veew <= P.VU.ELEN); \ - -#define VI_CHECK_LOAD(elt_width, is_mask_ldst) \ - VI_CHECK_STORE(elt_width, is_mask_ldst); \ - require_vm; \ - -#define VI_CHECK_DSS(is_vs1) \ - VI_WIDE_CHECK_COMMON; \ - require_align(insn.rs2(), P.VU.vflmul); \ - if (P.VU.vflmul < 1) { \ - require_noover(insn.rd(), P.VU.vflmul * 2, insn.rs2(), P.VU.vflmul); \ - } else { \ - require_noover_widen(insn.rd(), P.VU.vflmul * 2, insn.rs2(), P.VU.vflmul); \ - } \ - if (is_vs1) { \ - require_align(insn.rs1(), P.VU.vflmul); \ - if (P.VU.vflmul < 1) { \ - require_noover(insn.rd(), P.VU.vflmul * 2, insn.rs1(), P.VU.vflmul); \ - } else { \ - require_noover_widen(insn.rd(), P.VU.vflmul * 2, insn.rs1(), P.VU.vflmul); \ - } \ - } - -#define VI_CHECK_DDS(is_rs) \ - VI_WIDE_CHECK_COMMON; \ - require_align(insn.rs2(), P.VU.vflmul * 2); \ - if (is_rs) { \ - require_align(insn.rs1(), P.VU.vflmul); \ - if (P.VU.vflmul < 1) { \ - require_noover(insn.rd(), P.VU.vflmul * 2, insn.rs1(), P.VU.vflmul); \ - } else { \ - require_noover_widen(insn.rd(), P.VU.vflmul * 2, insn.rs1(), P.VU.vflmul); \ - } \ - } - -#define VI_CHECK_SDS(is_vs1) \ - VI_NARROW_CHECK_COMMON; \ - if (insn.rd() != insn.rs2()) \ - require_noover(insn.rd(), P.VU.vflmul, insn.rs2(), P.VU.vflmul * 2); \ - if (is_vs1) \ - require_align(insn.rs1(), P.VU.vflmul); \ - -#define VI_CHECK_REDUCTION(is_wide) \ - require_vector(true); \ - if (is_wide) { \ - require(P.VU.vsew * 2 <= P.VU.ELEN); \ - } \ - require_align(insn.rs2(), P.VU.vflmul); \ - require(P.VU.vstart->read() == 0); \ - -#define VI_CHECK_SLIDE(is_over) \ - require_align(insn.rs2(), P.VU.vflmul); \ - require_align(insn.rd(), P.VU.vflmul); \ - require_vm; \ - if (is_over) \ - require(insn.rd() != insn.rs2()); \ - - -// -// vector: loop header and end helper -// -#define VI_GENERAL_LOOP_BASE \ - require(P.VU.vsew >= e8 && P.VU.vsew <= e64); \ - require_vector(true); \ - reg_t vl = P.VU.vl->read(); \ - reg_t sew = P.VU.vsew; \ - reg_t rd_num = insn.rd(); \ - reg_t rs1_num = insn.rs1(); \ - reg_t rs2_num = insn.rs2(); \ - for (reg_t i = P.VU.vstart->read(); i < vl; ++i) { - -#define VI_LOOP_BASE \ - VI_GENERAL_LOOP_BASE \ - VI_LOOP_ELEMENT_SKIP(); - -#define VI_LOOP_END \ - } \ - P.VU.vstart->write(0); - -#define VI_LOOP_REDUCTION_END(x) \ - } \ - if (vl > 0) { \ - vd_0_des = vd_0_res; \ - } \ - P.VU.vstart->write(0); - -#define VI_LOOP_CARRY_BASE \ - VI_GENERAL_LOOP_BASE \ - VI_MASK_VARS \ - auto v0 = P.VU.elt(0, midx); \ - const uint64_t mmask = UINT64_C(1) << mpos; \ - const uint128_t op_mask = (UINT64_MAX >> (64 - sew)); \ - uint64_t carry = insn.v_vm() == 0 ? 
(v0 >> mpos) & 0x1 : 0; \ - uint128_t res = 0; \ - auto &vd = P.VU.elt(rd_num, midx, true); - -#define VI_LOOP_CARRY_END \ - vd = (vd & ~mmask) | (((res) << mpos) & mmask); \ - } \ - P.VU.vstart->write(0); -#define VI_LOOP_WITH_CARRY_BASE \ - VI_GENERAL_LOOP_BASE \ - VI_MASK_VARS \ - auto &v0 = P.VU.elt(0, midx); \ - const uint128_t op_mask = (UINT64_MAX >> (64 - sew)); \ - uint64_t carry = (v0 >> mpos) & 0x1; - -#define VI_LOOP_CMP_BASE \ - require(P.VU.vsew >= e8 && P.VU.vsew <= e64); \ - require_vector(true); \ - reg_t vl = P.VU.vl->read(); \ - reg_t sew = P.VU.vsew; \ - reg_t rd_num = insn.rd(); \ - reg_t rs1_num = insn.rs1(); \ - reg_t rs2_num = insn.rs2(); \ - for (reg_t i = P.VU.vstart->read(); i < vl; ++i) { \ - VI_LOOP_ELEMENT_SKIP(); \ - uint64_t mmask = UINT64_C(1) << mpos; \ - uint64_t &vdi = P.VU.elt(insn.rd(), midx, true); \ - uint64_t res = 0; - -#define VI_LOOP_CMP_END \ - vdi = (vdi & ~mmask) | (((res) << mpos) & mmask); \ - } \ - P.VU.vstart->write(0); - -#define VI_LOOP_MASK(op) \ - require(P.VU.vsew <= e64); \ - require_vector(true); \ - reg_t vl = P.VU.vl->read(); \ - for (reg_t i = P.VU.vstart->read(); i < vl; ++i) { \ - int midx = i / 64; \ - int mpos = i % 64; \ - uint64_t mmask = UINT64_C(1) << mpos; \ - uint64_t vs2 = P.VU.elt(insn.rs2(), midx); \ - uint64_t vs1 = P.VU.elt(insn.rs1(), midx); \ - uint64_t &res = P.VU.elt(insn.rd(), midx, true); \ - res = (res & ~mmask) | ((op) & (1ULL << mpos)); \ - } \ - P.VU.vstart->write(0); - -#define VI_LOOP_NSHIFT_BASE \ - VI_GENERAL_LOOP_BASE; \ - VI_LOOP_ELEMENT_SKIP({ \ - require(!(insn.rd() == 0 && P.VU.vflmul > 1)); \ - }); - - -#define INT_ROUNDING(result, xrm, gb) \ - do { \ - const uint64_t lsb = 1UL << (gb); \ - const uint64_t lsb_half = lsb >> 1; \ - switch (xrm) { \ - case VRM::RNU: \ - result += lsb_half; \ - break; \ - case VRM::RNE: \ - if ((result & lsb_half) && ((result & (lsb_half - 1)) || (result & lsb))) \ - result += lsb; \ - break; \ - case VRM::RDN: \ - break; \ - case VRM::ROD: \ - if (result & (lsb - 1)) \ - result |= lsb; \ - break; \ - case VRM::INVALID_RM: \ - assert(true); \ - } \ - } while (0) - -// -// vector: integer and masking operand access helper -// -#define VXI_PARAMS(x) \ - type_sew_t::type &vd = P.VU.elt::type>(rd_num, i, true); \ - type_sew_t::type vs1 = P.VU.elt::type>(rs1_num, i); \ - type_sew_t::type vs2 = P.VU.elt::type>(rs2_num, i); \ - type_sew_t::type rs1 = (type_sew_t::type)RS1; \ - type_sew_t::type simm5 = (type_sew_t::type)insn.v_simm5(); - -#define VV_U_PARAMS(x) \ - type_usew_t::type &vd = P.VU.elt::type>(rd_num, i, true); \ - type_usew_t::type vs1 = P.VU.elt::type>(rs1_num, i); \ - type_usew_t::type vs2 = P.VU.elt::type>(rs2_num, i); - -#define VX_U_PARAMS(x) \ - type_usew_t::type &vd = P.VU.elt::type>(rd_num, i, true); \ - type_usew_t::type rs1 = (type_usew_t::type)RS1; \ - type_usew_t::type vs2 = P.VU.elt::type>(rs2_num, i); - -#define VI_U_PARAMS(x) \ - type_usew_t::type &vd = P.VU.elt::type>(rd_num, i, true); \ - type_usew_t::type zimm5 = (type_usew_t::type)insn.v_zimm5(); \ - type_usew_t::type vs2 = P.VU.elt::type>(rs2_num, i); - -#define VV_PARAMS(x) \ - type_sew_t::type &vd = P.VU.elt::type>(rd_num, i, true); \ - type_sew_t::type vs1 = P.VU.elt::type>(rs1_num, i); \ - type_sew_t::type vs2 = P.VU.elt::type>(rs2_num, i); - -#define VX_PARAMS(x) \ - type_sew_t::type &vd = P.VU.elt::type>(rd_num, i, true); \ - type_sew_t::type rs1 = (type_sew_t::type)RS1; \ - type_sew_t::type vs2 = P.VU.elt::type>(rs2_num, i); - -#define VI_PARAMS(x) \ - type_sew_t::type &vd = 
P.VU.elt::type>(rd_num, i, true); \ - type_sew_t::type simm5 = (type_sew_t::type)insn.v_simm5(); \ - type_sew_t::type vs2 = P.VU.elt::type>(rs2_num, i); - -#define XV_PARAMS(x) \ - type_sew_t::type &vd = P.VU.elt::type>(rd_num, i, true); \ - type_usew_t::type vs2 = P.VU.elt::type>(rs2_num, RS1); - -#define VV_SU_PARAMS(x) \ - type_sew_t::type &vd = P.VU.elt::type>(rd_num, i, true); \ - type_usew_t::type vs1 = P.VU.elt::type>(rs1_num, i); \ - type_sew_t::type vs2 = P.VU.elt::type>(rs2_num, i); - -#define VX_SU_PARAMS(x) \ - type_sew_t::type &vd = P.VU.elt::type>(rd_num, i, true); \ - type_usew_t::type rs1 = (type_usew_t::type)RS1; \ - type_sew_t::type vs2 = P.VU.elt::type>(rs2_num, i); - -#define VV_UCMP_PARAMS(x) \ - type_usew_t::type vs1 = P.VU.elt::type>(rs1_num, i); \ - type_usew_t::type vs2 = P.VU.elt::type>(rs2_num, i); - -#define VX_UCMP_PARAMS(x) \ - type_usew_t::type rs1 = (type_usew_t::type)RS1; \ - type_usew_t::type vs2 = P.VU.elt::type>(rs2_num, i); - -#define VI_UCMP_PARAMS(x) \ - type_usew_t::type vs2 = P.VU.elt::type>(rs2_num, i); - -#define VV_CMP_PARAMS(x) \ - type_sew_t::type vs1 = P.VU.elt::type>(rs1_num, i); \ - type_sew_t::type vs2 = P.VU.elt::type>(rs2_num, i); - -#define VX_CMP_PARAMS(x) \ - type_sew_t::type rs1 = (type_sew_t::type)RS1; \ - type_sew_t::type vs2 = P.VU.elt::type>(rs2_num, i); - -#define VI_CMP_PARAMS(x) \ - type_sew_t::type simm5 = (type_sew_t::type)insn.v_simm5(); \ - type_sew_t::type vs2 = P.VU.elt::type>(rs2_num, i); - -#define VI_XI_SLIDEDOWN_PARAMS(x, off) \ - auto &vd = P.VU.elt::type>(rd_num, i, true); \ - auto vs2 = P.VU.elt::type>(rs2_num, i + off); - -#define VI_XI_SLIDEUP_PARAMS(x, offset) \ - auto &vd = P.VU.elt::type>(rd_num, i, true); \ - auto vs2 = P.VU.elt::type>(rs2_num, i - offset); - -#define VI_NARROW_PARAMS(sew1, sew2) \ - auto &vd = P.VU.elt::type>(rd_num, i, true); \ - auto vs2_u = P.VU.elt::type>(rs2_num, i); \ - auto vs2 = P.VU.elt::type>(rs2_num, i); \ - auto zimm5 = (type_usew_t::type)insn.v_zimm5(); - -#define VX_NARROW_PARAMS(sew1, sew2) \ - auto &vd = P.VU.elt::type>(rd_num, i, true); \ - auto vs2_u = P.VU.elt::type>(rs2_num, i); \ - auto vs2 = P.VU.elt::type>(rs2_num, i); \ - auto rs1 = (type_sew_t::type)RS1; - -#define VV_NARROW_PARAMS(sew1, sew2) \ - auto &vd = P.VU.elt::type>(rd_num, i, true); \ - auto vs2_u = P.VU.elt::type>(rs2_num, i); \ - auto vs2 = P.VU.elt::type>(rs2_num, i); \ - auto vs1 = P.VU.elt::type>(rs1_num, i); - -#define XI_CARRY_PARAMS(x) \ - auto vs2 = P.VU.elt::type>(rs2_num, i); \ - auto rs1 = (type_sew_t::type)RS1; \ - auto simm5 = (type_sew_t::type)insn.v_simm5(); \ - -#define VV_CARRY_PARAMS(x) \ - auto vs2 = P.VU.elt::type>(rs2_num, i); \ - auto vs1 = P.VU.elt::type>(rs1_num, i); \ - -#define XI_WITH_CARRY_PARAMS(x) \ - auto vs2 = P.VU.elt::type>(rs2_num, i); \ - auto rs1 = (type_sew_t::type)RS1; \ - auto simm5 = (type_sew_t::type)insn.v_simm5(); \ - auto &vd = P.VU.elt::type>(rd_num, i, true); - -#define VV_WITH_CARRY_PARAMS(x) \ - auto vs2 = P.VU.elt::type>(rs2_num, i); \ - auto vs1 = P.VU.elt::type>(rs1_num, i); \ - auto &vd = P.VU.elt::type>(rd_num, i, true); - -#define VFP_V_PARAMS(width) \ - float##width##_t &vd = P.VU.elt(rd_num, i, true); \ - float##width##_t vs2 = P.VU.elt(rs2_num, i); - -#define VFP_VV_PARAMS(width) \ - float##width##_t &vd = P.VU.elt(rd_num, i, true); \ - float##width##_t vs1 = P.VU.elt(rs1_num, i); \ - float##width##_t vs2 = P.VU.elt(rs2_num, i); - -#define VFP_VF_PARAMS(width) \ - float##width##_t &vd = P.VU.elt(rd_num, i, true); \ - float##width##_t rs1 = 
f##width(READ_FREG(rs1_num)); \ - float##width##_t vs2 = P.VU.elt(rs2_num, i); - -#define CVT_FP_TO_FP_PARAMS(from_width, to_width) \ - auto vs2 = P.VU.elt(rs2_num, i); \ - auto &vd = P.VU.elt(rd_num, i, true); - -#define CVT_INT_TO_FP_PARAMS(from_width, to_width, sign) \ - auto vs2 = P.VU.elt(rs2_num, i); \ - auto &vd = P.VU.elt(rd_num, i, true); - -#define CVT_FP_TO_INT_PARAMS(from_width, to_width, sign) \ - auto vs2 = P.VU.elt(rs2_num, i); \ - auto &vd = P.VU.elt(rd_num, i, true); - -// -// vector: integer and masking operation loop -// - -#define INSNS_BASE(PARAMS, BODY) \ - if (sew == e8) { \ - PARAMS(e8); \ - BODY; \ - } else if (sew == e16) { \ - PARAMS(e16); \ - BODY; \ - } else if (sew == e32) { \ - PARAMS(e32); \ - BODY; \ - } else if (sew == e64) { \ - PARAMS(e64); \ - BODY; \ - } - -// comparision result to masking register -#define VI_LOOP_CMP_BODY(PARAMS, BODY) \ - VI_LOOP_CMP_BASE \ - INSNS_BASE(PARAMS, BODY) \ - VI_LOOP_CMP_END - -#define VI_VV_LOOP_CMP(BODY) \ - VI_CHECK_MSS(true); \ - VI_LOOP_CMP_BODY(VV_CMP_PARAMS, BODY) - -#define VI_VX_LOOP_CMP(BODY) \ - VI_CHECK_MSS(false); \ - VI_LOOP_CMP_BODY(VX_CMP_PARAMS, BODY) - -#define VI_VI_LOOP_CMP(BODY) \ - VI_CHECK_MSS(false); \ - VI_LOOP_CMP_BODY(VI_CMP_PARAMS, BODY) - -#define VI_VV_ULOOP_CMP(BODY) \ - VI_CHECK_MSS(true); \ - VI_LOOP_CMP_BODY(VV_UCMP_PARAMS, BODY) - -#define VI_VX_ULOOP_CMP(BODY) \ - VI_CHECK_MSS(false); \ - VI_LOOP_CMP_BODY(VX_UCMP_PARAMS, BODY) - -#define VI_VI_ULOOP_CMP(BODY) \ - VI_CHECK_MSS(false); \ - VI_LOOP_CMP_BODY(VI_UCMP_PARAMS, BODY) - -// merge and copy loop -#define VI_MERGE_VARS \ - VI_MASK_VARS \ - bool use_first = (P.VU.elt(0, midx) >> mpos) & 0x1; - -#define VI_MERGE_LOOP_BASE \ - require_vector(true); \ - VI_GENERAL_LOOP_BASE \ - VI_MERGE_VARS - -#define VI_VV_MERGE_LOOP(BODY) \ - VI_CHECK_SSS(true); \ - VI_MERGE_LOOP_BASE \ - if (sew == e8) { \ - VV_PARAMS(e8); \ - BODY; \ - } else if (sew == e16) { \ - VV_PARAMS(e16); \ - BODY; \ - } else if (sew == e32) { \ - VV_PARAMS(e32); \ - BODY; \ - } else if (sew == e64) { \ - VV_PARAMS(e64); \ - BODY; \ - } \ - VI_LOOP_END - -#define VI_VX_MERGE_LOOP(BODY) \ - VI_CHECK_SSS(false); \ - VI_MERGE_LOOP_BASE \ - if (sew == e8) { \ - VX_PARAMS(e8); \ - BODY; \ - } else if (sew == e16) { \ - VX_PARAMS(e16); \ - BODY; \ - } else if (sew == e32) { \ - VX_PARAMS(e32); \ - BODY; \ - } else if (sew == e64) { \ - VX_PARAMS(e64); \ - BODY; \ - } \ - VI_LOOP_END - -#define VI_VI_MERGE_LOOP(BODY) \ - VI_CHECK_SSS(false); \ - VI_MERGE_LOOP_BASE \ - if (sew == e8) { \ - VI_PARAMS(e8); \ - BODY; \ - } else if (sew == e16) { \ - VI_PARAMS(e16); \ - BODY; \ - } else if (sew == e32) { \ - VI_PARAMS(e32); \ - BODY; \ - } else if (sew == e64) { \ - VI_PARAMS(e64); \ - BODY; \ - } \ - VI_LOOP_END - -#define VI_VF_MERGE_LOOP(BODY) \ - VI_CHECK_SSS(false); \ - VI_VFP_COMMON \ - for (reg_t i = P.VU.vstart->read(); i < vl; ++i) { \ - VI_MERGE_VARS \ - if (P.VU.vsew == e16) { \ - VFP_VF_PARAMS(16); \ - BODY; \ - } else if (P.VU.vsew == e32) { \ - VFP_VF_PARAMS(32); \ - BODY; \ - } else if (P.VU.vsew == e64) { \ - VFP_VF_PARAMS(64); \ - BODY; \ - } \ - VI_LOOP_END - -// reduction loop - signed -#define VI_LOOP_REDUCTION_BASE(x) \ - require(x >= e8 && x <= e64); \ - reg_t vl = P.VU.vl->read(); \ - reg_t rd_num = insn.rd(); \ - reg_t rs1_num = insn.rs1(); \ - reg_t rs2_num = insn.rs2(); \ - auto &vd_0_des = P.VU.elt::type>(rd_num, 0, true); \ - auto vd_0_res = P.VU.elt::type>(rs1_num, 0); \ - for (reg_t i = P.VU.vstart->read(); i < vl; ++i) { \ - VI_LOOP_ELEMENT_SKIP(); \ - 
auto vs2 = P.VU.elt::type>(rs2_num, i); \ - -#define REDUCTION_LOOP(x, BODY) \ - VI_LOOP_REDUCTION_BASE(x) \ - BODY; \ - VI_LOOP_REDUCTION_END(x) - -#define VI_VV_LOOP_REDUCTION(BODY) \ - VI_CHECK_REDUCTION(false); \ - reg_t sew = P.VU.vsew; \ - if (sew == e8) { \ - REDUCTION_LOOP(e8, BODY) \ - } else if (sew == e16) { \ - REDUCTION_LOOP(e16, BODY) \ - } else if (sew == e32) { \ - REDUCTION_LOOP(e32, BODY) \ - } else if (sew == e64) { \ - REDUCTION_LOOP(e64, BODY) \ - } - -// reduction loop - unsigned -#define VI_ULOOP_REDUCTION_BASE(x) \ - require(x >= e8 && x <= e64); \ - reg_t vl = P.VU.vl->read(); \ - reg_t rd_num = insn.rd(); \ - reg_t rs1_num = insn.rs1(); \ - reg_t rs2_num = insn.rs2(); \ - auto &vd_0_des = P.VU.elt::type>(rd_num, 0, true); \ - auto vd_0_res = P.VU.elt::type>(rs1_num, 0); \ - for (reg_t i = P.VU.vstart->read(); i < vl; ++i) { \ - VI_LOOP_ELEMENT_SKIP(); \ - auto vs2 = P.VU.elt::type>(rs2_num, i); - -#define REDUCTION_ULOOP(x, BODY) \ - VI_ULOOP_REDUCTION_BASE(x) \ - BODY; \ - VI_LOOP_REDUCTION_END(x) - -#define VI_VV_ULOOP_REDUCTION(BODY) \ - VI_CHECK_REDUCTION(false); \ - reg_t sew = P.VU.vsew; \ - if (sew == e8) { \ - REDUCTION_ULOOP(e8, BODY) \ - } else if (sew == e16) { \ - REDUCTION_ULOOP(e16, BODY) \ - } else if (sew == e32) { \ - REDUCTION_ULOOP(e32, BODY) \ - } else if (sew == e64) { \ - REDUCTION_ULOOP(e64, BODY) \ - } - - -// genearl VXI signed/unsigned loop -#define VI_VV_ULOOP(BODY) \ - VI_CHECK_SSS(true) \ - VI_LOOP_BASE \ - if (sew == e8) { \ - VV_U_PARAMS(e8); \ - BODY; \ - } else if (sew == e16) { \ - VV_U_PARAMS(e16); \ - BODY; \ - } else if (sew == e32) { \ - VV_U_PARAMS(e32); \ - BODY; \ - } else if (sew == e64) { \ - VV_U_PARAMS(e64); \ - BODY; \ - } \ - VI_LOOP_END - -#define VI_VV_LOOP(BODY) \ - VI_CHECK_SSS(true) \ - VI_LOOP_BASE \ - if (sew == e8) { \ - VV_PARAMS(e8); \ - BODY; \ - } else if (sew == e16) { \ - VV_PARAMS(e16); \ - BODY; \ - } else if (sew == e32) { \ - VV_PARAMS(e32); \ - BODY; \ - } else if (sew == e64) { \ - VV_PARAMS(e64); \ - BODY; \ - } \ - VI_LOOP_END - -#define VI_VX_ULOOP(BODY) \ - VI_CHECK_SSS(false) \ - VI_LOOP_BASE \ - if (sew == e8) { \ - VX_U_PARAMS(e8); \ - BODY; \ - } else if (sew == e16) { \ - VX_U_PARAMS(e16); \ - BODY; \ - } else if (sew == e32) { \ - VX_U_PARAMS(e32); \ - BODY; \ - } else if (sew == e64) { \ - VX_U_PARAMS(e64); \ - BODY; \ - } \ - VI_LOOP_END - -#define VI_VX_LOOP(BODY) \ - VI_CHECK_SSS(false) \ - VI_LOOP_BASE \ - if (sew == e8) { \ - VX_PARAMS(e8); \ - BODY; \ - } else if (sew == e16) { \ - VX_PARAMS(e16); \ - BODY; \ - } else if (sew == e32) { \ - VX_PARAMS(e32); \ - BODY; \ - } else if (sew == e64) { \ - VX_PARAMS(e64); \ - BODY; \ - } \ - VI_LOOP_END - -#define VI_VI_ULOOP(BODY) \ - VI_CHECK_SSS(false) \ - VI_LOOP_BASE \ - if (sew == e8) { \ - VI_U_PARAMS(e8); \ - BODY; \ - } else if (sew == e16) { \ - VI_U_PARAMS(e16); \ - BODY; \ - } else if (sew == e32) { \ - VI_U_PARAMS(e32); \ - BODY; \ - } else if (sew == e64) { \ - VI_U_PARAMS(e64); \ - BODY; \ - } \ - VI_LOOP_END - -#define VI_VI_LOOP(BODY) \ - VI_CHECK_SSS(false) \ - VI_LOOP_BASE \ - if (sew == e8) { \ - VI_PARAMS(e8); \ - BODY; \ - } else if (sew == e16) { \ - VI_PARAMS(e16); \ - BODY; \ - } else if (sew == e32) { \ - VI_PARAMS(e32); \ - BODY; \ - } else if (sew == e64) { \ - VI_PARAMS(e64); \ - BODY; \ - } \ - VI_LOOP_END - -// signed unsigned operation loop (e.g. 
mulhsu) -#define VI_VV_SU_LOOP(BODY) \ - VI_CHECK_SSS(true) \ - VI_LOOP_BASE \ - if (sew == e8) { \ - VV_SU_PARAMS(e8); \ - BODY; \ - } else if (sew == e16) { \ - VV_SU_PARAMS(e16); \ - BODY; \ - } else if (sew == e32) { \ - VV_SU_PARAMS(e32); \ - BODY; \ - } else if (sew == e64) { \ - VV_SU_PARAMS(e64); \ - BODY; \ - } \ - VI_LOOP_END - -#define VI_VX_SU_LOOP(BODY) \ - VI_CHECK_SSS(false) \ - VI_LOOP_BASE \ - if (sew == e8) { \ - VX_SU_PARAMS(e8); \ - BODY; \ - } else if (sew == e16) { \ - VX_SU_PARAMS(e16); \ - BODY; \ - } else if (sew == e32) { \ - VX_SU_PARAMS(e32); \ - BODY; \ - } else if (sew == e64) { \ - VX_SU_PARAMS(e64); \ - BODY; \ - } \ - VI_LOOP_END - -// narrow operation loop -#define VI_VV_LOOP_NARROW(BODY) \ - VI_CHECK_SDS(true); \ - VI_LOOP_BASE \ - if (sew == e8) { \ - VV_NARROW_PARAMS(e8, e16) \ - BODY; \ - } else if (sew == e16) { \ - VV_NARROW_PARAMS(e16, e32) \ - BODY; \ - } else if (sew == e32) { \ - VV_NARROW_PARAMS(e32, e64) \ - BODY; \ - } \ - VI_LOOP_END - -#define VI_VX_LOOP_NARROW(BODY) \ - VI_CHECK_SDS(false); \ - VI_LOOP_BASE \ - if (sew == e8) { \ - VX_NARROW_PARAMS(e8, e16) \ - BODY; \ - } else if (sew == e16) { \ - VX_NARROW_PARAMS(e16, e32) \ - BODY; \ - } else if (sew == e32) { \ - VX_NARROW_PARAMS(e32, e64) \ - BODY; \ - } \ - VI_LOOP_END - -#define VI_VI_LOOP_NARROW(BODY) \ - VI_CHECK_SDS(false); \ - VI_LOOP_BASE \ - if (sew == e8) { \ - VI_NARROW_PARAMS(e8, e16) \ - BODY; \ - } else if (sew == e16) { \ - VI_NARROW_PARAMS(e16, e32) \ - BODY; \ - } else if (sew == e32) { \ - VI_NARROW_PARAMS(e32, e64) \ - BODY; \ - } \ - VI_LOOP_END - -#define VI_VI_LOOP_NSHIFT(BODY) \ - VI_CHECK_SDS(false); \ - VI_LOOP_NSHIFT_BASE \ - if (sew == e8) { \ - VI_NARROW_PARAMS(e8, e16) \ - BODY; \ - } else if (sew == e16) { \ - VI_NARROW_PARAMS(e16, e32) \ - BODY; \ - } else if (sew == e32) { \ - VI_NARROW_PARAMS(e32, e64) \ - BODY; \ - } \ - VI_LOOP_END - -#define VI_VX_LOOP_NSHIFT(BODY) \ - VI_CHECK_SDS(false); \ - VI_LOOP_NSHIFT_BASE \ - if (sew == e8) { \ - VX_NARROW_PARAMS(e8, e16) \ - BODY; \ - } else if (sew == e16) { \ - VX_NARROW_PARAMS(e16, e32) \ - BODY; \ - } else if (sew == e32) { \ - VX_NARROW_PARAMS(e32, e64) \ - BODY; \ - } \ - VI_LOOP_END - -#define VI_VV_LOOP_NSHIFT(BODY) \ - VI_CHECK_SDS(true); \ - VI_LOOP_NSHIFT_BASE \ - if (sew == e8) { \ - VV_NARROW_PARAMS(e8, e16) \ - BODY; \ - } else if (sew == e16) { \ - VV_NARROW_PARAMS(e16, e32) \ - BODY; \ - } else if (sew == e32) { \ - VV_NARROW_PARAMS(e32, e64) \ - BODY; \ - } \ - VI_LOOP_END - -// widen operation loop -#define VI_VV_LOOP_WIDEN(BODY) \ - VI_LOOP_BASE \ - if (sew == e8) { \ - VV_PARAMS(e8); \ - BODY; \ - } else if (sew == e16) { \ - VV_PARAMS(e16); \ - BODY; \ - } else if (sew == e32) { \ - VV_PARAMS(e32); \ - BODY; \ - } \ - VI_LOOP_END - -#define VI_VX_LOOP_WIDEN(BODY) \ - VI_LOOP_BASE \ - if (sew == e8) { \ - VX_PARAMS(e8); \ - BODY; \ - } else if (sew == e16) { \ - VX_PARAMS(e16); \ - BODY; \ - } else if (sew == e32) { \ - VX_PARAMS(e32); \ - BODY; \ - } \ - VI_LOOP_END - -#define VI_WIDE_OP_AND_ASSIGN(var0, var1, var2, op0, op1, sign) \ - switch (P.VU.vsew) { \ - case e8: { \ - sign##16_t vd_w = P.VU.elt(rd_num, i); \ - P.VU.elt(rd_num, i, true) = \ - op1((sign##16_t)(sign##8_t)var0 op0 (sign##16_t)(sign##8_t)var1) + var2; \ - } \ - break; \ - case e16: { \ - sign##32_t vd_w = P.VU.elt(rd_num, i); \ - P.VU.elt(rd_num, i, true) = \ - op1((sign##32_t)(sign##16_t)var0 op0 (sign##32_t)(sign##16_t)var1) + var2; \ - } \ - break; \ - default: { \ - sign##64_t vd_w = P.VU.elt(rd_num, i); \ - 
P.VU.elt(rd_num, i, true) = \ - op1((sign##64_t)(sign##32_t)var0 op0 (sign##64_t)(sign##32_t)var1) + var2; \ - } \ - break; \ - } - -#define VI_WIDE_OP_AND_ASSIGN_MIX(var0, var1, var2, op0, op1, sign_d, sign_1, sign_2) \ - switch (P.VU.vsew) { \ - case e8: { \ - sign_d##16_t vd_w = P.VU.elt(rd_num, i); \ - P.VU.elt(rd_num, i, true) = \ - op1((sign_1##16_t)(sign_1##8_t)var0 op0 (sign_2##16_t)(sign_2##8_t)var1) + var2; \ - } \ - break; \ - case e16: { \ - sign_d##32_t vd_w = P.VU.elt(rd_num, i); \ - P.VU.elt(rd_num, i, true) = \ - op1((sign_1##32_t)(sign_1##16_t)var0 op0 (sign_2##32_t)(sign_2##16_t)var1) + var2; \ - } \ - break; \ - default: { \ - sign_d##64_t vd_w = P.VU.elt(rd_num, i); \ - P.VU.elt(rd_num, i, true) = \ - op1((sign_1##64_t)(sign_1##32_t)var0 op0 (sign_2##64_t)(sign_2##32_t)var1) + var2; \ - } \ - break; \ - } - -#define VI_WIDE_WVX_OP(var0, op0, sign) \ - switch (P.VU.vsew) { \ - case e8: { \ - sign##16_t &vd_w = P.VU.elt(rd_num, i, true); \ - sign##16_t vs2_w = P.VU.elt(rs2_num, i); \ - vd_w = vs2_w op0 (sign##16_t)(sign##8_t)var0; \ - } \ - break; \ - case e16: { \ - sign##32_t &vd_w = P.VU.elt(rd_num, i, true); \ - sign##32_t vs2_w = P.VU.elt(rs2_num, i); \ - vd_w = vs2_w op0 (sign##32_t)(sign##16_t)var0; \ - } \ - break; \ - default: { \ - sign##64_t &vd_w = P.VU.elt(rd_num, i, true); \ - sign##64_t vs2_w = P.VU.elt(rs2_num, i); \ - vd_w = vs2_w op0 (sign##64_t)(sign##32_t)var0; \ - } \ - break; \ - } - -// wide reduction loop - signed -#define VI_LOOP_WIDE_REDUCTION_BASE(sew1, sew2) \ - reg_t vl = P.VU.vl->read(); \ - reg_t rd_num = insn.rd(); \ - reg_t rs1_num = insn.rs1(); \ - reg_t rs2_num = insn.rs2(); \ - auto &vd_0_des = P.VU.elt::type>(rd_num, 0, true); \ - auto vd_0_res = P.VU.elt::type>(rs1_num, 0); \ - for (reg_t i = P.VU.vstart->read(); i < vl; ++i) { \ - VI_LOOP_ELEMENT_SKIP(); \ - auto vs2 = P.VU.elt::type>(rs2_num, i); - -#define WIDE_REDUCTION_LOOP(sew1, sew2, BODY) \ - VI_LOOP_WIDE_REDUCTION_BASE(sew1, sew2) \ - BODY; \ - VI_LOOP_REDUCTION_END(sew2) - -#define VI_VV_LOOP_WIDE_REDUCTION(BODY) \ - VI_CHECK_REDUCTION(true); \ - reg_t sew = P.VU.vsew; \ - if (sew == e8) { \ - WIDE_REDUCTION_LOOP(e8, e16, BODY) \ - } else if (sew == e16) { \ - WIDE_REDUCTION_LOOP(e16, e32, BODY) \ - } else if (sew == e32) { \ - WIDE_REDUCTION_LOOP(e32, e64, BODY) \ - } - -// wide reduction loop - unsigned -#define VI_ULOOP_WIDE_REDUCTION_BASE(sew1, sew2) \ - reg_t vl = P.VU.vl->read(); \ - reg_t rd_num = insn.rd(); \ - reg_t rs1_num = insn.rs1(); \ - reg_t rs2_num = insn.rs2(); \ - auto &vd_0_des = P.VU.elt::type>(rd_num, 0, true); \ - auto vd_0_res = P.VU.elt::type>(rs1_num, 0); \ - for (reg_t i = P.VU.vstart->read(); i < vl; ++i) { \ - VI_LOOP_ELEMENT_SKIP(); \ - auto vs2 = P.VU.elt::type>(rs2_num, i); - -#define WIDE_REDUCTION_ULOOP(sew1, sew2, BODY) \ - VI_ULOOP_WIDE_REDUCTION_BASE(sew1, sew2) \ - BODY; \ - VI_LOOP_REDUCTION_END(sew2) - -#define VI_VV_ULOOP_WIDE_REDUCTION(BODY) \ - VI_CHECK_REDUCTION(true); \ - reg_t sew = P.VU.vsew; \ - if (sew == e8) { \ - WIDE_REDUCTION_ULOOP(e8, e16, BODY) \ - } else if (sew == e16) { \ - WIDE_REDUCTION_ULOOP(e16, e32, BODY) \ - } else if (sew == e32) { \ - WIDE_REDUCTION_ULOOP(e32, e64, BODY) \ - } - -// carry/borrow bit loop -#define VI_VV_LOOP_CARRY(BODY) \ - VI_CHECK_MSS(true); \ - VI_LOOP_CARRY_BASE \ - if (sew == e8) { \ - VV_CARRY_PARAMS(e8) \ - BODY; \ - } else if (sew == e16) { \ - VV_CARRY_PARAMS(e16) \ - BODY; \ - } else if (sew == e32) { \ - VV_CARRY_PARAMS(e32) \ - BODY; \ - } else if (sew == e64) { \ - 
VV_CARRY_PARAMS(e64) \ - BODY; \ - } \ - VI_LOOP_CARRY_END - -#define VI_XI_LOOP_CARRY(BODY) \ - VI_CHECK_MSS(false); \ - VI_LOOP_CARRY_BASE \ - if (sew == e8) { \ - XI_CARRY_PARAMS(e8) \ - BODY; \ - } else if (sew == e16) { \ - XI_CARRY_PARAMS(e16) \ - BODY; \ - } else if (sew == e32) { \ - XI_CARRY_PARAMS(e32) \ - BODY; \ - } else if (sew == e64) { \ - XI_CARRY_PARAMS(e64) \ - BODY; \ - } \ - VI_LOOP_CARRY_END - -#define VI_VV_LOOP_WITH_CARRY(BODY) \ - require_vm; \ - VI_CHECK_SSS(true); \ - VI_LOOP_WITH_CARRY_BASE \ - if (sew == e8) { \ - VV_WITH_CARRY_PARAMS(e8) \ - BODY; \ - } else if (sew == e16) { \ - VV_WITH_CARRY_PARAMS(e16) \ - BODY; \ - } else if (sew == e32) { \ - VV_WITH_CARRY_PARAMS(e32) \ - BODY; \ - } else if (sew == e64) { \ - VV_WITH_CARRY_PARAMS(e64) \ - BODY; \ - } \ - VI_LOOP_END - -#define VI_XI_LOOP_WITH_CARRY(BODY) \ - require_vm; \ - VI_CHECK_SSS(false); \ - VI_LOOP_WITH_CARRY_BASE \ - if (sew == e8) { \ - XI_WITH_CARRY_PARAMS(e8) \ - BODY; \ - } else if (sew == e16) { \ - XI_WITH_CARRY_PARAMS(e16) \ - BODY; \ - } else if (sew == e32) { \ - XI_WITH_CARRY_PARAMS(e32) \ - BODY; \ - } else if (sew == e64) { \ - XI_WITH_CARRY_PARAMS(e64) \ - BODY; \ - } \ - VI_LOOP_END - -// average loop -#define VI_VV_LOOP_AVG(op) \ -VRM xrm = p->VU.get_vround_mode(); \ -VI_VV_LOOP({ \ - uint128_t res = ((uint128_t)vs2) op vs1; \ - INT_ROUNDING(res, xrm, 1); \ - vd = res >> 1; \ -}) - -#define VI_VX_LOOP_AVG(op) \ -VRM xrm = p->VU.get_vround_mode(); \ -VI_VX_LOOP({ \ - uint128_t res = ((uint128_t)vs2) op rs1; \ - INT_ROUNDING(res, xrm, 1); \ - vd = res >> 1; \ -}) - -#define VI_VV_ULOOP_AVG(op) \ -VRM xrm = p->VU.get_vround_mode(); \ -VI_VV_ULOOP({ \ - uint128_t res = ((uint128_t)vs2) op vs1; \ - INT_ROUNDING(res, xrm, 1); \ - vd = res >> 1; \ -}) - -#define VI_VX_ULOOP_AVG(op) \ -VRM xrm = p->VU.get_vround_mode(); \ -VI_VX_ULOOP({ \ - uint128_t res = ((uint128_t)vs2) op rs1; \ - INT_ROUNDING(res, xrm, 1); \ - vd = res >> 1; \ -}) - -// -// vector: load/store helper -// -#define VI_STRIP(inx) \ - reg_t vreg_inx = inx; - -#define VI_DUPLICATE_VREG(reg_num, idx_sew) \ -reg_t index[P.VU.vlmax]; \ - for (reg_t i = 0; i < P.VU.vlmax && P.VU.vl->read() != 0; ++i) { \ - switch (idx_sew) { \ - case e8: \ - index[i] = P.VU.elt(reg_num, i); \ - break; \ - case e16: \ - index[i] = P.VU.elt(reg_num, i); \ - break; \ - case e32: \ - index[i] = P.VU.elt(reg_num, i); \ - break; \ - case e64: \ - index[i] = P.VU.elt(reg_num, i); \ - break; \ - } \ -} - -#define VI_LD(stride, offset, elt_width, is_mask_ldst) \ - const reg_t nf = insn.v_nf() + 1; \ - const reg_t vl = is_mask_ldst ? 
((P.VU.vl->read() + 7) / 8) : P.VU.vl->read(); \ - const reg_t baseAddr = RS1; \ - const reg_t vd = insn.rd(); \ - VI_CHECK_LOAD(elt_width, is_mask_ldst); \ - for (reg_t i = 0; i < vl; ++i) { \ - VI_ELEMENT_SKIP(i); \ - VI_STRIP(i); \ - P.VU.vstart->write(i); \ - for (reg_t fn = 0; fn < nf; ++fn) { \ - elt_width##_t val = MMU.load_##elt_width( \ - baseAddr + (stride) + (offset) * sizeof(elt_width##_t)); \ - P.VU.elt(vd + fn * emul, vreg_inx, true) = val; \ - } \ - } \ - P.VU.vstart->write(0); - -#define VI_LD_INDEX(elt_width, is_seg) \ - const reg_t nf = insn.v_nf() + 1; \ - const reg_t vl = P.VU.vl->read(); \ - const reg_t baseAddr = RS1; \ - const reg_t vd = insn.rd(); \ - if (!is_seg) \ - require(nf == 1); \ - VI_CHECK_LD_INDEX(elt_width); \ - VI_DUPLICATE_VREG(insn.rs2(), elt_width); \ - for (reg_t i = 0; i < vl; ++i) { \ - VI_ELEMENT_SKIP(i); \ - VI_STRIP(i); \ - P.VU.vstart->write(i); \ - for (reg_t fn = 0; fn < nf; ++fn) { \ - switch (P.VU.vsew) { \ - case e8: \ - P.VU.elt(vd + fn * flmul, vreg_inx, true) = \ - MMU.load_uint8(baseAddr + index[i] + fn * 1); \ - break; \ - case e16: \ - P.VU.elt(vd + fn * flmul, vreg_inx, true) = \ - MMU.load_uint16(baseAddr + index[i] + fn * 2); \ - break; \ - case e32: \ - P.VU.elt(vd + fn * flmul, vreg_inx, true) = \ - MMU.load_uint32(baseAddr + index[i] + fn * 4); \ - break; \ - default: \ - P.VU.elt(vd + fn * flmul, vreg_inx, true) = \ - MMU.load_uint64(baseAddr + index[i] + fn * 8); \ - break; \ - } \ - } \ - } \ - P.VU.vstart->write(0); - -#define VI_ST(stride, offset, elt_width, is_mask_ldst) \ - const reg_t nf = insn.v_nf() + 1; \ - const reg_t vl = is_mask_ldst ? ((P.VU.vl->read() + 7) / 8) : P.VU.vl->read(); \ - const reg_t baseAddr = RS1; \ - const reg_t vs3 = insn.rd(); \ - VI_CHECK_STORE(elt_width, is_mask_ldst); \ - for (reg_t i = 0; i < vl; ++i) { \ - VI_STRIP(i) \ - VI_ELEMENT_SKIP(i); \ - P.VU.vstart->write(i); \ - for (reg_t fn = 0; fn < nf; ++fn) { \ - elt_width##_t val = P.VU.elt(vs3 + fn * emul, vreg_inx); \ - MMU.store_##elt_width( \ - baseAddr + (stride) + (offset) * sizeof(elt_width##_t), val); \ - } \ - } \ - P.VU.vstart->write(0); - -#define VI_ST_INDEX(elt_width, is_seg) \ - const reg_t nf = insn.v_nf() + 1; \ - const reg_t vl = P.VU.vl->read(); \ - const reg_t baseAddr = RS1; \ - const reg_t vs3 = insn.rd(); \ - if (!is_seg) \ - require(nf == 1); \ - VI_CHECK_ST_INDEX(elt_width); \ - VI_DUPLICATE_VREG(insn.rs2(), elt_width); \ - for (reg_t i = 0; i < vl; ++i) { \ - VI_STRIP(i) \ - VI_ELEMENT_SKIP(i); \ - P.VU.vstart->write(i); \ - for (reg_t fn = 0; fn < nf; ++fn) { \ - switch (P.VU.vsew) { \ - case e8: \ - MMU.store_uint8(baseAddr + index[i] + fn * 1, \ - P.VU.elt(vs3 + fn * flmul, vreg_inx)); \ - break; \ - case e16: \ - MMU.store_uint16(baseAddr + index[i] + fn * 2, \ - P.VU.elt(vs3 + fn * flmul, vreg_inx)); \ - break; \ - case e32: \ - MMU.store_uint32(baseAddr + index[i] + fn * 4, \ - P.VU.elt(vs3 + fn * flmul, vreg_inx)); \ - break; \ - default: \ - MMU.store_uint64(baseAddr + index[i] + fn * 8, \ - P.VU.elt(vs3 + fn * flmul, vreg_inx)); \ - break; \ - } \ - } \ - } \ - P.VU.vstart->write(0); - -#define VI_LDST_FF(elt_width) \ - const reg_t nf = insn.v_nf() + 1; \ - const reg_t sew = p->VU.vsew; \ - const reg_t vl = p->VU.vl->read(); \ - const reg_t baseAddr = RS1; \ - const reg_t rd_num = insn.rd(); \ - VI_CHECK_LOAD(elt_width, false); \ - bool early_stop = false; \ - for (reg_t i = p->VU.vstart->read(); i < vl; ++i) { \ - VI_STRIP(i); \ - VI_ELEMENT_SKIP(i); \ - \ - for (reg_t fn = 0; fn < nf; ++fn) { \ - uint64_t 
val; \ - try { \ - val = MMU.load_##elt_width( \ - baseAddr + (i * nf + fn) * sizeof(elt_width##_t)); \ - } catch (trap_t& t) { \ - if (i == 0) \ - throw; /* Only take exception on zeroth element */ \ - /* Reduce VL if an exception occurs on a later element */ \ - early_stop = true; \ - P.VU.vl->write_raw(i); \ - break; \ - } \ - p->VU.elt(rd_num + fn * emul, vreg_inx, true) = val; \ - } \ - \ - if (early_stop) { \ - break; \ - } \ - } \ - p->VU.vstart->write(0); - -#define VI_LD_WHOLE(elt_width) \ - require_vector_novtype(true, false); \ - require(sizeof(elt_width ## _t) * 8 <= P.VU.ELEN); \ - const reg_t baseAddr = RS1; \ - const reg_t vd = insn.rd(); \ - const reg_t len = insn.v_nf() + 1; \ - require_align(vd, len); \ - const reg_t elt_per_reg = P.VU.vlenb / sizeof(elt_width ## _t); \ - const reg_t size = len * elt_per_reg; \ - if (P.VU.vstart->read() < size) { \ - reg_t i = P.VU.vstart->read() / elt_per_reg; \ - reg_t off = P.VU.vstart->read() % elt_per_reg; \ - if (off) { \ - for (reg_t pos = off; pos < elt_per_reg; ++pos) { \ - auto val = MMU.load_## elt_width(baseAddr + \ - P.VU.vstart->read() * sizeof(elt_width ## _t)); \ - P.VU.elt(vd + i, pos, true) = val; \ - P.VU.vstart->write(P.VU.vstart->read() + 1); \ - } \ - ++i; \ - } \ - for (; i < len; ++i) { \ - for (reg_t pos = 0; pos < elt_per_reg; ++pos) { \ - auto val = MMU.load_## elt_width(baseAddr + \ - P.VU.vstart->read() * sizeof(elt_width ## _t)); \ - P.VU.elt(vd + i, pos, true) = val; \ - P.VU.vstart->write(P.VU.vstart->read() + 1); \ - } \ - } \ - } \ - P.VU.vstart->write(0); - -#define VI_ST_WHOLE \ - require_vector_novtype(true, false); \ - const reg_t baseAddr = RS1; \ - const reg_t vs3 = insn.rd(); \ - const reg_t len = insn.v_nf() + 1; \ - require_align(vs3, len); \ - const reg_t size = len * P.VU.vlenb; \ - \ - if (P.VU.vstart->read() < size) { \ - reg_t i = P.VU.vstart->read() / P.VU.vlenb; \ - reg_t off = P.VU.vstart->read() % P.VU.vlenb; \ - if (off) { \ - for (reg_t pos = off; pos < P.VU.vlenb; ++pos) { \ - auto val = P.VU.elt(vs3 + i, pos); \ - MMU.store_uint8(baseAddr + P.VU.vstart->read(), val); \ - P.VU.vstart->write(P.VU.vstart->read() + 1); \ - } \ - i++; \ - } \ - for (; i < len; ++i) { \ - for (reg_t pos = 0; pos < P.VU.vlenb; ++pos) { \ - auto val = P.VU.elt(vs3 + i, pos); \ - MMU.store_uint8(baseAddr + P.VU.vstart->read(), val); \ - P.VU.vstart->write(P.VU.vstart->read() + 1); \ - } \ - } \ - } \ - P.VU.vstart->write(0); - -// -// vector: amo -// -#define VI_AMO(op, type, idx_type) \ - require_vector(false); \ - require_align(insn.rd(), P.VU.vflmul); \ - require(P.VU.vsew <= P.get_xlen() && P.VU.vsew >= 32); \ - require_align(insn.rd(), P.VU.vflmul); \ - float vemul = ((float)idx_type / P.VU.vsew * P.VU.vflmul); \ - require(vemul >= 0.125 && vemul <= 8); \ - require_align(insn.rs2(), vemul); \ - if (insn.v_wd()) { \ - require_vm; \ - if (idx_type > P.VU.vsew) { \ - if (insn.rd() != insn.rs2()) \ - require_noover(insn.rd(), P.VU.vflmul, insn.rs2(), vemul); \ - } else if (idx_type < P.VU.vsew) { \ - if (vemul < 1) { \ - require_noover(insn.rd(), P.VU.vflmul, insn.rs2(), vemul); \ - } else { \ - require_noover_widen(insn.rd(), P.VU.vflmul, insn.rs2(), vemul); \ - } \ - } \ - } \ - VI_DUPLICATE_VREG(insn.rs2(), idx_type); \ - const reg_t vl = P.VU.vl->read(); \ - const reg_t baseAddr = RS1; \ - const reg_t vd = insn.rd(); \ - for (reg_t i = P.VU.vstart->read(); i < vl; ++i) { \ - VI_ELEMENT_SKIP(i); \ - VI_STRIP(i); \ - P.VU.vstart->write(i); \ - switch (P.VU.vsew) { \ - case e32: { \ - auto vs3 = P.VU.elt< 
type ## 32_t>(vd, vreg_inx); \ - auto val = MMU.amo_uint32(baseAddr + index[i], [&](type ## 32_t lhs) { op }); \ - if (insn.v_wd()) \ - P.VU.elt< type ## 32_t>(vd, vreg_inx, true) = val; \ - } \ - break; \ - case e64: { \ - auto vs3 = P.VU.elt< type ## 64_t>(vd, vreg_inx); \ - auto val = MMU.amo_uint64(baseAddr + index[i], [&](type ## 64_t lhs) { op }); \ - if (insn.v_wd()) \ - P.VU.elt< type ## 64_t>(vd, vreg_inx, true) = val; \ - } \ - break; \ - default: \ - require(0); \ - break; \ - } \ - } \ - P.VU.vstart->write(0); - -// vector: sign/unsiged extension -#define VI_VV_EXT(div, type) \ - require(insn.rd() != insn.rs2()); \ - require_vm; \ - reg_t from = P.VU.vsew / div; \ - require(from >= e8 && from <= e64); \ - require(((float)P.VU.vflmul / div) >= 0.125 && ((float)P.VU.vflmul / div) <= 8 ); \ - require_align(insn.rd(), P.VU.vflmul); \ - require_align(insn.rs2(), P.VU.vflmul / div); \ - if ((P.VU.vflmul / div) < 1) { \ - require_noover(insn.rd(), P.VU.vflmul, insn.rs2(), P.VU.vflmul / div); \ - } else { \ - require_noover_widen(insn.rd(), P.VU.vflmul, insn.rs2(), P.VU.vflmul / div); \ - } \ - reg_t pat = (((P.VU.vsew >> 3) << 4) | from >> 3); \ - VI_GENERAL_LOOP_BASE \ - VI_LOOP_ELEMENT_SKIP(); \ - switch (pat) { \ - case 0x21: \ - P.VU.elt(rd_num, i, true) = P.VU.elt(rs2_num, i); \ - break; \ - case 0x41: \ - P.VU.elt(rd_num, i, true) = P.VU.elt(rs2_num, i); \ - break; \ - case 0x81: \ - P.VU.elt(rd_num, i, true) = P.VU.elt(rs2_num, i); \ - break; \ - case 0x42: \ - P.VU.elt(rd_num, i, true) = P.VU.elt(rs2_num, i); \ - break; \ - case 0x82: \ - P.VU.elt(rd_num, i, true) = P.VU.elt(rs2_num, i); \ - break; \ - case 0x84: \ - P.VU.elt(rd_num, i, true) = P.VU.elt(rs2_num, i); \ - break; \ - case 0x88: \ - P.VU.elt(rd_num, i, true) = P.VU.elt(rs2_num, i); \ - break; \ - default: \ - break; \ - } \ - VI_LOOP_END - -// -// vector: vfp helper -// -#define VI_VFP_COMMON \ - require_fp; \ - require((P.VU.vsew == e16 && p->extension_enabled(EXT_ZFH)) || \ - (P.VU.vsew == e32 && p->extension_enabled('F')) || \ - (P.VU.vsew == e64 && p->extension_enabled('D'))); \ - require_vector(true); \ - require(STATE.frm->read() < 0x5); \ - reg_t vl = P.VU.vl->read(); \ - reg_t rd_num = insn.rd(); \ - reg_t rs1_num = insn.rs1(); \ - reg_t rs2_num = insn.rs2(); \ - softfloat_roundingMode = STATE.frm->read(); - -#define VI_VFP_LOOP_BASE \ - VI_VFP_COMMON \ - for (reg_t i = P.VU.vstart->read(); i < vl; ++i) { \ - VI_LOOP_ELEMENT_SKIP(); - -#define VI_VFP_LOOP_CMP_BASE \ - VI_VFP_COMMON \ - for (reg_t i = P.VU.vstart->read(); i < vl; ++i) { \ - VI_LOOP_ELEMENT_SKIP(); \ - uint64_t mmask = UINT64_C(1) << mpos; \ - uint64_t &vd = P.VU.elt(rd_num, midx, true); \ - uint64_t res = 0; - -#define VI_VFP_LOOP_REDUCTION_BASE(width) \ - float##width##_t vd_0 = P.VU.elt(rd_num, 0); \ - float##width##_t vs1_0 = P.VU.elt(rs1_num, 0); \ - vd_0 = vs1_0; \ - bool is_active = false; \ - for (reg_t i = P.VU.vstart->read(); i < vl; ++i) { \ - VI_LOOP_ELEMENT_SKIP(); \ - float##width##_t vs2 = P.VU.elt(rs2_num, i); \ - is_active = true; \ - -#define VI_VFP_LOOP_WIDE_REDUCTION_BASE \ - VI_VFP_COMMON \ - float64_t vd_0 = f64(P.VU.elt(rs1_num, 0).v); \ - for (reg_t i = P.VU.vstart->read(); i < vl; ++i) { \ - VI_LOOP_ELEMENT_SKIP(); - -#define VI_VFP_LOOP_END \ - } \ - P.VU.vstart->write(0); \ - -#define VI_VFP_LOOP_REDUCTION_END(x) \ - } \ - P.VU.vstart->write(0); \ - if (vl > 0) { \ - if (is_propagate && !is_active) { \ - switch (x) { \ - case e16: { \ - auto ret = f16_classify(f16(vd_0.v)); \ - if (ret & 0x300) { \ - if (ret & 
0x100) { \ - softfloat_exceptionFlags |= softfloat_flag_invalid; \ - set_fp_exceptions; \ - } \ - P.VU.elt(rd_num, 0, true) = defaultNaNF16UI; \ - } else { \ - P.VU.elt(rd_num, 0, true) = vd_0.v; \ - } \ - } \ - break; \ - case e32: { \ - auto ret = f32_classify(f32(vd_0.v)); \ - if (ret & 0x300) { \ - if (ret & 0x100) { \ - softfloat_exceptionFlags |= softfloat_flag_invalid; \ - set_fp_exceptions; \ - } \ - P.VU.elt(rd_num, 0, true) = defaultNaNF32UI; \ - } else { \ - P.VU.elt(rd_num, 0, true) = vd_0.v; \ - } \ - } \ - break; \ - case e64: { \ - auto ret = f64_classify(f64(vd_0.v)); \ - if (ret & 0x300) { \ - if (ret & 0x100) { \ - softfloat_exceptionFlags |= softfloat_flag_invalid; \ - set_fp_exceptions; \ - } \ - P.VU.elt(rd_num, 0, true) = defaultNaNF64UI; \ - } else { \ - P.VU.elt(rd_num, 0, true) = vd_0.v; \ - } \ - } \ - break; \ - } \ - } else { \ - P.VU.elt::type>(rd_num, 0, true) = vd_0.v; \ - } \ - } - -#define VI_VFP_LOOP_CMP_END \ - switch (P.VU.vsew) { \ - case e16: \ - case e32: \ - case e64: { \ - vd = (vd & ~mmask) | (((res) << mpos) & mmask); \ - break; \ - } \ - default: \ - require(0); \ - break; \ - }; \ - } \ - P.VU.vstart->write(0); - -#define VI_VFP_VV_LOOP(BODY16, BODY32, BODY64) \ - VI_CHECK_SSS(true); \ - VI_VFP_LOOP_BASE \ - switch (P.VU.vsew) { \ - case e16: { \ - VFP_VV_PARAMS(16); \ - BODY16; \ - set_fp_exceptions; \ - break; \ - } \ - case e32: { \ - VFP_VV_PARAMS(32); \ - BODY32; \ - set_fp_exceptions; \ - break; \ - } \ - case e64: { \ - VFP_VV_PARAMS(64); \ - BODY64; \ - set_fp_exceptions; \ - break; \ - } \ - default: \ - require(0); \ - break; \ - }; \ - DEBUG_RVV_FP_VV; \ - VI_VFP_LOOP_END - -#define VI_VFP_V_LOOP(BODY16, BODY32, BODY64) \ - VI_CHECK_SSS(false); \ - VI_VFP_LOOP_BASE \ - switch (P.VU.vsew) { \ - case e16: { \ - VFP_V_PARAMS(16); \ - BODY16; \ - break; \ - } \ - case e32: { \ - VFP_V_PARAMS(32); \ - BODY32; \ - break; \ - } \ - case e64: { \ - VFP_V_PARAMS(64); \ - BODY64; \ - break; \ - } \ - default: \ - require(0); \ - break; \ - }; \ - set_fp_exceptions; \ - VI_VFP_LOOP_END - -#define VI_VFP_VV_LOOP_REDUCTION(BODY16, BODY32, BODY64) \ - VI_CHECK_REDUCTION(false) \ - VI_VFP_COMMON \ - switch (P.VU.vsew) { \ - case e16: { \ - VI_VFP_LOOP_REDUCTION_BASE(16) \ - BODY16; \ - set_fp_exceptions; \ - VI_VFP_LOOP_REDUCTION_END(e16) \ - break; \ - } \ - case e32: { \ - VI_VFP_LOOP_REDUCTION_BASE(32) \ - BODY32; \ - set_fp_exceptions; \ - VI_VFP_LOOP_REDUCTION_END(e32) \ - break; \ - } \ - case e64: { \ - VI_VFP_LOOP_REDUCTION_BASE(64) \ - BODY64; \ - set_fp_exceptions; \ - VI_VFP_LOOP_REDUCTION_END(e64) \ - break; \ - } \ - default: \ - require(0); \ - break; \ - }; \ - -#define VI_VFP_VV_LOOP_WIDE_REDUCTION(BODY16, BODY32) \ - VI_CHECK_REDUCTION(true) \ - VI_VFP_COMMON \ - require((P.VU.vsew == e16 && p->extension_enabled('F')) || \ - (P.VU.vsew == e32 && p->extension_enabled('D'))); \ - bool is_active = false; \ - switch (P.VU.vsew) { \ - case e16: { \ - float32_t vd_0 = P.VU.elt(rs1_num, 0); \ - for (reg_t i = P.VU.vstart->read(); i < vl; ++i) { \ - VI_LOOP_ELEMENT_SKIP(); \ - is_active = true; \ - float32_t vs2 = f16_to_f32(P.VU.elt(rs2_num, i)); \ - BODY16; \ - set_fp_exceptions; \ - VI_VFP_LOOP_REDUCTION_END(e32) \ - break; \ - } \ - case e32: { \ - float64_t vd_0 = P.VU.elt(rs1_num, 0); \ - for (reg_t i = P.VU.vstart->read(); i < vl; ++i) { \ - VI_LOOP_ELEMENT_SKIP(); \ - is_active = true; \ - float64_t vs2 = f32_to_f64(P.VU.elt(rs2_num, i)); \ - BODY32; \ - set_fp_exceptions; \ - VI_VFP_LOOP_REDUCTION_END(e64) \ - break; \ - } \ - 
default: \ - require(0); \ - break; \ - }; \ - -#define VI_VFP_VF_LOOP(BODY16, BODY32, BODY64) \ - VI_CHECK_SSS(false); \ - VI_VFP_LOOP_BASE \ - switch (P.VU.vsew) { \ - case e16: { \ - VFP_VF_PARAMS(16); \ - BODY16; \ - set_fp_exceptions; \ - break; \ - } \ - case e32: { \ - VFP_VF_PARAMS(32); \ - BODY32; \ - set_fp_exceptions; \ - break; \ - } \ - case e64: { \ - VFP_VF_PARAMS(64); \ - BODY64; \ - set_fp_exceptions; \ - break; \ - } \ - default: \ - require(0); \ - break; \ - }; \ - DEBUG_RVV_FP_VF; \ - VI_VFP_LOOP_END - -#define VI_VFP_VV_LOOP_CMP(BODY16, BODY32, BODY64) \ - VI_CHECK_MSS(true); \ - VI_VFP_LOOP_CMP_BASE \ - switch (P.VU.vsew) { \ - case e16: { \ - VFP_VV_PARAMS(16); \ - BODY16; \ - set_fp_exceptions; \ - break; \ - } \ - case e32: { \ - VFP_VV_PARAMS(32); \ - BODY32; \ - set_fp_exceptions; \ - break; \ - } \ - case e64: { \ - VFP_VV_PARAMS(64); \ - BODY64; \ - set_fp_exceptions; \ - break; \ - } \ - default: \ - require(0); \ - break; \ - }; \ - VI_VFP_LOOP_CMP_END \ - -#define VI_VFP_VF_LOOP_CMP(BODY16, BODY32, BODY64) \ - VI_CHECK_MSS(false); \ - VI_VFP_LOOP_CMP_BASE \ - switch (P.VU.vsew) { \ - case e16: { \ - VFP_VF_PARAMS(16); \ - BODY16; \ - set_fp_exceptions; \ - break; \ - } \ - case e32: { \ - VFP_VF_PARAMS(32); \ - BODY32; \ - set_fp_exceptions; \ - break; \ - } \ - case e64: { \ - VFP_VF_PARAMS(64); \ - BODY64; \ - set_fp_exceptions; \ - break; \ - } \ - default: \ - require(0); \ - break; \ - }; \ - VI_VFP_LOOP_CMP_END \ - -#define VI_VFP_VF_LOOP_WIDE(BODY16, BODY32) \ - VI_CHECK_DSS(false); \ - VI_VFP_LOOP_BASE \ - switch (P.VU.vsew) { \ - case e16: { \ - float32_t &vd = P.VU.elt(rd_num, i, true); \ - float32_t vs2 = f16_to_f32(P.VU.elt(rs2_num, i)); \ - float32_t rs1 = f16_to_f32(f16(READ_FREG(rs1_num))); \ - BODY16; \ - set_fp_exceptions; \ - break; \ - } \ - case e32: { \ - float64_t &vd = P.VU.elt(rd_num, i, true); \ - float64_t vs2 = f32_to_f64(P.VU.elt(rs2_num, i)); \ - float64_t rs1 = f32_to_f64(f32(READ_FREG(rs1_num))); \ - BODY32; \ - set_fp_exceptions; \ - break; \ - } \ - default: \ - require(0); \ - break; \ - }; \ - DEBUG_RVV_FP_VV; \ - VI_VFP_LOOP_END - - -#define VI_VFP_VV_LOOP_WIDE(BODY16, BODY32) \ - VI_CHECK_DSS(true); \ - VI_VFP_LOOP_BASE \ - switch (P.VU.vsew) { \ - case e16: { \ - float32_t &vd = P.VU.elt(rd_num, i, true); \ - float32_t vs2 = f16_to_f32(P.VU.elt(rs2_num, i)); \ - float32_t vs1 = f16_to_f32(P.VU.elt(rs1_num, i)); \ - BODY16; \ - set_fp_exceptions; \ - break; \ - } \ - case e32: { \ - float64_t &vd = P.VU.elt(rd_num, i, true); \ - float64_t vs2 = f32_to_f64(P.VU.elt(rs2_num, i)); \ - float64_t vs1 = f32_to_f64(P.VU.elt(rs1_num, i)); \ - BODY32; \ - set_fp_exceptions; \ - break; \ - } \ - default: \ - require(0); \ - break; \ - }; \ - DEBUG_RVV_FP_VV; \ - VI_VFP_LOOP_END - -#define VI_VFP_WF_LOOP_WIDE(BODY16, BODY32) \ - VI_CHECK_DDS(false); \ - VI_VFP_LOOP_BASE \ - switch (P.VU.vsew) { \ - case e16: { \ - float32_t &vd = P.VU.elt(rd_num, i, true); \ - float32_t vs2 = P.VU.elt(rs2_num, i); \ - float32_t rs1 = f16_to_f32(f16(READ_FREG(rs1_num))); \ - BODY16; \ - set_fp_exceptions; \ - break; \ - } \ - case e32: { \ - float64_t &vd = P.VU.elt(rd_num, i, true); \ - float64_t vs2 = P.VU.elt(rs2_num, i); \ - float64_t rs1 = f32_to_f64(f32(READ_FREG(rs1_num))); \ - BODY32; \ - set_fp_exceptions; \ - break; \ - } \ - default: \ - require(0); \ - }; \ - DEBUG_RVV_FP_VV; \ - VI_VFP_LOOP_END - -#define VI_VFP_WV_LOOP_WIDE(BODY16, BODY32) \ - VI_CHECK_DDS(true); \ - VI_VFP_LOOP_BASE \ - switch (P.VU.vsew) { \ - case e16: { \ - 
float32_t &vd = P.VU.elt(rd_num, i, true); \ - float32_t vs2 = P.VU.elt(rs2_num, i); \ - float32_t vs1 = f16_to_f32(P.VU.elt(rs1_num, i)); \ - BODY16; \ - set_fp_exceptions; \ - break; \ - } \ - case e32: { \ - float64_t &vd = P.VU.elt(rd_num, i, true); \ - float64_t vs2 = P.VU.elt(rs2_num, i); \ - float64_t vs1 = f32_to_f64(P.VU.elt(rs1_num, i)); \ - BODY32; \ - set_fp_exceptions; \ - break; \ - } \ - default: \ - require(0); \ - }; \ - DEBUG_RVV_FP_VV; \ - VI_VFP_LOOP_END - -#define VI_VFP_LOOP_SCALE_BASE \ - require_fp; \ - require_vector(true); \ - require(STATE.frm->read() < 0x5); \ - reg_t vl = P.VU.vl->read(); \ - reg_t rd_num = insn.rd(); \ - reg_t rs1_num = insn.rs1(); \ - reg_t rs2_num = insn.rs2(); \ - softfloat_roundingMode = STATE.frm->read(); \ - for (reg_t i = P.VU.vstart->read(); i < vl; ++i) { \ - VI_LOOP_ELEMENT_SKIP(); - -#define VI_VFP_CVT_LOOP(CVT_PARAMS, CHECK, BODY) \ - CHECK \ - VI_VFP_LOOP_SCALE_BASE \ - CVT_PARAMS \ - BODY \ - set_fp_exceptions; \ - VI_VFP_LOOP_END - -#define VI_VFP_CVT_INT_TO_FP(BODY16, BODY32, BODY64, sign) \ - VI_CHECK_SSS(false); \ - VI_VFP_COMMON \ - switch (P.VU.vsew) { \ - case e16: \ - { VI_VFP_CVT_LOOP(CVT_INT_TO_FP_PARAMS(16, 16, sign), \ - { p->extension_enabled(EXT_ZFH); }, \ - BODY16); } \ - break; \ - case e32: \ - { VI_VFP_CVT_LOOP(CVT_INT_TO_FP_PARAMS(32, 32, sign), \ - { p->extension_enabled('F'); }, \ - BODY32); } \ - break; \ - case e64: \ - { VI_VFP_CVT_LOOP(CVT_INT_TO_FP_PARAMS(64, 64, sign), \ - { p->extension_enabled('D'); }, \ - BODY64); } \ - break; \ - default: \ - require(0); \ - break; \ - } - -#define VI_VFP_CVT_FP_TO_INT(BODY16, BODY32, BODY64, sign) \ - VI_CHECK_SSS(false); \ - VI_VFP_COMMON \ - switch (P.VU.vsew) { \ - case e16: \ - { VI_VFP_CVT_LOOP(CVT_FP_TO_INT_PARAMS(16, 16, sign), \ - { p->extension_enabled(EXT_ZFH); }, \ - BODY16); } \ - break; \ - case e32: \ - { VI_VFP_CVT_LOOP(CVT_FP_TO_INT_PARAMS(32, 32, sign), \ - { p->extension_enabled('F'); }, \ - BODY32); } \ - break; \ - case e64: \ - { VI_VFP_CVT_LOOP(CVT_FP_TO_INT_PARAMS(64, 64, sign), \ - { p->extension_enabled('D'); }, \ - BODY64); } \ - break; \ - default: \ - require(0); \ - break; \ - } - -#define VI_VFP_WCVT_FP_TO_FP(BODY8, BODY16, BODY32, \ - CHECK8, CHECK16, CHECK32) \ - VI_CHECK_DSS(false); \ - switch (P.VU.vsew) { \ - case e16: \ - { VI_VFP_CVT_LOOP(CVT_FP_TO_FP_PARAMS(16, 32), CHECK16, BODY16); } \ - break; \ - case e32: \ - { VI_VFP_CVT_LOOP(CVT_FP_TO_FP_PARAMS(32, 64), CHECK32, BODY32); } \ - break; \ - default: \ - require(0); \ - break; \ - } - -#define VI_VFP_WCVT_INT_TO_FP(BODY8, BODY16, BODY32, \ - CHECK8, CHECK16, CHECK32, \ - sign) \ - VI_CHECK_DSS(false); \ - switch (P.VU.vsew) { \ - case e8: \ - { VI_VFP_CVT_LOOP(CVT_INT_TO_FP_PARAMS(8, 16, sign), CHECK8, BODY8); } \ - break; \ - case e16: \ - { VI_VFP_CVT_LOOP(CVT_INT_TO_FP_PARAMS(16, 32, sign), CHECK16, BODY16); } \ - break; \ - case e32: \ - { VI_VFP_CVT_LOOP(CVT_INT_TO_FP_PARAMS(32, 64, sign), CHECK32, BODY32); } \ - break; \ - default: \ - require(0); \ - break; \ - } - -#define VI_VFP_WCVT_FP_TO_INT(BODY8, BODY16, BODY32, \ - CHECK8, CHECK16, CHECK32, \ - sign) \ - VI_CHECK_DSS(false); \ - switch (P.VU.vsew) { \ - case e16: \ - { VI_VFP_CVT_LOOP(CVT_FP_TO_INT_PARAMS(16, 32, sign), CHECK16, BODY16); } \ - break; \ - case e32: \ - { VI_VFP_CVT_LOOP(CVT_FP_TO_INT_PARAMS(32, 64, sign), CHECK32, BODY32); } \ - break; \ - default: \ - require(0); \ - break; \ - } - -#define VI_VFP_NCVT_FP_TO_FP(BODY8, BODY16, BODY32, \ - CHECK8, CHECK16, CHECK32) \ - VI_CHECK_SDS(false); \ - 
switch (P.VU.vsew) { \ - case e16: \ - { VI_VFP_CVT_LOOP(CVT_FP_TO_FP_PARAMS(32, 16), CHECK16, BODY16); } \ - break; \ - case e32: \ - { VI_VFP_CVT_LOOP(CVT_FP_TO_FP_PARAMS(64, 32), CHECK32, BODY32); } \ - break; \ - default: \ - require(0); \ - break; \ - } - -#define VI_VFP_NCVT_INT_TO_FP(BODY8, BODY16, BODY32, \ - CHECK8, CHECK16, CHECK32, \ - sign) \ - VI_CHECK_SDS(false); \ - switch (P.VU.vsew) { \ - case e16: \ - { VI_VFP_CVT_LOOP(CVT_INT_TO_FP_PARAMS(32, 16, sign), CHECK16, BODY16); } \ - break; \ - case e32: \ - { VI_VFP_CVT_LOOP(CVT_INT_TO_FP_PARAMS(64, 32, sign), CHECK32, BODY32); } \ - break; \ - default: \ - require(0); \ - break; \ - } - -#define VI_VFP_NCVT_FP_TO_INT(BODY8, BODY16, BODY32, \ - CHECK8, CHECK16, CHECK32, \ - sign) \ - VI_CHECK_SDS(false); \ - switch (P.VU.vsew) { \ - case e8: \ - { VI_VFP_CVT_LOOP(CVT_FP_TO_INT_PARAMS(16, 8, sign), CHECK8, BODY8); } \ - break; \ - case e16: \ - { VI_VFP_CVT_LOOP(CVT_FP_TO_INT_PARAMS(32, 16, sign), CHECK16, BODY16); } \ - break; \ - case e32: \ - { VI_VFP_CVT_LOOP(CVT_FP_TO_INT_PARAMS(64, 32, sign), CHECK32, BODY32); } \ - break; \ - default: \ - require(0); \ - break; \ - } - // The p-extension support is contributed by // Programming Langauge Lab, Department of Computer Science, National Tsing-Hua University, Taiwan diff --git a/riscv/v_ext_macros.h b/riscv/v_ext_macros.h new file mode 100644 index 0000000..2a32979 --- /dev/null +++ b/riscv/v_ext_macros.h @@ -0,0 +1,2075 @@ +// See LICENSE for license details. + +#ifndef _RISCV_V_EXT_MACROS_H +#define _RISCV_V_EXT_MACROS_H + +// +// vector: masking skip helper +// +#define VI_MASK_VARS \ + const int midx = i / 64; \ + const int mpos = i % 64; + +#define VI_LOOP_ELEMENT_SKIP(BODY) \ + VI_MASK_VARS \ + if (insn.v_vm() == 0) { \ + BODY; \ + bool skip = ((P.VU.elt(0, midx) >> mpos) & 0x1) == 0; \ + if (skip) { \ + continue; \ + } \ + } + +#define VI_ELEMENT_SKIP(inx) \ + if (inx >= vl) { \ + continue; \ + } else if (inx < P.VU.vstart->read()) { \ + continue; \ + } else { \ + VI_LOOP_ELEMENT_SKIP(); \ + } + +// +// vector: operation and register acccess check helper +// +static inline bool is_overlapped(const int astart, int asize, + const int bstart, int bsize) +{ + asize = asize == 0 ? 1 : asize; + bsize = bsize == 0 ? 1 : bsize; + + const int aend = astart + asize; + const int bend = bstart + bsize; + + return std::max(aend, bend) - std::min(astart, bstart) < asize + bsize; +} + +static inline bool is_overlapped_widen(const int astart, int asize, + const int bstart, int bsize) +{ + asize = asize == 0 ? 1 : asize; + bsize = bsize == 0 ? 1 : bsize; + + const int aend = astart + asize; + const int bend = bstart + bsize; + + if (astart < bstart && + is_overlapped(astart, asize, bstart, bsize) && + !is_overlapped(astart, asize, bstart + bsize, bsize)) { + return false; + } else { + return std::max(aend, bend) - std::min(astart, bstart) < asize + bsize; + } +} + +static inline bool is_aligned(const unsigned val, const unsigned pos) +{ + return pos ? 
(val & (pos - 1)) == 0 : true; +} + +#define VI_NARROW_CHECK_COMMON \ + require_vector(true); \ + require(P.VU.vflmul <= 4); \ + require(P.VU.vsew * 2 <= P.VU.ELEN); \ + require_align(insn.rs2(), P.VU.vflmul * 2); \ + require_align(insn.rd(), P.VU.vflmul); \ + require_vm; \ + +#define VI_WIDE_CHECK_COMMON \ + require_vector(true); \ + require(P.VU.vflmul <= 4); \ + require(P.VU.vsew * 2 <= P.VU.ELEN); \ + require_align(insn.rd(), P.VU.vflmul * 2); \ + require_vm; \ + +#define VI_CHECK_ST_INDEX(elt_width) \ + require_vector(false); \ + float vemul = ((float)elt_width / P.VU.vsew * P.VU.vflmul); \ + require(vemul >= 0.125 && vemul <= 8); \ + reg_t emul = vemul < 1 ? 1 : vemul; \ + reg_t flmul = P.VU.vflmul < 1 ? 1 : P.VU.vflmul; \ + require_align(insn.rd(), P.VU.vflmul); \ + require_align(insn.rs2(), vemul); \ + require((nf * flmul) <= (NVPR / 4) && \ + (insn.rd() + nf * flmul) <= NVPR); \ + +#define VI_CHECK_LD_INDEX(elt_width) \ + VI_CHECK_ST_INDEX(elt_width); \ + for (reg_t idx = 0; idx < nf; ++idx) { \ + reg_t flmul = P.VU.vflmul < 1 ? 1 : P.VU.vflmul; \ + reg_t seg_vd = insn.rd() + flmul * idx; \ + if (elt_width > P.VU.vsew) { \ + if (seg_vd != insn.rs2()) \ + require_noover(seg_vd, P.VU.vflmul, insn.rs2(), vemul); \ + } else if (elt_width < P.VU.vsew) { \ + if (vemul < 1) { \ + require_noover(seg_vd, P.VU.vflmul, insn.rs2(), vemul); \ + } else { \ + require_noover_widen(seg_vd, P.VU.vflmul, insn.rs2(), vemul); \ + } \ + } \ + if (nf >= 2) { \ + require_noover(seg_vd, P.VU.vflmul, insn.rs2(), vemul); \ + } \ + } \ + require_vm; \ + +#define VI_CHECK_MSS(is_vs1) \ + if (insn.rd() != insn.rs2()) \ + require_noover(insn.rd(), 1, insn.rs2(), P.VU.vflmul); \ + require_align(insn.rs2(), P.VU.vflmul); \ + if (is_vs1) { \ + if (insn.rd() != insn.rs1()) \ + require_noover(insn.rd(), 1, insn.rs1(), P.VU.vflmul); \ + require_align(insn.rs1(), P.VU.vflmul); \ + } \ + +#define VI_CHECK_SSS(is_vs1) \ + require_vm; \ + if (P.VU.vflmul > 1) { \ + require_align(insn.rd(), P.VU.vflmul); \ + require_align(insn.rs2(), P.VU.vflmul); \ + if (is_vs1) { \ + require_align(insn.rs1(), P.VU.vflmul); \ + } \ + } + +#define VI_CHECK_STORE(elt_width, is_mask_ldst) \ + require_vector(false); \ + reg_t veew = is_mask_ldst ? 1 : sizeof(elt_width##_t) * 8; \ + float vemul = is_mask_ldst ? 1 : ((float)veew / P.VU.vsew * P.VU.vflmul); \ + reg_t emul = vemul < 1 ? 
1 : vemul; \ + require(vemul >= 0.125 && vemul <= 8); \ + require_align(insn.rd(), vemul); \ + require((nf * emul) <= (NVPR / 4) && \ + (insn.rd() + nf * emul) <= NVPR); \ + require(veew <= P.VU.ELEN); \ + +#define VI_CHECK_LOAD(elt_width, is_mask_ldst) \ + VI_CHECK_STORE(elt_width, is_mask_ldst); \ + require_vm; \ + +#define VI_CHECK_DSS(is_vs1) \ + VI_WIDE_CHECK_COMMON; \ + require_align(insn.rs2(), P.VU.vflmul); \ + if (P.VU.vflmul < 1) { \ + require_noover(insn.rd(), P.VU.vflmul * 2, insn.rs2(), P.VU.vflmul); \ + } else { \ + require_noover_widen(insn.rd(), P.VU.vflmul * 2, insn.rs2(), P.VU.vflmul); \ + } \ + if (is_vs1) { \ + require_align(insn.rs1(), P.VU.vflmul); \ + if (P.VU.vflmul < 1) { \ + require_noover(insn.rd(), P.VU.vflmul * 2, insn.rs1(), P.VU.vflmul); \ + } else { \ + require_noover_widen(insn.rd(), P.VU.vflmul * 2, insn.rs1(), P.VU.vflmul); \ + } \ + } + +#define VI_CHECK_DDS(is_rs) \ + VI_WIDE_CHECK_COMMON; \ + require_align(insn.rs2(), P.VU.vflmul * 2); \ + if (is_rs) { \ + require_align(insn.rs1(), P.VU.vflmul); \ + if (P.VU.vflmul < 1) { \ + require_noover(insn.rd(), P.VU.vflmul * 2, insn.rs1(), P.VU.vflmul); \ + } else { \ + require_noover_widen(insn.rd(), P.VU.vflmul * 2, insn.rs1(), P.VU.vflmul); \ + } \ + } + +#define VI_CHECK_SDS(is_vs1) \ + VI_NARROW_CHECK_COMMON; \ + if (insn.rd() != insn.rs2()) \ + require_noover(insn.rd(), P.VU.vflmul, insn.rs2(), P.VU.vflmul * 2); \ + if (is_vs1) \ + require_align(insn.rs1(), P.VU.vflmul); \ + +#define VI_CHECK_REDUCTION(is_wide) \ + require_vector(true); \ + if (is_wide) { \ + require(P.VU.vsew * 2 <= P.VU.ELEN); \ + } \ + require_align(insn.rs2(), P.VU.vflmul); \ + require(P.VU.vstart->read() == 0); \ + +#define VI_CHECK_SLIDE(is_over) \ + require_align(insn.rs2(), P.VU.vflmul); \ + require_align(insn.rd(), P.VU.vflmul); \ + require_vm; \ + if (is_over) \ + require(insn.rd() != insn.rs2()); \ + + +// +// vector: loop header and end helper +// +#define VI_GENERAL_LOOP_BASE \ + require(P.VU.vsew >= e8 && P.VU.vsew <= e64); \ + require_vector(true); \ + reg_t vl = P.VU.vl->read(); \ + reg_t sew = P.VU.vsew; \ + reg_t rd_num = insn.rd(); \ + reg_t rs1_num = insn.rs1(); \ + reg_t rs2_num = insn.rs2(); \ + for (reg_t i = P.VU.vstart->read(); i < vl; ++i) { + +#define VI_LOOP_BASE \ + VI_GENERAL_LOOP_BASE \ + VI_LOOP_ELEMENT_SKIP(); + +#define VI_LOOP_END \ + } \ + P.VU.vstart->write(0); + +#define VI_LOOP_REDUCTION_END(x) \ + } \ + if (vl > 0) { \ + vd_0_des = vd_0_res; \ + } \ + P.VU.vstart->write(0); + +#define VI_LOOP_CARRY_BASE \ + VI_GENERAL_LOOP_BASE \ + VI_MASK_VARS \ + auto v0 = P.VU.elt(0, midx); \ + const uint64_t mmask = UINT64_C(1) << mpos; \ + const uint128_t op_mask = (UINT64_MAX >> (64 - sew)); \ + uint64_t carry = insn.v_vm() == 0 ? 
(v0 >> mpos) & 0x1 : 0; \ + uint128_t res = 0; \ + auto &vd = P.VU.elt(rd_num, midx, true); + +#define VI_LOOP_CARRY_END \ + vd = (vd & ~mmask) | (((res) << mpos) & mmask); \ + } \ + P.VU.vstart->write(0); +#define VI_LOOP_WITH_CARRY_BASE \ + VI_GENERAL_LOOP_BASE \ + VI_MASK_VARS \ + auto &v0 = P.VU.elt(0, midx); \ + const uint128_t op_mask = (UINT64_MAX >> (64 - sew)); \ + uint64_t carry = (v0 >> mpos) & 0x1; + +#define VI_LOOP_CMP_BASE \ + require(P.VU.vsew >= e8 && P.VU.vsew <= e64); \ + require_vector(true); \ + reg_t vl = P.VU.vl->read(); \ + reg_t sew = P.VU.vsew; \ + reg_t rd_num = insn.rd(); \ + reg_t rs1_num = insn.rs1(); \ + reg_t rs2_num = insn.rs2(); \ + for (reg_t i = P.VU.vstart->read(); i < vl; ++i) { \ + VI_LOOP_ELEMENT_SKIP(); \ + uint64_t mmask = UINT64_C(1) << mpos; \ + uint64_t &vdi = P.VU.elt(insn.rd(), midx, true); \ + uint64_t res = 0; + +#define VI_LOOP_CMP_END \ + vdi = (vdi & ~mmask) | (((res) << mpos) & mmask); \ + } \ + P.VU.vstart->write(0); + +#define VI_LOOP_MASK(op) \ + require(P.VU.vsew <= e64); \ + require_vector(true); \ + reg_t vl = P.VU.vl->read(); \ + for (reg_t i = P.VU.vstart->read(); i < vl; ++i) { \ + int midx = i / 64; \ + int mpos = i % 64; \ + uint64_t mmask = UINT64_C(1) << mpos; \ + uint64_t vs2 = P.VU.elt(insn.rs2(), midx); \ + uint64_t vs1 = P.VU.elt(insn.rs1(), midx); \ + uint64_t &res = P.VU.elt(insn.rd(), midx, true); \ + res = (res & ~mmask) | ((op) & (1ULL << mpos)); \ + } \ + P.VU.vstart->write(0); + +#define VI_LOOP_NSHIFT_BASE \ + VI_GENERAL_LOOP_BASE; \ + VI_LOOP_ELEMENT_SKIP({ \ + require(!(insn.rd() == 0 && P.VU.vflmul > 1)); \ + }); + + +#define INT_ROUNDING(result, xrm, gb) \ + do { \ + const uint64_t lsb = 1UL << (gb); \ + const uint64_t lsb_half = lsb >> 1; \ + switch (xrm) { \ + case VRM::RNU: \ + result += lsb_half; \ + break; \ + case VRM::RNE: \ + if ((result & lsb_half) && ((result & (lsb_half - 1)) || (result & lsb))) \ + result += lsb; \ + break; \ + case VRM::RDN: \ + break; \ + case VRM::ROD: \ + if (result & (lsb - 1)) \ + result |= lsb; \ + break; \ + case VRM::INVALID_RM: \ + assert(true); \ + } \ + } while (0) + +// +// vector: integer and masking operand access helper +// +#define VXI_PARAMS(x) \ + type_sew_t::type &vd = P.VU.elt::type>(rd_num, i, true); \ + type_sew_t::type vs1 = P.VU.elt::type>(rs1_num, i); \ + type_sew_t::type vs2 = P.VU.elt::type>(rs2_num, i); \ + type_sew_t::type rs1 = (type_sew_t::type)RS1; \ + type_sew_t::type simm5 = (type_sew_t::type)insn.v_simm5(); + +#define VV_U_PARAMS(x) \ + type_usew_t::type &vd = P.VU.elt::type>(rd_num, i, true); \ + type_usew_t::type vs1 = P.VU.elt::type>(rs1_num, i); \ + type_usew_t::type vs2 = P.VU.elt::type>(rs2_num, i); + +#define VX_U_PARAMS(x) \ + type_usew_t::type &vd = P.VU.elt::type>(rd_num, i, true); \ + type_usew_t::type rs1 = (type_usew_t::type)RS1; \ + type_usew_t::type vs2 = P.VU.elt::type>(rs2_num, i); + +#define VI_U_PARAMS(x) \ + type_usew_t::type &vd = P.VU.elt::type>(rd_num, i, true); \ + type_usew_t::type zimm5 = (type_usew_t::type)insn.v_zimm5(); \ + type_usew_t::type vs2 = P.VU.elt::type>(rs2_num, i); + +#define VV_PARAMS(x) \ + type_sew_t::type &vd = P.VU.elt::type>(rd_num, i, true); \ + type_sew_t::type vs1 = P.VU.elt::type>(rs1_num, i); \ + type_sew_t::type vs2 = P.VU.elt::type>(rs2_num, i); + +#define VX_PARAMS(x) \ + type_sew_t::type &vd = P.VU.elt::type>(rd_num, i, true); \ + type_sew_t::type rs1 = (type_sew_t::type)RS1; \ + type_sew_t::type vs2 = P.VU.elt::type>(rs2_num, i); + +#define VI_PARAMS(x) \ + type_sew_t::type &vd = 
P.VU.elt::type>(rd_num, i, true); \ + type_sew_t::type simm5 = (type_sew_t::type)insn.v_simm5(); \ + type_sew_t::type vs2 = P.VU.elt::type>(rs2_num, i); + +#define XV_PARAMS(x) \ + type_sew_t::type &vd = P.VU.elt::type>(rd_num, i, true); \ + type_usew_t::type vs2 = P.VU.elt::type>(rs2_num, RS1); + +#define VV_SU_PARAMS(x) \ + type_sew_t::type &vd = P.VU.elt::type>(rd_num, i, true); \ + type_usew_t::type vs1 = P.VU.elt::type>(rs1_num, i); \ + type_sew_t::type vs2 = P.VU.elt::type>(rs2_num, i); + +#define VX_SU_PARAMS(x) \ + type_sew_t::type &vd = P.VU.elt::type>(rd_num, i, true); \ + type_usew_t::type rs1 = (type_usew_t::type)RS1; \ + type_sew_t::type vs2 = P.VU.elt::type>(rs2_num, i); + +#define VV_UCMP_PARAMS(x) \ + type_usew_t::type vs1 = P.VU.elt::type>(rs1_num, i); \ + type_usew_t::type vs2 = P.VU.elt::type>(rs2_num, i); + +#define VX_UCMP_PARAMS(x) \ + type_usew_t::type rs1 = (type_usew_t::type)RS1; \ + type_usew_t::type vs2 = P.VU.elt::type>(rs2_num, i); + +#define VI_UCMP_PARAMS(x) \ + type_usew_t::type vs2 = P.VU.elt::type>(rs2_num, i); + +#define VV_CMP_PARAMS(x) \ + type_sew_t::type vs1 = P.VU.elt::type>(rs1_num, i); \ + type_sew_t::type vs2 = P.VU.elt::type>(rs2_num, i); + +#define VX_CMP_PARAMS(x) \ + type_sew_t::type rs1 = (type_sew_t::type)RS1; \ + type_sew_t::type vs2 = P.VU.elt::type>(rs2_num, i); + +#define VI_CMP_PARAMS(x) \ + type_sew_t::type simm5 = (type_sew_t::type)insn.v_simm5(); \ + type_sew_t::type vs2 = P.VU.elt::type>(rs2_num, i); + +#define VI_XI_SLIDEDOWN_PARAMS(x, off) \ + auto &vd = P.VU.elt::type>(rd_num, i, true); \ + auto vs2 = P.VU.elt::type>(rs2_num, i + off); + +#define VI_XI_SLIDEUP_PARAMS(x, offset) \ + auto &vd = P.VU.elt::type>(rd_num, i, true); \ + auto vs2 = P.VU.elt::type>(rs2_num, i - offset); + +#define VI_NARROW_PARAMS(sew1, sew2) \ + auto &vd = P.VU.elt::type>(rd_num, i, true); \ + auto vs2_u = P.VU.elt::type>(rs2_num, i); \ + auto vs2 = P.VU.elt::type>(rs2_num, i); \ + auto zimm5 = (type_usew_t::type)insn.v_zimm5(); + +#define VX_NARROW_PARAMS(sew1, sew2) \ + auto &vd = P.VU.elt::type>(rd_num, i, true); \ + auto vs2_u = P.VU.elt::type>(rs2_num, i); \ + auto vs2 = P.VU.elt::type>(rs2_num, i); \ + auto rs1 = (type_sew_t::type)RS1; + +#define VV_NARROW_PARAMS(sew1, sew2) \ + auto &vd = P.VU.elt::type>(rd_num, i, true); \ + auto vs2_u = P.VU.elt::type>(rs2_num, i); \ + auto vs2 = P.VU.elt::type>(rs2_num, i); \ + auto vs1 = P.VU.elt::type>(rs1_num, i); + +#define XI_CARRY_PARAMS(x) \ + auto vs2 = P.VU.elt::type>(rs2_num, i); \ + auto rs1 = (type_sew_t::type)RS1; \ + auto simm5 = (type_sew_t::type)insn.v_simm5(); \ + +#define VV_CARRY_PARAMS(x) \ + auto vs2 = P.VU.elt::type>(rs2_num, i); \ + auto vs1 = P.VU.elt::type>(rs1_num, i); \ + +#define XI_WITH_CARRY_PARAMS(x) \ + auto vs2 = P.VU.elt::type>(rs2_num, i); \ + auto rs1 = (type_sew_t::type)RS1; \ + auto simm5 = (type_sew_t::type)insn.v_simm5(); \ + auto &vd = P.VU.elt::type>(rd_num, i, true); + +#define VV_WITH_CARRY_PARAMS(x) \ + auto vs2 = P.VU.elt::type>(rs2_num, i); \ + auto vs1 = P.VU.elt::type>(rs1_num, i); \ + auto &vd = P.VU.elt::type>(rd_num, i, true); + +#define VFP_V_PARAMS(width) \ + float##width##_t &vd = P.VU.elt(rd_num, i, true); \ + float##width##_t vs2 = P.VU.elt(rs2_num, i); + +#define VFP_VV_PARAMS(width) \ + float##width##_t &vd = P.VU.elt(rd_num, i, true); \ + float##width##_t vs1 = P.VU.elt(rs1_num, i); \ + float##width##_t vs2 = P.VU.elt(rs2_num, i); + +#define VFP_VF_PARAMS(width) \ + float##width##_t &vd = P.VU.elt(rd_num, i, true); \ + float##width##_t rs1 = 
f##width(READ_FREG(rs1_num)); \ + float##width##_t vs2 = P.VU.elt(rs2_num, i); + +#define CVT_FP_TO_FP_PARAMS(from_width, to_width) \ + auto vs2 = P.VU.elt(rs2_num, i); \ + auto &vd = P.VU.elt(rd_num, i, true); + +#define CVT_INT_TO_FP_PARAMS(from_width, to_width, sign) \ + auto vs2 = P.VU.elt(rs2_num, i); \ + auto &vd = P.VU.elt(rd_num, i, true); + +#define CVT_FP_TO_INT_PARAMS(from_width, to_width, sign) \ + auto vs2 = P.VU.elt(rs2_num, i); \ + auto &vd = P.VU.elt(rd_num, i, true); + +// +// vector: integer and masking operation loop +// + +#define INSNS_BASE(PARAMS, BODY) \ + if (sew == e8) { \ + PARAMS(e8); \ + BODY; \ + } else if (sew == e16) { \ + PARAMS(e16); \ + BODY; \ + } else if (sew == e32) { \ + PARAMS(e32); \ + BODY; \ + } else if (sew == e64) { \ + PARAMS(e64); \ + BODY; \ + } + +// comparision result to masking register +#define VI_LOOP_CMP_BODY(PARAMS, BODY) \ + VI_LOOP_CMP_BASE \ + INSNS_BASE(PARAMS, BODY) \ + VI_LOOP_CMP_END + +#define VI_VV_LOOP_CMP(BODY) \ + VI_CHECK_MSS(true); \ + VI_LOOP_CMP_BODY(VV_CMP_PARAMS, BODY) + +#define VI_VX_LOOP_CMP(BODY) \ + VI_CHECK_MSS(false); \ + VI_LOOP_CMP_BODY(VX_CMP_PARAMS, BODY) + +#define VI_VI_LOOP_CMP(BODY) \ + VI_CHECK_MSS(false); \ + VI_LOOP_CMP_BODY(VI_CMP_PARAMS, BODY) + +#define VI_VV_ULOOP_CMP(BODY) \ + VI_CHECK_MSS(true); \ + VI_LOOP_CMP_BODY(VV_UCMP_PARAMS, BODY) + +#define VI_VX_ULOOP_CMP(BODY) \ + VI_CHECK_MSS(false); \ + VI_LOOP_CMP_BODY(VX_UCMP_PARAMS, BODY) + +#define VI_VI_ULOOP_CMP(BODY) \ + VI_CHECK_MSS(false); \ + VI_LOOP_CMP_BODY(VI_UCMP_PARAMS, BODY) + +// merge and copy loop +#define VI_MERGE_VARS \ + VI_MASK_VARS \ + bool use_first = (P.VU.elt(0, midx) >> mpos) & 0x1; + +#define VI_MERGE_LOOP_BASE \ + require_vector(true); \ + VI_GENERAL_LOOP_BASE \ + VI_MERGE_VARS + +#define VI_VV_MERGE_LOOP(BODY) \ + VI_CHECK_SSS(true); \ + VI_MERGE_LOOP_BASE \ + if (sew == e8) { \ + VV_PARAMS(e8); \ + BODY; \ + } else if (sew == e16) { \ + VV_PARAMS(e16); \ + BODY; \ + } else if (sew == e32) { \ + VV_PARAMS(e32); \ + BODY; \ + } else if (sew == e64) { \ + VV_PARAMS(e64); \ + BODY; \ + } \ + VI_LOOP_END + +#define VI_VX_MERGE_LOOP(BODY) \ + VI_CHECK_SSS(false); \ + VI_MERGE_LOOP_BASE \ + if (sew == e8) { \ + VX_PARAMS(e8); \ + BODY; \ + } else if (sew == e16) { \ + VX_PARAMS(e16); \ + BODY; \ + } else if (sew == e32) { \ + VX_PARAMS(e32); \ + BODY; \ + } else if (sew == e64) { \ + VX_PARAMS(e64); \ + BODY; \ + } \ + VI_LOOP_END + +#define VI_VI_MERGE_LOOP(BODY) \ + VI_CHECK_SSS(false); \ + VI_MERGE_LOOP_BASE \ + if (sew == e8) { \ + VI_PARAMS(e8); \ + BODY; \ + } else if (sew == e16) { \ + VI_PARAMS(e16); \ + BODY; \ + } else if (sew == e32) { \ + VI_PARAMS(e32); \ + BODY; \ + } else if (sew == e64) { \ + VI_PARAMS(e64); \ + BODY; \ + } \ + VI_LOOP_END + +#define VI_VF_MERGE_LOOP(BODY) \ + VI_CHECK_SSS(false); \ + VI_VFP_COMMON \ + for (reg_t i = P.VU.vstart->read(); i < vl; ++i) { \ + VI_MERGE_VARS \ + if (P.VU.vsew == e16) { \ + VFP_VF_PARAMS(16); \ + BODY; \ + } else if (P.VU.vsew == e32) { \ + VFP_VF_PARAMS(32); \ + BODY; \ + } else if (P.VU.vsew == e64) { \ + VFP_VF_PARAMS(64); \ + BODY; \ + } \ + VI_LOOP_END + +// reduction loop - signed +#define VI_LOOP_REDUCTION_BASE(x) \ + require(x >= e8 && x <= e64); \ + reg_t vl = P.VU.vl->read(); \ + reg_t rd_num = insn.rd(); \ + reg_t rs1_num = insn.rs1(); \ + reg_t rs2_num = insn.rs2(); \ + auto &vd_0_des = P.VU.elt::type>(rd_num, 0, true); \ + auto vd_0_res = P.VU.elt::type>(rs1_num, 0); \ + for (reg_t i = P.VU.vstart->read(); i < vl; ++i) { \ + VI_LOOP_ELEMENT_SKIP(); \ + 
auto vs2 = P.VU.elt::type>(rs2_num, i); \ + +#define REDUCTION_LOOP(x, BODY) \ + VI_LOOP_REDUCTION_BASE(x) \ + BODY; \ + VI_LOOP_REDUCTION_END(x) + +#define VI_VV_LOOP_REDUCTION(BODY) \ + VI_CHECK_REDUCTION(false); \ + reg_t sew = P.VU.vsew; \ + if (sew == e8) { \ + REDUCTION_LOOP(e8, BODY) \ + } else if (sew == e16) { \ + REDUCTION_LOOP(e16, BODY) \ + } else if (sew == e32) { \ + REDUCTION_LOOP(e32, BODY) \ + } else if (sew == e64) { \ + REDUCTION_LOOP(e64, BODY) \ + } + +// reduction loop - unsigned +#define VI_ULOOP_REDUCTION_BASE(x) \ + require(x >= e8 && x <= e64); \ + reg_t vl = P.VU.vl->read(); \ + reg_t rd_num = insn.rd(); \ + reg_t rs1_num = insn.rs1(); \ + reg_t rs2_num = insn.rs2(); \ + auto &vd_0_des = P.VU.elt::type>(rd_num, 0, true); \ + auto vd_0_res = P.VU.elt::type>(rs1_num, 0); \ + for (reg_t i = P.VU.vstart->read(); i < vl; ++i) { \ + VI_LOOP_ELEMENT_SKIP(); \ + auto vs2 = P.VU.elt::type>(rs2_num, i); + +#define REDUCTION_ULOOP(x, BODY) \ + VI_ULOOP_REDUCTION_BASE(x) \ + BODY; \ + VI_LOOP_REDUCTION_END(x) + +#define VI_VV_ULOOP_REDUCTION(BODY) \ + VI_CHECK_REDUCTION(false); \ + reg_t sew = P.VU.vsew; \ + if (sew == e8) { \ + REDUCTION_ULOOP(e8, BODY) \ + } else if (sew == e16) { \ + REDUCTION_ULOOP(e16, BODY) \ + } else if (sew == e32) { \ + REDUCTION_ULOOP(e32, BODY) \ + } else if (sew == e64) { \ + REDUCTION_ULOOP(e64, BODY) \ + } + + +// genearl VXI signed/unsigned loop +#define VI_VV_ULOOP(BODY) \ + VI_CHECK_SSS(true) \ + VI_LOOP_BASE \ + if (sew == e8) { \ + VV_U_PARAMS(e8); \ + BODY; \ + } else if (sew == e16) { \ + VV_U_PARAMS(e16); \ + BODY; \ + } else if (sew == e32) { \ + VV_U_PARAMS(e32); \ + BODY; \ + } else if (sew == e64) { \ + VV_U_PARAMS(e64); \ + BODY; \ + } \ + VI_LOOP_END + +#define VI_VV_LOOP(BODY) \ + VI_CHECK_SSS(true) \ + VI_LOOP_BASE \ + if (sew == e8) { \ + VV_PARAMS(e8); \ + BODY; \ + } else if (sew == e16) { \ + VV_PARAMS(e16); \ + BODY; \ + } else if (sew == e32) { \ + VV_PARAMS(e32); \ + BODY; \ + } else if (sew == e64) { \ + VV_PARAMS(e64); \ + BODY; \ + } \ + VI_LOOP_END + +#define VI_VX_ULOOP(BODY) \ + VI_CHECK_SSS(false) \ + VI_LOOP_BASE \ + if (sew == e8) { \ + VX_U_PARAMS(e8); \ + BODY; \ + } else if (sew == e16) { \ + VX_U_PARAMS(e16); \ + BODY; \ + } else if (sew == e32) { \ + VX_U_PARAMS(e32); \ + BODY; \ + } else if (sew == e64) { \ + VX_U_PARAMS(e64); \ + BODY; \ + } \ + VI_LOOP_END + +#define VI_VX_LOOP(BODY) \ + VI_CHECK_SSS(false) \ + VI_LOOP_BASE \ + if (sew == e8) { \ + VX_PARAMS(e8); \ + BODY; \ + } else if (sew == e16) { \ + VX_PARAMS(e16); \ + BODY; \ + } else if (sew == e32) { \ + VX_PARAMS(e32); \ + BODY; \ + } else if (sew == e64) { \ + VX_PARAMS(e64); \ + BODY; \ + } \ + VI_LOOP_END + +#define VI_VI_ULOOP(BODY) \ + VI_CHECK_SSS(false) \ + VI_LOOP_BASE \ + if (sew == e8) { \ + VI_U_PARAMS(e8); \ + BODY; \ + } else if (sew == e16) { \ + VI_U_PARAMS(e16); \ + BODY; \ + } else if (sew == e32) { \ + VI_U_PARAMS(e32); \ + BODY; \ + } else if (sew == e64) { \ + VI_U_PARAMS(e64); \ + BODY; \ + } \ + VI_LOOP_END + +#define VI_VI_LOOP(BODY) \ + VI_CHECK_SSS(false) \ + VI_LOOP_BASE \ + if (sew == e8) { \ + VI_PARAMS(e8); \ + BODY; \ + } else if (sew == e16) { \ + VI_PARAMS(e16); \ + BODY; \ + } else if (sew == e32) { \ + VI_PARAMS(e32); \ + BODY; \ + } else if (sew == e64) { \ + VI_PARAMS(e64); \ + BODY; \ + } \ + VI_LOOP_END + +// signed unsigned operation loop (e.g. 
mulhsu) +#define VI_VV_SU_LOOP(BODY) \ + VI_CHECK_SSS(true) \ + VI_LOOP_BASE \ + if (sew == e8) { \ + VV_SU_PARAMS(e8); \ + BODY; \ + } else if (sew == e16) { \ + VV_SU_PARAMS(e16); \ + BODY; \ + } else if (sew == e32) { \ + VV_SU_PARAMS(e32); \ + BODY; \ + } else if (sew == e64) { \ + VV_SU_PARAMS(e64); \ + BODY; \ + } \ + VI_LOOP_END + +#define VI_VX_SU_LOOP(BODY) \ + VI_CHECK_SSS(false) \ + VI_LOOP_BASE \ + if (sew == e8) { \ + VX_SU_PARAMS(e8); \ + BODY; \ + } else if (sew == e16) { \ + VX_SU_PARAMS(e16); \ + BODY; \ + } else if (sew == e32) { \ + VX_SU_PARAMS(e32); \ + BODY; \ + } else if (sew == e64) { \ + VX_SU_PARAMS(e64); \ + BODY; \ + } \ + VI_LOOP_END + +// narrow operation loop +#define VI_VV_LOOP_NARROW(BODY) \ + VI_CHECK_SDS(true); \ + VI_LOOP_BASE \ + if (sew == e8) { \ + VV_NARROW_PARAMS(e8, e16) \ + BODY; \ + } else if (sew == e16) { \ + VV_NARROW_PARAMS(e16, e32) \ + BODY; \ + } else if (sew == e32) { \ + VV_NARROW_PARAMS(e32, e64) \ + BODY; \ + } \ + VI_LOOP_END + +#define VI_VX_LOOP_NARROW(BODY) \ + VI_CHECK_SDS(false); \ + VI_LOOP_BASE \ + if (sew == e8) { \ + VX_NARROW_PARAMS(e8, e16) \ + BODY; \ + } else if (sew == e16) { \ + VX_NARROW_PARAMS(e16, e32) \ + BODY; \ + } else if (sew == e32) { \ + VX_NARROW_PARAMS(e32, e64) \ + BODY; \ + } \ + VI_LOOP_END + +#define VI_VI_LOOP_NARROW(BODY) \ + VI_CHECK_SDS(false); \ + VI_LOOP_BASE \ + if (sew == e8) { \ + VI_NARROW_PARAMS(e8, e16) \ + BODY; \ + } else if (sew == e16) { \ + VI_NARROW_PARAMS(e16, e32) \ + BODY; \ + } else if (sew == e32) { \ + VI_NARROW_PARAMS(e32, e64) \ + BODY; \ + } \ + VI_LOOP_END + +#define VI_VI_LOOP_NSHIFT(BODY) \ + VI_CHECK_SDS(false); \ + VI_LOOP_NSHIFT_BASE \ + if (sew == e8) { \ + VI_NARROW_PARAMS(e8, e16) \ + BODY; \ + } else if (sew == e16) { \ + VI_NARROW_PARAMS(e16, e32) \ + BODY; \ + } else if (sew == e32) { \ + VI_NARROW_PARAMS(e32, e64) \ + BODY; \ + } \ + VI_LOOP_END + +#define VI_VX_LOOP_NSHIFT(BODY) \ + VI_CHECK_SDS(false); \ + VI_LOOP_NSHIFT_BASE \ + if (sew == e8) { \ + VX_NARROW_PARAMS(e8, e16) \ + BODY; \ + } else if (sew == e16) { \ + VX_NARROW_PARAMS(e16, e32) \ + BODY; \ + } else if (sew == e32) { \ + VX_NARROW_PARAMS(e32, e64) \ + BODY; \ + } \ + VI_LOOP_END + +#define VI_VV_LOOP_NSHIFT(BODY) \ + VI_CHECK_SDS(true); \ + VI_LOOP_NSHIFT_BASE \ + if (sew == e8) { \ + VV_NARROW_PARAMS(e8, e16) \ + BODY; \ + } else if (sew == e16) { \ + VV_NARROW_PARAMS(e16, e32) \ + BODY; \ + } else if (sew == e32) { \ + VV_NARROW_PARAMS(e32, e64) \ + BODY; \ + } \ + VI_LOOP_END + +// widen operation loop +#define VI_VV_LOOP_WIDEN(BODY) \ + VI_LOOP_BASE \ + if (sew == e8) { \ + VV_PARAMS(e8); \ + BODY; \ + } else if (sew == e16) { \ + VV_PARAMS(e16); \ + BODY; \ + } else if (sew == e32) { \ + VV_PARAMS(e32); \ + BODY; \ + } \ + VI_LOOP_END + +#define VI_VX_LOOP_WIDEN(BODY) \ + VI_LOOP_BASE \ + if (sew == e8) { \ + VX_PARAMS(e8); \ + BODY; \ + } else if (sew == e16) { \ + VX_PARAMS(e16); \ + BODY; \ + } else if (sew == e32) { \ + VX_PARAMS(e32); \ + BODY; \ + } \ + VI_LOOP_END + +#define VI_WIDE_OP_AND_ASSIGN(var0, var1, var2, op0, op1, sign) \ + switch (P.VU.vsew) { \ + case e8: { \ + sign##16_t vd_w = P.VU.elt(rd_num, i); \ + P.VU.elt(rd_num, i, true) = \ + op1((sign##16_t)(sign##8_t)var0 op0 (sign##16_t)(sign##8_t)var1) + var2; \ + } \ + break; \ + case e16: { \ + sign##32_t vd_w = P.VU.elt(rd_num, i); \ + P.VU.elt(rd_num, i, true) = \ + op1((sign##32_t)(sign##16_t)var0 op0 (sign##32_t)(sign##16_t)var1) + var2; \ + } \ + break; \ + default: { \ + sign##64_t vd_w = P.VU.elt(rd_num, i); \ + 
P.VU.elt(rd_num, i, true) = \ + op1((sign##64_t)(sign##32_t)var0 op0 (sign##64_t)(sign##32_t)var1) + var2; \ + } \ + break; \ + } + +#define VI_WIDE_OP_AND_ASSIGN_MIX(var0, var1, var2, op0, op1, sign_d, sign_1, sign_2) \ + switch (P.VU.vsew) { \ + case e8: { \ + sign_d##16_t vd_w = P.VU.elt(rd_num, i); \ + P.VU.elt(rd_num, i, true) = \ + op1((sign_1##16_t)(sign_1##8_t)var0 op0 (sign_2##16_t)(sign_2##8_t)var1) + var2; \ + } \ + break; \ + case e16: { \ + sign_d##32_t vd_w = P.VU.elt(rd_num, i); \ + P.VU.elt(rd_num, i, true) = \ + op1((sign_1##32_t)(sign_1##16_t)var0 op0 (sign_2##32_t)(sign_2##16_t)var1) + var2; \ + } \ + break; \ + default: { \ + sign_d##64_t vd_w = P.VU.elt(rd_num, i); \ + P.VU.elt(rd_num, i, true) = \ + op1((sign_1##64_t)(sign_1##32_t)var0 op0 (sign_2##64_t)(sign_2##32_t)var1) + var2; \ + } \ + break; \ + } + +#define VI_WIDE_WVX_OP(var0, op0, sign) \ + switch (P.VU.vsew) { \ + case e8: { \ + sign##16_t &vd_w = P.VU.elt(rd_num, i, true); \ + sign##16_t vs2_w = P.VU.elt(rs2_num, i); \ + vd_w = vs2_w op0 (sign##16_t)(sign##8_t)var0; \ + } \ + break; \ + case e16: { \ + sign##32_t &vd_w = P.VU.elt(rd_num, i, true); \ + sign##32_t vs2_w = P.VU.elt(rs2_num, i); \ + vd_w = vs2_w op0 (sign##32_t)(sign##16_t)var0; \ + } \ + break; \ + default: { \ + sign##64_t &vd_w = P.VU.elt(rd_num, i, true); \ + sign##64_t vs2_w = P.VU.elt(rs2_num, i); \ + vd_w = vs2_w op0 (sign##64_t)(sign##32_t)var0; \ + } \ + break; \ + } + +// wide reduction loop - signed +#define VI_LOOP_WIDE_REDUCTION_BASE(sew1, sew2) \ + reg_t vl = P.VU.vl->read(); \ + reg_t rd_num = insn.rd(); \ + reg_t rs1_num = insn.rs1(); \ + reg_t rs2_num = insn.rs2(); \ + auto &vd_0_des = P.VU.elt::type>(rd_num, 0, true); \ + auto vd_0_res = P.VU.elt::type>(rs1_num, 0); \ + for (reg_t i = P.VU.vstart->read(); i < vl; ++i) { \ + VI_LOOP_ELEMENT_SKIP(); \ + auto vs2 = P.VU.elt::type>(rs2_num, i); + +#define WIDE_REDUCTION_LOOP(sew1, sew2, BODY) \ + VI_LOOP_WIDE_REDUCTION_BASE(sew1, sew2) \ + BODY; \ + VI_LOOP_REDUCTION_END(sew2) + +#define VI_VV_LOOP_WIDE_REDUCTION(BODY) \ + VI_CHECK_REDUCTION(true); \ + reg_t sew = P.VU.vsew; \ + if (sew == e8) { \ + WIDE_REDUCTION_LOOP(e8, e16, BODY) \ + } else if (sew == e16) { \ + WIDE_REDUCTION_LOOP(e16, e32, BODY) \ + } else if (sew == e32) { \ + WIDE_REDUCTION_LOOP(e32, e64, BODY) \ + } + +// wide reduction loop - unsigned +#define VI_ULOOP_WIDE_REDUCTION_BASE(sew1, sew2) \ + reg_t vl = P.VU.vl->read(); \ + reg_t rd_num = insn.rd(); \ + reg_t rs1_num = insn.rs1(); \ + reg_t rs2_num = insn.rs2(); \ + auto &vd_0_des = P.VU.elt::type>(rd_num, 0, true); \ + auto vd_0_res = P.VU.elt::type>(rs1_num, 0); \ + for (reg_t i = P.VU.vstart->read(); i < vl; ++i) { \ + VI_LOOP_ELEMENT_SKIP(); \ + auto vs2 = P.VU.elt::type>(rs2_num, i); + +#define WIDE_REDUCTION_ULOOP(sew1, sew2, BODY) \ + VI_ULOOP_WIDE_REDUCTION_BASE(sew1, sew2) \ + BODY; \ + VI_LOOP_REDUCTION_END(sew2) + +#define VI_VV_ULOOP_WIDE_REDUCTION(BODY) \ + VI_CHECK_REDUCTION(true); \ + reg_t sew = P.VU.vsew; \ + if (sew == e8) { \ + WIDE_REDUCTION_ULOOP(e8, e16, BODY) \ + } else if (sew == e16) { \ + WIDE_REDUCTION_ULOOP(e16, e32, BODY) \ + } else if (sew == e32) { \ + WIDE_REDUCTION_ULOOP(e32, e64, BODY) \ + } + +// carry/borrow bit loop +#define VI_VV_LOOP_CARRY(BODY) \ + VI_CHECK_MSS(true); \ + VI_LOOP_CARRY_BASE \ + if (sew == e8) { \ + VV_CARRY_PARAMS(e8) \ + BODY; \ + } else if (sew == e16) { \ + VV_CARRY_PARAMS(e16) \ + BODY; \ + } else if (sew == e32) { \ + VV_CARRY_PARAMS(e32) \ + BODY; \ + } else if (sew == e64) { \ + 
VV_CARRY_PARAMS(e64) \ + BODY; \ + } \ + VI_LOOP_CARRY_END + +#define VI_XI_LOOP_CARRY(BODY) \ + VI_CHECK_MSS(false); \ + VI_LOOP_CARRY_BASE \ + if (sew == e8) { \ + XI_CARRY_PARAMS(e8) \ + BODY; \ + } else if (sew == e16) { \ + XI_CARRY_PARAMS(e16) \ + BODY; \ + } else if (sew == e32) { \ + XI_CARRY_PARAMS(e32) \ + BODY; \ + } else if (sew == e64) { \ + XI_CARRY_PARAMS(e64) \ + BODY; \ + } \ + VI_LOOP_CARRY_END + +#define VI_VV_LOOP_WITH_CARRY(BODY) \ + require_vm; \ + VI_CHECK_SSS(true); \ + VI_LOOP_WITH_CARRY_BASE \ + if (sew == e8) { \ + VV_WITH_CARRY_PARAMS(e8) \ + BODY; \ + } else if (sew == e16) { \ + VV_WITH_CARRY_PARAMS(e16) \ + BODY; \ + } else if (sew == e32) { \ + VV_WITH_CARRY_PARAMS(e32) \ + BODY; \ + } else if (sew == e64) { \ + VV_WITH_CARRY_PARAMS(e64) \ + BODY; \ + } \ + VI_LOOP_END + +#define VI_XI_LOOP_WITH_CARRY(BODY) \ + require_vm; \ + VI_CHECK_SSS(false); \ + VI_LOOP_WITH_CARRY_BASE \ + if (sew == e8) { \ + XI_WITH_CARRY_PARAMS(e8) \ + BODY; \ + } else if (sew == e16) { \ + XI_WITH_CARRY_PARAMS(e16) \ + BODY; \ + } else if (sew == e32) { \ + XI_WITH_CARRY_PARAMS(e32) \ + BODY; \ + } else if (sew == e64) { \ + XI_WITH_CARRY_PARAMS(e64) \ + BODY; \ + } \ + VI_LOOP_END + +// average loop +#define VI_VV_LOOP_AVG(op) \ +VRM xrm = p->VU.get_vround_mode(); \ +VI_VV_LOOP({ \ + uint128_t res = ((uint128_t)vs2) op vs1; \ + INT_ROUNDING(res, xrm, 1); \ + vd = res >> 1; \ +}) + +#define VI_VX_LOOP_AVG(op) \ +VRM xrm = p->VU.get_vround_mode(); \ +VI_VX_LOOP({ \ + uint128_t res = ((uint128_t)vs2) op rs1; \ + INT_ROUNDING(res, xrm, 1); \ + vd = res >> 1; \ +}) + +#define VI_VV_ULOOP_AVG(op) \ +VRM xrm = p->VU.get_vround_mode(); \ +VI_VV_ULOOP({ \ + uint128_t res = ((uint128_t)vs2) op vs1; \ + INT_ROUNDING(res, xrm, 1); \ + vd = res >> 1; \ +}) + +#define VI_VX_ULOOP_AVG(op) \ +VRM xrm = p->VU.get_vround_mode(); \ +VI_VX_ULOOP({ \ + uint128_t res = ((uint128_t)vs2) op rs1; \ + INT_ROUNDING(res, xrm, 1); \ + vd = res >> 1; \ +}) + +// +// vector: load/store helper +// +#define VI_STRIP(inx) \ + reg_t vreg_inx = inx; + +#define VI_DUPLICATE_VREG(reg_num, idx_sew) \ +reg_t index[P.VU.vlmax]; \ + for (reg_t i = 0; i < P.VU.vlmax && P.VU.vl->read() != 0; ++i) { \ + switch (idx_sew) { \ + case e8: \ + index[i] = P.VU.elt(reg_num, i); \ + break; \ + case e16: \ + index[i] = P.VU.elt(reg_num, i); \ + break; \ + case e32: \ + index[i] = P.VU.elt(reg_num, i); \ + break; \ + case e64: \ + index[i] = P.VU.elt(reg_num, i); \ + break; \ + } \ +} + +#define VI_LD(stride, offset, elt_width, is_mask_ldst) \ + const reg_t nf = insn.v_nf() + 1; \ + const reg_t vl = is_mask_ldst ? 
((P.VU.vl->read() + 7) / 8) : P.VU.vl->read(); \ + const reg_t baseAddr = RS1; \ + const reg_t vd = insn.rd(); \ + VI_CHECK_LOAD(elt_width, is_mask_ldst); \ + for (reg_t i = 0; i < vl; ++i) { \ + VI_ELEMENT_SKIP(i); \ + VI_STRIP(i); \ + P.VU.vstart->write(i); \ + for (reg_t fn = 0; fn < nf; ++fn) { \ + elt_width##_t val = MMU.load_##elt_width( \ + baseAddr + (stride) + (offset) * sizeof(elt_width##_t)); \ + P.VU.elt(vd + fn * emul, vreg_inx, true) = val; \ + } \ + } \ + P.VU.vstart->write(0); + +#define VI_LD_INDEX(elt_width, is_seg) \ + const reg_t nf = insn.v_nf() + 1; \ + const reg_t vl = P.VU.vl->read(); \ + const reg_t baseAddr = RS1; \ + const reg_t vd = insn.rd(); \ + if (!is_seg) \ + require(nf == 1); \ + VI_CHECK_LD_INDEX(elt_width); \ + VI_DUPLICATE_VREG(insn.rs2(), elt_width); \ + for (reg_t i = 0; i < vl; ++i) { \ + VI_ELEMENT_SKIP(i); \ + VI_STRIP(i); \ + P.VU.vstart->write(i); \ + for (reg_t fn = 0; fn < nf; ++fn) { \ + switch (P.VU.vsew) { \ + case e8: \ + P.VU.elt(vd + fn * flmul, vreg_inx, true) = \ + MMU.load_uint8(baseAddr + index[i] + fn * 1); \ + break; \ + case e16: \ + P.VU.elt(vd + fn * flmul, vreg_inx, true) = \ + MMU.load_uint16(baseAddr + index[i] + fn * 2); \ + break; \ + case e32: \ + P.VU.elt(vd + fn * flmul, vreg_inx, true) = \ + MMU.load_uint32(baseAddr + index[i] + fn * 4); \ + break; \ + default: \ + P.VU.elt(vd + fn * flmul, vreg_inx, true) = \ + MMU.load_uint64(baseAddr + index[i] + fn * 8); \ + break; \ + } \ + } \ + } \ + P.VU.vstart->write(0); + +#define VI_ST(stride, offset, elt_width, is_mask_ldst) \ + const reg_t nf = insn.v_nf() + 1; \ + const reg_t vl = is_mask_ldst ? ((P.VU.vl->read() + 7) / 8) : P.VU.vl->read(); \ + const reg_t baseAddr = RS1; \ + const reg_t vs3 = insn.rd(); \ + VI_CHECK_STORE(elt_width, is_mask_ldst); \ + for (reg_t i = 0; i < vl; ++i) { \ + VI_STRIP(i) \ + VI_ELEMENT_SKIP(i); \ + P.VU.vstart->write(i); \ + for (reg_t fn = 0; fn < nf; ++fn) { \ + elt_width##_t val = P.VU.elt(vs3 + fn * emul, vreg_inx); \ + MMU.store_##elt_width( \ + baseAddr + (stride) + (offset) * sizeof(elt_width##_t), val); \ + } \ + } \ + P.VU.vstart->write(0); + +#define VI_ST_INDEX(elt_width, is_seg) \ + const reg_t nf = insn.v_nf() + 1; \ + const reg_t vl = P.VU.vl->read(); \ + const reg_t baseAddr = RS1; \ + const reg_t vs3 = insn.rd(); \ + if (!is_seg) \ + require(nf == 1); \ + VI_CHECK_ST_INDEX(elt_width); \ + VI_DUPLICATE_VREG(insn.rs2(), elt_width); \ + for (reg_t i = 0; i < vl; ++i) { \ + VI_STRIP(i) \ + VI_ELEMENT_SKIP(i); \ + P.VU.vstart->write(i); \ + for (reg_t fn = 0; fn < nf; ++fn) { \ + switch (P.VU.vsew) { \ + case e8: \ + MMU.store_uint8(baseAddr + index[i] + fn * 1, \ + P.VU.elt(vs3 + fn * flmul, vreg_inx)); \ + break; \ + case e16: \ + MMU.store_uint16(baseAddr + index[i] + fn * 2, \ + P.VU.elt(vs3 + fn * flmul, vreg_inx)); \ + break; \ + case e32: \ + MMU.store_uint32(baseAddr + index[i] + fn * 4, \ + P.VU.elt(vs3 + fn * flmul, vreg_inx)); \ + break; \ + default: \ + MMU.store_uint64(baseAddr + index[i] + fn * 8, \ + P.VU.elt(vs3 + fn * flmul, vreg_inx)); \ + break; \ + } \ + } \ + } \ + P.VU.vstart->write(0); + +#define VI_LDST_FF(elt_width) \ + const reg_t nf = insn.v_nf() + 1; \ + const reg_t sew = p->VU.vsew; \ + const reg_t vl = p->VU.vl->read(); \ + const reg_t baseAddr = RS1; \ + const reg_t rd_num = insn.rd(); \ + VI_CHECK_LOAD(elt_width, false); \ + bool early_stop = false; \ + for (reg_t i = p->VU.vstart->read(); i < vl; ++i) { \ + VI_STRIP(i); \ + VI_ELEMENT_SKIP(i); \ + \ + for (reg_t fn = 0; fn < nf; ++fn) { \ + uint64_t 
val; \ + try { \ + val = MMU.load_##elt_width( \ + baseAddr + (i * nf + fn) * sizeof(elt_width##_t)); \ + } catch (trap_t& t) { \ + if (i == 0) \ + throw; /* Only take exception on zeroth element */ \ + /* Reduce VL if an exception occurs on a later element */ \ + early_stop = true; \ + P.VU.vl->write_raw(i); \ + break; \ + } \ + p->VU.elt(rd_num + fn * emul, vreg_inx, true) = val; \ + } \ + \ + if (early_stop) { \ + break; \ + } \ + } \ + p->VU.vstart->write(0); + +#define VI_LD_WHOLE(elt_width) \ + require_vector_novtype(true, false); \ + require(sizeof(elt_width ## _t) * 8 <= P.VU.ELEN); \ + const reg_t baseAddr = RS1; \ + const reg_t vd = insn.rd(); \ + const reg_t len = insn.v_nf() + 1; \ + require_align(vd, len); \ + const reg_t elt_per_reg = P.VU.vlenb / sizeof(elt_width ## _t); \ + const reg_t size = len * elt_per_reg; \ + if (P.VU.vstart->read() < size) { \ + reg_t i = P.VU.vstart->read() / elt_per_reg; \ + reg_t off = P.VU.vstart->read() % elt_per_reg; \ + if (off) { \ + for (reg_t pos = off; pos < elt_per_reg; ++pos) { \ + auto val = MMU.load_## elt_width(baseAddr + \ + P.VU.vstart->read() * sizeof(elt_width ## _t)); \ + P.VU.elt(vd + i, pos, true) = val; \ + P.VU.vstart->write(P.VU.vstart->read() + 1); \ + } \ + ++i; \ + } \ + for (; i < len; ++i) { \ + for (reg_t pos = 0; pos < elt_per_reg; ++pos) { \ + auto val = MMU.load_## elt_width(baseAddr + \ + P.VU.vstart->read() * sizeof(elt_width ## _t)); \ + P.VU.elt(vd + i, pos, true) = val; \ + P.VU.vstart->write(P.VU.vstart->read() + 1); \ + } \ + } \ + } \ + P.VU.vstart->write(0); + +#define VI_ST_WHOLE \ + require_vector_novtype(true, false); \ + const reg_t baseAddr = RS1; \ + const reg_t vs3 = insn.rd(); \ + const reg_t len = insn.v_nf() + 1; \ + require_align(vs3, len); \ + const reg_t size = len * P.VU.vlenb; \ + \ + if (P.VU.vstart->read() < size) { \ + reg_t i = P.VU.vstart->read() / P.VU.vlenb; \ + reg_t off = P.VU.vstart->read() % P.VU.vlenb; \ + if (off) { \ + for (reg_t pos = off; pos < P.VU.vlenb; ++pos) { \ + auto val = P.VU.elt(vs3 + i, pos); \ + MMU.store_uint8(baseAddr + P.VU.vstart->read(), val); \ + P.VU.vstart->write(P.VU.vstart->read() + 1); \ + } \ + i++; \ + } \ + for (; i < len; ++i) { \ + for (reg_t pos = 0; pos < P.VU.vlenb; ++pos) { \ + auto val = P.VU.elt(vs3 + i, pos); \ + MMU.store_uint8(baseAddr + P.VU.vstart->read(), val); \ + P.VU.vstart->write(P.VU.vstart->read() + 1); \ + } \ + } \ + } \ + P.VU.vstart->write(0); + +// +// vector: amo +// +#define VI_AMO(op, type, idx_type) \ + require_vector(false); \ + require_align(insn.rd(), P.VU.vflmul); \ + require(P.VU.vsew <= P.get_xlen() && P.VU.vsew >= 32); \ + require_align(insn.rd(), P.VU.vflmul); \ + float vemul = ((float)idx_type / P.VU.vsew * P.VU.vflmul); \ + require(vemul >= 0.125 && vemul <= 8); \ + require_align(insn.rs2(), vemul); \ + if (insn.v_wd()) { \ + require_vm; \ + if (idx_type > P.VU.vsew) { \ + if (insn.rd() != insn.rs2()) \ + require_noover(insn.rd(), P.VU.vflmul, insn.rs2(), vemul); \ + } else if (idx_type < P.VU.vsew) { \ + if (vemul < 1) { \ + require_noover(insn.rd(), P.VU.vflmul, insn.rs2(), vemul); \ + } else { \ + require_noover_widen(insn.rd(), P.VU.vflmul, insn.rs2(), vemul); \ + } \ + } \ + } \ + VI_DUPLICATE_VREG(insn.rs2(), idx_type); \ + const reg_t vl = P.VU.vl->read(); \ + const reg_t baseAddr = RS1; \ + const reg_t vd = insn.rd(); \ + for (reg_t i = P.VU.vstart->read(); i < vl; ++i) { \ + VI_ELEMENT_SKIP(i); \ + VI_STRIP(i); \ + P.VU.vstart->write(i); \ + switch (P.VU.vsew) { \ + case e32: { \ + auto vs3 = P.VU.elt< 
type ## 32_t>(vd, vreg_inx); \ + auto val = MMU.amo_uint32(baseAddr + index[i], [&](type ## 32_t lhs) { op }); \ + if (insn.v_wd()) \ + P.VU.elt< type ## 32_t>(vd, vreg_inx, true) = val; \ + } \ + break; \ + case e64: { \ + auto vs3 = P.VU.elt< type ## 64_t>(vd, vreg_inx); \ + auto val = MMU.amo_uint64(baseAddr + index[i], [&](type ## 64_t lhs) { op }); \ + if (insn.v_wd()) \ + P.VU.elt< type ## 64_t>(vd, vreg_inx, true) = val; \ + } \ + break; \ + default: \ + require(0); \ + break; \ + } \ + } \ + P.VU.vstart->write(0); + +// vector: sign/unsiged extension +#define VI_VV_EXT(div, type) \ + require(insn.rd() != insn.rs2()); \ + require_vm; \ + reg_t from = P.VU.vsew / div; \ + require(from >= e8 && from <= e64); \ + require(((float)P.VU.vflmul / div) >= 0.125 && ((float)P.VU.vflmul / div) <= 8 ); \ + require_align(insn.rd(), P.VU.vflmul); \ + require_align(insn.rs2(), P.VU.vflmul / div); \ + if ((P.VU.vflmul / div) < 1) { \ + require_noover(insn.rd(), P.VU.vflmul, insn.rs2(), P.VU.vflmul / div); \ + } else { \ + require_noover_widen(insn.rd(), P.VU.vflmul, insn.rs2(), P.VU.vflmul / div); \ + } \ + reg_t pat = (((P.VU.vsew >> 3) << 4) | from >> 3); \ + VI_GENERAL_LOOP_BASE \ + VI_LOOP_ELEMENT_SKIP(); \ + switch (pat) { \ + case 0x21: \ + P.VU.elt(rd_num, i, true) = P.VU.elt(rs2_num, i); \ + break; \ + case 0x41: \ + P.VU.elt(rd_num, i, true) = P.VU.elt(rs2_num, i); \ + break; \ + case 0x81: \ + P.VU.elt(rd_num, i, true) = P.VU.elt(rs2_num, i); \ + break; \ + case 0x42: \ + P.VU.elt(rd_num, i, true) = P.VU.elt(rs2_num, i); \ + break; \ + case 0x82: \ + P.VU.elt(rd_num, i, true) = P.VU.elt(rs2_num, i); \ + break; \ + case 0x84: \ + P.VU.elt(rd_num, i, true) = P.VU.elt(rs2_num, i); \ + break; \ + case 0x88: \ + P.VU.elt(rd_num, i, true) = P.VU.elt(rs2_num, i); \ + break; \ + default: \ + break; \ + } \ + VI_LOOP_END + +// +// vector: vfp helper +// +#define VI_VFP_COMMON \ + require_fp; \ + require((P.VU.vsew == e16 && p->extension_enabled(EXT_ZFH)) || \ + (P.VU.vsew == e32 && p->extension_enabled('F')) || \ + (P.VU.vsew == e64 && p->extension_enabled('D'))); \ + require_vector(true); \ + require(STATE.frm->read() < 0x5); \ + reg_t vl = P.VU.vl->read(); \ + reg_t rd_num = insn.rd(); \ + reg_t rs1_num = insn.rs1(); \ + reg_t rs2_num = insn.rs2(); \ + softfloat_roundingMode = STATE.frm->read(); + +#define VI_VFP_LOOP_BASE \ + VI_VFP_COMMON \ + for (reg_t i = P.VU.vstart->read(); i < vl; ++i) { \ + VI_LOOP_ELEMENT_SKIP(); + +#define VI_VFP_LOOP_CMP_BASE \ + VI_VFP_COMMON \ + for (reg_t i = P.VU.vstart->read(); i < vl; ++i) { \ + VI_LOOP_ELEMENT_SKIP(); \ + uint64_t mmask = UINT64_C(1) << mpos; \ + uint64_t &vd = P.VU.elt(rd_num, midx, true); \ + uint64_t res = 0; + +#define VI_VFP_LOOP_REDUCTION_BASE(width) \ + float##width##_t vd_0 = P.VU.elt(rd_num, 0); \ + float##width##_t vs1_0 = P.VU.elt(rs1_num, 0); \ + vd_0 = vs1_0; \ + bool is_active = false; \ + for (reg_t i = P.VU.vstart->read(); i < vl; ++i) { \ + VI_LOOP_ELEMENT_SKIP(); \ + float##width##_t vs2 = P.VU.elt(rs2_num, i); \ + is_active = true; \ + +#define VI_VFP_LOOP_WIDE_REDUCTION_BASE \ + VI_VFP_COMMON \ + float64_t vd_0 = f64(P.VU.elt(rs1_num, 0).v); \ + for (reg_t i = P.VU.vstart->read(); i < vl; ++i) { \ + VI_LOOP_ELEMENT_SKIP(); + +#define VI_VFP_LOOP_END \ + } \ + P.VU.vstart->write(0); \ + +#define VI_VFP_LOOP_REDUCTION_END(x) \ + } \ + P.VU.vstart->write(0); \ + if (vl > 0) { \ + if (is_propagate && !is_active) { \ + switch (x) { \ + case e16: { \ + auto ret = f16_classify(f16(vd_0.v)); \ + if (ret & 0x300) { \ + if (ret & 
0x100) { \ + softfloat_exceptionFlags |= softfloat_flag_invalid; \ + set_fp_exceptions; \ + } \ + P.VU.elt(rd_num, 0, true) = defaultNaNF16UI; \ + } else { \ + P.VU.elt(rd_num, 0, true) = vd_0.v; \ + } \ + } \ + break; \ + case e32: { \ + auto ret = f32_classify(f32(vd_0.v)); \ + if (ret & 0x300) { \ + if (ret & 0x100) { \ + softfloat_exceptionFlags |= softfloat_flag_invalid; \ + set_fp_exceptions; \ + } \ + P.VU.elt(rd_num, 0, true) = defaultNaNF32UI; \ + } else { \ + P.VU.elt(rd_num, 0, true) = vd_0.v; \ + } \ + } \ + break; \ + case e64: { \ + auto ret = f64_classify(f64(vd_0.v)); \ + if (ret & 0x300) { \ + if (ret & 0x100) { \ + softfloat_exceptionFlags |= softfloat_flag_invalid; \ + set_fp_exceptions; \ + } \ + P.VU.elt(rd_num, 0, true) = defaultNaNF64UI; \ + } else { \ + P.VU.elt(rd_num, 0, true) = vd_0.v; \ + } \ + } \ + break; \ + } \ + } else { \ + P.VU.elt::type>(rd_num, 0, true) = vd_0.v; \ + } \ + } + +#define VI_VFP_LOOP_CMP_END \ + switch (P.VU.vsew) { \ + case e16: \ + case e32: \ + case e64: { \ + vd = (vd & ~mmask) | (((res) << mpos) & mmask); \ + break; \ + } \ + default: \ + require(0); \ + break; \ + }; \ + } \ + P.VU.vstart->write(0); + +#define VI_VFP_VV_LOOP(BODY16, BODY32, BODY64) \ + VI_CHECK_SSS(true); \ + VI_VFP_LOOP_BASE \ + switch (P.VU.vsew) { \ + case e16: { \ + VFP_VV_PARAMS(16); \ + BODY16; \ + set_fp_exceptions; \ + break; \ + } \ + case e32: { \ + VFP_VV_PARAMS(32); \ + BODY32; \ + set_fp_exceptions; \ + break; \ + } \ + case e64: { \ + VFP_VV_PARAMS(64); \ + BODY64; \ + set_fp_exceptions; \ + break; \ + } \ + default: \ + require(0); \ + break; \ + }; \ + DEBUG_RVV_FP_VV; \ + VI_VFP_LOOP_END + +#define VI_VFP_V_LOOP(BODY16, BODY32, BODY64) \ + VI_CHECK_SSS(false); \ + VI_VFP_LOOP_BASE \ + switch (P.VU.vsew) { \ + case e16: { \ + VFP_V_PARAMS(16); \ + BODY16; \ + break; \ + } \ + case e32: { \ + VFP_V_PARAMS(32); \ + BODY32; \ + break; \ + } \ + case e64: { \ + VFP_V_PARAMS(64); \ + BODY64; \ + break; \ + } \ + default: \ + require(0); \ + break; \ + }; \ + set_fp_exceptions; \ + VI_VFP_LOOP_END + +#define VI_VFP_VV_LOOP_REDUCTION(BODY16, BODY32, BODY64) \ + VI_CHECK_REDUCTION(false) \ + VI_VFP_COMMON \ + switch (P.VU.vsew) { \ + case e16: { \ + VI_VFP_LOOP_REDUCTION_BASE(16) \ + BODY16; \ + set_fp_exceptions; \ + VI_VFP_LOOP_REDUCTION_END(e16) \ + break; \ + } \ + case e32: { \ + VI_VFP_LOOP_REDUCTION_BASE(32) \ + BODY32; \ + set_fp_exceptions; \ + VI_VFP_LOOP_REDUCTION_END(e32) \ + break; \ + } \ + case e64: { \ + VI_VFP_LOOP_REDUCTION_BASE(64) \ + BODY64; \ + set_fp_exceptions; \ + VI_VFP_LOOP_REDUCTION_END(e64) \ + break; \ + } \ + default: \ + require(0); \ + break; \ + }; \ + +#define VI_VFP_VV_LOOP_WIDE_REDUCTION(BODY16, BODY32) \ + VI_CHECK_REDUCTION(true) \ + VI_VFP_COMMON \ + require((P.VU.vsew == e16 && p->extension_enabled('F')) || \ + (P.VU.vsew == e32 && p->extension_enabled('D'))); \ + bool is_active = false; \ + switch (P.VU.vsew) { \ + case e16: { \ + float32_t vd_0 = P.VU.elt(rs1_num, 0); \ + for (reg_t i = P.VU.vstart->read(); i < vl; ++i) { \ + VI_LOOP_ELEMENT_SKIP(); \ + is_active = true; \ + float32_t vs2 = f16_to_f32(P.VU.elt(rs2_num, i)); \ + BODY16; \ + set_fp_exceptions; \ + VI_VFP_LOOP_REDUCTION_END(e32) \ + break; \ + } \ + case e32: { \ + float64_t vd_0 = P.VU.elt(rs1_num, 0); \ + for (reg_t i = P.VU.vstart->read(); i < vl; ++i) { \ + VI_LOOP_ELEMENT_SKIP(); \ + is_active = true; \ + float64_t vs2 = f32_to_f64(P.VU.elt(rs2_num, i)); \ + BODY32; \ + set_fp_exceptions; \ + VI_VFP_LOOP_REDUCTION_END(e64) \ + break; \ + } \ + 
default: \ + require(0); \ + break; \ + }; \ + +#define VI_VFP_VF_LOOP(BODY16, BODY32, BODY64) \ + VI_CHECK_SSS(false); \ + VI_VFP_LOOP_BASE \ + switch (P.VU.vsew) { \ + case e16: { \ + VFP_VF_PARAMS(16); \ + BODY16; \ + set_fp_exceptions; \ + break; \ + } \ + case e32: { \ + VFP_VF_PARAMS(32); \ + BODY32; \ + set_fp_exceptions; \ + break; \ + } \ + case e64: { \ + VFP_VF_PARAMS(64); \ + BODY64; \ + set_fp_exceptions; \ + break; \ + } \ + default: \ + require(0); \ + break; \ + }; \ + DEBUG_RVV_FP_VF; \ + VI_VFP_LOOP_END + +#define VI_VFP_VV_LOOP_CMP(BODY16, BODY32, BODY64) \ + VI_CHECK_MSS(true); \ + VI_VFP_LOOP_CMP_BASE \ + switch (P.VU.vsew) { \ + case e16: { \ + VFP_VV_PARAMS(16); \ + BODY16; \ + set_fp_exceptions; \ + break; \ + } \ + case e32: { \ + VFP_VV_PARAMS(32); \ + BODY32; \ + set_fp_exceptions; \ + break; \ + } \ + case e64: { \ + VFP_VV_PARAMS(64); \ + BODY64; \ + set_fp_exceptions; \ + break; \ + } \ + default: \ + require(0); \ + break; \ + }; \ + VI_VFP_LOOP_CMP_END \ + +#define VI_VFP_VF_LOOP_CMP(BODY16, BODY32, BODY64) \ + VI_CHECK_MSS(false); \ + VI_VFP_LOOP_CMP_BASE \ + switch (P.VU.vsew) { \ + case e16: { \ + VFP_VF_PARAMS(16); \ + BODY16; \ + set_fp_exceptions; \ + break; \ + } \ + case e32: { \ + VFP_VF_PARAMS(32); \ + BODY32; \ + set_fp_exceptions; \ + break; \ + } \ + case e64: { \ + VFP_VF_PARAMS(64); \ + BODY64; \ + set_fp_exceptions; \ + break; \ + } \ + default: \ + require(0); \ + break; \ + }; \ + VI_VFP_LOOP_CMP_END \ + +#define VI_VFP_VF_LOOP_WIDE(BODY16, BODY32) \ + VI_CHECK_DSS(false); \ + VI_VFP_LOOP_BASE \ + switch (P.VU.vsew) { \ + case e16: { \ + float32_t &vd = P.VU.elt(rd_num, i, true); \ + float32_t vs2 = f16_to_f32(P.VU.elt(rs2_num, i)); \ + float32_t rs1 = f16_to_f32(f16(READ_FREG(rs1_num))); \ + BODY16; \ + set_fp_exceptions; \ + break; \ + } \ + case e32: { \ + float64_t &vd = P.VU.elt(rd_num, i, true); \ + float64_t vs2 = f32_to_f64(P.VU.elt(rs2_num, i)); \ + float64_t rs1 = f32_to_f64(f32(READ_FREG(rs1_num))); \ + BODY32; \ + set_fp_exceptions; \ + break; \ + } \ + default: \ + require(0); \ + break; \ + }; \ + DEBUG_RVV_FP_VV; \ + VI_VFP_LOOP_END + + +#define VI_VFP_VV_LOOP_WIDE(BODY16, BODY32) \ + VI_CHECK_DSS(true); \ + VI_VFP_LOOP_BASE \ + switch (P.VU.vsew) { \ + case e16: { \ + float32_t &vd = P.VU.elt(rd_num, i, true); \ + float32_t vs2 = f16_to_f32(P.VU.elt(rs2_num, i)); \ + float32_t vs1 = f16_to_f32(P.VU.elt(rs1_num, i)); \ + BODY16; \ + set_fp_exceptions; \ + break; \ + } \ + case e32: { \ + float64_t &vd = P.VU.elt(rd_num, i, true); \ + float64_t vs2 = f32_to_f64(P.VU.elt(rs2_num, i)); \ + float64_t vs1 = f32_to_f64(P.VU.elt(rs1_num, i)); \ + BODY32; \ + set_fp_exceptions; \ + break; \ + } \ + default: \ + require(0); \ + break; \ + }; \ + DEBUG_RVV_FP_VV; \ + VI_VFP_LOOP_END + +#define VI_VFP_WF_LOOP_WIDE(BODY16, BODY32) \ + VI_CHECK_DDS(false); \ + VI_VFP_LOOP_BASE \ + switch (P.VU.vsew) { \ + case e16: { \ + float32_t &vd = P.VU.elt(rd_num, i, true); \ + float32_t vs2 = P.VU.elt(rs2_num, i); \ + float32_t rs1 = f16_to_f32(f16(READ_FREG(rs1_num))); \ + BODY16; \ + set_fp_exceptions; \ + break; \ + } \ + case e32: { \ + float64_t &vd = P.VU.elt(rd_num, i, true); \ + float64_t vs2 = P.VU.elt(rs2_num, i); \ + float64_t rs1 = f32_to_f64(f32(READ_FREG(rs1_num))); \ + BODY32; \ + set_fp_exceptions; \ + break; \ + } \ + default: \ + require(0); \ + }; \ + DEBUG_RVV_FP_VV; \ + VI_VFP_LOOP_END + +#define VI_VFP_WV_LOOP_WIDE(BODY16, BODY32) \ + VI_CHECK_DDS(true); \ + VI_VFP_LOOP_BASE \ + switch (P.VU.vsew) { \ + case e16: { \ + 
float32_t &vd = P.VU.elt(rd_num, i, true); \ + float32_t vs2 = P.VU.elt(rs2_num, i); \ + float32_t vs1 = f16_to_f32(P.VU.elt(rs1_num, i)); \ + BODY16; \ + set_fp_exceptions; \ + break; \ + } \ + case e32: { \ + float64_t &vd = P.VU.elt(rd_num, i, true); \ + float64_t vs2 = P.VU.elt(rs2_num, i); \ + float64_t vs1 = f32_to_f64(P.VU.elt(rs1_num, i)); \ + BODY32; \ + set_fp_exceptions; \ + break; \ + } \ + default: \ + require(0); \ + }; \ + DEBUG_RVV_FP_VV; \ + VI_VFP_LOOP_END + +#define VI_VFP_LOOP_SCALE_BASE \ + require_fp; \ + require_vector(true); \ + require(STATE.frm->read() < 0x5); \ + reg_t vl = P.VU.vl->read(); \ + reg_t rd_num = insn.rd(); \ + reg_t rs1_num = insn.rs1(); \ + reg_t rs2_num = insn.rs2(); \ + softfloat_roundingMode = STATE.frm->read(); \ + for (reg_t i = P.VU.vstart->read(); i < vl; ++i) { \ + VI_LOOP_ELEMENT_SKIP(); + +#define VI_VFP_CVT_LOOP(CVT_PARAMS, CHECK, BODY) \ + CHECK \ + VI_VFP_LOOP_SCALE_BASE \ + CVT_PARAMS \ + BODY \ + set_fp_exceptions; \ + VI_VFP_LOOP_END + +#define VI_VFP_CVT_INT_TO_FP(BODY16, BODY32, BODY64, sign) \ + VI_CHECK_SSS(false); \ + VI_VFP_COMMON \ + switch (P.VU.vsew) { \ + case e16: \ + { VI_VFP_CVT_LOOP(CVT_INT_TO_FP_PARAMS(16, 16, sign), \ + { p->extension_enabled(EXT_ZFH); }, \ + BODY16); } \ + break; \ + case e32: \ + { VI_VFP_CVT_LOOP(CVT_INT_TO_FP_PARAMS(32, 32, sign), \ + { p->extension_enabled('F'); }, \ + BODY32); } \ + break; \ + case e64: \ + { VI_VFP_CVT_LOOP(CVT_INT_TO_FP_PARAMS(64, 64, sign), \ + { p->extension_enabled('D'); }, \ + BODY64); } \ + break; \ + default: \ + require(0); \ + break; \ + } + +#define VI_VFP_CVT_FP_TO_INT(BODY16, BODY32, BODY64, sign) \ + VI_CHECK_SSS(false); \ + VI_VFP_COMMON \ + switch (P.VU.vsew) { \ + case e16: \ + { VI_VFP_CVT_LOOP(CVT_FP_TO_INT_PARAMS(16, 16, sign), \ + { p->extension_enabled(EXT_ZFH); }, \ + BODY16); } \ + break; \ + case e32: \ + { VI_VFP_CVT_LOOP(CVT_FP_TO_INT_PARAMS(32, 32, sign), \ + { p->extension_enabled('F'); }, \ + BODY32); } \ + break; \ + case e64: \ + { VI_VFP_CVT_LOOP(CVT_FP_TO_INT_PARAMS(64, 64, sign), \ + { p->extension_enabled('D'); }, \ + BODY64); } \ + break; \ + default: \ + require(0); \ + break; \ + } + +#define VI_VFP_WCVT_FP_TO_FP(BODY8, BODY16, BODY32, \ + CHECK8, CHECK16, CHECK32) \ + VI_CHECK_DSS(false); \ + switch (P.VU.vsew) { \ + case e16: \ + { VI_VFP_CVT_LOOP(CVT_FP_TO_FP_PARAMS(16, 32), CHECK16, BODY16); } \ + break; \ + case e32: \ + { VI_VFP_CVT_LOOP(CVT_FP_TO_FP_PARAMS(32, 64), CHECK32, BODY32); } \ + break; \ + default: \ + require(0); \ + break; \ + } + +#define VI_VFP_WCVT_INT_TO_FP(BODY8, BODY16, BODY32, \ + CHECK8, CHECK16, CHECK32, \ + sign) \ + VI_CHECK_DSS(false); \ + switch (P.VU.vsew) { \ + case e8: \ + { VI_VFP_CVT_LOOP(CVT_INT_TO_FP_PARAMS(8, 16, sign), CHECK8, BODY8); } \ + break; \ + case e16: \ + { VI_VFP_CVT_LOOP(CVT_INT_TO_FP_PARAMS(16, 32, sign), CHECK16, BODY16); } \ + break; \ + case e32: \ + { VI_VFP_CVT_LOOP(CVT_INT_TO_FP_PARAMS(32, 64, sign), CHECK32, BODY32); } \ + break; \ + default: \ + require(0); \ + break; \ + } + +#define VI_VFP_WCVT_FP_TO_INT(BODY8, BODY16, BODY32, \ + CHECK8, CHECK16, CHECK32, \ + sign) \ + VI_CHECK_DSS(false); \ + switch (P.VU.vsew) { \ + case e16: \ + { VI_VFP_CVT_LOOP(CVT_FP_TO_INT_PARAMS(16, 32, sign), CHECK16, BODY16); } \ + break; \ + case e32: \ + { VI_VFP_CVT_LOOP(CVT_FP_TO_INT_PARAMS(32, 64, sign), CHECK32, BODY32); } \ + break; \ + default: \ + require(0); \ + break; \ + } + +#define VI_VFP_NCVT_FP_TO_FP(BODY8, BODY16, BODY32, \ + CHECK8, CHECK16, CHECK32) \ + VI_CHECK_SDS(false); \ + 
switch (P.VU.vsew) { \ + case e16: \ + { VI_VFP_CVT_LOOP(CVT_FP_TO_FP_PARAMS(32, 16), CHECK16, BODY16); } \ + break; \ + case e32: \ + { VI_VFP_CVT_LOOP(CVT_FP_TO_FP_PARAMS(64, 32), CHECK32, BODY32); } \ + break; \ + default: \ + require(0); \ + break; \ + } + +#define VI_VFP_NCVT_INT_TO_FP(BODY8, BODY16, BODY32, \ + CHECK8, CHECK16, CHECK32, \ + sign) \ + VI_CHECK_SDS(false); \ + switch (P.VU.vsew) { \ + case e16: \ + { VI_VFP_CVT_LOOP(CVT_INT_TO_FP_PARAMS(32, 16, sign), CHECK16, BODY16); } \ + break; \ + case e32: \ + { VI_VFP_CVT_LOOP(CVT_INT_TO_FP_PARAMS(64, 32, sign), CHECK32, BODY32); } \ + break; \ + default: \ + require(0); \ + break; \ + } + +#define VI_VFP_NCVT_FP_TO_INT(BODY8, BODY16, BODY32, \ + CHECK8, CHECK16, CHECK32, \ + sign) \ + VI_CHECK_SDS(false); \ + switch (P.VU.vsew) { \ + case e8: \ + { VI_VFP_CVT_LOOP(CVT_FP_TO_INT_PARAMS(16, 8, sign), CHECK8, BODY8); } \ + break; \ + case e16: \ + { VI_VFP_CVT_LOOP(CVT_FP_TO_INT_PARAMS(32, 16, sign), CHECK16, BODY16); } \ + break; \ + case e32: \ + { VI_VFP_CVT_LOOP(CVT_FP_TO_INT_PARAMS(64, 32, sign), CHECK32, BODY32); } \ + break; \ + default: \ + require(0); \ + break; \ + } + +#endif -- cgit v1.1 From 615de147a26d1e342642848cc22c845853f5e11f Mon Sep 17 00:00:00 2001 From: Andrew Waterman Date: Thu, 5 May 2022 13:39:45 -0700 Subject: Factor out P extension macros into their own header No functional change. --- riscv/decode.h | 501 +------------------------------------------------- riscv/p_ext_macros.h | 506 +++++++++++++++++++++++++++++++++++++++++++++++++++ 2 files changed, 507 insertions(+), 500 deletions(-) create mode 100644 riscv/p_ext_macros.h diff --git a/riscv/decode.h b/riscv/decode.h index 70f146d..e5e8115 100644 --- a/riscv/decode.h +++ b/riscv/decode.h @@ -16,6 +16,7 @@ #include "common.h" #include "softfloat_types.h" #include "specialize.h" +#include "p_ext_macros.h" #include "v_ext_macros.h" #include @@ -417,506 +418,6 @@ inline long double to_f(float128_t f) { long double r; memcpy(&r, &f, sizeof(r)) #define DEBUG_RVV_FMA_VF 0 #endif -// The p-extension support is contributed by -// Programming Langauge Lab, Department of Computer Science, National Tsing-Hua University, Taiwan - -#define P_FIELD(R, INDEX, SIZE) \ - (type_sew_t::type)get_field(R, make_mask64(((INDEX) * SIZE), SIZE)) - -#define P_UFIELD(R, INDEX, SIZE) \ - (type_usew_t::type)get_field(R, make_mask64(((INDEX) * SIZE), SIZE)) - -#define P_B(R, INDEX) P_UFIELD(R, INDEX, 8) -#define P_H(R, INDEX) P_UFIELD(R, INDEX, 16) -#define P_W(R, INDEX) P_UFIELD(R, INDEX, 32) -#define P_SB(R, INDEX) P_FIELD(R, INDEX, 8) -#define P_SH(R, INDEX) P_FIELD(R, INDEX, 16) -#define P_SW(R, INDEX) P_FIELD(R, INDEX, 32) - -#define READ_REG_PAIR(reg) ({ \ - require((reg) % 2 == 0); \ - (reg) == 0 ? 
reg_t(0) : \ - (READ_REG((reg) + 1) << 32) + zext32(READ_REG(reg)); }) - -#define RS1_PAIR READ_REG_PAIR(insn.rs1()) -#define RS2_PAIR READ_REG_PAIR(insn.rs2()) -#define RD_PAIR READ_REG_PAIR(insn.rd()) - -#define WRITE_PD() \ - rd_tmp = set_field(rd_tmp, make_mask64((i * sizeof(pd) * 8), sizeof(pd) * 8), pd); - -#define WRITE_RD_PAIR(value) \ - if (insn.rd() != 0) { \ - require(insn.rd() % 2 == 0); \ - WRITE_REG(insn.rd(), sext32(value)); \ - WRITE_REG(insn.rd() + 1, (sreg_t(value)) >> 32); \ - } - -#define P_SET_OV(ov) \ - if (ov) P.VU.vxsat->write(1); - -#define P_SAT(R, BIT) \ - if (R > INT##BIT##_MAX) { \ - R = INT##BIT##_MAX; \ - P_SET_OV(1); \ - } else if (R < INT##BIT##_MIN) { \ - R = INT##BIT##_MIN; \ - P_SET_OV(1); \ - } - -#define P_SATU(R, BIT) \ - if (R > UINT##BIT##_MAX) { \ - R = UINT##BIT##_MAX; \ - P_SET_OV(1); \ - } else if (R < 0) { \ - P_SET_OV(1); \ - R = 0; \ - } - -#define P_LOOP_BASE(BIT) \ - require_extension(EXT_ZPN); \ - require(BIT == e8 || BIT == e16 || BIT == e32); \ - reg_t rd_tmp = RD; \ - reg_t rs1 = RS1; \ - reg_t rs2 = RS2; \ - sreg_t len = xlen / BIT; \ - for (sreg_t i = len - 1; i >= 0; --i) { - -#define P_ONE_LOOP_BASE(BIT) \ - require_extension(EXT_ZPN); \ - require(BIT == e8 || BIT == e16 || BIT == e32); \ - reg_t rd_tmp = RD; \ - reg_t rs1 = RS1; \ - sreg_t len = xlen / BIT; \ - for (sreg_t i = len - 1; i >= 0; --i) { - -#define P_I_LOOP_BASE(BIT, IMMBIT) \ - require_extension(EXT_ZPN); \ - require(BIT == e8 || BIT == e16 || BIT == e32); \ - reg_t rd_tmp = RD; \ - reg_t rs1 = RS1; \ - type_usew_t::type imm##IMMBIT##u = insn.p_imm##IMMBIT(); \ - sreg_t len = xlen / BIT; \ - for (sreg_t i = len - 1; i >= 0; --i) { - -#define P_X_LOOP_BASE(BIT, LOWBIT) \ - require_extension(EXT_ZPN); \ - require(BIT == e8 || BIT == e16 || BIT == e32); \ - reg_t rd_tmp = RD; \ - reg_t rs1 = RS1; \ - type_usew_t::type sa = RS2 & ((uint64_t(1) << LOWBIT) - 1); \ - type_sew_t::type ssa = int64_t(RS2) << (64 - LOWBIT) >> (64 - LOWBIT); \ - sreg_t len = xlen / BIT; \ - for (sreg_t i = len - 1; i >= 0; --i) { - -#define P_MUL_LOOP_BASE(BIT) \ - require_extension(EXT_ZPN); \ - require(BIT == e8 || BIT == e16 || BIT == e32); \ - reg_t rd_tmp = RD; \ - reg_t rs1 = RS1; \ - reg_t rs2 = RS2; \ - sreg_t len = 32 / BIT; \ - for (sreg_t i = len - 1; i >= 0; --i) { - -#define P_REDUCTION_LOOP_BASE(BIT, BIT_INNER, USE_RD) \ - require_extension(EXT_ZPN); \ - require(BIT == e16 || BIT == e32 || BIT == e64); \ - reg_t rd_tmp = USE_RD ? zext_xlen(RD) : 0; \ - reg_t rs1 = zext_xlen(RS1); \ - reg_t rs2 = zext_xlen(RS2); \ - sreg_t len = 64 / BIT; \ - sreg_t len_inner = BIT / BIT_INNER; \ - for (sreg_t i = len - 1; i >= 0; --i) { \ - sreg_t pd_res = P_FIELD(rd_tmp, i, BIT); \ - for (sreg_t j = i * len_inner; j < (i + 1) * len_inner; ++j) { - -#define P_REDUCTION_ULOOP_BASE(BIT, BIT_INNER, USE_RD) \ - require_extension(EXT_ZPN); \ - require(BIT == e16 || BIT == e32 || BIT == e64); \ - reg_t rd_tmp = USE_RD ? 
zext_xlen(RD) : 0; \ - reg_t rs1 = zext_xlen(RS1); \ - reg_t rs2 = zext_xlen(RS2); \ - sreg_t len = 64 / BIT; \ - sreg_t len_inner = BIT / BIT_INNER; \ - for (sreg_t i = len - 1; i >=0; --i) { \ - reg_t pd_res = P_UFIELD(rd_tmp, i, BIT); \ - for (sreg_t j = i * len_inner; j < (i + 1) * len_inner; ++j) { - -#define P_PARAMS(BIT) \ - auto pd = P_FIELD(rd_tmp, i, BIT); \ - auto ps1 = P_FIELD(rs1, i, BIT); \ - auto ps2 = P_FIELD(rs2, i, BIT); - -#define P_UPARAMS(BIT) \ - auto pd = P_UFIELD(rd_tmp, i, BIT); \ - auto ps1 = P_UFIELD(rs1, i, BIT); \ - auto ps2 = P_UFIELD(rs2, i, BIT); - -#define P_CORSS_PARAMS(BIT) \ - auto pd = P_FIELD(rd_tmp, i, BIT); \ - auto ps1 = P_FIELD(rs1, i, BIT); \ - auto ps2 = P_FIELD(rs2, (i ^ 1), BIT); - -#define P_CORSS_UPARAMS(BIT) \ - auto pd = P_UFIELD(rd_tmp, i, BIT); \ - auto ps1 = P_UFIELD(rs1, i, BIT); \ - auto ps2 = P_UFIELD(rs2, (i ^ 1), BIT); - -#define P_ONE_PARAMS(BIT) \ - auto pd = P_FIELD(rd_tmp, i, BIT); \ - auto ps1 = P_FIELD(rs1, i, BIT); - -#define P_ONE_UPARAMS(BIT) \ - auto pd = P_UFIELD(rd_tmp, i, BIT); \ - auto ps1 = P_UFIELD(rs1, i, BIT); - -#define P_ONE_SUPARAMS(BIT) \ - auto pd = P_UFIELD(rd_tmp, i, BIT); \ - auto ps1 = P_FIELD(rs1, i, BIT); - -#define P_MUL_PARAMS(BIT) \ - auto pd = P_FIELD(rd_tmp, i, BIT * 2); \ - auto ps1 = P_FIELD(rs1, i, BIT); \ - auto ps2 = P_FIELD(rs2, i, BIT); - -#define P_MUL_UPARAMS(BIT) \ - auto pd = P_UFIELD(rd_tmp, i, BIT * 2); \ - auto ps1 = P_UFIELD(rs1, i, BIT); \ - auto ps2 = P_UFIELD(rs2, i, BIT); - -#define P_MUL_CROSS_PARAMS(BIT) \ - auto pd = P_FIELD(rd_tmp, i, BIT * 2); \ - auto ps1 = P_FIELD(rs1, i, BIT); \ - auto ps2 = P_FIELD(rs2, (i ^ 1), BIT); - -#define P_MUL_CROSS_UPARAMS(BIT) \ - auto pd = P_UFIELD(rd_tmp, i, BIT*2); \ - auto ps1 = P_UFIELD(rs1, i, BIT); \ - auto ps2 = P_UFIELD(rs2, (i ^ 1), BIT); - -#define P_REDUCTION_PARAMS(BIT_INNER) \ - auto ps1 = P_FIELD(rs1, j, BIT_INNER); \ - auto ps2 = P_FIELD(rs2, j, BIT_INNER); - -#define P_REDUCTION_UPARAMS(BIT_INNER) \ - auto ps1 = P_UFIELD(rs1, j, BIT_INNER); \ - auto ps2 = P_UFIELD(rs2, j, BIT_INNER); - -#define P_REDUCTION_SUPARAMS(BIT_INNER) \ - auto ps1 = P_FIELD(rs1, j, BIT_INNER); \ - auto ps2 = P_UFIELD(rs2, j, BIT_INNER); - -#define P_REDUCTION_CROSS_PARAMS(BIT_INNER) \ - auto ps1 = P_FIELD(rs1, j, BIT_INNER); \ - auto ps2 = P_FIELD(rs2, (j ^ 1), BIT_INNER); - -#define P_LOOP_BODY(BIT, BODY) { \ - P_PARAMS(BIT) \ - BODY \ - WRITE_PD(); \ -} - -#define P_ULOOP_BODY(BIT, BODY) { \ - P_UPARAMS(BIT) \ - BODY \ - WRITE_PD(); \ -} - -#define P_ONE_LOOP_BODY(BIT, BODY) { \ - P_ONE_PARAMS(BIT) \ - BODY \ - WRITE_PD(); \ -} - -#define P_CROSS_LOOP_BODY(BIT, BODY) { \ - P_CORSS_PARAMS(BIT) \ - BODY \ - WRITE_PD(); \ -} - -#define P_CROSS_ULOOP_BODY(BIT, BODY) { \ - P_CORSS_UPARAMS(BIT) \ - BODY \ - WRITE_PD(); \ -} - -#define P_ONE_ULOOP_BODY(BIT, BODY) { \ - P_ONE_UPARAMS(BIT) \ - BODY \ - WRITE_PD(); \ -} - -#define P_MUL_LOOP_BODY(BIT, BODY) { \ - P_MUL_PARAMS(BIT) \ - BODY \ - WRITE_PD(); \ -} - -#define P_MUL_ULOOP_BODY(BIT, BODY) { \ - P_MUL_UPARAMS(BIT) \ - BODY \ - WRITE_PD(); \ -} - -#define P_MUL_CROSS_LOOP_BODY(BIT, BODY) { \ - P_MUL_CROSS_PARAMS(BIT) \ - BODY \ - WRITE_PD(); \ -} - -#define P_MUL_CROSS_ULOOP_BODY(BIT, BODY) { \ - P_MUL_CROSS_UPARAMS(BIT) \ - BODY \ - WRITE_PD(); \ -} - -#define P_LOOP(BIT, BODY) \ - P_LOOP_BASE(BIT) \ - P_LOOP_BODY(BIT, BODY) \ - P_LOOP_END() - -#define P_ONE_LOOP(BIT, BODY) \ - P_ONE_LOOP_BASE(BIT) \ - P_ONE_LOOP_BODY(BIT, BODY) \ - P_LOOP_END() - -#define P_ULOOP(BIT, BODY) \ - P_LOOP_BASE(BIT) \ - 
P_ULOOP_BODY(BIT, BODY) \ - P_LOOP_END() - -#define P_CROSS_LOOP(BIT, BODY1, BODY2) \ - P_LOOP_BASE(BIT) \ - P_CROSS_LOOP_BODY(BIT, BODY1) \ - --i; \ - if (sizeof(#BODY2) == 1) { \ - P_CROSS_LOOP_BODY(BIT, BODY1) \ - } \ - else { \ - P_CROSS_LOOP_BODY(BIT, BODY2) \ - } \ - P_LOOP_END() - -#define P_CROSS_ULOOP(BIT, BODY1, BODY2) \ - P_LOOP_BASE(BIT) \ - P_CROSS_ULOOP_BODY(BIT, BODY1) \ - --i; \ - P_CROSS_ULOOP_BODY(BIT, BODY2) \ - P_LOOP_END() - -#define P_STRAIGHT_LOOP(BIT, BODY1, BODY2) \ - P_LOOP_BASE(BIT) \ - P_LOOP_BODY(BIT, BODY1) \ - --i; \ - P_LOOP_BODY(BIT, BODY2) \ - P_LOOP_END() - -#define P_STRAIGHT_ULOOP(BIT, BODY1, BODY2) \ - P_LOOP_BASE(BIT) \ - P_ULOOP_BODY(BIT, BODY1) \ - --i; \ - P_ULOOP_BODY(BIT, BODY2) \ - P_LOOP_END() - -#define P_X_LOOP(BIT, RS2_LOW_BIT, BODY) \ - P_X_LOOP_BASE(BIT, RS2_LOW_BIT) \ - P_ONE_LOOP_BODY(BIT, BODY) \ - P_LOOP_END() - -#define P_X_ULOOP(BIT, RS2_LOW_BIT, BODY) \ - P_X_LOOP_BASE(BIT, RS2_LOW_BIT) \ - P_ONE_ULOOP_BODY(BIT, BODY) \ - P_LOOP_END() - -#define P_I_LOOP(BIT, IMMBIT, BODY) \ - P_I_LOOP_BASE(BIT, IMMBIT) \ - P_ONE_LOOP_BODY(BIT, BODY) \ - P_LOOP_END() - -#define P_I_ULOOP(BIT, IMMBIT, BODY) \ - P_I_LOOP_BASE(BIT, IMMBIT) \ - P_ONE_ULOOP_BODY(BIT, BODY) \ - P_LOOP_END() - -#define P_MUL_LOOP(BIT, BODY) \ - P_MUL_LOOP_BASE(BIT) \ - P_MUL_LOOP_BODY(BIT, BODY) \ - P_PAIR_LOOP_END() - -#define P_MUL_ULOOP(BIT, BODY) \ - P_MUL_LOOP_BASE(BIT) \ - P_MUL_ULOOP_BODY(BIT, BODY) \ - P_PAIR_LOOP_END() - -#define P_MUL_CROSS_LOOP(BIT, BODY) \ - P_MUL_LOOP_BASE(BIT) \ - P_MUL_CROSS_LOOP_BODY(BIT, BODY) \ - P_PAIR_LOOP_END() - -#define P_MUL_CROSS_ULOOP(BIT, BODY) \ - P_MUL_LOOP_BASE(BIT) \ - P_MUL_CROSS_ULOOP_BODY(BIT, BODY) \ - P_PAIR_LOOP_END() - -#define P_REDUCTION_LOOP(BIT, BIT_INNER, USE_RD, IS_SAT, BODY) \ - P_REDUCTION_LOOP_BASE(BIT, BIT_INNER, USE_RD) \ - P_REDUCTION_PARAMS(BIT_INNER) \ - BODY \ - P_REDUCTION_LOOP_END(BIT, IS_SAT) - -#define P_REDUCTION_ULOOP(BIT, BIT_INNER, USE_RD, IS_SAT, BODY) \ - P_REDUCTION_ULOOP_BASE(BIT, BIT_INNER, USE_RD) \ - P_REDUCTION_UPARAMS(BIT_INNER) \ - BODY \ - P_REDUCTION_ULOOP_END(BIT, IS_SAT) - -#define P_REDUCTION_SULOOP(BIT, BIT_INNER, USE_RD, IS_SAT, BODY) \ - P_REDUCTION_LOOP_BASE(BIT, BIT_INNER, USE_RD) \ - P_REDUCTION_SUPARAMS(BIT_INNER) \ - BODY \ - P_REDUCTION_LOOP_END(BIT, IS_SAT) - -#define P_REDUCTION_CROSS_LOOP(BIT, BIT_INNER, USE_RD, IS_SAT, BODY) \ - P_REDUCTION_LOOP_BASE(BIT, BIT_INNER, USE_RD) \ - P_REDUCTION_CROSS_PARAMS(BIT_INNER) \ - BODY \ - P_REDUCTION_LOOP_END(BIT, IS_SAT) - -#define P_LOOP_END() \ - } \ - WRITE_RD(sext_xlen(rd_tmp)); - -#define P_PAIR_LOOP_END() \ - } \ - if (xlen == 32) { \ - WRITE_RD_PAIR(rd_tmp); \ - } \ - else { \ - WRITE_RD(sext_xlen(rd_tmp)); \ - } - -#define P_REDUCTION_LOOP_END(BIT, IS_SAT) \ - } \ - if (IS_SAT) { \ - P_SAT(pd_res, BIT); \ - } \ - type_usew_t::type pd = pd_res; \ - WRITE_PD(); \ - } \ - WRITE_RD(sext_xlen(rd_tmp)); - -#define P_REDUCTION_ULOOP_END(BIT, IS_SAT) \ - } \ - if (IS_SAT) { \ - P_SATU(pd_res, BIT); \ - } \ - type_usew_t::type pd = pd_res; \ - WRITE_PD(); \ - } \ - WRITE_RD(sext_xlen(rd_tmp)); - -#define P_SUNPKD8(X, Y) \ - require_extension(EXT_ZPN); \ - reg_t rd_tmp = 0; \ - int16_t pd[4] = { \ - P_SB(RS1, Y), \ - P_SB(RS1, X), \ - P_SB(RS1, Y + 4), \ - P_SB(RS1, X + 4), \ - }; \ - if (xlen == 64) { \ - memcpy(&rd_tmp, pd, 8); \ - } else { \ - memcpy(&rd_tmp, pd, 4); \ - } \ - WRITE_RD(sext_xlen(rd_tmp)); - -#define P_ZUNPKD8(X, Y) \ - require_extension(EXT_ZPN); \ - reg_t rd_tmp = 0; \ - uint16_t pd[4] = { \ - P_B(RS1, Y), \ - 
P_B(RS1, X), \ - P_B(RS1, Y + 4), \ - P_B(RS1, X + 4), \ - }; \ - if (xlen == 64) { \ - memcpy(&rd_tmp, pd, 8); \ - } else { \ - memcpy(&rd_tmp, pd, 4); \ - } \ - WRITE_RD(sext_xlen(rd_tmp)); - -#define P_PK(BIT, X, Y) \ - require_extension(EXT_ZPN); \ - require(BIT == e16 || BIT == e32); \ - reg_t rd_tmp = 0, rs1 = RS1, rs2 = RS2; \ - for (sreg_t i = 0; i < xlen / BIT / 2; i++) { \ - rd_tmp = set_field(rd_tmp, make_mask64(i * 2 * BIT, BIT), \ - P_UFIELD(RS2, i * 2 + Y, BIT)); \ - rd_tmp = set_field(rd_tmp, make_mask64((i * 2 + 1) * BIT, BIT), \ - P_UFIELD(RS1, i * 2 + X, BIT)); \ - } \ - WRITE_RD(sext_xlen(rd_tmp)); - -#define P_64_PROFILE_BASE() \ - require_extension(EXT_ZPSFOPERAND); \ - sreg_t rd, rs1, rs2; - -#define P_64_UPROFILE_BASE() \ - require_extension(EXT_ZPSFOPERAND); \ - reg_t rd, rs1, rs2; - -#define P_64_PROFILE_PARAM(USE_RD, INPUT_PAIR) \ - if (xlen == 32) { \ - rs1 = INPUT_PAIR ? RS1_PAIR : RS1; \ - rs2 = INPUT_PAIR ? RS2_PAIR : RS2; \ - rd = USE_RD ? RD_PAIR : 0; \ - } else { \ - rs1 = RS1; \ - rs2 = RS2; \ - rd = USE_RD ? RD : 0; \ - } - -#define P_64_PROFILE(BODY) \ - P_64_PROFILE_BASE() \ - P_64_PROFILE_PARAM(false, true) \ - BODY \ - P_64_PROFILE_END() \ - -#define P_64_UPROFILE(BODY) \ - P_64_UPROFILE_BASE() \ - P_64_PROFILE_PARAM(false, true) \ - BODY \ - P_64_PROFILE_END() \ - -#define P_64_PROFILE_REDUCTION(BIT, BODY) \ - P_64_PROFILE_BASE() \ - P_64_PROFILE_PARAM(true, false) \ - for (sreg_t i = 0; i < xlen / BIT; i++) { \ - sreg_t ps1 = P_FIELD(rs1, i, BIT); \ - sreg_t ps2 = P_FIELD(rs2, i, BIT); \ - BODY \ - } \ - P_64_PROFILE_END() \ - -#define P_64_UPROFILE_REDUCTION(BIT, BODY) \ - P_64_UPROFILE_BASE() \ - P_64_PROFILE_PARAM(true, false) \ - for (sreg_t i = 0; i < xlen / BIT; i++) { \ - reg_t ps1 = P_UFIELD(rs1, i, BIT); \ - reg_t ps2 = P_UFIELD(rs2, i, BIT); \ - BODY \ - } \ - P_64_PROFILE_END() \ - -#define P_64_PROFILE_END() \ - if (xlen == 32) { \ - WRITE_RD_PAIR(rd); \ - } else { \ - WRITE_RD(sext_xlen(rd)); \ - } - #define DECLARE_XENVCFG_VARS(field) \ reg_t m##field = get_field(STATE.menvcfg->read(), MENVCFG_##field); \ reg_t s##field = get_field(STATE.senvcfg->read(), SENVCFG_##field); \ diff --git a/riscv/p_ext_macros.h b/riscv/p_ext_macros.h new file mode 100644 index 0000000..6ca97b5 --- /dev/null +++ b/riscv/p_ext_macros.h @@ -0,0 +1,506 @@ +// See LICENSE for license details. + +#ifndef _RISCV_P_EXT_MACROS_H +#define _RISCV_P_EXT_MACROS_H + +// The p-extension support is contributed by +// Programming Langauge Lab, Department of Computer Science, National Tsing-Hua University, Taiwan + +#define P_FIELD(R, INDEX, SIZE) \ + (type_sew_t::type)get_field(R, make_mask64(((INDEX) * SIZE), SIZE)) + +#define P_UFIELD(R, INDEX, SIZE) \ + (type_usew_t::type)get_field(R, make_mask64(((INDEX) * SIZE), SIZE)) + +#define P_B(R, INDEX) P_UFIELD(R, INDEX, 8) +#define P_H(R, INDEX) P_UFIELD(R, INDEX, 16) +#define P_W(R, INDEX) P_UFIELD(R, INDEX, 32) +#define P_SB(R, INDEX) P_FIELD(R, INDEX, 8) +#define P_SH(R, INDEX) P_FIELD(R, INDEX, 16) +#define P_SW(R, INDEX) P_FIELD(R, INDEX, 32) + +#define READ_REG_PAIR(reg) ({ \ + require((reg) % 2 == 0); \ + (reg) == 0 ? 
reg_t(0) : \ + (READ_REG((reg) + 1) << 32) + zext32(READ_REG(reg)); }) + +#define RS1_PAIR READ_REG_PAIR(insn.rs1()) +#define RS2_PAIR READ_REG_PAIR(insn.rs2()) +#define RD_PAIR READ_REG_PAIR(insn.rd()) + +#define WRITE_PD() \ + rd_tmp = set_field(rd_tmp, make_mask64((i * sizeof(pd) * 8), sizeof(pd) * 8), pd); + +#define WRITE_RD_PAIR(value) \ + if (insn.rd() != 0) { \ + require(insn.rd() % 2 == 0); \ + WRITE_REG(insn.rd(), sext32(value)); \ + WRITE_REG(insn.rd() + 1, (sreg_t(value)) >> 32); \ + } + +#define P_SET_OV(ov) \ + if (ov) P.VU.vxsat->write(1); + +#define P_SAT(R, BIT) \ + if (R > INT##BIT##_MAX) { \ + R = INT##BIT##_MAX; \ + P_SET_OV(1); \ + } else if (R < INT##BIT##_MIN) { \ + R = INT##BIT##_MIN; \ + P_SET_OV(1); \ + } + +#define P_SATU(R, BIT) \ + if (R > UINT##BIT##_MAX) { \ + R = UINT##BIT##_MAX; \ + P_SET_OV(1); \ + } else if (R < 0) { \ + P_SET_OV(1); \ + R = 0; \ + } + +#define P_LOOP_BASE(BIT) \ + require_extension(EXT_ZPN); \ + require(BIT == e8 || BIT == e16 || BIT == e32); \ + reg_t rd_tmp = RD; \ + reg_t rs1 = RS1; \ + reg_t rs2 = RS2; \ + sreg_t len = xlen / BIT; \ + for (sreg_t i = len - 1; i >= 0; --i) { + +#define P_ONE_LOOP_BASE(BIT) \ + require_extension(EXT_ZPN); \ + require(BIT == e8 || BIT == e16 || BIT == e32); \ + reg_t rd_tmp = RD; \ + reg_t rs1 = RS1; \ + sreg_t len = xlen / BIT; \ + for (sreg_t i = len - 1; i >= 0; --i) { + +#define P_I_LOOP_BASE(BIT, IMMBIT) \ + require_extension(EXT_ZPN); \ + require(BIT == e8 || BIT == e16 || BIT == e32); \ + reg_t rd_tmp = RD; \ + reg_t rs1 = RS1; \ + type_usew_t::type imm##IMMBIT##u = insn.p_imm##IMMBIT(); \ + sreg_t len = xlen / BIT; \ + for (sreg_t i = len - 1; i >= 0; --i) { + +#define P_X_LOOP_BASE(BIT, LOWBIT) \ + require_extension(EXT_ZPN); \ + require(BIT == e8 || BIT == e16 || BIT == e32); \ + reg_t rd_tmp = RD; \ + reg_t rs1 = RS1; \ + type_usew_t::type sa = RS2 & ((uint64_t(1) << LOWBIT) - 1); \ + type_sew_t::type ssa = int64_t(RS2) << (64 - LOWBIT) >> (64 - LOWBIT); \ + sreg_t len = xlen / BIT; \ + for (sreg_t i = len - 1; i >= 0; --i) { + +#define P_MUL_LOOP_BASE(BIT) \ + require_extension(EXT_ZPN); \ + require(BIT == e8 || BIT == e16 || BIT == e32); \ + reg_t rd_tmp = RD; \ + reg_t rs1 = RS1; \ + reg_t rs2 = RS2; \ + sreg_t len = 32 / BIT; \ + for (sreg_t i = len - 1; i >= 0; --i) { + +#define P_REDUCTION_LOOP_BASE(BIT, BIT_INNER, USE_RD) \ + require_extension(EXT_ZPN); \ + require(BIT == e16 || BIT == e32 || BIT == e64); \ + reg_t rd_tmp = USE_RD ? zext_xlen(RD) : 0; \ + reg_t rs1 = zext_xlen(RS1); \ + reg_t rs2 = zext_xlen(RS2); \ + sreg_t len = 64 / BIT; \ + sreg_t len_inner = BIT / BIT_INNER; \ + for (sreg_t i = len - 1; i >= 0; --i) { \ + sreg_t pd_res = P_FIELD(rd_tmp, i, BIT); \ + for (sreg_t j = i * len_inner; j < (i + 1) * len_inner; ++j) { + +#define P_REDUCTION_ULOOP_BASE(BIT, BIT_INNER, USE_RD) \ + require_extension(EXT_ZPN); \ + require(BIT == e16 || BIT == e32 || BIT == e64); \ + reg_t rd_tmp = USE_RD ? 
zext_xlen(RD) : 0; \ + reg_t rs1 = zext_xlen(RS1); \ + reg_t rs2 = zext_xlen(RS2); \ + sreg_t len = 64 / BIT; \ + sreg_t len_inner = BIT / BIT_INNER; \ + for (sreg_t i = len - 1; i >=0; --i) { \ + reg_t pd_res = P_UFIELD(rd_tmp, i, BIT); \ + for (sreg_t j = i * len_inner; j < (i + 1) * len_inner; ++j) { + +#define P_PARAMS(BIT) \ + auto pd = P_FIELD(rd_tmp, i, BIT); \ + auto ps1 = P_FIELD(rs1, i, BIT); \ + auto ps2 = P_FIELD(rs2, i, BIT); + +#define P_UPARAMS(BIT) \ + auto pd = P_UFIELD(rd_tmp, i, BIT); \ + auto ps1 = P_UFIELD(rs1, i, BIT); \ + auto ps2 = P_UFIELD(rs2, i, BIT); + +#define P_CORSS_PARAMS(BIT) \ + auto pd = P_FIELD(rd_tmp, i, BIT); \ + auto ps1 = P_FIELD(rs1, i, BIT); \ + auto ps2 = P_FIELD(rs2, (i ^ 1), BIT); + +#define P_CORSS_UPARAMS(BIT) \ + auto pd = P_UFIELD(rd_tmp, i, BIT); \ + auto ps1 = P_UFIELD(rs1, i, BIT); \ + auto ps2 = P_UFIELD(rs2, (i ^ 1), BIT); + +#define P_ONE_PARAMS(BIT) \ + auto pd = P_FIELD(rd_tmp, i, BIT); \ + auto ps1 = P_FIELD(rs1, i, BIT); + +#define P_ONE_UPARAMS(BIT) \ + auto pd = P_UFIELD(rd_tmp, i, BIT); \ + auto ps1 = P_UFIELD(rs1, i, BIT); + +#define P_ONE_SUPARAMS(BIT) \ + auto pd = P_UFIELD(rd_tmp, i, BIT); \ + auto ps1 = P_FIELD(rs1, i, BIT); + +#define P_MUL_PARAMS(BIT) \ + auto pd = P_FIELD(rd_tmp, i, BIT * 2); \ + auto ps1 = P_FIELD(rs1, i, BIT); \ + auto ps2 = P_FIELD(rs2, i, BIT); + +#define P_MUL_UPARAMS(BIT) \ + auto pd = P_UFIELD(rd_tmp, i, BIT * 2); \ + auto ps1 = P_UFIELD(rs1, i, BIT); \ + auto ps2 = P_UFIELD(rs2, i, BIT); + +#define P_MUL_CROSS_PARAMS(BIT) \ + auto pd = P_FIELD(rd_tmp, i, BIT * 2); \ + auto ps1 = P_FIELD(rs1, i, BIT); \ + auto ps2 = P_FIELD(rs2, (i ^ 1), BIT); + +#define P_MUL_CROSS_UPARAMS(BIT) \ + auto pd = P_UFIELD(rd_tmp, i, BIT*2); \ + auto ps1 = P_UFIELD(rs1, i, BIT); \ + auto ps2 = P_UFIELD(rs2, (i ^ 1), BIT); + +#define P_REDUCTION_PARAMS(BIT_INNER) \ + auto ps1 = P_FIELD(rs1, j, BIT_INNER); \ + auto ps2 = P_FIELD(rs2, j, BIT_INNER); + +#define P_REDUCTION_UPARAMS(BIT_INNER) \ + auto ps1 = P_UFIELD(rs1, j, BIT_INNER); \ + auto ps2 = P_UFIELD(rs2, j, BIT_INNER); + +#define P_REDUCTION_SUPARAMS(BIT_INNER) \ + auto ps1 = P_FIELD(rs1, j, BIT_INNER); \ + auto ps2 = P_UFIELD(rs2, j, BIT_INNER); + +#define P_REDUCTION_CROSS_PARAMS(BIT_INNER) \ + auto ps1 = P_FIELD(rs1, j, BIT_INNER); \ + auto ps2 = P_FIELD(rs2, (j ^ 1), BIT_INNER); + +#define P_LOOP_BODY(BIT, BODY) { \ + P_PARAMS(BIT) \ + BODY \ + WRITE_PD(); \ +} + +#define P_ULOOP_BODY(BIT, BODY) { \ + P_UPARAMS(BIT) \ + BODY \ + WRITE_PD(); \ +} + +#define P_ONE_LOOP_BODY(BIT, BODY) { \ + P_ONE_PARAMS(BIT) \ + BODY \ + WRITE_PD(); \ +} + +#define P_CROSS_LOOP_BODY(BIT, BODY) { \ + P_CORSS_PARAMS(BIT) \ + BODY \ + WRITE_PD(); \ +} + +#define P_CROSS_ULOOP_BODY(BIT, BODY) { \ + P_CORSS_UPARAMS(BIT) \ + BODY \ + WRITE_PD(); \ +} + +#define P_ONE_ULOOP_BODY(BIT, BODY) { \ + P_ONE_UPARAMS(BIT) \ + BODY \ + WRITE_PD(); \ +} + +#define P_MUL_LOOP_BODY(BIT, BODY) { \ + P_MUL_PARAMS(BIT) \ + BODY \ + WRITE_PD(); \ +} + +#define P_MUL_ULOOP_BODY(BIT, BODY) { \ + P_MUL_UPARAMS(BIT) \ + BODY \ + WRITE_PD(); \ +} + +#define P_MUL_CROSS_LOOP_BODY(BIT, BODY) { \ + P_MUL_CROSS_PARAMS(BIT) \ + BODY \ + WRITE_PD(); \ +} + +#define P_MUL_CROSS_ULOOP_BODY(BIT, BODY) { \ + P_MUL_CROSS_UPARAMS(BIT) \ + BODY \ + WRITE_PD(); \ +} + +#define P_LOOP(BIT, BODY) \ + P_LOOP_BASE(BIT) \ + P_LOOP_BODY(BIT, BODY) \ + P_LOOP_END() + +#define P_ONE_LOOP(BIT, BODY) \ + P_ONE_LOOP_BASE(BIT) \ + P_ONE_LOOP_BODY(BIT, BODY) \ + P_LOOP_END() + +#define P_ULOOP(BIT, BODY) \ + P_LOOP_BASE(BIT) \ + 
P_ULOOP_BODY(BIT, BODY) \ + P_LOOP_END() + +#define P_CROSS_LOOP(BIT, BODY1, BODY2) \ + P_LOOP_BASE(BIT) \ + P_CROSS_LOOP_BODY(BIT, BODY1) \ + --i; \ + if (sizeof(#BODY2) == 1) { \ + P_CROSS_LOOP_BODY(BIT, BODY1) \ + } \ + else { \ + P_CROSS_LOOP_BODY(BIT, BODY2) \ + } \ + P_LOOP_END() + +#define P_CROSS_ULOOP(BIT, BODY1, BODY2) \ + P_LOOP_BASE(BIT) \ + P_CROSS_ULOOP_BODY(BIT, BODY1) \ + --i; \ + P_CROSS_ULOOP_BODY(BIT, BODY2) \ + P_LOOP_END() + +#define P_STRAIGHT_LOOP(BIT, BODY1, BODY2) \ + P_LOOP_BASE(BIT) \ + P_LOOP_BODY(BIT, BODY1) \ + --i; \ + P_LOOP_BODY(BIT, BODY2) \ + P_LOOP_END() + +#define P_STRAIGHT_ULOOP(BIT, BODY1, BODY2) \ + P_LOOP_BASE(BIT) \ + P_ULOOP_BODY(BIT, BODY1) \ + --i; \ + P_ULOOP_BODY(BIT, BODY2) \ + P_LOOP_END() + +#define P_X_LOOP(BIT, RS2_LOW_BIT, BODY) \ + P_X_LOOP_BASE(BIT, RS2_LOW_BIT) \ + P_ONE_LOOP_BODY(BIT, BODY) \ + P_LOOP_END() + +#define P_X_ULOOP(BIT, RS2_LOW_BIT, BODY) \ + P_X_LOOP_BASE(BIT, RS2_LOW_BIT) \ + P_ONE_ULOOP_BODY(BIT, BODY) \ + P_LOOP_END() + +#define P_I_LOOP(BIT, IMMBIT, BODY) \ + P_I_LOOP_BASE(BIT, IMMBIT) \ + P_ONE_LOOP_BODY(BIT, BODY) \ + P_LOOP_END() + +#define P_I_ULOOP(BIT, IMMBIT, BODY) \ + P_I_LOOP_BASE(BIT, IMMBIT) \ + P_ONE_ULOOP_BODY(BIT, BODY) \ + P_LOOP_END() + +#define P_MUL_LOOP(BIT, BODY) \ + P_MUL_LOOP_BASE(BIT) \ + P_MUL_LOOP_BODY(BIT, BODY) \ + P_PAIR_LOOP_END() + +#define P_MUL_ULOOP(BIT, BODY) \ + P_MUL_LOOP_BASE(BIT) \ + P_MUL_ULOOP_BODY(BIT, BODY) \ + P_PAIR_LOOP_END() + +#define P_MUL_CROSS_LOOP(BIT, BODY) \ + P_MUL_LOOP_BASE(BIT) \ + P_MUL_CROSS_LOOP_BODY(BIT, BODY) \ + P_PAIR_LOOP_END() + +#define P_MUL_CROSS_ULOOP(BIT, BODY) \ + P_MUL_LOOP_BASE(BIT) \ + P_MUL_CROSS_ULOOP_BODY(BIT, BODY) \ + P_PAIR_LOOP_END() + +#define P_REDUCTION_LOOP(BIT, BIT_INNER, USE_RD, IS_SAT, BODY) \ + P_REDUCTION_LOOP_BASE(BIT, BIT_INNER, USE_RD) \ + P_REDUCTION_PARAMS(BIT_INNER) \ + BODY \ + P_REDUCTION_LOOP_END(BIT, IS_SAT) + +#define P_REDUCTION_ULOOP(BIT, BIT_INNER, USE_RD, IS_SAT, BODY) \ + P_REDUCTION_ULOOP_BASE(BIT, BIT_INNER, USE_RD) \ + P_REDUCTION_UPARAMS(BIT_INNER) \ + BODY \ + P_REDUCTION_ULOOP_END(BIT, IS_SAT) + +#define P_REDUCTION_SULOOP(BIT, BIT_INNER, USE_RD, IS_SAT, BODY) \ + P_REDUCTION_LOOP_BASE(BIT, BIT_INNER, USE_RD) \ + P_REDUCTION_SUPARAMS(BIT_INNER) \ + BODY \ + P_REDUCTION_LOOP_END(BIT, IS_SAT) + +#define P_REDUCTION_CROSS_LOOP(BIT, BIT_INNER, USE_RD, IS_SAT, BODY) \ + P_REDUCTION_LOOP_BASE(BIT, BIT_INNER, USE_RD) \ + P_REDUCTION_CROSS_PARAMS(BIT_INNER) \ + BODY \ + P_REDUCTION_LOOP_END(BIT, IS_SAT) + +#define P_LOOP_END() \ + } \ + WRITE_RD(sext_xlen(rd_tmp)); + +#define P_PAIR_LOOP_END() \ + } \ + if (xlen == 32) { \ + WRITE_RD_PAIR(rd_tmp); \ + } \ + else { \ + WRITE_RD(sext_xlen(rd_tmp)); \ + } + +#define P_REDUCTION_LOOP_END(BIT, IS_SAT) \ + } \ + if (IS_SAT) { \ + P_SAT(pd_res, BIT); \ + } \ + type_usew_t::type pd = pd_res; \ + WRITE_PD(); \ + } \ + WRITE_RD(sext_xlen(rd_tmp)); + +#define P_REDUCTION_ULOOP_END(BIT, IS_SAT) \ + } \ + if (IS_SAT) { \ + P_SATU(pd_res, BIT); \ + } \ + type_usew_t::type pd = pd_res; \ + WRITE_PD(); \ + } \ + WRITE_RD(sext_xlen(rd_tmp)); + +#define P_SUNPKD8(X, Y) \ + require_extension(EXT_ZPN); \ + reg_t rd_tmp = 0; \ + int16_t pd[4] = { \ + P_SB(RS1, Y), \ + P_SB(RS1, X), \ + P_SB(RS1, Y + 4), \ + P_SB(RS1, X + 4), \ + }; \ + if (xlen == 64) { \ + memcpy(&rd_tmp, pd, 8); \ + } else { \ + memcpy(&rd_tmp, pd, 4); \ + } \ + WRITE_RD(sext_xlen(rd_tmp)); + +#define P_ZUNPKD8(X, Y) \ + require_extension(EXT_ZPN); \ + reg_t rd_tmp = 0; \ + uint16_t pd[4] = { \ + P_B(RS1, Y), \ + 
P_B(RS1, X), \ + P_B(RS1, Y + 4), \ + P_B(RS1, X + 4), \ + }; \ + if (xlen == 64) { \ + memcpy(&rd_tmp, pd, 8); \ + } else { \ + memcpy(&rd_tmp, pd, 4); \ + } \ + WRITE_RD(sext_xlen(rd_tmp)); + +#define P_PK(BIT, X, Y) \ + require_extension(EXT_ZPN); \ + require(BIT == e16 || BIT == e32); \ + reg_t rd_tmp = 0, rs1 = RS1, rs2 = RS2; \ + for (sreg_t i = 0; i < xlen / BIT / 2; i++) { \ + rd_tmp = set_field(rd_tmp, make_mask64(i * 2 * BIT, BIT), \ + P_UFIELD(RS2, i * 2 + Y, BIT)); \ + rd_tmp = set_field(rd_tmp, make_mask64((i * 2 + 1) * BIT, BIT), \ + P_UFIELD(RS1, i * 2 + X, BIT)); \ + } \ + WRITE_RD(sext_xlen(rd_tmp)); + +#define P_64_PROFILE_BASE() \ + require_extension(EXT_ZPSFOPERAND); \ + sreg_t rd, rs1, rs2; + +#define P_64_UPROFILE_BASE() \ + require_extension(EXT_ZPSFOPERAND); \ + reg_t rd, rs1, rs2; + +#define P_64_PROFILE_PARAM(USE_RD, INPUT_PAIR) \ + if (xlen == 32) { \ + rs1 = INPUT_PAIR ? RS1_PAIR : RS1; \ + rs2 = INPUT_PAIR ? RS2_PAIR : RS2; \ + rd = USE_RD ? RD_PAIR : 0; \ + } else { \ + rs1 = RS1; \ + rs2 = RS2; \ + rd = USE_RD ? RD : 0; \ + } + +#define P_64_PROFILE(BODY) \ + P_64_PROFILE_BASE() \ + P_64_PROFILE_PARAM(false, true) \ + BODY \ + P_64_PROFILE_END() \ + +#define P_64_UPROFILE(BODY) \ + P_64_UPROFILE_BASE() \ + P_64_PROFILE_PARAM(false, true) \ + BODY \ + P_64_PROFILE_END() \ + +#define P_64_PROFILE_REDUCTION(BIT, BODY) \ + P_64_PROFILE_BASE() \ + P_64_PROFILE_PARAM(true, false) \ + for (sreg_t i = 0; i < xlen / BIT; i++) { \ + sreg_t ps1 = P_FIELD(rs1, i, BIT); \ + sreg_t ps2 = P_FIELD(rs2, i, BIT); \ + BODY \ + } \ + P_64_PROFILE_END() \ + +#define P_64_UPROFILE_REDUCTION(BIT, BODY) \ + P_64_UPROFILE_BASE() \ + P_64_PROFILE_PARAM(true, false) \ + for (sreg_t i = 0; i < xlen / BIT; i++) { \ + reg_t ps1 = P_UFIELD(rs1, i, BIT); \ + reg_t ps2 = P_UFIELD(rs2, i, BIT); \ + BODY \ + } \ + P_64_PROFILE_END() \ + +#define P_64_PROFILE_END() \ + if (xlen == 32) { \ + WRITE_RD_PAIR(rd); \ + } else { \ + WRITE_RD(sext_xlen(rd)); \ + } + +#endif -- cgit v1.1 From 08afafccfd3ceaf6e158fc6a040bb0c931caa51b Mon Sep 17 00:00:00 2001 From: Chih-Min Chao Date: Wed, 11 May 2022 02:37:24 -0700 Subject: rvv: fix the checking eew and elen for index load eew of index register can't be larger than elen ex: elen = 32, vloxei64.v is illegal Signed-off-by: Chih-Min Chao --- riscv/v_ext_macros.h | 1 + 1 file changed, 1 insertion(+) diff --git a/riscv/v_ext_macros.h b/riscv/v_ext_macros.h index 2a32979..4df6813 100644 --- a/riscv/v_ext_macros.h +++ b/riscv/v_ext_macros.h @@ -84,6 +84,7 @@ static inline bool is_aligned(const unsigned val, const unsigned pos) #define VI_CHECK_ST_INDEX(elt_width) \ require_vector(false); \ + require(elt_width <= P.VU.ELEN); \ float vemul = ((float)elt_width / P.VU.vsew * P.VU.vflmul); \ require(vemul >= 0.125 && vemul <= 8); \ reg_t emul = vemul < 1 ? 1 : vemul; \ -- cgit v1.1 From fc35f34fd0f5307354cc25ae8018cda62f834e25 Mon Sep 17 00:00:00 2001 From: Ryan Buchner Date: Tue, 10 May 2022 15:22:11 -0700 Subject: Change henvcfg csr to a henvcfg_csr_t To do so implemented henvcfg_csr_t. henvcfg.PBMTE will be read only 0 if menvcfg.PBMTE = 0. 
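(For reference, a standalone sketch of the mask identity the new read() override below relies on; this is not part of the patch, and the PBMTE bit position is assumed purely for illustration. ORing menvcfg with the complement of the PBMTE mask yields all-ones when menvcfg.PBMTE is set and clears only that single bit when it is not, so ANDing the result with the stored henvcfg value makes henvcfg.PBMTE read as 0 exactly when menvcfg.PBMTE is 0, while every other henvcfg bit is untouched.)

    // Standalone illustration, not Spike code; bit position assumed.
    #include <cassert>
    #include <cstdint>

    int main() {
      const uint64_t PBMTE = uint64_t(1) << 62;        // illustrative bit position
      auto henvcfg_read = [&](uint64_t menvcfg, uint64_t henvcfg) {
        return (menvcfg | ~PBMTE) & henvcfg;           // same shape as henvcfg_csr_t::read()
      };
      const uint64_t henvcfg = PBMTE | 0xf0;           // PBMTE requested plus other bits
      assert(henvcfg_read(0, henvcfg) == 0xf0);        // menvcfg.PBMTE=0: PBMTE reads as 0
      assert(henvcfg_read(PBMTE, henvcfg) == henvcfg); // menvcfg.PBMTE=1: value passes through
      return 0;
    }
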
--- riscv/csrs.cc | 7 +++++++ riscv/csrs.h | 14 ++++++++++++++ riscv/processor.cc | 2 +- 3 files changed, 22 insertions(+), 1 deletion(-) diff --git a/riscv/csrs.cc b/riscv/csrs.cc index 8366d8a..3d6cf91 100644 --- a/riscv/csrs.cc +++ b/riscv/csrs.cc @@ -840,6 +840,13 @@ bool masked_csr_t::unlogged_write(const reg_t val) noexcept { } +// implement class henvcfg_csr_t +henvcfg_csr_t::henvcfg_csr_t(processor_t* const proc, const reg_t addr, const reg_t mask, const reg_t init, csr_t_p menvcfg): + masked_csr_t(proc, addr, mask, init), + menvcfg(menvcfg) { +} + + // implement class base_atp_csr_t and family base_atp_csr_t::base_atp_csr_t(processor_t* const proc, const reg_t addr): basic_csr_t(proc, addr, 0) { diff --git a/riscv/csrs.h b/riscv/csrs.h index 660ddd1..f6d2c2c 100644 --- a/riscv/csrs.h +++ b/riscv/csrs.h @@ -448,6 +448,20 @@ class masked_csr_t: public basic_csr_t { }; +// henvcfg.pbmte is read_only 0 when menvcfg.pbmte = 0 +class henvcfg_csr_t final: public masked_csr_t { + public: + henvcfg_csr_t(processor_t* const proc, const reg_t addr, const reg_t mask, const reg_t init, csr_t_p menvcfg); + + reg_t read() const noexcept override { + return (menvcfg->read() | ~MENVCFG_PBMTE) & masked_csr_t::read(); + } + + private: + csr_t_p menvcfg; +}; + + // For satp and vsatp // These are three classes in order to handle the [V]TVM bits permission checks class base_atp_csr_t: public basic_csr_t { diff --git a/riscv/processor.cc b/riscv/processor.cc index 9ce9287..72f2d47 100644 --- a/riscv/processor.cc +++ b/riscv/processor.cc @@ -387,7 +387,7 @@ void state_t::reset(processor_t* const proc, reg_t max_isa) csrmap[CSR_SENVCFG] = senvcfg = std::make_shared(proc, CSR_SENVCFG, senvcfg_mask, 0); const reg_t henvcfg_mask = (proc->extension_enabled(EXT_ZICBOM) ? HENVCFG_CBCFE | HENVCFG_CBIE : 0) | (proc->extension_enabled(EXT_ZICBOZ) ? HENVCFG_CBZE : 0); - csrmap[CSR_HENVCFG] = henvcfg = std::make_shared(proc, CSR_HENVCFG, henvcfg_mask, 0); + csrmap[CSR_HENVCFG] = henvcfg = std::make_shared(proc, CSR_HENVCFG, henvcfg_mask, 0, menvcfg); serialized = false; -- cgit v1.1 From ea70a9359daf5780b815b68aa9cdc7b2a71d2f8c Mon Sep 17 00:00:00 2001 From: Ryan Buchner Date: Sat, 7 May 2022 22:25:00 -0700 Subject: Add PBMTE bit to menvcfg and henvcfg mask values Also make PBMTE set on reset for backward compatibility. Since before Spike proceeded as if these bits were set if the extension was enabled. --- riscv/processor.cc | 12 ++++++++---- 1 file changed, 8 insertions(+), 4 deletions(-) diff --git a/riscv/processor.cc b/riscv/processor.cc index 72f2d47..661058c 100644 --- a/riscv/processor.cc +++ b/riscv/processor.cc @@ -380,14 +380,18 @@ void state_t::reset(processor_t* const proc, reg_t max_isa) csrmap[CSR_MVENDORID] = std::make_shared(proc, CSR_MVENDORID, 0); csrmap[CSR_MHARTID] = std::make_shared(proc, CSR_MHARTID, proc->get_id()); const reg_t menvcfg_mask = (proc->extension_enabled(EXT_ZICBOM) ? MENVCFG_CBCFE | MENVCFG_CBIE : 0) | - (proc->extension_enabled(EXT_ZICBOZ) ? MENVCFG_CBZE : 0); - csrmap[CSR_MENVCFG] = menvcfg = std::make_shared(proc, CSR_MENVCFG, menvcfg_mask, 0); + (proc->extension_enabled(EXT_ZICBOZ) ? MENVCFG_CBZE : 0) | + (proc->extension_enabled(EXT_SVPBMT) ? MENVCFG_PBMTE : 0); + const reg_t menvcfg_init = (proc->extension_enabled(EXT_SVPBMT) ? MENVCFG_PBMTE : 0); + csrmap[CSR_MENVCFG] = menvcfg = std::make_shared(proc, CSR_MENVCFG, menvcfg_mask, menvcfg_init); const reg_t senvcfg_mask = (proc->extension_enabled(EXT_ZICBOM) ? 
SENVCFG_CBCFE | SENVCFG_CBIE : 0) | (proc->extension_enabled(EXT_ZICBOZ) ? SENVCFG_CBZE : 0); csrmap[CSR_SENVCFG] = senvcfg = std::make_shared(proc, CSR_SENVCFG, senvcfg_mask, 0); const reg_t henvcfg_mask = (proc->extension_enabled(EXT_ZICBOM) ? HENVCFG_CBCFE | HENVCFG_CBIE : 0) | - (proc->extension_enabled(EXT_ZICBOZ) ? HENVCFG_CBZE : 0); - csrmap[CSR_HENVCFG] = henvcfg = std::make_shared(proc, CSR_HENVCFG, henvcfg_mask, 0, menvcfg); + (proc->extension_enabled(EXT_ZICBOZ) ? HENVCFG_CBZE : 0) | + (proc->extension_enabled(EXT_SVPBMT) ? HENVCFG_PBMTE : 0); + const reg_t henvcfg_init = (proc->extension_enabled(EXT_SVPBMT) ? HENVCFG_PBMTE : 0); + csrmap[CSR_HENVCFG] = henvcfg = std::make_shared(proc, CSR_HENVCFG, henvcfg_mask, henvcfg_init, menvcfg); serialized = false; -- cgit v1.1 From 996634f0be06099401eec5165428b3ecc4a72fb6 Mon Sep 17 00:00:00 2001 From: Ryan Buchner Date: Sat, 7 May 2022 22:33:16 -0700 Subject: Switch from checking for SVPBMT extension to checking *ENVCFG values during tablewalks Fix issue #990. --- riscv/mmu.cc | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) diff --git a/riscv/mmu.cc b/riscv/mmu.cc index 2918f0b..3868c70 100644 --- a/riscv/mmu.cc +++ b/riscv/mmu.cc @@ -294,12 +294,13 @@ reg_t mmu_t::s2xlate(reg_t gva, reg_t gpa, access_type type, access_type trap_ty reg_t pte = vm.ptesize == 4 ? from_target(*(target_endian*)ppte) : from_target(*(target_endian*)ppte); reg_t ppn = (pte & ~reg_t(PTE_ATTR)) >> PTE_PPN_SHIFT; + bool pbmte = proc->get_state()->menvcfg->read() & MENVCFG_PBMTE; if (pte & PTE_RSVD) { break; } else if (!proc->extension_enabled(EXT_SVNAPOT) && (pte & PTE_N)) { break; - } else if (!proc->extension_enabled(EXT_SVPBMT) && (pte & PTE_PBMT)) { + } else if (!pbmte && (pte & PTE_PBMT)) { break; } else if (PTE_TABLE(pte)) { // next level of page table if (pte & (PTE_D | PTE_A | PTE_U | PTE_N | PTE_PBMT)) @@ -384,12 +385,13 @@ reg_t mmu_t::walk(reg_t addr, access_type type, reg_t mode, bool virt, bool hlvx reg_t pte = vm.ptesize == 4 ? from_target(*(target_endian*)ppte) : from_target(*(target_endian*)ppte); reg_t ppn = (pte & ~reg_t(PTE_ATTR)) >> PTE_PPN_SHIFT; + bool pbmte = virt ? (proc->get_state()->henvcfg->read() & HENVCFG_PBMTE) : (proc->get_state()->menvcfg->read() & MENVCFG_PBMTE); if (pte & PTE_RSVD) { break; } else if (!proc->extension_enabled(EXT_SVNAPOT) && (pte & PTE_N)) { break; - } else if (!proc->extension_enabled(EXT_SVPBMT) && (pte & PTE_PBMT)) { + } else if (!pbmte && (pte & PTE_PBMT)) { break; } else if (PTE_TABLE(pte)) { // next level of page table if (pte & (PTE_D | PTE_A | PTE_U | PTE_N | PTE_PBMT)) -- cgit v1.1 From ccfeaa99732479b029ce95fc9b09968c7f3b1f64 Mon Sep 17 00:00:00 2001 From: Ryan Buchner Date: Sat, 7 May 2022 22:36:02 -0700 Subject: Check for reserved PBMT values during tablewalks and fault if found See #990. 
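(The hunks below treat the all-ones PBMT encoding as reserved: PBMT is a two-bit PTE field, so "(pte & PTE_PBMT) == PTE_PBMT" is true exactly when both bits are set, i.e. for encoding 3. A standalone sketch, with the field position assumed for illustration only:)

    #include <cassert>
    #include <cstdint>

    int main() {
      const uint64_t PTE_PBMT = uint64_t(3) << 61;      // illustrative two-bit field position
      auto pbmt_reserved = [&](uint64_t pte) {
        return (pte & PTE_PBMT) == PTE_PBMT;            // same test the tablewalk adds
      };
      for (uint64_t enc = 0; enc < 4; ++enc)
        assert(pbmt_reserved(enc << 61) == (enc == 3)); // only encoding 3 is flagged
      return 0;
    }

In the walk loops, the added break falls through to the fault path, so a PTE carrying the reserved encoding raises a page fault instead of being silently accepted.
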
--- riscv/mmu.cc | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/riscv/mmu.cc b/riscv/mmu.cc index 3868c70..d0a55f0 100644 --- a/riscv/mmu.cc +++ b/riscv/mmu.cc @@ -302,6 +302,8 @@ reg_t mmu_t::s2xlate(reg_t gva, reg_t gpa, access_type type, access_type trap_ty break; } else if (!pbmte && (pte & PTE_PBMT)) { break; + } else if ((pte & PTE_PBMT) == PTE_PBMT) { + break; } else if (PTE_TABLE(pte)) { // next level of page table if (pte & (PTE_D | PTE_A | PTE_U | PTE_N | PTE_PBMT)) break; @@ -393,6 +395,8 @@ reg_t mmu_t::walk(reg_t addr, access_type type, reg_t mode, bool virt, bool hlvx break; } else if (!pbmte && (pte & PTE_PBMT)) { break; + } else if ((pte & PTE_PBMT) == PTE_PBMT) { + break; } else if (PTE_TABLE(pte)) { // next level of page table if (pte & (PTE_D | PTE_A | PTE_U | PTE_N | PTE_PBMT)) break; -- cgit v1.1 From 11f5942b7d8211e61b5ad9259d118033692c0759 Mon Sep 17 00:00:00 2001 From: Andrew Waterman Date: Thu, 12 May 2022 14:05:37 -0700 Subject: Don't register instructions that aren't supported These add to the length of the instruction list without providing an apparent benefit. --- riscv/processor.cc | 18 ++++++++++-------- 1 file changed, 10 insertions(+), 8 deletions(-) diff --git a/riscv/processor.cc b/riscv/processor.cc index 9ce9287..dd61cae 100644 --- a/riscv/processor.cc +++ b/riscv/processor.cc @@ -959,14 +959,16 @@ void processor_t::register_base_instructions() extern reg_t rv64i_##name(processor_t*, insn_t, reg_t); \ extern reg_t rv32e_##name(processor_t*, insn_t, reg_t); \ extern reg_t rv64e_##name(processor_t*, insn_t, reg_t); \ - register_insn((insn_desc_t) { \ - name##_supported, \ - name##_match, \ - name##_mask, \ - rv32i_##name, \ - rv64i_##name, \ - rv32e_##name, \ - rv64e_##name}); + if (name##_supported) { \ + register_insn((insn_desc_t) { \ + name##_supported, \ + name##_match, \ + name##_mask, \ + rv32i_##name, \ + rv64i_##name, \ + rv32e_##name, \ + rv64e_##name}); \ + } #include "insn_list.h" #undef DEFINE_INSN -- cgit v1.1 From 68b20a9b8af4e9adbff9cccaef2b7c6b2c8ec190 Mon Sep 17 00:00:00 2001 From: Andrew Waterman Date: Thu, 12 May 2022 14:06:27 -0700 Subject: Remove insn_func_t::supported field The field is rendered unnecessary by 11f5942b7d8211e61b5ad9259d118033692c0759. Undoes some changes from 750f008e723bb3b20cec41a47ed5cec549447665. 
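(The change follows from the previous commit: once unsupported instructions are never registered, every entry in the table is supported by construction, so a per-entry flag that each lookup re-checks carries no information. A minimal sketch of the pattern, using illustrative names that are not Spike's:)

    #include <cassert>
    #include <vector>

    struct desc_t { int match; };          // no 'supported' member needed

    static std::vector<desc_t> table;

    static void register_desc(bool supported, desc_t d) {
      if (supported)                       // filter once, at registration time
        table.push_back(d);
    }

    int main() {
      register_desc(true,  {1});
      register_desc(false, {2});           // never enters the table
      assert(table.size() == 1 && table[0].match == 1);
      return 0;
    }

Filtering at registration time is also what lets the later commit drop the now-redundant null/supported check from decode_insn's linear search.
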
--- customext/cflush.cc | 6 +++--- riscv/processor.cc | 1 - riscv/processor.h | 6 +----- riscv/rocc.cc | 8 ++++---- 4 files changed, 8 insertions(+), 13 deletions(-) diff --git a/customext/cflush.cc b/customext/cflush.cc index 1a5cfa2..8b72a97 100644 --- a/customext/cflush.cc +++ b/customext/cflush.cc @@ -24,9 +24,9 @@ class cflush_t : public extension_t std::vector get_instructions() { std::vector insns; - insns.push_back((insn_desc_t){true, 0xFC000073, 0xFFF07FFF, custom_cflush, custom_cflush, custom_cflush, custom_cflush}); - insns.push_back((insn_desc_t){true, 0xFC200073, 0xFFF07FFF, custom_cflush, custom_cflush, custom_cflush, custom_cflush}); - insns.push_back((insn_desc_t){true, 0xFC100073, 0xFFF07FFF, custom_cflush, custom_cflush, custom_cflush, custom_cflush}); + insns.push_back((insn_desc_t){0xFC000073, 0xFFF07FFF, custom_cflush, custom_cflush, custom_cflush, custom_cflush}); + insns.push_back((insn_desc_t){0xFC200073, 0xFFF07FFF, custom_cflush, custom_cflush, custom_cflush, custom_cflush}); + insns.push_back((insn_desc_t){0xFC100073, 0xFFF07FFF, custom_cflush, custom_cflush, custom_cflush, custom_cflush}); return insns; } diff --git a/riscv/processor.cc b/riscv/processor.cc index dd61cae..2cc1003 100644 --- a/riscv/processor.cc +++ b/riscv/processor.cc @@ -961,7 +961,6 @@ void processor_t::register_base_instructions() extern reg_t rv64e_##name(processor_t*, insn_t, reg_t); \ if (name##_supported) { \ register_insn((insn_desc_t) { \ - name##_supported, \ name##_match, \ name##_mask, \ rv32i_##name, \ diff --git a/riscv/processor.h b/riscv/processor.h index 96fdc54..ec1b400 100644 --- a/riscv/processor.h +++ b/riscv/processor.h @@ -29,7 +29,6 @@ reg_t illegal_instruction(processor_t* p, insn_t insn, reg_t pc); struct insn_desc_t { - bool supported; insn_bits_t match; insn_bits_t mask; insn_func_t rv32i; @@ -39,9 +38,6 @@ struct insn_desc_t insn_func_t func(int xlen, bool rve) { - if (!supported) - return NULL; - if (rve) return xlen == 64 ? 
rv64e : rv32e; else @@ -50,7 +46,7 @@ struct insn_desc_t static insn_desc_t illegal() { - return {true, 0, 0, &illegal_instruction, &illegal_instruction, &illegal_instruction, &illegal_instruction}; + return {0, 0, &illegal_instruction, &illegal_instruction, &illegal_instruction, &illegal_instruction}; } }; diff --git a/riscv/rocc.cc b/riscv/rocc.cc index 2d09095..f50934f 100644 --- a/riscv/rocc.cc +++ b/riscv/rocc.cc @@ -32,10 +32,10 @@ customX(3) std::vector rocc_t::get_instructions() { std::vector insns; - insns.push_back((insn_desc_t){true, 0x0b, 0x7f, &::illegal_instruction, c0, &::illegal_instruction, c0}); - insns.push_back((insn_desc_t){true, 0x2b, 0x7f, &::illegal_instruction, c1, &::illegal_instruction, c1}); - insns.push_back((insn_desc_t){true, 0x5b, 0x7f, &::illegal_instruction, c2, &::illegal_instruction, c2}); - insns.push_back((insn_desc_t){true, 0x7b, 0x7f, &::illegal_instruction, c3, &::illegal_instruction, c3}); + insns.push_back((insn_desc_t){0x0b, 0x7f, &::illegal_instruction, c0, &::illegal_instruction, c0}); + insns.push_back((insn_desc_t){0x2b, 0x7f, &::illegal_instruction, c1, &::illegal_instruction, c1}); + insns.push_back((insn_desc_t){0x5b, 0x7f, &::illegal_instruction, c2, &::illegal_instruction, c2}); + insns.push_back((insn_desc_t){0x7b, 0x7f, &::illegal_instruction, c3, &::illegal_instruction, c3}); return insns; } -- cgit v1.1 From 0676421e93180f4e2421fb5058f5e6c1de8ae2c1 Mon Sep 17 00:00:00 2001 From: Andrew Waterman Date: Thu, 12 May 2022 14:18:02 -0700 Subject: Assert that nullptrs can't make their way into the instructions list --- riscv/processor.cc | 2 ++ 1 file changed, 2 insertions(+) diff --git a/riscv/processor.cc b/riscv/processor.cc index 2cc1003..98ae637 100644 --- a/riscv/processor.cc +++ b/riscv/processor.cc @@ -907,6 +907,8 @@ insn_func_t processor_t::decode_insn(insn_t insn) void processor_t::register_insn(insn_desc_t desc) { + assert(desc.rv32i && desc.rv64i && desc.rv32e && desc.rv64e); + instructions.push_back(desc); } -- cgit v1.1 From e66e2e2b09c04acf733089658cf32a27708b8d16 Mon Sep 17 00:00:00 2001 From: Andrew Waterman Date: Thu, 12 May 2022 14:18:47 -0700 Subject: Remove now-unnecessary null check from decode_insn Fixes bug introduced in 5b7cdbe1cf75112bd2a472b7490b15fa7078d798 --- riscv/processor.cc | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/riscv/processor.cc b/riscv/processor.cc index 98ae637..787a795 100644 --- a/riscv/processor.cc +++ b/riscv/processor.cc @@ -881,11 +881,11 @@ insn_func_t processor_t::decode_insn(insn_t insn) bool rve = extension_enabled('E'); - if (unlikely(insn.bits() != desc.match || !desc.func(xlen, rve))) { + if (unlikely(insn.bits() != desc.match)) { // fall back to linear search int cnt = 0; insn_desc_t* p = &instructions[0]; - while ((insn.bits() & p->mask) != p->match || !desc.func(xlen, rve)) + while ((insn.bits() & p->mask) != p->match) p++, cnt++; desc = *p; -- cgit v1.1 From 2bf4c8c3dfe9ef7a454b33f70a4611c9118ce405 Mon Sep 17 00:00:00 2001 From: Andrew Waterman Date: Thu, 12 May 2022 15:28:47 -0700 Subject: Add missing Q, H, and Svinval extensions to disassembler fallback --- disasm/disasm.cc | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/disasm/disasm.cc b/disasm/disasm.cc index d18f089..2fbc1fb 100644 --- a/disasm/disasm.cc +++ b/disasm/disasm.cc @@ -2101,7 +2101,7 @@ disassembler_t::disassembler_t(const isa_parser_t *isa) // next-highest priority: other instructions in same base ISA std::string fallback_isa_string = std::string("rv") + 
std::to_string(isa->get_max_xlen()) + - "gcv_zfh_zba_zbb_zbc_zbs_zkn_zkr_zks_xbitmanip"; + "gqchv_zfh_zba_zbb_zbc_zbs_zkn_zkr_zks_svinval_xbitmanip"; isa_parser_t fallback_isa(fallback_isa_string.c_str(), DEFAULT_PRIV); add_instructions(&fallback_isa); -- cgit v1.1 From 500d987d8750ff7e048c3e2fc0898863beb051cc Mon Sep 17 00:00:00 2001 From: Andrew Waterman Date: Thu, 12 May 2022 16:14:00 -0700 Subject: Add missing Zicbom and Zicbop extensions to disassembler fallback --- disasm/disasm.cc | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/disasm/disasm.cc b/disasm/disasm.cc index 2fbc1fb..b627636 100644 --- a/disasm/disasm.cc +++ b/disasm/disasm.cc @@ -2101,7 +2101,7 @@ disassembler_t::disassembler_t(const isa_parser_t *isa) // next-highest priority: other instructions in same base ISA std::string fallback_isa_string = std::string("rv") + std::to_string(isa->get_max_xlen()) + - "gqchv_zfh_zba_zbb_zbc_zbs_zkn_zkr_zks_svinval_xbitmanip"; + "gqchv_zfh_zba_zbb_zbc_zbs_zicbom_zicboz_zkn_zkr_zks_svinval_xbitmanip"; isa_parser_t fallback_isa(fallback_isa_string.c_str(), DEFAULT_PRIV); add_instructions(&fallback_isa); -- cgit v1.1 From 918cba10e103a4cb1405b7a481111b78c2274f28 Mon Sep 17 00:00:00 2001 From: Andrew Waterman Date: Thu, 12 May 2022 16:20:57 -0700 Subject: Update README to reflect recently added extensions --- README.md | 3 +++ 1 file changed, 3 insertions(+) diff --git a/README.md b/README.md index 7d348f7..b8b5227 100644 --- a/README.md +++ b/README.md @@ -27,6 +27,9 @@ Spike supports the following RISC-V ISA features: - Zbb extension, v1.0 - Zbc extension, v1.0 - Zbs extension, v1.0 + - Zfh and Zfhmin half-precision floating-point extensions, v1.0 + - Zmmul integer multiplication extension, v1.0 + - Zicbom, Zicbop, Zicboz cache-block maintenance extensions, v1.0 - Conformance to both RVWMO and RVTSO (Spike is sequentially consistent) - Machine, Supervisor, and User modes, v1.11 - Hypervisor extension, v1.0 -- cgit v1.1 From ff645fb4eb9cd8a957dd8826369de8dd7e1fb8a3 Mon Sep 17 00:00:00 2001 From: Andrew Waterman Date: Fri, 13 May 2022 13:57:55 -0700 Subject: Disassemble Zicbop/Zihintpause HINT instructions (#1000) We do not condition them on Zicbop/Zihintpause because, definitionally, all implementations provide them. --- disasm/disasm.cc | 9 +++++++++ 1 file changed, 9 insertions(+) diff --git a/disasm/disasm.cc b/disasm/disasm.cc index b627636..c8e92d6 100644 --- a/disasm/disasm.cc +++ b/disasm/disasm.cc @@ -670,6 +670,7 @@ void disassembler_t::add_instructions(const isa_parser_t* isa) #define DEFINE_I0TYPE(name, code) DISASM_INSN(name, code, mask_rs1, {&xrd, &imm}) #define DEFINE_I1TYPE(name, code) DISASM_INSN(name, code, mask_imm, {&xrd, &xrs1}) #define DEFINE_I2TYPE(name, code) DISASM_INSN(name, code, mask_rd | mask_imm, {&xrs1}) + #define DEFINE_PREFETCH(code) DISASM_INSN(#code, code, 0, {&store_address}) #define DEFINE_LTYPE(code) DISASM_INSN(#code, code, 0, {&xrd, &bigimm}) #define DEFINE_BTYPE(code) add_btype_insn(this, #code, match_##code, mask_##code); #define DEFINE_B1TYPE(name, code) add_b1type_insn(this, name, match_##code, mask_##code); @@ -691,6 +692,14 @@ void disassembler_t::add_instructions(const isa_parser_t* isa) add_insn(new disasm_insn_t("unimp", match_csrrw|(CSR_CYCLE<<20), 0xffffffff, {})); add_insn(new disasm_insn_t("c.unimp", 0, 0xffff, {})); + // Following are HINTs, so they must precede their corresponding base-ISA + // instructions. We do not condition them on Zicbop/Zihintpause because, + // definitionally, all implementations provide them. 
+ DEFINE_PREFETCH(prefetch_r); + DEFINE_PREFETCH(prefetch_w); + DEFINE_PREFETCH(prefetch_i); + DEFINE_NOARG(pause); + DEFINE_XLOAD(lb) DEFINE_XLOAD(lbu) DEFINE_XLOAD(lh) -- cgit v1.1 From 78dfe62633ce4fe6e6a70afae04168e1f102b673 Mon Sep 17 00:00:00 2001 From: Andrew Waterman Date: Fri, 13 May 2022 13:58:10 -0700 Subject: Fix disassembly of custom instructions that overlap standard ones (#999) Iterate over the instruction chains in reverse order, prioritizing the last call to `disassembler_t::add_insn`. To preserve behavior for the standard instructions, reverse the order in which we add instructions in the `disassembler_t` constructor. Supersedes #995. --- disasm/disasm.cc | 13 ++++++++++--- 1 file changed, 10 insertions(+), 3 deletions(-) diff --git a/disasm/disasm.cc b/disasm/disasm.cc index c8e92d6..7df39da 100644 --- a/disasm/disasm.cc +++ b/disasm/disasm.cc @@ -7,6 +7,8 @@ #include #include #include +// For std::reverse: +#include // Indicates that the next arg (only) is optional. // If the result of converting the next arg to a string is "" @@ -2116,13 +2118,18 @@ disassembler_t::disassembler_t(const isa_parser_t *isa) // finally: instructions with known opcodes but unknown arguments add_unknown_insns(this); + + // Now, reverse the lists, because we search them back-to-front (so that + // custom instructions later added with add_insn have highest priority). + for (size_t i = 0; i < HASH_SIZE+1; i++) + std::reverse(chain[i].begin(), chain[i].end()); } const disasm_insn_t* disassembler_t::probe_once(insn_t insn, size_t idx) const { - for (size_t j = 0; j < chain[idx].size(); j++) - if(*chain[idx][j] == insn) - return chain[idx][j]; + for (auto it = chain[idx].rbegin(); it != chain[idx].rend(); ++it) + if (*(*it) == insn) + return *it; return NULL; } -- cgit v1.1 From a59c44eb468ae366dbf2dc60c371b1c090e311cf Mon Sep 17 00:00:00 2001 From: Pirmin Vogel Date: Mon, 16 May 2022 12:10:09 +0200 Subject: Include recently added headers in riscv/riscv.mk.in --- riscv/riscv.mk.in | 3 +++ 1 file changed, 3 insertions(+) diff --git a/riscv/riscv.mk.in b/riscv/riscv.mk.in index 0c6b977..45fe2eb 100644 --- a/riscv/riscv.mk.in +++ b/riscv/riscv.mk.in @@ -13,9 +13,12 @@ riscv_hdrs = \ decode.h \ devices.h \ dts.h \ + isa_parser.h \ mmu.h \ cfg.h \ processor.h \ + p_ext_macros.h \ + v_ext_macros.h \ sim.h \ simif.h \ trap.h \ -- cgit v1.1 From a0298a33e7b2091ba8d9f3a20838d96dc1164cac Mon Sep 17 00:00:00 2001 From: Tim Newsome Date: Thu, 19 May 2022 17:11:07 -0700 Subject: Move ebreak* logic from take_trap into instructions. (#1006) Now that logic only affects ebreak instructions, and does not affect triggers that also cause a trap to be taken. Fixes #725. Although like Paul, I don't have a test for this case. Introduce trap_debug_mode so so ebreak instructions can force entry into debug mode. 
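Condensed, the check that both ebreak variants now perform is sketched below (prv, debug_mode and the ebreak* flags stand in for the state_t/dcsr fields used in the diff that follows; this is a paraphrase, not the literal code):

    // Should this ebreak enter debug mode rather than raise a normal
    // breakpoint exception?  Hardware triggers that fire on the same
    // instruction no longer consult this logic; they trap as usual
    // through take_trap().
    enum { PRV_U = 0, PRV_S = 1, PRV_M = 3 };

    static bool ebreak_enters_debug(bool debug_mode, int prv,
                                    bool ebreakm, bool ebreaks, bool ebreaku) {
      return !debug_mode && ((prv == PRV_M && ebreakm) ||
                             (prv == PRV_S && ebreaks) ||
                             (prv == PRV_U && ebreaku));
    }

When the predicate is true, the instruction throws trap_debug_mode, which step() catches and turns into enter_debug_mode(DCSR_CAUSE_SWBP); otherwise it throws the ordinary trap_breakpoint.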
--- riscv/execute.cc | 4 ++++ riscv/insns/c_ebreak.h | 9 ++++++++- riscv/insns/ebreak.h | 9 ++++++++- riscv/processor.cc | 8 -------- riscv/trap.h | 5 +++++ 5 files changed, 25 insertions(+), 10 deletions(-) diff --git a/riscv/execute.cc b/riscv/execute.cc index a6ea7a4..ea4dc5b 100644 --- a/riscv/execute.cc +++ b/riscv/execute.cc @@ -337,6 +337,10 @@ void processor_t::step(size_t n) abort(); } } + catch(trap_debug_mode&) + { + enter_debug_mode(DCSR_CAUSE_SWBP); + } catch (wait_for_interrupt_t &t) { // Return to the outer simulation loop, which gives other devices/harts a diff --git a/riscv/insns/c_ebreak.h b/riscv/insns/c_ebreak.h index 7d04f46..c8cc1f5 100644 --- a/riscv/insns/c_ebreak.h +++ b/riscv/insns/c_ebreak.h @@ -1,2 +1,9 @@ require_extension('C'); -throw trap_breakpoint(STATE.v, pc); +if (!STATE.debug_mode && + ((STATE.prv == PRV_M && STATE.dcsr->ebreakm) || + (STATE.prv == PRV_S && STATE.dcsr->ebreaks) || + (STATE.prv == PRV_U && STATE.dcsr->ebreaku))) { + throw trap_debug_mode(); +} else { + throw trap_breakpoint(STATE.v, pc); +} diff --git a/riscv/insns/ebreak.h b/riscv/insns/ebreak.h index 9f3d44d..227ab93 100644 --- a/riscv/insns/ebreak.h +++ b/riscv/insns/ebreak.h @@ -1 +1,8 @@ -throw trap_breakpoint(STATE.v, pc); +if (!STATE.debug_mode && + ((STATE.prv == PRV_M && STATE.dcsr->ebreakm) || + (STATE.prv == PRV_S && STATE.dcsr->ebreaks) || + (STATE.prv == PRV_U && STATE.dcsr->ebreaku))) { + throw trap_debug_mode(); +} else { + throw trap_breakpoint(STATE.v, pc); +} diff --git a/riscv/processor.cc b/riscv/processor.cc index a9003a8..bb41248 100644 --- a/riscv/processor.cc +++ b/riscv/processor.cc @@ -720,14 +720,6 @@ void processor_t::take_trap(trap_t& t, reg_t epc) return; } - if (t.cause() == CAUSE_BREAKPOINT && ( - (state.prv == PRV_M && state.dcsr->ebreakm) || - (state.prv == PRV_S && state.dcsr->ebreaks) || - (state.prv == PRV_U && state.dcsr->ebreaku))) { - enter_debug_mode(DCSR_CAUSE_SWBP); - return; - } - // By default, trap to M-mode, unless delegated to HS-mode or VS-mode reg_t vsdeleg, hsdeleg; reg_t bit = t.cause(); diff --git a/riscv/trap.h b/riscv/trap.h index 1cd62e1..8347c6e 100644 --- a/riscv/trap.h +++ b/riscv/trap.h @@ -8,6 +8,11 @@ struct state_t; +class trap_debug_mode +{ + /* Used to enter debug mode, which isn't quite a normal trap. */ +}; + class trap_t { public: -- cgit v1.1 From 68b3eb9bf1c04c19a66631f717163dd9ba2c923c Mon Sep 17 00:00:00 2001 From: Andrew Waterman Date: Thu, 26 May 2022 14:21:07 -0700 Subject: Fix RV32 hgatp write mask computation (#1014) clang with `-Wall` happened to catch this. --- riscv/csrs.cc | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/riscv/csrs.cc b/riscv/csrs.cc index 3d6cf91..6bca99c 100644 --- a/riscv/csrs.cc +++ b/riscv/csrs.cc @@ -1075,7 +1075,7 @@ bool hgatp_csr_t::unlogged_write(const reg_t val) noexcept { if (proc->get_const_xlen() == 32) { mask = HGATP32_PPN | HGATP32_MODE | - proc->supports_impl(IMPL_MMU_VMID) ? HGATP32_VMID : 0; + (proc->supports_impl(IMPL_MMU_VMID) ? HGATP32_VMID : 0); } else { mask = (HGATP64_PPN & ((reg_t(1) << (MAX_PADDR_BITS - PGSHIFT)) - 1)) | (proc->supports_impl(IMPL_MMU_VMID) ? HGATP64_VMID : 0); -- cgit v1.1 From 7e9da9966856743fb17c42a0a2c2c915e3a3e2b9 Mon Sep 17 00:00:00 2001 From: Kip Walker Date: Wed, 1 Jun 2022 10:51:57 -0700 Subject: Remove the now-unused PC_SERIALIZE_WFI When WFI was changed to throw a C++ exception, the special-npc signaling became obsolete. 
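Roughly, the exception-based flow that made the sentinel redundant looks like this self-contained sketch (stand-in names; the real pieces are the wfi() macro in decode.h and the catch clause in processor_t::step()):

    #include <cstdio>

    struct wait_for_interrupt_t {};      // stand-in for the class in decode.h

    static void execute_wfi() {
      // the wfi() macro commits the pc via set_pc_and_serialize(), then
      // unwinds with a C++ exception instead of writing a PC_SERIALIZE_WFI
      // sentinel into npc
      throw wait_for_interrupt_t();
    }

    static void step() {
      try {
        execute_wfi();
      } catch (wait_for_interrupt_t&) {
        // control lands here, back in the stepping loop, so the
        // advance-pc switch no longer needs a sentinel case for WFI
        std::puts("wfi: yield to other harts/devices");
      }
    }

    int main() { step(); }

Since the npc sentinel is never produced anymore, the PC_SERIALIZE_WFI definition and its switch case can be deleted, which is what the diff below does.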
--- riscv/decode.h | 2 -- riscv/execute.cc | 1 - 2 files changed, 3 deletions(-) diff --git a/riscv/decode.h b/riscv/decode.h index e5e8115..9f13a42 100644 --- a/riscv/decode.h +++ b/riscv/decode.h @@ -316,7 +316,6 @@ class wait_for_interrupt_t {}; #define wfi() \ do { set_pc_and_serialize(npc); \ - npc = PC_SERIALIZE_WFI; \ throw wait_for_interrupt_t(); \ } while (0) @@ -325,7 +324,6 @@ class wait_for_interrupt_t {}; /* Sentinel PC values to serialize simulator pipeline */ #define PC_SERIALIZE_BEFORE 3 #define PC_SERIALIZE_AFTER 5 -#define PC_SERIALIZE_WFI 7 #define invalid_pc(pc) ((pc) & 1) /* Convenience wrappers to simplify softfloat code sequences */ diff --git a/riscv/execute.cc b/riscv/execute.cc index ea4dc5b..7cb16dd 100644 --- a/riscv/execute.cc +++ b/riscv/execute.cc @@ -242,7 +242,6 @@ void processor_t::step(size_t n) switch (pc) { \ case PC_SERIALIZE_BEFORE: state.serialized = true; break; \ case PC_SERIALIZE_AFTER: ++instret; break; \ - case PC_SERIALIZE_WFI: n = ++instret; break; \ default: abort(); \ } \ pc = state.pc; \ -- cgit v1.1 From caf5a420ef7414031368739e16d60f3f418aa6ac Mon Sep 17 00:00:00 2001 From: Andrew Waterman Date: Fri, 3 Jun 2022 15:42:14 -0700 Subject: Remove nonstandard length encoding (#1023) This was an artifact of an old P-extension draft that erroneously allocated a reserved major opcode. The newer draft uses a different opcode, so this hack is no longer needed. --- riscv/decode.h | 1 - 1 file changed, 1 deletion(-) diff --git a/riscv/decode.h b/riscv/decode.h index 9f13a42..6c90e21 100644 --- a/riscv/decode.h +++ b/riscv/decode.h @@ -69,7 +69,6 @@ const int NCSR = 4096; (((x) & 0x03) < 0x03 ? 2 : \ ((x) & 0x1f) < 0x1f ? 4 : \ ((x) & 0x3f) < 0x3f ? 6 : \ - ((x) & 0x7f) == 0x7f ? 4 : \ 8) #define MAX_INSN_LENGTH 8 #define PC_ALIGN 2 -- cgit v1.1 From 656fa5acf6aef85ae2326309d9bb2cdba69987ef Mon Sep 17 00:00:00 2001 From: liweiwei90 <34847211+liweiwei90@users.noreply.github.com> Date: Tue, 7 Jun 2022 09:28:36 +0800 Subject: update disasm for cbo.* instructions (#1026) --- disasm/disasm.cc | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/disasm/disasm.cc b/disasm/disasm.cc index 7df39da..b42bdc2 100644 --- a/disasm/disasm.cc +++ b/disasm/disasm.cc @@ -2037,13 +2037,13 @@ void disassembler_t::add_instructions(const isa_parser_t* isa) } if (isa->extension_enabled(EXT_ZICBOM)) { - DISASM_INSN("cbo.clean", cbo_clean, 0, {&xrs1}); - DISASM_INSN("cbo.flush", cbo_flush, 0, {&xrs1}); - DISASM_INSN("cbo.inval", cbo_inval, 0, {&xrs1}); + DISASM_INSN("cbo.clean", cbo_clean, 0, {&base_only_address}); + DISASM_INSN("cbo.flush", cbo_flush, 0, {&base_only_address}); + DISASM_INSN("cbo.inval", cbo_inval, 0, {&base_only_address}); } if (isa->extension_enabled(EXT_ZICBOZ)) { - DISASM_INSN("cbo.zero", cbo_zero, 0, {&xrs1}); + DISASM_INSN("cbo.zero", cbo_zero, 0, {&base_only_address}); } if (isa->extension_enabled(EXT_ZKND) || -- cgit v1.1 From fea4a75d37a0d63e0df41efaea53114248c790c5 Mon Sep 17 00:00:00 2001 From: Andrew Waterman Date: Mon, 6 Jun 2022 20:27:35 -0700 Subject: insn_t: don't rely on sign-extension of internal encoding --- riscv/decode.h | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/riscv/decode.h b/riscv/decode.h index 6c90e21..d56d496 100644 --- a/riscv/decode.h +++ b/riscv/decode.h @@ -81,11 +81,11 @@ public: insn_t(insn_bits_t bits) : b(bits) {} insn_bits_t bits() { return b & ~((UINT64_MAX) << (length() * 8)); } int length() { return insn_length(b); } - int64_t i_imm() { return int64_t(b) >> 20; } + 
int64_t i_imm() { return xs(20, 12); } int64_t shamt() { return x(20, 6); } int64_t s_imm() { return x(7, 5) + (xs(25, 7) << 5); } int64_t sb_imm() { return (x(8, 4) << 1) + (x(25, 6) << 5) + (x(7, 1) << 11) + (imm_sign() << 12); } - int64_t u_imm() { return int64_t(b) >> 12 << 12; } + int64_t u_imm() { return xs(12, 20) << 12; } int64_t uj_imm() { return (x(21, 10) << 1) + (x(20, 1) << 11) + (x(12, 8) << 12) + (imm_sign() << 20); } uint64_t rd() { return x(7, 5); } uint64_t rs1() { return x(15, 5); } @@ -144,7 +144,7 @@ private: insn_bits_t b; uint64_t x(int lo, int len) { return (b >> lo) & ((insn_bits_t(1) << len) - 1); } uint64_t xs(int lo, int len) { return int64_t(b) << (64 - lo - len) >> (64 - len); } - uint64_t imm_sign() { return xs(63, 1); } + uint64_t imm_sign() { return xs(31, 1); } }; template -- cgit v1.1 From 898c0dd6a0c7bfe5e332ab0b6c00b3fd3b3d06a0 Mon Sep 17 00:00:00 2001 From: Andrew Waterman Date: Mon, 6 Jun 2022 20:34:24 -0700 Subject: Zero-extend instructions when fetching them from memory ...since we no longer rely on their being sign-extended. --- riscv/mmu.h | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/riscv/mmu.h b/riscv/mmu.h index 8964e29..5e776a9 100644 --- a/riscv/mmu.h +++ b/riscv/mmu.h @@ -319,15 +319,15 @@ public: int length = insn_length(insn); if (likely(length == 4)) { - insn |= (insn_bits_t)from_le(*(const int16_t*)translate_insn_addr_to_host(addr + 2)) << 16; + insn |= (insn_bits_t)from_le(*(const uint16_t*)translate_insn_addr_to_host(addr + 2)) << 16; } else if (length == 2) { - insn = (int16_t)insn; + // entire instruction already fetched } else if (length == 6) { - insn |= (insn_bits_t)from_le(*(const int16_t*)translate_insn_addr_to_host(addr + 4)) << 32; + insn |= (insn_bits_t)from_le(*(const uint16_t*)translate_insn_addr_to_host(addr + 4)) << 32; insn |= (insn_bits_t)from_le(*(const uint16_t*)translate_insn_addr_to_host(addr + 2)) << 16; } else { static_assert(sizeof(insn_bits_t) == 8, "insn_bits_t must be uint64_t"); - insn |= (insn_bits_t)from_le(*(const int16_t*)translate_insn_addr_to_host(addr + 6)) << 48; + insn |= (insn_bits_t)from_le(*(const uint16_t*)translate_insn_addr_to_host(addr + 6)) << 48; insn |= (insn_bits_t)from_le(*(const uint16_t*)translate_insn_addr_to_host(addr + 4)) << 32; insn |= (insn_bits_t)from_le(*(const uint16_t*)translate_insn_addr_to_host(addr + 2)) << 16; } -- cgit v1.1 From d2020b3256cf9a832cbbfe7a32c5753abe75cb7d Mon Sep 17 00:00:00 2001 From: Andrew Waterman Date: Mon, 6 Jun 2022 20:35:35 -0700 Subject: Zero-extend instructions in spike-dasm ...since we no longer rely on their being sign-extended. 
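A small worked example of why the immediates still come out right once instructions are kept zero-extended (insn_t::xs() is inlined here as a free function; the encoding 0xfffff0b7 is just an illustrative lui x1, 0xfffff):

    #include <cassert>
    #include <cstdint>

    // sign-extends the len-bit field starting at bit lo, as insn_t::xs() does
    static int64_t xs(uint64_t b, int lo, int len) {
      return int64_t(b) << (64 - lo - len) >> (64 - len);
    }

    int main() {
      // lui x1, 0xfffff: raw bits 0xfffff0b7, now held zero-extended, so the
      // upper 32 bits of b are 0 rather than copies of bit 31
      uint64_t b = 0xfffff0b7;

      // u_imm() = xs(12, 20) << 12 sign-extends the 20-bit field itself, so
      // it no longer matters that the container is not sign-extended
      assert((xs(b, 12, 20) << 12) == -4096);

      // likewise imm_sign() can read bit 31 of the instruction directly
      // instead of relying on a sign-extended bit 63
      assert(xs(b, 31, 1) == -1);
      return 0;
    }

The same reasoning is what lets spike-dasm drop its manual re-sign-extension of the parsed hex value.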
--- spike_dasm/spike-dasm.cc | 6 +----- 1 file changed, 1 insertion(+), 5 deletions(-) diff --git a/spike_dasm/spike-dasm.cc b/spike_dasm/spike-dasm.cc index c4fc840..8c85ef1 100644 --- a/spike_dasm/spike-dasm.cc +++ b/spike_dasm/spike-dasm.cc @@ -50,14 +50,10 @@ int main(int argc, char** argv) continue; char* endp; - int64_t bits = strtoull(&s[pos], &endp, 16); + insn_bits_t bits = strtoull(&s[pos], &endp, 16); if (*endp != ')') continue; - size_t nbits = 4 * (endp - &s[pos]); - if (nbits < 64) - bits = bits << (64 - nbits) >> (64 - nbits); - string dis = disassembler->disassemble(bits); s = s.substr(0, start) + dis + s.substr(endp - &s[0] + 1); pos = start + dis.length(); -- cgit v1.1 From 7d943d740afb6a42b50c979800608c6fb1614d0c Mon Sep 17 00:00:00 2001 From: Andrew Waterman Date: Mon, 6 Jun 2022 20:36:05 -0700 Subject: Don't mask instruction bits No longer needed, since they are no longer sign-extended. Fixes #1022 by eliminating undefined behavior (64-bit instructions resulted in a shift amount equal to the datatype width). --- riscv/decode.h | 2 +- riscv/processor.cc | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/riscv/decode.h b/riscv/decode.h index d56d496..72b4a6b 100644 --- a/riscv/decode.h +++ b/riscv/decode.h @@ -79,7 +79,7 @@ class insn_t public: insn_t() = default; insn_t(insn_bits_t bits) : b(bits) {} - insn_bits_t bits() { return b & ~((UINT64_MAX) << (length() * 8)); } + insn_bits_t bits() { return b; } int length() { return insn_length(b); } int64_t i_imm() { return xs(20, 12); } int64_t shamt() { return x(20, 6); } diff --git a/riscv/processor.cc b/riscv/processor.cc index bb41248..c4ca0bc 100644 --- a/riscv/processor.cc +++ b/riscv/processor.cc @@ -796,7 +796,7 @@ void processor_t::take_trap(trap_t& t, reg_t epc) void processor_t::disasm(insn_t insn) { - uint64_t bits = insn.bits() & ((1ULL << (8 * insn_length(insn.bits()))) - 1); + uint64_t bits = insn.bits(); if (last_pc != state.pc || last_bits != bits) { std::stringstream s; // first put everything in a string, later send it to output -- cgit v1.1 From 2aedbdd01911a42565cd6d154f82fa00a66410cd Mon Sep 17 00:00:00 2001 From: Weiwei Li Date: Fri, 1 Jul 2022 16:09:02 +0800 Subject: remove multi blank lines --- riscv/csrs.cc | 63 -------------------------------------------------- riscv/csrs.h | 37 ----------------------------- riscv/dts.cc | 2 -- riscv/entropy_source.h | 1 - riscv/interactive.cc | 1 - riscv/processor.h | 1 - riscv/triggers.cc | 1 - riscv/v_ext_macros.h | 4 ---- 8 files changed, 110 deletions(-) diff --git a/riscv/csrs.cc b/riscv/csrs.cc index 6bca99c..7d1be10 100644 --- a/riscv/csrs.cc +++ b/riscv/csrs.cc @@ -18,7 +18,6 @@ #undef STATE #define STATE (*state) - // implement class csr_t csr_t::csr_t(processor_t* const proc, const reg_t addr): proc(proc), @@ -47,7 +46,6 @@ void csr_t::verify_permissions(insn_t insn, bool write) const { } } - csr_t::~csr_t() { } @@ -83,7 +81,6 @@ bool basic_csr_t::unlogged_write(const reg_t val) noexcept { return true; } - // implement class pmpaddr_csr_t pmpaddr_csr_t::pmpaddr_csr_t(processor_t* const proc, const reg_t addr): csr_t(proc, addr), @@ -92,7 +89,6 @@ pmpaddr_csr_t::pmpaddr_csr_t(processor_t* const proc, const reg_t addr): pmpidx(address - CSR_PMPADDR0) { } - void pmpaddr_csr_t::verify_permissions(insn_t insn, bool write) const { csr_t::verify_permissions(insn, write); // If n_pmp is zero, that means pmp is not implemented hence raise @@ -103,14 +99,12 @@ void pmpaddr_csr_t::verify_permissions(insn_t insn, bool write) const { throw 
trap_illegal_instruction(insn.bits()); } - reg_t pmpaddr_csr_t::read() const noexcept { if ((cfg & PMP_A) >= PMP_NAPOT) return val | (~proc->pmp_tor_mask() >> 1); return val & proc->pmp_tor_mask(); } - bool pmpaddr_csr_t::unlogged_write(const reg_t val) noexcept { // If no PMPs are configured, disallow access to all. Otherwise, // allow access to all, but unimplemented ones are hardwired to @@ -140,25 +134,21 @@ bool pmpaddr_csr_t::next_locked_and_tor() const noexcept { return next_locked && next_tor; } - reg_t pmpaddr_csr_t::tor_paddr() const noexcept { return (val & proc->pmp_tor_mask()) << PMP_SHIFT; } - reg_t pmpaddr_csr_t::tor_base_paddr() const noexcept { if (pmpidx == 0) return 0; // entry 0 always uses 0 as base return state->pmpaddr[pmpidx-1]->tor_paddr(); } - reg_t pmpaddr_csr_t::napot_mask() const noexcept { bool is_na4 = (cfg & PMP_A) == PMP_NA4; reg_t mask = (val << 1) | (!is_na4) | ~proc->pmp_tor_mask(); return ~(mask & ~(mask + 1)) << PMP_SHIFT; } - bool pmpaddr_csr_t::match4(reg_t addr) const noexcept { if ((cfg & PMP_A) == 0) return false; bool is_tor = (cfg & PMP_A) == PMP_TOR; @@ -167,7 +157,6 @@ bool pmpaddr_csr_t::match4(reg_t addr) const noexcept { return ((addr ^ tor_paddr()) & napot_mask()) == 0; } - bool pmpaddr_csr_t::subset_match(reg_t addr, reg_t len) const noexcept { if ((addr | len) & (len - 1)) abort(); @@ -190,7 +179,6 @@ bool pmpaddr_csr_t::subset_match(reg_t addr, reg_t len) const noexcept { return !(is_tor ? tor_homogeneous : napot_homogeneous); } - bool pmpaddr_csr_t::access_ok(access_type type, reg_t mode) const noexcept { const bool cfgx = cfg & PMP_X; const bool cfgw = cfg & PMP_W; @@ -225,7 +213,6 @@ bool pmpaddr_csr_t::access_ok(access_type type, reg_t mode) const noexcept { } } - // implement class pmpcfg_csr_t pmpcfg_csr_t::pmpcfg_csr_t(processor_t* const proc, const reg_t addr): csr_t(proc, addr) { @@ -328,7 +315,6 @@ virtualized_csr_t::virtualized_csr_t(processor_t* const proc, csr_t_p orig, csr_ virt_csr(virt) { } - reg_t virtualized_csr_t::read() const noexcept { return readvirt(state->v); } @@ -345,49 +331,41 @@ bool virtualized_csr_t::unlogged_write(const reg_t val) noexcept { return false; // virt_csr or orig_csr has already logged } - // implement class epc_csr_t epc_csr_t::epc_csr_t(processor_t* const proc, const reg_t addr): csr_t(proc, addr), val(0) { } - reg_t epc_csr_t::read() const noexcept { return val & proc->pc_alignment_mask(); } - bool epc_csr_t::unlogged_write(const reg_t val) noexcept { this->val = val & ~(reg_t)1; return true; } - // implement class tvec_csr_t tvec_csr_t::tvec_csr_t(processor_t* const proc, const reg_t addr): csr_t(proc, addr), val(0) { } - reg_t tvec_csr_t::read() const noexcept { return val; } - bool tvec_csr_t::unlogged_write(const reg_t val) noexcept { this->val = val & ~(reg_t)2; return true; } - // implement class cause_csr_t cause_csr_t::cause_csr_t(processor_t* const proc, const reg_t addr): basic_csr_t(proc, addr, 0) { } - reg_t cause_csr_t::read() const noexcept { reg_t val = basic_csr_t::read(); // When reading, the interrupt bit needs to adjust to xlen. Spike does @@ -398,7 +376,6 @@ reg_t cause_csr_t::read() const noexcept { return val; } - // implement class base_status_csr_t base_status_csr_t::base_status_csr_t(processor_t* const proc, const reg_t addr): csr_t(proc, addr), @@ -408,7 +385,6 @@ base_status_csr_t::base_status_csr_t(processor_t* const proc, const reg_t addr): | (proc->get_const_xlen() == 32 ? 
SSTATUS32_SD : SSTATUS64_SD)) { } - reg_t base_status_csr_t::compute_sstatus_write_mask() const noexcept { // If a configuration has FS bits, they will always be accessible no // matter the state of misa. @@ -424,7 +400,6 @@ reg_t base_status_csr_t::compute_sstatus_write_mask() const noexcept { ; } - reg_t base_status_csr_t::adjust_sd(const reg_t val) const noexcept { // This uses get_const_xlen() instead of get_xlen() not only because // the variable is static, so it's only called once, but also @@ -440,7 +415,6 @@ reg_t base_status_csr_t::adjust_sd(const reg_t val) const noexcept { return val & ~sd_bit; } - void base_status_csr_t::maybe_flush_tlb(const reg_t newval) noexcept { if ((newval ^ read()) & (MSTATUS_MPP | MSTATUS_MPRV @@ -449,7 +423,6 @@ void base_status_csr_t::maybe_flush_tlb(const reg_t newval) noexcept { proc->get_mmu()->flush_tlb(); } - namespace { int xlen_to_uxl(int xlen) { if (xlen == 32) @@ -460,7 +433,6 @@ namespace { } } - // implement class vsstatus_csr_t vsstatus_csr_t::vsstatus_csr_t(processor_t* const proc, const reg_t addr): base_status_csr_t(proc, addr), @@ -474,7 +446,6 @@ bool vsstatus_csr_t::unlogged_write(const reg_t val) noexcept { return true; } - // implement class sstatus_proxy_csr_t sstatus_proxy_csr_t::sstatus_proxy_csr_t(processor_t* const proc, const reg_t addr, mstatus_csr_t_p mstatus): base_status_csr_t(proc, addr), @@ -488,7 +459,6 @@ bool sstatus_proxy_csr_t::unlogged_write(const reg_t val) noexcept { return false; // avoid double logging: already logged by mstatus->write() } - // implement class mstatus_csr_t mstatus_csr_t::mstatus_csr_t(processor_t* const proc, const reg_t addr): base_status_csr_t(proc, addr), @@ -503,7 +473,6 @@ mstatus_csr_t::mstatus_csr_t(processor_t* const proc, const reg_t addr): ) { } - bool mstatus_csr_t::unlogged_write(const reg_t val) noexcept { const bool has_mpv = proc->extension_enabled('S') && proc->extension_enabled('H'); const bool has_gva = has_mpv; @@ -576,7 +545,6 @@ bool sstatus_csr_t::enabled(const reg_t which) { return false; } - // implement class misa_csr_t misa_csr_t::misa_csr_t(processor_t* const proc, const reg_t addr, const reg_t max_isa): basic_csr_t(proc, addr, max_isa), @@ -637,7 +605,6 @@ bool misa_csr_t::extension_enabled_const(unsigned char ext) const noexcept { return extension_enabled(ext); } - // implement class mip_or_mie_csr_t mip_or_mie_csr_t::mip_or_mie_csr_t(processor_t* const proc, const reg_t addr): csr_t(proc, addr), @@ -658,7 +625,6 @@ bool mip_or_mie_csr_t::unlogged_write(const reg_t val) noexcept { return false; // avoid double logging: already logged by write_with_mask() } - mip_csr_t::mip_csr_t(processor_t* const proc, const reg_t addr): mip_or_mie_csr_t(proc, addr) { } @@ -680,12 +646,10 @@ reg_t mip_csr_t::write_mask() const noexcept { (MIP_SEIP | MIP_SSIP | MIP_STIP | vssip_int); } - mie_csr_t::mie_csr_t(processor_t* const proc, const reg_t addr): mip_or_mie_csr_t(proc, addr) { } - reg_t mie_csr_t::write_mask() const noexcept { const reg_t supervisor_ints = proc->extension_enabled('S') ? MIP_SSIP | MIP_STIP | MIP_SEIP : 0; const reg_t hypervisor_ints = proc->extension_enabled('H') ? 
MIP_HS_MASK : 0; @@ -695,7 +659,6 @@ reg_t mie_csr_t::write_mask() const noexcept { return all_ints; } - // implement class generic_int_accessor_t generic_int_accessor_t::generic_int_accessor_t(state_t* const state, const reg_t read_mask, @@ -736,7 +699,6 @@ reg_t generic_int_accessor_t::deleg_mask() const { return hideleg_mask & mideleg_mask; } - // implement class mip_proxy_csr_t mip_proxy_csr_t::mip_proxy_csr_t(processor_t* const proc, const reg_t addr, generic_int_accessor_t_p accr): csr_t(proc, addr), @@ -767,7 +729,6 @@ bool mie_proxy_csr_t::unlogged_write(const reg_t val) noexcept { return false; // accr has already logged } - // implement class mideleg_csr_t mideleg_csr_t::mideleg_csr_t(processor_t* const proc, const reg_t addr): basic_csr_t(proc, addr, 0) { @@ -795,7 +756,6 @@ bool mideleg_csr_t::unlogged_write(const reg_t val) noexcept { return basic_csr_t::unlogged_write(val & delegable_ints); } - // implement class medeleg_csr_t medeleg_csr_t::medeleg_csr_t(processor_t* const proc, const reg_t addr): basic_csr_t(proc, addr, 0), @@ -828,7 +788,6 @@ bool medeleg_csr_t::unlogged_write(const reg_t val) noexcept { return basic_csr_t::unlogged_write((read() & ~mask) | (val & mask)); } - // implement class masked_csr_t masked_csr_t::masked_csr_t(processor_t* const proc, const reg_t addr, const reg_t mask, const reg_t init): basic_csr_t(proc, addr, init), @@ -839,20 +798,17 @@ bool masked_csr_t::unlogged_write(const reg_t val) noexcept { return basic_csr_t::unlogged_write((read() & ~mask) | (val & mask)); } - // implement class henvcfg_csr_t henvcfg_csr_t::henvcfg_csr_t(processor_t* const proc, const reg_t addr, const reg_t mask, const reg_t init, csr_t_p menvcfg): masked_csr_t(proc, addr, mask, init), menvcfg(menvcfg) { } - // implement class base_atp_csr_t and family base_atp_csr_t::base_atp_csr_t(processor_t* const proc, const reg_t addr): basic_csr_t(proc, addr, 0) { } - bool base_atp_csr_t::unlogged_write(const reg_t val) noexcept { const reg_t newval = proc->supports_impl(IMPL_MMU) ? 
compute_new_satp(val) : 0; if (newval != read()) @@ -926,7 +882,6 @@ bool virtualized_satp_csr_t::unlogged_write(const reg_t val) noexcept { return virtualized_csr_t::unlogged_write(newval); } - // implement class wide_counter_csr_t wide_counter_csr_t::wide_counter_csr_t(processor_t* const proc, const reg_t addr): csr_t(proc, addr), @@ -966,7 +921,6 @@ void wide_counter_csr_t::write_upper_half(const reg_t val) noexcept { log_special_write(address + (CSR_MINSTRETH - CSR_MINSTRET), written_value() >> 32); } - counter_top_csr_t::counter_top_csr_t(processor_t* const proc, const reg_t addr, wide_counter_csr_t_p parent): csr_t(proc, addr), parent(parent) { @@ -981,7 +935,6 @@ bool counter_top_csr_t::unlogged_write(const reg_t val) noexcept { return true; } - proxy_csr_t::proxy_csr_t(processor_t* const proc, const reg_t addr, csr_t_p delegate): csr_t(proc, addr), delegate(delegate) { @@ -996,7 +949,6 @@ bool proxy_csr_t::unlogged_write(const reg_t val) noexcept { return false; } - const_csr_t::const_csr_t(processor_t* const proc, const reg_t addr, reg_t val): csr_t(proc, addr), val(val) { @@ -1010,7 +962,6 @@ bool const_csr_t::unlogged_write(const reg_t val) noexcept { return false; } - counter_proxy_csr_t::counter_proxy_csr_t(processor_t* const proc, const reg_t addr, csr_t_p delegate): proxy_csr_t(proc, addr, delegate) { } @@ -1036,7 +987,6 @@ void counter_proxy_csr_t::verify_permissions(insn_t insn, bool write) const { } } - hypervisor_csr_t::hypervisor_csr_t(processor_t* const proc, const reg_t addr): basic_csr_t(proc, addr, 0) { } @@ -1047,7 +997,6 @@ void hypervisor_csr_t::verify_permissions(insn_t insn, bool write) const { throw trap_illegal_instruction(insn.bits()); } - hideleg_csr_t::hideleg_csr_t(processor_t* const proc, const reg_t addr, csr_t_p mideleg): masked_csr_t(proc, addr, MIP_VS_MASK, 0), mideleg(mideleg) { @@ -1057,7 +1006,6 @@ reg_t hideleg_csr_t::read() const noexcept { return masked_csr_t::read() & mideleg->read(); }; - hgatp_csr_t::hgatp_csr_t(processor_t* const proc, const reg_t addr): basic_csr_t(proc, addr, 0) { } @@ -1090,7 +1038,6 @@ bool hgatp_csr_t::unlogged_write(const reg_t val) noexcept { return basic_csr_t::unlogged_write((read() & ~mask) | (val & mask)); } - tselect_csr_t::tselect_csr_t(processor_t* const proc, const reg_t addr): basic_csr_t(proc, addr, 0) { } @@ -1099,7 +1046,6 @@ bool tselect_csr_t::unlogged_write(const reg_t val) noexcept { return basic_csr_t::unlogged_write((val < proc->TM.count()) ? 
val : read()); } - tdata1_csr_t::tdata1_csr_t(processor_t* const proc, const reg_t addr): csr_t(proc, addr) { } @@ -1112,7 +1058,6 @@ bool tdata1_csr_t::unlogged_write(const reg_t val) noexcept { return proc->TM.tdata1_write(proc, state->tselect->read(), val); } - tdata2_csr_t::tdata2_csr_t(processor_t* const proc, const reg_t addr): csr_t(proc, addr) { } @@ -1125,7 +1070,6 @@ bool tdata2_csr_t::unlogged_write(const reg_t val) noexcept { return proc->TM.tdata2_write(proc, state->tselect->read(), val); } - debug_mode_csr_t::debug_mode_csr_t(processor_t* const proc, const reg_t addr): basic_csr_t(proc, addr, 0) { } @@ -1146,7 +1090,6 @@ void dpc_csr_t::verify_permissions(insn_t insn, bool write) const { throw trap_illegal_instruction(insn.bits()); } - dcsr_csr_t::dcsr_csr_t(processor_t* const proc, const reg_t addr): csr_t(proc, addr), prv(0), @@ -1198,7 +1141,6 @@ void dcsr_csr_t::write_cause_and_prv(uint8_t cause, reg_t prv) noexcept { log_write(); } - float_csr_t::float_csr_t(processor_t* const proc, const reg_t addr, const reg_t mask, const reg_t init): masked_csr_t(proc, addr, mask, init) { } @@ -1215,7 +1157,6 @@ bool float_csr_t::unlogged_write(const reg_t val) noexcept { return masked_csr_t::unlogged_write(val); } - composite_csr_t::composite_csr_t(processor_t* const proc, const reg_t addr, csr_t_p upper_csr, csr_t_p lower_csr, const unsigned upper_lsb): csr_t(proc, addr), upper_csr(upper_csr), @@ -1239,7 +1180,6 @@ bool composite_csr_t::unlogged_write(const reg_t val) noexcept { return false; // logging is done only by the underlying CSRs } - seed_csr_t::seed_csr_t(processor_t* const proc, const reg_t addr): csr_t(proc, addr) { } @@ -1261,8 +1201,6 @@ bool seed_csr_t::unlogged_write(const reg_t val) noexcept { return true; } - - vector_csr_t::vector_csr_t(processor_t* const proc, const reg_t addr, const reg_t mask, const reg_t init): basic_csr_t(proc, addr, init), mask(mask) { @@ -1287,7 +1225,6 @@ bool vector_csr_t::unlogged_write(const reg_t val) noexcept { return basic_csr_t::unlogged_write(val & mask); } - vxsat_csr_t::vxsat_csr_t(processor_t* const proc, const reg_t addr): masked_csr_t(proc, addr, /*mask*/ 1, /*init*/ 0) { } diff --git a/riscv/csrs.h b/riscv/csrs.h index f6d2c2c..ab3cdb7 100644 --- a/riscv/csrs.h +++ b/riscv/csrs.h @@ -57,7 +57,6 @@ class csr_t { typedef std::shared_ptr csr_t_p; - // Basic CSRs, with XLEN bits fully readable and writable. 
class basic_csr_t: public csr_t { public: @@ -73,7 +72,6 @@ class basic_csr_t: public csr_t { reg_t val; }; - class pmpaddr_csr_t: public csr_t { public: pmpaddr_csr_t(processor_t* const proc, const reg_t addr); @@ -174,7 +172,6 @@ class epc_csr_t: public csr_t { reg_t val; }; - // For mtvec, stvec, and vstvec class tvec_csr_t: public csr_t { public: @@ -187,7 +184,6 @@ class tvec_csr_t: public csr_t { reg_t val; }; - // For mcause, scause, and vscause class cause_csr_t: public basic_csr_t { public: @@ -196,7 +192,6 @@ class cause_csr_t: public basic_csr_t { virtual reg_t read() const noexcept override; }; - // For *status family of CSRs class base_status_csr_t: public csr_t { public: @@ -218,7 +213,6 @@ class base_status_csr_t: public csr_t { typedef std::shared_ptr base_status_csr_t_p; - // For vsstatus, which is its own separate architectural register // (unlike sstatus) class vsstatus_csr_t final: public base_status_csr_t { @@ -237,7 +231,6 @@ class vsstatus_csr_t final: public base_status_csr_t { typedef std::shared_ptr vsstatus_csr_t_p; - class mstatus_csr_t final: public base_status_csr_t { public: mstatus_csr_t(processor_t* const proc, const reg_t addr); @@ -255,7 +248,6 @@ class mstatus_csr_t final: public base_status_csr_t { typedef std::shared_ptr mstatus_csr_t_p; - class mstatush_csr_t: public csr_t { public: mstatush_csr_t(processor_t* const proc, const reg_t addr, mstatus_csr_t_p mstatus); @@ -267,7 +259,6 @@ class mstatush_csr_t: public csr_t { const reg_t mask; }; - class sstatus_proxy_csr_t final: public base_status_csr_t { public: sstatus_proxy_csr_t(processor_t* const proc, const reg_t addr, mstatus_csr_t_p mstatus); @@ -299,7 +290,6 @@ class sstatus_csr_t: public virtualized_csr_t { typedef std::shared_ptr sstatus_csr_t_p; - class misa_csr_t final: public basic_csr_t { public: misa_csr_t(processor_t* const proc, const reg_t addr, const reg_t max_isa); @@ -320,7 +310,6 @@ class misa_csr_t final: public basic_csr_t { typedef std::shared_ptr misa_csr_t_p; - class mip_or_mie_csr_t: public csr_t { public: mip_or_mie_csr_t(processor_t* const proc, const reg_t addr); @@ -335,7 +324,6 @@ class mip_or_mie_csr_t: public csr_t { virtual reg_t write_mask() const noexcept = 0; }; - // mip is special because some of the bits are driven by hardware pins class mip_csr_t: public mip_or_mie_csr_t { public: @@ -349,7 +337,6 @@ class mip_csr_t: public mip_or_mie_csr_t { typedef std::shared_ptr mip_csr_t_p; - class mie_csr_t: public mip_or_mie_csr_t { public: mie_csr_t(processor_t* const proc, const reg_t addr); @@ -359,7 +346,6 @@ class mie_csr_t: public mip_or_mie_csr_t { typedef std::shared_ptr mie_csr_t_p; - // For sip, hip, hvip, vsip, sie, hie, vsie which are all just (masked // & shifted) views into mip or mie. Each pair will have one of these // objects describing the view, e.g. 
one for sip+sie, one for hip+hie, @@ -391,7 +377,6 @@ class generic_int_accessor_t { typedef std::shared_ptr generic_int_accessor_t_p; - // For all CSRs that are simply (masked & shifted) views into mip class mip_proxy_csr_t: public csr_t { public: @@ -414,8 +399,6 @@ class mie_proxy_csr_t: public csr_t { generic_int_accessor_t_p accr; }; - - class mideleg_csr_t: public basic_csr_t { public: mideleg_csr_t(processor_t* const proc, const reg_t addr); @@ -425,7 +408,6 @@ class mideleg_csr_t: public basic_csr_t { virtual bool unlogged_write(const reg_t val) noexcept override; }; - class medeleg_csr_t: public basic_csr_t { public: medeleg_csr_t(processor_t* const proc, const reg_t addr); @@ -436,7 +418,6 @@ class medeleg_csr_t: public basic_csr_t { const reg_t hypervisor_exceptions; }; - // For CSRs with certain bits hardwired class masked_csr_t: public basic_csr_t { public: @@ -447,7 +428,6 @@ class masked_csr_t: public basic_csr_t { const reg_t mask; }; - // henvcfg.pbmte is read_only 0 when menvcfg.pbmte = 0 class henvcfg_csr_t final: public masked_csr_t { public: @@ -461,7 +441,6 @@ class henvcfg_csr_t final: public masked_csr_t { csr_t_p menvcfg; }; - // For satp and vsatp // These are three classes in order to handle the [V]TVM bits permission checks class base_atp_csr_t: public basic_csr_t { @@ -492,7 +471,6 @@ class virtualized_satp_csr_t: public virtualized_csr_t { satp_csr_t_p orig_satp; }; - // For minstret and mcycle, which are always 64 bits, but in RV32 are // split into high and low halves. The first class always holds the // full 64-bit value. @@ -512,7 +490,6 @@ class wide_counter_csr_t: public csr_t { typedef std::shared_ptr wide_counter_csr_t_p; - // A simple proxy to read/write the upper half of minstret/mcycle class counter_top_csr_t: public csr_t { public: @@ -526,7 +503,6 @@ class counter_top_csr_t: public csr_t { typedef std::shared_ptr counter_top_csr_t_p; - // For a CSR that is an alias of another class proxy_csr_t: public csr_t { public: @@ -538,7 +514,6 @@ class proxy_csr_t: public csr_t { csr_t_p delegate; }; - // For a CSR with a fixed, unchanging value class const_csr_t: public csr_t { public: @@ -550,7 +525,6 @@ class const_csr_t: public csr_t { const reg_t val; }; - // For a CSR that is an unprivileged accessor of a privileged counter class counter_proxy_csr_t: public proxy_csr_t { public: @@ -560,7 +534,6 @@ class counter_proxy_csr_t: public proxy_csr_t { bool myenable(csr_t_p counteren) const noexcept; }; - // For machine-level CSRs that only exist with Hypervisor class hypervisor_csr_t: public basic_csr_t { public: @@ -568,7 +541,6 @@ class hypervisor_csr_t: public basic_csr_t { virtual void verify_permissions(insn_t insn, bool write) const override; }; - class hideleg_csr_t: public masked_csr_t { public: hideleg_csr_t(processor_t* const proc, const reg_t addr, csr_t_p mideleg); @@ -577,7 +549,6 @@ class hideleg_csr_t: public masked_csr_t { csr_t_p mideleg; }; - class hgatp_csr_t: public basic_csr_t { public: hgatp_csr_t(processor_t* const proc, const reg_t addr); @@ -586,7 +557,6 @@ class hgatp_csr_t: public basic_csr_t { virtual bool unlogged_write(const reg_t val) noexcept override; }; - class tselect_csr_t: public basic_csr_t { public: tselect_csr_t(processor_t* const proc, const reg_t addr); @@ -594,7 +564,6 @@ class tselect_csr_t: public basic_csr_t { virtual bool unlogged_write(const reg_t val) noexcept override; }; - class tdata1_csr_t: public csr_t { public: tdata1_csr_t(processor_t* const proc, const reg_t addr); @@ -620,7 +589,6 @@ class 
debug_mode_csr_t: public basic_csr_t { typedef std::shared_ptr tdata2_csr_t_p; - class dpc_csr_t: public epc_csr_t { public: dpc_csr_t(processor_t* const proc, const reg_t addr); @@ -648,7 +616,6 @@ class dcsr_csr_t: public csr_t { typedef std::shared_ptr dcsr_csr_t_p; - class float_csr_t final: public masked_csr_t { public: float_csr_t(processor_t* const proc, const reg_t addr, const reg_t mask, const reg_t init); @@ -659,7 +626,6 @@ class float_csr_t final: public masked_csr_t { typedef std::shared_ptr float_csr_t_p; - // For a CSR like FCSR, that is actually a view into multiple // underlying registers. class composite_csr_t: public csr_t { @@ -676,7 +642,6 @@ class composite_csr_t: public csr_t { const unsigned upper_lsb; }; - class seed_csr_t: public csr_t { public: seed_csr_t(processor_t* const proc, const reg_t addr); @@ -686,7 +651,6 @@ class seed_csr_t: public csr_t { virtual bool unlogged_write(const reg_t val) noexcept override; }; - class vector_csr_t: public basic_csr_t { public: vector_csr_t(processor_t* const proc, const reg_t addr, const reg_t mask, const reg_t init=0); @@ -701,7 +665,6 @@ class vector_csr_t: public basic_csr_t { typedef std::shared_ptr vector_csr_t_p; - // For CSRs shared between Vector and P extensions (vxsat) class vxsat_csr_t: public masked_csr_t { public: diff --git a/riscv/dts.cc b/riscv/dts.cc index 6b47c76..5d37463 100644 --- a/riscv/dts.cc +++ b/riscv/dts.cc @@ -183,7 +183,6 @@ std::string dts_compile(const std::string& dts) return dtb.str(); } - static int fdt_get_node_addr_size(void *fdt, int node, reg_t *addr, unsigned long *size, const char *field) { @@ -245,7 +244,6 @@ static int check_cpu_node(void *fdt, int cpu_offset) return 0; } - int fdt_get_offset(void *fdt, const char *field) { return fdt_path_offset(fdt, field); diff --git a/riscv/entropy_source.h b/riscv/entropy_source.h index 47823ff..184bec7 100644 --- a/riscv/entropy_source.h +++ b/riscv/entropy_source.h @@ -36,7 +36,6 @@ public: // to handle the side-effect of the changing seed value on a read. } - // // The format of seed is described in Section 4.1 of // the scalar cryptography specification. 
diff --git a/riscv/interactive.cc b/riscv/interactive.cc index 88eb86b..f41be2c 100644 --- a/riscv/interactive.cc +++ b/riscv/interactive.cc @@ -372,7 +372,6 @@ void sim_t::interactive_vreg(const std::string& cmd, const std::vector& args) { if (args.size() < 1) diff --git a/riscv/processor.h b/riscv/processor.h index ec1b400..0c6a6b2 100644 --- a/riscv/processor.h +++ b/riscv/processor.h @@ -118,7 +118,6 @@ struct type_sew_t<64> using type=int64_t; }; - // architectural state of a RISC-V hart struct state_t { diff --git a/riscv/triggers.cc b/riscv/triggers.cc index 4d16a58..c9d6161 100644 --- a/riscv/triggers.cc +++ b/riscv/triggers.cc @@ -212,5 +212,4 @@ bool module_t::tdata2_write(processor_t * const proc, unsigned index, const reg_ return result; } - }; diff --git a/riscv/v_ext_macros.h b/riscv/v_ext_macros.h index 4df6813..1a2a734 100644 --- a/riscv/v_ext_macros.h +++ b/riscv/v_ext_macros.h @@ -201,7 +201,6 @@ static inline bool is_aligned(const unsigned val, const unsigned pos) if (is_over) \ require(insn.rd() != insn.rs2()); \ - // // vector: loop header and end helper // @@ -291,7 +290,6 @@ static inline bool is_aligned(const unsigned val, const unsigned pos) require(!(insn.rd() == 0 && P.VU.vflmul > 1)); \ }); - #define INT_ROUNDING(result, xrm, gb) \ do { \ const uint64_t lsb = 1UL << (gb); \ @@ -656,7 +654,6 @@ static inline bool is_aligned(const unsigned val, const unsigned pos) REDUCTION_ULOOP(e64, BODY) \ } - // genearl VXI signed/unsigned loop #define VI_VV_ULOOP(BODY) \ VI_CHECK_SSS(true) \ @@ -1825,7 +1822,6 @@ reg_t index[P.VU.vlmax]; \ DEBUG_RVV_FP_VV; \ VI_VFP_LOOP_END - #define VI_VFP_VV_LOOP_WIDE(BODY16, BODY32) \ VI_CHECK_DSS(true); \ VI_VFP_LOOP_BASE \ -- cgit v1.1 From daa9ca539a183cd01cd89e0342590abed17fd61f Mon Sep 17 00:00:00 2001 From: Weiwei Li Date: Fri, 1 Jul 2022 22:04:10 +0800 Subject: update encoding.h --- riscv/encoding.h | 7933 +++++++++++++++++++++++++++--------------------------- 1 file changed, 3998 insertions(+), 3935 deletions(-) diff --git a/riscv/encoding.h b/riscv/encoding.h index e6dbd7c..1af7f89 100644 --- a/riscv/encoding.h +++ b/riscv/encoding.h @@ -1,9 +1,9 @@ -/* - * This file is auto-generated by running 'make' in - * https://github.com/riscv/riscv-opcodes (d2b9aea) - */ -/* See LICENSE for license details. 
*/ +/* +* This file is auto-generated by running 'make' in +* https://github.com/riscv/riscv-opcodes (3db008d) +*/ +/* SPDX-License-Identifier: BSD-3-Clause-Clear */ #ifndef RISCV_CSR_ENCODING_H #define RISCV_CSR_ENCODING_H @@ -139,6 +139,7 @@ #define MIP_VSEIP (1 << IRQ_VS_EXT) #define MIP_MEIP (1 << IRQ_M_EXT) #define MIP_SGEIP (1 << IRQ_S_GEXT) +#define MIP_LCOFIP (1 << IRQ_LCOF) #define MIP_S_MASK (MIP_SSIP | MIP_STIP | MIP_SEIP) #define MIP_VS_MASK (MIP_VSSIP | MIP_VSTIP | MIP_VSEIP) @@ -159,6 +160,30 @@ #define MENVCFGH_PBMTE 0x40000000 #define MENVCFGH_STCE 0x80000000 +#define MSTATEEN0_CS 0x00000001 +#define MSTATEEN0_FCSR 0x00000002 +#define MSTATEEN0_HCONTEXT 0x0200000000000000 +#define MSTATEEN0_HENVCFG 0x4000000000000000 +#define MSTATEEN_HSTATEEN 0x8000000000000000 + +#define MSTATEEN0H_HCONTEXT 0x02000000 +#define MSTATEEN0H_HENVCFG 0x40000000 +#define MSTATEENH_HSTATEEN 0x80000000 + +#define MHPMEVENT_VUINH 0x0400000000000000 +#define MHPMEVENT_VSINH 0x0800000000000000 +#define MHPMEVENT_UINH 0x1000000000000000 +#define MHPMEVENT_SINH 0x2000000000000000 +#define MHPMEVENT_MINH 0x4000000000000000 +#define MHPMEVENT_OF 0x8000000000000000 + +#define MHPMEVENTH_VUINH 0x04000000 +#define MHPMEVENTH_VSINH 0x08000000 +#define MHPMEVENTH_UINH 0x10000000 +#define MHPMEVENTH_SINH 0x20000000 +#define MHPMEVENTH_MINH 0x40000000 +#define MHPMEVENTH_OF 0x80000000 + #define HENVCFG_FIOM 0x00000001 #define HENVCFG_CBIE 0x00000030 #define HENVCFG_CBCFE 0x00000040 @@ -169,11 +194,24 @@ #define HENVCFGH_PBMTE 0x40000000 #define HENVCFGH_STCE 0x80000000 +#define HSTATEEN0_CS 0x00000001 +#define HSTATEEN0_FCSR 0x00000002 +#define HSTATEEN0_SCONTEXT 0x0200000000000000 +#define HSTATEEN0_SENVCFG 0x4000000000000000 +#define HSTATEEN_SSTATEEN 0x8000000000000000 + +#define HSTATEEN0H_SCONTEXT 0x02000000 +#define HSTATEEN0H_SENVCFG 0x40000000 +#define HSTATEENH_SSTATEEN 0x80000000 + #define SENVCFG_FIOM 0x00000001 #define SENVCFG_CBIE 0x00000030 #define SENVCFG_CBCFE 0x00000040 #define SENVCFG_CBZE 0x00000080 +#define SSTATEEN0_CS 0x00000001 +#define SSTATEEN0_FCSR 0x00000002 + #define MSECCFG_MML 0x00000001 #define MSECCFG_MMWP 0x00000002 #define MSECCFG_RLB 0x00000004 @@ -239,7 +277,7 @@ #define IRQ_M_EXT 11 #define IRQ_S_GEXT 12 #define IRQ_COP 12 -#define IRQ_HOST 13 +#define IRQ_LCOF 13 /* page table entry (PTE) fields */ #define PTE_V 0x001 /* Valid */ @@ -310,3869 +348,3306 @@ #endif #endif -/* Automatically generated by parse_opcodes. */ + +/* Automatically generated by parse_opcodes. 
*/ #ifndef RISCV_ENCODING_H #define RISCV_ENCODING_H -#define MATCH_SLLI_RV32 0x1013 -#define MASK_SLLI_RV32 0xfe00707f -#define MATCH_SRLI_RV32 0x5013 -#define MASK_SRLI_RV32 0xfe00707f -#define MATCH_SRAI_RV32 0x40005013 -#define MASK_SRAI_RV32 0xfe00707f -#define MATCH_FRFLAGS 0x102073 -#define MASK_FRFLAGS 0xfffff07f -#define MATCH_FSFLAGS 0x101073 -#define MASK_FSFLAGS 0xfff0707f -#define MATCH_FSFLAGSI 0x105073 -#define MASK_FSFLAGSI 0xfff0707f -#define MATCH_FRRM 0x202073 -#define MASK_FRRM 0xfffff07f -#define MATCH_FSRM 0x201073 -#define MASK_FSRM 0xfff0707f -#define MATCH_FSRMI 0x205073 -#define MASK_FSRMI 0xfff0707f -#define MATCH_FSCSR 0x301073 -#define MASK_FSCSR 0xfff0707f -#define MATCH_FRCSR 0x302073 -#define MASK_FRCSR 0xfffff07f -#define MATCH_RDCYCLE 0xc0002073 -#define MASK_RDCYCLE 0xfffff07f -#define MATCH_RDTIME 0xc0102073 -#define MASK_RDTIME 0xfffff07f -#define MATCH_RDINSTRET 0xc0202073 -#define MASK_RDINSTRET 0xfffff07f -#define MATCH_RDCYCLEH 0xc8002073 -#define MASK_RDCYCLEH 0xfffff07f -#define MATCH_RDTIMEH 0xc8102073 -#define MASK_RDTIMEH 0xfffff07f -#define MATCH_RDINSTRETH 0xc8202073 -#define MASK_RDINSTRETH 0xfffff07f -#define MATCH_SCALL 0x73 -#define MASK_SCALL 0xffffffff -#define MATCH_SBREAK 0x100073 -#define MASK_SBREAK 0xffffffff -#define MATCH_FMV_X_S 0xe0000053 -#define MASK_FMV_X_S 0xfff0707f -#define MATCH_FMV_S_X 0xf0000053 -#define MASK_FMV_S_X 0xfff0707f -#define MATCH_FENCE_TSO 0x8330000f -#define MASK_FENCE_TSO 0xfff0707f -#define MATCH_PAUSE 0x100000f -#define MASK_PAUSE 0xffffffff -#define MATCH_BEQ 0x63 -#define MASK_BEQ 0x707f -#define MATCH_BNE 0x1063 -#define MASK_BNE 0x707f -#define MATCH_BLT 0x4063 -#define MASK_BLT 0x707f -#define MATCH_BGE 0x5063 -#define MASK_BGE 0x707f -#define MATCH_BLTU 0x6063 -#define MASK_BLTU 0x707f -#define MATCH_BGEU 0x7063 -#define MASK_BGEU 0x707f -#define MATCH_JALR 0x67 -#define MASK_JALR 0x707f -#define MATCH_JAL 0x6f -#define MASK_JAL 0x7f -#define MATCH_LUI 0x37 -#define MASK_LUI 0x7f -#define MATCH_AUIPC 0x17 -#define MASK_AUIPC 0x7f -#define MATCH_ADDI 0x13 -#define MASK_ADDI 0x707f -#define MATCH_SLTI 0x2013 -#define MASK_SLTI 0x707f -#define MATCH_SLTIU 0x3013 -#define MASK_SLTIU 0x707f -#define MATCH_XORI 0x4013 -#define MASK_XORI 0x707f -#define MATCH_ORI 0x6013 -#define MASK_ORI 0x707f -#define MATCH_ANDI 0x7013 -#define MASK_ANDI 0x707f #define MATCH_ADD 0x33 -#define MASK_ADD 0xfe00707f -#define MATCH_SUB 0x40000033 -#define MASK_SUB 0xfe00707f -#define MATCH_SLL 0x1033 -#define MASK_SLL 0xfe00707f -#define MATCH_SLT 0x2033 -#define MASK_SLT 0xfe00707f -#define MATCH_SLTU 0x3033 -#define MASK_SLTU 0xfe00707f -#define MATCH_XOR 0x4033 -#define MASK_XOR 0xfe00707f -#define MATCH_SRL 0x5033 -#define MASK_SRL 0xfe00707f -#define MATCH_SRA 0x40005033 -#define MASK_SRA 0xfe00707f -#define MATCH_OR 0x6033 -#define MASK_OR 0xfe00707f -#define MATCH_AND 0x7033 -#define MASK_AND 0xfe00707f -#define MATCH_LB 0x3 -#define MASK_LB 0x707f -#define MATCH_LH 0x1003 -#define MASK_LH 0x707f -#define MATCH_LW 0x2003 -#define MASK_LW 0x707f -#define MATCH_LBU 0x4003 -#define MASK_LBU 0x707f -#define MATCH_LHU 0x5003 -#define MASK_LHU 0x707f -#define MATCH_SB 0x23 -#define MASK_SB 0x707f -#define MATCH_SH 0x1023 -#define MASK_SH 0x707f -#define MATCH_SW 0x2023 -#define MASK_SW 0x707f -#define MATCH_FENCE 0xf -#define MASK_FENCE 0x707f -#define MATCH_FENCE_I 0x100f -#define MASK_FENCE_I 0x707f +#define MASK_ADD 0xfe00707f +#define MATCH_ADD16 0x40000077 +#define MASK_ADD16 0xfe00707f +#define MATCH_ADD32 
0x40002077 +#define MASK_ADD32 0xfe00707f +#define MATCH_ADD64 0xc0001077 +#define MASK_ADD64 0xfe00707f +#define MATCH_ADD8 0x48000077 +#define MASK_ADD8 0xfe00707f +#define MATCH_ADD_UW 0x800003b +#define MASK_ADD_UW 0xfe00707f +#define MATCH_ADDI 0x13 +#define MASK_ADDI 0x707f #define MATCH_ADDIW 0x1b -#define MASK_ADDIW 0x707f -#define MATCH_SLLIW 0x101b -#define MASK_SLLIW 0xfe00707f -#define MATCH_SRLIW 0x501b -#define MASK_SRLIW 0xfe00707f -#define MATCH_SRAIW 0x4000501b -#define MASK_SRAIW 0xfe00707f +#define MASK_ADDIW 0x707f #define MATCH_ADDW 0x3b -#define MASK_ADDW 0xfe00707f -#define MATCH_SUBW 0x4000003b -#define MASK_SUBW 0xfe00707f -#define MATCH_SLLW 0x103b -#define MASK_SLLW 0xfe00707f -#define MATCH_SRLW 0x503b -#define MASK_SRLW 0xfe00707f -#define MATCH_SRAW 0x4000503b -#define MASK_SRAW 0xfe00707f -#define MATCH_LD 0x3003 -#define MASK_LD 0x707f -#define MATCH_LWU 0x6003 -#define MASK_LWU 0x707f -#define MATCH_SD 0x3023 -#define MASK_SD 0x707f -#define MATCH_SLLI 0x1013 -#define MASK_SLLI 0xfc00707f -#define MATCH_SRLI 0x5013 -#define MASK_SRLI 0xfc00707f -#define MATCH_SRAI 0x40005013 -#define MASK_SRAI 0xfc00707f -#define MATCH_MUL 0x2000033 -#define MASK_MUL 0xfe00707f -#define MATCH_MULH 0x2001033 -#define MASK_MULH 0xfe00707f -#define MATCH_MULHSU 0x2002033 -#define MASK_MULHSU 0xfe00707f -#define MATCH_MULHU 0x2003033 -#define MASK_MULHU 0xfe00707f -#define MATCH_DIV 0x2004033 -#define MASK_DIV 0xfe00707f -#define MATCH_DIVU 0x2005033 -#define MASK_DIVU 0xfe00707f -#define MATCH_REM 0x2006033 -#define MASK_REM 0xfe00707f -#define MATCH_REMU 0x2007033 -#define MASK_REMU 0xfe00707f -#define MATCH_MULW 0x200003b -#define MASK_MULW 0xfe00707f -#define MATCH_DIVW 0x200403b -#define MASK_DIVW 0xfe00707f -#define MATCH_DIVUW 0x200503b -#define MASK_DIVUW 0xfe00707f -#define MATCH_REMW 0x200603b -#define MASK_REMW 0xfe00707f -#define MATCH_REMUW 0x200703b -#define MASK_REMUW 0xfe00707f +#define MASK_ADDW 0xfe00707f +#define MATCH_AES32DSI 0x2a000033 +#define MASK_AES32DSI 0x3e00707f +#define MATCH_AES32DSMI 0x2e000033 +#define MASK_AES32DSMI 0x3e00707f +#define MATCH_AES32ESI 0x22000033 +#define MASK_AES32ESI 0x3e00707f +#define MATCH_AES32ESMI 0x26000033 +#define MASK_AES32ESMI 0x3e00707f +#define MATCH_AES64DS 0x3a000033 +#define MASK_AES64DS 0xfe00707f +#define MATCH_AES64DSM 0x3e000033 +#define MASK_AES64DSM 0xfe00707f +#define MATCH_AES64ES 0x32000033 +#define MASK_AES64ES 0xfe00707f +#define MATCH_AES64ESM 0x36000033 +#define MASK_AES64ESM 0xfe00707f +#define MATCH_AES64IM 0x30001013 +#define MASK_AES64IM 0xfff0707f +#define MATCH_AES64KS1I 0x31001013 +#define MASK_AES64KS1I 0xff00707f +#define MATCH_AES64KS2 0x7e000033 +#define MASK_AES64KS2 0xfe00707f +#define MATCH_AMOADD_D 0x302f +#define MASK_AMOADD_D 0xf800707f #define MATCH_AMOADD_W 0x202f -#define MASK_AMOADD_W 0xf800707f -#define MATCH_AMOXOR_W 0x2000202f -#define MASK_AMOXOR_W 0xf800707f -#define MATCH_AMOOR_W 0x4000202f -#define MASK_AMOOR_W 0xf800707f +#define MASK_AMOADD_W 0xf800707f +#define MATCH_AMOAND_D 0x6000302f +#define MASK_AMOAND_D 0xf800707f #define MATCH_AMOAND_W 0x6000202f -#define MASK_AMOAND_W 0xf800707f -#define MATCH_AMOMIN_W 0x8000202f -#define MASK_AMOMIN_W 0xf800707f +#define MASK_AMOAND_W 0xf800707f +#define MATCH_AMOMAX_D 0xa000302f +#define MASK_AMOMAX_D 0xf800707f #define MATCH_AMOMAX_W 0xa000202f -#define MASK_AMOMAX_W 0xf800707f -#define MATCH_AMOMINU_W 0xc000202f -#define MASK_AMOMINU_W 0xf800707f +#define MASK_AMOMAX_W 0xf800707f +#define MATCH_AMOMAXU_D 0xe000302f +#define 
MASK_AMOMAXU_D 0xf800707f #define MATCH_AMOMAXU_W 0xe000202f -#define MASK_AMOMAXU_W 0xf800707f -#define MATCH_AMOSWAP_W 0x800202f -#define MASK_AMOSWAP_W 0xf800707f -#define MATCH_LR_W 0x1000202f -#define MASK_LR_W 0xf9f0707f -#define MATCH_SC_W 0x1800202f -#define MASK_SC_W 0xf800707f -#define MATCH_AMOADD_D 0x302f -#define MASK_AMOADD_D 0xf800707f -#define MATCH_AMOXOR_D 0x2000302f -#define MASK_AMOXOR_D 0xf800707f -#define MATCH_AMOOR_D 0x4000302f -#define MASK_AMOOR_D 0xf800707f -#define MATCH_AMOAND_D 0x6000302f -#define MASK_AMOAND_D 0xf800707f +#define MASK_AMOMAXU_W 0xf800707f #define MATCH_AMOMIN_D 0x8000302f -#define MASK_AMOMIN_D 0xf800707f -#define MATCH_AMOMAX_D 0xa000302f -#define MASK_AMOMAX_D 0xf800707f +#define MASK_AMOMIN_D 0xf800707f +#define MATCH_AMOMIN_W 0x8000202f +#define MASK_AMOMIN_W 0xf800707f #define MATCH_AMOMINU_D 0xc000302f -#define MASK_AMOMINU_D 0xf800707f -#define MATCH_AMOMAXU_D 0xe000302f -#define MASK_AMOMAXU_D 0xf800707f +#define MASK_AMOMINU_D 0xf800707f +#define MATCH_AMOMINU_W 0xc000202f +#define MASK_AMOMINU_W 0xf800707f +#define MATCH_AMOOR_D 0x4000302f +#define MASK_AMOOR_D 0xf800707f +#define MATCH_AMOOR_W 0x4000202f +#define MASK_AMOOR_W 0xf800707f #define MATCH_AMOSWAP_D 0x800302f -#define MASK_AMOSWAP_D 0xf800707f -#define MATCH_LR_D 0x1000302f -#define MASK_LR_D 0xf9f0707f -#define MATCH_SC_D 0x1800302f -#define MASK_SC_D 0xf800707f -#define MATCH_HFENCE_VVMA 0x22000073 -#define MASK_HFENCE_VVMA 0xfe007fff -#define MATCH_HFENCE_GVMA 0x62000073 -#define MASK_HFENCE_GVMA 0xfe007fff -#define MATCH_HLV_B 0x60004073 -#define MASK_HLV_B 0xfff0707f -#define MATCH_HLV_BU 0x60104073 -#define MASK_HLV_BU 0xfff0707f -#define MATCH_HLV_H 0x64004073 -#define MASK_HLV_H 0xfff0707f -#define MATCH_HLV_HU 0x64104073 -#define MASK_HLV_HU 0xfff0707f -#define MATCH_HLVX_HU 0x64304073 -#define MASK_HLVX_HU 0xfff0707f -#define MATCH_HLV_W 0x68004073 -#define MASK_HLV_W 0xfff0707f -#define MATCH_HLVX_WU 0x68304073 -#define MASK_HLVX_WU 0xfff0707f -#define MATCH_HSV_B 0x62004073 -#define MASK_HSV_B 0xfe007fff -#define MATCH_HSV_H 0x66004073 -#define MASK_HSV_H 0xfe007fff -#define MATCH_HSV_W 0x6a004073 -#define MASK_HSV_W 0xfe007fff -#define MATCH_HLV_WU 0x68104073 -#define MASK_HLV_WU 0xfff0707f -#define MATCH_HLV_D 0x6c004073 -#define MASK_HLV_D 0xfff0707f -#define MATCH_HSV_D 0x6e004073 -#define MASK_HSV_D 0xfe007fff -#define MATCH_FADD_S 0x53 -#define MASK_FADD_S 0xfe00007f -#define MATCH_FSUB_S 0x8000053 -#define MASK_FSUB_S 0xfe00007f -#define MATCH_FMUL_S 0x10000053 -#define MASK_FMUL_S 0xfe00007f -#define MATCH_FDIV_S 0x18000053 -#define MASK_FDIV_S 0xfe00007f -#define MATCH_FSGNJ_S 0x20000053 -#define MASK_FSGNJ_S 0xfe00707f -#define MATCH_FSGNJN_S 0x20001053 -#define MASK_FSGNJN_S 0xfe00707f -#define MATCH_FSGNJX_S 0x20002053 -#define MASK_FSGNJX_S 0xfe00707f -#define MATCH_FMIN_S 0x28000053 -#define MASK_FMIN_S 0xfe00707f -#define MATCH_FMAX_S 0x28001053 -#define MASK_FMAX_S 0xfe00707f -#define MATCH_FSQRT_S 0x58000053 -#define MASK_FSQRT_S 0xfff0007f -#define MATCH_FLE_S 0xa0000053 -#define MASK_FLE_S 0xfe00707f -#define MATCH_FLT_S 0xa0001053 -#define MASK_FLT_S 0xfe00707f -#define MATCH_FEQ_S 0xa0002053 -#define MASK_FEQ_S 0xfe00707f -#define MATCH_FCVT_W_S 0xc0000053 -#define MASK_FCVT_W_S 0xfff0007f -#define MATCH_FCVT_WU_S 0xc0100053 -#define MASK_FCVT_WU_S 0xfff0007f -#define MATCH_FMV_X_W 0xe0000053 -#define MASK_FMV_X_W 0xfff0707f -#define MATCH_FCLASS_S 0xe0001053 -#define MASK_FCLASS_S 0xfff0707f -#define MATCH_FCVT_S_W 0xd0000053 -#define 
MASK_FCVT_S_W 0xfff0007f -#define MATCH_FCVT_S_WU 0xd0100053 -#define MASK_FCVT_S_WU 0xfff0007f -#define MATCH_FMV_W_X 0xf0000053 -#define MASK_FMV_W_X 0xfff0707f -#define MATCH_FLW 0x2007 -#define MASK_FLW 0x707f -#define MATCH_FSW 0x2027 -#define MASK_FSW 0x707f -#define MATCH_FMADD_S 0x43 -#define MASK_FMADD_S 0x600007f -#define MATCH_FMSUB_S 0x47 -#define MASK_FMSUB_S 0x600007f -#define MATCH_FNMSUB_S 0x4b -#define MASK_FNMSUB_S 0x600007f -#define MATCH_FNMADD_S 0x4f -#define MASK_FNMADD_S 0x600007f -#define MATCH_FCVT_L_S 0xc0200053 -#define MASK_FCVT_L_S 0xfff0007f -#define MATCH_FCVT_LU_S 0xc0300053 -#define MASK_FCVT_LU_S 0xfff0007f -#define MATCH_FCVT_S_L 0xd0200053 -#define MASK_FCVT_S_L 0xfff0007f -#define MATCH_FCVT_S_LU 0xd0300053 -#define MASK_FCVT_S_LU 0xfff0007f -#define MATCH_FADD_D 0x2000053 -#define MASK_FADD_D 0xfe00007f -#define MATCH_FSUB_D 0xa000053 -#define MASK_FSUB_D 0xfe00007f -#define MATCH_FMUL_D 0x12000053 -#define MASK_FMUL_D 0xfe00007f -#define MATCH_FDIV_D 0x1a000053 -#define MASK_FDIV_D 0xfe00007f -#define MATCH_FSGNJ_D 0x22000053 -#define MASK_FSGNJ_D 0xfe00707f -#define MATCH_FSGNJN_D 0x22001053 -#define MASK_FSGNJN_D 0xfe00707f -#define MATCH_FSGNJX_D 0x22002053 -#define MASK_FSGNJX_D 0xfe00707f -#define MATCH_FMIN_D 0x2a000053 -#define MASK_FMIN_D 0xfe00707f -#define MATCH_FMAX_D 0x2a001053 -#define MASK_FMAX_D 0xfe00707f -#define MATCH_FCVT_S_D 0x40100053 -#define MASK_FCVT_S_D 0xfff0007f -#define MATCH_FCVT_D_S 0x42000053 -#define MASK_FCVT_D_S 0xfff0007f -#define MATCH_FSQRT_D 0x5a000053 -#define MASK_FSQRT_D 0xfff0007f -#define MATCH_FLE_D 0xa2000053 -#define MASK_FLE_D 0xfe00707f -#define MATCH_FLT_D 0xa2001053 -#define MASK_FLT_D 0xfe00707f -#define MATCH_FEQ_D 0xa2002053 -#define MASK_FEQ_D 0xfe00707f -#define MATCH_FCVT_W_D 0xc2000053 -#define MASK_FCVT_W_D 0xfff0007f -#define MATCH_FCVT_WU_D 0xc2100053 -#define MASK_FCVT_WU_D 0xfff0007f -#define MATCH_FCLASS_D 0xe2001053 -#define MASK_FCLASS_D 0xfff0707f -#define MATCH_FCVT_D_W 0xd2000053 -#define MASK_FCVT_D_W 0xfff0007f -#define MATCH_FCVT_D_WU 0xd2100053 -#define MASK_FCVT_D_WU 0xfff0007f -#define MATCH_FLD 0x3007 -#define MASK_FLD 0x707f -#define MATCH_FSD 0x3027 -#define MASK_FSD 0x707f -#define MATCH_FMADD_D 0x2000043 -#define MASK_FMADD_D 0x600007f -#define MATCH_FMSUB_D 0x2000047 -#define MASK_FMSUB_D 0x600007f -#define MATCH_FNMSUB_D 0x200004b -#define MASK_FNMSUB_D 0x600007f -#define MATCH_FNMADD_D 0x200004f -#define MASK_FNMADD_D 0x600007f -#define MATCH_FCVT_L_D 0xc2200053 -#define MASK_FCVT_L_D 0xfff0007f -#define MATCH_FCVT_LU_D 0xc2300053 -#define MASK_FCVT_LU_D 0xfff0007f -#define MATCH_FMV_X_D 0xe2000053 -#define MASK_FMV_X_D 0xfff0707f -#define MATCH_FCVT_D_L 0xd2200053 -#define MASK_FCVT_D_L 0xfff0007f -#define MATCH_FCVT_D_LU 0xd2300053 -#define MASK_FCVT_D_LU 0xfff0007f -#define MATCH_FMV_D_X 0xf2000053 -#define MASK_FMV_D_X 0xfff0707f -#define MATCH_FADD_Q 0x6000053 -#define MASK_FADD_Q 0xfe00007f -#define MATCH_FSUB_Q 0xe000053 -#define MASK_FSUB_Q 0xfe00007f -#define MATCH_FMUL_Q 0x16000053 -#define MASK_FMUL_Q 0xfe00007f -#define MATCH_FDIV_Q 0x1e000053 -#define MASK_FDIV_Q 0xfe00007f -#define MATCH_FSGNJ_Q 0x26000053 -#define MASK_FSGNJ_Q 0xfe00707f -#define MATCH_FSGNJN_Q 0x26001053 -#define MASK_FSGNJN_Q 0xfe00707f -#define MATCH_FSGNJX_Q 0x26002053 -#define MASK_FSGNJX_Q 0xfe00707f -#define MATCH_FMIN_Q 0x2e000053 -#define MASK_FMIN_Q 0xfe00707f -#define MATCH_FMAX_Q 0x2e001053 -#define MASK_FMAX_Q 0xfe00707f -#define MATCH_FCVT_S_Q 0x40300053 -#define 
MASK_FCVT_S_Q 0xfff0007f -#define MATCH_FCVT_Q_S 0x46000053 -#define MASK_FCVT_Q_S 0xfff0007f -#define MATCH_FCVT_D_Q 0x42300053 -#define MASK_FCVT_D_Q 0xfff0007f -#define MATCH_FCVT_Q_D 0x46100053 -#define MASK_FCVT_Q_D 0xfff0007f -#define MATCH_FSQRT_Q 0x5e000053 -#define MASK_FSQRT_Q 0xfff0007f -#define MATCH_FLE_Q 0xa6000053 -#define MASK_FLE_Q 0xfe00707f -#define MATCH_FLT_Q 0xa6001053 -#define MASK_FLT_Q 0xfe00707f -#define MATCH_FEQ_Q 0xa6002053 -#define MASK_FEQ_Q 0xfe00707f -#define MATCH_FCVT_W_Q 0xc6000053 -#define MASK_FCVT_W_Q 0xfff0007f -#define MATCH_FCVT_WU_Q 0xc6100053 -#define MASK_FCVT_WU_Q 0xfff0007f -#define MATCH_FCLASS_Q 0xe6001053 -#define MASK_FCLASS_Q 0xfff0707f -#define MATCH_FCVT_Q_W 0xd6000053 -#define MASK_FCVT_Q_W 0xfff0007f -#define MATCH_FCVT_Q_WU 0xd6100053 -#define MASK_FCVT_Q_WU 0xfff0007f -#define MATCH_FLQ 0x4007 -#define MASK_FLQ 0x707f -#define MATCH_FSQ 0x4027 -#define MASK_FSQ 0x707f -#define MATCH_FMADD_Q 0x6000043 -#define MASK_FMADD_Q 0x600007f -#define MATCH_FMSUB_Q 0x6000047 -#define MASK_FMSUB_Q 0x600007f -#define MATCH_FNMSUB_Q 0x600004b -#define MASK_FNMSUB_Q 0x600007f -#define MATCH_FNMADD_Q 0x600004f -#define MASK_FNMADD_Q 0x600007f -#define MATCH_FCVT_L_Q 0xc6200053 -#define MASK_FCVT_L_Q 0xfff0007f -#define MATCH_FCVT_LU_Q 0xc6300053 -#define MASK_FCVT_LU_Q 0xfff0007f -#define MATCH_FCVT_Q_L 0xd6200053 -#define MASK_FCVT_Q_L 0xfff0007f -#define MATCH_FCVT_Q_LU 0xd6300053 -#define MASK_FCVT_Q_LU 0xfff0007f +#define MASK_AMOSWAP_D 0xf800707f +#define MATCH_AMOSWAP_W 0x800202f +#define MASK_AMOSWAP_W 0xf800707f +#define MATCH_AMOXOR_D 0x2000302f +#define MASK_AMOXOR_D 0xf800707f +#define MATCH_AMOXOR_W 0x2000202f +#define MASK_AMOXOR_W 0xf800707f +#define MATCH_AND 0x7033 +#define MASK_AND 0xfe00707f +#define MATCH_ANDI 0x7013 +#define MASK_ANDI 0x707f #define MATCH_ANDN 0x40007033 -#define MASK_ANDN 0xfe00707f -#define MATCH_ORN 0x40006033 -#define MASK_ORN 0xfe00707f -#define MATCH_XNOR 0x40004033 -#define MASK_XNOR 0xfe00707f -#define MATCH_SLO 0x20001033 -#define MASK_SLO 0xfe00707f -#define MATCH_SRO 0x20005033 -#define MASK_SRO 0xfe00707f -#define MATCH_ROL 0x60001033 -#define MASK_ROL 0xfe00707f -#define MATCH_ROR 0x60005033 -#define MASK_ROR 0xfe00707f +#define MASK_ANDN 0xfe00707f +#define MATCH_AUIPC 0x17 +#define MASK_AUIPC 0x7f +#define MATCH_AVE 0xe0000077 +#define MASK_AVE 0xfe00707f #define MATCH_BCLR 0x48001033 -#define MASK_BCLR 0xfe00707f -#define MATCH_BSET 0x28001033 -#define MASK_BSET 0xfe00707f -#define MATCH_BINV 0x68001033 -#define MASK_BINV 0xfe00707f -#define MATCH_BEXT 0x48005033 -#define MASK_BEXT 0xfe00707f -#define MATCH_GORC 0x28005033 -#define MASK_GORC 0xfe00707f -#define MATCH_GREV 0x68005033 -#define MASK_GREV 0xfe00707f -#define MATCH_SLOI 0x20001013 -#define MASK_SLOI 0xfc00707f -#define MATCH_SROI 0x20005013 -#define MASK_SROI 0xfc00707f -#define MATCH_RORI 0x60005013 -#define MASK_RORI 0xfc00707f +#define MASK_BCLR 0xfe00707f #define MATCH_BCLRI 0x48001013 -#define MASK_BCLRI 0xfc00707f -#define MATCH_BSETI 0x28001013 -#define MASK_BSETI 0xfc00707f -#define MATCH_BINVI 0x68001013 -#define MASK_BINVI 0xfc00707f +#define MASK_BCLRI 0xfc00707f +#define MATCH_BCOMPRESS 0x8006033 +#define MASK_BCOMPRESS 0xfe00707f +#define MATCH_BCOMPRESSW 0x800603b +#define MASK_BCOMPRESSW 0xfe00707f +#define MATCH_BDECOMPRESS 0x48006033 +#define MASK_BDECOMPRESS 0xfe00707f +#define MATCH_BDECOMPRESSW 0x4800603b +#define MASK_BDECOMPRESSW 0xfe00707f +#define MATCH_BEQ 0x63 +#define MASK_BEQ 0x707f +#define MATCH_BEXT 
0x48005033 +#define MASK_BEXT 0xfe00707f #define MATCH_BEXTI 0x48005013 -#define MASK_BEXTI 0xfc00707f -#define MATCH_GORCI 0x28005013 -#define MASK_GORCI 0xfc00707f -#define MATCH_GREVI 0x68005013 -#define MASK_GREVI 0xfc00707f -#define MATCH_CMIX 0x6001033 -#define MASK_CMIX 0x600707f -#define MATCH_CMOV 0x6005033 -#define MASK_CMOV 0x600707f -#define MATCH_FSL 0x4001033 -#define MASK_FSL 0x600707f -#define MATCH_FSR 0x4005033 -#define MASK_FSR 0x600707f -#define MATCH_FSRI 0x4005013 -#define MASK_FSRI 0x400707f -#define MATCH_CLZ 0x60001013 -#define MASK_CLZ 0xfff0707f -#define MATCH_CTZ 0x60101013 -#define MASK_CTZ 0xfff0707f -#define MATCH_CPOP 0x60201013 -#define MASK_CPOP 0xfff0707f -#define MATCH_SEXT_B 0x60401013 -#define MASK_SEXT_B 0xfff0707f -#define MATCH_SEXT_H 0x60501013 -#define MASK_SEXT_H 0xfff0707f -#define MATCH_CRC32_B 0x61001013 -#define MASK_CRC32_B 0xfff0707f -#define MATCH_CRC32_H 0x61101013 -#define MASK_CRC32_H 0xfff0707f -#define MATCH_CRC32_W 0x61201013 -#define MASK_CRC32_W 0xfff0707f -#define MATCH_CRC32C_B 0x61801013 -#define MASK_CRC32C_B 0xfff0707f -#define MATCH_CRC32C_H 0x61901013 -#define MASK_CRC32C_H 0xfff0707f -#define MATCH_CRC32C_W 0x61a01013 -#define MASK_CRC32C_W 0xfff0707f -#define MATCH_SH1ADD 0x20002033 -#define MASK_SH1ADD 0xfe00707f -#define MATCH_SH2ADD 0x20004033 -#define MASK_SH2ADD 0xfe00707f -#define MATCH_SH3ADD 0x20006033 -#define MASK_SH3ADD 0xfe00707f -#define MATCH_CLMUL 0xa001033 -#define MASK_CLMUL 0xfe00707f -#define MATCH_CLMULR 0xa002033 -#define MASK_CLMULR 0xfe00707f -#define MATCH_CLMULH 0xa003033 -#define MASK_CLMULH 0xfe00707f -#define MATCH_MIN 0xa004033 -#define MASK_MIN 0xfe00707f -#define MATCH_MINU 0xa005033 -#define MASK_MINU 0xfe00707f -#define MATCH_MAX 0xa006033 -#define MASK_MAX 0xfe00707f -#define MATCH_MAXU 0xa007033 -#define MASK_MAXU 0xfe00707f -#define MATCH_SHFL 0x8001033 -#define MASK_SHFL 0xfe00707f -#define MATCH_UNSHFL 0x8005033 -#define MASK_UNSHFL 0xfe00707f -#define MATCH_BCOMPRESS 0x8006033 -#define MASK_BCOMPRESS 0xfe00707f -#define MATCH_BDECOMPRESS 0x48006033 -#define MASK_BDECOMPRESS 0xfe00707f -#define MATCH_PACK 0x8004033 -#define MASK_PACK 0xfe00707f -#define MATCH_PACKU 0x48004033 -#define MASK_PACKU 0xfe00707f -#define MATCH_PACKH 0x8007033 -#define MASK_PACKH 0xfe00707f +#define MASK_BEXTI 0xfc00707f #define MATCH_BFP 0x48007033 -#define MASK_BFP 0xfe00707f -#define MATCH_SHFLI 0x8001013 -#define MASK_SHFLI 0xfe00707f -#define MATCH_UNSHFLI 0x8005013 -#define MASK_UNSHFLI 0xfe00707f -#define MATCH_XPERM4 0x28002033 -#define MASK_XPERM4 0xfe00707f -#define MATCH_XPERM8 0x28004033 -#define MASK_XPERM8 0xfe00707f -#define MATCH_XPERM16 0x28006033 -#define MASK_XPERM16 0xfe00707f +#define MASK_BFP 0xfe00707f +#define MATCH_BFPW 0x4800703b +#define MASK_BFPW 0xfe00707f +#define MATCH_BGE 0x5063 +#define MASK_BGE 0x707f +#define MATCH_BGEU 0x7063 +#define MASK_BGEU 0x707f +#define MATCH_BINV 0x68001033 +#define MASK_BINV 0xfe00707f +#define MATCH_BINVI 0x68001013 +#define MASK_BINVI 0xfc00707f +#define MATCH_BITREV 0xe6000077 +#define MASK_BITREV 0xfe00707f +#define MATCH_BITREVI 0xe8000077 +#define MASK_BITREVI 0xfc00707f +#define MATCH_BLT 0x4063 +#define MASK_BLT 0x707f +#define MATCH_BLTU 0x6063 +#define MASK_BLTU 0x707f #define MATCH_BMATFLIP 0x60301013 -#define MASK_BMATFLIP 0xfff0707f -#define MATCH_CRC32_D 0x61301013 -#define MASK_CRC32_D 0xfff0707f -#define MATCH_CRC32C_D 0x61b01013 -#define MASK_CRC32C_D 0xfff0707f +#define MASK_BMATFLIP 0xfff0707f #define MATCH_BMATOR 0x8003033 
-#define MASK_BMATOR 0xfe00707f +#define MASK_BMATOR 0xfe00707f #define MATCH_BMATXOR 0x48003033 -#define MASK_BMATXOR 0xfe00707f -#define MATCH_SLLI_UW 0x800101b -#define MASK_SLLI_UW 0xfc00707f -#define MATCH_ADD_UW 0x800003b -#define MASK_ADD_UW 0xfe00707f -#define MATCH_SLOW 0x2000103b -#define MASK_SLOW 0xfe00707f -#define MATCH_SROW 0x2000503b -#define MASK_SROW 0xfe00707f -#define MATCH_ROLW 0x6000103b -#define MASK_ROLW 0xfe00707f -#define MATCH_RORW 0x6000503b -#define MASK_RORW 0xfe00707f -#define MATCH_GORCW 0x2800503b -#define MASK_GORCW 0xfe00707f -#define MATCH_GREVW 0x6800503b -#define MASK_GREVW 0xfe00707f -#define MATCH_SLOIW 0x2000101b -#define MASK_SLOIW 0xfe00707f -#define MATCH_SROIW 0x2000501b -#define MASK_SROIW 0xfe00707f -#define MATCH_RORIW 0x6000501b -#define MASK_RORIW 0xfe00707f -#define MATCH_GORCIW 0x2800501b -#define MASK_GORCIW 0xfe00707f -#define MATCH_GREVIW 0x6800501b -#define MASK_GREVIW 0xfe00707f -#define MATCH_FSLW 0x400103b -#define MASK_FSLW 0x600707f -#define MATCH_FSRW 0x400503b -#define MASK_FSRW 0x600707f -#define MATCH_FSRIW 0x400501b -#define MASK_FSRIW 0x600707f -#define MATCH_CLZW 0x6000101b -#define MASK_CLZW 0xfff0707f -#define MATCH_CTZW 0x6010101b -#define MASK_CTZW 0xfff0707f -#define MATCH_CPOPW 0x6020101b -#define MASK_CPOPW 0xfff0707f -#define MATCH_SH1ADD_UW 0x2000203b -#define MASK_SH1ADD_UW 0xfe00707f -#define MATCH_SH2ADD_UW 0x2000403b -#define MASK_SH2ADD_UW 0xfe00707f -#define MATCH_SH3ADD_UW 0x2000603b -#define MASK_SH3ADD_UW 0xfe00707f -#define MATCH_SHFLW 0x800103b -#define MASK_SHFLW 0xfe00707f -#define MATCH_UNSHFLW 0x800503b -#define MASK_UNSHFLW 0xfe00707f -#define MATCH_BCOMPRESSW 0x800603b -#define MASK_BCOMPRESSW 0xfe00707f -#define MATCH_BDECOMPRESSW 0x4800603b -#define MASK_BDECOMPRESSW 0xfe00707f -#define MATCH_PACKW 0x800403b -#define MASK_PACKW 0xfe00707f -#define MATCH_PACKUW 0x4800403b -#define MASK_PACKUW 0xfe00707f -#define MATCH_BFPW 0x4800703b -#define MASK_BFPW 0xfe00707f -#define MATCH_XPERM32 0x28000033 -#define MASK_XPERM32 0xfe00707f -#define MATCH_ECALL 0x73 -#define MASK_ECALL 0xffffffff -#define MATCH_EBREAK 0x100073 -#define MASK_EBREAK 0xffffffff -#define MATCH_SRET 0x10200073 -#define MASK_SRET 0xffffffff -#define MATCH_MRET 0x30200073 -#define MASK_MRET 0xffffffff -#define MATCH_DRET 0x7b200073 -#define MASK_DRET 0xffffffff -#define MATCH_SFENCE_VMA 0x12000073 -#define MASK_SFENCE_VMA 0xfe007fff -#define MATCH_WFI 0x10500073 -#define MASK_WFI 0xffffffff -#define MATCH_CSRRW 0x1073 -#define MASK_CSRRW 0x707f -#define MATCH_CSRRS 0x2073 -#define MASK_CSRRS 0x707f -#define MATCH_CSRRC 0x3073 -#define MASK_CSRRC 0x707f -#define MATCH_CSRRWI 0x5073 -#define MASK_CSRRWI 0x707f -#define MATCH_CSRRSI 0x6073 -#define MASK_CSRRSI 0x707f -#define MATCH_CSRRCI 0x7073 -#define MASK_CSRRCI 0x707f -#define MATCH_SINVAL_VMA 0x16000073 -#define MASK_SINVAL_VMA 0xfe007fff -#define MATCH_SFENCE_W_INVAL 0x18000073 -#define MASK_SFENCE_W_INVAL 0xffffffff -#define MATCH_SFENCE_INVAL_IR 0x18100073 -#define MASK_SFENCE_INVAL_IR 0xffffffff -#define MATCH_HINVAL_VVMA 0x26000073 -#define MASK_HINVAL_VVMA 0xfe007fff -#define MATCH_HINVAL_GVMA 0x66000073 -#define MASK_HINVAL_GVMA 0xfe007fff -#define MATCH_FADD_H 0x4000053 -#define MASK_FADD_H 0xfe00007f -#define MATCH_FSUB_H 0xc000053 -#define MASK_FSUB_H 0xfe00007f -#define MATCH_FMUL_H 0x14000053 -#define MASK_FMUL_H 0xfe00007f -#define MATCH_FDIV_H 0x1c000053 -#define MASK_FDIV_H 0xfe00007f -#define MATCH_FSGNJ_H 0x24000053 -#define MASK_FSGNJ_H 0xfe00707f -#define 
MATCH_FSGNJN_H 0x24001053 -#define MASK_FSGNJN_H 0xfe00707f -#define MATCH_FSGNJX_H 0x24002053 -#define MASK_FSGNJX_H 0xfe00707f -#define MATCH_FMIN_H 0x2c000053 -#define MASK_FMIN_H 0xfe00707f -#define MATCH_FMAX_H 0x2c001053 -#define MASK_FMAX_H 0xfe00707f -#define MATCH_FCVT_H_S 0x44000053 -#define MASK_FCVT_H_S 0xfff0007f -#define MATCH_FCVT_S_H 0x40200053 -#define MASK_FCVT_S_H 0xfff0007f -#define MATCH_FSQRT_H 0x5c000053 -#define MASK_FSQRT_H 0xfff0007f -#define MATCH_FLE_H 0xa4000053 -#define MASK_FLE_H 0xfe00707f -#define MATCH_FLT_H 0xa4001053 -#define MASK_FLT_H 0xfe00707f -#define MATCH_FEQ_H 0xa4002053 -#define MASK_FEQ_H 0xfe00707f -#define MATCH_FCVT_W_H 0xc4000053 -#define MASK_FCVT_W_H 0xfff0007f -#define MATCH_FCVT_WU_H 0xc4100053 -#define MASK_FCVT_WU_H 0xfff0007f -#define MATCH_FMV_X_H 0xe4000053 -#define MASK_FMV_X_H 0xfff0707f -#define MATCH_FCLASS_H 0xe4001053 -#define MASK_FCLASS_H 0xfff0707f -#define MATCH_FCVT_H_W 0xd4000053 -#define MASK_FCVT_H_W 0xfff0007f -#define MATCH_FCVT_H_WU 0xd4100053 -#define MASK_FCVT_H_WU 0xfff0007f -#define MATCH_FMV_H_X 0xf4000053 -#define MASK_FMV_H_X 0xfff0707f -#define MATCH_FLH 0x1007 -#define MASK_FLH 0x707f -#define MATCH_FSH 0x1027 -#define MASK_FSH 0x707f -#define MATCH_FMADD_H 0x4000043 -#define MASK_FMADD_H 0x600007f -#define MATCH_FMSUB_H 0x4000047 -#define MASK_FMSUB_H 0x600007f -#define MATCH_FNMSUB_H 0x400004b -#define MASK_FNMSUB_H 0x600007f -#define MATCH_FNMADD_H 0x400004f -#define MASK_FNMADD_H 0x600007f -#define MATCH_FCVT_H_D 0x44100053 -#define MASK_FCVT_H_D 0xfff0007f -#define MATCH_FCVT_D_H 0x42200053 -#define MASK_FCVT_D_H 0xfff0007f -#define MATCH_FCVT_H_Q 0x44300053 -#define MASK_FCVT_H_Q 0xfff0007f -#define MATCH_FCVT_Q_H 0x46200053 -#define MASK_FCVT_Q_H 0xfff0007f -#define MATCH_FCVT_L_H 0xc4200053 -#define MASK_FCVT_L_H 0xfff0007f -#define MATCH_FCVT_LU_H 0xc4300053 -#define MASK_FCVT_LU_H 0xfff0007f -#define MATCH_FCVT_H_L 0xd4200053 -#define MASK_FCVT_H_L 0xfff0007f -#define MATCH_FCVT_H_LU 0xd4300053 -#define MASK_FCVT_H_LU 0xfff0007f -#define MATCH_SM4ED 0x30000033 -#define MASK_SM4ED 0x3e00707f -#define MATCH_SM4KS 0x34000033 -#define MASK_SM4KS 0x3e00707f -#define MATCH_SM3P0 0x10801013 -#define MASK_SM3P0 0xfff0707f -#define MATCH_SM3P1 0x10901013 -#define MASK_SM3P1 0xfff0707f -#define MATCH_SHA256SUM0 0x10001013 -#define MASK_SHA256SUM0 0xfff0707f -#define MATCH_SHA256SUM1 0x10101013 -#define MASK_SHA256SUM1 0xfff0707f -#define MATCH_SHA256SIG0 0x10201013 -#define MASK_SHA256SIG0 0xfff0707f -#define MATCH_SHA256SIG1 0x10301013 -#define MASK_SHA256SIG1 0xfff0707f -#define MATCH_AES32ESMI 0x26000033 -#define MASK_AES32ESMI 0x3e00707f -#define MATCH_AES32ESI 0x22000033 -#define MASK_AES32ESI 0x3e00707f -#define MATCH_AES32DSMI 0x2e000033 -#define MASK_AES32DSMI 0x3e00707f -#define MATCH_AES32DSI 0x2a000033 -#define MASK_AES32DSI 0x3e00707f -#define MATCH_SHA512SUM0R 0x50000033 -#define MASK_SHA512SUM0R 0xfe00707f -#define MATCH_SHA512SUM1R 0x52000033 -#define MASK_SHA512SUM1R 0xfe00707f -#define MATCH_SHA512SIG0L 0x54000033 -#define MASK_SHA512SIG0L 0xfe00707f -#define MATCH_SHA512SIG0H 0x5c000033 -#define MASK_SHA512SIG0H 0xfe00707f -#define MATCH_SHA512SIG1L 0x56000033 -#define MASK_SHA512SIG1L 0xfe00707f -#define MATCH_SHA512SIG1H 0x5e000033 -#define MASK_SHA512SIG1H 0xfe00707f -#define MATCH_AES64KS1I 0x31001013 -#define MASK_AES64KS1I 0xff00707f -#define MATCH_AES64IM 0x30001013 -#define MASK_AES64IM 0xfff0707f -#define MATCH_AES64KS2 0x7e000033 -#define MASK_AES64KS2 0xfe00707f -#define 
MATCH_AES64ESM 0x36000033 -#define MASK_AES64ESM 0xfe00707f -#define MATCH_AES64ES 0x32000033 -#define MASK_AES64ES 0xfe00707f -#define MATCH_AES64DSM 0x3e000033 -#define MASK_AES64DSM 0xfe00707f -#define MATCH_AES64DS 0x3a000033 -#define MASK_AES64DS 0xfe00707f -#define MATCH_SHA512SUM0 0x10401013 -#define MASK_SHA512SUM0 0xfff0707f -#define MATCH_SHA512SUM1 0x10501013 -#define MASK_SHA512SUM1 0xfff0707f -#define MATCH_SHA512SIG0 0x10601013 -#define MASK_SHA512SIG0 0xfff0707f -#define MATCH_SHA512SIG1 0x10701013 -#define MASK_SHA512SIG1 0xfff0707f -#define MATCH_CBO_CLEAN 0x10200f -#define MASK_CBO_CLEAN 0xfff07fff -#define MATCH_CBO_FLUSH 0x20200f -#define MASK_CBO_FLUSH 0xfff07fff -#define MATCH_CBO_INVAL 0x200f -#define MASK_CBO_INVAL 0xfff07fff -#define MATCH_CBO_ZERO 0x40200f -#define MASK_CBO_ZERO 0xfff07fff -#define MATCH_PREFETCH_I 0x6013 -#define MASK_PREFETCH_I 0x1f07fff -#define MATCH_PREFETCH_R 0x106013 -#define MASK_PREFETCH_R 0x1f07fff -#define MATCH_PREFETCH_W 0x306013 -#define MASK_PREFETCH_W 0x1f07fff -#define MATCH_C_NOP 0x1 -#define MASK_C_NOP 0xffff +#define MASK_BMATXOR 0xfe00707f +#define MATCH_BNE 0x1063 +#define MASK_BNE 0x707f +#define MATCH_BPICK 0x3077 +#define MASK_BPICK 0x600707f +#define MATCH_BSET 0x28001033 +#define MASK_BSET 0xfe00707f +#define MATCH_BSETI 0x28001013 +#define MASK_BSETI 0xfc00707f +#define MATCH_C_ADD 0x9002 +#define MASK_C_ADD 0xf003 +#define MATCH_C_ADDI 0x1 +#define MASK_C_ADDI 0xe003 #define MATCH_C_ADDI16SP 0x6101 -#define MASK_C_ADDI16SP 0xef83 -#define MATCH_C_JR 0x8002 -#define MASK_C_JR 0xf07f -#define MATCH_C_JALR 0x9002 -#define MASK_C_JALR 0xf07f -#define MATCH_C_EBREAK 0x9002 -#define MASK_C_EBREAK 0xffff +#define MASK_C_ADDI16SP 0xef83 #define MATCH_C_ADDI4SPN 0x0 -#define MASK_C_ADDI4SPN 0xe003 +#define MASK_C_ADDI4SPN 0xe003 +#define MATCH_C_ADDIW 0x2001 +#define MASK_C_ADDIW 0xe003 +#define MATCH_C_ADDW 0x9c21 +#define MASK_C_ADDW 0xfc63 +#define MATCH_C_AND 0x8c61 +#define MASK_C_AND 0xfc63 +#define MATCH_C_ANDI 0x8801 +#define MASK_C_ANDI 0xec03 +#define MATCH_C_BEQZ 0xc001 +#define MASK_C_BEQZ 0xe003 +#define MATCH_C_BNEZ 0xe001 +#define MASK_C_BNEZ 0xe003 +#define MATCH_C_EBREAK 0x9002 +#define MASK_C_EBREAK 0xffff #define MATCH_C_FLD 0x2000 -#define MASK_C_FLD 0xe003 -#define MATCH_C_LW 0x4000 -#define MASK_C_LW 0xe003 +#define MASK_C_FLD 0xe003 +#define MATCH_C_FLDSP 0x2002 +#define MASK_C_FLDSP 0xe003 #define MATCH_C_FLW 0x6000 -#define MASK_C_FLW 0xe003 +#define MASK_C_FLW 0xe003 +#define MATCH_C_FLWSP 0x6002 +#define MASK_C_FLWSP 0xe003 #define MATCH_C_FSD 0xa000 -#define MASK_C_FSD 0xe003 -#define MATCH_C_SW 0xc000 -#define MASK_C_SW 0xe003 +#define MASK_C_FSD 0xe003 +#define MATCH_C_FSDSP 0xa002 +#define MASK_C_FSDSP 0xe003 #define MATCH_C_FSW 0xe000 -#define MASK_C_FSW 0xe003 -#define MATCH_C_ADDI 0x1 -#define MASK_C_ADDI 0xe003 +#define MASK_C_FSW 0xe003 +#define MATCH_C_FSWSP 0xe002 +#define MASK_C_FSWSP 0xe003 +#define MATCH_C_J 0xa001 +#define MASK_C_J 0xe003 #define MATCH_C_JAL 0x2001 -#define MASK_C_JAL 0xe003 +#define MASK_C_JAL 0xe003 +#define MATCH_C_JALR 0x9002 +#define MASK_C_JALR 0xf07f +#define MATCH_C_JR 0x8002 +#define MASK_C_JR 0xf07f +#define MATCH_C_LD 0x6000 +#define MASK_C_LD 0xe003 +#define MATCH_C_LDSP 0x6002 +#define MASK_C_LDSP 0xe003 #define MATCH_C_LI 0x4001 -#define MASK_C_LI 0xe003 +#define MASK_C_LI 0xe003 #define MATCH_C_LUI 0x6001 -#define MASK_C_LUI 0xe003 -#define MATCH_C_SRLI 0x8001 -#define MASK_C_SRLI 0xec03 -#define MATCH_C_SRAI 0x8401 -#define MASK_C_SRAI 0xec03 -#define 
MATCH_C_ANDI 0x8801 -#define MASK_C_ANDI 0xec03 -#define MATCH_C_SUB 0x8c01 -#define MASK_C_SUB 0xfc63 -#define MATCH_C_XOR 0x8c21 -#define MASK_C_XOR 0xfc63 -#define MATCH_C_OR 0x8c41 -#define MASK_C_OR 0xfc63 -#define MATCH_C_AND 0x8c61 -#define MASK_C_AND 0xfc63 -#define MATCH_C_J 0xa001 -#define MASK_C_J 0xe003 -#define MATCH_C_BEQZ 0xc001 -#define MASK_C_BEQZ 0xe003 -#define MATCH_C_BNEZ 0xe001 -#define MASK_C_BNEZ 0xe003 -#define MATCH_C_SLLI 0x2 -#define MASK_C_SLLI 0xe003 -#define MATCH_C_FLDSP 0x2002 -#define MASK_C_FLDSP 0xe003 +#define MASK_C_LUI 0xe003 +#define MATCH_C_LW 0x4000 +#define MASK_C_LW 0xe003 #define MATCH_C_LWSP 0x4002 -#define MASK_C_LWSP 0xe003 -#define MATCH_C_FLWSP 0x6002 -#define MASK_C_FLWSP 0xe003 +#define MASK_C_LWSP 0xe003 #define MATCH_C_MV 0x8002 -#define MASK_C_MV 0xf003 -#define MATCH_C_ADD 0x9002 -#define MASK_C_ADD 0xf003 -#define MATCH_C_FSDSP 0xa002 -#define MASK_C_FSDSP 0xe003 -#define MATCH_C_SWSP 0xc002 -#define MASK_C_SWSP 0xe003 -#define MATCH_C_FSWSP 0xe002 -#define MASK_C_FSWSP 0xe003 -#define MATCH_C_SRLI_RV32 0x8001 -#define MASK_C_SRLI_RV32 0xfc03 -#define MATCH_C_SRAI_RV32 0x8401 -#define MASK_C_SRAI_RV32 0xfc03 -#define MATCH_C_SLLI_RV32 0x2 -#define MASK_C_SLLI_RV32 0xf003 -#define MATCH_C_LD 0x6000 -#define MASK_C_LD 0xe003 +#define MASK_C_MV 0xf003 +#define MATCH_C_NOP 0x1 +#define MASK_C_NOP 0xef83 +#define MATCH_C_OR 0x8c41 +#define MASK_C_OR 0xfc63 #define MATCH_C_SD 0xe000 -#define MASK_C_SD 0xe003 -#define MATCH_C_SUBW 0x9c01 -#define MASK_C_SUBW 0xfc63 -#define MATCH_C_ADDW 0x9c21 -#define MASK_C_ADDW 0xfc63 -#define MATCH_C_ADDIW 0x2001 -#define MASK_C_ADDIW 0xe003 -#define MATCH_C_LDSP 0x6002 -#define MASK_C_LDSP 0xe003 +#define MASK_C_SD 0xe003 #define MATCH_C_SDSP 0xe002 -#define MASK_C_SDSP 0xe003 -#define MATCH_CUSTOM0 0xb -#define MASK_CUSTOM0 0x707f -#define MATCH_CUSTOM0_RS1 0x200b -#define MASK_CUSTOM0_RS1 0x707f -#define MATCH_CUSTOM0_RS1_RS2 0x300b -#define MASK_CUSTOM0_RS1_RS2 0x707f -#define MATCH_CUSTOM0_RD 0x400b -#define MASK_CUSTOM0_RD 0x707f -#define MATCH_CUSTOM0_RD_RS1 0x600b -#define MASK_CUSTOM0_RD_RS1 0x707f -#define MATCH_CUSTOM0_RD_RS1_RS2 0x700b -#define MASK_CUSTOM0_RD_RS1_RS2 0x707f -#define MATCH_CUSTOM1 0x2b -#define MASK_CUSTOM1 0x707f -#define MATCH_CUSTOM1_RS1 0x202b -#define MASK_CUSTOM1_RS1 0x707f -#define MATCH_CUSTOM1_RS1_RS2 0x302b -#define MASK_CUSTOM1_RS1_RS2 0x707f -#define MATCH_CUSTOM1_RD 0x402b -#define MASK_CUSTOM1_RD 0x707f -#define MATCH_CUSTOM1_RD_RS1 0x602b -#define MASK_CUSTOM1_RD_RS1 0x707f -#define MATCH_CUSTOM1_RD_RS1_RS2 0x702b -#define MASK_CUSTOM1_RD_RS1_RS2 0x707f -#define MATCH_CUSTOM2 0x5b -#define MASK_CUSTOM2 0x707f -#define MATCH_CUSTOM2_RS1 0x205b -#define MASK_CUSTOM2_RS1 0x707f -#define MATCH_CUSTOM2_RS1_RS2 0x305b -#define MASK_CUSTOM2_RS1_RS2 0x707f -#define MATCH_CUSTOM2_RD 0x405b -#define MASK_CUSTOM2_RD 0x707f -#define MATCH_CUSTOM2_RD_RS1 0x605b -#define MASK_CUSTOM2_RD_RS1 0x707f -#define MATCH_CUSTOM2_RD_RS1_RS2 0x705b -#define MASK_CUSTOM2_RD_RS1_RS2 0x707f -#define MATCH_CUSTOM3 0x7b -#define MASK_CUSTOM3 0x707f -#define MATCH_CUSTOM3_RS1 0x207b -#define MASK_CUSTOM3_RS1 0x707f -#define MATCH_CUSTOM3_RS1_RS2 0x307b -#define MASK_CUSTOM3_RS1_RS2 0x707f -#define MATCH_CUSTOM3_RD 0x407b -#define MASK_CUSTOM3_RD 0x707f -#define MATCH_CUSTOM3_RD_RS1 0x607b -#define MASK_CUSTOM3_RD_RS1 0x707f -#define MATCH_CUSTOM3_RD_RS1_RS2 0x707b -#define MASK_CUSTOM3_RD_RS1_RS2 0x707f -#define MATCH_VSETIVLI 0xc0007057 -#define MASK_VSETIVLI 0xc000707f -#define 
MATCH_VSETVLI 0x7057 -#define MASK_VSETVLI 0x8000707f -#define MATCH_VSETVL 0x80007057 -#define MASK_VSETVL 0xfe00707f -#define MATCH_VLM_V 0x2b00007 -#define MASK_VLM_V 0xfff0707f -#define MATCH_VSM_V 0x2b00027 -#define MASK_VSM_V 0xfff0707f -#define MATCH_VLE8_V 0x7 -#define MASK_VLE8_V 0x1df0707f -#define MATCH_VLE16_V 0x5007 -#define MASK_VLE16_V 0x1df0707f -#define MATCH_VLE32_V 0x6007 -#define MASK_VLE32_V 0x1df0707f -#define MATCH_VLE64_V 0x7007 -#define MASK_VLE64_V 0x1df0707f -#define MATCH_VLE128_V 0x10000007 -#define MASK_VLE128_V 0x1df0707f -#define MATCH_VLE256_V 0x10005007 -#define MASK_VLE256_V 0x1df0707f -#define MATCH_VLE512_V 0x10006007 -#define MASK_VLE512_V 0x1df0707f -#define MATCH_VLE1024_V 0x10007007 -#define MASK_VLE1024_V 0x1df0707f -#define MATCH_VSE8_V 0x27 -#define MASK_VSE8_V 0x1df0707f -#define MATCH_VSE16_V 0x5027 -#define MASK_VSE16_V 0x1df0707f -#define MATCH_VSE32_V 0x6027 -#define MASK_VSE32_V 0x1df0707f -#define MATCH_VSE64_V 0x7027 -#define MASK_VSE64_V 0x1df0707f -#define MATCH_VSE128_V 0x10000027 -#define MASK_VSE128_V 0x1df0707f -#define MATCH_VSE256_V 0x10005027 -#define MASK_VSE256_V 0x1df0707f -#define MATCH_VSE512_V 0x10006027 -#define MASK_VSE512_V 0x1df0707f -#define MATCH_VSE1024_V 0x10007027 -#define MASK_VSE1024_V 0x1df0707f -#define MATCH_VLUXEI8_V 0x4000007 -#define MASK_VLUXEI8_V 0x1c00707f -#define MATCH_VLUXEI16_V 0x4005007 -#define MASK_VLUXEI16_V 0x1c00707f -#define MATCH_VLUXEI32_V 0x4006007 -#define MASK_VLUXEI32_V 0x1c00707f -#define MATCH_VLUXEI64_V 0x4007007 -#define MASK_VLUXEI64_V 0x1c00707f -#define MATCH_VLUXEI128_V 0x14000007 -#define MASK_VLUXEI128_V 0x1c00707f -#define MATCH_VLUXEI256_V 0x14005007 -#define MASK_VLUXEI256_V 0x1c00707f -#define MATCH_VLUXEI512_V 0x14006007 -#define MASK_VLUXEI512_V 0x1c00707f -#define MATCH_VLUXEI1024_V 0x14007007 -#define MASK_VLUXEI1024_V 0x1c00707f -#define MATCH_VSUXEI8_V 0x4000027 -#define MASK_VSUXEI8_V 0x1c00707f -#define MATCH_VSUXEI16_V 0x4005027 -#define MASK_VSUXEI16_V 0x1c00707f -#define MATCH_VSUXEI32_V 0x4006027 -#define MASK_VSUXEI32_V 0x1c00707f -#define MATCH_VSUXEI64_V 0x4007027 -#define MASK_VSUXEI64_V 0x1c00707f -#define MATCH_VSUXEI128_V 0x14000027 -#define MASK_VSUXEI128_V 0x1c00707f -#define MATCH_VSUXEI256_V 0x14005027 -#define MASK_VSUXEI256_V 0x1c00707f -#define MATCH_VSUXEI512_V 0x14006027 -#define MASK_VSUXEI512_V 0x1c00707f -#define MATCH_VSUXEI1024_V 0x14007027 -#define MASK_VSUXEI1024_V 0x1c00707f -#define MATCH_VLSE8_V 0x8000007 -#define MASK_VLSE8_V 0x1c00707f -#define MATCH_VLSE16_V 0x8005007 -#define MASK_VLSE16_V 0x1c00707f -#define MATCH_VLSE32_V 0x8006007 -#define MASK_VLSE32_V 0x1c00707f -#define MATCH_VLSE64_V 0x8007007 -#define MASK_VLSE64_V 0x1c00707f -#define MATCH_VLSE128_V 0x18000007 -#define MASK_VLSE128_V 0x1c00707f -#define MATCH_VLSE256_V 0x18005007 -#define MASK_VLSE256_V 0x1c00707f -#define MATCH_VLSE512_V 0x18006007 -#define MASK_VLSE512_V 0x1c00707f -#define MATCH_VLSE1024_V 0x18007007 -#define MASK_VLSE1024_V 0x1c00707f -#define MATCH_VSSE8_V 0x8000027 -#define MASK_VSSE8_V 0x1c00707f -#define MATCH_VSSE16_V 0x8005027 -#define MASK_VSSE16_V 0x1c00707f -#define MATCH_VSSE32_V 0x8006027 -#define MASK_VSSE32_V 0x1c00707f -#define MATCH_VSSE64_V 0x8007027 -#define MASK_VSSE64_V 0x1c00707f -#define MATCH_VSSE128_V 0x18000027 -#define MASK_VSSE128_V 0x1c00707f -#define MATCH_VSSE256_V 0x18005027 -#define MASK_VSSE256_V 0x1c00707f -#define MATCH_VSSE512_V 0x18006027 -#define MASK_VSSE512_V 0x1c00707f -#define MATCH_VSSE1024_V 0x18007027 -#define 
MASK_VSSE1024_V 0x1c00707f -#define MATCH_VLOXEI8_V 0xc000007 -#define MASK_VLOXEI8_V 0x1c00707f -#define MATCH_VLOXEI16_V 0xc005007 -#define MASK_VLOXEI16_V 0x1c00707f -#define MATCH_VLOXEI32_V 0xc006007 -#define MASK_VLOXEI32_V 0x1c00707f -#define MATCH_VLOXEI64_V 0xc007007 -#define MASK_VLOXEI64_V 0x1c00707f -#define MATCH_VLOXEI128_V 0x1c000007 -#define MASK_VLOXEI128_V 0x1c00707f -#define MATCH_VLOXEI256_V 0x1c005007 -#define MASK_VLOXEI256_V 0x1c00707f -#define MATCH_VLOXEI512_V 0x1c006007 -#define MASK_VLOXEI512_V 0x1c00707f -#define MATCH_VLOXEI1024_V 0x1c007007 -#define MASK_VLOXEI1024_V 0x1c00707f -#define MATCH_VSOXEI8_V 0xc000027 -#define MASK_VSOXEI8_V 0x1c00707f -#define MATCH_VSOXEI16_V 0xc005027 -#define MASK_VSOXEI16_V 0x1c00707f -#define MATCH_VSOXEI32_V 0xc006027 -#define MASK_VSOXEI32_V 0x1c00707f -#define MATCH_VSOXEI64_V 0xc007027 -#define MASK_VSOXEI64_V 0x1c00707f -#define MATCH_VSOXEI128_V 0x1c000027 -#define MASK_VSOXEI128_V 0x1c00707f -#define MATCH_VSOXEI256_V 0x1c005027 -#define MASK_VSOXEI256_V 0x1c00707f -#define MATCH_VSOXEI512_V 0x1c006027 -#define MASK_VSOXEI512_V 0x1c00707f -#define MATCH_VSOXEI1024_V 0x1c007027 -#define MASK_VSOXEI1024_V 0x1c00707f -#define MATCH_VLE8FF_V 0x1000007 -#define MASK_VLE8FF_V 0x1df0707f -#define MATCH_VLE16FF_V 0x1005007 -#define MASK_VLE16FF_V 0x1df0707f -#define MATCH_VLE32FF_V 0x1006007 -#define MASK_VLE32FF_V 0x1df0707f -#define MATCH_VLE64FF_V 0x1007007 -#define MASK_VLE64FF_V 0x1df0707f -#define MATCH_VLE128FF_V 0x11000007 -#define MASK_VLE128FF_V 0x1df0707f -#define MATCH_VLE256FF_V 0x11005007 -#define MASK_VLE256FF_V 0x1df0707f -#define MATCH_VLE512FF_V 0x11006007 -#define MASK_VLE512FF_V 0x1df0707f -#define MATCH_VLE1024FF_V 0x11007007 -#define MASK_VLE1024FF_V 0x1df0707f -#define MATCH_VL1RE8_V 0x2800007 -#define MASK_VL1RE8_V 0xfff0707f -#define MATCH_VL1RE16_V 0x2805007 -#define MASK_VL1RE16_V 0xfff0707f -#define MATCH_VL1RE32_V 0x2806007 -#define MASK_VL1RE32_V 0xfff0707f -#define MATCH_VL1RE64_V 0x2807007 -#define MASK_VL1RE64_V 0xfff0707f -#define MATCH_VL2RE8_V 0x22800007 -#define MASK_VL2RE8_V 0xfff0707f -#define MATCH_VL2RE16_V 0x22805007 -#define MASK_VL2RE16_V 0xfff0707f -#define MATCH_VL2RE32_V 0x22806007 -#define MASK_VL2RE32_V 0xfff0707f -#define MATCH_VL2RE64_V 0x22807007 -#define MASK_VL2RE64_V 0xfff0707f -#define MATCH_VL4RE8_V 0x62800007 -#define MASK_VL4RE8_V 0xfff0707f -#define MATCH_VL4RE16_V 0x62805007 -#define MASK_VL4RE16_V 0xfff0707f -#define MATCH_VL4RE32_V 0x62806007 -#define MASK_VL4RE32_V 0xfff0707f -#define MATCH_VL4RE64_V 0x62807007 -#define MASK_VL4RE64_V 0xfff0707f -#define MATCH_VL8RE8_V 0xe2800007 -#define MASK_VL8RE8_V 0xfff0707f -#define MATCH_VL8RE16_V 0xe2805007 -#define MASK_VL8RE16_V 0xfff0707f -#define MATCH_VL8RE32_V 0xe2806007 -#define MASK_VL8RE32_V 0xfff0707f -#define MATCH_VL8RE64_V 0xe2807007 -#define MASK_VL8RE64_V 0xfff0707f -#define MATCH_VS1R_V 0x2800027 -#define MASK_VS1R_V 0xfff0707f -#define MATCH_VS2R_V 0x22800027 -#define MASK_VS2R_V 0xfff0707f -#define MATCH_VS4R_V 0x62800027 -#define MASK_VS4R_V 0xfff0707f -#define MATCH_VS8R_V 0xe2800027 -#define MASK_VS8R_V 0xfff0707f -#define MATCH_VFADD_VF 0x5057 -#define MASK_VFADD_VF 0xfc00707f -#define MATCH_VFSUB_VF 0x8005057 -#define MASK_VFSUB_VF 0xfc00707f -#define MATCH_VFMIN_VF 0x10005057 -#define MASK_VFMIN_VF 0xfc00707f -#define MATCH_VFMAX_VF 0x18005057 -#define MASK_VFMAX_VF 0xfc00707f -#define MATCH_VFSGNJ_VF 0x20005057 -#define MASK_VFSGNJ_VF 0xfc00707f -#define MATCH_VFSGNJN_VF 0x24005057 -#define 
MASK_VFSGNJN_VF 0xfc00707f -#define MATCH_VFSGNJX_VF 0x28005057 -#define MASK_VFSGNJX_VF 0xfc00707f -#define MATCH_VFSLIDE1UP_VF 0x38005057 -#define MASK_VFSLIDE1UP_VF 0xfc00707f -#define MATCH_VFSLIDE1DOWN_VF 0x3c005057 -#define MASK_VFSLIDE1DOWN_VF 0xfc00707f -#define MATCH_VFMV_S_F 0x42005057 -#define MASK_VFMV_S_F 0xfff0707f -#define MATCH_VFMERGE_VFM 0x5c005057 -#define MASK_VFMERGE_VFM 0xfe00707f -#define MATCH_VFMV_V_F 0x5e005057 -#define MASK_VFMV_V_F 0xfff0707f -#define MATCH_VMFEQ_VF 0x60005057 -#define MASK_VMFEQ_VF 0xfc00707f -#define MATCH_VMFLE_VF 0x64005057 -#define MASK_VMFLE_VF 0xfc00707f -#define MATCH_VMFLT_VF 0x6c005057 -#define MASK_VMFLT_VF 0xfc00707f -#define MATCH_VMFNE_VF 0x70005057 -#define MASK_VMFNE_VF 0xfc00707f -#define MATCH_VMFGT_VF 0x74005057 -#define MASK_VMFGT_VF 0xfc00707f -#define MATCH_VMFGE_VF 0x7c005057 -#define MASK_VMFGE_VF 0xfc00707f -#define MATCH_VFDIV_VF 0x80005057 -#define MASK_VFDIV_VF 0xfc00707f -#define MATCH_VFRDIV_VF 0x84005057 -#define MASK_VFRDIV_VF 0xfc00707f -#define MATCH_VFMUL_VF 0x90005057 -#define MASK_VFMUL_VF 0xfc00707f -#define MATCH_VFRSUB_VF 0x9c005057 -#define MASK_VFRSUB_VF 0xfc00707f -#define MATCH_VFMADD_VF 0xa0005057 -#define MASK_VFMADD_VF 0xfc00707f -#define MATCH_VFNMADD_VF 0xa4005057 -#define MASK_VFNMADD_VF 0xfc00707f -#define MATCH_VFMSUB_VF 0xa8005057 -#define MASK_VFMSUB_VF 0xfc00707f -#define MATCH_VFNMSUB_VF 0xac005057 -#define MASK_VFNMSUB_VF 0xfc00707f -#define MATCH_VFMACC_VF 0xb0005057 -#define MASK_VFMACC_VF 0xfc00707f -#define MATCH_VFNMACC_VF 0xb4005057 -#define MASK_VFNMACC_VF 0xfc00707f -#define MATCH_VFMSAC_VF 0xb8005057 -#define MASK_VFMSAC_VF 0xfc00707f -#define MATCH_VFNMSAC_VF 0xbc005057 -#define MASK_VFNMSAC_VF 0xfc00707f -#define MATCH_VFWADD_VF 0xc0005057 -#define MASK_VFWADD_VF 0xfc00707f -#define MATCH_VFWSUB_VF 0xc8005057 -#define MASK_VFWSUB_VF 0xfc00707f -#define MATCH_VFWADD_WF 0xd0005057 -#define MASK_VFWADD_WF 0xfc00707f -#define MATCH_VFWSUB_WF 0xd8005057 -#define MASK_VFWSUB_WF 0xfc00707f -#define MATCH_VFWMUL_VF 0xe0005057 -#define MASK_VFWMUL_VF 0xfc00707f -#define MATCH_VFWMACC_VF 0xf0005057 -#define MASK_VFWMACC_VF 0xfc00707f -#define MATCH_VFWNMACC_VF 0xf4005057 -#define MASK_VFWNMACC_VF 0xfc00707f -#define MATCH_VFWMSAC_VF 0xf8005057 -#define MASK_VFWMSAC_VF 0xfc00707f -#define MATCH_VFWNMSAC_VF 0xfc005057 -#define MASK_VFWNMSAC_VF 0xfc00707f -#define MATCH_VFADD_VV 0x1057 -#define MASK_VFADD_VV 0xfc00707f -#define MATCH_VFREDUSUM_VS 0x4001057 -#define MASK_VFREDUSUM_VS 0xfc00707f -#define MATCH_VFSUB_VV 0x8001057 -#define MASK_VFSUB_VV 0xfc00707f -#define MATCH_VFREDOSUM_VS 0xc001057 -#define MASK_VFREDOSUM_VS 0xfc00707f -#define MATCH_VFMIN_VV 0x10001057 -#define MASK_VFMIN_VV 0xfc00707f -#define MATCH_VFREDMIN_VS 0x14001057 -#define MASK_VFREDMIN_VS 0xfc00707f -#define MATCH_VFMAX_VV 0x18001057 -#define MASK_VFMAX_VV 0xfc00707f -#define MATCH_VFREDMAX_VS 0x1c001057 -#define MASK_VFREDMAX_VS 0xfc00707f -#define MATCH_VFSGNJ_VV 0x20001057 -#define MASK_VFSGNJ_VV 0xfc00707f -#define MATCH_VFSGNJN_VV 0x24001057 -#define MASK_VFSGNJN_VV 0xfc00707f -#define MATCH_VFSGNJX_VV 0x28001057 -#define MASK_VFSGNJX_VV 0xfc00707f -#define MATCH_VFMV_F_S 0x42001057 -#define MASK_VFMV_F_S 0xfe0ff07f -#define MATCH_VMFEQ_VV 0x60001057 -#define MASK_VMFEQ_VV 0xfc00707f -#define MATCH_VMFLE_VV 0x64001057 -#define MASK_VMFLE_VV 0xfc00707f -#define MATCH_VMFLT_VV 0x6c001057 -#define MASK_VMFLT_VV 0xfc00707f -#define MATCH_VMFNE_VV 0x70001057 -#define MASK_VMFNE_VV 0xfc00707f -#define MATCH_VFDIV_VV 
0x80001057 -#define MASK_VFDIV_VV 0xfc00707f -#define MATCH_VFMUL_VV 0x90001057 -#define MASK_VFMUL_VV 0xfc00707f -#define MATCH_VFMADD_VV 0xa0001057 -#define MASK_VFMADD_VV 0xfc00707f -#define MATCH_VFNMADD_VV 0xa4001057 -#define MASK_VFNMADD_VV 0xfc00707f -#define MATCH_VFMSUB_VV 0xa8001057 -#define MASK_VFMSUB_VV 0xfc00707f -#define MATCH_VFNMSUB_VV 0xac001057 -#define MASK_VFNMSUB_VV 0xfc00707f -#define MATCH_VFMACC_VV 0xb0001057 -#define MASK_VFMACC_VV 0xfc00707f -#define MATCH_VFNMACC_VV 0xb4001057 -#define MASK_VFNMACC_VV 0xfc00707f -#define MATCH_VFMSAC_VV 0xb8001057 -#define MASK_VFMSAC_VV 0xfc00707f -#define MATCH_VFNMSAC_VV 0xbc001057 -#define MASK_VFNMSAC_VV 0xfc00707f -#define MATCH_VFCVT_XU_F_V 0x48001057 -#define MASK_VFCVT_XU_F_V 0xfc0ff07f -#define MATCH_VFCVT_X_F_V 0x48009057 -#define MASK_VFCVT_X_F_V 0xfc0ff07f -#define MATCH_VFCVT_F_XU_V 0x48011057 -#define MASK_VFCVT_F_XU_V 0xfc0ff07f -#define MATCH_VFCVT_F_X_V 0x48019057 -#define MASK_VFCVT_F_X_V 0xfc0ff07f -#define MATCH_VFCVT_RTZ_XU_F_V 0x48031057 -#define MASK_VFCVT_RTZ_XU_F_V 0xfc0ff07f -#define MATCH_VFCVT_RTZ_X_F_V 0x48039057 -#define MASK_VFCVT_RTZ_X_F_V 0xfc0ff07f -#define MATCH_VFWCVT_XU_F_V 0x48041057 -#define MASK_VFWCVT_XU_F_V 0xfc0ff07f -#define MATCH_VFWCVT_X_F_V 0x48049057 -#define MASK_VFWCVT_X_F_V 0xfc0ff07f -#define MATCH_VFWCVT_F_XU_V 0x48051057 -#define MASK_VFWCVT_F_XU_V 0xfc0ff07f -#define MATCH_VFWCVT_F_X_V 0x48059057 -#define MASK_VFWCVT_F_X_V 0xfc0ff07f -#define MATCH_VFWCVT_F_F_V 0x48061057 -#define MASK_VFWCVT_F_F_V 0xfc0ff07f -#define MATCH_VFWCVT_RTZ_XU_F_V 0x48071057 -#define MASK_VFWCVT_RTZ_XU_F_V 0xfc0ff07f -#define MATCH_VFWCVT_RTZ_X_F_V 0x48079057 -#define MASK_VFWCVT_RTZ_X_F_V 0xfc0ff07f -#define MATCH_VFNCVT_XU_F_W 0x48081057 -#define MASK_VFNCVT_XU_F_W 0xfc0ff07f -#define MATCH_VFNCVT_X_F_W 0x48089057 -#define MASK_VFNCVT_X_F_W 0xfc0ff07f -#define MATCH_VFNCVT_F_XU_W 0x48091057 -#define MASK_VFNCVT_F_XU_W 0xfc0ff07f -#define MATCH_VFNCVT_F_X_W 0x48099057 -#define MASK_VFNCVT_F_X_W 0xfc0ff07f -#define MATCH_VFNCVT_F_F_W 0x480a1057 -#define MASK_VFNCVT_F_F_W 0xfc0ff07f -#define MATCH_VFNCVT_ROD_F_F_W 0x480a9057 -#define MASK_VFNCVT_ROD_F_F_W 0xfc0ff07f -#define MATCH_VFNCVT_RTZ_XU_F_W 0x480b1057 -#define MASK_VFNCVT_RTZ_XU_F_W 0xfc0ff07f -#define MATCH_VFNCVT_RTZ_X_F_W 0x480b9057 -#define MASK_VFNCVT_RTZ_X_F_W 0xfc0ff07f -#define MATCH_VFSQRT_V 0x4c001057 -#define MASK_VFSQRT_V 0xfc0ff07f -#define MATCH_VFRSQRT7_V 0x4c021057 -#define MASK_VFRSQRT7_V 0xfc0ff07f -#define MATCH_VFREC7_V 0x4c029057 -#define MASK_VFREC7_V 0xfc0ff07f -#define MATCH_VFCLASS_V 0x4c081057 -#define MASK_VFCLASS_V 0xfc0ff07f -#define MATCH_VFWADD_VV 0xc0001057 -#define MASK_VFWADD_VV 0xfc00707f -#define MATCH_VFWREDUSUM_VS 0xc4001057 -#define MASK_VFWREDUSUM_VS 0xfc00707f -#define MATCH_VFWSUB_VV 0xc8001057 -#define MASK_VFWSUB_VV 0xfc00707f -#define MATCH_VFWREDOSUM_VS 0xcc001057 -#define MASK_VFWREDOSUM_VS 0xfc00707f -#define MATCH_VFWADD_WV 0xd0001057 -#define MASK_VFWADD_WV 0xfc00707f -#define MATCH_VFWSUB_WV 0xd8001057 -#define MASK_VFWSUB_WV 0xfc00707f -#define MATCH_VFWMUL_VV 0xe0001057 -#define MASK_VFWMUL_VV 0xfc00707f -#define MATCH_VFWMACC_VV 0xf0001057 -#define MASK_VFWMACC_VV 0xfc00707f -#define MATCH_VFWNMACC_VV 0xf4001057 -#define MASK_VFWNMACC_VV 0xfc00707f -#define MATCH_VFWMSAC_VV 0xf8001057 -#define MASK_VFWMSAC_VV 0xfc00707f -#define MATCH_VFWNMSAC_VV 0xfc001057 -#define MASK_VFWNMSAC_VV 0xfc00707f -#define MATCH_VADD_VX 0x4057 -#define MASK_VADD_VX 0xfc00707f -#define MATCH_VSUB_VX 
0x8004057 -#define MASK_VSUB_VX 0xfc00707f -#define MATCH_VRSUB_VX 0xc004057 -#define MASK_VRSUB_VX 0xfc00707f -#define MATCH_VMINU_VX 0x10004057 -#define MASK_VMINU_VX 0xfc00707f -#define MATCH_VMIN_VX 0x14004057 -#define MASK_VMIN_VX 0xfc00707f -#define MATCH_VMAXU_VX 0x18004057 -#define MASK_VMAXU_VX 0xfc00707f -#define MATCH_VMAX_VX 0x1c004057 -#define MASK_VMAX_VX 0xfc00707f -#define MATCH_VAND_VX 0x24004057 -#define MASK_VAND_VX 0xfc00707f -#define MATCH_VOR_VX 0x28004057 -#define MASK_VOR_VX 0xfc00707f -#define MATCH_VXOR_VX 0x2c004057 -#define MASK_VXOR_VX 0xfc00707f -#define MATCH_VRGATHER_VX 0x30004057 -#define MASK_VRGATHER_VX 0xfc00707f -#define MATCH_VSLIDEUP_VX 0x38004057 -#define MASK_VSLIDEUP_VX 0xfc00707f -#define MATCH_VSLIDEDOWN_VX 0x3c004057 -#define MASK_VSLIDEDOWN_VX 0xfc00707f -#define MATCH_VADC_VXM 0x40004057 -#define MASK_VADC_VXM 0xfe00707f -#define MATCH_VMADC_VXM 0x44004057 -#define MASK_VMADC_VXM 0xfe00707f -#define MATCH_VMADC_VX 0x46004057 -#define MASK_VMADC_VX 0xfe00707f -#define MATCH_VSBC_VXM 0x48004057 -#define MASK_VSBC_VXM 0xfe00707f -#define MATCH_VMSBC_VXM 0x4c004057 -#define MASK_VMSBC_VXM 0xfe00707f -#define MATCH_VMSBC_VX 0x4e004057 -#define MASK_VMSBC_VX 0xfe00707f -#define MATCH_VMERGE_VXM 0x5c004057 -#define MASK_VMERGE_VXM 0xfe00707f -#define MATCH_VMV_V_X 0x5e004057 -#define MASK_VMV_V_X 0xfff0707f -#define MATCH_VMSEQ_VX 0x60004057 -#define MASK_VMSEQ_VX 0xfc00707f -#define MATCH_VMSNE_VX 0x64004057 -#define MASK_VMSNE_VX 0xfc00707f -#define MATCH_VMSLTU_VX 0x68004057 -#define MASK_VMSLTU_VX 0xfc00707f -#define MATCH_VMSLT_VX 0x6c004057 -#define MASK_VMSLT_VX 0xfc00707f -#define MATCH_VMSLEU_VX 0x70004057 -#define MASK_VMSLEU_VX 0xfc00707f -#define MATCH_VMSLE_VX 0x74004057 -#define MASK_VMSLE_VX 0xfc00707f -#define MATCH_VMSGTU_VX 0x78004057 -#define MASK_VMSGTU_VX 0xfc00707f -#define MATCH_VMSGT_VX 0x7c004057 -#define MASK_VMSGT_VX 0xfc00707f -#define MATCH_VSADDU_VX 0x80004057 -#define MASK_VSADDU_VX 0xfc00707f -#define MATCH_VSADD_VX 0x84004057 -#define MASK_VSADD_VX 0xfc00707f -#define MATCH_VSSUBU_VX 0x88004057 -#define MASK_VSSUBU_VX 0xfc00707f -#define MATCH_VSSUB_VX 0x8c004057 -#define MASK_VSSUB_VX 0xfc00707f -#define MATCH_VSLL_VX 0x94004057 -#define MASK_VSLL_VX 0xfc00707f -#define MATCH_VSMUL_VX 0x9c004057 -#define MASK_VSMUL_VX 0xfc00707f -#define MATCH_VSRL_VX 0xa0004057 -#define MASK_VSRL_VX 0xfc00707f -#define MATCH_VSRA_VX 0xa4004057 -#define MASK_VSRA_VX 0xfc00707f -#define MATCH_VSSRL_VX 0xa8004057 -#define MASK_VSSRL_VX 0xfc00707f -#define MATCH_VSSRA_VX 0xac004057 -#define MASK_VSSRA_VX 0xfc00707f -#define MATCH_VNSRL_WX 0xb0004057 -#define MASK_VNSRL_WX 0xfc00707f -#define MATCH_VNSRA_WX 0xb4004057 -#define MASK_VNSRA_WX 0xfc00707f -#define MATCH_VNCLIPU_WX 0xb8004057 -#define MASK_VNCLIPU_WX 0xfc00707f -#define MATCH_VNCLIP_WX 0xbc004057 -#define MASK_VNCLIP_WX 0xfc00707f -#define MATCH_VADD_VV 0x57 -#define MASK_VADD_VV 0xfc00707f -#define MATCH_VSUB_VV 0x8000057 -#define MASK_VSUB_VV 0xfc00707f -#define MATCH_VMINU_VV 0x10000057 -#define MASK_VMINU_VV 0xfc00707f -#define MATCH_VMIN_VV 0x14000057 -#define MASK_VMIN_VV 0xfc00707f -#define MATCH_VMAXU_VV 0x18000057 -#define MASK_VMAXU_VV 0xfc00707f -#define MATCH_VMAX_VV 0x1c000057 -#define MASK_VMAX_VV 0xfc00707f -#define MATCH_VAND_VV 0x24000057 -#define MASK_VAND_VV 0xfc00707f -#define MATCH_VOR_VV 0x28000057 -#define MASK_VOR_VV 0xfc00707f -#define MATCH_VXOR_VV 0x2c000057 -#define MASK_VXOR_VV 0xfc00707f -#define MATCH_VRGATHER_VV 0x30000057 -#define 
MASK_VRGATHER_VV 0xfc00707f -#define MATCH_VRGATHEREI16_VV 0x38000057 -#define MASK_VRGATHEREI16_VV 0xfc00707f -#define MATCH_VADC_VVM 0x40000057 -#define MASK_VADC_VVM 0xfe00707f -#define MATCH_VMADC_VVM 0x44000057 -#define MASK_VMADC_VVM 0xfe00707f -#define MATCH_VMADC_VV 0x46000057 -#define MASK_VMADC_VV 0xfe00707f -#define MATCH_VSBC_VVM 0x48000057 -#define MASK_VSBC_VVM 0xfe00707f -#define MATCH_VMSBC_VVM 0x4c000057 -#define MASK_VMSBC_VVM 0xfe00707f -#define MATCH_VMSBC_VV 0x4e000057 -#define MASK_VMSBC_VV 0xfe00707f -#define MATCH_VMERGE_VVM 0x5c000057 -#define MASK_VMERGE_VVM 0xfe00707f -#define MATCH_VMV_V_V 0x5e000057 -#define MASK_VMV_V_V 0xfff0707f -#define MATCH_VMSEQ_VV 0x60000057 -#define MASK_VMSEQ_VV 0xfc00707f -#define MATCH_VMSNE_VV 0x64000057 -#define MASK_VMSNE_VV 0xfc00707f -#define MATCH_VMSLTU_VV 0x68000057 -#define MASK_VMSLTU_VV 0xfc00707f -#define MATCH_VMSLT_VV 0x6c000057 -#define MASK_VMSLT_VV 0xfc00707f -#define MATCH_VMSLEU_VV 0x70000057 -#define MASK_VMSLEU_VV 0xfc00707f -#define MATCH_VMSLE_VV 0x74000057 -#define MASK_VMSLE_VV 0xfc00707f -#define MATCH_VSADDU_VV 0x80000057 -#define MASK_VSADDU_VV 0xfc00707f -#define MATCH_VSADD_VV 0x84000057 -#define MASK_VSADD_VV 0xfc00707f -#define MATCH_VSSUBU_VV 0x88000057 -#define MASK_VSSUBU_VV 0xfc00707f -#define MATCH_VSSUB_VV 0x8c000057 -#define MASK_VSSUB_VV 0xfc00707f -#define MATCH_VSLL_VV 0x94000057 -#define MASK_VSLL_VV 0xfc00707f -#define MATCH_VSMUL_VV 0x9c000057 -#define MASK_VSMUL_VV 0xfc00707f -#define MATCH_VSRL_VV 0xa0000057 -#define MASK_VSRL_VV 0xfc00707f -#define MATCH_VSRA_VV 0xa4000057 -#define MASK_VSRA_VV 0xfc00707f -#define MATCH_VSSRL_VV 0xa8000057 -#define MASK_VSSRL_VV 0xfc00707f -#define MATCH_VSSRA_VV 0xac000057 -#define MASK_VSSRA_VV 0xfc00707f -#define MATCH_VNSRL_WV 0xb0000057 -#define MASK_VNSRL_WV 0xfc00707f -#define MATCH_VNSRA_WV 0xb4000057 -#define MASK_VNSRA_WV 0xfc00707f -#define MATCH_VNCLIPU_WV 0xb8000057 -#define MASK_VNCLIPU_WV 0xfc00707f -#define MATCH_VNCLIP_WV 0xbc000057 -#define MASK_VNCLIP_WV 0xfc00707f -#define MATCH_VWREDSUMU_VS 0xc0000057 -#define MASK_VWREDSUMU_VS 0xfc00707f -#define MATCH_VWREDSUM_VS 0xc4000057 -#define MASK_VWREDSUM_VS 0xfc00707f -#define MATCH_VADD_VI 0x3057 -#define MASK_VADD_VI 0xfc00707f -#define MATCH_VRSUB_VI 0xc003057 -#define MASK_VRSUB_VI 0xfc00707f -#define MATCH_VAND_VI 0x24003057 -#define MASK_VAND_VI 0xfc00707f -#define MATCH_VOR_VI 0x28003057 -#define MASK_VOR_VI 0xfc00707f -#define MATCH_VXOR_VI 0x2c003057 -#define MASK_VXOR_VI 0xfc00707f -#define MATCH_VRGATHER_VI 0x30003057 -#define MASK_VRGATHER_VI 0xfc00707f -#define MATCH_VSLIDEUP_VI 0x38003057 -#define MASK_VSLIDEUP_VI 0xfc00707f -#define MATCH_VSLIDEDOWN_VI 0x3c003057 -#define MASK_VSLIDEDOWN_VI 0xfc00707f -#define MATCH_VADC_VIM 0x40003057 -#define MASK_VADC_VIM 0xfe00707f -#define MATCH_VMADC_VIM 0x44003057 -#define MASK_VMADC_VIM 0xfe00707f -#define MATCH_VMADC_VI 0x46003057 -#define MASK_VMADC_VI 0xfe00707f -#define MATCH_VMERGE_VIM 0x5c003057 -#define MASK_VMERGE_VIM 0xfe00707f -#define MATCH_VMV_V_I 0x5e003057 -#define MASK_VMV_V_I 0xfff0707f -#define MATCH_VMSEQ_VI 0x60003057 -#define MASK_VMSEQ_VI 0xfc00707f -#define MATCH_VMSNE_VI 0x64003057 -#define MASK_VMSNE_VI 0xfc00707f -#define MATCH_VMSLEU_VI 0x70003057 -#define MASK_VMSLEU_VI 0xfc00707f -#define MATCH_VMSLE_VI 0x74003057 -#define MASK_VMSLE_VI 0xfc00707f -#define MATCH_VMSGTU_VI 0x78003057 -#define MASK_VMSGTU_VI 0xfc00707f -#define MATCH_VMSGT_VI 0x7c003057 -#define MASK_VMSGT_VI 0xfc00707f -#define 
MATCH_VSADDU_VI 0x80003057 -#define MASK_VSADDU_VI 0xfc00707f -#define MATCH_VSADD_VI 0x84003057 -#define MASK_VSADD_VI 0xfc00707f -#define MATCH_VSLL_VI 0x94003057 -#define MASK_VSLL_VI 0xfc00707f -#define MATCH_VMV1R_V 0x9e003057 -#define MASK_VMV1R_V 0xfe0ff07f -#define MATCH_VMV2R_V 0x9e00b057 -#define MASK_VMV2R_V 0xfe0ff07f -#define MATCH_VMV4R_V 0x9e01b057 -#define MASK_VMV4R_V 0xfe0ff07f -#define MATCH_VMV8R_V 0x9e03b057 -#define MASK_VMV8R_V 0xfe0ff07f -#define MATCH_VSRL_VI 0xa0003057 -#define MASK_VSRL_VI 0xfc00707f -#define MATCH_VSRA_VI 0xa4003057 -#define MASK_VSRA_VI 0xfc00707f -#define MATCH_VSSRL_VI 0xa8003057 -#define MASK_VSSRL_VI 0xfc00707f -#define MATCH_VSSRA_VI 0xac003057 -#define MASK_VSSRA_VI 0xfc00707f -#define MATCH_VNSRL_WI 0xb0003057 -#define MASK_VNSRL_WI 0xfc00707f -#define MATCH_VNSRA_WI 0xb4003057 -#define MASK_VNSRA_WI 0xfc00707f -#define MATCH_VNCLIPU_WI 0xb8003057 -#define MASK_VNCLIPU_WI 0xfc00707f -#define MATCH_VNCLIP_WI 0xbc003057 -#define MASK_VNCLIP_WI 0xfc00707f -#define MATCH_VREDSUM_VS 0x2057 -#define MASK_VREDSUM_VS 0xfc00707f -#define MATCH_VREDAND_VS 0x4002057 -#define MASK_VREDAND_VS 0xfc00707f -#define MATCH_VREDOR_VS 0x8002057 -#define MASK_VREDOR_VS 0xfc00707f -#define MATCH_VREDXOR_VS 0xc002057 -#define MASK_VREDXOR_VS 0xfc00707f -#define MATCH_VREDMINU_VS 0x10002057 -#define MASK_VREDMINU_VS 0xfc00707f -#define MATCH_VREDMIN_VS 0x14002057 -#define MASK_VREDMIN_VS 0xfc00707f -#define MATCH_VREDMAXU_VS 0x18002057 -#define MASK_VREDMAXU_VS 0xfc00707f -#define MATCH_VREDMAX_VS 0x1c002057 -#define MASK_VREDMAX_VS 0xfc00707f -#define MATCH_VAADDU_VV 0x20002057 -#define MASK_VAADDU_VV 0xfc00707f -#define MATCH_VAADD_VV 0x24002057 -#define MASK_VAADD_VV 0xfc00707f -#define MATCH_VASUBU_VV 0x28002057 -#define MASK_VASUBU_VV 0xfc00707f -#define MATCH_VASUB_VV 0x2c002057 -#define MASK_VASUB_VV 0xfc00707f -#define MATCH_VMV_X_S 0x42002057 -#define MASK_VMV_X_S 0xfe0ff07f -#define MATCH_VZEXT_VF8 0x48012057 -#define MASK_VZEXT_VF8 0xfc0ff07f -#define MATCH_VSEXT_VF8 0x4801a057 -#define MASK_VSEXT_VF8 0xfc0ff07f -#define MATCH_VZEXT_VF4 0x48022057 -#define MASK_VZEXT_VF4 0xfc0ff07f -#define MATCH_VSEXT_VF4 0x4802a057 -#define MASK_VSEXT_VF4 0xfc0ff07f -#define MATCH_VZEXT_VF2 0x48032057 -#define MASK_VZEXT_VF2 0xfc0ff07f -#define MATCH_VSEXT_VF2 0x4803a057 -#define MASK_VSEXT_VF2 0xfc0ff07f -#define MATCH_VCOMPRESS_VM 0x5e002057 -#define MASK_VCOMPRESS_VM 0xfe00707f -#define MATCH_VMANDN_MM 0x60002057 -#define MASK_VMANDN_MM 0xfc00707f -#define MATCH_VMAND_MM 0x64002057 -#define MASK_VMAND_MM 0xfc00707f -#define MATCH_VMOR_MM 0x68002057 -#define MASK_VMOR_MM 0xfc00707f -#define MATCH_VMXOR_MM 0x6c002057 -#define MASK_VMXOR_MM 0xfc00707f -#define MATCH_VMORN_MM 0x70002057 -#define MASK_VMORN_MM 0xfc00707f -#define MATCH_VMNAND_MM 0x74002057 -#define MASK_VMNAND_MM 0xfc00707f -#define MATCH_VMNOR_MM 0x78002057 -#define MASK_VMNOR_MM 0xfc00707f -#define MATCH_VMXNOR_MM 0x7c002057 -#define MASK_VMXNOR_MM 0xfc00707f -#define MATCH_VMSBF_M 0x5000a057 -#define MASK_VMSBF_M 0xfc0ff07f -#define MATCH_VMSOF_M 0x50012057 -#define MASK_VMSOF_M 0xfc0ff07f -#define MATCH_VMSIF_M 0x5001a057 -#define MASK_VMSIF_M 0xfc0ff07f -#define MATCH_VIOTA_M 0x50082057 -#define MASK_VIOTA_M 0xfc0ff07f -#define MATCH_VID_V 0x5008a057 -#define MASK_VID_V 0xfdfff07f -#define MATCH_VCPOP_M 0x40082057 -#define MASK_VCPOP_M 0xfc0ff07f -#define MATCH_VFIRST_M 0x4008a057 -#define MASK_VFIRST_M 0xfc0ff07f -#define MATCH_VDIVU_VV 0x80002057 -#define MASK_VDIVU_VV 0xfc00707f -#define 
MATCH_VDIV_VV 0x84002057 -#define MASK_VDIV_VV 0xfc00707f -#define MATCH_VREMU_VV 0x88002057 -#define MASK_VREMU_VV 0xfc00707f -#define MATCH_VREM_VV 0x8c002057 -#define MASK_VREM_VV 0xfc00707f -#define MATCH_VMULHU_VV 0x90002057 -#define MASK_VMULHU_VV 0xfc00707f -#define MATCH_VMUL_VV 0x94002057 -#define MASK_VMUL_VV 0xfc00707f -#define MATCH_VMULHSU_VV 0x98002057 -#define MASK_VMULHSU_VV 0xfc00707f -#define MATCH_VMULH_VV 0x9c002057 -#define MASK_VMULH_VV 0xfc00707f -#define MATCH_VMADD_VV 0xa4002057 -#define MASK_VMADD_VV 0xfc00707f -#define MATCH_VNMSUB_VV 0xac002057 -#define MASK_VNMSUB_VV 0xfc00707f -#define MATCH_VMACC_VV 0xb4002057 -#define MASK_VMACC_VV 0xfc00707f -#define MATCH_VNMSAC_VV 0xbc002057 -#define MASK_VNMSAC_VV 0xfc00707f -#define MATCH_VWADDU_VV 0xc0002057 -#define MASK_VWADDU_VV 0xfc00707f -#define MATCH_VWADD_VV 0xc4002057 -#define MASK_VWADD_VV 0xfc00707f -#define MATCH_VWSUBU_VV 0xc8002057 -#define MASK_VWSUBU_VV 0xfc00707f -#define MATCH_VWSUB_VV 0xcc002057 -#define MASK_VWSUB_VV 0xfc00707f -#define MATCH_VWADDU_WV 0xd0002057 -#define MASK_VWADDU_WV 0xfc00707f -#define MATCH_VWADD_WV 0xd4002057 -#define MASK_VWADD_WV 0xfc00707f -#define MATCH_VWSUBU_WV 0xd8002057 -#define MASK_VWSUBU_WV 0xfc00707f -#define MATCH_VWSUB_WV 0xdc002057 -#define MASK_VWSUB_WV 0xfc00707f -#define MATCH_VWMULU_VV 0xe0002057 -#define MASK_VWMULU_VV 0xfc00707f -#define MATCH_VWMULSU_VV 0xe8002057 -#define MASK_VWMULSU_VV 0xfc00707f -#define MATCH_VWMUL_VV 0xec002057 -#define MASK_VWMUL_VV 0xfc00707f -#define MATCH_VWMACCU_VV 0xf0002057 -#define MASK_VWMACCU_VV 0xfc00707f -#define MATCH_VWMACC_VV 0xf4002057 -#define MASK_VWMACC_VV 0xfc00707f -#define MATCH_VWMACCSU_VV 0xfc002057 -#define MASK_VWMACCSU_VV 0xfc00707f -#define MATCH_VAADDU_VX 0x20006057 -#define MASK_VAADDU_VX 0xfc00707f -#define MATCH_VAADD_VX 0x24006057 -#define MASK_VAADD_VX 0xfc00707f -#define MATCH_VASUBU_VX 0x28006057 -#define MASK_VASUBU_VX 0xfc00707f -#define MATCH_VASUB_VX 0x2c006057 -#define MASK_VASUB_VX 0xfc00707f -#define MATCH_VMV_S_X 0x42006057 -#define MASK_VMV_S_X 0xfff0707f -#define MATCH_VSLIDE1UP_VX 0x38006057 -#define MASK_VSLIDE1UP_VX 0xfc00707f -#define MATCH_VSLIDE1DOWN_VX 0x3c006057 -#define MASK_VSLIDE1DOWN_VX 0xfc00707f -#define MATCH_VDIVU_VX 0x80006057 -#define MASK_VDIVU_VX 0xfc00707f -#define MATCH_VDIV_VX 0x84006057 -#define MASK_VDIV_VX 0xfc00707f -#define MATCH_VREMU_VX 0x88006057 -#define MASK_VREMU_VX 0xfc00707f -#define MATCH_VREM_VX 0x8c006057 -#define MASK_VREM_VX 0xfc00707f -#define MATCH_VMULHU_VX 0x90006057 -#define MASK_VMULHU_VX 0xfc00707f -#define MATCH_VMUL_VX 0x94006057 -#define MASK_VMUL_VX 0xfc00707f -#define MATCH_VMULHSU_VX 0x98006057 -#define MASK_VMULHSU_VX 0xfc00707f -#define MATCH_VMULH_VX 0x9c006057 -#define MASK_VMULH_VX 0xfc00707f -#define MATCH_VMADD_VX 0xa4006057 -#define MASK_VMADD_VX 0xfc00707f -#define MATCH_VNMSUB_VX 0xac006057 -#define MASK_VNMSUB_VX 0xfc00707f -#define MATCH_VMACC_VX 0xb4006057 -#define MASK_VMACC_VX 0xfc00707f -#define MATCH_VNMSAC_VX 0xbc006057 -#define MASK_VNMSAC_VX 0xfc00707f -#define MATCH_VWADDU_VX 0xc0006057 -#define MASK_VWADDU_VX 0xfc00707f -#define MATCH_VWADD_VX 0xc4006057 -#define MASK_VWADD_VX 0xfc00707f -#define MATCH_VWSUBU_VX 0xc8006057 -#define MASK_VWSUBU_VX 0xfc00707f -#define MATCH_VWSUB_VX 0xcc006057 -#define MASK_VWSUB_VX 0xfc00707f -#define MATCH_VWADDU_WX 0xd0006057 -#define MASK_VWADDU_WX 0xfc00707f -#define MATCH_VWADD_WX 0xd4006057 -#define MASK_VWADD_WX 0xfc00707f -#define MATCH_VWSUBU_WX 0xd8006057 -#define 
[Remainder of the auto-generated definitions removed from riscv/encoding.h by this hunk:
  instruction encodings (MATCH_* / MASK_* pairs) for the vector widening and multiply-accumulate .WX/.VX forms, the vector AMO instructions (VAMOSWAPEI8_V through VAMOMAXUEI64_V), the packed-SIMD P extension (ADD8 through ZUNPKD832, plus the RV64 32-bit variants ADD32 through URSUB32), and further vector encodings (VMVNFR_V, VL1R_V..VL8R_V, VLE1_V, VSE1_V, VFREDSUM_VS, VFWREDSUM_VS, VPOPC_M, VMORNOT_MM, VMANDNOT_MM);
  CSR address constants (CSR_FFLAGS 0x1 through CSR_MHPMCOUNTER31H 0xb9f);
  exception cause codes (CAUSE_MISALIGNED_FETCH 0x0 through CAUSE_STORE_GUEST_PAGE_FAULT 0x17) and the closing #endif of the constants block;
  the DECLARE_INSN(name, MATCH_name, MASK_name) x-macro list, guarded by #ifdef DECLARE_INSN, covering the base ISA and pseudo-instructions, M/A, the hypervisor load/store instructions, F/D/Q/Zfh, bitmanip, scalar crypto, cache-block and prefetch operations, the compressed and custom-0..3 encodings, and the vector configuration, load/store, arithmetic, and conversion instructions (list continues).]
-DECLARE_INSN(vfncvt_rtz_xu_f_w, MATCH_VFNCVT_RTZ_XU_F_W, MASK_VFNCVT_RTZ_XU_F_W) -DECLARE_INSN(vfncvt_rtz_x_f_w, MATCH_VFNCVT_RTZ_X_F_W, MASK_VFNCVT_RTZ_X_F_W) -DECLARE_INSN(vfsqrt_v, MATCH_VFSQRT_V, MASK_VFSQRT_V) -DECLARE_INSN(vfrsqrt7_v, MATCH_VFRSQRT7_V, MASK_VFRSQRT7_V) -DECLARE_INSN(vfrec7_v, MATCH_VFREC7_V, MASK_VFREC7_V) -DECLARE_INSN(vfclass_v, MATCH_VFCLASS_V, MASK_VFCLASS_V) -DECLARE_INSN(vfwadd_vv, MATCH_VFWADD_VV, MASK_VFWADD_VV) -DECLARE_INSN(vfwredusum_vs, MATCH_VFWREDUSUM_VS, MASK_VFWREDUSUM_VS) -DECLARE_INSN(vfwsub_vv, MATCH_VFWSUB_VV, MASK_VFWSUB_VV) -DECLARE_INSN(vfwredosum_vs, MATCH_VFWREDOSUM_VS, MASK_VFWREDOSUM_VS) -DECLARE_INSN(vfwadd_wv, MATCH_VFWADD_WV, MASK_VFWADD_WV) -DECLARE_INSN(vfwsub_wv, MATCH_VFWSUB_WV, MASK_VFWSUB_WV) -DECLARE_INSN(vfwmul_vv, MATCH_VFWMUL_VV, MASK_VFWMUL_VV) -DECLARE_INSN(vfwmacc_vv, MATCH_VFWMACC_VV, MASK_VFWMACC_VV) -DECLARE_INSN(vfwnmacc_vv, MATCH_VFWNMACC_VV, MASK_VFWNMACC_VV) -DECLARE_INSN(vfwmsac_vv, MATCH_VFWMSAC_VV, MASK_VFWMSAC_VV) -DECLARE_INSN(vfwnmsac_vv, MATCH_VFWNMSAC_VV, MASK_VFWNMSAC_VV) -DECLARE_INSN(vadd_vx, MATCH_VADD_VX, MASK_VADD_VX) -DECLARE_INSN(vsub_vx, MATCH_VSUB_VX, MASK_VSUB_VX) -DECLARE_INSN(vrsub_vx, MATCH_VRSUB_VX, MASK_VRSUB_VX) -DECLARE_INSN(vminu_vx, MATCH_VMINU_VX, MASK_VMINU_VX) -DECLARE_INSN(vmin_vx, MATCH_VMIN_VX, MASK_VMIN_VX) -DECLARE_INSN(vmaxu_vx, MATCH_VMAXU_VX, MASK_VMAXU_VX) -DECLARE_INSN(vmax_vx, MATCH_VMAX_VX, MASK_VMAX_VX) -DECLARE_INSN(vand_vx, MATCH_VAND_VX, MASK_VAND_VX) -DECLARE_INSN(vor_vx, MATCH_VOR_VX, MASK_VOR_VX) -DECLARE_INSN(vxor_vx, MATCH_VXOR_VX, MASK_VXOR_VX) -DECLARE_INSN(vrgather_vx, MATCH_VRGATHER_VX, MASK_VRGATHER_VX) -DECLARE_INSN(vslideup_vx, MATCH_VSLIDEUP_VX, MASK_VSLIDEUP_VX) -DECLARE_INSN(vslidedown_vx, MATCH_VSLIDEDOWN_VX, MASK_VSLIDEDOWN_VX) -DECLARE_INSN(vadc_vxm, MATCH_VADC_VXM, MASK_VADC_VXM) -DECLARE_INSN(vmadc_vxm, MATCH_VMADC_VXM, MASK_VMADC_VXM) -DECLARE_INSN(vmadc_vx, MATCH_VMADC_VX, MASK_VMADC_VX) -DECLARE_INSN(vsbc_vxm, MATCH_VSBC_VXM, MASK_VSBC_VXM) -DECLARE_INSN(vmsbc_vxm, MATCH_VMSBC_VXM, MASK_VMSBC_VXM) -DECLARE_INSN(vmsbc_vx, MATCH_VMSBC_VX, MASK_VMSBC_VX) -DECLARE_INSN(vmerge_vxm, MATCH_VMERGE_VXM, MASK_VMERGE_VXM) -DECLARE_INSN(vmv_v_x, MATCH_VMV_V_X, MASK_VMV_V_X) -DECLARE_INSN(vmseq_vx, MATCH_VMSEQ_VX, MASK_VMSEQ_VX) -DECLARE_INSN(vmsne_vx, MATCH_VMSNE_VX, MASK_VMSNE_VX) -DECLARE_INSN(vmsltu_vx, MATCH_VMSLTU_VX, MASK_VMSLTU_VX) -DECLARE_INSN(vmslt_vx, MATCH_VMSLT_VX, MASK_VMSLT_VX) -DECLARE_INSN(vmsleu_vx, MATCH_VMSLEU_VX, MASK_VMSLEU_VX) -DECLARE_INSN(vmsle_vx, MATCH_VMSLE_VX, MASK_VMSLE_VX) -DECLARE_INSN(vmsgtu_vx, MATCH_VMSGTU_VX, MASK_VMSGTU_VX) -DECLARE_INSN(vmsgt_vx, MATCH_VMSGT_VX, MASK_VMSGT_VX) -DECLARE_INSN(vsaddu_vx, MATCH_VSADDU_VX, MASK_VSADDU_VX) -DECLARE_INSN(vsadd_vx, MATCH_VSADD_VX, MASK_VSADD_VX) -DECLARE_INSN(vssubu_vx, MATCH_VSSUBU_VX, MASK_VSSUBU_VX) -DECLARE_INSN(vssub_vx, MATCH_VSSUB_VX, MASK_VSSUB_VX) -DECLARE_INSN(vsll_vx, MATCH_VSLL_VX, MASK_VSLL_VX) -DECLARE_INSN(vsmul_vx, MATCH_VSMUL_VX, MASK_VSMUL_VX) -DECLARE_INSN(vsrl_vx, MATCH_VSRL_VX, MASK_VSRL_VX) -DECLARE_INSN(vsra_vx, MATCH_VSRA_VX, MASK_VSRA_VX) -DECLARE_INSN(vssrl_vx, MATCH_VSSRL_VX, MASK_VSSRL_VX) -DECLARE_INSN(vssra_vx, MATCH_VSSRA_VX, MASK_VSSRA_VX) -DECLARE_INSN(vnsrl_wx, MATCH_VNSRL_WX, MASK_VNSRL_WX) -DECLARE_INSN(vnsra_wx, MATCH_VNSRA_WX, MASK_VNSRA_WX) -DECLARE_INSN(vnclipu_wx, MATCH_VNCLIPU_WX, MASK_VNCLIPU_WX) -DECLARE_INSN(vnclip_wx, MATCH_VNCLIP_WX, MASK_VNCLIP_WX) -DECLARE_INSN(vadd_vv, MATCH_VADD_VV, MASK_VADD_VV) -DECLARE_INSN(vsub_vv, 
MATCH_VSUB_VV, MASK_VSUB_VV) -DECLARE_INSN(vminu_vv, MATCH_VMINU_VV, MASK_VMINU_VV) -DECLARE_INSN(vmin_vv, MATCH_VMIN_VV, MASK_VMIN_VV) -DECLARE_INSN(vmaxu_vv, MATCH_VMAXU_VV, MASK_VMAXU_VV) -DECLARE_INSN(vmax_vv, MATCH_VMAX_VV, MASK_VMAX_VV) -DECLARE_INSN(vand_vv, MATCH_VAND_VV, MASK_VAND_VV) -DECLARE_INSN(vor_vv, MATCH_VOR_VV, MASK_VOR_VV) -DECLARE_INSN(vxor_vv, MATCH_VXOR_VV, MASK_VXOR_VV) -DECLARE_INSN(vrgather_vv, MATCH_VRGATHER_VV, MASK_VRGATHER_VV) -DECLARE_INSN(vrgatherei16_vv, MATCH_VRGATHEREI16_VV, MASK_VRGATHEREI16_VV) -DECLARE_INSN(vadc_vvm, MATCH_VADC_VVM, MASK_VADC_VVM) -DECLARE_INSN(vmadc_vvm, MATCH_VMADC_VVM, MASK_VMADC_VVM) -DECLARE_INSN(vmadc_vv, MATCH_VMADC_VV, MASK_VMADC_VV) -DECLARE_INSN(vsbc_vvm, MATCH_VSBC_VVM, MASK_VSBC_VVM) -DECLARE_INSN(vmsbc_vvm, MATCH_VMSBC_VVM, MASK_VMSBC_VVM) -DECLARE_INSN(vmsbc_vv, MATCH_VMSBC_VV, MASK_VMSBC_VV) -DECLARE_INSN(vmerge_vvm, MATCH_VMERGE_VVM, MASK_VMERGE_VVM) -DECLARE_INSN(vmv_v_v, MATCH_VMV_V_V, MASK_VMV_V_V) -DECLARE_INSN(vmseq_vv, MATCH_VMSEQ_VV, MASK_VMSEQ_VV) -DECLARE_INSN(vmsne_vv, MATCH_VMSNE_VV, MASK_VMSNE_VV) -DECLARE_INSN(vmsltu_vv, MATCH_VMSLTU_VV, MASK_VMSLTU_VV) -DECLARE_INSN(vmslt_vv, MATCH_VMSLT_VV, MASK_VMSLT_VV) -DECLARE_INSN(vmsleu_vv, MATCH_VMSLEU_VV, MASK_VMSLEU_VV) -DECLARE_INSN(vmsle_vv, MATCH_VMSLE_VV, MASK_VMSLE_VV) -DECLARE_INSN(vsaddu_vv, MATCH_VSADDU_VV, MASK_VSADDU_VV) -DECLARE_INSN(vsadd_vv, MATCH_VSADD_VV, MASK_VSADD_VV) -DECLARE_INSN(vssubu_vv, MATCH_VSSUBU_VV, MASK_VSSUBU_VV) -DECLARE_INSN(vssub_vv, MATCH_VSSUB_VV, MASK_VSSUB_VV) -DECLARE_INSN(vsll_vv, MATCH_VSLL_VV, MASK_VSLL_VV) -DECLARE_INSN(vsmul_vv, MATCH_VSMUL_VV, MASK_VSMUL_VV) -DECLARE_INSN(vsrl_vv, MATCH_VSRL_VV, MASK_VSRL_VV) -DECLARE_INSN(vsra_vv, MATCH_VSRA_VV, MASK_VSRA_VV) -DECLARE_INSN(vssrl_vv, MATCH_VSSRL_VV, MASK_VSSRL_VV) -DECLARE_INSN(vssra_vv, MATCH_VSSRA_VV, MASK_VSSRA_VV) -DECLARE_INSN(vnsrl_wv, MATCH_VNSRL_WV, MASK_VNSRL_WV) -DECLARE_INSN(vnsra_wv, MATCH_VNSRA_WV, MASK_VNSRA_WV) -DECLARE_INSN(vnclipu_wv, MATCH_VNCLIPU_WV, MASK_VNCLIPU_WV) -DECLARE_INSN(vnclip_wv, MATCH_VNCLIP_WV, MASK_VNCLIP_WV) -DECLARE_INSN(vwredsumu_vs, MATCH_VWREDSUMU_VS, MASK_VWREDSUMU_VS) -DECLARE_INSN(vwredsum_vs, MATCH_VWREDSUM_VS, MASK_VWREDSUM_VS) -DECLARE_INSN(vadd_vi, MATCH_VADD_VI, MASK_VADD_VI) -DECLARE_INSN(vrsub_vi, MATCH_VRSUB_VI, MASK_VRSUB_VI) -DECLARE_INSN(vand_vi, MATCH_VAND_VI, MASK_VAND_VI) -DECLARE_INSN(vor_vi, MATCH_VOR_VI, MASK_VOR_VI) -DECLARE_INSN(vxor_vi, MATCH_VXOR_VI, MASK_VXOR_VI) -DECLARE_INSN(vrgather_vi, MATCH_VRGATHER_VI, MASK_VRGATHER_VI) -DECLARE_INSN(vslideup_vi, MATCH_VSLIDEUP_VI, MASK_VSLIDEUP_VI) -DECLARE_INSN(vslidedown_vi, MATCH_VSLIDEDOWN_VI, MASK_VSLIDEDOWN_VI) -DECLARE_INSN(vadc_vim, MATCH_VADC_VIM, MASK_VADC_VIM) -DECLARE_INSN(vmadc_vim, MATCH_VMADC_VIM, MASK_VMADC_VIM) -DECLARE_INSN(vmadc_vi, MATCH_VMADC_VI, MASK_VMADC_VI) -DECLARE_INSN(vmerge_vim, MATCH_VMERGE_VIM, MASK_VMERGE_VIM) -DECLARE_INSN(vmv_v_i, MATCH_VMV_V_I, MASK_VMV_V_I) -DECLARE_INSN(vmseq_vi, MATCH_VMSEQ_VI, MASK_VMSEQ_VI) -DECLARE_INSN(vmsne_vi, MATCH_VMSNE_VI, MASK_VMSNE_VI) -DECLARE_INSN(vmsleu_vi, MATCH_VMSLEU_VI, MASK_VMSLEU_VI) -DECLARE_INSN(vmsle_vi, MATCH_VMSLE_VI, MASK_VMSLE_VI) -DECLARE_INSN(vmsgtu_vi, MATCH_VMSGTU_VI, MASK_VMSGTU_VI) -DECLARE_INSN(vmsgt_vi, MATCH_VMSGT_VI, MASK_VMSGT_VI) -DECLARE_INSN(vsaddu_vi, MATCH_VSADDU_VI, MASK_VSADDU_VI) -DECLARE_INSN(vsadd_vi, MATCH_VSADD_VI, MASK_VSADD_VI) -DECLARE_INSN(vsll_vi, MATCH_VSLL_VI, MASK_VSLL_VI) -DECLARE_INSN(vmv1r_v, MATCH_VMV1R_V, MASK_VMV1R_V) -DECLARE_INSN(vmv2r_v, 
MATCH_VMV2R_V, MASK_VMV2R_V) -DECLARE_INSN(vmv4r_v, MATCH_VMV4R_V, MASK_VMV4R_V) -DECLARE_INSN(vmv8r_v, MATCH_VMV8R_V, MASK_VMV8R_V) -DECLARE_INSN(vsrl_vi, MATCH_VSRL_VI, MASK_VSRL_VI) -DECLARE_INSN(vsra_vi, MATCH_VSRA_VI, MASK_VSRA_VI) -DECLARE_INSN(vssrl_vi, MATCH_VSSRL_VI, MASK_VSSRL_VI) -DECLARE_INSN(vssra_vi, MATCH_VSSRA_VI, MASK_VSSRA_VI) -DECLARE_INSN(vnsrl_wi, MATCH_VNSRL_WI, MASK_VNSRL_WI) -DECLARE_INSN(vnsra_wi, MATCH_VNSRA_WI, MASK_VNSRA_WI) -DECLARE_INSN(vnclipu_wi, MATCH_VNCLIPU_WI, MASK_VNCLIPU_WI) -DECLARE_INSN(vnclip_wi, MATCH_VNCLIP_WI, MASK_VNCLIP_WI) -DECLARE_INSN(vredsum_vs, MATCH_VREDSUM_VS, MASK_VREDSUM_VS) -DECLARE_INSN(vredand_vs, MATCH_VREDAND_VS, MASK_VREDAND_VS) -DECLARE_INSN(vredor_vs, MATCH_VREDOR_VS, MASK_VREDOR_VS) -DECLARE_INSN(vredxor_vs, MATCH_VREDXOR_VS, MASK_VREDXOR_VS) -DECLARE_INSN(vredminu_vs, MATCH_VREDMINU_VS, MASK_VREDMINU_VS) -DECLARE_INSN(vredmin_vs, MATCH_VREDMIN_VS, MASK_VREDMIN_VS) -DECLARE_INSN(vredmaxu_vs, MATCH_VREDMAXU_VS, MASK_VREDMAXU_VS) -DECLARE_INSN(vredmax_vs, MATCH_VREDMAX_VS, MASK_VREDMAX_VS) -DECLARE_INSN(vaaddu_vv, MATCH_VAADDU_VV, MASK_VAADDU_VV) -DECLARE_INSN(vaadd_vv, MATCH_VAADD_VV, MASK_VAADD_VV) -DECLARE_INSN(vasubu_vv, MATCH_VASUBU_VV, MASK_VASUBU_VV) -DECLARE_INSN(vasub_vv, MATCH_VASUB_VV, MASK_VASUB_VV) -DECLARE_INSN(vmv_x_s, MATCH_VMV_X_S, MASK_VMV_X_S) -DECLARE_INSN(vzext_vf8, MATCH_VZEXT_VF8, MASK_VZEXT_VF8) -DECLARE_INSN(vsext_vf8, MATCH_VSEXT_VF8, MASK_VSEXT_VF8) -DECLARE_INSN(vzext_vf4, MATCH_VZEXT_VF4, MASK_VZEXT_VF4) -DECLARE_INSN(vsext_vf4, MATCH_VSEXT_VF4, MASK_VSEXT_VF4) -DECLARE_INSN(vzext_vf2, MATCH_VZEXT_VF2, MASK_VZEXT_VF2) -DECLARE_INSN(vsext_vf2, MATCH_VSEXT_VF2, MASK_VSEXT_VF2) -DECLARE_INSN(vcompress_vm, MATCH_VCOMPRESS_VM, MASK_VCOMPRESS_VM) -DECLARE_INSN(vmandn_mm, MATCH_VMANDN_MM, MASK_VMANDN_MM) -DECLARE_INSN(vmand_mm, MATCH_VMAND_MM, MASK_VMAND_MM) -DECLARE_INSN(vmor_mm, MATCH_VMOR_MM, MASK_VMOR_MM) -DECLARE_INSN(vmxor_mm, MATCH_VMXOR_MM, MASK_VMXOR_MM) -DECLARE_INSN(vmorn_mm, MATCH_VMORN_MM, MASK_VMORN_MM) -DECLARE_INSN(vmnand_mm, MATCH_VMNAND_MM, MASK_VMNAND_MM) -DECLARE_INSN(vmnor_mm, MATCH_VMNOR_MM, MASK_VMNOR_MM) -DECLARE_INSN(vmxnor_mm, MATCH_VMXNOR_MM, MASK_VMXNOR_MM) -DECLARE_INSN(vmsbf_m, MATCH_VMSBF_M, MASK_VMSBF_M) -DECLARE_INSN(vmsof_m, MATCH_VMSOF_M, MASK_VMSOF_M) -DECLARE_INSN(vmsif_m, MATCH_VMSIF_M, MASK_VMSIF_M) -DECLARE_INSN(viota_m, MATCH_VIOTA_M, MASK_VIOTA_M) -DECLARE_INSN(vid_v, MATCH_VID_V, MASK_VID_V) -DECLARE_INSN(vcpop_m, MATCH_VCPOP_M, MASK_VCPOP_M) -DECLARE_INSN(vfirst_m, MATCH_VFIRST_M, MASK_VFIRST_M) -DECLARE_INSN(vdivu_vv, MATCH_VDIVU_VV, MASK_VDIVU_VV) -DECLARE_INSN(vdiv_vv, MATCH_VDIV_VV, MASK_VDIV_VV) -DECLARE_INSN(vremu_vv, MATCH_VREMU_VV, MASK_VREMU_VV) -DECLARE_INSN(vrem_vv, MATCH_VREM_VV, MASK_VREM_VV) -DECLARE_INSN(vmulhu_vv, MATCH_VMULHU_VV, MASK_VMULHU_VV) -DECLARE_INSN(vmul_vv, MATCH_VMUL_VV, MASK_VMUL_VV) -DECLARE_INSN(vmulhsu_vv, MATCH_VMULHSU_VV, MASK_VMULHSU_VV) -DECLARE_INSN(vmulh_vv, MATCH_VMULH_VV, MASK_VMULH_VV) -DECLARE_INSN(vmadd_vv, MATCH_VMADD_VV, MASK_VMADD_VV) -DECLARE_INSN(vnmsub_vv, MATCH_VNMSUB_VV, MASK_VNMSUB_VV) -DECLARE_INSN(vmacc_vv, MATCH_VMACC_VV, MASK_VMACC_VV) -DECLARE_INSN(vnmsac_vv, MATCH_VNMSAC_VV, MASK_VNMSAC_VV) -DECLARE_INSN(vwaddu_vv, MATCH_VWADDU_VV, MASK_VWADDU_VV) -DECLARE_INSN(vwadd_vv, MATCH_VWADD_VV, MASK_VWADD_VV) -DECLARE_INSN(vwsubu_vv, MATCH_VWSUBU_VV, MASK_VWSUBU_VV) -DECLARE_INSN(vwsub_vv, MATCH_VWSUB_VV, MASK_VWSUB_VV) -DECLARE_INSN(vwaddu_wv, MATCH_VWADDU_WV, MASK_VWADDU_WV) -DECLARE_INSN(vwadd_wv, 
MATCH_VWADD_WV, MASK_VWADD_WV) -DECLARE_INSN(vwsubu_wv, MATCH_VWSUBU_WV, MASK_VWSUBU_WV) -DECLARE_INSN(vwsub_wv, MATCH_VWSUB_WV, MASK_VWSUB_WV) -DECLARE_INSN(vwmulu_vv, MATCH_VWMULU_VV, MASK_VWMULU_VV) -DECLARE_INSN(vwmulsu_vv, MATCH_VWMULSU_VV, MASK_VWMULSU_VV) -DECLARE_INSN(vwmul_vv, MATCH_VWMUL_VV, MASK_VWMUL_VV) -DECLARE_INSN(vwmaccu_vv, MATCH_VWMACCU_VV, MASK_VWMACCU_VV) -DECLARE_INSN(vwmacc_vv, MATCH_VWMACC_VV, MASK_VWMACC_VV) -DECLARE_INSN(vwmaccsu_vv, MATCH_VWMACCSU_VV, MASK_VWMACCSU_VV) -DECLARE_INSN(vaaddu_vx, MATCH_VAADDU_VX, MASK_VAADDU_VX) -DECLARE_INSN(vaadd_vx, MATCH_VAADD_VX, MASK_VAADD_VX) -DECLARE_INSN(vasubu_vx, MATCH_VASUBU_VX, MASK_VASUBU_VX) -DECLARE_INSN(vasub_vx, MATCH_VASUB_VX, MASK_VASUB_VX) -DECLARE_INSN(vmv_s_x, MATCH_VMV_S_X, MASK_VMV_S_X) -DECLARE_INSN(vslide1up_vx, MATCH_VSLIDE1UP_VX, MASK_VSLIDE1UP_VX) -DECLARE_INSN(vslide1down_vx, MATCH_VSLIDE1DOWN_VX, MASK_VSLIDE1DOWN_VX) -DECLARE_INSN(vdivu_vx, MATCH_VDIVU_VX, MASK_VDIVU_VX) -DECLARE_INSN(vdiv_vx, MATCH_VDIV_VX, MASK_VDIV_VX) -DECLARE_INSN(vremu_vx, MATCH_VREMU_VX, MASK_VREMU_VX) -DECLARE_INSN(vrem_vx, MATCH_VREM_VX, MASK_VREM_VX) -DECLARE_INSN(vmulhu_vx, MATCH_VMULHU_VX, MASK_VMULHU_VX) -DECLARE_INSN(vmul_vx, MATCH_VMUL_VX, MASK_VMUL_VX) -DECLARE_INSN(vmulhsu_vx, MATCH_VMULHSU_VX, MASK_VMULHSU_VX) -DECLARE_INSN(vmulh_vx, MATCH_VMULH_VX, MASK_VMULH_VX) -DECLARE_INSN(vmadd_vx, MATCH_VMADD_VX, MASK_VMADD_VX) -DECLARE_INSN(vnmsub_vx, MATCH_VNMSUB_VX, MASK_VNMSUB_VX) -DECLARE_INSN(vmacc_vx, MATCH_VMACC_VX, MASK_VMACC_VX) -DECLARE_INSN(vnmsac_vx, MATCH_VNMSAC_VX, MASK_VNMSAC_VX) -DECLARE_INSN(vwaddu_vx, MATCH_VWADDU_VX, MASK_VWADDU_VX) -DECLARE_INSN(vwadd_vx, MATCH_VWADD_VX, MASK_VWADD_VX) -DECLARE_INSN(vwsubu_vx, MATCH_VWSUBU_VX, MASK_VWSUBU_VX) -DECLARE_INSN(vwsub_vx, MATCH_VWSUB_VX, MASK_VWSUB_VX) -DECLARE_INSN(vwaddu_wx, MATCH_VWADDU_WX, MASK_VWADDU_WX) -DECLARE_INSN(vwadd_wx, MATCH_VWADD_WX, MASK_VWADD_WX) -DECLARE_INSN(vwsubu_wx, MATCH_VWSUBU_WX, MASK_VWSUBU_WX) -DECLARE_INSN(vwsub_wx, MATCH_VWSUB_WX, MASK_VWSUB_WX) -DECLARE_INSN(vwmulu_vx, MATCH_VWMULU_VX, MASK_VWMULU_VX) -DECLARE_INSN(vwmulsu_vx, MATCH_VWMULSU_VX, MASK_VWMULSU_VX) -DECLARE_INSN(vwmul_vx, MATCH_VWMUL_VX, MASK_VWMUL_VX) -DECLARE_INSN(vwmaccu_vx, MATCH_VWMACCU_VX, MASK_VWMACCU_VX) -DECLARE_INSN(vwmacc_vx, MATCH_VWMACC_VX, MASK_VWMACC_VX) -DECLARE_INSN(vwmaccus_vx, MATCH_VWMACCUS_VX, MASK_VWMACCUS_VX) -DECLARE_INSN(vwmaccsu_vx, MATCH_VWMACCSU_VX, MASK_VWMACCSU_VX) -DECLARE_INSN(vamoswapei8_v, MATCH_VAMOSWAPEI8_V, MASK_VAMOSWAPEI8_V) -DECLARE_INSN(vamoaddei8_v, MATCH_VAMOADDEI8_V, MASK_VAMOADDEI8_V) -DECLARE_INSN(vamoxorei8_v, MATCH_VAMOXOREI8_V, MASK_VAMOXOREI8_V) -DECLARE_INSN(vamoandei8_v, MATCH_VAMOANDEI8_V, MASK_VAMOANDEI8_V) -DECLARE_INSN(vamoorei8_v, MATCH_VAMOOREI8_V, MASK_VAMOOREI8_V) -DECLARE_INSN(vamominei8_v, MATCH_VAMOMINEI8_V, MASK_VAMOMINEI8_V) -DECLARE_INSN(vamomaxei8_v, MATCH_VAMOMAXEI8_V, MASK_VAMOMAXEI8_V) -DECLARE_INSN(vamominuei8_v, MATCH_VAMOMINUEI8_V, MASK_VAMOMINUEI8_V) -DECLARE_INSN(vamomaxuei8_v, MATCH_VAMOMAXUEI8_V, MASK_VAMOMAXUEI8_V) -DECLARE_INSN(vamoswapei16_v, MATCH_VAMOSWAPEI16_V, MASK_VAMOSWAPEI16_V) -DECLARE_INSN(vamoaddei16_v, MATCH_VAMOADDEI16_V, MASK_VAMOADDEI16_V) -DECLARE_INSN(vamoxorei16_v, MATCH_VAMOXOREI16_V, MASK_VAMOXOREI16_V) -DECLARE_INSN(vamoandei16_v, MATCH_VAMOANDEI16_V, MASK_VAMOANDEI16_V) -DECLARE_INSN(vamoorei16_v, MATCH_VAMOOREI16_V, MASK_VAMOOREI16_V) -DECLARE_INSN(vamominei16_v, MATCH_VAMOMINEI16_V, MASK_VAMOMINEI16_V) -DECLARE_INSN(vamomaxei16_v, MATCH_VAMOMAXEI16_V, 
MASK_VAMOMAXEI16_V) -DECLARE_INSN(vamominuei16_v, MATCH_VAMOMINUEI16_V, MASK_VAMOMINUEI16_V) -DECLARE_INSN(vamomaxuei16_v, MATCH_VAMOMAXUEI16_V, MASK_VAMOMAXUEI16_V) -DECLARE_INSN(vamoswapei32_v, MATCH_VAMOSWAPEI32_V, MASK_VAMOSWAPEI32_V) -DECLARE_INSN(vamoaddei32_v, MATCH_VAMOADDEI32_V, MASK_VAMOADDEI32_V) -DECLARE_INSN(vamoxorei32_v, MATCH_VAMOXOREI32_V, MASK_VAMOXOREI32_V) -DECLARE_INSN(vamoandei32_v, MATCH_VAMOANDEI32_V, MASK_VAMOANDEI32_V) -DECLARE_INSN(vamoorei32_v, MATCH_VAMOOREI32_V, MASK_VAMOOREI32_V) -DECLARE_INSN(vamominei32_v, MATCH_VAMOMINEI32_V, MASK_VAMOMINEI32_V) -DECLARE_INSN(vamomaxei32_v, MATCH_VAMOMAXEI32_V, MASK_VAMOMAXEI32_V) -DECLARE_INSN(vamominuei32_v, MATCH_VAMOMINUEI32_V, MASK_VAMOMINUEI32_V) -DECLARE_INSN(vamomaxuei32_v, MATCH_VAMOMAXUEI32_V, MASK_VAMOMAXUEI32_V) -DECLARE_INSN(vamoswapei64_v, MATCH_VAMOSWAPEI64_V, MASK_VAMOSWAPEI64_V) -DECLARE_INSN(vamoaddei64_v, MATCH_VAMOADDEI64_V, MASK_VAMOADDEI64_V) -DECLARE_INSN(vamoxorei64_v, MATCH_VAMOXOREI64_V, MASK_VAMOXOREI64_V) -DECLARE_INSN(vamoandei64_v, MATCH_VAMOANDEI64_V, MASK_VAMOANDEI64_V) -DECLARE_INSN(vamoorei64_v, MATCH_VAMOOREI64_V, MASK_VAMOOREI64_V) -DECLARE_INSN(vamominei64_v, MATCH_VAMOMINEI64_V, MASK_VAMOMINEI64_V) -DECLARE_INSN(vamomaxei64_v, MATCH_VAMOMAXEI64_V, MASK_VAMOMAXEI64_V) -DECLARE_INSN(vamominuei64_v, MATCH_VAMOMINUEI64_V, MASK_VAMOMINUEI64_V) -DECLARE_INSN(vamomaxuei64_v, MATCH_VAMOMAXUEI64_V, MASK_VAMOMAXUEI64_V) -DECLARE_INSN(add8, MATCH_ADD8, MASK_ADD8) +#define MASK_C_SDSP 0xe003 +#define MATCH_C_SLLI 0x2 +#define MASK_C_SLLI 0xe003 +#define MATCH_C_SRAI 0x8401 +#define MASK_C_SRAI 0xec03 +#define MATCH_C_SRLI 0x8001 +#define MASK_C_SRLI 0xec03 +#define MATCH_C_SUB 0x8c01 +#define MASK_C_SUB 0xfc63 +#define MATCH_C_SUBW 0x9c01 +#define MASK_C_SUBW 0xfc63 +#define MATCH_C_SW 0xc000 +#define MASK_C_SW 0xe003 +#define MATCH_C_SWSP 0xc002 +#define MASK_C_SWSP 0xe003 +#define MATCH_C_XOR 0x8c21 +#define MASK_C_XOR 0xfc63 +#define MATCH_CBO_CLEAN 0x10200f +#define MASK_CBO_CLEAN 0xfff07fff +#define MATCH_CBO_FLUSH 0x20200f +#define MASK_CBO_FLUSH 0xfff07fff +#define MATCH_CBO_INVAL 0x200f +#define MASK_CBO_INVAL 0xfff07fff +#define MATCH_CBO_ZERO 0x40200f +#define MASK_CBO_ZERO 0xfff07fff +#define MATCH_CLMUL 0xa001033 +#define MASK_CLMUL 0xfe00707f +#define MATCH_CLMULH 0xa003033 +#define MASK_CLMULH 0xfe00707f +#define MATCH_CLMULR 0xa002033 +#define MASK_CLMULR 0xfe00707f +#define MATCH_CLO16 0xaeb00077 +#define MASK_CLO16 0xfff0707f +#define MATCH_CLO32 0xafb00077 +#define MASK_CLO32 0xfff0707f +#define MATCH_CLO8 0xae300077 +#define MASK_CLO8 0xfff0707f +#define MATCH_CLRS16 0xae800077 +#define MASK_CLRS16 0xfff0707f +#define MATCH_CLRS32 0xaf800077 +#define MASK_CLRS32 0xfff0707f +#define MATCH_CLRS8 0xae000077 +#define MASK_CLRS8 0xfff0707f +#define MATCH_CLZ 0x60001013 +#define MASK_CLZ 0xfff0707f +#define MATCH_CLZ16 0xae900077 +#define MASK_CLZ16 0xfff0707f +#define MATCH_CLZ32 0xaf900077 +#define MASK_CLZ32 0xfff0707f +#define MATCH_CLZ8 0xae100077 +#define MASK_CLZ8 0xfff0707f +#define MATCH_CLZW 0x6000101b +#define MASK_CLZW 0xfff0707f +#define MATCH_CMIX 0x6001033 +#define MASK_CMIX 0x600707f +#define MATCH_CMOV 0x6005033 +#define MASK_CMOV 0x600707f +#define MATCH_CMPEQ16 0x4c000077 +#define MASK_CMPEQ16 0xfe00707f +#define MATCH_CMPEQ8 0x4e000077 +#define MASK_CMPEQ8 0xfe00707f +#define MATCH_CPOP 0x60201013 +#define MASK_CPOP 0xfff0707f +#define MATCH_CPOPW 0x6020101b +#define MASK_CPOPW 0xfff0707f +#define MATCH_CRAS16 0x44000077 +#define MASK_CRAS16 0xfe00707f +#define 
MATCH_CRAS32 0x44002077 +#define MASK_CRAS32 0xfe00707f +#define MATCH_CRC32_B 0x61001013 +#define MASK_CRC32_B 0xfff0707f +#define MATCH_CRC32_D 0x61301013 +#define MASK_CRC32_D 0xfff0707f +#define MATCH_CRC32_H 0x61101013 +#define MASK_CRC32_H 0xfff0707f +#define MATCH_CRC32_W 0x61201013 +#define MASK_CRC32_W 0xfff0707f +#define MATCH_CRC32C_B 0x61801013 +#define MASK_CRC32C_B 0xfff0707f +#define MATCH_CRC32C_D 0x61b01013 +#define MASK_CRC32C_D 0xfff0707f +#define MATCH_CRC32C_H 0x61901013 +#define MASK_CRC32C_H 0xfff0707f +#define MATCH_CRC32C_W 0x61a01013 +#define MASK_CRC32C_W 0xfff0707f +#define MATCH_CRSA16 0x46000077 +#define MASK_CRSA16 0xfe00707f +#define MATCH_CRSA32 0x46002077 +#define MASK_CRSA32 0xfe00707f +#define MATCH_CSRRC 0x3073 +#define MASK_CSRRC 0x707f +#define MATCH_CSRRCI 0x7073 +#define MASK_CSRRCI 0x707f +#define MATCH_CSRRS 0x2073 +#define MASK_CSRRS 0x707f +#define MATCH_CSRRSI 0x6073 +#define MASK_CSRRSI 0x707f +#define MATCH_CSRRW 0x1073 +#define MASK_CSRRW 0x707f +#define MATCH_CSRRWI 0x5073 +#define MASK_CSRRWI 0x707f +#define MATCH_CTZ 0x60101013 +#define MASK_CTZ 0xfff0707f +#define MATCH_CTZW 0x6010101b +#define MASK_CTZW 0xfff0707f +#define MATCH_DIV 0x2004033 +#define MASK_DIV 0xfe00707f +#define MATCH_DIVU 0x2005033 +#define MASK_DIVU 0xfe00707f +#define MATCH_DIVUW 0x200503b +#define MASK_DIVUW 0xfe00707f +#define MATCH_DIVW 0x200403b +#define MASK_DIVW 0xfe00707f +#define MATCH_DRET 0x7b200073 +#define MASK_DRET 0xffffffff +#define MATCH_EBREAK 0x100073 +#define MASK_EBREAK 0xffffffff +#define MATCH_ECALL 0x73 +#define MASK_ECALL 0xffffffff +#define MATCH_FADD_D 0x2000053 +#define MASK_FADD_D 0xfe00007f +#define MATCH_FADD_H 0x4000053 +#define MASK_FADD_H 0xfe00007f +#define MATCH_FADD_Q 0x6000053 +#define MASK_FADD_Q 0xfe00007f +#define MATCH_FADD_S 0x53 +#define MASK_FADD_S 0xfe00007f +#define MATCH_FCLASS_D 0xe2001053 +#define MASK_FCLASS_D 0xfff0707f +#define MATCH_FCLASS_H 0xe4001053 +#define MASK_FCLASS_H 0xfff0707f +#define MATCH_FCLASS_Q 0xe6001053 +#define MASK_FCLASS_Q 0xfff0707f +#define MATCH_FCLASS_S 0xe0001053 +#define MASK_FCLASS_S 0xfff0707f +#define MATCH_FCVT_D_H 0x42200053 +#define MASK_FCVT_D_H 0xfff0007f +#define MATCH_FCVT_D_L 0xd2200053 +#define MASK_FCVT_D_L 0xfff0007f +#define MATCH_FCVT_D_LU 0xd2300053 +#define MASK_FCVT_D_LU 0xfff0007f +#define MATCH_FCVT_D_Q 0x42300053 +#define MASK_FCVT_D_Q 0xfff0007f +#define MATCH_FCVT_D_S 0x42000053 +#define MASK_FCVT_D_S 0xfff0007f +#define MATCH_FCVT_D_W 0xd2000053 +#define MASK_FCVT_D_W 0xfff0007f +#define MATCH_FCVT_D_WU 0xd2100053 +#define MASK_FCVT_D_WU 0xfff0007f +#define MATCH_FCVT_H_D 0x44100053 +#define MASK_FCVT_H_D 0xfff0007f +#define MATCH_FCVT_H_L 0xd4200053 +#define MASK_FCVT_H_L 0xfff0007f +#define MATCH_FCVT_H_LU 0xd4300053 +#define MASK_FCVT_H_LU 0xfff0007f +#define MATCH_FCVT_H_Q 0x44300053 +#define MASK_FCVT_H_Q 0xfff0007f +#define MATCH_FCVT_H_S 0x44000053 +#define MASK_FCVT_H_S 0xfff0007f +#define MATCH_FCVT_H_W 0xd4000053 +#define MASK_FCVT_H_W 0xfff0007f +#define MATCH_FCVT_H_WU 0xd4100053 +#define MASK_FCVT_H_WU 0xfff0007f +#define MATCH_FCVT_L_D 0xc2200053 +#define MASK_FCVT_L_D 0xfff0007f +#define MATCH_FCVT_L_H 0xc4200053 +#define MASK_FCVT_L_H 0xfff0007f +#define MATCH_FCVT_L_Q 0xc6200053 +#define MASK_FCVT_L_Q 0xfff0007f +#define MATCH_FCVT_L_S 0xc0200053 +#define MASK_FCVT_L_S 0xfff0007f +#define MATCH_FCVT_LU_D 0xc2300053 +#define MASK_FCVT_LU_D 0xfff0007f +#define MATCH_FCVT_LU_H 0xc4300053 +#define MASK_FCVT_LU_H 0xfff0007f +#define MATCH_FCVT_LU_Q 
0xc6300053 +#define MASK_FCVT_LU_Q 0xfff0007f +#define MATCH_FCVT_LU_S 0xc0300053 +#define MASK_FCVT_LU_S 0xfff0007f +#define MATCH_FCVT_Q_D 0x46100053 +#define MASK_FCVT_Q_D 0xfff0007f +#define MATCH_FCVT_Q_H 0x46200053 +#define MASK_FCVT_Q_H 0xfff0007f +#define MATCH_FCVT_Q_L 0xd6200053 +#define MASK_FCVT_Q_L 0xfff0007f +#define MATCH_FCVT_Q_LU 0xd6300053 +#define MASK_FCVT_Q_LU 0xfff0007f +#define MATCH_FCVT_Q_S 0x46000053 +#define MASK_FCVT_Q_S 0xfff0007f +#define MATCH_FCVT_Q_W 0xd6000053 +#define MASK_FCVT_Q_W 0xfff0007f +#define MATCH_FCVT_Q_WU 0xd6100053 +#define MASK_FCVT_Q_WU 0xfff0007f +#define MATCH_FCVT_S_D 0x40100053 +#define MASK_FCVT_S_D 0xfff0007f +#define MATCH_FCVT_S_H 0x40200053 +#define MASK_FCVT_S_H 0xfff0007f +#define MATCH_FCVT_S_L 0xd0200053 +#define MASK_FCVT_S_L 0xfff0007f +#define MATCH_FCVT_S_LU 0xd0300053 +#define MASK_FCVT_S_LU 0xfff0007f +#define MATCH_FCVT_S_Q 0x40300053 +#define MASK_FCVT_S_Q 0xfff0007f +#define MATCH_FCVT_S_W 0xd0000053 +#define MASK_FCVT_S_W 0xfff0007f +#define MATCH_FCVT_S_WU 0xd0100053 +#define MASK_FCVT_S_WU 0xfff0007f +#define MATCH_FCVT_W_D 0xc2000053 +#define MASK_FCVT_W_D 0xfff0007f +#define MATCH_FCVT_W_H 0xc4000053 +#define MASK_FCVT_W_H 0xfff0007f +#define MATCH_FCVT_W_Q 0xc6000053 +#define MASK_FCVT_W_Q 0xfff0007f +#define MATCH_FCVT_W_S 0xc0000053 +#define MASK_FCVT_W_S 0xfff0007f +#define MATCH_FCVT_WU_D 0xc2100053 +#define MASK_FCVT_WU_D 0xfff0007f +#define MATCH_FCVT_WU_H 0xc4100053 +#define MASK_FCVT_WU_H 0xfff0007f +#define MATCH_FCVT_WU_Q 0xc6100053 +#define MASK_FCVT_WU_Q 0xfff0007f +#define MATCH_FCVT_WU_S 0xc0100053 +#define MASK_FCVT_WU_S 0xfff0007f +#define MATCH_FDIV_D 0x1a000053 +#define MASK_FDIV_D 0xfe00007f +#define MATCH_FDIV_H 0x1c000053 +#define MASK_FDIV_H 0xfe00007f +#define MATCH_FDIV_Q 0x1e000053 +#define MASK_FDIV_Q 0xfe00007f +#define MATCH_FDIV_S 0x18000053 +#define MASK_FDIV_S 0xfe00007f +#define MATCH_FENCE 0xf +#define MASK_FENCE 0x707f +#define MATCH_FENCE_I 0x100f +#define MASK_FENCE_I 0x707f +#define MATCH_FEQ_D 0xa2002053 +#define MASK_FEQ_D 0xfe00707f +#define MATCH_FEQ_H 0xa4002053 +#define MASK_FEQ_H 0xfe00707f +#define MATCH_FEQ_Q 0xa6002053 +#define MASK_FEQ_Q 0xfe00707f +#define MATCH_FEQ_S 0xa0002053 +#define MASK_FEQ_S 0xfe00707f +#define MATCH_FLD 0x3007 +#define MASK_FLD 0x707f +#define MATCH_FLE_D 0xa2000053 +#define MASK_FLE_D 0xfe00707f +#define MATCH_FLE_H 0xa4000053 +#define MASK_FLE_H 0xfe00707f +#define MATCH_FLE_Q 0xa6000053 +#define MASK_FLE_Q 0xfe00707f +#define MATCH_FLE_S 0xa0000053 +#define MASK_FLE_S 0xfe00707f +#define MATCH_FLH 0x1007 +#define MASK_FLH 0x707f +#define MATCH_FLQ 0x4007 +#define MASK_FLQ 0x707f +#define MATCH_FLT_D 0xa2001053 +#define MASK_FLT_D 0xfe00707f +#define MATCH_FLT_H 0xa4001053 +#define MASK_FLT_H 0xfe00707f +#define MATCH_FLT_Q 0xa6001053 +#define MASK_FLT_Q 0xfe00707f +#define MATCH_FLT_S 0xa0001053 +#define MASK_FLT_S 0xfe00707f +#define MATCH_FLW 0x2007 +#define MASK_FLW 0x707f +#define MATCH_FMADD_D 0x2000043 +#define MASK_FMADD_D 0x600007f +#define MATCH_FMADD_H 0x4000043 +#define MASK_FMADD_H 0x600007f +#define MATCH_FMADD_Q 0x6000043 +#define MASK_FMADD_Q 0x600007f +#define MATCH_FMADD_S 0x43 +#define MASK_FMADD_S 0x600007f +#define MATCH_FMAX_D 0x2a001053 +#define MASK_FMAX_D 0xfe00707f +#define MATCH_FMAX_H 0x2c001053 +#define MASK_FMAX_H 0xfe00707f +#define MATCH_FMAX_Q 0x2e001053 +#define MASK_FMAX_Q 0xfe00707f +#define MATCH_FMAX_S 0x28001053 +#define MASK_FMAX_S 0xfe00707f +#define MATCH_FMIN_D 0x2a000053 +#define MASK_FMIN_D 
0xfe00707f +#define MATCH_FMIN_H 0x2c000053 +#define MASK_FMIN_H 0xfe00707f +#define MATCH_FMIN_Q 0x2e000053 +#define MASK_FMIN_Q 0xfe00707f +#define MATCH_FMIN_S 0x28000053 +#define MASK_FMIN_S 0xfe00707f +#define MATCH_FMSUB_D 0x2000047 +#define MASK_FMSUB_D 0x600007f +#define MATCH_FMSUB_H 0x4000047 +#define MASK_FMSUB_H 0x600007f +#define MATCH_FMSUB_Q 0x6000047 +#define MASK_FMSUB_Q 0x600007f +#define MATCH_FMSUB_S 0x47 +#define MASK_FMSUB_S 0x600007f +#define MATCH_FMUL_D 0x12000053 +#define MASK_FMUL_D 0xfe00007f +#define MATCH_FMUL_H 0x14000053 +#define MASK_FMUL_H 0xfe00007f +#define MATCH_FMUL_Q 0x16000053 +#define MASK_FMUL_Q 0xfe00007f +#define MATCH_FMUL_S 0x10000053 +#define MASK_FMUL_S 0xfe00007f +#define MATCH_FMV_D_X 0xf2000053 +#define MASK_FMV_D_X 0xfff0707f +#define MATCH_FMV_H_X 0xf4000053 +#define MASK_FMV_H_X 0xfff0707f +#define MATCH_FMV_W_X 0xf0000053 +#define MASK_FMV_W_X 0xfff0707f +#define MATCH_FMV_X_D 0xe2000053 +#define MASK_FMV_X_D 0xfff0707f +#define MATCH_FMV_X_H 0xe4000053 +#define MASK_FMV_X_H 0xfff0707f +#define MATCH_FMV_X_W 0xe0000053 +#define MASK_FMV_X_W 0xfff0707f +#define MATCH_FNMADD_D 0x200004f +#define MASK_FNMADD_D 0x600007f +#define MATCH_FNMADD_H 0x400004f +#define MASK_FNMADD_H 0x600007f +#define MATCH_FNMADD_Q 0x600004f +#define MASK_FNMADD_Q 0x600007f +#define MATCH_FNMADD_S 0x4f +#define MASK_FNMADD_S 0x600007f +#define MATCH_FNMSUB_D 0x200004b +#define MASK_FNMSUB_D 0x600007f +#define MATCH_FNMSUB_H 0x400004b +#define MASK_FNMSUB_H 0x600007f +#define MATCH_FNMSUB_Q 0x600004b +#define MASK_FNMSUB_Q 0x600007f +#define MATCH_FNMSUB_S 0x4b +#define MASK_FNMSUB_S 0x600007f +#define MATCH_FSD 0x3027 +#define MASK_FSD 0x707f +#define MATCH_FSGNJ_D 0x22000053 +#define MASK_FSGNJ_D 0xfe00707f +#define MATCH_FSGNJ_H 0x24000053 +#define MASK_FSGNJ_H 0xfe00707f +#define MATCH_FSGNJ_Q 0x26000053 +#define MASK_FSGNJ_Q 0xfe00707f +#define MATCH_FSGNJ_S 0x20000053 +#define MASK_FSGNJ_S 0xfe00707f +#define MATCH_FSGNJN_D 0x22001053 +#define MASK_FSGNJN_D 0xfe00707f +#define MATCH_FSGNJN_H 0x24001053 +#define MASK_FSGNJN_H 0xfe00707f +#define MATCH_FSGNJN_Q 0x26001053 +#define MASK_FSGNJN_Q 0xfe00707f +#define MATCH_FSGNJN_S 0x20001053 +#define MASK_FSGNJN_S 0xfe00707f +#define MATCH_FSGNJX_D 0x22002053 +#define MASK_FSGNJX_D 0xfe00707f +#define MATCH_FSGNJX_H 0x24002053 +#define MASK_FSGNJX_H 0xfe00707f +#define MATCH_FSGNJX_Q 0x26002053 +#define MASK_FSGNJX_Q 0xfe00707f +#define MATCH_FSGNJX_S 0x20002053 +#define MASK_FSGNJX_S 0xfe00707f +#define MATCH_FSH 0x1027 +#define MASK_FSH 0x707f +#define MATCH_FSL 0x4001033 +#define MASK_FSL 0x600707f +#define MATCH_FSLW 0x400103b +#define MASK_FSLW 0x600707f +#define MATCH_FSQ 0x4027 +#define MASK_FSQ 0x707f +#define MATCH_FSQRT_D 0x5a000053 +#define MASK_FSQRT_D 0xfff0007f +#define MATCH_FSQRT_H 0x5c000053 +#define MASK_FSQRT_H 0xfff0007f +#define MATCH_FSQRT_Q 0x5e000053 +#define MASK_FSQRT_Q 0xfff0007f +#define MATCH_FSQRT_S 0x58000053 +#define MASK_FSQRT_S 0xfff0007f +#define MATCH_FSR 0x4005033 +#define MASK_FSR 0x600707f +#define MATCH_FSRI 0x4005013 +#define MASK_FSRI 0x400707f +#define MATCH_FSRIW 0x400501b +#define MASK_FSRIW 0x600707f +#define MATCH_FSRW 0x400503b +#define MASK_FSRW 0x600707f +#define MATCH_FSUB_D 0xa000053 +#define MASK_FSUB_D 0xfe00007f +#define MATCH_FSUB_H 0xc000053 +#define MASK_FSUB_H 0xfe00007f +#define MATCH_FSUB_Q 0xe000053 +#define MASK_FSUB_Q 0xfe00007f +#define MATCH_FSUB_S 0x8000053 +#define MASK_FSUB_S 0xfe00007f +#define MATCH_FSW 0x2027 +#define MASK_FSW 0x707f 
+#define MATCH_GORC 0x28005033 +#define MASK_GORC 0xfe00707f +#define MATCH_GORCI 0x28005013 +#define MASK_GORCI 0xfc00707f +#define MATCH_GORCIW 0x2800501b +#define MASK_GORCIW 0xfe00707f +#define MATCH_GORCW 0x2800503b +#define MASK_GORCW 0xfe00707f +#define MATCH_GREV 0x68005033 +#define MASK_GREV 0xfe00707f +#define MATCH_GREVI 0x68005013 +#define MASK_GREVI 0xfc00707f +#define MATCH_GREVIW 0x6800501b +#define MASK_GREVIW 0xfe00707f +#define MATCH_GREVW 0x6800503b +#define MASK_GREVW 0xfe00707f +#define MATCH_HFENCE_GVMA 0x62000073 +#define MASK_HFENCE_GVMA 0xfe007fff +#define MATCH_HFENCE_VVMA 0x22000073 +#define MASK_HFENCE_VVMA 0xfe007fff +#define MATCH_HINVAL_GVMA 0x66000073 +#define MASK_HINVAL_GVMA 0xfe007fff +#define MATCH_HINVAL_VVMA 0x26000073 +#define MASK_HINVAL_VVMA 0xfe007fff +#define MATCH_HLV_B 0x60004073 +#define MASK_HLV_B 0xfff0707f +#define MATCH_HLV_BU 0x60104073 +#define MASK_HLV_BU 0xfff0707f +#define MATCH_HLV_D 0x6c004073 +#define MASK_HLV_D 0xfff0707f +#define MATCH_HLV_H 0x64004073 +#define MASK_HLV_H 0xfff0707f +#define MATCH_HLV_HU 0x64104073 +#define MASK_HLV_HU 0xfff0707f +#define MATCH_HLV_W 0x68004073 +#define MASK_HLV_W 0xfff0707f +#define MATCH_HLV_WU 0x68104073 +#define MASK_HLV_WU 0xfff0707f +#define MATCH_HLVX_HU 0x64304073 +#define MASK_HLVX_HU 0xfff0707f +#define MATCH_HLVX_WU 0x68304073 +#define MASK_HLVX_WU 0xfff0707f +#define MATCH_HSV_B 0x62004073 +#define MASK_HSV_B 0xfe007fff +#define MATCH_HSV_D 0x6e004073 +#define MASK_HSV_D 0xfe007fff +#define MATCH_HSV_H 0x66004073 +#define MASK_HSV_H 0xfe007fff +#define MATCH_HSV_W 0x6a004073 +#define MASK_HSV_W 0xfe007fff +#define MATCH_INSB 0xac000077 +#define MASK_INSB 0xff80707f +#define MATCH_JAL 0x6f +#define MASK_JAL 0x7f +#define MATCH_JALR 0x67 +#define MASK_JALR 0x707f +#define MATCH_KABS16 0xad100077 +#define MASK_KABS16 0xfff0707f +#define MATCH_KABS32 0xad200077 +#define MASK_KABS32 0xfff0707f +#define MATCH_KABS8 0xad000077 +#define MASK_KABS8 0xfff0707f +#define MATCH_KABSW 0xad400077 +#define MASK_KABSW 0xfff0707f +#define MATCH_KADD16 0x10000077 +#define MASK_KADD16 0xfe00707f +#define MATCH_KADD32 0x10002077 +#define MASK_KADD32 0xfe00707f +#define MATCH_KADD64 0x90001077 +#define MASK_KADD64 0xfe00707f +#define MATCH_KADD8 0x18000077 +#define MASK_KADD8 0xfe00707f +#define MATCH_KADDH 0x4001077 +#define MASK_KADDH 0xfe00707f +#define MATCH_KADDW 0x1077 +#define MASK_KADDW 0xfe00707f +#define MATCH_KCRAS16 0x14000077 +#define MASK_KCRAS16 0xfe00707f +#define MATCH_KCRAS32 0x14002077 +#define MASK_KCRAS32 0xfe00707f +#define MATCH_KCRSA16 0x16000077 +#define MASK_KCRSA16 0xfe00707f +#define MATCH_KCRSA32 0x16002077 +#define MASK_KCRSA32 0xfe00707f +#define MATCH_KDMABB 0xd2001077 +#define MASK_KDMABB 0xfe00707f +#define MATCH_KDMABB16 0xd8001077 +#define MASK_KDMABB16 0xfe00707f +#define MATCH_KDMABT 0xe2001077 +#define MASK_KDMABT 0xfe00707f +#define MATCH_KDMABT16 0xe8001077 +#define MASK_KDMABT16 0xfe00707f +#define MATCH_KDMATT 0xf2001077 +#define MASK_KDMATT 0xfe00707f +#define MATCH_KDMATT16 0xf8001077 +#define MASK_KDMATT16 0xfe00707f +#define MATCH_KDMBB 0xa001077 +#define MASK_KDMBB 0xfe00707f +#define MATCH_KDMBB16 0xda001077 +#define MASK_KDMBB16 0xfe00707f +#define MATCH_KDMBT 0x1a001077 +#define MASK_KDMBT 0xfe00707f +#define MATCH_KDMBT16 0xea001077 +#define MASK_KDMBT16 0xfe00707f +#define MATCH_KDMTT 0x2a001077 +#define MASK_KDMTT 0xfe00707f +#define MATCH_KDMTT16 0xfa001077 +#define MASK_KDMTT16 0xfe00707f +#define MATCH_KHM16 0x86000077 +#define MASK_KHM16 0xfe00707f 
+#define MATCH_KHM8 0x8e000077 +#define MASK_KHM8 0xfe00707f +#define MATCH_KHMBB 0xc001077 +#define MASK_KHMBB 0xfe00707f +#define MATCH_KHMBB16 0xdc001077 +#define MASK_KHMBB16 0xfe00707f +#define MATCH_KHMBT 0x1c001077 +#define MASK_KHMBT 0xfe00707f +#define MATCH_KHMBT16 0xec001077 +#define MASK_KHMBT16 0xfe00707f +#define MATCH_KHMTT 0x2c001077 +#define MASK_KHMTT 0xfe00707f +#define MATCH_KHMTT16 0xfc001077 +#define MASK_KHMTT16 0xfe00707f +#define MATCH_KHMX16 0x96000077 +#define MASK_KHMX16 0xfe00707f +#define MATCH_KHMX8 0x9e000077 +#define MASK_KHMX8 0xfe00707f +#define MATCH_KMABB 0x5a001077 +#define MASK_KMABB 0xfe00707f +#define MATCH_KMABB32 0x5a002077 +#define MASK_KMABB32 0xfe00707f +#define MATCH_KMABT 0x6a001077 +#define MASK_KMABT 0xfe00707f +#define MATCH_KMABT32 0x6a002077 +#define MASK_KMABT32 0xfe00707f +#define MATCH_KMADA 0x48001077 +#define MASK_KMADA 0xfe00707f +#define MATCH_KMADRS 0x6c001077 +#define MASK_KMADRS 0xfe00707f +#define MATCH_KMADRS32 0x6c002077 +#define MASK_KMADRS32 0xfe00707f +#define MATCH_KMADS 0x5c001077 +#define MASK_KMADS 0xfe00707f +#define MATCH_KMADS32 0x5c002077 +#define MASK_KMADS32 0xfe00707f +#define MATCH_KMAR64 0x94001077 +#define MASK_KMAR64 0xfe00707f +#define MATCH_KMATT 0x7a001077 +#define MASK_KMATT 0xfe00707f +#define MATCH_KMATT32 0x7a002077 +#define MASK_KMATT32 0xfe00707f +#define MATCH_KMAXDA 0x4a001077 +#define MASK_KMAXDA 0xfe00707f +#define MATCH_KMAXDA32 0x4a002077 +#define MASK_KMAXDA32 0xfe00707f +#define MATCH_KMAXDS 0x7c001077 +#define MASK_KMAXDS 0xfe00707f +#define MATCH_KMAXDS32 0x7c002077 +#define MASK_KMAXDS32 0xfe00707f +#define MATCH_KMDA 0x38001077 +#define MASK_KMDA 0xfe00707f +#define MATCH_KMDA32 0x38002077 +#define MASK_KMDA32 0xfe00707f +#define MATCH_KMMAC 0x60001077 +#define MASK_KMMAC 0xfe00707f +#define MATCH_KMMAC_U 0x70001077 +#define MASK_KMMAC_U 0xfe00707f +#define MATCH_KMMAWB 0x46001077 +#define MASK_KMMAWB 0xfe00707f +#define MATCH_KMMAWB2 0xce001077 +#define MASK_KMMAWB2 0xfe00707f +#define MATCH_KMMAWB2_U 0xde001077 +#define MASK_KMMAWB2_U 0xfe00707f +#define MATCH_KMMAWB_U 0x56001077 +#define MASK_KMMAWB_U 0xfe00707f +#define MATCH_KMMAWT 0x66001077 +#define MASK_KMMAWT 0xfe00707f +#define MATCH_KMMAWT2 0xee001077 +#define MASK_KMMAWT2 0xfe00707f +#define MATCH_KMMAWT2_U 0xfe001077 +#define MASK_KMMAWT2_U 0xfe00707f +#define MATCH_KMMAWT_U 0x76001077 +#define MASK_KMMAWT_U 0xfe00707f +#define MATCH_KMMSB 0x42001077 +#define MASK_KMMSB 0xfe00707f +#define MATCH_KMMSB_U 0x52001077 +#define MASK_KMMSB_U 0xfe00707f +#define MATCH_KMMWB2 0x8e001077 +#define MASK_KMMWB2 0xfe00707f +#define MATCH_KMMWB2_U 0x9e001077 +#define MASK_KMMWB2_U 0xfe00707f +#define MATCH_KMMWT2 0xae001077 +#define MASK_KMMWT2 0xfe00707f +#define MATCH_KMMWT2_U 0xbe001077 +#define MASK_KMMWT2_U 0xfe00707f +#define MATCH_KMSDA 0x4c001077 +#define MASK_KMSDA 0xfe00707f +#define MATCH_KMSDA32 0x4c002077 +#define MASK_KMSDA32 0xfe00707f +#define MATCH_KMSR64 0x96001077 +#define MASK_KMSR64 0xfe00707f +#define MATCH_KMSXDA 0x4e001077 +#define MASK_KMSXDA 0xfe00707f +#define MATCH_KMSXDA32 0x4e002077 +#define MASK_KMSXDA32 0xfe00707f +#define MATCH_KMXDA 0x3a001077 +#define MASK_KMXDA 0xfe00707f +#define MATCH_KMXDA32 0x3a002077 +#define MASK_KMXDA32 0xfe00707f +#define MATCH_KSLL16 0x64000077 +#define MASK_KSLL16 0xfe00707f +#define MATCH_KSLL32 0x64002077 +#define MASK_KSLL32 0xfe00707f +#define MATCH_KSLL8 0x6c000077 +#define MASK_KSLL8 0xfe00707f +#define MATCH_KSLLI16 0x75000077 +#define MASK_KSLLI16 0xff00707f +#define 
MATCH_KSLLI32 0x84002077 +#define MASK_KSLLI32 0xfe00707f +#define MATCH_KSLLI8 0x7c800077 +#define MASK_KSLLI8 0xff80707f +#define MATCH_KSLLIW 0x36001077 +#define MASK_KSLLIW 0xfe00707f +#define MATCH_KSLLW 0x26001077 +#define MASK_KSLLW 0xfe00707f +#define MATCH_KSLRA16 0x56000077 +#define MASK_KSLRA16 0xfe00707f +#define MATCH_KSLRA16_U 0x66000077 +#define MASK_KSLRA16_U 0xfe00707f +#define MATCH_KSLRA32 0x56002077 +#define MASK_KSLRA32 0xfe00707f +#define MATCH_KSLRA32_U 0x66002077 +#define MASK_KSLRA32_U 0xfe00707f +#define MATCH_KSLRA8 0x5e000077 +#define MASK_KSLRA8 0xfe00707f +#define MATCH_KSLRA8_U 0x6e000077 +#define MASK_KSLRA8_U 0xfe00707f +#define MATCH_KSLRAW 0x6e001077 +#define MASK_KSLRAW 0xfe00707f +#define MATCH_KSLRAW_U 0x7e001077 +#define MASK_KSLRAW_U 0xfe00707f +#define MATCH_KSTAS16 0xc4002077 +#define MASK_KSTAS16 0xfe00707f +#define MATCH_KSTAS32 0xc0002077 +#define MASK_KSTAS32 0xfe00707f +#define MATCH_KSTSA16 0xc6002077 +#define MASK_KSTSA16 0xfe00707f +#define MATCH_KSTSA32 0xc2002077 +#define MASK_KSTSA32 0xfe00707f +#define MATCH_KSUB16 0x12000077 +#define MASK_KSUB16 0xfe00707f +#define MATCH_KSUB32 0x12002077 +#define MASK_KSUB32 0xfe00707f +#define MATCH_KSUB64 0x92001077 +#define MASK_KSUB64 0xfe00707f +#define MATCH_KSUB8 0x1a000077 +#define MASK_KSUB8 0xfe00707f +#define MATCH_KSUBH 0x6001077 +#define MASK_KSUBH 0xfe00707f +#define MATCH_KSUBW 0x2001077 +#define MASK_KSUBW 0xfe00707f +#define MATCH_KWMMUL 0x62001077 +#define MASK_KWMMUL 0xfe00707f +#define MATCH_KWMMUL_U 0x72001077 +#define MASK_KWMMUL_U 0xfe00707f +#define MATCH_LB 0x3 +#define MASK_LB 0x707f +#define MATCH_LBU 0x4003 +#define MASK_LBU 0x707f +#define MATCH_LD 0x3003 +#define MASK_LD 0x707f +#define MATCH_LH 0x1003 +#define MASK_LH 0x707f +#define MATCH_LHU 0x5003 +#define MASK_LHU 0x707f +#define MATCH_LR_D 0x1000302f +#define MASK_LR_D 0xf9f0707f +#define MATCH_LR_W 0x1000202f +#define MASK_LR_W 0xf9f0707f +#define MATCH_LUI 0x37 +#define MASK_LUI 0x7f +#define MATCH_LW 0x2003 +#define MASK_LW 0x707f +#define MATCH_LWU 0x6003 +#define MASK_LWU 0x707f +#define MATCH_MADDR32 0xc4001077 +#define MASK_MADDR32 0xfe00707f +#define MATCH_MAX 0xa006033 +#define MASK_MAX 0xfe00707f +#define MATCH_MAXU 0xa007033 +#define MASK_MAXU 0xfe00707f +#define MATCH_MAXW 0xf2000077 +#define MASK_MAXW 0xfe00707f +#define MATCH_MIN 0xa004033 +#define MASK_MIN 0xfe00707f +#define MATCH_MINU 0xa005033 +#define MASK_MINU 0xfe00707f +#define MATCH_MINW 0xf0000077 +#define MASK_MINW 0xfe00707f +#define MATCH_MRET 0x30200073 +#define MASK_MRET 0xffffffff +#define MATCH_MSUBR32 0xc6001077 +#define MASK_MSUBR32 0xfe00707f +#define MATCH_MUL 0x2000033 +#define MASK_MUL 0xfe00707f +#define MATCH_MULH 0x2001033 +#define MASK_MULH 0xfe00707f +#define MATCH_MULHSU 0x2002033 +#define MASK_MULHSU 0xfe00707f +#define MATCH_MULHU 0x2003033 +#define MASK_MULHU 0xfe00707f +#define MATCH_MULR64 0xf0001077 +#define MASK_MULR64 0xfe00707f +#define MATCH_MULSR64 0xe0001077 +#define MASK_MULSR64 0xfe00707f +#define MATCH_MULW 0x200003b +#define MASK_MULW 0xfe00707f +#define MATCH_OR 0x6033 +#define MASK_OR 0xfe00707f +#define MATCH_ORI 0x6013 +#define MASK_ORI 0x707f +#define MATCH_ORN 0x40006033 +#define MASK_ORN 0xfe00707f +#define MATCH_PACK 0x8004033 +#define MASK_PACK 0xfe00707f +#define MATCH_PACKH 0x8007033 +#define MASK_PACKH 0xfe00707f +#define MATCH_PACKU 0x48004033 +#define MASK_PACKU 0xfe00707f +#define MATCH_PACKUW 0x4800403b +#define MASK_PACKUW 0xfe00707f +#define MATCH_PACKW 0x800403b +#define MASK_PACKW 
0xfe00707f +#define MATCH_PAUSE 0x100000f +#define MASK_PAUSE 0xffffffff +#define MATCH_PBSAD 0xfc000077 +#define MASK_PBSAD 0xfe00707f +#define MATCH_PBSADA 0xfe000077 +#define MASK_PBSADA 0xfe00707f +#define MATCH_PKBB16 0xe001077 +#define MASK_PKBB16 0xfe00707f +#define MATCH_PKBB32 0xe002077 +#define MASK_PKBB32 0xfe00707f +#define MATCH_PKBT16 0x1e001077 +#define MASK_PKBT16 0xfe00707f +#define MATCH_PKBT32 0x1e002077 +#define MASK_PKBT32 0xfe00707f +#define MATCH_PKTB16 0x3e001077 +#define MASK_PKTB16 0xfe00707f +#define MATCH_PKTB32 0x3e002077 +#define MASK_PKTB32 0xfe00707f +#define MATCH_PKTT16 0x2e001077 +#define MASK_PKTT16 0xfe00707f +#define MATCH_PKTT32 0x2e002077 +#define MASK_PKTT32 0xfe00707f +#define MATCH_PREFETCH_I 0x6013 +#define MASK_PREFETCH_I 0x1f07fff +#define MATCH_PREFETCH_R 0x106013 +#define MASK_PREFETCH_R 0x1f07fff +#define MATCH_PREFETCH_W 0x306013 +#define MASK_PREFETCH_W 0x1f07fff +#define MATCH_RADD16 0x77 +#define MASK_RADD16 0xfe00707f +#define MATCH_RADD32 0x2077 +#define MASK_RADD32 0xfe00707f +#define MATCH_RADD64 0x80001077 +#define MASK_RADD64 0xfe00707f +#define MATCH_RADD8 0x8000077 +#define MASK_RADD8 0xfe00707f +#define MATCH_RADDW 0x20001077 +#define MASK_RADDW 0xfe00707f +#define MATCH_RCRAS16 0x4000077 +#define MASK_RCRAS16 0xfe00707f +#define MATCH_RCRAS32 0x4002077 +#define MASK_RCRAS32 0xfe00707f +#define MATCH_RCRSA16 0x6000077 +#define MASK_RCRSA16 0xfe00707f +#define MATCH_RCRSA32 0x6002077 +#define MASK_RCRSA32 0xfe00707f +#define MATCH_REM 0x2006033 +#define MASK_REM 0xfe00707f +#define MATCH_REMU 0x2007033 +#define MASK_REMU 0xfe00707f +#define MATCH_REMUW 0x200703b +#define MASK_REMUW 0xfe00707f +#define MATCH_REMW 0x200603b +#define MASK_REMW 0xfe00707f +#define MATCH_ROL 0x60001033 +#define MASK_ROL 0xfe00707f +#define MATCH_ROLW 0x6000103b +#define MASK_ROLW 0xfe00707f +#define MATCH_ROR 0x60005033 +#define MASK_ROR 0xfe00707f +#define MATCH_RORI 0x60005013 +#define MASK_RORI 0xfc00707f +#define MATCH_RORIW 0x6000501b +#define MASK_RORIW 0xfe00707f +#define MATCH_RORW 0x6000503b +#define MASK_RORW 0xfe00707f +#define MATCH_RSTAS16 0xb4002077 +#define MASK_RSTAS16 0xfe00707f +#define MATCH_RSTAS32 0xb0002077 +#define MASK_RSTAS32 0xfe00707f +#define MATCH_RSTSA16 0xb6002077 +#define MASK_RSTSA16 0xfe00707f +#define MATCH_RSTSA32 0xb2002077 +#define MASK_RSTSA32 0xfe00707f +#define MATCH_RSUB16 0x2000077 +#define MASK_RSUB16 0xfe00707f +#define MATCH_RSUB32 0x2002077 +#define MASK_RSUB32 0xfe00707f +#define MATCH_RSUB64 0x82001077 +#define MASK_RSUB64 0xfe00707f +#define MATCH_RSUB8 0xa000077 +#define MASK_RSUB8 0xfe00707f +#define MATCH_RSUBW 0x22001077 +#define MASK_RSUBW 0xfe00707f +#define MATCH_SB 0x23 +#define MASK_SB 0x707f +#define MATCH_SC_D 0x1800302f +#define MASK_SC_D 0xf800707f +#define MATCH_SC_W 0x1800202f +#define MASK_SC_W 0xf800707f +#define MATCH_SCLIP16 0x84000077 +#define MASK_SCLIP16 0xff00707f +#define MATCH_SCLIP32 0xe4000077 +#define MASK_SCLIP32 0xfe00707f +#define MATCH_SCLIP8 0x8c000077 +#define MASK_SCLIP8 0xff80707f +#define MATCH_SCMPLE16 0x1c000077 +#define MASK_SCMPLE16 0xfe00707f +#define MATCH_SCMPLE8 0x1e000077 +#define MASK_SCMPLE8 0xfe00707f +#define MATCH_SCMPLT16 0xc000077 +#define MASK_SCMPLT16 0xfe00707f +#define MATCH_SCMPLT8 0xe000077 +#define MASK_SCMPLT8 0xfe00707f +#define MATCH_SD 0x3023 +#define MASK_SD 0x707f +#define MATCH_SEXT_B 0x60401013 +#define MASK_SEXT_B 0xfff0707f +#define MATCH_SEXT_H 0x60501013 +#define MASK_SEXT_H 0xfff0707f +#define MATCH_SFENCE_INVAL_IR 0x18100073 
+#define MASK_SFENCE_INVAL_IR 0xffffffff +#define MATCH_SFENCE_VMA 0x12000073 +#define MASK_SFENCE_VMA 0xfe007fff +#define MATCH_SFENCE_W_INVAL 0x18000073 +#define MASK_SFENCE_W_INVAL 0xffffffff +#define MATCH_SH 0x1023 +#define MASK_SH 0x707f +#define MATCH_SH1ADD 0x20002033 +#define MASK_SH1ADD 0xfe00707f +#define MATCH_SH1ADD_UW 0x2000203b +#define MASK_SH1ADD_UW 0xfe00707f +#define MATCH_SH2ADD 0x20004033 +#define MASK_SH2ADD 0xfe00707f +#define MATCH_SH2ADD_UW 0x2000403b +#define MASK_SH2ADD_UW 0xfe00707f +#define MATCH_SH3ADD 0x20006033 +#define MASK_SH3ADD 0xfe00707f +#define MATCH_SH3ADD_UW 0x2000603b +#define MASK_SH3ADD_UW 0xfe00707f +#define MATCH_SHA256SIG0 0x10201013 +#define MASK_SHA256SIG0 0xfff0707f +#define MATCH_SHA256SIG1 0x10301013 +#define MASK_SHA256SIG1 0xfff0707f +#define MATCH_SHA256SUM0 0x10001013 +#define MASK_SHA256SUM0 0xfff0707f +#define MATCH_SHA256SUM1 0x10101013 +#define MASK_SHA256SUM1 0xfff0707f +#define MATCH_SHA512SIG0 0x10601013 +#define MASK_SHA512SIG0 0xfff0707f +#define MATCH_SHA512SIG0H 0x5c000033 +#define MASK_SHA512SIG0H 0xfe00707f +#define MATCH_SHA512SIG0L 0x54000033 +#define MASK_SHA512SIG0L 0xfe00707f +#define MATCH_SHA512SIG1 0x10701013 +#define MASK_SHA512SIG1 0xfff0707f +#define MATCH_SHA512SIG1H 0x5e000033 +#define MASK_SHA512SIG1H 0xfe00707f +#define MATCH_SHA512SIG1L 0x56000033 +#define MASK_SHA512SIG1L 0xfe00707f +#define MATCH_SHA512SUM0 0x10401013 +#define MASK_SHA512SUM0 0xfff0707f +#define MATCH_SHA512SUM0R 0x50000033 +#define MASK_SHA512SUM0R 0xfe00707f +#define MATCH_SHA512SUM1 0x10501013 +#define MASK_SHA512SUM1 0xfff0707f +#define MATCH_SHA512SUM1R 0x52000033 +#define MASK_SHA512SUM1R 0xfe00707f +#define MATCH_SHFL 0x8001033 +#define MASK_SHFL 0xfe00707f +#define MATCH_SHFLI 0x8001013 +#define MASK_SHFLI 0xfe00707f +#define MATCH_SHFLW 0x800103b +#define MASK_SHFLW 0xfe00707f +#define MATCH_SINVAL_VMA 0x16000073 +#define MASK_SINVAL_VMA 0xfe007fff +#define MATCH_SLL 0x1033 +#define MASK_SLL 0xfe00707f +#define MATCH_SLL16 0x54000077 +#define MASK_SLL16 0xfe00707f +#define MATCH_SLL32 0x54002077 +#define MASK_SLL32 0xfe00707f +#define MATCH_SLL8 0x5c000077 +#define MASK_SLL8 0xfe00707f +#define MATCH_SLLI 0x1013 +#define MASK_SLLI 0xfc00707f +#define MATCH_SLLI16 0x74000077 +#define MASK_SLLI16 0xff00707f +#define MATCH_SLLI32 0x74002077 +#define MASK_SLLI32 0xfe00707f +#define MATCH_SLLI8 0x7c000077 +#define MASK_SLLI8 0xff80707f +#define MATCH_SLLI_UW 0x800101b +#define MASK_SLLI_UW 0xfc00707f +#define MATCH_SLLIW 0x101b +#define MASK_SLLIW 0xfe00707f +#define MATCH_SLLW 0x103b +#define MASK_SLLW 0xfe00707f +#define MATCH_SLO 0x20001033 +#define MASK_SLO 0xfe00707f +#define MATCH_SLOI 0x20001013 +#define MASK_SLOI 0xfc00707f +#define MATCH_SLOIW 0x2000101b +#define MASK_SLOIW 0xfe00707f +#define MATCH_SLOW 0x2000103b +#define MASK_SLOW 0xfe00707f +#define MATCH_SLT 0x2033 +#define MASK_SLT 0xfe00707f +#define MATCH_SLTI 0x2013 +#define MASK_SLTI 0x707f +#define MATCH_SLTIU 0x3013 +#define MASK_SLTIU 0x707f +#define MATCH_SLTU 0x3033 +#define MASK_SLTU 0xfe00707f +#define MATCH_SM3P0 0x10801013 +#define MASK_SM3P0 0xfff0707f +#define MATCH_SM3P1 0x10901013 +#define MASK_SM3P1 0xfff0707f +#define MATCH_SM4ED 0x30000033 +#define MASK_SM4ED 0x3e00707f +#define MATCH_SM4KS 0x34000033 +#define MASK_SM4KS 0x3e00707f +#define MATCH_SMAL 0x5e001077 +#define MASK_SMAL 0xfe00707f +#define MATCH_SMALBB 0x88001077 +#define MASK_SMALBB 0xfe00707f +#define MATCH_SMALBT 0x98001077 +#define MASK_SMALBT 0xfe00707f +#define MATCH_SMALDA 
0x8c001077 +#define MASK_SMALDA 0xfe00707f +#define MATCH_SMALDRS 0x9a001077 +#define MASK_SMALDRS 0xfe00707f +#define MATCH_SMALDS 0x8a001077 +#define MASK_SMALDS 0xfe00707f +#define MATCH_SMALTT 0xa8001077 +#define MASK_SMALTT 0xfe00707f +#define MATCH_SMALXDA 0x9c001077 +#define MASK_SMALXDA 0xfe00707f +#define MATCH_SMALXDS 0xaa001077 +#define MASK_SMALXDS 0xfe00707f +#define MATCH_SMAQA 0xc8000077 +#define MASK_SMAQA 0xfe00707f +#define MATCH_SMAQA_SU 0xca000077 +#define MASK_SMAQA_SU 0xfe00707f +#define MATCH_SMAR64 0x84001077 +#define MASK_SMAR64 0xfe00707f +#define MATCH_SMAX16 0x82000077 +#define MASK_SMAX16 0xfe00707f +#define MATCH_SMAX32 0x92002077 +#define MASK_SMAX32 0xfe00707f +#define MATCH_SMAX8 0x8a000077 +#define MASK_SMAX8 0xfe00707f +#define MATCH_SMBB16 0x8001077 +#define MASK_SMBB16 0xfe00707f +#define MATCH_SMBT16 0x18001077 +#define MASK_SMBT16 0xfe00707f +#define MATCH_SMBT32 0x18002077 +#define MASK_SMBT32 0xfe00707f +#define MATCH_SMDRS 0x68001077 +#define MASK_SMDRS 0xfe00707f +#define MATCH_SMDRS32 0x68002077 +#define MASK_SMDRS32 0xfe00707f +#define MATCH_SMDS 0x58001077 +#define MASK_SMDS 0xfe00707f +#define MATCH_SMDS32 0x58002077 +#define MASK_SMDS32 0xfe00707f +#define MATCH_SMIN16 0x80000077 +#define MASK_SMIN16 0xfe00707f +#define MATCH_SMIN32 0x90002077 +#define MASK_SMIN32 0xfe00707f +#define MATCH_SMIN8 0x88000077 +#define MASK_SMIN8 0xfe00707f +#define MATCH_SMMUL 0x40001077 +#define MASK_SMMUL 0xfe00707f +#define MATCH_SMMUL_U 0x50001077 +#define MASK_SMMUL_U 0xfe00707f +#define MATCH_SMMWB 0x44001077 +#define MASK_SMMWB 0xfe00707f +#define MATCH_SMMWB_U 0x54001077 +#define MASK_SMMWB_U 0xfe00707f +#define MATCH_SMMWT 0x64001077 +#define MASK_SMMWT 0xfe00707f +#define MATCH_SMMWT_U 0x74001077 +#define MASK_SMMWT_U 0xfe00707f +#define MATCH_SMSLDA 0xac001077 +#define MASK_SMSLDA 0xfe00707f +#define MATCH_SMSLXDA 0xbc001077 +#define MASK_SMSLXDA 0xfe00707f +#define MATCH_SMSR64 0x86001077 +#define MASK_SMSR64 0xfe00707f +#define MATCH_SMTT16 0x28001077 +#define MASK_SMTT16 0xfe00707f +#define MATCH_SMTT32 0x28002077 +#define MASK_SMTT32 0xfe00707f +#define MATCH_SMUL16 0xa0000077 +#define MASK_SMUL16 0xfe00707f +#define MATCH_SMUL8 0xa8000077 +#define MASK_SMUL8 0xfe00707f +#define MATCH_SMULX16 0xa2000077 +#define MASK_SMULX16 0xfe00707f +#define MATCH_SMULX8 0xaa000077 +#define MASK_SMULX8 0xfe00707f +#define MATCH_SMXDS 0x78001077 +#define MASK_SMXDS 0xfe00707f +#define MATCH_SMXDS32 0x78002077 +#define MASK_SMXDS32 0xfe00707f +#define MATCH_SRA 0x40005033 +#define MASK_SRA 0xfe00707f +#define MATCH_SRA16 0x50000077 +#define MASK_SRA16 0xfe00707f +#define MATCH_SRA16_U 0x60000077 +#define MASK_SRA16_U 0xfe00707f +#define MATCH_SRA32 0x50002077 +#define MASK_SRA32 0xfe00707f +#define MATCH_SRA32_U 0x60002077 +#define MASK_SRA32_U 0xfe00707f +#define MATCH_SRA8 0x58000077 +#define MASK_SRA8 0xfe00707f +#define MATCH_SRA8_U 0x68000077 +#define MASK_SRA8_U 0xfe00707f +#define MATCH_SRA_U 0x24001077 +#define MASK_SRA_U 0xfe00707f +#define MATCH_SRAI 0x40005013 +#define MASK_SRAI 0xfc00707f +#define MATCH_SRAI16 0x70000077 +#define MASK_SRAI16 0xff00707f +#define MATCH_SRAI16_U 0x71000077 +#define MASK_SRAI16_U 0xff00707f +#define MATCH_SRAI32 0x70002077 +#define MASK_SRAI32 0xfe00707f +#define MATCH_SRAI32_U 0x80002077 +#define MASK_SRAI32_U 0xfe00707f +#define MATCH_SRAI8 0x78000077 +#define MASK_SRAI8 0xff80707f +#define MATCH_SRAI8_U 0x78800077 +#define MASK_SRAI8_U 0xff80707f +#define MATCH_SRAI_U 0xd4001077 +#define MASK_SRAI_U 0xfc00707f +#define 
MATCH_SRAIW 0x4000501b +#define MASK_SRAIW 0xfe00707f +#define MATCH_SRAIW_U 0x34001077 +#define MASK_SRAIW_U 0xfe00707f +#define MATCH_SRAW 0x4000503b +#define MASK_SRAW 0xfe00707f +#define MATCH_SRET 0x10200073 +#define MASK_SRET 0xffffffff +#define MATCH_SRL 0x5033 +#define MASK_SRL 0xfe00707f +#define MATCH_SRL16 0x52000077 +#define MASK_SRL16 0xfe00707f +#define MATCH_SRL16_U 0x62000077 +#define MASK_SRL16_U 0xfe00707f +#define MATCH_SRL32 0x52002077 +#define MASK_SRL32 0xfe00707f +#define MATCH_SRL32_U 0x62002077 +#define MASK_SRL32_U 0xfe00707f +#define MATCH_SRL8 0x5a000077 +#define MASK_SRL8 0xfe00707f +#define MATCH_SRL8_U 0x6a000077 +#define MASK_SRL8_U 0xfe00707f +#define MATCH_SRLI 0x5013 +#define MASK_SRLI 0xfc00707f +#define MATCH_SRLI16 0x72000077 +#define MASK_SRLI16 0xff00707f +#define MATCH_SRLI16_U 0x73000077 +#define MASK_SRLI16_U 0xff00707f +#define MATCH_SRLI32 0x72002077 +#define MASK_SRLI32 0xfe00707f +#define MATCH_SRLI32_U 0x82002077 +#define MASK_SRLI32_U 0xfe00707f +#define MATCH_SRLI8 0x7a000077 +#define MASK_SRLI8 0xff80707f +#define MATCH_SRLI8_U 0x7a800077 +#define MASK_SRLI8_U 0xff80707f +#define MATCH_SRLIW 0x501b +#define MASK_SRLIW 0xfe00707f +#define MATCH_SRLW 0x503b +#define MASK_SRLW 0xfe00707f +#define MATCH_SRO 0x20005033 +#define MASK_SRO 0xfe00707f +#define MATCH_SROI 0x20005013 +#define MASK_SROI 0xfc00707f +#define MATCH_SROIW 0x2000501b +#define MASK_SROIW 0xfe00707f +#define MATCH_SROW 0x2000503b +#define MASK_SROW 0xfe00707f +#define MATCH_STAS16 0xf4002077 +#define MASK_STAS16 0xfe00707f +#define MATCH_STAS32 0xf0002077 +#define MASK_STAS32 0xfe00707f +#define MATCH_STSA16 0xf6002077 +#define MASK_STSA16 0xfe00707f +#define MATCH_STSA32 0xf2002077 +#define MASK_STSA32 0xfe00707f +#define MATCH_SUB 0x40000033 +#define MASK_SUB 0xfe00707f +#define MATCH_SUB16 0x42000077 +#define MASK_SUB16 0xfe00707f +#define MATCH_SUB32 0x42002077 +#define MASK_SUB32 0xfe00707f +#define MATCH_SUB64 0xc2001077 +#define MASK_SUB64 0xfe00707f +#define MATCH_SUB8 0x4a000077 +#define MASK_SUB8 0xfe00707f +#define MATCH_SUBW 0x4000003b +#define MASK_SUBW 0xfe00707f +#define MATCH_SUNPKD810 0xac800077 +#define MASK_SUNPKD810 0xfff0707f +#define MATCH_SUNPKD820 0xac900077 +#define MASK_SUNPKD820 0xfff0707f +#define MATCH_SUNPKD830 0xaca00077 +#define MASK_SUNPKD830 0xfff0707f +#define MATCH_SUNPKD831 0xacb00077 +#define MASK_SUNPKD831 0xfff0707f +#define MATCH_SUNPKD832 0xad300077 +#define MASK_SUNPKD832 0xfff0707f +#define MATCH_SW 0x2023 +#define MASK_SW 0x707f +#define MATCH_SWAP8 0xad800077 +#define MASK_SWAP8 0xfff0707f +#define MATCH_UCLIP16 0x85000077 +#define MASK_UCLIP16 0xff00707f +#define MATCH_UCLIP32 0xf4000077 +#define MASK_UCLIP32 0xfe00707f +#define MATCH_UCLIP8 0x8d000077 +#define MASK_UCLIP8 0xff80707f +#define MATCH_UCMPLE16 0x3c000077 +#define MASK_UCMPLE16 0xfe00707f +#define MATCH_UCMPLE8 0x3e000077 +#define MASK_UCMPLE8 0xfe00707f +#define MATCH_UCMPLT16 0x2c000077 +#define MASK_UCMPLT16 0xfe00707f +#define MATCH_UCMPLT8 0x2e000077 +#define MASK_UCMPLT8 0xfe00707f +#define MATCH_UKADD16 0x30000077 +#define MASK_UKADD16 0xfe00707f +#define MATCH_UKADD32 0x30002077 +#define MASK_UKADD32 0xfe00707f +#define MATCH_UKADD64 0xb0001077 +#define MASK_UKADD64 0xfe00707f +#define MATCH_UKADD8 0x38000077 +#define MASK_UKADD8 0xfe00707f +#define MATCH_UKADDH 0x14001077 +#define MASK_UKADDH 0xfe00707f +#define MATCH_UKADDW 0x10001077 +#define MASK_UKADDW 0xfe00707f +#define MATCH_UKCRAS16 0x34000077 +#define MASK_UKCRAS16 0xfe00707f +#define 
MATCH_UKCRAS32 0x34002077 +#define MASK_UKCRAS32 0xfe00707f +#define MATCH_UKCRSA16 0x36000077 +#define MASK_UKCRSA16 0xfe00707f +#define MATCH_UKCRSA32 0x36002077 +#define MASK_UKCRSA32 0xfe00707f +#define MATCH_UKMAR64 0xb4001077 +#define MASK_UKMAR64 0xfe00707f +#define MATCH_UKMSR64 0xb6001077 +#define MASK_UKMSR64 0xfe00707f +#define MATCH_UKSTAS16 0xe4002077 +#define MASK_UKSTAS16 0xfe00707f +#define MATCH_UKSTAS32 0xe0002077 +#define MASK_UKSTAS32 0xfe00707f +#define MATCH_UKSTSA16 0xe6002077 +#define MASK_UKSTSA16 0xfe00707f +#define MATCH_UKSTSA32 0xe2002077 +#define MASK_UKSTSA32 0xfe00707f +#define MATCH_UKSUB16 0x32000077 +#define MASK_UKSUB16 0xfe00707f +#define MATCH_UKSUB32 0x32002077 +#define MASK_UKSUB32 0xfe00707f +#define MATCH_UKSUB64 0xb2001077 +#define MASK_UKSUB64 0xfe00707f +#define MATCH_UKSUB8 0x3a000077 +#define MASK_UKSUB8 0xfe00707f +#define MATCH_UKSUBH 0x16001077 +#define MASK_UKSUBH 0xfe00707f +#define MATCH_UKSUBW 0x12001077 +#define MASK_UKSUBW 0xfe00707f +#define MATCH_UMAQA 0xcc000077 +#define MASK_UMAQA 0xfe00707f +#define MATCH_UMAR64 0xa4001077 +#define MASK_UMAR64 0xfe00707f +#define MATCH_UMAX16 0x92000077 +#define MASK_UMAX16 0xfe00707f +#define MATCH_UMAX32 0xa2002077 +#define MASK_UMAX32 0xfe00707f +#define MATCH_UMAX8 0x9a000077 +#define MASK_UMAX8 0xfe00707f +#define MATCH_UMIN16 0x90000077 +#define MASK_UMIN16 0xfe00707f +#define MATCH_UMIN32 0xa0002077 +#define MASK_UMIN32 0xfe00707f +#define MATCH_UMIN8 0x98000077 +#define MASK_UMIN8 0xfe00707f +#define MATCH_UMSR64 0xa6001077 +#define MASK_UMSR64 0xfe00707f +#define MATCH_UMUL16 0xb0000077 +#define MASK_UMUL16 0xfe00707f +#define MATCH_UMUL8 0xb8000077 +#define MASK_UMUL8 0xfe00707f +#define MATCH_UMULX16 0xb2000077 +#define MASK_UMULX16 0xfe00707f +#define MATCH_UMULX8 0xba000077 +#define MASK_UMULX8 0xfe00707f +#define MATCH_UNSHFL 0x8005033 +#define MASK_UNSHFL 0xfe00707f +#define MATCH_UNSHFLI 0x8005013 +#define MASK_UNSHFLI 0xfe00707f +#define MATCH_UNSHFLW 0x800503b +#define MASK_UNSHFLW 0xfe00707f +#define MATCH_URADD16 0x20000077 +#define MASK_URADD16 0xfe00707f +#define MATCH_URADD32 0x20002077 +#define MASK_URADD32 0xfe00707f +#define MATCH_URADD64 0xa0001077 +#define MASK_URADD64 0xfe00707f +#define MATCH_URADD8 0x28000077 +#define MASK_URADD8 0xfe00707f +#define MATCH_URADDW 0x30001077 +#define MASK_URADDW 0xfe00707f +#define MATCH_URCRAS16 0x24000077 +#define MASK_URCRAS16 0xfe00707f +#define MATCH_URCRAS32 0x24002077 +#define MASK_URCRAS32 0xfe00707f +#define MATCH_URCRSA16 0x26000077 +#define MASK_URCRSA16 0xfe00707f +#define MATCH_URCRSA32 0x26002077 +#define MASK_URCRSA32 0xfe00707f +#define MATCH_URSTAS16 0xd4002077 +#define MASK_URSTAS16 0xfe00707f +#define MATCH_URSTAS32 0xd0002077 +#define MASK_URSTAS32 0xfe00707f +#define MATCH_URSTSA16 0xd6002077 +#define MASK_URSTSA16 0xfe00707f +#define MATCH_URSTSA32 0xd2002077 +#define MASK_URSTSA32 0xfe00707f +#define MATCH_URSUB16 0x22000077 +#define MASK_URSUB16 0xfe00707f +#define MATCH_URSUB32 0x22002077 +#define MASK_URSUB32 0xfe00707f +#define MATCH_URSUB64 0xa2001077 +#define MASK_URSUB64 0xfe00707f +#define MATCH_URSUB8 0x2a000077 +#define MASK_URSUB8 0xfe00707f +#define MATCH_URSUBW 0x32001077 +#define MASK_URSUBW 0xfe00707f +#define MATCH_VAADD_VV 0x24002057 +#define MASK_VAADD_VV 0xfc00707f +#define MATCH_VAADD_VX 0x24006057 +#define MASK_VAADD_VX 0xfc00707f +#define MATCH_VAADDU_VV 0x20002057 +#define MASK_VAADDU_VV 0xfc00707f +#define MATCH_VAADDU_VX 0x20006057 +#define MASK_VAADDU_VX 0xfc00707f +#define 
MATCH_VADC_VIM 0x40003057 +#define MASK_VADC_VIM 0xfe00707f +#define MATCH_VADC_VVM 0x40000057 +#define MASK_VADC_VVM 0xfe00707f +#define MATCH_VADC_VXM 0x40004057 +#define MASK_VADC_VXM 0xfe00707f +#define MATCH_VADD_VI 0x3057 +#define MASK_VADD_VI 0xfc00707f +#define MATCH_VADD_VV 0x57 +#define MASK_VADD_VV 0xfc00707f +#define MATCH_VADD_VX 0x4057 +#define MASK_VADD_VX 0xfc00707f +#define MATCH_VAMOADDEI16_V 0x502f +#define MASK_VAMOADDEI16_V 0xf800707f +#define MATCH_VAMOADDEI32_V 0x602f +#define MASK_VAMOADDEI32_V 0xf800707f +#define MATCH_VAMOADDEI64_V 0x702f +#define MASK_VAMOADDEI64_V 0xf800707f +#define MATCH_VAMOADDEI8_V 0x2f +#define MASK_VAMOADDEI8_V 0xf800707f +#define MATCH_VAMOANDEI16_V 0x6000502f +#define MASK_VAMOANDEI16_V 0xf800707f +#define MATCH_VAMOANDEI32_V 0x6000602f +#define MASK_VAMOANDEI32_V 0xf800707f +#define MATCH_VAMOANDEI64_V 0x6000702f +#define MASK_VAMOANDEI64_V 0xf800707f +#define MATCH_VAMOANDEI8_V 0x6000002f +#define MASK_VAMOANDEI8_V 0xf800707f +#define MATCH_VAMOMAXEI16_V 0xa000502f +#define MASK_VAMOMAXEI16_V 0xf800707f +#define MATCH_VAMOMAXEI32_V 0xa000602f +#define MASK_VAMOMAXEI32_V 0xf800707f +#define MATCH_VAMOMAXEI64_V 0xa000702f +#define MASK_VAMOMAXEI64_V 0xf800707f +#define MATCH_VAMOMAXEI8_V 0xa000002f +#define MASK_VAMOMAXEI8_V 0xf800707f +#define MATCH_VAMOMAXUEI16_V 0xe000502f +#define MASK_VAMOMAXUEI16_V 0xf800707f +#define MATCH_VAMOMAXUEI32_V 0xe000602f +#define MASK_VAMOMAXUEI32_V 0xf800707f +#define MATCH_VAMOMAXUEI64_V 0xe000702f +#define MASK_VAMOMAXUEI64_V 0xf800707f +#define MATCH_VAMOMAXUEI8_V 0xe000002f +#define MASK_VAMOMAXUEI8_V 0xf800707f +#define MATCH_VAMOMINEI16_V 0x8000502f +#define MASK_VAMOMINEI16_V 0xf800707f +#define MATCH_VAMOMINEI32_V 0x8000602f +#define MASK_VAMOMINEI32_V 0xf800707f +#define MATCH_VAMOMINEI64_V 0x8000702f +#define MASK_VAMOMINEI64_V 0xf800707f +#define MATCH_VAMOMINEI8_V 0x8000002f +#define MASK_VAMOMINEI8_V 0xf800707f +#define MATCH_VAMOMINUEI16_V 0xc000502f +#define MASK_VAMOMINUEI16_V 0xf800707f +#define MATCH_VAMOMINUEI32_V 0xc000602f +#define MASK_VAMOMINUEI32_V 0xf800707f +#define MATCH_VAMOMINUEI64_V 0xc000702f +#define MASK_VAMOMINUEI64_V 0xf800707f +#define MATCH_VAMOMINUEI8_V 0xc000002f +#define MASK_VAMOMINUEI8_V 0xf800707f +#define MATCH_VAMOOREI16_V 0x4000502f +#define MASK_VAMOOREI16_V 0xf800707f +#define MATCH_VAMOOREI32_V 0x4000602f +#define MASK_VAMOOREI32_V 0xf800707f +#define MATCH_VAMOOREI64_V 0x4000702f +#define MASK_VAMOOREI64_V 0xf800707f +#define MATCH_VAMOOREI8_V 0x4000002f +#define MASK_VAMOOREI8_V 0xf800707f +#define MATCH_VAMOSWAPEI16_V 0x800502f +#define MASK_VAMOSWAPEI16_V 0xf800707f +#define MATCH_VAMOSWAPEI32_V 0x800602f +#define MASK_VAMOSWAPEI32_V 0xf800707f +#define MATCH_VAMOSWAPEI64_V 0x800702f +#define MASK_VAMOSWAPEI64_V 0xf800707f +#define MATCH_VAMOSWAPEI8_V 0x800002f +#define MASK_VAMOSWAPEI8_V 0xf800707f +#define MATCH_VAMOXOREI16_V 0x2000502f +#define MASK_VAMOXOREI16_V 0xf800707f +#define MATCH_VAMOXOREI32_V 0x2000602f +#define MASK_VAMOXOREI32_V 0xf800707f +#define MATCH_VAMOXOREI64_V 0x2000702f +#define MASK_VAMOXOREI64_V 0xf800707f +#define MATCH_VAMOXOREI8_V 0x2000002f +#define MASK_VAMOXOREI8_V 0xf800707f +#define MATCH_VAND_VI 0x24003057 +#define MASK_VAND_VI 0xfc00707f +#define MATCH_VAND_VV 0x24000057 +#define MASK_VAND_VV 0xfc00707f +#define MATCH_VAND_VX 0x24004057 +#define MASK_VAND_VX 0xfc00707f +#define MATCH_VASUB_VV 0x2c002057 +#define MASK_VASUB_VV 0xfc00707f +#define MATCH_VASUB_VX 0x2c006057 +#define MASK_VASUB_VX 0xfc00707f +#define 
MATCH_VASUBU_VV 0x28002057 +#define MASK_VASUBU_VV 0xfc00707f +#define MATCH_VASUBU_VX 0x28006057 +#define MASK_VASUBU_VX 0xfc00707f +#define MATCH_VCOMPRESS_VM 0x5e002057 +#define MASK_VCOMPRESS_VM 0xfe00707f +#define MATCH_VCPOP_M 0x40082057 +#define MASK_VCPOP_M 0xfc0ff07f +#define MATCH_VDIV_VV 0x84002057 +#define MASK_VDIV_VV 0xfc00707f +#define MATCH_VDIV_VX 0x84006057 +#define MASK_VDIV_VX 0xfc00707f +#define MATCH_VDIVU_VV 0x80002057 +#define MASK_VDIVU_VV 0xfc00707f +#define MATCH_VDIVU_VX 0x80006057 +#define MASK_VDIVU_VX 0xfc00707f +#define MATCH_VFADD_VF 0x5057 +#define MASK_VFADD_VF 0xfc00707f +#define MATCH_VFADD_VV 0x1057 +#define MASK_VFADD_VV 0xfc00707f +#define MATCH_VFCLASS_V 0x4c081057 +#define MASK_VFCLASS_V 0xfc0ff07f +#define MATCH_VFCVT_F_X_V 0x48019057 +#define MASK_VFCVT_F_X_V 0xfc0ff07f +#define MATCH_VFCVT_F_XU_V 0x48011057 +#define MASK_VFCVT_F_XU_V 0xfc0ff07f +#define MATCH_VFCVT_RTZ_X_F_V 0x48039057 +#define MASK_VFCVT_RTZ_X_F_V 0xfc0ff07f +#define MATCH_VFCVT_RTZ_XU_F_V 0x48031057 +#define MASK_VFCVT_RTZ_XU_F_V 0xfc0ff07f +#define MATCH_VFCVT_X_F_V 0x48009057 +#define MASK_VFCVT_X_F_V 0xfc0ff07f +#define MATCH_VFCVT_XU_F_V 0x48001057 +#define MASK_VFCVT_XU_F_V 0xfc0ff07f +#define MATCH_VFDIV_VF 0x80005057 +#define MASK_VFDIV_VF 0xfc00707f +#define MATCH_VFDIV_VV 0x80001057 +#define MASK_VFDIV_VV 0xfc00707f +#define MATCH_VFIRST_M 0x4008a057 +#define MASK_VFIRST_M 0xfc0ff07f +#define MATCH_VFMACC_VF 0xb0005057 +#define MASK_VFMACC_VF 0xfc00707f +#define MATCH_VFMACC_VV 0xb0001057 +#define MASK_VFMACC_VV 0xfc00707f +#define MATCH_VFMADD_VF 0xa0005057 +#define MASK_VFMADD_VF 0xfc00707f +#define MATCH_VFMADD_VV 0xa0001057 +#define MASK_VFMADD_VV 0xfc00707f +#define MATCH_VFMAX_VF 0x18005057 +#define MASK_VFMAX_VF 0xfc00707f +#define MATCH_VFMAX_VV 0x18001057 +#define MASK_VFMAX_VV 0xfc00707f +#define MATCH_VFMERGE_VFM 0x5c005057 +#define MASK_VFMERGE_VFM 0xfe00707f +#define MATCH_VFMIN_VF 0x10005057 +#define MASK_VFMIN_VF 0xfc00707f +#define MATCH_VFMIN_VV 0x10001057 +#define MASK_VFMIN_VV 0xfc00707f +#define MATCH_VFMSAC_VF 0xb8005057 +#define MASK_VFMSAC_VF 0xfc00707f +#define MATCH_VFMSAC_VV 0xb8001057 +#define MASK_VFMSAC_VV 0xfc00707f +#define MATCH_VFMSUB_VF 0xa8005057 +#define MASK_VFMSUB_VF 0xfc00707f +#define MATCH_VFMSUB_VV 0xa8001057 +#define MASK_VFMSUB_VV 0xfc00707f +#define MATCH_VFMUL_VF 0x90005057 +#define MASK_VFMUL_VF 0xfc00707f +#define MATCH_VFMUL_VV 0x90001057 +#define MASK_VFMUL_VV 0xfc00707f +#define MATCH_VFMV_F_S 0x42001057 +#define MASK_VFMV_F_S 0xfe0ff07f +#define MATCH_VFMV_S_F 0x42005057 +#define MASK_VFMV_S_F 0xfff0707f +#define MATCH_VFMV_V_F 0x5e005057 +#define MASK_VFMV_V_F 0xfff0707f +#define MATCH_VFNCVT_F_F_W 0x480a1057 +#define MASK_VFNCVT_F_F_W 0xfc0ff07f +#define MATCH_VFNCVT_F_X_W 0x48099057 +#define MASK_VFNCVT_F_X_W 0xfc0ff07f +#define MATCH_VFNCVT_F_XU_W 0x48091057 +#define MASK_VFNCVT_F_XU_W 0xfc0ff07f +#define MATCH_VFNCVT_ROD_F_F_W 0x480a9057 +#define MASK_VFNCVT_ROD_F_F_W 0xfc0ff07f +#define MATCH_VFNCVT_RTZ_X_F_W 0x480b9057 +#define MASK_VFNCVT_RTZ_X_F_W 0xfc0ff07f +#define MATCH_VFNCVT_RTZ_XU_F_W 0x480b1057 +#define MASK_VFNCVT_RTZ_XU_F_W 0xfc0ff07f +#define MATCH_VFNCVT_X_F_W 0x48089057 +#define MASK_VFNCVT_X_F_W 0xfc0ff07f +#define MATCH_VFNCVT_XU_F_W 0x48081057 +#define MASK_VFNCVT_XU_F_W 0xfc0ff07f +#define MATCH_VFNMACC_VF 0xb4005057 +#define MASK_VFNMACC_VF 0xfc00707f +#define MATCH_VFNMACC_VV 0xb4001057 +#define MASK_VFNMACC_VV 0xfc00707f +#define MATCH_VFNMADD_VF 0xa4005057 +#define MASK_VFNMADD_VF 
0xfc00707f +#define MATCH_VFNMADD_VV 0xa4001057 +#define MASK_VFNMADD_VV 0xfc00707f +#define MATCH_VFNMSAC_VF 0xbc005057 +#define MASK_VFNMSAC_VF 0xfc00707f +#define MATCH_VFNMSAC_VV 0xbc001057 +#define MASK_VFNMSAC_VV 0xfc00707f +#define MATCH_VFNMSUB_VF 0xac005057 +#define MASK_VFNMSUB_VF 0xfc00707f +#define MATCH_VFNMSUB_VV 0xac001057 +#define MASK_VFNMSUB_VV 0xfc00707f +#define MATCH_VFRDIV_VF 0x84005057 +#define MASK_VFRDIV_VF 0xfc00707f +#define MATCH_VFREC7_V 0x4c029057 +#define MASK_VFREC7_V 0xfc0ff07f +#define MATCH_VFREDMAX_VS 0x1c001057 +#define MASK_VFREDMAX_VS 0xfc00707f +#define MATCH_VFREDMIN_VS 0x14001057 +#define MASK_VFREDMIN_VS 0xfc00707f +#define MATCH_VFREDOSUM_VS 0xc001057 +#define MASK_VFREDOSUM_VS 0xfc00707f +#define MATCH_VFREDUSUM_VS 0x4001057 +#define MASK_VFREDUSUM_VS 0xfc00707f +#define MATCH_VFRSQRT7_V 0x4c021057 +#define MASK_VFRSQRT7_V 0xfc0ff07f +#define MATCH_VFRSUB_VF 0x9c005057 +#define MASK_VFRSUB_VF 0xfc00707f +#define MATCH_VFSGNJ_VF 0x20005057 +#define MASK_VFSGNJ_VF 0xfc00707f +#define MATCH_VFSGNJ_VV 0x20001057 +#define MASK_VFSGNJ_VV 0xfc00707f +#define MATCH_VFSGNJN_VF 0x24005057 +#define MASK_VFSGNJN_VF 0xfc00707f +#define MATCH_VFSGNJN_VV 0x24001057 +#define MASK_VFSGNJN_VV 0xfc00707f +#define MATCH_VFSGNJX_VF 0x28005057 +#define MASK_VFSGNJX_VF 0xfc00707f +#define MATCH_VFSGNJX_VV 0x28001057 +#define MASK_VFSGNJX_VV 0xfc00707f +#define MATCH_VFSLIDE1DOWN_VF 0x3c005057 +#define MASK_VFSLIDE1DOWN_VF 0xfc00707f +#define MATCH_VFSLIDE1UP_VF 0x38005057 +#define MASK_VFSLIDE1UP_VF 0xfc00707f +#define MATCH_VFSQRT_V 0x4c001057 +#define MASK_VFSQRT_V 0xfc0ff07f +#define MATCH_VFSUB_VF 0x8005057 +#define MASK_VFSUB_VF 0xfc00707f +#define MATCH_VFSUB_VV 0x8001057 +#define MASK_VFSUB_VV 0xfc00707f +#define MATCH_VFWADD_VF 0xc0005057 +#define MASK_VFWADD_VF 0xfc00707f +#define MATCH_VFWADD_VV 0xc0001057 +#define MASK_VFWADD_VV 0xfc00707f +#define MATCH_VFWADD_WF 0xd0005057 +#define MASK_VFWADD_WF 0xfc00707f +#define MATCH_VFWADD_WV 0xd0001057 +#define MASK_VFWADD_WV 0xfc00707f +#define MATCH_VFWCVT_F_F_V 0x48061057 +#define MASK_VFWCVT_F_F_V 0xfc0ff07f +#define MATCH_VFWCVT_F_X_V 0x48059057 +#define MASK_VFWCVT_F_X_V 0xfc0ff07f +#define MATCH_VFWCVT_F_XU_V 0x48051057 +#define MASK_VFWCVT_F_XU_V 0xfc0ff07f +#define MATCH_VFWCVT_RTZ_X_F_V 0x48079057 +#define MASK_VFWCVT_RTZ_X_F_V 0xfc0ff07f +#define MATCH_VFWCVT_RTZ_XU_F_V 0x48071057 +#define MASK_VFWCVT_RTZ_XU_F_V 0xfc0ff07f +#define MATCH_VFWCVT_X_F_V 0x48049057 +#define MASK_VFWCVT_X_F_V 0xfc0ff07f +#define MATCH_VFWCVT_XU_F_V 0x48041057 +#define MASK_VFWCVT_XU_F_V 0xfc0ff07f +#define MATCH_VFWMACC_VF 0xf0005057 +#define MASK_VFWMACC_VF 0xfc00707f +#define MATCH_VFWMACC_VV 0xf0001057 +#define MASK_VFWMACC_VV 0xfc00707f +#define MATCH_VFWMSAC_VF 0xf8005057 +#define MASK_VFWMSAC_VF 0xfc00707f +#define MATCH_VFWMSAC_VV 0xf8001057 +#define MASK_VFWMSAC_VV 0xfc00707f +#define MATCH_VFWMUL_VF 0xe0005057 +#define MASK_VFWMUL_VF 0xfc00707f +#define MATCH_VFWMUL_VV 0xe0001057 +#define MASK_VFWMUL_VV 0xfc00707f +#define MATCH_VFWNMACC_VF 0xf4005057 +#define MASK_VFWNMACC_VF 0xfc00707f +#define MATCH_VFWNMACC_VV 0xf4001057 +#define MASK_VFWNMACC_VV 0xfc00707f +#define MATCH_VFWNMSAC_VF 0xfc005057 +#define MASK_VFWNMSAC_VF 0xfc00707f +#define MATCH_VFWNMSAC_VV 0xfc001057 +#define MASK_VFWNMSAC_VV 0xfc00707f +#define MATCH_VFWREDOSUM_VS 0xcc001057 +#define MASK_VFWREDOSUM_VS 0xfc00707f +#define MATCH_VFWREDUSUM_VS 0xc4001057 +#define MASK_VFWREDUSUM_VS 0xfc00707f +#define MATCH_VFWSUB_VF 0xc8005057 +#define 
MASK_VFWSUB_VF 0xfc00707f +#define MATCH_VFWSUB_VV 0xc8001057 +#define MASK_VFWSUB_VV 0xfc00707f +#define MATCH_VFWSUB_WF 0xd8005057 +#define MASK_VFWSUB_WF 0xfc00707f +#define MATCH_VFWSUB_WV 0xd8001057 +#define MASK_VFWSUB_WV 0xfc00707f +#define MATCH_VID_V 0x5008a057 +#define MASK_VID_V 0xfdfff07f +#define MATCH_VIOTA_M 0x50082057 +#define MASK_VIOTA_M 0xfc0ff07f +#define MATCH_VL1RE16_V 0x2805007 +#define MASK_VL1RE16_V 0xfff0707f +#define MATCH_VL1RE32_V 0x2806007 +#define MASK_VL1RE32_V 0xfff0707f +#define MATCH_VL1RE64_V 0x2807007 +#define MASK_VL1RE64_V 0xfff0707f +#define MATCH_VL1RE8_V 0x2800007 +#define MASK_VL1RE8_V 0xfff0707f +#define MATCH_VL2RE16_V 0x22805007 +#define MASK_VL2RE16_V 0xfff0707f +#define MATCH_VL2RE32_V 0x22806007 +#define MASK_VL2RE32_V 0xfff0707f +#define MATCH_VL2RE64_V 0x22807007 +#define MASK_VL2RE64_V 0xfff0707f +#define MATCH_VL2RE8_V 0x22800007 +#define MASK_VL2RE8_V 0xfff0707f +#define MATCH_VL4RE16_V 0x62805007 +#define MASK_VL4RE16_V 0xfff0707f +#define MATCH_VL4RE32_V 0x62806007 +#define MASK_VL4RE32_V 0xfff0707f +#define MATCH_VL4RE64_V 0x62807007 +#define MASK_VL4RE64_V 0xfff0707f +#define MATCH_VL4RE8_V 0x62800007 +#define MASK_VL4RE8_V 0xfff0707f +#define MATCH_VL8RE16_V 0xe2805007 +#define MASK_VL8RE16_V 0xfff0707f +#define MATCH_VL8RE32_V 0xe2806007 +#define MASK_VL8RE32_V 0xfff0707f +#define MATCH_VL8RE64_V 0xe2807007 +#define MASK_VL8RE64_V 0xfff0707f +#define MATCH_VL8RE8_V 0xe2800007 +#define MASK_VL8RE8_V 0xfff0707f +#define MATCH_VLE1024_V 0x10007007 +#define MASK_VLE1024_V 0x1df0707f +#define MATCH_VLE1024FF_V 0x11007007 +#define MASK_VLE1024FF_V 0x1df0707f +#define MATCH_VLE128_V 0x10000007 +#define MASK_VLE128_V 0x1df0707f +#define MATCH_VLE128FF_V 0x11000007 +#define MASK_VLE128FF_V 0x1df0707f +#define MATCH_VLE16_V 0x5007 +#define MASK_VLE16_V 0x1df0707f +#define MATCH_VLE16FF_V 0x1005007 +#define MASK_VLE16FF_V 0x1df0707f +#define MATCH_VLE256_V 0x10005007 +#define MASK_VLE256_V 0x1df0707f +#define MATCH_VLE256FF_V 0x11005007 +#define MASK_VLE256FF_V 0x1df0707f +#define MATCH_VLE32_V 0x6007 +#define MASK_VLE32_V 0x1df0707f +#define MATCH_VLE32FF_V 0x1006007 +#define MASK_VLE32FF_V 0x1df0707f +#define MATCH_VLE512_V 0x10006007 +#define MASK_VLE512_V 0x1df0707f +#define MATCH_VLE512FF_V 0x11006007 +#define MASK_VLE512FF_V 0x1df0707f +#define MATCH_VLE64_V 0x7007 +#define MASK_VLE64_V 0x1df0707f +#define MATCH_VLE64FF_V 0x1007007 +#define MASK_VLE64FF_V 0x1df0707f +#define MATCH_VLE8_V 0x7 +#define MASK_VLE8_V 0x1df0707f +#define MATCH_VLE8FF_V 0x1000007 +#define MASK_VLE8FF_V 0x1df0707f +#define MATCH_VLM_V 0x2b00007 +#define MASK_VLM_V 0xfff0707f +#define MATCH_VLOXEI1024_V 0x1c007007 +#define MASK_VLOXEI1024_V 0x1c00707f +#define MATCH_VLOXEI128_V 0x1c000007 +#define MASK_VLOXEI128_V 0x1c00707f +#define MATCH_VLOXEI16_V 0xc005007 +#define MASK_VLOXEI16_V 0x1c00707f +#define MATCH_VLOXEI256_V 0x1c005007 +#define MASK_VLOXEI256_V 0x1c00707f +#define MATCH_VLOXEI32_V 0xc006007 +#define MASK_VLOXEI32_V 0x1c00707f +#define MATCH_VLOXEI512_V 0x1c006007 +#define MASK_VLOXEI512_V 0x1c00707f +#define MATCH_VLOXEI64_V 0xc007007 +#define MASK_VLOXEI64_V 0x1c00707f +#define MATCH_VLOXEI8_V 0xc000007 +#define MASK_VLOXEI8_V 0x1c00707f +#define MATCH_VLSE1024_V 0x18007007 +#define MASK_VLSE1024_V 0x1c00707f +#define MATCH_VLSE128_V 0x18000007 +#define MASK_VLSE128_V 0x1c00707f +#define MATCH_VLSE16_V 0x8005007 +#define MASK_VLSE16_V 0x1c00707f +#define MATCH_VLSE256_V 0x18005007 +#define MASK_VLSE256_V 0x1c00707f +#define MATCH_VLSE32_V 
0x8006007 +#define MASK_VLSE32_V 0x1c00707f +#define MATCH_VLSE512_V 0x18006007 +#define MASK_VLSE512_V 0x1c00707f +#define MATCH_VLSE64_V 0x8007007 +#define MASK_VLSE64_V 0x1c00707f +#define MATCH_VLSE8_V 0x8000007 +#define MASK_VLSE8_V 0x1c00707f +#define MATCH_VLUXEI1024_V 0x14007007 +#define MASK_VLUXEI1024_V 0x1c00707f +#define MATCH_VLUXEI128_V 0x14000007 +#define MASK_VLUXEI128_V 0x1c00707f +#define MATCH_VLUXEI16_V 0x4005007 +#define MASK_VLUXEI16_V 0x1c00707f +#define MATCH_VLUXEI256_V 0x14005007 +#define MASK_VLUXEI256_V 0x1c00707f +#define MATCH_VLUXEI32_V 0x4006007 +#define MASK_VLUXEI32_V 0x1c00707f +#define MATCH_VLUXEI512_V 0x14006007 +#define MASK_VLUXEI512_V 0x1c00707f +#define MATCH_VLUXEI64_V 0x4007007 +#define MASK_VLUXEI64_V 0x1c00707f +#define MATCH_VLUXEI8_V 0x4000007 +#define MASK_VLUXEI8_V 0x1c00707f +#define MATCH_VMACC_VV 0xb4002057 +#define MASK_VMACC_VV 0xfc00707f +#define MATCH_VMACC_VX 0xb4006057 +#define MASK_VMACC_VX 0xfc00707f +#define MATCH_VMADC_VI 0x46003057 +#define MASK_VMADC_VI 0xfe00707f +#define MATCH_VMADC_VIM 0x44003057 +#define MASK_VMADC_VIM 0xfe00707f +#define MATCH_VMADC_VV 0x46000057 +#define MASK_VMADC_VV 0xfe00707f +#define MATCH_VMADC_VVM 0x44000057 +#define MASK_VMADC_VVM 0xfe00707f +#define MATCH_VMADC_VX 0x46004057 +#define MASK_VMADC_VX 0xfe00707f +#define MATCH_VMADC_VXM 0x44004057 +#define MASK_VMADC_VXM 0xfe00707f +#define MATCH_VMADD_VV 0xa4002057 +#define MASK_VMADD_VV 0xfc00707f +#define MATCH_VMADD_VX 0xa4006057 +#define MASK_VMADD_VX 0xfc00707f +#define MATCH_VMAND_MM 0x64002057 +#define MASK_VMAND_MM 0xfc00707f +#define MATCH_VMANDN_MM 0x60002057 +#define MASK_VMANDN_MM 0xfc00707f +#define MATCH_VMAX_VV 0x1c000057 +#define MASK_VMAX_VV 0xfc00707f +#define MATCH_VMAX_VX 0x1c004057 +#define MASK_VMAX_VX 0xfc00707f +#define MATCH_VMAXU_VV 0x18000057 +#define MASK_VMAXU_VV 0xfc00707f +#define MATCH_VMAXU_VX 0x18004057 +#define MASK_VMAXU_VX 0xfc00707f +#define MATCH_VMERGE_VIM 0x5c003057 +#define MASK_VMERGE_VIM 0xfe00707f +#define MATCH_VMERGE_VVM 0x5c000057 +#define MASK_VMERGE_VVM 0xfe00707f +#define MATCH_VMERGE_VXM 0x5c004057 +#define MASK_VMERGE_VXM 0xfe00707f +#define MATCH_VMFEQ_VF 0x60005057 +#define MASK_VMFEQ_VF 0xfc00707f +#define MATCH_VMFEQ_VV 0x60001057 +#define MASK_VMFEQ_VV 0xfc00707f +#define MATCH_VMFGE_VF 0x7c005057 +#define MASK_VMFGE_VF 0xfc00707f +#define MATCH_VMFGT_VF 0x74005057 +#define MASK_VMFGT_VF 0xfc00707f +#define MATCH_VMFLE_VF 0x64005057 +#define MASK_VMFLE_VF 0xfc00707f +#define MATCH_VMFLE_VV 0x64001057 +#define MASK_VMFLE_VV 0xfc00707f +#define MATCH_VMFLT_VF 0x6c005057 +#define MASK_VMFLT_VF 0xfc00707f +#define MATCH_VMFLT_VV 0x6c001057 +#define MASK_VMFLT_VV 0xfc00707f +#define MATCH_VMFNE_VF 0x70005057 +#define MASK_VMFNE_VF 0xfc00707f +#define MATCH_VMFNE_VV 0x70001057 +#define MASK_VMFNE_VV 0xfc00707f +#define MATCH_VMIN_VV 0x14000057 +#define MASK_VMIN_VV 0xfc00707f +#define MATCH_VMIN_VX 0x14004057 +#define MASK_VMIN_VX 0xfc00707f +#define MATCH_VMINU_VV 0x10000057 +#define MASK_VMINU_VV 0xfc00707f +#define MATCH_VMINU_VX 0x10004057 +#define MASK_VMINU_VX 0xfc00707f +#define MATCH_VMNAND_MM 0x74002057 +#define MASK_VMNAND_MM 0xfc00707f +#define MATCH_VMNOR_MM 0x78002057 +#define MASK_VMNOR_MM 0xfc00707f +#define MATCH_VMOR_MM 0x68002057 +#define MASK_VMOR_MM 0xfc00707f +#define MATCH_VMORN_MM 0x70002057 +#define MASK_VMORN_MM 0xfc00707f +#define MATCH_VMSBC_VV 0x4e000057 +#define MASK_VMSBC_VV 0xfe00707f +#define MATCH_VMSBC_VVM 0x4c000057 +#define MASK_VMSBC_VVM 0xfe00707f +#define 
MATCH_VMSBC_VX 0x4e004057 +#define MASK_VMSBC_VX 0xfe00707f +#define MATCH_VMSBC_VXM 0x4c004057 +#define MASK_VMSBC_VXM 0xfe00707f +#define MATCH_VMSBF_M 0x5000a057 +#define MASK_VMSBF_M 0xfc0ff07f +#define MATCH_VMSEQ_VI 0x60003057 +#define MASK_VMSEQ_VI 0xfc00707f +#define MATCH_VMSEQ_VV 0x60000057 +#define MASK_VMSEQ_VV 0xfc00707f +#define MATCH_VMSEQ_VX 0x60004057 +#define MASK_VMSEQ_VX 0xfc00707f +#define MATCH_VMSGT_VI 0x7c003057 +#define MASK_VMSGT_VI 0xfc00707f +#define MATCH_VMSGT_VX 0x7c004057 +#define MASK_VMSGT_VX 0xfc00707f +#define MATCH_VMSGTU_VI 0x78003057 +#define MASK_VMSGTU_VI 0xfc00707f +#define MATCH_VMSGTU_VX 0x78004057 +#define MASK_VMSGTU_VX 0xfc00707f +#define MATCH_VMSIF_M 0x5001a057 +#define MASK_VMSIF_M 0xfc0ff07f +#define MATCH_VMSLE_VI 0x74003057 +#define MASK_VMSLE_VI 0xfc00707f +#define MATCH_VMSLE_VV 0x74000057 +#define MASK_VMSLE_VV 0xfc00707f +#define MATCH_VMSLE_VX 0x74004057 +#define MASK_VMSLE_VX 0xfc00707f +#define MATCH_VMSLEU_VI 0x70003057 +#define MASK_VMSLEU_VI 0xfc00707f +#define MATCH_VMSLEU_VV 0x70000057 +#define MASK_VMSLEU_VV 0xfc00707f +#define MATCH_VMSLEU_VX 0x70004057 +#define MASK_VMSLEU_VX 0xfc00707f +#define MATCH_VMSLT_VV 0x6c000057 +#define MASK_VMSLT_VV 0xfc00707f +#define MATCH_VMSLT_VX 0x6c004057 +#define MASK_VMSLT_VX 0xfc00707f +#define MATCH_VMSLTU_VV 0x68000057 +#define MASK_VMSLTU_VV 0xfc00707f +#define MATCH_VMSLTU_VX 0x68004057 +#define MASK_VMSLTU_VX 0xfc00707f +#define MATCH_VMSNE_VI 0x64003057 +#define MASK_VMSNE_VI 0xfc00707f +#define MATCH_VMSNE_VV 0x64000057 +#define MASK_VMSNE_VV 0xfc00707f +#define MATCH_VMSNE_VX 0x64004057 +#define MASK_VMSNE_VX 0xfc00707f +#define MATCH_VMSOF_M 0x50012057 +#define MASK_VMSOF_M 0xfc0ff07f +#define MATCH_VMUL_VV 0x94002057 +#define MASK_VMUL_VV 0xfc00707f +#define MATCH_VMUL_VX 0x94006057 +#define MASK_VMUL_VX 0xfc00707f +#define MATCH_VMULH_VV 0x9c002057 +#define MASK_VMULH_VV 0xfc00707f +#define MATCH_VMULH_VX 0x9c006057 +#define MASK_VMULH_VX 0xfc00707f +#define MATCH_VMULHSU_VV 0x98002057 +#define MASK_VMULHSU_VV 0xfc00707f +#define MATCH_VMULHSU_VX 0x98006057 +#define MASK_VMULHSU_VX 0xfc00707f +#define MATCH_VMULHU_VV 0x90002057 +#define MASK_VMULHU_VV 0xfc00707f +#define MATCH_VMULHU_VX 0x90006057 +#define MASK_VMULHU_VX 0xfc00707f +#define MATCH_VMV1R_V 0x9e003057 +#define MASK_VMV1R_V 0xfe0ff07f +#define MATCH_VMV2R_V 0x9e00b057 +#define MASK_VMV2R_V 0xfe0ff07f +#define MATCH_VMV4R_V 0x9e01b057 +#define MASK_VMV4R_V 0xfe0ff07f +#define MATCH_VMV8R_V 0x9e03b057 +#define MASK_VMV8R_V 0xfe0ff07f +#define MATCH_VMV_S_X 0x42006057 +#define MASK_VMV_S_X 0xfff0707f +#define MATCH_VMV_V_I 0x5e003057 +#define MASK_VMV_V_I 0xfff0707f +#define MATCH_VMV_V_V 0x5e000057 +#define MASK_VMV_V_V 0xfff0707f +#define MATCH_VMV_V_X 0x5e004057 +#define MASK_VMV_V_X 0xfff0707f +#define MATCH_VMV_X_S 0x42002057 +#define MASK_VMV_X_S 0xfe0ff07f +#define MATCH_VMXNOR_MM 0x7c002057 +#define MASK_VMXNOR_MM 0xfc00707f +#define MATCH_VMXOR_MM 0x6c002057 +#define MASK_VMXOR_MM 0xfc00707f +#define MATCH_VNCLIP_WI 0xbc003057 +#define MASK_VNCLIP_WI 0xfc00707f +#define MATCH_VNCLIP_WV 0xbc000057 +#define MASK_VNCLIP_WV 0xfc00707f +#define MATCH_VNCLIP_WX 0xbc004057 +#define MASK_VNCLIP_WX 0xfc00707f +#define MATCH_VNCLIPU_WI 0xb8003057 +#define MASK_VNCLIPU_WI 0xfc00707f +#define MATCH_VNCLIPU_WV 0xb8000057 +#define MASK_VNCLIPU_WV 0xfc00707f +#define MATCH_VNCLIPU_WX 0xb8004057 +#define MASK_VNCLIPU_WX 0xfc00707f +#define MATCH_VNMSAC_VV 0xbc002057 +#define MASK_VNMSAC_VV 0xfc00707f +#define 
MATCH_VNMSAC_VX 0xbc006057 +#define MASK_VNMSAC_VX 0xfc00707f +#define MATCH_VNMSUB_VV 0xac002057 +#define MASK_VNMSUB_VV 0xfc00707f +#define MATCH_VNMSUB_VX 0xac006057 +#define MASK_VNMSUB_VX 0xfc00707f +#define MATCH_VNSRA_WI 0xb4003057 +#define MASK_VNSRA_WI 0xfc00707f +#define MATCH_VNSRA_WV 0xb4000057 +#define MASK_VNSRA_WV 0xfc00707f +#define MATCH_VNSRA_WX 0xb4004057 +#define MASK_VNSRA_WX 0xfc00707f +#define MATCH_VNSRL_WI 0xb0003057 +#define MASK_VNSRL_WI 0xfc00707f +#define MATCH_VNSRL_WV 0xb0000057 +#define MASK_VNSRL_WV 0xfc00707f +#define MATCH_VNSRL_WX 0xb0004057 +#define MASK_VNSRL_WX 0xfc00707f +#define MATCH_VOR_VI 0x28003057 +#define MASK_VOR_VI 0xfc00707f +#define MATCH_VOR_VV 0x28000057 +#define MASK_VOR_VV 0xfc00707f +#define MATCH_VOR_VX 0x28004057 +#define MASK_VOR_VX 0xfc00707f +#define MATCH_VREDAND_VS 0x4002057 +#define MASK_VREDAND_VS 0xfc00707f +#define MATCH_VREDMAX_VS 0x1c002057 +#define MASK_VREDMAX_VS 0xfc00707f +#define MATCH_VREDMAXU_VS 0x18002057 +#define MASK_VREDMAXU_VS 0xfc00707f +#define MATCH_VREDMIN_VS 0x14002057 +#define MASK_VREDMIN_VS 0xfc00707f +#define MATCH_VREDMINU_VS 0x10002057 +#define MASK_VREDMINU_VS 0xfc00707f +#define MATCH_VREDOR_VS 0x8002057 +#define MASK_VREDOR_VS 0xfc00707f +#define MATCH_VREDSUM_VS 0x2057 +#define MASK_VREDSUM_VS 0xfc00707f +#define MATCH_VREDXOR_VS 0xc002057 +#define MASK_VREDXOR_VS 0xfc00707f +#define MATCH_VREM_VV 0x8c002057 +#define MASK_VREM_VV 0xfc00707f +#define MATCH_VREM_VX 0x8c006057 +#define MASK_VREM_VX 0xfc00707f +#define MATCH_VREMU_VV 0x88002057 +#define MASK_VREMU_VV 0xfc00707f +#define MATCH_VREMU_VX 0x88006057 +#define MASK_VREMU_VX 0xfc00707f +#define MATCH_VRGATHER_VI 0x30003057 +#define MASK_VRGATHER_VI 0xfc00707f +#define MATCH_VRGATHER_VV 0x30000057 +#define MASK_VRGATHER_VV 0xfc00707f +#define MATCH_VRGATHER_VX 0x30004057 +#define MASK_VRGATHER_VX 0xfc00707f +#define MATCH_VRGATHEREI16_VV 0x38000057 +#define MASK_VRGATHEREI16_VV 0xfc00707f +#define MATCH_VRSUB_VI 0xc003057 +#define MASK_VRSUB_VI 0xfc00707f +#define MATCH_VRSUB_VX 0xc004057 +#define MASK_VRSUB_VX 0xfc00707f +#define MATCH_VS1R_V 0x2800027 +#define MASK_VS1R_V 0xfff0707f +#define MATCH_VS2R_V 0x22800027 +#define MASK_VS2R_V 0xfff0707f +#define MATCH_VS4R_V 0x62800027 +#define MASK_VS4R_V 0xfff0707f +#define MATCH_VS8R_V 0xe2800027 +#define MASK_VS8R_V 0xfff0707f +#define MATCH_VSADD_VI 0x84003057 +#define MASK_VSADD_VI 0xfc00707f +#define MATCH_VSADD_VV 0x84000057 +#define MASK_VSADD_VV 0xfc00707f +#define MATCH_VSADD_VX 0x84004057 +#define MASK_VSADD_VX 0xfc00707f +#define MATCH_VSADDU_VI 0x80003057 +#define MASK_VSADDU_VI 0xfc00707f +#define MATCH_VSADDU_VV 0x80000057 +#define MASK_VSADDU_VV 0xfc00707f +#define MATCH_VSADDU_VX 0x80004057 +#define MASK_VSADDU_VX 0xfc00707f +#define MATCH_VSBC_VVM 0x48000057 +#define MASK_VSBC_VVM 0xfe00707f +#define MATCH_VSBC_VXM 0x48004057 +#define MASK_VSBC_VXM 0xfe00707f +#define MATCH_VSE1024_V 0x10007027 +#define MASK_VSE1024_V 0x1df0707f +#define MATCH_VSE128_V 0x10000027 +#define MASK_VSE128_V 0x1df0707f +#define MATCH_VSE16_V 0x5027 +#define MASK_VSE16_V 0x1df0707f +#define MATCH_VSE256_V 0x10005027 +#define MASK_VSE256_V 0x1df0707f +#define MATCH_VSE32_V 0x6027 +#define MASK_VSE32_V 0x1df0707f +#define MATCH_VSE512_V 0x10006027 +#define MASK_VSE512_V 0x1df0707f +#define MATCH_VSE64_V 0x7027 +#define MASK_VSE64_V 0x1df0707f +#define MATCH_VSE8_V 0x27 +#define MASK_VSE8_V 0x1df0707f +#define MATCH_VSETIVLI 0xc0007057 +#define MASK_VSETIVLI 0xc000707f +#define MATCH_VSETVL 0x80007057 
+#define MASK_VSETVL 0xfe00707f +#define MATCH_VSETVLI 0x7057 +#define MASK_VSETVLI 0x8000707f +#define MATCH_VSEXT_VF2 0x4803a057 +#define MASK_VSEXT_VF2 0xfc0ff07f +#define MATCH_VSEXT_VF4 0x4802a057 +#define MASK_VSEXT_VF4 0xfc0ff07f +#define MATCH_VSEXT_VF8 0x4801a057 +#define MASK_VSEXT_VF8 0xfc0ff07f +#define MATCH_VSLIDE1DOWN_VX 0x3c006057 +#define MASK_VSLIDE1DOWN_VX 0xfc00707f +#define MATCH_VSLIDE1UP_VX 0x38006057 +#define MASK_VSLIDE1UP_VX 0xfc00707f +#define MATCH_VSLIDEDOWN_VI 0x3c003057 +#define MASK_VSLIDEDOWN_VI 0xfc00707f +#define MATCH_VSLIDEDOWN_VX 0x3c004057 +#define MASK_VSLIDEDOWN_VX 0xfc00707f +#define MATCH_VSLIDEUP_VI 0x38003057 +#define MASK_VSLIDEUP_VI 0xfc00707f +#define MATCH_VSLIDEUP_VX 0x38004057 +#define MASK_VSLIDEUP_VX 0xfc00707f +#define MATCH_VSLL_VI 0x94003057 +#define MASK_VSLL_VI 0xfc00707f +#define MATCH_VSLL_VV 0x94000057 +#define MASK_VSLL_VV 0xfc00707f +#define MATCH_VSLL_VX 0x94004057 +#define MASK_VSLL_VX 0xfc00707f +#define MATCH_VSM_V 0x2b00027 +#define MASK_VSM_V 0xfff0707f +#define MATCH_VSMUL_VV 0x9c000057 +#define MASK_VSMUL_VV 0xfc00707f +#define MATCH_VSMUL_VX 0x9c004057 +#define MASK_VSMUL_VX 0xfc00707f +#define MATCH_VSOXEI1024_V 0x1c007027 +#define MASK_VSOXEI1024_V 0x1c00707f +#define MATCH_VSOXEI128_V 0x1c000027 +#define MASK_VSOXEI128_V 0x1c00707f +#define MATCH_VSOXEI16_V 0xc005027 +#define MASK_VSOXEI16_V 0x1c00707f +#define MATCH_VSOXEI256_V 0x1c005027 +#define MASK_VSOXEI256_V 0x1c00707f +#define MATCH_VSOXEI32_V 0xc006027 +#define MASK_VSOXEI32_V 0x1c00707f +#define MATCH_VSOXEI512_V 0x1c006027 +#define MASK_VSOXEI512_V 0x1c00707f +#define MATCH_VSOXEI64_V 0xc007027 +#define MASK_VSOXEI64_V 0x1c00707f +#define MATCH_VSOXEI8_V 0xc000027 +#define MASK_VSOXEI8_V 0x1c00707f +#define MATCH_VSRA_VI 0xa4003057 +#define MASK_VSRA_VI 0xfc00707f +#define MATCH_VSRA_VV 0xa4000057 +#define MASK_VSRA_VV 0xfc00707f +#define MATCH_VSRA_VX 0xa4004057 +#define MASK_VSRA_VX 0xfc00707f +#define MATCH_VSRL_VI 0xa0003057 +#define MASK_VSRL_VI 0xfc00707f +#define MATCH_VSRL_VV 0xa0000057 +#define MASK_VSRL_VV 0xfc00707f +#define MATCH_VSRL_VX 0xa0004057 +#define MASK_VSRL_VX 0xfc00707f +#define MATCH_VSSE1024_V 0x18007027 +#define MASK_VSSE1024_V 0x1c00707f +#define MATCH_VSSE128_V 0x18000027 +#define MASK_VSSE128_V 0x1c00707f +#define MATCH_VSSE16_V 0x8005027 +#define MASK_VSSE16_V 0x1c00707f +#define MATCH_VSSE256_V 0x18005027 +#define MASK_VSSE256_V 0x1c00707f +#define MATCH_VSSE32_V 0x8006027 +#define MASK_VSSE32_V 0x1c00707f +#define MATCH_VSSE512_V 0x18006027 +#define MASK_VSSE512_V 0x1c00707f +#define MATCH_VSSE64_V 0x8007027 +#define MASK_VSSE64_V 0x1c00707f +#define MATCH_VSSE8_V 0x8000027 +#define MASK_VSSE8_V 0x1c00707f +#define MATCH_VSSRA_VI 0xac003057 +#define MASK_VSSRA_VI 0xfc00707f +#define MATCH_VSSRA_VV 0xac000057 +#define MASK_VSSRA_VV 0xfc00707f +#define MATCH_VSSRA_VX 0xac004057 +#define MASK_VSSRA_VX 0xfc00707f +#define MATCH_VSSRL_VI 0xa8003057 +#define MASK_VSSRL_VI 0xfc00707f +#define MATCH_VSSRL_VV 0xa8000057 +#define MASK_VSSRL_VV 0xfc00707f +#define MATCH_VSSRL_VX 0xa8004057 +#define MASK_VSSRL_VX 0xfc00707f +#define MATCH_VSSUB_VV 0x8c000057 +#define MASK_VSSUB_VV 0xfc00707f +#define MATCH_VSSUB_VX 0x8c004057 +#define MASK_VSSUB_VX 0xfc00707f +#define MATCH_VSSUBU_VV 0x88000057 +#define MASK_VSSUBU_VV 0xfc00707f +#define MATCH_VSSUBU_VX 0x88004057 +#define MASK_VSSUBU_VX 0xfc00707f +#define MATCH_VSUB_VV 0x8000057 +#define MASK_VSUB_VV 0xfc00707f +#define MATCH_VSUB_VX 0x8004057 +#define MASK_VSUB_VX 0xfc00707f 
+#define MATCH_VSUXEI1024_V 0x14007027 +#define MASK_VSUXEI1024_V 0x1c00707f +#define MATCH_VSUXEI128_V 0x14000027 +#define MASK_VSUXEI128_V 0x1c00707f +#define MATCH_VSUXEI16_V 0x4005027 +#define MASK_VSUXEI16_V 0x1c00707f +#define MATCH_VSUXEI256_V 0x14005027 +#define MASK_VSUXEI256_V 0x1c00707f +#define MATCH_VSUXEI32_V 0x4006027 +#define MASK_VSUXEI32_V 0x1c00707f +#define MATCH_VSUXEI512_V 0x14006027 +#define MASK_VSUXEI512_V 0x1c00707f +#define MATCH_VSUXEI64_V 0x4007027 +#define MASK_VSUXEI64_V 0x1c00707f +#define MATCH_VSUXEI8_V 0x4000027 +#define MASK_VSUXEI8_V 0x1c00707f +#define MATCH_VWADD_VV 0xc4002057 +#define MASK_VWADD_VV 0xfc00707f +#define MATCH_VWADD_VX 0xc4006057 +#define MASK_VWADD_VX 0xfc00707f +#define MATCH_VWADD_WV 0xd4002057 +#define MASK_VWADD_WV 0xfc00707f +#define MATCH_VWADD_WX 0xd4006057 +#define MASK_VWADD_WX 0xfc00707f +#define MATCH_VWADDU_VV 0xc0002057 +#define MASK_VWADDU_VV 0xfc00707f +#define MATCH_VWADDU_VX 0xc0006057 +#define MASK_VWADDU_VX 0xfc00707f +#define MATCH_VWADDU_WV 0xd0002057 +#define MASK_VWADDU_WV 0xfc00707f +#define MATCH_VWADDU_WX 0xd0006057 +#define MASK_VWADDU_WX 0xfc00707f +#define MATCH_VWMACC_VV 0xf4002057 +#define MASK_VWMACC_VV 0xfc00707f +#define MATCH_VWMACC_VX 0xf4006057 +#define MASK_VWMACC_VX 0xfc00707f +#define MATCH_VWMACCSU_VV 0xfc002057 +#define MASK_VWMACCSU_VV 0xfc00707f +#define MATCH_VWMACCSU_VX 0xfc006057 +#define MASK_VWMACCSU_VX 0xfc00707f +#define MATCH_VWMACCU_VV 0xf0002057 +#define MASK_VWMACCU_VV 0xfc00707f +#define MATCH_VWMACCU_VX 0xf0006057 +#define MASK_VWMACCU_VX 0xfc00707f +#define MATCH_VWMACCUS_VX 0xf8006057 +#define MASK_VWMACCUS_VX 0xfc00707f +#define MATCH_VWMUL_VV 0xec002057 +#define MASK_VWMUL_VV 0xfc00707f +#define MATCH_VWMUL_VX 0xec006057 +#define MASK_VWMUL_VX 0xfc00707f +#define MATCH_VWMULSU_VV 0xe8002057 +#define MASK_VWMULSU_VV 0xfc00707f +#define MATCH_VWMULSU_VX 0xe8006057 +#define MASK_VWMULSU_VX 0xfc00707f +#define MATCH_VWMULU_VV 0xe0002057 +#define MASK_VWMULU_VV 0xfc00707f +#define MATCH_VWMULU_VX 0xe0006057 +#define MASK_VWMULU_VX 0xfc00707f +#define MATCH_VWREDSUM_VS 0xc4000057 +#define MASK_VWREDSUM_VS 0xfc00707f +#define MATCH_VWREDSUMU_VS 0xc0000057 +#define MASK_VWREDSUMU_VS 0xfc00707f +#define MATCH_VWSUB_VV 0xcc002057 +#define MASK_VWSUB_VV 0xfc00707f +#define MATCH_VWSUB_VX 0xcc006057 +#define MASK_VWSUB_VX 0xfc00707f +#define MATCH_VWSUB_WV 0xdc002057 +#define MASK_VWSUB_WV 0xfc00707f +#define MATCH_VWSUB_WX 0xdc006057 +#define MASK_VWSUB_WX 0xfc00707f +#define MATCH_VWSUBU_VV 0xc8002057 +#define MASK_VWSUBU_VV 0xfc00707f +#define MATCH_VWSUBU_VX 0xc8006057 +#define MASK_VWSUBU_VX 0xfc00707f +#define MATCH_VWSUBU_WV 0xd8002057 +#define MASK_VWSUBU_WV 0xfc00707f +#define MATCH_VWSUBU_WX 0xd8006057 +#define MASK_VWSUBU_WX 0xfc00707f +#define MATCH_VXOR_VI 0x2c003057 +#define MASK_VXOR_VI 0xfc00707f +#define MATCH_VXOR_VV 0x2c000057 +#define MASK_VXOR_VV 0xfc00707f +#define MATCH_VXOR_VX 0x2c004057 +#define MASK_VXOR_VX 0xfc00707f +#define MATCH_VZEXT_VF2 0x48032057 +#define MASK_VZEXT_VF2 0xfc0ff07f +#define MATCH_VZEXT_VF4 0x48022057 +#define MASK_VZEXT_VF4 0xfc0ff07f +#define MATCH_VZEXT_VF8 0x48012057 +#define MASK_VZEXT_VF8 0xfc0ff07f +#define MATCH_WEXT 0xce000077 +#define MASK_WEXT 0xfe00707f +#define MATCH_WEXTI 0xde000077 +#define MASK_WEXTI 0xfe00707f +#define MATCH_WFI 0x10500073 +#define MASK_WFI 0xffffffff +#define MATCH_WRS_NTO 0xd00073 +#define MASK_WRS_NTO 0xffffffff +#define MATCH_WRS_STO 0x1d00073 +#define MASK_WRS_STO 0xffffffff +#define MATCH_XNOR 
0x40004033 +#define MASK_XNOR 0xfe00707f +#define MATCH_XOR 0x4033 +#define MASK_XOR 0xfe00707f +#define MATCH_XORI 0x4013 +#define MASK_XORI 0x707f +#define MATCH_XPERM16 0x28006033 +#define MASK_XPERM16 0xfe00707f +#define MATCH_XPERM32 0x28000033 +#define MASK_XPERM32 0xfe00707f +#define MATCH_XPERM4 0x28002033 +#define MASK_XPERM4 0xfe00707f +#define MATCH_XPERM8 0x28004033 +#define MASK_XPERM8 0xfe00707f +#define MATCH_ZUNPKD810 0xacc00077 +#define MASK_ZUNPKD810 0xfff0707f +#define MATCH_ZUNPKD820 0xacd00077 +#define MASK_ZUNPKD820 0xfff0707f +#define MATCH_ZUNPKD830 0xace00077 +#define MASK_ZUNPKD830 0xfff0707f +#define MATCH_ZUNPKD831 0xacf00077 +#define MASK_ZUNPKD831 0xfff0707f +#define MATCH_ZUNPKD832 0xad700077 +#define MASK_ZUNPKD832 0xfff0707f + +#define CSR_FFLAGS 0x1 +#define CSR_FRM 0x2 +#define CSR_FCSR 0x3 +#define CSR_VSTART 0x8 +#define CSR_VXSAT 0x9 +#define CSR_VXRM 0xa +#define CSR_VCSR 0xf +#define CSR_SEED 0x15 +#define CSR_CYCLE 0xc00 +#define CSR_TIME 0xc01 +#define CSR_INSTRET 0xc02 +#define CSR_HPMCOUNTER3 0xc03 +#define CSR_HPMCOUNTER4 0xc04 +#define CSR_HPMCOUNTER5 0xc05 +#define CSR_HPMCOUNTER6 0xc06 +#define CSR_HPMCOUNTER7 0xc07 +#define CSR_HPMCOUNTER8 0xc08 +#define CSR_HPMCOUNTER9 0xc09 +#define CSR_HPMCOUNTER10 0xc0a +#define CSR_HPMCOUNTER11 0xc0b +#define CSR_HPMCOUNTER12 0xc0c +#define CSR_HPMCOUNTER13 0xc0d +#define CSR_HPMCOUNTER14 0xc0e +#define CSR_HPMCOUNTER15 0xc0f +#define CSR_HPMCOUNTER16 0xc10 +#define CSR_HPMCOUNTER17 0xc11 +#define CSR_HPMCOUNTER18 0xc12 +#define CSR_HPMCOUNTER19 0xc13 +#define CSR_HPMCOUNTER20 0xc14 +#define CSR_HPMCOUNTER21 0xc15 +#define CSR_HPMCOUNTER22 0xc16 +#define CSR_HPMCOUNTER23 0xc17 +#define CSR_HPMCOUNTER24 0xc18 +#define CSR_HPMCOUNTER25 0xc19 +#define CSR_HPMCOUNTER26 0xc1a +#define CSR_HPMCOUNTER27 0xc1b +#define CSR_HPMCOUNTER28 0xc1c +#define CSR_HPMCOUNTER29 0xc1d +#define CSR_HPMCOUNTER30 0xc1e +#define CSR_HPMCOUNTER31 0xc1f +#define CSR_VL 0xc20 +#define CSR_VTYPE 0xc21 +#define CSR_VLENB 0xc22 +#define CSR_SSTATUS 0x100 +#define CSR_SEDELEG 0x102 +#define CSR_SIDELEG 0x103 +#define CSR_SIE 0x104 +#define CSR_STVEC 0x105 +#define CSR_SCOUNTEREN 0x106 +#define CSR_SENVCFG 0x10a +#define CSR_SSTATEEN0 0x10c +#define CSR_SSTATEEN1 0x10d +#define CSR_SSTATEEN2 0x10e +#define CSR_SSTATEEN3 0x10f +#define CSR_SSCRATCH 0x140 +#define CSR_SEPC 0x141 +#define CSR_SCAUSE 0x142 +#define CSR_STVAL 0x143 +#define CSR_SIP 0x144 +#define CSR_STIMECMP 0x14d +#define CSR_SATP 0x180 +#define CSR_SCONTEXT 0x5a8 +#define CSR_VSSTATUS 0x200 +#define CSR_VSIE 0x204 +#define CSR_VSTVEC 0x205 +#define CSR_VSSCRATCH 0x240 +#define CSR_VSEPC 0x241 +#define CSR_VSCAUSE 0x242 +#define CSR_VSTVAL 0x243 +#define CSR_VSIP 0x244 +#define CSR_VSTIMECMP 0x24d +#define CSR_VSATP 0x280 +#define CSR_HSTATUS 0x600 +#define CSR_HEDELEG 0x602 +#define CSR_HIDELEG 0x603 +#define CSR_HIE 0x604 +#define CSR_HTIMEDELTA 0x605 +#define CSR_HCOUNTEREN 0x606 +#define CSR_HGEIE 0x607 +#define CSR_HENVCFG 0x60a +#define CSR_HSTATEEN0 0x60c +#define CSR_HSTATEEN1 0x60d +#define CSR_HSTATEEN2 0x60e +#define CSR_HSTATEEN3 0x60f +#define CSR_HTVAL 0x643 +#define CSR_HIP 0x644 +#define CSR_HVIP 0x645 +#define CSR_HTINST 0x64a +#define CSR_HGATP 0x680 +#define CSR_HCONTEXT 0x6a8 +#define CSR_HGEIP 0xe12 +#define CSR_SCOUNTOVF 0xda0 +#define CSR_UTVT 0x7 +#define CSR_UNXTI 0x45 +#define CSR_UINTSTATUS 0x46 +#define CSR_USCRATCHCSW 0x48 +#define CSR_USCRATCHCSWL 0x49 +#define CSR_STVT 0x107 +#define CSR_SNXTI 0x145 +#define CSR_SINTSTATUS 0x146 +#define 
CSR_SSCRATCHCSW 0x148 +#define CSR_SSCRATCHCSWL 0x149 +#define CSR_MTVT 0x307 +#define CSR_MNXTI 0x345 +#define CSR_MINTSTATUS 0x346 +#define CSR_MSCRATCHCSW 0x348 +#define CSR_MSCRATCHCSWL 0x349 +#define CSR_MSTATUS 0x300 +#define CSR_MISA 0x301 +#define CSR_MEDELEG 0x302 +#define CSR_MIDELEG 0x303 +#define CSR_MIE 0x304 +#define CSR_MTVEC 0x305 +#define CSR_MCOUNTEREN 0x306 +#define CSR_MENVCFG 0x30a +#define CSR_MSTATEEN0 0x30c +#define CSR_MSTATEEN1 0x30d +#define CSR_MSTATEEN2 0x30e +#define CSR_MSTATEEN3 0x30f +#define CSR_MCOUNTINHIBIT 0x320 +#define CSR_MSCRATCH 0x340 +#define CSR_MEPC 0x341 +#define CSR_MCAUSE 0x342 +#define CSR_MTVAL 0x343 +#define CSR_MIP 0x344 +#define CSR_MTINST 0x34a +#define CSR_MTVAL2 0x34b +#define CSR_PMPCFG0 0x3a0 +#define CSR_PMPCFG1 0x3a1 +#define CSR_PMPCFG2 0x3a2 +#define CSR_PMPCFG3 0x3a3 +#define CSR_PMPCFG4 0x3a4 +#define CSR_PMPCFG5 0x3a5 +#define CSR_PMPCFG6 0x3a6 +#define CSR_PMPCFG7 0x3a7 +#define CSR_PMPCFG8 0x3a8 +#define CSR_PMPCFG9 0x3a9 +#define CSR_PMPCFG10 0x3aa +#define CSR_PMPCFG11 0x3ab +#define CSR_PMPCFG12 0x3ac +#define CSR_PMPCFG13 0x3ad +#define CSR_PMPCFG14 0x3ae +#define CSR_PMPCFG15 0x3af +#define CSR_PMPADDR0 0x3b0 +#define CSR_PMPADDR1 0x3b1 +#define CSR_PMPADDR2 0x3b2 +#define CSR_PMPADDR3 0x3b3 +#define CSR_PMPADDR4 0x3b4 +#define CSR_PMPADDR5 0x3b5 +#define CSR_PMPADDR6 0x3b6 +#define CSR_PMPADDR7 0x3b7 +#define CSR_PMPADDR8 0x3b8 +#define CSR_PMPADDR9 0x3b9 +#define CSR_PMPADDR10 0x3ba +#define CSR_PMPADDR11 0x3bb +#define CSR_PMPADDR12 0x3bc +#define CSR_PMPADDR13 0x3bd +#define CSR_PMPADDR14 0x3be +#define CSR_PMPADDR15 0x3bf +#define CSR_PMPADDR16 0x3c0 +#define CSR_PMPADDR17 0x3c1 +#define CSR_PMPADDR18 0x3c2 +#define CSR_PMPADDR19 0x3c3 +#define CSR_PMPADDR20 0x3c4 +#define CSR_PMPADDR21 0x3c5 +#define CSR_PMPADDR22 0x3c6 +#define CSR_PMPADDR23 0x3c7 +#define CSR_PMPADDR24 0x3c8 +#define CSR_PMPADDR25 0x3c9 +#define CSR_PMPADDR26 0x3ca +#define CSR_PMPADDR27 0x3cb +#define CSR_PMPADDR28 0x3cc +#define CSR_PMPADDR29 0x3cd +#define CSR_PMPADDR30 0x3ce +#define CSR_PMPADDR31 0x3cf +#define CSR_PMPADDR32 0x3d0 +#define CSR_PMPADDR33 0x3d1 +#define CSR_PMPADDR34 0x3d2 +#define CSR_PMPADDR35 0x3d3 +#define CSR_PMPADDR36 0x3d4 +#define CSR_PMPADDR37 0x3d5 +#define CSR_PMPADDR38 0x3d6 +#define CSR_PMPADDR39 0x3d7 +#define CSR_PMPADDR40 0x3d8 +#define CSR_PMPADDR41 0x3d9 +#define CSR_PMPADDR42 0x3da +#define CSR_PMPADDR43 0x3db +#define CSR_PMPADDR44 0x3dc +#define CSR_PMPADDR45 0x3dd +#define CSR_PMPADDR46 0x3de +#define CSR_PMPADDR47 0x3df +#define CSR_PMPADDR48 0x3e0 +#define CSR_PMPADDR49 0x3e1 +#define CSR_PMPADDR50 0x3e2 +#define CSR_PMPADDR51 0x3e3 +#define CSR_PMPADDR52 0x3e4 +#define CSR_PMPADDR53 0x3e5 +#define CSR_PMPADDR54 0x3e6 +#define CSR_PMPADDR55 0x3e7 +#define CSR_PMPADDR56 0x3e8 +#define CSR_PMPADDR57 0x3e9 +#define CSR_PMPADDR58 0x3ea +#define CSR_PMPADDR59 0x3eb +#define CSR_PMPADDR60 0x3ec +#define CSR_PMPADDR61 0x3ed +#define CSR_PMPADDR62 0x3ee +#define CSR_PMPADDR63 0x3ef +#define CSR_MSECCFG 0x747 +#define CSR_TSELECT 0x7a0 +#define CSR_TDATA1 0x7a1 +#define CSR_TDATA2 0x7a2 +#define CSR_TDATA3 0x7a3 +#define CSR_TINFO 0x7a4 +#define CSR_TCONTROL 0x7a5 +#define CSR_MCONTEXT 0x7a8 +#define CSR_MSCONTEXT 0x7aa +#define CSR_DCSR 0x7b0 +#define CSR_DPC 0x7b1 +#define CSR_DSCRATCH0 0x7b2 +#define CSR_DSCRATCH1 0x7b3 +#define CSR_MCYCLE 0xb00 +#define CSR_MINSTRET 0xb02 +#define CSR_MHPMCOUNTER3 0xb03 +#define CSR_MHPMCOUNTER4 0xb04 +#define CSR_MHPMCOUNTER5 0xb05 +#define CSR_MHPMCOUNTER6 0xb06 +#define 
CSR_MHPMCOUNTER7 0xb07 +#define CSR_MHPMCOUNTER8 0xb08 +#define CSR_MHPMCOUNTER9 0xb09 +#define CSR_MHPMCOUNTER10 0xb0a +#define CSR_MHPMCOUNTER11 0xb0b +#define CSR_MHPMCOUNTER12 0xb0c +#define CSR_MHPMCOUNTER13 0xb0d +#define CSR_MHPMCOUNTER14 0xb0e +#define CSR_MHPMCOUNTER15 0xb0f +#define CSR_MHPMCOUNTER16 0xb10 +#define CSR_MHPMCOUNTER17 0xb11 +#define CSR_MHPMCOUNTER18 0xb12 +#define CSR_MHPMCOUNTER19 0xb13 +#define CSR_MHPMCOUNTER20 0xb14 +#define CSR_MHPMCOUNTER21 0xb15 +#define CSR_MHPMCOUNTER22 0xb16 +#define CSR_MHPMCOUNTER23 0xb17 +#define CSR_MHPMCOUNTER24 0xb18 +#define CSR_MHPMCOUNTER25 0xb19 +#define CSR_MHPMCOUNTER26 0xb1a +#define CSR_MHPMCOUNTER27 0xb1b +#define CSR_MHPMCOUNTER28 0xb1c +#define CSR_MHPMCOUNTER29 0xb1d +#define CSR_MHPMCOUNTER30 0xb1e +#define CSR_MHPMCOUNTER31 0xb1f +#define CSR_MHPMEVENT3 0x323 +#define CSR_MHPMEVENT4 0x324 +#define CSR_MHPMEVENT5 0x325 +#define CSR_MHPMEVENT6 0x326 +#define CSR_MHPMEVENT7 0x327 +#define CSR_MHPMEVENT8 0x328 +#define CSR_MHPMEVENT9 0x329 +#define CSR_MHPMEVENT10 0x32a +#define CSR_MHPMEVENT11 0x32b +#define CSR_MHPMEVENT12 0x32c +#define CSR_MHPMEVENT13 0x32d +#define CSR_MHPMEVENT14 0x32e +#define CSR_MHPMEVENT15 0x32f +#define CSR_MHPMEVENT16 0x330 +#define CSR_MHPMEVENT17 0x331 +#define CSR_MHPMEVENT18 0x332 +#define CSR_MHPMEVENT19 0x333 +#define CSR_MHPMEVENT20 0x334 +#define CSR_MHPMEVENT21 0x335 +#define CSR_MHPMEVENT22 0x336 +#define CSR_MHPMEVENT23 0x337 +#define CSR_MHPMEVENT24 0x338 +#define CSR_MHPMEVENT25 0x339 +#define CSR_MHPMEVENT26 0x33a +#define CSR_MHPMEVENT27 0x33b +#define CSR_MHPMEVENT28 0x33c +#define CSR_MHPMEVENT29 0x33d +#define CSR_MHPMEVENT30 0x33e +#define CSR_MHPMEVENT31 0x33f +#define CSR_MVENDORID 0xf11 +#define CSR_MARCHID 0xf12 +#define CSR_MIMPID 0xf13 +#define CSR_MHARTID 0xf14 +#define CSR_MCONFIGPTR 0xf15 +#define CSR_STIMECMPH 0x15d +#define CSR_VSTIMECMPH 0x25d +#define CSR_HTIMEDELTAH 0x615 +#define CSR_HENVCFGH 0x61a +#define CSR_HSTATEEN0H 0x61c +#define CSR_HSTATEEN1H 0x61d +#define CSR_HSTATEEN2H 0x61e +#define CSR_HSTATEEN3H 0x61f +#define CSR_CYCLEH 0xc80 +#define CSR_TIMEH 0xc81 +#define CSR_INSTRETH 0xc82 +#define CSR_HPMCOUNTER3H 0xc83 +#define CSR_HPMCOUNTER4H 0xc84 +#define CSR_HPMCOUNTER5H 0xc85 +#define CSR_HPMCOUNTER6H 0xc86 +#define CSR_HPMCOUNTER7H 0xc87 +#define CSR_HPMCOUNTER8H 0xc88 +#define CSR_HPMCOUNTER9H 0xc89 +#define CSR_HPMCOUNTER10H 0xc8a +#define CSR_HPMCOUNTER11H 0xc8b +#define CSR_HPMCOUNTER12H 0xc8c +#define CSR_HPMCOUNTER13H 0xc8d +#define CSR_HPMCOUNTER14H 0xc8e +#define CSR_HPMCOUNTER15H 0xc8f +#define CSR_HPMCOUNTER16H 0xc90 +#define CSR_HPMCOUNTER17H 0xc91 +#define CSR_HPMCOUNTER18H 0xc92 +#define CSR_HPMCOUNTER19H 0xc93 +#define CSR_HPMCOUNTER20H 0xc94 +#define CSR_HPMCOUNTER21H 0xc95 +#define CSR_HPMCOUNTER22H 0xc96 +#define CSR_HPMCOUNTER23H 0xc97 +#define CSR_HPMCOUNTER24H 0xc98 +#define CSR_HPMCOUNTER25H 0xc99 +#define CSR_HPMCOUNTER26H 0xc9a +#define CSR_HPMCOUNTER27H 0xc9b +#define CSR_HPMCOUNTER28H 0xc9c +#define CSR_HPMCOUNTER29H 0xc9d +#define CSR_HPMCOUNTER30H 0xc9e +#define CSR_HPMCOUNTER31H 0xc9f +#define CSR_MSTATUSH 0x310 +#define CSR_MENVCFGH 0x31a +#define CSR_MSTATEEN0H 0x31c +#define CSR_MSTATEEN1H 0x31d +#define CSR_MSTATEEN2H 0x31e +#define CSR_MSTATEEN3H 0x31f +#define CSR_MHPMEVENT3H 0x723 +#define CSR_MHPMEVENT4H 0x724 +#define CSR_MHPMEVENT5H 0x725 +#define CSR_MHPMEVENT6H 0x726 +#define CSR_MHPMEVENT7H 0x727 +#define CSR_MHPMEVENT8H 0x728 +#define CSR_MHPMEVENT9H 0x729 +#define CSR_MHPMEVENT10H 0x72a +#define 
CSR_MHPMEVENT11H 0x72b +#define CSR_MHPMEVENT12H 0x72c +#define CSR_MHPMEVENT13H 0x72d +#define CSR_MHPMEVENT14H 0x72e +#define CSR_MHPMEVENT15H 0x72f +#define CSR_MHPMEVENT16H 0x730 +#define CSR_MHPMEVENT17H 0x731 +#define CSR_MHPMEVENT18H 0x732 +#define CSR_MHPMEVENT19H 0x733 +#define CSR_MHPMEVENT20H 0x734 +#define CSR_MHPMEVENT21H 0x735 +#define CSR_MHPMEVENT22H 0x736 +#define CSR_MHPMEVENT23H 0x737 +#define CSR_MHPMEVENT24H 0x738 +#define CSR_MHPMEVENT25H 0x739 +#define CSR_MHPMEVENT26H 0x73a +#define CSR_MHPMEVENT27H 0x73b +#define CSR_MHPMEVENT28H 0x73c +#define CSR_MHPMEVENT29H 0x73d +#define CSR_MHPMEVENT30H 0x73e +#define CSR_MHPMEVENT31H 0x73f +#define CSR_MSECCFGH 0x757 +#define CSR_MCYCLEH 0xb80 +#define CSR_MINSTRETH 0xb82 +#define CSR_MHPMCOUNTER3H 0xb83 +#define CSR_MHPMCOUNTER4H 0xb84 +#define CSR_MHPMCOUNTER5H 0xb85 +#define CSR_MHPMCOUNTER6H 0xb86 +#define CSR_MHPMCOUNTER7H 0xb87 +#define CSR_MHPMCOUNTER8H 0xb88 +#define CSR_MHPMCOUNTER9H 0xb89 +#define CSR_MHPMCOUNTER10H 0xb8a +#define CSR_MHPMCOUNTER11H 0xb8b +#define CSR_MHPMCOUNTER12H 0xb8c +#define CSR_MHPMCOUNTER13H 0xb8d +#define CSR_MHPMCOUNTER14H 0xb8e +#define CSR_MHPMCOUNTER15H 0xb8f +#define CSR_MHPMCOUNTER16H 0xb90 +#define CSR_MHPMCOUNTER17H 0xb91 +#define CSR_MHPMCOUNTER18H 0xb92 +#define CSR_MHPMCOUNTER19H 0xb93 +#define CSR_MHPMCOUNTER20H 0xb94 +#define CSR_MHPMCOUNTER21H 0xb95 +#define CSR_MHPMCOUNTER22H 0xb96 +#define CSR_MHPMCOUNTER23H 0xb97 +#define CSR_MHPMCOUNTER24H 0xb98 +#define CSR_MHPMCOUNTER25H 0xb99 +#define CSR_MHPMCOUNTER26H 0xb9a +#define CSR_MHPMCOUNTER27H 0xb9b +#define CSR_MHPMCOUNTER28H 0xb9c +#define CSR_MHPMCOUNTER29H 0xb9d +#define CSR_MHPMCOUNTER30H 0xb9e +#define CSR_MHPMCOUNTER31H 0xb9f + +#define CAUSE_MISALIGNED_FETCH 0x0 +#define CAUSE_FETCH_ACCESS 0x1 +#define CAUSE_ILLEGAL_INSTRUCTION 0x2 +#define CAUSE_BREAKPOINT 0x3 +#define CAUSE_MISALIGNED_LOAD 0x4 +#define CAUSE_LOAD_ACCESS 0x5 +#define CAUSE_MISALIGNED_STORE 0x6 +#define CAUSE_STORE_ACCESS 0x7 +#define CAUSE_USER_ECALL 0x8 +#define CAUSE_SUPERVISOR_ECALL 0x9 +#define CAUSE_VIRTUAL_SUPERVISOR_ECALL 0xa +#define CAUSE_MACHINE_ECALL 0xb +#define CAUSE_FETCH_PAGE_FAULT 0xc +#define CAUSE_LOAD_PAGE_FAULT 0xd +#define CAUSE_STORE_PAGE_FAULT 0xf +#define CAUSE_FETCH_GUEST_PAGE_FAULT 0x14 +#define CAUSE_LOAD_GUEST_PAGE_FAULT 0x15 +#define CAUSE_VIRTUAL_INSTRUCTION 0x16 +#define CAUSE_STORE_GUEST_PAGE_FAULT 0x17 + +#define INSN_FIELD_RD 0xf80 +#define INSN_FIELD_RT 0xf8000 +#define INSN_FIELD_RS1 0xf8000 +#define INSN_FIELD_RS2 0x1f00000 +#define INSN_FIELD_RS3 0xf8000000 +#define INSN_FIELD_AQRL 0x6000000 +#define INSN_FIELD_AQ 0x4000000 +#define INSN_FIELD_RL 0x2000000 +#define INSN_FIELD_FM 0xf0000000 +#define INSN_FIELD_PRED 0xf000000 +#define INSN_FIELD_SUCC 0xf00000 +#define INSN_FIELD_RM 0x7000 +#define INSN_FIELD_FUNCT3 0x7000 +#define INSN_FIELD_FUNCT2 0x6000000 +#define INSN_FIELD_IMM20 0xfffff000 +#define INSN_FIELD_JIMM20 0xfffff000 +#define INSN_FIELD_IMM12 0xfff00000 +#define INSN_FIELD_CSR 0xfff00000 +#define INSN_FIELD_IMM12HI 0xfe000000 +#define INSN_FIELD_BIMM12HI 0xfe000000 +#define INSN_FIELD_IMM12LO 0xf80 +#define INSN_FIELD_BIMM12LO 0xf80 +#define INSN_FIELD_ZIMM 0xf8000 +#define INSN_FIELD_SHAMT 0x7f00000 +#define INSN_FIELD_SHAMTW 0x1f00000 +#define INSN_FIELD_SHAMTW4 0xf00000 +#define INSN_FIELD_SHAMTD 0x3f00000 +#define INSN_FIELD_BS 0xc0000000 +#define INSN_FIELD_RNUM 0xf00000 +#define INSN_FIELD_RC 0x3e000000 +#define INSN_FIELD_IMM2 0x300000 +#define INSN_FIELD_IMM3 0x700000 +#define INSN_FIELD_IMM4 
0xf00000 +#define INSN_FIELD_IMM5 0x1f00000 +#define INSN_FIELD_IMM6 0x3f00000 +#define INSN_FIELD_OPCODE 0x7f +#define INSN_FIELD_FUNCT7 0xfe000000 +#define INSN_FIELD_VD 0xf80 +#define INSN_FIELD_VS3 0xf80 +#define INSN_FIELD_VS1 0xf8000 +#define INSN_FIELD_VS2 0x1f00000 +#define INSN_FIELD_VM 0x2000000 +#define INSN_FIELD_WD 0x4000000 +#define INSN_FIELD_AMOOP 0xf8000000 +#define INSN_FIELD_NF 0xe0000000 +#define INSN_FIELD_SIMM5 0xf8000 +#define INSN_FIELD_ZIMM10 0x3ff00000 +#define INSN_FIELD_ZIMM11 0x7ff00000 +#define INSN_FIELD_C_NZUIMM10 0x1fe0 +#define INSN_FIELD_C_UIMM7LO 0x60 +#define INSN_FIELD_C_UIMM7HI 0x1c00 +#define INSN_FIELD_C_UIMM8LO 0x60 +#define INSN_FIELD_C_UIMM8HI 0x1c00 +#define INSN_FIELD_C_UIMM9LO 0x60 +#define INSN_FIELD_C_UIMM9HI 0x1c00 +#define INSN_FIELD_C_NZIMM6LO 0x7c +#define INSN_FIELD_C_NZIMM6HI 0x1000 +#define INSN_FIELD_C_IMM6LO 0x7c +#define INSN_FIELD_C_IMM6HI 0x1000 +#define INSN_FIELD_C_NZIMM10HI 0x1000 +#define INSN_FIELD_C_NZIMM10LO 0x7c +#define INSN_FIELD_C_NZIMM18HI 0x1000 +#define INSN_FIELD_C_NZIMM18LO 0x7c +#define INSN_FIELD_C_IMM12 0x1ffc +#define INSN_FIELD_C_BIMM9LO 0x7c +#define INSN_FIELD_C_BIMM9HI 0x1c00 +#define INSN_FIELD_C_NZUIMM5 0x7c +#define INSN_FIELD_C_NZUIMM6LO 0x7c +#define INSN_FIELD_C_NZUIMM6HI 0x1000 +#define INSN_FIELD_C_UIMM8SPLO 0x7c +#define INSN_FIELD_C_UIMM8SPHI 0x1000 +#define INSN_FIELD_C_UIMM8SP_S 0x1f80 +#define INSN_FIELD_C_UIMM10SPLO 0x7c +#define INSN_FIELD_C_UIMM10SPHI 0x1000 +#define INSN_FIELD_C_UIMM9SPLO 0x7c +#define INSN_FIELD_C_UIMM9SPHI 0x1000 +#define INSN_FIELD_C_UIMM10SP_S 0x1f80 +#define INSN_FIELD_C_UIMM9SP_S 0x1f80 +#define INSN_FIELD_RS1_P 0x380 +#define INSN_FIELD_RS2_P 0x1c +#define INSN_FIELD_RD_P 0x1c +#define INSN_FIELD_RD_RS1_N0 0xf80 +#define INSN_FIELD_RD_RS1_P 0x380 +#define INSN_FIELD_RD_RS1 0xf80 +#define INSN_FIELD_RD_N2 0xf80 +#define INSN_FIELD_RD_N0 0xf80 +#define INSN_FIELD_RS1_N0 0xf80 +#define INSN_FIELD_C_RS2_N0 0x7c +#define INSN_FIELD_C_RS1_N0 0xf80 +#define INSN_FIELD_C_RS2 0x7c +#endif +#ifdef DECLARE_INSN +DECLARE_INSN(add, MATCH_ADD, MASK_ADD) DECLARE_INSN(add16, MATCH_ADD16, MASK_ADD16) +DECLARE_INSN(add32, MATCH_ADD32, MASK_ADD32) DECLARE_INSN(add64, MATCH_ADD64, MASK_ADD64) +DECLARE_INSN(add8, MATCH_ADD8, MASK_ADD8) +DECLARE_INSN(add_uw, MATCH_ADD_UW, MASK_ADD_UW) +DECLARE_INSN(addi, MATCH_ADDI, MASK_ADDI) +DECLARE_INSN(addiw, MATCH_ADDIW, MASK_ADDIW) +DECLARE_INSN(addw, MATCH_ADDW, MASK_ADDW) +DECLARE_INSN(aes32dsi, MATCH_AES32DSI, MASK_AES32DSI) +DECLARE_INSN(aes32dsmi, MATCH_AES32DSMI, MASK_AES32DSMI) +DECLARE_INSN(aes32esi, MATCH_AES32ESI, MASK_AES32ESI) +DECLARE_INSN(aes32esmi, MATCH_AES32ESMI, MASK_AES32ESMI) +DECLARE_INSN(aes64ds, MATCH_AES64DS, MASK_AES64DS) +DECLARE_INSN(aes64dsm, MATCH_AES64DSM, MASK_AES64DSM) +DECLARE_INSN(aes64es, MATCH_AES64ES, MASK_AES64ES) +DECLARE_INSN(aes64esm, MATCH_AES64ESM, MASK_AES64ESM) +DECLARE_INSN(aes64im, MATCH_AES64IM, MASK_AES64IM) +DECLARE_INSN(aes64ks1i, MATCH_AES64KS1I, MASK_AES64KS1I) +DECLARE_INSN(aes64ks2, MATCH_AES64KS2, MASK_AES64KS2) +DECLARE_INSN(amoadd_d, MATCH_AMOADD_D, MASK_AMOADD_D) +DECLARE_INSN(amoadd_w, MATCH_AMOADD_W, MASK_AMOADD_W) +DECLARE_INSN(amoand_d, MATCH_AMOAND_D, MASK_AMOAND_D) +DECLARE_INSN(amoand_w, MATCH_AMOAND_W, MASK_AMOAND_W) +DECLARE_INSN(amomax_d, MATCH_AMOMAX_D, MASK_AMOMAX_D) +DECLARE_INSN(amomax_w, MATCH_AMOMAX_W, MASK_AMOMAX_W) +DECLARE_INSN(amomaxu_d, MATCH_AMOMAXU_D, MASK_AMOMAXU_D) +DECLARE_INSN(amomaxu_w, MATCH_AMOMAXU_W, MASK_AMOMAXU_W) +DECLARE_INSN(amomin_d, MATCH_AMOMIN_D, 
MASK_AMOMIN_D) +DECLARE_INSN(amomin_w, MATCH_AMOMIN_W, MASK_AMOMIN_W) +DECLARE_INSN(amominu_d, MATCH_AMOMINU_D, MASK_AMOMINU_D) +DECLARE_INSN(amominu_w, MATCH_AMOMINU_W, MASK_AMOMINU_W) +DECLARE_INSN(amoor_d, MATCH_AMOOR_D, MASK_AMOOR_D) +DECLARE_INSN(amoor_w, MATCH_AMOOR_W, MASK_AMOOR_W) +DECLARE_INSN(amoswap_d, MATCH_AMOSWAP_D, MASK_AMOSWAP_D) +DECLARE_INSN(amoswap_w, MATCH_AMOSWAP_W, MASK_AMOSWAP_W) +DECLARE_INSN(amoxor_d, MATCH_AMOXOR_D, MASK_AMOXOR_D) +DECLARE_INSN(amoxor_w, MATCH_AMOXOR_W, MASK_AMOXOR_W) +DECLARE_INSN(and, MATCH_AND, MASK_AND) +DECLARE_INSN(andi, MATCH_ANDI, MASK_ANDI) +DECLARE_INSN(andn, MATCH_ANDN, MASK_ANDN) +DECLARE_INSN(auipc, MATCH_AUIPC, MASK_AUIPC) DECLARE_INSN(ave, MATCH_AVE, MASK_AVE) +DECLARE_INSN(bclr, MATCH_BCLR, MASK_BCLR) +DECLARE_INSN(bclri, MATCH_BCLRI, MASK_BCLRI) +DECLARE_INSN(bcompress, MATCH_BCOMPRESS, MASK_BCOMPRESS) +DECLARE_INSN(bcompressw, MATCH_BCOMPRESSW, MASK_BCOMPRESSW) +DECLARE_INSN(bdecompress, MATCH_BDECOMPRESS, MASK_BDECOMPRESS) +DECLARE_INSN(bdecompressw, MATCH_BDECOMPRESSW, MASK_BDECOMPRESSW) +DECLARE_INSN(beq, MATCH_BEQ, MASK_BEQ) +DECLARE_INSN(bext, MATCH_BEXT, MASK_BEXT) +DECLARE_INSN(bexti, MATCH_BEXTI, MASK_BEXTI) +DECLARE_INSN(bfp, MATCH_BFP, MASK_BFP) +DECLARE_INSN(bfpw, MATCH_BFPW, MASK_BFPW) +DECLARE_INSN(bge, MATCH_BGE, MASK_BGE) +DECLARE_INSN(bgeu, MATCH_BGEU, MASK_BGEU) +DECLARE_INSN(binv, MATCH_BINV, MASK_BINV) +DECLARE_INSN(binvi, MATCH_BINVI, MASK_BINVI) DECLARE_INSN(bitrev, MATCH_BITREV, MASK_BITREV) DECLARE_INSN(bitrevi, MATCH_BITREVI, MASK_BITREVI) +DECLARE_INSN(blt, MATCH_BLT, MASK_BLT) +DECLARE_INSN(bltu, MATCH_BLTU, MASK_BLTU) +DECLARE_INSN(bmatflip, MATCH_BMATFLIP, MASK_BMATFLIP) +DECLARE_INSN(bmator, MATCH_BMATOR, MASK_BMATOR) +DECLARE_INSN(bmatxor, MATCH_BMATXOR, MASK_BMATXOR) +DECLARE_INSN(bne, MATCH_BNE, MASK_BNE) DECLARE_INSN(bpick, MATCH_BPICK, MASK_BPICK) -DECLARE_INSN(clrs8, MATCH_CLRS8, MASK_CLRS8) -DECLARE_INSN(clrs16, MATCH_CLRS16, MASK_CLRS16) -DECLARE_INSN(clrs32, MATCH_CLRS32, MASK_CLRS32) -DECLARE_INSN(clo8, MATCH_CLO8, MASK_CLO8) +DECLARE_INSN(bset, MATCH_BSET, MASK_BSET) +DECLARE_INSN(bseti, MATCH_BSETI, MASK_BSETI) +DECLARE_INSN(c_add, MATCH_C_ADD, MASK_C_ADD) +DECLARE_INSN(c_addi, MATCH_C_ADDI, MASK_C_ADDI) +DECLARE_INSN(c_addi16sp, MATCH_C_ADDI16SP, MASK_C_ADDI16SP) +DECLARE_INSN(c_addi4spn, MATCH_C_ADDI4SPN, MASK_C_ADDI4SPN) +DECLARE_INSN(c_addiw, MATCH_C_ADDIW, MASK_C_ADDIW) +DECLARE_INSN(c_addw, MATCH_C_ADDW, MASK_C_ADDW) +DECLARE_INSN(c_and, MATCH_C_AND, MASK_C_AND) +DECLARE_INSN(c_andi, MATCH_C_ANDI, MASK_C_ANDI) +DECLARE_INSN(c_beqz, MATCH_C_BEQZ, MASK_C_BEQZ) +DECLARE_INSN(c_bnez, MATCH_C_BNEZ, MASK_C_BNEZ) +DECLARE_INSN(c_ebreak, MATCH_C_EBREAK, MASK_C_EBREAK) +DECLARE_INSN(c_fld, MATCH_C_FLD, MASK_C_FLD) +DECLARE_INSN(c_fldsp, MATCH_C_FLDSP, MASK_C_FLDSP) +DECLARE_INSN(c_flw, MATCH_C_FLW, MASK_C_FLW) +DECLARE_INSN(c_flwsp, MATCH_C_FLWSP, MASK_C_FLWSP) +DECLARE_INSN(c_fsd, MATCH_C_FSD, MASK_C_FSD) +DECLARE_INSN(c_fsdsp, MATCH_C_FSDSP, MASK_C_FSDSP) +DECLARE_INSN(c_fsw, MATCH_C_FSW, MASK_C_FSW) +DECLARE_INSN(c_fswsp, MATCH_C_FSWSP, MASK_C_FSWSP) +DECLARE_INSN(c_j, MATCH_C_J, MASK_C_J) +DECLARE_INSN(c_jal, MATCH_C_JAL, MASK_C_JAL) +DECLARE_INSN(c_jalr, MATCH_C_JALR, MASK_C_JALR) +DECLARE_INSN(c_jr, MATCH_C_JR, MASK_C_JR) +DECLARE_INSN(c_ld, MATCH_C_LD, MASK_C_LD) +DECLARE_INSN(c_ldsp, MATCH_C_LDSP, MASK_C_LDSP) +DECLARE_INSN(c_li, MATCH_C_LI, MASK_C_LI) +DECLARE_INSN(c_lui, MATCH_C_LUI, MASK_C_LUI) +DECLARE_INSN(c_lw, MATCH_C_LW, MASK_C_LW) +DECLARE_INSN(c_lwsp, MATCH_C_LWSP, 
MASK_C_LWSP) +DECLARE_INSN(c_mv, MATCH_C_MV, MASK_C_MV) +DECLARE_INSN(c_nop, MATCH_C_NOP, MASK_C_NOP) +DECLARE_INSN(c_or, MATCH_C_OR, MASK_C_OR) +DECLARE_INSN(c_sd, MATCH_C_SD, MASK_C_SD) +DECLARE_INSN(c_sdsp, MATCH_C_SDSP, MASK_C_SDSP) +DECLARE_INSN(c_slli, MATCH_C_SLLI, MASK_C_SLLI) +DECLARE_INSN(c_srai, MATCH_C_SRAI, MASK_C_SRAI) +DECLARE_INSN(c_srli, MATCH_C_SRLI, MASK_C_SRLI) +DECLARE_INSN(c_sub, MATCH_C_SUB, MASK_C_SUB) +DECLARE_INSN(c_subw, MATCH_C_SUBW, MASK_C_SUBW) +DECLARE_INSN(c_sw, MATCH_C_SW, MASK_C_SW) +DECLARE_INSN(c_swsp, MATCH_C_SWSP, MASK_C_SWSP) +DECLARE_INSN(c_xor, MATCH_C_XOR, MASK_C_XOR) +DECLARE_INSN(cbo_clean, MATCH_CBO_CLEAN, MASK_CBO_CLEAN) +DECLARE_INSN(cbo_flush, MATCH_CBO_FLUSH, MASK_CBO_FLUSH) +DECLARE_INSN(cbo_inval, MATCH_CBO_INVAL, MASK_CBO_INVAL) +DECLARE_INSN(cbo_zero, MATCH_CBO_ZERO, MASK_CBO_ZERO) +DECLARE_INSN(clmul, MATCH_CLMUL, MASK_CLMUL) +DECLARE_INSN(clmulh, MATCH_CLMULH, MASK_CLMULH) +DECLARE_INSN(clmulr, MATCH_CLMULR, MASK_CLMULR) DECLARE_INSN(clo16, MATCH_CLO16, MASK_CLO16) DECLARE_INSN(clo32, MATCH_CLO32, MASK_CLO32) -DECLARE_INSN(clz8, MATCH_CLZ8, MASK_CLZ8) +DECLARE_INSN(clo8, MATCH_CLO8, MASK_CLO8) +DECLARE_INSN(clrs16, MATCH_CLRS16, MASK_CLRS16) +DECLARE_INSN(clrs32, MATCH_CLRS32, MASK_CLRS32) +DECLARE_INSN(clrs8, MATCH_CLRS8, MASK_CLRS8) +DECLARE_INSN(clz, MATCH_CLZ, MASK_CLZ) DECLARE_INSN(clz16, MATCH_CLZ16, MASK_CLZ16) DECLARE_INSN(clz32, MATCH_CLZ32, MASK_CLZ32) -DECLARE_INSN(cmpeq8, MATCH_CMPEQ8, MASK_CMPEQ8) +DECLARE_INSN(clz8, MATCH_CLZ8, MASK_CLZ8) +DECLARE_INSN(clzw, MATCH_CLZW, MASK_CLZW) +DECLARE_INSN(cmix, MATCH_CMIX, MASK_CMIX) +DECLARE_INSN(cmov, MATCH_CMOV, MASK_CMOV) DECLARE_INSN(cmpeq16, MATCH_CMPEQ16, MASK_CMPEQ16) +DECLARE_INSN(cmpeq8, MATCH_CMPEQ8, MASK_CMPEQ8) +DECLARE_INSN(cpop, MATCH_CPOP, MASK_CPOP) +DECLARE_INSN(cpopw, MATCH_CPOPW, MASK_CPOPW) DECLARE_INSN(cras16, MATCH_CRAS16, MASK_CRAS16) +DECLARE_INSN(cras32, MATCH_CRAS32, MASK_CRAS32) +DECLARE_INSN(crc32_b, MATCH_CRC32_B, MASK_CRC32_B) +DECLARE_INSN(crc32_d, MATCH_CRC32_D, MASK_CRC32_D) +DECLARE_INSN(crc32_h, MATCH_CRC32_H, MASK_CRC32_H) +DECLARE_INSN(crc32_w, MATCH_CRC32_W, MASK_CRC32_W) +DECLARE_INSN(crc32c_b, MATCH_CRC32C_B, MASK_CRC32C_B) +DECLARE_INSN(crc32c_d, MATCH_CRC32C_D, MASK_CRC32C_D) +DECLARE_INSN(crc32c_h, MATCH_CRC32C_H, MASK_CRC32C_H) +DECLARE_INSN(crc32c_w, MATCH_CRC32C_W, MASK_CRC32C_W) DECLARE_INSN(crsa16, MATCH_CRSA16, MASK_CRSA16) +DECLARE_INSN(crsa32, MATCH_CRSA32, MASK_CRSA32) +DECLARE_INSN(csrrc, MATCH_CSRRC, MASK_CSRRC) +DECLARE_INSN(csrrci, MATCH_CSRRCI, MASK_CSRRCI) +DECLARE_INSN(csrrs, MATCH_CSRRS, MASK_CSRRS) +DECLARE_INSN(csrrsi, MATCH_CSRRSI, MASK_CSRRSI) +DECLARE_INSN(csrrw, MATCH_CSRRW, MASK_CSRRW) +DECLARE_INSN(csrrwi, MATCH_CSRRWI, MASK_CSRRWI) +DECLARE_INSN(ctz, MATCH_CTZ, MASK_CTZ) +DECLARE_INSN(ctzw, MATCH_CTZW, MASK_CTZW) +DECLARE_INSN(div, MATCH_DIV, MASK_DIV) +DECLARE_INSN(divu, MATCH_DIVU, MASK_DIVU) +DECLARE_INSN(divuw, MATCH_DIVUW, MASK_DIVUW) +DECLARE_INSN(divw, MATCH_DIVW, MASK_DIVW) +DECLARE_INSN(dret, MATCH_DRET, MASK_DRET) +DECLARE_INSN(ebreak, MATCH_EBREAK, MASK_EBREAK) +DECLARE_INSN(ecall, MATCH_ECALL, MASK_ECALL) +DECLARE_INSN(fadd_d, MATCH_FADD_D, MASK_FADD_D) +DECLARE_INSN(fadd_h, MATCH_FADD_H, MASK_FADD_H) +DECLARE_INSN(fadd_q, MATCH_FADD_Q, MASK_FADD_Q) +DECLARE_INSN(fadd_s, MATCH_FADD_S, MASK_FADD_S) +DECLARE_INSN(fclass_d, MATCH_FCLASS_D, MASK_FCLASS_D) +DECLARE_INSN(fclass_h, MATCH_FCLASS_H, MASK_FCLASS_H) +DECLARE_INSN(fclass_q, MATCH_FCLASS_Q, MASK_FCLASS_Q) +DECLARE_INSN(fclass_s, MATCH_FCLASS_S, 
MASK_FCLASS_S) +DECLARE_INSN(fcvt_d_h, MATCH_FCVT_D_H, MASK_FCVT_D_H) +DECLARE_INSN(fcvt_d_l, MATCH_FCVT_D_L, MASK_FCVT_D_L) +DECLARE_INSN(fcvt_d_lu, MATCH_FCVT_D_LU, MASK_FCVT_D_LU) +DECLARE_INSN(fcvt_d_q, MATCH_FCVT_D_Q, MASK_FCVT_D_Q) +DECLARE_INSN(fcvt_d_s, MATCH_FCVT_D_S, MASK_FCVT_D_S) +DECLARE_INSN(fcvt_d_w, MATCH_FCVT_D_W, MASK_FCVT_D_W) +DECLARE_INSN(fcvt_d_wu, MATCH_FCVT_D_WU, MASK_FCVT_D_WU) +DECLARE_INSN(fcvt_h_d, MATCH_FCVT_H_D, MASK_FCVT_H_D) +DECLARE_INSN(fcvt_h_l, MATCH_FCVT_H_L, MASK_FCVT_H_L) +DECLARE_INSN(fcvt_h_lu, MATCH_FCVT_H_LU, MASK_FCVT_H_LU) +DECLARE_INSN(fcvt_h_q, MATCH_FCVT_H_Q, MASK_FCVT_H_Q) +DECLARE_INSN(fcvt_h_s, MATCH_FCVT_H_S, MASK_FCVT_H_S) +DECLARE_INSN(fcvt_h_w, MATCH_FCVT_H_W, MASK_FCVT_H_W) +DECLARE_INSN(fcvt_h_wu, MATCH_FCVT_H_WU, MASK_FCVT_H_WU) +DECLARE_INSN(fcvt_l_d, MATCH_FCVT_L_D, MASK_FCVT_L_D) +DECLARE_INSN(fcvt_l_h, MATCH_FCVT_L_H, MASK_FCVT_L_H) +DECLARE_INSN(fcvt_l_q, MATCH_FCVT_L_Q, MASK_FCVT_L_Q) +DECLARE_INSN(fcvt_l_s, MATCH_FCVT_L_S, MASK_FCVT_L_S) +DECLARE_INSN(fcvt_lu_d, MATCH_FCVT_LU_D, MASK_FCVT_LU_D) +DECLARE_INSN(fcvt_lu_h, MATCH_FCVT_LU_H, MASK_FCVT_LU_H) +DECLARE_INSN(fcvt_lu_q, MATCH_FCVT_LU_Q, MASK_FCVT_LU_Q) +DECLARE_INSN(fcvt_lu_s, MATCH_FCVT_LU_S, MASK_FCVT_LU_S) +DECLARE_INSN(fcvt_q_d, MATCH_FCVT_Q_D, MASK_FCVT_Q_D) +DECLARE_INSN(fcvt_q_h, MATCH_FCVT_Q_H, MASK_FCVT_Q_H) +DECLARE_INSN(fcvt_q_l, MATCH_FCVT_Q_L, MASK_FCVT_Q_L) +DECLARE_INSN(fcvt_q_lu, MATCH_FCVT_Q_LU, MASK_FCVT_Q_LU) +DECLARE_INSN(fcvt_q_s, MATCH_FCVT_Q_S, MASK_FCVT_Q_S) +DECLARE_INSN(fcvt_q_w, MATCH_FCVT_Q_W, MASK_FCVT_Q_W) +DECLARE_INSN(fcvt_q_wu, MATCH_FCVT_Q_WU, MASK_FCVT_Q_WU) +DECLARE_INSN(fcvt_s_d, MATCH_FCVT_S_D, MASK_FCVT_S_D) +DECLARE_INSN(fcvt_s_h, MATCH_FCVT_S_H, MASK_FCVT_S_H) +DECLARE_INSN(fcvt_s_l, MATCH_FCVT_S_L, MASK_FCVT_S_L) +DECLARE_INSN(fcvt_s_lu, MATCH_FCVT_S_LU, MASK_FCVT_S_LU) +DECLARE_INSN(fcvt_s_q, MATCH_FCVT_S_Q, MASK_FCVT_S_Q) +DECLARE_INSN(fcvt_s_w, MATCH_FCVT_S_W, MASK_FCVT_S_W) +DECLARE_INSN(fcvt_s_wu, MATCH_FCVT_S_WU, MASK_FCVT_S_WU) +DECLARE_INSN(fcvt_w_d, MATCH_FCVT_W_D, MASK_FCVT_W_D) +DECLARE_INSN(fcvt_w_h, MATCH_FCVT_W_H, MASK_FCVT_W_H) +DECLARE_INSN(fcvt_w_q, MATCH_FCVT_W_Q, MASK_FCVT_W_Q) +DECLARE_INSN(fcvt_w_s, MATCH_FCVT_W_S, MASK_FCVT_W_S) +DECLARE_INSN(fcvt_wu_d, MATCH_FCVT_WU_D, MASK_FCVT_WU_D) +DECLARE_INSN(fcvt_wu_h, MATCH_FCVT_WU_H, MASK_FCVT_WU_H) +DECLARE_INSN(fcvt_wu_q, MATCH_FCVT_WU_Q, MASK_FCVT_WU_Q) +DECLARE_INSN(fcvt_wu_s, MATCH_FCVT_WU_S, MASK_FCVT_WU_S) +DECLARE_INSN(fdiv_d, MATCH_FDIV_D, MASK_FDIV_D) +DECLARE_INSN(fdiv_h, MATCH_FDIV_H, MASK_FDIV_H) +DECLARE_INSN(fdiv_q, MATCH_FDIV_Q, MASK_FDIV_Q) +DECLARE_INSN(fdiv_s, MATCH_FDIV_S, MASK_FDIV_S) +DECLARE_INSN(fence, MATCH_FENCE, MASK_FENCE) +DECLARE_INSN(fence_i, MATCH_FENCE_I, MASK_FENCE_I) +DECLARE_INSN(feq_d, MATCH_FEQ_D, MASK_FEQ_D) +DECLARE_INSN(feq_h, MATCH_FEQ_H, MASK_FEQ_H) +DECLARE_INSN(feq_q, MATCH_FEQ_Q, MASK_FEQ_Q) +DECLARE_INSN(feq_s, MATCH_FEQ_S, MASK_FEQ_S) +DECLARE_INSN(fld, MATCH_FLD, MASK_FLD) +DECLARE_INSN(fle_d, MATCH_FLE_D, MASK_FLE_D) +DECLARE_INSN(fle_h, MATCH_FLE_H, MASK_FLE_H) +DECLARE_INSN(fle_q, MATCH_FLE_Q, MASK_FLE_Q) +DECLARE_INSN(fle_s, MATCH_FLE_S, MASK_FLE_S) +DECLARE_INSN(flh, MATCH_FLH, MASK_FLH) +DECLARE_INSN(flq, MATCH_FLQ, MASK_FLQ) +DECLARE_INSN(flt_d, MATCH_FLT_D, MASK_FLT_D) +DECLARE_INSN(flt_h, MATCH_FLT_H, MASK_FLT_H) +DECLARE_INSN(flt_q, MATCH_FLT_Q, MASK_FLT_Q) +DECLARE_INSN(flt_s, MATCH_FLT_S, MASK_FLT_S) +DECLARE_INSN(flw, MATCH_FLW, MASK_FLW) +DECLARE_INSN(fmadd_d, MATCH_FMADD_D, MASK_FMADD_D) 
+DECLARE_INSN(fmadd_h, MATCH_FMADD_H, MASK_FMADD_H) +DECLARE_INSN(fmadd_q, MATCH_FMADD_Q, MASK_FMADD_Q) +DECLARE_INSN(fmadd_s, MATCH_FMADD_S, MASK_FMADD_S) +DECLARE_INSN(fmax_d, MATCH_FMAX_D, MASK_FMAX_D) +DECLARE_INSN(fmax_h, MATCH_FMAX_H, MASK_FMAX_H) +DECLARE_INSN(fmax_q, MATCH_FMAX_Q, MASK_FMAX_Q) +DECLARE_INSN(fmax_s, MATCH_FMAX_S, MASK_FMAX_S) +DECLARE_INSN(fmin_d, MATCH_FMIN_D, MASK_FMIN_D) +DECLARE_INSN(fmin_h, MATCH_FMIN_H, MASK_FMIN_H) +DECLARE_INSN(fmin_q, MATCH_FMIN_Q, MASK_FMIN_Q) +DECLARE_INSN(fmin_s, MATCH_FMIN_S, MASK_FMIN_S) +DECLARE_INSN(fmsub_d, MATCH_FMSUB_D, MASK_FMSUB_D) +DECLARE_INSN(fmsub_h, MATCH_FMSUB_H, MASK_FMSUB_H) +DECLARE_INSN(fmsub_q, MATCH_FMSUB_Q, MASK_FMSUB_Q) +DECLARE_INSN(fmsub_s, MATCH_FMSUB_S, MASK_FMSUB_S) +DECLARE_INSN(fmul_d, MATCH_FMUL_D, MASK_FMUL_D) +DECLARE_INSN(fmul_h, MATCH_FMUL_H, MASK_FMUL_H) +DECLARE_INSN(fmul_q, MATCH_FMUL_Q, MASK_FMUL_Q) +DECLARE_INSN(fmul_s, MATCH_FMUL_S, MASK_FMUL_S) +DECLARE_INSN(fmv_d_x, MATCH_FMV_D_X, MASK_FMV_D_X) +DECLARE_INSN(fmv_h_x, MATCH_FMV_H_X, MASK_FMV_H_X) +DECLARE_INSN(fmv_w_x, MATCH_FMV_W_X, MASK_FMV_W_X) +DECLARE_INSN(fmv_x_d, MATCH_FMV_X_D, MASK_FMV_X_D) +DECLARE_INSN(fmv_x_h, MATCH_FMV_X_H, MASK_FMV_X_H) +DECLARE_INSN(fmv_x_w, MATCH_FMV_X_W, MASK_FMV_X_W) +DECLARE_INSN(fnmadd_d, MATCH_FNMADD_D, MASK_FNMADD_D) +DECLARE_INSN(fnmadd_h, MATCH_FNMADD_H, MASK_FNMADD_H) +DECLARE_INSN(fnmadd_q, MATCH_FNMADD_Q, MASK_FNMADD_Q) +DECLARE_INSN(fnmadd_s, MATCH_FNMADD_S, MASK_FNMADD_S) +DECLARE_INSN(fnmsub_d, MATCH_FNMSUB_D, MASK_FNMSUB_D) +DECLARE_INSN(fnmsub_h, MATCH_FNMSUB_H, MASK_FNMSUB_H) +DECLARE_INSN(fnmsub_q, MATCH_FNMSUB_Q, MASK_FNMSUB_Q) +DECLARE_INSN(fnmsub_s, MATCH_FNMSUB_S, MASK_FNMSUB_S) +DECLARE_INSN(fsd, MATCH_FSD, MASK_FSD) +DECLARE_INSN(fsgnj_d, MATCH_FSGNJ_D, MASK_FSGNJ_D) +DECLARE_INSN(fsgnj_h, MATCH_FSGNJ_H, MASK_FSGNJ_H) +DECLARE_INSN(fsgnj_q, MATCH_FSGNJ_Q, MASK_FSGNJ_Q) +DECLARE_INSN(fsgnj_s, MATCH_FSGNJ_S, MASK_FSGNJ_S) +DECLARE_INSN(fsgnjn_d, MATCH_FSGNJN_D, MASK_FSGNJN_D) +DECLARE_INSN(fsgnjn_h, MATCH_FSGNJN_H, MASK_FSGNJN_H) +DECLARE_INSN(fsgnjn_q, MATCH_FSGNJN_Q, MASK_FSGNJN_Q) +DECLARE_INSN(fsgnjn_s, MATCH_FSGNJN_S, MASK_FSGNJN_S) +DECLARE_INSN(fsgnjx_d, MATCH_FSGNJX_D, MASK_FSGNJX_D) +DECLARE_INSN(fsgnjx_h, MATCH_FSGNJX_H, MASK_FSGNJX_H) +DECLARE_INSN(fsgnjx_q, MATCH_FSGNJX_Q, MASK_FSGNJX_Q) +DECLARE_INSN(fsgnjx_s, MATCH_FSGNJX_S, MASK_FSGNJX_S) +DECLARE_INSN(fsh, MATCH_FSH, MASK_FSH) +DECLARE_INSN(fsl, MATCH_FSL, MASK_FSL) +DECLARE_INSN(fslw, MATCH_FSLW, MASK_FSLW) +DECLARE_INSN(fsq, MATCH_FSQ, MASK_FSQ) +DECLARE_INSN(fsqrt_d, MATCH_FSQRT_D, MASK_FSQRT_D) +DECLARE_INSN(fsqrt_h, MATCH_FSQRT_H, MASK_FSQRT_H) +DECLARE_INSN(fsqrt_q, MATCH_FSQRT_Q, MASK_FSQRT_Q) +DECLARE_INSN(fsqrt_s, MATCH_FSQRT_S, MASK_FSQRT_S) +DECLARE_INSN(fsr, MATCH_FSR, MASK_FSR) +DECLARE_INSN(fsri, MATCH_FSRI, MASK_FSRI) +DECLARE_INSN(fsriw, MATCH_FSRIW, MASK_FSRIW) +DECLARE_INSN(fsrw, MATCH_FSRW, MASK_FSRW) +DECLARE_INSN(fsub_d, MATCH_FSUB_D, MASK_FSUB_D) +DECLARE_INSN(fsub_h, MATCH_FSUB_H, MASK_FSUB_H) +DECLARE_INSN(fsub_q, MATCH_FSUB_Q, MASK_FSUB_Q) +DECLARE_INSN(fsub_s, MATCH_FSUB_S, MASK_FSUB_S) +DECLARE_INSN(fsw, MATCH_FSW, MASK_FSW) +DECLARE_INSN(gorc, MATCH_GORC, MASK_GORC) +DECLARE_INSN(gorci, MATCH_GORCI, MASK_GORCI) +DECLARE_INSN(gorciw, MATCH_GORCIW, MASK_GORCIW) +DECLARE_INSN(gorcw, MATCH_GORCW, MASK_GORCW) +DECLARE_INSN(grev, MATCH_GREV, MASK_GREV) +DECLARE_INSN(grevi, MATCH_GREVI, MASK_GREVI) +DECLARE_INSN(greviw, MATCH_GREVIW, MASK_GREVIW) +DECLARE_INSN(grevw, MATCH_GREVW, MASK_GREVW) 
+DECLARE_INSN(hfence_gvma, MATCH_HFENCE_GVMA, MASK_HFENCE_GVMA) +DECLARE_INSN(hfence_vvma, MATCH_HFENCE_VVMA, MASK_HFENCE_VVMA) +DECLARE_INSN(hinval_gvma, MATCH_HINVAL_GVMA, MASK_HINVAL_GVMA) +DECLARE_INSN(hinval_vvma, MATCH_HINVAL_VVMA, MASK_HINVAL_VVMA) +DECLARE_INSN(hlv_b, MATCH_HLV_B, MASK_HLV_B) +DECLARE_INSN(hlv_bu, MATCH_HLV_BU, MASK_HLV_BU) +DECLARE_INSN(hlv_d, MATCH_HLV_D, MASK_HLV_D) +DECLARE_INSN(hlv_h, MATCH_HLV_H, MASK_HLV_H) +DECLARE_INSN(hlv_hu, MATCH_HLV_HU, MASK_HLV_HU) +DECLARE_INSN(hlv_w, MATCH_HLV_W, MASK_HLV_W) +DECLARE_INSN(hlv_wu, MATCH_HLV_WU, MASK_HLV_WU) +DECLARE_INSN(hlvx_hu, MATCH_HLVX_HU, MASK_HLVX_HU) +DECLARE_INSN(hlvx_wu, MATCH_HLVX_WU, MASK_HLVX_WU) +DECLARE_INSN(hsv_b, MATCH_HSV_B, MASK_HSV_B) +DECLARE_INSN(hsv_d, MATCH_HSV_D, MASK_HSV_D) +DECLARE_INSN(hsv_h, MATCH_HSV_H, MASK_HSV_H) +DECLARE_INSN(hsv_w, MATCH_HSV_W, MASK_HSV_W) DECLARE_INSN(insb, MATCH_INSB, MASK_INSB) -DECLARE_INSN(kabs8, MATCH_KABS8, MASK_KABS8) +DECLARE_INSN(jal, MATCH_JAL, MASK_JAL) +DECLARE_INSN(jalr, MATCH_JALR, MASK_JALR) DECLARE_INSN(kabs16, MATCH_KABS16, MASK_KABS16) +DECLARE_INSN(kabs32, MATCH_KABS32, MASK_KABS32) +DECLARE_INSN(kabs8, MATCH_KABS8, MASK_KABS8) DECLARE_INSN(kabsw, MATCH_KABSW, MASK_KABSW) -DECLARE_INSN(kadd8, MATCH_KADD8, MASK_KADD8) DECLARE_INSN(kadd16, MATCH_KADD16, MASK_KADD16) +DECLARE_INSN(kadd32, MATCH_KADD32, MASK_KADD32) DECLARE_INSN(kadd64, MATCH_KADD64, MASK_KADD64) +DECLARE_INSN(kadd8, MATCH_KADD8, MASK_KADD8) DECLARE_INSN(kaddh, MATCH_KADDH, MASK_KADDH) DECLARE_INSN(kaddw, MATCH_KADDW, MASK_KADDW) DECLARE_INSN(kcras16, MATCH_KCRAS16, MASK_KCRAS16) +DECLARE_INSN(kcras32, MATCH_KCRAS32, MASK_KCRAS32) DECLARE_INSN(kcrsa16, MATCH_KCRSA16, MASK_KCRSA16) -DECLARE_INSN(kdmbb, MATCH_KDMBB, MASK_KDMBB) -DECLARE_INSN(kdmbt, MATCH_KDMBT, MASK_KDMBT) -DECLARE_INSN(kdmtt, MATCH_KDMTT, MASK_KDMTT) +DECLARE_INSN(kcrsa32, MATCH_KCRSA32, MASK_KCRSA32) DECLARE_INSN(kdmabb, MATCH_KDMABB, MASK_KDMABB) +DECLARE_INSN(kdmabb16, MATCH_KDMABB16, MASK_KDMABB16) DECLARE_INSN(kdmabt, MATCH_KDMABT, MASK_KDMABT) +DECLARE_INSN(kdmabt16, MATCH_KDMABT16, MASK_KDMABT16) DECLARE_INSN(kdmatt, MATCH_KDMATT, MASK_KDMATT) -DECLARE_INSN(khm8, MATCH_KHM8, MASK_KHM8) -DECLARE_INSN(khmx8, MATCH_KHMX8, MASK_KHMX8) +DECLARE_INSN(kdmatt16, MATCH_KDMATT16, MASK_KDMATT16) +DECLARE_INSN(kdmbb, MATCH_KDMBB, MASK_KDMBB) +DECLARE_INSN(kdmbb16, MATCH_KDMBB16, MASK_KDMBB16) +DECLARE_INSN(kdmbt, MATCH_KDMBT, MASK_KDMBT) +DECLARE_INSN(kdmbt16, MATCH_KDMBT16, MASK_KDMBT16) +DECLARE_INSN(kdmtt, MATCH_KDMTT, MASK_KDMTT) +DECLARE_INSN(kdmtt16, MATCH_KDMTT16, MASK_KDMTT16) DECLARE_INSN(khm16, MATCH_KHM16, MASK_KHM16) -DECLARE_INSN(khmx16, MATCH_KHMX16, MASK_KHMX16) +DECLARE_INSN(khm8, MATCH_KHM8, MASK_KHM8) DECLARE_INSN(khmbb, MATCH_KHMBB, MASK_KHMBB) +DECLARE_INSN(khmbb16, MATCH_KHMBB16, MASK_KHMBB16) DECLARE_INSN(khmbt, MATCH_KHMBT, MASK_KHMBT) +DECLARE_INSN(khmbt16, MATCH_KHMBT16, MASK_KHMBT16) DECLARE_INSN(khmtt, MATCH_KHMTT, MASK_KHMTT) +DECLARE_INSN(khmtt16, MATCH_KHMTT16, MASK_KHMTT16) +DECLARE_INSN(khmx16, MATCH_KHMX16, MASK_KHMX16) +DECLARE_INSN(khmx8, MATCH_KHMX8, MASK_KHMX8) DECLARE_INSN(kmabb, MATCH_KMABB, MASK_KMABB) +DECLARE_INSN(kmabb32, MATCH_KMABB32, MASK_KMABB32) DECLARE_INSN(kmabt, MATCH_KMABT, MASK_KMABT) -DECLARE_INSN(kmatt, MATCH_KMATT, MASK_KMATT) +DECLARE_INSN(kmabt32, MATCH_KMABT32, MASK_KMABT32) DECLARE_INSN(kmada, MATCH_KMADA, MASK_KMADA) -DECLARE_INSN(kmaxda, MATCH_KMAXDA, MASK_KMAXDA) -DECLARE_INSN(kmads, MATCH_KMADS, MASK_KMADS) DECLARE_INSN(kmadrs, MATCH_KMADRS, MASK_KMADRS) 
-DECLARE_INSN(kmaxds, MATCH_KMAXDS, MASK_KMAXDS) +DECLARE_INSN(kmadrs32, MATCH_KMADRS32, MASK_KMADRS32) +DECLARE_INSN(kmads, MATCH_KMADS, MASK_KMADS) +DECLARE_INSN(kmads32, MATCH_KMADS32, MASK_KMADS32) DECLARE_INSN(kmar64, MATCH_KMAR64, MASK_KMAR64) +DECLARE_INSN(kmatt, MATCH_KMATT, MASK_KMATT) +DECLARE_INSN(kmatt32, MATCH_KMATT32, MASK_KMATT32) +DECLARE_INSN(kmaxda, MATCH_KMAXDA, MASK_KMAXDA) +DECLARE_INSN(kmaxda32, MATCH_KMAXDA32, MASK_KMAXDA32) +DECLARE_INSN(kmaxds, MATCH_KMAXDS, MASK_KMAXDS) +DECLARE_INSN(kmaxds32, MATCH_KMAXDS32, MASK_KMAXDS32) DECLARE_INSN(kmda, MATCH_KMDA, MASK_KMDA) -DECLARE_INSN(kmxda, MATCH_KMXDA, MASK_KMXDA) +DECLARE_INSN(kmda32, MATCH_KMDA32, MASK_KMDA32) DECLARE_INSN(kmmac, MATCH_KMMAC, MASK_KMMAC) DECLARE_INSN(kmmac_u, MATCH_KMMAC_U, MASK_KMMAC_U) DECLARE_INSN(kmmawb, MATCH_KMMAWB, MASK_KMMAWB) -DECLARE_INSN(kmmawb_u, MATCH_KMMAWB_U, MASK_KMMAWB_U) DECLARE_INSN(kmmawb2, MATCH_KMMAWB2, MASK_KMMAWB2) DECLARE_INSN(kmmawb2_u, MATCH_KMMAWB2_U, MASK_KMMAWB2_U) +DECLARE_INSN(kmmawb_u, MATCH_KMMAWB_U, MASK_KMMAWB_U) DECLARE_INSN(kmmawt, MATCH_KMMAWT, MASK_KMMAWT) -DECLARE_INSN(kmmawt_u, MATCH_KMMAWT_U, MASK_KMMAWT_U) DECLARE_INSN(kmmawt2, MATCH_KMMAWT2, MASK_KMMAWT2) DECLARE_INSN(kmmawt2_u, MATCH_KMMAWT2_U, MASK_KMMAWT2_U) +DECLARE_INSN(kmmawt_u, MATCH_KMMAWT_U, MASK_KMMAWT_U) DECLARE_INSN(kmmsb, MATCH_KMMSB, MASK_KMMSB) DECLARE_INSN(kmmsb_u, MATCH_KMMSB_U, MASK_KMMSB_U) DECLARE_INSN(kmmwb2, MATCH_KMMWB2, MASK_KMMWB2) @@ -4180,86 +3655,205 @@ DECLARE_INSN(kmmwb2_u, MATCH_KMMWB2_U, MASK_KMMWB2_U) DECLARE_INSN(kmmwt2, MATCH_KMMWT2, MASK_KMMWT2) DECLARE_INSN(kmmwt2_u, MATCH_KMMWT2_U, MASK_KMMWT2_U) DECLARE_INSN(kmsda, MATCH_KMSDA, MASK_KMSDA) -DECLARE_INSN(kmsxda, MATCH_KMSXDA, MASK_KMSXDA) +DECLARE_INSN(kmsda32, MATCH_KMSDA32, MASK_KMSDA32) DECLARE_INSN(kmsr64, MATCH_KMSR64, MASK_KMSR64) -DECLARE_INSN(ksllw, MATCH_KSLLW, MASK_KSLLW) -DECLARE_INSN(kslliw, MATCH_KSLLIW, MASK_KSLLIW) -DECLARE_INSN(ksll8, MATCH_KSLL8, MASK_KSLL8) -DECLARE_INSN(kslli8, MATCH_KSLLI8, MASK_KSLLI8) +DECLARE_INSN(kmsxda, MATCH_KMSXDA, MASK_KMSXDA) +DECLARE_INSN(kmsxda32, MATCH_KMSXDA32, MASK_KMSXDA32) +DECLARE_INSN(kmxda, MATCH_KMXDA, MASK_KMXDA) +DECLARE_INSN(kmxda32, MATCH_KMXDA32, MASK_KMXDA32) DECLARE_INSN(ksll16, MATCH_KSLL16, MASK_KSLL16) +DECLARE_INSN(ksll32, MATCH_KSLL32, MASK_KSLL32) +DECLARE_INSN(ksll8, MATCH_KSLL8, MASK_KSLL8) DECLARE_INSN(kslli16, MATCH_KSLLI16, MASK_KSLLI16) -DECLARE_INSN(kslra8, MATCH_KSLRA8, MASK_KSLRA8) -DECLARE_INSN(kslra8_u, MATCH_KSLRA8_U, MASK_KSLRA8_U) +DECLARE_INSN(kslli32, MATCH_KSLLI32, MASK_KSLLI32) +DECLARE_INSN(kslli8, MATCH_KSLLI8, MASK_KSLLI8) +DECLARE_INSN(kslliw, MATCH_KSLLIW, MASK_KSLLIW) +DECLARE_INSN(ksllw, MATCH_KSLLW, MASK_KSLLW) DECLARE_INSN(kslra16, MATCH_KSLRA16, MASK_KSLRA16) DECLARE_INSN(kslra16_u, MATCH_KSLRA16_U, MASK_KSLRA16_U) +DECLARE_INSN(kslra32, MATCH_KSLRA32, MASK_KSLRA32) +DECLARE_INSN(kslra32_u, MATCH_KSLRA32_U, MASK_KSLRA32_U) +DECLARE_INSN(kslra8, MATCH_KSLRA8, MASK_KSLRA8) +DECLARE_INSN(kslra8_u, MATCH_KSLRA8_U, MASK_KSLRA8_U) DECLARE_INSN(kslraw, MATCH_KSLRAW, MASK_KSLRAW) DECLARE_INSN(kslraw_u, MATCH_KSLRAW_U, MASK_KSLRAW_U) DECLARE_INSN(kstas16, MATCH_KSTAS16, MASK_KSTAS16) +DECLARE_INSN(kstas32, MATCH_KSTAS32, MASK_KSTAS32) DECLARE_INSN(kstsa16, MATCH_KSTSA16, MASK_KSTSA16) -DECLARE_INSN(ksub8, MATCH_KSUB8, MASK_KSUB8) +DECLARE_INSN(kstsa32, MATCH_KSTSA32, MASK_KSTSA32) DECLARE_INSN(ksub16, MATCH_KSUB16, MASK_KSUB16) +DECLARE_INSN(ksub32, MATCH_KSUB32, MASK_KSUB32) DECLARE_INSN(ksub64, MATCH_KSUB64, MASK_KSUB64) 
+DECLARE_INSN(ksub8, MATCH_KSUB8, MASK_KSUB8) DECLARE_INSN(ksubh, MATCH_KSUBH, MASK_KSUBH) DECLARE_INSN(ksubw, MATCH_KSUBW, MASK_KSUBW) DECLARE_INSN(kwmmul, MATCH_KWMMUL, MASK_KWMMUL) DECLARE_INSN(kwmmul_u, MATCH_KWMMUL_U, MASK_KWMMUL_U) +DECLARE_INSN(lb, MATCH_LB, MASK_LB) +DECLARE_INSN(lbu, MATCH_LBU, MASK_LBU) +DECLARE_INSN(ld, MATCH_LD, MASK_LD) +DECLARE_INSN(lh, MATCH_LH, MASK_LH) +DECLARE_INSN(lhu, MATCH_LHU, MASK_LHU) +DECLARE_INSN(lr_d, MATCH_LR_D, MASK_LR_D) +DECLARE_INSN(lr_w, MATCH_LR_W, MASK_LR_W) +DECLARE_INSN(lui, MATCH_LUI, MASK_LUI) +DECLARE_INSN(lw, MATCH_LW, MASK_LW) +DECLARE_INSN(lwu, MATCH_LWU, MASK_LWU) DECLARE_INSN(maddr32, MATCH_MADDR32, MASK_MADDR32) +DECLARE_INSN(max, MATCH_MAX, MASK_MAX) +DECLARE_INSN(maxu, MATCH_MAXU, MASK_MAXU) DECLARE_INSN(maxw, MATCH_MAXW, MASK_MAXW) +DECLARE_INSN(min, MATCH_MIN, MASK_MIN) +DECLARE_INSN(minu, MATCH_MINU, MASK_MINU) DECLARE_INSN(minw, MATCH_MINW, MASK_MINW) +DECLARE_INSN(mret, MATCH_MRET, MASK_MRET) DECLARE_INSN(msubr32, MATCH_MSUBR32, MASK_MSUBR32) +DECLARE_INSN(mul, MATCH_MUL, MASK_MUL) +DECLARE_INSN(mulh, MATCH_MULH, MASK_MULH) +DECLARE_INSN(mulhsu, MATCH_MULHSU, MASK_MULHSU) +DECLARE_INSN(mulhu, MATCH_MULHU, MASK_MULHU) DECLARE_INSN(mulr64, MATCH_MULR64, MASK_MULR64) DECLARE_INSN(mulsr64, MATCH_MULSR64, MASK_MULSR64) +DECLARE_INSN(mulw, MATCH_MULW, MASK_MULW) +DECLARE_INSN(or, MATCH_OR, MASK_OR) +DECLARE_INSN(ori, MATCH_ORI, MASK_ORI) +DECLARE_INSN(orn, MATCH_ORN, MASK_ORN) +DECLARE_INSN(pack, MATCH_PACK, MASK_PACK) +DECLARE_INSN(packh, MATCH_PACKH, MASK_PACKH) +DECLARE_INSN(packu, MATCH_PACKU, MASK_PACKU) +DECLARE_INSN(packuw, MATCH_PACKUW, MASK_PACKUW) +DECLARE_INSN(packw, MATCH_PACKW, MASK_PACKW) +DECLARE_INSN(pause, MATCH_PAUSE, MASK_PAUSE) DECLARE_INSN(pbsad, MATCH_PBSAD, MASK_PBSAD) DECLARE_INSN(pbsada, MATCH_PBSADA, MASK_PBSADA) DECLARE_INSN(pkbb16, MATCH_PKBB16, MASK_PKBB16) +DECLARE_INSN(pkbb32, MATCH_PKBB32, MASK_PKBB32) DECLARE_INSN(pkbt16, MATCH_PKBT16, MASK_PKBT16) -DECLARE_INSN(pktt16, MATCH_PKTT16, MASK_PKTT16) +DECLARE_INSN(pkbt32, MATCH_PKBT32, MASK_PKBT32) DECLARE_INSN(pktb16, MATCH_PKTB16, MASK_PKTB16) -DECLARE_INSN(radd8, MATCH_RADD8, MASK_RADD8) +DECLARE_INSN(pktb32, MATCH_PKTB32, MASK_PKTB32) +DECLARE_INSN(pktt16, MATCH_PKTT16, MASK_PKTT16) +DECLARE_INSN(pktt32, MATCH_PKTT32, MASK_PKTT32) +DECLARE_INSN(prefetch_i, MATCH_PREFETCH_I, MASK_PREFETCH_I) +DECLARE_INSN(prefetch_r, MATCH_PREFETCH_R, MASK_PREFETCH_R) +DECLARE_INSN(prefetch_w, MATCH_PREFETCH_W, MASK_PREFETCH_W) DECLARE_INSN(radd16, MATCH_RADD16, MASK_RADD16) +DECLARE_INSN(radd32, MATCH_RADD32, MASK_RADD32) DECLARE_INSN(radd64, MATCH_RADD64, MASK_RADD64) +DECLARE_INSN(radd8, MATCH_RADD8, MASK_RADD8) DECLARE_INSN(raddw, MATCH_RADDW, MASK_RADDW) DECLARE_INSN(rcras16, MATCH_RCRAS16, MASK_RCRAS16) +DECLARE_INSN(rcras32, MATCH_RCRAS32, MASK_RCRAS32) DECLARE_INSN(rcrsa16, MATCH_RCRSA16, MASK_RCRSA16) +DECLARE_INSN(rcrsa32, MATCH_RCRSA32, MASK_RCRSA32) +DECLARE_INSN(rem, MATCH_REM, MASK_REM) +DECLARE_INSN(remu, MATCH_REMU, MASK_REMU) +DECLARE_INSN(remuw, MATCH_REMUW, MASK_REMUW) +DECLARE_INSN(remw, MATCH_REMW, MASK_REMW) +DECLARE_INSN(rol, MATCH_ROL, MASK_ROL) +DECLARE_INSN(rolw, MATCH_ROLW, MASK_ROLW) +DECLARE_INSN(ror, MATCH_ROR, MASK_ROR) +DECLARE_INSN(rori, MATCH_RORI, MASK_RORI) +DECLARE_INSN(roriw, MATCH_RORIW, MASK_RORIW) +DECLARE_INSN(rorw, MATCH_RORW, MASK_RORW) DECLARE_INSN(rstas16, MATCH_RSTAS16, MASK_RSTAS16) +DECLARE_INSN(rstas32, MATCH_RSTAS32, MASK_RSTAS32) DECLARE_INSN(rstsa16, MATCH_RSTSA16, MASK_RSTSA16) -DECLARE_INSN(rsub8, 
MATCH_RSUB8, MASK_RSUB8) +DECLARE_INSN(rstsa32, MATCH_RSTSA32, MASK_RSTSA32) DECLARE_INSN(rsub16, MATCH_RSUB16, MASK_RSUB16) +DECLARE_INSN(rsub32, MATCH_RSUB32, MASK_RSUB32) DECLARE_INSN(rsub64, MATCH_RSUB64, MASK_RSUB64) +DECLARE_INSN(rsub8, MATCH_RSUB8, MASK_RSUB8) DECLARE_INSN(rsubw, MATCH_RSUBW, MASK_RSUBW) -DECLARE_INSN(sclip8, MATCH_SCLIP8, MASK_SCLIP8) +DECLARE_INSN(sb, MATCH_SB, MASK_SB) +DECLARE_INSN(sc_d, MATCH_SC_D, MASK_SC_D) +DECLARE_INSN(sc_w, MATCH_SC_W, MASK_SC_W) DECLARE_INSN(sclip16, MATCH_SCLIP16, MASK_SCLIP16) DECLARE_INSN(sclip32, MATCH_SCLIP32, MASK_SCLIP32) -DECLARE_INSN(scmple8, MATCH_SCMPLE8, MASK_SCMPLE8) +DECLARE_INSN(sclip8, MATCH_SCLIP8, MASK_SCLIP8) DECLARE_INSN(scmple16, MATCH_SCMPLE16, MASK_SCMPLE16) -DECLARE_INSN(scmplt8, MATCH_SCMPLT8, MASK_SCMPLT8) +DECLARE_INSN(scmple8, MATCH_SCMPLE8, MASK_SCMPLE8) DECLARE_INSN(scmplt16, MATCH_SCMPLT16, MASK_SCMPLT16) -DECLARE_INSN(sll8, MATCH_SLL8, MASK_SLL8) -DECLARE_INSN(slli8, MATCH_SLLI8, MASK_SLLI8) +DECLARE_INSN(scmplt8, MATCH_SCMPLT8, MASK_SCMPLT8) +DECLARE_INSN(sd, MATCH_SD, MASK_SD) +DECLARE_INSN(sext_b, MATCH_SEXT_B, MASK_SEXT_B) +DECLARE_INSN(sext_h, MATCH_SEXT_H, MASK_SEXT_H) +DECLARE_INSN(sfence_inval_ir, MATCH_SFENCE_INVAL_IR, MASK_SFENCE_INVAL_IR) +DECLARE_INSN(sfence_vma, MATCH_SFENCE_VMA, MASK_SFENCE_VMA) +DECLARE_INSN(sfence_w_inval, MATCH_SFENCE_W_INVAL, MASK_SFENCE_W_INVAL) +DECLARE_INSN(sh, MATCH_SH, MASK_SH) +DECLARE_INSN(sh1add, MATCH_SH1ADD, MASK_SH1ADD) +DECLARE_INSN(sh1add_uw, MATCH_SH1ADD_UW, MASK_SH1ADD_UW) +DECLARE_INSN(sh2add, MATCH_SH2ADD, MASK_SH2ADD) +DECLARE_INSN(sh2add_uw, MATCH_SH2ADD_UW, MASK_SH2ADD_UW) +DECLARE_INSN(sh3add, MATCH_SH3ADD, MASK_SH3ADD) +DECLARE_INSN(sh3add_uw, MATCH_SH3ADD_UW, MASK_SH3ADD_UW) +DECLARE_INSN(sha256sig0, MATCH_SHA256SIG0, MASK_SHA256SIG0) +DECLARE_INSN(sha256sig1, MATCH_SHA256SIG1, MASK_SHA256SIG1) +DECLARE_INSN(sha256sum0, MATCH_SHA256SUM0, MASK_SHA256SUM0) +DECLARE_INSN(sha256sum1, MATCH_SHA256SUM1, MASK_SHA256SUM1) +DECLARE_INSN(sha512sig0, MATCH_SHA512SIG0, MASK_SHA512SIG0) +DECLARE_INSN(sha512sig0h, MATCH_SHA512SIG0H, MASK_SHA512SIG0H) +DECLARE_INSN(sha512sig0l, MATCH_SHA512SIG0L, MASK_SHA512SIG0L) +DECLARE_INSN(sha512sig1, MATCH_SHA512SIG1, MASK_SHA512SIG1) +DECLARE_INSN(sha512sig1h, MATCH_SHA512SIG1H, MASK_SHA512SIG1H) +DECLARE_INSN(sha512sig1l, MATCH_SHA512SIG1L, MASK_SHA512SIG1L) +DECLARE_INSN(sha512sum0, MATCH_SHA512SUM0, MASK_SHA512SUM0) +DECLARE_INSN(sha512sum0r, MATCH_SHA512SUM0R, MASK_SHA512SUM0R) +DECLARE_INSN(sha512sum1, MATCH_SHA512SUM1, MASK_SHA512SUM1) +DECLARE_INSN(sha512sum1r, MATCH_SHA512SUM1R, MASK_SHA512SUM1R) +DECLARE_INSN(shfl, MATCH_SHFL, MASK_SHFL) +DECLARE_INSN(shfli, MATCH_SHFLI, MASK_SHFLI) +DECLARE_INSN(shflw, MATCH_SHFLW, MASK_SHFLW) +DECLARE_INSN(sinval_vma, MATCH_SINVAL_VMA, MASK_SINVAL_VMA) +DECLARE_INSN(sll, MATCH_SLL, MASK_SLL) DECLARE_INSN(sll16, MATCH_SLL16, MASK_SLL16) +DECLARE_INSN(sll32, MATCH_SLL32, MASK_SLL32) +DECLARE_INSN(sll8, MATCH_SLL8, MASK_SLL8) +DECLARE_INSN(slli, MATCH_SLLI, MASK_SLLI) DECLARE_INSN(slli16, MATCH_SLLI16, MASK_SLLI16) +DECLARE_INSN(slli32, MATCH_SLLI32, MASK_SLLI32) +DECLARE_INSN(slli8, MATCH_SLLI8, MASK_SLLI8) +DECLARE_INSN(slli_uw, MATCH_SLLI_UW, MASK_SLLI_UW) +DECLARE_INSN(slliw, MATCH_SLLIW, MASK_SLLIW) +DECLARE_INSN(sllw, MATCH_SLLW, MASK_SLLW) +DECLARE_INSN(slo, MATCH_SLO, MASK_SLO) +DECLARE_INSN(sloi, MATCH_SLOI, MASK_SLOI) +DECLARE_INSN(sloiw, MATCH_SLOIW, MASK_SLOIW) +DECLARE_INSN(slow, MATCH_SLOW, MASK_SLOW) +DECLARE_INSN(slt, MATCH_SLT, MASK_SLT) +DECLARE_INSN(slti, 
MATCH_SLTI, MASK_SLTI) +DECLARE_INSN(sltiu, MATCH_SLTIU, MASK_SLTIU) +DECLARE_INSN(sltu, MATCH_SLTU, MASK_SLTU) +DECLARE_INSN(sm3p0, MATCH_SM3P0, MASK_SM3P0) +DECLARE_INSN(sm3p1, MATCH_SM3P1, MASK_SM3P1) +DECLARE_INSN(sm4ed, MATCH_SM4ED, MASK_SM4ED) +DECLARE_INSN(sm4ks, MATCH_SM4KS, MASK_SM4KS) DECLARE_INSN(smal, MATCH_SMAL, MASK_SMAL) DECLARE_INSN(smalbb, MATCH_SMALBB, MASK_SMALBB) DECLARE_INSN(smalbt, MATCH_SMALBT, MASK_SMALBT) -DECLARE_INSN(smaltt, MATCH_SMALTT, MASK_SMALTT) DECLARE_INSN(smalda, MATCH_SMALDA, MASK_SMALDA) -DECLARE_INSN(smalxda, MATCH_SMALXDA, MASK_SMALXDA) -DECLARE_INSN(smalds, MATCH_SMALDS, MASK_SMALDS) DECLARE_INSN(smaldrs, MATCH_SMALDRS, MASK_SMALDRS) +DECLARE_INSN(smalds, MATCH_SMALDS, MASK_SMALDS) +DECLARE_INSN(smaltt, MATCH_SMALTT, MASK_SMALTT) +DECLARE_INSN(smalxda, MATCH_SMALXDA, MASK_SMALXDA) DECLARE_INSN(smalxds, MATCH_SMALXDS, MASK_SMALXDS) -DECLARE_INSN(smar64, MATCH_SMAR64, MASK_SMAR64) DECLARE_INSN(smaqa, MATCH_SMAQA, MASK_SMAQA) DECLARE_INSN(smaqa_su, MATCH_SMAQA_SU, MASK_SMAQA_SU) -DECLARE_INSN(smax8, MATCH_SMAX8, MASK_SMAX8) +DECLARE_INSN(smar64, MATCH_SMAR64, MASK_SMAR64) DECLARE_INSN(smax16, MATCH_SMAX16, MASK_SMAX16) +DECLARE_INSN(smax32, MATCH_SMAX32, MASK_SMAX32) +DECLARE_INSN(smax8, MATCH_SMAX8, MASK_SMAX8) DECLARE_INSN(smbb16, MATCH_SMBB16, MASK_SMBB16) DECLARE_INSN(smbt16, MATCH_SMBT16, MASK_SMBT16) -DECLARE_INSN(smtt16, MATCH_SMTT16, MASK_SMTT16) -DECLARE_INSN(smds, MATCH_SMDS, MASK_SMDS) +DECLARE_INSN(smbt32, MATCH_SMBT32, MASK_SMBT32) DECLARE_INSN(smdrs, MATCH_SMDRS, MASK_SMDRS) -DECLARE_INSN(smxds, MATCH_SMXDS, MASK_SMXDS) -DECLARE_INSN(smin8, MATCH_SMIN8, MASK_SMIN8) +DECLARE_INSN(smdrs32, MATCH_SMDRS32, MASK_SMDRS32) +DECLARE_INSN(smds, MATCH_SMDS, MASK_SMDS) +DECLARE_INSN(smds32, MATCH_SMDS32, MASK_SMDS32) DECLARE_INSN(smin16, MATCH_SMIN16, MASK_SMIN16) +DECLARE_INSN(smin32, MATCH_SMIN32, MASK_SMIN32) +DECLARE_INSN(smin8, MATCH_SMIN8, MASK_SMIN8) DECLARE_INSN(smmul, MATCH_SMMUL, MASK_SMMUL) DECLARE_INSN(smmul_u, MATCH_SMMUL_U, MASK_SMMUL_U) DECLARE_INSN(smmwb, MATCH_SMMWB, MASK_SMMWB) @@ -4269,183 +3863,598 @@ DECLARE_INSN(smmwt_u, MATCH_SMMWT_U, MASK_SMMWT_U) DECLARE_INSN(smslda, MATCH_SMSLDA, MASK_SMSLDA) DECLARE_INSN(smslxda, MATCH_SMSLXDA, MASK_SMSLXDA) DECLARE_INSN(smsr64, MATCH_SMSR64, MASK_SMSR64) -DECLARE_INSN(smul8, MATCH_SMUL8, MASK_SMUL8) -DECLARE_INSN(smulx8, MATCH_SMULX8, MASK_SMULX8) +DECLARE_INSN(smtt16, MATCH_SMTT16, MASK_SMTT16) +DECLARE_INSN(smtt32, MATCH_SMTT32, MASK_SMTT32) DECLARE_INSN(smul16, MATCH_SMUL16, MASK_SMUL16) +DECLARE_INSN(smul8, MATCH_SMUL8, MASK_SMUL8) DECLARE_INSN(smulx16, MATCH_SMULX16, MASK_SMULX16) -DECLARE_INSN(sra_u, MATCH_SRA_U, MASK_SRA_U) -DECLARE_INSN(srai_u, MATCH_SRAI_U, MASK_SRAI_U) -DECLARE_INSN(sra8, MATCH_SRA8, MASK_SRA8) -DECLARE_INSN(sra8_u, MATCH_SRA8_U, MASK_SRA8_U) -DECLARE_INSN(srai8, MATCH_SRAI8, MASK_SRAI8) -DECLARE_INSN(srai8_u, MATCH_SRAI8_U, MASK_SRAI8_U) +DECLARE_INSN(smulx8, MATCH_SMULX8, MASK_SMULX8) +DECLARE_INSN(smxds, MATCH_SMXDS, MASK_SMXDS) +DECLARE_INSN(smxds32, MATCH_SMXDS32, MASK_SMXDS32) +DECLARE_INSN(sra, MATCH_SRA, MASK_SRA) DECLARE_INSN(sra16, MATCH_SRA16, MASK_SRA16) DECLARE_INSN(sra16_u, MATCH_SRA16_U, MASK_SRA16_U) +DECLARE_INSN(sra32, MATCH_SRA32, MASK_SRA32) +DECLARE_INSN(sra32_u, MATCH_SRA32_U, MASK_SRA32_U) +DECLARE_INSN(sra8, MATCH_SRA8, MASK_SRA8) +DECLARE_INSN(sra8_u, MATCH_SRA8_U, MASK_SRA8_U) +DECLARE_INSN(sra_u, MATCH_SRA_U, MASK_SRA_U) +DECLARE_INSN(srai, MATCH_SRAI, MASK_SRAI) DECLARE_INSN(srai16, MATCH_SRAI16, MASK_SRAI16) DECLARE_INSN(srai16_u, 
MATCH_SRAI16_U, MASK_SRAI16_U) -DECLARE_INSN(srl8, MATCH_SRL8, MASK_SRL8) -DECLARE_INSN(srl8_u, MATCH_SRL8_U, MASK_SRL8_U) -DECLARE_INSN(srli8, MATCH_SRLI8, MASK_SRLI8) -DECLARE_INSN(srli8_u, MATCH_SRLI8_U, MASK_SRLI8_U) +DECLARE_INSN(srai32, MATCH_SRAI32, MASK_SRAI32) +DECLARE_INSN(srai32_u, MATCH_SRAI32_U, MASK_SRAI32_U) +DECLARE_INSN(srai8, MATCH_SRAI8, MASK_SRAI8) +DECLARE_INSN(srai8_u, MATCH_SRAI8_U, MASK_SRAI8_U) +DECLARE_INSN(srai_u, MATCH_SRAI_U, MASK_SRAI_U) +DECLARE_INSN(sraiw, MATCH_SRAIW, MASK_SRAIW) +DECLARE_INSN(sraiw_u, MATCH_SRAIW_U, MASK_SRAIW_U) +DECLARE_INSN(sraw, MATCH_SRAW, MASK_SRAW) +DECLARE_INSN(sret, MATCH_SRET, MASK_SRET) +DECLARE_INSN(srl, MATCH_SRL, MASK_SRL) DECLARE_INSN(srl16, MATCH_SRL16, MASK_SRL16) DECLARE_INSN(srl16_u, MATCH_SRL16_U, MASK_SRL16_U) +DECLARE_INSN(srl32, MATCH_SRL32, MASK_SRL32) +DECLARE_INSN(srl32_u, MATCH_SRL32_U, MASK_SRL32_U) +DECLARE_INSN(srl8, MATCH_SRL8, MASK_SRL8) +DECLARE_INSN(srl8_u, MATCH_SRL8_U, MASK_SRL8_U) +DECLARE_INSN(srli, MATCH_SRLI, MASK_SRLI) DECLARE_INSN(srli16, MATCH_SRLI16, MASK_SRLI16) DECLARE_INSN(srli16_u, MATCH_SRLI16_U, MASK_SRLI16_U) +DECLARE_INSN(srli32, MATCH_SRLI32, MASK_SRLI32) +DECLARE_INSN(srli32_u, MATCH_SRLI32_U, MASK_SRLI32_U) +DECLARE_INSN(srli8, MATCH_SRLI8, MASK_SRLI8) +DECLARE_INSN(srli8_u, MATCH_SRLI8_U, MASK_SRLI8_U) +DECLARE_INSN(srliw, MATCH_SRLIW, MASK_SRLIW) +DECLARE_INSN(srlw, MATCH_SRLW, MASK_SRLW) +DECLARE_INSN(sro, MATCH_SRO, MASK_SRO) +DECLARE_INSN(sroi, MATCH_SROI, MASK_SROI) +DECLARE_INSN(sroiw, MATCH_SROIW, MASK_SROIW) +DECLARE_INSN(srow, MATCH_SROW, MASK_SROW) DECLARE_INSN(stas16, MATCH_STAS16, MASK_STAS16) +DECLARE_INSN(stas32, MATCH_STAS32, MASK_STAS32) DECLARE_INSN(stsa16, MATCH_STSA16, MASK_STSA16) -DECLARE_INSN(sub8, MATCH_SUB8, MASK_SUB8) +DECLARE_INSN(stsa32, MATCH_STSA32, MASK_STSA32) +DECLARE_INSN(sub, MATCH_SUB, MASK_SUB) DECLARE_INSN(sub16, MATCH_SUB16, MASK_SUB16) +DECLARE_INSN(sub32, MATCH_SUB32, MASK_SUB32) DECLARE_INSN(sub64, MATCH_SUB64, MASK_SUB64) +DECLARE_INSN(sub8, MATCH_SUB8, MASK_SUB8) +DECLARE_INSN(subw, MATCH_SUBW, MASK_SUBW) DECLARE_INSN(sunpkd810, MATCH_SUNPKD810, MASK_SUNPKD810) DECLARE_INSN(sunpkd820, MATCH_SUNPKD820, MASK_SUNPKD820) DECLARE_INSN(sunpkd830, MATCH_SUNPKD830, MASK_SUNPKD830) DECLARE_INSN(sunpkd831, MATCH_SUNPKD831, MASK_SUNPKD831) DECLARE_INSN(sunpkd832, MATCH_SUNPKD832, MASK_SUNPKD832) +DECLARE_INSN(sw, MATCH_SW, MASK_SW) DECLARE_INSN(swap8, MATCH_SWAP8, MASK_SWAP8) -DECLARE_INSN(uclip8, MATCH_UCLIP8, MASK_UCLIP8) DECLARE_INSN(uclip16, MATCH_UCLIP16, MASK_UCLIP16) DECLARE_INSN(uclip32, MATCH_UCLIP32, MASK_UCLIP32) -DECLARE_INSN(ucmple8, MATCH_UCMPLE8, MASK_UCMPLE8) +DECLARE_INSN(uclip8, MATCH_UCLIP8, MASK_UCLIP8) DECLARE_INSN(ucmple16, MATCH_UCMPLE16, MASK_UCMPLE16) -DECLARE_INSN(ucmplt8, MATCH_UCMPLT8, MASK_UCMPLT8) +DECLARE_INSN(ucmple8, MATCH_UCMPLE8, MASK_UCMPLE8) DECLARE_INSN(ucmplt16, MATCH_UCMPLT16, MASK_UCMPLT16) -DECLARE_INSN(ukadd8, MATCH_UKADD8, MASK_UKADD8) +DECLARE_INSN(ucmplt8, MATCH_UCMPLT8, MASK_UCMPLT8) DECLARE_INSN(ukadd16, MATCH_UKADD16, MASK_UKADD16) +DECLARE_INSN(ukadd32, MATCH_UKADD32, MASK_UKADD32) DECLARE_INSN(ukadd64, MATCH_UKADD64, MASK_UKADD64) +DECLARE_INSN(ukadd8, MATCH_UKADD8, MASK_UKADD8) DECLARE_INSN(ukaddh, MATCH_UKADDH, MASK_UKADDH) DECLARE_INSN(ukaddw, MATCH_UKADDW, MASK_UKADDW) DECLARE_INSN(ukcras16, MATCH_UKCRAS16, MASK_UKCRAS16) +DECLARE_INSN(ukcras32, MATCH_UKCRAS32, MASK_UKCRAS32) DECLARE_INSN(ukcrsa16, MATCH_UKCRSA16, MASK_UKCRSA16) +DECLARE_INSN(ukcrsa32, MATCH_UKCRSA32, MASK_UKCRSA32) 
DECLARE_INSN(ukmar64, MATCH_UKMAR64, MASK_UKMAR64) DECLARE_INSN(ukmsr64, MATCH_UKMSR64, MASK_UKMSR64) DECLARE_INSN(ukstas16, MATCH_UKSTAS16, MASK_UKSTAS16) +DECLARE_INSN(ukstas32, MATCH_UKSTAS32, MASK_UKSTAS32) DECLARE_INSN(ukstsa16, MATCH_UKSTSA16, MASK_UKSTSA16) -DECLARE_INSN(uksub8, MATCH_UKSUB8, MASK_UKSUB8) +DECLARE_INSN(ukstsa32, MATCH_UKSTSA32, MASK_UKSTSA32) DECLARE_INSN(uksub16, MATCH_UKSUB16, MASK_UKSUB16) +DECLARE_INSN(uksub32, MATCH_UKSUB32, MASK_UKSUB32) DECLARE_INSN(uksub64, MATCH_UKSUB64, MASK_UKSUB64) +DECLARE_INSN(uksub8, MATCH_UKSUB8, MASK_UKSUB8) DECLARE_INSN(uksubh, MATCH_UKSUBH, MASK_UKSUBH) DECLARE_INSN(uksubw, MATCH_UKSUBW, MASK_UKSUBW) -DECLARE_INSN(umar64, MATCH_UMAR64, MASK_UMAR64) DECLARE_INSN(umaqa, MATCH_UMAQA, MASK_UMAQA) -DECLARE_INSN(umax8, MATCH_UMAX8, MASK_UMAX8) +DECLARE_INSN(umar64, MATCH_UMAR64, MASK_UMAR64) DECLARE_INSN(umax16, MATCH_UMAX16, MASK_UMAX16) -DECLARE_INSN(umin8, MATCH_UMIN8, MASK_UMIN8) +DECLARE_INSN(umax32, MATCH_UMAX32, MASK_UMAX32) +DECLARE_INSN(umax8, MATCH_UMAX8, MASK_UMAX8) DECLARE_INSN(umin16, MATCH_UMIN16, MASK_UMIN16) +DECLARE_INSN(umin32, MATCH_UMIN32, MASK_UMIN32) +DECLARE_INSN(umin8, MATCH_UMIN8, MASK_UMIN8) DECLARE_INSN(umsr64, MATCH_UMSR64, MASK_UMSR64) -DECLARE_INSN(umul8, MATCH_UMUL8, MASK_UMUL8) -DECLARE_INSN(umulx8, MATCH_UMULX8, MASK_UMULX8) DECLARE_INSN(umul16, MATCH_UMUL16, MASK_UMUL16) +DECLARE_INSN(umul8, MATCH_UMUL8, MASK_UMUL8) DECLARE_INSN(umulx16, MATCH_UMULX16, MASK_UMULX16) -DECLARE_INSN(uradd8, MATCH_URADD8, MASK_URADD8) +DECLARE_INSN(umulx8, MATCH_UMULX8, MASK_UMULX8) +DECLARE_INSN(unshfl, MATCH_UNSHFL, MASK_UNSHFL) +DECLARE_INSN(unshfli, MATCH_UNSHFLI, MASK_UNSHFLI) +DECLARE_INSN(unshflw, MATCH_UNSHFLW, MASK_UNSHFLW) DECLARE_INSN(uradd16, MATCH_URADD16, MASK_URADD16) +DECLARE_INSN(uradd32, MATCH_URADD32, MASK_URADD32) DECLARE_INSN(uradd64, MATCH_URADD64, MASK_URADD64) +DECLARE_INSN(uradd8, MATCH_URADD8, MASK_URADD8) DECLARE_INSN(uraddw, MATCH_URADDW, MASK_URADDW) DECLARE_INSN(urcras16, MATCH_URCRAS16, MASK_URCRAS16) +DECLARE_INSN(urcras32, MATCH_URCRAS32, MASK_URCRAS32) DECLARE_INSN(urcrsa16, MATCH_URCRSA16, MASK_URCRSA16) +DECLARE_INSN(urcrsa32, MATCH_URCRSA32, MASK_URCRSA32) DECLARE_INSN(urstas16, MATCH_URSTAS16, MASK_URSTAS16) +DECLARE_INSN(urstas32, MATCH_URSTAS32, MASK_URSTAS32) DECLARE_INSN(urstsa16, MATCH_URSTSA16, MASK_URSTSA16) -DECLARE_INSN(ursub8, MATCH_URSUB8, MASK_URSUB8) +DECLARE_INSN(urstsa32, MATCH_URSTSA32, MASK_URSTSA32) DECLARE_INSN(ursub16, MATCH_URSUB16, MASK_URSUB16) +DECLARE_INSN(ursub32, MATCH_URSUB32, MASK_URSUB32) DECLARE_INSN(ursub64, MATCH_URSUB64, MASK_URSUB64) +DECLARE_INSN(ursub8, MATCH_URSUB8, MASK_URSUB8) DECLARE_INSN(ursubw, MATCH_URSUBW, MASK_URSUBW) -DECLARE_INSN(wexti, MATCH_WEXTI, MASK_WEXTI) +DECLARE_INSN(vaadd_vv, MATCH_VAADD_VV, MASK_VAADD_VV) +DECLARE_INSN(vaadd_vx, MATCH_VAADD_VX, MASK_VAADD_VX) +DECLARE_INSN(vaaddu_vv, MATCH_VAADDU_VV, MASK_VAADDU_VV) +DECLARE_INSN(vaaddu_vx, MATCH_VAADDU_VX, MASK_VAADDU_VX) +DECLARE_INSN(vadc_vim, MATCH_VADC_VIM, MASK_VADC_VIM) +DECLARE_INSN(vadc_vvm, MATCH_VADC_VVM, MASK_VADC_VVM) +DECLARE_INSN(vadc_vxm, MATCH_VADC_VXM, MASK_VADC_VXM) +DECLARE_INSN(vadd_vi, MATCH_VADD_VI, MASK_VADD_VI) +DECLARE_INSN(vadd_vv, MATCH_VADD_VV, MASK_VADD_VV) +DECLARE_INSN(vadd_vx, MATCH_VADD_VX, MASK_VADD_VX) +DECLARE_INSN(vamoaddei16_v, MATCH_VAMOADDEI16_V, MASK_VAMOADDEI16_V) +DECLARE_INSN(vamoaddei32_v, MATCH_VAMOADDEI32_V, MASK_VAMOADDEI32_V) +DECLARE_INSN(vamoaddei64_v, MATCH_VAMOADDEI64_V, MASK_VAMOADDEI64_V) +DECLARE_INSN(vamoaddei8_v, 
MATCH_VAMOADDEI8_V, MASK_VAMOADDEI8_V) +DECLARE_INSN(vamoandei16_v, MATCH_VAMOANDEI16_V, MASK_VAMOANDEI16_V) +DECLARE_INSN(vamoandei32_v, MATCH_VAMOANDEI32_V, MASK_VAMOANDEI32_V) +DECLARE_INSN(vamoandei64_v, MATCH_VAMOANDEI64_V, MASK_VAMOANDEI64_V) +DECLARE_INSN(vamoandei8_v, MATCH_VAMOANDEI8_V, MASK_VAMOANDEI8_V) +DECLARE_INSN(vamomaxei16_v, MATCH_VAMOMAXEI16_V, MASK_VAMOMAXEI16_V) +DECLARE_INSN(vamomaxei32_v, MATCH_VAMOMAXEI32_V, MASK_VAMOMAXEI32_V) +DECLARE_INSN(vamomaxei64_v, MATCH_VAMOMAXEI64_V, MASK_VAMOMAXEI64_V) +DECLARE_INSN(vamomaxei8_v, MATCH_VAMOMAXEI8_V, MASK_VAMOMAXEI8_V) +DECLARE_INSN(vamomaxuei16_v, MATCH_VAMOMAXUEI16_V, MASK_VAMOMAXUEI16_V) +DECLARE_INSN(vamomaxuei32_v, MATCH_VAMOMAXUEI32_V, MASK_VAMOMAXUEI32_V) +DECLARE_INSN(vamomaxuei64_v, MATCH_VAMOMAXUEI64_V, MASK_VAMOMAXUEI64_V) +DECLARE_INSN(vamomaxuei8_v, MATCH_VAMOMAXUEI8_V, MASK_VAMOMAXUEI8_V) +DECLARE_INSN(vamominei16_v, MATCH_VAMOMINEI16_V, MASK_VAMOMINEI16_V) +DECLARE_INSN(vamominei32_v, MATCH_VAMOMINEI32_V, MASK_VAMOMINEI32_V) +DECLARE_INSN(vamominei64_v, MATCH_VAMOMINEI64_V, MASK_VAMOMINEI64_V) +DECLARE_INSN(vamominei8_v, MATCH_VAMOMINEI8_V, MASK_VAMOMINEI8_V) +DECLARE_INSN(vamominuei16_v, MATCH_VAMOMINUEI16_V, MASK_VAMOMINUEI16_V) +DECLARE_INSN(vamominuei32_v, MATCH_VAMOMINUEI32_V, MASK_VAMOMINUEI32_V) +DECLARE_INSN(vamominuei64_v, MATCH_VAMOMINUEI64_V, MASK_VAMOMINUEI64_V) +DECLARE_INSN(vamominuei8_v, MATCH_VAMOMINUEI8_V, MASK_VAMOMINUEI8_V) +DECLARE_INSN(vamoorei16_v, MATCH_VAMOOREI16_V, MASK_VAMOOREI16_V) +DECLARE_INSN(vamoorei32_v, MATCH_VAMOOREI32_V, MASK_VAMOOREI32_V) +DECLARE_INSN(vamoorei64_v, MATCH_VAMOOREI64_V, MASK_VAMOOREI64_V) +DECLARE_INSN(vamoorei8_v, MATCH_VAMOOREI8_V, MASK_VAMOOREI8_V) +DECLARE_INSN(vamoswapei16_v, MATCH_VAMOSWAPEI16_V, MASK_VAMOSWAPEI16_V) +DECLARE_INSN(vamoswapei32_v, MATCH_VAMOSWAPEI32_V, MASK_VAMOSWAPEI32_V) +DECLARE_INSN(vamoswapei64_v, MATCH_VAMOSWAPEI64_V, MASK_VAMOSWAPEI64_V) +DECLARE_INSN(vamoswapei8_v, MATCH_VAMOSWAPEI8_V, MASK_VAMOSWAPEI8_V) +DECLARE_INSN(vamoxorei16_v, MATCH_VAMOXOREI16_V, MASK_VAMOXOREI16_V) +DECLARE_INSN(vamoxorei32_v, MATCH_VAMOXOREI32_V, MASK_VAMOXOREI32_V) +DECLARE_INSN(vamoxorei64_v, MATCH_VAMOXOREI64_V, MASK_VAMOXOREI64_V) +DECLARE_INSN(vamoxorei8_v, MATCH_VAMOXOREI8_V, MASK_VAMOXOREI8_V) +DECLARE_INSN(vand_vi, MATCH_VAND_VI, MASK_VAND_VI) +DECLARE_INSN(vand_vv, MATCH_VAND_VV, MASK_VAND_VV) +DECLARE_INSN(vand_vx, MATCH_VAND_VX, MASK_VAND_VX) +DECLARE_INSN(vasub_vv, MATCH_VASUB_VV, MASK_VASUB_VV) +DECLARE_INSN(vasub_vx, MATCH_VASUB_VX, MASK_VASUB_VX) +DECLARE_INSN(vasubu_vv, MATCH_VASUBU_VV, MASK_VASUBU_VV) +DECLARE_INSN(vasubu_vx, MATCH_VASUBU_VX, MASK_VASUBU_VX) +DECLARE_INSN(vcompress_vm, MATCH_VCOMPRESS_VM, MASK_VCOMPRESS_VM) +DECLARE_INSN(vcpop_m, MATCH_VCPOP_M, MASK_VCPOP_M) +DECLARE_INSN(vdiv_vv, MATCH_VDIV_VV, MASK_VDIV_VV) +DECLARE_INSN(vdiv_vx, MATCH_VDIV_VX, MASK_VDIV_VX) +DECLARE_INSN(vdivu_vv, MATCH_VDIVU_VV, MASK_VDIVU_VV) +DECLARE_INSN(vdivu_vx, MATCH_VDIVU_VX, MASK_VDIVU_VX) +DECLARE_INSN(vfadd_vf, MATCH_VFADD_VF, MASK_VFADD_VF) +DECLARE_INSN(vfadd_vv, MATCH_VFADD_VV, MASK_VFADD_VV) +DECLARE_INSN(vfclass_v, MATCH_VFCLASS_V, MASK_VFCLASS_V) +DECLARE_INSN(vfcvt_f_x_v, MATCH_VFCVT_F_X_V, MASK_VFCVT_F_X_V) +DECLARE_INSN(vfcvt_f_xu_v, MATCH_VFCVT_F_XU_V, MASK_VFCVT_F_XU_V) +DECLARE_INSN(vfcvt_rtz_x_f_v, MATCH_VFCVT_RTZ_X_F_V, MASK_VFCVT_RTZ_X_F_V) +DECLARE_INSN(vfcvt_rtz_xu_f_v, MATCH_VFCVT_RTZ_XU_F_V, MASK_VFCVT_RTZ_XU_F_V) +DECLARE_INSN(vfcvt_x_f_v, MATCH_VFCVT_X_F_V, MASK_VFCVT_X_F_V) +DECLARE_INSN(vfcvt_xu_f_v, 
MATCH_VFCVT_XU_F_V, MASK_VFCVT_XU_F_V) +DECLARE_INSN(vfdiv_vf, MATCH_VFDIV_VF, MASK_VFDIV_VF) +DECLARE_INSN(vfdiv_vv, MATCH_VFDIV_VV, MASK_VFDIV_VV) +DECLARE_INSN(vfirst_m, MATCH_VFIRST_M, MASK_VFIRST_M) +DECLARE_INSN(vfmacc_vf, MATCH_VFMACC_VF, MASK_VFMACC_VF) +DECLARE_INSN(vfmacc_vv, MATCH_VFMACC_VV, MASK_VFMACC_VV) +DECLARE_INSN(vfmadd_vf, MATCH_VFMADD_VF, MASK_VFMADD_VF) +DECLARE_INSN(vfmadd_vv, MATCH_VFMADD_VV, MASK_VFMADD_VV) +DECLARE_INSN(vfmax_vf, MATCH_VFMAX_VF, MASK_VFMAX_VF) +DECLARE_INSN(vfmax_vv, MATCH_VFMAX_VV, MASK_VFMAX_VV) +DECLARE_INSN(vfmerge_vfm, MATCH_VFMERGE_VFM, MASK_VFMERGE_VFM) +DECLARE_INSN(vfmin_vf, MATCH_VFMIN_VF, MASK_VFMIN_VF) +DECLARE_INSN(vfmin_vv, MATCH_VFMIN_VV, MASK_VFMIN_VV) +DECLARE_INSN(vfmsac_vf, MATCH_VFMSAC_VF, MASK_VFMSAC_VF) +DECLARE_INSN(vfmsac_vv, MATCH_VFMSAC_VV, MASK_VFMSAC_VV) +DECLARE_INSN(vfmsub_vf, MATCH_VFMSUB_VF, MASK_VFMSUB_VF) +DECLARE_INSN(vfmsub_vv, MATCH_VFMSUB_VV, MASK_VFMSUB_VV) +DECLARE_INSN(vfmul_vf, MATCH_VFMUL_VF, MASK_VFMUL_VF) +DECLARE_INSN(vfmul_vv, MATCH_VFMUL_VV, MASK_VFMUL_VV) +DECLARE_INSN(vfmv_f_s, MATCH_VFMV_F_S, MASK_VFMV_F_S) +DECLARE_INSN(vfmv_s_f, MATCH_VFMV_S_F, MASK_VFMV_S_F) +DECLARE_INSN(vfmv_v_f, MATCH_VFMV_V_F, MASK_VFMV_V_F) +DECLARE_INSN(vfncvt_f_f_w, MATCH_VFNCVT_F_F_W, MASK_VFNCVT_F_F_W) +DECLARE_INSN(vfncvt_f_x_w, MATCH_VFNCVT_F_X_W, MASK_VFNCVT_F_X_W) +DECLARE_INSN(vfncvt_f_xu_w, MATCH_VFNCVT_F_XU_W, MASK_VFNCVT_F_XU_W) +DECLARE_INSN(vfncvt_rod_f_f_w, MATCH_VFNCVT_ROD_F_F_W, MASK_VFNCVT_ROD_F_F_W) +DECLARE_INSN(vfncvt_rtz_x_f_w, MATCH_VFNCVT_RTZ_X_F_W, MASK_VFNCVT_RTZ_X_F_W) +DECLARE_INSN(vfncvt_rtz_xu_f_w, MATCH_VFNCVT_RTZ_XU_F_W, MASK_VFNCVT_RTZ_XU_F_W) +DECLARE_INSN(vfncvt_x_f_w, MATCH_VFNCVT_X_F_W, MASK_VFNCVT_X_F_W) +DECLARE_INSN(vfncvt_xu_f_w, MATCH_VFNCVT_XU_F_W, MASK_VFNCVT_XU_F_W) +DECLARE_INSN(vfnmacc_vf, MATCH_VFNMACC_VF, MASK_VFNMACC_VF) +DECLARE_INSN(vfnmacc_vv, MATCH_VFNMACC_VV, MASK_VFNMACC_VV) +DECLARE_INSN(vfnmadd_vf, MATCH_VFNMADD_VF, MASK_VFNMADD_VF) +DECLARE_INSN(vfnmadd_vv, MATCH_VFNMADD_VV, MASK_VFNMADD_VV) +DECLARE_INSN(vfnmsac_vf, MATCH_VFNMSAC_VF, MASK_VFNMSAC_VF) +DECLARE_INSN(vfnmsac_vv, MATCH_VFNMSAC_VV, MASK_VFNMSAC_VV) +DECLARE_INSN(vfnmsub_vf, MATCH_VFNMSUB_VF, MASK_VFNMSUB_VF) +DECLARE_INSN(vfnmsub_vv, MATCH_VFNMSUB_VV, MASK_VFNMSUB_VV) +DECLARE_INSN(vfrdiv_vf, MATCH_VFRDIV_VF, MASK_VFRDIV_VF) +DECLARE_INSN(vfrec7_v, MATCH_VFREC7_V, MASK_VFREC7_V) +DECLARE_INSN(vfredmax_vs, MATCH_VFREDMAX_VS, MASK_VFREDMAX_VS) +DECLARE_INSN(vfredmin_vs, MATCH_VFREDMIN_VS, MASK_VFREDMIN_VS) +DECLARE_INSN(vfredosum_vs, MATCH_VFREDOSUM_VS, MASK_VFREDOSUM_VS) +DECLARE_INSN(vfredusum_vs, MATCH_VFREDUSUM_VS, MASK_VFREDUSUM_VS) +DECLARE_INSN(vfrsqrt7_v, MATCH_VFRSQRT7_V, MASK_VFRSQRT7_V) +DECLARE_INSN(vfrsub_vf, MATCH_VFRSUB_VF, MASK_VFRSUB_VF) +DECLARE_INSN(vfsgnj_vf, MATCH_VFSGNJ_VF, MASK_VFSGNJ_VF) +DECLARE_INSN(vfsgnj_vv, MATCH_VFSGNJ_VV, MASK_VFSGNJ_VV) +DECLARE_INSN(vfsgnjn_vf, MATCH_VFSGNJN_VF, MASK_VFSGNJN_VF) +DECLARE_INSN(vfsgnjn_vv, MATCH_VFSGNJN_VV, MASK_VFSGNJN_VV) +DECLARE_INSN(vfsgnjx_vf, MATCH_VFSGNJX_VF, MASK_VFSGNJX_VF) +DECLARE_INSN(vfsgnjx_vv, MATCH_VFSGNJX_VV, MASK_VFSGNJX_VV) +DECLARE_INSN(vfslide1down_vf, MATCH_VFSLIDE1DOWN_VF, MASK_VFSLIDE1DOWN_VF) +DECLARE_INSN(vfslide1up_vf, MATCH_VFSLIDE1UP_VF, MASK_VFSLIDE1UP_VF) +DECLARE_INSN(vfsqrt_v, MATCH_VFSQRT_V, MASK_VFSQRT_V) +DECLARE_INSN(vfsub_vf, MATCH_VFSUB_VF, MASK_VFSUB_VF) +DECLARE_INSN(vfsub_vv, MATCH_VFSUB_VV, MASK_VFSUB_VV) +DECLARE_INSN(vfwadd_vf, MATCH_VFWADD_VF, MASK_VFWADD_VF) +DECLARE_INSN(vfwadd_vv, 
MATCH_VFWADD_VV, MASK_VFWADD_VV) +DECLARE_INSN(vfwadd_wf, MATCH_VFWADD_WF, MASK_VFWADD_WF) +DECLARE_INSN(vfwadd_wv, MATCH_VFWADD_WV, MASK_VFWADD_WV) +DECLARE_INSN(vfwcvt_f_f_v, MATCH_VFWCVT_F_F_V, MASK_VFWCVT_F_F_V) +DECLARE_INSN(vfwcvt_f_x_v, MATCH_VFWCVT_F_X_V, MASK_VFWCVT_F_X_V) +DECLARE_INSN(vfwcvt_f_xu_v, MATCH_VFWCVT_F_XU_V, MASK_VFWCVT_F_XU_V) +DECLARE_INSN(vfwcvt_rtz_x_f_v, MATCH_VFWCVT_RTZ_X_F_V, MASK_VFWCVT_RTZ_X_F_V) +DECLARE_INSN(vfwcvt_rtz_xu_f_v, MATCH_VFWCVT_RTZ_XU_F_V, MASK_VFWCVT_RTZ_XU_F_V) +DECLARE_INSN(vfwcvt_x_f_v, MATCH_VFWCVT_X_F_V, MASK_VFWCVT_X_F_V) +DECLARE_INSN(vfwcvt_xu_f_v, MATCH_VFWCVT_XU_F_V, MASK_VFWCVT_XU_F_V) +DECLARE_INSN(vfwmacc_vf, MATCH_VFWMACC_VF, MASK_VFWMACC_VF) +DECLARE_INSN(vfwmacc_vv, MATCH_VFWMACC_VV, MASK_VFWMACC_VV) +DECLARE_INSN(vfwmsac_vf, MATCH_VFWMSAC_VF, MASK_VFWMSAC_VF) +DECLARE_INSN(vfwmsac_vv, MATCH_VFWMSAC_VV, MASK_VFWMSAC_VV) +DECLARE_INSN(vfwmul_vf, MATCH_VFWMUL_VF, MASK_VFWMUL_VF) +DECLARE_INSN(vfwmul_vv, MATCH_VFWMUL_VV, MASK_VFWMUL_VV) +DECLARE_INSN(vfwnmacc_vf, MATCH_VFWNMACC_VF, MASK_VFWNMACC_VF) +DECLARE_INSN(vfwnmacc_vv, MATCH_VFWNMACC_VV, MASK_VFWNMACC_VV) +DECLARE_INSN(vfwnmsac_vf, MATCH_VFWNMSAC_VF, MASK_VFWNMSAC_VF) +DECLARE_INSN(vfwnmsac_vv, MATCH_VFWNMSAC_VV, MASK_VFWNMSAC_VV) +DECLARE_INSN(vfwredosum_vs, MATCH_VFWREDOSUM_VS, MASK_VFWREDOSUM_VS) +DECLARE_INSN(vfwredusum_vs, MATCH_VFWREDUSUM_VS, MASK_VFWREDUSUM_VS) +DECLARE_INSN(vfwsub_vf, MATCH_VFWSUB_VF, MASK_VFWSUB_VF) +DECLARE_INSN(vfwsub_vv, MATCH_VFWSUB_VV, MASK_VFWSUB_VV) +DECLARE_INSN(vfwsub_wf, MATCH_VFWSUB_WF, MASK_VFWSUB_WF) +DECLARE_INSN(vfwsub_wv, MATCH_VFWSUB_WV, MASK_VFWSUB_WV) +DECLARE_INSN(vid_v, MATCH_VID_V, MASK_VID_V) +DECLARE_INSN(viota_m, MATCH_VIOTA_M, MASK_VIOTA_M) +DECLARE_INSN(vl1re16_v, MATCH_VL1RE16_V, MASK_VL1RE16_V) +DECLARE_INSN(vl1re32_v, MATCH_VL1RE32_V, MASK_VL1RE32_V) +DECLARE_INSN(vl1re64_v, MATCH_VL1RE64_V, MASK_VL1RE64_V) +DECLARE_INSN(vl1re8_v, MATCH_VL1RE8_V, MASK_VL1RE8_V) +DECLARE_INSN(vl2re16_v, MATCH_VL2RE16_V, MASK_VL2RE16_V) +DECLARE_INSN(vl2re32_v, MATCH_VL2RE32_V, MASK_VL2RE32_V) +DECLARE_INSN(vl2re64_v, MATCH_VL2RE64_V, MASK_VL2RE64_V) +DECLARE_INSN(vl2re8_v, MATCH_VL2RE8_V, MASK_VL2RE8_V) +DECLARE_INSN(vl4re16_v, MATCH_VL4RE16_V, MASK_VL4RE16_V) +DECLARE_INSN(vl4re32_v, MATCH_VL4RE32_V, MASK_VL4RE32_V) +DECLARE_INSN(vl4re64_v, MATCH_VL4RE64_V, MASK_VL4RE64_V) +DECLARE_INSN(vl4re8_v, MATCH_VL4RE8_V, MASK_VL4RE8_V) +DECLARE_INSN(vl8re16_v, MATCH_VL8RE16_V, MASK_VL8RE16_V) +DECLARE_INSN(vl8re32_v, MATCH_VL8RE32_V, MASK_VL8RE32_V) +DECLARE_INSN(vl8re64_v, MATCH_VL8RE64_V, MASK_VL8RE64_V) +DECLARE_INSN(vl8re8_v, MATCH_VL8RE8_V, MASK_VL8RE8_V) +DECLARE_INSN(vle1024_v, MATCH_VLE1024_V, MASK_VLE1024_V) +DECLARE_INSN(vle1024ff_v, MATCH_VLE1024FF_V, MASK_VLE1024FF_V) +DECLARE_INSN(vle128_v, MATCH_VLE128_V, MASK_VLE128_V) +DECLARE_INSN(vle128ff_v, MATCH_VLE128FF_V, MASK_VLE128FF_V) +DECLARE_INSN(vle16_v, MATCH_VLE16_V, MASK_VLE16_V) +DECLARE_INSN(vle16ff_v, MATCH_VLE16FF_V, MASK_VLE16FF_V) +DECLARE_INSN(vle256_v, MATCH_VLE256_V, MASK_VLE256_V) +DECLARE_INSN(vle256ff_v, MATCH_VLE256FF_V, MASK_VLE256FF_V) +DECLARE_INSN(vle32_v, MATCH_VLE32_V, MASK_VLE32_V) +DECLARE_INSN(vle32ff_v, MATCH_VLE32FF_V, MASK_VLE32FF_V) +DECLARE_INSN(vle512_v, MATCH_VLE512_V, MASK_VLE512_V) +DECLARE_INSN(vle512ff_v, MATCH_VLE512FF_V, MASK_VLE512FF_V) +DECLARE_INSN(vle64_v, MATCH_VLE64_V, MASK_VLE64_V) +DECLARE_INSN(vle64ff_v, MATCH_VLE64FF_V, MASK_VLE64FF_V) +DECLARE_INSN(vle8_v, MATCH_VLE8_V, MASK_VLE8_V) +DECLARE_INSN(vle8ff_v, MATCH_VLE8FF_V, 
MASK_VLE8FF_V) +DECLARE_INSN(vlm_v, MATCH_VLM_V, MASK_VLM_V) +DECLARE_INSN(vloxei1024_v, MATCH_VLOXEI1024_V, MASK_VLOXEI1024_V) +DECLARE_INSN(vloxei128_v, MATCH_VLOXEI128_V, MASK_VLOXEI128_V) +DECLARE_INSN(vloxei16_v, MATCH_VLOXEI16_V, MASK_VLOXEI16_V) +DECLARE_INSN(vloxei256_v, MATCH_VLOXEI256_V, MASK_VLOXEI256_V) +DECLARE_INSN(vloxei32_v, MATCH_VLOXEI32_V, MASK_VLOXEI32_V) +DECLARE_INSN(vloxei512_v, MATCH_VLOXEI512_V, MASK_VLOXEI512_V) +DECLARE_INSN(vloxei64_v, MATCH_VLOXEI64_V, MASK_VLOXEI64_V) +DECLARE_INSN(vloxei8_v, MATCH_VLOXEI8_V, MASK_VLOXEI8_V) +DECLARE_INSN(vlse1024_v, MATCH_VLSE1024_V, MASK_VLSE1024_V) +DECLARE_INSN(vlse128_v, MATCH_VLSE128_V, MASK_VLSE128_V) +DECLARE_INSN(vlse16_v, MATCH_VLSE16_V, MASK_VLSE16_V) +DECLARE_INSN(vlse256_v, MATCH_VLSE256_V, MASK_VLSE256_V) +DECLARE_INSN(vlse32_v, MATCH_VLSE32_V, MASK_VLSE32_V) +DECLARE_INSN(vlse512_v, MATCH_VLSE512_V, MASK_VLSE512_V) +DECLARE_INSN(vlse64_v, MATCH_VLSE64_V, MASK_VLSE64_V) +DECLARE_INSN(vlse8_v, MATCH_VLSE8_V, MASK_VLSE8_V) +DECLARE_INSN(vluxei1024_v, MATCH_VLUXEI1024_V, MASK_VLUXEI1024_V) +DECLARE_INSN(vluxei128_v, MATCH_VLUXEI128_V, MASK_VLUXEI128_V) +DECLARE_INSN(vluxei16_v, MATCH_VLUXEI16_V, MASK_VLUXEI16_V) +DECLARE_INSN(vluxei256_v, MATCH_VLUXEI256_V, MASK_VLUXEI256_V) +DECLARE_INSN(vluxei32_v, MATCH_VLUXEI32_V, MASK_VLUXEI32_V) +DECLARE_INSN(vluxei512_v, MATCH_VLUXEI512_V, MASK_VLUXEI512_V) +DECLARE_INSN(vluxei64_v, MATCH_VLUXEI64_V, MASK_VLUXEI64_V) +DECLARE_INSN(vluxei8_v, MATCH_VLUXEI8_V, MASK_VLUXEI8_V) +DECLARE_INSN(vmacc_vv, MATCH_VMACC_VV, MASK_VMACC_VV) +DECLARE_INSN(vmacc_vx, MATCH_VMACC_VX, MASK_VMACC_VX) +DECLARE_INSN(vmadc_vi, MATCH_VMADC_VI, MASK_VMADC_VI) +DECLARE_INSN(vmadc_vim, MATCH_VMADC_VIM, MASK_VMADC_VIM) +DECLARE_INSN(vmadc_vv, MATCH_VMADC_VV, MASK_VMADC_VV) +DECLARE_INSN(vmadc_vvm, MATCH_VMADC_VVM, MASK_VMADC_VVM) +DECLARE_INSN(vmadc_vx, MATCH_VMADC_VX, MASK_VMADC_VX) +DECLARE_INSN(vmadc_vxm, MATCH_VMADC_VXM, MASK_VMADC_VXM) +DECLARE_INSN(vmadd_vv, MATCH_VMADD_VV, MASK_VMADD_VV) +DECLARE_INSN(vmadd_vx, MATCH_VMADD_VX, MASK_VMADD_VX) +DECLARE_INSN(vmand_mm, MATCH_VMAND_MM, MASK_VMAND_MM) +DECLARE_INSN(vmandn_mm, MATCH_VMANDN_MM, MASK_VMANDN_MM) +DECLARE_INSN(vmax_vv, MATCH_VMAX_VV, MASK_VMAX_VV) +DECLARE_INSN(vmax_vx, MATCH_VMAX_VX, MASK_VMAX_VX) +DECLARE_INSN(vmaxu_vv, MATCH_VMAXU_VV, MASK_VMAXU_VV) +DECLARE_INSN(vmaxu_vx, MATCH_VMAXU_VX, MASK_VMAXU_VX) +DECLARE_INSN(vmerge_vim, MATCH_VMERGE_VIM, MASK_VMERGE_VIM) +DECLARE_INSN(vmerge_vvm, MATCH_VMERGE_VVM, MASK_VMERGE_VVM) +DECLARE_INSN(vmerge_vxm, MATCH_VMERGE_VXM, MASK_VMERGE_VXM) +DECLARE_INSN(vmfeq_vf, MATCH_VMFEQ_VF, MASK_VMFEQ_VF) +DECLARE_INSN(vmfeq_vv, MATCH_VMFEQ_VV, MASK_VMFEQ_VV) +DECLARE_INSN(vmfge_vf, MATCH_VMFGE_VF, MASK_VMFGE_VF) +DECLARE_INSN(vmfgt_vf, MATCH_VMFGT_VF, MASK_VMFGT_VF) +DECLARE_INSN(vmfle_vf, MATCH_VMFLE_VF, MASK_VMFLE_VF) +DECLARE_INSN(vmfle_vv, MATCH_VMFLE_VV, MASK_VMFLE_VV) +DECLARE_INSN(vmflt_vf, MATCH_VMFLT_VF, MASK_VMFLT_VF) +DECLARE_INSN(vmflt_vv, MATCH_VMFLT_VV, MASK_VMFLT_VV) +DECLARE_INSN(vmfne_vf, MATCH_VMFNE_VF, MASK_VMFNE_VF) +DECLARE_INSN(vmfne_vv, MATCH_VMFNE_VV, MASK_VMFNE_VV) +DECLARE_INSN(vmin_vv, MATCH_VMIN_VV, MASK_VMIN_VV) +DECLARE_INSN(vmin_vx, MATCH_VMIN_VX, MASK_VMIN_VX) +DECLARE_INSN(vminu_vv, MATCH_VMINU_VV, MASK_VMINU_VV) +DECLARE_INSN(vminu_vx, MATCH_VMINU_VX, MASK_VMINU_VX) +DECLARE_INSN(vmnand_mm, MATCH_VMNAND_MM, MASK_VMNAND_MM) +DECLARE_INSN(vmnor_mm, MATCH_VMNOR_MM, MASK_VMNOR_MM) +DECLARE_INSN(vmor_mm, MATCH_VMOR_MM, MASK_VMOR_MM) +DECLARE_INSN(vmorn_mm, MATCH_VMORN_MM, 
MASK_VMORN_MM) +DECLARE_INSN(vmsbc_vv, MATCH_VMSBC_VV, MASK_VMSBC_VV) +DECLARE_INSN(vmsbc_vvm, MATCH_VMSBC_VVM, MASK_VMSBC_VVM) +DECLARE_INSN(vmsbc_vx, MATCH_VMSBC_VX, MASK_VMSBC_VX) +DECLARE_INSN(vmsbc_vxm, MATCH_VMSBC_VXM, MASK_VMSBC_VXM) +DECLARE_INSN(vmsbf_m, MATCH_VMSBF_M, MASK_VMSBF_M) +DECLARE_INSN(vmseq_vi, MATCH_VMSEQ_VI, MASK_VMSEQ_VI) +DECLARE_INSN(vmseq_vv, MATCH_VMSEQ_VV, MASK_VMSEQ_VV) +DECLARE_INSN(vmseq_vx, MATCH_VMSEQ_VX, MASK_VMSEQ_VX) +DECLARE_INSN(vmsgt_vi, MATCH_VMSGT_VI, MASK_VMSGT_VI) +DECLARE_INSN(vmsgt_vx, MATCH_VMSGT_VX, MASK_VMSGT_VX) +DECLARE_INSN(vmsgtu_vi, MATCH_VMSGTU_VI, MASK_VMSGTU_VI) +DECLARE_INSN(vmsgtu_vx, MATCH_VMSGTU_VX, MASK_VMSGTU_VX) +DECLARE_INSN(vmsif_m, MATCH_VMSIF_M, MASK_VMSIF_M) +DECLARE_INSN(vmsle_vi, MATCH_VMSLE_VI, MASK_VMSLE_VI) +DECLARE_INSN(vmsle_vv, MATCH_VMSLE_VV, MASK_VMSLE_VV) +DECLARE_INSN(vmsle_vx, MATCH_VMSLE_VX, MASK_VMSLE_VX) +DECLARE_INSN(vmsleu_vi, MATCH_VMSLEU_VI, MASK_VMSLEU_VI) +DECLARE_INSN(vmsleu_vv, MATCH_VMSLEU_VV, MASK_VMSLEU_VV) +DECLARE_INSN(vmsleu_vx, MATCH_VMSLEU_VX, MASK_VMSLEU_VX) +DECLARE_INSN(vmslt_vv, MATCH_VMSLT_VV, MASK_VMSLT_VV) +DECLARE_INSN(vmslt_vx, MATCH_VMSLT_VX, MASK_VMSLT_VX) +DECLARE_INSN(vmsltu_vv, MATCH_VMSLTU_VV, MASK_VMSLTU_VV) +DECLARE_INSN(vmsltu_vx, MATCH_VMSLTU_VX, MASK_VMSLTU_VX) +DECLARE_INSN(vmsne_vi, MATCH_VMSNE_VI, MASK_VMSNE_VI) +DECLARE_INSN(vmsne_vv, MATCH_VMSNE_VV, MASK_VMSNE_VV) +DECLARE_INSN(vmsne_vx, MATCH_VMSNE_VX, MASK_VMSNE_VX) +DECLARE_INSN(vmsof_m, MATCH_VMSOF_M, MASK_VMSOF_M) +DECLARE_INSN(vmul_vv, MATCH_VMUL_VV, MASK_VMUL_VV) +DECLARE_INSN(vmul_vx, MATCH_VMUL_VX, MASK_VMUL_VX) +DECLARE_INSN(vmulh_vv, MATCH_VMULH_VV, MASK_VMULH_VV) +DECLARE_INSN(vmulh_vx, MATCH_VMULH_VX, MASK_VMULH_VX) +DECLARE_INSN(vmulhsu_vv, MATCH_VMULHSU_VV, MASK_VMULHSU_VV) +DECLARE_INSN(vmulhsu_vx, MATCH_VMULHSU_VX, MASK_VMULHSU_VX) +DECLARE_INSN(vmulhu_vv, MATCH_VMULHU_VV, MASK_VMULHU_VV) +DECLARE_INSN(vmulhu_vx, MATCH_VMULHU_VX, MASK_VMULHU_VX) +DECLARE_INSN(vmv1r_v, MATCH_VMV1R_V, MASK_VMV1R_V) +DECLARE_INSN(vmv2r_v, MATCH_VMV2R_V, MASK_VMV2R_V) +DECLARE_INSN(vmv4r_v, MATCH_VMV4R_V, MASK_VMV4R_V) +DECLARE_INSN(vmv8r_v, MATCH_VMV8R_V, MASK_VMV8R_V) +DECLARE_INSN(vmv_s_x, MATCH_VMV_S_X, MASK_VMV_S_X) +DECLARE_INSN(vmv_v_i, MATCH_VMV_V_I, MASK_VMV_V_I) +DECLARE_INSN(vmv_v_v, MATCH_VMV_V_V, MASK_VMV_V_V) +DECLARE_INSN(vmv_v_x, MATCH_VMV_V_X, MASK_VMV_V_X) +DECLARE_INSN(vmv_x_s, MATCH_VMV_X_S, MASK_VMV_X_S) +DECLARE_INSN(vmxnor_mm, MATCH_VMXNOR_MM, MASK_VMXNOR_MM) +DECLARE_INSN(vmxor_mm, MATCH_VMXOR_MM, MASK_VMXOR_MM) +DECLARE_INSN(vnclip_wi, MATCH_VNCLIP_WI, MASK_VNCLIP_WI) +DECLARE_INSN(vnclip_wv, MATCH_VNCLIP_WV, MASK_VNCLIP_WV) +DECLARE_INSN(vnclip_wx, MATCH_VNCLIP_WX, MASK_VNCLIP_WX) +DECLARE_INSN(vnclipu_wi, MATCH_VNCLIPU_WI, MASK_VNCLIPU_WI) +DECLARE_INSN(vnclipu_wv, MATCH_VNCLIPU_WV, MASK_VNCLIPU_WV) +DECLARE_INSN(vnclipu_wx, MATCH_VNCLIPU_WX, MASK_VNCLIPU_WX) +DECLARE_INSN(vnmsac_vv, MATCH_VNMSAC_VV, MASK_VNMSAC_VV) +DECLARE_INSN(vnmsac_vx, MATCH_VNMSAC_VX, MASK_VNMSAC_VX) +DECLARE_INSN(vnmsub_vv, MATCH_VNMSUB_VV, MASK_VNMSUB_VV) +DECLARE_INSN(vnmsub_vx, MATCH_VNMSUB_VX, MASK_VNMSUB_VX) +DECLARE_INSN(vnsra_wi, MATCH_VNSRA_WI, MASK_VNSRA_WI) +DECLARE_INSN(vnsra_wv, MATCH_VNSRA_WV, MASK_VNSRA_WV) +DECLARE_INSN(vnsra_wx, MATCH_VNSRA_WX, MASK_VNSRA_WX) +DECLARE_INSN(vnsrl_wi, MATCH_VNSRL_WI, MASK_VNSRL_WI) +DECLARE_INSN(vnsrl_wv, MATCH_VNSRL_WV, MASK_VNSRL_WV) +DECLARE_INSN(vnsrl_wx, MATCH_VNSRL_WX, MASK_VNSRL_WX) +DECLARE_INSN(vor_vi, MATCH_VOR_VI, MASK_VOR_VI) +DECLARE_INSN(vor_vv, MATCH_VOR_VV, 
MASK_VOR_VV) +DECLARE_INSN(vor_vx, MATCH_VOR_VX, MASK_VOR_VX) +DECLARE_INSN(vredand_vs, MATCH_VREDAND_VS, MASK_VREDAND_VS) +DECLARE_INSN(vredmax_vs, MATCH_VREDMAX_VS, MASK_VREDMAX_VS) +DECLARE_INSN(vredmaxu_vs, MATCH_VREDMAXU_VS, MASK_VREDMAXU_VS) +DECLARE_INSN(vredmin_vs, MATCH_VREDMIN_VS, MASK_VREDMIN_VS) +DECLARE_INSN(vredminu_vs, MATCH_VREDMINU_VS, MASK_VREDMINU_VS) +DECLARE_INSN(vredor_vs, MATCH_VREDOR_VS, MASK_VREDOR_VS) +DECLARE_INSN(vredsum_vs, MATCH_VREDSUM_VS, MASK_VREDSUM_VS) +DECLARE_INSN(vredxor_vs, MATCH_VREDXOR_VS, MASK_VREDXOR_VS) +DECLARE_INSN(vrem_vv, MATCH_VREM_VV, MASK_VREM_VV) +DECLARE_INSN(vrem_vx, MATCH_VREM_VX, MASK_VREM_VX) +DECLARE_INSN(vremu_vv, MATCH_VREMU_VV, MASK_VREMU_VV) +DECLARE_INSN(vremu_vx, MATCH_VREMU_VX, MASK_VREMU_VX) +DECLARE_INSN(vrgather_vi, MATCH_VRGATHER_VI, MASK_VRGATHER_VI) +DECLARE_INSN(vrgather_vv, MATCH_VRGATHER_VV, MASK_VRGATHER_VV) +DECLARE_INSN(vrgather_vx, MATCH_VRGATHER_VX, MASK_VRGATHER_VX) +DECLARE_INSN(vrgatherei16_vv, MATCH_VRGATHEREI16_VV, MASK_VRGATHEREI16_VV) +DECLARE_INSN(vrsub_vi, MATCH_VRSUB_VI, MASK_VRSUB_VI) +DECLARE_INSN(vrsub_vx, MATCH_VRSUB_VX, MASK_VRSUB_VX) +DECLARE_INSN(vs1r_v, MATCH_VS1R_V, MASK_VS1R_V) +DECLARE_INSN(vs2r_v, MATCH_VS2R_V, MASK_VS2R_V) +DECLARE_INSN(vs4r_v, MATCH_VS4R_V, MASK_VS4R_V) +DECLARE_INSN(vs8r_v, MATCH_VS8R_V, MASK_VS8R_V) +DECLARE_INSN(vsadd_vi, MATCH_VSADD_VI, MASK_VSADD_VI) +DECLARE_INSN(vsadd_vv, MATCH_VSADD_VV, MASK_VSADD_VV) +DECLARE_INSN(vsadd_vx, MATCH_VSADD_VX, MASK_VSADD_VX) +DECLARE_INSN(vsaddu_vi, MATCH_VSADDU_VI, MASK_VSADDU_VI) +DECLARE_INSN(vsaddu_vv, MATCH_VSADDU_VV, MASK_VSADDU_VV) +DECLARE_INSN(vsaddu_vx, MATCH_VSADDU_VX, MASK_VSADDU_VX) +DECLARE_INSN(vsbc_vvm, MATCH_VSBC_VVM, MASK_VSBC_VVM) +DECLARE_INSN(vsbc_vxm, MATCH_VSBC_VXM, MASK_VSBC_VXM) +DECLARE_INSN(vse1024_v, MATCH_VSE1024_V, MASK_VSE1024_V) +DECLARE_INSN(vse128_v, MATCH_VSE128_V, MASK_VSE128_V) +DECLARE_INSN(vse16_v, MATCH_VSE16_V, MASK_VSE16_V) +DECLARE_INSN(vse256_v, MATCH_VSE256_V, MASK_VSE256_V) +DECLARE_INSN(vse32_v, MATCH_VSE32_V, MASK_VSE32_V) +DECLARE_INSN(vse512_v, MATCH_VSE512_V, MASK_VSE512_V) +DECLARE_INSN(vse64_v, MATCH_VSE64_V, MASK_VSE64_V) +DECLARE_INSN(vse8_v, MATCH_VSE8_V, MASK_VSE8_V) +DECLARE_INSN(vsetivli, MATCH_VSETIVLI, MASK_VSETIVLI) +DECLARE_INSN(vsetvl, MATCH_VSETVL, MASK_VSETVL) +DECLARE_INSN(vsetvli, MATCH_VSETVLI, MASK_VSETVLI) +DECLARE_INSN(vsext_vf2, MATCH_VSEXT_VF2, MASK_VSEXT_VF2) +DECLARE_INSN(vsext_vf4, MATCH_VSEXT_VF4, MASK_VSEXT_VF4) +DECLARE_INSN(vsext_vf8, MATCH_VSEXT_VF8, MASK_VSEXT_VF8) +DECLARE_INSN(vslide1down_vx, MATCH_VSLIDE1DOWN_VX, MASK_VSLIDE1DOWN_VX) +DECLARE_INSN(vslide1up_vx, MATCH_VSLIDE1UP_VX, MASK_VSLIDE1UP_VX) +DECLARE_INSN(vslidedown_vi, MATCH_VSLIDEDOWN_VI, MASK_VSLIDEDOWN_VI) +DECLARE_INSN(vslidedown_vx, MATCH_VSLIDEDOWN_VX, MASK_VSLIDEDOWN_VX) +DECLARE_INSN(vslideup_vi, MATCH_VSLIDEUP_VI, MASK_VSLIDEUP_VI) +DECLARE_INSN(vslideup_vx, MATCH_VSLIDEUP_VX, MASK_VSLIDEUP_VX) +DECLARE_INSN(vsll_vi, MATCH_VSLL_VI, MASK_VSLL_VI) +DECLARE_INSN(vsll_vv, MATCH_VSLL_VV, MASK_VSLL_VV) +DECLARE_INSN(vsll_vx, MATCH_VSLL_VX, MASK_VSLL_VX) +DECLARE_INSN(vsm_v, MATCH_VSM_V, MASK_VSM_V) +DECLARE_INSN(vsmul_vv, MATCH_VSMUL_VV, MASK_VSMUL_VV) +DECLARE_INSN(vsmul_vx, MATCH_VSMUL_VX, MASK_VSMUL_VX) +DECLARE_INSN(vsoxei1024_v, MATCH_VSOXEI1024_V, MASK_VSOXEI1024_V) +DECLARE_INSN(vsoxei128_v, MATCH_VSOXEI128_V, MASK_VSOXEI128_V) +DECLARE_INSN(vsoxei16_v, MATCH_VSOXEI16_V, MASK_VSOXEI16_V) +DECLARE_INSN(vsoxei256_v, MATCH_VSOXEI256_V, MASK_VSOXEI256_V) +DECLARE_INSN(vsoxei32_v, 
MATCH_VSOXEI32_V, MASK_VSOXEI32_V) +DECLARE_INSN(vsoxei512_v, MATCH_VSOXEI512_V, MASK_VSOXEI512_V) +DECLARE_INSN(vsoxei64_v, MATCH_VSOXEI64_V, MASK_VSOXEI64_V) +DECLARE_INSN(vsoxei8_v, MATCH_VSOXEI8_V, MASK_VSOXEI8_V) +DECLARE_INSN(vsra_vi, MATCH_VSRA_VI, MASK_VSRA_VI) +DECLARE_INSN(vsra_vv, MATCH_VSRA_VV, MASK_VSRA_VV) +DECLARE_INSN(vsra_vx, MATCH_VSRA_VX, MASK_VSRA_VX) +DECLARE_INSN(vsrl_vi, MATCH_VSRL_VI, MASK_VSRL_VI) +DECLARE_INSN(vsrl_vv, MATCH_VSRL_VV, MASK_VSRL_VV) +DECLARE_INSN(vsrl_vx, MATCH_VSRL_VX, MASK_VSRL_VX) +DECLARE_INSN(vsse1024_v, MATCH_VSSE1024_V, MASK_VSSE1024_V) +DECLARE_INSN(vsse128_v, MATCH_VSSE128_V, MASK_VSSE128_V) +DECLARE_INSN(vsse16_v, MATCH_VSSE16_V, MASK_VSSE16_V) +DECLARE_INSN(vsse256_v, MATCH_VSSE256_V, MASK_VSSE256_V) +DECLARE_INSN(vsse32_v, MATCH_VSSE32_V, MASK_VSSE32_V) +DECLARE_INSN(vsse512_v, MATCH_VSSE512_V, MASK_VSSE512_V) +DECLARE_INSN(vsse64_v, MATCH_VSSE64_V, MASK_VSSE64_V) +DECLARE_INSN(vsse8_v, MATCH_VSSE8_V, MASK_VSSE8_V) +DECLARE_INSN(vssra_vi, MATCH_VSSRA_VI, MASK_VSSRA_VI) +DECLARE_INSN(vssra_vv, MATCH_VSSRA_VV, MASK_VSSRA_VV) +DECLARE_INSN(vssra_vx, MATCH_VSSRA_VX, MASK_VSSRA_VX) +DECLARE_INSN(vssrl_vi, MATCH_VSSRL_VI, MASK_VSSRL_VI) +DECLARE_INSN(vssrl_vv, MATCH_VSSRL_VV, MASK_VSSRL_VV) +DECLARE_INSN(vssrl_vx, MATCH_VSSRL_VX, MASK_VSSRL_VX) +DECLARE_INSN(vssub_vv, MATCH_VSSUB_VV, MASK_VSSUB_VV) +DECLARE_INSN(vssub_vx, MATCH_VSSUB_VX, MASK_VSSUB_VX) +DECLARE_INSN(vssubu_vv, MATCH_VSSUBU_VV, MASK_VSSUBU_VV) +DECLARE_INSN(vssubu_vx, MATCH_VSSUBU_VX, MASK_VSSUBU_VX) +DECLARE_INSN(vsub_vv, MATCH_VSUB_VV, MASK_VSUB_VV) +DECLARE_INSN(vsub_vx, MATCH_VSUB_VX, MASK_VSUB_VX) +DECLARE_INSN(vsuxei1024_v, MATCH_VSUXEI1024_V, MASK_VSUXEI1024_V) +DECLARE_INSN(vsuxei128_v, MATCH_VSUXEI128_V, MASK_VSUXEI128_V) +DECLARE_INSN(vsuxei16_v, MATCH_VSUXEI16_V, MASK_VSUXEI16_V) +DECLARE_INSN(vsuxei256_v, MATCH_VSUXEI256_V, MASK_VSUXEI256_V) +DECLARE_INSN(vsuxei32_v, MATCH_VSUXEI32_V, MASK_VSUXEI32_V) +DECLARE_INSN(vsuxei512_v, MATCH_VSUXEI512_V, MASK_VSUXEI512_V) +DECLARE_INSN(vsuxei64_v, MATCH_VSUXEI64_V, MASK_VSUXEI64_V) +DECLARE_INSN(vsuxei8_v, MATCH_VSUXEI8_V, MASK_VSUXEI8_V) +DECLARE_INSN(vwadd_vv, MATCH_VWADD_VV, MASK_VWADD_VV) +DECLARE_INSN(vwadd_vx, MATCH_VWADD_VX, MASK_VWADD_VX) +DECLARE_INSN(vwadd_wv, MATCH_VWADD_WV, MASK_VWADD_WV) +DECLARE_INSN(vwadd_wx, MATCH_VWADD_WX, MASK_VWADD_WX) +DECLARE_INSN(vwaddu_vv, MATCH_VWADDU_VV, MASK_VWADDU_VV) +DECLARE_INSN(vwaddu_vx, MATCH_VWADDU_VX, MASK_VWADDU_VX) +DECLARE_INSN(vwaddu_wv, MATCH_VWADDU_WV, MASK_VWADDU_WV) +DECLARE_INSN(vwaddu_wx, MATCH_VWADDU_WX, MASK_VWADDU_WX) +DECLARE_INSN(vwmacc_vv, MATCH_VWMACC_VV, MASK_VWMACC_VV) +DECLARE_INSN(vwmacc_vx, MATCH_VWMACC_VX, MASK_VWMACC_VX) +DECLARE_INSN(vwmaccsu_vv, MATCH_VWMACCSU_VV, MASK_VWMACCSU_VV) +DECLARE_INSN(vwmaccsu_vx, MATCH_VWMACCSU_VX, MASK_VWMACCSU_VX) +DECLARE_INSN(vwmaccu_vv, MATCH_VWMACCU_VV, MASK_VWMACCU_VV) +DECLARE_INSN(vwmaccu_vx, MATCH_VWMACCU_VX, MASK_VWMACCU_VX) +DECLARE_INSN(vwmaccus_vx, MATCH_VWMACCUS_VX, MASK_VWMACCUS_VX) +DECLARE_INSN(vwmul_vv, MATCH_VWMUL_VV, MASK_VWMUL_VV) +DECLARE_INSN(vwmul_vx, MATCH_VWMUL_VX, MASK_VWMUL_VX) +DECLARE_INSN(vwmulsu_vv, MATCH_VWMULSU_VV, MASK_VWMULSU_VV) +DECLARE_INSN(vwmulsu_vx, MATCH_VWMULSU_VX, MASK_VWMULSU_VX) +DECLARE_INSN(vwmulu_vv, MATCH_VWMULU_VV, MASK_VWMULU_VV) +DECLARE_INSN(vwmulu_vx, MATCH_VWMULU_VX, MASK_VWMULU_VX) +DECLARE_INSN(vwredsum_vs, MATCH_VWREDSUM_VS, MASK_VWREDSUM_VS) +DECLARE_INSN(vwredsumu_vs, MATCH_VWREDSUMU_VS, MASK_VWREDSUMU_VS) +DECLARE_INSN(vwsub_vv, MATCH_VWSUB_VV, 
MASK_VWSUB_VV) +DECLARE_INSN(vwsub_vx, MATCH_VWSUB_VX, MASK_VWSUB_VX) +DECLARE_INSN(vwsub_wv, MATCH_VWSUB_WV, MASK_VWSUB_WV) +DECLARE_INSN(vwsub_wx, MATCH_VWSUB_WX, MASK_VWSUB_WX) +DECLARE_INSN(vwsubu_vv, MATCH_VWSUBU_VV, MASK_VWSUBU_VV) +DECLARE_INSN(vwsubu_vx, MATCH_VWSUBU_VX, MASK_VWSUBU_VX) +DECLARE_INSN(vwsubu_wv, MATCH_VWSUBU_WV, MASK_VWSUBU_WV) +DECLARE_INSN(vwsubu_wx, MATCH_VWSUBU_WX, MASK_VWSUBU_WX) +DECLARE_INSN(vxor_vi, MATCH_VXOR_VI, MASK_VXOR_VI) +DECLARE_INSN(vxor_vv, MATCH_VXOR_VV, MASK_VXOR_VV) +DECLARE_INSN(vxor_vx, MATCH_VXOR_VX, MASK_VXOR_VX) +DECLARE_INSN(vzext_vf2, MATCH_VZEXT_VF2, MASK_VZEXT_VF2) +DECLARE_INSN(vzext_vf4, MATCH_VZEXT_VF4, MASK_VZEXT_VF4) +DECLARE_INSN(vzext_vf8, MATCH_VZEXT_VF8, MASK_VZEXT_VF8) DECLARE_INSN(wext, MATCH_WEXT, MASK_WEXT) +DECLARE_INSN(wexti, MATCH_WEXTI, MASK_WEXTI) +DECLARE_INSN(wfi, MATCH_WFI, MASK_WFI) +DECLARE_INSN(wrs_nto, MATCH_WRS_NTO, MASK_WRS_NTO) +DECLARE_INSN(wrs_sto, MATCH_WRS_STO, MASK_WRS_STO) +DECLARE_INSN(xnor, MATCH_XNOR, MASK_XNOR) +DECLARE_INSN(xor, MATCH_XOR, MASK_XOR) +DECLARE_INSN(xori, MATCH_XORI, MASK_XORI) +DECLARE_INSN(xperm16, MATCH_XPERM16, MASK_XPERM16) +DECLARE_INSN(xperm32, MATCH_XPERM32, MASK_XPERM32) +DECLARE_INSN(xperm4, MATCH_XPERM4, MASK_XPERM4) +DECLARE_INSN(xperm8, MATCH_XPERM8, MASK_XPERM8) DECLARE_INSN(zunpkd810, MATCH_ZUNPKD810, MASK_ZUNPKD810) DECLARE_INSN(zunpkd820, MATCH_ZUNPKD820, MASK_ZUNPKD820) DECLARE_INSN(zunpkd830, MATCH_ZUNPKD830, MASK_ZUNPKD830) DECLARE_INSN(zunpkd831, MATCH_ZUNPKD831, MASK_ZUNPKD831) DECLARE_INSN(zunpkd832, MATCH_ZUNPKD832, MASK_ZUNPKD832) -DECLARE_INSN(add32, MATCH_ADD32, MASK_ADD32) -DECLARE_INSN(cras32, MATCH_CRAS32, MASK_CRAS32) -DECLARE_INSN(crsa32, MATCH_CRSA32, MASK_CRSA32) -DECLARE_INSN(kabs32, MATCH_KABS32, MASK_KABS32) -DECLARE_INSN(kadd32, MATCH_KADD32, MASK_KADD32) -DECLARE_INSN(kcras32, MATCH_KCRAS32, MASK_KCRAS32) -DECLARE_INSN(kcrsa32, MATCH_KCRSA32, MASK_KCRSA32) -DECLARE_INSN(kdmbb16, MATCH_KDMBB16, MASK_KDMBB16) -DECLARE_INSN(kdmbt16, MATCH_KDMBT16, MASK_KDMBT16) -DECLARE_INSN(kdmtt16, MATCH_KDMTT16, MASK_KDMTT16) -DECLARE_INSN(kdmabb16, MATCH_KDMABB16, MASK_KDMABB16) -DECLARE_INSN(kdmabt16, MATCH_KDMABT16, MASK_KDMABT16) -DECLARE_INSN(kdmatt16, MATCH_KDMATT16, MASK_KDMATT16) -DECLARE_INSN(khmbb16, MATCH_KHMBB16, MASK_KHMBB16) -DECLARE_INSN(khmbt16, MATCH_KHMBT16, MASK_KHMBT16) -DECLARE_INSN(khmtt16, MATCH_KHMTT16, MASK_KHMTT16) -DECLARE_INSN(kmabb32, MATCH_KMABB32, MASK_KMABB32) -DECLARE_INSN(kmabt32, MATCH_KMABT32, MASK_KMABT32) -DECLARE_INSN(kmatt32, MATCH_KMATT32, MASK_KMATT32) -DECLARE_INSN(kmaxda32, MATCH_KMAXDA32, MASK_KMAXDA32) -DECLARE_INSN(kmda32, MATCH_KMDA32, MASK_KMDA32) -DECLARE_INSN(kmxda32, MATCH_KMXDA32, MASK_KMXDA32) -DECLARE_INSN(kmads32, MATCH_KMADS32, MASK_KMADS32) -DECLARE_INSN(kmadrs32, MATCH_KMADRS32, MASK_KMADRS32) -DECLARE_INSN(kmaxds32, MATCH_KMAXDS32, MASK_KMAXDS32) -DECLARE_INSN(kmsda32, MATCH_KMSDA32, MASK_KMSDA32) -DECLARE_INSN(kmsxda32, MATCH_KMSXDA32, MASK_KMSXDA32) -DECLARE_INSN(ksll32, MATCH_KSLL32, MASK_KSLL32) -DECLARE_INSN(kslli32, MATCH_KSLLI32, MASK_KSLLI32) -DECLARE_INSN(kslra32, MATCH_KSLRA32, MASK_KSLRA32) -DECLARE_INSN(kslra32_u, MATCH_KSLRA32_U, MASK_KSLRA32_U) -DECLARE_INSN(kstas32, MATCH_KSTAS32, MASK_KSTAS32) -DECLARE_INSN(kstsa32, MATCH_KSTSA32, MASK_KSTSA32) -DECLARE_INSN(ksub32, MATCH_KSUB32, MASK_KSUB32) -DECLARE_INSN(pkbb32, MATCH_PKBB32, MASK_PKBB32) -DECLARE_INSN(pkbt32, MATCH_PKBT32, MASK_PKBT32) -DECLARE_INSN(pktt32, MATCH_PKTT32, MASK_PKTT32) -DECLARE_INSN(pktb32, MATCH_PKTB32, MASK_PKTB32) 
-DECLARE_INSN(radd32, MATCH_RADD32, MASK_RADD32) -DECLARE_INSN(rcras32, MATCH_RCRAS32, MASK_RCRAS32) -DECLARE_INSN(rcrsa32, MATCH_RCRSA32, MASK_RCRSA32) -DECLARE_INSN(rstas32, MATCH_RSTAS32, MASK_RSTAS32) -DECLARE_INSN(rstsa32, MATCH_RSTSA32, MASK_RSTSA32) -DECLARE_INSN(rsub32, MATCH_RSUB32, MASK_RSUB32) -DECLARE_INSN(sll32, MATCH_SLL32, MASK_SLL32) -DECLARE_INSN(slli32, MATCH_SLLI32, MASK_SLLI32) -DECLARE_INSN(smax32, MATCH_SMAX32, MASK_SMAX32) -DECLARE_INSN(smbt32, MATCH_SMBT32, MASK_SMBT32) -DECLARE_INSN(smtt32, MATCH_SMTT32, MASK_SMTT32) -DECLARE_INSN(smds32, MATCH_SMDS32, MASK_SMDS32) -DECLARE_INSN(smdrs32, MATCH_SMDRS32, MASK_SMDRS32) -DECLARE_INSN(smxds32, MATCH_SMXDS32, MASK_SMXDS32) -DECLARE_INSN(smin32, MATCH_SMIN32, MASK_SMIN32) -DECLARE_INSN(sra32, MATCH_SRA32, MASK_SRA32) -DECLARE_INSN(sra32_u, MATCH_SRA32_U, MASK_SRA32_U) -DECLARE_INSN(srai32, MATCH_SRAI32, MASK_SRAI32) -DECLARE_INSN(srai32_u, MATCH_SRAI32_U, MASK_SRAI32_U) -DECLARE_INSN(sraiw_u, MATCH_SRAIW_U, MASK_SRAIW_U) -DECLARE_INSN(srl32, MATCH_SRL32, MASK_SRL32) -DECLARE_INSN(srl32_u, MATCH_SRL32_U, MASK_SRL32_U) -DECLARE_INSN(srli32, MATCH_SRLI32, MASK_SRLI32) -DECLARE_INSN(srli32_u, MATCH_SRLI32_U, MASK_SRLI32_U) -DECLARE_INSN(stas32, MATCH_STAS32, MASK_STAS32) -DECLARE_INSN(stsa32, MATCH_STSA32, MASK_STSA32) -DECLARE_INSN(sub32, MATCH_SUB32, MASK_SUB32) -DECLARE_INSN(ukadd32, MATCH_UKADD32, MASK_UKADD32) -DECLARE_INSN(ukcras32, MATCH_UKCRAS32, MASK_UKCRAS32) -DECLARE_INSN(ukcrsa32, MATCH_UKCRSA32, MASK_UKCRSA32) -DECLARE_INSN(ukstas32, MATCH_UKSTAS32, MASK_UKSTAS32) -DECLARE_INSN(ukstsa32, MATCH_UKSTSA32, MASK_UKSTSA32) -DECLARE_INSN(uksub32, MATCH_UKSUB32, MASK_UKSUB32) -DECLARE_INSN(umax32, MATCH_UMAX32, MASK_UMAX32) -DECLARE_INSN(umin32, MATCH_UMIN32, MASK_UMIN32) -DECLARE_INSN(uradd32, MATCH_URADD32, MASK_URADD32) -DECLARE_INSN(urcras32, MATCH_URCRAS32, MASK_URCRAS32) -DECLARE_INSN(urcrsa32, MATCH_URCRSA32, MASK_URCRSA32) -DECLARE_INSN(urstas32, MATCH_URSTAS32, MASK_URSTAS32) -DECLARE_INSN(urstsa32, MATCH_URSTSA32, MASK_URSTSA32) -DECLARE_INSN(ursub32, MATCH_URSUB32, MASK_URSUB32) -DECLARE_INSN(vmvnfr_v, MATCH_VMVNFR_V, MASK_VMVNFR_V) -DECLARE_INSN(vl1r_v, MATCH_VL1R_V, MASK_VL1R_V) -DECLARE_INSN(vl2r_v, MATCH_VL2R_V, MASK_VL2R_V) -DECLARE_INSN(vl4r_v, MATCH_VL4R_V, MASK_VL4R_V) -DECLARE_INSN(vl8r_v, MATCH_VL8R_V, MASK_VL8R_V) -DECLARE_INSN(vle1_v, MATCH_VLE1_V, MASK_VLE1_V) -DECLARE_INSN(vse1_v, MATCH_VSE1_V, MASK_VSE1_V) -DECLARE_INSN(vfredsum_vs, MATCH_VFREDSUM_VS, MASK_VFREDSUM_VS) -DECLARE_INSN(vfwredsum_vs, MATCH_VFWREDSUM_VS, MASK_VFWREDSUM_VS) -DECLARE_INSN(vpopc_m, MATCH_VPOPC_M, MASK_VPOPC_M) -DECLARE_INSN(vmornot_mm, MATCH_VMORNOT_MM, MASK_VMORNOT_MM) -DECLARE_INSN(vmandnot_mm, MATCH_VMANDNOT_MM, MASK_VMANDNOT_MM) #endif #ifdef DECLARE_CSR DECLARE_CSR(fflags, CSR_FFLAGS) @@ -4498,11 +4507,16 @@ DECLARE_CSR(sie, CSR_SIE) DECLARE_CSR(stvec, CSR_STVEC) DECLARE_CSR(scounteren, CSR_SCOUNTEREN) DECLARE_CSR(senvcfg, CSR_SENVCFG) +DECLARE_CSR(sstateen0, CSR_SSTATEEN0) +DECLARE_CSR(sstateen1, CSR_SSTATEEN1) +DECLARE_CSR(sstateen2, CSR_SSTATEEN2) +DECLARE_CSR(sstateen3, CSR_SSTATEEN3) DECLARE_CSR(sscratch, CSR_SSCRATCH) DECLARE_CSR(sepc, CSR_SEPC) DECLARE_CSR(scause, CSR_SCAUSE) DECLARE_CSR(stval, CSR_STVAL) DECLARE_CSR(sip, CSR_SIP) +DECLARE_CSR(stimecmp, CSR_STIMECMP) DECLARE_CSR(satp, CSR_SATP) DECLARE_CSR(scontext, CSR_SCONTEXT) DECLARE_CSR(vsstatus, CSR_VSSTATUS) @@ -4513,6 +4527,7 @@ DECLARE_CSR(vsepc, CSR_VSEPC) DECLARE_CSR(vscause, CSR_VSCAUSE) DECLARE_CSR(vstval, CSR_VSTVAL) DECLARE_CSR(vsip, 
CSR_VSIP) +DECLARE_CSR(vstimecmp, CSR_VSTIMECMP) DECLARE_CSR(vsatp, CSR_VSATP) DECLARE_CSR(hstatus, CSR_HSTATUS) DECLARE_CSR(hedeleg, CSR_HEDELEG) @@ -4522,6 +4537,10 @@ DECLARE_CSR(htimedelta, CSR_HTIMEDELTA) DECLARE_CSR(hcounteren, CSR_HCOUNTEREN) DECLARE_CSR(hgeie, CSR_HGEIE) DECLARE_CSR(henvcfg, CSR_HENVCFG) +DECLARE_CSR(hstateen0, CSR_HSTATEEN0) +DECLARE_CSR(hstateen1, CSR_HSTATEEN1) +DECLARE_CSR(hstateen2, CSR_HSTATEEN2) +DECLARE_CSR(hstateen3, CSR_HSTATEEN3) DECLARE_CSR(htval, CSR_HTVAL) DECLARE_CSR(hip, CSR_HIP) DECLARE_CSR(hvip, CSR_HVIP) @@ -4529,6 +4548,7 @@ DECLARE_CSR(htinst, CSR_HTINST) DECLARE_CSR(hgatp, CSR_HGATP) DECLARE_CSR(hcontext, CSR_HCONTEXT) DECLARE_CSR(hgeip, CSR_HGEIP) +DECLARE_CSR(scountovf, CSR_SCOUNTOVF) DECLARE_CSR(utvt, CSR_UTVT) DECLARE_CSR(unxti, CSR_UNXTI) DECLARE_CSR(uintstatus, CSR_UINTSTATUS) @@ -4552,6 +4572,10 @@ DECLARE_CSR(mie, CSR_MIE) DECLARE_CSR(mtvec, CSR_MTVEC) DECLARE_CSR(mcounteren, CSR_MCOUNTEREN) DECLARE_CSR(menvcfg, CSR_MENVCFG) +DECLARE_CSR(mstateen0, CSR_MSTATEEN0) +DECLARE_CSR(mstateen1, CSR_MSTATEEN1) +DECLARE_CSR(mstateen2, CSR_MSTATEEN2) +DECLARE_CSR(mstateen3, CSR_MSTATEEN3) DECLARE_CSR(mcountinhibit, CSR_MCOUNTINHIBIT) DECLARE_CSR(mscratch, CSR_MSCRATCH) DECLARE_CSR(mepc, CSR_MEPC) @@ -4718,8 +4742,14 @@ DECLARE_CSR(marchid, CSR_MARCHID) DECLARE_CSR(mimpid, CSR_MIMPID) DECLARE_CSR(mhartid, CSR_MHARTID) DECLARE_CSR(mconfigptr, CSR_MCONFIGPTR) +DECLARE_CSR(stimecmph, CSR_STIMECMPH) +DECLARE_CSR(vstimecmph, CSR_VSTIMECMPH) DECLARE_CSR(htimedeltah, CSR_HTIMEDELTAH) DECLARE_CSR(henvcfgh, CSR_HENVCFGH) +DECLARE_CSR(hstateen0h, CSR_HSTATEEN0H) +DECLARE_CSR(hstateen1h, CSR_HSTATEEN1H) +DECLARE_CSR(hstateen2h, CSR_HSTATEEN2H) +DECLARE_CSR(hstateen3h, CSR_HSTATEEN3H) DECLARE_CSR(cycleh, CSR_CYCLEH) DECLARE_CSR(timeh, CSR_TIMEH) DECLARE_CSR(instreth, CSR_INSTRETH) @@ -4754,6 +4784,39 @@ DECLARE_CSR(hpmcounter30h, CSR_HPMCOUNTER30H) DECLARE_CSR(hpmcounter31h, CSR_HPMCOUNTER31H) DECLARE_CSR(mstatush, CSR_MSTATUSH) DECLARE_CSR(menvcfgh, CSR_MENVCFGH) +DECLARE_CSR(mstateen0h, CSR_MSTATEEN0H) +DECLARE_CSR(mstateen1h, CSR_MSTATEEN1H) +DECLARE_CSR(mstateen2h, CSR_MSTATEEN2H) +DECLARE_CSR(mstateen3h, CSR_MSTATEEN3H) +DECLARE_CSR(mhpmevent3h, CSR_MHPMEVENT3H) +DECLARE_CSR(mhpmevent4h, CSR_MHPMEVENT4H) +DECLARE_CSR(mhpmevent5h, CSR_MHPMEVENT5H) +DECLARE_CSR(mhpmevent6h, CSR_MHPMEVENT6H) +DECLARE_CSR(mhpmevent7h, CSR_MHPMEVENT7H) +DECLARE_CSR(mhpmevent8h, CSR_MHPMEVENT8H) +DECLARE_CSR(mhpmevent9h, CSR_MHPMEVENT9H) +DECLARE_CSR(mhpmevent10h, CSR_MHPMEVENT10H) +DECLARE_CSR(mhpmevent11h, CSR_MHPMEVENT11H) +DECLARE_CSR(mhpmevent12h, CSR_MHPMEVENT12H) +DECLARE_CSR(mhpmevent13h, CSR_MHPMEVENT13H) +DECLARE_CSR(mhpmevent14h, CSR_MHPMEVENT14H) +DECLARE_CSR(mhpmevent15h, CSR_MHPMEVENT15H) +DECLARE_CSR(mhpmevent16h, CSR_MHPMEVENT16H) +DECLARE_CSR(mhpmevent17h, CSR_MHPMEVENT17H) +DECLARE_CSR(mhpmevent18h, CSR_MHPMEVENT18H) +DECLARE_CSR(mhpmevent19h, CSR_MHPMEVENT19H) +DECLARE_CSR(mhpmevent20h, CSR_MHPMEVENT20H) +DECLARE_CSR(mhpmevent21h, CSR_MHPMEVENT21H) +DECLARE_CSR(mhpmevent22h, CSR_MHPMEVENT22H) +DECLARE_CSR(mhpmevent23h, CSR_MHPMEVENT23H) +DECLARE_CSR(mhpmevent24h, CSR_MHPMEVENT24H) +DECLARE_CSR(mhpmevent25h, CSR_MHPMEVENT25H) +DECLARE_CSR(mhpmevent26h, CSR_MHPMEVENT26H) +DECLARE_CSR(mhpmevent27h, CSR_MHPMEVENT27H) +DECLARE_CSR(mhpmevent28h, CSR_MHPMEVENT28H) +DECLARE_CSR(mhpmevent29h, CSR_MHPMEVENT29H) +DECLARE_CSR(mhpmevent30h, CSR_MHPMEVENT30H) +DECLARE_CSR(mhpmevent31h, CSR_MHPMEVENT31H) DECLARE_CSR(mseccfgh, CSR_MSECCFGH) DECLARE_CSR(mcycleh, CSR_MCYCLEH) 
DECLARE_CSR(minstreth, CSR_MINSTRETH) -- cgit v1.1 From 45b9b9c2940704c6e2d77f04450abb2155dbb8b7 Mon Sep 17 00:00:00 2001 From: Weiwei Li Date: Fri, 1 Jul 2022 22:06:58 +0800 Subject: add isa string parser for smstateen --- riscv/isa_parser.cc | 2 ++ riscv/isa_parser.h | 1 + 2 files changed, 3 insertions(+) diff --git a/riscv/isa_parser.cc b/riscv/isa_parser.cc index 0adec2c..0f9eb06 100644 --- a/riscv/isa_parser.cc +++ b/riscv/isa_parser.cc @@ -164,6 +164,8 @@ isa_parser_t::isa_parser_t(const char* str, const char *priv) } else if (ext_str == "zkr") { extension_table[EXT_ZKR] = true; } else if (ext_str == "zkt") { + } else if (ext_str == "smstateen") { + extension_table[EXT_SMSTATEEN] = true; } else if (ext_str == "svnapot") { extension_table[EXT_SVNAPOT] = true; } else if (ext_str == "svpbmt") { diff --git a/riscv/isa_parser.h b/riscv/isa_parser.h index 3cefe12..8debf5d 100644 --- a/riscv/isa_parser.h +++ b/riscv/isa_parser.h @@ -31,6 +31,7 @@ typedef enum { EXT_ZBPBO, EXT_ZPN, EXT_ZPSFOPERAND, + EXT_SMSTATEEN, EXT_SVNAPOT, EXT_SVPBMT, EXT_SVINVAL, -- cgit v1.1 From 9b66f89b8102f032f721fe332819325508aa3b95 Mon Sep 17 00:00:00 2001 From: Weiwei Li Date: Wed, 6 Jul 2022 10:43:57 +0800 Subject: modify mstatush_csr_t to general rv32_high_csr_t --- riscv/csrs.cc | 20 ++++++++++++-------- riscv/csrs.h | 10 ++++++---- riscv/processor.cc | 6 +++++- 3 files changed, 23 insertions(+), 13 deletions(-) diff --git a/riscv/csrs.cc b/riscv/csrs.cc index 7d1be10..b8fa920 100644 --- a/riscv/csrs.cc +++ b/riscv/csrs.cc @@ -493,19 +493,23 @@ bool mstatus_csr_t::unlogged_write(const reg_t val) noexcept { return true; } -// implement class mstatush_csr_t -mstatush_csr_t::mstatush_csr_t(processor_t* const proc, const reg_t addr, mstatus_csr_t_p mstatus): +// implement class rv32_high_csr_t +rv32_high_csr_t::rv32_high_csr_t(processor_t* const proc, const reg_t addr, const reg_t mask, csr_t_p orig): csr_t(proc, addr), - mstatus(mstatus), - mask(MSTATUSH_MPV | MSTATUSH_GVA | MSTATUSH_SBE | MSTATUSH_MBE) { + orig(orig), + mask(mask) { +} + +reg_t rv32_high_csr_t::read() const noexcept { + return (orig->read() >> 32) & mask; } -reg_t mstatush_csr_t::read() const noexcept { - return (mstatus->read() >> 32) & mask; +void rv32_high_csr_t::verify_permissions(insn_t insn, bool write) const { + orig->verify_permissions(insn, write); } -bool mstatush_csr_t::unlogged_write(const reg_t val) noexcept { - return mstatus->unlogged_write((mstatus->written_value() & ~(mask << 32)) | ((val & mask) << 32)); +bool rv32_high_csr_t::unlogged_write(const reg_t val) noexcept { + return orig->unlogged_write((orig->written_value() & ~(mask << 32)) | ((val & mask) << 32)); } // implement class sstatus_csr_t diff --git a/riscv/csrs.h b/riscv/csrs.h index ab3cdb7..03a3ed6 100644 --- a/riscv/csrs.h +++ b/riscv/csrs.h @@ -53,6 +53,8 @@ class csr_t { private: const unsigned csr_priv; const bool csr_read_only; + + friend class rv32_high_csr_t; }; typedef std::shared_ptr csr_t_p; @@ -243,19 +245,19 @@ class mstatus_csr_t final: public base_status_csr_t { virtual bool unlogged_write(const reg_t val) noexcept override; private: reg_t val; - friend class mstatush_csr_t; }; typedef std::shared_ptr mstatus_csr_t_p; -class mstatush_csr_t: public csr_t { +class rv32_high_csr_t: public csr_t { public: - mstatush_csr_t(processor_t* const proc, const reg_t addr, mstatus_csr_t_p mstatus); + rv32_high_csr_t(processor_t* const proc, const reg_t addr, const reg_t mask, csr_t_p orig); virtual reg_t read() const noexcept override; + virtual void 
verify_permissions(insn_t insn, bool write) const override; protected: virtual bool unlogged_write(const reg_t val) noexcept override; private: - mstatus_csr_t_p mstatus; + csr_t_p orig; const reg_t mask; }; diff --git a/riscv/processor.cc b/riscv/processor.cc index c4ca0bc..7ffc8d1 100644 --- a/riscv/processor.cc +++ b/riscv/processor.cc @@ -190,7 +190,11 @@ void state_t::reset(processor_t* const proc, reg_t max_isa) v = false; csrmap[CSR_MISA] = misa = std::make_shared(proc, CSR_MISA, max_isa); csrmap[CSR_MSTATUS] = mstatus = std::make_shared(proc, CSR_MSTATUS); - if (xlen == 32) csrmap[CSR_MSTATUSH] = std::make_shared(proc, CSR_MSTATUSH, mstatus); + + if (xlen == 32) { + const reg_t mstatush_mask = MSTATUSH_MPV | MSTATUSH_GVA | MSTATUSH_SBE | MSTATUSH_MBE; + csrmap[CSR_MSTATUSH] = std::make_shared(proc, CSR_MSTATUSH, mstatush_mask, mstatus); + } csrmap[CSR_MEPC] = mepc = std::make_shared(proc, CSR_MEPC); csrmap[CSR_MTVAL] = mtval = std::make_shared(proc, CSR_MTVAL, 0); csrmap[CSR_MSCRATCH] = std::make_shared(proc, CSR_MSCRATCH, 0); -- cgit v1.1 From 2bf74857f0f7f3a63e029d7c7ecaf3d4523a846e Mon Sep 17 00:00:00 2001 From: Weiwei Li Date: Wed, 6 Jul 2022 10:45:04 +0800 Subject: add support for csrs of smstateen extensions --- riscv/csrs.cc | 61 ++++++++++++++++++++++++++++++++++++++++++++++++++++++ riscv/csrs.h | 19 +++++++++++++++++ riscv/processor.cc | 22 ++++++++++++++++++++ riscv/processor.h | 4 ++++ 4 files changed, 106 insertions(+) diff --git a/riscv/csrs.cc b/riscv/csrs.cc index b8fa920..c2bdf0a 100644 --- a/riscv/csrs.cc +++ b/riscv/csrs.cc @@ -1244,3 +1244,64 @@ bool vxsat_csr_t::unlogged_write(const reg_t val) noexcept { dirty_vs_state; return masked_csr_t::unlogged_write(val); } + +// implement class hstateen_csr_t +hstateen_csr_t::hstateen_csr_t(processor_t* const proc, const reg_t addr, const reg_t mask, + const reg_t init, uint8_t index): + masked_csr_t(proc, addr, mask, init), + index(index) { +} + +reg_t hstateen_csr_t::read() const noexcept { + // For every bit in an mstateen CSR that is zero (whether read-only zero or set to zero), + // the same bit appears as read-only zero in the matching hstateen and sstateen CSRs + return masked_csr_t::read() & state->mstateen[index]->read(); +} + +bool hstateen_csr_t::unlogged_write(const reg_t val) noexcept { + // For every bit in an mstateen CSR that is zero (whether read-only zero or set to zero), + // the same bit appears as read-only zero in the matching hstateen and sstateen CSRs + return masked_csr_t::unlogged_write(val & state->mstateen[index]->read()); +} + +void hstateen_csr_t::verify_permissions(insn_t insn, bool write) const { + masked_csr_t::verify_permissions(insn, write); + + if ((state->prv < PRV_M) && !(state->mstateen[index]->read() & MSTATEEN_HSTATEEN)) + throw trap_illegal_instruction(insn.bits()); +} + +// implement class sstateen_csr_t +sstateen_csr_t::sstateen_csr_t(processor_t* const proc, const reg_t addr, const reg_t mask, + const reg_t init, uint8_t index): + hstateen_csr_t(proc, addr, mask, init, index) { +} + +reg_t sstateen_csr_t::read() const noexcept { + // For every bit in an mstateen CSR that is zero (whether read-only zero or set to zero), + // the same bit appears as read-only zero in the matching hstateen and sstateen CSRs + // For every bit in an hstateen CSR that is zero (whether read-only zero or set to zero), + // the same bit appears as read-only zero in sstateen when accessed in VS-mode + if (state->v) + return hstateen_csr_t::read() & state->hstateen[index]->read(); + else + return 
hstateen_csr_t::read(); +} + +bool sstateen_csr_t::unlogged_write(const reg_t val) noexcept { + // For every bit in an mstateen CSR that is zero (whether read-only zero or set to zero), + // the same bit appears as read-only zero in the matching hstateen and sstateen CSRs + // For every bit in an hstateen CSR that is zero (whether read-only zero or set to zero), + // the same bit appears as read-only zero in sstateen when accessed in VS-mode + if (state->v) + return hstateen_csr_t::unlogged_write(val & state->hstateen[index]->read()); + else + return hstateen_csr_t::unlogged_write(val); +} + +void sstateen_csr_t::verify_permissions(insn_t insn, bool write) const { + hstateen_csr_t::verify_permissions(insn, write); + + if (state->v && !(state->hstateen[index]->read() & HSTATEEN_SSTATEEN)) + throw trap_virtual_instruction(insn.bits()); +} diff --git a/riscv/csrs.h b/riscv/csrs.h index 03a3ed6..15f868c 100644 --- a/riscv/csrs.h +++ b/riscv/csrs.h @@ -676,4 +676,23 @@ class vxsat_csr_t: public masked_csr_t { virtual bool unlogged_write(const reg_t val) noexcept override; }; +class hstateen_csr_t: public masked_csr_t { + public: + hstateen_csr_t(processor_t* const proc, const reg_t addr, const reg_t mask, const reg_t init, uint8_t index); + virtual reg_t read() const noexcept override; + virtual void verify_permissions(insn_t insn, bool write) const override; + protected: + virtual bool unlogged_write(const reg_t val) noexcept override; +protected: + uint8_t index; +}; + +class sstateen_csr_t: public hstateen_csr_t { + public: + sstateen_csr_t(processor_t* const proc, const reg_t addr, const reg_t mask, const reg_t init, uint8_t index); + virtual reg_t read() const noexcept override; + virtual void verify_permissions(insn_t insn, bool write) const override; + protected: + virtual bool unlogged_write(const reg_t val) noexcept override; +}; #endif diff --git a/riscv/processor.cc b/riscv/processor.cc index 7ffc8d1..1c33911 100644 --- a/riscv/processor.cc +++ b/riscv/processor.cc @@ -397,6 +397,28 @@ void state_t::reset(processor_t* const proc, reg_t max_isa) const reg_t henvcfg_init = (proc->extension_enabled(EXT_SVPBMT) ? HENVCFG_PBMTE : 0); csrmap[CSR_HENVCFG] = henvcfg = std::make_shared(proc, CSR_HENVCFG, henvcfg_mask, henvcfg_init, menvcfg); + if (proc->extension_enabled_const(EXT_SMSTATEEN)) { + const reg_t sstateen0_mask = (proc->extension_enabled(EXT_ZFINX) ? SSTATEEN0_FCSR : 0) | SSTATEEN0_CS; + const reg_t hstateen0_mask = sstateen0_mask | HSTATEEN0_SENVCFG | HSTATEEN_SSTATEEN; + const reg_t mstateen0_mask = hstateen0_mask; + for (int i = 0; i < 4; i++) { + const reg_t mstateen_mask = i == 0 ? mstateen0_mask : MSTATEEN_HSTATEEN; + csrmap[CSR_MSTATEEN0 + i] = mstateen[i] = std::make_shared(proc, CSR_MSTATEEN0 + i, mstateen_mask, 0); + if (xlen == 32) { + csrmap[CSR_MSTATEEN0H + i] = std::make_shared(proc, CSR_MSTATEEN0H + i, -1, mstateen[i]); + } + + const reg_t hstateen_mask = i == 0 ? hstateen0_mask : HSTATEEN_SSTATEEN; + csrmap[CSR_HSTATEEN0 + i] = hstateen[i] = std::make_shared(proc, CSR_HSTATEEN0 + i, hstateen_mask, 0, i); + if (xlen == 32) { + csrmap[CSR_HSTATEEN0H + i] = std::make_shared(proc, CSR_HSTATEEN0H + i, -1, hstateen[i]); + } + + const reg_t sstateen_mask = i == 0 ? 
sstateen0_mask : 0; + csrmap[CSR_SSTATEEN0 + i] = sstateen[i] = std::make_shared(proc, CSR_HSTATEEN0 + i, sstateen_mask, 0, i); + } + } + serialized = false; #ifdef RISCV_ENABLE_COMMITLOG diff --git a/riscv/processor.h b/riscv/processor.h index 0c6a6b2..727c404 100644 --- a/riscv/processor.h +++ b/riscv/processor.h @@ -186,6 +186,10 @@ struct state_t csr_t_p senvcfg; csr_t_p henvcfg; + csr_t_p mstateen[4]; + csr_t_p sstateen[4]; + csr_t_p hstateen[4]; + bool serialized; // whether timer CSRs are in a well-defined state // When true, execute a single instruction and then enter debug mode. This -- cgit v1.1 From 11dacaedc4b55ac1d79f1152a549ab9bfb170d2d Mon Sep 17 00:00:00 2001 From: Weiwei Li Date: Wed, 6 Jul 2022 10:51:36 +0800 Subject: add standalone class for fcsr and senvcfg csr --- riscv/csrs.cc | 11 +++++++++++ riscv/csrs.h | 11 +++++++++++ riscv/processor.cc | 4 ++-- 3 files changed, 24 insertions(+), 2 deletions(-) diff --git a/riscv/csrs.cc b/riscv/csrs.cc index c2bdf0a..1a23ff9 100644 --- a/riscv/csrs.cc +++ b/riscv/csrs.cc @@ -1305,3 +1305,14 @@ void sstateen_csr_t::verify_permissions(insn_t insn, bool write) const { if (state->v && !(state->hstateen[index]->read() & HSTATEEN_SSTATEEN)) throw trap_virtual_instruction(insn.bits()); } + +// implement class fcsr_csr_t +fcsr_csr_t::fcsr_csr_t(processor_t* const proc, const reg_t addr, csr_t_p upper_csr, csr_t_p lower_csr, const unsigned upper_lsb): + composite_csr_t(proc, addr, upper_csr, lower_csr, upper_lsb) { +} + +// implement class senvcfg_csr_t +senvcfg_csr_t::senvcfg_csr_t(processor_t* const proc, const reg_t addr, const reg_t mask, + const reg_t init): + masked_csr_t(proc, addr, mask, init) { +} diff --git a/riscv/csrs.h b/riscv/csrs.h index 15f868c..2b8a2fb 100644 --- a/riscv/csrs.h +++ b/riscv/csrs.h @@ -695,4 +695,15 @@ class sstateen_csr_t: public hstateen_csr_t { protected: virtual bool unlogged_write(const reg_t val) noexcept override; }; + +class fcsr_csr_t: public composite_csr_t { + public: + fcsr_csr_t(processor_t* const proc, const reg_t addr, csr_t_p upper_csr, csr_t_p lower_csr, const unsigned upper_lsb); +}; + +class senvcfg_csr_t final: public masked_csr_t { + public: + senvcfg_csr_t(processor_t* const proc, const reg_t addr, const reg_t mask, const reg_t init); +}; + #endif diff --git a/riscv/processor.cc b/riscv/processor.cc index 1c33911..5973baf 100644 --- a/riscv/processor.cc +++ b/riscv/processor.cc @@ -375,7 +375,7 @@ void state_t::reset(processor_t* const proc, reg_t max_isa) csrmap[CSR_FFLAGS] = fflags = std::make_shared(proc, CSR_FFLAGS, FSR_AEXC >> FSR_AEXC_SHIFT, 0); csrmap[CSR_FRM] = frm = std::make_shared(proc, CSR_FRM, FSR_RD >> FSR_RD_SHIFT, 0); assert(FSR_AEXC_SHIFT == 0); // composite_csr_t assumes fflags begins at bit 0 - csrmap[CSR_FCSR] = std::make_shared(proc, CSR_FCSR, frm, fflags, FSR_RD_SHIFT); + csrmap[CSR_FCSR] = std::make_shared(proc, CSR_FCSR, frm, fflags, FSR_RD_SHIFT); csrmap[CSR_SEED] = std::make_shared(proc, CSR_SEED); @@ -390,7 +390,7 @@ void state_t::reset(processor_t* const proc, reg_t max_isa) csrmap[CSR_MENVCFG] = menvcfg = std::make_shared(proc, CSR_MENVCFG, menvcfg_mask, menvcfg_init); const reg_t senvcfg_mask = (proc->extension_enabled(EXT_ZICBOM) ? SENVCFG_CBCFE | SENVCFG_CBIE : 0) | (proc->extension_enabled(EXT_ZICBOZ) ? 
SENVCFG_CBZE : 0); - csrmap[CSR_SENVCFG] = senvcfg = std::make_shared(proc, CSR_SENVCFG, senvcfg_mask, 0); + csrmap[CSR_SENVCFG] = senvcfg = std::make_shared(proc, CSR_SENVCFG, senvcfg_mask, 0); const reg_t henvcfg_mask = (proc->extension_enabled(EXT_ZICBOM) ? HENVCFG_CBCFE | HENVCFG_CBIE : 0) | (proc->extension_enabled(EXT_ZICBOZ) ? HENVCFG_CBZE : 0) | (proc->extension_enabled(EXT_SVPBMT) ? HENVCFG_PBMTE : 0); -- cgit v1.1 From 10fefa1542756a7f7caf45670a64d965a995153a Mon Sep 17 00:00:00 2001 From: Weiwei Li Date: Wed, 6 Jul 2022 10:52:15 +0800 Subject: add smstateen check for fcsr, senvcfg, henvcfg --- riscv/csrs.cc | 40 ++++++++++++++++++++++++++++++++++++++++ riscv/csrs.h | 4 ++++ 2 files changed, 44 insertions(+) diff --git a/riscv/csrs.cc b/riscv/csrs.cc index 1a23ff9..45b9c4a 100644 --- a/riscv/csrs.cc +++ b/riscv/csrs.cc @@ -1311,8 +1311,48 @@ fcsr_csr_t::fcsr_csr_t(processor_t* const proc, const reg_t addr, csr_t_p upper_ composite_csr_t(proc, addr, upper_csr, lower_csr, upper_lsb) { } +void fcsr_csr_t::verify_permissions(insn_t insn, bool write) const { + composite_csr_t::verify_permissions(insn, write); + + if (proc->extension_enabled(EXT_SMSTATEEN) && proc->extension_enabled(EXT_ZFINX)) { + if ((state->prv < PRV_M) && !(state->mstateen[0]->read() & MSTATEEN0_FCSR)) + throw trap_illegal_instruction(insn.bits()); + + if (state->v && !(state->hstateen[0]->read() & HSTATEEN0_FCSR)) + throw trap_virtual_instruction(insn.bits()); + + if ((proc->extension_enabled('S') && state->prv < PRV_S) && !(state->sstateen[0]->read() & SSTATEEN0_FCSR)) { + if (state->v) + throw trap_virtual_instruction(insn.bits()); + else + throw trap_illegal_instruction(insn.bits()); + } + } +} + // implement class senvcfg_csr_t senvcfg_csr_t::senvcfg_csr_t(processor_t* const proc, const reg_t addr, const reg_t mask, const reg_t init): masked_csr_t(proc, addr, mask, init) { } + +void senvcfg_csr_t::verify_permissions(insn_t insn, bool write) const { + masked_csr_t::verify_permissions(insn, write); + + if (proc->extension_enabled(EXT_SMSTATEEN)) { + if ((state->prv < PRV_M) && !(state->mstateen[0]->read() & MSTATEEN0_HENVCFG)) + throw trap_illegal_instruction(insn.bits()); + + if (state->v && !(state->hstateen[0]->read() & HSTATEEN0_SENVCFG)) + throw trap_virtual_instruction(insn.bits()); + } +} + +void henvcfg_csr_t::verify_permissions(insn_t insn, bool write) const { + masked_csr_t::verify_permissions(insn, write); + + if (proc->extension_enabled(EXT_SMSTATEEN)) { + if ((state->prv < PRV_M) && !(state->mstateen[0]->read() & MSTATEEN0_HENVCFG)) + throw trap_illegal_instruction(insn.bits()); + } +} diff --git a/riscv/csrs.h b/riscv/csrs.h index 2b8a2fb..bde31da 100644 --- a/riscv/csrs.h +++ b/riscv/csrs.h @@ -439,6 +439,8 @@ class henvcfg_csr_t final: public masked_csr_t { return (menvcfg->read() | ~MENVCFG_PBMTE) & masked_csr_t::read(); } + virtual void verify_permissions(insn_t insn, bool write) const override; + private: csr_t_p menvcfg; }; @@ -699,11 +701,13 @@ class sstateen_csr_t: public hstateen_csr_t { class fcsr_csr_t: public composite_csr_t { public: fcsr_csr_t(processor_t* const proc, const reg_t addr, csr_t_p upper_csr, csr_t_p lower_csr, const unsigned upper_lsb); + virtual void verify_permissions(insn_t insn, bool write) const override; }; class senvcfg_csr_t final: public masked_csr_t { public: senvcfg_csr_t(processor_t* const proc, const reg_t addr, const reg_t mask, const reg_t init); + virtual void verify_permissions(insn_t insn, bool write) const override; }; #endif -- cgit v1.1 From 
8fbeaab91669684551ae4115438aa90f8f755f4f Mon Sep 17 00:00:00 2001 From: Weiwei Li Date: Fri, 1 Jul 2022 22:47:41 +0800 Subject: update README.md --- README.md | 1 + 1 file changed, 1 insertion(+) diff --git a/README.md b/README.md index b8b5227..afc7187 100644 --- a/README.md +++ b/README.md @@ -39,6 +39,7 @@ Spike supports the following RISC-V ISA features: - CMO extension, v1.0 - Debug v0.14 - Smepmp extension v1.0 + - Smstateen extension, v1.0 As a Spike extension, the remainder of the proposed [Bit-Manipulation Extensions](https://github.com/riscv/riscv-bitmanip) -- cgit v1.1 From d920cdeeb47056e6976198384cd31f6e2e7b97da Mon Sep 17 00:00:00 2001 From: YenHaoChen <39526191+YenHaoChen@users.noreply.github.com> Date: Tue, 12 Jul 2022 06:13:10 +0800 Subject: Allow writes to pmp(i-1)cfg on locked pmp(i)cfg (#1039) The privileged spec allows writes to pmp(i-1)cfg with locked pmp(i)cfg (According to a recent discussion: https://github.com/riscv/riscv-isa-manual/issues/866) --- riscv/csrs.cc | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/riscv/csrs.cc b/riscv/csrs.cc index 6bca99c..c5e65c8 100644 --- a/riscv/csrs.cc +++ b/riscv/csrs.cc @@ -248,7 +248,7 @@ bool pmpcfg_csr_t::unlogged_write(const reg_t val) noexcept { for (size_t i0 = (address - CSR_PMPCFG0) * 4, i = i0; i < i0 + proc->get_xlen() / 8; i++) { if (i < proc->n_pmp) { const bool locked = (state->pmpaddr[i]->cfg & PMP_L); - if (rlb || (!locked && !state->pmpaddr[i]->next_locked_and_tor())) { + if (rlb || !locked) { uint8_t cfg = (val >> (8 * (i - i0))) & (PMP_R | PMP_W | PMP_X | PMP_A | PMP_L); // Drop R=0 W=1 when MML = 0 // Remove the restriction when MML = 1 -- cgit v1.1 From c4a5f54094fc925f96b93f3d24b20f79de1dc5fd Mon Sep 17 00:00:00 2001 From: YenHaoChen Date: Wed, 13 Jul 2022 09:14:28 +0800 Subject: add isa string parser for smepmp --- riscv/isa_parser.cc | 2 ++ riscv/isa_parser.h | 1 + 2 files changed, 3 insertions(+) diff --git a/riscv/isa_parser.cc b/riscv/isa_parser.cc index 0f9eb06..040840a 100644 --- a/riscv/isa_parser.cc +++ b/riscv/isa_parser.cc @@ -164,6 +164,8 @@ isa_parser_t::isa_parser_t(const char* str, const char *priv) } else if (ext_str == "zkr") { extension_table[EXT_ZKR] = true; } else if (ext_str == "zkt") { + } else if (ext_str == "smepmp") { + extension_table[EXT_SMEPMP] = true; } else if (ext_str == "smstateen") { extension_table[EXT_SMSTATEEN] = true; } else if (ext_str == "svnapot") { diff --git a/riscv/isa_parser.h b/riscv/isa_parser.h index 8debf5d..6065dbc 100644 --- a/riscv/isa_parser.h +++ b/riscv/isa_parser.h @@ -31,6 +31,7 @@ typedef enum { EXT_ZBPBO, EXT_ZPN, EXT_ZPSFOPERAND, + EXT_SMEPMP, EXT_SMSTATEEN, EXT_SVNAPOT, EXT_SVPBMT, -- cgit v1.1 From e050da4c27635a22d6dbdab920371012b2ea8b4f Mon Sep 17 00:00:00 2001 From: YenHaoChen Date: Wed, 13 Jul 2022 09:23:18 +0800 Subject: Add verify_permissions() for mseccfg_csr_t The mseccfg only exists when enabling the Smepmp extension. If not enabling the Smepmp extension, CSR instructions to the mseccfg raise illegal instruction faults, and the PMP behaviors as hardwiring mseccfg 0 (the reset value of mseccfg). 
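The gating rule described above is simple to model outside of Spike. Below is a minimal standalone C++ sketch (MseccfgModel and IllegalInstruction are illustrative names, not Spike types): any mseccfg access without Smepmp raises an illegal-instruction trap, so PMP checks only ever observe the reset value of zero.

// Standalone sketch, not Spike code: models the access rule described above.
#include <cstdint>
#include <iostream>
#include <stdexcept>

struct IllegalInstruction : std::runtime_error {
  IllegalInstruction() : std::runtime_error("illegal instruction") {}
};

struct MseccfgModel {
  bool smepmp_enabled;   // whether the Smepmp extension is present
  uint64_t value = 0;    // reset value of mseccfg is 0

  // Any CSR access (read or write) traps when Smepmp is absent, so the
  // PMP logic only ever sees the reset value.
  uint64_t access(bool write, uint64_t wdata = 0) {
    if (!smepmp_enabled)
      throw IllegalInstruction();
    if (write)
      value = wdata;
    return value;
  }
};

int main() {
  MseccfgModel with_ext{true}, without_ext{false};
  with_ext.access(true, 0x7);                 // fine: Smepmp present
  try { without_ext.access(false); }          // traps: Smepmp absent
  catch (const IllegalInstruction&) { std::cout << "trap as expected\n"; }
}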
--- riscv/csrs.cc | 6 ++++++ riscv/csrs.h | 1 + 2 files changed, 7 insertions(+) diff --git a/riscv/csrs.cc b/riscv/csrs.cc index ab6b4bb..4e18ac0 100644 --- a/riscv/csrs.cc +++ b/riscv/csrs.cc @@ -272,6 +272,12 @@ mseccfg_csr_t::mseccfg_csr_t(processor_t* const proc, const reg_t addr): basic_csr_t(proc, addr, 0) { } +void mseccfg_csr_t::verify_permissions(insn_t insn, bool write) const { + basic_csr_t::verify_permissions(insn, write); + if (!proc->extension_enabled(EXT_SMEPMP)) + throw trap_illegal_instruction(insn.bits()); +} + bool mseccfg_csr_t::get_mml() const noexcept { return (read() & MSECCFG_MML); } diff --git a/riscv/csrs.h b/riscv/csrs.h index bde31da..6c7c47d 100644 --- a/riscv/csrs.h +++ b/riscv/csrs.h @@ -130,6 +130,7 @@ class pmpcfg_csr_t: public csr_t { class mseccfg_csr_t: public basic_csr_t { public: mseccfg_csr_t(processor_t* const proc, const reg_t addr); + virtual void verify_permissions(insn_t insn, bool write) const override; bool get_mml() const noexcept; bool get_mmwp() const noexcept; bool get_rlb() const noexcept; -- cgit v1.1 From 5eada616d48e1e0f1f9f5add53dfca76dc29de68 Mon Sep 17 00:00:00 2001 From: liweiwei90 <34847211+liweiwei90@users.noreply.github.com> Date: Thu, 14 Jul 2022 09:39:30 +0800 Subject: add check for H extension requires S mode (#1042) --- riscv/csrs.cc | 2 +- riscv/isa_parser.cc | 3 +++ 2 files changed, 4 insertions(+), 1 deletion(-) diff --git a/riscv/csrs.cc b/riscv/csrs.cc index ab6b4bb..83906cc 100644 --- a/riscv/csrs.cc +++ b/riscv/csrs.cc @@ -474,7 +474,7 @@ mstatus_csr_t::mstatus_csr_t(processor_t* const proc, const reg_t addr): } bool mstatus_csr_t::unlogged_write(const reg_t val) noexcept { - const bool has_mpv = proc->extension_enabled('S') && proc->extension_enabled('H'); + const bool has_mpv = proc->extension_enabled('H'); const bool has_gva = has_mpv; const reg_t mask = sstatus_write_mask diff --git a/riscv/isa_parser.cc b/riscv/isa_parser.cc index 0f9eb06..720f373 100644 --- a/riscv/isa_parser.cc +++ b/riscv/isa_parser.cc @@ -246,4 +246,7 @@ isa_parser_t::isa_parser_t(const char* str, const char *priv) max_isa |= reg_t(supervisor) << ('s' - 'a'); extension_table['S'] = true; } + + if (((max_isa >> ('h' - 'a')) & 1) && !supervisor) + bad_isa_string(str, "'H' extension requires S mode"); } -- cgit v1.1 From a3d112d6f29870249df1080c5c610291dbe94442 Mon Sep 17 00:00:00 2001 From: Scott Johnson Date: Wed, 13 Jul 2022 08:50:11 -0700 Subject: Remove mstatush mask as unnecessary Mask in underlying CSR is sufficient. Mask field in rv32_high_csr_t is now unneeded and will be removed next. 
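The point above, that the write mask belongs to the underlying 64-bit CSR rather than the RV32 high-half alias, can be shown with a small standalone model (Csr64 and HighHalfProxy are invented names, not Spike classes): writing all-ones through the unmasked proxy still leaves non-writable high bits clear, because the backing CSR applies its own mask.

// Standalone sketch, not Spike code.
#include <cstdint>
#include <cstdio>

struct Csr64 {
  uint64_t mask;          // writable bits of the full 64-bit CSR
  uint64_t value = 0;
  void write(uint64_t v) { value = (value & ~mask) | (v & mask); }
};

struct HighHalfProxy {    // plays the role of rv32_high_csr_t, with no mask of its own
  Csr64& orig;
  uint32_t read() const { return uint32_t(orig.value >> 32); }
  void write(uint32_t v) {
    // Keep the low half, replace the high half; orig.write() re-applies the
    // real mask, so unwritable high bits still end up clear.
    orig.write((orig.value & 0xffffffffULL) | (uint64_t(v) << 32));
  }
};

int main() {
  Csr64 mstatus{0x0000003f00001888ULL};  // illustrative mask, not the real MSTATUS mask
  HighHalfProxy mstatush{mstatus};
  mstatush.write(0xffffffffu);
  std::printf("high half after write: %08x\n", mstatush.read());  // only masked bits are set
}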
--- riscv/processor.cc | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/riscv/processor.cc b/riscv/processor.cc index 5973baf..febd61f 100644 --- a/riscv/processor.cc +++ b/riscv/processor.cc @@ -192,7 +192,7 @@ void state_t::reset(processor_t* const proc, reg_t max_isa) csrmap[CSR_MSTATUS] = mstatus = std::make_shared(proc, CSR_MSTATUS); if (xlen == 32) { - const reg_t mstatush_mask = MSTATUSH_MPV | MSTATUSH_GVA | MSTATUSH_SBE | MSTATUSH_MBE; + const reg_t mstatush_mask = -1; csrmap[CSR_MSTATUSH] = std::make_shared(proc, CSR_MSTATUSH, mstatush_mask, mstatus); } csrmap[CSR_MEPC] = mepc = std::make_shared(proc, CSR_MEPC); -- cgit v1.1 From 00c38fdb95dff4e18ed75361da03436075a03b3a Mon Sep 17 00:00:00 2001 From: Scott Johnson Date: Wed, 13 Jul 2022 09:01:15 -0700 Subject: Remove unnecessary mask from rv32_high_csr_t constructor --- riscv/csrs.cc | 5 ++--- riscv/csrs.h | 4 ++-- riscv/processor.cc | 7 +++---- 3 files changed, 7 insertions(+), 9 deletions(-) diff --git a/riscv/csrs.cc b/riscv/csrs.cc index 83906cc..18956ff 100644 --- a/riscv/csrs.cc +++ b/riscv/csrs.cc @@ -494,10 +494,9 @@ bool mstatus_csr_t::unlogged_write(const reg_t val) noexcept { } // implement class rv32_high_csr_t -rv32_high_csr_t::rv32_high_csr_t(processor_t* const proc, const reg_t addr, const reg_t mask, csr_t_p orig): +rv32_high_csr_t::rv32_high_csr_t(processor_t* const proc, const reg_t addr, csr_t_p orig): csr_t(proc, addr), - orig(orig), - mask(mask) { + orig(orig) { } reg_t rv32_high_csr_t::read() const noexcept { diff --git a/riscv/csrs.h b/riscv/csrs.h index bde31da..7b1f87c 100644 --- a/riscv/csrs.h +++ b/riscv/csrs.h @@ -251,14 +251,14 @@ typedef std::shared_ptr mstatus_csr_t_p; class rv32_high_csr_t: public csr_t { public: - rv32_high_csr_t(processor_t* const proc, const reg_t addr, const reg_t mask, csr_t_p orig); + rv32_high_csr_t(processor_t* const proc, const reg_t addr, csr_t_p orig); virtual reg_t read() const noexcept override; virtual void verify_permissions(insn_t insn, bool write) const override; protected: virtual bool unlogged_write(const reg_t val) noexcept override; private: csr_t_p orig; - const reg_t mask; + const reg_t mask = -1; }; class sstatus_proxy_csr_t final: public base_status_csr_t { diff --git a/riscv/processor.cc b/riscv/processor.cc index febd61f..c0f4964 100644 --- a/riscv/processor.cc +++ b/riscv/processor.cc @@ -192,8 +192,7 @@ void state_t::reset(processor_t* const proc, reg_t max_isa) csrmap[CSR_MSTATUS] = mstatus = std::make_shared(proc, CSR_MSTATUS); if (xlen == 32) { - const reg_t mstatush_mask = -1; - csrmap[CSR_MSTATUSH] = std::make_shared(proc, CSR_MSTATUSH, mstatush_mask, mstatus); + csrmap[CSR_MSTATUSH] = std::make_shared(proc, CSR_MSTATUSH, mstatus); } csrmap[CSR_MEPC] = mepc = std::make_shared(proc, CSR_MEPC); csrmap[CSR_MTVAL] = mtval = std::make_shared(proc, CSR_MTVAL, 0); @@ -405,13 +404,13 @@ void state_t::reset(processor_t* const proc, reg_t max_isa) const reg_t mstateen_mask = i == 0 ? mstateen0_mask : MSTATEEN_HSTATEEN; csrmap[CSR_MSTATEEN0 + i] = mstateen[i] = std::make_shared(proc, CSR_MSTATEEN0 + i, mstateen_mask, 0); if (xlen == 32) { - csrmap[CSR_MSTATEEN0H + i] = std::make_shared(proc, CSR_MSTATEEN0H + i, -1, mstateen[i]); + csrmap[CSR_MSTATEEN0H + i] = std::make_shared(proc, CSR_MSTATEEN0H + i, mstateen[i]); } const reg_t hstateen_mask = i == 0 ? 
hstateen0_mask : HSTATEEN_SSTATEEN; csrmap[CSR_HSTATEEN0 + i] = hstateen[i] = std::make_shared(proc, CSR_HSTATEEN0 + i, hstateen_mask, 0, i); if (xlen == 32) { - csrmap[CSR_HSTATEEN0H + i] = std::make_shared(proc, CSR_HSTATEEN0H + i, -1, hstateen[i]); + csrmap[CSR_HSTATEEN0H + i] = std::make_shared(proc, CSR_HSTATEEN0H + i, hstateen[i]); } const reg_t sstateen_mask = i == 0 ? sstateen0_mask : 0; -- cgit v1.1 From 8f36f1a5f8a47282743706e7777a277b9f17ba6f Mon Sep 17 00:00:00 2001 From: Scott Johnson Date: Wed, 13 Jul 2022 09:14:05 -0700 Subject: Remove no-longer-needed mask from rv32_high_csr_t --- riscv/csrs.cc | 4 ++-- riscv/csrs.h | 1 - 2 files changed, 2 insertions(+), 3 deletions(-) diff --git a/riscv/csrs.cc b/riscv/csrs.cc index 18956ff..3a929ff 100644 --- a/riscv/csrs.cc +++ b/riscv/csrs.cc @@ -500,7 +500,7 @@ rv32_high_csr_t::rv32_high_csr_t(processor_t* const proc, const reg_t addr, csr_ } reg_t rv32_high_csr_t::read() const noexcept { - return (orig->read() >> 32) & mask; + return (orig->read() >> 32) & 0xffffffffU; } void rv32_high_csr_t::verify_permissions(insn_t insn, bool write) const { @@ -508,7 +508,7 @@ void rv32_high_csr_t::verify_permissions(insn_t insn, bool write) const { } bool rv32_high_csr_t::unlogged_write(const reg_t val) noexcept { - return orig->unlogged_write((orig->written_value() & ~(mask << 32)) | ((val & mask) << 32)); + return orig->unlogged_write((orig->written_value() << 32 >> 32) | ((val & 0xffffffffU) << 32)); } // implement class sstatus_csr_t diff --git a/riscv/csrs.h b/riscv/csrs.h index 7b1f87c..3998d79 100644 --- a/riscv/csrs.h +++ b/riscv/csrs.h @@ -258,7 +258,6 @@ class rv32_high_csr_t: public csr_t { virtual bool unlogged_write(const reg_t val) noexcept override; private: csr_t_p orig; - const reg_t mask = -1; }; class sstatus_proxy_csr_t final: public base_status_csr_t { -- cgit v1.1 From 85ab2228ddb802c33a967349d69b2d948846bd01 Mon Sep 17 00:00:00 2001 From: Scott Johnson Date: Wed, 13 Jul 2022 09:33:36 -0700 Subject: Add proxy for accessing the low 32 bits of a 64-bit CSR Use this for mstatus on RV32 so that `csrw mstatus` does not modify the bits in `mstatush`. Fixes #1044. 
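A minimal standalone model of the behaviour this commit adds (low_read, low_write and high_read are illustrative helpers, not Spike code): a write through the low-half alias replaces only bits 31:0 and leaves the bits exposed via mstatush untouched.

// Standalone sketch, not Spike code.
#include <cstdint>
#include <cassert>

struct Csr64 { uint64_t value = 0; };

uint32_t low_read(const Csr64& c)        { return uint32_t(c.value); }
void     low_write(Csr64& c, uint32_t v) { c.value = (c.value >> 32 << 32) | v; }
uint32_t high_read(const Csr64& c)       { return uint32_t(c.value >> 32); }

int main() {
  Csr64 mstatus;
  mstatus.value = 0x00000005ffffffffULL;      // pretend MPV/GVA-like bits live in the upper half
  low_write(mstatus, 0x00001800u);            // what "csrw mstatus" does on RV32
  assert(high_read(mstatus) == 0x00000005u);  // mstatush bits survive the write
  assert(low_read(mstatus)  == 0x00001800u);
  return 0;
}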
--- riscv/csrs.cc | 18 ++++++++++++++++++ riscv/csrs.h | 15 +++++++++++++++ riscv/processor.cc | 5 ++++- 3 files changed, 37 insertions(+), 1 deletion(-) diff --git a/riscv/csrs.cc b/riscv/csrs.cc index 3a929ff..dac3eef 100644 --- a/riscv/csrs.cc +++ b/riscv/csrs.cc @@ -493,6 +493,24 @@ bool mstatus_csr_t::unlogged_write(const reg_t val) noexcept { return true; } +// implement class rv32_low_csr_t +rv32_low_csr_t::rv32_low_csr_t(processor_t* const proc, const reg_t addr, csr_t_p orig): + csr_t(proc, addr), + orig(orig) { +} + +reg_t rv32_low_csr_t::read() const noexcept { + return orig->read() & 0xffffffffU; +} + +void rv32_low_csr_t::verify_permissions(insn_t insn, bool write) const { + orig->verify_permissions(insn, write); +} + +bool rv32_low_csr_t::unlogged_write(const reg_t val) noexcept { + return orig->unlogged_write((orig->written_value() >> 32 << 32) | (val & 0xffffffffU)); +} + // implement class rv32_high_csr_t rv32_high_csr_t::rv32_high_csr_t(processor_t* const proc, const reg_t addr, csr_t_p orig): csr_t(proc, addr), diff --git a/riscv/csrs.h b/riscv/csrs.h index 3998d79..500bde7 100644 --- a/riscv/csrs.h +++ b/riscv/csrs.h @@ -54,7 +54,9 @@ class csr_t { const unsigned csr_priv; const bool csr_read_only; + // For access to written_value() and unlogged_write(): friend class rv32_high_csr_t; + friend class rv32_low_csr_t; }; typedef std::shared_ptr csr_t_p; @@ -249,6 +251,19 @@ class mstatus_csr_t final: public base_status_csr_t { typedef std::shared_ptr mstatus_csr_t_p; +// For RV32 CSRs that are split into two, e.g. mstatus/mstatush +// CSRW should only modify the lower half +class rv32_low_csr_t: public csr_t { + public: + rv32_low_csr_t(processor_t* const proc, const reg_t addr, csr_t_p orig); + virtual reg_t read() const noexcept override; + virtual void verify_permissions(insn_t insn, bool write) const override; + protected: + virtual bool unlogged_write(const reg_t val) noexcept override; + private: + csr_t_p orig; +}; + class rv32_high_csr_t: public csr_t { public: rv32_high_csr_t(processor_t* const proc, const reg_t addr, csr_t_p orig); diff --git a/riscv/processor.cc b/riscv/processor.cc index c0f4964..28129ba 100644 --- a/riscv/processor.cc +++ b/riscv/processor.cc @@ -189,10 +189,13 @@ void state_t::reset(processor_t* const proc, reg_t max_isa) prv = PRV_M; v = false; csrmap[CSR_MISA] = misa = std::make_shared(proc, CSR_MISA, max_isa); - csrmap[CSR_MSTATUS] = mstatus = std::make_shared(proc, CSR_MSTATUS); + mstatus = std::make_shared(proc, CSR_MSTATUS); if (xlen == 32) { + csrmap[CSR_MSTATUS] = std::make_shared(proc, CSR_MSTATUS, mstatus); csrmap[CSR_MSTATUSH] = std::make_shared(proc, CSR_MSTATUSH, mstatus); + } else { + csrmap[CSR_MSTATUS] = mstatus; } csrmap[CSR_MEPC] = mepc = std::make_shared(proc, CSR_MEPC); csrmap[CSR_MTVAL] = mtval = std::make_shared(proc, CSR_MTVAL, 0); -- cgit v1.1 From f85b76edd00a529b3dbda88aee2802014c09de7d Mon Sep 17 00:00:00 2001 From: Scott Johnson Date: Wed, 13 Jul 2022 09:48:23 -0700 Subject: Use rv32_low_csr_t for Smstateen CSRs Otherwise they will have the same problem as #1044 --- riscv/processor.cc | 10 ++++++++-- 1 file changed, 8 insertions(+), 2 deletions(-) diff --git a/riscv/processor.cc b/riscv/processor.cc index 28129ba..d431c68 100644 --- a/riscv/processor.cc +++ b/riscv/processor.cc @@ -405,15 +405,21 @@ void state_t::reset(processor_t* const proc, reg_t max_isa) const reg_t mstateen0_mask = hstateen0_mask; for (int i = 0; i < 4; i++) { const reg_t mstateen_mask = i == 0 ? 
mstateen0_mask : MSTATEEN_HSTATEEN; - csrmap[CSR_MSTATEEN0 + i] = mstateen[i] = std::make_shared(proc, CSR_MSTATEEN0 + i, mstateen_mask, 0); + mstateen[i] = std::make_shared(proc, CSR_MSTATEEN0 + i, mstateen_mask, 0); if (xlen == 32) { + csrmap[CSR_MSTATEEN0 + i] = std::make_shared(proc, CSR_MSTATEEN0 + i, mstateen[i]); csrmap[CSR_MSTATEEN0H + i] = std::make_shared(proc, CSR_MSTATEEN0H + i, mstateen[i]); + } else { + csrmap[CSR_MSTATEEN0 + i] = mstateen[i]; } const reg_t hstateen_mask = i == 0 ? hstateen0_mask : HSTATEEN_SSTATEEN; - csrmap[CSR_HSTATEEN0 + i] = hstateen[i] = std::make_shared(proc, CSR_HSTATEEN0 + i, hstateen_mask, 0, i); + hstateen[i] = std::make_shared(proc, CSR_HSTATEEN0 + i, hstateen_mask, 0, i); if (xlen == 32) { + csrmap[CSR_HSTATEEN0 + i] = std::make_shared(proc, CSR_HSTATEEN0 + i, hstateen[i]); csrmap[CSR_HSTATEEN0H + i] = std::make_shared(proc, CSR_HSTATEEN0H + i, hstateen[i]); + } else { + csrmap[CSR_HSTATEEN0 + i] = hstateen[i]; } const reg_t sstateen_mask = i == 0 ? sstateen0_mask : 0; -- cgit v1.1 From b21a28bce174f2d1ac3e8a424bc9b9b1f01251a6 Mon Sep 17 00:00:00 2001 From: Scott Johnson Date: Wed, 13 Jul 2022 10:21:43 -0700 Subject: Add assertion to ensure proper logging of mstatus changes on RV32 --- riscv/csrs.cc | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/riscv/csrs.cc b/riscv/csrs.cc index dac3eef..7a52353 100644 --- a/riscv/csrs.cc +++ b/riscv/csrs.cc @@ -455,6 +455,10 @@ sstatus_proxy_csr_t::sstatus_proxy_csr_t(processor_t* const proc, const reg_t ad bool sstatus_proxy_csr_t::unlogged_write(const reg_t val) noexcept { const reg_t new_mstatus = (mstatus->read() & ~sstatus_write_mask) | (val & sstatus_write_mask); + // On RV32 this will only log the low 32 bits, so make sure we're + // not modifying anything in the upper 32 bits. + assert((sstatus_write_mask & 0xffffffffU) == sstatus_write_mask); + mstatus->write(new_mstatus); return false; // avoid double logging: already logged by mstatus->write() } -- cgit v1.1 From 3688fd8302d1b7b8eea1dd8f6206ceab1bedfb2c Mon Sep 17 00:00:00 2001 From: Scott Johnson Date: Wed, 13 Jul 2022 10:45:41 -0700 Subject: Properly log mstatush side effect updates These have never been logged properly. 
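A rough standalone sketch of the logging pattern applied below (commit_log, log_write and write_mstatus64 are invented stand-ins, not Spike's API): on RV32, a 64-bit side-effect write to mstatus also pushes the new upper half through the mstatush proxy so the change shows up in the commit log.

// Standalone sketch, not Spike code.
#include <cstdint>
#include <cstdio>
#include <utility>
#include <vector>

std::vector<std::pair<const char*, uint32_t>> commit_log;

void log_write(const char* name, uint32_t v) { commit_log.push_back({name, v}); }

// Hypothetical helper: apply a full 64-bit mstatus update on an RV32 hart.
void write_mstatus64(uint64_t s, bool rv32) {
  log_write("mstatus", uint32_t(s));                    // low half is always logged
  if (rv32) log_write("mstatush", uint32_t(s >> 32));   // mirror the high half too
}

int main() {
  write_mstatus64(0x0000008000001800ULL, /*rv32=*/true);  // e.g. a trap setting an upper-half bit
  for (auto& e : commit_log) std::printf("%s <- 0x%08x\n", e.first, e.second);
}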
--- riscv/csrs.cc | 4 +++- riscv/processor.cc | 3 ++- riscv/processor.h | 1 + 3 files changed, 6 insertions(+), 2 deletions(-) diff --git a/riscv/csrs.cc b/riscv/csrs.cc index 7a52353..6f8f260 100644 --- a/riscv/csrs.cc +++ b/riscv/csrs.cc @@ -616,7 +616,9 @@ bool misa_csr_t::unlogged_write(const reg_t val) noexcept { | (1 << CAUSE_STORE_GUEST_PAGE_FAULT) ; state->medeleg->write(state->medeleg->read() & ~hypervisor_exceptions); - state->mstatus->write(state->mstatus->read() & ~(MSTATUS_GVA | MSTATUS_MPV)); + const reg_t new_mstatus = state->mstatus->read() & ~(MSTATUS_GVA | MSTATUS_MPV); + state->mstatus->write(new_mstatus); + if (state->mstatush) state->mstatush->write(new_mstatus >> 32); // log mstatush change state->mie->write_with_mask(MIP_HS_MASK, 0); // also takes care of hie, sie state->mip->write_with_mask(MIP_HS_MASK, 0); // also takes care of hip, sip, hvip state->hstatus->write(0); diff --git a/riscv/processor.cc b/riscv/processor.cc index d431c68..8f77b47 100644 --- a/riscv/processor.cc +++ b/riscv/processor.cc @@ -193,7 +193,7 @@ void state_t::reset(processor_t* const proc, reg_t max_isa) if (xlen == 32) { csrmap[CSR_MSTATUS] = std::make_shared(proc, CSR_MSTATUS, mstatus); - csrmap[CSR_MSTATUSH] = std::make_shared(proc, CSR_MSTATUSH, mstatus); + csrmap[CSR_MSTATUSH] = mstatush = std::make_shared(proc, CSR_MSTATUSH, mstatus); } else { csrmap[CSR_MSTATUS] = mstatus; } @@ -824,6 +824,7 @@ void processor_t::take_trap(trap_t& t, reg_t epc) s = set_field(s, MSTATUS_MPV, curr_virt); s = set_field(s, MSTATUS_GVA, t.has_gva()); state.mstatus->write(s); + if (state.mstatush) state.mstatush->write(s >> 32); // log mstatush change set_privilege(PRV_M); } } diff --git a/riscv/processor.h b/riscv/processor.h index 727c404..347ae16 100644 --- a/riscv/processor.h +++ b/riscv/processor.h @@ -133,6 +133,7 @@ struct state_t bool v; misa_csr_t_p misa; mstatus_csr_t_p mstatus; + csr_t_p mstatush; csr_t_p mepc; csr_t_p mtval; csr_t_p mtvec; -- cgit v1.1 From e8f5ce062b755a1c76eb02bc98adefbc64183cd3 Mon Sep 17 00:00:00 2001 From: Weiwei Li Date: Fri, 8 Jul 2022 20:48:09 +0800 Subject: add support for m/henvcfgh csrs --- riscv/processor.cc | 18 +++++++++++++++--- 1 file changed, 15 insertions(+), 3 deletions(-) diff --git a/riscv/processor.cc b/riscv/processor.cc index 8f77b47..4b0b330 100644 --- a/riscv/processor.cc +++ b/riscv/processor.cc @@ -385,11 +385,18 @@ void state_t::reset(processor_t* const proc, reg_t max_isa) csrmap[CSR_MIMPID] = std::make_shared(proc, CSR_MIMPID, 0); csrmap[CSR_MVENDORID] = std::make_shared(proc, CSR_MVENDORID, 0); csrmap[CSR_MHARTID] = std::make_shared(proc, CSR_MHARTID, proc->get_id()); + const reg_t menvcfg_mask = (proc->extension_enabled(EXT_ZICBOM) ? MENVCFG_CBCFE | MENVCFG_CBIE : 0) | (proc->extension_enabled(EXT_ZICBOZ) ? MENVCFG_CBZE : 0) | (proc->extension_enabled(EXT_SVPBMT) ? MENVCFG_PBMTE : 0); const reg_t menvcfg_init = (proc->extension_enabled(EXT_SVPBMT) ? MENVCFG_PBMTE : 0); - csrmap[CSR_MENVCFG] = menvcfg = std::make_shared(proc, CSR_MENVCFG, menvcfg_mask, menvcfg_init); + menvcfg = std::make_shared(proc, CSR_MENVCFG, menvcfg_mask, menvcfg_init); + if (xlen == 32) { + csrmap[CSR_MENVCFG] = std::make_shared(proc, CSR_MENVCFG, menvcfg); + csrmap[CSR_MENVCFGH] = std::make_shared(proc, CSR_MENVCFGH, menvcfg); + } else { + csrmap[CSR_MENVCFG] = menvcfg; + } const reg_t senvcfg_mask = (proc->extension_enabled(EXT_ZICBOM) ? SENVCFG_CBCFE | SENVCFG_CBIE : 0) | (proc->extension_enabled(EXT_ZICBOZ) ? 
SENVCFG_CBZE : 0); csrmap[CSR_SENVCFG] = senvcfg = std::make_shared(proc, CSR_SENVCFG, senvcfg_mask, 0); @@ -397,8 +404,13 @@ void state_t::reset(processor_t* const proc, reg_t max_isa) (proc->extension_enabled(EXT_ZICBOZ) ? HENVCFG_CBZE : 0) | (proc->extension_enabled(EXT_SVPBMT) ? HENVCFG_PBMTE : 0); const reg_t henvcfg_init = (proc->extension_enabled(EXT_SVPBMT) ? HENVCFG_PBMTE : 0); - csrmap[CSR_HENVCFG] = henvcfg = std::make_shared(proc, CSR_HENVCFG, henvcfg_mask, henvcfg_init, menvcfg); - + henvcfg = std::make_shared(proc, CSR_HENVCFG, henvcfg_mask, henvcfg_init, menvcfg); + if (xlen == 32) { + csrmap[CSR_HENVCFG] = std::make_shared(proc, CSR_HENVCFG, henvcfg); + csrmap[CSR_HENVCFGH] = std::make_shared(proc, CSR_HENVCFGH, henvcfg); + } else { + csrmap[CSR_HENVCFG] = henvcfg; + } if (proc->extension_enabled_const(EXT_SMSTATEEN)) { const reg_t sstateen0_mask = (proc->extension_enabled(EXT_ZFINX) ? SSTATEEN0_FCSR : 0) | SSTATEEN0_CS; const reg_t hstateen0_mask = sstateen0_mask | HSTATEEN0_SENVCFG | HSTATEEN_SSTATEEN; -- cgit v1.1 From dd11aceaf6fb19c2e9ab549f1bcc3ae89ce1b735 Mon Sep 17 00:00:00 2001 From: Weiwei Li Date: Fri, 8 Jul 2022 20:53:55 +0800 Subject: add support for mconfigptr csr: it's hardwired to zero currently --- riscv/processor.cc | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/riscv/processor.cc b/riscv/processor.cc index 4b0b330..92f4b41 100644 --- a/riscv/processor.cc +++ b/riscv/processor.cc @@ -385,7 +385,7 @@ void state_t::reset(processor_t* const proc, reg_t max_isa) csrmap[CSR_MIMPID] = std::make_shared(proc, CSR_MIMPID, 0); csrmap[CSR_MVENDORID] = std::make_shared(proc, CSR_MVENDORID, 0); csrmap[CSR_MHARTID] = std::make_shared(proc, CSR_MHARTID, proc->get_id()); - + csrmap[CSR_MCONFIGPTR] = std::make_shared(proc, CSR_MCONFIGPTR, 0); const reg_t menvcfg_mask = (proc->extension_enabled(EXT_ZICBOM) ? MENVCFG_CBCFE | MENVCFG_CBIE : 0) | (proc->extension_enabled(EXT_ZICBOZ) ? MENVCFG_CBZE : 0) | (proc->extension_enabled(EXT_SVPBMT) ? MENVCFG_PBMTE : 0); -- cgit v1.1 From 031681b2f3bfa120769d9ead1ca866ca1be163b4 Mon Sep 17 00:00:00 2001 From: Scott Johnson Date: Fri, 15 Jul 2022 18:06:24 -0700 Subject: Split up misaligned load into several steps Since the middle step is about to get much more complex --- riscv/mmu.h | 7 +++++-- 1 file changed, 5 insertions(+), 2 deletions(-) diff --git a/riscv/mmu.h b/riscv/mmu.h index 5e776a9..6d8072b 100644 --- a/riscv/mmu.h +++ b/riscv/mmu.h @@ -56,8 +56,11 @@ public: { #ifdef RISCV_ENABLE_MISALIGNED reg_t res = 0; - for (size_t i = 0; i < size; i++) - res += (reg_t)load_uint8(addr + (target_big_endian? size-1-i : i)) << (i * 8); + for (size_t i = 0; i < size; i++) { + const reg_t byteaddr = addr + (target_big_endian? size-1-i : i); + const reg_t bytedata = (reg_t)load_uint8(byteaddr); + res += bytedata << (i * 8); + } return res; #else bool gva = ((proc) ? 
proc->state.v : false) || (RISCV_XLATE_VIRT & xlate_flags); -- cgit v1.1 From a2697ac775dac31e0aabf0223171b1e2f8a7fcde Mon Sep 17 00:00:00 2001 From: Scott Johnson Date: Fri, 15 Jul 2022 18:08:00 -0700 Subject: Split up misaligned store into several steps Since the last step is about to get much more complex --- riscv/mmu.h | 7 +++++-- 1 file changed, 5 insertions(+), 2 deletions(-) diff --git a/riscv/mmu.h b/riscv/mmu.h index 6d8072b..f652bf8 100644 --- a/riscv/mmu.h +++ b/riscv/mmu.h @@ -71,8 +71,11 @@ public: inline void misaligned_store(reg_t addr, reg_t data, size_t size, uint32_t xlate_flags, bool actually_store=true) { #ifdef RISCV_ENABLE_MISALIGNED - for (size_t i = 0; i < size; i++) - store_uint8(addr + (target_big_endian? size-1-i : i), data >> (i * 8), actually_store); + for (size_t i = 0; i < size; i++) { + const reg_t byteaddr = addr + (target_big_endian? size-1-i : i); + const reg_t bytedata = data >> (i * 8); + store_uint8(byteaddr, bytedata, actually_store); + } #else bool gva = ((proc) ? proc->state.v : false) || (RISCV_XLATE_VIRT & xlate_flags); throw trap_store_address_misaligned(gva, addr, 0, 0); -- cgit v1.1 From 61a2c0ee6306562e084b25e4734d6ae725c475b4 Mon Sep 17 00:00:00 2001 From: Weiwei Li Date: Sun, 17 Jul 2022 09:13:06 +0800 Subject: extract the progress of computing the inital value of mstatus into separate function compute_mstatus_initial_value() --- riscv/csrs.cc | 20 +++++++++++--------- riscv/csrs.h | 1 + 2 files changed, 12 insertions(+), 9 deletions(-) diff --git a/riscv/csrs.cc b/riscv/csrs.cc index 6f8f260..be23a2e 100644 --- a/riscv/csrs.cc +++ b/riscv/csrs.cc @@ -466,15 +466,7 @@ bool sstatus_proxy_csr_t::unlogged_write(const reg_t val) noexcept { // implement class mstatus_csr_t mstatus_csr_t::mstatus_csr_t(processor_t* const proc, const reg_t addr): base_status_csr_t(proc, addr), - val(0 - | (proc->extension_enabled_const('U') && (proc->get_const_xlen() != 32) ? set_field((reg_t)0, MSTATUS_UXL, xlen_to_uxl(proc->get_const_xlen())) : 0) - | (proc->extension_enabled_const('S') && (proc->get_const_xlen() != 32) ? set_field((reg_t)0, MSTATUS_SXL, xlen_to_uxl(proc->get_const_xlen())) : 0) - -#ifdef RISCV_ENABLE_DUAL_ENDIAN - | (proc->get_mmu()->is_target_big_endian() ? MSTATUS_UBE | MSTATUS_SBE | MSTATUS_MBE : 0) -#endif - | 0 // initial value for mstatus - ) { + val(compute_mstatus_initial_value()) { } bool mstatus_csr_t::unlogged_write(const reg_t val) noexcept { @@ -497,6 +489,16 @@ bool mstatus_csr_t::unlogged_write(const reg_t val) noexcept { return true; } +reg_t mstatus_csr_t::compute_mstatus_initial_value() const noexcept { + return 0 + | (proc->extension_enabled_const('U') && (proc->get_const_xlen() != 32) ? set_field((reg_t)0, MSTATUS_UXL, xlen_to_uxl(proc->get_const_xlen())) : 0) + | (proc->extension_enabled_const('S') && (proc->get_const_xlen() != 32) ? set_field((reg_t)0, MSTATUS_SXL, xlen_to_uxl(proc->get_const_xlen())) : 0) +#ifdef RISCV_ENABLE_DUAL_ENDIAN + | (proc->get_mmu()->is_target_big_endian() ? 
MSTATUS_UBE | MSTATUS_SBE | MSTATUS_MBE : 0) +#endif + | 0; // initial value for mstatus +} + // implement class rv32_low_csr_t rv32_low_csr_t::rv32_low_csr_t(processor_t* const proc, const reg_t addr, csr_t_p orig): csr_t(proc, addr), diff --git a/riscv/csrs.h b/riscv/csrs.h index 500bde7..ea6d4d9 100644 --- a/riscv/csrs.h +++ b/riscv/csrs.h @@ -246,6 +246,7 @@ class mstatus_csr_t final: public base_status_csr_t { protected: virtual bool unlogged_write(const reg_t val) noexcept override; private: + reg_t compute_mstatus_initial_value() const noexcept; reg_t val; }; -- cgit v1.1 From f82e54124345f348abaa80ec82d67528a9a8f774 Mon Sep 17 00:00:00 2001 From: Weiwei Li Date: Sun, 17 Jul 2022 09:46:09 +0800 Subject: remove unnecessary ifdef for RISCV_ENABLE_DUAL_ENDIAN the default target endian is always little endian: - mmu::is_target_big_endian() return false - sim_t::get_target_endianness() return memif_endianness_little when RISCV_ENABLE_DUAL_ENDIAN macro is undefined --- fesvr/htif.h | 8 -------- riscv/csrs.cc | 2 -- riscv/sim.cc | 4 ---- 3 files changed, 14 deletions(-) diff --git a/fesvr/htif.h b/fesvr/htif.h index 3cee25f..ca5b362 100644 --- a/fesvr/htif.h +++ b/fesvr/htif.h @@ -31,26 +31,18 @@ class htif_t : public chunked_memif_t template inline T from_target(target_endian n) const { -#ifdef RISCV_ENABLE_DUAL_ENDIAN memif_endianness_t endianness = get_target_endianness(); assert(endianness == memif_endianness_little || endianness == memif_endianness_big); return endianness == memif_endianness_big? n.from_be() : n.from_le(); -#else - return n.from_le(); -#endif } template inline target_endian to_target(T n) const { -#ifdef RISCV_ENABLE_DUAL_ENDIAN memif_endianness_t endianness = get_target_endianness(); assert(endianness == memif_endianness_little || endianness == memif_endianness_big); return endianness == memif_endianness_big? target_endian::to_be(n) : target_endian::to_le(n); -#else - return target_endian::to_le(n); -#endif } protected: diff --git a/riscv/csrs.cc b/riscv/csrs.cc index be23a2e..c27410c 100644 --- a/riscv/csrs.cc +++ b/riscv/csrs.cc @@ -493,9 +493,7 @@ reg_t mstatus_csr_t::compute_mstatus_initial_value() const noexcept { return 0 | (proc->extension_enabled_const('U') && (proc->get_const_xlen() != 32) ? set_field((reg_t)0, MSTATUS_UXL, xlen_to_uxl(proc->get_const_xlen())) : 0) | (proc->extension_enabled_const('S') && (proc->get_const_xlen() != 32) ? set_field((reg_t)0, MSTATUS_SXL, xlen_to_uxl(proc->get_const_xlen())) : 0) -#ifdef RISCV_ENABLE_DUAL_ENDIAN | (proc->get_mmu()->is_target_big_endian() ? MSTATUS_UBE | MSTATUS_SBE | MSTATUS_MBE : 0) -#endif | 0; // initial value for mstatus } diff --git a/riscv/sim.cc b/riscv/sim.cc index 069e1b5..0000537 100644 --- a/riscv/sim.cc +++ b/riscv/sim.cc @@ -425,11 +425,7 @@ void sim_t::set_target_endianness(memif_endianness_t endianness) memif_endianness_t sim_t::get_target_endianness() const { -#ifdef RISCV_ENABLE_DUAL_ENDIAN return debug_mmu->is_target_big_endian()? 
memif_endianness_big : memif_endianness_little; -#else - return memif_endianness_little; -#endif } void sim_t::proc_reset(unsigned id) -- cgit v1.1 From 89a79b673208a61f7ff16be628955109a742c1ac Mon Sep 17 00:00:00 2001 From: Weiwei Li Date: Sun, 17 Jul 2022 10:00:23 +0800 Subject: Fix the initial value and write mask for mstatus - MPRV is read-only 0 if U-mode is not supported - If U-mode is not supported, UBE is read-only 0 - If S-mode is not supported, SBE is read-only 0 --- riscv/csrs.cc | 8 ++++++-- 1 file changed, 6 insertions(+), 2 deletions(-) diff --git a/riscv/csrs.cc b/riscv/csrs.cc index c27410c..57778c3 100644 --- a/riscv/csrs.cc +++ b/riscv/csrs.cc @@ -474,7 +474,8 @@ bool mstatus_csr_t::unlogged_write(const reg_t val) noexcept { const bool has_gva = has_mpv; const reg_t mask = sstatus_write_mask - | MSTATUS_MIE | MSTATUS_MPIE | MSTATUS_MPRV + | MSTATUS_MIE | MSTATUS_MPIE + | (proc->extension_enabled('U') ? MSTATUS_MPRV : 0) | MSTATUS_MPP | MSTATUS_TW | (proc->extension_enabled('S') ? MSTATUS_TSR : 0) | (has_page ? MSTATUS_TVM : 0) @@ -490,10 +491,13 @@ bool mstatus_csr_t::unlogged_write(const reg_t val) noexcept { } reg_t mstatus_csr_t::compute_mstatus_initial_value() const noexcept { + const reg_t big_endian_bits = (proc->extension_enabled_const('U') ? MSTATUS_UBE : 0) + | (proc->extension_enabled_const('S') ? MSTATUS_SBE : 0) + | MSTATUS_MBE; return 0 | (proc->extension_enabled_const('U') && (proc->get_const_xlen() != 32) ? set_field((reg_t)0, MSTATUS_UXL, xlen_to_uxl(proc->get_const_xlen())) : 0) | (proc->extension_enabled_const('S') && (proc->get_const_xlen() != 32) ? set_field((reg_t)0, MSTATUS_SXL, xlen_to_uxl(proc->get_const_xlen())) : 0) - | (proc->get_mmu()->is_target_big_endian() ? MSTATUS_UBE | MSTATUS_SBE | MSTATUS_MBE : 0) + | (proc->get_mmu()->is_target_big_endian() ? big_endian_bits : 0) | 0; // initial value for mstatus } -- cgit v1.1 From d6f332d63aa5141cacd652cc5476e9d01ceb91e8 Mon Sep 17 00:00:00 2001 From: Weiwei Li Date: Tue, 12 Jul 2022 23:21:01 +0800 Subject: add U mode check for *envcfg* - If U-mode is not supported, then registers menvcfg and menvcfgh do not exist - Since H extension requires S-mode, and S mode can not exsit without U-mode, so senvcfg, henvcfg/henvcfgh also do not exist if U-mode is not supported --- riscv/processor.cc | 50 ++++++++++++++++++++++++++------------------------ 1 file changed, 26 insertions(+), 24 deletions(-) diff --git a/riscv/processor.cc b/riscv/processor.cc index 92f4b41..df82f9e 100644 --- a/riscv/processor.cc +++ b/riscv/processor.cc @@ -386,30 +386,32 @@ void state_t::reset(processor_t* const proc, reg_t max_isa) csrmap[CSR_MVENDORID] = std::make_shared(proc, CSR_MVENDORID, 0); csrmap[CSR_MHARTID] = std::make_shared(proc, CSR_MHARTID, proc->get_id()); csrmap[CSR_MCONFIGPTR] = std::make_shared(proc, CSR_MCONFIGPTR, 0); - const reg_t menvcfg_mask = (proc->extension_enabled(EXT_ZICBOM) ? MENVCFG_CBCFE | MENVCFG_CBIE : 0) | - (proc->extension_enabled(EXT_ZICBOZ) ? MENVCFG_CBZE : 0) | - (proc->extension_enabled(EXT_SVPBMT) ? MENVCFG_PBMTE : 0); - const reg_t menvcfg_init = (proc->extension_enabled(EXT_SVPBMT) ? MENVCFG_PBMTE : 0); - menvcfg = std::make_shared(proc, CSR_MENVCFG, menvcfg_mask, menvcfg_init); - if (xlen == 32) { - csrmap[CSR_MENVCFG] = std::make_shared(proc, CSR_MENVCFG, menvcfg); - csrmap[CSR_MENVCFGH] = std::make_shared(proc, CSR_MENVCFGH, menvcfg); - } else { - csrmap[CSR_MENVCFG] = menvcfg; - } - const reg_t senvcfg_mask = (proc->extension_enabled(EXT_ZICBOM) ? 
SENVCFG_CBCFE | SENVCFG_CBIE : 0) | - (proc->extension_enabled(EXT_ZICBOZ) ? SENVCFG_CBZE : 0); - csrmap[CSR_SENVCFG] = senvcfg = std::make_shared(proc, CSR_SENVCFG, senvcfg_mask, 0); - const reg_t henvcfg_mask = (proc->extension_enabled(EXT_ZICBOM) ? HENVCFG_CBCFE | HENVCFG_CBIE : 0) | - (proc->extension_enabled(EXT_ZICBOZ) ? HENVCFG_CBZE : 0) | - (proc->extension_enabled(EXT_SVPBMT) ? HENVCFG_PBMTE : 0); - const reg_t henvcfg_init = (proc->extension_enabled(EXT_SVPBMT) ? HENVCFG_PBMTE : 0); - henvcfg = std::make_shared(proc, CSR_HENVCFG, henvcfg_mask, henvcfg_init, menvcfg); - if (xlen == 32) { - csrmap[CSR_HENVCFG] = std::make_shared(proc, CSR_HENVCFG, henvcfg); - csrmap[CSR_HENVCFGH] = std::make_shared(proc, CSR_HENVCFGH, henvcfg); - } else { - csrmap[CSR_HENVCFG] = henvcfg; + if (proc->extension_enabled_const('U')) { + const reg_t menvcfg_mask = (proc->extension_enabled(EXT_ZICBOM) ? MENVCFG_CBCFE | MENVCFG_CBIE : 0) | + (proc->extension_enabled(EXT_ZICBOZ) ? MENVCFG_CBZE : 0) | + (proc->extension_enabled(EXT_SVPBMT) ? MENVCFG_PBMTE : 0); + const reg_t menvcfg_init = (proc->extension_enabled(EXT_SVPBMT) ? MENVCFG_PBMTE : 0); + menvcfg = std::make_shared(proc, CSR_MENVCFG, menvcfg_mask, menvcfg_init); + if (xlen == 32) { + csrmap[CSR_MENVCFG] = std::make_shared(proc, CSR_MENVCFG, menvcfg); + csrmap[CSR_MENVCFGH] = std::make_shared(proc, CSR_MENVCFGH, menvcfg); + } else { + csrmap[CSR_MENVCFG] = menvcfg; + } + const reg_t senvcfg_mask = (proc->extension_enabled(EXT_ZICBOM) ? SENVCFG_CBCFE | SENVCFG_CBIE : 0) | + (proc->extension_enabled(EXT_ZICBOZ) ? SENVCFG_CBZE : 0); + csrmap[CSR_SENVCFG] = senvcfg = std::make_shared(proc, CSR_SENVCFG, senvcfg_mask, 0); + const reg_t henvcfg_mask = (proc->extension_enabled(EXT_ZICBOM) ? HENVCFG_CBCFE | HENVCFG_CBIE : 0) | + (proc->extension_enabled(EXT_ZICBOZ) ? HENVCFG_CBZE : 0) | + (proc->extension_enabled(EXT_SVPBMT) ? HENVCFG_PBMTE : 0); + const reg_t henvcfg_init = (proc->extension_enabled(EXT_SVPBMT) ? HENVCFG_PBMTE : 0); + henvcfg = std::make_shared(proc, CSR_HENVCFG, henvcfg_mask, henvcfg_init, menvcfg); + if (xlen == 32) { + csrmap[CSR_HENVCFG] = std::make_shared(proc, CSR_HENVCFG, henvcfg); + csrmap[CSR_HENVCFGH] = std::make_shared(proc, CSR_HENVCFGH, henvcfg); + } else { + csrmap[CSR_HENVCFG] = henvcfg; + } } if (proc->extension_enabled_const(EXT_SMSTATEEN)) { const reg_t sstateen0_mask = (proc->extension_enabled(EXT_ZFINX) ? 
SSTATEEN0_FCSR : 0) | SSTATEEN0_CS; -- cgit v1.1 From c0b3fdcbaba99576393c57607985a0009bb2ebb1 Mon Sep 17 00:00:00 2001 From: Weiwei Li Date: Tue, 12 Jul 2022 23:21:34 +0800 Subject: modify the check for "state->prv >= PRV_M" to "state->prv == PRV_M" prv can never be larger than PRV_M --- riscv/csrs.cc | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/riscv/csrs.cc b/riscv/csrs.cc index 57778c3..c43812b 100644 --- a/riscv/csrs.cc +++ b/riscv/csrs.cc @@ -885,7 +885,7 @@ satp_csr_t::satp_csr_t(processor_t* const proc, const reg_t addr): void satp_csr_t::verify_permissions(insn_t insn, bool write) const { base_atp_csr_t::verify_permissions(insn, write); if (get_field(state->mstatus->read(), MSTATUS_TVM)) - require(state->prv >= PRV_M); + require(state->prv == PRV_M); } virtualized_satp_csr_t::virtualized_satp_csr_t(processor_t* const proc, satp_csr_t_p orig, csr_t_p virt): -- cgit v1.1 From f0d84787423ef5a0329bb79c45775dbc7ec16de5 Mon Sep 17 00:00:00 2001 From: Scott Johnson Date: Mon, 18 Jul 2022 07:00:45 -0700 Subject: Remove no-longer-necessary typecast It was previously necessary because we were shifting left before assigning to a reg_t, but that changed in the previous commit. --- riscv/mmu.h | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/riscv/mmu.h b/riscv/mmu.h index f652bf8..fdc641f 100644 --- a/riscv/mmu.h +++ b/riscv/mmu.h @@ -58,7 +58,7 @@ public: reg_t res = 0; for (size_t i = 0; i < size; i++) { const reg_t byteaddr = addr + (target_big_endian? size-1-i : i); - const reg_t bytedata = (reg_t)load_uint8(byteaddr); + const reg_t bytedata = load_uint8(byteaddr); res += bytedata << (i * 8); } return res; -- cgit v1.1 From d61dceccdb43ae0025b3f02d825c3783e8ae10ea Mon Sep 17 00:00:00 2001 From: Scott Johnson Date: Fri, 15 Jul 2022 18:15:09 -0700 Subject: Fix totally-broken misaligned HLV/HLVX They were accessing memory using the current privilege mode instead of the expected guest privilege. Once #872 is fixed, I suspect we can greatly simplify this. --- riscv/mmu.h | 7 ++++++- 1 file changed, 6 insertions(+), 1 deletion(-) diff --git a/riscv/mmu.h b/riscv/mmu.h index fdc641f..70cb9e2 100644 --- a/riscv/mmu.h +++ b/riscv/mmu.h @@ -58,7 +58,11 @@ public: reg_t res = 0; for (size_t i = 0; i < size; i++) { const reg_t byteaddr = addr + (target_big_endian? size-1-i : i); - const reg_t bytedata = load_uint8(byteaddr); + const reg_t bytedata + = (RISCV_XLATE_VIRT_HLVX & xlate_flags) ? guest_load_x_uint8(byteaddr) + : (RISCV_XLATE_VIRT & xlate_flags) ? guest_load_uint8(byteaddr) + : load_uint8(byteaddr) + ; res += bytedata << (i * 8); } return res; @@ -129,6 +133,7 @@ public: load_func(uint16, guest_load, RISCV_XLATE_VIRT) load_func(uint32, guest_load, RISCV_XLATE_VIRT) load_func(uint64, guest_load, RISCV_XLATE_VIRT) + load_func(uint8, guest_load_x, RISCV_XLATE_VIRT|RISCV_XLATE_VIRT_HLVX) // only for use by misaligned HLVX load_func(uint16, guest_load_x, RISCV_XLATE_VIRT|RISCV_XLATE_VIRT_HLVX) load_func(uint32, guest_load_x, RISCV_XLATE_VIRT|RISCV_XLATE_VIRT_HLVX) -- cgit v1.1 From 43ecb3424d68391e033f4df421c67c7f468fdff6 Mon Sep 17 00:00:00 2001 From: Scott Johnson Date: Fri, 15 Jul 2022 18:20:52 -0700 Subject: Fix totally-broken misaligned HSV It was accessing memory using the current privilege mode instead of the expected guest privilege. Once #872 is fixed, I suspect we can greatly simplify this. 
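To make the intent of this fix easier to follow, here is a minimal stand-alone sketch of the idea: every byte of a misaligned hypervisor store (HSV) must be issued through the guest-translation store path rather than the ordinary one, so the access is translated with guest privilege. This is only an illustrative model, not Spike's actual mmu_t code; the flag value XLATE_VIRT and the byte_store_fn callbacks are placeholders for RISCV_XLATE_VIRT and the store_uint8/guest_store_uint8 template instantiations.

// Stand-alone sketch: split a misaligned store into byte stores and route
// each byte through the translation path selected by the virtualization flag.
#include <cstddef>
#include <cstdint>
#include <cstdio>
#include <functional>

static constexpr uint32_t XLATE_VIRT = 1;  // stand-in for RISCV_XLATE_VIRT

using byte_store_fn = std::function<void(uint64_t addr, uint8_t data)>;

void misaligned_store_sketch(uint64_t addr, uint64_t data, size_t size,
                             uint32_t xlate_flags, bool big_endian,
                             const byte_store_fn& store_u8,        // current-privilege path
                             const byte_store_fn& guest_store_u8)  // guest-privilege path
{
  for (size_t i = 0; i < size; i++) {
    const uint64_t byteaddr = addr + (big_endian ? size - 1 - i : i);
    const uint8_t bytedata = uint8_t(data >> (i * 8));
    if (xlate_flags & XLATE_VIRT)
      guest_store_u8(byteaddr, bytedata);  // HSV: keep guest privilege
    else
      store_u8(byteaddr, bytedata);        // ordinary store
  }
}

int main() {
  // Toy backends that just report which path each byte took.
  auto normal = [](uint64_t a, uint8_t d) { std::printf("normal [%#llx] <= %02x\n", (unsigned long long)a, unsigned(d)); };
  auto guest  = [](uint64_t a, uint8_t d) { std::printf("guest  [%#llx] <= %02x\n", (unsigned long long)a, unsigned(d)); };
  misaligned_store_sketch(0x1001, 0x11223344, 4, XLATE_VIRT, /*big_endian=*/false, normal, guest);
  return 0;
}

Run against these toy backends, all four bytes go through the guest path, which is exactly the behavior the hunk below adds to misaligned_store.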
--- riscv/mmu.h | 6 +++++- 1 file changed, 5 insertions(+), 1 deletion(-) diff --git a/riscv/mmu.h b/riscv/mmu.h index 70cb9e2..4e12805 100644 --- a/riscv/mmu.h +++ b/riscv/mmu.h @@ -78,7 +78,11 @@ public: for (size_t i = 0; i < size; i++) { const reg_t byteaddr = addr + (target_big_endian? size-1-i : i); const reg_t bytedata = data >> (i * 8); - store_uint8(byteaddr, bytedata, actually_store); + if (RISCV_XLATE_VIRT & xlate_flags) { + guest_store_uint8(byteaddr, bytedata, actually_store); + } else { + store_uint8(byteaddr, bytedata, actually_store); + } } #else bool gva = ((proc) ? proc->state.v : false) || (RISCV_XLATE_VIRT & xlate_flags); -- cgit v1.1 From b0d9782e13156abd5884fa73017a0286441202d1 Mon Sep 17 00:00:00 2001 From: Andrew Waterman Date: Mon, 18 Jul 2022 15:15:41 -0700 Subject: Fix load/store performance under clang Hopefully for the last time :-) --- riscv/common.h | 2 ++ riscv/mmu.h | 4 ++-- 2 files changed, 4 insertions(+), 2 deletions(-) diff --git a/riscv/common.h b/riscv/common.h index 002a83f..7d37001 100644 --- a/riscv/common.h +++ b/riscv/common.h @@ -8,11 +8,13 @@ # define unlikely(x) __builtin_expect(x, 0) # define NOINLINE __attribute__ ((noinline)) # define NORETURN __attribute__ ((noreturn)) +# define ALWAYS_INLINE __attribute__ ((always_inline)) #else # define likely(x) (x) # define unlikely(x) (x) # define NOINLINE # define NORETURN +# define ALWAYS_INLINE #endif #endif diff --git a/riscv/mmu.h b/riscv/mmu.h index 4e12805..6f24ec7 100644 --- a/riscv/mmu.h +++ b/riscv/mmu.h @@ -99,7 +99,7 @@ public: // template for functions that load an aligned value from memory #define load_func(type, prefix, xlate_flags) \ - inline type##_t prefix##_##type(reg_t addr, bool require_alignment = false) { \ + type##_t ALWAYS_INLINE prefix##_##type(reg_t addr, bool require_alignment = false) { \ if (unlikely(addr & (sizeof(type##_t)-1))) { \ if (require_alignment) load_reserved_address_misaligned(addr); \ else return misaligned_load(addr, sizeof(type##_t), xlate_flags); \ @@ -162,7 +162,7 @@ public: // template for functions that store an aligned value to memory #define store_func(type, prefix, xlate_flags) \ - void prefix##_##type(reg_t addr, type##_t val, bool actually_store=true, bool require_alignment=false) { \ + void ALWAYS_INLINE prefix##_##type(reg_t addr, type##_t val, bool actually_store=true, bool require_alignment=false) { \ if (unlikely(addr & (sizeof(type##_t)-1))) { \ if (require_alignment) store_conditional_address_misaligned(addr); \ else return misaligned_store(addr, val, sizeof(type##_t), xlate_flags, actually_store); \ -- cgit v1.1 From 28ee0c4d6a1ed221f1a05ba48f54023ac7d455cc Mon Sep 17 00:00:00 2001 From: Weiwei Li Date: Fri, 8 Jul 2022 18:34:13 +0800 Subject: modify minstret/mcycle/minstreth/mcycleh to reuse rv32_low/high_csr_t --- riscv/csrs.cc | 34 +++++++++------------------------- riscv/csrs.h | 16 ++-------------- riscv/processor.cc | 15 ++++++++++----- 3 files changed, 21 insertions(+), 44 deletions(-) diff --git a/riscv/csrs.cc b/riscv/csrs.cc index 6f8f260..9721f87 100644 --- a/riscv/csrs.cc +++ b/riscv/csrs.cc @@ -515,6 +515,10 @@ bool rv32_low_csr_t::unlogged_write(const reg_t val) noexcept { return orig->unlogged_write((orig->written_value() >> 32 << 32) | (val & 0xffffffffU)); } +reg_t rv32_low_csr_t::written_value() const noexcept { + return orig->written_value() & 0xffffffffU; +} + // implement class rv32_high_csr_t rv32_high_csr_t::rv32_high_csr_t(processor_t* const proc, const reg_t addr, csr_t_p orig): csr_t(proc, addr), @@ -533,6 +537,10 
@@ bool rv32_high_csr_t::unlogged_write(const reg_t val) noexcept { return orig->unlogged_write((orig->written_value() << 32 >> 32) | ((val & 0xffffffffU) << 32)); } +reg_t rv32_high_csr_t::written_value() const noexcept { + return (orig->written_value() >> 32) & 0xffffffffU; +} + // implement class sstatus_csr_t sstatus_csr_t::sstatus_csr_t(processor_t* const proc, sstatus_proxy_csr_t_p orig, vsstatus_csr_t_p virt): virtualized_csr_t(proc, orig, virt), @@ -924,10 +932,7 @@ void wide_counter_csr_t::bump(const reg_t howmuch) noexcept { } bool wide_counter_csr_t::unlogged_write(const reg_t val) noexcept { - if (proc->get_xlen() == 32) - this->val = (this->val >> 32 << 32) | (val & 0xffffffffU); - else - this->val = val; + this->val = val; // The ISA mandates that if an instruction writes instret, the write // takes precedence over the increment to instret. However, Spike // unconditionally increments instret after executing an instruction. @@ -941,27 +946,6 @@ reg_t wide_counter_csr_t::written_value() const noexcept { return this->val + 1; } -void wide_counter_csr_t::write_upper_half(const reg_t val) noexcept { - this->val = (val << 32) | (this->val << 32 >> 32); - this->val--; // See comment above. - // Log upper half only. - log_special_write(address + (CSR_MINSTRETH - CSR_MINSTRET), written_value() >> 32); -} - -counter_top_csr_t::counter_top_csr_t(processor_t* const proc, const reg_t addr, wide_counter_csr_t_p parent): - csr_t(proc, addr), - parent(parent) { -} - -reg_t counter_top_csr_t::read() const noexcept { - return parent->read() >> 32; -} - -bool counter_top_csr_t::unlogged_write(const reg_t val) noexcept { - parent->write_upper_half(val); - return true; -} - proxy_csr_t::proxy_csr_t(processor_t* const proc, const reg_t addr, csr_t_p delegate): csr_t(proc, addr), delegate(delegate) { diff --git a/riscv/csrs.h b/riscv/csrs.h index 500bde7..8108d1e 100644 --- a/riscv/csrs.h +++ b/riscv/csrs.h @@ -260,6 +260,7 @@ class rv32_low_csr_t: public csr_t { virtual void verify_permissions(insn_t insn, bool write) const override; protected: virtual bool unlogged_write(const reg_t val) noexcept override; + virtual reg_t written_value() const noexcept override; private: csr_t_p orig; }; @@ -271,6 +272,7 @@ class rv32_high_csr_t: public csr_t { virtual void verify_permissions(insn_t insn, bool write) const override; protected: virtual bool unlogged_write(const reg_t val) noexcept override; + virtual reg_t written_value() const noexcept override; private: csr_t_p orig; }; @@ -498,7 +500,6 @@ class wide_counter_csr_t: public csr_t { // Always returns full 64-bit value virtual reg_t read() const noexcept override; void bump(const reg_t howmuch) noexcept; - void write_upper_half(const reg_t val) noexcept; protected: virtual bool unlogged_write(const reg_t val) noexcept override; virtual reg_t written_value() const noexcept override; @@ -508,19 +509,6 @@ class wide_counter_csr_t: public csr_t { typedef std::shared_ptr wide_counter_csr_t_p; -// A simple proxy to read/write the upper half of minstret/mcycle -class counter_top_csr_t: public csr_t { - public: - counter_top_csr_t(processor_t* const proc, const reg_t addr, wide_counter_csr_t_p parent); - virtual reg_t read() const noexcept override; - protected: - virtual bool unlogged_write(const reg_t val) noexcept override; - private: - wide_counter_csr_t_p parent; -}; - -typedef std::shared_ptr counter_top_csr_t_p; - // For a CSR that is an alias of another class proxy_csr_t: public csr_t { public: diff --git a/riscv/processor.cc b/riscv/processor.cc 
index 8f77b47..c351d1d 100644 --- a/riscv/processor.cc +++ b/riscv/processor.cc @@ -202,20 +202,25 @@ void state_t::reset(processor_t* const proc, reg_t max_isa) csrmap[CSR_MSCRATCH] = std::make_shared(proc, CSR_MSCRATCH, 0); csrmap[CSR_MTVEC] = mtvec = std::make_shared(proc, CSR_MTVEC); csrmap[CSR_MCAUSE] = mcause = std::make_shared(proc, CSR_MCAUSE); - csrmap[CSR_MINSTRET] = minstret = std::make_shared(proc, CSR_MINSTRET); - csrmap[CSR_MCYCLE] = mcycle = std::make_shared(proc, CSR_MCYCLE); + minstret = std::make_shared(proc, CSR_MINSTRET); + mcycle = std::make_shared(proc, CSR_MCYCLE); if (proc->extension_enabled_const(EXT_ZICNTR)) { csrmap[CSR_INSTRET] = std::make_shared(proc, CSR_INSTRET, minstret); csrmap[CSR_CYCLE] = std::make_shared(proc, CSR_CYCLE, mcycle); } if (xlen == 32) { - counter_top_csr_t_p minstreth, mcycleh; - csrmap[CSR_MINSTRETH] = minstreth = std::make_shared(proc, CSR_MINSTRETH, minstret); - csrmap[CSR_MCYCLEH] = mcycleh = std::make_shared(proc, CSR_MCYCLEH, mcycle); + csr_t_p minstreth, mcycleh; + csrmap[CSR_MINSTRET] = std::make_shared(proc, CSR_MINSTRET, minstret); + csrmap[CSR_MINSTRETH] = minstreth = std::make_shared(proc, CSR_MINSTRETH, minstret); + csrmap[CSR_MCYCLE] = std::make_shared(proc, CSR_MCYCLE, mcycle); + csrmap[CSR_MCYCLEH] = mcycleh = std::make_shared(proc, CSR_MCYCLEH, mcycle); if (proc->extension_enabled_const(EXT_ZICNTR)) { csrmap[CSR_INSTRETH] = std::make_shared(proc, CSR_INSTRETH, minstreth); csrmap[CSR_CYCLEH] = std::make_shared(proc, CSR_CYCLEH, mcycleh); } + } else { + csrmap[CSR_MINSTRET] = minstret; + csrmap[CSR_MCYCLE] = mcycle; } for (reg_t i = 3; i <= 31; ++i) { const reg_t which_mevent = CSR_MHPMEVENT3 + i - 3; -- cgit v1.1 From 3ff1b5f1c6c6e13777be1c677abc2340f3dabd1a Mon Sep 17 00:00:00 2001 From: Weiwei Li Date: Fri, 8 Jul 2022 20:30:02 +0800 Subject: add support for time/timeh/htimedelta/htimedeltah csrs --- riscv/clint.cc | 1 + riscv/csrs.cc | 19 +++++++++++++++++++ riscv/csrs.h | 15 +++++++++++++++ riscv/processor.cc | 11 +++++++++++ riscv/processor.h | 3 +++ 5 files changed, 49 insertions(+) diff --git a/riscv/clint.cc b/riscv/clint.cc index 72d1bbe..3f2d4d7 100644 --- a/riscv/clint.cc +++ b/riscv/clint.cc @@ -82,6 +82,7 @@ void clint_t::increment(reg_t inc) mtime += inc; } for (size_t i = 0; i < procs.size(); i++) { + procs[i]->state.time->sync(mtime); procs[i]->state.mip->backdoor_write_with_mask(MIP_MTIP, 0); if (mtime >= mtimecmp[i]) procs[i]->state.mip->backdoor_write_with_mask(MIP_MTIP, MIP_MTIP); diff --git a/riscv/csrs.cc b/riscv/csrs.cc index 9721f87..8cb9953 100644 --- a/riscv/csrs.cc +++ b/riscv/csrs.cc @@ -946,6 +946,25 @@ reg_t wide_counter_csr_t::written_value() const noexcept { return this->val + 1; } +// implement class time_counter_csr_t +time_counter_csr_t::time_counter_csr_t(processor_t* const proc, const reg_t addr): + csr_t(proc, addr), + shadow_val(0) { +} + +reg_t time_counter_csr_t::read() const noexcept { + // reading the time CSR in VS or VU mode returns the sum of the contents of + // htimedelta and the actual value of time. 
+ if (state->v) + return shadow_val + state->htimedelta->read(); + else + return shadow_val; +} + +void time_counter_csr_t::sync(const reg_t val) noexcept { + shadow_val = val; +} + proxy_csr_t::proxy_csr_t(processor_t* const proc, const reg_t addr, csr_t_p delegate): csr_t(proc, addr), delegate(delegate) { diff --git a/riscv/csrs.h b/riscv/csrs.h index 8108d1e..34bc9d0 100644 --- a/riscv/csrs.h +++ b/riscv/csrs.h @@ -509,6 +509,21 @@ class wide_counter_csr_t: public csr_t { typedef std::shared_ptr wide_counter_csr_t_p; +class time_counter_csr_t: public csr_t { + public: + time_counter_csr_t(processor_t* const proc, const reg_t addr); + virtual reg_t read() const noexcept override; + + void sync(const reg_t val) noexcept; + + protected: + virtual bool unlogged_write(const reg_t val) noexcept override { return false; }; + private: + reg_t shadow_val; +}; + +typedef std::shared_ptr time_counter_csr_t_p; + // For a CSR that is an alias of another class proxy_csr_t: public csr_t { public: diff --git a/riscv/processor.cc b/riscv/processor.cc index c351d1d..642f1fb 100644 --- a/riscv/processor.cc +++ b/riscv/processor.cc @@ -204,9 +204,11 @@ void state_t::reset(processor_t* const proc, reg_t max_isa) csrmap[CSR_MCAUSE] = mcause = std::make_shared(proc, CSR_MCAUSE); minstret = std::make_shared(proc, CSR_MINSTRET); mcycle = std::make_shared(proc, CSR_MCYCLE); + time = std::make_shared(proc, CSR_TIME); if (proc->extension_enabled_const(EXT_ZICNTR)) { csrmap[CSR_INSTRET] = std::make_shared(proc, CSR_INSTRET, minstret); csrmap[CSR_CYCLE] = std::make_shared(proc, CSR_CYCLE, mcycle); + csrmap[CSR_TIME] = std::make_shared(proc, CSR_TIME, time); } if (xlen == 32) { csr_t_p minstreth, mcycleh; @@ -215,8 +217,10 @@ void state_t::reset(processor_t* const proc, reg_t max_isa) csrmap[CSR_MCYCLE] = std::make_shared(proc, CSR_MCYCLE, mcycle); csrmap[CSR_MCYCLEH] = mcycleh = std::make_shared(proc, CSR_MCYCLEH, mcycle); if (proc->extension_enabled_const(EXT_ZICNTR)) { + auto timeh = std::make_shared(proc, CSR_TIMEH, time); csrmap[CSR_INSTRETH] = std::make_shared(proc, CSR_INSTRETH, minstreth); csrmap[CSR_CYCLEH] = std::make_shared(proc, CSR_CYCLEH, mcycleh); + csrmap[CSR_TIMEH] = std::make_shared(proc, CSR_TIMEH, timeh); } } else { csrmap[CSR_MINSTRET] = minstret; @@ -349,6 +353,13 @@ void state_t::reset(processor_t* const proc, reg_t max_isa) (1 << CAUSE_STORE_PAGE_FAULT); csrmap[CSR_HEDELEG] = hedeleg = std::make_shared(proc, CSR_HEDELEG, hedeleg_mask, 0); csrmap[CSR_HCOUNTEREN] = hcounteren = std::make_shared(proc, CSR_HCOUNTEREN, counteren_mask, 0); + htimedelta = std::make_shared(proc, CSR_HTIMEDELTA, 0); + if (xlen == 32) { + csrmap[CSR_HTIMEDELTA] = std::make_shared(proc, CSR_HTIMEDELTA, htimedelta); + csrmap[CSR_HTIMEDELTAH] = std::make_shared(proc, CSR_HTIMEDELTAH, htimedelta); + } else { + csrmap[CSR_HTIMEDELTA] = htimedelta; + } csrmap[CSR_HTVAL] = htval = std::make_shared(proc, CSR_HTVAL, 0); csrmap[CSR_HTINST] = htinst = std::make_shared(proc, CSR_HTINST, 0); csrmap[CSR_HGATP] = hgatp = std::make_shared(proc, CSR_HGATP); diff --git a/riscv/processor.h b/riscv/processor.h index 347ae16..b415402 100644 --- a/riscv/processor.h +++ b/riscv/processor.h @@ -191,6 +191,9 @@ struct state_t csr_t_p sstateen[4]; csr_t_p hstateen[4]; + csr_t_p htimedelta; + time_counter_csr_t_p time; + bool serialized; // whether timer CSRs are in a well-defined state // When true, execute a single instruction and then enter debug mode. 
This -- cgit v1.1 From eff4011f24390017e4582cb8556ebc6e5ce41d61 Mon Sep 17 00:00:00 2001 From: Weiwei Li Date: Fri, 8 Jul 2022 10:00:00 +0800 Subject: add base verify_permission in counter_proxy_csr_t::verify_permissions Normally, csrs will reuse the checks in verify_permissions of its base csr type This modification will not cause any functional change, just reuse the check in csr_t class to check whether it writes to read-only csr instead of checking writes to counter_proxy_csr_t by itself. --- riscv/csrs.cc | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/riscv/csrs.cc b/riscv/csrs.cc index 8cb9953..27bef5e 100644 --- a/riscv/csrs.cc +++ b/riscv/csrs.cc @@ -1001,11 +1001,13 @@ bool counter_proxy_csr_t::myenable(csr_t_p counteren) const noexcept { } void counter_proxy_csr_t::verify_permissions(insn_t insn, bool write) const { + proxy_csr_t::verify_permissions(insn, write); + const bool mctr_ok = (state->prv < PRV_M) ? myenable(state->mcounteren) : true; const bool hctr_ok = state->v ? myenable(state->hcounteren) : true; const bool sctr_ok = (proc->extension_enabled('S') && state->prv < PRV_S) ? myenable(state->scounteren) : true; - if (write || !mctr_ok) + if (!mctr_ok) throw trap_illegal_instruction(insn.bits()); if (!hctr_ok) throw trap_virtual_instruction(insn.bits()); -- cgit v1.1 From cdc05e6719f30e6e9192dc92f8cd8d5b22f53417 Mon Sep 17 00:00:00 2001 From: Tim Newsome Date: Mon, 25 Jul 2022 16:47:22 -0700 Subject: Pay attention to dmcs2.grouptype. (#1049) --- riscv/debug_module.cc | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/riscv/debug_module.cc b/riscv/debug_module.cc index 0eac842..a3d35ea 100644 --- a/riscv/debug_module.cc +++ b/riscv/debug_module.cc @@ -920,7 +920,9 @@ bool debug_module_t::dmi_write(unsigned address, uint32_t value) } return true; case DM_DMCS2: - if (config.support_haltgroups && get_field(value, DM_DMCS2_HGWRITE)) { + if (config.support_haltgroups && + get_field(value, DM_DMCS2_HGWRITE) && + get_field(value, DM_DMCS2_GROUPTYPE) == 0) { hart_state[dmcontrol.hartsel].haltgroup = get_field(value, DM_DMCS2_GROUP); } -- cgit v1.1 From 8d016bffdbc8d7cbeb253cc4f2ee9e5d4bf0a626 Mon Sep 17 00:00:00 2001 From: Brendan Sweeney Date: Tue, 26 Jul 2022 14:36:01 -0500 Subject: Add additional bits to medeleg (#1050) Pursuant to https://github.com/riscv-software-src/riscv-isa-sim/issues/668 and https://github.com/riscv-software-src/riscv-isa-sim/issues/194, allowing for additional exceptions to be delegated from M-mode. It is implementation-defined whether these bits are defined or are read-only-zero. QEMU implements the added bits (Fetch/Load/StoreAMO access, Load/StoreAMO misalignment, and illegal instruction). (https://github.com/qemu/qemu/blob/f6cce6bcb2ef959cdd4da0e368f7c72045f21d6d/target/riscv/csr.c#L813) ECALL_FROM_M is not implemented here because it would have no effect, although QEMU does implement it. This allows Spike to emulate QEMU and other systems which allow for the delegation of such exceptions. 
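To make the effect of the extra bits concrete, the following is a simplified model of how medeleg steers a synchronous exception. It is only an illustration, not Spike's actual trap-routing code, and it ignores hypervisor two-level delegation via hedeleg; the cause number and helper name are chosen purely for the example.

// Simplified delegation model: an exception raised at privilege prv < M is
// handled in S-mode iff the corresponding medeleg bit is set; exceptions
// taken in M-mode always stay in M-mode.
#include <cstdint>
#include <cstdio>

enum : int { PRV_U = 0, PRV_S = 1, PRV_M = 3 };

int trap_handling_mode(uint64_t medeleg, int cause, int prv) {
  const bool delegated = (medeleg >> cause) & 1;
  return (prv <= PRV_S && delegated) ? PRV_S : PRV_M;
}

int main() {
  const int CAUSE_LOAD_ACCESS = 5;                            // load access fault
  const uint64_t medeleg = uint64_t(1) << CAUSE_LOAD_ACCESS;  // one of the newly delegatable bits
  std::printf("load access fault from U-mode -> %s\n",
              trap_handling_mode(medeleg, CAUSE_LOAD_ACCESS, PRV_U) == PRV_S ? "S-mode" : "M-mode");
  std::printf("load access fault from M-mode -> %s\n",
              trap_handling_mode(medeleg, CAUSE_LOAD_ACCESS, PRV_M) == PRV_S ? "S-mode" : "M-mode");
  return 0;
}

With the additional mask bits in the hunk below, software can set such a bit in medeleg and the corresponding fault is then taken in S-mode rather than M-mode.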
Signed-off-by: Brendan Sweeney --- riscv/csrs.cc | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/riscv/csrs.cc b/riscv/csrs.cc index 3cb6af2..d02212b 100644 --- a/riscv/csrs.cc +++ b/riscv/csrs.cc @@ -822,7 +822,13 @@ void medeleg_csr_t::verify_permissions(insn_t insn, bool write) const { bool medeleg_csr_t::unlogged_write(const reg_t val) noexcept { const reg_t mask = 0 | (1 << CAUSE_MISALIGNED_FETCH) + | (1 << CAUSE_FETCH_ACCESS) + | (1 << CAUSE_ILLEGAL_INSTRUCTION) | (1 << CAUSE_BREAKPOINT) + | (1 << CAUSE_MISALIGNED_LOAD) + | (1 << CAUSE_LOAD_ACCESS) + | (1 << CAUSE_MISALIGNED_STORE) + | (1 << CAUSE_STORE_ACCESS) | (1 << CAUSE_USER_ECALL) | (1 << CAUSE_SUPERVISOR_ECALL) | (1 << CAUSE_FETCH_PAGE_FAULT) -- cgit v1.1 From 66fd65c0d2ebc84bd8d8f82258e0988fdca07719 Mon Sep 17 00:00:00 2001 From: ChunPing Chung Date: Fri, 29 Jul 2022 03:01:26 +0800 Subject: Fix overflow issue of p-ext multiply instructions (#1053) --- riscv/insns/smul16.h | 2 +- riscv/insns/smul8.h | 2 +- riscv/insns/smulx16.h | 2 +- riscv/insns/smulx8.h | 2 +- riscv/insns/umul16.h | 2 +- riscv/insns/umul8.h | 2 +- riscv/insns/umulx16.h | 2 +- riscv/insns/umulx8.h | 2 +- 8 files changed, 8 insertions(+), 8 deletions(-) diff --git a/riscv/insns/smul16.h b/riscv/insns/smul16.h index 8f87612..7e0f08a 100644 --- a/riscv/insns/smul16.h +++ b/riscv/insns/smul16.h @@ -1,3 +1,3 @@ P_MUL_LOOP(16, { - pd = ps1 * ps2; + pd = (int32_t)ps1 * (int32_t)ps2; }) diff --git a/riscv/insns/smul8.h b/riscv/insns/smul8.h index 155e50e..a4a3ed9 100644 --- a/riscv/insns/smul8.h +++ b/riscv/insns/smul8.h @@ -1,3 +1,3 @@ P_MUL_LOOP(8, { - pd = ps1 * ps2; + pd = (int16_t)ps1 * (int16_t)ps2; }) diff --git a/riscv/insns/smulx16.h b/riscv/insns/smulx16.h index 14ae047..58e9a08 100644 --- a/riscv/insns/smulx16.h +++ b/riscv/insns/smulx16.h @@ -1,3 +1,3 @@ P_MUL_CROSS_LOOP(16, { - pd = ps1 * ps2; + pd = (int32_t)ps1 * (int32_t)ps2; }) diff --git a/riscv/insns/smulx8.h b/riscv/insns/smulx8.h index b5ae41c..9270ce3 100644 --- a/riscv/insns/smulx8.h +++ b/riscv/insns/smulx8.h @@ -1,3 +1,3 @@ P_MUL_CROSS_LOOP(8, { - pd = ps1 * ps2; + pd = (int16_t)ps1 * (int16_t)ps2; }) diff --git a/riscv/insns/umul16.h b/riscv/insns/umul16.h index 860f942..09b839c 100644 --- a/riscv/insns/umul16.h +++ b/riscv/insns/umul16.h @@ -1,3 +1,3 @@ P_MUL_ULOOP(16, { - pd = ps1 * ps2; + pd = (uint32_t)ps1 * (uint32_t)ps2; }) diff --git a/riscv/insns/umul8.h b/riscv/insns/umul8.h index 04d7a6e..29cae88 100644 --- a/riscv/insns/umul8.h +++ b/riscv/insns/umul8.h @@ -1,3 +1,3 @@ P_MUL_ULOOP(8, { - pd = ps1 * ps2; + pd = (uint16_t)ps1 * (uint16_t)ps2; }) diff --git a/riscv/insns/umulx16.h b/riscv/insns/umulx16.h index 5abe9cf..3f0cce8 100644 --- a/riscv/insns/umulx16.h +++ b/riscv/insns/umulx16.h @@ -1,3 +1,3 @@ P_MUL_CROSS_ULOOP(16, { - pd = ps1 * ps2; + pd = (uint32_t)ps1 * (uint32_t)ps2; }) diff --git a/riscv/insns/umulx8.h b/riscv/insns/umulx8.h index a2b073d..848b5d5 100644 --- a/riscv/insns/umulx8.h +++ b/riscv/insns/umulx8.h @@ -1,3 +1,3 @@ P_MUL_CROSS_ULOOP(8, { - pd = ps1 * ps2; + pd = (uint16_t)ps1 * (uint16_t)ps2; }) -- cgit v1.1 From c274695b204ce271d0fa272c8b8ff4d02b55f709 Mon Sep 17 00:00:00 2001 From: Tim Newsome Date: Sat, 30 Jul 2022 10:22:47 -0700 Subject: Fix debug_rom.S build command error. Previously gcc would complain that link.lds was mentioned twice (once on the command line and once with a -T directive.) 
--- debug_rom/Makefile | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/debug_rom/Makefile b/debug_rom/Makefile index c5f2205..d6546e9 100644 --- a/debug_rom/Makefile +++ b/debug_rom/Makefile @@ -18,7 +18,7 @@ all: $(patsubst %,%.h,$(ELFS)) $(OBJCOPY) -O binary --only-section .text $^ $@ debug_rom: $(DEPS) - $(COMPILE) -o $@ $^ + $(COMPILE) -o $@ $< clean: rm -f $(ELFS) debug_rom*.raw debug_rom.h -- cgit v1.1 From 4d985feb4e65fd09e6fd9f6f5e9c534c2e522892 Mon Sep 17 00:00:00 2001 From: Tim Newsome Date: Sat, 30 Jul 2022 10:27:55 -0700 Subject: DSCRATCH is now called DSCRATCH0 Fixes build. --- debug_rom/debug_rom.S | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/debug_rom/debug_rom.S b/debug_rom/debug_rom.S index 8d8e4cd..2d36139 100755 --- a/debug_rom/debug_rom.S +++ b/debug_rom/debug_rom.S @@ -23,7 +23,7 @@ _entry: // This fence is required because the execution may have written something // into the Abstract Data or Program Buffer registers. fence - csrw CSR_DSCRATCH, s0 // Save s0 to allow signaling MHARTID + csrw CSR_DSCRATCH0, s0 // Save s0 to allow signaling MHARTID // We continue to let the hart know that we are halted in order that // a DM which was reset is still made aware that a hart is halted. @@ -46,14 +46,14 @@ _exception: // Restore S0, which we always save to dscratch. // We need this in case the user tried an abstract write to a // non-existent CSR. - csrr s0, CSR_DSCRATCH + csrr s0, CSR_DSCRATCH0 sw zero, DEBUG_ROM_EXCEPTION(zero) // Let debug module know you got an exception. ebreak going: csrr s0, CSR_MHARTID sw s0, DEBUG_ROM_GOING(zero) // When debug module sees this write, the GO flag is reset. - csrr s0, CSR_DSCRATCH // Restore s0 here + csrr s0, CSR_DSCRATCH0 // Restore s0 here fence fence.i jalr zero, zero, %lo(whereto) // Debug module will put different instructions and data in the RAM, @@ -63,7 +63,7 @@ going: _resume: csrr s0, CSR_MHARTID sw s0, DEBUG_ROM_RESUMING(zero) // When Debug Module sees this write, the RESUME flag is reset. - csrr s0, CSR_DSCRATCH // Restore s0 + csrr s0, CSR_DSCRATCH0 // Restore s0 dret // END OF ACTUAL "ROM" CONTENTS. BELOW IS JUST FOR LINKER SCRIPT. -- cgit v1.1 From 2d69ec069e6e771eaf59b419b2bfd89d5298c53c Mon Sep 17 00:00:00 2001 From: Canberk Topal Date: Tue, 26 Jul 2022 16:26:39 +0100 Subject: WFI condition fix Before this commit Spike was requiring S mode privilege even without S mode implemented. This commit fixes it. Signed-off-by: Canberk Topal --- riscv/insns/wfi.h | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/riscv/insns/wfi.h b/riscv/insns/wfi.h index 299cb01..3411da0 100644 --- a/riscv/insns/wfi.h +++ b/riscv/insns/wfi.h @@ -5,7 +5,9 @@ if (STATE.v && STATE.prv == PRV_U) { } else if (STATE.v) { // VS-mode if (get_field(STATE.hstatus->read(), HSTATUS_VTW)) require_novirt(); -} else { +} else if (p->extension_enabled('S')) { + // When S-mode is implemented, then executing WFI in + // U-mode causes an illegal instruction exception. 
require_privilege(PRV_S); } wfi(); -- cgit v1.1 From eb2cce0c99075f89e77b0c1db92108f9c49ccab0 Mon Sep 17 00:00:00 2001 From: Weiwei Li Date: Wed, 3 Aug 2022 10:32:51 +0800 Subject: add stateen related check to frm/fflags and then apply to fcsr implicitly --- riscv/csrs.cc | 39 +++++++++++++++------------------------ riscv/csrs.h | 6 ------ riscv/processor.cc | 2 +- 3 files changed, 16 insertions(+), 31 deletions(-) diff --git a/riscv/csrs.cc b/riscv/csrs.cc index d02212b..4ec404b 100644 --- a/riscv/csrs.cc +++ b/riscv/csrs.cc @@ -1198,6 +1198,21 @@ void float_csr_t::verify_permissions(insn_t insn, bool write) const { require_fp; if (!proc->extension_enabled('F')) throw trap_illegal_instruction(insn.bits()); + + if (proc->extension_enabled(EXT_SMSTATEEN) && proc->extension_enabled(EXT_ZFINX)) { + if ((state->prv < PRV_M) && !(state->mstateen[0]->read() & MSTATEEN0_FCSR)) + throw trap_illegal_instruction(insn.bits()); + + if (state->v && !(state->hstateen[0]->read() & HSTATEEN0_FCSR)) + throw trap_virtual_instruction(insn.bits()); + + if ((proc->extension_enabled('S') && state->prv < PRV_S) && !(state->sstateen[0]->read() & SSTATEEN0_FCSR)) { + if (state->v) + throw trap_virtual_instruction(insn.bits()); + else + throw trap_illegal_instruction(insn.bits()); + } + } } bool float_csr_t::unlogged_write(const reg_t val) noexcept { @@ -1350,30 +1365,6 @@ void sstateen_csr_t::verify_permissions(insn_t insn, bool write) const { throw trap_virtual_instruction(insn.bits()); } -// implement class fcsr_csr_t -fcsr_csr_t::fcsr_csr_t(processor_t* const proc, const reg_t addr, csr_t_p upper_csr, csr_t_p lower_csr, const unsigned upper_lsb): - composite_csr_t(proc, addr, upper_csr, lower_csr, upper_lsb) { -} - -void fcsr_csr_t::verify_permissions(insn_t insn, bool write) const { - composite_csr_t::verify_permissions(insn, write); - - if (proc->extension_enabled(EXT_SMSTATEEN) && proc->extension_enabled(EXT_ZFINX)) { - if ((state->prv < PRV_M) && !(state->mstateen[0]->read() & MSTATEEN0_FCSR)) - throw trap_illegal_instruction(insn.bits()); - - if (state->v && !(state->hstateen[0]->read() & HSTATEEN0_FCSR)) - throw trap_virtual_instruction(insn.bits()); - - if ((proc->extension_enabled('S') && state->prv < PRV_S) && !(state->sstateen[0]->read() & SSTATEEN0_FCSR)) { - if (state->v) - throw trap_virtual_instruction(insn.bits()); - else - throw trap_illegal_instruction(insn.bits()); - } - } -} - // implement class senvcfg_csr_t senvcfg_csr_t::senvcfg_csr_t(processor_t* const proc, const reg_t addr, const reg_t mask, const reg_t init): diff --git a/riscv/csrs.h b/riscv/csrs.h index c979942..5fc3a49 100644 --- a/riscv/csrs.h +++ b/riscv/csrs.h @@ -717,12 +717,6 @@ class sstateen_csr_t: public hstateen_csr_t { virtual bool unlogged_write(const reg_t val) noexcept override; }; -class fcsr_csr_t: public composite_csr_t { - public: - fcsr_csr_t(processor_t* const proc, const reg_t addr, csr_t_p upper_csr, csr_t_p lower_csr, const unsigned upper_lsb); - virtual void verify_permissions(insn_t insn, bool write) const override; -}; - class senvcfg_csr_t final: public masked_csr_t { public: senvcfg_csr_t(processor_t* const proc, const reg_t addr, const reg_t mask, const reg_t init); diff --git a/riscv/processor.cc b/riscv/processor.cc index 6d0d349..ddb0344 100644 --- a/riscv/processor.cc +++ b/riscv/processor.cc @@ -393,7 +393,7 @@ void state_t::reset(processor_t* const proc, reg_t max_isa) csrmap[CSR_FFLAGS] = fflags = std::make_shared(proc, CSR_FFLAGS, FSR_AEXC >> FSR_AEXC_SHIFT, 0); csrmap[CSR_FRM] = frm = 
std::make_shared(proc, CSR_FRM, FSR_RD >> FSR_RD_SHIFT, 0); assert(FSR_AEXC_SHIFT == 0); // composite_csr_t assumes fflags begins at bit 0 - csrmap[CSR_FCSR] = std::make_shared(proc, CSR_FCSR, frm, fflags, FSR_RD_SHIFT); + csrmap[CSR_FCSR] = std::make_shared(proc, CSR_FCSR, frm, fflags, FSR_RD_SHIFT); csrmap[CSR_SEED] = std::make_shared(proc, CSR_SEED); -- cgit v1.1 From a7de776de66a1c1caea8d896e6ff51503b0a46bf Mon Sep 17 00:00:00 2001 From: Weiwei Li Date: Wed, 3 Aug 2022 22:10:17 +0800 Subject: Fix exception type for accessing senvcfg/henvcfg/hstateen Illegal instruciton trap should be raised when accessing senvcfg/ henvcfg/hstateen if related bit of mstateen is zero in VU mode --- riscv/csrs.cc | 11 +++++------ 1 file changed, 5 insertions(+), 6 deletions(-) diff --git a/riscv/csrs.cc b/riscv/csrs.cc index 4ec404b..875461a 100644 --- a/riscv/csrs.cc +++ b/riscv/csrs.cc @@ -1324,10 +1324,9 @@ bool hstateen_csr_t::unlogged_write(const reg_t val) noexcept { } void hstateen_csr_t::verify_permissions(insn_t insn, bool write) const { - masked_csr_t::verify_permissions(insn, write); - if ((state->prv < PRV_M) && !(state->mstateen[index]->read() & MSTATEEN_HSTATEEN)) throw trap_illegal_instruction(insn.bits()); + masked_csr_t::verify_permissions(insn, write); } // implement class sstateen_csr_t @@ -1372,8 +1371,6 @@ senvcfg_csr_t::senvcfg_csr_t(processor_t* const proc, const reg_t addr, const re } void senvcfg_csr_t::verify_permissions(insn_t insn, bool write) const { - masked_csr_t::verify_permissions(insn, write); - if (proc->extension_enabled(EXT_SMSTATEEN)) { if ((state->prv < PRV_M) && !(state->mstateen[0]->read() & MSTATEEN0_HENVCFG)) throw trap_illegal_instruction(insn.bits()); @@ -1381,13 +1378,15 @@ void senvcfg_csr_t::verify_permissions(insn_t insn, bool write) const { if (state->v && !(state->hstateen[0]->read() & HSTATEEN0_SENVCFG)) throw trap_virtual_instruction(insn.bits()); } -} -void henvcfg_csr_t::verify_permissions(insn_t insn, bool write) const { masked_csr_t::verify_permissions(insn, write); +} +void henvcfg_csr_t::verify_permissions(insn_t insn, bool write) const { if (proc->extension_enabled(EXT_SMSTATEEN)) { if ((state->prv < PRV_M) && !(state->mstateen[0]->read() & MSTATEEN0_HENVCFG)) throw trap_illegal_instruction(insn.bits()); } + + masked_csr_t::verify_permissions(insn, write); } -- cgit v1.1 From 5672c4a41ad7a9af011d385962c175a5a6012fd9 Mon Sep 17 00:00:00 2001 From: i2h2 <110197402+i2h2@users.noreply.github.com> Date: Wed, 3 Aug 2022 16:01:57 -0600 Subject: Add Sstc support. (#1057) --- riscv/csrs.cc | 37 ++++++++++++++++++++++++++++++++++++- riscv/csrs.h | 17 ++++++++++++++++- riscv/isa_parser.cc | 2 ++ riscv/isa_parser.h | 1 + riscv/processor.cc | 23 ++++++++++++++++++++--- riscv/processor.h | 4 ++++ 6 files changed, 79 insertions(+), 5 deletions(-) diff --git a/riscv/csrs.cc b/riscv/csrs.cc index d02212b..50cbfb3 100644 --- a/riscv/csrs.cc +++ b/riscv/csrs.cc @@ -679,7 +679,8 @@ void mip_csr_t::backdoor_write_with_mask(const reg_t mask, const reg_t val) noex } reg_t mip_csr_t::write_mask() const noexcept { - const reg_t supervisor_ints = proc->extension_enabled('S') ? MIP_SSIP | MIP_STIP | MIP_SEIP : 0; + // MIP_STIP is writable unless SSTC exists and STCE is set in MENVCFG + const reg_t supervisor_ints = proc->extension_enabled('S') ? MIP_SSIP | ((state->menvcfg->read() & MENVCFG_STCE) ? 0 : MIP_STIP) | MIP_SEIP : 0; const reg_t vssip_int = proc->extension_enabled('H') ? MIP_VSSIP : 0; const reg_t hypervisor_ints = proc->extension_enabled('H') ? 
MIP_HS_MASK : 0; // We must mask off sgeip, vstip, and vseip. All three of these @@ -979,6 +980,11 @@ reg_t time_counter_csr_t::read() const noexcept { void time_counter_csr_t::sync(const reg_t val) noexcept { shadow_val = val; + if (proc->extension_enabled(EXT_SSTC)) { + const reg_t mip_val = (shadow_val >= state->stimecmp->read() ? MIP_STIP : 0) | + (shadow_val + state->htimedelta->read() >= state->vstimecmp->read() ? MIP_VSTIP : 0); + state->mip->backdoor_write_with_mask(MIP_STIP | MIP_VSTIP, mip_val); + } } proxy_csr_t::proxy_csr_t(processor_t* const proc, const reg_t addr, csr_t_p delegate): @@ -1400,3 +1406,32 @@ void henvcfg_csr_t::verify_permissions(insn_t insn, bool write) const { throw trap_illegal_instruction(insn.bits()); } } + +stimecmp_csr_t::stimecmp_csr_t(processor_t* const proc, const reg_t addr, const reg_t imask): + basic_csr_t(proc, addr, 0), intr_mask(imask) { +} + +bool stimecmp_csr_t::unlogged_write(const reg_t val) noexcept { + state->mip->backdoor_write_with_mask(intr_mask, state->time->read() >= val ? intr_mask : 0); + return basic_csr_t::unlogged_write(val); +} + +virtualized_stimecmp_csr_t::virtualized_stimecmp_csr_t(processor_t* const proc, csr_t_p orig, csr_t_p virt): + virtualized_csr_t(proc, orig, virt) { +} + +void virtualized_stimecmp_csr_t::verify_permissions(insn_t insn, bool write) const { + virtualized_csr_t::verify_permissions(insn, write); + + // check for read permission to time as enabled by xcounteren + state->time_proxy->verify_permissions(insn, false); + + if (!(state->menvcfg->read() & MENVCFG_STCE)) { + // access to (v)stimecmp with MENVCFG.STCE = 0 + if (state->prv < PRV_M) + throw trap_illegal_instruction(insn.bits()); + } else if (state->v && !(state->henvcfg->read() & HENVCFG_STCE)) { + // access to vstimecmp with MENVCFG.STCE = 1 and HENVCFG.STCE = 0 when V = 1 + throw trap_virtual_instruction(insn.bits()); + } +} diff --git a/riscv/csrs.h b/riscv/csrs.h index c979942..acd889c 100644 --- a/riscv/csrs.h +++ b/riscv/csrs.h @@ -449,12 +449,13 @@ class masked_csr_t: public basic_csr_t { }; // henvcfg.pbmte is read_only 0 when menvcfg.pbmte = 0 +// henvcfg.stce is read_only 0 when menvcfg.stce = 0 class henvcfg_csr_t final: public masked_csr_t { public: henvcfg_csr_t(processor_t* const proc, const reg_t addr, const reg_t mask, const reg_t init, csr_t_p menvcfg); reg_t read() const noexcept override { - return (menvcfg->read() | ~MENVCFG_PBMTE) & masked_csr_t::read(); + return (menvcfg->read() | ~(MENVCFG_PBMTE | MENVCFG_STCE)) & masked_csr_t::read(); } virtual void verify_permissions(insn_t insn, bool write) const override; @@ -729,4 +730,18 @@ class senvcfg_csr_t final: public masked_csr_t { virtual void verify_permissions(insn_t insn, bool write) const override; }; +class stimecmp_csr_t: public basic_csr_t { + public: + stimecmp_csr_t(processor_t* const proc, const reg_t addr, const reg_t imask); + protected: + virtual bool unlogged_write(const reg_t val) noexcept override; + private: + reg_t intr_mask; +}; + +class virtualized_stimecmp_csr_t: public virtualized_csr_t { + public: + virtualized_stimecmp_csr_t(processor_t* const proc, csr_t_p orig, csr_t_p virt); + virtual void verify_permissions(insn_t insn, bool write) const override; +}; #endif diff --git a/riscv/isa_parser.cc b/riscv/isa_parser.cc index 81b770c..6ac146b 100644 --- a/riscv/isa_parser.cc +++ b/riscv/isa_parser.cc @@ -181,6 +181,8 @@ isa_parser_t::isa_parser_t(const char* str, const char *priv) } else if (ext_str == "zicbop") { } else if (ext_str == "zicntr") { } else if 
(ext_str == "zihpm") { + } else if (ext_str == "sstc") { + extension_table[EXT_SSTC] = true; } else if (ext_str[0] == 'x') { max_isa |= 1L << ('x' - 'a'); extension_table[toupper('x')] = true; diff --git a/riscv/isa_parser.h b/riscv/isa_parser.h index 6065dbc..c57436e 100644 --- a/riscv/isa_parser.h +++ b/riscv/isa_parser.h @@ -52,6 +52,7 @@ typedef enum { EXT_XZBM, EXT_XZBR, EXT_XZBT, + EXT_SSTC, } isa_extension_t; typedef enum { diff --git a/riscv/processor.cc b/riscv/processor.cc index 6d0d349..0389707 100644 --- a/riscv/processor.cc +++ b/riscv/processor.cc @@ -208,7 +208,7 @@ void state_t::reset(processor_t* const proc, reg_t max_isa) if (proc->extension_enabled_const(EXT_ZICNTR)) { csrmap[CSR_INSTRET] = std::make_shared(proc, CSR_INSTRET, minstret); csrmap[CSR_CYCLE] = std::make_shared(proc, CSR_CYCLE, mcycle); - csrmap[CSR_TIME] = std::make_shared(proc, CSR_TIME, time); + csrmap[CSR_TIME] = time_proxy = std::make_shared(proc, CSR_TIME, time); } if (xlen == 32) { csr_t_p minstreth, mcycleh; @@ -405,7 +405,8 @@ void state_t::reset(processor_t* const proc, reg_t max_isa) if (proc->extension_enabled_const('U')) { const reg_t menvcfg_mask = (proc->extension_enabled(EXT_ZICBOM) ? MENVCFG_CBCFE | MENVCFG_CBIE : 0) | (proc->extension_enabled(EXT_ZICBOZ) ? MENVCFG_CBZE : 0) | - (proc->extension_enabled(EXT_SVPBMT) ? MENVCFG_PBMTE : 0); + (proc->extension_enabled(EXT_SVPBMT) ? MENVCFG_PBMTE : 0) | + (proc->extension_enabled(EXT_SSTC) ? MENVCFG_STCE : 0); const reg_t menvcfg_init = (proc->extension_enabled(EXT_SVPBMT) ? MENVCFG_PBMTE : 0); menvcfg = std::make_shared(proc, CSR_MENVCFG, menvcfg_mask, menvcfg_init); if (xlen == 32) { @@ -419,7 +420,8 @@ void state_t::reset(processor_t* const proc, reg_t max_isa) csrmap[CSR_SENVCFG] = senvcfg = std::make_shared(proc, CSR_SENVCFG, senvcfg_mask, 0); const reg_t henvcfg_mask = (proc->extension_enabled(EXT_ZICBOM) ? HENVCFG_CBCFE | HENVCFG_CBIE : 0) | (proc->extension_enabled(EXT_ZICBOZ) ? HENVCFG_CBZE : 0) | - (proc->extension_enabled(EXT_SVPBMT) ? HENVCFG_PBMTE : 0); + (proc->extension_enabled(EXT_SVPBMT) ? HENVCFG_PBMTE : 0) | + (proc->extension_enabled(EXT_SSTC) ? HENVCFG_STCE : 0); const reg_t henvcfg_init = (proc->extension_enabled(EXT_SVPBMT) ? 
HENVCFG_PBMTE : 0); henvcfg = std::make_shared(proc, CSR_HENVCFG, henvcfg_mask, henvcfg_init, menvcfg); if (xlen == 32) { @@ -457,6 +459,21 @@ void state_t::reset(processor_t* const proc, reg_t max_isa) } } + if (proc->extension_enabled_const(EXT_SSTC)) { + stimecmp = std::make_shared(proc, CSR_STIMECMP, MIP_STIP); + vstimecmp = std::make_shared(proc, CSR_VSTIMECMP, MIP_VSTIP); + auto virtualized_stimecmp = std::make_shared(proc, stimecmp, vstimecmp); + if (xlen == 32) { + csrmap[CSR_STIMECMP] = std::make_shared(proc, CSR_STIMECMP, virtualized_stimecmp); + csrmap[CSR_STIMECMPH] = std::make_shared(proc, CSR_STIMECMPH, virtualized_stimecmp); + csrmap[CSR_VSTIMECMP] = std::make_shared(proc, CSR_VSTIMECMP, vstimecmp); + csrmap[CSR_VSTIMECMPH] = std::make_shared(proc, CSR_VSTIMECMPH, vstimecmp); + } else { + csrmap[CSR_STIMECMP] = virtualized_stimecmp; + csrmap[CSR_VSTIMECMP] = vstimecmp; + } + } + serialized = false; #ifdef RISCV_ENABLE_COMMITLOG diff --git a/riscv/processor.h b/riscv/processor.h index b415402..88ddf70 100644 --- a/riscv/processor.h +++ b/riscv/processor.h @@ -193,6 +193,10 @@ struct state_t csr_t_p htimedelta; time_counter_csr_t_p time; + csr_t_p time_proxy; + + csr_t_p stimecmp; + csr_t_p vstimecmp; bool serialized; // whether timer CSRs are in a well-defined state -- cgit v1.1 From 7812b01154c6a2f19e0d4bc9f00e5d5fcb9aec22 Mon Sep 17 00:00:00 2001 From: liweiwei Date: Thu, 14 Oct 2021 09:44:54 +0800 Subject: Add flags for Zfinx/Zdinx/Zhinx{min} Hardwire mstatus_fs to zero when enable ZFinx --- riscv/csrs.cc | 6 +++--- riscv/isa_parser.cc | 16 ++++++++++++++++ 2 files changed, 19 insertions(+), 3 deletions(-) diff --git a/riscv/csrs.cc b/riscv/csrs.cc index 50cbfb3..fc5dce1 100644 --- a/riscv/csrs.cc +++ b/riscv/csrs.cc @@ -394,8 +394,8 @@ base_status_csr_t::base_status_csr_t(processor_t* const proc, const reg_t addr): reg_t base_status_csr_t::compute_sstatus_write_mask() const noexcept { // If a configuration has FS bits, they will always be accessible no // matter the state of misa. - const bool has_fs = proc->extension_enabled('S') || proc->extension_enabled('F') - || proc->extension_enabled('V'); + const bool has_fs = (proc->extension_enabled('S') || proc->extension_enabled('F') + || proc->extension_enabled('V')) && !proc->extension_enabled(EXT_ZFINX); const bool has_vs = proc->extension_enabled('V'); return 0 | (proc->extension_enabled('S') ? 
(SSTATUS_SIE | SSTATUS_SPIE | SSTATUS_SPP) : 0) @@ -1202,7 +1202,7 @@ float_csr_t::float_csr_t(processor_t* const proc, const reg_t addr, const reg_t void float_csr_t::verify_permissions(insn_t insn, bool write) const { masked_csr_t::verify_permissions(insn, write); require_fp; - if (!proc->extension_enabled('F')) + if (!proc->extension_enabled('F') && !proc->extension_enabled(EXT_ZFINX)) throw trap_illegal_instruction(insn.bits()); } diff --git a/riscv/isa_parser.cc b/riscv/isa_parser.cc index 6ac146b..ddb98c9 100644 --- a/riscv/isa_parser.cc +++ b/riscv/isa_parser.cc @@ -130,6 +130,18 @@ isa_parser_t::isa_parser_t(const char* str, const char *priv) extension_table[EXT_ZBKC] = true; } else if (ext_str == "zbkx") { extension_table[EXT_ZBKX] = true; + } else if (ext_str == "zdinx") { + extension_table[EXT_ZFINX] = true; + extension_table[EXT_ZDINX] = true; + } else if (ext_str == "zfinx") { + extension_table[EXT_ZFINX] = true; + } else if (ext_str == "zhinx") { + extension_table[EXT_ZFINX] = true; + extension_table[EXT_ZHINX] = true; + extension_table[EXT_ZHINXMIN] = true; + } else if (ext_str == "zhinxmin") { + extension_table[EXT_ZFINX] = true; + extension_table[EXT_ZHINXMIN] = true; } else if (ext_str == "zk") { extension_table[EXT_ZBKB] = true; extension_table[EXT_ZBKC] = true; @@ -229,6 +241,10 @@ isa_parser_t::isa_parser_t(const char* str, const char *priv) bad_isa_string(str, ("can't parse: " + std::string(p)).c_str()); } + if (extension_table[EXT_ZFINX] && ((max_isa >> ('f' - 'a')) & 1)) { + bad_isa_string(str, ("Zfinx/ZDinx/Zhinx{min} extensions conflict with Base 'F/D/Q/Zfh{min}' extensions")); + } + std::string lowercase = strtolower(priv); bool user = false, supervisor = false; -- cgit v1.1 From 5de0c89c034cf64fdab8e36d2dc7488aa035d823 Mon Sep 17 00:00:00 2001 From: liweiwei Date: Thu, 14 Oct 2021 09:49:10 +0800 Subject: Modify F/D/Zfh instructions to add support for Zfinx/Zdinx/Zhinx{min} instructions change the extention check for F/D/Zfh instructions modify the F/D/Zfh instructions to read X regs when enable Zfinx Co-authored-by: wangmeng --- riscv/decode.h | 42 ++++++++++++++++++++++++++++++++++++++++++ riscv/insns/fadd_d.h | 4 ++-- riscv/insns/fadd_h.h | 4 ++-- riscv/insns/fadd_s.h | 4 ++-- riscv/insns/fclass_d.h | 4 ++-- riscv/insns/fclass_h.h | 4 ++-- riscv/insns/fclass_s.h | 4 ++-- riscv/insns/fcvt_d_h.h | 6 +++--- riscv/insns/fcvt_d_l.h | 4 ++-- riscv/insns/fcvt_d_lu.h | 4 ++-- riscv/insns/fcvt_d_s.h | 4 ++-- riscv/insns/fcvt_d_w.h | 4 ++-- riscv/insns/fcvt_d_wu.h | 4 ++-- riscv/insns/fcvt_h_d.h | 6 +++--- riscv/insns/fcvt_h_l.h | 4 ++-- riscv/insns/fcvt_h_lu.h | 4 ++-- riscv/insns/fcvt_h_s.h | 4 ++-- riscv/insns/fcvt_h_w.h | 4 ++-- riscv/insns/fcvt_h_wu.h | 4 ++-- riscv/insns/fcvt_l_d.h | 4 ++-- riscv/insns/fcvt_l_h.h | 4 ++-- riscv/insns/fcvt_l_s.h | 4 ++-- riscv/insns/fcvt_lu_d.h | 4 ++-- riscv/insns/fcvt_lu_h.h | 4 ++-- riscv/insns/fcvt_lu_s.h | 4 ++-- riscv/insns/fcvt_s_d.h | 4 ++-- riscv/insns/fcvt_s_h.h | 4 ++-- riscv/insns/fcvt_s_l.h | 4 ++-- riscv/insns/fcvt_s_lu.h | 4 ++-- riscv/insns/fcvt_s_w.h | 4 ++-- riscv/insns/fcvt_s_wu.h | 4 ++-- riscv/insns/fcvt_w_d.h | 4 ++-- riscv/insns/fcvt_w_h.h | 4 ++-- riscv/insns/fcvt_w_s.h | 4 ++-- riscv/insns/fcvt_wu_d.h | 4 ++-- riscv/insns/fcvt_wu_h.h | 4 ++-- riscv/insns/fcvt_wu_s.h | 4 ++-- riscv/insns/fdiv_d.h | 4 ++-- riscv/insns/fdiv_h.h | 4 ++-- riscv/insns/fdiv_s.h | 4 ++-- riscv/insns/feq_d.h | 4 ++-- riscv/insns/feq_h.h | 4 ++-- riscv/insns/feq_s.h | 4 ++-- riscv/insns/fle_d.h | 4 ++-- riscv/insns/fle_h.h | 4 ++-- 
riscv/insns/fle_s.h | 4 ++-- riscv/insns/flt_d.h | 4 ++-- riscv/insns/flt_h.h | 4 ++-- riscv/insns/flt_s.h | 4 ++-- riscv/insns/fmadd_d.h | 4 ++-- riscv/insns/fmadd_h.h | 4 ++-- riscv/insns/fmadd_s.h | 4 ++-- riscv/insns/fmax_d.h | 12 ++++++------ riscv/insns/fmax_h.h | 4 ++-- riscv/insns/fmax_s.h | 12 ++++++------ riscv/insns/fmin_d.h | 12 ++++++------ riscv/insns/fmin_h.h | 4 ++-- riscv/insns/fmin_s.h | 12 ++++++------ riscv/insns/fmsub_d.h | 4 ++-- riscv/insns/fmsub_h.h | 4 ++-- riscv/insns/fmsub_s.h | 4 ++-- riscv/insns/fmul_d.h | 4 ++-- riscv/insns/fmul_h.h | 4 ++-- riscv/insns/fmul_s.h | 4 ++-- riscv/insns/fnmadd_d.h | 4 ++-- riscv/insns/fnmadd_h.h | 4 ++-- riscv/insns/fnmadd_s.h | 4 ++-- riscv/insns/fnmsub_d.h | 4 ++-- riscv/insns/fnmsub_h.h | 4 ++-- riscv/insns/fnmsub_s.h | 4 ++-- riscv/insns/fsgnj_d.h | 4 ++-- riscv/insns/fsgnj_h.h | 4 ++-- riscv/insns/fsgnj_s.h | 4 ++-- riscv/insns/fsgnjn_d.h | 4 ++-- riscv/insns/fsgnjn_h.h | 4 ++-- riscv/insns/fsgnjn_q.h | 2 +- riscv/insns/fsgnjn_s.h | 4 ++-- riscv/insns/fsgnjx_d.h | 4 ++-- riscv/insns/fsgnjx_h.h | 4 ++-- riscv/insns/fsgnjx_s.h | 4 ++-- riscv/insns/fsqrt_d.h | 4 ++-- riscv/insns/fsqrt_h.h | 4 ++-- riscv/insns/fsqrt_s.h | 4 ++-- riscv/insns/fsub_d.h | 4 ++-- riscv/insns/fsub_h.h | 4 ++-- riscv/insns/fsub_s.h | 4 ++-- riscv/insns/vfslide1down_vf.h | 6 +++--- riscv/insns/vfslide1up_vf.h | 6 +++--- riscv/v_ext_macros.h | 8 ++++---- 89 files changed, 239 insertions(+), 197 deletions(-) diff --git a/riscv/decode.h b/riscv/decode.h index 72b4a6b..b5fad2d 100644 --- a/riscv/decode.h +++ b/riscv/decode.h @@ -223,14 +223,56 @@ private: #define RVC_SP READ_REG(X_SP) // FPU macros +#define READ_ZDINX_REG(reg) (xlen == 32 ? f64(READ_REG_PAIR(reg)) : f64(STATE.XPR[reg] & (uint64_t)-1)) +#define READ_FREG_H(reg) (p->extension_enabled(EXT_ZFINX) ? f16(STATE.XPR[reg] & (uint16_t)-1) : f16(READ_FREG(reg))) +#define READ_FREG_F(reg) (p->extension_enabled(EXT_ZFINX) ? f32(STATE.XPR[reg] & (uint32_t)-1) : f32(READ_FREG(reg))) +#define READ_FREG_D(reg) (p->extension_enabled(EXT_ZFINX) ? 
READ_ZDINX_REG(reg) : f64(READ_FREG(reg))) #define FRS1 READ_FREG(insn.rs1()) #define FRS2 READ_FREG(insn.rs2()) #define FRS3 READ_FREG(insn.rs3()) +#define FRS1_H READ_FREG_H(insn.rs1()) +#define FRS1_F READ_FREG_F(insn.rs1()) +#define FRS1_D READ_FREG_D(insn.rs1()) +#define FRS2_H READ_FREG_H(insn.rs2()) +#define FRS2_F READ_FREG_F(insn.rs2()) +#define FRS2_D READ_FREG_D(insn.rs2()) +#define FRS3_H READ_FREG_H(insn.rs3()) +#define FRS3_F READ_FREG_F(insn.rs3()) +#define FRS3_D READ_FREG_D(insn.rs3()) #define dirty_fp_state STATE.sstatus->dirty(SSTATUS_FS) #define dirty_ext_state STATE.sstatus->dirty(SSTATUS_XS) #define dirty_vs_state STATE.sstatus->dirty(SSTATUS_VS) #define DO_WRITE_FREG(reg, value) (STATE.FPR.write(reg, value), dirty_fp_state) #define WRITE_FRD(value) WRITE_FREG(insn.rd(), value) +#define WRITE_FRD_H(value) \ +do { \ + if (p->extension_enabled(EXT_ZFINX)) \ + WRITE_REG(insn.rd(), sext_xlen((int16_t)((value).v))); \ + else { \ + WRITE_FRD(value); \ + } \ +} while(0) +#define WRITE_FRD_F(value) \ +do { \ + if (p->extension_enabled(EXT_ZFINX)) \ + WRITE_REG(insn.rd(), sext_xlen((value).v)); \ + else { \ + WRITE_FRD(value); \ + } \ +} while(0) +#define WRITE_FRD_D(value) \ +do { \ + if (p->extension_enabled(EXT_ZFINX)) { \ + if (xlen == 32) { \ + uint64_t val = (value).v; \ + WRITE_RD_PAIR(val); \ + } else { \ + WRITE_REG(insn.rd(), (value).v); \ + } \ + } else { \ + WRITE_FRD(value); \ + } \ +} while(0) #define SHAMT (insn.i_imm() & 0x3F) #define BRANCH_TARGET (pc + insn.sb_imm()) diff --git a/riscv/insns/fadd_d.h b/riscv/insns/fadd_d.h index 4a436e2..9bfff5f 100644 --- a/riscv/insns/fadd_d.h +++ b/riscv/insns/fadd_d.h @@ -1,5 +1,5 @@ -require_extension('D'); +require_either_extension('D', EXT_ZDINX); require_fp; softfloat_roundingMode = RM; -WRITE_FRD(f64_add(f64(FRS1), f64(FRS2))); +WRITE_FRD_D(f64_add(FRS1_D, FRS2_D)); set_fp_exceptions; diff --git a/riscv/insns/fadd_h.h b/riscv/insns/fadd_h.h index 2b646ae..f57e5fa 100644 --- a/riscv/insns/fadd_h.h +++ b/riscv/insns/fadd_h.h @@ -1,5 +1,5 @@ -require_extension(EXT_ZFH); +require_either_extension(EXT_ZFH, EXT_ZHINX); require_fp; softfloat_roundingMode = RM; -WRITE_FRD(f16_add(f16(FRS1), f16(FRS2))); +WRITE_FRD_H(f16_add(FRS1_H, FRS2_H)); set_fp_exceptions; diff --git a/riscv/insns/fadd_s.h b/riscv/insns/fadd_s.h index cc18d58..7a40b1b 100644 --- a/riscv/insns/fadd_s.h +++ b/riscv/insns/fadd_s.h @@ -1,5 +1,5 @@ -require_extension('F'); +require_either_extension('F', EXT_ZFINX); require_fp; softfloat_roundingMode = RM; -WRITE_FRD(f32_add(f32(FRS1), f32(FRS2))); +WRITE_FRD_F(f32_add(FRS1_F, FRS2_F)); set_fp_exceptions; diff --git a/riscv/insns/fclass_d.h b/riscv/insns/fclass_d.h index 9456123..a355062 100644 --- a/riscv/insns/fclass_d.h +++ b/riscv/insns/fclass_d.h @@ -1,3 +1,3 @@ -require_extension('D'); +require_either_extension('D', EXT_ZDINX); require_fp; -WRITE_RD(f64_classify(f64(FRS1))); +WRITE_RD(f64_classify(FRS1_D)); diff --git a/riscv/insns/fclass_h.h b/riscv/insns/fclass_h.h index 066a2d2..2638ac8 100644 --- a/riscv/insns/fclass_h.h +++ b/riscv/insns/fclass_h.h @@ -1,3 +1,3 @@ -require_extension(EXT_ZFH); +require_either_extension(EXT_ZFH, EXT_ZHINX); require_fp; -WRITE_RD(f16_classify(f16(FRS1))); +WRITE_RD(f16_classify(FRS1_H)); diff --git a/riscv/insns/fclass_s.h b/riscv/insns/fclass_s.h index a392db8..3d529ad 100644 --- a/riscv/insns/fclass_s.h +++ b/riscv/insns/fclass_s.h @@ -1,3 +1,3 @@ -require_extension('F'); +require_either_extension('F', EXT_ZFINX); require_fp; -WRITE_RD(f32_classify(f32(FRS1))); 
+WRITE_RD(f32_classify(FRS1_F)); diff --git a/riscv/insns/fcvt_d_h.h b/riscv/insns/fcvt_d_h.h index 04e9ff4..061a271 100644 --- a/riscv/insns/fcvt_d_h.h +++ b/riscv/insns/fcvt_d_h.h @@ -1,6 +1,6 @@ -require_extension(EXT_ZFHMIN); -require_extension('D'); +require_either_extension(EXT_ZFHMIN, EXT_ZHINXMIN); +require_either_extension('D', EXT_ZDINX); require_fp; softfloat_roundingMode = RM; -WRITE_FRD(f16_to_f64(f16(FRS1))); +WRITE_FRD_D(f16_to_f64(FRS1_H)); set_fp_exceptions; diff --git a/riscv/insns/fcvt_d_l.h b/riscv/insns/fcvt_d_l.h index 08716cf..7788f1f 100644 --- a/riscv/insns/fcvt_d_l.h +++ b/riscv/insns/fcvt_d_l.h @@ -1,6 +1,6 @@ -require_extension('D'); +require_either_extension('D', EXT_ZDINX); require_rv64; require_fp; softfloat_roundingMode = RM; -WRITE_FRD(i64_to_f64(RS1)); +WRITE_FRD_D(i64_to_f64(RS1)); set_fp_exceptions; diff --git a/riscv/insns/fcvt_d_lu.h b/riscv/insns/fcvt_d_lu.h index 306d7fe..edb694f 100644 --- a/riscv/insns/fcvt_d_lu.h +++ b/riscv/insns/fcvt_d_lu.h @@ -1,6 +1,6 @@ -require_extension('D'); +require_either_extension('D', EXT_ZDINX); require_rv64; require_fp; softfloat_roundingMode = RM; -WRITE_FRD(ui64_to_f64(RS1)); +WRITE_FRD_D(ui64_to_f64(RS1)); set_fp_exceptions; diff --git a/riscv/insns/fcvt_d_s.h b/riscv/insns/fcvt_d_s.h index 5f805b0..8039e94 100644 --- a/riscv/insns/fcvt_d_s.h +++ b/riscv/insns/fcvt_d_s.h @@ -1,5 +1,5 @@ -require_extension('D'); +require_either_extension('D', EXT_ZDINX); require_fp; softfloat_roundingMode = RM; -WRITE_FRD(f32_to_f64(f32(FRS1))); +WRITE_FRD_D(f32_to_f64(FRS1_F)); set_fp_exceptions; diff --git a/riscv/insns/fcvt_d_w.h b/riscv/insns/fcvt_d_w.h index 4c4861c..e3375fa 100644 --- a/riscv/insns/fcvt_d_w.h +++ b/riscv/insns/fcvt_d_w.h @@ -1,5 +1,5 @@ -require_extension('D'); +require_either_extension('D', EXT_ZDINX); require_fp; softfloat_roundingMode = RM; -WRITE_FRD(i32_to_f64((int32_t)RS1)); +WRITE_FRD_D(i32_to_f64((int32_t)RS1)); set_fp_exceptions; diff --git a/riscv/insns/fcvt_d_wu.h b/riscv/insns/fcvt_d_wu.h index 1dbf218..d903561 100644 --- a/riscv/insns/fcvt_d_wu.h +++ b/riscv/insns/fcvt_d_wu.h @@ -1,5 +1,5 @@ -require_extension('D'); +require_either_extension('D', EXT_ZDINX); require_fp; softfloat_roundingMode = RM; -WRITE_FRD(ui32_to_f64((uint32_t)RS1)); +WRITE_FRD_D(ui32_to_f64((uint32_t)RS1)); set_fp_exceptions; diff --git a/riscv/insns/fcvt_h_d.h b/riscv/insns/fcvt_h_d.h index e9987b7..e06b1a5 100644 --- a/riscv/insns/fcvt_h_d.h +++ b/riscv/insns/fcvt_h_d.h @@ -1,6 +1,6 @@ -require_extension(EXT_ZFHMIN); -require_extension('D'); +require_either_extension(EXT_ZFHMIN, EXT_ZHINXMIN); +require_either_extension('D', EXT_ZDINX); require_fp; softfloat_roundingMode = RM; -WRITE_FRD(f64_to_f16(f64(FRS1))); +WRITE_FRD_H(f64_to_f16(FRS1_D)); set_fp_exceptions; diff --git a/riscv/insns/fcvt_h_l.h b/riscv/insns/fcvt_h_l.h index 39178c2..31e8a1e 100644 --- a/riscv/insns/fcvt_h_l.h +++ b/riscv/insns/fcvt_h_l.h @@ -1,6 +1,6 @@ -require_extension(EXT_ZFH); +require_either_extension(EXT_ZFH, EXT_ZHINX); require_rv64; require_fp; softfloat_roundingMode = RM; -WRITE_FRD(i64_to_f16(RS1)); +WRITE_FRD_H(i64_to_f16(RS1)); set_fp_exceptions; diff --git a/riscv/insns/fcvt_h_lu.h b/riscv/insns/fcvt_h_lu.h index a872c48..189b160 100644 --- a/riscv/insns/fcvt_h_lu.h +++ b/riscv/insns/fcvt_h_lu.h @@ -1,6 +1,6 @@ -require_extension(EXT_ZFH); +require_either_extension(EXT_ZFH, EXT_ZHINX); require_rv64; require_fp; softfloat_roundingMode = RM; -WRITE_FRD(ui64_to_f16(RS1)); +WRITE_FRD_H(ui64_to_f16(RS1)); set_fp_exceptions; diff --git 
a/riscv/insns/fcvt_h_s.h b/riscv/insns/fcvt_h_s.h index ce39d81..57ba005 100644 --- a/riscv/insns/fcvt_h_s.h +++ b/riscv/insns/fcvt_h_s.h @@ -1,5 +1,5 @@ -require_extension(EXT_ZFHMIN); +require_either_extension(EXT_ZFHMIN, EXT_ZHINXMIN); require_fp; softfloat_roundingMode = RM; -WRITE_FRD(f32_to_f16(f32(FRS1))); +WRITE_FRD_H(f32_to_f16(FRS1_F)); set_fp_exceptions; diff --git a/riscv/insns/fcvt_h_w.h b/riscv/insns/fcvt_h_w.h index c082454..de4cbe5 100644 --- a/riscv/insns/fcvt_h_w.h +++ b/riscv/insns/fcvt_h_w.h @@ -1,5 +1,5 @@ -require_extension(EXT_ZFH); +require_either_extension(EXT_ZFH, EXT_ZHINX); require_fp; softfloat_roundingMode = RM; -WRITE_FRD(i32_to_f16((int32_t)RS1)); +WRITE_FRD_H(i32_to_f16((int32_t)RS1)); set_fp_exceptions; diff --git a/riscv/insns/fcvt_h_wu.h b/riscv/insns/fcvt_h_wu.h index 9f2f5f6..230c354 100644 --- a/riscv/insns/fcvt_h_wu.h +++ b/riscv/insns/fcvt_h_wu.h @@ -1,5 +1,5 @@ -require_extension(EXT_ZFH); +require_either_extension(EXT_ZFH, EXT_ZHINX); require_fp; softfloat_roundingMode = RM; -WRITE_FRD(ui32_to_f16((uint32_t)RS1)); +WRITE_FRD_H(ui32_to_f16((uint32_t)RS1)); set_fp_exceptions; diff --git a/riscv/insns/fcvt_l_d.h b/riscv/insns/fcvt_l_d.h index c09e6c4..f2374d2 100644 --- a/riscv/insns/fcvt_l_d.h +++ b/riscv/insns/fcvt_l_d.h @@ -1,6 +1,6 @@ -require_extension('D'); +require_either_extension('D', EXT_ZDINX); require_rv64; require_fp; softfloat_roundingMode = RM; -WRITE_RD(f64_to_i64(f64(FRS1), RM, true)); +WRITE_RD(f64_to_i64(FRS1_D, RM, true)); set_fp_exceptions; diff --git a/riscv/insns/fcvt_l_h.h b/riscv/insns/fcvt_l_h.h index 5a1fea8..3b63027 100644 --- a/riscv/insns/fcvt_l_h.h +++ b/riscv/insns/fcvt_l_h.h @@ -1,6 +1,6 @@ -require_extension(EXT_ZFH); +require_either_extension(EXT_ZFH, EXT_ZHINX); require_rv64; require_fp; softfloat_roundingMode = RM; -WRITE_RD(f16_to_i64(f16(FRS1), RM, true)); +WRITE_RD(f16_to_i64(FRS1_H, RM, true)); set_fp_exceptions; diff --git a/riscv/insns/fcvt_l_s.h b/riscv/insns/fcvt_l_s.h index 267e0eb..d121a65 100644 --- a/riscv/insns/fcvt_l_s.h +++ b/riscv/insns/fcvt_l_s.h @@ -1,6 +1,6 @@ -require_extension('F'); +require_either_extension('F', EXT_ZFINX); require_rv64; require_fp; softfloat_roundingMode = RM; -WRITE_RD(f32_to_i64(f32(FRS1), RM, true)); +WRITE_RD(f32_to_i64(FRS1_F, RM, true)); set_fp_exceptions; diff --git a/riscv/insns/fcvt_lu_d.h b/riscv/insns/fcvt_lu_d.h index 3a02120..939bc0e 100644 --- a/riscv/insns/fcvt_lu_d.h +++ b/riscv/insns/fcvt_lu_d.h @@ -1,6 +1,6 @@ -require_extension('D'); +require_either_extension('D', EXT_ZDINX); require_rv64; require_fp; softfloat_roundingMode = RM; -WRITE_RD(f64_to_ui64(f64(FRS1), RM, true)); +WRITE_RD(f64_to_ui64(FRS1_D, RM, true)); set_fp_exceptions; diff --git a/riscv/insns/fcvt_lu_h.h b/riscv/insns/fcvt_lu_h.h index f1454c3..d27f175 100644 --- a/riscv/insns/fcvt_lu_h.h +++ b/riscv/insns/fcvt_lu_h.h @@ -1,6 +1,6 @@ -require_extension(EXT_ZFH); +require_either_extension(EXT_ZFH, EXT_ZHINX); require_rv64; require_fp; softfloat_roundingMode = RM; -WRITE_RD(f16_to_ui64(f16(FRS1), RM, true)); +WRITE_RD(f16_to_ui64(FRS1_H, RM, true)); set_fp_exceptions; diff --git a/riscv/insns/fcvt_lu_s.h b/riscv/insns/fcvt_lu_s.h index 94115a3..69c95ef 100644 --- a/riscv/insns/fcvt_lu_s.h +++ b/riscv/insns/fcvt_lu_s.h @@ -1,6 +1,6 @@ -require_extension('F'); +require_either_extension('F', EXT_ZFINX); require_rv64; require_fp; softfloat_roundingMode = RM; -WRITE_RD(f32_to_ui64(f32(FRS1), RM, true)); +WRITE_RD(f32_to_ui64(FRS1_F, RM, true)); set_fp_exceptions; diff --git 
a/riscv/insns/fcvt_s_d.h b/riscv/insns/fcvt_s_d.h index 4033335..f3cd26e 100644 --- a/riscv/insns/fcvt_s_d.h +++ b/riscv/insns/fcvt_s_d.h @@ -1,5 +1,5 @@ -require_extension('D'); +require_either_extension('D', EXT_ZDINX); require_fp; softfloat_roundingMode = RM; -WRITE_FRD(f64_to_f32(f64(FRS1))); +WRITE_FRD_F(f64_to_f32(FRS1_D)); set_fp_exceptions; diff --git a/riscv/insns/fcvt_s_h.h b/riscv/insns/fcvt_s_h.h index 22cdd72..346440a 100644 --- a/riscv/insns/fcvt_s_h.h +++ b/riscv/insns/fcvt_s_h.h @@ -1,5 +1,5 @@ -require_extension(EXT_ZFHMIN); +require_either_extension(EXT_ZFHMIN, EXT_ZHINXMIN); require_fp; softfloat_roundingMode = RM; -WRITE_FRD(f16_to_f32(f16(FRS1))); +WRITE_FRD_F(f16_to_f32(FRS1_H)); set_fp_exceptions; diff --git a/riscv/insns/fcvt_s_l.h b/riscv/insns/fcvt_s_l.h index 9abcc80..1d096d2 100644 --- a/riscv/insns/fcvt_s_l.h +++ b/riscv/insns/fcvt_s_l.h @@ -1,6 +1,6 @@ -require_extension('F'); +require_either_extension('F', EXT_ZFINX); require_rv64; require_fp; softfloat_roundingMode = RM; -WRITE_FRD(i64_to_f32(RS1)); +WRITE_FRD_F(i64_to_f32(RS1)); set_fp_exceptions; diff --git a/riscv/insns/fcvt_s_lu.h b/riscv/insns/fcvt_s_lu.h index 70c676e..e4e84cf 100644 --- a/riscv/insns/fcvt_s_lu.h +++ b/riscv/insns/fcvt_s_lu.h @@ -1,6 +1,6 @@ -require_extension('F'); +require_either_extension('F', EXT_ZFINX); require_rv64; require_fp; softfloat_roundingMode = RM; -WRITE_FRD(ui64_to_f32(RS1)); +WRITE_FRD_F(ui64_to_f32(RS1)); set_fp_exceptions; diff --git a/riscv/insns/fcvt_s_w.h b/riscv/insns/fcvt_s_w.h index 1ddabd8..75c87db 100644 --- a/riscv/insns/fcvt_s_w.h +++ b/riscv/insns/fcvt_s_w.h @@ -1,5 +1,5 @@ -require_extension('F'); +require_either_extension('F', EXT_ZFINX); require_fp; softfloat_roundingMode = RM; -WRITE_FRD(i32_to_f32((int32_t)RS1)); +WRITE_FRD_F(i32_to_f32((int32_t)RS1)); set_fp_exceptions; diff --git a/riscv/insns/fcvt_s_wu.h b/riscv/insns/fcvt_s_wu.h index c1394c3..ec90fad 100644 --- a/riscv/insns/fcvt_s_wu.h +++ b/riscv/insns/fcvt_s_wu.h @@ -1,5 +1,5 @@ -require_extension('F'); +require_either_extension('F', EXT_ZFINX); require_fp; softfloat_roundingMode = RM; -WRITE_FRD(ui32_to_f32((uint32_t)RS1)); +WRITE_FRD_F(ui32_to_f32((uint32_t)RS1)); set_fp_exceptions; diff --git a/riscv/insns/fcvt_w_d.h b/riscv/insns/fcvt_w_d.h index 28eb245..a839f4b 100644 --- a/riscv/insns/fcvt_w_d.h +++ b/riscv/insns/fcvt_w_d.h @@ -1,5 +1,5 @@ -require_extension('D'); +require_either_extension('D', EXT_ZDINX); require_fp; softfloat_roundingMode = RM; -WRITE_RD(sext32(f64_to_i32(f64(FRS1), RM, true))); +WRITE_RD(sext32(f64_to_i32(FRS1_D, RM, true))); set_fp_exceptions; diff --git a/riscv/insns/fcvt_w_h.h b/riscv/insns/fcvt_w_h.h index fe8bb48..97e49a5 100644 --- a/riscv/insns/fcvt_w_h.h +++ b/riscv/insns/fcvt_w_h.h @@ -1,5 +1,5 @@ -require_extension(EXT_ZFH); +require_either_extension(EXT_ZFH, EXT_ZHINX); require_fp; softfloat_roundingMode = RM; -WRITE_RD(sext32(f16_to_i32(f16(FRS1), RM, true))); +WRITE_RD(sext32(f16_to_i32(FRS1_H, RM, true))); set_fp_exceptions; diff --git a/riscv/insns/fcvt_w_s.h b/riscv/insns/fcvt_w_s.h index d30f1b4..6aeb510 100644 --- a/riscv/insns/fcvt_w_s.h +++ b/riscv/insns/fcvt_w_s.h @@ -1,5 +1,5 @@ -require_extension('F'); +require_either_extension('F', EXT_ZFINX); require_fp; softfloat_roundingMode = RM; -WRITE_RD(sext32(f32_to_i32(f32(FRS1), RM, true))); +WRITE_RD(sext32(f32_to_i32(FRS1_F, RM, true))); set_fp_exceptions; diff --git a/riscv/insns/fcvt_wu_d.h b/riscv/insns/fcvt_wu_d.h index 5cdc004..906f003 100644 --- a/riscv/insns/fcvt_wu_d.h +++ 
b/riscv/insns/fcvt_wu_d.h @@ -1,5 +1,5 @@ -require_extension('D'); +require_either_extension('D', EXT_ZDINX); require_fp; softfloat_roundingMode = RM; -WRITE_RD(sext32(f64_to_ui32(f64(FRS1), RM, true))); +WRITE_RD(sext32(f64_to_ui32(FRS1_D, RM, true))); set_fp_exceptions; diff --git a/riscv/insns/fcvt_wu_h.h b/riscv/insns/fcvt_wu_h.h index bf6648d..ce11143 100644 --- a/riscv/insns/fcvt_wu_h.h +++ b/riscv/insns/fcvt_wu_h.h @@ -1,5 +1,5 @@ -require_extension(EXT_ZFH); +require_either_extension(EXT_ZFH, EXT_ZHINX); require_fp; softfloat_roundingMode = RM; -WRITE_RD(sext32(f16_to_ui32(f16(FRS1), RM, true))); +WRITE_RD(sext32(f16_to_ui32(FRS1_H, RM, true))); set_fp_exceptions; diff --git a/riscv/insns/fcvt_wu_s.h b/riscv/insns/fcvt_wu_s.h index 034d681..a8b8455 100644 --- a/riscv/insns/fcvt_wu_s.h +++ b/riscv/insns/fcvt_wu_s.h @@ -1,5 +1,5 @@ -require_extension('F'); +require_either_extension('F', EXT_ZFINX); require_fp; softfloat_roundingMode = RM; -WRITE_RD(sext32(f32_to_ui32(f32(FRS1), RM, true))); +WRITE_RD(sext32(f32_to_ui32(FRS1_F, RM, true))); set_fp_exceptions; diff --git a/riscv/insns/fdiv_d.h b/riscv/insns/fdiv_d.h index ae7911a..990afca 100644 --- a/riscv/insns/fdiv_d.h +++ b/riscv/insns/fdiv_d.h @@ -1,5 +1,5 @@ -require_extension('D'); +require_either_extension('D', EXT_ZDINX); require_fp; softfloat_roundingMode = RM; -WRITE_FRD(f64_div(f64(FRS1), f64(FRS2))); +WRITE_FRD_D(f64_div(FRS1_D, FRS2_D)); set_fp_exceptions; diff --git a/riscv/insns/fdiv_h.h b/riscv/insns/fdiv_h.h index a169eae..91c518b 100644 --- a/riscv/insns/fdiv_h.h +++ b/riscv/insns/fdiv_h.h @@ -1,5 +1,5 @@ -require_extension(EXT_ZFH); +require_either_extension(EXT_ZFH, EXT_ZHINX); require_fp; softfloat_roundingMode = RM; -WRITE_FRD(f16_div(f16(FRS1), f16(FRS2))); +WRITE_FRD_H(f16_div(FRS1_H, FRS2_H)); set_fp_exceptions; diff --git a/riscv/insns/fdiv_s.h b/riscv/insns/fdiv_s.h index c74ff04..180b41d 100644 --- a/riscv/insns/fdiv_s.h +++ b/riscv/insns/fdiv_s.h @@ -1,5 +1,5 @@ -require_extension('F'); +require_either_extension('F', EXT_ZFINX); require_fp; softfloat_roundingMode = RM; -WRITE_FRD(f32_div(f32(FRS1), f32(FRS2))); +WRITE_FRD_F(f32_div(FRS1_F, FRS2_F)); set_fp_exceptions; diff --git a/riscv/insns/feq_d.h b/riscv/insns/feq_d.h index 541ed5b..9585bad 100644 --- a/riscv/insns/feq_d.h +++ b/riscv/insns/feq_d.h @@ -1,4 +1,4 @@ -require_extension('D'); +require_either_extension('D', EXT_ZDINX); require_fp; -WRITE_RD(f64_eq(f64(FRS1), f64(FRS2))); +WRITE_RD(f64_eq(FRS1_D, FRS2_D)); set_fp_exceptions; diff --git a/riscv/insns/feq_h.h b/riscv/insns/feq_h.h index 47e75a5..5988db9 100644 --- a/riscv/insns/feq_h.h +++ b/riscv/insns/feq_h.h @@ -1,4 +1,4 @@ -require_extension(EXT_ZFH); +require_either_extension(EXT_ZFH, EXT_ZHINX); require_fp; -WRITE_RD(f16_eq(f16(FRS1), f16(FRS2))); +WRITE_RD(f16_eq(FRS1_H, FRS2_H)); set_fp_exceptions; diff --git a/riscv/insns/feq_s.h b/riscv/insns/feq_s.h index 489bea6..97b57c2 100644 --- a/riscv/insns/feq_s.h +++ b/riscv/insns/feq_s.h @@ -1,4 +1,4 @@ -require_extension('F'); +require_either_extension('F', EXT_ZFINX); require_fp; -WRITE_RD(f32_eq(f32(FRS1), f32(FRS2))); +WRITE_RD(f32_eq(FRS1_F, FRS2_F)); set_fp_exceptions; diff --git a/riscv/insns/fle_d.h b/riscv/insns/fle_d.h index 419a36f..17b4932 100644 --- a/riscv/insns/fle_d.h +++ b/riscv/insns/fle_d.h @@ -1,4 +1,4 @@ -require_extension('D'); +require_either_extension('D', EXT_ZDINX); require_fp; -WRITE_RD(f64_le(f64(FRS1), f64(FRS2))); +WRITE_RD(f64_le(FRS1_D, FRS2_D)); set_fp_exceptions; diff --git a/riscv/insns/fle_h.h 
b/riscv/insns/fle_h.h index 9fc5968..31ed8a7 100644 --- a/riscv/insns/fle_h.h +++ b/riscv/insns/fle_h.h @@ -1,4 +1,4 @@ -require_extension(EXT_ZFH); +require_either_extension(EXT_ZFH, EXT_ZHINX); require_fp; -WRITE_RD(f16_le(f16(FRS1), f16(FRS2))); +WRITE_RD(f16_le(FRS1_H, FRS2_H)); set_fp_exceptions; diff --git a/riscv/insns/fle_s.h b/riscv/insns/fle_s.h index 5c0124e..e26f055 100644 --- a/riscv/insns/fle_s.h +++ b/riscv/insns/fle_s.h @@ -1,4 +1,4 @@ -require_extension('F'); +require_either_extension('F', EXT_ZFINX); require_fp; -WRITE_RD(f32_le(f32(FRS1), f32(FRS2))); +WRITE_RD(f32_le(FRS1_F, FRS2_F)); set_fp_exceptions; diff --git a/riscv/insns/flt_d.h b/riscv/insns/flt_d.h index 7176a96..5fb0572 100644 --- a/riscv/insns/flt_d.h +++ b/riscv/insns/flt_d.h @@ -1,4 +1,4 @@ -require_extension('D'); +require_either_extension('D', EXT_ZDINX); require_fp; -WRITE_RD(f64_lt(f64(FRS1), f64(FRS2))); +WRITE_RD(f64_lt(FRS1_D, FRS2_D)); set_fp_exceptions; diff --git a/riscv/insns/flt_h.h b/riscv/insns/flt_h.h index f516a38..dd6bc79 100644 --- a/riscv/insns/flt_h.h +++ b/riscv/insns/flt_h.h @@ -1,4 +1,4 @@ -require_extension(EXT_ZFH); +require_either_extension(EXT_ZFH, EXT_ZHINX); require_fp; -WRITE_RD(f16_lt(f16(FRS1), f16(FRS2))); +WRITE_RD(f16_lt(FRS1_H, FRS2_H)); set_fp_exceptions; diff --git a/riscv/insns/flt_s.h b/riscv/insns/flt_s.h index 40acc34..2f50ed6 100644 --- a/riscv/insns/flt_s.h +++ b/riscv/insns/flt_s.h @@ -1,4 +1,4 @@ -require_extension('F'); +require_either_extension('F', EXT_ZFINX); require_fp; -WRITE_RD(f32_lt(f32(FRS1), f32(FRS2))); +WRITE_RD(f32_lt(FRS1_F, FRS2_F)); set_fp_exceptions; diff --git a/riscv/insns/fmadd_d.h b/riscv/insns/fmadd_d.h index ab22beb..07a8b25 100644 --- a/riscv/insns/fmadd_d.h +++ b/riscv/insns/fmadd_d.h @@ -1,5 +1,5 @@ -require_extension('D'); +require_either_extension('D', EXT_ZDINX); require_fp; softfloat_roundingMode = RM; -WRITE_FRD(f64_mulAdd(f64(FRS1), f64(FRS2), f64(FRS3))); +WRITE_FRD_D(f64_mulAdd(FRS1_D, FRS2_D, FRS3_D)); set_fp_exceptions; diff --git a/riscv/insns/fmadd_h.h b/riscv/insns/fmadd_h.h index 6551de5..5428897 100644 --- a/riscv/insns/fmadd_h.h +++ b/riscv/insns/fmadd_h.h @@ -1,5 +1,5 @@ -require_extension(EXT_ZFH); +require_either_extension(EXT_ZFH, EXT_ZHINX); require_fp; softfloat_roundingMode = RM; -WRITE_FRD(f16_mulAdd(f16(FRS1), f16(FRS2), f16(FRS3))); +WRITE_FRD_H(f16_mulAdd(FRS1_H, FRS2_H, FRS3_H)); set_fp_exceptions; diff --git a/riscv/insns/fmadd_s.h b/riscv/insns/fmadd_s.h index e919190..5a72cf8 100644 --- a/riscv/insns/fmadd_s.h +++ b/riscv/insns/fmadd_s.h @@ -1,5 +1,5 @@ -require_extension('F'); +require_either_extension('F', EXT_ZFINX); require_fp; softfloat_roundingMode = RM; -WRITE_FRD(f32_mulAdd(f32(FRS1), f32(FRS2), f32(FRS3))); +WRITE_FRD_F(f32_mulAdd(FRS1_F, FRS2_F, FRS3_F)); set_fp_exceptions; diff --git a/riscv/insns/fmax_d.h b/riscv/insns/fmax_d.h index 11491f5..3e05b7e 100644 --- a/riscv/insns/fmax_d.h +++ b/riscv/insns/fmax_d.h @@ -1,9 +1,9 @@ -require_extension('D'); +require_either_extension('D', EXT_ZDINX); require_fp; -bool greater = f64_lt_quiet(f64(FRS2), f64(FRS1)) || - (f64_eq(f64(FRS2), f64(FRS1)) && (f64(FRS2).v & F64_SIGN)); -if (isNaNF64UI(f64(FRS1).v) && isNaNF64UI(f64(FRS2).v)) - WRITE_FRD(f64(defaultNaNF64UI)); +bool greater = f64_lt_quiet(FRS2_D, FRS1_D) || + (f64_eq(FRS2_D, FRS1_D) && (FRS2_D.v & F64_SIGN)); +if (isNaNF64UI(FRS1_D.v) && isNaNF64UI(FRS2_D.v)) + WRITE_FRD_D(f64(defaultNaNF64UI)); else - WRITE_FRD(greater || isNaNF64UI(f64(FRS2).v) ? 
FRS1 : FRS2); + WRITE_FRD_D((greater || isNaNF64UI(FRS2_D.v) ? FRS1_D : FRS2_D)); set_fp_exceptions; diff --git a/riscv/insns/fmax_h.h b/riscv/insns/fmax_h.h index 3d4c40e..c864258 100644 --- a/riscv/insns/fmax_h.h +++ b/riscv/insns/fmax_h.h @@ -1,4 +1,4 @@ -require_extension(EXT_ZFH); +require_either_extension(EXT_ZFH, EXT_ZHINX); require_fp; -WRITE_FRD(f16_max(f16(FRS1), f16(FRS2))); +WRITE_FRD_H(f16_max(FRS1_H, FRS2_H)); set_fp_exceptions; diff --git a/riscv/insns/fmax_s.h b/riscv/insns/fmax_s.h index 41d8f92..17d8b3c 100644 --- a/riscv/insns/fmax_s.h +++ b/riscv/insns/fmax_s.h @@ -1,9 +1,9 @@ -require_extension('F'); +require_either_extension('F', EXT_ZFINX); require_fp; -bool greater = f32_lt_quiet(f32(FRS2), f32(FRS1)) || - (f32_eq(f32(FRS2), f32(FRS1)) && (f32(FRS2).v & F32_SIGN)); -if (isNaNF32UI(f32(FRS1).v) && isNaNF32UI(f32(FRS2).v)) - WRITE_FRD(f32(defaultNaNF32UI)); +bool greater = f32_lt_quiet(FRS2_F, FRS1_F) || + (f32_eq(FRS2_F, FRS1_F) && (FRS2_F.v & F32_SIGN)); +if (isNaNF32UI(FRS1_F.v) && isNaNF32UI(FRS2_F.v)) + WRITE_FRD_F(f32(defaultNaNF32UI)); else - WRITE_FRD(greater || isNaNF32UI(f32(FRS2).v) ? FRS1 : FRS2); + WRITE_FRD_F((greater || isNaNF32UI(FRS2_F.v) ? FRS1_F : FRS2_F)); set_fp_exceptions; diff --git a/riscv/insns/fmin_d.h b/riscv/insns/fmin_d.h index 5cf349d..f60a73e 100644 --- a/riscv/insns/fmin_d.h +++ b/riscv/insns/fmin_d.h @@ -1,9 +1,9 @@ -require_extension('D'); +require_either_extension('D', EXT_ZDINX); require_fp; -bool less = f64_lt_quiet(f64(FRS1), f64(FRS2)) || - (f64_eq(f64(FRS1), f64(FRS2)) && (f64(FRS1).v & F64_SIGN)); -if (isNaNF64UI(f64(FRS1).v) && isNaNF64UI(f64(FRS2).v)) - WRITE_FRD(f64(defaultNaNF64UI)); +bool less = f64_lt_quiet(FRS1_D, FRS2_D) || + (f64_eq(FRS1_D, FRS2_D) && (FRS1_D.v & F64_SIGN)); +if (isNaNF64UI(FRS1_D.v) && isNaNF64UI(FRS2_D.v)) + WRITE_FRD_D(f64(defaultNaNF64UI)); else - WRITE_FRD(less || isNaNF64UI(f64(FRS2).v) ? FRS1 : FRS2); + WRITE_FRD_D((less || isNaNF64UI(FRS2_D.v) ? FRS1_D : FRS2_D)); set_fp_exceptions; diff --git a/riscv/insns/fmin_h.h b/riscv/insns/fmin_h.h index 5fb1404..cd02f20 100644 --- a/riscv/insns/fmin_h.h +++ b/riscv/insns/fmin_h.h @@ -1,4 +1,4 @@ -require_extension(EXT_ZFH); +require_either_extension(EXT_ZFH, EXT_ZHINX); require_fp; -WRITE_FRD(f16_min(f16(FRS1), f16(FRS2))); +WRITE_FRD_H(f16_min(FRS1_H, FRS2_H)); set_fp_exceptions; diff --git a/riscv/insns/fmin_s.h b/riscv/insns/fmin_s.h index 19e1193..476a586 100644 --- a/riscv/insns/fmin_s.h +++ b/riscv/insns/fmin_s.h @@ -1,9 +1,9 @@ -require_extension('F'); +require_either_extension('F', EXT_ZFINX); require_fp; -bool less = f32_lt_quiet(f32(FRS1), f32(FRS2)) || - (f32_eq(f32(FRS1), f32(FRS2)) && (f32(FRS1).v & F32_SIGN)); -if (isNaNF32UI(f32(FRS1).v) && isNaNF32UI(f32(FRS2).v)) - WRITE_FRD(f32(defaultNaNF32UI)); +bool less = f32_lt_quiet(FRS1_F, FRS2_F) || + (f32_eq(FRS1_F, FRS2_F) && (FRS1_F.v & F32_SIGN)); +if (isNaNF32UI(FRS1_F.v) && isNaNF32UI(FRS2_F.v)) + WRITE_FRD_F(f32(defaultNaNF32UI)); else - WRITE_FRD(less || isNaNF32UI(f32(FRS2).v) ? FRS1 : FRS2); + WRITE_FRD_F((less || isNaNF32UI(FRS2_F.v) ? 
FRS1_F : FRS2_F)); set_fp_exceptions; diff --git a/riscv/insns/fmsub_d.h b/riscv/insns/fmsub_d.h index 5b5bc0f..1a7d784 100644 --- a/riscv/insns/fmsub_d.h +++ b/riscv/insns/fmsub_d.h @@ -1,5 +1,5 @@ -require_extension('D'); +require_either_extension('D', EXT_ZDINX); require_fp; softfloat_roundingMode = RM; -WRITE_FRD(f64_mulAdd(f64(FRS1), f64(FRS2), f64(f64(FRS3).v ^ F64_SIGN))); +WRITE_FRD_D(f64_mulAdd(FRS1_D, FRS2_D, f64(FRS3_D.v ^ F64_SIGN))); set_fp_exceptions; diff --git a/riscv/insns/fmsub_h.h b/riscv/insns/fmsub_h.h index 934291f..dc6a8e6 100644 --- a/riscv/insns/fmsub_h.h +++ b/riscv/insns/fmsub_h.h @@ -1,5 +1,5 @@ -require_extension(EXT_ZFH); +require_either_extension(EXT_ZFH, EXT_ZHINX); require_fp; softfloat_roundingMode = RM; -WRITE_FRD(f16_mulAdd(f16(FRS1), f16(FRS2), f16(f16(FRS3).v ^ F16_SIGN))); +WRITE_FRD_H(f16_mulAdd(FRS1_H, FRS2_H, f16(FRS3_H.v ^ F16_SIGN))); set_fp_exceptions; diff --git a/riscv/insns/fmsub_s.h b/riscv/insns/fmsub_s.h index d46c887..179cc2f 100644 --- a/riscv/insns/fmsub_s.h +++ b/riscv/insns/fmsub_s.h @@ -1,5 +1,5 @@ -require_extension('F'); +require_either_extension('F', EXT_ZFINX); require_fp; softfloat_roundingMode = RM; -WRITE_FRD(f32_mulAdd(f32(FRS1), f32(FRS2), f32(f32(FRS3).v ^ F32_SIGN))); +WRITE_FRD_F(f32_mulAdd(FRS1_F, FRS2_F, f32(FRS3_F.v ^ F32_SIGN))); set_fp_exceptions; diff --git a/riscv/insns/fmul_d.h b/riscv/insns/fmul_d.h index 9189d8d..e5caa34 100644 --- a/riscv/insns/fmul_d.h +++ b/riscv/insns/fmul_d.h @@ -1,5 +1,5 @@ -require_extension('D'); +require_either_extension('D', EXT_ZDINX); require_fp; softfloat_roundingMode = RM; -WRITE_FRD(f64_mul(f64(FRS1), f64(FRS2))); +WRITE_FRD_D(f64_mul(FRS1_D, FRS2_D)); set_fp_exceptions; diff --git a/riscv/insns/fmul_h.h b/riscv/insns/fmul_h.h index 0152df8..dc7f9c4 100644 --- a/riscv/insns/fmul_h.h +++ b/riscv/insns/fmul_h.h @@ -1,5 +1,5 @@ -require_extension(EXT_ZFH); +require_either_extension(EXT_ZFH, EXT_ZHINX); require_fp; softfloat_roundingMode = RM; -WRITE_FRD(f16_mul(f16(FRS1), f16(FRS2))); +WRITE_FRD_H(f16_mul(FRS1_H, FRS2_H)); set_fp_exceptions; diff --git a/riscv/insns/fmul_s.h b/riscv/insns/fmul_s.h index 145d5ce..9cf30b4 100644 --- a/riscv/insns/fmul_s.h +++ b/riscv/insns/fmul_s.h @@ -1,5 +1,5 @@ -require_extension('F'); +require_either_extension('F', EXT_ZFINX); require_fp; softfloat_roundingMode = RM; -WRITE_FRD(f32_mul(f32(FRS1), f32(FRS2))); +WRITE_FRD_F(f32_mul(FRS1_F, FRS2_F)); set_fp_exceptions; diff --git a/riscv/insns/fnmadd_d.h b/riscv/insns/fnmadd_d.h index e8dd743..a2a14e9 100644 --- a/riscv/insns/fnmadd_d.h +++ b/riscv/insns/fnmadd_d.h @@ -1,5 +1,5 @@ -require_extension('D'); +require_either_extension('D', EXT_ZDINX); require_fp; softfloat_roundingMode = RM; -WRITE_FRD(f64_mulAdd(f64(f64(FRS1).v ^ F64_SIGN), f64(FRS2), f64(f64(FRS3).v ^ F64_SIGN))); +WRITE_FRD_D(f64_mulAdd(f64(FRS1_D.v ^ F64_SIGN), FRS2_D, f64(FRS3_D.v ^ F64_SIGN))); set_fp_exceptions; diff --git a/riscv/insns/fnmadd_h.h b/riscv/insns/fnmadd_h.h index e4c619e..b1ca283 100644 --- a/riscv/insns/fnmadd_h.h +++ b/riscv/insns/fnmadd_h.h @@ -1,5 +1,5 @@ -require_extension(EXT_ZFH); +require_either_extension(EXT_ZFH, EXT_ZHINX); require_fp; softfloat_roundingMode = RM; -WRITE_FRD(f16_mulAdd(f16(f16(FRS1).v ^ F16_SIGN), f16(FRS2), f16(f16(FRS3).v ^ F16_SIGN))); +WRITE_FRD_H(f16_mulAdd(f16(FRS1_H.v ^ F16_SIGN), FRS2_H, f16(FRS3_H.v ^ F16_SIGN))); set_fp_exceptions; diff --git a/riscv/insns/fnmadd_s.h b/riscv/insns/fnmadd_s.h index 1c2996e..683257a 100644 --- a/riscv/insns/fnmadd_s.h +++ b/riscv/insns/fnmadd_s.h @@ 
-1,5 +1,5 @@ -require_extension('F'); +require_either_extension('F', EXT_ZFINX); require_fp; softfloat_roundingMode = RM; -WRITE_FRD(f32_mulAdd(f32(f32(FRS1).v ^ F32_SIGN), f32(FRS2), f32(f32(FRS3).v ^ F32_SIGN))); +WRITE_FRD_F(f32_mulAdd(f32(FRS1_F.v ^ F32_SIGN), FRS2_F, f32(FRS3_F.v ^ F32_SIGN))); set_fp_exceptions; diff --git a/riscv/insns/fnmsub_d.h b/riscv/insns/fnmsub_d.h index c29a0b9..9352c3f 100644 --- a/riscv/insns/fnmsub_d.h +++ b/riscv/insns/fnmsub_d.h @@ -1,5 +1,5 @@ -require_extension('D'); +require_either_extension('D', EXT_ZDINX); require_fp; softfloat_roundingMode = RM; -WRITE_FRD(f64_mulAdd(f64(f64(FRS1).v ^ F64_SIGN), f64(FRS2), f64(FRS3))); +WRITE_FRD_D(f64_mulAdd(f64(FRS1_D.v ^ F64_SIGN), FRS2_D, FRS3_D)); set_fp_exceptions; diff --git a/riscv/insns/fnmsub_h.h b/riscv/insns/fnmsub_h.h index 0410c3b..e05fcd1 100644 --- a/riscv/insns/fnmsub_h.h +++ b/riscv/insns/fnmsub_h.h @@ -1,5 +1,5 @@ -require_extension(EXT_ZFH); +require_either_extension(EXT_ZFH, EXT_ZHINX); require_fp; softfloat_roundingMode = RM; -WRITE_FRD(f16_mulAdd(f16(f16(FRS1).v ^ F16_SIGN), f16(FRS2), f16(FRS3))); +WRITE_FRD_H(f16_mulAdd(f16(FRS1_H.v ^ F16_SIGN), FRS2_H, FRS3_H)); set_fp_exceptions; diff --git a/riscv/insns/fnmsub_s.h b/riscv/insns/fnmsub_s.h index 4c61fc7..b22b3db 100644 --- a/riscv/insns/fnmsub_s.h +++ b/riscv/insns/fnmsub_s.h @@ -1,5 +1,5 @@ -require_extension('F'); +require_either_extension('F', EXT_ZFINX); require_fp; softfloat_roundingMode = RM; -WRITE_FRD(f32_mulAdd(f32(f32(FRS1).v ^ F32_SIGN), f32(FRS2), f32(FRS3))); +WRITE_FRD_F(f32_mulAdd(f32(FRS1_F.v ^ F32_SIGN), FRS2_F, FRS3_F)); set_fp_exceptions; diff --git a/riscv/insns/fsgnj_d.h b/riscv/insns/fsgnj_d.h index 78f9ce7..8f02fd1 100644 --- a/riscv/insns/fsgnj_d.h +++ b/riscv/insns/fsgnj_d.h @@ -1,3 +1,3 @@ -require_extension('D'); +require_either_extension('D', EXT_ZDINX); require_fp; -WRITE_FRD(fsgnj64(FRS1, FRS2, false, false)); +WRITE_FRD_D(fsgnj64(freg(FRS1_D), freg(FRS2_D), false, false)); diff --git a/riscv/insns/fsgnj_h.h b/riscv/insns/fsgnj_h.h index 79d50f5..080f27d 100644 --- a/riscv/insns/fsgnj_h.h +++ b/riscv/insns/fsgnj_h.h @@ -1,3 +1,3 @@ -require_extension(EXT_ZFH); +require_either_extension(EXT_ZFH, EXT_ZHINX); require_fp; -WRITE_FRD(fsgnj16(FRS1, FRS2, false, false)); +WRITE_FRD_H(fsgnj16(freg(FRS1_H), freg(FRS2_H), false, false)); diff --git a/riscv/insns/fsgnj_s.h b/riscv/insns/fsgnj_s.h index c1a70cb..ea511b8 100644 --- a/riscv/insns/fsgnj_s.h +++ b/riscv/insns/fsgnj_s.h @@ -1,3 +1,3 @@ -require_extension('F'); +require_either_extension('F', EXT_ZFINX); require_fp; -WRITE_FRD(fsgnj32(FRS1, FRS2, false, false)); +WRITE_FRD_F(fsgnj32(freg(FRS1_F), freg(FRS2_F), false, false)); diff --git a/riscv/insns/fsgnjn_d.h b/riscv/insns/fsgnjn_d.h index f02c311..870a979 100644 --- a/riscv/insns/fsgnjn_d.h +++ b/riscv/insns/fsgnjn_d.h @@ -1,3 +1,3 @@ -require_extension('D'); +require_either_extension('D', EXT_ZDINX); require_fp; -WRITE_FRD(fsgnj64(FRS1, FRS2, true, false)); +WRITE_FRD_D(fsgnj64(freg(FRS1_D), freg(FRS2_D), true, false)); diff --git a/riscv/insns/fsgnjn_h.h b/riscv/insns/fsgnjn_h.h index ebb4ac9..1d7bf03 100644 --- a/riscv/insns/fsgnjn_h.h +++ b/riscv/insns/fsgnjn_h.h @@ -1,3 +1,3 @@ -require_extension(EXT_ZFH); +require_either_extension(EXT_ZFH, EXT_ZHINX); require_fp; -WRITE_FRD(fsgnj16(FRS1, FRS2, true, false)); +WRITE_FRD_H(fsgnj16(freg(FRS1_H), freg(FRS2_H), true, false)); \ No newline at end of file diff --git a/riscv/insns/fsgnjn_q.h b/riscv/insns/fsgnjn_q.h index 38c7bbf..dcf7235 100644 --- 
a/riscv/insns/fsgnjn_q.h +++ b/riscv/insns/fsgnjn_q.h @@ -1,3 +1,3 @@ require_extension('Q'); require_fp; -WRITE_FRD(fsgnj128(FRS1, FRS2, true, false)); +WRITE_FRD(fsgnj128(FRS1, FRS2, true, false)); \ No newline at end of file diff --git a/riscv/insns/fsgnjn_s.h b/riscv/insns/fsgnjn_s.h index 35906d6..a0994b4 100644 --- a/riscv/insns/fsgnjn_s.h +++ b/riscv/insns/fsgnjn_s.h @@ -1,3 +1,3 @@ -require_extension('F'); +require_either_extension('F', EXT_ZFINX); require_fp; -WRITE_FRD(fsgnj32(FRS1, FRS2, true, false)); +WRITE_FRD_F(fsgnj32(freg(FRS1_F), freg(FRS2_F), true, false)); diff --git a/riscv/insns/fsgnjx_d.h b/riscv/insns/fsgnjx_d.h index c121737..25906f0 100644 --- a/riscv/insns/fsgnjx_d.h +++ b/riscv/insns/fsgnjx_d.h @@ -1,3 +1,3 @@ -require_extension('D'); +require_either_extension('D', EXT_ZDINX); require_fp; -WRITE_FRD(fsgnj64(FRS1, FRS2, false, true)); +WRITE_FRD_D(fsgnj64(freg(FRS1_D), freg(FRS2_D), false, true)); diff --git a/riscv/insns/fsgnjx_h.h b/riscv/insns/fsgnjx_h.h index 9310269..1d29bb1 100644 --- a/riscv/insns/fsgnjx_h.h +++ b/riscv/insns/fsgnjx_h.h @@ -1,3 +1,3 @@ -require_extension(EXT_ZFH); +require_either_extension(EXT_ZFH, EXT_ZHINX); require_fp; -WRITE_FRD(fsgnj16(FRS1, FRS2, false, true)); +WRITE_FRD_H(fsgnj16(freg(FRS1_H), freg(FRS2_H), false, true)); diff --git a/riscv/insns/fsgnjx_s.h b/riscv/insns/fsgnjx_s.h index 4d5c624..9bc0798 100644 --- a/riscv/insns/fsgnjx_s.h +++ b/riscv/insns/fsgnjx_s.h @@ -1,3 +1,3 @@ -require_extension('F'); +require_either_extension('F', EXT_ZFINX); require_fp; -WRITE_FRD(fsgnj32(FRS1, FRS2, false, true)); +WRITE_FRD_F(fsgnj32(freg(FRS1_F), freg(FRS2_F), false, true)); diff --git a/riscv/insns/fsqrt_d.h b/riscv/insns/fsqrt_d.h index da138ba..363b457 100644 --- a/riscv/insns/fsqrt_d.h +++ b/riscv/insns/fsqrt_d.h @@ -1,5 +1,5 @@ -require_extension('D'); +require_either_extension('D', EXT_ZDINX); require_fp; softfloat_roundingMode = RM; -WRITE_FRD(f64_sqrt(f64(FRS1))); +WRITE_FRD_D(f64_sqrt(FRS1_D)); set_fp_exceptions; diff --git a/riscv/insns/fsqrt_h.h b/riscv/insns/fsqrt_h.h index 138d572..fea429b 100644 --- a/riscv/insns/fsqrt_h.h +++ b/riscv/insns/fsqrt_h.h @@ -1,5 +1,5 @@ -require_extension(EXT_ZFH); +require_either_extension(EXT_ZFH, EXT_ZHINX); require_fp; softfloat_roundingMode = RM; -WRITE_FRD(f16_sqrt(f16(FRS1))); +WRITE_FRD_H(f16_sqrt(FRS1_H)); set_fp_exceptions; diff --git a/riscv/insns/fsqrt_s.h b/riscv/insns/fsqrt_s.h index 7476846..d44503a 100644 --- a/riscv/insns/fsqrt_s.h +++ b/riscv/insns/fsqrt_s.h @@ -1,5 +1,5 @@ -require_extension('F'); +require_either_extension('F', EXT_ZFINX); require_fp; softfloat_roundingMode = RM; -WRITE_FRD(f32_sqrt(f32(FRS1))); +WRITE_FRD_F(f32_sqrt(FRS1_F)); set_fp_exceptions; diff --git a/riscv/insns/fsub_d.h b/riscv/insns/fsub_d.h index 1418a06..4f8bf50 100644 --- a/riscv/insns/fsub_d.h +++ b/riscv/insns/fsub_d.h @@ -1,5 +1,5 @@ -require_extension('D'); +require_either_extension('D', EXT_ZDINX); require_fp; softfloat_roundingMode = RM; -WRITE_FRD(f64_sub(f64(FRS1), f64(FRS2))); +WRITE_FRD_D(f64_sub(FRS1_D, FRS2_D)); set_fp_exceptions; diff --git a/riscv/insns/fsub_h.h b/riscv/insns/fsub_h.h index 43b51cc..f7006fb 100644 --- a/riscv/insns/fsub_h.h +++ b/riscv/insns/fsub_h.h @@ -1,5 +1,5 @@ -require_extension(EXT_ZFH); +require_either_extension(EXT_ZFH, EXT_ZHINX); require_fp; softfloat_roundingMode = RM; -WRITE_FRD(f16_sub(f16(FRS1), f16(FRS2))); +WRITE_FRD_H(f16_sub(FRS1_H, FRS2_H)); set_fp_exceptions; diff --git a/riscv/insns/fsub_s.h b/riscv/insns/fsub_s.h index f6183ea..1a33ffd 100644 
--- a/riscv/insns/fsub_s.h +++ b/riscv/insns/fsub_s.h @@ -1,5 +1,5 @@ -require_extension('F'); +require_either_extension('F', EXT_ZFINX); require_fp; softfloat_roundingMode = RM; -WRITE_FRD(f32_sub(f32(FRS1), f32(FRS2))); +WRITE_FRD_F(f32_sub(FRS1_F, FRS2_F)); set_fp_exceptions; diff --git a/riscv/insns/vfslide1down_vf.h b/riscv/insns/vfslide1down_vf.h index 66eeacc..40f3c18 100644 --- a/riscv/insns/vfslide1down_vf.h +++ b/riscv/insns/vfslide1down_vf.h @@ -23,13 +23,13 @@ if (i != vl - 1) { } else { switch (P.VU.vsew) { case e16: - P.VU.elt(rd_num, vl - 1, true) = f16(FRS1); + P.VU.elt(rd_num, vl - 1, true) = FRS1_H; break; case e32: - P.VU.elt(rd_num, vl - 1, true) = f32(FRS1); + P.VU.elt(rd_num, vl - 1, true) = FRS1_F; break; case e64: - P.VU.elt(rd_num, vl - 1, true) = f64(FRS1); + P.VU.elt(rd_num, vl - 1, true) = FRS1_D; break; } } diff --git a/riscv/insns/vfslide1up_vf.h b/riscv/insns/vfslide1up_vf.h index b9c2817..4e4e499 100644 --- a/riscv/insns/vfslide1up_vf.h +++ b/riscv/insns/vfslide1up_vf.h @@ -23,13 +23,13 @@ if (i != 0) { } else { switch (P.VU.vsew) { case e16: - P.VU.elt(rd_num, 0, true) = f16(FRS1); + P.VU.elt(rd_num, 0, true) = FRS1_H; break; case e32: - P.VU.elt(rd_num, 0, true) = f32(FRS1); + P.VU.elt(rd_num, 0, true) = FRS1_F; break; case e64: - P.VU.elt(rd_num, 0, true) = f64(FRS1); + P.VU.elt(rd_num, 0, true) = FRS1_D; break; } } diff --git a/riscv/v_ext_macros.h b/riscv/v_ext_macros.h index 1a2a734..9ff383c 100644 --- a/riscv/v_ext_macros.h +++ b/riscv/v_ext_macros.h @@ -1802,7 +1802,7 @@ reg_t index[P.VU.vlmax]; \ case e16: { \ float32_t &vd = P.VU.elt(rd_num, i, true); \ float32_t vs2 = f16_to_f32(P.VU.elt(rs2_num, i)); \ - float32_t rs1 = f16_to_f32(f16(READ_FREG(rs1_num))); \ + float32_t rs1 = f16_to_f32(FRS1_H); \ BODY16; \ set_fp_exceptions; \ break; \ @@ -1810,7 +1810,7 @@ reg_t index[P.VU.vlmax]; \ case e32: { \ float64_t &vd = P.VU.elt(rd_num, i, true); \ float64_t vs2 = f32_to_f64(P.VU.elt(rs2_num, i)); \ - float64_t rs1 = f32_to_f64(f32(READ_FREG(rs1_num))); \ + float64_t rs1 = f32_to_f64(FRS1_F); \ BODY32; \ set_fp_exceptions; \ break; \ @@ -1856,7 +1856,7 @@ reg_t index[P.VU.vlmax]; \ case e16: { \ float32_t &vd = P.VU.elt(rd_num, i, true); \ float32_t vs2 = P.VU.elt(rs2_num, i); \ - float32_t rs1 = f16_to_f32(f16(READ_FREG(rs1_num))); \ + float32_t rs1 = f16_to_f32(FRS1_H); \ BODY16; \ set_fp_exceptions; \ break; \ @@ -1864,7 +1864,7 @@ reg_t index[P.VU.vlmax]; \ case e32: { \ float64_t &vd = P.VU.elt(rd_num, i, true); \ float64_t vs2 = P.VU.elt(rs2_num, i); \ - float64_t rs1 = f32_to_f64(f32(READ_FREG(rs1_num))); \ + float64_t rs1 = f32_to_f64(FRS1_F); \ BODY32; \ set_fp_exceptions; \ break; \ -- cgit v1.1 From 8aaae32d1d8fc3d20d62aae3039d6591346e95de Mon Sep 17 00:00:00 2001 From: liweiwei Date: Thu, 14 Oct 2021 12:38:38 +0800 Subject: Add support for freg command to read X regs when enable Zfinx Update README --- README.md | 1 + riscv/interactive.cc | 38 ++++++++++++++++++++++++++------------ riscv/sim.h | 2 +- 3 files changed, 28 insertions(+), 13 deletions(-) diff --git a/README.md b/README.md index afc7187..1935b04 100644 --- a/README.md +++ b/README.md @@ -28,6 +28,7 @@ Spike supports the following RISC-V ISA features: - Zbc extension, v1.0 - Zbs extension, v1.0 - Zfh and Zfhmin half-precision floating-point extensions, v1.0 + - Zfinx extension, v1.0 - Zmmul integer multiplication extension, v1.0 - Zicbom, Zicbop, Zicboz cache-block maintenance extensions, v1.0 - Conformance to both RVWMO and RVTSO (Spike is sequentially consistent) diff --git 
a/riscv/interactive.cc b/riscv/interactive.cc index f41be2c..4b29069 100644 --- a/riscv/interactive.cc +++ b/riscv/interactive.cc @@ -305,19 +305,33 @@ reg_t sim_t::get_reg(const std::vector& args) return p->get_state()->XPR[r]; } -freg_t sim_t::get_freg(const std::vector& args) +freg_t sim_t::get_freg(const std::vector& args, int size) { if(args.size() != 2) throw trap_interactive(); processor_t *p = get_core(args[0]); - int r = std::find(fpr_name, fpr_name + NFPR, args[1]) - fpr_name; - if (r == NFPR) - r = atoi(args[1].c_str()); - if (r >= NFPR) - throw trap_interactive(); - - return p->get_state()->FPR[r]; + if (p->extension_enabled(EXT_ZFINX)) { + int r = std::find(xpr_name, xpr_name + NXPR, args[1]) - xpr_name; + if (r == NXPR) + r = atoi(args[1].c_str()); + if (r >= NXPR) + throw trap_interactive(); + if ((p->get_xlen() == 32) && (size == 64)) { + if (r % 2 != 0) + throw trap_interactive(); + return freg(f64(r== 0 ? reg_t(0) : (READ_REG(r + 1) << 32) + zext32(READ_REG(r)))); + } else { //xlen >= size + return {p->get_state()->XPR[r] | ~(((uint64_t)-1) >> (64 - size)) ,(uint64_t)-1}; + } + } else { + int r = std::find(fpr_name, fpr_name + NFPR, args[1]) - fpr_name; + if (r == NFPR) + r = atoi(args[1].c_str()); + if (r >= NFPR) + throw trap_interactive(); + return p->get_state()->FPR[r]; + } } void sim_t::interactive_vreg(const std::string& cmd, const std::vector& args) @@ -408,7 +422,7 @@ union fpr void sim_t::interactive_freg(const std::string& cmd, const std::vector& args) { - freg_t r = get_freg(args); + freg_t r = get_freg(args, 64); std::ostream out(sout_.rdbuf()); out << std::hex << "0x" << std::setfill ('0') << std::setw(16) << r.v[1] << std::setw(16) << r.v[0] << std::endl; @@ -417,7 +431,7 @@ void sim_t::interactive_freg(const std::string& cmd, const std::vector& args) { fpr f; - f.r = freg(f16_to_f32(f16(get_freg(args)))); + f.r = freg(f16_to_f32(f16(get_freg(args, 16)))); std::ostream out(sout_.rdbuf()); out << (isBoxedF32(f.r) ? (double)f.s : NAN) << std::endl; @@ -426,7 +440,7 @@ void sim_t::interactive_fregh(const std::string& cmd, const std::vector& args) { fpr f; - f.r = get_freg(args); + f.r = get_freg(args, 32); std::ostream out(sout_.rdbuf()); out << (isBoxedF32(f.r) ? (double)f.s : NAN) << std::endl; @@ -435,7 +449,7 @@ void sim_t::interactive_fregs(const std::string& cmd, const std::vector& args) { fpr f; - f.r = get_freg(args); + f.r = get_freg(args, 64); std::ostream out(sout_.rdbuf()); out << (isBoxedF64(f.r) ? 
f.d : NAN) << std::endl; diff --git a/riscv/sim.h b/riscv/sim.h index 97cada1..c355f31 100644 --- a/riscv/sim.h +++ b/riscv/sim.h @@ -140,7 +140,7 @@ private: void interactive_until_silent(const std::string& cmd, const std::vector& args); void interactive_until_noisy(const std::string& cmd, const std::vector& args); reg_t get_reg(const std::vector& args); - freg_t get_freg(const std::vector& args); + freg_t get_freg(const std::vector& args, int size); reg_t get_mem(const std::vector& args); reg_t get_pc(const std::vector& args); -- cgit v1.1 From caee7f3fa508b0bf84eb3f8d60d6c9e43b0ccf61 Mon Sep 17 00:00:00 2001 From: Weiwei Li Date: Thu, 4 Aug 2022 10:18:19 +0800 Subject: Add stateen related check for float point instructions --- riscv/csrs.cc | 2 +- riscv/decode.h | 3 ++- 2 files changed, 3 insertions(+), 2 deletions(-) diff --git a/riscv/csrs.cc b/riscv/csrs.cc index fc5dce1..7592c2d 100644 --- a/riscv/csrs.cc +++ b/riscv/csrs.cc @@ -1201,7 +1201,7 @@ float_csr_t::float_csr_t(processor_t* const proc, const reg_t addr, const reg_t void float_csr_t::verify_permissions(insn_t insn, bool write) const { masked_csr_t::verify_permissions(insn, write); - require_fp; + require_fs; if (!proc->extension_enabled('F') && !proc->extension_enabled(EXT_ZFINX)) throw trap_illegal_instruction(insn.bits()); } diff --git a/riscv/decode.h b/riscv/decode.h index b5fad2d..850863f 100644 --- a/riscv/decode.h +++ b/riscv/decode.h @@ -292,7 +292,8 @@ do { \ #define require_extension(s) require(p->extension_enabled(s)) #define require_either_extension(A,B) require(p->extension_enabled(A) || p->extension_enabled(B)); #define require_impl(s) require(p->supports_impl(s)) -#define require_fp require(STATE.sstatus->enabled(SSTATUS_FS)) +#define require_fs require(STATE.sstatus->enabled(SSTATUS_FS)) +#define require_fp STATE.fflags->verify_permissions(insn, false) #define require_accelerator require(STATE.sstatus->enabled(SSTATUS_XS)) #define require_vector_vs require(STATE.sstatus->enabled(SSTATUS_VS)) #define require_vector(alu) \ -- cgit v1.1 From ba10686fd18f3fbb036ca04b906deb57e7d1fe54 Mon Sep 17 00:00:00 2001 From: Weiwei Li Date: Mon, 4 Jul 2022 21:31:09 +0800 Subject: add support for sscofpmf extension v0.5.2 since spike doesn't truly support counting of hardware performance events, only csr related read/write functions is supported currently --- riscv/csrs.cc | 42 ++++++++++++++++++++++++++++++++++++++---- riscv/csrs.h | 9 +++++++++ riscv/isa_parser.cc | 2 ++ riscv/isa_parser.h | 1 + riscv/processor.cc | 17 ++++++++++++++--- riscv/processor.h | 1 + 6 files changed, 65 insertions(+), 7 deletions(-) diff --git a/riscv/csrs.cc b/riscv/csrs.cc index 50cbfb3..f50ac6e 100644 --- a/riscv/csrs.cc +++ b/riscv/csrs.cc @@ -681,6 +681,7 @@ void mip_csr_t::backdoor_write_with_mask(const reg_t mask, const reg_t val) noex reg_t mip_csr_t::write_mask() const noexcept { // MIP_STIP is writable unless SSTC exists and STCE is set in MENVCFG const reg_t supervisor_ints = proc->extension_enabled('S') ? MIP_SSIP | ((state->menvcfg->read() & MENVCFG_STCE) ? 0 : MIP_STIP) | MIP_SEIP : 0; + const reg_t lscof_int = proc->extension_enabled(EXT_SSCOFPMF) ? MIP_LCOFIP : 0; const reg_t vssip_int = proc->extension_enabled('H') ? MIP_VSSIP : 0; const reg_t hypervisor_ints = proc->extension_enabled('H') ? MIP_HS_MASK : 0; // We must mask off sgeip, vstip, and vseip. 
All three of these @@ -688,8 +689,8 @@ reg_t mip_csr_t::write_mask() const noexcept { // * sgeip is read-only -- write hgeip instead // * vseip is read-only -- write hvip instead // * vstip is read-only -- write hvip instead - return (supervisor_ints | hypervisor_ints) & - (MIP_SEIP | MIP_SSIP | MIP_STIP | vssip_int); + return (supervisor_ints | hypervisor_ints | lscof_int) & + (MIP_SEIP | MIP_SSIP | MIP_STIP | MIP_LCOFIP | vssip_int); } mie_csr_t::mie_csr_t(processor_t* const proc, const reg_t addr): @@ -698,9 +699,10 @@ mie_csr_t::mie_csr_t(processor_t* const proc, const reg_t addr): reg_t mie_csr_t::write_mask() const noexcept { const reg_t supervisor_ints = proc->extension_enabled('S') ? MIP_SSIP | MIP_STIP | MIP_SEIP : 0; + const reg_t lscof_int = proc->extension_enabled(EXT_SSCOFPMF) ? MIP_LCOFIP : 0; const reg_t hypervisor_ints = proc->extension_enabled('H') ? MIP_HS_MASK : 0; const reg_t coprocessor_ints = (reg_t)proc->any_custom_extensions() << IRQ_COP; - const reg_t delegable_ints = supervisor_ints | coprocessor_ints; + const reg_t delegable_ints = supervisor_ints | coprocessor_ints | lscof_int; const reg_t all_ints = delegable_ints | hypervisor_ints | MIP_MSIP | MIP_MTIP | MIP_MEIP; return all_ints; } @@ -796,8 +798,9 @@ void mideleg_csr_t::verify_permissions(insn_t insn, bool write) const { bool mideleg_csr_t::unlogged_write(const reg_t val) noexcept { const reg_t supervisor_ints = proc->extension_enabled('S') ? MIP_SSIP | MIP_STIP | MIP_SEIP : 0; + const reg_t lscof_int = proc->extension_enabled(EXT_SSCOFPMF) ? MIP_LCOFIP : 0; const reg_t coprocessor_ints = (reg_t)proc->any_custom_extensions() << IRQ_COP; - const reg_t delegable_ints = supervisor_ints | coprocessor_ints; + const reg_t delegable_ints = supervisor_ints | coprocessor_ints | lscof_int; return basic_csr_t::unlogged_write(val & delegable_ints); } @@ -1435,3 +1438,34 @@ void virtualized_stimecmp_csr_t::verify_permissions(insn_t insn, bool write) con throw trap_virtual_instruction(insn.bits()); } } + +scountovf_csr_t::scountovf_csr_t(processor_t* const proc, const reg_t addr): + csr_t(proc, addr) { +} + +void scountovf_csr_t::verify_permissions(insn_t insn, bool write) const { + if (!proc->extension_enabled(EXT_SSCOFPMF)) + throw trap_illegal_instruction(insn.bits()); + csr_t::verify_permissions(insn, write); +} + +reg_t scountovf_csr_t::read() const noexcept { + reg_t val = 0; + for (reg_t i = 3; i <= 31; ++i) { + bool of = state->mevent[i - 3]->read() & MHPMEVENT_OF; + val |= of << i; + } + + /* In M and S modes, scountovf bit X is readable when mcounteren bit X is set, */ + /* and otherwise reads as zero. Similarly, in VS mode, scountovf bit X is readable */ + /* when mcounteren bit X and hcounteren bit X are both set, and otherwise reads as zero. 
*/ + val &= state->mcounteren->read(); + if (state->v) + val &= state->hcounteren->read(); + return val; +} + +bool scountovf_csr_t::unlogged_write(const reg_t val) noexcept { + /* this function is unused */ + return false; +} diff --git a/riscv/csrs.h b/riscv/csrs.h index acd889c..5503255 100644 --- a/riscv/csrs.h +++ b/riscv/csrs.h @@ -744,4 +744,13 @@ class virtualized_stimecmp_csr_t: public virtualized_csr_t { virtualized_stimecmp_csr_t(processor_t* const proc, csr_t_p orig, csr_t_p virt); virtual void verify_permissions(insn_t insn, bool write) const override; }; + +class scountovf_csr_t: public csr_t { + public: + scountovf_csr_t(processor_t* const proc, const reg_t addr); + virtual void verify_permissions(insn_t insn, bool write) const override; + virtual reg_t read() const noexcept override; + protected: + virtual bool unlogged_write(const reg_t val) noexcept override; +}; #endif diff --git a/riscv/isa_parser.cc b/riscv/isa_parser.cc index 6ac146b..ed1295e 100644 --- a/riscv/isa_parser.cc +++ b/riscv/isa_parser.cc @@ -168,6 +168,8 @@ isa_parser_t::isa_parser_t(const char* str, const char *priv) extension_table[EXT_SMEPMP] = true; } else if (ext_str == "smstateen") { extension_table[EXT_SMSTATEEN] = true; + } else if (ext_str == "sscofpmf") { + extension_table[EXT_SSCOFPMF] = true; } else if (ext_str == "svnapot") { extension_table[EXT_SVNAPOT] = true; } else if (ext_str == "svpbmt") { diff --git a/riscv/isa_parser.h b/riscv/isa_parser.h index c57436e..6b5bb0a 100644 --- a/riscv/isa_parser.h +++ b/riscv/isa_parser.h @@ -33,6 +33,7 @@ typedef enum { EXT_ZPSFOPERAND, EXT_SMEPMP, EXT_SMSTATEEN, + EXT_SSCOFPMF, EXT_SVNAPOT, EXT_SVPBMT, EXT_SVINVAL, diff --git a/riscv/processor.cc b/riscv/processor.cc index 0389707..7cac387 100644 --- a/riscv/processor.cc +++ b/riscv/processor.cc @@ -228,13 +228,15 @@ void state_t::reset(processor_t* const proc, reg_t max_isa) } for (reg_t i = 3; i <= 31; ++i) { const reg_t which_mevent = CSR_MHPMEVENT3 + i - 3; + const reg_t which_meventh = CSR_MHPMEVENT3H + i - 3; const reg_t which_mcounter = CSR_MHPMCOUNTER3 + i - 3; const reg_t which_mcounterh = CSR_MHPMCOUNTER3H + i - 3; const reg_t which_counter = CSR_HPMCOUNTER3 + i - 3; const reg_t which_counterh = CSR_HPMCOUNTER3H + i - 3; - auto mevent = std::make_shared(proc, which_mevent, 0); + const reg_t mevent_mask = proc->extension_enabled_const(EXT_SSCOFPMF) ? 
MHPMEVENT_VUINH | MHPMEVENT_VSINH | MHPMEVENTH_UINH | + MHPMEVENT_UINH | MHPMEVENT_MINH | MHPMEVENT_OF : 0; + mevent[i - 3] = std::make_shared(proc, which_mevent, mevent_mask, 0); auto mcounter = std::make_shared(proc, which_mcounter, 0); - csrmap[which_mevent] = mevent; csrmap[which_mcounter] = mcounter; if (proc->extension_enabled_const(EXT_ZICNTR) && proc->extension_enabled_const(EXT_ZIHPM)) { @@ -242,21 +244,30 @@ void state_t::reset(processor_t* const proc, reg_t max_isa) csrmap[which_counter] = counter; } if (xlen == 32) { + csrmap[which_mevent] = std::make_shared(proc, which_mevent, mevent[i - 3]);; auto mcounterh = std::make_shared(proc, which_mcounterh, 0); csrmap[which_mcounterh] = mcounterh; if (proc->extension_enabled_const(EXT_ZICNTR) && proc->extension_enabled_const(EXT_ZIHPM)) { auto counterh = std::make_shared(proc, which_counterh, mcounterh); csrmap[which_counterh] = counterh; } + if (proc->extension_enabled_const(EXT_SSCOFPMF)) { + auto meventh = std::make_shared(proc, which_meventh, mevent[i - 3]); + csrmap[which_meventh] = meventh; + } + } else { + csrmap[which_mevent] = mevent[i - 3]; } } csrmap[CSR_MCOUNTINHIBIT] = std::make_shared(proc, CSR_MCOUNTINHIBIT, 0); + if (proc->extension_enabled_const(EXT_SSCOFPMF)) + csrmap[CSR_SCOUNTOVF] = std::make_shared(proc, CSR_SCOUNTOVF); csrmap[CSR_MIE] = mie = std::make_shared(proc, CSR_MIE); csrmap[CSR_MIP] = mip = std::make_shared(proc, CSR_MIP); auto sip_sie_accr = std::make_shared( this, ~MIP_HS_MASK, // read_mask - MIP_SSIP, // ip_write_mask + MIP_SSIP | MIP_LCOFIP, // ip_write_mask ~MIP_HS_MASK, // ie_write_mask generic_int_accessor_t::mask_mode_t::MIDELEG, 0 // shiftamt diff --git a/riscv/processor.h b/riscv/processor.h index 88ddf70..9b821b3 100644 --- a/riscv/processor.h +++ b/riscv/processor.h @@ -145,6 +145,7 @@ struct state_t csr_t_p medeleg; csr_t_p mideleg; csr_t_p mcounteren; + csr_t_p mevent[29]; csr_t_p scounteren; csr_t_p sepc; csr_t_p stval; -- cgit v1.1 From 6cca5ec9c566301f8e85f83c4505e53b1ffe9249 Mon Sep 17 00:00:00 2001 From: Weiwei Li Date: Tue, 5 Jul 2022 17:21:09 +0800 Subject: modify take_interrupt to support LCOFIP irq --- riscv/processor.cc | 2 ++ 1 file changed, 2 insertions(+) diff --git a/riscv/processor.cc b/riscv/processor.cc index 7cac387..164ecff 100644 --- a/riscv/processor.cc +++ b/riscv/processor.cc @@ -716,6 +716,8 @@ void processor_t::take_interrupt(reg_t pending_interrupts) enabled_interrupts = MIP_SSIP; else if (enabled_interrupts & MIP_STIP) enabled_interrupts = MIP_STIP; + else if (enabled_interrupts & MIP_LCOFIP) + enabled_interrupts = MIP_LCOFIP; else if (enabled_interrupts & MIP_VSEIP) enabled_interrupts = MIP_VSEIP; else if (enabled_interrupts & MIP_VSSIP) -- cgit v1.1 From 32e199cce48a7b837e026bf072b8fd1a8f9c62f6 Mon Sep 17 00:00:00 2001 From: Weiwei Li Date: Mon, 4 Jul 2022 22:08:20 +0800 Subject: update README.md --- README.md | 1 + 1 file changed, 1 insertion(+) diff --git a/README.md b/README.md index afc7187..5815dd0 100644 --- a/README.md +++ b/README.md @@ -40,6 +40,7 @@ Spike supports the following RISC-V ISA features: - Debug v0.14 - Smepmp extension v1.0 - Smstateen extension, v1.0 + - Sscofpmf v0.5.2 As a Spike extension, the remainder of the proposed [Bit-Manipulation Extensions](https://github.com/riscv/riscv-bitmanip) -- cgit v1.1 From 7383118078a98112ca4036919e6654d8171d2274 Mon Sep 17 00:00:00 2001 From: liweiwei90 <34847211+liweiwei90@users.noreply.github.com> Date: Wed, 10 Aug 2022 05:50:53 +0800 Subject: Fix exception type for accessing (v)stimecmp (#1061) 
Illegal instruciton trap should be raised when accessing if related bit of mcounteren.TM or menvcfg.STCE is zero in VS/VU mode --- riscv/csrs.cc | 13 +++++++------ 1 file changed, 7 insertions(+), 6 deletions(-) diff --git a/riscv/csrs.cc b/riscv/csrs.cc index 7de49a7..bc5db50 100644 --- a/riscv/csrs.cc +++ b/riscv/csrs.cc @@ -1411,17 +1411,18 @@ virtualized_stimecmp_csr_t::virtualized_stimecmp_csr_t(processor_t* const proc, } void virtualized_stimecmp_csr_t::verify_permissions(insn_t insn, bool write) const { - virtualized_csr_t::verify_permissions(insn, write); - - // check for read permission to time as enabled by xcounteren - state->time_proxy->verify_permissions(insn, false); - if (!(state->menvcfg->read() & MENVCFG_STCE)) { // access to (v)stimecmp with MENVCFG.STCE = 0 if (state->prv < PRV_M) throw trap_illegal_instruction(insn.bits()); - } else if (state->v && !(state->henvcfg->read() & HENVCFG_STCE)) { + } + + state->time_proxy->verify_permissions(insn, false); + + if (state->v && !(state->henvcfg->read() & HENVCFG_STCE)) { // access to vstimecmp with MENVCFG.STCE = 1 and HENVCFG.STCE = 0 when V = 1 throw trap_virtual_instruction(insn.bits()); } + + virtualized_csr_t::verify_permissions(insn, write); } -- cgit v1.1 From 6b28c78b2cfdf562934fd1d65812cc8707f2cc41 Mon Sep 17 00:00:00 2001 From: Weiwei Li Date: Sun, 31 Jul 2022 20:38:10 +0800 Subject: Add #ifdef RISCV_ENABLE_COMMITLOG for commitlog related code --- riscv/execute.cc | 2 ++ 1 file changed, 2 insertions(+) diff --git a/riscv/execute.cc b/riscv/execute.cc index 7cb16dd..cc0d4c3 100644 --- a/riscv/execute.cc +++ b/riscv/execute.cc @@ -172,8 +172,10 @@ inline void processor_t::update_histogram(reg_t pc) // function calls. static inline reg_t execute_insn(processor_t* p, reg_t pc, insn_fetch_t fetch) { +#ifdef RISCV_ENABLE_COMMITLOG commit_log_reset(p); commit_log_stash_privilege(p); +#endif reg_t npc; try { -- cgit v1.1 From 3bbce450315dece506aacf0662cbefa6f6948d4a Mon Sep 17 00:00:00 2001 From: Weiwei Li Date: Sun, 31 Jul 2022 20:38:50 +0800 Subject: Fix description for mem related interactive commands Add missed description for untiln interactive commands --- riscv/interactive.cc | 10 ++++++---- 1 file changed, 6 insertions(+), 4 deletions(-) diff --git a/riscv/interactive.cc b/riscv/interactive.cc index 4b29069..0b71507 100644 --- a/riscv/interactive.cc +++ b/riscv/interactive.cc @@ -211,15 +211,17 @@ void sim_t::interactive_help(const std::string& cmd, const std::vector # Display double precision in \n" "vreg [reg] # Display vector [reg] (all if omitted) in \n" "pc # Show current PC in \n" - "mem # Show contents of physical memory\n" - "str # Show NUL-terminated C string at in core \n" + "mem [core] # Show contents of virtual memory in [core] (physical memory if omitted)\n" + "str [core] # Show NUL-terminated C string at virtual address in [core] (physical address if omitted)\n" "until reg # Stop when in hits \n" + "untiln reg # Run noisy and stop when in hits \n" "until pc # Stop when PC in hits \n" "untiln pc # Run noisy and stop when PC in hits \n" - "until mem # Stop when memory becomes \n" + "until mem [core] # Stop when virtual memory in [core] (physical address if omitted) becomes \n" + "untiln mem [core] # Run noisy and stop when virtual memory in [core] (physical address if omitted) becomes \n" "while reg # Run while in is \n" "while pc # Run while PC in is \n" - "while mem # Run while memory is \n" + "while mem [core] # Run while virtual memory in [core] (physical memory if omitted) is \n" "run [count] # Resume 
noisy execution (until CTRL+C, or [count] insns)\n" "r [count] Alias for run\n" "rs [count] # Resume silent execution (until CTRL+C, or [count] insns)\n" -- cgit v1.1 From db24e31569cab07f7d91c4f417aca12fab4fd018 Mon Sep 17 00:00:00 2001 From: Weiwei Li Date: Sun, 31 Jul 2022 20:39:08 +0800 Subject: Add missed decription for log-commits option --- spike_main/spike.cc | 1 + 1 file changed, 1 insertion(+) diff --git a/spike_main/spike.cc b/spike_main/spike.cc index 3629e35..70b4313 100644 --- a/spike_main/spike.cc +++ b/spike_main/spike.cc @@ -50,6 +50,7 @@ static void help(int exit_code = 1) fprintf(stderr, " This flag can be used multiple times.\n"); fprintf(stderr, " The extlib flag for the library must come first.\n"); fprintf(stderr, " --log-cache-miss Generate a log of cache miss\n"); + fprintf(stderr, " --log-commits Generate a log of commits info\n"); fprintf(stderr, " --extension= Specify RoCC Extension\n"); fprintf(stderr, " This flag can be used multiple times.\n"); fprintf(stderr, " --extlib= Shared library to load\n"); -- cgit v1.1 From 793ffe508a5b81ce27f1baf2c5afb0b58a4236c6 Mon Sep 17 00:00:00 2001 From: Weiwei Li Date: Sun, 31 Jul 2022 20:39:47 +0800 Subject: Fix code indentation in processor.cc, interactive.cc, debug_module.h/cc execute.cc, entropy_source.h and v_ext_macros.h --- riscv/debug_module.cc | 58 +++++++++++++++++++++++++------------------------- riscv/debug_module.h | 32 ++++++++++++++-------------- riscv/entropy_source.h | 36 +++++++++++++++---------------- riscv/execute.cc | 24 ++++++++++----------- riscv/interactive.cc | 26 +++++++++++----------- riscv/processor.cc | 2 +- riscv/v_ext_macros.h | 2 +- 7 files changed, 90 insertions(+), 90 deletions(-) diff --git a/riscv/debug_module.cc b/riscv/debug_module.cc index a3d35ea..16520d1 100644 --- a/riscv/debug_module.cc +++ b/riscv/debug_module.cc @@ -165,18 +165,18 @@ bool debug_module_t::load(reg_t addr, size_t len, uint8_t* bytes) bool debug_module_t::store(reg_t addr, size_t len, const uint8_t* bytes) { D( - switch (len) { - case 4: - fprintf(stderr, "store(addr=0x%lx, len=%d, bytes=0x%08x); " - "hartsel=0x%x\n", addr, (unsigned) len, *(uint32_t *) bytes, - dmcontrol.hartsel); - break; - default: - fprintf(stderr, "store(addr=0x%lx, len=%d, bytes=...); " - "hartsel=0x%x\n", addr, (unsigned) len, dmcontrol.hartsel); - break; - } - ); + switch (len) { + case 4: + fprintf(stderr, "store(addr=0x%lx, len=%d, bytes=0x%08x); " + "hartsel=0x%x\n", addr, (unsigned) len, *(uint32_t *) bytes, + dmcontrol.hartsel); + break; + default: + fprintf(stderr, "store(addr=0x%lx, len=%d, bytes=...); " + "hartsel=0x%x\n", addr, (unsigned) len, dmcontrol.hartsel); + break; + } + ); uint8_t id_bytes[4]; uint32_t id = 0; @@ -215,11 +215,11 @@ bool debug_module_t::store(reg_t addr, size_t len, const uint8_t* bytes) } } if (dmcontrol.hartsel == id) { - if (0 == (debug_rom_flags[id] & (1 << DEBUG_ROM_FLAG_GO))){ - if (dmcontrol.hartsel == id) { - abstract_command_completed = true; - } + if (0 == (debug_rom_flags[id] & (1 << DEBUG_ROM_FLAG_GO))){ + if (dmcontrol.hartsel == id) { + abstract_command_completed = true; } + } } return true; } @@ -394,15 +394,15 @@ bool debug_module_t::dmi_read(unsigned address, uint32_t *value) result = set_field(result, DM_DMCONTROL_HASEL, dmcontrol.hasel); result = set_field(result, DM_DMCONTROL_HARTSELLO, dmcontrol.hartsel); result = set_field(result, DM_DMCONTROL_HARTRESET, dmcontrol.hartreset); - result = set_field(result, DM_DMCONTROL_NDMRESET, dmcontrol.ndmreset); + result = set_field(result, 
DM_DMCONTROL_NDMRESET, dmcontrol.ndmreset); result = set_field(result, DM_DMCONTROL_DMACTIVE, dmcontrol.dmactive); } break; case DM_DMSTATUS: { - dmstatus.allhalted = true; + dmstatus.allhalted = true; dmstatus.anyhalted = false; - dmstatus.allrunning = true; + dmstatus.allrunning = true; dmstatus.anyrunning = false; dmstatus.allnonexistant = true; dmstatus.allresumeack = true; @@ -430,8 +430,8 @@ bool debug_module_t::dmi_read(unsigned address, uint32_t *value) // non-existant hartsel. dmstatus.anynonexistant = (dmcontrol.hartsel >= nprocs); - dmstatus.allunavail = false; - dmstatus.anyunavail = false; + dmstatus.allunavail = false; + dmstatus.anyunavail = false; result = set_field(result, DM_DMSTATUS_IMPEBREAK, dmstatus.impebreak); @@ -439,15 +439,15 @@ bool debug_module_t::dmi_read(unsigned address, uint32_t *value) hart_state[dmcontrol.hartsel].havereset); result = set_field(result, DM_DMSTATUS_ANYHAVERESET, hart_state[dmcontrol.hartsel].havereset); - result = set_field(result, DM_DMSTATUS_ALLNONEXISTENT, dmstatus.allnonexistant); - result = set_field(result, DM_DMSTATUS_ALLUNAVAIL, dmstatus.allunavail); - result = set_field(result, DM_DMSTATUS_ALLRUNNING, dmstatus.allrunning); - result = set_field(result, DM_DMSTATUS_ALLHALTED, dmstatus.allhalted); + result = set_field(result, DM_DMSTATUS_ALLNONEXISTENT, dmstatus.allnonexistant); + result = set_field(result, DM_DMSTATUS_ALLUNAVAIL, dmstatus.allunavail); + result = set_field(result, DM_DMSTATUS_ALLRUNNING, dmstatus.allrunning); + result = set_field(result, DM_DMSTATUS_ALLHALTED, dmstatus.allhalted); result = set_field(result, DM_DMSTATUS_ALLRESUMEACK, dmstatus.allresumeack); - result = set_field(result, DM_DMSTATUS_ANYNONEXISTENT, dmstatus.anynonexistant); - result = set_field(result, DM_DMSTATUS_ANYUNAVAIL, dmstatus.anyunavail); - result = set_field(result, DM_DMSTATUS_ANYRUNNING, dmstatus.anyrunning); - result = set_field(result, DM_DMSTATUS_ANYHALTED, dmstatus.anyhalted); + result = set_field(result, DM_DMSTATUS_ANYNONEXISTENT, dmstatus.anynonexistant); + result = set_field(result, DM_DMSTATUS_ANYUNAVAIL, dmstatus.anyunavail); + result = set_field(result, DM_DMSTATUS_ANYRUNNING, dmstatus.anyrunning); + result = set_field(result, DM_DMSTATUS_ANYHALTED, dmstatus.anyhalted); result = set_field(result, DM_DMSTATUS_ANYRESUMEACK, dmstatus.anyresumeack); result = set_field(result, DM_DMSTATUS_AUTHENTICATED, dmstatus.authenticated); result = set_field(result, DM_DMSTATUS_AUTHBUSY, dmstatus.authbusy); diff --git a/riscv/debug_module.h b/riscv/debug_module.h index d79ce7d..8230557 100644 --- a/riscv/debug_module.h +++ b/riscv/debug_module.h @@ -11,16 +11,16 @@ class sim_t; class bus_t; typedef struct { - // Size of program_buffer in 32-bit words, as exposed to the rest of the - // world. - unsigned progbufsize; - unsigned max_sba_data_width; - bool require_authentication; - unsigned abstract_rti; - bool support_hasel; - bool support_abstract_csr_access; - bool support_haltgroups; - bool support_impebreak; + // Size of program_buffer in 32-bit words, as exposed to the rest of the + // world. 
+ unsigned progbufsize; + unsigned max_sba_data_width; + bool require_authentication; + unsigned abstract_rti; + bool support_hasel; + bool support_abstract_csr_access; + bool support_haltgroups; + bool support_impebreak; } debug_module_config_t; typedef struct { @@ -54,12 +54,12 @@ typedef struct { } dmstatus_t; typedef enum cmderr { - CMDERR_NONE = 0, - CMDERR_BUSY = 1, - CMDERR_NOTSUP = 2, - CMDERR_EXCEPTION = 3, - CMDERR_HALTRESUME = 4, - CMDERR_OTHER = 7 + CMDERR_NONE = 0, + CMDERR_BUSY = 1, + CMDERR_NOTSUP = 2, + CMDERR_EXCEPTION = 3, + CMDERR_HALTRESUME = 4, + CMDERR_OTHER = 7 } cmderr_t; typedef struct { diff --git a/riscv/entropy_source.h b/riscv/entropy_source.h index 184bec7..38645ae 100644 --- a/riscv/entropy_source.h +++ b/riscv/entropy_source.h @@ -51,25 +51,25 @@ public: if(return_status == OPST_ES16) { - // Add some sampled entropy into the low 16 bits - uint16_t entropy = this -> get_two_random_bytes(); - result |= entropy; + // Add some sampled entropy into the low 16 bits + uint16_t entropy = this -> get_two_random_bytes(); + result |= entropy; } else if(return_status == OPST_BIST) { - // Do nothing. + // Do nothing. } else if(return_status == OPST_WAIT) { - // Do nothing. + // Do nothing. } else if(return_status == OPST_DEAD) { - // Do nothing. Stay dead. + // Do nothing. Stay dead. } else { - // Unreachable. + // Unreachable. } @@ -92,25 +92,25 @@ public: // Read two random bytes from the entropy source file. uint16_t get_two_random_bytes() { - std::ifstream fh(this -> randomness_source, std::ios::binary); + std::ifstream fh(this -> randomness_source, std::ios::binary); - if(fh.is_open()) { + if(fh.is_open()) { - uint16_t random_bytes; + uint16_t random_bytes; - fh.read((char*)(&random_bytes), 2); + fh.read((char*)(&random_bytes), 2); - fh.close(); + fh.close(); - return random_bytes; + return random_bytes; - } else { + } else { - fprintf(stderr, "Could not open randomness source file:\n\t"); - fprintf(stderr, "%s", randomness_source.c_str()); - abort(); + fprintf(stderr, "Could not open randomness source file:\n\t"); + fprintf(stderr, "%s", randomness_source.c_str()); + abort(); - } + } } diff --git a/riscv/execute.cc b/riscv/execute.cc index cc0d4c3..1f9ddef 100644 --- a/riscv/execute.cc +++ b/riscv/execute.cc @@ -240,18 +240,18 @@ void processor_t::step(size_t n) mmu_t* _mmu = mmu; #define advance_pc() \ - if (unlikely(invalid_pc(pc))) { \ - switch (pc) { \ - case PC_SERIALIZE_BEFORE: state.serialized = true; break; \ - case PC_SERIALIZE_AFTER: ++instret; break; \ - default: abort(); \ - } \ - pc = state.pc; \ - break; \ - } else { \ - state.pc = pc; \ - instret++; \ - } + if (unlikely(invalid_pc(pc))) { \ + switch (pc) { \ + case PC_SERIALIZE_BEFORE: state.serialized = true; break; \ + case PC_SERIALIZE_AFTER: ++instret; break; \ + default: abort(); \ + } \ + pc = state.pc; \ + break; \ + } else { \ + state.pc = pc; \ + instret++; \ + } try { diff --git a/riscv/interactive.cc b/riscv/interactive.cc index 0b71507..4e2ffb1 100644 --- a/riscv/interactive.cc +++ b/riscv/interactive.cc @@ -150,17 +150,17 @@ void sim_t::interactive() // first get commands from file, if cmd_file has been set if (cmd_file && !feof(cmd_file) && fscanf(cmd_file,"%" STR(MAX_CMD_STR) "[^\n]\n", cmd_str)==1) { // up to MAX_CMD_STR characters before \n, skipping \n - s = cmd_str; - // while we get input from file, output goes to stderr - sout_.rdbuf(std::cerr.rdbuf()); + s = cmd_str; + // while we get input from file, output goes to stderr + sout_.rdbuf(std::cerr.rdbuf()); } else { - // when 
there are no commands left from file or if there was no file from the beginning - cmd_file = NULL; // mark file pointer as being not valid, so any method can test this easily + // when there are no commands left from file or if there was no file from the beginning + cmd_file = NULL; // mark file pointer as being not valid, so any method can test this easily #ifdef HAVE_BOOST_ASIO - s = rin(&bout); // get command string from socket or terminal + s = rin(&bout); // get command string from socket or terminal #else - std::cerr << ": " << std::flush; - s = readline(2); // 2 is stderr, but when doing reads it reverts to stdin + std::cerr << ": " << std::flush; + s = readline(2); // 2 is stderr, but when doing reads it reverts to stdin #endif } @@ -391,7 +391,7 @@ void sim_t::interactive_vreg(const std::string& cmd, const std::vector& args) { if (args.size() < 1) - throw trap_interactive(); + throw trap_interactive(); processor_t *p = get_core(args[0]); int max_xlen = p->get_isa().get_max_xlen(); @@ -404,14 +404,14 @@ void sim_t::interactive_reg(const std::string& cmd, const std::vectorget_state()->XPR[r], max_xlen); + << ": 0x" << std::setfill('0') << std::setw(max_xlen/4) + << zext(p->get_state()->XPR[r], max_xlen); if ((r + 1) % 4 == 0) out << std::endl; } } else { - out << "0x" << std::setfill('0') << std::setw(max_xlen/4) - << zext(get_reg(args), max_xlen) << std::endl; + out << "0x" << std::setfill('0') << std::setw(max_xlen/4) + << zext(get_reg(args), max_xlen) << std::endl; } } diff --git a/riscv/processor.cc b/riscv/processor.cc index 0325c51..790f18c 100644 --- a/riscv/processor.cc +++ b/riscv/processor.cc @@ -588,7 +588,7 @@ void processor_t::reset() put_csr(CSR_PMPCFG0, PMP_R | PMP_W | PMP_X | PMP_NAPOT); } - for (auto e : custom_extensions) // reset any extensions + for (auto e : custom_extensions) // reset any extensions e.second->reset(); if (sim) diff --git a/riscv/v_ext_macros.h b/riscv/v_ext_macros.h index 9ff383c..19207f7 100644 --- a/riscv/v_ext_macros.h +++ b/riscv/v_ext_macros.h @@ -56,7 +56,7 @@ static inline bool is_overlapped_widen(const int astart, int asize, if (astart < bstart && is_overlapped(astart, asize, bstart, bsize) && !is_overlapped(astart, asize, bstart + bsize, bsize)) { - return false; + return false; } else { return std::max(aend, bend) - std::min(astart, bstart) < asize + bsize; } -- cgit v1.1 From 2aaa89c0cf8fe0f45d284c0847f11d175eb82e03 Mon Sep 17 00:00:00 2001 From: Weiwei Li Date: Wed, 10 Aug 2022 11:02:23 +0800 Subject: Improve write log for vtype in set_vl Two writes to vtype will be logged in commitlog if vill is true --- riscv/processor.cc | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/riscv/processor.cc b/riscv/processor.cc index 790f18c..620a6f4 100644 --- a/riscv/processor.cc +++ b/riscv/processor.cc @@ -512,7 +512,6 @@ reg_t processor_t::vectorUnit_t::set_vl(int rd, int rs1, reg_t reqVL, reg_t newT { int new_vlmul = 0; if (vtype->read() != newType) { - vtype->write_raw(newType); vsew = 1 << (extract64(newType, 3, 3) + 3); new_vlmul = int8_t(extract64(newType, 0, 3) << 5) >> 5; vflmul = new_vlmul >= 0 ? 
1 << new_vlmul : 1.0 / (1 << -new_vlmul); @@ -527,6 +526,8 @@ reg_t processor_t::vectorUnit_t::set_vl(int rd, int rs1, reg_t reqVL, reg_t newT if (vill) { vlmax = 0; vtype->write_raw(UINT64_MAX << (p->get_xlen() - 1)); + } else { + vtype->write_raw(newType); } } -- cgit v1.1 From ce34edb0eecec520d6d2cfec5bda57ca90a69f14 Mon Sep 17 00:00:00 2001 From: Weiwei Li Date: Wed, 10 Aug 2022 22:55:07 +0800 Subject: Add space between if/while/switch and '(' Add space between ')' and '{' --- riscv/cachesim.cc | 6 +++--- riscv/debug_module.cc | 2 +- riscv/decode.h | 6 +++--- riscv/entropy_source.h | 10 +++++----- riscv/insns/aes64ks1i.h | 4 ++-- riscv/insns/beq.h | 2 +- riscv/insns/bge.h | 2 +- riscv/insns/bgeu.h | 2 +- riscv/insns/blt.h | 2 +- riscv/insns/bltu.h | 2 +- riscv/insns/bne.h | 2 +- riscv/insns/div.h | 4 ++-- riscv/insns/divu.h | 2 +- riscv/insns/divuw.h | 2 +- riscv/insns/divw.h | 2 +- riscv/insns/kmmawb2.h | 2 +- riscv/insns/kmmawb2_u.h | 2 +- riscv/insns/kmmawt2.h | 2 +- riscv/insns/kmmawt2_u.h | 2 +- riscv/insns/kmmwb2.h | 2 +- riscv/insns/kmmwb2_u.h | 2 +- riscv/insns/kmmwt2.h | 2 +- riscv/insns/kmmwt2_u.h | 2 +- riscv/insns/kslra16_u.h | 2 +- riscv/insns/kslra32_u.h | 2 +- riscv/insns/kslra8_u.h | 2 +- riscv/insns/kwmmul.h | 2 +- riscv/insns/kwmmul_u.h | 2 +- riscv/insns/rem.h | 4 ++-- riscv/insns/remu.h | 2 +- riscv/insns/remuw.h | 2 +- riscv/insns/remw.h | 2 +- riscv/insns/rsub64.h | 2 +- riscv/insns/sra16_u.h | 2 +- riscv/insns/sra32_u.h | 2 +- riscv/insns/sra8_u.h | 2 +- riscv/insns/vdiv_vx.h | 4 ++-- riscv/insns/vdivu_vv.h | 2 +- riscv/insns/vdivu_vx.h | 2 +- riscv/insns/vfmv_f_s.h | 2 +- riscv/insns/vfmv_s_f.h | 2 +- riscv/insns/vmsbf_m.h | 2 +- riscv/insns/vmsif_m.h | 2 +- riscv/insns/vmsof_m.h | 2 +- riscv/insns/vmv_s_x.h | 2 +- riscv/insns/vmv_x_s.h | 2 +- riscv/insns/vrem_vv.h | 2 +- riscv/insns/vsadd_vi.h | 2 +- riscv/insns/vsadd_vv.h | 2 +- riscv/insns/vsadd_vx.h | 2 +- riscv/insns/vslide1up_vx.h | 12 ++++++------ riscv/interactive.cc | 12 ++++++------ riscv/isa_parser.h | 2 +- riscv/processor.h | 4 ++-- 54 files changed, 77 insertions(+), 77 deletions(-) diff --git a/riscv/cachesim.cc b/riscv/cachesim.cc index 48840cb..498d407 100644 --- a/riscv/cachesim.cc +++ b/riscv/cachesim.cc @@ -39,9 +39,9 @@ cache_sim_t* cache_sim_t::construct(const char* config, const char* name) void cache_sim_t::init() { - if(sets == 0 || (sets & (sets-1))) + if (sets == 0 || (sets & (sets-1))) help(); - if(linesz < 8 || (linesz & (linesz-1))) + if (linesz < 8 || (linesz & (linesz-1))) help(); idx_shift = 0; @@ -76,7 +76,7 @@ cache_sim_t::~cache_sim_t() void cache_sim_t::print_stats() { - if(read_accesses + write_accesses == 0) + if (read_accesses + write_accesses == 0) return; float mr = 100.0f*(read_misses+write_misses)/(read_accesses+write_accesses); diff --git a/riscv/debug_module.cc b/riscv/debug_module.cc index 16520d1..d297f14 100644 --- a/riscv/debug_module.cc +++ b/riscv/debug_module.cc @@ -215,7 +215,7 @@ bool debug_module_t::store(reg_t addr, size_t len, const uint8_t* bytes) } } if (dmcontrol.hartsel == id) { - if (0 == (debug_rom_flags[id] & (1 << DEBUG_ROM_FLAG_GO))){ + if (0 == (debug_rom_flags[id] & (1 << DEBUG_ROM_FLAG_GO))) { if (dmcontrol.hartsel == id) { abstract_command_completed = true; } diff --git a/riscv/decode.h b/riscv/decode.h index 850863f..874d239 100644 --- a/riscv/decode.h +++ b/riscv/decode.h @@ -251,7 +251,7 @@ do { \ else { \ WRITE_FRD(value); \ } \ -} while(0) +} while (0) #define WRITE_FRD_F(value) \ do { \ if (p->extension_enabled(EXT_ZFINX)) \ @@ -259,7 
+259,7 @@ do { \ else { \ WRITE_FRD(value); \ } \ -} while(0) +} while (0) #define WRITE_FRD_D(value) \ do { \ if (p->extension_enabled(EXT_ZFINX)) { \ @@ -272,7 +272,7 @@ do { \ } else { \ WRITE_FRD(value); \ } \ -} while(0) +} while (0) #define SHAMT (insn.i_imm() & 0x3F) #define BRANCH_TARGET (pc + insn.sb_imm()) diff --git a/riscv/entropy_source.h b/riscv/entropy_source.h index 38645ae..c2ee2c4 100644 --- a/riscv/entropy_source.h +++ b/riscv/entropy_source.h @@ -49,21 +49,21 @@ public: // the bare minimum. uint32_t return_status = OPST_ES16; - if(return_status == OPST_ES16) { + if (return_status == OPST_ES16) { // Add some sampled entropy into the low 16 bits uint16_t entropy = this -> get_two_random_bytes(); result |= entropy; - } else if(return_status == OPST_BIST) { + } else if (return_status == OPST_BIST) { // Do nothing. - } else if(return_status == OPST_WAIT) { + } else if (return_status == OPST_WAIT) { // Do nothing. - } else if(return_status == OPST_DEAD) { + } else if (return_status == OPST_DEAD) { // Do nothing. Stay dead. @@ -94,7 +94,7 @@ public: std::ifstream fh(this -> randomness_source, std::ios::binary); - if(fh.is_open()) { + if (fh.is_open()) { uint16_t random_bytes; diff --git a/riscv/insns/aes64ks1i.h b/riscv/insns/aes64ks1i.h index fff7109..3ce3c3f 100644 --- a/riscv/insns/aes64ks1i.h +++ b/riscv/insns/aes64ks1i.h @@ -10,7 +10,7 @@ uint8_t round_consts [10] = { uint8_t enc_rcon = insn.rcon() ; -if(enc_rcon > 0xA) { +if (enc_rcon > 0xA) { // Invalid opcode. throw trap_illegal_instruction(0); } @@ -19,7 +19,7 @@ uint32_t temp = (RS1 >> 32) & 0xFFFFFFFF ; uint8_t rcon = 0 ; uint64_t result ; -if(enc_rcon != 0xA) { +if (enc_rcon != 0xA) { temp = (temp >> 8) | (temp << 24); // Rotate right by 8 rcon = round_consts[enc_rcon]; } diff --git a/riscv/insns/beq.h b/riscv/insns/beq.h index fd7e061..3d2c975 100644 --- a/riscv/insns/beq.h +++ b/riscv/insns/beq.h @@ -1,2 +1,2 @@ -if(RS1 == RS2) +if (RS1 == RS2) set_pc(BRANCH_TARGET); diff --git a/riscv/insns/bge.h b/riscv/insns/bge.h index da0c68e..b2421c2 100644 --- a/riscv/insns/bge.h +++ b/riscv/insns/bge.h @@ -1,2 +1,2 @@ -if(sreg_t(RS1) >= sreg_t(RS2)) +if (sreg_t(RS1) >= sreg_t(RS2)) set_pc(BRANCH_TARGET); diff --git a/riscv/insns/bgeu.h b/riscv/insns/bgeu.h index d764a34..f09b7f4 100644 --- a/riscv/insns/bgeu.h +++ b/riscv/insns/bgeu.h @@ -1,2 +1,2 @@ -if(RS1 >= RS2) +if (RS1 >= RS2) set_pc(BRANCH_TARGET); diff --git a/riscv/insns/blt.h b/riscv/insns/blt.h index c54fb76..cad064b 100644 --- a/riscv/insns/blt.h +++ b/riscv/insns/blt.h @@ -1,2 +1,2 @@ -if(sreg_t(RS1) < sreg_t(RS2)) +if (sreg_t(RS1) < sreg_t(RS2)) set_pc(BRANCH_TARGET); diff --git a/riscv/insns/bltu.h b/riscv/insns/bltu.h index ff75e8a..b7c3300 100644 --- a/riscv/insns/bltu.h +++ b/riscv/insns/bltu.h @@ -1,2 +1,2 @@ -if(RS1 < RS2) +if (RS1 < RS2) set_pc(BRANCH_TARGET); diff --git a/riscv/insns/bne.h b/riscv/insns/bne.h index 1e6cb7c..e832fa1 100644 --- a/riscv/insns/bne.h +++ b/riscv/insns/bne.h @@ -1,2 +1,2 @@ -if(RS1 != RS2) +if (RS1 != RS2) set_pc(BRANCH_TARGET); diff --git a/riscv/insns/div.h b/riscv/insns/div.h index 9cbe8d6..fb62437 100644 --- a/riscv/insns/div.h +++ b/riscv/insns/div.h @@ -1,9 +1,9 @@ require_extension('M'); sreg_t lhs = sext_xlen(RS1); sreg_t rhs = sext_xlen(RS2); -if(rhs == 0) +if (rhs == 0) WRITE_RD(UINT64_MAX); -else if(lhs == INT64_MIN && rhs == -1) +else if (lhs == INT64_MIN && rhs == -1) WRITE_RD(lhs); else WRITE_RD(sext_xlen(lhs / rhs)); diff --git a/riscv/insns/divu.h b/riscv/insns/divu.h index 31d7585..ed05818 100644 --- 
a/riscv/insns/divu.h +++ b/riscv/insns/divu.h @@ -1,7 +1,7 @@ require_extension('M'); reg_t lhs = zext_xlen(RS1); reg_t rhs = zext_xlen(RS2); -if(rhs == 0) +if (rhs == 0) WRITE_RD(UINT64_MAX); else WRITE_RD(sext_xlen(lhs / rhs)); diff --git a/riscv/insns/divuw.h b/riscv/insns/divuw.h index e127619..bc7e9d2 100644 --- a/riscv/insns/divuw.h +++ b/riscv/insns/divuw.h @@ -2,7 +2,7 @@ require_extension('M'); require_rv64; reg_t lhs = zext32(RS1); reg_t rhs = zext32(RS2); -if(rhs == 0) +if (rhs == 0) WRITE_RD(UINT64_MAX); else WRITE_RD(sext32(lhs / rhs)); diff --git a/riscv/insns/divw.h b/riscv/insns/divw.h index 11be17e..54409b0 100644 --- a/riscv/insns/divw.h +++ b/riscv/insns/divw.h @@ -2,7 +2,7 @@ require_extension('M'); require_rv64; sreg_t lhs = sext32(RS1); sreg_t rhs = sext32(RS2); -if(rhs == 0) +if (rhs == 0) WRITE_RD(UINT64_MAX); else WRITE_RD(sext32(lhs / rhs)); diff --git a/riscv/insns/kmmawb2.h b/riscv/insns/kmmawb2.h index 6b3aa0d..274f9dd 100644 --- a/riscv/insns/kmmawb2.h +++ b/riscv/insns/kmmawb2.h @@ -3,7 +3,7 @@ P_LOOP(32, { int64_t addop = 0; int64_t mres = 0; bool sat = false; - if((INT32_MIN != ps1) | (INT16_MIN != P_SH(ps2, 0))) { + if ((INT32_MIN != ps1) | (INT16_MIN != P_SH(ps2, 0))) { mres = ((int64_t) ps1 * P_SH(ps2, 0)) << 1; addop = mres >> 16; } else { diff --git a/riscv/insns/kmmawb2_u.h b/riscv/insns/kmmawb2_u.h index f44346e..447a3f4 100644 --- a/riscv/insns/kmmawb2_u.h +++ b/riscv/insns/kmmawb2_u.h @@ -3,7 +3,7 @@ P_LOOP(32, { int64_t addop = 0; int64_t mres = 0; bool sat = false; - if((INT32_MIN != ps1) | (INT16_MIN != P_SH(ps2, 0))) { + if ((INT32_MIN != ps1) | (INT16_MIN != P_SH(ps2, 0))) { mres = ((int64_t) ps1 * P_SH(ps2, 0)) << 1; addop = ((mres >> 15) + 1) >> 1; } else { diff --git a/riscv/insns/kmmawt2.h b/riscv/insns/kmmawt2.h index 3cd72de..6eb22ac 100644 --- a/riscv/insns/kmmawt2.h +++ b/riscv/insns/kmmawt2.h @@ -3,7 +3,7 @@ P_LOOP(32, { int64_t addop = 0; int64_t mres = 0; bool sat = false; - if((INT32_MIN != ps1) | (INT16_MIN != P_SH(ps2, 1))) { + if ((INT32_MIN != ps1) | (INT16_MIN != P_SH(ps2, 1))) { mres = ((int64_t) ps1 * P_SH(ps2, 1)) << 1; addop = mres >> 16; } else { diff --git a/riscv/insns/kmmawt2_u.h b/riscv/insns/kmmawt2_u.h index 7fe378c..b82e090 100644 --- a/riscv/insns/kmmawt2_u.h +++ b/riscv/insns/kmmawt2_u.h @@ -3,7 +3,7 @@ P_LOOP(32, { int64_t addop = 0; int64_t mres = 0; bool sat = false; - if((INT32_MIN != ps1) | (INT16_MIN != P_SH(ps2, 1))) { + if ((INT32_MIN != ps1) | (INT16_MIN != P_SH(ps2, 1))) { mres = ((int64_t) ps1 * P_SH(ps2, 1)) << 1; addop = ((mres >> 15) + 1) >> 1; } else { diff --git a/riscv/insns/kmmwb2.h b/riscv/insns/kmmwb2.h index 272f738..d08b0ef 100644 --- a/riscv/insns/kmmwb2.h +++ b/riscv/insns/kmmwb2.h @@ -1,6 +1,6 @@ require_vector_vs; P_LOOP(32, { - if((INT32_MIN != ps1) | (INT16_MIN != P_SH(ps2, 0))) { + if ((INT32_MIN != ps1) | (INT16_MIN != P_SH(ps2, 0))) { int64_t mres = ((int64_t) ps1 * P_SH(ps2, 0)) << 1; pd = mres >> 16; } else { diff --git a/riscv/insns/kmmwb2_u.h b/riscv/insns/kmmwb2_u.h index b5a5006..d308bf3 100644 --- a/riscv/insns/kmmwb2_u.h +++ b/riscv/insns/kmmwb2_u.h @@ -1,6 +1,6 @@ require_vector_vs; P_LOOP(32, { - if((INT32_MIN != ps1) | (INT16_MIN != P_SH(ps2, 0))) { + if ((INT32_MIN != ps1) | (INT16_MIN != P_SH(ps2, 0))) { int64_t mres = ((int64_t) ps1 * P_SH(ps2, 0)) << 1; pd = ((mres >> 15) + 1) >> 1; } else { diff --git a/riscv/insns/kmmwt2.h b/riscv/insns/kmmwt2.h index 73d3dc8..38ba9b1 100644 --- a/riscv/insns/kmmwt2.h +++ b/riscv/insns/kmmwt2.h @@ -1,6 +1,6 @@ require_vector_vs; 
P_LOOP(32, { - if((INT32_MIN != ps1) | (INT16_MIN != P_SH(ps2, 1))) { + if ((INT32_MIN != ps1) | (INT16_MIN != P_SH(ps2, 1))) { int64_t mres = ((int64_t) ps1 * P_SH(ps2, 1)) << 1; pd = mres >> 16; } else { diff --git a/riscv/insns/kmmwt2_u.h b/riscv/insns/kmmwt2_u.h index 1f525a8..e855786 100644 --- a/riscv/insns/kmmwt2_u.h +++ b/riscv/insns/kmmwt2_u.h @@ -1,6 +1,6 @@ require_vector_vs; P_LOOP(32, { - if((INT32_MIN != ps1) | (INT16_MIN != P_SH(ps2, 1))) { + if ((INT32_MIN != ps1) | (INT16_MIN != P_SH(ps2, 1))) { int64_t mres = ((int64_t) ps1 * P_SH(ps2, 1)) << 1; pd = ((mres >> 15) + 1) >> 1; } else { diff --git a/riscv/insns/kslra16_u.h b/riscv/insns/kslra16_u.h index 8335f3e..27bb77c 100644 --- a/riscv/insns/kslra16_u.h +++ b/riscv/insns/kslra16_u.h @@ -3,7 +3,7 @@ P_X_LOOP(16, 5, { if (ssa < 0) { sa = -ssa; sa = (sa == 16) ? 15 : sa; - if(sa != 0) + if (sa != 0) pd = ((ps1 >> (sa - 1)) + 1) >> 1; else pd = ps1; diff --git a/riscv/insns/kslra32_u.h b/riscv/insns/kslra32_u.h index d53c8fe..b9c06cf 100644 --- a/riscv/insns/kslra32_u.h +++ b/riscv/insns/kslra32_u.h @@ -4,7 +4,7 @@ P_X_LOOP(32, 6, { if (ssa < 0) { sa = -ssa; sa = (sa == 32) ? 31 : sa; - if(sa != 0) + if (sa != 0) pd = ((ps1 >> (sa - 1)) + 1) >> 1; else pd = ps1; diff --git a/riscv/insns/kslra8_u.h b/riscv/insns/kslra8_u.h index 620f3bd..340283f 100644 --- a/riscv/insns/kslra8_u.h +++ b/riscv/insns/kslra8_u.h @@ -3,7 +3,7 @@ P_X_LOOP(8, 4, { if (ssa < 0) { sa = -ssa; sa = (sa == 8) ? 7 : sa; - if(sa != 0) + if (sa != 0) pd = ((ps1 >> (sa - 1)) + 1) >> 1; else pd = ps1; diff --git a/riscv/insns/kwmmul.h b/riscv/insns/kwmmul.h index b0ab8d4..ca654f2 100644 --- a/riscv/insns/kwmmul.h +++ b/riscv/insns/kwmmul.h @@ -1,6 +1,6 @@ require_vector_vs; P_LOOP(32, { - if((INT32_MIN != ps1) | (INT32_MIN != ps2)) { + if ((INT32_MIN != ps1) | (INT32_MIN != ps2)) { int64_t mres = ((int64_t) ps1 * (int64_t) ps2) << 1; pd = mres >> 32; } else { diff --git a/riscv/insns/kwmmul_u.h b/riscv/insns/kwmmul_u.h index c2045e1..b435561 100644 --- a/riscv/insns/kwmmul_u.h +++ b/riscv/insns/kwmmul_u.h @@ -1,6 +1,6 @@ require_vector_vs; P_LOOP(32, { - if((INT32_MIN != ps1) | (INT32_MIN != ps2)) { + if ((INT32_MIN != ps1) | (INT32_MIN != ps2)) { int64_t mres = ((int64_t) ps1 * (int64_t) ps2) << 1; pd = ((mres >> 31) + 1) >> 1; } else { diff --git a/riscv/insns/rem.h b/riscv/insns/rem.h index 8587995..d2ee066 100644 --- a/riscv/insns/rem.h +++ b/riscv/insns/rem.h @@ -1,9 +1,9 @@ require_extension('M'); sreg_t lhs = sext_xlen(RS1); sreg_t rhs = sext_xlen(RS2); -if(rhs == 0) +if (rhs == 0) WRITE_RD(lhs); -else if(lhs == INT64_MIN && rhs == -1) +else if (lhs == INT64_MIN && rhs == -1) WRITE_RD(0); else WRITE_RD(sext_xlen(lhs % rhs)); diff --git a/riscv/insns/remu.h b/riscv/insns/remu.h index e74774c..676747a 100644 --- a/riscv/insns/remu.h +++ b/riscv/insns/remu.h @@ -1,7 +1,7 @@ require_extension('M'); reg_t lhs = zext_xlen(RS1); reg_t rhs = zext_xlen(RS2); -if(rhs == 0) +if (rhs == 0) WRITE_RD(sext_xlen(RS1)); else WRITE_RD(sext_xlen(lhs % rhs)); diff --git a/riscv/insns/remuw.h b/riscv/insns/remuw.h index b239c8f..caa1583 100644 --- a/riscv/insns/remuw.h +++ b/riscv/insns/remuw.h @@ -2,7 +2,7 @@ require_extension('M'); require_rv64; reg_t lhs = zext32(RS1); reg_t rhs = zext32(RS2); -if(rhs == 0) +if (rhs == 0) WRITE_RD(sext32(lhs)); else WRITE_RD(sext32(lhs % rhs)); diff --git a/riscv/insns/remw.h b/riscv/insns/remw.h index 56221cc..076096c 100644 --- a/riscv/insns/remw.h +++ b/riscv/insns/remw.h @@ -2,7 +2,7 @@ require_extension('M'); require_rv64; sreg_t 
lhs = sext32(RS1); sreg_t rhs = sext32(RS2); -if(rhs == 0) +if (rhs == 0) WRITE_RD(lhs); else WRITE_RD(sext32(lhs % rhs)); diff --git a/riscv/insns/rsub64.h b/riscv/insns/rsub64.h index 397c973..2a58485 100644 --- a/riscv/insns/rsub64.h +++ b/riscv/insns/rsub64.h @@ -2,7 +2,7 @@ P_64_PROFILE({ rd = (rs1 - rs2) >> 1; if (rs1 > 0 && rs2 < 0) { rd &= ~((reg_t)1 << 63); - } else if(rs1 < 0 && rs2 > 0) { + } else if (rs1 < 0 && rs2 > 0) { rd |= ((reg_t)1 << 63); } }) diff --git a/riscv/insns/sra16_u.h b/riscv/insns/sra16_u.h index c28178e..6fcc398 100644 --- a/riscv/insns/sra16_u.h +++ b/riscv/insns/sra16_u.h @@ -1,5 +1,5 @@ P_X_LOOP(16, 4, { - if(sa > 0) + if (sa > 0) pd = ((ps1 >> (sa - 1)) + 1) >> 1; else pd = ps1; diff --git a/riscv/insns/sra32_u.h b/riscv/insns/sra32_u.h index e062a88..1a4488c 100644 --- a/riscv/insns/sra32_u.h +++ b/riscv/insns/sra32_u.h @@ -1,6 +1,6 @@ require_rv64; P_X_LOOP(32, 5, { - if(sa > 0) + if (sa > 0) pd = (((uint64_t)(ps1 >> (sa - 1))) + 1) >> 1; else pd = ps1; diff --git a/riscv/insns/sra8_u.h b/riscv/insns/sra8_u.h index 7061fc4..1f47623 100644 --- a/riscv/insns/sra8_u.h +++ b/riscv/insns/sra8_u.h @@ -1,5 +1,5 @@ P_X_LOOP(8, 3, { - if(sa > 0) + if (sa > 0) pd = ((ps1 >> (sa - 1)) + 1) >> 1; else pd = ps1; diff --git a/riscv/insns/vdiv_vx.h b/riscv/insns/vdiv_vx.h index 4052952..2b93eac 100644 --- a/riscv/insns/vdiv_vx.h +++ b/riscv/insns/vdiv_vx.h @@ -1,9 +1,9 @@ // vdiv.vx vd, vs2, rs1 VI_VX_LOOP ({ - if(rs1 == 0) + if (rs1 == 0) vd = -1; - else if(vs2 == (INT64_MIN >> (64 - sew)) && rs1 == -1) + else if (vs2 == (INT64_MIN >> (64 - sew)) && rs1 == -1) vd = vs2; else vd = vs2 / rs1; diff --git a/riscv/insns/vdivu_vv.h b/riscv/insns/vdivu_vv.h index ef6e777..89aeed6 100644 --- a/riscv/insns/vdivu_vv.h +++ b/riscv/insns/vdivu_vv.h @@ -1,7 +1,7 @@ // vdivu.vv vd, vs2, vs1 VI_VV_ULOOP ({ - if(vs1 == 0) + if (vs1 == 0) vd = -1; else vd = vs2 / vs1; diff --git a/riscv/insns/vdivu_vx.h b/riscv/insns/vdivu_vx.h index 7ffe1c6..ce3e964 100644 --- a/riscv/insns/vdivu_vx.h +++ b/riscv/insns/vdivu_vx.h @@ -1,7 +1,7 @@ // vdivu.vx vd, vs2, rs1 VI_VX_ULOOP ({ - if(rs1 == 0) + if (rs1 == 0) vd = -1; else vd = vs2 / rs1; diff --git a/riscv/insns/vfmv_f_s.h b/riscv/insns/vfmv_f_s.h index 81605ea..0f3cf8c 100644 --- a/riscv/insns/vfmv_f_s.h +++ b/riscv/insns/vfmv_f_s.h @@ -9,7 +9,7 @@ require(STATE.frm->read() < 0x5); reg_t rs2_num = insn.rs2(); uint64_t vs2_0 = 0; const reg_t sew = P.VU.vsew; -switch(sew) { +switch (sew) { case e16: vs2_0 = P.VU.elt(rs2_num, 0); break; diff --git a/riscv/insns/vfmv_s_f.h b/riscv/insns/vfmv_s_f.h index edc376e..e50ad41 100644 --- a/riscv/insns/vfmv_s_f.h +++ b/riscv/insns/vfmv_s_f.h @@ -11,7 +11,7 @@ reg_t vl = P.VU.vl->read(); if (vl > 0 && P.VU.vstart->read() < vl) { reg_t rd_num = insn.rd(); - switch(P.VU.vsew) { + switch (P.VU.vsew) { case e16: P.VU.elt(rd_num, 0, true) = f16(FRS1).v; break; diff --git a/riscv/insns/vmsbf_m.h b/riscv/insns/vmsbf_m.h index 6147f6d..1275872 100644 --- a/riscv/insns/vmsbf_m.h +++ b/riscv/insns/vmsbf_m.h @@ -24,7 +24,7 @@ for (reg_t i = P.VU.vstart->read(); i < vl; ++i) { uint64_t res = 0; if (!has_one && !vs2_lsb) { res = 1; - } else if(!has_one && vs2_lsb) { + } else if (!has_one && vs2_lsb) { has_one = true; } vd = (vd & ~mmask) | ((res << mpos) & mmask); diff --git a/riscv/insns/vmsif_m.h b/riscv/insns/vmsif_m.h index 447813f..cbcbc2a 100644 --- a/riscv/insns/vmsif_m.h +++ b/riscv/insns/vmsif_m.h @@ -23,7 +23,7 @@ for (reg_t i = P.VU.vstart->read(); i < vl; ++i) { uint64_t res = 0; if (!has_one && !vs2_lsb) 
{ res = 1; - } else if(!has_one && vs2_lsb) { + } else if (!has_one && vs2_lsb) { has_one = true; res = 1; } diff --git a/riscv/insns/vmsof_m.h b/riscv/insns/vmsof_m.h index b9edcf3..9bd4f0c 100644 --- a/riscv/insns/vmsof_m.h +++ b/riscv/insns/vmsof_m.h @@ -21,7 +21,7 @@ for (reg_t i = P.VU.vstart->read() ; i < vl; ++i) { if (insn.v_vm() == 1 || (insn.v_vm() == 0 && do_mask)) { uint64_t &vd = P.VU.elt(rd_num, midx, true); uint64_t res = 0; - if(!has_one && vs2_lsb) { + if (!has_one && vs2_lsb) { has_one = true; res = 1; } diff --git a/riscv/insns/vmv_s_x.h b/riscv/insns/vmv_s_x.h index b66855b..23a6b56 100644 --- a/riscv/insns/vmv_s_x.h +++ b/riscv/insns/vmv_s_x.h @@ -8,7 +8,7 @@ if (vl > 0 && P.VU.vstart->read() < vl) { reg_t rd_num = insn.rd(); reg_t sew = P.VU.vsew; - switch(sew) { + switch (sew) { case e8: P.VU.elt(rd_num, 0, true) = RS1; break; diff --git a/riscv/insns/vmv_x_s.h b/riscv/insns/vmv_x_s.h index d33c3e5..8451d6a 100644 --- a/riscv/insns/vmv_x_s.h +++ b/riscv/insns/vmv_x_s.h @@ -6,7 +6,7 @@ reg_t rs1 = RS1; reg_t sew = P.VU.vsew; reg_t rs2_num = insn.rs2(); -switch(sew) { +switch (sew) { case e8: WRITE_RD(P.VU.elt(rs2_num, 0)); break; diff --git a/riscv/insns/vrem_vv.h b/riscv/insns/vrem_vv.h index 260716a..5c58fa4 100644 --- a/riscv/insns/vrem_vv.h +++ b/riscv/insns/vrem_vv.h @@ -3,7 +3,7 @@ VI_VV_LOOP ({ if (vs1 == 0) vd = vs2; - else if(vs2 == -(((intmax_t)1) << (sew - 1)) && vs1 == -1) + else if (vs2 == -(((intmax_t)1) << (sew - 1)) && vs1 == -1) vd = 0; else { vd = vs2 % vs1; diff --git a/riscv/insns/vsadd_vi.h b/riscv/insns/vsadd_vi.h index 7e3b652..3a8b1d4 100644 --- a/riscv/insns/vsadd_vi.h +++ b/riscv/insns/vsadd_vi.h @@ -2,7 +2,7 @@ VI_CHECK_SSS(false); VI_LOOP_BASE bool sat = false; -switch(sew) { +switch (sew) { case e8: { VI_PARAMS(e8); vd = sat_add(vs2, vsext(simm5, sew), sat); diff --git a/riscv/insns/vsadd_vv.h b/riscv/insns/vsadd_vv.h index 60ad5f3..d4cfe78 100644 --- a/riscv/insns/vsadd_vv.h +++ b/riscv/insns/vsadd_vv.h @@ -2,7 +2,7 @@ VI_CHECK_SSS(true); VI_LOOP_BASE bool sat = false; -switch(sew) { +switch (sew) { case e8: { VV_PARAMS(e8); vd = sat_add(vs2, vs1, sat); diff --git a/riscv/insns/vsadd_vx.h b/riscv/insns/vsadd_vx.h index bf68f15..e5e6c40 100644 --- a/riscv/insns/vsadd_vx.h +++ b/riscv/insns/vsadd_vx.h @@ -2,7 +2,7 @@ VI_CHECK_SSS(false); VI_LOOP_BASE bool sat = false; -switch(sew) { +switch (sew) { case e8: { VX_PARAMS(e8); vd = sat_add(vs2, rs1, sat); diff --git a/riscv/insns/vslide1up_vx.h b/riscv/insns/vslide1up_vx.h index 33cb9ed..256419e 100644 --- a/riscv/insns/vslide1up_vx.h +++ b/riscv/insns/vslide1up_vx.h @@ -6,24 +6,24 @@ if (i != 0) { if (sew == e8) { VI_XI_SLIDEUP_PARAMS(e8, 1); vd = vs2; - } else if(sew == e16) { + } else if (sew == e16) { VI_XI_SLIDEUP_PARAMS(e16, 1); vd = vs2; - } else if(sew == e32) { + } else if (sew == e32) { VI_XI_SLIDEUP_PARAMS(e32, 1); vd = vs2; - } else if(sew == e64) { + } else if (sew == e64) { VI_XI_SLIDEUP_PARAMS(e64, 1); vd = vs2; } } else { if (sew == e8) { P.VU.elt(rd_num, 0, true) = RS1; - } else if(sew == e16) { + } else if (sew == e16) { P.VU.elt(rd_num, 0, true) = RS1; - } else if(sew == e32) { + } else if (sew == e32) { P.VU.elt(rd_num, 0, true) = RS1; - } else if(sew == e64) { + } else if (sew == e64) { P.VU.elt(rd_num, 0, true) = RS1; } } diff --git a/riscv/interactive.cc b/riscv/interactive.cc index 4e2ffb1..c6a9f80 100644 --- a/riscv/interactive.cc +++ b/riscv/interactive.cc @@ -271,7 +271,7 @@ reg_t sim_t::get_pc(const std::vector& args) void sim_t::interactive_pc(const std::string& 
cmd, const std::vector& args) { - if(args.size() != 1) + if (args.size() != 1) throw trap_interactive(); processor_t *p = get_core(args[0]); @@ -309,7 +309,7 @@ reg_t sim_t::get_reg(const std::vector& args) freg_t sim_t::get_freg(const std::vector& args, int size) { - if(args.size() != 2) + if (args.size() != 2) throw trap_interactive(); processor_t *p = get_core(args[0]); @@ -363,9 +363,9 @@ void sim_t::interactive_vreg(const std::string& cmd, const std::vector= 0; --e){ + for (int e = num_elem-1; e >= 0; --e) { uint64_t val; - switch(elen){ + switch (elen) { case 8: val = p->VU.elt(r, e); out << std::dec << "[" << e << "]: 0x" << std::hex << std::setfill ('0') << std::setw(16) << val << " "; @@ -475,7 +475,7 @@ reg_t sim_t::get_mem(const std::vector& args) if (addr == LONG_MAX) addr = strtoul(addr_str.c_str(),NULL,16); - switch(addr % 8) + switch (addr % 8) { case 0: val = mmu->load_uint64(addr); @@ -522,7 +522,7 @@ void sim_t::interactive_str(const std::string& cmd, const std::vectorload_uint8(addr++))) + while ((ch = mmu->load_uint8(addr++))) out << ch; out << std::endl; diff --git a/riscv/isa_parser.h b/riscv/isa_parser.h index c57436e..2aed262 100644 --- a/riscv/isa_parser.h +++ b/riscv/isa_parser.h @@ -69,7 +69,7 @@ typedef enum { class isa_parser_t { public: isa_parser_t(const char* str, const char *priv); - ~isa_parser_t(){}; + ~isa_parser_t() {}; unsigned get_max_xlen() const { return max_xlen; } reg_t get_max_isa() const { return max_isa; } std::string get_isa_string() const { return isa_string; } diff --git a/riscv/processor.h b/riscv/processor.h index 88ddf70..073b25b 100644 --- a/riscv/processor.h +++ b/riscv/processor.h @@ -406,7 +406,7 @@ public: // vector element for varies SEW template - T& elt(reg_t vReg, reg_t n, bool is_write = false){ + T& elt(reg_t vReg, reg_t n, bool is_write = false) { assert(vsew != 0); assert((VLEN >> 3)/sizeof(T) > 0); reg_t elts_per_reg = (VLEN >> 3) / (sizeof(T)); @@ -453,7 +453,7 @@ public: vstart_alu(false) { } - ~vectorUnit_t(){ + ~vectorUnit_t() { free(reg_file); reg_file = 0; } -- cgit v1.1 From f04675c96c5c4668848431f4c729363060ce13e7 Mon Sep 17 00:00:00 2001 From: ksco Date: Thu, 11 Aug 2022 17:45:06 +0800 Subject: Remove dead code in VI_VV_EXT macro (#1065) --- riscv/v_ext_macros.h | 3 --- 1 file changed, 3 deletions(-) diff --git a/riscv/v_ext_macros.h b/riscv/v_ext_macros.h index 19207f7..3f9b4af 100644 --- a/riscv/v_ext_macros.h +++ b/riscv/v_ext_macros.h @@ -1468,9 +1468,6 @@ reg_t index[P.VU.vlmax]; \ case 0x84: \ P.VU.elt(rd_num, i, true) = P.VU.elt(rs2_num, i); \ break; \ - case 0x88: \ - P.VU.elt(rd_num, i, true) = P.VU.elt(rs2_num, i); \ - break; \ default: \ break; \ } \ -- cgit v1.1 From ac117cc35aa2bc5296f7a7bfd817a539e269919f Mon Sep 17 00:00:00 2001 From: Greg Chadwick Date: Thu, 11 Aug 2022 21:09:57 +0100 Subject: Unify PMPCFGx behaviour with PMPADDRx where PMP is disabled (#1068) Previously any access to the PMPADDRx CSRs when no PMP regions were configured would result in an illegal instruction trap, whilst PMPCFGx registers would act as WARL, ignoring writes and reading as 0. This unifies the behaviour so both PMPADDRx and PMPCFGx CSRs produce an illegal instruction trap when accessed when no PMP regions are configured. 
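For reference, the behaviour being unified, on a hart configured with no PMP regions, is roughly:

    CSR access       previous behaviour          new behaviour
    PMPADDRx (any)   illegal-instruction trap    illegal-instruction trap
    PMPCFGx read     reads as 0 (WARL)           illegal-instruction trap
    PMPCFGx write    write ignored               illegal-instruction trap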
--- riscv/csrs.cc | 10 ++++++++++ riscv/csrs.h | 1 + 2 files changed, 11 insertions(+) diff --git a/riscv/csrs.cc b/riscv/csrs.cc index bc5db50..e516e4a 100644 --- a/riscv/csrs.cc +++ b/riscv/csrs.cc @@ -218,6 +218,16 @@ pmpcfg_csr_t::pmpcfg_csr_t(processor_t* const proc, const reg_t addr): csr_t(proc, addr) { } +void pmpcfg_csr_t::verify_permissions(insn_t insn, bool write) const { + csr_t::verify_permissions(insn, write); + // If n_pmp is zero, that means pmp is not implemented hence raise + // trap if it tries to access the csr. I would prefer to implement + // this by not instantiating any pmpcfg_csr_t for these regs, but + // n_pmp can change after reset() is run. + if (proc->n_pmp == 0) + throw trap_illegal_instruction(insn.bits()); +} + reg_t pmpcfg_csr_t::read() const noexcept { reg_t cfg_res = 0; for (size_t i0 = (address - CSR_PMPCFG0) * 4, i = i0; i < i0 + proc->get_xlen() / 8 && i < state->max_pmp; i++) diff --git a/riscv/csrs.h b/riscv/csrs.h index 85a4d50..cabd61e 100644 --- a/riscv/csrs.h +++ b/riscv/csrs.h @@ -124,6 +124,7 @@ typedef std::shared_ptr pmpaddr_csr_t_p; class pmpcfg_csr_t: public csr_t { public: pmpcfg_csr_t(processor_t* const proc, const reg_t addr); + virtual void verify_permissions(insn_t insn, bool write) const override; virtual reg_t read() const noexcept override; protected: virtual bool unlogged_write(const reg_t val) noexcept override; -- cgit v1.1 From 6aa46e99c36f1bf394b3f7fae3c729e8b7918a80 Mon Sep 17 00:00:00 2001 From: ksco Date: Sat, 13 Aug 2022 04:37:26 +0800 Subject: Remove unused code in vsmul* (#1069) --- riscv/insns/vsmul_vv.h | 12 ++---------- riscv/insns/vsmul_vx.h | 9 --------- 2 files changed, 2 insertions(+), 19 deletions(-) diff --git a/riscv/insns/vsmul_vv.h b/riscv/insns/vsmul_vv.h index 413981c..49e42c1 100644 --- a/riscv/insns/vsmul_vv.h +++ b/riscv/insns/vsmul_vv.h @@ -2,27 +2,19 @@ VRM xrm = P.VU.get_vround_mode(); int64_t int_max = INT64_MAX >> (64 - P.VU.vsew); int64_t int_min = INT64_MIN >> (64 - P.VU.vsew); -int64_t sign_mask = uint64_t(1) << (P.VU.vsew - 1); VI_VV_LOOP ({ - int64_t vs1_sign; - int64_t vs2_sign; - int64_t result_sign; - - vs1_sign = vs1 & sign_mask; - vs2_sign = vs2 & sign_mask; bool overflow = vs1 == vs2 && vs1 == int_min; - int128_t result = (int128_t)vs1 * (int128_t)vs2; - result_sign = (vs1_sign ^ vs2_sign) & sign_mask; // rounding INT_ROUNDING(result, xrm, sew - 1); + // remove guard bits result = result >> (sew - 1); - // saturation + // max saturation if (overflow) { result = int_max; P_SET_OV(1); diff --git a/riscv/insns/vsmul_vx.h b/riscv/insns/vsmul_vx.h index 2e25670..d2724ee 100644 --- a/riscv/insns/vsmul_vx.h +++ b/riscv/insns/vsmul_vx.h @@ -2,20 +2,11 @@ VRM xrm = P.VU.get_vround_mode(); int64_t int_max = INT64_MAX >> (64 - P.VU.vsew); int64_t int_min = INT64_MIN >> (64 - P.VU.vsew); -int64_t sign_mask = uint64_t(1) << (P.VU.vsew - 1); VI_VX_LOOP ({ - int64_t rs1_sign; - int64_t vs2_sign; - int64_t result_sign; - - rs1_sign = rs1 & sign_mask; - vs2_sign = vs2 & sign_mask; bool overflow = rs1 == vs2 && rs1 == int_min; - int128_t result = (int128_t)rs1 * (int128_t)vs2; - result_sign = (rs1_sign ^ vs2_sign) & sign_mask; // rounding INT_ROUNDING(result, xrm, sew - 1); -- cgit v1.1 From 25f2e61d0e178890c0fd46d6a5a288c33d7c6fc8 Mon Sep 17 00:00:00 2001 From: Andrew Waterman Date: Mon, 22 Aug 2022 13:53:44 -0700 Subject: "Implement" Zihintntl extension (#1071) These are just HINTs, so Spike only needs ISA-parser support. 
--- riscv/isa_parser.cc | 2 ++ 1 file changed, 2 insertions(+) diff --git a/riscv/isa_parser.cc b/riscv/isa_parser.cc index ddb98c9..0869bf3 100644 --- a/riscv/isa_parser.cc +++ b/riscv/isa_parser.cc @@ -114,6 +114,8 @@ isa_parser_t::isa_parser_t(const char* str, const char *priv) // unconditionally include FENCE.I, so Zifencei adds nothing more. } else if (ext_str == "zihintpause") { // HINTs encoded in base-ISA instructions are always present. + } else if (ext_str == "zihintntl") { + // HINTs encoded in base-ISA instructions are always present. } else if (ext_str == "zmmul") { extension_table[EXT_ZMMUL] = true; } else if (ext_str == "zba") { -- cgit v1.1 From 1556cf7fe2d8e31ca9e80b8e6e9eea7fbf12314d Mon Sep 17 00:00:00 2001 From: yangcheng <28107329+yorange1@users.noreply.github.com> Date: Tue, 23 Aug 2022 05:05:19 +0800 Subject: Fix redundant loops when calculating vrgather.vi. (#1072) --- riscv/insns/vrgather_vi.h | 6 ------ 1 file changed, 6 deletions(-) diff --git a/riscv/insns/vrgather_vi.h b/riscv/insns/vrgather_vi.h index 56e11e1..85ba621 100644 --- a/riscv/insns/vrgather_vi.h +++ b/riscv/insns/vrgather_vi.h @@ -7,10 +7,6 @@ require_vm; reg_t zimm5 = insn.v_zimm5(); VI_LOOP_BASE - -for (reg_t i = P.VU.vstart->read(); i < vl; ++i) { - VI_LOOP_ELEMENT_SKIP(); - switch (sew) { case e8: P.VU.elt(rd_num, i, true) = zimm5 >= P.VU.vlmax ? 0 : P.VU.elt(rs2_num, zimm5); @@ -25,6 +21,4 @@ for (reg_t i = P.VU.vstart->read(); i < vl; ++i) { P.VU.elt(rd_num, i, true) = zimm5 >= P.VU.vlmax ? 0 : P.VU.elt(rs2_num, zimm5); break; } -} - VI_LOOP_END; -- cgit v1.1 From 2667f611fb258ffdfb7741521e6dd8aac61f30ea Mon Sep 17 00:00:00 2001 From: Scott Johnson Date: Tue, 23 Aug 2022 10:51:24 -0700 Subject: Separate variables that contain two different things No reason to use a variable misleadingly named 'paddr' to hold the virtual address. --- riscv/mmu.h | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/riscv/mmu.h b/riscv/mmu.h index 6f24ec7..db23892 100644 --- a/riscv/mmu.h +++ b/riscv/mmu.h @@ -263,8 +263,8 @@ public: void clean_inval(reg_t addr, bool clean, bool inval) { convert_load_traps_to_store_traps({ - reg_t paddr = addr & ~(blocksz - 1); - paddr = translate(paddr, blocksz, LOAD, 0); + reg_t vaddr = addr & ~(blocksz - 1); + reg_t paddr = translate(vaddr, blocksz, LOAD, 0); if (auto host_addr = sim->addr_to_mem(paddr)) { if (tracer.interested_in_range(paddr, paddr + PGSIZE, LOAD)) tracer.clean_invalidate(paddr, blocksz, clean, inval); -- cgit v1.1 From d3cb2470518cfb25ee205269deb060715df3427e Mon Sep 17 00:00:00 2001 From: Scott Johnson Date: Tue, 23 Aug 2022 10:53:00 -0700 Subject: Constantize variables Because it's always better to do so where possible. 
--- riscv/mmu.h | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/riscv/mmu.h b/riscv/mmu.h index db23892..9a93f16 100644 --- a/riscv/mmu.h +++ b/riscv/mmu.h @@ -263,8 +263,8 @@ public: void clean_inval(reg_t addr, bool clean, bool inval) { convert_load_traps_to_store_traps({ - reg_t vaddr = addr & ~(blocksz - 1); - reg_t paddr = translate(vaddr, blocksz, LOAD, 0); + const reg_t vaddr = addr & ~(blocksz - 1); + const reg_t paddr = translate(vaddr, blocksz, LOAD, 0); if (auto host_addr = sim->addr_to_mem(paddr)) { if (tracer.interested_in_range(paddr, paddr + PGSIZE, LOAD)) tracer.clean_invalidate(paddr, blocksz, clean, inval); -- cgit v1.1 From 3ac734e195e76e7c5e52448a95ea91885af31647 Mon Sep 17 00:00:00 2001 From: YenHaoChen Date: Thu, 25 Aug 2022 20:47:15 +0800 Subject: Fix tval on illegal instruction faults with long illegal instruction The current spike implementation does not include the ILEN (maximum instruction length supported by the implementation), which is required to constrain the value of tval on an illegal instruction exception. Consider an implementation supporting only an RV64I base instruction set. The ILEN is 32 bits (spec sec. 1.5), and the MXLEN is 64 bits (spec sec. 5.1). Under an illegal instruction exception with the instruction longer than the ILEN, the mtval should contain the first ILEN (32 bits) of the faulting instruction. However, the current spike implementation lets the mtval be the instruction's first MXLEN (64 bits). To fix this bug, this PR masks out the upper bits of the tval and leaves the first ILEN bits of the faulting instruction. When this PR is being made, all official instructions are either 16 or 32 bits. So, We hard- code the ILEN to 32 bits. --- riscv/processor.cc | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/riscv/processor.cc b/riscv/processor.cc index 620a6f4..3d56abc 100644 --- a/riscv/processor.cc +++ b/riscv/processor.cc @@ -949,7 +949,10 @@ reg_t processor_t::get_csr(int which, insn_t insn, bool write, bool peek) reg_t illegal_instruction(processor_t* p, insn_t insn, reg_t pc) { - throw trap_illegal_instruction(insn.bits()); + // The illegal instruction can be longer than ILEN bits, where the tval will + // contain the first ILEN bits of the faulting instruction. We hard-code the + // ILEN to 32 bits since all official instructions have at most 32 bits. + throw trap_illegal_instruction(insn.bits() & 0xffffffffULL); } insn_func_t processor_t::decode_insn(insn_t insn) -- cgit v1.1 From f1c5178f2c559c2c6281f10d46c410c684fe41de Mon Sep 17 00:00:00 2001 From: Rupert Swarbrick Date: Mon, 29 Aug 2022 12:33:09 +0100 Subject: Fix libdir and includedir in pkg-config files (#917) The autoconf @libdir@ and @includedir@ variables include the prefix part so we were generating things like "/usr//usr/lib". 
--- riscv-disasm.pc.in | 4 ++-- riscv-fesvr.pc.in | 4 ++-- 2 files changed, 4 insertions(+), 4 deletions(-) diff --git a/riscv-disasm.pc.in b/riscv-disasm.pc.in index 8e022e9..915136a 100644 --- a/riscv-disasm.pc.in +++ b/riscv-disasm.pc.in @@ -1,7 +1,7 @@ prefix=@prefix@ exec_prefix=@prefix@ -libdir=${prefix}/@libdir@ -includedir=${prefix}/@includedir@ +libdir=@libdir@ +includedir=@includedir@ Name: riscv-disasm Description: RISC-V disassembler diff --git a/riscv-fesvr.pc.in b/riscv-fesvr.pc.in index efd7eed..82a9569 100644 --- a/riscv-fesvr.pc.in +++ b/riscv-fesvr.pc.in @@ -1,7 +1,7 @@ prefix=@prefix@ exec_prefix=@prefix@ -libdir=${prefix}/@libdir@ -includedir=${prefix}/@includedir@ +libdir=@libdir@ +includedir=@includedir@ Name: riscv-fesvr Description: RISC-V front-end server -- cgit v1.1 From 204f43cf8288550f1070c216e57f16bb45c5f560 Mon Sep 17 00:00:00 2001 From: Kip Walker Date: Wed, 31 Aug 2022 19:07:33 -0700 Subject: Add disassembly support for Zbc instructions (#1076) --- disasm/disasm.cc | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/disasm/disasm.cc b/disasm/disasm.cc index b42bdc2..8a03677 100644 --- a/disasm/disasm.cc +++ b/disasm/disasm.cc @@ -889,6 +889,12 @@ void disassembler_t::add_instructions(const isa_parser_t* isa) } } + if (isa->extension_enabled(EXT_ZBC)) { + DEFINE_RTYPE(clmul); + DEFINE_RTYPE(clmulh); + DEFINE_RTYPE(clmulr); + } + if (isa->extension_enabled(EXT_ZBS)) { DEFINE_RTYPE(bclr); DEFINE_RTYPE(binv); -- cgit v1.1 From 476a79fe2fc6c4c7b809d0b359ad041a1034ffce Mon Sep 17 00:00:00 2001 From: Andrew Waterman Date: Thu, 1 Sep 2022 20:23:16 -0700 Subject: Substantially increase context_t stack size It's nearly free to do so, because it's just virtual address space. @davidbiancolin recently fell into this pit when using context_t with VCS. 
--- fesvr/context.cc | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/fesvr/context.cc b/fesvr/context.cc index ca73813..1dceeec 100644 --- a/fesvr/context.cc +++ b/fesvr/context.cc @@ -49,7 +49,7 @@ void context_t::init(void (*f)(void*), void* a) #ifdef USE_UCONTEXT getcontext(context.get()); context->uc_link = creator->context.get(); - context->uc_stack.ss_size = 64*1024; + context->uc_stack.ss_size = 1024 * 1024; context->uc_stack.ss_sp = new void*[context->uc_stack.ss_size/sizeof(void*)]; #ifndef GLIBC_64BIT_PTR_BUG makecontext(context.get(), (void(*)(void))&context_t::wrapper, 1, this); -- cgit v1.1 From 4d515ade6ef853f505e168d05e964f04b1bd938f Mon Sep 17 00:00:00 2001 From: Andrew Waterman Date: Tue, 6 Sep 2022 15:48:11 -0700 Subject: vmvr.v depends on vtype, and therefore should check vill --- riscv/insns/vmvnfr_v.h | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/riscv/insns/vmvnfr_v.h b/riscv/insns/vmvnfr_v.h index f6dc2c0..1dc304a 100644 --- a/riscv/insns/vmvnfr_v.h +++ b/riscv/insns/vmvnfr_v.h @@ -1,5 +1,5 @@ // vmv1r.v vd, vs2 -require_vector_novtype(true, true); +require_vector(true); const reg_t baseAddr = RS1; const reg_t vd = insn.rd(); const reg_t vs2 = insn.rs2(); -- cgit v1.1 From 503b0106d7a328d9aa4282186fa48436b389e934 Mon Sep 17 00:00:00 2001 From: Andrew Waterman Date: Tue, 6 Sep 2022 15:49:18 -0700 Subject: fix comment in definition of vmvr.v --- riscv/insns/vmvnfr_v.h | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/riscv/insns/vmvnfr_v.h b/riscv/insns/vmvnfr_v.h index 1dc304a..543ac58 100644 --- a/riscv/insns/vmvnfr_v.h +++ b/riscv/insns/vmvnfr_v.h @@ -1,4 +1,4 @@ -// vmv1r.v vd, vs2 +// vmvr.v vd, vs2 require_vector(true); const reg_t baseAddr = RS1; const reg_t vd = insn.rd(); -- cgit v1.1 From 3721abe667842a47c7b0447399fb59e1cb46f696 Mon Sep 17 00:00:00 2001 From: Weiwei Li Date: Thu, 8 Sep 2022 21:04:42 +0800 Subject: Remove redundant require_vector in macro VI_MERGE_LOOP_BASE require_vector is also included in VI_GENERAL_LOOP_BASE --- riscv/v_ext_macros.h | 1 - 1 file changed, 1 deletion(-) diff --git a/riscv/v_ext_macros.h b/riscv/v_ext_macros.h index 3f9b4af..71a5401 100644 --- a/riscv/v_ext_macros.h +++ b/riscv/v_ext_macros.h @@ -517,7 +517,6 @@ static inline bool is_aligned(const unsigned val, const unsigned pos) bool use_first = (P.VU.elt(0, midx) >> mpos) & 0x1; #define VI_MERGE_LOOP_BASE \ - require_vector(true); \ VI_GENERAL_LOOP_BASE \ VI_MERGE_VARS -- cgit v1.1 From d85446f81f3c9405a2041ab5235281a3bbaab77f Mon Sep 17 00:00:00 2001 From: Weiwei Li Date: Thu, 8 Sep 2022 21:16:35 +0800 Subject: Remove unnecessary argument alu(always false) from macro require_vector_novtype --- riscv/decode.h | 4 +--- riscv/insns/vsetivli.h | 2 +- riscv/insns/vsetvl.h | 2 +- riscv/insns/vsetvli.h | 2 +- riscv/v_ext_macros.h | 4 ++-- 5 files changed, 6 insertions(+), 8 deletions(-) diff --git a/riscv/decode.h b/riscv/decode.h index 874d239..5fb0358 100644 --- a/riscv/decode.h +++ b/riscv/decode.h @@ -306,12 +306,10 @@ do { \ WRITE_VSTATUS; \ dirty_vs_state; \ } while (0); -#define require_vector_novtype(is_log, alu) \ +#define require_vector_novtype(is_log) \ do { \ require_vector_vs; \ require_extension('V'); \ - if (alu && !P.VU.vstart_alu) \ - require(P.VU.vstart->read() == 0); \ if (is_log) \ WRITE_VSTATUS; \ dirty_vs_state; \ diff --git a/riscv/insns/vsetivli.h b/riscv/insns/vsetivli.h index 04900a2..f880e96 100644 --- a/riscv/insns/vsetivli.h +++ b/riscv/insns/vsetivli.h @@ -1,2 +1,2 @@ 
-require_vector_novtype(false, false); +require_vector_novtype(false); WRITE_RD(P.VU.set_vl(insn.rd(), -1, insn.rs1(), insn.v_zimm10())); diff --git a/riscv/insns/vsetvl.h b/riscv/insns/vsetvl.h index 2969edc..4d03542 100644 --- a/riscv/insns/vsetvl.h +++ b/riscv/insns/vsetvl.h @@ -1,2 +1,2 @@ -require_vector_novtype(false, false); +require_vector_novtype(false); WRITE_RD(P.VU.set_vl(insn.rd(), insn.rs1(), RS1, RS2)); diff --git a/riscv/insns/vsetvli.h b/riscv/insns/vsetvli.h index 7b1f1d7..d1f43b5 100644 --- a/riscv/insns/vsetvli.h +++ b/riscv/insns/vsetvli.h @@ -1,2 +1,2 @@ -require_vector_novtype(false, false); +require_vector_novtype(false); WRITE_RD(P.VU.set_vl(insn.rd(), insn.rs1(), RS1, insn.v_zimm11())); diff --git a/riscv/v_ext_macros.h b/riscv/v_ext_macros.h index 71a5401..2893306 100644 --- a/riscv/v_ext_macros.h +++ b/riscv/v_ext_macros.h @@ -1317,7 +1317,7 @@ reg_t index[P.VU.vlmax]; \ p->VU.vstart->write(0); #define VI_LD_WHOLE(elt_width) \ - require_vector_novtype(true, false); \ + require_vector_novtype(true); \ require(sizeof(elt_width ## _t) * 8 <= P.VU.ELEN); \ const reg_t baseAddr = RS1; \ const reg_t vd = insn.rd(); \ @@ -1349,7 +1349,7 @@ reg_t index[P.VU.vlmax]; \ P.VU.vstart->write(0); #define VI_ST_WHOLE \ - require_vector_novtype(true, false); \ + require_vector_novtype(true); \ const reg_t baseAddr = RS1; \ const reg_t vs3 = insn.rd(); \ const reg_t len = insn.v_nf() + 1; \ -- cgit v1.1 From 25bca65b95d975d8f3bd4735483a4a3ee5c38440 Mon Sep 17 00:00:00 2001 From: Weiwei Li Date: Thu, 8 Sep 2022 22:10:03 +0800 Subject: Remove redundant require_vm in macro VI_VV_LOOP_WITH_CARRY and VI_XI_LOOP_WITH_CARRY: require_vm is also included in VI_CHECK_SSS --- riscv/v_ext_macros.h | 2 -- 1 file changed, 2 deletions(-) diff --git a/riscv/v_ext_macros.h b/riscv/v_ext_macros.h index 2893306..afc4837 100644 --- a/riscv/v_ext_macros.h +++ b/riscv/v_ext_macros.h @@ -1079,7 +1079,6 @@ static inline bool is_aligned(const unsigned val, const unsigned pos) VI_LOOP_CARRY_END #define VI_VV_LOOP_WITH_CARRY(BODY) \ - require_vm; \ VI_CHECK_SSS(true); \ VI_LOOP_WITH_CARRY_BASE \ if (sew == e8) { \ @@ -1098,7 +1097,6 @@ static inline bool is_aligned(const unsigned val, const unsigned pos) VI_LOOP_END #define VI_XI_LOOP_WITH_CARRY(BODY) \ - require_vm; \ VI_CHECK_SSS(false); \ VI_LOOP_WITH_CARRY_BASE \ if (sew == e8) { \ -- cgit v1.1 From 711215124bdf5bed4d1941c0a023a1d6881aca1d Mon Sep 17 00:00:00 2001 From: Scott Johnson Date: Tue, 6 Sep 2022 12:56:23 -0700 Subject: Remove obsolete enum trigger_operation_t Moved to triggers.h and renamed in a2a2587426e57f6207d5389620e9109bc0f82e6b, but the old enum was mistakenly left behind. --- riscv/processor.h | 6 ------ 1 file changed, 6 deletions(-) diff --git a/riscv/processor.h b/riscv/processor.h index 073b25b..3937211 100644 --- a/riscv/processor.h +++ b/riscv/processor.h @@ -218,12 +218,6 @@ struct state_t #endif }; -typedef enum { - OPERATION_EXECUTE, - OPERATION_STORE, - OPERATION_LOAD, -} trigger_operation_t; - // Count number of contiguous 1 bits starting from the LSB. static int cto(reg_t val) { -- cgit v1.1 From 95f36bc28eb79bb9d7a8681dcb2a83b364842b62 Mon Sep 17 00:00:00 2001 From: Scott Johnson Date: Thu, 15 Sep 2022 10:29:28 -0700 Subject: Initialize triggers using default member initializers Instead of constructor member initializer lists, which require repeating the list of members, and cause ugly merge conflicts when the list of members changes. 
--- riscv/triggers.cc | 7 ------- riscv/triggers.h | 31 ++++++++++++++----------------- 2 files changed, 14 insertions(+), 24 deletions(-) diff --git a/riscv/triggers.cc b/riscv/triggers.cc index c9d6161..4b73715 100644 --- a/riscv/triggers.cc +++ b/riscv/triggers.cc @@ -4,13 +4,6 @@ namespace triggers { -mcontrol_t::mcontrol_t() : - select(false), timing(false), chain_bit(false), - match(MATCH_EQUAL), m(false), s(false), u(false), - execute_bit(false), store_bit(false), load_bit(false) -{ -} - reg_t mcontrol_t::tdata1_read(const processor_t * const proc) const noexcept { reg_t v = 0; auto xlen = proc->get_xlen(); diff --git a/riscv/triggers.h b/riscv/triggers.h index 19fa437..8f1b81d 100644 --- a/riscv/triggers.h +++ b/riscv/triggers.h @@ -55,15 +55,14 @@ public: virtual bool store() const { return false; } virtual bool load() const { return false; } -public: - bool dmode; - action_t action; - bool hit; + bool dmode = false; + action_t action = ACTION_DEBUG_EXCEPTION; + bool hit = false; virtual ~trigger_t() {}; protected: - trigger_t() : dmode(false), action(ACTION_DEBUG_EXCEPTION), hit(false) {}; + trigger_t() {} }; class mcontrol_t : public trigger_t { @@ -78,8 +77,6 @@ public: MATCH_MASK_HIGH = MCONTROL_MATCH_MASK_HIGH } match_t; - mcontrol_t(); - virtual reg_t tdata1_read(const processor_t * const proc) const noexcept override; virtual bool tdata1_write(processor_t * const proc, const reg_t val) noexcept override; virtual reg_t tdata2_read(const processor_t * const proc) const noexcept override; @@ -97,16 +94,16 @@ private: bool simple_match(unsigned xlen, reg_t value) const; public: - bool select; - bool timing; - bool chain_bit; - match_t match; - bool m; - bool s; - bool u; - bool execute_bit; - bool store_bit; - bool load_bit; + bool select = false; + bool timing = false; + bool chain_bit = false; + match_t match = MATCH_EQUAL; + bool m = false; + bool s = false; + bool u = false; + bool execute_bit = false; + bool store_bit = false; + bool load_bit = false; reg_t tdata2; }; -- cgit v1.1 From ccc9791807b59d848e7c8983cea49a16fb4a2912 Mon Sep 17 00:00:00 2001 From: YenHaoChen <39526191+YenHaoChen@users.noreply.github.com> Date: Sat, 17 Sep 2022 02:59:57 +0800 Subject: Fix trigger never fire on executing an instruction on plugin devices (#1084) The trigger matching only checks on TLB hit (after refill_tlb()). However, instructions on plugin devices will never be filled into the TLB; thus, triggers cannot fire when executing instructions on the plugin devices. The PR removes the if-condition of TLB hit for trigger checking. 
Co-authored-by: Howard Yen-Hao Chen --- riscv/mmu.h | 12 +++++------- 1 file changed, 5 insertions(+), 7 deletions(-) diff --git a/riscv/mmu.h b/riscv/mmu.h index 9a93f16..ca8b792 100644 --- a/riscv/mmu.h +++ b/riscv/mmu.h @@ -477,13 +477,11 @@ private: } else { result = tlb_data[vpn % TLB_ENTRIES]; } - if (unlikely(tlb_insn_tag[vpn % TLB_ENTRIES] == (vpn | TLB_CHECK_TRIGGERS))) { - target_endian* ptr = (target_endian*)(tlb_data[vpn % TLB_ENTRIES].host_offset + addr); - triggers::action_t action; - auto match = proc->TM.memory_access_match(&action, triggers::OPERATION_EXECUTE, addr, from_target(*ptr)); - if (match != triggers::MATCH_NONE) { - throw triggers::matched_t(triggers::OPERATION_EXECUTE, addr, from_target(*ptr), action); - } + target_endian* ptr = (target_endian*)(result.host_offset + addr); + triggers::action_t action; + auto match = proc->TM.memory_access_match(&action, triggers::OPERATION_EXECUTE, addr, from_target(*ptr)); + if (match != triggers::MATCH_NONE) { + throw triggers::matched_t(triggers::OPERATION_EXECUTE, addr, from_target(*ptr), action); } return result; } -- cgit v1.1 From dfd191367991cb157b53767dcc05824c826b5abd Mon Sep 17 00:00:00 2001 From: Iman Hosseini Date: Tue, 20 Sep 2022 00:05:00 -0400 Subject: detects the loading of isa-incompatible (i.e. 32 bit code to 64bit HART) code and emits an error message to help avoid unintentionally loading wrong elf. --- fesvr/elfloader.cc | 6 +++++- fesvr/elfloader.h | 2 +- fesvr/htif.cc | 10 ++++++++-- fesvr/htif.h | 4 ++-- fesvr/memif.h | 8 ++++++++ riscv/sim.cc | 1 + 6 files changed, 25 insertions(+), 6 deletions(-) diff --git a/fesvr/elfloader.cc b/fesvr/elfloader.cc index 76cd6da..1bdccd3 100644 --- a/fesvr/elfloader.cc +++ b/fesvr/elfloader.cc @@ -16,7 +16,7 @@ #include #include -std::map load_elf(const char* fn, memif_t* memif, reg_t* entry) +std::map load_elf(const char* fn, memif_t* memif, reg_t* entry, unsigned required_xlen = 0) { int fd = open(fn, O_RDONLY); struct stat s; @@ -32,6 +32,10 @@ std::map load_elf(const char* fn, memif_t* memif, reg_t* assert(size >= sizeof(Elf64_Ehdr)); const Elf64_Ehdr* eh64 = (const Elf64_Ehdr*)buf; assert(IS_ELF32(*eh64) || IS_ELF64(*eh64)); + unsigned xlen = IS_ELF32(*eh64) ? 
32 : 64; + if (required_xlen != 0 && required_xlen != xlen) { + throw incompat_xlen(required_xlen, xlen); + } assert(IS_ELFLE(*eh64) || IS_ELFBE(*eh64)); assert(IS_ELF_EXEC(*eh64)); assert(IS_ELF_RISCV(*eh64) || IS_ELF_EM_NONE(*eh64)); diff --git a/fesvr/elfloader.h b/fesvr/elfloader.h index 696ef47..ae4ee78 100644 --- a/fesvr/elfloader.h +++ b/fesvr/elfloader.h @@ -8,6 +8,6 @@ #include class memif_t; -std::map load_elf(const char* fn, memif_t* memif, reg_t* entry); +std::map load_elf(const char* fn, memif_t* memif, reg_t* entry, unsigned required_xlen = 0); #endif diff --git a/fesvr/htif.cc b/fesvr/htif.cc index ead309c..29eb7d8 100644 --- a/fesvr/htif.cc +++ b/fesvr/htif.cc @@ -82,8 +82,14 @@ htif_t::~htif_t() void htif_t::start() { - if (!targs.empty() && targs[0] != "none") + if (!targs.empty() && targs[0] != "none") { + try { load_program(); + } catch (const incompat_xlen & err) { + fprintf(stderr, "Error: cannot execute %d-bit program on RV%d hart\n", err.actual_xlen, err.expected_xlen); + exit(1); + } + } reset(); } @@ -129,7 +135,7 @@ std::map htif_t::load_payload(const std::string& payload, } preload_aware_memif(this); try { - return load_elf(path.c_str(), &preload_aware_memif, entry); + return load_elf(path.c_str(), &preload_aware_memif, entry, expected_xlen); } catch (mem_trap_t& t) { bad_address("loading payload " + payload, t.get_tval()); abort(); diff --git a/fesvr/htif.h b/fesvr/htif.h index ca5b362..cf8b2d5 100644 --- a/fesvr/htif.h +++ b/fesvr/htif.h @@ -26,7 +26,7 @@ class htif_t : public chunked_memif_t int run(); bool done(); int exit_code(); - + void set_expected_xlen(unsigned int m) { expected_xlen = m; } virtual memif_t& memif() { return mem; } template inline T from_target(target_endian n) const @@ -74,7 +74,7 @@ class htif_t : public chunked_memif_t void parse_arguments(int argc, char ** argv); void register_devices(); void usage(const char * program_name); - + unsigned int expected_xlen = 0; memif_t mem; reg_t entry; bool writezeros; diff --git a/fesvr/memif.h b/fesvr/memif.h index 001c425..3f9bd2d 100644 --- a/fesvr/memif.h +++ b/fesvr/memif.h @@ -5,6 +5,7 @@ #include #include +#include #include "byteorder.h" typedef uint64_t reg_t; @@ -79,4 +80,11 @@ protected: chunked_memif_t* cmemif; }; +class incompat_xlen : public std::exception { +public: + const unsigned expected_xlen; + const unsigned actual_xlen; + incompat_xlen(unsigned _expected_xlen, unsigned _actual_xlen) : expected_xlen(_expected_xlen), actual_xlen(_actual_xlen) {} +}; + #endif // __MEMIF_H diff --git a/riscv/sim.cc b/riscv/sim.cc index 0000537..0ef13b8 100644 --- a/riscv/sim.cc +++ b/riscv/sim.cc @@ -202,6 +202,7 @@ int sim_t::run() { host = context_t::current(); target.init(sim_thread_main, this); + htif_t::set_expected_xlen(isa.get_max_xlen()); return htif_t::run(); } -- cgit v1.1 From c2a47571be85d77e9a06ba9675e020bda77c445b Mon Sep 17 00:00:00 2001 From: Andrew Waterman Date: Thu, 22 Sep 2022 14:30:13 -0700 Subject: Propagate CFLAGS passed to configure into CXXFLAGS --- Makefile.in | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/Makefile.in b/Makefile.in index d4cb83e..41bc5e1 100644 --- a/Makefile.in +++ b/Makefile.in @@ -98,7 +98,7 @@ default-CXXFLAGS := $(default-CFLAGS) -std=c++17 mcppbs-CPPFLAGS := @CPPFLAGS@ mcppbs-CFLAGS := $(default-CFLAGS) @CFLAGS@ -mcppbs-CXXFLAGS := $(default-CXXFLAGS) @CXXFLAGS@ +mcppbs-CXXFLAGS := $(mcppbs-CFLAGS) $(default-CXXFLAGS) @CXXFLAGS@ CC := @CC@ CXX := @CXX@ -- cgit v1.1 From 7cf82d72f10352ae05755159996e5ce52e9aed77 Mon Sep 17 00:00:00 
2001 From: Andrew Waterman Date: Thu, 22 Sep 2022 14:30:30 -0700 Subject: Run CI with -Werror --- ci-tests/test-spike | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/ci-tests/test-spike b/ci-tests/test-spike index 3d5ed6d..94ae321 100755 --- a/ci-tests/test-spike +++ b/ci-tests/test-spike @@ -6,6 +6,6 @@ DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" >/dev/null 2>&1 && pwd )" mkdir build cd build mkdir install -$DIR/../configure --prefix=`pwd`/install +CFLAGS="-Werror" $DIR/../configure --prefix=`pwd`/install make -j4 make install -- cgit v1.1 From 1c227a1d3b5802b5bc01ce2dfd4377f35db1800e Mon Sep 17 00:00:00 2001 From: Andrew Waterman Date: Fri, 23 Sep 2022 12:40:46 -0700 Subject: In CI, check that help message prints without error --- ci-tests/test-spike | 3 +++ 1 file changed, 3 insertions(+) diff --git a/ci-tests/test-spike b/ci-tests/test-spike index 94ae321..2da04ff 100755 --- a/ci-tests/test-spike +++ b/ci-tests/test-spike @@ -9,3 +9,6 @@ mkdir install CFLAGS="-Werror" $DIR/../configure --prefix=`pwd`/install make -j4 make install + +# check that help message prints without error +install/bin/spike -h -- cgit v1.1 From 204a639780dda374320a4ef31d714694db7ae46f Mon Sep 17 00:00:00 2001 From: Jerin Joy Date: Fri, 23 Sep 2022 15:43:32 -0700 Subject: Build and install lib and header dependencies for Hammer https://github.com/rivosinc/hammer Signed-off-by: Jerin Joy --- riscv/riscv.mk.in | 32 +++++++++++++++++++++++++++++++- softfloat/softfloat.mk.in | 9 ++++++++- spike_main/spike_main.mk.in | 2 ++ 3 files changed, 41 insertions(+), 2 deletions(-) diff --git a/riscv/riscv.mk.in b/riscv/riscv.mk.in index 45fe2eb..bbac794 100644 --- a/riscv/riscv.mk.in +++ b/riscv/riscv.mk.in @@ -3,8 +3,14 @@ get_opcode = $(shell grep ^DECLARE_INSN.*\\\<$(2)\\\> $(1) | sed 's/DECLARE_INSN riscv_subproject_deps = \ fdt \ + disasm \ + fesvr \ softfloat \ +riscv_CFLAGS = -fPIC + +riscv_install_shared_lib = yes + riscv_install_prog_srcs = \ riscv_hdrs = \ @@ -37,7 +43,31 @@ riscv_hdrs = \ csrs.h \ triggers.h \ -riscv_install_hdrs = mmio_plugin.h +riscv_install_hdrs = \ + abstract_device.h \ + cachesim.h \ + cfg.h \ + common.h \ + csrs.h \ + debug_module.h \ + debug_rom_defines.h \ + decode.h \ + devices.h \ + encoding.h \ + entropy_source.h \ + isa_parser.h \ + log_file.h \ + memtracer.h \ + mmio_plugin.h \ + mmu.h \ + p_ext_macros.h \ + platform.h \ + processor.h \ + sim.h \ + simif.h \ + trap.h \ + triggers.h \ + v_ext_macros.h \ riscv_precompiled_hdrs = \ insn_template.h \ diff --git a/softfloat/softfloat.mk.in b/softfloat/softfloat.mk.in index a20ab7e..6e7bbe9 100644 --- a/softfloat/softfloat.mk.in +++ b/softfloat/softfloat.mk.in @@ -238,4 +238,11 @@ softfloat_install_shared_lib = yes softfloat_test_srcs = -softfloat_install_prog_srcs = +softfloat_install_hdrs = \ + softfloat.h \ + softfloat_types.h \ + primitives.h \ + internals.h \ + platform.h \ + primitiveTypes.h \ + specialize.h \ diff --git a/spike_main/spike_main.mk.in b/spike_main/spike_main.mk.in index 35bef39..ec6c817 100644 --- a/spike_main/spike_main.mk.in +++ b/spike_main/spike_main.mk.in @@ -14,3 +14,5 @@ spike_main_install_prog_srcs = \ spike_main_hdrs = \ spike_main_srcs = \ + +spike_main_CFLAGS = -fPIC -- cgit v1.1 From d17b1767885000ed829eee9e985ae4a680f13377 Mon Sep 17 00:00:00 2001 From: Andrew Waterman Date: Fri, 23 Sep 2022 16:06:21 -0700 Subject: Actually run a program in CI To avoid long CI times, keep the program in binary form. 
To avoid storing binary files in this repository, store the binaries as an asset on a github release. --- ci-tests/create-ci-binary-tarball | 20 ++++++++++++++++++++ ci-tests/hello.c | 23 +++++++++++++++++++++++ ci-tests/test-spike | 8 ++++++++ 3 files changed, 51 insertions(+) create mode 100755 ci-tests/create-ci-binary-tarball create mode 100644 ci-tests/hello.c diff --git a/ci-tests/create-ci-binary-tarball b/ci-tests/create-ci-binary-tarball new file mode 100755 index 0000000..b4fa545 --- /dev/null +++ b/ci-tests/create-ci-binary-tarball @@ -0,0 +1,20 @@ +#!/bin/bash +set -e + +rm -rf build + +mkdir -p build/pk && cd "$_" +`git rev-parse --show-toplevel`/../riscv-pk/configure --host=riscv64-unknown-elf +make -j4 +cd - + +mkdir -p build/hello && cd "$_" +riscv64-unknown-elf-gcc -O2 -o hello `git rev-parse --show-toplevel`/ci-tests/hello.c +cd - + +mv build/pk/pk . +mv build/hello/hello . + +tar -cf spike-ci.tar pk hello + +rm pk hello diff --git a/ci-tests/hello.c b/ci-tests/hello.c new file mode 100644 index 0000000..cf10e80 --- /dev/null +++ b/ci-tests/hello.c @@ -0,0 +1,23 @@ +#include + +int main() +{ + // As a simple benchmark, estimate pi + int n = 16384, misses = 0; + + for (int i = 0; i < n; i++) { + + for (int j = 0; j < n; j++) { + int x = i - (n / 2); + int y = j - (n / 2); + + misses += (x * x + y * y >= (n / 2) * (n / 2)); + } + } + + double pi = 4.0 * (n * n - misses) / (n * n); + + printf("Hello, world! Pi is approximately %f.\n", pi); + + return 0; +} diff --git a/ci-tests/test-spike b/ci-tests/test-spike index 2da04ff..fa9bbdd 100755 --- a/ci-tests/test-spike +++ b/ci-tests/test-spike @@ -12,3 +12,11 @@ make install # check that help message prints without error install/bin/spike -h + + +# run a program and check for correct output +mkdir run +cd run +wget https://github.com/riscv-software-src/riscv-isa-sim/releases/download/dummy-tag-for-ci-storage/spike-ci.tar +tar xf spike-ci.tar +time ../install/bin/spike --isa=rv64gc pk hello | grep "Hello, world! Pi is approximately 3.141588." 
-- cgit v1.1 From 6b9c49193ae41398fbd0558af9d91e47436941a8 Mon Sep 17 00:00:00 2001 From: XiaJin-RiVAI Date: Mon, 26 Sep 2022 14:32:45 +0800 Subject: Update interactive.cc support arrow/home/end keys in debug mode support the arrow/home/end keys to move the cursor and trace back the history command in the debug mode --- riscv/interactive.cc | 205 +++++++++++++++++++++++++++++++++++++++++++++++---- 1 file changed, 189 insertions(+), 16 deletions(-) diff --git a/riscv/interactive.cc b/riscv/interactive.cc index c6a9f80..df83580 100644 --- a/riscv/interactive.cc +++ b/riscv/interactive.cc @@ -21,12 +21,40 @@ #include #define MAX_CMD_STR 40 // maximum possible size of a command line +#define BITS_PER_CHAR 8 #define STR_(X) #X // these definitions allow to use a macro as a string #define STR(X) STR_(X) DECLARE_TRAP(-1, interactive) +static std::vector history_commands; + +// if input an arrow/home key, there will be a 3/4-key input sequence, +// so we use an uint32_t to buffer it +typedef uint32_t keybuffer_t; + +enum KEYCODE +{ + KEYCODE_HEADER0 = 0x1b, + KEYCODE_HEADER1 = 0x1b5b, + KEYCODE_LEFT = 0x1b5b44, + KEYCODE_RIGHT = 0x1b5b43, + KEYCODE_UP = 0x1b5b41, + KEYCODE_DOWN = 0x1b5b42, + KEYCODE_HOME0 = 0x1b5b48, + KEYCODE_HOME1_0 = 0x1b5b31, + KEYCODE_HOME1_1 = 0x1b5b317e, + KEYCODE_END0 = 0x1b5b46, + KEYCODE_END1_0 = 0x1b5b34, + KEYCODE_END1_1 = 0x1b5b347e, + KEYCODE_BACKSPACE0 = 0x8, + KEYCODE_BACKSPACE1_0 = 0x1b5b33, + KEYCODE_BACKSPACE1_1 = 0x1b5b337e, + KEYCODE_BACKSPACE2 = 0x7f, + KEYCODE_ENTER = '\n', +}; + processor_t *sim_t::get_core(const std::string& i) { char *ptr; @@ -36,30 +64,177 @@ processor_t *sim_t::get_core(const std::string& i) return get_core(p); } +static void clear_str(bool noncanonical, int fd, std::string target_str) +{ + if (noncanonical) + { + std::string clear_motion; + clear_motion += '\r'; + for (unsigned i = 0; i < target_str.size(); i++) + { + clear_motion += ' '; + } + clear_motion += '\r'; + if (write(fd, clear_motion.c_str(), clear_motion.size() + 1)); // shut up gcc + } +} + +static void send_key(bool noncanonical, int fd, keybuffer_t key_code, const int len) +{ + if (noncanonical) + { + std::string key_motion; + for (int i = len - 1; i >= 0; i--) + { + key_motion += (char) ((key_code >> (i * BITS_PER_CHAR)) & 0xff); + } + if (write(fd, key_motion.c_str(), len) != len); // shut up gcc + } +} + static std::string readline(int fd) { struct termios tios; + // try to make sure the terminal is noncanonical and nonecho + if (tcgetattr(fd, &tios) == 0) + { + tios.c_lflag &= (~ICANON); + tios.c_lflag &= (~ECHO); + tcsetattr(fd, TCSANOW, &tios); + } bool noncanonical = tcgetattr(fd, &tios) == 0 && (tios.c_lflag & ICANON) == 0; - std::string s; + std::string s_head = std::string("(spike) "); + std::string s = s_head; + keybuffer_t key_buffer = 0; + // index for up/down arrow + size_t history_index = 0; + // position for left/right arrow + size_t cursor_pos = s.size(); + const size_t initial_s_len = cursor_pos; + std::cerr << s << std::flush; for (char ch; read(fd, &ch, 1) == 1; ) { - if (ch == '\x7f') + uint32_t keycode = key_buffer << BITS_PER_CHAR | ch; + switch (keycode) { - if (s.empty()) - continue; - s.erase(s.end()-1); - - if (noncanonical && write(fd, "\b \b", 3) != 3) {} + // the partial keycode, add to the key_buffer + case KEYCODE_HEADER0: + case KEYCODE_HEADER1: + case KEYCODE_HOME1_0: + case KEYCODE_END1_0: + case KEYCODE_BACKSPACE1_0: + key_buffer = keycode; + break; + // for backspace key + case KEYCODE_BACKSPACE0: + case KEYCODE_BACKSPACE1_1: + case 
KEYCODE_BACKSPACE2: + if (cursor_pos <= initial_s_len) + continue; + clear_str(noncanonical, fd, s); + cursor_pos--; + s.erase(cursor_pos, 1); + if (noncanonical && write(fd, s.c_str(), s.size() + 1) != 1); // shut up gcc + // move cursor by left arrow key + for (unsigned i = 0; i < s.size() - cursor_pos; i++) { + send_key(noncanonical, fd, KEYCODE_LEFT, 3); + } + key_buffer = 0; + break; + case KEYCODE_HOME0: + case KEYCODE_HOME1_1: + // move cursor by left arrow key + for (unsigned i = 0; i < cursor_pos - initial_s_len; i++) { + send_key(noncanonical, fd, KEYCODE_LEFT, 3); + } + cursor_pos = initial_s_len; + key_buffer = 0; + break; + case KEYCODE_END0: + case KEYCODE_END1_1: + // move cursor by right arrow key + for (unsigned i = 0; i < s.size() - cursor_pos; i++) { + send_key(noncanonical, fd, KEYCODE_RIGHT, 3); + } + cursor_pos = s.size(); + key_buffer = 0; + break; + case KEYCODE_UP: + // up arrow + if (history_commands.size() > 0) { + clear_str(noncanonical, fd, s); + history_index = std::min(history_commands.size(), history_index + 1); + s = history_commands[history_commands.size() - history_index]; + if (noncanonical && write(fd, s.c_str(), s.size() + 1)) + ; // shut up gcc + cursor_pos = s.size(); + } + key_buffer = 0; + break; + case KEYCODE_DOWN: + // down arrow + if (history_commands.size() > 0) { + clear_str(noncanonical, fd, s); + history_index = std::max(0, (int)history_index - 1); + if (history_index == 0) { + s = s_head; + } else { + s = history_commands[history_commands.size() - history_index]; + } + if (noncanonical && write(fd, s.c_str(), s.size() + 1)) + ; // shut up gcc + cursor_pos = s.size(); + } + key_buffer = 0; + break; + case KEYCODE_LEFT: + if (s.size() > initial_s_len) { + cursor_pos = cursor_pos - 1; + if ((int)cursor_pos < (int)initial_s_len) { + cursor_pos = initial_s_len; + } else { + send_key(noncanonical, fd, KEYCODE_LEFT, 3); + } + } + key_buffer = 0; + break; + case KEYCODE_RIGHT: + if (s.size() > initial_s_len) { + cursor_pos = cursor_pos + 1; + if (cursor_pos > s.size()) { + cursor_pos = s.size(); + } else { + send_key(noncanonical, fd, KEYCODE_RIGHT, 3); + } + } + key_buffer = 0; + break; + case KEYCODE_ENTER: + if (noncanonical && write(fd, &ch, 1) != 1); // shut up gcc + if (s.size() > initial_s_len && (history_commands.size() == 0 || s != history_commands[history_commands.size() - 1])) { + history_commands.push_back(s); + } + return s.substr(initial_s_len); + default: + DEFAULT_KEY: + // unknown buffered key, do nothing + if (key_buffer != 0) { + key_buffer = 0; + break; + } + clear_str(noncanonical, fd, s); + s.insert(cursor_pos, 1, ch); + cursor_pos++; + if (noncanonical && write(fd, s.c_str(), s.size() + 1) != 1); // shut up gcc + // send left arrow key to move cursor + for (unsigned i = 0; i < s.size() - cursor_pos; i++) { + send_key(noncanonical, fd, KEYCODE_LEFT, 3); + } + break; } - else if (noncanonical && write(fd, &ch, 1) != 1) {} - - if (ch == '\n') - break; - if (ch != '\x7f') - s += ch; } - return s; + return s.substr(initial_s_len); } #ifdef HAVE_BOOST_ASIO @@ -90,7 +265,6 @@ std::string sim_t::rin(boost::asio::streambuf *bout_ptr) { // output goes to socket sout_.rdbuf(bout_ptr); } else { // if we are not listening on a socket, get commands from terminal - std::cerr << ": " << std::flush; s = readline(2); // 2 is stderr, but when doing reads it reverts to stdin // output goes to stderr sout_.rdbuf(std::cerr.rdbuf()); @@ -159,7 +333,6 @@ void sim_t::interactive() #ifdef HAVE_BOOST_ASIO s = rin(&bout); // get command string from 
socket or terminal #else - std::cerr << ": " << std::flush; s = readline(2); // 2 is stderr, but when doing reads it reverts to stdin #endif } -- cgit v1.1 From 55c66198fd6bc8f1b16e67a33d2865ca3e7269cb Mon Sep 17 00:00:00 2001 From: Andrew Waterman Date: Wed, 28 Sep 2022 01:06:53 -0700 Subject: Fix vmv.x.s for RV32 The Spike internals require that, when XLEN is narrower than reg_t, values be sign-extended to the width of reg_t. --- riscv/insns/vmv_x_s.h | 17 +++++++++-------- 1 file changed, 9 insertions(+), 8 deletions(-) diff --git a/riscv/insns/vmv_x_s.h b/riscv/insns/vmv_x_s.h index 8451d6a..16153eb 100644 --- a/riscv/insns/vmv_x_s.h +++ b/riscv/insns/vmv_x_s.h @@ -1,27 +1,28 @@ // vmv_x_s: rd = vs2[0] require_vector(true); require(insn.v_vm() == 1); -uint64_t xmask = UINT64_MAX >> (64 - P.get_isa().get_max_xlen()); reg_t rs1 = RS1; reg_t sew = P.VU.vsew; reg_t rs2_num = insn.rs2(); +reg_t res; switch (sew) { case e8: - WRITE_RD(P.VU.elt(rs2_num, 0)); + res = P.VU.elt(rs2_num, 0); break; case e16: - WRITE_RD(P.VU.elt(rs2_num, 0)); + res = P.VU.elt(rs2_num, 0); break; case e32: - WRITE_RD(P.VU.elt(rs2_num, 0)); + res = P.VU.elt(rs2_num, 0); break; case e64: - if (P.get_isa().get_max_xlen() <= sew) - WRITE_RD(P.VU.elt(rs2_num, 0) & xmask); - else - WRITE_RD(P.VU.elt(rs2_num, 0)); + res = P.VU.elt(rs2_num, 0); break; +default: + abort(); } +WRITE_RD(sext_xlen(res)); + P.VU.vstart->write(0); -- cgit v1.1 From 8af5c39de1d929a0858127073002c2c7d165f15b Mon Sep 17 00:00:00 2001 From: Andrew Waterman Date: Wed, 28 Sep 2022 01:47:24 -0700 Subject: Fix build using Xcode 14 Don't include .a files in other .a files. Doing so trips a sanity check in the Xcode 14 toolchain, because the included .a files are not themselves mach-o format, even though their contents are. --- Makefile.in | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/Makefile.in b/Makefile.in index 41bc5e1..afa4203 100644 --- a/Makefile.in +++ b/Makefile.in @@ -241,7 +241,7 @@ $(2)_lib_libnames := $$(patsubst %, lib%.a, $$($(2)_lib_libs)) $(2)_lib_libarg := $$(patsubst %, -l%, $$($(2)_lib_libs)) $(2)_lib_libnames_shared := $$(if $$($(2)_install_shared_lib),lib$(1).so,) -lib$(1).a : $$($(2)_objs) $$($(2)_c_objs) $$($(2)_lib_libnames) +lib$(1).a : $$($(2)_objs) $$($(2)_c_objs) $(AR) rcs $$@ $$^ lib$(1).so : $$($(2)_objs) $$($(2)_c_objs) $$($(2)_lib_libnames_shared) $$($(2)_lib_libnames) $(LINK) -shared -o $$@ $(if $(filter Darwin,$(shell uname -s)),-install_name $(install_libs_dir)/$$@) $$^ $$($(2)_lib_libnames) $(LIBS) -- cgit v1.1 From b724db52f9d7be3e3068e5bf01ac939ece8d032b Mon Sep 17 00:00:00 2001 From: YenHaoChen Date: Fri, 30 Sep 2022 11:48:44 +0800 Subject: Add has_data argument to trigger checking functions The mcontrol trigger can select either address or data for checking. The The selection decides the priority of the trigger. For instance, the address trigger has a higher priority over the page fault, and the page fault has a higher priority over the data trigger. The previous implementation only has the checking functions for data trigger, which results in incorrect priority of address trigger. This commit adds a has_data argument to indicate address trigger and the priority of the trigger. 
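A minimal sketch of what the new flag changes (toy types, not the real triggers::mcontrol_t): a trigger that selects data for comparison must report no match when it is consulted before the data exists, so that the address-only checks introduced by the following patches cannot fire it prematurely, while address triggers can still be decided early.

#include <cstdint>
#include <cstdio>

enum match_result_t { MATCH_NONE, MATCH_FIRE_BEFORE };

// Toy stand-in for mcontrol_t with only the fields this point needs.
struct toy_mcontrol_t {
  bool select;       // false: compare the access address, true: compare the data
  uint64_t tdata2;   // comparison value (toy: exact-equality match only)

  match_result_t memory_access_match(uint64_t address, bool has_data,
                                     uint64_t data = 0) const {
    uint64_t value;
    if (select) {
      if (!has_data)         // data trigger, but no data yet: cannot decide,
        return MATCH_NONE;   // so an address-only pre-check skips it
      value = data;
    } else {
      value = address;       // address trigger: can be decided early
    }
    return value == tdata2 ? MATCH_FIRE_BEFORE : MATCH_NONE;
  }
};

int main() {
  toy_mcontrol_t data_trigger{true, 0};
  printf("%d\n", data_trigger.memory_access_match(0x1000, false));    // 0: no spurious match
  printf("%d\n", data_trigger.memory_access_match(0x1000, true, 0));  // 1: real data match
  return 0;
}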
--- riscv/mmu.cc | 4 ++-- riscv/mmu.h | 10 +++++----- riscv/triggers.cc | 8 +++++--- riscv/triggers.h | 6 +++--- 4 files changed, 15 insertions(+), 13 deletions(-) diff --git a/riscv/mmu.cc b/riscv/mmu.cc index d0a55f0..0dbb94e 100644 --- a/riscv/mmu.cc +++ b/riscv/mmu.cc @@ -155,7 +155,7 @@ void mmu_t::load_slow_path(reg_t addr, reg_t len, uint8_t* bytes, uint32_t xlate if (!matched_trigger) { reg_t data = reg_from_bytes(len, bytes); - matched_trigger = trigger_exception(triggers::OPERATION_LOAD, addr, data); + matched_trigger = trigger_exception(triggers::OPERATION_LOAD, addr, true, data); if (matched_trigger) throw *matched_trigger; } @@ -167,7 +167,7 @@ void mmu_t::store_slow_path(reg_t addr, reg_t len, const uint8_t* bytes, uint32_ if (!matched_trigger) { reg_t data = reg_from_bytes(len, bytes); - matched_trigger = trigger_exception(triggers::OPERATION_STORE, addr, data); + matched_trigger = trigger_exception(triggers::OPERATION_STORE, addr, true, data); if (matched_trigger) throw *matched_trigger; } diff --git a/riscv/mmu.h b/riscv/mmu.h index ca8b792..ebe4f9b 100644 --- a/riscv/mmu.h +++ b/riscv/mmu.h @@ -113,7 +113,7 @@ public: if ((xlate_flags) == 0 && unlikely(tlb_load_tag[vpn % TLB_ENTRIES] == (vpn | TLB_CHECK_TRIGGERS))) { \ type##_t data = from_target(*(target_endian*)(tlb_data[vpn % TLB_ENTRIES].host_offset + addr)); \ if (!matched_trigger) { \ - matched_trigger = trigger_exception(triggers::OPERATION_LOAD, addr, data); \ + matched_trigger = trigger_exception(triggers::OPERATION_LOAD, addr, true, data); \ if (matched_trigger) \ throw *matched_trigger; \ } \ @@ -178,7 +178,7 @@ public: else if ((xlate_flags) == 0 && unlikely(tlb_store_tag[vpn % TLB_ENTRIES] == (vpn | TLB_CHECK_TRIGGERS))) { \ if (actually_store) { \ if (!matched_trigger) { \ - matched_trigger = trigger_exception(triggers::OPERATION_STORE, addr, val); \ + matched_trigger = trigger_exception(triggers::OPERATION_STORE, addr, true, val); \ if (matched_trigger) \ throw *matched_trigger; \ } \ @@ -479,7 +479,7 @@ private: } target_endian* ptr = (target_endian*)(result.host_offset + addr); triggers::action_t action; - auto match = proc->TM.memory_access_match(&action, triggers::OPERATION_EXECUTE, addr, from_target(*ptr)); + auto match = proc->TM.memory_access_match(&action, triggers::OPERATION_EXECUTE, addr, true, from_target(*ptr)); if (match != triggers::MATCH_NONE) { throw triggers::matched_t(triggers::OPERATION_EXECUTE, addr, from_target(*ptr), action); } @@ -491,13 +491,13 @@ private: } inline triggers::matched_t *trigger_exception(triggers::operation_t operation, - reg_t address, reg_t data) + reg_t address, bool has_data, reg_t data=0) { if (!proc) { return NULL; } triggers::action_t action; - auto match = proc->TM.memory_access_match(&action, operation, address, data); + auto match = proc->TM.memory_access_match(&action, operation, address, has_data, data); if (match == triggers::MATCH_NONE) return NULL; if (match == triggers::MATCH_FIRE_BEFORE) { diff --git a/riscv/triggers.cc b/riscv/triggers.cc index 4b73715..e357b4a 100644 --- a/riscv/triggers.cc +++ b/riscv/triggers.cc @@ -101,7 +101,7 @@ bool mcontrol_t::simple_match(unsigned xlen, reg_t value) const { assert(0); } -match_result_t mcontrol_t::memory_access_match(processor_t * const proc, operation_t operation, reg_t address, reg_t data) { +match_result_t mcontrol_t::memory_access_match(processor_t * const proc, operation_t operation, reg_t address, bool has_data, reg_t data) { state_t * const state = proc->get_state(); if ((operation == 
triggers::OPERATION_EXECUTE && !execute_bit) || (operation == triggers::OPERATION_STORE && !store_bit) || @@ -115,6 +115,8 @@ match_result_t mcontrol_t::memory_access_match(processor_t * const proc, operati reg_t value; if (select) { value = data; + if (!has_data) + return MATCH_NONE; } else { value = address; } @@ -150,7 +152,7 @@ module_t::~module_t() { } } -match_result_t module_t::memory_access_match(action_t * const action, operation_t operation, reg_t address, reg_t data) +match_result_t module_t::memory_access_match(action_t * const action, operation_t operation, reg_t address, bool has_data, reg_t data) { state_t * const state = proc->get_state(); if (state->debug_mode) @@ -170,7 +172,7 @@ match_result_t module_t::memory_access_match(action_t * const action, operation_ * entire chain did not match. This is allowed by the spec, because the final * trigger in the chain will never get `hit` set unless the entire chain * matches. */ - match_result_t result = triggers[i]->memory_access_match(proc, operation, address, data); + match_result_t result = triggers[i]->memory_access_match(proc, operation, address, has_data, data); if (result != MATCH_NONE && !triggers[i]->chain()) { *action = triggers[i]->action; return result; diff --git a/riscv/triggers.h b/riscv/triggers.h index 8f1b81d..b7512ef 100644 --- a/riscv/triggers.h +++ b/riscv/triggers.h @@ -43,7 +43,7 @@ class matched_t class trigger_t { public: virtual match_result_t memory_access_match(processor_t * const proc, - operation_t operation, reg_t address, reg_t data) = 0; + operation_t operation, reg_t address, bool has_data, reg_t data=0) = 0; virtual reg_t tdata1_read(const processor_t * const proc) const noexcept = 0; virtual bool tdata1_write(processor_t * const proc, const reg_t val) noexcept = 0; @@ -88,7 +88,7 @@ public: virtual bool load() const override { return load_bit; } virtual match_result_t memory_access_match(processor_t * const proc, - operation_t operation, reg_t address, reg_t data) override; + operation_t operation, reg_t address, bool has_data, reg_t data=0) override; private: bool simple_match(unsigned xlen, reg_t value) const; @@ -115,7 +115,7 @@ public: unsigned count() const { return triggers.size(); } match_result_t memory_access_match(action_t * const action, - operation_t operation, reg_t address, reg_t data); + operation_t operation, reg_t address, bool has_data, reg_t data=0); reg_t tdata1_read(const processor_t * const proc, unsigned index) const noexcept; bool tdata1_write(processor_t * const proc, unsigned index, const reg_t val) noexcept; -- cgit v1.1 From a5752cebafa75b8dc539ee327bac8630e3fb84f1 Mon Sep 17 00:00:00 2001 From: YenHaoChen Date: Fri, 30 Sep 2022 11:55:57 +0800 Subject: Fix priority of mcontrol trigger execute address before The spec defines the mcontrol execute address has a higher priority over page fault (Debug spec, Table 5.2). Thus, the trigger checking should be before the translation. The previous implementation checks the trigger after the translation, resulting in incorrect priority. For instance, when page fault and trigger occur on the same instruction, the previous implementation will choose to raise the page fault, which contradicts the priority requirement. This commit adds an address-only trigger checking before the translation. The trigger will fire on the instruction instead of the page fault in the above case. 
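A hedged sketch of the shape of the fix on the fetch path (hypothetical helpers standing in for the trigger module and the page-table walk; the real change is the instruction-fetch code in riscv/mmu.h shown in the diff that follows): the trigger module is consulted twice, once with only the address before any translation that could raise a page fault, and again afterwards when the fetched bits are available.

#include <cstdint>
#include <cstdio>

enum match_result_t { MATCH_NONE, MATCH_FIRE_BEFORE };

// Toy trigger: fires on one particular fetch address.
static match_result_t match_execute(uint64_t addr, bool has_data, uint64_t data = 0) {
  (void)has_data; (void)data;
  return (addr == 0x8000) ? MATCH_FIRE_BEFORE : MATCH_NONE;
}

// Toy translation: pretend the trigger's page is unmapped, so both events compete.
static bool translate_fetch(uint64_t vaddr, uint64_t *paddr) {
  if (vaddr == 0x8000) return false;
  *paddr = vaddr;
  return true;
}

static const char *fetch(uint64_t vaddr) {
  // 1. address-only trigger check first, so it wins over the page fault below
  if (match_execute(vaddr, /*has_data=*/false) != MATCH_NONE)
    return "execute address trigger";
  // 2. translation; an instruction page fault would be reported here
  uint64_t paddr;
  if (!translate_fetch(vaddr, &paddr))
    return "instruction page fault";
  // 3. the original check still runs afterwards, now with the fetched bits in hand
  (void)paddr;
  return "fetched";
}

int main() {
  printf("%s\n", fetch(0x8000));  // trigger fires, not the page fault
  printf("%s\n", fetch(0x9000));  // fetched normally
  return 0;
}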
--- riscv/mmu.h | 8 ++++++-- 1 file changed, 6 insertions(+), 2 deletions(-) diff --git a/riscv/mmu.h b/riscv/mmu.h index ebe4f9b..d076aeb 100644 --- a/riscv/mmu.h +++ b/riscv/mmu.h @@ -471,6 +471,11 @@ private: reg_t vpn = addr >> PGSHIFT; if (likely(tlb_insn_tag[vpn % TLB_ENTRIES] == vpn)) return tlb_data[vpn % TLB_ENTRIES]; + triggers::action_t action; + auto match = proc->TM.memory_access_match(&action, triggers::OPERATION_EXECUTE, addr, false); + if (match != triggers::MATCH_NONE) { + throw triggers::matched_t(triggers::OPERATION_EXECUTE, addr, 0, action); + } tlb_entry_t result; if (unlikely(tlb_insn_tag[vpn % TLB_ENTRIES] != (vpn | TLB_CHECK_TRIGGERS))) { result = fetch_slow_path(addr); @@ -478,8 +483,7 @@ private: result = tlb_data[vpn % TLB_ENTRIES]; } target_endian* ptr = (target_endian*)(result.host_offset + addr); - triggers::action_t action; - auto match = proc->TM.memory_access_match(&action, triggers::OPERATION_EXECUTE, addr, true, from_target(*ptr)); + match = proc->TM.memory_access_match(&action, triggers::OPERATION_EXECUTE, addr, true, from_target(*ptr)); if (match != triggers::MATCH_NONE) { throw triggers::matched_t(triggers::OPERATION_EXECUTE, addr, from_target(*ptr), action); } -- cgit v1.1 From 99cb603973b3f9575b411b80934035f3ee7fbcf3 Mon Sep 17 00:00:00 2001 From: YenHaoChen Date: Fri, 30 Sep 2022 12:01:09 +0800 Subject: Fix priority of mcontrol trigger load address before The spec defines the mcontrol load address has a higher priority over page fault and address misaligned (Debug spec, Table 5.2). Thus, the trigger checking should be before the translation and alignment checking. The previous implementation checks the trigger after the translation and alignment, resulting in incorrect priority. For instance, when page fault and trigger occur on the same instruction, the previous implementation will choose to raise the page fault, which contradicts the priority requirement. This commit adds an address-only trigger checking before the misaligned checking and translation. The trigger will fire on the instruction instead of the page fault in the above case. 
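Taken together with the previous patches, the intended ordering on a load (summarizing the commit messages above; the Debug spec table they cite is the authoritative list) is:

  1. mcontrol address trigger, timing = before
  2. load address misaligned / load page fault
  3. mcontrol data trigger, timing = before (checked once the loaded data exists)

which is why the address-only check (has_data = false) has to be hoisted above both the alignment check and the translation.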
--- riscv/mmu.cc | 7 +++++++ riscv/mmu.h | 5 +++++ 2 files changed, 12 insertions(+) diff --git a/riscv/mmu.cc b/riscv/mmu.cc index 0dbb94e..3048bde 100644 --- a/riscv/mmu.cc +++ b/riscv/mmu.cc @@ -141,6 +141,13 @@ bool mmu_t::mmio_store(reg_t addr, size_t len, const uint8_t* bytes) void mmu_t::load_slow_path(reg_t addr, reg_t len, uint8_t* bytes, uint32_t xlate_flags) { + if (!matched_trigger) { + reg_t data = reg_from_bytes(len, bytes); + matched_trigger = trigger_exception(triggers::OPERATION_LOAD, addr, false); + if (matched_trigger) + throw *matched_trigger; + } + reg_t paddr = translate(addr, len, LOAD, xlate_flags); if (auto host_addr = sim->addr_to_mem(paddr)) { diff --git a/riscv/mmu.h b/riscv/mmu.h index d076aeb..3a06c1e 100644 --- a/riscv/mmu.h +++ b/riscv/mmu.h @@ -101,6 +101,11 @@ public: #define load_func(type, prefix, xlate_flags) \ type##_t ALWAYS_INLINE prefix##_##type(reg_t addr, bool require_alignment = false) { \ if (unlikely(addr & (sizeof(type##_t)-1))) { \ + if (!matched_trigger) { \ + matched_trigger = trigger_exception(triggers::OPERATION_LOAD, addr, false); \ + if (matched_trigger) \ + throw *matched_trigger; \ + } \ if (require_alignment) load_reserved_address_misaligned(addr); \ else return misaligned_load(addr, sizeof(type##_t), xlate_flags); \ } \ -- cgit v1.1 From d52858f3a80d3f87bfedffbff1fec34245a63082 Mon Sep 17 00:00:00 2001 From: YenHaoChen Date: Fri, 30 Sep 2022 12:05:40 +0800 Subject: Fix priority of mcontrol trigger store address/data before The spec defines that the mcontrol store address/data has a higher priority over page fault and address misalignment (Debug spec, Table 5.2). Thus, the trigger checking should be before the translation and alignment checking. The previous implementation checks the trigger after the translation and alignment, resulting in incorrect priority. For instance, when page fault and trigger occur on the same instruction, the previous implementation will choose to raise the page fault, which contradicts the priority requirement. This commit moves the trigger checking before the misaligned checking and translation. The trigger will fire on the instruction instead of the page fault in the above case. 
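One difference from the load and fetch cases, sketched below with hypothetical helpers (toy code, not the real mmu_t store path): a store already has its data in hand when the store routine begins, so the hoisted check can pass has_data = true and cover address and data triggers before any misaligned or page-fault trap can be raised.

#include <cstdint>
#include <cstdio>

enum match_result_t { MATCH_NONE, MATCH_FIRE_BEFORE };

// Hypothetical trigger hook; the real one is the trigger module's memory_access_match().
static match_result_t match_store(uint64_t addr, bool has_data, uint64_t data) {
  (void)addr;
  return (has_data && data == 0xdeadbeef) ? MATCH_FIRE_BEFORE : MATCH_NONE;
}

static const char *store64(uint64_t addr, uint64_t val) {
  // The store data is already available, so address *and* data triggers are
  // checked up front, ahead of the alignment check and of translation.
  if (match_store(addr, /*has_data=*/true, val) != MATCH_NONE)
    return "store trigger (beats misaligned/page-fault traps)";
  if (addr & 7)                        // toy 8-byte alignment check
    return "store address misaligned";
  // ... translate(addr) and perform the store ...
  return "stored";
}

int main() {
  printf("%s\n", store64(0x1001, 0xdeadbeef));  // data trigger wins over misaligned
  printf("%s\n", store64(0x1001, 0));           // misaligned
  printf("%s\n", store64(0x1000, 0));           // stored
  return 0;
}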
--- riscv/mmu.cc | 4 ++-- riscv/mmu.h | 7 +++++++ 2 files changed, 9 insertions(+), 2 deletions(-) diff --git a/riscv/mmu.cc b/riscv/mmu.cc index 3048bde..ede5722 100644 --- a/riscv/mmu.cc +++ b/riscv/mmu.cc @@ -170,8 +170,6 @@ void mmu_t::load_slow_path(reg_t addr, reg_t len, uint8_t* bytes, uint32_t xlate void mmu_t::store_slow_path(reg_t addr, reg_t len, const uint8_t* bytes, uint32_t xlate_flags, bool actually_store) { - reg_t paddr = translate(addr, len, STORE, xlate_flags); - if (!matched_trigger) { reg_t data = reg_from_bytes(len, bytes); matched_trigger = trigger_exception(triggers::OPERATION_STORE, addr, true, data); @@ -179,6 +177,8 @@ void mmu_t::store_slow_path(reg_t addr, reg_t len, const uint8_t* bytes, uint32_ throw *matched_trigger; } + reg_t paddr = translate(addr, len, STORE, xlate_flags); + if (actually_store) { if (auto host_addr = sim->addr_to_mem(paddr)) { memcpy(host_addr, bytes, len); diff --git a/riscv/mmu.h b/riscv/mmu.h index 3a06c1e..40a435f 100644 --- a/riscv/mmu.h +++ b/riscv/mmu.h @@ -169,6 +169,13 @@ public: #define store_func(type, prefix, xlate_flags) \ void ALWAYS_INLINE prefix##_##type(reg_t addr, type##_t val, bool actually_store=true, bool require_alignment=false) { \ if (unlikely(addr & (sizeof(type##_t)-1))) { \ + if (actually_store) { \ + if (!matched_trigger) { \ + matched_trigger = trigger_exception(triggers::OPERATION_STORE, addr, true, val); \ + if (matched_trigger) \ + throw *matched_trigger; \ + } \ + } \ if (require_alignment) store_conditional_address_misaligned(addr); \ else return misaligned_store(addr, val, sizeof(type##_t), xlate_flags, actually_store); \ } \ -- cgit v1.1 From ab104011ba66fb3c88ce8405236908dc8700c679 Mon Sep 17 00:00:00 2001 From: YenHaoChen Date: Fri, 30 Sep 2022 12:08:38 +0800 Subject: Check trigger only with actually_store The data value of the function store_slow_path() is meaningful only when the actually_store=true. Otherwise, the data value is a hollow value of 0, which may result in unintended trigger matching. 
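A hedged illustration of the guard (toy code, not the real mmu_t::store_slow_path): when the caller only probes the access (actually_store = false) the data argument is a placeholder 0, so running the data-trigger comparison against it could fire a trigger whose compare value happens to be 0.

#include <cstdint>
#include <cstdio>

// Toy data trigger that happens to match stores of the value 0.
static bool data_trigger_matches(uint64_t data) { return data == 0; }

static const char *store_slow_path(uint64_t data, bool actually_store) {
  if (actually_store) {            // only a real store carries meaningful data
    if (data_trigger_matches(data))
      return "trigger fired";
  }
  // ... translation, and the memory write if actually_store ...
  return actually_store ? "stored" : "probed only";
}

int main() {
  printf("%s\n", store_slow_path(0, /*actually_store=*/false));  // probed only, no spurious fire
  printf("%s\n", store_slow_path(0, /*actually_store=*/true));   // trigger fired
  return 0;
}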
--- riscv/mmu.cc | 12 +++++++----- 1 file changed, 7 insertions(+), 5 deletions(-) diff --git a/riscv/mmu.cc b/riscv/mmu.cc index ede5722..9935ed2 100644 --- a/riscv/mmu.cc +++ b/riscv/mmu.cc @@ -170,11 +170,13 @@ void mmu_t::load_slow_path(reg_t addr, reg_t len, uint8_t* bytes, uint32_t xlate void mmu_t::store_slow_path(reg_t addr, reg_t len, const uint8_t* bytes, uint32_t xlate_flags, bool actually_store) { - if (!matched_trigger) { - reg_t data = reg_from_bytes(len, bytes); - matched_trigger = trigger_exception(triggers::OPERATION_STORE, addr, true, data); - if (matched_trigger) - throw *matched_trigger; + if (actually_store) { + if (!matched_trigger) { + reg_t data = reg_from_bytes(len, bytes); + matched_trigger = trigger_exception(triggers::OPERATION_STORE, addr, true, data); + if (matched_trigger) + throw *matched_trigger; + } } reg_t paddr = translate(addr, len, STORE, xlate_flags); -- cgit v1.1 From 0bc176b3fca43560b9e8586cdbc41cfde073e17a Mon Sep 17 00:00:00 2001 From: Andrew Waterman Date: Mon, 3 Oct 2022 15:54:06 -0700 Subject: Fix newly introduced Clang warnings --- riscv/interactive.cc | 19 ++++++++++++------- 1 file changed, 12 insertions(+), 7 deletions(-) diff --git a/riscv/interactive.cc b/riscv/interactive.cc index df83580..21cc31c 100644 --- a/riscv/interactive.cc +++ b/riscv/interactive.cc @@ -75,7 +75,8 @@ static void clear_str(bool noncanonical, int fd, std::string target_str) clear_motion += ' '; } clear_motion += '\r'; - if (write(fd, clear_motion.c_str(), clear_motion.size() + 1)); // shut up gcc + if (write(fd, clear_motion.c_str(), clear_motion.size() + 1)) + ; // shut up gcc } } @@ -88,7 +89,8 @@ static void send_key(bool noncanonical, int fd, keybuffer_t key_code, const int { key_motion += (char) ((key_code >> (i * BITS_PER_CHAR)) & 0xff); } - if (write(fd, key_motion.c_str(), len) != len); // shut up gcc + if (write(fd, key_motion.c_str(), len) != len) + ; // shut up gcc } } @@ -135,7 +137,8 @@ static std::string readline(int fd) clear_str(noncanonical, fd, s); cursor_pos--; s.erase(cursor_pos, 1); - if (noncanonical && write(fd, s.c_str(), s.size() + 1) != 1); // shut up gcc + if (noncanonical && write(fd, s.c_str(), s.size() + 1) != 1) + ; // shut up gcc // move cursor by left arrow key for (unsigned i = 0; i < s.size() - cursor_pos; i++) { send_key(noncanonical, fd, KEYCODE_LEFT, 3); @@ -167,7 +170,7 @@ static std::string readline(int fd) history_index = std::min(history_commands.size(), history_index + 1); s = history_commands[history_commands.size() - history_index]; if (noncanonical && write(fd, s.c_str(), s.size() + 1)) - ; // shut up gcc + ; // shut up gcc cursor_pos = s.size(); } key_buffer = 0; @@ -183,7 +186,7 @@ static std::string readline(int fd) s = history_commands[history_commands.size() - history_index]; } if (noncanonical && write(fd, s.c_str(), s.size() + 1)) - ; // shut up gcc + ; // shut up gcc cursor_pos = s.size(); } key_buffer = 0; @@ -211,7 +214,8 @@ static std::string readline(int fd) key_buffer = 0; break; case KEYCODE_ENTER: - if (noncanonical && write(fd, &ch, 1) != 1); // shut up gcc + if (noncanonical && write(fd, &ch, 1) != 1) + ; // shut up gcc if (s.size() > initial_s_len && (history_commands.size() == 0 || s != history_commands[history_commands.size() - 1])) { history_commands.push_back(s); } @@ -226,7 +230,8 @@ static std::string readline(int fd) clear_str(noncanonical, fd, s); s.insert(cursor_pos, 1, ch); cursor_pos++; - if (noncanonical && write(fd, s.c_str(), s.size() + 1) != 1); // shut up gcc + if (noncanonical && 
write(fd, s.c_str(), s.size() + 1) != 1) + ; // shut up gcc // send left arrow key to move cursor for (unsigned i = 0; i < s.size() - cursor_pos; i++) { send_key(noncanonical, fd, KEYCODE_LEFT, 3); -- cgit v1.1 From 3c9cfac9b1f7f1ddf07816f948c15f88aac27dcf Mon Sep 17 00:00:00 2001 From: YenHaoChen <39526191+YenHaoChen@users.noreply.github.com> Date: Wed, 5 Oct 2022 06:22:15 +0800 Subject: Fix trigger mcontrol.chain match issue #599 #627 (#1083) The variable chain_ok is used to indicate if the current trigger is suppressed by the trigger chain. A true value means the trigger is either un-chained or matches all previous triggers in the chain, and a false value means the trigger is chained and mismatches previous triggers. A false condition of variable chain_ok is missing. The false condition should be mcontrol.chain=1 and not matching; otherwise, the chain_ok=true (including initialization). The bug results in issues #559 and #627. Related issues: - https://github.com/riscv-software-src/riscv-isa-sim/issues/599 - https://github.com/riscv-software-src/riscv-isa-sim/issues/627 This PR fixes the issues #559 and #627. --- riscv/triggers.cc | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/riscv/triggers.cc b/riscv/triggers.cc index 4b73715..ef05551 100644 --- a/riscv/triggers.cc +++ b/riscv/triggers.cc @@ -176,7 +176,7 @@ match_result_t module_t::memory_access_match(action_t * const action, operation_ return result; } - chain_ok = true; + chain_ok = result != MATCH_NONE || !triggers[i]->chain(); } return MATCH_NONE; } -- cgit v1.1 From 3e166310b4fab72cbbf73e34de6a0b66ba6155ed Mon Sep 17 00:00:00 2001 From: Andrew Waterman Date: Thu, 22 Sep 2022 14:55:00 -0700 Subject: Rewrite require macro so it can be used as an expression --- riscv/insn_macros.h | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/riscv/insn_macros.h b/riscv/insn_macros.h index 2fdfced..8ec8f76 100644 --- a/riscv/insn_macros.h +++ b/riscv/insn_macros.h @@ -4,6 +4,6 @@ // These conflict with Boost headers so can't be included from insn_template.h #define P (*p) -#define require(x) do { if (unlikely(!(x))) throw trap_illegal_instruction(insn.bits()); } while (0) +#define require(x) (unlikely(!(x)) ? throw trap_illegal_instruction(insn.bits()) : (void) 0) #endif -- cgit v1.1 From 3447aecd16e8ce34109a122d3f95b5e69176505f Mon Sep 17 00:00:00 2001 From: Andrew Waterman Date: Thu, 22 Sep 2022 14:55:50 -0700 Subject: Rewrite READ_REG macro to avoid GNU statement expression extension This way, it can be used as an expression within a template argument. 
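A small standalone illustration of the motivation stated above (hypothetical macro names; the real macro is READ_REG in riscv/decode.h): a comma expression is an ordinary expression, accepted anywhere an expression can appear, including inside a template-argument list, whereas GCC rejects the ({ ... }) statement-expression form in that position.

#include <cstdint>

static uint64_t XPR[32];
#define CHECK_REG(reg)       ((void)0)
#define READ_REG_COMMA(reg)  (CHECK_REG(reg), XPR[reg])         // ordinary expression
//#define READ_REG_STMT(reg) ({ CHECK_REG(reg); XPR[reg]; })    // GNU statement expression

template <class T> struct type_box { using type = T; };

int main() {
  uint64_t a = READ_REG_COMMA(3);                      // both forms work here
  type_box<decltype(READ_REG_COMMA(3))>::type b = a;   // fine: plain expression inside a
                                                       // template-argument list
  // type_box<decltype(READ_REG_STMT(3))>::type c = a; // GCC rejects: statement-expressions
  //                                                   // are not allowed in template-argument lists
  (void)b;
  return (int)a & 0;
}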
--- riscv/decode.h | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/riscv/decode.h b/riscv/decode.h index 5fb0358..1b55502 100644 --- a/riscv/decode.h +++ b/riscv/decode.h @@ -177,7 +177,7 @@ private: #define STATE (*p->get_state()) #define FLEN (p->get_flen()) #define CHECK_REG(reg) ((void) 0) -#define READ_REG(reg) ({ CHECK_REG(reg); STATE.XPR[reg]; }) +#define READ_REG(reg) (CHECK_REG(reg), STATE.XPR[reg]) #define READ_FREG(reg) STATE.FPR[reg] #define RD READ_REG(insn.rd()) #define RS1 READ_REG(insn.rs1()) -- cgit v1.1 From dc9a8e28eebefe693e0cd6baf60b933d7a973a3b Mon Sep 17 00:00:00 2001 From: Andrew Waterman Date: Thu, 22 Sep 2022 14:57:10 -0700 Subject: Fix ignored-qualifiers warnings in get_field/set_field macros --- riscv/decode.h | 8 ++++++-- 1 file changed, 6 insertions(+), 2 deletions(-) diff --git a/riscv/decode.h b/riscv/decode.h index 1b55502..2bf9ddf 100644 --- a/riscv/decode.h +++ b/riscv/decode.h @@ -19,6 +19,7 @@ #include "p_ext_macros.h" #include "v_ext_macros.h" #include +#include typedef int64_t sreg_t; typedef uint64_t reg_t; @@ -282,8 +283,11 @@ do { \ if (rm > 4) throw trap_illegal_instruction(insn.bits()); \ rm; }) -#define get_field(reg, mask) (((reg) & (decltype(reg))(mask)) / ((mask) & ~((mask) << 1))) -#define set_field(reg, mask, val) (((reg) & ~(decltype(reg))(mask)) | (((decltype(reg))(val) * ((mask) & ~((mask) << 1))) & (decltype(reg))(mask))) +#define get_field(reg, mask) \ + (((reg) & (std::remove_cv::type)(mask)) / ((mask) & ~((mask) << 1))) + +#define set_field(reg, mask, val) \ + (((reg) & ~(std::remove_cv::type)(mask)) | (((std::remove_cv::type)(val) * ((mask) & ~((mask) << 1))) & (std::remove_cv::type)(mask))) #define require_privilege(p) require(STATE.prv >= (p)) #define require_novirt() if (unlikely(STATE.v)) throw trap_virtual_instruction(insn.bits()) -- cgit v1.1 From 8f511653940cb2b9edd9a18ec30a51422b34a573 Mon Sep 17 00:00:00 2001 From: Andrew Waterman Date: Thu, 22 Sep 2022 14:57:42 -0700 Subject: Fix remaining ignored-qualifiers warning --- riscv/csrs.cc | 2 +- riscv/csrs.h | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/riscv/csrs.cc b/riscv/csrs.cc index 858f12d..c8f8c8a 100644 --- a/riscv/csrs.cc +++ b/riscv/csrs.cc @@ -615,7 +615,7 @@ misa_csr_t::misa_csr_t(processor_t* const proc, const reg_t addr, const reg_t ma ) { } -const reg_t misa_csr_t::dependency(const reg_t val, const char feature, const char depends_on) const noexcept { +reg_t misa_csr_t::dependency(const reg_t val, const char feature, const char depends_on) const noexcept { return (val & (1L << (depends_on - 'A'))) ? 
val : (val & ~(1L << (feature - 'A'))); } diff --git a/riscv/csrs.h b/riscv/csrs.h index 5dc6da3..1cc2d74 100644 --- a/riscv/csrs.h +++ b/riscv/csrs.h @@ -326,7 +326,7 @@ class misa_csr_t final: public basic_csr_t { private: const reg_t max_isa; const reg_t write_mask; - const reg_t dependency(const reg_t val, const char feature, const char depends_on) const noexcept; + reg_t dependency(const reg_t val, const char feature, const char depends_on) const noexcept; }; typedef std::shared_ptr misa_csr_t_p; -- cgit v1.1 From da93bdc435b985fd354e01c26470f64c33cecaa6 Mon Sep 17 00:00:00 2001 From: Andrew Waterman Date: Thu, 22 Sep 2022 15:31:08 -0700 Subject: Delete functions that are actually unused --- disasm/disasm.cc | 6 ------ riscv/execute.cc | 4 ---- 2 files changed, 10 deletions(-) diff --git a/disasm/disasm.cc b/disasm/disasm.cc index 8a03677..7bb3ec0 100644 --- a/disasm/disasm.cc +++ b/disasm/disasm.cc @@ -568,11 +568,6 @@ static void NOINLINE add_pitype5_insn(disassembler_t* d, const char* name, uint3 d->add_insn(new disasm_insn_t(name, match, mask, {&xrd, &xrs1, &p_imm5})); } -static void NOINLINE add_pitype6_insn(disassembler_t* d, const char* name, uint32_t match, uint32_t mask) -{ - d->add_insn(new disasm_insn_t(name, match, mask, {&xrd, &xrs1, &p_imm6})); -} - static void NOINLINE add_vector_v_insn(disassembler_t* d, const char* name, uint32_t match, uint32_t mask) { d->add_insn(new disasm_insn_t(name, match, mask, {&vd, &vs2, opt, &vm})); @@ -1684,7 +1679,6 @@ void disassembler_t::add_instructions(const isa_parser_t* isa) #define DEFINE_PI3TYPE(code) add_pitype3_insn(this, #code, match_##code, mask_##code); #define DEFINE_PI4TYPE(code) add_pitype4_insn(this, #code, match_##code, mask_##code); #define DEFINE_PI5TYPE(code) add_pitype5_insn(this, #code, match_##code, mask_##code); -#define DEFINE_PI6TYPE(code) add_pitype6_insn(this, #code, match_##code, mask_##code); #define DISASM_8_AND_16_RINSN(code) \ DEFINE_RTYPE(code##8); \ diff --git a/riscv/execute.cc b/riscv/execute.cc index 1f9ddef..36621ca 100644 --- a/riscv/execute.cc +++ b/riscv/execute.cc @@ -154,10 +154,6 @@ static void commit_log_print_insn(processor_t *p, reg_t pc, insn_t insn) } fprintf(log_file, "\n"); } -#else -static void commit_log_reset(processor_t* p) {} -static void commit_log_stash_privilege(processor_t* p) {} -static void commit_log_print_insn(processor_t* p, reg_t pc, insn_t insn) {} #endif inline void processor_t::update_histogram(reg_t pc) -- cgit v1.1 From ac89fe6ce3150dc651238991235b7f2e8ac93aec Mon Sep 17 00:00:00 2001 From: Andrew Waterman Date: Thu, 22 Sep 2022 15:31:42 -0700 Subject: Fix unused-function warning on sometimes-used function cto --- riscv/processor.h | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/riscv/processor.h b/riscv/processor.h index 7fc4be7..441e522 100644 --- a/riscv/processor.h +++ b/riscv/processor.h @@ -220,7 +220,7 @@ struct state_t }; // Count number of contiguous 1 bits starting from the LSB. 
-static int cto(reg_t val) +static inline int cto(reg_t val) { int res = 0; while ((val & 1) == 1) -- cgit v1.1 From f8752d8feea04303d5f3bdeabce8864b3b2430ed Mon Sep 17 00:00:00 2001 From: Andrew Waterman Date: Thu, 22 Sep 2022 17:29:50 -0700 Subject: Suppress several unused-parameter warnings in fesvr --- fesvr/device.cc | 2 +- fesvr/htif.h | 2 +- fesvr/htif_hexwriter.h | 2 +- fesvr/memif.h | 3 ++- 4 files changed, 5 insertions(+), 4 deletions(-) diff --git a/fesvr/device.cc b/fesvr/device.cc index b149f3c..cbfdb50 100644 --- a/fesvr/device.cc +++ b/fesvr/device.cc @@ -33,7 +33,7 @@ void device_t::handle_command(command_t cmd) command_handlers[cmd.cmd()](cmd); } -void device_t::handle_null_command(command_t cmd) +void device_t::handle_null_command(command_t) { } diff --git a/fesvr/htif.h b/fesvr/htif.h index cf8b2d5..5767e31 100644 --- a/fesvr/htif.h +++ b/fesvr/htif.h @@ -65,7 +65,7 @@ class htif_t : public chunked_memif_t // indicates that the initial program load can skip writing this address // range to memory, because it has already been loaded through a sideband - virtual bool is_address_preloaded(addr_t taddr, size_t len) { return false; } + virtual bool is_address_preloaded(addr_t, size_t) { return false; } // Given an address, return symbol from addr2symbol map const char* get_symbol(uint64_t addr); diff --git a/fesvr/htif_hexwriter.h b/fesvr/htif_hexwriter.h index 7256166..0cd859b 100644 --- a/fesvr/htif_hexwriter.h +++ b/fesvr/htif_hexwriter.h @@ -21,7 +21,7 @@ protected: void read_chunk(addr_t taddr, size_t len, void* dst); void write_chunk(addr_t taddr, size_t len, const void* src); - void clear_chunk(addr_t taddr, size_t len) {} + void clear_chunk(addr_t, size_t) {} size_t chunk_max_size() { return width; } size_t chunk_align() { return width; } diff --git a/fesvr/memif.h b/fesvr/memif.h index 3f9bd2d..9eebed4 100644 --- a/fesvr/memif.h +++ b/fesvr/memif.h @@ -28,7 +28,8 @@ public: virtual size_t chunk_align() = 0; virtual size_t chunk_max_size() = 0; - virtual void set_target_endianness(memif_endianness_t endianness) {} + virtual void set_target_endianness(memif_endianness_t) {} + virtual memif_endianness_t get_target_endianness() const { return memif_endianness_undecided; } -- cgit v1.1 From 36009d07e8545dd43349c309f052fbb9b2ccce8c Mon Sep 17 00:00:00 2001 From: Andrew Waterman Date: Thu, 22 Sep 2022 17:30:09 -0700 Subject: Delete unused parameter in rfb_t::fb_update --- fesvr/rfb.cc | 4 ++-- fesvr/rfb.h | 2 +- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/fesvr/rfb.cc b/fesvr/rfb.cc index 2594a1b..bd72fda 100644 --- a/fesvr/rfb.cc +++ b/fesvr/rfb.cc @@ -119,7 +119,7 @@ void rfb_t::set_pixel_format(const std::string& s) throw std::runtime_error("bad pixel format"); } -void rfb_t::fb_update(const std::string& s) +void rfb_t::fb_update() { std::string u; u += str(uint8_t(0)); @@ -153,7 +153,7 @@ void rfb_t::tick() std::swap(fb1, fb2); if (pthread_mutex_trylock(&lock) == 0) { - fb_update(""); + fb_update(); pthread_mutex_unlock(&lock); } } diff --git a/fesvr/rfb.h b/fesvr/rfb.h index 263663a..e153bc8 100644 --- a/fesvr/rfb.h +++ b/fesvr/rfb.h @@ -25,7 +25,7 @@ class rfb_t : public device_t void thread_main(); friend void* rfb_thread_main(void*); std::string pixel_format(); - void fb_update(const std::string& s); + void fb_update(); void set_encodings(const std::string& s); void set_pixel_format(const std::string& s); void write(const std::string& s); -- cgit v1.1 From ae747db810ac59da81d6c5f055c04ed2a882aded Mon Sep 17 00:00:00 2001 From: Andrew Waterman Date: 
Thu, 22 Sep 2022 17:31:47 -0700 Subject: Suppress unused-paramter warnings in softfloat --- softfloat/specialize.h | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/softfloat/specialize.h b/softfloat/specialize.h index 556476c..d8a1985 100644 --- a/softfloat/specialize.h +++ b/softfloat/specialize.h @@ -238,6 +238,9 @@ INLINE struct uint128 softfloat_commonNaNToExtF80UI( const struct commonNaN *aPtr ) { struct uint128 uiZ; + + (void) aPtr; + uiZ.v64 = defaultNaNExtF80UI64; uiZ.v0 = defaultNaNExtF80UI0; return uiZ; @@ -295,6 +298,9 @@ INLINE struct uint128 softfloat_commonNaNToF128UI( const struct commonNaN *aPtr ) { struct uint128 uiZ; + + (void) aPtr; + uiZ.v64 = defaultNaNF128UI64; uiZ.v0 = defaultNaNF128UI0; return uiZ; -- cgit v1.1 From fe6335701815b302de391aac7177091c2ca3eb2e Mon Sep 17 00:00:00 2001 From: Andrew Waterman Date: Thu, 22 Sep 2022 17:32:15 -0700 Subject: Add UNUSED macro for suppressing unused-parameter/variable warnings --- riscv/common.h | 2 ++ 1 file changed, 2 insertions(+) diff --git a/riscv/common.h b/riscv/common.h index 7d37001..a354ced 100644 --- a/riscv/common.h +++ b/riscv/common.h @@ -9,12 +9,14 @@ # define NOINLINE __attribute__ ((noinline)) # define NORETURN __attribute__ ((noreturn)) # define ALWAYS_INLINE __attribute__ ((always_inline)) +# define UNUSED __attribute__ ((unused)) #else # define likely(x) (x) # define unlikely(x) (x) # define NOINLINE # define NORETURN # define ALWAYS_INLINE +# define UNUSED #endif #endif -- cgit v1.1 From d4747aaab21ac1df31dbc40fddc2b0ae9f0221a5 Mon Sep 17 00:00:00 2001 From: Andrew Waterman Date: Fri, 23 Sep 2022 15:20:16 -0700 Subject: Suppress unused-parameter warnings in spike main --- spike_main/spike.cc | 32 ++++++++++++++++---------------- 1 file changed, 16 insertions(+), 16 deletions(-) diff --git a/spike_main/spike.cc b/spike_main/spike.cc index 70b4313..571c56d 100644 --- a/spike_main/spike.cc +++ b/spike_main/spike.cc @@ -328,17 +328,17 @@ int main(int argc, char** argv) option_parser_t parser; parser.help(&suggest_help); - parser.option('h', "help", 0, [&](const char* s){help(0);}); - parser.option('d', 0, 0, [&](const char* s){debug = true;}); - parser.option('g', 0, 0, [&](const char* s){histogram = true;}); - parser.option('l', 0, 0, [&](const char* s){log = true;}); + parser.option('h', "help", 0, [&](const char UNUSED *s){help(0);}); + parser.option('d', 0, 0, [&](const char UNUSED *s){debug = true;}); + parser.option('g', 0, 0, [&](const char UNUSED *s){histogram = true;}); + parser.option('l', 0, 0, [&](const char UNUSED *s){log = true;}); #ifdef HAVE_BOOST_ASIO - parser.option('s', 0, 0, [&](const char* s){socket = true;}); + parser.option('s', 0, 0, [&](const char UNUSED *s){socket = true;}); #endif parser.option('p', 0, 1, [&](const char* s){nprocs = atoul_nonzero_safe(s);}); parser.option('m', 0, 1, [&](const char* s){cfg.mem_layout = parse_mem_layout(s);}); // I wanted to use --halted, but for some reason that doesn't work. 
- parser.option('H', 0, 0, [&](const char* s){halted = true;}); + parser.option('H', 0, 0, [&](const char UNUSED *s){halted = true;}); parser.option(0, "rbb-port", 1, [&](const char* s){use_rbb = true; rbb_port = atoul_safe(s);}); parser.option(0, "pc", 1, [&](const char* s){cfg.start_pc = strtoull(s, 0, 0);}); parser.option(0, "hartids", 1, [&](const char* s){ @@ -348,19 +348,19 @@ int main(int argc, char** argv) parser.option(0, "ic", 1, [&](const char* s){ic.reset(new icache_sim_t(s));}); parser.option(0, "dc", 1, [&](const char* s){dc.reset(new dcache_sim_t(s));}); parser.option(0, "l2", 1, [&](const char* s){l2.reset(cache_sim_t::construct(s, "L2$"));}); - parser.option(0, "log-cache-miss", 0, [&](const char* s){log_cache = true;}); + parser.option(0, "log-cache-miss", 0, [&](const char UNUSED *s){log_cache = true;}); parser.option(0, "isa", 1, [&](const char* s){cfg.isa = s;}); parser.option(0, "priv", 1, [&](const char* s){cfg.priv = s;}); parser.option(0, "varch", 1, [&](const char* s){cfg.varch = s;}); parser.option(0, "device", 1, device_parser); parser.option(0, "extension", 1, [&](const char* s){extensions.push_back(find_extension(s));}); - parser.option(0, "dump-dts", 0, [&](const char *s){dump_dts = true;}); - parser.option(0, "disable-dtb", 0, [&](const char *s){dtb_enabled = false;}); + parser.option(0, "dump-dts", 0, [&](const char UNUSED *s){dump_dts = true;}); + parser.option(0, "disable-dtb", 0, [&](const char UNUSED *s){dtb_enabled = false;}); parser.option(0, "dtb", 1, [&](const char *s){dtb_file = s;}); parser.option(0, "kernel", 1, [&](const char* s){kernel = s;}); parser.option(0, "initrd", 1, [&](const char* s){initrd = s;}); parser.option(0, "bootargs", 1, [&](const char* s){cfg.bootargs = s;}); - parser.option(0, "real-time-clint", 0, [&](const char *s){cfg.real_time_clint = true;}); + parser.option(0, "real-time-clint", 0, [&](const char UNUSED *s){cfg.real_time_clint = true;}); parser.option(0, "extlib", 1, [&](const char *s){ void *lib = dlopen(s, RTLD_NOW | RTLD_GLOBAL); if (lib == NULL) { @@ -371,23 +371,23 @@ int main(int argc, char** argv) parser.option(0, "dm-progsize", 1, [&](const char* s){dm_config.progbufsize = atoul_safe(s);}); parser.option(0, "dm-no-impebreak", 0, - [&](const char* s){dm_config.support_impebreak = false;}); + [&](const char UNUSED *s){dm_config.support_impebreak = false;}); parser.option(0, "dm-sba", 1, [&](const char* s){dm_config.max_sba_data_width = atoul_safe(s);}); parser.option(0, "dm-auth", 0, - [&](const char* s){dm_config.require_authentication = true;}); + [&](const char UNUSED *s){dm_config.require_authentication = true;}); parser.option(0, "dmi-rti", 1, [&](const char* s){dmi_rti = atoul_safe(s);}); parser.option(0, "dm-abstract-rti", 1, [&](const char* s){dm_config.abstract_rti = atoul_safe(s);}); parser.option(0, "dm-no-hasel", 0, - [&](const char* s){dm_config.support_hasel = false;}); + [&](const char UNUSED *s){dm_config.support_hasel = false;}); parser.option(0, "dm-no-abstract-csr", 0, - [&](const char* s){dm_config.support_abstract_csr_access = false;}); + [&](const char UNUSED *s){dm_config.support_abstract_csr_access = false;}); parser.option(0, "dm-no-halt-groups", 0, - [&](const char* s){dm_config.support_haltgroups = false;}); + [&](const char UNUSED *s){dm_config.support_haltgroups = false;}); parser.option(0, "log-commits", 0, - [&](const char* s){log_commits = true;}); + [&](const char UNUSED *s){log_commits = true;}); parser.option(0, "log", 1, [&](const char* s){log_path = s;}); FILE *cmd_file = NULL; 
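For readers unfamiliar with the pattern above, a standalone sketch (assuming UNUSED expands to __attribute__ ((unused)) as added to riscv/common.h earlier in this series): keeping the parameter named but annotated silences -Wunused-parameter without changing the callback signature, while the name still documents what the callback receives.

#include <cstdio>

#define UNUSED __attribute__ ((unused))   // as defined in riscv/common.h

// Toy option parser standing in for option_parser_t.
template <typename F> static void option(const char *arg, F callback) { callback(arg); }

int main() {
  bool debug = false;
  // Without the annotation, -Wunused-parameter would flag 's' here:
  option("-d", [&](const char UNUSED *s) { debug = true; });
  printf("debug=%d\n", debug);
  return 0;
}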
-- cgit v1.1 From a51e44ed228e48fc1dbf24ec7dc23cbd61a7874a Mon Sep 17 00:00:00 2001 From: Andrew Waterman Date: Thu, 22 Sep 2022 17:28:53 -0700 Subject: Fix or work around other unused-parameter warnings in ancillary programs --- spike_dasm/spike-dasm.cc | 2 +- spike_main/spike-log-parser.cc | 2 +- spike_main/xspike.cc | 9 +++++---- 3 files changed, 7 insertions(+), 6 deletions(-) diff --git a/spike_dasm/spike-dasm.cc b/spike_dasm/spike-dasm.cc index 8c85ef1..62e7212 100644 --- a/spike_dasm/spike-dasm.cc +++ b/spike_dasm/spike-dasm.cc @@ -14,7 +14,7 @@ #include using namespace std; -int main(int argc, char** argv) +int main(int UNUSED argc, char** argv) { string s; const char* isa = DEFAULT_ISA; diff --git a/spike_main/spike-log-parser.cc b/spike_main/spike-log-parser.cc index fab00f0..10a3209 100644 --- a/spike_main/spike-log-parser.cc +++ b/spike_main/spike-log-parser.cc @@ -16,7 +16,7 @@ using namespace std; -int main(int argc, char** argv) +int main(int UNUSED argc, char** argv) { string s; const char* isa_string = DEFAULT_ISA; diff --git a/spike_main/xspike.cc b/spike_main/xspike.cc index f8c8ca7..661a206 100644 --- a/spike_main/xspike.cc +++ b/spike_main/xspike.cc @@ -3,6 +3,7 @@ // xspike forks an xterm for spike's target machine console, // preserving the current terminal for debugging. +#include "common.h" #include #include #include @@ -12,10 +13,10 @@ #include #include -static pid_t fork_spike(int tty_fd, int argc, char** argv); +static pid_t fork_spike(int tty_fd, char** argv); static pid_t fork_xterm(int* tty_fd); -int main(int argc, char** argv) +int main(int UNUSED argc, char** argv) { int tty_fd, wait_status, ret = -1; pid_t xterm, spike; @@ -31,7 +32,7 @@ int main(int argc, char** argv) signal(SIGINT, handle_signal); - if ((spike = fork_spike(tty_fd, argc, argv)) < 0) + if ((spike = fork_spike(tty_fd, argv)) < 0) { fprintf(stderr, "could not open spike\n"); goto close_xterm; @@ -52,7 +53,7 @@ out: return ret; } -static pid_t fork_spike(int tty_fd, int argc, char** argv) +static pid_t fork_spike(int tty_fd, char** argv) { pid_t pid = fork(); if (pid < 0) -- cgit v1.1 From ce69fb5db97ecf240336b7826dd9dddeb32e5dca Mon Sep 17 00:00:00 2001 From: Andrew Waterman Date: Thu, 22 Sep 2022 17:34:33 -0700 Subject: Suppress most unused variable warnings --- customext/dummy_rocc.cc | 2 +- disasm/disasm.cc | 6 +++--- riscv/cachesim.h | 5 +++-- riscv/csrs.cc | 6 +++--- riscv/csrs.h | 2 +- riscv/entropy_source.h | 3 ++- riscv/execute.cc | 2 +- riscv/extension.h | 2 +- riscv/insns/amoswap_d.h | 2 +- riscv/insns/amoswap_w.h | 2 +- riscv/mmu.cc | 2 +- riscv/mmu.h | 4 ++-- riscv/processor.cc | 2 +- riscv/processor.h | 2 +- riscv/rocc.cc | 2 +- riscv/rom.cc | 2 +- riscv/tracer.h | 2 +- riscv/triggers.cc | 2 +- riscv/v_ext_macros.h | 4 ++-- 19 files changed, 28 insertions(+), 26 deletions(-) diff --git a/customext/dummy_rocc.cc b/customext/dummy_rocc.cc index 85ab7aa..5afa56a 100644 --- a/customext/dummy_rocc.cc +++ b/customext/dummy_rocc.cc @@ -7,7 +7,7 @@ class dummy_rocc_t : public rocc_t public: const char* name() { return "dummy_rocc"; } - reg_t custom0(rocc_insn_t insn, reg_t xs1, reg_t xs2) + reg_t custom0(rocc_insn_t insn, reg_t xs1, reg_t UNUSED xs2) { reg_t prev_acc = acc[insn.rs2]; diff --git a/disasm/disasm.cc b/disasm/disasm.cc index 7bb3ec0..1d7c6d5 100644 --- a/disasm/disasm.cc +++ b/disasm/disasm.cc @@ -180,7 +180,7 @@ struct : public arg_t { } rvc_fp_rs2s; struct : public arg_t { - std::string to_string(insn_t insn) const { + std::string to_string(insn_t UNUSED insn) const { 
return xpr_name[X_SP]; } } rvc_sp; @@ -314,7 +314,7 @@ struct : public arg_t { } vm; struct : public arg_t { - std::string to_string(insn_t insn) const { + std::string to_string(insn_t UNUSED insn) const { return "v0"; } } v0; @@ -358,7 +358,7 @@ struct : public arg_t { } v_vtype; struct : public arg_t { - std::string to_string(insn_t insn) const { + std::string to_string(insn_t UNUSED insn) const { return "x0"; } } x0; diff --git a/riscv/cachesim.h b/riscv/cachesim.h index b7f9014..d7046f9 100644 --- a/riscv/cachesim.h +++ b/riscv/cachesim.h @@ -4,6 +4,7 @@ #define _RISCV_CACHE_SIM_H #include "memtracer.h" +#include "common.h" #include #include #include @@ -108,7 +109,7 @@ class icache_sim_t : public cache_memtracer_t { public: icache_sim_t(const char* config) : cache_memtracer_t(config, "I$") {} - bool interested_in_range(uint64_t begin, uint64_t end, access_type type) + bool interested_in_range(uint64_t UNUSED begin, uint64_t UNUSED end, access_type type) { return type == FETCH; } @@ -122,7 +123,7 @@ class dcache_sim_t : public cache_memtracer_t { public: dcache_sim_t(const char* config) : cache_memtracer_t(config, "D$") {} - bool interested_in_range(uint64_t begin, uint64_t end, access_type type) + bool interested_in_range(uint64_t UNUSED begin, uint64_t UNUSED end, access_type type) { return type == LOAD || type == STORE; } diff --git a/riscv/csrs.cc b/riscv/csrs.cc index c8f8c8a..93b0bae 100644 --- a/riscv/csrs.cc +++ b/riscv/csrs.cc @@ -60,7 +60,7 @@ void csr_t::log_write() const noexcept { log_special_write(address, written_value()); } -void csr_t::log_special_write(const reg_t address, const reg_t val) const noexcept { +void csr_t::log_special_write(const reg_t UNUSED address, const reg_t UNUSED val) const noexcept { #if defined(RISCV_ENABLE_COMMITLOG) proc->get_state()->log_reg_write[((address) << 4) | 4] = {val, 0}; #endif @@ -1023,7 +1023,7 @@ reg_t const_csr_t::read() const noexcept { return val; } -bool const_csr_t::unlogged_write(const reg_t val) noexcept { +bool const_csr_t::unlogged_write(const reg_t UNUSED val) noexcept { return false; } @@ -1466,7 +1466,7 @@ reg_t scountovf_csr_t::read() const noexcept { return val; } -bool scountovf_csr_t::unlogged_write(const reg_t val) noexcept { +bool scountovf_csr_t::unlogged_write(const reg_t UNUSED val) noexcept { /* this function is unused */ return false; } diff --git a/riscv/csrs.h b/riscv/csrs.h index 1cc2d74..70fa2f4 100644 --- a/riscv/csrs.h +++ b/riscv/csrs.h @@ -521,7 +521,7 @@ class time_counter_csr_t: public csr_t { void sync(const reg_t val) noexcept; protected: - virtual bool unlogged_write(const reg_t val) noexcept override { return false; }; + virtual bool unlogged_write(const reg_t UNUSED val) noexcept override { return false; }; private: reg_t shadow_val; }; diff --git a/riscv/entropy_source.h b/riscv/entropy_source.h index c2ee2c4..3a3c8e6 100644 --- a/riscv/entropy_source.h +++ b/riscv/entropy_source.h @@ -3,6 +3,7 @@ #include #include "internals.h" +#include "common.h" // // Used to model the cryptography extension entropy source. @@ -30,7 +31,7 @@ public: // seed register // ------------------------------------------------------------ - void set_seed(reg_t val) { + void set_seed(reg_t UNUSED val) { // Always ignore writes to seed. // This CSR is strictly read only. It occupies a RW CSR address // to handle the side-effect of the changing seed value on a read. 
diff --git a/riscv/execute.cc b/riscv/execute.cc index 36621ca..5d24ce8 100644 --- a/riscv/execute.cc +++ b/riscv/execute.cc @@ -156,7 +156,7 @@ static void commit_log_print_insn(processor_t *p, reg_t pc, insn_t insn) } #endif -inline void processor_t::update_histogram(reg_t pc) +inline void processor_t::update_histogram(reg_t UNUSED pc) { #ifdef RISCV_ENABLE_HISTOGRAM pc_histogram[pc]++; diff --git a/riscv/extension.h b/riscv/extension.h index d1e847d..de6ece3 100644 --- a/riscv/extension.h +++ b/riscv/extension.h @@ -15,7 +15,7 @@ class extension_t virtual std::vector get_disasms() = 0; virtual const char* name() = 0; virtual void reset() {}; - virtual void set_debug(bool value) {}; + virtual void set_debug(bool UNUSED value) {} virtual ~extension_t(); void set_processor(processor_t* _p) { p = _p; } diff --git a/riscv/insns/amoswap_d.h b/riscv/insns/amoswap_d.h index e1bffde..f9188ea 100644 --- a/riscv/insns/amoswap_d.h +++ b/riscv/insns/amoswap_d.h @@ -1,3 +1,3 @@ require_extension('A'); require_rv64; -WRITE_RD(MMU.amo_uint64(RS1, [&](uint64_t lhs) { return RS2; })); +WRITE_RD(MMU.amo_uint64(RS1, [&](uint64_t UNUSED lhs) { return RS2; })); diff --git a/riscv/insns/amoswap_w.h b/riscv/insns/amoswap_w.h index 0f78369..151f095 100644 --- a/riscv/insns/amoswap_w.h +++ b/riscv/insns/amoswap_w.h @@ -1,2 +1,2 @@ require_extension('A'); -WRITE_RD(sext32(MMU.amo_uint32(RS1, [&](uint32_t lhs) { return RS2; }))); +WRITE_RD(sext32(MMU.amo_uint32(RS1, [&](uint32_t UNUSED lhs) { return RS2; }))); diff --git a/riscv/mmu.cc b/riscv/mmu.cc index d0a55f0..1028026 100644 --- a/riscv/mmu.cc +++ b/riscv/mmu.cc @@ -114,7 +114,7 @@ reg_t reg_from_bytes(size_t len, const uint8_t* bytes) abort(); } -bool mmu_t::mmio_ok(reg_t addr, access_type type) +bool mmu_t::mmio_ok(reg_t addr, access_type UNUSED type) { // Disallow access to debug region when not in debug mode if (addr >= DEBUG_START && addr <= DEBUG_END && proc && !proc->state.debug_mode) diff --git a/riscv/mmu.h b/riscv/mmu.h index ca8b792..1cd614b 100644 --- a/riscv/mmu.h +++ b/riscv/mmu.h @@ -52,7 +52,7 @@ public: #define RISCV_XLATE_VIRT (1U << 0) #define RISCV_XLATE_VIRT_HLVX (1U << 1) - inline reg_t misaligned_load(reg_t addr, size_t size, uint32_t xlate_flags) + inline reg_t misaligned_load(reg_t addr, size_t UNUSED size, uint32_t xlate_flags) { #ifdef RISCV_ENABLE_MISALIGNED reg_t res = 0; @@ -72,7 +72,7 @@ public: #endif } - inline void misaligned_store(reg_t addr, reg_t data, size_t size, uint32_t xlate_flags, bool actually_store=true) + inline void misaligned_store(reg_t addr, reg_t UNUSED data, size_t UNUSED size, uint32_t xlate_flags, bool UNUSED actually_store=true) { #ifdef RISCV_ENABLE_MISALIGNED for (size_t i = 0; i < size; i++) { diff --git a/riscv/processor.cc b/riscv/processor.cc index 5ae6bbb..9bb7d04 100644 --- a/riscv/processor.cc +++ b/riscv/processor.cc @@ -960,7 +960,7 @@ reg_t processor_t::get_csr(int which, insn_t insn, bool write, bool peek) throw trap_illegal_instruction(insn.bits()); } -reg_t illegal_instruction(processor_t* p, insn_t insn, reg_t pc) +reg_t illegal_instruction(processor_t UNUSED *p, insn_t insn, reg_t UNUSED pc) { // The illegal instruction can be longer than ILEN bits, where the tval will // contain the first ILEN bits of the faulting instruction. 
We hard-code the diff --git a/riscv/processor.h b/riscv/processor.h index 441e522..fc80914 100644 --- a/riscv/processor.h +++ b/riscv/processor.h @@ -401,7 +401,7 @@ public: // vector element for varies SEW template - T& elt(reg_t vReg, reg_t n, bool is_write = false) { + T& elt(reg_t vReg, reg_t n, bool UNUSED is_write = false) { assert(vsew != 0); assert((VLEN >> 3)/sizeof(T) > 0); reg_t elts_per_reg = (VLEN >> 3) / (sizeof(T)); diff --git a/riscv/rocc.cc b/riscv/rocc.cc index f50934f..f0dd0b2 100644 --- a/riscv/rocc.cc +++ b/riscv/rocc.cc @@ -18,7 +18,7 @@ return pc+4; \ } \ \ - reg_t rocc_t::custom##n(rocc_insn_t insn, reg_t xs1, reg_t xs2) \ + reg_t rocc_t::custom##n(rocc_insn_t UNUSED insn, reg_t UNUSED xs1, reg_t UNUSED xs2) \ { \ illegal_instruction(); \ return 0; \ diff --git a/riscv/rom.cc b/riscv/rom.cc index b852862..2d10e91 100644 --- a/riscv/rom.cc +++ b/riscv/rom.cc @@ -13,7 +13,7 @@ bool rom_device_t::load(reg_t addr, size_t len, uint8_t* bytes) return true; } -bool rom_device_t::store(reg_t addr, size_t len, const uint8_t* bytes) +bool rom_device_t::store(reg_t UNUSED addr, size_t UNUSED len, const uint8_t UNUSED *bytes) { return false; } diff --git a/riscv/tracer.h b/riscv/tracer.h index 9f1bc78..d74edae 100644 --- a/riscv/tracer.h +++ b/riscv/tracer.h @@ -5,7 +5,7 @@ #include "processor.h" -static inline void trace_opcode(processor_t* p, insn_bits_t opc, insn_t insn) { +static inline void trace_opcode(processor_t UNUSED *p, insn_bits_t UNUSED opc, insn_t UNUSED insn) { } #endif diff --git a/riscv/triggers.cc b/riscv/triggers.cc index ef05551..390ebe4 100644 --- a/riscv/triggers.cc +++ b/riscv/triggers.cc @@ -62,7 +62,7 @@ bool mcontrol_t::tdata1_write(processor_t * const proc, const reg_t val) noexcep return true; } -reg_t mcontrol_t::tdata2_read(const processor_t * const proc) const noexcept { +reg_t mcontrol_t::tdata2_read(const processor_t UNUSED * const proc) const noexcept { return tdata2; } diff --git a/riscv/v_ext_macros.h b/riscv/v_ext_macros.h index afc4837..4696343 100644 --- a/riscv/v_ext_macros.h +++ b/riscv/v_ext_macros.h @@ -1410,14 +1410,14 @@ reg_t index[P.VU.vlmax]; \ switch (P.VU.vsew) { \ case e32: { \ auto vs3 = P.VU.elt< type ## 32_t>(vd, vreg_inx); \ - auto val = MMU.amo_uint32(baseAddr + index[i], [&](type ## 32_t lhs) { op }); \ + auto val = MMU.amo_uint32(baseAddr + index[i], [&](type ## 32_t UNUSED lhs) { op }); \ if (insn.v_wd()) \ P.VU.elt< type ## 32_t>(vd, vreg_inx, true) = val; \ } \ break; \ case e64: { \ auto vs3 = P.VU.elt< type ## 64_t>(vd, vreg_inx); \ - auto val = MMU.amo_uint64(baseAddr + index[i], [&](type ## 64_t lhs) { op }); \ + auto val = MMU.amo_uint64(baseAddr + index[i], [&](type ## 64_t UNUSED lhs) { op }); \ if (insn.v_wd()) \ P.VU.elt< type ## 64_t>(vd, vreg_inx, true) = val; \ } \ -- cgit v1.1 From 09369aa5f8a5de7868b75ef15bc0ebf4e9e3e101 Mon Sep 17 00:00:00 2001 From: Andrew Waterman Date: Thu, 22 Sep 2022 17:30:23 -0700 Subject: Silence remaining unused-parameter warnings Suppressing these individually would add too much clutter. 
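The trade-off being made here, shown with a hedged snippet (an illustrative file, not fesvr/syscall.cc itself): in files where nearly every handler ignores some parameters, the warning is switched off for the whole translation unit with a pragma instead of annotating each parameter with UNUSED as the previous patches did.

// many_handlers.cc -- illustrative only
#ifdef __GNUC__
# pragma GCC diagnostic ignored "-Wunused-parameter"
#endif

#include <cstdint>

// None of these parameters trip -Wunused-parameter now, with no per-parameter clutter.
static int64_t sys_getpid(int64_t a0, int64_t a1, int64_t a2) { return 42; }
static int64_t sys_exit(int64_t code, int64_t a1, int64_t a2) { return code; }

int main() { return (int)(sys_getpid(0, 0, 0) + sys_exit(0, 1, 2)) & 0; }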
--- fesvr/syscall.cc | 4 ++++ riscv/interactive.cc | 4 ++++ 2 files changed, 8 insertions(+) diff --git a/fesvr/syscall.cc b/fesvr/syscall.cc index ab7fc3b..19c9f38 100644 --- a/fesvr/syscall.cc +++ b/fesvr/syscall.cc @@ -17,6 +17,10 @@ using namespace std::placeholders; #define RISCV_AT_FDCWD -100 +#ifdef __GNUC__ +# pragma GCC diagnostic ignored "-Wunused-parameter" +#endif + struct riscv_stat { target_endian dev; diff --git a/riscv/interactive.cc b/riscv/interactive.cc index 21cc31c..bba4540 100644 --- a/riscv/interactive.cc +++ b/riscv/interactive.cc @@ -20,6 +20,10 @@ #include #include +#ifdef __GNUC__ +# pragma GCC diagnostic ignored "-Wunused-parameter" +#endif + #define MAX_CMD_STR 40 // maximum possible size of a command line #define BITS_PER_CHAR 8 -- cgit v1.1 From c20c4a2331a6a2f2cf9e170a9b387024ef543b47 Mon Sep 17 00:00:00 2001 From: Andrew Waterman Date: Thu, 22 Sep 2022 18:01:06 -0700 Subject: Suppress most unused-variable warnings --- riscv/mmu.h | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/riscv/mmu.h b/riscv/mmu.h index 1cd614b..4d52618 100644 --- a/riscv/mmu.h +++ b/riscv/mmu.h @@ -91,7 +91,7 @@ public: } #ifndef RISCV_ENABLE_COMMITLOG -# define READ_MEM(addr, size) ({}) +# define READ_MEM(addr, size) ((void)(addr), (void)(size)) #else # define READ_MEM(addr, size) \ proc->state.log_mem_read.push_back(std::make_tuple(addr, 0, size)); @@ -154,7 +154,7 @@ public: load_func(int64, guest_load, RISCV_XLATE_VIRT) #ifndef RISCV_ENABLE_COMMITLOG -# define WRITE_MEM(addr, value, size) ({}) +# define WRITE_MEM(addr, value, size) ((void)(addr), (void)(value), (void)(size)) #else # define WRITE_MEM(addr, val, size) \ proc->state.log_mem_write.push_back(std::make_tuple(addr, val, size)); @@ -265,7 +265,7 @@ public: convert_load_traps_to_store_traps({ const reg_t vaddr = addr & ~(blocksz - 1); const reg_t paddr = translate(vaddr, blocksz, LOAD, 0); - if (auto host_addr = sim->addr_to_mem(paddr)) { + if (sim->addr_to_mem(paddr)) { if (tracer.interested_in_range(paddr, paddr + PGSIZE, LOAD)) tracer.clean_invalidate(paddr, blocksz, clean, inval); } else { -- cgit v1.1 From d2f34d1e41a7d2884aec4d1d3daa1dbf549d059e Mon Sep 17 00:00:00 2001 From: Andrew Waterman Date: Thu, 22 Sep 2022 18:34:45 -0700 Subject: Suppress unused-variable warnings in AES code --- riscv/insns/aes_common.h | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/riscv/insns/aes_common.h b/riscv/insns/aes_common.h index 9cc353c..4f3f618 100644 --- a/riscv/insns/aes_common.h +++ b/riscv/insns/aes_common.h @@ -1,5 +1,5 @@ -uint8_t AES_ENC_SBOX[]= { +static uint8_t UNUSED AES_ENC_SBOX[]= { 0x63, 0x7C, 0x77, 0x7B, 0xF2, 0x6B, 0x6F, 0xC5, 0x30, 0x01, 0x67, 0x2B, 0xFE, 0xD7, 0xAB, 0x76, 0xCA, 0x82, 0xC9, 0x7D, 0xFA, 0x59, 0x47, 0xF0, @@ -34,7 +34,7 @@ uint8_t AES_ENC_SBOX[]= { 0x41, 0x99, 0x2D, 0x0F, 0xB0, 0x54, 0xBB, 0x16 }; -uint8_t AES_DEC_SBOX[] = { +static uint8_t UNUSED AES_DEC_SBOX[] = { 0x52, 0x09, 0x6A, 0xD5, 0x30, 0x36, 0xA5, 0x38, 0xBF, 0x40, 0xA3, 0x9E, 0x81, 0xF3, 0xD7, 0xFB, 0x7C, 0xE3, 0x39, 0x82, 0x9B, 0x2F, 0xFF, 0x87, -- cgit v1.1 From e2139a5d1c11954ede663539c8666e8696474c01 Mon Sep 17 00:00:00 2001 From: Andrew Waterman Date: Thu, 22 Sep 2022 18:35:12 -0700 Subject: Suppress unused-variable warnings in vector instruction definitions --- riscv/insns/vcpop_m.h | 2 -- riscv/insns/vfirst_m.h | 2 -- riscv/insns/vid_v.h | 3 --- riscv/insns/viota_m.h | 1 - riscv/insns/vmv_x_s.h | 1 - riscv/insns/vmvnfr_v.h | 1 - 6 files changed, 10 deletions(-) diff --git 
a/riscv/insns/vcpop_m.h b/riscv/insns/vcpop_m.h index cbe45a4..671362f 100644 --- a/riscv/insns/vcpop_m.h +++ b/riscv/insns/vcpop_m.h @@ -2,8 +2,6 @@ require(P.VU.vsew >= e8 && P.VU.vsew <= e64); require_vector(true); reg_t vl = P.VU.vl->read(); -reg_t sew = P.VU.vsew; -reg_t rd_num = insn.rd(); reg_t rs2_num = insn.rs2(); require(P.VU.vstart->read() == 0); reg_t popcount = 0; diff --git a/riscv/insns/vfirst_m.h b/riscv/insns/vfirst_m.h index 5b768ed..9ddc82b 100644 --- a/riscv/insns/vfirst_m.h +++ b/riscv/insns/vfirst_m.h @@ -2,8 +2,6 @@ require(P.VU.vsew >= e8 && P.VU.vsew <= e64); require_vector(true); reg_t vl = P.VU.vl->read(); -reg_t sew = P.VU.vsew; -reg_t rd_num = insn.rd(); reg_t rs2_num = insn.rs2(); require(P.VU.vstart->read() == 0); reg_t pos = -1; diff --git a/riscv/insns/vid_v.h b/riscv/insns/vid_v.h index c316291..510132d 100644 --- a/riscv/insns/vid_v.h +++ b/riscv/insns/vid_v.h @@ -1,11 +1,8 @@ // vmpopc rd, vs2, vm require(P.VU.vsew >= e8 && P.VU.vsew <= e64); require_vector(true); -reg_t vl = P.VU.vl->read(); reg_t sew = P.VU.vsew; reg_t rd_num = insn.rd(); -reg_t rs1_num = insn.rs1(); -reg_t rs2_num = insn.rs2(); require_align(rd_num, P.VU.vflmul); require_vm; diff --git a/riscv/insns/viota_m.h b/riscv/insns/viota_m.h index f74f2c2..1ee9229 100644 --- a/riscv/insns/viota_m.h +++ b/riscv/insns/viota_m.h @@ -4,7 +4,6 @@ require_vector(true); reg_t vl = P.VU.vl->read(); reg_t sew = P.VU.vsew; reg_t rd_num = insn.rd(); -reg_t rs1_num = insn.rs1(); reg_t rs2_num = insn.rs2(); require(P.VU.vstart->read() == 0); require_vm; diff --git a/riscv/insns/vmv_x_s.h b/riscv/insns/vmv_x_s.h index 16153eb..57a9e1a 100644 --- a/riscv/insns/vmv_x_s.h +++ b/riscv/insns/vmv_x_s.h @@ -1,7 +1,6 @@ // vmv_x_s: rd = vs2[0] require_vector(true); require(insn.v_vm() == 1); -reg_t rs1 = RS1; reg_t sew = P.VU.vsew; reg_t rs2_num = insn.rs2(); reg_t res; diff --git a/riscv/insns/vmvnfr_v.h b/riscv/insns/vmvnfr_v.h index 543ac58..9c52810 100644 --- a/riscv/insns/vmvnfr_v.h +++ b/riscv/insns/vmvnfr_v.h @@ -1,6 +1,5 @@ // vmvr.v vd, vs2 require_vector(true); -const reg_t baseAddr = RS1; const reg_t vd = insn.rd(); const reg_t vs2 = insn.rs2(); const reg_t len = insn.rs1() + 1; -- cgit v1.1 From 98ba435395108fb15d8b18ec83b6efc2ff2ad928 Mon Sep 17 00:00:00 2001 From: Andrew Waterman Date: Thu, 22 Sep 2022 18:35:40 -0700 Subject: Suppress or fix unused-variable warnings in vector macros --- riscv/v_ext_macros.h | 71 ++++++++++++++++++++++++++-------------------------- 1 file changed, 35 insertions(+), 36 deletions(-) diff --git a/riscv/v_ext_macros.h b/riscv/v_ext_macros.h index 4696343..ad31938 100644 --- a/riscv/v_ext_macros.h +++ b/riscv/v_ext_macros.h @@ -87,7 +87,7 @@ static inline bool is_aligned(const unsigned val, const unsigned pos) require(elt_width <= P.VU.ELEN); \ float vemul = ((float)elt_width / P.VU.vsew * P.VU.vflmul); \ require(vemul >= 0.125 && vemul <= 8); \ - reg_t emul = vemul < 1 ? 1 : vemul; \ + reg_t UNUSED emul = vemul < 1 ? 1 : vemul; \ reg_t flmul = P.VU.vflmul < 1 ? 
1 : P.VU.vflmul; \ require_align(insn.rd(), P.VU.vflmul); \ require_align(insn.rs2(), vemul); \ @@ -208,9 +208,9 @@ static inline bool is_aligned(const unsigned val, const unsigned pos) require(P.VU.vsew >= e8 && P.VU.vsew <= e64); \ require_vector(true); \ reg_t vl = P.VU.vl->read(); \ - reg_t sew = P.VU.vsew; \ + reg_t UNUSED sew = P.VU.vsew; \ reg_t rd_num = insn.rd(); \ - reg_t rs1_num = insn.rs1(); \ + reg_t UNUSED rs1_num = insn.rs1(); \ reg_t rs2_num = insn.rs2(); \ for (reg_t i = P.VU.vstart->read(); i < vl; ++i) { @@ -255,8 +255,8 @@ static inline bool is_aligned(const unsigned val, const unsigned pos) require_vector(true); \ reg_t vl = P.VU.vl->read(); \ reg_t sew = P.VU.vsew; \ - reg_t rd_num = insn.rd(); \ - reg_t rs1_num = insn.rs1(); \ + reg_t UNUSED rd_num = insn.rd(); \ + reg_t UNUSED rs1_num = insn.rs1(); \ reg_t rs2_num = insn.rs2(); \ for (reg_t i = P.VU.vstart->read(); i < vl; ++i) { \ VI_LOOP_ELEMENT_SKIP(); \ @@ -335,23 +335,23 @@ static inline bool is_aligned(const unsigned val, const unsigned pos) #define VI_U_PARAMS(x) \ type_usew_t::type &vd = P.VU.elt::type>(rd_num, i, true); \ - type_usew_t::type zimm5 = (type_usew_t::type)insn.v_zimm5(); \ + type_usew_t::type UNUSED zimm5 = (type_usew_t::type)insn.v_zimm5(); \ type_usew_t::type vs2 = P.VU.elt::type>(rs2_num, i); #define VV_PARAMS(x) \ - type_sew_t::type &vd = P.VU.elt::type>(rd_num, i, true); \ + type_sew_t::type UNUSED &vd = P.VU.elt::type>(rd_num, i, true); \ type_sew_t::type vs1 = P.VU.elt::type>(rs1_num, i); \ - type_sew_t::type vs2 = P.VU.elt::type>(rs2_num, i); + type_sew_t::type UNUSED vs2 = P.VU.elt::type>(rs2_num, i); #define VX_PARAMS(x) \ - type_sew_t::type &vd = P.VU.elt::type>(rd_num, i, true); \ + type_sew_t::type UNUSED &vd = P.VU.elt::type>(rd_num, i, true); \ type_sew_t::type rs1 = (type_sew_t::type)RS1; \ - type_sew_t::type vs2 = P.VU.elt::type>(rs2_num, i); + type_sew_t::type UNUSED vs2 = P.VU.elt::type>(rs2_num, i); #define VI_PARAMS(x) \ type_sew_t::type &vd = P.VU.elt::type>(rd_num, i, true); \ type_sew_t::type simm5 = (type_sew_t::type)insn.v_simm5(); \ - type_sew_t::type vs2 = P.VU.elt::type>(rs2_num, i); + type_sew_t::type UNUSED vs2 = P.VU.elt::type>(rs2_num, i); #define XV_PARAMS(x) \ type_sew_t::type &vd = P.VU.elt::type>(rd_num, i, true); \ @@ -400,26 +400,26 @@ static inline bool is_aligned(const unsigned val, const unsigned pos) #define VI_NARROW_PARAMS(sew1, sew2) \ auto &vd = P.VU.elt::type>(rd_num, i, true); \ - auto vs2_u = P.VU.elt::type>(rs2_num, i); \ - auto vs2 = P.VU.elt::type>(rs2_num, i); \ + auto UNUSED vs2_u = P.VU.elt::type>(rs2_num, i); \ + auto UNUSED vs2 = P.VU.elt::type>(rs2_num, i); \ auto zimm5 = (type_usew_t::type)insn.v_zimm5(); #define VX_NARROW_PARAMS(sew1, sew2) \ auto &vd = P.VU.elt::type>(rd_num, i, true); \ - auto vs2_u = P.VU.elt::type>(rs2_num, i); \ - auto vs2 = P.VU.elt::type>(rs2_num, i); \ + auto UNUSED vs2_u = P.VU.elt::type>(rs2_num, i); \ + auto UNUSED vs2 = P.VU.elt::type>(rs2_num, i); \ auto rs1 = (type_sew_t::type)RS1; #define VV_NARROW_PARAMS(sew1, sew2) \ auto &vd = P.VU.elt::type>(rd_num, i, true); \ - auto vs2_u = P.VU.elt::type>(rs2_num, i); \ - auto vs2 = P.VU.elt::type>(rs2_num, i); \ + auto UNUSED vs2_u = P.VU.elt::type>(rs2_num, i); \ + auto UNUSED vs2 = P.VU.elt::type>(rs2_num, i); \ auto vs1 = P.VU.elt::type>(rs1_num, i); #define XI_CARRY_PARAMS(x) \ auto vs2 = P.VU.elt::type>(rs2_num, i); \ - auto rs1 = (type_sew_t::type)RS1; \ - auto simm5 = (type_sew_t::type)insn.v_simm5(); \ + auto UNUSED rs1 = (type_sew_t::type)RS1; \ + auto 
UNUSED simm5 = (type_sew_t::type)insn.v_simm5(); \ #define VV_CARRY_PARAMS(x) \ auto vs2 = P.VU.elt::type>(rs2_num, i); \ @@ -427,8 +427,8 @@ static inline bool is_aligned(const unsigned val, const unsigned pos) #define XI_WITH_CARRY_PARAMS(x) \ auto vs2 = P.VU.elt::type>(rs2_num, i); \ - auto rs1 = (type_sew_t::type)RS1; \ - auto simm5 = (type_sew_t::type)insn.v_simm5(); \ + auto UNUSED rs1 = (type_sew_t::type)RS1; \ + auto UNUSED simm5 = (type_sew_t::type)insn.v_simm5(); \ auto &vd = P.VU.elt::type>(rd_num, i, true); #define VV_WITH_CARRY_PARAMS(x) \ @@ -441,12 +441,12 @@ static inline bool is_aligned(const unsigned val, const unsigned pos) float##width##_t vs2 = P.VU.elt(rs2_num, i); #define VFP_VV_PARAMS(width) \ - float##width##_t &vd = P.VU.elt(rd_num, i, true); \ + float##width##_t UNUSED &vd = P.VU.elt(rd_num, i, true); \ float##width##_t vs1 = P.VU.elt(rs1_num, i); \ float##width##_t vs2 = P.VU.elt(rs2_num, i); #define VFP_VF_PARAMS(width) \ - float##width##_t &vd = P.VU.elt(rd_num, i, true); \ + float##width##_t UNUSED &vd = P.VU.elt(rd_num, i, true); \ float##width##_t rs1 = f##width(READ_FREG(rs1_num)); \ float##width##_t vs2 = P.VU.elt(rs2_num, i); @@ -514,7 +514,7 @@ static inline bool is_aligned(const unsigned val, const unsigned pos) // merge and copy loop #define VI_MERGE_VARS \ VI_MASK_VARS \ - bool use_first = (P.VU.elt(0, midx) >> mpos) & 0x1; + bool UNUSED use_first = (P.VU.elt(0, midx) >> mpos) & 0x1; #define VI_MERGE_LOOP_BASE \ VI_GENERAL_LOOP_BASE \ @@ -922,19 +922,19 @@ static inline bool is_aligned(const unsigned val, const unsigned pos) #define VI_WIDE_OP_AND_ASSIGN(var0, var1, var2, op0, op1, sign) \ switch (P.VU.vsew) { \ case e8: { \ - sign##16_t vd_w = P.VU.elt(rd_num, i); \ + sign##16_t UNUSED vd_w = P.VU.elt(rd_num, i); \ P.VU.elt(rd_num, i, true) = \ op1((sign##16_t)(sign##8_t)var0 op0 (sign##16_t)(sign##8_t)var1) + var2; \ } \ break; \ case e16: { \ - sign##32_t vd_w = P.VU.elt(rd_num, i); \ + sign##32_t UNUSED vd_w = P.VU.elt(rd_num, i); \ P.VU.elt(rd_num, i, true) = \ op1((sign##32_t)(sign##16_t)var0 op0 (sign##32_t)(sign##16_t)var1) + var2; \ } \ break; \ default: { \ - sign##64_t vd_w = P.VU.elt(rd_num, i); \ + sign##64_t UNUSED vd_w = P.VU.elt(rd_num, i); \ P.VU.elt(rd_num, i, true) = \ op1((sign##64_t)(sign##32_t)var0 op0 (sign##64_t)(sign##32_t)var1) + var2; \ } \ @@ -944,19 +944,19 @@ static inline bool is_aligned(const unsigned val, const unsigned pos) #define VI_WIDE_OP_AND_ASSIGN_MIX(var0, var1, var2, op0, op1, sign_d, sign_1, sign_2) \ switch (P.VU.vsew) { \ case e8: { \ - sign_d##16_t vd_w = P.VU.elt(rd_num, i); \ + sign_d##16_t UNUSED vd_w = P.VU.elt(rd_num, i); \ P.VU.elt(rd_num, i, true) = \ op1((sign_1##16_t)(sign_1##8_t)var0 op0 (sign_2##16_t)(sign_2##8_t)var1) + var2; \ } \ break; \ case e16: { \ - sign_d##32_t vd_w = P.VU.elt(rd_num, i); \ + sign_d##32_t UNUSED vd_w = P.VU.elt(rd_num, i); \ P.VU.elt(rd_num, i, true) = \ op1((sign_1##32_t)(sign_1##16_t)var0 op0 (sign_2##32_t)(sign_2##16_t)var1) + var2; \ } \ break; \ default: { \ - sign_d##64_t vd_w = P.VU.elt(rd_num, i); \ + sign_d##64_t UNUSED vd_w = P.VU.elt(rd_num, i); \ P.VU.elt(rd_num, i, true) = \ op1((sign_1##64_t)(sign_1##32_t)var0 op0 (sign_2##64_t)(sign_2##32_t)var1) + var2; \ } \ @@ -1282,7 +1282,6 @@ reg_t index[P.VU.vlmax]; \ #define VI_LDST_FF(elt_width) \ const reg_t nf = insn.v_nf() + 1; \ - const reg_t sew = p->VU.vsew; \ const reg_t vl = p->VU.vl->read(); \ const reg_t baseAddr = RS1; \ const reg_t rd_num = insn.rd(); \ @@ -1480,10 +1479,10 @@ reg_t 
index[P.VU.vlmax]; \ (P.VU.vsew == e64 && p->extension_enabled('D'))); \ require_vector(true); \ require(STATE.frm->read() < 0x5); \ - reg_t vl = P.VU.vl->read(); \ - reg_t rd_num = insn.rd(); \ - reg_t rs1_num = insn.rs1(); \ - reg_t rs2_num = insn.rs2(); \ + reg_t UNUSED vl = P.VU.vl->read(); \ + reg_t UNUSED rd_num = insn.rd(); \ + reg_t UNUSED rs1_num = insn.rs1(); \ + reg_t UNUSED rs2_num = insn.rs2(); \ softfloat_roundingMode = STATE.frm->read(); #define VI_VFP_LOOP_BASE \ @@ -1901,7 +1900,7 @@ reg_t index[P.VU.vlmax]; \ require(STATE.frm->read() < 0x5); \ reg_t vl = P.VU.vl->read(); \ reg_t rd_num = insn.rd(); \ - reg_t rs1_num = insn.rs1(); \ + reg_t UNUSED rs1_num = insn.rs1(); \ reg_t rs2_num = insn.rs2(); \ softfloat_roundingMode = STATE.frm->read(); \ for (reg_t i = P.VU.vstart->read(); i < vl; ++i) { \ -- cgit v1.1 From 08bac06bfdeeb95d54de6e2ff25f81dcf12b1ba2 Mon Sep 17 00:00:00 2001 From: Andrew Waterman Date: Thu, 22 Sep 2022 18:35:58 -0700 Subject: Fix unused-variable warnings in softfloat --- softfloat/specialize.h | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/softfloat/specialize.h b/softfloat/specialize.h index d8a1985..19504b6 100644 --- a/softfloat/specialize.h +++ b/softfloat/specialize.h @@ -111,7 +111,7 @@ struct commonNaN { char _unused; }; | location pointed to by `zPtr'. If the NaN is a signaling NaN, the invalid | exception is raised. *----------------------------------------------------------------------------*/ -#define softfloat_f16UIToCommonNaN( uiA, zPtr ) if ( ! ((uiA) & 0x0200) ) softfloat_raiseFlags( softfloat_flag_invalid ) +#define softfloat_f16UIToCommonNaN( uiA, zPtr ) if ( ! ((uiA) & 0x0200) ) (void) (zPtr), softfloat_raiseFlags( softfloat_flag_invalid ) /*---------------------------------------------------------------------------- | Converts the common NaN pointed to by `aPtr' into a 16-bit floating-point @@ -146,7 +146,7 @@ uint_fast16_t | location pointed to by `zPtr'. If the NaN is a signaling NaN, the invalid | exception is raised. *----------------------------------------------------------------------------*/ -#define softfloat_f32UIToCommonNaN( uiA, zPtr ) if ( ! ((uiA) & 0x00400000) ) softfloat_raiseFlags( softfloat_flag_invalid ) +#define softfloat_f32UIToCommonNaN( uiA, zPtr ) if ( ! ((uiA) & 0x00400000) ) (void) (zPtr), softfloat_raiseFlags( softfloat_flag_invalid ) /*---------------------------------------------------------------------------- | Converts the common NaN pointed to by `aPtr' into a 32-bit floating-point @@ -181,7 +181,7 @@ uint_fast32_t | location pointed to by `zPtr'. If the NaN is a signaling NaN, the invalid | exception is raised. *----------------------------------------------------------------------------*/ -#define softfloat_f64UIToCommonNaN( uiA, zPtr ) if ( ! ((uiA) & UINT64_C( 0x0008000000000000 )) ) softfloat_raiseFlags( softfloat_flag_invalid ) +#define softfloat_f64UIToCommonNaN( uiA, zPtr ) if ( ! ((uiA) & UINT64_C( 0x0008000000000000 )) ) (void) (zPtr), softfloat_raiseFlags( softfloat_flag_invalid ) /*---------------------------------------------------------------------------- | Converts the common NaN pointed to by `aPtr' into a 64-bit floating-point @@ -287,7 +287,7 @@ struct uint128 | pointed to by `zPtr'. If the NaN is a signaling NaN, the invalid exception | is raised. *----------------------------------------------------------------------------*/ -#define softfloat_f128UIToCommonNaN( uiA64, uiA0, zPtr ) if ( ! 
((uiA64) & UINT64_C( 0x0000800000000000 )) ) softfloat_raiseFlags( softfloat_flag_invalid ) +#define softfloat_f128UIToCommonNaN( uiA64, uiA0, zPtr ) if ( ! ((uiA64) & UINT64_C( 0x0000800000000000 )) ) (void) (zPtr), softfloat_raiseFlags( softfloat_flag_invalid ) /*---------------------------------------------------------------------------- | Converts the common NaN pointed to by `aPtr' into a 128-bit floating-point -- cgit v1.1 From 3ee74b7be334d1e2393a134f26de8dfe0cd65feb Mon Sep 17 00:00:00 2001 From: Andrew Waterman Date: Thu, 22 Sep 2022 18:01:19 -0700 Subject: Silence unused-variable warnings in auto-generated code --- disasm/disasm.cc | 4 ++++ riscv/processor.cc | 4 ++++ 2 files changed, 8 insertions(+) diff --git a/disasm/disasm.cc b/disasm/disasm.cc index 1d7c6d5..a8ba91e 100644 --- a/disasm/disasm.cc +++ b/disasm/disasm.cc @@ -10,6 +10,10 @@ // For std::reverse: #include +#ifdef __GNUC__ +# pragma GCC diagnostic ignored "-Wunused-variable" +#endif + // Indicates that the next arg (only) is optional. // If the result of converting the next arg to a string is "" // then it will not be printed. diff --git a/riscv/processor.cc b/riscv/processor.cc index 9bb7d04..217d49d 100644 --- a/riscv/processor.cc +++ b/riscv/processor.cc @@ -20,6 +20,10 @@ #include #include +#ifdef __GNUC__ +# pragma GCC diagnostic ignored "-Wunused-variable" +#endif + #undef STATE #define STATE state -- cgit v1.1 From 9acfb3ccaeb890bbf5e7ec10e8ef2f0d4b98baa4 Mon Sep 17 00:00:00 2001 From: Andrew Waterman Date: Thu, 22 Sep 2022 18:40:26 -0700 Subject: Suppress unused-variable warnings in P-extension macros --- riscv/p_ext_macros.h | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/riscv/p_ext_macros.h b/riscv/p_ext_macros.h index 6ca97b5..29437ad 100644 --- a/riscv/p_ext_macros.h +++ b/riscv/p_ext_macros.h @@ -91,7 +91,7 @@ reg_t rd_tmp = RD; \ reg_t rs1 = RS1; \ type_usew_t::type sa = RS2 & ((uint64_t(1) << LOWBIT) - 1); \ - type_sew_t::type ssa = int64_t(RS2) << (64 - LOWBIT) >> (64 - LOWBIT); \ + type_sew_t::type UNUSED ssa = int64_t(RS2) << (64 - LOWBIT) >> (64 - LOWBIT); \ sreg_t len = xlen / BIT; \ for (sreg_t i = len - 1; i >= 0; --i) { @@ -140,8 +140,8 @@ #define P_CORSS_PARAMS(BIT) \ auto pd = P_FIELD(rd_tmp, i, BIT); \ - auto ps1 = P_FIELD(rs1, i, BIT); \ - auto ps2 = P_FIELD(rs2, (i ^ 1), BIT); + auto UNUSED ps1 = P_FIELD(rs1, i, BIT); \ + auto UNUSED ps2 = P_FIELD(rs2, (i ^ 1), BIT); #define P_CORSS_UPARAMS(BIT) \ auto pd = P_UFIELD(rd_tmp, i, BIT); \ @@ -436,7 +436,7 @@ #define P_PK(BIT, X, Y) \ require_extension(EXT_ZPN); \ require(BIT == e16 || BIT == e32); \ - reg_t rd_tmp = 0, rs1 = RS1, rs2 = RS2; \ + reg_t rd_tmp = 0, UNUSED rs1 = RS1, UNUSED rs2 = RS2; \ for (sreg_t i = 0; i < xlen / BIT / 2; i++) { \ rd_tmp = set_field(rd_tmp, make_mask64(i * 2 * BIT, BIT), \ P_UFIELD(RS2, i * 2 + Y, BIT)); \ -- cgit v1.1 From 258a0c62eb400380d72fade82e154a4bd2af9b83 Mon Sep 17 00:00:00 2001 From: Andrew Waterman Date: Thu, 22 Sep 2022 18:41:02 -0700 Subject: Fix unused-variable warnings in P-extension instruction definitions --- riscv/insns/kmar64.h | 1 - 1 file changed, 1 deletion(-) diff --git a/riscv/insns/kmar64.h b/riscv/insns/kmar64.h index 49f4482..a4d332b 100644 --- a/riscv/insns/kmar64.h +++ b/riscv/insns/kmar64.h @@ -5,7 +5,6 @@ P_64_PROFILE_PARAM(true, false) bool sat = false; sreg_t mres0 = (sreg_t)P_SW(rs1, 0) * P_SW(rs2, 0); sreg_t mres1 = (sreg_t)P_SW(rs1, 1) * P_SW(rs2, 1); -sreg_t res; if (xlen == 32) { rd = (sat_add(rd, mres0, sat)); -- cgit v1.1 From 
97219fe85eecdcc298078b6b94db44cdb99b4cab Mon Sep 17 00:00:00 2001 From: Andrew Waterman Date: Thu, 22 Sep 2022 18:41:59 -0700 Subject: Suppress unused-variable warnings in spike main --- spike_main/spike.cc | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/spike_main/spike.cc b/spike_main/spike.cc index 571c56d..44db7cd 100644 --- a/spike_main/spike.cc +++ b/spike_main/spike.cc @@ -243,7 +243,7 @@ int main(int argc, char** argv) bool halted = false; bool histogram = false; bool log = false; - bool socket = false; // command line option -s + bool UNUSED socket = false; // command line option -s bool dump_dts = false; bool dtb_enabled = true; const char* kernel = NULL; -- cgit v1.1 From ab20d616ae610a0b54cddb2af9b3b36b84df5e85 Mon Sep 17 00:00:00 2001 From: Andrew Waterman Date: Thu, 22 Sep 2022 18:43:26 -0700 Subject: Expand set of warnings we error on in CI --- ci-tests/test-spike | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/ci-tests/test-spike b/ci-tests/test-spike index fa9bbdd..cd3fccc 100755 --- a/ci-tests/test-spike +++ b/ci-tests/test-spike @@ -6,7 +6,7 @@ DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" >/dev/null 2>&1 && pwd )" mkdir build cd build mkdir install -CFLAGS="-Werror" $DIR/../configure --prefix=`pwd`/install +CFLAGS="-Werror -Wignored-qualifiers -Wunused-function -Wunused-parameter -Wunused-variable" $DIR/../configure --prefix=`pwd`/install make -j4 make install -- cgit v1.1 From f5ec721fa966ce92390afc48b13d492f4045c4ae Mon Sep 17 00:00:00 2001 From: Jerin Joy Date: Thu, 22 Sep 2022 23:12:36 -0700 Subject: Fixed -Wnon-virtual-dtor warnings Signed-off-by: Jerin Joy --- fesvr/memif.h | 2 ++ riscv/simif.h | 2 ++ riscv/trap.h | 2 ++ 3 files changed, 6 insertions(+) diff --git a/fesvr/memif.h b/fesvr/memif.h index 9eebed4..7c73f48 100644 --- a/fesvr/memif.h +++ b/fesvr/memif.h @@ -33,6 +33,8 @@ public: virtual memif_endianness_t get_target_endianness() const { return memif_endianness_undecided; } + + virtual ~chunked_memif_t() = default; }; class memif_t diff --git a/riscv/simif.h b/riscv/simif.h index 0e75d45..75e865e 100644 --- a/riscv/simif.h +++ b/riscv/simif.h @@ -19,6 +19,8 @@ public: virtual const char* get_symbol(uint64_t addr) = 0; + virtual ~simif_t() = default; + }; #endif diff --git a/riscv/trap.h b/riscv/trap.h index 8347c6e..b5afce9 100644 --- a/riscv/trap.h +++ b/riscv/trap.h @@ -33,6 +33,8 @@ class trap_t return _name; } + virtual ~trap_t() = default; + private: char _name[16]; reg_t which; -- cgit v1.1 From 054a4aa579b02cb7bb2616bc78591cdb1a1f32b7 Mon Sep 17 00:00:00 2001 From: Andrew Waterman Date: Wed, 5 Oct 2022 09:56:24 -0700 Subject: Engage non-virtual-dtor warning in CI tests --- ci-tests/test-spike | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/ci-tests/test-spike b/ci-tests/test-spike index cd3fccc..bf2290b 100755 --- a/ci-tests/test-spike +++ b/ci-tests/test-spike @@ -6,7 +6,7 @@ DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" >/dev/null 2>&1 && pwd )" mkdir build cd build mkdir install -CFLAGS="-Werror -Wignored-qualifiers -Wunused-function -Wunused-parameter -Wunused-variable" $DIR/../configure --prefix=`pwd`/install +CXXFLAGS="-Wnon-virtual-dtor" CFLAGS="-Werror -Wignored-qualifiers -Wunused-function -Wunused-parameter -Wunused-variable" $DIR/../configure --prefix=`pwd`/install make -j4 make install -- cgit v1.1 From d7edd7ac550dab81b84f2001793e04b663330658 Mon Sep 17 00:00:00 2001 From: Andrew Waterman Date: Wed, 5 Oct 2022 17:21:11 -0700 Subject: Remove unused variable to fix build --- 
riscv/mmu.cc | 1 - 1 file changed, 1 deletion(-) diff --git a/riscv/mmu.cc b/riscv/mmu.cc index 367c859..38a7241 100644 --- a/riscv/mmu.cc +++ b/riscv/mmu.cc @@ -142,7 +142,6 @@ bool mmu_t::mmio_store(reg_t addr, size_t len, const uint8_t* bytes) void mmu_t::load_slow_path(reg_t addr, reg_t len, uint8_t* bytes, uint32_t xlate_flags) { if (!matched_trigger) { - reg_t data = reg_from_bytes(len, bytes); matched_trigger = trigger_exception(triggers::OPERATION_LOAD, addr, false); if (matched_trigger) throw *matched_trigger; -- cgit v1.1 From 749ead90a5c254ca23d54ef3e0669e51df127a5d Mon Sep 17 00:00:00 2001 From: Andrew Waterman Date: Wed, 5 Oct 2022 18:09:53 -0700 Subject: Move all uncommon-case load functionality into load_slow_path As a side effect, misaligned loads now behave the same as aligned loads with respect to triggers: only the first byte is checked. --- riscv/mmu.cc | 37 ++++++++++++++++++++++++++++++----- riscv/mmu.h | 64 +++++++++--------------------------------------------------- 2 files changed, 41 insertions(+), 60 deletions(-) diff --git a/riscv/mmu.cc b/riscv/mmu.cc index 38a7241..49889df 100644 --- a/riscv/mmu.cc +++ b/riscv/mmu.cc @@ -139,12 +139,13 @@ bool mmu_t::mmio_store(reg_t addr, size_t len, const uint8_t* bytes) return sim->mmio_store(addr, len, bytes); } -void mmu_t::load_slow_path(reg_t addr, reg_t len, uint8_t* bytes, uint32_t xlate_flags) +void mmu_t::load_slow_path_intrapage(reg_t addr, reg_t len, uint8_t* bytes, uint32_t xlate_flags) { - if (!matched_trigger) { - matched_trigger = trigger_exception(triggers::OPERATION_LOAD, addr, false); - if (matched_trigger) - throw *matched_trigger; + reg_t vpn = addr >> PGSHIFT; + if (xlate_flags == 0 && vpn == (tlb_load_tag[vpn % TLB_ENTRIES] & ~TLB_CHECK_TRIGGERS)) { + auto host_addr = tlb_data[vpn % TLB_ENTRIES].host_offset + addr; + memcpy(bytes, host_addr, len); + return; } reg_t paddr = translate(addr, len, LOAD, xlate_flags); @@ -158,6 +159,32 @@ void mmu_t::load_slow_path(reg_t addr, reg_t len, uint8_t* bytes, uint32_t xlate } else if (!mmio_load(paddr, len, bytes)) { throw trap_load_access_fault((proc) ? proc->state.v : false, addr, 0, 0); } +} + +void mmu_t::load_slow_path(reg_t addr, reg_t len, uint8_t* bytes, uint32_t xlate_flags, bool UNUSED require_alignment) +{ + if (!matched_trigger) { + matched_trigger = trigger_exception(triggers::OPERATION_LOAD, addr, false); + if (matched_trigger) + throw *matched_trigger; + } + + if ((addr & (len - 1)) == 0) { + load_slow_path_intrapage(addr, len, bytes, xlate_flags); + } else { + bool gva = ((proc) ? proc->state.v : false) || (RISCV_XLATE_VIRT & xlate_flags); +#ifndef RISCV_ENABLE_MISALIGNED + throw trap_load_address_misaligned(gva, addr, 0, 0); +#else + if (require_alignment) + throw trap_load_access_fault(gva, addr, 0, 0); + + reg_t len_page0 = std::min(len, PGSIZE - addr % PGSIZE); + load_slow_path_intrapage(addr, len_page0, bytes, xlate_flags); + if (len_page0 != len) + load_slow_path_intrapage(addr + len_page0, len - len_page0, bytes + len_page0, xlate_flags); +#endif + } if (!matched_trigger) { reg_t data = reg_from_bytes(len, bytes); diff --git a/riscv/mmu.h b/riscv/mmu.h index 8b9ff9b..5d761dc 100644 --- a/riscv/mmu.h +++ b/riscv/mmu.h @@ -52,26 +52,6 @@ public: #define RISCV_XLATE_VIRT (1U << 0) #define RISCV_XLATE_VIRT_HLVX (1U << 1) - inline reg_t misaligned_load(reg_t addr, size_t UNUSED size, uint32_t xlate_flags) - { -#ifdef RISCV_ENABLE_MISALIGNED - reg_t res = 0; - for (size_t i = 0; i < size; i++) { - const reg_t byteaddr = addr + (target_big_endian? 
size-1-i : i); - const reg_t bytedata - = (RISCV_XLATE_VIRT_HLVX & xlate_flags) ? guest_load_x_uint8(byteaddr) - : (RISCV_XLATE_VIRT & xlate_flags) ? guest_load_uint8(byteaddr) - : load_uint8(byteaddr) - ; - res += bytedata << (i * 8); - } - return res; -#else - bool gva = ((proc) ? proc->state.v : false) || (RISCV_XLATE_VIRT & xlate_flags); - throw trap_load_address_misaligned(gva, addr, 0, 0); -#endif - } - inline void misaligned_store(reg_t addr, reg_t UNUSED data, size_t UNUSED size, uint32_t xlate_flags, bool UNUSED actually_store=true) { #ifdef RISCV_ENABLE_MISALIGNED @@ -100,35 +80,19 @@ public: // template for functions that load an aligned value from memory #define load_func(type, prefix, xlate_flags) \ type##_t ALWAYS_INLINE prefix##_##type(reg_t addr, bool require_alignment = false) { \ - if (unlikely(addr & (sizeof(type##_t)-1))) { \ - if (!matched_trigger) { \ - matched_trigger = trigger_exception(triggers::OPERATION_LOAD, addr, false); \ - if (matched_trigger) \ - throw *matched_trigger; \ - } \ - if (require_alignment) load_reserved_address_misaligned(addr); \ - else return misaligned_load(addr, sizeof(type##_t), xlate_flags); \ - } \ reg_t vpn = addr >> PGSHIFT; \ size_t size = sizeof(type##_t); \ - if ((xlate_flags) == 0 && likely(tlb_load_tag[vpn % TLB_ENTRIES] == vpn)) { \ + bool aligned = (addr & (size - 1)) == 0; \ + bool tlb_hit = tlb_load_tag[vpn % TLB_ENTRIES] == vpn; \ + if (likely((xlate_flags) == 0 && aligned && tlb_hit)) { \ if (proc) READ_MEM(addr, size); \ return from_target(*(target_endian*)(tlb_data[vpn % TLB_ENTRIES].host_offset + addr)); \ - } \ - if ((xlate_flags) == 0 && unlikely(tlb_load_tag[vpn % TLB_ENTRIES] == (vpn | TLB_CHECK_TRIGGERS))) { \ - type##_t data = from_target(*(target_endian*)(tlb_data[vpn % TLB_ENTRIES].host_offset + addr)); \ - if (!matched_trigger) { \ - matched_trigger = trigger_exception(triggers::OPERATION_LOAD, addr, true, data); \ - if (matched_trigger) \ - throw *matched_trigger; \ - } \ + } else { \ + target_endian res; \ + load_slow_path(addr, sizeof(type##_t), (uint8_t*)&res, (xlate_flags), require_alignment); \ if (proc) READ_MEM(addr, size); \ - return data; \ + return from_target(res); \ } \ - target_endian res; \ - load_slow_path(addr, sizeof(type##_t), (uint8_t*)&res, (xlate_flags)); \ - if (proc) READ_MEM(addr, size); \ - return from_target(res); \ } // load value from memory at aligned address; zero extend to register width @@ -142,7 +106,6 @@ public: load_func(uint16, guest_load, RISCV_XLATE_VIRT) load_func(uint32, guest_load, RISCV_XLATE_VIRT) load_func(uint64, guest_load, RISCV_XLATE_VIRT) - load_func(uint8, guest_load_x, RISCV_XLATE_VIRT|RISCV_XLATE_VIRT_HLVX) // only for use by misaligned HLVX load_func(uint16, guest_load_x, RISCV_XLATE_VIRT|RISCV_XLATE_VIRT_HLVX) load_func(uint32, guest_load_x, RISCV_XLATE_VIRT|RISCV_XLATE_VIRT_HLVX) @@ -300,16 +263,6 @@ public: throw trap_load_access_fault((proc) ? proc->state.v : false, vaddr, 0, 0); // disallow LR to I/O space } - inline void load_reserved_address_misaligned(reg_t vaddr) - { - bool gva = proc ? proc->state.v : false; -#ifdef RISCV_ENABLE_MISALIGNED - throw trap_load_access_fault(gva, vaddr, 0, 0); -#else - throw trap_load_address_misaligned(gva, vaddr, 0, 0); -#endif - } - inline void store_conditional_address_misaligned(reg_t vaddr) { bool gva = proc ? 
proc->state.v : false; @@ -471,7 +424,8 @@ private: // handle uncommon cases: TLB misses, page faults, MMIO tlb_entry_t fetch_slow_path(reg_t addr); - void load_slow_path(reg_t addr, reg_t len, uint8_t* bytes, uint32_t xlate_flags); + void load_slow_path(reg_t addr, reg_t len, uint8_t* bytes, uint32_t xlate_flags, bool require_alignment); + void load_slow_path_intrapage(reg_t addr, reg_t len, uint8_t* bytes, uint32_t xlate_flags); void store_slow_path(reg_t addr, reg_t len, const uint8_t* bytes, uint32_t xlate_flags, bool actually_store); bool mmio_load(reg_t addr, size_t len, uint8_t* bytes); bool mmio_store(reg_t addr, size_t len, const uint8_t* bytes); -- cgit v1.1 From 6311f7513aa150797f69ecac906978bb9e9fecbd Mon Sep 17 00:00:00 2001 From: Andrew Waterman Date: Wed, 5 Oct 2022 18:38:02 -0700 Subject: Move all uncommon-case store functionality into store_slow_path As a side effect, misaligned stores now behave the same as aligned stores with respect to triggers: only the first byte is checked. --- riscv/mmu.cc | 43 +++++++++++++++++++++++++++++++------- riscv/mmu.h | 68 ++++++++++-------------------------------------------------- 2 files changed, 46 insertions(+), 65 deletions(-) diff --git a/riscv/mmu.cc b/riscv/mmu.cc index 49889df..9c9a6c5 100644 --- a/riscv/mmu.cc +++ b/riscv/mmu.cc @@ -194,15 +194,13 @@ void mmu_t::load_slow_path(reg_t addr, reg_t len, uint8_t* bytes, uint32_t xlate } } -void mmu_t::store_slow_path(reg_t addr, reg_t len, const uint8_t* bytes, uint32_t xlate_flags, bool actually_store) +void mmu_t::store_slow_path_intrapage(reg_t addr, reg_t len, const uint8_t* bytes, uint32_t xlate_flags, bool actually_store) { - if (actually_store) { - if (!matched_trigger) { - reg_t data = reg_from_bytes(len, bytes); - matched_trigger = trigger_exception(triggers::OPERATION_STORE, addr, true, data); - if (matched_trigger) - throw *matched_trigger; - } + reg_t vpn = addr >> PGSHIFT; + if (xlate_flags == 0 && vpn == (tlb_store_tag[vpn % TLB_ENTRIES] & ~TLB_CHECK_TRIGGERS)) { + auto host_addr = tlb_data[vpn % TLB_ENTRIES].host_offset + addr; + memcpy(host_addr, bytes, len); + return; } reg_t paddr = translate(addr, len, STORE, xlate_flags); @@ -220,6 +218,35 @@ void mmu_t::store_slow_path(reg_t addr, reg_t len, const uint8_t* bytes, uint32_ } } +void mmu_t::store_slow_path(reg_t addr, reg_t len, const uint8_t* bytes, uint32_t xlate_flags, bool actually_store, bool UNUSED require_alignment) +{ + if (actually_store) { + if (!matched_trigger) { + reg_t data = reg_from_bytes(len, bytes); + matched_trigger = trigger_exception(triggers::OPERATION_STORE, addr, true, data); + if (matched_trigger) + throw *matched_trigger; + } + } + + if (addr & (len - 1)) { + bool gva = ((proc) ? 
proc->state.v : false) || (RISCV_XLATE_VIRT & xlate_flags); +#ifndef RISCV_ENABLE_MISALIGNED + throw trap_store_address_misaligned(gva, addr, 0, 0); +#else + if (require_alignment) + throw trap_store_access_fault(gva, addr, 0, 0); + + reg_t len_page0 = std::min(len, PGSIZE - addr % PGSIZE); + store_slow_path_intrapage(addr, len_page0, bytes, xlate_flags, actually_store); + if (len_page0 != len) + store_slow_path_intrapage(addr + len_page0, len - len_page0, bytes + len_page0, xlate_flags, actually_store); +#endif + } else { + store_slow_path_intrapage(addr, len, bytes, xlate_flags, actually_store); + } +} + tlb_entry_t mmu_t::refill_tlb(reg_t vaddr, reg_t paddr, char* host_addr, access_type type) { reg_t idx = (vaddr >> PGSHIFT) % TLB_ENTRIES; diff --git a/riscv/mmu.h b/riscv/mmu.h index 5d761dc..137b306 100644 --- a/riscv/mmu.h +++ b/riscv/mmu.h @@ -52,24 +52,6 @@ public: #define RISCV_XLATE_VIRT (1U << 0) #define RISCV_XLATE_VIRT_HLVX (1U << 1) - inline void misaligned_store(reg_t addr, reg_t UNUSED data, size_t UNUSED size, uint32_t xlate_flags, bool UNUSED actually_store=true) - { -#ifdef RISCV_ENABLE_MISALIGNED - for (size_t i = 0; i < size; i++) { - const reg_t byteaddr = addr + (target_big_endian? size-1-i : i); - const reg_t bytedata = data >> (i * 8); - if (RISCV_XLATE_VIRT & xlate_flags) { - guest_store_uint8(byteaddr, bytedata, actually_store); - } else { - store_uint8(byteaddr, bytedata, actually_store); - } - } -#else - bool gva = ((proc) ? proc->state.v : false) || (RISCV_XLATE_VIRT & xlate_flags); - throw trap_store_address_misaligned(gva, addr, 0, 0); -#endif - } - #ifndef RISCV_ENABLE_COMMITLOG # define READ_MEM(addr, size) ((void)(addr), (void)(size)) #else @@ -131,39 +113,18 @@ public: // template for functions that store an aligned value to memory #define store_func(type, prefix, xlate_flags) \ void ALWAYS_INLINE prefix##_##type(reg_t addr, type##_t val, bool actually_store=true, bool require_alignment=false) { \ - if (unlikely(addr & (sizeof(type##_t)-1))) { \ - if (actually_store) { \ - if (!matched_trigger) { \ - matched_trigger = trigger_exception(triggers::OPERATION_STORE, addr, true, val); \ - if (matched_trigger) \ - throw *matched_trigger; \ - } \ - } \ - if (require_alignment) store_conditional_address_misaligned(addr); \ - else return misaligned_store(addr, val, sizeof(type##_t), xlate_flags, actually_store); \ - } \ reg_t vpn = addr >> PGSHIFT; \ size_t size = sizeof(type##_t); \ - if ((xlate_flags) == 0 && likely(tlb_store_tag[vpn % TLB_ENTRIES] == vpn)) { \ - if (actually_store) { \ - if (proc) WRITE_MEM(addr, val, size); \ - *(target_endian*)(tlb_data[vpn % TLB_ENTRIES].host_offset + addr) = to_target(val); \ - } \ - } \ - else if ((xlate_flags) == 0 && unlikely(tlb_store_tag[vpn % TLB_ENTRIES] == (vpn | TLB_CHECK_TRIGGERS))) { \ + bool aligned = (addr & (size - 1)) == 0; \ + bool tlb_hit = tlb_store_tag[vpn % TLB_ENTRIES] == vpn; \ + if ((xlate_flags) == 0 && likely(aligned && tlb_hit)) { \ if (actually_store) { \ - if (!matched_trigger) { \ - matched_trigger = trigger_exception(triggers::OPERATION_STORE, addr, true, val); \ - if (matched_trigger) \ - throw *matched_trigger; \ - } \ if (proc) WRITE_MEM(addr, val, size); \ *(target_endian*)(tlb_data[vpn % TLB_ENTRIES].host_offset + addr) = to_target(val); \ } \ - } \ - else { \ + } else { \ target_endian target_val = to_target(val); \ - store_slow_path(addr, sizeof(type##_t), (const uint8_t*)&target_val, (xlate_flags), actually_store); \ + store_slow_path(addr, sizeof(type##_t), (const uint8_t*)&target_val, 
(xlate_flags), actually_store, require_alignment); \ if (actually_store && proc) WRITE_MEM(addr, val, size); \ } \ } @@ -263,20 +224,12 @@ public: throw trap_load_access_fault((proc) ? proc->state.v : false, vaddr, 0, 0); // disallow LR to I/O space } - inline void store_conditional_address_misaligned(reg_t vaddr) - { - bool gva = proc ? proc->state.v : false; -#ifdef RISCV_ENABLE_MISALIGNED - throw trap_store_access_fault(gva, vaddr, 0, 0); -#else - throw trap_store_address_misaligned(gva, vaddr, 0, 0); -#endif - } - inline bool check_load_reservation(reg_t vaddr, size_t size) { - if (vaddr & (size-1)) - store_conditional_address_misaligned(vaddr); + if (vaddr & (size-1)) { + // Raise either access fault or misaligned exception + store_slow_path(vaddr, size, nullptr, 0, false, true); + } reg_t paddr = translate(vaddr, 1, STORE, 0); if (auto host_addr = sim->addr_to_mem(paddr)) @@ -426,7 +379,8 @@ private: tlb_entry_t fetch_slow_path(reg_t addr); void load_slow_path(reg_t addr, reg_t len, uint8_t* bytes, uint32_t xlate_flags, bool require_alignment); void load_slow_path_intrapage(reg_t addr, reg_t len, uint8_t* bytes, uint32_t xlate_flags); - void store_slow_path(reg_t addr, reg_t len, const uint8_t* bytes, uint32_t xlate_flags, bool actually_store); + void store_slow_path(reg_t addr, reg_t len, const uint8_t* bytes, uint32_t xlate_flags, bool actually_store, bool require_alignment); + void store_slow_path_intrapage(reg_t addr, reg_t len, const uint8_t* bytes, uint32_t xlate_flags, bool actually_store); bool mmio_load(reg_t addr, size_t len, uint8_t* bytes); bool mmio_store(reg_t addr, size_t len, const uint8_t* bytes); bool mmio_ok(reg_t addr, access_type type); -- cgit v1.1 From 14410156b2d796dad0e439b9535057f1e4c5a13c Mon Sep 17 00:00:00 2001 From: Andrew Waterman Date: Thu, 6 Oct 2022 14:31:07 -0700 Subject: Move uncommon-case fetch functionality into fetch_slow_path --- riscv/mmu.cc | 32 ++++++++++++++++++++++++-------- riscv/mmu.h | 18 +----------------- 2 files changed, 25 insertions(+), 25 deletions(-) diff --git a/riscv/mmu.cc b/riscv/mmu.cc index 9c9a6c5..5a5f5c0 100644 --- a/riscv/mmu.cc +++ b/riscv/mmu.cc @@ -76,16 +76,32 @@ reg_t mmu_t::translate(reg_t addr, reg_t len, access_type type, uint32_t xlate_f tlb_entry_t mmu_t::fetch_slow_path(reg_t vaddr) { - reg_t paddr = translate(vaddr, sizeof(fetch_temp), FETCH, 0); - - if (auto host_addr = sim->addr_to_mem(paddr)) { - return refill_tlb(vaddr, paddr, host_addr, FETCH); + triggers::action_t action; + auto match = proc->TM.memory_access_match(&action, triggers::OPERATION_EXECUTE, vaddr, false); + if (match != triggers::MATCH_NONE) + throw triggers::matched_t(triggers::OPERATION_EXECUTE, vaddr, 0, action); + + tlb_entry_t result; + reg_t vpn = vaddr >> PGSHIFT; + if (unlikely(tlb_insn_tag[vpn % TLB_ENTRIES] != (vpn | TLB_CHECK_TRIGGERS))) { + reg_t paddr = translate(vaddr, sizeof(fetch_temp), FETCH, 0); + if (auto host_addr = sim->addr_to_mem(paddr)) { + result = refill_tlb(vaddr, paddr, host_addr, FETCH); + } else { + if (!mmio_load(paddr, sizeof fetch_temp, (uint8_t*)&fetch_temp)) + throw trap_instruction_access_fault(proc->state.v, vaddr, 0, 0); + result = {(char*)&fetch_temp - vaddr, paddr - vaddr}; + } } else { - if (!mmio_load(paddr, sizeof fetch_temp, (uint8_t*)&fetch_temp)) - throw trap_instruction_access_fault(proc->state.v, vaddr, 0, 0); - tlb_entry_t entry = {(char*)&fetch_temp - vaddr, paddr - vaddr}; - return entry; + result = tlb_data[vpn % TLB_ENTRIES]; } + + target_endian* ptr = (target_endian*)(result.host_offset 
+ vaddr); + match = proc->TM.memory_access_match(&action, triggers::OPERATION_EXECUTE, vaddr, true, from_target(*ptr)); + if (match != triggers::MATCH_NONE) + throw triggers::matched_t(triggers::OPERATION_EXECUTE, vaddr, from_target(*ptr), action); + + return result; } reg_t reg_from_bytes(size_t len, const uint8_t* bytes) diff --git a/riscv/mmu.h b/riscv/mmu.h index 137b306..74c9a71 100644 --- a/riscv/mmu.h +++ b/riscv/mmu.h @@ -391,23 +391,7 @@ private: reg_t vpn = addr >> PGSHIFT; if (likely(tlb_insn_tag[vpn % TLB_ENTRIES] == vpn)) return tlb_data[vpn % TLB_ENTRIES]; - triggers::action_t action; - auto match = proc->TM.memory_access_match(&action, triggers::OPERATION_EXECUTE, addr, false); - if (match != triggers::MATCH_NONE) { - throw triggers::matched_t(triggers::OPERATION_EXECUTE, addr, 0, action); - } - tlb_entry_t result; - if (unlikely(tlb_insn_tag[vpn % TLB_ENTRIES] != (vpn | TLB_CHECK_TRIGGERS))) { - result = fetch_slow_path(addr); - } else { - result = tlb_data[vpn % TLB_ENTRIES]; - } - target_endian* ptr = (target_endian*)(result.host_offset + addr); - match = proc->TM.memory_access_match(&action, triggers::OPERATION_EXECUTE, addr, true, from_target(*ptr)); - if (match != triggers::MATCH_NONE) { - throw triggers::matched_t(triggers::OPERATION_EXECUTE, addr, from_target(*ptr), action); - } - return result; + return fetch_slow_path(addr); } inline const uint16_t* translate_insn_addr_to_host(reg_t addr) { -- cgit v1.1 From 197f3e2640a182c7734d781bf61f570457cce5b8 Mon Sep 17 00:00:00 2001 From: Andrew Waterman Date: Thu, 6 Oct 2022 14:35:59 -0700 Subject: DRY in checking triggers --- riscv/mmu.cc | 53 +++++++++++++++++++++++++++-------------------------- riscv/mmu.h | 17 +---------------- 2 files changed, 28 insertions(+), 42 deletions(-) diff --git a/riscv/mmu.cc b/riscv/mmu.cc index 5a5f5c0..f87e879 100644 --- a/riscv/mmu.cc +++ b/riscv/mmu.cc @@ -76,10 +76,7 @@ reg_t mmu_t::translate(reg_t addr, reg_t len, access_type type, uint32_t xlate_f tlb_entry_t mmu_t::fetch_slow_path(reg_t vaddr) { - triggers::action_t action; - auto match = proc->TM.memory_access_match(&action, triggers::OPERATION_EXECUTE, vaddr, false); - if (match != triggers::MATCH_NONE) - throw triggers::matched_t(triggers::OPERATION_EXECUTE, vaddr, 0, action); + check_triggers(triggers::OPERATION_EXECUTE, vaddr, false); tlb_entry_t result; reg_t vpn = vaddr >> PGSHIFT; @@ -97,9 +94,7 @@ tlb_entry_t mmu_t::fetch_slow_path(reg_t vaddr) } target_endian* ptr = (target_endian*)(result.host_offset + vaddr); - match = proc->TM.memory_access_match(&action, triggers::OPERATION_EXECUTE, vaddr, true, from_target(*ptr)); - if (match != triggers::MATCH_NONE) - throw triggers::matched_t(triggers::OPERATION_EXECUTE, vaddr, from_target(*ptr), action); + check_triggers(triggers::OPERATION_EXECUTE, vaddr, true, from_target(*ptr)); return result; } @@ -155,6 +150,27 @@ bool mmu_t::mmio_store(reg_t addr, size_t len, const uint8_t* bytes) return sim->mmio_store(addr, len, bytes); } +void mmu_t::check_triggers(triggers::operation_t operation, reg_t address, bool has_data, reg_t data) +{ + if (matched_trigger || !proc) + return; + + triggers::action_t action; + auto match = proc->TM.memory_access_match(&action, operation, address, has_data, data); + + switch (match) { + case triggers::MATCH_NONE: + return; + + case triggers::MATCH_FIRE_BEFORE: + throw triggers::matched_t(operation, address, data, action); + + case triggers::MATCH_FIRE_AFTER: + matched_trigger = new triggers::matched_t(operation, address, data, action); + throw 
*matched_trigger; + } +} + void mmu_t::load_slow_path_intrapage(reg_t addr, reg_t len, uint8_t* bytes, uint32_t xlate_flags) { reg_t vpn = addr >> PGSHIFT; @@ -179,11 +195,7 @@ void mmu_t::load_slow_path_intrapage(reg_t addr, reg_t len, uint8_t* bytes, uint void mmu_t::load_slow_path(reg_t addr, reg_t len, uint8_t* bytes, uint32_t xlate_flags, bool UNUSED require_alignment) { - if (!matched_trigger) { - matched_trigger = trigger_exception(triggers::OPERATION_LOAD, addr, false); - if (matched_trigger) - throw *matched_trigger; - } + check_triggers(triggers::OPERATION_LOAD, addr, false); if ((addr & (len - 1)) == 0) { load_slow_path_intrapage(addr, len, bytes, xlate_flags); @@ -202,12 +214,7 @@ void mmu_t::load_slow_path(reg_t addr, reg_t len, uint8_t* bytes, uint32_t xlate #endif } - if (!matched_trigger) { - reg_t data = reg_from_bytes(len, bytes); - matched_trigger = trigger_exception(triggers::OPERATION_LOAD, addr, true, data); - if (matched_trigger) - throw *matched_trigger; - } + check_triggers(triggers::OPERATION_LOAD, addr, true, reg_from_bytes(len, bytes)); } void mmu_t::store_slow_path_intrapage(reg_t addr, reg_t len, const uint8_t* bytes, uint32_t xlate_flags, bool actually_store) @@ -236,14 +243,8 @@ void mmu_t::store_slow_path_intrapage(reg_t addr, reg_t len, const uint8_t* byte void mmu_t::store_slow_path(reg_t addr, reg_t len, const uint8_t* bytes, uint32_t xlate_flags, bool actually_store, bool UNUSED require_alignment) { - if (actually_store) { - if (!matched_trigger) { - reg_t data = reg_from_bytes(len, bytes); - matched_trigger = trigger_exception(triggers::OPERATION_STORE, addr, true, data); - if (matched_trigger) - throw *matched_trigger; - } - } + if (actually_store) + check_triggers(triggers::OPERATION_STORE, addr, true, reg_from_bytes(len, bytes)); if (addr & (len - 1)) { bool gva = ((proc) ? proc->state.v : false) || (RISCV_XLATE_VIRT & xlate_flags); diff --git a/riscv/mmu.h b/riscv/mmu.h index 74c9a71..8eeea31 100644 --- a/riscv/mmu.h +++ b/riscv/mmu.h @@ -384,6 +384,7 @@ private: bool mmio_load(reg_t addr, size_t len, uint8_t* bytes); bool mmio_store(reg_t addr, size_t len, const uint8_t* bytes); bool mmio_ok(reg_t addr, access_type type); + void check_triggers(triggers::operation_t operation, reg_t address, bool has_data, reg_t data = 0); reg_t translate(reg_t addr, reg_t len, access_type type, uint32_t xlate_flags); // ITLB lookup @@ -398,22 +399,6 @@ private: return (uint16_t*)(translate_insn_addr(addr).host_offset + addr); } - inline triggers::matched_t *trigger_exception(triggers::operation_t operation, - reg_t address, bool has_data, reg_t data=0) - { - if (!proc) { - return NULL; - } - triggers::action_t action; - auto match = proc->TM.memory_access_match(&action, operation, address, has_data, data); - if (match == triggers::MATCH_NONE) - return NULL; - if (match == triggers::MATCH_FIRE_BEFORE) { - throw triggers::matched_t(operation, address, data, action); - } - return new triggers::matched_t(operation, address, data, action); - } - reg_t pmp_homogeneous(reg_t addr, reg_t len); bool pmp_ok(reg_t addr, reg_t len, access_type type, reg_t mode); -- cgit v1.1 From fd50768df9ec4d9f80c6a37d89734d9e27443f6b Mon Sep 17 00:00:00 2001 From: Andrew Waterman Date: Thu, 6 Oct 2022 14:53:32 -0700 Subject: Fix endianness bug in fetch triggers Instruction fetch is always little-endian. 
---
 riscv/mmu.cc | 3 +--
 1 file changed, 1 insertion(+), 2 deletions(-)

diff --git a/riscv/mmu.cc b/riscv/mmu.cc
index f87e879..b8690ec 100644
--- a/riscv/mmu.cc
+++ b/riscv/mmu.cc
@@ -93,8 +93,7 @@ tlb_entry_t mmu_t::fetch_slow_path(reg_t vaddr)
     result = tlb_data[vpn % TLB_ENTRIES];
   }
-  target_endian* ptr = (target_endian*)(result.host_offset + vaddr);
-  check_triggers(triggers::OPERATION_EXECUTE, vaddr, true, from_target(*ptr));
+  check_triggers(triggers::OPERATION_EXECUTE, vaddr, true, from_le(*(const uint16_t*)(result.host_offset + vaddr)));
   return result;
 }
-- cgit v1.1

From 7b8114f707a7b2de9fd2d393b9d019180de83025 Mon Sep 17 00:00:00 2001
From: Andrew Waterman
Date: Thu, 6 Oct 2022 17:40:41 -0700
Subject: Don't use reexecution as the means to implement trigger-after

The scheme was based on the notion that memory accesses are idempotent up
until the point the trigger would've been hit, which isn't true in the case
of side-effecting loads and data-value triggers. Instead, check the trigger
on the next instruction fetch. To keep the perf overhead minimal, perform
this check on the I$ refill path, and ensure that path is taken by flushing
the I$.
---
 riscv/execute.cc | 9 ---------
 riscv/mmu.cc | 5 ++++-
 riscv/mmu.h | 3 +++
 3 files changed, 7 insertions(+), 10 deletions(-)

diff --git a/riscv/execute.cc b/riscv/execute.cc
index 5d24ce8..f0bb946 100644
--- a/riscv/execute.cc
+++ b/riscv/execute.cc
@@ -309,15 +309,6 @@ void processor_t::step(size_t n)
       catch (triggers::matched_t& t)
       {
         if (mmu->matched_trigger) {
-          // This exception came from the MMU. That means the instruction hasn't
-          // fully executed yet. We start it again, but this time it won't throw
-          // an exception because matched_trigger is already set. (All memory
-          // instructions are idempotent so restarting is safe.)
-
-          insn_fetch_t fetch = mmu->load_insn(pc);
-          pc = execute_insn(this, pc, fetch);
-          advance_pc();
-
           delete mmu->matched_trigger;
           mmu->matched_trigger = NULL;
         }

diff --git a/riscv/mmu.cc b/riscv/mmu.cc
index b8690ec..fdad05f 100644
--- a/riscv/mmu.cc
+++ b/riscv/mmu.cc
@@ -165,8 +165,11 @@ void mmu_t::check_triggers(triggers::operation_t operation, reg_t address, bool
       throw triggers::matched_t(operation, address, data, action);
     case triggers::MATCH_FIRE_AFTER:
+      // We want to take this exception on the next instruction. We check
+      // whether to do so in the I$ refill path, so flush the I$.
+      flush_icache();
       matched_trigger = new triggers::matched_t(operation, address, data, action);
-      throw *matched_trigger;
+      return;
   }
 }

diff --git a/riscv/mmu.h b/riscv/mmu.h
index 8eeea31..01e74ef 100644
--- a/riscv/mmu.h
+++ b/riscv/mmu.h
@@ -247,6 +247,9 @@ public:
   inline icache_entry_t* refill_icache(reg_t addr, icache_entry_t* entry)
   {
+    if (matched_trigger)
+      throw *matched_trigger;
+
     auto tlb_entry = translate_insn_addr(addr);
     insn_bits_t insn = from_le(*(uint16_t*)(tlb_entry.host_offset + addr));
     int length = insn_length(insn);
-- cgit v1.1
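The deferral idea in the patch above can be pictured with a small standalone sketch: instead of throwing at the triggering access, the match is recorded and the instruction-cache fast path is invalidated, so the exception is raised on the next fetch. This is only an illustration under invented names (pending_trigger_t, tiny_core_t, on_trigger_match_after); it is not Spike's real matched_trigger/flush_icache code, which appears verbatim in the diff.

    #include <optional>
    #include <stdexcept>
    #include <string>

    // Hypothetical model of a "fire after" debug trigger.
    struct pending_trigger_t { unsigned long address; };

    class tiny_core_t {
    public:
      void on_trigger_match_after(unsigned long addr) {
        pending = pending_trigger_t{addr};  // analogous to matched_trigger = new ...
        icache_valid = false;               // analogous to flush_icache()
      }

      void fetch(unsigned long pc) {
        if (!icache_valid) {                // refill path: runs once after a flush
          icache_valid = true;
          if (pending) {
            unsigned long addr = pending->address;
            pending.reset();
            throw std::runtime_error("trigger fired before pc " + std::to_string(pc) +
                                     " for access at " + std::to_string(addr));
          }
        }
        // normal fetch/decode would continue here
      }

    private:
      std::optional<pending_trigger_t> pending;
      bool icache_valid = true;
    };

Because the pending match is checked only on the refill path, the common-case fetch stays a plain cache hit, which is the performance point the commit message makes.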
From 1112fd33a41f130e374b8456859cc818a426782d Mon Sep 17 00:00:00 2001
From: Tim Newsome
Date: Fri, 7 Oct 2022 10:25:19 -0700
Subject: Add --dm-no-abstract-fpr option.

Previously FPRs could always be accessed using abstract commands. I need
this to get coverage of some OpenOCD code that I broke.
(See https://github.com/riscv/riscv-openocd/pull/745)
---
 riscv/debug_module.cc | 2 +-
 riscv/debug_module.h | 1 +
 spike_main/spike.cc | 6 +++++-
 3 files changed, 7 insertions(+), 2 deletions(-)

diff --git a/riscv/debug_module.cc b/riscv/debug_module.cc
index d297f14..f5c0c73 100644
--- a/riscv/debug_module.cc
+++ b/riscv/debug_module.cc
@@ -672,7 +672,7 @@ bool debug_module_t::perform_abstract_command()
         write32(debug_abstract, i++, csrw(S0, CSR_DSCRATCH0));
       }
-  } else if (regno >= 0x1020 && regno < 0x1040) {
+  } else if (regno >= 0x1020 && regno < 0x1040 && config.support_abstract_fpr_access) {
     unsigned fprnum = regno - 0x1020;
     if (write) {

diff --git a/riscv/debug_module.h b/riscv/debug_module.h
index 8230557..8fb335d 100644
--- a/riscv/debug_module.h
+++ b/riscv/debug_module.h
@@ -19,6 +19,7 @@ typedef struct {
   unsigned abstract_rti;
   bool support_hasel;
   bool support_abstract_csr_access;
+  bool support_abstract_fpr_access;
   bool support_haltgroups;
   bool support_impebreak;
 } debug_module_config_t;

diff --git a/spike_main/spike.cc b/spike_main/spike.cc
index 44db7cd..f23a2f4 100644
--- a/spike_main/spike.cc
+++ b/spike_main/spike.cc
@@ -72,7 +72,8 @@ static void help(int exit_code = 1)
   fprintf(stderr, " --dm-abstract-rti= Number of Run-Test/Idle cycles "
     "required for an abstract command to execute [default 0]\n");
   fprintf(stderr, " --dm-no-hasel Debug module supports hasel\n");
-  fprintf(stderr, " --dm-no-abstract-csr Debug module won't support abstract to authenticate\n");
+  fprintf(stderr, " --dm-no-abstract-csr Debug module won't support abstract CSR access\n");
+  fprintf(stderr, " --dm-no-abstract-fpr Debug module won't support abstract FPR access\n");
   fprintf(stderr, " --dm-no-halt-groups Debug module won't support halt groups\n");
   fprintf(stderr, " --dm-no-impebreak Debug module won't support implicit ebreak in program buffer\n");
   fprintf(stderr, " --blocksz= Cache block size (B) for CMO operations(powers of 2) [default 64]\n");
@@ -269,6 +270,7 @@ int main(int argc, char** argv)
     .abstract_rti = 0,
     .support_hasel = true,
     .support_abstract_csr_access = true,
+    .support_abstract_fpr_access = true,
     .support_haltgroups = true,
     .support_impebreak = true
   };
@@ -384,6 +386,8 @@ int main(int argc, char** argv)
                 [&](const char UNUSED *s){dm_config.support_hasel = false;});
   parser.option(0, "dm-no-abstract-csr", 0,
                 [&](const char UNUSED *s){dm_config.support_abstract_csr_access = false;});
+  parser.option(0, "dm-no-abstract-fpr", 0,
+                [&](const char UNUSED *s){dm_config.support_abstract_fpr_access = false;});
   parser.option(0, "dm-no-halt-groups", 0,
                 [&](const char UNUSED *s){dm_config.support_haltgroups = false;});
   parser.option(0, "log-commits", 0,
-- cgit v1.1
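The patch above follows a common pattern: a capability bit that defaults to "supported", a command-line switch that only ever clears it, and a guard at the point of use. A minimal sketch of that pattern is below; dm_config_t and allow_fpr_access are invented stand-ins, not the real debug_module_config_t or Spike's option parser.

    #include <cstring>

    // Hypothetical capability struct: everything defaults to supported.
    struct dm_config_t {
      bool support_abstract_csr_access = true;
      bool support_abstract_fpr_access = true;   // new capability bit
    };

    static bool allow_fpr_access(const dm_config_t& cfg, unsigned regno) {
      // Mirrors the guard added to perform_abstract_command(): the abstract
      // FPR register range is reachable only when the capability is advertised.
      return regno >= 0x1020 && regno < 0x1040 && cfg.support_abstract_fpr_access;
    }

    int main(int argc, char** argv) {
      dm_config_t cfg;
      for (int i = 1; i < argc; i++)
        if (std::strcmp(argv[i], "--dm-no-abstract-fpr") == 0)
          cfg.support_abstract_fpr_access = false;  // the option only clears the bit
      return allow_fpr_access(cfg, 0x1020) ? 0 : 1;
    }

Keeping the default at "supported" means existing users see no behavior change, while the negative option exposes the degraded configuration for testing, which is the stated motivation of the commit.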
From 8ff186bd0f8446ac6d129fef6836d192f2ce21b1 Mon Sep 17 00:00:00 2001
From: Andrew Waterman
Date: Mon, 10 Oct 2022 09:31:31 -0700
Subject: Fix disassembly of RV64 srai.u

The shift amount is 6 bits wide on RV64. As with the base ISA shifts, we
ignore XLEN and unconditionally disassemble the 6-bit immediate on RV32.
Partially reverts da93bdc435b985fd354e01c26470f64c33cecaa6
---
 disasm/disasm.cc | 8 +++++++-
 1 file changed, 7 insertions(+), 1 deletion(-)

diff --git a/disasm/disasm.cc b/disasm/disasm.cc
index a8ba91e..856a651 100644
--- a/disasm/disasm.cc
+++ b/disasm/disasm.cc
@@ -572,6 +572,11 @@ static void NOINLINE add_pitype5_insn(disassembler_t* d, const char* name, uint3
   d->add_insn(new disasm_insn_t(name, match, mask, {&xrd, &xrs1, &p_imm5}));
 }
+static void NOINLINE add_pitype6_insn(disassembler_t* d, const char* name, uint32_t match, uint32_t mask)
+{
+  d->add_insn(new disasm_insn_t(name, match, mask, {&xrd, &xrs1, &p_imm6}));
+}
+
 static void NOINLINE add_vector_v_insn(disassembler_t* d, const char* name, uint32_t match, uint32_t mask)
 {
   d->add_insn(new disasm_insn_t(name, match, mask, {&vd, &vs2, opt, &vm}));
@@ -1683,6 +1688,7 @@ void disassembler_t::add_instructions(const isa_parser_t* isa)
 #define DEFINE_PI3TYPE(code) add_pitype3_insn(this, #code, match_##code, mask_##code);
 #define DEFINE_PI4TYPE(code) add_pitype4_insn(this, #code, match_##code, mask_##code);
 #define DEFINE_PI5TYPE(code) add_pitype5_insn(this, #code, match_##code, mask_##code);
+#define DEFINE_PI6TYPE(code) add_pitype6_insn(this, #code, match_##code, mask_##code);
 #define DISASM_8_AND_16_RINSN(code) \
   DEFINE_RTYPE(code##8); \
@@ -1921,7 +1927,7 @@ void disassembler_t::add_instructions(const isa_parser_t* isa)
   DEFINE_RTYPE(msubr32);
   DEFINE_RTYPE(ave);
   DEFINE_RTYPE(sra_u);
-  DEFINE_PI5TYPE(srai_u);
+  DEFINE_PI6TYPE(srai_u);
   DEFINE_PI3TYPE(insb);
   DEFINE_RTYPE(maddr32)
-- cgit v1.1

From 86d9fe49eda1fff863a43e682015216a25cc72f3 Mon Sep 17 00:00:00 2001
From: Andrew Waterman
Date: Tue, 11 Oct 2022 15:19:00 -0700
Subject: Set tval on illegal subforms of aes64ks1i

h/t @YenHaoChen
---
 riscv/insns/aes64ks1i.h | 5 +----
 1 file changed, 1 insertion(+), 4 deletions(-)

diff --git a/riscv/insns/aes64ks1i.h b/riscv/insns/aes64ks1i.h
index 3ce3c3f..c7354d6 100644
--- a/riscv/insns/aes64ks1i.h
+++ b/riscv/insns/aes64ks1i.h
@@ -10,10 +10,7 @@ uint8_t round_consts [10] = {
 uint8_t enc_rcon = insn.rcon() ;
-if (enc_rcon > 0xA) {
-  // Invalid opcode.
-  throw trap_illegal_instruction(0);
-}
+require(enc_rcon <= 0xA);
 uint32_t temp = (RS1 >> 32) & 0xFFFFFFFF ;
 uint8_t rcon = 0 ;
-- cgit v1.1
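The aes64ks1i change replaces an explicit throw of trap_illegal_instruction(0) with require(); in Spike's instruction templates require() raises the illegal-instruction trap with the instruction's encoding, which is what ends up in tval. The sketch below shows that idea in isolation with invented names (REQUIRE, illegal_instruction, aes64ks1i_like); it is not Spike's actual require() macro.

    #include <cstdint>
    #include <stdexcept>

    // Hypothetical trap type carrying the value a core would write to [m|s]tval.
    struct illegal_instruction : std::runtime_error {
      uint64_t tval;
      explicit illegal_instruction(uint64_t bits)
        : std::runtime_error("illegal instruction"), tval(bits) {}
    };

    // Reject an encoding while reporting which instruction was at fault.
    #define REQUIRE(cond, insn_bits) \
      do { if (!(cond)) throw illegal_instruction(insn_bits); } while (0)

    uint64_t aes64ks1i_like(uint64_t insn_bits, unsigned rcon) {
      REQUIRE(rcon <= 0xA, insn_bits);  // rcon values above 0xA are reserved
      return rcon;                      // placeholder for the real key-schedule step
    }

Reporting the faulting encoding instead of zero gives trap handlers and debuggers something to decode, which is the point of the commit.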
From 37c8985013b1a15a8043f002e94f38e09cb55ef4 Mon Sep 17 00:00:00 2001
From: Andrew Waterman
Date: Thu, 13 Oct 2022 13:51:06 -0700
Subject: Remove unused field matched_t::data
---
 riscv/mmu.cc | 4 ++--
 riscv/triggers.h | 5 ++---
 2 files changed, 4 insertions(+), 5 deletions(-)

diff --git a/riscv/mmu.cc b/riscv/mmu.cc
index fdad05f..53c6fba 100644
--- a/riscv/mmu.cc
+++ b/riscv/mmu.cc
@@ -162,13 +162,13 @@ void mmu_t::check_triggers(triggers::operation_t operation, reg_t address, bool
       return;
     case triggers::MATCH_FIRE_BEFORE:
-      throw triggers::matched_t(operation, address, data, action);
+      throw triggers::matched_t(operation, address, action);
     case triggers::MATCH_FIRE_AFTER:
       // We want to take this exception on the next instruction. We check
       // whether to do so in the I$ refill path, so flush the I$.
       flush_icache();
-      matched_trigger = new triggers::matched_t(operation, address, data, action);
+      matched_trigger = new triggers::matched_t(operation, address, action);
       return;
   }
 }

diff --git a/riscv/triggers.h b/riscv/triggers.h
index b7512ef..05ce75e 100644
--- a/riscv/triggers.h
+++ b/riscv/triggers.h
@@ -31,12 +31,11 @@ typedef enum {
 class matched_t
 {
   public:
-    matched_t(triggers::operation_t operation, reg_t address, reg_t data, action_t action) :
-      operation(operation), address(address), data(data), action(action) {}
+    matched_t(triggers::operation_t operation, reg_t address, action_t action) :
+      operation(operation), address(address), action(action) {}
     triggers::operation_t operation;
     reg_t address;
-    reg_t data;
     action_t action;
 };
-- cgit v1.1

From 60c0c86c288d14c81177e6f56aa5c6b77e40fbec Mon Sep 17 00:00:00 2001
From: Parshintsev Anatoly
Date: Mon, 10 Oct 2022 15:01:32 +0300
Subject: Report error if an unsupported memory configuration is detected
---
 riscv/cfg.h | 16 ++++++++++------
 spike_main/spike.cc | 11 ++++++++---
 2 files changed, 18 insertions(+), 9 deletions(-)

diff --git a/riscv/cfg.h b/riscv/cfg.h
index 6369bd8..13dcf3a 100644
--- a/riscv/cfg.h
+++ b/riscv/cfg.h
@@ -32,18 +32,22 @@ private:
 class mem_cfg_t
 {
 public:
-  mem_cfg_t(reg_t base, reg_t size)
-    : base(base), size(size)
-  {
-    // The truth of these assertions should be ensured by whatever is creating
+  static bool check_if_supported(reg_t base, reg_t size) {
+    // The truth of these conditions should be ensured by whatever is creating
     // the regions in the first place, but we have them here to make sure that
     // we can't end up describing memory regions that don't make sense. They
    // ask that the page size is a multiple of the minimum page size, that the
    // page is aligned to the minimum page size, that the page is non-empty and
    // that the top address is still representable in a reg_t.
-    assert((size % PGSIZE == 0) &&
+    return (size % PGSIZE == 0) &&
            (base % PGSIZE == 0) &&
-           (base + size > base));
+           (base + size > base);
+  }
+
+  mem_cfg_t(reg_t base, reg_t size)
+    : base(base), size(size)
+  {
+    assert(mem_cfg_t::check_if_supported(base, size));
   }
 
   reg_t base;
diff --git a/spike_main/spike.cc b/spike_main/spike.cc
index f23a2f4..ecce0c5 100644
--- a/spike_main/spike.cc
+++ b/spike_main/spike.cc
@@ -174,15 +174,20 @@ static std::vector<mem_cfg_t> parse_mem_layout(const char* arg)
   if (size % PGSIZE != 0)
     size += PGSIZE - size % PGSIZE;
 
-  if (base + size < base)
-    help();
-
   if (size != size0) {
     fprintf(stderr, "Warning: the memory at [0x%llX, 0x%llX] has been realigned\n"
                     "to the %ld KiB page size: [0x%llX, 0x%llX]\n",
             base0, base0 + size0 - 1, long(PGSIZE / 1024), base, base + size - 1);
   }
 
+  if (!mem_cfg_t::check_if_supported(base, size)) {
+    fprintf(stderr, "unsupported memory region "
+                    "{base = 0x%llX, size = 0x%llX} specified\n",
+            (unsigned long long)base,
+            (unsigned long long)size);
+    exit(EXIT_FAILURE);
+  }
+
   res.push_back(mem_cfg_t(reg_t(base), reg_t(size)));
 
   if (!*p) break;
-- 
cgit v1.1


From 062ef8868033605b56269b185e3584ef2370ac12 Mon Sep 17 00:00:00 2001
From: Andrew Waterman
Date: Thu, 13 Oct 2022 13:57:08 -0700
Subject: In triggers, use optional instead of {has_data, data}

---
 riscv/mmu.cc      | 14 +++++++-------
 riscv/mmu.h       |  2 +-
 riscv/triggers.cc | 10 +++++-----
 riscv/triggers.h  |  7 ++++---
 4 files changed, 17 insertions(+), 16 deletions(-)

diff --git a/riscv/mmu.cc b/riscv/mmu.cc
index 53c6fba..c77b6b1 100644
--- a/riscv/mmu.cc
+++ b/riscv/mmu.cc
@@ -76,7 +76,7 @@ reg_t mmu_t::translate(reg_t addr, reg_t len, access_type type, uint32_t xlate_f
 
 tlb_entry_t mmu_t::fetch_slow_path(reg_t vaddr)
 {
-  check_triggers(triggers::OPERATION_EXECUTE, vaddr, false);
+  check_triggers(triggers::OPERATION_EXECUTE, vaddr);
 
   tlb_entry_t result;
   reg_t vpn = vaddr >> PGSHIFT;
@@ -93,7 +93,7 @@ tlb_entry_t mmu_t::fetch_slow_path(reg_t vaddr)
     result = tlb_data[vpn % TLB_ENTRIES];
   }
 
-  check_triggers(triggers::OPERATION_EXECUTE, vaddr, true, from_le(*(const uint16_t*)(result.host_offset + vaddr)));
+  check_triggers(triggers::OPERATION_EXECUTE, vaddr, from_le(*(const uint16_t*)(result.host_offset + vaddr)));
 
   return result;
 }
@@ -149,13 +149,13 @@ bool mmu_t::mmio_store(reg_t addr, size_t len, const uint8_t* bytes)
   return sim->mmio_store(addr, len, bytes);
 }
 
-void mmu_t::check_triggers(triggers::operation_t operation, reg_t address, bool has_data, reg_t data)
+void mmu_t::check_triggers(triggers::operation_t operation, reg_t address, std::optional<reg_t> data)
 {
   if (matched_trigger || !proc)
     return;
 
   triggers::action_t action;
-  auto match = proc->TM.memory_access_match(&action, operation, address, has_data, data);
+  auto match = proc->TM.memory_access_match(&action, operation, address, data);
 
   switch (match) {
     case triggers::MATCH_NONE:
@@ -197,7 +197,7 @@ void mmu_t::load_slow_path_intrapage(reg_t addr, reg_t len, uint8_t* bytes, uint
 
 void mmu_t::load_slow_path(reg_t addr, reg_t len, uint8_t* bytes, uint32_t xlate_flags, bool UNUSED require_alignment)
 {
-  check_triggers(triggers::OPERATION_LOAD, addr, false);
+  check_triggers(triggers::OPERATION_LOAD, addr);
 
   if ((addr & (len - 1)) == 0) {
     load_slow_path_intrapage(addr, len, bytes, xlate_flags);
@@ -216,7 +216,7 @@ void mmu_t::load_slow_path(reg_t addr, reg_t len, uint8_t* bytes, uint32_t xlate
 #endif
   }
 
-  check_triggers(triggers::OPERATION_LOAD, addr, true, reg_from_bytes(len, bytes));
+  check_triggers(triggers::OPERATION_LOAD, addr, reg_from_bytes(len, bytes));
 }
 
 void mmu_t::store_slow_path_intrapage(reg_t addr, reg_t len, const uint8_t* bytes, uint32_t xlate_flags, bool actually_store)
@@ -246,7 +246,7 @@ void mmu_t::store_slow_path_intrapage(reg_t addr, reg_t len, const uint8_t* byte
 void mmu_t::store_slow_path(reg_t addr, reg_t len, const uint8_t* bytes, uint32_t xlate_flags, bool actually_store, bool UNUSED require_alignment)
 {
   if (actually_store)
-    check_triggers(triggers::OPERATION_STORE, addr, true, reg_from_bytes(len, bytes));
+    check_triggers(triggers::OPERATION_STORE, addr, reg_from_bytes(len, bytes));
 
   if (addr & (len - 1)) {
     bool gva = ((proc) ? proc->state.v : false) || (RISCV_XLATE_VIRT & xlate_flags);
diff --git a/riscv/mmu.h b/riscv/mmu.h
index 01e74ef..da84adc 100644
--- a/riscv/mmu.h
+++ b/riscv/mmu.h
@@ -387,7 +387,7 @@ private:
   bool mmio_load(reg_t addr, size_t len, uint8_t* bytes);
   bool mmio_store(reg_t addr, size_t len, const uint8_t* bytes);
   bool mmio_ok(reg_t addr, access_type type);
-  void check_triggers(triggers::operation_t operation, reg_t address, bool has_data, reg_t data = 0);
+  void check_triggers(triggers::operation_t operation, reg_t address, std::optional<reg_t> data = std::nullopt);
   reg_t translate(reg_t addr, reg_t len, access_type type, uint32_t xlate_flags);
 
   // ITLB lookup
diff --git a/riscv/triggers.cc b/riscv/triggers.cc
index 5e2d1f6..90d9d54 100644
--- a/riscv/triggers.cc
+++ b/riscv/triggers.cc
@@ -101,7 +101,7 @@ bool mcontrol_t::simple_match(unsigned xlen, reg_t value) const {
   assert(0);
 }
 
-match_result_t mcontrol_t::memory_access_match(processor_t * const proc, operation_t operation, reg_t address, bool has_data, reg_t data) {
+match_result_t mcontrol_t::memory_access_match(processor_t * const proc, operation_t operation, reg_t address, std::optional<reg_t> data) {
   state_t * const state = proc->get_state();
   if ((operation == triggers::OPERATION_EXECUTE && !execute_bit) ||
       (operation == triggers::OPERATION_STORE && !store_bit) ||
@@ -114,9 +114,9 @@ match_result_t mcontrol_t::memory_access_match(processor_t * const proc, operati
 
   reg_t value;
   if (select) {
-    value = data;
-    if (!has_data)
+    if (!data.has_value())
       return MATCH_NONE;
+    value = *data;
   } else {
     value = address;
   }
@@ -152,7 +152,7 @@ module_t::~module_t() {
   }
 }
 
-match_result_t module_t::memory_access_match(action_t * const action, operation_t operation, reg_t address, bool has_data, reg_t data)
+match_result_t module_t::memory_access_match(action_t * const action, operation_t operation, reg_t address, std::optional<reg_t> data)
 {
   state_t * const state = proc->get_state();
   if (state->debug_mode)
@@ -172,7 +172,7 @@ match_result_t module_t::memory_access_match(action_t * const action, operation_
      * entire chain did not match. This is allowed by the spec, because the final
      * trigger in the chain will never get `hit` set unless the entire chain
      * matches. */
-    match_result_t result = triggers[i]->memory_access_match(proc, operation, address, has_data, data);
+    match_result_t result = triggers[i]->memory_access_match(proc, operation, address, data);
     if (result != MATCH_NONE && !triggers[i]->chain()) {
       *action = triggers[i]->action;
       return result;
diff --git a/riscv/triggers.h b/riscv/triggers.h
index 05ce75e..7b40b8f 100644
--- a/riscv/triggers.h
+++ b/riscv/triggers.h
@@ -2,6 +2,7 @@
 #define _RISCV_TRIGGERS_H
 
 #include <vector>
+#include <optional>
 
 #include "decode.h"
 
@@ -42,7 +43,7 @@ class matched_t
 class trigger_t {
 public:
   virtual match_result_t memory_access_match(processor_t * const proc,
-      operation_t operation, reg_t address, bool has_data, reg_t data=0) = 0;
+      operation_t operation, reg_t address, std::optional<reg_t> data) = 0;
 
   virtual reg_t tdata1_read(const processor_t * const proc) const noexcept = 0;
   virtual bool tdata1_write(processor_t * const proc, const reg_t val) noexcept = 0;
@@ -87,7 +88,7 @@ public:
   virtual bool load() const override { return load_bit; }
 
   virtual match_result_t memory_access_match(processor_t * const proc,
-      operation_t operation, reg_t address, bool has_data, reg_t data=0) override;
+      operation_t operation, reg_t address, std::optional<reg_t> data) override;
 
 private:
   bool simple_match(unsigned xlen, reg_t value) const;
@@ -114,7 +115,7 @@ public:
   unsigned count() const { return triggers.size(); }
 
   match_result_t memory_access_match(action_t * const action,
-      operation_t operation, reg_t address, bool has_data, reg_t data=0);
+      operation_t operation, reg_t address, std::optional<reg_t> data);
 
   reg_t tdata1_read(const processor_t * const proc, unsigned index) const noexcept;
   bool tdata1_write(processor_t * const proc, unsigned index, const reg_t val) noexcept;
-- 
cgit v1.1


From 7e8d1e6f29a0e6b9f8b1b65a88b5dc87c25a4f9a Mon Sep 17 00:00:00 2001
From: Jerry Zhao
Date: Fri, 14 Oct 2022 11:31:21 -0700
Subject: Support command-line configuration of number of pmpregions

---
 riscv/cfg.h         | 3 +++
 riscv/dts.cc        | 3 ++-
 riscv/dts.h         | 1 +
 riscv/sim.cc        | 2 +-
 spike_main/spike.cc | 3 +++
 5 files changed, 10 insertions(+), 2 deletions(-)

diff --git a/riscv/cfg.h b/riscv/cfg.h
index 13dcf3a..dbdb58b 100644
--- a/riscv/cfg.h
+++ b/riscv/cfg.h
@@ -61,6 +61,7 @@ public:
         const char *default_bootargs, const char *default_isa,
         const char *default_priv, const char *default_varch,
+        const reg_t default_pmpregions,
         const std::vector<mem_cfg_t> &default_mem_layout,
         const std::vector<size_t> default_hartids,
         bool default_real_time_clint)
@@ -69,6 +70,7 @@ public:
       isa(default_isa),
       priv(default_priv),
       varch(default_varch),
+      pmpregions(default_pmpregions),
       mem_layout(default_mem_layout),
       hartids(default_hartids),
       explicit_hartids(false),
@@ -80,6 +82,7 @@ public:
   cfg_arg_t<const char *> isa;
   cfg_arg_t<const char *> priv;
   cfg_arg_t<const char *> varch;
+  reg_t pmpregions;
   cfg_arg_t<std::vector<mem_cfg_t>> mem_layout;
   std::optional<reg_t> start_pc;
   cfg_arg_t<std::vector<size_t>> hartids;
diff --git a/riscv/dts.cc b/riscv/dts.cc
index 5d37463..9937d57 100644
--- a/riscv/dts.cc
+++ b/riscv/dts.cc
@@ -14,6 +14,7 @@
 std::string make_dts(size_t insns_per_rtc_tick, size_t cpu_hz,
                      reg_t initrd_start, reg_t initrd_end,
                      const char* bootargs,
+                     size_t pmpregions,
                      std::vector<processor_t*> procs,
                      std::vector<std::pair<reg_t, mem_t*>> mems)
 {
@@ -57,7 +58,7 @@ std::string make_dts(size_t insns_per_rtc_tick, size_t cpu_hz,
                  "      compatible = \"riscv\";\n"
                  "      riscv,isa = \"" << procs[i]->get_isa().get_isa_string() << "\";\n"
                  "      mmu-type = \"riscv," << (procs[i]->get_isa().get_max_xlen() <= 32 ? "sv32" : "sv57") << "\";\n"
-                 "      riscv,pmpregions = <16>;\n"
+                 "      riscv,pmpregions = <" << pmpregions << ">;\n"
                  "      riscv,pmpgranularity = <4>;\n"
                  "      clock-frequency = <" << cpu_hz << ">;\n"
                  "      CPU" << i << "_intc: interrupt-controller {\n"
diff --git a/riscv/dts.h b/riscv/dts.h
index 6208151..1878ca1 100644
--- a/riscv/dts.h
+++ b/riscv/dts.h
@@ -10,6 +10,7 @@
 std::string make_dts(size_t insns_per_rtc_tick, size_t cpu_hz,
                      reg_t initrd_start, reg_t initrd_end,
                      const char* bootargs,
+                     size_t pmpregions,
                      std::vector<processor_t*> procs,
                      std::vector<std::pair<reg_t, mem_t*>> mems);
diff --git a/riscv/sim.cc b/riscv/sim.cc
index 0ef13b8..8a15560 100644
--- a/riscv/sim.cc
+++ b/riscv/sim.cc
@@ -303,7 +303,7 @@ void sim_t::make_dtb()
     std::pair<reg_t, reg_t> initrd_bounds = cfg->initrd_bounds();
     dts = make_dts(INSNS_PER_RTC_TICK, CPU_HZ,
                    initrd_bounds.first, initrd_bounds.second,
-                   cfg->bootargs(), procs, mems);
+                   cfg->bootargs(), cfg->pmpregions, procs, mems);
     dtb = dts_compile(dts);
   }
 
diff --git a/spike_main/spike.cc b/spike_main/spike.cc
index ecce0c5..933f626 100644
--- a/spike_main/spike.cc
+++ b/spike_main/spike.cc
@@ -36,6 +36,7 @@ static void help(int exit_code = 1)
   fprintf(stderr, "  --log=<name>              File name for option -l\n");
   fprintf(stderr, "  --debug-cmd=<name>        Read commands from file (use with -d)\n");
   fprintf(stderr, "  --isa=<name>              RISC-V ISA string [default %s]\n", DEFAULT_ISA);
+  fprintf(stderr, "  --pmpregions=<n>          Number of PMP regions [default 16]\n");
   fprintf(stderr, "  --priv=<m|mu|msu>         RISC-V privilege modes supported [default %s]\n", DEFAULT_PRIV);
   fprintf(stderr, "  --varch=<name>            RISC-V Vector uArch string [default %s]\n", DEFAULT_VARCH);
   fprintf(stderr, "  --pc=<address>            Override ELF entry point\n");
@@ -286,6 +287,7 @@ int main(int argc, char** argv)
             /*default_isa=*/DEFAULT_ISA,
             /*default_priv=*/DEFAULT_PRIV,
             /*default_varch=*/DEFAULT_VARCH,
+            /*default_pmpregions=*/16,
             /*default_mem_layout=*/parse_mem_layout("2048"),
             /*default_hartids=*/std::vector<size_t>(),
             /*default_real_time_clint=*/false);
@@ -357,6 +359,7 @@ int main(int argc, char** argv)
   parser.option(0, "l2", 1, [&](const char* s){l2.reset(cache_sim_t::construct(s, "L2$"));});
   parser.option(0, "log-cache-miss", 0, [&](const char UNUSED *s){log_cache = true;});
   parser.option(0, "isa", 1, [&](const char* s){cfg.isa = s;});
+  parser.option(0, "pmpregions", 1, [&](const char* s){cfg.pmpregions = atoul_safe(s);});
   parser.option(0, "priv", 1, [&](const char* s){cfg.priv = s;});
   parser.option(0, "varch", 1, [&](const char* s){cfg.varch = s;});
   parser.option(0, "device", 1, device_parser);
-- 
cgit v1.1


From 5cf439b24e945db47edf6e259044c923384ccdfd Mon Sep 17 00:00:00 2001
From: Jerry Zhao
Date: Fri, 14 Oct 2022 11:41:58 -0700
Subject: Add dump memory command to interactive mode

---
 riscv/devices.cc     | 13 +++++++++++++
 riscv/devices.h      |  1 +
 riscv/interactive.cc | 14 ++++++++++++++
 riscv/sim.h          |  1 +
 4 files changed, 29 insertions(+)

diff --git a/riscv/devices.cc b/riscv/devices.cc
index eb677a5..81b232d 100644
--- a/riscv/devices.cc
+++ b/riscv/devices.cc
@@ -137,3 +137,16 @@ char* mem_t::contents(reg_t addr) {
   }
   return search->second + pgoff;
 }
+
+void mem_t::dump(std::ostream& o) {
+  const char empty[PGSIZE] = {0};
+  for (reg_t i = 0; i < sz; i += PGSIZE) {
+    reg_t ppn = i >> PGSHIFT;
+    auto search = sparse_memory_map.find(ppn);
+    if (search == sparse_memory_map.end()) {
+      o.write(empty, PGSIZE);
+    } else {
+      o.write(sparse_memory_map[ppn], PGSIZE);
+    }
+  }
+}
diff --git a/riscv/devices.h b/riscv/devices.h
index 9200f29..8907b87 100644
--- a/riscv/devices.h
+++ b/riscv/devices.h
@@ -43,6 +43,7 @@ class mem_t : public abstract_device_t {
   bool store(reg_t addr, size_t len, const uint8_t* bytes) { return load_store(addr, len, const_cast<uint8_t*>(bytes), true); }
   char* contents(reg_t addr);
   reg_t size() { return sz; }
+  void dump(std::ostream& o);
 
 private:
   bool load_store(reg_t addr, size_t len, uint8_t* bytes, bool store);
diff --git a/riscv/interactive.cc b/riscv/interactive.cc
index bba4540..ffc706b 100644
--- a/riscv/interactive.cc
+++ b/riscv/interactive.cc
@@ -318,6 +318,7 @@ void sim_t::interactive()
   funcs["until"] = &sim_t::interactive_until_silent;
   funcs["untiln"] = &sim_t::interactive_until_noisy;
   funcs["while"] = &sim_t::interactive_until_silent;
+  funcs["dump"] = &sim_t::interactive_dumpmems;
   funcs["quit"] = &sim_t::interactive_quit;
   funcs["q"] = funcs["quit"];
   funcs["help"] = &sim_t::interactive_help;
@@ -395,6 +396,7 @@ void sim_t::interactive_help(const std::string& cmd, const std::vector<std::string>& args)
     "pc <core>                       # Show current PC in <core>\n"
     "mem [core] <hex addr>           # Show contents of virtual memory <hex addr> in [core] (physical memory <hex addr> if omitted)\n"
     "str [core] <hex addr>           # Show NUL-terminated C string at virtual address <hex addr> in [core] (physical address <hex addr> if omitted)\n"
+    "dump                            # Dump physical memory to binary files\n"
     "until reg <core> <reg> <val>    # Stop when <reg> in <core> hits <val>\n"
     "untiln reg <core> <reg> <val>   # Run noisy and stop when <reg> in <core> hits <val>\n"
    "until pc <core> <val>           # Stop when PC in <core> hits <val>\n"
@@ -774,3 +776,15 @@ void sim_t::interactive_until(const std::string& cmd, const std::vector<std::string>& args, bool noisy)
     }
   }
 }
+
+void sim_t::interactive_dumpmems(const std::string& cmd, const std::vector<std::string>& args)
+{
+  for (unsigned i = 0; i < mems.size(); i++) {
+    std::stringstream mem_fname;
+    mem_fname << "mem.0x" << std::hex << mems[i].first << ".bin";
+
+    std::ofstream mem_file(mem_fname.str());
+    mems[i].second->dump(mem_file);
+    mem_file.close();
+  }
+}
diff --git a/riscv/sim.h b/riscv/sim.h
index c355f31..5787d5d 100644
--- a/riscv/sim.h
+++ b/riscv/sim.h
@@ -136,6 +136,7 @@ private:
   void interactive_pc(const std::string& cmd, const std::vector<std::string>& args);
   void interactive_mem(const std::string& cmd, const std::vector<std::string>& args);
   void interactive_str(const std::string& cmd, const std::vector<std::string>& args);
+  void interactive_dumpmems(const std::string& cmd, const std::vector<std::string>& args);
   void interactive_until(const std::string& cmd, const std::vector<std::string>& args, bool noisy);
   void interactive_until_silent(const std::string& cmd, const std::vector<std::string>& args);
   void interactive_until_noisy(const std::string& cmd, const std::vector<std::string>& args);
-- 
cgit v1.1


From 03be4ae6c7b8e9865083b61427ff9724c7706fcf Mon Sep 17 00:00:00 2001
From: Jerry Zhao
Date: Fri, 14 Oct 2022 13:40:56 -0700
Subject: Add interactive mode commands to read clint mtime/mtimecmp

---
 riscv/devices.h      |  2 ++
 riscv/interactive.cc | 23 +++++++++++++++++++++
 riscv/sim.h          |  2 ++
 3 files changed, 27 insertions(+)

diff --git a/riscv/devices.h b/riscv/devices.h
index 8907b87..08e9c45 100644
--- a/riscv/devices.h
+++ b/riscv/devices.h
@@ -59,6 +59,8 @@ class clint_t : public abstract_device_t {
   bool store(reg_t addr, size_t len, const uint8_t* bytes);
   size_t size() { return CLINT_SIZE; }
   void increment(reg_t inc);
+  uint64_t get_mtimecmp(reg_t hartid) { return mtimecmp[hartid]; }
+  uint64_t get_mtime() { return mtime; }
 private:
   typedef uint64_t mtime_t;
   typedef uint64_t mtimecmp_t;
diff --git a/riscv/interactive.cc b/riscv/interactive.cc
index ffc706b..cf95bf6 100644
--- a/riscv/interactive.cc
+++ b/riscv/interactive.cc
@@ -315,6 +315,8 @@ void sim_t::interactive()
   funcs["pc"] = &sim_t::interactive_pc;
   funcs["mem"] = &sim_t::interactive_mem;
   funcs["str"] = &sim_t::interactive_str;
+  funcs["mtime"] = &sim_t::interactive_mtime;
+  funcs["mtimecmp"] = &sim_t::interactive_mtimecmp;
   funcs["until"] = &sim_t::interactive_until_silent;
   funcs["untiln"] = &sim_t::interactive_until_noisy;
   funcs["while"] = &sim_t::interactive_until_silent;
@@ -397,6 +399,8 @@ void sim_t::interactive_help(const std::string& cmd, const std::vector<std::string>& args)
     "mem [core] <hex addr>           # Show contents of virtual memory <hex addr> in [core] (physical memory <hex addr> if omitted)\n"
     "str [core] <hex addr>           # Show NUL-terminated C string at virtual address <hex addr> in [core] (physical address <hex addr> if omitted)\n"
     "dump                            # Dump physical memory to binary files\n"
+    "mtime                           # Show mtime\n"
+    "mtimecmp <core>                 # Show mtimecmp for <core>\n"
     "until reg <core> <reg> <val>    # Stop when <reg> in <core> hits <val>\n"
     "untiln reg <core> <reg> <val>   # Run noisy and stop when <reg> in <core> hits <val>\n"
     "until pc <core> <val>           # Stop when PC in <core> hits <val>\n"
@@ -788,3 +792,22 @@ void sim_t::interactive_dumpmems(const std::string& cmd, const std::vector<std::string>& args)
     mem_file.close();
   }
 }
+
+void sim_t::interactive_mtime(const std::string& cmd, const std::vector<std::string>& args)
+{
+  std::ostream out(sout_.rdbuf());
+  out << std::hex << std::setfill('0') << "0x" << std::setw(16)
+      << clint->get_mtime() << std::endl;
+}
+
+void sim_t::interactive_mtimecmp(const std::string& cmd, const std::vector<std::string>& args)
+{
+  if (args.size() != 1)
+    throw trap_interactive();
+
+  processor_t *p = get_core(args[0]);
+  std::ostream out(sout_.rdbuf());
+  out << std::hex << std::setfill('0') << "0x" << std::setw(16)
+      << clint->get_mtimecmp(p->get_id()) << std::endl;
+}
+
diff --git a/riscv/sim.h b/riscv/sim.h
index 5787d5d..c7dbd47 100644
--- a/riscv/sim.h
+++ b/riscv/sim.h
@@ -137,6 +137,8 @@ private:
   void interactive_mem(const std::string& cmd, const std::vector<std::string>& args);
   void interactive_str(const std::string& cmd, const std::vector<std::string>& args);
   void interactive_dumpmems(const std::string& cmd, const std::vector<std::string>& args);
+  void interactive_mtime(const std::string& cmd, const std::vector<std::string>& args);
+  void interactive_mtimecmp(const std::string& cmd, const std::vector<std::string>& args);
   void interactive_until(const std::string& cmd, const std::vector<std::string>& args, bool noisy);
   void interactive_until_silent(const std::string& cmd, const std::vector<std::string>& args);
   void interactive_until_noisy(const std::string& cmd, const std::vector<std::string>& args);
-- 
cgit v1.1
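
A minimal standalone sketch of the region check introduced in the "Report error if an
unsupported memory configuration is detected" patch above, for readers who want to try the
logic outside of Spike. It mirrors the three conditions that mem_cfg_t::check_if_supported
evaluates (page-multiple size, page-aligned base, no wrap past the top of the address space);
the reg_t typedef and PGSIZE constant below are local stand-ins rather than the definitions
from the Spike headers, so the snippet compiles on its own and is illustrative only.

#include <cassert>
#include <cstdint>
#include <cstdio>

typedef uint64_t reg_t;            // local stand-in for Spike's reg_t
static const reg_t PGSIZE = 4096;  // local stand-in for Spike's minimum page size

// Same three conditions as mem_cfg_t::check_if_supported in riscv/cfg.h.
static bool check_if_supported(reg_t base, reg_t size) {
  return (size % PGSIZE == 0) &&   // size is a whole number of pages
         (base % PGSIZE == 0) &&   // base is page-aligned
         (base + size > base);     // region is non-empty and does not wrap around
}

int main() {
  assert(check_if_supported(0x80000000, 0x10000000));  // a typical DRAM region

  // A region whose end wraps past 2^64 fails the check; after the patch, spike
  // reports it as unsupported and exits instead of tripping an assertion.
  reg_t base = 0x80000000, size = 0xffffffff80000000;
  if (!check_if_supported(base, size))
    std::printf("unsupported memory region {base = 0x%llx, size = 0x%llx} specified\n",
                (unsigned long long)base, (unsigned long long)size);
  return 0;
}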