Diffstat (limited to 'target/riscv')
-rw-r--r--  target/riscv/Kconfig                           |    4
-rw-r--r--  target/riscv/arch_dump.c                       |    2
-rw-r--r--  target/riscv/bitmanip_helper.c                 |    2
-rw-r--r--  target/riscv/cpu-param.h                       |   14
-rw-r--r--  target/riscv/cpu-qom.h                         |    8
-rw-r--r--  target/riscv/cpu.c                             | 1271
-rw-r--r--  target/riscv/cpu.h                             |  189
-rw-r--r--  target/riscv/cpu_bits.h                        |  234
-rw-r--r--  target/riscv/cpu_cfg.h                         |  156
-rw-r--r--  target/riscv/cpu_cfg_fields.h.inc              |  170
-rw-r--r--  target/riscv/cpu_helper.c                      |  954
-rw-r--r--  target/riscv/cpu_user.h                        |    1
-rw-r--r--  target/riscv/crypto_helper.c                   |    1
-rw-r--r--  target/riscv/csr.c                             | 1787
-rw-r--r--  target/riscv/debug.c                           |  131
-rw-r--r--  target/riscv/debug.h                           |    3
-rw-r--r--  target/riscv/fpu_helper.c                      |    1
-rw-r--r--  target/riscv/gdbstub.c                         |   29
-rw-r--r--  target/riscv/helper.h                          |    3
-rw-r--r--  target/riscv/insn16.decode                     |    4
-rw-r--r--  target/riscv/insn32.decode                     |   49
-rw-r--r--  target/riscv/insn_trans/trans_privileged.c.inc |   46
-rw-r--r--  target/riscv/insn_trans/trans_rva.c.inc        |    4
-rw-r--r--  target/riscv/insn_trans/trans_rvbf16.c.inc     |    9
-rw-r--r--  target/riscv/insn_trans/trans_rvd.c.inc        |   22
-rw-r--r--  target/riscv/insn_trans/trans_rvf.c.inc        |    8
-rw-r--r--  target/riscv/insn_trans/trans_rvh.c.inc        |    8
-rw-r--r--  target/riscv/insn_trans/trans_rvi.c.inc        |  148
-rw-r--r--  target/riscv/insn_trans/trans_rvv.c.inc        |  647
-rw-r--r--  target/riscv/insn_trans/trans_rvvk.c.inc       |   10
-rw-r--r--  target/riscv/insn_trans/trans_rvzacas.c.inc    |    4
-rw-r--r--  target/riscv/insn_trans/trans_rvzce.c.inc      |   21
-rw-r--r--  target/riscv/insn_trans/trans_rvzfh.c.inc      |    4
-rw-r--r--  target/riscv/insn_trans/trans_rvzicfiss.c.inc  |  131
-rw-r--r--  target/riscv/insn_trans/trans_svinval.c.inc    |    6
-rw-r--r--  target/riscv/internals.h                       |   69
-rw-r--r--  target/riscv/kvm/kvm-cpu.c                     |  471
-rw-r--r--  target/riscv/kvm/kvm_riscv.h                   |    4
-rw-r--r--  target/riscv/m128_helper.c                     |    1
-rw-r--r--  target/riscv/machine.c                         |   87
-rw-r--r--  target/riscv/monitor.c                         |    1
-rw-r--r--  target/riscv/op_helper.c                       |  218
-rw-r--r--  target/riscv/pmp.c                             |  171
-rw-r--r--  target/riscv/pmp.h                             |    4
-rw-r--r--  target/riscv/pmu.c                             |   10
-rw-r--r--  target/riscv/riscv-qmp-cmds.c                  |   10
-rw-r--r--  target/riscv/tcg/tcg-cpu.c                     |  358
-rw-r--r--  target/riscv/tcg/tcg-cpu.h                     |    2
-rw-r--r--  target/riscv/th_csr.c                          |   30
-rw-r--r--  target/riscv/time_helper.c                     |    1
-rw-r--r--  target/riscv/trace-events                      |    3
-rw-r--r--  target/riscv/translate.c                       |  162
-rw-r--r--  target/riscv/vcrypto_helper.c                  |   33
-rw-r--r--  target/riscv/vector_helper.c                   |  826
-rw-r--r--  target/riscv/vector_internals.c                |    4
-rw-r--r--  target/riscv/vector_internals.h                |   13
-rw-r--r--  target/riscv/zce_helper.c                      |    3
57 files changed, 6222 insertions, 2340 deletions
diff --git a/target/riscv/Kconfig b/target/riscv/Kconfig
index 5f30df2..11bc09b 100644
--- a/target/riscv/Kconfig
+++ b/target/riscv/Kconfig
@@ -1,9 +1,9 @@
config RISCV32
bool
- select ARM_COMPATIBLE_SEMIHOSTING # for do_common_semihosting()
+ select ARM_COMPATIBLE_SEMIHOSTING if TCG
select DEVICE_TREE # needed by boot.c
config RISCV64
bool
- select ARM_COMPATIBLE_SEMIHOSTING # for do_common_semihosting()
+ select ARM_COMPATIBLE_SEMIHOSTING if TCG
select DEVICE_TREE # needed by boot.c
diff --git a/target/riscv/arch_dump.c b/target/riscv/arch_dump.c
index 434c8a3..12b6879 100644
--- a/target/riscv/arch_dump.c
+++ b/target/riscv/arch_dump.c
@@ -19,7 +19,7 @@
#include "qemu/osdep.h"
#include "cpu.h"
#include "elf.h"
-#include "sysemu/dump.h"
+#include "system/dump.h"
/* struct user_regs_struct from arch/riscv/include/uapi/asm/ptrace.h */
struct riscv64_user_regs {
diff --git a/target/riscv/bitmanip_helper.c b/target/riscv/bitmanip_helper.c
index b99c4a3..e9c8d7f 100644
--- a/target/riscv/bitmanip_helper.c
+++ b/target/riscv/bitmanip_helper.c
@@ -20,7 +20,7 @@
#include "qemu/osdep.h"
#include "qemu/host-utils.h"
-#include "exec/exec-all.h"
+#include "exec/target_long.h"
#include "exec/helper-proto.h"
#include "tcg/tcg.h"
diff --git a/target/riscv/cpu-param.h b/target/riscv/cpu-param.h
index 1fbd649..cfdc67c 100644
--- a/target/riscv/cpu-param.h
+++ b/target/riscv/cpu-param.h
@@ -2,22 +2,28 @@
* RISC-V cpu parameters for qemu.
*
* Copyright (c) 2017-2018 SiFive, Inc.
- * SPDX-License-Identifier: GPL-2.0+
+ * SPDX-License-Identifier: GPL-2.0-or-later
*/
#ifndef RISCV_CPU_PARAM_H
#define RISCV_CPU_PARAM_H
#if defined(TARGET_RISCV64)
-# define TARGET_LONG_BITS 64
# define TARGET_PHYS_ADDR_SPACE_BITS 56 /* 44-bit PPN */
# define TARGET_VIRT_ADDR_SPACE_BITS 48 /* sv48 */
#elif defined(TARGET_RISCV32)
-# define TARGET_LONG_BITS 32
# define TARGET_PHYS_ADDR_SPACE_BITS 34 /* 22-bit PPN */
# define TARGET_VIRT_ADDR_SPACE_BITS 32 /* sv32 */
#endif
#define TARGET_PAGE_BITS 12 /* 4 KiB Pages */
+
+/*
+ * RISC-V-specific extra insn start words:
+ * 1: Original instruction opcode
+ * 2: more information about instruction
+ */
+#define TARGET_INSN_START_EXTRA_WORDS 2
+
/*
* The current MMU Modes are:
* - U mode 0b000
@@ -28,6 +34,4 @@
* - M mode HLV/HLVX/HSV 0b111
*/
-#define TCG_GUEST_DEFAULT_MO 0
-
#endif
diff --git a/target/riscv/cpu-qom.h b/target/riscv/cpu-qom.h
index 3670cfe..1ee05eb 100644
--- a/target/riscv/cpu-qom.h
+++ b/target/riscv/cpu-qom.h
@@ -29,8 +29,8 @@
#define RISCV_CPU_TYPE_SUFFIX "-" TYPE_RISCV_CPU
#define RISCV_CPU_TYPE_NAME(name) (name RISCV_CPU_TYPE_SUFFIX)
-#define TYPE_RISCV_CPU_ANY RISCV_CPU_TYPE_NAME("any")
#define TYPE_RISCV_CPU_MAX RISCV_CPU_TYPE_NAME("max")
+#define TYPE_RISCV_CPU_MAX32 RISCV_CPU_TYPE_NAME("max32")
#define TYPE_RISCV_CPU_BASE32 RISCV_CPU_TYPE_NAME("rv32")
#define TYPE_RISCV_CPU_BASE64 RISCV_CPU_TYPE_NAME("rv64")
#define TYPE_RISCV_CPU_BASE128 RISCV_CPU_TYPE_NAME("x-rv128")
@@ -40,15 +40,21 @@
#define TYPE_RISCV_CPU_RV64E RISCV_CPU_TYPE_NAME("rv64e")
#define TYPE_RISCV_CPU_RVA22U64 RISCV_CPU_TYPE_NAME("rva22u64")
#define TYPE_RISCV_CPU_RVA22S64 RISCV_CPU_TYPE_NAME("rva22s64")
+#define TYPE_RISCV_CPU_RVA23U64 RISCV_CPU_TYPE_NAME("rva23u64")
+#define TYPE_RISCV_CPU_RVA23S64 RISCV_CPU_TYPE_NAME("rva23s64")
#define TYPE_RISCV_CPU_IBEX RISCV_CPU_TYPE_NAME("lowrisc-ibex")
#define TYPE_RISCV_CPU_SHAKTI_C RISCV_CPU_TYPE_NAME("shakti-c")
+#define TYPE_RISCV_CPU_SIFIVE_E RISCV_CPU_TYPE_NAME("sifive-e")
#define TYPE_RISCV_CPU_SIFIVE_E31 RISCV_CPU_TYPE_NAME("sifive-e31")
#define TYPE_RISCV_CPU_SIFIVE_E34 RISCV_CPU_TYPE_NAME("sifive-e34")
#define TYPE_RISCV_CPU_SIFIVE_E51 RISCV_CPU_TYPE_NAME("sifive-e51")
+#define TYPE_RISCV_CPU_SIFIVE_U RISCV_CPU_TYPE_NAME("sifive-u")
#define TYPE_RISCV_CPU_SIFIVE_U34 RISCV_CPU_TYPE_NAME("sifive-u34")
#define TYPE_RISCV_CPU_SIFIVE_U54 RISCV_CPU_TYPE_NAME("sifive-u54")
#define TYPE_RISCV_CPU_THEAD_C906 RISCV_CPU_TYPE_NAME("thead-c906")
#define TYPE_RISCV_CPU_VEYRON_V1 RISCV_CPU_TYPE_NAME("veyron-v1")
+#define TYPE_RISCV_CPU_TT_ASCALON RISCV_CPU_TYPE_NAME("tt-ascalon")
+#define TYPE_RISCV_CPU_XIANGSHAN_NANHU RISCV_CPU_TYPE_NAME("xiangshan-nanhu")
#define TYPE_RISCV_CPU_HOST RISCV_CPU_TYPE_NAME("host")
OBJECT_DECLARE_CPU_TYPE(RISCVCPU, RISCVCPUClass, RISCV_CPU)
diff --git a/target/riscv/cpu.c b/target/riscv/cpu.c
index a90808a..629ac37 100644
--- a/target/riscv/cpu.c
+++ b/target/riscv/cpu.c
@@ -24,7 +24,6 @@
#include "cpu.h"
#include "cpu_vendorid.h"
#include "internals.h"
-#include "exec/exec-all.h"
#include "qapi/error.h"
#include "qapi/visitor.h"
#include "qemu/error-report.h"
@@ -32,9 +31,9 @@
#include "hw/core/qdev-prop-internal.h"
#include "migration/vmstate.h"
#include "fpu/softfloat-helpers.h"
-#include "sysemu/device_tree.h"
-#include "sysemu/kvm.h"
-#include "sysemu/tcg.h"
+#include "system/device_tree.h"
+#include "system/kvm.h"
+#include "system/tcg.h"
#include "kvm/kvm_riscv.h"
#include "tcg/tcg-cpu.h"
#include "tcg/tcg.h"
@@ -42,7 +41,7 @@
/* RISC-V CPU definitions */
static const char riscv_single_letter_exts[] = "IEMAFDQCBPVH";
const uint32_t misa_bits[] = {RVI, RVE, RVM, RVA, RVF, RVD, RVV,
- RVC, RVS, RVU, RVH, RVJ, RVG, RVB, 0};
+ RVC, RVS, RVU, RVH, RVG, RVB, 0};
/*
* From vector_helper.c
@@ -74,6 +73,13 @@ bool riscv_cpu_option_set(const char *optname)
return g_hash_table_contains(general_user_opts, optname);
}
+static void riscv_cpu_cfg_merge(RISCVCPUConfig *dest, const RISCVCPUConfig *src)
+{
+#define BOOL_FIELD(x) dest->x |= src->x;
+#define TYPED_FIELD(type, x, default_) if (src->x != default_) dest->x = src->x;
+#include "cpu_cfg_fields.h.inc"
+}
+
#define ISA_EXT_DATA_ENTRY(_name, _min_ver, _prop) \
{#_name, _min_ver, CPU_CFG_OFFSET(_prop)}
@@ -105,7 +111,9 @@ const RISCVIsaExtData isa_edata_arr[] = {
ISA_EXT_DATA_ENTRY(ziccamoa, PRIV_VERSION_1_11_0, has_priv_1_11),
ISA_EXT_DATA_ENTRY(ziccif, PRIV_VERSION_1_11_0, has_priv_1_11),
ISA_EXT_DATA_ENTRY(zicclsm, PRIV_VERSION_1_11_0, has_priv_1_11),
- ISA_EXT_DATA_ENTRY(ziccrse, PRIV_VERSION_1_11_0, has_priv_1_11),
+ ISA_EXT_DATA_ENTRY(ziccrse, PRIV_VERSION_1_11_0, ext_ziccrse),
+ ISA_EXT_DATA_ENTRY(zicfilp, PRIV_VERSION_1_12_0, ext_zicfilp),
+ ISA_EXT_DATA_ENTRY(zicfiss, PRIV_VERSION_1_13_0, ext_zicfiss),
ISA_EXT_DATA_ENTRY(zicond, PRIV_VERSION_1_12_0, ext_zicond),
ISA_EXT_DATA_ENTRY(zicntr, PRIV_VERSION_1_12_0, ext_zicntr),
ISA_EXT_DATA_ENTRY(zicsr, PRIV_VERSION_1_10_0, ext_zicsr),
@@ -115,7 +123,7 @@ const RISCVIsaExtData isa_edata_arr[] = {
ISA_EXT_DATA_ENTRY(zihpm, PRIV_VERSION_1_12_0, ext_zihpm),
ISA_EXT_DATA_ENTRY(zimop, PRIV_VERSION_1_13_0, ext_zimop),
ISA_EXT_DATA_ENTRY(zmmul, PRIV_VERSION_1_12_0, ext_zmmul),
- ISA_EXT_DATA_ENTRY(za64rs, PRIV_VERSION_1_12_0, has_priv_1_11),
+ ISA_EXT_DATA_ENTRY(za64rs, PRIV_VERSION_1_12_0, has_priv_1_12),
ISA_EXT_DATA_ENTRY(zaamo, PRIV_VERSION_1_12_0, ext_zaamo),
ISA_EXT_DATA_ENTRY(zabha, PRIV_VERSION_1_13_0, ext_zabha),
ISA_EXT_DATA_ENTRY(zacas, PRIV_VERSION_1_12_0, ext_zacas),
@@ -181,22 +189,47 @@ const RISCVIsaExtData isa_edata_arr[] = {
ISA_EXT_DATA_ENTRY(zvkt, PRIV_VERSION_1_12_0, ext_zvkt),
ISA_EXT_DATA_ENTRY(zhinx, PRIV_VERSION_1_12_0, ext_zhinx),
ISA_EXT_DATA_ENTRY(zhinxmin, PRIV_VERSION_1_12_0, ext_zhinxmin),
+ ISA_EXT_DATA_ENTRY(shcounterenw, PRIV_VERSION_1_12_0, has_priv_1_12),
+ ISA_EXT_DATA_ENTRY(sha, PRIV_VERSION_1_12_0, ext_sha),
+ ISA_EXT_DATA_ENTRY(shgatpa, PRIV_VERSION_1_12_0, has_priv_1_12),
+ ISA_EXT_DATA_ENTRY(shtvala, PRIV_VERSION_1_12_0, has_priv_1_12),
+ ISA_EXT_DATA_ENTRY(shvsatpa, PRIV_VERSION_1_12_0, has_priv_1_12),
+ ISA_EXT_DATA_ENTRY(shvstvala, PRIV_VERSION_1_12_0, has_priv_1_12),
+ ISA_EXT_DATA_ENTRY(shvstvecd, PRIV_VERSION_1_12_0, has_priv_1_12),
ISA_EXT_DATA_ENTRY(smaia, PRIV_VERSION_1_12_0, ext_smaia),
+ ISA_EXT_DATA_ENTRY(smcdeleg, PRIV_VERSION_1_13_0, ext_smcdeleg),
ISA_EXT_DATA_ENTRY(smcntrpmf, PRIV_VERSION_1_12_0, ext_smcntrpmf),
+ ISA_EXT_DATA_ENTRY(smcsrind, PRIV_VERSION_1_13_0, ext_smcsrind),
+ ISA_EXT_DATA_ENTRY(smdbltrp, PRIV_VERSION_1_13_0, ext_smdbltrp),
ISA_EXT_DATA_ENTRY(smepmp, PRIV_VERSION_1_12_0, ext_smepmp),
+ ISA_EXT_DATA_ENTRY(smrnmi, PRIV_VERSION_1_12_0, ext_smrnmi),
+ ISA_EXT_DATA_ENTRY(smmpm, PRIV_VERSION_1_13_0, ext_smmpm),
+ ISA_EXT_DATA_ENTRY(smnpm, PRIV_VERSION_1_13_0, ext_smnpm),
ISA_EXT_DATA_ENTRY(smstateen, PRIV_VERSION_1_12_0, ext_smstateen),
ISA_EXT_DATA_ENTRY(ssaia, PRIV_VERSION_1_12_0, ext_ssaia),
+ ISA_EXT_DATA_ENTRY(ssccfg, PRIV_VERSION_1_13_0, ext_ssccfg),
ISA_EXT_DATA_ENTRY(ssccptr, PRIV_VERSION_1_11_0, has_priv_1_11),
ISA_EXT_DATA_ENTRY(sscofpmf, PRIV_VERSION_1_12_0, ext_sscofpmf),
ISA_EXT_DATA_ENTRY(sscounterenw, PRIV_VERSION_1_12_0, has_priv_1_12),
+ ISA_EXT_DATA_ENTRY(sscsrind, PRIV_VERSION_1_12_0, ext_sscsrind),
+ ISA_EXT_DATA_ENTRY(ssdbltrp, PRIV_VERSION_1_13_0, ext_ssdbltrp),
+ ISA_EXT_DATA_ENTRY(ssnpm, PRIV_VERSION_1_13_0, ext_ssnpm),
+ ISA_EXT_DATA_ENTRY(sspm, PRIV_VERSION_1_13_0, ext_sspm),
+ ISA_EXT_DATA_ENTRY(ssstateen, PRIV_VERSION_1_12_0, ext_ssstateen),
ISA_EXT_DATA_ENTRY(sstc, PRIV_VERSION_1_12_0, ext_sstc),
ISA_EXT_DATA_ENTRY(sstvala, PRIV_VERSION_1_12_0, has_priv_1_12),
ISA_EXT_DATA_ENTRY(sstvecd, PRIV_VERSION_1_12_0, has_priv_1_12),
+ ISA_EXT_DATA_ENTRY(ssu64xl, PRIV_VERSION_1_12_0, has_priv_1_12),
+ ISA_EXT_DATA_ENTRY(supm, PRIV_VERSION_1_13_0, ext_supm),
ISA_EXT_DATA_ENTRY(svade, PRIV_VERSION_1_11_0, ext_svade),
+ ISA_EXT_DATA_ENTRY(smctr, PRIV_VERSION_1_12_0, ext_smctr),
+ ISA_EXT_DATA_ENTRY(ssctr, PRIV_VERSION_1_12_0, ext_ssctr),
ISA_EXT_DATA_ENTRY(svadu, PRIV_VERSION_1_12_0, ext_svadu),
ISA_EXT_DATA_ENTRY(svinval, PRIV_VERSION_1_12_0, ext_svinval),
ISA_EXT_DATA_ENTRY(svnapot, PRIV_VERSION_1_12_0, ext_svnapot),
ISA_EXT_DATA_ENTRY(svpbmt, PRIV_VERSION_1_12_0, ext_svpbmt),
+ ISA_EXT_DATA_ENTRY(svukte, PRIV_VERSION_1_13_0, ext_svukte),
+ ISA_EXT_DATA_ENTRY(svvptc, PRIV_VERSION_1_13_0, ext_svvptc),
ISA_EXT_DATA_ENTRY(xtheadba, PRIV_VERSION_1_11_0, ext_xtheadba),
ISA_EXT_DATA_ENTRY(xtheadbb, PRIV_VERSION_1_11_0, ext_xtheadbb),
ISA_EXT_DATA_ENTRY(xtheadbs, PRIV_VERSION_1_11_0, ext_xtheadbs),
@@ -210,7 +243,7 @@ const RISCVIsaExtData isa_edata_arr[] = {
ISA_EXT_DATA_ENTRY(xtheadsync, PRIV_VERSION_1_11_0, ext_xtheadsync),
ISA_EXT_DATA_ENTRY(xventanacondops, PRIV_VERSION_1_12_0, ext_XVentanaCondOps),
- DEFINE_PROP_END_OF_LIST(),
+ { },
};
bool isa_ext_is_enabled(RISCVCPU *cpu, uint32_t ext_offset)
@@ -283,7 +316,7 @@ static const char * const riscv_excp_names[] = {
"load_page_fault",
"reserved",
"store_page_fault",
- "reserved",
+ "double_trap",
"reserved",
"reserved",
"reserved",
@@ -330,7 +363,7 @@ void riscv_cpu_set_misa_ext(CPURISCVState *env, uint32_t ext)
int riscv_cpu_max_xlen(RISCVCPUClass *mcc)
{
- return 16 << mcc->misa_mxl_max;
+ return 16 << mcc->def->misa_mxl_max;
}
#ifndef CONFIG_USER_ONLY
@@ -363,7 +396,7 @@ static uint8_t satp_mode_from_str(const char *satp_mode_str)
g_assert_not_reached();
}
-uint8_t satp_mode_max_from_map(uint32_t map)
+static uint8_t satp_mode_max_from_map(uint32_t map)
{
/*
* 'map = 0' will make us return (31 - 32), which C will
@@ -407,17 +440,23 @@ const char *satp_mode_str(uint8_t satp_mode, bool is_32_bit)
g_assert_not_reached();
}
-static void set_satp_mode_max_supported(RISCVCPU *cpu,
- uint8_t satp_mode)
+static bool get_satp_mode_supported(RISCVCPU *cpu, uint16_t *supported)
{
- bool rv32 = riscv_cpu_mxl(&cpu->env) == MXL_RV32;
+ bool rv32 = riscv_cpu_is_32bit(cpu);
const bool *valid_vm = rv32 ? valid_vm_1_10_32 : valid_vm_1_10_64;
+ int satp_mode = cpu->cfg.max_satp_mode;
+ if (satp_mode == -1) {
+ return false;
+ }
+
+ *supported = 0;
for (int i = 0; i <= satp_mode; ++i) {
if (valid_vm[i]) {
- cpu->cfg.satp_mode.supported |= (1 << i);
+ *supported |= (1 << i);
}
}
+ return true;
}
/* Set the satp mode to the max supported */
@@ -426,310 +465,26 @@ static void set_satp_mode_default_map(RISCVCPU *cpu)
/*
* Bare CPUs do not default to the max available.
* Users must set a valid satp_mode in the command
- * line.
+ * line. Otherwise, leave the existing max_satp_mode
+ * in place.
*/
if (object_dynamic_cast(OBJECT(cpu), TYPE_RISCV_BARE_CPU) != NULL) {
warn_report("No satp mode set. Defaulting to 'bare'");
- cpu->cfg.satp_mode.map = (1 << VM_1_10_MBARE);
- return;
+ cpu->cfg.max_satp_mode = VM_1_10_MBARE;
}
-
- cpu->cfg.satp_mode.map = cpu->cfg.satp_mode.supported;
-}
-#endif
-
-static void riscv_any_cpu_init(Object *obj)
-{
- RISCVCPU *cpu = RISCV_CPU(obj);
- CPURISCVState *env = &cpu->env;
- riscv_cpu_set_misa_ext(env, RVI | RVM | RVA | RVF | RVD | RVC | RVU);
-
-#ifndef CONFIG_USER_ONLY
- set_satp_mode_max_supported(RISCV_CPU(obj),
- riscv_cpu_mxl(&RISCV_CPU(obj)->env) == MXL_RV32 ?
- VM_1_10_SV32 : VM_1_10_SV57);
-#endif
-
- env->priv_ver = PRIV_VERSION_LATEST;
-
- /* inherited from parent obj via riscv_cpu_init() */
- cpu->cfg.ext_zifencei = true;
- cpu->cfg.ext_zicsr = true;
- cpu->cfg.mmu = true;
- cpu->cfg.pmp = true;
-}
-
-static void riscv_max_cpu_init(Object *obj)
-{
- RISCVCPU *cpu = RISCV_CPU(obj);
- CPURISCVState *env = &cpu->env;
-
- cpu->cfg.mmu = true;
- cpu->cfg.pmp = true;
-
- env->priv_ver = PRIV_VERSION_LATEST;
-#ifndef CONFIG_USER_ONLY
-#ifdef TARGET_RISCV32
- set_satp_mode_max_supported(cpu, VM_1_10_SV32);
-#else
- set_satp_mode_max_supported(cpu, VM_1_10_SV57);
-#endif
-#endif
-}
-
-#if defined(TARGET_RISCV64)
-static void rv64_base_cpu_init(Object *obj)
-{
- RISCVCPU *cpu = RISCV_CPU(obj);
- CPURISCVState *env = &cpu->env;
-
- cpu->cfg.mmu = true;
- cpu->cfg.pmp = true;
-
- /* Set latest version of privileged specification */
- env->priv_ver = PRIV_VERSION_LATEST;
-#ifndef CONFIG_USER_ONLY
- set_satp_mode_max_supported(RISCV_CPU(obj), VM_1_10_SV57);
-#endif
}
-
-static void rv64_sifive_u_cpu_init(Object *obj)
-{
- RISCVCPU *cpu = RISCV_CPU(obj);
- CPURISCVState *env = &cpu->env;
- riscv_cpu_set_misa_ext(env, RVI | RVM | RVA | RVF | RVD | RVC | RVS | RVU);
- env->priv_ver = PRIV_VERSION_1_10_0;
-#ifndef CONFIG_USER_ONLY
- set_satp_mode_max_supported(RISCV_CPU(obj), VM_1_10_SV39);
#endif
- /* inherited from parent obj via riscv_cpu_init() */
- cpu->cfg.ext_zifencei = true;
- cpu->cfg.ext_zicsr = true;
- cpu->cfg.mmu = true;
- cpu->cfg.pmp = true;
-}
-
-static void rv64_sifive_e_cpu_init(Object *obj)
-{
- CPURISCVState *env = &RISCV_CPU(obj)->env;
- RISCVCPU *cpu = RISCV_CPU(obj);
-
- riscv_cpu_set_misa_ext(env, RVI | RVM | RVA | RVC | RVU);
- env->priv_ver = PRIV_VERSION_1_10_0;
#ifndef CONFIG_USER_ONLY
- set_satp_mode_max_supported(cpu, VM_1_10_MBARE);
-#endif
-
- /* inherited from parent obj via riscv_cpu_init() */
- cpu->cfg.ext_zifencei = true;
- cpu->cfg.ext_zicsr = true;
- cpu->cfg.pmp = true;
-}
-
-static void rv64_thead_c906_cpu_init(Object *obj)
+static void riscv_register_custom_csrs(RISCVCPU *cpu, const RISCVCSR *csr_list)
{
- CPURISCVState *env = &RISCV_CPU(obj)->env;
- RISCVCPU *cpu = RISCV_CPU(obj);
-
- riscv_cpu_set_misa_ext(env, RVG | RVC | RVS | RVU);
- env->priv_ver = PRIV_VERSION_1_11_0;
-
- cpu->cfg.ext_zfa = true;
- cpu->cfg.ext_zfh = true;
- cpu->cfg.mmu = true;
- cpu->cfg.ext_xtheadba = true;
- cpu->cfg.ext_xtheadbb = true;
- cpu->cfg.ext_xtheadbs = true;
- cpu->cfg.ext_xtheadcmo = true;
- cpu->cfg.ext_xtheadcondmov = true;
- cpu->cfg.ext_xtheadfmemidx = true;
- cpu->cfg.ext_xtheadmac = true;
- cpu->cfg.ext_xtheadmemidx = true;
- cpu->cfg.ext_xtheadmempair = true;
- cpu->cfg.ext_xtheadsync = true;
-
- cpu->cfg.mvendorid = THEAD_VENDOR_ID;
-#ifndef CONFIG_USER_ONLY
- set_satp_mode_max_supported(cpu, VM_1_10_SV39);
- th_register_custom_csrs(cpu);
-#endif
-
- /* inherited from parent obj via riscv_cpu_init() */
- cpu->cfg.pmp = true;
-}
-
-static void rv64_veyron_v1_cpu_init(Object *obj)
-{
- CPURISCVState *env = &RISCV_CPU(obj)->env;
- RISCVCPU *cpu = RISCV_CPU(obj);
-
- riscv_cpu_set_misa_ext(env, RVG | RVC | RVS | RVU | RVH);
- env->priv_ver = PRIV_VERSION_1_12_0;
-
- /* Enable ISA extensions */
- cpu->cfg.mmu = true;
- cpu->cfg.ext_zifencei = true;
- cpu->cfg.ext_zicsr = true;
- cpu->cfg.pmp = true;
- cpu->cfg.ext_zicbom = true;
- cpu->cfg.cbom_blocksize = 64;
- cpu->cfg.cboz_blocksize = 64;
- cpu->cfg.ext_zicboz = true;
- cpu->cfg.ext_smaia = true;
- cpu->cfg.ext_ssaia = true;
- cpu->cfg.ext_sscofpmf = true;
- cpu->cfg.ext_sstc = true;
- cpu->cfg.ext_svinval = true;
- cpu->cfg.ext_svnapot = true;
- cpu->cfg.ext_svpbmt = true;
- cpu->cfg.ext_smstateen = true;
- cpu->cfg.ext_zba = true;
- cpu->cfg.ext_zbb = true;
- cpu->cfg.ext_zbc = true;
- cpu->cfg.ext_zbs = true;
- cpu->cfg.ext_XVentanaCondOps = true;
-
- cpu->cfg.mvendorid = VEYRON_V1_MVENDORID;
- cpu->cfg.marchid = VEYRON_V1_MARCHID;
- cpu->cfg.mimpid = VEYRON_V1_MIMPID;
-
-#ifndef CONFIG_USER_ONLY
- set_satp_mode_max_supported(cpu, VM_1_10_SV48);
-#endif
-}
-
-#ifdef CONFIG_TCG
-static void rv128_base_cpu_init(Object *obj)
-{
- RISCVCPU *cpu = RISCV_CPU(obj);
- CPURISCVState *env = &cpu->env;
-
- if (qemu_tcg_mttcg_enabled()) {
- /* Missing 128-bit aligned atomics */
- error_report("128-bit RISC-V currently does not work with Multi "
- "Threaded TCG. Please use: -accel tcg,thread=single");
- exit(EXIT_FAILURE);
+ for (size_t i = 0; csr_list[i].csr_ops.name; i++) {
+ int csrno = csr_list[i].csrno;
+ const riscv_csr_operations *csr_ops = &csr_list[i].csr_ops;
+ if (!csr_list[i].insertion_test || csr_list[i].insertion_test(cpu)) {
+ riscv_set_csr_ops(csrno, csr_ops);
+ }
}
-
- cpu->cfg.mmu = true;
- cpu->cfg.pmp = true;
-
- /* Set latest version of privileged specification */
- env->priv_ver = PRIV_VERSION_LATEST;
-#ifndef CONFIG_USER_ONLY
- set_satp_mode_max_supported(RISCV_CPU(obj), VM_1_10_SV57);
-#endif
-}
-#endif /* CONFIG_TCG */
-
-static void rv64i_bare_cpu_init(Object *obj)
-{
- CPURISCVState *env = &RISCV_CPU(obj)->env;
- riscv_cpu_set_misa_ext(env, RVI);
-}
-
-static void rv64e_bare_cpu_init(Object *obj)
-{
- CPURISCVState *env = &RISCV_CPU(obj)->env;
- riscv_cpu_set_misa_ext(env, RVE);
-}
-
-#else /* !TARGET_RISCV64 */
-
-static void rv32_base_cpu_init(Object *obj)
-{
- RISCVCPU *cpu = RISCV_CPU(obj);
- CPURISCVState *env = &cpu->env;
-
- cpu->cfg.mmu = true;
- cpu->cfg.pmp = true;
-
- /* Set latest version of privileged specification */
- env->priv_ver = PRIV_VERSION_LATEST;
-#ifndef CONFIG_USER_ONLY
- set_satp_mode_max_supported(RISCV_CPU(obj), VM_1_10_SV32);
-#endif
-}
-
-static void rv32_sifive_u_cpu_init(Object *obj)
-{
- RISCVCPU *cpu = RISCV_CPU(obj);
- CPURISCVState *env = &cpu->env;
- riscv_cpu_set_misa_ext(env, RVI | RVM | RVA | RVF | RVD | RVC | RVS | RVU);
- env->priv_ver = PRIV_VERSION_1_10_0;
-#ifndef CONFIG_USER_ONLY
- set_satp_mode_max_supported(RISCV_CPU(obj), VM_1_10_SV32);
-#endif
-
- /* inherited from parent obj via riscv_cpu_init() */
- cpu->cfg.ext_zifencei = true;
- cpu->cfg.ext_zicsr = true;
- cpu->cfg.mmu = true;
- cpu->cfg.pmp = true;
-}
-
-static void rv32_sifive_e_cpu_init(Object *obj)
-{
- CPURISCVState *env = &RISCV_CPU(obj)->env;
- RISCVCPU *cpu = RISCV_CPU(obj);
-
- riscv_cpu_set_misa_ext(env, RVI | RVM | RVA | RVC | RVU);
- env->priv_ver = PRIV_VERSION_1_10_0;
-#ifndef CONFIG_USER_ONLY
- set_satp_mode_max_supported(cpu, VM_1_10_MBARE);
-#endif
-
- /* inherited from parent obj via riscv_cpu_init() */
- cpu->cfg.ext_zifencei = true;
- cpu->cfg.ext_zicsr = true;
- cpu->cfg.pmp = true;
-}
-
-static void rv32_ibex_cpu_init(Object *obj)
-{
- CPURISCVState *env = &RISCV_CPU(obj)->env;
- RISCVCPU *cpu = RISCV_CPU(obj);
-
- riscv_cpu_set_misa_ext(env, RVI | RVM | RVC | RVU);
- env->priv_ver = PRIV_VERSION_1_12_0;
-#ifndef CONFIG_USER_ONLY
- set_satp_mode_max_supported(cpu, VM_1_10_MBARE);
-#endif
- /* inherited from parent obj via riscv_cpu_init() */
- cpu->cfg.ext_zifencei = true;
- cpu->cfg.ext_zicsr = true;
- cpu->cfg.pmp = true;
- cpu->cfg.ext_smepmp = true;
-}
-
-static void rv32_imafcu_nommu_cpu_init(Object *obj)
-{
- CPURISCVState *env = &RISCV_CPU(obj)->env;
- RISCVCPU *cpu = RISCV_CPU(obj);
-
- riscv_cpu_set_misa_ext(env, RVI | RVM | RVA | RVF | RVC | RVU);
- env->priv_ver = PRIV_VERSION_1_10_0;
-#ifndef CONFIG_USER_ONLY
- set_satp_mode_max_supported(cpu, VM_1_10_MBARE);
-#endif
-
- /* inherited from parent obj via riscv_cpu_init() */
- cpu->cfg.ext_zifencei = true;
- cpu->cfg.ext_zicsr = true;
- cpu->cfg.pmp = true;
-}
-
-static void rv32i_bare_cpu_init(Object *obj)
-{
- CPURISCVState *env = &RISCV_CPU(obj)->env;
- riscv_cpu_set_misa_ext(env, RVI);
-}
-
-static void rv32e_bare_cpu_init(Object *obj)
-{
- CPURISCVState *env = &RISCV_CPU(obj)->env;
- riscv_cpu_set_misa_ext(env, RVE);
}
#endif
@@ -805,13 +560,6 @@ static void riscv_cpu_dump_state(CPUState *cs, FILE *f, int flags)
CSR_MSCRATCH,
CSR_SSCRATCH,
CSR_SATP,
- CSR_MMTE,
- CSR_UPMBASE,
- CSR_UPMMASK,
- CSR_SPMBASE,
- CSR_SPMMASK,
- CSR_MPMBASE,
- CSR_MPMMASK,
};
for (i = 0; i < ARRAY_SIZE(dump_csrs); ++i) {
@@ -839,6 +587,12 @@ static void riscv_cpu_dump_state(CPUState *cs, FILE *f, int flags)
}
}
if (flags & CPU_DUMP_FPU) {
+ target_ulong val = 0;
+ RISCVException res = riscv_csrrw_debug(env, CSR_FCSR, &val, 0, 0);
+ if (res == RISCV_EXCP_NONE) {
+ qemu_fprintf(f, " %-8s " TARGET_FMT_lx "\n",
+ csr_ops[CSR_FCSR].name, val);
+ }
for (i = 0; i < 32; i++) {
qemu_fprintf(f, " %-8s %016" PRIx64,
riscv_fpr_regnames[i], env->fpr[i]);
@@ -908,9 +662,9 @@ static vaddr riscv_cpu_get_pc(CPUState *cs)
return env->pc;
}
+#ifndef CONFIG_USER_ONLY
bool riscv_cpu_has_work(CPUState *cs)
{
-#ifndef CONFIG_USER_ONLY
RISCVCPU *cpu = RISCV_CPU(cs);
CPURISCVState *env = &cpu->env;
/*
@@ -920,15 +674,8 @@ bool riscv_cpu_has_work(CPUState *cs)
return riscv_cpu_all_pending(env) != 0 ||
riscv_cpu_sirq_pending(env) != RISCV_EXCP_NONE ||
riscv_cpu_vsirq_pending(env) != RISCV_EXCP_NONE;
-#else
- return true;
-#endif
-}
-
-static int riscv_cpu_mmu_index(CPUState *cs, bool ifetch)
-{
- return riscv_env_mmu_index(cpu_env(cs), ifetch);
}
+#endif /* !CONFIG_USER_ONLY */
static void riscv_cpu_reset_hold(Object *obj, ResetType type)
{
@@ -945,7 +692,7 @@ static void riscv_cpu_reset_hold(Object *obj, ResetType type)
mcc->parent_phases.hold(obj, type);
}
#ifndef CONFIG_USER_ONLY
- env->misa_mxl = mcc->misa_mxl_max;
+ env->misa_mxl = mcc->def->misa_mxl_max;
env->priv = PRV_M;
env->mstatus &= ~(MSTATUS_MIE | MSTATUS_MPRV);
if (env->misa_mxl > MXL_RV32) {
@@ -965,6 +712,9 @@ static void riscv_cpu_reset_hold(Object *obj, ResetType type)
env->mstatus_hs = set_field(env->mstatus_hs,
MSTATUS64_UXL, env->misa_mxl);
}
+ if (riscv_cpu_cfg(env)->ext_smdbltrp) {
+ env->mstatus = set_field(env->mstatus, MSTATUS_MDT, 1);
+ }
}
env->mcause = 0;
env->miclaim = MIP_SGEIP;
@@ -991,8 +741,6 @@ static void riscv_cpu_reset_hold(Object *obj, ResetType type)
}
i++;
}
- /* mmte is supposed to have pm.current hardwired to 1 */
- env->mmte |= (EXT_STATUS_INITIAL | MMTE_M_PM_CURRENT);
/*
* Bits 10, 6, 2 and 12 of mideleg are read only 1 when the Hypervisor
@@ -1012,18 +760,35 @@ static void riscv_cpu_reset_hold(Object *obj, ResetType type)
}
pmp_unlock_entries(env);
+#else
+ env->priv = PRV_U;
+ env->senvcfg = 0;
+ env->menvcfg = 0;
#endif
+
+ /* on reset elp is clear */
+ env->elp = false;
+ /* on reset ssp is set to 0 */
+ env->ssp = 0;
+
env->xl = riscv_cpu_mxl(env);
- riscv_cpu_update_mask(env);
cs->exception_index = RISCV_EXCP_NONE;
env->load_res = -1;
set_default_nan_mode(1, &env->fp_status);
+ /* Default NaN value: sign bit clear, frac msb set */
+ set_float_default_nan_pattern(0b01000000, &env->fp_status);
+ env->vill = true;
#ifndef CONFIG_USER_ONLY
if (cpu->cfg.debug) {
riscv_trigger_reset_hold(env);
}
+ if (cpu->cfg.ext_smrnmi) {
+ env->rnmip = 0;
+ env->mnstatus = set_field(env->mnstatus, MNSTATUS_NMIE, false);
+ }
+
if (kvm_enabled()) {
kvm_riscv_reset_vcpu(cpu);
}
@@ -1036,6 +801,15 @@ static void riscv_cpu_disas_set_info(CPUState *s, disassemble_info *info)
CPURISCVState *env = &cpu->env;
info->target_info = &cpu->cfg;
+ /*
+ * A couple of bits in MSTATUS set the endianness:
+ * - MSTATUS_UBE (User-mode),
+ * - MSTATUS_SBE (Supervisor-mode),
+ * - MSTATUS_MBE (Machine-mode)
+ * but we don't implement that yet.
+ */
+ info->endian = BFD_ENDIAN_LITTLE;
+
switch (env->xl) {
case MXL_RV32:
info->print_insn = print_insn_riscv32;
@@ -1055,18 +829,16 @@ static void riscv_cpu_disas_set_info(CPUState *s, disassemble_info *info)
static void riscv_cpu_satp_mode_finalize(RISCVCPU *cpu, Error **errp)
{
bool rv32 = riscv_cpu_is_32bit(cpu);
- uint8_t satp_mode_map_max, satp_mode_supported_max;
+ uint16_t supported;
+ uint8_t satp_mode_map_max;
- /* The CPU wants the OS to decide which satp mode to use */
- if (cpu->cfg.satp_mode.supported == 0) {
+ if (!get_satp_mode_supported(cpu, &supported)) {
+ /* The CPU wants the hypervisor to decide which satp mode to allow */
return;
}
- satp_mode_supported_max =
- satp_mode_max_from_map(cpu->cfg.satp_mode.supported);
-
- if (cpu->cfg.satp_mode.map == 0) {
- if (cpu->cfg.satp_mode.init == 0) {
+ if (cpu->satp_modes.map == 0) {
+ if (cpu->satp_modes.init == 0) {
/* If unset by the user, we fallback to the default satp mode. */
set_satp_mode_default_map(cpu);
} else {
@@ -1076,27 +848,27 @@ static void riscv_cpu_satp_mode_finalize(RISCVCPU *cpu, Error **errp)
* valid_vm_1_10_32/64.
*/
for (int i = 1; i < 16; ++i) {
- if ((cpu->cfg.satp_mode.init & (1 << i)) &&
- (cpu->cfg.satp_mode.supported & (1 << i))) {
+ if ((cpu->satp_modes.init & (1 << i)) &&
+ supported & (1 << i)) {
for (int j = i - 1; j >= 0; --j) {
- if (cpu->cfg.satp_mode.supported & (1 << j)) {
- cpu->cfg.satp_mode.map |= (1 << j);
- break;
+ if (supported & (1 << j)) {
+ cpu->cfg.max_satp_mode = j;
+ return;
}
}
- break;
}
}
}
+ return;
}
- satp_mode_map_max = satp_mode_max_from_map(cpu->cfg.satp_mode.map);
+ satp_mode_map_max = satp_mode_max_from_map(cpu->satp_modes.map);
/* Make sure the user asked for a supported configuration (HW and qemu) */
- if (satp_mode_map_max > satp_mode_supported_max) {
+ if (satp_mode_map_max > cpu->cfg.max_satp_mode) {
error_setg(errp, "satp_mode %s is higher than hw max capability %s",
satp_mode_str(satp_mode_map_max, rv32),
- satp_mode_str(satp_mode_supported_max, rv32));
+ satp_mode_str(cpu->cfg.max_satp_mode, rv32));
return;
}
@@ -1106,9 +878,9 @@ static void riscv_cpu_satp_mode_finalize(RISCVCPU *cpu, Error **errp)
*/
if (!rv32) {
for (int i = satp_mode_map_max - 1; i >= 0; --i) {
- if (!(cpu->cfg.satp_mode.map & (1 << i)) &&
- (cpu->cfg.satp_mode.init & (1 << i)) &&
- (cpu->cfg.satp_mode.supported & (1 << i))) {
+ if (!(cpu->satp_modes.map & (1 << i)) &&
+ (cpu->satp_modes.init & (1 << i)) &&
+ (supported & (1 << i))) {
error_setg(errp, "cannot disable %s satp mode if %s "
"is enabled", satp_mode_str(i, false),
satp_mode_str(satp_mode_map_max, false));
@@ -1117,12 +889,7 @@ static void riscv_cpu_satp_mode_finalize(RISCVCPU *cpu, Error **errp)
}
}
- /* Finally expand the map so that all valid modes are set */
- for (int i = satp_mode_map_max - 1; i >= 0; --i) {
- if (cpu->cfg.satp_mode.supported & (1 << i)) {
- cpu->cfg.satp_mode.map |= (1 << i);
- }
- }
+ cpu->cfg.max_satp_mode = satp_mode_map_max;
}
#endif
@@ -1161,11 +928,6 @@ static void riscv_cpu_realize(DeviceState *dev, Error **errp)
RISCVCPUClass *mcc = RISCV_CPU_GET_CLASS(dev);
Error *local_err = NULL;
- if (object_dynamic_cast(OBJECT(dev), TYPE_RISCV_CPU_ANY) != NULL) {
- warn_report("The 'any' CPU is deprecated and will be "
- "removed in the future.");
- }
-
cpu_exec_realizefn(cs, &local_err);
if (local_err != NULL) {
error_propagate(errp, local_err);
@@ -1205,11 +967,11 @@ bool riscv_cpu_accelerator_compatible(RISCVCPU *cpu)
static void cpu_riscv_get_satp(Object *obj, Visitor *v, const char *name,
void *opaque, Error **errp)
{
- RISCVSATPMap *satp_map = opaque;
+ RISCVSATPModes *satp_modes = opaque;
uint8_t satp = satp_mode_from_str(name);
bool value;
- value = satp_map->map & (1 << satp);
+ value = satp_modes->map & (1 << satp);
visit_type_bool(v, name, &value, errp);
}
@@ -1217,7 +979,7 @@ static void cpu_riscv_get_satp(Object *obj, Visitor *v, const char *name,
static void cpu_riscv_set_satp(Object *obj, Visitor *v, const char *name,
void *opaque, Error **errp)
{
- RISCVSATPMap *satp_map = opaque;
+ RISCVSATPModes *satp_modes = opaque;
uint8_t satp = satp_mode_from_str(name);
bool value;
@@ -1225,8 +987,8 @@ static void cpu_riscv_set_satp(Object *obj, Visitor *v, const char *name,
return;
}
- satp_map->map = deposit32(satp_map->map, satp, 1, value);
- satp_map->init |= 1 << satp;
+ satp_modes->map = deposit32(satp_modes->map, satp, 1, value);
+ satp_modes->init |= 1 << satp;
}
void riscv_add_satp_mode_properties(Object *obj)
@@ -1235,16 +997,16 @@ void riscv_add_satp_mode_properties(Object *obj)
if (cpu->env.misa_mxl == MXL_RV32) {
object_property_add(obj, "sv32", "bool", cpu_riscv_get_satp,
- cpu_riscv_set_satp, NULL, &cpu->cfg.satp_mode);
+ cpu_riscv_set_satp, NULL, &cpu->satp_modes);
} else {
object_property_add(obj, "sv39", "bool", cpu_riscv_get_satp,
- cpu_riscv_set_satp, NULL, &cpu->cfg.satp_mode);
+ cpu_riscv_set_satp, NULL, &cpu->satp_modes);
object_property_add(obj, "sv48", "bool", cpu_riscv_get_satp,
- cpu_riscv_set_satp, NULL, &cpu->cfg.satp_mode);
+ cpu_riscv_set_satp, NULL, &cpu->satp_modes);
object_property_add(obj, "sv57", "bool", cpu_riscv_get_satp,
- cpu_riscv_set_satp, NULL, &cpu->cfg.satp_mode);
+ cpu_riscv_set_satp, NULL, &cpu->satp_modes);
object_property_add(obj, "sv64", "bool", cpu_riscv_get_satp,
- cpu_riscv_set_satp, NULL, &cpu->cfg.satp_mode);
+ cpu_riscv_set_satp, NULL, &cpu->satp_modes);
}
}
@@ -1309,16 +1071,16 @@ static void riscv_cpu_set_irq(void *opaque, int irq, int level)
g_assert_not_reached();
}
}
-#endif /* CONFIG_USER_ONLY */
-static bool riscv_cpu_is_dynamic(Object *cpu_obj)
+static void riscv_cpu_set_nmi(void *opaque, int irq, int level)
{
- return object_dynamic_cast(cpu_obj, TYPE_RISCV_DYNAMIC_CPU) != NULL;
+ riscv_cpu_set_rnmi(RISCV_CPU(opaque), irq, level);
}
+#endif /* CONFIG_USER_ONLY */
-static void riscv_cpu_post_init(Object *obj)
+static bool riscv_cpu_is_dynamic(Object *cpu_obj)
{
- accel_cpu_instance_init(CPU(obj));
+ return object_dynamic_cast(cpu_obj, TYPE_RISCV_DYNAMIC_CPU) != NULL;
}
static void riscv_cpu_init(Object *obj)
@@ -1327,11 +1089,13 @@ static void riscv_cpu_init(Object *obj)
RISCVCPU *cpu = RISCV_CPU(obj);
CPURISCVState *env = &cpu->env;
- env->misa_mxl = mcc->misa_mxl_max;
+ env->misa_mxl = mcc->def->misa_mxl_max;
#ifndef CONFIG_USER_ONLY
qdev_init_gpio_in(DEVICE(obj), riscv_cpu_set_irq,
IRQ_LOCAL_MAX + IRQ_LOCAL_GUEST_MAX);
+ qdev_init_gpio_in_named(DEVICE(cpu), riscv_cpu_set_nmi,
+ "riscv.cpu.rnmi", RNMI_MAX);
#endif /* CONFIG_USER_ONLY */
general_user_opts = g_hash_table_new(g_str_hash, g_str_equal);
@@ -1343,8 +1107,8 @@ static void riscv_cpu_init(Object *obj)
* for all CPUs. Each accelerator will decide what to do when
* users disable them.
*/
- RISCV_CPU(obj)->cfg.ext_zicntr = true;
- RISCV_CPU(obj)->cfg.ext_zihpm = true;
+ RISCV_CPU(obj)->cfg.ext_zicntr = !mcc->def->bare;
+ RISCV_CPU(obj)->cfg.ext_zihpm = !mcc->def->bare;
/* Default values for non-bool cpu properties */
cpu->cfg.pmu_mask = MAKE_64BIT_MASK(3, 16);
@@ -1354,34 +1118,28 @@ static void riscv_cpu_init(Object *obj)
cpu->cfg.cbop_blocksize = 64;
cpu->cfg.cboz_blocksize = 64;
cpu->env.vext_ver = VEXT_VERSION_1_00_0;
-}
-
-static void riscv_bare_cpu_init(Object *obj)
-{
- RISCVCPU *cpu = RISCV_CPU(obj);
+ cpu->cfg.max_satp_mode = -1;
- /*
- * Bare CPUs do not inherit the timer and performance
- * counters from the parent class (see riscv_cpu_init()
- * for info on why the parent enables them).
- *
- * Users have to explicitly enable these counters for
- * bare CPUs.
- */
- cpu->cfg.ext_zicntr = false;
- cpu->cfg.ext_zihpm = false;
+ if (mcc->def->profile) {
+ mcc->def->profile->enabled = true;
+ }
- /* Set to QEMU's first supported priv version */
- cpu->env.priv_ver = PRIV_VERSION_1_10_0;
+ env->misa_ext_mask = env->misa_ext = mcc->def->misa_ext;
+ riscv_cpu_cfg_merge(&cpu->cfg, &mcc->def->cfg);
- /*
- * Support all available satp_mode settings. The default
- * value will be set to MBARE if the user doesn't set
- * satp_mode manually (see set_satp_mode_default()).
- */
+ if (mcc->def->priv_spec != RISCV_PROFILE_ATTR_UNUSED) {
+ cpu->env.priv_ver = mcc->def->priv_spec;
+ }
+ if (mcc->def->vext_spec != RISCV_PROFILE_ATTR_UNUSED) {
+ cpu->env.vext_ver = mcc->def->vext_spec;
+ }
#ifndef CONFIG_USER_ONLY
- set_satp_mode_max_supported(cpu, VM_1_10_SV64);
+ if (mcc->def->custom_csrs) {
+ riscv_register_custom_csrs(cpu, mcc->def->custom_csrs);
+ }
#endif
+
+ accel_cpu_instance_init(CPU(obj));
}
typedef struct misa_ext_info {
@@ -1406,7 +1164,6 @@ static const MISAExtInfo misa_ext_info_arr[] = {
MISA_EXT_INFO(RVS, "s", "Supervisor-level instructions"),
MISA_EXT_INFO(RVU, "u", "User-level instructions"),
MISA_EXT_INFO(RVH, "h", "Hypervisor"),
- MISA_EXT_INFO(RVJ, "x-j", "Dynamic translated languages"),
MISA_EXT_INFO(RVV, "v", "Vector operations"),
MISA_EXT_INFO(RVG, "g", "General purpose (IMAFD_Zicsr_Zifencei)"),
MISA_EXT_INFO(RVB, "b", "Bit manipulation (Zba_Zbb_Zbs)")
@@ -1417,7 +1174,7 @@ static void riscv_cpu_validate_misa_mxl(RISCVCPUClass *mcc)
CPUClass *cc = CPU_CLASS(mcc);
/* Validate that MISA_MXL is set properly. */
- switch (mcc->misa_mxl_max) {
+ switch (mcc->def->misa_mxl_max) {
#ifdef TARGET_RISCV64
case MXL_RV64:
case MXL_RV128:
@@ -1473,7 +1230,15 @@ const RISCVCPUMultiExtConfig riscv_cpu_extensions[] = {
/* Defaults for standard extensions */
MULTI_EXT_CFG_BOOL("sscofpmf", ext_sscofpmf, false),
MULTI_EXT_CFG_BOOL("smcntrpmf", ext_smcntrpmf, false),
+ MULTI_EXT_CFG_BOOL("smcsrind", ext_smcsrind, false),
+ MULTI_EXT_CFG_BOOL("smcdeleg", ext_smcdeleg, false),
+ MULTI_EXT_CFG_BOOL("sscsrind", ext_sscsrind, false),
+ MULTI_EXT_CFG_BOOL("ssccfg", ext_ssccfg, false),
+ MULTI_EXT_CFG_BOOL("smctr", ext_smctr, false),
+ MULTI_EXT_CFG_BOOL("ssctr", ext_ssctr, false),
MULTI_EXT_CFG_BOOL("zifencei", ext_zifencei, true),
+ MULTI_EXT_CFG_BOOL("zicfilp", ext_zicfilp, false),
+ MULTI_EXT_CFG_BOOL("zicfiss", ext_zicfiss, false),
MULTI_EXT_CFG_BOOL("zicsr", ext_zicsr, true),
MULTI_EXT_CFG_BOOL("zihintntl", ext_zihintntl, true),
MULTI_EXT_CFG_BOOL("zihintpause", ext_zihintpause, true),
@@ -1499,16 +1264,25 @@ const RISCVCPUMultiExtConfig riscv_cpu_extensions[] = {
MULTI_EXT_CFG_BOOL("zvfh", ext_zvfh, false),
MULTI_EXT_CFG_BOOL("zvfhmin", ext_zvfhmin, false),
MULTI_EXT_CFG_BOOL("sstc", ext_sstc, true),
+ MULTI_EXT_CFG_BOOL("ssnpm", ext_ssnpm, false),
+ MULTI_EXT_CFG_BOOL("sspm", ext_sspm, false),
+ MULTI_EXT_CFG_BOOL("supm", ext_supm, false),
MULTI_EXT_CFG_BOOL("smaia", ext_smaia, false),
+ MULTI_EXT_CFG_BOOL("smdbltrp", ext_smdbltrp, false),
MULTI_EXT_CFG_BOOL("smepmp", ext_smepmp, false),
+ MULTI_EXT_CFG_BOOL("smrnmi", ext_smrnmi, false),
+ MULTI_EXT_CFG_BOOL("smmpm", ext_smmpm, false),
+ MULTI_EXT_CFG_BOOL("smnpm", ext_smnpm, false),
MULTI_EXT_CFG_BOOL("smstateen", ext_smstateen, false),
MULTI_EXT_CFG_BOOL("ssaia", ext_ssaia, false),
+ MULTI_EXT_CFG_BOOL("ssdbltrp", ext_ssdbltrp, false),
MULTI_EXT_CFG_BOOL("svade", ext_svade, false),
MULTI_EXT_CFG_BOOL("svadu", ext_svadu, true),
MULTI_EXT_CFG_BOOL("svinval", ext_svinval, false),
MULTI_EXT_CFG_BOOL("svnapot", ext_svnapot, false),
MULTI_EXT_CFG_BOOL("svpbmt", ext_svpbmt, false),
+ MULTI_EXT_CFG_BOOL("svvptc", ext_svvptc, true),
MULTI_EXT_CFG_BOOL("zicntr", ext_zicntr, true),
MULTI_EXT_CFG_BOOL("zihpm", ext_zihpm, true),
@@ -1570,7 +1344,7 @@ const RISCVCPUMultiExtConfig riscv_cpu_extensions[] = {
MULTI_EXT_CFG_BOOL("zvksc", ext_zvksc, false),
MULTI_EXT_CFG_BOOL("zvksg", ext_zvksg, false),
- DEFINE_PROP_END_OF_LIST(),
+ { },
};
const RISCVCPUMultiExtConfig riscv_cpu_vendor_exts[] = {
@@ -1587,12 +1361,14 @@ const RISCVCPUMultiExtConfig riscv_cpu_vendor_exts[] = {
MULTI_EXT_CFG_BOOL("xtheadsync", ext_xtheadsync, false),
MULTI_EXT_CFG_BOOL("xventanacondops", ext_XVentanaCondOps, false),
- DEFINE_PROP_END_OF_LIST(),
+ { },
};
/* These are experimental so mark with 'x-' */
const RISCVCPUMultiExtConfig riscv_cpu_experimental_exts[] = {
- DEFINE_PROP_END_OF_LIST(),
+ MULTI_EXT_CFG_BOOL("x-svukte", ext_svukte, false),
+
+ { },
};
/*
@@ -1604,8 +1380,11 @@ const RISCVCPUMultiExtConfig riscv_cpu_experimental_exts[] = {
*/
const RISCVCPUMultiExtConfig riscv_cpu_named_features[] = {
MULTI_EXT_CFG_BOOL("zic64b", ext_zic64b, true),
+ MULTI_EXT_CFG_BOOL("ssstateen", ext_ssstateen, true),
+ MULTI_EXT_CFG_BOOL("sha", ext_sha, true),
+ MULTI_EXT_CFG_BOOL("ziccrse", ext_ziccrse, true),
- DEFINE_PROP_END_OF_LIST(),
+ { },
};
/* Deprecated entries marked for future removal */
@@ -1622,7 +1401,7 @@ const RISCVCPUMultiExtConfig riscv_cpu_deprecated_exts[] = {
MULTI_EXT_CFG_BOOL("Zve64f", ext_zve64f, false),
MULTI_EXT_CFG_BOOL("Zve64d", ext_zve64d, false),
- DEFINE_PROP_END_OF_LIST(),
+ { },
};
static void cpu_set_prop_err(RISCVCPU *cpu, const char *propname,
@@ -1677,7 +1456,8 @@ static void prop_pmu_num_get(Object *obj, Visitor *v, const char *name,
}
static const PropertyInfo prop_pmu_num = {
- .name = "pmu-num",
+ .type = "int8",
+ .description = "pmu-num",
.get = prop_pmu_num_get,
.set = prop_pmu_num_set,
};
@@ -1718,7 +1498,8 @@ static void prop_pmu_mask_get(Object *obj, Visitor *v, const char *name,
}
static const PropertyInfo prop_pmu_mask = {
- .name = "pmu-mask",
+ .type = "int8",
+ .description = "pmu-mask",
.get = prop_pmu_mask_get,
.set = prop_pmu_mask_set,
};
@@ -1749,7 +1530,8 @@ static void prop_mmu_get(Object *obj, Visitor *v, const char *name,
}
static const PropertyInfo prop_mmu = {
- .name = "mmu",
+ .type = "bool",
+ .description = "mmu",
.get = prop_mmu_get,
.set = prop_mmu_set,
};
@@ -1780,7 +1562,8 @@ static void prop_pmp_get(Object *obj, Visitor *v, const char *name,
}
static const PropertyInfo prop_pmp = {
- .name = "pmp",
+ .type = "bool",
+ .description = "pmp",
.get = prop_pmp_get,
.set = prop_pmp_set,
};
@@ -1854,7 +1637,9 @@ static void prop_priv_spec_get(Object *obj, Visitor *v, const char *name,
}
static const PropertyInfo prop_priv_spec = {
- .name = "priv_spec",
+ .type = "str",
+ .description = "priv_spec",
+ /* FIXME enum? */
.get = prop_priv_spec_get,
.set = prop_priv_spec_set,
};
@@ -1885,7 +1670,9 @@ static void prop_vext_spec_get(Object *obj, Visitor *v, const char *name,
}
static const PropertyInfo prop_vext_spec = {
- .name = "vext_spec",
+ .type = "str",
+ .description = "vext_spec",
+ /* FIXME enum? */
.get = prop_vext_spec_get,
.set = prop_vext_spec_set,
};
@@ -1894,6 +1681,7 @@ static void prop_vlen_set(Object *obj, Visitor *v, const char *name,
void *opaque, Error **errp)
{
RISCVCPU *cpu = RISCV_CPU(obj);
+ uint16_t cpu_vlen = cpu->cfg.vlenb << 3;
uint16_t value;
if (!visit_type_uint16(v, name, &value, errp)) {
@@ -1905,10 +1693,10 @@ static void prop_vlen_set(Object *obj, Visitor *v, const char *name,
return;
}
- if (value != cpu->cfg.vlenb && riscv_cpu_is_vendor(obj)) {
+ if (value != cpu_vlen && riscv_cpu_is_vendor(obj)) {
cpu_set_prop_err(cpu, name, errp);
error_append_hint(errp, "Current '%s' val: %u\n",
- name, cpu->cfg.vlenb << 3);
+ name, cpu_vlen);
return;
}
@@ -1925,7 +1713,8 @@ static void prop_vlen_get(Object *obj, Visitor *v, const char *name,
}
static const PropertyInfo prop_vlen = {
- .name = "vlen",
+ .type = "uint16",
+ .description = "vlen",
.get = prop_vlen_get,
.set = prop_vlen_set,
};
@@ -1965,7 +1754,8 @@ static void prop_elen_get(Object *obj, Visitor *v, const char *name,
}
static const PropertyInfo prop_elen = {
- .name = "elen",
+ .type = "uint16",
+ .description = "elen",
.get = prop_elen_get,
.set = prop_elen_set,
};
@@ -2000,7 +1790,8 @@ static void prop_cbom_blksize_get(Object *obj, Visitor *v, const char *name,
}
static const PropertyInfo prop_cbom_blksize = {
- .name = "cbom_blocksize",
+ .type = "uint16",
+ .description = "cbom_blocksize",
.get = prop_cbom_blksize_get,
.set = prop_cbom_blksize_set,
};
@@ -2035,7 +1826,8 @@ static void prop_cbop_blksize_get(Object *obj, Visitor *v, const char *name,
}
static const PropertyInfo prop_cbop_blksize = {
- .name = "cbop_blocksize",
+ .type = "uint16",
+ .description = "cbop_blocksize",
.get = prop_cbop_blksize_get,
.set = prop_cbop_blksize_set,
};
@@ -2070,7 +1862,8 @@ static void prop_cboz_blksize_get(Object *obj, Visitor *v, const char *name,
}
static const PropertyInfo prop_cboz_blksize = {
- .name = "cboz_blocksize",
+ .type = "uint16",
+ .description = "cboz_blocksize",
.get = prop_cboz_blksize_get,
.set = prop_cboz_blksize_set,
};
@@ -2105,7 +1898,8 @@ static void prop_mvendorid_get(Object *obj, Visitor *v, const char *name,
}
static const PropertyInfo prop_mvendorid = {
- .name = "mvendorid",
+ .type = "uint32",
+ .description = "mvendorid",
.get = prop_mvendorid_get,
.set = prop_mvendorid_set,
};
@@ -2140,7 +1934,8 @@ static void prop_mimpid_get(Object *obj, Visitor *v, const char *name,
}
static const PropertyInfo prop_mimpid = {
- .name = "mimpid",
+ .type = "uint64",
+ .description = "mimpid",
.get = prop_mimpid_get,
.set = prop_mimpid_set,
};
@@ -2196,7 +1991,8 @@ static void prop_marchid_get(Object *obj, Visitor *v, const char *name,
}
static const PropertyInfo prop_marchid = {
- .name = "marchid",
+ .type = "uint64",
+ .description = "marchid",
.get = prop_marchid_get,
.set = prop_marchid_set,
};
@@ -2208,9 +2004,10 @@ static const PropertyInfo prop_marchid = {
* doesn't need to be manually enabled by the profile.
*/
static RISCVCPUProfile RVA22U64 = {
- .parent = NULL,
+ .u_parent = NULL,
+ .s_parent = NULL,
.name = "rva22u64",
- .misa_ext = RVI | RVM | RVA | RVF | RVD | RVC | RVU,
+ .misa_ext = RVI | RVM | RVA | RVF | RVD | RVC | RVB | RVU,
.priv_spec = RISCV_PROFILE_ATTR_UNUSED,
.satp_mode = RISCV_PROFILE_ATTR_UNUSED,
.ext_offsets = {
@@ -2240,7 +2037,8 @@ static RISCVCPUProfile RVA22U64 = {
* The remaining features/extensions come from RVA22U64.
*/
static RISCVCPUProfile RVA22S64 = {
- .parent = &RVA22U64,
+ .u_parent = &RVA22U64,
+ .s_parent = NULL,
.name = "rva22s64",
.misa_ext = RVS,
.priv_spec = PRIV_VERSION_1_12_0,
@@ -2254,9 +2052,65 @@ static RISCVCPUProfile RVA22S64 = {
}
};
+/*
+ * All mandatory extensions from RVA22U64 are present
+ * in RVA23U64, so we set RVA22U64 as a parent. We need to
+ * declare just the newly added mandatory extensions.
+ */
+static RISCVCPUProfile RVA23U64 = {
+ .u_parent = &RVA22U64,
+ .s_parent = NULL,
+ .name = "rva23u64",
+ .misa_ext = RVV,
+ .priv_spec = RISCV_PROFILE_ATTR_UNUSED,
+ .satp_mode = RISCV_PROFILE_ATTR_UNUSED,
+ .ext_offsets = {
+ CPU_CFG_OFFSET(ext_zvfhmin), CPU_CFG_OFFSET(ext_zvbb),
+ CPU_CFG_OFFSET(ext_zvkt), CPU_CFG_OFFSET(ext_zihintntl),
+ CPU_CFG_OFFSET(ext_zicond), CPU_CFG_OFFSET(ext_zimop),
+ CPU_CFG_OFFSET(ext_zcmop), CPU_CFG_OFFSET(ext_zcb),
+ CPU_CFG_OFFSET(ext_zfa), CPU_CFG_OFFSET(ext_zawrs),
+ CPU_CFG_OFFSET(ext_supm),
+
+ RISCV_PROFILE_EXT_LIST_END
+ }
+};
+
+/*
+ * As with RVA22S64, RVA23S64 also defines 'named features'.
+ *
+ * Cache related features that we consider enabled since we don't
+ * implement cache: Ssccptr
+ *
+ * Other named features that we already implement: Sstvecd, Sstvala,
+ * Sscounterenw, Ssu64xl
+ *
+ * The remaining features/extensions come from RVA23U64.
+ */
+static RISCVCPUProfile RVA23S64 = {
+ .u_parent = &RVA23U64,
+ .s_parent = &RVA22S64,
+ .name = "rva23s64",
+ .misa_ext = RVS,
+ .priv_spec = PRIV_VERSION_1_13_0,
+ .satp_mode = VM_1_10_SV39,
+ .ext_offsets = {
+ /* New in RVA23S64 */
+ CPU_CFG_OFFSET(ext_svnapot), CPU_CFG_OFFSET(ext_sstc),
+ CPU_CFG_OFFSET(ext_sscofpmf), CPU_CFG_OFFSET(ext_ssnpm),
+
+ /* Named features: Sha */
+ CPU_CFG_OFFSET(ext_sha),
+
+ RISCV_PROFILE_EXT_LIST_END
+ }
+};
+
RISCVCPUProfile *riscv_profiles[] = {
&RVA22U64,
&RVA22S64,
+ &RVA23U64,
+ &RVA23S64,
NULL,
};
@@ -2635,6 +2489,54 @@ static RISCVCPUImpliedExtsRule ZVKSG_IMPLIED = {
},
};
+static RISCVCPUImpliedExtsRule SSCFG_IMPLIED = {
+ .ext = CPU_CFG_OFFSET(ext_ssccfg),
+ .implied_multi_exts = {
+ CPU_CFG_OFFSET(ext_smcsrind), CPU_CFG_OFFSET(ext_sscsrind),
+ CPU_CFG_OFFSET(ext_smcdeleg),
+
+ RISCV_IMPLIED_EXTS_RULE_END
+ },
+};
+
+static RISCVCPUImpliedExtsRule SUPM_IMPLIED = {
+ .ext = CPU_CFG_OFFSET(ext_supm),
+ .implied_multi_exts = {
+ CPU_CFG_OFFSET(ext_ssnpm), CPU_CFG_OFFSET(ext_smnpm),
+
+ RISCV_IMPLIED_EXTS_RULE_END
+ },
+};
+
+static RISCVCPUImpliedExtsRule SSPM_IMPLIED = {
+ .ext = CPU_CFG_OFFSET(ext_sspm),
+ .implied_multi_exts = {
+ CPU_CFG_OFFSET(ext_smnpm),
+
+ RISCV_IMPLIED_EXTS_RULE_END
+ },
+};
+
+static RISCVCPUImpliedExtsRule SMCTR_IMPLIED = {
+ .ext = CPU_CFG_OFFSET(ext_smctr),
+ .implied_misa_exts = RVS,
+ .implied_multi_exts = {
+ CPU_CFG_OFFSET(ext_sscsrind),
+
+ RISCV_IMPLIED_EXTS_RULE_END
+ },
+};
+
+static RISCVCPUImpliedExtsRule SSCTR_IMPLIED = {
+ .ext = CPU_CFG_OFFSET(ext_ssctr),
+ .implied_misa_exts = RVS,
+ .implied_multi_exts = {
+ CPU_CFG_OFFSET(ext_sscsrind),
+
+ RISCV_IMPLIED_EXTS_RULE_END
+ },
+};
+
RISCVCPUImpliedExtsRule *riscv_misa_ext_implied_rules[] = {
&RVA_IMPLIED, &RVD_IMPLIED, &RVF_IMPLIED,
&RVM_IMPLIED, &RVV_IMPLIED, NULL
@@ -2652,11 +2554,12 @@ RISCVCPUImpliedExtsRule *riscv_multi_ext_implied_rules[] = {
&ZVE64X_IMPLIED, &ZVFBFMIN_IMPLIED, &ZVFBFWMA_IMPLIED,
&ZVFH_IMPLIED, &ZVFHMIN_IMPLIED, &ZVKN_IMPLIED,
&ZVKNC_IMPLIED, &ZVKNG_IMPLIED, &ZVKNHB_IMPLIED,
- &ZVKS_IMPLIED, &ZVKSC_IMPLIED, &ZVKSG_IMPLIED,
+ &ZVKS_IMPLIED, &ZVKSC_IMPLIED, &ZVKSG_IMPLIED, &SSCFG_IMPLIED,
+ &SUPM_IMPLIED, &SSPM_IMPLIED, &SMCTR_IMPLIED, &SSCTR_IMPLIED,
NULL
};
-static Property riscv_cpu_properties[] = {
+static const Property riscv_cpu_properties[] = {
DEFINE_PROP_BOOL("debug", RISCVCPU, cfg.debug, true),
{.name = "pmu-mask", .info = &prop_pmu_mask},
@@ -2675,43 +2578,31 @@ static Property riscv_cpu_properties[] = {
{.name = "cbop_blocksize", .info = &prop_cbop_blksize},
{.name = "cboz_blocksize", .info = &prop_cboz_blksize},
- {.name = "mvendorid", .info = &prop_mvendorid},
- {.name = "mimpid", .info = &prop_mimpid},
- {.name = "marchid", .info = &prop_marchid},
+ {.name = "mvendorid", .info = &prop_mvendorid},
+ {.name = "mimpid", .info = &prop_mimpid},
+ {.name = "marchid", .info = &prop_marchid},
#ifndef CONFIG_USER_ONLY
DEFINE_PROP_UINT64("resetvec", RISCVCPU, env.resetvec, DEFAULT_RSTVEC),
+ DEFINE_PROP_UINT64("rnmi-interrupt-vector", RISCVCPU, env.rnmi_irqvec,
+ DEFAULT_RNMI_IRQVEC),
+ DEFINE_PROP_UINT64("rnmi-exception-vector", RISCVCPU, env.rnmi_excpvec,
+ DEFAULT_RNMI_EXCPVEC),
#endif
DEFINE_PROP_BOOL("short-isa-string", RISCVCPU, cfg.short_isa_string, false),
DEFINE_PROP_BOOL("rvv_ta_all_1s", RISCVCPU, cfg.rvv_ta_all_1s, false),
DEFINE_PROP_BOOL("rvv_ma_all_1s", RISCVCPU, cfg.rvv_ma_all_1s, false),
+ DEFINE_PROP_BOOL("rvv_vl_half_avl", RISCVCPU, cfg.rvv_vl_half_avl, false),
/*
* write_misa() is marked as experimental for now so mark
* it with -x and default to 'false'.
*/
DEFINE_PROP_BOOL("x-misa-w", RISCVCPU, cfg.misa_w, false),
- DEFINE_PROP_END_OF_LIST(),
};
-#if defined(TARGET_RISCV64)
-static void rva22u64_profile_cpu_init(Object *obj)
-{
- rv64i_bare_cpu_init(obj);
-
- RVA22U64.enabled = true;
-}
-
-static void rva22s64_profile_cpu_init(Object *obj)
-{
- rv64i_bare_cpu_init(obj);
-
- RVA22S64.enabled = true;
-}
-#endif
-
static const gchar *riscv_gdb_arch_name(CPUState *cs)
{
RISCVCPU *cpu = RISCV_CPU(cs);
@@ -2739,6 +2630,7 @@ static int64_t riscv_get_arch_id(CPUState *cs)
#include "hw/core/sysemu-cpu-ops.h"
static const struct SysemuCPUOps riscv_sysemu_ops = {
+ .has_work = riscv_cpu_has_work,
.get_phys_page_debug = riscv_cpu_get_phys_page_debug,
.write_elf64_note = riscv_cpu_write_elf64_note,
.write_elf32_note = riscv_cpu_write_elf32_note,
@@ -2746,7 +2638,7 @@ static const struct SysemuCPUOps riscv_sysemu_ops = {
};
#endif
-static void riscv_cpu_common_class_init(ObjectClass *c, void *data)
+static void riscv_cpu_common_class_init(ObjectClass *c, const void *data)
{
RISCVCPUClass *mcc = RISCV_CPU_CLASS(c);
CPUClass *cc = CPU_CLASS(c);
@@ -2760,8 +2652,6 @@ static void riscv_cpu_common_class_init(ObjectClass *c, void *data)
&mcc->parent_phases);
cc->class_by_name = riscv_cpu_class_by_name;
- cc->has_work = riscv_cpu_has_work;
- cc->mmu_index = riscv_cpu_mmu_index;
cc->dump_state = riscv_cpu_dump_state;
cc->set_pc = riscv_cpu_set_pc;
cc->get_pc = riscv_cpu_get_pc;
@@ -2774,16 +2664,94 @@ static void riscv_cpu_common_class_init(ObjectClass *c, void *data)
cc->get_arch_id = riscv_get_arch_id;
#endif
cc->gdb_arch_name = riscv_gdb_arch_name;
+#ifdef CONFIG_TCG
+ cc->tcg_ops = &riscv_tcg_ops;
+#endif /* CONFIG_TCG */
device_class_set_props(dc, riscv_cpu_properties);
}
-static void riscv_cpu_class_init(ObjectClass *c, void *data)
+static bool profile_extends(RISCVCPUProfile *trial, RISCVCPUProfile *parent)
+{
+ RISCVCPUProfile *curr;
+ if (!parent) {
+ return true;
+ }
+
+ curr = trial;
+ while (curr) {
+ if (curr == parent) {
+ return true;
+ }
+ curr = curr->u_parent;
+ }
+
+ curr = trial;
+ while (curr) {
+ if (curr == parent) {
+ return true;
+ }
+ curr = curr->s_parent;
+ }
+
+ return false;
+}
+
+static void riscv_cpu_class_base_init(ObjectClass *c, const void *data)
{
RISCVCPUClass *mcc = RISCV_CPU_CLASS(c);
+ RISCVCPUClass *pcc = RISCV_CPU_CLASS(object_class_get_parent(c));
- mcc->misa_mxl_max = (uint32_t)(uintptr_t)data;
- riscv_cpu_validate_misa_mxl(mcc);
+ if (pcc->def) {
+ mcc->def = g_memdup2(pcc->def, sizeof(*pcc->def));
+ } else {
+ mcc->def = g_new0(RISCVCPUDef, 1);
+ }
+
+ if (data) {
+ const RISCVCPUDef *def = data;
+ mcc->def->bare |= def->bare;
+ if (def->profile) {
+ assert(profile_extends(def->profile, mcc->def->profile));
+ assert(mcc->def->bare);
+ mcc->def->profile = def->profile;
+ }
+ if (def->misa_mxl_max) {
+ assert(def->misa_mxl_max <= MXL_RV128);
+ mcc->def->misa_mxl_max = def->misa_mxl_max;
+
+#ifndef CONFIG_USER_ONLY
+ /*
+ * Hack to simplify CPU class hierarchies that include both 32- and
+ * 64-bit models: reduce SV39/48/57/64 to SV32 for 32-bit models.
+ */
+ if (mcc->def->misa_mxl_max == MXL_RV32 &&
+ !valid_vm_1_10_32[mcc->def->cfg.max_satp_mode]) {
+ mcc->def->cfg.max_satp_mode = VM_1_10_SV32;
+ }
+#endif
+ }
+ if (def->priv_spec != RISCV_PROFILE_ATTR_UNUSED) {
+ assert(def->priv_spec <= PRIV_VERSION_LATEST);
+ mcc->def->priv_spec = def->priv_spec;
+ }
+ if (def->vext_spec != RISCV_PROFILE_ATTR_UNUSED) {
+ assert(def->vext_spec != 0);
+ mcc->def->vext_spec = def->vext_spec;
+ }
+ mcc->def->misa_ext |= def->misa_ext;
+
+ riscv_cpu_cfg_merge(&mcc->def->cfg, &def->cfg);
+
+ if (def->custom_csrs) {
+ assert(!mcc->def->custom_csrs);
+ mcc->def->custom_csrs = def->custom_csrs;
+ }
+ }
+
+ if (!object_class_is_abstract(c)) {
+ riscv_cpu_validate_misa_mxl(mcc);
+ }
}
static void riscv_isa_string_ext(RISCVCPU *cpu, char **isa_str,
@@ -2878,50 +2846,34 @@ void riscv_isa_write_fdt(RISCVCPU *cpu, void *fdt, char *nodename)
}
#endif
-#define DEFINE_CPU(type_name, misa_mxl_max, initfn) \
+#define DEFINE_ABSTRACT_RISCV_CPU(type_name, parent_type_name, ...) \
{ \
.name = (type_name), \
- .parent = TYPE_RISCV_CPU, \
- .instance_init = (initfn), \
- .class_init = riscv_cpu_class_init, \
- .class_data = (void *)(misa_mxl_max) \
+ .parent = (parent_type_name), \
+ .abstract = true, \
+ .class_data = &(const RISCVCPUDef) { \
+ .priv_spec = RISCV_PROFILE_ATTR_UNUSED, \
+ .vext_spec = RISCV_PROFILE_ATTR_UNUSED, \
+ .cfg.max_satp_mode = -1, \
+ __VA_ARGS__ \
+ }, \
}
-#define DEFINE_DYNAMIC_CPU(type_name, misa_mxl_max, initfn) \
+#define DEFINE_RISCV_CPU(type_name, parent_type_name, ...) \
{ \
.name = (type_name), \
- .parent = TYPE_RISCV_DYNAMIC_CPU, \
- .instance_init = (initfn), \
- .class_init = riscv_cpu_class_init, \
- .class_data = (void *)(misa_mxl_max) \
+ .parent = (parent_type_name), \
+ .class_data = &(const RISCVCPUDef) { \
+ .priv_spec = RISCV_PROFILE_ATTR_UNUSED, \
+ .vext_spec = RISCV_PROFILE_ATTR_UNUSED, \
+ .cfg.max_satp_mode = -1, \
+ __VA_ARGS__ \
+ }, \
}
-#define DEFINE_VENDOR_CPU(type_name, misa_mxl_max, initfn) \
- { \
- .name = (type_name), \
- .parent = TYPE_RISCV_VENDOR_CPU, \
- .instance_init = (initfn), \
- .class_init = riscv_cpu_class_init, \
- .class_data = (void *)(misa_mxl_max) \
- }
-
-#define DEFINE_BARE_CPU(type_name, misa_mxl_max, initfn) \
- { \
- .name = (type_name), \
- .parent = TYPE_RISCV_BARE_CPU, \
- .instance_init = (initfn), \
- .class_init = riscv_cpu_class_init, \
- .class_data = (void *)(misa_mxl_max) \
- }
-
-#define DEFINE_PROFILE_CPU(type_name, misa_mxl_max, initfn) \
- { \
- .name = (type_name), \
- .parent = TYPE_RISCV_BARE_CPU, \
- .instance_init = (initfn), \
- .class_init = riscv_cpu_class_init, \
- .class_data = (void *)(misa_mxl_max) \
- }
+#define DEFINE_PROFILE_CPU(type_name, parent_type_name, profile_) \
+ DEFINE_RISCV_CPU(type_name, parent_type_name, \
+ .profile = &(profile_))
static const TypeInfo riscv_cpu_type_infos[] = {
{
@@ -2930,53 +2882,310 @@ static const TypeInfo riscv_cpu_type_infos[] = {
.instance_size = sizeof(RISCVCPU),
.instance_align = __alignof(RISCVCPU),
.instance_init = riscv_cpu_init,
- .instance_post_init = riscv_cpu_post_init,
.abstract = true,
.class_size = sizeof(RISCVCPUClass),
.class_init = riscv_cpu_common_class_init,
+ .class_base_init = riscv_cpu_class_base_init,
},
- {
- .name = TYPE_RISCV_DYNAMIC_CPU,
- .parent = TYPE_RISCV_CPU,
- .abstract = true,
- },
- {
- .name = TYPE_RISCV_VENDOR_CPU,
- .parent = TYPE_RISCV_CPU,
- .abstract = true,
- },
- {
- .name = TYPE_RISCV_BARE_CPU,
- .parent = TYPE_RISCV_CPU,
- .instance_init = riscv_bare_cpu_init,
- .abstract = true,
- },
+
+ DEFINE_ABSTRACT_RISCV_CPU(TYPE_RISCV_DYNAMIC_CPU, TYPE_RISCV_CPU,
+ .cfg.mmu = true,
+ .cfg.pmp = true,
+ .priv_spec = PRIV_VERSION_LATEST,
+ ),
+
+ DEFINE_ABSTRACT_RISCV_CPU(TYPE_RISCV_VENDOR_CPU, TYPE_RISCV_CPU),
+ DEFINE_ABSTRACT_RISCV_CPU(TYPE_RISCV_BARE_CPU, TYPE_RISCV_CPU,
+ /*
+ * Bare CPUs do not inherit the timer and performance
+ * counters from the parent class (see riscv_cpu_init()
+ * for info on why the parent enables them).
+ *
+ * Users have to explicitly enable these counters for
+ * bare CPUs.
+ */
+ .bare = true,
+
+ /* Set to QEMU's first supported priv version */
+ .priv_spec = PRIV_VERSION_1_10_0,
+
+ /*
+ * Support all available satp_mode settings. By default
+ * only MBARE will be available if the user doesn't enable
+ * a mode manually (see riscv_cpu_satp_mode_finalize()).
+ */
+#ifdef TARGET_RISCV32
+ .cfg.max_satp_mode = VM_1_10_SV32,
+#else
+ .cfg.max_satp_mode = VM_1_10_SV57,
+#endif
+ ),
+
+ DEFINE_RISCV_CPU(TYPE_RISCV_CPU_MAX, TYPE_RISCV_DYNAMIC_CPU,
#if defined(TARGET_RISCV32)
- DEFINE_DYNAMIC_CPU(TYPE_RISCV_CPU_ANY, MXL_RV32, riscv_any_cpu_init),
- DEFINE_DYNAMIC_CPU(TYPE_RISCV_CPU_MAX, MXL_RV32, riscv_max_cpu_init),
- DEFINE_DYNAMIC_CPU(TYPE_RISCV_CPU_BASE32, MXL_RV32, rv32_base_cpu_init),
- DEFINE_VENDOR_CPU(TYPE_RISCV_CPU_IBEX, MXL_RV32, rv32_ibex_cpu_init),
- DEFINE_VENDOR_CPU(TYPE_RISCV_CPU_SIFIVE_E31, MXL_RV32, rv32_sifive_e_cpu_init),
- DEFINE_VENDOR_CPU(TYPE_RISCV_CPU_SIFIVE_E34, MXL_RV32, rv32_imafcu_nommu_cpu_init),
- DEFINE_VENDOR_CPU(TYPE_RISCV_CPU_SIFIVE_U34, MXL_RV32, rv32_sifive_u_cpu_init),
- DEFINE_BARE_CPU(TYPE_RISCV_CPU_RV32I, MXL_RV32, rv32i_bare_cpu_init),
- DEFINE_BARE_CPU(TYPE_RISCV_CPU_RV32E, MXL_RV32, rv32e_bare_cpu_init),
+ .misa_mxl_max = MXL_RV32,
+ .cfg.max_satp_mode = VM_1_10_SV32,
#elif defined(TARGET_RISCV64)
- DEFINE_DYNAMIC_CPU(TYPE_RISCV_CPU_ANY, MXL_RV64, riscv_any_cpu_init),
- DEFINE_DYNAMIC_CPU(TYPE_RISCV_CPU_MAX, MXL_RV64, riscv_max_cpu_init),
- DEFINE_DYNAMIC_CPU(TYPE_RISCV_CPU_BASE64, MXL_RV64, rv64_base_cpu_init),
- DEFINE_VENDOR_CPU(TYPE_RISCV_CPU_SIFIVE_E51, MXL_RV64, rv64_sifive_e_cpu_init),
- DEFINE_VENDOR_CPU(TYPE_RISCV_CPU_SIFIVE_U54, MXL_RV64, rv64_sifive_u_cpu_init),
- DEFINE_VENDOR_CPU(TYPE_RISCV_CPU_SHAKTI_C, MXL_RV64, rv64_sifive_u_cpu_init),
- DEFINE_VENDOR_CPU(TYPE_RISCV_CPU_THEAD_C906, MXL_RV64, rv64_thead_c906_cpu_init),
- DEFINE_VENDOR_CPU(TYPE_RISCV_CPU_VEYRON_V1, MXL_RV64, rv64_veyron_v1_cpu_init),
-#ifdef CONFIG_TCG
- DEFINE_DYNAMIC_CPU(TYPE_RISCV_CPU_BASE128, MXL_RV128, rv128_base_cpu_init),
+ .misa_mxl_max = MXL_RV64,
+ .cfg.max_satp_mode = VM_1_10_SV57,
+#endif
+ ),
+
+ DEFINE_ABSTRACT_RISCV_CPU(TYPE_RISCV_CPU_SIFIVE_E, TYPE_RISCV_VENDOR_CPU,
+ .misa_ext = RVI | RVM | RVA | RVC | RVU,
+ .priv_spec = PRIV_VERSION_1_10_0,
+ .cfg.max_satp_mode = VM_1_10_MBARE,
+ .cfg.ext_zifencei = true,
+ .cfg.ext_zicsr = true,
+ .cfg.pmp = true
+ ),
+
+ DEFINE_ABSTRACT_RISCV_CPU(TYPE_RISCV_CPU_SIFIVE_U, TYPE_RISCV_VENDOR_CPU,
+ .misa_ext = RVI | RVM | RVA | RVF | RVD | RVC | RVS | RVU,
+ .priv_spec = PRIV_VERSION_1_10_0,
+
+ .cfg.max_satp_mode = VM_1_10_SV39,
+ .cfg.ext_zifencei = true,
+ .cfg.ext_zicsr = true,
+ .cfg.mmu = true,
+ .cfg.pmp = true
+ ),
+
+#if defined(TARGET_RISCV32) || \
+ (defined(TARGET_RISCV64) && !defined(CONFIG_USER_ONLY))
+ DEFINE_RISCV_CPU(TYPE_RISCV_CPU_BASE32, TYPE_RISCV_DYNAMIC_CPU,
+ .cfg.max_satp_mode = VM_1_10_SV32,
+ .misa_mxl_max = MXL_RV32,
+ ),
+
+ DEFINE_RISCV_CPU(TYPE_RISCV_CPU_IBEX, TYPE_RISCV_VENDOR_CPU,
+ .misa_mxl_max = MXL_RV32,
+ .misa_ext = RVI | RVM | RVC | RVU,
+ .priv_spec = PRIV_VERSION_1_12_0,
+ .cfg.max_satp_mode = VM_1_10_MBARE,
+ .cfg.ext_zifencei = true,
+ .cfg.ext_zicsr = true,
+ .cfg.pmp = true,
+ .cfg.ext_smepmp = true,
+
+ .cfg.ext_zba = true,
+ .cfg.ext_zbb = true,
+ .cfg.ext_zbc = true,
+ .cfg.ext_zbs = true
+ ),
+
+ DEFINE_RISCV_CPU(TYPE_RISCV_CPU_SIFIVE_E31, TYPE_RISCV_CPU_SIFIVE_E,
+ .misa_mxl_max = MXL_RV32
+ ),
+ DEFINE_RISCV_CPU(TYPE_RISCV_CPU_SIFIVE_E34, TYPE_RISCV_CPU_SIFIVE_E,
+ .misa_mxl_max = MXL_RV32,
+ .misa_ext = RVF, /* IMAFCU */
+ ),
+
+ DEFINE_RISCV_CPU(TYPE_RISCV_CPU_SIFIVE_U34, TYPE_RISCV_CPU_SIFIVE_U,
+ .misa_mxl_max = MXL_RV32,
+ ),
+
+ DEFINE_RISCV_CPU(TYPE_RISCV_CPU_RV32I, TYPE_RISCV_BARE_CPU,
+ .misa_mxl_max = MXL_RV32,
+ .misa_ext = RVI
+ ),
+ DEFINE_RISCV_CPU(TYPE_RISCV_CPU_RV32E, TYPE_RISCV_BARE_CPU,
+ .misa_mxl_max = MXL_RV32,
+ .misa_ext = RVE
+ ),
+#endif
+
+#if (defined(TARGET_RISCV64) && !defined(CONFIG_USER_ONLY))
+ DEFINE_RISCV_CPU(TYPE_RISCV_CPU_MAX32, TYPE_RISCV_DYNAMIC_CPU,
+ .cfg.max_satp_mode = VM_1_10_SV32,
+ .misa_mxl_max = MXL_RV32,
+ ),
+#endif
+
+#if defined(TARGET_RISCV64)
+ DEFINE_RISCV_CPU(TYPE_RISCV_CPU_BASE64, TYPE_RISCV_DYNAMIC_CPU,
+ .cfg.max_satp_mode = VM_1_10_SV57,
+ .misa_mxl_max = MXL_RV64,
+ ),
+
+ DEFINE_RISCV_CPU(TYPE_RISCV_CPU_SIFIVE_E51, TYPE_RISCV_CPU_SIFIVE_E,
+ .misa_mxl_max = MXL_RV64
+ ),
+
+ DEFINE_RISCV_CPU(TYPE_RISCV_CPU_SIFIVE_U54, TYPE_RISCV_CPU_SIFIVE_U,
+ .misa_mxl_max = MXL_RV64,
+ ),
+
+ DEFINE_RISCV_CPU(TYPE_RISCV_CPU_SHAKTI_C, TYPE_RISCV_CPU_SIFIVE_U,
+ .misa_mxl_max = MXL_RV64,
+ ),
+
+ DEFINE_RISCV_CPU(TYPE_RISCV_CPU_THEAD_C906, TYPE_RISCV_VENDOR_CPU,
+ .misa_mxl_max = MXL_RV64,
+ .misa_ext = RVG | RVC | RVS | RVU,
+ .priv_spec = PRIV_VERSION_1_11_0,
+
+ .cfg.ext_zfa = true,
+ .cfg.ext_zfh = true,
+ .cfg.mmu = true,
+ .cfg.ext_xtheadba = true,
+ .cfg.ext_xtheadbb = true,
+ .cfg.ext_xtheadbs = true,
+ .cfg.ext_xtheadcmo = true,
+ .cfg.ext_xtheadcondmov = true,
+ .cfg.ext_xtheadfmemidx = true,
+ .cfg.ext_xtheadmac = true,
+ .cfg.ext_xtheadmemidx = true,
+ .cfg.ext_xtheadmempair = true,
+ .cfg.ext_xtheadsync = true,
+ .cfg.pmp = true,
+
+ .cfg.mvendorid = THEAD_VENDOR_ID,
+
+ .cfg.max_satp_mode = VM_1_10_SV39,
+#ifndef CONFIG_USER_ONLY
+ .custom_csrs = th_csr_list,
+#endif
+ ),
+
+ DEFINE_RISCV_CPU(TYPE_RISCV_CPU_TT_ASCALON, TYPE_RISCV_VENDOR_CPU,
+ .misa_mxl_max = MXL_RV64,
+ .misa_ext = RVG | RVC | RVS | RVU | RVH | RVV,
+ .priv_spec = PRIV_VERSION_1_13_0,
+ .vext_spec = VEXT_VERSION_1_00_0,
+
+ /* ISA extensions */
+ .cfg.mmu = true,
+ .cfg.vlenb = 256 >> 3,
+ .cfg.elen = 64,
+ .cfg.rvv_ma_all_1s = true,
+ .cfg.rvv_ta_all_1s = true,
+ .cfg.misa_w = true,
+ .cfg.pmp = true,
+ .cfg.cbom_blocksize = 64,
+ .cfg.cbop_blocksize = 64,
+ .cfg.cboz_blocksize = 64,
+ .cfg.ext_zic64b = true,
+ .cfg.ext_zicbom = true,
+ .cfg.ext_zicbop = true,
+ .cfg.ext_zicboz = true,
+ .cfg.ext_zicntr = true,
+ .cfg.ext_zicond = true,
+ .cfg.ext_zicsr = true,
+ .cfg.ext_zifencei = true,
+ .cfg.ext_zihintntl = true,
+ .cfg.ext_zihintpause = true,
+ .cfg.ext_zihpm = true,
+ .cfg.ext_zimop = true,
+ .cfg.ext_zawrs = true,
+ .cfg.ext_zfa = true,
+ .cfg.ext_zfbfmin = true,
+ .cfg.ext_zfh = true,
+ .cfg.ext_zfhmin = true,
+ .cfg.ext_zcb = true,
+ .cfg.ext_zcmop = true,
+ .cfg.ext_zba = true,
+ .cfg.ext_zbb = true,
+ .cfg.ext_zbs = true,
+ .cfg.ext_zkt = true,
+ .cfg.ext_zvbb = true,
+ .cfg.ext_zvbc = true,
+ .cfg.ext_zvfbfmin = true,
+ .cfg.ext_zvfbfwma = true,
+ .cfg.ext_zvfh = true,
+ .cfg.ext_zvfhmin = true,
+ .cfg.ext_zvkng = true,
+ .cfg.ext_smaia = true,
+ .cfg.ext_smstateen = true,
+ .cfg.ext_ssaia = true,
+ .cfg.ext_sscofpmf = true,
+ .cfg.ext_sstc = true,
+ .cfg.ext_svade = true,
+ .cfg.ext_svinval = true,
+ .cfg.ext_svnapot = true,
+ .cfg.ext_svpbmt = true,
+
+ .cfg.max_satp_mode = VM_1_10_SV57,
+ ),
+
+ DEFINE_RISCV_CPU(TYPE_RISCV_CPU_VEYRON_V1, TYPE_RISCV_VENDOR_CPU,
+ .misa_mxl_max = MXL_RV64,
+ .misa_ext = RVG | RVC | RVS | RVU | RVH,
+ .priv_spec = PRIV_VERSION_1_12_0,
+
+ /* ISA extensions */
+ .cfg.mmu = true,
+ .cfg.ext_zifencei = true,
+ .cfg.ext_zicsr = true,
+ .cfg.pmp = true,
+ .cfg.ext_zicbom = true,
+ .cfg.cbom_blocksize = 64,
+ .cfg.cboz_blocksize = 64,
+ .cfg.ext_zicboz = true,
+ .cfg.ext_smaia = true,
+ .cfg.ext_ssaia = true,
+ .cfg.ext_sscofpmf = true,
+ .cfg.ext_sstc = true,
+ .cfg.ext_svinval = true,
+ .cfg.ext_svnapot = true,
+ .cfg.ext_svpbmt = true,
+ .cfg.ext_smstateen = true,
+ .cfg.ext_zba = true,
+ .cfg.ext_zbb = true,
+ .cfg.ext_zbc = true,
+ .cfg.ext_zbs = true,
+ .cfg.ext_XVentanaCondOps = true,
+
+ .cfg.mvendorid = VEYRON_V1_MVENDORID,
+ .cfg.marchid = VEYRON_V1_MARCHID,
+ .cfg.mimpid = VEYRON_V1_MIMPID,
+
+ .cfg.max_satp_mode = VM_1_10_SV48,
+ ),
+
+ DEFINE_RISCV_CPU(TYPE_RISCV_CPU_XIANGSHAN_NANHU, TYPE_RISCV_VENDOR_CPU,
+ .misa_mxl_max = MXL_RV64,
+ .misa_ext = RVG | RVC | RVB | RVS | RVU,
+ .priv_spec = PRIV_VERSION_1_12_0,
+
+ /* ISA extensions */
+ .cfg.ext_zbc = true,
+ .cfg.ext_zbkb = true,
+ .cfg.ext_zbkc = true,
+ .cfg.ext_zbkx = true,
+ .cfg.ext_zknd = true,
+ .cfg.ext_zkne = true,
+ .cfg.ext_zknh = true,
+ .cfg.ext_zksed = true,
+ .cfg.ext_zksh = true,
+ .cfg.ext_svinval = true,
+
+ .cfg.mmu = true,
+ .cfg.pmp = true,
+
+ .cfg.max_satp_mode = VM_1_10_SV39,
+ ),
+
+#if defined(CONFIG_TCG) && !defined(CONFIG_USER_ONLY)
+ DEFINE_RISCV_CPU(TYPE_RISCV_CPU_BASE128, TYPE_RISCV_DYNAMIC_CPU,
+ .cfg.max_satp_mode = VM_1_10_SV57,
+ .misa_mxl_max = MXL_RV128,
+ ),
#endif /* CONFIG_TCG */
- DEFINE_BARE_CPU(TYPE_RISCV_CPU_RV64I, MXL_RV64, rv64i_bare_cpu_init),
- DEFINE_BARE_CPU(TYPE_RISCV_CPU_RV64E, MXL_RV64, rv64e_bare_cpu_init),
- DEFINE_PROFILE_CPU(TYPE_RISCV_CPU_RVA22U64, MXL_RV64, rva22u64_profile_cpu_init),
- DEFINE_PROFILE_CPU(TYPE_RISCV_CPU_RVA22S64, MXL_RV64, rva22s64_profile_cpu_init),
+ DEFINE_RISCV_CPU(TYPE_RISCV_CPU_RV64I, TYPE_RISCV_BARE_CPU,
+ .misa_mxl_max = MXL_RV64,
+ .misa_ext = RVI
+ ),
+ DEFINE_RISCV_CPU(TYPE_RISCV_CPU_RV64E, TYPE_RISCV_BARE_CPU,
+ .misa_mxl_max = MXL_RV64,
+ .misa_ext = RVE
+ ),
+
+ DEFINE_PROFILE_CPU(TYPE_RISCV_CPU_RVA22U64, TYPE_RISCV_CPU_RV64I, RVA22U64),
+ DEFINE_PROFILE_CPU(TYPE_RISCV_CPU_RVA22S64, TYPE_RISCV_CPU_RV64I, RVA22S64),
+ DEFINE_PROFILE_CPU(TYPE_RISCV_CPU_RVA23U64, TYPE_RISCV_CPU_RV64I, RVA23U64),
+ DEFINE_PROFILE_CPU(TYPE_RISCV_CPU_RVA23S64, TYPE_RISCV_CPU_RV64I, RVA23S64),
#endif /* TARGET_RISCV64 */
};
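
The table above leans on a C99 pattern: a variadic macro builds a static
compound literal whose default designated initializers (.priv_spec,
.cfg.max_satp_mode) are overridden by whatever the caller passes through
__VA_ARGS__, since later designators win. A minimal standalone sketch of that
mechanism, assuming nothing from QEMU (CpuDef, TypeEntry, DEFINE_CPU and the
model name are invented for illustration):

    #include <stdio.h>

    typedef struct {
        int misa_mxl_max;
        int priv_spec;      /* -1 stands in for RISCV_PROFILE_ATTR_UNUSED */
        int max_satp_mode;
    } CpuDef;

    typedef struct {
        const char *name;
        const CpuDef *def;
    } TypeEntry;

    /*
     * Defaults come first; anything passed in __VA_ARGS__ is a later
     * designated initializer and therefore overrides them.
     */
    #define DEFINE_CPU(name_, ...)              \
        { .name = (name_),                      \
          .def = &(const CpuDef) {              \
              .priv_spec = -1,                  \
              .max_satp_mode = -1,              \
              __VA_ARGS__                       \
          } }

    static const TypeEntry types[] = {
        DEFINE_CPU("hypothetical-cpu", .misa_mxl_max = 2, .max_satp_mode = 10),
    };

    int main(void)
    {
        /* prints: hypothetical-cpu: mxl=2 priv=-1 satp=10 */
        printf("%s: mxl=%d priv=%d satp=%d\n", types[0].name,
               types[0].def->misa_mxl_max, types[0].def->priv_spec,
               types[0].def->max_satp_mode);
        return 0;
    }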
diff --git a/target/riscv/cpu.h b/target/riscv/cpu.h
index 1619c3a..229ade9 100644
--- a/target/riscv/cpu.h
+++ b/target/riscv/cpu.h
@@ -23,7 +23,9 @@
#include "hw/core/cpu.h"
#include "hw/registerfields.h"
#include "hw/qdev-properties.h"
+#include "exec/cpu-common.h"
#include "exec/cpu-defs.h"
+#include "exec/cpu-interrupt.h"
#include "exec/gdbstub.h"
#include "qemu/cpu-float.h"
#include "qom/object.h"
@@ -44,10 +46,9 @@ typedef struct CPUArchState CPURISCVState;
#endif
/*
- * RISC-V-specific extra insn start words:
- * 1: Original instruction opcode
+ * b0: Whether an instruction always raises a store/AMO fault or not.
*/
-#define TARGET_INSN_START_EXTRA_WORDS 1
+#define RISCV_UW2_ALWAYS_STORE_AMO 1
#define RV(x) ((target_ulong)1 << (x - 'A'))
@@ -66,7 +67,6 @@ typedef struct CPUArchState CPURISCVState;
#define RVS RV('S')
#define RVU RV('U')
#define RVH RV('H')
-#define RVJ RV('J')
#define RVG RV('G')
#define RVB RV('B')
@@ -75,9 +75,11 @@ const char *riscv_get_misa_ext_name(uint32_t bit);
const char *riscv_get_misa_ext_description(uint32_t bit);
#define CPU_CFG_OFFSET(_prop) offsetof(struct RISCVCPUConfig, _prop)
+#define ENV_CSR_OFFSET(_csr) offsetof(CPURISCVState, _csr)
typedef struct riscv_cpu_profile {
- struct riscv_cpu_profile *parent;
+ struct riscv_cpu_profile *u_parent;
+ struct riscv_cpu_profile *s_parent;
const char *name;
uint32_t misa_ext;
bool enabled;
@@ -124,6 +126,14 @@ typedef enum {
EXT_STATUS_DIRTY,
} RISCVExtStatus;
+/* Enum holds PMM field values for Zjpm v1.0 extension */
+typedef enum {
+ PMM_FIELD_DISABLED = 0,
+ PMM_FIELD_RESERVED = 1,
+ PMM_FIELD_PMLEN7 = 2,
+ PMM_FIELD_PMLEN16 = 3,
+} RISCVPmPmm;
+
typedef struct riscv_cpu_implied_exts_rule {
#ifndef CONFIG_USER_ONLY
/*
@@ -230,12 +240,24 @@ struct CPUArchState {
target_ulong jvt;
+ /* elp state for zicfilp extension */
+ bool elp;
+ /* shadow stack register for zicfiss extension */
+ target_ulong ssp;
+ /* env place holder for extra word 2 during unwind */
+ target_ulong excp_uw2;
+ /* sw check code for sw check exception */
+ target_ulong sw_check_code;
#ifdef CONFIG_USER_ONLY
uint32_t elf_flags;
#endif
-#ifndef CONFIG_USER_ONLY
target_ulong priv;
+ /* CSRs for execution environment configuration */
+ uint64_t menvcfg;
+ target_ulong senvcfg;
+
+#ifndef CONFIG_USER_ONLY
/* This contains QEMU specific information about the virt state. */
bool virt_enabled;
target_ulong geilen;
@@ -288,6 +310,15 @@ struct CPUArchState {
target_ulong mcause;
target_ulong mtval; /* since: priv-1.10.0 */
+ uint64_t mctrctl;
+ uint32_t sctrdepth;
+ uint32_t sctrstatus;
+ uint64_t vsctrctl;
+
+ uint64_t ctr_src[16 << SCTRDEPTH_MAX];
+ uint64_t ctr_dst[16 << SCTRDEPTH_MAX];
+ uint64_t ctr_data[16 << SCTRDEPTH_MAX];
+
/* Machine and Supervisor interrupt priorities */
uint8_t miprio[64];
uint8_t siprio[64];
@@ -368,6 +399,7 @@ struct CPUArchState {
uint32_t scounteren;
uint32_t mcounteren;
+ uint32_t scountinhibit;
uint32_t mcountinhibit;
/* PMU cycle & instret privilege mode filtering */
@@ -434,27 +466,11 @@ struct CPUArchState {
/* True if in debugger mode. */
bool debugger;
- /*
- * CSRs for PointerMasking extension
- */
- target_ulong mmte;
- target_ulong mpmmask;
- target_ulong mpmbase;
- target_ulong spmmask;
- target_ulong spmbase;
- target_ulong upmmask;
- target_ulong upmbase;
-
- /* CSRs for execution environment configuration */
- uint64_t menvcfg;
uint64_t mstateen[SMSTATEEN_MAX_COUNT];
uint64_t hstateen[SMSTATEEN_MAX_COUNT];
uint64_t sstateen[SMSTATEEN_MAX_COUNT];
- target_ulong senvcfg;
uint64_t henvcfg;
#endif
- target_ulong cur_pmmask;
- target_ulong cur_pmbase;
/* Fields from here on are preserved across CPU reset. */
QEMUTimer *stimer; /* Internal timer for S-mode interrupt */
@@ -472,9 +488,31 @@ struct CPUArchState {
uint64_t kvm_timer_state;
uint64_t kvm_timer_frequency;
#endif /* CONFIG_KVM */
+
+ /* RNMI */
+ target_ulong mnscratch;
+ target_ulong mnepc;
+ target_ulong mncause; /* mncause without bit XLEN-1 set to 1 */
+ target_ulong mnstatus;
+ target_ulong rnmip;
+ uint64_t rnmi_irqvec;
+ uint64_t rnmi_excpvec;
};
/*
+ * map is a 16-bit bitmap: the most significant set bit in map is the maximum
+ * satp mode that is supported. It may be chosen by the user and must respect
+ * what QEMU implements (valid_1_10_32/64) and what the hardware is capable
+ * of.
+ *
+ * init is a 16-bit bitmap used to make sure the user selected a correct
+ * configuration as per the specification.
+ */
+typedef struct {
+ uint16_t map, init;
+} RISCVSATPModes;
+
+/*
* RISCVCPU:
* @env: #CPURISCVState
*
@@ -490,6 +528,7 @@ struct ArchCPU {
/* Configuration Settings */
RISCVCPUConfig cfg;
+ RISCVSATPModes satp_modes;
QEMUTimer *pmu_timer;
/* A bitmask of Available programmable counters */
@@ -499,6 +538,19 @@ struct ArchCPU {
const GPtrArray *decoders;
};
+typedef struct RISCVCSR RISCVCSR;
+
+typedef struct RISCVCPUDef {
+ RISCVMXL misa_mxl_max; /* max mxl for this cpu */
+ RISCVCPUProfile *profile;
+ uint32_t misa_ext;
+ int priv_spec;
+ int32_t vext_spec;
+ RISCVCPUConfig cfg;
+ bool bare;
+ const RISCVCSR *custom_csrs;
+} RISCVCPUDef;
+
/**
* RISCVCPUClass:
* @parent_realize: The parent class' realize handler.
@@ -511,7 +563,7 @@ struct RISCVCPUClass {
DeviceRealize parent_realize;
ResettablePhases parent_phases;
- uint32_t misa_mxl_max; /* max mxl for this cpu */
+ RISCVCPUDef *def;
};
static inline int riscv_has_ext(CPURISCVState *env, target_ulong ext)
@@ -544,6 +596,9 @@ void riscv_cpu_set_geilen(CPURISCVState *env, target_ulong geilen);
bool riscv_cpu_vector_enabled(CPURISCVState *env);
void riscv_cpu_set_virt_enabled(CPURISCVState *env, bool enable);
int riscv_env_mmu_index(CPURISCVState *env, bool ifetch);
+bool cpu_get_fcfien(CPURISCVState *env);
+bool cpu_get_bcfien(CPURISCVState *env);
+bool riscv_env_smode_dbltrp_enabled(CPURISCVState *env, bool virt);
G_NORETURN void riscv_cpu_do_unaligned_access(CPUState *cs, vaddr addr,
MMUAccessType access_type,
int mmu_idx, uintptr_t retaddr);
@@ -568,6 +623,7 @@ void riscv_cpu_swap_hypervisor_regs(CPURISCVState *env);
int riscv_cpu_claim_interrupts(RISCVCPU *cpu, uint64_t interrupts);
uint64_t riscv_cpu_update_mip(CPURISCVState *env, uint64_t mask,
uint64_t value);
+void riscv_cpu_set_rnmi(RISCVCPU *cpu, uint32_t irq, bool level);
void riscv_cpu_interrupt(CPURISCVState *env);
#define BOOL_TO_MASK(x) (-!!(x)) /* helper for riscv_cpu_update_mip value */
void riscv_cpu_set_rdtime_fn(CPURISCVState *env, uint64_t (*fn)(void *),
@@ -585,15 +641,21 @@ RISCVException smstateen_acc_ok(CPURISCVState *env, int index, uint64_t bit);
void riscv_cpu_set_mode(CPURISCVState *env, target_ulong newpriv, bool virt_en);
+void riscv_ctr_add_entry(CPURISCVState *env, target_long src, target_long dst,
+ enum CTRType type, target_ulong prev_priv, bool prev_virt);
+void riscv_ctr_clear(CPURISCVState *env);
+
void riscv_translate_init(void);
+void riscv_translate_code(CPUState *cs, TranslationBlock *tb,
+ int *max_insns, vaddr pc, void *host_pc);
+
G_NORETURN void riscv_raise_exception(CPURISCVState *env,
- uint32_t exception, uintptr_t pc);
+ RISCVException exception,
+ uintptr_t pc);
target_ulong riscv_cpu_get_fflags(CPURISCVState *env);
void riscv_cpu_set_fflags(CPURISCVState *env, target_ulong);
-#include "exec/cpu-all.h"
-
FIELD(TB_FLAGS, MEM_IDX, 0, 3)
FIELD(TB_FLAGS, FS, 3, 2)
/* Vector flags */
@@ -608,14 +670,22 @@ FIELD(TB_FLAGS, XL, 16, 2)
/* If PointerMasking should be applied */
FIELD(TB_FLAGS, PM_MASK_ENABLED, 18, 1)
FIELD(TB_FLAGS, PM_BASE_ENABLED, 19, 1)
-FIELD(TB_FLAGS, VTA, 20, 1)
-FIELD(TB_FLAGS, VMA, 21, 1)
+FIELD(TB_FLAGS, VTA, 18, 1)
+FIELD(TB_FLAGS, VMA, 19, 1)
/* Native debug itrigger */
-FIELD(TB_FLAGS, ITRIGGER, 22, 1)
+FIELD(TB_FLAGS, ITRIGGER, 20, 1)
/* Virtual mode enabled */
-FIELD(TB_FLAGS, VIRT_ENABLED, 23, 1)
-FIELD(TB_FLAGS, PRIV, 24, 2)
-FIELD(TB_FLAGS, AXL, 26, 2)
+FIELD(TB_FLAGS, VIRT_ENABLED, 21, 1)
+FIELD(TB_FLAGS, PRIV, 22, 2)
+FIELD(TB_FLAGS, AXL, 24, 2)
+/* zicfilp needs a TB flag to track indirect branches */
+FIELD(TB_FLAGS, FCFI_ENABLED, 26, 1)
+FIELD(TB_FLAGS, FCFI_LP_EXPECTED, 27, 1)
+/* zicfiss needs a TB flag so that correct TB is located based on tb flags */
+FIELD(TB_FLAGS, BCFI_ENABLED, 28, 1)
+/* If pointer masking should be applied and address sign extended */
+FIELD(TB_FLAGS, PM_PMM, 29, 2)
+FIELD(TB_FLAGS, PM_SIGNEXTEND, 31, 1)
#ifdef TARGET_RISCV32
#define riscv_cpu_mxl(env) ((void)(env), MXL_RV32)
@@ -709,11 +779,26 @@ static inline RISCVMXL riscv_cpu_sxl(CPURISCVState *env)
#ifdef CONFIG_USER_ONLY
return env->misa_mxl;
#else
- return get_field(env->mstatus, MSTATUS64_SXL);
+ if (env->misa_mxl != MXL_RV32) {
+ return get_field(env->mstatus, MSTATUS64_SXL);
+ }
#endif
+ return MXL_RV32;
}
#endif
+static inline bool riscv_cpu_allow_16bit_insn(const RISCVCPUConfig *cfg,
+ target_long priv_ver,
+ uint32_t misa_ext)
+{
+ /* In priv spec version 1.12 or newer, C always implies Zca */
+ if (priv_ver >= PRIV_VERSION_1_12_0) {
+ return cfg->ext_zca;
+ } else {
+ return misa_ext & RVC;
+ }
+}
+
/*
* Encode LMUL to lmul as follows:
* LMUL vlmul lmul
@@ -745,17 +830,19 @@ static inline uint32_t vext_get_vlmax(uint32_t vlenb, uint32_t vsew,
return vlen >> (vsew + 3 - lmul);
}
-void cpu_get_tb_cpu_state(CPURISCVState *env, vaddr *pc,
- uint64_t *cs_base, uint32_t *pflags);
-
-void riscv_cpu_update_mask(CPURISCVState *env);
bool riscv_cpu_is_32bit(RISCVCPU *cpu);
+bool riscv_cpu_virt_mem_enabled(CPURISCVState *env);
+RISCVPmPmm riscv_pm_get_pmm(CPURISCVState *env);
+RISCVPmPmm riscv_pm_get_virt_pmm(CPURISCVState *env);
+uint32_t riscv_pm_get_pmlen(RISCVPmPmm pmm);
+
RISCVException riscv_csrr(CPURISCVState *env, int csrno,
target_ulong *ret_value);
+
RISCVException riscv_csrrw(CPURISCVState *env, int csrno,
- target_ulong *ret_value,
- target_ulong new_value, target_ulong write_mask);
+ target_ulong *ret_value, target_ulong new_value,
+ target_ulong write_mask, uintptr_t ra);
RISCVException riscv_csrrw_debug(CPURISCVState *env, int csrno,
target_ulong *ret_value,
target_ulong new_value,
@@ -764,13 +851,13 @@ RISCVException riscv_csrrw_debug(CPURISCVState *env, int csrno,
static inline void riscv_csr_write(CPURISCVState *env, int csrno,
target_ulong val)
{
- riscv_csrrw(env, csrno, NULL, val, MAKE_64BIT_MASK(0, TARGET_LONG_BITS));
+ riscv_csrrw(env, csrno, NULL, val, MAKE_64BIT_MASK(0, TARGET_LONG_BITS), 0);
}
static inline target_ulong riscv_csr_read(CPURISCVState *env, int csrno)
{
target_ulong val = 0;
- riscv_csrrw(env, csrno, &val, 0, 0);
+ riscv_csrrw(env, csrno, &val, 0, 0, 0);
return val;
}
@@ -779,7 +866,8 @@ typedef RISCVException (*riscv_csr_predicate_fn)(CPURISCVState *env,
typedef RISCVException (*riscv_csr_read_fn)(CPURISCVState *env, int csrno,
target_ulong *ret_value);
typedef RISCVException (*riscv_csr_write_fn)(CPURISCVState *env, int csrno,
- target_ulong new_value);
+ target_ulong new_value,
+ uintptr_t ra);
typedef RISCVException (*riscv_csr_op_fn)(CPURISCVState *env, int csrno,
target_ulong *ret_value,
target_ulong new_value,
@@ -788,8 +876,8 @@ typedef RISCVException (*riscv_csr_op_fn)(CPURISCVState *env, int csrno,
RISCVException riscv_csrr_i128(CPURISCVState *env, int csrno,
Int128 *ret_value);
RISCVException riscv_csrrw_i128(CPURISCVState *env, int csrno,
- Int128 *ret_value,
- Int128 new_value, Int128 write_mask);
+ Int128 *ret_value, Int128 new_value,
+ Int128 write_mask, uintptr_t ra);
typedef RISCVException (*riscv_csr_read128_fn)(CPURISCVState *env, int csrno,
Int128 *ret_value);
@@ -808,6 +896,12 @@ typedef struct {
uint32_t min_priv_ver;
} riscv_csr_operations;
+struct RISCVCSR {
+ int csrno;
+ bool (*insertion_test)(RISCVCPU *cpu);
+ riscv_csr_operations csr_ops;
+};
+
/* CSR function table constants */
enum {
CSR_TABLE_SIZE = 0x1000
@@ -862,18 +956,17 @@ extern riscv_csr_operations csr_ops[CSR_TABLE_SIZE];
extern const bool valid_vm_1_10_32[], valid_vm_1_10_64[];
void riscv_get_csr_ops(int csrno, riscv_csr_operations *ops);
-void riscv_set_csr_ops(int csrno, riscv_csr_operations *ops);
+void riscv_set_csr_ops(int csrno, const riscv_csr_operations *ops);
void riscv_cpu_register_gdb_regs_for_features(CPUState *cs);
target_ulong riscv_new_csr_seed(target_ulong new_value,
target_ulong write_mask);
-uint8_t satp_mode_max_from_map(uint32_t map);
const char *satp_mode_str(uint8_t satp_mode, bool is_32_bit);
-/* Implemented in th_csr.c */
-void th_register_custom_csrs(RISCVCPU *cpu);
+/* In th_csr.c */
+extern const RISCVCSR th_csr_list[];
const char *priv_spec_to_str(int priv_version);
#endif /* RISCV_CPU_H */
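
Custom CSRs are now plain data: a const RISCVCSR array pairs a CSR number with
an insertion test and its riscv_csr_operations, replacing the imperative
th_register_custom_csrs() call. A hedged sketch of what one entry might look
like in the cpu.h context above; the CSR number, handler names, predicate and
the zeroed-sentinel termination are all assumptions for illustration (the real
T-Head entries live in th_csr.c):

    /* Illustrative only; follows the RISCVCSR layout declared above. */
    static RISCVException any(CPURISCVState *env, int csrno)
    {
        return RISCV_EXCP_NONE;           /* predicate: always accessible */
    }

    static RISCVException read_myvendor(CPURISCVState *env, int csrno,
                                        target_ulong *val)
    {
        *val = 0;                         /* hypothetical read-as-zero CSR */
        return RISCV_EXCP_NONE;
    }

    static RISCVException write_myvendor(CPURISCVState *env, int csrno,
                                         target_ulong val, uintptr_t ra)
    {
        return RISCV_EXCP_NONE;           /* writes ignored in this sketch */
    }

    static bool test_myvendor(RISCVCPU *cpu)
    {
        /* only insert the CSR when some vendor extension is enabled */
        return cpu->cfg.ext_XVentanaCondOps;
    }

    static const RISCVCSR myvendor_csr_list[] = {
        { .csrno = 0x8c0,                 /* hypothetical custom number */
          .insertion_test = test_myvendor,
          .csr_ops = { "myvendor.csr", any, read_myvendor, write_myvendor } },
        { 0 }                             /* assumed zeroed sentinel */
    };

Note that the write handler takes the new uintptr_t ra argument introduced by
the riscv_csr_write_fn change above.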
diff --git a/target/riscv/cpu_bits.h b/target/riscv/cpu_bits.h
index 32b068f..a30317c 100644
--- a/target/riscv/cpu_bits.h
+++ b/target/riscv/cpu_bits.h
@@ -34,6 +34,9 @@
/* Control and Status Registers */
+/* zicfiss user ssp csr */
+#define CSR_SSP 0x011
+
/* User Trap Setup */
#define CSR_USTATUS 0x000
#define CSR_UIE 0x004
@@ -170,6 +173,13 @@
#define CSR_MISELECT 0x350
#define CSR_MIREG 0x351
+/* Machine Indirect Register Alias */
+#define CSR_MIREG2 0x352
+#define CSR_MIREG3 0x353
+#define CSR_MIREG4 0x355
+#define CSR_MIREG5 0x356
+#define CSR_MIREG6 0x357
+
/* Machine-Level Interrupts (AIA) */
#define CSR_MTOPEI 0x35c
#define CSR_MTOPI 0xfb0
@@ -200,6 +210,9 @@
#define CSR_SSTATEEN2 0x10E
#define CSR_SSTATEEN3 0x10F
+/* Supervisor Counter Delegation */
+#define CSR_SCOUNTINHIBIT 0x120
+
/* Supervisor Trap Handling */
#define CSR_SSCRATCH 0x140
#define CSR_SEPC 0x141
@@ -219,6 +232,13 @@
#define CSR_SISELECT 0x150
#define CSR_SIREG 0x151
+/* Supervisor Indirect Register Alias */
+#define CSR_SIREG2 0x152
+#define CSR_SIREG3 0x153
+#define CSR_SIREG4 0x155
+#define CSR_SIREG5 0x156
+#define CSR_SIREG6 0x157
+
/* Supervisor-Level Interrupts (AIA) */
#define CSR_STOPEI 0x15c
#define CSR_STOPI 0xdb0
@@ -227,6 +247,17 @@
#define CSR_SIEH 0x114
#define CSR_SIPH 0x154
+/* Machine-Level Control transfer records CSRs */
+#define CSR_MCTRCTL 0x34e
+
+/* Supervisor-Level Control transfer records CSRs */
+#define CSR_SCTRCTL 0x14e
+#define CSR_SCTRSTATUS 0x14f
+#define CSR_SCTRDEPTH 0x15f
+
+/* VS-Level Control transfer records CSRs */
+#define CSR_VSCTRCTL 0x24e
+
/* Hypervisor CSRs */
#define CSR_HSTATUS 0x600
#define CSR_HEDELEG 0x602
@@ -285,6 +316,13 @@
#define CSR_VSISELECT 0x250
#define CSR_VSIREG 0x251
+/* Virtual Supervisor Indirect Alias */
+#define CSR_VSIREG2 0x252
+#define CSR_VSIREG3 0x253
+#define CSR_VSIREG4 0x255
+#define CSR_VSIREG5 0x256
+#define CSR_VSIREG6 0x257
+
/* VS-Level Interrupts (H-extension with AIA) */
#define CSR_VSTOPEI 0x25c
#define CSR_VSTOPI 0xeb0
@@ -317,6 +355,7 @@
#define SMSTATEEN0_CS (1ULL << 0)
#define SMSTATEEN0_FCSR (1ULL << 1)
#define SMSTATEEN0_JVT (1ULL << 2)
+#define SMSTATEEN0_CTR (1ULL << 54)
#define SMSTATEEN0_P1P13 (1ULL << 56)
#define SMSTATEEN0_HSCONTXT (1ULL << 57)
#define SMSTATEEN0_IMSIC (1ULL << 58)
@@ -350,6 +389,12 @@
#define CSR_PMPADDR14 0x3be
#define CSR_PMPADDR15 0x3bf
+/* RNMI */
+#define CSR_MNSCRATCH 0x740
+#define CSR_MNEPC 0x741
+#define CSR_MNCAUSE 0x742
+#define CSR_MNSTATUS 0x744
+
/* Debug/Trace Registers (shared with Debug Mode) */
#define CSR_TSELECT 0x7a0
#define CSR_TDATA1 0x7a1
@@ -494,37 +539,6 @@
#define CSR_MHPMCOUNTER30H 0xb9e
#define CSR_MHPMCOUNTER31H 0xb9f
-/*
- * User PointerMasking registers
- * NB: actual CSR numbers might be changed in future
- */
-#define CSR_UMTE 0x4c0
-#define CSR_UPMMASK 0x4c1
-#define CSR_UPMBASE 0x4c2
-
-/*
- * Machine PointerMasking registers
- * NB: actual CSR numbers might be changed in future
- */
-#define CSR_MMTE 0x3c0
-#define CSR_MPMMASK 0x3c1
-#define CSR_MPMBASE 0x3c2
-
-/*
- * Supervisor PointerMaster registers
- * NB: actual CSR numbers might be changed in future
- */
-#define CSR_SMTE 0x1c0
-#define CSR_SPMMASK 0x1c1
-#define CSR_SPMBASE 0x1c2
-
-/*
- * Hypervisor PointerMaster registers
- * NB: actual CSR numbers might be changed in future
- */
-#define CSR_VSMTE 0x2c0
-#define CSR_VSPMMASK 0x2c1
-#define CSR_VSPMBASE 0x2c2
#define CSR_SCOUNTOVF 0xda0
/* Crypto Extension */
@@ -552,8 +566,12 @@
#define MSTATUS_TVM 0x00100000 /* since: priv-1.10 */
#define MSTATUS_TW 0x00200000 /* since: priv-1.10 */
#define MSTATUS_TSR 0x00400000 /* since: priv-1.10 */
+#define MSTATUS_SPELP 0x00800000 /* zicfilp */
+#define MSTATUS_SDT 0x01000000
+#define MSTATUS_MPELP 0x020000000000 /* zicfilp */
#define MSTATUS_GVA 0x4000000000ULL
#define MSTATUS_MPV 0x8000000000ULL
+#define MSTATUS_MDT 0x40000000000ULL /* Smdbltrp extension */
#define MSTATUS64_UXL 0x0000000300000000ULL
#define MSTATUS64_SXL 0x0000000C00000000ULL
@@ -582,6 +600,8 @@ typedef enum {
#define SSTATUS_XS 0x00018000
#define SSTATUS_SUM 0x00040000 /* since: priv-1.10 */
#define SSTATUS_MXR 0x00080000
+#define SSTATUS_SPELP MSTATUS_SPELP /* zicfilp */
+#define SSTATUS_SDT MSTATUS_SDT
#define SSTATUS64_UXL 0x0000000300000000ULL
@@ -598,7 +618,9 @@ typedef enum {
#define HSTATUS_VTVM 0x00100000
#define HSTATUS_VTW 0x00200000
#define HSTATUS_VTSR 0x00400000
+#define HSTATUS_HUKTE 0x01000000
#define HSTATUS_VSXL 0x300000000
+#define HSTATUS_HUPMM 0x3000000000000
#define HSTATUS32_WPRI 0xFF8FF87E
#define HSTATUS64_WPRI 0xFFFFFFFFFF8FF87EULL
@@ -627,6 +649,12 @@ typedef enum {
#define SATP64_ASID 0x0FFFF00000000000ULL
#define SATP64_PPN 0x00000FFFFFFFFFFFULL
+/* RNMI mnstatus CSR mask */
+#define MNSTATUS_NMIE 0x00000008
+#define MNSTATUS_MNPV 0x00000080
+#define MNSTATUS_MNPELP 0x00000200
+#define MNSTATUS_MNPP 0x00001800
+
/* VM modes (satp.mode) privileged ISA 1.10 */
#define VM_1_10_MBARE 0
#define VM_1_10_SV32 1
@@ -662,6 +690,12 @@ typedef enum {
/* Default Reset Vector address */
#define DEFAULT_RSTVEC 0x1000
+/* Default RNMI Interrupt Vector address */
+#define DEFAULT_RNMI_IRQVEC 0x0
+
+/* Default RNMI Exception Vector address */
+#define DEFAULT_RNMI_EXCPVEC 0x0
+
/* Exception causes */
typedef enum RISCVException {
RISCV_EXCP_NONE = -1, /* sentinel value */
@@ -680,6 +714,7 @@ typedef enum RISCVException {
RISCV_EXCP_INST_PAGE_FAULT = 0xc, /* since: priv-1.10.0 */
RISCV_EXCP_LOAD_PAGE_FAULT = 0xd, /* since: priv-1.10.0 */
RISCV_EXCP_STORE_PAGE_FAULT = 0xf, /* since: priv-1.10.0 */
+ RISCV_EXCP_DOUBLE_TRAP = 0x10,
RISCV_EXCP_SW_CHECK = 0x12, /* since: priv-1.13.0 */
RISCV_EXCP_HW_ERR = 0x13, /* since: priv-1.13.0 */
RISCV_EXCP_INST_GUEST_PAGE_FAULT = 0x14,
@@ -689,6 +724,11 @@ typedef enum RISCVException {
RISCV_EXCP_SEMIHOST = 0x3f,
} RISCVException;
+/* zicfilp: a landing-pad violation results in a sw check with tval = 2 */
+#define RISCV_EXCP_SW_CHECK_FCFI_TVAL 2
+/* zicfiss: a shadow-stack violation results in a sw check with tval = 3 */
+#define RISCV_EXCP_SW_CHECK_BCFI_TVAL 3
+
#define RISCV_EXCP_INT_FLAG 0x80000000
#define RISCV_EXCP_INT_MASK 0x7fffffff
@@ -711,6 +751,9 @@ typedef enum RISCVException {
/* -1 is due to bit zero of hgeip and hgeie being ROZ. */
#define IRQ_LOCAL_GUEST_MAX (TARGET_LONG_BITS - 1)
+/* RNMI causes */
+#define RNMI_MAX 16
+
/* mip masks */
#define MIP_USIP (1 << IRQ_U_SOFT)
#define MIP_SSIP (1 << IRQ_S_SOFT)
@@ -747,39 +790,49 @@ typedef enum RISCVException {
#define VS_MODE_INTERRUPTS ((uint64_t)(MIP_VSSIP | MIP_VSTIP | MIP_VSEIP))
#define HS_MODE_INTERRUPTS ((uint64_t)(MIP_SGEIP | VS_MODE_INTERRUPTS))
-/* General PointerMasking CSR bits */
-#define PM_ENABLE 0x00000001ULL
-#define PM_CURRENT 0x00000002ULL
-#define PM_INSN 0x00000004ULL
-
/* Execution environment configuration bits */
#define MENVCFG_FIOM BIT(0)
+#define MENVCFG_LPE BIT(2) /* zicfilp */
+#define MENVCFG_SSE BIT(3) /* zicfiss */
#define MENVCFG_CBIE (3UL << 4)
#define MENVCFG_CBCFE BIT(6)
#define MENVCFG_CBZE BIT(7)
+#define MENVCFG_PMM (3ULL << 32)
+#define MENVCFG_DTE (1ULL << 59)
+#define MENVCFG_CDE (1ULL << 60)
#define MENVCFG_ADUE (1ULL << 61)
#define MENVCFG_PBMTE (1ULL << 62)
#define MENVCFG_STCE (1ULL << 63)
/* For RV32 */
+#define MENVCFGH_DTE BIT(27)
#define MENVCFGH_ADUE BIT(29)
#define MENVCFGH_PBMTE BIT(30)
#define MENVCFGH_STCE BIT(31)
#define SENVCFG_FIOM MENVCFG_FIOM
+#define SENVCFG_LPE MENVCFG_LPE
+#define SENVCFG_SSE MENVCFG_SSE
#define SENVCFG_CBIE MENVCFG_CBIE
#define SENVCFG_CBCFE MENVCFG_CBCFE
#define SENVCFG_CBZE MENVCFG_CBZE
+#define SENVCFG_UKTE BIT(8)
+#define SENVCFG_PMM MENVCFG_PMM
#define HENVCFG_FIOM MENVCFG_FIOM
+#define HENVCFG_LPE MENVCFG_LPE
+#define HENVCFG_SSE MENVCFG_SSE
#define HENVCFG_CBIE MENVCFG_CBIE
#define HENVCFG_CBCFE MENVCFG_CBCFE
#define HENVCFG_CBZE MENVCFG_CBZE
+#define HENVCFG_PMM MENVCFG_PMM
+#define HENVCFG_DTE MENVCFG_DTE
#define HENVCFG_ADUE MENVCFG_ADUE
#define HENVCFG_PBMTE MENVCFG_PBMTE
#define HENVCFG_STCE MENVCFG_STCE
/* For RV32 */
+#define HENVCFGH_DTE MENVCFGH_DTE
#define HENVCFGH_ADUE MENVCFGH_ADUE
#define HENVCFGH_PBMTE MENVCFGH_PBMTE
#define HENVCFGH_STCE MENVCFGH_STCE
@@ -835,6 +888,88 @@ typedef enum RISCVException {
#define UMTE_U_PM_INSN U_PM_INSN
#define UMTE_MASK (UMTE_U_PM_ENABLE | MMTE_U_PM_CURRENT | UMTE_U_PM_INSN)
+/* CTR control register common fields */
+#define XCTRCTL_U BIT_ULL(0)
+#define XCTRCTL_S BIT_ULL(1)
+#define XCTRCTL_RASEMU BIT_ULL(7)
+#define XCTRCTL_STE BIT_ULL(8)
+#define XCTRCTL_BPFRZ BIT_ULL(11)
+#define XCTRCTL_LCOFIFRZ BIT_ULL(12)
+#define XCTRCTL_EXCINH BIT_ULL(33)
+#define XCTRCTL_INTRINH BIT_ULL(34)
+#define XCTRCTL_TRETINH BIT_ULL(35)
+#define XCTRCTL_NTBREN BIT_ULL(36)
+#define XCTRCTL_TKBRINH BIT_ULL(37)
+#define XCTRCTL_INDCALLINH BIT_ULL(40)
+#define XCTRCTL_DIRCALLINH BIT_ULL(41)
+#define XCTRCTL_INDJMPINH BIT_ULL(42)
+#define XCTRCTL_DIRJMPINH BIT_ULL(43)
+#define XCTRCTL_CORSWAPINH BIT_ULL(44)
+#define XCTRCTL_RETINH BIT_ULL(45)
+#define XCTRCTL_INDLJMPINH BIT_ULL(46)
+#define XCTRCTL_DIRLJMPINH BIT_ULL(47)
+
+#define XCTRCTL_MASK (XCTRCTL_U | XCTRCTL_S | XCTRCTL_RASEMU | \
+ XCTRCTL_STE | XCTRCTL_BPFRZ | XCTRCTL_LCOFIFRZ | \
+ XCTRCTL_EXCINH | XCTRCTL_INTRINH | XCTRCTL_TRETINH | \
+ XCTRCTL_NTBREN | XCTRCTL_TKBRINH | XCTRCTL_INDCALLINH | \
+ XCTRCTL_DIRCALLINH | XCTRCTL_INDJMPINH | \
+ XCTRCTL_DIRJMPINH | XCTRCTL_CORSWAPINH | \
+ XCTRCTL_RETINH | XCTRCTL_INDLJMPINH | XCTRCTL_DIRLJMPINH)
+
+#define XCTRCTL_INH_START 32U
+
+/* CTR mctrctl bits */
+#define MCTRCTL_M BIT_ULL(2)
+#define MCTRCTL_MTE BIT_ULL(9)
+
+#define MCTRCTL_MASK (XCTRCTL_MASK | MCTRCTL_M | MCTRCTL_MTE)
+#define SCTRCTL_MASK XCTRCTL_MASK
+#define VSCTRCTL_MASK XCTRCTL_MASK
+
+/* sctrstatus CSR bits. */
+#define SCTRSTATUS_WRPTR_MASK 0xFF
+#define SCTRSTATUS_FROZEN BIT(31)
+#define SCTRSTATUS_MASK (SCTRSTATUS_WRPTR_MASK | SCTRSTATUS_FROZEN)
+
+/* sctrdepth CSR bits. */
+#define SCTRDEPTH_MASK 0x7
+#define SCTRDEPTH_MIN 0U /* 16 Entries. */
+#define SCTRDEPTH_MAX 4U /* 256 Entries. */
+
+#define CTR_ENTRIES_FIRST 0x200
+#define CTR_ENTRIES_LAST 0x2ff
+
+#define CTRSOURCE_VALID BIT(0)
+#define CTRTARGET_MISP BIT(0)
+
+#define CTRDATA_TYPE_MASK 0xF
+#define CTRDATA_CCV BIT(15)
+#define CTRDATA_CCM_MASK 0xFFF0000
+#define CTRDATA_CCE_MASK 0xF0000000
+
+#define CTRDATA_MASK (CTRDATA_TYPE_MASK | CTRDATA_CCV | \
+ CTRDATA_CCM_MASK | CTRDATA_CCE_MASK)
+
+typedef enum CTRType {
+ CTRDATA_TYPE_NONE = 0,
+ CTRDATA_TYPE_EXCEPTION = 1,
+ CTRDATA_TYPE_INTERRUPT = 2,
+ CTRDATA_TYPE_EXCEP_INT_RET = 3,
+ CTRDATA_TYPE_NONTAKEN_BRANCH = 4,
+ CTRDATA_TYPE_TAKEN_BRANCH = 5,
+ CTRDATA_TYPE_RESERVED_0 = 6,
+ CTRDATA_TYPE_RESERVED_1 = 7,
+ CTRDATA_TYPE_INDIRECT_CALL = 8,
+ CTRDATA_TYPE_DIRECT_CALL = 9,
+ CTRDATA_TYPE_INDIRECT_JUMP = 10,
+ CTRDATA_TYPE_DIRECT_JUMP = 11,
+ CTRDATA_TYPE_CO_ROUTINE_SWAP = 12,
+ CTRDATA_TYPE_RETURN = 13,
+ CTRDATA_TYPE_OTHER_INDIRECT_JUMP = 14,
+ CTRDATA_TYPE_OTHER_DIRECT_JUMP = 15,
+} CTRType;
+
/* MISELECT, SISELECT, and VSISELECT bits (AIA) */
#define ISELECT_IPRIO0 0x30
#define ISELECT_IPRIO15 0x3f
@@ -846,10 +981,15 @@ typedef enum RISCVException {
#define ISELECT_IMSIC_EIE63 0xff
#define ISELECT_IMSIC_FIRST ISELECT_IMSIC_EIDELIVERY
#define ISELECT_IMSIC_LAST ISELECT_IMSIC_EIE63
-#define ISELECT_MASK 0x1ff
+#define ISELECT_MASK_AIA 0x1ff
+
+/* [M|S|VS]ISELECT value for Indirect CSR Access Extension */
+#define ISELECT_CD_FIRST 0x40
+#define ISELECT_CD_LAST 0x5f
+#define ISELECT_MASK_SXCSRIND 0xfff
/* Dummy [M|S|VS]ISELECT value for emulating [M|S|VS]TOPEI CSRs */
-#define ISELECT_IMSIC_TOPEI (ISELECT_MASK + 1)
+#define ISELECT_IMSIC_TOPEI (ISELECT_MASK_AIA + 1)
/* IMSIC bits (AIA) */
#define IMSIC_TOPEI_IID_SHIFT 16
@@ -938,15 +1078,27 @@ typedef enum RISCVException {
MHPMEVENTH_BIT_VSINH | \
MHPMEVENTH_BIT_VUINH)
-#define MHPMEVENT_SSCOF_MASK _ULL(0xFFFF000000000000)
-#define MHPMEVENT_IDX_MASK 0xFFFFF
-#define MHPMEVENT_SSCOF_RESVD 16
+#define MHPMEVENT_SSCOF_MASK MAKE_64BIT_MASK(63, 56)
+#define MHPMEVENT_IDX_MASK (~MHPMEVENT_SSCOF_MASK)
+
+/* RISC-V-specific interrupt pending bits. */
+#define CPU_INTERRUPT_RNMI CPU_INTERRUPT_TGT_EXT_0
/* JVT CSR bits */
#define JVT_MODE 0x3F
#define JVT_BASE (~0x3F)
/* Debug Sdtrig CSR masks */
+#define TEXTRA32_MHVALUE 0xFC000000
+#define TEXTRA32_MHSELECT 0x03800000
+#define TEXTRA32_SBYTEMASK 0x000C0000
+#define TEXTRA32_SVALUE 0x0003FFFC
+#define TEXTRA32_SSELECT 0x00000003
+#define TEXTRA64_MHVALUE 0xFFF8000000000000ULL
+#define TEXTRA64_MHSELECT 0x0007000000000000ULL
+#define TEXTRA64_SBYTEMASK 0x000000F000000000ULL
+#define TEXTRA64_SVALUE 0x00000003FFFFFFFCULL
+#define TEXTRA64_SSELECT 0x0000000000000003ULL
#define MCONTEXT32 0x0000003F
#define MCONTEXT64 0x0000000000001FFFULL
#define MCONTEXT32_HCONTEXT 0x0000007F
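
The CTR definitions above pin down a mapping from an xiselect value to a slot
in the circular buffer: logical entry 0 is the newest record and sits one slot
behind sctrstatus.WRPTR, with the buffer depth being 16 << sctrdepth. A
standalone sketch of that index math (the helper and the worked values are
illustrative):

    #include <stdio.h>

    #define CTR_ENTRIES_FIRST 0x200
    #define SCTRDEPTH_MASK    0x7

    static unsigned ctr_entry_to_idx(unsigned isel, unsigned wrptr,
                                     unsigned sctrdepth)
    {
        unsigned depth = 16u << (sctrdepth & SCTRDEPTH_MASK);
        unsigned entry = isel - CTR_ENTRIES_FIRST; /* logical entry number */

        /* entry 0 is the most recent record, one slot behind WRPTR */
        return (wrptr - entry - 1) & (depth - 1);
    }

    int main(void)
    {
        /* depth 16, WRPTR = 9: entry 0 -> buffer[8], entry 1 -> buffer[7] */
        printf("%u %u\n", ctr_entry_to_idx(0x200, 9, 0),
               ctr_entry_to_idx(0x201, 9, 0));
        return 0;
    }

The unsigned wraparound also covers WRPTR = 0: reading entry 0 then yields
slot depth - 1, the last slot written before the pointer wrapped.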
diff --git a/target/riscv/cpu_cfg.h b/target/riscv/cpu_cfg.h
index 8b272fb..aa28dc8 100644
--- a/target/riscv/cpu_cfg.h
+++ b/target/riscv/cpu_cfg.h
@@ -21,160 +21,10 @@
#ifndef RISCV_CPU_CFG_H
#define RISCV_CPU_CFG_H
-/*
- * map is a 16-bit bitmap: the most significant set bit in map is the maximum
- * satp mode that is supported. It may be chosen by the user and must respect
- * what qemu implements (valid_1_10_32/64) and what the hw is capable of
- * (supported bitmap below).
- *
- * init is a 16-bit bitmap used to make sure the user selected a correct
- * configuration as per the specification.
- *
- * supported is a 16-bit bitmap used to reflect the hw capabilities.
- */
-typedef struct {
- uint16_t map, init, supported;
-} RISCVSATPMap;
-
struct RISCVCPUConfig {
- bool ext_zba;
- bool ext_zbb;
- bool ext_zbc;
- bool ext_zbkb;
- bool ext_zbkc;
- bool ext_zbkx;
- bool ext_zbs;
- bool ext_zca;
- bool ext_zcb;
- bool ext_zcd;
- bool ext_zce;
- bool ext_zcf;
- bool ext_zcmp;
- bool ext_zcmt;
- bool ext_zk;
- bool ext_zkn;
- bool ext_zknd;
- bool ext_zkne;
- bool ext_zknh;
- bool ext_zkr;
- bool ext_zks;
- bool ext_zksed;
- bool ext_zksh;
- bool ext_zkt;
- bool ext_zifencei;
- bool ext_zicntr;
- bool ext_zicsr;
- bool ext_zicbom;
- bool ext_zicbop;
- bool ext_zicboz;
- bool ext_zicond;
- bool ext_zihintntl;
- bool ext_zihintpause;
- bool ext_zihpm;
- bool ext_zimop;
- bool ext_zcmop;
- bool ext_ztso;
- bool ext_smstateen;
- bool ext_sstc;
- bool ext_smcntrpmf;
- bool ext_svadu;
- bool ext_svinval;
- bool ext_svnapot;
- bool ext_svpbmt;
- bool ext_zdinx;
- bool ext_zaamo;
- bool ext_zacas;
- bool ext_zama16b;
- bool ext_zabha;
- bool ext_zalrsc;
- bool ext_zawrs;
- bool ext_zfa;
- bool ext_zfbfmin;
- bool ext_zfh;
- bool ext_zfhmin;
- bool ext_zfinx;
- bool ext_zhinx;
- bool ext_zhinxmin;
- bool ext_zve32f;
- bool ext_zve32x;
- bool ext_zve64f;
- bool ext_zve64d;
- bool ext_zve64x;
- bool ext_zvbb;
- bool ext_zvbc;
- bool ext_zvkb;
- bool ext_zvkg;
- bool ext_zvkned;
- bool ext_zvknha;
- bool ext_zvknhb;
- bool ext_zvksed;
- bool ext_zvksh;
- bool ext_zvkt;
- bool ext_zvkn;
- bool ext_zvknc;
- bool ext_zvkng;
- bool ext_zvks;
- bool ext_zvksc;
- bool ext_zvksg;
- bool ext_zmmul;
- bool ext_zvfbfmin;
- bool ext_zvfbfwma;
- bool ext_zvfh;
- bool ext_zvfhmin;
- bool ext_smaia;
- bool ext_ssaia;
- bool ext_sscofpmf;
- bool ext_smepmp;
- bool rvv_ta_all_1s;
- bool rvv_ma_all_1s;
-
- uint32_t mvendorid;
- uint64_t marchid;
- uint64_t mimpid;
-
- /* Named features */
- bool ext_svade;
- bool ext_zic64b;
-
- /*
- * Always 'true' booleans for named features
- * TCG always implement/can't be user disabled,
- * based on spec version.
- */
- bool has_priv_1_13;
- bool has_priv_1_12;
- bool has_priv_1_11;
-
- /* Vendor-specific custom extensions */
- bool ext_xtheadba;
- bool ext_xtheadbb;
- bool ext_xtheadbs;
- bool ext_xtheadcmo;
- bool ext_xtheadcondmov;
- bool ext_xtheadfmemidx;
- bool ext_xtheadfmv;
- bool ext_xtheadmac;
- bool ext_xtheadmemidx;
- bool ext_xtheadmempair;
- bool ext_xtheadsync;
- bool ext_XVentanaCondOps;
-
- uint32_t pmu_mask;
- uint16_t vlenb;
- uint16_t elen;
- uint16_t cbom_blocksize;
- uint16_t cbop_blocksize;
- uint16_t cboz_blocksize;
- bool mmu;
- bool pmp;
- bool debug;
- bool misa_w;
-
- bool short_isa_string;
-
-#ifndef CONFIG_USER_ONLY
- RISCVSATPMap satp_mode;
-#endif
+#define BOOL_FIELD(x) bool x;
+#define TYPED_FIELD(type, x, default) type x;
+#include "cpu_cfg_fields.h.inc"
};
typedef struct RISCVCPUConfig RISCVCPUConfig;
diff --git a/target/riscv/cpu_cfg_fields.h.inc b/target/riscv/cpu_cfg_fields.h.inc
new file mode 100644
index 0000000..59f134a
--- /dev/null
+++ b/target/riscv/cpu_cfg_fields.h.inc
@@ -0,0 +1,170 @@
+/*
+ * Required definitions before including this file:
+ *
+ * #define BOOL_FIELD(x)
+ * #define TYPED_FIELD(type, x, default)
+ */
+
+BOOL_FIELD(ext_zba)
+BOOL_FIELD(ext_zbb)
+BOOL_FIELD(ext_zbc)
+BOOL_FIELD(ext_zbkb)
+BOOL_FIELD(ext_zbkc)
+BOOL_FIELD(ext_zbkx)
+BOOL_FIELD(ext_zbs)
+BOOL_FIELD(ext_zca)
+BOOL_FIELD(ext_zcb)
+BOOL_FIELD(ext_zcd)
+BOOL_FIELD(ext_zce)
+BOOL_FIELD(ext_zcf)
+BOOL_FIELD(ext_zcmp)
+BOOL_FIELD(ext_zcmt)
+BOOL_FIELD(ext_zk)
+BOOL_FIELD(ext_zkn)
+BOOL_FIELD(ext_zknd)
+BOOL_FIELD(ext_zkne)
+BOOL_FIELD(ext_zknh)
+BOOL_FIELD(ext_zkr)
+BOOL_FIELD(ext_zks)
+BOOL_FIELD(ext_zksed)
+BOOL_FIELD(ext_zksh)
+BOOL_FIELD(ext_zkt)
+BOOL_FIELD(ext_zifencei)
+BOOL_FIELD(ext_zicntr)
+BOOL_FIELD(ext_zicsr)
+BOOL_FIELD(ext_zicbom)
+BOOL_FIELD(ext_zicbop)
+BOOL_FIELD(ext_zicboz)
+BOOL_FIELD(ext_zicfilp)
+BOOL_FIELD(ext_zicfiss)
+BOOL_FIELD(ext_zicond)
+BOOL_FIELD(ext_zihintntl)
+BOOL_FIELD(ext_zihintpause)
+BOOL_FIELD(ext_zihpm)
+BOOL_FIELD(ext_zimop)
+BOOL_FIELD(ext_zcmop)
+BOOL_FIELD(ext_ztso)
+BOOL_FIELD(ext_smstateen)
+BOOL_FIELD(ext_sstc)
+BOOL_FIELD(ext_smcdeleg)
+BOOL_FIELD(ext_ssccfg)
+BOOL_FIELD(ext_smcntrpmf)
+BOOL_FIELD(ext_smcsrind)
+BOOL_FIELD(ext_sscsrind)
+BOOL_FIELD(ext_ssdbltrp)
+BOOL_FIELD(ext_smdbltrp)
+BOOL_FIELD(ext_svadu)
+BOOL_FIELD(ext_svinval)
+BOOL_FIELD(ext_svnapot)
+BOOL_FIELD(ext_svpbmt)
+BOOL_FIELD(ext_svvptc)
+BOOL_FIELD(ext_svukte)
+BOOL_FIELD(ext_zdinx)
+BOOL_FIELD(ext_zaamo)
+BOOL_FIELD(ext_zacas)
+BOOL_FIELD(ext_zama16b)
+BOOL_FIELD(ext_zabha)
+BOOL_FIELD(ext_zalrsc)
+BOOL_FIELD(ext_zawrs)
+BOOL_FIELD(ext_zfa)
+BOOL_FIELD(ext_zfbfmin)
+BOOL_FIELD(ext_zfh)
+BOOL_FIELD(ext_zfhmin)
+BOOL_FIELD(ext_zfinx)
+BOOL_FIELD(ext_zhinx)
+BOOL_FIELD(ext_zhinxmin)
+BOOL_FIELD(ext_zve32f)
+BOOL_FIELD(ext_zve32x)
+BOOL_FIELD(ext_zve64f)
+BOOL_FIELD(ext_zve64d)
+BOOL_FIELD(ext_zve64x)
+BOOL_FIELD(ext_zvbb)
+BOOL_FIELD(ext_zvbc)
+BOOL_FIELD(ext_zvkb)
+BOOL_FIELD(ext_zvkg)
+BOOL_FIELD(ext_zvkned)
+BOOL_FIELD(ext_zvknha)
+BOOL_FIELD(ext_zvknhb)
+BOOL_FIELD(ext_zvksed)
+BOOL_FIELD(ext_zvksh)
+BOOL_FIELD(ext_zvkt)
+BOOL_FIELD(ext_zvkn)
+BOOL_FIELD(ext_zvknc)
+BOOL_FIELD(ext_zvkng)
+BOOL_FIELD(ext_zvks)
+BOOL_FIELD(ext_zvksc)
+BOOL_FIELD(ext_zvksg)
+BOOL_FIELD(ext_zmmul)
+BOOL_FIELD(ext_zvfbfmin)
+BOOL_FIELD(ext_zvfbfwma)
+BOOL_FIELD(ext_zvfh)
+BOOL_FIELD(ext_zvfhmin)
+BOOL_FIELD(ext_smaia)
+BOOL_FIELD(ext_ssaia)
+BOOL_FIELD(ext_smctr)
+BOOL_FIELD(ext_ssctr)
+BOOL_FIELD(ext_sscofpmf)
+BOOL_FIELD(ext_smepmp)
+BOOL_FIELD(ext_smrnmi)
+BOOL_FIELD(ext_ssnpm)
+BOOL_FIELD(ext_smnpm)
+BOOL_FIELD(ext_smmpm)
+BOOL_FIELD(ext_sspm)
+BOOL_FIELD(ext_supm)
+BOOL_FIELD(rvv_ta_all_1s)
+BOOL_FIELD(rvv_ma_all_1s)
+BOOL_FIELD(rvv_vl_half_avl)
+/* Named features */
+BOOL_FIELD(ext_svade)
+BOOL_FIELD(ext_zic64b)
+BOOL_FIELD(ext_ssstateen)
+BOOL_FIELD(ext_sha)
+
+/*
+ * Always-'true' booleans for named features that
+ * TCG always implements and that can't be disabled
+ * by the user, based on spec version.
+ */
+BOOL_FIELD(has_priv_1_13)
+BOOL_FIELD(has_priv_1_12)
+BOOL_FIELD(has_priv_1_11)
+
+/* Always enabled for TCG if has_priv_1_11 */
+BOOL_FIELD(ext_ziccrse)
+
+/* Vendor-specific custom extensions */
+BOOL_FIELD(ext_xtheadba)
+BOOL_FIELD(ext_xtheadbb)
+BOOL_FIELD(ext_xtheadbs)
+BOOL_FIELD(ext_xtheadcmo)
+BOOL_FIELD(ext_xtheadcondmov)
+BOOL_FIELD(ext_xtheadfmemidx)
+BOOL_FIELD(ext_xtheadfmv)
+BOOL_FIELD(ext_xtheadmac)
+BOOL_FIELD(ext_xtheadmemidx)
+BOOL_FIELD(ext_xtheadmempair)
+BOOL_FIELD(ext_xtheadsync)
+BOOL_FIELD(ext_XVentanaCondOps)
+
+BOOL_FIELD(mmu)
+BOOL_FIELD(pmp)
+BOOL_FIELD(debug)
+BOOL_FIELD(misa_w)
+
+BOOL_FIELD(short_isa_string)
+
+TYPED_FIELD(uint32_t, mvendorid, 0)
+TYPED_FIELD(uint64_t, marchid, 0)
+TYPED_FIELD(uint64_t, mimpid, 0)
+
+TYPED_FIELD(uint32_t, pmu_mask, 0)
+TYPED_FIELD(uint16_t, vlenb, 0)
+TYPED_FIELD(uint16_t, elen, 0)
+TYPED_FIELD(uint16_t, cbom_blocksize, 0)
+TYPED_FIELD(uint16_t, cbop_blocksize, 0)
+TYPED_FIELD(uint16_t, cboz_blocksize, 0)
+
+TYPED_FIELD(int8_t, max_satp_mode, -1)
+
+#undef BOOL_FIELD
+#undef TYPED_FIELD
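
cpu_cfg_fields.h.inc is an X-macro list: every includer defines BOOL_FIELD and
TYPED_FIELD to taste before including it, and the trailing #undefs clean up
afterwards. cpu_cfg.h expands the list into struct members; the third
TYPED_FIELD argument makes expansions like the following possible
(reset_cfg_to_defaults() is a hypothetical consumer, not part of the patch):

    /* Hypothetical consumer sketching what the 'default' argument is for. */
    static void reset_cfg_to_defaults(RISCVCPUConfig *cfg)
    {
    #define BOOL_FIELD(x)                 cfg->x = false;
    #define TYPED_FIELD(type, x, default) cfg->x = (default);
    #include "cpu_cfg_fields.h.inc"
    }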
diff --git a/target/riscv/cpu_helper.c b/target/riscv/cpu_helper.c
index 395a1d9..2ed69d7 100644
--- a/target/riscv/cpu_helper.c
+++ b/target/riscv/cpu_helper.c
@@ -23,16 +23,19 @@
#include "cpu.h"
#include "internals.h"
#include "pmu.h"
-#include "exec/exec-all.h"
+#include "exec/cputlb.h"
#include "exec/page-protection.h"
+#include "exec/target_page.h"
+#include "system/memory.h"
#include "instmap.h"
#include "tcg/tcg-op.h"
+#include "accel/tcg/cpu-ops.h"
#include "trace.h"
#include "semihosting/common-semi.h"
-#include "sysemu/cpu-timers.h"
+#include "exec/icount.h"
#include "cpu_bits.h"
#include "debug.h"
-#include "tcg/oversized-guest.h"
+#include "pmp.h"
int riscv_env_mmu_index(CPURISCVState *env, bool ifetch)
{
@@ -63,134 +66,169 @@ int riscv_env_mmu_index(CPURISCVState *env, bool ifetch)
#endif
}
-void cpu_get_tb_cpu_state(CPURISCVState *env, vaddr *pc,
- uint64_t *cs_base, uint32_t *pflags)
+bool cpu_get_fcfien(CPURISCVState *env)
{
- RISCVCPU *cpu = env_archcpu(env);
- RISCVExtStatus fs, vs;
- uint32_t flags = 0;
+ /* no cfi extension, return false */
+ if (!env_archcpu(env)->cfg.ext_zicfilp) {
+ return false;
+ }
- *pc = env->xl == MXL_RV32 ? env->pc & UINT32_MAX : env->pc;
- *cs_base = 0;
+ switch (env->priv) {
+ case PRV_U:
+ if (riscv_has_ext(env, RVS)) {
+ return env->senvcfg & SENVCFG_LPE;
+ }
+ return env->menvcfg & MENVCFG_LPE;
+#ifndef CONFIG_USER_ONLY
+ case PRV_S:
+ if (env->virt_enabled) {
+ return env->henvcfg & HENVCFG_LPE;
+ }
+ return env->menvcfg & MENVCFG_LPE;
+ case PRV_M:
+ return env->mseccfg & MSECCFG_MLPE;
+#endif
+ default:
+ g_assert_not_reached();
+ }
+}
- if (cpu->cfg.ext_zve32x) {
+bool cpu_get_bcfien(CPURISCVState *env)
+{
+ /* no cfi extension, return false */
+ if (!env_archcpu(env)->cfg.ext_zicfiss) {
+ return false;
+ }
+
+ switch (env->priv) {
+ case PRV_U:
/*
- * If env->vl equals to VLMAX, we can use generic vector operation
- * expanders (GVEC) to accerlate the vector operations.
- * However, as LMUL could be a fractional number. The maximum
- * vector size can be operated might be less than 8 bytes,
- * which is not supported by GVEC. So we set vl_eq_vlmax flag to true
- * only when maxsz >= 8 bytes.
+     * If S is not implemented then the shadow stack for U can't be turned
+     * on. This is checked in riscv_cpu_validate_set_extensions(), so there
+     * is no need to check or assert here.
*/
-
- /* lmul encoded as in DisasContext::lmul */
- int8_t lmul = sextract32(FIELD_EX64(env->vtype, VTYPE, VLMUL), 0, 3);
- uint32_t vsew = FIELD_EX64(env->vtype, VTYPE, VSEW);
- uint32_t vlmax = vext_get_vlmax(cpu->cfg.vlenb, vsew, lmul);
- uint32_t maxsz = vlmax << vsew;
- bool vl_eq_vlmax = (env->vstart == 0) && (vlmax == env->vl) &&
- (maxsz >= 8);
- flags = FIELD_DP32(flags, TB_FLAGS, VILL, env->vill);
- flags = FIELD_DP32(flags, TB_FLAGS, SEW, vsew);
- flags = FIELD_DP32(flags, TB_FLAGS, LMUL,
- FIELD_EX64(env->vtype, VTYPE, VLMUL));
- flags = FIELD_DP32(flags, TB_FLAGS, VL_EQ_VLMAX, vl_eq_vlmax);
- flags = FIELD_DP32(flags, TB_FLAGS, VTA,
- FIELD_EX64(env->vtype, VTYPE, VTA));
- flags = FIELD_DP32(flags, TB_FLAGS, VMA,
- FIELD_EX64(env->vtype, VTYPE, VMA));
- flags = FIELD_DP32(flags, TB_FLAGS, VSTART_EQ_ZERO, env->vstart == 0);
- } else {
- flags = FIELD_DP32(flags, TB_FLAGS, VILL, 1);
+ return env->senvcfg & SENVCFG_SSE;
+#ifndef CONFIG_USER_ONLY
+ case PRV_S:
+ if (env->virt_enabled) {
+ return env->henvcfg & HENVCFG_SSE;
+ }
+ return env->menvcfg & MENVCFG_SSE;
+ case PRV_M: /* M-mode shadow stack is always off */
+ return false;
+#endif
+ default:
+ g_assert_not_reached();
}
+}
+bool riscv_env_smode_dbltrp_enabled(CPURISCVState *env, bool virt)
+{
#ifdef CONFIG_USER_ONLY
- fs = EXT_STATUS_DIRTY;
- vs = EXT_STATUS_DIRTY;
+ return false;
#else
- flags = FIELD_DP32(flags, TB_FLAGS, PRIV, env->priv);
-
- flags |= riscv_env_mmu_index(env, 0);
- fs = get_field(env->mstatus, MSTATUS_FS);
- vs = get_field(env->mstatus, MSTATUS_VS);
-
- if (env->virt_enabled) {
- flags = FIELD_DP32(flags, TB_FLAGS, VIRT_ENABLED, 1);
- /*
- * Merge DISABLED and !DIRTY states using MIN.
- * We will set both fields when dirtying.
- */
- fs = MIN(fs, get_field(env->mstatus_hs, MSTATUS_FS));
- vs = MIN(vs, get_field(env->mstatus_hs, MSTATUS_VS));
+ if (virt) {
+ return (env->henvcfg & HENVCFG_DTE) != 0;
+ } else {
+ return (env->menvcfg & MENVCFG_DTE) != 0;
}
+#endif
+}
- /* With Zfinx, floating point is enabled/disabled by Smstateen. */
- if (!riscv_has_ext(env, RVF)) {
- fs = (smstateen_acc_ok(env, 0, SMSTATEEN0_FCSR) == RISCV_EXCP_NONE)
- ? EXT_STATUS_DIRTY : EXT_STATUS_DISABLED;
- }
+RISCVPmPmm riscv_pm_get_pmm(CPURISCVState *env)
+{
+#ifndef CONFIG_USER_ONLY
+ int priv_mode = cpu_address_mode(env);
- if (cpu->cfg.debug && !icount_enabled()) {
- flags = FIELD_DP32(flags, TB_FLAGS, ITRIGGER, env->itrigger_enabled);
+ if (get_field(env->mstatus, MSTATUS_MPRV) &&
+ get_field(env->mstatus, MSTATUS_MXR)) {
+ return PMM_FIELD_DISABLED;
}
-#endif
- flags = FIELD_DP32(flags, TB_FLAGS, FS, fs);
- flags = FIELD_DP32(flags, TB_FLAGS, VS, vs);
- flags = FIELD_DP32(flags, TB_FLAGS, XL, env->xl);
- flags = FIELD_DP32(flags, TB_FLAGS, AXL, cpu_address_xl(env));
- if (env->cur_pmmask != 0) {
- flags = FIELD_DP32(flags, TB_FLAGS, PM_MASK_ENABLED, 1);
- }
- if (env->cur_pmbase != 0) {
- flags = FIELD_DP32(flags, TB_FLAGS, PM_BASE_ENABLED, 1);
+ /* Get current PMM field */
+ switch (priv_mode) {
+ case PRV_M:
+ if (riscv_cpu_cfg(env)->ext_smmpm) {
+ return get_field(env->mseccfg, MSECCFG_PMM);
+ }
+ break;
+ case PRV_S:
+ if (riscv_cpu_cfg(env)->ext_smnpm) {
+ if (get_field(env->mstatus, MSTATUS_MPV)) {
+ return get_field(env->henvcfg, HENVCFG_PMM);
+ } else {
+ return get_field(env->menvcfg, MENVCFG_PMM);
+ }
+ }
+ break;
+ case PRV_U:
+ if (riscv_has_ext(env, RVS)) {
+ if (riscv_cpu_cfg(env)->ext_ssnpm) {
+ return get_field(env->senvcfg, SENVCFG_PMM);
+ }
+ } else {
+ if (riscv_cpu_cfg(env)->ext_smnpm) {
+ return get_field(env->menvcfg, MENVCFG_PMM);
+ }
+ }
+ break;
+ default:
+ g_assert_not_reached();
}
-
- *pflags = flags;
+ return PMM_FIELD_DISABLED;
+#else
+ return PMM_FIELD_DISABLED;
+#endif
}
-void riscv_cpu_update_mask(CPURISCVState *env)
+RISCVPmPmm riscv_pm_get_virt_pmm(CPURISCVState *env)
{
- target_ulong mask = 0, base = 0;
- RISCVMXL xl = env->xl;
- /*
- * TODO: Current RVJ spec does not specify
- * how the extension interacts with XLEN.
- */
#ifndef CONFIG_USER_ONLY
- int mode = cpu_address_mode(env);
- xl = cpu_get_xl(env, mode);
- if (riscv_has_ext(env, RVJ)) {
- switch (mode) {
- case PRV_M:
- if (env->mmte & M_PM_ENABLE) {
- mask = env->mpmmask;
- base = env->mpmbase;
- }
- break;
- case PRV_S:
- if (env->mmte & S_PM_ENABLE) {
- mask = env->spmmask;
- base = env->spmbase;
- }
- break;
- case PRV_U:
- if (env->mmte & U_PM_ENABLE) {
- mask = env->upmmask;
- base = env->upmbase;
- }
- break;
- default:
- g_assert_not_reached();
+ int priv_mode = cpu_address_mode(env);
+
+ if (priv_mode == PRV_U) {
+ return get_field(env->hstatus, HSTATUS_HUPMM);
+ } else {
+ if (get_field(env->hstatus, HSTATUS_SPVP)) {
+ return get_field(env->henvcfg, HENVCFG_PMM);
+ } else {
+ return get_field(env->senvcfg, SENVCFG_PMM);
}
}
+#else
+ return PMM_FIELD_DISABLED;
#endif
- if (xl == MXL_RV32) {
- env->cur_pmmask = mask & UINT32_MAX;
- env->cur_pmbase = base & UINT32_MAX;
+}
+
+bool riscv_cpu_virt_mem_enabled(CPURISCVState *env)
+{
+#ifndef CONFIG_USER_ONLY
+ int satp_mode = 0;
+ int priv_mode = cpu_address_mode(env);
+
+ if (riscv_cpu_mxl(env) == MXL_RV32) {
+ satp_mode = get_field(env->satp, SATP32_MODE);
} else {
- env->cur_pmmask = mask;
- env->cur_pmbase = base;
+ satp_mode = get_field(env->satp, SATP64_MODE);
+ }
+
+ return ((satp_mode != VM_1_10_MBARE) && (priv_mode != PRV_M));
+#else
+ return false;
+#endif
+}
+
+uint32_t riscv_pm_get_pmlen(RISCVPmPmm pmm)
+{
+ switch (pmm) {
+ case PMM_FIELD_DISABLED:
+ return 0;
+ case PMM_FIELD_PMLEN7:
+ return 7;
+ case PMM_FIELD_PMLEN16:
+ return 16;
+ default:
+ g_assert_not_reached();
}
}
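
riscv_pm_get_pmlen() converts the PMM encoding into a count of upper address
bits to ignore. A hedged sketch of how a consumer could apply it to a 64-bit
effective address, zero- or sign-extending as the new PM_SIGNEXTEND TB flag
suggests (the helper is illustrative, not QEMU's actual masking code):

    #include <stdbool.h>
    #include <stdint.h>

    /* Illustrative only: mask out the top 'pmlen' bits of a 64-bit address,
     * either zeroing them or replicating bit (63 - pmlen) across them. */
    static uint64_t apply_pointer_masking(uint64_t addr, uint32_t pmlen,
                                          bool sign_extend)
    {
        if (pmlen == 0) {
            return addr;                     /* PMM_FIELD_DISABLED */
        }
        if (sign_extend) {
            return (uint64_t)((int64_t)(addr << pmlen) >> pmlen);
        }
        return (addr << pmlen) >> pmlen;     /* zero-extend */
    }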
@@ -434,6 +472,18 @@ static int riscv_cpu_local_irq_pending(CPURISCVState *env)
uint64_t vsbits, irq_delegated;
int virq;
+ /* Priority: RNMI > Other interrupt. */
+ if (riscv_cpu_cfg(env)->ext_smrnmi) {
+ /* If mnstatus.NMIE == 0, all interrupts are disabled. */
+ if (!get_field(env->mnstatus, MNSTATUS_NMIE)) {
+ return RISCV_EXCP_NONE;
+ }
+
+ if (env->rnmip) {
+ return ctz64(env->rnmip); /* since non-zero */
+ }
+ }
+
/* Determine interrupt enable state of all privilege modes */
if (env->virt_enabled) {
mie = 1;
@@ -496,7 +546,9 @@ static int riscv_cpu_local_irq_pending(CPURISCVState *env)
bool riscv_cpu_exec_interrupt(CPUState *cs, int interrupt_request)
{
- if (interrupt_request & CPU_INTERRUPT_HARD) {
+ uint32_t mask = CPU_INTERRUPT_HARD | CPU_INTERRUPT_RNMI;
+
+ if (interrupt_request & mask) {
RISCVCPU *cpu = RISCV_CPU(cs);
CPURISCVState *env = &cpu->env;
int interruptno = riscv_cpu_local_irq_pending(env);
@@ -546,8 +598,21 @@ void riscv_cpu_swap_hypervisor_regs(CPURISCVState *env)
}
bool current_virt = env->virt_enabled;
+ /*
+     * If the zicfilp extension is available and henvcfg.LPE = 1,
+     * then apply the SPELP mask on mstatus.
+ */
+ if (env_archcpu(env)->cfg.ext_zicfilp &&
+ get_field(env->henvcfg, HENVCFG_LPE)) {
+ mstatus_mask |= SSTATUS_SPELP;
+ }
+
g_assert(riscv_has_ext(env, RVH));
+ if (riscv_env_smode_dbltrp_enabled(env, current_virt)) {
+ mstatus_mask |= MSTATUS_SDT;
+ }
+
if (current_virt) {
/* Current V=1 and we are about to change to V=0 */
env->vsstatus = env->mstatus & mstatus_mask;
@@ -619,6 +684,30 @@ void riscv_cpu_set_geilen(CPURISCVState *env, target_ulong geilen)
env->geilen = geilen;
}
+void riscv_cpu_set_rnmi(RISCVCPU *cpu, uint32_t irq, bool level)
+{
+ CPURISCVState *env = &cpu->env;
+ CPUState *cs = CPU(cpu);
+ bool release_lock = false;
+
+ if (!bql_locked()) {
+ release_lock = true;
+ bql_lock();
+ }
+
+ if (level) {
+ env->rnmip |= 1 << irq;
+ cpu_interrupt(cs, CPU_INTERRUPT_RNMI);
+ } else {
+ env->rnmip &= ~(1 << irq);
+ cpu_reset_interrupt(cs, CPU_INTERRUPT_RNMI);
+ }
+
+ if (release_lock) {
+ bql_unlock();
+ }
+}
+
int riscv_cpu_claim_interrupts(RISCVCPU *cpu, uint64_t interrupts)
{
CPURISCVState *env = &cpu->env;
@@ -691,6 +780,254 @@ void riscv_cpu_set_aia_ireg_rmw_fn(CPURISCVState *env, uint32_t priv,
}
}
+static void riscv_ctr_freeze(CPURISCVState *env, uint64_t freeze_mask,
+ bool virt)
+{
+ uint64_t ctl = virt ? env->vsctrctl : env->mctrctl;
+
+ assert((freeze_mask & (~(XCTRCTL_BPFRZ | XCTRCTL_LCOFIFRZ))) == 0);
+
+ if (ctl & freeze_mask) {
+ env->sctrstatus |= SCTRSTATUS_FROZEN;
+ }
+}
+
+void riscv_ctr_clear(CPURISCVState *env)
+{
+ memset(env->ctr_src, 0x0, sizeof(env->ctr_src));
+ memset(env->ctr_dst, 0x0, sizeof(env->ctr_dst));
+ memset(env->ctr_data, 0x0, sizeof(env->ctr_data));
+}
+
+static uint64_t riscv_ctr_priv_to_mask(target_ulong priv, bool virt)
+{
+ switch (priv) {
+ case PRV_M:
+ return MCTRCTL_M;
+    case PRV_S:
+        /* S and VS share the same control bit. */
+        return XCTRCTL_S;
+    case PRV_U:
+        /* U and VU share the same control bit. */
+        return XCTRCTL_U;
+ }
+
+ g_assert_not_reached();
+}
+
+static uint64_t riscv_ctr_get_control(CPURISCVState *env, target_long priv,
+ bool virt)
+{
+ switch (priv) {
+ case PRV_M:
+ return env->mctrctl;
+ case PRV_S:
+ case PRV_U:
+ if (virt) {
+ return env->vsctrctl;
+ }
+ return env->mctrctl;
+ }
+
+ g_assert_not_reached();
+}
+
+/*
+ * This function assumes that the src privilege and target privilege are not
+ * the same and that the src privilege is less than the target privilege.
+ * This includes the virtual state as well.
+ */
+static bool riscv_ctr_check_xte(CPURISCVState *env, target_long src_prv,
+ bool src_virt)
+{
+ target_long tgt_prv = env->priv;
+ bool res = true;
+
+ /*
+     * VS and U mode are the same in terms of the xTE bits required to record
+     * an external trap. See 6.1.2. External Traps, table 8 External Trap
+     * Enable Requirements. This changes VS to U to simplify the logic a bit.
+ */
+ if (src_virt && src_prv == PRV_S) {
+ src_prv = PRV_U;
+ } else if (env->virt_enabled && tgt_prv == PRV_S) {
+ tgt_prv = PRV_U;
+ }
+
+ /* VU mode is an outlier here. */
+ if (src_virt && src_prv == PRV_U) {
+ res &= !!(env->vsctrctl & XCTRCTL_STE);
+ }
+
+ switch (src_prv) {
+ case PRV_U:
+ if (tgt_prv == PRV_U) {
+ break;
+ }
+ res &= !!(env->mctrctl & XCTRCTL_STE);
+ /* fall-through */
+ case PRV_S:
+ if (tgt_prv == PRV_S) {
+ break;
+ }
+ res &= !!(env->mctrctl & MCTRCTL_MTE);
+ /* fall-through */
+ case PRV_M:
+ break;
+ }
+
+ return res;
+}
+
+/*
+ * Special cases for traps and trap returns:
+ *
+ * 1- Traps, and trap returns, between enabled modes are recorded as normal.
+ * 2- Traps from an inhibited mode to an enabled mode, and trap returns from an
+ * enabled mode back to an inhibited mode, are partially recorded. In such
+ * cases, the PC from the inhibited mode (source PC for traps, and target PC
+ * for trap returns) is 0.
+ *
+ * 3- Trap returns from an inhibited mode to an enabled mode are not recorded.
+ * Traps from an enabled mode to an inhibited mode, known as external traps,
+ * receive special handling.
+ * By default external traps are not recorded, but a handshake mechanism exists
+ * to allow partial recording. Software running in the target mode of the trap
+ * can opt-in to allowing CTR to record traps into that mode even when the mode
+ * is inhibited. The MTE, STE, and VSTE bits allow M-mode, S-mode, and VS-mode,
+ * respectively, to opt-in. When an External Trap occurs, and xTE=1, such that
+ * x is the target privilege mode of the trap, will CTR record the trap. In such
+ * cases, the target PC is 0.
+ */
+/*
+ * CTR arrays are implemented as circular buffers and new entry is stored at
+ * sctrstatus.WRPTR, but they are presented to software as moving circular
+ * buffers. Which means, software get's the illusion that whenever a new entry
+ * is added the whole buffer is moved by one place and the new entry is added at
+ * the start keeping new entry at idx 0 and older ones follow.
+ *
+ * Depth = 16.
+ *
+ * buffer [0] [1] [2] [3] [4] [5] [6] [7] [8] [9] [A] [B] [C] [D] [E] [F]
+ * WRPTR W
+ * entry 7 6 5 4 3 2 1 0 F E D C B A 9 8
+ *
+ * When a new entry is added:
+ * buffer [0] [1] [2] [3] [4] [5] [6] [7] [8] [9] [A] [B] [C] [D] [E] [F]
+ * WRPTR W
+ * entry 8 7 6 5 4 3 2 1 0 F E D C B A 9
+ *
+ * entry here denotes the logical entry number that software can access
+ * using the ctrsource, ctrtarget and ctrdata registers. So xiselect 0x200
+ * will return entry 0, i.e. buffer[8], and 0x201 will return entry 1, i.e.
+ * buffer[7]. Here is how we convert an entry to a buffer idx:
+ *
+ * entry = isel - CTR_ENTRIES_FIRST;
+ * idx = (sctrstatus.WRPTR - entry - 1) & (depth - 1);
+ */
+void riscv_ctr_add_entry(CPURISCVState *env, target_long src, target_long dst,
+ enum CTRType type, target_ulong src_priv, bool src_virt)
+{
+ bool tgt_virt = env->virt_enabled;
+ uint64_t src_mask = riscv_ctr_priv_to_mask(src_priv, src_virt);
+ uint64_t tgt_mask = riscv_ctr_priv_to_mask(env->priv, tgt_virt);
+ uint64_t src_ctrl = riscv_ctr_get_control(env, src_priv, src_virt);
+ uint64_t tgt_ctrl = riscv_ctr_get_control(env, env->priv, tgt_virt);
+ uint64_t depth, head;
+ bool ext_trap = false;
+
+ /*
+ * Return immediately if both target and src recording is disabled or if
+ * CTR is in frozen state.
+ */
+ if ((!(src_ctrl & src_mask) && !(tgt_ctrl & tgt_mask)) ||
+ env->sctrstatus & SCTRSTATUS_FROZEN) {
+ return;
+ }
+
+ /*
+     * With RAS emulation enabled, only allow indirect call, direct call,
+     * function return and co-routine swap types.
+ */
+ if (tgt_ctrl & XCTRCTL_RASEMU &&
+ type != CTRDATA_TYPE_INDIRECT_CALL &&
+ type != CTRDATA_TYPE_DIRECT_CALL &&
+ type != CTRDATA_TYPE_RETURN &&
+ type != CTRDATA_TYPE_CO_ROUTINE_SWAP) {
+ return;
+ }
+
+ if (type == CTRDATA_TYPE_EXCEPTION || type == CTRDATA_TYPE_INTERRUPT) {
+ /* Case 2 for traps. */
+ if (!(src_ctrl & src_mask)) {
+ src = 0;
+ } else if (!(tgt_ctrl & tgt_mask)) {
+ /* Check if target priv-mode has allowed external trap recording. */
+ if (!riscv_ctr_check_xte(env, src_priv, src_virt)) {
+ return;
+ }
+
+ ext_trap = true;
+ dst = 0;
+ }
+ } else if (type == CTRDATA_TYPE_EXCEP_INT_RET) {
+ /*
+ * Case 3 for trap returns. Trap returns from inhibited mode are not
+ * recorded.
+ */
+ if (!(src_ctrl & src_mask)) {
+ return;
+ }
+
+ /* Case 2 for trap returns. */
+ if (!(tgt_ctrl & tgt_mask)) {
+ dst = 0;
+ }
+ }
+
+ /* Ignore filters in case of RASEMU mode or External trap. */
+ if (!(tgt_ctrl & XCTRCTL_RASEMU) && !ext_trap) {
+ /*
+         * Check if the specific type is inhibited. The not-taken branch
+         * filter is an enable bit and needs to be checked separately.
+ */
+ bool check = tgt_ctrl & BIT_ULL(type + XCTRCTL_INH_START);
+ if ((type == CTRDATA_TYPE_NONTAKEN_BRANCH && !check) ||
+ (type != CTRDATA_TYPE_NONTAKEN_BRANCH && check)) {
+ return;
+ }
+ }
+
+ head = get_field(env->sctrstatus, SCTRSTATUS_WRPTR_MASK);
+
+ depth = 16 << get_field(env->sctrdepth, SCTRDEPTH_MASK);
+ if (tgt_ctrl & XCTRCTL_RASEMU && type == CTRDATA_TYPE_RETURN) {
+ head = (head - 1) & (depth - 1);
+
+ env->ctr_src[head] &= ~CTRSOURCE_VALID;
+ env->sctrstatus =
+ set_field(env->sctrstatus, SCTRSTATUS_WRPTR_MASK, head);
+ return;
+ }
+
+    /* In case of a co-routine swap we overwrite the latest entry. */
+ if (tgt_ctrl & XCTRCTL_RASEMU && type == CTRDATA_TYPE_CO_ROUTINE_SWAP) {
+ head = (head - 1) & (depth - 1);
+ }
+
+ env->ctr_src[head] = src | CTRSOURCE_VALID;
+ env->ctr_dst[head] = dst & ~CTRTARGET_MISP;
+ env->ctr_data[head] = set_field(0, CTRDATA_TYPE_MASK, type);
+
+ head = (head + 1) & (depth - 1);
+
+ env->sctrstatus = set_field(env->sctrstatus, SCTRSTATUS_WRPTR_MASK, head);
+}
+
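As a sanity check of the conversion above, here is a tiny self-contained program (not QEMU code) that reproduces both diagrams: with depth 16 and WRPTR at slot 8, entry 0 maps to buffer[7]; after recording one more entry, WRPTR becomes 9 and entry 0 maps to buffer[8]:

#include <assert.h>
#include <stdint.h>
#include <stdio.h>

int main(void)
{
    const uint64_t depth = 16;
    uint64_t wrptr = 8;                              /* first diagram */

    /* idx = (WRPTR - entry - 1) & (depth - 1) */
    assert(((wrptr - 0 - 1) & (depth - 1)) == 7);    /* entry 0 -> buffer[7] */

    /* Recording a new entry writes buffer[WRPTR], then advances WRPTR. */
    wrptr = (wrptr + 1) & (depth - 1);               /* second diagram */

    assert(((wrptr - 0 - 1) & (depth - 1)) == 8);    /* entry 0 -> buffer[8] */
    assert(((wrptr - 1 - 1) & (depth - 1)) == 7);    /* entry 1 -> buffer[7] */

    printf("entry -> idx mapping verified\n");
    return 0;
}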
void riscv_cpu_set_mode(CPURISCVState *env, target_ulong newpriv, bool virt_en)
{
g_assert(newpriv <= PRV_M && newpriv != PRV_RESERVED);
@@ -706,7 +1043,6 @@ void riscv_cpu_set_mode(CPURISCVState *env, target_ulong newpriv, bool virt_en)
/* tlb_flush is unnecessary as mode is contained in mmu_idx */
env->priv = newpriv;
env->xl = cpu_recompute_xl(env);
- riscv_cpu_update_mask(env);
/*
* Clear the load reservation - otherwise a reservation placed in one
@@ -777,6 +1113,55 @@ static int get_physical_address_pmp(CPURISCVState *env, int *prot, hwaddr addr,
return TRANSLATE_SUCCESS;
}
+/* Returns 'true' if an Svukte address check is needed */
+static bool do_svukte_check(CPURISCVState *env, bool first_stage,
+ int mode, bool virt)
+{
+ /* Svukte extension depends on Sv39. */
+    if (!env_archcpu(env)->cfg.ext_svukte ||
+        !first_stage ||
+        VM_1_10_SV39 != get_field(env->satp, SATP64_MODE)) {
+ return false;
+ }
+
+ /*
+ * Check hstatus.HUKTE if the effective mode is switched to VU-mode by
+ * executing HLV/HLVX/HSV in U-mode.
+ * For other cases, check senvcfg.UKTE.
+ */
+ if (env->priv == PRV_U && !env->virt_enabled && virt) {
+ if (!get_field(env->hstatus, HSTATUS_HUKTE)) {
+ return false;
+ }
+ } else if (!get_field(env->senvcfg, SENVCFG_UKTE)) {
+ return false;
+ }
+
+ /*
+ * Svukte extension is qualified only in U or VU-mode.
+ *
+ * Effective mode can be switched to U or VU-mode by:
+ * - M-mode + mstatus.MPRV=1 + mstatus.MPP=U-mode.
+ * - Execute HLV/HLVX/HSV from HS-mode + hstatus.SPVP=0.
+ * - U-mode.
+ * - VU-mode.
+ * - Execute HLV/HLVX/HSV from U-mode + hstatus.HU=1.
+ */
+ if (mode != PRV_U) {
+ return false;
+ }
+
+ return true;
+}
+
+static bool check_svukte_addr(CPURISCVState *env, vaddr addr)
+{
+ /* svukte extension excludes RV32 */
+ uint32_t sxlen = 32 * riscv_cpu_sxl(env);
+ uint64_t high_bit = addr & (1UL << (sxlen - 1));
+ return !high_bit;
+}
+
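A standalone restatement of check_svukte_addr(), assuming RV64 so that sxlen is 64 and the tested bit is bit 63 (the sample addresses are arbitrary picks from the low and high halves of the address space):

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

static bool svukte_addr_ok(uint64_t addr)
{
    /* Reject addresses with the most-significant (sxlen - 1) bit set. */
    return !(addr & (1ULL << 63));
}

int main(void)
{
    printf("%d\n", svukte_addr_ok(0x0000003fffffffffULL)); /* 1: low half */
    printf("%d\n", svukte_addr_ok(0xffffffc000000000ULL)); /* 0: high half */
    return 0;
}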
/*
* get_physical_address - get the physical address for this virtual address
*
@@ -804,7 +1189,7 @@ static int get_physical_address(CPURISCVState *env, hwaddr *physical,
target_ulong *fault_pte_addr,
int access_type, int mmu_idx,
bool first_stage, bool two_stage,
- bool is_debug)
+ bool is_debug, bool is_probe)
{
/*
* NOTE: the env->pc value visible here will not be
@@ -814,10 +1199,18 @@ static int get_physical_address(CPURISCVState *env, hwaddr *physical,
MemTxResult res;
MemTxAttrs attrs = MEMTXATTRS_UNSPECIFIED;
int mode = mmuidx_priv(mmu_idx);
+ bool virt = mmuidx_2stage(mmu_idx);
bool use_background = false;
hwaddr ppn;
int napot_bits = 0;
target_ulong napot_mask;
+ bool is_sstack_idx = ((mmu_idx & MMU_IDX_SS_WRITE) == MMU_IDX_SS_WRITE);
+ bool sstack_page = false;
+
+ if (do_svukte_check(env, first_stage, mode, virt) &&
+ !check_svukte_addr(env, addr)) {
+ return TRANSLATE_FAIL;
+ }
/*
* Check if we should use the background registers for the two
@@ -890,12 +1283,14 @@ static int get_physical_address(CPURISCVState *env, hwaddr *physical,
CPUState *cs = env_cpu(env);
int va_bits = PGSHIFT + levels * ptidxbits + widened;
+ int sxlen = 16 << riscv_cpu_sxl(env);
+ int sxlen_bytes = sxlen / 8;
if (first_stage == true) {
target_ulong mask, masked_msbs;
- if (TARGET_LONG_BITS > (va_bits - 1)) {
- mask = (1L << (TARGET_LONG_BITS - (va_bits - 1))) - 1;
+ if (sxlen > (va_bits - 1)) {
+ mask = (1L << (sxlen - (va_bits - 1))) - 1;
} else {
mask = 0;
}
@@ -925,9 +1320,7 @@ static int get_physical_address(CPURISCVState *env, hwaddr *physical,
hwaddr pte_addr;
int i;
-#if !TCG_OVERSIZED_GUEST
-restart:
-#endif
+ restart:
for (i = 0; i < levels; i++, ptshift -= ptidxbits) {
target_ulong idx;
if (i == 0) {
@@ -948,7 +1341,7 @@ restart:
int vbase_ret = get_physical_address(env, &vbase, &vbase_prot,
base, NULL, MMU_DATA_LOAD,
MMUIdx_U, false, true,
- is_debug);
+ is_debug, false);
if (vbase_ret != TRANSLATE_SUCCESS) {
if (fault_pte_addr) {
@@ -964,7 +1357,7 @@ restart:
int pmp_prot;
int pmp_ret = get_physical_address_pmp(env, &pmp_prot, pte_addr,
- sizeof(target_ulong),
+ sxlen_bytes,
MMU_DATA_LOAD, PRV_S);
if (pmp_ret != TRANSLATE_SUCCESS) {
return TRANSLATE_PMP_FAIL;
@@ -984,14 +1377,27 @@ restart:
ppn = pte >> PTE_PPN_SHIFT;
} else {
if (pte & PTE_RESERVED) {
+ qemu_log_mask(LOG_GUEST_ERROR, "%s: reserved bits set in PTE: "
+ "addr: 0x%" HWADDR_PRIx " pte: 0x" TARGET_FMT_lx "\n",
+ __func__, pte_addr, pte);
return TRANSLATE_FAIL;
}
if (!pbmte && (pte & PTE_PBMT)) {
+ /* Reserved without Svpbmt. */
+ qemu_log_mask(LOG_GUEST_ERROR, "%s: PBMT bits set in PTE, "
+ "and Svpbmt extension is disabled: "
+ "addr: 0x%" HWADDR_PRIx " pte: 0x" TARGET_FMT_lx "\n",
+ __func__, pte_addr, pte);
return TRANSLATE_FAIL;
}
if (!riscv_cpu_cfg(env)->ext_svnapot && (pte & PTE_N)) {
+ /* Reserved without Svnapot extension */
+ qemu_log_mask(LOG_GUEST_ERROR, "%s: N bit set in PTE, "
+ "and Svnapot extension is disabled: "
+ "addr: 0x%" HWADDR_PRIx " pte: 0x" TARGET_FMT_lx "\n",
+ __func__, pte_addr, pte);
return TRANSLATE_FAIL;
}
@@ -1002,14 +1408,19 @@ restart:
/* Invalid PTE */
return TRANSLATE_FAIL;
}
+
if (pte & (PTE_R | PTE_W | PTE_X)) {
goto leaf;
}
- /* Inner PTE, continue walking */
if (pte & (PTE_D | PTE_A | PTE_U | PTE_ATTR)) {
+ /* D, A, and U bits are reserved in non-leaf/inner PTEs */
+ qemu_log_mask(LOG_GUEST_ERROR, "%s: D, A, or U bits set in non-leaf PTE: "
+ "addr: 0x%" HWADDR_PRIx " pte: 0x" TARGET_FMT_lx "\n",
+ __func__, pte_addr, pte);
return TRANSLATE_FAIL;
}
+ /* Inner PTE, continue walking */
base = ppn << PGSHIFT;
}
@@ -1019,28 +1430,57 @@ restart:
leaf:
if (ppn & ((1ULL << ptshift) - 1)) {
/* Misaligned PPN */
+        qemu_log_mask(LOG_GUEST_ERROR, "%s: PPN bits in PTE are misaligned: "
+ "addr: 0x%" HWADDR_PRIx " pte: 0x" TARGET_FMT_lx "\n",
+ __func__, pte_addr, pte);
return TRANSLATE_FAIL;
}
if (!pbmte && (pte & PTE_PBMT)) {
/* Reserved without Svpbmt. */
+ qemu_log_mask(LOG_GUEST_ERROR, "%s: PBMT bits set in PTE, "
+ "and Svpbmt extension is disabled: "
+ "addr: 0x%" HWADDR_PRIx " pte: 0x" TARGET_FMT_lx "\n",
+ __func__, pte_addr, pte);
return TRANSLATE_FAIL;
}
+ target_ulong rwx = pte & (PTE_R | PTE_W | PTE_X);
/* Check for reserved combinations of RWX flags. */
- switch (pte & (PTE_R | PTE_W | PTE_X)) {
- case PTE_W:
+ switch (rwx) {
case PTE_W | PTE_X:
return TRANSLATE_FAIL;
+ case PTE_W:
+            /*
+             * If BCFI is enabled, PTE_W alone is not reserved; it denotes a
+             * shadow stack page.
+             */
+ if (cpu_get_bcfien(env) && first_stage) {
+ sstack_page = true;
+ /*
+                 * If this is a shadow stack index, both read and write are
+                 * allowed; otherwise, unless this is a probe, only read is
+                 * allowed.
+ */
+ rwx = is_sstack_idx ? (PTE_R | PTE_W) : (is_probe ? 0 : PTE_R);
+ break;
+ }
+ return TRANSLATE_FAIL;
+ case PTE_R:
+ /*
+             * No matter the `access_type`, shadow stack accesses to read-only
+             * memory always raise store page faults. During unwind, loads are
+             * promoted to store faults.
+ */
+ if (is_sstack_idx) {
+ return TRANSLATE_FAIL;
+ }
+ break;
}
int prot = 0;
- if (pte & PTE_R) {
+ if (rwx & PTE_R) {
prot |= PAGE_READ;
}
- if (pte & PTE_W) {
+ if (rwx & PTE_W) {
prot |= PAGE_WRITE;
}
- if (pte & PTE_X) {
+ if (rwx & PTE_X) {
bool mxr = false;
/*
@@ -1084,8 +1524,11 @@ restart:
}
if (!((prot >> access_type) & 1)) {
- /* Access check failed */
- return TRANSLATE_FAIL;
+ /*
+         * Access check failed. Access check failures for shadow stack
+         * pages are reported as access faults.
+ */
+ return sstack_page ? TRANSLATE_PMP_FAIL : TRANSLATE_FAIL;
}
target_ulong updated_pte = pte;
@@ -1116,24 +1559,23 @@ restart:
* it is no longer valid and we must re-walk the page table.
*/
MemoryRegion *mr;
- hwaddr l = sizeof(target_ulong), addr1;
+ hwaddr l = sxlen_bytes, addr1;
mr = address_space_translate(cs->as, pte_addr, &addr1, &l,
false, MEMTXATTRS_UNSPECIFIED);
if (memory_region_is_ram(mr)) {
target_ulong *pte_pa = qemu_map_ram_ptr(mr->ram_block, addr1);
-#if TCG_OVERSIZED_GUEST
- /*
- * MTTCG is not enabled on oversized TCG guests so
- * page table updates do not need to be atomic
- */
- *pte_pa = pte = updated_pte;
-#else
- target_ulong old_pte = qatomic_cmpxchg(pte_pa, pte, updated_pte);
+ target_ulong old_pte;
+ if (riscv_cpu_sxl(env) == MXL_RV32) {
+ old_pte = qatomic_cmpxchg((uint32_t *)pte_pa, cpu_to_le32(pte), cpu_to_le32(updated_pte));
+ old_pte = le32_to_cpu(old_pte);
+ } else {
+ old_pte = qatomic_cmpxchg(pte_pa, cpu_to_le64(pte), cpu_to_le64(updated_pte));
+ old_pte = le64_to_cpu(old_pte);
+ }
if (old_pte != pte) {
goto restart;
}
pte = updated_pte;
-#endif
} else {
/*
* Misconfigured PTE in ROM (AD bits are not preset) or
@@ -1223,13 +1665,13 @@ hwaddr riscv_cpu_get_phys_page_debug(CPUState *cs, vaddr addr)
int mmu_idx = riscv_env_mmu_index(&cpu->env, false);
if (get_physical_address(env, &phys_addr, &prot, addr, NULL, 0, mmu_idx,
- true, env->virt_enabled, true)) {
+ true, env->virt_enabled, true, false)) {
return -1;
}
if (env->virt_enabled) {
if (get_physical_address(env, &phys_addr, &prot, phys_addr, NULL,
- 0, MMUIdx_U, false, true, true)) {
+ 0, MMUIdx_U, false, true, true, false)) {
return -1;
}
}
@@ -1272,9 +1714,17 @@ void riscv_cpu_do_unaligned_access(CPUState *cs, vaddr addr,
break;
case MMU_DATA_LOAD:
cs->exception_index = RISCV_EXCP_LOAD_ADDR_MIS;
+        /* misaligned shadow stack accesses are access faults */
+ if (mmu_idx & MMU_IDX_SS_WRITE) {
+ cs->exception_index = RISCV_EXCP_LOAD_ACCESS_FAULT;
+ }
break;
case MMU_DATA_STORE:
cs->exception_index = RISCV_EXCP_STORE_AMO_ADDR_MIS;
+        /* misaligned shadow stack accesses are access faults */
+ if (mmu_idx & MMU_IDX_SS_WRITE) {
+ cs->exception_index = RISCV_EXCP_STORE_AMO_ACCESS_FAULT;
+ }
break;
default:
g_assert_not_reached();
@@ -1323,7 +1773,7 @@ bool riscv_cpu_tlb_fill(CPUState *cs, vaddr address, int size,
int ret = TRANSLATE_FAIL;
int mode = mmuidx_priv(mmu_idx);
/* default TLB page size */
- target_ulong tlb_size = TARGET_PAGE_SIZE;
+ hwaddr tlb_size = TARGET_PAGE_SIZE;
env->guest_phys_fault_addr = 0;
@@ -1335,7 +1785,7 @@ bool riscv_cpu_tlb_fill(CPUState *cs, vaddr address, int size,
/* Two stage lookup */
ret = get_physical_address(env, &pa, &prot, address,
&env->guest_phys_fault_addr, access_type,
- mmu_idx, true, true, false);
+ mmu_idx, true, true, false, probe);
/*
* A G-stage exception may be triggered during two state lookup.
@@ -1358,7 +1808,7 @@ bool riscv_cpu_tlb_fill(CPUState *cs, vaddr address, int size,
ret = get_physical_address(env, &pa, &prot2, im_address, NULL,
access_type, MMUIdx_U, false, true,
- false);
+ false, probe);
qemu_log_mask(CPU_LOG_MMU,
"%s 2nd-stage address=%" VADDR_PRIx
@@ -1375,7 +1825,7 @@ bool riscv_cpu_tlb_fill(CPUState *cs, vaddr address, int size,
qemu_log_mask(CPU_LOG_MMU,
"%s PMP address=" HWADDR_FMT_plx " ret %d prot"
- " %d tlb_size " TARGET_FMT_lu "\n",
+ " %d tlb_size %" HWADDR_PRIu "\n",
__func__, pa, ret, prot_pmp, tlb_size);
prot &= prot_pmp;
@@ -1395,7 +1845,8 @@ bool riscv_cpu_tlb_fill(CPUState *cs, vaddr address, int size,
} else {
/* Single stage lookup */
ret = get_physical_address(env, &pa, &prot, address, NULL,
- access_type, mmu_idx, true, false, false);
+ access_type, mmu_idx, true, false, false,
+ probe);
qemu_log_mask(CPU_LOG_MMU,
"%s address=%" VADDR_PRIx " ret %d physical "
@@ -1409,7 +1860,7 @@ bool riscv_cpu_tlb_fill(CPUState *cs, vaddr address, int size,
qemu_log_mask(CPU_LOG_MMU,
"%s PMP address=" HWADDR_FMT_plx " ret %d prot"
- " %d tlb_size " TARGET_FMT_lu "\n",
+ " %d tlb_size %" HWADDR_PRIu "\n",
__func__, pa, ret, prot_pmp, tlb_size);
prot &= prot_pmp;
@@ -1427,6 +1878,23 @@ bool riscv_cpu_tlb_fill(CPUState *cs, vaddr address, int size,
} else if (probe) {
return false;
} else {
+ int wp_access = 0;
+
+ if (access_type == MMU_DATA_LOAD) {
+ wp_access |= BP_MEM_READ;
+ } else if (access_type == MMU_DATA_STORE) {
+ wp_access |= BP_MEM_WRITE;
+ }
+
+ /*
+         * If a watchpoint isn't found for 'address' this will
+ * be a no-op and we'll resume the mmu_exception path.
+ * Otherwise we'll throw a debug exception and execution
+ * will continue elsewhere.
+ */
+ cpu_check_watchpoint(cs, address, size, MEMTXATTRS_UNSPECIFIED,
+ wp_access, retaddr);
+
raise_mmu_exception(env, address, access_type, pmp_violation,
first_stage_error, two_stage_lookup,
two_stage_indirect_error);
@@ -1641,6 +2109,40 @@ static target_ulong riscv_transformed_insn(CPURISCVState *env,
return xinsn;
}
+static target_ulong promote_load_fault(target_ulong orig_cause)
+{
+ switch (orig_cause) {
+ case RISCV_EXCP_LOAD_GUEST_ACCESS_FAULT:
+ return RISCV_EXCP_STORE_GUEST_AMO_ACCESS_FAULT;
+
+ case RISCV_EXCP_LOAD_ACCESS_FAULT:
+ return RISCV_EXCP_STORE_AMO_ACCESS_FAULT;
+
+ case RISCV_EXCP_LOAD_PAGE_FAULT:
+ return RISCV_EXCP_STORE_PAGE_FAULT;
+ }
+
+ /* if no promotion, return original cause */
+ return orig_cause;
+}
+
+static void riscv_do_nmi(CPURISCVState *env, target_ulong cause, bool virt)
+{
+ env->mnstatus = set_field(env->mnstatus, MNSTATUS_NMIE, false);
+ env->mnstatus = set_field(env->mnstatus, MNSTATUS_MNPV, virt);
+ env->mnstatus = set_field(env->mnstatus, MNSTATUS_MNPP, env->priv);
+ env->mncause = cause;
+ env->mnepc = env->pc;
+ env->pc = env->rnmi_irqvec;
+
+ if (cpu_get_fcfien(env)) {
+ env->mnstatus = set_field(env->mnstatus, MNSTATUS_MNPELP, env->elp);
+ }
+
+ /* Trapping to M mode, virt is disabled */
+ riscv_cpu_set_mode(env, PRV_M, false);
+}
+
/*
* Handle Traps
*
@@ -1653,7 +2155,10 @@ void riscv_cpu_do_interrupt(CPUState *cs)
CPURISCVState *env = &cpu->env;
bool virt = env->virt_enabled;
bool write_gva = false;
+ bool always_storeamo = (env->excp_uw2 & RISCV_UW2_ALWAYS_STORE_AMO);
+ bool vsmode_exc;
uint64_t s;
+ int mode;
/*
* cs->exception is 32-bits wide unlike mcause which is XLEN-bits wide
@@ -1662,22 +2167,38 @@ void riscv_cpu_do_interrupt(CPUState *cs)
bool async = !!(cs->exception_index & RISCV_EXCP_INT_FLAG);
target_ulong cause = cs->exception_index & RISCV_EXCP_INT_MASK;
uint64_t deleg = async ? env->mideleg : env->medeleg;
- bool s_injected = env->mvip & (1 << cause) & env->mvien &&
- !(env->mip & (1 << cause));
- bool vs_injected = env->hvip & (1 << cause) & env->hvien &&
- !(env->mip & (1 << cause));
+ bool s_injected = env->mvip & (1ULL << cause) & env->mvien &&
+ !(env->mip & (1ULL << cause));
+ bool vs_injected = env->hvip & (1ULL << cause) & env->hvien &&
+ !(env->mip & (1ULL << cause));
+ bool smode_double_trap = false;
+ uint64_t hdeleg = async ? env->hideleg : env->hedeleg;
+ const bool prev_virt = env->virt_enabled;
+ const target_ulong prev_priv = env->priv;
target_ulong tval = 0;
target_ulong tinst = 0;
target_ulong htval = 0;
target_ulong mtval2 = 0;
+ target_ulong src;
+ int sxlen = 0;
+ int mxlen = 16 << riscv_cpu_mxl(env);
+ bool nnmi_excep = false;
+
+ if (cpu->cfg.ext_smrnmi && env->rnmip && async) {
+ riscv_do_nmi(env, cause | ((target_ulong)1U << (mxlen - 1)),
+ env->virt_enabled);
+ return;
+ }
if (!async) {
/* set tval to badaddr for traps with address information */
switch (cause) {
+#ifdef CONFIG_TCG
case RISCV_EXCP_SEMIHOST:
do_common_semihosting(cs);
env->pc += 4;
return;
+#endif
case RISCV_EXCP_LOAD_GUEST_ACCESS_FAULT:
case RISCV_EXCP_STORE_GUEST_AMO_ACCESS_FAULT:
case RISCV_EXCP_LOAD_ADDR_MIS:
@@ -1686,6 +2207,9 @@ void riscv_cpu_do_interrupt(CPUState *cs)
case RISCV_EXCP_STORE_AMO_ACCESS_FAULT:
case RISCV_EXCP_LOAD_PAGE_FAULT:
case RISCV_EXCP_STORE_PAGE_FAULT:
+ if (always_storeamo) {
+ cause = promote_load_fault(cause);
+ }
write_gva = env->two_stage_lookup;
tval = env->badaddr;
if (env->two_stage_indirect_lookup) {
@@ -1727,6 +2251,9 @@ void riscv_cpu_do_interrupt(CPUState *cs)
cs->watchpoint_hit = NULL;
}
break;
+ case RISCV_EXCP_SW_CHECK:
+ tval = env->sw_check_code;
+ break;
default:
break;
}
@@ -1755,14 +2282,44 @@ void riscv_cpu_do_interrupt(CPUState *cs)
__func__, env->mhartid, async, cause, env->pc, tval,
riscv_cpu_get_trap_name(cause, async));
- if (env->priv <= PRV_S && cause < 64 &&
- (((deleg >> cause) & 1) || s_injected || vs_injected)) {
- /* handle the trap in S-mode */
+ mode = env->priv <= PRV_S && cause < 64 &&
+ (((deleg >> cause) & 1) || s_injected || vs_injected) ? PRV_S : PRV_M;
+
+ vsmode_exc = env->virt_enabled && cause < 64 &&
+ (((hdeleg >> cause) & 1) || vs_injected);
+
+ /*
+ * Check double trap condition only if already in S-mode and targeting
+ * S-mode
+ */
+ if (cpu->cfg.ext_ssdbltrp && env->priv == PRV_S && mode == PRV_S) {
+ bool dte = (env->menvcfg & MENVCFG_DTE) != 0;
+ bool sdt = (env->mstatus & MSTATUS_SDT) != 0;
+ /* In VS or HS */
if (riscv_has_ext(env, RVH)) {
- uint64_t hdeleg = async ? env->hideleg : env->hedeleg;
+ if (vsmode_exc) {
+                /* VS -> VS, use henvcfg instead of menvcfg */
+ dte = (env->henvcfg & HENVCFG_DTE) != 0;
+ } else if (env->virt_enabled) {
+ /* VS -> HS, use mstatus_hs */
+ sdt = (env->mstatus_hs & MSTATUS_SDT) != 0;
+ }
+ }
+ smode_double_trap = dte && sdt;
+ if (smode_double_trap) {
+ mode = PRV_M;
+ }
+ }
+
+ if (mode == PRV_S) {
+ /* handle the trap in S-mode */
+ /* save elp status */
+ if (cpu_get_fcfien(env)) {
+ env->mstatus = set_field(env->mstatus, MSTATUS_SPELP, env->elp);
+ }
- if (env->virt_enabled &&
- (((hdeleg >> cause) & 1) || vs_injected)) {
+ if (riscv_has_ext(env, RVH)) {
+ if (vsmode_exc) {
/* Trap to VS mode */
/*
* See if we need to adjust cause. Yes if its VS mode interrupt
@@ -1795,8 +2352,12 @@ void riscv_cpu_do_interrupt(CPUState *cs)
s = set_field(s, MSTATUS_SPIE, get_field(s, MSTATUS_SIE));
s = set_field(s, MSTATUS_SPP, env->priv);
s = set_field(s, MSTATUS_SIE, 0);
+ if (riscv_env_smode_dbltrp_enabled(env, virt)) {
+ s = set_field(s, MSTATUS_SDT, 1);
+ }
env->mstatus = s;
- env->scause = cause | ((target_ulong)async << (TARGET_LONG_BITS - 1));
+ sxlen = 16 << riscv_cpu_sxl(env);
+ env->scause = cause | ((target_ulong)async << (sxlen - 1));
env->sepc = env->pc;
env->stval = tval;
env->htval = htval;
@@ -1804,8 +2365,28 @@ void riscv_cpu_do_interrupt(CPUState *cs)
env->pc = (env->stvec >> 2 << 2) +
((async && (env->stvec & 3) == 1) ? cause * 4 : 0);
riscv_cpu_set_mode(env, PRV_S, virt);
+
+ src = env->sepc;
} else {
+ /*
+ * If the hart encounters an exception while executing in M-mode
+ * with the mnstatus.NMIE bit clear, the exception is an RNMI exception.
+ */
+ nnmi_excep = cpu->cfg.ext_smrnmi &&
+ !get_field(env->mnstatus, MNSTATUS_NMIE) &&
+ !async;
+
/* handle the trap in M-mode */
+ /* save elp status */
+ if (cpu_get_fcfien(env)) {
+ if (nnmi_excep) {
+ env->mnstatus = set_field(env->mnstatus, MNSTATUS_MNPELP,
+ env->elp);
+ } else {
+ env->mstatus = set_field(env->mstatus, MSTATUS_MPELP, env->elp);
+ }
+ }
+
if (riscv_has_ext(env, RVH)) {
if (env->virt_enabled) {
riscv_cpu_swap_hypervisor_regs(env);
@@ -1821,23 +2402,78 @@ void riscv_cpu_do_interrupt(CPUState *cs)
/* Trapping to M mode, virt is disabled */
virt = false;
}
+ /*
+ * If the hart encounters an exception while executing in M-mode,
+ * with the mnstatus.NMIE bit clear, the program counter is set to
+ * the RNMI exception trap handler address.
+ */
+ nnmi_excep = cpu->cfg.ext_smrnmi &&
+ !get_field(env->mnstatus, MNSTATUS_NMIE) &&
+ !async;
s = env->mstatus;
s = set_field(s, MSTATUS_MPIE, get_field(s, MSTATUS_MIE));
s = set_field(s, MSTATUS_MPP, env->priv);
s = set_field(s, MSTATUS_MIE, 0);
+ if (cpu->cfg.ext_smdbltrp) {
+ if (env->mstatus & MSTATUS_MDT) {
+ assert(env->priv == PRV_M);
+ if (!cpu->cfg.ext_smrnmi || nnmi_excep) {
+ cpu_abort(CPU(cpu), "M-mode double trap\n");
+ } else {
+ riscv_do_nmi(env, cause, false);
+ return;
+ }
+ }
+
+ s = set_field(s, MSTATUS_MDT, 1);
+ }
env->mstatus = s;
- env->mcause = cause | ~(((target_ulong)-1) >> async);
+ env->mcause = cause | ((target_ulong)async << (mxlen - 1));
+ if (smode_double_trap) {
+ env->mtval2 = env->mcause;
+ env->mcause = RISCV_EXCP_DOUBLE_TRAP;
+ } else {
+ env->mtval2 = mtval2;
+ }
env->mepc = env->pc;
env->mtval = tval;
- env->mtval2 = mtval2;
env->mtinst = tinst;
- env->pc = (env->mtvec >> 2 << 2) +
- ((async && (env->mtvec & 3) == 1) ? cause * 4 : 0);
+
+ /*
+ * For RNMI exception, program counter is set to the RNMI exception
+ * trap handler address.
+ */
+ if (nnmi_excep) {
+ env->pc = env->rnmi_excpvec;
+ } else {
+ env->pc = (env->mtvec >> 2 << 2) +
+ ((async && (env->mtvec & 3) == 1) ? cause * 4 : 0);
+ }
riscv_cpu_set_mode(env, PRV_M, virt);
+ src = env->mepc;
+ }
+
+ if (riscv_cpu_cfg(env)->ext_smctr || riscv_cpu_cfg(env)->ext_ssctr) {
+ if (async && cause == IRQ_PMU_OVF) {
+ riscv_ctr_freeze(env, XCTRCTL_LCOFIFRZ, virt);
+ } else if (!async && cause == RISCV_EXCP_BREAKPOINT) {
+ riscv_ctr_freeze(env, XCTRCTL_BPFRZ, virt);
+ }
+
+ riscv_ctr_add_entry(env, src, env->pc,
+ async ? CTRDATA_TYPE_INTERRUPT : CTRDATA_TYPE_EXCEPTION,
+ prev_priv, prev_virt);
}
/*
+     * Interrupt/exception/trap delivery is an asynchronous event, and per
+     * the Zicfilp spec the CPU should clear the ELP state. There is no harm
+     * in clearing it unconditionally.
+ */
+ env->elp = false;
+
+ /*
* NOTE: it is not necessary to yield load reservations here. It is only
* necessary for an SC from "another hart" to cause a load reservation
* to be yielded. Refer to the memory consistency model section of the
diff --git a/target/riscv/cpu_user.h b/target/riscv/cpu_user.h
index 02afad6..e6927ff 100644
--- a/target/riscv/cpu_user.h
+++ b/target/riscv/cpu_user.h
@@ -15,5 +15,6 @@
#define xA6 16
#define xA7 17 /* syscall number for RVI ABI */
#define xT0 5 /* syscall number for RVE ABI */
+#define xT2 7 /* Zicfilp landing pad label register */
#endif
diff --git a/target/riscv/crypto_helper.c b/target/riscv/crypto_helper.c
index bb084e0..a0fb54b 100644
--- a/target/riscv/crypto_helper.c
+++ b/target/riscv/crypto_helper.c
@@ -19,7 +19,6 @@
#include "qemu/osdep.h"
#include "cpu.h"
-#include "exec/exec-all.h"
#include "exec/helper-proto.h"
#include "crypto/aes.h"
#include "crypto/aes-round.h"
diff --git a/target/riscv/csr.c b/target/riscv/csr.c
index ea35603..fb14972 100644
--- a/target/riscv/csr.c
+++ b/target/riscv/csr.c
@@ -24,11 +24,15 @@
#include "tcg/tcg-cpu.h"
#include "pmu.h"
#include "time_helper.h"
-#include "exec/exec-all.h"
+#include "exec/cputlb.h"
#include "exec/tb-flush.h"
-#include "sysemu/cpu-timers.h"
+#include "exec/icount.h"
+#include "accel/tcg/getpc.h"
#include "qemu/guest-random.h"
#include "qapi/error.h"
+#include "tcg/insn-start-words.h"
+#include "internals.h"
/* CSR function table public API */
void riscv_get_csr_ops(int csrno, riscv_csr_operations *ops)
@@ -36,7 +40,7 @@ void riscv_get_csr_ops(int csrno, riscv_csr_operations *ops)
*ops = csr_ops[csrno & (CSR_TABLE_SIZE - 1)];
}
-void riscv_set_csr_ops(int csrno, riscv_csr_operations *ops)
+void riscv_set_csr_ops(int csrno, const riscv_csr_operations *ops)
{
csr_ops[csrno & (CSR_TABLE_SIZE - 1)] = *ops;
}
@@ -184,6 +188,30 @@ static RISCVException zcmt(CPURISCVState *env, int csrno)
return RISCV_EXCP_NONE;
}
+static RISCVException cfi_ss(CPURISCVState *env, int csrno)
+{
+ if (!env_archcpu(env)->cfg.ext_zicfiss) {
+ return RISCV_EXCP_ILLEGAL_INST;
+ }
+
+    /* If the extension is implemented, M-mode always has access to the SSP CSR */
+ if (env->priv == PRV_M) {
+ return RISCV_EXCP_NONE;
+ }
+
+    /* If BCFI is not active for the current env, access to the CSR is illegal */
+ if (!cpu_get_bcfien(env)) {
+#if !defined(CONFIG_USER_ONLY)
+ if (env->debugger) {
+ return RISCV_EXCP_NONE;
+ }
+#endif
+ return RISCV_EXCP_ILLEGAL_INST;
+ }
+
+ return RISCV_EXCP_NONE;
+}
+
#if !defined(CONFIG_USER_ONLY)
static RISCVException mctr(CPURISCVState *env, int csrno)
{
@@ -286,6 +314,24 @@ static RISCVException aia_any32(CPURISCVState *env, int csrno)
return any32(env, csrno);
}
+static RISCVException csrind_any(CPURISCVState *env, int csrno)
+{
+ if (!riscv_cpu_cfg(env)->ext_smcsrind) {
+ return RISCV_EXCP_ILLEGAL_INST;
+ }
+
+ return RISCV_EXCP_NONE;
+}
+
+static RISCVException csrind_or_aia_any(CPURISCVState *env, int csrno)
+{
+ if (!riscv_cpu_cfg(env)->ext_smaia && !riscv_cpu_cfg(env)->ext_smcsrind) {
+ return RISCV_EXCP_ILLEGAL_INST;
+ }
+
+ return any(env, csrno);
+}
+
static RISCVException smode(CPURISCVState *env, int csrno)
{
if (riscv_has_ext(env, RVS)) {
@@ -306,22 +352,89 @@ static RISCVException smode32(CPURISCVState *env, int csrno)
static RISCVException aia_smode(CPURISCVState *env, int csrno)
{
+ int ret;
+
if (!riscv_cpu_cfg(env)->ext_ssaia) {
return RISCV_EXCP_ILLEGAL_INST;
}
+ if (csrno == CSR_STOPEI) {
+ ret = smstateen_acc_ok(env, 0, SMSTATEEN0_IMSIC);
+ } else {
+ ret = smstateen_acc_ok(env, 0, SMSTATEEN0_AIA);
+ }
+
+ if (ret != RISCV_EXCP_NONE) {
+ return ret;
+ }
+
return smode(env, csrno);
}
static RISCVException aia_smode32(CPURISCVState *env, int csrno)
{
+ int ret;
+
if (!riscv_cpu_cfg(env)->ext_ssaia) {
return RISCV_EXCP_ILLEGAL_INST;
}
+ ret = smstateen_acc_ok(env, 0, SMSTATEEN0_AIA);
+ if (ret != RISCV_EXCP_NONE) {
+ return ret;
+ }
+
return smode32(env, csrno);
}
+static RISCVException scountinhibit_pred(CPURISCVState *env, int csrno)
+{
+ RISCVCPU *cpu = env_archcpu(env);
+
+ if (!cpu->cfg.ext_ssccfg || !cpu->cfg.ext_smcdeleg) {
+ return RISCV_EXCP_ILLEGAL_INST;
+ }
+
+ if (env->virt_enabled) {
+ return RISCV_EXCP_VIRT_INSTRUCTION_FAULT;
+ }
+
+ return smode(env, csrno);
+}
+
+static bool csrind_extensions_present(CPURISCVState *env)
+{
+ return riscv_cpu_cfg(env)->ext_smcsrind || riscv_cpu_cfg(env)->ext_sscsrind;
+}
+
+static bool aia_extensions_present(CPURISCVState *env)
+{
+ return riscv_cpu_cfg(env)->ext_smaia || riscv_cpu_cfg(env)->ext_ssaia;
+}
+
+static bool csrind_or_aia_extensions_present(CPURISCVState *env)
+{
+ return csrind_extensions_present(env) || aia_extensions_present(env);
+}
+
+static RISCVException csrind_smode(CPURISCVState *env, int csrno)
+{
+ if (!csrind_extensions_present(env)) {
+ return RISCV_EXCP_ILLEGAL_INST;
+ }
+
+ return smode(env, csrno);
+}
+
+static RISCVException csrind_or_aia_smode(CPURISCVState *env, int csrno)
+{
+ if (!csrind_or_aia_extensions_present(env)) {
+ return RISCV_EXCP_ILLEGAL_INST;
+ }
+
+ return smode(env, csrno);
+}
+
static RISCVException hmode(CPURISCVState *env, int csrno)
{
if (riscv_has_ext(env, RVH)) {
@@ -341,6 +454,24 @@ static RISCVException hmode32(CPURISCVState *env, int csrno)
}
+static RISCVException csrind_hmode(CPURISCVState *env, int csrno)
+{
+ if (!csrind_extensions_present(env)) {
+ return RISCV_EXCP_ILLEGAL_INST;
+ }
+
+ return hmode(env, csrno);
+}
+
+static RISCVException csrind_or_aia_hmode(CPURISCVState *env, int csrno)
+{
+ if (!csrind_or_aia_extensions_present(env)) {
+ return RISCV_EXCP_ILLEGAL_INST;
+ }
+
+ return hmode(env, csrno);
+}
+
static RISCVException umode(CPURISCVState *env, int csrno)
{
if (riscv_has_ext(env, RVU)) {
@@ -512,27 +643,82 @@ static RISCVException hgatp(CPURISCVState *env, int csrno)
return hmode(env, csrno);
}
-/* Checks if PointerMasking registers could be accessed */
-static RISCVException pointer_masking(CPURISCVState *env, int csrno)
+/*
+ * M-mode:
+ * Without ext_smctr, raise an illegal instruction exception.
+ * Otherwise everything is accessible to M-mode.
+ *
+ * S-mode:
+ * Without ext_ssctr or mstateen.ctr, raise an illegal instruction exception.
+ * Otherwise everything other than mctrctl is accessible.
+ *
+ * VS-mode:
+ * Without ext_ssctr or mstateen.ctr, raise an illegal instruction exception.
+ * Without hstateen.ctr, raise a virtual instruction exception.
+ * Otherwise allow sctrctl (vsctrctl), sctrstatus, and the 0x200-0x2ff entry
+ * range. Always raise an illegal instruction exception for sctrdepth.
+ */
+static RISCVException ctr_mmode(CPURISCVState *env, int csrno)
{
- /* Check if j-ext is present */
- if (riscv_has_ext(env, RVJ)) {
+ /* Check if smctr-ext is present */
+ if (riscv_cpu_cfg(env)->ext_smctr) {
return RISCV_EXCP_NONE;
}
+
return RISCV_EXCP_ILLEGAL_INST;
}
+static RISCVException ctr_smode(CPURISCVState *env, int csrno)
+{
+ const RISCVCPUConfig *cfg = riscv_cpu_cfg(env);
+
+ if (!cfg->ext_smctr && !cfg->ext_ssctr) {
+ return RISCV_EXCP_ILLEGAL_INST;
+ }
+
+ RISCVException ret = smstateen_acc_ok(env, 0, SMSTATEEN0_CTR);
+ if (ret == RISCV_EXCP_NONE && csrno == CSR_SCTRDEPTH &&
+ env->virt_enabled) {
+ return RISCV_EXCP_VIRT_INSTRUCTION_FAULT;
+ }
+
+ return ret;
+}
+
static RISCVException aia_hmode(CPURISCVState *env, int csrno)
{
+ int ret;
+
if (!riscv_cpu_cfg(env)->ext_ssaia) {
return RISCV_EXCP_ILLEGAL_INST;
}
- return hmode(env, csrno);
+ if (csrno == CSR_VSTOPEI) {
+ ret = smstateen_acc_ok(env, 0, SMSTATEEN0_IMSIC);
+ } else {
+ ret = smstateen_acc_ok(env, 0, SMSTATEEN0_AIA);
+ }
+
+ if (ret != RISCV_EXCP_NONE) {
+ return ret;
+ }
+
+ return hmode(env, csrno);
}
static RISCVException aia_hmode32(CPURISCVState *env, int csrno)
{
+    int ret;
+
    if (!riscv_cpu_cfg(env)->ext_ssaia) {
        return RISCV_EXCP_ILLEGAL_INST;
    }
+
+    ret = smstateen_acc_ok(env, 0, SMSTATEEN0_AIA);
+    if (ret != RISCV_EXCP_NONE) {
+        return ret;
+    }
@@ -540,6 +726,15 @@ static RISCVException aia_hmode32(CPURISCVState *env, int csrno)
return hmode32(env, csrno);
}
+static RISCVException dbltrp_hmode(CPURISCVState *env, int csrno)
+{
+ if (riscv_cpu_cfg(env)->ext_ssdbltrp) {
+ return RISCV_EXCP_NONE;
+ }
+
+ return hmode(env, csrno);
+}
+
static RISCVException pmp(CPURISCVState *env, int csrno)
{
if (riscv_cpu_cfg(env)->pmp) {
@@ -566,6 +761,9 @@ static RISCVException have_mseccfg(CPURISCVState *env, int csrno)
if (riscv_cpu_cfg(env)->ext_zkr) {
return RISCV_EXCP_NONE;
}
+ if (riscv_cpu_cfg(env)->ext_smmpm) {
+ return RISCV_EXCP_NONE;
+ }
return RISCV_EXCP_ILLEGAL_INST;
}
@@ -578,6 +776,17 @@ static RISCVException debug(CPURISCVState *env, int csrno)
return RISCV_EXCP_ILLEGAL_INST;
}
+
+static RISCVException rnmi(CPURISCVState *env, int csrno)
+{
+ RISCVCPU *cpu = env_archcpu(env);
+
+ if (cpu->cfg.ext_smrnmi) {
+ return RISCV_EXCP_NONE;
+ }
+
+ return RISCV_EXCP_ILLEGAL_INST;
+}
#endif
static RISCVException seed(CPURISCVState *env, int csrno)
@@ -622,6 +831,21 @@ static RISCVException seed(CPURISCVState *env, int csrno)
#endif
}
+/* zicfiss CSR_SSP read and write */
+static RISCVException read_ssp(CPURISCVState *env, int csrno,
+ target_ulong *val)
+{
+ *val = env->ssp;
+ return RISCV_EXCP_NONE;
+}
+
+static RISCVException write_ssp(CPURISCVState *env, int csrno,
+ target_ulong val, uintptr_t ra)
+{
+ env->ssp = val;
+ return RISCV_EXCP_NONE;
+}
+
/* User Floating-Point CSRs */
static RISCVException read_fflags(CPURISCVState *env, int csrno,
target_ulong *val)
@@ -631,7 +855,7 @@ static RISCVException read_fflags(CPURISCVState *env, int csrno,
}
static RISCVException write_fflags(CPURISCVState *env, int csrno,
- target_ulong val)
+ target_ulong val, uintptr_t ra)
{
#if !defined(CONFIG_USER_ONLY)
if (riscv_has_ext(env, RVF)) {
@@ -650,7 +874,7 @@ static RISCVException read_frm(CPURISCVState *env, int csrno,
}
static RISCVException write_frm(CPURISCVState *env, int csrno,
- target_ulong val)
+ target_ulong val, uintptr_t ra)
{
#if !defined(CONFIG_USER_ONLY)
if (riscv_has_ext(env, RVF)) {
@@ -670,7 +894,7 @@ static RISCVException read_fcsr(CPURISCVState *env, int csrno,
}
static RISCVException write_fcsr(CPURISCVState *env, int csrno,
- target_ulong val)
+ target_ulong val, uintptr_t ra)
{
#if !defined(CONFIG_USER_ONLY)
if (riscv_has_ext(env, RVF)) {
@@ -722,7 +946,7 @@ static RISCVException read_vxrm(CPURISCVState *env, int csrno,
}
static RISCVException write_vxrm(CPURISCVState *env, int csrno,
- target_ulong val)
+ target_ulong val, uintptr_t ra)
{
#if !defined(CONFIG_USER_ONLY)
env->mstatus |= MSTATUS_VS;
@@ -734,17 +958,17 @@ static RISCVException write_vxrm(CPURISCVState *env, int csrno,
static RISCVException read_vxsat(CPURISCVState *env, int csrno,
target_ulong *val)
{
- *val = env->vxsat;
+ *val = env->vxsat & BIT(0);
return RISCV_EXCP_NONE;
}
static RISCVException write_vxsat(CPURISCVState *env, int csrno,
- target_ulong val)
+ target_ulong val, uintptr_t ra)
{
#if !defined(CONFIG_USER_ONLY)
env->mstatus |= MSTATUS_VS;
#endif
- env->vxsat = val;
+ env->vxsat = val & BIT(0);
return RISCV_EXCP_NONE;
}
@@ -756,7 +980,7 @@ static RISCVException read_vstart(CPURISCVState *env, int csrno,
}
static RISCVException write_vstart(CPURISCVState *env, int csrno,
- target_ulong val)
+ target_ulong val, uintptr_t ra)
{
#if !defined(CONFIG_USER_ONLY)
env->mstatus |= MSTATUS_VS;
@@ -777,7 +1001,7 @@ static RISCVException read_vcsr(CPURISCVState *env, int csrno,
}
static RISCVException write_vcsr(CPURISCVState *env, int csrno,
- target_ulong val)
+ target_ulong val, uintptr_t ra)
{
#if !defined(CONFIG_USER_ONLY)
env->mstatus |= MSTATUS_VS;
@@ -835,7 +1059,7 @@ static RISCVException read_mcyclecfg(CPURISCVState *env, int csrno,
}
static RISCVException write_mcyclecfg(CPURISCVState *env, int csrno,
- target_ulong val)
+ target_ulong val, uintptr_t ra)
{
uint64_t inh_avail_mask;
@@ -864,7 +1088,7 @@ static RISCVException read_mcyclecfgh(CPURISCVState *env, int csrno,
}
static RISCVException write_mcyclecfgh(CPURISCVState *env, int csrno,
- target_ulong val)
+ target_ulong val, uintptr_t ra)
{
target_ulong inh_avail_mask = (target_ulong)(~MHPMEVENTH_FILTER_MASK |
MCYCLECFGH_BIT_MINH);
@@ -889,7 +1113,7 @@ static RISCVException read_minstretcfg(CPURISCVState *env, int csrno,
}
static RISCVException write_minstretcfg(CPURISCVState *env, int csrno,
- target_ulong val)
+ target_ulong val, uintptr_t ra)
{
uint64_t inh_avail_mask;
@@ -916,7 +1140,7 @@ static RISCVException read_minstretcfgh(CPURISCVState *env, int csrno,
}
static RISCVException write_minstretcfgh(CPURISCVState *env, int csrno,
- target_ulong val)
+ target_ulong val, uintptr_t ra)
{
target_ulong inh_avail_mask = (target_ulong)(~MHPMEVENTH_FILTER_MASK |
MINSTRETCFGH_BIT_MINH);
@@ -943,7 +1167,7 @@ static RISCVException read_mhpmevent(CPURISCVState *env, int csrno,
}
static RISCVException write_mhpmevent(CPURISCVState *env, int csrno,
- target_ulong val)
+ target_ulong val, uintptr_t ra)
{
int evt_index = csrno - CSR_MCOUNTINHIBIT;
uint64_t mhpmevt_val = val;
@@ -981,7 +1205,7 @@ static RISCVException read_mhpmeventh(CPURISCVState *env, int csrno,
}
static RISCVException write_mhpmeventh(CPURISCVState *env, int csrno,
- target_ulong val)
+ target_ulong val, uintptr_t ra)
{
int evt_index = csrno - CSR_MHPMEVENT3H + 3;
uint64_t mhpmevth_val;
@@ -1072,10 +1296,9 @@ done:
return result;
}
-static RISCVException write_mhpmcounter(CPURISCVState *env, int csrno,
- target_ulong val)
+static RISCVException riscv_pmu_write_ctr(CPURISCVState *env, target_ulong val,
+ uint32_t ctr_idx)
{
- int ctr_idx = csrno - CSR_MCYCLE;
PMUCTRState *counter = &env->pmu_ctrs[ctr_idx];
uint64_t mhpmctr_val = val;
@@ -1100,10 +1323,9 @@ static RISCVException write_mhpmcounter(CPURISCVState *env, int csrno,
return RISCV_EXCP_NONE;
}
-static RISCVException write_mhpmcounterh(CPURISCVState *env, int csrno,
- target_ulong val)
+static RISCVException riscv_pmu_write_ctrh(CPURISCVState *env, target_ulong val,
+ uint32_t ctr_idx)
{
- int ctr_idx = csrno - CSR_MCYCLEH;
PMUCTRState *counter = &env->pmu_ctrs[ctr_idx];
uint64_t mhpmctr_val = counter->mhpmcounter_val;
uint64_t mhpmctrh_val = val;
@@ -1125,6 +1347,22 @@ static RISCVException write_mhpmcounterh(CPURISCVState *env, int csrno,
return RISCV_EXCP_NONE;
}
+static RISCVException write_mhpmcounter(CPURISCVState *env, int csrno,
+ target_ulong val, uintptr_t ra)
+{
+ int ctr_idx = csrno - CSR_MCYCLE;
+
+ return riscv_pmu_write_ctr(env, val, ctr_idx);
+}
+
+static RISCVException write_mhpmcounterh(CPURISCVState *env, int csrno,
+ target_ulong val, uintptr_t ra)
+{
+ int ctr_idx = csrno - CSR_MCYCLEH;
+
+ return riscv_pmu_write_ctrh(env, val, ctr_idx);
+}
+
RISCVException riscv_pmu_read_ctr(CPURISCVState *env, target_ulong *val,
bool upper_half, uint32_t ctr_idx)
{
@@ -1190,6 +1428,167 @@ static RISCVException read_hpmcounterh(CPURISCVState *env, int csrno,
return riscv_pmu_read_ctr(env, val, true, ctr_index);
}
+static int rmw_cd_mhpmcounter(CPURISCVState *env, int ctr_idx,
+ target_ulong *val, target_ulong new_val,
+ target_ulong wr_mask)
+{
+ if (wr_mask != 0 && wr_mask != -1) {
+ return -EINVAL;
+ }
+
+ if (!wr_mask && val) {
+ riscv_pmu_read_ctr(env, val, false, ctr_idx);
+ } else if (wr_mask) {
+ riscv_pmu_write_ctr(env, new_val, ctr_idx);
+ } else {
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
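The rmw_cd_* helpers above all share the same calling convention: wr_mask == 0 with a non-NULL val is a pure read, an all-ones wr_mask is a pure write, and any partial mask is rejected. A compact standalone model of that convention (illustrative, not the QEMU helpers):

#include <errno.h>
#include <stdint.h>
#include <stdio.h>

/* Model of the wr_mask convention: 0 = pure read, all-ones = pure write. */
static int rmw_model(uint64_t *reg, uint64_t *val, uint64_t new_val,
                     uint64_t wr_mask)
{
    if (wr_mask != 0 && wr_mask != (uint64_t)-1) {
        return -EINVAL;                 /* partial masks are rejected */
    }
    if (!wr_mask && val) {
        *val = *reg;                    /* read path */
    } else if (wr_mask) {
        *reg = new_val;                 /* write path */
    } else {
        return -EINVAL;                 /* read with nowhere to put the value */
    }
    return 0;
}

int main(void)
{
    uint64_t reg = 0, val = 0;
    printf("%d\n", rmw_model(&reg, NULL, 42, (uint64_t)-1)); /* 0: write */
    printf("%d\n", rmw_model(&reg, &val, 0, 0));             /* 0: read */
    printf("%d\n", rmw_model(&reg, &val, 1, 0xff));          /* -EINVAL */
    return 0;
}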
+static int rmw_cd_mhpmcounterh(CPURISCVState *env, int ctr_idx,
+ target_ulong *val, target_ulong new_val,
+ target_ulong wr_mask)
+{
+ if (wr_mask != 0 && wr_mask != -1) {
+ return -EINVAL;
+ }
+
+ if (!wr_mask && val) {
+ riscv_pmu_read_ctr(env, val, true, ctr_idx);
+ } else if (wr_mask) {
+ riscv_pmu_write_ctrh(env, new_val, ctr_idx);
+ } else {
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static int rmw_cd_mhpmevent(CPURISCVState *env, int evt_index,
+ target_ulong *val, target_ulong new_val,
+ target_ulong wr_mask)
+{
+ uint64_t mhpmevt_val = new_val;
+
+ if (wr_mask != 0 && wr_mask != -1) {
+ return -EINVAL;
+ }
+
+ if (!wr_mask && val) {
+ *val = env->mhpmevent_val[evt_index];
+ if (riscv_cpu_cfg(env)->ext_sscofpmf) {
+ *val &= ~MHPMEVENT_BIT_MINH;
+ }
+ } else if (wr_mask) {
+ wr_mask &= ~MHPMEVENT_BIT_MINH;
+ mhpmevt_val = (new_val & wr_mask) |
+ (env->mhpmevent_val[evt_index] & ~wr_mask);
+ if (riscv_cpu_mxl(env) == MXL_RV32) {
+ mhpmevt_val = mhpmevt_val |
+ ((uint64_t)env->mhpmeventh_val[evt_index] << 32);
+ }
+ env->mhpmevent_val[evt_index] = mhpmevt_val;
+ riscv_pmu_update_event_map(env, mhpmevt_val, evt_index);
+ } else {
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static int rmw_cd_mhpmeventh(CPURISCVState *env, int evt_index,
+ target_ulong *val, target_ulong new_val,
+ target_ulong wr_mask)
+{
+ uint64_t mhpmevth_val;
+ uint64_t mhpmevt_val = env->mhpmevent_val[evt_index];
+
+ if (wr_mask != 0 && wr_mask != -1) {
+ return -EINVAL;
+ }
+
+ if (!wr_mask && val) {
+ *val = env->mhpmeventh_val[evt_index];
+ if (riscv_cpu_cfg(env)->ext_sscofpmf) {
+ *val &= ~MHPMEVENTH_BIT_MINH;
+ }
+ } else if (wr_mask) {
+ wr_mask &= ~MHPMEVENTH_BIT_MINH;
+ env->mhpmeventh_val[evt_index] =
+ (new_val & wr_mask) | (env->mhpmeventh_val[evt_index] & ~wr_mask);
+ mhpmevth_val = env->mhpmeventh_val[evt_index];
+ mhpmevt_val = mhpmevt_val | (mhpmevth_val << 32);
+ riscv_pmu_update_event_map(env, mhpmevt_val, evt_index);
+ } else {
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static int rmw_cd_ctr_cfg(CPURISCVState *env, int cfg_index, target_ulong *val,
+ target_ulong new_val, target_ulong wr_mask)
+{
+ switch (cfg_index) {
+ case 0: /* CYCLECFG */
+ if (wr_mask) {
+ wr_mask &= ~MCYCLECFG_BIT_MINH;
+ env->mcyclecfg = (new_val & wr_mask) | (env->mcyclecfg & ~wr_mask);
+ } else {
+            *val = env->mcyclecfg & ~MCYCLECFG_BIT_MINH;
+ }
+ break;
+ case 2: /* INSTRETCFG */
+ if (wr_mask) {
+ wr_mask &= ~MINSTRETCFG_BIT_MINH;
+ env->minstretcfg = (new_val & wr_mask) |
+ (env->minstretcfg & ~wr_mask);
+ } else {
+            *val = env->minstretcfg & ~MINSTRETCFG_BIT_MINH;
+ }
+ break;
+ default:
+ return -EINVAL;
+ }
+ return 0;
+}
+
+static int rmw_cd_ctr_cfgh(CPURISCVState *env, int cfg_index, target_ulong *val,
+ target_ulong new_val, target_ulong wr_mask)
+{
+
+ if (riscv_cpu_mxl(env) != MXL_RV32) {
+ return RISCV_EXCP_ILLEGAL_INST;
+ }
+
+ switch (cfg_index) {
+ case 0: /* CYCLECFGH */
+ if (wr_mask) {
+ wr_mask &= ~MCYCLECFGH_BIT_MINH;
+ env->mcyclecfgh = (new_val & wr_mask) |
+ (env->mcyclecfgh & ~wr_mask);
+ } else {
+ *val = env->mcyclecfgh;
+ }
+ break;
+ case 2: /* INSTRETCFGH */
+ if (wr_mask) {
+ wr_mask &= ~MINSTRETCFGH_BIT_MINH;
+ env->minstretcfgh = (new_val & wr_mask) |
+ (env->minstretcfgh & ~wr_mask);
+ } else {
+ *val = env->minstretcfgh;
+ }
+ break;
+ default:
+ return -EINVAL;
+ }
+ return 0;
+}
+
+
static RISCVException read_scountovf(CPURISCVState *env, int csrno,
target_ulong *val)
{
@@ -1199,6 +1598,14 @@ static RISCVException read_scountovf(CPURISCVState *env, int csrno,
target_ulong *mhpm_evt_val;
uint64_t of_bit_mask;
+ /* Virtualize scountovf for counter delegation */
+ if (riscv_cpu_cfg(env)->ext_sscofpmf &&
+ riscv_cpu_cfg(env)->ext_ssccfg &&
+ get_field(env->menvcfg, MENVCFG_CDE) &&
+ env->virt_enabled) {
+ return RISCV_EXCP_VIRT_INSTRUCTION_FAULT;
+ }
+
if (riscv_cpu_mxl(env) == MXL_RV32) {
mhpm_evt_val = env->mhpmeventh_val;
of_bit_mask = MHPMEVENTH_BIT_OF;
@@ -1260,7 +1667,7 @@ static RISCVException read_vstimecmph(CPURISCVState *env, int csrno,
}
static RISCVException write_vstimecmp(CPURISCVState *env, int csrno,
- target_ulong val)
+ target_ulong val, uintptr_t ra)
{
if (riscv_cpu_mxl(env) == MXL_RV32) {
env->vstimecmp = deposit64(env->vstimecmp, 0, 32, (uint64_t)val);
@@ -1275,7 +1682,7 @@ static RISCVException write_vstimecmp(CPURISCVState *env, int csrno,
}
static RISCVException write_vstimecmph(CPURISCVState *env, int csrno,
- target_ulong val)
+ target_ulong val, uintptr_t ra)
{
env->vstimecmp = deposit64(env->vstimecmp, 32, 32, (uint64_t)val);
riscv_timer_write_timecmp(env, env->vstimer, env->vstimecmp,
@@ -1309,13 +1716,13 @@ static RISCVException read_stimecmph(CPURISCVState *env, int csrno,
}
static RISCVException write_stimecmp(CPURISCVState *env, int csrno,
- target_ulong val)
+ target_ulong val, uintptr_t ra)
{
if (env->virt_enabled) {
if (env->hvictl & HVICTL_VTI) {
return RISCV_EXCP_VIRT_INSTRUCTION_FAULT;
}
- return write_vstimecmp(env, csrno, val);
+ return write_vstimecmp(env, csrno, val, ra);
}
if (riscv_cpu_mxl(env) == MXL_RV32) {
@@ -1330,13 +1737,13 @@ static RISCVException write_stimecmp(CPURISCVState *env, int csrno,
}
static RISCVException write_stimecmph(CPURISCVState *env, int csrno,
- target_ulong val)
+ target_ulong val, uintptr_t ra)
{
if (env->virt_enabled) {
if (env->hvictl & HVICTL_VTI) {
return RISCV_EXCP_VIRT_INSTRUCTION_FAULT;
}
- return write_vstimecmph(env, csrno, val);
+ return write_vstimecmph(env, csrno, val, ra);
}
env->stimecmp = deposit64(env->stimecmp, 32, 32, (uint64_t)val);
@@ -1377,6 +1784,7 @@ static const uint64_t all_ints = M_MODE_INTERRUPTS | S_MODE_INTERRUPTS |
(1ULL << (RISCV_EXCP_INST_PAGE_FAULT)) | \
(1ULL << (RISCV_EXCP_LOAD_PAGE_FAULT)) | \
(1ULL << (RISCV_EXCP_STORE_PAGE_FAULT)) | \
+ (1ULL << (RISCV_EXCP_SW_CHECK)) | \
(1ULL << (RISCV_EXCP_INST_GUEST_PAGE_FAULT)) | \
(1ULL << (RISCV_EXCP_LOAD_GUEST_ACCESS_FAULT)) | \
(1ULL << (RISCV_EXCP_VIRT_INSTRUCTION_FAULT)) | \
@@ -1440,7 +1848,7 @@ static RISCVException read_zero(CPURISCVState *env, int csrno,
}
static RISCVException write_ignore(CPURISCVState *env, int csrno,
- target_ulong val)
+ target_ulong val, uintptr_t ra)
{
return RISCV_EXCP_NONE;
}
@@ -1504,8 +1912,13 @@ static RISCVException read_mstatus(CPURISCVState *env, int csrno,
static bool validate_vm(CPURISCVState *env, target_ulong vm)
{
- uint64_t mode_supported = riscv_cpu_cfg(env)->satp_mode.map;
- return get_field(mode_supported, (1 << vm));
+ bool rv32 = riscv_cpu_mxl(env) == MXL_RV32;
+ RISCVCPU *cpu = env_archcpu(env);
+ int satp_mode_supported_max = cpu->cfg.max_satp_mode;
+ const bool *valid_vm = rv32 ? valid_vm_1_10_32 : valid_vm_1_10_64;
+
+ assert(satp_mode_supported_max >= 0);
+ return vm <= satp_mode_supported_max && valid_vm[vm];
}
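validate_vm() now bounds the requested mode against cfg.max_satp_mode and a per-MXL validity table. A toy illustration using the standard satp mode encodings (the table below is an illustrative stand-in for valid_vm_1_10_64):

#include <stdbool.h>
#include <stdio.h>

/* Illustrative stand-in: bare (0) and Sv39/Sv48/Sv57 (8/9/10) are valid. */
static const bool valid_vm_64[16] = {
    [0] = true, [8] = true, [9] = true, [10] = true,
};

static bool vm_ok(int vm, int max_satp_mode)
{
    return vm <= max_satp_mode && valid_vm_64[vm];
}

int main(void)
{
    /* With max_satp_mode = 9 (Sv48): */
    printf("Sv39:   %d\n", vm_ok(8, 9));  /* 1 */
    printf("Sv57:   %d\n", vm_ok(10, 9)); /* 0: above the cap */
    printf("mode 7: %d\n", vm_ok(7, 9));  /* 0: reserved encoding */
    return 0;
}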
static target_ulong legalize_xatp(CPURISCVState *env, target_ulong old_xatp,
@@ -1561,7 +1974,7 @@ static target_ulong legalize_mpp(CPURISCVState *env, target_ulong old_mpp,
}
static RISCVException write_mstatus(CPURISCVState *env, int csrno,
- target_ulong val)
+ target_ulong val, uintptr_t ra)
{
uint64_t mstatus = env->mstatus;
uint64_t mask = 0;
@@ -1589,6 +2002,20 @@ static RISCVException write_mstatus(CPURISCVState *env, int csrno,
mask |= MSTATUS_VS;
}
+ if (riscv_env_smode_dbltrp_enabled(env, env->virt_enabled)) {
+ mask |= MSTATUS_SDT;
+ if ((val & MSTATUS_SDT) != 0) {
+ val &= ~MSTATUS_SIE;
+ }
+ }
+
+ if (riscv_cpu_cfg(env)->ext_smdbltrp) {
+ mask |= MSTATUS_MDT;
+ if ((val & MSTATUS_MDT) != 0) {
+ val &= ~MSTATUS_MIE;
+ }
+ }
+
if (xl != MXL_RV32 || env->debugger) {
if (riscv_has_ext(env, RVH)) {
mask |= MSTATUS_MPV | MSTATUS_GVA;
@@ -1598,6 +2025,11 @@ static RISCVException write_mstatus(CPURISCVState *env, int csrno,
}
}
+    /* If the CFI landing pad extension (Zicfilp) is available, apply its mask */
+ if (env_archcpu(env)->cfg.ext_zicfilp) {
+ mask |= (MSTATUS_MPELP | MSTATUS_SPELP);
+ }
+
mstatus = (mstatus & ~mask) | (val & mask);
env->mstatus = mstatus;
@@ -1610,7 +2042,6 @@ static RISCVException write_mstatus(CPURISCVState *env, int csrno,
env->xl = cpu_recompute_xl(env);
}
- riscv_cpu_update_mask(env);
return RISCV_EXCP_NONE;
}
@@ -1622,11 +2053,17 @@ static RISCVException read_mstatush(CPURISCVState *env, int csrno,
}
static RISCVException write_mstatush(CPURISCVState *env, int csrno,
- target_ulong val)
+ target_ulong val, uintptr_t ra)
{
uint64_t valh = (uint64_t)val << 32;
uint64_t mask = riscv_has_ext(env, RVH) ? MSTATUS_MPV | MSTATUS_GVA : 0;
+ if (riscv_cpu_cfg(env)->ext_smdbltrp) {
+ mask |= MSTATUS_MDT;
+ if ((valh & MSTATUS_MDT) != 0) {
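+            /*
+             * MSTATUS_MIE lives in the low half, so valh contributes 0 for
+             * it; widening the mask therefore clears MIE whenever MDT is
+             * being set.
+             */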
+ mask |= MSTATUS_MIE;
+ }
+ }
env->mstatus = (env->mstatus & ~mask) | (valh & mask);
return RISCV_EXCP_NONE;
@@ -1669,8 +2106,21 @@ static RISCVException read_misa(CPURISCVState *env, int csrno,
return RISCV_EXCP_NONE;
}
+static target_ulong get_next_pc(CPURISCVState *env, uintptr_t ra)
+{
+ uint64_t data[INSN_START_WORDS];
+
+ /* Outside of a running cpu, env contains the next pc. */
+ if (ra == 0 || !cpu_unwind_state_data(env_cpu(env), ra, data)) {
+ return env->pc;
+ }
+
+ /* Within unwind data, [0] is pc and [1] is the opcode. */
+ return data[0] + insn_len(data[1]);
+}
+
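The insn_len() step above follows the standard RISC-V length rule: encodings whose two lowest bits are both set are 32-bit, and everything else handled here is a 16-bit compressed instruction. A standalone sketch of that decision (rv_insn_len and the sample encodings are illustrative):

#include <stdint.h>
#include <stdio.h>

/* Length decode from the first halfword of a RISC-V instruction. */
static unsigned rv_insn_len(uint16_t first_halfword)
{
    return (first_halfword & 0x3) == 0x3 ? 4 : 2;
}

int main(void)
{
    printf("%u\n", rv_insn_len(0x4501)); /* c.li a0, 0      -> 2 */
    printf("%u\n", rv_insn_len(0x0513)); /* addi a0, a0, 0  -> 4 */
    return 0;
}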
static RISCVException write_misa(CPURISCVState *env, int csrno,
- target_ulong val)
+ target_ulong val, uintptr_t ra)
{
RISCVCPU *cpu = env_archcpu(env);
uint32_t orig_misa_ext = env->misa_ext;
@@ -1684,11 +2134,8 @@ static RISCVException write_misa(CPURISCVState *env, int csrno,
/* Mask extensions that are not supported by this hart */
val &= env->misa_ext_mask;
- /*
- * Suppress 'C' if next instruction is not aligned
- * TODO: this should check next_pc
- */
- if ((val & RVC) && (GETPC() & ~3) != 0) {
+ /* Suppress 'C' if next instruction is not aligned. */
+ if ((val & RVC) && (get_next_pc(env, ra) & 3) != 0) {
val &= ~RVC;
}
@@ -1734,7 +2181,7 @@ static RISCVException read_medeleg(CPURISCVState *env, int csrno,
}
static RISCVException write_medeleg(CPURISCVState *env, int csrno,
- target_ulong val)
+ target_ulong val, uintptr_t ra)
{
env->medeleg = (env->medeleg & ~DELEGABLE_EXCPS) | (val & DELEGABLE_EXCPS);
return RISCV_EXCP_NONE;
@@ -1928,14 +2375,41 @@ static int aia_xlate_vs_csrno(CPURISCVState *env, int csrno)
};
}
+static int csrind_xlate_vs_csrno(CPURISCVState *env, int csrno)
+{
+ if (!env->virt_enabled) {
+ return csrno;
+ }
+
+ switch (csrno) {
+ case CSR_SISELECT:
+ return CSR_VSISELECT;
+ case CSR_SIREG:
+ case CSR_SIREG2:
+ case CSR_SIREG3:
+ case CSR_SIREG4:
+ case CSR_SIREG5:
+ case CSR_SIREG6:
+ return CSR_VSIREG + (csrno - CSR_SIREG);
+ default:
+ return csrno;
+ };
+}
+
static RISCVException rmw_xiselect(CPURISCVState *env, int csrno,
target_ulong *val, target_ulong new_val,
target_ulong wr_mask)
{
target_ulong *iselect;
+ int ret;
+
+ ret = smstateen_acc_ok(env, 0, SMSTATEEN0_SVSLCT);
+ if (ret != RISCV_EXCP_NONE) {
+ return ret;
+ }
/* Translate CSR number for VS-mode */
- csrno = aia_xlate_vs_csrno(env, csrno);
+ csrno = csrind_xlate_vs_csrno(env, csrno);
/* Find the iselect CSR based on CSR number */
switch (csrno) {
@@ -1956,7 +2430,12 @@ static RISCVException rmw_xiselect(CPURISCVState *env, int csrno,
*val = *iselect;
}
- wr_mask &= ISELECT_MASK;
+ if (riscv_cpu_cfg(env)->ext_smcsrind || riscv_cpu_cfg(env)->ext_sscsrind) {
+ wr_mask &= ISELECT_MASK_SXCSRIND;
+ } else {
+ wr_mask &= ISELECT_MASK_AIA;
+ }
+
if (wr_mask) {
*iselect = (*iselect & ~wr_mask) | (new_val & wr_mask);
}
@@ -1964,6 +2443,24 @@ static RISCVException rmw_xiselect(CPURISCVState *env, int csrno,
return RISCV_EXCP_NONE;
}
+static bool xiselect_aia_range(target_ulong isel)
+{
+ return (ISELECT_IPRIO0 <= isel && isel <= ISELECT_IPRIO15) ||
+ (ISELECT_IMSIC_FIRST <= isel && isel <= ISELECT_IMSIC_LAST);
+}
+
+static bool xiselect_cd_range(target_ulong isel)
+{
+ return (ISELECT_CD_FIRST <= isel && isel <= ISELECT_CD_LAST);
+}
+
+static bool xiselect_ctr_range(int csrno, target_ulong isel)
+{
+ /* MIREG-MIREG6 for the range 0x200-0x2ff are not used by CTR. */
+ return CTR_ENTRIES_FIRST <= isel && isel <= CTR_ENTRIES_LAST &&
+ csrno < CSR_MIREG;
+}
+
static int rmw_iprio(target_ulong xlen,
target_ulong iselect, uint8_t *iprio,
target_ulong *val, target_ulong new_val,
@@ -2009,45 +2506,162 @@ static int rmw_iprio(target_ulong xlen,
return 0;
}
-static RISCVException rmw_xireg(CPURISCVState *env, int csrno,
- target_ulong *val, target_ulong new_val,
- target_ulong wr_mask)
+static int rmw_ctrsource(CPURISCVState *env, int isel, target_ulong *val,
+ target_ulong new_val, target_ulong wr_mask)
{
- bool virt, isel_reserved;
- uint8_t *iprio;
- int ret = -EINVAL;
- target_ulong priv, isel, vgein;
+ /*
+ * CTR arrays are treated as circular buffers and TOS always points to next
+ * empty slot, keeping TOS - 1 always pointing to latest entry. Given entry
+ * 0 is always the latest one, traversal is a bit different here. See the
+ * below example.
+ *
+ * Depth = 16.
+ *
+ * idx [0] [1] [2] [3] [4] [5] [6] [7] [8] [9] [A] [B] [C] [D] [E] [F]
+ * TOS H
+ * entry 6 5 4 3 2 1 0 F E D C B A 9 8 7
+ */
+ const uint64_t entry = isel - CTR_ENTRIES_FIRST;
+ const uint64_t depth = 16 << get_field(env->sctrdepth, SCTRDEPTH_MASK);
+ uint64_t idx;
+
+ /* Entry greater than depth-1 is read-only zero */
+ if (entry >= depth) {
+ if (val) {
+ *val = 0;
+ }
+ return 0;
+ }
- /* Translate CSR number for VS-mode */
- csrno = aia_xlate_vs_csrno(env, csrno);
+ idx = get_field(env->sctrstatus, SCTRSTATUS_WRPTR_MASK);
+ idx = (idx - entry - 1) & (depth - 1);
- /* Decode register details from CSR number */
- virt = false;
- isel_reserved = false;
+ if (val) {
+ *val = env->ctr_src[idx];
+ }
+
+ env->ctr_src[idx] = (env->ctr_src[idx] & ~wr_mask) | (new_val & wr_mask);
+
+ return 0;
+}
+
+static int rmw_ctrtarget(CPURISCVState *env, int isel, target_ulong *val,
+ target_ulong new_val, target_ulong wr_mask)
+{
+ /*
+ * CTR arrays are treated as circular buffers and TOS always points to next
+ * empty slot, keeping TOS - 1 always pointing to latest entry. Given entry
+ * 0 is always the latest one, traversal is a bit different here. See the
+ * below example.
+ *
+ * Depth = 16.
+ *
+ * idx [0] [1] [2] [3] [4] [5] [6] [7] [8] [9] [A] [B] [C] [D] [E] [F]
+ * head H
+ * entry 6 5 4 3 2 1 0 F E D C B A 9 8 7
+ */
+ const uint64_t entry = isel - CTR_ENTRIES_FIRST;
+ const uint64_t depth = 16 << get_field(env->sctrdepth, SCTRDEPTH_MASK);
+ uint64_t idx;
+
+ /* Entry greater than depth-1 is read-only zero */
+ if (entry >= depth) {
+ if (val) {
+ *val = 0;
+ }
+ return 0;
+ }
+
+ idx = get_field(env->sctrstatus, SCTRSTATUS_WRPTR_MASK);
+ idx = (idx - entry - 1) & (depth - 1);
+
+ if (val) {
+ *val = env->ctr_dst[idx];
+ }
+
+ env->ctr_dst[idx] = (env->ctr_dst[idx] & ~wr_mask) | (new_val & wr_mask);
+
+ return 0;
+}
+
+static int rmw_ctrdata(CPURISCVState *env, int isel, target_ulong *val,
+ target_ulong new_val, target_ulong wr_mask)
+{
+ /*
+ * CTR arrays are treated as circular buffers and TOS always points to next
+ * empty slot, keeping TOS - 1 always pointing to latest entry. Given entry
+ * 0 is always the latest one, traversal is a bit different here. See the
+ * below example.
+ *
+ * Depth = 16.
+ *
+ * idx [0] [1] [2] [3] [4] [5] [6] [7] [8] [9] [A] [B] [C] [D] [E] [F]
+ * head H
+ * entry 6 5 4 3 2 1 0 F E D C B A 9 8 7
+ */
+ const uint64_t entry = isel - CTR_ENTRIES_FIRST;
+ const uint64_t mask = wr_mask & CTRDATA_MASK;
+ const uint64_t depth = 16 << get_field(env->sctrdepth, SCTRDEPTH_MASK);
+ uint64_t idx;
+
+ /* Entry greater than depth-1 is read-only zero */
+ if (entry >= depth) {
+ if (val) {
+ *val = 0;
+ }
+ return 0;
+ }
+
+ idx = get_field(env->sctrstatus, SCTRSTATUS_WRPTR_MASK);
+ idx = (idx - entry - 1) & (depth - 1);
+
+ if (val) {
+ *val = env->ctr_data[idx];
+ }
+
+ env->ctr_data[idx] = (env->ctr_data[idx] & ~mask) | (new_val & mask);
+
+ return 0;
+}
+
+static RISCVException rmw_xireg_aia(CPURISCVState *env, int csrno,
+ target_ulong isel, target_ulong *val,
+ target_ulong new_val, target_ulong wr_mask)
+{
+ bool virt = false, isel_reserved = false;
+ int ret = -EINVAL;
+ uint8_t *iprio;
+ target_ulong priv, vgein;
+
+ /* VS-mode CSR number passed in has already been translated */
switch (csrno) {
case CSR_MIREG:
+ if (!riscv_cpu_cfg(env)->ext_smaia) {
+ goto done;
+ }
iprio = env->miprio;
- isel = env->miselect;
priv = PRV_M;
break;
case CSR_SIREG:
- if (env->priv == PRV_S && env->mvien & MIP_SEIP &&
+ if (!riscv_cpu_cfg(env)->ext_ssaia ||
+ (env->priv == PRV_S && env->mvien & MIP_SEIP &&
env->siselect >= ISELECT_IMSIC_EIDELIVERY &&
- env->siselect <= ISELECT_IMSIC_EIE63) {
+ env->siselect <= ISELECT_IMSIC_EIE63)) {
goto done;
}
iprio = env->siprio;
- isel = env->siselect;
priv = PRV_S;
break;
case CSR_VSIREG:
+ if (!riscv_cpu_cfg(env)->ext_ssaia) {
+ goto done;
+ }
iprio = env->hviprio;
- isel = env->vsiselect;
priv = PRV_S;
virt = true;
break;
default:
- goto done;
+ goto done;
};
/* Find the selected guest interrupt file */
@@ -2078,13 +2692,224 @@ static RISCVException rmw_xireg(CPURISCVState *env, int csrno,
}
done:
+ /*
+     * If AIA is not enabled, an illegal instruction exception is always
+     * returned regardless of whether we are in VS-mode or not.
+ */
if (ret) {
return (env->virt_enabled && virt && !isel_reserved) ?
RISCV_EXCP_VIRT_INSTRUCTION_FAULT : RISCV_EXCP_ILLEGAL_INST;
}
+
return RISCV_EXCP_NONE;
}
+static int rmw_xireg_cd(CPURISCVState *env, int csrno,
+ target_ulong isel, target_ulong *val,
+ target_ulong new_val, target_ulong wr_mask)
+{
+ int ret = -EINVAL;
+ int ctr_index = isel - ISELECT_CD_FIRST;
+ int isel_hpm_start = ISELECT_CD_FIRST + 3;
+
+ if (!riscv_cpu_cfg(env)->ext_smcdeleg || !riscv_cpu_cfg(env)->ext_ssccfg) {
+ ret = RISCV_EXCP_ILLEGAL_INST;
+ goto done;
+ }
+
+    /* siselect value 1 is reserved (maps to time, which is not delegatable) */
+ if (ctr_index == 1) {
+ goto done;
+ }
+
+    /* sireg4 and sireg5 provide access to RV32-only CSRs */
+ if (((csrno == CSR_SIREG5) || (csrno == CSR_SIREG4)) &&
+ (riscv_cpu_mxl(env) != MXL_RV32)) {
+ ret = RISCV_EXCP_ILLEGAL_INST;
+ goto done;
+ }
+
+    /* Check Sscofpmf dependency */
+ if (!riscv_cpu_cfg(env)->ext_sscofpmf && csrno == CSR_SIREG5 &&
+ (isel_hpm_start <= isel && isel <= ISELECT_CD_LAST)) {
+ goto done;
+ }
+
+ /* Check smcntrpmf dependancy */
+    /* Check smcntrpmf dependency */
+ (csrno == CSR_SIREG2 || csrno == CSR_SIREG5) &&
+ (ISELECT_CD_FIRST <= isel && isel < isel_hpm_start)) {
+ goto done;
+ }
+
+ if (!get_field(env->mcounteren, BIT(ctr_index)) ||
+ !get_field(env->menvcfg, MENVCFG_CDE)) {
+ goto done;
+ }
+
+ switch (csrno) {
+ case CSR_SIREG:
+ ret = rmw_cd_mhpmcounter(env, ctr_index, val, new_val, wr_mask);
+ break;
+ case CSR_SIREG4:
+ ret = rmw_cd_mhpmcounterh(env, ctr_index, val, new_val, wr_mask);
+ break;
+ case CSR_SIREG2:
+ if (ctr_index <= 2) {
+ ret = rmw_cd_ctr_cfg(env, ctr_index, val, new_val, wr_mask);
+ } else {
+ ret = rmw_cd_mhpmevent(env, ctr_index, val, new_val, wr_mask);
+ }
+ break;
+ case CSR_SIREG5:
+ if (ctr_index <= 2) {
+ ret = rmw_cd_ctr_cfgh(env, ctr_index, val, new_val, wr_mask);
+ } else {
+ ret = rmw_cd_mhpmeventh(env, ctr_index, val, new_val, wr_mask);
+ }
+ break;
+ default:
+ goto done;
+ }
+
+done:
+ return ret;
+}
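+/*
+ * Illustration (not normative): with menvcfg.CDE set and siselect between
+ * ISELECT_CD_FIRST and ISELECT_CD_LAST, ctr_index 3 selects hpmcounter3
+ * state: sireg aliases mhpmcounter3, sireg4 its high half on RV32, and
+ * sireg2/sireg5 the matching mhpmevent3 halves.
+ */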
+
+static int rmw_xireg_ctr(CPURISCVState *env, int csrno,
+ target_ulong isel, target_ulong *val,
+ target_ulong new_val, target_ulong wr_mask)
+{
+ if (!riscv_cpu_cfg(env)->ext_smctr && !riscv_cpu_cfg(env)->ext_ssctr) {
+ return -EINVAL;
+ }
+
+ if (csrno == CSR_SIREG || csrno == CSR_VSIREG) {
+ return rmw_ctrsource(env, isel, val, new_val, wr_mask);
+ } else if (csrno == CSR_SIREG2 || csrno == CSR_VSIREG2) {
+ return rmw_ctrtarget(env, isel, val, new_val, wr_mask);
+ } else if (csrno == CSR_SIREG3 || csrno == CSR_VSIREG3) {
+ return rmw_ctrdata(env, isel, val, new_val, wr_mask);
+ } else if (val) {
+ *val = 0;
+ }
+
+ return 0;
+}
+
+/*
+ * rmw_xireg_csrind: Perform indirect access to xireg and xireg2-xireg6
+ *
+ * This is a generic interface for all xireg CSRs. Apart from AIA, all other
+ * extensions using csrind should be implemented here.
+ */
+static int rmw_xireg_csrind(CPURISCVState *env, int csrno,
+ target_ulong isel, target_ulong *val,
+ target_ulong new_val, target_ulong wr_mask)
+{
+ bool virt = csrno == CSR_VSIREG;
+ int ret = -EINVAL;
+
+ if (xiselect_cd_range(isel)) {
+ ret = rmw_xireg_cd(env, csrno, isel, val, new_val, wr_mask);
+ } else if (xiselect_ctr_range(csrno, isel)) {
+ ret = rmw_xireg_ctr(env, csrno, isel, val, new_val, wr_mask);
+ } else {
+ /*
+ * As per the specification, access to an unimplemented region is
+ * undefined, but the recommendation is to raise an illegal instruction
+ * exception.
+ */
+ return RISCV_EXCP_ILLEGAL_INST;
+ }
+
+ if (ret) {
+ return (env->virt_enabled && virt) ?
+ RISCV_EXCP_VIRT_INSTRUCTION_FAULT : RISCV_EXCP_ILLEGAL_INST;
+ }
+
+ return RISCV_EXCP_NONE;
+}
+
+static int rmw_xiregi(CPURISCVState *env, int csrno, target_ulong *val,
+ target_ulong new_val, target_ulong wr_mask)
+{
+ int ret = -EINVAL;
+ target_ulong isel;
+
+ ret = smstateen_acc_ok(env, 0, SMSTATEEN0_SVSLCT);
+ if (ret != RISCV_EXCP_NONE) {
+ return ret;
+ }
+
+ /* Translate CSR number for VS-mode */
+ csrno = csrind_xlate_vs_csrno(env, csrno);
+
+ if (CSR_MIREG <= csrno && csrno <= CSR_MIREG6 &&
+ csrno != CSR_MIREG4 - 1) {
+ isel = env->miselect;
+ } else if (CSR_SIREG <= csrno && csrno <= CSR_SIREG6 &&
+ csrno != CSR_SIREG4 - 1) {
+ isel = env->siselect;
+ } else if (CSR_VSIREG <= csrno && csrno <= CSR_VSIREG6 &&
+ csrno != CSR_VSIREG4 - 1) {
+ isel = env->vsiselect;
+ } else {
+ return RISCV_EXCP_ILLEGAL_INST;
+ }
+
+ return rmw_xireg_csrind(env, csrno, isel, val, new_val, wr_mask);
+}
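+/*
+ * Illustration (not normative): an access to sireg3 reaches this point
+ * with csrno == CSR_SIREG3, and siselect then picks the indirectly
+ * accessed register; e.g. a siselect in the CTR range makes sireg3 an
+ * alias of ctrdata for the selected entry. CSR_*IREG4 - 1 is the reserved
+ * CSR encoding between *ireg3 and *ireg4 and is rejected above.
+ */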
+
+static RISCVException rmw_xireg(CPURISCVState *env, int csrno,
+ target_ulong *val, target_ulong new_val,
+ target_ulong wr_mask)
+{
+ int ret = -EINVAL;
+ target_ulong isel;
+
+ ret = smstateen_acc_ok(env, 0, SMSTATEEN0_SVSLCT);
+ if (ret != RISCV_EXCP_NONE) {
+ return ret;
+ }
+
+ /* Translate CSR number for VS-mode */
+ csrno = csrind_xlate_vs_csrno(env, csrno);
+
+ /* Decode register details from CSR number */
+ switch (csrno) {
+ case CSR_MIREG:
+ isel = env->miselect;
+ break;
+ case CSR_SIREG:
+ isel = env->siselect;
+ break;
+ case CSR_VSIREG:
+ isel = env->vsiselect;
+ break;
+ default:
+ goto done;
+ }
+
+ /*
+ * Use the xiselect range to determine the actual operation on xireg.
+ *
+ * Since the predicate only checked for the existence of AIA or indirect
+ * CSR access, each specific range must check for its exact extension and
+ * return an illegal instruction exception, even in VS-mode.
+ */
+ if (xiselect_aia_range(isel)) {
+ return rmw_xireg_aia(env, csrno, isel, val, new_val, wr_mask);
+ } else if (riscv_cpu_cfg(env)->ext_smcsrind ||
+ riscv_cpu_cfg(env)->ext_sscsrind) {
+ return rmw_xireg_csrind(env, csrno, isel, val, new_val, wr_mask);
+ }
+
+done:
+ return RISCV_EXCP_ILLEGAL_INST;
+}
+
static RISCVException rmw_xtopei(CPURISCVState *env, int csrno,
target_ulong *val, target_ulong new_val,
target_ulong wr_mask)
@@ -2151,7 +2976,7 @@ static RISCVException read_mtvec(CPURISCVState *env, int csrno,
}
static RISCVException write_mtvec(CPURISCVState *env, int csrno,
- target_ulong val)
+ target_ulong val, uintptr_t ra)
{
/* bits [1:0] encode mode; 0 = direct, 1 = vectored, >= 2 reserved */
if ((val & 3) < 2) {
@@ -2170,7 +2995,7 @@ static RISCVException read_mcountinhibit(CPURISCVState *env, int csrno,
}
static RISCVException write_mcountinhibit(CPURISCVState *env, int csrno,
- target_ulong val)
+ target_ulong val, uintptr_t ra)
{
int cidx;
PMUCTRState *counter;
@@ -2236,6 +3061,20 @@ static RISCVException write_mcountinhibit(CPURISCVState *env, int csrno,
return RISCV_EXCP_NONE;
}
+static RISCVException read_scountinhibit(CPURISCVState *env, int csrno,
+ target_ulong *val)
+{
+ /* S-mode can only access the bits delegated by M-mode */
+ *val = env->mcountinhibit & env->mcounteren;
+ return RISCV_EXCP_NONE;
+}
+
+static RISCVException write_scountinhibit(CPURISCVState *env, int csrno,
+ target_ulong val, uintptr_t ra)
+{
+ return write_mcountinhibit(env, csrno, val & env->mcounteren, ra);
+}
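+/*
+ * Illustration (not normative): with mcounteren = 0x5, an S-mode write of
+ * 0x7 to scountinhibit is masked to 0x5 before being forwarded to the
+ * mcountinhibit write, so only delegated counters can be inhibited here.
+ */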
+
static RISCVException read_mcounteren(CPURISCVState *env, int csrno,
target_ulong *val)
{
@@ -2244,7 +3083,7 @@ static RISCVException read_mcounteren(CPURISCVState *env, int csrno,
}
static RISCVException write_mcounteren(CPURISCVState *env, int csrno,
- target_ulong val)
+ target_ulong val, uintptr_t ra)
{
RISCVCPU *cpu = env_archcpu(env);
@@ -2278,7 +3117,7 @@ static RISCVException read_mscratch(CPURISCVState *env, int csrno,
}
static RISCVException write_mscratch(CPURISCVState *env, int csrno,
- target_ulong val)
+ target_ulong val, uintptr_t ra)
{
env->mscratch = val;
return RISCV_EXCP_NONE;
@@ -2292,7 +3131,7 @@ static RISCVException read_mepc(CPURISCVState *env, int csrno,
}
static RISCVException write_mepc(CPURISCVState *env, int csrno,
- target_ulong val)
+ target_ulong val, uintptr_t ra)
{
env->mepc = val;
return RISCV_EXCP_NONE;
@@ -2306,7 +3145,7 @@ static RISCVException read_mcause(CPURISCVState *env, int csrno,
}
static RISCVException write_mcause(CPURISCVState *env, int csrno,
- target_ulong val)
+ target_ulong val, uintptr_t ra)
{
env->mcause = val;
return RISCV_EXCP_NONE;
@@ -2320,7 +3159,7 @@ static RISCVException read_mtval(CPURISCVState *env, int csrno,
}
static RISCVException write_mtval(CPURISCVState *env, int csrno,
- target_ulong val)
+ target_ulong val, uintptr_t ra)
{
env->mtval = val;
return RISCV_EXCP_NONE;
@@ -2334,20 +3173,42 @@ static RISCVException read_menvcfg(CPURISCVState *env, int csrno,
return RISCV_EXCP_NONE;
}
+static RISCVException write_henvcfg(CPURISCVState *env, int csrno,
+ target_ulong val, uintptr_t ra);
static RISCVException write_menvcfg(CPURISCVState *env, int csrno,
- target_ulong val)
+ target_ulong val, uintptr_t ra)
{
const RISCVCPUConfig *cfg = riscv_cpu_cfg(env);
- uint64_t mask = MENVCFG_FIOM | MENVCFG_CBIE | MENVCFG_CBCFE | MENVCFG_CBZE;
+ uint64_t mask = MENVCFG_FIOM | MENVCFG_CBIE | MENVCFG_CBCFE |
+ MENVCFG_CBZE | MENVCFG_CDE;
if (riscv_cpu_mxl(env) == MXL_RV64) {
mask |= (cfg->ext_svpbmt ? MENVCFG_PBMTE : 0) |
(cfg->ext_sstc ? MENVCFG_STCE : 0) |
- (cfg->ext_svadu ? MENVCFG_ADUE : 0);
+ (cfg->ext_smcdeleg ? MENVCFG_CDE : 0) |
+ (cfg->ext_svadu ? MENVCFG_ADUE : 0) |
+ (cfg->ext_ssdbltrp ? MENVCFG_DTE : 0);
+
+ if (env_archcpu(env)->cfg.ext_zicfilp) {
+ mask |= MENVCFG_LPE;
+ }
+
+ if (env_archcpu(env)->cfg.ext_zicfiss) {
+ mask |= MENVCFG_SSE;
+ }
+
+ /* Update PMM field only if the value is valid according to Zjpm v1.0 */
+ if (env_archcpu(env)->cfg.ext_smnpm &&
+ get_field(val, MENVCFG_PMM) != PMM_FIELD_RESERVED) {
+ mask |= MENVCFG_PMM;
+ }
+
+ if ((val & MENVCFG_DTE) == 0) {
+ env->mstatus &= ~MSTATUS_SDT;
+ }
}
env->menvcfg = (env->menvcfg & ~mask) | (val & mask);
-
- return RISCV_EXCP_NONE;
+ return write_henvcfg(env, CSR_HENVCFG, env->henvcfg, ra);
}
static RISCVException read_menvcfgh(CPURISCVState *env, int csrno,
@@ -2357,18 +3218,25 @@ static RISCVException read_menvcfgh(CPURISCVState *env, int csrno,
return RISCV_EXCP_NONE;
}
+static RISCVException write_henvcfgh(CPURISCVState *env, int csrno,
+ target_ulong val, uintptr_t ra);
static RISCVException write_menvcfgh(CPURISCVState *env, int csrno,
- target_ulong val)
+ target_ulong val, uintptr_t ra)
{
const RISCVCPUConfig *cfg = riscv_cpu_cfg(env);
uint64_t mask = (cfg->ext_svpbmt ? MENVCFG_PBMTE : 0) |
(cfg->ext_sstc ? MENVCFG_STCE : 0) |
- (cfg->ext_svadu ? MENVCFG_ADUE : 0);
+ (cfg->ext_svadu ? MENVCFG_ADUE : 0) |
+ (cfg->ext_smcdeleg ? MENVCFG_CDE : 0) |
+ (cfg->ext_ssdbltrp ? MENVCFG_DTE : 0);
uint64_t valh = (uint64_t)val << 32;
- env->menvcfg = (env->menvcfg & ~mask) | (valh & mask);
+ if ((valh & MENVCFG_DTE) == 0) {
+ env->mstatus &= ~MSTATUS_SDT;
+ }
- return RISCV_EXCP_NONE;
+ env->menvcfg = (env->menvcfg & ~mask) | (valh & mask);
+ return write_henvcfgh(env, CSR_HENVCFGH, env->henvcfg >> 32, ra);
}
static RISCVException read_senvcfg(CPURISCVState *env, int csrno,
@@ -2386,16 +3254,37 @@ static RISCVException read_senvcfg(CPURISCVState *env, int csrno,
}
static RISCVException write_senvcfg(CPURISCVState *env, int csrno,
- target_ulong val)
+ target_ulong val, uintptr_t ra)
{
uint64_t mask = SENVCFG_FIOM | SENVCFG_CBIE | SENVCFG_CBCFE | SENVCFG_CBZE;
RISCVException ret;
+ /* Update PMM field only if the value is valid according to Zjpm v1.0 */
+ if (env_archcpu(env)->cfg.ext_ssnpm &&
+ riscv_cpu_mxl(env) == MXL_RV64 &&
+ get_field(val, SENVCFG_PMM) != PMM_FIELD_RESERVED) {
+ mask |= SENVCFG_PMM;
+ }
ret = smstateen_acc_ok(env, 0, SMSTATEEN0_HSENVCFG);
if (ret != RISCV_EXCP_NONE) {
return ret;
}
+ if (env_archcpu(env)->cfg.ext_zicfilp) {
+ mask |= SENVCFG_LPE;
+ }
+
+ /* SSE in a higher mode must be ON for SSE in the next lower mode to be ON */
+ if (env_archcpu(env)->cfg.ext_zicfiss &&
+ get_field(env->menvcfg, MENVCFG_SSE) &&
+ (env->virt_enabled ? get_field(env->henvcfg, HENVCFG_SSE) : true)) {
+ mask |= SENVCFG_SSE;
+ }
+
+ if (env_archcpu(env)->cfg.ext_svukte) {
+ mask |= SENVCFG_UKTE;
+ }
+
env->senvcfg = (env->senvcfg & ~mask) | (val & mask);
return RISCV_EXCP_NONE;
}
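+/*
+ * Illustration (not normative): senvcfg.SSE can only be set while
+ * menvcfg.SSE is 1 and, when V=1, henvcfg.SSE is also 1; otherwise the
+ * bit stays out of the write mask and the write to it is ignored.
+ */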
@@ -2414,14 +3303,15 @@ static RISCVException read_henvcfg(CPURISCVState *env, int csrno,
* henvcfg.pbmte is read_only 0 when menvcfg.pbmte = 0
* henvcfg.stce is read_only 0 when menvcfg.stce = 0
* henvcfg.adue is read_only 0 when menvcfg.adue = 0
+ * henvcfg.dte is read_only 0 when menvcfg.dte = 0
*/
- *val = env->henvcfg & (~(HENVCFG_PBMTE | HENVCFG_STCE | HENVCFG_ADUE) |
- env->menvcfg);
+ *val = env->henvcfg & (~(HENVCFG_PBMTE | HENVCFG_STCE | HENVCFG_ADUE |
+ HENVCFG_DTE) | env->menvcfg);
return RISCV_EXCP_NONE;
}
static RISCVException write_henvcfg(CPURISCVState *env, int csrno,
- target_ulong val)
+ target_ulong val, uintptr_t ra)
{
uint64_t mask = HENVCFG_FIOM | HENVCFG_CBIE | HENVCFG_CBCFE | HENVCFG_CBZE;
RISCVException ret;
@@ -2432,10 +3322,30 @@ static RISCVException write_henvcfg(CPURISCVState *env, int csrno,
}
if (riscv_cpu_mxl(env) == MXL_RV64) {
- mask |= env->menvcfg & (HENVCFG_PBMTE | HENVCFG_STCE | HENVCFG_ADUE);
+ mask |= env->menvcfg & (HENVCFG_PBMTE | HENVCFG_STCE | HENVCFG_ADUE |
+ HENVCFG_DTE);
+
+ if (env_archcpu(env)->cfg.ext_zicfilp) {
+ mask |= HENVCFG_LPE;
+ }
+
+ /* HS can enable SSE for VS only if menvcfg granted it to HS */
+ if (env_archcpu(env)->cfg.ext_zicfiss &&
+ get_field(env->menvcfg, MENVCFG_SSE)) {
+ mask |= HENVCFG_SSE;
+ }
+
+ /* Update PMM field only if the value is valid according to Zjpm v1.0 */
+ if (env_archcpu(env)->cfg.ext_ssnpm &&
+ get_field(val, HENVCFG_PMM) != PMM_FIELD_RESERVED) {
+ mask |= HENVCFG_PMM;
+ }
}
- env->henvcfg = (env->henvcfg & ~mask) | (val & mask);
+ env->henvcfg = val & mask;
+ if ((env->henvcfg & HENVCFG_DTE) == 0) {
+ env->vsstatus &= ~MSTATUS_SDT;
+ }
return RISCV_EXCP_NONE;
}
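+/*
+ * Illustration (not normative): clearing henvcfg.DTE, or having it masked
+ * out because menvcfg.DTE is 0, also clears vsstatus.SDT, so the VS-level
+ * double-trap state never outlives its enable.
+ */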
@@ -2450,16 +3360,16 @@ static RISCVException read_henvcfgh(CPURISCVState *env, int csrno,
return ret;
}
- *val = (env->henvcfg & (~(HENVCFG_PBMTE | HENVCFG_STCE | HENVCFG_ADUE) |
- env->menvcfg)) >> 32;
+ *val = (env->henvcfg & (~(HENVCFG_PBMTE | HENVCFG_STCE | HENVCFG_ADUE |
+ HENVCFG_DTE) | env->menvcfg)) >> 32;
return RISCV_EXCP_NONE;
}
static RISCVException write_henvcfgh(CPURISCVState *env, int csrno,
- target_ulong val)
+ target_ulong val, uintptr_t ra)
{
uint64_t mask = env->menvcfg & (HENVCFG_PBMTE | HENVCFG_STCE |
- HENVCFG_ADUE);
+ HENVCFG_ADUE | HENVCFG_DTE);
uint64_t valh = (uint64_t)val << 32;
RISCVException ret;
@@ -2467,8 +3377,10 @@ static RISCVException write_henvcfgh(CPURISCVState *env, int csrno,
if (ret != RISCV_EXCP_NONE) {
return ret;
}
-
- env->henvcfg = (env->henvcfg & ~mask) | (valh & mask);
+ env->henvcfg = (env->henvcfg & 0xFFFFFFFF) | (valh & mask);
+ if ((env->henvcfg & HENVCFG_DTE) == 0) {
+ env->vsstatus &= ~MSTATUS_SDT;
+ }
return RISCV_EXCP_NONE;
}
@@ -2492,7 +3404,7 @@ static RISCVException write_mstateen(CPURISCVState *env, int csrno,
}
static RISCVException write_mstateen0(CPURISCVState *env, int csrno,
- target_ulong new_val)
+ target_ulong new_val, uintptr_t ra)
{
uint64_t wr_mask = SMSTATEEN_STATEEN | SMSTATEEN0_HSENVCFG;
if (!riscv_has_ext(env, RVF)) {
@@ -2503,11 +3415,28 @@ static RISCVException write_mstateen0(CPURISCVState *env, int csrno,
wr_mask |= SMSTATEEN0_P1P13;
}
+ if (riscv_cpu_cfg(env)->ext_smaia || riscv_cpu_cfg(env)->ext_smcsrind) {
+ wr_mask |= SMSTATEEN0_SVSLCT;
+ }
+
+ /*
+ * As per the AIA specification, SMSTATEEN0_IMSIC is valid only if IMSIC is
+ * implemented. However, that information lives in the MachineState and is
+ * not visible from csr.c. Just enable it if Smaia is available.
+ */
+ if (riscv_cpu_cfg(env)->ext_smaia) {
+ wr_mask |= (SMSTATEEN0_AIA | SMSTATEEN0_IMSIC);
+ }
+
+ if (riscv_cpu_cfg(env)->ext_ssctr) {
+ wr_mask |= SMSTATEEN0_CTR;
+ }
+
return write_mstateen(env, csrno, wr_mask, new_val);
}
static RISCVException write_mstateen_1_3(CPURISCVState *env, int csrno,
- target_ulong new_val)
+ target_ulong new_val, uintptr_t ra)
{
return write_mstateen(env, csrno, SMSTATEEN_STATEEN, new_val);
}
@@ -2534,7 +3463,7 @@ static RISCVException write_mstateenh(CPURISCVState *env, int csrno,
}
static RISCVException write_mstateen0h(CPURISCVState *env, int csrno,
- target_ulong new_val)
+ target_ulong new_val, uintptr_t ra)
{
uint64_t wr_mask = SMSTATEEN_STATEEN | SMSTATEEN0_HSENVCFG;
@@ -2542,11 +3471,15 @@ static RISCVException write_mstateen0h(CPURISCVState *env, int csrno,
wr_mask |= SMSTATEEN0_P1P13;
}
+ if (riscv_cpu_cfg(env)->ext_ssctr) {
+ wr_mask |= SMSTATEEN0_CTR;
+ }
+
return write_mstateenh(env, csrno, wr_mask, new_val);
}
static RISCVException write_mstateenh_1_3(CPURISCVState *env, int csrno,
- target_ulong new_val)
+ target_ulong new_val, uintptr_t ra)
{
return write_mstateenh(env, csrno, SMSTATEEN_STATEEN, new_val);
}
@@ -2575,7 +3508,7 @@ static RISCVException write_hstateen(CPURISCVState *env, int csrno,
}
static RISCVException write_hstateen0(CPURISCVState *env, int csrno,
- target_ulong new_val)
+ target_ulong new_val, uintptr_t ra)
{
uint64_t wr_mask = SMSTATEEN_STATEEN | SMSTATEEN0_HSENVCFG;
@@ -2583,11 +3516,28 @@ static RISCVException write_hstateen0(CPURISCVState *env, int csrno,
wr_mask |= SMSTATEEN0_FCSR;
}
+ if (riscv_cpu_cfg(env)->ext_ssaia || riscv_cpu_cfg(env)->ext_sscsrind) {
+ wr_mask |= SMSTATEEN0_SVSLCT;
+ }
+
+ /*
+ * As per the AIA specification, SMSTATEEN0_IMSIC is valid only if IMSIC is
+ * implemented. However, that information lives in the MachineState and is
+ * not visible from csr.c. Just enable it if Ssaia is available.
+ */
+ if (riscv_cpu_cfg(env)->ext_ssaia) {
+ wr_mask |= (SMSTATEEN0_AIA | SMSTATEEN0_IMSIC);
+ }
+
+ if (riscv_cpu_cfg(env)->ext_ssctr) {
+ wr_mask |= SMSTATEEN0_CTR;
+ }
+
return write_hstateen(env, csrno, wr_mask, new_val);
}
static RISCVException write_hstateen_1_3(CPURISCVState *env, int csrno,
- target_ulong new_val)
+ target_ulong new_val, uintptr_t ra)
{
return write_hstateen(env, csrno, SMSTATEEN_STATEEN, new_val);
}
@@ -2618,15 +3568,19 @@ static RISCVException write_hstateenh(CPURISCVState *env, int csrno,
}
static RISCVException write_hstateen0h(CPURISCVState *env, int csrno,
- target_ulong new_val)
+ target_ulong new_val, uintptr_t ra)
{
uint64_t wr_mask = SMSTATEEN_STATEEN | SMSTATEEN0_HSENVCFG;
+ if (riscv_cpu_cfg(env)->ext_ssctr) {
+ wr_mask |= SMSTATEEN0_CTR;
+ }
+
return write_hstateenh(env, csrno, wr_mask, new_val);
}
static RISCVException write_hstateenh_1_3(CPURISCVState *env, int csrno,
- target_ulong new_val)
+ target_ulong new_val, uintptr_t ra)
{
return write_hstateenh(env, csrno, SMSTATEEN_STATEEN, new_val);
}
@@ -2665,7 +3619,7 @@ static RISCVException write_sstateen(CPURISCVState *env, int csrno,
}
static RISCVException write_sstateen0(CPURISCVState *env, int csrno,
- target_ulong new_val)
+ target_ulong new_val, uintptr_t ra)
{
uint64_t wr_mask = SMSTATEEN_STATEEN | SMSTATEEN0_HSENVCFG;
@@ -2677,7 +3631,7 @@ static RISCVException write_sstateen0(CPURISCVState *env, int csrno,
}
static RISCVException write_sstateen_1_3(CPURISCVState *env, int csrno,
- target_ulong new_val)
+ target_ulong new_val, uintptr_t ra)
{
return write_sstateen(env, csrno, SMSTATEEN_STATEEN, new_val);
}
@@ -2896,6 +3850,13 @@ static RISCVException read_sstatus_i128(CPURISCVState *env, int csrno,
if (env->xl != MXL_RV32 || env->debugger) {
mask |= SSTATUS64_UXL;
}
+ if (riscv_cpu_cfg(env)->ext_ssdbltrp) {
+ mask |= SSTATUS_SDT;
+ }
+
+ if (env_archcpu(env)->cfg.ext_zicfilp) {
+ mask |= SSTATUS_SPELP;
+ }
*val = int128_make128(sstatus, add_status_sd(MXL_RV128, sstatus));
return RISCV_EXCP_NONE;
@@ -2908,13 +3869,20 @@ static RISCVException read_sstatus(CPURISCVState *env, int csrno,
if (env->xl != MXL_RV32 || env->debugger) {
mask |= SSTATUS64_UXL;
}
+
+ if (env_archcpu(env)->cfg.ext_zicfilp) {
+ mask |= SSTATUS_SPELP;
+ }
+ if (riscv_cpu_cfg(env)->ext_ssdbltrp) {
+ mask |= SSTATUS_SDT;
+ }
/* TODO: Use SXL not MXL. */
*val = add_status_sd(riscv_cpu_mxl(env), env->mstatus & mask);
return RISCV_EXCP_NONE;
}
static RISCVException write_sstatus(CPURISCVState *env, int csrno,
- target_ulong val)
+ target_ulong val, uintptr_t ra)
{
target_ulong mask = (sstatus_v1_10_mask);
@@ -2923,8 +3891,15 @@ static RISCVException write_sstatus(CPURISCVState *env, int csrno,
mask |= SSTATUS64_UXL;
}
}
+
+ if (env_archcpu(env)->cfg.ext_zicfilp) {
+ mask |= SSTATUS_SPELP;
+ }
+ if (riscv_cpu_cfg(env)->ext_ssdbltrp) {
+ mask |= SSTATUS_SDT;
+ }
target_ulong newval = (env->mstatus & ~mask) | (val & mask);
- return write_mstatus(env, CSR_MSTATUS, newval);
+ return write_mstatus(env, CSR_MSTATUS, newval, ra);
}
static RISCVException rmw_vsie64(CPURISCVState *env, int csrno,
@@ -3076,7 +4051,7 @@ static RISCVException read_stvec(CPURISCVState *env, int csrno,
}
static RISCVException write_stvec(CPURISCVState *env, int csrno,
- target_ulong val)
+ target_ulong val, uintptr_t ra)
{
/* bits [1:0] encode mode; 0 = direct, 1 = vectored, >= 2 reserved */
if ((val & 3) < 2) {
@@ -3095,7 +4070,7 @@ static RISCVException read_scounteren(CPURISCVState *env, int csrno,
}
static RISCVException write_scounteren(CPURISCVState *env, int csrno,
- target_ulong val)
+ target_ulong val, uintptr_t ra)
{
RISCVCPU *cpu = env_archcpu(env);
@@ -3129,7 +4104,7 @@ static RISCVException read_sscratch(CPURISCVState *env, int csrno,
}
static RISCVException write_sscratch(CPURISCVState *env, int csrno,
- target_ulong val)
+ target_ulong val, uintptr_t ra)
{
env->sscratch = val;
return RISCV_EXCP_NONE;
@@ -3143,7 +4118,7 @@ static RISCVException read_sepc(CPURISCVState *env, int csrno,
}
static RISCVException write_sepc(CPURISCVState *env, int csrno,
- target_ulong val)
+ target_ulong val, uintptr_t ra)
{
env->sepc = val;
return RISCV_EXCP_NONE;
@@ -3157,7 +4132,7 @@ static RISCVException read_scause(CPURISCVState *env, int csrno,
}
static RISCVException write_scause(CPURISCVState *env, int csrno,
- target_ulong val)
+ target_ulong val, uintptr_t ra)
{
env->scause = val;
return RISCV_EXCP_NONE;
@@ -3171,7 +4146,7 @@ static RISCVException read_stval(CPURISCVState *env, int csrno,
}
static RISCVException write_stval(CPURISCVState *env, int csrno,
- target_ulong val)
+ target_ulong val, uintptr_t ra)
{
env->stval = val;
return RISCV_EXCP_NONE;
@@ -3311,7 +4286,7 @@ static RISCVException read_satp(CPURISCVState *env, int csrno,
}
static RISCVException write_satp(CPURISCVState *env, int csrno,
- target_ulong val)
+ target_ulong val, uintptr_t ra)
{
if (!riscv_cpu_cfg(env)->mmu) {
return RISCV_EXCP_NONE;
@@ -3321,6 +4296,86 @@ static RISCVException write_satp(CPURISCVState *env, int csrno,
return RISCV_EXCP_NONE;
}
+static RISCVException rmw_sctrdepth(CPURISCVState *env, int csrno,
+ target_ulong *ret_val,
+ target_ulong new_val, target_ulong wr_mask)
+{
+ uint64_t mask = wr_mask & SCTRDEPTH_MASK;
+
+ if (ret_val) {
+ *ret_val = env->sctrdepth;
+ }
+
+ env->sctrdepth = (env->sctrdepth & ~mask) | (new_val & mask);
+
+ /* Clamp depth to the supported maximum. */
+ if (mask) {
+ uint64_t depth = get_field(env->sctrdepth, SCTRDEPTH_MASK);
+
+ if (depth > SCTRDEPTH_MAX) {
+ depth = SCTRDEPTH_MAX;
+ env->sctrdepth = set_field(env->sctrdepth, SCTRDEPTH_MASK, depth);
+ }
+
+ /* Update sctrstatus.WRPTR with a legal value */
+ depth = 16ULL << depth;
+ env->sctrstatus =
+ env->sctrstatus & (~SCTRSTATUS_WRPTR_MASK | (depth - 1));
+ }
+
+ return RISCV_EXCP_NONE;
+}
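+/*
+ * Illustration (not normative): sctrdepth.DEPTH = 2 selects 16 << 2 = 64
+ * entries, so sctrstatus.WRPTR is kept within 0..63; a DEPTH above
+ * SCTRDEPTH_MAX is clamped before the WRPTR mask is derived.
+ */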
+
+static RISCVException rmw_sctrstatus(CPURISCVState *env, int csrno,
+ target_ulong *ret_val,
+ target_ulong new_val, target_ulong wr_mask)
+{
+ uint32_t depth = 16 << get_field(env->sctrdepth, SCTRDEPTH_MASK);
+ uint32_t mask = wr_mask & SCTRSTATUS_MASK;
+
+ if (ret_val) {
+ *ret_val = env->sctrstatus;
+ }
+
+ env->sctrstatus = (env->sctrstatus & ~mask) | (new_val & mask);
+
+ /* Update sctrstatus.WRPTR with a legal value */
+ env->sctrstatus = env->sctrstatus & (~SCTRSTATUS_WRPTR_MASK | (depth - 1));
+
+ return RISCV_EXCP_NONE;
+}
+
+static RISCVException rmw_xctrctl(CPURISCVState *env, int csrno,
+ target_ulong *ret_val,
+ target_ulong new_val, target_ulong wr_mask)
+{
+ uint64_t csr_mask, mask = wr_mask;
+ uint64_t *ctl_ptr = &env->mctrctl;
+
+ if (csrno == CSR_MCTRCTL) {
+ csr_mask = MCTRCTL_MASK;
+ } else if (csrno == CSR_SCTRCTL && !env->virt_enabled) {
+ csr_mask = SCTRCTL_MASK;
+ } else {
+ /*
+ * This is for csrno == CSR_SCTRCTL and env->virt_enabled == true
+ * or csrno == CSR_VSCTRCTL.
+ */
+ csr_mask = VSCTRCTL_MASK;
+ ctl_ptr = &env->vsctrctl;
+ }
+
+ mask &= csr_mask;
+
+ if (ret_val) {
+ *ret_val = *ctl_ptr & csr_mask;
+ }
+
+ *ctl_ptr = (*ctl_ptr & ~mask) | (new_val & mask);
+
+ return RISCV_EXCP_NONE;
+}
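+/*
+ * Illustration (not normative): an HS-mode access to sctrctl uses
+ * SCTRCTL_MASK, while the same CSR number accessed with V=1 is redirected
+ * to vsctrctl under VSCTRCTL_MASK, mirroring other VS-shadowed CSRs.
+ */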
+
static RISCVException read_vstopi(CPURISCVState *env, int csrno,
target_ulong *val)
{
@@ -3453,9 +4508,20 @@ static RISCVException read_hstatus(CPURISCVState *env, int csrno,
}
static RISCVException write_hstatus(CPURISCVState *env, int csrno,
- target_ulong val)
+ target_ulong val, uintptr_t ra)
{
- env->hstatus = val;
+ uint64_t mask = (target_ulong)-1;
+ if (!env_archcpu(env)->cfg.ext_svukte) {
+ mask &= ~HSTATUS_HUKTE;
+ }
+ /* Update PMM field only if the value is valid according to Zjpm v1.0 */
+ if (!env_archcpu(env)->cfg.ext_ssnpm ||
+ riscv_cpu_mxl(env) != MXL_RV64 ||
+ get_field(val, HSTATUS_HUPMM) == PMM_FIELD_RESERVED) {
+ mask &= ~HSTATUS_HUPMM;
+ }
+ env->hstatus = (env->hstatus & ~mask) | (val & mask);
+
if (riscv_cpu_mxl(env) != MXL_RV32 && get_field(val, HSTATUS_VSXL) != 2) {
qemu_log_mask(LOG_UNIMP,
"QEMU does not support mixed HSXLEN options.");
@@ -3474,7 +4540,7 @@ static RISCVException read_hedeleg(CPURISCVState *env, int csrno,
}
static RISCVException write_hedeleg(CPURISCVState *env, int csrno,
- target_ulong val)
+ target_ulong val, uintptr_t ra)
{
env->hedeleg = val & vs_delegable_excps;
return RISCV_EXCP_NONE;
@@ -3495,7 +4561,7 @@ static RISCVException read_hedelegh(CPURISCVState *env, int csrno,
}
static RISCVException write_hedelegh(CPURISCVState *env, int csrno,
- target_ulong val)
+ target_ulong val, uintptr_t ra)
{
RISCVException ret;
ret = smstateen_acc_ok(env, 0, SMSTATEEN0_P1P13);
@@ -3758,7 +4824,7 @@ static RISCVException read_hcounteren(CPURISCVState *env, int csrno,
}
static RISCVException write_hcounteren(CPURISCVState *env, int csrno,
- target_ulong val)
+ target_ulong val, uintptr_t ra)
{
RISCVCPU *cpu = env_archcpu(env);
@@ -3778,7 +4844,7 @@ static RISCVException read_hgeie(CPURISCVState *env, int csrno,
}
static RISCVException write_hgeie(CPURISCVState *env, int csrno,
- target_ulong val)
+ target_ulong val, uintptr_t ra)
{
/* Only GEILEN:1 bits implemented and BIT0 is never implemented */
val &= ((((target_ulong)1) << env->geilen) - 1) << 1;
@@ -3797,7 +4863,7 @@ static RISCVException read_htval(CPURISCVState *env, int csrno,
}
static RISCVException write_htval(CPURISCVState *env, int csrno,
- target_ulong val)
+ target_ulong val, uintptr_t ra)
{
env->htval = val;
return RISCV_EXCP_NONE;
@@ -3811,7 +4877,7 @@ static RISCVException read_htinst(CPURISCVState *env, int csrno,
}
static RISCVException write_htinst(CPURISCVState *env, int csrno,
- target_ulong val)
+ target_ulong val, uintptr_t ra)
{
return RISCV_EXCP_NONE;
}
@@ -3833,7 +4899,7 @@ static RISCVException read_hgatp(CPURISCVState *env, int csrno,
}
static RISCVException write_hgatp(CPURISCVState *env, int csrno,
- target_ulong val)
+ target_ulong val, uintptr_t ra)
{
env->hgatp = legalize_xatp(env, env->hgatp, val);
return RISCV_EXCP_NONE;
@@ -3851,7 +4917,7 @@ static RISCVException read_htimedelta(CPURISCVState *env, int csrno,
}
static RISCVException write_htimedelta(CPURISCVState *env, int csrno,
- target_ulong val)
+ target_ulong val, uintptr_t ra)
{
if (!env->rdtime_fn) {
return RISCV_EXCP_ILLEGAL_INST;
@@ -3883,7 +4949,7 @@ static RISCVException read_htimedeltah(CPURISCVState *env, int csrno,
}
static RISCVException write_htimedeltah(CPURISCVState *env, int csrno,
- target_ulong val)
+ target_ulong val, uintptr_t ra)
{
if (!env->rdtime_fn) {
return RISCV_EXCP_ILLEGAL_INST;
@@ -3907,7 +4973,7 @@ static RISCVException read_hvictl(CPURISCVState *env, int csrno,
}
static RISCVException write_hvictl(CPURISCVState *env, int csrno,
- target_ulong val)
+ target_ulong val, uintptr_t ra)
{
env->hvictl = val & HVICTL_VALID_MASK;
return RISCV_EXCP_NONE;
@@ -3972,7 +5038,7 @@ static RISCVException read_hviprio1(CPURISCVState *env, int csrno,
}
static RISCVException write_hviprio1(CPURISCVState *env, int csrno,
- target_ulong val)
+ target_ulong val, uintptr_t ra)
{
return write_hvipriox(env, 0, env->hviprio, val);
}
@@ -3984,7 +5050,7 @@ static RISCVException read_hviprio1h(CPURISCVState *env, int csrno,
}
static RISCVException write_hviprio1h(CPURISCVState *env, int csrno,
- target_ulong val)
+ target_ulong val, uintptr_t ra)
{
return write_hvipriox(env, 4, env->hviprio, val);
}
@@ -3996,7 +5062,7 @@ static RISCVException read_hviprio2(CPURISCVState *env, int csrno,
}
static RISCVException write_hviprio2(CPURISCVState *env, int csrno,
- target_ulong val)
+ target_ulong val, uintptr_t ra)
{
return write_hvipriox(env, 8, env->hviprio, val);
}
@@ -4008,7 +5074,7 @@ static RISCVException read_hviprio2h(CPURISCVState *env, int csrno,
}
static RISCVException write_hviprio2h(CPURISCVState *env, int csrno,
- target_ulong val)
+ target_ulong val, uintptr_t ra)
{
return write_hvipriox(env, 12, env->hviprio, val);
}
@@ -4022,12 +5088,19 @@ static RISCVException read_vsstatus(CPURISCVState *env, int csrno,
}
static RISCVException write_vsstatus(CPURISCVState *env, int csrno,
- target_ulong val)
+ target_ulong val, uintptr_t ra)
{
uint64_t mask = (target_ulong)-1;
if ((val & VSSTATUS64_UXL) == 0) {
mask &= ~VSSTATUS64_UXL;
}
+ if ((env->henvcfg & HENVCFG_DTE)) {
+ if ((val & SSTATUS_SDT) != 0) {
+ val &= ~SSTATUS_SIE;
+ }
+ } else {
+ val &= ~SSTATUS_SDT;
+ }
env->vsstatus = (env->vsstatus & ~mask) | (uint64_t)val;
return RISCV_EXCP_NONE;
}
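+/*
+ * Illustration (not normative): when henvcfg.DTE is set, writing vsstatus
+ * with SDT = 1 forces SIE to 0 in the written value, matching the
+ * Ssdbltrp rule that setting SDT masks S-level interrupts.
+ */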
@@ -4040,7 +5113,7 @@ static RISCVException read_vstvec(CPURISCVState *env, int csrno,
}
static RISCVException write_vstvec(CPURISCVState *env, int csrno,
- target_ulong val)
+ target_ulong val, uintptr_t ra)
{
/* bits [1:0] encode mode; 0 = direct, 1 = vectored, >= 2 reserved */
if ((val & 3) < 2) {
@@ -4059,7 +5132,7 @@ static RISCVException read_vsscratch(CPURISCVState *env, int csrno,
}
static RISCVException write_vsscratch(CPURISCVState *env, int csrno,
- target_ulong val)
+ target_ulong val, uintptr_t ra)
{
env->vsscratch = val;
return RISCV_EXCP_NONE;
@@ -4073,7 +5146,7 @@ static RISCVException read_vsepc(CPURISCVState *env, int csrno,
}
static RISCVException write_vsepc(CPURISCVState *env, int csrno,
- target_ulong val)
+ target_ulong val, uintptr_t ra)
{
env->vsepc = val;
return RISCV_EXCP_NONE;
@@ -4087,7 +5160,7 @@ static RISCVException read_vscause(CPURISCVState *env, int csrno,
}
static RISCVException write_vscause(CPURISCVState *env, int csrno,
- target_ulong val)
+ target_ulong val, uintptr_t ra)
{
env->vscause = val;
return RISCV_EXCP_NONE;
@@ -4101,7 +5174,7 @@ static RISCVException read_vstval(CPURISCVState *env, int csrno,
}
static RISCVException write_vstval(CPURISCVState *env, int csrno,
- target_ulong val)
+ target_ulong val, uintptr_t ra)
{
env->vstval = val;
return RISCV_EXCP_NONE;
@@ -4115,7 +5188,7 @@ static RISCVException read_vsatp(CPURISCVState *env, int csrno,
}
static RISCVException write_vsatp(CPURISCVState *env, int csrno,
- target_ulong val)
+ target_ulong val, uintptr_t ra)
{
env->vsatp = legalize_xatp(env, env->vsatp, val);
return RISCV_EXCP_NONE;
@@ -4129,7 +5202,7 @@ static RISCVException read_mtval2(CPURISCVState *env, int csrno,
}
static RISCVException write_mtval2(CPURISCVState *env, int csrno,
- target_ulong val)
+ target_ulong val, uintptr_t ra)
{
env->mtval2 = val;
return RISCV_EXCP_NONE;
@@ -4143,7 +5216,7 @@ static RISCVException read_mtinst(CPURISCVState *env, int csrno,
}
static RISCVException write_mtinst(CPURISCVState *env, int csrno,
- target_ulong val)
+ target_ulong val, uintptr_t ra)
{
env->mtinst = val;
return RISCV_EXCP_NONE;
@@ -4158,7 +5231,7 @@ static RISCVException read_mseccfg(CPURISCVState *env, int csrno,
}
static RISCVException write_mseccfg(CPURISCVState *env, int csrno,
- target_ulong val)
+ target_ulong val, uintptr_t ra)
{
mseccfg_csr_write(env, val);
return RISCV_EXCP_NONE;
@@ -4174,7 +5247,7 @@ static RISCVException read_pmpcfg(CPURISCVState *env, int csrno,
}
static RISCVException write_pmpcfg(CPURISCVState *env, int csrno,
- target_ulong val)
+ target_ulong val, uintptr_t ra)
{
uint32_t reg_index = csrno - CSR_PMPCFG0;
@@ -4190,7 +5263,7 @@ static RISCVException read_pmpaddr(CPURISCVState *env, int csrno,
}
static RISCVException write_pmpaddr(CPURISCVState *env, int csrno,
- target_ulong val)
+ target_ulong val, uintptr_t ra)
{
pmpaddr_csr_write(env, csrno - CSR_PMPADDR0, val);
return RISCV_EXCP_NONE;
@@ -4204,7 +5277,7 @@ static RISCVException read_tselect(CPURISCVState *env, int csrno,
}
static RISCVException write_tselect(CPURISCVState *env, int csrno,
- target_ulong val)
+ target_ulong val, uintptr_t ra)
{
tselect_csr_write(env, val);
return RISCV_EXCP_NONE;
@@ -4228,7 +5301,7 @@ static RISCVException read_tdata(CPURISCVState *env, int csrno,
}
static RISCVException write_tdata(CPURISCVState *env, int csrno,
- target_ulong val)
+ target_ulong val, uintptr_t ra)
{
if (!tdata_available(env, csrno - CSR_TDATA1)) {
return RISCV_EXCP_ILLEGAL_INST;
@@ -4253,7 +5326,7 @@ static RISCVException read_mcontext(CPURISCVState *env, int csrno,
}
static RISCVException write_mcontext(CPURISCVState *env, int csrno,
- target_ulong val)
+ target_ulong val, uintptr_t ra)
{
bool rv32 = riscv_cpu_mxl(env) == MXL_RV32 ? true : false;
int32_t mask;
@@ -4270,299 +5343,71 @@ static RISCVException write_mcontext(CPURISCVState *env, int csrno,
return RISCV_EXCP_NONE;
}
-/*
- * Functions to access Pointer Masking feature registers
- * We have to check if current priv lvl could modify
- * csr in given mode
- */
-static bool check_pm_current_disabled(CPURISCVState *env, int csrno)
-{
- int csr_priv = get_field(csrno, 0x300);
- int pm_current;
-
- if (env->debugger) {
- return false;
- }
- /*
- * If priv lvls differ that means we're accessing csr from higher priv lvl,
- * so allow the access
- */
- if (env->priv != csr_priv) {
- return false;
- }
- switch (env->priv) {
- case PRV_M:
- pm_current = get_field(env->mmte, M_PM_CURRENT);
- break;
- case PRV_S:
- pm_current = get_field(env->mmte, S_PM_CURRENT);
- break;
- case PRV_U:
- pm_current = get_field(env->mmte, U_PM_CURRENT);
- break;
- default:
- g_assert_not_reached();
- }
- /* It's same priv lvl, so we allow to modify csr only if pm.current==1 */
- return !pm_current;
-}
-
-static RISCVException read_mmte(CPURISCVState *env, int csrno,
- target_ulong *val)
-{
- *val = env->mmte & MMTE_MASK;
- return RISCV_EXCP_NONE;
-}
-
-static RISCVException write_mmte(CPURISCVState *env, int csrno,
- target_ulong val)
-{
- uint64_t mstatus;
- target_ulong wpri_val = val & MMTE_MASK;
-
- if (val != wpri_val) {
- qemu_log_mask(LOG_GUEST_ERROR, "%s" TARGET_FMT_lx " %s"
- TARGET_FMT_lx "\n", "MMTE: WPRI violation written 0x",
- val, "vs expected 0x", wpri_val);
- }
- /* for machine mode pm.current is hardwired to 1 */
- wpri_val |= MMTE_M_PM_CURRENT;
-
- /* hardwiring pm.instruction bit to 0, since it's not supported yet */
- wpri_val &= ~(MMTE_M_PM_INSN | MMTE_S_PM_INSN | MMTE_U_PM_INSN);
- env->mmte = wpri_val | EXT_STATUS_DIRTY;
- riscv_cpu_update_mask(env);
-
- /* Set XS and SD bits, since PM CSRs are dirty */
- mstatus = env->mstatus | MSTATUS_XS;
- write_mstatus(env, csrno, mstatus);
- return RISCV_EXCP_NONE;
-}
-
-static RISCVException read_smte(CPURISCVState *env, int csrno,
- target_ulong *val)
-{
- *val = env->mmte & SMTE_MASK;
- return RISCV_EXCP_NONE;
-}
-
-static RISCVException write_smte(CPURISCVState *env, int csrno,
- target_ulong val)
-{
- target_ulong wpri_val = val & SMTE_MASK;
-
- if (val != wpri_val) {
- qemu_log_mask(LOG_GUEST_ERROR, "%s" TARGET_FMT_lx " %s"
- TARGET_FMT_lx "\n", "SMTE: WPRI violation written 0x",
- val, "vs expected 0x", wpri_val);
- }
-
- /* if pm.current==0 we can't modify current PM CSRs */
- if (check_pm_current_disabled(env, csrno)) {
- return RISCV_EXCP_NONE;
- }
-
- wpri_val |= (env->mmte & ~SMTE_MASK);
- write_mmte(env, csrno, wpri_val);
- return RISCV_EXCP_NONE;
-}
-
-static RISCVException read_umte(CPURISCVState *env, int csrno,
- target_ulong *val)
-{
- *val = env->mmte & UMTE_MASK;
- return RISCV_EXCP_NONE;
-}
-
-static RISCVException write_umte(CPURISCVState *env, int csrno,
- target_ulong val)
-{
- target_ulong wpri_val = val & UMTE_MASK;
-
- if (val != wpri_val) {
- qemu_log_mask(LOG_GUEST_ERROR, "%s" TARGET_FMT_lx " %s"
- TARGET_FMT_lx "\n", "UMTE: WPRI violation written 0x",
- val, "vs expected 0x", wpri_val);
- }
-
- if (check_pm_current_disabled(env, csrno)) {
- return RISCV_EXCP_NONE;
- }
-
- wpri_val |= (env->mmte & ~UMTE_MASK);
- write_mmte(env, csrno, wpri_val);
- return RISCV_EXCP_NONE;
-}
-
-static RISCVException read_mpmmask(CPURISCVState *env, int csrno,
- target_ulong *val)
-{
- *val = env->mpmmask;
- return RISCV_EXCP_NONE;
-}
-
-static RISCVException write_mpmmask(CPURISCVState *env, int csrno,
- target_ulong val)
-{
- uint64_t mstatus;
-
- env->mpmmask = val;
- if ((cpu_address_mode(env) == PRV_M) && (env->mmte & M_PM_ENABLE)) {
- env->cur_pmmask = val;
- }
- env->mmte |= EXT_STATUS_DIRTY;
-
- /* Set XS and SD bits, since PM CSRs are dirty */
- mstatus = env->mstatus | MSTATUS_XS;
- write_mstatus(env, csrno, mstatus);
- return RISCV_EXCP_NONE;
-}
-
-static RISCVException read_spmmask(CPURISCVState *env, int csrno,
- target_ulong *val)
+static RISCVException read_mnscratch(CPURISCVState *env, int csrno,
+ target_ulong *val)
{
- *val = env->spmmask;
+ *val = env->mnscratch;
return RISCV_EXCP_NONE;
}
-static RISCVException write_spmmask(CPURISCVState *env, int csrno,
- target_ulong val)
+static RISCVException write_mnscratch(CPURISCVState *env, int csrno,
+ target_ulong val, uintptr_t ra)
{
- uint64_t mstatus;
-
- /* if pm.current==0 we can't modify current PM CSRs */
- if (check_pm_current_disabled(env, csrno)) {
- return RISCV_EXCP_NONE;
- }
- env->spmmask = val;
- if ((cpu_address_mode(env) == PRV_S) && (env->mmte & S_PM_ENABLE)) {
- env->cur_pmmask = val;
- if (cpu_get_xl(env, PRV_S) == MXL_RV32) {
- env->cur_pmmask &= UINT32_MAX;
- }
- }
- env->mmte |= EXT_STATUS_DIRTY;
-
- /* Set XS and SD bits, since PM CSRs are dirty */
- mstatus = env->mstatus | MSTATUS_XS;
- write_mstatus(env, csrno, mstatus);
+ env->mnscratch = val;
return RISCV_EXCP_NONE;
}
-static RISCVException read_upmmask(CPURISCVState *env, int csrno,
- target_ulong *val)
+static RISCVException read_mnepc(CPURISCVState *env, int csrno,
+ target_ulong *val)
{
- *val = env->upmmask;
+ *val = env->mnepc;
return RISCV_EXCP_NONE;
}
-static RISCVException write_upmmask(CPURISCVState *env, int csrno,
- target_ulong val)
+static RISCVException write_mnepc(CPURISCVState *env, int csrno,
+ target_ulong val, uintptr_t ra)
{
- uint64_t mstatus;
-
- /* if pm.current==0 we can't modify current PM CSRs */
- if (check_pm_current_disabled(env, csrno)) {
- return RISCV_EXCP_NONE;
- }
- env->upmmask = val;
- if ((cpu_address_mode(env) == PRV_U) && (env->mmte & U_PM_ENABLE)) {
- env->cur_pmmask = val;
- if (cpu_get_xl(env, PRV_U) == MXL_RV32) {
- env->cur_pmmask &= UINT32_MAX;
- }
- }
- env->mmte |= EXT_STATUS_DIRTY;
-
- /* Set XS and SD bits, since PM CSRs are dirty */
- mstatus = env->mstatus | MSTATUS_XS;
- write_mstatus(env, csrno, mstatus);
+ env->mnepc = val;
return RISCV_EXCP_NONE;
}
-static RISCVException read_mpmbase(CPURISCVState *env, int csrno,
+static RISCVException read_mncause(CPURISCVState *env, int csrno,
target_ulong *val)
{
- *val = env->mpmbase;
+ *val = env->mncause;
return RISCV_EXCP_NONE;
}
-static RISCVException write_mpmbase(CPURISCVState *env, int csrno,
- target_ulong val)
+static RISCVException write_mncause(CPURISCVState *env, int csrno,
+ target_ulong val, uintptr_t ra)
{
- uint64_t mstatus;
-
- env->mpmbase = val;
- if ((cpu_address_mode(env) == PRV_M) && (env->mmte & M_PM_ENABLE)) {
- env->cur_pmbase = val;
- }
- env->mmte |= EXT_STATUS_DIRTY;
-
- /* Set XS and SD bits, since PM CSRs are dirty */
- mstatus = env->mstatus | MSTATUS_XS;
- write_mstatus(env, csrno, mstatus);
+ env->mncause = val;
return RISCV_EXCP_NONE;
}
-static RISCVException read_spmbase(CPURISCVState *env, int csrno,
- target_ulong *val)
+static RISCVException read_mnstatus(CPURISCVState *env, int csrno,
+ target_ulong *val)
{
- *val = env->spmbase;
+ *val = env->mnstatus;
return RISCV_EXCP_NONE;
}
-static RISCVException write_spmbase(CPURISCVState *env, int csrno,
- target_ulong val)
+static RISCVException write_mnstatus(CPURISCVState *env, int csrno,
+ target_ulong val, uintptr_t ra)
{
- uint64_t mstatus;
+ target_ulong mask = (MNSTATUS_NMIE | MNSTATUS_MNPP);
- /* if pm.current==0 we can't modify current PM CSRs */
- if (check_pm_current_disabled(env, csrno)) {
- return RISCV_EXCP_NONE;
- }
- env->spmbase = val;
- if ((cpu_address_mode(env) == PRV_S) && (env->mmte & S_PM_ENABLE)) {
- env->cur_pmbase = val;
- if (cpu_get_xl(env, PRV_S) == MXL_RV32) {
- env->cur_pmbase &= UINT32_MAX;
+ if (riscv_has_ext(env, RVH)) {
+ /* Flush tlb on mnstatus fields that affect VM. */
+ if ((val ^ env->mnstatus) & MNSTATUS_MNPV) {
+ tlb_flush(env_cpu(env));
}
- }
- env->mmte |= EXT_STATUS_DIRTY;
-
- /* Set XS and SD bits, since PM CSRs are dirty */
- mstatus = env->mstatus | MSTATUS_XS;
- write_mstatus(env, csrno, mstatus);
- return RISCV_EXCP_NONE;
-}
-
-static RISCVException read_upmbase(CPURISCVState *env, int csrno,
- target_ulong *val)
-{
- *val = env->upmbase;
- return RISCV_EXCP_NONE;
-}
-static RISCVException write_upmbase(CPURISCVState *env, int csrno,
- target_ulong val)
-{
- uint64_t mstatus;
-
- /* if pm.current==0 we can't modify current PM CSRs */
- if (check_pm_current_disabled(env, csrno)) {
- return RISCV_EXCP_NONE;
+ mask |= MNSTATUS_MNPV;
}
- env->upmbase = val;
- if ((cpu_address_mode(env) == PRV_U) && (env->mmte & U_PM_ENABLE)) {
- env->cur_pmbase = val;
- if (cpu_get_xl(env, PRV_U) == MXL_RV32) {
- env->cur_pmbase &= UINT32_MAX;
- }
- }
- env->mmte |= EXT_STATUS_DIRTY;
- /* Set XS and SD bits, since PM CSRs are dirty */
- mstatus = env->mstatus | MSTATUS_XS;
- write_mstatus(env, csrno, mstatus);
+ /* mnstatus.NMIE can only be cleared by hardware. */
+ env->mnstatus = (env->mnstatus & MNSTATUS_NMIE) | (val & mask);
return RISCV_EXCP_NONE;
}
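+/*
+ * Illustration (not normative): once NMIE is 1, writing 0 to it has no
+ * effect because the previous NMIE value is OR-ed back in; software can
+ * set the bit but only the RNMI trap path clears it.
+ */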
@@ -4688,7 +5533,8 @@ static inline RISCVException riscv_csrrw_check(CPURISCVState *env,
static RISCVException riscv_csrrw_do64(CPURISCVState *env, int csrno,
target_ulong *ret_value,
target_ulong new_value,
- target_ulong write_mask)
+ target_ulong write_mask,
+ uintptr_t ra)
{
RISCVException ret;
target_ulong old_value = 0;
@@ -4718,7 +5564,7 @@ static RISCVException riscv_csrrw_do64(CPURISCVState *env, int csrno,
if (write_mask) {
new_value = (old_value & ~write_mask) | (new_value & write_mask);
if (csr_ops[csrno].write) {
- ret = csr_ops[csrno].write(env, csrno, new_value);
+ ret = csr_ops[csrno].write(env, csrno, new_value, ra);
if (ret != RISCV_EXCP_NONE) {
return ret;
}
@@ -4741,25 +5587,25 @@ RISCVException riscv_csrr(CPURISCVState *env, int csrno,
return ret;
}
- return riscv_csrrw_do64(env, csrno, ret_value, 0, 0);
+ return riscv_csrrw_do64(env, csrno, ret_value, 0, 0, 0);
}
RISCVException riscv_csrrw(CPURISCVState *env, int csrno,
- target_ulong *ret_value,
- target_ulong new_value, target_ulong write_mask)
+ target_ulong *ret_value, target_ulong new_value,
+ target_ulong write_mask, uintptr_t ra)
{
RISCVException ret = riscv_csrrw_check(env, csrno, true);
if (ret != RISCV_EXCP_NONE) {
return ret;
}
- return riscv_csrrw_do64(env, csrno, ret_value, new_value, write_mask);
+ return riscv_csrrw_do64(env, csrno, ret_value, new_value, write_mask, ra);
}
static RISCVException riscv_csrrw_do128(CPURISCVState *env, int csrno,
Int128 *ret_value,
Int128 new_value,
- Int128 write_mask)
+ Int128 write_mask, uintptr_t ra)
{
RISCVException ret;
Int128 old_value;
@@ -4781,7 +5627,7 @@ static RISCVException riscv_csrrw_do128(CPURISCVState *env, int csrno,
}
} else if (csr_ops[csrno].write) {
/* avoids having to write wrappers for all registers */
- ret = csr_ops[csrno].write(env, csrno, int128_getlo(new_value));
+ ret = csr_ops[csrno].write(env, csrno, int128_getlo(new_value), ra);
if (ret != RISCV_EXCP_NONE) {
return ret;
}
@@ -4808,7 +5654,7 @@ RISCVException riscv_csrr_i128(CPURISCVState *env, int csrno,
if (csr_ops[csrno].read128) {
return riscv_csrrw_do128(env, csrno, ret_value,
- int128_zero(), int128_zero());
+ int128_zero(), int128_zero(), 0);
}
/*
@@ -4819,9 +5665,7 @@ RISCVException riscv_csrr_i128(CPURISCVState *env, int csrno,
* accesses
*/
target_ulong old_value;
- ret = riscv_csrrw_do64(env, csrno, &old_value,
- (target_ulong)0,
- (target_ulong)0);
+ ret = riscv_csrrw_do64(env, csrno, &old_value, 0, 0, 0);
if (ret == RISCV_EXCP_NONE && ret_value) {
*ret_value = int128_make64(old_value);
}
@@ -4829,8 +5673,8 @@ RISCVException riscv_csrr_i128(CPURISCVState *env, int csrno,
}
RISCVException riscv_csrrw_i128(CPURISCVState *env, int csrno,
- Int128 *ret_value,
- Int128 new_value, Int128 write_mask)
+ Int128 *ret_value, Int128 new_value,
+ Int128 write_mask, uintptr_t ra)
{
RISCVException ret;
@@ -4840,7 +5684,8 @@ RISCVException riscv_csrrw_i128(CPURISCVState *env, int csrno,
}
if (csr_ops[csrno].read128) {
- return riscv_csrrw_do128(env, csrno, ret_value, new_value, write_mask);
+ return riscv_csrrw_do128(env, csrno, ret_value,
+ new_value, write_mask, ra);
}
/*
@@ -4853,7 +5698,7 @@ RISCVException riscv_csrrw_i128(CPURISCVState *env, int csrno,
target_ulong old_value;
ret = riscv_csrrw_do64(env, csrno, &old_value,
int128_getlo(new_value),
- int128_getlo(write_mask));
+ int128_getlo(write_mask), ra);
if (ret == RISCV_EXCP_NONE && ret_value) {
*ret_value = int128_make64(old_value);
}
@@ -4876,7 +5721,7 @@ RISCVException riscv_csrrw_debug(CPURISCVState *env, int csrno,
if (!write_mask) {
ret = riscv_csrr(env, csrno, ret_value);
} else {
- ret = riscv_csrrw(env, csrno, ret_value, new_value, write_mask);
+ ret = riscv_csrrw(env, csrno, ret_value, new_value, write_mask, 0);
}
#if !defined(CONFIG_USER_ONLY)
env->debugger = false;
@@ -4892,7 +5737,7 @@ static RISCVException read_jvt(CPURISCVState *env, int csrno,
}
static RISCVException write_jvt(CPURISCVState *env, int csrno,
- target_ulong val)
+ target_ulong val, uintptr_t ra)
{
env->jvt = val;
return RISCV_EXCP_NONE;
@@ -4934,6 +5779,9 @@ riscv_csr_operations csr_ops[CSR_TABLE_SIZE] = {
/* Zcmt Extension */
[CSR_JVT] = {"jvt", zcmt, read_jvt, write_jvt},
+ /* zicfiss Extension, shadow stack register */
+ [CSR_SSP] = { "ssp", cfi_ss, read_ssp, write_ssp },
+
#if !defined(CONFIG_USER_ONLY)
/* Machine Timers and Counters */
[CSR_MCYCLE] = { "mcycle", any, read_hpmcounter,
@@ -4981,8 +5829,22 @@ riscv_csr_operations csr_ops[CSR_TABLE_SIZE] = {
[CSR_MIP] = { "mip", any, NULL, NULL, rmw_mip },
/* Machine-Level Window to Indirectly Accessed Registers (AIA) */
- [CSR_MISELECT] = { "miselect", aia_any, NULL, NULL, rmw_xiselect },
- [CSR_MIREG] = { "mireg", aia_any, NULL, NULL, rmw_xireg },
+ [CSR_MISELECT] = { "miselect", csrind_or_aia_any, NULL, NULL,
+ rmw_xiselect },
+ [CSR_MIREG] = { "mireg", csrind_or_aia_any, NULL, NULL,
+ rmw_xireg },
+
+ /* Machine Indirect Register Alias */
+ [CSR_MIREG2] = { "mireg2", csrind_any, NULL, NULL, rmw_xiregi,
+ .min_priv_ver = PRIV_VERSION_1_12_0 },
+ [CSR_MIREG3] = { "mireg3", csrind_any, NULL, NULL, rmw_xiregi,
+ .min_priv_ver = PRIV_VERSION_1_12_0 },
+ [CSR_MIREG4] = { "mireg4", csrind_any, NULL, NULL, rmw_xiregi,
+ .min_priv_ver = PRIV_VERSION_1_12_0 },
+ [CSR_MIREG5] = { "mireg5", csrind_any, NULL, NULL, rmw_xiregi,
+ .min_priv_ver = PRIV_VERSION_1_12_0 },
+ [CSR_MIREG6] = { "mireg6", csrind_any, NULL, NULL, rmw_xiregi,
+ .min_priv_ver = PRIV_VERSION_1_12_0 },
/* Machine-Level Interrupts (AIA) */
[CSR_MTOPEI] = { "mtopei", aia_any, NULL, NULL, rmw_xtopei },
@@ -5070,6 +5932,21 @@ riscv_csr_operations csr_ops[CSR_TABLE_SIZE] = {
write_sstateen_1_3,
.min_priv_ver = PRIV_VERSION_1_12_0 },
+ /* RNMI */
+ [CSR_MNSCRATCH] = { "mnscratch", rnmi, read_mnscratch, write_mnscratch,
+ .min_priv_ver = PRIV_VERSION_1_12_0 },
+ [CSR_MNEPC] = { "mnepc", rnmi, read_mnepc, write_mnepc,
+ .min_priv_ver = PRIV_VERSION_1_12_0 },
+ [CSR_MNCAUSE] = { "mncause", rnmi, read_mncause, write_mncause,
+ .min_priv_ver = PRIV_VERSION_1_12_0 },
+ [CSR_MNSTATUS] = { "mnstatus", rnmi, read_mnstatus, write_mnstatus,
+ .min_priv_ver = PRIV_VERSION_1_12_0 },
+
+ /* Supervisor Counter Delegation */
+ [CSR_SCOUNTINHIBIT] = {"scountinhibit", scountinhibit_pred,
+ read_scountinhibit, write_scountinhibit,
+ .min_priv_ver = PRIV_VERSION_1_12_0 },
+
/* Supervisor Trap Setup */
[CSR_SSTATUS] = { "sstatus", smode, read_sstatus, write_sstatus,
NULL, read_sstatus_i128 },
@@ -5100,8 +5977,22 @@ riscv_csr_operations csr_ops[CSR_TABLE_SIZE] = {
[CSR_SATP] = { "satp", satp, read_satp, write_satp },
/* Supervisor-Level Window to Indirectly Accessed Registers (AIA) */
- [CSR_SISELECT] = { "siselect", aia_smode, NULL, NULL, rmw_xiselect },
- [CSR_SIREG] = { "sireg", aia_smode, NULL, NULL, rmw_xireg },
+ [CSR_SISELECT] = { "siselect", csrind_or_aia_smode, NULL, NULL,
+ rmw_xiselect },
+ [CSR_SIREG] = { "sireg", csrind_or_aia_smode, NULL, NULL,
+ rmw_xireg },
+
+ /* Supervisor Indirect Register Alias */
+ [CSR_SIREG2] = { "sireg2", csrind_smode, NULL, NULL, rmw_xiregi,
+ .min_priv_ver = PRIV_VERSION_1_12_0 },
+ [CSR_SIREG3] = { "sireg3", csrind_smode, NULL, NULL, rmw_xiregi,
+ .min_priv_ver = PRIV_VERSION_1_12_0 },
+ [CSR_SIREG4] = { "sireg4", csrind_smode, NULL, NULL, rmw_xiregi,
+ .min_priv_ver = PRIV_VERSION_1_12_0 },
+ [CSR_SIREG5] = { "sireg5", csrind_smode, NULL, NULL, rmw_xiregi,
+ .min_priv_ver = PRIV_VERSION_1_12_0 },
+ [CSR_SIREG6] = { "sireg6", csrind_smode, NULL, NULL, rmw_xiregi,
+ .min_priv_ver = PRIV_VERSION_1_12_0 },
/* Supervisor-Level Interrupts (AIA) */
[CSR_STOPEI] = { "stopei", aia_smode, NULL, NULL, rmw_xtopei },
@@ -5164,7 +6055,7 @@ riscv_csr_operations csr_ops[CSR_TABLE_SIZE] = {
[CSR_VSATP] = { "vsatp", hmode, read_vsatp, write_vsatp,
.min_priv_ver = PRIV_VERSION_1_12_0 },
- [CSR_MTVAL2] = { "mtval2", hmode, read_mtval2, write_mtval2,
+ [CSR_MTVAL2] = { "mtval2", dbltrp_hmode, read_mtval2, write_mtval2,
.min_priv_ver = PRIV_VERSION_1_12_0 },
[CSR_MTINST] = { "mtinst", hmode, read_mtinst, write_mtinst,
.min_priv_ver = PRIV_VERSION_1_12_0 },
@@ -5180,9 +6071,22 @@ riscv_csr_operations csr_ops[CSR_TABLE_SIZE] = {
/*
* VS-Level Window to Indirectly Accessed Registers (H-extension with AIA)
*/
- [CSR_VSISELECT] = { "vsiselect", aia_hmode, NULL, NULL,
- rmw_xiselect },
- [CSR_VSIREG] = { "vsireg", aia_hmode, NULL, NULL, rmw_xireg },
+ [CSR_VSISELECT] = { "vsiselect", csrind_or_aia_hmode, NULL, NULL,
+ rmw_xiselect },
+ [CSR_VSIREG] = { "vsireg", csrind_or_aia_hmode, NULL, NULL,
+ rmw_xireg },
+
+ /* Virtual Supervisor Indirect Alias */
+ [CSR_VSIREG2] = { "vsireg2", csrind_hmode, NULL, NULL, rmw_xiregi,
+ .min_priv_ver = PRIV_VERSION_1_12_0 },
+ [CSR_VSIREG3] = { "vsireg3", csrind_hmode, NULL, NULL, rmw_xiregi,
+ .min_priv_ver = PRIV_VERSION_1_12_0 },
+ [CSR_VSIREG4] = { "vsireg4", csrind_hmode, NULL, NULL, rmw_xiregi,
+ .min_priv_ver = PRIV_VERSION_1_12_0 },
+ [CSR_VSIREG5] = { "vsireg5", csrind_hmode, NULL, NULL, rmw_xiregi,
+ .min_priv_ver = PRIV_VERSION_1_12_0 },
+ [CSR_VSIREG6] = { "vsireg6", csrind_hmode, NULL, NULL, rmw_xiregi,
+ .min_priv_ver = PRIV_VERSION_1_12_0 },
/* VS-Level Interrupts (H-extension with AIA) */
[CSR_VSTOPEI] = { "vstopei", aia_hmode, NULL, NULL, rmw_xtopei },
@@ -5232,24 +6136,11 @@ riscv_csr_operations csr_ops[CSR_TABLE_SIZE] = {
[CSR_TINFO] = { "tinfo", debug, read_tinfo, write_ignore },
[CSR_MCONTEXT] = { "mcontext", debug, read_mcontext, write_mcontext },
- /* User Pointer Masking */
- [CSR_UMTE] = { "umte", pointer_masking, read_umte, write_umte },
- [CSR_UPMMASK] = { "upmmask", pointer_masking, read_upmmask,
- write_upmmask },
- [CSR_UPMBASE] = { "upmbase", pointer_masking, read_upmbase,
- write_upmbase },
- /* Machine Pointer Masking */
- [CSR_MMTE] = { "mmte", pointer_masking, read_mmte, write_mmte },
- [CSR_MPMMASK] = { "mpmmask", pointer_masking, read_mpmmask,
- write_mpmmask },
- [CSR_MPMBASE] = { "mpmbase", pointer_masking, read_mpmbase,
- write_mpmbase },
- /* Supervisor Pointer Masking */
- [CSR_SMTE] = { "smte", pointer_masking, read_smte, write_smte },
- [CSR_SPMMASK] = { "spmmask", pointer_masking, read_spmmask,
- write_spmmask },
- [CSR_SPMBASE] = { "spmbase", pointer_masking, read_spmbase,
- write_spmbase },
+ [CSR_MCTRCTL] = { "mctrctl", ctr_mmode, NULL, NULL, rmw_xctrctl },
+ [CSR_SCTRCTL] = { "sctrctl", ctr_smode, NULL, NULL, rmw_xctrctl },
+ [CSR_VSCTRCTL] = { "vsctrctl", ctr_smode, NULL, NULL, rmw_xctrctl },
+ [CSR_SCTRDEPTH] = { "sctrdepth", ctr_smode, NULL, NULL, rmw_sctrdepth },
+ [CSR_SCTRSTATUS] = { "sctrstatus", ctr_smode, NULL, NULL, rmw_sctrstatus },
/* Performance Counters */
[CSR_HPMCOUNTER3] = { "hpmcounter3", ctr, read_hpmcounter },
diff --git a/target/riscv/debug.c b/target/riscv/debug.c
index 0b5099f..5664466 100644
--- a/target/riscv/debug.c
+++ b/target/riscv/debug.c
@@ -28,9 +28,10 @@
#include "qapi/error.h"
#include "cpu.h"
#include "trace.h"
-#include "exec/exec-all.h"
#include "exec/helper-proto.h"
-#include "sysemu/cpu-timers.h"
+#include "exec/watchpoint.h"
+#include "system/cpu-timers.h"
+#include "exec/icount.h"
/*
* The following M-mode trigger CSRs are implemented:
@@ -217,6 +218,66 @@ static inline void warn_always_zero_bit(target_ulong val, target_ulong mask,
}
}
+static target_ulong textra_validate(CPURISCVState *env, target_ulong tdata3)
+{
+ target_ulong mhvalue, mhselect;
+ target_ulong mhselect_new;
+ target_ulong textra;
+ const uint32_t mhselect_no_rvh[8] = { 0, 0, 0, 0, 4, 4, 4, 4 };
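+ /*
+ * Illustration (not normative): without RVH, mhselect encodings 1..3
+ * fold to 0 (ignore) and 5..7 fold to 4 (mcontext), per this table.
+ */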
+
+ switch (riscv_cpu_mxl(env)) {
+ case MXL_RV32:
+ mhvalue = get_field(tdata3, TEXTRA32_MHVALUE);
+ mhselect = get_field(tdata3, TEXTRA32_MHSELECT);
+ /* Validate unimplemented (always zero) bits */
+ warn_always_zero_bit(tdata3, (target_ulong)TEXTRA32_SBYTEMASK,
+ "sbytemask");
+ warn_always_zero_bit(tdata3, (target_ulong)TEXTRA32_SVALUE,
+ "svalue");
+ warn_always_zero_bit(tdata3, (target_ulong)TEXTRA32_SSELECT,
+ "sselect");
+ break;
+ case MXL_RV64:
+ case MXL_RV128:
+ mhvalue = get_field(tdata3, TEXTRA64_MHVALUE);
+ mhselect = get_field(tdata3, TEXTRA64_MHSELECT);
+ /* Validate unimplemented (always zero) bits */
+ warn_always_zero_bit(tdata3, (target_ulong)TEXTRA64_SBYTEMASK,
+ "sbytemask");
+ warn_always_zero_bit(tdata3, (target_ulong)TEXTRA64_SVALUE,
+ "svalue");
+ warn_always_zero_bit(tdata3, (target_ulong)TEXTRA64_SSELECT,
+ "sselect");
+ break;
+ default:
+ g_assert_not_reached();
+ }
+
+ /* Validate mhselect. */
+ mhselect_new = mhselect_no_rvh[mhselect];
+ if (mhselect != mhselect_new) {
+ qemu_log_mask(LOG_UNIMP, "mhselect only supports 0 or 4 for now\n");
+ }
+
+ /* Write legal values into textra */
+ textra = 0;
+ switch (riscv_cpu_mxl(env)) {
+ case MXL_RV32:
+ textra = set_field(textra, TEXTRA32_MHVALUE, mhvalue);
+ textra = set_field(textra, TEXTRA32_MHSELECT, mhselect_new);
+ break;
+ case MXL_RV64:
+ case MXL_RV128:
+ textra = set_field(textra, TEXTRA64_MHVALUE, mhvalue);
+ textra = set_field(textra, TEXTRA64_MHSELECT, mhselect_new);
+ break;
+ default:
+ g_assert_not_reached();
+ }
+
+ return textra;
+}
+
static void do_trigger_action(CPURISCVState *env, target_ulong trigger_index)
{
trigger_action_t action = get_trigger_action(env, trigger_index);
@@ -304,11 +365,54 @@ static bool trigger_priv_match(CPURISCVState *env, trigger_type_t type,
return false;
}
+static bool trigger_textra_match(CPURISCVState *env, trigger_type_t type,
+ int trigger_index)
+{
+ target_ulong textra = env->tdata3[trigger_index];
+ target_ulong mhvalue, mhselect;
+
+ if (type < TRIGGER_TYPE_AD_MATCH || type > TRIGGER_TYPE_AD_MATCH6) {
+ /* textra checking is only applicable when type is 2, 3, 4, 5, or 6 */
+ return true;
+ }
+
+ switch (riscv_cpu_mxl(env)) {
+ case MXL_RV32:
+ mhvalue = get_field(textra, TEXTRA32_MHVALUE);
+ mhselect = get_field(textra, TEXTRA32_MHSELECT);
+ break;
+ case MXL_RV64:
+ case MXL_RV128:
+ mhvalue = get_field(textra, TEXTRA64_MHVALUE);
+ mhselect = get_field(textra, TEXTRA64_MHSELECT);
+ break;
+ default:
+ g_assert_not_reached();
+ }
+
+ /* Check mhvalue and mhselect. */
+ switch (mhselect) {
+ case MHSELECT_IGNORE:
+ break;
+ case MHSELECT_MCONTEXT:
+ /* Match if the low bits of mcontext/hcontext equal mhvalue. */
+ if (mhvalue != env->mcontext) {
+ return false;
+ }
+ break;
+ default:
+ break;
+ }
+
+ return true;
+}
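+/*
+ * Illustration (not normative): with mhselect = 4 and mhvalue = 0x7, a
+ * type 2..6 trigger only matches while mcontext == 0x7; mhselect = 0
+ * leaves the trigger unconditioned on mcontext.
+ */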
+
/* Common matching conditions for all types of the triggers. */
static bool trigger_common_match(CPURISCVState *env, trigger_type_t type,
int trigger_index)
{
- return trigger_priv_match(env, type, trigger_index);
+ return trigger_priv_match(env, type, trigger_index) &&
+ trigger_textra_match(env, type, trigger_index);
}
/* type 2 trigger */
@@ -375,7 +479,7 @@ static void type2_breakpoint_insert(CPURISCVState *env, target_ulong index)
bool enabled = type2_breakpoint_enabled(ctrl);
CPUState *cs = env_cpu(env);
int flags = BP_CPU | BP_STOP_BEFORE_ACCESS;
- uint32_t size;
+ uint32_t size, def_size;
if (!enabled) {
return;
@@ -398,7 +502,9 @@ static void type2_breakpoint_insert(CPURISCVState *env, target_ulong index)
cpu_watchpoint_insert(cs, addr, size, flags,
&env->cpu_watchpoint[index]);
} else {
- cpu_watchpoint_insert(cs, addr, 8, flags,
+ def_size = riscv_cpu_mxl(env) == MXL_RV64 ? 8 : 4;
+
+ cpu_watchpoint_insert(cs, addr, def_size, flags,
&env->cpu_watchpoint[index]);
}
}
@@ -441,14 +547,11 @@ static void type2_reg_write(CPURISCVState *env, target_ulong index,
}
break;
case TDATA3:
- qemu_log_mask(LOG_UNIMP,
- "tdata3 is not supported for type 2 trigger\n");
+ env->tdata3[index] = textra_validate(env, val);
break;
default:
g_assert_not_reached();
}
-
- return;
}
/* type 6 trigger */
@@ -558,14 +661,11 @@ static void type6_reg_write(CPURISCVState *env, target_ulong index,
}
break;
case TDATA3:
- qemu_log_mask(LOG_UNIMP,
- "tdata3 is not supported for type 6 trigger\n");
+ env->tdata3[index] = textra_validate(env, val);
break;
default:
g_assert_not_reached();
}
-
- return;
}
/* icount trigger type */
@@ -741,14 +841,11 @@ static void itrigger_reg_write(CPURISCVState *env, target_ulong index,
"tdata2 is not supported for icount trigger\n");
break;
case TDATA3:
- qemu_log_mask(LOG_UNIMP,
- "tdata3 is not supported for icount trigger\n");
+ env->tdata3[index] = textra_validate(env, val);
break;
default:
g_assert_not_reached();
}
-
- return;
}
static int itrigger_get_adjust_count(CPURISCVState *env)
diff --git a/target/riscv/debug.h b/target/riscv/debug.h
index c347863..f76b8f9 100644
--- a/target/riscv/debug.h
+++ b/target/riscv/debug.h
@@ -131,6 +131,9 @@ enum {
#define ITRIGGER_VU BIT(25)
#define ITRIGGER_VS BIT(26)
+#define MHSELECT_IGNORE 0
+#define MHSELECT_MCONTEXT 4
+
bool tdata_available(CPURISCVState *env, int tdata_index);
target_ulong tselect_csr_read(CPURISCVState *env);
diff --git a/target/riscv/fpu_helper.c b/target/riscv/fpu_helper.c
index 91b1a56..706bdfa 100644
--- a/target/riscv/fpu_helper.c
+++ b/target/riscv/fpu_helper.c
@@ -19,7 +19,6 @@
#include "qemu/osdep.h"
#include "cpu.h"
#include "qemu/host-utils.h"
-#include "exec/exec-all.h"
#include "exec/helper-proto.h"
#include "fpu/softfloat.h"
#include "internals.h"
diff --git a/target/riscv/gdbstub.c b/target/riscv/gdbstub.c
index c07df97..1934f91 100644
--- a/target/riscv/gdbstub.c
+++ b/target/riscv/gdbstub.c
@@ -62,7 +62,7 @@ int riscv_cpu_gdb_read_register(CPUState *cs, GByteArray *mem_buf, int n)
return 0;
}
- switch (mcc->misa_mxl_max) {
+ switch (mcc->def->misa_mxl_max) {
case MXL_RV32:
return gdb_get_reg32(mem_buf, tmp);
case MXL_RV64:
@@ -82,7 +82,7 @@ int riscv_cpu_gdb_write_register(CPUState *cs, uint8_t *mem_buf, int n)
int length = 0;
target_ulong tmp;
- switch (mcc->misa_mxl_max) {
+ switch (mcc->def->misa_mxl_max) {
case MXL_RV32:
tmp = (int32_t)ldl_p(mem_buf);
length = 4;
@@ -213,7 +213,10 @@ static int riscv_gdb_get_virtual(CPUState *cs, GByteArray *buf, int n)
RISCVCPU *cpu = RISCV_CPU(cs);
CPURISCVState *env = &cpu->env;
- return gdb_get_regl(buf, env->priv);
+    /* Per RISC-V debug spec v1.0.0 rc4 */
+ target_ulong vbit = (env->virt_enabled) ? BIT(2) : 0;
+
+ return gdb_get_regl(buf, env->priv | vbit);
#endif
}
return 0;
@@ -226,10 +229,22 @@ static int riscv_gdb_set_virtual(CPUState *cs, uint8_t *mem_buf, int n)
RISCVCPU *cpu = RISCV_CPU(cs);
CPURISCVState *env = &cpu->env;
- env->priv = ldtul_p(mem_buf) & 0x3;
- if (env->priv == PRV_RESERVED) {
- env->priv = PRV_S;
+ target_ulong new_priv = ldtul_p(mem_buf) & 0x3;
+    bool new_virt = false;
+
+ if (new_priv == PRV_RESERVED) {
+ new_priv = PRV_S;
+ }
+
+ if (new_priv != PRV_M) {
+ new_virt = (ldtul_p(mem_buf) & BIT(2)) >> 2;
}
+
+ if (riscv_has_ext(env, RVH) && new_virt != env->virt_enabled) {
+ riscv_cpu_swap_hypervisor_regs(env);
+ }
+
+ riscv_cpu_set_mode(env, new_priv, new_virt);
#endif
return sizeof(target_ulong);
}
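
The encoding written back here matches what riscv_gdb_get_virtual() produces: the privilege level in bits 1:0 and, outside M-mode, the virtualization (V) bit in bit 2. A small host-side sketch of that pack/unpack round trip (the helper names are made up for illustration):

    #include <stdbool.h>
    #include <stdint.h>
    #include <stdio.h>

    /* Privilege levels as encoded in the low two bits. */
    enum { PRV_U = 0, PRV_S = 1, PRV_RESERVED = 2, PRV_M = 3 };

    /* Pack the GDB "priv" virtual register: bits 1:0 = privilege,
     * bit 2 = virtualization (V) bit, which is meaningless in M-mode. */
    static uint64_t priv_reg_pack(unsigned priv, bool virt)
    {
        return (priv & 0x3) | ((priv != PRV_M && virt) ? 1u << 2 : 0);
    }

    static void priv_reg_unpack(uint64_t reg, unsigned *priv, bool *virt)
    {
        *priv = reg & 0x3;
        if (*priv == PRV_RESERVED) {
            *priv = PRV_S;          /* reserved value falls back to S-mode */
        }
        *virt = (*priv != PRV_M) && ((reg >> 2) & 1);
    }

    int main(void)
    {
        unsigned priv;
        bool virt;

        priv_reg_unpack(priv_reg_pack(PRV_S, true), &priv, &virt);
        printf("priv=%u virt=%d\n", priv, virt);   /* priv=1 virt=1: VS-mode */
        return 0;
    }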
@@ -344,7 +359,7 @@ void riscv_cpu_register_gdb_regs_for_features(CPUState *cs)
ricsv_gen_dynamic_vector_feature(cs, cs->gdb_num_regs),
0);
}
- switch (mcc->misa_mxl_max) {
+ switch (mcc->def->misa_mxl_max) {
case MXL_RV32:
gdb_register_coprocessor(cs, riscv_gdb_get_virtual,
riscv_gdb_set_virtual,
diff --git a/target/riscv/helper.h b/target/riscv/helper.h
index 451261c..85d73e4 100644
--- a/target/riscv/helper.h
+++ b/target/riscv/helper.h
@@ -131,10 +131,13 @@ DEF_HELPER_6(csrrw_i128, tl, env, int, tl, tl, tl, tl)
#ifndef CONFIG_USER_ONLY
DEF_HELPER_1(sret, tl, env)
DEF_HELPER_1(mret, tl, env)
+DEF_HELPER_1(mnret, tl, env)
+DEF_HELPER_1(ctr_clear, void, env)
DEF_HELPER_1(wfi, void, env)
DEF_HELPER_1(wrs_nto, void, env)
DEF_HELPER_1(tlb_flush, void, env)
DEF_HELPER_1(tlb_flush_all, void, env)
+DEF_HELPER_4(ctr_add_entry, void, env, tl, tl, tl)
/* Native Debug */
DEF_HELPER_1(itrigger_match, void, env)
#endif
diff --git a/target/riscv/insn16.decode b/target/riscv/insn16.decode
index 3953bcf..bf893d1 100644
--- a/target/riscv/insn16.decode
+++ b/target/riscv/insn16.decode
@@ -140,6 +140,10 @@ sw 110 ... ... .. ... 00 @cs_w
addi 000 . ..... ..... 01 @ci
addi 010 . ..... ..... 01 @c_li
{
+  # c.sspush x1 carved out of zcmops
+ sspush 011 0 00001 00000 01 &r2_s rs2=1 rs1=0
+  # c.sspopchk x5 carved out of zcmops
+ sspopchk 011 0 00101 00000 01 &r2 rs1=5 rd=0
c_mop_n 011 0 0 n:3 1 00000 01
illegal 011 0 ----- 00000 01 # c.addi16sp and c.lui, RES nzimm=0
addi 011 . 00010 ..... 01 @c_addi16sp
diff --git a/target/riscv/insn32.decode b/target/riscv/insn32.decode
index c45b8fa..cd23b1f 100644
--- a/target/riscv/insn32.decode
+++ b/target/riscv/insn32.decode
@@ -114,16 +114,22 @@
# *** Privileged Instructions ***
ecall 000000000000 00000 000 00000 1110011
ebreak 000000000001 00000 000 00000 1110011
+sctrclr 000100000100 00000 000 00000 1110011
uret 0000000 00010 00000 000 00000 1110011
sret 0001000 00010 00000 000 00000 1110011
mret 0011000 00010 00000 000 00000 1110011
wfi 0001000 00101 00000 000 00000 1110011
sfence_vma 0001001 ..... ..... 000 00000 1110011 @sfence_vma
-sfence_vm 0001000 00100 ..... 000 00000 1110011 @sfence_vm
+
+# *** NMI ***
+mnret 0111000 00010 00000 000 00000 1110011
# *** RV32I Base Instruction Set ***
lui .................... ..... 0110111 @u
-auipc .................... ..... 0010111 @u
+{
+ lpad label:20 00000 0010111
+ auipc .................... ..... 0010111 @u
+}
jal .................... ..... 1101111 @j
jalr ............ ..... 000 ..... 1100111 @i
beq ....... ..... ..... 000 ..... 1100011 @b
@@ -243,6 +249,7 @@ remud 0000001 ..... ..... 111 ..... 1111011 @r
lr_w 00010 . . 00000 ..... 010 ..... 0101111 @atom_ld
sc_w 00011 . . ..... ..... 010 ..... 0101111 @atom_st
amoswap_w 00001 . . ..... ..... 010 ..... 0101111 @atom_st
+ssamoswap_w 01001 . . ..... ..... 010 ..... 0101111 @atom_st
amoadd_w 00000 . . ..... ..... 010 ..... 0101111 @atom_st
amoxor_w 00100 . . ..... ..... 010 ..... 0101111 @atom_st
amoand_w 01100 . . ..... ..... 010 ..... 0101111 @atom_st
@@ -256,6 +263,7 @@ amomaxu_w 11100 . . ..... ..... 010 ..... 0101111 @atom_st
lr_d 00010 . . 00000 ..... 011 ..... 0101111 @atom_ld
sc_d 00011 . . ..... ..... 011 ..... 0101111 @atom_st
amoswap_d 00001 . . ..... ..... 011 ..... 0101111 @atom_st
+ssamoswap_d 01001 . . ..... ..... 011 ..... 0101111 @atom_st
amoadd_d 00000 . . ..... ..... 011 ..... 0101111 @atom_st
amoxor_d 00100 . . ..... ..... 011 ..... 0101111 @atom_st
amoand_d 01100 . . ..... ..... 011 ..... 0101111 @atom_st
@@ -695,14 +703,14 @@ vfredmax_vs 000111 . ..... ..... 001 ..... 1010111 @r_vm
# Vector widening ordered and unordered float reduction sum
vfwredusum_vs 110001 . ..... ..... 001 ..... 1010111 @r_vm
vfwredosum_vs 110011 . ..... ..... 001 ..... 1010111 @r_vm
-vmand_mm 011001 - ..... ..... 010 ..... 1010111 @r
-vmnand_mm 011101 - ..... ..... 010 ..... 1010111 @r
-vmandn_mm 011000 - ..... ..... 010 ..... 1010111 @r
-vmxor_mm 011011 - ..... ..... 010 ..... 1010111 @r
-vmor_mm 011010 - ..... ..... 010 ..... 1010111 @r
-vmnor_mm 011110 - ..... ..... 010 ..... 1010111 @r
-vmorn_mm 011100 - ..... ..... 010 ..... 1010111 @r
-vmxnor_mm 011111 - ..... ..... 010 ..... 1010111 @r
+vmand_mm 011001 1 ..... ..... 010 ..... 1010111 @r
+vmnand_mm 011101 1 ..... ..... 010 ..... 1010111 @r
+vmandn_mm 011000 1 ..... ..... 010 ..... 1010111 @r
+vmxor_mm 011011 1 ..... ..... 010 ..... 1010111 @r
+vmor_mm 011010 1 ..... ..... 010 ..... 1010111 @r
+vmnor_mm 011110 1 ..... ..... 010 ..... 1010111 @r
+vmorn_mm 011100 1 ..... ..... 010 ..... 1010111 @r
+vmxnor_mm 011111 1 ..... ..... 010 ..... 1010111 @r
vcpop_m 010000 . ..... 10000 010 ..... 1010111 @r2_vm
vfirst_m 010000 . ..... 10001 010 ..... 1010111 @r2_vm
vmsbf_m 010100 . ..... 00001 010 ..... 1010111 @r2_vm
@@ -724,7 +732,7 @@ vrgather_vv 001100 . ..... ..... 000 ..... 1010111 @r_vm
vrgatherei16_vv 001110 . ..... ..... 000 ..... 1010111 @r_vm
vrgather_vx 001100 . ..... ..... 100 ..... 1010111 @r_vm
vrgather_vi 001100 . ..... ..... 011 ..... 1010111 @r_vm
-vcompress_vm 010111 - ..... ..... 010 ..... 1010111 @r
+vcompress_vm 010111 1 ..... ..... 010 ..... 1010111 @r
vmv1r_v 100111 1 ..... 00000 011 ..... 1010111 @r2rd
vmv2r_v 100111 1 ..... 00001 011 ..... 1010111 @r2rd
vmv4r_v 100111 1 ..... 00011 011 ..... 1010111 @r2rd
@@ -1019,8 +1027,23 @@ amocas_d 00101 . . ..... ..... 011 ..... 0101111 @atom_st
amocas_q 00101 . . ..... ..... 100 ..... 0101111 @atom_st
# *** Zimop may-be-operation extension ***
-mop_r_n 1 . 00 .. 0111 .. ..... 100 ..... 1110011 @mop5
-mop_rr_n 1 . 00 .. 1 ..... ..... 100 ..... 1110011 @mop3
+{
+ # zicfiss instructions carved out of mop.r
+ [
+ ssrdp 1100110 11100 00000 100 rd:5 1110011
+ sspopchk 1100110 11100 00001 100 00000 1110011 &r2 rs1=1 rd=0
+ sspopchk 1100110 11100 00101 100 00000 1110011 &r2 rs1=5 rd=0
+ ]
+ mop_r_n 1 . 00 .. 0111 .. ..... 100 ..... 1110011 @mop5
+}
+{
+ # zicfiss instruction carved out of mop.rr
+ [
+ sspush 1100111 00001 00000 100 00000 1110011 &r2_s rs2=1 rs1=0
+ sspush 1100111 00101 00000 100 00000 1110011 &r2_s rs2=5 rs1=0
+ ]
+ mop_rr_n 1 . 00 .. 1 ..... ..... 100 ..... 1110011 @mop3
+}
# *** Zabhb Standard Extension ***
amoswap_b 00001 . . ..... ..... 000 ..... 0101111 @atom_st
diff --git a/target/riscv/insn_trans/trans_privileged.c.inc b/target/riscv/insn_trans/trans_privileged.c.inc
index bc5263a..8a62b4c 100644
--- a/target/riscv/insn_trans/trans_privileged.c.inc
+++ b/target/riscv/insn_trans/trans_privileged.c.inc
@@ -18,6 +18,12 @@
* this program. If not, see <http://www.gnu.org/licenses/>.
*/
+#define REQUIRE_SMRNMI(ctx) do { \
+ if (!ctx->cfg_ptr->ext_smrnmi) { \
+ return false; \
+ } \
+} while (0)
+
static bool trans_ecall(DisasContext *ctx, arg_ecall *a)
{
/* always generates U-level ECALL, fixed in do_interrupt handler */
@@ -69,6 +75,17 @@ static bool trans_ebreak(DisasContext *ctx, arg_ebreak *a)
return true;
}
+static bool trans_sctrclr(DisasContext *ctx, arg_sctrclr *a)
+{
+#ifndef CONFIG_USER_ONLY
+ if (ctx->cfg_ptr->ext_smctr || ctx->cfg_ptr->ext_ssctr) {
+ gen_helper_ctr_clear(tcg_env);
+ return true;
+ }
+#endif
+ return false;
+}
+
static bool trans_uret(DisasContext *ctx, arg_uret *a)
{
return false;
@@ -78,8 +95,9 @@ static bool trans_sret(DisasContext *ctx, arg_sret *a)
{
#ifndef CONFIG_USER_ONLY
if (has_ext(ctx, RVS)) {
- decode_save_opc(ctx);
+ decode_save_opc(ctx, 0);
translator_io_start(&ctx->base);
+ gen_update_pc(ctx, 0);
gen_helper_sret(cpu_pc, tcg_env);
exit_tb(ctx); /* no chaining */
ctx->base.is_jmp = DISAS_NORETURN;
@@ -95,8 +113,9 @@ static bool trans_sret(DisasContext *ctx, arg_sret *a)
static bool trans_mret(DisasContext *ctx, arg_mret *a)
{
#ifndef CONFIG_USER_ONLY
- decode_save_opc(ctx);
+ decode_save_opc(ctx, 0);
translator_io_start(&ctx->base);
+ gen_update_pc(ctx, 0);
gen_helper_mret(cpu_pc, tcg_env);
exit_tb(ctx); /* no chaining */
ctx->base.is_jmp = DISAS_NORETURN;
@@ -106,10 +125,24 @@ static bool trans_mret(DisasContext *ctx, arg_mret *a)
#endif
}
+static bool trans_mnret(DisasContext *ctx, arg_mnret *a)
+{
+#ifndef CONFIG_USER_ONLY
+ REQUIRE_SMRNMI(ctx);
+ decode_save_opc(ctx, 0);
+ gen_helper_mnret(cpu_pc, tcg_env);
+ tcg_gen_exit_tb(NULL, 0); /* no chaining */
+ ctx->base.is_jmp = DISAS_NORETURN;
+ return true;
+#else
+ return false;
+#endif
+}
+
static bool trans_wfi(DisasContext *ctx, arg_wfi *a)
{
#ifndef CONFIG_USER_ONLY
- decode_save_opc(ctx);
+ decode_save_opc(ctx, 0);
gen_update_pc(ctx, ctx->cur_insn_len);
gen_helper_wfi(tcg_env);
return true;
@@ -121,14 +154,9 @@ static bool trans_wfi(DisasContext *ctx, arg_wfi *a)
static bool trans_sfence_vma(DisasContext *ctx, arg_sfence_vma *a)
{
#ifndef CONFIG_USER_ONLY
- decode_save_opc(ctx);
+ decode_save_opc(ctx, 0);
gen_helper_tlb_flush(tcg_env);
return true;
#endif
return false;
}
-
-static bool trans_sfence_vm(DisasContext *ctx, arg_sfence_vm *a)
-{
- return false;
-}
diff --git a/target/riscv/insn_trans/trans_rva.c.inc b/target/riscv/insn_trans/trans_rva.c.inc
index 39bbf60..9cf3ae8 100644
--- a/target/riscv/insn_trans/trans_rva.c.inc
+++ b/target/riscv/insn_trans/trans_rva.c.inc
@@ -34,7 +34,7 @@ static bool gen_lr(DisasContext *ctx, arg_atomic *a, MemOp mop)
{
TCGv src1;
- decode_save_opc(ctx);
+ decode_save_opc(ctx, 0);
src1 = get_address(ctx, a->rs1, 0);
if (a->rl) {
tcg_gen_mb(TCG_MO_ALL | TCG_BAR_STRL);
@@ -61,7 +61,7 @@ static bool gen_sc(DisasContext *ctx, arg_atomic *a, MemOp mop)
TCGLabel *l1 = gen_new_label();
TCGLabel *l2 = gen_new_label();
- decode_save_opc(ctx);
+ decode_save_opc(ctx, 0);
src1 = get_address(ctx, a->rs1, 0);
tcg_gen_brcond_tl(TCG_COND_NE, load_res, src1, l1);
diff --git a/target/riscv/insn_trans/trans_rvbf16.c.inc b/target/riscv/insn_trans/trans_rvbf16.c.inc
index 0a9cd1e..066dc36 100644
--- a/target/riscv/insn_trans/trans_rvbf16.c.inc
+++ b/target/riscv/insn_trans/trans_rvbf16.c.inc
@@ -119,8 +119,11 @@ static bool trans_vfwmaccbf16_vv(DisasContext *ctx, arg_vfwmaccbf16_vv *a)
REQUIRE_FPU;
REQUIRE_ZVFBFWMA(ctx);
+ uint8_t sew = ctx->sew;
if (require_rvv(ctx) && vext_check_isa_ill(ctx) && (ctx->sew == MO_16) &&
- vext_check_dss(ctx, a->rd, a->rs1, a->rs2, a->vm)) {
+ vext_check_dss(ctx, a->rd, a->rs1, a->rs2, a->vm) &&
+ vext_check_input_eew(ctx, a->rd, sew + 1, a->rs1, sew, a->vm) &&
+ vext_check_input_eew(ctx, a->rd, sew + 1, a->rs2, sew, a->vm)) {
uint32_t data = 0;
gen_set_rm_chkfrm(ctx, RISCV_FRM_DYN);
@@ -146,8 +149,10 @@ static bool trans_vfwmaccbf16_vf(DisasContext *ctx, arg_vfwmaccbf16_vf *a)
REQUIRE_FPU;
REQUIRE_ZVFBFWMA(ctx);
+ uint8_t sew = ctx->sew;
if (require_rvv(ctx) && (ctx->sew == MO_16) && vext_check_isa_ill(ctx) &&
- vext_check_ds(ctx, a->rd, a->rs2, a->vm)) {
+ vext_check_ds(ctx, a->rd, a->rs2, a->vm) &&
+ vext_check_input_eew(ctx, a->rd, sew + 1, a->rs2, sew, a->vm)) {
uint32_t data = 0;
gen_set_rm(ctx, RISCV_FRM_DYN);
diff --git a/target/riscv/insn_trans/trans_rvd.c.inc b/target/riscv/insn_trans/trans_rvd.c.inc
index 1f5fac6..30883ea 100644
--- a/target/riscv/insn_trans/trans_rvd.c.inc
+++ b/target/riscv/insn_trans/trans_rvd.c.inc
@@ -47,11 +47,21 @@ static bool trans_fld(DisasContext *ctx, arg_fld *a)
REQUIRE_FPU;
REQUIRE_EXT(ctx, RVD);
- if (ctx->cfg_ptr->ext_zama16b && (ctx->cur_insn_len != 2)) {
+ /*
+ * FLD and FSD are only guaranteed to execute atomically if the effective
+ * address is naturally aligned and XLEN≥64. Also, zama16b applies to
+ * loads and stores of no more than MXLEN bits defined in the F, D, and
+ * Q extensions.
+ */
+ if (get_xl_max(ctx) == MXL_RV32) {
+ memop |= MO_ATOM_NONE;
+ } else if (ctx->cfg_ptr->ext_zama16b) {
memop |= MO_ATOM_WITHIN16;
+ } else {
+ memop |= MO_ATOM_IFALIGN;
}
- decode_save_opc(ctx);
+ decode_save_opc(ctx, 0);
addr = get_address(ctx, a->rs1, a->imm);
tcg_gen_qemu_ld_i64(cpu_fpr[a->rd], addr, ctx->mem_idx, memop);
@@ -67,11 +77,15 @@ static bool trans_fsd(DisasContext *ctx, arg_fsd *a)
REQUIRE_FPU;
REQUIRE_EXT(ctx, RVD);
- if (ctx->cfg_ptr->ext_zama16b && (ctx->cur_insn_len != 2)) {
+ if (get_xl_max(ctx) == MXL_RV32) {
+ memop |= MO_ATOM_NONE;
+ } else if (ctx->cfg_ptr->ext_zama16b) {
memop |= MO_ATOM_WITHIN16;
+ } else {
+ memop |= MO_ATOM_IFALIGN;
}
- decode_save_opc(ctx);
+ decode_save_opc(ctx, 0);
addr = get_address(ctx, a->rs1, a->imm);
tcg_gen_qemu_st_i64(cpu_fpr[a->rs2], addr, ctx->mem_idx, memop);
return true;
diff --git a/target/riscv/insn_trans/trans_rvf.c.inc b/target/riscv/insn_trans/trans_rvf.c.inc
index f771aa1..ed73afe 100644
--- a/target/riscv/insn_trans/trans_rvf.c.inc
+++ b/target/riscv/insn_trans/trans_rvf.c.inc
@@ -48,11 +48,11 @@ static bool trans_flw(DisasContext *ctx, arg_flw *a)
REQUIRE_FPU;
REQUIRE_EXT(ctx, RVF);
- if (ctx->cfg_ptr->ext_zama16b && (ctx->cur_insn_len != 2)) {
+ if (ctx->cfg_ptr->ext_zama16b) {
memop |= MO_ATOM_WITHIN16;
}
- decode_save_opc(ctx);
+ decode_save_opc(ctx, 0);
addr = get_address(ctx, a->rs1, a->imm);
dest = cpu_fpr[a->rd];
tcg_gen_qemu_ld_i64(dest, addr, ctx->mem_idx, memop);
@@ -70,11 +70,11 @@ static bool trans_fsw(DisasContext *ctx, arg_fsw *a)
REQUIRE_FPU;
REQUIRE_EXT(ctx, RVF);
- if (ctx->cfg_ptr->ext_zama16b && (ctx->cur_insn_len != 2)) {
+ if (ctx->cfg_ptr->ext_zama16b) {
memop |= MO_ATOM_WITHIN16;
}
- decode_save_opc(ctx);
+ decode_save_opc(ctx, 0);
addr = get_address(ctx, a->rs1, a->imm);
tcg_gen_qemu_st_i64(cpu_fpr[a->rs2], addr, ctx->mem_idx, memop);
return true;
diff --git a/target/riscv/insn_trans/trans_rvh.c.inc b/target/riscv/insn_trans/trans_rvh.c.inc
index aa9d41c..03c6694 100644
--- a/target/riscv/insn_trans/trans_rvh.c.inc
+++ b/target/riscv/insn_trans/trans_rvh.c.inc
@@ -44,7 +44,7 @@ static bool do_hlv(DisasContext *ctx, arg_r2 *a,
TCGv dest = dest_gpr(ctx, a->rd);
TCGv addr = get_gpr(ctx, a->rs1, EXT_NONE);
- decode_save_opc(ctx);
+ decode_save_opc(ctx, 0);
func(dest, tcg_env, addr);
gen_set_gpr(ctx, a->rd, dest);
return true;
@@ -56,7 +56,7 @@ static bool do_hsv(DisasContext *ctx, arg_r2_s *a,
TCGv addr = get_gpr(ctx, a->rs1, EXT_NONE);
TCGv data = get_gpr(ctx, a->rs2, EXT_NONE);
- decode_save_opc(ctx);
+ decode_save_opc(ctx, 0);
func(tcg_env, addr, data);
return true;
}
@@ -147,7 +147,7 @@ static bool trans_hfence_gvma(DisasContext *ctx, arg_sfence_vma *a)
{
REQUIRE_EXT(ctx, RVH);
#ifndef CONFIG_USER_ONLY
- decode_save_opc(ctx);
+ decode_save_opc(ctx, 0);
gen_helper_hyp_gvma_tlb_flush(tcg_env);
return true;
#endif
@@ -158,7 +158,7 @@ static bool trans_hfence_vvma(DisasContext *ctx, arg_sfence_vma *a)
{
REQUIRE_EXT(ctx, RVH);
#ifndef CONFIG_USER_ONLY
- decode_save_opc(ctx);
+ decode_save_opc(ctx, 0);
gen_helper_hyp_tlb_flush(tcg_env);
return true;
#endif
diff --git a/target/riscv/insn_trans/trans_rvi.c.inc b/target/riscv/insn_trans/trans_rvi.c.inc
index 98e3806..b9c7160 100644
--- a/target/riscv/insn_trans/trans_rvi.c.inc
+++ b/target/riscv/insn_trans/trans_rvi.c.inc
@@ -36,6 +36,49 @@ static bool trans_lui(DisasContext *ctx, arg_lui *a)
return true;
}
+static bool trans_lpad(DisasContext *ctx, arg_lpad *a)
+{
+ /*
+     * fcfi_lp_expected can be set only if fcfi is enabled.
+     * Translate further only if fcfi_lp_expected is set.
+     * lpad comes from the NOP space anyway, so return true if
+     * fcfi_lp_expected is false.
+ */
+ if (!ctx->fcfi_lp_expected) {
+ return true;
+ }
+
+ ctx->fcfi_lp_expected = false;
+ if ((ctx->base.pc_next) & 0x3) {
+ /*
+         * Misaligned: per the spec we must raise a software-check exception.
+ */
+ tcg_gen_st_tl(tcg_constant_tl(RISCV_EXCP_SW_CHECK_FCFI_TVAL),
+ tcg_env, offsetof(CPURISCVState, sw_check_code));
+ gen_helper_raise_exception(tcg_env,
+ tcg_constant_i32(RISCV_EXCP_SW_CHECK));
+ return true;
+ }
+
+    /* Per the spec, the label check is performed only when the embedded label is non-zero. */
+ if (a->label != 0) {
+ TCGLabel *skip = gen_new_label();
+ TCGv tmp = tcg_temp_new();
+ tcg_gen_extract_tl(tmp, get_gpr(ctx, xT2, EXT_NONE), 12, 20);
+ tcg_gen_brcondi_tl(TCG_COND_EQ, tmp, a->label, skip);
+ tcg_gen_st_tl(tcg_constant_tl(RISCV_EXCP_SW_CHECK_FCFI_TVAL),
+ tcg_env, offsetof(CPURISCVState, sw_check_code));
+ gen_helper_raise_exception(tcg_env,
+ tcg_constant_i32(RISCV_EXCP_SW_CHECK));
+ gen_set_label(skip);
+ }
+
+ tcg_gen_st8_tl(tcg_constant_tl(0), tcg_env,
+ offsetof(CPURISCVState, elp));
+
+ return true;
+}
+
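
Putting the two checks together, the landing-pad rule is: lpad must sit on a 4-byte boundary, and a non-zero embedded label must equal bits [31:12] of x7/t2. A plain-C restatement (lpad_check_ok() is a hypothetical helper for illustration, not QEMU code):

    #include <stdbool.h>
    #include <stdint.h>
    #include <stdio.h>

    /* Model of the lpad checks: alignment first, then the 20-bit label
     * embedded in the instruction against bits [31:12] of x7/t2. */
    static bool lpad_check_ok(uint64_t pc, uint32_t label20, uint64_t x7)
    {
        if (pc & 0x3) {
            return false;                  /* misaligned: software-check */
        }
        if (label20 != 0) {
            uint32_t expected = (uint32_t)(x7 >> 12) & 0xfffff;
            if (expected != label20) {
                return false;              /* label mismatch: software-check */
            }
        }
        return true;                       /* elp cleared, execution continues */
    }

    int main(void)
    {
        printf("%d\n", lpad_check_ok(0x8000, 0xabcde, 0xabcdeULL << 12)); /* 1 */
        printf("%d\n", lpad_check_ok(0x8002, 0, 0));                      /* 0 */
        return 0;
    }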
static bool trans_auipc(DisasContext *ctx, arg_auipc *a)
{
TCGv target_pc = dest_gpr(ctx, a->rd);
@@ -50,6 +93,51 @@ static bool trans_jal(DisasContext *ctx, arg_jal *a)
return true;
}
+#ifndef CONFIG_USER_ONLY
+/*
+ * Indirect calls
+ * - jalr x1, rs where rs != x5;
+ * - jalr x5, rs where rs != x1;
+ * - c.jalr rs1 where rs1 != x5;
+ *
+ * Indirect jumps
+ * - jalr x0, rs where rs != x1 and rs != x5;
+ * - c.jr rs1 where rs1 != x1 and rs1 != x5.
+ *
+ * Returns
+ * - jalr rd, rs where (rs == x1 or rs == x5) and rd != x1 and rd != x5;
+ * - c.jr rs1 where rs1 == x1 or rs1 == x5.
+ *
+ * Co-routine swap
+ * - jalr x1, x5;
+ * - jalr x5, x1;
+ * - c.jalr x5.
+ *
+ * Other indirect jumps
+ * - jalr rd, rs where rs != x1, rs != x5, rd != x0, rd != x1 and rd != x5.
+ */
+static void gen_ctr_jalr(DisasContext *ctx, arg_jalr *a, TCGv dest)
+{
+ TCGv src = tcg_temp_new();
+ TCGv type;
+
+ if ((a->rd == 1 && a->rs1 != 5) || (a->rd == 5 && a->rs1 != 1)) {
+ type = tcg_constant_tl(CTRDATA_TYPE_INDIRECT_CALL);
+ } else if (a->rd == 0 && a->rs1 != 1 && a->rs1 != 5) {
+ type = tcg_constant_tl(CTRDATA_TYPE_INDIRECT_JUMP);
+ } else if ((a->rs1 == 1 || a->rs1 == 5) && (a->rd != 1 && a->rd != 5)) {
+ type = tcg_constant_tl(CTRDATA_TYPE_RETURN);
+ } else if ((a->rs1 == 1 && a->rd == 5) || (a->rs1 == 5 && a->rd == 1)) {
+ type = tcg_constant_tl(CTRDATA_TYPE_CO_ROUTINE_SWAP);
+ } else {
+ type = tcg_constant_tl(CTRDATA_TYPE_OTHER_INDIRECT_JUMP);
+ }
+
+ gen_pc_plus_diff(src, ctx, 0);
+ gen_helper_ctr_add_entry(tcg_env, src, dest, type);
+}
+#endif
+
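
The rd/rs1 cases enumerated in the comment reduce to a short decision ladder; as a rough cross-check, here is the same classification as a standalone C function (the enum names are shortened from the CTRDATA_TYPE_* constants):

    #include <stdio.h>

    enum ctr_type {
        CTR_INDIRECT_CALL, CTR_INDIRECT_JUMP, CTR_RETURN,
        CTR_CO_ROUTINE_SWAP, CTR_OTHER_INDIRECT_JUMP
    };

    /* Standalone restatement of the jalr classification above
     * (rd/rs1 are architectural register numbers; x1 = ra, x5 = t0). */
    static enum ctr_type classify_jalr(int rd, int rs1)
    {
        if ((rd == 1 && rs1 != 5) || (rd == 5 && rs1 != 1)) {
            return CTR_INDIRECT_CALL;
        } else if (rd == 0 && rs1 != 1 && rs1 != 5) {
            return CTR_INDIRECT_JUMP;
        } else if ((rs1 == 1 || rs1 == 5) && rd != 1 && rd != 5) {
            return CTR_RETURN;
        } else if ((rs1 == 1 && rd == 5) || (rs1 == 5 && rd == 1)) {
            return CTR_CO_ROUTINE_SWAP;
        }
        return CTR_OTHER_INDIRECT_JUMP;
    }

    int main(void)
    {
        printf("%d\n", classify_jalr(0, 1));   /* ret:         CTR_RETURN */
        printf("%d\n", classify_jalr(1, 6));   /* jalr ra, t1: CTR_INDIRECT_CALL */
        return 0;
    }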
static bool trans_jalr(DisasContext *ctx, arg_jalr *a)
{
TCGLabel *misaligned = NULL;
@@ -63,7 +151,9 @@ static bool trans_jalr(DisasContext *ctx, arg_jalr *a)
tcg_gen_ext32s_tl(target_pc, target_pc);
}
- if (!has_ext(ctx, RVC) && !ctx->cfg_ptr->ext_zca) {
+ if (!riscv_cpu_allow_16bit_insn(ctx->cfg_ptr,
+ ctx->priv_ver,
+ ctx->misa_ext)) {
TCGv t0 = tcg_temp_new();
misaligned = gen_new_label();
@@ -74,7 +164,25 @@ static bool trans_jalr(DisasContext *ctx, arg_jalr *a)
gen_pc_plus_diff(succ_pc, ctx, ctx->cur_insn_len);
gen_set_gpr(ctx, a->rd, succ_pc);
+#ifndef CONFIG_USER_ONLY
+ if (ctx->cfg_ptr->ext_smctr || ctx->cfg_ptr->ext_ssctr) {
+ gen_ctr_jalr(ctx, a, target_pc);
+ }
+#endif
+
tcg_gen_mov_tl(cpu_pc, target_pc);
+ if (ctx->fcfi_enabled) {
+ /*
+         * Returns from functions (i.e. rs1 == xRA || rs1 == xT0) are not
+         * tracked. zicfilp also introduces the sw guarded branch; sw guarded
+         * branches are not tracked either. rs1 == xT2 is a sw guarded branch.
+ */
+ if (a->rs1 != xRA && a->rs1 != xT0 && a->rs1 != xT2) {
+ tcg_gen_st8_tl(tcg_constant_tl(1),
+ tcg_env, offsetof(CPURISCVState, elp));
+ }
+ }
+
lookup_and_goto_ptr(ctx);
if (misaligned) {
@@ -176,18 +284,44 @@ static bool gen_branch(DisasContext *ctx, arg_b *a, TCGCond cond)
} else {
tcg_gen_brcond_tl(cond, src1, src2, l);
}
+
+#ifndef CONFIG_USER_ONLY
+ if (ctx->cfg_ptr->ext_smctr || ctx->cfg_ptr->ext_ssctr) {
+ TCGv type = tcg_constant_tl(CTRDATA_TYPE_NONTAKEN_BRANCH);
+ TCGv dest = tcg_temp_new();
+ TCGv src = tcg_temp_new();
+
+ gen_pc_plus_diff(src, ctx, 0);
+ gen_pc_plus_diff(dest, ctx, ctx->cur_insn_len);
+ gen_helper_ctr_add_entry(tcg_env, src, dest, type);
+ }
+#endif
+
gen_goto_tb(ctx, 1, ctx->cur_insn_len);
ctx->pc_save = orig_pc_save;
gen_set_label(l); /* branch taken */
- if (!has_ext(ctx, RVC) && !ctx->cfg_ptr->ext_zca &&
+ if (!riscv_cpu_allow_16bit_insn(ctx->cfg_ptr,
+ ctx->priv_ver,
+ ctx->misa_ext) &&
(a->imm & 0x3)) {
/* misaligned */
TCGv target_pc = tcg_temp_new();
gen_pc_plus_diff(target_pc, ctx, a->imm);
gen_exception_inst_addr_mis(ctx, target_pc);
} else {
+#ifndef CONFIG_USER_ONLY
+ if (ctx->cfg_ptr->ext_smctr || ctx->cfg_ptr->ext_ssctr) {
+ TCGv type = tcg_constant_tl(CTRDATA_TYPE_TAKEN_BRANCH);
+ TCGv dest = tcg_temp_new();
+ TCGv src = tcg_temp_new();
+
+ gen_pc_plus_diff(src, ctx, 0);
+ gen_pc_plus_diff(dest, ctx, a->imm);
+ gen_helper_ctr_add_entry(tcg_env, src, dest, type);
+ }
+#endif
gen_goto_tb(ctx, 0, a->imm);
}
ctx->pc_save = -1;
@@ -268,10 +402,10 @@ static bool gen_load(DisasContext *ctx, arg_lb *a, MemOp memop)
{
bool out;
- if (ctx->cfg_ptr->ext_zama16b && (ctx->cur_insn_len != 2)) {
+ if (ctx->cfg_ptr->ext_zama16b) {
memop |= MO_ATOM_WITHIN16;
}
- decode_save_opc(ctx);
+ decode_save_opc(ctx, 0);
if (get_xl(ctx) == MXL_RV128) {
out = gen_load_i128(ctx, a, memop);
} else {
@@ -369,10 +503,10 @@ static bool gen_store_i128(DisasContext *ctx, arg_sb *a, MemOp memop)
static bool gen_store(DisasContext *ctx, arg_sb *a, MemOp memop)
{
- if (ctx->cfg_ptr->ext_zama16b && (ctx->cur_insn_len != 2)) {
+ if (ctx->cfg_ptr->ext_zama16b) {
memop |= MO_ATOM_WITHIN16;
}
- decode_save_opc(ctx);
+ decode_save_opc(ctx, 0);
if (get_xl(ctx) == MXL_RV128) {
return gen_store_i128(ctx, a, memop);
} else {
@@ -834,7 +968,7 @@ static bool trans_fence_i(DisasContext *ctx, arg_fence_i *a)
static bool do_csr_post(DisasContext *ctx)
{
/* The helper may raise ILLEGAL_INSN -- record binv for unwind. */
- decode_save_opc(ctx);
+ decode_save_opc(ctx, 0);
/* We may have changed important cpu state -- exit to main loop. */
gen_update_pc(ctx, ctx->cur_insn_len);
exit_tb(ctx);
diff --git a/target/riscv/insn_trans/trans_rvv.c.inc b/target/riscv/insn_trans/trans_rvv.c.inc
index 3a3896b..2b6077a 100644
--- a/target/riscv/insn_trans/trans_rvv.c.inc
+++ b/target/riscv/insn_trans/trans_rvv.c.inc
@@ -100,10 +100,33 @@ static bool require_scale_rvfmin(DisasContext *s)
}
}
-/* Destination vector register group cannot overlap source mask register. */
-static bool require_vm(int vm, int vd)
+/*
+ * Source and destination vector register groups cannot overlap source mask
+ * register:
+ *
+ * A vector register cannot be used to provide source operands with more than
+ * one EEW for a single instruction. A mask register source is considered to
+ * have EEW=1 for this constraint. An encoding that would result in the same
+ * vector register being read with two or more different EEWs, including when
+ * the vector register appears at different positions within two or more vector
+ * register groups, is reserved.
+ * (Section 5.2)
+ *
+ * A destination vector register group can overlap a source vector
+ * register group only if one of the following holds:
+ * 1. The destination EEW equals the source EEW.
+ * 2. The destination EEW is smaller than the source EEW and the overlap
+ * is in the lowest-numbered part of the source register group.
+ * 3. The destination EEW is greater than the source EEW, the source EMUL
+ * is at least 1, and the overlap is in the highest-numbered part of
+ * the destination register group.
+ * For the purpose of determining register group overlap constraints, mask
+ * elements have EEW=1.
+ * (Section 5.2)
+ */
+static bool require_vm(int vm, int v)
{
- return (vm != 0 || vd != 0);
+ return (vm != 0 || v != 0);
}
static bool require_nf(int vd, int nf, int lmul)
@@ -356,11 +379,41 @@ static bool vext_check_ld_index(DisasContext *s, int vd, int vs2,
return ret;
}
+/*
+ * Check whether a vector register is used to provide source operands with
+ * more than one EEW for the vector instruction.
+ * Returns true if the instruction has a valid encoding.
+ * Returns false if the encoding violates the mismatched-input-EEW constraint.
+ */
+static bool vext_check_input_eew(DisasContext *s, int vs1, uint8_t eew_vs1,
+ int vs2, uint8_t eew_vs2, int vm)
+{
+ bool is_valid = true;
+ int8_t emul_vs1 = eew_vs1 - s->sew + s->lmul;
+ int8_t emul_vs2 = eew_vs2 - s->sew + s->lmul;
+
+    /* When vm is 0, the vs1 and vs2 (EEW != 1) groups can't overlap v0 (EEW = 1) */
+ if ((vs1 != -1 && !require_vm(vm, vs1)) ||
+ (vs2 != -1 && !require_vm(vm, vs2))) {
+ is_valid = false;
+ }
+
+ /* When eew_vs1 != eew_vs2, check whether vs1 and vs2 are overlapped */
+ if ((vs1 != -1 && vs2 != -1) && (eew_vs1 != eew_vs2) &&
+ is_overlapped(vs1, 1 << MAX(emul_vs1, 0),
+ vs2, 1 << MAX(emul_vs2, 0))) {
+ is_valid = false;
+ }
+
+ return is_valid;
+}
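
The arithmetic here is all in log2 terms: EMUL = EEW - SEW + LMUL, and a register group with EMUL = e spans 1 << MAX(e, 0) registers. A minimal sketch of the interval test this relies on, in the style of the is_overlapped() helper, with a worked example:

    #include <stdbool.h>
    #include <stdio.h>

    #define MAX(a, b) ((a) > (b) ? (a) : (b))
    #define MIN(a, b) ((a) < (b) ? (a) : (b))

    /* Register groups [astart, astart+asize) and [bstart, bstart+bsize)
     * overlap iff their combined span is shorter than the sum of sizes. */
    static bool groups_overlap(int astart, int asize, int bstart, int bsize)
    {
        return MAX(astart + asize, bstart + bsize) - MIN(astart, bstart)
               < asize + bsize;
    }

    int main(void)
    {
        /* Worked example: sew = 16 (MO_16 = 1), lmul = m1 (0). A source read
         * at eew = 32 has emul = eew - sew + lmul = 1, i.e. a 2-register
         * group: v2 read as {v2,v3} overlaps v3 read as a 1-register group. */
        printf("%d\n", groups_overlap(2, 2, 3, 1));   /* 1: reserved encoding */
        printf("%d\n", groups_overlap(2, 2, 4, 2));   /* 0: valid */
        return 0;
    }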
+
static bool vext_check_ss(DisasContext *s, int vd, int vs, int vm)
{
return require_vm(vm, vd) &&
require_align(vd, s->lmul) &&
- require_align(vs, s->lmul);
+ require_align(vs, s->lmul) &&
+ vext_check_input_eew(s, vs, s->sew, -1, s->sew, vm);
}
/*
@@ -379,6 +432,7 @@ static bool vext_check_ss(DisasContext *s, int vd, int vs, int vm)
static bool vext_check_sss(DisasContext *s, int vd, int vs1, int vs2, int vm)
{
return vext_check_ss(s, vd, vs2, vm) &&
+ vext_check_input_eew(s, vs1, s->sew, vs2, s->sew, vm) &&
require_align(vs1, s->lmul);
}
@@ -474,6 +528,7 @@ static bool vext_narrow_check_common(DisasContext *s, int vd, int vs2,
static bool vext_check_ds(DisasContext *s, int vd, int vs, int vm)
{
return vext_wide_check_common(s, vd, vm) &&
+ vext_check_input_eew(s, vs, s->sew, -1, 0, vm) &&
require_align(vs, s->lmul) &&
require_noover(vd, s->lmul + 1, vs, s->lmul);
}
@@ -481,6 +536,7 @@ static bool vext_check_ds(DisasContext *s, int vd, int vs, int vm)
static bool vext_check_dd(DisasContext *s, int vd, int vs, int vm)
{
return vext_wide_check_common(s, vd, vm) &&
+ vext_check_input_eew(s, vs, s->sew + 1, -1, 0, vm) &&
require_align(vs, s->lmul + 1);
}
@@ -499,6 +555,7 @@ static bool vext_check_dd(DisasContext *s, int vd, int vs, int vm)
static bool vext_check_dss(DisasContext *s, int vd, int vs1, int vs2, int vm)
{
return vext_check_ds(s, vd, vs2, vm) &&
+ vext_check_input_eew(s, vs1, s->sew, vs2, s->sew, vm) &&
require_align(vs1, s->lmul) &&
require_noover(vd, s->lmul + 1, vs1, s->lmul);
}
@@ -521,12 +578,14 @@ static bool vext_check_dss(DisasContext *s, int vd, int vs1, int vs2, int vm)
static bool vext_check_dds(DisasContext *s, int vd, int vs1, int vs2, int vm)
{
return vext_check_ds(s, vd, vs1, vm) &&
+ vext_check_input_eew(s, vs1, s->sew, vs2, s->sew + 1, vm) &&
require_align(vs2, s->lmul + 1);
}
static bool vext_check_sd(DisasContext *s, int vd, int vs, int vm)
{
- bool ret = vext_narrow_check_common(s, vd, vs, vm);
+ bool ret = vext_narrow_check_common(s, vd, vs, vm) &&
+ vext_check_input_eew(s, vs, s->sew + 1, -1, 0, vm);
if (vd != vs) {
ret &= require_noover(vd, s->lmul, vs, s->lmul + 1);
}
@@ -549,6 +608,7 @@ static bool vext_check_sd(DisasContext *s, int vd, int vs, int vm)
static bool vext_check_sds(DisasContext *s, int vd, int vs1, int vs2, int vm)
{
return vext_check_sd(s, vd, vs2, vm) &&
+ vext_check_input_eew(s, vs1, s->sew, vs2, s->sew + 1, vm) &&
require_align(vs1, s->lmul);
}
@@ -584,7 +644,9 @@ static bool vext_check_slide(DisasContext *s, int vd, int vs2,
{
bool ret = require_align(vs2, s->lmul) &&
require_align(vd, s->lmul) &&
- require_vm(vm, vd);
+ require_vm(vm, vd) &&
+ vext_check_input_eew(s, -1, 0, vs2, s->sew, vm);
+
if (is_over) {
ret &= (vd != vs2);
}
@@ -770,6 +832,7 @@ static bool ld_us_mask_op(DisasContext *s, arg_vlm_v *a, uint8_t eew)
/* Mask destination register are always tail-agnostic */
data = FIELD_DP32(data, VDATA, VTA, s->cfg_vta_all_1s);
data = FIELD_DP32(data, VDATA, VMA, s->vma);
+ data = FIELD_DP32(data, VDATA, VM, 1);
return ldst_us_trans(a->rd, a->rs1, data, fn, s, false);
}
@@ -787,6 +850,7 @@ static bool st_us_mask_op(DisasContext *s, arg_vsm_v *a, uint8_t eew)
/* EMUL = 1, NFIELDS = 1 */
data = FIELD_DP32(data, VDATA, LMUL, 0);
data = FIELD_DP32(data, VDATA, NF, 1);
+ data = FIELD_DP32(data, VDATA, VM, 1);
return ldst_us_trans(a->rd, a->rs1, data, fn, s, true);
}
@@ -800,32 +864,286 @@ GEN_VEXT_TRANS(vlm_v, MO_8, vlm_v, ld_us_mask_op, ld_us_mask_check)
GEN_VEXT_TRANS(vsm_v, MO_8, vsm_v, st_us_mask_op, st_us_mask_check)
/*
- *** stride load and store
+ * MAXSZ returns the maximum vector size, in bytes, that can be operated
+ * on; it is used in GVEC IR when the vl_eq_vlmax flag is set to true
+ * to accelerate vector operations.
+ */
+static inline uint32_t MAXSZ(DisasContext *s)
+{
+ int max_sz = s->cfg_ptr->vlenb << 3;
+ return max_sz >> (3 - s->lmul);
+}
+
+static inline uint32_t get_log2(uint32_t a)
+{
+ uint32_t i = 0;
+ for (; a > 0;) {
+ a >>= 1;
+ i++;
+ }
+    /* 'a' is a power of two at every call site, so this is its exact log2 */
+    return i - 1;
+}
+
+typedef void gen_tl_ldst(TCGv, TCGv_ptr, tcg_target_long);
+
+/*
+ * Simulate the strided load/store main loop:
+ *
+ * for (i = env->vstart; i < env->vl; env->vstart = ++i) {
+ * k = 0;
+ * while (k < nf) {
+ * if (!vm && !vext_elem_mask(v0, i)) {
+ * vext_set_elems_1s(vd, vma, (i + k * max_elems) * esz,
+ * (i + k * max_elems + 1) * esz);
+ * k++;
+ * continue;
+ * }
+ * target_ulong addr = base + stride * i + (k << log2_esz);
+ * ldst(env, adjust_addr(env, addr), i + k * max_elems, vd, ra);
+ * k++;
+ * }
+ * }
+ */
+static void gen_ldst_stride_main_loop(DisasContext *s, TCGv dest, uint32_t rs1,
+ uint32_t rs2, uint32_t vm, uint32_t nf,
+ gen_tl_ldst *ld_fn, gen_tl_ldst *st_fn,
+ bool is_load)
+{
+ TCGv addr = tcg_temp_new();
+ TCGv base = get_gpr(s, rs1, EXT_NONE);
+ TCGv stride = get_gpr(s, rs2, EXT_NONE);
+
+ TCGv i = tcg_temp_new();
+ TCGv i_esz = tcg_temp_new();
+ TCGv k = tcg_temp_new();
+ TCGv k_esz = tcg_temp_new();
+ TCGv k_max = tcg_temp_new();
+ TCGv mask = tcg_temp_new();
+ TCGv mask_offs = tcg_temp_new();
+ TCGv mask_offs_64 = tcg_temp_new();
+ TCGv mask_elem = tcg_temp_new();
+ TCGv mask_offs_rem = tcg_temp_new();
+ TCGv vreg = tcg_temp_new();
+ TCGv dest_offs = tcg_temp_new();
+ TCGv stride_offs = tcg_temp_new();
+
+ uint32_t max_elems = MAXSZ(s) >> s->sew;
+
+ TCGLabel *start = gen_new_label();
+ TCGLabel *end = gen_new_label();
+ TCGLabel *start_k = gen_new_label();
+ TCGLabel *inc_k = gen_new_label();
+ TCGLabel *end_k = gen_new_label();
+
+ MemOp atomicity = MO_ATOM_NONE;
+ if (s->sew == 0) {
+ atomicity = MO_ATOM_NONE;
+ } else {
+ atomicity = MO_ATOM_IFALIGN_PAIR;
+ }
+
+ mark_vs_dirty(s);
+
+ tcg_gen_addi_tl(mask, (TCGv)tcg_env, vreg_ofs(s, 0));
+
+ /* Start of outer loop. */
+ tcg_gen_mov_tl(i, cpu_vstart);
+ gen_set_label(start);
+ tcg_gen_brcond_tl(TCG_COND_GE, i, cpu_vl, end);
+ tcg_gen_shli_tl(i_esz, i, s->sew);
+ /* Start of inner loop. */
+ tcg_gen_movi_tl(k, 0);
+ gen_set_label(start_k);
+ tcg_gen_brcond_tl(TCG_COND_GE, k, tcg_constant_tl(nf), end_k);
+ /*
+ * If we are in mask agnostic regime and the operation is not unmasked we
+ * set the inactive elements to 1.
+ */
+ if (!vm && s->vma) {
+ TCGLabel *active_element = gen_new_label();
+ /* (i + k * max_elems) * esz */
+ tcg_gen_shli_tl(mask_offs, k, get_log2(max_elems << s->sew));
+ tcg_gen_add_tl(mask_offs, mask_offs, i_esz);
+
+ /*
+ * Check whether the i bit of the mask is 0 or 1.
+ *
+ * static inline int vext_elem_mask(void *v0, int index)
+ * {
+ * int idx = index / 64;
+ * int pos = index % 64;
+ * return (((uint64_t *)v0)[idx] >> pos) & 1;
+ * }
+ */
+ tcg_gen_shri_tl(mask_offs_64, mask_offs, 3);
+ tcg_gen_add_tl(mask_offs_64, mask_offs_64, mask);
+ tcg_gen_ld_i64((TCGv_i64)mask_elem, (TCGv_ptr)mask_offs_64, 0);
+ tcg_gen_rem_tl(mask_offs_rem, mask_offs, tcg_constant_tl(8));
+ tcg_gen_shr_tl(mask_elem, mask_elem, mask_offs_rem);
+ tcg_gen_andi_tl(mask_elem, mask_elem, 1);
+ tcg_gen_brcond_tl(TCG_COND_NE, mask_elem, tcg_constant_tl(0),
+ active_element);
+ /*
+ * Set masked-off elements in the destination vector register to 1s.
+ * Store instructions simply skip this bit as memory ops access memory
+ * only for active elements.
+ */
+ if (is_load) {
+ tcg_gen_shli_tl(mask_offs, mask_offs, s->sew);
+ tcg_gen_add_tl(mask_offs, mask_offs, dest);
+ st_fn(tcg_constant_tl(-1), (TCGv_ptr)mask_offs, 0);
+ }
+ tcg_gen_br(inc_k);
+ gen_set_label(active_element);
+ }
+ /*
+ * The element is active, calculate the address with stride:
+ * target_ulong addr = base + stride * i + (k << log2_esz);
+ */
+ tcg_gen_mul_tl(stride_offs, stride, i);
+ tcg_gen_shli_tl(k_esz, k, s->sew);
+ tcg_gen_add_tl(stride_offs, stride_offs, k_esz);
+ tcg_gen_add_tl(addr, base, stride_offs);
+ /* Calculate the offset in the dst/src vector register. */
+ tcg_gen_shli_tl(k_max, k, get_log2(max_elems));
+ tcg_gen_add_tl(dest_offs, i, k_max);
+ tcg_gen_shli_tl(dest_offs, dest_offs, s->sew);
+ tcg_gen_add_tl(dest_offs, dest_offs, dest);
+ if (is_load) {
+ tcg_gen_qemu_ld_tl(vreg, addr, s->mem_idx, MO_LE | s->sew | atomicity);
+ st_fn((TCGv)vreg, (TCGv_ptr)dest_offs, 0);
+ } else {
+ ld_fn((TCGv)vreg, (TCGv_ptr)dest_offs, 0);
+ tcg_gen_qemu_st_tl(vreg, addr, s->mem_idx, MO_LE | s->sew | atomicity);
+ }
+ /*
+ * We don't execute the load/store above if the element was inactive.
+ * We jump instead directly to incrementing k and continuing the loop.
+ */
+ if (!vm && s->vma) {
+ gen_set_label(inc_k);
+ }
+ tcg_gen_addi_tl(k, k, 1);
+ tcg_gen_br(start_k);
+ /* End of the inner loop. */
+ gen_set_label(end_k);
+
+ tcg_gen_addi_tl(i, i, 1);
+ tcg_gen_mov_tl(cpu_vstart, i);
+ tcg_gen_br(start);
+
+ /* End of the outer loop. */
+ gen_set_label(end);
+}
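
To make the pseudocode above concrete, here is a host-side C model of the same loop over a flat memory buffer (a sketch of the semantics only; strided_load_model() and its flat-memory assumption are illustrative, not QEMU internals):

    #include <stdbool.h>
    #include <stddef.h>
    #include <stdint.h>
    #include <string.h>

    /* Host-side model of the strided-load main loop: per element i and
     * segment k, either copy esz bytes from base + i*stride + k*esz, or,
     * when the element is masked off and vma is set, fill it with 1s. */
    static void strided_load_model(uint8_t *vd, const uint8_t *mem, size_t base,
                                   ptrdiff_t stride, const uint8_t *v0, bool vm,
                                   bool vma, unsigned vstart, unsigned vl,
                                   unsigned nf, unsigned max_elems, unsigned esz)
    {
        for (unsigned i = vstart; i < vl; i++) {
            bool active = vm || ((v0[i / 8] >> (i % 8)) & 1);
            for (unsigned k = 0; k < nf; k++) {
                unsigned elem = i + k * max_elems;
                if (!active) {
                    if (vma) {
                        /* mask-agnostic: write all-1s to the inactive element */
                        memset(vd + elem * esz, 0xff, esz);
                    }
                    continue;
                }
                size_t addr = base + (size_t)((ptrdiff_t)i * stride) + k * esz;
                memcpy(vd + elem * esz, mem + addr, esz);
            }
        }
    }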
+
+/*
+ * Set the tail bytes of the strided loads/stores to 1:
+ *
+ * for (k = 0; k < nf; ++k) {
+ * cnt = (k * max_elems + vl) * esz;
+ * tot = (k * max_elems + max_elems) * esz;
+ * for (i = cnt; i < tot; i += esz) {
+ * store_1s(-1, vd[vl+i]);
+ * }
+ * }
*/
-typedef void gen_helper_ldst_stride(TCGv_ptr, TCGv_ptr, TCGv,
- TCGv, TCGv_env, TCGv_i32);
+static void gen_ldst_stride_tail_loop(DisasContext *s, TCGv dest, uint32_t nf,
+ gen_tl_ldst *st_fn)
+{
+ TCGv i = tcg_temp_new();
+ TCGv k = tcg_temp_new();
+ TCGv tail_cnt = tcg_temp_new();
+ TCGv tail_tot = tcg_temp_new();
+ TCGv tail_addr = tcg_temp_new();
+
+ TCGLabel *start = gen_new_label();
+ TCGLabel *end = gen_new_label();
+ TCGLabel *start_i = gen_new_label();
+ TCGLabel *end_i = gen_new_label();
+
+ uint32_t max_elems_b = MAXSZ(s);
+ uint32_t esz = 1 << s->sew;
+
+ /* Start of the outer loop. */
+ tcg_gen_movi_tl(k, 0);
+ tcg_gen_shli_tl(tail_cnt, cpu_vl, s->sew);
+ tcg_gen_movi_tl(tail_tot, max_elems_b);
+ tcg_gen_add_tl(tail_addr, dest, tail_cnt);
+ gen_set_label(start);
+ tcg_gen_brcond_tl(TCG_COND_GE, k, tcg_constant_tl(nf), end);
+ /* Start of the inner loop. */
+ tcg_gen_mov_tl(i, tail_cnt);
+ gen_set_label(start_i);
+ tcg_gen_brcond_tl(TCG_COND_GE, i, tail_tot, end_i);
+ /* store_1s(-1, vd[vl+i]); */
+ st_fn(tcg_constant_tl(-1), (TCGv_ptr)tail_addr, 0);
+ tcg_gen_addi_tl(tail_addr, tail_addr, esz);
+ tcg_gen_addi_tl(i, i, esz);
+ tcg_gen_br(start_i);
+ /* End of the inner loop. */
+ gen_set_label(end_i);
+ /* Update the counts */
+ tcg_gen_addi_tl(tail_cnt, tail_cnt, max_elems_b);
+ tcg_gen_addi_tl(tail_tot, tail_cnt, max_elems_b);
+ tcg_gen_addi_tl(k, k, 1);
+ tcg_gen_br(start);
+ /* End of the outer loop. */
+ gen_set_label(end);
+}
static bool ldst_stride_trans(uint32_t vd, uint32_t rs1, uint32_t rs2,
- uint32_t data, gen_helper_ldst_stride *fn,
- DisasContext *s)
+ uint32_t data, DisasContext *s, bool is_load)
{
- TCGv_ptr dest, mask;
- TCGv base, stride;
- TCGv_i32 desc;
+ if (!s->vstart_eq_zero) {
+ return false;
+ }
- dest = tcg_temp_new_ptr();
- mask = tcg_temp_new_ptr();
- base = get_gpr(s, rs1, EXT_NONE);
- stride = get_gpr(s, rs2, EXT_NONE);
- desc = tcg_constant_i32(simd_desc(s->cfg_ptr->vlenb,
- s->cfg_ptr->vlenb, data));
+ TCGv dest = tcg_temp_new();
- tcg_gen_addi_ptr(dest, tcg_env, vreg_ofs(s, vd));
- tcg_gen_addi_ptr(mask, tcg_env, vreg_ofs(s, 0));
+ uint32_t nf = FIELD_EX32(data, VDATA, NF);
+ uint32_t vm = FIELD_EX32(data, VDATA, VM);
+
+ /* Destination register and mask register */
+ tcg_gen_addi_tl(dest, (TCGv)tcg_env, vreg_ofs(s, vd));
+
+ /*
+     * Select the appropriate load/store to retrieve data from the vector
+ * register given a specific sew.
+ */
+ static gen_tl_ldst * const ld_fns[4] = {
+ tcg_gen_ld8u_tl, tcg_gen_ld16u_tl,
+ tcg_gen_ld32u_tl, tcg_gen_ld_tl
+ };
+
+ static gen_tl_ldst * const st_fns[4] = {
+ tcg_gen_st8_tl, tcg_gen_st16_tl,
+ tcg_gen_st32_tl, tcg_gen_st_tl
+ };
+
+ gen_tl_ldst *ld_fn = ld_fns[s->sew];
+ gen_tl_ldst *st_fn = st_fns[s->sew];
+
+ if (ld_fn == NULL || st_fn == NULL) {
+ return false;
+ }
mark_vs_dirty(s);
- fn(dest, mask, base, stride, tcg_env, desc);
+ gen_ldst_stride_main_loop(s, dest, rs1, rs2, vm, nf, ld_fn, st_fn, is_load);
+
+ tcg_gen_movi_tl(cpu_vstart, 0);
+
+    /* Set the tail bytes to 1 if tail agnostic. */
+ if (s->vta != 0 && is_load) {
+ gen_ldst_stride_tail_loop(s, dest, nf, st_fn);
+ }
finalize_rvv_inst(s);
return true;
@@ -834,16 +1152,6 @@ static bool ldst_stride_trans(uint32_t vd, uint32_t rs1, uint32_t rs2,
static bool ld_stride_op(DisasContext *s, arg_rnfvm *a, uint8_t eew)
{
uint32_t data = 0;
- gen_helper_ldst_stride *fn;
- static gen_helper_ldst_stride * const fns[4] = {
- gen_helper_vlse8_v, gen_helper_vlse16_v,
- gen_helper_vlse32_v, gen_helper_vlse64_v
- };
-
- fn = fns[eew];
- if (fn == NULL) {
- return false;
- }
uint8_t emul = vext_get_emul(s, eew);
data = FIELD_DP32(data, VDATA, VM, a->vm);
@@ -851,7 +1159,7 @@ static bool ld_stride_op(DisasContext *s, arg_rnfvm *a, uint8_t eew)
data = FIELD_DP32(data, VDATA, NF, a->nf);
data = FIELD_DP32(data, VDATA, VTA, s->vta);
data = FIELD_DP32(data, VDATA, VMA, s->vma);
- return ldst_stride_trans(a->rd, a->rs1, a->rs2, data, fn, s);
+ return ldst_stride_trans(a->rd, a->rs1, a->rs2, data, s, true);
}
static bool ld_stride_check(DisasContext *s, arg_rnfvm* a, uint8_t eew)
@@ -869,23 +1177,13 @@ GEN_VEXT_TRANS(vlse64_v, MO_64, rnfvm, ld_stride_op, ld_stride_check)
static bool st_stride_op(DisasContext *s, arg_rnfvm *a, uint8_t eew)
{
uint32_t data = 0;
- gen_helper_ldst_stride *fn;
- static gen_helper_ldst_stride * const fns[4] = {
- /* masked stride store */
- gen_helper_vsse8_v, gen_helper_vsse16_v,
- gen_helper_vsse32_v, gen_helper_vsse64_v
- };
uint8_t emul = vext_get_emul(s, eew);
data = FIELD_DP32(data, VDATA, VM, a->vm);
data = FIELD_DP32(data, VDATA, LMUL, emul);
data = FIELD_DP32(data, VDATA, NF, a->nf);
- fn = fns[eew];
- if (fn == NULL) {
- return false;
- }
- return ldst_stride_trans(a->rd, a->rs1, a->rs2, data, fn, s);
+ return ldst_stride_trans(a->rd, a->rs1, a->rs2, data, s, false);
}
static bool st_stride_check(DisasContext *s, arg_rnfvm* a, uint8_t eew)
@@ -979,7 +1277,8 @@ static bool ld_index_check(DisasContext *s, arg_rnfvm* a, uint8_t eew)
{
return require_rvv(s) &&
vext_check_isa_ill(s) &&
- vext_check_ld_index(s, a->rd, a->rs2, a->nf, a->vm, eew);
+ vext_check_ld_index(s, a->rd, a->rs2, a->nf, a->vm, eew) &&
+ vext_check_input_eew(s, -1, 0, a->rs2, eew, a->vm);
}
GEN_VEXT_TRANS(vlxei8_v, MO_8, rnfvm, ld_index_op, ld_index_check)
@@ -1031,7 +1330,8 @@ static bool st_index_check(DisasContext *s, arg_rnfvm* a, uint8_t eew)
{
return require_rvv(s) &&
vext_check_isa_ill(s) &&
- vext_check_st_index(s, a->rd, a->rs2, a->nf, eew);
+ vext_check_st_index(s, a->rd, a->rs2, a->nf, eew) &&
+ vext_check_input_eew(s, a->rd, s->sew, a->rs2, eew, a->vm);
}
GEN_VEXT_TRANS(vsxei8_v, MO_8, rnfvm, st_index_op, st_index_check)
@@ -1098,24 +1398,86 @@ GEN_VEXT_TRANS(vle64ff_v, MO_64, r2nfvm, ldff_op, ld_us_check)
typedef void gen_helper_ldst_whole(TCGv_ptr, TCGv, TCGv_env, TCGv_i32);
static bool ldst_whole_trans(uint32_t vd, uint32_t rs1, uint32_t nf,
- gen_helper_ldst_whole *fn,
- DisasContext *s)
+ uint32_t log2_esz, gen_helper_ldst_whole *fn,
+ DisasContext *s, bool is_load)
{
- TCGv_ptr dest;
- TCGv base;
- TCGv_i32 desc;
-
- uint32_t data = FIELD_DP32(0, VDATA, NF, nf);
- dest = tcg_temp_new_ptr();
- desc = tcg_constant_i32(simd_desc(s->cfg_ptr->vlenb,
- s->cfg_ptr->vlenb, data));
-
- base = get_gpr(s, rs1, EXT_NONE);
- tcg_gen_addi_ptr(dest, tcg_env, vreg_ofs(s, vd));
-
mark_vs_dirty(s);
- fn(dest, base, tcg_env, desc);
+ /*
+ * Load/store multiple bytes per iteration.
+ * When possible do this atomically.
+ * Update vstart with the number of processed elements.
+ * Use the helper function if either:
+ * - vstart is not 0.
+ * - the target has 32 bit registers and we are loading/storing 64 bit long
+ * elements. This is to ensure that we process every element with a single
+ * memory instruction.
+ */
+
+ bool use_helper_fn = !(s->vstart_eq_zero) ||
+ (TCG_TARGET_REG_BITS == 32 && log2_esz == 3);
+
+ if (!use_helper_fn) {
+ TCGv addr = tcg_temp_new();
+ uint32_t size = s->cfg_ptr->vlenb * nf;
+ TCGv_i64 t8 = tcg_temp_new_i64();
+ TCGv_i32 t4 = tcg_temp_new_i32();
+ MemOp atomicity = MO_ATOM_NONE;
+ if (log2_esz == 0) {
+ atomicity = MO_ATOM_NONE;
+ } else {
+ atomicity = MO_ATOM_IFALIGN_PAIR;
+ }
+ if (TCG_TARGET_REG_BITS == 64) {
+ for (int i = 0; i < size; i += 8) {
+ addr = get_address(s, rs1, i);
+ if (is_load) {
+ tcg_gen_qemu_ld_i64(t8, addr, s->mem_idx,
+ MO_LE | MO_64 | atomicity);
+ tcg_gen_st_i64(t8, tcg_env, vreg_ofs(s, vd) + i);
+ } else {
+ tcg_gen_ld_i64(t8, tcg_env, vreg_ofs(s, vd) + i);
+ tcg_gen_qemu_st_i64(t8, addr, s->mem_idx,
+ MO_LE | MO_64 | atomicity);
+ }
+ if (i == size - 8) {
+ tcg_gen_movi_tl(cpu_vstart, 0);
+ } else {
+ tcg_gen_addi_tl(cpu_vstart, cpu_vstart, 8 >> log2_esz);
+ }
+ }
+ } else {
+ for (int i = 0; i < size; i += 4) {
+ addr = get_address(s, rs1, i);
+ if (is_load) {
+ tcg_gen_qemu_ld_i32(t4, addr, s->mem_idx,
+ MO_LE | MO_32 | atomicity);
+ tcg_gen_st_i32(t4, tcg_env, vreg_ofs(s, vd) + i);
+ } else {
+ tcg_gen_ld_i32(t4, tcg_env, vreg_ofs(s, vd) + i);
+ tcg_gen_qemu_st_i32(t4, addr, s->mem_idx,
+ MO_LE | MO_32 | atomicity);
+ }
+ if (i == size - 4) {
+ tcg_gen_movi_tl(cpu_vstart, 0);
+ } else {
+ tcg_gen_addi_tl(cpu_vstart, cpu_vstart, 4 >> log2_esz);
+ }
+ }
+ }
+ } else {
+ TCGv_ptr dest;
+ TCGv base;
+ TCGv_i32 desc;
+ uint32_t data = FIELD_DP32(0, VDATA, NF, nf);
+ data = FIELD_DP32(data, VDATA, VM, 1);
+ dest = tcg_temp_new_ptr();
+ desc = tcg_constant_i32(simd_desc(s->cfg_ptr->vlenb,
+ s->cfg_ptr->vlenb, data));
+ base = get_gpr(s, rs1, EXT_NONE);
+ tcg_gen_addi_ptr(dest, tcg_env, vreg_ofs(s, vd));
+ fn(dest, base, tcg_env, desc);
+ }
finalize_rvv_inst(s);
return true;
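
The bookkeeping in the inline path is easy to sanity-check on the side: a vlNre/vsNr access moves vlenb * nf bytes, and each 64-bit chunk advances vstart by 8 >> log2_esz elements, resetting it to 0 on the final chunk. A tiny standalone model (the vlenb/nf/EEW values below are arbitrary examples):

    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
        uint32_t vlenb = 16;       /* VLEN = 128 (illustrative) */
        uint32_t nf = 4;           /* e.g. vl4re16.v */
        uint32_t log2_esz = 1;     /* EEW = 16 */

        uint32_t size = vlenb * nf;
        for (uint32_t i = 0, vstart = 0; i < size; i += 8) {
            /* mirror the vstart updates done after each 64-bit access */
            vstart = (i == size - 8) ? 0 : vstart + (8 >> log2_esz);
            printf("after chunk at byte %2u: vstart = %u\n", i, vstart);
        }
        return 0;
    }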
@@ -1125,58 +1487,47 @@ static bool ldst_whole_trans(uint32_t vd, uint32_t rs1, uint32_t nf,
* load and store whole register instructions ignore vtype and vl setting.
* Thus, we don't need to check vill bit. (Section 7.9)
*/
-#define GEN_LDST_WHOLE_TRANS(NAME, ARG_NF) \
-static bool trans_##NAME(DisasContext *s, arg_##NAME * a) \
-{ \
- if (require_rvv(s) && \
- QEMU_IS_ALIGNED(a->rd, ARG_NF)) { \
- return ldst_whole_trans(a->rd, a->rs1, ARG_NF, \
- gen_helper_##NAME, s); \
- } \
- return false; \
-}
-
-GEN_LDST_WHOLE_TRANS(vl1re8_v, 1)
-GEN_LDST_WHOLE_TRANS(vl1re16_v, 1)
-GEN_LDST_WHOLE_TRANS(vl1re32_v, 1)
-GEN_LDST_WHOLE_TRANS(vl1re64_v, 1)
-GEN_LDST_WHOLE_TRANS(vl2re8_v, 2)
-GEN_LDST_WHOLE_TRANS(vl2re16_v, 2)
-GEN_LDST_WHOLE_TRANS(vl2re32_v, 2)
-GEN_LDST_WHOLE_TRANS(vl2re64_v, 2)
-GEN_LDST_WHOLE_TRANS(vl4re8_v, 4)
-GEN_LDST_WHOLE_TRANS(vl4re16_v, 4)
-GEN_LDST_WHOLE_TRANS(vl4re32_v, 4)
-GEN_LDST_WHOLE_TRANS(vl4re64_v, 4)
-GEN_LDST_WHOLE_TRANS(vl8re8_v, 8)
-GEN_LDST_WHOLE_TRANS(vl8re16_v, 8)
-GEN_LDST_WHOLE_TRANS(vl8re32_v, 8)
-GEN_LDST_WHOLE_TRANS(vl8re64_v, 8)
+#define GEN_LDST_WHOLE_TRANS(NAME, ETYPE, ARG_NF, IS_LOAD) \
+static bool trans_##NAME(DisasContext *s, arg_##NAME * a) \
+{ \
+ if (require_rvv(s) && \
+ QEMU_IS_ALIGNED(a->rd, ARG_NF)) { \
+ return ldst_whole_trans(a->rd, a->rs1, ARG_NF, ctzl(sizeof(ETYPE)), \
+ gen_helper_##NAME, s, IS_LOAD); \
+ } \
+ return false; \
+}
+
+GEN_LDST_WHOLE_TRANS(vl1re8_v, int8_t, 1, true)
+GEN_LDST_WHOLE_TRANS(vl1re16_v, int16_t, 1, true)
+GEN_LDST_WHOLE_TRANS(vl1re32_v, int32_t, 1, true)
+GEN_LDST_WHOLE_TRANS(vl1re64_v, int64_t, 1, true)
+GEN_LDST_WHOLE_TRANS(vl2re8_v, int8_t, 2, true)
+GEN_LDST_WHOLE_TRANS(vl2re16_v, int16_t, 2, true)
+GEN_LDST_WHOLE_TRANS(vl2re32_v, int32_t, 2, true)
+GEN_LDST_WHOLE_TRANS(vl2re64_v, int64_t, 2, true)
+GEN_LDST_WHOLE_TRANS(vl4re8_v, int8_t, 4, true)
+GEN_LDST_WHOLE_TRANS(vl4re16_v, int16_t, 4, true)
+GEN_LDST_WHOLE_TRANS(vl4re32_v, int32_t, 4, true)
+GEN_LDST_WHOLE_TRANS(vl4re64_v, int64_t, 4, true)
+GEN_LDST_WHOLE_TRANS(vl8re8_v, int8_t, 8, true)
+GEN_LDST_WHOLE_TRANS(vl8re16_v, int16_t, 8, true)
+GEN_LDST_WHOLE_TRANS(vl8re32_v, int32_t, 8, true)
+GEN_LDST_WHOLE_TRANS(vl8re64_v, int64_t, 8, true)
/*
* The vector whole register store instructions are encoded similar to
* unmasked unit-stride store of elements with EEW=8.
*/
-GEN_LDST_WHOLE_TRANS(vs1r_v, 1)
-GEN_LDST_WHOLE_TRANS(vs2r_v, 2)
-GEN_LDST_WHOLE_TRANS(vs4r_v, 4)
-GEN_LDST_WHOLE_TRANS(vs8r_v, 8)
+GEN_LDST_WHOLE_TRANS(vs1r_v, int8_t, 1, false)
+GEN_LDST_WHOLE_TRANS(vs2r_v, int8_t, 2, false)
+GEN_LDST_WHOLE_TRANS(vs4r_v, int8_t, 4, false)
+GEN_LDST_WHOLE_TRANS(vs8r_v, int8_t, 8, false)
/*
*** Vector Integer Arithmetic Instructions
*/
-/*
- * MAXSZ returns the maximum vector size can be operated in bytes,
- * which is used in GVEC IR when vl_eq_vlmax flag is set to true
- * to accelerate vector operation.
- */
-static inline uint32_t MAXSZ(DisasContext *s)
-{
- int max_sz = s->cfg_ptr->vlenb * 8;
- return max_sz >> (3 - s->lmul);
-}
-
static bool opivv_check(DisasContext *s, arg_rmrr *a)
{
return require_rvv(s) &&
@@ -1472,6 +1823,16 @@ static bool opivv_widen_check(DisasContext *s, arg_rmrr *a)
vext_check_dss(s, a->rd, a->rs1, a->rs2, a->vm);
}
+/* OPIVV with overwrite and WIDEN */
+static bool opivv_overwrite_widen_check(DisasContext *s, arg_rmrr *a)
+{
+ return require_rvv(s) &&
+ vext_check_isa_ill(s) &&
+ vext_check_dss(s, a->rd, a->rs1, a->rs2, a->vm) &&
+ vext_check_input_eew(s, a->rd, s->sew + 1, a->rs1, s->sew, a->vm) &&
+ vext_check_input_eew(s, a->rd, s->sew + 1, a->rs2, s->sew, a->vm);
+}
+
static bool do_opivv_widen(DisasContext *s, arg_rmrr *a,
gen_helper_gvec_4_ptr *fn,
bool (*checkfn)(DisasContext *, arg_rmrr *))
@@ -1519,6 +1880,14 @@ static bool opivx_widen_check(DisasContext *s, arg_rmrr *a)
vext_check_ds(s, a->rd, a->rs2, a->vm);
}
+static bool opivx_overwrite_widen_check(DisasContext *s, arg_rmrr *a)
+{
+ return require_rvv(s) &&
+ vext_check_isa_ill(s) &&
+ vext_check_ds(s, a->rd, a->rs2, a->vm) &&
+ vext_check_input_eew(s, a->rd, s->sew + 1, a->rs2, s->sew, a->vm);
+}
+
#define GEN_OPIVX_WIDEN_TRANS(NAME, CHECK) \
static bool trans_##NAME(DisasContext *s, arg_rmrr *a) \
{ \
@@ -1990,13 +2359,13 @@ GEN_OPIVX_TRANS(vmadd_vx, opivx_check)
GEN_OPIVX_TRANS(vnmsub_vx, opivx_check)
/* Vector Widening Integer Multiply-Add Instructions */
-GEN_OPIVV_WIDEN_TRANS(vwmaccu_vv, opivv_widen_check)
-GEN_OPIVV_WIDEN_TRANS(vwmacc_vv, opivv_widen_check)
-GEN_OPIVV_WIDEN_TRANS(vwmaccsu_vv, opivv_widen_check)
-GEN_OPIVX_WIDEN_TRANS(vwmaccu_vx, opivx_widen_check)
-GEN_OPIVX_WIDEN_TRANS(vwmacc_vx, opivx_widen_check)
-GEN_OPIVX_WIDEN_TRANS(vwmaccsu_vx, opivx_widen_check)
-GEN_OPIVX_WIDEN_TRANS(vwmaccus_vx, opivx_widen_check)
+GEN_OPIVV_WIDEN_TRANS(vwmaccu_vv, opivv_overwrite_widen_check)
+GEN_OPIVV_WIDEN_TRANS(vwmacc_vv, opivv_overwrite_widen_check)
+GEN_OPIVV_WIDEN_TRANS(vwmaccsu_vv, opivv_overwrite_widen_check)
+GEN_OPIVX_WIDEN_TRANS(vwmaccu_vx, opivx_overwrite_widen_check)
+GEN_OPIVX_WIDEN_TRANS(vwmacc_vx, opivx_overwrite_widen_check)
+GEN_OPIVX_WIDEN_TRANS(vwmaccsu_vx, opivx_overwrite_widen_check)
+GEN_OPIVX_WIDEN_TRANS(vwmaccus_vx, opivx_overwrite_widen_check)
/* Vector Integer Merge and Move Instructions */
static bool trans_vmv_v_v(DisasContext *s, arg_vmv_v_v *a)
@@ -2337,6 +2706,17 @@ static bool opfvv_widen_check(DisasContext *s, arg_rmrr *a)
vext_check_dss(s, a->rd, a->rs1, a->rs2, a->vm);
}
+static bool opfvv_overwrite_widen_check(DisasContext *s, arg_rmrr *a)
+{
+ return require_rvv(s) &&
+ require_rvf(s) &&
+ require_scale_rvf(s) &&
+ vext_check_isa_ill(s) &&
+ vext_check_dss(s, a->rd, a->rs1, a->rs2, a->vm) &&
+ vext_check_input_eew(s, a->rd, s->sew + 1, a->rs1, s->sew, a->vm) &&
+ vext_check_input_eew(s, a->rd, s->sew + 1, a->rs2, s->sew, a->vm);
+}
+
/* OPFVV with WIDEN */
#define GEN_OPFVV_WIDEN_TRANS(NAME, CHECK) \
static bool trans_##NAME(DisasContext *s, arg_rmrr *a) \
@@ -2376,11 +2756,21 @@ static bool opfvf_widen_check(DisasContext *s, arg_rmrr *a)
vext_check_ds(s, a->rd, a->rs2, a->vm);
}
+static bool opfvf_overwrite_widen_check(DisasContext *s, arg_rmrr *a)
+{
+ return require_rvv(s) &&
+ require_rvf(s) &&
+ require_scale_rvf(s) &&
+ vext_check_isa_ill(s) &&
+ vext_check_ds(s, a->rd, a->rs2, a->vm) &&
+ vext_check_input_eew(s, a->rd, s->sew + 1, a->rs2, s->sew, a->vm);
+}
+
/* OPFVF with WIDEN */
-#define GEN_OPFVF_WIDEN_TRANS(NAME) \
+#define GEN_OPFVF_WIDEN_TRANS(NAME, CHECK) \
static bool trans_##NAME(DisasContext *s, arg_rmrr *a) \
{ \
- if (opfvf_widen_check(s, a)) { \
+ if (CHECK(s, a)) { \
uint32_t data = 0; \
static gen_helper_opfvf *const fns[2] = { \
gen_helper_##NAME##_h, gen_helper_##NAME##_w, \
@@ -2396,8 +2786,8 @@ static bool trans_##NAME(DisasContext *s, arg_rmrr *a) \
return false; \
}
-GEN_OPFVF_WIDEN_TRANS(vfwadd_vf)
-GEN_OPFVF_WIDEN_TRANS(vfwsub_vf)
+GEN_OPFVF_WIDEN_TRANS(vfwadd_vf, opfvf_widen_check)
+GEN_OPFVF_WIDEN_TRANS(vfwsub_vf, opfvf_widen_check)
static bool opfwv_widen_check(DisasContext *s, arg_rmrr *a)
{
@@ -2479,7 +2869,7 @@ GEN_OPFVF_TRANS(vfrdiv_vf, opfvf_check)
/* Vector Widening Floating-Point Multiply */
GEN_OPFVV_WIDEN_TRANS(vfwmul_vv, opfvv_widen_check)
-GEN_OPFVF_WIDEN_TRANS(vfwmul_vf)
+GEN_OPFVF_WIDEN_TRANS(vfwmul_vf, opfvf_widen_check)
/* Vector Single-Width Floating-Point Fused Multiply-Add Instructions */
GEN_OPFVV_TRANS(vfmacc_vv, opfvv_check)
@@ -2500,14 +2890,14 @@ GEN_OPFVF_TRANS(vfmsub_vf, opfvf_check)
GEN_OPFVF_TRANS(vfnmsub_vf, opfvf_check)
/* Vector Widening Floating-Point Fused Multiply-Add Instructions */
-GEN_OPFVV_WIDEN_TRANS(vfwmacc_vv, opfvv_widen_check)
-GEN_OPFVV_WIDEN_TRANS(vfwnmacc_vv, opfvv_widen_check)
-GEN_OPFVV_WIDEN_TRANS(vfwmsac_vv, opfvv_widen_check)
-GEN_OPFVV_WIDEN_TRANS(vfwnmsac_vv, opfvv_widen_check)
-GEN_OPFVF_WIDEN_TRANS(vfwmacc_vf)
-GEN_OPFVF_WIDEN_TRANS(vfwnmacc_vf)
-GEN_OPFVF_WIDEN_TRANS(vfwmsac_vf)
-GEN_OPFVF_WIDEN_TRANS(vfwnmsac_vf)
+GEN_OPFVV_WIDEN_TRANS(vfwmacc_vv, opfvv_overwrite_widen_check)
+GEN_OPFVV_WIDEN_TRANS(vfwnmacc_vv, opfvv_overwrite_widen_check)
+GEN_OPFVV_WIDEN_TRANS(vfwmsac_vv, opfvv_overwrite_widen_check)
+GEN_OPFVV_WIDEN_TRANS(vfwnmsac_vv, opfvv_overwrite_widen_check)
+GEN_OPFVF_WIDEN_TRANS(vfwmacc_vf, opfvf_overwrite_widen_check)
+GEN_OPFVF_WIDEN_TRANS(vfwnmacc_vf, opfvf_overwrite_widen_check)
+GEN_OPFVF_WIDEN_TRANS(vfwmsac_vf, opfvf_overwrite_widen_check)
+GEN_OPFVF_WIDEN_TRANS(vfwnmsac_vf, opfvf_overwrite_widen_check)
/* Vector Floating-Point Square-Root Instruction */
@@ -3172,7 +3562,6 @@ static void load_element(TCGv_i64 dest, TCGv_ptr base,
break;
default:
g_assert_not_reached();
- break;
}
}
@@ -3257,7 +3646,6 @@ static void store_element(TCGv_i64 val, TCGv_ptr base,
break;
default:
g_assert_not_reached();
- break;
}
}
@@ -3425,6 +3813,7 @@ static bool vrgather_vv_check(DisasContext *s, arg_rmrr *a)
{
return require_rvv(s) &&
vext_check_isa_ill(s) &&
+ vext_check_input_eew(s, a->rs1, s->sew, a->rs2, s->sew, a->vm) &&
require_align(a->rd, s->lmul) &&
require_align(a->rs1, s->lmul) &&
require_align(a->rs2, s->lmul) &&
@@ -3437,6 +3826,7 @@ static bool vrgatherei16_vv_check(DisasContext *s, arg_rmrr *a)
int8_t emul = MO_16 - s->sew + s->lmul;
return require_rvv(s) &&
vext_check_isa_ill(s) &&
+ vext_check_input_eew(s, a->rs1, MO_16, a->rs2, s->sew, a->vm) &&
(emul >= -3 && emul <= 3) &&
require_align(a->rd, s->lmul) &&
require_align(a->rs1, emul) &&
@@ -3456,6 +3846,7 @@ static bool vrgather_vx_check(DisasContext *s, arg_rmrr *a)
{
return require_rvv(s) &&
vext_check_isa_ill(s) &&
+ vext_check_input_eew(s, -1, MO_64, a->rs2, s->sew, a->vm) &&
require_align(a->rd, s->lmul) &&
require_align(a->rs2, s->lmul) &&
(a->rd != a->rs2) &&
@@ -3599,7 +3990,9 @@ static bool int_ext_check(DisasContext *s, arg_rmr *a, uint8_t div)
require_align(a->rd, s->lmul) &&
require_align(a->rs2, s->lmul - div) &&
require_vm(a->vm, a->rd) &&
- require_noover(a->rd, s->lmul, a->rs2, s->lmul - div);
+ require_noover(a->rd, s->lmul, a->rs2, s->lmul - div) &&
+ vext_check_input_eew(s, -1, 0, a->rs2, s->sew, a->vm);
+
return ret;
}
diff --git a/target/riscv/insn_trans/trans_rvvk.c.inc b/target/riscv/insn_trans/trans_rvvk.c.inc
index ae1f401..27bf3f0 100644
--- a/target/riscv/insn_trans/trans_rvvk.c.inc
+++ b/target/riscv/insn_trans/trans_rvvk.c.inc
@@ -249,7 +249,7 @@ GEN_OPIVI_WIDEN_TRANS(vwsll_vi, IMM_ZX, vwsll_vx, vwsll_vx_check)
\
if (!s->vstart_eq_zero || !s->vl_eq_vlmax) { \
/* save opcode for unwinding in case we throw an exception */ \
- decode_save_opc(s); \
+ decode_save_opc(s, 0); \
egs = tcg_constant_i32(EGS); \
gen_helper_egs_check(egs, tcg_env); \
} \
@@ -322,7 +322,7 @@ GEN_V_UNMASKED_TRANS(vaesem_vs, vaes_check_vs, ZVKNED_EGS)
\
if (!s->vstart_eq_zero || !s->vl_eq_vlmax) { \
/* save opcode for unwinding in case we throw an exception */ \
- decode_save_opc(s); \
+ decode_save_opc(s, 0); \
egs = tcg_constant_i32(EGS); \
gen_helper_egs_check(egs, tcg_env); \
} \
@@ -389,7 +389,7 @@ GEN_VI_UNMASKED_TRANS(vaeskf2_vi, vaeskf2_check, ZVKNED_EGS)
\
if (!s->vstart_eq_zero || !s->vl_eq_vlmax) { \
/* save opcode for unwinding in case we throw an exception */ \
- decode_save_opc(s); \
+ decode_save_opc(s, 0); \
egs = tcg_constant_i32(EGS); \
gen_helper_egs_check(egs, tcg_env); \
} \
@@ -440,7 +440,7 @@ static bool trans_vsha2cl_vv(DisasContext *s, arg_rmrr *a)
if (!s->vstart_eq_zero || !s->vl_eq_vlmax) {
/* save opcode for unwinding in case we throw an exception */
- decode_save_opc(s);
+ decode_save_opc(s, 0);
egs = tcg_constant_i32(ZVKNH_EGS);
gen_helper_egs_check(egs, tcg_env);
}
@@ -471,7 +471,7 @@ static bool trans_vsha2ch_vv(DisasContext *s, arg_rmrr *a)
if (!s->vstart_eq_zero || !s->vl_eq_vlmax) {
/* save opcode for unwinding in case we throw an exception */
- decode_save_opc(s);
+ decode_save_opc(s, 0);
egs = tcg_constant_i32(ZVKNH_EGS);
gen_helper_egs_check(egs, tcg_env);
}
diff --git a/target/riscv/insn_trans/trans_rvzacas.c.inc b/target/riscv/insn_trans/trans_rvzacas.c.inc
index fcced99..15e688a 100644
--- a/target/riscv/insn_trans/trans_rvzacas.c.inc
+++ b/target/riscv/insn_trans/trans_rvzacas.c.inc
@@ -76,7 +76,7 @@ static bool gen_cmpxchg64(DisasContext *ctx, arg_atomic *a, MemOp mop)
TCGv src1 = get_address(ctx, a->rs1, 0);
TCGv_i64 src2 = get_gpr_pair(ctx, a->rs2);
- decode_save_opc(ctx);
+ decode_save_opc(ctx, RISCV_UW2_ALWAYS_STORE_AMO);
tcg_gen_atomic_cmpxchg_i64(dest, src1, dest, src2, ctx->mem_idx, mop);
gen_set_gpr_pair(ctx, a->rd, dest);
@@ -121,7 +121,7 @@ static bool trans_amocas_q(DisasContext *ctx, arg_amocas_q *a)
tcg_gen_concat_i64_i128(src2, src2l, src2h);
tcg_gen_concat_i64_i128(dest, destl, desth);
- decode_save_opc(ctx);
+ decode_save_opc(ctx, RISCV_UW2_ALWAYS_STORE_AMO);
tcg_gen_atomic_cmpxchg_i128(dest, src1, dest, src2, ctx->mem_idx,
(MO_ALIGN | MO_TEUO));
diff --git a/target/riscv/insn_trans/trans_rvzce.c.inc b/target/riscv/insn_trans/trans_rvzce.c.inc
index cd234ad..c77c2b9 100644
--- a/target/riscv/insn_trans/trans_rvzce.c.inc
+++ b/target/riscv/insn_trans/trans_rvzce.c.inc
@@ -203,6 +203,14 @@ static bool gen_pop(DisasContext *ctx, arg_cmpp *a, bool ret, bool ret_val)
if (ret) {
TCGv ret_addr = get_gpr(ctx, xRA, EXT_SIGN);
+#ifndef CONFIG_USER_ONLY
+ if (ctx->cfg_ptr->ext_smctr || ctx->cfg_ptr->ext_ssctr) {
+ TCGv type = tcg_constant_tl(CTRDATA_TYPE_RETURN);
+ TCGv src = tcg_temp_new();
+ gen_pc_plus_diff(src, ctx, 0);
+ gen_helper_ctr_add_entry(tcg_env, src, ret_addr, type);
+ }
+#endif
tcg_gen_mov_tl(cpu_pc, ret_addr);
tcg_gen_lookup_and_goto_ptr();
ctx->base.is_jmp = DISAS_NORETURN;
@@ -309,6 +317,19 @@ static bool trans_cm_jalt(DisasContext *ctx, arg_cm_jalt *a)
gen_set_gpr(ctx, xRA, succ_pc);
}
+#ifndef CONFIG_USER_ONLY
+ if (ctx->cfg_ptr->ext_smctr || ctx->cfg_ptr->ext_ssctr) {
+ if (a->index >= 32) {
+ TCGv type = tcg_constant_tl(CTRDATA_TYPE_DIRECT_CALL);
+ gen_helper_ctr_add_entry(tcg_env, cpu_pc, addr, type);
+ } else {
+ TCGv type = tcg_constant_tl(CTRDATA_TYPE_DIRECT_JUMP);
+ gen_helper_ctr_add_entry(tcg_env, cpu_pc, addr, type);
+ }
+ }
+#endif
+
+
tcg_gen_mov_tl(cpu_pc, addr);
tcg_gen_lookup_and_goto_ptr();
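
The index threshold in the cm.jalt hunk comes from Zcmt: jump-table slots 0-31 are reached by cm.jt (a plain jump), slots 32 and up by cm.jalt (which links ra), so the two get different CTR entry types. In sketch form, with the enumerator values assumed:

    /* 0-31: cm.jt, no link -> direct jump; 32+: cm.jalt -> direct call */
    static int sketch_ctr_type_for_index(unsigned index)
    {
        return index >= 32 ? 1   /* CTRDATA_TYPE_DIRECT_CALL, assumed */
                           : 2;  /* CTRDATA_TYPE_DIRECT_JUMP, assumed */
    }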
diff --git a/target/riscv/insn_trans/trans_rvzfh.c.inc b/target/riscv/insn_trans/trans_rvzfh.c.inc
index 1eb458b..bece48e 100644
--- a/target/riscv/insn_trans/trans_rvzfh.c.inc
+++ b/target/riscv/insn_trans/trans_rvzfh.c.inc
@@ -48,7 +48,7 @@ static bool trans_flh(DisasContext *ctx, arg_flh *a)
REQUIRE_FPU;
REQUIRE_ZFHMIN_OR_ZFBFMIN(ctx);
- decode_save_opc(ctx);
+ decode_save_opc(ctx, 0);
t0 = get_gpr(ctx, a->rs1, EXT_NONE);
if (a->imm) {
TCGv temp = tcg_temp_new();
@@ -71,7 +71,7 @@ static bool trans_fsh(DisasContext *ctx, arg_fsh *a)
REQUIRE_FPU;
REQUIRE_ZFHMIN_OR_ZFBFMIN(ctx);
- decode_save_opc(ctx);
+ decode_save_opc(ctx, 0);
t0 = get_gpr(ctx, a->rs1, EXT_NONE);
if (a->imm) {
TCGv temp = tcg_temp_new();
diff --git a/target/riscv/insn_trans/trans_rvzicfiss.c.inc b/target/riscv/insn_trans/trans_rvzicfiss.c.inc
new file mode 100644
index 0000000..b0096ad
--- /dev/null
+++ b/target/riscv/insn_trans/trans_rvzicfiss.c.inc
@@ -0,0 +1,131 @@
+/*
+ * RISC-V translation routines for the Control-Flow Integrity Extension
+ *
+ * Copyright (c) 2024 Rivos Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2 or later, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#define REQUIRE_ZICFISS(ctx) do { \
+ if (!ctx->cfg_ptr->ext_zicfiss) { \
+ return false; \
+ } \
+} while (0)
+
+static bool trans_sspopchk(DisasContext *ctx, arg_sspopchk *a)
+{
+ if (!ctx->bcfi_enabled) {
+ return false;
+ }
+
+ TCGv addr = tcg_temp_new();
+ TCGLabel *skip = gen_new_label();
+ uint32_t tmp = (get_xl(ctx) == MXL_RV64) ? 8 : 4;
+ TCGv data = tcg_temp_new();
+ tcg_gen_ld_tl(addr, tcg_env, offsetof(CPURISCVState, ssp));
+ decode_save_opc(ctx, RISCV_UW2_ALWAYS_STORE_AMO);
+ tcg_gen_qemu_ld_tl(data, addr, SS_MMU_INDEX(ctx),
+ mxl_memop(ctx) | MO_ALIGN);
+ TCGv rs1 = get_gpr(ctx, a->rs1, EXT_NONE);
+ tcg_gen_brcond_tl(TCG_COND_EQ, data, rs1, skip);
+ tcg_gen_st_tl(tcg_constant_tl(RISCV_EXCP_SW_CHECK_BCFI_TVAL),
+ tcg_env, offsetof(CPURISCVState, sw_check_code));
+ gen_helper_raise_exception(tcg_env,
+ tcg_constant_i32(RISCV_EXCP_SW_CHECK));
+ gen_set_label(skip);
+ tcg_gen_addi_tl(addr, addr, tmp);
+ tcg_gen_st_tl(addr, tcg_env, offsetof(CPURISCVState, ssp));
+
+ return true;
+}
+
+static bool trans_sspush(DisasContext *ctx, arg_sspush *a)
+{
+ if (!ctx->bcfi_enabled) {
+ return false;
+ }
+
+ TCGv addr = tcg_temp_new();
+ int tmp = (get_xl(ctx) == MXL_RV64) ? -8 : -4;
+ TCGv data = get_gpr(ctx, a->rs2, EXT_NONE);
+ decode_save_opc(ctx, RISCV_UW2_ALWAYS_STORE_AMO);
+ tcg_gen_ld_tl(addr, tcg_env, offsetof(CPURISCVState, ssp));
+ tcg_gen_addi_tl(addr, addr, tmp);
+ tcg_gen_qemu_st_tl(data, addr, SS_MMU_INDEX(ctx),
+ mxl_memop(ctx) | MO_ALIGN);
+ tcg_gen_st_tl(addr, tcg_env, offsetof(CPURISCVState, ssp));
+
+ return true;
+}
+
+static bool trans_ssrdp(DisasContext *ctx, arg_ssrdp *a)
+{
+ if (!ctx->bcfi_enabled || a->rd == 0) {
+ return false;
+ }
+
+ TCGv dest = dest_gpr(ctx, a->rd);
+ tcg_gen_ld_tl(dest, tcg_env, offsetof(CPURISCVState, ssp));
+ gen_set_gpr(ctx, a->rd, dest);
+
+ return true;
+}
+
+static bool trans_ssamoswap_w(DisasContext *ctx, arg_amoswap_w *a)
+{
+ REQUIRE_A_OR_ZAAMO(ctx);
+ REQUIRE_ZICFISS(ctx);
+ if (ctx->priv == PRV_M) {
+ generate_exception(ctx, RISCV_EXCP_STORE_AMO_ACCESS_FAULT);
+ }
+
+ if (!ctx->bcfi_enabled) {
+ return false;
+ }
+
+ TCGv dest = dest_gpr(ctx, a->rd);
+ TCGv src1, src2 = get_gpr(ctx, a->rs2, EXT_NONE);
+
+ decode_save_opc(ctx, RISCV_UW2_ALWAYS_STORE_AMO);
+ src1 = get_address(ctx, a->rs1, 0);
+
+ tcg_gen_atomic_xchg_tl(dest, src1, src2, SS_MMU_INDEX(ctx),
+ (MO_ALIGN | MO_TESL));
+ gen_set_gpr(ctx, a->rd, dest);
+ return true;
+}
+
+static bool trans_ssamoswap_d(DisasContext *ctx, arg_amoswap_w *a)
+{
+ REQUIRE_64BIT(ctx);
+ REQUIRE_A_OR_ZAAMO(ctx);
+ REQUIRE_ZICFISS(ctx);
+ if (ctx->priv == PRV_M) {
+ generate_exception(ctx, RISCV_EXCP_STORE_AMO_ACCESS_FAULT);
+ }
+
+ if (!ctx->bcfi_enabled) {
+ return false;
+ }
+
+ TCGv dest = dest_gpr(ctx, a->rd);
+ TCGv src1, src2 = get_gpr(ctx, a->rs2, EXT_NONE);
+
+ decode_save_opc(ctx, RISCV_UW2_ALWAYS_STORE_AMO);
+ src1 = get_address(ctx, a->rs1, 0);
+
+ tcg_gen_atomic_xchg_tl(dest, src1, src2, SS_MMU_INDEX(ctx),
+ (MO_ALIGN | MO_TESQ));
+ gen_set_gpr(ctx, a->rd, dest);
+ return true;
+}
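
As a reading aid for the new file: sspush stores the link value below ssp, sspopchk reloads and compares it, trapping on mismatch, then pops. A self-contained software model of that discipline; XLEN=64, the toy backing store and all names here are assumptions of this sketch, not QEMU code:

    #include <stdint.h>
    #include <assert.h>

    static uint64_t model_ssp = 8 * 64;  /* toy shadow stack pointer */
    static uint64_t model_mem[64];       /* toy backing store */

    static void model_sspush(uint64_t ra)
    {
        model_ssp -= 8;                  /* shadow stack grows down */
        model_mem[model_ssp / 8] = ra;
    }

    static void model_sspopchk(uint64_t ra)
    {
        /* a mismatch raises the BCFI software-check exception */
        assert(model_mem[model_ssp / 8] == ra);
        model_ssp += 8;
    }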
diff --git a/target/riscv/insn_trans/trans_svinval.c.inc b/target/riscv/insn_trans/trans_svinval.c.inc
index 0f692a1..a06c3b2 100644
--- a/target/riscv/insn_trans/trans_svinval.c.inc
+++ b/target/riscv/insn_trans/trans_svinval.c.inc
@@ -28,7 +28,7 @@ static bool trans_sinval_vma(DisasContext *ctx, arg_sinval_vma *a)
/* Do the same as sfence.vma currently */
REQUIRE_EXT(ctx, RVS);
#ifndef CONFIG_USER_ONLY
- decode_save_opc(ctx);
+ decode_save_opc(ctx, 0);
gen_helper_tlb_flush(tcg_env);
return true;
#endif
@@ -57,7 +57,7 @@ static bool trans_hinval_vvma(DisasContext *ctx, arg_hinval_vvma *a)
/* Do the same as hfence.vvma currently */
REQUIRE_EXT(ctx, RVH);
#ifndef CONFIG_USER_ONLY
- decode_save_opc(ctx);
+ decode_save_opc(ctx, 0);
gen_helper_hyp_tlb_flush(tcg_env);
return true;
#endif
@@ -70,7 +70,7 @@ static bool trans_hinval_gvma(DisasContext *ctx, arg_hinval_gvma *a)
/* Do the same as hfence.gvma currently */
REQUIRE_EXT(ctx, RVH);
#ifndef CONFIG_USER_ONLY
- decode_save_opc(ctx);
+ decode_save_opc(ctx, 0);
gen_helper_hyp_gvma_tlb_flush(tcg_env);
return true;
#endif
diff --git a/target/riscv/internals.h b/target/riscv/internals.h
index 0ac17bc..4570bd5 100644
--- a/target/riscv/internals.h
+++ b/target/riscv/internals.h
@@ -19,7 +19,10 @@
#ifndef RISCV_CPU_INTERNALS_H
#define RISCV_CPU_INTERNALS_H
+#include "exec/cpu-common.h"
#include "hw/registerfields.h"
+#include "fpu/softfloat-types.h"
+#include "target/riscv/cpu_bits.h"
/*
* The current MMU Modes are:
@@ -30,12 +33,15 @@
* - U+2STAGE 0b100
* - S+2STAGE 0b101
* - S+SUM+2STAGE 0b110
+ * - Shadow stack+U 0b1000
+ * - Shadow stack+S 0b1001
*/
#define MMUIdx_U 0
#define MMUIdx_S 1
#define MMUIdx_S_SUM 2
#define MMUIdx_M 3
#define MMU_2STAGE_BIT (1 << 2)
+#define MMU_IDX_SS_WRITE (1 << 3)
static inline int mmuidx_priv(int mmu_idx)
{
@@ -136,7 +142,68 @@ static inline float16 check_nanbox_h(CPURISCVState *env, uint64_t f)
}
}
-/* Our implementation of CPUClass::has_work */
+#ifndef CONFIG_USER_ONLY
+/* Our implementation of SysemuCPUOps::has_work */
bool riscv_cpu_has_work(CPUState *cs);
+#endif
+
+/* Zjpm addr masking routine */
+static inline target_ulong adjust_addr_body(CPURISCVState *env,
+ target_ulong addr,
+ bool is_virt_addr)
+{
+ RISCVPmPmm pmm = PMM_FIELD_DISABLED;
+ uint32_t pmlen = 0;
+ bool signext = false;
+
+ /* do nothing for rv32 mode */
+ if (riscv_cpu_mxl(env) == MXL_RV32) {
+ return addr;
+ }
+
+    /* get pmm field depending on whether addr is a virtual address */
+ if (is_virt_addr) {
+ pmm = riscv_pm_get_virt_pmm(env);
+ } else {
+ pmm = riscv_pm_get_pmm(env);
+ }
+
+ /* if pointer masking is disabled, return original addr */
+ if (pmm == PMM_FIELD_DISABLED) {
+ return addr;
+ }
+
+ if (!is_virt_addr) {
+ signext = riscv_cpu_virt_mem_enabled(env);
+ }
+    pmlen = riscv_pm_get_pmlen(pmm);
+    addr = addr << pmlen;
+
+    /* sign- or zero-extend the masked address back down by pmlen bits */
+ if (signext) {
+ addr = (target_long)addr >> pmlen;
+ } else {
+ addr = addr >> pmlen;
+ }
+
+ return addr;
+}
+
+static inline target_ulong adjust_addr(CPURISCVState *env,
+ target_ulong addr)
+{
+ return adjust_addr_body(env, addr, false);
+}
+
+static inline target_ulong adjust_addr_virt(CPURISCVState *env,
+ target_ulong addr)
+{
+ return adjust_addr_body(env, addr, true);
+}
+
+static inline int insn_len(uint16_t first_word)
+{
+ return (first_word & 3) == 3 ? 4 : 2;
+}
#endif
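
The shift pair in adjust_addr_body() is the whole trick: shift the top pmlen bits out, then shift back in with either an arithmetic (sign-extending) or logical (zeroing) right shift. A worked example for an assumed pmlen of 16 on RV64:

    #include <stdint.h>
    #include <assert.h>

    static uint64_t sketch_mask_addr(uint64_t addr, unsigned pmlen,
                                     int signext)
    {
        addr <<= pmlen;
        return signext ? (uint64_t)((int64_t)addr >> pmlen)
                       : addr >> pmlen;
    }

    int main(void)
    {
        /* tag bits in [63:48] are discarded, the low 48 bits survive */
        assert(sketch_mask_addr(0xabcd00007fff0000ull, 16, 0) ==
               0x00007fff0000ull);
        /* with sign extension, bit 47 propagates back upward */
        assert(sketch_mask_addr(0xabcd800000000000ull, 16, 1) ==
               0xffff800000000000ull);
        return 0;
    }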
diff --git a/target/riscv/kvm/kvm-cpu.c b/target/riscv/kvm/kvm-cpu.c
index f6e3156..e1a04be 100644
--- a/target/riscv/kvm/kvm-cpu.c
+++ b/target/riscv/kvm/kvm-cpu.c
@@ -27,15 +27,15 @@
#include "qemu/error-report.h"
#include "qemu/main-loop.h"
#include "qapi/visitor.h"
-#include "sysemu/sysemu.h"
-#include "sysemu/kvm.h"
-#include "sysemu/kvm_int.h"
+#include "system/system.h"
+#include "system/kvm.h"
+#include "system/kvm_int.h"
#include "cpu.h"
#include "trace.h"
-#include "hw/core/accel-cpu.h"
+#include "accel/accel-cpu-target.h"
#include "hw/pci/pci.h"
#include "exec/memattrs.h"
-#include "exec/address-spaces.h"
+#include "system/address-spaces.h"
#include "hw/boards.h"
#include "hw/irq.h"
#include "hw/intc/riscv_imsic.h"
@@ -45,7 +45,7 @@
#include "sbi_ecall_interface.h"
#include "chardev/char-fe.h"
#include "migration/misc.h"
-#include "sysemu/runstate.h"
+#include "system/runstate.h"
#include "hw/riscv/numa.h"
#define PR_RISCV_V_SET_CONTROL 69
@@ -58,33 +58,17 @@ void riscv_kvm_aplic_request(void *opaque, int irq, int level)
static bool cap_has_mp_state;
-static uint64_t kvm_riscv_reg_id_ulong(CPURISCVState *env, uint64_t type,
- uint64_t idx)
-{
- uint64_t id = KVM_REG_RISCV | type | idx;
+#define KVM_RISCV_REG_ID_U32(type, idx) (KVM_REG_RISCV | KVM_REG_SIZE_U32 | \
+ type | idx)
- switch (riscv_cpu_mxl(env)) {
- case MXL_RV32:
- id |= KVM_REG_SIZE_U32;
- break;
- case MXL_RV64:
- id |= KVM_REG_SIZE_U64;
- break;
- default:
- g_assert_not_reached();
- }
- return id;
-}
+#define KVM_RISCV_REG_ID_U64(type, idx) (KVM_REG_RISCV | KVM_REG_SIZE_U64 | \
+ type | idx)
-static uint64_t kvm_riscv_reg_id_u32(uint64_t type, uint64_t idx)
-{
- return KVM_REG_RISCV | KVM_REG_SIZE_U32 | type | idx;
-}
-
-static uint64_t kvm_riscv_reg_id_u64(uint64_t type, uint64_t idx)
-{
- return KVM_REG_RISCV | KVM_REG_SIZE_U64 | type | idx;
-}
+#if defined(TARGET_RISCV64)
+#define KVM_RISCV_REG_ID_ULONG(type, idx) KVM_RISCV_REG_ID_U64(type, idx)
+#else
+#define KVM_RISCV_REG_ID_ULONG(type, idx) KVM_RISCV_REG_ID_U32(type, idx)
+#endif
static uint64_t kvm_encode_reg_size_id(uint64_t id, size_t size_b)
{
@@ -107,45 +91,29 @@ static uint64_t kvm_riscv_vector_reg_id(RISCVCPU *cpu,
return kvm_encode_reg_size_id(id, size_b);
}
-#define RISCV_CORE_REG(env, name) \
- kvm_riscv_reg_id_ulong(env, KVM_REG_RISCV_CORE, \
+#define RISCV_CORE_REG(name) \
+ KVM_RISCV_REG_ID_ULONG(KVM_REG_RISCV_CORE, \
KVM_REG_RISCV_CORE_REG(name))
-#define RISCV_CSR_REG(env, name) \
- kvm_riscv_reg_id_ulong(env, KVM_REG_RISCV_CSR, \
+#define RISCV_CSR_REG(name) \
+ KVM_RISCV_REG_ID_ULONG(KVM_REG_RISCV_CSR, \
KVM_REG_RISCV_CSR_REG(name))
-#define RISCV_CONFIG_REG(env, name) \
- kvm_riscv_reg_id_ulong(env, KVM_REG_RISCV_CONFIG, \
+#define RISCV_CONFIG_REG(name) \
+ KVM_RISCV_REG_ID_ULONG(KVM_REG_RISCV_CONFIG, \
KVM_REG_RISCV_CONFIG_REG(name))
-#define RISCV_TIMER_REG(name) kvm_riscv_reg_id_u64(KVM_REG_RISCV_TIMER, \
+#define RISCV_TIMER_REG(name) KVM_RISCV_REG_ID_U64(KVM_REG_RISCV_TIMER, \
KVM_REG_RISCV_TIMER_REG(name))
-#define RISCV_FP_F_REG(idx) kvm_riscv_reg_id_u32(KVM_REG_RISCV_FP_F, idx)
+#define RISCV_FP_F_REG(idx) KVM_RISCV_REG_ID_U32(KVM_REG_RISCV_FP_F, idx)
-#define RISCV_FP_D_REG(idx) kvm_riscv_reg_id_u64(KVM_REG_RISCV_FP_D, idx)
+#define RISCV_FP_D_REG(idx) KVM_RISCV_REG_ID_U64(KVM_REG_RISCV_FP_D, idx)
-#define RISCV_VECTOR_CSR_REG(env, name) \
- kvm_riscv_reg_id_ulong(env, KVM_REG_RISCV_VECTOR, \
+#define RISCV_VECTOR_CSR_REG(name) \
+ KVM_RISCV_REG_ID_ULONG(KVM_REG_RISCV_VECTOR, \
KVM_REG_RISCV_VECTOR_CSR_REG(name))
-#define KVM_RISCV_GET_CSR(cs, env, csr, reg) \
- do { \
- int _ret = kvm_get_one_reg(cs, RISCV_CSR_REG(env, csr), &reg); \
- if (_ret) { \
- return _ret; \
- } \
- } while (0)
-
-#define KVM_RISCV_SET_CSR(cs, env, csr, reg) \
- do { \
- int _ret = kvm_set_one_reg(cs, RISCV_CSR_REG(env, csr), &reg); \
- if (_ret) { \
- return _ret; \
- } \
- } while (0)
-
#define KVM_RISCV_GET_TIMER(cs, name, reg) \
do { \
int ret = kvm_get_one_reg(cs, RISCV_TIMER_REG(name), &reg); \
@@ -167,6 +135,7 @@ typedef struct KVMCPUConfig {
const char *description;
target_ulong offset;
uint64_t kvm_reg_id;
+ uint32_t prop_size;
bool user_set;
bool supported;
} KVMCPUConfig;
@@ -248,7 +217,7 @@ static void kvm_riscv_update_cpu_misa_ext(RISCVCPU *cpu, CPUState *cs)
/* If we're here we're going to disable the MISA bit */
reg = 0;
- id = kvm_riscv_reg_id_ulong(env, KVM_REG_RISCV_ISA_EXT,
+ id = KVM_RISCV_REG_ID_ULONG(KVM_REG_RISCV_ISA_EXT,
misa_cfg->kvm_reg_id);
ret = kvm_set_one_reg(cs, id, &reg);
if (ret != 0) {
@@ -267,6 +236,56 @@ static void kvm_riscv_update_cpu_misa_ext(RISCVCPU *cpu, CPUState *cs)
}
}
+#define KVM_CSR_CFG(_name, _env_prop, reg_id) \
+ {.name = _name, .offset = ENV_CSR_OFFSET(_env_prop), \
+ .prop_size = sizeof(((CPURISCVState *)0)->_env_prop), \
+ .kvm_reg_id = reg_id}
+
+static KVMCPUConfig kvm_csr_cfgs[] = {
+ KVM_CSR_CFG("sstatus", mstatus, RISCV_CSR_REG(sstatus)),
+ KVM_CSR_CFG("sie", mie, RISCV_CSR_REG(sie)),
+ KVM_CSR_CFG("stvec", stvec, RISCV_CSR_REG(stvec)),
+ KVM_CSR_CFG("sscratch", sscratch, RISCV_CSR_REG(sscratch)),
+ KVM_CSR_CFG("sepc", sepc, RISCV_CSR_REG(sepc)),
+ KVM_CSR_CFG("scause", scause, RISCV_CSR_REG(scause)),
+ KVM_CSR_CFG("stval", stval, RISCV_CSR_REG(stval)),
+ KVM_CSR_CFG("sip", mip, RISCV_CSR_REG(sip)),
+ KVM_CSR_CFG("satp", satp, RISCV_CSR_REG(satp)),
+ KVM_CSR_CFG("scounteren", scounteren, RISCV_CSR_REG(scounteren)),
+ KVM_CSR_CFG("senvcfg", senvcfg, RISCV_CSR_REG(senvcfg)),
+};
+
+static void *kvmconfig_get_env_addr(RISCVCPU *cpu, KVMCPUConfig *csr_cfg)
+{
+ return (void *)&cpu->env + csr_cfg->offset;
+}
+
+static uint32_t kvm_cpu_csr_get_u32(RISCVCPU *cpu, KVMCPUConfig *csr_cfg)
+{
+ uint32_t *val32 = kvmconfig_get_env_addr(cpu, csr_cfg);
+ return *val32;
+}
+
+static uint64_t kvm_cpu_csr_get_u64(RISCVCPU *cpu, KVMCPUConfig *csr_cfg)
+{
+ uint64_t *val64 = kvmconfig_get_env_addr(cpu, csr_cfg);
+ return *val64;
+}
+
+static void kvm_cpu_csr_set_u32(RISCVCPU *cpu, KVMCPUConfig *csr_cfg,
+ uint32_t val)
+{
+ uint32_t *val32 = kvmconfig_get_env_addr(cpu, csr_cfg);
+ *val32 = val;
+}
+
+static void kvm_cpu_csr_set_u64(RISCVCPU *cpu, KVMCPUConfig *csr_cfg,
+ uint64_t val)
+{
+ uint64_t *val64 = kvmconfig_get_env_addr(cpu, csr_cfg);
+ *val64 = val;
+}
+
#define KVM_EXT_CFG(_name, _prop, _reg_id) \
{.name = _name, .offset = CPU_CFG_OFFSET(_prop), \
.kvm_reg_id = _reg_id}
@@ -274,6 +293,7 @@ static void kvm_riscv_update_cpu_misa_ext(RISCVCPU *cpu, CPUState *cs)
static KVMCPUConfig kvm_multi_ext_cfgs[] = {
KVM_EXT_CFG("zicbom", ext_zicbom, KVM_RISCV_ISA_EXT_ZICBOM),
KVM_EXT_CFG("zicboz", ext_zicboz, KVM_RISCV_ISA_EXT_ZICBOZ),
+ KVM_EXT_CFG("ziccrse", ext_ziccrse, KVM_RISCV_ISA_EXT_ZICCRSE),
KVM_EXT_CFG("zicntr", ext_zicntr, KVM_RISCV_ISA_EXT_ZICNTR),
KVM_EXT_CFG("zicond", ext_zicond, KVM_RISCV_ISA_EXT_ZICOND),
KVM_EXT_CFG("zicsr", ext_zicsr, KVM_RISCV_ISA_EXT_ZICSR),
@@ -281,7 +301,11 @@ static KVMCPUConfig kvm_multi_ext_cfgs[] = {
KVM_EXT_CFG("zihintntl", ext_zihintntl, KVM_RISCV_ISA_EXT_ZIHINTNTL),
KVM_EXT_CFG("zihintpause", ext_zihintpause, KVM_RISCV_ISA_EXT_ZIHINTPAUSE),
KVM_EXT_CFG("zihpm", ext_zihpm, KVM_RISCV_ISA_EXT_ZIHPM),
+ KVM_EXT_CFG("zimop", ext_zimop, KVM_RISCV_ISA_EXT_ZIMOP),
+ KVM_EXT_CFG("zcmop", ext_zcmop, KVM_RISCV_ISA_EXT_ZCMOP),
+ KVM_EXT_CFG("zabha", ext_zabha, KVM_RISCV_ISA_EXT_ZABHA),
KVM_EXT_CFG("zacas", ext_zacas, KVM_RISCV_ISA_EXT_ZACAS),
+ KVM_EXT_CFG("zawrs", ext_zawrs, KVM_RISCV_ISA_EXT_ZAWRS),
KVM_EXT_CFG("zfa", ext_zfa, KVM_RISCV_ISA_EXT_ZFA),
KVM_EXT_CFG("zfh", ext_zfh, KVM_RISCV_ISA_EXT_ZFH),
KVM_EXT_CFG("zfhmin", ext_zfhmin, KVM_RISCV_ISA_EXT_ZFHMIN),
@@ -292,6 +316,10 @@ static KVMCPUConfig kvm_multi_ext_cfgs[] = {
KVM_EXT_CFG("zbkc", ext_zbkc, KVM_RISCV_ISA_EXT_ZBKC),
KVM_EXT_CFG("zbkx", ext_zbkx, KVM_RISCV_ISA_EXT_ZBKX),
KVM_EXT_CFG("zbs", ext_zbs, KVM_RISCV_ISA_EXT_ZBS),
+ KVM_EXT_CFG("zca", ext_zca, KVM_RISCV_ISA_EXT_ZCA),
+ KVM_EXT_CFG("zcb", ext_zcb, KVM_RISCV_ISA_EXT_ZCB),
+ KVM_EXT_CFG("zcd", ext_zcd, KVM_RISCV_ISA_EXT_ZCD),
+ KVM_EXT_CFG("zcf", ext_zcf, KVM_RISCV_ISA_EXT_ZCF),
KVM_EXT_CFG("zknd", ext_zknd, KVM_RISCV_ISA_EXT_ZKND),
KVM_EXT_CFG("zkne", ext_zkne, KVM_RISCV_ISA_EXT_ZKNE),
KVM_EXT_CFG("zknh", ext_zknh, KVM_RISCV_ISA_EXT_ZKNH),
@@ -312,12 +340,18 @@ static KVMCPUConfig kvm_multi_ext_cfgs[] = {
KVM_EXT_CFG("zvksed", ext_zvksed, KVM_RISCV_ISA_EXT_ZVKSED),
KVM_EXT_CFG("zvksh", ext_zvksh, KVM_RISCV_ISA_EXT_ZVKSH),
KVM_EXT_CFG("zvkt", ext_zvkt, KVM_RISCV_ISA_EXT_ZVKT),
+ KVM_EXT_CFG("smnpm", ext_smnpm, KVM_RISCV_ISA_EXT_SMNPM),
KVM_EXT_CFG("smstateen", ext_smstateen, KVM_RISCV_ISA_EXT_SMSTATEEN),
KVM_EXT_CFG("ssaia", ext_ssaia, KVM_RISCV_ISA_EXT_SSAIA),
+ KVM_EXT_CFG("sscofpmf", ext_sscofpmf, KVM_RISCV_ISA_EXT_SSCOFPMF),
+ KVM_EXT_CFG("ssnpm", ext_ssnpm, KVM_RISCV_ISA_EXT_SSNPM),
KVM_EXT_CFG("sstc", ext_sstc, KVM_RISCV_ISA_EXT_SSTC),
+ KVM_EXT_CFG("svade", ext_svade, KVM_RISCV_ISA_EXT_SVADE),
+ KVM_EXT_CFG("svadu", ext_svadu, KVM_RISCV_ISA_EXT_SVADU),
KVM_EXT_CFG("svinval", ext_svinval, KVM_RISCV_ISA_EXT_SVINVAL),
KVM_EXT_CFG("svnapot", ext_svnapot, KVM_RISCV_ISA_EXT_SVNAPOT),
KVM_EXT_CFG("svpbmt", ext_svpbmt, KVM_RISCV_ISA_EXT_SVPBMT),
+ KVM_EXT_CFG("svvptc", ext_svvptc, KVM_RISCV_ISA_EXT_SVVPTC),
};
static void *kvmconfig_get_cfg_addr(RISCVCPU *cpu, KVMCPUConfig *kvmcfg)
@@ -419,7 +453,6 @@ static KVMCPUConfig kvm_sbi_dbcn = {
static void kvm_riscv_update_cpu_cfg_isa_ext(RISCVCPU *cpu, CPUState *cs)
{
- CPURISCVState *env = &cpu->env;
uint64_t id, reg;
int i, ret;
@@ -430,7 +463,7 @@ static void kvm_riscv_update_cpu_cfg_isa_ext(RISCVCPU *cpu, CPUState *cs)
continue;
}
- id = kvm_riscv_reg_id_ulong(env, KVM_REG_RISCV_ISA_EXT,
+ id = KVM_RISCV_REG_ID_ULONG(KVM_REG_RISCV_ISA_EXT,
multi_ext_cfg->kvm_reg_id);
reg = kvm_cpu_cfg_get(cpu, multi_ext_cfg);
ret = kvm_set_one_reg(cs, id, &reg);
@@ -555,14 +588,14 @@ static int kvm_riscv_get_regs_core(CPUState *cs)
target_ulong reg;
CPURISCVState *env = &RISCV_CPU(cs)->env;
- ret = kvm_get_one_reg(cs, RISCV_CORE_REG(env, regs.pc), &reg);
+ ret = kvm_get_one_reg(cs, RISCV_CORE_REG(regs.pc), &reg);
if (ret) {
return ret;
}
env->pc = reg;
for (i = 1; i < 32; i++) {
- uint64_t id = kvm_riscv_reg_id_ulong(env, KVM_REG_RISCV_CORE, i);
+ uint64_t id = KVM_RISCV_REG_ID_ULONG(KVM_REG_RISCV_CORE, i);
ret = kvm_get_one_reg(cs, id, &reg);
if (ret) {
return ret;
@@ -581,13 +614,13 @@ static int kvm_riscv_put_regs_core(CPUState *cs)
CPURISCVState *env = &RISCV_CPU(cs)->env;
reg = env->pc;
- ret = kvm_set_one_reg(cs, RISCV_CORE_REG(env, regs.pc), &reg);
+ ret = kvm_set_one_reg(cs, RISCV_CORE_REG(regs.pc), &reg);
if (ret) {
return ret;
}
for (i = 1; i < 32; i++) {
- uint64_t id = kvm_riscv_reg_id_ulong(env, KVM_REG_RISCV_CORE, i);
+ uint64_t id = KVM_RISCV_REG_ID_ULONG(KVM_REG_RISCV_CORE, i);
reg = env->gpr[i];
ret = kvm_set_one_reg(cs, id, &reg);
if (ret) {
@@ -600,38 +633,79 @@ static int kvm_riscv_put_regs_core(CPUState *cs)
static int kvm_riscv_get_regs_csr(CPUState *cs)
{
- CPURISCVState *env = &RISCV_CPU(cs)->env;
+ RISCVCPU *cpu = RISCV_CPU(cs);
+ uint64_t reg;
+ int i, ret;
+
+ for (i = 0; i < ARRAY_SIZE(kvm_csr_cfgs); i++) {
+ KVMCPUConfig *csr_cfg = &kvm_csr_cfgs[i];
- KVM_RISCV_GET_CSR(cs, env, sstatus, env->mstatus);
- KVM_RISCV_GET_CSR(cs, env, sie, env->mie);
- KVM_RISCV_GET_CSR(cs, env, stvec, env->stvec);
- KVM_RISCV_GET_CSR(cs, env, sscratch, env->sscratch);
- KVM_RISCV_GET_CSR(cs, env, sepc, env->sepc);
- KVM_RISCV_GET_CSR(cs, env, scause, env->scause);
- KVM_RISCV_GET_CSR(cs, env, stval, env->stval);
- KVM_RISCV_GET_CSR(cs, env, sip, env->mip);
- KVM_RISCV_GET_CSR(cs, env, satp, env->satp);
+ if (!csr_cfg->supported) {
+ continue;
+ }
+
+ ret = kvm_get_one_reg(cs, csr_cfg->kvm_reg_id, &reg);
+ if (ret) {
+ return ret;
+ }
+
+ if (csr_cfg->prop_size == sizeof(uint32_t)) {
+ kvm_cpu_csr_set_u32(cpu, csr_cfg, (uint32_t)reg);
+ } else if (csr_cfg->prop_size == sizeof(uint64_t)) {
+ kvm_cpu_csr_set_u64(cpu, csr_cfg, reg);
+ } else {
+ g_assert_not_reached();
+ }
+ }
return 0;
}
static int kvm_riscv_put_regs_csr(CPUState *cs)
{
- CPURISCVState *env = &RISCV_CPU(cs)->env;
+ RISCVCPU *cpu = RISCV_CPU(cs);
+ uint64_t reg;
+ int i, ret;
- KVM_RISCV_SET_CSR(cs, env, sstatus, env->mstatus);
- KVM_RISCV_SET_CSR(cs, env, sie, env->mie);
- KVM_RISCV_SET_CSR(cs, env, stvec, env->stvec);
- KVM_RISCV_SET_CSR(cs, env, sscratch, env->sscratch);
- KVM_RISCV_SET_CSR(cs, env, sepc, env->sepc);
- KVM_RISCV_SET_CSR(cs, env, scause, env->scause);
- KVM_RISCV_SET_CSR(cs, env, stval, env->stval);
- KVM_RISCV_SET_CSR(cs, env, sip, env->mip);
- KVM_RISCV_SET_CSR(cs, env, satp, env->satp);
+ for (i = 0; i < ARRAY_SIZE(kvm_csr_cfgs); i++) {
+ KVMCPUConfig *csr_cfg = &kvm_csr_cfgs[i];
+
+ if (!csr_cfg->supported) {
+ continue;
+ }
+
+ if (csr_cfg->prop_size == sizeof(uint32_t)) {
+ reg = kvm_cpu_csr_get_u32(cpu, csr_cfg);
+ } else if (csr_cfg->prop_size == sizeof(uint64_t)) {
+ reg = kvm_cpu_csr_get_u64(cpu, csr_cfg);
+ } else {
+ g_assert_not_reached();
+ }
+
+ ret = kvm_set_one_reg(cs, csr_cfg->kvm_reg_id, &reg);
+ if (ret) {
+ return ret;
+ }
+ }
return 0;
}
+static void kvm_riscv_reset_regs_csr(CPURISCVState *env)
+{
+ env->mstatus = 0;
+ env->mie = 0;
+ env->stvec = 0;
+ env->sscratch = 0;
+ env->sepc = 0;
+ env->scause = 0;
+ env->stval = 0;
+ env->mip = 0;
+ env->satp = 0;
+ env->scounteren = 0;
+ env->senvcfg = 0;
+}
+
static int kvm_riscv_get_regs_fp(CPUState *cs)
{
int ret = 0;
@@ -751,11 +825,11 @@ static void kvm_riscv_put_regs_timer(CPUState *cs)
env->kvm_timer_dirty = false;
}
-uint64_t kvm_riscv_get_timebase_frequency(CPUState *cs)
+uint64_t kvm_riscv_get_timebase_frequency(RISCVCPU *cpu)
{
uint64_t reg;
- KVM_RISCV_GET_TIMER(cs, frequency, reg);
+ KVM_RISCV_GET_TIMER(CPU(cpu), frequency, reg);
return reg;
}
@@ -772,26 +846,26 @@ static int kvm_riscv_get_regs_vector(CPUState *cs)
return 0;
}
- ret = kvm_get_one_reg(cs, RISCV_VECTOR_CSR_REG(env, vstart), &reg);
+ ret = kvm_get_one_reg(cs, RISCV_VECTOR_CSR_REG(vstart), &reg);
if (ret) {
return ret;
}
env->vstart = reg;
- ret = kvm_get_one_reg(cs, RISCV_VECTOR_CSR_REG(env, vl), &reg);
+ ret = kvm_get_one_reg(cs, RISCV_VECTOR_CSR_REG(vl), &reg);
if (ret) {
return ret;
}
env->vl = reg;
- ret = kvm_get_one_reg(cs, RISCV_VECTOR_CSR_REG(env, vtype), &reg);
+ ret = kvm_get_one_reg(cs, RISCV_VECTOR_CSR_REG(vtype), &reg);
if (ret) {
return ret;
}
env->vtype = reg;
if (kvm_v_vlenb.supported) {
- ret = kvm_get_one_reg(cs, RISCV_VECTOR_CSR_REG(env, vlenb), &reg);
+ ret = kvm_get_one_reg(cs, RISCV_VECTOR_CSR_REG(vlenb), &reg);
if (ret) {
return ret;
}
@@ -829,26 +903,26 @@ static int kvm_riscv_put_regs_vector(CPUState *cs)
}
reg = env->vstart;
- ret = kvm_set_one_reg(cs, RISCV_VECTOR_CSR_REG(env, vstart), &reg);
+ ret = kvm_set_one_reg(cs, RISCV_VECTOR_CSR_REG(vstart), &reg);
if (ret) {
return ret;
}
reg = env->vl;
- ret = kvm_set_one_reg(cs, RISCV_VECTOR_CSR_REG(env, vl), &reg);
+ ret = kvm_set_one_reg(cs, RISCV_VECTOR_CSR_REG(vl), &reg);
if (ret) {
return ret;
}
reg = env->vtype;
- ret = kvm_set_one_reg(cs, RISCV_VECTOR_CSR_REG(env, vtype), &reg);
+ ret = kvm_set_one_reg(cs, RISCV_VECTOR_CSR_REG(vtype), &reg);
if (ret) {
return ret;
}
if (kvm_v_vlenb.supported) {
reg = cpu->cfg.vlenb;
- ret = kvm_set_one_reg(cs, RISCV_VECTOR_CSR_REG(env, vlenb), &reg);
+ ret = kvm_set_one_reg(cs, RISCV_VECTOR_CSR_REG(vlenb), &reg);
for (int i = 0; i < 32; i++) {
/*
@@ -927,25 +1001,24 @@ static void kvm_riscv_destroy_scratch_vcpu(KVMScratchCPU *scratch)
static void kvm_riscv_init_machine_ids(RISCVCPU *cpu, KVMScratchCPU *kvmcpu)
{
- CPURISCVState *env = &cpu->env;
struct kvm_one_reg reg;
int ret;
- reg.id = RISCV_CONFIG_REG(env, mvendorid);
+ reg.id = RISCV_CONFIG_REG(mvendorid);
reg.addr = (uint64_t)&cpu->cfg.mvendorid;
ret = ioctl(kvmcpu->cpufd, KVM_GET_ONE_REG, &reg);
if (ret != 0) {
error_report("Unable to retrieve mvendorid from host, error %d", ret);
}
- reg.id = RISCV_CONFIG_REG(env, marchid);
+ reg.id = RISCV_CONFIG_REG(marchid);
reg.addr = (uint64_t)&cpu->cfg.marchid;
ret = ioctl(kvmcpu->cpufd, KVM_GET_ONE_REG, &reg);
if (ret != 0) {
error_report("Unable to retrieve marchid from host, error %d", ret);
}
- reg.id = RISCV_CONFIG_REG(env, mimpid);
+ reg.id = RISCV_CONFIG_REG(mimpid);
reg.addr = (uint64_t)&cpu->cfg.mimpid;
ret = ioctl(kvmcpu->cpufd, KVM_GET_ONE_REG, &reg);
if (ret != 0) {
@@ -960,7 +1033,7 @@ static void kvm_riscv_init_misa_ext_mask(RISCVCPU *cpu,
struct kvm_one_reg reg;
int ret;
- reg.id = RISCV_CONFIG_REG(env, isa);
+ reg.id = RISCV_CONFIG_REG(isa);
reg.addr = (uint64_t)&env->misa_ext_mask;
ret = ioctl(kvmcpu->cpufd, KVM_GET_ONE_REG, &reg);
@@ -977,11 +1050,10 @@ static void kvm_riscv_init_misa_ext_mask(RISCVCPU *cpu,
static void kvm_riscv_read_cbomz_blksize(RISCVCPU *cpu, KVMScratchCPU *kvmcpu,
KVMCPUConfig *cbomz_cfg)
{
- CPURISCVState *env = &cpu->env;
struct kvm_one_reg reg;
int ret;
- reg.id = kvm_riscv_reg_id_ulong(env, KVM_REG_RISCV_CONFIG,
+ reg.id = KVM_RISCV_REG_ID_ULONG(KVM_REG_RISCV_CONFIG,
cbomz_cfg->kvm_reg_id);
reg.addr = (uint64_t)kvmconfig_get_cfg_addr(cpu, cbomz_cfg);
ret = ioctl(kvmcpu->cpufd, KVM_GET_ONE_REG, &reg);
@@ -995,7 +1067,6 @@ static void kvm_riscv_read_cbomz_blksize(RISCVCPU *cpu, KVMScratchCPU *kvmcpu,
static void kvm_riscv_read_multiext_legacy(RISCVCPU *cpu,
KVMScratchCPU *kvmcpu)
{
- CPURISCVState *env = &cpu->env;
uint64_t val;
int i, ret;
@@ -1003,7 +1074,7 @@ static void kvm_riscv_read_multiext_legacy(RISCVCPU *cpu,
KVMCPUConfig *multi_ext_cfg = &kvm_multi_ext_cfgs[i];
struct kvm_one_reg reg;
- reg.id = kvm_riscv_reg_id_ulong(env, KVM_REG_RISCV_ISA_EXT,
+ reg.id = KVM_RISCV_REG_ID_ULONG(KVM_REG_RISCV_ISA_EXT,
multi_ext_cfg->kvm_reg_id);
reg.addr = (uint64_t)&val;
ret = ioctl(kvmcpu->cpufd, KVM_GET_ONE_REG, &reg);
@@ -1033,6 +1104,32 @@ static void kvm_riscv_read_multiext_legacy(RISCVCPU *cpu,
}
}
+static void kvm_riscv_read_csr_cfg_legacy(KVMScratchCPU *kvmcpu)
+{
+ uint64_t val;
+ int i, ret;
+
+ for (i = 0; i < ARRAY_SIZE(kvm_csr_cfgs); i++) {
+ KVMCPUConfig *csr_cfg = &kvm_csr_cfgs[i];
+ struct kvm_one_reg reg;
+
+ reg.id = csr_cfg->kvm_reg_id;
+ reg.addr = (uint64_t)&val;
+ ret = ioctl(kvmcpu->cpufd, KVM_GET_ONE_REG, &reg);
+ if (ret != 0) {
+ if (errno == EINVAL) {
+ csr_cfg->supported = false;
+ } else {
+ error_report("Unable to read KVM CSR %s: %s",
+ csr_cfg->name, strerror(errno));
+ exit(EXIT_FAILURE);
+ }
+ } else {
+ csr_cfg->supported = true;
+ }
+ }
+}
+
static int uint64_cmp(const void *a, const void *b)
{
uint64_t val1 = *(const uint64_t *)a;
@@ -1050,7 +1147,6 @@ static int uint64_cmp(const void *a, const void *b)
}
static void kvm_riscv_check_sbi_dbcn_support(RISCVCPU *cpu,
- KVMScratchCPU *kvmcpu,
struct kvm_reg_list *reglist)
{
struct kvm_reg_list *reg_search;
@@ -1090,12 +1186,31 @@ static void kvm_riscv_read_vlenb(RISCVCPU *cpu, KVMScratchCPU *kvmcpu,
}
}
-static void kvm_riscv_init_multiext_cfg(RISCVCPU *cpu, KVMScratchCPU *kvmcpu)
+static void kvm_riscv_read_csr_cfg(struct kvm_reg_list *reglist)
{
+ struct kvm_reg_list *reg_search;
+ uint64_t reg_id;
+
+ for (int i = 0; i < ARRAY_SIZE(kvm_csr_cfgs); i++) {
+ KVMCPUConfig *csr_cfg = &kvm_csr_cfgs[i];
+
+ reg_id = csr_cfg->kvm_reg_id;
+ reg_search = bsearch(&reg_id, reglist->reg, reglist->n,
+ sizeof(uint64_t), uint64_cmp);
+ if (!reg_search) {
+ continue;
+ }
+
+ csr_cfg->supported = true;
+ }
+}
+
+static void kvm_riscv_init_cfg(RISCVCPU *cpu, KVMScratchCPU *kvmcpu)
+{
+ g_autofree struct kvm_reg_list *reglist = NULL;
KVMCPUConfig *multi_ext_cfg;
struct kvm_one_reg reg;
struct kvm_reg_list rl_struct;
- struct kvm_reg_list *reglist;
uint64_t val, reg_id, *reg_search;
int i, ret;
@@ -1107,7 +1222,9 @@ static void kvm_riscv_init_multiext_cfg(RISCVCPU *cpu, KVMScratchCPU *kvmcpu)
* (EINVAL). Use read_legacy() in this case.
*/
if (errno == EINVAL) {
- return kvm_riscv_read_multiext_legacy(cpu, kvmcpu);
+ kvm_riscv_read_multiext_legacy(cpu, kvmcpu);
+ kvm_riscv_read_csr_cfg_legacy(kvmcpu);
+ return;
} else if (errno != E2BIG) {
/*
* E2BIG is an expected error message for the API since we
@@ -1136,7 +1253,7 @@ static void kvm_riscv_init_multiext_cfg(RISCVCPU *cpu, KVMScratchCPU *kvmcpu)
for (i = 0; i < ARRAY_SIZE(kvm_multi_ext_cfgs); i++) {
multi_ext_cfg = &kvm_multi_ext_cfgs[i];
- reg_id = kvm_riscv_reg_id_ulong(&cpu->env, KVM_REG_RISCV_ISA_EXT,
+ reg_id = KVM_RISCV_REG_ID_ULONG(KVM_REG_RISCV_ISA_EXT,
multi_ext_cfg->kvm_reg_id);
reg_search = bsearch(&reg_id, reglist->reg, reglist->n,
sizeof(uint64_t), uint64_cmp);
@@ -1169,7 +1286,8 @@ static void kvm_riscv_init_multiext_cfg(RISCVCPU *cpu, KVMScratchCPU *kvmcpu)
kvm_riscv_read_vlenb(cpu, kvmcpu, reglist);
}
- kvm_riscv_check_sbi_dbcn_support(cpu, kvmcpu, reglist);
+ kvm_riscv_check_sbi_dbcn_support(cpu, reglist);
+ kvm_riscv_read_csr_cfg(reglist);
}
static void riscv_init_kvm_registers(Object *cpu_obj)
@@ -1183,7 +1301,7 @@ static void riscv_init_kvm_registers(Object *cpu_obj)
kvm_riscv_init_machine_ids(cpu, &kvmcpu);
kvm_riscv_init_misa_ext_mask(cpu, &kvmcpu);
- kvm_riscv_init_multiext_cfg(cpu, &kvmcpu);
+ kvm_riscv_init_cfg(cpu, &kvmcpu);
kvm_riscv_destroy_scratch_vcpu(&kvmcpu);
}
@@ -1192,7 +1310,7 @@ const KVMCapabilityInfo kvm_arch_required_capabilities[] = {
KVM_CAP_LAST_INFO
};
-int kvm_arch_get_registers(CPUState *cs)
+int kvm_arch_get_registers(CPUState *cs, Error **errp)
{
int ret = 0;
@@ -1237,7 +1355,7 @@ int kvm_riscv_sync_mpstate_to_kvm(RISCVCPU *cpu, int state)
return 0;
}
-int kvm_arch_put_registers(CPUState *cs, int level)
+int kvm_arch_put_registers(CPUState *cs, int level, Error **errp)
{
int ret = 0;
@@ -1315,12 +1433,11 @@ void kvm_arch_init_irq_routing(KVMState *s)
static int kvm_vcpu_set_machine_ids(RISCVCPU *cpu, CPUState *cs)
{
- CPURISCVState *env = &cpu->env;
target_ulong reg;
uint64_t id;
int ret;
- id = RISCV_CONFIG_REG(env, mvendorid);
+ id = RISCV_CONFIG_REG(mvendorid);
/*
* cfg.mvendorid is an uint32 but a target_ulong will
* be written. Assign it to a target_ulong var to avoid
@@ -1332,13 +1449,13 @@ static int kvm_vcpu_set_machine_ids(RISCVCPU *cpu, CPUState *cs)
return ret;
}
- id = RISCV_CONFIG_REG(env, marchid);
+ id = RISCV_CONFIG_REG(marchid);
ret = kvm_set_one_reg(cs, id, &cpu->cfg.marchid);
if (ret != 0) {
return ret;
}
- id = RISCV_CONFIG_REG(env, mimpid);
+ id = RISCV_CONFIG_REG(mimpid);
ret = kvm_set_one_reg(cs, id, &cpu->cfg.mimpid);
return ret;
@@ -1355,6 +1472,11 @@ static int kvm_vcpu_enable_sbi_dbcn(RISCVCPU *cpu, CPUState *cs)
return kvm_set_one_reg(cs, kvm_sbi_dbcn.kvm_reg_id, &reg);
}
+int kvm_arch_pre_create_vcpu(CPUState *cpu, Error **errp)
+{
+ return 0;
+}
+
int kvm_arch_init_vcpu(CPUState *cs)
{
int ret = 0;
@@ -1401,11 +1523,6 @@ int kvm_arch_init(MachineState *ms, KVMState *s)
int kvm_arch_irqchip_create(KVMState *s)
{
- if (kvm_kernel_irqchip_split()) {
- error_report("-machine kernel_irqchip=split is not supported on RISC-V.");
- exit(1);
- }
-
/*
* We can create the VAIA using the newer device control API.
*/
@@ -1601,23 +1718,14 @@ void kvm_riscv_reset_vcpu(RISCVCPU *cpu)
CPURISCVState *env = &cpu->env;
int i;
- if (!kvm_enabled()) {
- return;
- }
for (i = 0; i < 32; i++) {
env->gpr[i] = 0;
}
env->pc = cpu->env.kernel_addr;
env->gpr[10] = kvm_arch_vcpu_id(CPU(cpu)); /* a0 */
env->gpr[11] = cpu->env.fdt_addr; /* a1 */
- env->satp = 0;
- env->mie = 0;
- env->stvec = 0;
- env->sscratch = 0;
- env->sepc = 0;
- env->scause = 0;
- env->stval = 0;
- env->mip = 0;
+
+ kvm_riscv_reset_regs_csr(env);
}
void kvm_riscv_set_irq(RISCVCPU *cpu, int irq, int level)
@@ -1676,9 +1784,9 @@ void kvm_arch_accel_class_init(ObjectClass *oc)
object_class_property_add_str(oc, "riscv-aia", riscv_get_kvm_aia,
riscv_set_kvm_aia);
object_class_property_set_description(oc, "riscv-aia",
- "Set KVM AIA mode. Valid values are "
- "emul, hwaccel, and auto. Default "
- "is auto.");
+ "Set KVM AIA mode. Valid values are 'emul', 'hwaccel' and 'auto'. "
+ "Changing KVM AIA modes relies on host support. Defaults to 'auto' "
+ "if the host supports it");
object_property_set_default_str(object_class_property_find(oc, "riscv-aia"),
"auto");
}
@@ -1695,6 +1803,7 @@ void kvm_riscv_aia_create(MachineState *machine, uint64_t group_shift,
uint64_t max_hart_per_socket = 0;
uint64_t socket, base_hart, hart_count, socket_imsic_base, imsic_addr;
uint64_t socket_bits, hart_bits, guest_bits;
+ uint64_t max_group_id;
aia_fd = kvm_create_device(kvm_state, KVM_DEV_TYPE_RISCV_AIA, false);
@@ -1710,28 +1819,46 @@ void kvm_riscv_aia_create(MachineState *machine, uint64_t group_shift,
error_report("KVM AIA: failed to get current KVM AIA mode");
exit(1);
}
- qemu_log("KVM AIA: default mode is %s\n",
- kvm_aia_mode_str(default_aia_mode));
if (default_aia_mode != aia_mode) {
ret = kvm_device_access(aia_fd, KVM_DEV_RISCV_AIA_GRP_CONFIG,
KVM_DEV_RISCV_AIA_CONFIG_MODE,
&aia_mode, true, NULL);
- if (ret < 0)
- warn_report("KVM AIA: failed to set KVM AIA mode");
- else
- qemu_log("KVM AIA: set current mode to %s\n",
- kvm_aia_mode_str(aia_mode));
- }
+ if (ret < 0) {
+ warn_report("KVM AIA: failed to set KVM AIA mode '%s', using "
+ "default host mode '%s'",
+ kvm_aia_mode_str(aia_mode),
+ kvm_aia_mode_str(default_aia_mode));
- ret = kvm_device_access(aia_fd, KVM_DEV_RISCV_AIA_GRP_CONFIG,
- KVM_DEV_RISCV_AIA_CONFIG_SRCS,
- &aia_irq_num, true, NULL);
- if (ret < 0) {
- error_report("KVM AIA: failed to set number of input irq lines");
- exit(1);
+ /* failed to change AIA mode, use default */
+ aia_mode = default_aia_mode;
+ }
}
+ /*
+ * Skip APLIC creation in KVM if we're running split mode.
+ * This is done by leaving KVM_DEV_RISCV_AIA_CONFIG_SRCS
+ * unset. We can also skip KVM_DEV_RISCV_AIA_ADDR_APLIC
+ * since KVM won't be using it.
+ */
+ if (!kvm_kernel_irqchip_split()) {
+ ret = kvm_device_access(aia_fd, KVM_DEV_RISCV_AIA_GRP_CONFIG,
+ KVM_DEV_RISCV_AIA_CONFIG_SRCS,
+ &aia_irq_num, true, NULL);
+ if (ret < 0) {
+ error_report("KVM AIA: failed to set number of input irq lines");
+ exit(1);
+ }
+
+ ret = kvm_device_access(aia_fd, KVM_DEV_RISCV_AIA_GRP_ADDR,
+ KVM_DEV_RISCV_AIA_ADDR_APLIC,
+ &aplic_base, true, NULL);
+ if (ret < 0) {
+ error_report("KVM AIA: failed to set the base address of APLIC");
+ exit(1);
+ }
+ }
+
ret = kvm_device_access(aia_fd, KVM_DEV_RISCV_AIA_GRP_CONFIG,
KVM_DEV_RISCV_AIA_CONFIG_IDS,
&aia_msi_num, true, NULL);
@@ -1742,7 +1869,8 @@ void kvm_riscv_aia_create(MachineState *machine, uint64_t group_shift,
if (socket_count > 1) {
- socket_bits = find_last_bit(&socket_count, BITS_PER_LONG) + 1;
+ max_group_id = socket_count - 1;
+ socket_bits = find_last_bit(&max_group_id, BITS_PER_LONG) + 1;
ret = kvm_device_access(aia_fd, KVM_DEV_RISCV_AIA_GRP_CONFIG,
KVM_DEV_RISCV_AIA_CONFIG_GROUP_BITS,
&socket_bits, true, NULL);
@@ -1770,14 +1898,6 @@ void kvm_riscv_aia_create(MachineState *machine, uint64_t group_shift,
exit(1);
}
- ret = kvm_device_access(aia_fd, KVM_DEV_RISCV_AIA_GRP_ADDR,
- KVM_DEV_RISCV_AIA_ADDR_APLIC,
- &aplic_base, true, NULL);
- if (ret < 0) {
- error_report("KVM AIA: failed to set the base address of APLIC");
- exit(1);
- }
-
for (socket = 0; socket < socket_count; socket++) {
socket_imsic_base = imsic_base + socket * (1U << group_shift);
hart_count = riscv_socket_hart_count(machine, socket);
@@ -1890,7 +2010,7 @@ void riscv_kvm_cpu_finalize_features(RISCVCPU *cpu, Error **errp)
if (cpu->cfg.ext_zicbom &&
riscv_cpu_option_set(kvm_cbom_blocksize.name)) {
- reg.id = kvm_riscv_reg_id_ulong(env, KVM_REG_RISCV_CONFIG,
+ reg.id = KVM_RISCV_REG_ID_ULONG(KVM_REG_RISCV_CONFIG,
kvm_cbom_blocksize.kvm_reg_id);
reg.addr = (uint64_t)&val;
ret = ioctl(kvmcpu.cpufd, KVM_GET_ONE_REG, &reg);
@@ -1909,7 +2029,7 @@ void riscv_kvm_cpu_finalize_features(RISCVCPU *cpu, Error **errp)
if (cpu->cfg.ext_zicboz &&
riscv_cpu_option_set(kvm_cboz_blocksize.name)) {
- reg.id = kvm_riscv_reg_id_ulong(env, KVM_REG_RISCV_CONFIG,
+ reg.id = KVM_RISCV_REG_ID_ULONG(KVM_REG_RISCV_CONFIG,
kvm_cboz_blocksize.kvm_reg_id);
reg.addr = (uint64_t)&val;
ret = ioctl(kvmcpu.cpufd, KVM_GET_ONE_REG, &reg);
@@ -1950,7 +2070,7 @@ void riscv_kvm_cpu_finalize_features(RISCVCPU *cpu, Error **errp)
kvm_riscv_destroy_scratch_vcpu(&kvmcpu);
}
-static void kvm_cpu_accel_class_init(ObjectClass *oc, void *data)
+static void kvm_cpu_accel_class_init(ObjectClass *oc, const void *data)
{
AccelCPUClass *acc = ACCEL_CPU_CLASS(oc);
@@ -1971,22 +2091,25 @@ static void kvm_cpu_accel_register_types(void)
}
type_init(kvm_cpu_accel_register_types);
-static void riscv_host_cpu_class_init(ObjectClass *c, void *data)
-{
- RISCVCPUClass *mcc = RISCV_CPU_CLASS(c);
-
-#if defined(TARGET_RISCV32)
- mcc->misa_mxl_max = MXL_RV32;
-#elif defined(TARGET_RISCV64)
- mcc->misa_mxl_max = MXL_RV64;
-#endif
-}
-
static const TypeInfo riscv_kvm_cpu_type_infos[] = {
{
.name = TYPE_RISCV_CPU_HOST,
.parent = TYPE_RISCV_CPU,
- .class_init = riscv_host_cpu_class_init,
+#if defined(TARGET_RISCV32)
+ .class_data = &(const RISCVCPUDef) {
+ .misa_mxl_max = MXL_RV32,
+ .priv_spec = RISCV_PROFILE_ATTR_UNUSED,
+ .vext_spec = RISCV_PROFILE_ATTR_UNUSED,
+ .cfg.max_satp_mode = -1,
+ },
+#elif defined(TARGET_RISCV64)
+ .class_data = &(const RISCVCPUDef) {
+ .misa_mxl_max = MXL_RV64,
+ .priv_spec = RISCV_PROFILE_ATTR_UNUSED,
+ .vext_spec = RISCV_PROFILE_ATTR_UNUSED,
+ .cfg.max_satp_mode = -1,
+ },
+#endif
}
};
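
Two things drive this file's macro rework: a KVM register id is a plain bitwise OR of arch, size, type and index fields, and the size field can now be chosen at compile time because a KVM guest's XLEN always matches the TARGET_RISCV32/64 build, so the old runtime mxl switch was dead weight. A sketch of the composition with illustrative field values (the real constants come from the kernel's <linux/kvm.h>):

    #include <stdint.h>

    /* illustrative values only; not the kernel's definitions */
    #define SK_KVM_REG_RISCV    (0x8000000000000000ull | (0x14ull << 32))
    #define SK_KVM_REG_SIZE_U64  0x0030000000000000ull

    static uint64_t sketch_reg_id_u64(uint64_t type, uint64_t idx)
    {
        /* arch | size | type | index, as in KVM_RISCV_REG_ID_U64 */
        return SK_KVM_REG_RISCV | SK_KVM_REG_SIZE_U64 | type | idx;
    }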
diff --git a/target/riscv/kvm/kvm_riscv.h b/target/riscv/kvm/kvm_riscv.h
index 5851898..b2bcd10 100644
--- a/target/riscv/kvm/kvm_riscv.h
+++ b/target/riscv/kvm/kvm_riscv.h
@@ -19,6 +19,8 @@
#ifndef QEMU_KVM_RISCV_H
#define QEMU_KVM_RISCV_H
+#include "target/riscv/cpu-qom.h"
+
void kvm_riscv_reset_vcpu(RISCVCPU *cpu);
void kvm_riscv_set_irq(RISCVCPU *cpu, int irq, int level);
void kvm_riscv_aia_create(MachineState *machine, uint64_t group_shift,
@@ -28,6 +30,6 @@ void kvm_riscv_aia_create(MachineState *machine, uint64_t group_shift,
void riscv_kvm_aplic_request(void *opaque, int irq, int level);
int kvm_riscv_sync_mpstate_to_kvm(RISCVCPU *cpu, int state);
void riscv_kvm_cpu_finalize_features(RISCVCPU *cpu, Error **errp);
-uint64_t kvm_riscv_get_timebase_frequency(CPUState *cs);
+uint64_t kvm_riscv_get_timebase_frequency(RISCVCPU *cpu);
#endif
diff --git a/target/riscv/m128_helper.c b/target/riscv/m128_helper.c
index ec14aaa..7d9b83b 100644
--- a/target/riscv/m128_helper.c
+++ b/target/riscv/m128_helper.c
@@ -19,7 +19,6 @@
#include "qemu/osdep.h"
#include "cpu.h"
-#include "exec/exec-all.h"
#include "exec/helper-proto.h"
target_ulong HELPER(divu_i128)(CPURISCVState *env,
diff --git a/target/riscv/machine.c b/target/riscv/machine.c
index 492c2c6..c97e9ce 100644
--- a/target/riscv/machine.c
+++ b/target/riscv/machine.c
@@ -19,9 +19,9 @@
#include "qemu/osdep.h"
#include "cpu.h"
#include "qemu/error-report.h"
-#include "sysemu/kvm.h"
+#include "system/kvm.h"
#include "migration/cpu.h"
-#include "sysemu/cpu-timers.h"
+#include "exec/icount.h"
#include "debug.h"
static bool pmp_needed(void *opaque)
@@ -152,25 +152,15 @@ static const VMStateDescription vmstate_vector = {
static bool pointermasking_needed(void *opaque)
{
- RISCVCPU *cpu = opaque;
- CPURISCVState *env = &cpu->env;
-
- return riscv_has_ext(env, RVJ);
+ return false;
}
static const VMStateDescription vmstate_pointermasking = {
.name = "cpu/pointer_masking",
- .version_id = 1,
- .minimum_version_id = 1,
+ .version_id = 2,
+ .minimum_version_id = 2,
.needed = pointermasking_needed,
.fields = (const VMStateField[]) {
- VMSTATE_UINTTL(env.mmte, RISCVCPU),
- VMSTATE_UINTTL(env.mpmmask, RISCVCPU),
- VMSTATE_UINTTL(env.mpmbase, RISCVCPU),
- VMSTATE_UINTTL(env.spmmask, RISCVCPU),
- VMSTATE_UINTTL(env.spmbase, RISCVCPU),
- VMSTATE_UINTTL(env.upmmask, RISCVCPU),
- VMSTATE_UINTTL(env.upmbase, RISCVCPU),
VMSTATE_END_OF_LIST()
}
@@ -180,7 +170,7 @@ static bool rv128_needed(void *opaque)
{
RISCVCPUClass *mcc = RISCV_CPU_GET_CLASS(opaque);
- return mcc->misa_mxl_max == MXL_RV128;
+ return mcc->def->misa_mxl_max == MXL_RV128;
}
static const VMStateDescription vmstate_rv128 = {
@@ -266,7 +256,6 @@ static int riscv_cpu_post_load(void *opaque, int version_id)
CPURISCVState *env = &cpu->env;
env->xl = cpu_recompute_xl(env);
- riscv_cpu_update_mask(env);
return 0;
}
@@ -311,6 +300,30 @@ static const VMStateDescription vmstate_envcfg = {
}
};
+static bool ctr_needed(void *opaque)
+{
+ RISCVCPU *cpu = opaque;
+
+ return cpu->cfg.ext_smctr || cpu->cfg.ext_ssctr;
+}
+
+static const VMStateDescription vmstate_ctr = {
+ .name = "cpu/ctr",
+ .version_id = 1,
+ .minimum_version_id = 1,
+ .needed = ctr_needed,
+ .fields = (const VMStateField[]) {
+ VMSTATE_UINT64(env.mctrctl, RISCVCPU),
+ VMSTATE_UINT32(env.sctrdepth, RISCVCPU),
+ VMSTATE_UINT32(env.sctrstatus, RISCVCPU),
+ VMSTATE_UINT64(env.vsctrctl, RISCVCPU),
+ VMSTATE_UINT64_ARRAY(env.ctr_src, RISCVCPU, 16 << SCTRDEPTH_MAX),
+ VMSTATE_UINT64_ARRAY(env.ctr_dst, RISCVCPU, 16 << SCTRDEPTH_MAX),
+ VMSTATE_UINT64_ARRAY(env.ctr_data, RISCVCPU, 16 << SCTRDEPTH_MAX),
+ VMSTATE_END_OF_LIST()
+ }
+};
+
static bool pmu_needed(void *opaque)
{
RISCVCPU *cpu = opaque;
@@ -350,6 +363,42 @@ static const VMStateDescription vmstate_jvt = {
}
};
+static bool elp_needed(void *opaque)
+{
+ RISCVCPU *cpu = opaque;
+
+ return cpu->cfg.ext_zicfilp;
+}
+
+static const VMStateDescription vmstate_elp = {
+ .name = "cpu/elp",
+ .version_id = 1,
+ .minimum_version_id = 1,
+ .needed = elp_needed,
+ .fields = (const VMStateField[]) {
+ VMSTATE_BOOL(env.elp, RISCVCPU),
+ VMSTATE_END_OF_LIST()
+ }
+};
+
+static bool ssp_needed(void *opaque)
+{
+ RISCVCPU *cpu = opaque;
+
+ return cpu->cfg.ext_zicfiss;
+}
+
+static const VMStateDescription vmstate_ssp = {
+ .name = "cpu/ssp",
+ .version_id = 1,
+ .minimum_version_id = 1,
+ .needed = ssp_needed,
+ .fields = (const VMStateField[]) {
+ VMSTATE_UINTTL(env.ssp, RISCVCPU),
+ VMSTATE_END_OF_LIST()
+ }
+};
+
const VMStateDescription vmstate_riscv_cpu = {
.name = "cpu",
.version_id = 10,
@@ -398,6 +447,7 @@ const VMStateDescription vmstate_riscv_cpu = {
VMSTATE_UINTTL(env.siselect, RISCVCPU),
VMSTATE_UINT32(env.scounteren, RISCVCPU),
VMSTATE_UINT32(env.mcounteren, RISCVCPU),
+ VMSTATE_UINT32(env.scountinhibit, RISCVCPU),
VMSTATE_UINT32(env.mcountinhibit, RISCVCPU),
VMSTATE_STRUCT_ARRAY(env.pmu_ctrs, RISCVCPU, RV_MAX_MHPMCOUNTERS, 0,
vmstate_pmu_ctr_state, PMUCTRState),
@@ -422,6 +472,9 @@ const VMStateDescription vmstate_riscv_cpu = {
&vmstate_debug,
&vmstate_smstateen,
&vmstate_jvt,
+ &vmstate_elp,
+ &vmstate_ssp,
+ &vmstate_ctr,
NULL
}
};
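
The three new subsections (elp, ssp, ctr) all follow the same migration pattern: a .needed callback keeps the wire format unchanged for CPUs without the extension, since a subsection whose hook returns false is never emitted. The skeleton, mirroring vmstate_ssp above with a hypothetical subsection name:

    /* hypothetical subsection; the name is a placeholder */
    static bool sketch_needed(void *opaque)
    {
        RISCVCPU *cpu = opaque;
        return cpu->cfg.ext_zicfiss;   /* gate on the extension */
    }

    static const VMStateDescription sketch_subsection = {
        .name = "cpu/sketch",
        .version_id = 1,
        .minimum_version_id = 1,
        .needed = sketch_needed,       /* skipped entirely when false */
        .fields = (const VMStateField[]) {
            VMSTATE_UINTTL(env.ssp, RISCVCPU),
            VMSTATE_END_OF_LIST()
        }
    };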
diff --git a/target/riscv/monitor.c b/target/riscv/monitor.c
index f5b1ffe..100005e 100644
--- a/target/riscv/monitor.c
+++ b/target/riscv/monitor.c
@@ -184,7 +184,6 @@ static void mem_info_svxx(Monitor *mon, CPUArchState *env)
break;
default:
g_assert_not_reached();
- break;
}
/* calculate virtual address bits */
diff --git a/target/riscv/op_helper.c b/target/riscv/op_helper.c
index 25a5263..557807b 100644
--- a/target/riscv/op_helper.c
+++ b/target/riscv/op_helper.c
@@ -21,15 +21,24 @@
#include "qemu/osdep.h"
#include "cpu.h"
#include "internals.h"
-#include "exec/exec-all.h"
-#include "exec/cpu_ldst.h"
+#include "exec/cputlb.h"
+#include "accel/tcg/cpu-ldst.h"
+#include "accel/tcg/probe.h"
#include "exec/helper-proto.h"
+#include "exec/tlb-flags.h"
+#include "trace.h"
/* Exceptions processing helpers */
G_NORETURN void riscv_raise_exception(CPURISCVState *env,
- uint32_t exception, uintptr_t pc)
+ RISCVException exception,
+ uintptr_t pc)
{
CPUState *cs = env_cpu(env);
+
+ trace_riscv_exception(exception,
+ riscv_cpu_get_trap_name(exception, false),
+ env->pc);
+
cs->exception_index = exception;
cpu_loop_exit_restore(cs, pc);
}
@@ -62,7 +71,7 @@ target_ulong helper_csrr(CPURISCVState *env, int csr)
void helper_csrw(CPURISCVState *env, int csr, target_ulong src)
{
target_ulong mask = env->xl == MXL_RV32 ? UINT32_MAX : (target_ulong)-1;
- RISCVException ret = riscv_csrrw(env, csr, NULL, src, mask);
+ RISCVException ret = riscv_csrrw(env, csr, NULL, src, mask, GETPC());
if (ret != RISCV_EXCP_NONE) {
riscv_raise_exception(env, ret, GETPC());
@@ -73,7 +82,7 @@ target_ulong helper_csrrw(CPURISCVState *env, int csr,
target_ulong src, target_ulong write_mask)
{
target_ulong val = 0;
- RISCVException ret = riscv_csrrw(env, csr, &val, src, write_mask);
+ RISCVException ret = riscv_csrrw(env, csr, &val, src, write_mask, GETPC());
if (ret != RISCV_EXCP_NONE) {
riscv_raise_exception(env, ret, GETPC());
@@ -99,7 +108,7 @@ void helper_csrw_i128(CPURISCVState *env, int csr,
{
RISCVException ret = riscv_csrrw_i128(env, csr, NULL,
int128_make128(srcl, srch),
- UINT128_MAX);
+ UINT128_MAX, GETPC());
if (ret != RISCV_EXCP_NONE) {
riscv_raise_exception(env, ret, GETPC());
@@ -107,13 +116,14 @@ void helper_csrw_i128(CPURISCVState *env, int csr,
}
target_ulong helper_csrrw_i128(CPURISCVState *env, int csr,
- target_ulong srcl, target_ulong srch,
- target_ulong maskl, target_ulong maskh)
+ target_ulong srcl, target_ulong srch,
+ target_ulong maskl, target_ulong maskh)
{
Int128 rv = int128_zero();
RISCVException ret = riscv_csrrw_i128(env, csr, &rv,
int128_make128(srcl, srch),
- int128_make128(maskl, maskh));
+ int128_make128(maskl, maskh),
+ GETPC());
if (ret != RISCV_EXCP_NONE) {
riscv_raise_exception(env, ret, GETPC());
@@ -263,13 +273,17 @@ target_ulong helper_sret(CPURISCVState *env)
{
uint64_t mstatus;
target_ulong prev_priv, prev_virt = env->virt_enabled;
+ const target_ulong src_priv = env->priv;
+ const bool src_virt = env->virt_enabled;
if (!(env->priv >= PRV_S)) {
riscv_raise_exception(env, RISCV_EXCP_ILLEGAL_INST, GETPC());
}
target_ulong retpc = env->sepc;
- if (!riscv_has_ext(env, RVC) && (retpc & 0x3)) {
+ if (!riscv_cpu_allow_16bit_insn(&env_archcpu(env)->cfg,
+ env->priv_ver,
+ env->misa_ext) && (retpc & 0x3)) {
riscv_raise_exception(env, RISCV_EXCP_INST_ADDR_MIS, GETPC());
}
@@ -287,6 +301,21 @@ target_ulong helper_sret(CPURISCVState *env)
get_field(mstatus, MSTATUS_SPIE));
mstatus = set_field(mstatus, MSTATUS_SPIE, 1);
mstatus = set_field(mstatus, MSTATUS_SPP, PRV_U);
+
+ if (riscv_cpu_cfg(env)->ext_ssdbltrp) {
+ if (riscv_has_ext(env, RVH)) {
+ target_ulong prev_vu = get_field(env->hstatus, HSTATUS_SPV) &&
+ prev_priv == PRV_U;
+ /* Returning to VU from HS, vsstatus.sdt = 0 */
+ if (!env->virt_enabled && prev_vu) {
+ env->vsstatus = set_field(env->vsstatus, MSTATUS_SDT, 0);
+ }
+ }
+ mstatus = set_field(mstatus, MSTATUS_SDT, 0);
+ }
+ if (riscv_cpu_cfg(env)->ext_smdbltrp && env->priv >= PRV_M) {
+ mstatus = set_field(mstatus, MSTATUS_MDT, 0);
+ }
if (env->priv_ver >= PRIV_VERSION_1_12_0) {
mstatus = set_field(mstatus, MSTATUS_MPRV, 0);
}
@@ -297,7 +326,6 @@ target_ulong helper_sret(CPURISCVState *env)
target_ulong hstatus = env->hstatus;
prev_virt = get_field(hstatus, HSTATUS_SPV);
-
hstatus = set_field(hstatus, HSTATUS_SPV, 0);
env->hstatus = hstatus;
@@ -309,27 +337,65 @@ target_ulong helper_sret(CPURISCVState *env)
riscv_cpu_set_mode(env, prev_priv, prev_virt);
+ /*
+ * If forward cfi enabled for new priv, restore elp status
+ * and clear spelp in mstatus
+ */
+ if (cpu_get_fcfien(env)) {
+ env->elp = get_field(env->mstatus, MSTATUS_SPELP);
+ }
+ env->mstatus = set_field(env->mstatus, MSTATUS_SPELP, 0);
+
+ if (riscv_cpu_cfg(env)->ext_smctr || riscv_cpu_cfg(env)->ext_ssctr) {
+ riscv_ctr_add_entry(env, env->pc, retpc, CTRDATA_TYPE_EXCEP_INT_RET,
+ src_priv, src_virt);
+ }
+
return retpc;
}
-target_ulong helper_mret(CPURISCVState *env)
+static void check_ret_from_m_mode(CPURISCVState *env, target_ulong retpc,
+ target_ulong prev_priv)
{
if (!(env->priv >= PRV_M)) {
riscv_raise_exception(env, RISCV_EXCP_ILLEGAL_INST, GETPC());
}
- target_ulong retpc = env->mepc;
- if (!riscv_has_ext(env, RVC) && (retpc & 0x3)) {
+ if (!riscv_cpu_allow_16bit_insn(&env_archcpu(env)->cfg,
+ env->priv_ver,
+ env->misa_ext) && (retpc & 0x3)) {
riscv_raise_exception(env, RISCV_EXCP_INST_ADDR_MIS, GETPC());
}
- uint64_t mstatus = env->mstatus;
- target_ulong prev_priv = get_field(mstatus, MSTATUS_MPP);
-
if (riscv_cpu_cfg(env)->pmp &&
!pmp_get_num_rules(env) && (prev_priv != PRV_M)) {
riscv_raise_exception(env, RISCV_EXCP_INST_ACCESS_FAULT, GETPC());
}
+}
+static target_ulong ssdbltrp_mxret(CPURISCVState *env, target_ulong mstatus,
+ target_ulong prev_priv,
+ target_ulong prev_virt)
+{
+ /* If returning to U, VS or VU, sstatus.sdt = 0 */
+ if (prev_priv == PRV_U || (prev_virt &&
+ (prev_priv == PRV_S || prev_priv == PRV_U))) {
+ mstatus = set_field(mstatus, MSTATUS_SDT, 0);
+ /* If returning to VU, vsstatus.sdt = 0 */
+ if (prev_virt && prev_priv == PRV_U) {
+ env->vsstatus = set_field(env->vsstatus, MSTATUS_SDT, 0);
+ }
+ }
+
+ return mstatus;
+}
+
+target_ulong helper_mret(CPURISCVState *env)
+{
+ target_ulong retpc = env->mepc;
+ uint64_t mstatus = env->mstatus;
+ target_ulong prev_priv = get_field(mstatus, MSTATUS_MPP);
+
+ check_ret_from_m_mode(env, retpc, prev_priv);
target_ulong prev_virt = get_field(env->mstatus, MSTATUS_MPV) &&
(prev_priv != PRV_M);
@@ -339,6 +405,12 @@ target_ulong helper_mret(CPURISCVState *env)
mstatus = set_field(mstatus, MSTATUS_MPP,
riscv_has_ext(env, RVU) ? PRV_U : PRV_M);
mstatus = set_field(mstatus, MSTATUS_MPV, 0);
+ if (riscv_cpu_cfg(env)->ext_ssdbltrp) {
+ mstatus = ssdbltrp_mxret(env, mstatus, prev_priv, prev_virt);
+ }
+ if (riscv_cpu_cfg(env)->ext_smdbltrp) {
+ mstatus = set_field(mstatus, MSTATUS_MDT, 0);
+ }
if ((env->priv_ver >= PRIV_VERSION_1_12_0) && (prev_priv != PRV_M)) {
mstatus = set_field(mstatus, MSTATUS_MPRV, 0);
}
@@ -349,10 +421,106 @@ target_ulong helper_mret(CPURISCVState *env)
}
riscv_cpu_set_mode(env, prev_priv, prev_virt);
+ /*
+ * If forward cfi enabled for new priv, restore elp status
+ * and clear mpelp in mstatus
+ */
+ if (cpu_get_fcfien(env)) {
+ env->elp = get_field(env->mstatus, MSTATUS_MPELP);
+ }
+ env->mstatus = set_field(env->mstatus, MSTATUS_MPELP, 0);
+
+ if (riscv_cpu_cfg(env)->ext_smctr || riscv_cpu_cfg(env)->ext_ssctr) {
+ riscv_ctr_add_entry(env, env->pc, retpc, CTRDATA_TYPE_EXCEP_INT_RET,
+ PRV_M, false);
+ }
+
+ return retpc;
+}
+
+target_ulong helper_mnret(CPURISCVState *env)
+{
+ target_ulong retpc = env->mnepc;
+ target_ulong prev_priv = get_field(env->mnstatus, MNSTATUS_MNPP);
+ target_ulong prev_virt;
+
+ check_ret_from_m_mode(env, retpc, prev_priv);
+
+ prev_virt = get_field(env->mnstatus, MNSTATUS_MNPV) &&
+ (prev_priv != PRV_M);
+ env->mnstatus = set_field(env->mnstatus, MNSTATUS_NMIE, true);
+
+ /*
+ * If MNRET changes the privilege mode to a mode
+ * less privileged than M, it also sets mstatus.MPRV to 0.
+ */
+ if (prev_priv < PRV_M) {
+ env->mstatus = set_field(env->mstatus, MSTATUS_MPRV, false);
+ }
+ if (riscv_cpu_cfg(env)->ext_ssdbltrp) {
+ env->mstatus = ssdbltrp_mxret(env, env->mstatus, prev_priv, prev_virt);
+ }
+
+ if (riscv_cpu_cfg(env)->ext_smdbltrp) {
+ if (prev_priv < PRV_M) {
+ env->mstatus = set_field(env->mstatus, MSTATUS_MDT, 0);
+ }
+ }
+
+ if (riscv_has_ext(env, RVH) && prev_virt) {
+ riscv_cpu_swap_hypervisor_regs(env);
+ }
+
+ riscv_cpu_set_mode(env, prev_priv, prev_virt);
+
+ /*
+ * If forward cfi enabled for new priv, restore elp status
+ * and clear mnpelp in mnstatus
+ */
+ if (cpu_get_fcfien(env)) {
+ env->elp = get_field(env->mnstatus, MNSTATUS_MNPELP);
+ }
+ env->mnstatus = set_field(env->mnstatus, MNSTATUS_MNPELP, 0);
return retpc;
}
+void helper_ctr_add_entry(CPURISCVState *env, target_ulong src,
+ target_ulong dest, target_ulong type)
+{
+ riscv_ctr_add_entry(env, src, dest, (enum CTRType)type,
+ env->priv, env->virt_enabled);
+}
+
+void helper_ctr_clear(CPURISCVState *env)
+{
+ /*
+ * It's safe to call smstateen_acc_ok() for umode access regardless of the
+ * state of bit 54 (CTR bit in case of m/hstateen) of sstateen. If the bit
+ * is zero, smstateen_acc_ok() will return the correct exception code and
+ * if it's one, smstateen_acc_ok() will return RISCV_EXCP_NONE. In that
+ * scenario the U-mode check below will handle that case.
+ */
+ RISCVException ret = smstateen_acc_ok(env, 0, SMSTATEEN0_CTR);
+ if (ret != RISCV_EXCP_NONE) {
+ riscv_raise_exception(env, ret, GETPC());
+ }
+
+ if (env->priv == PRV_U) {
+ /*
+ * One corner case is when sctrclr is executed from VU-mode and
+ * mstateen.CTR = 0, in which case we are supposed to raise
+ * RISCV_EXCP_ILLEGAL_INST. This case is already handled in
+ * smstateen_acc_ok().
+ */
+ uint32_t excep = env->virt_enabled ? RISCV_EXCP_VIRT_INSTRUCTION_FAULT :
+ RISCV_EXCP_ILLEGAL_INST;
+ riscv_raise_exception(env, excep, GETPC());
+ }
+
+ riscv_ctr_clear(env);
+}
+
void helper_wfi(CPURISCVState *env)
{
CPUState *cs = env_cpu(env);
@@ -455,7 +623,7 @@ target_ulong helper_hyp_hlv_bu(CPURISCVState *env, target_ulong addr)
int mmu_idx = check_access_hlsv(env, false, ra);
MemOpIdx oi = make_memop_idx(MO_UB, mmu_idx);
- return cpu_ldb_mmu(env, addr, oi, ra);
+ return cpu_ldb_mmu(env, adjust_addr_virt(env, addr), oi, ra);
}
target_ulong helper_hyp_hlv_hu(CPURISCVState *env, target_ulong addr)
@@ -464,7 +632,7 @@ target_ulong helper_hyp_hlv_hu(CPURISCVState *env, target_ulong addr)
int mmu_idx = check_access_hlsv(env, false, ra);
MemOpIdx oi = make_memop_idx(MO_TEUW, mmu_idx);
- return cpu_ldw_mmu(env, addr, oi, ra);
+ return cpu_ldw_mmu(env, adjust_addr_virt(env, addr), oi, ra);
}
target_ulong helper_hyp_hlv_wu(CPURISCVState *env, target_ulong addr)
@@ -473,7 +641,7 @@ target_ulong helper_hyp_hlv_wu(CPURISCVState *env, target_ulong addr)
int mmu_idx = check_access_hlsv(env, false, ra);
MemOpIdx oi = make_memop_idx(MO_TEUL, mmu_idx);
- return cpu_ldl_mmu(env, addr, oi, ra);
+ return cpu_ldl_mmu(env, adjust_addr_virt(env, addr), oi, ra);
}
target_ulong helper_hyp_hlv_d(CPURISCVState *env, target_ulong addr)
@@ -482,7 +650,7 @@ target_ulong helper_hyp_hlv_d(CPURISCVState *env, target_ulong addr)
int mmu_idx = check_access_hlsv(env, false, ra);
MemOpIdx oi = make_memop_idx(MO_TEUQ, mmu_idx);
- return cpu_ldq_mmu(env, addr, oi, ra);
+ return cpu_ldq_mmu(env, adjust_addr_virt(env, addr), oi, ra);
}
void helper_hyp_hsv_b(CPURISCVState *env, target_ulong addr, target_ulong val)
@@ -491,7 +659,7 @@ void helper_hyp_hsv_b(CPURISCVState *env, target_ulong addr, target_ulong val)
int mmu_idx = check_access_hlsv(env, false, ra);
MemOpIdx oi = make_memop_idx(MO_UB, mmu_idx);
- cpu_stb_mmu(env, addr, val, oi, ra);
+ cpu_stb_mmu(env, adjust_addr_virt(env, addr), val, oi, ra);
}
void helper_hyp_hsv_h(CPURISCVState *env, target_ulong addr, target_ulong val)
@@ -500,7 +668,7 @@ void helper_hyp_hsv_h(CPURISCVState *env, target_ulong addr, target_ulong val)
int mmu_idx = check_access_hlsv(env, false, ra);
MemOpIdx oi = make_memop_idx(MO_TEUW, mmu_idx);
- cpu_stw_mmu(env, addr, val, oi, ra);
+ cpu_stw_mmu(env, adjust_addr_virt(env, addr), val, oi, ra);
}
void helper_hyp_hsv_w(CPURISCVState *env, target_ulong addr, target_ulong val)
@@ -509,7 +677,7 @@ void helper_hyp_hsv_w(CPURISCVState *env, target_ulong addr, target_ulong val)
int mmu_idx = check_access_hlsv(env, false, ra);
MemOpIdx oi = make_memop_idx(MO_TEUL, mmu_idx);
- cpu_stl_mmu(env, addr, val, oi, ra);
+ cpu_stl_mmu(env, adjust_addr_virt(env, addr), val, oi, ra);
}
void helper_hyp_hsv_d(CPURISCVState *env, target_ulong addr, target_ulong val)
@@ -518,7 +686,7 @@ void helper_hyp_hsv_d(CPURISCVState *env, target_ulong addr, target_ulong val)
int mmu_idx = check_access_hlsv(env, false, ra);
MemOpIdx oi = make_memop_idx(MO_TEUQ, mmu_idx);
- cpu_stq_mmu(env, addr, val, oi, ra);
+ cpu_stq_mmu(env, adjust_addr_virt(env, addr), val, oi, ra);
}
/*
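
A compact way to read the ssdbltrp_mxret()/SDT changes in this file: sstatus.SDT arms when a supervisor trap is taken and, while set, a second trap escalates to a double trap; every xRET that lands in U, VS or VU disarms it. A sketch of the disarm rule (the SDT bit position is an assumption here):

    #include <stdint.h>
    #include <stdbool.h>

    #define SK_SDT (1ull << 24)   /* sstatus.SDT, bit position assumed */

    static uint64_t sketch_xret_sdt(uint64_t mstatus, unsigned prev_priv,
                                    bool prev_virt)
    {
        /* PRV_U == 0; returning to U, VS or VU clears SDT */
        if (prev_priv == 0 || prev_virt) {
            mstatus &= ~SK_SDT;
        }
        return mstatus;
    }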
diff --git a/target/riscv/pmp.c b/target/riscv/pmp.c
index 9eea397..5af295e 100644
--- a/target/riscv/pmp.c
+++ b/target/riscv/pmp.c
@@ -24,14 +24,24 @@
#include "qapi/error.h"
#include "cpu.h"
#include "trace.h"
-#include "exec/exec-all.h"
+#include "exec/cputlb.h"
#include "exec/page-protection.h"
+#include "exec/target_page.h"
static bool pmp_write_cfg(CPURISCVState *env, uint32_t addr_index,
uint8_t val);
static uint8_t pmp_read_cfg(CPURISCVState *env, uint32_t addr_index);
/*
+ * Convert the PMP permissions to match the truth table in the Smepmp spec.
+ */
+static inline uint8_t pmp_get_smepmp_operation(uint8_t cfg)
+{
+ return ((cfg & PMP_LOCK) >> 4) | ((cfg & PMP_READ) << 2) |
+ (cfg & PMP_WRITE) | ((cfg & PMP_EXEC) >> 2);
+}
+
+/*
* Accessor method to extract address matching type 'a field' from cfg reg
*/
static inline uint8_t pmp_get_a_field(uint8_t cfg)
@@ -45,21 +55,58 @@ static inline uint8_t pmp_get_a_field(uint8_t cfg)
*/
static inline int pmp_is_locked(CPURISCVState *env, uint32_t pmp_index)
{
- /* mseccfg.RLB is set */
- if (MSECCFG_RLB_ISSET(env)) {
- return 0;
- }
-
if (env->pmp_state.pmp[pmp_index].cfg_reg & PMP_LOCK) {
return 1;
}
- /* Top PMP has no 'next' to check */
- if ((pmp_index + 1u) >= MAX_RISCV_PMPS) {
+ return 0;
+}
+
+/*
+ * Check whether a PMP entry is read-only, i.e. it has the LOCK flag
+ * set and mseccfg.RLB is unset.
+ */
+static int pmp_is_readonly(CPURISCVState *env, uint32_t pmp_index)
+{
+ return pmp_is_locked(env, pmp_index) && !MSECCFG_RLB_ISSET(env);
+}
+
+/*
+ * Check whether `val` is an invalid Smepmp config value
+ */
+static int pmp_is_invalid_smepmp_cfg(CPURISCVState *env, uint8_t val)
+{
+ /* No check if mseccfg.MML is not set or if mseccfg.RLB is set */
+ if (!MSECCFG_MML_ISSET(env) || MSECCFG_RLB_ISSET(env)) {
return 0;
}
- return 0;
+ /*
+ * Adding a rule with executable privileges that is either M-mode-only
+ * or a locked Shared-Region is not possible.
+ */
+ switch (pmp_get_smepmp_operation(val)) {
+ case 0:
+ case 1:
+ case 2:
+ case 3:
+ case 4:
+ case 5:
+ case 6:
+ case 7:
+ case 8:
+ case 12:
+ case 14:
+ case 15:
+ return 0;
+ case 9:
+ case 10:
+ case 11:
+ case 13:
+ return 1;
+ default:
+ g_assert_not_reached();
+ }
}
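For reference, pmp_get_smepmp_operation() packs the four pmpcfg permission bits into a single L:R:W:X index (bits 3..0), and the switch above rejects exactly the four encodings (9, 10, 11, 13) that would create an executable M-mode-only or locked Shared-Region rule while mseccfg.MML is set. A minimal standalone sketch of the encoding, with the bit values assumed from QEMU's pmp.h:

#include <stdint.h>
#include <stdio.h>

#define PMP_READ  0x01
#define PMP_WRITE 0x02
#define PMP_EXEC  0x04
#define PMP_LOCK  0x80

/* Same bit shuffle as pmp_get_smepmp_operation(): L:R:W:X -> bits 3..0 */
static uint8_t smepmp_op(uint8_t cfg)
{
    return ((cfg & PMP_LOCK) >> 4) | ((cfg & PMP_READ) << 2) |
           (cfg & PMP_WRITE) | ((cfg & PMP_EXEC) >> 2);
}

int main(void)
{
    printf("%u\n", smepmp_op(PMP_LOCK | PMP_EXEC));             /* 9: rejected */
    printf("%u\n", smepmp_op(PMP_LOCK | PMP_READ | PMP_WRITE)); /* 14: allowed */
    return 0;
}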
/*
@@ -90,45 +137,18 @@ static inline uint8_t pmp_read_cfg(CPURISCVState *env, uint32_t pmp_index)
static bool pmp_write_cfg(CPURISCVState *env, uint32_t pmp_index, uint8_t val)
{
if (pmp_index < MAX_RISCV_PMPS) {
- bool locked = true;
-
- if (riscv_cpu_cfg(env)->ext_smepmp) {
- /* mseccfg.RLB is set */
- if (MSECCFG_RLB_ISSET(env)) {
- locked = false;
- }
-
- /* mseccfg.MML is not set */
- if (!MSECCFG_MML_ISSET(env) && !pmp_is_locked(env, pmp_index)) {
- locked = false;
- }
-
- /* mseccfg.MML is set */
- if (MSECCFG_MML_ISSET(env)) {
- /* not adding execute bit */
- if ((val & PMP_LOCK) != 0 && (val & PMP_EXEC) != PMP_EXEC) {
- locked = false;
- }
- /* shared region and not adding X bit */
- if ((val & PMP_LOCK) != PMP_LOCK &&
- (val & 0x7) != (PMP_WRITE | PMP_EXEC)) {
- locked = false;
- }
- }
- } else {
- if (!pmp_is_locked(env, pmp_index)) {
- locked = false;
- }
+ if (env->pmp_state.pmp[pmp_index].cfg_reg == val) {
+ /* no change */
+ return false;
}
- if (locked) {
- qemu_log_mask(LOG_GUEST_ERROR, "ignoring pmpcfg write - locked\n");
- } else if (env->pmp_state.pmp[pmp_index].cfg_reg != val) {
- /* If !mseccfg.MML then ignore writes with encoding RW=01 */
- if ((val & PMP_WRITE) && !(val & PMP_READ) &&
- !MSECCFG_MML_ISSET(env)) {
- return false;
- }
+ if (pmp_is_readonly(env, pmp_index)) {
+ qemu_log_mask(LOG_GUEST_ERROR,
+ "ignoring pmpcfg write - read only\n");
+ } else if (pmp_is_invalid_smepmp_cfg(env, val)) {
+ qemu_log_mask(LOG_GUEST_ERROR,
+ "ignoring pmpcfg write - invalid\n");
+ } else {
env->pmp_state.pmp[pmp_index].cfg_reg = val;
pmp_update_rule_addr(env, pmp_index);
return true;
@@ -326,7 +346,7 @@ bool pmp_hart_has_privs(CPURISCVState *env, hwaddr addr,
*/
pmp_size = -(addr | TARGET_PAGE_MASK);
} else {
- pmp_size = sizeof(target_ulong);
+ pmp_size = 2 << riscv_cpu_mxl(env);
}
} else {
pmp_size = size;
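The fallback size matters when the PMP check is reached without a known access width: sizeof(target_ulong) depended on how QEMU was built, whereas 2 << riscv_cpu_mxl(env) follows the guest's XLEN. A hedged sketch of the arithmetic (RISCVMXL values assumed from QEMU's cpu.h):

#include <assert.h>

enum { MXL_RV32 = 1, MXL_RV64 = 2, MXL_RV128 = 3 };  /* assumed from cpu.h */

/* 2 << mxl yields the widest ordinary access for each XLEN */
static_assert((2 << MXL_RV32)  == 4,  "RV32 fallback: 4 bytes");
static_assert((2 << MXL_RV64)  == 8,  "RV64 fallback: 8 bytes");
static_assert((2 << MXL_RV128) == 16, "RV128 fallback: 16 bytes");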
@@ -352,16 +372,6 @@ bool pmp_hart_has_privs(CPURISCVState *env, hwaddr addr,
const uint8_t a_field =
pmp_get_a_field(env->pmp_state.pmp[i].cfg_reg);
- /*
- * Convert the PMP permissions to match the truth table in the
- * Smepmp spec.
- */
- const uint8_t smepmp_operation =
- ((env->pmp_state.pmp[i].cfg_reg & PMP_LOCK) >> 4) |
- ((env->pmp_state.pmp[i].cfg_reg & PMP_READ) << 2) |
- (env->pmp_state.pmp[i].cfg_reg & PMP_WRITE) |
- ((env->pmp_state.pmp[i].cfg_reg & PMP_EXEC) >> 2);
-
if (((s + e) == 2) && (PMP_AMATCH_OFF != a_field)) {
/*
* If the PMP entry is not off and the address is in range,
@@ -380,6 +390,9 @@ bool pmp_hart_has_privs(CPURISCVState *env, hwaddr addr,
/*
* If mseccfg.MML Bit set, do the enhanced pmp priv check
*/
+ const uint8_t smepmp_operation =
+ pmp_get_smepmp_operation(env->pmp_state.pmp[i].cfg_reg);
+
if (mode == PRV_M) {
switch (smepmp_operation) {
case 0:
@@ -516,6 +529,11 @@ void pmpaddr_csr_write(CPURISCVState *env, uint32_t addr_index,
bool is_next_cfg_tor = false;
if (addr_index < MAX_RISCV_PMPS) {
+ if (env->pmp_state.pmp[addr_index].addr_reg == val) {
+ /* no change */
+ return;
+ }
+
/*
* In TOR mode, need to check the lock bit of the next pmp
* (if there is a next).
@@ -524,25 +542,23 @@ void pmpaddr_csr_write(CPURISCVState *env, uint32_t addr_index,
uint8_t pmp_cfg = env->pmp_state.pmp[addr_index + 1].cfg_reg;
is_next_cfg_tor = PMP_AMATCH_TOR == pmp_get_a_field(pmp_cfg);
- if (pmp_cfg & PMP_LOCK && is_next_cfg_tor) {
+ if (pmp_is_readonly(env, addr_index + 1) && is_next_cfg_tor) {
qemu_log_mask(LOG_GUEST_ERROR,
- "ignoring pmpaddr write - pmpcfg + 1 locked\n");
+ "ignoring pmpaddr write - pmpcfg+1 read only\n");
return;
}
}
- if (!pmp_is_locked(env, addr_index)) {
- if (env->pmp_state.pmp[addr_index].addr_reg != val) {
- env->pmp_state.pmp[addr_index].addr_reg = val;
- pmp_update_rule_addr(env, addr_index);
- if (is_next_cfg_tor) {
- pmp_update_rule_addr(env, addr_index + 1);
- }
- tlb_flush(env_cpu(env));
+ if (!pmp_is_readonly(env, addr_index)) {
+ env->pmp_state.pmp[addr_index].addr_reg = val;
+ pmp_update_rule_addr(env, addr_index);
+ if (is_next_cfg_tor) {
+ pmp_update_rule_addr(env, addr_index + 1);
}
+ tlb_flush(env_cpu(env));
} else {
qemu_log_mask(LOG_GUEST_ERROR,
- "ignoring pmpaddr write - locked\n");
+ "ignoring pmpaddr write - read only\n");
}
} else {
qemu_log_mask(LOG_GUEST_ERROR,
@@ -575,6 +591,13 @@ target_ulong pmpaddr_csr_read(CPURISCVState *env, uint32_t addr_index)
void mseccfg_csr_write(CPURISCVState *env, target_ulong val)
{
int i;
+ uint64_t mask = MSECCFG_MMWP | MSECCFG_MML;
+ /* Update PMM field only if the value is valid according to Zjpm v1.0 */
+ if (riscv_cpu_cfg(env)->ext_smmpm &&
+ riscv_cpu_mxl(env) == MXL_RV64 &&
+ get_field(val, MSECCFG_PMM) != PMM_FIELD_RESERVED) {
+ mask |= MSECCFG_PMM;
+ }
trace_mseccfg_csr_write(env->mhartid, val);
@@ -590,12 +613,18 @@ void mseccfg_csr_write(CPURISCVState *env, target_ulong val)
if (riscv_cpu_cfg(env)->ext_smepmp) {
/* Sticky bits */
- val |= (env->mseccfg & (MSECCFG_MMWP | MSECCFG_MML));
- if ((val ^ env->mseccfg) & (MSECCFG_MMWP | MSECCFG_MML)) {
+ val |= (env->mseccfg & mask);
+ if ((val ^ env->mseccfg) & mask) {
tlb_flush(env_cpu(env));
}
} else {
- val &= ~(MSECCFG_MMWP | MSECCFG_MML | MSECCFG_RLB);
+ mask |= MSECCFG_RLB;
+ val &= ~(mask);
+ }
+
+ /* M-mode forward CFI can only be enabled if the Zicfilp extension is implemented */
+ if (env_archcpu(env)->cfg.ext_zicfilp) {
+ val |= (val & MSECCFG_MLPE);
}
env->mseccfg = val;
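The sticky-bit handling above can be hard to follow in diff form: previously-set MML/MMWP bits (plus PMM, when Smmpm applies) are OR-ed back into the incoming value, so once set they can never be cleared. A standalone sketch of that rule, with the field values assumed from pmp.h:

#include <stdint.h>

#define MSECCFG_MML  (1ULL << 0)
#define MSECCFG_MMWP (1ULL << 1)

/* Once a sticky bit is set in 'old', no later write can clear it. */
static uint64_t apply_sticky(uint64_t old, uint64_t val, uint64_t mask)
{
    return val | (old & mask);
}

/* apply_sticky(MSECCFG_MML, 0, MSECCFG_MML | MSECCFG_MMWP) == MSECCFG_MML:
 * the attempted clear of MML is silently ignored. */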
diff --git a/target/riscv/pmp.h b/target/riscv/pmp.h
index f5c10ce..271cf24 100644
--- a/target/riscv/pmp.h
+++ b/target/riscv/pmp.h
@@ -44,7 +44,9 @@ typedef enum {
MSECCFG_MMWP = 1 << 1,
MSECCFG_RLB = 1 << 2,
MSECCFG_USEED = 1 << 8,
- MSECCFG_SSEED = 1 << 9
+ MSECCFG_SSEED = 1 << 9,
+ MSECCFG_MLPE = 1 << 10,
+ MSECCFG_PMM = 3ULL << 32,
} mseccfg_field_t;
typedef struct {
diff --git a/target/riscv/pmu.c b/target/riscv/pmu.c
index 3cc0b36..a68809e 100644
--- a/target/riscv/pmu.c
+++ b/target/riscv/pmu.c
@@ -22,8 +22,8 @@
#include "qemu/timer.h"
#include "cpu.h"
#include "pmu.h"
-#include "sysemu/cpu-timers.h"
-#include "sysemu/device_tree.h"
+#include "exec/icount.h"
+#include "system/device_tree.h"
#define RISCV_TIMEBASE_FREQ 1000000000 /* 1GHz */
@@ -204,6 +204,7 @@ static void riscv_pmu_icount_update_priv(CPURISCVState *env,
}
if (env->virt_enabled) {
+ g_assert(env->priv <= PRV_S);
counter_arr = env->pmu_fixed_ctrs[1].counter_virt;
snapshot_prev = env->pmu_fixed_ctrs[1].counter_virt_prev;
} else {
@@ -212,6 +213,7 @@ static void riscv_pmu_icount_update_priv(CPURISCVState *env,
}
if (new_virt) {
+ g_assert(newpriv <= PRV_S);
snapshot_new = env->pmu_fixed_ctrs[1].counter_virt_prev;
} else {
snapshot_new = env->pmu_fixed_ctrs[1].counter_prev;
@@ -242,6 +244,7 @@ static void riscv_pmu_cycle_update_priv(CPURISCVState *env,
}
if (env->virt_enabled) {
+ g_assert(env->priv <= PRV_S);
counter_arr = env->pmu_fixed_ctrs[0].counter_virt;
snapshot_prev = env->pmu_fixed_ctrs[0].counter_virt_prev;
} else {
@@ -250,6 +253,7 @@ static void riscv_pmu_cycle_update_priv(CPURISCVState *env,
}
if (new_virt) {
+ g_assert(newpriv <= PRV_S);
snapshot_new = env->pmu_fixed_ctrs[0].counter_virt_prev;
} else {
snapshot_new = env->pmu_fixed_ctrs[0].counter_prev;
@@ -386,7 +390,7 @@ int riscv_pmu_update_event_map(CPURISCVState *env, uint64_t value,
* Expected mhpmevent value is zero for reset case. Remove the current
* mapping.
*/
- if (!value) {
+ if (!(value & MHPMEVENT_IDX_MASK)) {
g_hash_table_foreach_remove(cpu->pmu_event_ctr_map,
pmu_remove_event_map,
GUINT_TO_POINTER(ctr_idx));
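The masked test changes which writes count as a "reset": only the event-index field decides, so a value that carries inhibit/control bits but no event index still removes the mapping. A sketch, with the mask and control-bit definitions assumed from cpu_bits.h:

#include <stdint.h>
#include <stdbool.h>

#define MHPMEVENT_IDX_MASK 0xFFFFFULL   /* low event-index field (assumed) */
#define MHPMEVENT_BIT_MINH (1ULL << 62) /* M-mode inhibit bit (assumed) */

/* True when an mhpmevent write should drop the counter<->event mapping. */
static bool is_reset_write(uint64_t value)
{
    return (value & MHPMEVENT_IDX_MASK) == 0;
}

/* is_reset_write(0) and is_reset_write(MHPMEVENT_BIT_MINH) are both true:
 * control bits alone no longer keep a stale mapping alive. */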
diff --git a/target/riscv/riscv-qmp-cmds.c b/target/riscv/riscv-qmp-cmds.c
index d363dc3..8ba8aa0 100644
--- a/target/riscv/riscv-qmp-cmds.c
+++ b/target/riscv/riscv-qmp-cmds.c
@@ -25,14 +25,14 @@
#include "qemu/osdep.h"
#include "qapi/error.h"
-#include "qapi/qapi-commands-machine-target.h"
-#include "qapi/qmp/qbool.h"
-#include "qapi/qmp/qdict.h"
+#include "qapi/qapi-commands-machine.h"
+#include "qobject/qbool.h"
+#include "qobject/qdict.h"
#include "qapi/qobject-input-visitor.h"
#include "qapi/visitor.h"
#include "qom/qom-qobject.h"
-#include "sysemu/kvm.h"
-#include "sysemu/tcg.h"
+#include "system/kvm.h"
+#include "system/tcg.h"
#include "cpu-qom.h"
#include "cpu.h"
diff --git a/target/riscv/tcg/tcg-cpu.c b/target/riscv/tcg/tcg-cpu.c
index b8814ab..55fd9e5 100644
--- a/target/riscv/tcg/tcg-cpu.c
+++ b/target/riscv/tcg/tcg-cpu.c
@@ -18,9 +18,10 @@
*/
#include "qemu/osdep.h"
-#include "exec/exec-all.h"
+#include "exec/translation-block.h"
#include "tcg-cpu.h"
#include "cpu.h"
+#include "exec/target_page.h"
#include "internals.h"
#include "pmu.h"
#include "time_helper.h"
@@ -29,11 +30,13 @@
#include "qemu/accel.h"
#include "qemu/error-report.h"
#include "qemu/log.h"
-#include "hw/core/accel-cpu.h"
-#include "hw/core/tcg-cpu-ops.h"
+#include "accel/accel-cpu-target.h"
+#include "accel/tcg/cpu-ops.h"
#include "tcg/tcg.h"
#ifndef CONFIG_USER_ONLY
#include "hw/boards.h"
+#include "system/tcg.h"
+#include "exec/icount.h"
#endif
/* Hash that stores user set extensions */
@@ -90,6 +93,108 @@ static const char *cpu_priv_ver_to_str(int priv_ver)
return priv_spec_str;
}
+static int riscv_cpu_mmu_index(CPUState *cs, bool ifetch)
+{
+ return riscv_env_mmu_index(cpu_env(cs), ifetch);
+}
+
+static TCGTBCPUState riscv_get_tb_cpu_state(CPUState *cs)
+{
+ CPURISCVState *env = cpu_env(cs);
+ RISCVCPU *cpu = env_archcpu(env);
+ RISCVExtStatus fs, vs;
+ uint32_t flags = 0;
+ bool pm_signext = riscv_cpu_virt_mem_enabled(env);
+
+ if (cpu->cfg.ext_zve32x) {
+ /*
+ * If env->vl equals VLMAX, we can use the generic vector operation
+ * expanders (GVEC) to accelerate the vector operations.
+ * However, as LMUL can be a fractional number, the maximum
+ * vector size that can be operated on might be less than 8 bytes,
+ * which GVEC does not support. So we set the vl_eq_vlmax flag to true
+ * only when maxsz >= 8 bytes.
+ */
+
+ /* lmul encoded as in DisasContext::lmul */
+ int8_t lmul = sextract32(FIELD_EX64(env->vtype, VTYPE, VLMUL), 0, 3);
+ uint32_t vsew = FIELD_EX64(env->vtype, VTYPE, VSEW);
+ uint32_t vlmax = vext_get_vlmax(cpu->cfg.vlenb, vsew, lmul);
+ uint32_t maxsz = vlmax << vsew;
+ bool vl_eq_vlmax = (env->vstart == 0) && (vlmax == env->vl) &&
+ (maxsz >= 8);
+ flags = FIELD_DP32(flags, TB_FLAGS, VILL, env->vill);
+ flags = FIELD_DP32(flags, TB_FLAGS, SEW, vsew);
+ flags = FIELD_DP32(flags, TB_FLAGS, LMUL,
+ FIELD_EX64(env->vtype, VTYPE, VLMUL));
+ flags = FIELD_DP32(flags, TB_FLAGS, VL_EQ_VLMAX, vl_eq_vlmax);
+ flags = FIELD_DP32(flags, TB_FLAGS, VTA,
+ FIELD_EX64(env->vtype, VTYPE, VTA));
+ flags = FIELD_DP32(flags, TB_FLAGS, VMA,
+ FIELD_EX64(env->vtype, VTYPE, VMA));
+ flags = FIELD_DP32(flags, TB_FLAGS, VSTART_EQ_ZERO, env->vstart == 0);
+ } else {
+ flags = FIELD_DP32(flags, TB_FLAGS, VILL, 1);
+ }
+
+ if (cpu_get_fcfien(env)) {
+ /*
+ * For Forward CFI, only the expectation of an lpad at
+ * the start of the block is tracked via env->elp. env->elp
+ * is turned on during jalr translation.
+ */
+ flags = FIELD_DP32(flags, TB_FLAGS, FCFI_LP_EXPECTED, env->elp);
+ flags = FIELD_DP32(flags, TB_FLAGS, FCFI_ENABLED, 1);
+ }
+
+ if (cpu_get_bcfien(env)) {
+ flags = FIELD_DP32(flags, TB_FLAGS, BCFI_ENABLED, 1);
+ }
+
+#ifdef CONFIG_USER_ONLY
+ fs = EXT_STATUS_DIRTY;
+ vs = EXT_STATUS_DIRTY;
+#else
+ flags = FIELD_DP32(flags, TB_FLAGS, PRIV, env->priv);
+
+ flags |= riscv_env_mmu_index(env, 0);
+ fs = get_field(env->mstatus, MSTATUS_FS);
+ vs = get_field(env->mstatus, MSTATUS_VS);
+
+ if (env->virt_enabled) {
+ flags = FIELD_DP32(flags, TB_FLAGS, VIRT_ENABLED, 1);
+ /*
+ * Merge DISABLED and !DIRTY states using MIN.
+ * We will set both fields when dirtying.
+ */
+ fs = MIN(fs, get_field(env->mstatus_hs, MSTATUS_FS));
+ vs = MIN(vs, get_field(env->mstatus_hs, MSTATUS_VS));
+ }
+
+ /* With Zfinx, floating point is enabled/disabled by Smstateen. */
+ if (!riscv_has_ext(env, RVF)) {
+ fs = (smstateen_acc_ok(env, 0, SMSTATEEN0_FCSR) == RISCV_EXCP_NONE)
+ ? EXT_STATUS_DIRTY : EXT_STATUS_DISABLED;
+ }
+
+ if (cpu->cfg.debug && !icount_enabled()) {
+ flags = FIELD_DP32(flags, TB_FLAGS, ITRIGGER, env->itrigger_enabled);
+ }
+#endif
+
+ flags = FIELD_DP32(flags, TB_FLAGS, FS, fs);
+ flags = FIELD_DP32(flags, TB_FLAGS, VS, vs);
+ flags = FIELD_DP32(flags, TB_FLAGS, XL, env->xl);
+ flags = FIELD_DP32(flags, TB_FLAGS, AXL, cpu_address_xl(env));
+ flags = FIELD_DP32(flags, TB_FLAGS, PM_PMM, riscv_pm_get_pmm(env));
+ flags = FIELD_DP32(flags, TB_FLAGS, PM_SIGNEXTEND, pm_signext);
+
+ return (TCGTBCPUState){
+ .pc = env->xl == MXL_RV32 ? env->pc & UINT32_MAX : env->pc,
+ .flags = flags
+ };
+}
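A worked example of the vl_eq_vlmax condition, using invented parameters: with VLEN = 128 (vlenb = 16), SEW = 32 (vsew = 2) and LMUL = 1, vlmax = 4 and maxsz = 4 << 2 = 16 >= 8, so GVEC expansion is possible; with SEW = 8 and LMUL = 1/8, vlmax = 2 and maxsz = 2, so it is not. A sketch of the same formula (equivalent to vext_get_vlmax() for these inputs, not a copy of it):

#include <stdint.h>

/* VLMAX = (vlenb * LMUL) / (SEW/8); negative lmul encodes a fractional LMUL. */
static uint32_t vlmax(uint32_t vlenb, uint32_t vsew, int8_t lmul)
{
    return lmul < 0 ? (vlenb >> -lmul) >> vsew : (vlenb << lmul) >> vsew;
}

/* vlmax(16, 2,  0) == 4, maxsz = 4 << 2 == 16 -> GVEC usable   */
/* vlmax(16, 0, -3) == 2, maxsz = 2 << 0 == 2  -> GVEC not used */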
+
static void riscv_cpu_synchronize_from_tb(CPUState *cs,
const TranslationBlock *tb)
{
@@ -129,17 +234,51 @@ static void riscv_restore_state_to_opc(CPUState *cs,
env->pc = pc;
}
env->bins = data[1];
+ env->excp_uw2 = data[2];
}
-static const TCGCPUOps riscv_tcg_ops = {
+#ifndef CONFIG_USER_ONLY
+static vaddr riscv_pointer_wrap(CPUState *cs, int mmu_idx,
+ vaddr result, vaddr base)
+{
+ CPURISCVState *env = cpu_env(cs);
+ uint32_t pm_len;
+ bool pm_signext;
+
+ if (cpu_address_xl(env) == MXL_RV32) {
+ return (uint32_t)result;
+ }
+
+ pm_len = riscv_pm_get_pmlen(riscv_pm_get_pmm(env));
+ if (pm_len == 0) {
+ return result;
+ }
+
+ pm_signext = riscv_cpu_virt_mem_enabled(env);
+ if (pm_signext) {
+ return sextract64(result, 0, 64 - pm_len);
+ }
+ return extract64(result, 0, 64 - pm_len);
+}
+#endif
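riscv_pointer_wrap() applies the pointer-masking rule at the TCG boundary: with PMLEN = 16 (one of the architected pointer-masking widths, assumed here for illustration), only the low 48 bits of a wrapped pointer are significant, sign-extended when virtual memory is enabled and zero-extended otherwise. A plain-C sketch of the two cases:

#include <stdint.h>

/* What the helper computes for pm_len == 16 (so 64 - pm_len == 48). */
static uint64_t wrap48_zero(uint64_t a) { return a & ((1ULL << 48) - 1); }
static uint64_t wrap48_sign(uint64_t a)
{
    return (uint64_t)(((int64_t)(a << 16)) >> 16);
}

/* wrap48_sign(0xFFFF800000001000) == 0xFFFF800000001000 (canonical tag kept)
 * wrap48_zero(0xABCD800000001000) == 0x0000800000001000 (tag bits cleared) */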
+
+const TCGCPUOps riscv_tcg_ops = {
+ .mttcg_supported = true,
+ .guest_default_memory_order = 0,
+
.initialize = riscv_translate_init,
+ .translate_code = riscv_translate_code,
+ .get_tb_cpu_state = riscv_get_tb_cpu_state,
.synchronize_from_tb = riscv_cpu_synchronize_from_tb,
.restore_state_to_opc = riscv_restore_state_to_opc,
+ .mmu_index = riscv_cpu_mmu_index,
#ifndef CONFIG_USER_ONLY
.tlb_fill = riscv_cpu_tlb_fill,
+ .pointer_wrap = riscv_pointer_wrap,
.cpu_exec_interrupt = riscv_cpu_exec_interrupt,
.cpu_exec_halt = riscv_cpu_has_work,
+ .cpu_exec_reset = cpu_reset,
.do_interrupt = riscv_cpu_do_interrupt,
.do_transaction_failed = riscv_cpu_do_transaction_failed,
.do_unaligned_access = riscv_cpu_do_unaligned_access,
@@ -203,10 +342,20 @@ static void riscv_cpu_enable_named_feat(RISCVCPU *cpu, uint32_t feat_offset)
* All other named features are already enabled
* in riscv_tcg_cpu_instance_init().
*/
- if (feat_offset == CPU_CFG_OFFSET(ext_zic64b)) {
+ switch (feat_offset) {
+ case CPU_CFG_OFFSET(ext_zic64b):
cpu->cfg.cbom_blocksize = 64;
cpu->cfg.cbop_blocksize = 64;
cpu->cfg.cboz_blocksize = 64;
+ break;
+ case CPU_CFG_OFFSET(ext_sha):
+ if (!cpu_misa_ext_is_user_set(RVH)) {
+ riscv_cpu_write_misa_bit(cpu, RVH, true);
+ }
+ /* fallthrough */
+ case CPU_CFG_OFFSET(ext_ssstateen):
+ cpu->cfg.ext_smstateen = true;
+ break;
}
}
@@ -303,6 +452,15 @@ static void riscv_cpu_disable_priv_spec_isa_exts(RISCVCPU *cpu)
}
isa_ext_update_enabled(cpu, edata->ext_enable_offset, false);
+
+ /*
+ * Do not show user warnings for named features that users
+ * can't enable/disable in the command line. See commit
+ * 68c9e54bea for more info.
+ */
+ if (cpu_cfg_offset_is_named_feat(edata->ext_enable_offset)) {
+ continue;
+ }
#ifndef CONFIG_USER_ONLY
warn_report("disabling %s extension for hart 0x" TARGET_FMT_lx
" because privilege spec version does not match",
@@ -330,11 +488,16 @@ static void riscv_cpu_update_named_features(RISCVCPU *cpu)
cpu->cfg.has_priv_1_13 = true;
}
- /* zic64b is 1.12 or later */
cpu->cfg.ext_zic64b = cpu->cfg.cbom_blocksize == 64 &&
cpu->cfg.cbop_blocksize == 64 &&
- cpu->cfg.cboz_blocksize == 64 &&
- cpu->cfg.has_priv_1_12;
+ cpu->cfg.cboz_blocksize == 64;
+
+ cpu->cfg.ext_ssstateen = cpu->cfg.ext_smstateen;
+
+ cpu->cfg.ext_sha = riscv_has_ext(&cpu->env, RVH) &&
+ cpu->cfg.ext_ssstateen;
+
+ cpu->cfg.ext_ziccrse = cpu->cfg.has_priv_1_11;
}
static void riscv_cpu_validate_g(RISCVCPU *cpu)
@@ -554,7 +717,7 @@ void riscv_cpu_validate_set_extensions(RISCVCPU *cpu, Error **errp)
return;
}
- if (mcc->misa_mxl_max != MXL_RV32 && cpu->cfg.ext_zcf) {
+ if (mcc->def->misa_mxl_max != MXL_RV32 && cpu->cfg.ext_zcf) {
error_setg(errp, "Zcf extension is only relevant to RV32");
return;
}
@@ -618,11 +781,55 @@ void riscv_cpu_validate_set_extensions(RISCVCPU *cpu, Error **errp)
cpu->cfg.ext_zihpm = false;
}
+ if (cpu->cfg.ext_zicfiss) {
+ if (!cpu->cfg.ext_zicsr) {
+ error_setg(errp, "zicfiss extension requires zicsr extension");
+ return;
+ }
+ if (!riscv_has_ext(env, RVA)) {
+ error_setg(errp, "zicfiss extension requires A extension");
+ return;
+ }
+ if (!riscv_has_ext(env, RVS)) {
+ error_setg(errp, "zicfiss extension requires S");
+ return;
+ }
+ if (!cpu->cfg.ext_zimop) {
+ error_setg(errp, "zicfiss extension requires zimop extension");
+ return;
+ }
+ if (cpu->cfg.ext_zca && !cpu->cfg.ext_zcmop) {
+ error_setg(errp, "zicfiss with zca requires zcmop extension");
+ return;
+ }
+ }
+
if (!cpu->cfg.ext_zihpm) {
cpu->cfg.pmu_mask = 0;
cpu->pmu_avail_ctrs = 0;
}
+ if (cpu->cfg.ext_zicfilp && !cpu->cfg.ext_zicsr) {
+ error_setg(errp, "zicfilp extension requires zicsr extension");
+ return;
+ }
+
+ if (mcc->def->misa_mxl_max == MXL_RV32 && cpu->cfg.ext_svukte) {
+ error_setg(errp, "svukte is not supported for RV32");
+ return;
+ }
+
+ if ((cpu->cfg.ext_smctr || cpu->cfg.ext_ssctr) &&
+ (!riscv_has_ext(env, RVS) || !cpu->cfg.ext_sscsrind)) {
+ if (cpu_cfg_ext_is_user_set(CPU_CFG_OFFSET(ext_smctr)) ||
+ cpu_cfg_ext_is_user_set(CPU_CFG_OFFSET(ext_ssctr))) {
+ error_setg(errp, "Smctr and Ssctr require S-mode and Sscsrind");
+ return;
+ }
+ cpu->cfg.ext_smctr = false;
+ cpu->cfg.ext_ssctr = false;
+ }
+
/*
* Disable isa extensions based on priv spec after we
* validated and set everything we need.
@@ -635,8 +842,9 @@ static bool riscv_cpu_validate_profile_satp(RISCVCPU *cpu,
RISCVCPUProfile *profile,
bool send_warn)
{
- int satp_max = satp_mode_max_from_map(cpu->cfg.satp_mode.supported);
+ int satp_max = cpu->cfg.max_satp_mode;
+ assert(satp_max >= 0);
if (profile->satp_mode > satp_max) {
if (send_warn) {
bool is_32bit = riscv_cpu_is_32bit(cpu);
@@ -655,13 +863,29 @@ static bool riscv_cpu_validate_profile_satp(RISCVCPU *cpu,
}
#endif
+static void riscv_cpu_check_parent_profile(RISCVCPU *cpu,
+ RISCVCPUProfile *profile,
+ RISCVCPUProfile *parent)
+{
+ const char *parent_name;
+ bool parent_enabled;
+
+ if (!profile->enabled || !parent) {
+ return;
+ }
+
+ parent_name = parent->name;
+ parent_enabled = object_property_get_bool(OBJECT(cpu), parent_name, NULL);
+ profile->enabled = parent_enabled;
+}
+
static void riscv_cpu_validate_profile(RISCVCPU *cpu,
RISCVCPUProfile *profile)
{
CPURISCVState *env = &cpu->env;
const char *warn_msg = "Profile %s mandates disabled extension %s";
bool send_warn = profile->user_set && profile->enabled;
- bool parent_enabled, profile_impl = true;
+ bool profile_impl = true;
int i;
#ifndef CONFIG_USER_ONLY
@@ -672,7 +896,7 @@ static void riscv_cpu_validate_profile(RISCVCPU *cpu,
#endif
if (profile->priv_spec != RISCV_PROFILE_ATTR_UNUSED &&
- profile->priv_spec != env->priv_ver) {
+ profile->priv_spec > env->priv_ver) {
profile_impl = false;
if (send_warn) {
@@ -715,12 +939,8 @@ static void riscv_cpu_validate_profile(RISCVCPU *cpu,
profile->enabled = profile_impl;
- if (profile->parent != NULL) {
- parent_enabled = object_property_get_bool(OBJECT(cpu),
- profile->parent->name,
- NULL);
- profile->enabled = profile->enabled && parent_enabled;
- }
+ riscv_cpu_check_parent_profile(cpu, profile, profile->u_parent);
+ riscv_cpu_check_parent_profile(cpu, profile, profile->s_parent);
}
static void riscv_cpu_validate_profiles(RISCVCPU *cpu)
@@ -778,11 +998,18 @@ static void cpu_enable_implied_rule(RISCVCPU *cpu,
if (!enabled) {
/* Enable the implied MISAs. */
if (rule->implied_misa_exts) {
- riscv_cpu_set_misa_ext(env,
- env->misa_ext | rule->implied_misa_exts);
-
for (i = 0; misa_bits[i] != 0; i++) {
if (rule->implied_misa_exts & misa_bits[i]) {
+ /*
+ * If the user disabled the misa_bit do not re-enable it
+ * and do not apply any implied rules related to it.
+ */
+ if (cpu_misa_ext_is_user_set(misa_bits[i]) &&
+ !(env->misa_ext & misa_bits[i])) {
+ continue;
+ }
+
+ riscv_cpu_set_misa_ext(env, env->misa_ext | misa_bits[i]);
ir = g_hash_table_lookup(misa_ext_implied_rules,
GUINT_TO_POINTER(misa_bits[i]));
@@ -825,7 +1052,7 @@ static void cpu_enable_zc_implied_rules(RISCVCPU *cpu)
cpu_cfg_ext_auto_update(cpu, CPU_CFG_OFFSET(ext_zcmp), true);
cpu_cfg_ext_auto_update(cpu, CPU_CFG_OFFSET(ext_zcmt), true);
- if (riscv_has_ext(env, RVF) && mcc->misa_mxl_max == MXL_RV32) {
+ if (riscv_has_ext(env, RVF) && mcc->def->misa_mxl_max == MXL_RV32) {
cpu_cfg_ext_auto_update(cpu, CPU_CFG_OFFSET(ext_zcf), true);
}
}
@@ -834,7 +1061,7 @@ static void cpu_enable_zc_implied_rules(RISCVCPU *cpu)
if (riscv_has_ext(env, RVC) && env->priv_ver >= PRIV_VERSION_1_12_0) {
cpu_cfg_ext_auto_update(cpu, CPU_CFG_OFFSET(ext_zca), true);
- if (riscv_has_ext(env, RVF) && mcc->misa_mxl_max == MXL_RV32) {
+ if (riscv_has_ext(env, RVF) && mcc->def->misa_mxl_max == MXL_RV32) {
cpu_cfg_ext_auto_update(cpu, CPU_CFG_OFFSET(ext_zcf), true);
}
@@ -898,6 +1125,20 @@ void riscv_tcg_cpu_finalize_features(RISCVCPU *cpu, Error **errp)
error_propagate(errp, local_err);
return;
}
+#ifndef CONFIG_USER_ONLY
+ if (cpu->cfg.pmu_mask) {
+ riscv_pmu_init(cpu, &local_err);
+ if (local_err != NULL) {
+ error_propagate(errp, local_err);
+ return;
+ }
+
+ if (cpu->cfg.ext_sscofpmf) {
+ cpu->pmu_timer = timer_new_ns(QEMU_CLOCK_VIRTUAL,
+ riscv_pmu_timer_cb, cpu);
+ }
+ }
+#endif
}
void riscv_tcg_cpu_finalize_dynamic_decoder(RISCVCPU *cpu)
@@ -944,8 +1185,17 @@ static bool riscv_tcg_cpu_realize(CPUState *cs, Error **errp)
}
#ifndef CONFIG_USER_ONLY
+ RISCVCPUClass *mcc = RISCV_CPU_GET_CLASS(cpu);
+
+ if (mcc->def->misa_mxl_max >= MXL_RV128 && qemu_tcg_mttcg_enabled()) {
+ /* Missing 128-bit aligned atomics */
+ error_setg(errp,
+ "128-bit RISC-V currently does not work with Multi "
+ "Threaded TCG. Please use: -accel tcg,thread=single");
+ return false;
+ }
+
CPURISCVState *env = &cpu->env;
- Error *local_err = NULL;
tcg_cflags_set(CPU(cs), CF_PCREL);
@@ -953,19 +1203,6 @@ static bool riscv_tcg_cpu_realize(CPUState *cs, Error **errp)
riscv_timer_init(cpu);
}
- if (cpu->cfg.pmu_mask) {
- riscv_pmu_init(cpu, &local_err);
- if (local_err != NULL) {
- error_propagate(errp, local_err);
- return false;
- }
-
- if (cpu->cfg.ext_sscofpmf) {
- cpu->pmu_timer = timer_new_ns(QEMU_CLOCK_VIRTUAL,
- riscv_pmu_timer_cb, cpu);
- }
- }
-
/* With H-Ext, VSSIP, VSTIP, VSEIP and SGEIP are hardwired to one. */
if (riscv_has_ext(env, RVH)) {
env->mideleg = MIP_VSSIP | MIP_VSTIP | MIP_VSEIP | MIP_SGEIP;
@@ -1050,7 +1287,6 @@ static const RISCVCPUMisaExtConfig misa_ext_cfgs[] = {
MISA_CFG(RVS, true),
MISA_CFG(RVU, true),
MISA_CFG(RVH, true),
- MISA_CFG(RVJ, false),
MISA_CFG(RVV, false),
MISA_CFG(RVG, false),
MISA_CFG(RVB, false),
@@ -1117,8 +1353,13 @@ static void cpu_set_profile(Object *obj, Visitor *v, const char *name,
profile->user_set = true;
profile->enabled = value;
- if (profile->parent != NULL) {
- object_property_set_bool(obj, profile->parent->name,
+ if (profile->u_parent != NULL) {
+ object_property_set_bool(obj, profile->u_parent->name,
+ profile->enabled, NULL);
+ }
+
+ if (profile->s_parent != NULL) {
+ object_property_set_bool(obj, profile->s_parent->name,
profile->enabled, NULL);
}
@@ -1337,8 +1578,8 @@ static void riscv_init_max_cpu_extensions(Object *obj)
CPURISCVState *env = &cpu->env;
const RISCVCPUMultiExtConfig *prop;
- /* Enable RVG, RVJ and RVV that are disabled by default */
- riscv_cpu_set_misa_ext(env, env->misa_ext | RVB | RVG | RVJ | RVV);
+ /* Enable RVG and RVV that are disabled by default */
+ riscv_cpu_set_misa_ext(env, env->misa_ext | RVB | RVG | RVV);
for (prop = riscv_cpu_extensions; prop && prop->name; prop++) {
isa_ext_update_enabled(cpu, prop->offset, true);
@@ -1366,6 +1607,23 @@ static void riscv_init_max_cpu_extensions(Object *obj)
if (env->misa_mxl != MXL_RV32) {
isa_ext_update_enabled(cpu, CPU_CFG_OFFSET(ext_zcf), false);
}
+
+ /*
+ * TODO: ext_smrnmi requires OpenSBI changes that our current
+ * image does not have. Disable it for now.
+ */
+ if (cpu->cfg.ext_smrnmi) {
+ isa_ext_update_enabled(cpu, CPU_CFG_OFFSET(ext_smrnmi), false);
+ }
+
+ /*
+ * TODO: ext_smdbltrp requires the firmware to clear MSTATUS.MDT on startup
+ * to avoid generating a double trap. OpenSBI does not currently support it,
+ * so disable it for now.
+ */
+ if (cpu->cfg.ext_smdbltrp) {
+ isa_ext_update_enabled(cpu, CPU_CFG_OFFSET(ext_smdbltrp), false);
+ }
}
static bool riscv_cpu_has_max_extensions(Object *cpu_obj)
@@ -1396,24 +1654,10 @@ static void riscv_tcg_cpu_instance_init(CPUState *cs)
}
}
-static void riscv_tcg_cpu_init_ops(AccelCPUClass *accel_cpu, CPUClass *cc)
-{
- /*
- * All cpus use the same set of operations.
- */
- cc->tcg_ops = &riscv_tcg_ops;
-}
-
-static void riscv_tcg_cpu_class_init(CPUClass *cc)
-{
- cc->init_accel_cpu = riscv_tcg_cpu_init_ops;
-}
-
-static void riscv_tcg_cpu_accel_class_init(ObjectClass *oc, void *data)
+static void riscv_tcg_cpu_accel_class_init(ObjectClass *oc, const void *data)
{
AccelCPUClass *acc = ACCEL_CPU_CLASS(oc);
- acc->cpu_class_init = riscv_tcg_cpu_class_init;
acc->cpu_instance_init = riscv_tcg_cpu_instance_init;
acc->cpu_target_realize = riscv_tcg_cpu_realize;
}
diff --git a/target/riscv/tcg/tcg-cpu.h b/target/riscv/tcg/tcg-cpu.h
index ce94253..a23716a 100644
--- a/target/riscv/tcg/tcg-cpu.h
+++ b/target/riscv/tcg/tcg-cpu.h
@@ -26,6 +26,8 @@ void riscv_cpu_validate_set_extensions(RISCVCPU *cpu, Error **errp);
void riscv_tcg_cpu_finalize_features(RISCVCPU *cpu, Error **errp);
bool riscv_cpu_tcg_compatible(RISCVCPU *cpu);
+extern const TCGCPUOps riscv_tcg_ops;
+
struct DisasContext;
struct RISCVCPUConfig;
typedef struct RISCVDecoder {
diff --git a/target/riscv/th_csr.c b/target/riscv/th_csr.c
index 6c970d4..49eb7bb 100644
--- a/target/riscv/th_csr.c
+++ b/target/riscv/th_csr.c
@@ -27,12 +27,6 @@
#define TH_SXSTATUS_MAEE BIT(21)
#define TH_SXSTATUS_THEADISAEE BIT(22)
-typedef struct {
- int csrno;
- int (*insertion_test)(RISCVCPU *cpu);
- riscv_csr_operations csr_ops;
-} riscv_csr;
-
static RISCVException smode(CPURISCVState *env, int csrno)
{
if (riscv_has_ext(env, RVS)) {
@@ -42,13 +36,9 @@ static RISCVException smode(CPURISCVState *env, int csrno)
return RISCV_EXCP_ILLEGAL_INST;
}
-static int test_thead_mvendorid(RISCVCPU *cpu)
+static bool test_thead_mvendorid(RISCVCPU *cpu)
{
- if (cpu->cfg.mvendorid != THEAD_VENDOR_ID) {
- return -1;
- }
-
- return 0;
+ return cpu->cfg.mvendorid == THEAD_VENDOR_ID;
}
static RISCVException read_th_sxstatus(CPURISCVState *env, int csrno,
@@ -59,21 +49,11 @@ static RISCVException read_th_sxstatus(CPURISCVState *env, int csrno,
return RISCV_EXCP_NONE;
}
-static riscv_csr th_csr_list[] = {
+const RISCVCSR th_csr_list[] = {
{
.csrno = CSR_TH_SXSTATUS,
.insertion_test = test_thead_mvendorid,
.csr_ops = { "th.sxstatus", smode, read_th_sxstatus }
- }
+ },
+ { }
};
-
-void th_register_custom_csrs(RISCVCPU *cpu)
-{
- for (size_t i = 0; i < ARRAY_SIZE(th_csr_list); i++) {
- int csrno = th_csr_list[i].csrno;
- riscv_csr_operations *csr_ops = &th_csr_list[i].csr_ops;
- if (!th_csr_list[i].insertion_test(cpu)) {
- riscv_set_csr_ops(csrno, csr_ops);
- }
- }
-}
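With the list now const and terminated by an empty entry instead of sized with ARRAY_SIZE(), the registration loop that replaces th_register_custom_csrs() presumably walks it until the sentinel. A hedged sketch of such a consumer (loop shape assumed, not shown in this patch; note insertion_test now returns true when the CSR should be registered):

/* Assumed consumer of a { }-terminated RISCVCSR list: */
for (size_t i = 0; csr_list[i].csr_ops.name != NULL; i++) {
    if (csr_list[i].insertion_test(cpu)) {
        riscv_set_csr_ops(csr_list[i].csrno, &csr_list[i].csr_ops);
    }
}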
diff --git a/target/riscv/time_helper.c b/target/riscv/time_helper.c
index 8d245be..bc0d9a0 100644
--- a/target/riscv/time_helper.c
+++ b/target/riscv/time_helper.c
@@ -92,6 +92,7 @@ void riscv_timer_write_timecmp(CPURISCVState *env, QEMUTimer *timer,
* equals UINT64_MAX.
*/
if (timecmp == UINT64_MAX) {
+ timer_del(timer);
return;
}
diff --git a/target/riscv/trace-events b/target/riscv/trace-events
index 49ec4d3..93837f8 100644
--- a/target/riscv/trace-events
+++ b/target/riscv/trace-events
@@ -9,3 +9,6 @@ pmpaddr_csr_write(uint64_t mhartid, uint32_t addr_index, uint64_t val) "hart %"
mseccfg_csr_read(uint64_t mhartid, uint64_t val) "hart %" PRIu64 ": read mseccfg, val: 0x%" PRIx64
mseccfg_csr_write(uint64_t mhartid, uint64_t val) "hart %" PRIu64 ": write mseccfg, val: 0x%" PRIx64
+
+# op_helper.c
+riscv_exception(uint32_t exception, const char *desc, uint64_t epc) "%u (%s) on epc 0x%"PRIx64""
diff --git a/target/riscv/translate.c b/target/riscv/translate.c
index acba90f..d7a6de0 100644
--- a/target/riscv/translate.c
+++ b/target/riscv/translate.c
@@ -20,11 +20,11 @@
#include "qemu/log.h"
#include "cpu.h"
#include "tcg/tcg-op.h"
-#include "exec/exec-all.h"
#include "exec/helper-proto.h"
#include "exec/helper-gen.h"
-
+#include "exec/target_page.h"
#include "exec/translator.h"
+#include "exec/translation-block.h"
#include "exec/log.h"
#include "semihosting/semihost.h"
@@ -41,9 +41,6 @@ static TCGv cpu_gpr[32], cpu_gprh[32], cpu_pc, cpu_vl, cpu_vstart;
static TCGv_i64 cpu_fpr[32]; /* assume F and D extensions */
static TCGv load_res;
static TCGv load_val;
-/* globals for PM CSRs */
-static TCGv pm_mask;
-static TCGv pm_base;
/*
* If an operation is being performed on less than TARGET_LONG_BITS,
@@ -105,9 +102,9 @@ typedef struct DisasContext {
bool vl_eq_vlmax;
CPUState *cs;
TCGv zero;
- /* PointerMasking extension */
- bool pm_mask_enabled;
- bool pm_base_enabled;
+ /* actual address width */
+ uint8_t addr_xl;
+ bool addr_signed;
/* Ztso */
bool ztso;
/* Use icount trigger for native debug */
@@ -116,6 +113,11 @@ typedef struct DisasContext {
bool frm_valid;
bool insn_start_updated;
const GPtrArray *decoders;
+ /* Zicfilp extension: whether forward CFI is enabled and an lpad is expected */
+ bool fcfi_enabled;
+ bool fcfi_lp_expected;
+ /* Zicfiss extension: whether the shadow stack was enabled during TB generation */
+ bool bcfi_enabled;
} DisasContext;
static inline bool has_ext(DisasContext *ctx, uint32_t ext)
@@ -139,6 +141,8 @@ static inline bool has_ext(DisasContext *ctx, uint32_t ext)
#define get_address_xl(ctx) ((ctx)->address_xl)
#endif
+#define mxl_memop(ctx) ((get_xl(ctx) + 1) | MO_TE)
+
/* The word size for this machine mode. */
static inline int __attribute__((unused)) get_xlen(DisasContext *ctx)
{
@@ -204,11 +208,12 @@ static void gen_check_nanbox_s(TCGv_i64 out, TCGv_i64 in)
tcg_gen_movcond_i64(TCG_COND_GEU, out, in, t_max, in, t_nan);
}
-static void decode_save_opc(DisasContext *ctx)
+static void decode_save_opc(DisasContext *ctx, target_ulong excp_uw2)
{
assert(!ctx->insn_start_updated);
ctx->insn_start_updated = true;
tcg_set_insn_start_param(ctx->base.insn_start, 1, ctx->opcode);
+ tcg_set_insn_start_param(ctx->base.insn_start, 2, excp_uw2);
}
static void gen_pc_plus_diff(TCGv target, DisasContext *ctx,
@@ -236,7 +241,7 @@ static void gen_update_pc(DisasContext *ctx, target_long diff)
ctx->pc_save = ctx->base.pc_next + diff;
}
-static void generate_exception(DisasContext *ctx, int excp)
+static void generate_exception(DisasContext *ctx, RISCVException excp)
{
gen_update_pc(ctx, 0);
gen_helper_raise_exception(tcg_env, tcg_constant_i32(excp));
@@ -555,12 +560,54 @@ static void gen_set_fpr_d(DisasContext *ctx, int reg_num, TCGv_i64 t)
}
}
+#ifndef CONFIG_USER_ONLY
+/*
+ * Direct calls:
+ * - jal x1;
+ * - jal x5;
+ * - c.jal;
+ * - cm.jalt.
+ *
+ * Direct jumps:
+ * - jal x0;
+ * - c.j;
+ * - cm.jt.
+ *
+ * Other direct jumps:
+ * - jal rd where rd != x1 and rd != x5 and rd != x0.
+ */
+static void gen_ctr_jal(DisasContext *ctx, int rd, target_ulong imm)
+{
+ TCGv dest = tcg_temp_new();
+ TCGv src = tcg_temp_new();
+ TCGv type;
+
+ /*
+ * If rd is the x1 or x5 link register, treat this as a direct call;
+ * otherwise it is a direct jump.
+ */
+ if (rd == 1 || rd == 5) {
+ type = tcg_constant_tl(CTRDATA_TYPE_DIRECT_CALL);
+ } else if (rd == 0) {
+ type = tcg_constant_tl(CTRDATA_TYPE_DIRECT_JUMP);
+ } else {
+ type = tcg_constant_tl(CTRDATA_TYPE_OTHER_DIRECT_JUMP);
+ }
+
+ gen_pc_plus_diff(dest, ctx, imm);
+ gen_pc_plus_diff(src, ctx, 0);
+ gen_helper_ctr_add_entry(tcg_env, src, dest, type);
+}
+#endif
+
static void gen_jal(DisasContext *ctx, int rd, target_ulong imm)
{
TCGv succ_pc = dest_gpr(ctx, rd);
/* check misaligned: */
- if (!has_ext(ctx, RVC) && !ctx->cfg_ptr->ext_zca) {
+ if (!riscv_cpu_allow_16bit_insn(ctx->cfg_ptr,
+ ctx->priv_ver,
+ ctx->misa_ext)) {
if ((imm & 0x3) != 0) {
TCGv target_pc = tcg_temp_new();
gen_pc_plus_diff(target_pc, ctx, imm);
@@ -569,6 +616,12 @@ static void gen_jal(DisasContext *ctx, int rd, target_ulong imm)
}
}
+#ifndef CONFIG_USER_ONLY
+ if (ctx->cfg_ptr->ext_smctr || ctx->cfg_ptr->ext_ssctr) {
+ gen_ctr_jal(ctx, rd, imm);
+ }
+#endif
+
gen_pc_plus_diff(succ_pc, ctx, ctx->cur_insn_len);
gen_set_gpr(ctx, rd, succ_pc);
@@ -583,13 +636,10 @@ static TCGv get_address(DisasContext *ctx, int rs1, int imm)
TCGv src1 = get_gpr(ctx, rs1, EXT_NONE);
tcg_gen_addi_tl(addr, src1, imm);
- if (ctx->pm_mask_enabled) {
- tcg_gen_andc_tl(addr, addr, pm_mask);
- } else if (get_address_xl(ctx) == MXL_RV32) {
- tcg_gen_ext32u_tl(addr, addr);
- }
- if (ctx->pm_base_enabled) {
- tcg_gen_or_tl(addr, addr, pm_base);
+ if (ctx->addr_signed) {
+ tcg_gen_sextract_tl(addr, addr, 0, ctx->addr_xl);
+ } else {
+ tcg_gen_extract_tl(addr, addr, 0, ctx->addr_xl);
}
return addr;
@@ -602,14 +652,12 @@ static TCGv get_address_indexed(DisasContext *ctx, int rs1, TCGv offs)
TCGv src1 = get_gpr(ctx, rs1, EXT_NONE);
tcg_gen_add_tl(addr, src1, offs);
- if (ctx->pm_mask_enabled) {
- tcg_gen_andc_tl(addr, addr, pm_mask);
- } else if (get_xl(ctx) == MXL_RV32) {
- tcg_gen_ext32u_tl(addr, addr);
- }
- if (ctx->pm_base_enabled) {
- tcg_gen_or_tl(addr, addr, pm_base);
+ if (ctx->addr_signed) {
+ tcg_gen_sextract_tl(addr, addr, 0, ctx->addr_xl);
+ } else {
+ tcg_gen_extract_tl(addr, addr, 0, ctx->addr_xl);
}
+
return addr;
}
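In the translator the equivalent masking is emitted inline: a single extract (bare mode) or sextract (virtual memory enabled) over the low addr_xl bits replaces the old pm_mask/pm_base global pair. As plain C, what the emitted TCG op computes for addr_xl = 48 (an assumed PMLEN = 16 configuration):

#include <stdint.h>

/* tcg_gen_extract_tl(addr, addr, 0, 48): keep the low 48 bits. */
static uint64_t addr_extract48(uint64_t a) { return a & ((1ULL << 48) - 1); }

/* tcg_gen_sextract_tl(addr, addr, 0, 48): sign-extend from bit 47. */
static int64_t addr_sextract48(uint64_t a) { return ((int64_t)(a << 16)) >> 16; }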
@@ -694,7 +742,7 @@ static void gen_set_rm(DisasContext *ctx, int rm)
}
/* The helper may raise ILLEGAL_INSN -- record binv for unwind. */
- decode_save_opc(ctx);
+ decode_save_opc(ctx, 0);
gen_helper_set_rounding_mode(tcg_env, tcg_constant_i32(rm));
}
@@ -707,7 +755,7 @@ static void gen_set_rm_chkfrm(DisasContext *ctx, int rm)
ctx->frm_valid = true;
/* The helper may raise ILLEGAL_INSN -- record binv for unwind. */
- decode_save_opc(ctx);
+ decode_save_opc(ctx, 0);
gen_helper_set_rounding_mode_chkfrm(tcg_env, tcg_constant_i32(rm));
}
@@ -1091,7 +1139,7 @@ static bool gen_amo(DisasContext *ctx, arg_atomic *a,
mop |= MO_ALIGN;
}
- decode_save_opc(ctx);
+ decode_save_opc(ctx, RISCV_UW2_ALWAYS_STORE_AMO);
src1 = get_address(ctx, a->rs1, 0);
func(dest, src1, src2, ctx->mem_idx, mop);
@@ -1105,7 +1153,7 @@ static bool gen_cmpxchg(DisasContext *ctx, arg_atomic *a, MemOp mop)
TCGv src1 = get_address(ctx, a->rs1, 0);
TCGv src2 = get_gpr(ctx, a->rs2, EXT_NONE);
- decode_save_opc(ctx);
+ decode_save_opc(ctx, RISCV_UW2_ALWAYS_STORE_AMO);
tcg_gen_atomic_cmpxchg_tl(dest, src1, dest, src2, ctx->mem_idx, mop);
gen_set_gpr(ctx, a->rd, dest);
@@ -1121,6 +1169,8 @@ static uint32_t opcode_at(DisasContextBase *dcbase, target_ulong pc)
return translator_ldl(env, &ctx->base, pc);
}
+#define SS_MMU_INDEX(ctx) (ctx->mem_idx | MMU_IDX_SS_WRITE)
+
/* Include insn module translation function */
#include "insn_trans/trans_rvi.c.inc"
#include "insn_trans/trans_rvm.c.inc"
@@ -1151,6 +1201,7 @@ static uint32_t opcode_at(DisasContextBase *dcbase, target_ulong pc)
#include "decode-insn16.c.inc"
#include "insn_trans/trans_rvzce.c.inc"
#include "insn_trans/trans_rvzcmop.c.inc"
+#include "insn_trans/trans_rvzicfiss.c.inc"
/* Include decoders for factored-out extensions */
#include "decode-XVentanaCondOps.c.inc"
@@ -1158,11 +1209,6 @@ static uint32_t opcode_at(DisasContextBase *dcbase, target_ulong pc)
/* The specification allows for longer insns, but not supported by qemu. */
#define MAX_INSN_LEN 4
-static inline int insn_len(uint16_t first_word)
-{
- return (first_word & 3) == 3 ? 4 : 2;
-}
-
const RISCVDecoder decoder_table[] = {
{ always_true_p, decode_insn32 },
{ has_xthead_p, decode_xthead},
@@ -1230,14 +1276,23 @@ static void riscv_tr_init_disas_context(DisasContextBase *dcbase, CPUState *cs)
ctx->cfg_vta_all_1s = cpu->cfg.rvv_ta_all_1s;
ctx->vstart_eq_zero = FIELD_EX32(tb_flags, TB_FLAGS, VSTART_EQ_ZERO);
ctx->vl_eq_vlmax = FIELD_EX32(tb_flags, TB_FLAGS, VL_EQ_VLMAX);
- ctx->misa_mxl_max = mcc->misa_mxl_max;
+ ctx->misa_mxl_max = mcc->def->misa_mxl_max;
ctx->xl = FIELD_EX32(tb_flags, TB_FLAGS, XL);
ctx->address_xl = FIELD_EX32(tb_flags, TB_FLAGS, AXL);
ctx->cs = cs;
- ctx->pm_mask_enabled = FIELD_EX32(tb_flags, TB_FLAGS, PM_MASK_ENABLED);
- ctx->pm_base_enabled = FIELD_EX32(tb_flags, TB_FLAGS, PM_BASE_ENABLED);
+ if (get_xl(ctx) == MXL_RV32) {
+ ctx->addr_xl = 32;
+ ctx->addr_signed = false;
+ } else {
+ int pm_pmm = FIELD_EX32(tb_flags, TB_FLAGS, PM_PMM);
+ ctx->addr_xl = 64 - riscv_pm_get_pmlen(pm_pmm);
+ ctx->addr_signed = FIELD_EX32(tb_flags, TB_FLAGS, PM_SIGNEXTEND);
+ }
ctx->ztso = cpu->cfg.ext_ztso;
ctx->itrigger = FIELD_EX32(tb_flags, TB_FLAGS, ITRIGGER);
+ ctx->bcfi_enabled = FIELD_EX32(tb_flags, TB_FLAGS, BCFI_ENABLED);
+ ctx->fcfi_lp_expected = FIELD_EX32(tb_flags, TB_FLAGS, FCFI_LP_EXPECTED);
+ ctx->fcfi_enabled = FIELD_EX32(tb_flags, TB_FLAGS, FCFI_ENABLED);
ctx->zero = tcg_constant_tl(0);
ctx->virt_inst_excp = false;
ctx->decoders = cpu->decoders;
@@ -1256,7 +1311,7 @@ static void riscv_tr_insn_start(DisasContextBase *dcbase, CPUState *cpu)
pc_next &= ~TARGET_PAGE_MASK;
}
- tcg_gen_insn_start(pc_next, 0);
+ tcg_gen_insn_start(pc_next, 0, 0);
ctx->insn_start_updated = false;
}
@@ -1270,9 +1325,27 @@ static void riscv_tr_translate_insn(DisasContextBase *dcbase, CPUState *cpu)
decode_opc(env, ctx, opcode16);
ctx->base.pc_next += ctx->cur_insn_len;
+ /*
+ * If 'fcfi_lp_expected' is still true after processing the instruction,
+ * then we did not see an 'lpad' instruction, and must raise an exception.
+ * Insert code to raise the exception at the start of the insn; any other
+ * code the insn may have emitted will be deleted as dead code following
+ * the noreturn exception.
+ */
+ if (ctx->fcfi_lp_expected) {
+ /* Emit after insn_start, i.e. before the op following insn_start. */
+ tcg_ctx->emit_before_op = QTAILQ_NEXT(ctx->base.insn_start, link);
+ tcg_gen_st_tl(tcg_constant_tl(RISCV_EXCP_SW_CHECK_FCFI_TVAL),
+ tcg_env, offsetof(CPURISCVState, sw_check_code));
+ gen_helper_raise_exception(tcg_env,
+ tcg_constant_i32(RISCV_EXCP_SW_CHECK));
+ tcg_ctx->emit_before_op = NULL;
+ ctx->base.is_jmp = DISAS_NORETURN;
+ }
+
/* Only the first insn within a TB is allowed to cross a page boundary. */
if (ctx->base.is_jmp == DISAS_NEXT) {
- if (ctx->itrigger || !is_same_page(&ctx->base, ctx->base.pc_next)) {
+ if (ctx->itrigger || !translator_is_same_page(&ctx->base, ctx->base.pc_next)) {
ctx->base.is_jmp = DISAS_TOO_MANY;
} else {
unsigned page_ofs = ctx->base.pc_next & ~TARGET_PAGE_MASK;
@@ -1282,7 +1355,7 @@ static void riscv_tr_translate_insn(DisasContextBase *dcbase, CPUState *cpu)
translator_lduw(env, &ctx->base, ctx->base.pc_next);
int len = insn_len(next_insn);
- if (!is_same_page(&ctx->base, ctx->base.pc_next + len - 1)) {
+ if (!translator_is_same_page(&ctx->base, ctx->base.pc_next + len - 1)) {
ctx->base.is_jmp = DISAS_TOO_MANY;
}
}
@@ -1313,8 +1386,8 @@ static const TranslatorOps riscv_tr_ops = {
.tb_stop = riscv_tr_tb_stop,
};
-void gen_intermediate_code(CPUState *cs, TranslationBlock *tb, int *max_insns,
- vaddr pc, void *host_pc)
+void riscv_translate_code(CPUState *cs, TranslationBlock *tb,
+ int *max_insns, vaddr pc, void *host_pc)
{
DisasContext ctx;
@@ -1353,9 +1426,4 @@ void riscv_translate_init(void)
"load_res");
load_val = tcg_global_mem_new(tcg_env, offsetof(CPURISCVState, load_val),
"load_val");
- /* Assign PM CSRs to tcg globals */
- pm_mask = tcg_global_mem_new(tcg_env, offsetof(CPURISCVState, cur_pmmask),
- "pmmask");
- pm_base = tcg_global_mem_new(tcg_env, offsetof(CPURISCVState, cur_pmbase),
- "pmbase");
}
diff --git a/target/riscv/vcrypto_helper.c b/target/riscv/vcrypto_helper.c
index f7423df..9a0d9b4 100644
--- a/target/riscv/vcrypto_helper.c
+++ b/target/riscv/vcrypto_helper.c
@@ -26,7 +26,6 @@
#include "crypto/aes-round.h"
#include "crypto/sm4.h"
#include "exec/memop.h"
-#include "exec/exec-all.h"
#include "exec/helper-proto.h"
#include "internals.h"
#include "vector_internals.h"
@@ -222,7 +221,7 @@ static inline void xor_round_key(AESState *round_state, AESState *round_key)
uint32_t total_elems = vext_get_total_elems(env, desc, 4); \
uint32_t vta = vext_vta(desc); \
\
- VSTART_CHECK_EARLY_EXIT(env); \
+ VSTART_CHECK_EARLY_EXIT(env, vl); \
\
for (uint32_t i = env->vstart / 4; i < env->vl / 4; i++) { \
AESState round_key; \
@@ -248,7 +247,7 @@ static inline void xor_round_key(AESState *round_state, AESState *round_key)
uint32_t total_elems = vext_get_total_elems(env, desc, 4); \
uint32_t vta = vext_vta(desc); \
\
- VSTART_CHECK_EARLY_EXIT(env); \
+ VSTART_CHECK_EARLY_EXIT(env, vl); \
\
for (uint32_t i = env->vstart / 4; i < env->vl / 4; i++) { \
AESState round_key; \
@@ -309,7 +308,7 @@ void HELPER(vaeskf1_vi)(void *vd_vptr, void *vs2_vptr, uint32_t uimm,
uint32_t total_elems = vext_get_total_elems(env, desc, 4);
uint32_t vta = vext_vta(desc);
- VSTART_CHECK_EARLY_EXIT(env);
+ VSTART_CHECK_EARLY_EXIT(env, vl);
uimm &= 0b1111;
if (uimm > 10 || uimm == 0) {
@@ -357,7 +356,7 @@ void HELPER(vaeskf2_vi)(void *vd_vptr, void *vs2_vptr, uint32_t uimm,
uint32_t total_elems = vext_get_total_elems(env, desc, 4);
uint32_t vta = vext_vta(desc);
- VSTART_CHECK_EARLY_EXIT(env);
+ VSTART_CHECK_EARLY_EXIT(env, vl);
uimm &= 0b1111;
if (uimm > 14 || uimm < 2) {
@@ -465,7 +464,7 @@ void HELPER(vsha2ms_vv)(void *vd, void *vs1, void *vs2, CPURISCVState *env,
uint32_t total_elems;
uint32_t vta = vext_vta(desc);
- VSTART_CHECK_EARLY_EXIT(env);
+ VSTART_CHECK_EARLY_EXIT(env, env->vl);
for (uint32_t i = env->vstart / 4; i < env->vl / 4; i++) {
if (sew == MO_32) {
@@ -582,7 +581,7 @@ void HELPER(vsha2ch32_vv)(void *vd, void *vs1, void *vs2, CPURISCVState *env,
uint32_t total_elems;
uint32_t vta = vext_vta(desc);
- VSTART_CHECK_EARLY_EXIT(env);
+ VSTART_CHECK_EARLY_EXIT(env, env->vl);
for (uint32_t i = env->vstart / 4; i < env->vl / 4; i++) {
vsha2c_32(((uint32_t *)vs2) + 4 * i, ((uint32_t *)vd) + 4 * i,
@@ -602,7 +601,7 @@ void HELPER(vsha2ch64_vv)(void *vd, void *vs1, void *vs2, CPURISCVState *env,
uint32_t total_elems;
uint32_t vta = vext_vta(desc);
- VSTART_CHECK_EARLY_EXIT(env);
+ VSTART_CHECK_EARLY_EXIT(env, env->vl);
for (uint32_t i = env->vstart / 4; i < env->vl / 4; i++) {
vsha2c_64(((uint64_t *)vs2) + 4 * i, ((uint64_t *)vd) + 4 * i,
@@ -622,7 +621,7 @@ void HELPER(vsha2cl32_vv)(void *vd, void *vs1, void *vs2, CPURISCVState *env,
uint32_t total_elems;
uint32_t vta = vext_vta(desc);
- VSTART_CHECK_EARLY_EXIT(env);
+ VSTART_CHECK_EARLY_EXIT(env, env->vl);
for (uint32_t i = env->vstart / 4; i < env->vl / 4; i++) {
vsha2c_32(((uint32_t *)vs2) + 4 * i, ((uint32_t *)vd) + 4 * i,
@@ -642,7 +641,7 @@ void HELPER(vsha2cl64_vv)(void *vd, void *vs1, void *vs2, CPURISCVState *env,
uint32_t total_elems;
uint32_t vta = vext_vta(desc);
- VSTART_CHECK_EARLY_EXIT(env);
+ VSTART_CHECK_EARLY_EXIT(env, env->vl);
for (uint32_t i = env->vstart / 4; i < env->vl / 4; i++) {
vsha2c_64(((uint64_t *)vs2) + 4 * i, ((uint64_t *)vd) + 4 * i,
@@ -676,7 +675,7 @@ void HELPER(vsm3me_vv)(void *vd_vptr, void *vs1_vptr, void *vs2_vptr,
uint32_t *vs1 = vs1_vptr;
uint32_t *vs2 = vs2_vptr;
- VSTART_CHECK_EARLY_EXIT(env);
+ VSTART_CHECK_EARLY_EXIT(env, env->vl);
for (int i = env->vstart / 8; i < env->vl / 8; i++) {
uint32_t w[24];
@@ -777,7 +776,7 @@ void HELPER(vsm3c_vi)(void *vd_vptr, void *vs2_vptr, uint32_t uimm,
uint32_t *vs2 = vs2_vptr;
uint32_t v1[8], v2[8], v3[8];
- VSTART_CHECK_EARLY_EXIT(env);
+ VSTART_CHECK_EARLY_EXIT(env, env->vl);
for (int i = env->vstart / 8; i < env->vl / 8; i++) {
for (int k = 0; k < 8; k++) {
@@ -802,7 +801,7 @@ void HELPER(vghsh_vv)(void *vd_vptr, void *vs1_vptr, void *vs2_vptr,
uint32_t vta = vext_vta(desc);
uint32_t total_elems = vext_get_total_elems(env, desc, 4);
- VSTART_CHECK_EARLY_EXIT(env);
+ VSTART_CHECK_EARLY_EXIT(env, env->vl);
for (uint32_t i = env->vstart / 4; i < env->vl / 4; i++) {
uint64_t Y[2] = {vd[i * 2 + 0], vd[i * 2 + 1]};
@@ -841,7 +840,7 @@ void HELPER(vgmul_vv)(void *vd_vptr, void *vs2_vptr, CPURISCVState *env,
uint32_t vta = vext_vta(desc);
uint32_t total_elems = vext_get_total_elems(env, desc, 4);
- VSTART_CHECK_EARLY_EXIT(env);
+ VSTART_CHECK_EARLY_EXIT(env, env->vl);
for (uint32_t i = env->vstart / 4; i < env->vl / 4; i++) {
uint64_t Y[2] = {brev8(vd[i * 2 + 0]), brev8(vd[i * 2 + 1])};
@@ -879,7 +878,7 @@ void HELPER(vsm4k_vi)(void *vd, void *vs2, uint32_t uimm5, CPURISCVState *env,
uint32_t esz = sizeof(uint32_t);
uint32_t total_elems = vext_get_total_elems(env, desc, esz);
- VSTART_CHECK_EARLY_EXIT(env);
+ VSTART_CHECK_EARLY_EXIT(env, env->vl);
for (uint32_t i = group_start; i < group_end; ++i) {
uint32_t vstart = i * egs;
@@ -937,7 +936,7 @@ void HELPER(vsm4r_vv)(void *vd, void *vs2, CPURISCVState *env, uint32_t desc)
uint32_t esz = sizeof(uint32_t);
uint32_t total_elems = vext_get_total_elems(env, desc, esz);
- VSTART_CHECK_EARLY_EXIT(env);
+ VSTART_CHECK_EARLY_EXIT(env, env->vl);
for (uint32_t i = group_start; i < group_end; ++i) {
uint32_t vstart = i * egs;
@@ -973,7 +972,7 @@ void HELPER(vsm4r_vs)(void *vd, void *vs2, CPURISCVState *env, uint32_t desc)
uint32_t esz = sizeof(uint32_t);
uint32_t total_elems = vext_get_total_elems(env, desc, esz);
- VSTART_CHECK_EARLY_EXIT(env);
+ VSTART_CHECK_EARLY_EXIT(env, env->vl);
for (uint32_t i = group_start; i < group_end; ++i) {
uint32_t vstart = i * egs;
diff --git a/target/riscv/vector_helper.c b/target/riscv/vector_helper.c
index 1b4d5a8..5dc1c10 100644
--- a/target/riscv/vector_helper.c
+++ b/target/riscv/vector_helper.c
@@ -21,10 +21,13 @@
#include "qemu/bitops.h"
#include "cpu.h"
#include "exec/memop.h"
-#include "exec/exec-all.h"
-#include "exec/cpu_ldst.h"
+#include "accel/tcg/cpu-ldst.h"
+#include "accel/tcg/probe.h"
#include "exec/page-protection.h"
#include "exec/helper-proto.h"
+#include "exec/tlb-flags.h"
+#include "exec/target_page.h"
+#include "exec/tswap.h"
#include "fpu/softfloat.h"
#include "tcg/tcg-gvec-desc.h"
#include "internals.h"
@@ -75,6 +78,8 @@ target_ulong HELPER(vsetvl)(CPURISCVState *env, target_ulong s1,
vlmax = vext_get_vlmax(cpu->cfg.vlenb, vsew, lmul);
if (s1 <= vlmax) {
vl = s1;
+ } else if (s1 < 2 * vlmax && cpu->cfg.rvv_vl_half_avl) {
+ vl = (s1 + 1) >> 1;
} else {
vl = vlmax;
}
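The new branch implements the RVV-spec option that, for vlmax < AVL < 2 * vlmax, vl may be any value with ceil(AVL/2) <= vl <= vlmax; choosing ceil(AVL/2) balances the two strips of a stripmined loop. A sketch of the policy with a concrete case:

#include <stdint.h>
#include <stdbool.h>

static uint64_t choose_vl(uint64_t avl, uint64_t vlmax, bool half_avl)
{
    if (avl <= vlmax) {
        return avl;
    }
    if (half_avl && avl < 2 * vlmax) {
        return (avl + 1) >> 1;   /* avl = 6, vlmax = 4 -> vl = 3 (then 3 again) */
    }
    return vlmax;                /* avl = 9, vlmax = 4 -> vl = 4 */
}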
@@ -103,11 +108,6 @@ static inline uint32_t vext_max_elems(uint32_t desc, uint32_t log2_esz)
return scale < 0 ? vlenb >> -scale : vlenb << scale;
}
-static inline target_ulong adjust_addr(CPURISCVState *env, target_ulong addr)
-{
- return (addr & ~env->cur_pmmask) | env->cur_pmbase;
-}
-
/*
* This function checks watchpoint before real load operation.
*
@@ -117,25 +117,42 @@ static inline target_ulong adjust_addr(CPURISCVState *env, target_ulong addr)
* It will trigger an exception if there is no mapping in TLB
* and page table walk can't fill the TLB entry. Then the guest
* software can return here after processing the exception, or never return.
+ *
+ * This function can also be used when direct access to the probe flags is
+ * needed. If a pointer to a flags operand is provided, the function calls
+ * probe_access_flags() instead, honouring 'nonfault' and updating 'host'
+ * and 'flags'.
*/
-static void probe_pages(CPURISCVState *env, target_ulong addr,
- target_ulong len, uintptr_t ra,
- MMUAccessType access_type)
+static void probe_pages(CPURISCVState *env, target_ulong addr, target_ulong len,
+ uintptr_t ra, MMUAccessType access_type, int mmu_index,
+ void **host, int *flags, bool nonfault)
{
target_ulong pagelen = -(addr | TARGET_PAGE_MASK);
target_ulong curlen = MIN(pagelen, len);
- int mmu_index = riscv_env_mmu_index(env, false);
- probe_access(env, adjust_addr(env, addr), curlen, access_type,
- mmu_index, ra);
+ if (flags != NULL) {
+ *flags = probe_access_flags(env, adjust_addr(env, addr), curlen,
+ access_type, mmu_index, nonfault, host, ra);
+ } else {
+ probe_access(env, adjust_addr(env, addr), curlen, access_type,
+ mmu_index, ra);
+ }
+
if (len > curlen) {
addr += curlen;
curlen = len - curlen;
- probe_access(env, adjust_addr(env, addr), curlen, access_type,
- mmu_index, ra);
+ if (flags != NULL) {
+ *flags = probe_access_flags(env, adjust_addr(env, addr), curlen,
+ access_type, mmu_index, nonfault,
+ host, ra);
+ } else {
+ probe_access(env, adjust_addr(env, addr), curlen, access_type,
+ mmu_index, ra);
+ }
}
}
+
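The page-split arithmetic above relies on the usual QEMU idiom: -(addr | TARGET_PAGE_MASK) is the number of bytes from addr to the end of its page, so a probe longer than that remainder is issued once per page. A standalone sketch (4 KiB pages assumed):

#include <stdint.h>

#define PAGE_SIZE 4096ULL
#define PAGE_MASK (~(PAGE_SIZE - 1))

/* Bytes remaining until the end of addr's page. */
static uint64_t bytes_to_page_end(uint64_t addr)
{
    return -(addr | PAGE_MASK);
}

/* bytes_to_page_end(0x1FF8) == 8: a 16-byte probe starting at 0x1FF8 is
 * split into one 8-byte probe on each of the two pages it touches. */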
static inline void vext_set_elem_mask(void *v0, int index,
uint8_t value)
{
@@ -146,34 +163,90 @@ static inline void vext_set_elem_mask(void *v0, int index,
}
/* elements operations for load and store */
-typedef void vext_ldst_elem_fn(CPURISCVState *env, abi_ptr addr,
- uint32_t idx, void *vd, uintptr_t retaddr);
+typedef void vext_ldst_elem_fn_tlb(CPURISCVState *env, abi_ptr addr,
+ uint32_t idx, void *vd, uintptr_t retaddr);
+typedef void vext_ldst_elem_fn_host(void *vd, uint32_t idx, void *host);
+
+#define GEN_VEXT_LD_ELEM(NAME, ETYPE, H, LDSUF) \
+static inline QEMU_ALWAYS_INLINE \
+void NAME##_tlb(CPURISCVState *env, abi_ptr addr, \
+ uint32_t idx, void *vd, uintptr_t retaddr) \
+{ \
+ ETYPE *cur = ((ETYPE *)vd + H(idx)); \
+ *cur = cpu_##LDSUF##_data_ra(env, addr, retaddr); \
+} \
+ \
+static inline QEMU_ALWAYS_INLINE \
+void NAME##_host(void *vd, uint32_t idx, void *host) \
+{ \
+ ETYPE *cur = ((ETYPE *)vd + H(idx)); \
+ *cur = (ETYPE)LDSUF##_p(host); \
+}
-#define GEN_VEXT_LD_ELEM(NAME, ETYPE, H, LDSUF) \
-static void NAME(CPURISCVState *env, abi_ptr addr, \
- uint32_t idx, void *vd, uintptr_t retaddr)\
-{ \
- ETYPE *cur = ((ETYPE *)vd + H(idx)); \
- *cur = cpu_##LDSUF##_data_ra(env, addr, retaddr); \
-} \
-
-GEN_VEXT_LD_ELEM(lde_b, int8_t, H1, ldsb)
-GEN_VEXT_LD_ELEM(lde_h, int16_t, H2, ldsw)
-GEN_VEXT_LD_ELEM(lde_w, int32_t, H4, ldl)
-GEN_VEXT_LD_ELEM(lde_d, int64_t, H8, ldq)
-
-#define GEN_VEXT_ST_ELEM(NAME, ETYPE, H, STSUF) \
-static void NAME(CPURISCVState *env, abi_ptr addr, \
- uint32_t idx, void *vd, uintptr_t retaddr)\
-{ \
- ETYPE data = *((ETYPE *)vd + H(idx)); \
- cpu_##STSUF##_data_ra(env, addr, data, retaddr); \
+GEN_VEXT_LD_ELEM(lde_b, uint8_t, H1, ldub)
+GEN_VEXT_LD_ELEM(lde_h, uint16_t, H2, lduw)
+GEN_VEXT_LD_ELEM(lde_w, uint32_t, H4, ldl)
+GEN_VEXT_LD_ELEM(lde_d, uint64_t, H8, ldq)
+
+#define GEN_VEXT_ST_ELEM(NAME, ETYPE, H, STSUF) \
+static inline QEMU_ALWAYS_INLINE \
+void NAME##_tlb(CPURISCVState *env, abi_ptr addr, \
+ uint32_t idx, void *vd, uintptr_t retaddr) \
+{ \
+ ETYPE data = *((ETYPE *)vd + H(idx)); \
+ cpu_##STSUF##_data_ra(env, addr, data, retaddr); \
+} \
+ \
+static inline QEMU_ALWAYS_INLINE \
+void NAME##_host(void *vd, uint32_t idx, void *host) \
+{ \
+ ETYPE data = *((ETYPE *)vd + H(idx)); \
+ STSUF##_p(host, data); \
+}
+
+GEN_VEXT_ST_ELEM(ste_b, uint8_t, H1, stb)
+GEN_VEXT_ST_ELEM(ste_h, uint16_t, H2, stw)
+GEN_VEXT_ST_ELEM(ste_w, uint32_t, H4, stl)
+GEN_VEXT_ST_ELEM(ste_d, uint64_t, H8, stq)
+
+static inline QEMU_ALWAYS_INLINE void
+vext_continuous_ldst_tlb(CPURISCVState *env, vext_ldst_elem_fn_tlb *ldst_tlb,
+ void *vd, uint32_t evl, target_ulong addr,
+ uint32_t reg_start, uintptr_t ra, uint32_t esz,
+ bool is_load)
+{
+ uint32_t i;
+ for (i = env->vstart; i < evl; env->vstart = ++i, addr += esz) {
+ ldst_tlb(env, adjust_addr(env, addr), i, vd, ra);
+ }
}
-GEN_VEXT_ST_ELEM(ste_b, int8_t, H1, stb)
-GEN_VEXT_ST_ELEM(ste_h, int16_t, H2, stw)
-GEN_VEXT_ST_ELEM(ste_w, int32_t, H4, stl)
-GEN_VEXT_ST_ELEM(ste_d, int64_t, H8, stq)
+static inline QEMU_ALWAYS_INLINE void
+vext_continuous_ldst_host(CPURISCVState *env, vext_ldst_elem_fn_host *ldst_host,
+ void *vd, uint32_t evl, uint32_t reg_start, void *host,
+ uint32_t esz, bool is_load)
+{
+#if HOST_BIG_ENDIAN
+ for (; reg_start < evl; reg_start++, host += esz) {
+ ldst_host(vd, reg_start, host);
+ }
+#else
+ if (esz == 1) {
+ uint32_t byte_offset = reg_start * esz;
+ uint32_t size = (evl - reg_start) * esz;
+
+ if (is_load) {
+ memcpy(vd + byte_offset, host, size);
+ } else {
+ memcpy(host, vd + byte_offset, size);
+ }
+ } else {
+ for (; reg_start < evl; reg_start++, host += esz) {
+ ldst_host(vd, reg_start, host);
+ }
+ }
+#endif
+}
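The little-endian byte fast path works because for esz == 1 there is no guest-endian swap and the element index maps 1:1 onto memory, so the whole strip collapses to one memcpy; for wider elements the per-element ldst_host callbacks still perform the target-endian access. A sketch of the equivalence the fast path relies on:

#include <stdint.h>
#include <string.h>

/* On a little-endian host, for 1-byte elements these are equivalent: */
static void load_bytes_slow(uint8_t *vd, uint32_t start, uint32_t evl,
                            const uint8_t *host)
{
    for (uint32_t i = start; i < evl; i++) {
        vd[i] = host[i - start];
    }
}

static void load_bytes_fast(uint8_t *vd, uint32_t start, uint32_t evl,
                            const uint8_t *host)
{
    memcpy(vd + start, host, evl - start);
}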
static void vext_set_tail_elems_1s(target_ulong vl, void *vd,
uint32_t desc, uint32_t nf,
@@ -196,11 +269,10 @@ static void vext_set_tail_elems_1s(target_ulong vl, void *vd,
* stride: access vector element from strided memory
*/
static void
-vext_ldst_stride(void *vd, void *v0, target_ulong base,
- target_ulong stride, CPURISCVState *env,
- uint32_t desc, uint32_t vm,
- vext_ldst_elem_fn *ldst_elem,
- uint32_t log2_esz, uintptr_t ra)
+vext_ldst_stride(void *vd, void *v0, target_ulong base, target_ulong stride,
+ CPURISCVState *env, uint32_t desc, uint32_t vm,
+ vext_ldst_elem_fn_tlb *ldst_elem, uint32_t log2_esz,
+ uintptr_t ra)
{
uint32_t i, k;
uint32_t nf = vext_nf(desc);
@@ -208,7 +280,7 @@ vext_ldst_stride(void *vd, void *v0, target_ulong base,
uint32_t esz = 1 << log2_esz;
uint32_t vma = vext_vma(desc);
- VSTART_CHECK_EARLY_EXIT(env);
+ VSTART_CHECK_EARLY_EXIT(env, env->vl);
for (i = env->vstart; i < env->vl; env->vstart = ++i) {
k = 0;
@@ -240,10 +312,10 @@ void HELPER(NAME)(void *vd, void * v0, target_ulong base, \
ctzl(sizeof(ETYPE)), GETPC()); \
}
-GEN_VEXT_LD_STRIDE(vlse8_v, int8_t, lde_b)
-GEN_VEXT_LD_STRIDE(vlse16_v, int16_t, lde_h)
-GEN_VEXT_LD_STRIDE(vlse32_v, int32_t, lde_w)
-GEN_VEXT_LD_STRIDE(vlse64_v, int64_t, lde_d)
+GEN_VEXT_LD_STRIDE(vlse8_v, int8_t, lde_b_tlb)
+GEN_VEXT_LD_STRIDE(vlse16_v, int16_t, lde_h_tlb)
+GEN_VEXT_LD_STRIDE(vlse32_v, int32_t, lde_w_tlb)
+GEN_VEXT_LD_STRIDE(vlse64_v, int64_t, lde_d_tlb)
#define GEN_VEXT_ST_STRIDE(NAME, ETYPE, STORE_FN) \
void HELPER(NAME)(void *vd, void *v0, target_ulong base, \
@@ -255,39 +327,137 @@ void HELPER(NAME)(void *vd, void *v0, target_ulong base, \
ctzl(sizeof(ETYPE)), GETPC()); \
}
-GEN_VEXT_ST_STRIDE(vsse8_v, int8_t, ste_b)
-GEN_VEXT_ST_STRIDE(vsse16_v, int16_t, ste_h)
-GEN_VEXT_ST_STRIDE(vsse32_v, int32_t, ste_w)
-GEN_VEXT_ST_STRIDE(vsse64_v, int64_t, ste_d)
+GEN_VEXT_ST_STRIDE(vsse8_v, int8_t, ste_b_tlb)
+GEN_VEXT_ST_STRIDE(vsse16_v, int16_t, ste_h_tlb)
+GEN_VEXT_ST_STRIDE(vsse32_v, int32_t, ste_w_tlb)
+GEN_VEXT_ST_STRIDE(vsse64_v, int64_t, ste_d_tlb)
/*
* unit-stride: access elements stored contiguously in memory
*/
/* unmasked unit-stride load and store operation */
-static void
+static inline QEMU_ALWAYS_INLINE void
+vext_page_ldst_us(CPURISCVState *env, void *vd, target_ulong addr,
+ uint32_t elems, uint32_t nf, uint32_t max_elems,
+ uint32_t log2_esz, bool is_load, int mmu_index,
+ vext_ldst_elem_fn_tlb *ldst_tlb,
+ vext_ldst_elem_fn_host *ldst_host, uintptr_t ra)
+{
+ void *host;
+ int i, k, flags;
+ uint32_t esz = 1 << log2_esz;
+ uint32_t size = (elems * nf) << log2_esz;
+ uint32_t evl = env->vstart + elems;
+ MMUAccessType access_type = is_load ? MMU_DATA_LOAD : MMU_DATA_STORE;
+
+ /* Check page permission/pmp/watchpoint/etc. */
+ probe_pages(env, addr, size, ra, access_type, mmu_index, &host, &flags,
+ true);
+
+ if (flags == 0) {
+ if (nf == 1) {
+ vext_continuous_ldst_host(env, ldst_host, vd, evl, env->vstart,
+ host, esz, is_load);
+ } else {
+ for (i = env->vstart; i < evl; ++i) {
+ k = 0;
+ while (k < nf) {
+ ldst_host(vd, i + k * max_elems, host);
+ host += esz;
+ k++;
+ }
+ }
+ }
+ env->vstart += elems;
+ } else {
+ if (nf == 1) {
+ vext_continuous_ldst_tlb(env, ldst_tlb, vd, evl, addr, env->vstart,
+ ra, esz, is_load);
+ } else {
+ /* load bytes from guest memory */
+ for (i = env->vstart; i < evl; env->vstart = ++i) {
+ k = 0;
+ while (k < nf) {
+ ldst_tlb(env, adjust_addr(env, addr), i + k * max_elems,
+ vd, ra);
+ addr += esz;
+ k++;
+ }
+ }
+ }
+ }
+}
+
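The dispatch in vext_page_ldst_us() hinges on the flags returned by the nonfaulting probe; the probe_pages() argument order is taken from the call in this hunk, while the flag bit values in this standalone model are illustrative only.

#include <stdio.h>

/* Stand-in TLB flag bits (illustrative values, not QEMU's). */
enum { FLAG_WATCHPOINT = 1, FLAG_MMIO = 2, FLAG_INVALID = 4 };

/* flags == 0 is the only case where the direct-host path is legal. */
static const char *pick_path(int flags)
{
    return flags == 0 ? "host fast path" : "per-element TLB path";
}

int main(void)
{
    printf("%s\n", pick_path(0));            /* host fast path */
    printf("%s\n", pick_path(FLAG_MMIO));    /* per-element TLB path */
    return 0;
}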
+static inline QEMU_ALWAYS_INLINE void
vext_ldst_us(void *vd, target_ulong base, CPURISCVState *env, uint32_t desc,
- vext_ldst_elem_fn *ldst_elem, uint32_t log2_esz, uint32_t evl,
- uintptr_t ra)
+ vext_ldst_elem_fn_tlb *ldst_tlb,
+ vext_ldst_elem_fn_host *ldst_host, uint32_t log2_esz,
+ uint32_t evl, uintptr_t ra, bool is_load)
{
- uint32_t i, k;
+ uint32_t k;
+ target_ulong page_split, elems, addr;
uint32_t nf = vext_nf(desc);
uint32_t max_elems = vext_max_elems(desc, log2_esz);
uint32_t esz = 1 << log2_esz;
+ uint32_t msize = nf * esz;
+ int mmu_index = riscv_env_mmu_index(env, false);
- VSTART_CHECK_EARLY_EXIT(env);
+ VSTART_CHECK_EARLY_EXIT(env, evl);
- /* load bytes from guest memory */
- for (i = env->vstart; i < evl; env->vstart = ++i) {
- k = 0;
- while (k < nf) {
- target_ulong addr = base + ((i * nf + k) << log2_esz);
- ldst_elem(env, adjust_addr(env, addr), i + k * max_elems, vd, ra);
- k++;
+#if defined(CONFIG_USER_ONLY)
+ /*
+ * For data sizes <= 6 bytes we get better performance by simply calling
+ * vext_continuous_ldst_tlb
+ */
+ if (nf == 1 && (evl << log2_esz) <= 6) {
+ addr = base + (env->vstart << log2_esz);
+ vext_continuous_ldst_tlb(env, ldst_tlb, vd, evl, addr, env->vstart, ra,
+ esz, is_load);
+
+ env->vstart = 0;
+ vext_set_tail_elems_1s(evl, vd, desc, nf, esz, max_elems);
+ return;
+ }
+#endif
+
+ /* Calculate the page range of first page */
+ addr = base + ((env->vstart * nf) << log2_esz);
+ page_split = -(addr | TARGET_PAGE_MASK);
+ /* Get number of elements */
+ elems = page_split / msize;
+ if (unlikely(env->vstart + elems >= evl)) {
+ elems = evl - env->vstart;
+ }
+
+ /* Load/store elements in the first page */
+ if (likely(elems)) {
+ vext_page_ldst_us(env, vd, addr, elems, nf, max_elems, log2_esz,
+ is_load, mmu_index, ldst_tlb, ldst_host, ra);
+ }
+
+ /* Load/store elements in the second page */
+ if (unlikely(env->vstart < evl)) {
+ /* Cross page element */
+ if (unlikely(page_split % msize)) {
+ for (k = 0; k < nf; k++) {
+ addr = base + ((env->vstart * nf + k) << log2_esz);
+ ldst_tlb(env, adjust_addr(env, addr),
+ env->vstart + k * max_elems, vd, ra);
+ }
+ env->vstart++;
}
+
+ addr = base + ((env->vstart * nf) << log2_esz);
+ /* Get number of elements of second page */
+ elems = evl - env->vstart;
+
+ /* Load/store elements in the second page */
+ vext_page_ldst_us(env, vd, addr, elems, nf, max_elems, log2_esz,
+ is_load, mmu_index, ldst_tlb, ldst_host, ra);
}
- env->vstart = 0;
+ env->vstart = 0;
vext_set_tail_elems_1s(evl, vd, desc, nf, esz, max_elems);
}
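The first-page bookkeeping above relies on a two's-complement identity: -(addr | TARGET_PAGE_MASK) is the number of bytes from addr to the end of its page. A minimal standalone check, assuming a 4 KiB target page:

#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

typedef uint64_t target_ulong;
#define TARGET_PAGE_SIZE 4096
#define TARGET_PAGE_MASK (~(target_ulong)(TARGET_PAGE_SIZE - 1))

int main(void)
{
    target_ulong addr = 0x80001f40;
    target_ulong page_split = -(addr | TARGET_PAGE_MASK);

    /* 0x80002000 - 0x80001f40 = 0xc0 bytes left in the first page. */
    printf("%#" PRIx64 "\n", (uint64_t)page_split);
    return 0;
}

Dividing page_split by msize (nf * esz) then yields the number of whole segments that fit before the boundary, and a non-zero page_split % msize flags the one segment that straddles the page, which the cross-page branch handles element by element through the TLB path.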
@@ -296,47 +466,47 @@ vext_ldst_us(void *vd, target_ulong base, CPURISCVState *env, uint32_t desc,
* stride, stride = NF * sizeof (ETYPE)
*/
-#define GEN_VEXT_LD_US(NAME, ETYPE, LOAD_FN) \
-void HELPER(NAME##_mask)(void *vd, void *v0, target_ulong base, \
- CPURISCVState *env, uint32_t desc) \
-{ \
- uint32_t stride = vext_nf(desc) << ctzl(sizeof(ETYPE)); \
- vext_ldst_stride(vd, v0, base, stride, env, desc, false, LOAD_FN, \
- ctzl(sizeof(ETYPE)), GETPC()); \
-} \
- \
-void HELPER(NAME)(void *vd, void *v0, target_ulong base, \
- CPURISCVState *env, uint32_t desc) \
-{ \
- vext_ldst_us(vd, base, env, desc, LOAD_FN, \
- ctzl(sizeof(ETYPE)), env->vl, GETPC()); \
+#define GEN_VEXT_LD_US(NAME, ETYPE, LOAD_FN_TLB, LOAD_FN_HOST) \
+void HELPER(NAME##_mask)(void *vd, void *v0, target_ulong base, \
+ CPURISCVState *env, uint32_t desc) \
+{ \
+ uint32_t stride = vext_nf(desc) << ctzl(sizeof(ETYPE)); \
+ vext_ldst_stride(vd, v0, base, stride, env, desc, false, \
+ LOAD_FN_TLB, ctzl(sizeof(ETYPE)), GETPC()); \
+} \
+ \
+void HELPER(NAME)(void *vd, void *v0, target_ulong base, \
+ CPURISCVState *env, uint32_t desc) \
+{ \
+ vext_ldst_us(vd, base, env, desc, LOAD_FN_TLB, LOAD_FN_HOST, \
+ ctzl(sizeof(ETYPE)), env->vl, GETPC(), true); \
}
-GEN_VEXT_LD_US(vle8_v, int8_t, lde_b)
-GEN_VEXT_LD_US(vle16_v, int16_t, lde_h)
-GEN_VEXT_LD_US(vle32_v, int32_t, lde_w)
-GEN_VEXT_LD_US(vle64_v, int64_t, lde_d)
+GEN_VEXT_LD_US(vle8_v, int8_t, lde_b_tlb, lde_b_host)
+GEN_VEXT_LD_US(vle16_v, int16_t, lde_h_tlb, lde_h_host)
+GEN_VEXT_LD_US(vle32_v, int32_t, lde_w_tlb, lde_w_host)
+GEN_VEXT_LD_US(vle64_v, int64_t, lde_d_tlb, lde_d_host)
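For reference, one instantiation of the rewritten load macro; this is the literal expansion of GEN_VEXT_LD_US(vle8_v, int8_t, lde_b_tlb, lde_b_host) for the unmasked helper, modulo HELPER() name mangling:

void HELPER(vle8_v)(void *vd, void *v0, target_ulong base,
                    CPURISCVState *env, uint32_t desc)
{
    vext_ldst_us(vd, base, env, desc, lde_b_tlb, lde_b_host,
                 ctzl(sizeof(int8_t)), env->vl, GETPC(), true);
}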
-#define GEN_VEXT_ST_US(NAME, ETYPE, STORE_FN) \
+#define GEN_VEXT_ST_US(NAME, ETYPE, STORE_FN_TLB, STORE_FN_HOST) \
void HELPER(NAME##_mask)(void *vd, void *v0, target_ulong base, \
CPURISCVState *env, uint32_t desc) \
{ \
uint32_t stride = vext_nf(desc) << ctzl(sizeof(ETYPE)); \
- vext_ldst_stride(vd, v0, base, stride, env, desc, false, STORE_FN, \
- ctzl(sizeof(ETYPE)), GETPC()); \
+ vext_ldst_stride(vd, v0, base, stride, env, desc, false, \
+ STORE_FN_TLB, ctzl(sizeof(ETYPE)), GETPC()); \
} \
\
void HELPER(NAME)(void *vd, void *v0, target_ulong base, \
CPURISCVState *env, uint32_t desc) \
{ \
- vext_ldst_us(vd, base, env, desc, STORE_FN, \
- ctzl(sizeof(ETYPE)), env->vl, GETPC()); \
+ vext_ldst_us(vd, base, env, desc, STORE_FN_TLB, STORE_FN_HOST, \
+ ctzl(sizeof(ETYPE)), env->vl, GETPC(), false); \
}
-GEN_VEXT_ST_US(vse8_v, int8_t, ste_b)
-GEN_VEXT_ST_US(vse16_v, int16_t, ste_h)
-GEN_VEXT_ST_US(vse32_v, int32_t, ste_w)
-GEN_VEXT_ST_US(vse64_v, int64_t, ste_d)
+GEN_VEXT_ST_US(vse8_v, int8_t, ste_b_tlb, ste_b_host)
+GEN_VEXT_ST_US(vse16_v, int16_t, ste_h_tlb, ste_h_host)
+GEN_VEXT_ST_US(vse32_v, int32_t, ste_w_tlb, ste_w_host)
+GEN_VEXT_ST_US(vse64_v, int64_t, ste_d_tlb, ste_d_host)
/*
* unit stride mask load and store, EEW = 1
@@ -346,8 +516,8 @@ void HELPER(vlm_v)(void *vd, void *v0, target_ulong base,
{
/* evl = ceil(vl/8) */
uint8_t evl = (env->vl + 7) >> 3;
- vext_ldst_us(vd, base, env, desc, lde_b,
- 0, evl, GETPC());
+ vext_ldst_us(vd, base, env, desc, lde_b_tlb, lde_b_host,
+ 0, evl, GETPC(), true);
}
void HELPER(vsm_v)(void *vd, void *v0, target_ulong base,
@@ -355,8 +525,8 @@ void HELPER(vsm_v)(void *vd, void *v0, target_ulong base,
{
/* evl = ceil(vl/8) */
uint8_t evl = (env->vl + 7) >> 3;
- vext_ldst_us(vd, base, env, desc, ste_b,
- 0, evl, GETPC());
+ vext_ldst_us(vd, base, env, desc, ste_b_tlb, ste_b_host,
+ 0, evl, GETPC(), false);
}
/*
@@ -381,7 +551,7 @@ static inline void
vext_ldst_index(void *vd, void *v0, target_ulong base,
void *vs2, CPURISCVState *env, uint32_t desc,
vext_get_index_addr get_index_addr,
- vext_ldst_elem_fn *ldst_elem,
+ vext_ldst_elem_fn_tlb *ldst_elem,
uint32_t log2_esz, uintptr_t ra)
{
uint32_t i, k;
@@ -391,7 +561,7 @@ vext_ldst_index(void *vd, void *v0, target_ulong base,
uint32_t esz = 1 << log2_esz;
uint32_t vma = vext_vma(desc);
- VSTART_CHECK_EARLY_EXIT(env);
+ VSTART_CHECK_EARLY_EXIT(env, env->vl);
/* load bytes from guest memory */
for (i = env->vstart; i < env->vl; env->vstart = ++i) {
@@ -422,22 +592,22 @@ void HELPER(NAME)(void *vd, void *v0, target_ulong base, \
LOAD_FN, ctzl(sizeof(ETYPE)), GETPC()); \
}
-GEN_VEXT_LD_INDEX(vlxei8_8_v, int8_t, idx_b, lde_b)
-GEN_VEXT_LD_INDEX(vlxei8_16_v, int16_t, idx_b, lde_h)
-GEN_VEXT_LD_INDEX(vlxei8_32_v, int32_t, idx_b, lde_w)
-GEN_VEXT_LD_INDEX(vlxei8_64_v, int64_t, idx_b, lde_d)
-GEN_VEXT_LD_INDEX(vlxei16_8_v, int8_t, idx_h, lde_b)
-GEN_VEXT_LD_INDEX(vlxei16_16_v, int16_t, idx_h, lde_h)
-GEN_VEXT_LD_INDEX(vlxei16_32_v, int32_t, idx_h, lde_w)
-GEN_VEXT_LD_INDEX(vlxei16_64_v, int64_t, idx_h, lde_d)
-GEN_VEXT_LD_INDEX(vlxei32_8_v, int8_t, idx_w, lde_b)
-GEN_VEXT_LD_INDEX(vlxei32_16_v, int16_t, idx_w, lde_h)
-GEN_VEXT_LD_INDEX(vlxei32_32_v, int32_t, idx_w, lde_w)
-GEN_VEXT_LD_INDEX(vlxei32_64_v, int64_t, idx_w, lde_d)
-GEN_VEXT_LD_INDEX(vlxei64_8_v, int8_t, idx_d, lde_b)
-GEN_VEXT_LD_INDEX(vlxei64_16_v, int16_t, idx_d, lde_h)
-GEN_VEXT_LD_INDEX(vlxei64_32_v, int32_t, idx_d, lde_w)
-GEN_VEXT_LD_INDEX(vlxei64_64_v, int64_t, idx_d, lde_d)
+GEN_VEXT_LD_INDEX(vlxei8_8_v, int8_t, idx_b, lde_b_tlb)
+GEN_VEXT_LD_INDEX(vlxei8_16_v, int16_t, idx_b, lde_h_tlb)
+GEN_VEXT_LD_INDEX(vlxei8_32_v, int32_t, idx_b, lde_w_tlb)
+GEN_VEXT_LD_INDEX(vlxei8_64_v, int64_t, idx_b, lde_d_tlb)
+GEN_VEXT_LD_INDEX(vlxei16_8_v, int8_t, idx_h, lde_b_tlb)
+GEN_VEXT_LD_INDEX(vlxei16_16_v, int16_t, idx_h, lde_h_tlb)
+GEN_VEXT_LD_INDEX(vlxei16_32_v, int32_t, idx_h, lde_w_tlb)
+GEN_VEXT_LD_INDEX(vlxei16_64_v, int64_t, idx_h, lde_d_tlb)
+GEN_VEXT_LD_INDEX(vlxei32_8_v, int8_t, idx_w, lde_b_tlb)
+GEN_VEXT_LD_INDEX(vlxei32_16_v, int16_t, idx_w, lde_h_tlb)
+GEN_VEXT_LD_INDEX(vlxei32_32_v, int32_t, idx_w, lde_w_tlb)
+GEN_VEXT_LD_INDEX(vlxei32_64_v, int64_t, idx_w, lde_d_tlb)
+GEN_VEXT_LD_INDEX(vlxei64_8_v, int8_t, idx_d, lde_b_tlb)
+GEN_VEXT_LD_INDEX(vlxei64_16_v, int16_t, idx_d, lde_h_tlb)
+GEN_VEXT_LD_INDEX(vlxei64_32_v, int32_t, idx_d, lde_w_tlb)
+GEN_VEXT_LD_INDEX(vlxei64_64_v, int64_t, idx_d, lde_d_tlb)
#define GEN_VEXT_ST_INDEX(NAME, ETYPE, INDEX_FN, STORE_FN) \
void HELPER(NAME)(void *vd, void *v0, target_ulong base, \
@@ -448,76 +618,101 @@ void HELPER(NAME)(void *vd, void *v0, target_ulong base, \
GETPC()); \
}
-GEN_VEXT_ST_INDEX(vsxei8_8_v, int8_t, idx_b, ste_b)
-GEN_VEXT_ST_INDEX(vsxei8_16_v, int16_t, idx_b, ste_h)
-GEN_VEXT_ST_INDEX(vsxei8_32_v, int32_t, idx_b, ste_w)
-GEN_VEXT_ST_INDEX(vsxei8_64_v, int64_t, idx_b, ste_d)
-GEN_VEXT_ST_INDEX(vsxei16_8_v, int8_t, idx_h, ste_b)
-GEN_VEXT_ST_INDEX(vsxei16_16_v, int16_t, idx_h, ste_h)
-GEN_VEXT_ST_INDEX(vsxei16_32_v, int32_t, idx_h, ste_w)
-GEN_VEXT_ST_INDEX(vsxei16_64_v, int64_t, idx_h, ste_d)
-GEN_VEXT_ST_INDEX(vsxei32_8_v, int8_t, idx_w, ste_b)
-GEN_VEXT_ST_INDEX(vsxei32_16_v, int16_t, idx_w, ste_h)
-GEN_VEXT_ST_INDEX(vsxei32_32_v, int32_t, idx_w, ste_w)
-GEN_VEXT_ST_INDEX(vsxei32_64_v, int64_t, idx_w, ste_d)
-GEN_VEXT_ST_INDEX(vsxei64_8_v, int8_t, idx_d, ste_b)
-GEN_VEXT_ST_INDEX(vsxei64_16_v, int16_t, idx_d, ste_h)
-GEN_VEXT_ST_INDEX(vsxei64_32_v, int32_t, idx_d, ste_w)
-GEN_VEXT_ST_INDEX(vsxei64_64_v, int64_t, idx_d, ste_d)
+GEN_VEXT_ST_INDEX(vsxei8_8_v, int8_t, idx_b, ste_b_tlb)
+GEN_VEXT_ST_INDEX(vsxei8_16_v, int16_t, idx_b, ste_h_tlb)
+GEN_VEXT_ST_INDEX(vsxei8_32_v, int32_t, idx_b, ste_w_tlb)
+GEN_VEXT_ST_INDEX(vsxei8_64_v, int64_t, idx_b, ste_d_tlb)
+GEN_VEXT_ST_INDEX(vsxei16_8_v, int8_t, idx_h, ste_b_tlb)
+GEN_VEXT_ST_INDEX(vsxei16_16_v, int16_t, idx_h, ste_h_tlb)
+GEN_VEXT_ST_INDEX(vsxei16_32_v, int32_t, idx_h, ste_w_tlb)
+GEN_VEXT_ST_INDEX(vsxei16_64_v, int64_t, idx_h, ste_d_tlb)
+GEN_VEXT_ST_INDEX(vsxei32_8_v, int8_t, idx_w, ste_b_tlb)
+GEN_VEXT_ST_INDEX(vsxei32_16_v, int16_t, idx_w, ste_h_tlb)
+GEN_VEXT_ST_INDEX(vsxei32_32_v, int32_t, idx_w, ste_w_tlb)
+GEN_VEXT_ST_INDEX(vsxei32_64_v, int64_t, idx_w, ste_d_tlb)
+GEN_VEXT_ST_INDEX(vsxei64_8_v, int8_t, idx_d, ste_b_tlb)
+GEN_VEXT_ST_INDEX(vsxei64_16_v, int16_t, idx_d, ste_h_tlb)
+GEN_VEXT_ST_INDEX(vsxei64_32_v, int32_t, idx_d, ste_w_tlb)
+GEN_VEXT_ST_INDEX(vsxei64_64_v, int64_t, idx_d, ste_d_tlb)
/*
 * unit-stride fault-only-first load instructions
*/
static inline void
-vext_ldff(void *vd, void *v0, target_ulong base,
- CPURISCVState *env, uint32_t desc,
- vext_ldst_elem_fn *ldst_elem,
- uint32_t log2_esz, uintptr_t ra)
+vext_ldff(void *vd, void *v0, target_ulong base, CPURISCVState *env,
+ uint32_t desc, vext_ldst_elem_fn_tlb *ldst_tlb,
+ vext_ldst_elem_fn_host *ldst_host, uint32_t log2_esz, uintptr_t ra)
{
- void *host;
uint32_t i, k, vl = 0;
uint32_t nf = vext_nf(desc);
uint32_t vm = vext_vm(desc);
uint32_t max_elems = vext_max_elems(desc, log2_esz);
uint32_t esz = 1 << log2_esz;
+ uint32_t msize = nf * esz;
uint32_t vma = vext_vma(desc);
- target_ulong addr, offset, remain;
+ target_ulong addr, addr_probe, addr_i, offset, remain, page_split, elems;
int mmu_index = riscv_env_mmu_index(env, false);
+ int flags, probe_flags;
+ void *host;
- VSTART_CHECK_EARLY_EXIT(env);
+ VSTART_CHECK_EARLY_EXIT(env, env->vl);
- /* probe every access */
- for (i = env->vstart; i < env->vl; i++) {
- if (!vm && !vext_elem_mask(v0, i)) {
- continue;
- }
- addr = adjust_addr(env, base + i * (nf << log2_esz));
- if (i == 0) {
- probe_pages(env, addr, nf << log2_esz, ra, MMU_DATA_LOAD);
- } else {
- /* if it triggers an exception, no need to check watchpoint */
- remain = nf << log2_esz;
- while (remain > 0) {
- offset = -(addr | TARGET_PAGE_MASK);
- host = tlb_vaddr_to_host(env, addr, MMU_DATA_LOAD, mmu_index);
- if (host) {
-#ifdef CONFIG_USER_ONLY
- if (!page_check_range(addr, offset, PAGE_READ)) {
+ addr = base + ((env->vstart * nf) << log2_esz);
+ page_split = -(addr | TARGET_PAGE_MASK);
+ /* Get number of elements */
+ elems = page_split / msize;
+ if (unlikely(env->vstart + elems >= env->vl)) {
+ elems = env->vl - env->vstart;
+ }
+
+ /* Check page permission/pmp/watchpoint/etc. */
+ probe_pages(env, addr, elems * msize, ra, MMU_DATA_LOAD, mmu_index, &host,
+ &flags, true);
+
+ /* If we are crossing a page, also check the second page. */
+ if (env->vl > elems) {
+ addr_probe = addr + (elems << log2_esz);
+ probe_pages(env, addr_probe, elems * msize, ra, MMU_DATA_LOAD,
+ mmu_index, &host, &probe_flags, true);
+ flags |= probe_flags;
+ }
+
+ if (flags & ~TLB_WATCHPOINT) {
+ /* probe every access */
+ for (i = env->vstart; i < env->vl; i++) {
+ if (!vm && !vext_elem_mask(v0, i)) {
+ continue;
+ }
+ addr_i = adjust_addr(env, base + i * (nf << log2_esz));
+ if (i == 0) {
+ /* Allow fault on first element. */
+ probe_pages(env, addr_i, nf << log2_esz, ra, MMU_DATA_LOAD,
+ mmu_index, &host, NULL, false);
+ } else {
+ remain = nf << log2_esz;
+ while (remain > 0) {
+ offset = -(addr_i | TARGET_PAGE_MASK);
+
+ /* Probe nonfault on subsequent elements. */
+ probe_pages(env, addr_i, offset, 0, MMU_DATA_LOAD,
+ mmu_index, &host, &flags, true);
+
+ /*
+ * Stop if invalid (unmapped) or mmio (transaction may
+ * fail). Do not stop if watchpoint, as the spec says that
+ * first-fault should continue to access the same
+ * elements regardless of any watchpoint.
+ */
+ if (flags & ~TLB_WATCHPOINT) {
vl = i;
goto ProbeSuccess;
}
-#else
- probe_pages(env, addr, offset, ra, MMU_DATA_LOAD);
-#endif
- } else {
- vl = i;
- goto ProbeSuccess;
- }
- if (remain <= offset) {
- break;
+ if (remain <= offset) {
+ break;
+ }
+ remain -= offset;
+ addr_i = adjust_addr(env, addr_i + offset);
}
- remain -= offset;
- addr = adjust_addr(env, addr + offset);
}
}
}
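A standalone model of the truncation rule above (flag values illustrative): any flag other than the watchpoint bit shortens vl at the first offending element, because fault-only-first must keep accessing elements when only a watchpoint would fire.

#include <stdint.h>

enum { WATCHPOINT = 1, MMIO = 2, INVALID = 4 };  /* illustrative bits */

/* Returns the effective vl after probing element i. */
static uint32_t ldff_truncate(uint32_t vl, uint32_t i, int flags)
{
    if (flags & ~WATCHPOINT) {
        return i;      /* stop before the element that could fault */
    }
    return vl;         /* a watchpoint alone does not shorten vl */
}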
@@ -526,19 +721,54 @@ ProbeSuccess:
if (vl != 0) {
env->vl = vl;
}
- for (i = env->vstart; i < env->vl; i++) {
- k = 0;
- while (k < nf) {
- if (!vm && !vext_elem_mask(v0, i)) {
- /* set masked-off elements to 1s */
- vext_set_elems_1s(vd, vma, (i + k * max_elems) * esz,
- (i + k * max_elems + 1) * esz);
- k++;
- continue;
+
+ if (env->vstart < env->vl) {
+ if (vm) {
+ /* Load/store elements in the first page */
+ if (likely(elems)) {
+ vext_page_ldst_us(env, vd, addr, elems, nf, max_elems,
+ log2_esz, true, mmu_index, ldst_tlb,
+ ldst_host, ra);
+ }
+
+ /* Load/store elements in the second page */
+ if (unlikely(env->vstart < env->vl)) {
+ /* Cross page element */
+ if (unlikely(page_split % msize)) {
+ for (k = 0; k < nf; k++) {
+ addr = base + ((env->vstart * nf + k) << log2_esz);
+ ldst_tlb(env, adjust_addr(env, addr),
+ env->vstart + k * max_elems, vd, ra);
+ }
+ env->vstart++;
+ }
+
+ addr = base + ((env->vstart * nf) << log2_esz);
+ /* Get number of elements of second page */
+ elems = env->vl - env->vstart;
+
+ /* Load/store elements in the second page */
+ vext_page_ldst_us(env, vd, addr, elems, nf, max_elems,
+ log2_esz, true, mmu_index, ldst_tlb,
+ ldst_host, ra);
+ }
+ } else {
+ for (i = env->vstart; i < env->vl; i++) {
+ k = 0;
+ while (k < nf) {
+ if (!vext_elem_mask(v0, i)) {
+ /* set masked-off elements to 1s */
+ vext_set_elems_1s(vd, vma, (i + k * max_elems) * esz,
+ (i + k * max_elems + 1) * esz);
+ k++;
+ continue;
+ }
+ addr = base + ((i * nf + k) << log2_esz);
+ ldst_tlb(env, adjust_addr(env, addr), i + k * max_elems,
+ vd, ra);
+ k++;
+ }
}
- addr = base + ((i * nf + k) << log2_esz);
- ldst_elem(env, adjust_addr(env, addr), i + k * max_elems, vd, ra);
- k++;
}
}
env->vstart = 0;
@@ -546,18 +776,18 @@ ProbeSuccess:
vext_set_tail_elems_1s(env->vl, vd, desc, nf, esz, max_elems);
}
-#define GEN_VEXT_LDFF(NAME, ETYPE, LOAD_FN) \
-void HELPER(NAME)(void *vd, void *v0, target_ulong base, \
- CPURISCVState *env, uint32_t desc) \
-{ \
- vext_ldff(vd, v0, base, env, desc, LOAD_FN, \
- ctzl(sizeof(ETYPE)), GETPC()); \
+#define GEN_VEXT_LDFF(NAME, ETYPE, LOAD_FN_TLB, LOAD_FN_HOST) \
+void HELPER(NAME)(void *vd, void *v0, target_ulong base, \
+ CPURISCVState *env, uint32_t desc) \
+{ \
+ vext_ldff(vd, v0, base, env, desc, LOAD_FN_TLB, \
+ LOAD_FN_HOST, ctzl(sizeof(ETYPE)), GETPC()); \
}
-GEN_VEXT_LDFF(vle8ff_v, int8_t, lde_b)
-GEN_VEXT_LDFF(vle16ff_v, int16_t, lde_h)
-GEN_VEXT_LDFF(vle32ff_v, int32_t, lde_w)
-GEN_VEXT_LDFF(vle64ff_v, int64_t, lde_d)
+GEN_VEXT_LDFF(vle8ff_v, int8_t, lde_b_tlb, lde_b_host)
+GEN_VEXT_LDFF(vle16ff_v, int16_t, lde_h_tlb, lde_h_host)
+GEN_VEXT_LDFF(vle32ff_v, int32_t, lde_w_tlb, lde_w_host)
+GEN_VEXT_LDFF(vle64ff_v, int64_t, lde_d_tlb, lde_d_host)
#define DO_SWAP(N, M) (M)
#define DO_AND(N, M) (N & M)
@@ -572,81 +802,93 @@ GEN_VEXT_LDFF(vle64ff_v, int64_t, lde_d)
/*
* load and store whole register instructions
*/
-static void
+static inline QEMU_ALWAYS_INLINE void
vext_ldst_whole(void *vd, target_ulong base, CPURISCVState *env, uint32_t desc,
- vext_ldst_elem_fn *ldst_elem, uint32_t log2_esz, uintptr_t ra)
+ vext_ldst_elem_fn_tlb *ldst_tlb,
+ vext_ldst_elem_fn_host *ldst_host, uint32_t log2_esz,
+ uintptr_t ra, bool is_load)
{
- uint32_t i, k, off, pos;
+ target_ulong page_split, elems, addr;
uint32_t nf = vext_nf(desc);
uint32_t vlenb = riscv_cpu_cfg(env)->vlenb;
uint32_t max_elems = vlenb >> log2_esz;
+ uint32_t evl = nf * max_elems;
+ uint32_t esz = 1 << log2_esz;
+ int mmu_index = riscv_env_mmu_index(env, false);
- if (env->vstart >= ((vlenb * nf) >> log2_esz)) {
- env->vstart = 0;
- return;
+ /* Calculate the page range of first page */
+ addr = base + (env->vstart << log2_esz);
+ page_split = -(addr | TARGET_PAGE_MASK);
+ /* Get number of elements */
+ elems = page_split / esz;
+ if (unlikely(env->vstart + elems >= evl)) {
+ elems = evl - env->vstart;
}
- k = env->vstart / max_elems;
- off = env->vstart % max_elems;
-
- if (off) {
- /* load/store rest of elements of current segment pointed by vstart */
- for (pos = off; pos < max_elems; pos++, env->vstart++) {
- target_ulong addr = base + ((pos + k * max_elems) << log2_esz);
- ldst_elem(env, adjust_addr(env, addr), pos + k * max_elems, vd,
- ra);
- }
- k++;
+ /* Load/store elements in the first page */
+ if (likely(elems)) {
+ vext_page_ldst_us(env, vd, addr, elems, 1, max_elems, log2_esz,
+ is_load, mmu_index, ldst_tlb, ldst_host, ra);
}
- /* load/store elements for rest of segments */
- for (; k < nf; k++) {
- for (i = 0; i < max_elems; i++, env->vstart++) {
- target_ulong addr = base + ((i + k * max_elems) << log2_esz);
- ldst_elem(env, adjust_addr(env, addr), i + k * max_elems, vd, ra);
+ /* Load/store elements in the second page */
+ if (unlikely(env->vstart < evl)) {
+ /* Cross page element */
+ if (unlikely(page_split % esz)) {
+ addr = base + (env->vstart << log2_esz);
+ ldst_tlb(env, adjust_addr(env, addr), env->vstart, vd, ra);
+ env->vstart++;
}
+
+ addr = base + (env->vstart << log2_esz);
+ /* Get number of elements of second page */
+ elems = evl - env->vstart;
+
+ /* Load/store elements in the second page */
+ vext_page_ldst_us(env, vd, addr, elems, 1, max_elems, log2_esz,
+ is_load, mmu_index, ldst_tlb, ldst_host, ra);
}
env->vstart = 0;
}
-#define GEN_VEXT_LD_WHOLE(NAME, ETYPE, LOAD_FN) \
-void HELPER(NAME)(void *vd, target_ulong base, \
- CPURISCVState *env, uint32_t desc) \
-{ \
- vext_ldst_whole(vd, base, env, desc, LOAD_FN, \
- ctzl(sizeof(ETYPE)), GETPC()); \
-}
-
-GEN_VEXT_LD_WHOLE(vl1re8_v, int8_t, lde_b)
-GEN_VEXT_LD_WHOLE(vl1re16_v, int16_t, lde_h)
-GEN_VEXT_LD_WHOLE(vl1re32_v, int32_t, lde_w)
-GEN_VEXT_LD_WHOLE(vl1re64_v, int64_t, lde_d)
-GEN_VEXT_LD_WHOLE(vl2re8_v, int8_t, lde_b)
-GEN_VEXT_LD_WHOLE(vl2re16_v, int16_t, lde_h)
-GEN_VEXT_LD_WHOLE(vl2re32_v, int32_t, lde_w)
-GEN_VEXT_LD_WHOLE(vl2re64_v, int64_t, lde_d)
-GEN_VEXT_LD_WHOLE(vl4re8_v, int8_t, lde_b)
-GEN_VEXT_LD_WHOLE(vl4re16_v, int16_t, lde_h)
-GEN_VEXT_LD_WHOLE(vl4re32_v, int32_t, lde_w)
-GEN_VEXT_LD_WHOLE(vl4re64_v, int64_t, lde_d)
-GEN_VEXT_LD_WHOLE(vl8re8_v, int8_t, lde_b)
-GEN_VEXT_LD_WHOLE(vl8re16_v, int16_t, lde_h)
-GEN_VEXT_LD_WHOLE(vl8re32_v, int32_t, lde_w)
-GEN_VEXT_LD_WHOLE(vl8re64_v, int64_t, lde_d)
-
-#define GEN_VEXT_ST_WHOLE(NAME, ETYPE, STORE_FN) \
-void HELPER(NAME)(void *vd, target_ulong base, \
- CPURISCVState *env, uint32_t desc) \
-{ \
- vext_ldst_whole(vd, base, env, desc, STORE_FN, \
- ctzl(sizeof(ETYPE)), GETPC()); \
-}
-
-GEN_VEXT_ST_WHOLE(vs1r_v, int8_t, ste_b)
-GEN_VEXT_ST_WHOLE(vs2r_v, int8_t, ste_b)
-GEN_VEXT_ST_WHOLE(vs4r_v, int8_t, ste_b)
-GEN_VEXT_ST_WHOLE(vs8r_v, int8_t, ste_b)
+#define GEN_VEXT_LD_WHOLE(NAME, ETYPE, LOAD_FN_TLB, LOAD_FN_HOST) \
+void HELPER(NAME)(void *vd, target_ulong base, CPURISCVState *env, \
+ uint32_t desc) \
+{ \
+ vext_ldst_whole(vd, base, env, desc, LOAD_FN_TLB, LOAD_FN_HOST, \
+ ctzl(sizeof(ETYPE)), GETPC(), true); \
+}
+
+GEN_VEXT_LD_WHOLE(vl1re8_v, int8_t, lde_b_tlb, lde_b_host)
+GEN_VEXT_LD_WHOLE(vl1re16_v, int16_t, lde_h_tlb, lde_h_host)
+GEN_VEXT_LD_WHOLE(vl1re32_v, int32_t, lde_w_tlb, lde_w_host)
+GEN_VEXT_LD_WHOLE(vl1re64_v, int64_t, lde_d_tlb, lde_d_host)
+GEN_VEXT_LD_WHOLE(vl2re8_v, int8_t, lde_b_tlb, lde_b_host)
+GEN_VEXT_LD_WHOLE(vl2re16_v, int16_t, lde_h_tlb, lde_h_host)
+GEN_VEXT_LD_WHOLE(vl2re32_v, int32_t, lde_w_tlb, lde_w_host)
+GEN_VEXT_LD_WHOLE(vl2re64_v, int64_t, lde_d_tlb, lde_d_host)
+GEN_VEXT_LD_WHOLE(vl4re8_v, int8_t, lde_b_tlb, lde_b_host)
+GEN_VEXT_LD_WHOLE(vl4re16_v, int16_t, lde_h_tlb, lde_h_host)
+GEN_VEXT_LD_WHOLE(vl4re32_v, int32_t, lde_w_tlb, lde_w_host)
+GEN_VEXT_LD_WHOLE(vl4re64_v, int64_t, lde_d_tlb, lde_d_host)
+GEN_VEXT_LD_WHOLE(vl8re8_v, int8_t, lde_b_tlb, lde_b_host)
+GEN_VEXT_LD_WHOLE(vl8re16_v, int16_t, lde_h_tlb, lde_h_host)
+GEN_VEXT_LD_WHOLE(vl8re32_v, int32_t, lde_w_tlb, lde_w_host)
+GEN_VEXT_LD_WHOLE(vl8re64_v, int64_t, lde_d_tlb, lde_d_host)
+
+#define GEN_VEXT_ST_WHOLE(NAME, ETYPE, STORE_FN_TLB, STORE_FN_HOST) \
+void HELPER(NAME)(void *vd, target_ulong base, CPURISCVState *env, \
+ uint32_t desc) \
+{ \
+ vext_ldst_whole(vd, base, env, desc, STORE_FN_TLB, STORE_FN_HOST, \
+ ctzl(sizeof(ETYPE)), GETPC(), false); \
+}
+
+GEN_VEXT_ST_WHOLE(vs1r_v, int8_t, ste_b_tlb, ste_b_host)
+GEN_VEXT_ST_WHOLE(vs2r_v, int8_t, ste_b_tlb, ste_b_host)
+GEN_VEXT_ST_WHOLE(vs4r_v, int8_t, ste_b_tlb, ste_b_host)
+GEN_VEXT_ST_WHOLE(vs8r_v, int8_t, ste_b_tlb, ste_b_host)
/*
* Vector Integer Arithmetic Instructions
@@ -891,7 +1133,7 @@ void HELPER(NAME)(void *vd, void *v0, void *vs1, void *vs2, \
uint32_t vta = vext_vta(desc); \
uint32_t i; \
\
- VSTART_CHECK_EARLY_EXIT(env); \
+ VSTART_CHECK_EARLY_EXIT(env, vl); \
\
for (i = env->vstart; i < vl; i++) { \
ETYPE s1 = *((ETYPE *)vs1 + H(i)); \
@@ -925,7 +1167,7 @@ void HELPER(NAME)(void *vd, void *v0, target_ulong s1, void *vs2, \
uint32_t vta = vext_vta(desc); \
uint32_t i; \
\
- VSTART_CHECK_EARLY_EXIT(env); \
+ VSTART_CHECK_EARLY_EXIT(env, vl); \
\
for (i = env->vstart; i < vl; i++) { \
ETYPE s2 = *((ETYPE *)vs2 + H(i)); \
@@ -962,7 +1204,7 @@ void HELPER(NAME)(void *vd, void *v0, void *vs1, void *vs2, \
uint32_t vta_all_1s = vext_vta_all_1s(desc); \
uint32_t i; \
\
- VSTART_CHECK_EARLY_EXIT(env); \
+ VSTART_CHECK_EARLY_EXIT(env, vl); \
\
for (i = env->vstart; i < vl; i++) { \
ETYPE s1 = *((ETYPE *)vs1 + H(i)); \
@@ -1002,7 +1244,7 @@ void HELPER(NAME)(void *vd, void *v0, target_ulong s1, \
uint32_t vta_all_1s = vext_vta_all_1s(desc); \
uint32_t i; \
\
- VSTART_CHECK_EARLY_EXIT(env); \
+ VSTART_CHECK_EARLY_EXIT(env, vl); \
\
for (i = env->vstart; i < vl; i++) { \
ETYPE s2 = *((ETYPE *)vs2 + H(i)); \
@@ -1100,7 +1342,7 @@ void HELPER(NAME)(void *vd, void *v0, void *vs1, \
uint32_t vma = vext_vma(desc); \
uint32_t i; \
\
- VSTART_CHECK_EARLY_EXIT(env); \
+ VSTART_CHECK_EARLY_EXIT(env, vl); \
\
for (i = env->vstart; i < vl; i++) { \
if (!vm && !vext_elem_mask(v0, i)) { \
@@ -1149,7 +1391,7 @@ void HELPER(NAME)(void *vd, void *v0, target_ulong s1, \
uint32_t vma = vext_vma(desc); \
uint32_t i; \
\
- VSTART_CHECK_EARLY_EXIT(env); \
+ VSTART_CHECK_EARLY_EXIT(env, vl); \
\
for (i = env->vstart; i < vl; i++) { \
if (!vm && !vext_elem_mask(v0, i)) { \
@@ -1213,7 +1455,7 @@ void HELPER(NAME)(void *vd, void *v0, void *vs1, void *vs2, \
uint32_t vma = vext_vma(desc); \
uint32_t i; \
\
- VSTART_CHECK_EARLY_EXIT(env); \
+ VSTART_CHECK_EARLY_EXIT(env, vl); \
\
for (i = env->vstart; i < vl; i++) { \
ETYPE s1 = *((ETYPE *)vs1 + H(i)); \
@@ -1280,7 +1522,7 @@ void HELPER(NAME)(void *vd, void *v0, target_ulong s1, void *vs2, \
uint32_t vma = vext_vma(desc); \
uint32_t i; \
\
- VSTART_CHECK_EARLY_EXIT(env); \
+ VSTART_CHECK_EARLY_EXIT(env, vl); \
\
for (i = env->vstart; i < vl; i++) { \
ETYPE s2 = *((ETYPE *)vs2 + H(i)); \
@@ -1829,7 +2071,7 @@ void HELPER(NAME)(void *vd, void *vs1, CPURISCVState *env, \
uint32_t vta = vext_vta(desc); \
uint32_t i; \
\
- VSTART_CHECK_EARLY_EXIT(env); \
+ VSTART_CHECK_EARLY_EXIT(env, vl); \
\
for (i = env->vstart; i < vl; i++) { \
ETYPE s1 = *((ETYPE *)vs1 + H(i)); \
@@ -1855,7 +2097,7 @@ void HELPER(NAME)(void *vd, uint64_t s1, CPURISCVState *env, \
uint32_t vta = vext_vta(desc); \
uint32_t i; \
\
- VSTART_CHECK_EARLY_EXIT(env); \
+ VSTART_CHECK_EARLY_EXIT(env, vl); \
\
for (i = env->vstart; i < vl; i++) { \
*((ETYPE *)vd + H(i)) = (ETYPE)s1; \
@@ -1880,7 +2122,7 @@ void HELPER(NAME)(void *vd, void *v0, void *vs1, void *vs2, \
uint32_t vta = vext_vta(desc); \
uint32_t i; \
\
- VSTART_CHECK_EARLY_EXIT(env); \
+ VSTART_CHECK_EARLY_EXIT(env, vl); \
\
for (i = env->vstart; i < vl; i++) { \
ETYPE *vt = (!vext_elem_mask(v0, i) ? vs2 : vs1); \
@@ -1906,7 +2148,7 @@ void HELPER(NAME)(void *vd, void *v0, target_ulong s1, \
uint32_t vta = vext_vta(desc); \
uint32_t i; \
\
- VSTART_CHECK_EARLY_EXIT(env); \
+ VSTART_CHECK_EARLY_EXIT(env, vl); \
\
for (i = env->vstart; i < vl; i++) { \
ETYPE s2 = *((ETYPE *)vs2 + H(i)); \
@@ -1953,8 +2195,6 @@ vext_vv_rm_1(void *vd, void *v0, void *vs1, void *vs2,
uint32_t vl, uint32_t vm, int vxrm,
opivv2_rm_fn *fn, uint32_t vma, uint32_t esz)
{
- VSTART_CHECK_EARLY_EXIT(env);
-
for (uint32_t i = env->vstart; i < vl; i++) {
if (!vm && !vext_elem_mask(v0, i)) {
/* set masked-off elements to 1s */
@@ -1978,6 +2218,8 @@ vext_vv_rm_2(void *vd, void *v0, void *vs1, void *vs2,
uint32_t vta = vext_vta(desc);
uint32_t vma = vext_vma(desc);
+ VSTART_CHECK_EARLY_EXIT(env, vl);
+
switch (env->vxrm) {
case 0: /* rnu */
vext_vv_rm_1(vd, v0, vs1, vs2,
@@ -2080,8 +2322,6 @@ vext_vx_rm_1(void *vd, void *v0, target_long s1, void *vs2,
uint32_t vl, uint32_t vm, int vxrm,
opivx2_rm_fn *fn, uint32_t vma, uint32_t esz)
{
- VSTART_CHECK_EARLY_EXIT(env);
-
for (uint32_t i = env->vstart; i < vl; i++) {
if (!vm && !vext_elem_mask(v0, i)) {
/* set masked-off elements to 1s */
@@ -2105,6 +2345,8 @@ vext_vx_rm_2(void *vd, void *v0, target_long s1, void *vs2,
uint32_t vta = vext_vta(desc);
uint32_t vma = vext_vma(desc);
+ VSTART_CHECK_EARLY_EXIT(env, vl);
+
switch (env->vxrm) {
case 0: /* rnu */
vext_vx_rm_1(vd, v0, s1, vs2,
@@ -2879,7 +3121,7 @@ void HELPER(NAME)(void *vd, void *v0, void *vs1, \
uint32_t vma = vext_vma(desc); \
uint32_t i; \
\
- VSTART_CHECK_EARLY_EXIT(env); \
+ VSTART_CHECK_EARLY_EXIT(env, vl); \
\
for (i = env->vstart; i < vl; i++) { \
if (!vm && !vext_elem_mask(v0, i)) { \
@@ -2924,7 +3166,7 @@ void HELPER(NAME)(void *vd, void *v0, uint64_t s1, \
uint32_t vma = vext_vma(desc); \
uint32_t i; \
\
- VSTART_CHECK_EARLY_EXIT(env); \
+ VSTART_CHECK_EARLY_EXIT(env, vl); \
\
for (i = env->vstart; i < vl; i++) { \
if (!vm && !vext_elem_mask(v0, i)) { \
@@ -3512,7 +3754,7 @@ void HELPER(NAME)(void *vd, void *v0, void *vs2, \
uint32_t vma = vext_vma(desc); \
uint32_t i; \
\
- VSTART_CHECK_EARLY_EXIT(env); \
+ VSTART_CHECK_EARLY_EXIT(env, vl); \
\
if (vl == 0) { \
return; \
@@ -4035,7 +4277,7 @@ void HELPER(NAME)(void *vd, void *v0, void *vs1, void *vs2, \
uint32_t vma = vext_vma(desc); \
uint32_t i; \
\
- VSTART_CHECK_EARLY_EXIT(env); \
+ VSTART_CHECK_EARLY_EXIT(env, vl); \
\
for (i = env->vstart; i < vl; i++) { \
ETYPE s1 = *((ETYPE *)vs1 + H(i)); \
@@ -4077,7 +4319,7 @@ void HELPER(NAME)(void *vd, void *v0, uint64_t s1, void *vs2, \
uint32_t vma = vext_vma(desc); \
uint32_t i; \
\
- VSTART_CHECK_EARLY_EXIT(env); \
+ VSTART_CHECK_EARLY_EXIT(env, vl); \
\
for (i = env->vstart; i < vl; i++) { \
ETYPE s2 = *((ETYPE *)vs2 + H(i)); \
@@ -4272,7 +4514,7 @@ void HELPER(NAME)(void *vd, void *v0, uint64_t s1, void *vs2, \
uint32_t vta = vext_vta(desc); \
uint32_t i; \
\
- VSTART_CHECK_EARLY_EXIT(env); \
+ VSTART_CHECK_EARLY_EXIT(env, vl); \
\
for (i = env->vstart; i < vl; i++) { \
ETYPE s2 = *((ETYPE *)vs2 + H(i)); \
@@ -4440,6 +4682,8 @@ void HELPER(NAME)(void *vd, void *v0, void *vs1, \
uint32_t i; \
TD s1 = *((TD *)vs1 + HD(0)); \
\
+ VSTART_CHECK_EARLY_EXIT(env, vl); \
+ \
for (i = env->vstart; i < vl; i++) { \
TS2 s2 = *((TS2 *)vs2 + HS2(i)); \
if (!vm && !vext_elem_mask(v0, i)) { \
@@ -4447,7 +4691,9 @@ void HELPER(NAME)(void *vd, void *v0, void *vs1, \
} \
s1 = OP(s1, (TD)s2); \
} \
- *((TD *)vd + HD(0)) = s1; \
+ if (vl > 0) { \
+ *((TD *)vd + HD(0)) = s1; \
+ } \
env->vstart = 0; \
/* set tail elements to 1s */ \
vext_set_elems_1s(vd, vta, esz, vlenb); \
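The new vl > 0 guard encodes the RVV rule that a reduction with vl == 0 performs no operation and must not update the destination; a minimal standalone model:

#include <stdint.h>

/* Sum-reduction model: vd0 is element 0 of the destination register. */
static void redsum8(uint8_t *vd0, uint8_t s1_init,
                    const uint8_t *vs2, uint32_t vl)
{
    uint8_t s1 = s1_init;

    for (uint32_t i = 0; i < vl; i++) {
        s1 += vs2[i];
    }
    if (vl > 0) {       /* with vl == 0, *vd0 keeps its old value */
        *vd0 = s1;
    }
}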
@@ -4526,6 +4772,8 @@ void HELPER(NAME)(void *vd, void *v0, void *vs1, \
uint32_t i; \
TD s1 = *((TD *)vs1 + HD(0)); \
\
+ VSTART_CHECK_EARLY_EXIT(env, vl); \
+ \
for (i = env->vstart; i < vl; i++) { \
TS2 s2 = *((TS2 *)vs2 + HS2(i)); \
if (!vm && !vext_elem_mask(v0, i)) { \
@@ -4533,7 +4781,9 @@ void HELPER(NAME)(void *vd, void *v0, void *vs1, \
} \
s1 = OP(s1, (TD)s2, &env->fp_status); \
} \
- *((TD *)vd + HD(0)) = s1; \
+ if (vl > 0) { \
+ *((TD *)vd + HD(0)) = s1; \
+ } \
env->vstart = 0; \
/* set tail elements to 1s */ \
vext_set_elems_1s(vd, vta, esz, vlenb); \
@@ -4598,7 +4848,7 @@ void HELPER(NAME)(void *vd, void *v0, void *vs1, \
uint32_t i; \
int a, b; \
\
- VSTART_CHECK_EARLY_EXIT(env); \
+ VSTART_CHECK_EARLY_EXIT(env, vl); \
\
for (i = env->vstart; i < vl; i++) { \
a = vext_elem_mask(vs1, i); \
@@ -4688,6 +4938,8 @@ static void vmsetm(void *vd, void *v0, void *vs2, CPURISCVState *env,
int i;
bool first_mask_bit = false;
+ VSTART_CHECK_EARLY_EXIT(env, vl);
+
for (i = env->vstart; i < vl; i++) {
if (!vm && !vext_elem_mask(v0, i)) {
/* set masked-off elements to 1s */
@@ -4760,6 +5012,8 @@ void HELPER(NAME)(void *vd, void *v0, void *vs2, CPURISCVState *env, \
uint32_t sum = 0; \
int i; \
\
+ VSTART_CHECK_EARLY_EXIT(env, vl); \
+ \
for (i = env->vstart; i < vl; i++) { \
if (!vm && !vext_elem_mask(v0, i)) { \
/* set masked-off elements to 1s */ \
@@ -4793,7 +5047,7 @@ void HELPER(NAME)(void *vd, void *v0, CPURISCVState *env, uint32_t desc) \
uint32_t vma = vext_vma(desc); \
int i; \
\
- VSTART_CHECK_EARLY_EXIT(env); \
+ VSTART_CHECK_EARLY_EXIT(env, vl); \
\
for (i = env->vstart; i < vl; i++) { \
if (!vm && !vext_elem_mask(v0, i)) { \
@@ -4830,7 +5084,7 @@ void HELPER(NAME)(void *vd, void *v0, target_ulong s1, void *vs2, \
uint32_t vma = vext_vma(desc); \
target_ulong offset = s1, i_min, i; \
\
- VSTART_CHECK_EARLY_EXIT(env); \
+ VSTART_CHECK_EARLY_EXIT(env, vl); \
\
i_min = MAX(env->vstart, offset); \
for (i = i_min; i < vl; i++) { \
@@ -4865,7 +5119,7 @@ void HELPER(NAME)(void *vd, void *v0, target_ulong s1, void *vs2, \
uint32_t vma = vext_vma(desc); \
target_ulong i_max, i_min, i; \
\
- VSTART_CHECK_EARLY_EXIT(env); \
+ VSTART_CHECK_EARLY_EXIT(env, vl); \
\
i_min = MIN(s1 < vlmax ? vlmax - s1 : 0, vl); \
i_max = MAX(i_min, env->vstart); \
@@ -4879,9 +5133,11 @@ void HELPER(NAME)(void *vd, void *v0, target_ulong s1, void *vs2, \
} \
\
for (i = i_max; i < vl; ++i) { \
- if (vm || vext_elem_mask(v0, i)) { \
- *((ETYPE *)vd + H(i)) = 0; \
+ if (!vm && !vext_elem_mask(v0, i)) { \
+ vext_set_elems_1s(vd, vma, i * esz, (i + 1) * esz); \
+ continue; \
} \
+ *((ETYPE *)vd + H(i)) = 0; \
} \
\
env->vstart = 0; \
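The vslidedown hunk above changes how elements past the end of the slid source are handled: active elements are still zeroed, but masked-off elements now honor the mask policy (filled with 1s when mask-agnostic, left undisturbed otherwise) instead of always being left undisturbed. A standalone model of the corrected tail loop, using byte elements:

#include <stdbool.h>
#include <stdint.h>

/* Tail of the slid source: elements i_max..vl-1 have no source data. */
static void slidedown_tail8(uint8_t *vd, const bool *active,
                            uint32_t i_max, uint32_t vl, bool vma)
{
    for (uint32_t i = i_max; i < vl; i++) {
        if (!active[i]) {
            if (vma) {
                vd[i] = 0xff;   /* mask-agnostic: fill with 1s */
            }                   /* otherwise: leave vd[i] undisturbed */
            continue;
        }
        vd[i] = 0;              /* active element beyond the source */
    }
}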
@@ -4909,7 +5165,7 @@ static void vslide1up_##BITWIDTH(void *vd, void *v0, uint64_t s1, \
uint32_t vma = vext_vma(desc); \
uint32_t i; \
\
- VSTART_CHECK_EARLY_EXIT(env); \
+ VSTART_CHECK_EARLY_EXIT(env, vl); \
\
for (i = env->vstart; i < vl; i++) { \
if (!vm && !vext_elem_mask(v0, i)) { \
@@ -4960,7 +5216,7 @@ static void vslide1down_##BITWIDTH(void *vd, void *v0, uint64_t s1, \
uint32_t vma = vext_vma(desc); \
uint32_t i; \
\
- VSTART_CHECK_EARLY_EXIT(env); \
+ VSTART_CHECK_EARLY_EXIT(env, vl); \
\
for (i = env->vstart; i < vl; i++) { \
if (!vm && !vext_elem_mask(v0, i)) { \
@@ -5037,7 +5293,7 @@ void HELPER(NAME)(void *vd, void *v0, void *vs1, void *vs2, \
uint64_t index; \
uint32_t i; \
\
- VSTART_CHECK_EARLY_EXIT(env); \
+ VSTART_CHECK_EARLY_EXIT(env, vl); \
\
for (i = env->vstart; i < vl; i++) { \
if (!vm && !vext_elem_mask(v0, i)) { \
@@ -5082,7 +5338,7 @@ void HELPER(NAME)(void *vd, void *v0, target_ulong s1, void *vs2, \
uint64_t index = s1; \
uint32_t i; \
\
- VSTART_CHECK_EARLY_EXIT(env); \
+ VSTART_CHECK_EARLY_EXIT(env, vl); \
\
for (i = env->vstart; i < vl; i++) { \
if (!vm && !vext_elem_mask(v0, i)) { \
@@ -5118,6 +5374,8 @@ void HELPER(NAME)(void *vd, void *v0, void *vs1, void *vs2, \
uint32_t vta = vext_vta(desc); \
uint32_t num = 0, i; \
\
+ VSTART_CHECK_EARLY_EXIT(env, vl); \
+ \
for (i = env->vstart; i < vl; i++) { \
if (!vext_elem_mask(vs1, i)) { \
continue; \
@@ -5127,7 +5385,7 @@ void HELPER(NAME)(void *vd, void *v0, void *vs1, void *vs2, \
} \
env->vstart = 0; \
/* set tail elements to 1s */ \
- vext_set_elems_1s(vd, vta, vl * esz, total_elems * esz); \
+ vext_set_elems_1s(vd, vta, num * esz, total_elems * esz); \
}
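The one-line change above fixes a real bug: vcompress produces only num elements (one per set bit in vs1), so the tail-agnostic fill must start at num rather than vl, or the slots between num and vl would be left stale. A standalone model:

#include <stdbool.h>
#include <stdint.h>
#include <string.h>

static void compress8(uint8_t *vd, const uint8_t *vs2, const bool *mask,
                      uint32_t vl, uint32_t total_elems)
{
    uint32_t num = 0;

    for (uint32_t i = 0; i < vl; i++) {
        if (mask[i]) {
            vd[num++] = vs2[i];
        }
    }
    /* Tail-agnostic: everything from num upward is filled with 1s. */
    memset(vd + num, 0xff, total_elems - num);
}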
/* Compress into vd elements of vs2 where vs1 is enabled */
@@ -5178,7 +5436,7 @@ void HELPER(NAME)(void *vd, void *v0, void *vs2, \
uint32_t vma = vext_vma(desc); \
uint32_t i; \
\
- VSTART_CHECK_EARLY_EXIT(env); \
+ VSTART_CHECK_EARLY_EXIT(env, vl); \
\
for (i = env->vstart; i < vl; i++) { \
if (!vm && !vext_elem_mask(v0, i)) { \
diff --git a/target/riscv/vector_internals.c b/target/riscv/vector_internals.c
index 05b2d01..b490b1d 100644
--- a/target/riscv/vector_internals.c
+++ b/target/riscv/vector_internals.c
@@ -66,7 +66,7 @@ void do_vext_vv(void *vd, void *v0, void *vs1, void *vs2,
uint32_t vma = vext_vma(desc);
uint32_t i;
- VSTART_CHECK_EARLY_EXIT(env);
+ VSTART_CHECK_EARLY_EXIT(env, vl);
for (i = env->vstart; i < vl; i++) {
if (!vm && !vext_elem_mask(v0, i)) {
@@ -92,7 +92,7 @@ void do_vext_vx(void *vd, void *v0, target_long s1, void *vs2,
uint32_t vma = vext_vma(desc);
uint32_t i;
- VSTART_CHECK_EARLY_EXIT(env);
+ VSTART_CHECK_EARLY_EXIT(env, vl);
for (i = env->vstart; i < vl; i++) {
if (!vm && !vext_elem_mask(v0, i)) {
diff --git a/target/riscv/vector_internals.h b/target/riscv/vector_internals.h
index 9e1e15b..8eee7e5 100644
--- a/target/riscv/vector_internals.h
+++ b/target/riscv/vector_internals.h
@@ -20,15 +20,16 @@
#define TARGET_RISCV_VECTOR_INTERNALS_H
#include "qemu/bitops.h"
+#include "hw/registerfields.h"
#include "cpu.h"
#include "tcg/tcg-gvec-desc.h"
#include "internals.h"
-#define VSTART_CHECK_EARLY_EXIT(env) do { \
- if (env->vstart >= env->vl) { \
- env->vstart = 0; \
- return; \
- } \
+#define VSTART_CHECK_EARLY_EXIT(env, vl) do { \
+ if (env->vstart >= vl) { \
+ env->vstart = 0; \
+ return; \
+ } \
} while (0)
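The extra parameter lets helpers that iterate over an effective length pass it explicitly; the mask load/store helpers above, for instance, run over evl = ceil(vl / 8) bytes, where checking env->vstart against env->vl would miss the early exit. A minimal standalone usage sketch, with VecState standing in for CPURISCVState:

#include <stdint.h>

typedef struct { uint32_t vstart, vl; } VecState;

#define VSTART_CHECK_EARLY_EXIT(env, vl) do {  \
    if ((env)->vstart >= (vl)) {               \
        (env)->vstart = 0;                     \
        return;                                \
    }                                          \
} while (0)

/* vlm.v-style helper: the loop bound is the byte count evl, not env->vl. */
static void mask_load(VecState *env, uint8_t *vd, const uint8_t *mem)
{
    uint32_t evl = (env->vl + 7) >> 3;   /* evl = ceil(vl / 8) */

    VSTART_CHECK_EARLY_EXIT(env, evl);
    for (uint32_t i = env->vstart; i < evl; i++) {
        vd[i] = mem[i];
    }
    env->vstart = 0;
}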
static inline uint32_t vext_nf(uint32_t desc)
@@ -158,7 +159,7 @@ void HELPER(NAME)(void *vd, void *v0, void *vs2, \
uint32_t vma = vext_vma(desc); \
uint32_t i; \
\
- VSTART_CHECK_EARLY_EXIT(env); \
+ VSTART_CHECK_EARLY_EXIT(env, vl); \
\
for (i = env->vstart; i < vl; i++) { \
if (!vm && !vext_elem_mask(v0, i)) { \
diff --git a/target/riscv/zce_helper.c b/target/riscv/zce_helper.c
index b433bda..55221f5 100644
--- a/target/riscv/zce_helper.c
+++ b/target/riscv/zce_helper.c
@@ -18,9 +18,8 @@
#include "qemu/osdep.h"
#include "cpu.h"
-#include "exec/exec-all.h"
#include "exec/helper-proto.h"
-#include "exec/cpu_ldst.h"
+#include "accel/tcg/cpu-ldst.h"
target_ulong HELPER(cm_jalt)(CPURISCVState *env, uint32_t index)
{