author     Stefan Hajnoczi <stefanha@redhat.com>  2025-01-19 08:55:46 -0500
committer  Stefan Hajnoczi <stefanha@redhat.com>  2025-01-19 08:55:46 -0500
commit     d6430c17d7113d3c38480dc34e59d00b0504e2f7 (patch)
tree       1e8dc26937361f759890faa8791d4353d615853f /target
parent     20fac491cfeebb042f59cc61ae76fed1b397d25c (diff)
parent     f04cac4f8f254931f2af9d059b2175769e576afa (diff)
download   qemu-d6430c17d7113d3c38480dc34e59d00b0504e2f7.zip
           qemu-d6430c17d7113d3c38480dc34e59d00b0504e2f7.tar.gz
           qemu-d6430c17d7113d3c38480dc34e59d00b0504e2f7.tar.bz2
Merge tag 'pull-riscv-to-apply-20250119-1' of https://github.com/alistair23/qemu into staging (HEAD, master)
Second RISC-V PR for 10.0

* Reduce the overhead for simple RISC-V vector unit-stride loads and stores
* Add V bit to GDB priv reg
* Add 'sha' support
* Add traces for exceptions in user mode
* Update Pointer Masking to Zjpm v1.0
* Add Smrnmi support
* Fix timebase-frequency when using KVM acceleration
* Add RISC-V Counter delegation ISA extension support
* Add support for Smdbltrp and Ssdbltrp extensions
* Introduce a translation tag for the IOMMU page table cache
* Support Supm and Sspm as part of Zjpm v1.0
* Convert htif debug prints to trace event

# -----BEGIN PGP SIGNATURE-----
#
# iQIzBAABCAAdFiEEaukCtqfKh31tZZKWr3yVEwxTgBMFAmeMUUwACgkQr3yVEwxT
# gBNgDQ/+JeqcsbJRX+PZQJEV06tDIJpk+mfaBHUYSGdNkjI9fzowNaxFIEB2vaLt
# 4+xAGMnJ4vMcjJyBcPOn1FKAlowM7MsUNITOF9Rstnyriqnj2UsUZ9YBtkuG6gWH
# ZHoYEKu7mAZoZw5RRx4TatHDXw7TYfUsrDPrn+x6yeCZTq9ruRTlHkzp2LC725Vq
# KTnbWAP7WlqiJaSxB5eIFYT5tYP1Blp0yD358B037C57EU9j5zm2FQdFmVK1+xRF
# dFg/urBIzfAjjkCS/t9DmH+S6NgMEut6udUhllk/KUJAzWvsggc4wZZlWjFOJFJY
# fIxx3alhY3pcm1PYjFpf15Poz6Pqva/KGjwgZafirKQtPbRSzfRkUwcHOYRTQT9j
# abeiB44XPaeIl8Jvw7GLxcWtlJ5NmBrZho+2Z9mIhB/Ix5H3PDgs18Oc/s73P2qQ
# JFLRb7cpYy1HbRc0ugvwAmOTY1t6HX8HAtT+3rNhiXpXnj4RW2C/WU1cEqrg8QkM
# cTPiy2zHoBhAWt9aDK1Kvbhb1vur3JaF7rk9jeKlriFr87Ly+yPU+8mnEDw40NMR
# Tc9nivqmOqqXS5AM9O/W1uzTWzpxIUy7XBy3cuSk0uZCoge4IE2Or7P2Rb2uyaNZ
# RkAo/PL2N1cMjP7gB3kLRtYY7FA+nal66KhfbHPRHqj+ZwUAxzs=
# =F3IG
# -----END PGP SIGNATURE-----
# gpg: Signature made Sat 18 Jan 2025 20:11:40 EST
# gpg:                using RSA key 6AE902B6A7CA877D6D659296AF7C95130C538013
# gpg: Good signature from "Alistair Francis <alistair@alistair23.me>" [unknown]
# gpg: WARNING: This key is not certified with a trusted signature!
# gpg:          There is no indication that the signature belongs to the owner.
# Primary key fingerprint: 6AE9 02B6 A7CA 877D 6D65 9296 AF7C 9513 0C53 8013

* tag 'pull-riscv-to-apply-20250119-1' of https://github.com/alistair23/qemu: (50 commits)
  hw/char/riscv_htif: Convert HTIF_DEBUG() to trace events
  target/riscv: Support Supm and Sspm as part of Zjpm v1.0
  hw/riscv/riscv-iommu.c: Introduce a translation tag for the page table cache
  target/riscv: Add Smdbltrp ISA extension enable switch
  target/riscv: Implement Smdbltrp behavior
  target/riscv: Implement Smdbltrp sret, mret and mnret behavior
  target/riscv: Add Smdbltrp CSRs handling
  target/riscv: Add Ssdbltrp ISA extension enable switch
  target/riscv: Implement Ssdbltrp exception handling
  target/riscv: Implement Ssdbltrp sret, mret and mnret behavior
  target/riscv: Add Ssdbltrp CSRs handling
  target/riscv: Fix henvcfg potentially containing stale bits
  target/riscv: Add configuration for S[m|s]csrind, Smcdeleg/Ssccfg
  target/riscv: Add implied rule for counter delegation extensions
  target/riscv: Invoke pmu init after feature enable
  target/riscv: Add counter delegation/configuration support
  target/riscv: Add select value range check for counter delegation
  target/riscv: Add counter delegation definitions
  target/riscv: Add properties for counter delegation ISA extensions
  target/riscv: Support generic CSR indirect access
  ...

Signed-off-by: Stefan Hajnoczi <stefanha@redhat.com>
Diffstat (limited to 'target')
-rw-r--r--  target/riscv/cpu.c                              |   97
-rw-r--r--  target/riscv/cpu.h                              |   65
-rw-r--r--  target/riscv/cpu_bits.h                         |  157
-rw-r--r--  target/riscv/cpu_cfg.h                          |   13
-rw-r--r--  target/riscv/cpu_helper.c                       |  311
-rw-r--r--  target/riscv/csr.c                              | 1177
-rw-r--r--  target/riscv/gdbstub.c                          |   23
-rw-r--r--  target/riscv/helper.h                           |    1
-rw-r--r--  target/riscv/insn32.decode                      |    3
-rw-r--r--  target/riscv/insn_trans/trans_privileged.c.inc  |   20
-rw-r--r--  target/riscv/internals.h                        |   54
-rw-r--r--  target/riscv/kvm/kvm-cpu.c                      |    4
-rw-r--r--  target/riscv/kvm/kvm_riscv.h                    |    4
-rw-r--r--  target/riscv/machine.c                          |   18
-rw-r--r--  target/riscv/op_helper.c                        |  126
-rw-r--r--  target/riscv/pmp.c                              |   14
-rw-r--r--  target/riscv/pmp.h                              |    1
-rw-r--r--  target/riscv/tcg/tcg-cpu.c                      |   60
-rw-r--r--  target/riscv/trace-events                       |    3
-rw-r--r--  target/riscv/translate.c                        |   49
-rw-r--r--  target/riscv/vector_helper.c                    |   31
21 files changed, 1616 insertions(+), 615 deletions(-)
diff --git a/target/riscv/cpu.c b/target/riscv/cpu.c
index b8d5120..3d4bd15 100644
--- a/target/riscv/cpu.c
+++ b/target/riscv/cpu.c
@@ -42,7 +42,7 @@
/* RISC-V CPU definitions */
static const char riscv_single_letter_exts[] = "IEMAFDQCBPVH";
const uint32_t misa_bits[] = {RVI, RVE, RVM, RVA, RVF, RVD, RVV,
- RVC, RVS, RVU, RVH, RVJ, RVG, RVB, 0};
+ RVC, RVS, RVU, RVH, RVG, RVB, 0};
/*
* From vector_helper.c
@@ -183,18 +183,37 @@ const RISCVIsaExtData isa_edata_arr[] = {
ISA_EXT_DATA_ENTRY(zvkt, PRIV_VERSION_1_12_0, ext_zvkt),
ISA_EXT_DATA_ENTRY(zhinx, PRIV_VERSION_1_12_0, ext_zhinx),
ISA_EXT_DATA_ENTRY(zhinxmin, PRIV_VERSION_1_12_0, ext_zhinxmin),
+ ISA_EXT_DATA_ENTRY(shcounterenw, PRIV_VERSION_1_12_0, has_priv_1_12),
+ ISA_EXT_DATA_ENTRY(sha, PRIV_VERSION_1_12_0, ext_sha),
+ ISA_EXT_DATA_ENTRY(shgatpa, PRIV_VERSION_1_12_0, has_priv_1_12),
+ ISA_EXT_DATA_ENTRY(shtvala, PRIV_VERSION_1_12_0, has_priv_1_12),
+ ISA_EXT_DATA_ENTRY(shvsatpa, PRIV_VERSION_1_12_0, has_priv_1_12),
+ ISA_EXT_DATA_ENTRY(shvstvala, PRIV_VERSION_1_12_0, has_priv_1_12),
+ ISA_EXT_DATA_ENTRY(shvstvecd, PRIV_VERSION_1_12_0, has_priv_1_12),
ISA_EXT_DATA_ENTRY(smaia, PRIV_VERSION_1_12_0, ext_smaia),
+ ISA_EXT_DATA_ENTRY(smcdeleg, PRIV_VERSION_1_13_0, ext_smcdeleg),
ISA_EXT_DATA_ENTRY(smcntrpmf, PRIV_VERSION_1_12_0, ext_smcntrpmf),
+ ISA_EXT_DATA_ENTRY(smcsrind, PRIV_VERSION_1_13_0, ext_smcsrind),
+ ISA_EXT_DATA_ENTRY(smdbltrp, PRIV_VERSION_1_13_0, ext_smdbltrp),
ISA_EXT_DATA_ENTRY(smepmp, PRIV_VERSION_1_12_0, ext_smepmp),
+ ISA_EXT_DATA_ENTRY(smrnmi, PRIV_VERSION_1_12_0, ext_smrnmi),
+ ISA_EXT_DATA_ENTRY(smmpm, PRIV_VERSION_1_13_0, ext_smmpm),
+ ISA_EXT_DATA_ENTRY(smnpm, PRIV_VERSION_1_13_0, ext_smnpm),
ISA_EXT_DATA_ENTRY(smstateen, PRIV_VERSION_1_12_0, ext_smstateen),
ISA_EXT_DATA_ENTRY(ssaia, PRIV_VERSION_1_12_0, ext_ssaia),
+ ISA_EXT_DATA_ENTRY(ssccfg, PRIV_VERSION_1_13_0, ext_ssccfg),
ISA_EXT_DATA_ENTRY(ssccptr, PRIV_VERSION_1_11_0, has_priv_1_11),
ISA_EXT_DATA_ENTRY(sscofpmf, PRIV_VERSION_1_12_0, ext_sscofpmf),
ISA_EXT_DATA_ENTRY(sscounterenw, PRIV_VERSION_1_12_0, has_priv_1_12),
+ ISA_EXT_DATA_ENTRY(sscsrind, PRIV_VERSION_1_12_0, ext_sscsrind),
+ ISA_EXT_DATA_ENTRY(ssdbltrp, PRIV_VERSION_1_13_0, ext_ssdbltrp),
+ ISA_EXT_DATA_ENTRY(ssnpm, PRIV_VERSION_1_13_0, ext_ssnpm),
+ ISA_EXT_DATA_ENTRY(sspm, PRIV_VERSION_1_13_0, ext_sspm),
ISA_EXT_DATA_ENTRY(ssstateen, PRIV_VERSION_1_12_0, ext_ssstateen),
ISA_EXT_DATA_ENTRY(sstc, PRIV_VERSION_1_12_0, ext_sstc),
ISA_EXT_DATA_ENTRY(sstvala, PRIV_VERSION_1_12_0, has_priv_1_12),
ISA_EXT_DATA_ENTRY(sstvecd, PRIV_VERSION_1_12_0, has_priv_1_12),
+ ISA_EXT_DATA_ENTRY(supm, PRIV_VERSION_1_13_0, ext_supm),
ISA_EXT_DATA_ENTRY(svade, PRIV_VERSION_1_11_0, ext_svade),
ISA_EXT_DATA_ENTRY(svadu, PRIV_VERSION_1_12_0, ext_svadu),
ISA_EXT_DATA_ENTRY(svinval, PRIV_VERSION_1_12_0, ext_svinval),
@@ -288,7 +307,7 @@ static const char * const riscv_excp_names[] = {
"load_page_fault",
"reserved",
"store_page_fault",
- "reserved",
+ "double_trap",
"reserved",
"reserved",
"reserved",
@@ -889,13 +908,6 @@ static void riscv_cpu_dump_state(CPUState *cs, FILE *f, int flags)
CSR_MSCRATCH,
CSR_SSCRATCH,
CSR_SATP,
- CSR_MMTE,
- CSR_UPMBASE,
- CSR_UPMMASK,
- CSR_SPMBASE,
- CSR_SPMMASK,
- CSR_MPMBASE,
- CSR_MPMMASK,
};
for (i = 0; i < ARRAY_SIZE(dump_csrs); ++i) {
@@ -1055,6 +1067,9 @@ static void riscv_cpu_reset_hold(Object *obj, ResetType type)
env->mstatus_hs = set_field(env->mstatus_hs,
MSTATUS64_UXL, env->misa_mxl);
}
+ if (riscv_cpu_cfg(env)->ext_smdbltrp) {
+ env->mstatus = set_field(env->mstatus, MSTATUS_MDT, 1);
+ }
}
env->mcause = 0;
env->miclaim = MIP_SGEIP;
@@ -1081,8 +1096,6 @@ static void riscv_cpu_reset_hold(Object *obj, ResetType type)
}
i++;
}
- /* mmte is supposed to have pm.current hardwired to 1 */
- env->mmte |= (EXT_STATUS_INITIAL | MMTE_M_PM_CURRENT);
/*
* Bits 10, 6, 2 and 12 of mideleg are read only 1 when the Hypervisor
@@ -1114,7 +1127,6 @@ static void riscv_cpu_reset_hold(Object *obj, ResetType type)
env->ssp = 0;
env->xl = riscv_cpu_mxl(env);
- riscv_cpu_update_mask(env);
cs->exception_index = RISCV_EXCP_NONE;
env->load_res = -1;
set_default_nan_mode(1, &env->fp_status);
@@ -1127,6 +1139,11 @@ static void riscv_cpu_reset_hold(Object *obj, ResetType type)
riscv_trigger_reset_hold(env);
}
+ if (cpu->cfg.ext_smrnmi) {
+ env->rnmip = 0;
+ env->mnstatus = set_field(env->mnstatus, MNSTATUS_NMIE, false);
+ }
+
if (kvm_enabled()) {
kvm_riscv_reset_vcpu(cpu);
}
@@ -1407,6 +1424,11 @@ static void riscv_cpu_set_irq(void *opaque, int irq, int level)
g_assert_not_reached();
}
}
+
+static void riscv_cpu_set_nmi(void *opaque, int irq, int level)
+{
+ riscv_cpu_set_rnmi(RISCV_CPU(opaque), irq, level);
+}
#endif /* CONFIG_USER_ONLY */
static bool riscv_cpu_is_dynamic(Object *cpu_obj)
@@ -1430,6 +1452,8 @@ static void riscv_cpu_init(Object *obj)
#ifndef CONFIG_USER_ONLY
qdev_init_gpio_in(DEVICE(obj), riscv_cpu_set_irq,
IRQ_LOCAL_MAX + IRQ_LOCAL_GUEST_MAX);
+ qdev_init_gpio_in_named(DEVICE(cpu), riscv_cpu_set_nmi,
+ "riscv.cpu.rnmi", RNMI_MAX);
#endif /* CONFIG_USER_ONLY */
general_user_opts = g_hash_table_new(g_str_hash, g_str_equal);
@@ -1504,7 +1528,6 @@ static const MISAExtInfo misa_ext_info_arr[] = {
MISA_EXT_INFO(RVS, "s", "Supervisor-level instructions"),
MISA_EXT_INFO(RVU, "u", "User-level instructions"),
MISA_EXT_INFO(RVH, "h", "Hypervisor"),
- MISA_EXT_INFO(RVJ, "x-j", "Dynamic translated languages"),
MISA_EXT_INFO(RVV, "v", "Vector operations"),
MISA_EXT_INFO(RVG, "g", "General purpose (IMAFD_Zicsr_Zifencei)"),
MISA_EXT_INFO(RVB, "b", "Bit manipulation (Zba_Zbb_Zbs)")
@@ -1571,6 +1594,10 @@ const RISCVCPUMultiExtConfig riscv_cpu_extensions[] = {
/* Defaults for standard extensions */
MULTI_EXT_CFG_BOOL("sscofpmf", ext_sscofpmf, false),
MULTI_EXT_CFG_BOOL("smcntrpmf", ext_smcntrpmf, false),
+ MULTI_EXT_CFG_BOOL("smcsrind", ext_smcsrind, false),
+ MULTI_EXT_CFG_BOOL("smcdeleg", ext_smcdeleg, false),
+ MULTI_EXT_CFG_BOOL("sscsrind", ext_sscsrind, false),
+ MULTI_EXT_CFG_BOOL("ssccfg", ext_ssccfg, false),
MULTI_EXT_CFG_BOOL("zifencei", ext_zifencei, true),
MULTI_EXT_CFG_BOOL("zicfilp", ext_zicfilp, false),
MULTI_EXT_CFG_BOOL("zicfiss", ext_zicfiss, false),
@@ -1599,11 +1626,19 @@ const RISCVCPUMultiExtConfig riscv_cpu_extensions[] = {
MULTI_EXT_CFG_BOOL("zvfh", ext_zvfh, false),
MULTI_EXT_CFG_BOOL("zvfhmin", ext_zvfhmin, false),
MULTI_EXT_CFG_BOOL("sstc", ext_sstc, true),
+ MULTI_EXT_CFG_BOOL("ssnpm", ext_ssnpm, false),
+ MULTI_EXT_CFG_BOOL("sspm", ext_sspm, false),
+ MULTI_EXT_CFG_BOOL("supm", ext_supm, false),
MULTI_EXT_CFG_BOOL("smaia", ext_smaia, false),
+ MULTI_EXT_CFG_BOOL("smdbltrp", ext_smdbltrp, false),
MULTI_EXT_CFG_BOOL("smepmp", ext_smepmp, false),
+ MULTI_EXT_CFG_BOOL("smrnmi", ext_smrnmi, false),
+ MULTI_EXT_CFG_BOOL("smmpm", ext_smmpm, false),
+ MULTI_EXT_CFG_BOOL("smnpm", ext_smnpm, false),
MULTI_EXT_CFG_BOOL("smstateen", ext_smstateen, false),
MULTI_EXT_CFG_BOOL("ssaia", ext_ssaia, false),
+ MULTI_EXT_CFG_BOOL("ssdbltrp", ext_ssdbltrp, false),
MULTI_EXT_CFG_BOOL("svade", ext_svade, false),
MULTI_EXT_CFG_BOOL("svadu", ext_svadu, true),
MULTI_EXT_CFG_BOOL("svinval", ext_svinval, false),
@@ -1708,6 +1743,7 @@ const RISCVCPUMultiExtConfig riscv_cpu_experimental_exts[] = {
const RISCVCPUMultiExtConfig riscv_cpu_named_features[] = {
MULTI_EXT_CFG_BOOL("zic64b", ext_zic64b, true),
MULTI_EXT_CFG_BOOL("ssstateen", ext_ssstateen, true),
+ MULTI_EXT_CFG_BOOL("sha", ext_sha, true),
{ },
};
@@ -2739,6 +2775,34 @@ static RISCVCPUImpliedExtsRule ZVKSG_IMPLIED = {
},
};
+static RISCVCPUImpliedExtsRule SSCFG_IMPLIED = {
+ .ext = CPU_CFG_OFFSET(ext_ssccfg),
+ .implied_multi_exts = {
+ CPU_CFG_OFFSET(ext_smcsrind), CPU_CFG_OFFSET(ext_sscsrind),
+ CPU_CFG_OFFSET(ext_smcdeleg),
+
+ RISCV_IMPLIED_EXTS_RULE_END
+ },
+};
+
+static RISCVCPUImpliedExtsRule SUPM_IMPLIED = {
+ .ext = CPU_CFG_OFFSET(ext_supm),
+ .implied_multi_exts = {
+ CPU_CFG_OFFSET(ext_ssnpm), CPU_CFG_OFFSET(ext_smnpm),
+
+ RISCV_IMPLIED_EXTS_RULE_END
+ },
+};
+
+static RISCVCPUImpliedExtsRule SSPM_IMPLIED = {
+ .ext = CPU_CFG_OFFSET(ext_sspm),
+ .implied_multi_exts = {
+ CPU_CFG_OFFSET(ext_smnpm),
+
+ RISCV_IMPLIED_EXTS_RULE_END
+ },
+};
+
RISCVCPUImpliedExtsRule *riscv_misa_ext_implied_rules[] = {
&RVA_IMPLIED, &RVD_IMPLIED, &RVF_IMPLIED,
&RVM_IMPLIED, &RVV_IMPLIED, NULL
@@ -2756,7 +2820,8 @@ RISCVCPUImpliedExtsRule *riscv_multi_ext_implied_rules[] = {
&ZVE64X_IMPLIED, &ZVFBFMIN_IMPLIED, &ZVFBFWMA_IMPLIED,
&ZVFH_IMPLIED, &ZVFHMIN_IMPLIED, &ZVKN_IMPLIED,
&ZVKNC_IMPLIED, &ZVKNG_IMPLIED, &ZVKNHB_IMPLIED,
- &ZVKS_IMPLIED, &ZVKSC_IMPLIED, &ZVKSG_IMPLIED,
+ &ZVKS_IMPLIED, &ZVKSC_IMPLIED, &ZVKSG_IMPLIED, &SSCFG_IMPLIED,
+ &SUPM_IMPLIED, &SSPM_IMPLIED,
NULL
};
@@ -2785,6 +2850,10 @@ static const Property riscv_cpu_properties[] = {
#ifndef CONFIG_USER_ONLY
DEFINE_PROP_UINT64("resetvec", RISCVCPU, env.resetvec, DEFAULT_RSTVEC),
+ DEFINE_PROP_UINT64("rnmi-interrupt-vector", RISCVCPU, env.rnmi_irqvec,
+ DEFAULT_RNMI_IRQVEC),
+ DEFINE_PROP_UINT64("rnmi-exception-vector", RISCVCPU, env.rnmi_excpvec,
+ DEFAULT_RNMI_EXCPVEC),
#endif
DEFINE_PROP_BOOL("short-isa-string", RISCVCPU, cfg.short_isa_string, false),
diff --git a/target/riscv/cpu.h b/target/riscv/cpu.h
index 252fdb8..9771368 100644
--- a/target/riscv/cpu.h
+++ b/target/riscv/cpu.h
@@ -71,7 +71,6 @@ typedef struct CPUArchState CPURISCVState;
#define RVS RV('S')
#define RVU RV('U')
#define RVH RV('H')
-#define RVJ RV('J')
#define RVG RV('G')
#define RVB RV('B')
@@ -129,6 +128,14 @@ typedef enum {
EXT_STATUS_DIRTY,
} RISCVExtStatus;
+/* Enum holds PMM field values for Zjpm v1.0 extension */
+typedef enum {
+ PMM_FIELD_DISABLED = 0,
+ PMM_FIELD_RESERVED = 1,
+ PMM_FIELD_PMLEN7 = 2,
+ PMM_FIELD_PMLEN16 = 3,
+} RISCVPmPmm;
+
typedef struct riscv_cpu_implied_exts_rule {
#ifndef CONFIG_USER_ONLY
/*
@@ -385,6 +392,7 @@ struct CPUArchState {
uint32_t scounteren;
uint32_t mcounteren;
+ uint32_t scountinhibit;
uint32_t mcountinhibit;
/* PMU cycle & instret privilege mode filtering */
@@ -451,24 +459,11 @@ struct CPUArchState {
/* True if in debugger mode. */
bool debugger;
- /*
- * CSRs for PointerMasking extension
- */
- target_ulong mmte;
- target_ulong mpmmask;
- target_ulong mpmbase;
- target_ulong spmmask;
- target_ulong spmbase;
- target_ulong upmmask;
- target_ulong upmbase;
-
uint64_t mstateen[SMSTATEEN_MAX_COUNT];
uint64_t hstateen[SMSTATEEN_MAX_COUNT];
uint64_t sstateen[SMSTATEEN_MAX_COUNT];
uint64_t henvcfg;
#endif
- target_ulong cur_pmmask;
- target_ulong cur_pmbase;
/* Fields from here on are preserved across CPU reset. */
QEMUTimer *stimer; /* Internal timer for S-mode interrupt */
@@ -486,6 +481,15 @@ struct CPUArchState {
uint64_t kvm_timer_state;
uint64_t kvm_timer_frequency;
#endif /* CONFIG_KVM */
+
+ /* RNMI */
+ target_ulong mnscratch;
+ target_ulong mnepc;
+ target_ulong mncause; /* mncause without bit XLEN-1 set to 1 */
+ target_ulong mnstatus;
+ target_ulong rnmip;
+ uint64_t rnmi_irqvec;
+ uint64_t rnmi_excpvec;
};
/*
@@ -560,6 +564,7 @@ void riscv_cpu_set_virt_enabled(CPURISCVState *env, bool enable);
int riscv_env_mmu_index(CPURISCVState *env, bool ifetch);
bool cpu_get_fcfien(CPURISCVState *env);
bool cpu_get_bcfien(CPURISCVState *env);
+bool riscv_env_smode_dbltrp_enabled(CPURISCVState *env, bool virt);
G_NORETURN void riscv_cpu_do_unaligned_access(CPUState *cs, vaddr addr,
MMUAccessType access_type,
int mmu_idx, uintptr_t retaddr);
@@ -584,6 +589,7 @@ void riscv_cpu_swap_hypervisor_regs(CPURISCVState *env);
int riscv_cpu_claim_interrupts(RISCVCPU *cpu, uint64_t interrupts);
uint64_t riscv_cpu_update_mip(CPURISCVState *env, uint64_t mask,
uint64_t value);
+void riscv_cpu_set_rnmi(RISCVCPU *cpu, uint32_t irq, bool level);
void riscv_cpu_interrupt(CPURISCVState *env);
#define BOOL_TO_MASK(x) (-!!(x)) /* helper for riscv_cpu_update_mip value */
void riscv_cpu_set_rdtime_fn(CPURISCVState *env, uint64_t (*fn)(void *),
@@ -606,7 +612,8 @@ void riscv_translate_code(CPUState *cs, TranslationBlock *tb,
int *max_insns, vaddr pc, void *host_pc);
G_NORETURN void riscv_raise_exception(CPURISCVState *env,
- uint32_t exception, uintptr_t pc);
+ RISCVException exception,
+ uintptr_t pc);
target_ulong riscv_cpu_get_fflags(CPURISCVState *env);
void riscv_cpu_set_fflags(CPURISCVState *env, target_ulong);
@@ -627,19 +634,22 @@ FIELD(TB_FLAGS, XL, 16, 2)
/* If PointerMasking should be applied */
FIELD(TB_FLAGS, PM_MASK_ENABLED, 18, 1)
FIELD(TB_FLAGS, PM_BASE_ENABLED, 19, 1)
-FIELD(TB_FLAGS, VTA, 20, 1)
-FIELD(TB_FLAGS, VMA, 21, 1)
+FIELD(TB_FLAGS, VTA, 18, 1)
+FIELD(TB_FLAGS, VMA, 19, 1)
/* Native debug itrigger */
-FIELD(TB_FLAGS, ITRIGGER, 22, 1)
+FIELD(TB_FLAGS, ITRIGGER, 20, 1)
/* Virtual mode enabled */
-FIELD(TB_FLAGS, VIRT_ENABLED, 23, 1)
-FIELD(TB_FLAGS, PRIV, 24, 2)
-FIELD(TB_FLAGS, AXL, 26, 2)
+FIELD(TB_FLAGS, VIRT_ENABLED, 21, 1)
+FIELD(TB_FLAGS, PRIV, 22, 2)
+FIELD(TB_FLAGS, AXL, 24, 2)
/* zicfilp needs a TB flag to track indirect branches */
-FIELD(TB_FLAGS, FCFI_ENABLED, 28, 1)
-FIELD(TB_FLAGS, FCFI_LP_EXPECTED, 29, 1)
+FIELD(TB_FLAGS, FCFI_ENABLED, 26, 1)
+FIELD(TB_FLAGS, FCFI_LP_EXPECTED, 27, 1)
/* zicfiss needs a TB flag so that correct TB is located based on tb flags */
-FIELD(TB_FLAGS, BCFI_ENABLED, 30, 1)
+FIELD(TB_FLAGS, BCFI_ENABLED, 28, 1)
+/* If pointer masking should be applied and address sign extended */
+FIELD(TB_FLAGS, PM_PMM, 29, 2)
+FIELD(TB_FLAGS, PM_SIGNEXTEND, 31, 1)
#ifdef TARGET_RISCV32
#define riscv_cpu_mxl(env) ((void)(env), MXL_RV32)
@@ -775,11 +785,16 @@ static inline uint32_t vext_get_vlmax(uint32_t vlenb, uint32_t vsew,
void cpu_get_tb_cpu_state(CPURISCVState *env, vaddr *pc,
uint64_t *cs_base, uint32_t *pflags);
-void riscv_cpu_update_mask(CPURISCVState *env);
bool riscv_cpu_is_32bit(RISCVCPU *cpu);
+bool riscv_cpu_virt_mem_enabled(CPURISCVState *env);
+RISCVPmPmm riscv_pm_get_pmm(CPURISCVState *env);
+RISCVPmPmm riscv_pm_get_virt_pmm(CPURISCVState *env);
+uint32_t riscv_pm_get_pmlen(RISCVPmPmm pmm);
+
RISCVException riscv_csrr(CPURISCVState *env, int csrno,
target_ulong *ret_value);
+
RISCVException riscv_csrrw(CPURISCVState *env, int csrno,
target_ulong *ret_value,
target_ulong new_value, target_ulong write_mask);
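
Note: the repacked TB flags above add PM_PMM and PM_SIGNEXTEND, and
riscv_pm_get_pmlen() turns the Zjpm PMM encoding into a bit count. A minimal
sketch (not from this series) of the effective-address transform those flags
describe, where sign extension applies when virtual-memory addressing is in
effect:

    /* Sketch: apply Zjpm pointer masking to an effective address.
     * pmlen would come from riscv_pm_get_pmlen(riscv_pm_get_pmm(env));
     * sign_extend mirrors the PM_SIGNEXTEND TB flag.
     */
    static uint64_t apply_pointer_masking(uint64_t addr, uint32_t pmlen,
                                          bool sign_extend)
    {
        if (pmlen == 0) {
            return addr;                      /* masking disabled */
        }
        if (sign_extend) {
            return (uint64_t)((int64_t)(addr << pmlen) >> pmlen);
        }
        return (addr << pmlen) >> pmlen;      /* clear the top pmlen bits */
    }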
diff --git a/target/riscv/cpu_bits.h b/target/riscv/cpu_bits.h
index fe4e34c..f97c48a 100644
--- a/target/riscv/cpu_bits.h
+++ b/target/riscv/cpu_bits.h
@@ -173,6 +173,13 @@
#define CSR_MISELECT 0x350
#define CSR_MIREG 0x351
+/* Machine Indirect Register Alias */
+#define CSR_MIREG2 0x352
+#define CSR_MIREG3 0x353
+#define CSR_MIREG4 0x355
+#define CSR_MIREG5 0x356
+#define CSR_MIREG6 0x357
+
/* Machine-Level Interrupts (AIA) */
#define CSR_MTOPEI 0x35c
#define CSR_MTOPI 0xfb0
@@ -203,6 +210,9 @@
#define CSR_SSTATEEN2 0x10E
#define CSR_SSTATEEN3 0x10F
+/* Supervisor Counter Delegation */
+#define CSR_SCOUNTINHIBIT 0x120
+
/* Supervisor Trap Handling */
#define CSR_SSCRATCH 0x140
#define CSR_SEPC 0x141
@@ -222,6 +232,13 @@
#define CSR_SISELECT 0x150
#define CSR_SIREG 0x151
+/* Supervisor Indirect Register Alias */
+#define CSR_SIREG2 0x152
+#define CSR_SIREG3 0x153
+#define CSR_SIREG4 0x155
+#define CSR_SIREG5 0x156
+#define CSR_SIREG6 0x157
+
/* Supervisor-Level Interrupts (AIA) */
#define CSR_STOPEI 0x15c
#define CSR_STOPI 0xdb0
@@ -288,6 +305,13 @@
#define CSR_VSISELECT 0x250
#define CSR_VSIREG 0x251
+/* Virtual Supervisor Indirect Alias */
+#define CSR_VSIREG2 0x252
+#define CSR_VSIREG3 0x253
+#define CSR_VSIREG4 0x255
+#define CSR_VSIREG5 0x256
+#define CSR_VSIREG6 0x257
+
/* VS-Level Interrupts (H-extension with AIA) */
#define CSR_VSTOPEI 0x25c
#define CSR_VSTOPI 0xeb0
@@ -353,6 +377,12 @@
#define CSR_PMPADDR14 0x3be
#define CSR_PMPADDR15 0x3bf
+/* RNMI */
+#define CSR_MNSCRATCH 0x740
+#define CSR_MNEPC 0x741
+#define CSR_MNCAUSE 0x742
+#define CSR_MNSTATUS 0x744
+
/* Debug/Trace Registers (shared with Debug Mode) */
#define CSR_TSELECT 0x7a0
#define CSR_TDATA1 0x7a1
@@ -497,37 +527,6 @@
#define CSR_MHPMCOUNTER30H 0xb9e
#define CSR_MHPMCOUNTER31H 0xb9f
-/*
- * User PointerMasking registers
- * NB: actual CSR numbers might be changed in future
- */
-#define CSR_UMTE 0x4c0
-#define CSR_UPMMASK 0x4c1
-#define CSR_UPMBASE 0x4c2
-
-/*
- * Machine PointerMasking registers
- * NB: actual CSR numbers might be changed in future
- */
-#define CSR_MMTE 0x3c0
-#define CSR_MPMMASK 0x3c1
-#define CSR_MPMBASE 0x3c2
-
-/*
- * Supervisor PointerMaster registers
- * NB: actual CSR numbers might be changed in future
- */
-#define CSR_SMTE 0x1c0
-#define CSR_SPMMASK 0x1c1
-#define CSR_SPMBASE 0x1c2
-
-/*
- * Hypervisor PointerMaster registers
- * NB: actual CSR numbers might be changed in future
- */
-#define CSR_VSMTE 0x2c0
-#define CSR_VSPMMASK 0x2c1
-#define CSR_VSPMBASE 0x2c2
#define CSR_SCOUNTOVF 0xda0
/* Crypto Extension */
@@ -556,9 +555,11 @@
#define MSTATUS_TW 0x00200000 /* since: priv-1.10 */
#define MSTATUS_TSR 0x00400000 /* since: priv-1.10 */
#define MSTATUS_SPELP 0x00800000 /* zicfilp */
+#define MSTATUS_SDT 0x01000000
#define MSTATUS_MPELP 0x020000000000 /* zicfilp */
#define MSTATUS_GVA 0x4000000000ULL
#define MSTATUS_MPV 0x8000000000ULL
+#define MSTATUS_MDT 0x40000000000ULL /* Smdbltrp extension */
#define MSTATUS64_UXL 0x0000000300000000ULL
#define MSTATUS64_SXL 0x0000000C00000000ULL
@@ -588,6 +589,7 @@ typedef enum {
#define SSTATUS_SUM 0x00040000 /* since: priv-1.10 */
#define SSTATUS_MXR 0x00080000
#define SSTATUS_SPELP MSTATUS_SPELP /* zicfilp */
+#define SSTATUS_SDT MSTATUS_SDT
#define SSTATUS64_UXL 0x0000000300000000ULL
@@ -606,6 +608,7 @@ typedef enum {
#define HSTATUS_VTSR 0x00400000
#define HSTATUS_HUKTE 0x01000000
#define HSTATUS_VSXL 0x300000000
+#define HSTATUS_HUPMM 0x3000000000000
#define HSTATUS32_WPRI 0xFF8FF87E
#define HSTATUS64_WPRI 0xFFFFFFFFFF8FF87EULL
@@ -634,6 +637,12 @@ typedef enum {
#define SATP64_ASID 0x0FFFF00000000000ULL
#define SATP64_PPN 0x00000FFFFFFFFFFFULL
+/* RNMI mnstatus CSR mask */
+#define MNSTATUS_NMIE 0x00000008
+#define MNSTATUS_MNPV 0x00000080
+#define MNSTATUS_MNPELP 0x00000200
+#define MNSTATUS_MNPP 0x00001800
+
/* VM modes (satp.mode) privileged ISA 1.10 */
#define VM_1_10_MBARE 0
#define VM_1_10_SV32 1
@@ -669,6 +678,12 @@ typedef enum {
/* Default Reset Vector address */
#define DEFAULT_RSTVEC 0x1000
+/* Default RNMI Interrupt Vector address */
+#define DEFAULT_RNMI_IRQVEC 0x0
+
+/* Default RNMI Exception Vector address */
+#define DEFAULT_RNMI_EXCPVEC 0x0
+
/* Exception causes */
typedef enum RISCVException {
RISCV_EXCP_NONE = -1, /* sentinel value */
@@ -687,6 +702,7 @@ typedef enum RISCVException {
RISCV_EXCP_INST_PAGE_FAULT = 0xc, /* since: priv-1.10.0 */
RISCV_EXCP_LOAD_PAGE_FAULT = 0xd, /* since: priv-1.10.0 */
RISCV_EXCP_STORE_PAGE_FAULT = 0xf, /* since: priv-1.10.0 */
+ RISCV_EXCP_DOUBLE_TRAP = 0x10,
RISCV_EXCP_SW_CHECK = 0x12, /* since: priv-1.13.0 */
RISCV_EXCP_HW_ERR = 0x13, /* since: priv-1.13.0 */
RISCV_EXCP_INST_GUEST_PAGE_FAULT = 0x14,
@@ -723,6 +739,9 @@ typedef enum RISCVException {
/* -1 is due to bit zero of hgeip and hgeie being ROZ. */
#define IRQ_LOCAL_GUEST_MAX (TARGET_LONG_BITS - 1)
+/* RNMI causes */
+#define RNMI_MAX 16
+
/* mip masks */
#define MIP_USIP (1 << IRQ_U_SOFT)
#define MIP_SSIP (1 << IRQ_S_SOFT)
@@ -759,11 +778,6 @@ typedef enum RISCVException {
#define VS_MODE_INTERRUPTS ((uint64_t)(MIP_VSSIP | MIP_VSTIP | MIP_VSEIP))
#define HS_MODE_INTERRUPTS ((uint64_t)(MIP_SGEIP | VS_MODE_INTERRUPTS))
-/* General PointerMasking CSR bits */
-#define PM_ENABLE 0x00000001ULL
-#define PM_CURRENT 0x00000002ULL
-#define PM_INSN 0x00000004ULL
-
/* Execution environment configuration bits */
#define MENVCFG_FIOM BIT(0)
#define MENVCFG_LPE BIT(2) /* zicfilp */
@@ -771,11 +785,15 @@ typedef enum RISCVException {
#define MENVCFG_CBIE (3UL << 4)
#define MENVCFG_CBCFE BIT(6)
#define MENVCFG_CBZE BIT(7)
+#define MENVCFG_PMM (3ULL << 32)
+#define MENVCFG_DTE (1ULL << 59)
+#define MENVCFG_CDE (1ULL << 60)
#define MENVCFG_ADUE (1ULL << 61)
#define MENVCFG_PBMTE (1ULL << 62)
#define MENVCFG_STCE (1ULL << 63)
/* For RV32 */
+#define MENVCFGH_DTE BIT(27)
#define MENVCFGH_ADUE BIT(29)
#define MENVCFGH_PBMTE BIT(30)
#define MENVCFGH_STCE BIT(31)
@@ -787,6 +805,7 @@ typedef enum RISCVException {
#define SENVCFG_CBCFE MENVCFG_CBCFE
#define SENVCFG_CBZE MENVCFG_CBZE
#define SENVCFG_UKTE BIT(8)
+#define SENVCFG_PMM MENVCFG_PMM
#define HENVCFG_FIOM MENVCFG_FIOM
#define HENVCFG_LPE MENVCFG_LPE
@@ -794,66 +813,18 @@ typedef enum RISCVException {
#define HENVCFG_CBIE MENVCFG_CBIE
#define HENVCFG_CBCFE MENVCFG_CBCFE
#define HENVCFG_CBZE MENVCFG_CBZE
+#define HENVCFG_PMM MENVCFG_PMM
+#define HENVCFG_DTE MENVCFG_DTE
#define HENVCFG_ADUE MENVCFG_ADUE
#define HENVCFG_PBMTE MENVCFG_PBMTE
#define HENVCFG_STCE MENVCFG_STCE
/* For RV32 */
+#define HENVCFGH_DTE MENVCFGH_DTE
#define HENVCFGH_ADUE MENVCFGH_ADUE
#define HENVCFGH_PBMTE MENVCFGH_PBMTE
#define HENVCFGH_STCE MENVCFGH_STCE
-/* Offsets for every pair of control bits per each priv level */
-#define XS_OFFSET 0ULL
-#define U_OFFSET 2ULL
-#define S_OFFSET 5ULL
-#define M_OFFSET 8ULL
-
-#define PM_XS_BITS (EXT_STATUS_MASK << XS_OFFSET)
-#define U_PM_ENABLE (PM_ENABLE << U_OFFSET)
-#define U_PM_CURRENT (PM_CURRENT << U_OFFSET)
-#define U_PM_INSN (PM_INSN << U_OFFSET)
-#define S_PM_ENABLE (PM_ENABLE << S_OFFSET)
-#define S_PM_CURRENT (PM_CURRENT << S_OFFSET)
-#define S_PM_INSN (PM_INSN << S_OFFSET)
-#define M_PM_ENABLE (PM_ENABLE << M_OFFSET)
-#define M_PM_CURRENT (PM_CURRENT << M_OFFSET)
-#define M_PM_INSN (PM_INSN << M_OFFSET)
-
-/* mmte CSR bits */
-#define MMTE_PM_XS_BITS PM_XS_BITS
-#define MMTE_U_PM_ENABLE U_PM_ENABLE
-#define MMTE_U_PM_CURRENT U_PM_CURRENT
-#define MMTE_U_PM_INSN U_PM_INSN
-#define MMTE_S_PM_ENABLE S_PM_ENABLE
-#define MMTE_S_PM_CURRENT S_PM_CURRENT
-#define MMTE_S_PM_INSN S_PM_INSN
-#define MMTE_M_PM_ENABLE M_PM_ENABLE
-#define MMTE_M_PM_CURRENT M_PM_CURRENT
-#define MMTE_M_PM_INSN M_PM_INSN
-#define MMTE_MASK (MMTE_U_PM_ENABLE | MMTE_U_PM_CURRENT | MMTE_U_PM_INSN | \
- MMTE_S_PM_ENABLE | MMTE_S_PM_CURRENT | MMTE_S_PM_INSN | \
- MMTE_M_PM_ENABLE | MMTE_M_PM_CURRENT | MMTE_M_PM_INSN | \
- MMTE_PM_XS_BITS)
-
-/* (v)smte CSR bits */
-#define SMTE_PM_XS_BITS PM_XS_BITS
-#define SMTE_U_PM_ENABLE U_PM_ENABLE
-#define SMTE_U_PM_CURRENT U_PM_CURRENT
-#define SMTE_U_PM_INSN U_PM_INSN
-#define SMTE_S_PM_ENABLE S_PM_ENABLE
-#define SMTE_S_PM_CURRENT S_PM_CURRENT
-#define SMTE_S_PM_INSN S_PM_INSN
-#define SMTE_MASK (SMTE_U_PM_ENABLE | SMTE_U_PM_CURRENT | SMTE_U_PM_INSN | \
- SMTE_S_PM_ENABLE | SMTE_S_PM_CURRENT | SMTE_S_PM_INSN | \
- SMTE_PM_XS_BITS)
-
-/* umte CSR bits */
-#define UMTE_U_PM_ENABLE U_PM_ENABLE
-#define UMTE_U_PM_CURRENT U_PM_CURRENT
-#define UMTE_U_PM_INSN U_PM_INSN
-#define UMTE_MASK (UMTE_U_PM_ENABLE | MMTE_U_PM_CURRENT | UMTE_U_PM_INSN)
-
/* MISELECT, SISELECT, and VSISELECT bits (AIA) */
#define ISELECT_IPRIO0 0x30
#define ISELECT_IPRIO15 0x3f
@@ -865,10 +836,15 @@ typedef enum RISCVException {
#define ISELECT_IMSIC_EIE63 0xff
#define ISELECT_IMSIC_FIRST ISELECT_IMSIC_EIDELIVERY
#define ISELECT_IMSIC_LAST ISELECT_IMSIC_EIE63
-#define ISELECT_MASK 0x1ff
+#define ISELECT_MASK_AIA 0x1ff
+
+/* [M|S|VS]ISELECT value for Indirect CSR Access Extension */
+#define ISELECT_CD_FIRST 0x40
+#define ISELECT_CD_LAST 0x5f
+#define ISELECT_MASK_SXCSRIND 0xfff
/* Dummy [M|S|VS]ISELECT value for emulating [M|S|VS]TOPEI CSRs */
-#define ISELECT_IMSIC_TOPEI (ISELECT_MASK + 1)
+#define ISELECT_IMSIC_TOPEI (ISELECT_MASK_AIA + 1)
/* IMSIC bits (AIA) */
#define IMSIC_TOPEI_IID_SHIFT 16
@@ -961,6 +937,9 @@ typedef enum RISCVException {
#define MHPMEVENT_IDX_MASK 0xFFFFF
#define MHPMEVENT_SSCOF_RESVD 16
+/* RISC-V-specific interrupt pending bits. */
+#define CPU_INTERRUPT_RNMI CPU_INTERRUPT_TGT_EXT_0
+
/* JVT CSR bits */
#define JVT_MODE 0x3F
#define JVT_BASE (~0x3F)
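
Note: the ISELECT_CD_FIRST..ISELECT_CD_LAST window (0x40-0x5f) defined above is
how Smcdeleg/Ssccfg alias the delegated counters through sireg. As an
illustration only (not from this series; the CSR mnemonics depend on assembler
support, otherwise use the numeric addresses 0x150/0x151), S-mode guest code
could read the delegated cycle counter like this:

    /* Sketch: indirect read of the delegated cycle counter.
     * siselect = 0x40 (ISELECT_CD_FIRST) selects cycle; sireg then aliases it.
     */
    static inline unsigned long read_delegated_cycle(void)
    {
        unsigned long v;
        asm volatile("csrw  siselect, %1\n\t"
                     "csrr  %0, sireg"
                     : "=r"(v) : "r"(0x40UL) : "memory");
        return v;
    }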
diff --git a/target/riscv/cpu_cfg.h b/target/riscv/cpu_cfg.h
index a1457ab..b410b1e 100644
--- a/target/riscv/cpu_cfg.h
+++ b/target/riscv/cpu_cfg.h
@@ -78,7 +78,13 @@ struct RISCVCPUConfig {
bool ext_ztso;
bool ext_smstateen;
bool ext_sstc;
+ bool ext_smcdeleg;
+ bool ext_ssccfg;
bool ext_smcntrpmf;
+ bool ext_smcsrind;
+ bool ext_sscsrind;
+ bool ext_ssdbltrp;
+ bool ext_smdbltrp;
bool ext_svadu;
bool ext_svinval;
bool ext_svnapot;
@@ -129,6 +135,12 @@ struct RISCVCPUConfig {
bool ext_ssaia;
bool ext_sscofpmf;
bool ext_smepmp;
+ bool ext_smrnmi;
+ bool ext_ssnpm;
+ bool ext_smnpm;
+ bool ext_smmpm;
+ bool ext_sspm;
+ bool ext_supm;
bool rvv_ta_all_1s;
bool rvv_ma_all_1s;
bool rvv_vl_half_avl;
@@ -141,6 +153,7 @@ struct RISCVCPUConfig {
bool ext_svade;
bool ext_zic64b;
bool ext_ssstateen;
+ bool ext_sha;
/*
* Always 'true' booleans for named features
diff --git a/target/riscv/cpu_helper.c b/target/riscv/cpu_helper.c
index f62b21e..e1dfc4e 100644
--- a/target/riscv/cpu_helper.c
+++ b/target/riscv/cpu_helper.c
@@ -120,12 +120,26 @@ bool cpu_get_bcfien(CPURISCVState *env)
}
}
+bool riscv_env_smode_dbltrp_enabled(CPURISCVState *env, bool virt)
+{
+#ifdef CONFIG_USER_ONLY
+ return false;
+#else
+ if (virt) {
+ return (env->henvcfg & HENVCFG_DTE) != 0;
+ } else {
+ return (env->menvcfg & MENVCFG_DTE) != 0;
+ }
+#endif
+}
+
void cpu_get_tb_cpu_state(CPURISCVState *env, vaddr *pc,
uint64_t *cs_base, uint32_t *pflags)
{
RISCVCPU *cpu = env_archcpu(env);
RISCVExtStatus fs, vs;
uint32_t flags = 0;
+ bool pm_signext = riscv_cpu_virt_mem_enabled(env);
*pc = env->xl == MXL_RV32 ? env->pc & UINT32_MAX : env->pc;
*cs_base = 0;
@@ -210,58 +224,106 @@ void cpu_get_tb_cpu_state(CPURISCVState *env, vaddr *pc,
flags = FIELD_DP32(flags, TB_FLAGS, VS, vs);
flags = FIELD_DP32(flags, TB_FLAGS, XL, env->xl);
flags = FIELD_DP32(flags, TB_FLAGS, AXL, cpu_address_xl(env));
- if (env->cur_pmmask != 0) {
- flags = FIELD_DP32(flags, TB_FLAGS, PM_MASK_ENABLED, 1);
- }
- if (env->cur_pmbase != 0) {
- flags = FIELD_DP32(flags, TB_FLAGS, PM_BASE_ENABLED, 1);
- }
+ flags = FIELD_DP32(flags, TB_FLAGS, PM_PMM, riscv_pm_get_pmm(env));
+ flags = FIELD_DP32(flags, TB_FLAGS, PM_SIGNEXTEND, pm_signext);
*pflags = flags;
}
-void riscv_cpu_update_mask(CPURISCVState *env)
+RISCVPmPmm riscv_pm_get_pmm(CPURISCVState *env)
{
- target_ulong mask = 0, base = 0;
- RISCVMXL xl = env->xl;
- /*
- * TODO: Current RVJ spec does not specify
- * how the extension interacts with XLEN.
- */
#ifndef CONFIG_USER_ONLY
- int mode = cpu_address_mode(env);
- xl = cpu_get_xl(env, mode);
- if (riscv_has_ext(env, RVJ)) {
- switch (mode) {
- case PRV_M:
- if (env->mmte & M_PM_ENABLE) {
- mask = env->mpmmask;
- base = env->mpmbase;
+ int priv_mode = cpu_address_mode(env);
+
+ if (get_field(env->mstatus, MSTATUS_MPRV) &&
+ get_field(env->mstatus, MSTATUS_MXR)) {
+ return PMM_FIELD_DISABLED;
+ }
+
+ /* Get current PMM field */
+ switch (priv_mode) {
+ case PRV_M:
+ if (riscv_cpu_cfg(env)->ext_smmpm) {
+ return get_field(env->mseccfg, MSECCFG_PMM);
+ }
+ break;
+ case PRV_S:
+ if (riscv_cpu_cfg(env)->ext_smnpm) {
+ if (get_field(env->mstatus, MSTATUS_MPV)) {
+ return get_field(env->henvcfg, HENVCFG_PMM);
+ } else {
+ return get_field(env->menvcfg, MENVCFG_PMM);
}
- break;
- case PRV_S:
- if (env->mmte & S_PM_ENABLE) {
- mask = env->spmmask;
- base = env->spmbase;
+ }
+ break;
+ case PRV_U:
+ if (riscv_has_ext(env, RVS)) {
+ if (riscv_cpu_cfg(env)->ext_ssnpm) {
+ return get_field(env->senvcfg, SENVCFG_PMM);
}
- break;
- case PRV_U:
- if (env->mmte & U_PM_ENABLE) {
- mask = env->upmmask;
- base = env->upmbase;
+ } else {
+ if (riscv_cpu_cfg(env)->ext_smnpm) {
+ return get_field(env->menvcfg, MENVCFG_PMM);
}
- break;
- default:
- g_assert_not_reached();
}
+ break;
+ default:
+ g_assert_not_reached();
}
+ return PMM_FIELD_DISABLED;
+#else
+ return PMM_FIELD_DISABLED;
#endif
- if (xl == MXL_RV32) {
- env->cur_pmmask = mask & UINT32_MAX;
- env->cur_pmbase = base & UINT32_MAX;
+}
+
+RISCVPmPmm riscv_pm_get_virt_pmm(CPURISCVState *env)
+{
+#ifndef CONFIG_USER_ONLY
+ int priv_mode = cpu_address_mode(env);
+
+ if (priv_mode == PRV_U) {
+ return get_field(env->hstatus, HSTATUS_HUPMM);
+ } else {
+ if (get_field(env->hstatus, HSTATUS_SPVP)) {
+ return get_field(env->henvcfg, HENVCFG_PMM);
+ } else {
+ return get_field(env->senvcfg, SENVCFG_PMM);
+ }
+ }
+#else
+ return PMM_FIELD_DISABLED;
+#endif
+}
+
+bool riscv_cpu_virt_mem_enabled(CPURISCVState *env)
+{
+#ifndef CONFIG_USER_ONLY
+ int satp_mode = 0;
+ int priv_mode = cpu_address_mode(env);
+
+ if (riscv_cpu_mxl(env) == MXL_RV32) {
+ satp_mode = get_field(env->satp, SATP32_MODE);
} else {
- env->cur_pmmask = mask;
- env->cur_pmbase = base;
+ satp_mode = get_field(env->satp, SATP64_MODE);
+ }
+
+ return ((satp_mode != VM_1_10_MBARE) && (priv_mode != PRV_M));
+#else
+ return false;
+#endif
+}
+
+uint32_t riscv_pm_get_pmlen(RISCVPmPmm pmm)
+{
+ switch (pmm) {
+ case PMM_FIELD_DISABLED:
+ return 0;
+ case PMM_FIELD_PMLEN7:
+ return 7;
+ case PMM_FIELD_PMLEN16:
+ return 16;
+ default:
+ g_assert_not_reached();
}
}
@@ -505,6 +567,18 @@ static int riscv_cpu_local_irq_pending(CPURISCVState *env)
uint64_t vsbits, irq_delegated;
int virq;
+ /* Priority: RNMI > Other interrupt. */
+ if (riscv_cpu_cfg(env)->ext_smrnmi) {
+ /* If mnstatus.NMIE == 0, all interrupts are disabled. */
+ if (!get_field(env->mnstatus, MNSTATUS_NMIE)) {
+ return RISCV_EXCP_NONE;
+ }
+
+ if (env->rnmip) {
+ return ctz64(env->rnmip); /* since non-zero */
+ }
+ }
+
/* Determine interrupt enable state of all privilege modes */
if (env->virt_enabled) {
mie = 1;
@@ -567,7 +641,9 @@ static int riscv_cpu_local_irq_pending(CPURISCVState *env)
bool riscv_cpu_exec_interrupt(CPUState *cs, int interrupt_request)
{
- if (interrupt_request & CPU_INTERRUPT_HARD) {
+ uint32_t mask = CPU_INTERRUPT_HARD | CPU_INTERRUPT_RNMI;
+
+ if (interrupt_request & mask) {
RISCVCPU *cpu = RISCV_CPU(cs);
CPURISCVState *env = &cpu->env;
int interruptno = riscv_cpu_local_irq_pending(env);
@@ -628,6 +704,10 @@ void riscv_cpu_swap_hypervisor_regs(CPURISCVState *env)
g_assert(riscv_has_ext(env, RVH));
+ if (riscv_env_smode_dbltrp_enabled(env, current_virt)) {
+ mstatus_mask |= MSTATUS_SDT;
+ }
+
if (current_virt) {
/* Current V=1 and we are about to change to V=0 */
env->vsstatus = env->mstatus & mstatus_mask;
@@ -699,6 +779,30 @@ void riscv_cpu_set_geilen(CPURISCVState *env, target_ulong geilen)
env->geilen = geilen;
}
+void riscv_cpu_set_rnmi(RISCVCPU *cpu, uint32_t irq, bool level)
+{
+ CPURISCVState *env = &cpu->env;
+ CPUState *cs = CPU(cpu);
+ bool release_lock = false;
+
+ if (!bql_locked()) {
+ release_lock = true;
+ bql_lock();
+ }
+
+ if (level) {
+ env->rnmip |= 1 << irq;
+ cpu_interrupt(cs, CPU_INTERRUPT_RNMI);
+ } else {
+ env->rnmip &= ~(1 << irq);
+ cpu_reset_interrupt(cs, CPU_INTERRUPT_RNMI);
+ }
+
+ if (release_lock) {
+ bql_unlock();
+ }
+}
+
int riscv_cpu_claim_interrupts(RISCVCPU *cpu, uint64_t interrupts)
{
CPURISCVState *env = &cpu->env;
@@ -786,7 +890,6 @@ void riscv_cpu_set_mode(CPURISCVState *env, target_ulong newpriv, bool virt_en)
/* tlb_flush is unnecessary as mode is contained in mmu_idx */
env->priv = newpriv;
env->xl = cpu_recompute_xl(env);
- riscv_cpu_update_mask(env);
/*
* Clear the load reservation - otherwise a reservation placed in one
@@ -1835,6 +1938,24 @@ static target_ulong promote_load_fault(target_ulong orig_cause)
/* if no promotion, return original cause */
return orig_cause;
}
+
+static void riscv_do_nmi(CPURISCVState *env, target_ulong cause, bool virt)
+{
+ env->mnstatus = set_field(env->mnstatus, MNSTATUS_NMIE, false);
+ env->mnstatus = set_field(env->mnstatus, MNSTATUS_MNPV, virt);
+ env->mnstatus = set_field(env->mnstatus, MNSTATUS_MNPP, env->priv);
+ env->mncause = cause;
+ env->mnepc = env->pc;
+ env->pc = env->rnmi_irqvec;
+
+ if (cpu_get_fcfien(env)) {
+ env->mnstatus = set_field(env->mnstatus, MNSTATUS_MNPELP, env->elp);
+ }
+
+ /* Trapping to M mode, virt is disabled */
+ riscv_cpu_set_mode(env, PRV_M, false);
+}
+
/*
* Handle Traps
*
@@ -1848,7 +1969,9 @@ void riscv_cpu_do_interrupt(CPUState *cs)
bool virt = env->virt_enabled;
bool write_gva = false;
bool always_storeamo = (env->excp_uw2 & RISCV_UW2_ALWAYS_STORE_AMO);
+ bool vsmode_exc;
uint64_t s;
+ int mode;
/*
* cs->exception is 32-bits wide unlike mcause which is XLEN-bits wide
@@ -1861,12 +1984,21 @@ void riscv_cpu_do_interrupt(CPUState *cs)
!(env->mip & (1ULL << cause));
bool vs_injected = env->hvip & (1ULL << cause) & env->hvien &&
!(env->mip & (1ULL << cause));
+ bool smode_double_trap = false;
+ uint64_t hdeleg = async ? env->hideleg : env->hedeleg;
target_ulong tval = 0;
target_ulong tinst = 0;
target_ulong htval = 0;
target_ulong mtval2 = 0;
int sxlen = 0;
- int mxlen = 0;
+ int mxlen = 16 << riscv_cpu_mxl(env);
+ bool nnmi_excep = false;
+
+ if (cpu->cfg.ext_smrnmi && env->rnmip && async) {
+ riscv_do_nmi(env, cause | ((target_ulong)1U << (mxlen - 1)),
+ env->virt_enabled);
+ return;
+ }
if (!async) {
/* set tval to badaddr for traps with address information */
@@ -1960,8 +2092,34 @@ void riscv_cpu_do_interrupt(CPUState *cs)
__func__, env->mhartid, async, cause, env->pc, tval,
riscv_cpu_get_trap_name(cause, async));
- if (env->priv <= PRV_S && cause < 64 &&
- (((deleg >> cause) & 1) || s_injected || vs_injected)) {
+ mode = env->priv <= PRV_S && cause < 64 &&
+ (((deleg >> cause) & 1) || s_injected || vs_injected) ? PRV_S : PRV_M;
+
+ vsmode_exc = env->virt_enabled && (((hdeleg >> cause) & 1) || vs_injected);
+ /*
+ * Check double trap condition only if already in S-mode and targeting
+ * S-mode
+ */
+ if (cpu->cfg.ext_ssdbltrp && env->priv == PRV_S && mode == PRV_S) {
+ bool dte = (env->menvcfg & MENVCFG_DTE) != 0;
+ bool sdt = (env->mstatus & MSTATUS_SDT) != 0;
+ /* In VS or HS */
+ if (riscv_has_ext(env, RVH)) {
+ if (vsmode_exc) {
+ /* VS -> VS, use henvcfg instead of menvcfg*/
+ dte = (env->henvcfg & HENVCFG_DTE) != 0;
+ } else if (env->virt_enabled) {
+ /* VS -> HS, use mstatus_hs */
+ sdt = (env->mstatus_hs & MSTATUS_SDT) != 0;
+ }
+ }
+ smode_double_trap = dte && sdt;
+ if (smode_double_trap) {
+ mode = PRV_M;
+ }
+ }
+
+ if (mode == PRV_S) {
/* handle the trap in S-mode */
/* save elp status */
if (cpu_get_fcfien(env)) {
@@ -1969,10 +2127,7 @@ void riscv_cpu_do_interrupt(CPUState *cs)
}
if (riscv_has_ext(env, RVH)) {
- uint64_t hdeleg = async ? env->hideleg : env->hedeleg;
-
- if (env->virt_enabled &&
- (((hdeleg >> cause) & 1) || vs_injected)) {
+ if (vsmode_exc) {
/* Trap to VS mode */
/*
* See if we need to adjust cause. Yes if its VS mode interrupt
@@ -2005,6 +2160,9 @@ void riscv_cpu_do_interrupt(CPUState *cs)
s = set_field(s, MSTATUS_SPIE, get_field(s, MSTATUS_SIE));
s = set_field(s, MSTATUS_SPP, env->priv);
s = set_field(s, MSTATUS_SIE, 0);
+ if (riscv_env_smode_dbltrp_enabled(env, virt)) {
+ s = set_field(s, MSTATUS_SDT, 1);
+ }
env->mstatus = s;
sxlen = 16 << riscv_cpu_sxl(env);
env->scause = cause | ((target_ulong)async << (sxlen - 1));
@@ -2016,10 +2174,23 @@ void riscv_cpu_do_interrupt(CPUState *cs)
((async && (env->stvec & 3) == 1) ? cause * 4 : 0);
riscv_cpu_set_mode(env, PRV_S, virt);
} else {
+ /*
+ * If the hart encounters an exception while executing in M-mode
+ * with the mnstatus.NMIE bit clear, the exception is an RNMI exception.
+ */
+ nnmi_excep = cpu->cfg.ext_smrnmi &&
+ !get_field(env->mnstatus, MNSTATUS_NMIE) &&
+ !async;
+
/* handle the trap in M-mode */
/* save elp status */
if (cpu_get_fcfien(env)) {
- env->mstatus = set_field(env->mstatus, MSTATUS_MPELP, env->elp);
+ if (nnmi_excep) {
+ env->mnstatus = set_field(env->mnstatus, MNSTATUS_MNPELP,
+ env->elp);
+ } else {
+ env->mstatus = set_field(env->mstatus, MSTATUS_MPELP, env->elp);
+ }
}
if (riscv_has_ext(env, RVH)) {
@@ -2037,20 +2208,54 @@ void riscv_cpu_do_interrupt(CPUState *cs)
/* Trapping to M mode, virt is disabled */
virt = false;
}
+ /*
+ * If the hart encounters an exception while executing in M-mode,
+ * with the mnstatus.NMIE bit clear, the program counter is set to
+ * the RNMI exception trap handler address.
+ */
+ nnmi_excep = cpu->cfg.ext_smrnmi &&
+ !get_field(env->mnstatus, MNSTATUS_NMIE) &&
+ !async;
s = env->mstatus;
s = set_field(s, MSTATUS_MPIE, get_field(s, MSTATUS_MIE));
s = set_field(s, MSTATUS_MPP, env->priv);
s = set_field(s, MSTATUS_MIE, 0);
+ if (cpu->cfg.ext_smdbltrp) {
+ if (env->mstatus & MSTATUS_MDT) {
+ assert(env->priv == PRV_M);
+ if (!cpu->cfg.ext_smrnmi || nnmi_excep) {
+ cpu_abort(CPU(cpu), "M-mode double trap\n");
+ } else {
+ riscv_do_nmi(env, cause, false);
+ return;
+ }
+ }
+
+ s = set_field(s, MSTATUS_MDT, 1);
+ }
env->mstatus = s;
- mxlen = 16 << riscv_cpu_mxl(env);
env->mcause = cause | ((target_ulong)async << (mxlen - 1));
+ if (smode_double_trap) {
+ env->mtval2 = env->mcause;
+ env->mcause = RISCV_EXCP_DOUBLE_TRAP;
+ } else {
+ env->mtval2 = mtval2;
+ }
env->mepc = env->pc;
env->mtval = tval;
- env->mtval2 = mtval2;
env->mtinst = tinst;
- env->pc = (env->mtvec >> 2 << 2) +
- ((async && (env->mtvec & 3) == 1) ? cause * 4 : 0);
+
+ /*
+ * For RNMI exception, program counter is set to the RNMI exception
+ * trap handler address.
+ */
+ if (nnmi_excep) {
+ env->pc = env->rnmi_excpvec;
+ } else {
+ env->pc = (env->mtvec >> 2 << 2) +
+ ((async && (env->mtvec & 3) == 1) ? cause * 4 : 0);
+ }
riscv_cpu_set_mode(env, PRV_M, virt);
}
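
Note: riscv_cpu_set_rnmi() above is driven by the "riscv.cpu.rnmi" GPIO array
registered in riscv_cpu_init(). A sketch of how a hypothetical machine model
(not part of this series) might pulse RNMI line 0 on a hart:

    /* Sketch: assert and release resumable-NMI line 0 on one CPU. */
    qemu_irq rnmi0 = qdev_get_gpio_in_named(DEVICE(cpu), "riscv.cpu.rnmi", 0);
    qemu_set_irq(rnmi0, 1);   /* sets rnmip bit 0 and raises CPU_INTERRUPT_RNMI */
    qemu_set_irq(rnmi0, 0);   /* clears the pending bit again */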
diff --git a/target/riscv/csr.c b/target/riscv/csr.c
index 381cda8..afb7544 100644
--- a/target/riscv/csr.c
+++ b/target/riscv/csr.c
@@ -29,6 +29,7 @@
#include "system/cpu-timers.h"
#include "qemu/guest-random.h"
#include "qapi/error.h"
+#include <stdbool.h>
/* CSR function table public API */
void riscv_get_csr_ops(int csrno, riscv_csr_operations *ops)
@@ -305,6 +306,24 @@ static RISCVException aia_any32(CPURISCVState *env, int csrno)
return any32(env, csrno);
}
+static RISCVException csrind_any(CPURISCVState *env, int csrno)
+{
+ if (!riscv_cpu_cfg(env)->ext_smcsrind) {
+ return RISCV_EXCP_ILLEGAL_INST;
+ }
+
+ return RISCV_EXCP_NONE;
+}
+
+static RISCVException csrind_or_aia_any(CPURISCVState *env, int csrno)
+{
+ if (!riscv_cpu_cfg(env)->ext_smaia && !riscv_cpu_cfg(env)->ext_smcsrind) {
+ return RISCV_EXCP_ILLEGAL_INST;
+ }
+
+ return any(env, csrno);
+}
+
static RISCVException smode(CPURISCVState *env, int csrno)
{
if (riscv_has_ext(env, RVS)) {
@@ -325,22 +344,93 @@ static RISCVException smode32(CPURISCVState *env, int csrno)
static RISCVException aia_smode(CPURISCVState *env, int csrno)
{
+ int ret;
+
if (!riscv_cpu_cfg(env)->ext_ssaia) {
return RISCV_EXCP_ILLEGAL_INST;
}
+ if (csrno == CSR_STOPEI) {
+ ret = smstateen_acc_ok(env, 0, SMSTATEEN0_IMSIC);
+ } else {
+ ret = smstateen_acc_ok(env, 0, SMSTATEEN0_AIA);
+ }
+
+ if (ret != RISCV_EXCP_NONE) {
+ return ret;
+ }
+
return smode(env, csrno);
}
static RISCVException aia_smode32(CPURISCVState *env, int csrno)
{
+ int ret;
+
if (!riscv_cpu_cfg(env)->ext_ssaia) {
return RISCV_EXCP_ILLEGAL_INST;
}
+ ret = smstateen_acc_ok(env, 0, SMSTATEEN0_AIA);
+ if (ret != RISCV_EXCP_NONE) {
+ return ret;
+ }
+
+ if (ret != RISCV_EXCP_NONE) {
+ return ret;
+ }
+
return smode32(env, csrno);
}
+static RISCVException scountinhibit_pred(CPURISCVState *env, int csrno)
+{
+ RISCVCPU *cpu = env_archcpu(env);
+
+ if (!cpu->cfg.ext_ssccfg || !cpu->cfg.ext_smcdeleg) {
+ return RISCV_EXCP_ILLEGAL_INST;
+ }
+
+ if (env->virt_enabled) {
+ return RISCV_EXCP_VIRT_INSTRUCTION_FAULT;
+ }
+
+ return smode(env, csrno);
+}
+
+static bool csrind_extensions_present(CPURISCVState *env)
+{
+ return riscv_cpu_cfg(env)->ext_smcsrind || riscv_cpu_cfg(env)->ext_sscsrind;
+}
+
+static bool aia_extensions_present(CPURISCVState *env)
+{
+ return riscv_cpu_cfg(env)->ext_smaia || riscv_cpu_cfg(env)->ext_ssaia;
+}
+
+static bool csrind_or_aia_extensions_present(CPURISCVState *env)
+{
+ return csrind_extensions_present(env) || aia_extensions_present(env);
+}
+
+static RISCVException csrind_smode(CPURISCVState *env, int csrno)
+{
+ if (!csrind_extensions_present(env)) {
+ return RISCV_EXCP_ILLEGAL_INST;
+ }
+
+ return smode(env, csrno);
+}
+
+static RISCVException csrind_or_aia_smode(CPURISCVState *env, int csrno)
+{
+ if (!csrind_or_aia_extensions_present(env)) {
+ return RISCV_EXCP_ILLEGAL_INST;
+ }
+
+ return smode(env, csrno);
+}
+
static RISCVException hmode(CPURISCVState *env, int csrno)
{
if (riscv_has_ext(env, RVH)) {
@@ -360,6 +450,24 @@ static RISCVException hmode32(CPURISCVState *env, int csrno)
}
+static RISCVException csrind_hmode(CPURISCVState *env, int csrno)
+{
+ if (!csrind_extensions_present(env)) {
+ return RISCV_EXCP_ILLEGAL_INST;
+ }
+
+ return hmode(env, csrno);
+}
+
+static RISCVException csrind_or_aia_hmode(CPURISCVState *env, int csrno)
+{
+ if (!csrind_or_aia_extensions_present(env)) {
+ return RISCV_EXCP_ILLEGAL_INST;
+ }
+
+ return hmode(env, csrno);
+}
+
static RISCVException umode(CPURISCVState *env, int csrno)
{
if (riscv_has_ext(env, RVU)) {
@@ -531,27 +639,40 @@ static RISCVException hgatp(CPURISCVState *env, int csrno)
return hmode(env, csrno);
}
-/* Checks if PointerMasking registers could be accessed */
-static RISCVException pointer_masking(CPURISCVState *env, int csrno)
-{
- /* Check if j-ext is present */
- if (riscv_has_ext(env, RVJ)) {
- return RISCV_EXCP_NONE;
- }
- return RISCV_EXCP_ILLEGAL_INST;
-}
-
static RISCVException aia_hmode(CPURISCVState *env, int csrno)
{
+ int ret;
+
if (!riscv_cpu_cfg(env)->ext_ssaia) {
return RISCV_EXCP_ILLEGAL_INST;
}
- return hmode(env, csrno);
+ if (csrno == CSR_VSTOPEI) {
+ ret = smstateen_acc_ok(env, 0, SMSTATEEN0_IMSIC);
+ } else {
+ ret = smstateen_acc_ok(env, 0, SMSTATEEN0_AIA);
+ }
+
+ if (ret != RISCV_EXCP_NONE) {
+ return ret;
+ }
+
+ return hmode(env, csrno);
}
static RISCVException aia_hmode32(CPURISCVState *env, int csrno)
{
+ int ret;
+
+ if (!riscv_cpu_cfg(env)->ext_ssaia) {
+ return RISCV_EXCP_ILLEGAL_INST;
+ }
+
+ ret = smstateen_acc_ok(env, 0, SMSTATEEN0_AIA);
+ if (ret != RISCV_EXCP_NONE) {
+ return ret;
+ }
+
if (!riscv_cpu_cfg(env)->ext_ssaia) {
return RISCV_EXCP_ILLEGAL_INST;
}
@@ -559,6 +680,15 @@ static RISCVException aia_hmode32(CPURISCVState *env, int csrno)
return hmode32(env, csrno);
}
+static RISCVException dbltrp_hmode(CPURISCVState *env, int csrno)
+{
+ if (riscv_cpu_cfg(env)->ext_ssdbltrp) {
+ return RISCV_EXCP_NONE;
+ }
+
+ return hmode(env, csrno);
+}
+
static RISCVException pmp(CPURISCVState *env, int csrno)
{
if (riscv_cpu_cfg(env)->pmp) {
@@ -585,6 +715,9 @@ static RISCVException have_mseccfg(CPURISCVState *env, int csrno)
if (riscv_cpu_cfg(env)->ext_zkr) {
return RISCV_EXCP_NONE;
}
+ if (riscv_cpu_cfg(env)->ext_smmpm) {
+ return RISCV_EXCP_NONE;
+ }
return RISCV_EXCP_ILLEGAL_INST;
}
@@ -597,6 +730,17 @@ static RISCVException debug(CPURISCVState *env, int csrno)
return RISCV_EXCP_ILLEGAL_INST;
}
+
+static RISCVException rnmi(CPURISCVState *env, int csrno)
+{
+ RISCVCPU *cpu = env_archcpu(env);
+
+ if (cpu->cfg.ext_smrnmi) {
+ return RISCV_EXCP_NONE;
+ }
+
+ return RISCV_EXCP_ILLEGAL_INST;
+}
#endif
static RISCVException seed(CPURISCVState *env, int csrno)
@@ -1104,10 +1248,9 @@ done:
return result;
}
-static RISCVException write_mhpmcounter(CPURISCVState *env, int csrno,
- target_ulong val)
+static RISCVException riscv_pmu_write_ctr(CPURISCVState *env, target_ulong val,
+ uint32_t ctr_idx)
{
- int ctr_idx = csrno - CSR_MCYCLE;
PMUCTRState *counter = &env->pmu_ctrs[ctr_idx];
uint64_t mhpmctr_val = val;
@@ -1132,10 +1275,9 @@ static RISCVException write_mhpmcounter(CPURISCVState *env, int csrno,
return RISCV_EXCP_NONE;
}
-static RISCVException write_mhpmcounterh(CPURISCVState *env, int csrno,
- target_ulong val)
+static RISCVException riscv_pmu_write_ctrh(CPURISCVState *env, target_ulong val,
+ uint32_t ctr_idx)
{
- int ctr_idx = csrno - CSR_MCYCLEH;
PMUCTRState *counter = &env->pmu_ctrs[ctr_idx];
uint64_t mhpmctr_val = counter->mhpmcounter_val;
uint64_t mhpmctrh_val = val;
@@ -1157,6 +1299,20 @@ static RISCVException write_mhpmcounterh(CPURISCVState *env, int csrno,
return RISCV_EXCP_NONE;
}
+static int write_mhpmcounter(CPURISCVState *env, int csrno, target_ulong val)
+{
+ int ctr_idx = csrno - CSR_MCYCLE;
+
+ return riscv_pmu_write_ctr(env, val, ctr_idx);
+}
+
+static int write_mhpmcounterh(CPURISCVState *env, int csrno, target_ulong val)
+{
+ int ctr_idx = csrno - CSR_MCYCLEH;
+
+ return riscv_pmu_write_ctrh(env, val, ctr_idx);
+}
+
RISCVException riscv_pmu_read_ctr(CPURISCVState *env, target_ulong *val,
bool upper_half, uint32_t ctr_idx)
{
@@ -1222,6 +1378,167 @@ static RISCVException read_hpmcounterh(CPURISCVState *env, int csrno,
return riscv_pmu_read_ctr(env, val, true, ctr_index);
}
+static int rmw_cd_mhpmcounter(CPURISCVState *env, int ctr_idx,
+ target_ulong *val, target_ulong new_val,
+ target_ulong wr_mask)
+{
+ if (wr_mask != 0 && wr_mask != -1) {
+ return -EINVAL;
+ }
+
+ if (!wr_mask && val) {
+ riscv_pmu_read_ctr(env, val, false, ctr_idx);
+ } else if (wr_mask) {
+ riscv_pmu_write_ctr(env, new_val, ctr_idx);
+ } else {
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static int rmw_cd_mhpmcounterh(CPURISCVState *env, int ctr_idx,
+ target_ulong *val, target_ulong new_val,
+ target_ulong wr_mask)
+{
+ if (wr_mask != 0 && wr_mask != -1) {
+ return -EINVAL;
+ }
+
+ if (!wr_mask && val) {
+ riscv_pmu_read_ctr(env, val, true, ctr_idx);
+ } else if (wr_mask) {
+ riscv_pmu_write_ctrh(env, new_val, ctr_idx);
+ } else {
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static int rmw_cd_mhpmevent(CPURISCVState *env, int evt_index,
+ target_ulong *val, target_ulong new_val,
+ target_ulong wr_mask)
+{
+ uint64_t mhpmevt_val = new_val;
+
+ if (wr_mask != 0 && wr_mask != -1) {
+ return -EINVAL;
+ }
+
+ if (!wr_mask && val) {
+ *val = env->mhpmevent_val[evt_index];
+ if (riscv_cpu_cfg(env)->ext_sscofpmf) {
+ *val &= ~MHPMEVENT_BIT_MINH;
+ }
+ } else if (wr_mask) {
+ wr_mask &= ~MHPMEVENT_BIT_MINH;
+ mhpmevt_val = (new_val & wr_mask) |
+ (env->mhpmevent_val[evt_index] & ~wr_mask);
+ if (riscv_cpu_mxl(env) == MXL_RV32) {
+ mhpmevt_val = mhpmevt_val |
+ ((uint64_t)env->mhpmeventh_val[evt_index] << 32);
+ }
+ env->mhpmevent_val[evt_index] = mhpmevt_val;
+ riscv_pmu_update_event_map(env, mhpmevt_val, evt_index);
+ } else {
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static int rmw_cd_mhpmeventh(CPURISCVState *env, int evt_index,
+ target_ulong *val, target_ulong new_val,
+ target_ulong wr_mask)
+{
+ uint64_t mhpmevth_val;
+ uint64_t mhpmevt_val = env->mhpmevent_val[evt_index];
+
+ if (wr_mask != 0 && wr_mask != -1) {
+ return -EINVAL;
+ }
+
+ if (!wr_mask && val) {
+ *val = env->mhpmeventh_val[evt_index];
+ if (riscv_cpu_cfg(env)->ext_sscofpmf) {
+ *val &= ~MHPMEVENTH_BIT_MINH;
+ }
+ } else if (wr_mask) {
+ wr_mask &= ~MHPMEVENTH_BIT_MINH;
+ env->mhpmeventh_val[evt_index] =
+ (new_val & wr_mask) | (env->mhpmeventh_val[evt_index] & ~wr_mask);
+ mhpmevth_val = env->mhpmeventh_val[evt_index];
+ mhpmevt_val = mhpmevt_val | (mhpmevth_val << 32);
+ riscv_pmu_update_event_map(env, mhpmevt_val, evt_index);
+ } else {
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static int rmw_cd_ctr_cfg(CPURISCVState *env, int cfg_index, target_ulong *val,
+ target_ulong new_val, target_ulong wr_mask)
+{
+ switch (cfg_index) {
+ case 0: /* CYCLECFG */
+ if (wr_mask) {
+ wr_mask &= ~MCYCLECFG_BIT_MINH;
+ env->mcyclecfg = (new_val & wr_mask) | (env->mcyclecfg & ~wr_mask);
+ } else {
+ *val = env->mcyclecfg &= ~MHPMEVENTH_BIT_MINH;
+ }
+ break;
+ case 2: /* INSTRETCFG */
+ if (wr_mask) {
+ wr_mask &= ~MINSTRETCFG_BIT_MINH;
+ env->minstretcfg = (new_val & wr_mask) |
+ (env->minstretcfg & ~wr_mask);
+ } else {
+ *val = env->minstretcfg &= ~MHPMEVENTH_BIT_MINH;
+ }
+ break;
+ default:
+ return -EINVAL;
+ }
+ return 0;
+}
+
+static int rmw_cd_ctr_cfgh(CPURISCVState *env, int cfg_index, target_ulong *val,
+ target_ulong new_val, target_ulong wr_mask)
+{
+
+ if (riscv_cpu_mxl(env) != MXL_RV32) {
+ return RISCV_EXCP_ILLEGAL_INST;
+ }
+
+ switch (cfg_index) {
+ case 0: /* CYCLECFGH */
+ if (wr_mask) {
+ wr_mask &= ~MCYCLECFGH_BIT_MINH;
+ env->mcyclecfgh = (new_val & wr_mask) |
+ (env->mcyclecfgh & ~wr_mask);
+ } else {
+ *val = env->mcyclecfgh;
+ }
+ break;
+ case 2: /* INSTRETCFGH */
+ if (wr_mask) {
+ wr_mask &= ~MINSTRETCFGH_BIT_MINH;
+ env->minstretcfgh = (new_val & wr_mask) |
+ (env->minstretcfgh & ~wr_mask);
+ } else {
+ *val = env->minstretcfgh;
+ }
+ break;
+ default:
+ return -EINVAL;
+ }
+ return 0;
+}
+
+
static RISCVException read_scountovf(CPURISCVState *env, int csrno,
target_ulong *val)
{
@@ -1231,6 +1548,14 @@ static RISCVException read_scountovf(CPURISCVState *env, int csrno,
target_ulong *mhpm_evt_val;
uint64_t of_bit_mask;
+ /* Virtualize scountovf for counter delegation */
+ if (riscv_cpu_cfg(env)->ext_sscofpmf &&
+ riscv_cpu_cfg(env)->ext_ssccfg &&
+ get_field(env->menvcfg, MENVCFG_CDE) &&
+ env->virt_enabled) {
+ return RISCV_EXCP_VIRT_INSTRUCTION_FAULT;
+ }
+
if (riscv_cpu_mxl(env) == MXL_RV32) {
mhpm_evt_val = env->mhpmeventh_val;
of_bit_mask = MHPMEVENTH_BIT_OF;
@@ -1622,6 +1947,20 @@ static RISCVException write_mstatus(CPURISCVState *env, int csrno,
mask |= MSTATUS_VS;
}
+ if (riscv_env_smode_dbltrp_enabled(env, env->virt_enabled)) {
+ mask |= MSTATUS_SDT;
+ if ((val & MSTATUS_SDT) != 0) {
+ val &= ~MSTATUS_SIE;
+ }
+ }
+
+ if (riscv_cpu_cfg(env)->ext_smdbltrp) {
+ mask |= MSTATUS_MDT;
+ if ((val & MSTATUS_MDT) != 0) {
+ val &= ~MSTATUS_MIE;
+ }
+ }
+
if (xl != MXL_RV32 || env->debugger) {
if (riscv_has_ext(env, RVH)) {
mask |= MSTATUS_MPV | MSTATUS_GVA;
@@ -1648,7 +1987,6 @@ static RISCVException write_mstatus(CPURISCVState *env, int csrno,
env->xl = cpu_recompute_xl(env);
}
- riscv_cpu_update_mask(env);
return RISCV_EXCP_NONE;
}
@@ -1665,6 +2003,12 @@ static RISCVException write_mstatush(CPURISCVState *env, int csrno,
uint64_t valh = (uint64_t)val << 32;
uint64_t mask = riscv_has_ext(env, RVH) ? MSTATUS_MPV | MSTATUS_GVA : 0;
+ if (riscv_cpu_cfg(env)->ext_smdbltrp) {
+ mask |= MSTATUS_MDT;
+ if ((valh & MSTATUS_MDT) != 0) {
+ mask |= MSTATUS_MIE;
+ }
+ }
env->mstatus = (env->mstatus & ~mask) | (valh & mask);
return RISCV_EXCP_NONE;
@@ -1966,14 +2310,41 @@ static int aia_xlate_vs_csrno(CPURISCVState *env, int csrno)
};
}
+static int csrind_xlate_vs_csrno(CPURISCVState *env, int csrno)
+{
+ if (!env->virt_enabled) {
+ return csrno;
+ }
+
+ switch (csrno) {
+ case CSR_SISELECT:
+ return CSR_VSISELECT;
+ case CSR_SIREG:
+ case CSR_SIREG2:
+ case CSR_SIREG3:
+ case CSR_SIREG4:
+ case CSR_SIREG5:
+ case CSR_SIREG6:
+ return CSR_VSIREG + (csrno - CSR_SIREG);
+ default:
+ return csrno;
+ };
+}
+
static RISCVException rmw_xiselect(CPURISCVState *env, int csrno,
target_ulong *val, target_ulong new_val,
target_ulong wr_mask)
{
target_ulong *iselect;
+ int ret;
+
+ ret = smstateen_acc_ok(env, 0, SMSTATEEN0_SVSLCT);
+ if (ret != RISCV_EXCP_NONE) {
+ return ret;
+ }
/* Translate CSR number for VS-mode */
- csrno = aia_xlate_vs_csrno(env, csrno);
+ csrno = csrind_xlate_vs_csrno(env, csrno);
/* Find the iselect CSR based on CSR number */
switch (csrno) {
@@ -1994,7 +2365,12 @@ static RISCVException rmw_xiselect(CPURISCVState *env, int csrno,
*val = *iselect;
}
- wr_mask &= ISELECT_MASK;
+ if (riscv_cpu_cfg(env)->ext_smcsrind || riscv_cpu_cfg(env)->ext_sscsrind) {
+ wr_mask &= ISELECT_MASK_SXCSRIND;
+ } else {
+ wr_mask &= ISELECT_MASK_AIA;
+ }
+
if (wr_mask) {
*iselect = (*iselect & ~wr_mask) | (new_val & wr_mask);
}
@@ -2002,6 +2378,17 @@ static RISCVException rmw_xiselect(CPURISCVState *env, int csrno,
return RISCV_EXCP_NONE;
}
+static bool xiselect_aia_range(target_ulong isel)
+{
+ return (ISELECT_IPRIO0 <= isel && isel <= ISELECT_IPRIO15) ||
+ (ISELECT_IMSIC_FIRST <= isel && isel <= ISELECT_IMSIC_LAST);
+}
+
+static bool xiselect_cd_range(target_ulong isel)
+{
+ return (ISELECT_CD_FIRST <= isel && isel <= ISELECT_CD_LAST);
+}
+
static int rmw_iprio(target_ulong xlen,
target_ulong iselect, uint8_t *iprio,
target_ulong *val, target_ulong new_val,
@@ -2047,45 +2434,44 @@ static int rmw_iprio(target_ulong xlen,
return 0;
}
-static RISCVException rmw_xireg(CPURISCVState *env, int csrno,
- target_ulong *val, target_ulong new_val,
- target_ulong wr_mask)
+static RISCVException rmw_xireg_aia(CPURISCVState *env, int csrno,
+ target_ulong isel, target_ulong *val,
+ target_ulong new_val, target_ulong wr_mask)
{
- bool virt, isel_reserved;
- uint8_t *iprio;
+ bool virt = false, isel_reserved = false;
int ret = -EINVAL;
- target_ulong priv, isel, vgein;
-
- /* Translate CSR number for VS-mode */
- csrno = aia_xlate_vs_csrno(env, csrno);
+ uint8_t *iprio;
+ target_ulong priv, vgein;
- /* Decode register details from CSR number */
- virt = false;
- isel_reserved = false;
+ /* VS-mode CSR number passed in has already been translated */
switch (csrno) {
case CSR_MIREG:
+ if (!riscv_cpu_cfg(env)->ext_smaia) {
+ goto done;
+ }
iprio = env->miprio;
- isel = env->miselect;
priv = PRV_M;
break;
case CSR_SIREG:
- if (env->priv == PRV_S && env->mvien & MIP_SEIP &&
+ if (!riscv_cpu_cfg(env)->ext_ssaia ||
+ (env->priv == PRV_S && env->mvien & MIP_SEIP &&
env->siselect >= ISELECT_IMSIC_EIDELIVERY &&
- env->siselect <= ISELECT_IMSIC_EIE63) {
+ env->siselect <= ISELECT_IMSIC_EIE63)) {
goto done;
}
iprio = env->siprio;
- isel = env->siselect;
priv = PRV_S;
break;
case CSR_VSIREG:
+ if (!riscv_cpu_cfg(env)->ext_ssaia) {
+ goto done;
+ }
iprio = env->hviprio;
- isel = env->vsiselect;
priv = PRV_S;
virt = true;
break;
default:
- goto done;
+ goto done;
};
/* Find the selected guest interrupt file */
@@ -2116,10 +2502,212 @@ static RISCVException rmw_xireg(CPURISCVState *env, int csrno,
}
done:
+ /*
+ * If AIA is not enabled, an illegal instruction exception is always
+ * returned regardless of whether we are in VS-mode or not.
+ */
if (ret) {
return (env->virt_enabled && virt && !isel_reserved) ?
RISCV_EXCP_VIRT_INSTRUCTION_FAULT : RISCV_EXCP_ILLEGAL_INST;
}
+
+ return RISCV_EXCP_NONE;
+}
+
+static int rmw_xireg_cd(CPURISCVState *env, int csrno,
+ target_ulong isel, target_ulong *val,
+ target_ulong new_val, target_ulong wr_mask)
+{
+ int ret = -EINVAL;
+ int ctr_index = isel - ISELECT_CD_FIRST;
+ int isel_hpm_start = ISELECT_CD_FIRST + 3;
+
+ if (!riscv_cpu_cfg(env)->ext_smcdeleg || !riscv_cpu_cfg(env)->ext_ssccfg) {
+ ret = RISCV_EXCP_ILLEGAL_INST;
+ goto done;
+ }
+
+ /* This siselect value is reserved */
+ if (ctr_index == 1) {
+ goto done;
+ }
+
+ /* sireg4 and sireg5 provide access to RV32-only CSRs */
+ if (((csrno == CSR_SIREG5) || (csrno == CSR_SIREG4)) &&
+ (riscv_cpu_mxl(env) != MXL_RV32)) {
+ ret = RISCV_EXCP_ILLEGAL_INST;
+ goto done;
+ }
+
+ /* Check the Sscofpmf dependency */
+ if (!riscv_cpu_cfg(env)->ext_sscofpmf && csrno == CSR_SIREG5 &&
+ (isel_hpm_start <= isel && isel <= ISELECT_CD_LAST)) {
+ goto done;
+ }
+
+ /* Check the Smcntrpmf dependency */
+ if (!riscv_cpu_cfg(env)->ext_smcntrpmf &&
+ (csrno == CSR_SIREG2 || csrno == CSR_SIREG5) &&
+ (ISELECT_CD_FIRST <= isel && isel < isel_hpm_start)) {
+ goto done;
+ }
+
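+ /* The counter must be delegated: mcounteren bit and menvcfg.CDE set */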
+ if (!get_field(env->mcounteren, BIT(ctr_index)) ||
+ !get_field(env->menvcfg, MENVCFG_CDE)) {
+ goto done;
+ }
+
+ switch (csrno) {
+ case CSR_SIREG:
+ ret = rmw_cd_mhpmcounter(env, ctr_index, val, new_val, wr_mask);
+ break;
+ case CSR_SIREG4:
+ ret = rmw_cd_mhpmcounterh(env, ctr_index, val, new_val, wr_mask);
+ break;
+ case CSR_SIREG2:
+ if (ctr_index <= 2) {
+ ret = rmw_cd_ctr_cfg(env, ctr_index, val, new_val, wr_mask);
+ } else {
+ ret = rmw_cd_mhpmevent(env, ctr_index, val, new_val, wr_mask);
+ }
+ break;
+ case CSR_SIREG5:
+ if (ctr_index <= 2) {
+ ret = rmw_cd_ctr_cfgh(env, ctr_index, val, new_val, wr_mask);
+ } else {
+ ret = rmw_cd_mhpmeventh(env, ctr_index, val, new_val, wr_mask);
+ }
+ break;
+ default:
+ goto done;
+ }
+
+done:
+ return ret;
+}
+
+/*
+ * rmw_xireg_csrind: Perform indirect access to xireg and xireg2-xireg6
+ *
+ * This is a generic interface for all xireg CSRs. Apart from AIA, all other
+ * extensions using csrind should be implemented here.
+ */
+static int rmw_xireg_csrind(CPURISCVState *env, int csrno,
+ target_ulong isel, target_ulong *val,
+ target_ulong new_val, target_ulong wr_mask)
+{
+ int ret = -EINVAL;
+ bool virt = (csrno == CSR_VSIREG);
+
+ if (xiselect_cd_range(isel)) {
+ ret = rmw_xireg_cd(env, csrno, isel, val, new_val, wr_mask);
+ } else {
+ /*
+ * As per the specification, access to an unimplemented region is
+ * undefined, but the recommendation is to raise an illegal instruction
+ * exception.
+ */
+ return RISCV_EXCP_ILLEGAL_INST;
+ }
+
+ if (ret) {
+ return (env->virt_enabled && virt) ?
+ RISCV_EXCP_VIRT_INSTRUCTION_FAULT : RISCV_EXCP_ILLEGAL_INST;
+ }
+
+ return RISCV_EXCP_NONE;
+}
+
+static int rmw_xiregi(CPURISCVState *env, int csrno, target_ulong *val,
+ target_ulong new_val, target_ulong wr_mask)
+{
+ bool virt = false;
+ int ret = -EINVAL;
+ target_ulong isel;
+
+ ret = smstateen_acc_ok(env, 0, SMSTATEEN0_SVSLCT);
+ if (ret != RISCV_EXCP_NONE) {
+ return ret;
+ }
+
+ /* Translate CSR number for VS-mode */
+ csrno = csrind_xlate_vs_csrno(env, csrno);
+
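+ /*
+ * The CSR number just below xireg4 is not allocated by Smcsrind/Sscsrind,
+ * so it is excluded from each of the ranges below.
+ */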
+ if (CSR_MIREG <= csrno && csrno <= CSR_MIREG6 &&
+ csrno != CSR_MIREG4 - 1) {
+ isel = env->miselect;
+ } else if (CSR_SIREG <= csrno && csrno <= CSR_SIREG6 &&
+ csrno != CSR_SIREG4 - 1) {
+ isel = env->siselect;
+ } else if (CSR_VSIREG <= csrno && csrno <= CSR_VSIREG6 &&
+ csrno != CSR_VSIREG4 - 1) {
+ isel = env->vsiselect;
+ virt = true;
+ } else {
+ goto done;
+ }
+
+ return rmw_xireg_csrind(env, csrno, isel, val, new_val, wr_mask);
+
+done:
+ return (env->virt_enabled && virt) ?
+ RISCV_EXCP_VIRT_INSTRUCTION_FAULT : RISCV_EXCP_ILLEGAL_INST;
+}
+
+static RISCVException rmw_xireg(CPURISCVState *env, int csrno,
+ target_ulong *val, target_ulong new_val,
+ target_ulong wr_mask)
+{
+ bool virt = false;
+ int ret = -EINVAL;
+ target_ulong isel;
+
+ ret = smstateen_acc_ok(env, 0, SMSTATEEN0_SVSLCT);
+ if (ret != RISCV_EXCP_NONE) {
+ return ret;
+ }
+
+ /* Translate CSR number for VS-mode */
+ csrno = csrind_xlate_vs_csrno(env, csrno);
+
+ /* Decode register details from CSR number */
+ switch (csrno) {
+ case CSR_MIREG:
+ isel = env->miselect;
+ break;
+ case CSR_SIREG:
+ isel = env->siselect;
+ break;
+ case CSR_VSIREG:
+ isel = env->vsiselect;
+ virt = true;
+ break;
+ default:
+ goto done;
+ };
+
+ /*
+ * Use the xiselect range to determine the actual operation on xireg.
+ *
+ * The predicate only checked for the presence of AIA or indirect CSR
+ * access, so the exact extension must be checked again once a specific
+ * range is reached, and an illegal instruction exception returned even
+ * in VS-mode if it is missing.
+ */
+ if (xiselect_aia_range(isel)) {
+ return rmw_xireg_aia(env, csrno, isel, val, new_val, wr_mask);
+ } else if (riscv_cpu_cfg(env)->ext_smcsrind ||
+ riscv_cpu_cfg(env)->ext_sscsrind) {
+ return rmw_xireg_csrind(env, csrno, isel, val, new_val, wr_mask);
+ } else {
+ return RISCV_EXCP_ILLEGAL_INST;
+ }
+
+done:
+ if (ret) {
+ return (env->virt_enabled && virt) ?
+ RISCV_EXCP_VIRT_INSTRUCTION_FAULT : RISCV_EXCP_ILLEGAL_INST;
+ }
return RISCV_EXCP_NONE;
}
@@ -2274,6 +2862,21 @@ static RISCVException write_mcountinhibit(CPURISCVState *env, int csrno,
return RISCV_EXCP_NONE;
}
+static RISCVException read_scountinhibit(CPURISCVState *env, int csrno,
+ target_ulong *val)
+{
+ /* S-mode can only access the bits delegated by M-mode */
+ *val = env->mcountinhibit & env->mcounteren;
+ return RISCV_EXCP_NONE;
+}
+
+static RISCVException write_scountinhibit(CPURISCVState *env, int csrno,
+ target_ulong val)
+{
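+ /* S-mode writes only affect the counters delegated via mcounteren */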
+ write_mcountinhibit(env, csrno, val & env->mcounteren);
+ return RISCV_EXCP_NONE;
+}
+
static RISCVException read_mcounteren(CPURISCVState *env, int csrno,
target_ulong *val)
{
@@ -2372,16 +2975,21 @@ static RISCVException read_menvcfg(CPURISCVState *env, int csrno,
return RISCV_EXCP_NONE;
}
+static RISCVException write_henvcfg(CPURISCVState *env, int csrno,
+ target_ulong val);
static RISCVException write_menvcfg(CPURISCVState *env, int csrno,
target_ulong val)
{
const RISCVCPUConfig *cfg = riscv_cpu_cfg(env);
- uint64_t mask = MENVCFG_FIOM | MENVCFG_CBIE | MENVCFG_CBCFE | MENVCFG_CBZE;
+ uint64_t mask = MENVCFG_FIOM | MENVCFG_CBIE | MENVCFG_CBCFE |
+ MENVCFG_CBZE | MENVCFG_CDE;
if (riscv_cpu_mxl(env) == MXL_RV64) {
mask |= (cfg->ext_svpbmt ? MENVCFG_PBMTE : 0) |
(cfg->ext_sstc ? MENVCFG_STCE : 0) |
- (cfg->ext_svadu ? MENVCFG_ADUE : 0);
+ (cfg->ext_smcdeleg ? MENVCFG_CDE : 0) |
+ (cfg->ext_svadu ? MENVCFG_ADUE : 0) |
+ (cfg->ext_ssdbltrp ? MENVCFG_DTE : 0);
if (env_archcpu(env)->cfg.ext_zicfilp) {
mask |= MENVCFG_LPE;
@@ -2390,8 +2998,19 @@ static RISCVException write_menvcfg(CPURISCVState *env, int csrno,
if (env_archcpu(env)->cfg.ext_zicfiss) {
mask |= MENVCFG_SSE;
}
+
+ /* Update PMM field only if the value is valid according to Zjpm v1.0 */
+ if (env_archcpu(env)->cfg.ext_smnpm &&
+ get_field(val, MENVCFG_PMM) != PMM_FIELD_RESERVED) {
+ mask |= MENVCFG_PMM;
+ }
+
+ if ((val & MENVCFG_DTE) == 0) {
+ env->mstatus &= ~MSTATUS_SDT;
+ }
}
env->menvcfg = (env->menvcfg & ~mask) | (val & mask);
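+ /* Rewrite henvcfg so bits newly masked off by menvcfg do not go stale */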
+ write_henvcfg(env, CSR_HENVCFG, env->henvcfg);
return RISCV_EXCP_NONE;
}
@@ -2403,16 +3022,25 @@ static RISCVException read_menvcfgh(CPURISCVState *env, int csrno,
return RISCV_EXCP_NONE;
}
+static RISCVException write_henvcfgh(CPURISCVState *env, int csrno,
+ target_ulong val);
static RISCVException write_menvcfgh(CPURISCVState *env, int csrno,
target_ulong val)
{
const RISCVCPUConfig *cfg = riscv_cpu_cfg(env);
uint64_t mask = (cfg->ext_svpbmt ? MENVCFG_PBMTE : 0) |
(cfg->ext_sstc ? MENVCFG_STCE : 0) |
- (cfg->ext_svadu ? MENVCFG_ADUE : 0);
+ (cfg->ext_svadu ? MENVCFG_ADUE : 0) |
+ (cfg->ext_smcdeleg ? MENVCFG_CDE : 0) |
+ (cfg->ext_ssdbltrp ? MENVCFG_DTE : 0);
uint64_t valh = (uint64_t)val << 32;
+ if ((valh & MENVCFG_DTE) == 0) {
+ env->mstatus &= ~MSTATUS_SDT;
+ }
+
env->menvcfg = (env->menvcfg & ~mask) | (valh & mask);
+ write_henvcfgh(env, CSR_HENVCFGH, env->henvcfg >> 32);
return RISCV_EXCP_NONE;
}
@@ -2436,6 +3064,12 @@ static RISCVException write_senvcfg(CPURISCVState *env, int csrno,
{
uint64_t mask = SENVCFG_FIOM | SENVCFG_CBIE | SENVCFG_CBCFE | SENVCFG_CBZE;
RISCVException ret;
+ /* Update PMM field only if the value is valid according to Zjpm v1.0 */
+ if (env_archcpu(env)->cfg.ext_ssnpm &&
+ riscv_cpu_mxl(env) == MXL_RV64 &&
+ get_field(val, SENVCFG_PMM) != PMM_FIELD_RESERVED) {
+ mask |= SENVCFG_PMM;
+ }
ret = smstateen_acc_ok(env, 0, SMSTATEEN0_HSENVCFG);
if (ret != RISCV_EXCP_NONE) {
@@ -2475,9 +3109,10 @@ static RISCVException read_henvcfg(CPURISCVState *env, int csrno,
* henvcfg.pbmte is read_only 0 when menvcfg.pbmte = 0
* henvcfg.stce is read_only 0 when menvcfg.stce = 0
* henvcfg.adue is read_only 0 when menvcfg.adue = 0
+ * henvcfg.dte is read_only 0 when menvcfg.dte = 0
*/
- *val = env->henvcfg & (~(HENVCFG_PBMTE | HENVCFG_STCE | HENVCFG_ADUE) |
- env->menvcfg);
+ *val = env->henvcfg & (~(HENVCFG_PBMTE | HENVCFG_STCE | HENVCFG_ADUE |
+ HENVCFG_DTE) | env->menvcfg);
return RISCV_EXCP_NONE;
}
@@ -2493,7 +3128,8 @@ static RISCVException write_henvcfg(CPURISCVState *env, int csrno,
}
if (riscv_cpu_mxl(env) == MXL_RV64) {
- mask |= env->menvcfg & (HENVCFG_PBMTE | HENVCFG_STCE | HENVCFG_ADUE);
+ mask |= env->menvcfg & (HENVCFG_PBMTE | HENVCFG_STCE | HENVCFG_ADUE |
+ HENVCFG_DTE);
if (env_archcpu(env)->cfg.ext_zicfilp) {
mask |= HENVCFG_LPE;
@@ -2504,9 +3140,18 @@ static RISCVException write_henvcfg(CPURISCVState *env, int csrno,
get_field(env->menvcfg, MENVCFG_SSE)) {
mask |= HENVCFG_SSE;
}
+
+ /* Update PMM field only if the value is valid according to Zjpm v1.0 */
+ if (env_archcpu(env)->cfg.ext_ssnpm &&
+ get_field(val, HENVCFG_PMM) != PMM_FIELD_RESERVED) {
+ mask |= HENVCFG_PMM;
+ }
}
- env->henvcfg = (env->henvcfg & ~mask) | (val & mask);
+ env->henvcfg = val & mask;
+ if ((env->henvcfg & HENVCFG_DTE) == 0) {
+ env->vsstatus &= ~MSTATUS_SDT;
+ }
return RISCV_EXCP_NONE;
}
@@ -2521,8 +3166,8 @@ static RISCVException read_henvcfgh(CPURISCVState *env, int csrno,
return ret;
}
- *val = (env->henvcfg & (~(HENVCFG_PBMTE | HENVCFG_STCE | HENVCFG_ADUE) |
- env->menvcfg)) >> 32;
+ *val = (env->henvcfg & (~(HENVCFG_PBMTE | HENVCFG_STCE | HENVCFG_ADUE |
+ HENVCFG_DTE) | env->menvcfg)) >> 32;
return RISCV_EXCP_NONE;
}
@@ -2530,7 +3175,7 @@ static RISCVException write_henvcfgh(CPURISCVState *env, int csrno,
target_ulong val)
{
uint64_t mask = env->menvcfg & (HENVCFG_PBMTE | HENVCFG_STCE |
- HENVCFG_ADUE);
+ HENVCFG_ADUE | HENVCFG_DTE);
uint64_t valh = (uint64_t)val << 32;
RISCVException ret;
@@ -2538,8 +3183,10 @@ static RISCVException write_henvcfgh(CPURISCVState *env, int csrno,
if (ret != RISCV_EXCP_NONE) {
return ret;
}
-
- env->henvcfg = (env->henvcfg & ~mask) | (valh & mask);
+ env->henvcfg = (env->henvcfg & 0xFFFFFFFF) | (valh & mask);
+ if ((env->henvcfg & HENVCFG_DTE) == 0) {
+ env->vsstatus &= ~MSTATUS_SDT;
+ }
return RISCV_EXCP_NONE;
}
@@ -2574,6 +3221,19 @@ static RISCVException write_mstateen0(CPURISCVState *env, int csrno,
wr_mask |= SMSTATEEN0_P1P13;
}
+ if (riscv_cpu_cfg(env)->ext_smaia || riscv_cpu_cfg(env)->ext_smcsrind) {
+ wr_mask |= SMSTATEEN0_SVSLCT;
+ }
+
+ /*
+ * As per the AIA specification, SMSTATEEN0_IMSIC is valid only if IMSIC is
+ * implemented. However, that information is with MachineState and we can't
+ * figure that out in csr.c. Just enable if Smaia is available.
+ */
+ if (riscv_cpu_cfg(env)->ext_smaia) {
+ wr_mask |= (SMSTATEEN0_AIA | SMSTATEEN0_IMSIC);
+ }
+
return write_mstateen(env, csrno, wr_mask, new_val);
}
@@ -2654,6 +3314,19 @@ static RISCVException write_hstateen0(CPURISCVState *env, int csrno,
wr_mask |= SMSTATEEN0_FCSR;
}
+ if (riscv_cpu_cfg(env)->ext_ssaia || riscv_cpu_cfg(env)->ext_sscsrind) {
+ wr_mask |= SMSTATEEN0_SVSLCT;
+ }
+
+ /*
+ * As per the AIA specification, SMSTATEEN0_IMSIC is valid only if IMSIC is
+ * implemented. However, that information is with MachineState and we can't
+ * figure that out in csr.c. Just enable if Ssaia is available.
+ */
+ if (riscv_cpu_cfg(env)->ext_ssaia) {
+ wr_mask |= (SMSTATEEN0_AIA | SMSTATEEN0_IMSIC);
+ }
+
return write_hstateen(env, csrno, wr_mask, new_val);
}
@@ -2967,6 +3640,9 @@ static RISCVException read_sstatus_i128(CPURISCVState *env, int csrno,
if (env->xl != MXL_RV32 || env->debugger) {
mask |= SSTATUS64_UXL;
}
+ if (riscv_cpu_cfg(env)->ext_ssdbltrp) {
+ mask |= SSTATUS_SDT;
+ }
if (env_archcpu(env)->cfg.ext_zicfilp) {
mask |= SSTATUS_SPELP;
@@ -2987,7 +3663,9 @@ static RISCVException read_sstatus(CPURISCVState *env, int csrno,
if (env_archcpu(env)->cfg.ext_zicfilp) {
mask |= SSTATUS_SPELP;
}
-
+ if (riscv_cpu_cfg(env)->ext_ssdbltrp) {
+ mask |= SSTATUS_SDT;
+ }
/* TODO: Use SXL not MXL. */
*val = add_status_sd(riscv_cpu_mxl(env), env->mstatus & mask);
return RISCV_EXCP_NONE;
@@ -3007,7 +3685,9 @@ static RISCVException write_sstatus(CPURISCVState *env, int csrno,
if (env_archcpu(env)->cfg.ext_zicfilp) {
mask |= SSTATUS_SPELP;
}
-
+ if (riscv_cpu_cfg(env)->ext_ssdbltrp) {
+ mask |= SSTATUS_SDT;
+ }
target_ulong newval = (env->mstatus & ~mask) | (val & mask);
return write_mstatus(env, CSR_MSTATUS, newval);
}
@@ -3540,10 +4220,18 @@ static RISCVException read_hstatus(CPURISCVState *env, int csrno,
static RISCVException write_hstatus(CPURISCVState *env, int csrno,
target_ulong val)
{
+ uint64_t mask = (target_ulong)-1;
if (!env_archcpu(env)->cfg.ext_svukte) {
- val = val & (~HSTATUS_HUKTE);
+ mask &= ~HSTATUS_HUKTE;
+ }
+ /* Update PMM field only if the value is valid according to Zjpm v1.0 */
+ if (!env_archcpu(env)->cfg.ext_ssnpm ||
+ riscv_cpu_mxl(env) != MXL_RV64 ||
+ get_field(val, HSTATUS_HUPMM) == PMM_FIELD_RESERVED) {
+ mask &= ~HSTATUS_HUPMM;
}
- env->hstatus = val;
+ env->hstatus = (env->hstatus & ~mask) | (val & mask);
+
if (riscv_cpu_mxl(env) != MXL_RV32 && get_field(val, HSTATUS_VSXL) != 2) {
qemu_log_mask(LOG_UNIMP,
"QEMU does not support mixed HSXLEN options.");
@@ -4116,6 +4804,13 @@ static RISCVException write_vsstatus(CPURISCVState *env, int csrno,
if ((val & VSSTATUS64_UXL) == 0) {
mask &= ~VSSTATUS64_UXL;
}
+ if ((env->henvcfg & HENVCFG_DTE)) {
+ if ((val & SSTATUS_SDT) != 0) {
+ val &= ~SSTATUS_SIE;
+ }
+ } else {
+ val &= ~SSTATUS_SDT;
+ }
env->vsstatus = (env->vsstatus & ~mask) | (uint64_t)val;
return RISCV_EXCP_NONE;
}
@@ -4358,299 +5053,64 @@ static RISCVException write_mcontext(CPURISCVState *env, int csrno,
return RISCV_EXCP_NONE;
}
-/*
- * Functions to access Pointer Masking feature registers
- * We have to check if current priv lvl could modify
- * csr in given mode
- */
-static bool check_pm_current_disabled(CPURISCVState *env, int csrno)
-{
- int csr_priv = get_field(csrno, 0x300);
- int pm_current;
-
- if (env->debugger) {
- return false;
- }
- /*
- * If priv lvls differ that means we're accessing csr from higher priv lvl,
- * so allow the access
- */
- if (env->priv != csr_priv) {
- return false;
- }
- switch (env->priv) {
- case PRV_M:
- pm_current = get_field(env->mmte, M_PM_CURRENT);
- break;
- case PRV_S:
- pm_current = get_field(env->mmte, S_PM_CURRENT);
- break;
- case PRV_U:
- pm_current = get_field(env->mmte, U_PM_CURRENT);
- break;
- default:
- g_assert_not_reached();
- }
- /* It's same priv lvl, so we allow to modify csr only if pm.current==1 */
- return !pm_current;
-}
-
-static RISCVException read_mmte(CPURISCVState *env, int csrno,
- target_ulong *val)
-{
- *val = env->mmte & MMTE_MASK;
- return RISCV_EXCP_NONE;
-}
-
-static RISCVException write_mmte(CPURISCVState *env, int csrno,
- target_ulong val)
-{
- uint64_t mstatus;
- target_ulong wpri_val = val & MMTE_MASK;
-
- if (val != wpri_val) {
- qemu_log_mask(LOG_GUEST_ERROR, "%s" TARGET_FMT_lx " %s"
- TARGET_FMT_lx "\n", "MMTE: WPRI violation written 0x",
- val, "vs expected 0x", wpri_val);
- }
- /* for machine mode pm.current is hardwired to 1 */
- wpri_val |= MMTE_M_PM_CURRENT;
-
- /* hardwiring pm.instruction bit to 0, since it's not supported yet */
- wpri_val &= ~(MMTE_M_PM_INSN | MMTE_S_PM_INSN | MMTE_U_PM_INSN);
- env->mmte = wpri_val | EXT_STATUS_DIRTY;
- riscv_cpu_update_mask(env);
-
- /* Set XS and SD bits, since PM CSRs are dirty */
- mstatus = env->mstatus | MSTATUS_XS;
- write_mstatus(env, csrno, mstatus);
- return RISCV_EXCP_NONE;
-}
-
-static RISCVException read_smte(CPURISCVState *env, int csrno,
- target_ulong *val)
-{
- *val = env->mmte & SMTE_MASK;
- return RISCV_EXCP_NONE;
-}
-
-static RISCVException write_smte(CPURISCVState *env, int csrno,
- target_ulong val)
-{
- target_ulong wpri_val = val & SMTE_MASK;
-
- if (val != wpri_val) {
- qemu_log_mask(LOG_GUEST_ERROR, "%s" TARGET_FMT_lx " %s"
- TARGET_FMT_lx "\n", "SMTE: WPRI violation written 0x",
- val, "vs expected 0x", wpri_val);
- }
-
- /* if pm.current==0 we can't modify current PM CSRs */
- if (check_pm_current_disabled(env, csrno)) {
- return RISCV_EXCP_NONE;
- }
-
- wpri_val |= (env->mmte & ~SMTE_MASK);
- write_mmte(env, csrno, wpri_val);
- return RISCV_EXCP_NONE;
-}
-
-static RISCVException read_umte(CPURISCVState *env, int csrno,
- target_ulong *val)
-{
- *val = env->mmte & UMTE_MASK;
- return RISCV_EXCP_NONE;
-}
-
-static RISCVException write_umte(CPURISCVState *env, int csrno,
- target_ulong val)
-{
- target_ulong wpri_val = val & UMTE_MASK;
-
- if (val != wpri_val) {
- qemu_log_mask(LOG_GUEST_ERROR, "%s" TARGET_FMT_lx " %s"
- TARGET_FMT_lx "\n", "UMTE: WPRI violation written 0x",
- val, "vs expected 0x", wpri_val);
- }
-
- if (check_pm_current_disabled(env, csrno)) {
- return RISCV_EXCP_NONE;
- }
-
- wpri_val |= (env->mmte & ~UMTE_MASK);
- write_mmte(env, csrno, wpri_val);
- return RISCV_EXCP_NONE;
-}
-
-static RISCVException read_mpmmask(CPURISCVState *env, int csrno,
- target_ulong *val)
+static RISCVException read_mnscratch(CPURISCVState *env, int csrno,
+ target_ulong *val)
{
- *val = env->mpmmask;
+ *val = env->mnscratch;
return RISCV_EXCP_NONE;
}
-static RISCVException write_mpmmask(CPURISCVState *env, int csrno,
- target_ulong val)
+static int write_mnscratch(CPURISCVState *env, int csrno, target_ulong val)
{
- uint64_t mstatus;
-
- env->mpmmask = val;
- if ((cpu_address_mode(env) == PRV_M) && (env->mmte & M_PM_ENABLE)) {
- env->cur_pmmask = val;
- }
- env->mmte |= EXT_STATUS_DIRTY;
-
- /* Set XS and SD bits, since PM CSRs are dirty */
- mstatus = env->mstatus | MSTATUS_XS;
- write_mstatus(env, csrno, mstatus);
+ env->mnscratch = val;
return RISCV_EXCP_NONE;
}
-static RISCVException read_spmmask(CPURISCVState *env, int csrno,
- target_ulong *val)
+static int read_mnepc(CPURISCVState *env, int csrno, target_ulong *val)
{
- *val = env->spmmask;
+ *val = env->mnepc;
return RISCV_EXCP_NONE;
}
-static RISCVException write_spmmask(CPURISCVState *env, int csrno,
- target_ulong val)
+static int write_mnepc(CPURISCVState *env, int csrno, target_ulong val)
{
- uint64_t mstatus;
-
- /* if pm.current==0 we can't modify current PM CSRs */
- if (check_pm_current_disabled(env, csrno)) {
- return RISCV_EXCP_NONE;
- }
- env->spmmask = val;
- if ((cpu_address_mode(env) == PRV_S) && (env->mmte & S_PM_ENABLE)) {
- env->cur_pmmask = val;
- if (cpu_get_xl(env, PRV_S) == MXL_RV32) {
- env->cur_pmmask &= UINT32_MAX;
- }
- }
- env->mmte |= EXT_STATUS_DIRTY;
-
- /* Set XS and SD bits, since PM CSRs are dirty */
- mstatus = env->mstatus | MSTATUS_XS;
- write_mstatus(env, csrno, mstatus);
+ env->mnepc = val;
return RISCV_EXCP_NONE;
}
-static RISCVException read_upmmask(CPURISCVState *env, int csrno,
- target_ulong *val)
+static int read_mncause(CPURISCVState *env, int csrno, target_ulong *val)
{
- *val = env->upmmask;
+ *val = env->mncause;
return RISCV_EXCP_NONE;
}
-static RISCVException write_upmmask(CPURISCVState *env, int csrno,
- target_ulong val)
+static int write_mncause(CPURISCVState *env, int csrno, target_ulong val)
{
- uint64_t mstatus;
-
- /* if pm.current==0 we can't modify current PM CSRs */
- if (check_pm_current_disabled(env, csrno)) {
- return RISCV_EXCP_NONE;
- }
- env->upmmask = val;
- if ((cpu_address_mode(env) == PRV_U) && (env->mmte & U_PM_ENABLE)) {
- env->cur_pmmask = val;
- if (cpu_get_xl(env, PRV_U) == MXL_RV32) {
- env->cur_pmmask &= UINT32_MAX;
- }
- }
- env->mmte |= EXT_STATUS_DIRTY;
-
- /* Set XS and SD bits, since PM CSRs are dirty */
- mstatus = env->mstatus | MSTATUS_XS;
- write_mstatus(env, csrno, mstatus);
+ env->mncause = val;
return RISCV_EXCP_NONE;
}
-static RISCVException read_mpmbase(CPURISCVState *env, int csrno,
- target_ulong *val)
+static int read_mnstatus(CPURISCVState *env, int csrno, target_ulong *val)
{
- *val = env->mpmbase;
+ *val = env->mnstatus;
return RISCV_EXCP_NONE;
}
-static RISCVException write_mpmbase(CPURISCVState *env, int csrno,
- target_ulong val)
+static int write_mnstatus(CPURISCVState *env, int csrno, target_ulong val)
{
- uint64_t mstatus;
+ target_ulong mask = (MNSTATUS_NMIE | MNSTATUS_MNPP);
- env->mpmbase = val;
- if ((cpu_address_mode(env) == PRV_M) && (env->mmte & M_PM_ENABLE)) {
- env->cur_pmbase = val;
- }
- env->mmte |= EXT_STATUS_DIRTY;
-
- /* Set XS and SD bits, since PM CSRs are dirty */
- mstatus = env->mstatus | MSTATUS_XS;
- write_mstatus(env, csrno, mstatus);
- return RISCV_EXCP_NONE;
-}
-
-static RISCVException read_spmbase(CPURISCVState *env, int csrno,
- target_ulong *val)
-{
- *val = env->spmbase;
- return RISCV_EXCP_NONE;
-}
-
-static RISCVException write_spmbase(CPURISCVState *env, int csrno,
- target_ulong val)
-{
- uint64_t mstatus;
-
- /* if pm.current==0 we can't modify current PM CSRs */
- if (check_pm_current_disabled(env, csrno)) {
- return RISCV_EXCP_NONE;
- }
- env->spmbase = val;
- if ((cpu_address_mode(env) == PRV_S) && (env->mmte & S_PM_ENABLE)) {
- env->cur_pmbase = val;
- if (cpu_get_xl(env, PRV_S) == MXL_RV32) {
- env->cur_pmbase &= UINT32_MAX;
+ if (riscv_has_ext(env, RVH)) {
+ /* Flush tlb on mnstatus fields that affect VM. */
+ if ((val ^ env->mnstatus) & MNSTATUS_MNPV) {
+ tlb_flush(env_cpu(env));
}
- }
- env->mmte |= EXT_STATUS_DIRTY;
-
- /* Set XS and SD bits, since PM CSRs are dirty */
- mstatus = env->mstatus | MSTATUS_XS;
- write_mstatus(env, csrno, mstatus);
- return RISCV_EXCP_NONE;
-}
-
-static RISCVException read_upmbase(CPURISCVState *env, int csrno,
- target_ulong *val)
-{
- *val = env->upmbase;
- return RISCV_EXCP_NONE;
-}
-
-static RISCVException write_upmbase(CPURISCVState *env, int csrno,
- target_ulong val)
-{
- uint64_t mstatus;
- /* if pm.current==0 we can't modify current PM CSRs */
- if (check_pm_current_disabled(env, csrno)) {
- return RISCV_EXCP_NONE;
+ mask |= MNSTATUS_MNPV;
}
- env->upmbase = val;
- if ((cpu_address_mode(env) == PRV_U) && (env->mmte & U_PM_ENABLE)) {
- env->cur_pmbase = val;
- if (cpu_get_xl(env, PRV_U) == MXL_RV32) {
- env->cur_pmbase &= UINT32_MAX;
- }
- }
- env->mmte |= EXT_STATUS_DIRTY;
- /* Set XS and SD bits, since PM CSRs are dirty */
- mstatus = env->mstatus | MSTATUS_XS;
- write_mstatus(env, csrno, mstatus);
+ /* mnstatus.mnie can only be cleared by hardware. */
+ env->mnstatus = (env->mnstatus & MNSTATUS_NMIE) | (val & mask);
return RISCV_EXCP_NONE;
}
@@ -5072,8 +5532,22 @@ riscv_csr_operations csr_ops[CSR_TABLE_SIZE] = {
[CSR_MIP] = { "mip", any, NULL, NULL, rmw_mip },
/* Machine-Level Window to Indirectly Accessed Registers (AIA) */
- [CSR_MISELECT] = { "miselect", aia_any, NULL, NULL, rmw_xiselect },
- [CSR_MIREG] = { "mireg", aia_any, NULL, NULL, rmw_xireg },
+ [CSR_MISELECT] = { "miselect", csrind_or_aia_any, NULL, NULL,
+ rmw_xiselect },
+ [CSR_MIREG] = { "mireg", csrind_or_aia_any, NULL, NULL,
+ rmw_xireg },
+
+ /* Machine Indirect Register Alias */
+ [CSR_MIREG2] = { "mireg2", csrind_any, NULL, NULL, rmw_xiregi,
+ .min_priv_ver = PRIV_VERSION_1_12_0 },
+ [CSR_MIREG3] = { "mireg3", csrind_any, NULL, NULL, rmw_xiregi,
+ .min_priv_ver = PRIV_VERSION_1_12_0 },
+ [CSR_MIREG4] = { "mireg4", csrind_any, NULL, NULL, rmw_xiregi,
+ .min_priv_ver = PRIV_VERSION_1_12_0 },
+ [CSR_MIREG5] = { "mireg5", csrind_any, NULL, NULL, rmw_xiregi,
+ .min_priv_ver = PRIV_VERSION_1_12_0 },
+ [CSR_MIREG6] = { "mireg6", csrind_any, NULL, NULL, rmw_xiregi,
+ .min_priv_ver = PRIV_VERSION_1_12_0 },
/* Machine-Level Interrupts (AIA) */
[CSR_MTOPEI] = { "mtopei", aia_any, NULL, NULL, rmw_xtopei },
@@ -5161,6 +5635,21 @@ riscv_csr_operations csr_ops[CSR_TABLE_SIZE] = {
write_sstateen_1_3,
.min_priv_ver = PRIV_VERSION_1_12_0 },
+ /* RNMI */
+ [CSR_MNSCRATCH] = { "mnscratch", rnmi, read_mnscratch, write_mnscratch,
+ .min_priv_ver = PRIV_VERSION_1_12_0 },
+ [CSR_MNEPC] = { "mnepc", rnmi, read_mnepc, write_mnepc,
+ .min_priv_ver = PRIV_VERSION_1_12_0 },
+ [CSR_MNCAUSE] = { "mncause", rnmi, read_mncause, write_mncause,
+ .min_priv_ver = PRIV_VERSION_1_12_0 },
+ [CSR_MNSTATUS] = { "mnstatus", rnmi, read_mnstatus, write_mnstatus,
+ .min_priv_ver = PRIV_VERSION_1_12_0 },
+
+ /* Supervisor Counter Delegation */
+ [CSR_SCOUNTINHIBIT] = {"scountinhibit", scountinhibit_pred,
+ read_scountinhibit, write_scountinhibit,
+ .min_priv_ver = PRIV_VERSION_1_12_0 },
+
/* Supervisor Trap Setup */
[CSR_SSTATUS] = { "sstatus", smode, read_sstatus, write_sstatus,
NULL, read_sstatus_i128 },
@@ -5191,8 +5680,22 @@ riscv_csr_operations csr_ops[CSR_TABLE_SIZE] = {
[CSR_SATP] = { "satp", satp, read_satp, write_satp },
/* Supervisor-Level Window to Indirectly Accessed Registers (AIA) */
- [CSR_SISELECT] = { "siselect", aia_smode, NULL, NULL, rmw_xiselect },
- [CSR_SIREG] = { "sireg", aia_smode, NULL, NULL, rmw_xireg },
+ [CSR_SISELECT] = { "siselect", csrind_or_aia_smode, NULL, NULL,
+ rmw_xiselect },
+ [CSR_SIREG] = { "sireg", csrind_or_aia_smode, NULL, NULL,
+ rmw_xireg },
+
+ /* Supervisor Indirect Register Alias */
+ [CSR_SIREG2] = { "sireg2", csrind_smode, NULL, NULL, rmw_xiregi,
+ .min_priv_ver = PRIV_VERSION_1_12_0 },
+ [CSR_SIREG3] = { "sireg3", csrind_smode, NULL, NULL, rmw_xiregi,
+ .min_priv_ver = PRIV_VERSION_1_12_0 },
+ [CSR_SIREG4] = { "sireg4", csrind_smode, NULL, NULL, rmw_xiregi,
+ .min_priv_ver = PRIV_VERSION_1_12_0 },
+ [CSR_SIREG5] = { "sireg5", csrind_smode, NULL, NULL, rmw_xiregi,
+ .min_priv_ver = PRIV_VERSION_1_12_0 },
+ [CSR_SIREG6] = { "sireg6", csrind_smode, NULL, NULL, rmw_xiregi,
+ .min_priv_ver = PRIV_VERSION_1_12_0 },
/* Supervisor-Level Interrupts (AIA) */
[CSR_STOPEI] = { "stopei", aia_smode, NULL, NULL, rmw_xtopei },
@@ -5255,7 +5758,7 @@ riscv_csr_operations csr_ops[CSR_TABLE_SIZE] = {
[CSR_VSATP] = { "vsatp", hmode, read_vsatp, write_vsatp,
.min_priv_ver = PRIV_VERSION_1_12_0 },
- [CSR_MTVAL2] = { "mtval2", hmode, read_mtval2, write_mtval2,
+ [CSR_MTVAL2] = { "mtval2", dbltrp_hmode, read_mtval2, write_mtval2,
.min_priv_ver = PRIV_VERSION_1_12_0 },
[CSR_MTINST] = { "mtinst", hmode, read_mtinst, write_mtinst,
.min_priv_ver = PRIV_VERSION_1_12_0 },
@@ -5271,9 +5774,22 @@ riscv_csr_operations csr_ops[CSR_TABLE_SIZE] = {
/*
* VS-Level Window to Indirectly Accessed Registers (H-extension with AIA)
*/
- [CSR_VSISELECT] = { "vsiselect", aia_hmode, NULL, NULL,
- rmw_xiselect },
- [CSR_VSIREG] = { "vsireg", aia_hmode, NULL, NULL, rmw_xireg },
+ [CSR_VSISELECT] = { "vsiselect", csrind_or_aia_hmode, NULL, NULL,
+ rmw_xiselect },
+ [CSR_VSIREG] = { "vsireg", csrind_or_aia_hmode, NULL, NULL,
+ rmw_xireg },
+
+ /* Virtual Supervisor Indirect Alias */
+ [CSR_VSIREG2] = { "vsireg2", csrind_hmode, NULL, NULL, rmw_xiregi,
+ .min_priv_ver = PRIV_VERSION_1_12_0 },
+ [CSR_VSIREG3] = { "vsireg3", csrind_hmode, NULL, NULL, rmw_xiregi,
+ .min_priv_ver = PRIV_VERSION_1_12_0 },
+ [CSR_VSIREG4] = { "vsireg4", csrind_hmode, NULL, NULL, rmw_xiregi,
+ .min_priv_ver = PRIV_VERSION_1_12_0 },
+ [CSR_VSIREG5] = { "vsireg5", csrind_hmode, NULL, NULL, rmw_xiregi,
+ .min_priv_ver = PRIV_VERSION_1_12_0 },
+ [CSR_VSIREG6] = { "vsireg6", csrind_hmode, NULL, NULL, rmw_xiregi,
+ .min_priv_ver = PRIV_VERSION_1_12_0 },
/* VS-Level Interrupts (H-extension with AIA) */
[CSR_VSTOPEI] = { "vstopei", aia_hmode, NULL, NULL, rmw_xtopei },
@@ -5323,25 +5839,6 @@ riscv_csr_operations csr_ops[CSR_TABLE_SIZE] = {
[CSR_TINFO] = { "tinfo", debug, read_tinfo, write_ignore },
[CSR_MCONTEXT] = { "mcontext", debug, read_mcontext, write_mcontext },
- /* User Pointer Masking */
- [CSR_UMTE] = { "umte", pointer_masking, read_umte, write_umte },
- [CSR_UPMMASK] = { "upmmask", pointer_masking, read_upmmask,
- write_upmmask },
- [CSR_UPMBASE] = { "upmbase", pointer_masking, read_upmbase,
- write_upmbase },
- /* Machine Pointer Masking */
- [CSR_MMTE] = { "mmte", pointer_masking, read_mmte, write_mmte },
- [CSR_MPMMASK] = { "mpmmask", pointer_masking, read_mpmmask,
- write_mpmmask },
- [CSR_MPMBASE] = { "mpmbase", pointer_masking, read_mpmbase,
- write_mpmbase },
- /* Supervisor Pointer Masking */
- [CSR_SMTE] = { "smte", pointer_masking, read_smte, write_smte },
- [CSR_SPMMASK] = { "spmmask", pointer_masking, read_spmmask,
- write_spmmask },
- [CSR_SPMBASE] = { "spmbase", pointer_masking, read_spmbase,
- write_spmbase },
-
/* Performance Counters */
[CSR_HPMCOUNTER3] = { "hpmcounter3", ctr, read_hpmcounter },
[CSR_HPMCOUNTER4] = { "hpmcounter4", ctr, read_hpmcounter },
diff --git a/target/riscv/gdbstub.c b/target/riscv/gdbstub.c
index c07df97..18e88f4 100644
--- a/target/riscv/gdbstub.c
+++ b/target/riscv/gdbstub.c
@@ -213,7 +213,10 @@ static int riscv_gdb_get_virtual(CPUState *cs, GByteArray *buf, int n)
RISCVCPU *cpu = RISCV_CPU(cs);
CPURISCVState *env = &cpu->env;
- return gdb_get_regl(buf, env->priv);
+ /* Per RISC-V debug spec v1.0.0 rc4 */
+ target_ulong vbit = (env->virt_enabled) ? BIT(2) : 0;
+
+ return gdb_get_regl(buf, env->priv | vbit);
#endif
}
return 0;
@@ -226,10 +229,22 @@ static int riscv_gdb_set_virtual(CPUState *cs, uint8_t *mem_buf, int n)
RISCVCPU *cpu = RISCV_CPU(cs);
CPURISCVState *env = &cpu->env;
- env->priv = ldtul_p(mem_buf) & 0x3;
- if (env->priv == PRV_RESERVED) {
- env->priv = PRV_S;
+ target_ulong new_priv = ldtul_p(mem_buf) & 0x3;
+ bool new_virt = false;
+
+ if (new_priv == PRV_RESERVED) {
+ new_priv = PRV_S;
+ }
+
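+ /* The V bit (bit 2) is only meaningful outside M-mode */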
+ if (new_priv != PRV_M) {
+ new_virt = (ldtul_p(mem_buf) & BIT(2)) >> 2;
}
+
+ if (riscv_has_ext(env, RVH) && new_virt != env->virt_enabled) {
+ riscv_cpu_swap_hypervisor_regs(env);
+ }
+
+ riscv_cpu_set_mode(env, new_priv, new_virt);
#endif
return sizeof(target_ulong);
}
diff --git a/target/riscv/helper.h b/target/riscv/helper.h
index 451261c..16ea240 100644
--- a/target/riscv/helper.h
+++ b/target/riscv/helper.h
@@ -131,6 +131,7 @@ DEF_HELPER_6(csrrw_i128, tl, env, int, tl, tl, tl, tl)
#ifndef CONFIG_USER_ONLY
DEF_HELPER_1(sret, tl, env)
DEF_HELPER_1(mret, tl, env)
+DEF_HELPER_1(mnret, tl, env)
DEF_HELPER_1(wfi, void, env)
DEF_HELPER_1(wrs_nto, void, env)
DEF_HELPER_1(tlb_flush, void, env)
diff --git a/target/riscv/insn32.decode b/target/riscv/insn32.decode
index e9139ec..942c434 100644
--- a/target/riscv/insn32.decode
+++ b/target/riscv/insn32.decode
@@ -121,6 +121,9 @@ wfi 0001000 00101 00000 000 00000 1110011
sfence_vma 0001001 ..... ..... 000 00000 1110011 @sfence_vma
sfence_vm 0001000 00100 ..... 000 00000 1110011 @sfence_vm
+# *** NMI ***
+mnret 0111000 00010 00000 000 00000 1110011
+
# *** RV32I Base Instruction Set ***
lui .................... ..... 0110111 @u
{
diff --git a/target/riscv/insn_trans/trans_privileged.c.inc b/target/riscv/insn_trans/trans_privileged.c.inc
index ecd3b8b..73f940d 100644
--- a/target/riscv/insn_trans/trans_privileged.c.inc
+++ b/target/riscv/insn_trans/trans_privileged.c.inc
@@ -18,6 +18,12 @@
* this program. If not, see <http://www.gnu.org/licenses/>.
*/
+#define REQUIRE_SMRNMI(ctx) do { \
+ if (!ctx->cfg_ptr->ext_smrnmi) { \
+ return false; \
+ } \
+} while (0)
+
static bool trans_ecall(DisasContext *ctx, arg_ecall *a)
{
/* always generates U-level ECALL, fixed in do_interrupt handler */
@@ -106,6 +112,20 @@ static bool trans_mret(DisasContext *ctx, arg_mret *a)
#endif
}
+static bool trans_mnret(DisasContext *ctx, arg_mnret *a)
+{
+#ifndef CONFIG_USER_ONLY
+ REQUIRE_SMRNMI(ctx);
+ decode_save_opc(ctx, 0);
+ gen_helper_mnret(cpu_pc, tcg_env);
+ tcg_gen_exit_tb(NULL, 0); /* no chaining */
+ ctx->base.is_jmp = DISAS_NORETURN;
+ return true;
+#else
+ return false;
+#endif
+}
+
static bool trans_wfi(DisasContext *ctx, arg_wfi *a)
{
#ifndef CONFIG_USER_ONLY
diff --git a/target/riscv/internals.h b/target/riscv/internals.h
index 76934ea..6729193 100644
--- a/target/riscv/internals.h
+++ b/target/riscv/internals.h
@@ -145,4 +145,58 @@ static inline float16 check_nanbox_h(CPURISCVState *env, uint64_t f)
/* Our implementation of CPUClass::has_work */
bool riscv_cpu_has_work(CPUState *cs);
+/* Zjpm addr masking routine */
+static inline target_ulong adjust_addr_body(CPURISCVState *env,
+ target_ulong addr,
+ bool is_virt_addr)
+{
+ RISCVPmPmm pmm = PMM_FIELD_DISABLED;
+ uint32_t pmlen = 0;
+ bool signext = false;
+
+ /* do nothing for rv32 mode */
+ if (riscv_cpu_mxl(env) == MXL_RV32) {
+ return addr;
+ }
+
+ /* get the pmm field depending on whether addr is a guest (VS/VU) address */
+ if (is_virt_addr) {
+ pmm = riscv_pm_get_virt_pmm(env);
+ } else {
+ pmm = riscv_pm_get_pmm(env);
+ }
+
+ /* if pointer masking is disabled, return original addr */
+ if (pmm == PMM_FIELD_DISABLED) {
+ return addr;
+ }
+
+ if (!is_virt_addr) {
+ signext = riscv_cpu_virt_mem_enabled(env);
+ }
+ pmlen = riscv_pm_get_pmlen(pmm);
+ addr = addr << pmlen;
+
+ /* sign/zero extend the masked address from bit (XLEN - pmlen - 1) */
+ if (signext) {
+ addr = (target_long)addr >> pmlen;
+ } else {
+ addr = addr >> pmlen;
+ }
+
+ return addr;
+}
+
+static inline target_ulong adjust_addr(CPURISCVState *env,
+ target_ulong addr)
+{
+ return adjust_addr_body(env, addr, false);
+}
+
+static inline target_ulong adjust_addr_virt(CPURISCVState *env,
+ target_ulong addr)
+{
+ return adjust_addr_body(env, addr, true);
+}
+
#endif
diff --git a/target/riscv/kvm/kvm-cpu.c b/target/riscv/kvm/kvm-cpu.c
index 11278ea..23ce779 100644
--- a/target/riscv/kvm/kvm-cpu.c
+++ b/target/riscv/kvm/kvm-cpu.c
@@ -758,11 +758,11 @@ static void kvm_riscv_put_regs_timer(CPUState *cs)
env->kvm_timer_dirty = false;
}
-uint64_t kvm_riscv_get_timebase_frequency(CPUState *cs)
+uint64_t kvm_riscv_get_timebase_frequency(RISCVCPU *cpu)
{
uint64_t reg;
- KVM_RISCV_GET_TIMER(cs, frequency, reg);
+ KVM_RISCV_GET_TIMER(CPU(cpu), frequency, reg);
return reg;
}
diff --git a/target/riscv/kvm/kvm_riscv.h b/target/riscv/kvm/kvm_riscv.h
index 5851898..b2bcd10 100644
--- a/target/riscv/kvm/kvm_riscv.h
+++ b/target/riscv/kvm/kvm_riscv.h
@@ -19,6 +19,8 @@
#ifndef QEMU_KVM_RISCV_H
#define QEMU_KVM_RISCV_H
+#include "target/riscv/cpu-qom.h"
+
void kvm_riscv_reset_vcpu(RISCVCPU *cpu);
void kvm_riscv_set_irq(RISCVCPU *cpu, int irq, int level);
void kvm_riscv_aia_create(MachineState *machine, uint64_t group_shift,
@@ -28,6 +30,6 @@ void kvm_riscv_aia_create(MachineState *machine, uint64_t group_shift,
void riscv_kvm_aplic_request(void *opaque, int irq, int level);
int kvm_riscv_sync_mpstate_to_kvm(RISCVCPU *cpu, int state);
void riscv_kvm_cpu_finalize_features(RISCVCPU *cpu, Error **errp);
-uint64_t kvm_riscv_get_timebase_frequency(CPUState *cs);
+uint64_t kvm_riscv_get_timebase_frequency(RISCVCPU *cpu);
#endif
diff --git a/target/riscv/machine.c b/target/riscv/machine.c
index b2e1f25..d844524 100644
--- a/target/riscv/machine.c
+++ b/target/riscv/machine.c
@@ -152,25 +152,15 @@ static const VMStateDescription vmstate_vector = {
static bool pointermasking_needed(void *opaque)
{
- RISCVCPU *cpu = opaque;
- CPURISCVState *env = &cpu->env;
-
- return riscv_has_ext(env, RVJ);
+ return false;
}
static const VMStateDescription vmstate_pointermasking = {
.name = "cpu/pointer_masking",
- .version_id = 1,
- .minimum_version_id = 1,
+ .version_id = 2,
+ .minimum_version_id = 2,
.needed = pointermasking_needed,
.fields = (const VMStateField[]) {
- VMSTATE_UINTTL(env.mmte, RISCVCPU),
- VMSTATE_UINTTL(env.mpmmask, RISCVCPU),
- VMSTATE_UINTTL(env.mpmbase, RISCVCPU),
- VMSTATE_UINTTL(env.spmmask, RISCVCPU),
- VMSTATE_UINTTL(env.spmbase, RISCVCPU),
- VMSTATE_UINTTL(env.upmmask, RISCVCPU),
- VMSTATE_UINTTL(env.upmbase, RISCVCPU),
VMSTATE_END_OF_LIST()
}
@@ -266,7 +256,6 @@ static int riscv_cpu_post_load(void *opaque, int version_id)
CPURISCVState *env = &cpu->env;
env->xl = cpu_recompute_xl(env);
- riscv_cpu_update_mask(env);
return 0;
}
@@ -434,6 +423,7 @@ const VMStateDescription vmstate_riscv_cpu = {
VMSTATE_UINTTL(env.siselect, RISCVCPU),
VMSTATE_UINT32(env.scounteren, RISCVCPU),
VMSTATE_UINT32(env.mcounteren, RISCVCPU),
+ VMSTATE_UINT32(env.scountinhibit, RISCVCPU),
VMSTATE_UINT32(env.mcountinhibit, RISCVCPU),
VMSTATE_STRUCT_ARRAY(env.pmu_ctrs, RISCVCPU, RV_MAX_MHPMCOUNTERS, 0,
vmstate_pmu_ctr_state, PMUCTRState),
diff --git a/target/riscv/op_helper.c b/target/riscv/op_helper.c
index eddedac..ce1256f 100644
--- a/target/riscv/op_helper.c
+++ b/target/riscv/op_helper.c
@@ -24,12 +24,19 @@
#include "exec/exec-all.h"
#include "exec/cpu_ldst.h"
#include "exec/helper-proto.h"
+#include "trace.h"
/* Exceptions processing helpers */
G_NORETURN void riscv_raise_exception(CPURISCVState *env,
- uint32_t exception, uintptr_t pc)
+ RISCVException exception,
+ uintptr_t pc)
{
CPUState *cs = env_cpu(env);
+
+ trace_riscv_exception(exception,
+ riscv_cpu_get_trap_name(exception, false),
+ env->pc);
+
cs->exception_index = exception;
cpu_loop_exit_restore(cs, pc);
}
@@ -287,6 +294,21 @@ target_ulong helper_sret(CPURISCVState *env)
get_field(mstatus, MSTATUS_SPIE));
mstatus = set_field(mstatus, MSTATUS_SPIE, 1);
mstatus = set_field(mstatus, MSTATUS_SPP, PRV_U);
+
+ if (riscv_cpu_cfg(env)->ext_ssdbltrp) {
+ if (riscv_has_ext(env, RVH)) {
+ target_ulong prev_vu = get_field(env->hstatus, HSTATUS_SPV) &&
+ prev_priv == PRV_U;
+ /* Returning to VU from HS, vsstatus.sdt = 0 */
+ if (!env->virt_enabled && prev_vu) {
+ env->vsstatus = set_field(env->vsstatus, MSTATUS_SDT, 0);
+ }
+ }
+ mstatus = set_field(mstatus, MSTATUS_SDT, 0);
+ }
+ if (riscv_cpu_cfg(env)->ext_smdbltrp && env->priv >= PRV_M) {
+ mstatus = set_field(mstatus, MSTATUS_MDT, 0);
+ }
if (env->priv_ver >= PRIV_VERSION_1_12_0) {
mstatus = set_field(mstatus, MSTATUS_MPRV, 0);
}
@@ -297,7 +319,6 @@ target_ulong helper_sret(CPURISCVState *env)
target_ulong hstatus = env->hstatus;
prev_virt = get_field(hstatus, HSTATUS_SPV);
-
hstatus = set_field(hstatus, HSTATUS_SPV, 0);
env->hstatus = hstatus;
@@ -321,24 +342,46 @@ target_ulong helper_sret(CPURISCVState *env)
return retpc;
}
-target_ulong helper_mret(CPURISCVState *env)
+static void check_ret_from_m_mode(CPURISCVState *env, target_ulong retpc,
+ target_ulong prev_priv)
{
if (!(env->priv >= PRV_M)) {
riscv_raise_exception(env, RISCV_EXCP_ILLEGAL_INST, GETPC());
}
- target_ulong retpc = env->mepc;
if (!riscv_has_ext(env, RVC) && (retpc & 0x3)) {
riscv_raise_exception(env, RISCV_EXCP_INST_ADDR_MIS, GETPC());
}
- uint64_t mstatus = env->mstatus;
- target_ulong prev_priv = get_field(mstatus, MSTATUS_MPP);
-
if (riscv_cpu_cfg(env)->pmp &&
!pmp_get_num_rules(env) && (prev_priv != PRV_M)) {
riscv_raise_exception(env, RISCV_EXCP_INST_ACCESS_FAULT, GETPC());
}
+}
+
+static target_ulong ssdbltrp_mxret(CPURISCVState *env, target_ulong mstatus,
+ target_ulong prev_priv,
+ target_ulong prev_virt)
+{
+ /* If returning to U, VS or VU, sstatus.sdt = 0 */
+ if (prev_priv == PRV_U || (prev_virt &&
+ (prev_priv == PRV_S || prev_priv == PRV_U))) {
+ mstatus = set_field(mstatus, MSTATUS_SDT, 0);
+ /* If returning to VU, vsstatus.sdt = 0 */
+ if (prev_virt && prev_priv == PRV_U) {
+ env->vsstatus = set_field(env->vsstatus, MSTATUS_SDT, 0);
+ }
+ }
+
+ return mstatus;
+}
+
+target_ulong helper_mret(CPURISCVState *env)
+{
+ target_ulong retpc = env->mepc;
+ uint64_t mstatus = env->mstatus;
+ target_ulong prev_priv = get_field(mstatus, MSTATUS_MPP);
+
+ check_ret_from_m_mode(env, retpc, prev_priv);
target_ulong prev_virt = get_field(env->mstatus, MSTATUS_MPV) &&
(prev_priv != PRV_M);
@@ -348,6 +391,12 @@ target_ulong helper_mret(CPURISCVState *env)
mstatus = set_field(mstatus, MSTATUS_MPP,
riscv_has_ext(env, RVU) ? PRV_U : PRV_M);
mstatus = set_field(mstatus, MSTATUS_MPV, 0);
+ if (riscv_cpu_cfg(env)->ext_ssdbltrp) {
+ mstatus = ssdbltrp_mxret(env, mstatus, prev_priv, prev_virt);
+ }
+ if (riscv_cpu_cfg(env)->ext_smdbltrp) {
+ mstatus = set_field(mstatus, MSTATUS_MDT, 0);
+ }
if ((env->priv_ver >= PRIV_VERSION_1_12_0) && (prev_priv != PRV_M)) {
mstatus = set_field(mstatus, MSTATUS_MPRV, 0);
}
@@ -370,6 +419,53 @@ target_ulong helper_mret(CPURISCVState *env)
return retpc;
}
+target_ulong helper_mnret(CPURISCVState *env)
+{
+ target_ulong retpc = env->mnepc;
+ target_ulong prev_priv = get_field(env->mnstatus, MNSTATUS_MNPP);
+ target_ulong prev_virt;
+
+ check_ret_from_m_mode(env, retpc, prev_priv);
+
+ prev_virt = get_field(env->mnstatus, MNSTATUS_MNPV) &&
+ (prev_priv != PRV_M);
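+ /* MNRET re-enables NMIs by setting mnstatus.NMIE */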
+ env->mnstatus = set_field(env->mnstatus, MNSTATUS_NMIE, true);
+
+ /*
+ * If MNRET changes the privilege mode to a mode
+ * less privileged than M, it also sets mstatus.MPRV to 0.
+ */
+ if (prev_priv < PRV_M) {
+ env->mstatus = set_field(env->mstatus, MSTATUS_MPRV, false);
+ }
+ if (riscv_cpu_cfg(env)->ext_ssdbltrp) {
+ env->mstatus = ssdbltrp_mxret(env, env->mstatus, prev_priv, prev_virt);
+ }
+
+ if (riscv_cpu_cfg(env)->ext_smdbltrp) {
+ if (prev_priv < PRV_M) {
+ env->mstatus = set_field(env->mstatus, MSTATUS_MDT, 0);
+ }
+ }
+
+ if (riscv_has_ext(env, RVH) && prev_virt) {
+ riscv_cpu_swap_hypervisor_regs(env);
+ }
+
+ riscv_cpu_set_mode(env, prev_priv, prev_virt);
+
+ /*
+ * If forward CFI is enabled for the new privilege mode, restore the elp
+ * state and clear mnpelp in mnstatus.
+ */
+ if (cpu_get_fcfien(env)) {
+ env->elp = get_field(env->mnstatus, MNSTATUS_MNPELP);
+ }
+ env->mnstatus = set_field(env->mnstatus, MNSTATUS_MNPELP, 0);
+
+ return retpc;
+}
+
void helper_wfi(CPURISCVState *env)
{
CPUState *cs = env_cpu(env);
@@ -472,7 +568,7 @@ target_ulong helper_hyp_hlv_bu(CPURISCVState *env, target_ulong addr)
int mmu_idx = check_access_hlsv(env, false, ra);
MemOpIdx oi = make_memop_idx(MO_UB, mmu_idx);
- return cpu_ldb_mmu(env, addr, oi, ra);
+ return cpu_ldb_mmu(env, adjust_addr_virt(env, addr), oi, ra);
}
target_ulong helper_hyp_hlv_hu(CPURISCVState *env, target_ulong addr)
@@ -481,7 +577,7 @@ target_ulong helper_hyp_hlv_hu(CPURISCVState *env, target_ulong addr)
int mmu_idx = check_access_hlsv(env, false, ra);
MemOpIdx oi = make_memop_idx(MO_TEUW, mmu_idx);
- return cpu_ldw_mmu(env, addr, oi, ra);
+ return cpu_ldw_mmu(env, adjust_addr_virt(env, addr), oi, ra);
}
target_ulong helper_hyp_hlv_wu(CPURISCVState *env, target_ulong addr)
@@ -490,7 +586,7 @@ target_ulong helper_hyp_hlv_wu(CPURISCVState *env, target_ulong addr)
int mmu_idx = check_access_hlsv(env, false, ra);
MemOpIdx oi = make_memop_idx(MO_TEUL, mmu_idx);
- return cpu_ldl_mmu(env, addr, oi, ra);
+ return cpu_ldl_mmu(env, adjust_addr_virt(env, addr), oi, ra);
}
target_ulong helper_hyp_hlv_d(CPURISCVState *env, target_ulong addr)
@@ -499,7 +595,7 @@ target_ulong helper_hyp_hlv_d(CPURISCVState *env, target_ulong addr)
int mmu_idx = check_access_hlsv(env, false, ra);
MemOpIdx oi = make_memop_idx(MO_TEUQ, mmu_idx);
- return cpu_ldq_mmu(env, addr, oi, ra);
+ return cpu_ldq_mmu(env, adjust_addr_virt(env, addr), oi, ra);
}
void helper_hyp_hsv_b(CPURISCVState *env, target_ulong addr, target_ulong val)
@@ -508,7 +604,7 @@ void helper_hyp_hsv_b(CPURISCVState *env, target_ulong addr, target_ulong val)
int mmu_idx = check_access_hlsv(env, false, ra);
MemOpIdx oi = make_memop_idx(MO_UB, mmu_idx);
- cpu_stb_mmu(env, addr, val, oi, ra);
+ cpu_stb_mmu(env, adjust_addr_virt(env, addr), val, oi, ra);
}
void helper_hyp_hsv_h(CPURISCVState *env, target_ulong addr, target_ulong val)
@@ -517,7 +613,7 @@ void helper_hyp_hsv_h(CPURISCVState *env, target_ulong addr, target_ulong val)
int mmu_idx = check_access_hlsv(env, false, ra);
MemOpIdx oi = make_memop_idx(MO_TEUW, mmu_idx);
- cpu_stw_mmu(env, addr, val, oi, ra);
+ cpu_stw_mmu(env, adjust_addr_virt(env, addr), val, oi, ra);
}
void helper_hyp_hsv_w(CPURISCVState *env, target_ulong addr, target_ulong val)
@@ -526,7 +622,7 @@ void helper_hyp_hsv_w(CPURISCVState *env, target_ulong addr, target_ulong val)
int mmu_idx = check_access_hlsv(env, false, ra);
MemOpIdx oi = make_memop_idx(MO_TEUL, mmu_idx);
- cpu_stl_mmu(env, addr, val, oi, ra);
+ cpu_stl_mmu(env, adjust_addr_virt(env, addr), val, oi, ra);
}
void helper_hyp_hsv_d(CPURISCVState *env, target_ulong addr, target_ulong val)
@@ -535,7 +631,7 @@ void helper_hyp_hsv_d(CPURISCVState *env, target_ulong addr, target_ulong val)
int mmu_idx = check_access_hlsv(env, false, ra);
MemOpIdx oi = make_memop_idx(MO_TEUQ, mmu_idx);
- cpu_stq_mmu(env, addr, val, oi, ra);
+ cpu_stq_mmu(env, adjust_addr_virt(env, addr), val, oi, ra);
}
/*
diff --git a/target/riscv/pmp.c b/target/riscv/pmp.c
index a1b36664..a185c24 100644
--- a/target/riscv/pmp.c
+++ b/target/riscv/pmp.c
@@ -575,6 +575,13 @@ target_ulong pmpaddr_csr_read(CPURISCVState *env, uint32_t addr_index)
void mseccfg_csr_write(CPURISCVState *env, target_ulong val)
{
int i;
+ uint64_t mask = MSECCFG_MMWP | MSECCFG_MML;
+ /* Update PMM field only if the value is valid according to Zjpm v1.0 */
+ if (riscv_cpu_cfg(env)->ext_smmpm &&
+ riscv_cpu_mxl(env) == MXL_RV64 &&
+ get_field(val, MSECCFG_PMM) != PMM_FIELD_RESERVED) {
+ mask |= MSECCFG_PMM;
+ }
trace_mseccfg_csr_write(env->mhartid, val);
@@ -590,12 +597,13 @@ void mseccfg_csr_write(CPURISCVState *env, target_ulong val)
if (riscv_cpu_cfg(env)->ext_smepmp) {
/* Sticky bits */
- val |= (env->mseccfg & (MSECCFG_MMWP | MSECCFG_MML));
- if ((val ^ env->mseccfg) & (MSECCFG_MMWP | MSECCFG_MML)) {
+ val |= (env->mseccfg & mask);
+ if ((val ^ env->mseccfg) & mask) {
tlb_flush(env_cpu(env));
}
} else {
- val &= ~(MSECCFG_MMWP | MSECCFG_MML | MSECCFG_RLB);
+ mask |= MSECCFG_RLB;
+ val &= ~(mask);
}
/* M-mode forward cfi to be enabled if cfi extension is implemented */
diff --git a/target/riscv/pmp.h b/target/riscv/pmp.h
index e0530a1..271cf24 100644
--- a/target/riscv/pmp.h
+++ b/target/riscv/pmp.h
@@ -46,6 +46,7 @@ typedef enum {
MSECCFG_USEED = 1 << 8,
MSECCFG_SSEED = 1 << 9,
MSECCFG_MLPE = 1 << 10,
+ MSECCFG_PMM = 3ULL << 32,
} mseccfg_field_t;
typedef struct {
diff --git a/target/riscv/tcg/tcg-cpu.c b/target/riscv/tcg/tcg-cpu.c
index 8b89c99..0a13728 100644
--- a/target/riscv/tcg/tcg-cpu.c
+++ b/target/riscv/tcg/tcg-cpu.c
@@ -212,6 +212,11 @@ static void riscv_cpu_enable_named_feat(RISCVCPU *cpu, uint32_t feat_offset)
cpu->cfg.cbop_blocksize = 64;
cpu->cfg.cboz_blocksize = 64;
break;
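+ /* 'sha' enables H (unless the user set it) and, via fall through, Smstateen */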
+ case CPU_CFG_OFFSET(ext_sha):
+ if (!cpu_misa_ext_is_user_set(RVH)) {
+ riscv_cpu_write_misa_bit(cpu, RVH, true);
+ }
+ /* fallthrough */
case CPU_CFG_OFFSET(ext_ssstateen):
cpu->cfg.ext_smstateen = true;
break;
@@ -352,6 +357,9 @@ static void riscv_cpu_update_named_features(RISCVCPU *cpu)
cpu->cfg.cboz_blocksize == 64;
cpu->cfg.ext_ssstateen = cpu->cfg.ext_smstateen;
+
+ cpu->cfg.ext_sha = riscv_has_ext(&cpu->env, RVH) &&
+ cpu->cfg.ext_ssstateen;
}
static void riscv_cpu_validate_g(RISCVCPU *cpu)
@@ -955,6 +963,20 @@ void riscv_tcg_cpu_finalize_features(RISCVCPU *cpu, Error **errp)
error_propagate(errp, local_err);
return;
}
+#ifndef CONFIG_USER_ONLY
+ if (cpu->cfg.pmu_mask) {
+ riscv_pmu_init(cpu, &local_err);
+ if (local_err != NULL) {
+ error_propagate(errp, local_err);
+ return;
+ }
+
+ if (cpu->cfg.ext_sscofpmf) {
+ cpu->pmu_timer = timer_new_ns(QEMU_CLOCK_VIRTUAL,
+ riscv_pmu_timer_cb, cpu);
+ }
+ }
+#endif
}
void riscv_tcg_cpu_finalize_dynamic_decoder(RISCVCPU *cpu)
@@ -1002,7 +1024,6 @@ static bool riscv_tcg_cpu_realize(CPUState *cs, Error **errp)
#ifndef CONFIG_USER_ONLY
CPURISCVState *env = &cpu->env;
- Error *local_err = NULL;
tcg_cflags_set(CPU(cs), CF_PCREL);
@@ -1010,19 +1031,6 @@ static bool riscv_tcg_cpu_realize(CPUState *cs, Error **errp)
riscv_timer_init(cpu);
}
- if (cpu->cfg.pmu_mask) {
- riscv_pmu_init(cpu, &local_err);
- if (local_err != NULL) {
- error_propagate(errp, local_err);
- return false;
- }
-
- if (cpu->cfg.ext_sscofpmf) {
- cpu->pmu_timer = timer_new_ns(QEMU_CLOCK_VIRTUAL,
- riscv_pmu_timer_cb, cpu);
- }
- }
-
/* With H-Ext, VSSIP, VSTIP, VSEIP and SGEIP are hardwired to one. */
if (riscv_has_ext(env, RVH)) {
env->mideleg = MIP_VSSIP | MIP_VSTIP | MIP_VSEIP | MIP_SGEIP;
@@ -1107,7 +1115,6 @@ static const RISCVCPUMisaExtConfig misa_ext_cfgs[] = {
MISA_CFG(RVS, true),
MISA_CFG(RVU, true),
MISA_CFG(RVH, true),
- MISA_CFG(RVJ, false),
MISA_CFG(RVV, false),
MISA_CFG(RVG, false),
MISA_CFG(RVB, false),
@@ -1394,8 +1401,8 @@ static void riscv_init_max_cpu_extensions(Object *obj)
CPURISCVState *env = &cpu->env;
const RISCVCPUMultiExtConfig *prop;
- /* Enable RVG, RVJ and RVV that are disabled by default */
- riscv_cpu_set_misa_ext(env, env->misa_ext | RVB | RVG | RVJ | RVV);
+ /* Enable RVG and RVV that are disabled by default */
+ riscv_cpu_set_misa_ext(env, env->misa_ext | RVB | RVG | RVV);
for (prop = riscv_cpu_extensions; prop && prop->name; prop++) {
isa_ext_update_enabled(cpu, prop->offset, true);
@@ -1423,6 +1430,25 @@ static void riscv_init_max_cpu_extensions(Object *obj)
if (env->misa_mxl != MXL_RV32) {
isa_ext_update_enabled(cpu, CPU_CFG_OFFSET(ext_zcf), false);
}
+
+ /*
+ * ext_smrnmi requires OpenSBI changes that our current
+ * image does not have. Disable it for now.
+ */
+ if (cpu->cfg.ext_smrnmi) {
+ isa_ext_update_enabled(cpu, CPU_CFG_OFFSET(ext_smrnmi), false);
+ qemu_log("Smrnmi is disabled in the 'max' type CPU\n");
+ }
+
+ /*
+ * ext_smdbltrp requires the firmware to clear MSTATUS.MDT on startup to
+ * avoid generating a double trap. OpenSBI does not currently support it,
+ * so disable it for now.
+ */
+ if (cpu->cfg.ext_smdbltrp) {
+ isa_ext_update_enabled(cpu, CPU_CFG_OFFSET(ext_smdbltrp), false);
+ qemu_log("Smdbltrp is disabled in the 'max' type CPU\n");
+ }
}
static bool riscv_cpu_has_max_extensions(Object *cpu_obj)
diff --git a/target/riscv/trace-events b/target/riscv/trace-events
index 49ec4d3..93837f8 100644
--- a/target/riscv/trace-events
+++ b/target/riscv/trace-events
@@ -9,3 +9,6 @@ pmpaddr_csr_write(uint64_t mhartid, uint32_t addr_index, uint64_t val) "hart %"
mseccfg_csr_read(uint64_t mhartid, uint64_t val) "hart %" PRIu64 ": read mseccfg, val: 0x%" PRIx64
mseccfg_csr_write(uint64_t mhartid, uint64_t val) "hart %" PRIu64 ": write mseccfg, val: 0x%" PRIx64
+
+# op_helper.c
+riscv_exception(uint32_t exception, const char *desc, uint64_t epc) "%u (%s) on epc 0x%"PRIx64""
diff --git a/target/riscv/translate.c b/target/riscv/translate.c
index a992d4f..698b74f 100644
--- a/target/riscv/translate.c
+++ b/target/riscv/translate.c
@@ -42,9 +42,6 @@ static TCGv cpu_gpr[32], cpu_gprh[32], cpu_pc, cpu_vl, cpu_vstart;
static TCGv_i64 cpu_fpr[32]; /* assume F and D extensions */
static TCGv load_res;
static TCGv load_val;
-/* globals for PM CSRs */
-static TCGv pm_mask;
-static TCGv pm_base;
/*
* If an operation is being performed on less than TARGET_LONG_BITS,
@@ -106,9 +103,9 @@ typedef struct DisasContext {
bool vl_eq_vlmax;
CPUState *cs;
TCGv zero;
- /* PointerMasking extension */
- bool pm_mask_enabled;
- bool pm_base_enabled;
+ /* actual address width */
+ uint8_t addr_xl;
+ bool addr_signed;
/* Ztso */
bool ztso;
/* Use icount trigger for native debug */
@@ -245,7 +242,7 @@ static void gen_update_pc(DisasContext *ctx, target_long diff)
ctx->pc_save = ctx->base.pc_next + diff;
}
-static void generate_exception(DisasContext *ctx, int excp)
+static void generate_exception(DisasContext *ctx, RISCVException excp)
{
gen_update_pc(ctx, 0);
gen_helper_raise_exception(tcg_env, tcg_constant_i32(excp));
@@ -592,13 +589,10 @@ static TCGv get_address(DisasContext *ctx, int rs1, int imm)
TCGv src1 = get_gpr(ctx, rs1, EXT_NONE);
tcg_gen_addi_tl(addr, src1, imm);
- if (ctx->pm_mask_enabled) {
- tcg_gen_andc_tl(addr, addr, pm_mask);
- } else if (get_address_xl(ctx) == MXL_RV32) {
- tcg_gen_ext32u_tl(addr, addr);
- }
- if (ctx->pm_base_enabled) {
- tcg_gen_or_tl(addr, addr, pm_base);
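+ /* Keep the low addr_xl bits, sign- or zero-extending them per Zjpm */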
+ if (ctx->addr_signed) {
+ tcg_gen_sextract_tl(addr, addr, 0, ctx->addr_xl);
+ } else {
+ tcg_gen_extract_tl(addr, addr, 0, ctx->addr_xl);
}
return addr;
@@ -611,14 +605,12 @@ static TCGv get_address_indexed(DisasContext *ctx, int rs1, TCGv offs)
TCGv src1 = get_gpr(ctx, rs1, EXT_NONE);
tcg_gen_add_tl(addr, src1, offs);
- if (ctx->pm_mask_enabled) {
- tcg_gen_andc_tl(addr, addr, pm_mask);
- } else if (get_xl(ctx) == MXL_RV32) {
- tcg_gen_ext32u_tl(addr, addr);
- }
- if (ctx->pm_base_enabled) {
- tcg_gen_or_tl(addr, addr, pm_base);
+ if (ctx->addr_signed) {
+ tcg_gen_sextract_tl(addr, addr, 0, ctx->addr_xl);
+ } else {
+ tcg_gen_extract_tl(addr, addr, 0, ctx->addr_xl);
}
+
return addr;
}
@@ -1246,8 +1238,14 @@ static void riscv_tr_init_disas_context(DisasContextBase *dcbase, CPUState *cs)
ctx->xl = FIELD_EX32(tb_flags, TB_FLAGS, XL);
ctx->address_xl = FIELD_EX32(tb_flags, TB_FLAGS, AXL);
ctx->cs = cs;
- ctx->pm_mask_enabled = FIELD_EX32(tb_flags, TB_FLAGS, PM_MASK_ENABLED);
- ctx->pm_base_enabled = FIELD_EX32(tb_flags, TB_FLAGS, PM_BASE_ENABLED);
+ if (get_xl(ctx) == MXL_RV32) {
+ ctx->addr_xl = 32;
+ ctx->addr_signed = false;
+ } else {
+ int pm_pmm = FIELD_EX32(tb_flags, TB_FLAGS, PM_PMM);
+ ctx->addr_xl = 64 - riscv_pm_get_pmlen(pm_pmm);
+ ctx->addr_signed = FIELD_EX32(tb_flags, TB_FLAGS, PM_SIGNEXTEND);
+ }
ctx->ztso = cpu->cfg.ext_ztso;
ctx->itrigger = FIELD_EX32(tb_flags, TB_FLAGS, ITRIGGER);
ctx->bcfi_enabled = FIELD_EX32(tb_flags, TB_FLAGS, BCFI_ENABLED);
@@ -1386,9 +1384,4 @@ void riscv_translate_init(void)
"load_res");
load_val = tcg_global_mem_new(tcg_env, offsetof(CPURISCVState, load_val),
"load_val");
- /* Assign PM CSRs to tcg globals */
- pm_mask = tcg_global_mem_new(tcg_env, offsetof(CPURISCVState, cur_pmmask),
- "pmmask");
- pm_base = tcg_global_mem_new(tcg_env, offsetof(CPURISCVState, cur_pmbase),
- "pmbase");
}
diff --git a/target/riscv/vector_helper.c b/target/riscv/vector_helper.c
index a85dd1d..5386e3b 100644
--- a/target/riscv/vector_helper.c
+++ b/target/riscv/vector_helper.c
@@ -105,11 +105,6 @@ static inline uint32_t vext_max_elems(uint32_t desc, uint32_t log2_esz)
return scale < 0 ? vlenb >> -scale : vlenb << scale;
}
-static inline target_ulong adjust_addr(CPURISCVState *env, target_ulong addr)
-{
- return (addr & ~env->cur_pmmask) | env->cur_pmbase;
-}
-
/*
* This function checks watchpoint before real load operation.
*
@@ -195,7 +190,7 @@ GEN_VEXT_ST_ELEM(ste_w, uint32_t, H4, stl)
GEN_VEXT_ST_ELEM(ste_d, uint64_t, H8, stq)
static inline QEMU_ALWAYS_INLINE void
-vext_continus_ldst_tlb(CPURISCVState *env, vext_ldst_elem_fn_tlb *ldst_tlb,
+vext_continuous_ldst_tlb(CPURISCVState *env, vext_ldst_elem_fn_tlb *ldst_tlb,
void *vd, uint32_t evl, target_ulong addr,
uint32_t reg_start, uintptr_t ra, uint32_t esz,
bool is_load)
@@ -207,7 +202,7 @@ vext_continus_ldst_tlb(CPURISCVState *env, vext_ldst_elem_fn_tlb *ldst_tlb,
}
static inline QEMU_ALWAYS_INLINE void
-vext_continus_ldst_host(CPURISCVState *env, vext_ldst_elem_fn_host *ldst_host,
+vext_continuous_ldst_host(CPURISCVState *env, vext_ldst_elem_fn_host *ldst_host,
void *vd, uint32_t evl, uint32_t reg_start, void *host,
uint32_t esz, bool is_load)
{
@@ -342,8 +337,8 @@ vext_page_ldst_us(CPURISCVState *env, void *vd, target_ulong addr,
if (flags == 0) {
if (nf == 1) {
- vext_continus_ldst_host(env, ldst_host, vd, evl, env->vstart, host,
- esz, is_load);
+ vext_continuous_ldst_host(env, ldst_host, vd, evl, env->vstart,
+ host, esz, is_load);
} else {
for (i = env->vstart; i < evl; ++i) {
k = 0;
@@ -357,7 +352,7 @@ vext_page_ldst_us(CPURISCVState *env, void *vd, target_ulong addr,
env->vstart += elems;
} else {
if (nf == 1) {
- vext_continus_ldst_tlb(env, ldst_tlb, vd, evl, addr, env->vstart,
+ vext_continuous_ldst_tlb(env, ldst_tlb, vd, evl, addr, env->vstart,
ra, esz, is_load);
} else {
/* load bytes from guest memory */
@@ -393,6 +388,22 @@ vext_ldst_us(void *vd, target_ulong base, CPURISCVState *env, uint32_t desc,
return;
}
+#if defined(CONFIG_USER_ONLY)
+ /*
+ * For data sizes <= 6 bytes we get better performance by simply calling
+ * vext_continuous_ldst_tlb
+ */
+ if (nf == 1 && (evl << log2_esz) <= 6) {
+ addr = base + (env->vstart << log2_esz);
+ vext_continuous_ldst_tlb(env, ldst_tlb, vd, evl, addr, env->vstart, ra,
+ esz, is_load);
+
+ env->vstart = 0;
+ vext_set_tail_elems_1s(evl, vd, desc, nf, esz, max_elems);
+ return;
+ }
+#endif
+
/* Calculate the page range of first page */
addr = base + ((env->vstart * nf) << log2_esz);
page_split = -(addr | TARGET_PAGE_MASK);