 configs/targets/riscv64-softmmu.mak |   1
 hw/riscv/virt.c                     |  38
 include/hw/riscv/virt.h             |   1
 target/riscv/cpu.c                  | 404
 target/riscv/cpu.h                  |  28
 target/riscv/cpu_bits.h             |   8
 target/riscv/cpu_cfg.h              |   1
 target/riscv/csr.c                  |  92
 target/riscv/debug.c                | 129
 target/riscv/fpu_helper.c           |   2
 target/riscv/kvm/kvm-cpu.c          |  89
 target/riscv/tcg/tcg-cpu.c          | 287
 12 files changed, 877 insertions(+), 203 deletions(-)
diff --git a/configs/targets/riscv64-softmmu.mak b/configs/targets/riscv64-softmmu.mak
index f688ffa..917980e 100644
--- a/configs/targets/riscv64-softmmu.mak
+++ b/configs/targets/riscv64-softmmu.mak
@@ -1,6 +1,7 @@
TARGET_ARCH=riscv64
TARGET_BASE_ARCH=riscv
TARGET_SUPPORTS_MTTCG=y
+TARGET_KVM_HAVE_GUEST_DEBUG=y
TARGET_XML_FILES= gdb-xml/riscv-64bit-cpu.xml gdb-xml/riscv-32bit-fpu.xml gdb-xml/riscv-64bit-fpu.xml gdb-xml/riscv-64bit-virtual.xml
# needed by boot.c
TARGET_NEED_FDT=y
diff --git a/hw/riscv/virt.c b/hw/riscv/virt.c
index 5676d66..bc0893e 100644
--- a/hw/riscv/virt.c
+++ b/hw/riscv/virt.c
@@ -515,6 +515,9 @@ static void create_fdt_one_imsic(RISCVVirtState *s, hwaddr base_addr,
uint32_t imsic_max_hart_per_socket, imsic_addr, imsic_size;
g_autofree uint32_t *imsic_cells = NULL;
g_autofree uint32_t *imsic_regs = NULL;
+ static const char * const imsic_compat[2] = {
+ "qemu,imsics", "riscv,imsics"
+ };
imsic_cells = g_new0(uint32_t, ms->smp.cpus * 2);
imsic_regs = g_new0(uint32_t, socket_count * 4);
@@ -538,13 +541,18 @@ static void create_fdt_one_imsic(RISCVVirtState *s, hwaddr base_addr,
}
}
- imsic_name = g_strdup_printf("/soc/imsics@%lx", (unsigned long)base_addr);
+ imsic_name = g_strdup_printf("/soc/interrupt-controller@%lx",
+ (unsigned long)base_addr);
qemu_fdt_add_subnode(ms->fdt, imsic_name);
- qemu_fdt_setprop_string(ms->fdt, imsic_name, "compatible", "riscv,imsics");
+ qemu_fdt_setprop_string_array(ms->fdt, imsic_name, "compatible",
+ (char **)&imsic_compat,
+ ARRAY_SIZE(imsic_compat));
+
qemu_fdt_setprop_cell(ms->fdt, imsic_name, "#interrupt-cells",
FDT_IMSIC_INT_CELLS);
qemu_fdt_setprop(ms->fdt, imsic_name, "interrupt-controller", NULL, 0);
qemu_fdt_setprop(ms->fdt, imsic_name, "msi-controller", NULL, 0);
+ qemu_fdt_setprop_cell(ms->fdt, imsic_name, "#msi-cells", 0);
qemu_fdt_setprop(ms->fdt, imsic_name, "interrupts-extended",
imsic_cells, ms->smp.cpus * sizeof(uint32_t) * 2);
qemu_fdt_setprop(ms->fdt, imsic_name, "reg", imsic_regs,
@@ -588,6 +596,12 @@ static void create_fdt_imsic(RISCVVirtState *s, const MemMapEntry *memmap,
}
+/* Caller must free string after use */
+static char *fdt_get_aplic_nodename(unsigned long aplic_addr)
+{
+ return g_strdup_printf("/soc/interrupt-controller@%lx", aplic_addr);
+}
+
static void create_fdt_one_aplic(RISCVVirtState *s, int socket,
unsigned long aplic_addr, uint32_t aplic_size,
uint32_t msi_phandle,
@@ -597,18 +611,24 @@ static void create_fdt_one_aplic(RISCVVirtState *s, int socket,
bool m_mode, int num_harts)
{
int cpu;
- g_autofree char *aplic_name = NULL;
+ g_autofree char *aplic_name = fdt_get_aplic_nodename(aplic_addr);
g_autofree uint32_t *aplic_cells = g_new0(uint32_t, num_harts * 2);
MachineState *ms = MACHINE(s);
+ static const char * const aplic_compat[2] = {
+ "qemu,aplic", "riscv,aplic"
+ };
for (cpu = 0; cpu < num_harts; cpu++) {
aplic_cells[cpu * 2 + 0] = cpu_to_be32(intc_phandles[cpu]);
aplic_cells[cpu * 2 + 1] = cpu_to_be32(m_mode ? IRQ_M_EXT : IRQ_S_EXT);
}
- aplic_name = g_strdup_printf("/soc/aplic@%lx", aplic_addr);
qemu_fdt_add_subnode(ms->fdt, aplic_name);
- qemu_fdt_setprop_string(ms->fdt, aplic_name, "compatible", "riscv,aplic");
+ qemu_fdt_setprop_string_array(ms->fdt, aplic_name, "compatible",
+ (char **)&aplic_compat,
+ ARRAY_SIZE(aplic_compat));
+ qemu_fdt_setprop_cell(ms->fdt, aplic_name, "#address-cells",
+ FDT_APLIC_ADDR_CELLS);
qemu_fdt_setprop_cell(ms->fdt, aplic_name,
"#interrupt-cells", FDT_APLIC_INT_CELLS);
qemu_fdt_setprop(ms->fdt, aplic_name, "interrupt-controller", NULL, 0);
@@ -628,7 +648,7 @@ static void create_fdt_one_aplic(RISCVVirtState *s, int socket,
if (aplic_child_phandle) {
qemu_fdt_setprop_cell(ms->fdt, aplic_name, "riscv,children",
aplic_child_phandle);
- qemu_fdt_setprop_cells(ms->fdt, aplic_name, "riscv,delegate",
+ qemu_fdt_setprop_cells(ms->fdt, aplic_name, "riscv,delegation",
aplic_child_phandle, 0x1,
VIRT_IRQCHIP_NUM_SOURCES);
}
@@ -646,7 +666,6 @@ static void create_fdt_socket_aplic(RISCVVirtState *s,
uint32_t *aplic_phandles,
int num_harts)
{
- g_autofree char *aplic_name = NULL;
unsigned long aplic_addr;
MachineState *ms = MACHINE(s);
uint32_t aplic_m_phandle, aplic_s_phandle;
@@ -672,9 +691,8 @@ static void create_fdt_socket_aplic(RISCVVirtState *s,
aplic_s_phandle, 0,
false, num_harts);
- aplic_name = g_strdup_printf("/soc/aplic@%lx", aplic_addr);
-
if (!socket) {
+ g_autofree char *aplic_name = fdt_get_aplic_nodename(aplic_addr);
platform_bus_add_all_fdt_nodes(ms->fdt, aplic_name,
memmap[VIRT_PLATFORM_BUS].base,
memmap[VIRT_PLATFORM_BUS].size,
@@ -1746,6 +1764,8 @@ static void virt_machine_class_init(ObjectClass *oc, void *data)
mc->init = virt_machine_init;
mc->max_cpus = VIRT_CPUS_MAX;
mc->default_cpu_type = TYPE_RISCV_CPU_BASE;
+ mc->block_default_type = IF_VIRTIO;
+ mc->no_cdrom = 1;
mc->pci_allow_0_address = true;
mc->possible_cpu_arch_ids = riscv_numa_possible_cpu_arch_ids;
mc->cpu_index_to_instance_props = riscv_numa_cpu_index_to_props;
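
The virt.c hunks above switch the IMSIC and APLIC nodes to a two-entry "compatible" list ("qemu,imsics"/"riscv,imsics" and "qemu,aplic"/"riscv,aplic", most specific first). In a flattened device tree such a list is encoded as back-to-back NUL-terminated strings, which is what qemu_fdt_setprop_string_array ultimately emits. A minimal standalone sketch of that encoding (plain C for illustration, not QEMU code):

#include <stdio.h>
#include <string.h>

int main(void)
{
    /* Two-entry compatible list for the APLIC node, most specific first. */
    static const char *const compat[] = { "qemu,aplic", "riscv,aplic" };
    unsigned char prop[64];
    size_t len = 0;

    /* FDT string lists are simply concatenated NUL-terminated strings. */
    for (size_t i = 0; i < sizeof(compat) / sizeof(compat[0]); i++) {
        size_t n = strlen(compat[i]) + 1;      /* keep the trailing NUL */
        memcpy(prop + len, compat[i], n);
        len += n;
    }

    printf("compatible property length: %zu bytes\n", len);   /* 23 */
    return 0;
}
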
diff --git a/include/hw/riscv/virt.h b/include/hw/riscv/virt.h
index 3db8391..c0dc41f 100644
--- a/include/hw/riscv/virt.h
+++ b/include/hw/riscv/virt.h
@@ -118,6 +118,7 @@ enum {
#define FDT_PLIC_ADDR_CELLS 0
#define FDT_PLIC_INT_CELLS 1
#define FDT_APLIC_INT_CELLS 2
+#define FDT_APLIC_ADDR_CELLS 0
#define FDT_IMSIC_INT_CELLS 0
#define FDT_MAX_INT_CELLS 2
#define FDT_MAX_INT_MAP_WIDTH (FDT_PCI_ADDR_CELLS + FDT_PCI_INT_CELLS + \
diff --git a/target/riscv/cpu.c b/target/riscv/cpu.c
index 69a08e8..a2640cf 100644
--- a/target/riscv/cpu.c
+++ b/target/riscv/cpu.c
@@ -1779,7 +1779,9 @@ static int priv_spec_from_str(const char *priv_spec_str)
{
int priv_version = -1;
- if (!g_strcmp0(priv_spec_str, PRIV_VER_1_12_0_STR)) {
+ if (!g_strcmp0(priv_spec_str, PRIV_VER_1_13_0_STR)) {
+ priv_version = PRIV_VERSION_1_13_0;
+ } else if (!g_strcmp0(priv_spec_str, PRIV_VER_1_12_0_STR)) {
priv_version = PRIV_VERSION_1_12_0;
} else if (!g_strcmp0(priv_spec_str, PRIV_VER_1_11_0_STR)) {
priv_version = PRIV_VERSION_1_11_0;
@@ -1790,7 +1792,7 @@ static int priv_spec_from_str(const char *priv_spec_str)
return priv_version;
}
-static const char *priv_spec_to_str(int priv_version)
+const char *priv_spec_to_str(int priv_version)
{
switch (priv_version) {
case PRIV_VERSION_1_10_0:
@@ -1799,6 +1801,8 @@ static const char *priv_spec_to_str(int priv_version)
return PRIV_VER_1_11_0_STR;
case PRIV_VERSION_1_12_0:
return PRIV_VER_1_12_0_STR;
+ case PRIV_VERSION_1_13_0:
+ return PRIV_VER_1_13_0_STR;
default:
return NULL;
}
@@ -2246,6 +2250,402 @@ RISCVCPUProfile *riscv_profiles[] = {
NULL,
};
+static RISCVCPUImpliedExtsRule RVA_IMPLIED = {
+ .is_misa = true,
+ .ext = RVA,
+ .implied_multi_exts = {
+ CPU_CFG_OFFSET(ext_zalrsc), CPU_CFG_OFFSET(ext_zaamo),
+
+ RISCV_IMPLIED_EXTS_RULE_END
+ },
+};
+
+static RISCVCPUImpliedExtsRule RVD_IMPLIED = {
+ .is_misa = true,
+ .ext = RVD,
+ .implied_misa_exts = RVF,
+ .implied_multi_exts = { RISCV_IMPLIED_EXTS_RULE_END },
+};
+
+static RISCVCPUImpliedExtsRule RVF_IMPLIED = {
+ .is_misa = true,
+ .ext = RVF,
+ .implied_multi_exts = {
+ CPU_CFG_OFFSET(ext_zicsr),
+
+ RISCV_IMPLIED_EXTS_RULE_END
+ },
+};
+
+static RISCVCPUImpliedExtsRule RVM_IMPLIED = {
+ .is_misa = true,
+ .ext = RVM,
+ .implied_multi_exts = {
+ CPU_CFG_OFFSET(ext_zmmul),
+
+ RISCV_IMPLIED_EXTS_RULE_END
+ },
+};
+
+static RISCVCPUImpliedExtsRule RVV_IMPLIED = {
+ .is_misa = true,
+ .ext = RVV,
+ .implied_multi_exts = {
+ CPU_CFG_OFFSET(ext_zve64d),
+
+ RISCV_IMPLIED_EXTS_RULE_END
+ },
+};
+
+static RISCVCPUImpliedExtsRule ZCB_IMPLIED = {
+ .ext = CPU_CFG_OFFSET(ext_zcb),
+ .implied_multi_exts = {
+ CPU_CFG_OFFSET(ext_zca),
+
+ RISCV_IMPLIED_EXTS_RULE_END
+ },
+};
+
+static RISCVCPUImpliedExtsRule ZCD_IMPLIED = {
+ .ext = CPU_CFG_OFFSET(ext_zcd),
+ .implied_misa_exts = RVD,
+ .implied_multi_exts = {
+ CPU_CFG_OFFSET(ext_zca),
+
+ RISCV_IMPLIED_EXTS_RULE_END
+ },
+};
+
+static RISCVCPUImpliedExtsRule ZCE_IMPLIED = {
+ .ext = CPU_CFG_OFFSET(ext_zce),
+ .implied_multi_exts = {
+ CPU_CFG_OFFSET(ext_zcb), CPU_CFG_OFFSET(ext_zcmp),
+ CPU_CFG_OFFSET(ext_zcmt),
+
+ RISCV_IMPLIED_EXTS_RULE_END
+ },
+};
+
+static RISCVCPUImpliedExtsRule ZCF_IMPLIED = {
+ .ext = CPU_CFG_OFFSET(ext_zcf),
+ .implied_misa_exts = RVF,
+ .implied_multi_exts = {
+ CPU_CFG_OFFSET(ext_zca),
+
+ RISCV_IMPLIED_EXTS_RULE_END
+ },
+};
+
+static RISCVCPUImpliedExtsRule ZCMP_IMPLIED = {
+ .ext = CPU_CFG_OFFSET(ext_zcmp),
+ .implied_multi_exts = {
+ CPU_CFG_OFFSET(ext_zca),
+
+ RISCV_IMPLIED_EXTS_RULE_END
+ },
+};
+
+static RISCVCPUImpliedExtsRule ZCMT_IMPLIED = {
+ .ext = CPU_CFG_OFFSET(ext_zcmt),
+ .implied_multi_exts = {
+ CPU_CFG_OFFSET(ext_zca), CPU_CFG_OFFSET(ext_zicsr),
+
+ RISCV_IMPLIED_EXTS_RULE_END
+ },
+};
+
+static RISCVCPUImpliedExtsRule ZDINX_IMPLIED = {
+ .ext = CPU_CFG_OFFSET(ext_zdinx),
+ .implied_multi_exts = {
+ CPU_CFG_OFFSET(ext_zfinx),
+
+ RISCV_IMPLIED_EXTS_RULE_END
+ },
+};
+
+static RISCVCPUImpliedExtsRule ZFA_IMPLIED = {
+ .ext = CPU_CFG_OFFSET(ext_zfa),
+ .implied_misa_exts = RVF,
+ .implied_multi_exts = { RISCV_IMPLIED_EXTS_RULE_END },
+};
+
+static RISCVCPUImpliedExtsRule ZFBFMIN_IMPLIED = {
+ .ext = CPU_CFG_OFFSET(ext_zfbfmin),
+ .implied_misa_exts = RVF,
+ .implied_multi_exts = { RISCV_IMPLIED_EXTS_RULE_END },
+};
+
+static RISCVCPUImpliedExtsRule ZFH_IMPLIED = {
+ .ext = CPU_CFG_OFFSET(ext_zfh),
+ .implied_multi_exts = {
+ CPU_CFG_OFFSET(ext_zfhmin),
+
+ RISCV_IMPLIED_EXTS_RULE_END
+ },
+};
+
+static RISCVCPUImpliedExtsRule ZFHMIN_IMPLIED = {
+ .ext = CPU_CFG_OFFSET(ext_zfhmin),
+ .implied_misa_exts = RVF,
+ .implied_multi_exts = { RISCV_IMPLIED_EXTS_RULE_END },
+};
+
+static RISCVCPUImpliedExtsRule ZFINX_IMPLIED = {
+ .ext = CPU_CFG_OFFSET(ext_zfinx),
+ .implied_multi_exts = {
+ CPU_CFG_OFFSET(ext_zicsr),
+
+ RISCV_IMPLIED_EXTS_RULE_END
+ },
+};
+
+static RISCVCPUImpliedExtsRule ZHINX_IMPLIED = {
+ .ext = CPU_CFG_OFFSET(ext_zhinx),
+ .implied_multi_exts = {
+ CPU_CFG_OFFSET(ext_zhinxmin),
+
+ RISCV_IMPLIED_EXTS_RULE_END
+ },
+};
+
+static RISCVCPUImpliedExtsRule ZHINXMIN_IMPLIED = {
+ .ext = CPU_CFG_OFFSET(ext_zhinxmin),
+ .implied_multi_exts = {
+ CPU_CFG_OFFSET(ext_zfinx),
+
+ RISCV_IMPLIED_EXTS_RULE_END
+ },
+};
+
+static RISCVCPUImpliedExtsRule ZICNTR_IMPLIED = {
+ .ext = CPU_CFG_OFFSET(ext_zicntr),
+ .implied_multi_exts = {
+ CPU_CFG_OFFSET(ext_zicsr),
+
+ RISCV_IMPLIED_EXTS_RULE_END
+ },
+};
+
+static RISCVCPUImpliedExtsRule ZIHPM_IMPLIED = {
+ .ext = CPU_CFG_OFFSET(ext_zihpm),
+ .implied_multi_exts = {
+ CPU_CFG_OFFSET(ext_zicsr),
+
+ RISCV_IMPLIED_EXTS_RULE_END
+ },
+};
+
+static RISCVCPUImpliedExtsRule ZK_IMPLIED = {
+ .ext = CPU_CFG_OFFSET(ext_zk),
+ .implied_multi_exts = {
+ CPU_CFG_OFFSET(ext_zkn), CPU_CFG_OFFSET(ext_zkr),
+ CPU_CFG_OFFSET(ext_zkt),
+
+ RISCV_IMPLIED_EXTS_RULE_END
+ },
+};
+
+static RISCVCPUImpliedExtsRule ZKN_IMPLIED = {
+ .ext = CPU_CFG_OFFSET(ext_zkn),
+ .implied_multi_exts = {
+ CPU_CFG_OFFSET(ext_zbkb), CPU_CFG_OFFSET(ext_zbkc),
+ CPU_CFG_OFFSET(ext_zbkx), CPU_CFG_OFFSET(ext_zkne),
+ CPU_CFG_OFFSET(ext_zknd), CPU_CFG_OFFSET(ext_zknh),
+
+ RISCV_IMPLIED_EXTS_RULE_END
+ },
+};
+
+static RISCVCPUImpliedExtsRule ZKS_IMPLIED = {
+ .ext = CPU_CFG_OFFSET(ext_zks),
+ .implied_multi_exts = {
+ CPU_CFG_OFFSET(ext_zbkb), CPU_CFG_OFFSET(ext_zbkc),
+ CPU_CFG_OFFSET(ext_zbkx), CPU_CFG_OFFSET(ext_zksed),
+ CPU_CFG_OFFSET(ext_zksh),
+
+ RISCV_IMPLIED_EXTS_RULE_END
+ },
+};
+
+static RISCVCPUImpliedExtsRule ZVBB_IMPLIED = {
+ .ext = CPU_CFG_OFFSET(ext_zvbb),
+ .implied_multi_exts = {
+ CPU_CFG_OFFSET(ext_zvkb),
+
+ RISCV_IMPLIED_EXTS_RULE_END
+ },
+};
+
+static RISCVCPUImpliedExtsRule ZVE32F_IMPLIED = {
+ .ext = CPU_CFG_OFFSET(ext_zve32f),
+ .implied_misa_exts = RVF,
+ .implied_multi_exts = {
+ CPU_CFG_OFFSET(ext_zve32x),
+
+ RISCV_IMPLIED_EXTS_RULE_END
+ },
+};
+
+static RISCVCPUImpliedExtsRule ZVE32X_IMPLIED = {
+ .ext = CPU_CFG_OFFSET(ext_zve32x),
+ .implied_multi_exts = {
+ CPU_CFG_OFFSET(ext_zicsr),
+
+ RISCV_IMPLIED_EXTS_RULE_END
+ },
+};
+
+static RISCVCPUImpliedExtsRule ZVE64D_IMPLIED = {
+ .ext = CPU_CFG_OFFSET(ext_zve64d),
+ .implied_misa_exts = RVD,
+ .implied_multi_exts = {
+ CPU_CFG_OFFSET(ext_zve64f),
+
+ RISCV_IMPLIED_EXTS_RULE_END
+ },
+};
+
+static RISCVCPUImpliedExtsRule ZVE64F_IMPLIED = {
+ .ext = CPU_CFG_OFFSET(ext_zve64f),
+ .implied_misa_exts = RVF,
+ .implied_multi_exts = {
+ CPU_CFG_OFFSET(ext_zve32f), CPU_CFG_OFFSET(ext_zve64x),
+
+ RISCV_IMPLIED_EXTS_RULE_END
+ },
+};
+
+static RISCVCPUImpliedExtsRule ZVE64X_IMPLIED = {
+ .ext = CPU_CFG_OFFSET(ext_zve64x),
+ .implied_multi_exts = {
+ CPU_CFG_OFFSET(ext_zve32x),
+
+ RISCV_IMPLIED_EXTS_RULE_END
+ },
+};
+
+static RISCVCPUImpliedExtsRule ZVFBFMIN_IMPLIED = {
+ .ext = CPU_CFG_OFFSET(ext_zvfbfmin),
+ .implied_multi_exts = {
+ CPU_CFG_OFFSET(ext_zve32f),
+
+ RISCV_IMPLIED_EXTS_RULE_END
+ },
+};
+
+static RISCVCPUImpliedExtsRule ZVFBFWMA_IMPLIED = {
+ .ext = CPU_CFG_OFFSET(ext_zvfbfwma),
+ .implied_multi_exts = {
+ CPU_CFG_OFFSET(ext_zvfbfmin), CPU_CFG_OFFSET(ext_zfbfmin),
+
+ RISCV_IMPLIED_EXTS_RULE_END
+ },
+};
+
+static RISCVCPUImpliedExtsRule ZVFH_IMPLIED = {
+ .ext = CPU_CFG_OFFSET(ext_zvfh),
+ .implied_multi_exts = {
+ CPU_CFG_OFFSET(ext_zvfhmin), CPU_CFG_OFFSET(ext_zfhmin),
+
+ RISCV_IMPLIED_EXTS_RULE_END
+ },
+};
+
+static RISCVCPUImpliedExtsRule ZVFHMIN_IMPLIED = {
+ .ext = CPU_CFG_OFFSET(ext_zvfhmin),
+ .implied_multi_exts = {
+ CPU_CFG_OFFSET(ext_zve32f),
+
+ RISCV_IMPLIED_EXTS_RULE_END
+ },
+};
+
+static RISCVCPUImpliedExtsRule ZVKN_IMPLIED = {
+ .ext = CPU_CFG_OFFSET(ext_zvkn),
+ .implied_multi_exts = {
+ CPU_CFG_OFFSET(ext_zvkned), CPU_CFG_OFFSET(ext_zvknhb),
+ CPU_CFG_OFFSET(ext_zvkb), CPU_CFG_OFFSET(ext_zvkt),
+
+ RISCV_IMPLIED_EXTS_RULE_END
+ },
+};
+
+static RISCVCPUImpliedExtsRule ZVKNC_IMPLIED = {
+ .ext = CPU_CFG_OFFSET(ext_zvknc),
+ .implied_multi_exts = {
+ CPU_CFG_OFFSET(ext_zvkn), CPU_CFG_OFFSET(ext_zvbc),
+
+ RISCV_IMPLIED_EXTS_RULE_END
+ },
+};
+
+static RISCVCPUImpliedExtsRule ZVKNG_IMPLIED = {
+ .ext = CPU_CFG_OFFSET(ext_zvkng),
+ .implied_multi_exts = {
+ CPU_CFG_OFFSET(ext_zvkn), CPU_CFG_OFFSET(ext_zvkg),
+
+ RISCV_IMPLIED_EXTS_RULE_END
+ },
+};
+
+static RISCVCPUImpliedExtsRule ZVKNHB_IMPLIED = {
+ .ext = CPU_CFG_OFFSET(ext_zvknhb),
+ .implied_multi_exts = {
+ CPU_CFG_OFFSET(ext_zve64x),
+
+ RISCV_IMPLIED_EXTS_RULE_END
+ },
+};
+
+static RISCVCPUImpliedExtsRule ZVKS_IMPLIED = {
+ .ext = CPU_CFG_OFFSET(ext_zvks),
+ .implied_multi_exts = {
+ CPU_CFG_OFFSET(ext_zvksed), CPU_CFG_OFFSET(ext_zvksh),
+ CPU_CFG_OFFSET(ext_zvkb), CPU_CFG_OFFSET(ext_zvkt),
+
+ RISCV_IMPLIED_EXTS_RULE_END
+ },
+};
+
+static RISCVCPUImpliedExtsRule ZVKSC_IMPLIED = {
+ .ext = CPU_CFG_OFFSET(ext_zvksc),
+ .implied_multi_exts = {
+ CPU_CFG_OFFSET(ext_zvks), CPU_CFG_OFFSET(ext_zvbc),
+
+ RISCV_IMPLIED_EXTS_RULE_END
+ },
+};
+
+static RISCVCPUImpliedExtsRule ZVKSG_IMPLIED = {
+ .ext = CPU_CFG_OFFSET(ext_zvksg),
+ .implied_multi_exts = {
+ CPU_CFG_OFFSET(ext_zvks), CPU_CFG_OFFSET(ext_zvkg),
+
+ RISCV_IMPLIED_EXTS_RULE_END
+ },
+};
+
+RISCVCPUImpliedExtsRule *riscv_misa_ext_implied_rules[] = {
+ &RVA_IMPLIED, &RVD_IMPLIED, &RVF_IMPLIED,
+ &RVM_IMPLIED, &RVV_IMPLIED, NULL
+};
+
+RISCVCPUImpliedExtsRule *riscv_multi_ext_implied_rules[] = {
+ &ZCB_IMPLIED, &ZCD_IMPLIED, &ZCE_IMPLIED,
+ &ZCF_IMPLIED, &ZCMP_IMPLIED, &ZCMT_IMPLIED,
+ &ZDINX_IMPLIED, &ZFA_IMPLIED, &ZFBFMIN_IMPLIED,
+ &ZFH_IMPLIED, &ZFHMIN_IMPLIED, &ZFINX_IMPLIED,
+ &ZHINX_IMPLIED, &ZHINXMIN_IMPLIED, &ZICNTR_IMPLIED,
+ &ZIHPM_IMPLIED, &ZK_IMPLIED, &ZKN_IMPLIED,
+ &ZKS_IMPLIED, &ZVBB_IMPLIED, &ZVE32F_IMPLIED,
+ &ZVE32X_IMPLIED, &ZVE64D_IMPLIED, &ZVE64F_IMPLIED,
+ &ZVE64X_IMPLIED, &ZVFBFMIN_IMPLIED, &ZVFBFWMA_IMPLIED,
+ &ZVFH_IMPLIED, &ZVFHMIN_IMPLIED, &ZVKN_IMPLIED,
+ &ZVKNC_IMPLIED, &ZVKNG_IMPLIED, &ZVKNHB_IMPLIED,
+ &ZVKS_IMPLIED, &ZVKSC_IMPLIED, &ZVKSG_IMPLIED,
+ NULL
+};
+
static Property riscv_cpu_properties[] = {
DEFINE_PROP_BOOL("debug", RISCVCPU, cfg.debug, true),
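
The rule tables above are consumed by a generic resolver in tcg/tcg-cpu.c (cpu_enable_implied_rule), which walks implied_misa_exts and implied_multi_exts recursively so that enabling one extension pulls in everything it implies. A simplified standalone model of that resolution, reusing the Zvknc chain from the tables (the struct and lookup here are illustrative only, not QEMU's types):

#include <stdio.h>
#include <string.h>

struct rule {
    const char *ext;          /* extension the rule fires for */
    const char *implies[8];   /* NULL-terminated list of implied extensions */
};

/* Simplified excerpt of the Zvknc -> Zvkn -> ... chain from the diff. */
static const struct rule rules[] = {
    { "zvknc",  { "zvkn", "zvbc", NULL } },
    { "zvkn",   { "zvkned", "zvknhb", "zvkb", "zvkt", NULL } },
    { "zvknhb", { "zve64x", NULL } },
    { "zve64x", { "zve32x", NULL } },
};

static void enable(const char *ext)
{
    printf("enable %s\n", ext);
    for (size_t i = 0; i < sizeof(rules) / sizeof(rules[0]); i++) {
        if (strcmp(rules[i].ext, ext) == 0) {
            for (int j = 0; rules[i].implies[j]; j++) {
                enable(rules[i].implies[j]);   /* recurse into implied exts */
            }
        }
    }
}

int main(void)
{
    /* Enabling Zvknc transitively enables the whole vector-crypto chain. */
    enable("zvknc");
    return 0;
}

QEMU's resolver additionally keeps a per-hart "enabled" bitmap (the CONFIG_USER_ONLY-guarded field in cpu.h below) so each rule is applied at most once per hart; this toy version simply relies on the table being acyclic.
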
diff --git a/target/riscv/cpu.h b/target/riscv/cpu.h
index 6fe0d71..8774204 100644
--- a/target/riscv/cpu.h
+++ b/target/riscv/cpu.h
@@ -96,12 +96,14 @@ extern RISCVCPUProfile *riscv_profiles[];
#define PRIV_VER_1_10_0_STR "v1.10.0"
#define PRIV_VER_1_11_0_STR "v1.11.0"
#define PRIV_VER_1_12_0_STR "v1.12.0"
+#define PRIV_VER_1_13_0_STR "v1.13.0"
enum {
PRIV_VERSION_1_10_0 = 0,
PRIV_VERSION_1_11_0,
PRIV_VERSION_1_12_0,
+ PRIV_VERSION_1_13_0,
- PRIV_VERSION_LATEST = PRIV_VERSION_1_12_0,
+ PRIV_VERSION_LATEST = PRIV_VERSION_1_13_0,
};
#define VEXT_VERSION_1_00_0 0x00010000
@@ -122,6 +124,29 @@ typedef enum {
EXT_STATUS_DIRTY,
} RISCVExtStatus;
+typedef struct riscv_cpu_implied_exts_rule {
+#ifndef CONFIG_USER_ONLY
+ /*
+ * Bitmask indicates the rule enabled status for the harts.
+ * This enhancement is only available in system-mode QEMU,
+ * as we don't have a good way (e.g. mhartid) to distinguish
+ * the SMP cores in user-mode QEMU.
+ */
+ unsigned long *enabled;
+#endif
+ /* True if this is a MISA implied rule. */
+ bool is_misa;
+ /* ext is MISA bit if is_misa flag is true, else multi extension offset. */
+ const uint32_t ext;
+ const uint32_t implied_misa_exts;
+ const uint32_t implied_multi_exts[];
+} RISCVCPUImpliedExtsRule;
+
+extern RISCVCPUImpliedExtsRule *riscv_misa_ext_implied_rules[];
+extern RISCVCPUImpliedExtsRule *riscv_multi_ext_implied_rules[];
+
+#define RISCV_IMPLIED_EXTS_RULE_END -1
+
#define MMU_USER_IDX 3
#define MAX_RISCV_PMPS (16)
@@ -830,4 +855,5 @@ const char *satp_mode_str(uint8_t satp_mode, bool is_32_bit);
/* Implemented in th_csr.c */
void th_register_custom_csrs(RISCVCPU *cpu);
+const char *priv_spec_to_str(int priv_version);
#endif /* RISCV_CPU_H */
diff --git a/target/riscv/cpu_bits.h b/target/riscv/cpu_bits.h
index 74318a9..c257c5e 100644
--- a/target/riscv/cpu_bits.h
+++ b/target/riscv/cpu_bits.h
@@ -156,6 +156,8 @@
/* 32-bit only */
#define CSR_MSTATUSH 0x310
+#define CSR_MEDELEGH 0x312
+#define CSR_HEDELEGH 0x612
/* Machine Trap Handling */
#define CSR_MSCRATCH 0x340
@@ -315,6 +317,7 @@
#define SMSTATEEN0_CS (1ULL << 0)
#define SMSTATEEN0_FCSR (1ULL << 1)
#define SMSTATEEN0_JVT (1ULL << 2)
+#define SMSTATEEN0_P1P13 (1ULL << 56)
#define SMSTATEEN0_HSCONTXT (1ULL << 57)
#define SMSTATEEN0_IMSIC (1ULL << 58)
#define SMSTATEEN0_AIA (1ULL << 59)
@@ -670,6 +673,8 @@ typedef enum RISCVException {
RISCV_EXCP_INST_PAGE_FAULT = 0xc, /* since: priv-1.10.0 */
RISCV_EXCP_LOAD_PAGE_FAULT = 0xd, /* since: priv-1.10.0 */
RISCV_EXCP_STORE_PAGE_FAULT = 0xf, /* since: priv-1.10.0 */
+ RISCV_EXCP_SW_CHECK = 0x12, /* since: priv-1.13.0 */
+ RISCV_EXCP_HW_ERR = 0x13, /* since: priv-1.13.0 */
RISCV_EXCP_INST_GUEST_PAGE_FAULT = 0x14,
RISCV_EXCP_LOAD_GUEST_ACCESS_FAULT = 0x15,
RISCV_EXCP_VIRT_INSTRUCTION_FAULT = 0x16,
@@ -695,7 +700,8 @@ typedef enum RISCVException {
#define IRQ_M_EXT 11
#define IRQ_S_GEXT 12
#define IRQ_PMU_OVF 13
-#define IRQ_LOCAL_MAX 16
+#define IRQ_LOCAL_MAX 64
+/* -1 is due to bit zero of hgeip and hgeie being ROZ. */
#define IRQ_LOCAL_GUEST_MAX (TARGET_LONG_BITS - 1)
/* mip masks */
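
SMSTATEEN0_P1P13 (bit 56) is the state-enable bit for the priv-1.13 additions; the csr.c hunks below use it to gate lower-privilege access to the new hedelegh CSR, so HS-mode can only touch it once M-mode has set the bit in mstateen0. A simplified standalone sketch of that gate (the real check, smstateen_acc_ok, also consults hstateen for VS-level accesses and raises an exception rather than returning a bool):

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define SMSTATEEN0_P1P13 (1ULL << 56)

/* Boiled-down model: access is allowed only when the bit is set. */
static bool hedelegh_access_ok(uint64_t mstateen0)
{
    return (mstateen0 & SMSTATEEN0_P1P13) != 0;
}

int main(void)
{
    printf("%d\n", hedelegh_access_ok(0));                 /* 0: would trap */
    printf("%d\n", hedelegh_access_ok(SMSTATEEN0_P1P13));  /* 1: allowed */
    return 0;
}
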
diff --git a/target/riscv/cpu_cfg.h b/target/riscv/cpu_cfg.h
index e1e4f32..fb7eebd 100644
--- a/target/riscv/cpu_cfg.h
+++ b/target/riscv/cpu_cfg.h
@@ -136,6 +136,7 @@ struct RISCVCPUConfig {
* TCG always implement/can't be user disabled,
* based on spec version.
*/
+ bool has_priv_1_13;
bool has_priv_1_12;
bool has_priv_1_11;
diff --git a/target/riscv/csr.c b/target/riscv/csr.c
index 58ef707..432c59d 100644
--- a/target/riscv/csr.c
+++ b/target/riscv/csr.c
@@ -762,14 +762,18 @@ static RISCVException write_vcsr(CPURISCVState *env, int csrno,
}
/* User Timers and Counters */
-static target_ulong get_ticks(bool shift)
+static target_ulong get_ticks(bool shift, bool instructions)
{
int64_t val;
target_ulong result;
#if !defined(CONFIG_USER_ONLY)
if (icount_enabled()) {
- val = icount_get();
+ if (instructions) {
+ val = icount_get_raw();
+ } else {
+ val = icount_get();
+ }
} else {
val = cpu_get_host_ticks();
}
@@ -804,14 +808,14 @@ static RISCVException read_timeh(CPURISCVState *env, int csrno,
static RISCVException read_hpmcounter(CPURISCVState *env, int csrno,
target_ulong *val)
{
- *val = get_ticks(false);
+ *val = get_ticks(false, (csrno == CSR_INSTRET));
return RISCV_EXCP_NONE;
}
static RISCVException read_hpmcounterh(CPURISCVState *env, int csrno,
target_ulong *val)
{
- *val = get_ticks(true);
+ *val = get_ticks(true, (csrno == CSR_INSTRETH));
return RISCV_EXCP_NONE;
}
@@ -875,11 +879,11 @@ static RISCVException write_mhpmcounter(CPURISCVState *env, int csrno,
int ctr_idx = csrno - CSR_MCYCLE;
PMUCTRState *counter = &env->pmu_ctrs[ctr_idx];
uint64_t mhpmctr_val = val;
+ bool instr = riscv_pmu_ctr_monitor_instructions(env, ctr_idx);
counter->mhpmcounter_val = val;
- if (riscv_pmu_ctr_monitor_cycles(env, ctr_idx) ||
- riscv_pmu_ctr_monitor_instructions(env, ctr_idx)) {
- counter->mhpmcounter_prev = get_ticks(false);
+ if (riscv_pmu_ctr_monitor_cycles(env, ctr_idx) || instr) {
+ counter->mhpmcounter_prev = get_ticks(false, instr);
if (ctr_idx > 2) {
if (riscv_cpu_mxl(env) == MXL_RV32) {
mhpmctr_val = mhpmctr_val |
@@ -902,12 +906,12 @@ static RISCVException write_mhpmcounterh(CPURISCVState *env, int csrno,
PMUCTRState *counter = &env->pmu_ctrs[ctr_idx];
uint64_t mhpmctr_val = counter->mhpmcounter_val;
uint64_t mhpmctrh_val = val;
+ bool instr = riscv_pmu_ctr_monitor_instructions(env, ctr_idx);
counter->mhpmcounterh_val = val;
mhpmctr_val = mhpmctr_val | (mhpmctrh_val << 32);
- if (riscv_pmu_ctr_monitor_cycles(env, ctr_idx) ||
- riscv_pmu_ctr_monitor_instructions(env, ctr_idx)) {
- counter->mhpmcounterh_prev = get_ticks(true);
+ if (riscv_pmu_ctr_monitor_cycles(env, ctr_idx) || instr) {
+ counter->mhpmcounterh_prev = get_ticks(true, instr);
if (ctr_idx > 2) {
riscv_pmu_setup_timer(env, mhpmctr_val, ctr_idx);
}
@@ -926,6 +930,7 @@ static RISCVException riscv_pmu_read_ctr(CPURISCVState *env, target_ulong *val,
counter->mhpmcounter_prev;
target_ulong ctr_val = upper_half ? counter->mhpmcounterh_val :
counter->mhpmcounter_val;
+ bool instr = riscv_pmu_ctr_monitor_instructions(env, ctr_idx);
if (get_field(env->mcountinhibit, BIT(ctr_idx))) {
/*
@@ -946,9 +951,8 @@ static RISCVException riscv_pmu_read_ctr(CPURISCVState *env, target_ulong *val,
* The kernel computes the perf delta by subtracting the current value from
* the value it initialized previously (ctr_val).
*/
- if (riscv_pmu_ctr_monitor_cycles(env, ctr_idx) ||
- riscv_pmu_ctr_monitor_instructions(env, ctr_idx)) {
- *val = get_ticks(upper_half) - ctr_prev + ctr_val;
+ if (riscv_pmu_ctr_monitor_cycles(env, ctr_idx) || instr) {
+ *val = get_ticks(upper_half, instr) - ctr_prev + ctr_val;
} else {
*val = ctr_val;
}
@@ -1145,7 +1149,14 @@ static RISCVException write_stimecmph(CPURISCVState *env, int csrno,
#define VSTOPI_NUM_SRCS 5
-#define LOCAL_INTERRUPTS (~0x1FFF)
+/*
+ * All core local interrupts except the fixed ones 0:12. This macro is used
+ * by the virtual-interrupt logic, so don't change it without auditing that
+ * support. For reference, see the AIA spec: `5.3 Interrupt filtering and
+ * virtual interrupts for supervisor level` and `6.3.2 Virtual interrupts for
+ * VS level`.
+ */
+#define LOCAL_INTERRUPTS (~0x1FFFULL)
static const uint64_t delegable_ints =
S_MODE_INTERRUPTS | VS_MODE_INTERRUPTS | MIP_LCOFIP;
@@ -1197,18 +1208,18 @@ static const target_ulong sstatus_v1_10_mask = SSTATUS_SIE | SSTATUS_SPIE |
*/
/* Bit STIP can be an alias of mip.STIP that's why it's writable in mvip. */
-static const target_ulong mvip_writable_mask = MIP_SSIP | MIP_STIP | MIP_SEIP |
+static const uint64_t mvip_writable_mask = MIP_SSIP | MIP_STIP | MIP_SEIP |
LOCAL_INTERRUPTS;
-static const target_ulong mvien_writable_mask = MIP_SSIP | MIP_SEIP |
+static const uint64_t mvien_writable_mask = MIP_SSIP | MIP_SEIP |
LOCAL_INTERRUPTS;
-static const target_ulong sip_writable_mask = SIP_SSIP | LOCAL_INTERRUPTS;
-static const target_ulong hip_writable_mask = MIP_VSSIP;
-static const target_ulong hvip_writable_mask = MIP_VSSIP | MIP_VSTIP |
+static const uint64_t sip_writable_mask = SIP_SSIP | LOCAL_INTERRUPTS;
+static const uint64_t hip_writable_mask = MIP_VSSIP;
+static const uint64_t hvip_writable_mask = MIP_VSSIP | MIP_VSTIP |
MIP_VSEIP | LOCAL_INTERRUPTS;
-static const target_ulong hvien_writable_mask = LOCAL_INTERRUPTS;
+static const uint64_t hvien_writable_mask = LOCAL_INTERRUPTS;
-static const target_ulong vsip_writable_mask = MIP_VSSIP | LOCAL_INTERRUPTS;
+static const uint64_t vsip_writable_mask = MIP_VSSIP | LOCAL_INTERRUPTS;
const bool valid_vm_1_10_32[16] = {
[VM_1_10_MBARE] = true,
@@ -2245,6 +2256,10 @@ static RISCVException write_mstateen0(CPURISCVState *env, int csrno,
wr_mask |= SMSTATEEN0_FCSR;
}
+ if (env->priv_ver >= PRIV_VERSION_1_13_0) {
+ wr_mask |= SMSTATEEN0_P1P13;
+ }
+
return write_mstateen(env, csrno, wr_mask, new_val);
}
@@ -2280,6 +2295,10 @@ static RISCVException write_mstateen0h(CPURISCVState *env, int csrno,
{
uint64_t wr_mask = SMSTATEEN_STATEEN | SMSTATEEN0_HSENVCFG;
+ if (env->priv_ver >= PRIV_VERSION_1_13_0) {
+ wr_mask |= SMSTATEEN0_P1P13;
+ }
+
return write_mstateenh(env, csrno, wr_mask, new_val);
}
@@ -3214,6 +3233,33 @@ static RISCVException write_hedeleg(CPURISCVState *env, int csrno,
return RISCV_EXCP_NONE;
}
+static RISCVException read_hedelegh(CPURISCVState *env, int csrno,
+ target_ulong *val)
+{
+ RISCVException ret;
+ ret = smstateen_acc_ok(env, 0, SMSTATEEN0_P1P13);
+ if (ret != RISCV_EXCP_NONE) {
+ return ret;
+ }
+
+ /* Reserved; reads as zero for now. */
+ *val = 0;
+ return RISCV_EXCP_NONE;
+}
+
+static RISCVException write_hedelegh(CPURISCVState *env, int csrno,
+ target_ulong val)
+{
+ RISCVException ret;
+ ret = smstateen_acc_ok(env, 0, SMSTATEEN0_P1P13);
+ if (ret != RISCV_EXCP_NONE) {
+ return ret;
+ }
+
+ /* Reserved; writes are ignored for now. */
+ return RISCV_EXCP_NONE;
+}
+
static RISCVException rmw_hvien64(CPURISCVState *env, int csrno,
uint64_t *ret_val,
uint64_t new_val, uint64_t wr_mask)
@@ -4618,6 +4664,10 @@ riscv_csr_operations csr_ops[CSR_TABLE_SIZE] = {
[CSR_MSTATUSH] = { "mstatush", any32, read_mstatush,
write_mstatush },
+ [CSR_MEDELEGH] = { "medelegh", any32, read_zero, write_ignore,
+ .min_priv_ver = PRIV_VERSION_1_13_0 },
+ [CSR_HEDELEGH] = { "hedelegh", hmode32, read_hedelegh, write_hedelegh,
+ .min_priv_ver = PRIV_VERSION_1_13_0 },
/* Machine Trap Handling */
[CSR_MSCRATCH] = { "mscratch", any, read_mscratch, write_mscratch,
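
One subtle point in the csr.c changes above is the width of LOCAL_INTERRUPTS and the writable-mask constants. With the AIA, local interrupts extend past bit 31, so a mask held in a 32-bit target_ulong silently drops bits 32..63 on RV32; switching to uint64_t keeps them. A standalone sketch of the difference, using a hypothetical local interrupt at bit 40:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
    /* What an RV32 build computes if the mask stays target_ulong (32-bit). */
    uint32_t narrow = (uint32_t)~0x1FFF;
    /* What the patched uint64_t macro computes. */
    uint64_t wide = ~0x1FFFULL;

    /* Hypothetical local interrupt at bit 40 (beyond a 32-bit target_ulong). */
    uint64_t irq = 1ULL << 40;

    printf("narrow mask keeps it: %d\n", (irq & narrow) != 0);   /* 0 */
    printf("wide mask keeps it:   %d\n", (irq & wide) != 0);     /* 1 */
    return 0;
}
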
diff --git a/target/riscv/debug.c b/target/riscv/debug.c
index b110370..0b5099f 100644
--- a/target/riscv/debug.c
+++ b/target/riscv/debug.c
@@ -241,6 +241,76 @@ static void do_trigger_action(CPURISCVState *env, target_ulong trigger_index)
}
}
+/*
+ * Check whether the privilege level of the specified trigger matches the
+ * CPU's current privilege level.
+ */
+static bool trigger_priv_match(CPURISCVState *env, trigger_type_t type,
+ int trigger_index)
+{
+ target_ulong ctrl = env->tdata1[trigger_index];
+
+ switch (type) {
+ case TRIGGER_TYPE_AD_MATCH:
+ /* type 2 trigger cannot be fired in VU/VS mode */
+ if (env->virt_enabled) {
+ return false;
+ }
+ /* check U/S/M bit against current privilege level */
+ if ((ctrl >> 3) & BIT(env->priv)) {
+ return true;
+ }
+ break;
+ case TRIGGER_TYPE_AD_MATCH6:
+ if (env->virt_enabled) {
+ /* check VU/VS bit against current privilege level */
+ if ((ctrl >> 23) & BIT(env->priv)) {
+ return true;
+ }
+ } else {
+ /* check U/S/M bit against current privilege level */
+ if ((ctrl >> 3) & BIT(env->priv)) {
+ return true;
+ }
+ }
+ break;
+ case TRIGGER_TYPE_INST_CNT:
+ if (env->virt_enabled) {
+ /* check VU/VS bit against current privilege level */
+ if ((ctrl >> 25) & BIT(env->priv)) {
+ return true;
+ }
+ } else {
+ /* check U/S/M bit against current privilege level */
+ if ((ctrl >> 6) & BIT(env->priv)) {
+ return true;
+ }
+ }
+ break;
+ case TRIGGER_TYPE_INT:
+ case TRIGGER_TYPE_EXCP:
+ case TRIGGER_TYPE_EXT_SRC:
+ qemu_log_mask(LOG_UNIMP, "trigger type: %d is not supported\n", type);
+ break;
+ case TRIGGER_TYPE_NO_EXIST:
+ case TRIGGER_TYPE_UNAVAIL:
+ qemu_log_mask(LOG_GUEST_ERROR, "trigger type: %d does not exist\n",
+ type);
+ break;
+ default:
+ g_assert_not_reached();
+ }
+
+ return false;
+}
+
+/* Common matching conditions for all types of the triggers. */
+static bool trigger_common_match(CPURISCVState *env, trigger_type_t type,
+ int trigger_index)
+{
+ return trigger_priv_match(env, type, trigger_index);
+}
+
/* type 2 trigger */
static uint32_t type2_breakpoint_size(CPURISCVState *env, target_ulong ctrl)
@@ -554,7 +624,7 @@ void helper_itrigger_match(CPURISCVState *env)
if (get_trigger_type(env, i) != TRIGGER_TYPE_INST_CNT) {
continue;
}
- if (check_itrigger_priv(env, i)) {
+ if (!trigger_common_match(env, TRIGGER_TYPE_INST_CNT, i)) {
continue;
}
count = itrigger_get_count(env, i);
@@ -785,22 +855,18 @@ bool riscv_cpu_debug_check_breakpoint(CPUState *cs)
for (i = 0; i < RV_MAX_TRIGGERS; i++) {
trigger_type = get_trigger_type(env, i);
+ if (!trigger_common_match(env, trigger_type, i)) {
+ continue;
+ }
+
switch (trigger_type) {
case TRIGGER_TYPE_AD_MATCH:
- /* type 2 trigger cannot be fired in VU/VS mode */
- if (env->virt_enabled) {
- return false;
- }
-
ctrl = env->tdata1[i];
pc = env->tdata2[i];
if ((ctrl & TYPE2_EXEC) && (bp->pc == pc)) {
- /* check U/S/M bit against current privilege level */
- if ((ctrl >> 3) & BIT(env->priv)) {
- env->badaddr = pc;
- return true;
- }
+ env->badaddr = pc;
+ return true;
}
break;
case TRIGGER_TYPE_AD_MATCH6:
@@ -808,19 +874,8 @@ bool riscv_cpu_debug_check_breakpoint(CPUState *cs)
pc = env->tdata2[i];
if ((ctrl & TYPE6_EXEC) && (bp->pc == pc)) {
- if (env->virt_enabled) {
- /* check VU/VS bit against current privilege level */
- if ((ctrl >> 23) & BIT(env->priv)) {
- env->badaddr = pc;
- return true;
- }
- } else {
- /* check U/S/M bit against current privilege level */
- if ((ctrl >> 3) & BIT(env->priv)) {
- env->badaddr = pc;
- return true;
- }
- }
+ env->badaddr = pc;
+ return true;
}
break;
default:
@@ -846,13 +901,12 @@ bool riscv_cpu_debug_check_watchpoint(CPUState *cs, CPUWatchpoint *wp)
for (i = 0; i < RV_MAX_TRIGGERS; i++) {
trigger_type = get_trigger_type(env, i);
+ if (!trigger_common_match(env, trigger_type, i)) {
+ continue;
+ }
+
switch (trigger_type) {
case TRIGGER_TYPE_AD_MATCH:
- /* type 2 trigger cannot be fired in VU/VS mode */
- if (env->virt_enabled) {
- return false;
- }
-
ctrl = env->tdata1[i];
addr = env->tdata2[i];
flags = 0;
@@ -865,10 +919,7 @@ bool riscv_cpu_debug_check_watchpoint(CPUState *cs, CPUWatchpoint *wp)
}
if ((wp->flags & flags) && (wp->vaddr == addr)) {
- /* check U/S/M bit against current privilege level */
- if ((ctrl >> 3) & BIT(env->priv)) {
- return true;
- }
+ return true;
}
break;
case TRIGGER_TYPE_AD_MATCH6:
@@ -884,17 +935,7 @@ bool riscv_cpu_debug_check_watchpoint(CPUState *cs, CPUWatchpoint *wp)
}
if ((wp->flags & flags) && (wp->vaddr == addr)) {
- if (env->virt_enabled) {
- /* check VU/VS bit against current privilege level */
- if ((ctrl >> 23) & BIT(env->priv)) {
- return true;
- }
- } else {
- /* check U/S/M bit against current privilege level */
- if ((ctrl >> 3) & BIT(env->priv)) {
- return true;
- }
- }
+ return true;
}
break;
default:
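
The new trigger_common_match/trigger_priv_match above centralise the privilege checks that were previously duplicated in the breakpoint and watchpoint paths. For a type 2 (mcontrol) trigger the M/S/U enable bits sit at tdata1 bits 6, 4 and 3, so shifting the control word right by 3 lines them up with the privilege encoding (U=0, S=1, M=3) and BIT(priv) picks the right one; type 6 and icount triggers use the analogous shifts shown in the diff. A standalone sketch of the type 2 check:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define BIT(n) (1UL << (n))

/* RISC-V privilege encoding: U=0, S=1, M=3. */
enum { PRV_U = 0, PRV_S = 1, PRV_M = 3 };

/*
 * mcontrol (type 2) keeps its mode-enable bits at u=3, s=4, m=6, so a
 * right shift by 3 aligns them with the privilege encoding above.
 */
static bool type2_priv_match(uint64_t tdata1, int priv)
{
    return (tdata1 >> 3) & BIT(priv);
}

int main(void)
{
    /* Hypothetical tdata1 with only the S-mode enable bit (bit 4) set. */
    uint64_t tdata1 = BIT(4);

    printf("fires in U-mode: %d\n", type2_priv_match(tdata1, PRV_U));  /* 0 */
    printf("fires in S-mode: %d\n", type2_priv_match(tdata1, PRV_S));  /* 1 */
    printf("fires in M-mode: %d\n", type2_priv_match(tdata1, PRV_M));  /* 0 */
    return 0;
}
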
diff --git a/target/riscv/fpu_helper.c b/target/riscv/fpu_helper.c
index 871a70a..91b1a56 100644
--- a/target/riscv/fpu_helper.c
+++ b/target/riscv/fpu_helper.c
@@ -676,7 +676,7 @@ uint64_t helper_fround_h(CPURISCVState *env, uint64_t rs1)
uint64_t helper_froundnx_h(CPURISCVState *env, uint64_t rs1)
{
- float16 frs1 = check_nanbox_s(env, rs1);
+ float16 frs1 = check_nanbox_h(env, rs1);
frs1 = float16_round_to_int(frs1, &env->fp_status);
return nanbox_h(env, frs1);
}
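
The one-line fpu_helper.c fix matters because of NaN-boxing: a half-precision operand held in a 64-bit FP register is only valid if bits 63..16 are all ones, while check_nanbox_s only requires bits 63..32 to be ones, so froundnx_h could treat an improperly boxed value as a legal half float. A standalone sketch of the half-precision check (QEMU's check_nanbox_h also takes the CPU state and skips the test when Zfinx is enabled; this shows just the bit test):

#include <stdint.h>
#include <stdio.h>

/*
 * A half-precision value stored in a 64-bit FP register is "NaN-boxed":
 * bits 63..16 must all be 1. Otherwise the operand is replaced by the
 * canonical half-precision NaN (0x7E00).
 */
static uint16_t check_nanbox_h_sketch(uint64_t reg)
{
    const uint64_t mask = 0xFFFFFFFFFFFF0000ULL;

    if ((reg & mask) == mask) {
        return (uint16_t)reg;   /* properly boxed: use the low 16 bits */
    }
    return 0x7E00;              /* not boxed: canonical NaN */
}

int main(void)
{
    /* Properly boxed 1.0 (0x3C00) vs. a value boxed only for single. */
    printf("%#x\n", check_nanbox_h_sketch(0xFFFFFFFFFFFF3C00ULL));  /* 0x3c00 */
    printf("%#x\n", check_nanbox_h_sketch(0xFFFFFFFF00003C00ULL));  /* 0x7e00 */
    return 0;
}
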
diff --git a/target/riscv/kvm/kvm-cpu.c b/target/riscv/kvm/kvm-cpu.c
index 235e2cd..1047961 100644
--- a/target/riscv/kvm/kvm-cpu.c
+++ b/target/riscv/kvm/kvm-cpu.c
@@ -1555,6 +1555,21 @@ static int kvm_riscv_handle_csr(CPUState *cs, struct kvm_run *run)
return ret;
}
+static bool kvm_riscv_handle_debug(CPUState *cs)
+{
+ RISCVCPU *cpu = RISCV_CPU(cs);
+ CPURISCVState *env = &cpu->env;
+
+ /* Ensure PC is synchronised */
+ kvm_cpu_synchronize_state(cs);
+
+ if (kvm_find_sw_breakpoint(cs, env->pc)) {
+ return true;
+ }
+
+ return false;
+}
+
int kvm_arch_handle_exit(CPUState *cs, struct kvm_run *run)
{
int ret = 0;
@@ -1565,6 +1580,11 @@ int kvm_arch_handle_exit(CPUState *cs, struct kvm_run *run)
case KVM_EXIT_RISCV_CSR:
ret = kvm_riscv_handle_csr(cs, run);
break;
+ case KVM_EXIT_DEBUG:
+ if (kvm_riscv_handle_debug(cs)) {
+ ret = EXCP_DEBUG;
+ }
+ break;
default:
qemu_log_mask(LOG_UNIMP, "%s: un-handled exit reason %d\n",
__func__, run->exit_reason);
@@ -1969,3 +1989,72 @@ static const TypeInfo riscv_kvm_cpu_type_infos[] = {
};
DEFINE_TYPES(riscv_kvm_cpu_type_infos)
+
+static const uint32_t ebreak_insn = 0x00100073;
+static const uint16_t c_ebreak_insn = 0x9002;
+
+int kvm_arch_insert_sw_breakpoint(CPUState *cs, struct kvm_sw_breakpoint *bp)
+{
+ if (cpu_memory_rw_debug(cs, bp->pc, (uint8_t *)&bp->saved_insn, 2, 0)) {
+ return -EINVAL;
+ }
+
+ if ((bp->saved_insn & 0x3) == 0x3) {
+ if (cpu_memory_rw_debug(cs, bp->pc, (uint8_t *)&bp->saved_insn, 4, 0)
+ || cpu_memory_rw_debug(cs, bp->pc, (uint8_t *)&ebreak_insn, 4, 1)) {
+ return -EINVAL;
+ }
+ } else {
+ if (cpu_memory_rw_debug(cs, bp->pc, (uint8_t *)&c_ebreak_insn, 2, 1)) {
+ return -EINVAL;
+ }
+ }
+
+ return 0;
+}
+
+int kvm_arch_remove_sw_breakpoint(CPUState *cs, struct kvm_sw_breakpoint *bp)
+{
+ uint32_t ebreak;
+ uint16_t c_ebreak;
+
+ if ((bp->saved_insn & 0x3) == 0x3) {
+ if (cpu_memory_rw_debug(cs, bp->pc, (uint8_t *)&ebreak, 4, 0) ||
+ ebreak != ebreak_insn ||
+ cpu_memory_rw_debug(cs, bp->pc, (uint8_t *)&bp->saved_insn, 4, 1)) {
+ return -EINVAL;
+ }
+ } else {
+ if (cpu_memory_rw_debug(cs, bp->pc, (uint8_t *)&c_ebreak, 2, 0) ||
+ c_ebreak != c_ebreak_insn ||
+ cpu_memory_rw_debug(cs, bp->pc, (uint8_t *)&bp->saved_insn, 2, 1)) {
+ return -EINVAL;
+ }
+ }
+
+ return 0;
+}
+
+int kvm_arch_insert_hw_breakpoint(vaddr addr, vaddr len, int type)
+{
+ /* TODO: to be implemented later. */
+ return -EINVAL;
+}
+
+int kvm_arch_remove_hw_breakpoint(vaddr addr, vaddr len, int type)
+{
+ /* TODO: to be implemented later. */
+ return -EINVAL;
+}
+
+void kvm_arch_remove_all_hw_breakpoints(void)
+{
+ /* TODO: to be implemented later. */
+}
+
+void kvm_arch_update_guest_debug(CPUState *cs, struct kvm_guest_debug *dbg)
+{
+ if (kvm_sw_breakpoints_active(cs)) {
+ dbg->control |= KVM_GUESTDBG_ENABLE;
+ }
+}
diff --git a/target/riscv/tcg/tcg-cpu.c b/target/riscv/tcg/tcg-cpu.c
index 683f604..ae25686 100644
--- a/target/riscv/tcg/tcg-cpu.c
+++ b/target/riscv/tcg/tcg-cpu.c
@@ -31,11 +31,17 @@
#include "hw/core/accel-cpu.h"
#include "hw/core/tcg-cpu-ops.h"
#include "tcg/tcg.h"
+#ifndef CONFIG_USER_ONLY
+#include "hw/boards.h"
+#endif
/* Hash that stores user set extensions */
static GHashTable *multi_ext_user_opts;
static GHashTable *misa_ext_user_opts;
+static GHashTable *multi_ext_implied_rules;
+static GHashTable *misa_ext_implied_rules;
+
static bool cpu_cfg_ext_is_user_set(uint32_t ext_offset)
{
return g_hash_table_contains(multi_ext_user_opts,
@@ -76,16 +82,11 @@ static void riscv_cpu_write_misa_bit(RISCVCPU *cpu, uint32_t bit,
static const char *cpu_priv_ver_to_str(int priv_ver)
{
- switch (priv_ver) {
- case PRIV_VERSION_1_10_0:
- return "v1.10.0";
- case PRIV_VERSION_1_11_0:
- return "v1.11.0";
- case PRIV_VERSION_1_12_0:
- return "v1.12.0";
- }
+ const char *priv_spec_str = priv_spec_to_str(priv_ver);
- g_assert_not_reached();
+ g_assert(priv_spec_str);
+
+ return priv_spec_str;
}
static void riscv_cpu_synchronize_from_tb(CPUState *cs,
@@ -323,6 +324,10 @@ static void riscv_cpu_update_named_features(RISCVCPU *cpu)
cpu->cfg.has_priv_1_12 = true;
}
+ if (cpu->env.priv_ver >= PRIV_VERSION_1_13_0) {
+ cpu->cfg.has_priv_1_13 = true;
+ }
+
/* zic64b is 1.12 or later */
cpu->cfg.ext_zic64b = cpu->cfg.cbom_blocksize == 64 &&
cpu->cfg.cbop_blocksize == 64 &&
@@ -466,10 +471,6 @@ void riscv_cpu_validate_set_extensions(RISCVCPU *cpu, Error **errp)
return;
}
- if (cpu->cfg.ext_zfh) {
- cpu_cfg_ext_auto_update(cpu, CPU_CFG_OFFSET(ext_zfhmin), true);
- }
-
if (cpu->cfg.ext_zfhmin && !riscv_has_ext(env, RVF)) {
error_setg(errp, "Zfh/Zfhmin extensions require F extension");
return;
@@ -491,9 +492,6 @@ void riscv_cpu_validate_set_extensions(RISCVCPU *cpu, Error **errp)
error_propagate(errp, local_err);
return;
}
-
- /* The V vector extension depends on the Zve64d extension */
- cpu_cfg_ext_auto_update(cpu, CPU_CFG_OFFSET(ext_zve64d), true);
}
/* The Zve64d extension depends on the Zve64f extension */
@@ -502,18 +500,6 @@ void riscv_cpu_validate_set_extensions(RISCVCPU *cpu, Error **errp)
error_setg(errp, "Zve64d/V extensions require D extension");
return;
}
- cpu_cfg_ext_auto_update(cpu, CPU_CFG_OFFSET(ext_zve64f), true);
- }
-
- /* The Zve64f extension depends on the Zve64x and Zve32f extensions */
- if (cpu->cfg.ext_zve64f) {
- cpu_cfg_ext_auto_update(cpu, CPU_CFG_OFFSET(ext_zve64x), true);
- cpu_cfg_ext_auto_update(cpu, CPU_CFG_OFFSET(ext_zve32f), true);
- }
-
- /* The Zve64x extension depends on the Zve32x extension */
- if (cpu->cfg.ext_zve64x) {
- cpu_cfg_ext_auto_update(cpu, CPU_CFG_OFFSET(ext_zve32x), true);
}
/* The Zve32f extension depends on the Zve32x extension */
@@ -522,11 +508,6 @@ void riscv_cpu_validate_set_extensions(RISCVCPU *cpu, Error **errp)
error_setg(errp, "Zve32f/Zve64f extensions require F extension");
return;
}
- cpu_cfg_ext_auto_update(cpu, CPU_CFG_OFFSET(ext_zve32x), true);
- }
-
- if (cpu->cfg.ext_zvfh) {
- cpu_cfg_ext_auto_update(cpu, CPU_CFG_OFFSET(ext_zvfhmin), true);
}
if (cpu->cfg.ext_zvfhmin && !cpu->cfg.ext_zve32f) {
@@ -549,11 +530,6 @@ void riscv_cpu_validate_set_extensions(RISCVCPU *cpu, Error **errp)
return;
}
- /* Set the ISA extensions, checks should have happened above */
- if (cpu->cfg.ext_zhinx) {
- cpu_cfg_ext_auto_update(cpu, CPU_CFG_OFFSET(ext_zca), true);
- }
-
if ((cpu->cfg.ext_zdinx || cpu->cfg.ext_zhinxmin) && !cpu->cfg.ext_zfinx) {
error_setg(errp, "Zdinx/Zhinx/Zhinxmin extensions require Zfinx");
return;
@@ -571,27 +547,6 @@ void riscv_cpu_validate_set_extensions(RISCVCPU *cpu, Error **errp)
}
}
- if (cpu->cfg.ext_zce) {
- cpu_cfg_ext_auto_update(cpu, CPU_CFG_OFFSET(ext_zca), true);
- cpu_cfg_ext_auto_update(cpu, CPU_CFG_OFFSET(ext_zcb), true);
- cpu_cfg_ext_auto_update(cpu, CPU_CFG_OFFSET(ext_zcmp), true);
- cpu_cfg_ext_auto_update(cpu, CPU_CFG_OFFSET(ext_zcmt), true);
- if (riscv_has_ext(env, RVF) && mcc->misa_mxl_max == MXL_RV32) {
- cpu_cfg_ext_auto_update(cpu, CPU_CFG_OFFSET(ext_zcf), true);
- }
- }
-
- /* zca, zcd and zcf has a PRIV 1.12.0 restriction */
- if (riscv_has_ext(env, RVC) && env->priv_ver >= PRIV_VERSION_1_12_0) {
- cpu_cfg_ext_auto_update(cpu, CPU_CFG_OFFSET(ext_zca), true);
- if (riscv_has_ext(env, RVF) && mcc->misa_mxl_max == MXL_RV32) {
- cpu_cfg_ext_auto_update(cpu, CPU_CFG_OFFSET(ext_zcf), true);
- }
- if (riscv_has_ext(env, RVD)) {
- cpu_cfg_ext_auto_update(cpu, CPU_CFG_OFFSET(ext_zcd), true);
- }
- }
-
if (mcc->misa_mxl_max != MXL_RV32 && cpu->cfg.ext_zcf) {
error_setg(errp, "Zcf extension is only relevant to RV32");
return;
@@ -625,48 +580,6 @@ void riscv_cpu_validate_set_extensions(RISCVCPU *cpu, Error **errp)
return;
}
- /*
- * Shorthand vector crypto extensions
- */
- if (cpu->cfg.ext_zvknc) {
- cpu_cfg_ext_auto_update(cpu, CPU_CFG_OFFSET(ext_zvkn), true);
- cpu_cfg_ext_auto_update(cpu, CPU_CFG_OFFSET(ext_zvbc), true);
- }
-
- if (cpu->cfg.ext_zvkng) {
- cpu_cfg_ext_auto_update(cpu, CPU_CFG_OFFSET(ext_zvkn), true);
- cpu_cfg_ext_auto_update(cpu, CPU_CFG_OFFSET(ext_zvkg), true);
- }
-
- if (cpu->cfg.ext_zvkn) {
- cpu_cfg_ext_auto_update(cpu, CPU_CFG_OFFSET(ext_zvkned), true);
- cpu_cfg_ext_auto_update(cpu, CPU_CFG_OFFSET(ext_zvknhb), true);
- cpu_cfg_ext_auto_update(cpu, CPU_CFG_OFFSET(ext_zvkb), true);
- cpu_cfg_ext_auto_update(cpu, CPU_CFG_OFFSET(ext_zvkt), true);
- }
-
- if (cpu->cfg.ext_zvksc) {
- cpu_cfg_ext_auto_update(cpu, CPU_CFG_OFFSET(ext_zvks), true);
- cpu_cfg_ext_auto_update(cpu, CPU_CFG_OFFSET(ext_zvbc), true);
- }
-
- if (cpu->cfg.ext_zvksg) {
- cpu_cfg_ext_auto_update(cpu, CPU_CFG_OFFSET(ext_zvks), true);
- cpu_cfg_ext_auto_update(cpu, CPU_CFG_OFFSET(ext_zvkg), true);
- }
-
- if (cpu->cfg.ext_zvks) {
- cpu_cfg_ext_auto_update(cpu, CPU_CFG_OFFSET(ext_zvksed), true);
- cpu_cfg_ext_auto_update(cpu, CPU_CFG_OFFSET(ext_zvksh), true);
- cpu_cfg_ext_auto_update(cpu, CPU_CFG_OFFSET(ext_zvkb), true);
- cpu_cfg_ext_auto_update(cpu, CPU_CFG_OFFSET(ext_zvkt), true);
- }
-
- if (cpu->cfg.ext_zvkt) {
- cpu_cfg_ext_auto_update(cpu, CPU_CFG_OFFSET(ext_zvbb), true);
- cpu_cfg_ext_auto_update(cpu, CPU_CFG_OFFSET(ext_zvbc), true);
- }
-
if ((cpu->cfg.ext_zvbb || cpu->cfg.ext_zvkb || cpu->cfg.ext_zvkg ||
cpu->cfg.ext_zvkned || cpu->cfg.ext_zvknha || cpu->cfg.ext_zvksed ||
cpu->cfg.ext_zvksh) && !cpu->cfg.ext_zve32x) {
@@ -682,29 +595,6 @@ void riscv_cpu_validate_set_extensions(RISCVCPU *cpu, Error **errp)
return;
}
- if (cpu->cfg.ext_zk) {
- cpu_cfg_ext_auto_update(cpu, CPU_CFG_OFFSET(ext_zkn), true);
- cpu_cfg_ext_auto_update(cpu, CPU_CFG_OFFSET(ext_zkr), true);
- cpu_cfg_ext_auto_update(cpu, CPU_CFG_OFFSET(ext_zkt), true);
- }
-
- if (cpu->cfg.ext_zkn) {
- cpu_cfg_ext_auto_update(cpu, CPU_CFG_OFFSET(ext_zbkb), true);
- cpu_cfg_ext_auto_update(cpu, CPU_CFG_OFFSET(ext_zbkc), true);
- cpu_cfg_ext_auto_update(cpu, CPU_CFG_OFFSET(ext_zbkx), true);
- cpu_cfg_ext_auto_update(cpu, CPU_CFG_OFFSET(ext_zkne), true);
- cpu_cfg_ext_auto_update(cpu, CPU_CFG_OFFSET(ext_zknd), true);
- cpu_cfg_ext_auto_update(cpu, CPU_CFG_OFFSET(ext_zknh), true);
- }
-
- if (cpu->cfg.ext_zks) {
- cpu_cfg_ext_auto_update(cpu, CPU_CFG_OFFSET(ext_zbkb), true);
- cpu_cfg_ext_auto_update(cpu, CPU_CFG_OFFSET(ext_zbkc), true);
- cpu_cfg_ext_auto_update(cpu, CPU_CFG_OFFSET(ext_zbkx), true);
- cpu_cfg_ext_auto_update(cpu, CPU_CFG_OFFSET(ext_zksed), true);
- cpu_cfg_ext_auto_update(cpu, CPU_CFG_OFFSET(ext_zksh), true);
- }
-
if (cpu->cfg.ext_zicntr && !cpu->cfg.ext_zicsr) {
if (cpu_cfg_ext_is_user_set(CPU_CFG_OFFSET(ext_zicntr))) {
error_setg(errp, "zicntr requires zicsr");
@@ -833,11 +723,151 @@ static void riscv_cpu_validate_profiles(RISCVCPU *cpu)
}
}
+static void riscv_cpu_init_implied_exts_rules(void)
+{
+ RISCVCPUImpliedExtsRule *rule;
+#ifndef CONFIG_USER_ONLY
+ MachineState *ms = MACHINE(qdev_get_machine());
+#endif
+ static bool initialized;
+ int i;
+
+ /* Implied rules only need to be initialized once. */
+ if (initialized) {
+ return;
+ }
+
+ for (i = 0; (rule = riscv_misa_ext_implied_rules[i]); i++) {
+#ifndef CONFIG_USER_ONLY
+ rule->enabled = bitmap_new(ms->smp.cpus);
+#endif
+ g_hash_table_insert(misa_ext_implied_rules,
+ GUINT_TO_POINTER(rule->ext), (gpointer)rule);
+ }
+
+ for (i = 0; (rule = riscv_multi_ext_implied_rules[i]); i++) {
+#ifndef CONFIG_USER_ONLY
+ rule->enabled = bitmap_new(ms->smp.cpus);
+#endif
+ g_hash_table_insert(multi_ext_implied_rules,
+ GUINT_TO_POINTER(rule->ext), (gpointer)rule);
+ }
+
+ initialized = true;
+}
+
+static void cpu_enable_implied_rule(RISCVCPU *cpu,
+ RISCVCPUImpliedExtsRule *rule)
+{
+ CPURISCVState *env = &cpu->env;
+ RISCVCPUImpliedExtsRule *ir;
+ bool enabled = false;
+ int i;
+
+#ifndef CONFIG_USER_ONLY
+ enabled = test_bit(cpu->env.mhartid, rule->enabled);
+#endif
+
+ if (!enabled) {
+ /* Enable the implied MISAs. */
+ if (rule->implied_misa_exts) {
+ riscv_cpu_set_misa_ext(env,
+ env->misa_ext | rule->implied_misa_exts);
+
+ for (i = 0; misa_bits[i] != 0; i++) {
+ if (rule->implied_misa_exts & misa_bits[i]) {
+ ir = g_hash_table_lookup(misa_ext_implied_rules,
+ GUINT_TO_POINTER(misa_bits[i]));
+
+ if (ir) {
+ cpu_enable_implied_rule(cpu, ir);
+ }
+ }
+ }
+ }
+
+ /* Enable the implied extensions. */
+ for (i = 0;
+ rule->implied_multi_exts[i] != RISCV_IMPLIED_EXTS_RULE_END; i++) {
+ cpu_cfg_ext_auto_update(cpu, rule->implied_multi_exts[i], true);
+
+ ir = g_hash_table_lookup(multi_ext_implied_rules,
+ GUINT_TO_POINTER(
+ rule->implied_multi_exts[i]));
+
+ if (ir) {
+ cpu_enable_implied_rule(cpu, ir);
+ }
+ }
+
+#ifndef CONFIG_USER_ONLY
+ bitmap_set(rule->enabled, cpu->env.mhartid, 1);
+#endif
+ }
+}
+
+/* Zc extension has special implied rules that need to be handled separately. */
+static void cpu_enable_zc_implied_rules(RISCVCPU *cpu)
+{
+ RISCVCPUClass *mcc = RISCV_CPU_GET_CLASS(cpu);
+ CPURISCVState *env = &cpu->env;
+
+ if (cpu->cfg.ext_zce) {
+ cpu_cfg_ext_auto_update(cpu, CPU_CFG_OFFSET(ext_zca), true);
+ cpu_cfg_ext_auto_update(cpu, CPU_CFG_OFFSET(ext_zcb), true);
+ cpu_cfg_ext_auto_update(cpu, CPU_CFG_OFFSET(ext_zcmp), true);
+ cpu_cfg_ext_auto_update(cpu, CPU_CFG_OFFSET(ext_zcmt), true);
+
+ if (riscv_has_ext(env, RVF) && mcc->misa_mxl_max == MXL_RV32) {
+ cpu_cfg_ext_auto_update(cpu, CPU_CFG_OFFSET(ext_zcf), true);
+ }
+ }
+
+ /* Zca, Zcd and Zcf have a PRIV 1.12.0 restriction */
+ if (riscv_has_ext(env, RVC) && env->priv_ver >= PRIV_VERSION_1_12_0) {
+ cpu_cfg_ext_auto_update(cpu, CPU_CFG_OFFSET(ext_zca), true);
+
+ if (riscv_has_ext(env, RVF) && mcc->misa_mxl_max == MXL_RV32) {
+ cpu_cfg_ext_auto_update(cpu, CPU_CFG_OFFSET(ext_zcf), true);
+ }
+
+ if (riscv_has_ext(env, RVD)) {
+ cpu_cfg_ext_auto_update(cpu, CPU_CFG_OFFSET(ext_zcd), true);
+ }
+ }
+}
+
+static void riscv_cpu_enable_implied_rules(RISCVCPU *cpu)
+{
+ RISCVCPUImpliedExtsRule *rule;
+ int i;
+
+ /* Enable the implied extensions for Zc. */
+ cpu_enable_zc_implied_rules(cpu);
+
+ /* Enable the implied MISAs. */
+ for (i = 0; (rule = riscv_misa_ext_implied_rules[i]); i++) {
+ if (riscv_has_ext(&cpu->env, rule->ext)) {
+ cpu_enable_implied_rule(cpu, rule);
+ }
+ }
+
+ /* Enable the implied extensions. */
+ for (i = 0; (rule = riscv_multi_ext_implied_rules[i]); i++) {
+ if (isa_ext_is_enabled(cpu, rule->ext)) {
+ cpu_enable_implied_rule(cpu, rule);
+ }
+ }
+}
+
void riscv_tcg_cpu_finalize_features(RISCVCPU *cpu, Error **errp)
{
CPURISCVState *env = &cpu->env;
Error *local_err = NULL;
+ riscv_cpu_init_implied_exts_rules();
+ riscv_cpu_enable_implied_rules(cpu);
+
riscv_cpu_validate_misa_priv(env, &local_err);
if (local_err != NULL) {
error_propagate(errp, local_err);
@@ -1343,6 +1373,15 @@ static void riscv_tcg_cpu_instance_init(CPUState *cs)
misa_ext_user_opts = g_hash_table_new(NULL, g_direct_equal);
multi_ext_user_opts = g_hash_table_new(NULL, g_direct_equal);
+
+ if (!misa_ext_implied_rules) {
+ misa_ext_implied_rules = g_hash_table_new(NULL, g_direct_equal);
+ }
+
+ if (!multi_ext_implied_rules) {
+ multi_ext_implied_rules = g_hash_table_new(NULL, g_direct_equal);
+ }
+
riscv_cpu_add_user_properties(obj);
if (riscv_cpu_has_max_extensions(obj)) {