author     Tomasz Jeznach <tjeznach@rivosinc.com>    2025-02-24 16:08:23 -0300
committer  Alistair Francis <alistair.francis@wdc.com>    2025-03-04 15:42:54 +1000
commit     4faa3e6f906c832f4c5382fbd618e368525ad2dc (patch)
tree       5be6f67ecd2b1634d77fde92a194b67364bbbc8c
parent     91dd0bd0216f7a70e5e30cfc24eeea455b4f6993 (diff)
hw/riscv/riscv-iommu: add hpm events mmio write
To support HPM event MMIO writes, handled via riscv_iommu_process_hpmevt_write(), we also add the 'hpm-counters' IOMMU property, which is used to determine the number of counters available in the IOMMU.

Note that nothing done so far changes any IOMMU behavior, since we are still not advertising the HPM capability to software. That will be done in the next patch.

Signed-off-by: Tomasz Jeznach <tjeznach@rivosinc.com>
Signed-off-by: Daniel Henrique Barboza <dbarboza@ventanamicro.com>
Acked-by: Alistair Francis <alistair.francis@wdc.com>
Message-ID: <20250224190826.1858473-9-dbarboza@ventanamicro.com>
Signed-off-by: Alistair Francis <alistair.francis@wdc.com>
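As a rough standalone illustration of the event-to-counter bookkeeping introduced here, the sketch below builds an event-ID to counter-bitmask map with GLib's GHashTable, in the same spirit as update_event_map() in the patch. It is a minimal example, not QEMU code: demo_assign(), main() and the build command are invented for illustration only.

/*
 * Minimal sketch (not QEMU code): map an event ID to a bitmask of the
 * hardware counters programmed to count it, using an integer-keyed GLib
 * hash table, similar in spirit to update_event_map() in the patch.
 * Assumed build command: gcc demo.c $(pkg-config --cflags --libs glib-2.0)
 */
#include <glib.h>
#include <stdio.h>

/* Record that counter 'ctr_idx' now counts 'event_id', keeping old bits. */
static void demo_assign(GHashTable *map, guint event_id, guint ctr_idx)
{
    gpointer data = NULL;
    guint mask = 1u << ctr_idx;

    if (g_hash_table_lookup_extended(map, GUINT_TO_POINTER(event_id),
                                     NULL, &data)) {
        mask |= GPOINTER_TO_UINT(data);
    }
    g_hash_table_insert(map, GUINT_TO_POINTER(event_id),
                        GUINT_TO_POINTER(mask));
}

int main(void)
{
    GHashTable *map = g_hash_table_new(g_direct_hash, g_direct_equal);

    demo_assign(map, 5, 1);    /* event 5 is counted by counter 1 */
    demo_assign(map, 5, 3);    /* ...and also by counter 3 */

    guint mask = GPOINTER_TO_UINT(g_hash_table_lookup(map,
                                                      GUINT_TO_POINTER(5)));
    printf("event 5 counter mask: 0x%x\n", mask);    /* prints 0xa */

    g_hash_table_destroy(map);
    return 0;
}

Keeping a per-event bitmask of counters lets a single event feed several IOHPMCTR counters at once, while unassigning a counter (EventID written back as invalid) reduces to a mask update or hash-table removal.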
-rw-r--r--  hw/riscv/riscv-iommu-hpm.c  88
-rw-r--r--  hw/riscv/riscv-iommu-hpm.h   1
-rw-r--r--  hw/riscv/riscv-iommu.c       4
-rw-r--r--  hw/riscv/riscv-iommu.h       1
4 files changed, 93 insertions(+), 1 deletion(-)
diff --git a/hw/riscv/riscv-iommu-hpm.c b/hw/riscv/riscv-iommu-hpm.c
index 1cea6b1..5518c28 100644
--- a/hw/riscv/riscv-iommu-hpm.c
+++ b/hw/riscv/riscv-iommu-hpm.c
@@ -281,3 +281,91 @@ void riscv_iommu_process_hpmcycle_write(RISCVIOMMUState *s)
     s->hpmcycle_prev = get_cycles();
     hpm_setup_timer(s, s->hpmcycle_val);
 }
+
+static inline bool check_valid_event_id(unsigned event_id)
+{
+    return event_id > RISCV_IOMMU_HPMEVENT_INVALID &&
+           event_id < RISCV_IOMMU_HPMEVENT_MAX;
+}
+
+static gboolean hpm_event_equal(gpointer key, gpointer value, gpointer udata)
+{
+    uint32_t *pair = udata;
+
+    if (GPOINTER_TO_UINT(value) & (1 << pair[0])) {
+        pair[1] = GPOINTER_TO_UINT(key);
+        return true;
+    }
+
+    return false;
+}
+
+/* Caller must check ctr_idx against hpm_ctrs to see if it's supported or not. */
+static void update_event_map(RISCVIOMMUState *s, uint64_t value,
+                             uint32_t ctr_idx)
+{
+    unsigned event_id = get_field(value, RISCV_IOMMU_IOHPMEVT_EVENT_ID);
+    uint32_t pair[2] = { ctr_idx, RISCV_IOMMU_HPMEVENT_INVALID };
+    uint32_t new_value = 1 << ctr_idx;
+    gpointer data;
+
+    /*
+     * If EventID field is RISCV_IOMMU_HPMEVENT_INVALID
+     * remove the current mapping.
+     */
+    if (event_id == RISCV_IOMMU_HPMEVENT_INVALID) {
+        data = g_hash_table_find(s->hpm_event_ctr_map, hpm_event_equal, pair);
+
+        new_value = GPOINTER_TO_UINT(data) & ~(new_value);
+        if (new_value != 0) {
+            g_hash_table_replace(s->hpm_event_ctr_map,
+                                 GUINT_TO_POINTER(pair[1]),
+                                 GUINT_TO_POINTER(new_value));
+        } else {
+            g_hash_table_remove(s->hpm_event_ctr_map,
+                                GUINT_TO_POINTER(pair[1]));
+        }
+
+        return;
+    }
+
+    /* Update the counter mask if the event is already enabled. */
+    if (g_hash_table_lookup_extended(s->hpm_event_ctr_map,
+                                     GUINT_TO_POINTER(event_id),
+                                     NULL,
+                                     &data)) {
+        new_value |= GPOINTER_TO_UINT(data);
+    }
+
+    g_hash_table_insert(s->hpm_event_ctr_map,
+                        GUINT_TO_POINTER(event_id),
+                        GUINT_TO_POINTER(new_value));
+}
+
+void riscv_iommu_process_hpmevt_write(RISCVIOMMUState *s, uint32_t evt_reg)
+{
+    const uint32_t ctr_idx = (evt_reg - RISCV_IOMMU_REG_IOHPMEVT_BASE) >> 3;
+    const uint32_t ovf = riscv_iommu_reg_get32(s, RISCV_IOMMU_REG_IOCOUNTOVF);
+    uint64_t val = riscv_iommu_reg_get64(s, evt_reg);
+
+    if (ctr_idx >= s->hpm_cntrs) {
+        return;
+    }
+
+    /* Clear OF bit in IOCNTOVF if it's being cleared in IOHPMEVT register. */
+    if (get_field(ovf, BIT(ctr_idx + 1)) &&
+        !get_field(val, RISCV_IOMMU_IOHPMEVT_OF)) {
+        /* +1 to offset CYCLE register OF bit. */
+        riscv_iommu_reg_mod32(
+            s, RISCV_IOMMU_REG_IOCOUNTOVF, 0, BIT(ctr_idx + 1));
+    }
+
+    if (!check_valid_event_id(get_field(val, RISCV_IOMMU_IOHPMEVT_EVENT_ID))) {
+        /* Reset EventID (WARL) field to invalid. */
+        val = set_field(val, RISCV_IOMMU_IOHPMEVT_EVENT_ID,
+                        RISCV_IOMMU_HPMEVENT_INVALID);
+        riscv_iommu_reg_set64(s, evt_reg, val);
+    }
+
+    update_event_map(s, val, ctr_idx);
+}
diff --git a/hw/riscv/riscv-iommu-hpm.h b/hw/riscv/riscv-iommu-hpm.h
index 0cd5509..5fc4ef2 100644
--- a/hw/riscv/riscv-iommu-hpm.h
+++ b/hw/riscv/riscv-iommu-hpm.h
@@ -28,5 +28,6 @@ void riscv_iommu_hpm_incr_ctr(RISCVIOMMUState *s, RISCVIOMMUContext *ctx,
 void riscv_iommu_hpm_timer_cb(void *priv);
 void riscv_iommu_process_iocntinh_cy(RISCVIOMMUState *s, bool prev_cy_inh);
 void riscv_iommu_process_hpmcycle_write(RISCVIOMMUState *s);
+void riscv_iommu_process_hpmevt_write(RISCVIOMMUState *s, uint32_t evt_reg);
 
 #endif
diff --git a/hw/riscv/riscv-iommu.c b/hw/riscv/riscv-iommu.c
index 821ecba..cdbb848 100644
--- a/hw/riscv/riscv-iommu.c
+++ b/hw/riscv/riscv-iommu.c
@@ -2040,7 +2040,7 @@ static void riscv_iommu_process_hpm_writes(RISCVIOMMUState *s,
 
     case RISCV_IOMMU_REG_IOHPMEVT_BASE ...
          RISCV_IOMMU_REG_IOHPMEVT(RISCV_IOMMU_IOCOUNT_NUM) + 4:
-        /* not yet implemented */
+        riscv_iommu_process_hpmevt_write(s, regb & ~7);
         break;
     }
 }
@@ -2487,6 +2487,8 @@ static const Property riscv_iommu_properties[] = {
DEFINE_PROP_BOOL("g-stage", RISCVIOMMUState, enable_g_stage, TRUE),
DEFINE_PROP_LINK("downstream-mr", RISCVIOMMUState, target_mr,
TYPE_MEMORY_REGION, MemoryRegion *),
+ DEFINE_PROP_UINT8("hpm-counters", RISCVIOMMUState, hpm_cntrs,
+ RISCV_IOMMU_IOCOUNT_NUM),
};
static void riscv_iommu_class_init(ObjectClass *klass, void* data)
diff --git a/hw/riscv/riscv-iommu.h b/hw/riscv/riscv-iommu.h
index 2fef6ee..a31aa62 100644
--- a/hw/riscv/riscv-iommu.h
+++ b/hw/riscv/riscv-iommu.h
@@ -90,6 +90,7 @@ struct RISCVIOMMUState {
 
     /* HPM event counters */
     GHashTable *hpm_event_ctr_map; /* Mapping of events to counters */
+    uint8_t hpm_cntrs;
 };
 
 void riscv_iommu_pci_setup_iommu(RISCVIOMMUState *iommu, PCIBus *bus,