Diffstat (limited to 'hw/intc/xive.c')
-rw-r--r-- | hw/intc/xive.c | 398
1 file changed, 214 insertions(+), 184 deletions(-)
diff --git a/hw/intc/xive.c b/hw/intc/xive.c
index 75dce82..d4c6e21 100644
--- a/hw/intc/xive.c
+++ b/hw/intc/xive.c
@@ -47,12 +47,6 @@ static uint8_t ipb_to_pipr(uint8_t ibp)
     return ibp ? clz32((uint32_t)ibp << 24) : 0xff;
 }
 
-static void ipb_update(uint8_t *regs, uint8_t priority)
-{
-    regs[TM_IPB] |= priority_to_ipb(priority);
-    regs[TM_PIPR] = ipb_to_pipr(regs[TM_IPB]);
-}
-
 static uint8_t exception_mask(uint8_t ring)
 {
     switch (ring) {
@@ -135,6 +129,15 @@ static void xive_tctx_set_cppr(XiveTCTX *tctx, uint8_t ring, uint8_t cppr)
     xive_tctx_notify(tctx, ring);
 }
 
+void xive_tctx_ipb_update(XiveTCTX *tctx, uint8_t ring, uint8_t ipb)
+{
+    uint8_t *regs = &tctx->regs[ring];
+
+    regs[TM_IPB] |= ipb;
+    regs[TM_PIPR] = ipb_to_pipr(regs[TM_IPB]);
+    xive_tctx_notify(tctx, ring);
+}
+
 static inline uint32_t xive_tctx_word2(uint8_t *ring)
 {
     return *((uint32_t *) &ring[TM_WORD2]);
@@ -144,19 +147,20 @@ static inline uint32_t xive_tctx_word2(uint8_t *ring)
  * XIVE Thread Interrupt Management Area (TIMA)
  */
 
-static void xive_tm_set_hv_cppr(XiveTCTX *tctx, hwaddr offset,
-                                uint64_t value, unsigned size)
+static void xive_tm_set_hv_cppr(XivePresenter *xptr, XiveTCTX *tctx,
+                                hwaddr offset, uint64_t value, unsigned size)
 {
     xive_tctx_set_cppr(tctx, TM_QW3_HV_PHYS, value & 0xff);
 }
 
-static uint64_t xive_tm_ack_hv_reg(XiveTCTX *tctx, hwaddr offset, unsigned size)
+static uint64_t xive_tm_ack_hv_reg(XivePresenter *xptr, XiveTCTX *tctx,
+                                   hwaddr offset, unsigned size)
 {
     return xive_tctx_accept(tctx, TM_QW3_HV_PHYS);
 }
 
-static uint64_t xive_tm_pull_pool_ctx(XiveTCTX *tctx, hwaddr offset,
-                                      unsigned size)
+static uint64_t xive_tm_pull_pool_ctx(XivePresenter *xptr, XiveTCTX *tctx,
+                                      hwaddr offset, unsigned size)
 {
     uint32_t qw2w2_prev = xive_tctx_word2(&tctx->regs[TM_QW2_HV_POOL]);
     uint32_t qw2w2;
@@ -166,13 +170,14 @@ static uint64_t xive_tm_pull_pool_ctx(XiveTCTX *tctx, hwaddr offset,
     return qw2w2;
 }
 
-static void xive_tm_vt_push(XiveTCTX *tctx, hwaddr offset,
+static void xive_tm_vt_push(XivePresenter *xptr, XiveTCTX *tctx, hwaddr offset,
                             uint64_t value, unsigned size)
 {
     tctx->regs[TM_QW3_HV_PHYS + TM_WORD2] = value & 0xff;
 }
 
-static uint64_t xive_tm_vt_poll(XiveTCTX *tctx, hwaddr offset, unsigned size)
+static uint64_t xive_tm_vt_poll(XivePresenter *xptr, XiveTCTX *tctx,
+                                hwaddr offset, unsigned size)
 {
     return tctx->regs[TM_QW3_HV_PHYS + TM_WORD2] & 0xff;
 }
@@ -315,13 +320,14 @@ static uint64_t xive_tm_raw_read(XiveTCTX *tctx, hwaddr offset, unsigned size)
  * state changes (side effects) in addition to setting/returning the
  * interrupt management area context of the processor thread.
  */
-static uint64_t xive_tm_ack_os_reg(XiveTCTX *tctx, hwaddr offset, unsigned size)
+static uint64_t xive_tm_ack_os_reg(XivePresenter *xptr, XiveTCTX *tctx,
+                                   hwaddr offset, unsigned size)
 {
     return xive_tctx_accept(tctx, TM_QW1_OS);
 }
 
-static void xive_tm_set_os_cppr(XiveTCTX *tctx, hwaddr offset,
-                                uint64_t value, unsigned size)
+static void xive_tm_set_os_cppr(XivePresenter *xptr, XiveTCTX *tctx,
+                                hwaddr offset, uint64_t value, unsigned size)
 {
     xive_tctx_set_cppr(tctx, TM_QW1_OS, value & 0xff);
 }
@@ -330,24 +336,114 @@ static void xive_tm_set_os_cppr(XiveTCTX *tctx, hwaddr offset,
  * Adjust the IPB to allow a CPU to process event queues of other
  * priorities during one physical interrupt cycle.
  */
-static void xive_tm_set_os_pending(XiveTCTX *tctx, hwaddr offset,
-                                   uint64_t value, unsigned size)
+static void xive_tm_set_os_pending(XivePresenter *xptr, XiveTCTX *tctx,
+                                   hwaddr offset, uint64_t value, unsigned size)
 {
-    ipb_update(&tctx->regs[TM_QW1_OS], value & 0xff);
-    xive_tctx_notify(tctx, TM_QW1_OS);
+    xive_tctx_ipb_update(tctx, TM_QW1_OS, priority_to_ipb(value & 0xff));
 }
 
-static uint64_t xive_tm_pull_os_ctx(XiveTCTX *tctx, hwaddr offset,
-                                    unsigned size)
+static void xive_os_cam_decode(uint32_t cam, uint8_t *nvt_blk,
+                               uint32_t *nvt_idx, bool *vo)
 {
-    uint32_t qw1w2_prev = xive_tctx_word2(&tctx->regs[TM_QW1_OS]);
-    uint32_t qw1w2;
+    if (nvt_blk) {
+        *nvt_blk = xive_nvt_blk(cam);
+    }
+    if (nvt_idx) {
+        *nvt_idx = xive_nvt_idx(cam);
+    }
+    if (vo) {
+        *vo = !!(cam & TM_QW1W2_VO);
+    }
+}
+
+static uint32_t xive_tctx_get_os_cam(XiveTCTX *tctx, uint8_t *nvt_blk,
+                                     uint32_t *nvt_idx, bool *vo)
+{
+    uint32_t qw1w2 = xive_tctx_word2(&tctx->regs[TM_QW1_OS]);
+    uint32_t cam = be32_to_cpu(qw1w2);
 
-    qw1w2 = xive_set_field32(TM_QW1W2_VO, qw1w2_prev, 0);
+    xive_os_cam_decode(cam, nvt_blk, nvt_idx, vo);
+    return qw1w2;
+}
+
+static void xive_tctx_set_os_cam(XiveTCTX *tctx, uint32_t qw1w2)
+{
     memcpy(&tctx->regs[TM_QW1_OS + TM_WORD2], &qw1w2, 4);
+}
+
+static uint64_t xive_tm_pull_os_ctx(XivePresenter *xptr, XiveTCTX *tctx,
+                                    hwaddr offset, unsigned size)
+{
+    uint32_t qw1w2;
+    uint32_t qw1w2_new;
+    uint8_t nvt_blk;
+    uint32_t nvt_idx;
+    bool vo;
+
+    qw1w2 = xive_tctx_get_os_cam(tctx, &nvt_blk, &nvt_idx, &vo);
+
+    if (!vo) {
+        qemu_log_mask(LOG_GUEST_ERROR, "XIVE: pulling invalid NVT %x/%x !?\n",
+                      nvt_blk, nvt_idx);
+    }
+
+    /* Invalidate CAM line */
+    qw1w2_new = xive_set_field32(TM_QW1W2_VO, qw1w2, 0);
+    xive_tctx_set_os_cam(tctx, qw1w2_new);
     return qw1w2;
 }
 
+static void xive_tctx_need_resend(XiveRouter *xrtr, XiveTCTX *tctx,
+                                  uint8_t nvt_blk, uint32_t nvt_idx)
+{
+    XiveNVT nvt;
+    uint8_t ipb;
+
+    /*
+     * Grab the associated NVT to pull the pending bits, and merge
+     * them with the IPB of the thread interrupt context registers
+     */
+    if (xive_router_get_nvt(xrtr, nvt_blk, nvt_idx, &nvt)) {
+        qemu_log_mask(LOG_GUEST_ERROR, "XIVE: invalid NVT %x/%x\n",
+                      nvt_blk, nvt_idx);
+        return;
+    }
+
+    ipb = xive_get_field32(NVT_W4_IPB, nvt.w4);
+
+    if (ipb) {
+        /* Reset the NVT value */
+        nvt.w4 = xive_set_field32(NVT_W4_IPB, nvt.w4, 0);
+        xive_router_write_nvt(xrtr, nvt_blk, nvt_idx, &nvt, 4);
+
+        /* Merge in current context */
+        xive_tctx_ipb_update(tctx, TM_QW1_OS, ipb);
+    }
+}
+
+/*
+ * Updating the OS CAM line can trigger a resend of interrupt
+ */
+static void xive_tm_push_os_ctx(XivePresenter *xptr, XiveTCTX *tctx,
+                                hwaddr offset, uint64_t value, unsigned size)
+{
+    uint32_t cam = value;
+    uint32_t qw1w2 = cpu_to_be32(cam);
+    uint8_t nvt_blk;
+    uint32_t nvt_idx;
+    bool vo;
+
+    xive_os_cam_decode(cam, &nvt_blk, &nvt_idx, &vo);
+
+    /* First update the registers */
+    xive_tctx_set_os_cam(tctx, qw1w2);
+
+    /* Check the interrupt pending bits */
+    if (vo) {
+        xive_tctx_need_resend(XIVE_ROUTER(xptr), tctx, nvt_blk, nvt_idx);
+    }
+}
+
 /*
  * Define a mapping of "special" operations depending on the TIMA page
  * offset and the size of the operation.
@@ -356,9 +452,11 @@ typedef struct XiveTmOp {
     uint8_t  page_offset;
     uint32_t op_offset;
     unsigned size;
-    void     (*write_handler)(XiveTCTX *tctx, hwaddr offset, uint64_t value,
-                              unsigned size);
-    uint64_t (*read_handler)(XiveTCTX *tctx, hwaddr offset, unsigned size);
+    void     (*write_handler)(XivePresenter *xptr, XiveTCTX *tctx,
+                              hwaddr offset,
+                              uint64_t value, unsigned size);
+    uint64_t (*read_handler)(XivePresenter *xptr, XiveTCTX *tctx, hwaddr offset,
+                             unsigned size);
 } XiveTmOp;
 
 static const XiveTmOp xive_tm_operations[] = {
@@ -367,6 +465,7 @@ static const XiveTmOp xive_tm_operations[] = {
      * effects
      */
     { XIVE_TM_OS_PAGE, TM_QW1_OS + TM_CPPR,   1, xive_tm_set_os_cppr, NULL },
+    { XIVE_TM_HV_PAGE, TM_QW1_OS + TM_WORD2,  4, xive_tm_push_os_ctx, NULL },
     { XIVE_TM_HV_PAGE, TM_QW3_HV_PHYS + TM_CPPR, 1, xive_tm_set_hv_cppr, NULL },
     { XIVE_TM_HV_PAGE, TM_QW3_HV_PHYS + TM_WORD2, 1, xive_tm_vt_push, NULL },
     { XIVE_TM_HV_PAGE, TM_QW3_HV_PHYS + TM_WORD2, 1, NULL, xive_tm_vt_poll },
@@ -404,8 +503,8 @@ static const XiveTmOp *xive_tm_find_op(hwaddr offset, unsigned size, bool write)
 /*
  * TIMA MMIO handlers
  */
-void xive_tctx_tm_write(XiveTCTX *tctx, hwaddr offset, uint64_t value,
-                        unsigned size)
+void xive_tctx_tm_write(XivePresenter *xptr, XiveTCTX *tctx, hwaddr offset,
+                        uint64_t value, unsigned size)
 {
     const XiveTmOp *xto;
 
@@ -422,7 +521,7 @@ void xive_tctx_tm_write(XiveTCTX *tctx, hwaddr offset, uint64_t value,
             qemu_log_mask(LOG_GUEST_ERROR, "XIVE: invalid write access at TIMA "
                           "@%"HWADDR_PRIx"\n", offset);
         } else {
-            xto->write_handler(tctx, offset, value, size);
+            xto->write_handler(xptr, tctx, offset, value, size);
         }
         return;
     }
@@ -432,7 +531,7 @@ void xive_tctx_tm_write(XiveTCTX *tctx, hwaddr offset, uint64_t value,
      */
     xto = xive_tm_find_op(offset, size, true);
     if (xto) {
-        xto->write_handler(tctx, offset, value, size);
+        xto->write_handler(xptr, tctx, offset, value, size);
         return;
     }
 
@@ -442,7 +541,8 @@ void xive_tctx_tm_write(XiveTCTX *tctx, hwaddr offset, uint64_t value,
     xive_tm_raw_write(tctx, offset, value, size);
 }
 
-uint64_t xive_tctx_tm_read(XiveTCTX *tctx, hwaddr offset, unsigned size)
+uint64_t xive_tctx_tm_read(XivePresenter *xptr, XiveTCTX *tctx, hwaddr offset,
+                           unsigned size)
 {
     const XiveTmOp *xto;
 
@@ -460,7 +560,7 @@ uint64_t xive_tctx_tm_read(XiveTCTX *tctx, hwaddr offset, unsigned size)
                           "@%"HWADDR_PRIx"\n", offset);
             return -1;
         }
-        return xto->read_handler(tctx, offset, size);
+        return xto->read_handler(xptr, tctx, offset, size);
     }
 
     /*
@@ -468,7 +568,7 @@ uint64_t xive_tctx_tm_read(XiveTCTX *tctx, hwaddr offset, unsigned size)
      */
     xto = xive_tm_find_op(offset, size, false);
     if (xto) {
-        return xto->read_handler(tctx, offset, size);
+        return xto->read_handler(xptr, tctx, offset, size);
     }
 
     /*
@@ -477,35 +577,6 @@ uint64_t xive_tctx_tm_read(XiveTCTX *tctx, hwaddr offset, unsigned size)
      */
     return xive_tm_raw_read(tctx, offset, size);
 }
 
-static void xive_tm_write(void *opaque, hwaddr offset,
-                          uint64_t value, unsigned size)
-{
-    XiveTCTX *tctx = xive_router_get_tctx(XIVE_ROUTER(opaque), current_cpu);
-
-    xive_tctx_tm_write(tctx, offset, value, size);
-}
-
-static uint64_t xive_tm_read(void *opaque, hwaddr offset, unsigned size)
-{
-    XiveTCTX *tctx = xive_router_get_tctx(XIVE_ROUTER(opaque), current_cpu);
-
-    return xive_tctx_tm_read(tctx, offset, size);
-}
-
-const MemoryRegionOps xive_tm_ops = {
-    .read = xive_tm_read,
-    .write = xive_tm_write,
-    .endianness = DEVICE_BIG_ENDIAN,
-    .valid = {
-        .min_access_size = 1,
-        .max_access_size = 8,
-    },
-    .impl = {
-        .min_access_size = 1,
-        .max_access_size = 8,
-    },
-};
-
 static char *xive_tctx_ring_print(uint8_t *ring)
 {
     uint32_t w2 = xive_tctx_word2(ring);
@@ -580,19 +651,11 @@ static void xive_tctx_realize(DeviceState *dev, Error **errp)
     XiveTCTX *tctx = XIVE_TCTX(dev);
     PowerPCCPU *cpu;
     CPUPPCState *env;
-    Object *obj;
     Error *local_err = NULL;
 
-    obj = object_property_get_link(OBJECT(dev), "cpu", &local_err);
-    if (!obj) {
-        error_propagate(errp, local_err);
-        error_prepend(errp, "required link 'cpu' not found: ");
-        return;
-    }
-
-    cpu = POWERPC_CPU(obj);
-    tctx->cs = CPU(obj);
+    assert(tctx->cs);
+    cpu = POWERPC_CPU(tctx->cs);
     env = &cpu->env;
     switch (PPC_INPUT(env)) {
     case PPC_FLAGS_INPUT_POWER9:
@@ -662,6 +725,11 @@ static const VMStateDescription vmstate_xive_tctx = {
     },
 };
 
+static Property xive_tctx_properties[] = {
+    DEFINE_PROP_LINK("cpu", XiveTCTX, cs, TYPE_CPU, CPUState *),
+    DEFINE_PROP_END_OF_LIST(),
+};
+
 static void xive_tctx_class_init(ObjectClass *klass, void *data)
 {
     DeviceClass *dc = DEVICE_CLASS(klass);
@@ -669,6 +737,7 @@ static void xive_tctx_class_init(ObjectClass *klass, void *data)
     dc->desc = "XIVE Interrupt Thread Context";
     dc->realize = xive_tctx_realize;
     dc->vmsd = &vmstate_xive_tctx;
+    dc->props = xive_tctx_properties;
     /*
      * Reason: part of XIVE interrupt controller, needs to be wired up
      * by xive_tctx_create().
@@ -691,8 +760,7 @@ Object *xive_tctx_create(Object *cpu, XiveRouter *xrtr, Error **errp)
     obj = object_new(TYPE_XIVE_TCTX);
     object_property_add_child(cpu, TYPE_XIVE_TCTX, obj, &error_abort);
     object_unref(obj);
-    object_ref(cpu);
-    object_property_add_const_link(obj, "cpu", cpu, &error_abort);
+    object_property_set_link(obj, cpu, "cpu", &error_abort);
     object_property_set_bool(obj, true, "realized", &local_err);
     if (local_err) {
         goto error;
     }
@@ -710,7 +778,6 @@ void xive_tctx_destroy(XiveTCTX *tctx)
 {
     Object *obj = OBJECT(tctx);
 
-    object_unref(object_property_get_link(obj, "cpu", &error_abort));
     object_unparent(obj);
 }
 
@@ -1064,17 +1131,8 @@ static void xive_source_reset(void *dev)
 static void xive_source_realize(DeviceState *dev, Error **errp)
 {
     XiveSource *xsrc = XIVE_SOURCE(dev);
-    Object *obj;
-    Error *local_err = NULL;
 
-    obj = object_property_get_link(OBJECT(dev), "xive", &local_err);
-    if (!obj) {
-        error_propagate(errp, local_err);
-        error_prepend(errp, "required link 'xive' not found: ");
-        return;
-    }
-
-    xsrc->xive = XIVE_NOTIFIER(obj);
+    assert(xsrc->xive);
 
     if (!xsrc->nr_irqs) {
         error_setg(errp, "Number of interrupt needs to be greater than 0");
@@ -1120,6 +1178,8 @@ static Property xive_source_properties[] = {
     DEFINE_PROP_UINT64("flags", XiveSource, esb_flags, 0),
     DEFINE_PROP_UINT32("nr-irqs", XiveSource, nr_irqs, 0),
    DEFINE_PROP_UINT32("shift", XiveSource, esb_shift, XIVE_ESB_64K_2PAGE),
+    DEFINE_PROP_LINK("xive", XiveSource, xive, TYPE_XIVE_NOTIFIER,
+                     XiveNotifier *),
     DEFINE_PROP_END_OF_LIST(),
 };
 
@@ -1311,11 +1371,11 @@ int xive_router_write_nvt(XiveRouter *xrtr, uint8_t nvt_blk, uint32_t nvt_idx,
     return xrc->write_nvt(xrtr, nvt_blk, nvt_idx, nvt, word_number);
 }
 
-XiveTCTX *xive_router_get_tctx(XiveRouter *xrtr, CPUState *cs)
+static int xive_router_get_block_id(XiveRouter *xrtr)
 {
-    XiveRouterClass *xrc = XIVE_ROUTER_GET_CLASS(xrtr);
+    XiveRouterClass *xrc = XIVE_ROUTER_GET_CLASS(xrtr);
 
-    return xrc->get_tctx(xrtr, cs);
+    return xrc->get_block_id(xrtr);
 }
 
 /*
@@ -1323,20 +1383,22 @@ XiveTCTX *xive_router_get_tctx(XiveRouter *xrtr, CPUState *cs)
  *
  * chip << 19 | 0000000 0 0001 thread (7Bit)
  */
-static uint32_t xive_tctx_hw_cam_line(XiveTCTX *tctx)
+static uint32_t xive_tctx_hw_cam_line(XivePresenter *xptr, XiveTCTX *tctx)
 {
     CPUPPCState *env = &POWERPC_CPU(tctx->cs)->env;
     uint32_t pir = env->spr_cb[SPR_PIR].default_value;
+    uint8_t blk = xive_router_get_block_id(XIVE_ROUTER(xptr));
 
-    return xive_nvt_cam_line((pir >> 8) & 0xf, 1 << 7 | (pir & 0x7f));
+    return xive_nvt_cam_line(blk, 1 << 7 | (pir & 0x7f));
 }
 
 /*
  * The thread context register words are in big-endian format.
  */
-static int xive_presenter_tctx_match(XiveTCTX *tctx, uint8_t format,
-                                     uint8_t nvt_blk, uint32_t nvt_idx,
-                                     bool cam_ignore, uint32_t logic_serv)
+int xive_presenter_tctx_match(XivePresenter *xptr, XiveTCTX *tctx,
+                              uint8_t format,
+                              uint8_t nvt_blk, uint32_t nvt_idx,
+                              bool cam_ignore, uint32_t logic_serv)
 {
     uint32_t cam = xive_nvt_cam_line(nvt_blk, nvt_idx);
     uint32_t qw3w2 = xive_tctx_word2(&tctx->regs[TM_QW3_HV_PHYS]);
@@ -1364,7 +1426,7 @@ static int xive_presenter_tctx_match(XiveTCTX *tctx, uint8_t format,
 
     /* PHYS ring */
     if ((be32_to_cpu(qw3w2) & TM_QW3W2_VT) &&
-        cam == xive_tctx_hw_cam_line(tctx)) {
+        cam == xive_tctx_hw_cam_line(xptr, tctx)) {
         return TM_QW3_HV_PHYS;
     }
 
@@ -1393,71 +1455,6 @@ static int xive_presenter_tctx_match(XiveTCTX *tctx, uint8_t format,
     return -1;
 }
 
-typedef struct XiveTCTXMatch {
-    XiveTCTX *tctx;
-    uint8_t ring;
-} XiveTCTXMatch;
-
-static bool xive_presenter_match(XiveRouter *xrtr, uint8_t format,
-                                 uint8_t nvt_blk, uint32_t nvt_idx,
-                                 bool cam_ignore, uint8_t priority,
-                                 uint32_t logic_serv, XiveTCTXMatch *match)
-{
-    CPUState *cs;
-
-    /*
-     * TODO (PowerNV): handle chip_id overwrite of block field for
-     * hardwired CAM compares
-     */
-
-    CPU_FOREACH(cs) {
-        XiveTCTX *tctx = xive_router_get_tctx(xrtr, cs);
-        int ring;
-
-        /*
-         * Skip partially initialized vCPUs. This can happen when
-         * vCPUs are hotplugged.
-         */
-        if (!tctx) {
-            continue;
-        }
-
-        /*
-         * HW checks that the CPU is enabled in the Physical Thread
-         * Enable Register (PTER).
-         */
-
-        /*
-         * Check the thread context CAM lines and record matches. We
-         * will handle CPU exception delivery later
-         */
-        ring = xive_presenter_tctx_match(tctx, format, nvt_blk, nvt_idx,
-                                         cam_ignore, logic_serv);
-        /*
-         * Save the context and follow on to catch duplicates, that we
-         * don't support yet.
-         */
-        if (ring != -1) {
-            if (match->tctx) {
-                qemu_log_mask(LOG_GUEST_ERROR, "XIVE: already found a thread "
-                              "context NVT %x/%x\n", nvt_blk, nvt_idx);
-                return false;
-            }
-
-            match->ring = ring;
-            match->tctx = tctx;
-        }
-    }
-
-    if (!match->tctx) {
-        qemu_log_mask(LOG_UNIMP, "XIVE: NVT %x/%x is not dispatched\n",
-                      nvt_blk, nvt_idx);
-        return false;
-    }
-
-    return true;
-}
-
 /*
  * This is our simple Xive Presenter Engine model. It is merged in the
  * Router as it does not require an extra object.
@@ -1473,22 +1470,31 @@ static bool xive_presenter_match(XiveRouter *xrtr, uint8_t format,
  *
  * The parameters represent what is sent on the PowerBus
  */
-static bool xive_presenter_notify(XiveRouter *xrtr, uint8_t format,
+static bool xive_presenter_notify(uint8_t format,
                                   uint8_t nvt_blk, uint32_t nvt_idx,
                                   bool cam_ignore, uint8_t priority,
                                   uint32_t logic_serv)
 {
+    XiveFabric *xfb = XIVE_FABRIC(qdev_get_machine());
+    XiveFabricClass *xfc = XIVE_FABRIC_GET_CLASS(xfb);
     XiveTCTXMatch match = { .tctx = NULL, .ring = 0 };
-    bool found;
+    int count;
 
-    found = xive_presenter_match(xrtr, format, nvt_blk, nvt_idx, cam_ignore,
-                                 priority, logic_serv, &match);
-    if (found) {
-        ipb_update(&match.tctx->regs[match.ring], priority);
-        xive_tctx_notify(match.tctx, match.ring);
+    /*
+     * Ask the machine to scan the interrupt controllers for a match
+     */
+    count = xfc->match_nvt(xfb, format, nvt_blk, nvt_idx, cam_ignore,
+                           priority, logic_serv, &match);
+    if (count < 0) {
+        return false;
     }
 
-    return found;
+    /* handle CPU exception delivery */
+    if (count) {
+        xive_tctx_ipb_update(match.tctx, match.ring, priority_to_ipb(priority));
+    }
+
+    return !!count;
 }
 
 /*
@@ -1601,7 +1607,7 @@ static void xive_router_end_notify(XiveRouter *xrtr, uint8_t end_blk,
         return;
     }
 
-    found = xive_presenter_notify(xrtr, format, nvt_blk, nvt_idx,
+    found = xive_presenter_notify(format, nvt_blk, nvt_idx,
                                   xive_get_field32(END_W7_F0_IGNORE, end.w7),
                                   priority,
                                   xive_get_field32(END_W7_F1_LOG_SERVER_ID, end.w7));
@@ -1618,14 +1624,21 @@ static void xive_router_end_notify(XiveRouter *xrtr, uint8_t end_blk,
      * - logical server : forward request to IVPE (not supported)
      */
     if (xive_end_is_backlog(&end)) {
+        uint8_t ipb;
+
         if (format == 1) {
             qemu_log_mask(LOG_GUEST_ERROR,
                           "XIVE: END %x/%x invalid config: F1 & backlog\n",
                           end_blk, end_idx);
             return;
         }
-        /* Record the IPB in the associated NVT structure */
-        ipb_update((uint8_t *) &nvt.w4, priority);
+        /*
+         * Record the IPB in the associated NVT structure for later
+         * use. The presenter will resend the interrupt when the vCPU
+         * is dispatched again on a HW thread.
+         */
+        ipb = xive_get_field32(NVT_W4_IPB, nvt.w4) | priority_to_ipb(priority);
+        nvt.w4 = xive_set_field32(NVT_W4_IPB, nvt.w4, ipb);
         xive_router_write_nvt(xrtr, nvt_blk, nvt_idx, &nvt, 4);
 
         /*
@@ -1718,6 +1731,7 @@ static const TypeInfo xive_router_info = {
     .class_init = xive_router_class_init,
     .interfaces = (InterfaceInfo[]) {
         { TYPE_XIVE_NOTIFIER },
+        { TYPE_XIVE_PRESENTER },
         { }
     }
 };
@@ -1749,7 +1763,11 @@ static uint64_t xive_end_source_read(void *opaque, hwaddr addr, unsigned size)
     uint8_t pq;
     uint64_t ret = -1;
 
-    end_blk = xsrc->block_id;
+    /*
+     * The block id should be deduced from the load address on the END
+     * ESB MMIO but our model only supports a single block per XIVE chip.
+     */
+    end_blk = xive_router_get_block_id(xsrc->xrtr);
     end_idx = addr >> (xsrc->esb_shift + 1);
 
     if (xive_router_get_end(xsrc->xrtr, end_blk, end_idx, &end)) {
@@ -1825,17 +1843,8 @@ static const MemoryRegionOps xive_end_source_ops = {
 static void xive_end_source_realize(DeviceState *dev, Error **errp)
 {
     XiveENDSource *xsrc = XIVE_END_SOURCE(dev);
-    Object *obj;
-    Error *local_err = NULL;
 
-    obj = object_property_get_link(OBJECT(dev), "xive", &local_err);
-    if (!obj) {
-        error_propagate(errp, local_err);
-        error_prepend(errp, "required link 'xive' not found: ");
-        return;
-    }
-
-    xsrc->xrtr = XIVE_ROUTER(obj);
+    assert(xsrc->xrtr);
 
     if (!xsrc->nr_ends) {
         error_setg(errp, "Number of interrupt needs to be greater than 0");
@@ -1858,9 +1867,10 @@ static void xive_end_source_realize(DeviceState *dev, Error **errp)
 }
 
 static Property xive_end_source_properties[] = {
-    DEFINE_PROP_UINT8("block-id", XiveENDSource, block_id, 0),
     DEFINE_PROP_UINT32("nr-ends", XiveENDSource, nr_ends, 0),
     DEFINE_PROP_UINT32("shift", XiveENDSource, esb_shift, XIVE_ESB_64K),
+    DEFINE_PROP_LINK("xive", XiveENDSource, xrtr, TYPE_XIVE_ROUTER,
+                     XiveRouter *),
     DEFINE_PROP_END_OF_LIST(),
 };
 
@@ -1894,10 +1904,30 @@ static const TypeInfo xive_notifier_info = {
     .class_size = sizeof(XiveNotifierClass),
 };
 
+/*
+ * XIVE Presenter
+ */
+static const TypeInfo xive_presenter_info = {
+    .name = TYPE_XIVE_PRESENTER,
+    .parent = TYPE_INTERFACE,
+    .class_size = sizeof(XivePresenterClass),
+};
+
+/*
+ * XIVE Fabric
+ */
+static const TypeInfo xive_fabric_info = {
+    .name = TYPE_XIVE_FABRIC,
+    .parent = TYPE_INTERFACE,
+    .class_size = sizeof(XiveFabricClass),
+};
+
 static void xive_register_types(void)
 {
+    type_register_static(&xive_fabric_info);
     type_register_static(&xive_source_info);
     type_register_static(&xive_notifier_info);
+    type_register_static(&xive_presenter_info);
     type_register_static(&xive_router_info);
     type_register_static(&xive_end_source_info);
     type_register_static(&xive_tctx_info);
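
The new TYPE_XIVE_FABRIC interface moves the vCPU scan out of the router and into the machine: xive_presenter_notify() now asks the machine's match_nvt() handler for a XiveTCTXMatch and only looks at the returned count, instead of walking CPU_FOREACH itself. A minimal sketch of what such a machine-side handler could look like follows, modeled on the xive_presenter_match() loop removed above; MyMachineState, MY_MACHINE(), the machine->xive field and the xive_get_tctx() per-vCPU lookup are hypothetical placeholders, not part of this patch.

/*
 * Sketch only: a machine implementing TYPE_XIVE_FABRIC could provide
 * match_nvt() along these lines, scanning its vCPUs with the now
 * public xive_presenter_tctx_match(). The priority argument would be
 * used by a real presenter for precluded-priority checks and is
 * ignored here.
 */
static int my_machine_match_nvt(XiveFabric *xfb, uint8_t format,
                                uint8_t nvt_blk, uint32_t nvt_idx,
                                bool cam_ignore, uint8_t priority,
                                uint32_t logic_serv, XiveTCTXMatch *match)
{
    MyMachineState *machine = MY_MACHINE(xfb);          /* hypothetical */
    XivePresenter *xptr = XIVE_PRESENTER(machine->xive); /* hypothetical */
    CPUState *cs;
    int count = 0;

    CPU_FOREACH(cs) {
        XiveTCTX *tctx = xive_get_tctx(cs);              /* hypothetical */
        int ring;

        /* Skip partially initialized vCPUs (hotplug in progress) */
        if (!tctx) {
            continue;
        }

        /* Check the thread context CAM lines for a match */
        ring = xive_presenter_tctx_match(xptr, tctx, format, nvt_blk, nvt_idx,
                                         cam_ignore, logic_serv);
        if (ring != -1) {
            /* Duplicate matches are not supported */
            if (match->tctx) {
                return -1;
            }
            match->ring = ring;
            match->tctx = tctx;
            count++;
        }
    }

    /* 0 means the NVT is not currently dispatched on any HW thread */
    return count;
}

With this split, xive_presenter_notify() treats a negative return as an error, zero as "not dispatched" (the IPB is then backlogged in the NVT), and a positive count as a delivery target for xive_tctx_ipb_update().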