author    | Stefano Stabellini <sstabellini@kernel.org> | 2017-03-22 10:16:03 -0700
committer | Stefano Stabellini <sstabellini@kernel.org> | 2017-04-25 11:04:33 -0700
commit    | 47b70fb1e4b619c9d6de74776a6c7c8e5c7719ee
tree      | 10d605a04ca381848433529b01093e3589364e8c /hw/9pfs/xen-9p-backend.c
parent    | f23ef34a5dec56103e1348a622a6adf7c87c821f
xen/9pfs: receive requests from the frontend
Upon receiving an event channel notification from the frontend, schedule
the bottom half. From the bottom half, read one request from the ring,
create a pdu and call pdu_submit to handle it.
For now, only handle one request per ring at a time.
Signed-off-by: Stefano Stabellini <stefano@aporeto.com>
CC: anthony.perard@citrix.com
CC: jgross@suse.com
CC: Aneesh Kumar K.V <aneesh.kumar@linux.vnet.ibm.com>
CC: Greg Kurz <groug@kaod.org>
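
For readers following the diff below: the 9P header type and the ring-index helpers it uses are defined elsewhere in the tree, not in this patch. A minimal, non-authoritative sketch of what they are assumed to look like (the P9MsgHeader layout follows the standard 9P wire header in hw/9pfs/9p.h; the helper bodies follow the usual power-of-two ring conventions and are not copied from this commit; RING_IDX and QEMU_PACKED come from the Xen ring and QEMU compiler headers):

/* Assumed 9P wire header layout; all multi-byte fields are
 * little-endian on the wire. */
typedef struct {
    uint32_t size_le;   /* total message length, including this header */
    uint8_t  id;        /* 9P operation code (Tversion, Twalk, ...) */
    uint16_t tag_le;    /* tag chosen by the client to match the reply */
} QEMU_PACKED P9MsgHeader;

/* Assumed behaviour of the ring helpers used by xen_9pfs_receive().
 * Indices are free-running; the ring size is a power of two. */
static inline RING_IDX xen_9pfs_queued(RING_IDX prod, RING_IDX cons,
                                       RING_IDX ring_size)
{
    return prod - cons;            /* bytes produced but not yet consumed */
}

static inline RING_IDX xen_9pfs_mask(RING_IDX idx, RING_IDX ring_size)
{
    return idx & (ring_size - 1);  /* wrap the index into the ring */
}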
Diffstat (limited to 'hw/9pfs/xen-9p-backend.c')
-rw-r--r-- | hw/9pfs/xen-9p-backend.c | 50
1 file changed, 50 insertions(+), 0 deletions(-)
diff --git a/hw/9pfs/xen-9p-backend.c b/hw/9pfs/xen-9p-backend.c
index 03dd881..8820e8f 100644
--- a/hw/9pfs/xen-9p-backend.c
+++ b/hw/9pfs/xen-9p-backend.c
@@ -100,12 +100,62 @@ static int xen_9pfs_init(struct XenDevice *xendev)
     return 0;
 }
 
+static int xen_9pfs_receive(Xen9pfsRing *ring)
+{
+    P9MsgHeader h;
+    RING_IDX cons, prod, masked_prod, masked_cons;
+    V9fsPDU *pdu;
+
+    if (ring->inprogress) {
+        return 0;
+    }
+
+    cons = ring->intf->out_cons;
+    prod = ring->intf->out_prod;
+    xen_rmb();
+
+    if (xen_9pfs_queued(prod, cons, XEN_FLEX_RING_SIZE(ring->ring_order)) <
+        sizeof(h)) {
+        return 0;
+    }
+    ring->inprogress = true;
+
+    masked_prod = xen_9pfs_mask(prod, XEN_FLEX_RING_SIZE(ring->ring_order));
+    masked_cons = xen_9pfs_mask(cons, XEN_FLEX_RING_SIZE(ring->ring_order));
+
+    xen_9pfs_read_packet((uint8_t *) &h, ring->ring.out, sizeof(h),
+                         masked_prod, &masked_cons,
+                         XEN_FLEX_RING_SIZE(ring->ring_order));
+
+    /* cannot fail, because we only handle one request per ring at a time */
+    pdu = pdu_alloc(&ring->priv->state);
+    pdu->size = le32_to_cpu(h.size_le);
+    pdu->id = h.id;
+    pdu->tag = le32_to_cpu(h.tag_le);
+    ring->out_size = le32_to_cpu(h.size_le);
+    ring->out_cons = cons + le32_to_cpu(h.size_le);
+
+    qemu_co_queue_init(&pdu->complete);
+    pdu_submit(pdu);
+
+    return 0;
+}
+
 static void xen_9pfs_bh(void *opaque)
 {
+    Xen9pfsRing *ring = opaque;
+    xen_9pfs_receive(ring);
 }
 
 static void xen_9pfs_evtchn_event(void *opaque)
 {
+    Xen9pfsRing *ring = opaque;
+    evtchn_port_t port;
+
+    port = xenevtchn_pending(ring->evtchndev);
+    xenevtchn_unmask(ring->evtchndev, port);
+
+    qemu_bh_schedule(ring->bh);
 }
 
 static int xen_9pfs_free(struct XenDevice *xendev)
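
One piece this hunk does not show is where ring->bh and ring->evtchndev get set up; that wiring belongs to the device connect path elsewhere in this series. A rough sketch of such wiring, assuming the standard QEMU main-loop and libxenevtchn calls (the ring->evtchn and ring->local_port field names and the exact call site are assumptions, not taken from this patch):

/* Hypothetical excerpt from the per-ring connect path (not part of this
 * diff): bind the frontend's event channel, create the bottom half, and
 * watch the event-channel fd so xen_9pfs_evtchn_event() fires on notify. */
ring->evtchndev = xenevtchn_open(NULL, 0);
if (ring->evtchndev == NULL) {
    return -1;
}
ring->local_port = xenevtchn_bind_interdomain(ring->evtchndev,
                                              xendev->dom,   /* frontend domid */
                                              ring->evtchn); /* port from xenstore */
ring->bh = qemu_bh_new(xen_9pfs_bh, ring);
qemu_set_fd_handler(xenevtchn_fd(ring->evtchndev),
                    xen_9pfs_evtchn_event, NULL, ring);

With something like that in place, a frontend notification makes the fd readable, xen_9pfs_evtchn_event() acknowledges the port and schedules the bottom half, and xen_9pfs_receive() then pulls exactly one request off the ring, as described in the commit message.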