author     David Woodhouse <dwmw@amazon.co.uk>    2022-12-28 10:06:49 +0000
committer  David Woodhouse <dwmw@amazon.co.uk>    2023-03-01 09:08:26 +0000
commit     f3341e7b91548c38d484285307c23b8f9ce73307 (patch)
tree       d98528c7341c5c6df14586975e560eaabc0b12a1 /hw
parent     c08f5d0e53b00f101c6aab7b5c7eabe22bab1962 (diff)
hw/xen: Add basic ring handling to xenstore
Extract requests, return ENOSYS to all of them. This is enough to allow
older Linux guests to boot, as they need *something* back but it doesn't
matter much what.

A full implementation of a single-tenant internal XenStore copy-on-write
tree with transactions and watches is waiting in the wings to be sent in
a subsequent round of patches along with hooking up the actual PV disk
back end in qemu, but this is enough to get guests booting for now.

Signed-off-by: David Woodhouse <dwmw@amazon.co.uk>
Reviewed-by: Paul Durrant <paul@xen.org>
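For reference, the wire framing and shared-ring layout that the new handlers
operate on come from Xen's public xs_wire.h header. A rough sketch of the
relevant definitions (paraphrased from the public ABI, not part of this patch):

    /* Every request and response starts with this header, followed by
     * len bytes of payload; XS_ERROR replies carry the error name as a
     * nul-terminated string, e.g. "ENOSYS". */
    struct xsd_sockmsg {
        uint32_t type;   /* XS_??? message type, e.g. XS_ERROR */
        uint32_t req_id; /* request identifier, echoed in the response */
        uint32_t tx_id;  /* transaction id, 0 if none */
        uint32_t len;    /* length of the payload that follows */
    };

    /* Shared page: two rings with free-running producer/consumer
     * indices that wrap modulo the ring size. */
    #define XENSTORE_RING_SIZE 1024
    #define MASK_XENSTORE_IDX(idx) ((idx) & (XENSTORE_RING_SIZE - 1))

    struct xenstore_domain_interface {
        char req[XENSTORE_RING_SIZE]; /* requests to the xenstore daemon */
        char rsp[XENSTORE_RING_SIZE]; /* replies and async watch events */
        XENSTORE_RING_IDX req_cons, req_prod;
        XENSTORE_RING_IDX rsp_cons, rsp_prod;
    };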
Diffstat (limited to 'hw')
-rw-r--r--  hw/i386/kvm/xen_xenstore.c | 254
1 file changed, 251 insertions(+), 3 deletions(-)
diff --git a/hw/i386/kvm/xen_xenstore.c b/hw/i386/kvm/xen_xenstore.c
index e8abdda..14193ef 100644
--- a/hw/i386/kvm/xen_xenstore.c
+++ b/hw/i386/kvm/xen_xenstore.c
@@ -192,18 +192,266 @@ uint16_t xen_xenstore_get_port(void)
return s->guest_port;
}
+static bool req_pending(XenXenstoreState *s)
+{
+ struct xsd_sockmsg *req = (struct xsd_sockmsg *)s->req_data;
+
+ return s->req_offset == XENSTORE_HEADER_SIZE + req->len;
+}
+
+static void reset_req(XenXenstoreState *s)
+{
+ memset(s->req_data, 0, sizeof(s->req_data));
+ s->req_offset = 0;
+}
+
+static void reset_rsp(XenXenstoreState *s)
+{
+ s->rsp_pending = false;
+
+ memset(s->rsp_data, 0, sizeof(s->rsp_data));
+ s->rsp_offset = 0;
+}
+
+static void process_req(XenXenstoreState *s)
+{
+ struct xsd_sockmsg *req = (struct xsd_sockmsg *)s->req_data;
+ struct xsd_sockmsg *rsp = (struct xsd_sockmsg *)s->rsp_data;
+ const char enosys[] = "ENOSYS";
+
+ assert(req_pending(s));
+ assert(!s->rsp_pending);
+
+ rsp->type = XS_ERROR;
+ rsp->req_id = req->req_id;
+ rsp->tx_id = req->tx_id;
+ rsp->len = sizeof(enosys);
+ memcpy((void *)&rsp[1], enosys, sizeof(enosys));
+
+ s->rsp_pending = true;
+ reset_req(s);
+}
+
+static unsigned int copy_from_ring(XenXenstoreState *s, uint8_t *ptr,
+ unsigned int len)
+{
+ if (!len) {
+ return 0;
+ }
+
+ XENSTORE_RING_IDX prod = qatomic_read(&s->xs->req_prod);
+ XENSTORE_RING_IDX cons = qatomic_read(&s->xs->req_cons);
+ unsigned int copied = 0;
+
+ /* Ensure the ring contents don't cross the req_prod access. */
+ smp_rmb();
+
+ while (len) {
+ unsigned int avail = prod - cons;
+ unsigned int offset = MASK_XENSTORE_IDX(cons);
+ unsigned int copylen = avail;
+
+ if (avail > XENSTORE_RING_SIZE) {
+ error_report("XenStore ring handling error");
+ s->fatal_error = true;
+ break;
+ } else if (avail == 0) {
+ break;
+ }
+
+ if (copylen > len) {
+ copylen = len;
+ }
+ if (copylen > XENSTORE_RING_SIZE - offset) {
+ copylen = XENSTORE_RING_SIZE - offset;
+ }
+
+ memcpy(ptr, &s->xs->req[offset], copylen);
+ copied += copylen;
+
+ ptr += copylen;
+ len -= copylen;
+
+ cons += copylen;
+ }
+
+ /*
+ * Not sure this ever mattered except on Alpha, but this barrier
+ * is to ensure that the update to req_cons is globally visible
+ * only after we have consumed all the data from the ring, and we
+ * don't end up seeing data written to the ring *after* the other
+ * end sees the update and writes more to the ring. Xen's own
+ * xenstored has the same barrier here (although with no comment
+ * at all, obviously, because it's Xen code).
+ */
+ smp_mb();
+
+ qatomic_set(&s->xs->req_cons, cons);
+
+ return copied;
+}
+
+static unsigned int copy_to_ring(XenXenstoreState *s, uint8_t *ptr,
+ unsigned int len)
+{
+ if (!len) {
+ return 0;
+ }
+
+ XENSTORE_RING_IDX cons = qatomic_read(&s->xs->rsp_cons);
+ XENSTORE_RING_IDX prod = qatomic_read(&s->xs->rsp_prod);
+ unsigned int copied = 0;
+
+ /*
+ * This matches the barrier in copy_to_ring() (or the guest's
+ * equivalent) between writing the data to the ring and updating
+ * rsp_prod. It protects against the pathological case (which
+ * again I think never happened except on Alpha) where our
+ * subsequent writes to the ring could *cross* the read of
+ * rsp_cons and the guest could see the new data when it was
+ * intending to read the old.
+ */
+ smp_mb();
+
+ while (len) {
+ unsigned int avail = cons + XENSTORE_RING_SIZE - prod;
+ unsigned int offset = MASK_XENSTORE_IDX(prod);
+ unsigned int copylen = len;
+
+ if (avail > XENSTORE_RING_SIZE) {
+ error_report("XenStore ring handling error");
+ s->fatal_error = true;
+ break;
+ } else if (avail == 0) {
+ break;
+ }
+
+ if (copylen > avail) {
+ copylen = avail;
+ }
+ if (copylen > XENSTORE_RING_SIZE - offset) {
+ copylen = XENSTORE_RING_SIZE - offset;
+ }
+
+ memcpy(&s->xs->rsp[offset], ptr, copylen);
+ copied += copylen;
+
+ ptr += copylen;
+ len -= copylen;
+
+ prod += copylen;
+ }
+
+ /* Ensure the ring contents are seen before rsp_prod update. */
+ smp_wmb();
+
+ qatomic_set(&s->xs->rsp_prod, prod);
+
+ return copied;
+}
+
+static unsigned int get_req(XenXenstoreState *s)
+{
+ unsigned int copied = 0;
+
+ if (s->fatal_error) {
+ return 0;
+ }
+
+ assert(!req_pending(s));
+
+ if (s->req_offset < XENSTORE_HEADER_SIZE) {
+ void *ptr = s->req_data + s->req_offset;
+ unsigned int len = XENSTORE_HEADER_SIZE;
+ unsigned int copylen = copy_from_ring(s, ptr, len);
+
+ copied += copylen;
+ s->req_offset += copylen;
+ }
+
+ if (s->req_offset >= XENSTORE_HEADER_SIZE) {
+ struct xsd_sockmsg *req = (struct xsd_sockmsg *)s->req_data;
+
+ if (req->len > (uint32_t)XENSTORE_PAYLOAD_MAX) {
+ error_report("Illegal XenStore request");
+ s->fatal_error = true;
+ return 0;
+ }
+
+ void *ptr = s->req_data + s->req_offset;
+ unsigned int len = XENSTORE_HEADER_SIZE + req->len - s->req_offset;
+ unsigned int copylen = copy_from_ring(s, ptr, len);
+
+ copied += copylen;
+ s->req_offset += copylen;
+ }
+
+ return copied;
+}
+
+static unsigned int put_rsp(XenXenstoreState *s)
+{
+ if (s->fatal_error) {
+ return 0;
+ }
+
+ assert(s->rsp_pending);
+
+ struct xsd_sockmsg *rsp = (struct xsd_sockmsg *)s->rsp_data;
+ assert(s->rsp_offset < XENSTORE_HEADER_SIZE + rsp->len);
+
+ void *ptr = s->rsp_data + s->rsp_offset;
+ unsigned int len = XENSTORE_HEADER_SIZE + rsp->len - s->rsp_offset;
+ unsigned int copylen = copy_to_ring(s, ptr, len);
+
+ s->rsp_offset += copylen;
+
+ /* Have we produced a complete response? */
+ if (s->rsp_offset == XENSTORE_HEADER_SIZE + rsp->len) {
+ reset_rsp(s);
+ }
+
+ return copylen;
+}
+
static void xen_xenstore_event(void *opaque)
{
XenXenstoreState *s = opaque;
evtchn_port_t port = xen_be_evtchn_pending(s->eh);
+ unsigned int copied_to, copied_from;
+ bool processed, notify = false;
+
if (port != s->be_port) {
return;
}
- printf("xenstore event\n");
+
/* We know this is a no-op. */
xen_be_evtchn_unmask(s->eh, port);
- qemu_hexdump(stdout, "", s->xs, sizeof(*s->xs));
- xen_be_evtchn_notify(s->eh, s->be_port);
+
+ do {
+ copied_to = copied_from = 0;
+ processed = false;
+
+ if (s->rsp_pending) {
+ copied_to = put_rsp(s);
+ }
+
+ if (!req_pending(s)) {
+ copied_from = get_req(s);
+ }
+
+ if (req_pending(s) && !s->rsp_pending) {
+ process_req(s);
+ processed = true;
+ }
+
+ notify |= copied_to || copied_from;
+ } while (copied_to || copied_from || processed);
+
+ if (notify) {
+ xen_be_evtchn_notify(s->eh, s->be_port);
+ }
}
static void alloc_guest_port(XenXenstoreState *s)