From 009c2549bb9dc7f7061009eb87f2a53d4b364983 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Eugenio=20P=C3=A9rez?= Date: Wed, 20 Jul 2022 08:59:26 +0200 Subject: vhost: move descriptor translation to vhost_svq_vring_write_descs MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit It's done for both in and out descriptors so it's better placed here. Acked-by: Jason Wang Signed-off-by: Eugenio Pérez Reviewed-by: Michael S. Tsirkin Signed-off-by: Jason Wang --- hw/virtio/vhost-shadow-virtqueue.c | 38 +++++++++++++++++++++++++++----------- 1 file changed, 27 insertions(+), 11 deletions(-) (limited to 'hw') diff --git a/hw/virtio/vhost-shadow-virtqueue.c b/hw/virtio/vhost-shadow-virtqueue.c index 56c96eb..e2184a4 100644 --- a/hw/virtio/vhost-shadow-virtqueue.c +++ b/hw/virtio/vhost-shadow-virtqueue.c @@ -122,17 +122,35 @@ static bool vhost_svq_translate_addr(const VhostShadowVirtqueue *svq, return true; } -static void vhost_vring_write_descs(VhostShadowVirtqueue *svq, hwaddr *sg, - const struct iovec *iovec, size_t num, - bool more_descs, bool write) +/** + * Write descriptors to SVQ vring + * + * @svq: The shadow virtqueue + * @sg: Cache for hwaddr + * @iovec: The iovec from the guest + * @num: iovec length + * @more_descs: True if more descriptors come in the chain + * @write: True if they are writeable descriptors + * + * Return true if success, false otherwise and print error. + */ +static bool vhost_svq_vring_write_descs(VhostShadowVirtqueue *svq, hwaddr *sg, + const struct iovec *iovec, size_t num, + bool more_descs, bool write) { uint16_t i = svq->free_head, last = svq->free_head; unsigned n; uint16_t flags = write ? cpu_to_le16(VRING_DESC_F_WRITE) : 0; vring_desc_t *descs = svq->vring.desc; + bool ok; if (num == 0) { - return; + return true; + } + + ok = vhost_svq_translate_addr(svq, sg, iovec, num); + if (unlikely(!ok)) { + return false; } for (n = 0; n < num; n++) { @@ -150,6 +168,7 @@ static void vhost_vring_write_descs(VhostShadowVirtqueue *svq, hwaddr *sg, } svq->free_head = le16_to_cpu(svq->desc_next[last]); + return true; } static bool vhost_svq_add_split(VhostShadowVirtqueue *svq, @@ -169,21 +188,18 @@ static bool vhost_svq_add_split(VhostShadowVirtqueue *svq, return false; } - ok = vhost_svq_translate_addr(svq, sgs, elem->out_sg, elem->out_num); + ok = vhost_svq_vring_write_descs(svq, sgs, elem->out_sg, elem->out_num, + elem->in_num > 0, false); if (unlikely(!ok)) { return false; } - vhost_vring_write_descs(svq, sgs, elem->out_sg, elem->out_num, - elem->in_num > 0, false); - - ok = vhost_svq_translate_addr(svq, sgs, elem->in_sg, elem->in_num); + ok = vhost_svq_vring_write_descs(svq, sgs, elem->in_sg, elem->in_num, false, + true); if (unlikely(!ok)) { return false; } - vhost_vring_write_descs(svq, sgs, elem->in_sg, elem->in_num, false, true); - /* * Put the entry in the available array (but don't update avail->idx until * they do sync). -- cgit v1.1 From 6758c01f054c2a842d41d927d628b09f649d3254 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Eugenio=20P=C3=A9rez?= Date: Wed, 20 Jul 2022 08:59:27 +0200 Subject: virtio-net: Expose MAC_TABLE_ENTRIES MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit vhost-vdpa control virtqueue needs to know the maximum entries supported by the virtio-net device, so we know if it is possible to apply the filter. Signed-off-by: Eugenio Pérez Reviewed-by: Michael S. 
Tsirkin Signed-off-by: Jason Wang --- hw/net/virtio-net.c | 1 - 1 file changed, 1 deletion(-) (limited to 'hw') diff --git a/hw/net/virtio-net.c b/hw/net/virtio-net.c index 7ad948e..f83e96e 100644 --- a/hw/net/virtio-net.c +++ b/hw/net/virtio-net.c @@ -49,7 +49,6 @@ #define VIRTIO_NET_VM_VERSION 11 -#define MAC_TABLE_ENTRIES 64 #define MAX_VLAN (1 << 12) /* Per 802.1Q definition */ /* previously fixed value */ -- cgit v1.1 From 640b8a1c588b56349b3307d88459ea1cd86181fb Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Eugenio=20P=C3=A9rez?= Date: Wed, 20 Jul 2022 08:59:28 +0200 Subject: virtio-net: Expose ctrl virtqueue logic MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit This allows external vhost-net devices to modify the state of the VirtIO device model once the vhost-vdpa device has acknowledged the control commands. Signed-off-by: Eugenio Pérez Reviewed-by: Michael S. Tsirkin Signed-off-by: Jason Wang --- hw/net/virtio-net.c | 84 +++++++++++++++++++++++++++++++---------------------- 1 file changed, 49 insertions(+), 35 deletions(-) (limited to 'hw') diff --git a/hw/net/virtio-net.c b/hw/net/virtio-net.c index f83e96e..dd0d056 100644 --- a/hw/net/virtio-net.c +++ b/hw/net/virtio-net.c @@ -1433,57 +1433,71 @@ static int virtio_net_handle_mq(VirtIONet *n, uint8_t cmd, return VIRTIO_NET_OK; } -static void virtio_net_handle_ctrl(VirtIODevice *vdev, VirtQueue *vq) +size_t virtio_net_handle_ctrl_iov(VirtIODevice *vdev, + const struct iovec *in_sg, unsigned in_num, + const struct iovec *out_sg, + unsigned out_num) { VirtIONet *n = VIRTIO_NET(vdev); struct virtio_net_ctrl_hdr ctrl; virtio_net_ctrl_ack status = VIRTIO_NET_ERR; - VirtQueueElement *elem; size_t s; struct iovec *iov, *iov2; - unsigned int iov_cnt; + + if (iov_size(in_sg, in_num) < sizeof(status) || + iov_size(out_sg, out_num) < sizeof(ctrl)) { + virtio_error(vdev, "virtio-net ctrl missing headers"); + return 0; + } + + iov2 = iov = g_memdup2(out_sg, sizeof(struct iovec) * out_num); + s = iov_to_buf(iov, out_num, 0, &ctrl, sizeof(ctrl)); + iov_discard_front(&iov, &out_num, sizeof(ctrl)); + if (s != sizeof(ctrl)) { + status = VIRTIO_NET_ERR; + } else if (ctrl.class == VIRTIO_NET_CTRL_RX) { + status = virtio_net_handle_rx_mode(n, ctrl.cmd, iov, out_num); + } else if (ctrl.class == VIRTIO_NET_CTRL_MAC) { + status = virtio_net_handle_mac(n, ctrl.cmd, iov, out_num); + } else if (ctrl.class == VIRTIO_NET_CTRL_VLAN) { + status = virtio_net_handle_vlan_table(n, ctrl.cmd, iov, out_num); + } else if (ctrl.class == VIRTIO_NET_CTRL_ANNOUNCE) { + status = virtio_net_handle_announce(n, ctrl.cmd, iov, out_num); + } else if (ctrl.class == VIRTIO_NET_CTRL_MQ) { + status = virtio_net_handle_mq(n, ctrl.cmd, iov, out_num); + } else if (ctrl.class == VIRTIO_NET_CTRL_GUEST_OFFLOADS) { + status = virtio_net_handle_offloads(n, ctrl.cmd, iov, out_num); + } + + s = iov_from_buf(in_sg, in_num, 0, &status, sizeof(status)); + assert(s == sizeof(status)); + + g_free(iov2); + return sizeof(status); +} + +static void virtio_net_handle_ctrl(VirtIODevice *vdev, VirtQueue *vq) +{ + VirtQueueElement *elem; for (;;) { + size_t written; elem = virtqueue_pop(vq, sizeof(VirtQueueElement)); if (!elem) { break; } - if (iov_size(elem->in_sg, elem->in_num) < sizeof(status) || - iov_size(elem->out_sg, elem->out_num) < sizeof(ctrl)) { - virtio_error(vdev, "virtio-net ctrl missing headers"); + + written = virtio_net_handle_ctrl_iov(vdev, elem->in_sg, elem->in_num, + elem->out_sg, elem->out_num); + if (written > 0) { + virtqueue_push(vq, elem, 
written); + virtio_notify(vdev, vq); + g_free(elem); + } else { virtqueue_detach_element(vq, elem, 0); g_free(elem); break; } - - iov_cnt = elem->out_num; - iov2 = iov = g_memdup2(elem->out_sg, - sizeof(struct iovec) * elem->out_num); - s = iov_to_buf(iov, iov_cnt, 0, &ctrl, sizeof(ctrl)); - iov_discard_front(&iov, &iov_cnt, sizeof(ctrl)); - if (s != sizeof(ctrl)) { - status = VIRTIO_NET_ERR; - } else if (ctrl.class == VIRTIO_NET_CTRL_RX) { - status = virtio_net_handle_rx_mode(n, ctrl.cmd, iov, iov_cnt); - } else if (ctrl.class == VIRTIO_NET_CTRL_MAC) { - status = virtio_net_handle_mac(n, ctrl.cmd, iov, iov_cnt); - } else if (ctrl.class == VIRTIO_NET_CTRL_VLAN) { - status = virtio_net_handle_vlan_table(n, ctrl.cmd, iov, iov_cnt); - } else if (ctrl.class == VIRTIO_NET_CTRL_ANNOUNCE) { - status = virtio_net_handle_announce(n, ctrl.cmd, iov, iov_cnt); - } else if (ctrl.class == VIRTIO_NET_CTRL_MQ) { - status = virtio_net_handle_mq(n, ctrl.cmd, iov, iov_cnt); - } else if (ctrl.class == VIRTIO_NET_CTRL_GUEST_OFFLOADS) { - status = virtio_net_handle_offloads(n, ctrl.cmd, iov, iov_cnt); - } - - s = iov_from_buf(elem->in_sg, elem->in_num, 0, &status, sizeof(status)); - assert(s == sizeof(status)); - - virtqueue_push(vq, elem, sizeof(status)); - virtio_notify(vdev, vq); - g_free(iov2); - g_free(elem); } } -- cgit v1.1 From c381abc37f0aba42ed2e3b41cdace8f8438829e4 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Eugenio=20P=C3=A9rez?= Date: Wed, 20 Jul 2022 08:59:29 +0200 Subject: vdpa: Avoid compiler to squash reads to used idx MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit In the next patch we will allow busypolling of this value. The compiler have a running path where shadow_used_idx, last_used_idx, and vring used idx are not modified within the same thread busypolling. This was not an issue before since we always cleared device event notifier before checking it, and that could act as memory barrier. However, the busypoll needs something similar to kernel READ_ONCE. Let's add it here, sepparated from the polling. Signed-off-by: Eugenio Pérez Signed-off-by: Jason Wang --- hw/virtio/vhost-shadow-virtqueue.c | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) (limited to 'hw') diff --git a/hw/virtio/vhost-shadow-virtqueue.c b/hw/virtio/vhost-shadow-virtqueue.c index e2184a4..560d07a 100644 --- a/hw/virtio/vhost-shadow-virtqueue.c +++ b/hw/virtio/vhost-shadow-virtqueue.c @@ -327,11 +327,12 @@ static void vhost_handle_guest_kick_notifier(EventNotifier *n) static bool vhost_svq_more_used(VhostShadowVirtqueue *svq) { + uint16_t *used_idx = &svq->vring.used->idx; if (svq->last_used_idx != svq->shadow_used_idx) { return true; } - svq->shadow_used_idx = cpu_to_le16(svq->vring.used->idx); + svq->shadow_used_idx = cpu_to_le16(*(volatile uint16_t *)used_idx); return svq->last_used_idx != svq->shadow_used_idx; } -- cgit v1.1 From d93a2405ca6efa9dc1c420cee5a34bd8242818d0 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Eugenio=20P=C3=A9rez?= Date: Wed, 20 Jul 2022 08:59:30 +0200 Subject: vhost: Reorder vhost_svq_kick MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Future code needs to call it from vhost_svq_add. No functional change intended. Signed-off-by: Eugenio Pérez Reviewed-by: Michael S. 
Tsirkin Signed-off-by: Jason Wang --- hw/virtio/vhost-shadow-virtqueue.c | 28 ++++++++++++++-------------- 1 file changed, 14 insertions(+), 14 deletions(-) (limited to 'hw') diff --git a/hw/virtio/vhost-shadow-virtqueue.c b/hw/virtio/vhost-shadow-virtqueue.c index 560d07a..043a185 100644 --- a/hw/virtio/vhost-shadow-virtqueue.c +++ b/hw/virtio/vhost-shadow-virtqueue.c @@ -215,6 +215,20 @@ static bool vhost_svq_add_split(VhostShadowVirtqueue *svq, return true; } +static void vhost_svq_kick(VhostShadowVirtqueue *svq) +{ + /* + * We need to expose the available array entries before checking the used + * flags + */ + smp_mb(); + if (svq->vring.used->flags & VRING_USED_F_NO_NOTIFY) { + return; + } + + event_notifier_set(&svq->hdev_kick); +} + /** * Add an element to a SVQ. * @@ -235,20 +249,6 @@ static bool vhost_svq_add(VhostShadowVirtqueue *svq, VirtQueueElement *elem) return true; } -static void vhost_svq_kick(VhostShadowVirtqueue *svq) -{ - /* - * We need to expose the available array entries before checking the used - * flags - */ - smp_mb(); - if (svq->vring.used->flags & VRING_USED_F_NO_NOTIFY) { - return; - } - - event_notifier_set(&svq->hdev_kick); -} - /** * Forward available buffers. * -- cgit v1.1 From 98b5adef8493a2bfad6655cfee84299e88bedbf7 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Eugenio=20P=C3=A9rez?= Date: Wed, 20 Jul 2022 08:59:31 +0200 Subject: vhost: Move vhost_svq_kick call to vhost_svq_add MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit The series needs to expose vhost_svq_add with full functionality, including kick Signed-off-by: Eugenio Pérez Reviewed-by: Michael S. Tsirkin Signed-off-by: Jason Wang --- hw/virtio/vhost-shadow-virtqueue.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'hw') diff --git a/hw/virtio/vhost-shadow-virtqueue.c b/hw/virtio/vhost-shadow-virtqueue.c index 043a185..e272c33 100644 --- a/hw/virtio/vhost-shadow-virtqueue.c +++ b/hw/virtio/vhost-shadow-virtqueue.c @@ -246,6 +246,7 @@ static bool vhost_svq_add(VhostShadowVirtqueue *svq, VirtQueueElement *elem) } svq->ring_id_maps[qemu_head] = elem; + vhost_svq_kick(svq); return true; } @@ -306,7 +307,6 @@ static void vhost_handle_guest_kick(VhostShadowVirtqueue *svq) /* VQ is broken, just return and ignore any other kicks */ return; } - vhost_svq_kick(svq); } virtio_queue_set_notification(svq->vq, true); -- cgit v1.1 From f20b70eb5a68cfd8fef74a13ccdd494ef1cb0221 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Eugenio=20P=C3=A9rez?= Date: Wed, 20 Jul 2022 08:59:32 +0200 Subject: vhost: Check for queue full at vhost_svq_add MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit The series need to expose vhost_svq_add with full functionality, including checking for full queue. Signed-off-by: Eugenio Pérez Reviewed-by: Michael S. Tsirkin Signed-off-by: Jason Wang --- hw/virtio/vhost-shadow-virtqueue.c | 59 +++++++++++++++++++++----------------- 1 file changed, 33 insertions(+), 26 deletions(-) (limited to 'hw') diff --git a/hw/virtio/vhost-shadow-virtqueue.c b/hw/virtio/vhost-shadow-virtqueue.c index e272c33..11302ea 100644 --- a/hw/virtio/vhost-shadow-virtqueue.c +++ b/hw/virtio/vhost-shadow-virtqueue.c @@ -233,21 +233,29 @@ static void vhost_svq_kick(VhostShadowVirtqueue *svq) * Add an element to a SVQ. * * The caller must check that there is enough slots for the new element. It - * takes ownership of the element: In case of failure, it is free and the SVQ - * is considered broken. 
+ * takes ownership of the element: In case of failure not ENOSPC, it is free. + * + * Return -EINVAL if element is invalid, -ENOSPC if dev queue is full */ -static bool vhost_svq_add(VhostShadowVirtqueue *svq, VirtQueueElement *elem) +static int vhost_svq_add(VhostShadowVirtqueue *svq, VirtQueueElement *elem) { unsigned qemu_head; - bool ok = vhost_svq_add_split(svq, elem, &qemu_head); + unsigned ndescs = elem->in_num + elem->out_num; + bool ok; + + if (unlikely(ndescs > vhost_svq_available_slots(svq))) { + return -ENOSPC; + } + + ok = vhost_svq_add_split(svq, elem, &qemu_head); if (unlikely(!ok)) { g_free(elem); - return false; + return -EINVAL; } svq->ring_id_maps[qemu_head] = elem; vhost_svq_kick(svq); - return true; + return 0; } /** @@ -274,7 +282,7 @@ static void vhost_handle_guest_kick(VhostShadowVirtqueue *svq) while (true) { VirtQueueElement *elem; - bool ok; + int r; if (svq->next_guest_avail_elem) { elem = g_steal_pointer(&svq->next_guest_avail_elem); @@ -286,25 +294,24 @@ static void vhost_handle_guest_kick(VhostShadowVirtqueue *svq) break; } - if (elem->out_num + elem->in_num > vhost_svq_available_slots(svq)) { - /* - * This condition is possible since a contiguous buffer in GPA - * does not imply a contiguous buffer in qemu's VA - * scatter-gather segments. If that happens, the buffer exposed - * to the device needs to be a chain of descriptors at this - * moment. - * - * SVQ cannot hold more available buffers if we are here: - * queue the current guest descriptor and ignore further kicks - * until some elements are used. - */ - svq->next_guest_avail_elem = elem; - return; - } - - ok = vhost_svq_add(svq, elem); - if (unlikely(!ok)) { - /* VQ is broken, just return and ignore any other kicks */ + r = vhost_svq_add(svq, elem); + if (unlikely(r != 0)) { + if (r == -ENOSPC) { + /* + * This condition is possible since a contiguous buffer in + * GPA does not imply a contiguous buffer in qemu's VA + * scatter-gather segments. If that happens, the buffer + * exposed to the device needs to be a chain of descriptors + * at this moment. + * + * SVQ cannot hold more available buffers if we are here: + * queue the current guest descriptor and ignore kicks + * until some elements are used. + */ + svq->next_guest_avail_elem = elem; + } + + /* VQ is full or broken, just return and ignore kicks */ return; } } -- cgit v1.1 From 1f46ae65d85f677b660bda46685dd3e94885a7cb Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Eugenio=20P=C3=A9rez?= Date: Wed, 20 Jul 2022 08:59:33 +0200 Subject: vhost: Decouple vhost_svq_add from VirtQueueElement MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit VirtQueueElement comes from the guest, but we're heading SVQ to be able to modify the element presented to the device without the guest's knowledge. To do so, make SVQ accept sg buffers directly, instead of using VirtQueueElement. Add vhost_svq_add_element to maintain element convenience. Signed-off-by: Eugenio Pérez Acked-by: Jason Wang Reviewed-by: Michael S. 
Tsirkin Signed-off-by: Jason Wang --- hw/virtio/vhost-shadow-virtqueue.c | 33 ++++++++++++++++++++++----------- 1 file changed, 22 insertions(+), 11 deletions(-) (limited to 'hw') diff --git a/hw/virtio/vhost-shadow-virtqueue.c b/hw/virtio/vhost-shadow-virtqueue.c index 11302ea..e3afa2b 100644 --- a/hw/virtio/vhost-shadow-virtqueue.c +++ b/hw/virtio/vhost-shadow-virtqueue.c @@ -172,30 +172,31 @@ static bool vhost_svq_vring_write_descs(VhostShadowVirtqueue *svq, hwaddr *sg, } static bool vhost_svq_add_split(VhostShadowVirtqueue *svq, - VirtQueueElement *elem, unsigned *head) + const struct iovec *out_sg, size_t out_num, + const struct iovec *in_sg, size_t in_num, + unsigned *head) { unsigned avail_idx; vring_avail_t *avail = svq->vring.avail; bool ok; - g_autofree hwaddr *sgs = g_new(hwaddr, MAX(elem->out_num, elem->in_num)); + g_autofree hwaddr *sgs = g_new(hwaddr, MAX(out_num, in_num)); *head = svq->free_head; /* We need some descriptors here */ - if (unlikely(!elem->out_num && !elem->in_num)) { + if (unlikely(!out_num && !in_num)) { qemu_log_mask(LOG_GUEST_ERROR, "Guest provided element with no descriptors"); return false; } - ok = vhost_svq_vring_write_descs(svq, sgs, elem->out_sg, elem->out_num, - elem->in_num > 0, false); + ok = vhost_svq_vring_write_descs(svq, sgs, out_sg, out_num, in_num > 0, + false); if (unlikely(!ok)) { return false; } - ok = vhost_svq_vring_write_descs(svq, sgs, elem->in_sg, elem->in_num, false, - true); + ok = vhost_svq_vring_write_descs(svq, sgs, in_sg, in_num, false, true); if (unlikely(!ok)) { return false; } @@ -237,17 +238,19 @@ static void vhost_svq_kick(VhostShadowVirtqueue *svq) * * Return -EINVAL if element is invalid, -ENOSPC if dev queue is full */ -static int vhost_svq_add(VhostShadowVirtqueue *svq, VirtQueueElement *elem) +static int vhost_svq_add(VhostShadowVirtqueue *svq, const struct iovec *out_sg, + size_t out_num, const struct iovec *in_sg, + size_t in_num, VirtQueueElement *elem) { unsigned qemu_head; - unsigned ndescs = elem->in_num + elem->out_num; + unsigned ndescs = in_num + out_num; bool ok; if (unlikely(ndescs > vhost_svq_available_slots(svq))) { return -ENOSPC; } - ok = vhost_svq_add_split(svq, elem, &qemu_head); + ok = vhost_svq_add_split(svq, out_sg, out_num, in_sg, in_num, &qemu_head); if (unlikely(!ok)) { g_free(elem); return -EINVAL; @@ -258,6 +261,14 @@ static int vhost_svq_add(VhostShadowVirtqueue *svq, VirtQueueElement *elem) return 0; } +/* Convenience wrapper to add a guest's element to SVQ */ +static int vhost_svq_add_element(VhostShadowVirtqueue *svq, + VirtQueueElement *elem) +{ + return vhost_svq_add(svq, elem->out_sg, elem->out_num, elem->in_sg, + elem->in_num, elem); +} + /** * Forward available buffers. * @@ -294,7 +305,7 @@ static void vhost_handle_guest_kick(VhostShadowVirtqueue *svq) break; } - r = vhost_svq_add(svq, elem); + r = vhost_svq_add_element(svq, elem); if (unlikely(r != 0)) { if (r == -ENOSPC) { /* -- cgit v1.1 From 9e87868fcaf5785c8e1490c290505fa32305ff91 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Eugenio=20P=C3=A9rez?= Date: Wed, 20 Jul 2022 08:59:34 +0200 Subject: vhost: Add SVQDescState MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit This will allow SVQ to add context to the different queue elements. This patch only store the actual element, no functional change intended. Signed-off-by: Eugenio Pérez Reviewed-by: Michael S. 
Tsirkin Signed-off-by: Jason Wang --- hw/virtio/vhost-shadow-virtqueue.c | 16 ++++++++-------- hw/virtio/vhost-shadow-virtqueue.h | 8 ++++++-- 2 files changed, 14 insertions(+), 10 deletions(-) (limited to 'hw') diff --git a/hw/virtio/vhost-shadow-virtqueue.c b/hw/virtio/vhost-shadow-virtqueue.c index e3afa2b..e4c09e2 100644 --- a/hw/virtio/vhost-shadow-virtqueue.c +++ b/hw/virtio/vhost-shadow-virtqueue.c @@ -256,7 +256,7 @@ static int vhost_svq_add(VhostShadowVirtqueue *svq, const struct iovec *out_sg, return -EINVAL; } - svq->ring_id_maps[qemu_head] = elem; + svq->desc_state[qemu_head].elem = elem; vhost_svq_kick(svq); return 0; } @@ -411,21 +411,21 @@ static VirtQueueElement *vhost_svq_get_buf(VhostShadowVirtqueue *svq, return NULL; } - if (unlikely(!svq->ring_id_maps[used_elem.id])) { + if (unlikely(!svq->desc_state[used_elem.id].elem)) { qemu_log_mask(LOG_GUEST_ERROR, "Device %s says index %u is used, but it was not available", svq->vdev->name, used_elem.id); return NULL; } - num = svq->ring_id_maps[used_elem.id]->in_num + - svq->ring_id_maps[used_elem.id]->out_num; + num = svq->desc_state[used_elem.id].elem->in_num + + svq->desc_state[used_elem.id].elem->out_num; last_used_chain = vhost_svq_last_desc_of_chain(svq, num, used_elem.id); svq->desc_next[last_used_chain] = svq->free_head; svq->free_head = used_elem.id; *len = used_elem.len; - return g_steal_pointer(&svq->ring_id_maps[used_elem.id]); + return g_steal_pointer(&svq->desc_state[used_elem.id].elem); } static void vhost_svq_flush(VhostShadowVirtqueue *svq, @@ -595,7 +595,7 @@ void vhost_svq_start(VhostShadowVirtqueue *svq, VirtIODevice *vdev, memset(svq->vring.desc, 0, driver_size); svq->vring.used = qemu_memalign(qemu_real_host_page_size(), device_size); memset(svq->vring.used, 0, device_size); - svq->ring_id_maps = g_new0(VirtQueueElement *, svq->vring.num); + svq->desc_state = g_new0(SVQDescState, svq->vring.num); svq->desc_next = g_new0(uint16_t, svq->vring.num); for (unsigned i = 0; i < svq->vring.num - 1; i++) { svq->desc_next[i] = cpu_to_le16(i + 1); @@ -620,7 +620,7 @@ void vhost_svq_stop(VhostShadowVirtqueue *svq) for (unsigned i = 0; i < svq->vring.num; ++i) { g_autofree VirtQueueElement *elem = NULL; - elem = g_steal_pointer(&svq->ring_id_maps[i]); + elem = g_steal_pointer(&svq->desc_state[i].elem); if (elem) { virtqueue_detach_element(svq->vq, elem, 0); } @@ -632,7 +632,7 @@ void vhost_svq_stop(VhostShadowVirtqueue *svq) } svq->vq = NULL; g_free(svq->desc_next); - g_free(svq->ring_id_maps); + g_free(svq->desc_state); qemu_vfree(svq->vring.desc); qemu_vfree(svq->vring.used); } diff --git a/hw/virtio/vhost-shadow-virtqueue.h b/hw/virtio/vhost-shadow-virtqueue.h index c132c99..d646c35 100644 --- a/hw/virtio/vhost-shadow-virtqueue.h +++ b/hw/virtio/vhost-shadow-virtqueue.h @@ -15,6 +15,10 @@ #include "standard-headers/linux/vhost_types.h" #include "hw/virtio/vhost-iova-tree.h" +typedef struct SVQDescState { + VirtQueueElement *elem; +} SVQDescState; + /* Shadow virtqueue to relay notifications */ typedef struct VhostShadowVirtqueue { /* Shadow vring */ @@ -47,8 +51,8 @@ typedef struct VhostShadowVirtqueue { /* IOVA mapping */ VhostIOVATree *iova_tree; - /* Map for use the guest's descriptors */ - VirtQueueElement **ring_id_maps; + /* SVQ vring descriptors state */ + SVQDescState *desc_state; /* Next VirtQueue element that guest made available */ VirtQueueElement *next_guest_avail_elem; -- cgit v1.1 From ac4cfdc6f39c06732d27554523f9d5f8a53b4ffa Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Eugenio=20P=C3=A9rez?= Date: Wed, 20 
Jul 2022 08:59:35 +0200 Subject: vhost: Track number of descs in SVQDescState MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit A guest's buffer continuos on GPA may need multiple descriptors on qemu's VA, so SVQ should track its length sepparatedly. Signed-off-by: Eugenio Pérez Reviewed-by: Michael S. Tsirkin Signed-off-by: Jason Wang --- hw/virtio/vhost-shadow-virtqueue.c | 4 ++-- hw/virtio/vhost-shadow-virtqueue.h | 6 ++++++ 2 files changed, 8 insertions(+), 2 deletions(-) (limited to 'hw') diff --git a/hw/virtio/vhost-shadow-virtqueue.c b/hw/virtio/vhost-shadow-virtqueue.c index e4c09e2..8314405 100644 --- a/hw/virtio/vhost-shadow-virtqueue.c +++ b/hw/virtio/vhost-shadow-virtqueue.c @@ -257,6 +257,7 @@ static int vhost_svq_add(VhostShadowVirtqueue *svq, const struct iovec *out_sg, } svq->desc_state[qemu_head].elem = elem; + svq->desc_state[qemu_head].ndescs = ndescs; vhost_svq_kick(svq); return 0; } @@ -418,8 +419,7 @@ static VirtQueueElement *vhost_svq_get_buf(VhostShadowVirtqueue *svq, return NULL; } - num = svq->desc_state[used_elem.id].elem->in_num + - svq->desc_state[used_elem.id].elem->out_num; + num = svq->desc_state[used_elem.id].ndescs; last_used_chain = vhost_svq_last_desc_of_chain(svq, num, used_elem.id); svq->desc_next[last_used_chain] = svq->free_head; svq->free_head = used_elem.id; diff --git a/hw/virtio/vhost-shadow-virtqueue.h b/hw/virtio/vhost-shadow-virtqueue.h index d646c35..5c7e7cb 100644 --- a/hw/virtio/vhost-shadow-virtqueue.h +++ b/hw/virtio/vhost-shadow-virtqueue.h @@ -17,6 +17,12 @@ typedef struct SVQDescState { VirtQueueElement *elem; + + /* + * Number of descriptors exposed to the device. May or may not match + * guest's + */ + unsigned int ndescs; } SVQDescState; /* Shadow virtqueue to relay notifications */ -- cgit v1.1 From 432efd144e990b6e040862de25f8f0b6a6eeb03d Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Eugenio=20P=C3=A9rez?= Date: Wed, 20 Jul 2022 08:59:36 +0200 Subject: vhost: add vhost_svq_push_elem MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit This function allows external SVQ users to return guest's available buffers. Signed-off-by: Eugenio Pérez Reviewed-by: Michael S. Tsirkin Signed-off-by: Jason Wang --- hw/virtio/vhost-shadow-virtqueue.c | 16 ++++++++++++++++ hw/virtio/vhost-shadow-virtqueue.h | 3 +++ 2 files changed, 19 insertions(+) (limited to 'hw') diff --git a/hw/virtio/vhost-shadow-virtqueue.c b/hw/virtio/vhost-shadow-virtqueue.c index 8314405..1669b1f 100644 --- a/hw/virtio/vhost-shadow-virtqueue.c +++ b/hw/virtio/vhost-shadow-virtqueue.c @@ -428,6 +428,22 @@ static VirtQueueElement *vhost_svq_get_buf(VhostShadowVirtqueue *svq, return g_steal_pointer(&svq->desc_state[used_elem.id].elem); } +/** + * Push an element to SVQ, returning it to the guest. + */ +void vhost_svq_push_elem(VhostShadowVirtqueue *svq, + const VirtQueueElement *elem, uint32_t len) +{ + virtqueue_push(svq->vq, elem, len); + if (svq->next_guest_avail_elem) { + /* + * Avail ring was full when vhost_svq_flush was called, so it's a + * good moment to make more descriptors available if possible. 
+ */ + vhost_handle_guest_kick(svq); + } +} + static void vhost_svq_flush(VhostShadowVirtqueue *svq, bool check_for_avail_queue) { diff --git a/hw/virtio/vhost-shadow-virtqueue.h b/hw/virtio/vhost-shadow-virtqueue.h index 5c7e7cb..d9fc1f1 100644 --- a/hw/virtio/vhost-shadow-virtqueue.h +++ b/hw/virtio/vhost-shadow-virtqueue.h @@ -84,6 +84,9 @@ typedef struct VhostShadowVirtqueue { bool vhost_svq_valid_features(uint64_t features, Error **errp); +void vhost_svq_push_elem(VhostShadowVirtqueue *svq, + const VirtQueueElement *elem, uint32_t len); + void vhost_svq_set_svq_kick_fd(VhostShadowVirtqueue *svq, int svq_kick_fd); void vhost_svq_set_svq_call_fd(VhostShadowVirtqueue *svq, int call_fd); void vhost_svq_get_vring_addr(const VhostShadowVirtqueue *svq, -- cgit v1.1 From d0291f3f284d3bc220cdb13b0d8ac8a44eb5fd4c Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Eugenio=20P=C3=A9rez?= Date: Wed, 20 Jul 2022 08:59:37 +0200 Subject: vhost: Expose vhost_svq_add MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit This allows external parts of SVQ to forward custom buffers to the device. Signed-off-by: Eugenio Pérez Reviewed-by: Michael S. Tsirkin Signed-off-by: Jason Wang --- hw/virtio/vhost-shadow-virtqueue.c | 6 +++--- hw/virtio/vhost-shadow-virtqueue.h | 3 +++ 2 files changed, 6 insertions(+), 3 deletions(-) (limited to 'hw') diff --git a/hw/virtio/vhost-shadow-virtqueue.c b/hw/virtio/vhost-shadow-virtqueue.c index 1669b1f..c3a75ca 100644 --- a/hw/virtio/vhost-shadow-virtqueue.c +++ b/hw/virtio/vhost-shadow-virtqueue.c @@ -238,9 +238,9 @@ static void vhost_svq_kick(VhostShadowVirtqueue *svq) * * Return -EINVAL if element is invalid, -ENOSPC if dev queue is full */ -static int vhost_svq_add(VhostShadowVirtqueue *svq, const struct iovec *out_sg, - size_t out_num, const struct iovec *in_sg, - size_t in_num, VirtQueueElement *elem) +int vhost_svq_add(VhostShadowVirtqueue *svq, const struct iovec *out_sg, + size_t out_num, const struct iovec *in_sg, size_t in_num, + VirtQueueElement *elem) { unsigned qemu_head; unsigned ndescs = in_num + out_num; diff --git a/hw/virtio/vhost-shadow-virtqueue.h b/hw/virtio/vhost-shadow-virtqueue.h index d9fc1f1..dd78f4b 100644 --- a/hw/virtio/vhost-shadow-virtqueue.h +++ b/hw/virtio/vhost-shadow-virtqueue.h @@ -86,6 +86,9 @@ bool vhost_svq_valid_features(uint64_t features, Error **errp); void vhost_svq_push_elem(VhostShadowVirtqueue *svq, const VirtQueueElement *elem, uint32_t len); +int vhost_svq_add(VhostShadowVirtqueue *svq, const struct iovec *out_sg, + size_t out_num, const struct iovec *in_sg, size_t in_num, + VirtQueueElement *elem); void vhost_svq_set_svq_kick_fd(VhostShadowVirtqueue *svq, int svq_kick_fd); void vhost_svq_set_svq_call_fd(VhostShadowVirtqueue *svq, int call_fd); -- cgit v1.1 From 3f44d13dda83d390cc9563e56e7d337e4f6223f4 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Eugenio=20P=C3=A9rez?= Date: Wed, 20 Jul 2022 08:59:38 +0200 Subject: vhost: add vhost_svq_poll MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit It allows the Shadow Control VirtQueue to wait for the device to use the available buffers. Signed-off-by: Eugenio Pérez Reviewed-by: Michael S. 
Tsirkin Signed-off-by: Jason Wang --- hw/virtio/vhost-shadow-virtqueue.c | 27 +++++++++++++++++++++++++++ hw/virtio/vhost-shadow-virtqueue.h | 1 + 2 files changed, 28 insertions(+) (limited to 'hw') diff --git a/hw/virtio/vhost-shadow-virtqueue.c b/hw/virtio/vhost-shadow-virtqueue.c index c3a75ca..cc2ee47 100644 --- a/hw/virtio/vhost-shadow-virtqueue.c +++ b/hw/virtio/vhost-shadow-virtqueue.c @@ -486,6 +486,33 @@ static void vhost_svq_flush(VhostShadowVirtqueue *svq, } /** + * Poll the SVQ for one device used buffer. + * + * This function race with main event loop SVQ polling, so extra + * synchronization is needed. + * + * Return the length written by the device. + */ +size_t vhost_svq_poll(VhostShadowVirtqueue *svq) +{ + int64_t start_us = g_get_monotonic_time(); + do { + uint32_t len; + VirtQueueElement *elem = vhost_svq_get_buf(svq, &len); + if (elem) { + return len; + } + + if (unlikely(g_get_monotonic_time() - start_us > 10e6)) { + return 0; + } + + /* Make sure we read new used_idx */ + smp_rmb(); + } while (true); +} + +/** * Forward used buffers. * * @n: hdev call event notifier, the one that device set to notify svq. diff --git a/hw/virtio/vhost-shadow-virtqueue.h b/hw/virtio/vhost-shadow-virtqueue.h index dd78f4b..cf442f7 100644 --- a/hw/virtio/vhost-shadow-virtqueue.h +++ b/hw/virtio/vhost-shadow-virtqueue.h @@ -89,6 +89,7 @@ void vhost_svq_push_elem(VhostShadowVirtqueue *svq, int vhost_svq_add(VhostShadowVirtqueue *svq, const struct iovec *out_sg, size_t out_num, const struct iovec *in_sg, size_t in_num, VirtQueueElement *elem); +size_t vhost_svq_poll(VhostShadowVirtqueue *svq); void vhost_svq_set_svq_kick_fd(VhostShadowVirtqueue *svq, int svq_kick_fd); void vhost_svq_set_svq_call_fd(VhostShadowVirtqueue *svq, int call_fd); -- cgit v1.1 From e966c0b781aebabd2c0f5eef91678f08ce1d068c Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Eugenio=20P=C3=A9rez?= Date: Wed, 20 Jul 2022 08:59:39 +0200 Subject: vhost: Add svq avail_handler callback MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit This allows external handlers to be aware of new buffers that the guest places in the virtqueue. When this callback is defined the ownership of the guest's virtqueue element is transferred to the callback. This means that if the user wants to forward the descriptor it needs to manually inject it. The callback is also free to process the command by itself and use the element with svq_push. Signed-off-by: Eugenio Pérez Reviewed-by: Michael S. Tsirkin Signed-off-by: Jason Wang --- hw/virtio/vhost-shadow-virtqueue.c | 14 ++++++++++++-- hw/virtio/vhost-shadow-virtqueue.h | 31 ++++++++++++++++++++++++++++++- hw/virtio/vhost-vdpa.c | 3 ++- 3 files changed, 44 insertions(+), 4 deletions(-) (limited to 'hw') diff --git a/hw/virtio/vhost-shadow-virtqueue.c b/hw/virtio/vhost-shadow-virtqueue.c index cc2ee47..e495672 100644 --- a/hw/virtio/vhost-shadow-virtqueue.c +++ b/hw/virtio/vhost-shadow-virtqueue.c @@ -306,7 +306,11 @@ static void vhost_handle_guest_kick(VhostShadowVirtqueue *svq) break; } - r = vhost_svq_add_element(svq, elem); + if (svq->ops) { + r = svq->ops->avail_handler(svq, elem, svq->ops_opaque); + } else { + r = vhost_svq_add_element(svq, elem); + } if (unlikely(r != 0)) { if (r == -ENOSPC) { /* @@ -685,12 +689,16 @@ void vhost_svq_stop(VhostShadowVirtqueue *svq) * shadow methods and file descriptors. * * @iova_tree: Tree to perform descriptors translations + * @ops: SVQ owner callbacks + * @ops_opaque: ops opaque pointer * * Returns the new virtqueue or NULL. 
* * In case of error, reason is reported through error_report. */ -VhostShadowVirtqueue *vhost_svq_new(VhostIOVATree *iova_tree) +VhostShadowVirtqueue *vhost_svq_new(VhostIOVATree *iova_tree, + const VhostShadowVirtqueueOps *ops, + void *ops_opaque) { g_autofree VhostShadowVirtqueue *svq = g_new0(VhostShadowVirtqueue, 1); int r; @@ -712,6 +720,8 @@ VhostShadowVirtqueue *vhost_svq_new(VhostIOVATree *iova_tree) event_notifier_init_fd(&svq->svq_kick, VHOST_FILE_UNBIND); event_notifier_set_handler(&svq->hdev_call, vhost_svq_handle_call); svq->iova_tree = iova_tree; + svq->ops = ops; + svq->ops_opaque = ops_opaque; return g_steal_pointer(&svq); err_init_hdev_call: diff --git a/hw/virtio/vhost-shadow-virtqueue.h b/hw/virtio/vhost-shadow-virtqueue.h index cf442f7..d04c34a 100644 --- a/hw/virtio/vhost-shadow-virtqueue.h +++ b/hw/virtio/vhost-shadow-virtqueue.h @@ -25,6 +25,27 @@ typedef struct SVQDescState { unsigned int ndescs; } SVQDescState; +typedef struct VhostShadowVirtqueue VhostShadowVirtqueue; + +/** + * Callback to handle an avail buffer. + * + * @svq: Shadow virtqueue + * @elem: Element placed in the queue by the guest + * @vq_callback_opaque: Opaque + * + * Returns 0 if the vq is running as expected. + * + * Note that ownership of elem is transferred to the callback. + */ +typedef int (*VirtQueueAvailCallback)(VhostShadowVirtqueue *svq, + VirtQueueElement *elem, + void *vq_callback_opaque); + +typedef struct VhostShadowVirtqueueOps { + VirtQueueAvailCallback avail_handler; +} VhostShadowVirtqueueOps; + /* Shadow virtqueue to relay notifications */ typedef struct VhostShadowVirtqueue { /* Shadow vring */ @@ -69,6 +90,12 @@ typedef struct VhostShadowVirtqueue { */ uint16_t *desc_next; + /* Caller callbacks */ + const VhostShadowVirtqueueOps *ops; + + /* Caller callbacks opaque */ + void *ops_opaque; + /* Next head to expose to the device */ uint16_t shadow_avail_idx; @@ -102,7 +129,9 @@ void vhost_svq_start(VhostShadowVirtqueue *svq, VirtIODevice *vdev, VirtQueue *vq); void vhost_svq_stop(VhostShadowVirtqueue *svq); -VhostShadowVirtqueue *vhost_svq_new(VhostIOVATree *iova_tree); +VhostShadowVirtqueue *vhost_svq_new(VhostIOVATree *iova_tree, + const VhostShadowVirtqueueOps *ops, + void *ops_opaque); void vhost_svq_free(gpointer vq); G_DEFINE_AUTOPTR_CLEANUP_FUNC(VhostShadowVirtqueue, vhost_svq_free); diff --git a/hw/virtio/vhost-vdpa.c b/hw/virtio/vhost-vdpa.c index 66f054a..0b13e98 100644 --- a/hw/virtio/vhost-vdpa.c +++ b/hw/virtio/vhost-vdpa.c @@ -418,8 +418,9 @@ static int vhost_vdpa_init_svq(struct vhost_dev *hdev, struct vhost_vdpa *v, shadow_vqs = g_ptr_array_new_full(hdev->nvqs, vhost_svq_free); for (unsigned n = 0; n < hdev->nvqs; ++n) { - g_autoptr(VhostShadowVirtqueue) svq = vhost_svq_new(v->iova_tree); + g_autoptr(VhostShadowVirtqueue) svq; + svq = vhost_svq_new(v->iova_tree, NULL, NULL); if (unlikely(!svq)) { error_setg(errp, "Cannot create svq %u", n); return -1; -- cgit v1.1 From 463ba1e3b8cf080812895c5f26d95d8d7db2e692 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Eugenio=20P=C3=A9rez?= Date: Wed, 20 Jul 2022 08:59:40 +0200 Subject: vdpa: Export vhost_vdpa_dma_map and unmap calls MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Shadow CVQ will copy buffers on qemu VA, so we avoid TOCTOU attacks from the guest that could set a different state in qemu device model and vdpa device. To do so, it needs to be able to map these new buffers to the device. Signed-off-by: Eugenio Pérez Acked-by: Jason Wang Reviewed-by: Michael S. 
Tsirkin Signed-off-by: Jason Wang --- hw/virtio/vhost-vdpa.c | 7 +++---- 1 file changed, 3 insertions(+), 4 deletions(-) (limited to 'hw') diff --git a/hw/virtio/vhost-vdpa.c b/hw/virtio/vhost-vdpa.c index 0b13e98..9699721 100644 --- a/hw/virtio/vhost-vdpa.c +++ b/hw/virtio/vhost-vdpa.c @@ -71,8 +71,8 @@ static bool vhost_vdpa_listener_skipped_section(MemoryRegionSection *section, return false; } -static int vhost_vdpa_dma_map(struct vhost_vdpa *v, hwaddr iova, hwaddr size, - void *vaddr, bool readonly) +int vhost_vdpa_dma_map(struct vhost_vdpa *v, hwaddr iova, hwaddr size, + void *vaddr, bool readonly) { struct vhost_msg_v2 msg = {}; int fd = v->device_fd; @@ -97,8 +97,7 @@ static int vhost_vdpa_dma_map(struct vhost_vdpa *v, hwaddr iova, hwaddr size, return ret; } -static int vhost_vdpa_dma_unmap(struct vhost_vdpa *v, hwaddr iova, - hwaddr size) +int vhost_vdpa_dma_unmap(struct vhost_vdpa *v, hwaddr iova, hwaddr size) { struct vhost_msg_v2 msg = {}; int fd = v->device_fd; -- cgit v1.1 From bd907ae4b00ebedad5e586af05ea3d6490318d45 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Eugenio=20P=C3=A9rez?= Date: Wed, 20 Jul 2022 08:59:42 +0200 Subject: vdpa: manual forward CVQ buffers MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Do a simple forwarding of CVQ buffers, the same work SVQ could do but through callbacks. No functional change intended. Signed-off-by: Eugenio Pérez Reviewed-by: Michael S. Tsirkin Signed-off-by: Jason Wang --- hw/virtio/vhost-vdpa.c | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) (limited to 'hw') diff --git a/hw/virtio/vhost-vdpa.c b/hw/virtio/vhost-vdpa.c index 9699721..beaaa70 100644 --- a/hw/virtio/vhost-vdpa.c +++ b/hw/virtio/vhost-vdpa.c @@ -419,7 +419,8 @@ static int vhost_vdpa_init_svq(struct vhost_dev *hdev, struct vhost_vdpa *v, for (unsigned n = 0; n < hdev->nvqs; ++n) { g_autoptr(VhostShadowVirtqueue) svq; - svq = vhost_svq_new(v->iova_tree, NULL, NULL); + svq = vhost_svq_new(v->iova_tree, v->shadow_vq_ops, + v->shadow_vq_ops_opaque); if (unlikely(!svq)) { error_setg(errp, "Cannot create svq %u", n); return -1; -- cgit v1.1 From c156d5bf2b142dcc06808ccee06882144f230aec Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Eugenio=20P=C3=A9rez?= Date: Wed, 20 Jul 2022 08:59:45 +0200 Subject: vdpa: Add device migration blocker MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Since the vhost-vdpa device is exposing _F_LOG, adding a migration blocker if it uses CVQ. However, qemu is able to migrate simple devices with no CVQ as long as they use SVQ. To allow it, add a placeholder error to vhost_vdpa, and only add to vhost_dev when used. vhost_dev machinery place the migration blocker if needed. Signed-off-by: Eugenio Pérez Reviewed-by: Michael S. 
Tsirkin Signed-off-by: Jason Wang --- hw/virtio/vhost-vdpa.c | 15 +++++++++++++++ 1 file changed, 15 insertions(+) (limited to 'hw') diff --git a/hw/virtio/vhost-vdpa.c b/hw/virtio/vhost-vdpa.c index beaaa70..291cd19 100644 --- a/hw/virtio/vhost-vdpa.c +++ b/hw/virtio/vhost-vdpa.c @@ -20,6 +20,7 @@ #include "hw/virtio/vhost-shadow-virtqueue.h" #include "hw/virtio/vhost-vdpa.h" #include "exec/address-spaces.h" +#include "migration/blocker.h" #include "qemu/cutils.h" #include "qemu/main-loop.h" #include "cpu.h" @@ -1022,6 +1023,13 @@ static bool vhost_vdpa_svqs_start(struct vhost_dev *dev) return true; } + if (v->migration_blocker) { + int r = migrate_add_blocker(v->migration_blocker, &err); + if (unlikely(r < 0)) { + return false; + } + } + for (i = 0; i < v->shadow_vqs->len; ++i) { VirtQueue *vq = virtio_get_queue(dev->vdev, dev->vq_index + i); VhostShadowVirtqueue *svq = g_ptr_array_index(v->shadow_vqs, i); @@ -1064,6 +1072,10 @@ err: vhost_svq_stop(svq); } + if (v->migration_blocker) { + migrate_del_blocker(v->migration_blocker); + } + return false; } @@ -1083,6 +1095,9 @@ static bool vhost_vdpa_svqs_stop(struct vhost_dev *dev) } } + if (v->migration_blocker) { + migrate_del_blocker(v->migration_blocker); + } return true; } -- cgit v1.1
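
Taken together, the patches above leave the shadow virtqueue with a small synchronous API for callers that sit outside the guest data path: vhost_svq_add() exposes a descriptor chain to the device (and now kicks it), vhost_svq_poll() busy-waits for the device to use the chain, and vhost_svq_push_elem() hands the guest's element back. The snippet below is a minimal sketch, not code from the series, of how such a caller (for example a shadow control virtqueue) could combine them. The helper name and the cmd/status buffers are hypothetical, and both buffers are assumed to live in memory already made visible to the device, e.g. through vhost_vdpa_dma_map(); it is written against the interfaces declared in hw/virtio/vhost-shadow-virtqueue.h.

    /*
     * Sketch only -- not part of the series above.  Sends one out buffer
     * (cmd) and one in buffer (status) through the SVQ and waits for the
     * device to use them.  Assumes both buffers are already mapped for
     * the device (e.g. via vhost_vdpa_dma_map()).
     */
    static ssize_t svq_send_and_wait(VhostShadowVirtqueue *svq,
                                     void *cmd, size_t cmd_len,
                                     void *status, size_t status_len,
                                     VirtQueueElement *elem)
    {
        const struct iovec out = { .iov_base = cmd, .iov_len = cmd_len };
        const struct iovec in = { .iov_base = status, .iov_len = status_len };
        int r;

        /* Expose the chain to the device; vhost_svq_add() kicks internally now */
        r = vhost_svq_add(svq, &out, 1, &in, 1, elem);
        if (unlikely(r != 0)) {
            /* -ENOSPC: device queue is full, -EINVAL: descriptors were invalid */
            return r;
        }

        /*
         * Busy-poll the used ring: vhost_svq_poll() returns the length the
         * device wrote, or 0 if it timed out.
         */
        return vhost_svq_poll(svq);
    }

The avail_handler callback registered through vhost_svq_new() is the intended entry point for this kind of forwarding: it receives the guest's VirtQueueElement, can expose a (possibly rewritten) copy of its buffers with a helper like the one above, and finally returns the element to the guest with vhost_svq_push_elem().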