author     Eugenio Pérez <eperezma@redhat.com>    2025-05-22 10:58:39 -0400
committer  Michael S. Tsirkin <mst@redhat.com>    2025-06-02 14:29:15 -0400
commit     494c50dcc0995ae6eb526d2848c33cbf910ab218 (patch)
tree       5db8e7ab591d5601f044576a9676afd71238ecc9
parent     a400720365ea86602044b78dd8654911d0fe3977 (diff)
vdpa: move memory listener register to vhost_vdpa_init
Current memory operations like pinning may take a lot of time at the
destination. Currently they are done after the source of the migration is
stopped, and before the workload is resumed at the destination. This is a
period where neither traffic can flow, nor the VM workload can continue
(downtime).

We can do better, as we know the memory layout of the guest RAM at the
destination from the moment that all devices are initialized. Moving that
operation earlier allows QEMU to communicate the maps to the kernel while
the workload is still running on the source, so Linux can start mapping
them.

As a small drawback, there is a period during initialization where QEMU
cannot respond to QMP etc. In testing, this time is about 0.2 seconds. It
may be further reduced (or increased) depending on the vdpa driver and the
platform hardware, and it is dominated by the cost of memory pinning. This
matches the time that we move out of the so-called downtime window.

The downtime is measured as the elapsed trace time between the last
vhost_vdpa_suspend on the source and the last
vhost_vdpa_set_vring_enable_one on the destination. In other words, from
"guest CPUs freeze" to the instant the final Rx/Tx queue-pair is able to
start moving data.

Using ConnectX-6 Dx (MLX5) NICs in vhost-vDPA mode with 8 queue-pairs, the
series reduces guest-visible downtime during back-to-back live migrations
by more than half:
- 39G VM:  4.72s -> 2.09s (-2.63s, ~56% improvement)
- 128G VM: 14.72s -> 5.83s (-8.89s, ~60% improvement)

Tested-by: Lei Yang <leiyang@redhat.com>
Reviewed-by: Si-Wei Liu <si-wei.liu@oracle.com>
Acked-by: Jason Wang <jasowang@redhat.com>
Signed-off-by: Eugenio Pérez <eperezma@redhat.com>
Signed-off-by: Jonah Palmer <jonah.palmer@oracle.com>
Message-Id: <20250522145839.59974-8-jonah.palmer@oracle.com>
Reviewed-by: Michael S. Tsirkin <mst@redhat.com>
Signed-off-by: Michael S. Tsirkin <mst@redhat.com>
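To make the ordering change concrete, here is a minimal, standalone C
sketch of where the pinning cost sits relative to the downtime window
before and after this patch. It is not QEMU code; all helper names below
are hypothetical placeholders:

    /* Hypothetical standalone sketch of the ordering change; the helpers
     * below are placeholders, not QEMU functions. */
    #include <stdio.h>

    static void pin_guest_memory(void)    { puts("map/pin guest RAM (slow)"); }
    static void stop_source(void)         { puts("source stopped: downtime begins"); }
    static void resume_destination(void)  { puts("destination resumed: downtime ends"); }

    int main(void)
    {
        /* Before the patch: pinning happens inside the downtime window,
         * after the source is stopped. */
        stop_source();
        pin_guest_memory();
        resume_destination();

        /* After the patch: pinning is triggered at destination device
         * init, while the workload still runs on the source, so only the
         * cheap steps remain between stop and resume. */
        pin_guest_memory();
        stop_source();
        resume_destination();
        return 0;
    }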
-rw-r--r--  hw/virtio/vhost-vdpa.c  35
1 file changed, 28 insertions(+), 7 deletions(-)
diff --git a/hw/virtio/vhost-vdpa.c b/hw/virtio/vhost-vdpa.c
index de834f2..e20da95 100644
--- a/hw/virtio/vhost-vdpa.c
+++ b/hw/virtio/vhost-vdpa.c
@@ -894,8 +894,14 @@ static int vhost_vdpa_reset_device(struct vhost_dev *dev)
 
     ret = vhost_vdpa_call(dev, VHOST_VDPA_SET_STATUS, &status);
     trace_vhost_vdpa_reset_device(dev);
+    if (ret) {
+        return ret;
+    }
+
+    memory_listener_unregister(&v->shared->listener);
+    v->shared->listener_registered = false;
     v->suspended = false;
-    return ret;
+    return 0;
 }
 
 static int vhost_vdpa_get_vq_index(struct vhost_dev *dev, int idx)
@@ -1379,6 +1385,11 @@ static int vhost_vdpa_dev_start(struct vhost_dev *dev, bool started)
                          "IOMMU and try again");
             return -1;
         }
+        if (v->shared->listener_registered &&
+            dev->vdev->dma_as != v->shared->listener.address_space) {
+            memory_listener_unregister(&v->shared->listener);
+            v->shared->listener_registered = false;
+        }
         if (!v->shared->listener_registered) {
             memory_listener_register(&v->shared->listener, dev->vdev->dma_as);
             v->shared->listener_registered = true;
@@ -1392,8 +1403,6 @@ static int vhost_vdpa_dev_start(struct vhost_dev *dev, bool started)
 
 static void vhost_vdpa_reset_status(struct vhost_dev *dev)
 {
-    struct vhost_vdpa *v = dev->opaque;
-
     if (!vhost_vdpa_last_dev(dev)) {
         return;
     }
@@ -1401,9 +1410,6 @@ static void vhost_vdpa_reset_status(struct vhost_dev *dev)
     vhost_vdpa_reset_device(dev);
     vhost_vdpa_add_status(dev, VIRTIO_CONFIG_S_ACKNOWLEDGE |
                                VIRTIO_CONFIG_S_DRIVER);
-    memory_listener_unregister(&v->shared->listener);
-    v->shared->listener_registered = false;
-
 }
 
 static int vhost_vdpa_set_log_base(struct vhost_dev *dev, uint64_t base,
@@ -1537,12 +1543,27 @@ static int vhost_vdpa_get_features(struct vhost_dev *dev,
 
 static int vhost_vdpa_set_owner(struct vhost_dev *dev)
 {
+    int r;
+    struct vhost_vdpa *v;
+
     if (!vhost_vdpa_first_dev(dev)) {
         return 0;
     }
 
     trace_vhost_vdpa_set_owner(dev);
-    return vhost_vdpa_call(dev, VHOST_SET_OWNER, NULL);
+    r = vhost_vdpa_call(dev, VHOST_SET_OWNER, NULL);
+    if (unlikely(r < 0)) {
+        return r;
+    }
+
+    /*
+     * Being optimistic and listening address space memory. If the device
+     * uses vIOMMU, it is changed at vhost_vdpa_dev_start.
+     */
+    v = dev->opaque;
+    memory_listener_register(&v->shared->listener, &address_space_memory);
+    v->shared->listener_registered = true;
+    return 0;
 }
 
 static int vhost_vdpa_vq_get_addr(struct vhost_dev *dev,
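For readers following the listener state across the hunks above, the
following is a simplified, standalone C model of the lifecycle the patch
ends up with: register optimistically on address_space_memory at set_owner,
move the listener to the device's dma_as at dev_start if a vIOMMU is in
use, and drop it again on device reset. The AddressSpace struct and helper
names here are stand-ins, not QEMU's real API:

    /* Simplified standalone model of the listener lifecycle; not QEMU code. */
    #include <stdbool.h>
    #include <stdio.h>

    typedef struct { const char *name; } AddressSpace;   /* stand-in type */

    static AddressSpace address_space_memory = { "memory" };
    static const AddressSpace *registered_as;
    static bool listener_registered;

    static void listener_register(const AddressSpace *as)
    {
        registered_as = as;
        listener_registered = true;
        printf("listener registered on %s (maps/pinning start)\n", as->name);
    }

    static void listener_unregister(void)
    {
        listener_registered = false;
        puts("listener unregistered");
    }

    /* Models vhost_vdpa_set_owner(): register optimistically on
     * address_space_memory as soon as the device is owned. */
    static void set_owner(void)
    {
        listener_register(&address_space_memory);
    }

    /* Models vhost_vdpa_dev_start(): if the device uses a vIOMMU, its
     * dma_as differs, so the listener is moved to the right address space. */
    static void dev_start(const AddressSpace *dma_as)
    {
        if (listener_registered && dma_as != registered_as) {
            listener_unregister();
        }
        if (!listener_registered) {
            listener_register(dma_as);
        }
    }

    /* Models vhost_vdpa_reset_device(): reset drops the maps, so the
     * listener is unregistered as well. */
    static void reset_device(void)
    {
        listener_unregister();
    }

    int main(void)
    {
        AddressSpace viommu_as = { "vIOMMU" };

        set_owner();            /* early registration, vIOMMU not yet known */
        dev_start(&viommu_as);  /* device turns out to use a vIOMMU         */
        reset_device();
        return 0;
    }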