aboutsummaryrefslogtreecommitdiff
path: root/net
diff options
context:
space:
mode:
authorEugenio Pérez <eperezma@redhat.com>2023-12-21 18:43:11 +0100
committerMichael S. Tsirkin <mst@redhat.com>2023-12-26 04:51:07 -0500
commit5edb02e8004c2f1d5026a02cd9378046973f47af (patch)
tree719ac5860ff896d6388fa3a6a2960450f280b519 /net
parent8c5e9809225c387026476f5eefc2f8ae749a72f7 (diff)
downloadqemu-5edb02e8004c2f1d5026a02cd9378046973f47af.zip
qemu-5edb02e8004c2f1d5026a02cd9378046973f47af.tar.gz
qemu-5edb02e8004c2f1d5026a02cd9378046973f47af.tar.bz2
vdpa: move iova tree to the shared struct
Next patches will register the vhost_vdpa memory listener while the VM is migrating at the destination, so we can map the memory to the device before stopping the VM at the source. The main goal is to reduce the downtime.

However, the destination QEMU is unaware of which vhost_vdpa device will register its memory_listener. If the source guest has CVQ enabled, it will be the CVQ device. Otherwise, it will be the first one.

Move the iova tree to VhostVDPAShared so all vhost_vdpa can use it, rather than always in the first or last vhost_vdpa.

Signed-off-by: Eugenio Pérez <eperezma@redhat.com>
Acked-by: Jason Wang <jasowang@redhat.com>
Message-Id: <20231221174322.3130442-3-eperezma@redhat.com>
Tested-by: Lei Yang <leiyang@redhat.com>
Reviewed-by: Michael S. Tsirkin <mst@redhat.com>
Signed-off-by: Michael S. Tsirkin <mst@redhat.com>
Diffstat (limited to 'net')
-rw-r--r--net/vhost-vdpa.c54
1 file changed, 23 insertions(+), 31 deletions(-)
diff --git a/net/vhost-vdpa.c b/net/vhost-vdpa.c
index 8b661b9..10703e5 100644
--- a/net/vhost-vdpa.c
+++ b/net/vhost-vdpa.c
@@ -354,8 +354,8 @@ static void vhost_vdpa_net_data_start_first(VhostVDPAState *s)
migration_add_notifier(&s->migration_state,
vdpa_net_migration_state_notifier);
if (v->shadow_vqs_enabled) {
- v->iova_tree = vhost_iova_tree_new(v->iova_range.first,
- v->iova_range.last);
+ v->shared->iova_tree = vhost_iova_tree_new(v->iova_range.first,
+ v->iova_range.last);
}
}
@@ -380,11 +380,6 @@ static int vhost_vdpa_net_data_start(NetClientState *nc)
return 0;
}
- if (v->shadow_vqs_enabled) {
- VhostVDPAState *s0 = vhost_vdpa_net_first_nc_vdpa(s);
- v->iova_tree = s0->vhost_vdpa.iova_tree;
- }
-
return 0;
}
@@ -417,9 +412,8 @@ static void vhost_vdpa_net_client_stop(NetClientState *nc)
dev = s->vhost_vdpa.dev;
if (dev->vq_index + dev->nvqs == dev->vq_index_end) {
- g_clear_pointer(&s->vhost_vdpa.iova_tree, vhost_iova_tree_delete);
- } else {
- s->vhost_vdpa.iova_tree = NULL;
+ g_clear_pointer(&s->vhost_vdpa.shared->iova_tree,
+ vhost_iova_tree_delete);
}
}
@@ -474,7 +468,7 @@ static int vhost_vdpa_set_address_space_id(struct vhost_vdpa *v,
static void vhost_vdpa_cvq_unmap_buf(struct vhost_vdpa *v, void *addr)
{
- VhostIOVATree *tree = v->iova_tree;
+ VhostIOVATree *tree = v->shared->iova_tree;
DMAMap needle = {
/*
* No need to specify size or to look for more translations since
@@ -508,7 +502,7 @@ static int vhost_vdpa_cvq_map_buf(struct vhost_vdpa *v, void *buf, size_t size,
map.translated_addr = (hwaddr)(uintptr_t)buf;
map.size = size - 1;
map.perm = write ? IOMMU_RW : IOMMU_RO,
- r = vhost_iova_tree_map_alloc(v->iova_tree, &map);
+ r = vhost_iova_tree_map_alloc(v->shared->iova_tree, &map);
if (unlikely(r != IOVA_OK)) {
error_report("Cannot map injected element");
return r;
@@ -523,7 +517,7 @@ static int vhost_vdpa_cvq_map_buf(struct vhost_vdpa *v, void *buf, size_t size,
return 0;
dma_map_err:
- vhost_iova_tree_remove(v->iova_tree, map);
+ vhost_iova_tree_remove(v->shared->iova_tree, map);
return r;
}
@@ -583,24 +577,22 @@ out:
return 0;
}
- if (s0->vhost_vdpa.iova_tree) {
- /*
- * SVQ is already configured for all virtqueues. Reuse IOVA tree for
- * simplicity, whether CVQ shares ASID with guest or not, because:
- * - Memory listener need access to guest's memory addresses allocated
- * in the IOVA tree.
- * - There should be plenty of IOVA address space for both ASID not to
- * worry about collisions between them. Guest's translations are
- * still validated with virtio virtqueue_pop so there is no risk for
- * the guest to access memory that it shouldn't.
- *
- * To allocate a iova tree per ASID is doable but it complicates the
- * code and it is not worth it for the moment.
- */
- v->iova_tree = s0->vhost_vdpa.iova_tree;
- } else {
- v->iova_tree = vhost_iova_tree_new(v->iova_range.first,
- v->iova_range.last);
+ /*
+ * If other vhost_vdpa already have an iova_tree, reuse it for simplicity,
+ * whether CVQ shares ASID with guest or not, because:
+ * - Memory listener need access to guest's memory addresses allocated in
+ * the IOVA tree.
+ * - There should be plenty of IOVA address space for both ASID not to
+ * worry about collisions between them. Guest's translations are still
+ * validated with virtio virtqueue_pop so there is no risk for the guest
+ * to access memory that it shouldn't.
+ *
+ * To allocate a iova tree per ASID is doable but it complicates the code
+ * and it is not worth it for the moment.
+ */
+ if (!v->shared->iova_tree) {
+ v->shared->iova_tree = vhost_iova_tree_new(v->iova_range.first,
+ v->iova_range.last);
}
r = vhost_vdpa_cvq_map_buf(&s->vhost_vdpa, s->cvq_cmd_out_buffer,