author     Eugenio Pérez <eperezma@redhat.com>  2023-12-21 18:43:10 +0100
committer  Michael S. Tsirkin <mst@redhat.com>  2023-12-26 04:51:07 -0500
commit     8c5e9809225c387026476f5eefc2f8ae749a72f7 (patch)
tree       97e2dfe94abe8e7ff99d2aa3b5817b533732b989
parent     bc865bfe2d54ac56daa20a6395abff68fab6a5d6 (diff)
vdpa: add VhostVDPAShared
It will hold properties shared among all vhost_vdpa instances associated with the same device. For example, we just need one iova_tree or one memory listener for the entire device.

Next patches will register the vhost_vdpa memory listener at the beginning of the VM migration at the destination. This enables QEMU to map the memory to the device before stopping the VM at the source, instead of doing so while both source and destination are stopped, thus minimizing the downtime.

However, the destination QEMU is unaware of which vhost_vdpa struct will register its memory_listener. If the source guest has CVQ enabled, it will be the one associated with the CVQ. Otherwise, it will be the first one.

Save the memory-operation-related members in a common place rather than always in the first / last vhost_vdpa.

Signed-off-by: Eugenio Pérez <eperezma@redhat.com>
Acked-by: Jason Wang <jasowang@redhat.com>
Message-Id: <20231221174322.3130442-2-eperezma@redhat.com>
Tested-by: Lei Yang <leiyang@redhat.com>
Reviewed-by: Michael S. Tsirkin <mst@redhat.com>
Signed-off-by: Michael S. Tsirkin <mst@redhat.com>
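The diff below implements a simple ownership rule for the new struct: the vhost_vdpa instance of queue pair 0 allocates VhostVDPAShared, every other instance of the same device reuses the same pointer, and only index 0 frees it on cleanup. The following is a minimal, self-contained C sketch of that pattern, not QEMU code; the ToyVhostVDPA type and the toy_init()/toy_cleanup() helpers are hypothetical stand-ins for net_vhost_vdpa_init() and vhost_vdpa_cleanup().

/* Sketch only: illustrates the "index 0 owns the shared state" pattern. */
#include <stdio.h>
#include <stdlib.h>

typedef struct {
    /* Device-wide state (e.g. one iova_tree, one memory listener) goes here. */
    int placeholder;
} VhostVDPAShared;

typedef struct {
    int index;               /* queue pair index within the device */
    VhostVDPAShared *shared; /* owned only by the instance with index 0 */
} ToyVhostVDPA;

/* Index 0 allocates the shared state; every other index reuses it. */
static void toy_init(ToyVhostVDPA *v, int index, VhostVDPAShared *shared)
{
    v->index = index;
    v->shared = (index == 0) ? calloc(1, sizeof(*v->shared)) : shared;
}

/* Only the first instance frees the shared state, mirroring the cleanup hunk. */
static void toy_cleanup(ToyVhostVDPA *v)
{
    if (v->index != 0) {
        return;
    }
    free(v->shared);
    v->shared = NULL;
}

int main(void)
{
    ToyVhostVDPA vqs[3];

    toy_init(&vqs[0], 0, NULL);          /* first queue pair: allocates   */
    toy_init(&vqs[1], 1, vqs[0].shared); /* other data queue pair: reuses */
    toy_init(&vqs[2], 2, vqs[0].shared); /* CVQ: reuses as well           */

    printf("all share one struct: %d\n",
           vqs[0].shared == vqs[1].shared && vqs[1].shared == vqs[2].shared);

    toy_cleanup(&vqs[2]); /* no-op */
    toy_cleanup(&vqs[1]); /* no-op */
    toy_cleanup(&vqs[0]); /* frees the shared state exactly once */
    return 0;
}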
-rw-r--r--  include/hw/virtio/vhost-vdpa.h |  5
-rw-r--r--  net/vhost-vdpa.c               | 24
2 files changed, 27 insertions(+), 2 deletions(-)
diff --git a/include/hw/virtio/vhost-vdpa.h b/include/hw/virtio/vhost-vdpa.h
index 5407d54..eb1a56d 100644
--- a/include/hw/virtio/vhost-vdpa.h
+++ b/include/hw/virtio/vhost-vdpa.h
@@ -30,6 +30,10 @@ typedef struct VhostVDPAHostNotifier {
void *addr;
} VhostVDPAHostNotifier;
+/* Info shared by all vhost_vdpa device models */
+typedef struct vhost_vdpa_shared {
+} VhostVDPAShared;
+
typedef struct vhost_vdpa {
int device_fd;
int index;
@@ -46,6 +50,7 @@ typedef struct vhost_vdpa {
bool suspended;
/* IOVA mapping used by the Shadow Virtqueue */
VhostIOVATree *iova_tree;
+ VhostVDPAShared *shared;
GPtrArray *shadow_vqs;
const VhostShadowVirtqueueOps *shadow_vq_ops;
void *shadow_vq_ops_opaque;
diff --git a/net/vhost-vdpa.c b/net/vhost-vdpa.c
index d0614d7..8b661b9 100644
--- a/net/vhost-vdpa.c
+++ b/net/vhost-vdpa.c
@@ -240,6 +240,10 @@ static void vhost_vdpa_cleanup(NetClientState *nc)
qemu_close(s->vhost_vdpa.device_fd);
s->vhost_vdpa.device_fd = -1;
}
+ if (s->vhost_vdpa.index != 0) {
+ return;
+ }
+ g_free(s->vhost_vdpa.shared);
}
/** Dummy SetSteeringEBPF to support RSS for vhost-vdpa backend */
@@ -1661,6 +1665,7 @@ static NetClientState *net_vhost_vdpa_init(NetClientState *peer,
bool svq,
struct vhost_vdpa_iova_range iova_range,
uint64_t features,
+ VhostVDPAShared *shared,
Error **errp)
{
NetClientState *nc = NULL;
@@ -1696,6 +1701,7 @@ static NetClientState *net_vhost_vdpa_init(NetClientState *peer,
if (queue_pair_index == 0) {
vhost_vdpa_net_valid_svq_features(features,
&s->vhost_vdpa.migration_blocker);
+ s->vhost_vdpa.shared = g_new0(VhostVDPAShared, 1);
} else if (!is_datapath) {
s->cvq_cmd_out_buffer = mmap(NULL, vhost_vdpa_net_cvq_cmd_page_len(),
PROT_READ | PROT_WRITE,
@@ -1708,11 +1714,16 @@ static NetClientState *net_vhost_vdpa_init(NetClientState *peer,
s->vhost_vdpa.shadow_vq_ops_opaque = s;
s->cvq_isolated = cvq_isolated;
}
+ if (queue_pair_index != 0) {
+ s->vhost_vdpa.shared = shared;
+ }
+
ret = vhost_vdpa_add(nc, (void *)&s->vhost_vdpa, queue_pair_index, nvqs);
if (ret) {
qemu_del_net_client(nc);
return NULL;
}
+
return nc;
}
@@ -1824,17 +1835,26 @@ int net_init_vhost_vdpa(const Netdev *netdev, const char *name,
ncs = g_malloc0(sizeof(*ncs) * queue_pairs);
for (i = 0; i < queue_pairs; i++) {
+ VhostVDPAShared *shared = NULL;
+
+ if (i) {
+ shared = DO_UPCAST(VhostVDPAState, nc, ncs[0])->vhost_vdpa.shared;
+ }
ncs[i] = net_vhost_vdpa_init(peer, TYPE_VHOST_VDPA, name,
vdpa_device_fd, i, 2, true, opts->x_svq,
- iova_range, features, errp);
+ iova_range, features, shared, errp);
if (!ncs[i])
goto err;
}
if (has_cvq) {
+ VhostVDPAState *s0 = DO_UPCAST(VhostVDPAState, nc, ncs[0]);
+ VhostVDPAShared *shared = s0->vhost_vdpa.shared;
+
nc = net_vhost_vdpa_init(peer, TYPE_VHOST_VDPA, name,
vdpa_device_fd, i, 1, false,
- opts->x_svq, iova_range, features, errp);
+ opts->x_svq, iova_range, features, shared,
+ errp);
if (!nc)
goto err;
}