author     Stefan Hajnoczi <stefanha@redhat.com>      2019-06-26 08:48:13 +0100
committer  Michael S. Tsirkin <mst@redhat.com>        2019-07-04 17:00:32 -0400
commit     6f5fd837889814e57a4bb473bf80ce08e355a12d
tree       94a9980f0db7259a68621d91d65cc5a83477663d /tests
parent     db68f4ff06cbe0517ed0d9b5634f6cddaed2547c
libvhost-user: support many virtqueues
Currently libvhost-user is hardcoded to at most 8 virtqueues. The
device backend should decide the number of virtqueues, not
libvhost-user. This is important for multiqueue device backends where
the guest driver needs an accurate number of virtqueues.
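A minimal sketch of the reworked initializer, inferred from the call sites in the diff below (the authoritative declaration is in contrib/libvhost-user/libvhost-user.h; treat this prototype as an approximation, not the exact header text):

```c
/* Sketch of the updated entry point: the device backend now passes
 * max_queues explicitly, and setup failure is reported through the
 * bool return value instead of being impossible to observe. */
bool vu_init(VuDev *dev,
             uint16_t max_queues,   /* chosen by the device backend */
             int socket,            /* connected vhost-user socket fd */
             vu_panic_cb panic,
             vu_set_watch_cb set_watch,
             vu_remove_watch_cb remove_watch,
             const VuDevIface *iface);
```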
This change breaks libvhost-user and libvhost-user-glib API stability.
There is no stability guarantee yet, so make this change now and update
all in-tree library users.
This patch touches up vhost-user-blk, vhost-user-gpu, vhost-user-input,
vhost-user-scsi, and vhost-user-bridge. If the device has a fixed
number of queues, that exact number is used. Otherwise the previous
default of 8 virtqueues is kept (see the sketch below).
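As an illustration of the fixed-queue case (the device, constant, and callback names here are hypothetical, not taken from the patch), a backend with a fixed rx/tx queue pair would pass its real count:

```c
#include <stdio.h>
#include <stdlib.h>
#include "libvhost-user.h"   /* VuDev, VuDevIface, vu_init() */

/* Hypothetical network backend with a fixed rx/tx queue pair: pass the
 * real queue count instead of a one-size-fits-all maximum so the guest
 * driver sees an accurate number of virtqueues.  my_panic,
 * my_set_watch and my_remove_watch stand in for the backend's
 * event-loop callbacks. */
enum {
    MY_NET_BACKEND_NUM_QUEUES = 2,   /* fixed by the device type */
};

static void my_backend_connect(VuDev *vudev, int conn_fd,
                               const VuDevIface *iface)
{
    if (!vu_init(vudev, MY_NET_BACKEND_NUM_QUEUES, conn_fd,
                 my_panic, my_set_watch, my_remove_watch, iface)) {
        fprintf(stderr, "Failed to initialize libvhost-user\n");
        exit(1);
    }
}
```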
vu_init() and vug_init() can now fail if malloc() returns NULL. I
considered aborting with an error inside libvhost-user, but it should
be possible to instantiate new vhost-user instances at runtime without
any risk of terminating the whole process. Therefore callers must now
handle vu_init() failure themselves.
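The new failure mode comes from the virtqueue array moving from a fixed-size struct member to the heap. A sketch of the relevant fragment inside vu_init(), assuming the array is sized by the caller-supplied max_queues (field names follow libvhost-user conventions but are an approximation, not a quote of the patch):

```c
/* Sketch of the failing path inside vu_init(): the virtqueue array is
 * now heap-allocated and sized by max_queues, so allocation failure is
 * reported to the caller rather than aborting the process. */
dev->max_queues = max_queues;
dev->vq = malloc(max_queues * sizeof(dev->vq[0]));
if (!dev->vq) {
    DPRINT("%s: failed to malloc virtqueues\n", __func__);
    return false;   /* caller decides whether this is fatal */
}
```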
vhost-user-blk and vhost-user-scsi duplicate virtqueue index checks
that libvhost-user already performs. That code would otherwise have to
be updated to use max_queues; since it is redundant, remove it
completely instead.
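For illustration only (this is the general pattern, not the literal code removed by the patch), the redundancy is a backend-side bounds check that libvhost-user's message handlers already apply before the device backend is ever invoked:

```c
/* Illustrative only: the kind of backend-side guard that becomes
 * redundant, because libvhost-user already rejects virtqueue indices
 * >= max_queues before dispatching to the device backend. */
if (qidx >= MY_BACKEND_MAX_QUEUES) {   /* hypothetical constant */
    return;
}
```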
Signed-off-by: Stefan Hajnoczi <stefanha@redhat.com>
Reviewed-by: Marc-André Lureau <marcandre.lureau@redhat.com>
Message-Id: <20190626074815.19994-3-stefanha@redhat.com>
Reviewed-by: Michael S. Tsirkin <mst@redhat.com>
Signed-off-by: Michael S. Tsirkin <mst@redhat.com>
Diffstat (limited to 'tests')
-rw-r--r--  tests/vhost-user-bridge.c  42
1 file changed, 28 insertions, 14 deletions
diff --git a/tests/vhost-user-bridge.c b/tests/vhost-user-bridge.c
index 0bb03af..c4e350e 100644
--- a/tests/vhost-user-bridge.c
+++ b/tests/vhost-user-bridge.c
@@ -45,6 +45,10 @@
     } \
 } while (0)
 
+enum {
+    VHOST_USER_BRIDGE_MAX_QUEUES = 8,
+};
+
 typedef void (*CallbackFunc)(int sock, void *ctx);
 
 typedef struct Event {
@@ -512,12 +516,16 @@ vubr_accept_cb(int sock, void *ctx)
     }
     DPRINT("Got connection from remote peer on sock %d\n", conn_fd);
 
-    vu_init(&dev->vudev,
-            conn_fd,
-            vubr_panic,
-            vubr_set_watch,
-            vubr_remove_watch,
-            &vuiface);
+    if (!vu_init(&dev->vudev,
+                 VHOST_USER_BRIDGE_MAX_QUEUES,
+                 conn_fd,
+                 vubr_panic,
+                 vubr_set_watch,
+                 vubr_remove_watch,
+                 &vuiface)) {
+        fprintf(stderr, "Failed to initialize libvhost-user\n");
+        exit(1);
+    }
 
     dispatcher_add(&dev->dispatcher, conn_fd, ctx, vubr_receive_cb);
     dispatcher_remove(&dev->dispatcher, sock);
@@ -560,12 +568,18 @@ vubr_new(const char *path, bool client)
         if (connect(dev->sock, (struct sockaddr *)&un, len) == -1) {
             vubr_die("connect");
         }
-        vu_init(&dev->vudev,
-                dev->sock,
-                vubr_panic,
-                vubr_set_watch,
-                vubr_remove_watch,
-                &vuiface);
+
+        if (!vu_init(&dev->vudev,
+                     VHOST_USER_BRIDGE_MAX_QUEUES,
+                     dev->sock,
+                     vubr_panic,
+                     vubr_set_watch,
+                     vubr_remove_watch,
+                     &vuiface)) {
+            fprintf(stderr, "Failed to initialize libvhost-user\n");
+            exit(1);
+        }
+
         cb = vubr_receive_cb;
     }
 
@@ -584,7 +598,7 @@ static void *notifier_thread(void *arg)
     int qidx;
 
     while (true) {
-        for (qidx = 0; qidx < VHOST_MAX_NR_VIRTQUEUE; qidx++) {
+        for (qidx = 0; qidx < VHOST_USER_BRIDGE_MAX_QUEUES; qidx++) {
             uint16_t *n = vubr->notifier.addr + pagesize * qidx;
 
             if (*n == qidx) {
@@ -616,7 +630,7 @@ vubr_host_notifier_setup(VubrDev *dev)
     void *addr;
     int fd;
 
-    length = getpagesize() * VHOST_MAX_NR_VIRTQUEUE;
+    length = getpagesize() * VHOST_USER_BRIDGE_MAX_QUEUES;
 
     fd = mkstemp(template);
     if (fd < 0) {