author    Stefan Hajnoczi <stefanha@redhat.com>    2019-07-17 16:05:57 +0100
committer Dr. David Alan Gilbert <dgilbert@redhat.com>    2020-01-23 16:41:37 +0000
commit    e7b337326d594b71b07cd6dbb332c49c122c80a4 (patch)
tree      f927a9d9c5baee3d1d3968b10ef768a55a2124a3 /tools/virtiofsd
parent    620e9d8d9cee6df7fe71168dea950dba0cc21a4a (diff)
virtiofsd: prevent fv_queue_thread() vs virtio_loop() races
We call into libvhost-user from the virtqueue handler thread and the
vhost-user message processing thread without a lock. There is nothing
protecting the virtqueue handler thread if the vhost-user message
processing thread changes the virtqueue or memory table while it is
running.

This patch introduces a read-write lock. Virtqueue handler threads are
readers. The vhost-user message processing thread is a writer. This
will allow concurrency for multiqueue in the future while protecting
against fv_queue_thread() vs virtio_loop() races.

Note that the critical sections could be made smaller but it would be
more invasive and require libvhost-user changes. Let's start simple
and improve performance later, if necessary. Another option would be
an RCU-style approach with lighter-weight primitives.

Signed-off-by: Stefan Hajnoczi <stefanha@redhat.com>
Reviewed-by: Daniel P. Berrangé <berrange@redhat.com>
Signed-off-by: Dr. David Alan Gilbert <dgilbert@redhat.com>
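[Editor's note: a minimal, self-contained sketch of the reader/writer
split the commit message describes. This is not QEMU code;
queue_worker() and dispatch_loop() are hypothetical stand-ins for
fv_queue_thread() and virtio_loop().]

    #include <assert.h>
    #include <pthread.h>

    static pthread_rwlock_t dispatch_rwlock = PTHREAD_RWLOCK_INITIALIZER;

    /* Stand-in for fv_queue_thread(): several may run concurrently. */
    static void *queue_worker(void *opaque)
    {
        for (;;) {
            /*
             * Readers share the lock, so queue threads never block each
             * other; they only wait while the writer holds the lock.
             */
            int ret = pthread_rwlock_rdlock(&dispatch_rwlock);
            assert(ret == 0);

            /*
             * ... process virtqueue elements; the virtqueues and memory
             * table cannot change underneath us here ...
             */

            pthread_rwlock_unlock(&dispatch_rwlock);
        }
        return NULL;
    }

    /* Stand-in for virtio_loop(): the single writer. */
    static void dispatch_loop(void)
    {
        for (;;) {
            /*
             * The write lock waits for all readers to drain, then
             * excludes them while vhost-user state is mutated.
             */
            int ret = pthread_rwlock_wrlock(&dispatch_rwlock);
            assert(ret == 0);

            /*
             * ... handle one vhost-user message, which may replace
             * virtqueues or the memory table ...
             */

            pthread_rwlock_unlock(&dispatch_rwlock);
        }
    }

A read-write lock rather than a plain mutex is what leaves the door
open for multiqueue: any number of queue threads can hold the read
lock concurrently, and only vhost-user message processing needs
exclusive access.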
Diffstat (limited to 'tools/virtiofsd')
-rw-r--r--  tools/virtiofsd/fuse_virtio.c  34
1 file changed, 33 insertions, 1 deletion
diff --git a/tools/virtiofsd/fuse_virtio.c b/tools/virtiofsd/fuse_virtio.c
index fb8d6d1..f6242f9 100644
--- a/tools/virtiofsd/fuse_virtio.c
+++ b/tools/virtiofsd/fuse_virtio.c
@@ -59,6 +59,18 @@ struct fv_VuDev {
struct fuse_session *se;
/*
+ * Either handle virtqueues or vhost-user protocol messages. Don't do
+ * both at the same time since that could lead to race conditions if
+ * virtqueues or memory tables change while another thread is accessing
+ * them.
+ *
+ * The assumptions are:
+ * 1. fv_queue_thread() reads/writes to virtqueues and only reads VuDev.
+ * 2. virtio_loop() reads/writes virtqueues and VuDev.
+ */
+ pthread_rwlock_t vu_dispatch_rwlock;
+
+ /*
* The following pair of fields are only accessed in the main
* virtio_loop
*/
@@ -415,6 +427,8 @@ static void *fv_queue_thread(void *opaque)
qi->qidx, qi->kick_fd);
while (1) {
struct pollfd pf[2];
+ int ret;
+
pf[0].fd = qi->kick_fd;
pf[0].events = POLLIN;
pf[0].revents = 0;
@@ -461,6 +475,9 @@ static void *fv_queue_thread(void *opaque)
fuse_log(FUSE_LOG_ERR, "Eventfd_read for queue: %m\n");
break;
}
+ /* Mutual exclusion with virtio_loop() */
+ ret = pthread_rwlock_rdlock(&qi->virtio_dev->vu_dispatch_rwlock);
+ assert(ret == 0); /* there is no possible error case */
/* out is from guest, in is to guest */
unsigned int in_bytes, out_bytes;
vu_queue_get_avail_bytes(dev, q, &in_bytes, &out_bytes, ~0, ~0);
@@ -469,6 +486,7 @@ static void *fv_queue_thread(void *opaque)
"%s: Queue %d gave evalue: %zx available: in: %u out: %u\n",
__func__, qi->qidx, (size_t)evalue, in_bytes, out_bytes);
+
while (1) {
bool allocated_bufv = false;
struct fuse_bufvec bufv;
@@ -597,6 +615,8 @@ static void *fv_queue_thread(void *opaque)
free(elem);
elem = NULL;
}
+
+ pthread_rwlock_unlock(&qi->virtio_dev->vu_dispatch_rwlock);
}
out:
pthread_mutex_destroy(&ch.lock);
@@ -711,6 +731,8 @@ int virtio_loop(struct fuse_session *se)
while (!fuse_session_exited(se)) {
struct pollfd pf[1];
+ bool ok;
+ int ret;
pf[0].fd = se->vu_socketfd;
pf[0].events = POLLIN;
pf[0].revents = 0;
@@ -735,7 +757,15 @@ int virtio_loop(struct fuse_session *se)
}
assert(pf[0].revents & POLLIN);
fuse_log(FUSE_LOG_DEBUG, "%s: Got VU event\n", __func__);
- if (!vu_dispatch(&se->virtio_dev->dev)) {
+ /* Mutual exclusion with fv_queue_thread() */
+ ret = pthread_rwlock_wrlock(&se->virtio_dev->vu_dispatch_rwlock);
+ assert(ret == 0); /* there is no possible error case */
+
+ ok = vu_dispatch(&se->virtio_dev->dev);
+
+ pthread_rwlock_unlock(&se->virtio_dev->vu_dispatch_rwlock);
+
+ if (!ok) {
fuse_log(FUSE_LOG_ERR, "%s: vu_dispatch failed\n", __func__);
break;
}
@@ -877,6 +907,7 @@ int virtio_session_mount(struct fuse_session *se)
se->vu_socketfd = data_sock;
se->virtio_dev->se = se;
+ pthread_rwlock_init(&se->virtio_dev->vu_dispatch_rwlock, NULL);
vu_init(&se->virtio_dev->dev, 2, se->vu_socketfd, fv_panic, fv_set_watch,
fv_remove_watch, &fv_iface);
@@ -892,6 +923,7 @@ void virtio_session_close(struct fuse_session *se)
}
free(se->virtio_dev->qi);
+ pthread_rwlock_destroy(&se->virtio_dev->vu_dispatch_rwlock);
free(se->virtio_dev);
se->virtio_dev = NULL;
}