author    Stefan Hajnoczi <stefanha@redhat.com>  2023-05-16 15:02:24 -0400
committer Kevin Wolf <kwolf@redhat.com>  2023-05-30 17:32:02 +0200
commit    8f5e9a8ee189b44ffa90cc6db61e25499b9d786a (patch)
tree      1663d44305b3d07d36099d69880bba2efdfa7cbb
parent    75d33e852536361367c8460abd8b04e3fe9921ee (diff)
block/export: wait for vhost-user-blk requests when draining
Each vhost-user-blk request runs in a coroutine. When the BlockBackend enters a drained section we need to enter a quiescent state. Currently any in-flight requests race with bdrv_drained_begin() because it is unaware of vhost-user-blk requests.

When blk_co_preadv/pwritev()/etc returns it wakes the bdrv_drained_begin() thread but vhost-user-blk request processing has not yet finished. The request coroutine continues executing while the main loop thread thinks it is in a drained section.

One example where this is unsafe is for blk_set_aio_context() where bdrv_drained_begin() is called before .aio_context_detached() and .aio_context_attach(). If request coroutines are still running after bdrv_drained_begin(), then the AioContext could change underneath them and they race with new requests processed in the new AioContext. This could lead to virtqueue corruption, for example.

(This example is theoretical, I came across this while reading the code and have not tried to reproduce it.)

It's easy to make bdrv_drained_begin() wait for in-flight requests: add a .drained_poll() callback that checks the VuServer's in-flight counter. VuServer just needs an API that returns true when there are requests in flight. The in-flight counter needs to be atomic.

Signed-off-by: Stefan Hajnoczi <stefanha@redhat.com>
Reviewed-by: Kevin Wolf <kwolf@redhat.com>
Message-Id: <20230516190238.8401-7-stefanha@redhat.com>
Signed-off-by: Kevin Wolf <kwolf@redhat.com>
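For context, the following is a minimal sketch of the contract this patch relies on: a drained section repeatedly polls a BlockDevOps-style .drained_poll callback until the device reports no in-flight requests. This is not QEMU's actual drain implementation; DeviceOps, drain_poll_loop() and the spin loop are hypothetical simplifications used only for illustration.

/*
 * Hypothetical sketch, not QEMU code: poll a .drained_poll-style callback
 * until the device is quiescent. QEMU's drain loop would dispatch pending
 * events (e.g. via aio_poll()) between polls; here we simply spin.
 */
#include <stdbool.h>
#include <stddef.h>

typedef struct {
    /* Returns true while requests are still in flight. */
    bool (*drained_poll)(void *opaque);
} DeviceOps;

static void drain_poll_loop(const DeviceOps *ops, void *opaque)
{
    while (ops->drained_poll != NULL && ops->drained_poll(opaque)) {
        /* Busy-wait: a real event loop would make progress here so that
         * the remaining requests can actually complete. */
    }
}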
-rw-r--r--  block/export/vhost-user-blk-server.c  13
-rw-r--r--  include/qemu/vhost-user-server.h       4
-rw-r--r--  util/vhost-user-server.c              18
3 files changed, 28 insertions, 7 deletions
diff --git a/block/export/vhost-user-blk-server.c b/block/export/vhost-user-blk-server.c
index 841acb3..f51a36a 100644
--- a/block/export/vhost-user-blk-server.c
+++ b/block/export/vhost-user-blk-server.c
@@ -272,7 +272,20 @@ static void vu_blk_exp_resize(void *opaque)
     vu_config_change_msg(&vexp->vu_server.vu_dev);
 }
 
+/*
+ * Ensures that bdrv_drained_begin() waits until in-flight requests complete.
+ *
+ * Called with vexp->export.ctx acquired.
+ */
+static bool vu_blk_drained_poll(void *opaque)
+{
+    VuBlkExport *vexp = opaque;
+
+    return vhost_user_server_has_in_flight(&vexp->vu_server);
+}
+
 static const BlockDevOps vu_blk_dev_ops = {
+    .drained_poll = vu_blk_drained_poll,
     .resize_cb = vu_blk_exp_resize,
 };
diff --git a/include/qemu/vhost-user-server.h b/include/qemu/vhost-user-server.h
index bc0ac9d..b1c1cda 100644
--- a/include/qemu/vhost-user-server.h
+++ b/include/qemu/vhost-user-server.h
@@ -40,8 +40,9 @@ typedef struct {
     int max_queues;
     const VuDevIface *vu_iface;
 
+    unsigned int in_flight; /* atomic */
+
     /* Protected by ctx lock */
-    unsigned int in_flight;
     bool wait_idle;
     VuDev vu_dev;
     QIOChannel *ioc; /* The I/O channel with the client */
@@ -62,6 +63,7 @@ void vhost_user_server_stop(VuServer *server);
 
 void vhost_user_server_inc_in_flight(VuServer *server);
 void vhost_user_server_dec_in_flight(VuServer *server);
+bool vhost_user_server_has_in_flight(VuServer *server);
 
 void vhost_user_server_attach_aio_context(VuServer *server, AioContext *ctx);
 void vhost_user_server_detach_aio_context(VuServer *server);
diff --git a/util/vhost-user-server.c b/util/vhost-user-server.c
index 1622f8c..68c3bf1 100644
--- a/util/vhost-user-server.c
+++ b/util/vhost-user-server.c
@@ -78,17 +78,23 @@ static void panic_cb(VuDev *vu_dev, const char *buf)
 void vhost_user_server_inc_in_flight(VuServer *server)
 {
     assert(!server->wait_idle);
-    server->in_flight++;
+    qatomic_inc(&server->in_flight);
 }
 
 void vhost_user_server_dec_in_flight(VuServer *server)
 {
-    server->in_flight--;
-    if (server->wait_idle && !server->in_flight) {
-        aio_co_wake(server->co_trip);
+    if (qatomic_fetch_dec(&server->in_flight) == 1) {
+        if (server->wait_idle) {
+            aio_co_wake(server->co_trip);
+        }
     }
 }
 
+bool vhost_user_server_has_in_flight(VuServer *server)
+{
+    return qatomic_load_acquire(&server->in_flight) > 0;
+}
+
 static bool coroutine_fn
 vu_message_read(VuDev *vu_dev, int conn_fd, VhostUserMsg *vmsg)
 {
@@ -192,13 +198,13 @@ static coroutine_fn void vu_client_trip(void *opaque)
         /* Keep running */
     }
 
-    if (server->in_flight) {
+    if (vhost_user_server_has_in_flight(server)) {
         /* Wait for requests to complete before we can unmap the memory */
         server->wait_idle = true;
         qemu_coroutine_yield();
         server->wait_idle = false;
     }
 
-    assert(server->in_flight == 0);
+    assert(!vhost_user_server_has_in_flight(server));
 
     vu_deinit(vu_dev);