From 7376eda7c2e0451e819e81bd05fabc56a9deb946 Mon Sep 17 00:00:00 2001
From: Stefan Hajnoczi <stefanha@redhat.com>
Date: Wed, 7 Mar 2018 12:46:19 +0000
Subject: block: make BDRV_POLL_WHILE() re-entrancy safe

Nested BDRV_POLL_WHILE() calls can occur.  Currently
assert(!wait_->need_kick) fails in AIO_WAIT_WHILE() when this happens.

This patch converts the bool wait_->need_kick flag to an unsigned
wait_->num_waiters counter.

Nesting works correctly because outer AIO_WAIT_WHILE() callers evaluate
the condition again after the inner caller completes (invoking the inner
caller counts as aio_poll() progress).

Reported-by: "fuweiwei (C)"
Reviewed-by: Eric Blake
Signed-off-by: Stefan Hajnoczi <stefanha@redhat.com>
Message-id: 20180307124619.6218-1-stefanha@redhat.com
Cc: Paolo Bonzini
Signed-off-by: Stefan Hajnoczi <stefanha@redhat.com>
---
 include/block/aio-wait.h | 61 ++++++++++++++++++++++++------------------------------------
 util/aio-wait.c          |  2 +-
 2 files changed, 31 insertions(+), 32 deletions(-)

diff --git a/include/block/aio-wait.h b/include/block/aio-wait.h
index f7a3972..8c90a2e 100644
--- a/include/block/aio-wait.h
+++ b/include/block/aio-wait.h
@@ -50,8 +50,8 @@
  * }
  */
 typedef struct {
-    /* Is the main loop waiting for a kick?  Accessed with atomic ops. */
-    bool need_kick;
+    /* Number of waiting AIO_WAIT_WHILE() callers.  Accessed with atomic ops. */
+    unsigned num_waiters;
 } AioWait;
 
 /**
@@ -71,35 +71,34 @@ typedef struct {
  * wait on conditions between two IOThreads since that could lead to deadlock,
  * go via the main loop instead.
  */
-#define AIO_WAIT_WHILE(wait, ctx, cond) ({                  \
-    bool waited_ = false;                                   \
-    bool busy_ = true;                                      \
-    AioWait *wait_ = (wait);                                \
-    AioContext *ctx_ = (ctx);                               \
-    if (in_aio_context_home_thread(ctx_)) {                 \
-        while ((cond) || busy_) {                           \
-            busy_ = aio_poll(ctx_, (cond));                 \
-            waited_ |= !!(cond) | busy_;                    \
-        }                                                   \
-    } else {                                                \
-        assert(qemu_get_current_aio_context() ==            \
-               qemu_get_aio_context());                     \
-        assert(!wait_->need_kick);                          \
-        /* Set wait_->need_kick before evaluating cond. */  \
-        atomic_mb_set(&wait_->need_kick, true);             \
-        while (busy_) {                                     \
-            if ((cond)) {                                   \
-                waited_ = busy_ = true;                     \
-                aio_context_release(ctx_);                  \
-                aio_poll(qemu_get_aio_context(), true);     \
-                aio_context_acquire(ctx_);                  \
-            } else {                                        \
-                busy_ = aio_poll(ctx_, false);              \
-                waited_ |= busy_;                           \
-            }                                               \
-        }                                                   \
-        atomic_set(&wait_->need_kick, false);               \
-    }                                                       \
+#define AIO_WAIT_WHILE(wait, ctx, cond) ({                         \
+    bool waited_ = false;                                          \
+    bool busy_ = true;                                             \
+    AioWait *wait_ = (wait);                                       \
+    AioContext *ctx_ = (ctx);                                      \
+    if (in_aio_context_home_thread(ctx_)) {                        \
+        while ((cond) || busy_) {                                  \
+            busy_ = aio_poll(ctx_, (cond));                        \
+            waited_ |= !!(cond) | busy_;                           \
+        }                                                          \
+    } else {                                                       \
+        assert(qemu_get_current_aio_context() ==                   \
+               qemu_get_aio_context());                            \
+        /* Increment wait_->num_waiters before evaluating cond. */ \
+        atomic_inc(&wait_->num_waiters);                           \
+        while (busy_) {                                            \
+            if ((cond)) {                                          \
+                waited_ = busy_ = true;                            \
+                aio_context_release(ctx_);                         \
+                aio_poll(qemu_get_aio_context(), true);            \
+                aio_context_acquire(ctx_);                         \
+            } else {                                               \
+                busy_ = aio_poll(ctx_, false);                     \
+                waited_ |= busy_;                                  \
+            }                                                      \
+        }                                                          \
+        atomic_dec(&wait_->num_waiters);                           \
+    }                                                              \
     waited_; })
 
 /**
diff --git a/util/aio-wait.c b/util/aio-wait.c
index 975afdd..b8a8f86 100644
--- a/util/aio-wait.c
+++ b/util/aio-wait.c
@@ -34,7 +34,7 @@ static void dummy_bh_cb(void *opaque)
 void aio_wait_kick(AioWait *wait)
 {
     /* The barrier (or an atomic op) is in the caller.  */
-    if (atomic_read(&wait->need_kick)) {
+    if (atomic_read(&wait->num_waiters)) {
         aio_bh_schedule_oneshot(qemu_get_aio_context(), dummy_bh_cb, NULL);
     }
 }
--
cgit v1.1
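
Aside from the patch itself, here is a minimal standalone sketch of why a
waiter count nests where a boolean flag trips the old assertion.  It uses
plain C11 atomics rather than QEMU's atomic_inc()/atomic_read() helpers, and
the names wait_while() and kick_needed() are illustrative stand-ins, not
QEMU API:

#include <assert.h>
#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

static atomic_uint num_waiters;

/* Analogous to aio_wait_kick()'s test: kick only if someone is waiting. */
static bool kick_needed(void)
{
    return atomic_load(&num_waiters) > 0;
}

/* Hypothetical stand-in for AIO_WAIT_WHILE(); depth simulates nesting. */
static void wait_while(int depth)
{
    /* Register this waiter before evaluating the condition, as the
     * patch does with atomic_inc(&wait_->num_waiters). */
    atomic_fetch_add(&num_waiters, 1);

    if (depth > 0) {
        /* A nested call.  With the old bool need_kick flag, the
         * equivalent of assert(!wait_->need_kick) would fail here. */
        wait_while(depth - 1);
        /* The outer waiter is still registered after the inner one
         * returns, so a kick cannot be lost. */
        assert(kick_needed());
    }

    atomic_fetch_sub(&num_waiters, 1);
}

int main(void)
{
    wait_while(2);            /* three nested waiters */
    assert(!kick_needed());   /* every waiter unregistered on exit */
    printf("nested waiters: OK\n");
    return 0;
}

With a bool flag, the inner waiter would have to clear the flag on exit,
silently unregistering the still-waiting outer caller (and the old code
simply asserted the flag was not already set).  With a counter, the count
stays non-zero until the outermost waiter finishes, which is exactly the
zero/non-zero test aio_wait_kick() performs.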