author     Stefan Hajnoczi <stefanha@redhat.com>    2018-03-07 12:46:19 +0000
committer  Stefan Hajnoczi <stefanha@redhat.com>    2018-03-12 11:07:37 +0000
commit     7376eda7c2e0451e819e81bd05fabc56a9deb946
tree       c5a89490ebd3871f68331544ad39567ca4f0e832
parent     e4ae62b802cec437f877f2cadc4ef059cc0eca76
block: make BDRV_POLL_WHILE() re-entrancy safe
Nested BDRV_POLL_WHILE() calls can occur. Currently
assert(!wait_->need_kick) fails in AIO_WAIT_WHILE() when this happens.

This patch converts the bool wait_->need_kick flag to an unsigned
wait_->num_waiters counter.
Nesting works correctly because outer AIO_WAIT_WHILE() callers evaluate
the condition again after the inner caller completes (invoking the inner
caller counts as aio_poll() progress).
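
As a minimal stand-alone sketch of the failure mode and the fix (illustrative
C only, not part of this patch; the enter/exit helpers are hypothetical
stand-ins for the macro's bookkeeping):

    #include <assert.h>
    #include <stdbool.h>

    static bool need_kick;       /* old scheme: a single flag */
    static unsigned num_waiters; /* new scheme: a counter */

    static void flag_enter(void)    { assert(!need_kick); need_kick = true; }
    static void flag_exit(void)     { need_kick = false; }
    static void counter_enter(void) { num_waiters++; }
    static void counter_exit(void)  { num_waiters--; }

    int main(void)
    {
        /* Counter: nesting is safe.  Kicks keep being delivered while
         * num_waiters > 0, and each level unwinds cleanly. */
        counter_enter();          /* outer AIO_WAIT_WHILE() */
        counter_enter();          /* nested AIO_WAIT_WHILE() */
        counter_exit();           /* inner caller completes */
        assert(num_waiters == 1); /* outer waiter is still visible */
        counter_exit();

        /* Flag: the nested enter finds the flag already set, so the
         * assertion fires -- the reported failure. */
        flag_enter();             /* outer AIO_WAIT_WHILE() */
        flag_enter();             /* nested call: assert(!need_kick) fails */
        flag_exit();
        return 0;
    }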
Reported-by: "fuweiwei (C)" <fuweiwei2@huawei.com>
Reviewed-by: Eric Blake <eblake@redhat.com>
Signed-off-by: Stefan Hajnoczi <stefanha@redhat.com>
Message-id: 20180307124619.6218-1-stefanha@redhat.com
Cc: Paolo Bonzini <pbonzini@redhat.com>
Signed-off-by: Stefan Hajnoczi <stefanha@redhat.com>
 include/block/aio-wait.h | 61 ++++++++++++++++++++++++++++++-------------------------------
 util/aio-wait.c          |  2 +-
 2 files changed, 31 insertions(+), 32 deletions(-)
diff --git a/include/block/aio-wait.h b/include/block/aio-wait.h
index f7a3972..8c90a2e 100644
--- a/include/block/aio-wait.h
+++ b/include/block/aio-wait.h
@@ -50,8 +50,8 @@
  * }
  */
 typedef struct {
-    /* Is the main loop waiting for a kick? Accessed with atomic ops. */
-    bool need_kick;
+    /* Number of waiting AIO_WAIT_WHILE() callers. Accessed with atomic ops. */
+    unsigned num_waiters;
 } AioWait;
 
 /**
@@ -71,35 +71,34 @@ typedef struct {
  * wait on conditions between two IOThreads since that could lead to deadlock,
  * go via the main loop instead.
  */
-#define AIO_WAIT_WHILE(wait, ctx, cond) ({                  \
-    bool waited_ = false;                                   \
-    bool busy_ = true;                                      \
-    AioWait *wait_ = (wait);                                \
-    AioContext *ctx_ = (ctx);                               \
-    if (in_aio_context_home_thread(ctx_)) {                 \
-        while ((cond) || busy_) {                           \
-            busy_ = aio_poll(ctx_, (cond));                 \
-            waited_ |= !!(cond) | busy_;                    \
-        }                                                   \
-    } else {                                                \
-        assert(qemu_get_current_aio_context() ==            \
-               qemu_get_aio_context());                     \
-        assert(!wait_->need_kick);                          \
-        /* Set wait_->need_kick before evaluating cond. */  \
-        atomic_mb_set(&wait_->need_kick, true);             \
-        while (busy_) {                                     \
-            if ((cond)) {                                   \
-                waited_ = busy_ = true;                     \
-                aio_context_release(ctx_);                  \
-                aio_poll(qemu_get_aio_context(), true);     \
-                aio_context_acquire(ctx_);                  \
-            } else {                                        \
-                busy_ = aio_poll(ctx_, false);              \
-                waited_ |= busy_;                           \
-            }                                               \
-        }                                                   \
-        atomic_set(&wait_->need_kick, false);               \
-    }                                                       \
+#define AIO_WAIT_WHILE(wait, ctx, cond) ({                         \
+    bool waited_ = false;                                          \
+    bool busy_ = true;                                             \
+    AioWait *wait_ = (wait);                                       \
+    AioContext *ctx_ = (ctx);                                      \
+    if (in_aio_context_home_thread(ctx_)) {                        \
+        while ((cond) || busy_) {                                  \
+            busy_ = aio_poll(ctx_, (cond));                        \
+            waited_ |= !!(cond) | busy_;                           \
+        }                                                          \
+    } else {                                                       \
+        assert(qemu_get_current_aio_context() ==                   \
+               qemu_get_aio_context());                            \
+        /* Increment wait_->num_waiters before evaluating cond. */ \
+        atomic_inc(&wait_->num_waiters);                           \
+        while (busy_) {                                            \
+            if ((cond)) {                                          \
+                waited_ = busy_ = true;                            \
+                aio_context_release(ctx_);                         \
+                aio_poll(qemu_get_aio_context(), true);            \
+                aio_context_acquire(ctx_);                         \
+            } else {                                               \
+                busy_ = aio_poll(ctx_, false);                     \
+                waited_ |= busy_;                                  \
+            }                                                      \
+        }                                                          \
+        atomic_dec(&wait_->num_waiters);                           \
+    }                                                              \
     waited_; })
 
 /**
diff --git a/util/aio-wait.c b/util/aio-wait.c
index 975afdd..b8a8f86 100644
--- a/util/aio-wait.c
+++ b/util/aio-wait.c
@@ -34,7 +34,7 @@ static void dummy_bh_cb(void *opaque)
 void aio_wait_kick(AioWait *wait)
 {
     /* The barrier (or an atomic op) is in the caller. */
-    if (atomic_read(&wait->need_kick)) {
+    if (atomic_read(&wait->num_waiters)) {
         aio_bh_schedule_oneshot(qemu_get_aio_context(), dummy_bh_cb, NULL);
     }
 }
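
For reference, a sketch of the usage pattern this pair supports, in the
spirit of the documentation comment at the top of aio-wait.h (MyWork,
do_work_bh(), wait_for_work() and global_wait are hypothetical names for
illustration): the main loop spins in AIO_WAIT_WHILE() while an IOThread
completes the work and calls aio_wait_kick().

    #include "qemu/osdep.h"
    #include "block/aio-wait.h"

    static AioWait global_wait;

    typedef struct {
        bool done;
    } MyWork;

    /* Runs in the IOThread's AioContext. */
    static void do_work_bh(void *opaque)
    {
        MyWork *work = opaque;
        /* ... perform the operation ... */
        atomic_mb_set(&work->done, true); /* barrier before the kick; per the
                                           * comment in aio_wait_kick(), the
                                           * barrier is the caller's job */
        aio_wait_kick(&global_wait);
    }

    /* Runs in the main loop; blocks until the IOThread is done.  The macro
     * assumes the caller holds ctx's lock and drops it while polling, and
     * nested AIO_WAIT_WHILE() calls are now safe: each bumps num_waiters. */
    static void wait_for_work(AioContext *ctx)
    {
        MyWork work = { .done = false };
        aio_context_acquire(ctx);
        aio_bh_schedule_oneshot(ctx, do_work_bh, &work);
        AIO_WAIT_WHILE(&global_wait, ctx, !atomic_read(&work.done));
        aio_context_release(ctx);
    }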