author     Max Reitz <mreitz@redhat.com>    2018-09-25 16:12:41 +0200
committer  Max Reitz <mreitz@redhat.com>    2018-09-25 16:12:44 +0200
commit     9c76ff9c16be890e70fce30754b096ff9950d1ee
tree       358214b68c3d62d3943b80c22d744d0e32764475 /block
parent     66da04ddd3dcb8c61ee664b6faced132da002006
parent     d8b3afd597d54e496809b05ac39ac29a5799664f
Merge remote-tracking branch 'kevin/tags/for-upstream' into block
Block layer patches:
- Fix some jobs/drain/aio_poll related hangs
- commit: Add top-node/base-node options
- linux-aio: Fix locking for qemu_laio_process_completions()
- Fix use after free error in bdrv_open_inherit
# gpg: Signature made Tue Sep 25 15:54:01 2018 CEST
# gpg: using RSA key 7F09B272C88F2FD6
# gpg: Good signature from "Kevin Wolf <kwolf@redhat.com>"
# Primary key fingerprint: DC3D EB15 9A9A F95D 3D74 56FE 7F09 B272 C88F 2FD6
* kevin/tags/for-upstream: (26 commits)
test-bdrv-drain: Test draining job source child and parent
block: Use a single global AioWait
test-bdrv-drain: Fix outdated comments
test-bdrv-drain: AIO_WAIT_WHILE() in job .commit/.abort
job: Avoid deadlocks in job_completed_txn_abort()
test-bdrv-drain: Test nested poll in bdrv_drain_poll_top_level()
block: Remove aio_poll() in bdrv_drain_poll variants
blockjob: Lie better in child_job_drained_poll()
block-backend: Decrease in_flight only after callback
block-backend: Fix potential double blk_delete()
block-backend: Add .drained_poll callback
block: Add missing locking in bdrv_co_drain_bh_cb()
test-bdrv-drain: Test AIO_WAIT_WHILE() in completion callback
job: Use AIO_WAIT_WHILE() in job_finish_sync()
test-blockjob: Acquire AioContext around job_cancel_sync()
test-bdrv-drain: Drain with block jobs in an I/O thread
aio-wait: Increase num_waiters even in home thread
blockjob: Wake up BDS when job becomes idle
job: Fix missing locking due to mismerge
job: Fix nested aio_poll() hanging in job_txn_apply
...
Signed-off-by: Max Reitz <mreitz@redhat.com>
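
Two of the merged fixes revolve around the same invariant: a request's in-flight count must stay elevated until its completion callback has finished, and waiters must only be kicked afterwards, otherwise blk_drain()/blk_unref() can see an idle backend and delete it while the callback is still running (see the blk_aio_complete() and blk_unref() hunks in the diff below). The following is a minimal standalone sketch of that ordering using plain pthreads and C11 atomics, not QEMU's AioWait machinery; all names here (inflight_dec_and_kick, drain_wait, ...) are made up for illustration.

/* Sketch: keep the in-flight count elevated across the completion callback,
 * and only wake drain waiters once the callback can no longer touch the
 * object.  Hypothetical names; not QEMU code. */
#include <pthread.h>
#include <stdatomic.h>
#include <stdio.h>

static atomic_uint in_flight;
static pthread_mutex_t wait_lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t wait_cond = PTHREAD_COND_INITIALIZER;

static void inflight_inc(void)
{
    atomic_fetch_add(&in_flight, 1);
}

/* Analogue of blk_dec_in_flight() + aio_wait_kick(): drop the counter and
 * wake anyone blocked in drain_wait(). */
static void inflight_dec_and_kick(void)
{
    atomic_fetch_sub(&in_flight, 1);
    pthread_mutex_lock(&wait_lock);
    pthread_cond_broadcast(&wait_cond);
    pthread_mutex_unlock(&wait_lock);
}

/* Analogue of blk_aio_complete() after the fix: run the callback first,
 * decrement afterwards, so a drainer cannot free state under the callback. */
static void aio_complete(void (*cb)(void *), void *opaque)
{
    cb(opaque);                 /* callback may still issue follow-up work */
    inflight_dec_and_kick();    /* only now may drain_wait() return */
}

/* Analogue of AIO_WAIT_WHILE(..., in_flight > 0). */
static void drain_wait(void)
{
    pthread_mutex_lock(&wait_lock);
    while (atomic_load(&in_flight) > 0) {
        pthread_cond_wait(&wait_cond, &wait_lock);
    }
    pthread_mutex_unlock(&wait_lock);
}

static void done_cb(void *opaque)
{
    printf("request %s completed\n", (const char *)opaque);
}

int main(void)
{
    inflight_inc();
    aio_complete(done_cb, "r1");
    drain_wait();               /* returns immediately: callback already ran */
    return 0;
}

Swapping the two statements in aio_complete() reproduces the kind of bug the series fixes: drain_wait() could return, and the backend be torn down, while cb() is still executing.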
Diffstat (limited to 'block')
block/block-backend.c | 31
block/io.c            | 30
block/linux-aio.c     |  2
3 files changed, 40 insertions, 23 deletions
diff --git a/block/block-backend.c b/block/block-backend.c
index 14a1b7a..7b1ec50 100644
--- a/block/block-backend.c
+++ b/block/block-backend.c
@@ -88,7 +88,6 @@ struct BlockBackend {
      * Accessed with atomic ops.
      */
     unsigned int in_flight;
-    AioWait wait;
 };
 
 typedef struct BlockBackendAIOCB {
@@ -121,6 +120,7 @@ static void blk_root_inherit_options(int *child_flags, QDict *child_options,
     abort();
 }
 static void blk_root_drained_begin(BdrvChild *child);
+static bool blk_root_drained_poll(BdrvChild *child);
 static void blk_root_drained_end(BdrvChild *child);
 
 static void blk_root_change_media(BdrvChild *child, bool load);
@@ -294,6 +294,7 @@ static const BdrvChildRole child_root = {
     .get_parent_desc    = blk_root_get_parent_desc,
 
     .drained_begin      = blk_root_drained_begin,
+    .drained_poll       = blk_root_drained_poll,
     .drained_end        = blk_root_drained_end,
 
     .activate           = blk_root_activate,
@@ -433,6 +434,7 @@ int blk_get_refcnt(BlockBackend *blk)
  */
 void blk_ref(BlockBackend *blk)
 {
+    assert(blk->refcnt > 0);
     blk->refcnt++;
 }
 
@@ -445,7 +447,13 @@ void blk_unref(BlockBackend *blk)
 {
     if (blk) {
         assert(blk->refcnt > 0);
-        if (!--blk->refcnt) {
+        if (blk->refcnt > 1) {
+            blk->refcnt--;
+        } else {
+            blk_drain(blk);
+            /* blk_drain() cannot resurrect blk, nobody held a reference */
+            assert(blk->refcnt == 1);
+            blk->refcnt = 0;
             blk_delete(blk);
         }
     }
@@ -1289,7 +1297,7 @@ static void blk_inc_in_flight(BlockBackend *blk)
 static void blk_dec_in_flight(BlockBackend *blk)
 {
     atomic_dec(&blk->in_flight);
-    aio_wait_kick(&blk->wait);
+    aio_wait_kick();
 }
 
 static void error_callback_bh(void *opaque)
@@ -1330,8 +1338,8 @@ static const AIOCBInfo blk_aio_em_aiocb_info = {
 static void blk_aio_complete(BlkAioEmAIOCB *acb)
 {
     if (acb->has_returned) {
-        blk_dec_in_flight(acb->rwco.blk);
         acb->common.cb(acb->common.opaque, acb->rwco.ret);
+        blk_dec_in_flight(acb->rwco.blk);
         qemu_aio_unref(acb);
     }
 }
@@ -1590,9 +1598,8 @@ void blk_drain(BlockBackend *blk)
     }
 
     /* We may have -ENOMEDIUM completions in flight */
-    AIO_WAIT_WHILE(&blk->wait,
-                   blk_get_aio_context(blk),
-                   atomic_mb_read(&blk->in_flight) > 0);
+    AIO_WAIT_WHILE(blk_get_aio_context(blk),
+                   atomic_mb_read(&blk->in_flight) > 0);
 
     if (bs) {
         bdrv_drained_end(bs);
@@ -1611,8 +1618,7 @@ void blk_drain_all(void)
         aio_context_acquire(ctx);
 
         /* We may have -ENOMEDIUM completions in flight */
-        AIO_WAIT_WHILE(&blk->wait, ctx,
-                       atomic_mb_read(&blk->in_flight) > 0);
+        AIO_WAIT_WHILE(ctx, atomic_mb_read(&blk->in_flight) > 0);
 
         aio_context_release(ctx);
     }
@@ -2189,6 +2195,13 @@ static void blk_root_drained_begin(BdrvChild *child)
     }
 }
 
+static bool blk_root_drained_poll(BdrvChild *child)
+{
+    BlockBackend *blk = child->opaque;
+    assert(blk->quiesce_counter);
+    return !!blk->in_flight;
+}
+
 static void blk_root_drained_end(BdrvChild *child)
 {
     BlockBackend *blk = child->opaque;
diff --git a/block/io.c b/block/io.c
--- a/block/io.c
+++ b/block/io.c
@@ -38,8 +38,6 @@
 /* Maximum bounce buffer for copy-on-read and write zeroes, in bytes */
 #define MAX_BOUNCE_BUFFER (32768 << BDRV_SECTOR_BITS)
 
-static AioWait drain_all_aio_wait;
-
 static void bdrv_parent_cb_resize(BlockDriverState *bs);
 static int coroutine_fn bdrv_co_do_pwrite_zeroes(BlockDriverState *bs,
     int64_t offset, int bytes, BdrvRequestFlags flags);
@@ -268,10 +266,6 @@ bool bdrv_drain_poll(BlockDriverState *bs, bool recursive,
 static bool bdrv_drain_poll_top_level(BlockDriverState *bs, bool recursive,
                                       BdrvChild *ignore_parent)
 {
-    /* Execute pending BHs first and check everything else only after the BHs
-     * have executed. */
-    while (aio_poll(bs->aio_context, false));
-
     return bdrv_drain_poll(bs, recursive, ignore_parent, false);
 }
 
@@ -288,6 +282,18 @@ static void bdrv_co_drain_bh_cb(void *opaque)
     BlockDriverState *bs = data->bs;
 
     if (bs) {
+        AioContext *ctx = bdrv_get_aio_context(bs);
+        AioContext *co_ctx = qemu_coroutine_get_aio_context(co);
+
+        /*
+         * When the coroutine yielded, the lock for its home context was
+         * released, so we need to re-acquire it here. If it explicitly
+         * acquired a different context, the lock is still held and we don't
+         * want to lock it a second time (or AIO_WAIT_WHILE() would hang).
+         */
+        if (ctx == co_ctx) {
+            aio_context_acquire(ctx);
+        }
         bdrv_dec_in_flight(bs);
         if (data->begin) {
             bdrv_do_drained_begin(bs, data->recursive, data->parent,
@@ -296,6 +302,9 @@ static void bdrv_co_drain_bh_cb(void *opaque)
             bdrv_do_drained_end(bs, data->recursive, data->parent,
                                 data->ignore_bds_parents);
         }
+        if (ctx == co_ctx) {
+            aio_context_release(ctx);
+        }
     } else {
         assert(data->begin);
         bdrv_drain_all_begin();
@@ -496,10 +505,6 @@ static bool bdrv_drain_all_poll(void)
     BlockDriverState *bs = NULL;
     bool result = false;
 
-    /* Execute pending BHs first (may modify the graph) and check everything
-     * else only after the BHs have executed. */
-    while (aio_poll(qemu_get_aio_context(), false));
-
     /* bdrv_drain_poll() can't make changes to the graph and we are holding the
      * main AioContext lock, so iterating bdrv_next_all_states() is safe. */
     while ((bs = bdrv_next_all_states(bs))) {
@@ -550,7 +555,7 @@ void bdrv_drain_all_begin(void)
     }
 
     /* Now poll the in-flight requests */
-    AIO_WAIT_WHILE(&drain_all_aio_wait, NULL, bdrv_drain_all_poll());
+    AIO_WAIT_WHILE(NULL, bdrv_drain_all_poll());
 
     while ((bs = bdrv_next_all_states(bs))) {
         bdrv_drain_assert_idle(bs);
@@ -706,8 +711,7 @@ void bdrv_inc_in_flight(BlockDriverState *bs)
 
 void bdrv_wakeup(BlockDriverState *bs)
 {
-    aio_wait_kick(bdrv_get_aio_wait(bs));
-    aio_wait_kick(&drain_all_aio_wait);
+    aio_wait_kick();
 }
 
 void bdrv_dec_in_flight(BlockDriverState *bs)
diff --git a/block/linux-aio.c b/block/linux-aio.c
index 19eb922..217ce60 100644
--- a/block/linux-aio.c
+++ b/block/linux-aio.c
@@ -234,9 +234,9 @@ static void qemu_laio_process_completions(LinuxAioState *s)
 
 static void qemu_laio_process_completions_and_submit(LinuxAioState *s)
 {
+    aio_context_acquire(s->aio_context);
     qemu_laio_process_completions(s);
 
-    aio_context_acquire(s->aio_context);
     if (!s->io_q.plugged && !QSIMPLEQ_EMPTY(&s->io_q.pending)) {
         ioq_submit(s);
     }
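
The linux-aio hunk above moves aio_context_acquire() in front of qemu_laio_process_completions(), so the lock is held not only around the resubmission step but also while the completion callbacks run. A rough standalone analogue of that lock ordering, with a plain pthread mutex standing in for the AioContext lock; the structures and names are hypothetical, not the QEMU implementation.

/* Sketch of the locking order: take the shared lock before draining
 * completions, because the completion callbacks already rely on it.
 * Hypothetical structures; not QEMU code. */
#include <pthread.h>
#include <stdio.h>

struct aio_state {
    pthread_mutex_t lock;   /* analogue of the AioContext lock */
    int completed;          /* completions waiting to be processed */
    int pending;            /* queued submissions */
};

/* Invokes completion handling; handlers may re-queue work, so the caller
 * must already hold s->lock. */
static void process_completions(struct aio_state *s)
{
    while (s->completed > 0) {
        s->completed--;
        s->pending++;       /* e.g. a callback submits a follow-up request */
    }
}

static void submit_pending(struct aio_state *s)
{
    while (s->pending > 0) {
        s->pending--;
        printf("submitted one request\n");
    }
}

static void process_completions_and_submit(struct aio_state *s)
{
    pthread_mutex_lock(&s->lock);   /* acquire *before* processing, as in the fix */
    process_completions(s);
    submit_pending(s);
    pthread_mutex_unlock(&s->lock);
}

int main(void)
{
    struct aio_state s = { PTHREAD_MUTEX_INITIALIZER, 2, 0 };
    process_completions_and_submit(&s);
    return 0;
}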