author | Alberto Garcia <berto@igalia.com> | 2016-10-28 10:08:02 +0300
---|---|---
committer | Kevin Wolf <kwolf@redhat.com> | 2016-10-31 16:51:14 +0100
commit | c0778f6693213d0d6bfa7bee8045374a798db750 |
tree | f3e9b270885565b056a7a6accabc4f5b785d8820 /block |
parent | ad0e90a682280030af81ece502715f64232706db |
block: Add bdrv_drain_all_{begin,end}()
bdrv_drain_all() doesn't allow the caller to do anything after all
pending requests have been completed but before block jobs are
resumed.
This patch splits bdrv_drain_all() into _begin() and _end() for that
purpose. It also adds aio_{disable,enable}_external() calls to disable
external clients in the meantime.
An important restriction of this split is that no new block jobs or
BlockDriverStates can be created between the bdrv_drain_all_begin()
and bdrv_drain_all_end() calls. This is not a concern now because
we'll only be using this in bdrv_reopen_multiple(), but it must be
dealt with if we ever have other use cases in the future.
Signed-off-by: Alberto Garcia <berto@igalia.com>
Cc: Paolo Bonzini <pbonzini@redhat.com>
Reviewed-by: Kevin Wolf <kwolf@redhat.com>
Signed-off-by: Kevin Wolf <kwolf@redhat.com>
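
For context, the intended calling pattern is a paired begin/end section. Below is a minimal, hypothetical sketch (not part of this commit) of how a caller such as the planned bdrv_reopen_multiple() conversion might pair the new functions; the helper name reconfigure_nodes() is invented for illustration, and the declarations are assumed to be available via "block/block.h" alongside bdrv_drain_all().

```c
#include "block/block.h"  /* assumed to declare bdrv_drain_all_{begin,end}() */

/*
 * Hypothetical caller: quiesce the whole block layer, reconfigure the
 * existing nodes, then resume.  Between begin and end all pending
 * requests have completed, block jobs are paused and external clients
 * are disabled.
 */
static void reconfigure_nodes(void)
{
    bdrv_drain_all_begin();

    /*
     * ... reopen/reconfigure existing BlockDriverStates here ...
     *
     * Per the commit message, no new block jobs or BlockDriverStates
     * may be created inside this section.
     */

    bdrv_drain_all_end();  /* re-enables external clients, resumes jobs */
}
```

Because bdrv_drain_all() itself becomes bdrv_drain_all_begin() followed immediately by bdrv_drain_all_end() (see the last hunk below), existing callers keep their previous behaviour.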
Diffstat (limited to 'block')
-rw-r--r-- | block/io.c | 27 |
1 file changed, 24 insertions(+), 3 deletions(-)
@@ -273,8 +273,14 @@ void bdrv_drain(BlockDriverState *bs)
  *
  * This function does not flush data to disk, use bdrv_flush_all() for that
  * after calling this function.
+ *
+ * This pauses all block jobs and disables external clients. It must
+ * be paired with bdrv_drain_all_end().
+ *
+ * NOTE: no new block jobs or BlockDriverStates can be created between
+ * the bdrv_drain_all_begin() and bdrv_drain_all_end() calls.
  */
-void bdrv_drain_all(void)
+void bdrv_drain_all_begin(void)
 {
     /* Always run first iteration so any pending completion BHs run */
     bool waited = true;
@@ -297,6 +303,7 @@ void bdrv_drain_all(void)
         aio_context_acquire(aio_context);
         bdrv_parent_drained_begin(bs);
         bdrv_io_unplugged_begin(bs);
+        aio_disable_external(aio_context);
         aio_context_release(aio_context);
 
         if (!g_slist_find(aio_ctxs, aio_context)) {
@@ -326,17 +333,25 @@ void bdrv_drain_all(void)
         }
     }
 
+    g_slist_free(aio_ctxs);
+}
+
+void bdrv_drain_all_end(void)
+{
+    BlockDriverState *bs;
+    BdrvNextIterator it;
+    BlockJob *job = NULL;
+
     for (bs = bdrv_first(&it); bs; bs = bdrv_next(&it)) {
         AioContext *aio_context = bdrv_get_aio_context(bs);
 
         aio_context_acquire(aio_context);
+        aio_enable_external(aio_context);
         bdrv_io_unplugged_end(bs);
         bdrv_parent_drained_end(bs);
         aio_context_release(aio_context);
     }
-    g_slist_free(aio_ctxs);
 
-    job = NULL;
     while ((job = block_job_next(job))) {
         AioContext *aio_context = blk_get_aio_context(job->blk);
 
@@ -346,6 +361,12 @@ void bdrv_drain_all(void)
     }
 }
 
+void bdrv_drain_all(void)
+{
+    bdrv_drain_all_begin();
+    bdrv_drain_all_end();
+}
+
 /**
  * Remove an active request from the tracked requests list
  *