author     Kevin Wolf <kwolf@redhat.com>  2018-03-28 18:29:18 +0200
committer  Kevin Wolf <kwolf@redhat.com>  2018-06-18 15:03:25 +0200
commit     0f12264e7a41458179ad10276a7c33c72024861a (patch)
tree       653cbbecadd1bb2e6f9b8c220e5a72a1285e2dce /block/io.c
parent     6cd5c9d7b2df93ef54144f170d4c908934a4767f (diff)
block: Allow graph changes in bdrv_drain_all_begin/end sections
bdrv_drain_all_*() used bdrv_next() to iterate over all root nodes and did a
subtree drain for each of them. This works fine as long as the graph is
static, but sadly, reality looks different.

If the graph changes so that root nodes are added or removed, we would have
to compensate for this. bdrv_next() returns each root node only once even if
it's the root node for multiple BlockBackends or for a monitor-owned block
driver tree, which would only complicate things.

The much easier and more obviously correct way is to fundamentally change the
way the functions work: Iterate over all BlockDriverStates, no matter who
owns them, and drain them individually. Compensation is only necessary when a
new BDS is created inside a drain_all section. Removal of a BDS doesn't
require any action because it's gone afterwards anyway.

Signed-off-by: Kevin Wolf <kwolf@redhat.com>
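The compensation mentioned above lives outside this file, in block.c. A
minimal sketch of the idea, assuming the bdrv_drain_all_count counter exported
by this patch and the existing bdrv_drained_begin() helper; the regular
initialization of the new node is elided:

BlockDriverState *bdrv_new(void)
{
    BlockDriverState *bs = g_new0(BlockDriverState, 1);
    unsigned int i;

    /* ... regular field and queue initialization elided ... */

    /* A node created inside a drain_all section must start out as quiesced
     * as every other node: drain it once per active bdrv_drain_all_begin()
     * section. */
    for (i = 0; i < bdrv_drain_all_count; i++) {
        bdrv_drained_begin(bs);
    }

    /* ... insertion into the list of all BlockDriverStates elided ... */
    return bs;
}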
Diffstat (limited to 'block/io.c')
-rw-r--r--  block/io.c  60
1 file changed, 46 insertions, 14 deletions
diff --git a/block/io.c b/block/io.c
index 1834a14..ef4fedd 100644
--- a/block/io.c
+++ b/block/io.c
@@ -38,6 +38,8 @@
 /* Maximum bounce buffer for copy-on-read and write zeroes, in bytes */
 #define MAX_BOUNCE_BUFFER (32768 << BDRV_SECTOR_BITS)

+static AioWait drain_all_aio_wait;
+
 static int coroutine_fn bdrv_co_do_pwrite_zeroes(BlockDriverState *bs,
     int64_t offset, int bytes, BdrvRequestFlags flags);
@@ -472,6 +474,29 @@ static void bdrv_drain_assert_idle(BlockDriverState *bs)
     }
 }

+unsigned int bdrv_drain_all_count = 0;
+
+static bool bdrv_drain_all_poll(void)
+{
+    BlockDriverState *bs = NULL;
+    bool result = false;
+
+    /* Execute pending BHs first (may modify the graph) and check everything
+     * else only after the BHs have executed. */
+    while (aio_poll(qemu_get_aio_context(), false));
+
+    /* bdrv_drain_poll() can't make changes to the graph and we are holding the
+     * main AioContext lock, so iterating bdrv_next_all_states() is safe. */
+    while ((bs = bdrv_next_all_states(bs))) {
+        AioContext *aio_context = bdrv_get_aio_context(bs);
+        aio_context_acquire(aio_context);
+        result |= bdrv_drain_poll(bs, false, NULL, true);
+        aio_context_release(aio_context);
+    }
+
+    return result;
+}
+
 /*
  * Wait for pending requests to complete across all BlockDriverStates
  *
@@ -486,45 +511,51 @@ static void bdrv_drain_assert_idle(BlockDriverState *bs)
  */
 void bdrv_drain_all_begin(void)
 {
-    BlockDriverState *bs;
-    BdrvNextIterator it;
+    BlockDriverState *bs = NULL;

     if (qemu_in_coroutine()) {
-        bdrv_co_yield_to_drain(NULL, true, false, NULL, false, true);
+        bdrv_co_yield_to_drain(NULL, true, false, NULL, true, true);
         return;
     }

-    /* BDRV_POLL_WHILE() for a node can only be called from its own I/O thread
-     * or the main loop AioContext. We potentially use BDRV_POLL_WHILE() on
-     * nodes in several different AioContexts, so make sure we're in the main
-     * context. */
+    /* AIO_WAIT_WHILE() with a NULL context can only be called from the main
+     * loop AioContext, so make sure we're in the main context. */
     assert(qemu_get_current_aio_context() == qemu_get_aio_context());
+    assert(bdrv_drain_all_count < INT_MAX);
+    bdrv_drain_all_count++;

-    for (bs = bdrv_first(&it); bs; bs = bdrv_next(&it)) {
+    /* Quiesce all nodes, without polling in-flight requests yet. The graph
+     * cannot change during this loop. */
+    while ((bs = bdrv_next_all_states(bs))) {
         AioContext *aio_context = bdrv_get_aio_context(bs);

         aio_context_acquire(aio_context);
-        bdrv_do_drained_begin(bs, true, NULL, false, true);
+        bdrv_do_drained_begin(bs, false, NULL, true, false);
         aio_context_release(aio_context);
     }

-    for (bs = bdrv_first(&it); bs; bs = bdrv_next(&it)) {
+    /* Now poll the in-flight requests */
+    AIO_WAIT_WHILE(&drain_all_aio_wait, NULL, bdrv_drain_all_poll());
+
+    while ((bs = bdrv_next_all_states(bs))) {
         bdrv_drain_assert_idle(bs);
     }
 }

 void bdrv_drain_all_end(void)
 {
-    BlockDriverState *bs;
-    BdrvNextIterator it;
+    BlockDriverState *bs = NULL;

-    for (bs = bdrv_first(&it); bs; bs = bdrv_next(&it)) {
+    while ((bs = bdrv_next_all_states(bs))) {
         AioContext *aio_context = bdrv_get_aio_context(bs);

         aio_context_acquire(aio_context);
-        bdrv_do_drained_end(bs, true, NULL, false);
+        bdrv_do_drained_end(bs, false, NULL, true);
         aio_context_release(aio_context);
     }
+
+    assert(bdrv_drain_all_count > 0);
+    bdrv_drain_all_count--;
 }

 void bdrv_drain_all(void)
@@ -647,6 +678,7 @@ void bdrv_inc_in_flight(BlockDriverState *bs)
 void bdrv_wakeup(BlockDriverState *bs)
 {
     aio_wait_kick(bdrv_get_aio_wait(bs));
+    aio_wait_kick(&drain_all_aio_wait);
 }

 void bdrv_dec_in_flight(BlockDriverState *bs)
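Taken together: bdrv_drain_all_begin() first quiesces every node without
polling, then spins on the single global drain_all_aio_wait until
bdrv_drain_all_poll() reports no remaining activity, and every completing
request kicks that waiter via bdrv_wakeup(). A caller-side sketch of the
resulting contract; the helper name and the graph operation are hypothetical:

static void example_reconfigure_graph(void)
{
    /* When this returns, no node in the graph has in-flight requests and
     * no new ones can be submitted. */
    bdrv_drain_all_begin();

    /* Adding or removing BlockDriverStates is safe here: begin/end iterate
     * over all states, newly created nodes are drained by the
     * bdrv_drain_all_count compensation sketched above, and removed nodes
     * need no action. */

    bdrv_drain_all_end();
}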