author    | Stefan Hajnoczi <stefanha@redhat.com> | 2025-06-05 11:00:12 -0400
committer | Stefan Hajnoczi <stefanha@redhat.com> | 2025-06-05 11:00:12 -0400
commit    | 83c2201fc47bd0dfa656bde7202bd0e2539d54a0
tree      | 199460c0e2afd09259a4c35d5c220d2396e20959
parent    | f8a113701dd2d28f3bedb216e59125ddcb77fd05
parent    | eef2dd03f948a512499775043bdc0c5c88d8a2dd
Merge tag 'for-upstream' of https://repo.or.cz/qemu/kevin into staging
Block layer patches
- Deadlock fixes: Do not drain while holding the graph lock
- qdev-properties-system: Fix assertion failure in set_drive_helper()
- iotests: fix 240
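
The deadlock class addressed here comes from draining, which can poll and run callbacks that modify the block graph, while the graph lock is already held. The fix applied throughout the diff below is to drain first, then take the graph writer lock, and undrain only after unlocking again. A condensed sketch of that ordering, assuming the QEMU block-layer internals from this tree (`example_detach_child()` is a made-up name, not part of the series):

```c
#include "block/block_int.h"  /* QEMU-internal header; illustration only */

/*
 * Sketch of the locking order these patches enforce: drain all nodes while
 * the graph lock is NOT held, take the writer lock for the actual graph
 * change, then unlock before ending the drained section.
 */
static void example_detach_child(BlockDriverState *bs, BdrvChild *child)
{
    bdrv_drain_all_begin();      /* GRAPH_UNLOCKED: may poll, must not hold the lock */
    bdrv_graph_wrlock();

    bdrv_unref_child(bs, child); /* graph modification under the writer lock */

    bdrv_graph_wrunlock();
    bdrv_drain_all_end();        /* paired with bdrv_drain_all_begin() above */
}
```

This is the same shape as the new hunks in, for example, blk_log_writes_close() and quorum_close().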
# -----BEGIN PGP SIGNATURE-----
#
# iQJFBAABCgAvFiEE3D3rFZqa+V09dFb+fwmycsiPL9YFAmhAiH8RHGt3b2xmQHJl
# ZGhhdC5jb20ACgkQfwmycsiPL9asxBAAniUnM2ysT85wgi1+KUVcURYJWAOTyHUK
# CxKQFXALeNYb1of4OEvFGxTJV9fIi7lY2P6Fh+ANUvAk6r8mGk7PKTV7qyJcv0r0
# Xu5BXPRBtOVeQ1QtWc36NhUJ5Oo9AZdutXKuHtt0FjlL5bxOvwY40ddDhQcg0dWF
# H4Eozi9oPACCsjbkHU0JAkMAS9Vvn4FNuDjzCfu1AlAKQnY64xRwVQwQeOC5WzvB
# 6vUs0W/ZZS5T30rtdgXtRA+00CIPC00cF1DbeL9cZEN4Rkux7JPoosCQq8lZ9YsR
# EPsHbSve6cgJP/KB1UzBjcoKI4e+8Z3KBaYOC30F65dU6e7N1wZMjCHHK/gt5bxs
# 48qWautEyot1VKBHeXZQkqR8OXk5GlyfMnQfPre6gMaAJ4H6z8GHBwxidsB9G1Da
# 27tmpZP1DyPjcH0Btz+DmhFTABaG6pgRamDmdHNJdkBX1qydZ6A1UYKf0KZRsEIu
# B43dIJ4fL4riTc+vkR0SlakQvGNAvv559uvblkDp0/2wdUzE1U7g8+tuSrsP5I1x
# BMjPPgdV5iiPvOMEO0dl1HLGZi7ORd/3FJfzvWkzWlnw6ByArXmHceXGIvhgoyjR
# iT6XwmJ85Sl0F/3HlXgcgI86AnpieE0PE8nw3gBuw0rZFJChQuHUzxokLZ88U9VQ
# UePwpYPDn58=
# =tetv
# -----END PGP SIGNATURE-----
# gpg: Signature made Wed 04 Jun 2025 13:55:11 EDT
# gpg: using RSA key DC3DEB159A9AF95D3D7456FE7F09B272C88F2FD6
# gpg: issuer "kwolf@redhat.com"
# gpg: Good signature from "Kevin Wolf <kwolf@redhat.com>" [full]
# Primary key fingerprint: DC3D EB15 9A9A F95D 3D74 56FE 7F09 B272 C88F 2FD6
* tag 'for-upstream' of https://repo.or.cz/qemu/kevin: (24 commits)
hw/core/qdev-properties-system: Add missing return in set_drive_helper()
iotests: fix 240
block/io: remove duplicate GLOBAL_STATE_CODE() in bdrv_do_drained_end()
iotests/graph-changes-while-io: add test case with removal of lower snapshot
iotests/graph-changes-while-io: remove image file after test
block: mark bdrv_drained_begin() and friends as GRAPH_UNLOCKED
blockdev: drain while unlocked in external_snapshot_action()
blockdev: drain while unlocked in internal_snapshot_action()
block: move drain outside of quorum_del_child()
block: move drain outside of bdrv_root_unref_child()
block: move drain outside of quorum_add_child()
block: move drain outside of bdrv_attach_child()
block: move drain outside of bdrv_root_attach_child()
block: move drain outside of bdrv_set_backing_hd_drained()
block: move drain outside of bdrv_attach_child_common(_abort)()
block: move drain outside of bdrv_try_change_aio_context()
block: move drain outside of bdrv_change_aio_context() and mark GRAPH_RDLOCK
block: mark bdrv_child_change_aio_context() GRAPH_RDLOCK
block: mark change_aio_ctx() callback and instances as GRAPH_RDLOCK(_PTR)
block: mark bdrv_parent_change_aio_context() GRAPH_RDLOCK
...
Signed-off-by: Stefan Hajnoczi <stefanha@redhat.com>
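
The long run of "block: move drain outside of ..." commits converges on one API split, visible in include/block/block-global-state.h below: helpers that walk the graph (bdrv_child_change_aio_context(), bdrv_change_aio_context(), the change_aio_ctx callbacks) are now GRAPH_RDLOCK and require that the affected nodes are already drained, while a thin GRAPH_UNLOCKED wrapper does the draining and locking for outside callers. The wrapper added to block.c reduces to the following (comment added here for orientation):

```c
int bdrv_try_change_aio_context(BlockDriverState *bs, AioContext *ctx,
                                BdrvChild *ignore_child, Error **errp)
{
    int ret;

    GLOBAL_STATE_CODE();

    /* Drain and lock around the GRAPH_RDLOCK worker variant. */
    bdrv_drain_all_begin();
    bdrv_graph_rdlock_main_loop();
    ret = bdrv_try_change_aio_context_locked(bs, ctx, ignore_child, errp);
    bdrv_graph_rdunlock_main_loop();
    bdrv_drain_all_end();

    return ret;
}
```

Callers that already hold the reader lock and have drained the nodes, such as bdrv_attach_child_common() and bdrv_root_unref_child(), call bdrv_try_change_aio_context_locked() directly.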
-rw-r--r-- | block.c | 235
-rw-r--r-- | block/backup.c | 2
-rw-r--r-- | block/blklogwrites.c | 4
-rw-r--r-- | block/blkverify.c | 2
-rw-r--r-- | block/block-backend.c | 10
-rw-r--r-- | block/commit.c | 4
-rw-r--r-- | block/io.c | 1
-rw-r--r-- | block/mirror.c | 5
-rw-r--r-- | block/qcow2.c | 4
-rw-r--r-- | block/quorum.c | 4
-rw-r--r-- | block/replication.c | 7
-rw-r--r-- | block/snapshot.c | 28
-rw-r--r-- | block/stream.c | 10
-rw-r--r-- | block/vmdk.c | 10
-rw-r--r-- | blockdev.c | 78
-rw-r--r-- | blockjob.c | 12
-rw-r--r-- | hw/core/qdev-properties-system.c | 1
-rw-r--r-- | include/block/block-global-state.h | 19
-rw-r--r-- | include/block/block-io.h | 2
-rw-r--r-- | include/block/block_int-common.h | 32
-rw-r--r-- | include/block/blockjob.h | 2
-rw-r--r-- | qemu-img.c | 2
-rwxr-xr-x | tests/qemu-iotests/240 | 2
-rw-r--r-- | tests/qemu-iotests/240.out | 4
-rwxr-xr-x | tests/qemu-iotests/tests/graph-changes-while-io | 102
-rw-r--r-- | tests/qemu-iotests/tests/graph-changes-while-io.out | 4
-rw-r--r-- | tests/unit/test-bdrv-drain.c | 24
-rw-r--r-- | tests/unit/test-bdrv-graph-mod.c | 10
28 files changed, 475 insertions, 145 deletions
@@ -106,9 +106,9 @@ static void bdrv_reopen_abort(BDRVReopenState *reopen_state); static bool bdrv_backing_overridden(BlockDriverState *bs); -static bool bdrv_change_aio_context(BlockDriverState *bs, AioContext *ctx, - GHashTable *visited, Transaction *tran, - Error **errp); +static bool GRAPH_RDLOCK +bdrv_change_aio_context(BlockDriverState *bs, AioContext *ctx, + GHashTable *visited, Transaction *tran, Error **errp); /* If non-zero, use only whitelisted block drivers */ static int use_bdrv_whitelist; @@ -1226,9 +1226,10 @@ static int bdrv_child_cb_inactivate(BdrvChild *child) return 0; } -static bool bdrv_child_cb_change_aio_ctx(BdrvChild *child, AioContext *ctx, - GHashTable *visited, Transaction *tran, - Error **errp) +static bool GRAPH_RDLOCK +bdrv_child_cb_change_aio_ctx(BdrvChild *child, AioContext *ctx, + GHashTable *visited, Transaction *tran, + Error **errp) { BlockDriverState *bs = child->opaque; return bdrv_change_aio_context(bs, ctx, visited, tran, errp); @@ -1720,12 +1721,14 @@ bdrv_open_driver(BlockDriverState *bs, BlockDriver *drv, const char *node_name, open_failed: bs->drv = NULL; + bdrv_drain_all_begin(); bdrv_graph_wrlock(); if (bs->file != NULL) { bdrv_unref_child(bs, bs->file); assert(!bs->file); } bdrv_graph_wrunlock(); + bdrv_drain_all_end(); g_free(bs->opaque); bs->opaque = NULL; @@ -3027,7 +3030,8 @@ static void GRAPH_WRLOCK bdrv_attach_child_common_abort(void *opaque) bdrv_replace_child_noperm(s->child, NULL); if (bdrv_get_aio_context(bs) != s->old_child_ctx) { - bdrv_try_change_aio_context(bs, s->old_child_ctx, NULL, &error_abort); + bdrv_try_change_aio_context_locked(bs, s->old_child_ctx, NULL, + &error_abort); } if (bdrv_child_get_parent_aio_context(s->child) != s->old_parent_ctx) { @@ -3069,6 +3073,9 @@ static TransactionActionDrv bdrv_attach_child_common_drv = { * * Both @parent_bs and @child_bs can move to a different AioContext in this * function. + * + * All block nodes must be drained before this function is called until after + * the transaction is finalized. */ static BdrvChild * GRAPH_WRLOCK bdrv_attach_child_common(BlockDriverState *child_bs, @@ -3112,8 +3119,8 @@ bdrv_attach_child_common(BlockDriverState *child_bs, parent_ctx = bdrv_child_get_parent_aio_context(new_child); if (child_ctx != parent_ctx) { Error *local_err = NULL; - int ret = bdrv_try_change_aio_context(child_bs, parent_ctx, NULL, - &local_err); + int ret = bdrv_try_change_aio_context_locked(child_bs, parent_ctx, NULL, + &local_err); if (ret < 0 && child_class->change_aio_ctx) { Transaction *aio_ctx_tran = tran_new(); @@ -3179,6 +3186,9 @@ bdrv_attach_child_common(BlockDriverState *child_bs, * * After calling this function, the transaction @tran may only be completed * while holding a writer lock for the graph. + * + * All block nodes must be drained before this function is called until after + * the transaction is finalized. */ static BdrvChild * GRAPH_WRLOCK bdrv_attach_child_noperm(BlockDriverState *parent_bs, @@ -3220,6 +3230,8 @@ bdrv_attach_child_noperm(BlockDriverState *parent_bs, * * On failure NULL is returned, errp is set and the reference to * child_bs is also dropped. + * + * All block nodes must be drained. */ BdrvChild *bdrv_root_attach_child(BlockDriverState *child_bs, const char *child_name, @@ -3259,6 +3271,8 @@ out: * * On failure NULL is returned, errp is set and the reference to * child_bs is also dropped. + * + * All block nodes must be drained. 
*/ BdrvChild *bdrv_attach_child(BlockDriverState *parent_bs, BlockDriverState *child_bs, @@ -3293,7 +3307,11 @@ out: return ret < 0 ? NULL : child; } -/* Callers must ensure that child->frozen is false. */ +/* + * Callers must ensure that child->frozen is false. + * + * All block nodes must be drained. + */ void bdrv_root_unref_child(BdrvChild *child) { BlockDriverState *child_bs = child->bs; @@ -3314,8 +3332,8 @@ void bdrv_root_unref_child(BdrvChild *child) * When the parent requiring a non-default AioContext is removed, the * node moves back to the main AioContext */ - bdrv_try_change_aio_context(child_bs, qemu_get_aio_context(), NULL, - NULL); + bdrv_try_change_aio_context_locked(child_bs, qemu_get_aio_context(), + NULL, NULL); } bdrv_schedule_unref(child_bs); @@ -3388,7 +3406,11 @@ bdrv_unset_inherits_from(BlockDriverState *root, BdrvChild *child, } } -/* Callers must ensure that child->frozen is false. */ +/* + * Callers must ensure that child->frozen is false. + * + * All block nodes must be drained. + */ void bdrv_unref_child(BlockDriverState *parent, BdrvChild *child) { GLOBAL_STATE_CODE(); @@ -3453,6 +3475,9 @@ static BdrvChildRole bdrv_backing_role(BlockDriverState *bs) * * After calling this function, the transaction @tran may only be completed * while holding a writer lock for the graph. + * + * All block nodes must be drained before this function is called until after + * the transaction is finalized. */ static int GRAPH_WRLOCK bdrv_set_file_or_backing_noperm(BlockDriverState *parent_bs, @@ -3545,8 +3570,7 @@ out: * Both @bs and @backing_hd can move to a different AioContext in this * function. * - * If a backing child is already present (i.e. we're detaching a node), that - * child node must be drained. + * All block nodes must be drained. */ int bdrv_set_backing_hd_drained(BlockDriverState *bs, BlockDriverState *backing_hd, @@ -3575,21 +3599,14 @@ out: int bdrv_set_backing_hd(BlockDriverState *bs, BlockDriverState *backing_hd, Error **errp) { - BlockDriverState *drain_bs; int ret; GLOBAL_STATE_CODE(); - bdrv_graph_rdlock_main_loop(); - drain_bs = bs->backing ? bs->backing->bs : bs; - bdrv_graph_rdunlock_main_loop(); - - bdrv_ref(drain_bs); - bdrv_drained_begin(drain_bs); + bdrv_drain_all_begin(); bdrv_graph_wrlock(); ret = bdrv_set_backing_hd_drained(bs, backing_hd, errp); bdrv_graph_wrunlock(); - bdrv_drained_end(drain_bs); - bdrv_unref(drain_bs); + bdrv_drain_all_end(); return ret; } @@ -3780,10 +3797,12 @@ static BdrvChild *bdrv_open_child_common(const char *filename, return NULL; } + bdrv_drain_all_begin(); bdrv_graph_wrlock(); child = bdrv_attach_child(parent, bs, bdref_key, child_class, child_role, errp); bdrv_graph_wrunlock(); + bdrv_drain_all_end(); return child; } @@ -4358,9 +4377,7 @@ bdrv_recurse_has_child(BlockDriverState *bs, BlockDriverState *child) * returns a pointer to bs_queue, which is either the newly allocated * bs_queue, or the existing bs_queue being used. * - * bs is drained here and undrained by bdrv_reopen_queue_free(). - * - * To be called with bs->aio_context locked. + * bs must be drained. */ static BlockReopenQueue * GRAPH_RDLOCK bdrv_reopen_queue_child(BlockReopenQueue *bs_queue, BlockDriverState *bs, @@ -4379,12 +4396,7 @@ bdrv_reopen_queue_child(BlockReopenQueue *bs_queue, BlockDriverState *bs, GLOBAL_STATE_CODE(); - /* - * Strictly speaking, draining is illegal under GRAPH_RDLOCK. We know that - * we've been called with bdrv_graph_rdlock_main_loop(), though, so it's ok - * in practice. 
- */ - bdrv_drained_begin(bs); + assert(bs->quiesce_counter > 0); if (bs_queue == NULL) { bs_queue = g_new0(BlockReopenQueue, 1); @@ -4519,12 +4531,17 @@ bdrv_reopen_queue_child(BlockReopenQueue *bs_queue, BlockDriverState *bs, return bs_queue; } -/* To be called with bs->aio_context locked */ BlockReopenQueue *bdrv_reopen_queue(BlockReopenQueue *bs_queue, BlockDriverState *bs, QDict *options, bool keep_old_opts) { GLOBAL_STATE_CODE(); + + if (bs_queue == NULL) { + /* Paired with bdrv_drain_all_end() in bdrv_reopen_queue_free(). */ + bdrv_drain_all_begin(); + } + GRAPH_RDLOCK_GUARD_MAINLOOP(); return bdrv_reopen_queue_child(bs_queue, bs, options, NULL, 0, false, @@ -4537,12 +4554,14 @@ void bdrv_reopen_queue_free(BlockReopenQueue *bs_queue) if (bs_queue) { BlockReopenQueueEntry *bs_entry, *next; QTAILQ_FOREACH_SAFE(bs_entry, bs_queue, entry, next) { - bdrv_drained_end(bs_entry->state.bs); qobject_unref(bs_entry->state.explicit_options); qobject_unref(bs_entry->state.options); g_free(bs_entry); } g_free(bs_queue); + + /* Paired with bdrv_drain_all_begin() in bdrv_reopen_queue(). */ + bdrv_drain_all_end(); } } @@ -4709,6 +4728,9 @@ int bdrv_reopen_set_read_only(BlockDriverState *bs, bool read_only, * Return 0 on success, otherwise return < 0 and set @errp. * * @reopen_state->bs can move to a different AioContext in this function. + * + * All block nodes must be drained before this function is called until after + * the transaction is finalized. */ static int GRAPH_UNLOCKED bdrv_reopen_parse_file_or_backing(BDRVReopenState *reopen_state, @@ -4802,7 +4824,7 @@ bdrv_reopen_parse_file_or_backing(BDRVReopenState *reopen_state, if (old_child_bs) { bdrv_ref(old_child_bs); - bdrv_drained_begin(old_child_bs); + assert(old_child_bs->quiesce_counter > 0); } bdrv_graph_rdunlock_main_loop(); @@ -4814,7 +4836,6 @@ bdrv_reopen_parse_file_or_backing(BDRVReopenState *reopen_state, bdrv_graph_wrunlock(); if (old_child_bs) { - bdrv_drained_end(old_child_bs); bdrv_unref(old_child_bs); } @@ -4843,6 +4864,9 @@ out_rdlock: * * After calling this function, the transaction @change_child_tran may only be * completed while holding a writer lock for the graph. + * + * All block nodes must be drained before this function is called until after + * the transaction is finalized. 
*/ static int GRAPH_UNLOCKED bdrv_reopen_prepare(BDRVReopenState *reopen_state, BlockReopenQueue *queue, @@ -5156,6 +5180,7 @@ static void bdrv_close(BlockDriverState *bs) bs->drv = NULL; } + bdrv_drain_all_begin(); bdrv_graph_wrlock(); QLIST_FOREACH_SAFE(child, &bs->children, next, next) { bdrv_unref_child(bs, child); @@ -5164,6 +5189,7 @@ static void bdrv_close(BlockDriverState *bs) assert(!bs->backing); assert(!bs->file); bdrv_graph_wrunlock(); + bdrv_drain_all_end(); g_free(bs->opaque); bs->opaque = NULL; @@ -5489,9 +5515,7 @@ int bdrv_append(BlockDriverState *bs_new, BlockDriverState *bs_top, assert(!bs_new->backing); bdrv_graph_rdunlock_main_loop(); - bdrv_drained_begin(bs_top); - bdrv_drained_begin(bs_new); - + bdrv_drain_all_begin(); bdrv_graph_wrlock(); child = bdrv_attach_child_noperm(bs_new, bs_top, "backing", @@ -5513,9 +5537,7 @@ out: bdrv_refresh_limits(bs_top, NULL, NULL); bdrv_graph_wrunlock(); - - bdrv_drained_end(bs_top); - bdrv_drained_end(bs_new); + bdrv_drain_all_end(); return ret; } @@ -6989,6 +7011,8 @@ bdrv_inactivate_recurse(BlockDriverState *bs, bool top_level) GLOBAL_STATE_CODE(); + assert(bs->quiesce_counter > 0); + if (!bs->drv) { return -ENOMEDIUM; } @@ -7032,9 +7056,7 @@ bdrv_inactivate_recurse(BlockDriverState *bs, bool top_level) return -EPERM; } - bdrv_drained_begin(bs); bs->open_flags |= BDRV_O_INACTIVE; - bdrv_drained_end(bs); /* * Update permissions, they may differ for inactive nodes. @@ -7059,20 +7081,26 @@ int bdrv_inactivate(BlockDriverState *bs, Error **errp) int ret; GLOBAL_STATE_CODE(); - GRAPH_RDLOCK_GUARD_MAINLOOP(); + + bdrv_drain_all_begin(); + bdrv_graph_rdlock_main_loop(); if (bdrv_has_bds_parent(bs, true)) { error_setg(errp, "Node has active parent node"); - return -EPERM; + ret = -EPERM; + goto out; } ret = bdrv_inactivate_recurse(bs, true); if (ret < 0) { error_setg_errno(errp, -ret, "Failed to inactivate node"); - return ret; + goto out; } - return 0; +out: + bdrv_graph_rdunlock_main_loop(); + bdrv_drain_all_end(); + return ret; } int bdrv_inactivate_all(void) @@ -7082,7 +7110,9 @@ int bdrv_inactivate_all(void) int ret = 0; GLOBAL_STATE_CODE(); - GRAPH_RDLOCK_GUARD_MAINLOOP(); + + bdrv_drain_all_begin(); + bdrv_graph_rdlock_main_loop(); for (bs = bdrv_first(&it); bs; bs = bdrv_next(&it)) { /* Nodes with BDS parents are covered by recursion from the last @@ -7098,6 +7128,9 @@ int bdrv_inactivate_all(void) } } + bdrv_graph_rdunlock_main_loop(); + bdrv_drain_all_end(); + return ret; } @@ -7278,10 +7311,6 @@ bool bdrv_op_blocker_is_empty(BlockDriverState *bs) return true; } -/* - * Must not be called while holding the lock of an AioContext other than the - * current one. - */ void bdrv_img_create(const char *filename, const char *fmt, const char *base_filename, const char *base_fmt, char *options, uint64_t img_size, int flags, bool quiet, @@ -7568,10 +7597,21 @@ typedef struct BdrvStateSetAioContext { BlockDriverState *bs; } BdrvStateSetAioContext; -static bool bdrv_parent_change_aio_context(BdrvChild *c, AioContext *ctx, - GHashTable *visited, - Transaction *tran, - Error **errp) +/* + * Changes the AioContext of @child to @ctx and recursively for the associated + * block nodes and all their children and parents. Returns true if the change is + * possible and the transaction @tran can be continued. Returns false and sets + * @errp if not and the transaction must be aborted. + * + * @visited will accumulate all visited BdrvChild objects. The caller is + * responsible for freeing the list afterwards. 
+ * + * Must be called with the affected block nodes drained. + */ +static bool GRAPH_RDLOCK +bdrv_parent_change_aio_context(BdrvChild *c, AioContext *ctx, + GHashTable *visited, Transaction *tran, + Error **errp) { GLOBAL_STATE_CODE(); if (g_hash_table_contains(visited, c)) { @@ -7596,6 +7636,17 @@ static bool bdrv_parent_change_aio_context(BdrvChild *c, AioContext *ctx, return true; } +/* + * Changes the AioContext of @c->bs to @ctx and recursively for all its children + * and parents. Returns true if the change is possible and the transaction @tran + * can be continued. Returns false and sets @errp if not and the transaction + * must be aborted. + * + * @visited will accumulate all visited BdrvChild objects. The caller is + * responsible for freeing the list afterwards. + * + * Must be called with the affected block nodes drained. + */ bool bdrv_child_change_aio_context(BdrvChild *c, AioContext *ctx, GHashTable *visited, Transaction *tran, Error **errp) @@ -7611,10 +7662,6 @@ bool bdrv_child_change_aio_context(BdrvChild *c, AioContext *ctx, static void bdrv_set_aio_context_clean(void *opaque) { BdrvStateSetAioContext *state = (BdrvStateSetAioContext *) opaque; - BlockDriverState *bs = (BlockDriverState *) state->bs; - - /* Paired with bdrv_drained_begin in bdrv_change_aio_context() */ - bdrv_drained_end(bs); g_free(state); } @@ -7642,10 +7689,12 @@ static TransactionActionDrv set_aio_context = { * * @visited will accumulate all visited BdrvChild objects. The caller is * responsible for freeing the list afterwards. + * + * @bs must be drained. */ -static bool bdrv_change_aio_context(BlockDriverState *bs, AioContext *ctx, - GHashTable *visited, Transaction *tran, - Error **errp) +static bool GRAPH_RDLOCK +bdrv_change_aio_context(BlockDriverState *bs, AioContext *ctx, + GHashTable *visited, Transaction *tran, Error **errp) { BdrvChild *c; BdrvStateSetAioContext *state; @@ -7656,21 +7705,17 @@ static bool bdrv_change_aio_context(BlockDriverState *bs, AioContext *ctx, return true; } - bdrv_graph_rdlock_main_loop(); QLIST_FOREACH(c, &bs->parents, next_parent) { if (!bdrv_parent_change_aio_context(c, ctx, visited, tran, errp)) { - bdrv_graph_rdunlock_main_loop(); return false; } } QLIST_FOREACH(c, &bs->children, next) { if (!bdrv_child_change_aio_context(c, ctx, visited, tran, errp)) { - bdrv_graph_rdunlock_main_loop(); return false; } } - bdrv_graph_rdunlock_main_loop(); state = g_new(BdrvStateSetAioContext, 1); *state = (BdrvStateSetAioContext) { @@ -7678,8 +7723,7 @@ static bool bdrv_change_aio_context(BlockDriverState *bs, AioContext *ctx, .bs = bs, }; - /* Paired with bdrv_drained_end in bdrv_set_aio_context_clean() */ - bdrv_drained_begin(bs); + assert(bs->quiesce_counter > 0); tran_add(tran, &set_aio_context, state); @@ -7692,9 +7736,13 @@ static bool bdrv_change_aio_context(BlockDriverState *bs, AioContext *ctx, * * If ignore_child is not NULL, that child (and its subgraph) will not * be touched. + * + * Called with the graph lock held. + * + * Called while all bs are drained. */ -int bdrv_try_change_aio_context(BlockDriverState *bs, AioContext *ctx, - BdrvChild *ignore_child, Error **errp) +int bdrv_try_change_aio_context_locked(BlockDriverState *bs, AioContext *ctx, + BdrvChild *ignore_child, Error **errp) { Transaction *tran; GHashTable *visited; @@ -7703,9 +7751,9 @@ int bdrv_try_change_aio_context(BlockDriverState *bs, AioContext *ctx, /* * Recursion phase: go through all nodes of the graph. 
- * Take care of checking that all nodes support changing AioContext - * and drain them, building a linear list of callbacks to run if everything - * is successful (the transaction itself). + * Take care of checking that all nodes support changing AioContext, + * building a linear list of callbacks to run if everything is successful + * (the transaction itself). */ tran = tran_new(); visited = g_hash_table_new(NULL, NULL); @@ -7732,6 +7780,29 @@ int bdrv_try_change_aio_context(BlockDriverState *bs, AioContext *ctx, return 0; } +/* + * Change bs's and recursively all of its parents' and children's AioContext + * to the given new context, returning an error if that isn't possible. + * + * If ignore_child is not NULL, that child (and its subgraph) will not + * be touched. + */ +int bdrv_try_change_aio_context(BlockDriverState *bs, AioContext *ctx, + BdrvChild *ignore_child, Error **errp) +{ + int ret; + + GLOBAL_STATE_CODE(); + + bdrv_drain_all_begin(); + bdrv_graph_rdlock_main_loop(); + ret = bdrv_try_change_aio_context_locked(bs, ctx, ignore_child, errp); + bdrv_graph_rdunlock_main_loop(); + bdrv_drain_all_end(); + + return ret; +} + void bdrv_add_aio_context_notifier(BlockDriverState *bs, void (*attached_aio_context)(AioContext *new_context, void *opaque), void (*detach_aio_context)(void *opaque), void *opaque) @@ -8159,8 +8230,10 @@ char *bdrv_dirname(BlockDriverState *bs, Error **errp) } /* - * Hot add/remove a BDS's child. So the user can take a child offline when - * it is broken and take a new child online + * Hot add a BDS's child. Used in combination with bdrv_del_child, so the user + * can take a child offline when it is broken and take a new child online. + * + * All block nodes must be drained. */ void bdrv_add_child(BlockDriverState *parent_bs, BlockDriverState *child_bs, Error **errp) @@ -8200,6 +8273,12 @@ void bdrv_add_child(BlockDriverState *parent_bs, BlockDriverState *child_bs, parent_bs->drv->bdrv_add_child(parent_bs, child_bs, errp); } +/* + * Hot remove a BDS's child. Used in combination with bdrv_add_child, so the + * user can take a child offline when it is broken and take a new child online. + * + * All block nodes must be drained. 
+ */ void bdrv_del_child(BlockDriverState *parent_bs, BdrvChild *child, Error **errp) { BdrvChild *tmp; diff --git a/block/backup.c b/block/backup.c index 0151e84..909027c 100644 --- a/block/backup.c +++ b/block/backup.c @@ -498,10 +498,12 @@ BlockJob *backup_job_create(const char *job_id, BlockDriverState *bs, block_copy_set_speed(bcs, speed); /* Required permissions are taken by copy-before-write filter target */ + bdrv_drain_all_begin(); bdrv_graph_wrlock(); block_job_add_bdrv(&job->common, "target", target, 0, BLK_PERM_ALL, &error_abort); bdrv_graph_wrunlock(); + bdrv_drain_all_end(); return &job->common; diff --git a/block/blklogwrites.c b/block/blklogwrites.c index b0f78c4..70ac76f 100644 --- a/block/blklogwrites.c +++ b/block/blklogwrites.c @@ -281,9 +281,11 @@ static int blk_log_writes_open(BlockDriverState *bs, QDict *options, int flags, ret = 0; fail_log: if (ret < 0) { + bdrv_drain_all_begin(); bdrv_graph_wrlock(); bdrv_unref_child(bs, s->log_file); bdrv_graph_wrunlock(); + bdrv_drain_all_end(); s->log_file = NULL; qemu_mutex_destroy(&s->mutex); } @@ -296,10 +298,12 @@ static void blk_log_writes_close(BlockDriverState *bs) { BDRVBlkLogWritesState *s = bs->opaque; + bdrv_drain_all_begin(); bdrv_graph_wrlock(); bdrv_unref_child(bs, s->log_file); s->log_file = NULL; bdrv_graph_wrunlock(); + bdrv_drain_all_end(); qemu_mutex_destroy(&s->mutex); } diff --git a/block/blkverify.c b/block/blkverify.c index db79a36..3a71f74 100644 --- a/block/blkverify.c +++ b/block/blkverify.c @@ -151,10 +151,12 @@ static void blkverify_close(BlockDriverState *bs) { BDRVBlkverifyState *s = bs->opaque; + bdrv_drain_all_begin(); bdrv_graph_wrlock(); bdrv_unref_child(bs, s->test_file); s->test_file = NULL; bdrv_graph_wrunlock(); + bdrv_drain_all_end(); } static int64_t coroutine_fn GRAPH_RDLOCK diff --git a/block/block-backend.c b/block/block-backend.c index a402db1..68209bb 100644 --- a/block/block-backend.c +++ b/block/block-backend.c @@ -136,9 +136,9 @@ static void blk_root_drained_end(BdrvChild *child); static void blk_root_change_media(BdrvChild *child, bool load); static void blk_root_resize(BdrvChild *child); -static bool blk_root_change_aio_ctx(BdrvChild *child, AioContext *ctx, - GHashTable *visited, Transaction *tran, - Error **errp); +static bool GRAPH_RDLOCK +blk_root_change_aio_ctx(BdrvChild *child, AioContext *ctx, GHashTable *visited, + Transaction *tran, Error **errp); static char *blk_root_get_parent_desc(BdrvChild *child) { @@ -889,9 +889,11 @@ void blk_remove_bs(BlockBackend *blk) root = blk->root; blk->root = NULL; + bdrv_drain_all_begin(); bdrv_graph_wrlock(); bdrv_root_unref_child(root); bdrv_graph_wrunlock(); + bdrv_drain_all_end(); } /* @@ -904,6 +906,7 @@ int blk_insert_bs(BlockBackend *blk, BlockDriverState *bs, Error **errp) GLOBAL_STATE_CODE(); bdrv_ref(bs); + bdrv_drain_all_begin(); bdrv_graph_wrlock(); if ((bs->open_flags & BDRV_O_INACTIVE) && blk_can_inactivate(blk)) { @@ -919,6 +922,7 @@ int blk_insert_bs(BlockBackend *blk, BlockDriverState *bs, Error **errp) BDRV_CHILD_FILTERED | BDRV_CHILD_PRIMARY, perm, shared_perm, blk, errp); bdrv_graph_wrunlock(); + bdrv_drain_all_end(); if (blk->root == NULL) { return -EPERM; } diff --git a/block/commit.c b/block/commit.c index 7cc8c0f..6c4b736 100644 --- a/block/commit.c +++ b/block/commit.c @@ -392,6 +392,7 @@ void commit_start(const char *job_id, BlockDriverState *bs, * this is the responsibility of the interface (i.e. whoever calls * commit_start()). 
*/ + bdrv_drain_all_begin(); bdrv_graph_wrlock(); s->base_overlay = bdrv_find_overlay(top, base); assert(s->base_overlay); @@ -424,18 +425,21 @@ void commit_start(const char *job_id, BlockDriverState *bs, iter_shared_perms, errp); if (ret < 0) { bdrv_graph_wrunlock(); + bdrv_drain_all_end(); goto fail; } } if (bdrv_freeze_backing_chain(commit_top_bs, base, errp) < 0) { bdrv_graph_wrunlock(); + bdrv_drain_all_end(); goto fail; } s->chain_frozen = true; ret = block_job_add_bdrv(&s->common, "base", base, 0, BLK_PERM_ALL, errp); bdrv_graph_wrunlock(); + bdrv_drain_all_end(); if (ret < 0) { goto fail; @@ -413,7 +413,6 @@ static void bdrv_do_drained_end(BlockDriverState *bs, BdrvChild *parent) /* At this point, we should be always running in the main loop. */ GLOBAL_STATE_CODE(); assert(bs->quiesce_counter > 0); - GLOBAL_STATE_CODE(); /* Re-enable things in child-to-parent order */ old_quiesce_counter = qatomic_fetch_dec(&bs->quiesce_counter); diff --git a/block/mirror.c b/block/mirror.c index c2c5099..6e8caf4 100644 --- a/block/mirror.c +++ b/block/mirror.c @@ -2014,6 +2014,7 @@ static BlockJob *mirror_start_job( */ bdrv_disable_dirty_bitmap(s->dirty_bitmap); + bdrv_drain_all_begin(); bdrv_graph_wrlock(); ret = block_job_add_bdrv(&s->common, "source", bs, 0, BLK_PERM_WRITE_UNCHANGED | BLK_PERM_WRITE | @@ -2021,6 +2022,7 @@ static BlockJob *mirror_start_job( errp); if (ret < 0) { bdrv_graph_wrunlock(); + bdrv_drain_all_end(); goto fail; } @@ -2066,16 +2068,19 @@ static BlockJob *mirror_start_job( iter_shared_perms, errp); if (ret < 0) { bdrv_graph_wrunlock(); + bdrv_drain_all_end(); goto fail; } } if (bdrv_freeze_backing_chain(mirror_top_bs, target, errp) < 0) { bdrv_graph_wrunlock(); + bdrv_drain_all_end(); goto fail; } } bdrv_graph_wrunlock(); + bdrv_drain_all_end(); QTAILQ_INIT(&s->ops_in_flight); diff --git a/block/qcow2.c b/block/qcow2.c index 66fba89..45451a7 100644 --- a/block/qcow2.c +++ b/block/qcow2.c @@ -1895,7 +1895,9 @@ qcow2_do_open(BlockDriverState *bs, QDict *options, int flags, g_free(s->image_data_file); if (open_data_file && has_data_file(bs)) { bdrv_graph_co_rdunlock(); + bdrv_drain_all_begin(); bdrv_co_unref_child(bs, s->data_file); + bdrv_drain_all_end(); bdrv_graph_co_rdlock(); s->data_file = NULL; } @@ -2821,9 +2823,11 @@ qcow2_do_close(BlockDriverState *bs, bool close_data_file) if (close_data_file && has_data_file(bs)) { GLOBAL_STATE_CODE(); bdrv_graph_rdunlock_main_loop(); + bdrv_drain_all_begin(); bdrv_graph_wrlock(); bdrv_unref_child(bs, s->data_file); bdrv_graph_wrunlock(); + bdrv_drain_all_end(); s->data_file = NULL; bdrv_graph_rdlock_main_loop(); } diff --git a/block/quorum.c b/block/quorum.c index ed8ce80..cc3bc5f 100644 --- a/block/quorum.c +++ b/block/quorum.c @@ -1037,6 +1037,7 @@ static int quorum_open(BlockDriverState *bs, QDict *options, int flags, close_exit: /* cleanup on error */ + bdrv_drain_all_begin(); bdrv_graph_wrlock(); for (i = 0; i < s->num_children; i++) { if (!opened[i]) { @@ -1045,6 +1046,7 @@ close_exit: bdrv_unref_child(bs, s->children[i]); } bdrv_graph_wrunlock(); + bdrv_drain_all_end(); g_free(s->children); g_free(opened); exit: @@ -1057,11 +1059,13 @@ static void quorum_close(BlockDriverState *bs) BDRVQuorumState *s = bs->opaque; int i; + bdrv_drain_all_begin(); bdrv_graph_wrlock(); for (i = 0; i < s->num_children; i++) { bdrv_unref_child(bs, s->children[i]); } bdrv_graph_wrunlock(); + bdrv_drain_all_end(); g_free(s->children); } diff --git a/block/replication.c b/block/replication.c index 07f274d..0879718 100644 --- a/block/replication.c 
+++ b/block/replication.c @@ -540,6 +540,7 @@ static void replication_start(ReplicationState *rs, ReplicationMode mode, return; } + bdrv_drain_all_begin(); bdrv_graph_wrlock(); bdrv_ref(hidden_disk->bs); @@ -549,6 +550,7 @@ static void replication_start(ReplicationState *rs, ReplicationMode mode, if (local_err) { error_propagate(errp, local_err); bdrv_graph_wrunlock(); + bdrv_drain_all_end(); return; } @@ -559,6 +561,7 @@ static void replication_start(ReplicationState *rs, ReplicationMode mode, if (local_err) { error_propagate(errp, local_err); bdrv_graph_wrunlock(); + bdrv_drain_all_end(); return; } @@ -571,12 +574,14 @@ static void replication_start(ReplicationState *rs, ReplicationMode mode, !check_top_bs(top_bs, bs)) { error_setg(errp, "No top_bs or it is invalid"); bdrv_graph_wrunlock(); + bdrv_drain_all_end(); reopen_backing_file(bs, false, NULL); return; } bdrv_op_block_all(top_bs, s->blocker); bdrv_graph_wrunlock(); + bdrv_drain_all_end(); s->backup_job = backup_job_create( NULL, s->secondary_disk->bs, s->hidden_disk->bs, @@ -651,12 +656,14 @@ static void replication_done(void *opaque, int ret) if (ret == 0) { s->stage = BLOCK_REPLICATION_DONE; + bdrv_drain_all_begin(); bdrv_graph_wrlock(); bdrv_unref_child(bs, s->secondary_disk); s->secondary_disk = NULL; bdrv_unref_child(bs, s->hidden_disk); s->hidden_disk = NULL; bdrv_graph_wrunlock(); + bdrv_drain_all_end(); s->error = 0; } else { diff --git a/block/snapshot.c b/block/snapshot.c index 22567f1..28c9c43 100644 --- a/block/snapshot.c +++ b/block/snapshot.c @@ -291,9 +291,11 @@ int bdrv_snapshot_goto(BlockDriverState *bs, } /* .bdrv_open() will re-attach it */ + bdrv_drain_all_begin(); bdrv_graph_wrlock(); bdrv_unref_child(bs, fallback); bdrv_graph_wrunlock(); + bdrv_drain_all_end(); ret = bdrv_snapshot_goto(fallback_bs, snapshot_id, errp); memset(bs->opaque, 0, drv->instance_size); @@ -327,7 +329,7 @@ int bdrv_snapshot_goto(BlockDriverState *bs, /** * Delete an internal snapshot by @snapshot_id and @name. 
- * @bs: block device used in the operation + * @bs: block device used in the operation, must be drained * @snapshot_id: unique snapshot ID, or NULL * @name: snapshot name, or NULL * @errp: location to store error @@ -358,6 +360,8 @@ int bdrv_snapshot_delete(BlockDriverState *bs, GLOBAL_STATE_CODE(); + assert(bs->quiesce_counter > 0); + if (!drv) { error_setg(errp, "Device '%s' has no medium", bdrv_get_device_name(bs)); @@ -368,9 +372,6 @@ int bdrv_snapshot_delete(BlockDriverState *bs, return -EINVAL; } - /* drain all pending i/o before deleting snapshot */ - bdrv_drained_begin(bs); - if (drv->bdrv_snapshot_delete) { ret = drv->bdrv_snapshot_delete(bs, snapshot_id, name, errp); } else if (fallback_bs) { @@ -382,7 +383,6 @@ int bdrv_snapshot_delete(BlockDriverState *bs, ret = -ENOTSUP; } - bdrv_drained_end(bs); return ret; } @@ -571,19 +571,22 @@ int bdrv_all_delete_snapshot(const char *name, ERRP_GUARD(); g_autoptr(GList) bdrvs = NULL; GList *iterbdrvs; + int ret = 0; GLOBAL_STATE_CODE(); - GRAPH_RDLOCK_GUARD_MAINLOOP(); - if (bdrv_all_get_snapshot_devices(has_devices, devices, &bdrvs, errp) < 0) { - return -1; + bdrv_drain_all_begin(); + bdrv_graph_rdlock_main_loop(); + + ret = bdrv_all_get_snapshot_devices(has_devices, devices, &bdrvs, errp); + if (ret < 0) { + goto out; } iterbdrvs = bdrvs; while (iterbdrvs) { BlockDriverState *bs = iterbdrvs->data; QEMUSnapshotInfo sn1, *snapshot = &sn1; - int ret = 0; if ((devices || bdrv_all_snapshots_includes_bs(bs)) && bdrv_snapshot_find(bs, snapshot, name) >= 0) @@ -594,13 +597,16 @@ int bdrv_all_delete_snapshot(const char *name, if (ret < 0) { error_prepend(errp, "Could not delete snapshot '%s' on '%s': ", name, bdrv_get_device_or_node_name(bs)); - return -1; + goto out; } iterbdrvs = iterbdrvs->next; } - return 0; +out: + bdrv_graph_rdunlock_main_loop(); + bdrv_drain_all_end(); + return ret; } diff --git a/block/stream.c b/block/stream.c index 999d9e5..f5441f2 100644 --- a/block/stream.c +++ b/block/stream.c @@ -80,11 +80,10 @@ static int stream_prepare(Job *job) * may end up working with the wrong base node (or it might even have gone * away by the time we want to use it). */ - bdrv_drained_begin(unfiltered_bs); if (unfiltered_bs_cow) { bdrv_ref(unfiltered_bs_cow); - bdrv_drained_begin(unfiltered_bs_cow); } + bdrv_drain_all_begin(); bdrv_graph_rdlock_main_loop(); base = bdrv_filter_or_cow_bs(s->above_base); @@ -123,11 +122,10 @@ static int stream_prepare(Job *job) } out: + bdrv_drain_all_end(); if (unfiltered_bs_cow) { - bdrv_drained_end(unfiltered_bs_cow); bdrv_unref(unfiltered_bs_cow); } - bdrv_drained_end(unfiltered_bs); return ret; } @@ -373,10 +371,12 @@ void stream_start(const char *job_id, BlockDriverState *bs, * already have our own plans. Also don't allow resize as the image size is * queried only at the job start and then cached. 
*/ + bdrv_drain_all_begin(); bdrv_graph_wrlock(); if (block_job_add_bdrv(&s->common, "active node", bs, 0, basic_flags | BLK_PERM_WRITE, errp)) { bdrv_graph_wrunlock(); + bdrv_drain_all_end(); goto fail; } @@ -397,10 +397,12 @@ void stream_start(const char *job_id, BlockDriverState *bs, basic_flags, errp); if (ret < 0) { bdrv_graph_wrunlock(); + bdrv_drain_all_end(); goto fail; } } bdrv_graph_wrunlock(); + bdrv_drain_all_end(); s->base_overlay = base_overlay; s->above_base = above_base; diff --git a/block/vmdk.c b/block/vmdk.c index 9c7ab03..89a7250 100644 --- a/block/vmdk.c +++ b/block/vmdk.c @@ -271,6 +271,7 @@ static void vmdk_free_extents(BlockDriverState *bs) BDRVVmdkState *s = bs->opaque; VmdkExtent *e; + bdrv_drain_all_begin(); bdrv_graph_wrlock(); for (i = 0; i < s->num_extents; i++) { e = &s->extents[i]; @@ -283,6 +284,7 @@ static void vmdk_free_extents(BlockDriverState *bs) } } bdrv_graph_wrunlock(); + bdrv_drain_all_end(); g_free(s->extents); } @@ -1247,9 +1249,11 @@ vmdk_parse_extents(const char *desc, BlockDriverState *bs, QDict *options, 0, 0, 0, 0, 0, &extent, errp); if (ret < 0) { bdrv_graph_rdunlock_main_loop(); + bdrv_drain_all_begin(); bdrv_graph_wrlock(); bdrv_unref_child(bs, extent_file); bdrv_graph_wrunlock(); + bdrv_drain_all_end(); bdrv_graph_rdlock_main_loop(); goto out; } @@ -1266,9 +1270,11 @@ vmdk_parse_extents(const char *desc, BlockDriverState *bs, QDict *options, g_free(buf); if (ret) { bdrv_graph_rdunlock_main_loop(); + bdrv_drain_all_begin(); bdrv_graph_wrlock(); bdrv_unref_child(bs, extent_file); bdrv_graph_wrunlock(); + bdrv_drain_all_end(); bdrv_graph_rdlock_main_loop(); goto out; } @@ -1277,9 +1283,11 @@ vmdk_parse_extents(const char *desc, BlockDriverState *bs, QDict *options, ret = vmdk_open_se_sparse(bs, extent_file, bs->open_flags, errp); if (ret) { bdrv_graph_rdunlock_main_loop(); + bdrv_drain_all_begin(); bdrv_graph_wrlock(); bdrv_unref_child(bs, extent_file); bdrv_graph_wrunlock(); + bdrv_drain_all_end(); bdrv_graph_rdlock_main_loop(); goto out; } @@ -1287,9 +1295,11 @@ vmdk_parse_extents(const char *desc, BlockDriverState *bs, QDict *options, } else { error_setg(errp, "Unsupported extent type '%s'", type); bdrv_graph_rdunlock_main_loop(); + bdrv_drain_all_begin(); bdrv_graph_wrlock(); bdrv_unref_child(bs, extent_file); bdrv_graph_wrunlock(); + bdrv_drain_all_end(); bdrv_graph_rdlock_main_loop(); ret = -ENOTSUP; goto out; @@ -1132,39 +1132,41 @@ SnapshotInfo *qmp_blockdev_snapshot_delete_internal_sync(const char *device, int ret; GLOBAL_STATE_CODE(); - GRAPH_RDLOCK_GUARD_MAINLOOP(); + + bdrv_drain_all_begin(); + bdrv_graph_rdlock_main_loop(); bs = qmp_get_root_bs(device, errp); if (!bs) { - return NULL; + goto error; } if (!id && !name) { error_setg(errp, "Name or id must be provided"); - return NULL; + goto error; } if (bdrv_op_is_blocked(bs, BLOCK_OP_TYPE_INTERNAL_SNAPSHOT_DELETE, errp)) { - return NULL; + goto error; } ret = bdrv_snapshot_find_by_id_and_name(bs, id, name, &sn, &local_err); if (local_err) { error_propagate(errp, local_err); - return NULL; + goto error; } if (!ret) { error_setg(errp, "Snapshot with id '%s' and name '%s' does not exist on " "device '%s'", STR_OR_NULL(id), STR_OR_NULL(name), device); - return NULL; + goto error; } bdrv_snapshot_delete(bs, id, name, &local_err); if (local_err) { error_propagate(errp, local_err); - return NULL; + goto error; } info = g_new0(SnapshotInfo, 1); @@ -1180,6 +1182,9 @@ SnapshotInfo *qmp_blockdev_snapshot_delete_internal_sync(const char *device, info->has_icount = true; } +error: + 
bdrv_graph_rdunlock_main_loop(); + bdrv_drain_all_end(); return info; } @@ -1203,7 +1208,7 @@ static void internal_snapshot_action(BlockdevSnapshotInternal *internal, Error *local_err = NULL; const char *device; const char *name; - BlockDriverState *bs; + BlockDriverState *bs, *check_bs; QEMUSnapshotInfo old_sn, *sn; bool ret; int64_t rt; @@ -1211,7 +1216,7 @@ static void internal_snapshot_action(BlockdevSnapshotInternal *internal, int ret1; GLOBAL_STATE_CODE(); - GRAPH_RDLOCK_GUARD_MAINLOOP(); + bdrv_graph_rdlock_main_loop(); tran_add(tran, &internal_snapshot_drv, state); @@ -1220,14 +1225,29 @@ static void internal_snapshot_action(BlockdevSnapshotInternal *internal, bs = qmp_get_root_bs(device, errp); if (!bs) { + bdrv_graph_rdunlock_main_loop(); return; } state->bs = bs; + /* Need to drain while unlocked. */ + bdrv_graph_rdunlock_main_loop(); /* Paired with .clean() */ bdrv_drained_begin(bs); + GRAPH_RDLOCK_GUARD_MAINLOOP(); + + /* Make sure the root bs did not change with the drain. */ + check_bs = qmp_get_root_bs(device, errp); + if (bs != check_bs) { + if (check_bs) { + error_setg(errp, "Block node of device '%s' unexpectedly changed", + device); + } /* else errp is already set */ + return; + } + if (bdrv_op_is_blocked(bs, BLOCK_OP_TYPE_INTERNAL_SNAPSHOT, errp)) { return; } @@ -1295,12 +1315,14 @@ static void internal_snapshot_abort(void *opaque) Error *local_error = NULL; GLOBAL_STATE_CODE(); - GRAPH_RDLOCK_GUARD_MAINLOOP(); if (!state->created) { return; } + bdrv_drain_all_begin(); + bdrv_graph_rdlock_main_loop(); + if (bdrv_snapshot_delete(bs, sn->id_str, sn->name, &local_error) < 0) { error_reportf_err(local_error, "Failed to delete snapshot with id '%s' and " @@ -1308,6 +1330,8 @@ static void internal_snapshot_abort(void *opaque) sn->id_str, sn->name, bdrv_get_device_name(bs)); } + bdrv_graph_rdunlock_main_loop(); + bdrv_drain_all_end(); } static void internal_snapshot_clean(void *opaque) @@ -1353,9 +1377,10 @@ static void external_snapshot_action(TransactionAction *action, const char *new_image_file; ExternalSnapshotState *state = g_new0(ExternalSnapshotState, 1); uint64_t perm, shared; + BlockDriverState *check_bs; /* TODO We'll eventually have to take a writer lock in this function */ - GRAPH_RDLOCK_GUARD_MAINLOOP(); + bdrv_graph_rdlock_main_loop(); tran_add(tran, &external_snapshot_drv, state); @@ -1388,11 +1413,25 @@ static void external_snapshot_action(TransactionAction *action, state->old_bs = bdrv_lookup_bs(device, node_name, errp); if (!state->old_bs) { + bdrv_graph_rdunlock_main_loop(); return; } + /* Need to drain while unlocked. */ + bdrv_graph_rdunlock_main_loop(); /* Paired with .clean() */ bdrv_drained_begin(state->old_bs); + GRAPH_RDLOCK_GUARD_MAINLOOP(); + + /* Make sure the associated bs did not change with the drain. 
*/ + check_bs = bdrv_lookup_bs(device, node_name, errp); + if (state->old_bs != check_bs) { + if (check_bs) { + error_setg(errp, "Block node of device '%s' unexpectedly changed", + device); + } /* else errp is already set */ + return; + } if (!bdrv_is_inserted(state->old_bs)) { error_setg(errp, "Device '%s' has no medium", @@ -3522,6 +3561,7 @@ void qmp_x_blockdev_change(const char *parent, const char *child, BlockDriverState *parent_bs, *new_bs = NULL; BdrvChild *p_child; + bdrv_drain_all_begin(); bdrv_graph_wrlock(); parent_bs = bdrv_lookup_bs(parent, parent, errp); @@ -3559,6 +3599,7 @@ void qmp_x_blockdev_change(const char *parent, const char *child, out: bdrv_graph_wrunlock(); + bdrv_drain_all_end(); } BlockJobInfoList *qmp_query_block_jobs(Error **errp) @@ -3592,12 +3633,13 @@ void qmp_x_blockdev_set_iothread(const char *node_name, StrOrNull *iothread, AioContext *new_context; BlockDriverState *bs; - GRAPH_RDLOCK_GUARD_MAINLOOP(); + bdrv_drain_all_begin(); + bdrv_graph_rdlock_main_loop(); bs = bdrv_find_node(node_name); if (!bs) { error_setg(errp, "Failed to find node with node-name='%s'", node_name); - return; + goto out; } /* Protects against accidents. */ @@ -3605,14 +3647,14 @@ void qmp_x_blockdev_set_iothread(const char *node_name, StrOrNull *iothread, error_setg(errp, "Node %s is associated with a BlockBackend and could " "be in use (use force=true to override this check)", node_name); - return; + goto out; } if (iothread->type == QTYPE_QSTRING) { IOThread *obj = iothread_by_id(iothread->u.s); if (!obj) { error_setg(errp, "Cannot find iothread %s", iothread->u.s); - return; + goto out; } new_context = iothread_get_aio_context(obj); @@ -3620,7 +3662,11 @@ void qmp_x_blockdev_set_iothread(const char *node_name, StrOrNull *iothread, new_context = qemu_get_aio_context(); } - bdrv_try_change_aio_context(bs, new_context, NULL, errp); + bdrv_try_change_aio_context_locked(bs, new_context, NULL, errp); + +out: + bdrv_graph_rdunlock_main_loop(); + bdrv_drain_all_end(); } QemuOptsList qemu_common_drive_opts = { @@ -144,9 +144,9 @@ static TransactionActionDrv change_child_job_context = { .clean = g_free, }; -static bool child_job_change_aio_ctx(BdrvChild *c, AioContext *ctx, - GHashTable *visited, Transaction *tran, - Error **errp) +static bool GRAPH_RDLOCK +child_job_change_aio_ctx(BdrvChild *c, AioContext *ctx, GHashTable *visited, + Transaction *tran, Error **errp) { BlockJob *job = c->opaque; BdrvStateChildJobContext *s; @@ -198,6 +198,7 @@ void block_job_remove_all_bdrv(BlockJob *job) * one to make sure that such a concurrent access does not attempt * to process an already freed BdrvChild. 
*/ + bdrv_drain_all_begin(); bdrv_graph_wrlock(); while (job->nodes) { GSList *l = job->nodes; @@ -211,6 +212,7 @@ void block_job_remove_all_bdrv(BlockJob *job) g_slist_free_1(l); } bdrv_graph_wrunlock(); + bdrv_drain_all_end(); } bool block_job_has_bdrv(BlockJob *job, BlockDriverState *bs) @@ -496,6 +498,7 @@ void *block_job_create(const char *job_id, const BlockJobDriver *driver, int ret; GLOBAL_STATE_CODE(); + bdrv_drain_all_begin(); bdrv_graph_wrlock(); if (job_id == NULL && !(flags & JOB_INTERNAL)) { @@ -506,6 +509,7 @@ void *block_job_create(const char *job_id, const BlockJobDriver *driver, flags, cb, opaque, errp); if (job == NULL) { bdrv_graph_wrunlock(); + bdrv_drain_all_end(); return NULL; } @@ -544,10 +548,12 @@ void *block_job_create(const char *job_id, const BlockJobDriver *driver, } bdrv_graph_wrunlock(); + bdrv_drain_all_end(); return job; fail: bdrv_graph_wrunlock(); + bdrv_drain_all_end(); job_early_fail(&job->job); return NULL; } diff --git a/hw/core/qdev-properties-system.c b/hw/core/qdev-properties-system.c index 8e11e63..24e145d 100644 --- a/hw/core/qdev-properties-system.c +++ b/hw/core/qdev-properties-system.c @@ -145,6 +145,7 @@ static void set_drive_helper(Object *obj, Visitor *v, const char *name, if (ctx != bdrv_get_aio_context(bs)) { error_setg(errp, "Different aio context is not supported for new " "node"); + return; } blk_replace_bs(blk, bs, errp); diff --git a/include/block/block-global-state.h b/include/block/block-global-state.h index 9be34b3..84a2a4e 100644 --- a/include/block/block-global-state.h +++ b/include/block/block-global-state.h @@ -192,10 +192,10 @@ int bdrv_inactivate_all(void); int bdrv_flush_all(void); void bdrv_close_all(void); -void bdrv_drain_all_begin(void); +void GRAPH_UNLOCKED bdrv_drain_all_begin(void); void bdrv_drain_all_begin_nopoll(void); void bdrv_drain_all_end(void); -void bdrv_drain_all(void); +void GRAPH_UNLOCKED bdrv_drain_all(void); void bdrv_aio_cancel(BlockAIOCB *acb); @@ -274,11 +274,16 @@ int bdrv_debug_remove_breakpoint(BlockDriverState *bs, const char *tag); int bdrv_debug_resume(BlockDriverState *bs, const char *tag); bool bdrv_debug_is_suspended(BlockDriverState *bs, const char *tag); -bool bdrv_child_change_aio_context(BdrvChild *c, AioContext *ctx, - GHashTable *visited, Transaction *tran, - Error **errp); -int bdrv_try_change_aio_context(BlockDriverState *bs, AioContext *ctx, - BdrvChild *ignore_child, Error **errp); +bool GRAPH_RDLOCK +bdrv_child_change_aio_context(BdrvChild *c, AioContext *ctx, + GHashTable *visited, Transaction *tran, + Error **errp); +int GRAPH_UNLOCKED +bdrv_try_change_aio_context(BlockDriverState *bs, AioContext *ctx, + BdrvChild *ignore_child, Error **errp); +int GRAPH_RDLOCK +bdrv_try_change_aio_context_locked(BlockDriverState *bs, AioContext *ctx, + BdrvChild *ignore_child, Error **errp); int GRAPH_RDLOCK bdrv_probe_blocksizes(BlockDriverState *bs, BlockSizes *bsz); int bdrv_probe_geometry(BlockDriverState *bs, HDGeometry *geo); diff --git a/include/block/block-io.h b/include/block/block-io.h index b99cc98..4cf83fb 100644 --- a/include/block/block-io.h +++ b/include/block/block-io.h @@ -431,7 +431,7 @@ bdrv_drain_poll(BlockDriverState *bs, BdrvChild *ignore_parent, * * This function can be recursive. 
*/ -void bdrv_drained_begin(BlockDriverState *bs); +void GRAPH_UNLOCKED bdrv_drained_begin(BlockDriverState *bs); /** * bdrv_do_drained_begin_quiesce: diff --git a/include/block/block_int-common.h b/include/block/block_int-common.h index 2982dd3..925a3e7 100644 --- a/include/block/block_int-common.h +++ b/include/block/block_int-common.h @@ -396,9 +396,23 @@ struct BlockDriver { int GRAPH_RDLOCK_PTR (*bdrv_probe_geometry)( BlockDriverState *bs, HDGeometry *geo); + /** + * Hot add a BDS's child. Used in combination with bdrv_del_child, so the + * user can take a child offline when it is broken and take a new child + * online. + * + * All block nodes must be drained. + */ void GRAPH_WRLOCK_PTR (*bdrv_add_child)( BlockDriverState *parent, BlockDriverState *child, Error **errp); + /** + * Hot remove a BDS's child. Used in combination with bdrv_add_child, so the + * user can take a child offline when it is broken and take a new child + * online. + * + * All block nodes must be drained. + */ void GRAPH_WRLOCK_PTR (*bdrv_del_child)( BlockDriverState *parent, BdrvChild *child, Error **errp); @@ -983,9 +997,21 @@ struct BdrvChildClass { bool backing_mask_protocol, Error **errp); - bool (*change_aio_ctx)(BdrvChild *child, AioContext *ctx, - GHashTable *visited, Transaction *tran, - Error **errp); + /* + * Notifies the parent that the child is trying to change its AioContext. + * The parent may in turn change the AioContext of other nodes in the same + * transaction. Returns true if the change is possible and the transaction + * can be continued. Returns false and sets @errp if not and the transaction + * must be aborted. + * + * @visited will accumulate all visited BdrvChild objects. The caller is + * responsible for freeing the list afterwards. + * + * Must be called with the affected block nodes drained. + */ + bool GRAPH_RDLOCK_PTR (*change_aio_ctx)(BdrvChild *child, AioContext *ctx, + GHashTable *visited, + Transaction *tran, Error **errp); /* * I/O API functions. These functions are thread-safe. diff --git a/include/block/blockjob.h b/include/block/blockjob.h index 7061ab7..990f3e1 100644 --- a/include/block/blockjob.h +++ b/include/block/blockjob.h @@ -137,6 +137,8 @@ BlockJob *block_job_get_locked(const char *id); * Add @bs to the list of BlockDriverState that are involved in * @job. This means that all operations will be blocked on @bs while * @job exists. + * + * All block nodes must be drained. 
*/ int GRAPH_WRLOCK block_job_add_bdrv(BlockJob *job, const char *name, BlockDriverState *bs, @@ -3505,6 +3505,7 @@ static int img_snapshot(int argc, char **argv) break; case SNAPSHOT_DELETE: + bdrv_drain_all_begin(); bdrv_graph_rdlock_main_loop(); ret = bdrv_snapshot_find(bs, &sn, snapshot_name); if (ret < 0) { @@ -3520,6 +3521,7 @@ static int img_snapshot(int argc, char **argv) } } bdrv_graph_rdunlock_main_loop(); + bdrv_drain_all_end(); break; } diff --git a/tests/qemu-iotests/240 b/tests/qemu-iotests/240 index 9b281e1..f8af9ff 100755 --- a/tests/qemu-iotests/240 +++ b/tests/qemu-iotests/240 @@ -81,8 +81,6 @@ class TestCase(iotests.QMPTestCase): self.vm.qmp_log('device_del', id='scsi-hd0') self.vm.event_wait('DEVICE_DELETED') - self.vm.qmp_log('device_add', id='scsi-hd1', driver='scsi-hd', drive='hd0', bus="scsi1.0") - self.vm.qmp_log('device_del', id='scsi-hd1') self.vm.event_wait('DEVICE_DELETED') self.vm.qmp_log('blockdev-del', node_name='hd0') diff --git a/tests/qemu-iotests/240.out b/tests/qemu-iotests/240.out index 89ed25e..10dcc42 100644 --- a/tests/qemu-iotests/240.out +++ b/tests/qemu-iotests/240.out @@ -46,10 +46,8 @@ {"execute": "device_add", "arguments": {"bus": "scsi0.0", "drive": "hd0", "driver": "scsi-hd", "id": "scsi-hd0"}} {"return": {}} {"execute": "device_add", "arguments": {"bus": "scsi1.0", "drive": "hd0", "driver": "scsi-hd", "id": "scsi-hd1"}} -{"error": {"class": "GenericError", "desc": "Cannot change iothread of active block backend"}} -{"execute": "device_del", "arguments": {"id": "scsi-hd0"}} {"return": {}} -{"execute": "device_add", "arguments": {"bus": "scsi1.0", "drive": "hd0", "driver": "scsi-hd", "id": "scsi-hd1"}} +{"execute": "device_del", "arguments": {"id": "scsi-hd0"}} {"return": {}} {"execute": "device_del", "arguments": {"id": "scsi-hd1"}} {"return": {}} diff --git a/tests/qemu-iotests/tests/graph-changes-while-io b/tests/qemu-iotests/tests/graph-changes-while-io index 194fda5..dca1167 100755 --- a/tests/qemu-iotests/tests/graph-changes-while-io +++ b/tests/qemu-iotests/tests/graph-changes-while-io @@ -27,6 +27,7 @@ from iotests import imgfmt, qemu_img, qemu_img_create, qemu_io, \ top = os.path.join(iotests.test_dir, 'top.img') +mid = os.path.join(iotests.test_dir, 'mid.img') nbd_sock = os.path.join(iotests.sock_dir, 'nbd.sock') @@ -57,6 +58,16 @@ class TestGraphChangesWhileIO(QMPTestCase): def tearDown(self) -> None: self.qsd.stop() + os.remove(top) + + def _wait_for_blockjob(self, status: str) -> None: + done = False + while not done: + for event in self.qsd.get_qmp().get_events(wait=10.0): + if event['event'] != 'JOB_STATUS_CHANGE': + continue + if event['data']['status'] == status: + done = True def test_blockdev_add_while_io(self) -> None: # Run qemu-img bench in the background @@ -116,15 +127,92 @@ class TestGraphChangesWhileIO(QMPTestCase): 'device': 'job0', }) - cancelled = False - while not cancelled: - for event in self.qsd.get_qmp().get_events(wait=10.0): - if event['event'] != 'JOB_STATUS_CHANGE': - continue - if event['data']['status'] == 'null': - cancelled = True + self._wait_for_blockjob('null') + + bench_thr.join() + + def test_remove_lower_snapshot_while_io(self) -> None: + # Run qemu-img bench in the background + bench_thr = Thread(target=do_qemu_img_bench, args=(100000, )) + bench_thr.start() + + # While I/O is performed on 'node0' node, consequently add 2 snapshots + # on top of it, then remove (commit) them starting from lower one. 
+ while bench_thr.is_alive(): + # Recreate snapshot images on every iteration + qemu_img_create('-f', imgfmt, mid, '1G') + qemu_img_create('-f', imgfmt, top, '1G') + + self.qsd.cmd('blockdev-add', { + 'driver': imgfmt, + 'node-name': 'mid', + 'file': { + 'driver': 'file', + 'filename': mid + } + }) + + self.qsd.cmd('blockdev-snapshot', { + 'node': 'node0', + 'overlay': 'mid', + }) + + self.qsd.cmd('blockdev-add', { + 'driver': imgfmt, + 'node-name': 'top', + 'file': { + 'driver': 'file', + 'filename': top + } + }) + + self.qsd.cmd('blockdev-snapshot', { + 'node': 'mid', + 'overlay': 'top', + }) + + self.qsd.cmd('block-commit', { + 'job-id': 'commit-mid', + 'device': 'top', + 'top-node': 'mid', + 'base-node': 'node0', + 'auto-finalize': True, + 'auto-dismiss': False, + }) + + self._wait_for_blockjob('concluded') + self.qsd.cmd('job-dismiss', { + 'id': 'commit-mid', + }) + + self.qsd.cmd('block-commit', { + 'job-id': 'commit-top', + 'device': 'top', + 'top-node': 'top', + 'base-node': 'node0', + 'auto-finalize': True, + 'auto-dismiss': False, + }) + + self._wait_for_blockjob('ready') + self.qsd.cmd('job-complete', { + 'id': 'commit-top', + }) + + self._wait_for_blockjob('concluded') + self.qsd.cmd('job-dismiss', { + 'id': 'commit-top', + }) + + self.qsd.cmd('blockdev-del', { + 'node-name': 'mid' + }) + self.qsd.cmd('blockdev-del', { + 'node-name': 'top' + }) bench_thr.join() + os.remove(mid) if __name__ == '__main__': # Format must support raw backing files diff --git a/tests/qemu-iotests/tests/graph-changes-while-io.out b/tests/qemu-iotests/tests/graph-changes-while-io.out index fbc63e6..8d7e9967 100644 --- a/tests/qemu-iotests/tests/graph-changes-while-io.out +++ b/tests/qemu-iotests/tests/graph-changes-while-io.out @@ -1,5 +1,5 @@ -.. +... ---------------------------------------------------------------------- -Ran 2 tests +Ran 3 tests OK diff --git a/tests/unit/test-bdrv-drain.c b/tests/unit/test-bdrv-drain.c index 290cd2a..59c2793 100644 --- a/tests/unit/test-bdrv-drain.c +++ b/tests/unit/test-bdrv-drain.c @@ -772,9 +772,11 @@ static void test_blockjob_common_drain_node(enum drain_type drain_type, tjob->bs = src; job = &tjob->common; + bdrv_drain_all_begin(); bdrv_graph_wrlock(); block_job_add_bdrv(job, "target", target, 0, BLK_PERM_ALL, &error_abort); bdrv_graph_wrunlock(); + bdrv_drain_all_end(); switch (result) { case TEST_JOB_SUCCESS: @@ -953,11 +955,13 @@ static void bdrv_test_top_close(BlockDriverState *bs) { BdrvChild *c, *next_c; + bdrv_drain_all_begin(); bdrv_graph_wrlock(); QLIST_FOREACH_SAFE(c, &bs->children, next, next_c) { bdrv_unref_child(bs, c); } bdrv_graph_wrunlock(); + bdrv_drain_all_end(); } static int coroutine_fn GRAPH_RDLOCK @@ -1014,7 +1018,9 @@ static void coroutine_fn test_co_delete_by_drain(void *opaque) bdrv_graph_co_rdlock(); QLIST_FOREACH_SAFE(c, &bs->children, next, next_c) { bdrv_graph_co_rdunlock(); + bdrv_drain_all_begin(); bdrv_co_unref_child(bs, c); + bdrv_drain_all_end(); bdrv_graph_co_rdlock(); } bdrv_graph_co_rdunlock(); @@ -1047,10 +1053,12 @@ static void do_test_delete_by_drain(bool detach_instead_of_delete, null_bs = bdrv_open("null-co://", NULL, NULL, BDRV_O_RDWR | BDRV_O_PROTOCOL, &error_abort); + bdrv_drain_all_begin(); bdrv_graph_wrlock(); bdrv_attach_child(bs, null_bs, "null-child", &child_of_bds, BDRV_CHILD_DATA, &error_abort); bdrv_graph_wrunlock(); + bdrv_drain_all_end(); /* This child will be the one to pass to requests through to, and * it will stall until a drain occurs */ @@ -1058,21 +1066,25 @@ static void do_test_delete_by_drain(bool 
detach_instead_of_delete, &error_abort); child_bs->total_sectors = 65536 >> BDRV_SECTOR_BITS; /* Takes our reference to child_bs */ + bdrv_drain_all_begin(); bdrv_graph_wrlock(); tts->wait_child = bdrv_attach_child(bs, child_bs, "wait-child", &child_of_bds, BDRV_CHILD_DATA | BDRV_CHILD_PRIMARY, &error_abort); bdrv_graph_wrunlock(); + bdrv_drain_all_end(); /* This child is just there to be deleted * (for detach_instead_of_delete == true) */ null_bs = bdrv_open("null-co://", NULL, NULL, BDRV_O_RDWR | BDRV_O_PROTOCOL, &error_abort); + bdrv_drain_all_begin(); bdrv_graph_wrlock(); bdrv_attach_child(bs, null_bs, "null-child", &child_of_bds, BDRV_CHILD_DATA, &error_abort); bdrv_graph_wrunlock(); + bdrv_drain_all_end(); blk = blk_new(qemu_get_aio_context(), BLK_PERM_ALL, BLK_PERM_ALL); blk_insert_bs(blk, bs, &error_abort); @@ -1155,6 +1167,7 @@ static void no_coroutine_fn detach_indirect_bh(void *opaque) bdrv_dec_in_flight(data->child_b->bs); + bdrv_drain_all_begin(); bdrv_graph_wrlock(); bdrv_unref_child(data->parent_b, data->child_b); @@ -1163,6 +1176,7 @@ static void no_coroutine_fn detach_indirect_bh(void *opaque) &child_of_bds, BDRV_CHILD_DATA, &error_abort); bdrv_graph_wrunlock(); + bdrv_drain_all_end(); } static void coroutine_mixed_fn detach_by_parent_aio_cb(void *opaque, int ret) @@ -1260,6 +1274,7 @@ static void TSA_NO_TSA test_detach_indirect(bool by_parent_cb) /* Set child relationships */ bdrv_ref(b); bdrv_ref(a); + bdrv_drain_all_begin(); bdrv_graph_wrlock(); child_b = bdrv_attach_child(parent_b, b, "PB-B", &child_of_bds, BDRV_CHILD_DATA, &error_abort); @@ -1271,6 +1286,7 @@ static void TSA_NO_TSA test_detach_indirect(bool by_parent_cb) by_parent_cb ? &child_of_bds : &detach_by_driver_cb_class, BDRV_CHILD_DATA, &error_abort); bdrv_graph_wrunlock(); + bdrv_drain_all_end(); g_assert_cmpint(parent_a->refcnt, ==, 1); g_assert_cmpint(parent_b->refcnt, ==, 1); @@ -1396,14 +1412,10 @@ static void test_set_aio_context(void) bs = bdrv_new_open_driver(&bdrv_test, "test-node", BDRV_O_RDWR, &error_abort); - bdrv_drained_begin(bs); bdrv_try_change_aio_context(bs, ctx_a, NULL, &error_abort); - bdrv_drained_end(bs); - bdrv_drained_begin(bs); bdrv_try_change_aio_context(bs, ctx_b, NULL, &error_abort); bdrv_try_change_aio_context(bs, qemu_get_aio_context(), NULL, &error_abort); - bdrv_drained_end(bs); bdrv_unref(bs); iothread_join(a); @@ -1687,6 +1699,7 @@ static void test_drop_intermediate_poll(void) * Establish the chain last, so the chain links are the first * elements in the BDS.parents lists */ + bdrv_drain_all_begin(); bdrv_graph_wrlock(); for (i = 0; i < 3; i++) { if (i) { @@ -1696,6 +1709,7 @@ static void test_drop_intermediate_poll(void) } } bdrv_graph_wrunlock(); + bdrv_drain_all_end(); job = block_job_create("job", &test_simple_job_driver, NULL, job_node, 0, BLK_PERM_ALL, 0, 0, NULL, NULL, &error_abort); @@ -1942,10 +1956,12 @@ static void do_test_replace_child_mid_drain(int old_drain_count, new_child_bs->total_sectors = 1; bdrv_ref(old_child_bs); + bdrv_drain_all_begin(); bdrv_graph_wrlock(); bdrv_attach_child(parent_bs, old_child_bs, "child", &child_of_bds, BDRV_CHILD_COW, &error_abort); bdrv_graph_wrunlock(); + bdrv_drain_all_end(); parent_s->setup_completed = true; for (i = 0; i < old_drain_count; i++) { diff --git a/tests/unit/test-bdrv-graph-mod.c b/tests/unit/test-bdrv-graph-mod.c index d743abb..7b03ebe 100644 --- a/tests/unit/test-bdrv-graph-mod.c +++ b/tests/unit/test-bdrv-graph-mod.c @@ -137,10 +137,12 @@ static void test_update_perm_tree(void) blk_insert_bs(root, bs, 
&error_abort); + bdrv_drain_all_begin(); bdrv_graph_wrlock(); bdrv_attach_child(filter, bs, "child", &child_of_bds, BDRV_CHILD_DATA, &error_abort); bdrv_graph_wrunlock(); + bdrv_drain_all_end(); ret = bdrv_append(filter, bs, NULL); g_assert_cmpint(ret, <, 0); @@ -204,11 +206,13 @@ static void test_should_update_child(void) bdrv_set_backing_hd(target, bs, &error_abort); + bdrv_drain_all_begin(); bdrv_graph_wrlock(); g_assert(target->backing->bs == bs); bdrv_attach_child(filter, target, "target", &child_of_bds, BDRV_CHILD_DATA, &error_abort); bdrv_graph_wrunlock(); + bdrv_drain_all_end(); bdrv_append(filter, bs, &error_abort); bdrv_graph_rdlock_main_loop(); @@ -244,6 +248,7 @@ static void test_parallel_exclusive_write(void) bdrv_ref(base); bdrv_ref(fl1); + bdrv_drain_all_begin(); bdrv_graph_wrlock(); bdrv_attach_child(top, fl1, "backing", &child_of_bds, BDRV_CHILD_FILTERED | BDRV_CHILD_PRIMARY, @@ -257,6 +262,7 @@ static void test_parallel_exclusive_write(void) bdrv_replace_node(fl1, fl2, &error_abort); bdrv_graph_wrunlock(); + bdrv_drain_all_end(); bdrv_drained_end(fl2); bdrv_drained_end(fl1); @@ -363,6 +369,7 @@ static void test_parallel_perm_update(void) */ bdrv_ref(base); + bdrv_drain_all_begin(); bdrv_graph_wrlock(); bdrv_attach_child(top, ws, "file", &child_of_bds, BDRV_CHILD_DATA, &error_abort); @@ -377,6 +384,7 @@ static void test_parallel_perm_update(void) BDRV_CHILD_FILTERED | BDRV_CHILD_PRIMARY, &error_abort); bdrv_graph_wrunlock(); + bdrv_drain_all_end(); /* Select fl1 as first child to be active */ s->selected = c_fl1; @@ -430,11 +438,13 @@ static void test_append_greedy_filter(void) BlockDriverState *base = no_perm_node("base"); BlockDriverState *fl = exclusive_writer_node("fl1"); + bdrv_drain_all_begin(); bdrv_graph_wrlock(); bdrv_attach_child(top, base, "backing", &child_of_bds, BDRV_CHILD_FILTERED | BDRV_CHILD_PRIMARY, &error_abort); bdrv_graph_wrunlock(); + bdrv_drain_all_end(); bdrv_append(fl, base, &error_abort); bdrv_unref(fl); |
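
One more pattern from the blockdev.c hunks above is worth calling out: a QMP handler that needs a drained section now drops the main-loop reader lock first, because bdrv_drained_begin() is GRAPH_UNLOCKED, and then re-resolves the node after draining, since the drain may have run graph-changing code. A condensed sketch of the flow used by internal_snapshot_action() and external_snapshot_action(); `snapshot_action_sketch()` is a made-up name, and error/transaction handling is trimmed:

```c
/* Condensed sketch, not a verbatim excerpt; assumes blockdev.c's context. */
static void snapshot_action_sketch(const char *device, Error **errp)
{
    BlockDriverState *bs, *check_bs;

    bdrv_graph_rdlock_main_loop();

    bs = qmp_get_root_bs(device, errp);
    if (!bs) {
        bdrv_graph_rdunlock_main_loop();
        return;
    }

    /* Draining must happen while the graph lock is not held. */
    bdrv_graph_rdunlock_main_loop();
    bdrv_drained_begin(bs);   /* ended later by the transaction's .clean() */

    GRAPH_RDLOCK_GUARD_MAINLOOP();

    /* The drain may have changed the graph: re-validate the node. */
    check_bs = qmp_get_root_bs(device, errp);
    if (bs != check_bs) {
        if (check_bs) {
            error_setg(errp, "Block node of device '%s' unexpectedly changed",
                       device);
        } /* else errp is already set */
        return;
    }

    /* ... proceed with the snapshot under the reader lock ... */
}
```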