author     Stefan Hajnoczi <stefanha@redhat.com>   2023-09-21 09:31:27 -0400
committer  Stefan Hajnoczi <stefanha@redhat.com>   2023-09-21 09:31:28 -0400
commit     3da71a2111de9f0bc475f8292d009265ab34365f (patch)
tree       102ffb4b18cea24d8b75a0d7f22f0eea14d3ba8b /tests/unit
parent     f2df7e7705e832a8a65422c227e9ef1bdac226c1 (diff)
parent     c428b392590df6364a025d5841e3e8a589ebfd4a (diff)
Merge tag 'for-upstream' of https://repo.or.cz/qemu/kevin into staging
Block layer patches

- Graph locking part 4 (node management)
- qemu-img map: report compressed data blocks
- block-backend: process I/O in the current AioContext

# -----BEGIN PGP SIGNATURE-----
#
# iQJFBAABCAAvFiEE3D3rFZqa+V09dFb+fwmycsiPL9YFAmULHnURHGt3b2xmQHJl
# ZGhhdC5jb20ACgkQfwmycsiPL9aB5hAAqH8To7WIUtg1rj1PY809ck78ghm18PKg
# TNdN7IbrXQghX5foh2VgPwVVl+JaW2CSrJYWQcAO6AbvFduNIi9iKzI6RT0xKXpb
# b8oQXS7zntFzwBv8ohOU5NSVJOgVmNP4h5qJIMmXgB9ZcLFG40zggVH2qQT7guUf
# 9MAc81kI/d5vvSHY0ZjdHjNOgwG4q1j8yytL7OFqWUfB8sXloUCA9lT7w4jIYD8L
# v2StUOLWB01Zts2o8SCNaFxuajs6wUee8b/DM1cyPyLy4KtOdXvLKhq2NlXpLo2i
# aZFr4PtizTVwrQZIJttA9jqM+QCsDOsiSat3BLNNsKUaCWHZB0rOGLCzMCtisyOo
# 4PzuL4UI21ik2zieO1qVM+Thqvw16kHtp6dD9pGk4X4ogGreGYEIxzBl79luR+AV
# NCRizoeFWTHKymS1tSoKrWT9ZNHcLmwemO6Tt1rMYk9jV3T4uY5e1NwxaUavEfsX
# f8dLfQjhNiySOoDknT1OSerBOVdTXURS2ri5H3GZxrxvJ4jOeFkn52C8r3YlZ3Wp
# Cr9LCUJZeXgwY+Q1JQ3D4VLY8aZ83txpw6XKEy0eTEv5wxkBj5LWhXx7hNb5F3lg
# bqaRYijVJn+P82wVxlftIzMfNeVBFHzFE90taPV5grJjr8lgrGBFmD7Puc97kfDX
# oTDBwRxJeew=
# =qTNA
# -----END PGP SIGNATURE-----
# gpg: Signature made Wed 20 Sep 2023 12:31:49 EDT
# gpg:                using RSA key DC3DEB159A9AF95D3D7456FE7F09B272C88F2FD6
# gpg:                issuer "kwolf@redhat.com"
# gpg: Good signature from "Kevin Wolf <kwolf@redhat.com>" [full]
# Primary key fingerprint: DC3D EB15 9A9A F95D 3D74 56FE 7F09 B272 C88F 2FD6

* tag 'for-upstream' of https://repo.or.cz/qemu/kevin: (28 commits)
  block: mark aio_poll as non-coroutine
  block-backend: process zoned requests in the current AioContext
  block-backend: process I/O in the current AioContext
  test-bdrv-drain: avoid race with BH in IOThread drain test
  block: remove AIOCBInfo->get_aio_context()
  qemu-img: map: report compressed data blocks
  block: add BDRV_BLOCK_COMPRESSED flag for bdrv_block_status()
  block: Mark bdrv_add/del_child() and caller GRAPH_WRLOCK
  block: Mark bdrv_unref_child() GRAPH_WRLOCK
  block: Mark bdrv_root_unref_child() GRAPH_WRLOCK
  block: Take graph rdlock in bdrv_change_aio_context()
  block: Take graph rdlock in bdrv_drop_intermediate()
  block: Mark bdrv_parent_cb_change_media() GRAPH_RDLOCK
  block: Mark bdrv_child_perm() GRAPH_RDLOCK
  block: Mark bdrv_get_cumulative_perm() and callers GRAPH_RDLOCK
  block: Mark bdrv_parent_perms_conflict() and callers GRAPH_RDLOCK
  block: Mark bdrv_attach_child() GRAPH_WRLOCK
  block: Call transaction callbacks with lock held
  block: Mark bdrv_attach_child_common() GRAPH_WRLOCK
  block: Mark bdrv_replace_child_tran() GRAPH_WRLOCK
  ...

Signed-off-by: Stefan Hajnoczi <stefanha@redhat.com>
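The graph locking part of this pull request has the tests take the block graph write lock around node-management calls such as bdrv_attach_child() and bdrv_unref_child(), and the read lock around operations that merely walk the graph. As a rough, self-contained sketch of that pattern (plain pthreads, not QEMU code; the Node type and the attach_child()/print_children() helpers are illustrative stand-ins), a writer holds the exclusive lock while editing parent/child edges, and readers hold the shared lock while traversing them:

#include <pthread.h>
#include <stdio.h>

/* Illustrative stand-in for a block node and its child list. */
typedef struct Node Node;
struct Node {
    const char *name;
    Node *children[8];
    int nchildren;
};

static pthread_rwlock_t graph_lock = PTHREAD_RWLOCK_INITIALIZER;

/* Topology changes happen under the exclusive (write) lock, mirroring
 * bdrv_graph_wrlock()/bdrv_graph_wrunlock() around bdrv_attach_child()
 * in the tests below. */
static void attach_child(Node *parent, Node *child)
{
    pthread_rwlock_wrlock(&graph_lock);
    parent->children[parent->nchildren++] = child;
    pthread_rwlock_unlock(&graph_lock);
}

/* Traversals only need the shared (read) lock, mirroring
 * bdrv_graph_rdlock_main_loop(). */
static void print_children(Node *parent)
{
    pthread_rwlock_rdlock(&graph_lock);
    for (int i = 0; i < parent->nchildren; i++) {
        printf("%s -> %s\n", parent->name, parent->children[i]->name);
    }
    pthread_rwlock_unlock(&graph_lock);
}

int main(void)
{
    Node top = { .name = "top" }, base = { .name = "base" };

    attach_child(&top, &base);
    print_children(&top);
    return 0;
}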
Diffstat (limited to 'tests/unit')
-rw-r--r--  tests/unit/test-bdrv-drain.c       31
-rw-r--r--  tests/unit/test-bdrv-graph-mod.c   20
-rw-r--r--  tests/unit/test-block-iothread.c    3
3 files changed, 53 insertions(+), 1 deletion(-)
diff --git a/tests/unit/test-bdrv-drain.c b/tests/unit/test-bdrv-drain.c
index ccc453c..0b603e7 100644
--- a/tests/unit/test-bdrv-drain.c
+++ b/tests/unit/test-bdrv-drain.c
@@ -512,6 +512,7 @@ static void test_iothread_main_thread_bh(void *opaque)
* executed during drain, otherwise this would deadlock. */
aio_context_acquire(bdrv_get_aio_context(data->bs));
bdrv_flush(data->bs);
+ bdrv_dec_in_flight(data->bs); /* incremented by test_iothread_common() */
aio_context_release(bdrv_get_aio_context(data->bs));
}
@@ -583,6 +584,13 @@ static void test_iothread_common(enum drain_type drain_type, int drain_thread)
aio_context_acquire(ctx_a);
}
+ /*
+ * Increment in_flight so that do_drain_begin() waits for
+ * test_iothread_main_thread_bh(). This prevents the race between
+ * test_iothread_main_thread_bh() in IOThread a and do_drain_begin() in
+ * this thread. test_iothread_main_thread_bh() decrements in_flight.
+ */
+ bdrv_inc_in_flight(bs);
aio_bh_schedule_oneshot(ctx_a, test_iothread_main_thread_bh, &data);
/* The request is running on the IOThread a. Draining its block device
@@ -967,9 +975,12 @@ typedef struct BDRVTestTopState {
static void bdrv_test_top_close(BlockDriverState *bs)
{
BdrvChild *c, *next_c;
+
+ bdrv_graph_wrlock(NULL);
QLIST_FOREACH_SAFE(c, &bs->children, next, next_c) {
bdrv_unref_child(bs, c);
}
+ bdrv_graph_wrunlock();
}
static int coroutine_fn GRAPH_RDLOCK
@@ -1024,7 +1035,7 @@ static void coroutine_fn test_co_delete_by_drain(void *opaque)
} else {
BdrvChild *c, *next_c;
QLIST_FOREACH_SAFE(c, &bs->children, next, next_c) {
- bdrv_unref_child(bs, c);
+ bdrv_co_unref_child(bs, c);
}
}
@@ -1055,8 +1066,10 @@ static void do_test_delete_by_drain(bool detach_instead_of_delete,
null_bs = bdrv_open("null-co://", NULL, NULL, BDRV_O_RDWR | BDRV_O_PROTOCOL,
&error_abort);
+ bdrv_graph_wrlock(NULL);
bdrv_attach_child(bs, null_bs, "null-child", &child_of_bds,
BDRV_CHILD_DATA, &error_abort);
+ bdrv_graph_wrunlock();
/* This child will be the one to pass to requests through to, and
* it will stall until a drain occurs */
@@ -1064,17 +1077,21 @@ static void do_test_delete_by_drain(bool detach_instead_of_delete,
&error_abort);
child_bs->total_sectors = 65536 >> BDRV_SECTOR_BITS;
/* Takes our reference to child_bs */
+ bdrv_graph_wrlock(NULL);
tts->wait_child = bdrv_attach_child(bs, child_bs, "wait-child",
&child_of_bds,
BDRV_CHILD_DATA | BDRV_CHILD_PRIMARY,
&error_abort);
+ bdrv_graph_wrunlock();
/* This child is just there to be deleted
* (for detach_instead_of_delete == true) */
null_bs = bdrv_open("null-co://", NULL, NULL, BDRV_O_RDWR | BDRV_O_PROTOCOL,
&error_abort);
+ bdrv_graph_wrlock(NULL);
bdrv_attach_child(bs, null_bs, "null-child", &child_of_bds, BDRV_CHILD_DATA,
&error_abort);
+ bdrv_graph_wrunlock();
blk = blk_new(qemu_get_aio_context(), BLK_PERM_ALL, BLK_PERM_ALL);
blk_insert_bs(blk, bs, &error_abort);
@@ -1156,12 +1173,15 @@ static void detach_indirect_bh(void *opaque)
struct detach_by_parent_data *data = opaque;
bdrv_dec_in_flight(data->child_b->bs);
+
+ bdrv_graph_wrlock(NULL);
bdrv_unref_child(data->parent_b, data->child_b);
bdrv_ref(data->c);
data->child_c = bdrv_attach_child(data->parent_b, data->c, "PB-C",
&child_of_bds, BDRV_CHILD_DATA,
&error_abort);
+ bdrv_graph_wrunlock();
}
static void detach_by_parent_aio_cb(void *opaque, int ret)
@@ -1258,6 +1278,7 @@ static void test_detach_indirect(bool by_parent_cb)
/* Set child relationships */
bdrv_ref(b);
bdrv_ref(a);
+ bdrv_graph_wrlock(NULL);
child_b = bdrv_attach_child(parent_b, b, "PB-B", &child_of_bds,
BDRV_CHILD_DATA, &error_abort);
child_a = bdrv_attach_child(parent_b, a, "PB-A", &child_of_bds,
@@ -1267,6 +1288,7 @@ static void test_detach_indirect(bool by_parent_cb)
bdrv_attach_child(parent_a, a, "PA-A",
by_parent_cb ? &child_of_bds : &detach_by_driver_cb_class,
BDRV_CHILD_DATA, &error_abort);
+ bdrv_graph_wrunlock();
g_assert_cmpint(parent_a->refcnt, ==, 1);
g_assert_cmpint(parent_b->refcnt, ==, 1);
@@ -1359,7 +1381,10 @@ static void test_append_to_drained(void)
g_assert_cmpint(base_s->drain_count, ==, 1);
g_assert_cmpint(base->in_flight, ==, 0);
+ aio_context_acquire(qemu_get_aio_context());
bdrv_append(overlay, base, &error_abort);
+ aio_context_release(qemu_get_aio_context());
+
g_assert_cmpint(base->in_flight, ==, 0);
g_assert_cmpint(overlay->in_flight, ==, 0);
@@ -1682,6 +1707,7 @@ static void test_drop_intermediate_poll(void)
* Establish the chain last, so the chain links are the first
* elements in the BDS.parents lists
*/
+ bdrv_graph_wrlock(NULL);
for (i = 0; i < 3; i++) {
if (i) {
/* Takes the reference to chain[i - 1] */
@@ -1689,6 +1715,7 @@ static void test_drop_intermediate_poll(void)
&chain_child_class, BDRV_CHILD_COW, &error_abort);
}
}
+ bdrv_graph_wrunlock();
job = block_job_create("job", &test_simple_job_driver, NULL, job_node,
0, BLK_PERM_ALL, 0, 0, NULL, NULL, &error_abort);
@@ -1933,8 +1960,10 @@ static void do_test_replace_child_mid_drain(int old_drain_count,
new_child_bs->total_sectors = 1;
bdrv_ref(old_child_bs);
+ bdrv_graph_wrlock(NULL);
bdrv_attach_child(parent_bs, old_child_bs, "child", &child_of_bds,
BDRV_CHILD_COW, &error_abort);
+ bdrv_graph_wrunlock();
parent_s->setup_completed = true;
for (i = 0; i < old_drain_count; i++) {
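The test-bdrv-drain.c hunks above also close a race: the test now bumps the in-flight counter with bdrv_inc_in_flight() before scheduling the bottom half into the IOThread, so that do_drain_begin() (which waits for in_flight to drop to zero) cannot finish before the BH has run; the BH decrements the counter when it is done. Below is a minimal standalone analogue of that hand-off using plain pthreads rather than QEMU's bdrv_inc_in_flight()/bdrv_dec_in_flight(); the names are illustrative only:

#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t cond = PTHREAD_COND_INITIALIZER;
static int in_flight;

static void inc_in_flight(void)
{
    pthread_mutex_lock(&lock);
    in_flight++;
    pthread_mutex_unlock(&lock);
}

static void dec_in_flight(void)
{
    pthread_mutex_lock(&lock);
    if (--in_flight == 0) {
        pthread_cond_broadcast(&cond);
    }
    pthread_mutex_unlock(&lock);
}

/* Analogue of do_drain_begin(): wait until all scheduled work has run. */
static void drain(void)
{
    pthread_mutex_lock(&lock);
    while (in_flight > 0) {
        pthread_cond_wait(&cond, &lock);
    }
    pthread_mutex_unlock(&lock);
}

/* Analogue of test_iothread_main_thread_bh(): do the work, then drop the
 * in-flight reference that was taken before the work was scheduled. */
static void *scheduled_work(void *opaque)
{
    printf("flush-like work runs before drain completes\n");
    dec_in_flight();
    return NULL;
}

int main(void)
{
    pthread_t worker;

    inc_in_flight();                  /* taken before scheduling, as in the fix */
    pthread_create(&worker, NULL, scheduled_work, NULL);
    drain();                          /* cannot return until the work has run */
    pthread_join(worker, NULL);
    return 0;
}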
diff --git a/tests/unit/test-bdrv-graph-mod.c b/tests/unit/test-bdrv-graph-mod.c
index 36eed4b..8609f7f 100644
--- a/tests/unit/test-bdrv-graph-mod.c
+++ b/tests/unit/test-bdrv-graph-mod.c
@@ -137,11 +137,15 @@ static void test_update_perm_tree(void)
blk_insert_bs(root, bs, &error_abort);
+ bdrv_graph_wrlock(NULL);
bdrv_attach_child(filter, bs, "child", &child_of_bds,
BDRV_CHILD_DATA, &error_abort);
+ bdrv_graph_wrunlock();
+ aio_context_acquire(qemu_get_aio_context());
ret = bdrv_append(filter, bs, NULL);
g_assert_cmpint(ret, <, 0);
+ aio_context_release(qemu_get_aio_context());
bdrv_unref(filter);
blk_unref(root);
@@ -203,9 +207,13 @@ static void test_should_update_child(void)
bdrv_set_backing_hd(target, bs, &error_abort);
g_assert(target->backing->bs == bs);
+ bdrv_graph_wrlock(NULL);
bdrv_attach_child(filter, target, "target", &child_of_bds,
BDRV_CHILD_DATA, &error_abort);
+ bdrv_graph_wrunlock();
+ aio_context_acquire(qemu_get_aio_context());
bdrv_append(filter, bs, &error_abort);
+ aio_context_release(qemu_get_aio_context());
g_assert(target->backing->bs == bs);
bdrv_unref(filter);
@@ -232,6 +240,7 @@ static void test_parallel_exclusive_write(void)
*/
bdrv_ref(base);
+ bdrv_graph_wrlock(NULL);
bdrv_attach_child(top, fl1, "backing", &child_of_bds,
BDRV_CHILD_FILTERED | BDRV_CHILD_PRIMARY,
&error_abort);
@@ -241,6 +250,7 @@ static void test_parallel_exclusive_write(void)
bdrv_attach_child(fl2, base, "backing", &child_of_bds,
BDRV_CHILD_FILTERED | BDRV_CHILD_PRIMARY,
&error_abort);
+ bdrv_graph_wrunlock();
bdrv_replace_node(fl1, fl2, &error_abort);
@@ -345,6 +355,7 @@ static void test_parallel_perm_update(void)
*/
bdrv_ref(base);
+ bdrv_graph_wrlock(NULL);
bdrv_attach_child(top, ws, "file", &child_of_bds, BDRV_CHILD_DATA,
&error_abort);
c_fl1 = bdrv_attach_child(ws, fl1, "first", &child_of_bds,
@@ -357,9 +368,13 @@ static void test_parallel_perm_update(void)
bdrv_attach_child(fl2, base, "backing", &child_of_bds,
BDRV_CHILD_FILTERED | BDRV_CHILD_PRIMARY,
&error_abort);
+ bdrv_graph_wrunlock();
/* Select fl1 as first child to be active */
s->selected = c_fl1;
+
+ bdrv_graph_rdlock_main_loop();
+
bdrv_child_refresh_perms(top, top->children.lh_first, &error_abort);
assert(c_fl1->perm & BLK_PERM_WRITE);
@@ -379,6 +394,7 @@ static void test_parallel_perm_update(void)
assert(c_fl1->perm & BLK_PERM_WRITE);
assert(!(c_fl2->perm & BLK_PERM_WRITE));
+ bdrv_graph_rdunlock_main_loop();
bdrv_unref(top);
}
@@ -406,11 +422,15 @@ static void test_append_greedy_filter(void)
BlockDriverState *base = no_perm_node("base");
BlockDriverState *fl = exclusive_writer_node("fl1");
+ bdrv_graph_wrlock(NULL);
bdrv_attach_child(top, base, "backing", &child_of_bds,
BDRV_CHILD_FILTERED | BDRV_CHILD_PRIMARY,
&error_abort);
+ bdrv_graph_wrunlock();
+ aio_context_acquire(qemu_get_aio_context());
bdrv_append(fl, base, &error_abort);
+ aio_context_release(qemu_get_aio_context());
bdrv_unref(fl);
bdrv_unref(top);
}
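In test-bdrv-graph-mod.c the write lock covers only the bdrv_attach_child() calls, while bdrv_child_refresh_perms() runs under the main-loop read lock, since recomputing permissions only walks the graph. A self-contained sketch of that reader-side walk (plain pthreads; the Child type, the PERM_* bits, and cumulative_perm() are made up for illustration and are not QEMU's BLK_PERM_* API):

#include <pthread.h>
#include <stdio.h>

enum { PERM_READ = 1, PERM_WRITE = 2 };

typedef struct Child {
    const char *name;
    unsigned perm;
} Child;

static pthread_rwlock_t graph_lock = PTHREAD_RWLOCK_INITIALIZER;

/* Analogue of a permission refresh done under the read lock: combining the
 * children's permissions never changes the graph, so the shared lock is
 * enough, and concurrent readers are not blocked. */
static unsigned cumulative_perm(const Child *children, int n)
{
    unsigned perm = 0;

    pthread_rwlock_rdlock(&graph_lock);
    for (int i = 0; i < n; i++) {
        perm |= children[i].perm;
    }
    pthread_rwlock_unlock(&graph_lock);
    return perm;
}

int main(void)
{
    Child children[] = {
        { "first", PERM_READ | PERM_WRITE },
        { "second", PERM_READ },
    };

    printf("cumulative perm: %#x\n", cumulative_perm(children, 2));
    return 0;
}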
diff --git a/tests/unit/test-block-iothread.c b/tests/unit/test-block-iothread.c
index d727a5f..9155547 100644
--- a/tests/unit/test-block-iothread.c
+++ b/tests/unit/test-block-iothread.c
@@ -756,11 +756,14 @@ static void test_propagate_mirror(void)
&error_abort);
/* Start a mirror job */
+ aio_context_acquire(main_ctx);
mirror_start("job0", src, target, NULL, JOB_DEFAULT, 0, 0, 0,
MIRROR_SYNC_MODE_NONE, MIRROR_OPEN_BACKING_CHAIN, false,
BLOCKDEV_ON_ERROR_REPORT, BLOCKDEV_ON_ERROR_REPORT,
false, "filter_node", MIRROR_COPY_MODE_BACKGROUND,
&error_abort);
+ aio_context_release(main_ctx);
+
WITH_JOB_LOCK_GUARD() {
job = job_get_locked("job0");
}
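The test-block-iothread.c hunk wraps mirror_start() in aio_context_acquire()/aio_context_release() on the main context, because starting the job can poll and touch state that belongs to that context. A rough standalone analogue of this "hold the context lock around the whole call" pattern (plain pthreads; the Context type, ctx_acquire()/ctx_release() and start_job() are illustrative stand-ins, not QEMU's API):

#include <pthread.h>
#include <stdio.h>

/* Illustrative stand-in for an AioContext: some state plus the lock that
 * must be held while operating on it. */
typedef struct Context {
    pthread_mutex_t lock;
    int jobs_started;
} Context;

static void ctx_acquire(Context *ctx) { pthread_mutex_lock(&ctx->lock); }
static void ctx_release(Context *ctx) { pthread_mutex_unlock(&ctx->lock); }

/* Must be called with ctx->lock held, just as mirror_start() is called
 * with the main AioContext acquired in the test. */
static void start_job(Context *ctx, const char *id)
{
    ctx->jobs_started++;
    printf("started %s (total %d)\n", id, ctx->jobs_started);
}

int main(void)
{
    Context main_ctx = { .lock = PTHREAD_MUTEX_INITIALIZER };

    ctx_acquire(&main_ctx);       /* analogue of aio_context_acquire(main_ctx) */
    start_job(&main_ctx, "job0");
    ctx_release(&main_ctx);       /* analogue of aio_context_release(main_ctx) */
    return 0;
}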