aboutsummaryrefslogtreecommitdiff
path: root/tests
diff options
context:
space:
mode:
authorRichard Henderson <richard.henderson@linaro.org>2023-05-22 07:04:50 -0700
committerRichard Henderson <richard.henderson@linaro.org>2023-05-22 07:04:50 -0700
commitad3387396a71416cacc0b394e5e440dd6e9ba19a (patch)
treef271a1645d501e7ff53560b1e1e8b69298b9262c /tests
parentffd9492f2a713c614025a87add4517db61d77609 (diff)
parent95fdd8db61848d31fde1d9b32da7f3f76babfa25 (diff)
downloadqemu-ad3387396a71416cacc0b394e5e440dd6e9ba19a.zip
qemu-ad3387396a71416cacc0b394e5e440dd6e9ba19a.tar.gz
qemu-ad3387396a71416cacc0b394e5e440dd6e9ba19a.tar.bz2
Merge tag 'for-upstream' of https://repo.or.cz/qemu/kevin into staging
Block layer patches - qcow2 spec: Rename "zlib" compression to "deflate" - Honour graph read lock even in the main thread + prerequisite fixes - aio-posix: do not nest poll handlers (fixes infinite recursion) - Refactor QMP blockdev transactions - graph-lock: Disable locking for now - iotests/245: Check if 'compress' driver is available # -----BEGIN PGP SIGNATURE----- # # iQJFBAABCAAvFiEE3D3rFZqa+V09dFb+fwmycsiPL9YFAmRnrxURHGt3b2xmQHJl # ZGhhdC5jb20ACgkQfwmycsiPL9aHyw/9H0xpceVb0kcC5CStOWCcq4PJHzkl/8/m # c6ABFe0fgEuN2FCiKiCKOt6+V7qaIAw0+YLgPr/LGIsbIBzdxF3Xgd2UyIH6o4dK # bSaIAaes6ZLTcYGIYEVJtHuwNgvzhjyBlW5qqwTpN0YArKS411eHyQ3wlUkCEVwK # ZNmDY/MC8jq8r1xfwpPi7CaH6k1I6HhDmyl1PdURW9hmoAKZQZMhEdA5reJrUwZ9 # EhfgbLIaK0kkLLsufJ9YIkd+b/P3mUbH30kekNMOiA0XlnhWm1Djol5pxlnNiflg # CGh6CAyhJKdXzwV567cSF11NYCsFmiY+c/l0xRIGscujwvO4iD7wFT5xk2geUAKV # yaox8JA7Le36g7lO2CRadlS24/Ekqnle6q09g2i8s2tZwB4fS286vaZz6QDPmf7W # VSQp9vuDj6ZcVjMsuo2+LzF3yA2Vqvgd9s032iBAjRDSGLAoOdQZjBJrreypJ0Oi # pVFwgK+9QNCZBsqVhwVOgElSoK/3Vbl1kqpi30Ikgc0epAn0suM1g2QQPJ2Zt/MJ # xqMlTv+48OW3vq3ebr8GXqkhvG/u0ku6I1G6ZyCrjOce89osK8QUaovERyi1eOmo # ouoZ8UJJa6VfEkkmdhq2vF6u/MP4PeZ8MW3pYQy6qEnSOPDKpLnR30Z/s/HZCZcm # H4QIbfQnzic= # =edNP # -----END PGP SIGNATURE----- # gpg: Signature made Fri 19 May 2023 10:17:09 AM PDT # gpg: using RSA key DC3DEB159A9AF95D3D7456FE7F09B272C88F2FD6 # gpg: issuer "kwolf@redhat.com" # gpg: Good signature from "Kevin Wolf <kwolf@redhat.com>" [full] * tag 'for-upstream' of https://repo.or.cz/qemu/kevin: (21 commits) iotests: Test commit with iothreads and ongoing I/O nbd/server: Fix drained_poll to wake coroutine in right AioContext graph-lock: Disable locking for now tests: add test for nested aio_poll() in poll handlers aio-posix: do not nest poll handlers iotests/245: Check if 'compress' driver is available graph-lock: Honour read locks even in the main thread blockjob: Adhere to rate limit even when reentered early test-bdrv-drain: Call bdrv_co_unref() in coroutine context 
test-bdrv-drain: Take graph lock more selectively qemu-img: Take graph lock more selectively qcow2: Unlock the graph in qcow2_do_open() where necessary block/export: Fix null pointer dereference in error path block: Call .bdrv_co_create(_opts) unlocked docs/interop/qcow2.txt: fix description about "zlib" clusters blockdev: qmp_transaction: drop extra generic layer blockdev: use state.bitmap in block-dirty-bitmap-add action blockdev: transaction: refactor handling transaction properties blockdev: qmp_transaction: refactor loop to classic for blockdev: transactions: rename some things ... Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
Diffstat (limited to 'tests')
-rwxr-xr-xtests/qemu-iotests/2457
-rw-r--r--tests/qemu-iotests/245.out9
-rw-r--r--tests/qemu-iotests/iotests.py4
-rwxr-xr-xtests/qemu-iotests/tests/graph-changes-while-io56
-rw-r--r--tests/qemu-iotests/tests/graph-changes-while-io.out4
-rw-r--r--tests/unit/meson.build5
-rw-r--r--tests/unit/test-bdrv-drain.c6
-rw-r--r--tests/unit/test-nested-aio-poll.c130
8 files changed, 200 insertions, 21 deletions
diff --git a/tests/qemu-iotests/245 b/tests/qemu-iotests/245
index edaf290..92b28c7 100755
--- a/tests/qemu-iotests/245
+++ b/tests/qemu-iotests/245
@@ -611,6 +611,7 @@ class TestBlockdevReopen(iotests.QMPTestCase):
self.reopen(hd0_opts, {'file': 'hd0-file'})
# Insert (and remove) a compress filter
+ @iotests.skip_if_unsupported(['compress'])
def test_insert_compress_filter(self):
# Add an image to the VM: hd (raw) -> hd0 (qcow2) -> hd0-file (file)
opts = {'driver': 'raw', 'node-name': 'hd', 'file': hd_opts(0)}
@@ -650,9 +651,9 @@ class TestBlockdevReopen(iotests.QMPTestCase):
# Check the first byte of the first three L2 entries and verify that
# the second one is compressed (0x40) while the others are not (0x80)
- iotests.qemu_io_log('-f', 'raw', '-c', 'read -P 0x80 0x40000 1',
- '-c', 'read -P 0x40 0x40008 1',
- '-c', 'read -P 0x80 0x40010 1', hd_path[0])
+ iotests.qemu_io('-f', 'raw', '-c', 'read -P 0x80 0x40000 1',
+ '-c', 'read -P 0x40 0x40008 1',
+ '-c', 'read -P 0x80 0x40010 1', hd_path[0])
# Swap the disk images of two active block devices
def test_swap_files(self):
diff --git a/tests/qemu-iotests/245.out b/tests/qemu-iotests/245.out
index a4e04a3..0970ced 100644
--- a/tests/qemu-iotests/245.out
+++ b/tests/qemu-iotests/245.out
@@ -10,14 +10,7 @@
{"return": {}}
{"data": {"id": "stream0", "type": "stream"}, "event": "BLOCK_JOB_PENDING", "timestamp": {"microseconds": "USECS", "seconds": "SECS"}}
{"data": {"device": "stream0", "len": 3145728, "offset": 3145728, "speed": 0, "type": "stream"}, "event": "BLOCK_JOB_COMPLETED", "timestamp": {"microseconds": "USECS", "seconds": "SECS"}}
-....read 1/1 bytes at offset 262144
-1 bytes, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec)
-read 1/1 bytes at offset 262152
-1 bytes, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec)
-read 1/1 bytes at offset 262160
-1 bytes, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec)
-
-................
+....................
----------------------------------------------------------------------
Ran 26 tests
diff --git a/tests/qemu-iotests/iotests.py b/tests/qemu-iotests/iotests.py
index 3e82c63..7073579 100644
--- a/tests/qemu-iotests/iotests.py
+++ b/tests/qemu-iotests/iotests.py
@@ -462,6 +462,10 @@ class QemuStorageDaemon:
assert self._qmp is not None
return self._qmp.cmd(cmd, args)
+ def get_qmp(self) -> QEMUMonitorProtocol:
+ assert self._qmp is not None
+ return self._qmp
+
def stop(self, kill_signal=15):
self._p.send_signal(kill_signal)
self._p.wait()
diff --git a/tests/qemu-iotests/tests/graph-changes-while-io b/tests/qemu-iotests/tests/graph-changes-while-io
index 7664f33..750e7d4 100755
--- a/tests/qemu-iotests/tests/graph-changes-while-io
+++ b/tests/qemu-iotests/tests/graph-changes-while-io
@@ -22,19 +22,19 @@
import os
from threading import Thread
import iotests
-from iotests import imgfmt, qemu_img, qemu_img_create, QMPTestCase, \
- QemuStorageDaemon
+from iotests import imgfmt, qemu_img, qemu_img_create, qemu_io, \
+ QMPTestCase, QemuStorageDaemon
top = os.path.join(iotests.test_dir, 'top.img')
nbd_sock = os.path.join(iotests.sock_dir, 'nbd.sock')
-def do_qemu_img_bench() -> None:
+def do_qemu_img_bench(count: int = 2000000) -> None:
"""
Do some I/O requests on `nbd_sock`.
"""
- qemu_img('bench', '-f', 'raw', '-c', '2000000',
+ qemu_img('bench', '-f', 'raw', '-c', str(count),
f'nbd+unix:///node0?socket={nbd_sock}')
@@ -84,6 +84,54 @@ class TestGraphChangesWhileIO(QMPTestCase):
bench_thr.join()
+ def test_commit_while_io(self) -> None:
+ # Run qemu-img bench in the background
+ bench_thr = Thread(target=do_qemu_img_bench, args=(200000, ))
+ bench_thr.start()
+
+ qemu_io('-c', 'write 0 64k', top)
+ qemu_io('-c', 'write 128k 64k', top)
+
+ result = self.qsd.qmp('blockdev-add', {
+ 'driver': imgfmt,
+ 'node-name': 'overlay',
+ 'backing': None,
+ 'file': {
+ 'driver': 'file',
+ 'filename': top
+ }
+ })
+ self.assert_qmp(result, 'return', {})
+
+ result = self.qsd.qmp('blockdev-snapshot', {
+ 'node': 'node0',
+ 'overlay': 'overlay',
+ })
+ self.assert_qmp(result, 'return', {})
+
+ # While qemu-img bench is running, repeatedly commit overlay to node0
+ while bench_thr.is_alive():
+ result = self.qsd.qmp('block-commit', {
+ 'job-id': 'job0',
+ 'device': 'overlay',
+ })
+ self.assert_qmp(result, 'return', {})
+
+ result = self.qsd.qmp('block-job-cancel', {
+ 'device': 'job0',
+ })
+ self.assert_qmp(result, 'return', {})
+
+ cancelled = False
+ while not cancelled:
+ for event in self.qsd.get_qmp().get_events(wait=10.0):
+ if event['event'] != 'JOB_STATUS_CHANGE':
+ continue
+ if event['data']['status'] == 'null':
+ cancelled = True
+
+ bench_thr.join()
+
if __name__ == '__main__':
# Format must support raw backing files
iotests.main(supported_fmts=['qcow', 'qcow2', 'qed'],
diff --git a/tests/qemu-iotests/tests/graph-changes-while-io.out b/tests/qemu-iotests/tests/graph-changes-while-io.out
index ae1213e..fbc63e6 100644
--- a/tests/qemu-iotests/tests/graph-changes-while-io.out
+++ b/tests/qemu-iotests/tests/graph-changes-while-io.out
@@ -1,5 +1,5 @@
-.
+..
----------------------------------------------------------------------
-Ran 1 tests
+Ran 2 tests
OK
diff --git a/tests/unit/meson.build b/tests/unit/meson.build
index 48ae660..3a63142 100644
--- a/tests/unit/meson.build
+++ b/tests/unit/meson.build
@@ -114,7 +114,10 @@ if have_block
tests += {'test-crypto-xts': [crypto, io]}
endif
if 'CONFIG_POSIX' in config_host
- tests += {'test-image-locking': [testblock]}
+ tests += {
+ 'test-image-locking': [testblock],
+ 'test-nested-aio-poll': [testblock],
+ }
endif
if config_host_data.get('CONFIG_REPLICATION')
tests += {'test-replication': [testblock]}
diff --git a/tests/unit/test-bdrv-drain.c b/tests/unit/test-bdrv-drain.c
index 9a4c5e5..08bb0f9 100644
--- a/tests/unit/test-bdrv-drain.c
+++ b/tests/unit/test-bdrv-drain.c
@@ -1004,8 +1004,6 @@ static void coroutine_fn test_co_delete_by_drain(void *opaque)
void *buffer = g_malloc(65536);
QEMUIOVector qiov = QEMU_IOVEC_INIT_BUF(qiov, buffer, 65536);
- GRAPH_RDLOCK_GUARD();
-
/* Pretend some internal write operation from parent to child.
* Important: We have to read from the child, not from the parent!
* Draining works by first propagating it all up the tree to the
@@ -1014,12 +1012,14 @@ static void coroutine_fn test_co_delete_by_drain(void *opaque)
* everything will be drained before we go back down the tree, but
* we do not want that. We want to be in the middle of draining
* when this following requests returns. */
+ bdrv_graph_co_rdlock();
bdrv_co_preadv(tts->wait_child, 0, 65536, &qiov, 0);
+ bdrv_graph_co_rdunlock();
g_assert_cmpint(bs->refcnt, ==, 1);
if (!dbdd->detach_instead_of_delete) {
- blk_unref(blk);
+ blk_co_unref(blk);
} else {
BdrvChild *c, *next_c;
QLIST_FOREACH_SAFE(c, &bs->children, next, next_c) {
diff --git a/tests/unit/test-nested-aio-poll.c b/tests/unit/test-nested-aio-poll.c
new file mode 100644
index 0000000..9bbe18b
--- /dev/null
+++ b/tests/unit/test-nested-aio-poll.c
@@ -0,0 +1,130 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+/*
+ * Test that poll handlers are not re-entrant in nested aio_poll()
+ *
+ * Copyright Red Hat
+ *
+ * Poll handlers are usually level-triggered. That means they continue firing
+ * until the condition is reset (e.g. a virtqueue becomes empty). If a poll
+ * handler calls nested aio_poll() before the condition is reset, then infinite
+ * recursion occurs.
+ *
+ * aio_poll() is supposed to prevent this by disabling poll handlers in nested
+ * aio_poll() calls. This test case checks that this is indeed what happens.
+ */
+#include "qemu/osdep.h"
+#include "block/aio.h"
+#include "qapi/error.h"
+
+typedef struct {
+ AioContext *ctx;
+
+ /* This is the EventNotifier that drives the test */
+ EventNotifier poll_notifier;
+
+ /* This EventNotifier is only used to wake aio_poll() */
+ EventNotifier dummy_notifier;
+
+ bool nested;
+} TestData;
+
+static void io_read(EventNotifier *notifier)
+{
+ fprintf(stderr, "%s %p\n", __func__, notifier);
+ event_notifier_test_and_clear(notifier);
+}
+
+static bool io_poll_true(void *opaque)
+{
+ fprintf(stderr, "%s %p\n", __func__, opaque);
+ return true;
+}
+
+static bool io_poll_false(void *opaque)
+{
+ fprintf(stderr, "%s %p\n", __func__, opaque);
+ return false;
+}
+
+static void io_poll_ready(EventNotifier *notifier)
+{
+ TestData *td = container_of(notifier, TestData, poll_notifier);
+
+ fprintf(stderr, "> %s\n", __func__);
+
+ g_assert(!td->nested);
+ td->nested = true;
+
+ /* Wake the following nested aio_poll() call */
+ event_notifier_set(&td->dummy_notifier);
+
+ /* This nested event loop must not call io_poll()/io_poll_ready() */
+ g_assert(aio_poll(td->ctx, true));
+
+ td->nested = false;
+
+ fprintf(stderr, "< %s\n", __func__);
+}
+
+/* dummy_notifier never triggers */
+static void io_poll_never_ready(EventNotifier *notifier)
+{
+ g_assert_not_reached();
+}
+
+static void test(void)
+{
+ TestData td = {
+ .ctx = aio_context_new(&error_abort),
+ };
+
+ qemu_set_current_aio_context(td.ctx);
+
+ /* Enable polling */
+ aio_context_set_poll_params(td.ctx, 1000000, 2, 2, &error_abort);
+
+ /*
+ * The GSource is unused but this has the side-effect of changing the fdmon
+ * that AioContext uses.
+ */
+ aio_get_g_source(td.ctx);
+
+ /* Make the event notifier active (set) right away */
+ event_notifier_init(&td.poll_notifier, 1);
+ aio_set_event_notifier(td.ctx, &td.poll_notifier, false,
+ io_read, io_poll_true, io_poll_ready);
+
+ /* This event notifier will be used later */
+ event_notifier_init(&td.dummy_notifier, 0);
+ aio_set_event_notifier(td.ctx, &td.dummy_notifier, false,
+ io_read, io_poll_false, io_poll_never_ready);
+
+ /* Consume aio_notify() */
+ g_assert(!aio_poll(td.ctx, false));
+
+ /*
+ * Run the io_read() handler. This has the side-effect of activating
+ * polling in future aio_poll() calls.
+ */
+ g_assert(aio_poll(td.ctx, true));
+
+ /* The second time around the io_poll()/io_poll_ready() handler runs */
+ g_assert(aio_poll(td.ctx, true));
+
+ /* Run io_poll()/io_poll_ready() one more time to show it keeps working */
+ g_assert(aio_poll(td.ctx, true));
+
+ aio_set_event_notifier(td.ctx, &td.dummy_notifier, false,
+ NULL, NULL, NULL);
+ aio_set_event_notifier(td.ctx, &td.poll_notifier, false, NULL, NULL, NULL);
+ event_notifier_cleanup(&td.dummy_notifier);
+ event_notifier_cleanup(&td.poll_notifier);
+ aio_context_unref(td.ctx);
+}
+
+int main(int argc, char **argv)
+{
+ g_test_init(&argc, &argv, NULL);
+ g_test_add_func("/nested-aio-poll", test);
+ return g_test_run();
+}