From dfa26a110c7e88887ed5732c834ed5c1d22bd2e6 Mon Sep 17 00:00:00 2001 From: Kevin Wolf Date: Tue, 30 Jul 2019 12:23:45 +0200 Subject: iotests/118: Test media change for scsi-cd The test covered only floppy and ide-cd. Add scsi-cd as well. Signed-off-by: Kevin Wolf Reviewed-by: Max Reitz --- tests/qemu-iotests/118 | 20 ++++++++++++++++++++ tests/qemu-iotests/118.out | 4 ++-- 2 files changed, 22 insertions(+), 2 deletions(-) (limited to 'tests') diff --git a/tests/qemu-iotests/118 b/tests/qemu-iotests/118 index 499c5f0..3c20d2d 100755 --- a/tests/qemu-iotests/118 +++ b/tests/qemu-iotests/118 @@ -33,6 +33,8 @@ def interface_to_device_name(interface): return 'ide-cd' elif interface == 'floppy': return 'floppy' + elif interface == 'scsi': + return 'scsi-cd' else: return None @@ -297,6 +299,8 @@ class TestInitiallyFilled(GeneralChangeTestsBaseClass): qemu_img('create', '-f', iotests.imgfmt, new_img, '1440k') self.vm = iotests.VM() self.vm.add_drive(old_img, 'media=%s' % media, 'none') + if interface == 'scsi': + self.vm.add_device('virtio-scsi-pci') self.vm.add_device('%s,drive=drive0,id=%s' % (interface_to_device_name(interface), self.device_name)) @@ -330,6 +334,8 @@ class TestInitiallyEmpty(GeneralChangeTestsBaseClass): def setUp(self, media, interface): qemu_img('create', '-f', iotests.imgfmt, new_img, '1440k') self.vm = iotests.VM().add_drive(None, 'media=%s' % media, 'none') + if interface == 'scsi': + self.vm.add_device('virtio-scsi-pci') self.vm.add_device('%s,drive=drive0,id=%s' % (interface_to_device_name(interface), self.device_name)) @@ -363,6 +369,20 @@ class TestCDInitiallyEmpty(TestInitiallyEmpty): def setUp(self): self.TestInitiallyEmpty.setUp(self, 'cdrom', 'ide') +class TestSCSICDInitiallyFilled(TestInitiallyFilled): + TestInitiallyFilled = TestInitiallyFilled + has_real_tray = True + + def setUp(self): + self.TestInitiallyFilled.setUp(self, 'cdrom', 'scsi') + +class TestSCSICDInitiallyEmpty(TestInitiallyEmpty): + TestInitiallyEmpty = TestInitiallyEmpty + has_real_tray = True + + def setUp(self): + self.TestInitiallyEmpty.setUp(self, 'cdrom', 'scsi') + class TestFloppyInitiallyFilled(TestInitiallyFilled): TestInitiallyFilled = TestInitiallyFilled has_real_tray = False diff --git a/tests/qemu-iotests/118.out b/tests/qemu-iotests/118.out index 4823c11..b4ff997 100644 --- a/tests/qemu-iotests/118.out +++ b/tests/qemu-iotests/118.out @@ -1,5 +1,5 @@ -............................................................... +......................................................................................... ---------------------------------------------------------------------- -Ran 63 tests +Ran 89 tests OK -- cgit v1.1 From dfc828941c0f1771c5861fce11a428c8c64a206d Mon Sep 17 00:00:00 2001 From: Kevin Wolf Date: Tue, 30 Jul 2019 16:25:55 +0200 Subject: iotests/118: Create test classes dynamically We're getting a ridiculous number of child classes of TestInitiallyFilled and TestInitiallyEmpty that differ only in a few attributes that we want to test in all combinations. Instead of explicitly writing down every combination, let's use a loop and create those classes dynamically. 
Signed-off-by: Kevin Wolf Reviewed-by: Max Reitz --- tests/qemu-iotests/118 | 69 +++++++++++++++----------------------------------- 1 file changed, 21 insertions(+), 48 deletions(-) (limited to 'tests') diff --git a/tests/qemu-iotests/118 b/tests/qemu-iotests/118 index 3c20d2d..c281259 100755 --- a/tests/qemu-iotests/118 +++ b/tests/qemu-iotests/118 @@ -294,15 +294,15 @@ class GeneralChangeTestsBaseClass(ChangeBaseClass): class TestInitiallyFilled(GeneralChangeTestsBaseClass): was_empty = False - def setUp(self, media, interface): + def setUp(self): qemu_img('create', '-f', iotests.imgfmt, old_img, '1440k') qemu_img('create', '-f', iotests.imgfmt, new_img, '1440k') self.vm = iotests.VM() - self.vm.add_drive(old_img, 'media=%s' % media, 'none') - if interface == 'scsi': + self.vm.add_drive(old_img, 'media=%s' % self.media, 'none') + if self.interface == 'scsi': self.vm.add_device('virtio-scsi-pci') self.vm.add_device('%s,drive=drive0,id=%s' % - (interface_to_device_name(interface), + (interface_to_device_name(self.interface), self.device_name)) self.vm.launch() @@ -331,13 +331,13 @@ class TestInitiallyFilled(GeneralChangeTestsBaseClass): class TestInitiallyEmpty(GeneralChangeTestsBaseClass): was_empty = True - def setUp(self, media, interface): + def setUp(self): qemu_img('create', '-f', iotests.imgfmt, new_img, '1440k') - self.vm = iotests.VM().add_drive(None, 'media=%s' % media, 'none') - if interface == 'scsi': + self.vm = iotests.VM().add_drive(None, 'media=%s' % self.media, 'none') + if self.interface == 'scsi': self.vm.add_device('virtio-scsi-pci') self.vm.add_device('%s,drive=drive0,id=%s' % - (interface_to_device_name(interface), + (interface_to_device_name(self.interface), self.device_name)) self.vm.launch() @@ -355,50 +355,23 @@ class TestInitiallyEmpty(GeneralChangeTestsBaseClass): # Should be a no-op self.assert_qmp(result, 'return', {}) -class TestCDInitiallyFilled(TestInitiallyFilled): - TestInitiallyFilled = TestInitiallyFilled - has_real_tray = True - - def setUp(self): - self.TestInitiallyFilled.setUp(self, 'cdrom', 'ide') - -class TestCDInitiallyEmpty(TestInitiallyEmpty): - TestInitiallyEmpty = TestInitiallyEmpty - has_real_tray = True - - def setUp(self): - self.TestInitiallyEmpty.setUp(self, 'cdrom', 'ide') +# Do this in a function to avoid leaking variables like case into the global +# name space (otherwise tests would be run for the abstract base classes) +def create_basic_test_classes(): + for (media, interface, has_real_tray) in [ ('cdrom', 'ide', True), + ('cdrom', 'scsi', True), + ('disk', 'floppy', False) ]: -class TestSCSICDInitiallyFilled(TestInitiallyFilled): - TestInitiallyFilled = TestInitiallyFilled - has_real_tray = True + for case in [ TestInitiallyFilled, TestInitiallyEmpty ]: - def setUp(self): - self.TestInitiallyFilled.setUp(self, 'cdrom', 'scsi') + attr = { 'media': media, + 'interface': interface, + 'has_real_tray': has_real_tray } -class TestSCSICDInitiallyEmpty(TestInitiallyEmpty): - TestInitiallyEmpty = TestInitiallyEmpty - has_real_tray = True + name = '%s_%s_%s' % (case.__name__, media, interface) + globals()[name] = type(name, (case, ), attr) - def setUp(self): - self.TestInitiallyEmpty.setUp(self, 'cdrom', 'scsi') - -class TestFloppyInitiallyFilled(TestInitiallyFilled): - TestInitiallyFilled = TestInitiallyFilled - has_real_tray = False - - def setUp(self): - self.TestInitiallyFilled.setUp(self, 'disk', 'floppy') - -class TestFloppyInitiallyEmpty(TestInitiallyEmpty): - TestInitiallyEmpty = TestInitiallyEmpty - has_real_tray = False - - def 
setUp(self): - self.TestInitiallyEmpty.setUp(self, 'disk', 'floppy') - # FDDs not having a real tray and there not being a medium inside the - # tray at startup means the tray will be considered open - self.has_opened = True +create_basic_test_classes() class TestChangeReadOnly(ChangeBaseClass): device_name = 'qdev0' -- cgit v1.1 From 19462c4bdd3bab7d277aafbdc178daa6ca074c9c Mon Sep 17 00:00:00 2001 From: Kevin Wolf Date: Tue, 30 Jul 2019 16:49:26 +0200 Subject: iotests/118: Add -blockdev based tests The code path for -device drive= or without a drive=... option for empty drives, which is supposed to be used with -blockdev differs enough from the -drive based path with a user-owned BlockBackend, so we want to test both paths at least for the basic tests implemented by TestInitiallyFilled and TestInitiallyEmpty. This would have caught the bug recently fixed for inserting read-only nodes into a scsi-cd created without a drive=... option. Signed-off-by: Kevin Wolf Reviewed-by: Max Reitz --- tests/qemu-iotests/118 | 43 ++++++++++++++++++++++++++++++------------- tests/qemu-iotests/118.out | 4 ++-- 2 files changed, 32 insertions(+), 15 deletions(-) (limited to 'tests') diff --git a/tests/qemu-iotests/118 b/tests/qemu-iotests/118 index c281259..6f45779 100755 --- a/tests/qemu-iotests/118 +++ b/tests/qemu-iotests/118 @@ -42,10 +42,14 @@ class ChangeBaseClass(iotests.QMPTestCase): has_opened = False has_closed = False + device_name = 'qdev0' + use_drive = False + def process_events(self): for event in self.vm.get_qmp_events(wait=False): if (event['event'] == 'DEVICE_TRAY_MOVED' and - event['data']['device'] == 'drive0'): + (event['data']['device'] == 'drive0' or + event['data']['id'] == self.device_name)): if event['data']['tray-open'] == False: self.has_closed = True else: @@ -69,9 +73,11 @@ class ChangeBaseClass(iotests.QMPTestCase): class GeneralChangeTestsBaseClass(ChangeBaseClass): - device_name = 'qdev0' - def test_change(self): + # 'change' requires a drive name, so skip the test for blockdev + if not self.use_drive: + return + result = self.vm.qmp('change', device='drive0', target=new_img, arg=iotests.imgfmt) self.assert_qmp(result, 'return', {}) @@ -298,7 +304,13 @@ class TestInitiallyFilled(GeneralChangeTestsBaseClass): qemu_img('create', '-f', iotests.imgfmt, old_img, '1440k') qemu_img('create', '-f', iotests.imgfmt, new_img, '1440k') self.vm = iotests.VM() - self.vm.add_drive(old_img, 'media=%s' % self.media, 'none') + if self.use_drive: + self.vm.add_drive(old_img, 'media=%s' % self.media, 'none') + else: + self.vm.add_blockdev([ 'node-name=drive0', + 'driver=%s' % iotests.imgfmt, + 'file.driver=file', + 'file.filename=%s' % old_img ]) if self.interface == 'scsi': self.vm.add_device('virtio-scsi-pci') self.vm.add_device('%s,drive=drive0,id=%s' % @@ -333,11 +345,14 @@ class TestInitiallyEmpty(GeneralChangeTestsBaseClass): def setUp(self): qemu_img('create', '-f', iotests.imgfmt, new_img, '1440k') - self.vm = iotests.VM().add_drive(None, 'media=%s' % self.media, 'none') + self.vm = iotests.VM() + if self.use_drive: + self.vm.add_drive(None, 'media=%s' % self.media, 'none') if self.interface == 'scsi': self.vm.add_device('virtio-scsi-pci') - self.vm.add_device('%s,drive=drive0,id=%s' % + self.vm.add_device('%s,%sid=%s' % (interface_to_device_name(self.interface), + 'drive=drive0,' if self.use_drive else '', self.device_name)) self.vm.launch() @@ -363,13 +378,15 @@ def create_basic_test_classes(): ('disk', 'floppy', False) ]: for case in [ TestInitiallyFilled, TestInitiallyEmpty ]: - - 
attr = { 'media': media, - 'interface': interface, - 'has_real_tray': has_real_tray } - - name = '%s_%s_%s' % (case.__name__, media, interface) - globals()[name] = type(name, (case, ), attr) + for use_drive in [ True, False ]: + attr = { 'media': media, + 'interface': interface, + 'has_real_tray': has_real_tray, + 'use_drive': use_drive } + + name = '%s_%s_%s_%s' % (case.__name__, media, interface, + 'drive' if use_drive else 'blockdev') + globals()[name] = type(name, (case, ), attr) create_basic_test_classes() diff --git a/tests/qemu-iotests/118.out b/tests/qemu-iotests/118.out index b4ff997..bf5bfd5 100644 --- a/tests/qemu-iotests/118.out +++ b/tests/qemu-iotests/118.out @@ -1,5 +1,5 @@ -......................................................................................... +....................................................................................................................................................................... ---------------------------------------------------------------------- -Ran 89 tests +Ran 167 tests OK -- cgit v1.1 From 980448f17a24573fea53ceec3fa66353e9fd4092 Mon Sep 17 00:00:00 2001 From: Kevin Wolf Date: Thu, 1 Aug 2019 13:14:09 +0200 Subject: iotests: Move migration helpers to iotests.py 234 implements functions that are useful for doing migration between two VMs. Move them to iotests.py so that other test cases can use them, too. Signed-off-by: Kevin Wolf Reviewed-by: Max Reitz --- tests/qemu-iotests/234 | 30 +++++++----------------------- tests/qemu-iotests/iotests.py | 16 ++++++++++++++++ 2 files changed, 23 insertions(+), 23 deletions(-) (limited to 'tests') diff --git a/tests/qemu-iotests/234 b/tests/qemu-iotests/234 index c4c26bc..34c818c 100755 --- a/tests/qemu-iotests/234 +++ b/tests/qemu-iotests/234 @@ -26,22 +26,6 @@ import os iotests.verify_image_format(supported_fmts=['qcow2']) iotests.verify_platform(['linux']) -def enable_migration_events(vm, name): - iotests.log('Enabling migration QMP events on %s...' % name) - iotests.log(vm.qmp('migrate-set-capabilities', capabilities=[ - { - 'capability': 'events', - 'state': True - } - ])) - -def wait_migration(vm): - while True: - event = vm.event_wait('MIGRATION') - iotests.log(event, filters=[iotests.filter_qmp_event]) - if event['data']['status'] == 'completed': - break - with iotests.FilePath('img') as img_path, \ iotests.FilePath('backing') as backing_path, \ iotests.FilePath('mig_fifo_a') as fifo_a, \ @@ -62,7 +46,7 @@ with iotests.FilePath('img') as img_path, \ .add_blockdev('%s,file=drive0-backing-file,node-name=drive0-backing' % (iotests.imgfmt)) .launch()) - enable_migration_events(vm_a, 'A') + vm_a.enable_migration_events('A') iotests.log('Launching destination VM...') (vm_b.add_blockdev('file,filename=%s,node-name=drive0-file' % (img_path)) @@ -72,7 +56,7 @@ with iotests.FilePath('img') as img_path, \ .add_incoming("exec: cat '%s'" % (fifo_a)) .launch()) - enable_migration_events(vm_b, 'B') + vm_b.enable_migration_events('B') # Add a child node that was created after the parent node. The reverse case # is covered by the -blockdev options above. 
@@ -85,9 +69,9 @@ with iotests.FilePath('img') as img_path, \ iotests.log(vm_a.qmp('migrate', uri='exec:cat >%s' % (fifo_a))) with iotests.Timeout(3, 'Migration does not complete'): # Wait for the source first (which includes setup=setup) - wait_migration(vm_a) + vm_a.wait_migration() # Wait for the destination second (which does not) - wait_migration(vm_b) + vm_b.wait_migration() iotests.log(vm_a.qmp('query-migrate')['return']['status']) iotests.log(vm_b.qmp('query-migrate')['return']['status']) @@ -105,7 +89,7 @@ with iotests.FilePath('img') as img_path, \ .add_incoming("exec: cat '%s'" % (fifo_b)) .launch()) - enable_migration_events(vm_a, 'A') + vm_a.enable_migration_events('A') iotests.log(vm_a.qmp('blockdev-snapshot', node='drive0-backing', overlay='drive0')) @@ -114,9 +98,9 @@ with iotests.FilePath('img') as img_path, \ iotests.log(vm_b.qmp('migrate', uri='exec:cat >%s' % (fifo_b))) with iotests.Timeout(3, 'Migration does not complete'): # Wait for the source first (which includes setup=setup) - wait_migration(vm_b) + vm_b.wait_migration() # Wait for the destination second (which does not) - wait_migration(vm_a) + vm_a.wait_migration() iotests.log(vm_a.qmp('query-migrate')['return']['status']) iotests.log(vm_b.qmp('query-migrate')['return']['status']) diff --git a/tests/qemu-iotests/iotests.py b/tests/qemu-iotests/iotests.py index ce74177..91172c3 100644 --- a/tests/qemu-iotests/iotests.py +++ b/tests/qemu-iotests/iotests.py @@ -583,6 +583,22 @@ class VM(qtest.QEMUQtestMachine): elif status == 'null': return error + def enable_migration_events(self, name): + log('Enabling migration QMP events on %s...' % name) + log(self.qmp('migrate-set-capabilities', capabilities=[ + { + 'capability': 'events', + 'state': True + } + ])) + + def wait_migration(self): + while True: + event = self.event_wait('MIGRATION') + log(event, filters=[filter_qmp_event]) + if event['data']['status'] == 'completed': + break + def node_info(self, node_name): nodes = self.qmp('query-named-block-nodes') for x in nodes['return']: -- cgit v1.1 From 5b96e6a002d8791d24ef69ed67ee6d264239622d Mon Sep 17 00:00:00 2001 From: Kevin Wolf Date: Thu, 1 Aug 2019 17:12:25 +0200 Subject: iotests: Test migration with all kinds of filter nodes This test case is motivated by commit 2b23f28639 ('block/copy-on-read: Fix permissions for inactive node'). Instead of just testing copy-on-read on migration, let's stack all sorts of filter nodes on top of each other and try if the resulting VM can still migrate successfully. For good measure, put everything into an iothread, because why not? Signed-off-by: Kevin Wolf Reviewed-by: Max Reitz --- tests/qemu-iotests/262 | 82 ++++++++++++++++++++++++++++++++++++++++++++++ tests/qemu-iotests/262.out | 17 ++++++++++ tests/qemu-iotests/group | 1 + 3 files changed, 100 insertions(+) create mode 100755 tests/qemu-iotests/262 create mode 100644 tests/qemu-iotests/262.out (limited to 'tests') diff --git a/tests/qemu-iotests/262 b/tests/qemu-iotests/262 new file mode 100755 index 0000000..398f635 --- /dev/null +++ b/tests/qemu-iotests/262 @@ -0,0 +1,82 @@ +#!/usr/bin/env python +# +# Copyright (C) 2019 Red Hat, Inc. +# +# This program is free software; you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation; either version 2 of the License, or +# (at your option) any later version. 
+# +# This program is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with this program. If not, see . +# +# Creator/Owner: Kevin Wolf +# +# Test migration with filter drivers present. Keep everything in an +# iothread just for fun. + +import iotests +import os + +iotests.verify_image_format(supported_fmts=['qcow2']) +iotests.verify_platform(['linux']) + +with iotests.FilePath('img') as img_path, \ + iotests.FilePath('mig_fifo') as fifo, \ + iotests.VM(path_suffix='a') as vm_a, \ + iotests.VM(path_suffix='b') as vm_b: + + def add_opts(vm): + vm.add_object('iothread,id=iothread0') + vm.add_object('throttle-group,id=tg0,x-bps-total=65536') + vm.add_blockdev('file,filename=%s,node-name=drive0-file' % (img_path)) + vm.add_blockdev('%s,file=drive0-file,node-name=drive0-fmt' % (iotests.imgfmt)) + vm.add_blockdev('copy-on-read,file=drive0-fmt,node-name=drive0-cor') + vm.add_blockdev('throttle,file=drive0-cor,node-name=drive0-throttle,throttle-group=tg0') + vm.add_blockdev('blkdebug,image=drive0-throttle,node-name=drive0-dbg') + vm.add_blockdev('null-co,node-name=null,read-zeroes=on') + vm.add_blockdev('blkverify,test=drive0-dbg,raw=null,node-name=drive0-verify') + + if iotests.supports_quorum(): + vm.add_blockdev('quorum,children.0=drive0-verify,vote-threshold=1,node-name=drive0-quorum') + root = "drive0-quorum" + else: + root = "drive0-verify" + + vm.add_device('virtio-blk,drive=%s,iothread=iothread0' % root) + + iotests.qemu_img_pipe('create', '-f', iotests.imgfmt, img_path, '64M') + + os.mkfifo(fifo) + + iotests.log('Launching source VM...') + add_opts(vm_a) + vm_a.launch() + + vm_a.enable_migration_events('A') + + iotests.log('Launching destination VM...') + add_opts(vm_b) + vm_b.add_incoming("exec: cat '%s'" % (fifo)) + vm_b.launch() + + vm_b.enable_migration_events('B') + + iotests.log('Starting migration to B...') + iotests.log(vm_a.qmp('migrate', uri='exec:cat >%s' % (fifo))) + with iotests.Timeout(3, 'Migration does not complete'): + # Wait for the source first (which includes setup=setup) + vm_a.wait_migration() + # Wait for the destination second (which does not) + vm_b.wait_migration() + + iotests.log(vm_a.qmp('query-migrate')['return']['status']) + iotests.log(vm_b.qmp('query-migrate')['return']['status']) + + iotests.log(vm_a.qmp('query-status')) + iotests.log(vm_b.qmp('query-status')) diff --git a/tests/qemu-iotests/262.out b/tests/qemu-iotests/262.out new file mode 100644 index 0000000..5a58e5e --- /dev/null +++ b/tests/qemu-iotests/262.out @@ -0,0 +1,17 @@ +Launching source VM... +Enabling migration QMP events on A... +{"return": {}} +Launching destination VM... +Enabling migration QMP events on B... +{"return": {}} +Starting migration to B... 
+{"return": {}} +{"data": {"status": "setup"}, "event": "MIGRATION", "timestamp": {"microseconds": "USECS", "seconds": "SECS"}} +{"data": {"status": "active"}, "event": "MIGRATION", "timestamp": {"microseconds": "USECS", "seconds": "SECS"}} +{"data": {"status": "completed"}, "event": "MIGRATION", "timestamp": {"microseconds": "USECS", "seconds": "SECS"}} +{"data": {"status": "active"}, "event": "MIGRATION", "timestamp": {"microseconds": "USECS", "seconds": "SECS"}} +{"data": {"status": "completed"}, "event": "MIGRATION", "timestamp": {"microseconds": "USECS", "seconds": "SECS"}} +completed +completed +{"return": {"running": false, "singlestep": false, "status": "postmigrate"}} +{"return": {"running": true, "singlestep": false, "status": "running"}} diff --git a/tests/qemu-iotests/group b/tests/qemu-iotests/group index f13e5f2..71ba3c0 100644 --- a/tests/qemu-iotests/group +++ b/tests/qemu-iotests/group @@ -271,3 +271,4 @@ 254 rw backing quick 255 rw quick 256 rw quick +262 rw quick migration -- cgit v1.1 From 9746b35cf3c1a60cf569d8caa434aa1ee919e63f Mon Sep 17 00:00:00 2001 From: Max Reitz Date: Mon, 22 Jul 2019 15:33:45 +0200 Subject: tests: Test polling in bdrv_drop_intermediate() Signed-off-by: Max Reitz Signed-off-by: Kevin Wolf --- tests/test-bdrv-drain.c | 167 ++++++++++++++++++++++++++++++++++++++++++++++++ 1 file changed, 167 insertions(+) (limited to 'tests') diff --git a/tests/test-bdrv-drain.c b/tests/test-bdrv-drain.c index 03fa114..1600d41 100644 --- a/tests/test-bdrv-drain.c +++ b/tests/test-bdrv-drain.c @@ -100,6 +100,13 @@ static void bdrv_test_child_perm(BlockDriverState *bs, BdrvChild *c, nperm, nshared); } +static int bdrv_test_change_backing_file(BlockDriverState *bs, + const char *backing_file, + const char *backing_fmt) +{ + return 0; +} + static BlockDriver bdrv_test = { .format_name = "test", .instance_size = sizeof(BDRVTestState), @@ -111,6 +118,8 @@ static BlockDriver bdrv_test = { .bdrv_co_drain_end = bdrv_test_co_drain_end, .bdrv_child_perm = bdrv_test_child_perm, + + .bdrv_change_backing_file = bdrv_test_change_backing_file, }; static void aio_ret_cb(void *opaque, int ret) @@ -1671,6 +1680,161 @@ static void test_blockjob_commit_by_drained_end(void) bdrv_unref(bs_child); } + +typedef struct TestSimpleBlockJob { + BlockJob common; + bool should_complete; + bool *did_complete; +} TestSimpleBlockJob; + +static int coroutine_fn test_simple_job_run(Job *job, Error **errp) +{ + TestSimpleBlockJob *s = container_of(job, TestSimpleBlockJob, common.job); + + while (!s->should_complete) { + job_sleep_ns(job, 0); + } + + return 0; +} + +static void test_simple_job_clean(Job *job) +{ + TestSimpleBlockJob *s = container_of(job, TestSimpleBlockJob, common.job); + *s->did_complete = true; +} + +static const BlockJobDriver test_simple_job_driver = { + .job_driver = { + .instance_size = sizeof(TestSimpleBlockJob), + .free = block_job_free, + .user_resume = block_job_user_resume, + .drain = block_job_drain, + .run = test_simple_job_run, + .clean = test_simple_job_clean, + }, +}; + +static int drop_intermediate_poll_update_filename(BdrvChild *child, + BlockDriverState *new_base, + const char *filename, + Error **errp) +{ + /* + * We are free to poll here, which may change the block graph, if + * it is not drained. 
+ */ + + /* If the job is not drained: Complete it, schedule job_exit() */ + aio_poll(qemu_get_current_aio_context(), false); + /* If the job is not drained: Run job_exit(), finish the job */ + aio_poll(qemu_get_current_aio_context(), false); + + return 0; +} + +/** + * Test a poll in the midst of bdrv_drop_intermediate(). + * + * bdrv_drop_intermediate() calls BdrvChildRole.update_filename(), + * which can yield or poll. This may lead to graph changes, unless + * the whole subtree in question is drained. + * + * We test this on the following graph: + * + * Job + * + * | + * job-node + * | + * v + * + * job-node + * + * | + * backing + * | + * v + * + * node-2 --chain--> node-1 --chain--> node-0 + * + * We drop node-1 with bdrv_drop_intermediate(top=node-1, base=node-0). + * + * This first updates node-2's backing filename by invoking + * drop_intermediate_poll_update_filename(), which polls twice. This + * causes the job to finish, which in turns causes the job-node to be + * deleted. + * + * bdrv_drop_intermediate() uses a QLIST_FOREACH_SAFE() loop, so it + * already has a pointer to the BdrvChild edge between job-node and + * node-1. When it tries to handle that edge, we probably get a + * segmentation fault because the object no longer exists. + * + * + * The solution is for bdrv_drop_intermediate() to drain top's + * subtree. This prevents graph changes from happening just because + * BdrvChildRole.update_filename() yields or polls. Thus, the block + * job is paused during that drained section and must finish before or + * after. + * + * (In addition, bdrv_replace_child() must keep the job paused.) + */ +static void test_drop_intermediate_poll(void) +{ + static BdrvChildRole chain_child_role; + BlockDriverState *chain[3]; + TestSimpleBlockJob *job; + BlockDriverState *job_node; + bool job_has_completed = false; + int i; + int ret; + + chain_child_role = child_backing; + chain_child_role.update_filename = drop_intermediate_poll_update_filename; + + for (i = 0; i < 3; i++) { + char name[32]; + snprintf(name, 32, "node-%i", i); + + chain[i] = bdrv_new_open_driver(&bdrv_test, name, 0, &error_abort); + } + + job_node = bdrv_new_open_driver(&bdrv_test, "job-node", BDRV_O_RDWR, + &error_abort); + bdrv_set_backing_hd(job_node, chain[1], &error_abort); + + /* + * Establish the chain last, so the chain links are the first + * elements in the BDS.parents lists + */ + for (i = 0; i < 3; i++) { + if (i) { + /* Takes the reference to chain[i - 1] */ + chain[i]->backing = bdrv_attach_child(chain[i], chain[i - 1], + "chain", &chain_child_role, + &error_abort); + } + } + + job = block_job_create("job", &test_simple_job_driver, NULL, job_node, + 0, BLK_PERM_ALL, 0, 0, NULL, NULL, &error_abort); + + /* The job has a reference now */ + bdrv_unref(job_node); + + job->did_complete = &job_has_completed; + + job_start(&job->common.job); + job->should_complete = true; + + g_assert(!job_has_completed); + ret = bdrv_drop_intermediate(chain[1], chain[0], NULL); + g_assert(ret == 0); + g_assert(job_has_completed); + + bdrv_unref(chain[2]); +} + int main(int argc, char **argv) { int ret; @@ -1757,6 +1921,9 @@ int main(int argc, char **argv) g_test_add_func("/bdrv-drain/blockjob/commit_by_drained_end", test_blockjob_commit_by_drained_end); + g_test_add_func("/bdrv-drain/bdrv_drop_intermediate/poll", + test_drop_intermediate_poll); + ret = g_test_run(); qemu_event_destroy(&done_event); return ret; -- cgit v1.1 From 0513f9841f14b918bd7aa7f1c8fb0ac3b0f6dea0 Mon Sep 17 00:00:00 2001 From: Max Reitz Date: Mon, 22 Jul 
2019 15:33:46 +0200 Subject: tests: Test mid-drain bdrv_replace_child_noperm() Add a test for what happens when you call bdrv_replace_child_noperm() for various drain situations ({old,new} child {drained,not drained}). Most importantly, if both the old and the new child are drained, the parent must not be undrained at any point. Signed-off-by: Max Reitz Signed-off-by: Kevin Wolf --- tests/test-bdrv-drain.c | 308 ++++++++++++++++++++++++++++++++++++++++++++++++ 1 file changed, 308 insertions(+) (limited to 'tests') diff --git a/tests/test-bdrv-drain.c b/tests/test-bdrv-drain.c index 1600d41..9dffd86 100644 --- a/tests/test-bdrv-drain.c +++ b/tests/test-bdrv-drain.c @@ -1835,6 +1835,311 @@ static void test_drop_intermediate_poll(void) bdrv_unref(chain[2]); } + +typedef struct BDRVReplaceTestState { + bool was_drained; + bool was_undrained; + bool has_read; + + int drain_count; + + bool yield_before_read; + Coroutine *io_co; + Coroutine *drain_co; +} BDRVReplaceTestState; + +static void bdrv_replace_test_close(BlockDriverState *bs) +{ +} + +/** + * If @bs has a backing file: + * Yield if .yield_before_read is true (and wait for drain_begin to + * wake us up). + * Forward the read to bs->backing. Set .has_read to true. + * If drain_begin has woken us, wake it in turn. + * + * Otherwise: + * Set .has_read to true and return success. + */ +static int coroutine_fn bdrv_replace_test_co_preadv(BlockDriverState *bs, + uint64_t offset, + uint64_t bytes, + QEMUIOVector *qiov, + int flags) +{ + BDRVReplaceTestState *s = bs->opaque; + + if (bs->backing) { + int ret; + + g_assert(!s->drain_count); + + s->io_co = qemu_coroutine_self(); + if (s->yield_before_read) { + s->yield_before_read = false; + qemu_coroutine_yield(); + } + s->io_co = NULL; + + ret = bdrv_preadv(bs->backing, offset, qiov); + s->has_read = true; + + /* Wake up drain_co if it runs */ + if (s->drain_co) { + aio_co_wake(s->drain_co); + } + + return ret; + } + + s->has_read = true; + return 0; +} + +/** + * If .drain_count is 0, wake up .io_co if there is one; and set + * .was_drained. + * Increment .drain_count. + */ +static void coroutine_fn bdrv_replace_test_co_drain_begin(BlockDriverState *bs) +{ + BDRVReplaceTestState *s = bs->opaque; + + if (!s->drain_count) { + /* Keep waking io_co up until it is done */ + s->drain_co = qemu_coroutine_self(); + while (s->io_co) { + aio_co_wake(s->io_co); + s->io_co = NULL; + qemu_coroutine_yield(); + } + s->drain_co = NULL; + + s->was_drained = true; + } + s->drain_count++; +} + +/** + * Reduce .drain_count, set .was_undrained once it reaches 0. + * If .drain_count reaches 0 and the node has a backing file, issue a + * read request. 
+ */ +static void coroutine_fn bdrv_replace_test_co_drain_end(BlockDriverState *bs) +{ + BDRVReplaceTestState *s = bs->opaque; + + g_assert(s->drain_count > 0); + if (!--s->drain_count) { + int ret; + + s->was_undrained = true; + + if (bs->backing) { + char data; + QEMUIOVector qiov = QEMU_IOVEC_INIT_BUF(qiov, &data, 1); + + /* Queue a read request post-drain */ + ret = bdrv_replace_test_co_preadv(bs, 0, 1, &qiov, 0); + g_assert(ret >= 0); + } + } +} + +static BlockDriver bdrv_replace_test = { + .format_name = "replace_test", + .instance_size = sizeof(BDRVReplaceTestState), + + .bdrv_close = bdrv_replace_test_close, + .bdrv_co_preadv = bdrv_replace_test_co_preadv, + + .bdrv_co_drain_begin = bdrv_replace_test_co_drain_begin, + .bdrv_co_drain_end = bdrv_replace_test_co_drain_end, + + .bdrv_child_perm = bdrv_format_default_perms, +}; + +static void coroutine_fn test_replace_child_mid_drain_read_co(void *opaque) +{ + int ret; + char data; + + ret = blk_co_pread(opaque, 0, 1, &data, 0); + g_assert(ret >= 0); +} + +/** + * We test two things: + * (1) bdrv_replace_child_noperm() must not undrain the parent if both + * children are drained. + * (2) bdrv_replace_child_noperm() must never flush I/O requests to a + * drained child. If the old child is drained, it must flush I/O + * requests after the new one has been attached. If the new child + * is drained, it must flush I/O requests before the old one is + * detached. + * + * To do so, we create one parent node and two child nodes; then + * attach one of the children (old_child_bs) to the parent, then + * drain both old_child_bs and new_child_bs according to + * old_drain_count and new_drain_count, respectively, and finally + * we invoke bdrv_replace_node() to replace old_child_bs by + * new_child_bs. + * + * The test block driver we use here (bdrv_replace_test) has a read + * function that: + * - For the parent node, can optionally yield, and then forwards the + * read to bdrv_preadv(), + * - For the child node, just returns immediately. + * + * If the read yields, the drain_begin function will wake it up. + * + * The drain_end function issues a read on the parent once it is fully + * undrained (which simulates requests starting to come in again). 
+ */ +static void do_test_replace_child_mid_drain(int old_drain_count, + int new_drain_count) +{ + BlockBackend *parent_blk; + BlockDriverState *parent_bs; + BlockDriverState *old_child_bs, *new_child_bs; + BDRVReplaceTestState *parent_s; + BDRVReplaceTestState *old_child_s, *new_child_s; + Coroutine *io_co; + int i; + + parent_bs = bdrv_new_open_driver(&bdrv_replace_test, "parent", 0, + &error_abort); + parent_s = parent_bs->opaque; + + parent_blk = blk_new(qemu_get_aio_context(), + BLK_PERM_CONSISTENT_READ, BLK_PERM_ALL); + blk_insert_bs(parent_blk, parent_bs, &error_abort); + + old_child_bs = bdrv_new_open_driver(&bdrv_replace_test, "old-child", 0, + &error_abort); + new_child_bs = bdrv_new_open_driver(&bdrv_replace_test, "new-child", 0, + &error_abort); + old_child_s = old_child_bs->opaque; + new_child_s = new_child_bs->opaque; + + /* So that we can read something */ + parent_bs->total_sectors = 1; + old_child_bs->total_sectors = 1; + new_child_bs->total_sectors = 1; + + bdrv_ref(old_child_bs); + parent_bs->backing = bdrv_attach_child(parent_bs, old_child_bs, "child", + &child_backing, &error_abort); + + for (i = 0; i < old_drain_count; i++) { + bdrv_drained_begin(old_child_bs); + } + for (i = 0; i < new_drain_count; i++) { + bdrv_drained_begin(new_child_bs); + } + + if (!old_drain_count) { + /* + * Start a read operation that will yield, so it will not + * complete before the node is drained. + */ + parent_s->yield_before_read = true; + io_co = qemu_coroutine_create(test_replace_child_mid_drain_read_co, + parent_blk); + qemu_coroutine_enter(io_co); + } + + /* If we have started a read operation, it should have yielded */ + g_assert(!parent_s->has_read); + + /* Reset drained status so we can see what bdrv_replace_node() does */ + parent_s->was_drained = false; + parent_s->was_undrained = false; + + g_assert(parent_bs->quiesce_counter == old_drain_count); + bdrv_replace_node(old_child_bs, new_child_bs, &error_abort); + g_assert(parent_bs->quiesce_counter == new_drain_count); + + if (!old_drain_count && !new_drain_count) { + /* + * From undrained to undrained drains and undrains the parent, + * because bdrv_replace_node() contains a drained section for + * @old_child_bs. + */ + g_assert(parent_s->was_drained && parent_s->was_undrained); + } else if (!old_drain_count && new_drain_count) { + /* + * From undrained to drained should drain the parent and keep + * it that way. + */ + g_assert(parent_s->was_drained && !parent_s->was_undrained); + } else if (old_drain_count && !new_drain_count) { + /* + * From drained to undrained should undrain the parent and + * keep it that way. + */ + g_assert(!parent_s->was_drained && parent_s->was_undrained); + } else /* if (old_drain_count && new_drain_count) */ { + /* + * From drained to drained must not undrain the parent at any + * point + */ + g_assert(!parent_s->was_drained && !parent_s->was_undrained); + } + + if (!old_drain_count || !new_drain_count) { + /* + * If !old_drain_count, we have started a read request before + * bdrv_replace_node(). If !new_drain_count, the parent must + * have been undrained at some point, and + * bdrv_replace_test_co_drain_end() starts a read request + * then. + */ + g_assert(parent_s->has_read); + } else { + /* + * If the parent was never undrained, there is no way to start + * a read request. 
+ */ + g_assert(!parent_s->has_read); + } + + /* A drained child must have not received any request */ + g_assert(!(old_drain_count && old_child_s->has_read)); + g_assert(!(new_drain_count && new_child_s->has_read)); + + for (i = 0; i < new_drain_count; i++) { + bdrv_drained_end(new_child_bs); + } + for (i = 0; i < old_drain_count; i++) { + bdrv_drained_end(old_child_bs); + } + + /* + * By now, bdrv_replace_test_co_drain_end() must have been called + * at some point while the new child was attached to the parent. + */ + g_assert(parent_s->has_read); + g_assert(new_child_s->has_read); + + blk_unref(parent_blk); + bdrv_unref(parent_bs); + bdrv_unref(old_child_bs); + bdrv_unref(new_child_bs); +} + +static void test_replace_child_mid_drain(void) +{ + int old_drain_count, new_drain_count; + + for (old_drain_count = 0; old_drain_count < 2; old_drain_count++) { + for (new_drain_count = 0; new_drain_count < 2; new_drain_count++) { + do_test_replace_child_mid_drain(old_drain_count, new_drain_count); + } + } +} + int main(int argc, char **argv) { int ret; @@ -1924,6 +2229,9 @@ int main(int argc, char **argv) g_test_add_func("/bdrv-drain/bdrv_drop_intermediate/poll", test_drop_intermediate_poll); + g_test_add_func("/bdrv-drain/replace_child/mid-drain", + test_replace_child_mid_drain); + ret = g_test_run(); qemu_event_destroy(&done_event); return ret; -- cgit v1.1 From 48057fc2b4f010a5c92035d223a0cceed3635237 Mon Sep 17 00:00:00 2001 From: Max Reitz Date: Mon, 22 Jul 2019 15:33:47 +0200 Subject: iotests: Add test for concurrent stream/commit We already have 030 for that in general, but this tests very specific cases of both jobs finishing concurrently. Signed-off-by: Max Reitz Signed-off-by: Kevin Wolf --- tests/qemu-iotests/258 | 163 +++++++++++++++++++++++++++++++++++++++++++++ tests/qemu-iotests/258.out | 33 +++++++++ tests/qemu-iotests/group | 1 + 3 files changed, 197 insertions(+) create mode 100755 tests/qemu-iotests/258 create mode 100644 tests/qemu-iotests/258.out (limited to 'tests') diff --git a/tests/qemu-iotests/258 b/tests/qemu-iotests/258 new file mode 100755 index 0000000..b84cf02 --- /dev/null +++ b/tests/qemu-iotests/258 @@ -0,0 +1,163 @@ +#!/usr/bin/env python +# +# Very specific tests for adjacent commit/stream block jobs +# +# Copyright (C) 2019 Red Hat, Inc. +# +# This program is free software; you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation; either version 2 of the License, or +# (at your option) any later version. +# +# This program is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with this program. If not, see . 
+# +# Creator/Owner: Max Reitz + +import iotests +from iotests import log, qemu_img, qemu_io_silent, \ + filter_qmp_testfiles, filter_qmp_imgfmt + +# Need backing file and change-backing-file support +iotests.verify_image_format(supported_fmts=['qcow2', 'qed']) +iotests.verify_platform(['linux']) + + +# Returns a node for blockdev-add +def node(node_name, path, backing=None, fmt=None, throttle=None): + if fmt is None: + fmt = iotests.imgfmt + + res = { + 'node-name': node_name, + 'driver': fmt, + 'file': { + 'driver': 'file', + 'filename': path + } + } + + if backing is not None: + res['backing'] = backing + + if throttle: + res['file'] = { + 'driver': 'throttle', + 'throttle-group': throttle, + 'file': res['file'] + } + + return res + +# Finds a node in the debug block graph +def find_graph_node(graph, node_id): + return next(node for node in graph['nodes'] if node['id'] == node_id) + + +def test_concurrent_finish(write_to_stream_node): + log('') + log('=== Commit and stream finish concurrently (letting %s write) ===' % \ + ('stream' if write_to_stream_node else 'commit')) + log('') + + # All chosen in such a way that when the commit job wants to + # finish, it polls and thus makes stream finish concurrently -- + # and the other way around, depending on whether the commit job + # is finalized before stream completes or not. + + with iotests.FilePath('node4.img') as node4_path, \ + iotests.FilePath('node3.img') as node3_path, \ + iotests.FilePath('node2.img') as node2_path, \ + iotests.FilePath('node1.img') as node1_path, \ + iotests.FilePath('node0.img') as node0_path, \ + iotests.VM() as vm: + + # It is important to use raw for the base layer (so that + # permissions are just handed through to the protocol layer) + assert qemu_img('create', '-f', 'raw', node0_path, '64M') == 0 + + stream_throttle=None + commit_throttle=None + + for path in [node1_path, node2_path, node3_path, node4_path]: + assert qemu_img('create', '-f', iotests.imgfmt, path, '64M') == 0 + + if write_to_stream_node: + # This is what (most of the time) makes commit finish + # earlier and then pull in stream + assert qemu_io_silent(node2_path, + '-c', 'write %iK 64K' % (65536 - 192), + '-c', 'write %iK 64K' % (65536 - 64)) == 0 + + stream_throttle='tg' + else: + # And this makes stream finish earlier + assert qemu_io_silent(node1_path, + '-c', 'write %iK 64K' % (65536 - 64)) == 0 + + commit_throttle='tg' + + vm.launch() + + vm.qmp_log('object-add', + qom_type='throttle-group', + id='tg', + props={ + 'x-iops-write': 1, + 'x-iops-write-max': 1 + }) + + vm.qmp_log('blockdev-add', + filters=[filter_qmp_testfiles, filter_qmp_imgfmt], + **node('node4', node4_path, throttle=stream_throttle, + backing=node('node3', node3_path, + backing=node('node2', node2_path, + backing=node('node1', node1_path, + backing=node('node0', node0_path, throttle=commit_throttle, + fmt='raw')))))) + + vm.qmp_log('block-commit', + job_id='commit', + device='node4', + filter_node_name='commit-filter', + top_node='node1', + base_node='node0', + auto_finalize=False) + + vm.qmp_log('block-stream', + job_id='stream', + device='node3', + base_node='commit-filter') + + if write_to_stream_node: + vm.run_job('commit', auto_finalize=False, auto_dismiss=True) + vm.run_job('stream', auto_finalize=True, auto_dismiss=True) + else: + # No, the jobs do not really finish concurrently here, + # the stream job does complete strictly before commit. + # But still, this is close enough for what we want to + # test. 
+ vm.run_job('stream', auto_finalize=True, auto_dismiss=True) + vm.run_job('commit', auto_finalize=False, auto_dismiss=True) + + # Assert that the backing node of node3 is node 0 now + graph = vm.qmp('x-debug-query-block-graph')['return'] + for edge in graph['edges']: + if edge['name'] == 'backing' and \ + find_graph_node(graph, edge['parent'])['name'] == 'node3': + assert find_graph_node(graph, edge['child'])['name'] == 'node0' + break + + +def main(): + log('Running tests:') + test_concurrent_finish(True) + test_concurrent_finish(False) + +if __name__ == '__main__': + main() diff --git a/tests/qemu-iotests/258.out b/tests/qemu-iotests/258.out new file mode 100644 index 0000000..ce6e9ba --- /dev/null +++ b/tests/qemu-iotests/258.out @@ -0,0 +1,33 @@ +Running tests: + +=== Commit and stream finish concurrently (letting stream write) === + +{"execute": "object-add", "arguments": {"id": "tg", "props": {"x-iops-write": 1, "x-iops-write-max": 1}, "qom-type": "throttle-group"}} +{"return": {}} +{"execute": "blockdev-add", "arguments": {"backing": {"backing": {"backing": {"backing": {"driver": "raw", "file": {"driver": "file", "filename": "TEST_DIR/PID-node0.img"}, "node-name": "node0"}, "driver": "IMGFMT", "file": {"driver": "file", "filename": "TEST_DIR/PID-node1.img"}, "node-name": "node1"}, "driver": "IMGFMT", "file": {"driver": "file", "filename": "TEST_DIR/PID-node2.img"}, "node-name": "node2"}, "driver": "IMGFMT", "file": {"driver": "file", "filename": "TEST_DIR/PID-node3.img"}, "node-name": "node3"}, "driver": "IMGFMT", "file": {"driver": "throttle", "file": {"driver": "file", "filename": "TEST_DIR/PID-node4.img"}, "throttle-group": "tg"}, "node-name": "node4"}} +{"return": {}} +{"execute": "block-commit", "arguments": {"auto-finalize": false, "base-node": "node0", "device": "node4", "filter-node-name": "commit-filter", "job-id": "commit", "top-node": "node1"}} +{"return": {}} +{"execute": "block-stream", "arguments": {"base-node": "commit-filter", "device": "node3", "job-id": "stream"}} +{"return": {}} +{"execute": "job-finalize", "arguments": {"id": "commit"}} +{"return": {}} +{"data": {"id": "commit", "type": "commit"}, "event": "BLOCK_JOB_PENDING", "timestamp": {"microseconds": "USECS", "seconds": "SECS"}} +{"data": {"device": "commit", "len": 67108864, "offset": 67108864, "speed": 0, "type": "commit"}, "event": "BLOCK_JOB_COMPLETED", "timestamp": {"microseconds": "USECS", "seconds": "SECS"}} +{"data": {"device": "stream", "len": 67108864, "offset": 67108864, "speed": 0, "type": "stream"}, "event": "BLOCK_JOB_COMPLETED", "timestamp": {"microseconds": "USECS", "seconds": "SECS"}} + +=== Commit and stream finish concurrently (letting commit write) === + +{"execute": "object-add", "arguments": {"id": "tg", "props": {"x-iops-write": 1, "x-iops-write-max": 1}, "qom-type": "throttle-group"}} +{"return": {}} +{"execute": "blockdev-add", "arguments": {"backing": {"backing": {"backing": {"backing": {"driver": "raw", "file": {"driver": "throttle", "file": {"driver": "file", "filename": "TEST_DIR/PID-node0.img"}, "throttle-group": "tg"}, "node-name": "node0"}, "driver": "IMGFMT", "file": {"driver": "file", "filename": "TEST_DIR/PID-node1.img"}, "node-name": "node1"}, "driver": "IMGFMT", "file": {"driver": "file", "filename": "TEST_DIR/PID-node2.img"}, "node-name": "node2"}, "driver": "IMGFMT", "file": {"driver": "file", "filename": "TEST_DIR/PID-node3.img"}, "node-name": "node3"}, "driver": "IMGFMT", "file": {"driver": "file", "filename": "TEST_DIR/PID-node4.img"}, "node-name": "node4"}} 
+{"return": {}} +{"execute": "block-commit", "arguments": {"auto-finalize": false, "base-node": "node0", "device": "node4", "filter-node-name": "commit-filter", "job-id": "commit", "top-node": "node1"}} +{"return": {}} +{"execute": "block-stream", "arguments": {"base-node": "commit-filter", "device": "node3", "job-id": "stream"}} +{"return": {}} +{"data": {"device": "stream", "len": 67108864, "offset": 67108864, "speed": 0, "type": "stream"}, "event": "BLOCK_JOB_COMPLETED", "timestamp": {"microseconds": "USECS", "seconds": "SECS"}} +{"execute": "job-finalize", "arguments": {"id": "commit"}} +{"return": {}} +{"data": {"id": "commit", "type": "commit"}, "event": "BLOCK_JOB_PENDING", "timestamp": {"microseconds": "USECS", "seconds": "SECS"}} +{"data": {"device": "commit", "len": 67108864, "offset": 67108864, "speed": 0, "type": "commit"}, "event": "BLOCK_JOB_COMPLETED", "timestamp": {"microseconds": "USECS", "seconds": "SECS"}} diff --git a/tests/qemu-iotests/group b/tests/qemu-iotests/group index 71ba3c0..5a37839 100644 --- a/tests/qemu-iotests/group +++ b/tests/qemu-iotests/group @@ -271,4 +271,5 @@ 254 rw backing quick 255 rw quick 256 rw quick +258 rw quick 262 rw quick migration -- cgit v1.1 From cf3129323f900ef5ddbccbe86e4fa801e88c566e Mon Sep 17 00:00:00 2001 From: Kevin Wolf Date: Mon, 22 Jul 2019 17:46:23 +0200 Subject: block-backend: Queue requests while drained This fixes devices like IDE that can still start new requests from I/O handlers in the CPU thread while the block backend is drained. The basic assumption is that in a drain section, no new requests should be allowed through a BlockBackend (blk_drained_begin/end don't exist, we get drain sections only on the node level). However, there are two special cases where requests should not be queued: 1. Block jobs: We already make sure that block jobs are paused in a drain section, so they won't start new requests. However, if the drain_begin is called on the job's BlockBackend first, it can happen that we deadlock because the job stays busy until it reaches a pause point - which it can't if its requests aren't processed any more. The proper solution here would be to make all requests through the job's filter node instead of using a BlockBackend. For now, just disabling request queuing on the job BlockBackend is simpler. 2. In test cases where making requests through bdrv_* would be cumbersome because we'd need a BdrvChild. As we already got the functionality to disable request queuing from 1., use it in tests, too, for convenience. Signed-off-by: Kevin Wolf Reviewed-by: Max Reitz --- tests/test-bdrv-drain.c | 1 + 1 file changed, 1 insertion(+) (limited to 'tests') diff --git a/tests/test-bdrv-drain.c b/tests/test-bdrv-drain.c index 9dffd86..468bbcc 100644 --- a/tests/test-bdrv-drain.c +++ b/tests/test-bdrv-drain.c @@ -686,6 +686,7 @@ static void test_iothread_common(enum drain_type drain_type, int drain_thread) &error_abort); s = bs->opaque; blk_insert_bs(blk, bs, &error_abort); + blk_set_disable_request_queuing(blk, true); blk_set_aio_context(blk, ctx_a, &error_abort); aio_context_acquire(ctx_a); -- cgit v1.1