path: root/tests/qemu-iotests/245
author     Andrey Shinkevich <andrey.shinkevich@virtuozzo.com>  2020-12-16 09:17:03 +0300
committer  Max Reitz <mreitz@redhat.com>  2021-01-26 14:36:37 +0100
commit     205736f4888e6c2bf1a78712cf0fc84d9f7cbcee (patch)
tree       d6cbb0fe14f9990208ffd1c39ad8436f4e6a7d49 /tests/qemu-iotests/245
parent     0f6c94988afdf38228abdaf0b5504cc115f63836 (diff)
block: apply COR-filter to block-stream jobs
This patch completes the series by applying the COR-filter to block-stream operations. Adding the filter makes it possible in the future to implement discarding copied regions in backing files during the block-stream job, to reduce disk space overuse (we need control over permissions for that). Also, the filter is now smart enough to do copy-on-read with a specified base, so guest reads benefit even when only part of the backing chain is streamed.

Several iotests are slightly modified due to the filter insertion.

Signed-off-by: Andrey Shinkevich <andrey.shinkevich@virtuozzo.com>
Signed-off-by: Vladimir Sementsov-Ogievskiy <vsementsov@virtuozzo.com>
Message-Id: <20201216061703.70908-14-vsementsov@virtuozzo.com>
Reviewed-by: Max Reitz <mreitz@redhat.com>
Signed-off-by: Max Reitz <mreitz@redhat.com>
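For context, the sketch below shows the kind of QMP invocation this patch enables in test 245: starting a stream job while giving the implicitly inserted copy-on-read filter an explicit node name. It is only an illustration, assuming the hd2 <- hd1 <- hd0 chain that the test builds beforehand; 'stream0' and 'cor' are simply the names the test uses.

    # Sketch only, assuming a backing chain hd2 <- hd1 <- hd0 as set up by
    # test 245. filter_node_name gives the COR filter node a known name so
    # it can be referenced (e.g. in reopen error messages) while the job runs.
    result = self.vm.qmp('block-stream', conv_keys=True, job_id='stream0',
                         device='hd1', filter_node_name='cor',
                         auto_finalize=False)
    self.assert_qmp(result, 'return', {})

    # While 'stream0' is active, the 'cor' filter sits on top of 'hd1', so
    # hd1 cannot be reopened read-only and its 'backing' link cannot be
    # changed; both restrictions are exercised by the hunk below.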
Diffstat (limited to 'tests/qemu-iotests/245')
-rwxr-xr-x  tests/qemu-iotests/245 | 20
1 file changed, 12 insertions(+), 8 deletions(-)
diff --git a/tests/qemu-iotests/245 b/tests/qemu-iotests/245
index 86f00f2..cfdeb90 100755
--- a/tests/qemu-iotests/245
+++ b/tests/qemu-iotests/245
@@ -893,20 +893,24 @@ class TestBlockdevReopen(iotests.QMPTestCase):
# hd1 <- hd0
result = self.vm.qmp('block-stream', conv_keys = True, job_id = 'stream0',
- device = 'hd1', auto_finalize = False)
+ device = 'hd1', filter_node_name='cor',
+ auto_finalize = False)
self.assert_qmp(result, 'return', {})
- # We can't reopen with the original options because that would
- # make hd1 read-only and block-stream requires it to be read-write
- # (Which error message appears depends on whether the stream job is
- # already done with copying at this point.)
+ # We can't reopen with the original options because there is a filter
+ # inserted by stream job above hd1.
self.reopen(opts, {},
- ["Can't set node 'hd1' to r/o with copy-on-read enabled",
- "Cannot make block node read-only, there is a writer on it"])
+ "Cannot change the option 'backing.backing.file.node-name'")
+
+ # We can't reopen hd1 to read-only, as block-stream requires it to be
+ # read-write
+ self.reopen(opts['backing'], {'read-only': True},
+ "Cannot make block node read-only, there is a writer on it")
# We can't remove hd2 while the stream job is ongoing
opts['backing']['backing'] = None
- self.reopen(opts, {'backing.read-only': False}, "Cannot change 'backing' link from 'hd1' to 'hd2'")
+ self.reopen(opts['backing'], {'read-only': False},
+ "Cannot change 'backing' link from 'hd1' to 'hd2'")
# We can detach hd1 from hd0 because it doesn't affect the stream job
opts['backing'] = None