author     Alberto Garcia <berto@igalia.com>  2018-08-02 17:50:25 +0300
committer  Kevin Wolf <kwolf@redhat.com>      2018-08-15 12:50:39 +0200
commit     3db3e9c621519cbfc2b52e98b38f13ad863c0062 (patch)
tree       1303bce436aaac765298bd4af377a605ac40168e /tests/qemu-iotests/093
parent     5d8e4ca035f5a21e8634eb63a678bed55a1a94f9 (diff)
qemu-iotests: Update 093 to improve the draining test
The previous patch fixes a problem in which draining a block device with more than one throttled request can make it wait first for the completion of requests in other members of the same group.

This patch updates test_remove_group_member() in iotest 093 to reproduce that scenario. This updated test would hang QEMU without the fix from the previous patch.

Signed-off-by: Alberto Garcia <berto@igalia.com>
Signed-off-by: Kevin Wolf <kwolf@redhat.com>
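For context, the sequence that the updated test_remove_group_member() walks through looks roughly like this. This is only a condensed sketch assembled from the hunks below, not the literal test body; it reuses the iotests.QMPTestCase helpers that 093 already has (self.vm.hmp_qemu_io, self.blockstats, self.assert_qmp) and assumes drive0 and drive1 share a throttle group whose limits and **params were configured earlier in the test:

        # One 4KB read on drive0 goes through immediately (still under the limit).
        self.vm.hmp_qemu_io("drive0", "aio_read 0 4096")

        # Two more 2KB reads on drive0 exceed the limit: the first one arms the
        # throttle timer, the second one is queued behind it with no new timer.
        self.vm.hmp_qemu_io("drive0", "aio_read 0 2048")
        self.vm.hmp_qemu_io("drive0", "aio_read 0 2048")

        # A 4KB read on drive1 (same group) is throttled as well; the only
        # timer remains the one in drive0.
        self.vm.hmp_qemu_io("drive1", "aio_read 0 4096")

        # Dropping drive0's I/O limits drains drive0's two throttled requests
        # (4096 + 2048 + 2048 = 8192 bytes read) while drive1's request stays
        # throttled. Without the previous fix this drain could hang.
        result = self.vm.qmp("block_set_io_throttle", conv_keys=False, **params)
        self.assert_qmp(result, 'return', {})
        self.assertEqual(self.blockstats('drive0')[0], 8192)
        self.assertEqual(self.blockstats('drive1')[0], 0)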
Diffstat (limited to 'tests/qemu-iotests/093')
-rwxr-xr-x  tests/qemu-iotests/093 | 19
1 file changed, 11 insertions(+), 8 deletions(-)
diff --git a/tests/qemu-iotests/093 b/tests/qemu-iotests/093
index b26cd34..9d1971a 100755
--- a/tests/qemu-iotests/093
+++ b/tests/qemu-iotests/093
@@ -225,15 +225,18 @@ class ThrottleTestCase(iotests.QMPTestCase):
         # Read 4KB from drive0. This is performed immediately.
         self.vm.hmp_qemu_io("drive0", "aio_read 0 4096")
 
-        # Read 4KB again. The I/O limit has been exceeded so this
+        # Read 2KB. The I/O limit has been exceeded so this
         # request is throttled and a timer is set to wake it up.
-        self.vm.hmp_qemu_io("drive0", "aio_read 0 4096")
+        self.vm.hmp_qemu_io("drive0", "aio_read 0 2048")
+
+        # Read 2KB again. We're still over the I/O limit so this
+        # request is also throttled, but no new timer is set since
+        # there's already one.
+        self.vm.hmp_qemu_io("drive0", "aio_read 0 2048")
 
-        # Read from drive1. We're still over the I/O limit so this
-        # request is also throttled. There's no timer set in drive1
-        # because there's already one in drive0. Once the timer in
-        # drive0 fires and its throttled request is processed then the
-        # next request in the queue will be scheduled: this one.
+        # Read from drive1. This request is also throttled, and no
+        # timer is set in drive1 because there's already one in
+        # drive0.
         self.vm.hmp_qemu_io("drive1", "aio_read 0 4096")
 
         # At this point only the first 4KB have been read from drive0.
@@ -248,7 +251,7 @@ class ThrottleTestCase(iotests.QMPTestCase):
         result = self.vm.qmp("block_set_io_throttle", conv_keys=False, **params)
         self.assert_qmp(result, 'return', {})
 
-        # Removing the I/O limits from drive0 drains its pending request.
+        # Removing the I/O limits from drive0 drains its two pending requests.
         # The read request in drive1 is still throttled.
         self.assertEqual(self.blockstats('drive0')[0], 8192)
         self.assertEqual(self.blockstats('drive1')[0], 0)
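For reference, the 8192 bytes asserted for drive0 are the 4096-byte read that completed immediately plus the two 2048-byte reads drained once its limits are removed; drive1's counter stays at 0 because its request is still throttled. Without the fix from the previous patch, this drain is the point where QEMU would hang, waiting on the request still queued for drive1 in the same throttle group.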