author    Alberto Garcia <berto@igalia.com>    2018-03-29 15:07:45 +0300
committer Max Reitz <mreitz@redhat.com>        2018-04-03 17:39:37 +0200
commit    abd3622cc03cf41ed542126a540385f30a4c0175
tree      b6a7efec25c78ca7e5b845d143d9a9e68725b564    /tests/qemu-iotests/122
parent    96914159b7c4f9cd574301517c15b0743a050f95
iotests: Test abnormally large size in compressed cluster descriptor
L2 entries for compressed clusters have a field that indicates the number of sectors used to store the data in the image.

That's however not the size of the compressed data itself, just the number of sectors where that data is located. The actual data size is usually not a multiple of the sector size, and therefore cannot be represented with this field.

The way it works is that QEMU reads all the specified sectors and starts decompressing the data until there's enough to recover the original uncompressed cluster. If there are any bytes left that haven't been decompressed they are simply ignored.

One consequence of this is that even if the size field is larger than it needs to be QEMU can handle it just fine: it will read more data from disk but it will ignore the extra bytes.

This test creates an image with two compressed clusters that use 5 sectors (2.5 KB) each, increases the size field to the maximum (8192 sectors, or 4 MB) and verifies that the data can be read without problems.

This test is important because while the decompressed data takes exactly one cluster, the maximum value allowed in the compressed size field is twice the cluster size. So although QEMU won't produce images with such large values we need to make sure that it can handle them.

Another effect of increasing the size field is that it can make it include data from the following host cluster(s). In this case 'qemu-img check' will detect that the refcounts are not correct, and we'll need to rebuild them.

Additionally, this patch also tests that decreasing the size corrupts the image since the original data can no longer be recovered. In this case QEMU returns an error when trying to read the compressed data, but 'qemu-img check' doesn't see anything wrong if the refcounts are consistent.

One possible task for the future is to make 'qemu-img check' verify the sizes of the compressed clusters, by trying to decompress the data and checking that the size stored in the L2 entry is correct.

Signed-off-by: Alberto Garcia <berto@igalia.com>
Message-id: 20180329120745.11154-1-berto@igalia.com
Reviewed-by: Eric Blake <eblake@redhat.com>
Signed-off-by: Max Reitz <mreitz@redhat.com>
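For reference (not part of the patch): a minimal shell sketch of how the compressed cluster descriptor decodes for this test's 2 MB clusters, assuming the standard qcow2 layout in which bit 62 marks a compressed cluster, the low 49 bits hold the host offset, and the 13-bit field above them counts additional 512-byte sectors beyond the first. The decode_l2 helper name is purely illustrative.

    decode_l2() {
        local entry=$1
        local cluster_bits=21                                  # 2 MB clusters
        local csize_shift=$(( 62 - (cluster_bits - 8) ))       # = 49
        local csize_mask=$(( (1 << (62 - csize_shift)) - 1 ))  # 13-bit size field
        # the field stores "additional sectors", hence the +1
        local nb_csectors=$(( ((entry >> csize_shift) & csize_mask) + 1 ))
        local coffset=$(( entry & ((1 << csize_shift) - 1) ))
        printf 'host offset 0x%x, %d sectors (%d bytes read)\n' \
               "$coffset" "$nb_csectors" $(( nb_csectors * 512 ))
    }
    decode_l2 0x4008000000a00000   # original entry: 5 sectors at 0xa00000
    decode_l2 0x4006000000a00000   # first poke in the test below: 4 sectors (too small)
    decode_l2 0x7ffe000000a00000   # second poke: 8192 sectors (4 MB, the maximum)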
Diffstat (limited to 'tests/qemu-iotests/122')
-rwxr-xr-x  tests/qemu-iotests/122  47
1 file changed, 47 insertions(+), 0 deletions(-)
diff --git a/tests/qemu-iotests/122 b/tests/qemu-iotests/122
index 45b359c..6cf4fcb 100755
--- a/tests/qemu-iotests/122
+++ b/tests/qemu-iotests/122
@@ -130,6 +130,53 @@ $QEMU_IO -c "read -P 0 1024k 1022k" "$TEST_IMG" 2>&1 | _filter_qemu_io | _fil
echo
+echo "=== Corrupted size field in compressed cluster descriptor ==="
+echo
+# Create an empty image and fill half of it with compressed data.
+# The L2 entries of the two compressed clusters are located at
+# 0x800000 and 0x800008, their original values are 0x4008000000a00000
+# and 0x4008000000a00802 (5 sectors for compressed data each).
+_make_test_img 8M -o cluster_size=2M
+$QEMU_IO -c "write -c -P 0x11 0 2M" -c "write -c -P 0x11 2M 2M" "$TEST_IMG" \
+ 2>&1 | _filter_qemu_io | _filter_testdir
+
+# Reduce size of compressed data to 4 sectors: this corrupts the image.
+poke_file "$TEST_IMG" $((0x800000)) "\x40\x06"
+$QEMU_IO -c "read -P 0x11 0 4M" "$TEST_IMG" 2>&1 | _filter_qemu_io | _filter_testdir
+
+# 'qemu-img check' however doesn't see anything wrong because it
+# doesn't try to decompress the data and the refcounts are consistent.
+# TODO: update qemu-img so this can be detected.
+_check_test_img
+
+# Increase size of compressed data to the maximum (8192 sectors).
+# This makes QEMU read more data (8192 sectors instead of 5, host
+# addresses [0xa00000, 0xdfffff]), but the decompression algorithm
+# stops once we have enough to restore the uncompressed cluster, so
+# the rest of the data is ignored.
+poke_file "$TEST_IMG" $((0x800000)) "\x7f\xfe"
+# Do it also for the second compressed cluster (L2 entry at 0x800008).
+# In this case the compressed data would span 3 host clusters
+# (host addresses: [0xa00802, 0xe00801])
+poke_file "$TEST_IMG" $((0x800008)) "\x7f\xfe"
+
+# Here the image is too small so we're asking QEMU to read beyond the
+# end of the image.
+$QEMU_IO -c "read -P 0x11 0 4M" "$TEST_IMG" 2>&1 | _filter_qemu_io | _filter_testdir
+# But if we grow the image we won't be reading beyond its end anymore.
+$QEMU_IO -c "write -P 0x22 4M 4M" "$TEST_IMG" 2>&1 | _filter_qemu_io | _filter_testdir
+$QEMU_IO -c "read -P 0x11 0 4M" "$TEST_IMG" 2>&1 | _filter_qemu_io | _filter_testdir
+
+# The refcount data is however wrong because due to the increased size
+# of the compressed data it now reaches the following host clusters.
+# This can be repaired by qemu-img check by increasing the refcount of
+# those clusters.
+# TODO: update qemu-img to correct the compressed cluster size instead.
+_check_test_img -r all
+$QEMU_IO -c "read -P 0x11 0 4M" "$TEST_IMG" 2>&1 | _filter_qemu_io | _filter_testdir
+$QEMU_IO -c "read -P 0x22 4M 4M" "$TEST_IMG" 2>&1 | _filter_qemu_io | _filter_testdir
+
+echo
echo "=== Full allocation with -S 0 ==="
echo
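The matching reference output (122.out) is presumably updated in the same commit, outside this view limited to tests/qemu-iotests/122. The new case can then be exercised in the usual iotests fashion, roughly:

    cd tests/qemu-iotests
    ./check -qcow2 122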