author    Eric Blake <eblake@redhat.com>    2018-11-13 17:03:19 -0600
committer Kevin Wolf <kwolf@redhat.com>     2018-11-19 12:51:40 +0100
commit    3b94c343f98b660e1c1cf79a8f704151962ecd48 (patch)
tree      5e5af504bf004891f84ebad4d11ecb498bcfb6ab /tests/qemu-iotests/220
parent    77d6a21558577fbdd35e65e0e1d03ae07214329f (diff)
iotests: Add new test 220 for max compressed cluster offset
If you have a capable file system (tmpfs is good, ext4 not so much; run
./check with TEST_DIR pointing to a good location so as not to skip the
test), it's actually possible to create a qcow2 file that expands to a
sparse 512T image with just over 38M of content. The test is not the
world's fastest (qemu crawling through 256M bits of refcount table to
find the next cluster to allocate takes several seconds, as does
qemu-img check reporting millions of leaked clusters); but it DOES catch
the problem that the previous patch just fixed, where writing a
compressed cluster to a full image ended up overwriting the wrong
cluster.

Suggested-by: Max Reitz <mreitz@redhat.com>
Signed-off-by: Eric Blake <eblake@redhat.com>
Reviewed-by: Alberto Garcia <berto@igalia.com>
Signed-off-by: Kevin Wolf <kwolf@redhat.com>
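For reference, one way to run the test on a filesystem that can hold the huge sparse file is sketched below; the tmpfs mount point and the mkdir step are assumptions, so adjust them to your system:

  cd tests/qemu-iotests
  mkdir -p /dev/shm/qemu-iotests          # /dev/shm is usually tmpfs on Linux
  TEST_DIR=/dev/shm/qemu-iotests ./check -qcow2 220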
Diffstat (limited to 'tests/qemu-iotests/220')
-rwxr-xr-x  tests/qemu-iotests/220  96
1 file changed, 96 insertions(+), 0 deletions(-)
diff --git a/tests/qemu-iotests/220 b/tests/qemu-iotests/220
new file mode 100755
index 0000000..0c5682b
--- /dev/null
+++ b/tests/qemu-iotests/220
@@ -0,0 +1,96 @@
+#!/bin/bash
+#
+# max limits on compression in huge qcow2 files
+#
+# Copyright (C) 2018 Red Hat, Inc.
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 2 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program. If not, see <http://www.gnu.org/licenses/>.
+#
+
+seq=$(basename $0)
+echo "QA output created by $seq"
+
+status=1 # failure is the default!
+
+_cleanup()
+{
+ _cleanup_test_img
+}
+trap "_cleanup; exit \$status" 0 1 2 3 15
+
+# get standard environment, filters and checks
+. ./common.rc
+. ./common.filter
+. ./common.pattern
+
+_supported_fmt qcow2
+_supported_proto file
+_supported_os Linux
+
+echo "== Creating huge file =="
+
+# Sanity check: We require a file system that permits the creation
+# of a HUGE (but very sparse) file. tmpfs works, ext4 does not.
+if ! truncate --size=513T "$TEST_IMG"; then
+ _notrun "file system on $TEST_DIR does not support large enough files"
+fi
+rm "$TEST_IMG"
+IMGOPTS='cluster_size=2M,refcount_bits=1' _make_test_img 513T
+
+echo "== Populating refcounts =="
+# We want an image with 256M refcounts * 2M clusters = 512T referenced.
+# Each 2M cluster holds 16M refcounts; the refcount table initially uses
+# 1 refblock, so we need to add 15 more. The refcount table lives at 2M,
+# first refblock at 4M, L2 at 6M, so our remaining additions start at 8M.
+# Then, for each refblock, mark it as fully populated.
+to_hex() {
+ printf %016x\\n $1 | sed 's/\(..\)/\\x\1/g'
+}
+truncate --size=38m "$TEST_IMG"
+entry=$((0x200000))
+$QEMU_IO_PROG -f raw -c "w -P 0xff 4m 2m" "$TEST_IMG" | _filter_qemu_io
+for i in {1..15}; do
+ offs=$((0x600000 + i*0x200000))
+ poke_file "$TEST_IMG" $((i*8 + entry)) $(to_hex $offs)
+ $QEMU_IO_PROG -f raw -c "w -P 0xff $offs 2m" "$TEST_IMG" | _filter_qemu_io
+done
+
+echo "== Checking file before =="
+# FIXME: 'qemu-img check' doesn't diagnose refcounts beyond the end of
+# the file as leaked clusters
+_check_test_img 2>&1 | sed '/^Leaked cluster/d'
+stat -c 'image size %s' "$TEST_IMG"
+
+echo "== Trying to write compressed cluster =="
+# Given our file size, the next available cluster at 512T lies beyond the
+# maximum offset that a compressed 2M cluster can reside in
+$QEMU_IO_PROG -c 'w -c 0 2m' "$TEST_IMG" | _filter_qemu_io
+# The attempt failed, but ended up allocating a new refblock
+stat -c 'image size %s' "$TEST_IMG"
+
+echo "== Writing normal cluster =="
+# The failed write should not corrupt the image, so a normal write succeeds
+$QEMU_IO_PROG -c 'w 0 2m' "$TEST_IMG" | _filter_qemu_io
+
+echo "== Checking file after =="
+# qemu-img now sees the millions of leaked clusters, thanks to the allocations
+# at 512T. Undo many of our faked references to speed up the check.
+$QEMU_IO_PROG -f raw -c "w -z 5m 1m" -c "w -z 8m 30m" "$TEST_IMG" |
+ _filter_qemu_io
+_check_test_img 2>&1 | sed '/^Leaked cluster/d'
+
+# success, all done
+echo "*** done"
+rm -f $seq.full
+status=0
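The numbers the test relies on can be re-derived with a little shell arithmetic. This is a sketch for orientation only, not part of the patch; the offset-width formula is the qcow2 compressed cluster descriptor's x = 62 - (cluster_bits - 8):

  # 512T of guest data in 2M clusters needs 256M refcount entries; with
  # refcount_bits=1 a single 2M refblock covers 2M * 8 = 16M entries, so
  # 16 refblocks are needed: the one created by image creation plus the 15
  # the test pokes into the refcount table (as 8-byte big-endian entries
  # built by to_hex and written with poke_file).
  echo $(( (512 * 2**40) / (2 * 2**20) ))   # 268435456 = 256M refcount entries
  echo $(( 2 * 2**20 * 8 ))                 # 16777216  = 16M entries per refblock

  # A compressed cluster's L2 entry keeps only 62 - (cluster_bits - 8) bits
  # of host offset; with 2M clusters (cluster_bits = 21) that is 49 bits,
  # so the highest usable host offset lies just below 512T -- and the image
  # above forces the next free cluster to start exactly at 512T, which is
  # why the compressed write must fail while the plain write still succeeds.
  echo $(( 1 << (62 - (21 - 8)) ))          # 562949953421312 = 512T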