aboutsummaryrefslogtreecommitdiff
diff options
context:
space:
mode:
authorVladimir Sementsov-Ogievskiy <vsementsov@virtuozzo.com>2019-04-29 12:08:40 +0300
committerMichael Roth <mdroth@linux.vnet.ibm.com>2019-10-01 16:58:28 -0500
commit87851171b4adc456ba6de97e729cf87ace318288 (patch)
treeab22cf8424730a4ff56559ea7337c6a8691c1c81
parent3f6c00eb618389034a0414ea119665f8cc36b2ca (diff)
downloadqemu-87851171b4adc456ba6de97e729cf87ace318288.zip
qemu-87851171b4adc456ba6de97e729cf87ace318288.tar.gz
qemu-87851171b4adc456ba6de97e729cf87ace318288.tar.bz2
block/backup: refactor and tolerate unallocated cluster skipping
Split allocation checking into a separate function and reduce nesting. Treat a bdrv_is_allocated() failure as an allocated area, as copying more than needed is not wrong (and we do it anyway) and seems better than failing the whole job. And, most probably, we will fail on the next read if there is a real problem with the source. Signed-off-by: Vladimir Sementsov-Ogievskiy <vsementsov@virtuozzo.com> Reviewed-by: Max Reitz <mreitz@redhat.com> Message-id: 20190429090842.57910-4-vsementsov@virtuozzo.com Signed-off-by: Max Reitz <mreitz@redhat.com> (cherry picked from commit 9eb5a248f3e50c1f034bc6ff4b2f25c8c56515a5) *prereq for 110571be4e Signed-off-by: Michael Roth <mdroth@linux.vnet.ibm.com>
-rw-r--r--block/backup.c60
1 file changed, 23 insertions, 37 deletions
diff --git a/block/backup.c b/block/backup.c
index 510fc54..298e85f 100644
--- a/block/backup.c
+++ b/block/backup.c
@@ -377,6 +377,22 @@ static bool coroutine_fn yield_and_check(BackupBlockJob *job)
return false;
}
+static bool bdrv_is_unallocated_range(BlockDriverState *bs,
+ int64_t offset, int64_t bytes)
+{
+ int64_t end = offset + bytes;
+
+ while (offset < end && !bdrv_is_allocated(bs, offset, bytes, &bytes)) {
+ if (bytes == 0) {
+ return true;
+ }
+ offset += bytes;
+ bytes = end - offset;
+ }
+
+ return offset >= end;
+}
+
static int coroutine_fn backup_run_incremental(BackupBlockJob *job)
{
int ret;
@@ -462,49 +478,19 @@ static int coroutine_fn backup_run(Job *job, Error **errp)
for (offset = 0; offset < s->len;
offset += s->cluster_size) {
bool error_is_read;
- int alloced = 0;
if (yield_and_check(s)) {
break;
}
- if (s->sync_mode == MIRROR_SYNC_MODE_TOP) {
- int i;
- int64_t n;
-
- /* Check to see if these blocks are already in the
- * backing file. */
-
- for (i = 0; i < s->cluster_size;) {
- /* bdrv_is_allocated() only returns true/false based
- * on the first set of sectors it comes across that
- * are are all in the same state.
- * For that reason we must verify each sector in the
- * backup cluster length. We end up copying more than
- * needed but at some point that is always the case. */
- alloced =
- bdrv_is_allocated(bs, offset + i,
- s->cluster_size - i, &n);
- i += n;
-
- if (alloced || n == 0) {
- break;
- }
- }
-
- /* If the above loop never found any sectors that are in
- * the topmost image, skip this backup. */
- if (alloced == 0) {
- continue;
- }
- }
- /* FULL sync mode we copy the whole drive. */
- if (alloced < 0) {
- ret = alloced;
- } else {
- ret = backup_do_cow(s, offset, s->cluster_size,
- &error_is_read, false);
+ if (s->sync_mode == MIRROR_SYNC_MODE_TOP &&
+ bdrv_is_unallocated_range(bs, offset, s->cluster_size))
+ {
+ continue;
}
+
+ ret = backup_do_cow(s, offset, s->cluster_size,
+ &error_is_read, false);
if (ret < 0) {
/* Depending on error action, fail now or retry cluster */
BlockErrorAction action =