Diffstat (limited to 'block')
-rw-r--r--  block/backup-top.c     |   6
-rw-r--r--  block/backup.c         |  38
-rw-r--r--  block/block-copy.c     | 405
-rw-r--r--  block/crypto.c         |  62
-rw-r--r--  block/curl.c           |  32
-rw-r--r--  block/qcow2-threads.c  |  12
-rw-r--r--  block/qcow2.c          |  75
-rw-r--r--  block/trace-events     |   1
8 files changed, 453 insertions, 178 deletions
diff --git a/block/backup-top.c b/block/backup-top.c
index 1bfb360..3b50c06 100644
--- a/block/backup-top.c
+++ b/block/backup-top.c
@@ -38,6 +38,7 @@ typedef struct BDRVBackupTopState {
BlockCopyState *bcs;
BdrvChild *target;
bool active;
+ int64_t cluster_size;
} BDRVBackupTopState;
static coroutine_fn int backup_top_co_preadv(
@@ -57,8 +58,8 @@ static coroutine_fn int backup_top_cbw(BlockDriverState *bs, uint64_t offset,
return 0;
}
- off = QEMU_ALIGN_DOWN(offset, s->bcs->cluster_size);
- end = QEMU_ALIGN_UP(offset + bytes, s->bcs->cluster_size);
+ off = QEMU_ALIGN_DOWN(offset, s->cluster_size);
+ end = QEMU_ALIGN_UP(offset + bytes, s->cluster_size);
return block_copy(s->bcs, off, end - off, NULL);
}
@@ -238,6 +239,7 @@ BlockDriverState *bdrv_backup_top_append(BlockDriverState *source,
goto fail;
}
+ state->cluster_size = cluster_size;
state->bcs = block_copy_state_new(top->backing, state->target,
cluster_size, write_flags, &local_err);
if (local_err) {
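
The copy-before-write path above widens each guest write to whole clusters before calling block_copy(). A minimal standalone sketch of that rounding, with ALIGN_DOWN/ALIGN_UP as stand-ins for QEMU's QEMU_ALIGN_DOWN/QEMU_ALIGN_UP macros and purely illustrative sizes:

    #include <inttypes.h>
    #include <stdint.h>
    #include <stdio.h>

    /* Stand-ins for QEMU_ALIGN_DOWN/QEMU_ALIGN_UP. */
    #define ALIGN_DOWN(n, m) ((n) / (m) * (m))
    #define ALIGN_UP(n, m)   ALIGN_DOWN((n) + (m) - 1, (m))

    int main(void)
    {
        int64_t cluster_size = 64 * 1024;               /* assumed cluster size */
        int64_t offset = 100 * 1024, bytes = 8 * 1024;  /* an unaligned guest write */

        /* backup_top_cbw() widens the write to whole clusters before copying. */
        int64_t off = ALIGN_DOWN(offset, cluster_size);
        int64_t end = ALIGN_UP(offset + bytes, cluster_size);

        printf("copy %" PRId64 " bytes at offset %" PRId64 "\n", end - off, off);
        /* prints: copy 65536 bytes at offset 65536 (the write fits one cluster) */
        return 0;
    }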
diff --git a/block/backup.c b/block/backup.c
index 1383e21..7430ca5 100644
--- a/block/backup.c
+++ b/block/backup.c
@@ -57,15 +57,6 @@ static void backup_progress_bytes_callback(int64_t bytes, void *opaque)
BackupBlockJob *s = opaque;
s->bytes_read += bytes;
- job_progress_update(&s->common.job, bytes);
-}
-
-static void backup_progress_reset_callback(void *opaque)
-{
- BackupBlockJob *s = opaque;
- uint64_t estimate = bdrv_get_dirty_count(s->bcs->copy_bitmap);
-
- job_progress_set_remaining(&s->common.job, estimate);
}
static int coroutine_fn backup_do_cow(BackupBlockJob *job,
@@ -111,7 +102,7 @@ static void backup_cleanup_sync_bitmap(BackupBlockJob *job, int ret)
if (ret < 0 && job->bitmap_mode == BITMAP_SYNC_MODE_ALWAYS) {
/* If we failed and synced, merge in the bits we didn't copy: */
- bdrv_dirty_bitmap_merge_internal(bm, job->bcs->copy_bitmap,
+ bdrv_dirty_bitmap_merge_internal(bm, block_copy_dirty_bitmap(job->bcs),
NULL, true);
}
}
@@ -154,7 +145,8 @@ void backup_do_checkpoint(BlockJob *job, Error **errp)
return;
}
- bdrv_set_dirty_bitmap(backup_job->bcs->copy_bitmap, 0, backup_job->len);
+ bdrv_set_dirty_bitmap(block_copy_dirty_bitmap(backup_job->bcs), 0,
+ backup_job->len);
}
static BlockErrorAction backup_error_action(BackupBlockJob *job,
@@ -199,7 +191,7 @@ static int coroutine_fn backup_loop(BackupBlockJob *job)
BdrvDirtyBitmapIter *bdbi;
int ret = 0;
- bdbi = bdrv_dirty_iter_new(job->bcs->copy_bitmap);
+ bdbi = bdrv_dirty_iter_new(block_copy_dirty_bitmap(job->bcs));
while ((offset = bdrv_dirty_iter_next(bdbi)) != -1) {
do {
if (yield_and_check(job)) {
@@ -219,14 +211,14 @@ static int coroutine_fn backup_loop(BackupBlockJob *job)
return ret;
}
-static void backup_init_copy_bitmap(BackupBlockJob *job)
+static void backup_init_bcs_bitmap(BackupBlockJob *job)
{
bool ret;
uint64_t estimate;
+ BdrvDirtyBitmap *bcs_bitmap = block_copy_dirty_bitmap(job->bcs);
if (job->sync_mode == MIRROR_SYNC_MODE_BITMAP) {
- ret = bdrv_dirty_bitmap_merge_internal(job->bcs->copy_bitmap,
- job->sync_bitmap,
+ ret = bdrv_dirty_bitmap_merge_internal(bcs_bitmap, job->sync_bitmap,
NULL, true);
assert(ret);
} else {
@@ -235,12 +227,12 @@ static void backup_init_copy_bitmap(BackupBlockJob *job)
* We can't hog the coroutine to initialize this thoroughly.
* Set a flag and resume work when we are able to yield safely.
*/
- job->bcs->skip_unallocated = true;
+ block_copy_set_skip_unallocated(job->bcs, true);
}
- bdrv_set_dirty_bitmap(job->bcs->copy_bitmap, 0, job->len);
+ bdrv_set_dirty_bitmap(bcs_bitmap, 0, job->len);
}
- estimate = bdrv_get_dirty_count(job->bcs->copy_bitmap);
+ estimate = bdrv_get_dirty_count(bcs_bitmap);
job_progress_set_remaining(&job->common.job, estimate);
}
@@ -249,7 +241,7 @@ static int coroutine_fn backup_run(Job *job, Error **errp)
BackupBlockJob *s = container_of(job, BackupBlockJob, common.job);
int ret = 0;
- backup_init_copy_bitmap(s);
+ backup_init_bcs_bitmap(s);
if (s->sync_mode == MIRROR_SYNC_MODE_TOP) {
int64_t offset = 0;
@@ -268,12 +260,12 @@ static int coroutine_fn backup_run(Job *job, Error **errp)
offset += count;
}
- s->bcs->skip_unallocated = false;
+ block_copy_set_skip_unallocated(s->bcs, false);
}
if (s->sync_mode == MIRROR_SYNC_MODE_NONE) {
/*
- * All bits are set in copy_bitmap to allow any cluster to be copied.
+ * All bits are set in the bcs bitmap to allow any cluster to be copied.
* This does not actually require them to be copied.
*/
while (!job_is_cancelled(job)) {
@@ -464,8 +456,8 @@ BlockJob *backup_job_create(const char *job_id, BlockDriverState *bs,
job->cluster_size = cluster_size;
job->len = len;
- block_copy_set_callbacks(bcs, backup_progress_bytes_callback,
- backup_progress_reset_callback, job);
+ block_copy_set_progress_callback(bcs, backup_progress_bytes_callback, job);
+ block_copy_set_progress_meter(bcs, &job->common.job.progress);
/* Required permissions are already taken by backup-top target */
block_job_add_bdrv(&job->common, "target", target, 0, BLK_PERM_ALL,
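
backup now reports progress through the job's shared ProgressMeter instead of a reset callback. A simplified stand-in for the two meter operations this patch relies on (QEMU's real ProgressMeter lives in include/qemu/progress_meter.h; this only mirrors its intent):

    #include <stdint.h>

    /* Simplified stand-in for QEMU's ProgressMeter. */
    typedef struct ProgressMeter {
        uint64_t current;  /* bytes copied so far */
        uint64_t total;    /* estimated total work */
    } ProgressMeter;

    /* Account finished work; called after each successful cluster copy. */
    static void progress_work_done(ProgressMeter *pm, uint64_t done)
    {
        pm->current += done;
    }

    /*
     * Re-estimate the remaining work. block-copy passes the dirty-bitmap
     * count plus in_flight_bytes, so clusters being copied right now still
     * count as pending.
     */
    static void progress_set_remaining(ProgressMeter *pm, uint64_t remaining)
    {
        pm->total = pm->current + remaining;
    }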
diff --git a/block/block-copy.c b/block/block-copy.c
index 79798a1..05227e1 100644
--- a/block/block-copy.c
+++ b/block/block-copy.c
@@ -24,37 +24,136 @@
#define BLOCK_COPY_MAX_BUFFER (1 * MiB)
#define BLOCK_COPY_MAX_MEM (128 * MiB)
-static void coroutine_fn block_copy_wait_inflight_reqs(BlockCopyState *s,
- int64_t start,
- int64_t end)
+typedef struct BlockCopyInFlightReq {
+ int64_t offset;
+ int64_t bytes;
+ QLIST_ENTRY(BlockCopyInFlightReq) list;
+ CoQueue wait_queue; /* coroutines blocked on this request */
+} BlockCopyInFlightReq;
+
+typedef struct BlockCopyState {
+ /*
+ * BdrvChild objects are not owned or managed by block-copy. They are
+ * provided by the block-copy user, who is responsible for appropriate
+ * permissions on these children.
+ */
+ BdrvChild *source;
+ BdrvChild *target;
+ BdrvDirtyBitmap *copy_bitmap;
+ int64_t in_flight_bytes;
+ int64_t cluster_size;
+ bool use_copy_range;
+ int64_t copy_size;
+ uint64_t len;
+ QLIST_HEAD(, BlockCopyInFlightReq) inflight_reqs;
+
+ BdrvRequestFlags write_flags;
+
+ /*
+ * skip_unallocated:
+ *
+ * Used by sync=top jobs, which first scan the source node for unallocated
+ * areas and clear them in the copy_bitmap. During this process, the bitmap
+ * is thus not fully initialized: It may still have bits set for areas that
+ * are unallocated and should actually not be copied.
+ *
+ * This is indicated by skip_unallocated.
+ *
+ * In this case, block_copy() will query the source's allocation status,
+ * skip unallocated regions, clear them in the copy_bitmap, and invoke
+ * block_copy_reset_unallocated() every time it does.
+ */
+ bool skip_unallocated;
+
+ ProgressMeter *progress;
+ /* progress_bytes_callback: called when some copying progress is done. */
+ ProgressBytesCallbackFunc progress_bytes_callback;
+ void *progress_opaque;
+
+ SharedResource *mem;
+} BlockCopyState;
+
+static BlockCopyInFlightReq *find_conflicting_inflight_req(BlockCopyState *s,
+ int64_t offset,
+ int64_t bytes)
{
BlockCopyInFlightReq *req;
- bool waited;
- do {
- waited = false;
- QLIST_FOREACH(req, &s->inflight_reqs, list) {
- if (end > req->start_byte && start < req->end_byte) {
- qemu_co_queue_wait(&req->wait_queue, NULL);
- waited = true;
- break;
- }
+ QLIST_FOREACH(req, &s->inflight_reqs, list) {
+ if (offset + bytes > req->offset && offset < req->offset + req->bytes) {
+ return req;
}
- } while (waited);
+ }
+
+ return NULL;
+}
+
+/*
+ * If there are no intersecting requests, return false. Otherwise, wait for
+ * the first found intersecting request to finish and return true.
+ */
+static bool coroutine_fn block_copy_wait_one(BlockCopyState *s, int64_t offset,
+ int64_t bytes)
+{
+ BlockCopyInFlightReq *req = find_conflicting_inflight_req(s, offset, bytes);
+
+ if (!req) {
+ return false;
+ }
+
+ qemu_co_queue_wait(&req->wait_queue, NULL);
+
+ return true;
}
+/* Called only on a full-dirty region */
static void block_copy_inflight_req_begin(BlockCopyState *s,
BlockCopyInFlightReq *req,
- int64_t start, int64_t end)
+ int64_t offset, int64_t bytes)
{
- req->start_byte = start;
- req->end_byte = end;
+ assert(!find_conflicting_inflight_req(s, offset, bytes));
+
+ bdrv_reset_dirty_bitmap(s->copy_bitmap, offset, bytes);
+ s->in_flight_bytes += bytes;
+
+ req->offset = offset;
+ req->bytes = bytes;
qemu_co_queue_init(&req->wait_queue);
QLIST_INSERT_HEAD(&s->inflight_reqs, req, list);
}
-static void coroutine_fn block_copy_inflight_req_end(BlockCopyInFlightReq *req)
+/*
+ * block_copy_inflight_req_shrink
+ *
+ * Drop the tail of the request, to be handled later. Set the dirty bits back
+ * and wake up all requests waiting for us (some of them may no longer
+ * intersect with the shrunk request).
+ */
+static void coroutine_fn block_copy_inflight_req_shrink(BlockCopyState *s,
+ BlockCopyInFlightReq *req, int64_t new_bytes)
+{
+ if (new_bytes == req->bytes) {
+ return;
+ }
+
+ assert(new_bytes > 0 && new_bytes < req->bytes);
+
+ s->in_flight_bytes -= req->bytes - new_bytes;
+ bdrv_set_dirty_bitmap(s->copy_bitmap,
+ req->offset + new_bytes, req->bytes - new_bytes);
+
+ req->bytes = new_bytes;
+ qemu_co_queue_restart_all(&req->wait_queue);
+}
+
+static void coroutine_fn block_copy_inflight_req_end(BlockCopyState *s,
+ BlockCopyInFlightReq *req,
+ int ret)
{
+ s->in_flight_bytes -= req->bytes;
+ if (ret < 0) {
+ bdrv_set_dirty_bitmap(s->copy_bitmap, req->offset, req->bytes);
+ }
QLIST_REMOVE(req, list);
qemu_co_queue_restart_all(&req->wait_queue);
}
@@ -70,16 +169,19 @@ void block_copy_state_free(BlockCopyState *s)
g_free(s);
}
+static uint32_t block_copy_max_transfer(BdrvChild *source, BdrvChild *target)
+{
+ return MIN_NON_ZERO(INT_MAX,
+ MIN_NON_ZERO(source->bs->bl.max_transfer,
+ target->bs->bl.max_transfer));
+}
+
BlockCopyState *block_copy_state_new(BdrvChild *source, BdrvChild *target,
int64_t cluster_size,
BdrvRequestFlags write_flags, Error **errp)
{
BlockCopyState *s;
BdrvDirtyBitmap *copy_bitmap;
- uint32_t max_transfer =
- MIN_NON_ZERO(INT_MAX,
- MIN_NON_ZERO(source->bs->bl.max_transfer,
- target->bs->bl.max_transfer));
copy_bitmap = bdrv_create_dirty_bitmap(source->bs, cluster_size, NULL,
errp);
@@ -99,7 +201,7 @@ BlockCopyState *block_copy_state_new(BdrvChild *source, BdrvChild *target,
.mem = shres_create(BLOCK_COPY_MAX_MEM),
};
- if (max_transfer < cluster_size) {
+ if (block_copy_max_transfer(source, target) < cluster_size) {
/*
* copy_range does not respect max_transfer. We don't want to bother
* with requests smaller than block-copy cluster size, so fallback to
@@ -114,12 +216,11 @@ BlockCopyState *block_copy_state_new(BdrvChild *source, BdrvChild *target,
s->copy_size = cluster_size;
} else {
/*
- * copy_range does not respect max_transfer (it's a TODO), so we factor
- * that in here.
+ * We enable copy-range, but keep a small copy_size until the first
+ * successful copy_range (see block_copy_do_copy).
*/
s->use_copy_range = true;
- s->copy_size = MIN(MAX(cluster_size, BLOCK_COPY_MAX_COPY_RANGE),
- QEMU_ALIGN_DOWN(max_transfer, cluster_size));
+ s->copy_size = MAX(s->cluster_size, BLOCK_COPY_MAX_BUFFER);
}
QLIST_INIT(&s->inflight_reqs);
@@ -127,48 +228,83 @@ BlockCopyState *block_copy_state_new(BdrvChild *source, BdrvChild *target,
return s;
}
-void block_copy_set_callbacks(
+void block_copy_set_progress_callback(
BlockCopyState *s,
ProgressBytesCallbackFunc progress_bytes_callback,
- ProgressResetCallbackFunc progress_reset_callback,
void *progress_opaque)
{
s->progress_bytes_callback = progress_bytes_callback;
- s->progress_reset_callback = progress_reset_callback;
s->progress_opaque = progress_opaque;
}
+void block_copy_set_progress_meter(BlockCopyState *s, ProgressMeter *pm)
+{
+ s->progress = pm;
+}
+
/*
* block_copy_do_copy
*
- * Do copy of cluser-aligned chunk. @end is allowed to exceed s->len only to
- * cover last cluster when s->len is not aligned to clusters.
+ * Copy a cluster-aligned chunk. The requested region is allowed to exceed
+ * s->len only to cover the last cluster when s->len is not aligned to
+ * clusters.
*
* No sync here: neither bitmap nor intersecting requests handling, only copy.
*
* Returns 0 on success.
*/
static int coroutine_fn block_copy_do_copy(BlockCopyState *s,
- int64_t start, int64_t end,
- bool *error_is_read)
+ int64_t offset, int64_t bytes,
+ bool zeroes, bool *error_is_read)
{
int ret;
- int nbytes = MIN(end, s->len) - start;
+ int64_t nbytes = MIN(offset + bytes, s->len) - offset;
void *bounce_buffer = NULL;
- assert(QEMU_IS_ALIGNED(start, s->cluster_size));
- assert(QEMU_IS_ALIGNED(end, s->cluster_size));
- assert(end < s->len || end == QEMU_ALIGN_UP(s->len, s->cluster_size));
+ assert(offset >= 0 && bytes > 0 && INT64_MAX - offset >= bytes);
+ assert(QEMU_IS_ALIGNED(offset, s->cluster_size));
+ assert(QEMU_IS_ALIGNED(bytes, s->cluster_size));
+ assert(offset < s->len);
+ assert(offset + bytes <= s->len ||
+ offset + bytes == QEMU_ALIGN_UP(s->len, s->cluster_size));
+ assert(nbytes < INT_MAX);
+
+ if (zeroes) {
+ ret = bdrv_co_pwrite_zeroes(s->target, offset, nbytes, s->write_flags &
+ ~BDRV_REQ_WRITE_COMPRESSED);
+ if (ret < 0) {
+ trace_block_copy_write_zeroes_fail(s, offset, ret);
+ if (error_is_read) {
+ *error_is_read = false;
+ }
+ }
+ return ret;
+ }
if (s->use_copy_range) {
- ret = bdrv_co_copy_range(s->source, start, s->target, start, nbytes,
+ ret = bdrv_co_copy_range(s->source, offset, s->target, offset, nbytes,
0, s->write_flags);
if (ret < 0) {
- trace_block_copy_copy_range_fail(s, start, ret);
+ trace_block_copy_copy_range_fail(s, offset, ret);
s->use_copy_range = false;
s->copy_size = MAX(s->cluster_size, BLOCK_COPY_MAX_BUFFER);
/* Fallback to read+write with allocated buffer */
} else {
+ if (s->use_copy_range) {
+ /*
+ * Successful copy-range. Now increase copy_size. copy_range
+ * does not respect max_transfer (it's a TODO), so we factor
+ * that in here.
+ *
+ * Note: we double-check s->use_copy_range for the case when a
+ * parallel block-copy request unsets it during the previous
+ * bdrv_co_copy_range call.
+ */
+ s->copy_size =
+ MIN(MAX(s->cluster_size, BLOCK_COPY_MAX_COPY_RANGE),
+ QEMU_ALIGN_DOWN(block_copy_max_transfer(s->source,
+ s->target),
+ s->cluster_size));
+ }
goto out;
}
}
@@ -176,24 +312,27 @@ static int coroutine_fn block_copy_do_copy(BlockCopyState *s,
/*
* In case of failed copy_range request above, we may proceed with buffered
* request larger than BLOCK_COPY_MAX_BUFFER. Still, further requests will
- * be properly limited, so don't care too much.
+ * be properly limited, so don't worry too much. Moreover, the most likely
+ * case (copy_range is unsupported for the configuration, so the very first
+ * copy_range request fails) is handled by setting a large copy_size only
+ * after the first successful copy_range.
*/
bounce_buffer = qemu_blockalign(s->source->bs, nbytes);
- ret = bdrv_co_pread(s->source, start, nbytes, bounce_buffer, 0);
+ ret = bdrv_co_pread(s->source, offset, nbytes, bounce_buffer, 0);
if (ret < 0) {
- trace_block_copy_read_fail(s, start, ret);
+ trace_block_copy_read_fail(s, offset, ret);
if (error_is_read) {
*error_is_read = true;
}
goto out;
}
- ret = bdrv_co_pwrite(s->target, start, nbytes, bounce_buffer,
+ ret = bdrv_co_pwrite(s->target, offset, nbytes, bounce_buffer,
s->write_flags);
if (ret < 0) {
- trace_block_copy_write_fail(s, start, ret);
+ trace_block_copy_write_fail(s, offset, ret);
if (error_is_read) {
*error_is_read = false;
}
@@ -206,6 +345,38 @@ out:
return ret;
}
+static int block_copy_block_status(BlockCopyState *s, int64_t offset,
+ int64_t bytes, int64_t *pnum)
+{
+ int64_t num;
+ BlockDriverState *base;
+ int ret;
+
+ if (s->skip_unallocated && s->source->bs->backing) {
+ base = s->source->bs->backing->bs;
+ } else {
+ base = NULL;
+ }
+
+ ret = bdrv_block_status_above(s->source->bs, base, offset, bytes, &num,
+ NULL, NULL);
+ if (ret < 0 || num < s->cluster_size) {
+ /*
+ * On error, or if we failed to obtain a large enough chunk, just fall
+ * back to copying one cluster.
+ */
+ num = s->cluster_size;
+ ret = BDRV_BLOCK_ALLOCATED | BDRV_BLOCK_DATA;
+ } else if (offset + num == s->len) {
+ num = QEMU_ALIGN_UP(num, s->cluster_size);
+ } else {
+ num = QEMU_ALIGN_DOWN(num, s->cluster_size);
+ }
+
+ *pnum = num;
+ return ret;
+}
+
/*
* Check if the cluster starting at offset is allocated or not.
* Return via pnum the number of contiguous clusters sharing this allocation.
@@ -269,21 +440,28 @@ int64_t block_copy_reset_unallocated(BlockCopyState *s,
if (!ret) {
bdrv_reset_dirty_bitmap(s->copy_bitmap, offset, bytes);
- s->progress_reset_callback(s->progress_opaque);
+ progress_set_remaining(s->progress,
+ bdrv_get_dirty_count(s->copy_bitmap) +
+ s->in_flight_bytes);
}
*count = bytes;
return ret;
}
-int coroutine_fn block_copy(BlockCopyState *s,
- int64_t start, uint64_t bytes,
- bool *error_is_read)
+/*
+ * block_copy_dirty_clusters
+ *
+ * Copy dirty clusters in @offset/@bytes range.
+ * Returns 1 if dirty clusters were found and successfully copied, 0 if no
+ * dirty clusters were found, and -errno on failure.
+ */
+static int coroutine_fn block_copy_dirty_clusters(BlockCopyState *s,
+ int64_t offset, int64_t bytes,
+ bool *error_is_read)
{
int ret = 0;
- int64_t end = bytes + start; /* bytes */
- int64_t status_bytes;
- BlockCopyInFlightReq req;
+ bool found_dirty = false;
/*
* block_copy() user is responsible for keeping source and target in same
@@ -292,60 +470,109 @@ int coroutine_fn block_copy(BlockCopyState *s,
assert(bdrv_get_aio_context(s->source->bs) ==
bdrv_get_aio_context(s->target->bs));
- assert(QEMU_IS_ALIGNED(start, s->cluster_size));
- assert(QEMU_IS_ALIGNED(end, s->cluster_size));
-
- block_copy_wait_inflight_reqs(s, start, bytes);
- block_copy_inflight_req_begin(s, &req, start, end);
+ assert(QEMU_IS_ALIGNED(offset, s->cluster_size));
+ assert(QEMU_IS_ALIGNED(bytes, s->cluster_size));
- while (start < end) {
- int64_t next_zero, chunk_end;
+ while (bytes) {
+ BlockCopyInFlightReq req;
+ int64_t next_zero, cur_bytes, status_bytes;
- if (!bdrv_dirty_bitmap_get(s->copy_bitmap, start)) {
- trace_block_copy_skip(s, start);
- start += s->cluster_size;
+ if (!bdrv_dirty_bitmap_get(s->copy_bitmap, offset)) {
+ trace_block_copy_skip(s, offset);
+ offset += s->cluster_size;
+ bytes -= s->cluster_size;
continue; /* already copied */
}
- chunk_end = MIN(end, start + s->copy_size);
+ found_dirty = true;
+
+ cur_bytes = MIN(bytes, s->copy_size);
- next_zero = bdrv_dirty_bitmap_next_zero(s->copy_bitmap, start,
- chunk_end - start);
+ next_zero = bdrv_dirty_bitmap_next_zero(s->copy_bitmap, offset,
+ cur_bytes);
if (next_zero >= 0) {
- assert(next_zero > start); /* start is dirty */
- assert(next_zero < chunk_end); /* no need to do MIN() */
- chunk_end = next_zero;
+ assert(next_zero > offset); /* offset is dirty */
+ assert(next_zero < offset + cur_bytes); /* no need to do MIN() */
+ cur_bytes = next_zero - offset;
}
-
- if (s->skip_unallocated) {
- ret = block_copy_reset_unallocated(s, start, &status_bytes);
- if (ret == 0) {
- trace_block_copy_skip_range(s, start, status_bytes);
- start += status_bytes;
- continue;
- }
- /* Clamp to known allocated region */
- chunk_end = MIN(chunk_end, start + status_bytes);
+ block_copy_inflight_req_begin(s, &req, offset, cur_bytes);
+
+ ret = block_copy_block_status(s, offset, cur_bytes, &status_bytes);
+ assert(ret >= 0); /* never fail */
+ cur_bytes = MIN(cur_bytes, status_bytes);
+ block_copy_inflight_req_shrink(s, &req, cur_bytes);
+ if (s->skip_unallocated && !(ret & BDRV_BLOCK_ALLOCATED)) {
+ block_copy_inflight_req_end(s, &req, 0);
+ progress_set_remaining(s->progress,
+ bdrv_get_dirty_count(s->copy_bitmap) +
+ s->in_flight_bytes);
+ trace_block_copy_skip_range(s, offset, status_bytes);
+ offset += status_bytes;
+ bytes -= status_bytes;
+ continue;
}
- trace_block_copy_process(s, start);
-
- bdrv_reset_dirty_bitmap(s->copy_bitmap, start, chunk_end - start);
+ trace_block_copy_process(s, offset);
- co_get_from_shres(s->mem, chunk_end - start);
- ret = block_copy_do_copy(s, start, chunk_end, error_is_read);
- co_put_to_shres(s->mem, chunk_end - start);
+ co_get_from_shres(s->mem, cur_bytes);
+ ret = block_copy_do_copy(s, offset, cur_bytes, ret & BDRV_BLOCK_ZERO,
+ error_is_read);
+ co_put_to_shres(s->mem, cur_bytes);
+ block_copy_inflight_req_end(s, &req, ret);
if (ret < 0) {
- bdrv_set_dirty_bitmap(s->copy_bitmap, start, chunk_end - start);
- break;
+ return ret;
}
- s->progress_bytes_callback(chunk_end - start, s->progress_opaque);
- start = chunk_end;
- ret = 0;
+ progress_work_done(s->progress, cur_bytes);
+ s->progress_bytes_callback(cur_bytes, s->progress_opaque);
+ offset += cur_bytes;
+ bytes -= cur_bytes;
}
- block_copy_inflight_req_end(&req);
+ return found_dirty;
+}
+
+/*
+ * block_copy
+ *
+ * Copy the requested region according to the dirty bitmap.
+ * Collaborate with parallel block_copy requests: if they succeed, that helps
+ * us. If they fail, we retry the not-yet-copied regions. So, if we return an
+ * error, it means that some I/O operation failed in the context of _this_
+ * block_copy call, not in some parallel operation.
+ */
+int coroutine_fn block_copy(BlockCopyState *s, int64_t offset, int64_t bytes,
+ bool *error_is_read)
+{
+ int ret;
+
+ do {
+ ret = block_copy_dirty_clusters(s, offset, bytes, error_is_read);
+
+ if (ret == 0) {
+ ret = block_copy_wait_one(s, offset, bytes);
+ }
+
+ /*
+ * We retry in two cases:
+ * 1. Some progress was made
+ * Something was copied, which means that there were yield points
+ * and some new dirty bits may have appeared (due to failed parallel
+ * block-copy requests).
+ * 2. We have waited for some intersecting block-copy request
+ * It may have failed and produced new dirty bits.
+ */
+ } while (ret > 0);
return ret;
}
+
+BdrvDirtyBitmap *block_copy_dirty_bitmap(BlockCopyState *s)
+{
+ return s->copy_bitmap;
+}
+
+void block_copy_set_skip_unallocated(BlockCopyState *s, bool skip)
+{
+ s->skip_unallocated = skip;
+}
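
block_copy_block_status() clamps the block-status answer to cluster granularity: inside the image it rounds down, while a short tail at EOF rounds up so the final partial cluster is still covered. A standalone sketch of just that clamping, with assumed sizes and plain-C stand-ins for QEMU's alignment macros:

    #include <inttypes.h>
    #include <stdint.h>
    #include <stdio.h>

    #define ALIGN_DOWN(n, m) ((n) / (m) * (m))
    #define ALIGN_UP(n, m)   ALIGN_DOWN((n) + (m) - 1, (m))

    /* Mirror of the clamping rules in block_copy_block_status(). */
    static int64_t clamp_status_chunk(int64_t offset, int64_t num,
                                      int64_t cluster_size, int64_t len)
    {
        if (num < cluster_size) {
            return cluster_size;        /* too small: fall back to one cluster */
        }
        if (offset + num == len) {
            return ALIGN_UP(num, cluster_size);   /* cover the unaligned tail */
        }
        return ALIGN_DOWN(num, cluster_size);
    }

    int main(void)
    {
        int64_t cs = 64 * 1024, len = 10 * cs + 4096; /* image ends mid-cluster */

        printf("%" PRId64 "\n", clamp_status_chunk(0, 3 * cs + 100, cs, len));
        /* 196608: rounded down to 3 whole clusters */
        printf("%" PRId64 "\n", clamp_status_chunk(10 * cs, 4096, cs, len));
        /* 65536: chunk smaller than a cluster, one full cluster instead */
        return 0;
    }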
diff --git a/block/crypto.c b/block/crypto.c
index 2482383..23e9c74 100644
--- a/block/crypto.c
+++ b/block/crypto.c
@@ -484,6 +484,67 @@ static int64_t block_crypto_getlength(BlockDriverState *bs)
}
+static BlockMeasureInfo *block_crypto_measure(QemuOpts *opts,
+ BlockDriverState *in_bs,
+ Error **errp)
+{
+ g_autoptr(QCryptoBlockCreateOptions) create_opts = NULL;
+ Error *local_err = NULL;
+ BlockMeasureInfo *info;
+ uint64_t size;
+ size_t luks_payload_size;
+ QDict *cryptoopts;
+
+ /*
+ * Preallocation mode doesn't affect size requirements, but we must consume
+ * the option.
+ */
+ g_free(qemu_opt_get_del(opts, BLOCK_OPT_PREALLOC));
+
+ size = qemu_opt_get_size_del(opts, BLOCK_OPT_SIZE, 0);
+
+ if (in_bs) {
+ int64_t ssize = bdrv_getlength(in_bs);
+
+ if (ssize < 0) {
+ error_setg_errno(&local_err, -ssize,
+ "Unable to get image virtual_size");
+ goto err;
+ }
+
+ size = ssize;
+ }
+
+ cryptoopts = qemu_opts_to_qdict_filtered(opts, NULL,
+ &block_crypto_create_opts_luks, true);
+ qdict_put_str(cryptoopts, "format", "luks");
+ create_opts = block_crypto_create_opts_init(cryptoopts, &local_err);
+ qobject_unref(cryptoopts);
+ if (!create_opts) {
+ goto err;
+ }
+
+ if (!qcrypto_block_calculate_payload_offset(create_opts, NULL,
+ &luks_payload_size,
+ &local_err)) {
+ goto err;
+ }
+
+ /*
+ * Unallocated blocks are still encrypted, so allocation status makes no
+ * difference to the file size.
+ */
+ info = g_new(BlockMeasureInfo, 1);
+ info->fully_allocated = luks_payload_size + size;
+ info->required = luks_payload_size + size;
+ return info;
+
+err:
+ error_propagate(errp, local_err);
+ return NULL;
+}
+
+
static int block_crypto_probe_luks(const uint8_t *buf,
int buf_size,
const char *filename) {
@@ -670,6 +731,7 @@ static BlockDriver bdrv_crypto_luks = {
.bdrv_co_preadv = block_crypto_co_preadv,
.bdrv_co_pwritev = block_crypto_co_pwritev,
.bdrv_getlength = block_crypto_getlength,
+ .bdrv_measure = block_crypto_measure,
.bdrv_get_info = block_crypto_get_info_luks,
.bdrv_get_specific_info = block_crypto_get_specific_info_luks,
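
With .bdrv_measure wired up for luks, a command along the lines of qemu-img measure -O luks --size 16G -o key-secret=sec0 --object secret,id=sec0,data=... can report sizes without creating an image (the exact secret options depend on the setup). The arithmetic itself is simple; a sketch assuming a 2 MiB LUKS payload offset, where the real value comes from qcrypto_block_calculate_payload_offset():

    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
        uint64_t virtual_size = 16ULL << 30; /* requested guest-visible size */
        uint64_t luks_payload = 2ULL << 20;  /* assumed header + keyslots size */

        /* Unallocated blocks are still encrypted, so required equals
         * fully-allocated: allocation status cannot shrink a luks file. */
        printf("required size:        %llu\n",
               (unsigned long long)(virtual_size + luks_payload));
        printf("fully allocated size: %llu\n",
               (unsigned long long)(virtual_size + luks_payload));
        return 0;
    }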
diff --git a/block/curl.c b/block/curl.c
index f862993..6e32590 100644
--- a/block/curl.c
+++ b/block/curl.c
@@ -214,11 +214,35 @@ static size_t curl_header_cb(void *ptr, size_t size, size_t nmemb, void *opaque)
{
BDRVCURLState *s = opaque;
size_t realsize = size * nmemb;
- const char *accept_line = "Accept-Ranges: bytes";
+ const char *header = (char *)ptr;
+ const char *end = header + realsize;
+ const char *accept_ranges = "accept-ranges:";
+ const char *bytes = "bytes";
- if (realsize >= strlen(accept_line)
- && strncmp((char *)ptr, accept_line, strlen(accept_line)) == 0) {
- s->accept_range = true;
+ if (realsize >= strlen(accept_ranges)
+ && g_ascii_strncasecmp(header, accept_ranges,
+ strlen(accept_ranges)) == 0) {
+
+ char *p = strchr(header, ':') + 1;
+
+ /* Skip whitespace between the header name and value. */
+ while (p < end && *p && g_ascii_isspace(*p)) {
+ p++;
+ }
+
+ if (end - p >= strlen(bytes)
+ && strncmp(p, bytes, strlen(bytes)) == 0) {
+
+ /* Check that there is nothing but whitespace after the value. */
+ p += strlen(bytes);
+ while (p < end && *p && g_ascii_isspace(*p)) {
+ p++;
+ }
+
+ if (p == end || !*p) {
+ s->accept_range = true;
+ }
+ }
}
return realsize;
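
The new parser accepts the header name case-insensitively and tolerates optional whitespace around the value, where the old strncmp-based check only matched the exact prefix "Accept-Ranges: bytes". A standalone approximation of the new logic (isspace and strncasecmp stand in for g_ascii_isspace and g_ascii_strncasecmp; header strings are illustrative):

    #include <ctype.h>
    #include <stdbool.h>
    #include <stdio.h>
    #include <string.h>
    #include <strings.h>

    /* Approximation of the accept-ranges check in curl_header_cb(). */
    static bool accepts_byte_ranges(const char *header)
    {
        const char *end = header + strlen(header);
        const char *p;

        if (strncasecmp(header, "accept-ranges:", 14) != 0) {
            return false;
        }
        p = strchr(header, ':') + 1;
        while (p < end && isspace((unsigned char)*p)) {
            p++;                           /* skip space before the value */
        }
        if (end - p < 5 || strncmp(p, "bytes", 5) != 0) {
            return false;
        }
        for (p += 5; p < end; p++) {       /* only whitespace may follow */
            if (!isspace((unsigned char)*p)) {
                return false;
            }
        }
        return true;
    }

    int main(void)
    {
        printf("%d\n", accepts_byte_ranges("Accept-Ranges: bytes\r\n")); /* 1 */
        printf("%d\n", accepts_byte_ranges("accept-ranges:bytes"));      /* 1 */
        printf("%d\n", accepts_byte_ranges("Accept-Ranges: none\r\n"));  /* 0 */
        return 0;
    }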
diff --git a/block/qcow2-threads.c b/block/qcow2-threads.c
index 77bb578..a68126f 100644
--- a/block/qcow2-threads.c
+++ b/block/qcow2-threads.c
@@ -128,12 +128,12 @@ static ssize_t qcow2_compress(void *dest, size_t dest_size,
* @src - source buffer, @src_size bytes
*
* Returns: 0 on success
- * -1 on fail
+ * -EIO on failure
*/
static ssize_t qcow2_decompress(void *dest, size_t dest_size,
const void *src, size_t src_size)
{
- int ret = 0;
+ int ret;
z_stream strm;
memset(&strm, 0, sizeof(strm));
@@ -144,17 +144,19 @@ static ssize_t qcow2_decompress(void *dest, size_t dest_size,
ret = inflateInit2(&strm, -12);
if (ret != Z_OK) {
- return -1;
+ return -EIO;
}
ret = inflate(&strm, Z_FINISH);
- if ((ret != Z_STREAM_END && ret != Z_BUF_ERROR) || strm.avail_out != 0) {
+ if ((ret == Z_STREAM_END || ret == Z_BUF_ERROR) && strm.avail_out == 0) {
/*
* We accept Z_BUF_ERROR because we need the @dest buffer to be filled, while
* the @src buffer may be processed only partly (in qcow2 the size of
* compressed data is known only with one-sector precision)
*/
- ret = -1;
+ ret = 0;
+ } else {
+ ret = -EIO;
}
inflateEnd(&strm);
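
The rewritten condition inverts the old failure test into a positive success test: decompression is good only when the output buffer was filled completely and zlib ended with Z_STREAM_END, or with Z_BUF_ERROR (the input may legitimately run long, since qcow2 knows the compressed size only to sector precision). A minimal helper expressing the same rule (the zlib constants are real; the helper name is local to this sketch):

    #include <errno.h>
    #include <zlib.h>

    /* 0 iff inflate() ended in a state qcow2_decompress() treats as success. */
    static int classify_inflate_result(int zret, unsigned int avail_out)
    {
        if ((zret == Z_STREAM_END || zret == Z_BUF_ERROR) && avail_out == 0) {
            return 0;   /* output filled; stream ended or input merely ran long */
        }
        return -EIO;    /* corrupt stream or short output */
    }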
diff --git a/block/qcow2.c b/block/qcow2.c
index 3640e8c..d44b456 100644
--- a/block/qcow2.c
+++ b/block/qcow2.c
@@ -2610,6 +2610,7 @@ static void qcow2_close(BlockDriverState *bs)
qcrypto_block_free(s->crypto);
s->crypto = NULL;
+ qapi_free_QCryptoBlockOpenOptions(s->crypto_opts);
g_free(s->unknown_header_fields);
cleanup_unknown_header_ext(bs);
@@ -4608,60 +4609,6 @@ static coroutine_fn int qcow2_co_flush_to_os(BlockDriverState *bs)
return ret;
}
-static ssize_t qcow2_measure_crypto_hdr_init_func(QCryptoBlock *block,
- size_t headerlen, void *opaque, Error **errp)
-{
- size_t *headerlenp = opaque;
-
- /* Stash away the payload size */
- *headerlenp = headerlen;
- return 0;
-}
-
-static ssize_t qcow2_measure_crypto_hdr_write_func(QCryptoBlock *block,
- size_t offset, const uint8_t *buf, size_t buflen,
- void *opaque, Error **errp)
-{
- /* Discard the bytes, we're not actually writing to an image */
- return buflen;
-}
-
-/* Determine the number of bytes for the LUKS payload */
-static bool qcow2_measure_luks_headerlen(QemuOpts *opts, size_t *len,
- Error **errp)
-{
- QDict *opts_qdict;
- QDict *cryptoopts_qdict;
- QCryptoBlockCreateOptions *cryptoopts;
- QCryptoBlock *crypto;
-
- /* Extract "encrypt." options into a qdict */
- opts_qdict = qemu_opts_to_qdict(opts, NULL);
- qdict_extract_subqdict(opts_qdict, &cryptoopts_qdict, "encrypt.");
- qobject_unref(opts_qdict);
-
- /* Build QCryptoBlockCreateOptions object from qdict */
- qdict_put_str(cryptoopts_qdict, "format", "luks");
- cryptoopts = block_crypto_create_opts_init(cryptoopts_qdict, errp);
- qobject_unref(cryptoopts_qdict);
- if (!cryptoopts) {
- return false;
- }
-
- /* Fake LUKS creation in order to determine the payload size */
- crypto = qcrypto_block_create(cryptoopts, "encrypt.",
- qcow2_measure_crypto_hdr_init_func,
- qcow2_measure_crypto_hdr_write_func,
- len, errp);
- qapi_free_QCryptoBlockCreateOptions(cryptoopts);
- if (!crypto) {
- return false;
- }
-
- qcrypto_block_free(crypto);
- return true;
-}
-
static BlockMeasureInfo *qcow2_measure(QemuOpts *opts, BlockDriverState *in_bs,
Error **errp)
{
@@ -4712,9 +4659,27 @@ static BlockMeasureInfo *qcow2_measure(QemuOpts *opts, BlockDriverState *in_bs,
g_free(optstr);
if (has_luks) {
+ g_autoptr(QCryptoBlockCreateOptions) create_opts = NULL;
+ QDict *opts_qdict;
+ QDict *cryptoopts;
size_t headerlen;
- if (!qcow2_measure_luks_headerlen(opts, &headerlen, &local_err)) {
+ opts_qdict = qemu_opts_to_qdict(opts, NULL);
+ qdict_extract_subqdict(opts_qdict, &cryptoopts, "encrypt.");
+ qobject_unref(opts_qdict);
+
+ qdict_put_str(cryptoopts, "format", "luks");
+
+ create_opts = block_crypto_create_opts_init(cryptoopts, errp);
+ qobject_unref(cryptoopts);
+ if (!create_opts) {
+ goto err;
+ }
+
+ if (!qcrypto_block_calculate_payload_offset(create_opts,
+ "encrypt.",
+ &headerlen,
+ &local_err)) {
goto err;
}
diff --git a/block/trace-events b/block/trace-events
index 1a7329b..29dff88 100644
--- a/block/trace-events
+++ b/block/trace-events
@@ -48,6 +48,7 @@ block_copy_process(void *bcs, int64_t start) "bcs %p start %"PRId64
block_copy_copy_range_fail(void *bcs, int64_t start, int ret) "bcs %p start %"PRId64" ret %d"
block_copy_read_fail(void *bcs, int64_t start, int ret) "bcs %p start %"PRId64" ret %d"
block_copy_write_fail(void *bcs, int64_t start, int ret) "bcs %p start %"PRId64" ret %d"
+block_copy_write_zeroes_fail(void *bcs, int64_t start, int ret) "bcs %p start %"PRId64" ret %d"
# ../blockdev.c
qmp_block_job_cancel(void *job) "job %p"