path: root/block/commit.c
author     Eric Blake <eblake@redhat.com>    2017-07-07 07:44:59 -0500
committer  Kevin Wolf <kwolf@redhat.com>     2017-07-10 13:18:07 +0200
commit     51b0a488882328f8f02519bb47ca7e0e7fbe12ff (patch)
tree       36257cf2221ea7e4e54ab7deae446d02c46f326c /block/commit.c
parent     c00716beb30ba996bd6fdfd5f41bb07e4414144f (diff)
block: Make bdrv_is_allocated_above() byte-based
We are gradually moving away from sector-based interfaces, towards byte-based. In the common case, allocation is unlikely to ever use values that are not naturally sector-aligned, but it is possible that byte-based values will let us be more precise about allocation at the end of an unaligned file that can do byte-based access.

Changing the signature of the function to use int64_t *pnum ensures that the compiler enforces that all callers are updated. For now, the io.c layer still assert()s that all callers are sector-aligned, but that can be relaxed when a later patch implements byte-based block status. Therefore, for the most part this patch is just the addition of scaling at the callers followed by inverse scaling at bdrv_is_allocated(). But some code, particularly stream_run(), gets a lot simpler because it no longer has to mess with sectors. Leave comments where we can further simplify by switching to byte-based iterations, once later patches eliminate the need for sector-aligned operations.

For ease of review, bdrv_is_allocated() was tackled separately.

Signed-off-by: Eric Blake <eblake@redhat.com>
Signed-off-by: Kevin Wolf <kwolf@redhat.com>
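To illustrate the conversion pattern the message describes (byte-based callers and progress accounting, with sector scaling confined to one transitional layer), here is a minimal standalone sketch. It is not QEMU code: SECTOR_SIZE, legacy_is_allocated_sectors() and is_allocated_bytes() are hypothetical stand-ins for BDRV_SECTOR_SIZE, the old sector-based query, and the new byte-based bdrv_is_allocated_above() respectively.

/* byte_scaling_sketch.c -- illustrative only, not part of QEMU. */
#include <assert.h>
#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

#define SECTOR_SIZE 512   /* stand-in for BDRV_SECTOR_SIZE */

/*
 * Hypothetical sector-based helper playing the role of the old
 * interface: pretend the first 1 MiB is allocated and the rest is not.
 */
static int legacy_is_allocated_sectors(int64_t sector_num, int nb_sectors,
                                       int *pnum_sectors)
{
    const int64_t allocated_end = (1024 * 1024) / SECTOR_SIZE;

    if (sector_num < allocated_end) {
        int64_t n = allocated_end - sector_num;
        *pnum_sectors = n < nb_sectors ? (int)n : nb_sectors;
        return 1;                          /* allocated */
    }
    *pnum_sectors = nb_sectors;
    return 0;                              /* unallocated */
}

/*
 * Byte-based wrapper in the style the commit message describes: callers
 * pass bytes and get *pnum back in bytes; the sector scaling (and, for
 * now, the sector-alignment requirement) is confined to this one place.
 */
static int is_allocated_bytes(int64_t offset, int64_t bytes, int64_t *pnum)
{
    int n_sectors;
    int ret;

    assert(offset % SECTOR_SIZE == 0 && bytes % SECTOR_SIZE == 0);

    ret = legacy_is_allocated_sectors(offset / SECTOR_SIZE,
                                      (int)(bytes / SECTOR_SIZE), &n_sectors);
    /* Inverse scaling: report the run length back to the caller in bytes. */
    *pnum = (int64_t)n_sectors * SECTOR_SIZE;
    return ret;
}

int main(void)
{
    int64_t pnum;

    /* Byte-based iteration, analogous to the new loop in commit_run(). */
    for (int64_t offset = 0; offset < 2 * 1024 * 1024; offset += pnum) {
        int ret = is_allocated_bytes(offset, 512 * 1024, &pnum);
        printf("offset %8" PRId64 ": %-11s for %" PRId64 " bytes\n",
               offset, ret ? "allocated" : "unallocated", pnum);
    }
    return 0;
}

The loop in main() mirrors the simplification visible in the commit_run() diff below: it advances by the byte count returned through *pnum instead of multiplying a sector count by a sector size.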
Diffstat (limited to 'block/commit.c')
-rw-r--r--  block/commit.c  20
1 file changed, 8 insertions, 12 deletions
diff --git a/block/commit.c b/block/commit.c
index 241aa95..774a8a5 100644
--- a/block/commit.c
+++ b/block/commit.c
@@ -146,7 +146,7 @@ static void coroutine_fn commit_run(void *opaque)
     int64_t offset;
     uint64_t delay_ns = 0;
     int ret = 0;
-    int n = 0; /* sectors */
+    int64_t n = 0; /* bytes */
     void *buf = NULL;
     int bytes_written = 0;
     int64_t base_len;
@@ -171,7 +171,7 @@ static void coroutine_fn commit_run(void *opaque)
 
     buf = blk_blockalign(s->top, COMMIT_BUFFER_SIZE);
 
-    for (offset = 0; offset < s->common.len; offset += n * BDRV_SECTOR_SIZE) {
+    for (offset = 0; offset < s->common.len; offset += n) {
         bool copy;
 
         /* Note that even when no rate limit is applied we need to yield
@@ -183,15 +183,12 @@ static void coroutine_fn commit_run(void *opaque)
         }
         /* Copy if allocated above the base */
         ret = bdrv_is_allocated_above(blk_bs(s->top), blk_bs(s->base),
-                                      offset / BDRV_SECTOR_SIZE,
-                                      COMMIT_BUFFER_SIZE / BDRV_SECTOR_SIZE,
-                                      &n);
+                                      offset, COMMIT_BUFFER_SIZE, &n);
         copy = (ret == 1);
-        trace_commit_one_iteration(s, offset, n * BDRV_SECTOR_SIZE, ret);
+        trace_commit_one_iteration(s, offset, n, ret);
         if (copy) {
-            ret = commit_populate(s->top, s->base, offset,
-                                  n * BDRV_SECTOR_SIZE, buf);
-            bytes_written += n * BDRV_SECTOR_SIZE;
+            ret = commit_populate(s->top, s->base, offset, n, buf);
+            bytes_written += n;
         }
         if (ret < 0) {
             BlockErrorAction action =
@@ -204,11 +201,10 @@ static void coroutine_fn commit_run(void *opaque)
             }
         }
         /* Publish progress */
-        s->common.offset += n * BDRV_SECTOR_SIZE;
+        s->common.offset += n;
 
         if (copy && s->common.speed) {
-            delay_ns = ratelimit_calculate_delay(&s->limit,
-                                                 n * BDRV_SECTOR_SIZE);
+            delay_ns = ratelimit_calculate_delay(&s->limit, n);
         }
     }
 