path: root/block/qcow2-refcount.c
author: Max Reitz <mreitz@redhat.com> 2014-09-03 00:25:07 +0200
committer: Kevin Wolf <kwolf@redhat.com> 2014-10-23 15:34:02 +0200
commit: 17bd5f472754acd2458b53dc02a30d5651e6dd79 (patch)
tree: d8e77d116346cf9f284134985a9947e5840da0f0 /block/qcow2-refcount.c
parent: 234764eed1aab56a657a161e9a0c65730442e6f8 (diff)
qcow2: Drop REFCOUNT_SHIFT
With BDRVQcowState.refcount_block_bits, we don't need REFCOUNT_SHIFT anymore.

Signed-off-by: Max Reitz <mreitz@redhat.com>
Reviewed-by: Eric Blake <eblake@redhat.com>
Signed-off-by: Kevin Wolf <kwolf@redhat.com>
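For context, the patch relies on two identities: refcount entries are 16 bits wide (see the be16_to_cpu() calls below), so REFCOUNT_SHIFT was log2(sizeof(uint16_t)) = 1, refcount_block_bits equals cluster_bits - REFCOUNT_SHIFT, and refcount_block_size equals 1 << refcount_block_bits. The following standalone sketch only illustrates that equivalence; example_state is a hypothetical stand-in for BDRVQcowState, not the actual QEMU code:

```c
/* Illustrative sketch (not the actual QEMU code): with 16-bit refcount
 * entries, REFCOUNT_SHIFT was 1, so the old and new expressions agree. */
#include <assert.h>
#include <stdint.h>

#define REFCOUNT_SHIFT 1            /* log2(sizeof(uint16_t)) */

struct example_state {              /* hypothetical stand-in for BDRVQcowState */
    int cluster_bits;               /* e.g. 16 for 64 KiB clusters */
    int refcount_block_bits;        /* cluster_bits - REFCOUNT_SHIFT */
    int refcount_block_size;        /* 1 << refcount_block_bits */
};

int main(void)
{
    struct example_state s = { .cluster_bits = 16 };
    s.refcount_block_bits = s.cluster_bits - REFCOUNT_SHIFT;
    s.refcount_block_size = 1 << s.refcount_block_bits;

    uint64_t cluster_index = 123456;

    /* Old REFCOUNT_SHIFT-based expressions vs. the new field-based ones */
    assert(cluster_index >> (s.cluster_bits - REFCOUNT_SHIFT)
           == cluster_index >> s.refcount_block_bits);
    assert((cluster_index & ((1 << (s.cluster_bits - REFCOUNT_SHIFT)) - 1))
           == (cluster_index & (s.refcount_block_size - 1)));
    return 0;
}
```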
Diffstat (limited to 'block/qcow2-refcount.c')
-rw-r--r--  block/qcow2-refcount.c  32
1 file changed, 14 insertions(+), 18 deletions(-)
diff --git a/block/qcow2-refcount.c b/block/qcow2-refcount.c
index 6f1b118..1477031 100644
--- a/block/qcow2-refcount.c
+++ b/block/qcow2-refcount.c
@@ -100,7 +100,7 @@ static int get_refcount(BlockDriverState *bs, int64_t cluster_index)
uint16_t *refcount_block;
uint16_t refcount;
- refcount_table_index = cluster_index >> (s->cluster_bits - REFCOUNT_SHIFT);
+ refcount_table_index = cluster_index >> s->refcount_block_bits;
if (refcount_table_index >= s->refcount_table_size)
return 0;
refcount_block_offset =
@@ -121,8 +121,7 @@ static int get_refcount(BlockDriverState *bs, int64_t cluster_index)
return ret;
}
- block_index = cluster_index &
- ((1 << (s->cluster_bits - REFCOUNT_SHIFT)) - 1);
+ block_index = cluster_index & (s->refcount_block_size - 1);
refcount = be16_to_cpu(refcount_block[block_index]);
ret = qcow2_cache_put(bs, s->refcount_block_cache,
@@ -157,8 +156,8 @@ static unsigned int next_refcount_table_size(BDRVQcowState *s,
static int in_same_refcount_block(BDRVQcowState *s, uint64_t offset_a,
uint64_t offset_b)
{
- uint64_t block_a = offset_a >> (2 * s->cluster_bits - REFCOUNT_SHIFT);
- uint64_t block_b = offset_b >> (2 * s->cluster_bits - REFCOUNT_SHIFT);
+ uint64_t block_a = offset_a >> (s->cluster_bits + s->refcount_block_bits);
+ uint64_t block_b = offset_b >> (s->cluster_bits + s->refcount_block_bits);
return (block_a == block_b);
}
@@ -179,7 +178,7 @@ static int alloc_refcount_block(BlockDriverState *bs,
BLKDBG_EVENT(bs->file, BLKDBG_REFBLOCK_ALLOC);
/* Find the refcount block for the given cluster */
- refcount_table_index = cluster_index >> (s->cluster_bits - REFCOUNT_SHIFT);
+ refcount_table_index = cluster_index >> s->refcount_block_bits;
if (refcount_table_index < s->refcount_table_size) {
@@ -256,7 +255,7 @@ static int alloc_refcount_block(BlockDriverState *bs,
/* The block describes itself, need to update the cache */
int block_index = (new_block >> s->cluster_bits) &
- ((1 << (s->cluster_bits - REFCOUNT_SHIFT)) - 1);
+ (s->refcount_block_size - 1);
(*refcount_block)[block_index] = cpu_to_be16(1);
} else {
/* Described somewhere else. This can recurse at most twice before we
@@ -328,8 +327,7 @@ static int alloc_refcount_block(BlockDriverState *bs,
BLKDBG_EVENT(bs->file, BLKDBG_REFTABLE_GROW);
/* Calculate the number of refcount blocks needed so far */
- uint64_t refcount_block_clusters = 1 << (s->cluster_bits - REFCOUNT_SHIFT);
- uint64_t blocks_used = DIV_ROUND_UP(cluster_index, refcount_block_clusters);
+ uint64_t blocks_used = DIV_ROUND_UP(cluster_index, s->refcount_block_size);
if (blocks_used > QCOW_MAX_REFTABLE_SIZE / sizeof(uint64_t)) {
return -EFBIG;
@@ -343,14 +341,14 @@ static int alloc_refcount_block(BlockDriverState *bs,
uint64_t table_clusters =
size_to_clusters(s, table_size * sizeof(uint64_t));
blocks_clusters = 1 +
- ((table_clusters + refcount_block_clusters - 1)
- / refcount_block_clusters);
+ ((table_clusters + s->refcount_block_size - 1)
+ / s->refcount_block_size);
uint64_t meta_clusters = table_clusters + blocks_clusters;
last_table_size = table_size;
table_size = next_refcount_table_size(s, blocks_used +
- ((meta_clusters + refcount_block_clusters - 1)
- / refcount_block_clusters));
+ ((meta_clusters + s->refcount_block_size - 1)
+ / s->refcount_block_size));
} while (last_table_size != table_size);
@@ -360,7 +358,7 @@ static int alloc_refcount_block(BlockDriverState *bs,
#endif
/* Create the new refcount table and blocks */
- uint64_t meta_offset = (blocks_used * refcount_block_clusters) *
+ uint64_t meta_offset = (blocks_used * s->refcount_block_size) *
s->cluster_size;
uint64_t table_offset = meta_offset + blocks_clusters * s->cluster_size;
uint64_t *new_table = g_try_new0(uint64_t, table_size);
@@ -560,8 +558,7 @@ static int QEMU_WARN_UNUSED_RESULT update_refcount(BlockDriverState *bs,
{
int block_index, refcount;
int64_t cluster_index = cluster_offset >> s->cluster_bits;
- int64_t table_index =
- cluster_index >> (s->cluster_bits - REFCOUNT_SHIFT);
+ int64_t table_index = cluster_index >> s->refcount_block_bits;
/* Load the refcount block and allocate it if needed */
if (table_index != old_table_index) {
@@ -583,8 +580,7 @@ static int QEMU_WARN_UNUSED_RESULT update_refcount(BlockDriverState *bs,
qcow2_cache_entry_mark_dirty(s->refcount_block_cache, refcount_block);
/* we can update the count and save it */
- block_index = cluster_index &
- ((1 << (s->cluster_bits - REFCOUNT_SHIFT)) - 1);
+ block_index = cluster_index & (s->refcount_block_size - 1);
refcount = be16_to_cpu(refcount_block[block_index]);
refcount += addend;
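To make the indexing in the update_refcount() hunk above concrete, here is a small worked sketch of how a cluster index splits into table_index and block_index with the new fields. The values are illustrative assumptions (cluster_bits = 16, i.e. 64 KiB clusters, and 16-bit refcount entries), not taken from the patch:

```c
/* Illustrative sketch of the table_index/block_index split, assuming
 * cluster_bits = 16 (64 KiB clusters) and 16-bit refcount entries. */
#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

int main(void)
{
    const int cluster_bits = 16;                         /* assumption */
    const int refcount_block_bits = cluster_bits - 1;    /* 16-bit entries */
    const int refcount_block_size = 1 << refcount_block_bits;   /* 32768 */

    uint64_t cluster_offset = 5ULL * 1024 * 1024 * 1024;        /* 5 GiB */
    int64_t cluster_index = cluster_offset >> cluster_bits;     /* 81920 */
    int64_t table_index = cluster_index >> refcount_block_bits; /* 2 */
    int block_index = cluster_index & (refcount_block_size - 1);/* 16384 */

    printf("cluster_index=%" PRId64 " table_index=%" PRId64 " block_index=%d\n",
           cluster_index, table_index, block_index);
    return 0;
}
```

With these assumed parameters, one refcount block describes 32768 clusters of 64 KiB each, i.e. 2 GiB of image data, so a 5 GiB offset lands in the third refcount block (table_index 2), entry 16384 within it.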