author    Alberto Garcia <berto@igalia.com>        2015-05-11 15:54:53 +0300
committer Kevin Wolf <kwolf@redhat.com>            2015-05-22 17:08:01 +0200
commit    72e80b89015bab196f0f0e83b12b0eee75fa0574
tree      bccf75bc1b4f2268827580f456f55770cc29a93e /block/qcow2-cluster.c
parent    13c4941cdd8685d28c7e3a09e393a5579b58db46
qcow2: use one single memory block for the L2/refcount cache tables
The qcow2 L2/refcount cache contains one separate table for each cache entry. Doing one allocation per table adds unnecessary overhead and it also requires us to store the address of each table separately.

Since the size of the cache is constant during its lifetime, it's better to have an array that contains all the tables using one single allocation.

In my tests measuring freshly created caches with sizes 128MB (L2) and 32MB (refcount) this uses around 10MB of RAM less.

Signed-off-by: Alberto Garcia <berto@igalia.com>
Reviewed-by: Stefan Hajnoczi <stefanha@redhat.com>
Reviewed-by: Max Reitz <mreitz@redhat.com>
Signed-off-by: Kevin Wolf <kwolf@redhat.com>
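To illustrate the idea described above: this is a hedged, self-contained sketch, not the actual Qcow2Cache code from qcow2-cache.c; the names TableCache, table_cache_new and table_cache_addr are invented for the example. Instead of calling the allocator once per cached table, the whole cache is backed by a single contiguous block, and the table of entry i is found by pointer arithmetic rather than through a stored per-entry pointer.

#include <stdint.h>
#include <stdlib.h>

/* Sketch only: one contiguous block backs every cached table, so no
 * per-entry table pointer has to be stored. */
typedef struct TableCache {
    void   *table_array;  /* single allocation holding all tables */
    size_t  table_size;   /* size of one table (the cluster size in qcow2) */
    int     num_tables;   /* constant for the lifetime of the cache */
} TableCache;

static TableCache *table_cache_new(int num_tables, size_t table_size)
{
    TableCache *c = calloc(1, sizeof(*c));
    if (!c) {
        return NULL;
    }
    /* One allocation for all tables instead of num_tables separate ones. */
    c->table_array = calloc(num_tables, table_size);
    if (!c->table_array) {
        free(c);
        return NULL;
    }
    c->table_size = table_size;
    c->num_tables = num_tables;
    return c;
}

/* The table of entry i is just an offset into the single block. */
static void *table_cache_addr(const TableCache *c, int i)
{
    return (uint8_t *)c->table_array + (size_t)i * c->table_size;
}

With this layout the saving comes from avoiding one allocator call (and its bookkeeping overhead) plus one stored pointer per cache entry, which is consistent with the roughly 10MB reduction the commit message reports for large caches.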
Diffstat (limited to 'block/qcow2-cluster.c')
-rw-r--r--  block/qcow2-cluster.c  12
1 file changed, 6 insertions, 6 deletions
diff --git a/block/qcow2-cluster.c b/block/qcow2-cluster.c
index ed2b44d..5cd418a 100644
--- a/block/qcow2-cluster.c
+++ b/block/qcow2-cluster.c
@@ -263,7 +263,7 @@ static int l2_allocate(BlockDriverState *bs, int l1_index, uint64_t **table)
BLKDBG_EVENT(bs->file, BLKDBG_L2_ALLOC_WRITE);
trace_qcow2_l2_allocate_write_l2(bs, l1_index);
- qcow2_cache_entry_mark_dirty(s->l2_table_cache, l2_table);
+ qcow2_cache_entry_mark_dirty(bs, s->l2_table_cache, l2_table);
ret = qcow2_cache_flush(bs, s->l2_table_cache);
if (ret < 0) {
goto fail;
@@ -692,7 +692,7 @@ uint64_t qcow2_alloc_compressed_cluster_offset(BlockDriverState *bs,
/* compressed clusters never have the copied flag */
BLKDBG_EVENT(bs->file, BLKDBG_L2_UPDATE_COMPRESSED);
- qcow2_cache_entry_mark_dirty(s->l2_table_cache, l2_table);
+ qcow2_cache_entry_mark_dirty(bs, s->l2_table_cache, l2_table);
l2_table[l2_index] = cpu_to_be64(cluster_offset);
ret = qcow2_cache_put(bs, s->l2_table_cache, (void**) &l2_table);
if (ret < 0) {
@@ -771,7 +771,7 @@ int qcow2_alloc_cluster_link_l2(BlockDriverState *bs, QCowL2Meta *m)
if (ret < 0) {
goto err;
}
- qcow2_cache_entry_mark_dirty(s->l2_table_cache, l2_table);
+ qcow2_cache_entry_mark_dirty(bs, s->l2_table_cache, l2_table);
assert(l2_index + m->nb_clusters <= s->l2_size);
for (i = 0; i < m->nb_clusters; i++) {
@@ -1470,7 +1470,7 @@ static int discard_single_l2(BlockDriverState *bs, uint64_t offset,
}
/* First remove L2 entries */
- qcow2_cache_entry_mark_dirty(s->l2_table_cache, l2_table);
+ qcow2_cache_entry_mark_dirty(bs, s->l2_table_cache, l2_table);
if (!full_discard && s->qcow_version >= 3) {
l2_table[l2_index + i] = cpu_to_be64(QCOW_OFLAG_ZERO);
} else {
@@ -1558,7 +1558,7 @@ static int zero_single_l2(BlockDriverState *bs, uint64_t offset,
old_offset = be64_to_cpu(l2_table[l2_index + i]);
/* Update L2 entries */
- qcow2_cache_entry_mark_dirty(s->l2_table_cache, l2_table);
+ qcow2_cache_entry_mark_dirty(bs, s->l2_table_cache, l2_table);
if (old_offset & QCOW_OFLAG_COMPRESSED) {
l2_table[l2_index + i] = cpu_to_be64(QCOW_OFLAG_ZERO);
qcow2_free_any_clusters(bs, old_offset, 1, QCOW2_DISCARD_REQUEST);
@@ -1760,7 +1760,7 @@ static int expand_zero_clusters_in_l1(BlockDriverState *bs, uint64_t *l1_table,
if (is_active_l1) {
if (l2_dirty) {
- qcow2_cache_entry_mark_dirty(s->l2_table_cache, l2_table);
+ qcow2_cache_entry_mark_dirty(bs, s->l2_table_cache, l2_table);
qcow2_cache_depends_on_flush(s->l2_table_cache);
}
ret = qcow2_cache_put(bs, s->l2_table_cache, (void **)&l2_table);
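Every hunk above makes the same mechanical change: each call to qcow2_cache_entry_mark_dirty() now passes bs as well. The corresponding qcow2-cache.c change is not part of this diff (it is limited to block/qcow2-cluster.c), so the following is only a plausible sketch of why the extra argument is needed once the cache uses a single memory block: with no per-entry table pointer stored, the cache has to recover an entry index from the table pointer itself, and the divisor for that calculation (the per-table size, i.e. the cluster size) lives in driver state reachable through bs. The function table_cache_idx is an invented name and reuses the TableCache type from the earlier sketch.

#include <assert.h>
#include <stddef.h>

/* Sketch only (uses the TableCache type and includes of the earlier example):
 * recover an entry index from a table pointer by measuring its offset inside
 * the single backing block. */
static int table_cache_idx(const TableCache *c, const void *table)
{
    ptrdiff_t off = (const uint8_t *)table - (const uint8_t *)c->table_array;
    /* The pointer must lie on a table boundary inside the block. */
    assert(off >= 0 && off % (ptrdiff_t)c->table_size == 0);
    return (int)(off / (ptrdiff_t)c->table_size);
}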