 block/dirty-bitmap.c         |  16
 block/qcow2-bitmap.c         |  15
 configure                    |  20
 include/block/dirty-bitmap.h |   9
 include/qemu/hbitmap.h       |  97
 nbd/server.c                 | 251
 tests/test-hbitmap.c         | 314
 util/hbitmap.c               | 134
 8 files changed, 395 insertions(+), 461 deletions(-)
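The core of the change is the move away from BdrvDirtyBitmapIter-based walking to the int64_t search primitives bdrv_dirty_bitmap_next_dirty(), bdrv_dirty_bitmap_next_zero() and the reworked *_next_dirty_area(). A minimal sketch of the calling convention the diffs below converge on (illustrative only, not part of the patch; walk_dirty_ranges() and process_range() are hypothetical placeholders):

/*
 * Sketch only, assuming it lives inside QEMU with "block/dirty-bitmap.h"
 * available: walk every dirty byte range of a bitmap using the new
 * search-based API instead of a BdrvDirtyBitmapIter.
 */
static void walk_dirty_ranges(BdrvDirtyBitmap *bitmap)
{
    int64_t offset = 0;

    /* INT64_MAX as the byte count means "search to the end of the bitmap" */
    while ((offset = bdrv_dirty_bitmap_next_dirty(bitmap, offset, INT64_MAX))
           >= 0)
    {
        /* The dirty run ends at the next clean bit (or at the bitmap end). */
        int64_t end = bdrv_dirty_bitmap_next_zero(bitmap, offset, INT64_MAX);

        if (end < 0) {
            end = bdrv_dirty_bitmap_size(bitmap);
        }

        process_range(offset, end - offset);    /* hypothetical consumer */
        offset = end;                           /* resume after this run */
    }
}

This is the same pattern store_bitmap_data() in block/qcow2-bitmap.c adopts below.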
diff --git a/block/dirty-bitmap.c b/block/dirty-bitmap.c
index 7039e82..063793e 100644
--- a/block/dirty-bitmap.c
+++ b/block/dirty-bitmap.c
@@ -860,16 +860,24 @@ char *bdrv_dirty_bitmap_sha256(const BdrvDirtyBitmap *bitmap, Error **errp)
     return hbitmap_sha256(bitmap->bitmap, errp);
 }
 
-int64_t bdrv_dirty_bitmap_next_zero(BdrvDirtyBitmap *bitmap, uint64_t offset,
-                                    uint64_t bytes)
+int64_t bdrv_dirty_bitmap_next_dirty(BdrvDirtyBitmap *bitmap, int64_t offset,
+                                     int64_t bytes)
+{
+    return hbitmap_next_dirty(bitmap->bitmap, offset, bytes);
+}
+
+int64_t bdrv_dirty_bitmap_next_zero(BdrvDirtyBitmap *bitmap, int64_t offset,
+                                    int64_t bytes)
 {
     return hbitmap_next_zero(bitmap->bitmap, offset, bytes);
 }
 
 bool bdrv_dirty_bitmap_next_dirty_area(BdrvDirtyBitmap *bitmap,
-                                       uint64_t *offset, uint64_t *bytes)
+        int64_t start, int64_t end, int64_t max_dirty_count,
+        int64_t *dirty_start, int64_t *dirty_count)
 {
-    return hbitmap_next_dirty_area(bitmap->bitmap, offset, bytes);
+    return hbitmap_next_dirty_area(bitmap->bitmap, start, end, max_dirty_count,
+                                   dirty_start, dirty_count);
 }
 
 /**
diff --git a/block/qcow2-bitmap.c b/block/qcow2-bitmap.c
index 8cccc2c..cb06954 100644
--- a/block/qcow2-bitmap.c
+++ b/block/qcow2-bitmap.c
@@ -1288,7 +1288,6 @@ static uint64_t *store_bitmap_data(BlockDriverState *bs,
     uint64_t bm_size = bdrv_dirty_bitmap_size(bitmap);
     const char *bm_name = bdrv_dirty_bitmap_name(bitmap);
     uint8_t *buf = NULL;
-    BdrvDirtyBitmapIter *dbi;
     uint64_t *tb;
     uint64_t tb_size = size_to_clusters(s,
@@ -1307,12 +1306,14 @@ static uint64_t *store_bitmap_data(BlockDriverState *bs,
         return NULL;
     }
 
-    dbi = bdrv_dirty_iter_new(bitmap);
     buf = g_malloc(s->cluster_size);
     limit = bytes_covered_by_bitmap_cluster(s, bitmap);
     assert(DIV_ROUND_UP(bm_size, limit) == tb_size);
 
-    while ((offset = bdrv_dirty_iter_next(dbi)) >= 0) {
+    offset = 0;
+    while ((offset = bdrv_dirty_bitmap_next_dirty(bitmap, offset, INT64_MAX))
+           >= 0)
+    {
         uint64_t cluster = offset / limit;
         uint64_t end, write_size;
         int64_t off;
@@ -1355,23 +1356,17 @@ static uint64_t *store_bitmap_data(BlockDriverState *bs,
             goto fail;
         }
 
-        if (end >= bm_size) {
-            break;
-        }
-
-        bdrv_set_dirty_iter(dbi, end);
+        offset = end;
     }
 
     *bitmap_table_size = tb_size;
     g_free(buf);
-    bdrv_dirty_iter_free(dbi);
 
     return tb;
 
 fail:
     clear_bitmap_table(bs, tb, tb_size);
     g_free(buf);
-    bdrv_dirty_iter_free(dbi);
     g_free(tb);
 
     return NULL;
diff --git a/configure b/configure
--- a/configure
+++ b/configure
@@ -3855,6 +3855,26 @@ if ! compile_prog "$glib_cflags -Werror" "$glib_libs" ; then
     fi
 fi
 
+# Silence clang warnings triggered by glib < 2.57.2
+cat > $TMPC << EOF
+#include <glib.h>
+typedef struct Foo {
+    int i;
+} Foo;
+static void foo_free(Foo *f)
+{
+    g_free(f);
+}
+G_DEFINE_AUTOPTR_CLEANUP_FUNC(Foo, foo_free);
+int main(void) { return 0; }
+EOF
+if ! compile_prog "$glib_cflags -Werror" "$glib_libs" ; then
+    if cc_has_warning_flag "-Wno-unused-function"; then
+        glib_cflags="$glib_cflags -Wno-unused-function"
+        CFLAGS="$CFLAGS -Wno-unused-function"
+    fi
+fi
+
 #########################################
 # zlib check
 
diff --git a/include/block/dirty-bitmap.h b/include/block/dirty-bitmap.h
index e2b20ec..8a10029 100644
--- a/include/block/dirty-bitmap.h
+++ b/include/block/dirty-bitmap.h
@@ -105,10 +105,13 @@ for (bitmap = bdrv_dirty_bitmap_first(bs); bitmap; \
      bitmap = bdrv_dirty_bitmap_next(bitmap))
 
 char *bdrv_dirty_bitmap_sha256(const BdrvDirtyBitmap *bitmap, Error **errp);
-int64_t bdrv_dirty_bitmap_next_zero(BdrvDirtyBitmap *bitmap, uint64_t offset,
-                                    uint64_t bytes);
+int64_t bdrv_dirty_bitmap_next_dirty(BdrvDirtyBitmap *bitmap, int64_t offset,
+                                     int64_t bytes);
+int64_t bdrv_dirty_bitmap_next_zero(BdrvDirtyBitmap *bitmap, int64_t offset,
+                                    int64_t bytes);
 bool bdrv_dirty_bitmap_next_dirty_area(BdrvDirtyBitmap *bitmap,
-                                       uint64_t *offset, uint64_t *bytes);
+        int64_t start, int64_t end, int64_t max_dirty_count,
+        int64_t *dirty_start, int64_t *dirty_count);
 BdrvDirtyBitmap *bdrv_reclaim_dirty_bitmap_locked(BdrvDirtyBitmap *bitmap,
                                                   Error **errp);
 
diff --git a/include/qemu/hbitmap.h b/include/qemu/hbitmap.h
index 1bf944c..5e71b6d 100644
--- a/include/qemu/hbitmap.h
+++ b/include/qemu/hbitmap.h
@@ -297,12 +297,18 @@ void hbitmap_free(HBitmap *hb);
  */
 void hbitmap_iter_init(HBitmapIter *hbi, const HBitmap *hb, uint64_t first);
 
-/* hbitmap_iter_skip_words:
- * @hbi: HBitmapIter to operate on.
+/*
+ * hbitmap_next_dirty:
+ *
+ * Find next dirty bit within selected range. If not found, return -1.
  *
- * Internal function used by hbitmap_iter_next and hbitmap_iter_next_word.
+ * @hb: The HBitmap to operate on
+ * @start: The bit to start from.
+ * @count: Number of bits to proceed. If @start+@count > bitmap size, the whole
+ * bitmap is looked through. You can use INT64_MAX as @count to search up to
+ * the bitmap end.
  */
-unsigned long hbitmap_iter_skip_words(HBitmapIter *hbi);
+int64_t hbitmap_next_dirty(const HBitmap *hb, int64_t start, int64_t count);
 
 /* hbitmap_next_zero:
  *
@@ -311,47 +317,28 @@ unsigned long hbitmap_iter_skip_words(HBitmapIter *hbi);
  * @hb: The HBitmap to operate on
  * @start: The bit to start from.
  * @count: Number of bits to proceed. If @start+@count > bitmap size, the whole
- * bitmap is looked through. You can use UINT64_MAX as @count to search up to
+ * bitmap is looked through. You can use INT64_MAX as @count to search up to
  * the bitmap end.
  */
-int64_t hbitmap_next_zero(const HBitmap *hb, uint64_t start, uint64_t count);
+int64_t hbitmap_next_zero(const HBitmap *hb, int64_t start, int64_t count);
 
 /* hbitmap_next_dirty_area:
  * @hb: The HBitmap to operate on
- * @start: in-out parameter.
- *         in: the offset to start from
- *         out: (if area found) start of found area
- * @count: in-out parameter.
- *         in: length of requested region
- *         out: length of found area
- *
- * If dirty area found within [@start, @start + @count), returns true and sets
- * @offset and @bytes appropriately. Otherwise returns false and leaves @offset
- * and @bytes unchanged.
- */
-bool hbitmap_next_dirty_area(const HBitmap *hb, uint64_t *start,
-                             uint64_t *count);
-
-/* hbitmap_create_meta:
- * Create a "meta" hbitmap to track dirtiness of the bits in this HBitmap.
- * The caller owns the created bitmap and must call hbitmap_free_meta(hb) to
- * free it.
- *
- * Currently, we only guarantee that if a bit in the hbitmap is changed it
- * will be reflected in the meta bitmap, but we do not yet guarantee the
- * opposite.
- *
- * @hb: The HBitmap to operate on.
- * @chunk_size: How many bits in @hb does one bit in the meta track.
- */
-HBitmap *hbitmap_create_meta(HBitmap *hb, int chunk_size);
-
-/* hbitmap_free_meta:
- * Free the meta bitmap of @hb.
- *
- * @hb: The HBitmap whose meta bitmap should be freed.
- */
-void hbitmap_free_meta(HBitmap *hb);
+ * @start: the offset to start from
+ * @end: end of requested area
+ * @max_dirty_count: limit for out parameter dirty_count
+ * @dirty_start: on success: start of found area
+ * @dirty_count: on success: length of found area
+ *
+ * If dirty area found within [@start, @end), returns true and sets
+ * @dirty_start and @dirty_count appropriately. @dirty_count will not exceed
+ * @max_dirty_count.
+ * If dirty area was not found, returns false and leaves @dirty_start and
+ * @dirty_count unchanged.
+ */
+bool hbitmap_next_dirty_area(const HBitmap *hb, int64_t start, int64_t end,
+                             int64_t max_dirty_count,
+                             int64_t *dirty_start, int64_t *dirty_count);
 
 /**
  * hbitmap_iter_next:
@@ -362,34 +349,4 @@ void hbitmap_free_meta(HBitmap *hb);
  */
 int64_t hbitmap_iter_next(HBitmapIter *hbi);
 
-/**
- * hbitmap_iter_next_word:
- * @hbi: HBitmapIter to operate on.
- * @p_cur: Location where to store the next non-zero word.
- *
- * Return the index of the next nonzero word that is set in @hbi's
- * associated HBitmap, and set *p_cur to the content of that word
- * (bits before the index that was passed to hbitmap_iter_init are
- * trimmed on the first call). Return -1, and set *p_cur to zero,
- * if all remaining words are zero.
- */
-static inline size_t hbitmap_iter_next_word(HBitmapIter *hbi, unsigned long *p_cur)
-{
-    unsigned long cur = hbi->cur[HBITMAP_LEVELS - 1];
-
-    if (cur == 0) {
-        cur = hbitmap_iter_skip_words(hbi);
-        if (cur == 0) {
-            *p_cur = 0;
-            return -1;
-        }
-    }
-
-    /* The next call will resume work from the next word. */
-    hbi->cur[HBITMAP_LEVELS - 1] = 0;
-    *p_cur = cur;
-    return hbi->pos;
-}
-
-
 #endif
diff --git a/nbd/server.c b/nbd/server.c
index 11a3109..02b1ed0 100644
--- a/nbd/server.c
+++ b/nbd/server.c
@@ -1909,27 +1909,98 @@ static int coroutine_fn nbd_co_send_sparse_read(NBDClient *client,
     return ret;
 }
 
+typedef struct NBDExtentArray {
+    NBDExtent *extents;
+    unsigned int nb_alloc;
+    unsigned int count;
+    uint64_t total_length;
+    bool can_add;
+    bool converted_to_be;
+} NBDExtentArray;
+
+static NBDExtentArray *nbd_extent_array_new(unsigned int nb_alloc)
+{
+    NBDExtentArray *ea = g_new0(NBDExtentArray, 1);
+
+    ea->nb_alloc = nb_alloc;
+    ea->extents = g_new(NBDExtent, nb_alloc);
+    ea->can_add = true;
+
+    return ea;
+}
+
+static void nbd_extent_array_free(NBDExtentArray *ea)
+{
+    g_free(ea->extents);
+    g_free(ea);
+}
+G_DEFINE_AUTOPTR_CLEANUP_FUNC(NBDExtentArray, nbd_extent_array_free);
+
+/* Further modifications of the array after conversion are abandoned */
+static void nbd_extent_array_convert_to_be(NBDExtentArray *ea)
+{
+    int i;
+
+    assert(!ea->converted_to_be);
+    ea->can_add = false;
+    ea->converted_to_be = true;
+
+    for (i = 0; i < ea->count; i++) {
+        ea->extents[i].flags = cpu_to_be32(ea->extents[i].flags);
+        ea->extents[i].length = cpu_to_be32(ea->extents[i].length);
+    }
+}
+
 /*
- * Populate @extents from block status. Update @bytes to be the actual
- * length encoded (which may be smaller than the original), and update
- * @nb_extents to the number of extents used.
- *
- * Returns zero on success and -errno on bdrv_block_status_above failure.
+ * Add extent to NBDExtentArray. If extent can't be added (no available space),
+ * return -1.
+ * For safety, when returning -1 for the first time, .can_add is set to false,
+ * further call to nbd_extent_array_add() will crash.
+ * (to avoid the situation, when after failing to add an extent (returned -1),
+ * user miss this failure and add another extent, which is successfully added
+ * (array is full, but new extent may be squashed into the last one), then we
+ * have invalid array with skipped extent)
  */
-static int blockstatus_to_extents(BlockDriverState *bs, uint64_t offset,
-                                  uint64_t *bytes, NBDExtent *extents,
-                                  unsigned int *nb_extents)
+static int nbd_extent_array_add(NBDExtentArray *ea,
+                                uint32_t length, uint32_t flags)
 {
-    uint64_t remaining_bytes = *bytes;
-    NBDExtent *extent = extents, *extents_end = extents + *nb_extents;
-    bool first_extent = true;
+    assert(ea->can_add);
+
+    if (!length) {
+        return 0;
+    }
+
+    /* Extend previous extent if flags are the same */
+    if (ea->count > 0 && flags == ea->extents[ea->count - 1].flags) {
+        uint64_t sum = (uint64_t)length + ea->extents[ea->count - 1].length;
+
+        if (sum <= UINT32_MAX) {
+            ea->extents[ea->count - 1].length = sum;
+            ea->total_length += length;
+            return 0;
+        }
+    }
 
-    assert(*nb_extents);
-    while (remaining_bytes) {
+    if (ea->count >= ea->nb_alloc) {
+        ea->can_add = false;
+        return -1;
+    }
+
+    ea->total_length += length;
+    ea->extents[ea->count] = (NBDExtent) {.length = length, .flags = flags};
+    ea->count++;
+
+    return 0;
+}
+
+static int blockstatus_to_extents(BlockDriverState *bs, uint64_t offset,
+                                  uint64_t bytes, NBDExtentArray *ea)
+{
+    while (bytes) {
         uint32_t flags;
         int64_t num;
-        int ret = bdrv_block_status_above(bs, NULL, offset, remaining_bytes,
-                                          &num, NULL, NULL);
+        int ret = bdrv_block_status_above(bs, NULL, offset, bytes, &num,
+                                          NULL, NULL);
 
         if (ret < 0) {
             return ret;
@@ -1938,60 +2009,37 @@ static int blockstatus_to_extents(BlockDriverState *bs, uint64_t offset,
         flags = (ret & BDRV_BLOCK_ALLOCATED ? 0 : NBD_STATE_HOLE) |
                 (ret & BDRV_BLOCK_ZERO ? NBD_STATE_ZERO : 0);
 
-        if (first_extent) {
-            extent->flags = flags;
-            extent->length = num;
-            first_extent = false;
-        } else if (flags == extent->flags) {
-            /* extend current extent */
-            extent->length += num;
-        } else {
-            if (extent + 1 == extents_end) {
-                break;
-            }
-
-            /* start new extent */
-            extent++;
-            extent->flags = flags;
-            extent->length = num;
+        if (nbd_extent_array_add(ea, num, flags) < 0) {
+            return 0;
         }
-        offset += num;
-        remaining_bytes -= num;
-    }
-    extents_end = extent + 1;
-
-    for (extent = extents; extent < extents_end; extent++) {
-        extent->flags = cpu_to_be32(extent->flags);
-        extent->length = cpu_to_be32(extent->length);
+        offset += num;
+        bytes -= num;
     }
-    *bytes -= remaining_bytes;
-    *nb_extents = extents_end - extents;
-
     return 0;
 }
 
-/* nbd_co_send_extents
+/*
+ * nbd_co_send_extents
  *
- * @length is only for tracing purposes (and may be smaller or larger
- * than the client's original request). @last controls whether
- * NBD_REPLY_FLAG_DONE is sent. @extents should already be in
- * big-endian format.
+ * @ea is converted to BE by the function
+ * @last controls whether NBD_REPLY_FLAG_DONE is sent.
  */
 static int nbd_co_send_extents(NBDClient *client, uint64_t handle,
-                               NBDExtent *extents, unsigned int nb_extents,
-                               uint64_t length, bool last,
-                               uint32_t context_id, Error **errp)
+                               NBDExtentArray *ea,
+                               bool last, uint32_t context_id, Error **errp)
 {
     NBDStructuredMeta chunk;
-
     struct iovec iov[] = {
         {.iov_base = &chunk, .iov_len = sizeof(chunk)},
         {.iov_base = ea->extents, .iov_len = ea->count * sizeof(ea->extents[0])}
     };
 
-    trace_nbd_co_send_extents(handle, nb_extents, context_id, length, last);
+    nbd_extent_array_convert_to_be(ea);
+
+    trace_nbd_co_send_extents(handle, ea->count, context_id, ea->total_length,
+                              last);
     set_be_chunk(&chunk.h, last ? NBD_REPLY_FLAG_DONE : 0,
                  NBD_REPLY_TYPE_BLOCK_STATUS, handle,
                  sizeof(chunk) - sizeof(chunk.h) + iov[1].iov_len);
@@ -2009,82 +2057,47 @@ static int nbd_co_send_block_status(NBDClient *client, uint64_t handle,
 {
     int ret;
     unsigned int nb_extents = dont_fragment ? 1 : NBD_MAX_BLOCK_STATUS_EXTENTS;
-    NBDExtent *extents = g_new(NBDExtent, nb_extents);
-    uint64_t final_length = length;
+    g_autoptr(NBDExtentArray) ea = nbd_extent_array_new(nb_extents);
 
-    ret = blockstatus_to_extents(bs, offset, &final_length, extents,
-                                 &nb_extents);
+    ret = blockstatus_to_extents(bs, offset, length, ea);
     if (ret < 0) {
-        g_free(extents);
         return nbd_co_send_structured_error(
                 client, handle, -ret, "can't get block status", errp);
     }
 
-    ret = nbd_co_send_extents(client, handle, extents, nb_extents,
-                              final_length, last, context_id, errp);
-
-    g_free(extents);
-
-    return ret;
+    return nbd_co_send_extents(client, handle, ea, last, context_id, errp);
 }
 
-/*
- * Populate @extents from a dirty bitmap. Unless @dont_fragment, the
- * final extent may exceed the original @length. Store in @length the
- * byte length encoded (which may be smaller or larger than the
- * original), and return the number of extents used.
- */
-static unsigned int bitmap_to_extents(BdrvDirtyBitmap *bitmap, uint64_t offset,
-                                      uint64_t *length, NBDExtent *extents,
-                                      unsigned int nb_extents,
-                                      bool dont_fragment)
+/* Populate @ea from a dirty bitmap. */
+static void bitmap_to_extents(BdrvDirtyBitmap *bitmap,
                              uint64_t offset, uint64_t length,
+                              NBDExtentArray *es)
 {
-    uint64_t begin = offset, end = offset;
-    uint64_t overall_end = offset + *length;
-    unsigned int i = 0;
-    BdrvDirtyBitmapIter *it;
-    bool dirty;
+    int64_t start, dirty_start, dirty_count;
+    int64_t end = offset + length;
+    bool full = false;
 
     bdrv_dirty_bitmap_lock(bitmap);
 
-    it = bdrv_dirty_iter_new(bitmap);
-    dirty = bdrv_dirty_bitmap_get_locked(bitmap, offset);
-
-    assert(begin < overall_end && nb_extents);
-    while (begin < overall_end && i < nb_extents) {
-        bool next_dirty = !dirty;
-
-        if (dirty) {
-            end = bdrv_dirty_bitmap_next_zero(bitmap, begin, UINT64_MAX);
-        } else {
-            bdrv_set_dirty_iter(it, begin);
-            end = bdrv_dirty_iter_next(it);
-        }
-        if (end == -1 || end - begin > UINT32_MAX) {
-            /* Cap to an aligned value < 4G beyond begin. */
-            end = MIN(bdrv_dirty_bitmap_size(bitmap),
-                      begin + UINT32_MAX + 1 -
-                      bdrv_dirty_bitmap_granularity(bitmap));
-            next_dirty = dirty;
-        }
-        if (dont_fragment && end > overall_end) {
-            end = overall_end;
+    for (start = offset;
+         bdrv_dirty_bitmap_next_dirty_area(bitmap, start, end, INT32_MAX,
                                           &dirty_start, &dirty_count);
+         start = dirty_start + dirty_count)
+    {
+        if ((nbd_extent_array_add(es, dirty_start - start, 0) < 0) ||
+            (nbd_extent_array_add(es, dirty_count, NBD_STATE_DIRTY) < 0))
+        {
+            full = true;
+            break;
         }
-
-        extents[i].length = cpu_to_be32(end - begin);
-        extents[i].flags = cpu_to_be32(dirty ? NBD_STATE_DIRTY : 0);
-        i++;
-        begin = end;
-        dirty = next_dirty;
     }
 
-    bdrv_dirty_iter_free(it);
+    if (!full) {
+        /* last non dirty extent */
+        nbd_extent_array_add(es, end - start, 0);
+    }
 
     bdrv_dirty_bitmap_unlock(bitmap);
-
-    assert(offset < end);
-    *length = end - offset;
-    return i;
 }
 
 static int nbd_co_send_bitmap(NBDClient *client, uint64_t handle,
@@ -2092,20 +2105,12 @@ static int nbd_co_send_bitmap(NBDClient *client, uint64_t handle,
                               uint32_t length, bool dont_fragment, bool last,
                               uint32_t context_id, Error **errp)
 {
-    int ret;
     unsigned int nb_extents = dont_fragment ? 1 : NBD_MAX_BLOCK_STATUS_EXTENTS;
-    NBDExtent *extents = g_new(NBDExtent, nb_extents);
-    uint64_t final_length = length;
-
-    nb_extents = bitmap_to_extents(bitmap, offset, &final_length, extents,
-                                   nb_extents, dont_fragment);
+    g_autoptr(NBDExtentArray) ea = nbd_extent_array_new(nb_extents);
 
-    ret = nbd_co_send_extents(client, handle, extents, nb_extents,
-                              final_length, last, context_id, errp);
+    bitmap_to_extents(bitmap, offset, length, ea);
 
-    g_free(extents);
-
-    return ret;
+    return nbd_co_send_extents(client, handle, ea, last, context_id, errp);
 }
 
 /* nbd_co_receive_request
diff --git a/tests/test-hbitmap.c b/tests/test-hbitmap.c
index e1f8670..b6726cf 100644
--- a/tests/test-hbitmap.c
+++ b/tests/test-hbitmap.c
@@ -22,7 +22,6 @@
 typedef struct TestHBitmapData {
     HBitmap *hb;
-    HBitmap *meta;
     unsigned long *bits;
     size_t size;
     size_t old_size;
@@ -94,14 +93,6 @@ static void hbitmap_test_init(TestHBitmapData *data,
     }
 }
 
-static void hbitmap_test_init_meta(TestHBitmapData *data,
-                                   uint64_t size, int granularity,
-                                   int meta_chunk)
-{
-    hbitmap_test_init(data, size, granularity);
-    data->meta = hbitmap_create_meta(data->hb, meta_chunk);
-}
-
 static inline size_t hbitmap_test_array_size(size_t bits)
 {
     size_t n = DIV_ROUND_UP(bits, BITS_PER_LONG);
@@ -144,9 +135,6 @@ static void hbitmap_test_teardown(TestHBitmapData *data,
                                   const void *unused)
 {
     if (data->hb) {
-        if (data->meta) {
-            hbitmap_free_meta(data->hb);
-        }
         hbitmap_free(data->hb);
         data->hb = NULL;
     }
@@ -648,96 +636,6 @@ static void test_hbitmap_truncate_shrink_large(TestHBitmapData *data,
     hbitmap_test_truncate(data, size, -diff, 0);
 }
 
-static void hbitmap_check_meta(TestHBitmapData *data,
-                               int64_t start, int count)
-{
-    int64_t i;
-
-    for (i = 0; i < data->size; i++) {
-        if (i >= start && i < start + count) {
-            g_assert(hbitmap_get(data->meta, i));
-        } else {
-            g_assert(!hbitmap_get(data->meta, i));
-        }
-    }
-}
-
-static void hbitmap_test_meta(TestHBitmapData *data,
-                              int64_t start, int count,
-                              int64_t check_start, int check_count)
-{
-    hbitmap_reset_all(data->hb);
-    hbitmap_reset_all(data->meta);
-
-    /* Test "unset" -> "unset" will not update meta. */
-    hbitmap_reset(data->hb, start, count);
-    hbitmap_check_meta(data, 0, 0);
-
-    /* Test "unset" -> "set" will update meta */
-    hbitmap_set(data->hb, start, count);
-    hbitmap_check_meta(data, check_start, check_count);
-
-    /* Test "set" -> "set" will not update meta */
-    hbitmap_reset_all(data->meta);
-    hbitmap_set(data->hb, start, count);
-    hbitmap_check_meta(data, 0, 0);
-
-    /* Test "set" -> "unset" will update meta */
-    hbitmap_reset_all(data->meta);
-    hbitmap_reset(data->hb, start, count);
-    hbitmap_check_meta(data, check_start, check_count);
-}
-
-static void hbitmap_test_meta_do(TestHBitmapData *data, int chunk_size)
-{
-    uint64_t size = chunk_size * 100;
-    hbitmap_test_init_meta(data, size, 0, chunk_size);
-
-    hbitmap_test_meta(data, 0, 1, 0, chunk_size);
-    hbitmap_test_meta(data, 0, chunk_size, 0, chunk_size);
-    hbitmap_test_meta(data, chunk_size - 1, 1, 0, chunk_size);
-    hbitmap_test_meta(data, chunk_size - 1, 2, 0, chunk_size * 2);
-    hbitmap_test_meta(data, chunk_size - 1, chunk_size + 1, 0, chunk_size * 2);
-    hbitmap_test_meta(data, chunk_size - 1, chunk_size + 2, 0, chunk_size * 3);
-    hbitmap_test_meta(data, 7 * chunk_size - 1, chunk_size + 2,
-                      6 * chunk_size, chunk_size * 3);
-    hbitmap_test_meta(data, size - 1, 1, size - chunk_size, chunk_size);
-    hbitmap_test_meta(data, 0, size, 0, size);
-}
-
-static void test_hbitmap_meta_byte(TestHBitmapData *data, const void *unused)
-{
-    hbitmap_test_meta_do(data, BITS_PER_BYTE);
-}
-
-static void test_hbitmap_meta_word(TestHBitmapData *data, const void *unused)
-{
-    hbitmap_test_meta_do(data, BITS_PER_LONG);
-}
-
-static void test_hbitmap_meta_sector(TestHBitmapData *data, const void *unused)
-{
-    hbitmap_test_meta_do(data, BDRV_SECTOR_SIZE * BITS_PER_BYTE);
-}
-
-/**
- * Create an HBitmap and test set/unset.
- */
-static void test_hbitmap_meta_one(TestHBitmapData *data, const void *unused)
-{
-    int i;
-    int64_t offsets[] = {
-        0, 1, L1 - 1, L1, L1 + 1, L2 - 1, L2, L2 + 1, L3 - 1, L3, L3 + 1
-    };
-
-    hbitmap_test_init_meta(data, L3 * 2, 0, 1);
-    for (i = 0; i < ARRAY_SIZE(offsets); i++) {
-        hbitmap_test_meta(data, offsets[i], 1, offsets[i], 1);
-        hbitmap_test_meta(data, offsets[i], L1, offsets[i], L1);
-        hbitmap_test_meta(data, offsets[i], L2, offsets[i], L2);
-    }
-}
-
 static void test_hbitmap_serialize_align(TestHBitmapData *data,
                                          const void *unused)
 {
@@ -750,13 +648,6 @@ static void test_hbitmap_serialize_align(TestHBitmapData *data,
     g_assert_cmpint(r, ==, 64 << 3);
 }
 
-static void test_hbitmap_meta_zero(TestHBitmapData *data, const void *unused)
-{
-    hbitmap_test_init_meta(data, 0, 0, 1);
-
-    hbitmap_check_meta(data, 0, 0);
-}
-
 static void hbitmap_test_serialize_range(TestHBitmapData *data,
                                          uint8_t *buf, size_t buf_size,
                                          uint64_t pos, uint64_t count)
@@ -925,106 +816,123 @@ static void test_hbitmap_iter_and_reset(TestHBitmapData *data,
     hbitmap_iter_next(&hbi);
 }
 
-static void test_hbitmap_next_zero_check_range(TestHBitmapData *data,
-                                               uint64_t start,
-                                               uint64_t count)
+static void test_hbitmap_next_x_check_range(TestHBitmapData *data,
+                                            int64_t start,
+                                            int64_t count)
 {
-    int64_t ret1 = hbitmap_next_zero(data->hb, start, count);
-    int64_t ret2 = start;
+    int64_t next_zero = hbitmap_next_zero(data->hb, start, count);
+    int64_t next_dirty = hbitmap_next_dirty(data->hb, start, count);
+    int64_t next;
     int64_t end = start >= data->size || data->size - start < count ?
                     data->size : start + count;
+    bool first_bit = hbitmap_get(data->hb, start);
 
-    for ( ; ret2 < end && hbitmap_get(data->hb, ret2); ret2++) {
+    for (next = start;
+         next < end && hbitmap_get(data->hb, next) == first_bit;
+         next++)
+    {
         ;
     }
-    if (ret2 == end) {
-        ret2 = -1;
+
+    if (next == end) {
+        next = -1;
     }
 
-    g_assert_cmpint(ret1, ==, ret2);
+    g_assert_cmpint(next_dirty, ==, first_bit ? start : next);
+    g_assert_cmpint(next_zero, ==, first_bit ? next : start);
 }
 
-static void test_hbitmap_next_zero_check(TestHBitmapData *data, int64_t start)
+static void test_hbitmap_next_x_check(TestHBitmapData *data, int64_t start)
 {
-    test_hbitmap_next_zero_check_range(data, start, UINT64_MAX);
+    test_hbitmap_next_x_check_range(data, start, INT64_MAX);
 }
 
-static void test_hbitmap_next_zero_do(TestHBitmapData *data, int granularity)
+static void test_hbitmap_next_x_do(TestHBitmapData *data, int granularity)
 {
     hbitmap_test_init(data, L3, granularity);
-    test_hbitmap_next_zero_check(data, 0);
-    test_hbitmap_next_zero_check(data, L3 - 1);
-    test_hbitmap_next_zero_check_range(data, 0, 1);
-    test_hbitmap_next_zero_check_range(data, L3 - 1, 1);
+    test_hbitmap_next_x_check(data, 0);
+    test_hbitmap_next_x_check(data, L3 - 1);
+    test_hbitmap_next_x_check_range(data, 0, 1);
+    test_hbitmap_next_x_check_range(data, L3 - 1, 1);
 
     hbitmap_set(data->hb, L2, 1);
-    test_hbitmap_next_zero_check(data, 0);
-    test_hbitmap_next_zero_check(data, L2 - 1);
-    test_hbitmap_next_zero_check(data, L2);
-    test_hbitmap_next_zero_check(data, L2 + 1);
-    test_hbitmap_next_zero_check_range(data, 0, 1);
-    test_hbitmap_next_zero_check_range(data, 0, L2);
-    test_hbitmap_next_zero_check_range(data, L2 - 1, 1);
-    test_hbitmap_next_zero_check_range(data, L2 - 1, 2);
-    test_hbitmap_next_zero_check_range(data, L2, 1);
-    test_hbitmap_next_zero_check_range(data, L2 + 1, 1);
+    test_hbitmap_next_x_check(data, 0);
+    test_hbitmap_next_x_check(data, L2 - 1);
+    test_hbitmap_next_x_check(data, L2);
+    test_hbitmap_next_x_check(data, L2 + 1);
+    test_hbitmap_next_x_check_range(data, 0, 1);
+    test_hbitmap_next_x_check_range(data, 0, L2);
+    test_hbitmap_next_x_check_range(data, L2 - 1, 1);
+    test_hbitmap_next_x_check_range(data, L2 - 1, 2);
+    test_hbitmap_next_x_check_range(data, L2, 1);
+    test_hbitmap_next_x_check_range(data, L2 + 1, 1);
 
     hbitmap_set(data->hb, L2 + 5, L1);
-    test_hbitmap_next_zero_check(data, 0);
-    test_hbitmap_next_zero_check(data, L2 + 1);
-    test_hbitmap_next_zero_check(data, L2 + 2);
-    test_hbitmap_next_zero_check(data, L2 + 5);
-    test_hbitmap_next_zero_check(data, L2 + L1 - 1);
-    test_hbitmap_next_zero_check(data, L2 + L1);
-    test_hbitmap_next_zero_check_range(data, L2, 6);
-    test_hbitmap_next_zero_check_range(data, L2 + 1, 3);
-    test_hbitmap_next_zero_check_range(data, L2 + 4, L1);
-    test_hbitmap_next_zero_check_range(data, L2 + 5, L1);
+    test_hbitmap_next_x_check(data, 0);
+    test_hbitmap_next_x_check(data, L2 - L1);
+    test_hbitmap_next_x_check(data, L2 + 1);
+    test_hbitmap_next_x_check(data, L2 + 2);
+    test_hbitmap_next_x_check(data, L2 + 5);
+    test_hbitmap_next_x_check(data, L2 + L1 - 1);
+    test_hbitmap_next_x_check(data, L2 + L1);
+    test_hbitmap_next_x_check(data, L2 + L1 + 1);
+    test_hbitmap_next_x_check_range(data, L2 - 2, L1);
+    test_hbitmap_next_x_check_range(data, L2, 4);
+    test_hbitmap_next_x_check_range(data, L2, 6);
+    test_hbitmap_next_x_check_range(data, L2 + 1, 3);
+    test_hbitmap_next_x_check_range(data, L2 + 4, L1);
+    test_hbitmap_next_x_check_range(data, L2 + 5, L1);
+    test_hbitmap_next_x_check_range(data, L2 + 5 + L1 - 1, 1);
+    test_hbitmap_next_x_check_range(data, L2 + 5 + L1, 1);
+    test_hbitmap_next_x_check_range(data, L2 + 5 + L1 + 1, 1);
 
     hbitmap_set(data->hb, L2 * 2, L3 - L2 * 2);
-    test_hbitmap_next_zero_check(data, L2 * 2 - L1);
-    test_hbitmap_next_zero_check(data, L2 * 2 - 2);
-    test_hbitmap_next_zero_check(data, L2 * 2 - 1);
-    test_hbitmap_next_zero_check(data, L2 * 2);
-    test_hbitmap_next_zero_check(data, L3 - 1);
-    test_hbitmap_next_zero_check_range(data, L2 * 2 - L1, L1 + 1);
-    test_hbitmap_next_zero_check_range(data, L2 * 2, L2);
+    test_hbitmap_next_x_check(data, L2 * 2 - L1);
+    test_hbitmap_next_x_check(data, L2 * 2 - 2);
+    test_hbitmap_next_x_check(data, L2 * 2 - 1);
+    test_hbitmap_next_x_check(data, L2 * 2);
+    test_hbitmap_next_x_check(data, L2 * 2 + 1);
+    test_hbitmap_next_x_check(data, L2 * 2 + L1);
+    test_hbitmap_next_x_check(data, L3 - 1);
+    test_hbitmap_next_x_check_range(data, L2 * 2 - L1, L1 + 1);
+    test_hbitmap_next_x_check_range(data, L2 * 2, L2);
 
     hbitmap_set(data->hb, 0, L3);
-    test_hbitmap_next_zero_check(data, 0);
+    test_hbitmap_next_x_check(data, 0);
 }
 
-static void test_hbitmap_next_zero_0(TestHBitmapData *data, const void *unused)
+static void test_hbitmap_next_x_0(TestHBitmapData *data, const void *unused)
 {
-    test_hbitmap_next_zero_do(data, 0);
+    test_hbitmap_next_x_do(data, 0);
 }
 
-static void test_hbitmap_next_zero_4(TestHBitmapData *data, const void *unused)
+static void test_hbitmap_next_x_4(TestHBitmapData *data, const void *unused)
 {
-    test_hbitmap_next_zero_do(data, 4);
+    test_hbitmap_next_x_do(data, 4);
 }
 
-static void test_hbitmap_next_zero_after_truncate(TestHBitmapData *data,
-                                                  const void *unused)
+static void test_hbitmap_next_x_after_truncate(TestHBitmapData *data,
                                               const void *unused)
 {
     hbitmap_test_init(data, L1, 0);
     hbitmap_test_truncate_impl(data, L1 * 2);
     hbitmap_set(data->hb, 0, L1);
-    test_hbitmap_next_zero_check(data, 0);
+    test_hbitmap_next_x_check(data, 0);
 }
 
-static void test_hbitmap_next_dirty_area_check(TestHBitmapData *data,
-                                               uint64_t offset,
-                                               uint64_t count)
+static void test_hbitmap_next_dirty_area_check_limited(TestHBitmapData *data,
+                                                        int64_t offset,
+                                                        int64_t count,
+                                                        int64_t max_dirty)
 {
-    uint64_t off1, off2;
-    uint64_t len1 = 0, len2;
+    int64_t off1, off2;
+    int64_t len1 = 0, len2;
     bool ret1, ret2;
     int64_t end;
 
-    off1 = offset;
-    len1 = count;
-    ret1 = hbitmap_next_dirty_area(data->hb, &off1, &len1);
+    ret1 = hbitmap_next_dirty_area(data->hb,
        offset, count == INT64_MAX ? INT64_MAX : offset + count, max_dirty,
+        &off1, &len1);
 
     end = offset > data->size || data->size - offset < count ?
                 data->size : offset + count;
@@ -1033,45 +941,52 @@ static void test_hbitmap_next_dirty_area_check(TestHBitmapData *data,
         ;
     }
 
-    for (len2 = 1; off2 + len2 < end && hbitmap_get(data->hb, off2 + len2);
-         len2++) {
+    for (len2 = 1; (off2 + len2 < end && len2 < max_dirty &&
+                    hbitmap_get(data->hb, off2 + len2)); len2++)
+    {
         ;
     }
 
     ret2 = off2 < end;
-    if (!ret2) {
-        /* leave unchanged */
-        off2 = offset;
-        len2 = count;
+    g_assert_cmpint(ret1, ==, ret2);
+
+    if (ret2) {
+        g_assert_cmpint(off1, ==, off2);
+        g_assert_cmpint(len1, ==, len2);
     }
+}
 
-    g_assert_cmpint(ret1, ==, ret2);
-    g_assert_cmpint(off1, ==, off2);
-    g_assert_cmpint(len1, ==, len2);
+static void test_hbitmap_next_dirty_area_check(TestHBitmapData *data,
                                               int64_t offset, int64_t count)
+{
+    test_hbitmap_next_dirty_area_check_limited(data, offset, count, INT64_MAX);
 }
 
 static void test_hbitmap_next_dirty_area_do(TestHBitmapData *data,
                                             int granularity)
 {
     hbitmap_test_init(data, L3, granularity);
-    test_hbitmap_next_dirty_area_check(data, 0, UINT64_MAX);
+    test_hbitmap_next_dirty_area_check(data, 0, INT64_MAX);
     test_hbitmap_next_dirty_area_check(data, 0, 1);
     test_hbitmap_next_dirty_area_check(data, L3 - 1, 1);
+    test_hbitmap_next_dirty_area_check_limited(data, 0, INT64_MAX, 1);
 
     hbitmap_set(data->hb, L2, 1);
     test_hbitmap_next_dirty_area_check(data, 0, 1);
     test_hbitmap_next_dirty_area_check(data, 0, L2);
-    test_hbitmap_next_dirty_area_check(data, 0, UINT64_MAX);
-    test_hbitmap_next_dirty_area_check(data, L2 - 1, UINT64_MAX);
+    test_hbitmap_next_dirty_area_check(data, 0, INT64_MAX);
+    test_hbitmap_next_dirty_area_check(data, L2 - 1, INT64_MAX);
    test_hbitmap_next_dirty_area_check(data, L2 - 1, 1);
     test_hbitmap_next_dirty_area_check(data, L2 - 1, 2);
     test_hbitmap_next_dirty_area_check(data, L2 - 1, 3);
-    test_hbitmap_next_dirty_area_check(data, L2, UINT64_MAX);
+    test_hbitmap_next_dirty_area_check(data, L2, INT64_MAX);
     test_hbitmap_next_dirty_area_check(data, L2, 1);
     test_hbitmap_next_dirty_area_check(data, L2 + 1, 1);
+    test_hbitmap_next_dirty_area_check_limited(data, 0, INT64_MAX, 1);
+    test_hbitmap_next_dirty_area_check_limited(data, L2 - 1, 2, 1);
 
     hbitmap_set(data->hb, L2 + 5, L1);
-    test_hbitmap_next_dirty_area_check(data, 0, UINT64_MAX);
+    test_hbitmap_next_dirty_area_check(data, 0, INT64_MAX);
     test_hbitmap_next_dirty_area_check(data, L2 - 2, 8);
     test_hbitmap_next_dirty_area_check(data, L2 + 1, 5);
     test_hbitmap_next_dirty_area_check(data, L2 + 1, 3);
@@ -1081,18 +996,23 @@ static void test_hbitmap_next_dirty_area_do(TestHBitmapData *data,
     test_hbitmap_next_dirty_area_check(data, L2 + L1, L1);
     test_hbitmap_next_dirty_area_check(data, L2, 0);
     test_hbitmap_next_dirty_area_check(data, L2 + 1, 0);
+    test_hbitmap_next_dirty_area_check_limited(data, L2 + 3, INT64_MAX, 3);
+    test_hbitmap_next_dirty_area_check_limited(data, L2 + 3, 7, 10);
 
     hbitmap_set(data->hb, L2 * 2, L3 - L2 * 2);
-    test_hbitmap_next_dirty_area_check(data, 0, UINT64_MAX);
-    test_hbitmap_next_dirty_area_check(data, L2, UINT64_MAX);
-    test_hbitmap_next_dirty_area_check(data, L2 + 1, UINT64_MAX);
-    test_hbitmap_next_dirty_area_check(data, L2 + 5 + L1 - 1, UINT64_MAX);
+    test_hbitmap_next_dirty_area_check(data, 0, INT64_MAX);
+    test_hbitmap_next_dirty_area_check(data, L2, INT64_MAX);
+    test_hbitmap_next_dirty_area_check(data, L2 + 1, INT64_MAX);
+    test_hbitmap_next_dirty_area_check(data, L2 + 5 + L1 - 1, INT64_MAX);
     test_hbitmap_next_dirty_area_check(data, L2 + 5 + L1, 5);
     test_hbitmap_next_dirty_area_check(data, L2 * 2 - L1, L1 + 1);
     test_hbitmap_next_dirty_area_check(data, L2 * 2, L2);
+    test_hbitmap_next_dirty_area_check_limited(data, L2 * 2 + 1, INT64_MAX, 5);
+    test_hbitmap_next_dirty_area_check_limited(data, L2 * 2 + 1, 10, 5);
+    test_hbitmap_next_dirty_area_check_limited(data, L2 * 2 + 1, 2, 5);
 
     hbitmap_set(data->hb, 0, L3);
-    test_hbitmap_next_dirty_area_check(data, 0, UINT64_MAX);
+    test_hbitmap_next_dirty_area_check(data, 0, INT64_MAX);
 }
 
 static void test_hbitmap_next_dirty_area_0(TestHBitmapData *data,
@@ -1119,7 +1039,7 @@ static void test_hbitmap_next_dirty_area_after_truncate(TestHBitmapData *data,
     hbitmap_test_init(data, L1, 0);
     hbitmap_test_truncate_impl(data, L1 * 2);
     hbitmap_set(data->hb, L1 + 1, 1);
-    test_hbitmap_next_dirty_area_check(data, 0, UINT64_MAX);
+    test_hbitmap_next_dirty_area_check(data, 0, INT64_MAX);
 }
 
 int main(int argc, char **argv)
@@ -1165,12 +1085,6 @@ int main(int argc, char **argv)
     hbitmap_test_add("/hbitmap/truncate/shrink/large",
                      test_hbitmap_truncate_shrink_large);
 
-    hbitmap_test_add("/hbitmap/meta/zero", test_hbitmap_meta_zero);
-    hbitmap_test_add("/hbitmap/meta/one", test_hbitmap_meta_one);
-    hbitmap_test_add("/hbitmap/meta/byte", test_hbitmap_meta_byte);
-    hbitmap_test_add("/hbitmap/meta/word", test_hbitmap_meta_word);
-    hbitmap_test_add("/hbitmap/meta/sector", test_hbitmap_meta_sector);
-
     hbitmap_test_add("/hbitmap/serialize/align",
                      test_hbitmap_serialize_align);
     hbitmap_test_add("/hbitmap/serialize/basic",
@@ -1183,12 +1097,12 @@ int main(int argc, char **argv)
     hbitmap_test_add("/hbitmap/iter/iter_and_reset",
                      test_hbitmap_iter_and_reset);
 
-    hbitmap_test_add("/hbitmap/next_zero/next_zero_0",
-                     test_hbitmap_next_zero_0);
-    hbitmap_test_add("/hbitmap/next_zero/next_zero_4",
-                     test_hbitmap_next_zero_4);
-    hbitmap_test_add("/hbitmap/next_zero/next_zero_after_truncate",
-                     test_hbitmap_next_zero_after_truncate);
+    hbitmap_test_add("/hbitmap/next_zero/next_x_0",
+                     test_hbitmap_next_x_0);
+    hbitmap_test_add("/hbitmap/next_zero/next_x_4",
+                     test_hbitmap_next_x_4);
+    hbitmap_test_add("/hbitmap/next_zero/next_x_after_truncate",
+                     test_hbitmap_next_x_after_truncate);
 
     hbitmap_test_add("/hbitmap/next_dirty_area/next_dirty_area_0",
                      test_hbitmap_next_dirty_area_0);
diff --git a/util/hbitmap.c b/util/hbitmap.c
index 242c6e5..305b894 100644
--- a/util/hbitmap.c
+++ b/util/hbitmap.c
@@ -104,7 +104,7 @@ struct HBitmap {
 /* Advance hbi to the next nonzero word and return it.  hbi->pos
  * is updated.  Returns zero if we reach the end of the bitmap.
  */
-unsigned long hbitmap_iter_skip_words(HBitmapIter *hbi)
+static unsigned long hbitmap_iter_skip_words(HBitmapIter *hbi)
 {
     size_t pos = hbi->pos;
     const HBitmap *hb = hbi->hb;
@@ -193,7 +193,31 @@ void hbitmap_iter_init(HBitmapIter *hbi, const HBitmap *hb, uint64_t first)
     }
 }
 
-int64_t hbitmap_next_zero(const HBitmap *hb, uint64_t start, uint64_t count)
+int64_t hbitmap_next_dirty(const HBitmap *hb, int64_t start, int64_t count)
+{
+    HBitmapIter hbi;
+    int64_t first_dirty_off;
+    uint64_t end;
+
+    assert(start >= 0 && count >= 0);
+
+    if (start >= hb->orig_size || count == 0) {
+        return -1;
+    }
+
+    end = count > hb->orig_size - start ? hb->orig_size : start + count;
+
+    hbitmap_iter_init(&hbi, hb, start);
+    first_dirty_off = hbitmap_iter_next(&hbi);
+
+    if (first_dirty_off < 0 || first_dirty_off >= end) {
+        return -1;
+    }
+
+    return MAX(start, first_dirty_off);
+}
+
+int64_t hbitmap_next_zero(const HBitmap *hb, int64_t start, int64_t count)
 {
     size_t pos = (start >> hb->granularity) >> BITS_PER_LEVEL;
     unsigned long *last_lev = hb->levels[HBITMAP_LEVELS - 1];
@@ -202,6 +226,8 @@ int64_t hbitmap_next_zero(const HBitmap *hb, int64_t start, int64_t count)
     uint64_t end_bit, sz;
     int64_t res;
 
+    assert(start >= 0 && count >= 0);
+
     if (start >= hb->orig_size || count == 0) {
         return -1;
     }
@@ -244,41 +270,33 @@ int64_t hbitmap_next_zero(const HBitmap *hb, uint64_t start, uint64_t count)
     return res;
 }
 
-bool hbitmap_next_dirty_area(const HBitmap *hb, uint64_t *start,
-                             uint64_t *count)
+bool hbitmap_next_dirty_area(const HBitmap *hb, int64_t start, int64_t end,
+                             int64_t max_dirty_count,
+                             int64_t *dirty_start, int64_t *dirty_count)
 {
-    HBitmapIter hbi;
-    int64_t firt_dirty_off, area_end;
-    uint32_t granularity = 1UL << hb->granularity;
-    uint64_t end;
+    int64_t next_zero;
 
-    if (*start >= hb->orig_size || *count == 0) {
+    assert(start >= 0 && end >= 0 && max_dirty_count > 0);
+
+    end = MIN(end, hb->orig_size);
+    if (start >= end) {
         return false;
     }
 
-    end = *count > hb->orig_size - *start ? hb->orig_size : *start + *count;
-
-    hbitmap_iter_init(&hbi, hb, *start);
-    firt_dirty_off = hbitmap_iter_next(&hbi);
-
-    if (firt_dirty_off < 0 || firt_dirty_off >= end) {
+    start = hbitmap_next_dirty(hb, start, end - start);
+    if (start < 0) {
         return false;
     }
 
-    if (firt_dirty_off + granularity >= end) {
-        area_end = end;
-    } else {
-        area_end = hbitmap_next_zero(hb, firt_dirty_off + granularity,
-                                     end - firt_dirty_off - granularity);
-        if (area_end < 0) {
-            area_end = end;
-        }
-    }
+    end = start + MIN(end - start, max_dirty_count);
 
-    if (firt_dirty_off > *start) {
-        *start = firt_dirty_off;
+    next_zero = hbitmap_next_zero(hb, start, end - start);
+    if (next_zero >= 0) {
+        end = next_zero;
     }
-    *count = area_end - *start;
+
+    *dirty_start = start;
+    *dirty_count = end - start;
 
     return true;
 }
@@ -298,6 +316,35 @@ uint64_t hbitmap_count(const HBitmap *hb)
     return hb->count << hb->granularity;
 }
 
+/**
+ * hbitmap_iter_next_word:
+ * @hbi: HBitmapIter to operate on.
+ * @p_cur: Location where to store the next non-zero word.
+ *
+ * Return the index of the next nonzero word that is set in @hbi's
+ * associated HBitmap, and set *p_cur to the content of that word
+ * (bits before the index that was passed to hbitmap_iter_init are
+ * trimmed on the first call). Return -1, and set *p_cur to zero,
+ * if all remaining words are zero.
+ */
+static size_t hbitmap_iter_next_word(HBitmapIter *hbi, unsigned long *p_cur)
+{
+    unsigned long cur = hbi->cur[HBITMAP_LEVELS - 1];
+
+    if (cur == 0) {
+        cur = hbitmap_iter_skip_words(hbi);
+        if (cur == 0) {
+            *p_cur = 0;
+            return -1;
+        }
+    }
+
+    /* The next call will resume work from the next word. */
+    hbi->cur[HBITMAP_LEVELS - 1] = 0;
+    *p_cur = cur;
+    return hbi->pos;
+}
+
 /* Count the number of set bits between start and end, not accounting for
  * the granularity.  Also an example of how to use hbitmap_iter_next_word.
  */
@@ -716,6 +763,7 @@ HBitmap *hbitmap_alloc(uint64_t size, int granularity)
     HBitmap *hb = g_new0(struct HBitmap, 1);
     unsigned i;
 
+    assert(size <= INT64_MAX);
     hb->orig_size = size;
 
     assert(granularity >= 0 && granularity < 64);
@@ -746,6 +794,7 @@ void hbitmap_truncate(HBitmap *hb, uint64_t size)
     uint64_t num_elements = size;
     uint64_t old;
 
+    assert(size <= INT64_MAX);
     hb->orig_size = size;
 
     /* Size comes in as logical elements, adjust for granularity. */
@@ -803,16 +852,15 @@ bool hbitmap_can_merge(const HBitmap *a, const HBitmap *b)
  */
 static void hbitmap_sparse_merge(HBitmap *dst, const HBitmap *src)
 {
-    uint64_t offset = 0;
-    uint64_t count = src->orig_size;
+    int64_t offset;
+    int64_t count;
 
-    while (hbitmap_next_dirty_area(src, &offset, &count)) {
+    for (offset = 0;
+         hbitmap_next_dirty_area(src, offset, src->orig_size, INT64_MAX,
                                 &offset, &count);
+         offset += count)
+    {
         hbitmap_set(dst, offset, count);
-        offset += count;
-        if (offset >= src->orig_size) {
-            break;
-        }
-        count = src->orig_size - offset;
     }
 }
 
@@ -874,22 +922,6 @@ bool hbitmap_merge(const HBitmap *a, const HBitmap *b, HBitmap *result)
     return true;
 }
 
-HBitmap *hbitmap_create_meta(HBitmap *hb, int chunk_size)
-{
-    assert(!(chunk_size & (chunk_size - 1)));
-    assert(!hb->meta);
-    hb->meta = hbitmap_alloc(hb->size << hb->granularity,
-                             hb->granularity + ctz32(chunk_size));
-    return hb->meta;
-}
-
-void hbitmap_free_meta(HBitmap *hb)
-{
-    assert(hb->meta);
-    hbitmap_free(hb->meta);
-    hb->meta = NULL;
-}
-
 char *hbitmap_sha256(const HBitmap *bitmap, Error **errp)
 {
     size_t size = bitmap->sizes[HBITMAP_LEVELS - 1] * sizeof(unsigned long);
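For orientation, a sketch of how the NBDExtentArray introduced in nbd/server.c is used (illustrative only; send_status_sketch() and describe_next_range() are hypothetical placeholders — the real callers are nbd_co_send_block_status() and nbd_co_send_bitmap() above):

/*
 * Sketch only, assuming it lives inside nbd/server.c next to the code above:
 * allocate an extent array, fill it until it is full or the request is
 * covered, and hand it to nbd_co_send_extents(), which does the big-endian
 * conversion itself.  describe_next_range() stands in for
 * bdrv_block_status_above() or a dirty-bitmap query.
 */
static int send_status_sketch(NBDClient *client, uint64_t handle,
                              uint64_t offset, uint64_t bytes,
                              uint32_t context_id, Error **errp)
{
    g_autoptr(NBDExtentArray) ea =
        nbd_extent_array_new(NBD_MAX_BLOCK_STATUS_EXTENTS);

    while (bytes) {
        uint32_t flags;
        int64_t num;
        int ret = describe_next_range(offset, bytes, &num, &flags); /* hypothetical */

        if (ret < 0) {
            return ret;
        }
        if (nbd_extent_array_add(ea, num, flags) < 0) {
            break;              /* array is full: send what already fits */
        }
        offset += num;
        bytes -= num;
    }

    /* The g_autoptr cleanup frees ea on every return path. */
    return nbd_co_send_extents(client, handle, ea, true, context_id, errp);
}

Note how nbd_extent_array_add() both coalesces adjacent extents with equal flags and reports a full array, which is what lets blockstatus_to_extents() and bitmap_to_extents() above drop their manual extent bookkeeping.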