author     Eric Blake <eblake@redhat.com>           2017-09-25 09:55:26 -0500
committer  Kevin Wolf <kwolf@redhat.com>            2017-10-06 16:28:58 +0200
commit     ca759622441da1d19107efaa62d9d63fb83ebb79 (patch)
tree       f8570ec9b02f5acebabdb38a30d49838fe6c4cd8
parent     0fdf1a4f68e5f27df9bb10a7223bd519c8d30e31 (diff)
dirty-bitmap: Convert internal hbitmap size/granularity
Now that all callers are using byte-based interfaces, there's no reason
for our internal hbitmap to remain with sector-based granularity. It
also simplifies our internal scaling, since we already know that
hbitmap widens requests out to granularity boundaries.

Signed-off-by: Eric Blake <eblake@redhat.com>
Reviewed-by: John Snow <jsnow@redhat.com>
Reviewed-by: Kevin Wolf <kwolf@redhat.com>
Reviewed-by: Fam Zheng <famz@redhat.com>
Signed-off-by: Kevin Wolf <kwolf@redhat.com>
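The conversion is safe because the number of bits the hbitmap tracks does not change: dividing the image size by the sector size and then lowering the granularity exponent by BDRV_SECTOR_BITS describes the same set of granularity-sized chunks as passing the byte size together with the full exponent. Below is a minimal standalone sketch of that arithmetic; the helpers are local stand-ins for QEMU's ctz32/DIV_ROUND_UP macros and for hbitmap's sizing, not the real implementation.

/* Standalone sketch: old (sector-based) vs. new (byte-based) hbitmap sizing.
 * All helpers here are illustrative stand-ins, not QEMU code. */
#include <assert.h>
#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

#define BDRV_SECTOR_BITS 9
#define BDRV_SECTOR_SIZE (1ULL << BDRV_SECTOR_BITS)
#define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))

static unsigned ctz32(uint32_t v)
{
    return (unsigned)__builtin_ctz(v);   /* v is a power of two here */
}

/* Chunks tracked by an hbitmap covering 'size' units at granularity 2^shift. */
static uint64_t hbitmap_bit_count(uint64_t size, unsigned shift)
{
    return DIV_ROUND_UP(size, 1ULL << shift);
}

int main(void)
{
    uint64_t bitmap_size = (1ULL << 30) + 4096;  /* image size in bytes */
    uint32_t granularity = 65536;                /* bitmap granularity in bytes */

    /* Old scheme: size in sectors, granularity exponent reduced by 9. */
    uint64_t old_bits =
        hbitmap_bit_count(DIV_ROUND_UP(bitmap_size, BDRV_SECTOR_SIZE),
                          ctz32(granularity) - BDRV_SECTOR_BITS);

    /* New scheme: size in bytes, full granularity exponent. */
    uint64_t new_bits = hbitmap_bit_count(bitmap_size, ctz32(granularity));

    assert(old_bits == new_bits);
    printf("both schemes track %" PRIu64 " dirty-tracking bits\n", new_bits);
    return 0;
}

(Builds with gcc or clang; __builtin_ctz is a compiler builtin, assumed here only for the sketch.)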
-rw-r--r--   block/dirty-bitmap.c | 62
1 file changed, 18 insertions(+), 44 deletions(-)
diff --git a/block/dirty-bitmap.c b/block/dirty-bitmap.c
index 58a3f33..bd04e99 100644
--- a/block/dirty-bitmap.c
+++ b/block/dirty-bitmap.c
@@ -38,7 +38,7 @@
*/
struct BdrvDirtyBitmap {
QemuMutex *mutex;
- HBitmap *bitmap; /* Dirty sector bitmap implementation */
+ HBitmap *bitmap; /* Dirty bitmap implementation */
HBitmap *meta; /* Meta dirty bitmap */
BdrvDirtyBitmap *successor; /* Anonymous child; implies frozen status */
char *name; /* Optional non-empty unique ID */
@@ -130,12 +130,7 @@ BdrvDirtyBitmap *bdrv_create_dirty_bitmap(BlockDriverState *bs,
}
bitmap = g_new0(BdrvDirtyBitmap, 1);
bitmap->mutex = &bs->dirty_bitmap_mutex;
- /*
- * TODO - let hbitmap track full granularity. For now, it is tracking
- * only sector granularity, as a shortcut for our iterators.
- */
- bitmap->bitmap = hbitmap_alloc(DIV_ROUND_UP(bitmap_size, BDRV_SECTOR_SIZE),
- ctz32(granularity) - BDRV_SECTOR_BITS);
+ bitmap->bitmap = hbitmap_alloc(bitmap_size, ctz32(granularity));
bitmap->size = bitmap_size;
bitmap->name = g_strdup(name);
bitmap->disabled = false;
@@ -312,7 +307,7 @@ void bdrv_dirty_bitmap_truncate(BlockDriverState *bs, int64_t bytes)
QLIST_FOREACH(bitmap, &bs->dirty_bitmaps, list) {
assert(!bdrv_dirty_bitmap_frozen(bitmap));
assert(!bitmap->active_iterators);
- hbitmap_truncate(bitmap->bitmap, DIV_ROUND_UP(bytes, BDRV_SECTOR_SIZE));
+ hbitmap_truncate(bitmap->bitmap, bytes);
bitmap->size = bytes;
}
bdrv_dirty_bitmaps_unlock(bs);
@@ -442,7 +437,7 @@ bool bdrv_get_dirty_locked(BlockDriverState *bs, BdrvDirtyBitmap *bitmap,
int64_t offset)
{
if (bitmap) {
- return hbitmap_get(bitmap->bitmap, offset >> BDRV_SECTOR_BITS);
+ return hbitmap_get(bitmap->bitmap, offset);
} else {
return false;
}
@@ -470,7 +465,7 @@ uint32_t bdrv_get_default_bitmap_granularity(BlockDriverState *bs)
uint32_t bdrv_dirty_bitmap_granularity(const BdrvDirtyBitmap *bitmap)
{
- return BDRV_SECTOR_SIZE << hbitmap_granularity(bitmap->bitmap);
+ return 1U << hbitmap_granularity(bitmap->bitmap);
}
BdrvDirtyBitmapIter *bdrv_dirty_iter_new(BdrvDirtyBitmap *bitmap)
@@ -503,20 +498,16 @@ void bdrv_dirty_iter_free(BdrvDirtyBitmapIter *iter)
int64_t bdrv_dirty_iter_next(BdrvDirtyBitmapIter *iter)
{
- int64_t ret = hbitmap_iter_next(&iter->hbi);
- return ret < 0 ? -1 : ret * BDRV_SECTOR_SIZE;
+ return hbitmap_iter_next(&iter->hbi);
}
/* Called within bdrv_dirty_bitmap_lock..unlock */
void bdrv_set_dirty_bitmap_locked(BdrvDirtyBitmap *bitmap,
int64_t offset, int64_t bytes)
{
- int64_t end_sector = DIV_ROUND_UP(offset + bytes, BDRV_SECTOR_SIZE);
-
assert(bdrv_dirty_bitmap_enabled(bitmap));
assert(!bdrv_dirty_bitmap_readonly(bitmap));
- hbitmap_set(bitmap->bitmap, offset >> BDRV_SECTOR_BITS,
- end_sector - (offset >> BDRV_SECTOR_BITS));
+ hbitmap_set(bitmap->bitmap, offset, bytes);
}
void bdrv_set_dirty_bitmap(BdrvDirtyBitmap *bitmap,
@@ -531,12 +522,9 @@ void bdrv_set_dirty_bitmap(BdrvDirtyBitmap *bitmap,
void bdrv_reset_dirty_bitmap_locked(BdrvDirtyBitmap *bitmap,
int64_t offset, int64_t bytes)
{
- int64_t end_sector = DIV_ROUND_UP(offset + bytes, BDRV_SECTOR_SIZE);
-
assert(bdrv_dirty_bitmap_enabled(bitmap));
assert(!bdrv_dirty_bitmap_readonly(bitmap));
- hbitmap_reset(bitmap->bitmap, offset >> BDRV_SECTOR_BITS,
- end_sector - (offset >> BDRV_SECTOR_BITS));
+ hbitmap_reset(bitmap->bitmap, offset, bytes);
}
void bdrv_reset_dirty_bitmap(BdrvDirtyBitmap *bitmap,
@@ -556,8 +544,7 @@ void bdrv_clear_dirty_bitmap(BdrvDirtyBitmap *bitmap, HBitmap **out)
hbitmap_reset_all(bitmap->bitmap);
} else {
HBitmap *backup = bitmap->bitmap;
- bitmap->bitmap = hbitmap_alloc(DIV_ROUND_UP(bitmap->size,
- BDRV_SECTOR_SIZE),
+ bitmap->bitmap = hbitmap_alloc(bitmap->size,
hbitmap_granularity(backup));
*out = backup;
}
@@ -576,51 +563,40 @@ void bdrv_undo_clear_dirty_bitmap(BdrvDirtyBitmap *bitmap, HBitmap *in)
uint64_t bdrv_dirty_bitmap_serialization_size(const BdrvDirtyBitmap *bitmap,
uint64_t offset, uint64_t bytes)
{
- assert(QEMU_IS_ALIGNED(offset | bytes, BDRV_SECTOR_SIZE));
- return hbitmap_serialization_size(bitmap->bitmap,
- offset >> BDRV_SECTOR_BITS,
- bytes >> BDRV_SECTOR_BITS);
+ return hbitmap_serialization_size(bitmap->bitmap, offset, bytes);
}
uint64_t bdrv_dirty_bitmap_serialization_align(const BdrvDirtyBitmap *bitmap)
{
- return hbitmap_serialization_align(bitmap->bitmap) * BDRV_SECTOR_SIZE;
+ return hbitmap_serialization_align(bitmap->bitmap);
}
void bdrv_dirty_bitmap_serialize_part(const BdrvDirtyBitmap *bitmap,
uint8_t *buf, uint64_t offset,
uint64_t bytes)
{
- assert(QEMU_IS_ALIGNED(offset | bytes, BDRV_SECTOR_SIZE));
- hbitmap_serialize_part(bitmap->bitmap, buf, offset >> BDRV_SECTOR_BITS,
- bytes >> BDRV_SECTOR_BITS);
+ hbitmap_serialize_part(bitmap->bitmap, buf, offset, bytes);
}
void bdrv_dirty_bitmap_deserialize_part(BdrvDirtyBitmap *bitmap,
uint8_t *buf, uint64_t offset,
uint64_t bytes, bool finish)
{
- assert(QEMU_IS_ALIGNED(offset | bytes, BDRV_SECTOR_SIZE));
- hbitmap_deserialize_part(bitmap->bitmap, buf, offset >> BDRV_SECTOR_BITS,
- bytes >> BDRV_SECTOR_BITS, finish);
+ hbitmap_deserialize_part(bitmap->bitmap, buf, offset, bytes, finish);
}
void bdrv_dirty_bitmap_deserialize_zeroes(BdrvDirtyBitmap *bitmap,
uint64_t offset, uint64_t bytes,
bool finish)
{
- assert(QEMU_IS_ALIGNED(offset | bytes, BDRV_SECTOR_SIZE));
- hbitmap_deserialize_zeroes(bitmap->bitmap, offset >> BDRV_SECTOR_BITS,
- bytes >> BDRV_SECTOR_BITS, finish);
+ hbitmap_deserialize_zeroes(bitmap->bitmap, offset, bytes, finish);
}
void bdrv_dirty_bitmap_deserialize_ones(BdrvDirtyBitmap *bitmap,
uint64_t offset, uint64_t bytes,
bool finish)
{
- assert(QEMU_IS_ALIGNED(offset | bytes, BDRV_SECTOR_SIZE));
- hbitmap_deserialize_ones(bitmap->bitmap, offset >> BDRV_SECTOR_BITS,
- bytes >> BDRV_SECTOR_BITS, finish);
+ hbitmap_deserialize_ones(bitmap->bitmap, offset, bytes, finish);
}
void bdrv_dirty_bitmap_deserialize_finish(BdrvDirtyBitmap *bitmap)
@@ -631,7 +607,6 @@ void bdrv_dirty_bitmap_deserialize_finish(BdrvDirtyBitmap *bitmap)
void bdrv_set_dirty(BlockDriverState *bs, int64_t offset, int64_t bytes)
{
BdrvDirtyBitmap *bitmap;
- int64_t end_sector = DIV_ROUND_UP(offset + bytes, BDRV_SECTOR_SIZE);
if (QLIST_EMPTY(&bs->dirty_bitmaps)) {
return;
@@ -643,8 +618,7 @@ void bdrv_set_dirty(BlockDriverState *bs, int64_t offset, int64_t bytes)
continue;
}
assert(!bdrv_dirty_bitmap_readonly(bitmap));
- hbitmap_set(bitmap->bitmap, offset >> BDRV_SECTOR_BITS,
- end_sector - (offset >> BDRV_SECTOR_BITS));
+ hbitmap_set(bitmap->bitmap, offset, bytes);
}
bdrv_dirty_bitmaps_unlock(bs);
}
@@ -654,12 +628,12 @@ void bdrv_set_dirty(BlockDriverState *bs, int64_t offset, int64_t bytes)
*/
void bdrv_set_dirty_iter(BdrvDirtyBitmapIter *iter, int64_t offset)
{
- hbitmap_iter_init(&iter->hbi, iter->hbi.hb, offset >> BDRV_SECTOR_BITS);
+ hbitmap_iter_init(&iter->hbi, iter->hbi.hb, offset);
}
int64_t bdrv_get_dirty_count(BdrvDirtyBitmap *bitmap)
{
- return hbitmap_count(bitmap->bitmap) << BDRV_SECTOR_BITS;
+ return hbitmap_count(bitmap->bitmap);
}
int64_t bdrv_get_meta_dirty_count(BdrvDirtyBitmap *bitmap)