author     Vladimir Sementsov-Ogievskiy <vsementsov@virtuozzo.com>  2020-02-05 14:20:36 +0300
committer  John Snow <jsnow@redhat.com>  2020-03-18 14:03:46 -0400
commit     642700fda029ed6b4051db7eab8f704131217643 (patch)
tree       793d737515e0301fae6adbf06cadc495a6db65c9 /util
parent     0c88f1970c769289fa49361bc3f00b5fba9d5d0e (diff)
block/dirty-bitmap: switch _next_dirty_area and _next_zero to int64_t
We are going to introduce bdrv_dirty_bitmap_next_dirty so that the same
variable may be used to store its return value and to be its parameter,
so it should be int64_t.

Similarly, we are going to refactor hbitmap_next_dirty_area to use
hbitmap_next_dirty together with hbitmap_next_zero, therefore we want
the hbitmap_next_zero parameter type to be int64_t too.

So, for convenience, update all parameters of *_next_zero and
*_next_dirty_area to be int64_t.

Signed-off-by: Vladimir Sementsov-Ogievskiy <vsementsov@virtuozzo.com>
Reviewed-by: Max Reitz <mreitz@redhat.com>
Reviewed-by: John Snow <jsnow@redhat.com>
Message-id: 20200205112041.6003-6-vsementsov@virtuozzo.com
Signed-off-by: John Snow <jsnow@redhat.com>
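As a caller-side illustration (a sketch only, not part of this patch; the wrapper name first_clear_bit and the caller-tracked length len are hypothetical), the signed prototype lets a single int64_t carry both the position handed to hbitmap_next_zero and the offset-or-negative result it hands back:

#include "qemu/osdep.h"
#include "qemu/hbitmap.h"

/* Sketch only: find the first clear bit in [0, len).  The same signed
 * variable holds the query position and then the result; a negative
 * return value means no clear bit was found in the range. */
static int64_t first_clear_bit(const HBitmap *hb, int64_t len)
{
    int64_t pos = 0;

    pos = hbitmap_next_zero(hb, pos, len);
    return pos; /* offset of the first clear bit, or -1 */
}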
Diffstat (limited to 'util')
-rw-r--r--  util/hbitmap.c  13
1 file changed, 8 insertions(+), 5 deletions(-)
diff --git a/util/hbitmap.c b/util/hbitmap.c
index b6d4b99..df22f06 100644
--- a/util/hbitmap.c
+++ b/util/hbitmap.c
@@ -193,7 +193,7 @@ void hbitmap_iter_init(HBitmapIter *hbi, const HBitmap *hb, uint64_t first)
     }
 }
 
-int64_t hbitmap_next_zero(const HBitmap *hb, uint64_t start, uint64_t count)
+int64_t hbitmap_next_zero(const HBitmap *hb, int64_t start, int64_t count)
 {
     size_t pos = (start >> hb->granularity) >> BITS_PER_LEVEL;
     unsigned long *last_lev = hb->levels[HBITMAP_LEVELS - 1];
@@ -202,6 +202,8 @@ int64_t hbitmap_next_zero(const HBitmap *hb, uint64_t start, uint64_t count)
     uint64_t end_bit, sz;
     int64_t res;
 
+    assert(start >= 0 && count >= 0);
+
     if (start >= hb->orig_size || count == 0) {
         return -1;
     }
@@ -244,14 +246,15 @@ int64_t hbitmap_next_zero(const HBitmap *hb, uint64_t start, uint64_t count)
     return res;
 }
 
-bool hbitmap_next_dirty_area(const HBitmap *hb, uint64_t *start,
-                             uint64_t *count)
+bool hbitmap_next_dirty_area(const HBitmap *hb, int64_t *start, int64_t *count)
 {
     HBitmapIter hbi;
     int64_t firt_dirty_off, area_end;
     uint32_t granularity = 1UL << hb->granularity;
     uint64_t end;
 
+    assert(*start >= 0 && *count >= 0);
+
     if (*start >= hb->orig_size || *count == 0) {
         return false;
     }
@@ -834,8 +837,8 @@ bool hbitmap_can_merge(const HBitmap *a, const HBitmap *b)
  */
 static void hbitmap_sparse_merge(HBitmap *dst, const HBitmap *src)
 {
-    uint64_t offset = 0;
-    uint64_t count = src->orig_size;
+    int64_t offset = 0;
+    int64_t count = src->orig_size;
 
     while (hbitmap_next_dirty_area(src, &offset, &count)) {
         hbitmap_set(dst, offset, count);
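For reference, a minimal caller sketch of the in/out convention that hbitmap_sparse_merge relies on above (assuming QEMU's internal hbitmap API; size is a hypothetical caller-tracked length of the region the bitmap covers):

#include "qemu/osdep.h"
#include "qemu/hbitmap.h"

/* Illustrative only: visit every dirty area of @hb.  Both offset and
 * count are in/out parameters: on entry they bound the search range,
 * on success they describe the dirty area that was found, so the same
 * int64_t pair is simply reused for the next query. */
static void for_each_dirty_area(const HBitmap *hb, int64_t size)
{
    int64_t offset = 0;
    int64_t count = size;

    while (hbitmap_next_dirty_area(hb, &offset, &count)) {
        /* [offset, offset + count) is dirty here. */
        offset += count;
        count = size - offset;
    }
}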