author | Vladimir Sementsov-Ogievskiy <vsementsov@virtuozzo.com> | 2020-02-05 14:20:37 +0300 |
---|---|---|
committer | John Snow <jsnow@redhat.com> | 2020-03-18 14:03:46 -0400 |
commit | 9399c54b7557a20bc78aaecf2d51983cfafbbf41 (patch) | |
tree | 19261c083bb14e67cdf630a36f08398daf778d7b /util/hbitmap.c | |
parent | 642700fda029ed6b4051db7eab8f704131217643 (diff) | |
block/dirty-bitmap: add _next_dirty API
We have bdrv_dirty_bitmap_next_zero; let's add the corresponding
bdrv_dirty_bitmap_next_dirty, which in some cases is more convenient to
use than bitmap iterators.
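To illustrate the convenience argument, here is a minimal sketch (not part of the patch; the wrapper names and the caller-supplied bitmap_size parameter are hypothetical) contrasting the new hbitmap-level helper from the diff below with the equivalent iterator-based lookup it replaces:

#include "qemu/osdep.h"
#include "qemu/hbitmap.h"

/* Hypothetical wrapper: first dirty offset in [start, start + count), or -1.
 * With the new helper this is a single call. */
static int64_t first_dirty(const HBitmap *hb, int64_t start, int64_t count)
{
    return hbitmap_next_dirty(hb, start, count);
}

/* The iterator-based equivalent needs explicit setup and range clamping;
 * this mirrors the code the patch moves out of hbitmap_next_dirty_area().
 * 'bitmap_size' is the length the bitmap was created with, known to the
 * caller since HBitmap does not expose it. */
static int64_t first_dirty_with_iter(const HBitmap *hb, int64_t start,
                                     int64_t count, int64_t bitmap_size)
{
    HBitmapIter hbi;
    int64_t off;
    int64_t end = MIN(bitmap_size, start + count);

    if (start >= bitmap_size || count == 0) {
        return -1;
    }

    hbitmap_iter_init(&hbi, hb, start);
    off = hbitmap_iter_next(&hbi);

    if (off < 0 || off >= end) {
        return -1;
    }
    /* At granularities above zero the iterator reports the start of the
     * dirty granule, which may lie below the requested start. */
    return MAX(start, off);
}

The MAX() clamp is the same one the new hbitmap_next_dirty() applies internally, as seen in the diff below.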
For testing, modify test_hbitmap_next_zero_check_range to check both
next_zero and next_dirty, and add some new checks.
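A hedged sketch of what such a combined check can look like (illustrative only; the helper name and the use of g_assert_cmpint() are assumptions, not the actual test code): compare both lookups against a naive per-offset scan using hbitmap_get().

#include "qemu/osdep.h"
#include "qemu/hbitmap.h"

/* Illustrative check: verify hbitmap_next_dirty()/hbitmap_next_zero()
 * against a naive per-offset scan over [start, MIN(size, start + count)).
 * 'size' is the length the bitmap was created with, known to the caller. */
static void check_next_range(const HBitmap *hb, int64_t size,
                             int64_t start, int64_t count)
{
    int64_t end = MIN(size, start + count);
    int64_t naive_dirty = -1;
    int64_t naive_zero = -1;
    int64_t i;

    for (i = start; i < end; i++) {
        if (naive_dirty < 0 && hbitmap_get(hb, i)) {
            naive_dirty = i;
        }
        if (naive_zero < 0 && !hbitmap_get(hb, i)) {
            naive_zero = i;
        }
    }

    g_assert_cmpint(hbitmap_next_dirty(hb, start, count), ==, naive_dirty);
    g_assert_cmpint(hbitmap_next_zero(hb, start, count), ==, naive_zero);
}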
Signed-off-by: Vladimir Sementsov-Ogievskiy <vsementsov@virtuozzo.com>
Reviewed-by: Max Reitz <mreitz@redhat.com>
Reviewed-by: John Snow <jsnow@redhat.com>
Message-id: 20200205112041.6003-7-vsementsov@virtuozzo.com
Signed-off-by: John Snow <jsnow@redhat.com>
Diffstat (limited to 'util/hbitmap.c')
-rw-r--r-- | util/hbitmap.c | 60 |
1 file changed, 32 insertions, 28 deletions
diff --git a/util/hbitmap.c b/util/hbitmap.c
index df22f06..883ca48 100644
--- a/util/hbitmap.c
+++ b/util/hbitmap.c
@@ -193,6 +193,30 @@ void hbitmap_iter_init(HBitmapIter *hbi, const HBitmap *hb, uint64_t first)
     }
 }
 
+int64_t hbitmap_next_dirty(const HBitmap *hb, int64_t start, int64_t count)
+{
+    HBitmapIter hbi;
+    int64_t first_dirty_off;
+    uint64_t end;
+
+    assert(start >= 0 && count >= 0);
+
+    if (start >= hb->orig_size || count == 0) {
+        return -1;
+    }
+
+    end = count > hb->orig_size - start ? hb->orig_size : start + count;
+
+    hbitmap_iter_init(&hbi, hb, start);
+    first_dirty_off = hbitmap_iter_next(&hbi);
+
+    if (first_dirty_off < 0 || first_dirty_off >= end) {
+        return -1;
+    }
+
+    return MAX(start, first_dirty_off);
+}
+
 int64_t hbitmap_next_zero(const HBitmap *hb, int64_t start, int64_t count)
 {
     size_t pos = (start >> hb->granularity) >> BITS_PER_LEVEL;
@@ -248,40 +272,20 @@ int64_t hbitmap_next_zero(const HBitmap *hb, int64_t start, int64_t count)
 
 bool hbitmap_next_dirty_area(const HBitmap *hb, int64_t *start, int64_t *count)
 {
-    HBitmapIter hbi;
-    int64_t firt_dirty_off, area_end;
-    uint32_t granularity = 1UL << hb->granularity;
-    uint64_t end;
-
-    assert(*start >= 0 && *count >= 0);
-
-    if (*start >= hb->orig_size || *count == 0) {
-        return false;
-    }
-
-    end = *count > hb->orig_size - *start ? hb->orig_size : *start + *count;
-
-    hbitmap_iter_init(&hbi, hb, *start);
-    firt_dirty_off = hbitmap_iter_next(&hbi);
+    int64_t area_start, area_end;
 
-    if (firt_dirty_off < 0 || firt_dirty_off >= end) {
+    area_start = hbitmap_next_dirty(hb, *start, *count);
+    if (area_start < 0) {
         return false;
     }
 
-    if (firt_dirty_off + granularity >= end) {
-        area_end = end;
-    } else {
-        area_end = hbitmap_next_zero(hb, firt_dirty_off + granularity,
-                                     end - firt_dirty_off - granularity);
-        if (area_end < 0) {
-            area_end = end;
-        }
+    area_end = hbitmap_next_zero(hb, area_start, *start + *count - area_start);
+    if (area_end < 0) {
+        area_end = MIN(hb->orig_size, *start + *count);
     }
 
-    if (firt_dirty_off > *start) {
-        *start = firt_dirty_off;
-    }
-    *count = area_end - *start;
+    *start = area_start;
+    *count = area_end - area_start;
     return true;
 }
 
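For reference, a minimal caller-side sketch (not from the patch; the function name and the caller-tracked size parameter are assumptions, since HBitmap does not expose its length) showing how the refactored hbitmap_next_dirty_area() can be driven in a loop over a whole bitmap:

#include "qemu/osdep.h"
#include "qemu/hbitmap.h"

/* Hypothetical helper: print every dirty area in [0, size).
 * 'size' is the length the bitmap was created with, tracked by the caller. */
static void dump_dirty_areas(const HBitmap *hb, int64_t size)
{
    int64_t offset = 0;

    while (offset < size) {
        int64_t start = offset;
        int64_t count = size - offset;

        if (!hbitmap_next_dirty_area(hb, &start, &count)) {
            break;  /* no dirty bits left in the requested range */
        }

        printf("dirty: offset=%" PRId64 " count=%" PRId64 "\n", start, count);

        offset = start + count;  /* resume right after the reported area */
    }
}

Each iteration narrows the request to the remaining tail of the bitmap, and the in/out start/count pair returned by hbitmap_next_dirty_area() tells the caller where to resume.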