| author | John Snow <jsnow@redhat.com> | 2019-07-29 16:35:53 -0400 |
|---|---|---|
| committer | John Snow <jsnow@redhat.com> | 2019-08-16 16:28:02 -0400 |
| commit | c5b40c1f9cd310b44b571a75ee42de22539996cd | |
| tree | c51524fe2a7e329e1429c60d31e05966e21d1b5f /util | |
| parent | 3bde4b010e7510061dd8055b336c0148610a7dff | |
hbitmap: enable merging across granularities
Signed-off-by: John Snow <jsnow@redhat.com>
Reviewed-by: Max Reitz <mreitz@redhat.com>
Message-id: 20190709232550.10724-9-jsnow@redhat.com
Signed-off-by: John Snow <jsnow@redhat.com>
Diffstat (limited to 'util')
 util/hbitmap.c | 36 +++++++++++++++++++++++++++++++++++-
 1 file changed, 35 insertions(+), 1 deletion(-)
```diff
diff --git a/util/hbitmap.c b/util/hbitmap.c
index 83927f3..fd44c89 100644
--- a/util/hbitmap.c
+++ b/util/hbitmap.c
@@ -781,7 +781,27 @@ void hbitmap_truncate(HBitmap *hb, uint64_t size)
 
 bool hbitmap_can_merge(const HBitmap *a, const HBitmap *b)
 {
-    return (a->size == b->size) && (a->granularity == b->granularity);
+    return (a->orig_size == b->orig_size);
+}
+
+/**
+ * hbitmap_sparse_merge: performs dst = dst | src
+ * works with differing granularities.
+ * best used when src is sparsely populated.
+ */
+static void hbitmap_sparse_merge(HBitmap *dst, const HBitmap *src)
+{
+    uint64_t offset = 0;
+    uint64_t count = src->orig_size;
+
+    while (hbitmap_next_dirty_area(src, &offset, &count)) {
+        hbitmap_set(dst, offset, count);
+        offset += count;
+        if (offset >= src->orig_size) {
+            break;
+        }
+        count = src->orig_size - offset;
+    }
 }
 
 /**
@@ -812,10 +832,24 @@ bool hbitmap_merge(const HBitmap *a, const HBitmap *b, HBitmap *result)
         return true;
     }
 
+    if (a->granularity != b->granularity) {
+        if ((a != result) && (b != result)) {
+            hbitmap_reset_all(result);
+        }
+        if (a != result) {
+            hbitmap_sparse_merge(result, a);
+        }
+        if (b != result) {
+            hbitmap_sparse_merge(result, b);
+        }
+        return true;
+    }
+
     /* This merge is O(size), as BITS_PER_LONG and HBITMAP_LEVELS are constant.
      * It may be possible to improve running times for sparsely populated maps
      * by using hbitmap_iter_next, but this is suboptimal for dense maps.
      */
+    assert(a->size == b->size);
     for (i = HBITMAP_LEVELS - 1; i >= 0; i--) {
         for (j = 0; j < a->sizes[i]; j++) {
             result->levels[i][j] = a->levels[i][j] | b->levels[i][j];
```
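The change relies on two properties visible in the diff: hbitmap_next_dirty_area() reports dirty ranges in byte terms relative to orig_size, and hbitmap_set() rounds the requested byte range out to the destination bitmap's own granularity. Together they let hbitmap_sparse_merge() replay one bitmap into another regardless of granularity, as long as both cover the same orig_size; when neither input aliases the result, the result is cleared first so stale bits cannot leak into the union. The sketch below is a minimal, self-contained analogue of that loop shape. It does not use QEMU's HBitmap API: the SimpleBitmap type and its sb_* helpers are hypothetical stand-ins, shown only to illustrate how a range recorded at 512-byte granularity lands in a bitmap tracked at 64 KiB granularity.

```c
/* Hypothetical stand-in for HBitmap: one byte per bit, no multi-level tree. */
#include <assert.h>
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

typedef struct SimpleBitmap {
    uint64_t orig_size;  /* bytes covered by the bitmap */
    int granularity;     /* log2 of bytes per bit */
    uint64_t nbits;      /* number of bits stored */
    uint8_t *bits;
} SimpleBitmap;

static SimpleBitmap *sb_alloc(uint64_t orig_size, int granularity)
{
    SimpleBitmap *sb = calloc(1, sizeof(*sb));
    sb->orig_size = orig_size;
    sb->granularity = granularity;
    sb->nbits = (orig_size + (1ULL << granularity) - 1) >> granularity;
    sb->bits = calloc(sb->nbits, 1);
    return sb;
}

/* Mark [offset, offset + count) dirty, rounding outward to this bitmap's
 * granularity, as hbitmap_set() does in the patch. */
static void sb_set(SimpleBitmap *sb, uint64_t offset, uint64_t count)
{
    uint64_t first = offset >> sb->granularity;
    uint64_t last = (offset + count - 1) >> sb->granularity;

    for (uint64_t i = first; i <= last; i++) {
        sb->bits[i] = 1;
    }
}

/* Find the next dirty byte range within [*offset, *offset + *count);
 * plays the role of hbitmap_next_dirty_area() in the diff above. */
static bool sb_next_dirty_area(const SimpleBitmap *sb,
                               uint64_t *offset, uint64_t *count)
{
    uint64_t end = *offset + *count;
    uint64_t bit = *offset >> sb->granularity;

    while ((bit << sb->granularity) < end && !sb->bits[bit]) {
        bit++;
    }
    if ((bit << sb->granularity) >= end) {
        return false;
    }

    uint64_t start = bit << sb->granularity;
    while ((bit << sb->granularity) < end && sb->bits[bit]) {
        bit++;
    }
    uint64_t area_end = (bit << sb->granularity) < end
                        ? (bit << sb->granularity) : end;
    *offset = start;
    *count = area_end - start;
    return true;
}

/* dst |= src across differing granularities: the same loop shape as
 * hbitmap_sparse_merge() in the patch. */
static void sb_sparse_merge(SimpleBitmap *dst, const SimpleBitmap *src)
{
    uint64_t offset = 0;
    uint64_t count = src->orig_size;

    while (sb_next_dirty_area(src, &offset, &count)) {
        sb_set(dst, offset, count);
        offset += count;
        if (offset >= src->orig_size) {
            break;
        }
        count = src->orig_size - offset;
    }
}

int main(void)
{
    /* Same 1 MiB region, tracked at 512-byte and 64 KiB granularity. */
    SimpleBitmap *fine = sb_alloc(1 << 20, 9);
    SimpleBitmap *coarse = sb_alloc(1 << 20, 16);

    sb_set(fine, 4096, 512);        /* one 512-byte dirty chunk */
    sb_sparse_merge(coarse, fine);  /* becomes one dirty 64 KiB bit */

    assert(coarse->bits[0] && !coarse->bits[1]);
    printf("merge across granularities: OK\n");
    return 0;
}
```

Built as C99 (for example `gcc -std=c99 sketch.c`), the program shows the single 512-byte dirty chunk rounding up to one 64 KiB bit in the coarse bitmap. The rounding is conservative: the coarser bitmap may over-report dirtiness but never under-report it, which is the property a dirty-bitmap merge needs.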