author     Paolo Bonzini <pbonzini@redhat.com>             2019-09-24 10:47:48 -0400
committer  Christian Borntraeger <borntraeger@de.ibm.com>  2019-09-30 13:51:50 +0200
commit     4222147dfb7229de36d8cc8c9fa3543b43347241 (patch)
tree       d45e09fb978f61ead847c7d594a41b85776ef710 /accel/kvm
parent     ee35e9684c334523e2864b1f9c31f0838e834343 (diff)
kvm: extract kvm_log_clear_one_slot
We may need to clear the dirty bitmap for more than one KVM memslot.
First do some code movement with no semantic change.

Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
Signed-off-by: Igor Mammedov <imammedo@redhat.com>
Reviewed-by: Peter Xu <peterx@redhat.com>
Message-Id: <20190924144751.24149-2-imammedo@redhat.com>
Acked-by: Paolo Bonzini <pbonzini@redhat.com>
Signed-off-by: Christian Borntraeger <borntraeger@de.ibm.com>
[fixup line break]
Diffstat (limited to 'accel/kvm')
-rw-r--r--  accel/kvm/kvm-all.c | 103
 1 file changed, 57 insertions(+), 46 deletions(-)
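
Since this patch is pure code movement, the multi-slot use that the commit message anticipates is not visible in the diff below. As a rough, self-contained sketch only (not the actual follow-up patch), the range clipping a caller might do once a clear request can span several memslots could look like the following; MockSlot, clear_one_slot() and clear_range() are hypothetical stand-ins for QEMU's KVMSlot, the extracted kvm_log_clear_one_slot() and its eventual caller:

    /*
     * Illustrative sketch only -- not part of this patch.  Models how a
     * dirty-log clear request spanning several memslots could be split
     * into per-slot calls once kvm_log_clear_one_slot() exists.
     */
    #include <inttypes.h>
    #include <stdint.h>
    #include <stdio.h>

    typedef struct {
        int slot;              /* memslot index */
        uint64_t start_addr;   /* guest-physical start of the memslot */
        uint64_t memory_size;  /* length of the memslot in bytes */
    } MockSlot;

    /* Stand-in for the extracted kvm_log_clear_one_slot(): report the range. */
    static int clear_one_slot(const MockSlot *mem, uint64_t start, uint64_t size)
    {
        printf("slot %d: clear [0x%" PRIx64 ", 0x%" PRIx64 ")\n",
               mem->slot, start, start + size);
        return 0;
    }

    /*
     * Split a clear request [start, start + size) across every memslot it
     * overlaps, clipping the range to each slot's boundaries and calling
     * the per-slot helper once per overlap.
     */
    static int clear_range(const MockSlot *slots, int nr_slots,
                           uint64_t start, uint64_t size)
    {
        uint64_t end = start + size;
        int i, ret = 0;

        for (i = 0; i < nr_slots; i++) {
            const MockSlot *mem = &slots[i];
            uint64_t slot_end = mem->start_addr + mem->memory_size;
            uint64_t lo = start > mem->start_addr ? start : mem->start_addr;
            uint64_t hi = end < slot_end ? end : slot_end;

            if (lo < hi) {                    /* request overlaps this slot */
                ret = clear_one_slot(mem, lo, hi - lo);
                if (ret < 0) {
                    break;
                }
            }
        }
        return ret;
    }

    int main(void)
    {
        MockSlot slots[] = {
            { .slot = 0, .start_addr = 0x00000, .memory_size = 0x20000 },
            { .slot = 1, .start_addr = 0x20000, .memory_size = 0x20000 },
        };

        /* A request straddling both slots becomes two clipped per-slot calls. */
        return clear_range(slots, 2, 0x10000, 0x20000);
    }

The sketch uses only the C standard library and prints one clipped clear per overlapped slot; a real caller in kvm-all.c would additionally hold the slots lock and rely on the alignment handling (KVM_CLEAR_LOG_ALIGN) that stays inside kvm_log_clear_one_slot().
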
diff --git a/accel/kvm/kvm-all.c b/accel/kvm/kvm-all.c
index b09bad0..a85ec09 100644
--- a/accel/kvm/kvm-all.c
+++ b/accel/kvm/kvm-all.c
@@ -575,55 +575,14 @@ out:
#define KVM_CLEAR_LOG_ALIGN (qemu_real_host_page_size << KVM_CLEAR_LOG_SHIFT)
#define KVM_CLEAR_LOG_MASK (-KVM_CLEAR_LOG_ALIGN)
-/**
- * kvm_physical_log_clear - Clear the kernel's dirty bitmap for range
- *
- * NOTE: this will be a no-op if we haven't enabled manual dirty log
- * protection in the host kernel because in that case this operation
- * will be done within log_sync().
- *
- * @kml: the kvm memory listener
- * @section: the memory range to clear dirty bitmap
- */
-static int kvm_physical_log_clear(KVMMemoryListener *kml,
- MemoryRegionSection *section)
+static int kvm_log_clear_one_slot(KVMSlot *mem, int as_id, uint64_t start,
+ uint64_t size)
{
KVMState *s = kvm_state;
+ uint64_t end, bmap_start, start_delta, bmap_npages;
struct kvm_clear_dirty_log d;
- uint64_t start, end, bmap_start, start_delta, bmap_npages, size;
unsigned long *bmap_clear = NULL, psize = qemu_real_host_page_size;
- KVMSlot *mem = NULL;
- int ret, i;
-
- if (!s->manual_dirty_log_protect) {
- /* No need to do explicit clear */
- return 0;
- }
-
- start = section->offset_within_address_space;
- size = int128_get64(section->size);
-
- if (!size) {
- /* Nothing more we can do... */
- return 0;
- }
-
- kvm_slots_lock(kml);
-
- /* Find any possible slot that covers the section */
- for (i = 0; i < s->nr_slots; i++) {
- mem = &kml->slots[i];
- if (mem->start_addr <= start &&
- start + size <= mem->start_addr + mem->memory_size) {
- break;
- }
- }
-
- /*
- * We should always find one memslot until this point, otherwise
- * there could be something wrong from the upper layer
- */
- assert(mem && i != s->nr_slots);
+ int ret;
/*
* We need to extend either the start or the size or both to
@@ -694,7 +653,7 @@ static int kvm_physical_log_clear(KVMMemoryListener *kml,
/* It should never overflow. If it happens, say something */
assert(bmap_npages <= UINT32_MAX);
d.num_pages = bmap_npages;
- d.slot = mem->slot | (kml->as_id << 16);
+ d.slot = mem->slot | (as_id << 16);
if (kvm_vm_ioctl(s, KVM_CLEAR_DIRTY_LOG, &d) == -1) {
ret = -errno;
@@ -717,6 +676,58 @@ static int kvm_physical_log_clear(KVMMemoryListener *kml,
size / psize);
/* This handles the NULL case well */
g_free(bmap_clear);
+ return ret;
+}
+
+
+/**
+ * kvm_physical_log_clear - Clear the kernel's dirty bitmap for range
+ *
+ * NOTE: this will be a no-op if we haven't enabled manual dirty log
+ * protection in the host kernel because in that case this operation
+ * will be done within log_sync().
+ *
+ * @kml: the kvm memory listener
+ * @section: the memory range to clear dirty bitmap
+ */
+static int kvm_physical_log_clear(KVMMemoryListener *kml,
+ MemoryRegionSection *section)
+{
+ KVMState *s = kvm_state;
+ uint64_t start, size;
+ KVMSlot *mem = NULL;
+ int ret, i;
+
+ if (!s->manual_dirty_log_protect) {
+ /* No need to do explicit clear */
+ return 0;
+ }
+
+ start = section->offset_within_address_space;
+ size = int128_get64(section->size);
+
+ if (!size) {
+ /* Nothing more we can do... */
+ return 0;
+ }
+
+ kvm_slots_lock(kml);
+
+ /* Find any possible slot that covers the section */
+ for (i = 0; i < s->nr_slots; i++) {
+ mem = &kml->slots[i];
+ if (mem->start_addr <= start &&
+ start + size <= mem->start_addr + mem->memory_size) {
+ break;
+ }
+ }
+
+ /*
+ * We should always find one memslot until this point, otherwise
+ * there could be something wrong from the upper layer
+ */
+ assert(mem && i != s->nr_slots);
+ ret = kvm_log_clear_one_slot(mem, kml->as_id, start, size);
kvm_slots_unlock(kml);