author    Gonglei <arei.gonglei@huawei.com>    2014-05-08 11:47:32 +0800
committer Paolo Bonzini <pbonzini@redhat.com>  2014-06-17 16:07:37 +0200
commit    4dc56152237f61694fa42675a78ffd14a509df98 (patch)
tree      0edbaa91512ac1de8593534c4f83065db25de9a1
parent    b0e56e0b63f350691b52d3e75e89bb64143fbeff (diff)
memory: Don't update all memory region when ioeventfd changed
Memory mappings don't rely on ioeventfds, so there is no need to destroy and
rebuild them when manipulating ioeventfds; doing so sacrifices performance.
According to test results, each ioeventfd deletion takes about 5 ms, of which
rebuilding the memory mappings accounts for about 4 ms. With many NICs and a
vmchannel in a migrating VM, there can be many ioeventfd deletions, which
increases downtime noticeably.

Signed-off-by: Gonglei <arei.gonglei@huawei.com>
Signed-off-by: Herongguang <herongguang.he@huawei.com>
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
-rw-r--r--	memory.c	33
1 file changed, 23 insertions(+), 10 deletions(-)
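For context, here is a minimal caller-side sketch (not part of this patch) of
how a device model might batch ioeventfd registrations inside one
memory-region transaction; with this change, the final commit only refreshes
the ioeventfds via address_space_update_ioeventfds() instead of rebuilding the
memory topology. The helper name, the notifier array, and the doorbell layout
are illustrative assumptions, not QEMU code:

    #include "exec/memory.h"
    #include "qemu/event_notifier.h"

    /* Hypothetical helper: register one kick eventfd per virtqueue in a single
     * transaction.  After this patch, the commit below only sets
     * ioeventfd_update_pending, so no memory mapping rebuild takes place. */
    static void add_queue_notifiers(MemoryRegion *mr, EventNotifier *notifiers,
                                    unsigned nvqs, hwaddr doorbell)
    {
        unsigned i;

        memory_region_transaction_begin();
        for (i = 0; i < nvqs; i++) {
            /* a 2-byte write of the queue index to 'doorbell' kicks queue i */
            memory_region_add_eventfd(mr, doorbell, 2, true, i, &notifiers[i]);
        }
        memory_region_transaction_commit();
    }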
diff --git a/memory.c b/memory.c
index 678661e..829d56d 100644
--- a/memory.c
+++ b/memory.c
@@ -28,6 +28,7 @@
 
 static unsigned memory_region_transaction_depth;
 static bool memory_region_update_pending;
+static bool ioeventfd_update_pending;
 static bool global_dirty_log = false;
 
 /* flat_view_mutex is taken around reading as->current_map; the critical
@@ -786,22 +787,34 @@ void memory_region_transaction_begin(void)
     ++memory_region_transaction_depth;
 }
 
+static void memory_region_clear_pending(void)
+{
+    memory_region_update_pending = false;
+    ioeventfd_update_pending = false;
+}
+
 void memory_region_transaction_commit(void)
 {
     AddressSpace *as;
 
     assert(memory_region_transaction_depth);
     --memory_region_transaction_depth;
-    if (!memory_region_transaction_depth && memory_region_update_pending) {
-        memory_region_update_pending = false;
-        MEMORY_LISTENER_CALL_GLOBAL(begin, Forward);
+    if (!memory_region_transaction_depth) {
+        if (memory_region_update_pending) {
+            MEMORY_LISTENER_CALL_GLOBAL(begin, Forward);
 
-        QTAILQ_FOREACH(as, &address_spaces, address_spaces_link) {
-            address_space_update_topology(as);
-        }
+            QTAILQ_FOREACH(as, &address_spaces, address_spaces_link) {
+                address_space_update_topology(as);
+            }
 
-        MEMORY_LISTENER_CALL_GLOBAL(commit, Forward);
-    }
+            MEMORY_LISTENER_CALL_GLOBAL(commit, Forward);
+        } else if (ioeventfd_update_pending) {
+            QTAILQ_FOREACH(as, &address_spaces, address_spaces_link) {
+                address_space_update_ioeventfds(as);
+            }
+        }
+        memory_region_clear_pending();
+    }
 }
 
 static void memory_region_destructor_none(MemoryRegion *mr)
@@ -1373,7 +1386,7 @@ void memory_region_add_eventfd(MemoryRegion *mr,
     memmove(&mr->ioeventfds[i+1], &mr->ioeventfds[i],
             sizeof(*mr->ioeventfds) * (mr->ioeventfd_nb-1 - i));
     mr->ioeventfds[i] = mrfd;
-    memory_region_update_pending |= mr->enabled;
+    ioeventfd_update_pending |= mr->enabled;
     memory_region_transaction_commit();
 }
 
@@ -1406,7 +1419,7 @@ void memory_region_del_eventfd(MemoryRegion *mr,
     --mr->ioeventfd_nb;
     mr->ioeventfds = g_realloc(mr->ioeventfds,
                                sizeof(*mr->ioeventfds)*mr->ioeventfd_nb + 1);
-    memory_region_update_pending |= mr->enabled;
+    ioeventfd_update_pending |= mr->enabled;
     memory_region_transaction_commit();
 }