author     Juan Quintela <quintela@redhat.com>    2023-04-11 18:02:34 +0200
committer  Juan Quintela <quintela@redhat.com>    2023-04-24 11:28:58 +0200
commit     536b5a4e56ec67c958f46e7d46cbd5ac34e5a239
tree       30e852bb37674503a265e83f55919107bb23e937
parent     296a4ac2aa63038b6b702f2ee8f0f93ae26727ae
migration: Make dirty_sync_count atomic
Signed-off-by: Juan Quintela <quintela@redhat.com>
Reviewed-by: Peter Xu <peterx@redhat.com>
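
In short, the patch turns ram_counters.dirty_sync_count from a plain int64_t into a Stat64: the writer in migration_bitmap_sync() increments it with stat64_add(), and readers such as populate_ram_info(), the XBZRLE cache code in ram.c, and the migration pass event path (qapi_event_send_migration_pass()) fetch it with stat64_get(). A stand-alone sketch of the pattern follows the diff below.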
-rw-r--r--  migration/migration.c |  3
-rw-r--r--  migration/ram.c       | 13
-rw-r--r--  migration/ram.h       |  2
3 files changed, 10 insertions, 8 deletions
diff --git a/migration/migration.c b/migration/migration.c
index 4ca2173..97c227a 100644
--- a/migration/migration.c
+++ b/migration/migration.c
@@ -1148,7 +1148,8 @@ static void populate_ram_info(MigrationInfo *info, MigrationState *s)
     info->ram->normal = stat64_get(&ram_counters.normal);
     info->ram->normal_bytes = info->ram->normal * page_size;
     info->ram->mbps = s->mbps;
-    info->ram->dirty_sync_count = ram_counters.dirty_sync_count;
+    info->ram->dirty_sync_count =
+        stat64_get(&ram_counters.dirty_sync_count);
     info->ram->dirty_sync_missed_zero_copy =
         stat64_get(&ram_counters.dirty_sync_missed_zero_copy);
     info->ram->postcopy_requests = ram_counters.postcopy_requests;
diff --git a/migration/ram.c b/migration/ram.c
index b1722b6..3c13136 100644
--- a/migration/ram.c
+++ b/migration/ram.c
@@ -764,7 +764,7 @@ static void xbzrle_cache_zero_page(RAMState *rs, ram_addr_t current_addr)
     /* We don't care if this fails to allocate a new cache page
      * as long as it updated an old one */
     cache_insert(XBZRLE.cache, current_addr, XBZRLE.zero_target_page,
-                 ram_counters.dirty_sync_count);
+                 stat64_get(&ram_counters.dirty_sync_count));
 }
 
 #define ENCODING_FLAG_XBZRLE 0x1
@@ -790,13 +790,13 @@ static int save_xbzrle_page(RAMState *rs, PageSearchStatus *pss,
     int encoded_len = 0, bytes_xbzrle;
     uint8_t *prev_cached_page;
     QEMUFile *file = pss->pss_channel;
+    uint64_t generation = stat64_get(&ram_counters.dirty_sync_count);
 
-    if (!cache_is_cached(XBZRLE.cache, current_addr,
-                         ram_counters.dirty_sync_count)) {
+    if (!cache_is_cached(XBZRLE.cache, current_addr, generation)) {
         xbzrle_counters.cache_miss++;
         if (!rs->last_stage) {
             if (cache_insert(XBZRLE.cache, current_addr, *current_data,
-                             ram_counters.dirty_sync_count) == -1) {
+                             generation) == -1) {
                 return -1;
             } else {
                 /* update *current_data when the page has been
@@ -1209,7 +1209,7 @@ static void migration_bitmap_sync(RAMState *rs)
     RAMBlock *block;
     int64_t end_time;
 
-    ram_counters.dirty_sync_count++;
+    stat64_add(&ram_counters.dirty_sync_count, 1);
 
     if (!rs->time_last_bitmap_sync) {
         rs->time_last_bitmap_sync = qemu_clock_get_ms(QEMU_CLOCK_REALTIME);
@@ -1246,7 +1246,8 @@ static void migration_bitmap_sync(RAMState *rs)
         rs->bytes_xfer_prev = stat64_get(&ram_counters.transferred);
     }
     if (migrate_use_events()) {
-        qapi_event_send_migration_pass(ram_counters.dirty_sync_count);
+        uint64_t generation = stat64_get(&ram_counters.dirty_sync_count);
+        qapi_event_send_migration_pass(generation);
     }
 }
 
diff --git a/migration/ram.h b/migration/ram.h
index bb52632..8c0d07c 100644
--- a/migration/ram.h
+++ b/migration/ram.h
@@ -42,7 +42,7 @@
  */
 typedef struct {
     int64_t dirty_pages_rate;
-    int64_t dirty_sync_count;
+    Stat64 dirty_sync_count;
     Stat64 dirty_sync_missed_zero_copy;
     Stat64 downtime_bytes;
     Stat64 duplicate;
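
For readers without the QEMU tree at hand, here is a minimal, self-contained sketch of the counter pattern the patch switches to. It deliberately mirrors the stat64_add()/stat64_get() calls above, but the Stat64Sketch type and its helpers are stand-ins built on C11 atomics purely for illustration; QEMU's real Stat64 implementation lives in include/qemu/stats64.h and is not reproduced here.

/* Illustrative stand-in for the Stat64 pattern used in the patch above.
 * Built on C11 atomics; this is NOT QEMU's actual Stat64 implementation. */
#include <inttypes.h>
#include <stdatomic.h>
#include <stdio.h>

typedef struct {
    _Atomic uint64_t value;
} Stat64Sketch;

/* Writer side, cf. migration_bitmap_sync(): bump the counter atomically. */
static inline void stat64_sketch_add(Stat64Sketch *s, uint64_t v)
{
    atomic_fetch_add_explicit(&s->value, v, memory_order_relaxed);
}

/* Reader side, cf. populate_ram_info()/save_xbzrle_page(): take a snapshot. */
static inline uint64_t stat64_sketch_get(Stat64Sketch *s)
{
    return atomic_load_explicit(&s->value, memory_order_relaxed);
}

int main(void)
{
    Stat64Sketch dirty_sync_count = { 0 };

    /* One increment per dirty-bitmap sync pass. */
    stat64_sketch_add(&dirty_sync_count, 1);

    /* Snapshot the generation once and reuse it, as save_xbzrle_page() does. */
    uint64_t generation = stat64_sketch_get(&dirty_sync_count);
    printf("dirty_sync_count = %" PRIu64 "\n", generation);
    return 0;
}

One design choice visible in the ram.c hunks is worth noting: save_xbzrle_page() now reads the counter once into a local generation and passes that same value to both cache_is_cached() and cache_insert(), so the two calls cannot observe different sync generations if the counter is bumped concurrently; the migration pass event path gets the same single-read treatment.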