aboutsummaryrefslogtreecommitdiff
diff options
context:
space:
mode:
authorPeter Xu <peterx@redhat.com>2025-06-13 10:12:08 -0400
committerFabiano Rosas <farosas@suse.de>2025-07-11 10:37:37 -0300
commit08fb2a933586183be788aac43c62b2993e0a99ce (patch)
tree3c5ba8636a04646e3d6ad42e4fff3030b4b73819
parentb2819530e3134fb47c92c1bf0f3def8ea5b1c8ee (diff)
downloadqemu-08fb2a933586183be788aac43c62b2993e0a99ce.zip
qemu-08fb2a933586183be788aac43c62b2993e0a99ce.tar.gz
qemu-08fb2a933586183be788aac43c62b2993e0a99ce.tar.bz2
migration/postcopy: Drop PostcopyBlocktimeContext.start_time
Now with 64bits, the offsetting using start_time is not needed anymore, because the array can always remember the whole timestamp. Then drop the unused parameter in get_low_time_offset() altogether.

Reviewed-by: Fabiano Rosas <farosas@suse.de>
Link: https://lore.kernel.org/r/20250613141217.474825-6-peterx@redhat.com
Signed-off-by: Peter Xu <peterx@redhat.com>
Signed-off-by: Fabiano Rosas <farosas@suse.de>
-rw-r--r--migration/postcopy-ram.c10
1 file changed, 4 insertions, 6 deletions
diff --git a/migration/postcopy-ram.c b/migration/postcopy-ram.c
index ec91821..e9acb4e 100644
--- a/migration/postcopy-ram.c
+++ b/migration/postcopy-ram.c
@@ -123,7 +123,6 @@ typedef struct PostcopyBlocktimeContext {
uint64_t last_begin;
/* number of vCPU are suspended */
int smp_cpus_down;
- uint64_t start_time;
/*
* Handler for exit event, necessary for
@@ -157,7 +156,6 @@ static struct PostcopyBlocktimeContext *blocktime_context_new(void)
ctx->vcpu_blocktime_total = g_new0(uint64_t, smp_cpus);
ctx->vcpu_addr = g_new0(uintptr_t, smp_cpus);
ctx->exit_notifier.notify = migration_exit_cb;
- ctx->start_time = qemu_clock_get_ms(QEMU_CLOCK_REALTIME);
qemu_add_exit_notifier(&ctx->exit_notifier);
return ctx;
@@ -818,9 +816,9 @@ static int get_mem_fault_cpu_index(uint32_t pid)
return -1;
}
-static uint64_t get_low_time_offset(PostcopyBlocktimeContext *dc)
+static uint64_t get_low_time_offset(void)
{
- return (uint64_t)qemu_clock_get_ms(QEMU_CLOCK_REALTIME) - dc->start_time;
+ return (uint64_t)qemu_clock_get_ms(QEMU_CLOCK_REALTIME);
}
/*
@@ -847,7 +845,7 @@ void mark_postcopy_blocktime_begin(uintptr_t addr, uint32_t ptid,
return;
}
- low_time_offset = get_low_time_offset(dc);
+ low_time_offset = get_low_time_offset();
if (dc->vcpu_addr[cpu] == 0) {
dc->smp_cpus_down++;
}
@@ -907,7 +905,7 @@ static void mark_postcopy_blocktime_end(uintptr_t addr)
return;
}
- low_time_offset = get_low_time_offset(dc);
+ low_time_offset = get_low_time_offset();
/* lookup cpu, to clear it,
* that algorithm looks straightforward, but it's not
* optimal, more optimal algorithm is keeping tree or hash