aboutsummaryrefslogtreecommitdiff
diff options
context:
space:
mode:
authorPeter Xu <peterx@redhat.com>2025-06-13 10:12:06 -0400
committerFabiano Rosas <farosas@suse.de>2025-07-11 10:37:37 -0300
commitc0f47dfb5b06c40ef41641a5f03ebafa8125c557 (patch)
treeacfe92fafabe1534f1623833164684866faf2cb1
parentd2a81ca8c6fbe6ed691889d953d0b5fe2c7e4671 (diff)
downloadqemu-c0f47dfb5b06c40ef41641a5f03ebafa8125c557.zip
qemu-c0f47dfb5b06c40ef41641a5f03ebafa8125c557.tar.gz
qemu-c0f47dfb5b06c40ef41641a5f03ebafa8125c557.tar.bz2
migration/postcopy: Drop all atomic ops in blocktime feature
Now with the mutex protection it's not needed anymore.

Reviewed-by: Fabiano Rosas <farosas@suse.de>
Link: https://lore.kernel.org/r/20250613141217.474825-4-peterx@redhat.com
Signed-off-by: Peter Xu <peterx@redhat.com>
Signed-off-by: Fabiano Rosas <farosas@suse.de>
-rw-r--r-- migration/postcopy-ram.c | 23
1 file changed, 10 insertions, 13 deletions
diff --git a/migration/postcopy-ram.c b/migration/postcopy-ram.c
index 32fa06d..8192553 100644
--- a/migration/postcopy-ram.c
+++ b/migration/postcopy-ram.c
@@ -849,12 +849,12 @@ void mark_postcopy_blocktime_begin(uintptr_t addr, uint32_t ptid,
low_time_offset = get_low_time_offset(dc);
if (dc->vcpu_addr[cpu] == 0) {
- qatomic_inc(&dc->smp_cpus_down);
+ dc->smp_cpus_down++;
}
- qatomic_xchg(&dc->last_begin, low_time_offset);
- qatomic_xchg(&dc->page_fault_vcpu_time[cpu], low_time_offset);
- qatomic_xchg(&dc->vcpu_addr[cpu], addr);
+ dc->last_begin = low_time_offset;
+ dc->page_fault_vcpu_time[cpu] = low_time_offset;
+ dc->vcpu_addr[cpu] = addr;
/*
* The caller should only inject a blocktime entry when the page is
@@ -915,29 +915,26 @@ static void mark_postcopy_blocktime_end(uintptr_t addr)
for (i = 0; i < smp_cpus; i++) {
uint32_t vcpu_blocktime = 0;
- read_vcpu_time = qatomic_fetch_add(&dc->page_fault_vcpu_time[i], 0);
- if (qatomic_fetch_add(&dc->vcpu_addr[i], 0) != addr ||
- read_vcpu_time == 0) {
+ read_vcpu_time = dc->page_fault_vcpu_time[i];
+ if (dc->vcpu_addr[i] != addr || read_vcpu_time == 0) {
continue;
}
- qatomic_xchg(&dc->vcpu_addr[i], 0);
+ dc->vcpu_addr[i] = 0;
vcpu_blocktime = low_time_offset - read_vcpu_time;
affected_cpu += 1;
/* we need to know is that mark_postcopy_end was due to
* faulted page, another possible case it's prefetched
* page and in that case we shouldn't be here */
- if (!vcpu_total_blocktime &&
- qatomic_fetch_add(&dc->smp_cpus_down, 0) == smp_cpus) {
+ if (!vcpu_total_blocktime && dc->smp_cpus_down == smp_cpus) {
vcpu_total_blocktime = true;
}
/* continue cycle, due to one page could affect several vCPUs */
dc->vcpu_blocktime[i] += vcpu_blocktime;
}
- qatomic_sub(&dc->smp_cpus_down, affected_cpu);
+ dc->smp_cpus_down -= affected_cpu;
if (vcpu_total_blocktime) {
- dc->total_blocktime += low_time_offset - qatomic_fetch_add(
- &dc->last_begin, 0);
+ dc->total_blocktime += low_time_offset - dc->last_begin;
}
trace_mark_postcopy_blocktime_end(addr, dc, dc->total_blocktime,
affected_cpu);