From d73415a315471ac0b127ed3fad45c8ec5d711de1 Mon Sep 17 00:00:00 2001 From: Stefan Hajnoczi Date: Wed, 23 Sep 2020 11:56:46 +0100 Subject: qemu/atomic.h: rename atomic_ to qatomic_ MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit clang's C11 atomic_fetch_*() functions only take a C11 atomic type pointer argument. QEMU uses direct types (int, etc) and this causes a compiler error when QEMU code calls these functions in a source file that also included <stdatomic.h> via a system header file: $ CC=clang CXX=clang++ ./configure ... && make ../util/async.c:79:17: error: address argument to atomic operation must be a pointer to _Atomic type ('unsigned int *' invalid) Avoid using atomic_*() names in QEMU's atomic.h since that namespace is used by <stdatomic.h>. Prefix QEMU's APIs with 'q' so that atomic.h and <stdatomic.h> can co-exist. I checked /usr/include on my machine and searched GitHub for existing "qatomic_" users but there seem to be none. This patch was generated using: $ git grep -h -o '\<q\?atomic_[a-z0-9_]*' include/qemu/atomic.h | sort -u >/tmp/changed_identifiers $ for identifier in $(</tmp/changed_identifiers); do sed -i "s%\<$identifier\>%q$identifier%g" \ $(git grep -I -l "\<$identifier\>"); done I manually fixed line-wrap issues and misaligned rST tables. 
Signed-off-by: Stefan Hajnoczi Reviewed-by: Philippe Mathieu-Daudé Acked-by: Paolo Bonzini Message-Id: <20200923105646.47864-1-stefanha@redhat.com> --- softmmu/cpu-throttle.c | 10 +++++----- softmmu/cpus.c | 42 +++++++++++++++++++++--------------------- softmmu/memory.c | 6 +++--- softmmu/vl.c | 2 +- 4 files changed, 30 insertions(+), 30 deletions(-) (limited to 'softmmu') diff --git a/softmmu/cpu-throttle.c b/softmmu/cpu-throttle.c index 4e6b281..2ec4b8e 100644 --- a/softmmu/cpu-throttle.c +++ b/softmmu/cpu-throttle.c @@ -64,7 +64,7 @@ static void cpu_throttle_thread(CPUState *cpu, run_on_cpu_data opaque) } sleeptime_ns = endtime_ns - qemu_clock_get_ns(QEMU_CLOCK_REALTIME); } - atomic_set(&cpu->throttle_thread_scheduled, 0); + qatomic_set(&cpu->throttle_thread_scheduled, 0); } static void cpu_throttle_timer_tick(void *opaque) @@ -77,7 +77,7 @@ static void cpu_throttle_timer_tick(void *opaque) return; } CPU_FOREACH(cpu) { - if (!atomic_xchg(&cpu->throttle_thread_scheduled, 1)) { + if (!qatomic_xchg(&cpu->throttle_thread_scheduled, 1)) { async_run_on_cpu(cpu, cpu_throttle_thread, RUN_ON_CPU_NULL); } @@ -94,7 +94,7 @@ void cpu_throttle_set(int new_throttle_pct) new_throttle_pct = MIN(new_throttle_pct, CPU_THROTTLE_PCT_MAX); new_throttle_pct = MAX(new_throttle_pct, CPU_THROTTLE_PCT_MIN); - atomic_set(&throttle_percentage, new_throttle_pct); + qatomic_set(&throttle_percentage, new_throttle_pct); timer_mod(throttle_timer, qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL_RT) + CPU_THROTTLE_TIMESLICE_NS); @@ -102,7 +102,7 @@ void cpu_throttle_set(int new_throttle_pct) void cpu_throttle_stop(void) { - atomic_set(&throttle_percentage, 0); + qatomic_set(&throttle_percentage, 0); } bool cpu_throttle_active(void) @@ -112,7 +112,7 @@ bool cpu_throttle_active(void) int cpu_throttle_get_percentage(void) { - return atomic_read(&throttle_percentage); + return qatomic_read(&throttle_percentage); } void cpu_throttle_init(void) diff --git a/softmmu/cpus.c b/softmmu/cpus.c index e3b9806..ac8940d 
100644 --- a/softmmu/cpus.c +++ b/softmmu/cpus.c @@ -192,7 +192,7 @@ static void cpu_update_icount_locked(CPUState *cpu) int64_t executed = cpu_get_icount_executed(cpu); cpu->icount_budget -= executed; - atomic_set_i64(&timers_state.qemu_icount, + qatomic_set_i64(&timers_state.qemu_icount, timers_state.qemu_icount + executed); } @@ -223,13 +223,13 @@ static int64_t cpu_get_icount_raw_locked(void) cpu_update_icount_locked(cpu); } /* The read is protected by the seqlock, but needs atomic64 to avoid UB */ - return atomic_read_i64(&timers_state.qemu_icount); + return qatomic_read_i64(&timers_state.qemu_icount); } static int64_t cpu_get_icount_locked(void) { int64_t icount = cpu_get_icount_raw_locked(); - return atomic_read_i64(&timers_state.qemu_icount_bias) + + return qatomic_read_i64(&timers_state.qemu_icount_bias) + cpu_icount_to_ns(icount); } @@ -262,7 +262,7 @@ int64_t cpu_get_icount(void) int64_t cpu_icount_to_ns(int64_t icount) { - return icount << atomic_read(&timers_state.icount_time_shift); + return icount << qatomic_read(&timers_state.icount_time_shift); } static int64_t cpu_get_ticks_locked(void) @@ -393,18 +393,18 @@ static void icount_adjust(void) && last_delta + ICOUNT_WOBBLE < delta * 2 && timers_state.icount_time_shift > 0) { /* The guest is getting too far ahead. Slow time down. */ - atomic_set(&timers_state.icount_time_shift, + qatomic_set(&timers_state.icount_time_shift, timers_state.icount_time_shift - 1); } if (delta < 0 && last_delta - ICOUNT_WOBBLE > delta * 2 && timers_state.icount_time_shift < MAX_ICOUNT_SHIFT) { /* The guest is getting too far behind. Speed time up. 
*/ - atomic_set(&timers_state.icount_time_shift, + qatomic_set(&timers_state.icount_time_shift, timers_state.icount_time_shift + 1); } last_delta = delta; - atomic_set_i64(&timers_state.qemu_icount_bias, + qatomic_set_i64(&timers_state.qemu_icount_bias, cur_icount - (timers_state.qemu_icount << timers_state.icount_time_shift)); seqlock_write_unlock(&timers_state.vm_clock_seqlock, @@ -428,7 +428,7 @@ static void icount_adjust_vm(void *opaque) static int64_t qemu_icount_round(int64_t count) { - int shift = atomic_read(&timers_state.icount_time_shift); + int shift = qatomic_read(&timers_state.icount_time_shift); return (count + (1 << shift) - 1) >> shift; } @@ -466,7 +466,7 @@ static void icount_warp_rt(void) int64_t delta = clock - cur_icount; warp_delta = MIN(warp_delta, delta); } - atomic_set_i64(&timers_state.qemu_icount_bias, + qatomic_set_i64(&timers_state.qemu_icount_bias, timers_state.qemu_icount_bias + warp_delta); } timers_state.vm_clock_warp_start = -1; @@ -499,7 +499,7 @@ void qtest_clock_warp(int64_t dest) seqlock_write_lock(&timers_state.vm_clock_seqlock, &timers_state.vm_clock_lock); - atomic_set_i64(&timers_state.qemu_icount_bias, + qatomic_set_i64(&timers_state.qemu_icount_bias, timers_state.qemu_icount_bias + warp); seqlock_write_unlock(&timers_state.vm_clock_seqlock, &timers_state.vm_clock_lock); @@ -583,7 +583,7 @@ void qemu_start_warp_timer(void) */ seqlock_write_lock(&timers_state.vm_clock_seqlock, &timers_state.vm_clock_lock); - atomic_set_i64(&timers_state.qemu_icount_bias, + qatomic_set_i64(&timers_state.qemu_icount_bias, timers_state.qemu_icount_bias + deadline); seqlock_write_unlock(&timers_state.vm_clock_seqlock, &timers_state.vm_clock_lock); @@ -837,11 +837,11 @@ static void qemu_cpu_kick_rr_next_cpu(void) { CPUState *cpu; do { - cpu = atomic_mb_read(&tcg_current_rr_cpu); + cpu = qatomic_mb_read(&tcg_current_rr_cpu); if (cpu) { cpu_exit(cpu); } - } while (cpu != atomic_mb_read(&tcg_current_rr_cpu)); + } while (cpu != 
qatomic_mb_read(&tcg_current_rr_cpu)); } /* Kick all RR vCPUs */ @@ -1110,7 +1110,7 @@ static void qemu_cpu_stop(CPUState *cpu, bool exit) static void qemu_wait_io_event_common(CPUState *cpu) { - atomic_mb_set(&cpu->thread_kicked, false); + qatomic_mb_set(&cpu->thread_kicked, false); if (cpu->stop) { qemu_cpu_stop(cpu, false); } @@ -1356,7 +1356,7 @@ static int tcg_cpu_exec(CPUState *cpu) ret = cpu_exec(cpu); cpu_exec_end(cpu); #ifdef CONFIG_PROFILER - atomic_set(&tcg_ctx->prof.cpu_exec_time, + qatomic_set(&tcg_ctx->prof.cpu_exec_time, tcg_ctx->prof.cpu_exec_time + profile_getclock() - ti); #endif return ret; @@ -1443,7 +1443,7 @@ static void *qemu_tcg_rr_cpu_thread_fn(void *arg) while (cpu && cpu_work_list_empty(cpu) && !cpu->exit_request) { - atomic_mb_set(&tcg_current_rr_cpu, cpu); + qatomic_mb_set(&tcg_current_rr_cpu, cpu); current_cpu = cpu; qemu_clock_enable(QEMU_CLOCK_VIRTUAL, @@ -1479,11 +1479,11 @@ static void *qemu_tcg_rr_cpu_thread_fn(void *arg) cpu = CPU_NEXT(cpu); } /* while (cpu && !cpu->exit_request).. */ - /* Does not need atomic_mb_set because a spurious wakeup is okay. */ - atomic_set(&tcg_current_rr_cpu, NULL); + /* Does not need qatomic_mb_set because a spurious wakeup is okay. 
*/ + qatomic_set(&tcg_current_rr_cpu, NULL); if (cpu && cpu->exit_request) { - atomic_mb_set(&cpu->exit_request, 0); + qatomic_mb_set(&cpu->exit_request, 0); } if (use_icount && all_cpu_threads_idle()) { @@ -1687,7 +1687,7 @@ static void *qemu_tcg_cpu_thread_fn(void *arg) } } - atomic_mb_set(&cpu->exit_request, 0); + qatomic_mb_set(&cpu->exit_request, 0); qemu_wait_io_event(cpu); } while (!cpu->unplug || cpu_can_run(cpu)); @@ -1776,7 +1776,7 @@ bool qemu_mutex_iothread_locked(void) */ void qemu_mutex_lock_iothread_impl(const char *file, int line) { - QemuMutexLockFunc bql_lock = atomic_read(&qemu_bql_mutex_lock_func); + QemuMutexLockFunc bql_lock = qatomic_read(&qemu_bql_mutex_lock_func); g_assert(!qemu_mutex_iothread_locked()); bql_lock(&qemu_global_mutex, file, line); diff --git a/softmmu/memory.c b/softmmu/memory.c index d030eb6..da5f90f 100644 --- a/softmmu/memory.c +++ b/softmmu/memory.c @@ -294,12 +294,12 @@ static void flatview_destroy(FlatView *view) static bool flatview_ref(FlatView *view) { - return atomic_fetch_inc_nonzero(&view->ref) > 0; + return qatomic_fetch_inc_nonzero(&view->ref) > 0; } void flatview_unref(FlatView *view) { - if (atomic_fetch_dec(&view->ref) == 1) { + if (qatomic_fetch_dec(&view->ref) == 1) { trace_flatview_destroy_rcu(view, view->root); assert(view->root); call_rcu(view, flatview_destroy, rcu); @@ -1027,7 +1027,7 @@ static void address_space_set_flatview(AddressSpace *as) } /* Writes are protected by the BQL. 
*/ - atomic_rcu_set(&as->current_map, new_view); + qatomic_rcu_set(&as->current_map, new_view); if (old_view) { flatview_unref(old_view); } diff --git a/softmmu/vl.c b/softmmu/vl.c index f7b1034..50d8c2e 100644 --- a/softmmu/vl.c +++ b/softmmu/vl.c @@ -1320,7 +1320,7 @@ ShutdownCause qemu_reset_requested_get(void) static int qemu_shutdown_requested(void) { - return atomic_xchg(&shutdown_requested, SHUTDOWN_CAUSE_NONE); + return qatomic_xchg(&shutdown_requested, SHUTDOWN_CAUSE_NONE); } static void qemu_kill_report(void) -- cgit v1.1