diff options
author | Stefan Hajnoczi <stefanha@redhat.com> | 2020-09-23 11:56:46 +0100 |
---|---|---|
committer | Stefan Hajnoczi <stefanha@redhat.com> | 2020-09-23 16:07:44 +0100 |
commit | d73415a315471ac0b127ed3fad45c8ec5d711de1 (patch) | |
tree | bae20b3a39968fdfb4340b1a39b533333a8e6fd0 /hw | |
parent | ed7db34b5aedba4487fd949b2e545eef954f093e (diff) | |
download | qemu-d73415a315471ac0b127ed3fad45c8ec5d711de1.zip qemu-d73415a315471ac0b127ed3fad45c8ec5d711de1.tar.gz qemu-d73415a315471ac0b127ed3fad45c8ec5d711de1.tar.bz2 |
qemu/atomic.h: rename atomic_ to qatomic_
clang's C11 atomic_fetch_*() functions only take a C11 atomic type
pointer argument. QEMU uses direct types (int, etc) and this causes a
compiler error when QEMU code calls these functions in a source file
that also includes <stdatomic.h> via a system header file:
$ CC=clang CXX=clang++ ./configure ... && make
../util/async.c:79:17: error: address argument to atomic operation must be a pointer to _Atomic type ('unsigned int *' invalid)
Avoid using atomic_*() names in QEMU's atomic.h since that namespace is
used by <stdatomic.h>. Prefix QEMU's APIs with 'q' so that atomic.h
and <stdatomic.h> can co-exist. I checked /usr/include on my machine and
searched GitHub for existing "qatomic_" users but there seem to be none.
This patch was generated using:
$ git grep -h -o '\<atomic\(64\)\?_[a-z0-9_]\+' include/qemu/atomic.h | \
sort -u >/tmp/changed_identifiers
$ for identifier in $(</tmp/changed_identifiers); do
sed -i "s%\<$identifier\>%q$identifier%g" \
$(git grep -I -l "\<$identifier\>")
done
I manually fixed line-wrap issues and misaligned rST tables.
Signed-off-by: Stefan Hajnoczi <stefanha@redhat.com>
Reviewed-by: Philippe Mathieu-Daudé <philmd@redhat.com>
Acked-by: Paolo Bonzini <pbonzini@redhat.com>
Message-Id: <20200923105646.47864-1-stefanha@redhat.com>
Diffstat (limited to 'hw')
-rw-r--r-- | hw/core/cpu.c | 6 | ||||
-rw-r--r-- | hw/display/qxl.c | 4 | ||||
-rw-r--r-- | hw/hyperv/hyperv.c | 10 | ||||
-rw-r--r-- | hw/hyperv/vmbus.c | 2 | ||||
-rw-r--r-- | hw/i386/xen/xen-hvm.c | 2 | ||||
-rw-r--r-- | hw/intc/rx_icu.c | 12 | ||||
-rw-r--r-- | hw/intc/sifive_plic.c | 4 | ||||
-rw-r--r-- | hw/misc/edu.c | 16 | ||||
-rw-r--r-- | hw/net/virtio-net.c | 10 | ||||
-rw-r--r-- | hw/rdma/rdma_backend.c | 18 | ||||
-rw-r--r-- | hw/rdma/rdma_rm.c | 2 | ||||
-rw-r--r-- | hw/rdma/vmw/pvrdma_dev_ring.c | 4 | ||||
-rw-r--r-- | hw/s390x/s390-pci-bus.c | 2 | ||||
-rw-r--r-- | hw/s390x/virtio-ccw.c | 2 | ||||
-rw-r--r-- | hw/virtio/vhost.c | 2 | ||||
-rw-r--r-- | hw/virtio/virtio-mmio.c | 6 | ||||
-rw-r--r-- | hw/virtio/virtio-pci.c | 6 | ||||
-rw-r--r-- | hw/virtio/virtio.c | 16 | ||||
-rw-r--r-- | hw/xtensa/pic_cpu.c | 4 |
19 files changed, 64 insertions, 64 deletions
diff --git a/hw/core/cpu.c b/hw/core/cpu.c index 8f65383..c55c09f 100644 --- a/hw/core/cpu.c +++ b/hw/core/cpu.c @@ -111,10 +111,10 @@ void cpu_reset_interrupt(CPUState *cpu, int mask) void cpu_exit(CPUState *cpu) { - atomic_set(&cpu->exit_request, 1); + qatomic_set(&cpu->exit_request, 1); /* Ensure cpu_exec will see the exit request after TCG has exited. */ smp_wmb(); - atomic_set(&cpu->icount_decr_ptr->u16.high, -1); + qatomic_set(&cpu->icount_decr_ptr->u16.high, -1); } int cpu_write_elf32_qemunote(WriteCoreDumpFunction f, CPUState *cpu, @@ -261,7 +261,7 @@ static void cpu_common_reset(DeviceState *dev) cpu->halted = cpu->start_powered_off; cpu->mem_io_pc = 0; cpu->icount_extra = 0; - atomic_set(&cpu->icount_decr_ptr->u32, 0); + qatomic_set(&cpu->icount_decr_ptr->u32, 0); cpu->can_do_io = 1; cpu->exception_index = -1; cpu->crash_occurred = false; diff --git a/hw/display/qxl.c b/hw/display/qxl.c index 1187134..431c107 100644 --- a/hw/display/qxl.c +++ b/hw/display/qxl.c @@ -1908,7 +1908,7 @@ static void qxl_send_events(PCIQXLDevice *d, uint32_t events) /* * Older versions of Spice forgot to define the QXLRam struct * with the '__aligned__(4)' attribute. clang 7 and newer will - * thus warn that atomic_fetch_or(&d->ram->int_pending, ...) + * thus warn that qatomic_fetch_or(&d->ram->int_pending, ...) * might be a misaligned atomic access, and will generate an * out-of-line call for it, which results in a link error since * we don't currently link against libatomic. 
@@ -1928,7 +1928,7 @@ static void qxl_send_events(PCIQXLDevice *d, uint32_t events) #define ALIGNED_UINT32_PTR(P) ((uint32_t *)P) #endif - old_pending = atomic_fetch_or(ALIGNED_UINT32_PTR(&d->ram->int_pending), + old_pending = qatomic_fetch_or(ALIGNED_UINT32_PTR(&d->ram->int_pending), le_events); if ((old_pending & le_events) == le_events) { return; diff --git a/hw/hyperv/hyperv.c b/hw/hyperv/hyperv.c index 4b26db1..cb1074f 100644 --- a/hw/hyperv/hyperv.c +++ b/hw/hyperv/hyperv.c @@ -231,7 +231,7 @@ static void sint_msg_bh(void *opaque) HvSintRoute *sint_route = opaque; HvSintStagedMessage *staged_msg = sint_route->staged_msg; - if (atomic_read(&staged_msg->state) != HV_STAGED_MSG_POSTED) { + if (qatomic_read(&staged_msg->state) != HV_STAGED_MSG_POSTED) { /* status nor ready yet (spurious ack from guest?), ignore */ return; } @@ -240,7 +240,7 @@ static void sint_msg_bh(void *opaque) staged_msg->status = 0; /* staged message processing finished, ready to start over */ - atomic_set(&staged_msg->state, HV_STAGED_MSG_FREE); + qatomic_set(&staged_msg->state, HV_STAGED_MSG_FREE); /* drop the reference taken in hyperv_post_msg */ hyperv_sint_route_unref(sint_route); } @@ -278,7 +278,7 @@ static void cpu_post_msg(CPUState *cs, run_on_cpu_data data) memory_region_set_dirty(&synic->msg_page_mr, 0, sizeof(*synic->msg_page)); posted: - atomic_set(&staged_msg->state, HV_STAGED_MSG_POSTED); + qatomic_set(&staged_msg->state, HV_STAGED_MSG_POSTED); /* * Notify the msg originator of the progress made; if the slot was busy we * set msg_pending flag in it so it will be the guest who will do EOM and @@ -301,7 +301,7 @@ int hyperv_post_msg(HvSintRoute *sint_route, struct hyperv_message *src_msg) assert(staged_msg); /* grab the staging area */ - if (atomic_cmpxchg(&staged_msg->state, HV_STAGED_MSG_FREE, + if (qatomic_cmpxchg(&staged_msg->state, HV_STAGED_MSG_FREE, HV_STAGED_MSG_BUSY) != HV_STAGED_MSG_FREE) { return -EAGAIN; } @@ -351,7 +351,7 @@ int hyperv_set_event_flag(HvSintRoute 
*sint_route, unsigned eventno) set_mask = BIT_MASK(eventno); flags = synic->event_page->slot[sint_route->sint].flags; - if ((atomic_fetch_or(&flags[set_idx], set_mask) & set_mask) != set_mask) { + if ((qatomic_fetch_or(&flags[set_idx], set_mask) & set_mask) != set_mask) { memory_region_set_dirty(&synic->event_page_mr, 0, sizeof(*synic->event_page)); ret = hyperv_sint_route_set_sint(sint_route); diff --git a/hw/hyperv/vmbus.c b/hw/hyperv/vmbus.c index 6ef895b..896e981 100644 --- a/hw/hyperv/vmbus.c +++ b/hw/hyperv/vmbus.c @@ -747,7 +747,7 @@ static int vmbus_channel_notify_guest(VMBusChannel *chan) idx = BIT_WORD(chan->id); mask = BIT_MASK(chan->id); - if ((atomic_fetch_or(&int_map[idx], mask) & mask) != mask) { + if ((qatomic_fetch_or(&int_map[idx], mask) & mask) != mask) { res = hyperv_sint_route_set_sint(chan->notify_route); dirty = len; } diff --git a/hw/i386/xen/xen-hvm.c b/hw/i386/xen/xen-hvm.c index cde981b..a39a648 100644 --- a/hw/i386/xen/xen-hvm.c +++ b/hw/i386/xen/xen-hvm.c @@ -1140,7 +1140,7 @@ static int handle_buffered_iopage(XenIOState *state) assert(req.dir == IOREQ_WRITE); assert(!req.data_is_ptr); - atomic_add(&buf_page->read_pointer, qw + 1); + qatomic_add(&buf_page->read_pointer, qw + 1); } return req.count; diff --git a/hw/intc/rx_icu.c b/hw/intc/rx_icu.c index df4b6a8..94e17a9 100644 --- a/hw/intc/rx_icu.c +++ b/hw/intc/rx_icu.c @@ -81,8 +81,8 @@ static void rxicu_request(RXICUState *icu, int n_IRQ) int enable; enable = icu->ier[n_IRQ / 8] & (1 << (n_IRQ & 7)); - if (n_IRQ > 0 && enable != 0 && atomic_read(&icu->req_irq) < 0) { - atomic_set(&icu->req_irq, n_IRQ); + if (n_IRQ > 0 && enable != 0 && qatomic_read(&icu->req_irq) < 0) { + qatomic_set(&icu->req_irq, n_IRQ); set_irq(icu, n_IRQ, rxicu_level(icu, n_IRQ)); } } @@ -124,10 +124,10 @@ static void rxicu_set_irq(void *opaque, int n_IRQ, int level) } if (issue == 0 && src->sense == TRG_LEVEL) { icu->ir[n_IRQ] = 0; - if (atomic_read(&icu->req_irq) == n_IRQ) { + if (qatomic_read(&icu->req_irq) == 
n_IRQ) { /* clear request */ set_irq(icu, n_IRQ, 0); - atomic_set(&icu->req_irq, -1); + qatomic_set(&icu->req_irq, -1); } return; } @@ -144,11 +144,11 @@ static void rxicu_ack_irq(void *opaque, int no, int level) int n_IRQ; int max_pri; - n_IRQ = atomic_read(&icu->req_irq); + n_IRQ = qatomic_read(&icu->req_irq); if (n_IRQ < 0) { return; } - atomic_set(&icu->req_irq, -1); + qatomic_set(&icu->req_irq, -1); if (icu->src[n_IRQ].sense != TRG_LEVEL) { icu->ir[n_IRQ] = 0; } diff --git a/hw/intc/sifive_plic.c b/hw/intc/sifive_plic.c index af611f8..f42fd69 100644 --- a/hw/intc/sifive_plic.c +++ b/hw/intc/sifive_plic.c @@ -89,12 +89,12 @@ static void sifive_plic_print_state(SiFivePLICState *plic) static uint32_t atomic_set_masked(uint32_t *a, uint32_t mask, uint32_t value) { - uint32_t old, new, cmp = atomic_read(a); + uint32_t old, new, cmp = qatomic_read(a); do { old = cmp; new = (old & ~mask) | (value & mask); - cmp = atomic_cmpxchg(a, old, new); + cmp = qatomic_cmpxchg(a, old, new); } while (old != cmp); return old; diff --git a/hw/misc/edu.c b/hw/misc/edu.c index 0ff9d1a..e935c41 100644 --- a/hw/misc/edu.c +++ b/hw/misc/edu.c @@ -212,7 +212,7 @@ static uint64_t edu_mmio_read(void *opaque, hwaddr addr, unsigned size) qemu_mutex_unlock(&edu->thr_mutex); break; case 0x20: - val = atomic_read(&edu->status); + val = qatomic_read(&edu->status); break; case 0x24: val = edu->irq_status; @@ -252,7 +252,7 @@ static void edu_mmio_write(void *opaque, hwaddr addr, uint64_t val, edu->addr4 = ~val; break; case 0x08: - if (atomic_read(&edu->status) & EDU_STATUS_COMPUTING) { + if (qatomic_read(&edu->status) & EDU_STATUS_COMPUTING) { break; } /* EDU_STATUS_COMPUTING cannot go 0->1 concurrently, because it is only @@ -260,15 +260,15 @@ static void edu_mmio_write(void *opaque, hwaddr addr, uint64_t val, */ qemu_mutex_lock(&edu->thr_mutex); edu->fact = val; - atomic_or(&edu->status, EDU_STATUS_COMPUTING); + qatomic_or(&edu->status, EDU_STATUS_COMPUTING); qemu_cond_signal(&edu->thr_cond); 
qemu_mutex_unlock(&edu->thr_mutex); break; case 0x20: if (val & EDU_STATUS_IRQFACT) { - atomic_or(&edu->status, EDU_STATUS_IRQFACT); + qatomic_or(&edu->status, EDU_STATUS_IRQFACT); } else { - atomic_and(&edu->status, ~EDU_STATUS_IRQFACT); + qatomic_and(&edu->status, ~EDU_STATUS_IRQFACT); } break; case 0x60: @@ -322,7 +322,7 @@ static void *edu_fact_thread(void *opaque) uint32_t val, ret = 1; qemu_mutex_lock(&edu->thr_mutex); - while ((atomic_read(&edu->status) & EDU_STATUS_COMPUTING) == 0 && + while ((qatomic_read(&edu->status) & EDU_STATUS_COMPUTING) == 0 && !edu->stopping) { qemu_cond_wait(&edu->thr_cond, &edu->thr_mutex); } @@ -347,9 +347,9 @@ static void *edu_fact_thread(void *opaque) qemu_mutex_lock(&edu->thr_mutex); edu->fact = ret; qemu_mutex_unlock(&edu->thr_mutex); - atomic_and(&edu->status, ~EDU_STATUS_COMPUTING); + qatomic_and(&edu->status, ~EDU_STATUS_COMPUTING); - if (atomic_read(&edu->status) & EDU_STATUS_IRQFACT) { + if (qatomic_read(&edu->status) & EDU_STATUS_IRQFACT) { qemu_mutex_lock_iothread(); edu_raise_irq(edu, FACT_IRQ); qemu_mutex_unlock_iothread(); diff --git a/hw/net/virtio-net.c b/hw/net/virtio-net.c index cb0d270..7bf27b9 100644 --- a/hw/net/virtio-net.c +++ b/hw/net/virtio-net.c @@ -933,7 +933,7 @@ static void virtio_net_set_features(VirtIODevice *vdev, uint64_t features) if (virtio_has_feature(features, VIRTIO_NET_F_STANDBY)) { qapi_event_send_failover_negotiated(n->netclient_name); - atomic_set(&n->primary_should_be_hidden, false); + qatomic_set(&n->primary_should_be_hidden, false); failover_add_primary(n, &err); if (err) { n->primary_dev = virtio_connect_failover_devices(n, n->qdev, &err); @@ -3168,7 +3168,7 @@ static void virtio_net_handle_migration_primary(VirtIONet *n, bool should_be_hidden; Error *err = NULL; - should_be_hidden = atomic_read(&n->primary_should_be_hidden); + should_be_hidden = qatomic_read(&n->primary_should_be_hidden); if (!n->primary_dev) { n->primary_dev = virtio_connect_failover_devices(n, n->qdev, &err); @@ 
-3183,7 +3183,7 @@ static void virtio_net_handle_migration_primary(VirtIONet *n, qdev_get_vmsd(n->primary_dev), n->primary_dev); qapi_event_send_unplug_primary(n->primary_device_id); - atomic_set(&n->primary_should_be_hidden, true); + qatomic_set(&n->primary_should_be_hidden, true); } else { warn_report("couldn't unplug primary device"); } @@ -3234,7 +3234,7 @@ static int virtio_net_primary_should_be_hidden(DeviceListener *listener, n->primary_device_opts = device_opts; /* primary_should_be_hidden is set during feature negotiation */ - hide = atomic_read(&n->primary_should_be_hidden); + hide = qatomic_read(&n->primary_should_be_hidden); if (n->primary_device_dict) { g_free(n->primary_device_id); @@ -3291,7 +3291,7 @@ static void virtio_net_device_realize(DeviceState *dev, Error **errp) if (n->failover) { n->primary_listener.should_be_hidden = virtio_net_primary_should_be_hidden; - atomic_set(&n->primary_should_be_hidden, true); + qatomic_set(&n->primary_should_be_hidden, true); device_listener_register(&n->primary_listener); n->migration_state.notify = virtio_net_migration_state_notifier; add_migration_state_change_notifier(&n->migration_state); diff --git a/hw/rdma/rdma_backend.c b/hw/rdma/rdma_backend.c index db7e5c8..5de010b 100644 --- a/hw/rdma/rdma_backend.c +++ b/hw/rdma/rdma_backend.c @@ -68,7 +68,7 @@ static void free_cqe_ctx(gpointer data, gpointer user_data) bctx = rdma_rm_get_cqe_ctx(rdma_dev_res, cqe_ctx_id); if (bctx) { rdma_rm_dealloc_cqe_ctx(rdma_dev_res, cqe_ctx_id); - atomic_dec(&rdma_dev_res->stats.missing_cqe); + qatomic_dec(&rdma_dev_res->stats.missing_cqe); } g_free(bctx); } @@ -81,7 +81,7 @@ static void clean_recv_mads(RdmaBackendDev *backend_dev) cqe_ctx_id = rdma_protected_qlist_pop_int64(&backend_dev-> recv_mads_list); if (cqe_ctx_id != -ENOENT) { - atomic_inc(&backend_dev->rdma_dev_res->stats.missing_cqe); + qatomic_inc(&backend_dev->rdma_dev_res->stats.missing_cqe); free_cqe_ctx(GINT_TO_POINTER(cqe_ctx_id), backend_dev->rdma_dev_res); } 
@@ -123,7 +123,7 @@ static int rdma_poll_cq(RdmaDeviceResources *rdma_dev_res, struct ibv_cq *ibcq) } total_ne += ne; } while (ne > 0); - atomic_sub(&rdma_dev_res->stats.missing_cqe, total_ne); + qatomic_sub(&rdma_dev_res->stats.missing_cqe, total_ne); } if (ne < 0) { @@ -195,17 +195,17 @@ static void *comp_handler_thread(void *arg) static inline void disable_rdmacm_mux_async(RdmaBackendDev *backend_dev) { - atomic_set(&backend_dev->rdmacm_mux.can_receive, 0); + qatomic_set(&backend_dev->rdmacm_mux.can_receive, 0); } static inline void enable_rdmacm_mux_async(RdmaBackendDev *backend_dev) { - atomic_set(&backend_dev->rdmacm_mux.can_receive, sizeof(RdmaCmMuxMsg)); + qatomic_set(&backend_dev->rdmacm_mux.can_receive, sizeof(RdmaCmMuxMsg)); } static inline int rdmacm_mux_can_process_async(RdmaBackendDev *backend_dev) { - return atomic_read(&backend_dev->rdmacm_mux.can_receive); + return qatomic_read(&backend_dev->rdmacm_mux.can_receive); } static int rdmacm_mux_check_op_status(CharBackend *mad_chr_be) @@ -555,7 +555,7 @@ void rdma_backend_post_send(RdmaBackendDev *backend_dev, goto err_dealloc_cqe_ctx; } - atomic_inc(&backend_dev->rdma_dev_res->stats.missing_cqe); + qatomic_inc(&backend_dev->rdma_dev_res->stats.missing_cqe); backend_dev->rdma_dev_res->stats.tx++; return; @@ -658,7 +658,7 @@ void rdma_backend_post_recv(RdmaBackendDev *backend_dev, goto err_dealloc_cqe_ctx; } - atomic_inc(&backend_dev->rdma_dev_res->stats.missing_cqe); + qatomic_inc(&backend_dev->rdma_dev_res->stats.missing_cqe); backend_dev->rdma_dev_res->stats.rx_bufs++; return; @@ -710,7 +710,7 @@ void rdma_backend_post_srq_recv(RdmaBackendDev *backend_dev, goto err_dealloc_cqe_ctx; } - atomic_inc(&backend_dev->rdma_dev_res->stats.missing_cqe); + qatomic_inc(&backend_dev->rdma_dev_res->stats.missing_cqe); backend_dev->rdma_dev_res->stats.rx_bufs++; backend_dev->rdma_dev_res->stats.rx_srq++; diff --git a/hw/rdma/rdma_rm.c b/hw/rdma/rdma_rm.c index 60957f8..49141d4 100644 --- a/hw/rdma/rdma_rm.c +++ 
b/hw/rdma/rdma_rm.c @@ -790,7 +790,7 @@ int rdma_rm_init(RdmaDeviceResources *dev_res, struct ibv_device_attr *dev_attr) qemu_mutex_init(&dev_res->lock); memset(&dev_res->stats, 0, sizeof(dev_res->stats)); - atomic_set(&dev_res->stats.missing_cqe, 0); + qatomic_set(&dev_res->stats.missing_cqe, 0); return 0; } diff --git a/hw/rdma/vmw/pvrdma_dev_ring.c b/hw/rdma/vmw/pvrdma_dev_ring.c index c122fe7..f0bcde7 100644 --- a/hw/rdma/vmw/pvrdma_dev_ring.c +++ b/hw/rdma/vmw/pvrdma_dev_ring.c @@ -38,8 +38,8 @@ int pvrdma_ring_init(PvrdmaRing *ring, const char *name, PCIDevice *dev, ring->max_elems = max_elems; ring->elem_sz = elem_sz; /* TODO: Give a moment to think if we want to redo driver settings - atomic_set(&ring->ring_state->prod_tail, 0); - atomic_set(&ring->ring_state->cons_head, 0); + qatomic_set(&ring->ring_state->prod_tail, 0); + qatomic_set(&ring->ring_state->cons_head, 0); */ ring->npages = npages; ring->pages = g_malloc(npages * sizeof(void *)); diff --git a/hw/s390x/s390-pci-bus.c b/hw/s390x/s390-pci-bus.c index 92146a2..fb4cee8 100644 --- a/hw/s390x/s390-pci-bus.c +++ b/hw/s390x/s390-pci-bus.c @@ -650,7 +650,7 @@ static uint8_t set_ind_atomic(uint64_t ind_loc, uint8_t to_be_set) actual = *ind_addr; do { expected = actual; - actual = atomic_cmpxchg(ind_addr, expected, expected | to_be_set); + actual = qatomic_cmpxchg(ind_addr, expected, expected | to_be_set); } while (actual != expected); cpu_physical_memory_unmap((void *)ind_addr, len, 1, len); diff --git a/hw/s390x/virtio-ccw.c b/hw/s390x/virtio-ccw.c index 8feb345..8d140dc 100644 --- a/hw/s390x/virtio-ccw.c +++ b/hw/s390x/virtio-ccw.c @@ -800,7 +800,7 @@ static uint8_t virtio_set_ind_atomic(SubchDev *sch, uint64_t ind_loc, actual = *ind_addr; do { expected = actual; - actual = atomic_cmpxchg(ind_addr, expected, expected | to_be_set); + actual = qatomic_cmpxchg(ind_addr, expected, expected | to_be_set); } while (actual != expected); trace_virtio_ccw_set_ind(ind_loc, actual, actual | to_be_set); 
cpu_physical_memory_unmap((void *)ind_addr, len, 1, len); diff --git a/hw/virtio/vhost.c b/hw/virtio/vhost.c index 1a1384e..0119516 100644 --- a/hw/virtio/vhost.c +++ b/hw/virtio/vhost.c @@ -90,7 +90,7 @@ static void vhost_dev_sync_region(struct vhost_dev *dev, } /* Data must be read atomically. We don't really need barrier semantics * but it's easier to use atomic_* than roll our own. */ - log = atomic_xchg(from, 0); + log = qatomic_xchg(from, 0); while (log) { int bit = ctzl(log); hwaddr page_addr; diff --git a/hw/virtio/virtio-mmio.c b/hw/virtio/virtio-mmio.c index f12d159..e1b5c3b 100644 --- a/hw/virtio/virtio-mmio.c +++ b/hw/virtio/virtio-mmio.c @@ -179,7 +179,7 @@ static uint64_t virtio_mmio_read(void *opaque, hwaddr offset, unsigned size) } return proxy->vqs[vdev->queue_sel].enabled; case VIRTIO_MMIO_INTERRUPT_STATUS: - return atomic_read(&vdev->isr); + return qatomic_read(&vdev->isr); case VIRTIO_MMIO_STATUS: return vdev->status; case VIRTIO_MMIO_CONFIG_GENERATION: @@ -370,7 +370,7 @@ static void virtio_mmio_write(void *opaque, hwaddr offset, uint64_t value, } break; case VIRTIO_MMIO_INTERRUPT_ACK: - atomic_and(&vdev->isr, ~value); + qatomic_and(&vdev->isr, ~value); virtio_update_irq(vdev); break; case VIRTIO_MMIO_STATUS: @@ -496,7 +496,7 @@ static void virtio_mmio_update_irq(DeviceState *opaque, uint16_t vector) if (!vdev) { return; } - level = (atomic_read(&vdev->isr) != 0); + level = (qatomic_read(&vdev->isr) != 0); trace_virtio_mmio_setting_irq(level); qemu_set_irq(proxy->irq, level); } diff --git a/hw/virtio/virtio-pci.c b/hw/virtio/virtio-pci.c index 5bc769f..02790e3 100644 --- a/hw/virtio/virtio-pci.c +++ b/hw/virtio/virtio-pci.c @@ -72,7 +72,7 @@ static void virtio_pci_notify(DeviceState *d, uint16_t vector) msix_notify(&proxy->pci_dev, vector); else { VirtIODevice *vdev = virtio_bus_get_device(&proxy->bus); - pci_set_irq(&proxy->pci_dev, atomic_read(&vdev->isr) & 1); + pci_set_irq(&proxy->pci_dev, qatomic_read(&vdev->isr) & 1); } } @@ -398,7 +398,7 
@@ static uint32_t virtio_ioport_read(VirtIOPCIProxy *proxy, uint32_t addr) break; case VIRTIO_PCI_ISR: /* reading from the ISR also clears it. */ - ret = atomic_xchg(&vdev->isr, 0); + ret = qatomic_xchg(&vdev->isr, 0); pci_irq_deassert(&proxy->pci_dev); break; case VIRTIO_MSI_CONFIG_VECTOR: @@ -1362,7 +1362,7 @@ static uint64_t virtio_pci_isr_read(void *opaque, hwaddr addr, { VirtIOPCIProxy *proxy = opaque; VirtIODevice *vdev = virtio_bus_get_device(&proxy->bus); - uint64_t val = atomic_xchg(&vdev->isr, 0); + uint64_t val = qatomic_xchg(&vdev->isr, 0); pci_irq_deassert(&proxy->pci_dev); return val; diff --git a/hw/virtio/virtio.c b/hw/virtio/virtio.c index e983025..3a3d012 100644 --- a/hw/virtio/virtio.c +++ b/hw/virtio/virtio.c @@ -149,8 +149,8 @@ static void virtio_virtqueue_reset_region_cache(struct VirtQueue *vq) { VRingMemoryRegionCaches *caches; - caches = atomic_read(&vq->vring.caches); - atomic_rcu_set(&vq->vring.caches, NULL); + caches = qatomic_read(&vq->vring.caches); + qatomic_rcu_set(&vq->vring.caches, NULL); if (caches) { call_rcu(caches, virtio_free_region_cache, rcu); } @@ -197,7 +197,7 @@ static void virtio_init_region_cache(VirtIODevice *vdev, int n) goto err_avail; } - atomic_rcu_set(&vq->vring.caches, new); + qatomic_rcu_set(&vq->vring.caches, new); if (old) { call_rcu(old, virtio_free_region_cache, rcu); } @@ -283,7 +283,7 @@ static void vring_packed_flags_write(VirtIODevice *vdev, /* Called within rcu_read_lock(). */ static VRingMemoryRegionCaches *vring_get_region_caches(struct VirtQueue *vq) { - return atomic_rcu_read(&vq->vring.caches); + return qatomic_rcu_read(&vq->vring.caches); } /* Called within rcu_read_lock(). 
*/ @@ -2007,7 +2007,7 @@ void virtio_reset(void *opaque) vdev->queue_sel = 0; vdev->status = 0; vdev->disabled = false; - atomic_set(&vdev->isr, 0); + qatomic_set(&vdev->isr, 0); vdev->config_vector = VIRTIO_NO_VECTOR; virtio_notify_vector(vdev, vdev->config_vector); @@ -2439,13 +2439,13 @@ void virtio_del_queue(VirtIODevice *vdev, int n) static void virtio_set_isr(VirtIODevice *vdev, int value) { - uint8_t old = atomic_read(&vdev->isr); + uint8_t old = qatomic_read(&vdev->isr); /* Do not write ISR if it does not change, so that its cacheline remains * shared in the common case where the guest does not read it. */ if ((old & value) != value) { - atomic_or(&vdev->isr, value); + qatomic_or(&vdev->isr, value); } } @@ -3254,7 +3254,7 @@ void virtio_init(VirtIODevice *vdev, const char *name, vdev->started = false; vdev->device_id = device_id; vdev->status = 0; - atomic_set(&vdev->isr, 0); + qatomic_set(&vdev->isr, 0); vdev->queue_sel = 0; vdev->config_vector = VIRTIO_NO_VECTOR; vdev->vq = g_malloc0(sizeof(VirtQueue) * VIRTIO_QUEUE_MAX); diff --git a/hw/xtensa/pic_cpu.c b/hw/xtensa/pic_cpu.c index 1d5982a..6c94475 100644 --- a/hw/xtensa/pic_cpu.c +++ b/hw/xtensa/pic_cpu.c @@ -72,9 +72,9 @@ static void xtensa_set_irq(void *opaque, int irq, int active) uint32_t irq_bit = 1 << irq; if (active) { - atomic_or(&env->sregs[INTSET], irq_bit); + qatomic_or(&env->sregs[INTSET], irq_bit); } else if (env->config->interrupt[irq].inttype == INTTYPE_LEVEL) { - atomic_and(&env->sregs[INTSET], ~irq_bit); + qatomic_and(&env->sregs[INTSET], ~irq_bit); } check_interrupts(env); |