author     Peter Maydell <peter.maydell@linaro.org>   2020-09-24 18:48:45 +0100
committer  Peter Maydell <peter.maydell@linaro.org>   2020-09-24 18:48:45 +0100
commit     8c1c07929feae876202ba26f07a540c5115c18cd (patch)
tree       20f6c8e2ac556bfb3c88a98c0d0cb2689de0263e /hw
parent     1bd5556f6686365e76f7ff67fe67260c449e8345 (diff)
parent     d73415a315471ac0b127ed3fad45c8ec5d711de1 (diff)
download   qemu-8c1c07929feae876202ba26f07a540c5115c18cd.zip
           qemu-8c1c07929feae876202ba26f07a540c5115c18cd.tar.gz
           qemu-8c1c07929feae876202ba26f07a540c5115c18cd.tar.bz2
Merge remote-tracking branch 'remotes/stefanha/tags/block-pull-request' into staging
Pull request

This includes the atomic_ -> qatomic_ rename that touches many files and is
prone to conflicts.

# gpg: Signature made Wed 23 Sep 2020 17:08:43 BST
# gpg:                using RSA key 8695A8BFD3F97CDAAC35775A9CA4ABB381AB73C8
# gpg: Good signature from "Stefan Hajnoczi <stefanha@redhat.com>" [full]
# gpg:                 aka "Stefan Hajnoczi <stefanha@gmail.com>" [full]
# Primary key fingerprint: 8695 A8BF D3F9 7CDA AC35 775A 9CA4 ABB3 81AB 73C8

* remotes/stefanha/tags/block-pull-request:
  qemu/atomic.h: rename atomic_ to qatomic_
  tests: add test-fdmon-epoll
  fdmon-poll: reset npfd when upgrading to fdmon-epoll
  gitmodules: add qemu.org vbootrom submodule
  gitmodules: switch to qemu.org meson mirror
  gitmodules: switch to qemu.org qboot mirror
  docs/system: clarify deprecation schedule
  virtio-crypto: don't modify elem->in/out_sg
  virtio-blk: undo destructive iov_discard_*() operations
  util/iov: add iov_discard_undo()
  virtio: add vhost-user-fs-ccw device
  libvhost-user: handle endianness as mandated by the spec
  MAINTAINERS: add Stefan Hajnoczi as block/nvme.c maintainer

Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
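For readers tracking the rename: the qatomic_* names wrap the same compiler
primitives the old atomic_* macros did, while keeping QEMU's wrappers out of
the way of the C11 <stdatomic.h> names. A minimal sketch of the wrapper idea
(illustrative only -- QEMU's real qemu/atomic.h adds type checking, barrier
variants, and more):

    /* Hypothetical reduced forms; see qemu/atomic.h for the real macros. */
    #define qatomic_read(ptr)        __atomic_load_n(ptr, __ATOMIC_RELAXED)
    #define qatomic_set(ptr, val)    __atomic_store_n(ptr, val, __ATOMIC_RELAXED)
    #define qatomic_fetch_or(ptr, m) __atomic_fetch_or(ptr, m, __ATOMIC_SEQ_CST)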
Diffstat (limited to 'hw')
-rw-r--r--  hw/block/virtio-blk.c          | 11
-rw-r--r--  hw/core/cpu.c                  |  6
-rw-r--r--  hw/display/qxl.c               |  4
-rw-r--r--  hw/hyperv/hyperv.c             | 10
-rw-r--r--  hw/hyperv/vmbus.c              |  2
-rw-r--r--  hw/i386/xen/xen-hvm.c          |  2
-rw-r--r--  hw/intc/rx_icu.c               | 12
-rw-r--r--  hw/intc/sifive_plic.c          |  4
-rw-r--r--  hw/misc/edu.c                  | 16
-rw-r--r--  hw/net/virtio-net.c            | 10
-rw-r--r--  hw/rdma/rdma_backend.c         | 18
-rw-r--r--  hw/rdma/rdma_rm.c              |  2
-rw-r--r--  hw/rdma/vmw/pvrdma_dev_ring.c  |  4
-rw-r--r--  hw/s390x/meson.build           |  1
-rw-r--r--  hw/s390x/s390-pci-bus.c        |  2
-rw-r--r--  hw/s390x/vhost-user-fs-ccw.c   | 75
-rw-r--r--  hw/s390x/virtio-ccw.c          |  2
-rw-r--r--  hw/virtio/vhost.c              |  2
-rw-r--r--  hw/virtio/virtio-crypto.c      | 17
-rw-r--r--  hw/virtio/virtio-mmio.c        |  6
-rw-r--r--  hw/virtio/virtio-pci.c         |  6
-rw-r--r--  hw/virtio/virtio.c             | 16
-rw-r--r--  hw/xtensa/pic_cpu.c            |  4
23 files changed, 163 insertions(+), 69 deletions(-)
diff --git a/hw/block/virtio-blk.c b/hw/block/virtio-blk.c
index 2204ba1..bac2d6f 100644
--- a/hw/block/virtio-blk.c
+++ b/hw/block/virtio-blk.c
@@ -80,6 +80,8 @@ static void virtio_blk_req_complete(VirtIOBlockReq *req, unsigned char status)
trace_virtio_blk_req_complete(vdev, req, status);
stb_p(&req->in->status, status);
+ iov_discard_undo(&req->inhdr_undo);
+ iov_discard_undo(&req->outhdr_undo);
virtqueue_push(req->vq, &req->elem, req->in_len);
if (s->dataplane_started && !s->dataplane_disabled) {
virtio_blk_data_plane_notify(s->dataplane, req->vq);
@@ -632,10 +634,12 @@ static int virtio_blk_handle_request(VirtIOBlockReq *req, MultiReqBuffer *mrb)
return -1;
}
- iov_discard_front(&out_iov, &out_num, sizeof(req->out));
+ iov_discard_front_undoable(&out_iov, &out_num, sizeof(req->out),
+ &req->outhdr_undo);
if (in_iov[in_num - 1].iov_len < sizeof(struct virtio_blk_inhdr)) {
virtio_error(vdev, "virtio-blk request inhdr too short");
+ iov_discard_undo(&req->outhdr_undo);
return -1;
}
@@ -644,7 +648,8 @@ static int virtio_blk_handle_request(VirtIOBlockReq *req, MultiReqBuffer *mrb)
req->in = (void *)in_iov[in_num - 1].iov_base
+ in_iov[in_num - 1].iov_len
- sizeof(struct virtio_blk_inhdr);
- iov_discard_back(in_iov, &in_num, sizeof(struct virtio_blk_inhdr));
+ iov_discard_back_undoable(in_iov, &in_num, sizeof(struct virtio_blk_inhdr),
+ &req->inhdr_undo);
type = virtio_ldl_p(vdev, &req->out.type);
@@ -739,6 +744,8 @@ static int virtio_blk_handle_request(VirtIOBlockReq *req, MultiReqBuffer *mrb)
if (unlikely(iov_to_buf(out_iov, out_num, 0, &dwz_hdr,
sizeof(dwz_hdr)) != sizeof(dwz_hdr))) {
+ iov_discard_undo(&req->inhdr_undo);
+ iov_discard_undo(&req->outhdr_undo);
virtio_error(vdev, "virtio-blk discard/write_zeroes header"
" too short");
return -1;
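The virtio-blk hunks above pair each destructive iov_discard_front()/
iov_discard_back() with saved undo state, so a request that fails validation
can be completed with its original scatter-gather layout. A minimal sketch of
the pattern using the call signatures visible in this diff (IOVDiscardUndo
and process_one_request() are assumed names for illustration):

    IOVDiscardUndo undo;

    /* Strip the fixed-size request header, remembering how to put it back. */
    iov_discard_front_undoable(&out_iov, &out_num,
                               sizeof(struct virtio_blk_outhdr), &undo);

    if (process_one_request(out_iov, out_num) < 0) {
        /* Restore the iovec layout before completing the request, so the
         * guest-visible descriptor chain is returned intact. */
        iov_discard_undo(&undo);
        return -1;
    }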
diff --git a/hw/core/cpu.c b/hw/core/cpu.c
index 8f65383..c55c09f 100644
--- a/hw/core/cpu.c
+++ b/hw/core/cpu.c
@@ -111,10 +111,10 @@ void cpu_reset_interrupt(CPUState *cpu, int mask)
void cpu_exit(CPUState *cpu)
{
- atomic_set(&cpu->exit_request, 1);
+ qatomic_set(&cpu->exit_request, 1);
/* Ensure cpu_exec will see the exit request after TCG has exited. */
smp_wmb();
- atomic_set(&cpu->icount_decr_ptr->u16.high, -1);
+ qatomic_set(&cpu->icount_decr_ptr->u16.high, -1);
}
int cpu_write_elf32_qemunote(WriteCoreDumpFunction f, CPUState *cpu,
@@ -261,7 +261,7 @@ static void cpu_common_reset(DeviceState *dev)
cpu->halted = cpu->start_powered_off;
cpu->mem_io_pc = 0;
cpu->icount_extra = 0;
- atomic_set(&cpu->icount_decr_ptr->u32, 0);
+ qatomic_set(&cpu->icount_decr_ptr->u32, 0);
cpu->can_do_io = 1;
cpu->exception_index = -1;
cpu->crash_occurred = false;
diff --git a/hw/display/qxl.c b/hw/display/qxl.c
index 1187134..431c107 100644
--- a/hw/display/qxl.c
+++ b/hw/display/qxl.c
@@ -1908,7 +1908,7 @@ static void qxl_send_events(PCIQXLDevice *d, uint32_t events)
/*
* Older versions of Spice forgot to define the QXLRam struct
* with the '__aligned__(4)' attribute. clang 7 and newer will
- * thus warn that atomic_fetch_or(&d->ram->int_pending, ...)
+ * thus warn that qatomic_fetch_or(&d->ram->int_pending, ...)
* might be a misaligned atomic access, and will generate an
* out-of-line call for it, which results in a link error since
* we don't currently link against libatomic.
@@ -1928,7 +1928,7 @@ static void qxl_send_events(PCIQXLDevice *d, uint32_t events)
#define ALIGNED_UINT32_PTR(P) ((uint32_t *)P)
#endif
- old_pending = atomic_fetch_or(ALIGNED_UINT32_PTR(&d->ram->int_pending),
+ old_pending = qatomic_fetch_or(ALIGNED_UINT32_PTR(&d->ram->int_pending),
le_events);
if ((old_pending & le_events) == le_events) {
return;
diff --git a/hw/hyperv/hyperv.c b/hw/hyperv/hyperv.c
index 4b26db1..cb1074f 100644
--- a/hw/hyperv/hyperv.c
+++ b/hw/hyperv/hyperv.c
@@ -231,7 +231,7 @@ static void sint_msg_bh(void *opaque)
HvSintRoute *sint_route = opaque;
HvSintStagedMessage *staged_msg = sint_route->staged_msg;
- if (atomic_read(&staged_msg->state) != HV_STAGED_MSG_POSTED) {
+ if (qatomic_read(&staged_msg->state) != HV_STAGED_MSG_POSTED) {
/* status not ready yet (spurious ack from guest?), ignore */
return;
}
@@ -240,7 +240,7 @@ static void sint_msg_bh(void *opaque)
staged_msg->status = 0;
/* staged message processing finished, ready to start over */
- atomic_set(&staged_msg->state, HV_STAGED_MSG_FREE);
+ qatomic_set(&staged_msg->state, HV_STAGED_MSG_FREE);
/* drop the reference taken in hyperv_post_msg */
hyperv_sint_route_unref(sint_route);
}
@@ -278,7 +278,7 @@ static void cpu_post_msg(CPUState *cs, run_on_cpu_data data)
memory_region_set_dirty(&synic->msg_page_mr, 0, sizeof(*synic->msg_page));
posted:
- atomic_set(&staged_msg->state, HV_STAGED_MSG_POSTED);
+ qatomic_set(&staged_msg->state, HV_STAGED_MSG_POSTED);
/*
* Notify the msg originator of the progress made; if the slot was busy we
* set msg_pending flag in it so it will be the guest who will do EOM and
@@ -301,7 +301,7 @@ int hyperv_post_msg(HvSintRoute *sint_route, struct hyperv_message *src_msg)
assert(staged_msg);
/* grab the staging area */
- if (atomic_cmpxchg(&staged_msg->state, HV_STAGED_MSG_FREE,
+ if (qatomic_cmpxchg(&staged_msg->state, HV_STAGED_MSG_FREE,
HV_STAGED_MSG_BUSY) != HV_STAGED_MSG_FREE) {
return -EAGAIN;
}
@@ -351,7 +351,7 @@ int hyperv_set_event_flag(HvSintRoute *sint_route, unsigned eventno)
set_mask = BIT_MASK(eventno);
flags = synic->event_page->slot[sint_route->sint].flags;
- if ((atomic_fetch_or(&flags[set_idx], set_mask) & set_mask) != set_mask) {
+ if ((qatomic_fetch_or(&flags[set_idx], set_mask) & set_mask) != set_mask) {
memory_region_set_dirty(&synic->event_page_mr, 0,
sizeof(*synic->event_page));
ret = hyperv_sint_route_set_sint(sint_route);
diff --git a/hw/hyperv/vmbus.c b/hw/hyperv/vmbus.c
index 6ef895b..896e981 100644
--- a/hw/hyperv/vmbus.c
+++ b/hw/hyperv/vmbus.c
@@ -747,7 +747,7 @@ static int vmbus_channel_notify_guest(VMBusChannel *chan)
idx = BIT_WORD(chan->id);
mask = BIT_MASK(chan->id);
- if ((atomic_fetch_or(&int_map[idx], mask) & mask) != mask) {
+ if ((qatomic_fetch_or(&int_map[idx], mask) & mask) != mask) {
res = hyperv_sint_route_set_sint(chan->notify_route);
dirty = len;
}
diff --git a/hw/i386/xen/xen-hvm.c b/hw/i386/xen/xen-hvm.c
index cde981b..a39a648 100644
--- a/hw/i386/xen/xen-hvm.c
+++ b/hw/i386/xen/xen-hvm.c
@@ -1140,7 +1140,7 @@ static int handle_buffered_iopage(XenIOState *state)
assert(req.dir == IOREQ_WRITE);
assert(!req.data_is_ptr);
- atomic_add(&buf_page->read_pointer, qw + 1);
+ qatomic_add(&buf_page->read_pointer, qw + 1);
}
return req.count;
diff --git a/hw/intc/rx_icu.c b/hw/intc/rx_icu.c
index df4b6a8..94e17a9 100644
--- a/hw/intc/rx_icu.c
+++ b/hw/intc/rx_icu.c
@@ -81,8 +81,8 @@ static void rxicu_request(RXICUState *icu, int n_IRQ)
int enable;
enable = icu->ier[n_IRQ / 8] & (1 << (n_IRQ & 7));
- if (n_IRQ > 0 && enable != 0 && atomic_read(&icu->req_irq) < 0) {
- atomic_set(&icu->req_irq, n_IRQ);
+ if (n_IRQ > 0 && enable != 0 && qatomic_read(&icu->req_irq) < 0) {
+ qatomic_set(&icu->req_irq, n_IRQ);
set_irq(icu, n_IRQ, rxicu_level(icu, n_IRQ));
}
}
@@ -124,10 +124,10 @@ static void rxicu_set_irq(void *opaque, int n_IRQ, int level)
}
if (issue == 0 && src->sense == TRG_LEVEL) {
icu->ir[n_IRQ] = 0;
- if (atomic_read(&icu->req_irq) == n_IRQ) {
+ if (qatomic_read(&icu->req_irq) == n_IRQ) {
/* clear request */
set_irq(icu, n_IRQ, 0);
- atomic_set(&icu->req_irq, -1);
+ qatomic_set(&icu->req_irq, -1);
}
return;
}
@@ -144,11 +144,11 @@ static void rxicu_ack_irq(void *opaque, int no, int level)
int n_IRQ;
int max_pri;
- n_IRQ = atomic_read(&icu->req_irq);
+ n_IRQ = qatomic_read(&icu->req_irq);
if (n_IRQ < 0) {
return;
}
- atomic_set(&icu->req_irq, -1);
+ qatomic_set(&icu->req_irq, -1);
if (icu->src[n_IRQ].sense != TRG_LEVEL) {
icu->ir[n_IRQ] = 0;
}
diff --git a/hw/intc/sifive_plic.c b/hw/intc/sifive_plic.c
index af611f8..f42fd69 100644
--- a/hw/intc/sifive_plic.c
+++ b/hw/intc/sifive_plic.c
@@ -89,12 +89,12 @@ static void sifive_plic_print_state(SiFivePLICState *plic)
static uint32_t atomic_set_masked(uint32_t *a, uint32_t mask, uint32_t value)
{
- uint32_t old, new, cmp = atomic_read(a);
+ uint32_t old, new, cmp = qatomic_read(a);
do {
old = cmp;
new = (old & ~mask) | (value & mask);
- cmp = atomic_cmpxchg(a, old, new);
+ cmp = qatomic_cmpxchg(a, old, new);
} while (old != cmp);
return old;
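atomic_set_masked() above is the classic lock-free read-modify-write loop:
recompute the masked value and retry until the compare-and-swap observes no
concurrent writer. A standalone rendering with GCC builtins so it compiles
outside QEMU (illustrative; not the PLIC code itself):

    #include <stdint.h>

    static uint32_t set_masked(uint32_t *a, uint32_t mask, uint32_t value)
    {
        uint32_t old, cmp = __atomic_load_n(a, __ATOMIC_RELAXED);

        do {
            old = cmp;
            /* Replace only the bits selected by 'mask'. */
            uint32_t new_val = (old & ~mask) | (value & mask);
            /* Returns the value actually found at *a; a mismatch with 'old'
             * means another thread raced us, so retry with its value. */
            cmp = __sync_val_compare_and_swap(a, old, new_val);
        } while (cmp != old);

        return old;
    }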
diff --git a/hw/misc/edu.c b/hw/misc/edu.c
index 0ff9d1a..e935c41 100644
--- a/hw/misc/edu.c
+++ b/hw/misc/edu.c
@@ -212,7 +212,7 @@ static uint64_t edu_mmio_read(void *opaque, hwaddr addr, unsigned size)
qemu_mutex_unlock(&edu->thr_mutex);
break;
case 0x20:
- val = atomic_read(&edu->status);
+ val = qatomic_read(&edu->status);
break;
case 0x24:
val = edu->irq_status;
@@ -252,7 +252,7 @@ static void edu_mmio_write(void *opaque, hwaddr addr, uint64_t val,
edu->addr4 = ~val;
break;
case 0x08:
- if (atomic_read(&edu->status) & EDU_STATUS_COMPUTING) {
+ if (qatomic_read(&edu->status) & EDU_STATUS_COMPUTING) {
break;
}
/* EDU_STATUS_COMPUTING cannot go 0->1 concurrently, because it is only
@@ -260,15 +260,15 @@ static void edu_mmio_write(void *opaque, hwaddr addr, uint64_t val,
*/
qemu_mutex_lock(&edu->thr_mutex);
edu->fact = val;
- atomic_or(&edu->status, EDU_STATUS_COMPUTING);
+ qatomic_or(&edu->status, EDU_STATUS_COMPUTING);
qemu_cond_signal(&edu->thr_cond);
qemu_mutex_unlock(&edu->thr_mutex);
break;
case 0x20:
if (val & EDU_STATUS_IRQFACT) {
- atomic_or(&edu->status, EDU_STATUS_IRQFACT);
+ qatomic_or(&edu->status, EDU_STATUS_IRQFACT);
} else {
- atomic_and(&edu->status, ~EDU_STATUS_IRQFACT);
+ qatomic_and(&edu->status, ~EDU_STATUS_IRQFACT);
}
break;
case 0x60:
@@ -322,7 +322,7 @@ static void *edu_fact_thread(void *opaque)
uint32_t val, ret = 1;
qemu_mutex_lock(&edu->thr_mutex);
- while ((atomic_read(&edu->status) & EDU_STATUS_COMPUTING) == 0 &&
+ while ((qatomic_read(&edu->status) & EDU_STATUS_COMPUTING) == 0 &&
!edu->stopping) {
qemu_cond_wait(&edu->thr_cond, &edu->thr_mutex);
}
@@ -347,9 +347,9 @@ static void *edu_fact_thread(void *opaque)
qemu_mutex_lock(&edu->thr_mutex);
edu->fact = ret;
qemu_mutex_unlock(&edu->thr_mutex);
- atomic_and(&edu->status, ~EDU_STATUS_COMPUTING);
+ qatomic_and(&edu->status, ~EDU_STATUS_COMPUTING);
- if (atomic_read(&edu->status) & EDU_STATUS_IRQFACT) {
+ if (qatomic_read(&edu->status) & EDU_STATUS_IRQFACT) {
qemu_mutex_lock_iothread();
edu_raise_irq(edu, FACT_IRQ);
qemu_mutex_unlock_iothread();
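The edu hunks show the device's worker-thread protocol: the guest-facing MMIO
handler sets EDU_STATUS_COMPUTING and signals a condition variable; the
worker computes outside the lock and clears the bit when done. A
self-contained sketch of that shape with plain pthreads and GCC atomics
(names here are hypothetical, not the edu device's own):

    #include <pthread.h>
    #include <stdbool.h>
    #include <stdint.h>

    #define STATUS_COMPUTING 0x01u

    static uint32_t status;
    static bool stopping;
    static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
    static pthread_cond_t cond = PTHREAD_COND_INITIALIZER;

    static void *worker(void *arg)
    {
        (void)arg;
        pthread_mutex_lock(&lock);
        while (!stopping) {
            /* Sleep until the MMIO handler sets STATUS_COMPUTING. */
            while (!(__atomic_load_n(&status, __ATOMIC_RELAXED)
                     & STATUS_COMPUTING) && !stopping) {
                pthread_cond_wait(&cond, &lock);
            }
            if (stopping) {
                break;
            }
            pthread_mutex_unlock(&lock);
            /* ... perform the computation without holding the lock ... */
            pthread_mutex_lock(&lock);
            /* Clearing the bit tells the guest the result is ready. */
            __atomic_and_fetch(&status, ~STATUS_COMPUTING, __ATOMIC_SEQ_CST);
        }
        pthread_mutex_unlock(&lock);
        return NULL;
    }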
diff --git a/hw/net/virtio-net.c b/hw/net/virtio-net.c
index cb0d270..7bf27b9 100644
--- a/hw/net/virtio-net.c
+++ b/hw/net/virtio-net.c
@@ -933,7 +933,7 @@ static void virtio_net_set_features(VirtIODevice *vdev, uint64_t features)
if (virtio_has_feature(features, VIRTIO_NET_F_STANDBY)) {
qapi_event_send_failover_negotiated(n->netclient_name);
- atomic_set(&n->primary_should_be_hidden, false);
+ qatomic_set(&n->primary_should_be_hidden, false);
failover_add_primary(n, &err);
if (err) {
n->primary_dev = virtio_connect_failover_devices(n, n->qdev, &err);
@@ -3168,7 +3168,7 @@ static void virtio_net_handle_migration_primary(VirtIONet *n,
bool should_be_hidden;
Error *err = NULL;
- should_be_hidden = atomic_read(&n->primary_should_be_hidden);
+ should_be_hidden = qatomic_read(&n->primary_should_be_hidden);
if (!n->primary_dev) {
n->primary_dev = virtio_connect_failover_devices(n, n->qdev, &err);
@@ -3183,7 +3183,7 @@ static void virtio_net_handle_migration_primary(VirtIONet *n,
qdev_get_vmsd(n->primary_dev),
n->primary_dev);
qapi_event_send_unplug_primary(n->primary_device_id);
- atomic_set(&n->primary_should_be_hidden, true);
+ qatomic_set(&n->primary_should_be_hidden, true);
} else {
warn_report("couldn't unplug primary device");
}
@@ -3234,7 +3234,7 @@ static int virtio_net_primary_should_be_hidden(DeviceListener *listener,
n->primary_device_opts = device_opts;
/* primary_should_be_hidden is set during feature negotiation */
- hide = atomic_read(&n->primary_should_be_hidden);
+ hide = qatomic_read(&n->primary_should_be_hidden);
if (n->primary_device_dict) {
g_free(n->primary_device_id);
@@ -3291,7 +3291,7 @@ static void virtio_net_device_realize(DeviceState *dev, Error **errp)
if (n->failover) {
n->primary_listener.should_be_hidden =
virtio_net_primary_should_be_hidden;
- atomic_set(&n->primary_should_be_hidden, true);
+ qatomic_set(&n->primary_should_be_hidden, true);
device_listener_register(&n->primary_listener);
n->migration_state.notify = virtio_net_migration_state_notifier;
add_migration_state_change_notifier(&n->migration_state);
diff --git a/hw/rdma/rdma_backend.c b/hw/rdma/rdma_backend.c
index db7e5c8..5de010b 100644
--- a/hw/rdma/rdma_backend.c
+++ b/hw/rdma/rdma_backend.c
@@ -68,7 +68,7 @@ static void free_cqe_ctx(gpointer data, gpointer user_data)
bctx = rdma_rm_get_cqe_ctx(rdma_dev_res, cqe_ctx_id);
if (bctx) {
rdma_rm_dealloc_cqe_ctx(rdma_dev_res, cqe_ctx_id);
- atomic_dec(&rdma_dev_res->stats.missing_cqe);
+ qatomic_dec(&rdma_dev_res->stats.missing_cqe);
}
g_free(bctx);
}
@@ -81,7 +81,7 @@ static void clean_recv_mads(RdmaBackendDev *backend_dev)
cqe_ctx_id = rdma_protected_qlist_pop_int64(&backend_dev->
recv_mads_list);
if (cqe_ctx_id != -ENOENT) {
- atomic_inc(&backend_dev->rdma_dev_res->stats.missing_cqe);
+ qatomic_inc(&backend_dev->rdma_dev_res->stats.missing_cqe);
free_cqe_ctx(GINT_TO_POINTER(cqe_ctx_id),
backend_dev->rdma_dev_res);
}
@@ -123,7 +123,7 @@ static int rdma_poll_cq(RdmaDeviceResources *rdma_dev_res, struct ibv_cq *ibcq)
}
total_ne += ne;
} while (ne > 0);
- atomic_sub(&rdma_dev_res->stats.missing_cqe, total_ne);
+ qatomic_sub(&rdma_dev_res->stats.missing_cqe, total_ne);
}
if (ne < 0) {
@@ -195,17 +195,17 @@ static void *comp_handler_thread(void *arg)
static inline void disable_rdmacm_mux_async(RdmaBackendDev *backend_dev)
{
- atomic_set(&backend_dev->rdmacm_mux.can_receive, 0);
+ qatomic_set(&backend_dev->rdmacm_mux.can_receive, 0);
}
static inline void enable_rdmacm_mux_async(RdmaBackendDev *backend_dev)
{
- atomic_set(&backend_dev->rdmacm_mux.can_receive, sizeof(RdmaCmMuxMsg));
+ qatomic_set(&backend_dev->rdmacm_mux.can_receive, sizeof(RdmaCmMuxMsg));
}
static inline int rdmacm_mux_can_process_async(RdmaBackendDev *backend_dev)
{
- return atomic_read(&backend_dev->rdmacm_mux.can_receive);
+ return qatomic_read(&backend_dev->rdmacm_mux.can_receive);
}
static int rdmacm_mux_check_op_status(CharBackend *mad_chr_be)
@@ -555,7 +555,7 @@ void rdma_backend_post_send(RdmaBackendDev *backend_dev,
goto err_dealloc_cqe_ctx;
}
- atomic_inc(&backend_dev->rdma_dev_res->stats.missing_cqe);
+ qatomic_inc(&backend_dev->rdma_dev_res->stats.missing_cqe);
backend_dev->rdma_dev_res->stats.tx++;
return;
@@ -658,7 +658,7 @@ void rdma_backend_post_recv(RdmaBackendDev *backend_dev,
goto err_dealloc_cqe_ctx;
}
- atomic_inc(&backend_dev->rdma_dev_res->stats.missing_cqe);
+ qatomic_inc(&backend_dev->rdma_dev_res->stats.missing_cqe);
backend_dev->rdma_dev_res->stats.rx_bufs++;
return;
@@ -710,7 +710,7 @@ void rdma_backend_post_srq_recv(RdmaBackendDev *backend_dev,
goto err_dealloc_cqe_ctx;
}
- atomic_inc(&backend_dev->rdma_dev_res->stats.missing_cqe);
+ qatomic_inc(&backend_dev->rdma_dev_res->stats.missing_cqe);
backend_dev->rdma_dev_res->stats.rx_bufs++;
backend_dev->rdma_dev_res->stats.rx_srq++;
diff --git a/hw/rdma/rdma_rm.c b/hw/rdma/rdma_rm.c
index 60957f8..49141d4 100644
--- a/hw/rdma/rdma_rm.c
+++ b/hw/rdma/rdma_rm.c
@@ -790,7 +790,7 @@ int rdma_rm_init(RdmaDeviceResources *dev_res, struct ibv_device_attr *dev_attr)
qemu_mutex_init(&dev_res->lock);
memset(&dev_res->stats, 0, sizeof(dev_res->stats));
- atomic_set(&dev_res->stats.missing_cqe, 0);
+ qatomic_set(&dev_res->stats.missing_cqe, 0);
return 0;
}
diff --git a/hw/rdma/vmw/pvrdma_dev_ring.c b/hw/rdma/vmw/pvrdma_dev_ring.c
index c122fe7..f0bcde7 100644
--- a/hw/rdma/vmw/pvrdma_dev_ring.c
+++ b/hw/rdma/vmw/pvrdma_dev_ring.c
@@ -38,8 +38,8 @@ int pvrdma_ring_init(PvrdmaRing *ring, const char *name, PCIDevice *dev,
ring->max_elems = max_elems;
ring->elem_sz = elem_sz;
/* TODO: Give a moment to think if we want to redo driver settings
- atomic_set(&ring->ring_state->prod_tail, 0);
- atomic_set(&ring->ring_state->cons_head, 0);
+ qatomic_set(&ring->ring_state->prod_tail, 0);
+ qatomic_set(&ring->ring_state->cons_head, 0);
*/
ring->npages = npages;
ring->pages = g_malloc(npages * sizeof(void *));
diff --git a/hw/s390x/meson.build b/hw/s390x/meson.build
index b63782d..948ceae 100644
--- a/hw/s390x/meson.build
+++ b/hw/s390x/meson.build
@@ -41,6 +41,7 @@ virtio_ss.add(when: 'CONFIG_VIRTIO_SCSI', if_true: files('virtio-ccw-scsi.c'))
virtio_ss.add(when: 'CONFIG_VIRTIO_SERIAL', if_true: files('virtio-ccw-serial.c'))
virtio_ss.add(when: ['CONFIG_VIRTIO_9P', 'CONFIG_VIRTFS'], if_true: files('virtio-ccw-blk.c'))
virtio_ss.add(when: 'CONFIG_VHOST_VSOCK', if_true: files('vhost-vsock-ccw.c'))
+virtio_ss.add(when: 'CONFIG_VHOST_USER_FS', if_true: files('vhost-user-fs-ccw.c'))
s390x_ss.add_all(when: 'CONFIG_VIRTIO_CCW', if_true: virtio_ss)
hw_arch += {'s390x': s390x_ss}
diff --git a/hw/s390x/s390-pci-bus.c b/hw/s390x/s390-pci-bus.c
index 92146a2..fb4cee8 100644
--- a/hw/s390x/s390-pci-bus.c
+++ b/hw/s390x/s390-pci-bus.c
@@ -650,7 +650,7 @@ static uint8_t set_ind_atomic(uint64_t ind_loc, uint8_t to_be_set)
actual = *ind_addr;
do {
expected = actual;
- actual = atomic_cmpxchg(ind_addr, expected, expected | to_be_set);
+ actual = qatomic_cmpxchg(ind_addr, expected, expected | to_be_set);
} while (actual != expected);
cpu_physical_memory_unmap((void *)ind_addr, len, 1, len);
diff --git a/hw/s390x/vhost-user-fs-ccw.c b/hw/s390x/vhost-user-fs-ccw.c
new file mode 100644
index 0000000..6c6f269
--- /dev/null
+++ b/hw/s390x/vhost-user-fs-ccw.c
@@ -0,0 +1,75 @@
+/*
+ * virtio ccw vhost-user-fs implementation
+ *
+ * Copyright 2020 IBM Corp.
+ *
+ * This work is licensed under the terms of the GNU GPL, version 2 or (at
+ * your option) any later version. See the COPYING file in the top-level
+ * directory.
+ */
+#include "qemu/osdep.h"
+#include "hw/qdev-properties.h"
+#include "qapi/error.h"
+#include "hw/virtio/vhost-user-fs.h"
+#include "virtio-ccw.h"
+
+typedef struct VHostUserFSCcw {
+ VirtioCcwDevice parent_obj;
+ VHostUserFS vdev;
+} VHostUserFSCcw;
+
+#define TYPE_VHOST_USER_FS_CCW "vhost-user-fs-ccw"
+#define VHOST_USER_FS_CCW(obj) \
+ OBJECT_CHECK(VHostUserFSCcw, (obj), TYPE_VHOST_USER_FS_CCW)
+
+
+static Property vhost_user_fs_ccw_properties[] = {
+ DEFINE_PROP_BIT("ioeventfd", VirtioCcwDevice, flags,
+ VIRTIO_CCW_FLAG_USE_IOEVENTFD_BIT, true),
+ DEFINE_PROP_UINT32("max_revision", VirtioCcwDevice, max_rev,
+ VIRTIO_CCW_MAX_REV),
+ DEFINE_PROP_END_OF_LIST(),
+};
+
+static void vhost_user_fs_ccw_realize(VirtioCcwDevice *ccw_dev, Error **errp)
+{
+ VHostUserFSCcw *dev = VHOST_USER_FS_CCW(ccw_dev);
+ DeviceState *vdev = DEVICE(&dev->vdev);
+
+ qdev_realize(vdev, BUS(&ccw_dev->bus), errp);
+}
+
+static void vhost_user_fs_ccw_instance_init(Object *obj)
+{
+ VHostUserFSCcw *dev = VHOST_USER_FS_CCW(obj);
+ VirtioCcwDevice *ccw_dev = VIRTIO_CCW_DEVICE(obj);
+
+ ccw_dev->force_revision_1 = true;
+ virtio_instance_init_common(obj, &dev->vdev, sizeof(dev->vdev),
+ TYPE_VHOST_USER_FS);
+}
+
+static void vhost_user_fs_ccw_class_init(ObjectClass *klass, void *data)
+{
+ DeviceClass *dc = DEVICE_CLASS(klass);
+ VirtIOCCWDeviceClass *k = VIRTIO_CCW_DEVICE_CLASS(klass);
+
+ k->realize = vhost_user_fs_ccw_realize;
+ device_class_set_props(dc, vhost_user_fs_ccw_properties);
+ set_bit(DEVICE_CATEGORY_STORAGE, dc->categories);
+}
+
+static const TypeInfo vhost_user_fs_ccw = {
+ .name = TYPE_VHOST_USER_FS_CCW,
+ .parent = TYPE_VIRTIO_CCW_DEVICE,
+ .instance_size = sizeof(VHostUserFSCcw),
+ .instance_init = vhost_user_fs_ccw_instance_init,
+ .class_init = vhost_user_fs_ccw_class_init,
+};
+
+static void vhost_user_fs_ccw_register(void)
+{
+ type_register_static(&vhost_user_fs_ccw);
+}
+
+type_init(vhost_user_fs_ccw_register)
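The new CCW transport wraps the same vhost-user-fs device the PCI variant
exposes, so it should be driven identically from the command line. A
plausible s390x invocation, assuming a virtiofsd serving the socket (option
names taken from the common vhost-user-fs device, not from this diff):

    qemu-system-s390x ... \
        -chardev socket,id=char0,path=/tmp/vhostqemu \
        -device vhost-user-fs-ccw,chardev=char0,tag=myfs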
diff --git a/hw/s390x/virtio-ccw.c b/hw/s390x/virtio-ccw.c
index 8feb345..8d140dc 100644
--- a/hw/s390x/virtio-ccw.c
+++ b/hw/s390x/virtio-ccw.c
@@ -800,7 +800,7 @@ static uint8_t virtio_set_ind_atomic(SubchDev *sch, uint64_t ind_loc,
actual = *ind_addr;
do {
expected = actual;
- actual = atomic_cmpxchg(ind_addr, expected, expected | to_be_set);
+ actual = qatomic_cmpxchg(ind_addr, expected, expected | to_be_set);
} while (actual != expected);
trace_virtio_ccw_set_ind(ind_loc, actual, actual | to_be_set);
cpu_physical_memory_unmap((void *)ind_addr, len, 1, len);
diff --git a/hw/virtio/vhost.c b/hw/virtio/vhost.c
index 1a1384e..0119516 100644
--- a/hw/virtio/vhost.c
+++ b/hw/virtio/vhost.c
@@ -90,7 +90,7 @@ static void vhost_dev_sync_region(struct vhost_dev *dev,
}
/* Data must be read atomically. We don't really need barrier semantics
* but it's easier to use atomic_* than roll our own. */
- log = atomic_xchg(from, 0);
+ log = qatomic_xchg(from, 0);
while (log) {
int bit = ctzl(log);
hwaddr page_addr;
diff --git a/hw/virtio/virtio-crypto.c b/hw/virtio/virtio-crypto.c
index 6da12e3..54f9bbb 100644
--- a/hw/virtio/virtio-crypto.c
+++ b/hw/virtio/virtio-crypto.c
@@ -228,6 +228,8 @@ static void virtio_crypto_handle_ctrl(VirtIODevice *vdev, VirtQueue *vq)
size_t s;
for (;;) {
+ g_autofree struct iovec *out_iov_copy = NULL;
+
elem = virtqueue_pop(vq, sizeof(VirtQueueElement));
if (!elem) {
break;
@@ -240,9 +242,12 @@ static void virtio_crypto_handle_ctrl(VirtIODevice *vdev, VirtQueue *vq)
}
out_num = elem->out_num;
- out_iov = elem->out_sg;
+ out_iov_copy = g_memdup(elem->out_sg, sizeof(out_iov[0]) * out_num);
+ out_iov = out_iov_copy;
+
in_num = elem->in_num;
in_iov = elem->in_sg;
+
if (unlikely(iov_to_buf(out_iov, out_num, 0, &ctrl, sizeof(ctrl))
!= sizeof(ctrl))) {
virtio_error(vdev, "virtio-crypto request ctrl_hdr too short");
@@ -582,6 +587,8 @@ virtio_crypto_handle_request(VirtIOCryptoReq *request)
int queue_index = virtio_crypto_vq2q(virtio_get_queue_index(request->vq));
struct virtio_crypto_op_data_req req;
int ret;
+ g_autofree struct iovec *in_iov_copy = NULL;
+ g_autofree struct iovec *out_iov_copy = NULL;
struct iovec *in_iov;
struct iovec *out_iov;
unsigned in_num;
@@ -598,9 +605,13 @@ virtio_crypto_handle_request(VirtIOCryptoReq *request)
}
out_num = elem->out_num;
- out_iov = elem->out_sg;
+ out_iov_copy = g_memdup(elem->out_sg, sizeof(out_iov[0]) * out_num);
+ out_iov = out_iov_copy;
+
in_num = elem->in_num;
- in_iov = elem->in_sg;
+ in_iov_copy = g_memdup(elem->in_sg, sizeof(in_iov[0]) * in_num);
+ in_iov = in_iov_copy;
+
if (unlikely(iov_to_buf(out_iov, out_num, 0, &req, sizeof(req))
!= sizeof(req))) {
virtio_error(vdev, "virtio-crypto request outhdr too short");
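Where virtio-blk undoes its discards, virtio-crypto instead works on a copy:
iov_discard_front() and friends are run against a g_memdup() duplicate of the
element's scatter-gather list, so the guest-visible VirtQueueElement is never
mutated. A sketch of the idiom as used in the hunks above (hdr_len stands in
for the real header size):

    g_autofree struct iovec *out_iov_copy =
        g_memdup(elem->out_sg, sizeof(struct iovec) * elem->out_num);
    struct iovec *out_iov = out_iov_copy;
    unsigned out_num = elem->out_num;

    /* Safe: only the copy is advanced; elem->out_sg stays intact for the
     * eventual unmap/push of the element. */
    iov_discard_front(&out_iov, &out_num, hdr_len);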
diff --git a/hw/virtio/virtio-mmio.c b/hw/virtio/virtio-mmio.c
index f12d159..e1b5c3b 100644
--- a/hw/virtio/virtio-mmio.c
+++ b/hw/virtio/virtio-mmio.c
@@ -179,7 +179,7 @@ static uint64_t virtio_mmio_read(void *opaque, hwaddr offset, unsigned size)
}
return proxy->vqs[vdev->queue_sel].enabled;
case VIRTIO_MMIO_INTERRUPT_STATUS:
- return atomic_read(&vdev->isr);
+ return qatomic_read(&vdev->isr);
case VIRTIO_MMIO_STATUS:
return vdev->status;
case VIRTIO_MMIO_CONFIG_GENERATION:
@@ -370,7 +370,7 @@ static void virtio_mmio_write(void *opaque, hwaddr offset, uint64_t value,
}
break;
case VIRTIO_MMIO_INTERRUPT_ACK:
- atomic_and(&vdev->isr, ~value);
+ qatomic_and(&vdev->isr, ~value);
virtio_update_irq(vdev);
break;
case VIRTIO_MMIO_STATUS:
@@ -496,7 +496,7 @@ static void virtio_mmio_update_irq(DeviceState *opaque, uint16_t vector)
if (!vdev) {
return;
}
- level = (atomic_read(&vdev->isr) != 0);
+ level = (qatomic_read(&vdev->isr) != 0);
trace_virtio_mmio_setting_irq(level);
qemu_set_irq(proxy->irq, level);
}
diff --git a/hw/virtio/virtio-pci.c b/hw/virtio/virtio-pci.c
index 5bc769f..02790e3 100644
--- a/hw/virtio/virtio-pci.c
+++ b/hw/virtio/virtio-pci.c
@@ -72,7 +72,7 @@ static void virtio_pci_notify(DeviceState *d, uint16_t vector)
msix_notify(&proxy->pci_dev, vector);
else {
VirtIODevice *vdev = virtio_bus_get_device(&proxy->bus);
- pci_set_irq(&proxy->pci_dev, atomic_read(&vdev->isr) & 1);
+ pci_set_irq(&proxy->pci_dev, qatomic_read(&vdev->isr) & 1);
}
}
@@ -398,7 +398,7 @@ static uint32_t virtio_ioport_read(VirtIOPCIProxy *proxy, uint32_t addr)
break;
case VIRTIO_PCI_ISR:
/* reading from the ISR also clears it. */
- ret = atomic_xchg(&vdev->isr, 0);
+ ret = qatomic_xchg(&vdev->isr, 0);
pci_irq_deassert(&proxy->pci_dev);
break;
case VIRTIO_MSI_CONFIG_VECTOR:
@@ -1362,7 +1362,7 @@ static uint64_t virtio_pci_isr_read(void *opaque, hwaddr addr,
{
VirtIOPCIProxy *proxy = opaque;
VirtIODevice *vdev = virtio_bus_get_device(&proxy->bus);
- uint64_t val = atomic_xchg(&vdev->isr, 0);
+ uint64_t val = qatomic_xchg(&vdev->isr, 0);
pci_irq_deassert(&proxy->pci_dev);
return val;
diff --git a/hw/virtio/virtio.c b/hw/virtio/virtio.c
index e983025..3a3d012 100644
--- a/hw/virtio/virtio.c
+++ b/hw/virtio/virtio.c
@@ -149,8 +149,8 @@ static void virtio_virtqueue_reset_region_cache(struct VirtQueue *vq)
{
VRingMemoryRegionCaches *caches;
- caches = atomic_read(&vq->vring.caches);
- atomic_rcu_set(&vq->vring.caches, NULL);
+ caches = qatomic_read(&vq->vring.caches);
+ qatomic_rcu_set(&vq->vring.caches, NULL);
if (caches) {
call_rcu(caches, virtio_free_region_cache, rcu);
}
@@ -197,7 +197,7 @@ static void virtio_init_region_cache(VirtIODevice *vdev, int n)
goto err_avail;
}
- atomic_rcu_set(&vq->vring.caches, new);
+ qatomic_rcu_set(&vq->vring.caches, new);
if (old) {
call_rcu(old, virtio_free_region_cache, rcu);
}
@@ -283,7 +283,7 @@ static void vring_packed_flags_write(VirtIODevice *vdev,
/* Called within rcu_read_lock(). */
static VRingMemoryRegionCaches *vring_get_region_caches(struct VirtQueue *vq)
{
- return atomic_rcu_read(&vq->vring.caches);
+ return qatomic_rcu_read(&vq->vring.caches);
}
/* Called within rcu_read_lock(). */
@@ -2007,7 +2007,7 @@ void virtio_reset(void *opaque)
vdev->queue_sel = 0;
vdev->status = 0;
vdev->disabled = false;
- atomic_set(&vdev->isr, 0);
+ qatomic_set(&vdev->isr, 0);
vdev->config_vector = VIRTIO_NO_VECTOR;
virtio_notify_vector(vdev, vdev->config_vector);
@@ -2439,13 +2439,13 @@ void virtio_del_queue(VirtIODevice *vdev, int n)
static void virtio_set_isr(VirtIODevice *vdev, int value)
{
- uint8_t old = atomic_read(&vdev->isr);
+ uint8_t old = qatomic_read(&vdev->isr);
/* Do not write ISR if it does not change, so that its cacheline remains
* shared in the common case where the guest does not read it.
*/
if ((old & value) != value) {
- atomic_or(&vdev->isr, value);
+ qatomic_or(&vdev->isr, value);
}
}
@@ -3254,7 +3254,7 @@ void virtio_init(VirtIODevice *vdev, const char *name,
vdev->started = false;
vdev->device_id = device_id;
vdev->status = 0;
- atomic_set(&vdev->isr, 0);
+ qatomic_set(&vdev->isr, 0);
vdev->queue_sel = 0;
vdev->config_vector = VIRTIO_NO_VECTOR;
vdev->vq = g_malloc0(sizeof(VirtQueue) * VIRTIO_QUEUE_MAX);
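The virtio.c hunks above use the RCU publish/retire idiom for
vq->vring.caches: the writer swaps in the new pointer with qatomic_rcu_set()
(a store-release, so readers never see a half-initialized cache) and retires
the old one with call_rcu(); readers dereference it only inside
rcu_read_lock() via qatomic_rcu_read(). Condensed from the hunks:

    /* Writer: publish 'new', then defer freeing the old caches until every
     * reader currently inside rcu_read_lock() has finished. */
    VRingMemoryRegionCaches *old = qatomic_read(&vq->vring.caches);
    qatomic_rcu_set(&vq->vring.caches, new);
    if (old) {
        call_rcu(old, virtio_free_region_cache, rcu);
    }

    /* Reader (called within rcu_read_lock()): */
    VRingMemoryRegionCaches *caches = qatomic_rcu_read(&vq->vring.caches);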
diff --git a/hw/xtensa/pic_cpu.c b/hw/xtensa/pic_cpu.c
index 1d5982a..6c94475 100644
--- a/hw/xtensa/pic_cpu.c
+++ b/hw/xtensa/pic_cpu.c
@@ -72,9 +72,9 @@ static void xtensa_set_irq(void *opaque, int irq, int active)
uint32_t irq_bit = 1 << irq;
if (active) {
- atomic_or(&env->sregs[INTSET], irq_bit);
+ qatomic_or(&env->sregs[INTSET], irq_bit);
} else if (env->config->interrupt[irq].inttype == INTTYPE_LEVEL) {
- atomic_and(&env->sregs[INTSET], ~irq_bit);
+ qatomic_and(&env->sregs[INTSET], ~irq_bit);
}
check_interrupts(env);