author    Peter Maydell <peter.maydell@linaro.org>    2020-09-24 18:48:45 +0100
committer Peter Maydell <peter.maydell@linaro.org>    2020-09-24 18:48:45 +0100
commit    8c1c07929feae876202ba26f07a540c5115c18cd
tree      20f6c8e2ac556bfb3c88a98c0d0cb2689de0263e /include
parent    1bd5556f6686365e76f7ff67fe67260c449e8345
parent    d73415a315471ac0b127ed3fad45c8ec5d711de1
Merge remote-tracking branch 'remotes/stefanha/tags/block-pull-request' into staging
Pull request

This includes the atomic_ -> qatomic_ rename that touches many files and
is prone to conflicts.

# gpg: Signature made Wed 23 Sep 2020 17:08:43 BST
# gpg:                using RSA key 8695A8BFD3F97CDAAC35775A9CA4ABB381AB73C8
# gpg: Good signature from "Stefan Hajnoczi <stefanha@redhat.com>" [full]
# gpg:                 aka "Stefan Hajnoczi <stefanha@gmail.com>" [full]
# Primary key fingerprint: 8695 A8BF D3F9 7CDA AC35 775A 9CA4 ABB3 81AB 73C8

* remotes/stefanha/tags/block-pull-request:
  qemu/atomic.h: rename atomic_ to qatomic_
  tests: add test-fdmon-epoll
  fdmon-poll: reset npfd when upgrading to fdmon-epoll
  gitmodules: add qemu.org vbootrom submodule
  gitmodules: switch to qemu.org meson mirror
  gitmodules: switch to qemu.org qboot mirror
  docs/system: clarify deprecation schedule
  virtio-crypto: don't modify elem->in/out_sg
  virtio-blk: undo destructive iov_discard_*() operations
  util/iov: add iov_discard_undo()
  virtio: add vhost-user-fs-ccw device
  libvhost-user: handle endianness as mandated by the spec
  MAINTAINERS: add Stefan Hajnoczi as block/nvme.c maintainer

Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
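The headline change in this pull request is the tree-wide atomic_*() -> qatomic_*() rename that dominates the diff below. As a hedged illustration only (a hypothetical counter, not code from this series), a caller migrates like this:

    /* Sketch: the rename is purely mechanical; semantics are unchanged. */
    #include "qemu/osdep.h"
    #include "qemu/atomic.h"

    static int requests_in_flight;

    static void request_start(void)
    {
        qatomic_inc(&requests_in_flight);           /* was atomic_inc() */
    }

    static void request_end(void)
    {
        qatomic_dec(&requests_in_flight);           /* was atomic_dec() */
    }

    static int requests_pending(void)
    {
        return qatomic_read(&requests_in_flight);   /* was atomic_read() */
    }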
Diffstat (limited to 'include')
-rw-r--r--  include/block/aio-wait.h | 4
-rw-r--r--  include/block/aio.h | 8
-rw-r--r--  include/exec/cpu_ldst.h | 2
-rw-r--r--  include/exec/exec-all.h | 6
-rw-r--r--  include/exec/log.h | 6
-rw-r--r--  include/exec/memory.h | 2
-rw-r--r--  include/exec/ram_addr.h | 26
-rw-r--r--  include/exec/ramlist.h | 2
-rw-r--r--  include/exec/tb-lookup.h | 4
-rw-r--r--  include/hw/core/cpu.h | 2
-rw-r--r--  include/hw/virtio/virtio-blk.h | 2
-rw-r--r--  include/qemu/atomic.h | 258
-rw-r--r--  include/qemu/atomic128.h | 6
-rw-r--r--  include/qemu/bitops.h | 2
-rw-r--r--  include/qemu/coroutine.h | 2
-rw-r--r--  include/qemu/iov.h | 23
-rw-r--r--  include/qemu/log.h | 6
-rw-r--r--  include/qemu/queue.h | 7
-rw-r--r--  include/qemu/rcu.h | 10
-rw-r--r--  include/qemu/rcu_queue.h | 100
-rw-r--r--  include/qemu/seqlock.h | 8
-rw-r--r--  include/qemu/stats64.h | 28
-rw-r--r--  include/qemu/thread.h | 24
-rw-r--r--  include/standard-headers/drivers/infiniband/hw/vmw_pvrdma/pvrdma_ring.h | 14
24 files changed, 293 insertions(+), 259 deletions(-)
diff --git a/include/block/aio-wait.h b/include/block/aio-wait.h
index 716d263..b39eefb 100644
--- a/include/block/aio-wait.h
+++ b/include/block/aio-wait.h
@@ -80,7 +80,7 @@ extern AioWait global_aio_wait;
AioWait *wait_ = &global_aio_wait; \
AioContext *ctx_ = (ctx); \
/* Increment wait_->num_waiters before evaluating cond. */ \
- atomic_inc(&wait_->num_waiters); \
+ qatomic_inc(&wait_->num_waiters); \
if (ctx_ && in_aio_context_home_thread(ctx_)) { \
while ((cond)) { \
aio_poll(ctx_, true); \
@@ -100,7 +100,7 @@ extern AioWait global_aio_wait;
waited_ = true; \
} \
} \
- atomic_dec(&wait_->num_waiters); \
+ qatomic_dec(&wait_->num_waiters); \
waited_; })
/**
diff --git a/include/block/aio.h b/include/block/aio.h
index b2f703f..ec8c5af 100644
--- a/include/block/aio.h
+++ b/include/block/aio.h
@@ -595,7 +595,7 @@ int64_t aio_compute_timeout(AioContext *ctx);
*/
static inline void aio_disable_external(AioContext *ctx)
{
- atomic_inc(&ctx->external_disable_cnt);
+ qatomic_inc(&ctx->external_disable_cnt);
}
/**
@@ -608,7 +608,7 @@ static inline void aio_enable_external(AioContext *ctx)
{
int old;
- old = atomic_fetch_dec(&ctx->external_disable_cnt);
+ old = qatomic_fetch_dec(&ctx->external_disable_cnt);
assert(old > 0);
if (old == 1) {
/* Kick event loop so it re-arms file descriptors */
@@ -624,7 +624,7 @@ static inline void aio_enable_external(AioContext *ctx)
*/
static inline bool aio_external_disabled(AioContext *ctx)
{
- return atomic_read(&ctx->external_disable_cnt);
+ return qatomic_read(&ctx->external_disable_cnt);
}
/**
@@ -637,7 +637,7 @@ static inline bool aio_external_disabled(AioContext *ctx)
*/
static inline bool aio_node_check(AioContext *ctx, bool is_external)
{
- return !is_external || !atomic_read(&ctx->external_disable_cnt);
+ return !is_external || !qatomic_read(&ctx->external_disable_cnt);
}
/**
diff --git a/include/exec/cpu_ldst.h b/include/exec/cpu_ldst.h
index c14a48f..30605ed 100644
--- a/include/exec/cpu_ldst.h
+++ b/include/exec/cpu_ldst.h
@@ -299,7 +299,7 @@ static inline target_ulong tlb_addr_write(const CPUTLBEntry *entry)
#if TCG_OVERSIZED_GUEST
return entry->addr_write;
#else
- return atomic_read(&entry->addr_write);
+ return qatomic_read(&entry->addr_write);
#endif
}
diff --git a/include/exec/exec-all.h b/include/exec/exec-all.h
index 3cf8827..1fe28d5 100644
--- a/include/exec/exec-all.h
+++ b/include/exec/exec-all.h
@@ -89,7 +89,7 @@ void QEMU_NORETURN cpu_loop_exit_atomic(CPUState *cpu, uintptr_t pc);
*/
static inline bool cpu_loop_exit_requested(CPUState *cpu)
{
- return (int32_t)atomic_read(&cpu_neg(cpu)->icount_decr.u32) < 0;
+ return (int32_t)qatomic_read(&cpu_neg(cpu)->icount_decr.u32) < 0;
}
#if !defined(CONFIG_USER_ONLY)
@@ -487,10 +487,10 @@ struct TranslationBlock {
extern bool parallel_cpus;
-/* Hide the atomic_read to make code a little easier on the eyes */
+/* Hide the qatomic_read to make code a little easier on the eyes */
static inline uint32_t tb_cflags(const TranslationBlock *tb)
{
- return atomic_read(&tb->cflags);
+ return qatomic_read(&tb->cflags);
}
/* current cflags for hashing/comparison */
diff --git a/include/exec/log.h b/include/exec/log.h
index 3ed797c..86871f4 100644
--- a/include/exec/log.h
+++ b/include/exec/log.h
@@ -19,7 +19,7 @@ static inline void log_cpu_state(CPUState *cpu, int flags)
if (qemu_log_enabled()) {
rcu_read_lock();
- logfile = atomic_rcu_read(&qemu_logfile);
+ logfile = qatomic_rcu_read(&qemu_logfile);
if (logfile) {
cpu_dump_state(cpu, logfile->fd, flags);
}
@@ -49,7 +49,7 @@ static inline void log_target_disas(CPUState *cpu, target_ulong start,
{
QemuLogFile *logfile;
rcu_read_lock();
- logfile = atomic_rcu_read(&qemu_logfile);
+ logfile = qatomic_rcu_read(&qemu_logfile);
if (logfile) {
target_disas(logfile->fd, cpu, start, len);
}
@@ -60,7 +60,7 @@ static inline void log_disas(void *code, unsigned long size, const char *note)
{
QemuLogFile *logfile;
rcu_read_lock();
- logfile = atomic_rcu_read(&qemu_logfile);
+ logfile = qatomic_rcu_read(&qemu_logfile);
if (logfile) {
disas(logfile->fd, code, size, note);
}
diff --git a/include/exec/memory.h b/include/exec/memory.h
index f1bb2a7..06b85e3 100644
--- a/include/exec/memory.h
+++ b/include/exec/memory.h
@@ -685,7 +685,7 @@ struct FlatView {
static inline FlatView *address_space_to_flatview(AddressSpace *as)
{
- return atomic_rcu_read(&as->current_map);
+ return qatomic_rcu_read(&as->current_map);
}
diff --git a/include/exec/ram_addr.h b/include/exec/ram_addr.h
index 3ef729a..c6d2ef1 100644
--- a/include/exec/ram_addr.h
+++ b/include/exec/ram_addr.h
@@ -164,7 +164,7 @@ static inline bool cpu_physical_memory_get_dirty(ram_addr_t start,
page = start >> TARGET_PAGE_BITS;
WITH_RCU_READ_LOCK_GUARD() {
- blocks = atomic_rcu_read(&ram_list.dirty_memory[client]);
+ blocks = qatomic_rcu_read(&ram_list.dirty_memory[client]);
idx = page / DIRTY_MEMORY_BLOCK_SIZE;
offset = page % DIRTY_MEMORY_BLOCK_SIZE;
@@ -205,7 +205,7 @@ static inline bool cpu_physical_memory_all_dirty(ram_addr_t start,
RCU_READ_LOCK_GUARD();
- blocks = atomic_rcu_read(&ram_list.dirty_memory[client]);
+ blocks = qatomic_rcu_read(&ram_list.dirty_memory[client]);
idx = page / DIRTY_MEMORY_BLOCK_SIZE;
offset = page % DIRTY_MEMORY_BLOCK_SIZE;
@@ -278,7 +278,7 @@ static inline void cpu_physical_memory_set_dirty_flag(ram_addr_t addr,
RCU_READ_LOCK_GUARD();
- blocks = atomic_rcu_read(&ram_list.dirty_memory[client]);
+ blocks = qatomic_rcu_read(&ram_list.dirty_memory[client]);
set_bit_atomic(offset, blocks->blocks[idx]);
}
@@ -301,7 +301,7 @@ static inline void cpu_physical_memory_set_dirty_range(ram_addr_t start,
WITH_RCU_READ_LOCK_GUARD() {
for (i = 0; i < DIRTY_MEMORY_NUM; i++) {
- blocks[i] = atomic_rcu_read(&ram_list.dirty_memory[i]);
+ blocks[i] = qatomic_rcu_read(&ram_list.dirty_memory[i]);
}
idx = page / DIRTY_MEMORY_BLOCK_SIZE;
@@ -361,23 +361,25 @@ static inline void cpu_physical_memory_set_dirty_lebitmap(unsigned long *bitmap,
WITH_RCU_READ_LOCK_GUARD() {
for (i = 0; i < DIRTY_MEMORY_NUM; i++) {
- blocks[i] = atomic_rcu_read(&ram_list.dirty_memory[i])->blocks;
+ blocks[i] =
+ qatomic_rcu_read(&ram_list.dirty_memory[i])->blocks;
}
for (k = 0; k < nr; k++) {
if (bitmap[k]) {
unsigned long temp = leul_to_cpu(bitmap[k]);
- atomic_or(&blocks[DIRTY_MEMORY_VGA][idx][offset], temp);
+ qatomic_or(&blocks[DIRTY_MEMORY_VGA][idx][offset], temp);
if (global_dirty_log) {
- atomic_or(&blocks[DIRTY_MEMORY_MIGRATION][idx][offset],
- temp);
+ qatomic_or(
+ &blocks[DIRTY_MEMORY_MIGRATION][idx][offset],
+ temp);
}
if (tcg_enabled()) {
- atomic_or(&blocks[DIRTY_MEMORY_CODE][idx][offset],
- temp);
+ qatomic_or(&blocks[DIRTY_MEMORY_CODE][idx][offset],
+ temp);
}
}
@@ -461,12 +463,12 @@ uint64_t cpu_physical_memory_sync_dirty_bitmap(RAMBlock *rb,
DIRTY_MEMORY_BLOCK_SIZE);
unsigned long page = BIT_WORD(start >> TARGET_PAGE_BITS);
- src = atomic_rcu_read(
+ src = qatomic_rcu_read(
&ram_list.dirty_memory[DIRTY_MEMORY_MIGRATION])->blocks;
for (k = page; k < page + nr; k++) {
if (src[idx][offset]) {
- unsigned long bits = atomic_xchg(&src[idx][offset], 0);
+ unsigned long bits = qatomic_xchg(&src[idx][offset], 0);
unsigned long new_dirty;
new_dirty = ~dest[k];
dest[k] |= bits;
diff --git a/include/exec/ramlist.h b/include/exec/ramlist.h
index bc4faa1..26704aa 100644
--- a/include/exec/ramlist.h
+++ b/include/exec/ramlist.h
@@ -19,7 +19,7 @@ typedef struct RAMBlockNotifier RAMBlockNotifier;
* rcu_read_lock();
*
* DirtyMemoryBlocks *blocks =
- * atomic_rcu_read(&ram_list.dirty_memory[DIRTY_MEMORY_MIGRATION]);
+ * qatomic_rcu_read(&ram_list.dirty_memory[DIRTY_MEMORY_MIGRATION]);
*
* ram_addr_t idx = (addr >> TARGET_PAGE_BITS) / DIRTY_MEMORY_BLOCK_SIZE;
* unsigned long *block = blocks.blocks[idx];
diff --git a/include/exec/tb-lookup.h b/include/exec/tb-lookup.h
index 26921b6..9cf475b 100644
--- a/include/exec/tb-lookup.h
+++ b/include/exec/tb-lookup.h
@@ -27,7 +27,7 @@ tb_lookup__cpu_state(CPUState *cpu, target_ulong *pc, target_ulong *cs_base,
cpu_get_tb_cpu_state(env, pc, cs_base, flags);
hash = tb_jmp_cache_hash_func(*pc);
- tb = atomic_rcu_read(&cpu->tb_jmp_cache[hash]);
+ tb = qatomic_rcu_read(&cpu->tb_jmp_cache[hash]);
cf_mask &= ~CF_CLUSTER_MASK;
cf_mask |= cpu->cluster_index << CF_CLUSTER_SHIFT;
@@ -44,7 +44,7 @@ tb_lookup__cpu_state(CPUState *cpu, target_ulong *pc, target_ulong *cs_base,
if (tb == NULL) {
return NULL;
}
- atomic_set(&cpu->tb_jmp_cache[hash], tb);
+ qatomic_set(&cpu->tb_jmp_cache[hash], tb);
return tb;
}
diff --git a/include/hw/core/cpu.h b/include/hw/core/cpu.h
index 99dc33f..6c34798c 100644
--- a/include/hw/core/cpu.h
+++ b/include/hw/core/cpu.h
@@ -482,7 +482,7 @@ static inline void cpu_tb_jmp_cache_clear(CPUState *cpu)
unsigned int i;
for (i = 0; i < TB_JMP_CACHE_SIZE; i++) {
- atomic_set(&cpu->tb_jmp_cache[i], NULL);
+ qatomic_set(&cpu->tb_jmp_cache[i], NULL);
}
}
diff --git a/include/hw/virtio/virtio-blk.h b/include/hw/virtio/virtio-blk.h
index 5953cf8..214ab74 100644
--- a/include/hw/virtio/virtio-blk.h
+++ b/include/hw/virtio/virtio-blk.h
@@ -70,6 +70,8 @@ typedef struct VirtIOBlockReq {
int64_t sector_num;
VirtIOBlock *dev;
VirtQueue *vq;
+ IOVDiscardUndo inhdr_undo;
+ IOVDiscardUndo outhdr_undo;
struct virtio_blk_inhdr *in;
struct virtio_blk_outhdr out;
QEMUIOVector qiov;
diff --git a/include/qemu/atomic.h b/include/qemu/atomic.h
index ff72db5..c1d211a 100644
--- a/include/qemu/atomic.h
+++ b/include/qemu/atomic.h
@@ -125,49 +125,49 @@
* no effect on the generated code but not using the atomic primitives
* will get flagged by sanitizers as a violation.
*/
-#define atomic_read__nocheck(ptr) \
+#define qatomic_read__nocheck(ptr) \
__atomic_load_n(ptr, __ATOMIC_RELAXED)
-#define atomic_read(ptr) \
- ({ \
+#define qatomic_read(ptr) \
+ ({ \
QEMU_BUILD_BUG_ON(sizeof(*ptr) > ATOMIC_REG_SIZE); \
- atomic_read__nocheck(ptr); \
+ qatomic_read__nocheck(ptr); \
})
-#define atomic_set__nocheck(ptr, i) \
+#define qatomic_set__nocheck(ptr, i) \
__atomic_store_n(ptr, i, __ATOMIC_RELAXED)
-#define atomic_set(ptr, i) do { \
+#define qatomic_set(ptr, i) do { \
QEMU_BUILD_BUG_ON(sizeof(*ptr) > ATOMIC_REG_SIZE); \
- atomic_set__nocheck(ptr, i); \
+ qatomic_set__nocheck(ptr, i); \
} while(0)
/* See above: most compilers currently treat consume and acquire the
- * same, but this slows down atomic_rcu_read unnecessarily.
+ * same, but this slows down qatomic_rcu_read unnecessarily.
*/
#ifdef __SANITIZE_THREAD__
-#define atomic_rcu_read__nocheck(ptr, valptr) \
+#define qatomic_rcu_read__nocheck(ptr, valptr) \
__atomic_load(ptr, valptr, __ATOMIC_CONSUME);
#else
-#define atomic_rcu_read__nocheck(ptr, valptr) \
- __atomic_load(ptr, valptr, __ATOMIC_RELAXED); \
+#define qatomic_rcu_read__nocheck(ptr, valptr) \
+ __atomic_load(ptr, valptr, __ATOMIC_RELAXED); \
smp_read_barrier_depends();
#endif
-#define atomic_rcu_read(ptr) \
- ({ \
+#define qatomic_rcu_read(ptr) \
+ ({ \
QEMU_BUILD_BUG_ON(sizeof(*ptr) > ATOMIC_REG_SIZE); \
- typeof_strip_qual(*ptr) _val; \
- atomic_rcu_read__nocheck(ptr, &_val); \
- _val; \
+ typeof_strip_qual(*ptr) _val; \
+ qatomic_rcu_read__nocheck(ptr, &_val); \
+ _val; \
})
-#define atomic_rcu_set(ptr, i) do { \
+#define qatomic_rcu_set(ptr, i) do { \
QEMU_BUILD_BUG_ON(sizeof(*ptr) > ATOMIC_REG_SIZE); \
- __atomic_store_n(ptr, i, __ATOMIC_RELEASE); \
+ __atomic_store_n(ptr, i, __ATOMIC_RELEASE); \
} while(0)
-#define atomic_load_acquire(ptr) \
+#define qatomic_load_acquire(ptr) \
({ \
QEMU_BUILD_BUG_ON(sizeof(*ptr) > ATOMIC_REG_SIZE); \
typeof_strip_qual(*ptr) _val; \
@@ -175,7 +175,7 @@
_val; \
})
-#define atomic_store_release(ptr, i) do { \
+#define qatomic_store_release(ptr, i) do { \
QEMU_BUILD_BUG_ON(sizeof(*ptr) > ATOMIC_REG_SIZE); \
__atomic_store_n(ptr, i, __ATOMIC_RELEASE); \
} while(0)
@@ -183,56 +183,61 @@
/* All the remaining operations are fully sequentially consistent */
-#define atomic_xchg__nocheck(ptr, i) ({ \
+#define qatomic_xchg__nocheck(ptr, i) ({ \
__atomic_exchange_n(ptr, (i), __ATOMIC_SEQ_CST); \
})
-#define atomic_xchg(ptr, i) ({ \
+#define qatomic_xchg(ptr, i) ({ \
QEMU_BUILD_BUG_ON(sizeof(*ptr) > ATOMIC_REG_SIZE); \
- atomic_xchg__nocheck(ptr, i); \
+ qatomic_xchg__nocheck(ptr, i); \
})
/* Returns the eventual value, failed or not */
-#define atomic_cmpxchg__nocheck(ptr, old, new) ({ \
+#define qatomic_cmpxchg__nocheck(ptr, old, new) ({ \
typeof_strip_qual(*ptr) _old = (old); \
(void)__atomic_compare_exchange_n(ptr, &_old, new, false, \
__ATOMIC_SEQ_CST, __ATOMIC_SEQ_CST); \
_old; \
})
-#define atomic_cmpxchg(ptr, old, new) ({ \
+#define qatomic_cmpxchg(ptr, old, new) ({ \
QEMU_BUILD_BUG_ON(sizeof(*ptr) > ATOMIC_REG_SIZE); \
- atomic_cmpxchg__nocheck(ptr, old, new); \
+ qatomic_cmpxchg__nocheck(ptr, old, new); \
})
/* Provide shorter names for GCC atomic builtins, return old value */
-#define atomic_fetch_inc(ptr) __atomic_fetch_add(ptr, 1, __ATOMIC_SEQ_CST)
-#define atomic_fetch_dec(ptr) __atomic_fetch_sub(ptr, 1, __ATOMIC_SEQ_CST)
-
-#ifndef atomic_fetch_add
-#define atomic_fetch_add(ptr, n) __atomic_fetch_add(ptr, n, __ATOMIC_SEQ_CST)
-#define atomic_fetch_sub(ptr, n) __atomic_fetch_sub(ptr, n, __ATOMIC_SEQ_CST)
-#define atomic_fetch_and(ptr, n) __atomic_fetch_and(ptr, n, __ATOMIC_SEQ_CST)
-#define atomic_fetch_or(ptr, n) __atomic_fetch_or(ptr, n, __ATOMIC_SEQ_CST)
-#define atomic_fetch_xor(ptr, n) __atomic_fetch_xor(ptr, n, __ATOMIC_SEQ_CST)
-#endif
-
-#define atomic_inc_fetch(ptr) __atomic_add_fetch(ptr, 1, __ATOMIC_SEQ_CST)
-#define atomic_dec_fetch(ptr) __atomic_sub_fetch(ptr, 1, __ATOMIC_SEQ_CST)
-#define atomic_add_fetch(ptr, n) __atomic_add_fetch(ptr, n, __ATOMIC_SEQ_CST)
-#define atomic_sub_fetch(ptr, n) __atomic_sub_fetch(ptr, n, __ATOMIC_SEQ_CST)
-#define atomic_and_fetch(ptr, n) __atomic_and_fetch(ptr, n, __ATOMIC_SEQ_CST)
-#define atomic_or_fetch(ptr, n) __atomic_or_fetch(ptr, n, __ATOMIC_SEQ_CST)
-#define atomic_xor_fetch(ptr, n) __atomic_xor_fetch(ptr, n, __ATOMIC_SEQ_CST)
+#define qatomic_fetch_inc(ptr) __atomic_fetch_add(ptr, 1, __ATOMIC_SEQ_CST)
+#define qatomic_fetch_dec(ptr) __atomic_fetch_sub(ptr, 1, __ATOMIC_SEQ_CST)
+
+#define qatomic_fetch_add(ptr, n) __atomic_fetch_add(ptr, n, __ATOMIC_SEQ_CST)
+#define qatomic_fetch_sub(ptr, n) __atomic_fetch_sub(ptr, n, __ATOMIC_SEQ_CST)
+#define qatomic_fetch_and(ptr, n) __atomic_fetch_and(ptr, n, __ATOMIC_SEQ_CST)
+#define qatomic_fetch_or(ptr, n) __atomic_fetch_or(ptr, n, __ATOMIC_SEQ_CST)
+#define qatomic_fetch_xor(ptr, n) __atomic_fetch_xor(ptr, n, __ATOMIC_SEQ_CST)
+
+#define qatomic_inc_fetch(ptr) __atomic_add_fetch(ptr, 1, __ATOMIC_SEQ_CST)
+#define qatomic_dec_fetch(ptr) __atomic_sub_fetch(ptr, 1, __ATOMIC_SEQ_CST)
+#define qatomic_add_fetch(ptr, n) __atomic_add_fetch(ptr, n, __ATOMIC_SEQ_CST)
+#define qatomic_sub_fetch(ptr, n) __atomic_sub_fetch(ptr, n, __ATOMIC_SEQ_CST)
+#define qatomic_and_fetch(ptr, n) __atomic_and_fetch(ptr, n, __ATOMIC_SEQ_CST)
+#define qatomic_or_fetch(ptr, n) __atomic_or_fetch(ptr, n, __ATOMIC_SEQ_CST)
+#define qatomic_xor_fetch(ptr, n) __atomic_xor_fetch(ptr, n, __ATOMIC_SEQ_CST)
/* And even shorter names that return void. */
-#define atomic_inc(ptr) ((void) __atomic_fetch_add(ptr, 1, __ATOMIC_SEQ_CST))
-#define atomic_dec(ptr) ((void) __atomic_fetch_sub(ptr, 1, __ATOMIC_SEQ_CST))
-#define atomic_add(ptr, n) ((void) __atomic_fetch_add(ptr, n, __ATOMIC_SEQ_CST))
-#define atomic_sub(ptr, n) ((void) __atomic_fetch_sub(ptr, n, __ATOMIC_SEQ_CST))
-#define atomic_and(ptr, n) ((void) __atomic_fetch_and(ptr, n, __ATOMIC_SEQ_CST))
-#define atomic_or(ptr, n) ((void) __atomic_fetch_or(ptr, n, __ATOMIC_SEQ_CST))
-#define atomic_xor(ptr, n) ((void) __atomic_fetch_xor(ptr, n, __ATOMIC_SEQ_CST))
+#define qatomic_inc(ptr) \
+ ((void) __atomic_fetch_add(ptr, 1, __ATOMIC_SEQ_CST))
+#define qatomic_dec(ptr) \
+ ((void) __atomic_fetch_sub(ptr, 1, __ATOMIC_SEQ_CST))
+#define qatomic_add(ptr, n) \
+ ((void) __atomic_fetch_add(ptr, n, __ATOMIC_SEQ_CST))
+#define qatomic_sub(ptr, n) \
+ ((void) __atomic_fetch_sub(ptr, n, __ATOMIC_SEQ_CST))
+#define qatomic_and(ptr, n) \
+ ((void) __atomic_fetch_and(ptr, n, __ATOMIC_SEQ_CST))
+#define qatomic_or(ptr, n) \
+ ((void) __atomic_fetch_or(ptr, n, __ATOMIC_SEQ_CST))
+#define qatomic_xor(ptr, n) \
+ ((void) __atomic_fetch_xor(ptr, n, __ATOMIC_SEQ_CST))
#else /* __ATOMIC_RELAXED */
@@ -272,7 +277,7 @@
* but it is a full barrier at the hardware level. Add a compiler barrier
* to make it a full barrier also at the compiler level.
*/
-#define atomic_xchg(ptr, i) (barrier(), __sync_lock_test_and_set(ptr, i))
+#define qatomic_xchg(ptr, i) (barrier(), __sync_lock_test_and_set(ptr, i))
#elif defined(_ARCH_PPC)
@@ -325,14 +330,14 @@
/* These will only be atomic if the processor does the fetch or store
* in a single issue memory operation
*/
-#define atomic_read__nocheck(p) (*(__typeof__(*(p)) volatile*) (p))
-#define atomic_set__nocheck(p, i) ((*(__typeof__(*(p)) volatile*) (p)) = (i))
+#define qatomic_read__nocheck(p) (*(__typeof__(*(p)) volatile*) (p))
+#define qatomic_set__nocheck(p, i) ((*(__typeof__(*(p)) volatile*) (p)) = (i))
-#define atomic_read(ptr) atomic_read__nocheck(ptr)
-#define atomic_set(ptr, i) atomic_set__nocheck(ptr,i)
+#define qatomic_read(ptr) qatomic_read__nocheck(ptr)
+#define qatomic_set(ptr, i) qatomic_set__nocheck(ptr,i)
/**
- * atomic_rcu_read - reads a RCU-protected pointer to a local variable
+ * qatomic_rcu_read - reads a RCU-protected pointer to a local variable
* into a RCU read-side critical section. The pointer can later be safely
* dereferenced within the critical section.
*
@@ -342,21 +347,21 @@
* Inserts memory barriers on architectures that require them (currently only
* Alpha) and documents which pointers are protected by RCU.
*
- * atomic_rcu_read also includes a compiler barrier to ensure that
+ * qatomic_rcu_read also includes a compiler barrier to ensure that
* value-speculative optimizations (e.g. VSS: Value Speculation
* Scheduling) does not perform the data read before the pointer read
* by speculating the value of the pointer.
*
- * Should match atomic_rcu_set(), atomic_xchg(), atomic_cmpxchg().
+ * Should match qatomic_rcu_set(), qatomic_xchg(), qatomic_cmpxchg().
*/
-#define atomic_rcu_read(ptr) ({ \
- typeof(*ptr) _val = atomic_read(ptr); \
+#define qatomic_rcu_read(ptr) ({ \
+ typeof(*ptr) _val = qatomic_read(ptr); \
smp_read_barrier_depends(); \
_val; \
})
/**
- * atomic_rcu_set - assigns (publicizes) a pointer to a new data structure
+ * qatomic_rcu_set - assigns (publicizes) a pointer to a new data structure
* meant to be read by RCU read-side critical sections.
*
* Documents which pointers will be dereferenced by RCU read-side critical
@@ -364,65 +369,64 @@
* them. It also makes sure the compiler does not reorder code initializing the
* data structure before its publication.
*
- * Should match atomic_rcu_read().
+ * Should match qatomic_rcu_read().
*/
-#define atomic_rcu_set(ptr, i) do { \
+#define qatomic_rcu_set(ptr, i) do { \
smp_wmb(); \
- atomic_set(ptr, i); \
+ qatomic_set(ptr, i); \
} while (0)
-#define atomic_load_acquire(ptr) ({ \
- typeof(*ptr) _val = atomic_read(ptr); \
+#define qatomic_load_acquire(ptr) ({ \
+ typeof(*ptr) _val = qatomic_read(ptr); \
smp_mb_acquire(); \
_val; \
})
-#define atomic_store_release(ptr, i) do { \
+#define qatomic_store_release(ptr, i) do { \
smp_mb_release(); \
- atomic_set(ptr, i); \
+ qatomic_set(ptr, i); \
} while (0)
-#ifndef atomic_xchg
+#ifndef qatomic_xchg
#if defined(__clang__)
-#define atomic_xchg(ptr, i) __sync_swap(ptr, i)
+#define qatomic_xchg(ptr, i) __sync_swap(ptr, i)
#else
/* __sync_lock_test_and_set() is documented to be an acquire barrier only. */
-#define atomic_xchg(ptr, i) (smp_mb(), __sync_lock_test_and_set(ptr, i))
+#define qatomic_xchg(ptr, i) (smp_mb(), __sync_lock_test_and_set(ptr, i))
#endif
#endif
-#define atomic_xchg__nocheck atomic_xchg
+#define qatomic_xchg__nocheck qatomic_xchg
/* Provide shorter names for GCC atomic builtins. */
-#define atomic_fetch_inc(ptr) __sync_fetch_and_add(ptr, 1)
-#define atomic_fetch_dec(ptr) __sync_fetch_and_add(ptr, -1)
-
-#ifndef atomic_fetch_add
-#define atomic_fetch_add(ptr, n) __sync_fetch_and_add(ptr, n)
-#define atomic_fetch_sub(ptr, n) __sync_fetch_and_sub(ptr, n)
-#define atomic_fetch_and(ptr, n) __sync_fetch_and_and(ptr, n)
-#define atomic_fetch_or(ptr, n) __sync_fetch_and_or(ptr, n)
-#define atomic_fetch_xor(ptr, n) __sync_fetch_and_xor(ptr, n)
-#endif
-
-#define atomic_inc_fetch(ptr) __sync_add_and_fetch(ptr, 1)
-#define atomic_dec_fetch(ptr) __sync_add_and_fetch(ptr, -1)
-#define atomic_add_fetch(ptr, n) __sync_add_and_fetch(ptr, n)
-#define atomic_sub_fetch(ptr, n) __sync_sub_and_fetch(ptr, n)
-#define atomic_and_fetch(ptr, n) __sync_and_and_fetch(ptr, n)
-#define atomic_or_fetch(ptr, n) __sync_or_and_fetch(ptr, n)
-#define atomic_xor_fetch(ptr, n) __sync_xor_and_fetch(ptr, n)
-
-#define atomic_cmpxchg(ptr, old, new) __sync_val_compare_and_swap(ptr, old, new)
-#define atomic_cmpxchg__nocheck(ptr, old, new) atomic_cmpxchg(ptr, old, new)
+#define qatomic_fetch_inc(ptr) __sync_fetch_and_add(ptr, 1)
+#define qatomic_fetch_dec(ptr) __sync_fetch_and_add(ptr, -1)
+
+#define qatomic_fetch_add(ptr, n) __sync_fetch_and_add(ptr, n)
+#define qatomic_fetch_sub(ptr, n) __sync_fetch_and_sub(ptr, n)
+#define qatomic_fetch_and(ptr, n) __sync_fetch_and_and(ptr, n)
+#define qatomic_fetch_or(ptr, n) __sync_fetch_and_or(ptr, n)
+#define qatomic_fetch_xor(ptr, n) __sync_fetch_and_xor(ptr, n)
+
+#define qatomic_inc_fetch(ptr) __sync_add_and_fetch(ptr, 1)
+#define qatomic_dec_fetch(ptr) __sync_add_and_fetch(ptr, -1)
+#define qatomic_add_fetch(ptr, n) __sync_add_and_fetch(ptr, n)
+#define qatomic_sub_fetch(ptr, n) __sync_sub_and_fetch(ptr, n)
+#define qatomic_and_fetch(ptr, n) __sync_and_and_fetch(ptr, n)
+#define qatomic_or_fetch(ptr, n) __sync_or_and_fetch(ptr, n)
+#define qatomic_xor_fetch(ptr, n) __sync_xor_and_fetch(ptr, n)
+
+#define qatomic_cmpxchg(ptr, old, new) \
+ __sync_val_compare_and_swap(ptr, old, new)
+#define qatomic_cmpxchg__nocheck(ptr, old, new) qatomic_cmpxchg(ptr, old, new)
/* And even shorter names that return void. */
-#define atomic_inc(ptr) ((void) __sync_fetch_and_add(ptr, 1))
-#define atomic_dec(ptr) ((void) __sync_fetch_and_add(ptr, -1))
-#define atomic_add(ptr, n) ((void) __sync_fetch_and_add(ptr, n))
-#define atomic_sub(ptr, n) ((void) __sync_fetch_and_sub(ptr, n))
-#define atomic_and(ptr, n) ((void) __sync_fetch_and_and(ptr, n))
-#define atomic_or(ptr, n) ((void) __sync_fetch_and_or(ptr, n))
-#define atomic_xor(ptr, n) ((void) __sync_fetch_and_xor(ptr, n))
+#define qatomic_inc(ptr) ((void) __sync_fetch_and_add(ptr, 1))
+#define qatomic_dec(ptr) ((void) __sync_fetch_and_add(ptr, -1))
+#define qatomic_add(ptr, n) ((void) __sync_fetch_and_add(ptr, n))
+#define qatomic_sub(ptr, n) ((void) __sync_fetch_and_sub(ptr, n))
+#define qatomic_and(ptr, n) ((void) __sync_fetch_and_and(ptr, n))
+#define qatomic_or(ptr, n) ((void) __sync_fetch_and_or(ptr, n))
+#define qatomic_xor(ptr, n) ((void) __sync_fetch_and_xor(ptr, n))
#endif /* __ATOMIC_RELAXED */
@@ -436,11 +440,11 @@
/* This is more efficient than a store plus a fence. */
#if !defined(__SANITIZE_THREAD__)
#if defined(__i386__) || defined(__x86_64__) || defined(__s390x__)
-#define atomic_mb_set(ptr, i) ((void)atomic_xchg(ptr, i))
+#define qatomic_mb_set(ptr, i) ((void)qatomic_xchg(ptr, i))
#endif
#endif
-/* atomic_mb_read/set semantics map Java volatile variables. They are
+/* qatomic_mb_read/set semantics map Java volatile variables. They are
* less expensive on some platforms (notably POWER) than fully
* sequentially consistent operations.
*
@@ -448,58 +452,58 @@
* use. See docs/devel/atomics.txt for more discussion.
*/
-#ifndef atomic_mb_read
-#define atomic_mb_read(ptr) \
- atomic_load_acquire(ptr)
+#ifndef qatomic_mb_read
+#define qatomic_mb_read(ptr) \
+ qatomic_load_acquire(ptr)
#endif
-#ifndef atomic_mb_set
-#define atomic_mb_set(ptr, i) do { \
- atomic_store_release(ptr, i); \
+#ifndef qatomic_mb_set
+#define qatomic_mb_set(ptr, i) do { \
+ qatomic_store_release(ptr, i); \
smp_mb(); \
} while(0)
#endif
-#define atomic_fetch_inc_nonzero(ptr) ({ \
- typeof_strip_qual(*ptr) _oldn = atomic_read(ptr); \
- while (_oldn && atomic_cmpxchg(ptr, _oldn, _oldn + 1) != _oldn) { \
- _oldn = atomic_read(ptr); \
+#define qatomic_fetch_inc_nonzero(ptr) ({ \
+ typeof_strip_qual(*ptr) _oldn = qatomic_read(ptr); \
+ while (_oldn && qatomic_cmpxchg(ptr, _oldn, _oldn + 1) != _oldn) { \
+ _oldn = qatomic_read(ptr); \
} \
_oldn; \
})
/* Abstractions to access atomically (i.e. "once") i64/u64 variables */
#ifdef CONFIG_ATOMIC64
-static inline int64_t atomic_read_i64(const int64_t *ptr)
+static inline int64_t qatomic_read_i64(const int64_t *ptr)
{
/* use __nocheck because sizeof(void *) might be < sizeof(u64) */
- return atomic_read__nocheck(ptr);
+ return qatomic_read__nocheck(ptr);
}
-static inline uint64_t atomic_read_u64(const uint64_t *ptr)
+static inline uint64_t qatomic_read_u64(const uint64_t *ptr)
{
- return atomic_read__nocheck(ptr);
+ return qatomic_read__nocheck(ptr);
}
-static inline void atomic_set_i64(int64_t *ptr, int64_t val)
+static inline void qatomic_set_i64(int64_t *ptr, int64_t val)
{
- atomic_set__nocheck(ptr, val);
+ qatomic_set__nocheck(ptr, val);
}
-static inline void atomic_set_u64(uint64_t *ptr, uint64_t val)
+static inline void qatomic_set_u64(uint64_t *ptr, uint64_t val)
{
- atomic_set__nocheck(ptr, val);
+ qatomic_set__nocheck(ptr, val);
}
-static inline void atomic64_init(void)
+static inline void qatomic64_init(void)
{
}
#else /* !CONFIG_ATOMIC64 */
-int64_t atomic_read_i64(const int64_t *ptr);
-uint64_t atomic_read_u64(const uint64_t *ptr);
-void atomic_set_i64(int64_t *ptr, int64_t val);
-void atomic_set_u64(uint64_t *ptr, uint64_t val);
-void atomic64_init(void);
+int64_t qatomic_read_i64(const int64_t *ptr);
+uint64_t qatomic_read_u64(const uint64_t *ptr);
+void qatomic_set_i64(int64_t *ptr, int64_t val);
+void qatomic_set_u64(uint64_t *ptr, uint64_t val);
+void qatomic64_init(void);
#endif /* !CONFIG_ATOMIC64 */
#endif /* QEMU_ATOMIC_H */
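The qatomic_rcu_read()/qatomic_rcu_set() pair above carries the publication and dependency-ordering barriers needed for RCU-protected pointers. A hedged sketch, assuming the macros renamed above (hypothetical Config structure; RCU reclamation via call_rcu() is omitted):

    #include "qemu/osdep.h"
    #include "qemu/atomic.h"
    #include "qemu/rcu.h"

    typedef struct Config {
        int verbose;
    } Config;

    static Config *current_config;

    static void config_replace(Config *new_cfg)
    {
        /* new_cfg is fully initialized first; qatomic_rcu_set() orders
         * those stores before the pointer store.  The old Config must be
         * reclaimed through RCU (e.g. call_rcu()), not shown here. */
        qatomic_rcu_set(&current_config, new_cfg);
    }

    static int config_verbose(void)
    {
        int v;

        rcu_read_lock();
        v = qatomic_rcu_read(&current_config)->verbose;
        rcu_read_unlock();
        return v;
    }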
diff --git a/include/qemu/atomic128.h b/include/qemu/atomic128.h
index 6b34484..ad2bcf4 100644
--- a/include/qemu/atomic128.h
+++ b/include/qemu/atomic128.h
@@ -44,7 +44,7 @@
#if defined(CONFIG_ATOMIC128)
static inline Int128 atomic16_cmpxchg(Int128 *ptr, Int128 cmp, Int128 new)
{
- return atomic_cmpxchg__nocheck(ptr, cmp, new);
+ return qatomic_cmpxchg__nocheck(ptr, cmp, new);
}
# define HAVE_CMPXCHG128 1
#elif defined(CONFIG_CMPXCHG128)
@@ -89,12 +89,12 @@ Int128 QEMU_ERROR("unsupported atomic")
#if defined(CONFIG_ATOMIC128)
static inline Int128 atomic16_read(Int128 *ptr)
{
- return atomic_read__nocheck(ptr);
+ return qatomic_read__nocheck(ptr);
}
static inline void atomic16_set(Int128 *ptr, Int128 val)
{
- atomic_set__nocheck(ptr, val);
+ qatomic_set__nocheck(ptr, val);
}
# define HAVE_ATOMIC128 1
diff --git a/include/qemu/bitops.h b/include/qemu/bitops.h
index f55ce8b..3acbf33 100644
--- a/include/qemu/bitops.h
+++ b/include/qemu/bitops.h
@@ -51,7 +51,7 @@ static inline void set_bit_atomic(long nr, unsigned long *addr)
unsigned long mask = BIT_MASK(nr);
unsigned long *p = addr + BIT_WORD(nr);
- atomic_or(p, mask);
+ qatomic_or(p, mask);
}
/**
diff --git a/include/qemu/coroutine.h b/include/qemu/coroutine.h
index dfd261c..84eab6e 100644
--- a/include/qemu/coroutine.h
+++ b/include/qemu/coroutine.h
@@ -179,7 +179,7 @@ static inline coroutine_fn void qemu_co_mutex_assert_locked(CoMutex *mutex)
* because the condition will be false no matter whether we read NULL or
* the pointer for any other coroutine.
*/
- assert(atomic_read(&mutex->locked) &&
+ assert(qatomic_read(&mutex->locked) &&
mutex->holder == qemu_coroutine_self());
}
diff --git a/include/qemu/iov.h b/include/qemu/iov.h
index bffc151..b6b283a 100644
--- a/include/qemu/iov.h
+++ b/include/qemu/iov.h
@@ -130,6 +130,29 @@ size_t iov_discard_front(struct iovec **iov, unsigned int *iov_cnt,
size_t iov_discard_back(struct iovec *iov, unsigned int *iov_cnt,
size_t bytes);
+/* Information needed to undo an iov_discard_*() operation */
+typedef struct {
+ struct iovec *modified_iov;
+ struct iovec orig;
+} IOVDiscardUndo;
+
+/*
+ * Undo an iov_discard_front_undoable() or iov_discard_back_undoable()
+ * operation. If multiple operations are made then each one needs a separate
+ * IOVDiscardUndo and iov_discard_undo() must be called in the reverse order
+ * that the operations were made.
+ */
+void iov_discard_undo(IOVDiscardUndo *undo);
+
+/*
+ * Undoable versions of iov_discard_front() and iov_discard_back(). Use
+ * iov_discard_undo() to reset to the state before the discard operations.
+ */
+size_t iov_discard_front_undoable(struct iovec **iov, unsigned int *iov_cnt,
+ size_t bytes, IOVDiscardUndo *undo);
+size_t iov_discard_back_undoable(struct iovec *iov, unsigned int *iov_cnt,
+ size_t bytes, IOVDiscardUndo *undo);
+
typedef struct QEMUIOVector {
struct iovec *iov;
int niov;
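The undoable discard API above is what the "virtio-blk: undo destructive iov_discard_*() operations" commit in this pull request builds on. A hedged sketch of the intended pairing (hypothetical helper names; the actual virtio-blk.c change is outside this include/ diff):

    #include "qemu/osdep.h"
    #include "qemu/iov.h"
    #include "hw/virtio/virtio-blk.h"

    /* Strip the request headers undoably, recording undo state in the
     * request so the element's iovecs can be restored later. */
    static int req_strip_headers(VirtIOBlockReq *req,
                                 struct iovec **out_iov, unsigned *out_num,
                                 struct iovec *in_iov, unsigned *in_num)
    {
        if (iov_discard_front_undoable(out_iov, out_num, sizeof(req->out),
                                       &req->outhdr_undo) != sizeof(req->out)) {
            return -EINVAL;
        }
        if (iov_discard_back_undoable(in_iov, in_num,
                                      sizeof(struct virtio_blk_inhdr),
                                      &req->inhdr_undo)
            != sizeof(struct virtio_blk_inhdr)) {
            iov_discard_undo(&req->outhdr_undo);
            return -EINVAL;
        }
        return 0;
    }

    /* Undo in reverse order of the discards before returning the element. */
    static void req_restore_headers(VirtIOBlockReq *req)
    {
        iov_discard_undo(&req->inhdr_undo);
        iov_discard_undo(&req->outhdr_undo);
    }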
diff --git a/include/qemu/log.h b/include/qemu/log.h
index f4724f7..9b80660 100644
--- a/include/qemu/log.h
+++ b/include/qemu/log.h
@@ -36,7 +36,7 @@ static inline bool qemu_log_separate(void)
bool res = false;
rcu_read_lock();
- logfile = atomic_rcu_read(&qemu_logfile);
+ logfile = qatomic_rcu_read(&qemu_logfile);
if (logfile && logfile->fd != stderr) {
res = true;
}
@@ -75,7 +75,7 @@ static inline FILE *qemu_log_lock(void)
{
QemuLogFile *logfile;
rcu_read_lock();
- logfile = atomic_rcu_read(&qemu_logfile);
+ logfile = qatomic_rcu_read(&qemu_logfile);
if (logfile) {
qemu_flockfile(logfile->fd);
return logfile->fd;
@@ -102,7 +102,7 @@ qemu_log_vprintf(const char *fmt, va_list va)
QemuLogFile *logfile;
rcu_read_lock();
- logfile = atomic_rcu_read(&qemu_logfile);
+ logfile = qatomic_rcu_read(&qemu_logfile);
if (logfile) {
vfprintf(logfile->fd, fmt, va);
}
diff --git a/include/qemu/queue.h b/include/qemu/queue.h
index 456a5b0..e029e7b 100644
--- a/include/qemu/queue.h
+++ b/include/qemu/queue.h
@@ -218,12 +218,12 @@ struct { \
typeof(elm) save_sle_next; \
do { \
save_sle_next = (elm)->field.sle_next = (head)->slh_first; \
- } while (atomic_cmpxchg(&(head)->slh_first, save_sle_next, (elm)) != \
+ } while (qatomic_cmpxchg(&(head)->slh_first, save_sle_next, (elm)) !=\
save_sle_next); \
} while (/*CONSTCOND*/0)
#define QSLIST_MOVE_ATOMIC(dest, src) do { \
- (dest)->slh_first = atomic_xchg(&(src)->slh_first, NULL); \
+ (dest)->slh_first = qatomic_xchg(&(src)->slh_first, NULL); \
} while (/*CONSTCOND*/0)
#define QSLIST_REMOVE_HEAD(head, field) do { \
@@ -376,7 +376,8 @@ struct { \
/*
* Simple queue access methods.
*/
-#define QSIMPLEQ_EMPTY_ATOMIC(head) (atomic_read(&((head)->sqh_first)) == NULL)
+#define QSIMPLEQ_EMPTY_ATOMIC(head) \
+ (qatomic_read(&((head)->sqh_first)) == NULL)
#define QSIMPLEQ_EMPTY(head) ((head)->sqh_first == NULL)
#define QSIMPLEQ_FIRST(head) ((head)->sqh_first)
#define QSIMPLEQ_NEXT(elm, field) ((elm)->field.sqe_next)
diff --git a/include/qemu/rcu.h b/include/qemu/rcu.h
index 0e375eb..515d327 100644
--- a/include/qemu/rcu.h
+++ b/include/qemu/rcu.h
@@ -79,8 +79,8 @@ static inline void rcu_read_lock(void)
return;
}
- ctr = atomic_read(&rcu_gp_ctr);
- atomic_set(&p_rcu_reader->ctr, ctr);
+ ctr = qatomic_read(&rcu_gp_ctr);
+ qatomic_set(&p_rcu_reader->ctr, ctr);
/* Write p_rcu_reader->ctr before reading RCU-protected pointers. */
smp_mb_placeholder();
@@ -100,12 +100,12 @@ static inline void rcu_read_unlock(void)
* smp_mb_placeholder(), this ensures writes to p_rcu_reader->ctr
* are sequentially consistent.
*/
- atomic_store_release(&p_rcu_reader->ctr, 0);
+ qatomic_store_release(&p_rcu_reader->ctr, 0);
/* Write p_rcu_reader->ctr before reading p_rcu_reader->waiting. */
smp_mb_placeholder();
- if (unlikely(atomic_read(&p_rcu_reader->waiting))) {
- atomic_set(&p_rcu_reader->waiting, false);
+ if (unlikely(qatomic_read(&p_rcu_reader->waiting))) {
+ qatomic_set(&p_rcu_reader->waiting, false);
qemu_event_set(&rcu_gp_event);
}
}
diff --git a/include/qemu/rcu_queue.h b/include/qemu/rcu_queue.h
index 558961c..0e53ddd 100644
--- a/include/qemu/rcu_queue.h
+++ b/include/qemu/rcu_queue.h
@@ -36,9 +36,9 @@ extern "C" {
/*
* List access methods.
*/
-#define QLIST_EMPTY_RCU(head) (atomic_read(&(head)->lh_first) == NULL)
-#define QLIST_FIRST_RCU(head) (atomic_rcu_read(&(head)->lh_first))
-#define QLIST_NEXT_RCU(elm, field) (atomic_rcu_read(&(elm)->field.le_next))
+#define QLIST_EMPTY_RCU(head) (qatomic_read(&(head)->lh_first) == NULL)
+#define QLIST_FIRST_RCU(head) (qatomic_rcu_read(&(head)->lh_first))
+#define QLIST_NEXT_RCU(elm, field) (qatomic_rcu_read(&(elm)->field.le_next))
/*
* List functions.
@@ -46,7 +46,7 @@ extern "C" {
/*
- * The difference between atomic_read/set and atomic_rcu_read/set
+ * The difference between qatomic_read/set and qatomic_rcu_read/set
* is in the including of a read/write memory barrier to the volatile
* access. atomic_rcu_* macros include the memory barrier, the
* plain atomic macros do not. Therefore, it should be correct to
@@ -66,7 +66,7 @@ extern "C" {
#define QLIST_INSERT_AFTER_RCU(listelm, elm, field) do { \
(elm)->field.le_next = (listelm)->field.le_next; \
(elm)->field.le_prev = &(listelm)->field.le_next; \
- atomic_rcu_set(&(listelm)->field.le_next, (elm)); \
+ qatomic_rcu_set(&(listelm)->field.le_next, (elm)); \
if ((elm)->field.le_next != NULL) { \
(elm)->field.le_next->field.le_prev = \
&(elm)->field.le_next; \
@@ -82,7 +82,7 @@ extern "C" {
#define QLIST_INSERT_BEFORE_RCU(listelm, elm, field) do { \
(elm)->field.le_prev = (listelm)->field.le_prev; \
(elm)->field.le_next = (listelm); \
- atomic_rcu_set((listelm)->field.le_prev, (elm)); \
+ qatomic_rcu_set((listelm)->field.le_prev, (elm)); \
(listelm)->field.le_prev = &(elm)->field.le_next; \
} while (/*CONSTCOND*/0)
@@ -95,7 +95,7 @@ extern "C" {
#define QLIST_INSERT_HEAD_RCU(head, elm, field) do { \
(elm)->field.le_prev = &(head)->lh_first; \
(elm)->field.le_next = (head)->lh_first; \
- atomic_rcu_set((&(head)->lh_first), (elm)); \
+ qatomic_rcu_set((&(head)->lh_first), (elm)); \
if ((elm)->field.le_next != NULL) { \
(elm)->field.le_next->field.le_prev = \
&(elm)->field.le_next; \
@@ -112,20 +112,20 @@ extern "C" {
(elm)->field.le_next->field.le_prev = \
(elm)->field.le_prev; \
} \
- atomic_set((elm)->field.le_prev, (elm)->field.le_next); \
+ qatomic_set((elm)->field.le_prev, (elm)->field.le_next); \
} while (/*CONSTCOND*/0)
/* List traversal must occur within an RCU critical section. */
#define QLIST_FOREACH_RCU(var, head, field) \
- for ((var) = atomic_rcu_read(&(head)->lh_first); \
+ for ((var) = qatomic_rcu_read(&(head)->lh_first); \
(var); \
- (var) = atomic_rcu_read(&(var)->field.le_next))
+ (var) = qatomic_rcu_read(&(var)->field.le_next))
/* List traversal must occur within an RCU critical section. */
#define QLIST_FOREACH_SAFE_RCU(var, head, field, next_var) \
- for ((var) = (atomic_rcu_read(&(head)->lh_first)); \
+ for ((var) = (qatomic_rcu_read(&(head)->lh_first)); \
(var) && \
- ((next_var) = atomic_rcu_read(&(var)->field.le_next), 1); \
+ ((next_var) = qatomic_rcu_read(&(var)->field.le_next), 1); \
(var) = (next_var))
/*
@@ -133,9 +133,10 @@ extern "C" {
*/
/* Simple queue access methods */
-#define QSIMPLEQ_EMPTY_RCU(head) (atomic_read(&(head)->sqh_first) == NULL)
-#define QSIMPLEQ_FIRST_RCU(head) atomic_rcu_read(&(head)->sqh_first)
-#define QSIMPLEQ_NEXT_RCU(elm, field) atomic_rcu_read(&(elm)->field.sqe_next)
+#define QSIMPLEQ_EMPTY_RCU(head) \
+ (qatomic_read(&(head)->sqh_first) == NULL)
+#define QSIMPLEQ_FIRST_RCU(head) qatomic_rcu_read(&(head)->sqh_first)
+#define QSIMPLEQ_NEXT_RCU(elm, field) qatomic_rcu_read(&(elm)->field.sqe_next)
/* Simple queue functions */
#define QSIMPLEQ_INSERT_HEAD_RCU(head, elm, field) do { \
@@ -143,12 +144,12 @@ extern "C" {
if ((elm)->field.sqe_next == NULL) { \
(head)->sqh_last = &(elm)->field.sqe_next; \
} \
- atomic_rcu_set(&(head)->sqh_first, (elm)); \
+ qatomic_rcu_set(&(head)->sqh_first, (elm)); \
} while (/*CONSTCOND*/0)
#define QSIMPLEQ_INSERT_TAIL_RCU(head, elm, field) do { \
(elm)->field.sqe_next = NULL; \
- atomic_rcu_set((head)->sqh_last, (elm)); \
+ qatomic_rcu_set((head)->sqh_last, (elm)); \
(head)->sqh_last = &(elm)->field.sqe_next; \
} while (/*CONSTCOND*/0)
@@ -157,11 +158,11 @@ extern "C" {
if ((elm)->field.sqe_next == NULL) { \
(head)->sqh_last = &(elm)->field.sqe_next; \
} \
- atomic_rcu_set(&(listelm)->field.sqe_next, (elm)); \
+ qatomic_rcu_set(&(listelm)->field.sqe_next, (elm)); \
} while (/*CONSTCOND*/0)
#define QSIMPLEQ_REMOVE_HEAD_RCU(head, field) do { \
- atomic_set(&(head)->sqh_first, (head)->sqh_first->field.sqe_next); \
+ qatomic_set(&(head)->sqh_first, (head)->sqh_first->field.sqe_next);\
if ((head)->sqh_first == NULL) { \
(head)->sqh_last = &(head)->sqh_first; \
} \
@@ -175,7 +176,7 @@ extern "C" {
while (curr->field.sqe_next != (elm)) { \
curr = curr->field.sqe_next; \
} \
- atomic_set(&curr->field.sqe_next, \
+ qatomic_set(&curr->field.sqe_next, \
curr->field.sqe_next->field.sqe_next); \
if (curr->field.sqe_next == NULL) { \
(head)->sqh_last = &(curr)->field.sqe_next; \
@@ -184,13 +185,13 @@ extern "C" {
} while (/*CONSTCOND*/0)
#define QSIMPLEQ_FOREACH_RCU(var, head, field) \
- for ((var) = atomic_rcu_read(&(head)->sqh_first); \
+ for ((var) = qatomic_rcu_read(&(head)->sqh_first); \
(var); \
- (var) = atomic_rcu_read(&(var)->field.sqe_next))
+ (var) = qatomic_rcu_read(&(var)->field.sqe_next))
#define QSIMPLEQ_FOREACH_SAFE_RCU(var, head, field, next) \
- for ((var) = atomic_rcu_read(&(head)->sqh_first); \
- (var) && ((next) = atomic_rcu_read(&(var)->field.sqe_next), 1); \
+ for ((var) = qatomic_rcu_read(&(head)->sqh_first); \
+ (var) && ((next) = qatomic_rcu_read(&(var)->field.sqe_next), 1);\
(var) = (next))
/*
@@ -198,9 +199,9 @@ extern "C" {
*/
/* Tail queue access methods */
-#define QTAILQ_EMPTY_RCU(head) (atomic_read(&(head)->tqh_first) == NULL)
-#define QTAILQ_FIRST_RCU(head) atomic_rcu_read(&(head)->tqh_first)
-#define QTAILQ_NEXT_RCU(elm, field) atomic_rcu_read(&(elm)->field.tqe_next)
+#define QTAILQ_EMPTY_RCU(head) (qatomic_read(&(head)->tqh_first) == NULL)
+#define QTAILQ_FIRST_RCU(head) qatomic_rcu_read(&(head)->tqh_first)
+#define QTAILQ_NEXT_RCU(elm, field) qatomic_rcu_read(&(elm)->field.tqe_next)
/* Tail queue functions */
#define QTAILQ_INSERT_HEAD_RCU(head, elm, field) do { \
@@ -211,14 +212,14 @@ extern "C" {
} else { \
(head)->tqh_circ.tql_prev = &(elm)->field.tqe_circ; \
} \
- atomic_rcu_set(&(head)->tqh_first, (elm)); \
+ qatomic_rcu_set(&(head)->tqh_first, (elm)); \
(elm)->field.tqe_circ.tql_prev = &(head)->tqh_circ; \
} while (/*CONSTCOND*/0)
#define QTAILQ_INSERT_TAIL_RCU(head, elm, field) do { \
(elm)->field.tqe_next = NULL; \
(elm)->field.tqe_circ.tql_prev = (head)->tqh_circ.tql_prev; \
- atomic_rcu_set(&(head)->tqh_circ.tql_prev->tql_next, (elm)); \
+ qatomic_rcu_set(&(head)->tqh_circ.tql_prev->tql_next, (elm)); \
(head)->tqh_circ.tql_prev = &(elm)->field.tqe_circ; \
} while (/*CONSTCOND*/0)
@@ -230,14 +231,14 @@ extern "C" {
} else { \
(head)->tqh_circ.tql_prev = &(elm)->field.tqe_circ; \
} \
- atomic_rcu_set(&(listelm)->field.tqe_next, (elm)); \
+ qatomic_rcu_set(&(listelm)->field.tqe_next, (elm)); \
(elm)->field.tqe_circ.tql_prev = &(listelm)->field.tqe_circ; \
} while (/*CONSTCOND*/0)
#define QTAILQ_INSERT_BEFORE_RCU(listelm, elm, field) do { \
(elm)->field.tqe_circ.tql_prev = (listelm)->field.tqe_circ.tql_prev; \
(elm)->field.tqe_next = (listelm); \
- atomic_rcu_set(&(listelm)->field.tqe_circ.tql_prev->tql_next, (elm)); \
+ qatomic_rcu_set(&(listelm)->field.tqe_circ.tql_prev->tql_next, (elm));\
(listelm)->field.tqe_circ.tql_prev = &(elm)->field.tqe_circ; \
} while (/*CONSTCOND*/0)
@@ -248,18 +249,19 @@ extern "C" {
} else { \
(head)->tqh_circ.tql_prev = (elm)->field.tqe_circ.tql_prev; \
} \
- atomic_set(&(elm)->field.tqe_circ.tql_prev->tql_next, (elm)->field.tqe_next); \
+ qatomic_set(&(elm)->field.tqe_circ.tql_prev->tql_next, \
+ (elm)->field.tqe_next); \
(elm)->field.tqe_circ.tql_prev = NULL; \
} while (/*CONSTCOND*/0)
#define QTAILQ_FOREACH_RCU(var, head, field) \
- for ((var) = atomic_rcu_read(&(head)->tqh_first); \
+ for ((var) = qatomic_rcu_read(&(head)->tqh_first); \
(var); \
- (var) = atomic_rcu_read(&(var)->field.tqe_next))
+ (var) = qatomic_rcu_read(&(var)->field.tqe_next))
#define QTAILQ_FOREACH_SAFE_RCU(var, head, field, next) \
- for ((var) = atomic_rcu_read(&(head)->tqh_first); \
- (var) && ((next) = atomic_rcu_read(&(var)->field.tqe_next), 1); \
+ for ((var) = qatomic_rcu_read(&(head)->tqh_first); \
+ (var) && ((next) = qatomic_rcu_read(&(var)->field.tqe_next), 1);\
(var) = (next))
/*
@@ -267,23 +269,23 @@ extern "C" {
*/
/* Singly-linked list access methods */
-#define QSLIST_EMPTY_RCU(head) (atomic_read(&(head)->slh_first) == NULL)
-#define QSLIST_FIRST_RCU(head) atomic_rcu_read(&(head)->slh_first)
-#define QSLIST_NEXT_RCU(elm, field) atomic_rcu_read(&(elm)->field.sle_next)
+#define QSLIST_EMPTY_RCU(head) (qatomic_read(&(head)->slh_first) == NULL)
+#define QSLIST_FIRST_RCU(head) qatomic_rcu_read(&(head)->slh_first)
+#define QSLIST_NEXT_RCU(elm, field) qatomic_rcu_read(&(elm)->field.sle_next)
/* Singly-linked list functions */
#define QSLIST_INSERT_HEAD_RCU(head, elm, field) do { \
(elm)->field.sle_next = (head)->slh_first; \
- atomic_rcu_set(&(head)->slh_first, (elm)); \
+ qatomic_rcu_set(&(head)->slh_first, (elm)); \
} while (/*CONSTCOND*/0)
#define QSLIST_INSERT_AFTER_RCU(head, listelm, elm, field) do { \
(elm)->field.sle_next = (listelm)->field.sle_next; \
- atomic_rcu_set(&(listelm)->field.sle_next, (elm)); \
+ qatomic_rcu_set(&(listelm)->field.sle_next, (elm)); \
} while (/*CONSTCOND*/0)
#define QSLIST_REMOVE_HEAD_RCU(head, field) do { \
- atomic_set(&(head)->slh_first, (head)->slh_first->field.sle_next); \
+ qatomic_set(&(head)->slh_first, (head)->slh_first->field.sle_next);\
} while (/*CONSTCOND*/0)
#define QSLIST_REMOVE_RCU(head, elm, type, field) do { \
@@ -294,19 +296,19 @@ extern "C" {
while (curr->field.sle_next != (elm)) { \
curr = curr->field.sle_next; \
} \
- atomic_set(&curr->field.sle_next, \
+ qatomic_set(&curr->field.sle_next, \
curr->field.sle_next->field.sle_next); \
} \
} while (/*CONSTCOND*/0)
#define QSLIST_FOREACH_RCU(var, head, field) \
- for ((var) = atomic_rcu_read(&(head)->slh_first); \
- (var); \
- (var) = atomic_rcu_read(&(var)->field.sle_next))
+ for ((var) = qatomic_rcu_read(&(head)->slh_first); \
+ (var); \
+ (var) = qatomic_rcu_read(&(var)->field.sle_next))
-#define QSLIST_FOREACH_SAFE_RCU(var, head, field, next) \
- for ((var) = atomic_rcu_read(&(head)->slh_first); \
- (var) && ((next) = atomic_rcu_read(&(var)->field.sle_next), 1); \
+#define QSLIST_FOREACH_SAFE_RCU(var, head, field, next) \
+ for ((var) = qatomic_rcu_read(&(head)->slh_first); \
+ (var) && ((next) = qatomic_rcu_read(&(var)->field.sle_next), 1); \
(var) = (next))
#ifdef __cplusplus
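The RCU list macros renamed above are used with rcu_read_lock() on the reader side and an ordinary lock on the writer side. A hedged sketch (hypothetical node list, assuming the macros above):

    #include "qemu/osdep.h"
    #include "qemu/rcu.h"
    #include "qemu/rcu_queue.h"
    #include "qemu/thread.h"

    typedef struct Node {
        int id;
        QLIST_ENTRY(Node) next;
    } Node;

    static QLIST_HEAD(, Node) nodes = QLIST_HEAD_INITIALIZER(nodes);
    static QemuMutex nodes_lock;    /* serializes writers only */

    static bool node_exists(int id)
    {
        Node *n;
        bool found = false;

        rcu_read_lock();
        QLIST_FOREACH_RCU(n, &nodes, next) {
            if (n->id == id) {
                found = true;
                break;
            }
        }
        rcu_read_unlock();
        return found;
    }

    static void node_add(Node *n)
    {
        qemu_mutex_lock(&nodes_lock);
        QLIST_INSERT_HEAD_RCU(&nodes, n, next);
        qemu_mutex_unlock(&nodes_lock);
    }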
diff --git a/include/qemu/seqlock.h b/include/qemu/seqlock.h
index 8b6b4ee..ecb7d2c 100644
--- a/include/qemu/seqlock.h
+++ b/include/qemu/seqlock.h
@@ -32,7 +32,7 @@ static inline void seqlock_init(QemuSeqLock *sl)
/* Lock out other writers and update the count. */
static inline void seqlock_write_begin(QemuSeqLock *sl)
{
- atomic_set(&sl->sequence, sl->sequence + 1);
+ qatomic_set(&sl->sequence, sl->sequence + 1);
/* Write sequence before updating other fields. */
smp_wmb();
@@ -43,7 +43,7 @@ static inline void seqlock_write_end(QemuSeqLock *sl)
/* Write other fields before finalizing sequence. */
smp_wmb();
- atomic_set(&sl->sequence, sl->sequence + 1);
+ qatomic_set(&sl->sequence, sl->sequence + 1);
}
/* Lock out other writers and update the count. */
@@ -68,7 +68,7 @@ static inline void seqlock_write_unlock_impl(QemuSeqLock *sl, QemuLockable *lock
static inline unsigned seqlock_read_begin(const QemuSeqLock *sl)
{
/* Always fail if a write is in progress. */
- unsigned ret = atomic_read(&sl->sequence);
+ unsigned ret = qatomic_read(&sl->sequence);
/* Read sequence before reading other fields. */
smp_rmb();
@@ -79,7 +79,7 @@ static inline int seqlock_read_retry(const QemuSeqLock *sl, unsigned start)
{
/* Read other fields before reading final sequence. */
smp_rmb();
- return unlikely(atomic_read(&sl->sequence) != start);
+ return unlikely(qatomic_read(&sl->sequence) != start);
}
#endif
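The seqlock reader retries whenever the sequence number changed under it or a write was in progress. A hedged sketch (hypothetical clock offset; writers are assumed to be serialized externally, e.g. a single writer thread):

    #include "qemu/osdep.h"
    #include "qemu/seqlock.h"

    static QemuSeqLock clock_lock;
    static int64_t clock_offset_ns;     /* protected by clock_lock */

    static void clock_setup(void)
    {
        seqlock_init(&clock_lock);
    }

    static void clock_set_offset(int64_t offset)
    {
        seqlock_write_begin(&clock_lock);
        clock_offset_ns = offset;
        seqlock_write_end(&clock_lock);
    }

    static int64_t clock_get_offset(void)
    {
        unsigned start;
        int64_t offset;

        do {
            start = seqlock_read_begin(&clock_lock);
            offset = clock_offset_ns;
        } while (seqlock_read_retry(&clock_lock, start));

        return offset;
    }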
diff --git a/include/qemu/stats64.h b/include/qemu/stats64.h
index 19a5ac4..fdd3d1b 100644
--- a/include/qemu/stats64.h
+++ b/include/qemu/stats64.h
@@ -37,27 +37,27 @@ static inline void stat64_init(Stat64 *s, uint64_t value)
static inline uint64_t stat64_get(const Stat64 *s)
{
- return atomic_read__nocheck(&s->value);
+ return qatomic_read__nocheck(&s->value);
}
static inline void stat64_add(Stat64 *s, uint64_t value)
{
- atomic_add(&s->value, value);
+ qatomic_add(&s->value, value);
}
static inline void stat64_min(Stat64 *s, uint64_t value)
{
- uint64_t orig = atomic_read__nocheck(&s->value);
+ uint64_t orig = qatomic_read__nocheck(&s->value);
while (orig > value) {
- orig = atomic_cmpxchg__nocheck(&s->value, orig, value);
+ orig = qatomic_cmpxchg__nocheck(&s->value, orig, value);
}
}
static inline void stat64_max(Stat64 *s, uint64_t value)
{
- uint64_t orig = atomic_read__nocheck(&s->value);
+ uint64_t orig = qatomic_read__nocheck(&s->value);
while (orig < value) {
- orig = atomic_cmpxchg__nocheck(&s->value, orig, value);
+ orig = qatomic_cmpxchg__nocheck(&s->value, orig, value);
}
}
#else
@@ -79,7 +79,7 @@ static inline void stat64_add(Stat64 *s, uint64_t value)
low = (uint32_t) value;
if (!low) {
if (high) {
- atomic_add(&s->high, high);
+ qatomic_add(&s->high, high);
}
return;
}
@@ -101,7 +101,7 @@ static inline void stat64_add(Stat64 *s, uint64_t value)
* the high 32 bits, so it can race just fine with stat64_add32_carry
* and even stat64_get!
*/
- old = atomic_cmpxchg(&s->low, orig, result);
+ old = qatomic_cmpxchg(&s->low, orig, result);
if (orig == old) {
return;
}
@@ -116,7 +116,7 @@ static inline void stat64_min(Stat64 *s, uint64_t value)
high = value >> 32;
low = (uint32_t) value;
do {
- orig_high = atomic_read(&s->high);
+ orig_high = qatomic_read(&s->high);
if (orig_high < high) {
return;
}
@@ -128,7 +128,7 @@ static inline void stat64_min(Stat64 *s, uint64_t value)
* the write barrier in stat64_min_slow.
*/
smp_rmb();
- orig_low = atomic_read(&s->low);
+ orig_low = qatomic_read(&s->low);
if (orig_low <= low) {
return;
}
@@ -138,7 +138,7 @@ static inline void stat64_min(Stat64 *s, uint64_t value)
* we may miss being lucky.
*/
smp_rmb();
- orig_high = atomic_read(&s->high);
+ orig_high = qatomic_read(&s->high);
if (orig_high < high) {
return;
}
@@ -156,7 +156,7 @@ static inline void stat64_max(Stat64 *s, uint64_t value)
high = value >> 32;
low = (uint32_t) value;
do {
- orig_high = atomic_read(&s->high);
+ orig_high = qatomic_read(&s->high);
if (orig_high > high) {
return;
}
@@ -168,7 +168,7 @@ static inline void stat64_max(Stat64 *s, uint64_t value)
* the write barrier in stat64_max_slow.
*/
smp_rmb();
- orig_low = atomic_read(&s->low);
+ orig_low = qatomic_read(&s->low);
if (orig_low >= low) {
return;
}
@@ -178,7 +178,7 @@ static inline void stat64_max(Stat64 *s, uint64_t value)
* we may miss being lucky.
*/
smp_rmb();
- orig_high = atomic_read(&s->high);
+ orig_high = qatomic_read(&s->high);
if (orig_high > high) {
return;
}
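The Stat64 helpers above present one API whether or not the host has CONFIG_ATOMIC64; in the fallback shown above the value is split into low/high halves updated with qatomic_cmpxchg(). A hedged sketch of typical use (hypothetical latency counters):

    #include "qemu/osdep.h"
    #include "qemu/stats64.h"

    static Stat64 total_ns;
    static Stat64 min_ns;
    static Stat64 max_ns;

    static void latency_stats_init(void)
    {
        stat64_init(&total_ns, 0);
        stat64_init(&min_ns, UINT64_MAX);
        stat64_init(&max_ns, 0);
    }

    static void latency_record(uint64_t ns)
    {
        stat64_add(&total_ns, ns);
        stat64_min(&min_ns, ns);
        stat64_max(&max_ns, ns);
    }

    static uint64_t latency_total(void)
    {
        return stat64_get(&total_ns);
    }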
diff --git a/include/qemu/thread.h b/include/qemu/thread.h
index 4baf4d1..5435763 100644
--- a/include/qemu/thread.h
+++ b/include/qemu/thread.h
@@ -70,33 +70,33 @@ extern QemuCondTimedWaitFunc qemu_cond_timedwait_func;
qemu_cond_timedwait_impl(c, m, ms, __FILE__, __LINE__)
#else
#define qemu_mutex_lock(m) ({ \
- QemuMutexLockFunc _f = atomic_read(&qemu_mutex_lock_func); \
+ QemuMutexLockFunc _f = qatomic_read(&qemu_mutex_lock_func); \
_f(m, __FILE__, __LINE__); \
})
-#define qemu_mutex_trylock(m) ({ \
- QemuMutexTrylockFunc _f = atomic_read(&qemu_mutex_trylock_func); \
- _f(m, __FILE__, __LINE__); \
+#define qemu_mutex_trylock(m) ({ \
+ QemuMutexTrylockFunc _f = qatomic_read(&qemu_mutex_trylock_func); \
+ _f(m, __FILE__, __LINE__); \
})
-#define qemu_rec_mutex_lock(m) ({ \
- QemuRecMutexLockFunc _f = atomic_read(&qemu_rec_mutex_lock_func); \
- _f(m, __FILE__, __LINE__); \
+#define qemu_rec_mutex_lock(m) ({ \
+ QemuRecMutexLockFunc _f = qatomic_read(&qemu_rec_mutex_lock_func);\
+ _f(m, __FILE__, __LINE__); \
})
#define qemu_rec_mutex_trylock(m) ({ \
QemuRecMutexTrylockFunc _f; \
- _f = atomic_read(&qemu_rec_mutex_trylock_func); \
+ _f = qatomic_read(&qemu_rec_mutex_trylock_func); \
_f(m, __FILE__, __LINE__); \
})
#define qemu_cond_wait(c, m) ({ \
- QemuCondWaitFunc _f = atomic_read(&qemu_cond_wait_func); \
+ QemuCondWaitFunc _f = qatomic_read(&qemu_cond_wait_func); \
_f(c, m, __FILE__, __LINE__); \
})
#define qemu_cond_timedwait(c, m, ms) ({ \
- QemuCondTimedWaitFunc _f = atomic_read(&qemu_cond_timedwait_func); \
+ QemuCondTimedWaitFunc _f = qatomic_read(&qemu_cond_timedwait_func);\
_f(c, m, ms, __FILE__, __LINE__); \
})
#endif
@@ -236,7 +236,7 @@ static inline void qemu_spin_lock(QemuSpin *spin)
__tsan_mutex_pre_lock(spin, 0);
#endif
while (unlikely(__sync_lock_test_and_set(&spin->value, true))) {
- while (atomic_read(&spin->value)) {
+ while (qatomic_read(&spin->value)) {
cpu_relax();
}
}
@@ -261,7 +261,7 @@ static inline bool qemu_spin_trylock(QemuSpin *spin)
static inline bool qemu_spin_locked(QemuSpin *spin)
{
- return atomic_read(&spin->value);
+ return qatomic_read(&spin->value);
}
static inline void qemu_spin_unlock(QemuSpin *spin)
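The QemuSpin helpers above busy-wait with cpu_relax() until the lock is free, so they suit only very short critical sections that never sleep. A hedged sketch (hypothetical counter):

    #include "qemu/osdep.h"
    #include "qemu/thread.h"

    static QemuSpin counter_lock;
    static uint64_t counter;

    static void counter_setup(void)
    {
        qemu_spin_init(&counter_lock);
    }

    static void counter_add(uint64_t n)
    {
        qemu_spin_lock(&counter_lock);
        counter += n;
        qemu_spin_unlock(&counter_lock);
    }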
diff --git a/include/standard-headers/drivers/infiniband/hw/vmw_pvrdma/pvrdma_ring.h b/include/standard-headers/drivers/infiniband/hw/vmw_pvrdma/pvrdma_ring.h
index acd4c83..7b4062a 100644
--- a/include/standard-headers/drivers/infiniband/hw/vmw_pvrdma/pvrdma_ring.h
+++ b/include/standard-headers/drivers/infiniband/hw/vmw_pvrdma/pvrdma_ring.h
@@ -68,7 +68,7 @@ static inline int pvrdma_idx_valid(uint32_t idx, uint32_t max_elems)
static inline int32_t pvrdma_idx(int *var, uint32_t max_elems)
{
- const unsigned int idx = atomic_read(var);
+ const unsigned int idx = qatomic_read(var);
if (pvrdma_idx_valid(idx, max_elems))
return idx & (max_elems - 1);
@@ -77,17 +77,17 @@ static inline int32_t pvrdma_idx(int *var, uint32_t max_elems)
static inline void pvrdma_idx_ring_inc(int *var, uint32_t max_elems)
{
- uint32_t idx = atomic_read(var) + 1; /* Increment. */
+ uint32_t idx = qatomic_read(var) + 1; /* Increment. */
idx &= (max_elems << 1) - 1; /* Modulo size, flip gen. */
- atomic_set(var, idx);
+ qatomic_set(var, idx);
}
static inline int32_t pvrdma_idx_ring_has_space(const struct pvrdma_ring *r,
uint32_t max_elems, uint32_t *out_tail)
{
- const uint32_t tail = atomic_read(&r->prod_tail);
- const uint32_t head = atomic_read(&r->cons_head);
+ const uint32_t tail = qatomic_read(&r->prod_tail);
+ const uint32_t head = qatomic_read(&r->cons_head);
if (pvrdma_idx_valid(tail, max_elems) &&
pvrdma_idx_valid(head, max_elems)) {
@@ -100,8 +100,8 @@ static inline int32_t pvrdma_idx_ring_has_space(const struct pvrdma_ring *r,
static inline int32_t pvrdma_idx_ring_has_data(const struct pvrdma_ring *r,
uint32_t max_elems, uint32_t *out_head)
{
- const uint32_t tail = atomic_read(&r->prod_tail);
- const uint32_t head = atomic_read(&r->cons_head);
+ const uint32_t tail = qatomic_read(&r->prod_tail);
+ const uint32_t head = qatomic_read(&r->cons_head);
if (pvrdma_idx_valid(tail, max_elems) &&
pvrdma_idx_valid(head, max_elems)) {