From c04649eeeaf5f84ba9e43d0c4ffbe1719b0d940c Mon Sep 17 00:00:00 2001 From: "Emilio G. Cota" Date: Sat, 18 Aug 2018 00:25:12 -0400 Subject: seqlock: constify seqlock_read_begin Signed-off-by: Emilio G. Cota Signed-off-by: Paolo Bonzini --- include/qemu/seqlock.h | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'include/qemu') diff --git a/include/qemu/seqlock.h b/include/qemu/seqlock.h index 8dee11d..c367516 100644 --- a/include/qemu/seqlock.h +++ b/include/qemu/seqlock.h @@ -45,7 +45,7 @@ static inline void seqlock_write_end(QemuSeqLock *sl) atomic_set(&sl->sequence, sl->sequence + 1); } -static inline unsigned seqlock_read_begin(QemuSeqLock *sl) +static inline unsigned seqlock_read_begin(const QemuSeqLock *sl) { /* Always fail if a write is in progress. */ unsigned ret = atomic_read(&sl->sequence); -- cgit v1.1 From fe9959a275fc7b4744e49201a390627b3adda597 Mon Sep 17 00:00:00 2001 From: "Emilio G. Cota" Date: Tue, 8 Aug 2017 13:53:15 -0400 Subject: qsp: QEMU's Synchronization Profiler The goal of this module is to profile synchronization primitives (i.e. mutexes, recursive mutexes and condition variables) so that scalability issues can be quickly diagnosed. Sync primitives are profiled by QSP based on the vaddr of the object accessed as well as the call site (file:line_nr). That means the same object called from two different call sites will be tracked in separate entries, which might be reported together or separately (see subsequent commit on call site coalescing). Some perf numbers: Host: Intel(R) Core(TM) i7-6700K CPU @ 4.00GHz Command: taskset -c 0 tests/atomic_add-bench -d 5 -m - Before: 54.80 Mops/s - After: 54.75 Mops/s That is, a negligible slowdown due to the now indirect call to qemu_mutex_lock. Note that using a branch instead of an indirect call introduces a more severe slowdown (53.65 Mops/s, i.e. 2% slowdown). Enabling the profiler (with -p, added in this series) is more interesting: - No profiling: 54.75 Mops/s - W/ profiling: 12.53 Mops/s That is, a 4.36X slowdown. We can break down this slowdown by removing the get_clock calls or the entry lookup: - No profiling: 54.75 Mops/s - W/o get_clock: 25.37 Mops/s - W/o entry lookup: 19.30 Mops/s - W/ profiling: 12.53 Mops/s Signed-off-by: Emilio G. Cota Signed-off-by: Paolo Bonzini --- include/qemu/qht.h | 1 + include/qemu/qsp.h | 22 +++++++++++++++ include/qemu/thread-posix.h | 4 +-- include/qemu/thread-win32.h | 5 ++-- include/qemu/thread.h | 65 ++++++++++++++++++++++++++++++++++++++++----- 5 files changed, 86 insertions(+), 11 deletions(-) create mode 100644 include/qemu/qsp.h (limited to 'include/qemu') diff --git a/include/qemu/qht.h b/include/qemu/qht.h index 1fb9116..c9a11cc 100644 --- a/include/qemu/qht.h +++ b/include/qemu/qht.h @@ -46,6 +46,7 @@ typedef bool (*qht_lookup_func_t)(const void *obj, const void *userp); typedef void (*qht_iter_func_t)(struct qht *ht, void *p, uint32_t h, void *up); #define QHT_MODE_AUTO_RESIZE 0x1 /* auto-resize when heavily loaded */ +#define QHT_MODE_RAW_MUTEXES 0x2 /* bypass the profiler (QSP) */ /** * qht_init - Initialize a QHT diff --git a/include/qemu/qsp.h b/include/qemu/qsp.h new file mode 100644 index 0000000..9c2bb60 --- /dev/null +++ b/include/qemu/qsp.h @@ -0,0 +1,22 @@ +/* + * qsp.c - QEMU Synchronization Profiler + * + * Copyright (C) 2018, Emilio G. Cota + * + * License: GNU GPL, version 2 or later. + * See the COPYING file in the top-level directory. + * + * Note: this header file can *only* be included from thread.h. 
+ */ +#ifndef QEMU_QSP_H +#define QEMU_QSP_H + +#include "qemu/fprintf-fn.h" + +void qsp_report(FILE *f, fprintf_function cpu_fprintf, size_t max); + +bool qsp_is_enabled(void); +void qsp_enable(void); +void qsp_disable(void); + +#endif /* QEMU_QSP_H */ diff --git a/include/qemu/thread-posix.h b/include/qemu/thread-posix.h index fd27b34..c903525 100644 --- a/include/qemu/thread-posix.h +++ b/include/qemu/thread-posix.h @@ -6,8 +6,8 @@ typedef QemuMutex QemuRecMutex; #define qemu_rec_mutex_destroy qemu_mutex_destroy -#define qemu_rec_mutex_lock qemu_mutex_lock -#define qemu_rec_mutex_trylock qemu_mutex_trylock +#define qemu_rec_mutex_lock_impl qemu_mutex_lock_impl +#define qemu_rec_mutex_trylock_impl qemu_mutex_trylock_impl #define qemu_rec_mutex_unlock qemu_mutex_unlock struct QemuMutex { diff --git a/include/qemu/thread-win32.h b/include/qemu/thread-win32.h index d668d78..50af5dd 100644 --- a/include/qemu/thread-win32.h +++ b/include/qemu/thread-win32.h @@ -19,8 +19,9 @@ struct QemuRecMutex { }; void qemu_rec_mutex_destroy(QemuRecMutex *mutex); -void qemu_rec_mutex_lock(QemuRecMutex *mutex); -int qemu_rec_mutex_trylock(QemuRecMutex *mutex); +void qemu_rec_mutex_lock_impl(QemuRecMutex *mutex, const char *file, int line); +int qemu_rec_mutex_trylock_impl(QemuRecMutex *mutex, const char *file, + int line); void qemu_rec_mutex_unlock(QemuRecMutex *mutex); struct QemuCond { diff --git a/include/qemu/thread.h b/include/qemu/thread.h index ef7bd16..c90ea47 100644 --- a/include/qemu/thread.h +++ b/include/qemu/thread.h @@ -16,6 +16,9 @@ typedef struct QemuThread QemuThread; #include "qemu/thread-posix.h" #endif +/* include QSP header once QemuMutex, QemuCond etc. are defined */ +#include "qemu/qsp.h" + #define QEMU_THREAD_JOINABLE 0 #define QEMU_THREAD_DETACHED 1 @@ -25,10 +28,51 @@ int qemu_mutex_trylock_impl(QemuMutex *mutex, const char *file, const int line); void qemu_mutex_lock_impl(QemuMutex *mutex, const char *file, const int line); void qemu_mutex_unlock_impl(QemuMutex *mutex, const char *file, const int line); -#define qemu_mutex_lock(mutex) \ - qemu_mutex_lock_impl(mutex, __FILE__, __LINE__) -#define qemu_mutex_trylock(mutex) \ - qemu_mutex_trylock_impl(mutex, __FILE__, __LINE__) +typedef void (*QemuMutexLockFunc)(QemuMutex *m, const char *f, int l); +typedef int (*QemuMutexTrylockFunc)(QemuMutex *m, const char *f, int l); +typedef void (*QemuRecMutexLockFunc)(QemuRecMutex *m, const char *f, int l); +typedef int (*QemuRecMutexTrylockFunc)(QemuRecMutex *m, const char *f, int l); +typedef void (*QemuCondWaitFunc)(QemuCond *c, QemuMutex *m, const char *f, + int l); + +extern QemuMutexLockFunc qemu_mutex_lock_func; +extern QemuMutexTrylockFunc qemu_mutex_trylock_func; +extern QemuRecMutexLockFunc qemu_rec_mutex_lock_func; +extern QemuRecMutexTrylockFunc qemu_rec_mutex_trylock_func; +extern QemuCondWaitFunc qemu_cond_wait_func; + +/* convenience macros to bypass the profiler */ +#define qemu_mutex_lock__raw(m) \ + qemu_mutex_lock_impl(m, __FILE__, __LINE__) +#define qemu_mutex_trylock__raw(m) \ + qemu_mutex_trylock_impl(m, __FILE__, __LINE__) + +#define qemu_mutex_lock(m) ({ \ + QemuMutexLockFunc _f = atomic_read(&qemu_mutex_lock_func); \ + _f(m, __FILE__, __LINE__); \ + }) + +#define qemu_mutex_trylock(m) ({ \ + QemuMutexTrylockFunc _f = atomic_read(&qemu_mutex_trylock_func); \ + _f(m, __FILE__, __LINE__); \ + }) + +#define qemu_rec_mutex_lock(m) ({ \ + QemuRecMutexLockFunc _f = atomic_read(&qemu_rec_mutex_lock_func); \ + _f(m, __FILE__, __LINE__); \ + }) + +#define 
qemu_rec_mutex_trylock(m) ({ \
+        QemuRecMutexTrylockFunc _f; \
+        _f = atomic_read(&qemu_rec_mutex_trylock_func); \
+        _f(m, __FILE__, __LINE__); \
+    })
+
+#define qemu_cond_wait(c, m) ({ \
+        QemuCondWaitFunc _f = atomic_read(&qemu_cond_wait_func); \
+        _f(c, m, __FILE__, __LINE__); \
+    })
+
 #define qemu_mutex_unlock(mutex) \
         qemu_mutex_unlock_impl(mutex, __FILE__, __LINE__)
@@ -47,6 +91,16 @@ static inline void (qemu_mutex_unlock)(QemuMutex *mutex)
     qemu_mutex_unlock(mutex);
 }
+static inline void (qemu_rec_mutex_lock)(QemuRecMutex *mutex)
+{
+    qemu_rec_mutex_lock(mutex);
+}
+
+static inline int (qemu_rec_mutex_trylock)(QemuRecMutex *mutex)
+{
+    return qemu_rec_mutex_trylock(mutex);
+}
+
 /* Prototypes for other functions are in thread-posix.h/thread-win32.h.  */
 void qemu_rec_mutex_init(QemuRecMutex *mutex);
@@ -63,9 +117,6 @@ void qemu_cond_broadcast(QemuCond *cond);
 void qemu_cond_wait_impl(QemuCond *cond, QemuMutex *mutex,
                          const char *file, const int line);
-#define qemu_cond_wait(cond, mutex) \
-        qemu_cond_wait_impl(cond, mutex, __FILE__, __LINE__)
-
 static inline void (qemu_cond_wait)(QemuCond *cond, QemuMutex *mutex)
 {
     qemu_cond_wait(cond, mutex);
-- 
cgit v1.1


From 0a22777c7120deafcfc5d680227293145f6f062c Mon Sep 17 00:00:00 2001
From: "Emilio G. Cota"
Date: Thu, 16 Aug 2018 23:14:40 -0400
Subject: qsp: add sort_by option to qsp_report

Signed-off-by: Emilio G. Cota
Signed-off-by: Paolo Bonzini
---
 include/qemu/qsp.h | 8 +++++++-
 1 file changed, 7 insertions(+), 1 deletion(-)

(limited to 'include/qemu')

diff --git a/include/qemu/qsp.h b/include/qemu/qsp.h
index 9c2bb60..209480b 100644
--- a/include/qemu/qsp.h
+++ b/include/qemu/qsp.h
@@ -13,7 +13,13 @@
 #include "qemu/fprintf-fn.h"
 
-void qsp_report(FILE *f, fprintf_function cpu_fprintf, size_t max);
+enum QSPSortBy {
+    QSP_SORT_BY_TOTAL_WAIT_TIME,
+    QSP_SORT_BY_AVG_WAIT_TIME,
+};
+
+void qsp_report(FILE *f, fprintf_function cpu_fprintf, size_t max,
+                enum QSPSortBy sort_by);
 
 bool qsp_is_enabled(void);
 void qsp_enable(void);
-- 
cgit v1.1


From 996e8d9a45f74fd59a8aa5fa78102e1d8981a945 Mon Sep 17 00:00:00 2001
From: "Emilio G. Cota"
Date: Thu, 16 Aug 2018 23:29:49 -0400
Subject: qsp: add qsp_reset

I first implemented this by deleting all entries in the global hash
table. But doing that safely slows down profiling, since we'd need to
introduce rcu_read_lock/unlock in the fast path.

What's implemented here avoids messing with the thread-local data in
the global hash table. It achieves this by taking a snapshot of the
current state, so that subsequent reports present the delta with
respect to the snapshot.

Signed-off-by: Emilio G. Cota
Signed-off-by: Paolo Bonzini
---
 include/qemu/qsp.h | 1 +
 1 file changed, 1 insertion(+)

(limited to 'include/qemu')

diff --git a/include/qemu/qsp.h b/include/qemu/qsp.h
index 209480b..f8c6c96 100644
--- a/include/qemu/qsp.h
+++ b/include/qemu/qsp.h
@@ -24,5 +24,6 @@ void qsp_report(FILE *f, fprintf_function cpu_fprintf, size_t max,
 bool qsp_is_enabled(void);
 void qsp_enable(void);
 void qsp_disable(void);
+void qsp_reset(void);
 
 #endif /* QEMU_QSP_H */
-- 
cgit v1.1


From d557de4a0e290c9946f37ff1fba7204844cccc64 Mon Sep 17 00:00:00 2001
From: "Emilio G. Cota"
Date: Thu, 16 Aug 2018 22:41:01 -0400
Subject: qsp: support call site coalescing

Signed-off-by: Emilio G.
Cota Signed-off-by: Paolo Bonzini --- include/qemu/qsp.h | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'include/qemu') diff --git a/include/qemu/qsp.h b/include/qemu/qsp.h index f8c6c96..a94c464 100644 --- a/include/qemu/qsp.h +++ b/include/qemu/qsp.h @@ -19,7 +19,7 @@ enum QSPSortBy { }; void qsp_report(FILE *f, fprintf_function cpu_fprintf, size_t max, - enum QSPSortBy sort_by); + enum QSPSortBy sort_by, bool callsite_coalesce); bool qsp_is_enabled(void); void qsp_enable(void); -- cgit v1.1 From cb764d06650da5fad7c833975b255d08e91a0a52 Mon Sep 17 00:00:00 2001 From: "Emilio G. Cota" Date: Sat, 28 Oct 2017 02:16:41 -0400 Subject: qsp: track BQL callers explicitly The BQL is acquired via qemu_mutex_lock_iothread(), which makes the profiler assign the associated wait time (i.e. most of BQL wait time) entirely to that function. This loses the original call site information, which does not help diagnose BQL contention. Fix it by tracking the callers explicitly. Signed-off-by: Emilio G. Cota Signed-off-by: Paolo Bonzini --- include/qemu/main-loop.h | 4 +++- include/qemu/thread.h | 1 + 2 files changed, 4 insertions(+), 1 deletion(-) (limited to 'include/qemu') diff --git a/include/qemu/main-loop.h b/include/qemu/main-loop.h index 721aa24..e59f9ae 100644 --- a/include/qemu/main-loop.h +++ b/include/qemu/main-loop.h @@ -276,7 +276,9 @@ bool qemu_mutex_iothread_locked(void); * NOTE: tools currently are single-threaded and qemu_mutex_lock_iothread * is a no-op there. */ -void qemu_mutex_lock_iothread(void); +#define qemu_mutex_lock_iothread() \ + qemu_mutex_lock_iothread_impl(__FILE__, __LINE__) +void qemu_mutex_lock_iothread_impl(const char *file, int line); /** * qemu_mutex_unlock_iothread: Unlock the main loop mutex. diff --git a/include/qemu/thread.h b/include/qemu/thread.h index c90ea47..dacebcf 100644 --- a/include/qemu/thread.h +++ b/include/qemu/thread.h @@ -35,6 +35,7 @@ typedef int (*QemuRecMutexTrylockFunc)(QemuRecMutex *m, const char *f, int l); typedef void (*QemuCondWaitFunc)(QemuCond *c, QemuMutex *m, const char *f, int l); +extern QemuMutexLockFunc qemu_bql_mutex_lock_func; extern QemuMutexLockFunc qemu_mutex_lock_func; extern QemuMutexTrylockFunc qemu_mutex_trylock_func; extern QemuRecMutexLockFunc qemu_rec_mutex_lock_func; -- cgit v1.1 From c177e0bf063659427874bf78236a762b5f040546 Mon Sep 17 00:00:00 2001 From: "Emilio G. Cota" Date: Sun, 19 Aug 2018 05:13:25 -0400 Subject: rcu_queue: use atomic_set in QLIST_REMOVE_RCU To avoid undefined behaviour. Signed-off-by: Emilio G. Cota Message-Id: <20180819091335.22863-2-cota@braap.org> Signed-off-by: Paolo Bonzini --- include/qemu/rcu_queue.h | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'include/qemu') diff --git a/include/qemu/rcu_queue.h b/include/qemu/rcu_queue.h index 01be774..dd7b3be 100644 --- a/include/qemu/rcu_queue.h +++ b/include/qemu/rcu_queue.h @@ -112,7 +112,7 @@ extern "C" { (elm)->field.le_next->field.le_prev = \ (elm)->field.le_prev; \ } \ - *(elm)->field.le_prev = (elm)->field.le_next; \ + atomic_set((elm)->field.le_prev, (elm)->field.le_next); \ } while (/*CONSTCOND*/0) /* List traversal must occur within an RCU critical section. */ -- cgit v1.1 From 735d1af662f50634d79699a7c0a9633c0ac08251 Mon Sep 17 00:00:00 2001 From: "Emilio G. Cota" Date: Sun, 19 Aug 2018 05:13:26 -0400 Subject: rcu_queue: remove barrier from QLIST_EMPTY_RCU It's unnecessary because the pointer isn't dereferenced. Signed-off-by: Emilio G. 
Cota Message-Id: <20180819091335.22863-3-cota@braap.org> Signed-off-by: Paolo Bonzini --- include/qemu/rcu_queue.h | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'include/qemu') diff --git a/include/qemu/rcu_queue.h b/include/qemu/rcu_queue.h index dd7b3be..6881ea5 100644 --- a/include/qemu/rcu_queue.h +++ b/include/qemu/rcu_queue.h @@ -36,7 +36,7 @@ extern "C" { /* * List access methods. */ -#define QLIST_EMPTY_RCU(head) (atomic_rcu_read(&(head)->lh_first) == NULL) +#define QLIST_EMPTY_RCU(head) (atomic_read(&(head)->lh_first) == NULL) #define QLIST_FIRST_RCU(head) (atomic_rcu_read(&(head)->lh_first)) #define QLIST_NEXT_RCU(elm, field) (atomic_rcu_read(&(elm)->field.le_next)) -- cgit v1.1 From 13d8ef7dda07aaf199db2dc2611c22b1089dec34 Mon Sep 17 00:00:00 2001 From: "Emilio G. Cota" Date: Sun, 19 Aug 2018 05:13:27 -0400 Subject: rcu_queue: add RCU QSIMPLEQ Signed-off-by: Emilio G. Cota Message-Id: <20180819091335.22863-4-cota@braap.org> Signed-off-by: Paolo Bonzini --- include/qemu/rcu_queue.h | 65 ++++++++++++++++++++++++++++++++++++++++++++++++ 1 file changed, 65 insertions(+) (limited to 'include/qemu') diff --git a/include/qemu/rcu_queue.h b/include/qemu/rcu_queue.h index 6881ea5..e0395c9 100644 --- a/include/qemu/rcu_queue.h +++ b/include/qemu/rcu_queue.h @@ -128,6 +128,71 @@ extern "C" { ((next_var) = atomic_rcu_read(&(var)->field.le_next), 1); \ (var) = (next_var)) +/* + * RCU simple queue + */ + +/* Simple queue access methods */ +#define QSIMPLEQ_EMPTY_RCU(head) (atomic_read(&(head)->sqh_first) == NULL) +#define QSIMPLEQ_FIRST_RCU(head) atomic_rcu_read(&(head)->sqh_first) +#define QSIMPLEQ_NEXT_RCU(elm, field) atomic_rcu_read(&(elm)->field.sqe_next) + +/* Simple queue functions */ +#define QSIMPLEQ_INSERT_HEAD_RCU(head, elm, field) do { \ + (elm)->field.sqe_next = (head)->sqh_first; \ + if ((elm)->field.sqe_next == NULL) { \ + (head)->sqh_last = &(elm)->field.sqe_next; \ + } \ + atomic_rcu_set(&(head)->sqh_first, (elm)); \ +} while (/*CONSTCOND*/0) + +#define QSIMPLEQ_INSERT_TAIL_RCU(head, elm, field) do { \ + (elm)->field.sqe_next = NULL; \ + atomic_rcu_set((head)->sqh_last, (elm)); \ + (head)->sqh_last = &(elm)->field.sqe_next; \ +} while (/*CONSTCOND*/0) + +#define QSIMPLEQ_INSERT_AFTER_RCU(head, listelm, elm, field) do { \ + (elm)->field.sqe_next = (listelm)->field.sqe_next; \ + if ((elm)->field.sqe_next == NULL) { \ + (head)->sqh_last = &(elm)->field.sqe_next; \ + } \ + atomic_rcu_set(&(listelm)->field.sqe_next, (elm)); \ +} while (/*CONSTCOND*/0) + +#define QSIMPLEQ_REMOVE_HEAD_RCU(head, field) do { \ + atomic_set(&(head)->sqh_first, (head)->sqh_first->field.sqe_next); \ + if ((head)->sqh_first == NULL) { \ + (head)->sqh_last = &(head)->sqh_first; \ + } \ +} while (/*CONSTCOND*/0) + +#define QSIMPLEQ_REMOVE_RCU(head, elm, type, field) do { \ + if ((head)->sqh_first == (elm)) { \ + QSIMPLEQ_REMOVE_HEAD_RCU((head), field); \ + } else { \ + struct type *curr = (head)->sqh_first; \ + while (curr->field.sqe_next != (elm)) { \ + curr = curr->field.sqe_next; \ + } \ + atomic_set(&curr->field.sqe_next, \ + curr->field.sqe_next->field.sqe_next); \ + if (curr->field.sqe_next == NULL) { \ + (head)->sqh_last = &(curr)->field.sqe_next; \ + } \ + } \ +} while (/*CONSTCOND*/0) + +#define QSIMPLEQ_FOREACH_RCU(var, head, field) \ + for ((var) = atomic_rcu_read(&(head)->sqh_first); \ + (var); \ + (var) = atomic_rcu_read(&(var)->field.sqe_next)) + +#define QSIMPLEQ_FOREACH_SAFE_RCU(var, head, field, next) \ + for ((var) = atomic_rcu_read(&(head)->sqh_first); \ + (var) && 
((next) = atomic_rcu_read(&(var)->field.sqe_next), 1); \ + (var) = (next)) + #ifdef __cplusplus } #endif -- cgit v1.1 From 945d9c7530dd22a7a500dd8833836d6054a17ab9 Mon Sep 17 00:00:00 2001 From: "Emilio G. Cota" Date: Sun, 19 Aug 2018 05:13:28 -0400 Subject: rcu_queue: add RCU QTAILQ Signed-off-by: Emilio G. Cota Message-Id: <20180819091335.22863-5-cota@braap.org> Signed-off-by: Paolo Bonzini --- include/qemu/rcu_queue.h | 66 ++++++++++++++++++++++++++++++++++++++++++++++++ 1 file changed, 66 insertions(+) (limited to 'include/qemu') diff --git a/include/qemu/rcu_queue.h b/include/qemu/rcu_queue.h index e0395c9..904b337 100644 --- a/include/qemu/rcu_queue.h +++ b/include/qemu/rcu_queue.h @@ -193,6 +193,72 @@ extern "C" { (var) && ((next) = atomic_rcu_read(&(var)->field.sqe_next), 1); \ (var) = (next)) +/* + * RCU tail queue + */ + +/* Tail queue access methods */ +#define QTAILQ_EMPTY_RCU(head) (atomic_read(&(head)->tqh_first) == NULL) +#define QTAILQ_FIRST_RCU(head) atomic_rcu_read(&(head)->tqh_first) +#define QTAILQ_NEXT_RCU(elm, field) atomic_rcu_read(&(elm)->field.tqe_next) + +/* Tail queue functions */ +#define QTAILQ_INSERT_HEAD_RCU(head, elm, field) do { \ + (elm)->field.tqe_next = (head)->tqh_first; \ + if ((elm)->field.tqe_next != NULL) { \ + (head)->tqh_first->field.tqe_prev = &(elm)->field.tqe_next; \ + } else { \ + (head)->tqh_last = &(elm)->field.tqe_next; \ + } \ + atomic_rcu_set(&(head)->tqh_first, (elm)); \ + (elm)->field.tqe_prev = &(head)->tqh_first; \ +} while (/*CONSTCOND*/0) + +#define QTAILQ_INSERT_TAIL_RCU(head, elm, field) do { \ + (elm)->field.tqe_next = NULL; \ + (elm)->field.tqe_prev = (head)->tqh_last; \ + atomic_rcu_set((head)->tqh_last, (elm)); \ + (head)->tqh_last = &(elm)->field.tqe_next; \ +} while (/*CONSTCOND*/0) + +#define QTAILQ_INSERT_AFTER_RCU(head, listelm, elm, field) do { \ + (elm)->field.tqe_next = (listelm)->field.tqe_next; \ + if ((elm)->field.tqe_next != NULL) { \ + (elm)->field.tqe_next->field.tqe_prev = &(elm)->field.tqe_next; \ + } else { \ + (head)->tqh_last = &(elm)->field.tqe_next; \ + } \ + atomic_rcu_set(&(listelm)->field.tqe_next, (elm)); \ + (elm)->field.tqe_prev = &(listelm)->field.tqe_next; \ +} while (/*CONSTCOND*/0) + +#define QTAILQ_INSERT_BEFORE_RCU(listelm, elm, field) do { \ + (elm)->field.tqe_prev = (listelm)->field.tqe_prev; \ + (elm)->field.tqe_next = (listelm); \ + atomic_rcu_set((listelm)->field.tqe_prev, (elm)); \ + (listelm)->field.tqe_prev = &(elm)->field.tqe_next; \ + } while (/*CONSTCOND*/0) + +#define QTAILQ_REMOVE_RCU(head, elm, field) do { \ + if (((elm)->field.tqe_next) != NULL) { \ + (elm)->field.tqe_next->field.tqe_prev = (elm)->field.tqe_prev; \ + } else { \ + (head)->tqh_last = (elm)->field.tqe_prev; \ + } \ + atomic_set((elm)->field.tqe_prev, (elm)->field.tqe_next); \ + (elm)->field.tqe_prev = NULL; \ +} while (/*CONSTCOND*/0) + +#define QTAILQ_FOREACH_RCU(var, head, field) \ + for ((var) = atomic_rcu_read(&(head)->tqh_first); \ + (var); \ + (var) = atomic_rcu_read(&(var)->field.tqe_next)) + +#define QTAILQ_FOREACH_SAFE_RCU(var, head, field, next) \ + for ((var) = atomic_rcu_read(&(head)->tqh_first); \ + (var) && ((next) = atomic_rcu_read(&(var)->field.tqe_next), 1); \ + (var) = (next)) + #ifdef __cplusplus } #endif -- cgit v1.1 From 988fcafc730242711ba92f9e62557e37f2b22cc0 Mon Sep 17 00:00:00 2001 From: Paolo Bonzini Date: Tue, 14 Aug 2018 09:48:29 +0200 Subject: seqlock: add QemuLockable support A shortcut when the seqlock write is protected by a spinlock or any mutex other than the BQL. 
Signed-off-by: Paolo Bonzini
---
 include/qemu/seqlock.h | 20 ++++++++++++++++++++
 1 file changed, 20 insertions(+)

(limited to 'include/qemu')

diff --git a/include/qemu/seqlock.h b/include/qemu/seqlock.h
index c367516..fd408b7 100644
--- a/include/qemu/seqlock.h
+++ b/include/qemu/seqlock.h
@@ -16,6 +16,7 @@
 #include "qemu/atomic.h"
 #include "qemu/thread.h"
+#include "qemu/lockable.h"
 
 typedef struct QemuSeqLock QemuSeqLock;
 
@@ -45,6 +46,25 @@ static inline void seqlock_write_end(QemuSeqLock *sl)
     atomic_set(&sl->sequence, sl->sequence + 1);
 }
 
+/* Lock out other writers and update the count. */
+static inline void seqlock_write_lock_impl(QemuSeqLock *sl, QemuLockable *lock)
+{
+    qemu_lockable_lock(lock);
+    seqlock_write_begin(sl);
+}
+#define seqlock_write_lock(sl, lock) \
+    seqlock_write_lock_impl(sl, QEMU_MAKE_LOCKABLE(lock))
+
+/* Update the count and release the lock. */
+static inline void seqlock_write_unlock_impl(QemuSeqLock *sl, QemuLockable *lock)
+{
+    seqlock_write_end(sl);
+    qemu_lockable_unlock(lock);
+}
+#define seqlock_write_unlock(sl, lock) \
+    seqlock_write_unlock_impl(sl, QEMU_MAKE_LOCKABLE(lock))
+
+
 static inline unsigned seqlock_read_begin(const QemuSeqLock *sl)
 {
     /* Always fail if a write is in progress.  */
-- 
cgit v1.1
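
The series ends with the classic seqlock reader/writer split: writers serialize on an ordinary lock and bump the sequence count, while readers run lock-free and retry whenever they overlapped a write. A minimal usage sketch follows; it is not code from the series. TimerStats, stats_init, stats_update, stats_read and last_update are hypothetical names, and the sketch assumes it is compiled inside the QEMU tree, where seqlock_init, seqlock_read_retry, QemuSpin and QEMU_MAKE_LOCKABLE already exist, together with the seqlock_write_lock/seqlock_write_unlock helpers added in the last patch.

    #include "qemu/osdep.h"
    #include "qemu/seqlock.h"
    #include "qemu/thread.h"

    /* Hypothetical shared state: one writer publishes a timestamp to
     * many lock-free readers. */
    typedef struct TimerStats {
        QemuSeqLock sl;
        QemuSpin lock;        /* writer-side lock; any QemuLockable works */
        int64_t last_update;
    } TimerStats;

    static void stats_init(TimerStats *s)
    {
        seqlock_init(&s->sl);
        qemu_spin_init(&s->lock);
    }

    /* Writer: seqlock_write_lock() acquires the spinlock and makes the
     * sequence count odd; seqlock_write_unlock() makes it even again
     * before releasing the lock. */
    static void stats_update(TimerStats *s, int64_t now)
    {
        seqlock_write_lock(&s->sl, &s->lock);
        s->last_update = now;
        seqlock_write_unlock(&s->sl, &s->lock);
    }

    /* Reader: takes no lock; retries if a write was in progress or
     * completed in between. The const qualifier is possible thanks to
     * the constified seqlock_read_begin() from the first patch. */
    static int64_t stats_read(const TimerStats *s)
    {
        int64_t val;
        unsigned start;

        do {
            start = seqlock_read_begin(&s->sl);
            val = s->last_update;
        } while (seqlock_read_retry(&s->sl, start));

        return val;
    }

Note the ordering inside the unlock helper: seqlock_write_end() must run before qemu_lockable_unlock(), so that its write barrier orders the data stores before the final count update and the count becomes even again while the lock is still held; otherwise a second writer could begin while readers still observe an in-progress sequence.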