author     Paolo Bonzini <pbonzini@redhat.com>      2017-01-12 19:07:52 +0100
committer  Stefan Hajnoczi <stefanha@redhat.com>    2017-01-16 13:25:17 +0000
commit     51dee5e465e1b3454a886792ca3e14b851e8e67d
tree       33d418609cae2d5251a509cf60dce904dd714bc4 /util/lockcnt.c
parent     cf2c02c8ea0f2d7888f3b45b20e7eb24b47a93f1
qemu-thread: introduce QemuLockCnt
A QemuLockCnt comprises a counter and a mutex, with primitives
to increment and decrement the counter, and to take and release the
mutex. It can be used to do lock-free visits to a data structure
whenever mutexes would be too heavy-weight and the critical section
is too long for RCU.
This could be implemented simply by protecting the counter with the
mutex, but QemuLockCnt is harder to misuse and more efficient.
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
Reviewed-by: Fam Zheng <famz@redhat.com>
Reviewed-by: Stefan Hajnoczi <stefanha@redhat.com>
Message-id: 20170112180800.21085-3-pbonzini@redhat.com
Signed-off-by: Stefan Hajnoczi <stefanha@redhat.com>
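
The intended usage pattern is easiest to see in context. The following is a
minimal sketch, not part of this patch: the Item list, its deleted flag, and
visit_items() are hypothetical stand-ins, and the RCU-friendly list macros
come from QEMU's qemu/rcu_queue.h. Readers bracket a lock-free walk with
qemu_lockcnt_inc()/qemu_lockcnt_dec(); a reader willing to reclaim deleted
elements ends its visit with qemu_lockcnt_dec_and_lock() instead, and frees
elements only if it turned out to be the last visitor.

/* Hypothetical usage sketch; Item, items and the helper are
 * illustrative only, not part of this patch. */
#include "qemu/osdep.h"
#include "qemu/thread.h"
#include "qemu/rcu_queue.h"

typedef struct Item {
    QLIST_ENTRY(Item) node;
    bool deleted;
} Item;

static QLIST_HEAD(, Item) items = QLIST_HEAD_INITIALIZER(items);
static QemuLockCnt items_lockcnt;

/* Reader: the counter, not the mutex, protects the visit. */
static void visit_items(void)
{
    Item *it, *next;

    qemu_lockcnt_inc(&items_lockcnt);
    QLIST_FOREACH_RCU(it, &items, node) {
        if (!it->deleted) {
            /* ... use it ... */
        }
    }

    /* Leaving the visit: if we were the last reader, the mutex is
     * now held and deleted elements can be reclaimed safely, because
     * the count cannot become nonzero while the mutex is taken. */
    if (qemu_lockcnt_dec_and_lock(&items_lockcnt)) {
        QLIST_FOREACH_SAFE(it, &items, node, next) {
            if (it->deleted) {
                QLIST_REMOVE(it, node);
                g_free(it);
            }
        }
        qemu_lockcnt_unlock(&items_lockcnt);
    }
}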
Diffstat (limited to 'util/lockcnt.c')
-rw-r--r--  util/lockcnt.c  114
1 file changed, 114 insertions, 0 deletions
diff --git a/util/lockcnt.c b/util/lockcnt.c
new file mode 100644
index 0000000..da1de77
--- /dev/null
+++ b/util/lockcnt.c
@@ -0,0 +1,114 @@
+/*
+ * QemuLockCnt implementation
+ *
+ * Copyright Red Hat, Inc. 2017
+ *
+ * Author:
+ *   Paolo Bonzini <pbonzini@redhat.com>
+ */
+#include "qemu/osdep.h"
+#include "qemu/thread.h"
+#include "qemu/atomic.h"
+
+void qemu_lockcnt_init(QemuLockCnt *lockcnt)
+{
+    qemu_mutex_init(&lockcnt->mutex);
+    lockcnt->count = 0;
+}
+
+void qemu_lockcnt_destroy(QemuLockCnt *lockcnt)
+{
+    qemu_mutex_destroy(&lockcnt->mutex);
+}
+
+void qemu_lockcnt_inc(QemuLockCnt *lockcnt)
+{
+    int old;
+    for (;;) {
+        old = atomic_read(&lockcnt->count);
+        if (old == 0) {
+            qemu_lockcnt_lock(lockcnt);
+            qemu_lockcnt_inc_and_unlock(lockcnt);
+            return;
+        } else {
+            if (atomic_cmpxchg(&lockcnt->count, old, old + 1) == old) {
+                return;
+            }
+        }
+    }
+}
+
+void qemu_lockcnt_dec(QemuLockCnt *lockcnt)
+{
+    atomic_dec(&lockcnt->count);
+}
+
+/* Decrement a counter, and return locked if it is decremented to zero.
+ * It is impossible for the counter to become nonzero while the mutex
+ * is taken.
+ */
+bool qemu_lockcnt_dec_and_lock(QemuLockCnt *lockcnt)
+{
+    int val = atomic_read(&lockcnt->count);
+    while (val > 1) {
+        int old = atomic_cmpxchg(&lockcnt->count, val, val - 1);
+        if (old != val) {
+            val = old;
+            continue;
+        }
+
+        return false;
+    }
+
+    qemu_lockcnt_lock(lockcnt);
+    if (atomic_fetch_dec(&lockcnt->count) == 1) {
+        return true;
+    }
+
+    qemu_lockcnt_unlock(lockcnt);
+    return false;
+}
+
+/* Decrement a counter and return locked if it is decremented to zero.
+ * Otherwise do nothing.
+ *
+ * It is impossible for the counter to become nonzero while the mutex
+ * is taken.
+ */
+bool qemu_lockcnt_dec_if_lock(QemuLockCnt *lockcnt)
+{
+    /* No need for acquire semantics if we return false. */
+    int val = atomic_read(&lockcnt->count);
+    if (val > 1) {
+        return false;
+    }
+
+    qemu_lockcnt_lock(lockcnt);
+    if (atomic_fetch_dec(&lockcnt->count) == 1) {
+        return true;
+    }
+
+    qemu_lockcnt_inc_and_unlock(lockcnt);
+    return false;
+}
+
+void qemu_lockcnt_lock(QemuLockCnt *lockcnt)
+{
+    qemu_mutex_lock(&lockcnt->mutex);
+}
+
+void qemu_lockcnt_inc_and_unlock(QemuLockCnt *lockcnt)
+{
+    atomic_inc(&lockcnt->count);
+    qemu_mutex_unlock(&lockcnt->mutex);
+}
+
+void qemu_lockcnt_unlock(QemuLockCnt *lockcnt)
+{
+    qemu_mutex_unlock(&lockcnt->mutex);
+}
+
+unsigned qemu_lockcnt_count(QemuLockCnt *lockcnt)
+{
+    return atomic_read(&lockcnt->count);
+}
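
Continuing the same hypothetical sketch, the writer side shows why the mutex
is still needed: insertion and marking for deletion take the lock, so they
cannot race with the reclamation pass above. The delete_item() shape below is
a guess at the intended use of qemu_lockcnt_dec_if_lock(): a caller that is
itself inside a visit can drop its reference and reclaim immediately if it
happens to be the only visitor, falling back to merely marking the element
otherwise.

/* Writer sketch, continuing the hypothetical example above. */
static void insert_item(Item *it)
{
    qemu_lockcnt_lock(&items_lockcnt);
    QLIST_INSERT_HEAD_RCU(&items, it, node);
    qemu_lockcnt_unlock(&items_lockcnt);
}

/* Called by a thread that is itself inside a visit (counter already
 * incremented).  If we are the only visitor, dec_if_lock() drops our
 * reference and hands us the mutex, so the item can be freed at once;
 * otherwise we only mark it and keep our reference. */
static void delete_item(Item *it)
{
    if (qemu_lockcnt_dec_if_lock(&items_lockcnt)) {
        QLIST_REMOVE(it, node);
        g_free(it);
        qemu_lockcnt_inc_and_unlock(&items_lockcnt);
    } else {
        qemu_lockcnt_lock(&items_lockcnt);
        it->deleted = true;
        qemu_lockcnt_unlock(&items_lockcnt);
    }
}

Note how the success path re-establishes the caller's visit reference with
qemu_lockcnt_inc_and_unlock(): the increment happens before the mutex is
released, so no other thread can observe a zero count and start reclaiming
while this visitor is still walking the list.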