From 2da61b671eb89fcaa306738f44eed472977d6587 Mon Sep 17 00:00:00 2001
From: Stefan Hajnoczi
Date: Mon, 3 Mar 2014 11:30:03 +0100
Subject: rfifolock: add recursive FIFO lock

QemuMutex does not guarantee fairness and cannot be acquired recursively:

Fairness means each locker gets a turn and the scheduler cannot cause
starvation.

Recursive locking is useful for composition: it allows a sequence of
locking operations to be invoked atomically by acquiring the lock around
them.

This patch adds RFifoLock, a recursive lock that guarantees FIFO order.
Its first user is added in the next patch.

RFifoLock has one additional feature: it can be initialized with an
optional contention callback.  The callback is invoked whenever a thread
must wait for the lock.  For example, it can be used to poke the current
owner so that they release the lock soon.

Signed-off-by: Stefan Hajnoczi
---
 util/Makefile.objs |  1 +
 util/rfifolock.c   | 78 ++++++++++++++++++++++++++++++++++++++++++++++++++++++
 2 files changed, 79 insertions(+)
 create mode 100644 util/rfifolock.c

diff --git a/util/Makefile.objs b/util/Makefile.objs
index 937376b..df83b62 100644
--- a/util/Makefile.objs
+++ b/util/Makefile.objs
@@ -14,3 +14,4 @@ util-obj-y += crc32c.o
 util-obj-y += throttle.o
 util-obj-y += getauxval.o
 util-obj-y += readline.o
+util-obj-y += rfifolock.o
diff --git a/util/rfifolock.c b/util/rfifolock.c
new file mode 100644
index 0000000..afbf748
--- /dev/null
+++ b/util/rfifolock.c
@@ -0,0 +1,78 @@
+/*
+ * Recursive FIFO lock
+ *
+ * Copyright Red Hat, Inc. 2013
+ *
+ * Authors:
+ *  Stefan Hajnoczi
+ *
+ * This work is licensed under the terms of the GNU LGPL, version 2 or later.
+ * See the COPYING.LIB file in the top-level directory.
+ *
+ */
+
+#include <assert.h>
+#include "qemu/rfifolock.h"
+
+void rfifolock_init(RFifoLock *r, void (*cb)(void *), void *opaque)
+{
+    qemu_mutex_init(&r->lock);
+    r->head = 0;
+    r->tail = 0;
+    qemu_cond_init(&r->cond);
+    r->nesting = 0;
+    r->cb = cb;
+    r->cb_opaque = opaque;
+}
+
+void rfifolock_destroy(RFifoLock *r)
+{
+    qemu_cond_destroy(&r->cond);
+    qemu_mutex_destroy(&r->lock);
+}
+
+/*
+ * Theory of operation:
+ *
+ * In order to ensure FIFO ordering, implement a ticketlock.  Threads acquiring
+ * the lock enqueue themselves by incrementing the tail index.  When the lock
+ * is unlocked, the head is incremented and waiting threads are notified.
+ *
+ * Recursive locking does not take a ticket since the head is only incremented
+ * when the outermost recursive caller unlocks.
+ */
+void rfifolock_lock(RFifoLock *r)
+{
+    qemu_mutex_lock(&r->lock);
+
+    /* Take a ticket */
+    unsigned int ticket = r->tail++;
+
+    if (r->nesting > 0 && qemu_thread_is_self(&r->owner_thread)) {
+        r->tail--; /* put ticket back, we're nesting */
+    } else {
+        while (ticket != r->head) {
+            /* Invoke optional contention callback */
+            if (r->cb) {
+                r->cb(r->cb_opaque);
+            }
+            qemu_cond_wait(&r->cond, &r->lock);
+        }
+    }
+
+    qemu_thread_get_self(&r->owner_thread);
+    r->nesting++;
+    qemu_mutex_unlock(&r->lock);
+}
+
+void rfifolock_unlock(RFifoLock *r)
+{
+    qemu_mutex_lock(&r->lock);
+    assert(r->nesting > 0);
+    assert(qemu_thread_is_self(&r->owner_thread));
+    if (--r->nesting == 0) {
+        r->head++;
+        qemu_cond_broadcast(&r->cond);
+    }
+    qemu_mutex_unlock(&r->lock);
+}
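
The API added by this patch is small: rfifolock_init() with an optional
contention callback, rfifolock_lock() and rfifolock_unlock() (which may
nest on the owning thread), and rfifolock_destroy().  The sketch below is
one way a caller might drive it; it is illustrative only, assumes a QEMU
build environment providing "qemu/rfifolock.h", and uses a hypothetical
poke_owner() callback that is not part of this patch:

#include "qemu/rfifolock.h"

/* Hypothetical contention callback: invoked by rfifolock_lock() whenever a
 * thread must wait its turn.  A real user might use this to poke the current
 * owner (for example by kicking its event loop) so the lock is released soon.
 */
static void poke_owner(void *opaque)
{
    /* left empty in this sketch */
}

static RFifoLock lock;

static void example(void)
{
    rfifolock_init(&lock, poke_owner, NULL);

    rfifolock_lock(&lock);      /* outermost acquisition takes a ticket */
    rfifolock_lock(&lock);      /* recursive acquisition only bumps nesting */
    rfifolock_unlock(&lock);
    rfifolock_unlock(&lock);    /* head advances, waiting threads are woken */

    rfifolock_destroy(&lock);
}

Note that the contention callback runs with the internal r->lock held and may
be called repeatedly from the wait loop, so it must not block or try to take
the RFifoLock itself.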