From c2b38b277a7882a592f4f2ec955084b2b756daaa Mon Sep 17 00:00:00 2001
From: Paolo Bonzini
Date: Mon, 13 Feb 2017 14:52:18 +0100
Subject: block: move AioContext, QEMUTimer, main-loop to libqemuutil

AioContext is fairly self-contained; the only dependency is QEMUTimer,
but that in turn doesn't need anything else. So move them out of
block-obj-y to avoid introducing a dependency from io/ to block-obj-y.

main-loop and its dependency iohandler also need to be moved, because
later in this series io/ will call iohandler_get_aio_context.

[Changed copyright "the QEMU team" to "other QEMU contributors" as
suggested by Daniel Berrange and agreed by Paolo. --Stefan]

Signed-off-by: Paolo Bonzini
Reviewed-by: Fam Zheng
Message-id: 20170213135235.12274-2-pbonzini@redhat.com
Signed-off-by: Stefan Hajnoczi
---
 tests/Makefile.include | 11 ++++++-----
 1 file changed, 6 insertions(+), 5 deletions(-)

(limited to 'tests')

diff --git a/tests/Makefile.include b/tests/Makefile.include
index 634394a..fd9c70a 100644
--- a/tests/Makefile.include
+++ b/tests/Makefile.include
@@ -45,6 +45,9 @@ check-unit-y += tests/test-visitor-serialization$(EXESUF)
 check-unit-y += tests/test-iov$(EXESUF)
 gcov-files-test-iov-y = util/iov.c
 check-unit-y += tests/test-aio$(EXESUF)
+gcov-files-test-aio-y = util/async.c util/qemu-timer.o
+gcov-files-test-aio-$(CONFIG_WIN32) += util/aio-win32.c
+gcov-files-test-aio-$(CONFIG_POSIX) += util/aio-posix.c
 check-unit-y += tests/test-throttle$(EXESUF)
 gcov-files-test-aio-$(CONFIG_WIN32) = aio-win32.c
 gcov-files-test-aio-$(CONFIG_POSIX) = aio-posix.c
@@ -517,8 +520,7 @@ tests/check-qjson$(EXESUF): tests/check-qjson.o $(test-util-obj-y)
 tests/check-qom-interface$(EXESUF): tests/check-qom-interface.o $(test-qom-obj-y)
 tests/check-qom-proplist$(EXESUF): tests/check-qom-proplist.o $(test-qom-obj-y)

-tests/test-char$(EXESUF): tests/test-char.o qemu-timer.o \
-	$(test-util-obj-y) $(qtest-obj-y) $(test-block-obj-y) $(chardev-obj-y)
+tests/test-char$(EXESUF): tests/test-char.o $(test-util-obj-y) $(qtest-obj-y) $(test-io-obj-y) $(chardev-obj-y)
 tests/test-coroutine$(EXESUF): tests/test-coroutine.o $(test-block-obj-y)
 tests/test-aio$(EXESUF): tests/test-aio.o $(test-block-obj-y)
 tests/test-throttle$(EXESUF): tests/test-throttle.o $(test-block-obj-y)
@@ -551,8 +553,7 @@ tests/test-vmstate$(EXESUF): tests/test-vmstate.o \
 	migration/vmstate.o migration/qemu-file.o \
 	migration/qemu-file-channel.o migration/qjson.o \
 	$(test-io-obj-y)
-tests/test-timed-average$(EXESUF): tests/test-timed-average.o qemu-timer.o \
-	$(test-util-obj-y)
+tests/test-timed-average$(EXESUF): tests/test-timed-average.o $(test-util-obj-y)
 tests/test-base64$(EXESUF): tests/test-base64.o \
 	libqemuutil.a libqemustub.a
 tests/ptimer-test$(EXESUF): tests/ptimer-test.o tests/ptimer-test-stubs.o hw/core/ptimer.o libqemustub.a
@@ -712,7 +713,7 @@ tests/usb-hcd-ehci-test$(EXESUF): tests/usb-hcd-ehci-test.o $(libqos-usb-obj-y)
 tests/usb-hcd-xhci-test$(EXESUF): tests/usb-hcd-xhci-test.o $(libqos-usb-obj-y)
 tests/pc-cpu-test$(EXESUF): tests/pc-cpu-test.o
 tests/postcopy-test$(EXESUF): tests/postcopy-test.o
-tests/vhost-user-test$(EXESUF): tests/vhost-user-test.o qemu-timer.o \
+tests/vhost-user-test$(EXESUF): tests/vhost-user-test.o $(test-util-obj-y) \
 	$(qtest-obj-y) $(test-io-obj-y) $(libqos-virtio-obj-y) $(libqos-pc-obj-y) \
 	$(chardev-obj-y)
 tests/qemu-iotests/socket_scm_helper$(EXESUF): tests/qemu-iotests/socket_scm_helper.o

From 0c330a734b51c177ab8488932ac3b0c4d63a718a Mon Sep 17 00:00:00 2001
From: Paolo Bonzini
Date: Mon, 13 Feb 2017 14:52:19 +0100
Subject: aio: introduce aio_co_schedule and aio_co_wake

aio_co_wake provides the infrastructure to start a coroutine on a "home"
AioContext. It will be used by CoMutex and CoQueue, so that coroutines
don't jump from one context to another when they go to sleep on a mutex
or waitqueue. However, it can also be used as a more efficient alternative
to one-shot bottom halves, and saves the effort of tracking which
AioContext a coroutine is running on.

aio_co_schedule is the part of aio_co_wake that starts a coroutine on a
remote AioContext, but it is also useful to implement e.g.
bdrv_set_aio_context callbacks.

The implementation of aio_co_schedule is based on a lock-free
multiple-producer, single-consumer queue. The multiple producers use
cmpxchg to add to a LIFO stack. The consumer (a per-AioContext bottom
half) grabs all items added so far, inverts the list to make it FIFO,
and goes through it one item at a time until it's empty. The data
structure was inspired by OSv, which uses it in the very code we'll
"port" to QEMU for the thread-safe CoMutex.

Most of the new code is really tests.

Signed-off-by: Paolo Bonzini
Reviewed-by: Fam Zheng
Message-id: 20170213135235.12274-3-pbonzini@redhat.com
Signed-off-by: Stefan Hajnoczi
---
 tests/Makefile.include       |   8 +-
 tests/iothread.c             |  91 ++++++++++++++++++
 tests/iothread.h             |  25 +++++
 tests/test-aio-multithread.c | 213 +++++++++++++++++++++++++++++++++++++++++++
 4 files changed, 334 insertions(+), 3 deletions(-)
 create mode 100644 tests/iothread.c
 create mode 100644 tests/iothread.h
 create mode 100644 tests/test-aio-multithread.c

(limited to 'tests')

diff --git a/tests/Makefile.include b/tests/Makefile.include
index fd9c70a..e60bb6c 100644
--- a/tests/Makefile.include
+++ b/tests/Makefile.include
@@ -48,9 +48,10 @@ check-unit-y += tests/test-aio$(EXESUF)
 gcov-files-test-aio-y = util/async.c util/qemu-timer.o
 gcov-files-test-aio-$(CONFIG_WIN32) += util/aio-win32.c
 gcov-files-test-aio-$(CONFIG_POSIX) += util/aio-posix.c
+check-unit-y += tests/test-aio-multithread$(EXESUF)
+gcov-files-test-aio-multithread-y = $(gcov-files-test-aio-y)
+gcov-files-test-aio-multithread-y += util/qemu-coroutine.c tests/iothread.c
 check-unit-y += tests/test-throttle$(EXESUF)
-gcov-files-test-aio-$(CONFIG_WIN32) = aio-win32.c
-gcov-files-test-aio-$(CONFIG_POSIX) = aio-posix.c
 check-unit-y += tests/test-thread-pool$(EXESUF)
 gcov-files-test-thread-pool-y = thread-pool.c
 gcov-files-test-hbitmap-y = util/hbitmap.c
@@ -508,7 +509,7 @@ test-qapi-obj-y = tests/test-qapi-visit.o tests/test-qapi-types.o \
 	$(test-qom-obj-y)
 test-crypto-obj-y = $(crypto-obj-y) $(test-qom-obj-y)
 test-io-obj-y = $(io-obj-y) $(test-crypto-obj-y)
-test-block-obj-y = $(block-obj-y) $(test-io-obj-y)
+test-block-obj-y = $(block-obj-y) $(test-io-obj-y) tests/iothread.o

 tests/check-qint$(EXESUF): tests/check-qint.o $(test-util-obj-y)
 tests/check-qstring$(EXESUF): tests/check-qstring.o $(test-util-obj-y)
@@ -523,6 +524,7 @@ tests/check-qom-proplist$(EXESUF): tests/check-qom-proplist.o $(test-qom-obj-y)
 tests/test-char$(EXESUF): tests/test-char.o $(test-util-obj-y) $(qtest-obj-y) $(test-io-obj-y) $(chardev-obj-y)
 tests/test-coroutine$(EXESUF): tests/test-coroutine.o $(test-block-obj-y)
 tests/test-aio$(EXESUF): tests/test-aio.o $(test-block-obj-y)
+tests/test-aio-multithread$(EXESUF): tests/test-aio-multithread.o $(test-block-obj-y)
 tests/test-throttle$(EXESUF): tests/test-throttle.o $(test-block-obj-y)
 tests/test-blockjob$(EXESUF): tests/test-blockjob.o $(test-block-obj-y) $(test-util-obj-y)
 tests/test-blockjob-txn$(EXESUF): tests/test-blockjob-txn.o $(test-block-obj-y) $(test-util-obj-y)

diff --git a/tests/iothread.c b/tests/iothread.c
new file mode 100644
index 0000000..777d9ee
--- /dev/null
+++ b/tests/iothread.c
@@ -0,0 +1,91 @@
+/*
+ * Event loop thread implementation for unit tests
+ *
+ * Copyright Red Hat Inc., 2013, 2016
+ *
+ * Authors:
+ *  Stefan Hajnoczi
+ *  Paolo Bonzini
+ *
+ * This work is licensed under the terms of the GNU GPL, version 2 or later.
+ * See the COPYING file in the top-level directory.
+ *
+ */
+
+#include "qemu/osdep.h"
+#include "qapi/error.h"
+#include "block/aio.h"
+#include "qemu/main-loop.h"
+#include "qemu/rcu.h"
+#include "iothread.h"
+
+struct IOThread {
+    AioContext *ctx;
+
+    QemuThread thread;
+    QemuMutex init_done_lock;
+    QemuCond init_done_cond;    /* is thread initialization done? */
+    bool stopping;
+};
+
+static __thread IOThread *my_iothread;
+
+AioContext *qemu_get_current_aio_context(void)
+{
+    return my_iothread ? my_iothread->ctx : qemu_get_aio_context();
+}
+
+static void *iothread_run(void *opaque)
+{
+    IOThread *iothread = opaque;
+
+    rcu_register_thread();
+
+    my_iothread = iothread;
+    qemu_mutex_lock(&iothread->init_done_lock);
+    iothread->ctx = aio_context_new(&error_abort);
+    qemu_cond_signal(&iothread->init_done_cond);
+    qemu_mutex_unlock(&iothread->init_done_lock);
+
+    while (!atomic_read(&iothread->stopping)) {
+        aio_poll(iothread->ctx, true);
+    }
+
+    rcu_unregister_thread();
+    return NULL;
+}
+
+void iothread_join(IOThread *iothread)
+{
+    iothread->stopping = true;
+    aio_notify(iothread->ctx);
+    qemu_thread_join(&iothread->thread);
+    qemu_cond_destroy(&iothread->init_done_cond);
+    qemu_mutex_destroy(&iothread->init_done_lock);
+    aio_context_unref(iothread->ctx);
+    g_free(iothread);
+}
+
+IOThread *iothread_new(void)
+{
+    IOThread *iothread = g_new0(IOThread, 1);
+
+    qemu_mutex_init(&iothread->init_done_lock);
+    qemu_cond_init(&iothread->init_done_cond);
+    qemu_thread_create(&iothread->thread, NULL, iothread_run,
+                       iothread, QEMU_THREAD_JOINABLE);
+
+    /* Wait for initialization to complete */
+    qemu_mutex_lock(&iothread->init_done_lock);
+    while (iothread->ctx == NULL) {
+        qemu_cond_wait(&iothread->init_done_cond,
+                       &iothread->init_done_lock);
+    }
+    qemu_mutex_unlock(&iothread->init_done_lock);
+    return iothread;
+}
+
+AioContext *iothread_get_aio_context(IOThread *iothread)
+{
+    return iothread->ctx;
+}

diff --git a/tests/iothread.h b/tests/iothread.h
new file mode 100644
index 0000000..4877cea
--- /dev/null
+++ b/tests/iothread.h
@@ -0,0 +1,25 @@
+/*
+ * Event loop thread implementation for unit tests
+ *
+ * Copyright Red Hat Inc., 2013, 2016
+ *
+ * Authors:
+ *  Stefan Hajnoczi
+ *  Paolo Bonzini
+ *
+ * This work is licensed under the terms of the GNU GPL, version 2 or later.
+ * See the COPYING file in the top-level directory.
+ */
+#ifndef TEST_IOTHREAD_H
+#define TEST_IOTHREAD_H
+
+#include "block/aio.h"
+#include "qemu/thread.h"
+
+typedef struct IOThread IOThread;
+
+IOThread *iothread_new(void);
+void iothread_join(IOThread *iothread);
+AioContext *iothread_get_aio_context(IOThread *iothread);
+
+#endif

diff --git a/tests/test-aio-multithread.c b/tests/test-aio-multithread.c
new file mode 100644
index 0000000..534807d
--- /dev/null
+++ b/tests/test-aio-multithread.c
@@ -0,0 +1,213 @@
+/*
+ * AioContext multithreading tests
+ *
+ * Copyright Red Hat, Inc. 2016
+ *
+ * Authors:
+ *  Paolo Bonzini
+ *
+ * This work is licensed under the terms of the GNU LGPL, version 2 or later.
+ * See the COPYING.LIB file in the top-level directory.
+ */
+
+#include "qemu/osdep.h"
+#include
+#include "block/aio.h"
+#include "qapi/error.h"
+#include "qemu/coroutine.h"
+#include "qemu/thread.h"
+#include "qemu/error-report.h"
+#include "iothread.h"
+
+/* AioContext management */
+
+#define NUM_CONTEXTS 5
+
+static IOThread *threads[NUM_CONTEXTS];
+static AioContext *ctx[NUM_CONTEXTS];
+static __thread int id = -1;
+
+static QemuEvent done_event;
+
+/* Run a function synchronously on a remote iothread. */
+
+typedef struct CtxRunData {
+    QEMUBHFunc *cb;
+    void *arg;
+} CtxRunData;
+
+static void ctx_run_bh_cb(void *opaque)
+{
+    CtxRunData *data = opaque;
+
+    data->cb(data->arg);
+    qemu_event_set(&done_event);
+}
+
+static void ctx_run(int i, QEMUBHFunc *cb, void *opaque)
+{
+    CtxRunData data = {
+        .cb = cb,
+        .arg = opaque
+    };
+
+    qemu_event_reset(&done_event);
+    aio_bh_schedule_oneshot(ctx[i], ctx_run_bh_cb, &data);
+    qemu_event_wait(&done_event);
+}
+
+/* Starting the iothreads. */
+
+static void set_id_cb(void *opaque)
+{
+    int *i = opaque;
+
+    id = *i;
+}
+
+static void create_aio_contexts(void)
+{
+    int i;
+
+    for (i = 0; i < NUM_CONTEXTS; i++) {
+        threads[i] = iothread_new();
+        ctx[i] = iothread_get_aio_context(threads[i]);
+    }
+
+    qemu_event_init(&done_event, false);
+    for (i = 0; i < NUM_CONTEXTS; i++) {
+        ctx_run(i, set_id_cb, &i);
+    }
+}
+
+/* Stopping the iothreads. */
+
+static void join_aio_contexts(void)
+{
+    int i;
+
+    for (i = 0; i < NUM_CONTEXTS; i++) {
+        aio_context_ref(ctx[i]);
+    }
+    for (i = 0; i < NUM_CONTEXTS; i++) {
+        iothread_join(threads[i]);
+    }
+    for (i = 0; i < NUM_CONTEXTS; i++) {
+        aio_context_unref(ctx[i]);
+    }
+    qemu_event_destroy(&done_event);
+}
+
+/* Basic test for the stuff above. */
+
+static void test_lifecycle(void)
+{
+    create_aio_contexts();
+    join_aio_contexts();
+}
+
+/* aio_co_schedule test. */
+
+static Coroutine *to_schedule[NUM_CONTEXTS];
+
+static bool now_stopping;
+
+static int count_retry;
+static int count_here;
+static int count_other;
+
+static bool schedule_next(int n)
+{
+    Coroutine *co;
+
+    co = atomic_xchg(&to_schedule[n], NULL);
+    if (!co) {
+        atomic_inc(&count_retry);
+        return false;
+    }
+
+    if (n == id) {
+        atomic_inc(&count_here);
+    } else {
+        atomic_inc(&count_other);
+    }
+
+    aio_co_schedule(ctx[n], co);
+    return true;
+}
+
+static void finish_cb(void *opaque)
+{
+    schedule_next(id);
+}
+
+static coroutine_fn void test_multi_co_schedule_entry(void *opaque)
+{
+    g_assert(to_schedule[id] == NULL);
+    atomic_mb_set(&to_schedule[id], qemu_coroutine_self());
+
+    while (!atomic_mb_read(&now_stopping)) {
+        int n;
+
+        n = g_test_rand_int_range(0, NUM_CONTEXTS);
+        schedule_next(n);
+        qemu_coroutine_yield();
+
+        g_assert(to_schedule[id] == NULL);
+        atomic_mb_set(&to_schedule[id], qemu_coroutine_self());
+    }
+}
+
+
+static void test_multi_co_schedule(int seconds)
+{
+    int i;
+
+    count_here = count_other = count_retry = 0;
+    now_stopping = false;
+
+    create_aio_contexts();
+    for (i = 0; i < NUM_CONTEXTS; i++) {
+        Coroutine *co1 = qemu_coroutine_create(test_multi_co_schedule_entry, NULL);
+        aio_co_schedule(ctx[i], co1);
+    }
+
+    g_usleep(seconds * 1000000);
+
+    atomic_mb_set(&now_stopping, true);
+    for (i = 0; i < NUM_CONTEXTS; i++) {
+        ctx_run(i, finish_cb, NULL);
+        to_schedule[i] = NULL;
+    }
+
+    join_aio_contexts();
+    g_test_message("scheduled %d, queued %d, retry %d, total %d\n",
+                   count_other, count_here, count_retry,
+                   count_here + count_other + count_retry);
+}
+
+static void test_multi_co_schedule_1(void)
+{
+    test_multi_co_schedule(1);
+}
+
+static void test_multi_co_schedule_10(void)
+{
+    test_multi_co_schedule(10);
+}
+
+/* End of tests. */
+
+int main(int argc, char **argv)
+{
+    init_clocks();
+
+    g_test_init(&argc, &argv, NULL);
+    g_test_add_func("/aio/multi/lifecycle", test_lifecycle);
+    if (g_test_quick()) {
+        g_test_add_func("/aio/multi/schedule", test_multi_co_schedule_1);
+    } else {
+        g_test_add_func("/aio/multi/schedule", test_multi_co_schedule_10);
+    }
+    return g_test_run();
+}

From 934ebf48c0e81dd1f3febb53ae9b8eb8f2a12aab Mon Sep 17 00:00:00 2001
From: Paolo Bonzini
Date: Mon, 13 Feb 2017 14:52:21 +0100
Subject: test-thread-pool: use generic AioContext infrastructure

Once the thread pool starts using aio_co_wake, it will also need
qemu_get_current_aio_context(). Make test-thread-pool create an
AioContext with qemu_init_main_loop, so that stubs/iothread.c and
tests/iothread.c can provide the rest.
Reviewed-by: Stefan Hajnoczi
Signed-off-by: Paolo Bonzini
Reviewed-by: Fam Zheng
Message-id: 20170213135235.12274-5-pbonzini@redhat.com
Signed-off-by: Stefan Hajnoczi
---
 tests/test-thread-pool.c | 12 +++---------
 1 file changed, 3 insertions(+), 9 deletions(-)

(limited to 'tests')

diff --git a/tests/test-thread-pool.c b/tests/test-thread-pool.c
index 8dbf66a..91b4ec5 100644
--- a/tests/test-thread-pool.c
+++ b/tests/test-thread-pool.c
@@ -6,6 +6,7 @@
 #include "qapi/error.h"
 #include "qemu/timer.h"
 #include "qemu/error-report.h"
+#include "qemu/main-loop.h"

 static AioContext *ctx;
 static ThreadPool *pool;
@@ -224,15 +225,9 @@ static void test_cancel_async(void)
 int main(int argc, char **argv)
 {
     int ret;
-    Error *local_error = NULL;
-    init_clocks();
-
-    ctx = aio_context_new(&local_error);
-    if (!ctx) {
-        error_reportf_err(local_error, "Failed to create AIO Context: ");
-        exit(1);
-    }
+    qemu_init_main_loop(&error_abort);
+    ctx = qemu_get_current_aio_context();
     pool = aio_get_thread_pool(ctx);

     g_test_init(&argc, &argv, NULL);
@@ -245,6 +240,5 @@ int main(int argc, char **argv)

     ret = g_test_run();

-    aio_context_unref(ctx);
     return ret;
 }

From fed20a70e39bb9385020bdc4e8839d95326df8e2 Mon Sep 17 00:00:00 2001
From: Paolo Bonzini
Date: Mon, 13 Feb 2017 19:12:39 +0100
Subject: coroutine-lock: make CoMutex thread-safe

This uses the lock-free mutex described in the paper '"Blocking without
Locking", or LFTHREADS: A lock-free thread library' by Gidenstam and
Papatriantafilou. The same technique is used in OSv, and in fact the code
is essentially a conversion to C of OSv's code.

[Added missing coroutine_fn in tests/test-aio-multithread.c. --Stefan]

Signed-off-by: Paolo Bonzini
Reviewed-by: Fam Zheng
Message-id: 20170213181244.16297-2-pbonzini@redhat.com
Signed-off-by: Stefan Hajnoczi
---
 tests/test-aio-multithread.c | 86 ++++++++++++++++++++++++++++++++++++++++++++
 1 file changed, 86 insertions(+)

(limited to 'tests')

diff --git a/tests/test-aio-multithread.c b/tests/test-aio-multithread.c
index 534807d..4fa2e9b 100644
--- a/tests/test-aio-multithread.c
+++ b/tests/test-aio-multithread.c
@@ -196,6 +196,88 @@ static void test_multi_co_schedule_10(void)
     test_multi_co_schedule(10);
 }

+/* CoMutex thread-safety. */
+
+static uint32_t atomic_counter;
+static uint32_t running;
+static uint32_t counter;
+static CoMutex comutex;
+
+static void coroutine_fn test_multi_co_mutex_entry(void *opaque)
+{
+    while (!atomic_mb_read(&now_stopping)) {
+        qemu_co_mutex_lock(&comutex);
+        counter++;
+        qemu_co_mutex_unlock(&comutex);
+
+        /* Increase atomic_counter *after* releasing the mutex. Otherwise
+         * there is a chance (it happens about 1 in 3 runs) that the iothread
+         * exits before the coroutine is woken up, causing a spurious
+         * assertion failure.
+         */
+        atomic_inc(&atomic_counter);
+    }
+    atomic_dec(&running);
+}
+
+static void test_multi_co_mutex(int threads, int seconds)
+{
+    int i;
+
+    qemu_co_mutex_init(&comutex);
+    counter = 0;
+    atomic_counter = 0;
+    now_stopping = false;
+
+    create_aio_contexts();
+    assert(threads <= NUM_CONTEXTS);
+    running = threads;
+    for (i = 0; i < threads; i++) {
+        Coroutine *co1 = qemu_coroutine_create(test_multi_co_mutex_entry, NULL);
+        aio_co_schedule(ctx[i], co1);
+    }
+
+    g_usleep(seconds * 1000000);
+
+    atomic_mb_set(&now_stopping, true);
+    while (running > 0) {
+        g_usleep(100000);
+    }
+
+    join_aio_contexts();
+    g_test_message("%d iterations/second\n", counter / seconds);
+    g_assert_cmpint(counter, ==, atomic_counter);
+}
+
+/* Testing with NUM_CONTEXTS threads focuses on the queue. The mutex however
+ * is too contended (and the threads spend too much time in aio_poll)
+ * to actually stress the handoff protocol.
+ */
+static void test_multi_co_mutex_1(void)
+{
+    test_multi_co_mutex(NUM_CONTEXTS, 1);
+}
+
+static void test_multi_co_mutex_10(void)
+{
+    test_multi_co_mutex(NUM_CONTEXTS, 10);
+}
+
+/* Testing with fewer threads stresses the handoff protocol too. Still, the
+ * case where the locker _can_ pick up a handoff is very rare, happening
+ * about 10 times in 1 million, so increase the runtime a bit compared to
+ * other "quick" testcases that only run for 1 second.
+ */
+static void test_multi_co_mutex_2_3(void)
+{
+    test_multi_co_mutex(2, 3);
+}
+
+static void test_multi_co_mutex_2_30(void)
+{
+    test_multi_co_mutex(2, 30);
+}
+
 /* End of tests. */

 int main(int argc, char **argv)
@@ -206,8 +288,12 @@ int main(int argc, char **argv)
     g_test_add_func("/aio/multi/lifecycle", test_lifecycle);
     if (g_test_quick()) {
         g_test_add_func("/aio/multi/schedule", test_multi_co_schedule_1);
+        g_test_add_func("/aio/multi/mutex/contended", test_multi_co_mutex_1);
+        g_test_add_func("/aio/multi/mutex/handoff", test_multi_co_mutex_2_3);
     } else {
         g_test_add_func("/aio/multi/schedule", test_multi_co_schedule_10);
+        g_test_add_func("/aio/multi/mutex/contended", test_multi_co_mutex_10);
+        g_test_add_func("/aio/multi/mutex/handoff", test_multi_co_mutex_2_30);
     }
     return g_test_run();
 }

From c05df34a874706946a918d320e602bb498550838 Mon Sep 17 00:00:00 2001
From: Paolo Bonzini
Date: Mon, 13 Feb 2017 19:12:41 +0100
Subject: test-aio-multithread: add performance comparison with thread-based mutexes

Add two implementations of the same benchmark as the previous patch,
but using pthreads. One uses a normal QemuMutex; the other is Linux-only
and implements a fair mutex based on MCS locks and futexes.

This shows that the slower performance of the 5-thread case is due to
the fairness of CoMutex, rather than to coroutines. If fairness does not
matter, as is the case with two threads, CoMutex can actually be faster
than pthreads.

Signed-off-by: Paolo Bonzini
Reviewed-by: Fam Zheng
Message-id: 20170213181244.16297-4-pbonzini@redhat.com
Signed-off-by: Stefan Hajnoczi
---
 tests/test-aio-multithread.c | 164 +++++++++++++++++++++++++++++++++++++++
 1 file changed, 164 insertions(+)

(limited to 'tests')

diff --git a/tests/test-aio-multithread.c b/tests/test-aio-multithread.c
index 4fa2e9b..f11e990 100644
--- a/tests/test-aio-multithread.c
+++ b/tests/test-aio-multithread.c
@@ -278,6 +278,162 @@ static void test_multi_co_mutex_2_30(void)
     test_multi_co_mutex(2, 30);
 }

+/* Same test with fair mutexes, for performance comparison. */
+
+#ifdef CONFIG_LINUX
+#include "qemu/futex.h"
+
+/* The nodes for the mutex reside in this structure (on which we try to avoid
+ * false sharing). The head of the mutex is in the "mutex_head" variable.
+ */
+static struct {
+    int next, locked;
+    int padding[14];
+} nodes[NUM_CONTEXTS] __attribute__((__aligned__(64)));
+
+static int mutex_head = -1;
+
+static void mcs_mutex_lock(void)
+{
+    int prev;
+
+    nodes[id].next = -1;
+    nodes[id].locked = 1;
+    prev = atomic_xchg(&mutex_head, id);
+    if (prev != -1) {
+        atomic_set(&nodes[prev].next, id);
+        qemu_futex_wait(&nodes[id].locked, 1);
+    }
+}
+
+static void mcs_mutex_unlock(void)
+{
+    int next;
+    if (nodes[id].next == -1) {
+        if (atomic_read(&mutex_head) == id &&
+            atomic_cmpxchg(&mutex_head, id, -1) == id) {
+            /* Last item in the list, exit. */
+            return;
+        }
+        while (atomic_read(&nodes[id].next) == -1) {
+            /* mcs_mutex_lock did the xchg, but has not updated
+             * nodes[prev].next yet.
+             */
+        }
+    }
+
+    /* Wake up the next in line. */
+    next = nodes[id].next;
+    nodes[next].locked = 0;
+    qemu_futex_wake(&nodes[next].locked, 1);
+}
+
+static void test_multi_fair_mutex_entry(void *opaque)
+{
+    while (!atomic_mb_read(&now_stopping)) {
+        mcs_mutex_lock();
+        counter++;
+        mcs_mutex_unlock();
+        atomic_inc(&atomic_counter);
+    }
+    atomic_dec(&running);
+}
+
+static void test_multi_fair_mutex(int threads, int seconds)
+{
+    int i;
+
+    assert(mutex_head == -1);
+    counter = 0;
+    atomic_counter = 0;
+    now_stopping = false;
+
+    create_aio_contexts();
+    assert(threads <= NUM_CONTEXTS);
+    running = threads;
+    for (i = 0; i < threads; i++) {
+        Coroutine *co1 = qemu_coroutine_create(test_multi_fair_mutex_entry, NULL);
+        aio_co_schedule(ctx[i], co1);
+    }
+
+    g_usleep(seconds * 1000000);
+
+    atomic_mb_set(&now_stopping, true);
+    while (running > 0) {
+        g_usleep(100000);
+    }
+
+    join_aio_contexts();
+    g_test_message("%d iterations/second\n", counter / seconds);
+    g_assert_cmpint(counter, ==, atomic_counter);
+}
+
+static void test_multi_fair_mutex_1(void)
+{
+    test_multi_fair_mutex(NUM_CONTEXTS, 1);
+}
+
+static void test_multi_fair_mutex_10(void)
+{
+    test_multi_fair_mutex(NUM_CONTEXTS, 10);
+}
+#endif
+
+/* Same test with pthread mutexes, for performance comparison and
+ * portability. */
+
+static QemuMutex mutex;
+
+static void test_multi_mutex_entry(void *opaque)
+{
+    while (!atomic_mb_read(&now_stopping)) {
+        qemu_mutex_lock(&mutex);
+        counter++;
+        qemu_mutex_unlock(&mutex);
+        atomic_inc(&atomic_counter);
+    }
+    atomic_dec(&running);
+}
+
+static void test_multi_mutex(int threads, int seconds)
+{
+    int i;
+
+    qemu_mutex_init(&mutex);
+    counter = 0;
+    atomic_counter = 0;
+    now_stopping = false;
+
+    create_aio_contexts();
+    assert(threads <= NUM_CONTEXTS);
+    running = threads;
+    for (i = 0; i < threads; i++) {
+        Coroutine *co1 = qemu_coroutine_create(test_multi_mutex_entry, NULL);
+        aio_co_schedule(ctx[i], co1);
+    }
+
+    g_usleep(seconds * 1000000);
+
+    atomic_mb_set(&now_stopping, true);
+    while (running > 0) {
+        g_usleep(100000);
+    }
+
+    join_aio_contexts();
+    g_test_message("%d iterations/second\n", counter / seconds);
+    g_assert_cmpint(counter, ==, atomic_counter);
+}
+
+static void test_multi_mutex_1(void)
+{
+    test_multi_mutex(NUM_CONTEXTS, 1);
+}
+
+static void test_multi_mutex_10(void)
+{
+    test_multi_mutex(NUM_CONTEXTS, 10);
+}
+
 /* End of tests. */

 int main(int argc, char **argv)
@@ -290,10 +446,18 @@ int main(int argc, char **argv)
         g_test_add_func("/aio/multi/schedule", test_multi_co_schedule_1);
         g_test_add_func("/aio/multi/mutex/contended", test_multi_co_mutex_1);
         g_test_add_func("/aio/multi/mutex/handoff", test_multi_co_mutex_2_3);
+#ifdef CONFIG_LINUX
+        g_test_add_func("/aio/multi/mutex/mcs", test_multi_fair_mutex_1);
+#endif
+        g_test_add_func("/aio/multi/mutex/pthread", test_multi_mutex_1);
     } else {
         g_test_add_func("/aio/multi/schedule", test_multi_co_schedule_10);
         g_test_add_func("/aio/multi/mutex/contended", test_multi_co_mutex_10);
         g_test_add_func("/aio/multi/mutex/handoff", test_multi_co_mutex_2_30);
+#ifdef CONFIG_LINUX
+        g_test_add_func("/aio/multi/mutex/mcs", test_multi_fair_mutex_10);
+#endif
+        g_test_add_func("/aio/multi/mutex/pthread", test_multi_mutex_10);
     }
     return g_test_run();
 }
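The patches above show only the tests/ side of this series; the aio_co_schedule machinery itself (presumably in util/async.c, which the diffs here filter out) is not visible. As a rough, self-contained sketch of the multiple-producer, single-consumer scheme that commit 0c330a734b51 describes, the C program below pushes items onto a LIFO stack with compare-and-swap and lets a single consumer grab the whole stack, invert it into FIFO order and drain it one item at a time. It is not QEMU code: the names Item, push and drain, and the use of C11 atomics rather than QEMU's atomic helpers, are assumptions made for the example.

/*
 * Hypothetical sketch, not QEMU code: a lock-free multiple-producer,
 * single-consumer queue in the style described by commit 0c330a734b51.
 * Producers cmpxchg new items onto a LIFO stack; the single consumer
 * (a bottom half in QEMU) takes the whole stack at once, reverses it
 * into FIFO order and processes it until empty.
 */
#include <stdatomic.h>
#include <stddef.h>
#include <stdio.h>

typedef struct Item {
    struct Item *next;
    int value;                       /* stand-in for a Coroutine pointer */
} Item;

static _Atomic(Item *) head;         /* top of the LIFO stack */

/* Producer side: safe to call from any thread concurrently. */
static void push(Item *it)
{
    Item *old = atomic_load(&head);
    do {
        it->next = old;
    } while (!atomic_compare_exchange_weak(&head, &old, it));
}

/* Consumer side: meant to run in a single thread at a time. */
static void drain(void (*fn)(Item *))
{
    Item *lifo = atomic_exchange(&head, NULL);   /* grab everything added so far */
    Item *fifo = NULL;

    while (lifo) {                   /* invert the list to make it FIFO */
        Item *next = lifo->next;
        lifo->next = fifo;
        fifo = lifo;
        lifo = next;
    }

    while (fifo) {                   /* go through it one item at a time */
        Item *next = fifo->next;
        fn(fifo);
        fifo = next;
    }
}

static void print_item(Item *it)
{
    printf("item %d\n", it->value);
}

int main(void)
{
    static Item a = { .value = 1 }, b = { .value = 2 }, c = { .value = 3 };

    push(&a);
    push(&b);
    push(&c);
    drain(print_item);               /* prints 1, 2, 3 in push order */
    return 0;
}

In the real series the items are coroutines: aio_co_schedule pushes a coroutine onto the target AioContext's list, and the per-AioContext bottom half mentioned in the commit message drains the list and re-enters each coroutine. That is the mechanism the schedule, CoMutex and mutex-comparison tests above are exercising.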