commit     f2fb1da9412ae7b4cb512cfbd86c0185f191e2f9
tree       94e077ff9f6c344af73d34bd66468b8852ce2c37 /thread-pool.c
parent     f083201667fddd51055c2ac67f67221e82298a35
parent     908bcd540f489f7adf2d804347905b0025d808d3
author     Peter Maydell <peter.maydell@linaro.org>  2014-08-15 14:49:50 +0100
committer  Peter Maydell <peter.maydell@linaro.org>  2014-08-15 14:49:50 +0100
Merge remote-tracking branch 'remotes/kevin/tags/for-upstream' into staging
Block patches
# gpg: Signature made Fri 15 Aug 2014 14:07:42 BST using RSA key ID C88F2FD6
# gpg: Good signature from "Kevin Wolf <kwolf@redhat.com>"
* remotes/kevin/tags/for-upstream: (59 commits)
block: Catch !bs->drv in bdrv_check()
iotests: Add test for image header overlap
qcow2: Catch !*host_offset for data allocation
qcow2: Return useful error code in refcount_init()
mirror: Handle failure for potentially large allocations
vpc: Handle failure for potentially large allocations
vmdk: Handle failure for potentially large allocations
vhdx: Handle failure for potentially large allocations
vdi: Handle failure for potentially large allocations
rbd: Handle failure for potentially large allocations
raw-win32: Handle failure for potentially large allocations
raw-posix: Handle failure for potentially large allocations
qed: Handle failure for potentially large allocations
qcow2: Handle failure for potentially large allocations
qcow1: Handle failure for potentially large allocations
parallels: Handle failure for potentially large allocations
nfs: Handle failure for potentially large allocations
iscsi: Handle failure for potentially large allocations
dmg: Handle failure for potentially large allocations
curl: Handle failure for potentially large allocations
...
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
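Many of the merged patches share one theme: "Handle failure for potentially large allocations" in the individual block drivers, i.e. allocations whose size can be influenced by an image file should fail the request instead of aborting the process. The sketch below is not code from the series; load_table() and nb_entries are invented names, and it uses plain calloc() where the QEMU patches use the project's own failure-returning allocation helpers. It only illustrates the general shape of the pattern.

#include <errno.h>
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

/* Hypothetical example: an image-controlled entry count drives the
 * allocation size, so the allocation must be allowed to fail. */
static int load_table(uint64_t nb_entries, uint64_t **out)
{
    /* calloc() returns NULL on failure instead of aborting, so a huge
     * nb_entries fails this one request rather than killing the process. */
    uint64_t *table = calloc(nb_entries, sizeof(*table));
    if (table == NULL) {
        return -ENOMEM;
    }
    *out = table;
    return 0;
}

int main(void)
{
    uint64_t *table = NULL;
    /* An absurdly large entry count should fail cleanly with -ENOMEM. */
    int ret = load_table(UINT64_MAX / sizeof(uint64_t), &table);
    printf("load_table: %d\n", ret);
    free(table);
    return 0;
}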
Diffstat (limited to 'thread-pool.c')
 thread-pool.c | 27 ++++++++++++++-------------
 1 file changed, 14 insertions(+), 13 deletions(-)
diff --git a/thread-pool.c b/thread-pool.c
index dfb699d..23888dc 100644
--- a/thread-pool.c
+++ b/thread-pool.c
@@ -21,7 +21,6 @@
 #include "block/coroutine.h"
 #include "trace.h"
 #include "block/block_int.h"
-#include "qemu/event_notifier.h"
 #include "block/thread-pool.h"
 #include "qemu/main-loop.h"
 
@@ -57,8 +56,8 @@ struct ThreadPoolElement {
 };
 
 struct ThreadPool {
-    EventNotifier notifier;
     AioContext *ctx;
+    QEMUBH *completion_bh;
     QemuMutex lock;
     QemuCond check_cancel;
     QemuCond worker_stopped;
@@ -119,7 +118,7 @@ static void *worker_thread(void *opaque)
             qemu_cond_broadcast(&pool->check_cancel);
         }
 
-        event_notifier_set(&pool->notifier);
+        qemu_bh_schedule(pool->completion_bh);
     }
 
     pool->cur_threads--;
@@ -168,12 +167,11 @@ static void spawn_thread(ThreadPool *pool)
     }
 }
 
-static void event_notifier_ready(EventNotifier *notifier)
+static void thread_pool_completion_bh(void *opaque)
 {
-    ThreadPool *pool = container_of(notifier, ThreadPool, notifier);
+    ThreadPool *pool = opaque;
     ThreadPoolElement *elem, *next;
 
-    event_notifier_test_and_clear(notifier);
 restart:
     QLIST_FOREACH_SAFE(elem, &pool->head, all, next) {
         if (elem->state != THREAD_CANCELED && elem->state != THREAD_DONE) {
@@ -187,6 +185,12 @@ restart:
             QLIST_REMOVE(elem, all);
             /* Read state before ret. */
             smp_rmb();
+
+            /* Schedule ourselves in case elem->common.cb() calls aio_poll() to
+             * wait for another request that completed at the same time.
+             */
+            qemu_bh_schedule(pool->completion_bh);
+
             elem->common.cb(elem->common.opaque, elem->ret);
             qemu_aio_release(elem);
             goto restart;
@@ -215,7 +219,7 @@ static void thread_pool_cancel(BlockDriverAIOCB *acb)
         qemu_sem_timedwait(&pool->sem, 0) == 0) {
         QTAILQ_REMOVE(&pool->request_list, elem, reqs);
         elem->state = THREAD_CANCELED;
-        event_notifier_set(&pool->notifier);
+        qemu_bh_schedule(pool->completion_bh);
     } else {
         pool->pending_cancellations++;
         while (elem->state != THREAD_CANCELED && elem->state != THREAD_DONE) {
@@ -224,7 +228,7 @@ static void thread_pool_cancel(BlockDriverAIOCB *acb)
         pool->pending_cancellations--;
     }
     qemu_mutex_unlock(&pool->lock);
-    event_notifier_ready(&pool->notifier);
+    thread_pool_completion_bh(pool);
 }
 
 static const AIOCBInfo thread_pool_aiocb_info = {
@@ -293,8 +297,8 @@ static void thread_pool_init_one(ThreadPool *pool, AioContext *ctx)
     }
 
     memset(pool, 0, sizeof(*pool));
-    event_notifier_init(&pool->notifier, false);
     pool->ctx = ctx;
+    pool->completion_bh = aio_bh_new(ctx, thread_pool_completion_bh, pool);
     qemu_mutex_init(&pool->lock);
     qemu_cond_init(&pool->check_cancel);
     qemu_cond_init(&pool->worker_stopped);
@@ -304,8 +308,6 @@ static void thread_pool_init_one(ThreadPool *pool, AioContext *ctx)
 
     QLIST_INIT(&pool->head);
     QTAILQ_INIT(&pool->request_list);
-
-    aio_set_event_notifier(ctx, &pool->notifier, event_notifier_ready);
 }
 
 ThreadPool *thread_pool_new(AioContext *ctx)
@@ -339,11 +341,10 @@ void thread_pool_free(ThreadPool *pool)
 
     qemu_mutex_unlock(&pool->lock);
 
-    aio_set_event_notifier(pool->ctx, &pool->notifier, NULL);
+    qemu_bh_delete(pool->completion_bh);
     qemu_sem_destroy(&pool->sem);
    qemu_cond_destroy(&pool->check_cancel);
     qemu_cond_destroy(&pool->worker_stopped);
     qemu_mutex_destroy(&pool->lock);
-    event_notifier_cleanup(&pool->notifier);
     g_free(pool);
 }
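The key detail in the hunk at @@ -187,6 +185,12 @@ is that thread_pool_completion_bh() calls qemu_bh_schedule(pool->completion_bh) again before invoking elem->common.cb(), because, as the added comment says, the callback may run a nested aio_poll() and wait for another request that has already completed. The following standalone sketch is not QEMU code; bh_scheduled, fake_aio_poll(), request_cb() and completion_bh() are invented stand-ins that only model one aspect of the event loop. It shows why re-arming the handler before the callback keeps a nested poll from stalling.

/* Standalone illustration of the "re-arm the completion handler before
 * calling the callback" pattern from the diff above.  All names here are
 * invented; this is a sketch of the idea, not QEMU's event loop. */
#include <stdbool.h>
#include <stdio.h>

#define NUM_REQS 3

static bool bh_scheduled;                          /* stand-in for qemu_bh_schedule() */
static bool done[NUM_REQS] = {true, true, true};   /* all work already finished */
static bool delivered[NUM_REQS];

static void completion_bh(void);

/* Stand-in for one aio_poll() iteration: run the handler if scheduled. */
static void fake_aio_poll(void)
{
    if (bh_scheduled) {
        bh_scheduled = false;
        completion_bh();
    }
}

/* Request 0's callback waits for request 1, the way a QEMU callback may
 * call aio_poll() until some other request it depends on completes. */
static void request_cb(int id)
{
    delivered[id] = true;
    printf("request %d delivered\n", id);
    if (id == 0) {
        while (!delivered[1]) {
            fake_aio_poll();
        }
    }
}

static void completion_bh(void)
{
restart:
    for (int i = 0; i < NUM_REQS; i++) {
        if (done[i] && !delivered[i]) {
            /* Re-arm *before* the callback (as the patch does), so the
             * nested fake_aio_poll() inside request_cb(0) still finds work
             * to do and request 1 gets delivered.  Without this line,
             * request_cb(0) would spin forever in this sketch. */
            bh_scheduled = true;
            request_cb(i);
            goto restart;
        }
    }
}

int main(void)
{
    bh_scheduled = true;   /* as if a worker thread had scheduled the BH */
    fake_aio_poll();
    return 0;
}

In the real code the same effect comes from the qemu_bh_schedule(pool->completion_bh) call placed immediately before elem->common.cb() in thread_pool_completion_bh(), and a spurious extra run of the bottom half is harmless because completed elements have already been removed from the list.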