author    Nicolas Saenz Julienne <nsaenzju@redhat.com>  2022-04-25 09:57:23 +0200
committer Stefan Hajnoczi <stefanha@redhat.com>         2022-05-09 10:43:23 +0100
commit    71ad4713cc1d7fca24388b828ef31ae6cb38a31c
tree      baf49f521c85ffa4551b2cb73083562570b3c857 /include/block
parent    70ac26b9e5ca8374bb3ef3f30b871726673c9f27
util/event-loop-base: Introduce options to set the thread pool size
The thread pool regulates itself: when idle, it kills threads until empty; when in demand, it creates new threads until full. This behaviour doesn't play well with latency-sensitive workloads where the price of creating a new thread is too high. For example, when paired with qemu's '-mlock', or when using safety features like SafeStack, creating a new thread has been measured to take multiple milliseconds.

In order to mitigate this, let's introduce a new 'EventLoopBase' property to set the thread pool size. The threads will be created during the pool's initialization or upon updating the property's value, remain available during its lifetime regardless of demand, and be destroyed upon freeing it. A properly characterized workload will then be able to configure the pool to avoid any latency spikes.

Signed-off-by: Nicolas Saenz Julienne <nsaenzju@redhat.com>
Reviewed-by: Stefan Hajnoczi <stefanha@redhat.com>
Acked-by: Markus Armbruster <armbru@redhat.com>
Message-id: 20220425075723.20019-4-nsaenzju@redhat.com
Signed-off-by: Stefan Hajnoczi <stefanha@redhat.com>
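For a concrete idea of how a workload would consume this: the wider series exposes the new knobs as 'thread-pool-min' and 'thread-pool-max' properties on event-loop-base objects such as iothread. Taking those names as given (this patch itself only touches headers), a latency-sensitive guest could pin the pool to a fixed size on the command line:

    qemu-system-x86_64 \
        -object iothread,id=iothread0,thread-pool-min=8,thread-pool-max=8 \
        -device virtio-blk-pci,drive=disk0,iothread=iothread0 \
        ...

With min equal to max, all eight workers are spawned up front and never reaped, so no request ever pays the thread-creation cost described above.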
Diffstat (limited to 'include/block')
-rw-r--r--  include/block/aio.h          10
-rw-r--r--  include/block/thread-pool.h   3
2 files changed, 13 insertions(+), 0 deletions(-)
diff --git a/include/block/aio.h b/include/block/aio.h
index 5634173..d128558 100644
--- a/include/block/aio.h
+++ b/include/block/aio.h
@@ -192,6 +192,8 @@ struct AioContext {
     QSLIST_HEAD(, Coroutine) scheduled_coroutines;
     QEMUBH *co_schedule_bh;
 
+    int thread_pool_min;
+    int thread_pool_max;
     /* Thread pool for performing work and receiving completion callbacks.
      * Has its own locking.
      */
@@ -769,4 +771,12 @@ void aio_context_set_poll_params(AioContext *ctx, int64_t max_ns,
 void aio_context_set_aio_params(AioContext *ctx, int64_t max_batch,
                                 Error **errp);
 
+/**
+ * aio_context_set_thread_pool_params:
+ * @ctx: the aio context
+ * @min: min number of threads to have readily available in the thread pool
+ * @max: max number of threads the thread pool can contain
+ */
+void aio_context_set_thread_pool_params(AioContext *ctx, int64_t min,
+                                        int64_t max, Error **errp);
 #endif
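As a sketch of what could sit behind this prototype (the body below is illustrative, not the patch's code; error_setg() and the two AioContext fields are real, but the validation message is made up and the locking a real event loop needs is omitted):

    void aio_context_set_thread_pool_params(AioContext *ctx, int64_t min,
                                            int64_t max, Error **errp)
    {
        /* Reject inconsistent bounds before touching the context. */
        if (min < 0 || max < 1 || min > max) {
            error_setg(errp, "bad thread-pool-min/thread-pool-max values");
            return;
        }

        ctx->thread_pool_min = min;
        ctx->thread_pool_max = max;

        /* Resize an already-created pool to honour the new limits. */
        if (ctx->thread_pool) {
            thread_pool_update_params(ctx->thread_pool, ctx);
        }
    }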
diff --git a/include/block/thread-pool.h b/include/block/thread-pool.h
index 7dd7d73..2020bcc 100644
--- a/include/block/thread-pool.h
+++ b/include/block/thread-pool.h
@@ -20,6 +20,8 @@
#include "block/block.h"
+#define THREAD_POOL_MAX_THREADS_DEFAULT 64
+
typedef int ThreadPoolFunc(void *opaque);
typedef struct ThreadPool ThreadPool;
@@ -33,5 +35,6 @@ BlockAIOCB *thread_pool_submit_aio(ThreadPool *pool,
 int coroutine_fn thread_pool_submit_co(ThreadPool *pool,
                                        ThreadPoolFunc *func, void *arg);
 void thread_pool_submit(ThreadPool *pool, ThreadPoolFunc *func, void *arg);
+void thread_pool_update_params(ThreadPool *pool, struct AioContext *ctx);
 
 #endif
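thread_pool_update_params() is where the "create threads eagerly, never reap below the minimum" policy from the commit message would live. A rough sketch, assuming pool-internal names (lock, cur_threads, spawn_thread()) that are private to util/thread-pool.c and used here for illustration only:

    void thread_pool_update_params(ThreadPool *pool, struct AioContext *ctx)
    {
        qemu_mutex_lock(&pool->lock);

        /* Copy the limits configured on the AioContext into the pool. */
        pool->min_threads = ctx->thread_pool_min;
        pool->max_threads = ctx->thread_pool_max;

        /*
         * Grow eagerly up to the new minimum so the threads already exist
         * when latency-sensitive work arrives; workers above the new
         * maximum notice the lowered limit and exit on their own.
         */
        while (pool->cur_threads < pool->min_threads) {
            spawn_thread(pool);
        }

        qemu_mutex_unlock(&pool->lock);
    }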