path: root/block/throttle-groups.c
author     Paolo Bonzini <pbonzini@redhat.com>        2017-06-05 14:38:57 +0200
committer  Fam Zheng <famz@redhat.com>                2017-06-16 07:55:00 +0800
commit     3b170dc86732539ebc5927880097d32d4f11df8c
tree       8575bacfdc027850fbc60fa1a8b709868a3e6ee9 /block/throttle-groups.c
parent     7258ed930c78e7b85a064c7a0aff5d415efb8fbb
throttle-groups: do not use qemu_co_enter_next
Prepare for removing this function; always restart throttled requests
from coroutine context.  This will matter when restarting throttled
requests will have to acquire a CoMutex.

Reviewed-by: Alberto Garcia <berto@igalia.com>
Reviewed-by: Stefan Hajnoczi <stefanha@redhat.com>
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
Message-Id: <20170605123908.18777-9-pbonzini@redhat.com>
Signed-off-by: Fam Zheng <famz@redhat.com>
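In practice the patch replaces the direct qemu_co_enter_next() call with a
small trampoline coroutine, so the code that wakes the next throttled request
always runs in coroutine context, where qemu_co_queue_next() (and, in later
patches, a CoMutex) can be used. The following is only a condensed sketch of
that pattern, paraphrased from the hunks below rather than a drop-in excerpt;
all names and helpers are the ones added by the patch itself:

    /* Condensed sketch of the new shape of block/throttle-groups.c. */
    typedef struct {
        BlockBackend *blk;
        bool is_write;
    } RestartData;

    /* Runs in coroutine context, so it may use qemu_co_queue_next() and,
     * eventually, take a CoMutex. */
    static void coroutine_fn throttle_group_restart_queue_entry(void *opaque)
    {
        RestartData *data = opaque;

        throttle_group_co_restart_queue(data->blk, data->is_write);
        /* ...then schedule the next request if the queue was empty... */
    }

    /* Callable from any context: spawn the trampoline coroutine and enter
     * it in the BlockBackend's AioContext. */
    static void throttle_group_restart_queue(BlockBackend *blk, bool is_write)
    {
        RestartData rd = { .blk = blk, .is_write = is_write };
        Coroutine *co;

        co = qemu_coroutine_create(throttle_group_restart_queue_entry, &rd);
        aio_co_enter(blk_get_aio_context(blk), co);
    }

aio_co_enter() either enters the coroutine immediately or schedules it in the
target AioContext, which is why the explicit aio_context_acquire()/release()
pair around the old qemu_co_enter_next() call can be dropped (see the third
hunk below).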
Diffstat (limited to 'block/throttle-groups.c')
-rw-r--r--  block/throttle-groups.c  42
1 file changed, 37 insertions(+), 5 deletions(-)
diff --git a/block/throttle-groups.c b/block/throttle-groups.c
index 85169ec..8bf1031 100644
--- a/block/throttle-groups.c
+++ b/block/throttle-groups.c
@@ -260,6 +260,20 @@ static bool throttle_group_schedule_timer(BlockBackend *blk, bool is_write)
     return must_wait;
 }
 
+/* Start the next pending I/O request for a BlockBackend. Return whether
+ * any request was actually pending.
+ *
+ * @blk: the current BlockBackend
+ * @is_write: the type of operation (read/write)
+ */
+static bool coroutine_fn throttle_group_co_restart_queue(BlockBackend *blk,
+                                                          bool is_write)
+{
+    BlockBackendPublic *blkp = blk_get_public(blk);
+
+    return qemu_co_queue_next(&blkp->throttled_reqs[is_write]);
+}
+
 /* Look for the next pending I/O request and schedule it.
  *
  * This assumes that tg->lock is held.
@@ -287,7 +301,7 @@ static void schedule_next_request(BlockBackend *blk, bool is_write)
     if (!must_wait) {
         /* Give preference to requests from the current blk */
         if (qemu_in_coroutine() &&
-            qemu_co_queue_next(&blkp->throttled_reqs[is_write])) {
+            throttle_group_co_restart_queue(blk, is_write)) {
             token = blk;
         } else {
             ThrottleTimers *tt = &blk_get_public(token)->throttle_timers;
@@ -340,15 +354,21 @@ void coroutine_fn throttle_group_co_io_limits_intercept(BlockBackend *blk,
     qemu_mutex_unlock(&tg->lock);
 }
 
-static void throttle_group_restart_queue(BlockBackend *blk, bool is_write)
+typedef struct {
+    BlockBackend *blk;
+    bool is_write;
+} RestartData;
+
+static void coroutine_fn throttle_group_restart_queue_entry(void *opaque)
 {
+    RestartData *data = opaque;
+    BlockBackend *blk = data->blk;
+    bool is_write = data->is_write;
     BlockBackendPublic *blkp = blk_get_public(blk);
     ThrottleGroup *tg = container_of(blkp->throttle_state, ThrottleGroup, ts);
     bool empty_queue;
 
-    aio_context_acquire(blk_get_aio_context(blk));
-    empty_queue = !qemu_co_enter_next(&blkp->throttled_reqs[is_write]);
-    aio_context_release(blk_get_aio_context(blk));
+    empty_queue = !throttle_group_co_restart_queue(blk, is_write);
 
     /* If the request queue was empty then we have to take care of
      * scheduling the next one */
@@ -359,6 +379,18 @@ static void throttle_group_restart_queue(BlockBackend *blk, bool is_write)
     }
 }
 
+static void throttle_group_restart_queue(BlockBackend *blk, bool is_write)
+{
+    Coroutine *co;
+    RestartData rd = {
+        .blk = blk,
+        .is_write = is_write
+    };
+
+    co = qemu_coroutine_create(throttle_group_restart_queue_entry, &rd);
+    aio_co_enter(blk_get_aio_context(blk), co);
+}
+
 void throttle_group_restart_blk(BlockBackend *blk)
 {
     BlockBackendPublic *blkp = blk_get_public(blk);