path: root/util
author     Peter Maydell <peter.maydell@linaro.org>  2024-01-09 10:32:23 +0000
committer  Peter Maydell <peter.maydell@linaro.org>  2024-01-09 10:32:23 +0000
commit     9468484fe904ab4691de6d9c34616667f377ceac
tree       4cefd5acf45a99755742069ef0dc18c526397cca /util
parent     c1df5b4f165f011cf058e9bafb07b5504abb1b3d
parent     0b2675c473f68f13bc5ca1dd1c43ce421542e7b8
Merge tag 'block-pull-request' of https://gitlab.com/stefanha/qemu into staging
Pull request

# -----BEGIN PGP SIGNATURE-----
#
# iQEzBAABCAAdFiEEhpWov9P5fNqsNXdanKSrs4Grc8gFAmWcJMUACgkQnKSrs4Gr
# c8hh/Qf/Wt177UlhBR49OWmmegs8c8yS1mhyawo7YIJM4pqoXCYLaACpcKECXcGU
# rlgyR4ow68EXnnU8+/s2cp2UqHxrla+E2eNqBoTDmkNt3Cko5sJn5G5PM5EYK+mO
# JjFRzn7awRyxD6mGOuaMVoj6OuHbAA/U4JF7FhW0YuRl8v0/mvAxRSfQ4U6Crq/y
# 19Aa1CXHD1GH2CUJsMCY8zT47Dr4DJcvZx5IpcDFaHaYDCkktFwNzdo5IDnCx2M2
# xnP37Qp/Q93cu12lWkVOu8HCT6yhoszahyOqlBxDmo7QeGkskrxGbMyE+vHM3fFI
# aGSxiw193U7/QWu+Cq2/727C3YIq1g==
# =pKUb
# -----END PGP SIGNATURE-----
# gpg: Signature made Mon 08 Jan 2024 16:37:25 GMT
# gpg:                using RSA key 8695A8BFD3F97CDAAC35775A9CA4ABB381AB73C8
# gpg: Good signature from "Stefan Hajnoczi <stefanha@redhat.com>" [full]
# gpg:                 aka "Stefan Hajnoczi <stefanha@gmail.com>" [full]
# Primary key fingerprint: 8695 A8BF D3F9 7CDA AC35 775A 9CA4 ABB3 81AB 73C8

* tag 'block-pull-request' of https://gitlab.com/stefanha/qemu:
  Rename "QEMU global mutex" to "BQL" in comments and docs
  Replace "iothread lock" with "BQL" in comments
  qemu/main-loop: rename qemu_cond_wait_iothread() to qemu_cond_wait_bql()
  qemu/main-loop: rename QEMU_IOTHREAD_LOCK_GUARD to BQL_LOCK_GUARD
  system/cpus: rename qemu_mutex_lock_iothread() to bql_lock()
  iothread: Remove unused Error** argument in aio_context_set_aio_params

Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
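The series is mechanical for callers: each locking entry point keeps its
semantics and only its name changes, and aio_context_set_aio_params() loses
an Error ** it could never set. A minimal sketch of a migrated call site
(the helper functions here are hypothetical; the locking APIs are the ones
introduced by this series):

    #include "qemu/osdep.h"
    #include "qemu/main-loop.h"

    /* Hypothetical helper mutating state protected by the Big QEMU Lock. */
    static void example_update_state(void)
    {
        /* Was qemu_mutex_lock_iothread() / qemu_mutex_unlock_iothread(). */
        bql_lock();
        /* ... touch BQL-protected state ... */
        bql_unlock();
    }

    /* Scoped variant: was QEMU_IOTHREAD_LOCK_GUARD(). */
    static void example_update_state_scoped(void)
    {
        BQL_LOCK_GUARD();   /* takes the BQL, drops it at end of scope */
        /* ... touch BQL-protected state ... */
    }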
Diffstat (limited to 'util')
-rw-r--r--  util/aio-posix.c    3
-rw-r--r--  util/aio-win32.c    3
-rw-r--r--  util/async.c        2
-rw-r--r--  util/main-loop.c   13
-rw-r--r--  util/qsp.c          6
-rw-r--r--  util/rcu.c         16
6 files changed, 19 insertions, 24 deletions
diff --git a/util/aio-posix.c b/util/aio-posix.c
index 7f2c997..266c9dd 100644
--- a/util/aio-posix.c
+++ b/util/aio-posix.c
@@ -777,8 +777,7 @@ void aio_context_set_poll_params(AioContext *ctx, int64_t max_ns,
     aio_notify(ctx);
 }
 
-void aio_context_set_aio_params(AioContext *ctx, int64_t max_batch,
-                                Error **errp)
+void aio_context_set_aio_params(AioContext *ctx, int64_t max_batch)
 {
     /*
      * No thread synchronization here, it doesn't matter if an incorrect value
diff --git a/util/aio-win32.c b/util/aio-win32.c
index 948ef47..d144f93 100644
--- a/util/aio-win32.c
+++ b/util/aio-win32.c
@@ -438,7 +438,6 @@ void aio_context_set_poll_params(AioContext *ctx, int64_t max_ns,
     }
 }
 
-void aio_context_set_aio_params(AioContext *ctx, int64_t max_batch,
-                                Error **errp)
+void aio_context_set_aio_params(AioContext *ctx, int64_t max_batch)
 {
 }
diff --git a/util/async.c b/util/async.c
index 4605290..36a8e76 100644
--- a/util/async.c
+++ b/util/async.c
@@ -727,7 +727,7 @@ AioContext *qemu_get_current_aio_context(void)
     if (ctx) {
         return ctx;
     }
-    if (qemu_mutex_iothread_locked()) {
+    if (bql_locked()) {
         /* Possibly in a vCPU thread. */
         return qemu_get_aio_context();
     }
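The fallback logic above is unchanged: a thread with its own AioContext gets
that one; otherwise holding the BQL is treated as evidence of a vCPU or
main-loop thread, which maps to the global context. Callers stay oblivious to
which case applied; a hedged usage sketch (the bottom-half callback and the
wrapper are hypothetical, the scheduling API is QEMU's):

    static void example_bh_cb(void *opaque)   /* hypothetical callback */
    {
        /* Runs in whichever event loop the scheduling thread belonged to. */
    }

    void example_schedule_work(void)
    {
        AioContext *ctx = qemu_get_current_aio_context();
        if (ctx) {
            aio_bh_schedule_oneshot(ctx, example_bh_cb, NULL);
        }
    }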
diff --git a/util/main-loop.c b/util/main-loop.c
index 797b640..a0386cf 100644
--- a/util/main-loop.c
+++ b/util/main-loop.c
@@ -192,10 +192,7 @@ static void main_loop_update_params(EventLoopBase *base, Error **errp)
         return;
     }
 
-    aio_context_set_aio_params(qemu_aio_context, base->aio_max_batch, errp);
-    if (*errp) {
-        return;
-    }
+    aio_context_set_aio_params(qemu_aio_context, base->aio_max_batch);
 
     aio_context_set_thread_pool_params(qemu_aio_context, base->thread_pool_min,
                                        base->thread_pool_max, errp);
@@ -302,13 +299,13 @@ static int os_host_main_loop_wait(int64_t timeout)
     glib_pollfds_fill(&timeout);
 
-    qemu_mutex_unlock_iothread();
+    bql_unlock();
     replay_mutex_unlock();
 
     ret = qemu_poll_ns((GPollFD *)gpollfds->data, gpollfds->len, timeout);
 
     replay_mutex_lock();
-    qemu_mutex_lock_iothread();
+    bql_lock();
 
     glib_pollfds_poll();
@@ -517,7 +514,7 @@ static int os_host_main_loop_wait(int64_t timeout)
     poll_timeout_ns = qemu_soonest_timeout(poll_timeout_ns, timeout);
 
-    qemu_mutex_unlock_iothread();
+    bql_unlock();
 
     replay_mutex_unlock();
@@ -525,7 +522,7 @@ static int os_host_main_loop_wait(int64_t timeout)
     replay_mutex_lock();
-    qemu_mutex_lock_iothread();
+    bql_lock();
 
     if (g_poll_ret > 0) {
         for (i = 0; i < w->num; i++) {
             w->revents[i] = poll_fds[n_poll_fds + i].revents;
diff --git a/util/qsp.c b/util/qsp.c
index 2fe3764..6b783e2 100644
--- a/util/qsp.c
+++ b/util/qsp.c
@@ -124,7 +124,7 @@ static const char * const qsp_typenames[] = {
[QSP_CONDVAR] = "condvar",
};
-QemuMutexLockFunc qemu_bql_mutex_lock_func = qemu_mutex_lock_impl;
+QemuMutexLockFunc bql_mutex_lock_func = qemu_mutex_lock_impl;
QemuMutexLockFunc qemu_mutex_lock_func = qemu_mutex_lock_impl;
QemuMutexTrylockFunc qemu_mutex_trylock_func = qemu_mutex_trylock_impl;
QemuRecMutexLockFunc qemu_rec_mutex_lock_func = qemu_rec_mutex_lock_impl;
@@ -439,7 +439,7 @@ void qsp_enable(void)
 {
     qatomic_set(&qemu_mutex_lock_func, qsp_mutex_lock);
     qatomic_set(&qemu_mutex_trylock_func, qsp_mutex_trylock);
-    qatomic_set(&qemu_bql_mutex_lock_func, qsp_bql_mutex_lock);
+    qatomic_set(&bql_mutex_lock_func, qsp_bql_mutex_lock);
     qatomic_set(&qemu_rec_mutex_lock_func, qsp_rec_mutex_lock);
     qatomic_set(&qemu_rec_mutex_trylock_func, qsp_rec_mutex_trylock);
     qatomic_set(&qemu_cond_wait_func, qsp_cond_wait);
@@ -450,7 +450,7 @@ void qsp_disable(void)
 {
     qatomic_set(&qemu_mutex_lock_func, qemu_mutex_lock_impl);
     qatomic_set(&qemu_mutex_trylock_func, qemu_mutex_trylock_impl);
-    qatomic_set(&qemu_bql_mutex_lock_func, qemu_mutex_lock_impl);
+    qatomic_set(&bql_mutex_lock_func, qemu_mutex_lock_impl);
     qatomic_set(&qemu_rec_mutex_lock_func, qemu_rec_mutex_lock_impl);
     qatomic_set(&qemu_rec_mutex_trylock_func, qemu_rec_mutex_trylock_impl);
     qatomic_set(&qemu_cond_wait_func, qemu_cond_wait_impl);
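The QEMU synchronization profiler (QSP) works by routing every lock operation
through function pointers that qsp_enable()/qsp_disable() atomically repoint;
this series only renames the pointer that fronts the BQL. The indirection
pattern itself, reduced to a self-contained C11 sketch (all names here are
illustrative, not QEMU's):

    #include <stdatomic.h>
    #include <stdbool.h>
    #include <stdio.h>
    #include <pthread.h>

    typedef void (*lock_fn)(pthread_mutex_t *);

    static void lock_plain(pthread_mutex_t *m)
    {
        pthread_mutex_lock(m);
    }

    static void lock_profiled(pthread_mutex_t *m)
    {
        fprintf(stderr, "lock %p\n", (void *)m);  /* stand-in for QSP stats */
        pthread_mutex_lock(m);
    }

    /* Every call site locks through this pointer, like bql_mutex_lock_func. */
    static _Atomic lock_fn lock_func = lock_plain;

    void profiler_set_enabled(bool on)
    {
        /* Toggling is one atomic store; no call site needs a branch. */
        atomic_store(&lock_func, on ? lock_profiled : lock_plain);
    }

    void take_lock(pthread_mutex_t *m)
    {
        atomic_load(&lock_func)(m);   /* dispatch through the current hook */
    }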
diff --git a/util/rcu.c b/util/rcu.c
index e587bcc..fa32c94 100644
--- a/util/rcu.c
+++ b/util/rcu.c
@@ -283,24 +283,24 @@ static void *call_rcu_thread(void *opaque)
         qatomic_sub(&rcu_call_count, n);
         synchronize_rcu();
 
-        qemu_mutex_lock_iothread();
+        bql_lock();
         while (n > 0) {
             node = try_dequeue();
             while (!node) {
-                qemu_mutex_unlock_iothread();
+                bql_unlock();
                 qemu_event_reset(&rcu_call_ready_event);
                 node = try_dequeue();
                 if (!node) {
                     qemu_event_wait(&rcu_call_ready_event);
                     node = try_dequeue();
                 }
-                qemu_mutex_lock_iothread();
+                bql_lock();
             }
 
             n--;
             node->func(node);
         }
-        qemu_mutex_unlock_iothread();
+        bql_unlock();
     }
     abort();
 }
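The renamed calls preserve the hunk's locking discipline: never sleep on
rcu_call_ready_event while holding the BQL, and retake it before invoking
callbacks, which may touch BQL-protected state. The reset-dequeue-wait
ordering is what prevents a lost wakeup. A condensed sketch of that shape
(have_work() is a hypothetical stand-in for the queue check):

    bql_unlock();                               /* never sleep holding BQL  */
    qemu_event_reset(&rcu_call_ready_event);
    if (!have_work()) {                         /* re-check after reset so  */
        qemu_event_wait(&rcu_call_ready_event); /* a wakeup can't be lost   */
    }
    bql_lock();                                 /* callbacks may need BQL   */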
@@ -337,13 +337,13 @@ static void drain_rcu_callback(struct rcu_head *node)
 
 void drain_call_rcu(void)
 {
     struct rcu_drain rcu_drain;
-    bool locked = qemu_mutex_iothread_locked();
+    bool locked = bql_locked();
 
     memset(&rcu_drain, 0, sizeof(struct rcu_drain));
     qemu_event_init(&rcu_drain.drain_complete_event, false);
 
     if (locked) {
-        qemu_mutex_unlock_iothread();
+        bql_unlock();
     }
 
@@ -365,7 +365,7 @@ void drain_call_rcu(void)
     qatomic_dec(&in_drain_call_rcu);
 
     if (locked) {
-        qemu_mutex_lock_iothread();
+        bql_lock();
     }
 }
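drain_call_rcu() must drop the BQL across the wait: the call_rcu thread shown
earlier takes the BQL before running callbacks, including the drain callback
this function waits on, so holding the lock through the wait would deadlock.
The conditional drop/retake reduced to its core (a sketch of the pattern, not
a replacement for the function above):

    bool locked = bql_locked();     /* remember the caller's lock state  */
    if (locked) {
        bql_unlock();               /* let call_rcu_thread make progress */
    }
    /* ... wait for the drain callback to fire ... */
    if (locked) {
        bql_lock();                 /* restore whatever the caller held  */
    }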
@@ -409,7 +409,7 @@ static void rcu_init_complete(void)
 
     qemu_event_init(&rcu_call_ready_event, false);
 
-    /* The caller is assumed to have iothread lock, so the call_rcu thread
+    /* The caller is assumed to have BQL, so the call_rcu thread
      * must have been quiescent even after forking, just recreate it.
      */
     qemu_thread_create(&thread, "call_rcu", call_rcu_thread,