author     Paolo Bonzini <pbonzini@redhat.com>  2023-03-03 13:46:03 +0100
committer  Paolo Bonzini <pbonzini@redhat.com>  2023-05-08 11:10:48 +0200
commit     6e288b00ef536f87910f76cb1940a8caced69c54
tree       709984e3e94a50b2f5ef989860199c6d08149514 /util/rcu.c
parent     792f77f376adef944f9a03e601f6ad90c2f891b2
rcu: remove qatomic_mb_set, expand comments
Reviewed-by: Richard Henderson <richard.henderson@linaro.org>
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
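
For context: qatomic_mb_set() was QEMU's store-with-barrier primitive, and this patch replaces each use with a plain qatomic_set() plus barriers that are already placed explicitly in the surrounding code. The snippet below is a rough sketch of the semantics being dropped, not QEMU's actual macro definition (sketch_mb_set is a hypothetical name; the real per-architecture definitions varied):

/*
 * Rough sketch only: qatomic_mb_set() behaved approximately like an
 * atomic store that also acted as a full memory barrier.
 */
#define sketch_mb_set(ptr, i) do {  \
        qatomic_set(ptr, i);        \
        smp_mb();                   \
    } while (0)

Paying for a full barrier inside every store is wasteful when, as in wait_for_readers(), a single explicit barrier can order a whole loop of stores at once.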
Diffstat (limited to 'util/rcu.c')
-rw-r--r--  util/rcu.c | 24
1 file changed, 11 insertions(+), 13 deletions(-)
diff --git a/util/rcu.c b/util/rcu.c
index b6d6c71..e5b6e52 100644
--- a/util/rcu.c
+++ b/util/rcu.c
@@ -83,12 +83,6 @@ static void wait_for_readers(void)
*/
qemu_event_reset(&rcu_gp_event);
- /* Instead of using qatomic_mb_set for index->waiting, and
- * qatomic_mb_read for index->ctr, memory barriers are placed
- * manually since writes to different threads are independent.
- * qemu_event_reset has acquire semantics, so no memory barrier
- * is needed here.
- */
QLIST_FOREACH(index, &registry, node) {
qatomic_set(&index->waiting, true);
}
@@ -96,6 +90,10 @@ static void wait_for_readers(void)
/* Here, order the stores to index->waiting before the loads of
* index->ctr. Pairs with smp_mb_placeholder() in rcu_read_unlock(),
* ensuring that the loads of index->ctr are sequentially consistent.
+ *
+ * If this is the last iteration, this barrier also prevents
+ * frees from seeping upwards, and orders the two wait phases
+ * on architectures with 32-bit longs; see synchronize_rcu().
*/
smp_mb_global();
@@ -104,7 +102,7 @@ static void wait_for_readers(void)
QLIST_REMOVE(index, node);
QLIST_INSERT_HEAD(&qsreaders, index, node);
- /* No need for mb_set here, worst of all we
+ /* No need for memory barriers here, worst of all we
* get some extra futex wakeups.
*/
qatomic_set(&index->waiting, false);
@@ -149,26 +147,26 @@ void synchronize_rcu(void)
/* Write RCU-protected pointers before reading p_rcu_reader->ctr.
* Pairs with smp_mb_placeholder() in rcu_read_lock().
+ *
+ * Also orders write to RCU-protected pointers before
+ * write to rcu_gp_ctr.
*/
smp_mb_global();
QEMU_LOCK_GUARD(&rcu_registry_lock);
if (!QLIST_EMPTY(&registry)) {
- /* In either case, the qatomic_mb_set below blocks stores that free
- * old RCU-protected pointers.
- */
if (sizeof(rcu_gp_ctr) < 8) {
/* For architectures with 32-bit longs, a two-subphases algorithm
* ensures we do not encounter overflow bugs.
*
* Switch parity: 0 -> 1, 1 -> 0.
*/
- qatomic_mb_set(&rcu_gp_ctr, rcu_gp_ctr ^ RCU_GP_CTR);
+ qatomic_set(&rcu_gp_ctr, rcu_gp_ctr ^ RCU_GP_CTR);
wait_for_readers();
- qatomic_mb_set(&rcu_gp_ctr, rcu_gp_ctr ^ RCU_GP_CTR);
+ qatomic_set(&rcu_gp_ctr, rcu_gp_ctr ^ RCU_GP_CTR);
} else {
/* Increment current grace period. */
- qatomic_mb_set(&rcu_gp_ctr, rcu_gp_ctr + RCU_GP_CTR);
+ qatomic_set(&rcu_gp_ctr, rcu_gp_ctr + RCU_GP_CTR);
}
wait_for_readers();
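
The smp_mb_global()/smp_mb_placeholder() pair referenced in the comments above is QEMU's asymmetric barrier scheme: every reader runs the cheap side in rcu_read_lock()/rcu_read_unlock(), while the updater runs the expensive side once per grace period. A simplified sketch of the idea, assuming the CONFIG_MEMBARRIER wiring in util/rcu.c (details elided; treat as illustrative):

#ifdef CONFIG_MEMBARRIER
/* the kernel injects a barrier on every CPU running this process */
#define smp_mb_global()      membarrier(MEMBARRIER_CMD_SHARED, 0)
#define smp_mb_placeholder() barrier()   /* compiler barrier only */
#else
/* without sys_membarrier(), both sides use a real fence */
#define smp_mb_global()      smp_mb()
#define smp_mb_placeholder() smp_mb()
#endif

This is what lets the patch delete the per-store barriers: the one smp_mb_global() in wait_for_readers() already orders the index->waiting stores against the index->ctr loads for every registered reader.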
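
The 32-bit branch is easier to follow with the control flow laid out linearly. Below is a hedged sketch of synchronize_rcu() after this patch (locking and the registry-empty check elided; synchronize_rcu_sketch is an illustrative name):

static void synchronize_rcu_sketch(void)
{
    /* Order writes to RCU-protected pointers before the counter
     * update and before reading p_rcu_reader->ctr in
     * wait_for_readers().
     */
    smp_mb_global();

    if (sizeof(rcu_gp_ctr) < 8) {
        /* 32-bit counter: flip the parity bit twice, draining
         * readers in between, so a reader's snapshot can never be
         * confused by counter wraparound.
         */
        qatomic_set(&rcu_gp_ctr, rcu_gp_ctr ^ RCU_GP_CTR);  /* 0 -> 1 */
        wait_for_readers();
        qatomic_set(&rcu_gp_ctr, rcu_gp_ctr ^ RCU_GP_CTR);  /* 1 -> 0 */
    } else {
        /* 64-bit counter: a single increment cannot wrap in practice. */
        qatomic_set(&rcu_gp_ctr, rcu_gp_ctr + RCU_GP_CTR);
    }
    wait_for_readers();
}

The plain qatomic_set() suffices here because the smp_mb_global() inside wait_for_readers() (the "last iteration" case called out in the new comment) already orders the two wait subphases on 32-bit builds and keeps frees from seeping above the wait.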