author      Paolo Bonzini <pbonzini@redhat.com>   2018-02-16 09:23:31 +0100
committer   Paolo Bonzini <pbonzini@redhat.com>   2018-03-12 16:12:47 +0100
commit      77a8b8462b02a10aea5cad389a8f9260f79ede36
tree        5f337abf53ab50994a45f6a302f7da8ee341e01a
parent      729c0ddd3cdf16973d850b1ee7c5234a1e4dddbb
rcu: make memory barriers more explicit
Prepare for introducing smp_mb_placeholder() and smp_mb_global(). The new smp_mb() in synchronize_rcu() is not strictly necessary, since the first atomic_mb_set for rcu_gp_ctr provides the required ordering. However, synchronize_rcu is not performance critical, and it *will* be necessary to introduce a smp_mb_global before calling wait_for_readers().

Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
-rw-r--r--  include/qemu/rcu.h  15
-rw-r--r--  util/rcu.c          12
2 files changed, 22 insertions, 5 deletions
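The barrier pairing this patch documents is the classic store-buffering pattern: the reader writes its counter and then reads an RCU-protected pointer, while the updater writes the pointer and then reads the reader counters. Below is a minimal standalone sketch of that pattern using C11 atomics rather than QEMU's atomic.h wrappers; gp_ctr, reader_ctr and protected_ptr are illustrative names, not QEMU's.

#include <stdatomic.h>

static atomic_uint gp_ctr = 1;        /* stands in for rcu_gp_ctr */
static atomic_uint reader_ctr;        /* stands in for p_rcu_reader->ctr */
static void *_Atomic protected_ptr;   /* some RCU-protected pointer */

/* Reader side, mirroring rcu_read_lock(). */
static void *reader_side(void)
{
    unsigned ctr = atomic_load_explicit(&gp_ctr, memory_order_relaxed);
    atomic_store_explicit(&reader_ctr, ctr, memory_order_relaxed);
    /* Write reader_ctr before reading the protected pointer. */
    atomic_thread_fence(memory_order_seq_cst);
    return atomic_load_explicit(&protected_ptr, memory_order_relaxed);
}

/* Updater side, mirroring synchronize_rcu(). */
static unsigned updater_side(void *newp)
{
    atomic_store_explicit(&protected_ptr, newp, memory_order_relaxed);
    /* Write the protected pointer before reading reader_ctr. */
    atomic_thread_fence(memory_order_seq_cst);
    return atomic_load_explicit(&reader_ctr, memory_order_relaxed);
}

With both seq_cst fences in place, at least one side must observe the other's store: a reader that misses the new pointer is guaranteed to have its counter store seen by the updater, so the updater cannot conclude the grace period early.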
diff --git a/include/qemu/rcu.h b/include/qemu/rcu.h
index f19413d649..625f09ac09 100644
--- a/include/qemu/rcu.h
+++ b/include/qemu/rcu.h
@@ -79,7 +79,10 @@ static inline void rcu_read_lock(void)
     }
 
     ctr = atomic_read(&rcu_gp_ctr);
-    atomic_xchg(&p_rcu_reader->ctr, ctr);
+    atomic_set(&p_rcu_reader->ctr, ctr);
+
+    /* Write p_rcu_reader->ctr before reading RCU-protected pointers. */
+    smp_mb();
 }
 
 static inline void rcu_read_unlock(void)
@@ -91,7 +94,15 @@ static inline void rcu_read_unlock(void)
         return;
     }
 
-    atomic_xchg(&p_rcu_reader->ctr, 0);
+    /* Ensure that the critical section is seen to precede the
+     * store to p_rcu_reader->ctr.  Together with the following
+     * smp_mb(), this ensures writes to p_rcu_reader->ctr
+     * are sequentially consistent.
+     */
+    atomic_store_release(&p_rcu_reader->ctr, 0);
+
+    /* Write p_rcu_reader->ctr before reading p_rcu_reader->waiting. */
+    smp_mb();
     if (unlikely(atomic_read(&p_rcu_reader->waiting))) {
         atomic_set(&p_rcu_reader->waiting, false);
         qemu_event_set(&rcu_gp_event);
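The rcu_read_unlock() change above splits what the old atomic_xchg() did implicitly into two named halves: a release store that orders the critical section before the counter store, and a full barrier that orders the counter store before the load of ->waiting. A rough standalone sketch of the same sequence in C11 atomics; ctr and waiting are illustrative stand-ins for the p_rcu_reader fields.

#include <stdatomic.h>
#include <stdbool.h>

static atomic_uint ctr;       /* stands in for p_rcu_reader->ctr */
static atomic_bool waiting;   /* stands in for p_rcu_reader->waiting */

static void read_unlock_sketch(void)
{
    /* Release half: everything done inside the critical section is
     * ordered before this store becomes visible to the updater. */
    atomic_store_explicit(&ctr, 0, memory_order_release);

    /* Full-barrier half: order the ctr store before the waiting load;
     * pairs with the fence in wait_for_readers(). */
    atomic_thread_fence(memory_order_seq_cst);

    if (atomic_load_explicit(&waiting, memory_order_relaxed)) {
        atomic_store_explicit(&waiting, false, memory_order_relaxed);
        /* QEMU would wake the updater here via qemu_event_set(). */
    }
}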
diff --git a/util/rcu.c b/util/rcu.c
index f4d09c8304..7366dc50dd 100644
--- a/util/rcu.c
+++ b/util/rcu.c
@@ -92,8 +92,9 @@ static void wait_for_readers(void)
         atomic_set(&index->waiting, true);
     }
 
-    /* Here, order the stores to index->waiting before the
-     * loads of index->ctr.
+    /* Here, order the stores to index->waiting before the loads of
+     * index->ctr.  Pairs with smp_mb() in rcu_read_unlock(),
+     * ensuring that the loads of index->ctr are sequentially consistent.
      */
     smp_mb();
@@ -142,8 +143,13 @@ static void wait_for_readers(void)
 void synchronize_rcu(void)
 {
     qemu_mutex_lock(&rcu_sync_lock);
-    qemu_mutex_lock(&rcu_registry_lock);
 
+    /* Write RCU-protected pointers before reading p_rcu_reader->ctr.
+     * Pairs with smp_mb() in rcu_read_lock().
+     */
+    smp_mb();
+
+    qemu_mutex_lock(&rcu_registry_lock);
     if (!QLIST_EMPTY(&registry)) {
         /* In either case, the atomic_mb_set below blocks stores that free
          * old RCU-protected pointers.
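For context, this is the shape of code the barriers protect. The example below is hypothetical (struct my_data, global_ptr, reader() and update() are invented for illustration), but rcu_read_lock()/rcu_read_unlock(), synchronize_rcu() and atomic_rcu_read()/atomic_rcu_set() are the QEMU primitives involved.

#include "qemu/osdep.h"
#include "qemu/atomic.h"
#include "qemu/rcu.h"

struct my_data {
    int val;
};

static struct my_data *global_ptr;   /* RCU-protected */

/* Reader: the smp_mb() in rcu_read_lock() ensures the ctr store is
 * visible before global_ptr is read. */
static int reader(void)
{
    int val = 0;

    rcu_read_lock();
    struct my_data *d = atomic_rcu_read(&global_ptr);
    if (d) {
        val = d->val;
    }
    rcu_read_unlock();
    return val;
}

/* Updater: once synchronize_rcu() returns, no reader still holds the
 * old pointer, so freeing it is safe. */
static void update(struct my_data *newp)
{
    struct my_data *old = global_ptr;

    atomic_rcu_set(&global_ptr, newp);
    synchronize_rcu();
    g_free(old);
}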