From 97b430320ce7c95f0d5587c5ecc8f6a9d0c698e9 Mon Sep 17 00:00:00 2001
From: "Paul E. McKenney"
Date: Tue, 16 Oct 2007 23:26:04 -0700
Subject: [PATCH] Immunize rcu_dereference() against crazy compiler writers

Turns out that compiler writers are a bit more aggressive about
optimizing than one might expect.  This patch prevents a number of such
optimizations from messing up rcu_dereference().  This is not merely a
theoretical problem, as evidenced by the rmb() in mce_log().

Signed-off-by: Paul E. McKenney
Cc: Ingo Molnar
Acked-by: Josh Triplett
Signed-off-by: Andrew Morton
Signed-off-by: Linus Torvalds
---
 include/linux/rcupdate.h | 14 +++++++++++++-
 1 file changed, 13 insertions(+), 1 deletion(-)

diff --git a/include/linux/rcupdate.h b/include/linux/rcupdate.h
index 76c1a530edc5..cc24a01df940 100644
--- a/include/linux/rcupdate.h
+++ b/include/linux/rcupdate.h
@@ -231,6 +231,18 @@ extern struct lockdep_map rcu_lock_map;
 		local_bh_enable(); \
 	} while(0)
 
+/*
+ * Prevent the compiler from merging or refetching accesses.  The compiler
+ * is also forbidden from reordering successive instances of ACCESS_ONCE(),
+ * but only when the compiler is aware of some particular ordering.  One way
+ * to make the compiler aware of ordering is to put the two invocations of
+ * ACCESS_ONCE() in different C statements.
+ *
+ * This macro does absolutely -nothing- to prevent the CPU from reordering,
+ * merging, or refetching absolutely anything at any time.
+ */
+#define ACCESS_ONCE(x) (*(volatile typeof(x) *)&(x))
+
 /**
  * rcu_dereference - fetch an RCU-protected pointer in an
  * RCU read-side critical section.  This pointer may later
@@ -242,7 +254,7 @@ extern struct lockdep_map rcu_lock_map;
  */
 
 #define rcu_dereference(p)     ({ \
-				typeof(p) _________p1 = p; \
+				typeof(p) _________p1 = ACCESS_ONCE(p); \
 				smp_read_barrier_depends(); \
 				(_________p1); \
 				})
-- 
2.20.1
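
Not part of the commit: below is a minimal standalone C sketch of the compiler
behavior the new macro guards against.  Only the ACCESS_ONCE() definition is
taken from the patch above; the flag variable, the wait_for_flag() helper, and
the pthread scaffolding are hypothetical, added purely to illustrate why a
plain load may be hoisted out of a loop while the volatile cast forces a fresh
load on every iteration.

/*
 * Userspace sketch (build with: gcc -O2 -pthread access_once_demo.c).
 * ACCESS_ONCE() is copied from the patch; everything else is illustrative
 * scaffolding, not kernel code.
 */
#include <pthread.h>
#include <stdio.h>
#include <unistd.h>

#define ACCESS_ONCE(x) (*(volatile typeof(x) *)&(x))

static int flag;	/* shared between the two threads */

/*
 * Without ACCESS_ONCE(), an optimizing compiler may legally load "flag"
 * once and spin on a register copy forever.  The volatile cast forces a
 * re-read each time around the loop.  As the patch's comment stresses,
 * this constrains only the compiler; CPU-level ordering still needs
 * explicit barriers such as smp_read_barrier_depends()/rmb().
 */
static void *wait_for_flag(void *arg)
{
	while (!ACCESS_ONCE(flag))
		;			/* spin until the writer sets the flag */
	printf("flag observed\n");
	return NULL;
}

int main(void)
{
	pthread_t reader;

	pthread_create(&reader, NULL, wait_for_flag, NULL);
	sleep(1);
	ACCESS_ONCE(flag) = 1;		/* single store the compiler may not merge away */
	pthread_join(reader, NULL);
	return 0;
}

The same reasoning is why rcu_dereference() now wraps its pointer load in
ACCESS_ONCE(): the compiler may neither refetch the pointer nor fuse the load
with a neighbouring one, while the existing smp_read_barrier_depends() keeps
handling CPU-level ordering.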