Immunize rcu_dereference() against crazy compiler writers
Turns out that compiler writers are a bit more aggressive about optimizing than one might expect. This patch prevents a number of such optimizations from messing up rcu_dereference(). This is not merely a theoretical problem, as evidenced by the rmb() in mce_log().

Signed-off-by: Paul E. McKenney <paulmck@linux.vnet.ibm.com>
Cc: Ingo Molnar <mingo@elte.hu>
Acked-by: Josh Triplett <josh@kernel.org>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
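For context, a minimal sketch of the hazard the message alludes to. The names here (struct foo, gp, reader_broken) are hypothetical and not part of the patch; the point is that a plain, non-volatile load gives the compiler license to refetch the pointer.

struct foo { int a; };
struct foo *gp;		/* hypothetical RCU-protected pointer, published elsewhere */

/* Pre-patch shape of an RCU reader: rcu_dereference() compiled down to a
 * plain load of gp.  The compiler may discard p and reload gp at the
 * dereference, so the NULL check and the use can observe two different
 * pointers -- the second of which may already have been freed. */
int reader_broken(void)
{
	struct foo *p = gp;

	if (p)
		return p->a;
	return 0;
}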
This commit is contained in:
parent f6b450d489
commit 97b430320c
1 changed file with 13 additions and 1 deletion
@@ -231,6 +231,18 @@ extern struct lockdep_map rcu_lock_map;
 		local_bh_enable(); \
 	} while(0)
 
+/*
+ * Prevent the compiler from merging or refetching accesses.  The compiler
+ * is also forbidden from reordering successive instances of ACCESS_ONCE(),
+ * but only when the compiler is aware of some particular ordering.  One way
+ * to make the compiler aware of ordering is to put the two invocations of
+ * ACCESS_ONCE() in different C statements.
+ *
+ * This macro does absolutely -nothing- to prevent the CPU from reordering,
+ * merging, or refetching absolutely anything at any time.
+ */
+#define ACCESS_ONCE(x) (*(volatile typeof(x) *)&(x))
+
 /**
  * rcu_dereference - fetch an RCU-protected pointer in an
  * RCU read-side critical section. This pointer may later
@@ -242,7 +254,7 @@ extern struct lockdep_map rcu_lock_map;
  */
 
 #define rcu_dereference(p)	({ \
-				typeof(p) _________p1 = p; \
+				typeof(p) _________p1 = ACCESS_ONCE(p); \
 				smp_read_barrier_depends(); \
 				(_________p1); \
 				})
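With this change in place, a reader picks up the volatile load simply by using rcu_dereference() as before. A minimal sketch of such a reader follows; gp, struct foo, and read_a() are illustrative names, not part of this patch.

#include <linux/rcupdate.h>

struct foo {
	int a;
};
static struct foo *gp;		/* illustrative RCU-protected pointer */

int read_a(void)
{
	int val = 0;
	struct foo *p;

	rcu_read_lock();
	p = rcu_dereference(gp);	/* exactly one load of gp, followed by
					 * the dependency barrier in the macro */
	if (p)
		val = p->a;
	rcu_read_unlock();
	return val;
}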