int cpu;
unsigned long flags;
struct rcu_data *rdp;
--- struct rcu_head rh;
+++ struct rcu_data rd;
+++ unsigned long snap = ACCESS_ONCE(rsp->n_barrier_done);
+++ unsigned long snap_done;
--- init_rcu_head_on_stack(&rh);
+++ init_rcu_head_on_stack(&rd.barrier_head);
+++ _rcu_barrier_trace(rsp, "Begin", -1, snap);
/* Take mutex to serialize concurrent rcu_barrier() requests. */
--- mutex_lock(&rcu_barrier_mutex);
+++ mutex_lock(&rsp->barrier_mutex);
+++
--- smp_mb(); /* Prevent any prior operations from leaking in. */
+++ /*
+++ * Ensure that all prior references, including to ->n_barrier_done,
+++ * are ordered before the _rcu_barrier() machinery.
+++ */
+++ smp_mb(); /* See above block comment. */
+++
+++ /*
+++ * Recheck ->n_barrier_done to see if others did our work for us.
+++ * This means checking ->n_barrier_done for an even-to-odd-to-even
+++ * transition. The "if" expression below therefore rounds the old
+++ * value up to the next even number and adds two before comparing.
+++ */
+++ snap_done = ACCESS_ONCE(rsp->n_barrier_done);
+++ _rcu_barrier_trace(rsp, "Check", -1, snap_done);
+++ if (ULONG_CMP_GE(snap_done, ((snap + 1) & ~0x1) + 2)) {
+++         _rcu_barrier_trace(rsp, "EarlyExit", -1, snap_done);
+++         smp_mb(); /* caller's subsequent code after above check. */
+++         mutex_unlock(&rsp->barrier_mutex);
+++         return;
+++ }
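
To make the rounding in the check above concrete, here is a tiny standalone C illustration (not part of the patch) of the threshold compared against by ULONG_CMP_GE(): the expression rounds the snapshot up to the next even value and adds two, so an even snapshot such as 4 is satisfied once the counter reaches 6, while an odd snapshot such as 5, taken while a barrier was already in flight, must wait for 8.

    #include <stdio.h>

    int main(void)
    {
            unsigned long snap;

            /* Print the value ->n_barrier_done must reach before a
             * later rcu_barrier() call may take the early exit. */
            for (snap = 4; snap <= 7; snap++)
                    printf("snap=%lu -> need n_barrier_done >= %lu\n",
                           snap, ((snap + 1) & ~0x1UL) + 2);
            return 0;
    }
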
+++
+++ /*
+++ * Increment ->n_barrier_done to avoid duplicate work. Use
+++ * ACCESS_ONCE() to prevent the compiler from speculating
+++ * the increment to precede the early-exit check.
+++ */
+++ ACCESS_ONCE(rsp->n_barrier_done)++;
+++ WARN_ON_ONCE((rsp->n_barrier_done & 0x1) != 1);
+++ _rcu_barrier_trace(rsp, "Inc1", -1, rsp->n_barrier_done);
+++ smp_mb(); /* Order ->n_barrier_done increment with below mechanism. */
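
Putting the pieces of this hunk together, the sketch below is a minimal userspace analogue of the scheme, an illustration only and not kernel code: the variable gen stands in for ->n_barrier_done, flush_callbacks() and full_barrier_cycle() are hypothetical stand-ins for the real callback-flushing machinery, and ULONG_CMP_GE() mirrors the kernel's wraparound-safe comparison. It shows how a caller whose snapshot is overtaken by a complete even-to-odd-to-even cycle can safely skip its own barrier work.

    #include <limits.h>
    #include <stdio.h>

    #define ULONG_CMP_GE(a, b) (ULONG_MAX / 2 >= (a) - (b))

    static unsigned long gen;          /* even: idle, odd: barrier in flight */

    static void flush_callbacks(void)
    {
            /* Placeholder for waiting on all previously queued callbacks. */
    }

    /* Run one complete barrier cycle: even -> odd -> even. */
    static void full_barrier_cycle(void)
    {
            gen++;                     /* odd: a barrier is now in flight */
            flush_callbacks();
            gen++;                     /* even again: that barrier is done */
    }

    int main(void)
    {
            unsigned long snap = gen;  /* caller snapshots the counter: 0 */

            /* Another caller runs a full cycle while we wait: gen 0 -> 2. */
            full_barrier_cycle();

            /* Recheck: a cycle began and ended after our snapshot, so our
             * own barrier work would be redundant. */
            if (ULONG_CMP_GE(gen, ((snap + 1) & ~0x1UL) + 2))
                    printf("early exit: gen=%lu covers snap=%lu\n", gen, snap);
            else
                    full_barrier_cycle();
            return 0;
    }

The sketch is single-threaded, so it omits what the real code relies on for safety: ->barrier_mutex serializes the increments, and the smp_mb() calls order the counter accesses against the callers' surrounding code.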
/*
* Initialize the count to one rather than to zero in order to