EXPORT_SYMBOL_GPL(synchronize_rcu);
static DECLARE_WAIT_QUEUE_HEAD(sync_rcu_preempt_exp_wq);
-static unsigned long sync_rcu_preempt_exp_count;
static DEFINE_MUTEX(sync_rcu_preempt_exp_mutex);
void synchronize_rcu_expedited(void)
{
struct rcu_node *rnp;
struct rcu_state *rsp = rcu_state_p;
- unsigned long snap;
+ unsigned long s;
int trycount = 0;
- smp_mb(); /* Caller's modifications seen first by other CPUs. */
- snap = READ_ONCE(sync_rcu_preempt_exp_count) + 1;
- smp_mb(); /* Above access cannot bleed into critical section. */
+ s = rcu_exp_gp_seq_snap(rsp);
/*
* Acquire lock, falling back to synchronize_rcu() if too many
* lock-acquisition failures.  Of course, if someone does the
* expedited grace period for us, just leave.
*/
while (!mutex_trylock(&sync_rcu_preempt_exp_mutex)) {
- if (ULONG_CMP_LT(snap,
- READ_ONCE(sync_rcu_preempt_exp_count)))
+ if (rcu_exp_gp_seq_done(rsp, s))
goto mb_ret; /* Others did our work for us. */
if (trycount++ < 10) {
udelay(trycount * num_online_cpus());
} else {
wait_rcu_gp(call_rcu);
return;
}
}
- if (ULONG_CMP_LT(snap, READ_ONCE(sync_rcu_preempt_exp_count)))
+ if (rcu_exp_gp_seq_done(rsp, s))
goto unlock_mb_ret; /* Others did our work for us. */
+ rcu_exp_gp_seq_start(rsp);
/* force all RCU readers onto ->blkd_tasks lists. */
synchronize_sched_expedited();
wait_event(sync_rcu_preempt_exp_wq,
sync_rcu_preempt_exp_done(rnp));
/* Clean up and exit. */
- smp_mb(); /* ensure expedited GP seen before counter increment. */
- WRITE_ONCE(sync_rcu_preempt_exp_count, sync_rcu_preempt_exp_count + 1);
+ rcu_exp_gp_seq_end(rsp);
unlock_mb_ret:
mutex_unlock(&sync_rcu_preempt_exp_mutex);
mb_ret:
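
The rcu_exp_gp_seq_snap(), rcu_exp_gp_seq_done(), rcu_exp_gp_seq_start(), and
rcu_exp_gp_seq_end() calls replace the open-coded sync_rcu_preempt_exp_count
with an even/odd sequence counter: the counter is odd while an expedited grace
period is in flight, a snapshot names the first even value that implies a full
grace period after the caller's updates, and "done" means the counter has
reached that snapshot.  What follows is a minimal single-threaded userspace
sketch of that protocol, not the kernel code: the exp_seq_model struct, the
exp_seq_*() helpers, and the main() driver are invented for illustration, and
the real helpers additionally provide the memory ordering that was explicit in
the removed smp_mb() lines, plus wrap-safe comparisons, which this model omits.

#include <assert.h>
#include <stdbool.h>
#include <stdio.h>

/* Illustrative stand-in for the expedited-GP counter behind rsp. */
struct exp_seq_model {
	unsigned long seq;	/* Odd: expedited GP in progress; even: idle. */
};

/* Snapshot: smallest even value implying a full GP after this point. */
static unsigned long exp_seq_snap(struct exp_seq_model *m)
{
	return (m->seq + 3) & ~0x1UL;
}

/* Has a full expedited GP elapsed since snapshot s was taken? */
static bool exp_seq_done(struct exp_seq_model *m, unsigned long s)
{
	return m->seq >= s;	/* Kernel uses a wrap-safe ULONG comparison. */
}

/* Mark an expedited GP as started: counter goes odd. */
static void exp_seq_start(struct exp_seq_model *m)
{
	m->seq++;
	assert(m->seq & 0x1);
}

/* Mark the expedited GP as finished: counter goes even again. */
static void exp_seq_end(struct exp_seq_model *m)
{
	m->seq++;
	assert(!(m->seq & 0x1));
}

int main(void)
{
	struct exp_seq_model m = { .seq = 0 };

	/* A waiter snapshots the sequence before trying to run the GP itself. */
	unsigned long s = exp_seq_snap(&m);

	printf("snap=%lu done=%d\n", s, exp_seq_done(&m, s));	/* not yet */

	/* Some other updater drives a complete expedited grace period... */
	exp_seq_start(&m);
	exp_seq_end(&m);

	/* ...so this waiter may take the "Others did our work for us" exit. */
	printf("seq=%lu done=%d\n", m.seq, exp_seq_done(&m, s));
	return 0;
}

Because the snapshot is rounded up to "current counter plus one full grace
period", a grace period already in flight at snapshot time does not count,
which is why both the trylock retry loop and the post-trylock recheck above
can safely take the "Others did our work for us" exits.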