 	rcu_exp_gp_seq_start(rsp);
 
 	/* Stop each CPU that is online, non-idle, and not us. */
-	init_waitqueue_head(&rsp->expedited_wq);
 	atomic_set(&rsp->expedited_need_qs, 1); /* Extra count avoids race. */
 	for_each_online_cpu(cpu) {
 		struct rcu_data *rdp = per_cpu_ptr(rsp->rda, cpu);
 	}
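This first hunk is in synchronize_sched_expedited() in kernel/rcu/tree.c. The deleted line stops re-initializing ->expedited_wq at the start of every expedited grace period; the queue is instead initialized exactly once at boot (next hunk). Re-running init_waitqueue_head() resets the queue's internal lock and waiter list, so doing it while someone might still be sleeping on the queue is hazardous, and doing it per grace period is wasted work besides. Below is a minimal userspace sketch of the once-only pattern; the names (exp_state, exp_state_init, exp_gp_start) and the pthread stand-ins for the kernel waitqueue are illustrative assumptions, not kernel code. Re-initializing a pthread condvar that still has waiters is undefined behavior, the userspace cousin of the race this hunk avoids.

```c
#include <pthread.h>
#include <stdbool.h>

/* Stand-in for rcu_state: one wait channel plus the condition it guards. */
struct exp_state {
	pthread_mutex_t lock;
	pthread_cond_t wq;	/* plays the role of ->expedited_wq */
	bool gp_done;
};

/* One-time setup, analogous to initializing the waitqueue at boot. */
static void exp_state_init(struct exp_state *sp)
{
	pthread_mutex_init(&sp->lock, NULL);
	pthread_cond_init(&sp->wq, NULL);	/* never re-run while in use */
	sp->gp_done = false;
}

/* Per-grace-period arming: touches only the flag, never re-inits the queue. */
static void exp_gp_start(struct exp_state *sp)
{
	pthread_mutex_lock(&sp->lock);
	sp->gp_done = false;
	pthread_mutex_unlock(&sp->lock);
}
```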
 	init_waitqueue_head(&rsp->gp_wq);
+	init_waitqueue_head(&rsp->expedited_wq);
 	rnp = rsp->level[rcu_num_lvls - 1];
 	for_each_possible_cpu(i) {
 		while (i > rnp->grphi)
 			rnp++;
 	}
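The matching addition lands in rcu_init_one(), also in kernel/rcu/tree.c, right beside the existing ->gp_wq initialization, so every rcu_state comes up with a usable expedited wait queue before any expedited grace period can start. A kernel-style sketch of the idiom follows; foo_state and foo_init_one are made-up names, and the point is only that a waitqueue embedded in a structure needs an explicit one-time init_waitqueue_head() call, unlike a file-scope queue declared with DECLARE_WAIT_QUEUE_HEAD().

```c
#include <linux/wait.h>

/* Hypothetical state structure with embedded waitqueues. */
struct foo_state {
	wait_queue_head_t gp_wq;
	wait_queue_head_t expedited_wq;
};

/* One-time init, run before the structure is visible to any waiter or waker. */
static void foo_init_one(struct foo_state *sp)
{
	init_waitqueue_head(&sp->gp_wq);
	init_waitqueue_head(&sp->expedited_wq);
}
```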
 EXPORT_SYMBOL_GPL(synchronize_rcu);
 
-static DECLARE_WAIT_QUEUE_HEAD(sync_rcu_preempt_exp_wq);
-
 /*
  * Return non-zero if there are any tasks in RCU read-side critical
  * sections blocking the current preemptible-RCU expedited grace period.
  */
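This hunk and the two below are in kernel/rcu/tree_plugin.h. With ->expedited_wq available in every rcu_state, the file-scope sync_rcu_preempt_exp_wq is redundant and can go. The sketch below shows the shape of the change (flavor_state and report_exp_done are invented names): the queue moves from a global into the per-flavor state structure, so helpers reach it through the state pointer they already take, and each RCU flavor owns its own queue.

```c
#include <linux/wait.h>

/* Before: one file-scope queue, usable by exactly one RCU flavor:
 *	static DECLARE_WAIT_QUEUE_HEAD(sync_rcu_preempt_exp_wq);
 * After: the queue is a field, so each flavor's state gets its own copy. */
struct flavor_state {
	wait_queue_head_t expedited_wq;
};

static void report_exp_done(struct flavor_state *sp)
{
	wake_up(&sp->expedited_wq);	/* per-instance, not a global */
}
```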
 		raw_spin_unlock_irqrestore(&rnp->lock, flags);
 		if (wake) {
 			smp_mb(); /* EGP done before wake_up(). */
-			wake_up(&sync_rcu_preempt_exp_wq);
+			wake_up(&rsp->expedited_wq);
 		}
 		break;
 	}
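In rcu_report_exp_rnp(), only the wake-up target changes; the smp_mb() stays, because the stores marking the expedited grace period complete must be ordered before the wake-up, and the condition in wait_event() is checked without holding any lock the waker also holds. Here is a rough userspace analogue using C11 atomics and a pthread condvar; gp_done, exp_lock, exp_wq, and report_done are invented names, and the release store plays the role of the barrier-before-wake.

```c
#include <pthread.h>
#include <stdatomic.h>
#include <stdbool.h>

static pthread_mutex_t exp_lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t exp_wq = PTHREAD_COND_INITIALIZER;
static atomic_bool gp_done;

/* Wake side: publish completion first, then wake, in that order. */
static void report_done(void)
{
	/* Make the "grace period done" state visible before any wake-up. */
	atomic_store_explicit(&gp_done, true, memory_order_release);

	pthread_mutex_lock(&exp_lock);
	pthread_cond_broadcast(&exp_wq);
	pthread_mutex_unlock(&exp_lock);
}
```

In POSIX the mutex already orders the store against a sleeping waiter's re-check; the explicit release store mirrors the kernel's need for smp_mb() on the wake path.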
 	/* Wait for snapshotted ->blkd_tasks lists to drain. */
 	rnp = rcu_get_root(rsp);
-	wait_event(sync_rcu_preempt_exp_wq,
+	wait_event(rsp->expedited_wq,
 		   sync_rcu_preempt_exp_done(rnp));
 
 	/* Clean up and exit. */
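Finally, the waiter in synchronize_rcu_expedited() sleeps on the per-state queue. wait_event() re-evaluates its condition, sync_rcu_preempt_exp_done(rnp), around the sleep, so a wake-up is only a hint and spurious or early wake-ups are harmless. Continuing the userspace sketch above (same invented names), the waiter side is the classic condition-rechecking loop:

```c
/* Wait side: re-check the condition around the sleep, like wait_event(). */
static void wait_for_done(void)
{
	pthread_mutex_lock(&exp_lock);
	while (!atomic_load_explicit(&gp_done, memory_order_acquire))
		pthread_cond_wait(&exp_wq, &exp_lock);
	pthread_mutex_unlock(&exp_lock);
}
```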