*/
void synchronize_sched_expedited(void)
{
-	int firstsnap, s, snap, trycount = 0;
+	long firstsnap, s, snap;
+	int trycount = 0;
+	struct rcu_state *rsp = &rcu_sched_state;
+
+	/*
+	 * If we are in danger of counter wrap, just do synchronize_sched().
+	 * By allowing sync_sched_expedited_started to advance no more than
+	 * ULONG_MAX/8 ahead of sync_sched_expedited_done, we are ensuring
+	 * that more than 3.5 billion CPUs would be required to force a
+	 * counter wrap on a 32-bit system.  Quite a few more CPUs would of
+	 * course be required on a 64-bit system.
+	 */
+	if (ULONG_CMP_GE((ulong)atomic_long_read(&rsp->expedited_start),
+			 (ulong)atomic_long_read(&rsp->expedited_done) +
+			 ULONG_MAX / 8)) {
+		synchronize_sched();
+		atomic_long_inc(&rsp->expedited_wrap);
+		return;
+	}

-	/* Note that atomic_inc_return() implies full memory barrier. */
-	firstsnap = snap = atomic_inc_return(&sync_sched_expedited_started);
+	/*
+	 * Take a ticket.  Note that atomic_inc_return() implies a
+	 * full memory barrier.
+	 */
+	snap = atomic_long_inc_return(&rsp->expedited_start);
+	firstsnap = snap;
get_online_cpus();
WARN_ON_ONCE(cpu_is_offline(raw_smp_processor_id()));
if (trycount++ < 10) {
udelay(trycount * num_online_cpus());
} else {
-		synchronize_sched();
+		wait_rcu_gp(call_rcu_sched);
+		atomic_long_inc(&rsp->expedited_normal);
return;
}
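
The counter-wrap check added above hinges on a modular, wrap-safe comparison of the two free-running tickets: the kernel's ULONG_CMP_GE(a, b) is true when a is no more than ULONG_MAX/2 behind b in modular arithmetic, so the expedited path can tell "start is far ahead of done" even after the counters wrap past zero. The following is a minimal userspace sketch of that comparison, not kernel code; the ulong_cmp_ge() helper is a local re-implementation written for illustration only.

/* Standalone illustration of a wrap-safe ticket comparison in the
 * style of the kernel's ULONG_CMP_GE().  Helper names are local to
 * this example. */
#include <limits.h>
#include <stdio.h>

/* Wrap-safe "a >= b" for free-running unsigned long counters. */
static int ulong_cmp_ge(unsigned long a, unsigned long b)
{
	return ULONG_MAX / 2 >= a - b;
}

int main(void)
{
	unsigned long done  = ULONG_MAX - 2;	/* about to wrap */
	unsigned long start = done + 5;		/* wrapped past zero */

	/* A naive ">=" gets this wrong once the counter wraps... */
	printf("naive:     start >= done? %d\n", start >= done);

	/* ...while the modular comparison still sees start as ahead. */
	printf("wrap-safe: start >= done? %d\n", ulong_cmp_ge(start, done));

	/* The patch falls back to synchronize_sched() only when start
	 * has advanced at least ULONG_MAX/8 beyond done; here it is
	 * only 5 ahead, so no danger is reported. */
	printf("danger of wrap? %d\n",
	       ulong_cmp_ge(start, done + ULONG_MAX / 8));
	return 0;
}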