* can be inexact, as it is just promoting locality and is not
* strictly needed for correctness.
*/
- if (sync_exp_work_done(rsp, NULL, NULL, &rdp->expedited_workdone1, s))
+ if (sync_exp_work_done(rsp, NULL, NULL, &rdp->exp_workdone1, s))
return NULL;
mutex_lock(&rdp->exp_funnel_mutex);
trace_rcu_exp_funnel_lock(rsp->name, rdp->mynode->level + 1,
rdp->cpu, rdp->cpu, TPS("acq"));
rnp0 = rdp->mynode;
for (; rnp0 != NULL; rnp0 = rnp0->parent) {
- if (sync_exp_work_done(rsp, rnp1, rdp,
- &rdp->expedited_workdone2, s))
+ if (sync_exp_work_done(rsp, rnp1, rdp, &rdp->exp_workdone2, s))
return NULL;
mutex_lock(&rnp0->exp_funnel_mutex);
		trace_rcu_exp_funnel_lock(rsp->name, rnp0->level,
					  rnp0->grplo, rnp0->grphi, TPS("acq"));
		if (rnp1)
			mutex_unlock(&rnp1->exp_funnel_mutex);
		else
			mutex_unlock(&rdp->exp_funnel_mutex);
rnp1 = rnp0;
}
- if (sync_exp_work_done(rsp, rnp1, rdp,
- &rdp->expedited_workdone3, s))
+ if (sync_exp_work_done(rsp, rnp1, rdp, &rdp->exp_workdone3, s))
return NULL;
return rnp1;
}
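
The hunk above is from exp_funnel_lock() in kernel/rcu/tree.c. For readers new to the pattern: funnel locking serializes expedited-grace-period requests by having each task climb the rcu_node tree from its own leaf toward the root, taking each level's mutex before releasing the one below it, and bailing out at every checkpoint if some other task has already completed the needed grace period. Here is a minimal user-space sketch of that climb, with pthreads in place of kernel mutexes and a hypothetical gp_done() standing in for rcu_exp_gp_seq_done(); it is illustrative only, not the kernel's implementation:

    #include <pthread.h>
    #include <stdatomic.h>
    #include <stdbool.h>
    #include <stddef.h>

    struct node {
    	pthread_mutex_t lock;
    	struct node *parent;		/* NULL at the root */
    };

    /* Hypothetical completion check: has a grace period at least as
     * new as snapshot s already ended? */
    static _Atomic unsigned long done_seq;

    static bool gp_done(unsigned long s)
    {
    	return (long)(atomic_load(&done_seq) - s) >= 0;
    }

    /*
     * Climb from leaf toward root, lock-coupling upward.  Returns the
     * root with its lock held, or NULL (no lock held) if another task
     * already did the work.
     */
    struct node *funnel_lock(struct node *leaf, unsigned long s)
    {
    	struct node *np, *held = NULL;

    	for (np = leaf; np; np = np->parent) {
    		if (gp_done(s)) {
    			if (held)
    				pthread_mutex_unlock(&held->lock);
    			return NULL;		/* done by others */
    		}
    		pthread_mutex_lock(&np->lock);	/* take next level up... */
    		if (held)
    			pthread_mutex_unlock(&held->lock); /* ...drop previous */
    		held = np;
    	}
    	return held;				/* caller unlocks the root */
    }

Taking the higher lock before dropping the lower one is what makes this a funnel: contending tasks queue on interior nodes instead of all hammering the root mutex, and a task that learns its grace period has already ended drops out without ever reaching the root.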
struct rcu_head oom_head;
#endif /* #ifdef CONFIG_RCU_FAST_NO_HZ */
struct mutex exp_funnel_mutex;
- atomic_long_t expedited_workdone1; /* # done by others #1. */
- atomic_long_t expedited_workdone2; /* # done by others #2. */
- atomic_long_t expedited_workdone3; /* # done by others #3. */
+ atomic_long_t exp_workdone1; /* # done by others #1. */
+ atomic_long_t exp_workdone2; /* # done by others #2. */
+ atomic_long_t exp_workdone3; /* # done by others #3. */
/* 7) Callback offloading. */
#ifdef CONFIG_RCU_NOCB_CPU
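
These counters live in struct rcu_data (kernel/rcu/tree.h), one set per CPU, and each records how often the corresponding checkpoint in exp_funnel_lock() found that another task had already driven the expedited grace period to completion. Completion itself is tracked by a sequence counter whose low bit means "grace period in flight": a requester snapshots the value it must wait for up front, then tests it at every checkpoint. A rough self-contained C11 analogue of those two pieces, with gp_snap() and work_done() as hypothetical stand-ins for rcu_exp_gp_seq_snap() and sync_exp_work_done():

    #include <stdatomic.h>
    #include <stdbool.h>

    /* Advances by 2 per grace period; low bit set while one is in flight. */
    static atomic_ulong expedited_sequence;

    /* Snapshot of the sequence value our request must wait for: round up
     * past any grace period already in flight so that only a full later
     * one satisfies us. */
    unsigned long gp_snap(void)
    {
    	return (atomic_load(&expedited_sequence) + 3) & ~1UL;
    }

    /* Checkpoint test: if the work is already done, credit the matching
     * exp_workdone* counter and tell the caller to bail out. */
    bool work_done(unsigned long s, atomic_long *stat)
    {
    	if ((long)(atomic_load(&expedited_sequence) - s) >= 0) {
    		atomic_fetch_add(stat, 1);	/* "done by others" */
    		return true;
    	}
    	return false;
    }

Because each counter is an atomic_long_t bumped with atomic_long_inc(), no lock is needed on the update side, and the debugfs reader below can simply sum the per-CPU values.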
for_each_possible_cpu(cpu) {
rdp = per_cpu_ptr(rsp->rda, cpu);
- s1 += atomic_long_read(&rdp->expedited_workdone1);
- s2 += atomic_long_read(&rdp->expedited_workdone2);
- s3 += atomic_long_read(&rdp->expedited_workdone3);
+ s1 += atomic_long_read(&rdp->exp_workdone1);
+ s2 += atomic_long_read(&rdp->exp_workdone2);
+ s3 += atomic_long_read(&rdp->exp_workdone3);
}
seq_printf(m, "s=%lu wd1=%lu wd2=%lu wd3=%lu n=%lu enq=%d sc=%lu\n",
		   rsp->expedited_sequence, s1, s2, s3,
		   atomic_long_read(&rsp->expedited_normal),
		   atomic_read(&rsp->expedited_need_qs),
		   rsp->expedited_sequence / 2);
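
This last hunk is from show_rcuexp() in kernel/rcu/tree_trace.c, which backs the rcuexp debugfs file: s is the current expedited sequence number, wd1/wd2/wd3 are the summed "done by others" counts from the three checkpoints, and sc is the number of expedited grace periods so far, which is simply s / 2 because each grace period advances the sequence by two. Reading counters that other CPUs may be incrementing concurrently, with no synchronization beyond atomic_long_read(), is deliberate: momentarily stale sums are fine for diagnostics. A self-contained sketch of the same read side (NCPUS and the field layout are hypothetical):

    #include <stdatomic.h>
    #include <stdio.h>

    #define NCPUS 4

    static struct {			/* stand-in for per-CPU rcu_data */
    	atomic_long exp_workdone1, exp_workdone2, exp_workdone3;
    } rdp[NCPUS];

    void print_exp_stats(unsigned long seq)
    {
    	long s1 = 0, s2 = 0, s3 = 0;

    	/* Racy-but-harmless sums, as in show_rcuexp(). */
    	for (int cpu = 0; cpu < NCPUS; cpu++) {
    		s1 += atomic_load(&rdp[cpu].exp_workdone1);
    		s2 += atomic_load(&rdp[cpu].exp_workdone2);
    		s3 += atomic_load(&rdp[cpu].exp_workdone3);
    	}
    	printf("s=%lu wd1=%ld wd2=%ld wd3=%ld sc=%lu\n",
    	       seq, s1, s2, s3, seq / 2);
    }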