EXPORT_SYMBOL_GPL(rcu_lock_map);
#endif
-enum rcu_barrier {
- RCU_BARRIER_STD,
- RCU_BARRIER_BH,
- RCU_BARRIER_SCHED,
-};
-
static DEFINE_PER_CPU(struct rcu_head, rcu_barrier_head) = {NULL};
static atomic_t rcu_barrier_cpu_count;
static DEFINE_MUTEX(rcu_barrier_mutex);
static void rcu_barrier_func(void *type)
{
int cpu = smp_processor_id();
struct rcu_head *head = &per_cpu(rcu_barrier_head, cpu);
+ void (*call_rcu_func)(struct rcu_head *head,
+ void (*func)(struct rcu_head *head));
atomic_inc(&rcu_barrier_cpu_count);
- switch ((enum rcu_barrier)type) {
- case RCU_BARRIER_STD:
- call_rcu(head, rcu_barrier_callback);
- break;
- case RCU_BARRIER_BH:
- call_rcu_bh(head, rcu_barrier_callback);
- break;
- case RCU_BARRIER_SCHED:
- call_rcu_sched(head, rcu_barrier_callback);
- break;
- }
+ call_rcu_func = type;
+ call_rcu_func(head, rcu_barrier_callback);
}
static inline void wait_migrated_callbacks(void)
/*
 * Orchestrate the specified type of RCU barrier, waiting for all
* RCU callbacks of the specified type to complete.
*/
-static void _rcu_barrier(enum rcu_barrier type)
+static void _rcu_barrier(void (*call_rcu_func)(struct rcu_head *head,
+ void (*func)(struct rcu_head *head)))
{
BUG_ON(in_interrupt());
/* Take cpucontrol mutex to protect against CPU hotplug */
* early.
*/
atomic_set(&rcu_barrier_cpu_count, 1);
- on_each_cpu(rcu_barrier_func, (void *)type, 1);
+ on_each_cpu(rcu_barrier_func, (void *)call_rcu_func, 1);
if (atomic_dec_and_test(&rcu_barrier_cpu_count))
complete(&rcu_barrier_completion);
wait_for_completion(&rcu_barrier_completion);
*/
void rcu_barrier(void)
{
- _rcu_barrier(RCU_BARRIER_STD);
+ _rcu_barrier(call_rcu);
}
EXPORT_SYMBOL_GPL(rcu_barrier);
*/
void rcu_barrier_bh(void)
{
- _rcu_barrier(RCU_BARRIER_BH);
+ _rcu_barrier(call_rcu_bh);
}
EXPORT_SYMBOL_GPL(rcu_barrier_bh);
*/
void rcu_barrier_sched(void)
{
- _rcu_barrier(RCU_BARRIER_SCHED);
+ _rcu_barrier(call_rcu_sched);
}
EXPORT_SYMBOL_GPL(rcu_barrier_sched);
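
The hunks above hinge on one trick: the RCU flavour is no longer an enum that
_rcu_barrier() switches on, but a call_rcu-style function pointer smuggled
through the void * argument of on_each_cpu() and recovered inside
rcu_barrier_func(). Below is a minimal user-space sketch of that pattern.
Everything in it (fake_call_rcu, fake_call_rcu_bh, barrier, barrier_func,
barrier_callback) is an invented stand-in rather than a kernel API, and the
implicit conversions between void * and a function pointer rely on the same
GCC behaviour the kernel code depends on.

	#include <stdio.h>

	struct rcu_head { int unused; };

	typedef void (*call_rcu_func_t)(struct rcu_head *head,
					void (*func)(struct rcu_head *head));

	/* Stand-ins for call_rcu() and call_rcu_bh(): run the callback immediately. */
	static void fake_call_rcu(struct rcu_head *head,
				  void (*func)(struct rcu_head *head))
	{
		printf("fake_call_rcu: queueing callback\n");
		func(head);
	}

	static void fake_call_rcu_bh(struct rcu_head *head,
				     void (*func)(struct rcu_head *head))
	{
		printf("fake_call_rcu_bh: queueing callback\n");
		func(head);
	}

	static void barrier_callback(struct rcu_head *head)
	{
		(void)head;
		printf("barrier callback ran\n");
	}

	/* Analogue of rcu_barrier_func(): recover the function pointer from void *. */
	static void barrier_func(void *arg)
	{
		static struct rcu_head head;
		call_rcu_func_t call_rcu_func = arg;	/* same conversion the kernel uses */

		call_rcu_func(&head, barrier_callback);
	}

	/* Analogue of _rcu_barrier(): the flavour is a parameter, not an enum. */
	static void barrier(call_rcu_func_t call_rcu_func)
	{
		/* on_each_cpu() would run this on every CPU; one direct call here. */
		barrier_func((void *)call_rcu_func);
	}

	int main(void)
	{
		barrier(fake_call_rcu);		/* was _rcu_barrier(RCU_BARRIER_STD) */
		barrier(fake_call_rcu_bh);	/* was _rcu_barrier(RCU_BARRIER_BH) */
		return 0;
	}

Built with gcc, the sketch prints one "queueing"/"ran" pair per flavour. The
real on_each_cpu() path in the patch additionally uses rcu_barrier_cpu_count
and rcu_barrier_completion so the caller can sleep until every CPU's callback
has actually run.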