static inline void kfree_call_rcu(struct rcu_head *head,
                                  void (*func)(struct rcu_head *rcu))
{
	call_rcu(head, func);
}
-static inline void rcu_note_context_switch(int cpu)
+static inline void rcu_note_context_switch(void)
{
rcu_sched_qs();
}
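The Tiny RCU variant above now has the same zero-argument shape as the Tree RCU version declared next. A minimal userspace sketch of the underlying refactoring pattern, assuming glibc's sched_getcpu() as a stand-in for smp_processor_id(); note_context_switch() and the printf are illustrative only:

#define _GNU_SOURCE
#include <stdio.h>
#include <sched.h>

/* Derive the CPU inside the function instead of trusting a
 * caller-supplied value, mirroring the signature change above. */
static void note_context_switch(void)
{
	int cpu = sched_getcpu();	/* stand-in for smp_processor_id() */

	printf("noting context switch on CPU %d\n", cpu);
}

int main(void)
{
	note_context_switch();	/* no cpu argument left for a caller to get wrong */
	return 0;
}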
#ifndef __LINUX_RCUTREE_H
#define __LINUX_RCUTREE_H
-void rcu_note_context_switch(int cpu);
+void rcu_note_context_switch(void);
#ifndef CONFIG_RCU_NOCB_CPU_ALL
int rcu_needs_cpu(int cpu, unsigned long *delta_jiffies);
#endif /* #ifndef CONFIG_RCU_NOCB_CPU_ALL */
/*
 * Note a virtualization-based context switch.  This is simply a
 * wrapper around rcu_note_context_switch(), which allows TINY_RCU
 * to save a few bytes.
 */
static inline void rcu_virt_note_context_switch(int cpu)
{
- rcu_note_context_switch(cpu);
+ rcu_note_context_switch();
}
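Note the design choice in the wrapper above: it keeps its int cpu parameter so existing virtualization-side callers compile unchanged, while the body now discards the value. A hedged sketch of such a caller, assuming kernel context; example_guest_enter() is a hypothetical helper, not part of this patch:

static void example_guest_enter(void)	/* hypothetical caller */
{
	/* Guest-entry paths run with preemption disabled, so
	 * smp_processor_id() is safe; the wrapper ignores it anyway. */
	rcu_virt_note_context_switch(smp_processor_id());
}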
void synchronize_rcu_bh(void);
/*
 * Note a context switch.  This is a quiescent state for RCU-sched,
 * and requires special handling for preemptible RCU.
* The caller must have disabled preemption.
*/
-void rcu_note_context_switch(int cpu)
+void rcu_note_context_switch(void)
{
trace_rcu_utilization(TPS("Start context switch"));
rcu_sched_qs();
- rcu_preempt_note_context_switch(cpu);
+ rcu_preempt_note_context_switch();
if (unlikely(raw_cpu_read(rcu_sched_qs_mask)))
rcu_momentary_dyntick_idle();
trace_rcu_utilization(TPS("End context switch"));
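With the argument gone, correctness rests entirely on the contract stated in the comment: preemption must be disabled, so that raw_cpu_read() above, and the this_cpu_ptr() conversion below, resolve against a stable CPU. A minimal sketch of the calling convention, assuming kernel context:

	preempt_disable();
	rcu_note_context_switch();	/* implicitly acts on the current CPU */
	preempt_enable();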
/* Forward declarations for rcutree_plugin.h */
static void rcu_bootup_announce(void);
long rcu_batches_completed(void);
-static void rcu_preempt_note_context_switch(int cpu);
+static void rcu_preempt_note_context_switch(void);
static int rcu_preempt_blocked_readers_cgp(struct rcu_node *rnp);
#ifdef CONFIG_HOTPLUG_CPU
static void rcu_report_unblock_qs_rnp(struct rcu_node *rnp,
                                      unsigned long flags);
/*
* Caller must disable preemption.
*/
-static void rcu_preempt_note_context_switch(int cpu)
+static void rcu_preempt_note_context_switch(void)
{
struct task_struct *t = current;
unsigned long flags;
if (t->rcu_read_lock_nesting > 0 &&
    !t->rcu_read_unlock_special.b.blocked) {
/* Possibly blocking in an RCU read-side critical section. */
- rdp = per_cpu_ptr(rcu_preempt_state.rda, cpu);
+ rdp = this_cpu_ptr(rcu_preempt_state.rda);
rnp = rdp->mynode;
raw_spin_lock_irqsave(&rnp->lock, flags);
smp_mb__after_unlock_lock();
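The per_cpu_ptr() to this_cpu_ptr() conversion above is the payoff of the signature change. With preemption disabled, the two expressions below name the same per-CPU structure, but the second cannot be handed a stale or mismatched CPU number; a sketch, assuming preemption is already disabled:

	struct rcu_data *a = per_cpu_ptr(rcu_preempt_state.rda, smp_processor_id());
	struct rcu_data *b = this_cpu_ptr(rcu_preempt_state.rda);

	/* a == b here: this_cpu_ptr() derives the CPU itself, so no caller
	 * can pass a CPU it is not actually running on. */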
/*
 * Because preemptible RCU does not exist, we never have to check for
* CPUs being in quiescent states.
*/
-static void rcu_preempt_note_context_switch(int cpu)
+static void rcu_preempt_note_context_switch(void)
{
}
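The empty body above is the usual configuration idiom: when preemptible RCU is not built in, the hook compiles to nothing and its call site in rcu_note_context_switch() costs nothing. A sketch of the pattern, assuming the CONFIG_PREEMPT_RCU spelling of the option:

#ifdef CONFIG_PREEMPT_RCU
static void rcu_preempt_note_context_switch(void)
{
	/* Track the possibly-preempted RCU reader, as above. */
}
#else /* #ifdef CONFIG_PREEMPT_RCU */
static void rcu_preempt_note_context_switch(void)
{
	/* No preemptible readers exist; the compiler drops the call. */
}
#endif /* #else #ifdef CONFIG_PREEMPT_RCU */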
preempt_disable();
cpu = smp_processor_id();
rq = cpu_rq(cpu);
- rcu_note_context_switch(cpu);
+ rcu_note_context_switch();
prev = rq->curr;
schedule_debug(prev);
/*
 * We can safely run softirq on inline stack, as we are not deep
 * in the task stack here.
*/
__do_softirq();
- rcu_note_context_switch(cpu);
+ rcu_note_context_switch();
local_irq_enable();
cond_resched();
return;
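This call site holds interrupts disabled rather than calling preempt_disable(), which still satisfies the contract: with hard IRQs off, the task can be neither preempted nor migrated, so the no-argument call acts on the right CPU. A sketch of the equivalence, assuming kernel context:

	local_irq_disable();		/* IRQs off also forbids preemption... */
	rcu_note_context_switch();	/* ...so the current-CPU contract holds */
	local_irq_enable();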