/*
 * Read-Copy Update mechanism for mutual exclusion (tree-based version)
 * Internal non-public definitions that provide either classic
 * or preemptible semantics.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
 *
 * Copyright Red Hat, 2009
 * Copyright IBM Corporation, 2009
 *
 * Author: Ingo Molnar <mingo@elte.hu>
 *	   Paul E. McKenney <paulmck@linux.vnet.ibm.com>
 */
#include <linux/delay.h>
#include <linux/stop_machine.h>

#define RCU_KTHREAD_PRIO 1

#ifdef CONFIG_RCU_BOOST
#define RCU_BOOST_PRIO CONFIG_RCU_BOOST_PRIO
#else
#define RCU_BOOST_PRIO RCU_KTHREAD_PRIO
#endif
/*
 * Check the RCU kernel configuration parameters and print informative
 * messages about anything out of the ordinary.  If you like #ifdef, you
 * will love this function.
 */
static void __init rcu_bootup_announce_oddness(void)
{
#ifdef CONFIG_RCU_TRACE
	printk(KERN_INFO "\tRCU debugfs-based tracing is enabled.\n");
#endif
#if (defined(CONFIG_64BIT) && CONFIG_RCU_FANOUT != 64) || (!defined(CONFIG_64BIT) && CONFIG_RCU_FANOUT != 32)
	printk(KERN_INFO "\tCONFIG_RCU_FANOUT set to non-default value of %d\n",
	       CONFIG_RCU_FANOUT);
#endif
#ifdef CONFIG_RCU_FANOUT_EXACT
	printk(KERN_INFO "\tHierarchical RCU autobalancing is disabled.\n");
#endif
#ifdef CONFIG_RCU_FAST_NO_HZ
	printk(KERN_INFO
	       "\tRCU dyntick-idle grace-period acceleration is enabled.\n");
#endif
#ifdef CONFIG_PROVE_RCU
	printk(KERN_INFO "\tRCU lockdep checking is enabled.\n");
#endif
#ifdef CONFIG_RCU_TORTURE_TEST_RUNNABLE
	printk(KERN_INFO "\tRCU torture testing starts during boot.\n");
#endif
#if defined(CONFIG_TREE_PREEMPT_RCU) && !defined(CONFIG_RCU_CPU_STALL_VERBOSE)
	printk(KERN_INFO "\tVerbose stalled-CPUs detection is disabled.\n");
#endif
#if NUM_RCU_LVL_4 != 0
	printk(KERN_INFO "\tExperimental four-level hierarchy is enabled.\n");
#endif
}
#ifdef CONFIG_TREE_PREEMPT_RCU

struct rcu_state rcu_preempt_state = RCU_STATE_INITIALIZER(rcu_preempt);
DEFINE_PER_CPU(struct rcu_data, rcu_preempt_data);
static struct rcu_state *rcu_state = &rcu_preempt_state;

static void rcu_read_unlock_special(struct task_struct *t);
static int rcu_preempted_readers_exp(struct rcu_node *rnp);
/*
 * Tell them what RCU they are running.
 */
static void __init rcu_bootup_announce(void)
{
	printk(KERN_INFO "Preemptible hierarchical RCU implementation.\n");
	rcu_bootup_announce_oddness();
}

/*
 * Return the number of RCU-preempt batches processed thus far
 * for debug and statistics.
 */
long rcu_batches_completed_preempt(void)
{
	return rcu_preempt_state.completed;
}
EXPORT_SYMBOL_GPL(rcu_batches_completed_preempt);

/*
 * Return the number of RCU batches processed thus far for debug & stats.
 */
long rcu_batches_completed(void)
{
	return rcu_batches_completed_preempt();
}
EXPORT_SYMBOL_GPL(rcu_batches_completed);

/*
 * Force a quiescent state for preemptible RCU.
 */
void rcu_force_quiescent_state(void)
{
	force_quiescent_state(&rcu_preempt_state, 0);
}
EXPORT_SYMBOL_GPL(rcu_force_quiescent_state);

/*
 * Record a preemptible-RCU quiescent state for the specified CPU.  Note
 * that this just means that the task currently running on the CPU is
 * not in a quiescent state.  There might be any number of tasks blocked
 * while in an RCU read-side critical section.
 *
 * Unlike the other rcu_*_qs() functions, callers to this function
 * must disable irqs in order to protect the assignment to
 * ->rcu_read_unlock_special.
 */
static void rcu_preempt_qs(int cpu)
{
	struct rcu_data *rdp = &per_cpu(rcu_preempt_data, cpu);

	rdp->passed_quiesce_gpnum = rdp->gpnum;
	if (rdp->passed_quiesce == 0)
		trace_rcu_grace_period("rcu_preempt", rdp->gpnum, "cpuqs");
	rdp->passed_quiesce = 1;
	current->rcu_read_unlock_special &= ~RCU_READ_UNLOCK_NEED_QS;
}
/*
 * We have entered the scheduler, and the current task might soon be
 * context-switched away from.  If this task is in an RCU read-side
 * critical section, we will no longer be able to rely on the CPU to
 * record that fact, so we enqueue the task on the blkd_tasks list.
 * The task will dequeue itself when it exits the outermost enclosing
 * RCU read-side critical section.  Therefore, the current grace period
 * cannot be permitted to complete until the blkd_tasks list entries
 * predating the current grace period drain, in other words, until
 * rnp->gp_tasks becomes NULL.
 *
 * Caller must disable preemption.
 */
static void rcu_preempt_note_context_switch(int cpu)
{
	struct task_struct *t = current;
	unsigned long flags;
	struct rcu_data *rdp;
	struct rcu_node *rnp;
	if (t->rcu_read_lock_nesting > 0 &&
	    (t->rcu_read_unlock_special & RCU_READ_UNLOCK_BLOCKED) == 0) {

		/* Possibly blocking in an RCU read-side critical section. */
		rdp = per_cpu_ptr(rcu_preempt_state.rda, cpu);
		rnp = rdp->mynode;
		raw_spin_lock_irqsave(&rnp->lock, flags);
		t->rcu_read_unlock_special |= RCU_READ_UNLOCK_BLOCKED;
		t->rcu_blocked_node = rnp;
		/*
		 * If this CPU has already checked in, then this task
		 * will hold up the next grace period rather than the
		 * current grace period.  Queue the task accordingly.
		 * If the task is queued for the current grace period
		 * (i.e., this CPU has not yet passed through a quiescent
		 * state for the current grace period), then as long
		 * as that task remains queued, the current grace period
		 * cannot end.  Note that there is some uncertainty as
		 * to exactly when the current grace period started.
		 * We take a conservative approach, which can result
		 * in unnecessarily waiting on tasks that started very
		 * slightly after the current grace period began.  C'est
		 * la vie!
		 *
		 * But first, note that the current CPU must still be
		 * online!
		 */
		WARN_ON_ONCE((rdp->grpmask & rnp->qsmaskinit) == 0);
		WARN_ON_ONCE(!list_empty(&t->rcu_node_entry));
		if ((rnp->qsmask & rdp->grpmask) && rnp->gp_tasks != NULL) {
			list_add(&t->rcu_node_entry, rnp->gp_tasks->prev);
			rnp->gp_tasks = &t->rcu_node_entry;
#ifdef CONFIG_RCU_BOOST
			if (rnp->boost_tasks != NULL)
				rnp->boost_tasks = rnp->gp_tasks;
#endif /* #ifdef CONFIG_RCU_BOOST */
		} else {
			list_add(&t->rcu_node_entry, &rnp->blkd_tasks);
			if (rnp->qsmask & rdp->grpmask)
				rnp->gp_tasks = &t->rcu_node_entry;
		}
		trace_rcu_preempt_task(rdp->rsp->name, t->pid,
				       (rnp->qsmask & rdp->grpmask)
				       ? rnp->gpnum : rnp->gpnum + 1);
		raw_spin_unlock_irqrestore(&rnp->lock, flags);
	} else if (t->rcu_read_lock_nesting < 0 &&
		   t->rcu_read_unlock_special) {

		/*
		 * Complete exit from RCU read-side critical section on
		 * behalf of preempted instance of __rcu_read_unlock().
		 */
		rcu_read_unlock_special(t);
	}
	/*
	 * Either we were not in an RCU read-side critical section to
	 * begin with, or we have now recorded that critical section
	 * globally.  Either way, we can now note a quiescent state
	 * for this CPU.  Again, if we were in an RCU read-side critical
	 * section, and if that critical section was blocking the current
	 * grace period, then the fact that the task has been enqueued
	 * means that we continue to block the current grace period.
	 */
	local_irq_save(flags);
	rcu_preempt_qs(cpu);
	local_irq_restore(flags);
}
/*
 * Tree-preemptible RCU implementation for rcu_read_lock().
 * Just increment ->rcu_read_lock_nesting, shared state will be updated
 * if we block.
 */
void __rcu_read_lock(void)
{
	current->rcu_read_lock_nesting++;
	barrier();  /* needed if we ever invoke rcu_read_lock in rcutree.c */
}
EXPORT_SYMBOL_GPL(__rcu_read_lock);
/*
 * Check for preempted RCU readers blocking the current grace period
 * for the specified rcu_node structure.  If the caller needs a reliable
 * answer, it must hold the rcu_node's ->lock.
 */
static int rcu_preempt_blocked_readers_cgp(struct rcu_node *rnp)
{
	return rnp->gp_tasks != NULL;
}
/*
 * Record a quiescent state for all tasks that were previously queued
 * on the specified rcu_node structure and that were blocking the current
 * RCU grace period.  The caller must hold the specified rnp->lock with
 * irqs disabled, and this lock is released upon return, but irqs remain
 * disabled.
 */
static void rcu_report_unblock_qs_rnp(struct rcu_node *rnp, unsigned long flags)
	__releases(rnp->lock)
{
	unsigned long mask;
	struct rcu_node *rnp_p;

	if (rnp->qsmask != 0 || rcu_preempt_blocked_readers_cgp(rnp)) {
		raw_spin_unlock_irqrestore(&rnp->lock, flags);
		return;  /* Still need more quiescent states! */
	}

	rnp_p = rnp->parent;
	if (rnp_p == NULL) {
		/*
		 * Either there is only one rcu_node in the tree,
		 * or tasks were kicked up to root rcu_node due to
		 * CPUs going offline.
		 */
		rcu_report_qs_rsp(&rcu_preempt_state, flags);
		return;
	}

	/* Report up the rest of the hierarchy. */
	mask = rnp->grpmask;
	raw_spin_unlock(&rnp->lock);	/* irqs remain disabled. */
	raw_spin_lock(&rnp_p->lock);	/* irqs already disabled. */
	rcu_report_qs_rnp(mask, &rcu_preempt_state, rnp_p, flags);
}
/*
 * Advance a ->blkd_tasks-list pointer to the next entry, returning NULL
 * instead if at the end of the list.
 */
static struct list_head *rcu_next_node_entry(struct task_struct *t,
					     struct rcu_node *rnp)
{
	struct list_head *np;

	np = t->rcu_node_entry.next;
	if (np == &rnp->blkd_tasks)
		np = NULL;
	return np;
}
/*
 * Handle special cases during rcu_read_unlock(), such as needing to
 * notify RCU core processing or task having blocked during the RCU
 * read-side critical section.
 */
static noinline void rcu_read_unlock_special(struct task_struct *t)
{
	int empty;
	int empty_exp;
	int empty_exp_now;
	unsigned long flags;
	struct list_head *np;
#ifdef CONFIG_RCU_BOOST
	struct rt_mutex *rbmp = NULL;
#endif /* #ifdef CONFIG_RCU_BOOST */
	struct rcu_node *rnp;
	int special;
	/* NMI handlers cannot block and cannot safely manipulate state. */
	if (in_nmi())
		return;

	local_irq_save(flags);

	/*
	 * If RCU core is waiting for this CPU to exit critical section,
	 * let it know that we have done so.
	 */
	special = t->rcu_read_unlock_special;
	if (special & RCU_READ_UNLOCK_NEED_QS) {
		rcu_preempt_qs(smp_processor_id());
	}

	/* Hardware IRQ handlers cannot block. */
	if (in_irq() || in_serving_softirq()) {
		local_irq_restore(flags);
		return;
	}
	/* Clean up if blocked during RCU read-side critical section. */
	if (special & RCU_READ_UNLOCK_BLOCKED) {
		t->rcu_read_unlock_special &= ~RCU_READ_UNLOCK_BLOCKED;

		/*
		 * Remove this task from the list it blocked on.  The
		 * task can migrate while we acquire the lock, but at
		 * most one time.  So at most two passes through loop.
		 */
		for (;;) {
			rnp = t->rcu_blocked_node;
			raw_spin_lock(&rnp->lock);  /* irqs already disabled. */
			if (rnp == t->rcu_blocked_node)
				break;
			raw_spin_unlock(&rnp->lock); /* irqs remain disabled. */
		}
		empty = !rcu_preempt_blocked_readers_cgp(rnp);
		empty_exp = !rcu_preempted_readers_exp(rnp);
		smp_mb(); /* ensure expedited fastpath sees end of RCU c-s. */
		np = rcu_next_node_entry(t, rnp);
		list_del_init(&t->rcu_node_entry);
		t->rcu_blocked_node = NULL;
		trace_rcu_unlock_preempted_task("rcu_preempt",
						rnp->gpnum, t->pid);
		if (&t->rcu_node_entry == rnp->gp_tasks)
			rnp->gp_tasks = np;
		if (&t->rcu_node_entry == rnp->exp_tasks)
			rnp->exp_tasks = np;
#ifdef CONFIG_RCU_BOOST
		if (&t->rcu_node_entry == rnp->boost_tasks)
			rnp->boost_tasks = np;
		/* Snapshot/clear ->rcu_boost_mutex with rcu_node lock held. */
		if (t->rcu_boost_mutex) {
			rbmp = t->rcu_boost_mutex;
			t->rcu_boost_mutex = NULL;
		}
#endif /* #ifdef CONFIG_RCU_BOOST */
		/*
		 * If this was the last task on the current list, and if
		 * we aren't waiting on any CPUs, report the quiescent state.
		 * Note that rcu_report_unblock_qs_rnp() releases rnp->lock,
		 * so we must take a snapshot of the expedited state.
		 */
		empty_exp_now = !rcu_preempted_readers_exp(rnp);
		if (!empty && !rcu_preempt_blocked_readers_cgp(rnp)) {
			trace_rcu_quiescent_state_report("preempt_rcu",
							 rnp->gpnum,
							 0, rnp->qsmask,
							 rnp->level,
							 rnp->grplo,
							 rnp->grphi,
							 !!rnp->gp_tasks);
			rcu_report_unblock_qs_rnp(rnp, flags);
		} else {
			raw_spin_unlock_irqrestore(&rnp->lock, flags);
		}

#ifdef CONFIG_RCU_BOOST
		/* Unboost if we were boosted. */
		if (rbmp)
			rt_mutex_unlock(rbmp);
#endif /* #ifdef CONFIG_RCU_BOOST */

		/*
		 * If this was the last task on the expedited lists,
		 * then we need to report up the rcu_node hierarchy.
		 */
		if (!empty_exp && empty_exp_now)
			rcu_report_exp_rnp(&rcu_preempt_state, rnp, true);
	} else {
		local_irq_restore(flags);
	}
}
/*
 * Tree-preemptible RCU implementation for rcu_read_unlock().
 * Decrement ->rcu_read_lock_nesting.  If the result is zero (outermost
 * rcu_read_unlock()) and ->rcu_read_unlock_special is non-zero, then
 * invoke rcu_read_unlock_special() to clean up after a context switch
 * in an RCU read-side critical section and other special cases.
 */
void __rcu_read_unlock(void)
{
	struct task_struct *t = current;

	if (t->rcu_read_lock_nesting != 1) {
		--t->rcu_read_lock_nesting;
	} else {
		barrier();  /* critical section before exit code. */
		t->rcu_read_lock_nesting = INT_MIN;
		barrier();  /* assign before ->rcu_read_unlock_special load */
		if (unlikely(ACCESS_ONCE(t->rcu_read_unlock_special)))
			rcu_read_unlock_special(t);
		barrier();  /* ->rcu_read_unlock_special load before assign */
		t->rcu_read_lock_nesting = 0;
	}
#ifdef CONFIG_PROVE_LOCKING
	{
		int rrln = ACCESS_ONCE(t->rcu_read_lock_nesting);

		WARN_ON_ONCE(rrln < 0 && rrln > INT_MIN / 2);
	}
#endif /* #ifdef CONFIG_PROVE_LOCKING */
}
EXPORT_SYMBOL_GPL(__rcu_read_unlock);
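
/*
 * Illustrative sketch (not part of the original file): how a reader
 * typically uses the primitives defined above.  The structure "foo" and
 * the global pointer "gp" are hypothetical names used only for this
 * example.
 *
 *	struct foo { int a; struct rcu_head rh; };
 *	struct foo __rcu *gp;
 *
 *	int read_foo_a(void)
 *	{
 *		int a = -1;
 *		struct foo *p;
 *
 *		rcu_read_lock();		// may be preempted under TREE_PREEMPT_RCU
 *		p = rcu_dereference(gp);	// fetch RCU-protected pointer
 *		if (p != NULL)
 *			a = p->a;
 *		rcu_read_unlock();		// outermost unlock may enter rcu_read_unlock_special()
 *		return a;
 *	}
 */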
#ifdef CONFIG_RCU_CPU_STALL_VERBOSE

/*
 * Dump detailed information for all tasks blocking the current RCU
 * grace period on the specified rcu_node structure.
 */
static void rcu_print_detail_task_stall_rnp(struct rcu_node *rnp)
{
	unsigned long flags;
	struct task_struct *t;

	if (!rcu_preempt_blocked_readers_cgp(rnp))
		return;
	raw_spin_lock_irqsave(&rnp->lock, flags);
	t = list_entry(rnp->gp_tasks,
		       struct task_struct, rcu_node_entry);
	list_for_each_entry_continue(t, &rnp->blkd_tasks, rcu_node_entry)
		sched_show_task(t);
	raw_spin_unlock_irqrestore(&rnp->lock, flags);
}
/*
 * Dump detailed information for all tasks blocking the current RCU
 * grace period.
 */
static void rcu_print_detail_task_stall(struct rcu_state *rsp)
{
	struct rcu_node *rnp = rcu_get_root(rsp);

	rcu_print_detail_task_stall_rnp(rnp);
	rcu_for_each_leaf_node(rsp, rnp)
		rcu_print_detail_task_stall_rnp(rnp);
}
#else /* #ifdef CONFIG_RCU_CPU_STALL_VERBOSE */

static void rcu_print_detail_task_stall(struct rcu_state *rsp)
{
}

#endif /* #else #ifdef CONFIG_RCU_CPU_STALL_VERBOSE */
/*
 * Scan the current list of tasks blocked within RCU read-side critical
 * sections, printing out the tid of each.
 */
static int rcu_print_task_stall(struct rcu_node *rnp)
{
	struct task_struct *t;
	int ndetected = 0;

	if (!rcu_preempt_blocked_readers_cgp(rnp))
		return 0;
	t = list_entry(rnp->gp_tasks,
		       struct task_struct, rcu_node_entry);
	list_for_each_entry_continue(t, &rnp->blkd_tasks, rcu_node_entry) {
		printk(" P%d", t->pid);
		ndetected++;
	}
	return ndetected;
}
/*
 * Suppress preemptible RCU's CPU stall warnings by pushing the
 * time of the next stall-warning message comfortably far into the
 * future.
 */
static void rcu_preempt_stall_reset(void)
{
	rcu_preempt_state.jiffies_stall = jiffies + ULONG_MAX / 2;
}
/*
 * Check that the list of blocked tasks for the newly completed grace
 * period is in fact empty.  It is a serious bug to complete a grace
 * period that still has RCU readers blocked!  This function must be
 * invoked -before- updating this rnp's ->gpnum, and the rnp's ->lock
 * must be held by the caller.
 *
 * Also, if there are blocked tasks on the list, they automatically
 * block the newly created grace period, so set up ->gp_tasks accordingly.
 */
static void rcu_preempt_check_blocked_tasks(struct rcu_node *rnp)
{
	WARN_ON_ONCE(rcu_preempt_blocked_readers_cgp(rnp));
	if (!list_empty(&rnp->blkd_tasks))
		rnp->gp_tasks = rnp->blkd_tasks.next;
	WARN_ON_ONCE(rnp->qsmask);
}
#ifdef CONFIG_HOTPLUG_CPU

/*
 * Handle tasklist migration for case in which all CPUs covered by the
 * specified rcu_node have gone offline.  Move them up to the root
 * rcu_node.  The reason for not just moving them to the immediate
 * parent is to remove the need for rcu_read_unlock_special() to
 * make more than two attempts to acquire the target rcu_node's lock.
 *
 * Returns 1 if there was previously a task blocking the current grace
 * period on the specified rcu_node structure.
 *
 * The caller must hold rnp->lock with irqs disabled.
 */
static int rcu_preempt_offline_tasks(struct rcu_state *rsp,
				     struct rcu_node *rnp,
				     struct rcu_data *rdp)
{
	struct list_head *lp;
	struct list_head *lp_root;
	int retval = 0;
	struct rcu_node *rnp_root = rcu_get_root(rsp);
	struct task_struct *t;
	if (rnp == rnp_root) {
		WARN_ONCE(1, "Last CPU thought to be offlined?");
		return 0;  /* Shouldn't happen: at least one CPU online. */
	}

	/* If we are on an internal node, complain bitterly. */
	WARN_ON_ONCE(rnp != rdp->mynode);

	/*
	 * Move tasks up to root rcu_node.  Don't try to get fancy for
	 * this corner-case operation -- just put this node's tasks
	 * at the head of the root node's list, and update the root node's
	 * ->gp_tasks and ->exp_tasks pointers to those of this node's,
	 * if non-NULL.  This might result in waiting for more tasks than
	 * absolutely necessary, but this is a good performance/complexity
	 * tradeoff.
	 */
	if (rcu_preempt_blocked_readers_cgp(rnp))
		retval |= RCU_OFL_TASKS_NORM_GP;
	if (rcu_preempted_readers_exp(rnp))
		retval |= RCU_OFL_TASKS_EXP_GP;
	lp = &rnp->blkd_tasks;
	lp_root = &rnp_root->blkd_tasks;
	while (!list_empty(lp)) {
		t = list_entry(lp->next, typeof(*t), rcu_node_entry);
		raw_spin_lock(&rnp_root->lock); /* irqs already disabled */
		list_del(&t->rcu_node_entry);
		t->rcu_blocked_node = rnp_root;
		list_add(&t->rcu_node_entry, lp_root);
		if (&t->rcu_node_entry == rnp->gp_tasks)
			rnp_root->gp_tasks = rnp->gp_tasks;
		if (&t->rcu_node_entry == rnp->exp_tasks)
			rnp_root->exp_tasks = rnp->exp_tasks;
#ifdef CONFIG_RCU_BOOST
		if (&t->rcu_node_entry == rnp->boost_tasks)
			rnp_root->boost_tasks = rnp->boost_tasks;
#endif /* #ifdef CONFIG_RCU_BOOST */
		raw_spin_unlock(&rnp_root->lock); /* irqs still disabled */
	}

#ifdef CONFIG_RCU_BOOST
	/* In case root is being boosted and leaf is not. */
	raw_spin_lock(&rnp_root->lock); /* irqs already disabled */
	if (rnp_root->boost_tasks != NULL &&
	    rnp_root->boost_tasks != rnp_root->gp_tasks)
		rnp_root->boost_tasks = rnp_root->gp_tasks;
	raw_spin_unlock(&rnp_root->lock); /* irqs still disabled */
#endif /* #ifdef CONFIG_RCU_BOOST */

	rnp->gp_tasks = NULL;
	rnp->exp_tasks = NULL;
	return retval;
}
#endif /* #ifdef CONFIG_HOTPLUG_CPU */

/*
 * Do CPU-offline processing for preemptible RCU.
 */
static void rcu_preempt_cleanup_dead_cpu(int cpu)
{
	rcu_cleanup_dead_cpu(cpu, &rcu_preempt_state);
}
/*
 * Check for a quiescent state from the current CPU.  When a task blocks,
 * the task is recorded in the corresponding CPU's rcu_node structure,
 * which is checked elsewhere.
 *
 * Caller must disable hard irqs.
 */
static void rcu_preempt_check_callbacks(int cpu)
{
	struct task_struct *t = current;

	if (t->rcu_read_lock_nesting == 0) {
		rcu_preempt_qs(cpu);
		return;
	}
	if (t->rcu_read_lock_nesting > 0 &&
	    per_cpu(rcu_preempt_data, cpu).qs_pending)
		t->rcu_read_unlock_special |= RCU_READ_UNLOCK_NEED_QS;
}
/*
 * Process callbacks for preemptible RCU.
 */
static void rcu_preempt_process_callbacks(void)
{
	__rcu_process_callbacks(&rcu_preempt_state,
				&__get_cpu_var(rcu_preempt_data));
}

#ifdef CONFIG_RCU_BOOST

static void rcu_preempt_do_callbacks(void)
{
	rcu_do_batch(&rcu_preempt_state, &__get_cpu_var(rcu_preempt_data));
}

#endif /* #ifdef CONFIG_RCU_BOOST */
/*
 * Queue a preemptible-RCU callback for invocation after a grace period.
 */
void call_rcu(struct rcu_head *head, void (*func)(struct rcu_head *rcu))
{
	__call_rcu(head, func, &rcu_preempt_state, 0);
}
EXPORT_SYMBOL_GPL(call_rcu);
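
/*
 * Illustrative sketch (not part of the original file): queueing a
 * callback with call_rcu().  The structure "foo", pointer "gp", lock
 * "foo_lock", and callback "foo_reclaim" are hypothetical names used
 * only for this example.
 *
 *	static void foo_reclaim(struct rcu_head *rh)
 *	{
 *		struct foo *p = container_of(rh, struct foo, rh);
 *
 *		kfree(p);	// safe: all pre-existing readers have finished
 *	}
 *
 *	void update_foo(struct foo *newp)
 *	{
 *		struct foo *old;
 *
 *		old = rcu_dereference_protected(gp, lockdep_is_held(&foo_lock));
 *		rcu_assign_pointer(gp, newp);
 *		call_rcu(&old->rh, foo_reclaim);	// non-blocking; runs after a grace period
 *	}
 */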
/*
 * Queue an RCU callback for lazy invocation after a grace period.
 * This will likely be later named something like "call_rcu_lazy()",
 * but this change will require some way of tagging the lazy RCU
 * callbacks in the list of pending callbacks.  Until then, this
 * function may only be called from __kfree_rcu().
 */
void kfree_call_rcu(struct rcu_head *head,
		    void (*func)(struct rcu_head *rcu))
{
	__call_rcu(head, func, &rcu_preempt_state, 1);
}
EXPORT_SYMBOL_GPL(kfree_call_rcu);
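
/*
 * Illustrative sketch (not part of the original file): callers do not
 * normally invoke kfree_call_rcu() directly; it is reached through the
 * kfree_rcu() wrapper, which needs only the name of the rcu_head field.
 * "struct foo" and "old" are hypothetical names for this example.
 *
 *	struct foo { int a; struct rcu_head rh; };
 *
 *	kfree_rcu(old, rh);	// frees "old" after a grace period, no explicit callback
 */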
/**
 * synchronize_rcu - wait until a grace period has elapsed.
 *
 * Control will return to the caller some time after a full grace
 * period has elapsed, in other words after all currently executing RCU
 * read-side critical sections have completed.  Note, however, that
 * upon return from synchronize_rcu(), the caller might well be executing
 * concurrently with new RCU read-side critical sections that began while
 * synchronize_rcu() was waiting.  RCU read-side critical sections are
 * delimited by rcu_read_lock() and rcu_read_unlock(), and may be nested.
 */
void synchronize_rcu(void)
{
	rcu_lockdep_assert(!lock_is_held(&rcu_bh_lock_map) &&
			   !lock_is_held(&rcu_lock_map) &&
			   !lock_is_held(&rcu_sched_lock_map),
			   "Illegal synchronize_rcu() in RCU read-side critical section");
	if (!rcu_scheduler_active)
		return;
	wait_rcu_gp(call_rcu);
}
EXPORT_SYMBOL_GPL(synchronize_rcu);
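
/*
 * Illustrative sketch (not part of the original file): a blocking
 * updater built on synchronize_rcu().  "gp", "struct foo", and
 * "foo_lock" are hypothetical names used only for this example.
 *
 *	void remove_foo(void)
 *	{
 *		struct foo *old;
 *
 *		spin_lock(&foo_lock);
 *		old = rcu_dereference_protected(gp, lockdep_is_held(&foo_lock));
 *		rcu_assign_pointer(gp, NULL);
 *		spin_unlock(&foo_lock);
 *
 *		synchronize_rcu();	// wait for pre-existing readers to finish
 *		kfree(old);		// no reader can still hold a reference
 *	}
 */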
static DECLARE_WAIT_QUEUE_HEAD(sync_rcu_preempt_exp_wq);
static long sync_rcu_preempt_exp_count;
static DEFINE_MUTEX(sync_rcu_preempt_exp_mutex);
/*
 * Return non-zero if there are any tasks in RCU read-side critical
 * sections blocking the current preemptible-RCU expedited grace period.
 * If there is no preemptible-RCU expedited grace period currently in
 * progress, returns zero unconditionally.
 */
static int rcu_preempted_readers_exp(struct rcu_node *rnp)
{
	return rnp->exp_tasks != NULL;
}
/*
 * Return non-zero if there is no RCU expedited grace period in progress
 * for the specified rcu_node structure, in other words, if all CPUs and
 * tasks covered by the specified rcu_node structure have done their bit
 * for the current expedited grace period.  Works only for preemptible
 * RCU -- other RCU implementations use other means.
 *
 * Caller must hold sync_rcu_preempt_exp_mutex.
 */
static int sync_rcu_preempt_exp_done(struct rcu_node *rnp)
{
	return !rcu_preempted_readers_exp(rnp) &&
	       ACCESS_ONCE(rnp->expmask) == 0;
}
/*
 * Report the exit from RCU read-side critical section for the last task
 * that queued itself during or before the current expedited preemptible-RCU
 * grace period.  This event is reported either to the rcu_node structure on
 * which the task was queued or to one of that rcu_node structure's ancestors,
 * recursively up the tree.  (Calm down, calm down, we do the recursion
 * iteratively!)
 *
 * Most callers will set the "wake" flag, but the task initiating the
 * expedited grace period need not wake itself.
 *
 * Caller must hold sync_rcu_preempt_exp_mutex.
 */
static void rcu_report_exp_rnp(struct rcu_state *rsp, struct rcu_node *rnp,
			       bool wake)
{
	unsigned long flags;
	unsigned long mask;

	raw_spin_lock_irqsave(&rnp->lock, flags);
	for (;;) {
		if (!sync_rcu_preempt_exp_done(rnp)) {
			raw_spin_unlock_irqrestore(&rnp->lock, flags);
			break;
		}
		if (rnp->parent == NULL) {
			raw_spin_unlock_irqrestore(&rnp->lock, flags);
			if (wake)
				wake_up(&sync_rcu_preempt_exp_wq);
			break;
		}
		mask = rnp->grpmask;
		raw_spin_unlock(&rnp->lock); /* irqs remain disabled */
		rnp = rnp->parent;
		raw_spin_lock(&rnp->lock); /* irqs already disabled */
		rnp->expmask &= ~mask;
	}
}
/*
 * Snapshot the tasks blocking the newly started preemptible-RCU expedited
 * grace period for the specified rcu_node structure.  If there are no such
 * tasks, report it up the rcu_node hierarchy.
 *
 * Caller must hold sync_rcu_preempt_exp_mutex and rsp->onofflock.
 */
static void
sync_rcu_preempt_exp_init(struct rcu_state *rsp, struct rcu_node *rnp)
{
	unsigned long flags;
	int must_wait = 0;

	raw_spin_lock_irqsave(&rnp->lock, flags);
	if (list_empty(&rnp->blkd_tasks)) {
		raw_spin_unlock_irqrestore(&rnp->lock, flags);
	} else {
		rnp->exp_tasks = rnp->blkd_tasks.next;
		rcu_initiate_boost(rnp, flags); /* releases rnp->lock */
		must_wait = 1;
	}
	if (!must_wait)
		rcu_report_exp_rnp(rsp, rnp, false); /* Don't wake self. */
}
/*
 * Wait for an rcu-preempt grace period, but expedite it.  The basic idea
 * is to invoke synchronize_sched_expedited() to push all the tasks to
 * the ->blkd_tasks lists and wait for this list to drain.
 */
void synchronize_rcu_expedited(void)
{
	unsigned long flags;
	struct rcu_node *rnp;
	struct rcu_state *rsp = &rcu_preempt_state;
	long snap;
	int trycount = 0;

	smp_mb(); /* Caller's modifications seen first by other CPUs. */
	snap = ACCESS_ONCE(sync_rcu_preempt_exp_count) + 1;
	smp_mb(); /* Above access cannot bleed into critical section. */

	/*
	 * Acquire lock, falling back to synchronize_rcu() if too many
	 * lock-acquisition failures.  Of course, if someone does the
	 * expedited grace period for us, just leave.
	 */
	while (!mutex_trylock(&sync_rcu_preempt_exp_mutex)) {
		if (trycount++ < 10) {
			udelay(trycount * num_online_cpus());
		} else {
			synchronize_rcu();
			return;
		}
		if ((ACCESS_ONCE(sync_rcu_preempt_exp_count) - snap) > 0)
			goto mb_ret; /* Others did our work for us. */
	}
	if ((ACCESS_ONCE(sync_rcu_preempt_exp_count) - snap) > 0)
		goto unlock_mb_ret; /* Others did our work for us. */

	/* force all RCU readers onto ->blkd_tasks lists. */
	synchronize_sched_expedited();

	raw_spin_lock_irqsave(&rsp->onofflock, flags);

	/* Initialize ->expmask for all non-leaf rcu_node structures. */
	rcu_for_each_nonleaf_node_breadth_first(rsp, rnp) {
		raw_spin_lock(&rnp->lock); /* irqs already disabled. */
		rnp->expmask = rnp->qsmaskinit;
		raw_spin_unlock(&rnp->lock); /* irqs remain disabled. */
	}

	/* Snapshot current state of ->blkd_tasks lists. */
	rcu_for_each_leaf_node(rsp, rnp)
		sync_rcu_preempt_exp_init(rsp, rnp);
	if (NUM_RCU_NODES > 1)
		sync_rcu_preempt_exp_init(rsp, rcu_get_root(rsp));

	raw_spin_unlock_irqrestore(&rsp->onofflock, flags);

	/* Wait for snapshotted ->blkd_tasks lists to drain. */
	rnp = rcu_get_root(rsp);
	wait_event(sync_rcu_preempt_exp_wq,
		   sync_rcu_preempt_exp_done(rnp));

	/* Clean up and exit. */
	smp_mb(); /* ensure expedited GP seen before counter increment. */
	ACCESS_ONCE(sync_rcu_preempt_exp_count)++;
unlock_mb_ret:
	mutex_unlock(&sync_rcu_preempt_exp_mutex);
mb_ret:
	smp_mb(); /* ensure subsequent action seen after grace period. */
}
EXPORT_SYMBOL_GPL(synchronize_rcu_expedited);
/*
 * Check to see if there is any immediate preemptible-RCU-related work
 * to be done.
 */
static int rcu_preempt_pending(int cpu)
{
	return __rcu_pending(&rcu_preempt_state,
			     &per_cpu(rcu_preempt_data, cpu));
}
/*
 * Does preemptible RCU need the CPU to stay out of dynticks mode?
 */
static int rcu_preempt_needs_cpu(int cpu)
{
	return !!per_cpu(rcu_preempt_data, cpu).nxtlist;
}

/*
 * rcu_barrier - Wait until all in-flight call_rcu() callbacks complete.
 */
void rcu_barrier(void)
{
	_rcu_barrier(&rcu_preempt_state, call_rcu);
}
EXPORT_SYMBOL_GPL(rcu_barrier);

/*
 * Initialize preemptible RCU's per-CPU data.
 */
static void __cpuinit rcu_preempt_init_percpu_data(int cpu)
{
	rcu_init_percpu_data(cpu, &rcu_preempt_state, 1);
}

/*
 * Move preemptible RCU's callbacks from dying CPU to other online CPU
 * and record a quiescent state.
 */
static void rcu_preempt_cleanup_dying_cpu(void)
{
	rcu_cleanup_dying_cpu(&rcu_preempt_state);
}

/*
 * Initialize preemptible RCU's state structures.
 */
static void __init __rcu_init_preempt(void)
{
	rcu_init_one(&rcu_preempt_state, &rcu_preempt_data);
}
/*
 * Check for a task exiting while in a preemptible-RCU read-side
 * critical section, clean up if so.  No need to issue warnings,
 * as debug_check_no_locks_held() already does this if lockdep
 * is enabled.
 */
void exit_rcu(void)
{
	struct task_struct *t = current;

	if (t->rcu_read_lock_nesting == 0)
		return;
	t->rcu_read_lock_nesting = 1;
	__rcu_read_unlock();
}
#else /* #ifdef CONFIG_TREE_PREEMPT_RCU */

static struct rcu_state *rcu_state = &rcu_sched_state;

/*
 * Tell them what RCU they are running.
 */
static void __init rcu_bootup_announce(void)
{
	printk(KERN_INFO "Hierarchical RCU implementation.\n");
	rcu_bootup_announce_oddness();
}

/*
 * Return the number of RCU batches processed thus far for debug & stats.
 */
long rcu_batches_completed(void)
{
	return rcu_batches_completed_sched();
}
EXPORT_SYMBOL_GPL(rcu_batches_completed);

/*
 * Force a quiescent state for RCU, which, because there is no preemptible
 * RCU, becomes the same as rcu-sched.
 */
void rcu_force_quiescent_state(void)
{
	rcu_sched_force_quiescent_state();
}
EXPORT_SYMBOL_GPL(rcu_force_quiescent_state);
/*
 * Because preemptible RCU does not exist, we never have to check for
 * CPUs being in quiescent states.
 */
static void rcu_preempt_note_context_switch(int cpu)
{
}

/*
 * Because preemptible RCU does not exist, there are never any preempted
 * RCU readers.
 */
static int rcu_preempt_blocked_readers_cgp(struct rcu_node *rnp)
{
	return 0;
}
#ifdef CONFIG_HOTPLUG_CPU

/* Because preemptible RCU does not exist, no quieting of tasks. */
static void rcu_report_unblock_qs_rnp(struct rcu_node *rnp, unsigned long flags)
{
	raw_spin_unlock_irqrestore(&rnp->lock, flags);
}

#endif /* #ifdef CONFIG_HOTPLUG_CPU */
/*
 * Because preemptible RCU does not exist, we never have to check for
 * tasks blocked within RCU read-side critical sections.
 */
static void rcu_print_detail_task_stall(struct rcu_state *rsp)
{
}

/*
 * Because preemptible RCU does not exist, we never have to check for
 * tasks blocked within RCU read-side critical sections.
 */
static int rcu_print_task_stall(struct rcu_node *rnp)
{
	return 0;
}

/*
 * Because preemptible RCU does not exist, there is no need to suppress
 * its CPU stall warnings.
 */
static void rcu_preempt_stall_reset(void)
{
}
/*
 * Because there is no preemptible RCU, there can be no readers blocked,
 * so there is no need to check for blocked tasks.  So check only for
 * bogus qsmask values.
 */
static void rcu_preempt_check_blocked_tasks(struct rcu_node *rnp)
{
	WARN_ON_ONCE(rnp->qsmask);
}
#ifdef CONFIG_HOTPLUG_CPU

/*
 * Because preemptible RCU does not exist, it never needs to migrate
 * tasks that were blocked within RCU read-side critical sections, and
 * such non-existent tasks cannot possibly have been blocking the current
 * grace period.
 */
static int rcu_preempt_offline_tasks(struct rcu_state *rsp,
				     struct rcu_node *rnp,
				     struct rcu_data *rdp)
{
	return 0;
}

#endif /* #ifdef CONFIG_HOTPLUG_CPU */

/*
 * Because preemptible RCU does not exist, it never needs CPU-offline
 * processing.
 */
static void rcu_preempt_cleanup_dead_cpu(int cpu)
{
}
/*
 * Because preemptible RCU does not exist, it never has any callbacks
 * to check.
 */
static void rcu_preempt_check_callbacks(int cpu)
{
}

/*
 * Because preemptible RCU does not exist, it never has any callbacks
 * to process.
 */
static void rcu_preempt_process_callbacks(void)
{
}
/*
 * Queue an RCU callback for lazy invocation after a grace period.
 * This will likely be later named something like "call_rcu_lazy()",
 * but this change will require some way of tagging the lazy RCU
 * callbacks in the list of pending callbacks.  Until then, this
 * function may only be called from __kfree_rcu().
 *
 * Because there is no preemptible RCU, we use RCU-sched instead.
 */
void kfree_call_rcu(struct rcu_head *head,
		    void (*func)(struct rcu_head *rcu))
{
	__call_rcu(head, func, &rcu_sched_state, 1);
}
EXPORT_SYMBOL_GPL(kfree_call_rcu);

/*
 * Wait for an rcu-preempt grace period, but make it happen quickly.
 * But because preemptible RCU does not exist, map to rcu-sched.
 */
void synchronize_rcu_expedited(void)
{
	synchronize_sched_expedited();
}
EXPORT_SYMBOL_GPL(synchronize_rcu_expedited);
#ifdef CONFIG_HOTPLUG_CPU

/*
 * Because preemptible RCU does not exist, there is never any need to
 * report on tasks preempted in RCU read-side critical sections during
 * expedited RCU grace periods.
 */
static void rcu_report_exp_rnp(struct rcu_state *rsp, struct rcu_node *rnp,
			       bool wake)
{
}

#endif /* #ifdef CONFIG_HOTPLUG_CPU */
/*
 * Because preemptible RCU does not exist, it never has any work to do.
 */
static int rcu_preempt_pending(int cpu)
{
	return 0;
}

/*
 * Because preemptible RCU does not exist, it never needs any CPU.
 */
static int rcu_preempt_needs_cpu(int cpu)
{
	return 0;
}

/*
 * Because preemptible RCU does not exist, rcu_barrier() is just
 * another name for rcu_barrier_sched().
 */
void rcu_barrier(void)
{
	rcu_barrier_sched();
}
EXPORT_SYMBOL_GPL(rcu_barrier);
/*
 * Because preemptible RCU does not exist, there is no per-CPU
 * data to initialize.
 */
static void __cpuinit rcu_preempt_init_percpu_data(int cpu)
{
}

/*
 * Because there is no preemptible RCU, there is no cleanup to do.
 */
static void rcu_preempt_cleanup_dying_cpu(void)
{
}

/*
 * Because preemptible RCU does not exist, it need not be initialized.
 */
static void __init __rcu_init_preempt(void)
{
}

#endif /* #else #ifdef CONFIG_TREE_PREEMPT_RCU */
#ifdef CONFIG_RCU_BOOST

#include "rtmutex_common.h"
#ifdef CONFIG_RCU_TRACE

static void rcu_initiate_boost_trace(struct rcu_node *rnp)
{
	if (list_empty(&rnp->blkd_tasks))
		rnp->n_balk_blkd_tasks++;
	else if (rnp->exp_tasks == NULL && rnp->gp_tasks == NULL)
		rnp->n_balk_exp_gp_tasks++;
	else if (rnp->gp_tasks != NULL && rnp->boost_tasks != NULL)
		rnp->n_balk_boost_tasks++;
	else if (rnp->gp_tasks != NULL && rnp->qsmask != 0)
		rnp->n_balk_notblocked++;
	else if (rnp->gp_tasks != NULL &&
		 ULONG_CMP_LT(jiffies, rnp->boost_time))
		rnp->n_balk_notyet++;
}

#else /* #ifdef CONFIG_RCU_TRACE */

static void rcu_initiate_boost_trace(struct rcu_node *rnp)
{
}

#endif /* #else #ifdef CONFIG_RCU_TRACE */
/*
 * Carry out RCU priority boosting on the task indicated by ->exp_tasks
 * or ->boost_tasks, advancing the pointer to the next task in the
 * ->blkd_tasks list.
 *
 * Note that irqs must be enabled: boosting the task can block.
 * Returns 1 if there are more tasks needing to be boosted.
 */
static int rcu_boost(struct rcu_node *rnp)
{
	unsigned long flags;
	struct rt_mutex mtx;
	struct task_struct *t;
	struct list_head *tb;

	if (rnp->exp_tasks == NULL && rnp->boost_tasks == NULL)
		return 0;  /* Nothing left to boost. */

	raw_spin_lock_irqsave(&rnp->lock, flags);

	/*
	 * Recheck under the lock: all tasks in need of boosting
	 * might exit their RCU read-side critical sections on their own.
	 */
	if (rnp->exp_tasks == NULL && rnp->boost_tasks == NULL) {
		raw_spin_unlock_irqrestore(&rnp->lock, flags);
		return 0;
	}

	/*
	 * Preferentially boost tasks blocking expedited grace periods.
	 * This cannot starve the normal grace periods because a second
	 * expedited grace period must boost all blocked tasks, including
	 * those blocking the pre-existing normal grace period.
	 */
	if (rnp->exp_tasks != NULL) {
		tb = rnp->exp_tasks;
		rnp->n_exp_boosts++;
	} else {
		tb = rnp->boost_tasks;
		rnp->n_normal_boosts++;
	}
	rnp->n_tasks_boosted++;

	/*
	 * We boost task t by manufacturing an rt_mutex that appears to
	 * be held by task t.  We leave a pointer to that rt_mutex where
	 * task t can find it, and task t will release the mutex when it
	 * exits its outermost RCU read-side critical section.  Then
	 * simply acquiring this artificial rt_mutex will boost task
	 * t's priority.  (Thanks to tglx for suggesting this approach!)
	 *
	 * Note that task t must acquire rnp->lock to remove itself from
	 * the ->blkd_tasks list, which it will do from exit() if from
	 * nowhere else.  We therefore are guaranteed that task t will
	 * stay around at least until we drop rnp->lock.  Note that
	 * rnp->lock also resolves races between our priority boosting
	 * and task t's exiting its outermost RCU read-side critical
	 * section.
	 */
	t = container_of(tb, struct task_struct, rcu_node_entry);
	rt_mutex_init_proxy_locked(&mtx, t);
	t->rcu_boost_mutex = &mtx;
	raw_spin_unlock_irqrestore(&rnp->lock, flags);
	rt_mutex_lock(&mtx);  /* Side effect: boosts task t's priority. */
	rt_mutex_unlock(&mtx);  /* Keep lockdep happy. */

	return ACCESS_ONCE(rnp->exp_tasks) != NULL ||
	       ACCESS_ONCE(rnp->boost_tasks) != NULL;
}
/*
 * Timer handler to initiate waking up of boost kthreads that
 * have yielded the CPU due to excessive numbers of tasks to
 * boost.  We wake up the per-rcu_node kthread, which in turn
 * will wake up the booster kthread.
 */
static void rcu_boost_kthread_timer(unsigned long arg)
{
	invoke_rcu_node_kthread((struct rcu_node *)arg);
}
/*
 * Priority-boosting kthread.  One per leaf rcu_node and one for the
 * root rcu_node.
 */
static int rcu_boost_kthread(void *arg)
{
	struct rcu_node *rnp = (struct rcu_node *)arg;
	int spincnt = 0;
	int more2boost;

	trace_rcu_utilization("Start boost kthread@init");
	for (;;) {
		rnp->boost_kthread_status = RCU_KTHREAD_WAITING;
		trace_rcu_utilization("End boost kthread@rcu_wait");
		rcu_wait(rnp->boost_tasks || rnp->exp_tasks);
		trace_rcu_utilization("Start boost kthread@rcu_wait");
		rnp->boost_kthread_status = RCU_KTHREAD_RUNNING;
		more2boost = rcu_boost(rnp);
		if (more2boost)
			spincnt++;
		else
			spincnt = 0;
		if (spincnt > 10) {
			trace_rcu_utilization("End boost kthread@rcu_yield");
			rcu_yield(rcu_boost_kthread_timer, (unsigned long)rnp);
			trace_rcu_utilization("Start boost kthread@rcu_yield");
			spincnt = 0;
		}
	}
	/* NOTREACHED */
	trace_rcu_utilization("End boost kthread@notreached");
	return 0;
}
/*
 * Check to see if it is time to start boosting RCU readers that are
 * blocking the current grace period, and, if so, tell the per-rcu_node
 * kthread to start boosting them.  If there is an expedited grace
 * period in progress, it is always time to boost.
 *
 * The caller must hold rnp->lock, which this function releases,
 * but irqs remain disabled.  The ->boost_kthread_task is immortal,
 * so we don't need to worry about it going away.
 */
static void rcu_initiate_boost(struct rcu_node *rnp, unsigned long flags)
{
	struct task_struct *t;

	if (!rcu_preempt_blocked_readers_cgp(rnp) && rnp->exp_tasks == NULL) {
		rnp->n_balk_exp_gp_tasks++;
		raw_spin_unlock_irqrestore(&rnp->lock, flags);
		return;
	}
	if (rnp->exp_tasks != NULL ||
	    (rnp->gp_tasks != NULL &&
	     rnp->boost_tasks == NULL &&
	     rnp->qsmask == 0 &&
	     ULONG_CMP_GE(jiffies, rnp->boost_time))) {
		if (rnp->exp_tasks == NULL)
			rnp->boost_tasks = rnp->gp_tasks;
		raw_spin_unlock_irqrestore(&rnp->lock, flags);
		t = rnp->boost_kthread_task;
		if (t != NULL)
			wake_up_process(t);
	} else {
		rcu_initiate_boost_trace(rnp);
		raw_spin_unlock_irqrestore(&rnp->lock, flags);
	}
}
/*
 * Wake up the per-CPU kthread to invoke RCU callbacks.
 */
static void invoke_rcu_callbacks_kthread(void)
{
	unsigned long flags;

	local_irq_save(flags);
	__this_cpu_write(rcu_cpu_has_work, 1);
	if (__this_cpu_read(rcu_cpu_kthread_task) != NULL &&
	    current != __this_cpu_read(rcu_cpu_kthread_task))
		wake_up_process(__this_cpu_read(rcu_cpu_kthread_task));
	local_irq_restore(flags);
}

/*
 * Is the current CPU running the RCU-callbacks kthread?
 * Caller must have preemption disabled.
 */
static bool rcu_is_callbacks_kthread(void)
{
	return __get_cpu_var(rcu_cpu_kthread_task) == current;
}
/*
 * Set the affinity of the boost kthread.  The CPU-hotplug locks are
 * held, so no one should be messing with the existence of the boost
 * kthread.
 */
static void rcu_boost_kthread_setaffinity(struct rcu_node *rnp,
					  cpumask_var_t cm)
{
	struct task_struct *t;

	t = rnp->boost_kthread_task;
	if (t != NULL)
		set_cpus_allowed_ptr(rnp->boost_kthread_task, cm);
}
#define RCU_BOOST_DELAY_JIFFIES DIV_ROUND_UP(CONFIG_RCU_BOOST_DELAY * HZ, 1000)

/*
 * Do priority-boost accounting for the start of a new grace period.
 */
static void rcu_preempt_boost_start_gp(struct rcu_node *rnp)
{
	rnp->boost_time = jiffies + RCU_BOOST_DELAY_JIFFIES;
}
/*
 * Create an RCU-boost kthread for the specified node if one does not
 * already exist.  We only create this kthread for preemptible RCU.
 * Returns zero if all is well, a negated errno otherwise.
 */
static int __cpuinit rcu_spawn_one_boost_kthread(struct rcu_state *rsp,
						 struct rcu_node *rnp,
						 int rnp_index)
{
	unsigned long flags;
	struct sched_param sp;
	struct task_struct *t;

	if (&rcu_preempt_state != rsp)
		return 0;
	if (rnp->boost_kthread_task != NULL)
		return 0;
	t = kthread_create(rcu_boost_kthread, (void *)rnp,
			   "rcub/%d", rnp_index);
	if (IS_ERR(t))
		return PTR_ERR(t);
	raw_spin_lock_irqsave(&rnp->lock, flags);
	rnp->boost_kthread_task = t;
	raw_spin_unlock_irqrestore(&rnp->lock, flags);
	sp.sched_priority = RCU_BOOST_PRIO;
	sched_setscheduler_nocheck(t, SCHED_FIFO, &sp);
	wake_up_process(t); /* get to TASK_INTERRUPTIBLE quickly. */
	return 0;
}
#ifdef CONFIG_HOTPLUG_CPU

/*
 * Stop the RCU's per-CPU kthread when its CPU goes offline.
 */
static void rcu_stop_cpu_kthread(int cpu)
{
	struct task_struct *t;

	/* Stop the CPU's kthread. */
	t = per_cpu(rcu_cpu_kthread_task, cpu);
	if (t != NULL) {
		per_cpu(rcu_cpu_kthread_task, cpu) = NULL;
		kthread_stop(t);
	}
}

#endif /* #ifdef CONFIG_HOTPLUG_CPU */

static void rcu_kthread_do_work(void)
{
	rcu_do_batch(&rcu_sched_state, &__get_cpu_var(rcu_sched_data));
	rcu_do_batch(&rcu_bh_state, &__get_cpu_var(rcu_bh_data));
	rcu_preempt_do_callbacks();
}
/*
 * Wake up the specified per-rcu_node-structure kthread.
 * Because the per-rcu_node kthreads are immortal, we don't need
 * to do anything to keep them alive.
 */
static void invoke_rcu_node_kthread(struct rcu_node *rnp)
{
	struct task_struct *t;

	t = rnp->node_kthread_task;
	if (t != NULL)
		wake_up_process(t);
}
/*
 * Set the specified CPU's kthread to run RT or not, as specified by
 * the to_rt argument.  The CPU-hotplug locks are held, so the task
 * is not going away.
 */
static void rcu_cpu_kthread_setrt(int cpu, int to_rt)
{
	int policy;
	struct sched_param sp;
	struct task_struct *t;

	t = per_cpu(rcu_cpu_kthread_task, cpu);
	if (t == NULL)
		return;
	if (to_rt) {
		policy = SCHED_FIFO;
		sp.sched_priority = RCU_KTHREAD_PRIO;
	} else {
		policy = SCHED_NORMAL;
		sp.sched_priority = 0;
	}
	sched_setscheduler_nocheck(t, policy, &sp);
}
/*
 * Timer handler to initiate the waking up of per-CPU kthreads that
 * have yielded the CPU due to excess numbers of RCU callbacks.
 * We wake up the per-rcu_node kthread, which in turn will wake up
 * the booster kthread.
 */
static void rcu_cpu_kthread_timer(unsigned long arg)
{
	struct rcu_data *rdp = per_cpu_ptr(rcu_state->rda, arg);
	struct rcu_node *rnp = rdp->mynode;

	atomic_or(rdp->grpmask, &rnp->wakemask);
	invoke_rcu_node_kthread(rnp);
}
/*
 * Drop to non-real-time priority and yield, but only after posting a
 * timer that will cause us to regain our real-time priority if we
 * remain preempted.  Either way, we restore our real-time priority
 * before returning.
 */
static void rcu_yield(void (*f)(unsigned long), unsigned long arg)
{
	struct sched_param sp;
	struct timer_list yield_timer;
	int prio = current->rt_priority;

	setup_timer_on_stack(&yield_timer, f, arg);
	mod_timer(&yield_timer, jiffies + 2);
	sp.sched_priority = 0;
	sched_setscheduler_nocheck(current, SCHED_NORMAL, &sp);
	set_user_nice(current, 19);
	schedule();
	set_user_nice(current, 0);
	sp.sched_priority = prio;
	sched_setscheduler_nocheck(current, SCHED_FIFO, &sp);
	del_timer(&yield_timer);
}
/*
 * Handle cases where the rcu_cpu_kthread() ends up on the wrong CPU.
 * This can happen while the corresponding CPU is either coming online
 * or going offline.  We cannot wait until the CPU is fully online
 * before starting the kthread, because the various notifier functions
 * can wait for RCU grace periods.  So we park rcu_cpu_kthread() until
 * the corresponding CPU is online.
 *
 * Return 1 if the kthread needs to stop, 0 otherwise.
 *
 * Caller must disable bh.  This function can momentarily enable it.
 */
static int rcu_cpu_kthread_should_stop(int cpu)
{
	while (cpu_is_offline(cpu) ||
	       !cpumask_equal(&current->cpus_allowed, cpumask_of(cpu)) ||
	       smp_processor_id() != cpu) {
		if (kthread_should_stop())
			return 1;
		per_cpu(rcu_cpu_kthread_status, cpu) = RCU_KTHREAD_OFFCPU;
		per_cpu(rcu_cpu_kthread_cpu, cpu) = raw_smp_processor_id();
		local_bh_enable();
		schedule_timeout_uninterruptible(1);
		if (!cpumask_equal(&current->cpus_allowed, cpumask_of(cpu)))
			set_cpus_allowed_ptr(current, cpumask_of(cpu));
		local_bh_disable();
	}
	per_cpu(rcu_cpu_kthread_cpu, cpu) = cpu;
	return 0;
}
/*
 * Per-CPU kernel thread that invokes RCU callbacks.  This replaces the
 * RCU softirq used in flavors and configurations of RCU that do not
 * support RCU priority boosting.
 */
static int rcu_cpu_kthread(void *arg)
{
	int cpu = (int)(long)arg;
	unsigned long flags;
	int spincnt = 0;
	unsigned int *statusp = &per_cpu(rcu_cpu_kthread_status, cpu);
	char work;
	char *workp = &per_cpu(rcu_cpu_has_work, cpu);

	trace_rcu_utilization("Start CPU kthread@init");
	for (;;) {
		*statusp = RCU_KTHREAD_WAITING;
		trace_rcu_utilization("End CPU kthread@rcu_wait");
		rcu_wait(*workp != 0 || kthread_should_stop());
		trace_rcu_utilization("Start CPU kthread@rcu_wait");
		local_bh_disable();
		if (rcu_cpu_kthread_should_stop(cpu)) {
			local_bh_enable();
			break;
		}
		*statusp = RCU_KTHREAD_RUNNING;
		per_cpu(rcu_cpu_kthread_loops, cpu)++;
		local_irq_save(flags);
		work = *workp;
		*workp = 0;
		local_irq_restore(flags);
		if (work)
			rcu_kthread_do_work();
		local_bh_enable();
		if (*workp != 0)
			spincnt++;
		else
			spincnt = 0;
		if (spincnt > 10) {
			*statusp = RCU_KTHREAD_YIELDING;
			trace_rcu_utilization("End CPU kthread@rcu_yield");
			rcu_yield(rcu_cpu_kthread_timer, (unsigned long)cpu);
			trace_rcu_utilization("Start CPU kthread@rcu_yield");
			spincnt = 0;
		}
	}
	*statusp = RCU_KTHREAD_STOPPED;
	trace_rcu_utilization("End CPU kthread@term");
	return 0;
}
/*
 * Spawn a per-CPU kthread, setting up affinity and priority.
 * Because the CPU hotplug lock is held, no other CPU will be attempting
 * to manipulate rcu_cpu_kthread_task.  There might be another CPU
 * attempting to access it during boot, but the locking in kthread_bind()
 * will enforce sufficient ordering.
 *
 * Please note that we cannot simply refuse to wake up the per-CPU
 * kthread because kthreads are created in TASK_UNINTERRUPTIBLE state,
 * which can result in softlockup complaints if the task ends up being
 * idle for more than a couple of minutes.
 *
 * However, please note also that we cannot bind the per-CPU kthread to its
 * CPU until that CPU is fully online.  We also cannot wait until the
 * CPU is fully online before we create its per-CPU kthread, as this would
 * deadlock the system when CPU notifiers tried waiting for grace
 * periods.  So we bind the per-CPU kthread to its CPU only if the CPU
 * is online.  If its CPU is not yet fully online, then the code in
 * rcu_cpu_kthread() will wait until it is fully online, and then do
 * the binding.
 */
static int __cpuinit rcu_spawn_one_cpu_kthread(int cpu)
{
	struct sched_param sp;
	struct task_struct *t;

	if (!rcu_scheduler_fully_active ||
	    per_cpu(rcu_cpu_kthread_task, cpu) != NULL)
		return 0;
	t = kthread_create_on_node(rcu_cpu_kthread,
				   (void *)(long)cpu,
				   cpu_to_node(cpu),
				   "rcuc/%d", cpu);
	if (IS_ERR(t))
		return PTR_ERR(t);
	if (cpu_online(cpu))
		kthread_bind(t, cpu);
	per_cpu(rcu_cpu_kthread_cpu, cpu) = cpu;
	WARN_ON_ONCE(per_cpu(rcu_cpu_kthread_task, cpu) != NULL);
	sp.sched_priority = RCU_KTHREAD_PRIO;
	sched_setscheduler_nocheck(t, SCHED_FIFO, &sp);
	per_cpu(rcu_cpu_kthread_task, cpu) = t;
	wake_up_process(t); /* Get to TASK_INTERRUPTIBLE quickly. */
	return 0;
}
/*
 * Per-rcu_node kthread, which is in charge of waking up the per-CPU
 * kthreads when needed.  We ignore requests to wake up kthreads
 * for offline CPUs, which is OK because force_quiescent_state()
 * takes care of this case.
 */
static int rcu_node_kthread(void *arg)
{
	int cpu;
	unsigned long flags;
	unsigned long mask;
	struct rcu_node *rnp = (struct rcu_node *)arg;
	struct sched_param sp;
	struct task_struct *t;

	for (;;) {
		rnp->node_kthread_status = RCU_KTHREAD_WAITING;
		rcu_wait(atomic_read(&rnp->wakemask) != 0);
		rnp->node_kthread_status = RCU_KTHREAD_RUNNING;
		raw_spin_lock_irqsave(&rnp->lock, flags);
		mask = atomic_xchg(&rnp->wakemask, 0);
		rcu_initiate_boost(rnp, flags); /* releases rnp->lock. */
		for (cpu = rnp->grplo; cpu <= rnp->grphi; cpu++, mask >>= 1) {
			if ((mask & 0x1) == 0)
				continue;
			preempt_disable();
			t = per_cpu(rcu_cpu_kthread_task, cpu);
			if (!cpu_online(cpu) || t == NULL) {
				preempt_enable();
				continue;
			}
			per_cpu(rcu_cpu_has_work, cpu) = 1;
			sp.sched_priority = RCU_KTHREAD_PRIO;
			sched_setscheduler_nocheck(t, SCHED_FIFO, &sp);
			preempt_enable();
		}
	}
	/* NOTREACHED */
	rnp->node_kthread_status = RCU_KTHREAD_STOPPED;
	return 0;
}
/*
 * Set the per-rcu_node kthread's affinity to cover all CPUs that are
 * served by the rcu_node in question.  The CPU hotplug lock is still
 * held, so the value of rnp->qsmaskinit will be stable.
 *
 * We don't include outgoingcpu in the affinity set, use -1 if there is
 * no outgoing CPU.  If there are no CPUs left in the affinity set,
 * this function allows the kthread to execute on any CPU.
 */
static void rcu_node_kthread_setaffinity(struct rcu_node *rnp, int outgoingcpu)
{
	cpumask_var_t cm;
	int cpu;
	unsigned long mask = rnp->qsmaskinit;

	if (rnp->node_kthread_task == NULL)
		return;
	if (!alloc_cpumask_var(&cm, GFP_KERNEL))
		return;
	cpumask_clear(cm);
	for (cpu = rnp->grplo; cpu <= rnp->grphi; cpu++, mask >>= 1)
		if ((mask & 0x1) && cpu != outgoingcpu)
			cpumask_set_cpu(cpu, cm);
	if (cpumask_weight(cm) == 0) {
		cpumask_setall(cm);
		for (cpu = rnp->grplo; cpu <= rnp->grphi; cpu++)
			cpumask_clear_cpu(cpu, cm);
		WARN_ON_ONCE(cpumask_weight(cm) == 0);
	}
	set_cpus_allowed_ptr(rnp->node_kthread_task, cm);
	rcu_boost_kthread_setaffinity(rnp, cm);
	free_cpumask_var(cm);
}
/*
 * Spawn a per-rcu_node kthread, setting priority and affinity.
 * Called during boot before online/offline can happen, or, if
 * during runtime, with the main CPU-hotplug locks held.  So only
 * one of these can be executing at a time.
 */
static int __cpuinit rcu_spawn_one_node_kthread(struct rcu_state *rsp,
						struct rcu_node *rnp)
{
	unsigned long flags;
	int rnp_index = rnp - &rsp->node[0];
	struct sched_param sp;
	struct task_struct *t;

	if (!rcu_scheduler_fully_active ||
	    rnp->qsmaskinit == 0)
		return 0;
	if (rnp->node_kthread_task == NULL) {
		t = kthread_create(rcu_node_kthread, (void *)rnp,
				   "rcun/%d", rnp_index);
		if (IS_ERR(t))
			return PTR_ERR(t);
		raw_spin_lock_irqsave(&rnp->lock, flags);
		rnp->node_kthread_task = t;
		raw_spin_unlock_irqrestore(&rnp->lock, flags);
		sp.sched_priority = 99;
		sched_setscheduler_nocheck(t, SCHED_FIFO, &sp);
		wake_up_process(t); /* get to TASK_INTERRUPTIBLE quickly. */
	}
	return rcu_spawn_one_boost_kthread(rsp, rnp, rnp_index);
}
/*
 * Spawn all kthreads -- called as soon as the scheduler is running.
 */
static int __init rcu_spawn_kthreads(void)
{
	int cpu;
	struct rcu_node *rnp;

	rcu_scheduler_fully_active = 1;
	for_each_possible_cpu(cpu) {
		per_cpu(rcu_cpu_has_work, cpu) = 0;
		if (cpu_online(cpu))
			(void)rcu_spawn_one_cpu_kthread(cpu);
	}
	rnp = rcu_get_root(rcu_state);
	(void)rcu_spawn_one_node_kthread(rcu_state, rnp);
	if (NUM_RCU_NODES > 1) {
		rcu_for_each_leaf_node(rcu_state, rnp)
			(void)rcu_spawn_one_node_kthread(rcu_state, rnp);
	}
	return 0;
}
early_initcall(rcu_spawn_kthreads);
static void __cpuinit rcu_prepare_kthreads(int cpu)
{
	struct rcu_data *rdp = per_cpu_ptr(rcu_state->rda, cpu);
	struct rcu_node *rnp = rdp->mynode;

	/* Fire up the incoming CPU's kthread and leaf rcu_node kthread. */
	if (rcu_scheduler_fully_active) {
		(void)rcu_spawn_one_cpu_kthread(cpu);
		if (rnp->node_kthread_task == NULL)
			(void)rcu_spawn_one_node_kthread(rcu_state, rnp);
	}
}
#else /* #ifdef CONFIG_RCU_BOOST */

static void rcu_initiate_boost(struct rcu_node *rnp, unsigned long flags)
{
	raw_spin_unlock_irqrestore(&rnp->lock, flags);
}

static void invoke_rcu_callbacks_kthread(void)
{
	WARN_ON_ONCE(1);
}

static bool rcu_is_callbacks_kthread(void)
{
	return false;
}

static void rcu_preempt_boost_start_gp(struct rcu_node *rnp)
{
}

#ifdef CONFIG_HOTPLUG_CPU

static void rcu_stop_cpu_kthread(int cpu)
{
}

#endif /* #ifdef CONFIG_HOTPLUG_CPU */

static void rcu_node_kthread_setaffinity(struct rcu_node *rnp, int outgoingcpu)
{
}

static void rcu_cpu_kthread_setrt(int cpu, int to_rt)
{
}

static int __init rcu_scheduler_really_started(void)
{
	rcu_scheduler_fully_active = 1;
	return 0;
}
early_initcall(rcu_scheduler_really_started);

static void __cpuinit rcu_prepare_kthreads(int cpu)
{
}

#endif /* #else #ifdef CONFIG_RCU_BOOST */
#ifndef CONFIG_SMP

void synchronize_sched_expedited(void)
{
	cond_resched();
}
EXPORT_SYMBOL_GPL(synchronize_sched_expedited);

#else /* #ifndef CONFIG_SMP */
static atomic_t sync_sched_expedited_started = ATOMIC_INIT(0);
static atomic_t sync_sched_expedited_done = ATOMIC_INIT(0);

static int synchronize_sched_expedited_cpu_stop(void *data)
{
	/*
	 * There must be a full memory barrier on each affected CPU
	 * between the time that try_stop_cpus() is called and the
	 * time that it returns.
	 *
	 * In the current initial implementation of cpu_stop, the
	 * above condition is already met when the control reaches
	 * this point and the following smp_mb() is not strictly
	 * necessary.  Do smp_mb() anyway for documentation and
	 * robustness against future implementation changes.
	 */
	smp_mb(); /* See above comment block. */
	return 0;
}
1892 * Wait for an rcu-sched grace period to elapse, but use "big hammer"
1893 * approach to force grace period to end quickly. This consumes
1894 * significant time on all CPUs, and is thus not recommended for
1895 * any sort of common-case code.
1897 * Note that it is illegal to call this function while holding any
1898 * lock that is acquired by a CPU-hotplug notifier. Failing to
1899 * observe this restriction will result in deadlock.
1901 * This implementation can be thought of as an application of ticket
1902 * locking to RCU, with sync_sched_expedited_started and
1903 * sync_sched_expedited_done taking on the roles of the halves
1904 * of the ticket-lock word. Each task atomically increments
1905 * sync_sched_expedited_started upon entry, snapshotting the old value,
1906 * then attempts to stop all the CPUs. If this succeeds, then each
1907 * CPU will have executed a context switch, resulting in an RCU-sched
1908 * grace period. We are then done, so we use atomic_cmpxchg() to
1909 * update sync_sched_expedited_done to match our snapshot -- but
1910 * only if someone else has not already advanced past our snapshot.
1912 * On the other hand, if try_stop_cpus() fails, we check the value
1913 * of sync_sched_expedited_done. If it has advanced past our
1914 * initial snapshot, then someone else must have forced a grace period
1915 * some time after we took our snapshot. In this case, our work is
1916 * done for us, and we can simply return. Otherwise, we try again,
1917 * but keep our initial snapshot for purposes of checking for someone
1918 * doing our work for us.
1920 * If we fail too many times in a row, we fall back to synchronize_sched().
void synchronize_sched_expedited(void)
{
	int firstsnap, s, snap, trycount = 0;

	/* Note that atomic_inc_return() implies full memory barrier. */
	firstsnap = snap = atomic_inc_return(&sync_sched_expedited_started);
	get_online_cpus();

	/*
	 * Each pass through the following loop attempts to force a
	 * context switch on each CPU.
	 */
	while (try_stop_cpus(cpu_online_mask,
			     synchronize_sched_expedited_cpu_stop,
			     NULL) == -EAGAIN) {
		put_online_cpus();

		/* No joy, try again later.  Or just synchronize_sched(). */
		if (trycount++ < 10)
			udelay(trycount * num_online_cpus());
		else {
			synchronize_sched();
			return;
		}

		/* Check to see if someone else did our work for us. */
		s = atomic_read(&sync_sched_expedited_done);
		if (UINT_CMP_GE((unsigned)s, (unsigned)firstsnap)) {
			smp_mb(); /* ensure test happens before caller kfree */
			return;
		}

		/*
		 * Refetching sync_sched_expedited_started allows later
		 * callers to piggyback on our grace period.  We subtract
		 * 1 to get the same token that the last incrementer got.
		 * We retry after they started, so our grace period works
		 * for them, and they started after our first try, so their
		 * grace period works for us.
		 */
		get_online_cpus();
		snap = atomic_read(&sync_sched_expedited_started);
		smp_mb(); /* ensure read is before try_stop_cpus(). */
	}

	/*
	 * Everyone up to our most recent fetch is covered by our grace
	 * period.  Update the counter, but only if our work is still
	 * relevant -- which it won't be if someone who started later
	 * than we did beat us to the punch.
	 */
	do {
		s = atomic_read(&sync_sched_expedited_done);
		if (UINT_CMP_GE((unsigned)s, (unsigned)snap)) {
			smp_mb(); /* ensure test happens before caller kfree */
			break;
		}
	} while (atomic_cmpxchg(&sync_sched_expedited_done, s, snap) != s);

	put_online_cpus();
}
EXPORT_SYMBOL_GPL(synchronize_sched_expedited);
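
/*
 * Illustrative sketch (guarded by #if 0, never built): the comment above
 * describes sync_sched_expedited_started and sync_sched_expedited_done as
 * the two halves of a ticket lock.  The code below restates just that
 * counter protocol, with the expensive cpu-stop machinery abstracted away.
 * The names example_started, example_done, example_expedited(), and
 * do_expensive_grace_period() are hypothetical and exist only for this
 * sketch; the real logic is synchronize_sched_expedited() above.
 */
#if 0
static atomic_t example_started = ATOMIC_INIT(0);
static atomic_t example_done = ATOMIC_INIT(0);

static void example_expedited(void)
{
	int snap, s;

	/* Take a ticket.  Any full operation begun after this covers us. */
	snap = atomic_inc_return(&example_started);

	/* Someone may already have done the work since we took our ticket. */
	s = atomic_read(&example_done);
	if (UINT_CMP_GE((unsigned)s, (unsigned)snap))
		return;	/* A later starter's grace period covered us. */

	do_expensive_grace_period();	/* hypothetical stand-in */

	/* Publish our ticket, unless a later caller already advanced "done". */
	do {
		s = atomic_read(&example_done);
		if (UINT_CMP_GE((unsigned)s, (unsigned)snap))
			break;	/* Someone else beat us to the punch. */
	} while (atomic_cmpxchg(&example_done, s, snap) != s);
}
#endif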

#endif /* #else #ifndef CONFIG_SMP */

#if !defined(CONFIG_RCU_FAST_NO_HZ)

/*
 * Check to see if any future RCU-related work will need to be done
 * by the current CPU, even if none need be done immediately, returning
 * 1 if so.  This function is part of the RCU implementation; it is -not-
 * an exported member of the RCU API.
 *
 * Because we do not have RCU_FAST_NO_HZ, just check whether this CPU needs
 * any flavor of RCU.
 */
int rcu_needs_cpu(int cpu)
{
	return rcu_cpu_has_callbacks(cpu);
}

/*
 * Because we do not have RCU_FAST_NO_HZ, don't bother initializing for it.
 */
static void rcu_prepare_for_idle_init(int cpu)
{
}

/*
 * Because we do not have RCU_FAST_NO_HZ, don't bother cleaning up after it.
 */
static void rcu_cleanup_after_idle(int cpu)
{
}

/*
 * Do the idle-entry grace-period work, which, because CONFIG_RCU_FAST_NO_HZ=n,
 * is nothing.
 */
static void rcu_prepare_for_idle(int cpu)
{
}

#else /* #if !defined(CONFIG_RCU_FAST_NO_HZ) */

/*
 * This code is invoked when a CPU goes idle, at which point we want
 * to have the CPU do everything required for RCU so that it can enter
 * the energy-efficient dyntick-idle mode.  This is handled by a
 * state machine implemented by rcu_prepare_for_idle() below.
 *
 * The following three preprocessor symbols control this state machine:
 *
 * RCU_IDLE_FLUSHES gives the maximum number of times that we will attempt
 *	to satisfy RCU.  Beyond this point, it is better to incur a periodic
 *	scheduling-clock interrupt than to loop through the state machine
 *	at full speed.
 * RCU_IDLE_OPT_FLUSHES gives the number of RCU_IDLE_FLUSHES that are
 *	optional if RCU does not need anything immediately from this
 *	CPU, even if this CPU still has RCU callbacks queued.  The first
 *	times through the state machine are mandatory: we need to give
 *	the state machine a chance to communicate a quiescent state
 *	to the RCU core.
 * RCU_IDLE_GP_DELAY gives the number of jiffies that a CPU is permitted
 *	to sleep in dyntick-idle mode with RCU callbacks pending.  This
 *	is sized to be roughly one RCU grace period.  Those energy-efficiency
 *	benchmarkers who might otherwise be tempted to set this to a large
 *	number, be warned: Setting RCU_IDLE_GP_DELAY too high can hang your
 *	system.  And if you are -that- concerned about energy efficiency,
 *	just power the system down and be done with it!
 *
 * The values below work well in practice.  If future workloads require
 * adjustment, they can be converted into kernel config parameters, though
 * making the state machine smarter might be a better option.
 */
#define RCU_IDLE_FLUSHES 5		/* Number of dyntick-idle tries. */
#define RCU_IDLE_OPT_FLUSHES 3		/* Optional dyntick-idle tries. */
#define RCU_IDLE_GP_DELAY 6		/* Roughly one grace period. */
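
/*
 * Illustrative sketch (guarded by #if 0, never built): a stripped-down
 * restatement of the sequencing that the three constants above drive, to
 * make their roles concrete.  The names example_drain, example_idle_attempt(),
 * still_has_immediate_work(), arm_backstop_timer(), begin_holdoff(), and
 * push_callbacks_one_step() are hypothetical; the real state machine is
 * rcu_prepare_for_idle() below.
 */
#if 0
static int example_drain;	/* Flush attempts left in this idle episode. */

static void example_idle_attempt(void)
{
	if (example_drain <= 0) {
		/* First attempt: allow up to RCU_IDLE_FLUSHES tries. */
		example_drain = RCU_IDLE_FLUSHES;
	} else if (example_drain <= RCU_IDLE_OPT_FLUSHES &&
		   !still_has_immediate_work()) {
		/* Remaining tries are optional: sleep, but arm a backstop. */
		example_drain = 0;
		arm_backstop_timer(RCU_IDLE_GP_DELAY);	/* ~one grace period */
		return;
	} else if (--example_drain <= 0) {
		/* Out of tries: hold off and keep the scheduling-clock tick. */
		begin_holdoff();
		return;
	}
	push_callbacks_one_step();
}
#endif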

static DEFINE_PER_CPU(int, rcu_dyntick_drain);
static DEFINE_PER_CPU(unsigned long, rcu_dyntick_holdoff);
static DEFINE_PER_CPU(struct hrtimer, rcu_idle_gp_timer);
static ktime_t rcu_idle_gp_wait;

/*
 * Allow the CPU to enter dyntick-idle mode if either: (1) There are no
 * callbacks on this CPU, (2) this CPU has not yet attempted to enter
 * dyntick-idle mode, or (3) this CPU is in the process of attempting to
 * enter dyntick-idle mode.  Otherwise, if we have recently tried and failed
 * to enter dyntick-idle mode, we refuse to try to enter it.  After all,
 * it is better to incur scheduling-clock interrupts than to spin
 * continuously for the same time duration!
 */
int rcu_needs_cpu(int cpu)
{
	/* If no callbacks, RCU doesn't need the CPU. */
	if (!rcu_cpu_has_callbacks(cpu))
		return 0;
	/* Otherwise, RCU needs the CPU only if it recently tried and failed. */
	return per_cpu(rcu_dyntick_holdoff, cpu) == jiffies;
}

/*
 * Does the specified flavor of RCU have non-lazy callbacks pending on
 * the specified CPU?  Both RCU flavor and CPU are specified by the
 * rcu_data structure.
 */
static bool __rcu_cpu_has_nonlazy_callbacks(struct rcu_data *rdp)
{
	return rdp->qlen != rdp->qlen_lazy;
}

#ifdef CONFIG_TREE_PREEMPT_RCU

/*
 * Are there non-lazy RCU-preempt callbacks?  (There cannot be if there
 * is no RCU-preempt in the kernel.)
 */
static bool rcu_preempt_cpu_has_nonlazy_callbacks(int cpu)
{
	struct rcu_data *rdp = &per_cpu(rcu_preempt_data, cpu);

	return __rcu_cpu_has_nonlazy_callbacks(rdp);
}

#else /* #ifdef CONFIG_TREE_PREEMPT_RCU */

static bool rcu_preempt_cpu_has_nonlazy_callbacks(int cpu)
{
	return 0;
}

#endif /* #else #ifdef CONFIG_TREE_PREEMPT_RCU */

/*
 * Does any flavor of RCU have non-lazy callbacks on the specified CPU?
 */
static bool rcu_cpu_has_nonlazy_callbacks(int cpu)
{
	return __rcu_cpu_has_nonlazy_callbacks(&per_cpu(rcu_sched_data, cpu)) ||
	       __rcu_cpu_has_nonlazy_callbacks(&per_cpu(rcu_bh_data, cpu)) ||
	       rcu_preempt_cpu_has_nonlazy_callbacks(cpu);
}

/*
 * Timer handler used to force CPU to start pushing its remaining RCU
 * callbacks in the case where it entered dyntick-idle mode with callbacks
 * pending.  The handler doesn't really need to do anything because the
 * real work is done upon re-entry to idle, or by the next scheduling-clock
 * interrupt should idle not be re-entered.
 */
static enum hrtimer_restart rcu_idle_gp_timer_func(struct hrtimer *hrtp)
{
	trace_rcu_prep_idle("Timer");
	return HRTIMER_NORESTART;
}

/*
 * Initialize the timer used to pull CPUs out of dyntick-idle mode.
 */
static void rcu_prepare_for_idle_init(int cpu)
{
	static int firsttime = 1;
	struct hrtimer *hrtp = &per_cpu(rcu_idle_gp_timer, cpu);

	hrtimer_init(hrtp, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
	hrtp->function = rcu_idle_gp_timer_func;
	if (firsttime) {
		unsigned int upj = jiffies_to_usecs(RCU_IDLE_GP_DELAY);

		/* Convert the delay from jiffies to a ktime, once per boot. */
		rcu_idle_gp_wait = ns_to_ktime(upj * (u64)1000);
		firsttime = 0;
	}
}

/*
 * Clean up for exit from idle.  Because we are exiting from idle, there
 * is no longer any point to rcu_idle_gp_timer, so cancel it.  This will
 * do nothing if this timer is not active, so just cancel it unconditionally.
 */
static void rcu_cleanup_after_idle(int cpu)
{
	hrtimer_cancel(&per_cpu(rcu_idle_gp_timer, cpu));
}

/*
 * Check to see if any RCU-related work can be done by the current CPU,
 * and if so, schedule a softirq to get it done.  This function is part
 * of the RCU implementation; it is -not- an exported member of the RCU API.
 *
 * The idea is for the current CPU to clear out all work required by the
 * RCU core for the current grace period, so that this CPU can be permitted
 * to enter dyntick-idle mode.  In some cases, it will need to be awakened
 * at the end of the grace period by whatever CPU ends the grace period.
 * This allows CPUs to go dyntick-idle more quickly, and to reduce the
 * number of wakeups by a modest integer factor.
 *
 * Because it is not legal to invoke rcu_process_callbacks() with irqs
 * disabled, we do one pass of force_quiescent_state(), then do an
 * invoke_rcu_core() to cause rcu_process_callbacks() to be invoked
 * later.  The per-cpu rcu_dyntick_drain variable controls the sequencing.
 *
 * The caller must have disabled interrupts.
 */
static void rcu_prepare_for_idle(int cpu)
{
	unsigned long flags;

	local_irq_save(flags);

	/*
	 * If there are no callbacks on this CPU, enter dyntick-idle mode.
	 * Also reset state to avoid prejudicing later attempts.
	 */
	if (!rcu_cpu_has_callbacks(cpu)) {
		per_cpu(rcu_dyntick_holdoff, cpu) = jiffies - 1;
		per_cpu(rcu_dyntick_drain, cpu) = 0;
		local_irq_restore(flags);
		trace_rcu_prep_idle("No callbacks");
		return;
	}

	/*
	 * If in holdoff mode, just return.  We will presumably have
	 * refrained from disabling the scheduling-clock tick.
	 */
	if (per_cpu(rcu_dyntick_holdoff, cpu) == jiffies) {
		local_irq_restore(flags);
		trace_rcu_prep_idle("In holdoff");
		return;
	}

	/* Check and update the rcu_dyntick_drain sequencing. */
	if (per_cpu(rcu_dyntick_drain, cpu) <= 0) {
		/* First time through, initialize the counter. */
		per_cpu(rcu_dyntick_drain, cpu) = RCU_IDLE_FLUSHES;
	} else if (per_cpu(rcu_dyntick_drain, cpu) <= RCU_IDLE_OPT_FLUSHES &&
		   !rcu_pending(cpu)) {
		/* Can we go dyntick-idle despite still having callbacks? */
		trace_rcu_prep_idle("Dyntick with callbacks");
		per_cpu(rcu_dyntick_drain, cpu) = 0;
		per_cpu(rcu_dyntick_holdoff, cpu) = jiffies - 1;
		if (rcu_cpu_has_nonlazy_callbacks(cpu))
			hrtimer_start(&per_cpu(rcu_idle_gp_timer, cpu),
				      rcu_idle_gp_wait, HRTIMER_MODE_REL);
		return; /* Nothing more to do immediately. */
	} else if (--per_cpu(rcu_dyntick_drain, cpu) <= 0) {
		/* We have hit the limit, so time to give up. */
		per_cpu(rcu_dyntick_holdoff, cpu) = jiffies;
		local_irq_restore(flags);
		trace_rcu_prep_idle("Begin holdoff");
		invoke_rcu_core(); /* Force the CPU out of dyntick-idle. */
		return;
	}

	/*
	 * Do one step of pushing the remaining RCU callbacks through
	 * the RCU core state machine.
	 */
#ifdef CONFIG_TREE_PREEMPT_RCU
	if (per_cpu(rcu_preempt_data, cpu).nxtlist) {
		local_irq_restore(flags);
		rcu_preempt_qs(cpu);
		force_quiescent_state(&rcu_preempt_state, 0);
		local_irq_save(flags);
	}
#endif /* #ifdef CONFIG_TREE_PREEMPT_RCU */
	if (per_cpu(rcu_sched_data, cpu).nxtlist) {
		local_irq_restore(flags);
		rcu_sched_qs(cpu);
		force_quiescent_state(&rcu_sched_state, 0);
		local_irq_save(flags);
	}
	if (per_cpu(rcu_bh_data, cpu).nxtlist) {
		local_irq_restore(flags);
		rcu_bh_qs(cpu);
		force_quiescent_state(&rcu_bh_state, 0);
		local_irq_save(flags);
	}

	/*
	 * If RCU callbacks are still pending, RCU still needs this CPU.
	 * So try forcing the callbacks through the grace period.
	 */
	if (rcu_cpu_has_callbacks(cpu)) {
		local_irq_restore(flags);
		trace_rcu_prep_idle("More callbacks");
		invoke_rcu_core();
	} else {
		local_irq_restore(flags);
		trace_rcu_prep_idle("Callbacks drained");
	}
}
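
/*
 * Illustrative sketch (guarded by #if 0, never built) of how the hooks in
 * this file fit into an idle-entry path.  Only rcu_prepare_for_idle_init(),
 * rcu_prepare_for_idle(), rcu_needs_cpu(), and rcu_cleanup_after_idle() are
 * real; example_cpu_idle_loop(), cpu_should_idle(), stop_sched_tick(),
 * wait_for_interrupt(), and start_sched_tick() are hypothetical stand-ins
 * for the core idle and nohz code that actually drives these functions.
 */
#if 0
static void example_cpu_idle_loop(int cpu)
{
	rcu_prepare_for_idle_init(cpu);	/* Once per CPU, before first idle. */

	while (cpu_should_idle()) {
		local_irq_disable();
		rcu_prepare_for_idle(cpu);	/* Caller must have irqs off. */
		if (!rcu_needs_cpu(cpu))
			stop_sched_tick();	/* Safe to go dyntick-idle. */
		wait_for_interrupt();		/* e.g. wfi/hlt wrapper */
		start_sched_tick();
		rcu_cleanup_after_idle(cpu);	/* Cancel rcu_idle_gp_timer. */
		local_irq_enable();
	}
}
#endif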

#endif /* #else #if !defined(CONFIG_RCU_FAST_NO_HZ) */