/*
 * Read-Copy Update mechanism for mutual exclusion (tree-based version)
 * Internal non-public definitions that provide either classic
 * or preemptible semantics.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
 *
 * Copyright Red Hat, 2009
 * Copyright IBM Corporation, 2009
 *
 * Author: Ingo Molnar <mingo@elte.hu>
 *	   Paul E. McKenney <paulmck@linux.vnet.ibm.com>
 */

#include <linux/delay.h>
#include <linux/gfp.h>
#include <linux/oom.h>
#include <linux/smpboot.h>

#define RCU_KTHREAD_PRIO 1

#ifdef CONFIG_RCU_BOOST
#define RCU_BOOST_PRIO CONFIG_RCU_BOOST_PRIO
#else
#define RCU_BOOST_PRIO RCU_KTHREAD_PRIO
#endif

#ifdef CONFIG_RCU_NOCB_CPU
static cpumask_var_t rcu_nocb_mask; /* CPUs to have callbacks offloaded. */
static bool have_rcu_nocb_mask;	    /* Was rcu_nocb_mask allocated? */
static bool __read_mostly rcu_nocb_poll;    /* Offload kthreads are to poll. */
static char __initdata nocb_buf[NR_CPUS * 5];
#endif /* #ifdef CONFIG_RCU_NOCB_CPU */
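
/*
 * Illustrative note (a sketch, not code from this file): on kernels with
 * CONFIG_RCU_NOCB_CPU, the set of no-callbacks CPUs and the polling
 * behavior are normally chosen on the kernel command line, e.g.:
 *
 *	rcu_nocbs=1-3		offload callbacks for CPUs 1, 2, and 3
 *	rcu_nocb_poll		have the offload kthreads poll for work
 *
 * The parameter parsing itself lives outside this excerpt, so treat the
 * above as a sketch of the intended configuration interface.
 */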

/*
 * Check the RCU kernel configuration parameters and print informative
 * messages about anything out of the ordinary.  If you like #ifdef, you
 * will love this function.
 */
static void __init rcu_bootup_announce_oddness(void)
{
#ifdef CONFIG_RCU_TRACE
	printk(KERN_INFO "\tRCU debugfs-based tracing is enabled.\n");
#endif
#if (defined(CONFIG_64BIT) && CONFIG_RCU_FANOUT != 64) || (!defined(CONFIG_64BIT) && CONFIG_RCU_FANOUT != 32)
	printk(KERN_INFO "\tCONFIG_RCU_FANOUT set to non-default value of %d\n",
	       CONFIG_RCU_FANOUT);
#endif
#ifdef CONFIG_RCU_FANOUT_EXACT
	printk(KERN_INFO "\tHierarchical RCU autobalancing is disabled.\n");
#endif
#ifdef CONFIG_RCU_FAST_NO_HZ
	printk(KERN_INFO
	       "\tRCU dyntick-idle grace-period acceleration is enabled.\n");
#endif
#ifdef CONFIG_PROVE_RCU
	printk(KERN_INFO "\tRCU lockdep checking is enabled.\n");
#endif
#ifdef CONFIG_RCU_TORTURE_TEST_RUNNABLE
	printk(KERN_INFO "\tRCU torture testing starts during boot.\n");
#endif
#if defined(CONFIG_TREE_PREEMPT_RCU) && !defined(CONFIG_RCU_CPU_STALL_VERBOSE)
	printk(KERN_INFO "\tDump stacks of tasks blocking RCU-preempt GP.\n");
#endif
#if defined(CONFIG_RCU_CPU_STALL_INFO)
	printk(KERN_INFO "\tAdditional per-CPU info printed with stalls.\n");
#endif
#if NUM_RCU_LVL_4 != 0
	printk(KERN_INFO "\tFour-level hierarchy is enabled.\n");
#endif
	if (rcu_fanout_leaf != CONFIG_RCU_FANOUT_LEAF)
		printk(KERN_INFO "\tExperimental boot-time adjustment of leaf fanout to %d.\n", rcu_fanout_leaf);
	if (nr_cpu_ids != NR_CPUS)
		printk(KERN_INFO "\tRCU restricting CPUs from NR_CPUS=%d to nr_cpu_ids=%d.\n", NR_CPUS, nr_cpu_ids);
#ifdef CONFIG_RCU_NOCB_CPU
	if (have_rcu_nocb_mask) {
		if (cpumask_test_cpu(0, rcu_nocb_mask)) {
			cpumask_clear_cpu(0, rcu_nocb_mask);
			pr_info("\tCPU 0: illegal no-CBs CPU (cleared).\n");
		}
		cpulist_scnprintf(nocb_buf, sizeof(nocb_buf), rcu_nocb_mask);
		pr_info("\tExperimental no-CBs CPUs: %s.\n", nocb_buf);
		if (rcu_nocb_poll)
			pr_info("\tExperimental polled no-CBs CPUs.\n");
	}
#endif /* #ifdef CONFIG_RCU_NOCB_CPU */
}

#ifdef CONFIG_TREE_PREEMPT_RCU

struct rcu_state rcu_preempt_state =
	RCU_STATE_INITIALIZER(rcu_preempt, call_rcu);
DEFINE_PER_CPU(struct rcu_data, rcu_preempt_data);
static struct rcu_state *rcu_state = &rcu_preempt_state;

static int rcu_preempted_readers_exp(struct rcu_node *rnp);

/*
 * Tell them what RCU they are running.
 */
static void __init rcu_bootup_announce(void)
{
	printk(KERN_INFO "Preemptible hierarchical RCU implementation.\n");
	rcu_bootup_announce_oddness();
}

/*
 * Return the number of RCU-preempt batches processed thus far
 * for debug and statistics.
 */
long rcu_batches_completed_preempt(void)
{
	return rcu_preempt_state.completed;
}
EXPORT_SYMBOL_GPL(rcu_batches_completed_preempt);

/*
 * Return the number of RCU batches processed thus far for debug & stats.
 */
long rcu_batches_completed(void)
{
	return rcu_batches_completed_preempt();
}
EXPORT_SYMBOL_GPL(rcu_batches_completed);

/*
 * Force a quiescent state for preemptible RCU.
 */
void rcu_force_quiescent_state(void)
{
	force_quiescent_state(&rcu_preempt_state);
}
EXPORT_SYMBOL_GPL(rcu_force_quiescent_state);

/*
 * Record a preemptible-RCU quiescent state for the specified CPU.  Note
 * that this does not necessarily mean that the task currently running
 * on the CPU is in a quiescent state:  Instead, it means that the
 * current grace period need not wait on any RCU read-side critical
 * section that starts later on this CPU.  There might be any number of
 * tasks blocked while in an RCU read-side critical section.
 *
 * Unlike the other rcu_*_qs() functions, callers to this function
 * must disable irqs in order to protect the assignment to
 * ->rcu_read_unlock_special.
 */
static void rcu_preempt_qs(int cpu)
{
	struct rcu_data *rdp = &per_cpu(rcu_preempt_data, cpu);

	if (rdp->passed_quiesce == 0)
		trace_rcu_grace_period("rcu_preempt", rdp->gpnum, "cpuqs");
	rdp->passed_quiesce = 1;
	current->rcu_read_unlock_special &= ~RCU_READ_UNLOCK_NEED_QS;
}
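
/*
 * Illustrative calling pattern (a sketch mirroring the real call sites
 * later in this file): irqs must be disabled across the call:
 *
 *	local_irq_save(flags);
 *	rcu_preempt_qs(smp_processor_id());
 *	local_irq_restore(flags);
 */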

/*
 * We have entered the scheduler, and the current task might soon be
 * context-switched away from.  If this task is in an RCU read-side
 * critical section, we will no longer be able to rely on the CPU to
 * record that fact, so we enqueue the task on the blkd_tasks list.
 * The task will dequeue itself when it exits the outermost enclosing
 * RCU read-side critical section.  Therefore, the current grace period
 * cannot be permitted to complete until the blkd_tasks list entries
 * predating the current grace period drain, in other words, until
 * rnp->gp_tasks becomes NULL.
 *
 * Caller must disable preemption.
 */
static void rcu_preempt_note_context_switch(int cpu)
{
	struct task_struct *t = current;
	unsigned long flags;
	struct rcu_data *rdp;
	struct rcu_node *rnp;

	if (t->rcu_read_lock_nesting > 0 &&
	    (t->rcu_read_unlock_special & RCU_READ_UNLOCK_BLOCKED) == 0) {

		/* Possibly blocking in an RCU read-side critical section. */
		rdp = per_cpu_ptr(rcu_preempt_state.rda, cpu);
		rnp = rdp->mynode;
		raw_spin_lock_irqsave(&rnp->lock, flags);
		t->rcu_read_unlock_special |= RCU_READ_UNLOCK_BLOCKED;
		t->rcu_blocked_node = rnp;

		/*
		 * If this CPU has already checked in, then this task
		 * will hold up the next grace period rather than the
		 * current grace period.  Queue the task accordingly.
		 * If the task is queued for the current grace period
		 * (i.e., this CPU has not yet passed through a quiescent
		 * state for the current grace period), then as long
		 * as that task remains queued, the current grace period
		 * cannot end.  Note that there is some uncertainty as
		 * to exactly when the current grace period started.
		 * We take a conservative approach, which can result
		 * in unnecessarily waiting on tasks that started very
		 * slightly after the current grace period began.  C'est
		 * la vie!!!
		 *
		 * But first, note that the current CPU must still be
		 * on line!
		 */
		WARN_ON_ONCE((rdp->grpmask & rnp->qsmaskinit) == 0);
		WARN_ON_ONCE(!list_empty(&t->rcu_node_entry));
		if ((rnp->qsmask & rdp->grpmask) && rnp->gp_tasks != NULL) {
			list_add(&t->rcu_node_entry, rnp->gp_tasks->prev);
			rnp->gp_tasks = &t->rcu_node_entry;
#ifdef CONFIG_RCU_BOOST
			if (rnp->boost_tasks != NULL)
				rnp->boost_tasks = rnp->gp_tasks;
#endif /* #ifdef CONFIG_RCU_BOOST */
		} else {
			list_add(&t->rcu_node_entry, &rnp->blkd_tasks);
			if (rnp->qsmask & rdp->grpmask)
				rnp->gp_tasks = &t->rcu_node_entry;
		}
		trace_rcu_preempt_task(rdp->rsp->name,
				       t->pid,
				       (rnp->qsmask & rdp->grpmask)
				       ? rnp->gpnum
				       : rnp->gpnum + 1);
		raw_spin_unlock_irqrestore(&rnp->lock, flags);
	} else if (t->rcu_read_lock_nesting < 0 &&
		   t->rcu_read_unlock_special) {

		/*
		 * Complete exit from RCU read-side critical section on
		 * behalf of preempted instance of __rcu_read_unlock().
		 */
		rcu_read_unlock_special(t);
	}

	/*
	 * Either we were not in an RCU read-side critical section to
	 * begin with, or we have now recorded that critical section
	 * globally.  Either way, we can now note a quiescent state
	 * for this CPU.  Again, if we were in an RCU read-side critical
	 * section, and if that critical section was blocking the current
	 * grace period, then the fact that the task has been enqueued
	 * means that we continue to block the current grace period.
	 */
	local_irq_save(flags);
	rcu_preempt_qs(cpu);
	local_irq_restore(flags);
}

/*
 * Check for preempted RCU readers blocking the current grace period
 * for the specified rcu_node structure.  If the caller needs a reliable
 * answer, it must hold the rcu_node's ->lock.
 */
static int rcu_preempt_blocked_readers_cgp(struct rcu_node *rnp)
{
	return rnp->gp_tasks != NULL;
}

/*
 * Record a quiescent state for all tasks that were previously queued
 * on the specified rcu_node structure and that were blocking the current
 * RCU grace period.  The caller must hold the specified rnp->lock with
 * irqs disabled, and this lock is released upon return, but irqs remain
 * disabled.
 */
static void rcu_report_unblock_qs_rnp(struct rcu_node *rnp, unsigned long flags)
	__releases(rnp->lock)
{
	unsigned long mask;
	struct rcu_node *rnp_p;

	if (rnp->qsmask != 0 || rcu_preempt_blocked_readers_cgp(rnp)) {
		raw_spin_unlock_irqrestore(&rnp->lock, flags);
		return;  /* Still need more quiescent states! */
	}

	rnp_p = rnp->parent;
	if (rnp_p == NULL) {
		/*
		 * Either there is only one rcu_node in the tree,
		 * or tasks were kicked up to root rcu_node due to
		 * CPUs going offline.
		 */
		rcu_report_qs_rsp(&rcu_preempt_state, flags);
		return;
	}

	/* Report up the rest of the hierarchy. */
	mask = rnp->grpmask;
	raw_spin_unlock(&rnp->lock);	/* irqs remain disabled. */
	raw_spin_lock(&rnp_p->lock);	/* irqs already disabled. */
	rcu_report_qs_rnp(mask, &rcu_preempt_state, rnp_p, flags);
}

/*
 * Advance a ->blkd_tasks-list pointer to the next entry, returning
 * NULL instead if this is the last entry.
 */
static struct list_head *rcu_next_node_entry(struct task_struct *t,
					     struct rcu_node *rnp)
{
	struct list_head *np;

	np = t->rcu_node_entry.next;
	if (np == &rnp->blkd_tasks)
		np = NULL;
	return np;
}

/*
 * Handle special cases during rcu_read_unlock(), such as needing to
 * notify RCU core processing or a task having blocked during the RCU
 * read-side critical section.
 */
void rcu_read_unlock_special(struct task_struct *t)
{
	int empty;
	int empty_exp;
	int empty_exp_now;
	unsigned long flags;
	struct list_head *np;
#ifdef CONFIG_RCU_BOOST
	struct rt_mutex *rbmp = NULL;
#endif /* #ifdef CONFIG_RCU_BOOST */
	struct rcu_node *rnp;
	int special;

	/* NMI handlers cannot block and cannot safely manipulate state. */
	if (in_nmi())
		return;

	local_irq_save(flags);

	/*
	 * If RCU core is waiting for this CPU to exit critical section,
	 * let it know that we have done so.
	 */
	special = t->rcu_read_unlock_special;
	if (special & RCU_READ_UNLOCK_NEED_QS) {
		rcu_preempt_qs(smp_processor_id());
	}

	/* Hardware IRQ handlers cannot block. */
	if (in_irq() || in_serving_softirq()) {
		local_irq_restore(flags);
		return;
	}

	/* Clean up if blocked during RCU read-side critical section. */
	if (special & RCU_READ_UNLOCK_BLOCKED) {
		t->rcu_read_unlock_special &= ~RCU_READ_UNLOCK_BLOCKED;

		/*
		 * Remove this task from the list it blocked on.  The
		 * task can migrate while we acquire the lock, but at
		 * most one time.  So at most two passes through loop.
		 */
		for (;;) {
			rnp = t->rcu_blocked_node;
			raw_spin_lock(&rnp->lock);  /* irqs already disabled. */
			if (rnp == t->rcu_blocked_node)
				break;
			raw_spin_unlock(&rnp->lock); /* irqs remain disabled. */
		}
		empty = !rcu_preempt_blocked_readers_cgp(rnp);
		empty_exp = !rcu_preempted_readers_exp(rnp);
		smp_mb(); /* ensure expedited fastpath sees end of RCU c-s. */
		np = rcu_next_node_entry(t, rnp);
		list_del_init(&t->rcu_node_entry);
		t->rcu_blocked_node = NULL;
		trace_rcu_unlock_preempted_task("rcu_preempt",
						rnp->gpnum, t->pid);
		if (&t->rcu_node_entry == rnp->gp_tasks)
			rnp->gp_tasks = np;
		if (&t->rcu_node_entry == rnp->exp_tasks)
			rnp->exp_tasks = np;
#ifdef CONFIG_RCU_BOOST
		if (&t->rcu_node_entry == rnp->boost_tasks)
			rnp->boost_tasks = np;
		/* Snapshot/clear ->rcu_boost_mutex with rcu_node lock held. */
		if (t->rcu_boost_mutex) {
			rbmp = t->rcu_boost_mutex;
			t->rcu_boost_mutex = NULL;
		}
#endif /* #ifdef CONFIG_RCU_BOOST */

		/*
		 * If this was the last task on the current list, and if
		 * we aren't waiting on any CPUs, report the quiescent state.
		 * Note that rcu_report_unblock_qs_rnp() releases rnp->lock,
		 * so we must take a snapshot of the expedited state.
		 */
		empty_exp_now = !rcu_preempted_readers_exp(rnp);
		if (!empty && !rcu_preempt_blocked_readers_cgp(rnp)) {
			trace_rcu_quiescent_state_report("preempt_rcu",
							 rnp->gpnum,
							 0, rnp->qsmask,
							 rnp->level,
							 rnp->grplo,
							 rnp->grphi,
							 !!rnp->gp_tasks);
			rcu_report_unblock_qs_rnp(rnp, flags);
		} else {
			raw_spin_unlock_irqrestore(&rnp->lock, flags);
		}

#ifdef CONFIG_RCU_BOOST
		/* Unboost if we were boosted. */
		if (rbmp)
			rt_mutex_unlock(rbmp);
#endif /* #ifdef CONFIG_RCU_BOOST */

		/*
		 * If this was the last task on the expedited lists,
		 * then we need to report up the rcu_node hierarchy.
		 */
		if (!empty_exp && empty_exp_now)
			rcu_report_exp_rnp(&rcu_preempt_state, rnp, true);
	} else {
		local_irq_restore(flags);
	}
}

#ifdef CONFIG_RCU_CPU_STALL_VERBOSE

/*
 * Dump detailed information for all tasks blocking the current RCU
 * grace period on the specified rcu_node structure.
 */
static void rcu_print_detail_task_stall_rnp(struct rcu_node *rnp)
{
	unsigned long flags;
	struct task_struct *t;

	raw_spin_lock_irqsave(&rnp->lock, flags);
	if (!rcu_preempt_blocked_readers_cgp(rnp)) {
		raw_spin_unlock_irqrestore(&rnp->lock, flags);
		return;
	}
	t = list_entry(rnp->gp_tasks,
		       struct task_struct, rcu_node_entry);
	list_for_each_entry_continue(t, &rnp->blkd_tasks, rcu_node_entry)
		sched_show_task(t);
	raw_spin_unlock_irqrestore(&rnp->lock, flags);
}

/*
 * Dump detailed information for all tasks blocking the current RCU
 * grace period.
 */
static void rcu_print_detail_task_stall(struct rcu_state *rsp)
{
	struct rcu_node *rnp = rcu_get_root(rsp);

	rcu_print_detail_task_stall_rnp(rnp);
	rcu_for_each_leaf_node(rsp, rnp)
		rcu_print_detail_task_stall_rnp(rnp);
}

#else /* #ifdef CONFIG_RCU_CPU_STALL_VERBOSE */

static void rcu_print_detail_task_stall(struct rcu_state *rsp)
{
}

#endif /* #else #ifdef CONFIG_RCU_CPU_STALL_VERBOSE */

#ifdef CONFIG_RCU_CPU_STALL_INFO

static void rcu_print_task_stall_begin(struct rcu_node *rnp)
{
	printk(KERN_ERR "\tTasks blocked on level-%d rcu_node (CPUs %d-%d):",
	       rnp->level, rnp->grplo, rnp->grphi);
}

static void rcu_print_task_stall_end(void)
{
	printk(KERN_CONT "\n");
}

#else /* #ifdef CONFIG_RCU_CPU_STALL_INFO */

static void rcu_print_task_stall_begin(struct rcu_node *rnp)
{
}

static void rcu_print_task_stall_end(void)
{
}

#endif /* #else #ifdef CONFIG_RCU_CPU_STALL_INFO */

/*
 * Scan the current list of tasks blocked within RCU read-side critical
 * sections, printing out the tid of each.
 */
static int rcu_print_task_stall(struct rcu_node *rnp)
{
	struct task_struct *t;
	int ndetected = 0;

	if (!rcu_preempt_blocked_readers_cgp(rnp))
		return 0;
	rcu_print_task_stall_begin(rnp);
	t = list_entry(rnp->gp_tasks,
		       struct task_struct, rcu_node_entry);
	list_for_each_entry_continue(t, &rnp->blkd_tasks, rcu_node_entry) {
		printk(KERN_CONT " P%d", t->pid);
		ndetected++;
	}
	rcu_print_task_stall_end();
	return ndetected;
}

/*
 * Check that the list of blocked tasks for the newly completed grace
 * period is in fact empty.  It is a serious bug to complete a grace
 * period that still has RCU readers blocked!  This function must be
 * invoked -before- updating this rnp's ->gpnum, and the rnp's ->lock
 * must be held by the caller.
 *
 * Also, if there are blocked tasks on the list, they automatically
 * block the newly created grace period, so set up ->gp_tasks accordingly.
 */
static void rcu_preempt_check_blocked_tasks(struct rcu_node *rnp)
{
	WARN_ON_ONCE(rcu_preempt_blocked_readers_cgp(rnp));
	if (!list_empty(&rnp->blkd_tasks))
		rnp->gp_tasks = rnp->blkd_tasks.next;
	WARN_ON_ONCE(rnp->qsmask);
}

#ifdef CONFIG_HOTPLUG_CPU

/*
 * Handle tasklist migration for case in which all CPUs covered by the
 * specified rcu_node have gone offline.  Move them up to the root
 * rcu_node.  The reason for not just moving them to the immediate
 * parent is to remove the need for rcu_read_unlock_special() to
 * make more than two attempts to acquire the target rcu_node's lock.
 *
 * Returns non-zero if there were tasks blocking the current RCU grace
 * period on the specified rcu_node structure.
 *
 * The caller must hold rnp->lock with irqs disabled.
 */
static int rcu_preempt_offline_tasks(struct rcu_state *rsp,
				     struct rcu_node *rnp,
				     struct rcu_data *rdp)
{
	struct list_head *lp;
	struct list_head *lp_root;
	int retval = 0;
	struct rcu_node *rnp_root = rcu_get_root(rsp);
	struct task_struct *t;

	if (rnp == rnp_root) {
		WARN_ONCE(1, "Last CPU thought to be offlined?");
		return 0;  /* Shouldn't happen: at least one CPU online. */
	}

	/* If we are on an internal node, complain bitterly. */
	WARN_ON_ONCE(rnp != rdp->mynode);

	/*
	 * Move tasks up to root rcu_node.  Don't try to get fancy for
	 * this corner-case operation -- just put this node's tasks
	 * at the head of the root node's list, and update the root node's
	 * ->gp_tasks and ->exp_tasks pointers to those of this node's,
	 * if non-NULL.  This might result in waiting for more tasks than
	 * absolutely necessary, but this is a good performance/complexity
	 * tradeoff.
	 */
	if (rcu_preempt_blocked_readers_cgp(rnp) && rnp->qsmask == 0)
		retval |= RCU_OFL_TASKS_NORM_GP;
	if (rcu_preempted_readers_exp(rnp))
		retval |= RCU_OFL_TASKS_EXP_GP;
	lp = &rnp->blkd_tasks;
	lp_root = &rnp_root->blkd_tasks;
	while (!list_empty(lp)) {
		t = list_entry(lp->next, typeof(*t), rcu_node_entry);
		raw_spin_lock(&rnp_root->lock); /* irqs already disabled */
		list_del(&t->rcu_node_entry);
		t->rcu_blocked_node = rnp_root;
		list_add(&t->rcu_node_entry, lp_root);
		if (&t->rcu_node_entry == rnp->gp_tasks)
			rnp_root->gp_tasks = rnp->gp_tasks;
		if (&t->rcu_node_entry == rnp->exp_tasks)
			rnp_root->exp_tasks = rnp->exp_tasks;
#ifdef CONFIG_RCU_BOOST
		if (&t->rcu_node_entry == rnp->boost_tasks)
			rnp_root->boost_tasks = rnp->boost_tasks;
#endif /* #ifdef CONFIG_RCU_BOOST */
		raw_spin_unlock(&rnp_root->lock); /* irqs still disabled */
	}

	rnp->gp_tasks = NULL;
	rnp->exp_tasks = NULL;
#ifdef CONFIG_RCU_BOOST
	rnp->boost_tasks = NULL;
	/*
	 * If the root is being boosted and the leaf was not, make sure
	 * that we boost the tasks blocking the current grace period
	 * in this case.
	 */
	raw_spin_lock(&rnp_root->lock); /* irqs already disabled */
	if (rnp_root->boost_tasks != NULL &&
	    rnp_root->boost_tasks != rnp_root->gp_tasks &&
	    rnp_root->boost_tasks != rnp_root->exp_tasks)
		rnp_root->boost_tasks = rnp_root->gp_tasks;
	raw_spin_unlock(&rnp_root->lock); /* irqs still disabled */
#endif /* #ifdef CONFIG_RCU_BOOST */

	return retval;
}

#endif /* #ifdef CONFIG_HOTPLUG_CPU */

/*
 * Check for a quiescent state from the current CPU.  When a task blocks,
 * the task is recorded in the corresponding CPU's rcu_node structure,
 * which is checked elsewhere.
 *
 * Caller must disable hard irqs.
 */
static void rcu_preempt_check_callbacks(int cpu)
{
	struct task_struct *t = current;

	if (t->rcu_read_lock_nesting == 0) {
		rcu_preempt_qs(cpu);
		return;
	}
	if (t->rcu_read_lock_nesting > 0 &&
	    per_cpu(rcu_preempt_data, cpu).qs_pending)
		t->rcu_read_unlock_special |= RCU_READ_UNLOCK_NEED_QS;
}

#ifdef CONFIG_RCU_BOOST

static void rcu_preempt_do_callbacks(void)
{
	rcu_do_batch(&rcu_preempt_state, &__get_cpu_var(rcu_preempt_data));
}

#endif /* #ifdef CONFIG_RCU_BOOST */

/*
 * Queue a preemptible-RCU callback for invocation after a grace period.
 */
void call_rcu(struct rcu_head *head, void (*func)(struct rcu_head *rcu))
{
	__call_rcu(head, func, &rcu_preempt_state, -1, 0);
}
EXPORT_SYMBOL_GPL(call_rcu);
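
/*
 * Example usage (an illustrative sketch, not code from this file): free
 * a structure after a grace period by embedding an rcu_head in it.  The
 * "struct foo" and foo_reclaim() names are hypothetical:
 *
 *	struct foo {
 *		struct rcu_head rcu;
 *		int data;
 *	};
 *
 *	static void foo_reclaim(struct rcu_head *head)
 *	{
 *		kfree(container_of(head, struct foo, rcu));
 *	}
 *
 *	call_rcu(&old_foo->rcu, foo_reclaim);
 */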

/*
 * Queue an RCU callback for lazy invocation after a grace period.
 * This will likely be later named something like "call_rcu_lazy()",
 * but this change will require some way of tagging the lazy RCU
 * callbacks in the list of pending callbacks.  Until then, this
 * function may only be called from __kfree_rcu().
 */
void kfree_call_rcu(struct rcu_head *head,
		    void (*func)(struct rcu_head *rcu))
{
	__call_rcu(head, func, &rcu_preempt_state, -1, 1);
}
EXPORT_SYMBOL_GPL(kfree_call_rcu);
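
/*
 * Illustrative note (a sketch): callers normally reach this through the
 * kfree_rcu() wrapper rather than calling it directly, e.g.
 *
 *	kfree_rcu(old_foo, rcu);
 *
 * where "rcu" names the rcu_head field within *old_foo (the old_foo
 * variable is hypothetical).
 */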

/**
 * synchronize_rcu - wait until a grace period has elapsed.
 *
 * Control will return to the caller some time after a full grace
 * period has elapsed, in other words after all currently executing RCU
 * read-side critical sections have completed.  Note, however, that
 * upon return from synchronize_rcu(), the caller might well be executing
 * concurrently with new RCU read-side critical sections that began while
 * synchronize_rcu() was waiting.  RCU read-side critical sections are
 * delimited by rcu_read_lock() and rcu_read_unlock(), and may be nested.
 *
 * See the description of synchronize_sched() for more detailed information
 * on memory ordering guarantees.
 */
void synchronize_rcu(void)
{
	rcu_lockdep_assert(!lock_is_held(&rcu_bh_lock_map) &&
			   !lock_is_held(&rcu_lock_map) &&
			   !lock_is_held(&rcu_sched_lock_map),
			   "Illegal synchronize_rcu() in RCU read-side critical section");
	if (!rcu_scheduler_active)
		return;
	if (rcu_expedited)
		synchronize_rcu_expedited();
	else
		wait_rcu_gp(call_rcu);
}
EXPORT_SYMBOL_GPL(synchronize_rcu);
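
/*
 * Example update-side usage (an illustrative sketch; gp, gp_lock, and
 * the new/old variables are hypothetical): publish a new version of a
 * structure, wait out pre-existing readers, then free the old one:
 *
 *	old = rcu_dereference_protected(gp, lockdep_is_held(&gp_lock));
 *	rcu_assign_pointer(gp, new);
 *	synchronize_rcu();	-- wait for pre-existing readers --
 *	kfree(old);
 */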

static DECLARE_WAIT_QUEUE_HEAD(sync_rcu_preempt_exp_wq);
static unsigned long sync_rcu_preempt_exp_count;
static DEFINE_MUTEX(sync_rcu_preempt_exp_mutex);

/*
 * Return non-zero if there are any tasks in RCU read-side critical
 * sections blocking the current preemptible-RCU expedited grace period.
 * If there is no preemptible-RCU expedited grace period currently in
 * progress, returns zero unconditionally.
 */
static int rcu_preempted_readers_exp(struct rcu_node *rnp)
{
	return rnp->exp_tasks != NULL;
}

/*
 * Return non-zero if there is no RCU expedited grace period in progress
 * for the specified rcu_node structure, in other words, if all CPUs and
 * tasks covered by the specified rcu_node structure have done their bit
 * for the current expedited grace period.  Works only for preemptible
 * RCU -- other RCU implementations use other means.
 *
 * Caller must hold sync_rcu_preempt_exp_mutex.
 */
static int sync_rcu_preempt_exp_done(struct rcu_node *rnp)
{
	return !rcu_preempted_readers_exp(rnp) &&
	       ACCESS_ONCE(rnp->expmask) == 0;
}

/*
 * Report the exit from RCU read-side critical section for the last task
 * that queued itself during or before the current expedited preemptible-RCU
 * grace period.  This event is reported either to the rcu_node structure on
 * which the task was queued or to one of that rcu_node structure's ancestors,
 * recursively up the tree.  (Calm down, calm down, we do the recursion
 * iteratively!)
 *
 * Most callers will set the "wake" flag, but the task initiating the
 * expedited grace period need not wake itself.
 *
 * Caller must hold sync_rcu_preempt_exp_mutex.
 */
static void rcu_report_exp_rnp(struct rcu_state *rsp, struct rcu_node *rnp,
			       bool wake)
{
	unsigned long flags;
	unsigned long mask;

	raw_spin_lock_irqsave(&rnp->lock, flags);
	for (;;) {
		if (!sync_rcu_preempt_exp_done(rnp)) {
			raw_spin_unlock_irqrestore(&rnp->lock, flags);
			break;
		}
		if (rnp->parent == NULL) {
			raw_spin_unlock_irqrestore(&rnp->lock, flags);
			if (wake)
				wake_up(&sync_rcu_preempt_exp_wq);
			break;
		}
		mask = rnp->grpmask;
		raw_spin_unlock(&rnp->lock); /* irqs remain disabled */
		rnp = rnp->parent;
		raw_spin_lock(&rnp->lock); /* irqs already disabled */
		rnp->expmask &= ~mask;
	}
}

/*
 * Snapshot the tasks blocking the newly started preemptible-RCU expedited
 * grace period for the specified rcu_node structure.  If there are no such
 * tasks, report it up the rcu_node hierarchy.
 *
 * Caller must hold sync_rcu_preempt_exp_mutex and must exclude
 * CPU hotplug operations.
 */
static void
sync_rcu_preempt_exp_init(struct rcu_state *rsp, struct rcu_node *rnp)
{
	unsigned long flags;
	int must_wait = 0;

	raw_spin_lock_irqsave(&rnp->lock, flags);
	if (list_empty(&rnp->blkd_tasks)) {
		raw_spin_unlock_irqrestore(&rnp->lock, flags);
	} else {
		rnp->exp_tasks = rnp->blkd_tasks.next;
		rcu_initiate_boost(rnp, flags);  /* releases rnp->lock */
		must_wait = 1;
	}
	if (!must_wait)
		rcu_report_exp_rnp(rsp, rnp, false); /* Don't wake self. */
}

/**
 * synchronize_rcu_expedited - Brute-force RCU grace period
 *
 * Wait for an RCU-preempt grace period, but expedite it.  The basic
 * idea is to invoke synchronize_sched_expedited() to push all the tasks to
 * the ->blkd_tasks lists and wait for this list to drain.  This consumes
 * significant time on all CPUs and is unfriendly to real-time workloads,
 * and is thus not recommended for any sort of common-case code.
 * In fact, if you are using synchronize_rcu_expedited() in a loop,
 * please restructure your code to batch your updates, and then use a
 * single synchronize_rcu() instead.
 *
 * Note that it is illegal to call this function while holding any lock
 * that is acquired by a CPU-hotplug notifier.  And yes, it is also illegal
 * to call this function from a CPU-hotplug notifier.  Failing to observe
 * these restrictions will result in deadlock.
 */
void synchronize_rcu_expedited(void)
{
	unsigned long flags;
	struct rcu_node *rnp;
	struct rcu_state *rsp = &rcu_preempt_state;
	unsigned long snap;
	int trycount = 0;

	smp_mb(); /* Caller's modifications seen first by other CPUs. */
	snap = ACCESS_ONCE(sync_rcu_preempt_exp_count) + 1;
	smp_mb(); /* Above access cannot bleed into critical section. */

	/*
	 * Block CPU-hotplug operations.  This means that any CPU-hotplug
	 * operation that finds an rcu_node structure with tasks in the
	 * process of being boosted will know that all tasks blocking
	 * this expedited grace period will already be in the process of
	 * being boosted.  This simplifies the process of moving tasks
	 * from leaf to root rcu_node structures.
	 */
	get_online_cpus();

	/*
	 * Acquire lock, falling back to synchronize_rcu() if too many
	 * lock-acquisition failures.  Of course, if someone does the
	 * expedited grace period for us, just leave.
	 */
	while (!mutex_trylock(&sync_rcu_preempt_exp_mutex)) {
		if (ULONG_CMP_LT(snap,
		    ACCESS_ONCE(sync_rcu_preempt_exp_count))) {
			put_online_cpus();
			goto mb_ret; /* Others did our work for us. */
		}
		if (trycount++ < 10) {
			udelay(trycount * num_online_cpus());
		} else {
			put_online_cpus();
			wait_rcu_gp(call_rcu);
			return;
		}
	}
	if (ULONG_CMP_LT(snap, ACCESS_ONCE(sync_rcu_preempt_exp_count))) {
		put_online_cpus();
		goto unlock_mb_ret; /* Others did our work for us. */
	}

	/* Force all RCU readers onto ->blkd_tasks lists. */
	synchronize_sched_expedited();

	/* Initialize ->expmask for all non-leaf rcu_node structures. */
	rcu_for_each_nonleaf_node_breadth_first(rsp, rnp) {
		raw_spin_lock_irqsave(&rnp->lock, flags);
		rnp->expmask = rnp->qsmaskinit;
		raw_spin_unlock_irqrestore(&rnp->lock, flags);
	}

	/* Snapshot current state of ->blkd_tasks lists. */
	rcu_for_each_leaf_node(rsp, rnp)
		sync_rcu_preempt_exp_init(rsp, rnp);
	if (NUM_RCU_NODES > 1)
		sync_rcu_preempt_exp_init(rsp, rcu_get_root(rsp));

	put_online_cpus();

	/* Wait for snapshotted ->blkd_tasks lists to drain. */
	rnp = rcu_get_root(rsp);
	wait_event(sync_rcu_preempt_exp_wq,
		   sync_rcu_preempt_exp_done(rnp));

	/* Clean up and exit. */
	smp_mb(); /* ensure expedited GP seen before counter increment. */
	ACCESS_ONCE(sync_rcu_preempt_exp_count)++;
unlock_mb_ret:
	mutex_unlock(&sync_rcu_preempt_exp_mutex);
mb_ret:
	smp_mb(); /* ensure subsequent action seen after grace period. */
}
EXPORT_SYMBOL_GPL(synchronize_rcu_expedited);
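
/*
 * Batching sketch (illustrative; do_update() is a hypothetical stand-in
 * for the caller's update-side work): instead of
 *
 *	for each update:
 *		do_update();
 *		synchronize_rcu_expedited();
 *
 * restructure as
 *
 *	for each update:
 *		do_update();
 *	synchronize_rcu();
 *
 * so that a single grace period covers the whole batch.
 */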

/**
 * rcu_barrier - Wait until all in-flight call_rcu() callbacks complete.
 *
 * Note that this primitive does not necessarily wait for an RCU grace period
 * to complete.  For example, if there are no RCU callbacks queued anywhere
 * in the system, then rcu_barrier() is within its rights to return
 * immediately, without waiting for anything, much less an RCU grace period.
 */
void rcu_barrier(void)
{
	_rcu_barrier(&rcu_preempt_state);
}
EXPORT_SYMBOL_GPL(rcu_barrier);
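
/*
 * Typical use (an illustrative sketch): a module that posts call_rcu()
 * callbacks must wait for all of them to be invoked before its code and
 * data disappear.  The foo_exit() name is hypothetical:
 *
 *	static void __exit foo_exit(void)
 *	{
 *		-- stop posting new call_rcu() callbacks --
 *		rcu_barrier();
 *		-- now safe to free the module's resources --
 *	}
 */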

/*
 * Initialize preemptible RCU's state structures.
 */
static void __init __rcu_init_preempt(void)
{
	rcu_init_one(&rcu_preempt_state, &rcu_preempt_data);
}

#else /* #ifdef CONFIG_TREE_PREEMPT_RCU */

static struct rcu_state *rcu_state = &rcu_sched_state;

/*
 * Tell them what RCU they are running.
 */
static void __init rcu_bootup_announce(void)
{
	printk(KERN_INFO "Hierarchical RCU implementation.\n");
	rcu_bootup_announce_oddness();
}

/*
 * Return the number of RCU batches processed thus far for debug & stats.
 */
long rcu_batches_completed(void)
{
	return rcu_batches_completed_sched();
}
EXPORT_SYMBOL_GPL(rcu_batches_completed);

/*
 * Force a quiescent state for RCU, which, because there is no preemptible
 * RCU, becomes the same as rcu-sched.
 */
void rcu_force_quiescent_state(void)
{
	rcu_sched_force_quiescent_state();
}
EXPORT_SYMBOL_GPL(rcu_force_quiescent_state);

/*
 * Because preemptible RCU does not exist, we never have to check for
 * CPUs being in quiescent states.
 */
static void rcu_preempt_note_context_switch(int cpu)
{
}

/*
 * Because preemptible RCU does not exist, there are never any preempted
 * RCU readers.
 */
static int rcu_preempt_blocked_readers_cgp(struct rcu_node *rnp)
{
	return 0;
}

#ifdef CONFIG_HOTPLUG_CPU

/* Because preemptible RCU does not exist, no quieting of tasks. */
static void rcu_report_unblock_qs_rnp(struct rcu_node *rnp, unsigned long flags)
{
	raw_spin_unlock_irqrestore(&rnp->lock, flags);
}

#endif /* #ifdef CONFIG_HOTPLUG_CPU */

/*
 * Because preemptible RCU does not exist, we never have to check for
 * tasks blocked within RCU read-side critical sections.
 */
static void rcu_print_detail_task_stall(struct rcu_state *rsp)
{
}

/*
 * Because preemptible RCU does not exist, we never have to check for
 * tasks blocked within RCU read-side critical sections.
 */
static int rcu_print_task_stall(struct rcu_node *rnp)
{
	return 0;
}

/*
 * Because there is no preemptible RCU, there can be no readers blocked,
 * so there is no need to check for blocked tasks.  So check only for
 * bogus qsmask values.
 */
static void rcu_preempt_check_blocked_tasks(struct rcu_node *rnp)
{
	WARN_ON_ONCE(rnp->qsmask);
}

#ifdef CONFIG_HOTPLUG_CPU

/*
 * Because preemptible RCU does not exist, it never needs to migrate
 * tasks that were blocked within RCU read-side critical sections, and
 * such non-existent tasks cannot possibly have been blocking the current
 * grace period.
 */
static int rcu_preempt_offline_tasks(struct rcu_state *rsp,
				     struct rcu_node *rnp,
				     struct rcu_data *rdp)
{
	return 0;
}

#endif /* #ifdef CONFIG_HOTPLUG_CPU */

/*
 * Because preemptible RCU does not exist, it never has any callbacks
 * to check.
 */
static void rcu_preempt_check_callbacks(int cpu)
{
}

/*
 * Queue an RCU callback for lazy invocation after a grace period.
 * This will likely be later named something like "call_rcu_lazy()",
 * but this change will require some way of tagging the lazy RCU
 * callbacks in the list of pending callbacks.  Until then, this
 * function may only be called from __kfree_rcu().
 *
 * Because there is no preemptible RCU, we use RCU-sched instead.
 */
void kfree_call_rcu(struct rcu_head *head,
		    void (*func)(struct rcu_head *rcu))
{
	__call_rcu(head, func, &rcu_sched_state, -1, 1);
}
EXPORT_SYMBOL_GPL(kfree_call_rcu);

/*
 * Wait for an rcu-preempt grace period, but make it happen quickly.
 * But because preemptible RCU does not exist, map to rcu-sched.
 */
void synchronize_rcu_expedited(void)
{
	synchronize_sched_expedited();
}
EXPORT_SYMBOL_GPL(synchronize_rcu_expedited);

#ifdef CONFIG_HOTPLUG_CPU

/*
 * Because preemptible RCU does not exist, there is never any need to
 * report on tasks preempted in RCU read-side critical sections during
 * expedited RCU grace periods.
 */
static void rcu_report_exp_rnp(struct rcu_state *rsp, struct rcu_node *rnp,
			       bool wake)
{
}

#endif /* #ifdef CONFIG_HOTPLUG_CPU */

/*
 * Because preemptible RCU does not exist, rcu_barrier() is just
 * another name for rcu_barrier_sched().
 */
void rcu_barrier(void)
{
	rcu_barrier_sched();
}
EXPORT_SYMBOL_GPL(rcu_barrier);

/*
 * Because preemptible RCU does not exist, it need not be initialized.
 */
static void __init __rcu_init_preempt(void)
{
}

#endif /* #else #ifdef CONFIG_TREE_PREEMPT_RCU */

#ifdef CONFIG_RCU_BOOST

#include "rtmutex_common.h"

#ifdef CONFIG_RCU_TRACE

static void rcu_initiate_boost_trace(struct rcu_node *rnp)
{
	if (list_empty(&rnp->blkd_tasks))
		rnp->n_balk_blkd_tasks++;
	else if (rnp->exp_tasks == NULL && rnp->gp_tasks == NULL)
		rnp->n_balk_exp_gp_tasks++;
	else if (rnp->gp_tasks != NULL && rnp->boost_tasks != NULL)
		rnp->n_balk_boost_tasks++;
	else if (rnp->gp_tasks != NULL && rnp->qsmask != 0)
		rnp->n_balk_notblocked++;
	else if (rnp->gp_tasks != NULL &&
		 ULONG_CMP_LT(jiffies, rnp->boost_time))
		rnp->n_balk_notyet++;
	else
		rnp->n_balk_nos++;
}

#else /* #ifdef CONFIG_RCU_TRACE */

static void rcu_initiate_boost_trace(struct rcu_node *rnp)
{
}

#endif /* #else #ifdef CONFIG_RCU_TRACE */

static void rcu_wake_cond(struct task_struct *t, int status)
{
	/*
	 * If the thread is yielding, only wake it when this
	 * is invoked from idle.
	 */
	if (status != RCU_KTHREAD_YIELDING || is_idle_task(current))
		wake_up_process(t);
}

/*
 * Carry out RCU priority boosting on the task indicated by ->exp_tasks
 * or ->boost_tasks, advancing the pointer to the next task in the
 * ->blkd_tasks list.
 *
 * Note that irqs must be enabled: boosting the task can block.
 * Returns 1 if there are more tasks needing to be boosted.
 */
static int rcu_boost(struct rcu_node *rnp)
{
	unsigned long flags;
	struct rt_mutex mtx;
	struct task_struct *t;
	struct list_head *tb;

	if (rnp->exp_tasks == NULL && rnp->boost_tasks == NULL)
		return 0;  /* Nothing left to boost. */

	raw_spin_lock_irqsave(&rnp->lock, flags);

	/*
	 * Recheck under the lock: all tasks in need of boosting
	 * might exit their RCU read-side critical sections on their own.
	 */
	if (rnp->exp_tasks == NULL && rnp->boost_tasks == NULL) {
		raw_spin_unlock_irqrestore(&rnp->lock, flags);
		return 0;
	}

	/*
	 * Preferentially boost tasks blocking expedited grace periods.
	 * This cannot starve the normal grace periods because a second
	 * expedited grace period must boost all blocked tasks, including
	 * those blocking the pre-existing normal grace period.
	 */
	if (rnp->exp_tasks != NULL) {
		tb = rnp->exp_tasks;
		rnp->n_exp_boosts++;
	} else {
		tb = rnp->boost_tasks;
		rnp->n_normal_boosts++;
	}
	rnp->n_tasks_boosted++;

	/*
	 * We boost task t by manufacturing an rt_mutex that appears to
	 * be held by task t.  We leave a pointer to that rt_mutex where
	 * task t can find it, and task t will release the mutex when it
	 * exits its outermost RCU read-side critical section.  Then
	 * simply acquiring this artificial rt_mutex will boost task
	 * t's priority.  (Thanks to tglx for suggesting this approach!)
	 *
	 * Note that task t must acquire rnp->lock to remove itself from
	 * the ->blkd_tasks list, which it will do from exit() if from
	 * nowhere else.  We therefore are guaranteed that task t will
	 * stay around at least until we drop rnp->lock.  Note that
	 * rnp->lock also resolves races between our priority boosting
	 * and task t's exiting its outermost RCU read-side critical
	 * section.
	 */
	t = container_of(tb, struct task_struct, rcu_node_entry);
	rt_mutex_init_proxy_locked(&mtx, t);
	t->rcu_boost_mutex = &mtx;
	raw_spin_unlock_irqrestore(&rnp->lock, flags);
	rt_mutex_lock(&mtx);  /* Side effect: boosts task t's priority. */
	rt_mutex_unlock(&mtx);  /* Keep lockdep happy. */

	return ACCESS_ONCE(rnp->exp_tasks) != NULL ||
	       ACCESS_ONCE(rnp->boost_tasks) != NULL;
}

/*
 * Priority-boosting kthread.  One per leaf rcu_node and one for the
 * root rcu_node.
 */
static int rcu_boost_kthread(void *arg)
{
	struct rcu_node *rnp = (struct rcu_node *)arg;
	int spincnt = 0;
	int more2boost;

	trace_rcu_utilization("Start boost kthread@init");
	for (;;) {
		rnp->boost_kthread_status = RCU_KTHREAD_WAITING;
		trace_rcu_utilization("End boost kthread@rcu_wait");
		rcu_wait(rnp->boost_tasks || rnp->exp_tasks);
		trace_rcu_utilization("Start boost kthread@rcu_wait");
		rnp->boost_kthread_status = RCU_KTHREAD_RUNNING;
		more2boost = rcu_boost(rnp);
		if (more2boost)
			spincnt++;
		else
			spincnt = 0;
		if (spincnt > 10) {
			rnp->boost_kthread_status = RCU_KTHREAD_YIELDING;
			trace_rcu_utilization("End boost kthread@rcu_yield");
			schedule_timeout_interruptible(2);
			trace_rcu_utilization("Start boost kthread@rcu_yield");
			spincnt = 0;
		}
	}
	/* NOTREACHED */
	trace_rcu_utilization("End boost kthread@notreached");
	return 0;
}

/*
 * Check to see if it is time to start boosting RCU readers that are
 * blocking the current grace period, and, if so, tell the per-rcu_node
 * kthread to start boosting them.  If there is an expedited grace
 * period in progress, it is always time to boost.
 *
 * The caller must hold rnp->lock, which this function releases.
 * The ->boost_kthread_task is immortal, so we don't need to worry
 * about it going away.
 */
static void rcu_initiate_boost(struct rcu_node *rnp, unsigned long flags)
{
	struct task_struct *t;

	if (!rcu_preempt_blocked_readers_cgp(rnp) && rnp->exp_tasks == NULL) {
		rnp->n_balk_exp_gp_tasks++;
		raw_spin_unlock_irqrestore(&rnp->lock, flags);
		return;
	}
	if (rnp->exp_tasks != NULL ||
	    (rnp->gp_tasks != NULL &&
	     rnp->boost_tasks == NULL &&
	     rnp->qsmask == 0 &&
	     ULONG_CMP_GE(jiffies, rnp->boost_time))) {
		if (rnp->exp_tasks == NULL)
			rnp->boost_tasks = rnp->gp_tasks;
		raw_spin_unlock_irqrestore(&rnp->lock, flags);
		t = rnp->boost_kthread_task;
		if (t)
			rcu_wake_cond(t, rnp->boost_kthread_status);
	} else {
		rcu_initiate_boost_trace(rnp);
		raw_spin_unlock_irqrestore(&rnp->lock, flags);
	}
}

/*
 * Wake up the per-CPU kthread to invoke RCU callbacks.
 */
static void invoke_rcu_callbacks_kthread(void)
{
	unsigned long flags;

	local_irq_save(flags);
	__this_cpu_write(rcu_cpu_has_work, 1);
	if (__this_cpu_read(rcu_cpu_kthread_task) != NULL &&
	    current != __this_cpu_read(rcu_cpu_kthread_task)) {
		rcu_wake_cond(__this_cpu_read(rcu_cpu_kthread_task),
			      __this_cpu_read(rcu_cpu_kthread_status));
	}
	local_irq_restore(flags);
}

/*
 * Is the current CPU running the RCU-callbacks kthread?
 * Caller must have preemption disabled.
 */
static bool rcu_is_callbacks_kthread(void)
{
	return __get_cpu_var(rcu_cpu_kthread_task) == current;
}

#define RCU_BOOST_DELAY_JIFFIES DIV_ROUND_UP(CONFIG_RCU_BOOST_DELAY * HZ, 1000)
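
/*
 * Worked example (illustrative, assuming CONFIG_RCU_BOOST_DELAY=500 and
 * HZ=250): DIV_ROUND_UP(500 * 250, 1000) == 125 jiffies, so boosting is
 * deferred until 500 ms after the start of the grace period.
 */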

/*
 * Do priority-boost accounting for the start of a new grace period.
 */
static void rcu_preempt_boost_start_gp(struct rcu_node *rnp)
{
	rnp->boost_time = jiffies + RCU_BOOST_DELAY_JIFFIES;
}

/*
 * Create an RCU-boost kthread for the specified node if one does not
 * already exist.  We only create this kthread for preemptible RCU.
 * Returns zero if all is well, a negated errno otherwise.
 */
static int __cpuinit rcu_spawn_one_boost_kthread(struct rcu_state *rsp,
						 struct rcu_node *rnp)
{
	int rnp_index = rnp - &rsp->node[0];
	unsigned long flags;
	struct sched_param sp;
	struct task_struct *t;

	if (&rcu_preempt_state != rsp)
		return 0;

	if (!rcu_scheduler_fully_active || rnp->qsmaskinit == 0)
		return 0;

	rsp->boost = 1;
	if (rnp->boost_kthread_task != NULL)
		return 0;
	t = kthread_create(rcu_boost_kthread, (void *)rnp,
			   "rcub/%d", rnp_index);
	if (IS_ERR(t))
		return PTR_ERR(t);
	raw_spin_lock_irqsave(&rnp->lock, flags);
	rnp->boost_kthread_task = t;
	raw_spin_unlock_irqrestore(&rnp->lock, flags);
	sp.sched_priority = RCU_BOOST_PRIO;
	sched_setscheduler_nocheck(t, SCHED_FIFO, &sp);
	wake_up_process(t); /* get to TASK_INTERRUPTIBLE quickly. */
	return 0;
}

static void rcu_kthread_do_work(void)
{
	rcu_do_batch(&rcu_sched_state, &__get_cpu_var(rcu_sched_data));
	rcu_do_batch(&rcu_bh_state, &__get_cpu_var(rcu_bh_data));
	rcu_preempt_do_callbacks();
}

static void rcu_cpu_kthread_setup(unsigned int cpu)
{
	struct sched_param sp;

	sp.sched_priority = RCU_KTHREAD_PRIO;
	sched_setscheduler_nocheck(current, SCHED_FIFO, &sp);
}

static void rcu_cpu_kthread_park(unsigned int cpu)
{
	per_cpu(rcu_cpu_kthread_status, cpu) = RCU_KTHREAD_OFFCPU;
}

static int rcu_cpu_kthread_should_run(unsigned int cpu)
{
	return __get_cpu_var(rcu_cpu_has_work);
}

/*
 * Per-CPU kernel thread that invokes RCU callbacks.  This replaces the
 * RCU softirq used in flavors and configurations of RCU that do not
 * support RCU priority boosting.
 */
static void rcu_cpu_kthread(unsigned int cpu)
{
	unsigned int *statusp = &__get_cpu_var(rcu_cpu_kthread_status);
	char work, *workp = &__get_cpu_var(rcu_cpu_has_work);
	int spincnt;

	for (spincnt = 0; spincnt < 10; spincnt++) {
		trace_rcu_utilization("Start CPU kthread@rcu_wait");
		local_bh_disable();
		*statusp = RCU_KTHREAD_RUNNING;
		this_cpu_inc(rcu_cpu_kthread_loops);
		local_irq_disable();
		work = *workp;
		*workp = 0;
		local_irq_enable();
		if (work)
			rcu_kthread_do_work();
		local_bh_enable();
		if (*workp == 0) {
			trace_rcu_utilization("End CPU kthread@rcu_wait");
			*statusp = RCU_KTHREAD_WAITING;
			return;
		}
	}
	*statusp = RCU_KTHREAD_YIELDING;
	trace_rcu_utilization("Start CPU kthread@rcu_yield");
	schedule_timeout_interruptible(2);
	trace_rcu_utilization("End CPU kthread@rcu_yield");
	*statusp = RCU_KTHREAD_WAITING;
}

/*
 * Set the per-rcu_node kthread's affinity to cover all CPUs that are
 * served by the rcu_node in question.  The CPU hotplug lock is still
 * held, so the value of rnp->qsmaskinit will be stable.
 *
 * We don't include outgoingcpu in the affinity set; use -1 if there is
 * no outgoing CPU.  If there are no CPUs left in the affinity set,
 * this function allows the kthread to execute on any CPU.
 */
static void rcu_boost_kthread_setaffinity(struct rcu_node *rnp, int outgoingcpu)
{
	struct task_struct *t = rnp->boost_kthread_task;
	unsigned long mask = rnp->qsmaskinit;
	cpumask_var_t cm;
	int cpu;

	if (!t)
		return;
	if (!zalloc_cpumask_var(&cm, GFP_KERNEL))
		return;
	for (cpu = rnp->grplo; cpu <= rnp->grphi; cpu++, mask >>= 1)
		if ((mask & 0x1) && cpu != outgoingcpu)
			cpumask_set_cpu(cpu, cm);
	if (cpumask_weight(cm) == 0) {
		cpumask_setall(cm);
		for (cpu = rnp->grplo; cpu <= rnp->grphi; cpu++)
			cpumask_clear_cpu(cpu, cm);
		WARN_ON_ONCE(cpumask_weight(cm) == 0);
	}
	set_cpus_allowed_ptr(t, cm);
	free_cpumask_var(cm);
}

static struct smp_hotplug_thread rcu_cpu_thread_spec = {
	.store			= &rcu_cpu_kthread_task,
	.thread_should_run	= rcu_cpu_kthread_should_run,
	.thread_fn		= rcu_cpu_kthread,
	.thread_comm		= "rcuc/%u",
	.setup			= rcu_cpu_kthread_setup,
	.park			= rcu_cpu_kthread_park,
};

/*
 * Spawn all kthreads -- called as soon as the scheduler is running.
 */
static int __init rcu_spawn_kthreads(void)
{
	struct rcu_node *rnp;
	int cpu;

	rcu_scheduler_fully_active = 1;
	for_each_possible_cpu(cpu)
		per_cpu(rcu_cpu_has_work, cpu) = 0;
	BUG_ON(smpboot_register_percpu_thread(&rcu_cpu_thread_spec));
	rnp = rcu_get_root(rcu_state);
	(void)rcu_spawn_one_boost_kthread(rcu_state, rnp);
	if (NUM_RCU_NODES > 1) {
		rcu_for_each_leaf_node(rcu_state, rnp)
			(void)rcu_spawn_one_boost_kthread(rcu_state, rnp);
	}
	return 0;
}
early_initcall(rcu_spawn_kthreads);

static void __cpuinit rcu_prepare_kthreads(int cpu)
{
	struct rcu_data *rdp = per_cpu_ptr(rcu_state->rda, cpu);
	struct rcu_node *rnp = rdp->mynode;

	/* Fire up the incoming CPU's kthread and leaf rcu_node kthread. */
	if (rcu_scheduler_fully_active)
		(void)rcu_spawn_one_boost_kthread(rcu_state, rnp);
}

#else /* #ifdef CONFIG_RCU_BOOST */

static void rcu_initiate_boost(struct rcu_node *rnp, unsigned long flags)
{
	raw_spin_unlock_irqrestore(&rnp->lock, flags);
}

static void invoke_rcu_callbacks_kthread(void)
{
	WARN_ON_ONCE(1);
}

static bool rcu_is_callbacks_kthread(void)
{
	return false;
}

static void rcu_preempt_boost_start_gp(struct rcu_node *rnp)
{
}

static void rcu_boost_kthread_setaffinity(struct rcu_node *rnp, int outgoingcpu)
{
}

static int __init rcu_scheduler_really_started(void)
{
	rcu_scheduler_fully_active = 1;
	return 0;
}
early_initcall(rcu_scheduler_really_started);

static void __cpuinit rcu_prepare_kthreads(int cpu)
{
}

#endif /* #else #ifdef CONFIG_RCU_BOOST */

#if !defined(CONFIG_RCU_FAST_NO_HZ)

/*
 * Check to see if any future RCU-related work will need to be done
 * by the current CPU, even if none need be done immediately, returning
 * 1 if so.  This function is part of the RCU implementation; it is -not-
 * an exported member of the RCU API.
 *
 * Because we do not have RCU_FAST_NO_HZ, just check whether this CPU
 * needs any flavor of RCU.
 */
int rcu_needs_cpu(int cpu, unsigned long *delta_jiffies)
{
	*delta_jiffies = ULONG_MAX;
	return rcu_cpu_has_callbacks(cpu);
}

/*
 * Because we do not have RCU_FAST_NO_HZ, don't bother initializing for it.
 */
static void rcu_prepare_for_idle_init(int cpu)
{
}

/*
 * Because we do not have RCU_FAST_NO_HZ, don't bother cleaning up
 * after it.
 */
static void rcu_cleanup_after_idle(int cpu)
{
}

/*
 * Do the idle-entry grace-period work, which, because CONFIG_RCU_FAST_NO_HZ=n,
 * is nothing.
 */
static void rcu_prepare_for_idle(int cpu)
{
}

/*
 * Don't bother keeping a running count of the number of RCU callbacks
 * posted because CONFIG_RCU_FAST_NO_HZ=n.
 */
static void rcu_idle_count_callbacks_posted(void)
{
}

#else /* #if !defined(CONFIG_RCU_FAST_NO_HZ) */

/*
 * This code is invoked when a CPU goes idle, at which point we want
 * to have the CPU do everything required for RCU so that it can enter
 * the energy-efficient dyntick-idle mode.  This is handled by a
 * state machine implemented by rcu_prepare_for_idle() below.
 *
 * The following four preprocessor symbols control this state machine:
 *
 * RCU_IDLE_FLUSHES gives the maximum number of times that we will attempt
 *	to satisfy RCU.  Beyond this point, it is better to incur a periodic
 *	scheduling-clock interrupt than to loop through the state machine
 *	at full power.
 * RCU_IDLE_OPT_FLUSHES gives the number of RCU_IDLE_FLUSHES that are
 *	optional if RCU does not need anything immediately from this
 *	CPU, even if this CPU still has RCU callbacks queued.  The first
 *	times through the state machine are mandatory: we need to give
 *	the state machine a chance to communicate a quiescent state
 *	to the RCU core.
 * RCU_IDLE_GP_DELAY gives the number of jiffies that a CPU is permitted
 *	to sleep in dyntick-idle mode with RCU callbacks pending.  This
 *	is sized to be roughly one RCU grace period.  Those energy-efficiency
 *	benchmarkers who might otherwise be tempted to set this to a large
 *	number, be warned: Setting RCU_IDLE_GP_DELAY too high can hang your
 *	system.  And if you are -that- concerned about energy efficiency,
 *	just power the system down and be done with it!
 * RCU_IDLE_LAZY_GP_DELAY gives the number of jiffies that a CPU is
 *	permitted to sleep in dyntick-idle mode with only lazy RCU
 *	callbacks pending.  Setting this too high can OOM your system.
 *
 * The values below work well in practice.  If future workloads require
 * adjustment, they can be converted into kernel config parameters, though
 * making the state machine smarter might be a better option.
 */
#define RCU_IDLE_FLUSHES 5		/* Number of dyntick-idle tries. */
#define RCU_IDLE_OPT_FLUSHES 3		/* Optional dyntick-idle tries. */
#define RCU_IDLE_GP_DELAY 4		/* Roughly one grace period. */
#define RCU_IDLE_LAZY_GP_DELAY (6 * HZ)	/* Roughly six seconds. */

extern int tick_nohz_enabled;

/*
 * Does the specified flavor of RCU have non-lazy callbacks pending on
 * the specified CPU?  Both RCU flavor and CPU are specified by the
 * rcu_data structure.
 */
static bool __rcu_cpu_has_nonlazy_callbacks(struct rcu_data *rdp)
{
	return rdp->qlen != rdp->qlen_lazy;
}

#ifdef CONFIG_TREE_PREEMPT_RCU

/*
 * Are there non-lazy RCU-preempt callbacks?  (There cannot be if there
 * is no RCU-preempt in the kernel.)
 */
static bool rcu_preempt_cpu_has_nonlazy_callbacks(int cpu)
{
	struct rcu_data *rdp = &per_cpu(rcu_preempt_data, cpu);

	return __rcu_cpu_has_nonlazy_callbacks(rdp);
}

#else /* #ifdef CONFIG_TREE_PREEMPT_RCU */

static bool rcu_preempt_cpu_has_nonlazy_callbacks(int cpu)
{
	return 0;
}

#endif /* else #ifdef CONFIG_TREE_PREEMPT_RCU */

/*
 * Does any flavor of RCU have non-lazy callbacks on the specified CPU?
 */
static bool rcu_cpu_has_nonlazy_callbacks(int cpu)
{
	return __rcu_cpu_has_nonlazy_callbacks(&per_cpu(rcu_sched_data, cpu)) ||
	       __rcu_cpu_has_nonlazy_callbacks(&per_cpu(rcu_bh_data, cpu)) ||
	       rcu_preempt_cpu_has_nonlazy_callbacks(cpu);
}

/*
 * Allow the CPU to enter dyntick-idle mode if either: (1) There are no
 * callbacks on this CPU, (2) this CPU has not yet attempted to enter
 * dyntick-idle mode, or (3) this CPU is in the process of attempting to
 * enter dyntick-idle mode.  Otherwise, if we have recently tried and failed
 * to enter dyntick-idle mode, we refuse to try to enter it.  After all,
 * it is better to incur scheduling-clock interrupts than to spin
 * continuously for the same time duration!
 *
 * The delta_jiffies argument is used to store the time when RCU is
 * going to need the CPU again if it still has callbacks.  The reason
 * for this is that rcu_prepare_for_idle() might need to post a timer,
 * but if so, it will do so after tick_nohz_stop_sched_tick() has set
 * the wakeup time for this CPU.  This means that RCU's timer can be
 * delayed until the wakeup time, which defeats the purpose of posting
 * a timer.
 */
1671 int rcu_needs_cpu(int cpu, unsigned long *delta_jiffies)
1672 {
1673 struct rcu_dynticks *rdtp = &per_cpu(rcu_dynticks, cpu);
1674
1675 /* Flag a new idle sojourn to the idle-entry state machine. */
1676 rdtp->idle_first_pass = 1;
1677 /* If no callbacks, RCU doesn't need the CPU. */
1678 if (!rcu_cpu_has_callbacks(cpu)) {
1679 *delta_jiffies = ULONG_MAX;
1680 return 0;
1681 }
1682 if (rdtp->dyntick_holdoff == jiffies) {
1683 /* RCU recently tried and failed, so don't try again. */
1684 *delta_jiffies = 1;
1685 return 1;
1686 }
1687 /* Set up for the possibility that RCU will post a timer. */
1688 if (rcu_cpu_has_nonlazy_callbacks(cpu)) {
1689 *delta_jiffies = round_up(RCU_IDLE_GP_DELAY + jiffies,
1690 RCU_IDLE_GP_DELAY) - jiffies;
1691 } else {
1692 *delta_jiffies = jiffies + RCU_IDLE_LAZY_GP_DELAY;
1693 *delta_jiffies = round_jiffies(*delta_jiffies) - jiffies;
1694 }
1695 return 0;
1696 }
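/*
 * Hypothetical caller sketch (simplified; the real caller is
 * tick_nohz_stop_sched_tick(), and "rcu_delta" is a local variable
 * invented here for illustration):
 *
 *	unsigned long rcu_delta;
 *
 *	if (rcu_needs_cpu(cpu, &rcu_delta))
 *		keep the tick (RCU recently failed to go dyntick-idle);
 *	else if (rcu_delta != ULONG_MAX)
 *		stop the tick, but wake within rcu_delta jiffies so
 *		that any timer posted by rcu_prepare_for_idle() fires;
 *	else
 *		sleep indefinitely as far as RCU is concerned.
 */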
1697
1698 /*
1699 * Handler for smp_call_function_single(). The only point of this
1700 * handler is to wake the CPU up, so the handler does only tracing.
1701 */
1702 void rcu_idle_demigrate(void *unused)
1703 {
1704 trace_rcu_prep_idle("Demigrate");
1705 }
1706
1707 /*
1708 * Timer handler used to force CPU to start pushing its remaining RCU
1709 * callbacks in the case where it entered dyntick-idle mode with callbacks
1710 * pending. The handler doesn't really need to do anything because the
1711 * real work is done upon re-entry to idle, or by the next scheduling-clock
1712 * interrupt should idle not be re-entered.
1713 *
1714 * One special case: the timer gets migrated without awakening the CPU
1715 * on which the timer was scheduled. In this case, we must wake up
1716 * that CPU. We do so with smp_call_function_single().
1717 */
1718 static void rcu_idle_gp_timer_func(unsigned long cpu_in)
1719 {
1720 int cpu = (int)cpu_in;
1721
1722 trace_rcu_prep_idle("Timer");
1723 if (cpu != smp_processor_id())
1724 smp_call_function_single(cpu, rcu_idle_demigrate, NULL, 0);
1725 else
1726 WARN_ON_ONCE(1); /* Getting here can hang the system... */
1727 }
1728
1729 /*
1730 * Initialize the timer used to pull CPUs out of dyntick-idle mode.
1731 */
1732 static void rcu_prepare_for_idle_init(int cpu)
1733 {
1734 struct rcu_dynticks *rdtp = &per_cpu(rcu_dynticks, cpu);
1735
1736 rdtp->dyntick_holdoff = jiffies - 1;
1737 setup_timer(&rdtp->idle_gp_timer, rcu_idle_gp_timer_func, cpu);
1738 rdtp->idle_gp_timer_expires = jiffies - 1;
1739 rdtp->idle_first_pass = 1;
1740 }
1741
1742 /*
1743 * Clean up for exit from idle. Because we are exiting from idle, there
1744 * is no longer any point to ->idle_gp_timer, so cancel it. This will
1745 * do nothing if this timer is not active, so just cancel it unconditionally.
1746 */
1747 static void rcu_cleanup_after_idle(int cpu)
1748 {
1749 struct rcu_dynticks *rdtp = &per_cpu(rcu_dynticks, cpu);
1750
1751 del_timer(&rdtp->idle_gp_timer);
1752 trace_rcu_prep_idle("Cleanup after idle");
1753 rdtp->tick_nohz_enabled_snap = ACCESS_ONCE(tick_nohz_enabled);
1754 }
1755
1756 /*
1757 * Check to see if any RCU-related work can be done by the current CPU,
1758 * and if so, schedule a softirq to get it done. This function is part
1759 * of the RCU implementation; it is -not- an exported member of the RCU API.
1760 *
1761 * The idea is for the current CPU to clear out all work required by the
1762 * RCU core for the current grace period, so that this CPU can be permitted
1763 * to enter dyntick-idle mode. In some cases, it will need to be awakened
1764 * at the end of the grace period by whatever CPU ends the grace period.
1765 * This allows CPUs to go dyntick-idle more quickly, and to reduce the
1766 * number of wakeups by a modest integer factor.
1767 *
1768 * Because it is not legal to invoke rcu_process_callbacks() with irqs
1769 * disabled, we do one pass of force_quiescent_state(), then do an
1770 * invoke_rcu_core() to cause rcu_process_callbacks() to be invoked
1771 * later. The ->dyntick_drain field controls the sequencing.
1772 *
1773 * The caller must have disabled interrupts.
1774 */
1775 static void rcu_prepare_for_idle(int cpu)
1776 {
1777 struct timer_list *tp;
1778 struct rcu_dynticks *rdtp = &per_cpu(rcu_dynticks, cpu);
1779 int tne;
1780
1781 /* Handle nohz enablement switches conservatively. */
1782 tne = ACCESS_ONCE(tick_nohz_enabled);
1783 if (tne != rdtp->tick_nohz_enabled_snap) {
1784 if (rcu_cpu_has_callbacks(cpu))
1785 invoke_rcu_core(); /* force nohz to see update. */
1786 rdtp->tick_nohz_enabled_snap = tne;
1787 return;
1788 }
1789 if (!tne)
1790 return;
1791
1792 /* Adaptive-tick mode, where usermode execution is idle to RCU. */
1793 if (!is_idle_task(current)) {
1794 rdtp->dyntick_holdoff = jiffies - 1;
1795 if (rcu_cpu_has_nonlazy_callbacks(cpu)) {
1796 trace_rcu_prep_idle("User dyntick with callbacks");
1797 rdtp->idle_gp_timer_expires =
1798 round_up(jiffies + RCU_IDLE_GP_DELAY,
1799 RCU_IDLE_GP_DELAY);
1800 } else if (rcu_cpu_has_callbacks(cpu)) {
1801 rdtp->idle_gp_timer_expires =
1802 round_jiffies(jiffies + RCU_IDLE_LAZY_GP_DELAY);
1803 trace_rcu_prep_idle("User dyntick with lazy callbacks");
1804 } else {
1805 return;
1806 }
1807 tp = &rdtp->idle_gp_timer;
1808 mod_timer_pinned(tp, rdtp->idle_gp_timer_expires);
1809 return;
1810 }
1811
1812 /*
1813 * If this is an idle re-entry, for example, due to use of
1814 * RCU_NONIDLE() or the new idle-loop tracing API within the idle
1815 * loop, then don't take any state-machine actions, unless the
1816 * momentary exit from idle queued additional non-lazy callbacks.
1817 * Instead, repost the ->idle_gp_timer if this CPU has callbacks
1818 * pending.
1819 */
1820 if (!rdtp->idle_first_pass &&
1821 (rdtp->nonlazy_posted == rdtp->nonlazy_posted_snap)) {
1822 if (rcu_cpu_has_callbacks(cpu)) {
1823 tp = &rdtp->idle_gp_timer;
1824 mod_timer_pinned(tp, rdtp->idle_gp_timer_expires);
1825 }
1826 return;
1827 }
1828 rdtp->idle_first_pass = 0;
1829 rdtp->nonlazy_posted_snap = rdtp->nonlazy_posted - 1;
1830
1831 /*
1832 * If there are no callbacks on this CPU, enter dyntick-idle mode.
1833 * Also reset state to avoid prejudicing later attempts.
1834 */
1835 if (!rcu_cpu_has_callbacks(cpu)) {
1836 rdtp->dyntick_holdoff = jiffies - 1;
1837 rdtp->dyntick_drain = 0;
1838 trace_rcu_prep_idle("No callbacks");
1839 return;
1840 }
1841
1842 /*
1843 * If in holdoff mode, just return. We will presumably have
1844 * refrained from disabling the scheduling-clock tick.
1845 */
1846 if (rdtp->dyntick_holdoff == jiffies) {
1847 trace_rcu_prep_idle("In holdoff");
1848 return;
1849 }
1850
1851 /* Check and update the ->dyntick_drain sequencing. */
1852 if (rdtp->dyntick_drain <= 0) {
1853 /* First time through, initialize the counter. */
1854 rdtp->dyntick_drain = RCU_IDLE_FLUSHES;
1855 } else if (rdtp->dyntick_drain <= RCU_IDLE_OPT_FLUSHES &&
1856 !rcu_pending(cpu) &&
1857 !local_softirq_pending()) {
1858 /* Can we go dyntick-idle despite still having callbacks? */
1859 rdtp->dyntick_drain = 0;
1860 rdtp->dyntick_holdoff = jiffies;
1861 if (rcu_cpu_has_nonlazy_callbacks(cpu)) {
1862 trace_rcu_prep_idle("Dyntick with callbacks");
1863 rdtp->idle_gp_timer_expires =
1864 round_up(jiffies + RCU_IDLE_GP_DELAY,
1865 RCU_IDLE_GP_DELAY);
1866 } else {
1867 rdtp->idle_gp_timer_expires =
1868 round_jiffies(jiffies + RCU_IDLE_LAZY_GP_DELAY);
1869 trace_rcu_prep_idle("Dyntick with lazy callbacks");
1870 }
1871 tp = &rdtp->idle_gp_timer;
1872 mod_timer_pinned(tp, rdtp->idle_gp_timer_expires);
1873 rdtp->nonlazy_posted_snap = rdtp->nonlazy_posted;
1874 return; /* Nothing more to do immediately. */
1875 } else if (--(rdtp->dyntick_drain) <= 0) {
1876 /* We have hit the limit, so time to give up. */
1877 rdtp->dyntick_holdoff = jiffies;
1878 trace_rcu_prep_idle("Begin holdoff");
1879 invoke_rcu_core(); /* Force the CPU out of dyntick-idle. */
1880 return;
1881 }
1882
1883 /*
1884 * Do one step of pushing the remaining RCU callbacks through
1885 * the RCU core state machine.
1886 */
1887 #ifdef CONFIG_TREE_PREEMPT_RCU
1888 if (per_cpu(rcu_preempt_data, cpu).nxtlist) {
1889 rcu_preempt_qs(cpu);
1890 force_quiescent_state(&rcu_preempt_state);
1891 }
1892 #endif /* #ifdef CONFIG_TREE_PREEMPT_RCU */
1893 if (per_cpu(rcu_sched_data, cpu).nxtlist) {
1894 rcu_sched_qs(cpu);
1895 force_quiescent_state(&rcu_sched_state);
1896 }
1897 if (per_cpu(rcu_bh_data, cpu).nxtlist) {
1898 rcu_bh_qs(cpu);
1899 force_quiescent_state(&rcu_bh_state);
1900 }
1901
1902 /*
1903 * If RCU callbacks are still pending, RCU still needs this CPU.
1904 * So try forcing the callbacks through the grace period.
1905 */
1906 if (rcu_cpu_has_callbacks(cpu)) {
1907 trace_rcu_prep_idle("More callbacks");
1908 invoke_rcu_core();
1909 } else {
1910 trace_rcu_prep_idle("Callbacks drained");
1911 }
1912 }
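/*
 * Illustrative walk-through of the ->dyntick_drain sequencing above,
 * assuming the default RCU_IDLE_FLUSHES=5 and RCU_IDLE_OPT_FLUSHES=3:
 *
 *	Call 1: drain <= 0, so set drain = 5 and do one flush pass.
 *	Calls 2-3: drain is 5 then 4, above the optional threshold, so
 *		decrement it and do another flush pass each time.
 *	Calls 4-6: drain is 3, 2, then 1; if neither RCU nor softirq
 *		work is pending, enter dyntick-idle with ->idle_gp_timer
 *		armed; otherwise keep flushing until drain reaches 0,
 *		which sets ->dyntick_holdoff and begins holdoff.
 */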
1913
1914 /*
1915 * Keep a running count of the number of non-lazy callbacks posted
1916 * on this CPU. This running counter (which is never decremented) allows
1917 * rcu_prepare_for_idle() to detect when something out of the idle loop
1918 * posts a callback, even if an equal number of callbacks are invoked.
1919 * Of course, callbacks should only be posted from within a trace event
1920 * designed to be called from idle or from within RCU_NONIDLE().
1921 */
1922 static void rcu_idle_count_callbacks_posted(void)
1923 {
1924 __this_cpu_add(rcu_dynticks.nonlazy_posted, 1);
1925 }
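/*
 * Example (illustrative; my_obj and my_reclaim_fn are hypothetical):
 * code running in the idle loop that must post a callback should wrap
 * the call in RCU_NONIDLE(), which momentarily marks the CPU non-idle
 * so that the call_rcu() path (and its bump of ->nonlazy_posted) is
 * legal:
 *
 *	RCU_NONIDLE(call_rcu(&my_obj->rh, my_reclaim_fn));
 */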
1926
1927 /*
1928 * Data for flushing lazy RCU callbacks at OOM time.
1929 */
1930 static atomic_t oom_callback_count;
1931 static DECLARE_WAIT_QUEUE_HEAD(oom_callback_wq);
1932
1933 /*
1934 * RCU OOM callback -- decrement the outstanding count and deliver the
1935 * wake-up if we are the last one.
1936 */
1937 static void rcu_oom_callback(struct rcu_head *rhp)
1938 {
1939 if (atomic_dec_and_test(&oom_callback_count))
1940 wake_up(&oom_callback_wq);
1941 }
1942
1943 /*
1944 * Post an rcu_oom_notify callback on the current CPU if it has at
1945 * least one lazy callback. This will unnecessarily post callbacks
1946 * to CPUs that already have a non-lazy callback at the end of their
1947 * callback list, but this is an infrequent operation, so accept some
1948 * extra overhead to keep things simple.
1949 */
1950 static void rcu_oom_notify_cpu(void *unused)
1951 {
1952 struct rcu_state *rsp;
1953 struct rcu_data *rdp;
1954
1955 for_each_rcu_flavor(rsp) {
1956 rdp = __this_cpu_ptr(rsp->rda);
1957 if (rdp->qlen_lazy != 0) {
1958 atomic_inc(&oom_callback_count);
1959 rsp->call(&rdp->oom_head, rcu_oom_callback);
1960 }
1961 }
1962 }
1963
1964 /*
1965 * If low on memory, ensure that each CPU has a non-lazy callback.
1966 * This will wake up CPUs that have only lazy callbacks, in turn
1967 * ensuring that they free up the corresponding memory in a timely manner.
1968 * Because an uncertain amount of memory will be freed in some uncertain
1969 * timeframe, we do not claim to have freed anything.
1970 */
1971 static int rcu_oom_notify(struct notifier_block *self,
1972 unsigned long notused, void *nfreed)
1973 {
1974 int cpu;
1975
1976 /* Wait for callbacks from earlier instance to complete. */
1977 wait_event(oom_callback_wq, atomic_read(&oom_callback_count) == 0);
1978
1979 /*
1980 * Prevent premature wakeup: ensure that all increments happen
1981 * before there is a chance of the counter reaching zero.
1982 */
1983 atomic_set(&oom_callback_count, 1);
1984
1985 get_online_cpus();
1986 for_each_online_cpu(cpu) {
1987 smp_call_function_single(cpu, rcu_oom_notify_cpu, NULL, 1);
1988 cond_resched();
1989 }
1990 put_online_cpus();
1991
1992 /* Unconditionally decrement: no need to wake ourselves up. */
1993 atomic_dec(&oom_callback_count);
1994
1995 return NOTIFY_OK;
1996 }
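/*
 * Why the initial count of one matters (an illustrative scenario, not
 * from the original source): suppose CPUs 0 and 1 both have only lazy
 * callbacks.  Without the bias, CPU 0's rcu_oom_callback() could be
 * invoked and drop the count to zero before rcu_oom_notify_cpu() had
 * run on CPU 1, waking a concurrent rcu_oom_notify() prematurely.
 * Holding the count at or above one until every CPU has been visited,
 * then dropping the bias, closes that window.
 */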
1997
1998 static struct notifier_block rcu_oom_nb = {
1999 .notifier_call = rcu_oom_notify
2000 };
2001
2002 static int __init rcu_register_oom_notifier(void)
2003 {
2004 register_oom_notifier(&rcu_oom_nb);
2005 return 0;
2006 }
2007 early_initcall(rcu_register_oom_notifier);
2008
2009 #endif /* #else #if !defined(CONFIG_RCU_FAST_NO_HZ) */
2010
2011 #ifdef CONFIG_RCU_CPU_STALL_INFO
2012
2013 #ifdef CONFIG_RCU_FAST_NO_HZ
2014
2015 static void print_cpu_stall_fast_no_hz(char *cp, int cpu)
2016 {
2017 struct rcu_dynticks *rdtp = &per_cpu(rcu_dynticks, cpu);
2018 struct timer_list *tltp = &rdtp->idle_gp_timer;
2019 char c;
2020
2021 c = rdtp->dyntick_holdoff == jiffies ? 'H' : '.';
2022 if (timer_pending(tltp))
2023 sprintf(cp, "drain=%d %c timer=%lu",
2024 rdtp->dyntick_drain, c, tltp->expires - jiffies);
2025 else
2026 sprintf(cp, "drain=%d %c timer not pending",
2027 rdtp->dyntick_drain, c);
2028 }
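/*
 * Example output fragments (values are illustrative): a CPU in holdoff
 * with its idle timer armed might report "drain=0 H timer=3", while a
 * CPU that never attempted dyntick-idle entry might report
 * "drain=0 . timer not pending".
 */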
2029
2030 #else /* #ifdef CONFIG_RCU_FAST_NO_HZ */
2031
2032 static void print_cpu_stall_fast_no_hz(char *cp, int cpu)
2033 {
2034 *cp = '\0';
2035 }
2036
2037 #endif /* #else #ifdef CONFIG_RCU_FAST_NO_HZ */
2038
2039 /* Initiate the stall-info list. */
2040 static void print_cpu_stall_info_begin(void)
2041 {
2042 printk(KERN_CONT "\n");
2043 }
2044
2045 /*
2046 * Print out diagnostic information for the specified stalled CPU.
2047 *
2048 * If the specified CPU is aware of the current RCU grace period
2049 * (flavor specified by rsp), then print the number of scheduling
2050 * clock interrupts the CPU has taken during the time that it has
2051 * been aware. Otherwise, print the number of RCU grace periods
2052 * that this CPU is ignorant of, for example, "1" if the CPU was
2053 * aware of the previous grace period.
2054 *
2055 * Also print out idle and (if CONFIG_RCU_FAST_NO_HZ) idle-entry info.
2056 */
2057 static void print_cpu_stall_info(struct rcu_state *rsp, int cpu)
2058 {
2059 char fast_no_hz[72];
2060 struct rcu_data *rdp = per_cpu_ptr(rsp->rda, cpu);
2061 struct rcu_dynticks *rdtp = rdp->dynticks;
2062 char *ticks_title;
2063 unsigned long ticks_value;
2064
2065 if (rsp->gpnum == rdp->gpnum) {
2066 ticks_title = "ticks this GP";
2067 ticks_value = rdp->ticks_this_gp;
2068 } else {
2069 ticks_title = "GPs behind";
2070 ticks_value = rsp->gpnum - rdp->gpnum;
2071 }
2072 print_cpu_stall_fast_no_hz(fast_no_hz, cpu);
2073 printk(KERN_ERR "\t%d: (%lu %s) idle=%03x/%llx/%d %s\n",
2074 cpu, ticks_value, ticks_title,
2075 atomic_read(&rdtp->dynticks) & 0xfff,
2076 rdtp->dynticks_nesting, rdtp->dynticks_nmi_nesting,
2077 fast_no_hz);
2078 }
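/*
 * Example line (values are illustrative; the trailing string is empty
 * unless CONFIG_RCU_FAST_NO_HZ is set):
 *
 *	3: (2500 ticks this GP) idle=e4d/1/0
 *
 * Here CPU 3 has seen the current grace period and has taken 2500
 * scheduling-clock interrupts during it; the odd low-order ->dynticks
 * value (0xe4d) indicates that it is not in dyntick-idle mode.
 */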
2079
2080 /* Terminate the stall-info list. */
2081 static void print_cpu_stall_info_end(void)
2082 {
2083 printk(KERN_ERR "\t");
2084 }
2085
2086 /* Zero ->ticks_this_gp for all flavors of RCU. */
2087 static void zero_cpu_stall_ticks(struct rcu_data *rdp)
2088 {
2089 rdp->ticks_this_gp = 0;
2090 }
2091
2092 /* Increment ->ticks_this_gp for all flavors of RCU. */
2093 static void increment_cpu_stall_ticks(void)
2094 {
2095 struct rcu_state *rsp;
2096
2097 for_each_rcu_flavor(rsp)
2098 __this_cpu_ptr(rsp->rda)->ticks_this_gp++;
2099 }
2100
2101 #else /* #ifdef CONFIG_RCU_CPU_STALL_INFO */
2102
2103 static void print_cpu_stall_info_begin(void)
2104 {
2105 printk(KERN_CONT " {");
2106 }
2107
2108 static void print_cpu_stall_info(struct rcu_state *rsp, int cpu)
2109 {
2110 printk(KERN_CONT " %d", cpu);
2111 }
2112
2113 static void print_cpu_stall_info_end(void)
2114 {
2115 printk(KERN_CONT "} ");
2116 }
2117
2118 static void zero_cpu_stall_ticks(struct rcu_data *rdp)
2119 {
2120 }
2121
2122 static void increment_cpu_stall_ticks(void)
2123 {
2124 }
2125
2126 #endif /* #else #ifdef CONFIG_RCU_CPU_STALL_INFO */
2127
2128 #ifdef CONFIG_RCU_NOCB_CPU
2129
2130 /*
2131 * Offload callback processing from the set of CPUs specified at boot
2132 * time by rcu_nocb_mask. For each CPU in the set, there is a
2133 * kthread created that pulls the callbacks from the corresponding CPU,
2134 * waits for a grace period to elapse, and invokes the callbacks.
2135 * The no-CBs CPUs do a wake_up() on their kthread when they insert
2136 * a callback into any empty list, unless the rcu_nocb_poll boot parameter
2137 * has been specified, in which case each kthread actively polls its
2138 * CPU. (Which isn't so great for energy efficiency, but which does
2139 * reduce RCU's overhead on that CPU.)
2140 *
2141 * This is intended to be used in conjunction with Frederic Weisbecker's
2142 * adaptive-idle work, which would seriously reduce OS jitter on CPUs
2143 * running CPU-bound user-mode computations.
2144 *
2145 * Offloading of callback processing could also in theory be used as
2146 * an energy-efficiency measure because CPUs with no RCU callbacks
2147 * queued are more aggressive about entering dyntick-idle mode.
2148 */
2149
2150
2151 /* Parse the boot-time rcu_nocb_mask CPU list from the kernel parameters. */
2152 static int __init rcu_nocb_setup(char *str)
2153 {
2154 alloc_bootmem_cpumask_var(&rcu_nocb_mask);
2155 have_rcu_nocb_mask = true;
2156 cpulist_parse(str, rcu_nocb_mask);
2157 return 1;
2158 }
2159 __setup("rcu_nocbs=", rcu_nocb_setup);
2160
2161 static int __init parse_rcu_nocb_poll(char *arg)
2162 {
2163 rcu_nocb_poll = 1;
2164 return 0;
2165 }
2166 early_param("rcu_nocb_poll", parse_rcu_nocb_poll);
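/*
 * Example usage (illustrative): booting with
 *
 *	rcu_nocbs=1-3,5 rcu_nocb_poll
 *
 * offloads callback invocation for CPUs 1, 2, 3, and 5 to their
 * "rcuo" kthreads and makes those kthreads poll for work instead of
 * waiting to be awakened by the corresponding no-CBs CPUs.
 */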
2167
2168 /* Is the specified CPU a no-CBs CPU? */
2169 static bool is_nocb_cpu(int cpu)
2170 {
2171 if (have_rcu_nocb_mask)
2172 return cpumask_test_cpu(cpu, rcu_nocb_mask);
2173 return false;
2174 }
2175
2176 /*
2177 * Enqueue the specified string of rcu_head structures onto the specified
2178 * CPU's no-CBs lists. The CPU is specified by rdp, the head of the
2179 * string by rhp, and the tail of the string by rhtp. The non-lazy/lazy
2180 * counts are supplied by rhcount and rhcount_lazy.
2181 *
2182 * If warranted, also wake up the kthread servicing this CPU's queues.
2183 */
2184 static void __call_rcu_nocb_enqueue(struct rcu_data *rdp,
2185 struct rcu_head *rhp,
2186 struct rcu_head **rhtp,
2187 int rhcount, int rhcount_lazy)
2188 {
2189 int len;
2190 struct rcu_head **old_rhpp;
2191 struct task_struct *t;
2192
2193 /* Enqueue the callback on the nocb list and update counts. */
2194 old_rhpp = xchg(&rdp->nocb_tail, rhtp);
2195 ACCESS_ONCE(*old_rhpp) = rhp;
2196 atomic_long_add(rhcount, &rdp->nocb_q_count);
2197 atomic_long_add(rhcount_lazy, &rdp->nocb_q_count_lazy);
2198
2199 /* If we are not being polled and there is a kthread, awaken it ... */
2200 t = ACCESS_ONCE(rdp->nocb_kthread);
2201 	if (rcu_nocb_poll || !t)
2202 return;
2203 len = atomic_long_read(&rdp->nocb_q_count);
2204 if (old_rhpp == &rdp->nocb_head) {
2205 wake_up(&rdp->nocb_wq); /* ... only if queue was empty ... */
2206 rdp->qlen_last_fqs_check = 0;
2207 } else if (len > rdp->qlen_last_fqs_check + qhimark) {
2208 wake_up_process(t); /* ... or if many callbacks queued. */
2209 rdp->qlen_last_fqs_check = LONG_MAX / 2;
2210 }
2211 return;
2212 }
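/*
 * Illustrative trace of the lockless enqueue above: with a single
 * callback A queued (->nocb_head == A, ->nocb_tail == &A->next),
 * enqueuing B first swings ->nocb_tail from &A->next to &B->next via
 * xchg(), then stores B through the old tail pointer so that
 * A->next == B.  A kthread that snapshots the list between those two
 * steps can see a trailing NULL ->next pointer, which is why
 * rcu_nocb_kthread() waits for enqueuing to complete before following
 * the list.
 */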
2213
2214 /*
2215 * This is a helper for __call_rcu(), which invokes this when the normal
2216 * callback queue is inoperable. If this is not a no-CBs CPU, this
2217 * function returns failure back to __call_rcu(), which can complain
2218 * appropriately.
2219 *
2220 * Otherwise, this function queues the callback where the corresponding
2221 * "rcuo" kthread can find it.
2222 */
2223 static bool __call_rcu_nocb(struct rcu_data *rdp, struct rcu_head *rhp,
2224 bool lazy)
2225 {
2226
2227 if (!is_nocb_cpu(rdp->cpu))
2228 		return false;
2229 	__call_rcu_nocb_enqueue(rdp, rhp, &rhp->next, 1, lazy);
2230 	return true;
2231 }
2232
2233 /*
2234 * Adopt orphaned callbacks on a no-CBs CPU, or return false if this is
2235 * not a no-CBs CPU.
2236 */
2237 static bool __maybe_unused rcu_nocb_adopt_orphan_cbs(struct rcu_state *rsp,
2238 struct rcu_data *rdp)
2239 {
2240 long ql = rsp->qlen;
2241 long qll = rsp->qlen_lazy;
2242
2243 /* If this is not a no-CBs CPU, tell the caller to do it the old way. */
2244 if (!is_nocb_cpu(smp_processor_id()))
2245 		return false;
2246 rsp->qlen = 0;
2247 rsp->qlen_lazy = 0;
2248
2249 /* First, enqueue the donelist, if any. This preserves CB ordering. */
2250 if (rsp->orphan_donelist != NULL) {
2251 __call_rcu_nocb_enqueue(rdp, rsp->orphan_donelist,
2252 rsp->orphan_donetail, ql, qll);
2253 ql = qll = 0;
2254 rsp->orphan_donelist = NULL;
2255 rsp->orphan_donetail = &rsp->orphan_donelist;
2256 }
2257 if (rsp->orphan_nxtlist != NULL) {
2258 __call_rcu_nocb_enqueue(rdp, rsp->orphan_nxtlist,
2259 rsp->orphan_nxttail, ql, qll);
2260 ql = qll = 0;
2261 rsp->orphan_nxtlist = NULL;
2262 rsp->orphan_nxttail = &rsp->orphan_nxtlist;
2263 }
2264 	return true;
2265 }
2266
2267 /*
2268 * There must be at least one non-no-CBs CPU in operation at any given
2269 * time, because no-CBs CPUs are not capable of initiating grace periods
2270 * independently. This function therefore returns false if the specified
2271 * CPU is the last non-no-CBs CPU, allowing the CPU-hotplug system to
2272 * avoid offlining the last such CPU. (Recursion is a wonderful thing,
2273 * but you have to have a base case!)
2274 */
2275 static bool nocb_cpu_expendable(int cpu)
2276 {
2277 cpumask_var_t non_nocb_cpus;
2278 	bool ret;
2279
2280 /*
2281 * If there are no no-CB CPUs or if this CPU is not a no-CB CPU,
2282 * then offlining this CPU is harmless. Let it happen.
2283 */
2284 if (!have_rcu_nocb_mask || is_nocb_cpu(cpu))
2285 		return true;
2286
2287 /* If no memory, play it safe and keep the CPU around. */
2288 if (!alloc_cpumask_var(&non_nocb_cpus, GFP_NOIO))
2289 		return false;
2290 cpumask_andnot(non_nocb_cpus, cpu_online_mask, rcu_nocb_mask);
2291 cpumask_clear_cpu(cpu, non_nocb_cpus);
2292 ret = !cpumask_empty(non_nocb_cpus);
2293 free_cpumask_var(non_nocb_cpus);
2294 return ret;
2295 }
2296
2297 /*
2298 * Helper structure for remote registry of RCU callbacks.
2299 * This is needed for when a no-CBs CPU needs to start a grace period.
2300 * If it just invokes call_rcu(), the resulting callback will be queued,
2301 * which can result in deadlock.
2302 */
2303 struct rcu_head_remote {
2304 struct rcu_head *rhp;
2305 call_rcu_func_t *crf;
2306 void (*func)(struct rcu_head *rhp);
2307 };
2308
2309 /*
2310 * Register a callback as specified by the rcu_head_remote struct.
2311 * This function is intended to be invoked via smp_call_function_single().
2312 */
2313 static void call_rcu_local(void *arg)
2314 {
2315 	struct rcu_head_remote *rhrp = arg;
2317
2318 rhrp->crf(rhrp->rhp, rhrp->func);
2319 }
2320
2321 /*
2322 * Set up an rcu_head_remote structure and then invoke call_rcu_local()
2323 * on CPU 0 (which is guaranteed to be a non-no-CBs CPU) via
2324 * smp_call_function_single().
2325 */
2326 static void invoke_crf_remote(struct rcu_head *rhp,
2327 void (*func)(struct rcu_head *rhp),
2328 call_rcu_func_t crf)
2329 {
2330 struct rcu_head_remote rhr;
2331
2332 rhr.rhp = rhp;
2333 rhr.crf = crf;
2334 rhr.func = func;
2335 smp_call_function_single(0, call_rcu_local, &rhr, 1);
2336 }
2337
2338 /*
2339 * Helper functions to be passed to wait_rcu_gp(), each of which
2340 * invokes invoke_crf_remote() to register a callback appropriately.
2341 */
2342 static void __maybe_unused
2343 call_rcu_preempt_remote(struct rcu_head *rhp,
2344 void (*func)(struct rcu_head *rhp))
2345 {
2346 invoke_crf_remote(rhp, func, call_rcu);
2347 }
2348 static void call_rcu_bh_remote(struct rcu_head *rhp,
2349 void (*func)(struct rcu_head *rhp))
2350 {
2351 invoke_crf_remote(rhp, func, call_rcu_bh);
2352 }
2353 static void call_rcu_sched_remote(struct rcu_head *rhp,
2354 void (*func)(struct rcu_head *rhp))
2355 {
2356 invoke_crf_remote(rhp, func, call_rcu_sched);
2357 }
2358
2359 /*
2360 * Per-rcu_data kthread, but only for no-CBs CPUs. Each kthread invokes
2361 * callbacks queued by the corresponding no-CBs CPU.
2362 */
2363 static int rcu_nocb_kthread(void *arg)
2364 {
2365 int c, cl;
2366 struct rcu_head *list;
2367 struct rcu_head *next;
2368 struct rcu_head **tail;
2369 struct rcu_data *rdp = arg;
2370
2371 	/* Each pass through this loop invokes one batch of callbacks. */
2372 for (;;) {
2373 /* If not polling, wait for next batch of callbacks. */
2374 if (!rcu_nocb_poll)
2375 wait_event_interruptible(rdp->nocb_wq, rdp->nocb_head);
2376 list = ACCESS_ONCE(rdp->nocb_head);
2377 if (!list) {
2378 schedule_timeout_interruptible(1);
2379 flush_signals(current);
2380 continue;
2381 }
2382
2383 /*
2384 * Extract queued callbacks, update counts, and wait
2385 * for a grace period to elapse.
2386 */
2387 ACCESS_ONCE(rdp->nocb_head) = NULL;
2388 tail = xchg(&rdp->nocb_tail, &rdp->nocb_head);
2389 c = atomic_long_xchg(&rdp->nocb_q_count, 0);
2390 cl = atomic_long_xchg(&rdp->nocb_q_count_lazy, 0);
2391 ACCESS_ONCE(rdp->nocb_p_count) += c;
2392 ACCESS_ONCE(rdp->nocb_p_count_lazy) += cl;
2393 wait_rcu_gp(rdp->rsp->call_remote);
2394
2395 /* Each pass through the following loop invokes a callback. */
2396 trace_rcu_batch_start(rdp->rsp->name, cl, c, -1);
2397 c = cl = 0;
2398 while (list) {
2399 next = list->next;
2400 /* Wait for enqueuing to complete, if needed. */
2401 while (next == NULL && &list->next != tail) {
2402 schedule_timeout_interruptible(1);
2403 next = list->next;
2404 }
2405 debug_rcu_head_unqueue(list);
2406 local_bh_disable();
2407 if (__rcu_reclaim(rdp->rsp->name, list))
2408 cl++;
2409 c++;
2410 local_bh_enable();
2411 list = next;
2412 }
2413 trace_rcu_batch_end(rdp->rsp->name, c, !!list, 0, 0, 1);
2414 ACCESS_ONCE(rdp->nocb_p_count) -= c;
2415 ACCESS_ONCE(rdp->nocb_p_count_lazy) -= cl;
2416 rdp->n_nocbs_invoked += c;
2417 }
2418 return 0;
2419 }
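/*
 * Illustrative counter flow for the kthread above: callbacks are added
 * to ->nocb_q_count/->nocb_q_count_lazy at enqueue time, moved as a
 * batch into ->nocb_p_count/->nocb_p_count_lazy for the duration of
 * the grace period, and subtracted as they are invoked, so the sum of
 * the "q" and "p" counters bounds this CPU's outstanding callbacks at
 * any instant.
 */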
2420
2421 /* Initialize per-rcu_data variables for no-CBs CPUs. */
2422 static void __init rcu_boot_init_nocb_percpu_data(struct rcu_data *rdp)
2423 {
2424 rdp->nocb_tail = &rdp->nocb_head;
2425 init_waitqueue_head(&rdp->nocb_wq);
2426 }
2427
2428 /* Create a kthread for each RCU flavor for each no-CBs CPU. */
2429 static void __init rcu_spawn_nocb_kthreads(struct rcu_state *rsp)
2430 {
2431 int cpu;
2432 struct rcu_data *rdp;
2433 struct task_struct *t;
2434
2435 if (rcu_nocb_mask == NULL)
2436 return;
2437 for_each_cpu(cpu, rcu_nocb_mask) {
2438 rdp = per_cpu_ptr(rsp->rda, cpu);
2439 t = kthread_run(rcu_nocb_kthread, rdp, "rcuo%d", cpu);
2440 BUG_ON(IS_ERR(t));
2441 ACCESS_ONCE(rdp->nocb_kthread) = t;
2442 }
2443 }
2444
2445 /* Prevent __call_rcu() from enqueuing callbacks on no-CBs CPUs */
2446 static void init_nocb_callback_list(struct rcu_data *rdp)
2447 {
2448 if (rcu_nocb_mask == NULL ||
2449 !cpumask_test_cpu(rdp->cpu, rcu_nocb_mask))
2450 return;
2451 rdp->nxttail[RCU_NEXT_TAIL] = NULL;
2452 }
2453
2454 /* Initialize the ->call_remote fields in the rcu_state structures. */
2455 static void __init rcu_init_nocb(void)
2456 {
2457 #ifdef CONFIG_PREEMPT_RCU
2458 rcu_preempt_state.call_remote = call_rcu_preempt_remote;
2459 #endif /* #ifdef CONFIG_PREEMPT_RCU */
2460 rcu_bh_state.call_remote = call_rcu_bh_remote;
2461 rcu_sched_state.call_remote = call_rcu_sched_remote;
2462 }
2463
2464 #else /* #ifdef CONFIG_RCU_NOCB_CPU */
2465
2466 static bool is_nocb_cpu(int cpu)
2467 {
2468 return false;
2469 }
2470
2471 static bool __call_rcu_nocb(struct rcu_data *rdp, struct rcu_head *rhp,
2472 bool lazy)
2473 {
2474 	return false;
2475 }
2476
2477 static bool __maybe_unused rcu_nocb_adopt_orphan_cbs(struct rcu_state *rsp,
2478 struct rcu_data *rdp)
2479 {
2480 	return false;
2481 }
2482
2483 static bool nocb_cpu_expendable(int cpu)
2484 {
2485 	return true;
2486 }
2487
2488 static void __init rcu_boot_init_nocb_percpu_data(struct rcu_data *rdp)
2489 {
2490 }
2491
2492 static void __init rcu_spawn_nocb_kthreads(struct rcu_state *rsp)
2493 {
2494 }
2495
2496 static void init_nocb_callback_list(struct rcu_data *rdp)
2497 {
2498 }
2499
2500 static void __init rcu_init_nocb(void)
2501 {
2502 }
2503
2504 #endif /* #else #ifdef CONFIG_RCU_NOCB_CPU */