/* kernel/rcutree_plugin.h */
1/*
2 * Read-Copy Update mechanism for mutual exclusion (tree-based version)
3 * Internal non-public definitions that provide either classic
6cc68793 4 * or preemptible semantics.
5 *
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License as published by
8 * the Free Software Foundation; either version 2 of the License, or
9 * (at your option) any later version.
10 *
11 * This program is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 * GNU General Public License for more details.
15 *
16 * You should have received a copy of the GNU General Public License
17 * along with this program; if not, write to the Free Software
18 * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
19 *
20 * Copyright Red Hat, 2009
21 * Copyright IBM Corporation, 2009
22 *
23 * Author: Ingo Molnar <mingo@elte.hu>
24 * Paul E. McKenney <paulmck@linux.vnet.ibm.com>
25 */
26
d9a3da06 27#include <linux/delay.h>
3fbfbf7a 28#include <linux/gfp.h>
b626c1b6 29#include <linux/oom.h>
62ab7072 30#include <linux/smpboot.h>
65d798f0 31#include <linux/tick.h>
f41d911f 32
33#define RCU_KTHREAD_PRIO 1
34
35#ifdef CONFIG_RCU_BOOST
36#define RCU_BOOST_PRIO CONFIG_RCU_BOOST_PRIO
37#else
38#define RCU_BOOST_PRIO RCU_KTHREAD_PRIO
39#endif
40
41#ifdef CONFIG_RCU_NOCB_CPU
42static cpumask_var_t rcu_nocb_mask; /* CPUs to have callbacks offloaded. */
43static bool have_rcu_nocb_mask; /* Was rcu_nocb_mask allocated? */
1b0048a4 44static bool __read_mostly rcu_nocb_poll; /* Offload kthreads are to poll. */
45static char __initdata nocb_buf[NR_CPUS * 5];
46#endif /* #ifdef CONFIG_RCU_NOCB_CPU */
47
48/*
49 * Check the RCU kernel configuration parameters and print informative
50 * messages about anything out of the ordinary. If you like #ifdef, you
51 * will love this function.
52 */
53static void __init rcu_bootup_announce_oddness(void)
54{
55#ifdef CONFIG_RCU_TRACE
56 printk(KERN_INFO "\tRCU debugfs-based tracing is enabled.\n");
57#endif
58#if (defined(CONFIG_64BIT) && CONFIG_RCU_FANOUT != 64) || (!defined(CONFIG_64BIT) && CONFIG_RCU_FANOUT != 32)
59 printk(KERN_INFO "\tCONFIG_RCU_FANOUT set to non-default value of %d\n",
60 CONFIG_RCU_FANOUT);
61#endif
62#ifdef CONFIG_RCU_FANOUT_EXACT
63 printk(KERN_INFO "\tHierarchical RCU autobalancing is disabled.\n");
64#endif
65#ifdef CONFIG_RCU_FAST_NO_HZ
66 printk(KERN_INFO
67 "\tRCU dyntick-idle grace-period acceleration is enabled.\n");
68#endif
69#ifdef CONFIG_PROVE_RCU
70 printk(KERN_INFO "\tRCU lockdep checking is enabled.\n");
71#endif
72#ifdef CONFIG_RCU_TORTURE_TEST_RUNNABLE
73 printk(KERN_INFO "\tRCU torture testing starts during boot.\n");
74#endif
81a294c4 75#if defined(CONFIG_TREE_PREEMPT_RCU) && !defined(CONFIG_RCU_CPU_STALL_VERBOSE)
76 printk(KERN_INFO "\tDump stacks of tasks blocking RCU-preempt GP.\n");
77#endif
78#if defined(CONFIG_RCU_CPU_STALL_INFO)
79 printk(KERN_INFO "\tAdditional per-CPU info printed with stalls.\n");
80#endif
81#if NUM_RCU_LVL_4 != 0
cc5df65b 82 printk(KERN_INFO "\tFour-level hierarchy is enabled.\n");
26845c28 83#endif
84 if (rcu_fanout_leaf != CONFIG_RCU_FANOUT_LEAF)
85 printk(KERN_INFO "\tExperimental boot-time adjustment of leaf fanout to %d.\n", rcu_fanout_leaf);
86 if (nr_cpu_ids != NR_CPUS)
87 printk(KERN_INFO "\tRCU restricting CPUs from NR_CPUS=%d to nr_cpu_ids=%d.\n", NR_CPUS, nr_cpu_ids);
3fbfbf7a 88#ifdef CONFIG_RCU_NOCB_CPU
89#ifndef CONFIG_RCU_NOCB_CPU_NONE
90 if (!have_rcu_nocb_mask) {
615ee544 91 zalloc_cpumask_var(&rcu_nocb_mask, GFP_KERNEL);
92 have_rcu_nocb_mask = true;
93 }
94#ifdef CONFIG_RCU_NOCB_CPU_ZERO
95 pr_info("\tExperimental no-CBs CPU 0\n");
96 cpumask_set_cpu(0, rcu_nocb_mask);
97#endif /* #ifdef CONFIG_RCU_NOCB_CPU_ZERO */
98#ifdef CONFIG_RCU_NOCB_CPU_ALL
99 pr_info("\tExperimental no-CBs for all CPUs\n");
100 cpumask_setall(rcu_nocb_mask);
101#endif /* #ifdef CONFIG_RCU_NOCB_CPU_ALL */
102#endif /* #ifndef CONFIG_RCU_NOCB_CPU_NONE */
3fbfbf7a 103 if (have_rcu_nocb_mask) {
104 cpulist_scnprintf(nocb_buf, sizeof(nocb_buf), rcu_nocb_mask);
105 pr_info("\tExperimental no-CBs CPUs: %s.\n", nocb_buf);
106 if (rcu_nocb_poll)
107 pr_info("\tExperimental polled no-CBs CPUs.\n");
108 }
109#endif /* #ifdef CONFIG_RCU_NOCB_CPU */
110}
111
112#ifdef CONFIG_TREE_PREEMPT_RCU
113
037b64ed 114struct rcu_state rcu_preempt_state =
a4889858 115 RCU_STATE_INITIALIZER(rcu_preempt, 'p', call_rcu);
f41d911f 116DEFINE_PER_CPU(struct rcu_data, rcu_preempt_data);
27f4d280 117static struct rcu_state *rcu_state = &rcu_preempt_state;
f41d911f 118
119static int rcu_preempted_readers_exp(struct rcu_node *rnp);
120
121/*
122 * Tell them what RCU they are running.
123 */
0e0fc1c2 124static void __init rcu_bootup_announce(void)
f41d911f 125{
6cc68793 126 printk(KERN_INFO "Preemptible hierarchical RCU implementation.\n");
26845c28 127 rcu_bootup_announce_oddness();
128}
129
130/*
131 * Return the number of RCU-preempt batches processed thus far
132 * for debug and statistics.
133 */
134long rcu_batches_completed_preempt(void)
135{
136 return rcu_preempt_state.completed;
137}
138EXPORT_SYMBOL_GPL(rcu_batches_completed_preempt);
139
140/*
141 * Return the number of RCU batches processed thus far for debug & stats.
142 */
143long rcu_batches_completed(void)
144{
145 return rcu_batches_completed_preempt();
146}
147EXPORT_SYMBOL_GPL(rcu_batches_completed);
148
149/*
150 * Force a quiescent state for preemptible RCU.
151 */
152void rcu_force_quiescent_state(void)
153{
4cdfc175 154 force_quiescent_state(&rcu_preempt_state);
155}
156EXPORT_SYMBOL_GPL(rcu_force_quiescent_state);
157
f41d911f 158/*
6cc68793 159 * Record a preemptible-RCU quiescent state for the specified CPU. Note
160 * that this just means that the task currently running on the CPU is
161 * not in a quiescent state. There might be any number of tasks blocked
162 * while in an RCU read-side critical section.
163 *
164 * Unlike the other rcu_*_qs() functions, callers to this function
165 * must disable irqs in order to protect the assignment to
166 * ->rcu_read_unlock_special.
f41d911f 167 */
c3422bea 168static void rcu_preempt_qs(int cpu)
169{
170 struct rcu_data *rdp = &per_cpu(rcu_preempt_data, cpu);
25502a6c 171
e4cc1f22 172 if (rdp->passed_quiesce == 0)
d4c08f2a 173 trace_rcu_grace_period("rcu_preempt", rdp->gpnum, "cpuqs");
e4cc1f22 174 rdp->passed_quiesce = 1;
25502a6c 175 current->rcu_read_unlock_special &= ~RCU_READ_UNLOCK_NEED_QS;
176}
177
178/*
179 * We have entered the scheduler, and the current task might soon be
180 * context-switched away from. If this task is in an RCU read-side
181 * critical section, we will no longer be able to rely on the CPU to
182 * record that fact, so we enqueue the task on the blkd_tasks list.
183 * The task will dequeue itself when it exits the outermost enclosing
184 * RCU read-side critical section. Therefore, the current grace period
185 * cannot be permitted to complete until the blkd_tasks list entries
186 * predating the current grace period drain, in other words, until
187 * rnp->gp_tasks becomes NULL.
188 *
189 * Caller must disable preemption.
f41d911f 190 */
cba6d0d6 191static void rcu_preempt_note_context_switch(int cpu)
192{
193 struct task_struct *t = current;
c3422bea 194 unsigned long flags;
195 struct rcu_data *rdp;
196 struct rcu_node *rnp;
197
10f39bb1 198 if (t->rcu_read_lock_nesting > 0 &&
199 (t->rcu_read_unlock_special & RCU_READ_UNLOCK_BLOCKED) == 0) {
200
201 /* Possibly blocking in an RCU read-side critical section. */
cba6d0d6 202 rdp = per_cpu_ptr(rcu_preempt_state.rda, cpu);
f41d911f 203 rnp = rdp->mynode;
1304afb2 204 raw_spin_lock_irqsave(&rnp->lock, flags);
f41d911f 205 t->rcu_read_unlock_special |= RCU_READ_UNLOCK_BLOCKED;
86848966 206 t->rcu_blocked_node = rnp;
207
208 /*
209 * If this CPU has already checked in, then this task
210 * will hold up the next grace period rather than the
211 * current grace period. Queue the task accordingly.
212 * If the task is queued for the current grace period
213 * (i.e., this CPU has not yet passed through a quiescent
214 * state for the current grace period), then as long
215 * as that task remains queued, the current grace period
216 * cannot end. Note that there is some uncertainty as
217 * to exactly when the current grace period started.
218 * We take a conservative approach, which can result
219 * in unnecessarily waiting on tasks that started very
220 * slightly after the current grace period began. C'est
221 * la vie!!!
222 *
223 * But first, note that the current CPU must still be
224 * on line!
f41d911f 225 */
b0e165c0 226 WARN_ON_ONCE((rdp->grpmask & rnp->qsmaskinit) == 0);
e7d8842e 227 WARN_ON_ONCE(!list_empty(&t->rcu_node_entry));
228 if ((rnp->qsmask & rdp->grpmask) && rnp->gp_tasks != NULL) {
229 list_add(&t->rcu_node_entry, rnp->gp_tasks->prev);
230 rnp->gp_tasks = &t->rcu_node_entry;
231#ifdef CONFIG_RCU_BOOST
232 if (rnp->boost_tasks != NULL)
233 rnp->boost_tasks = rnp->gp_tasks;
234#endif /* #ifdef CONFIG_RCU_BOOST */
235 } else {
236 list_add(&t->rcu_node_entry, &rnp->blkd_tasks);
237 if (rnp->qsmask & rdp->grpmask)
238 rnp->gp_tasks = &t->rcu_node_entry;
239 }
240 trace_rcu_preempt_task(rdp->rsp->name,
241 t->pid,
242 (rnp->qsmask & rdp->grpmask)
243 ? rnp->gpnum
244 : rnp->gpnum + 1);
1304afb2 245 raw_spin_unlock_irqrestore(&rnp->lock, flags);
246 } else if (t->rcu_read_lock_nesting < 0 &&
247 t->rcu_read_unlock_special) {
248
249 /*
250 * Complete exit from RCU read-side critical section on
251 * behalf of preempted instance of __rcu_read_unlock().
252 */
253 rcu_read_unlock_special(t);
254 }
255
256 /*
257 * Either we were not in an RCU read-side critical section to
258 * begin with, or we have now recorded that critical section
259 * globally. Either way, we can now note a quiescent state
260 * for this CPU. Again, if we were in an RCU read-side critical
261 * section, and if that critical section was blocking the current
262 * grace period, then the fact that the task has been enqueued
263 * means that we continue to block the current grace period.
264 */
e7d8842e 265 local_irq_save(flags);
cba6d0d6 266 rcu_preempt_qs(cpu);
e7d8842e 267 local_irq_restore(flags);
268}
269
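/*
 * Illustrative sketch (added for exposition, never compiled): a typical
 * preemptible-RCU read-side critical section as seen from a reader.  If
 * the reader is preempted between rcu_read_lock() and rcu_read_unlock(),
 * rcu_preempt_note_context_switch() above queues it on the leaf rcu_node's
 * ->blkd_tasks list, and the current grace period cannot end until the
 * task dequeues itself in rcu_read_unlock().  The structure and pointer
 * names below are hypothetical.
 */
#if 0
struct example_data {
	int value;
};
static struct example_data __rcu *example_global_ptr;

static int example_reader(void)
{
	struct example_data *p;
	int val = -1;

	rcu_read_lock();			/* enter read-side critical section */
	p = rcu_dereference(example_global_ptr);
	if (p)
		val = p->value;			/* may be preempted here under TREE_PREEMPT_RCU */
	rcu_read_unlock();			/* may end up in rcu_read_unlock_special() */
	return val;
}
#endif /* illustrative example */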
270/*
271 * Check for preempted RCU readers blocking the current grace period
272 * for the specified rcu_node structure. If the caller needs a reliable
273 * answer, it must hold the rcu_node's ->lock.
274 */
27f4d280 275static int rcu_preempt_blocked_readers_cgp(struct rcu_node *rnp)
fc2219d4 276{
12f5f524 277 return rnp->gp_tasks != NULL;
278}
279
280/*
281 * Record a quiescent state for all tasks that were previously queued
282 * on the specified rcu_node structure and that were blocking the current
283 * RCU grace period. The caller must hold the specified rnp->lock with
284 * irqs disabled, and this lock is released upon return, but irqs remain
285 * disabled.
286 */
d3f6bad3 287static void rcu_report_unblock_qs_rnp(struct rcu_node *rnp, unsigned long flags)
288 __releases(rnp->lock)
289{
290 unsigned long mask;
291 struct rcu_node *rnp_p;
292
27f4d280 293 if (rnp->qsmask != 0 || rcu_preempt_blocked_readers_cgp(rnp)) {
1304afb2 294 raw_spin_unlock_irqrestore(&rnp->lock, flags);
295 return; /* Still need more quiescent states! */
296 }
297
298 rnp_p = rnp->parent;
299 if (rnp_p == NULL) {
300 /*
301 * Either there is only one rcu_node in the tree,
302 * or tasks were kicked up to root rcu_node due to
303 * CPUs going offline.
304 */
d3f6bad3 305 rcu_report_qs_rsp(&rcu_preempt_state, flags);
306 return;
307 }
308
309 /* Report up the rest of the hierarchy. */
310 mask = rnp->grpmask;
311 raw_spin_unlock(&rnp->lock); /* irqs remain disabled. */
312 raw_spin_lock(&rnp_p->lock); /* irqs already disabled. */
d3f6bad3 313 rcu_report_qs_rnp(mask, &rcu_preempt_state, rnp_p, flags);
314}
315
316/*
317 * Advance a ->blkd_tasks-list pointer to the next entry, instead
318 * returning NULL if at the end of the list.
319 */
320static struct list_head *rcu_next_node_entry(struct task_struct *t,
321 struct rcu_node *rnp)
322{
323 struct list_head *np;
324
325 np = t->rcu_node_entry.next;
326 if (np == &rnp->blkd_tasks)
327 np = NULL;
328 return np;
329}
330
331/*
332 * Handle special cases during rcu_read_unlock(), such as needing to
333 * notify RCU core processing or task having blocked during the RCU
334 * read-side critical section.
335 */
2a3fa843 336void rcu_read_unlock_special(struct task_struct *t)
337{
338 int empty;
d9a3da06 339 int empty_exp;
389abd48 340 int empty_exp_now;
f41d911f 341 unsigned long flags;
12f5f524 342 struct list_head *np;
343#ifdef CONFIG_RCU_BOOST
344 struct rt_mutex *rbmp = NULL;
345#endif /* #ifdef CONFIG_RCU_BOOST */
346 struct rcu_node *rnp;
347 int special;
348
349 /* NMI handlers cannot block and cannot safely manipulate state. */
350 if (in_nmi())
351 return;
352
353 local_irq_save(flags);
354
355 /*
356 * If RCU core is waiting for this CPU to exit critical section,
357 * let it know that we have done so.
358 */
359 special = t->rcu_read_unlock_special;
360 if (special & RCU_READ_UNLOCK_NEED_QS) {
c3422bea 361 rcu_preempt_qs(smp_processor_id());
362 }
363
364 /* Hardware IRQ handlers cannot block. */
ec433f0c 365 if (in_irq() || in_serving_softirq()) {
366 local_irq_restore(flags);
367 return;
368 }
369
370 /* Clean up if blocked during RCU read-side critical section. */
371 if (special & RCU_READ_UNLOCK_BLOCKED) {
372 t->rcu_read_unlock_special &= ~RCU_READ_UNLOCK_BLOCKED;
373
374 /*
375 * Remove this task from the list it blocked on. The
376 * task can migrate while we acquire the lock, but at
377 * most one time. So at most two passes through loop.
378 */
379 for (;;) {
86848966 380 rnp = t->rcu_blocked_node;
1304afb2 381 raw_spin_lock(&rnp->lock); /* irqs already disabled. */
86848966 382 if (rnp == t->rcu_blocked_node)
dd5d19ba 383 break;
1304afb2 384 raw_spin_unlock(&rnp->lock); /* irqs remain disabled. */
dd5d19ba 385 }
27f4d280 386 empty = !rcu_preempt_blocked_readers_cgp(rnp);
387 empty_exp = !rcu_preempted_readers_exp(rnp);
388 smp_mb(); /* ensure expedited fastpath sees end of RCU c-s. */
12f5f524 389 np = rcu_next_node_entry(t, rnp);
f41d911f 390 list_del_init(&t->rcu_node_entry);
82e78d80 391 t->rcu_blocked_node = NULL;
392 trace_rcu_unlock_preempted_task("rcu_preempt",
393 rnp->gpnum, t->pid);
394 if (&t->rcu_node_entry == rnp->gp_tasks)
395 rnp->gp_tasks = np;
396 if (&t->rcu_node_entry == rnp->exp_tasks)
397 rnp->exp_tasks = np;
398#ifdef CONFIG_RCU_BOOST
399 if (&t->rcu_node_entry == rnp->boost_tasks)
400 rnp->boost_tasks = np;
401 /* Snapshot/clear ->rcu_boost_mutex with rcu_node lock held. */
402 if (t->rcu_boost_mutex) {
403 rbmp = t->rcu_boost_mutex;
404 t->rcu_boost_mutex = NULL;
7765be2f 405 }
27f4d280 406#endif /* #ifdef CONFIG_RCU_BOOST */
407
408 /*
409 * If this was the last task on the current list, and if
410 * we aren't waiting on any CPUs, report the quiescent state.
411 * Note that rcu_report_unblock_qs_rnp() releases rnp->lock,
412 * so we must take a snapshot of the expedited state.
f41d911f 413 */
389abd48 414 empty_exp_now = !rcu_preempted_readers_exp(rnp);
415 if (!empty && !rcu_preempt_blocked_readers_cgp(rnp)) {
416 trace_rcu_quiescent_state_report("preempt_rcu",
417 rnp->gpnum,
418 0, rnp->qsmask,
419 rnp->level,
420 rnp->grplo,
421 rnp->grphi,
422 !!rnp->gp_tasks);
d3f6bad3 423 rcu_report_unblock_qs_rnp(rnp, flags);
c701d5d9 424 } else {
d4c08f2a 425 raw_spin_unlock_irqrestore(&rnp->lock, flags);
c701d5d9 426 }
d9a3da06 427
428#ifdef CONFIG_RCU_BOOST
429 /* Unboost if we were boosted. */
430 if (rbmp)
431 rt_mutex_unlock(rbmp);
432#endif /* #ifdef CONFIG_RCU_BOOST */
433
434 /*
435 * If this was the last task on the expedited lists,
436 * then we need to report up the rcu_node hierarchy.
437 */
389abd48 438 if (!empty_exp && empty_exp_now)
b40d293e 439 rcu_report_exp_rnp(&rcu_preempt_state, rnp, true);
440 } else {
441 local_irq_restore(flags);
f41d911f 442 }
443}
444
445#ifdef CONFIG_RCU_CPU_STALL_VERBOSE
446
447/*
448 * Dump detailed information for all tasks blocking the current RCU
449 * grace period on the specified rcu_node structure.
450 */
451static void rcu_print_detail_task_stall_rnp(struct rcu_node *rnp)
452{
453 unsigned long flags;
454 struct task_struct *t;
455
12f5f524 456 raw_spin_lock_irqsave(&rnp->lock, flags);
457 if (!rcu_preempt_blocked_readers_cgp(rnp)) {
458 raw_spin_unlock_irqrestore(&rnp->lock, flags);
459 return;
460 }
461 t = list_entry(rnp->gp_tasks,
462 struct task_struct, rcu_node_entry);
463 list_for_each_entry_continue(t, &rnp->blkd_tasks, rcu_node_entry)
464 sched_show_task(t);
465 raw_spin_unlock_irqrestore(&rnp->lock, flags);
466}
467
468/*
469 * Dump detailed information for all tasks blocking the current RCU
470 * grace period.
471 */
472static void rcu_print_detail_task_stall(struct rcu_state *rsp)
473{
474 struct rcu_node *rnp = rcu_get_root(rsp);
475
476 rcu_print_detail_task_stall_rnp(rnp);
477 rcu_for_each_leaf_node(rsp, rnp)
478 rcu_print_detail_task_stall_rnp(rnp);
479}
480
481#else /* #ifdef CONFIG_RCU_CPU_STALL_VERBOSE */
482
483static void rcu_print_detail_task_stall(struct rcu_state *rsp)
484{
485}
486
487#endif /* #else #ifdef CONFIG_RCU_CPU_STALL_VERBOSE */
488
489#ifdef CONFIG_RCU_CPU_STALL_INFO
490
491static void rcu_print_task_stall_begin(struct rcu_node *rnp)
492{
493 printk(KERN_ERR "\tTasks blocked on level-%d rcu_node (CPUs %d-%d):",
494 rnp->level, rnp->grplo, rnp->grphi);
495}
496
497static void rcu_print_task_stall_end(void)
498{
499 printk(KERN_CONT "\n");
500}
501
502#else /* #ifdef CONFIG_RCU_CPU_STALL_INFO */
503
504static void rcu_print_task_stall_begin(struct rcu_node *rnp)
505{
506}
507
508static void rcu_print_task_stall_end(void)
509{
510}
511
512#endif /* #else #ifdef CONFIG_RCU_CPU_STALL_INFO */
513
514/*
515 * Scan the current list of tasks blocked within RCU read-side critical
516 * sections, printing out the tid of each.
517 */
9bc8b558 518static int rcu_print_task_stall(struct rcu_node *rnp)
f41d911f 519{
f41d911f 520 struct task_struct *t;
9bc8b558 521 int ndetected = 0;
f41d911f 522
27f4d280 523 if (!rcu_preempt_blocked_readers_cgp(rnp))
9bc8b558 524 return 0;
a858af28 525 rcu_print_task_stall_begin(rnp);
526 t = list_entry(rnp->gp_tasks,
527 struct task_struct, rcu_node_entry);
9bc8b558 528 list_for_each_entry_continue(t, &rnp->blkd_tasks, rcu_node_entry) {
a858af28 529 printk(KERN_CONT " P%d", t->pid);
530 ndetected++;
531 }
a858af28 532 rcu_print_task_stall_end();
9bc8b558 533 return ndetected;
534}
535
536/*
537 * Check that the list of blocked tasks for the newly completed grace
538 * period is in fact empty. It is a serious bug to complete a grace
539 * period that still has RCU readers blocked! This function must be
540 * invoked -before- updating this rnp's ->gpnum, and the rnp's ->lock
541 * must be held by the caller.
542 *
543 * Also, if there are blocked tasks on the list, they automatically
544 * block the newly created grace period, so set up ->gp_tasks accordingly.
545 */
546static void rcu_preempt_check_blocked_tasks(struct rcu_node *rnp)
547{
27f4d280 548 WARN_ON_ONCE(rcu_preempt_blocked_readers_cgp(rnp));
549 if (!list_empty(&rnp->blkd_tasks))
550 rnp->gp_tasks = rnp->blkd_tasks.next;
28ecd580 551 WARN_ON_ONCE(rnp->qsmask);
552}
553
554#ifdef CONFIG_HOTPLUG_CPU
555
556/*
557 * Handle tasklist migration for case in which all CPUs covered by the
558 * specified rcu_node have gone offline. Move them up to the root
559 * rcu_node. The reason for not just moving them to the immediate
560 * parent is to remove the need for rcu_read_unlock_special() to
561 * make more than two attempts to acquire the target rcu_node's lock.
562 * Returns true if there were tasks blocking the current RCU grace
563 * period.
dd5d19ba 564 *
565 * Returns 1 if there was previously a task blocking the current grace
566 * period on the specified rcu_node structure.
567 *
568 * The caller must hold rnp->lock with irqs disabled.
569 */
570static int rcu_preempt_offline_tasks(struct rcu_state *rsp,
571 struct rcu_node *rnp,
572 struct rcu_data *rdp)
dd5d19ba 573{
574 struct list_head *lp;
575 struct list_head *lp_root;
d9a3da06 576 int retval = 0;
dd5d19ba 577 struct rcu_node *rnp_root = rcu_get_root(rsp);
12f5f524 578 struct task_struct *t;
dd5d19ba 579
580 if (rnp == rnp_root) {
581 WARN_ONCE(1, "Last CPU thought to be offlined?");
237c80c5 582 return 0; /* Shouldn't happen: at least one CPU online. */
86848966 583 }
584
585 /* If we are on an internal node, complain bitterly. */
586 WARN_ON_ONCE(rnp != rdp->mynode);
587
588 /*
589 * Move tasks up to root rcu_node. Don't try to get fancy for
590 * this corner-case operation -- just put this node's tasks
591 * at the head of the root node's list, and update the root node's
592 * ->gp_tasks and ->exp_tasks pointers to those of this node's,
593 * if non-NULL. This might result in waiting for more tasks than
594 * absolutely necessary, but this is a good performance/complexity
595 * tradeoff.
dd5d19ba 596 */
2036d94a 597 if (rcu_preempt_blocked_readers_cgp(rnp) && rnp->qsmask == 0)
598 retval |= RCU_OFL_TASKS_NORM_GP;
599 if (rcu_preempted_readers_exp(rnp))
600 retval |= RCU_OFL_TASKS_EXP_GP;
601 lp = &rnp->blkd_tasks;
602 lp_root = &rnp_root->blkd_tasks;
603 while (!list_empty(lp)) {
604 t = list_entry(lp->next, typeof(*t), rcu_node_entry);
605 raw_spin_lock(&rnp_root->lock); /* irqs already disabled */
606 list_del(&t->rcu_node_entry);
607 t->rcu_blocked_node = rnp_root;
608 list_add(&t->rcu_node_entry, lp_root);
609 if (&t->rcu_node_entry == rnp->gp_tasks)
610 rnp_root->gp_tasks = rnp->gp_tasks;
611 if (&t->rcu_node_entry == rnp->exp_tasks)
612 rnp_root->exp_tasks = rnp->exp_tasks;
613#ifdef CONFIG_RCU_BOOST
614 if (&t->rcu_node_entry == rnp->boost_tasks)
615 rnp_root->boost_tasks = rnp->boost_tasks;
616#endif /* #ifdef CONFIG_RCU_BOOST */
12f5f524 617 raw_spin_unlock(&rnp_root->lock); /* irqs still disabled */
dd5d19ba 618 }
27f4d280 619
620 rnp->gp_tasks = NULL;
621 rnp->exp_tasks = NULL;
27f4d280 622#ifdef CONFIG_RCU_BOOST
1e3fd2b3 623 rnp->boost_tasks = NULL;
624 /*
625 * In case root is being boosted and leaf was not. Make sure
626 * that we boost the tasks blocking the current grace period
627 * in this case.
628 */
629 raw_spin_lock(&rnp_root->lock); /* irqs already disabled */
630 if (rnp_root->boost_tasks != NULL &&
631 rnp_root->boost_tasks != rnp_root->gp_tasks &&
632 rnp_root->boost_tasks != rnp_root->exp_tasks)
633 rnp_root->boost_tasks = rnp_root->gp_tasks;
634 raw_spin_unlock(&rnp_root->lock); /* irqs still disabled */
635#endif /* #ifdef CONFIG_RCU_BOOST */
636
237c80c5 637 return retval;
638}
639
640#endif /* #ifdef CONFIG_HOTPLUG_CPU */
641
642/*
643 * Check for a quiescent state from the current CPU. When a task blocks,
644 * the task is recorded in the corresponding CPU's rcu_node structure,
645 * which is checked elsewhere.
646 *
647 * Caller must disable hard irqs.
648 */
649static void rcu_preempt_check_callbacks(int cpu)
650{
651 struct task_struct *t = current;
652
653 if (t->rcu_read_lock_nesting == 0) {
c3422bea 654 rcu_preempt_qs(cpu);
655 return;
656 }
657 if (t->rcu_read_lock_nesting > 0 &&
658 per_cpu(rcu_preempt_data, cpu).qs_pending)
c3422bea 659 t->rcu_read_unlock_special |= RCU_READ_UNLOCK_NEED_QS;
660}
661
662#ifdef CONFIG_RCU_BOOST
663
664static void rcu_preempt_do_callbacks(void)
665{
666 rcu_do_batch(&rcu_preempt_state, &__get_cpu_var(rcu_preempt_data));
667}
668
669#endif /* #ifdef CONFIG_RCU_BOOST */
670
f41d911f 671/*
6cc68793 672 * Queue a preemptible-RCU callback for invocation after a grace period.
673 */
674void call_rcu(struct rcu_head *head, void (*func)(struct rcu_head *rcu))
675{
3fbfbf7a 676 __call_rcu(head, func, &rcu_preempt_state, -1, 0);
677}
678EXPORT_SYMBOL_GPL(call_rcu);
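/*
 * Illustrative sketch (added for exposition, never compiled): the usual
 * way a caller uses call_rcu() to defer freeing of an RCU-protected
 * element until a grace period has elapsed.  The structure and function
 * names are hypothetical.
 */
#if 0
struct example_node {
	struct list_head list;
	struct rcu_head rcu;
};

static void example_free_rcu(struct rcu_head *head)
{
	struct example_node *p = container_of(head, struct example_node, rcu);

	kfree(p);				/* runs only after a full grace period */
}

static void example_del(struct example_node *p)
{
	list_del_rcu(&p->list);			/* unlink under the update-side lock */
	call_rcu(&p->rcu, example_free_rcu);	/* reclaim once pre-existing readers finish */
}
#endif /* illustrative example */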
679
680/*
681 * Queue an RCU callback for lazy invocation after a grace period.
682 * This will likely be later named something like "call_rcu_lazy()",
683 * but this change will require some way of tagging the lazy RCU
684 * callbacks in the list of pending callbacks. Until then, this
685 * function may only be called from __kfree_rcu().
686 */
687void kfree_call_rcu(struct rcu_head *head,
688 void (*func)(struct rcu_head *rcu))
689{
3fbfbf7a 690 __call_rcu(head, func, &rcu_preempt_state, -1, 1);
691}
692EXPORT_SYMBOL_GPL(kfree_call_rcu);
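/*
 * Illustrative sketch (added for exposition, never compiled): callers
 * normally reach kfree_call_rcu() through the kfree_rcu() macro, which
 * avoids writing a separate callback when the element is simply kfree()d.
 * The structure name is hypothetical.
 */
#if 0
struct example_item {
	struct rcu_head rcu;
	int payload;
};

static void example_release(struct example_item *p)
{
	kfree_rcu(p, rcu);	/* lazily queues a kfree() via kfree_call_rcu() */
}
#endif /* illustrative example */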
693
694/**
695 * synchronize_rcu - wait until a grace period has elapsed.
696 *
697 * Control will return to the caller some time after a full grace
698 * period has elapsed, in other words after all currently executing RCU
699 * read-side critical sections have completed. Note, however, that
700 * upon return from synchronize_rcu(), the caller might well be executing
701 * concurrently with new RCU read-side critical sections that began while
702 * synchronize_rcu() was waiting. RCU read-side critical sections are
703 * delimited by rcu_read_lock() and rcu_read_unlock(), and may be nested.
704 *
705 * See the description of synchronize_sched() for more detailed information
706 * on memory ordering guarantees.
707 */
708void synchronize_rcu(void)
709{
710 rcu_lockdep_assert(!lock_is_held(&rcu_bh_lock_map) &&
711 !lock_is_held(&rcu_lock_map) &&
712 !lock_is_held(&rcu_sched_lock_map),
713 "Illegal synchronize_rcu() in RCU read-side critical section");
714 if (!rcu_scheduler_active)
715 return;
716 if (rcu_expedited)
717 synchronize_rcu_expedited();
718 else
719 wait_rcu_gp(call_rcu);
720}
721EXPORT_SYMBOL_GPL(synchronize_rcu);
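/*
 * Illustrative sketch (added for exposition, never compiled): the classic
 * update-side pattern that pairs with the hypothetical reader earlier in
 * this file -- publish a new version of an RCU-protected structure, wait
 * for a grace period with synchronize_rcu(), then free the old version.
 * "example_lock" stands for whatever already serializes updaters.
 */
#if 0
static DEFINE_SPINLOCK(example_lock);

static void example_update(struct example_data *newp)
{
	struct example_data *oldp;

	spin_lock(&example_lock);
	oldp = rcu_dereference_protected(example_global_ptr,
					 lockdep_is_held(&example_lock));
	rcu_assign_pointer(example_global_ptr, newp);	/* publish the new version */
	spin_unlock(&example_lock);

	synchronize_rcu();	/* wait for pre-existing readers to finish */
	kfree(oldp);		/* no reader can still hold a reference */
}
#endif /* illustrative example */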
722
d9a3da06 723static DECLARE_WAIT_QUEUE_HEAD(sync_rcu_preempt_exp_wq);
bcfa57ce 724static unsigned long sync_rcu_preempt_exp_count;
725static DEFINE_MUTEX(sync_rcu_preempt_exp_mutex);
726
727/*
728 * Return non-zero if there are any tasks in RCU read-side critical
729 * sections blocking the current preemptible-RCU expedited grace period.
730 * If there is no preemptible-RCU expedited grace period currently in
731 * progress, returns zero unconditionally.
732 */
733static int rcu_preempted_readers_exp(struct rcu_node *rnp)
734{
12f5f524 735 return rnp->exp_tasks != NULL;
736}
737
738/*
739 * Return non-zero if there is no RCU expedited grace period in progress
740 * for the specified rcu_node structure, in other words, if all CPUs and
741 * tasks covered by the specified rcu_node structure have done their bit
742 * for the current expedited grace period. Works only for preemptible
743 * RCU -- other RCU implementations use other means.
744 *
745 * Caller must hold sync_rcu_preempt_exp_mutex.
746 */
747static int sync_rcu_preempt_exp_done(struct rcu_node *rnp)
748{
749 return !rcu_preempted_readers_exp(rnp) &&
750 ACCESS_ONCE(rnp->expmask) == 0;
751}
752
753/*
754 * Report the exit from RCU read-side critical section for the last task
755 * that queued itself during or before the current expedited preemptible-RCU
756 * grace period. This event is reported either to the rcu_node structure on
757 * which the task was queued or to one of that rcu_node structure's ancestors,
758 * recursively up the tree. (Calm down, calm down, we do the recursion
759 * iteratively!)
760 *
761 * Most callers will set the "wake" flag, but the task initiating the
762 * expedited grace period need not wake itself.
763 *
764 * Caller must hold sync_rcu_preempt_exp_mutex.
765 */
766static void rcu_report_exp_rnp(struct rcu_state *rsp, struct rcu_node *rnp,
767 bool wake)
768{
769 unsigned long flags;
770 unsigned long mask;
771
1304afb2 772 raw_spin_lock_irqsave(&rnp->lock, flags);
d9a3da06 773 for (;;) {
774 if (!sync_rcu_preempt_exp_done(rnp)) {
775 raw_spin_unlock_irqrestore(&rnp->lock, flags);
d9a3da06 776 break;
131906b0 777 }
d9a3da06 778 if (rnp->parent == NULL) {
131906b0 779 raw_spin_unlock_irqrestore(&rnp->lock, flags);
780 if (wake)
781 wake_up(&sync_rcu_preempt_exp_wq);
782 break;
783 }
784 mask = rnp->grpmask;
1304afb2 785 raw_spin_unlock(&rnp->lock); /* irqs remain disabled */
d9a3da06 786 rnp = rnp->parent;
1304afb2 787 raw_spin_lock(&rnp->lock); /* irqs already disabled */
788 rnp->expmask &= ~mask;
789 }
790}
791
792/*
793 * Snapshot the tasks blocking the newly started preemptible-RCU expedited
794 * grace period for the specified rcu_node structure. If there are no such
795 * tasks, report it up the rcu_node hierarchy.
796 *
797 * Caller must hold sync_rcu_preempt_exp_mutex and must exclude
798 * CPU hotplug operations.
799 */
800static void
801sync_rcu_preempt_exp_init(struct rcu_state *rsp, struct rcu_node *rnp)
802{
1217ed1b 803 unsigned long flags;
12f5f524 804 int must_wait = 0;
d9a3da06 805
1217ed1b 806 raw_spin_lock_irqsave(&rnp->lock, flags);
c701d5d9 807 if (list_empty(&rnp->blkd_tasks)) {
1217ed1b 808 raw_spin_unlock_irqrestore(&rnp->lock, flags);
c701d5d9 809 } else {
12f5f524 810 rnp->exp_tasks = rnp->blkd_tasks.next;
1217ed1b 811 rcu_initiate_boost(rnp, flags); /* releases rnp->lock */
812 must_wait = 1;
813 }
d9a3da06 814 if (!must_wait)
b40d293e 815 rcu_report_exp_rnp(rsp, rnp, false); /* Don't wake self. */
816}
817
818/**
819 * synchronize_rcu_expedited - Brute-force RCU grace period
820 *
821 * Wait for an RCU-preempt grace period, but expedite it. The basic
822 * idea is to invoke synchronize_sched_expedited() to push all the tasks to
823 * the ->blkd_tasks lists and wait for this list to drain. This consumes
824 * significant time on all CPUs and is unfriendly to real-time workloads,
825 * so is thus not recommended for any sort of common-case code.
826 * In fact, if you are using synchronize_rcu_expedited() in a loop,
827 * please restructure your code to batch your updates, and then use a
828 * single synchronize_rcu() instead.
829 *
830 * Note that it is illegal to call this function while holding any lock
831 * that is acquired by a CPU-hotplug notifier. And yes, it is also illegal
832 * to call this function from a CPU-hotplug notifier. Failing to observe
833 * these restrictions will result in deadlock.
834 */
835void synchronize_rcu_expedited(void)
836{
837 unsigned long flags;
838 struct rcu_node *rnp;
839 struct rcu_state *rsp = &rcu_preempt_state;
bcfa57ce 840 unsigned long snap;
841 int trycount = 0;
842
843 smp_mb(); /* Caller's modifications seen first by other CPUs. */
844 snap = ACCESS_ONCE(sync_rcu_preempt_exp_count) + 1;
845 smp_mb(); /* Above access cannot bleed into critical section. */
846
847 /*
848 * Block CPU-hotplug operations. This means that any CPU-hotplug
849 * operation that finds an rcu_node structure with tasks in the
850 * process of being boosted will know that all tasks blocking
851 * this expedited grace period will already be in the process of
852 * being boosted. This simplifies the process of moving tasks
853 * from leaf to root rcu_node structures.
854 */
855 get_online_cpus();
856
857 /*
858 * Acquire lock, falling back to synchronize_rcu() if too many
859 * lock-acquisition failures. Of course, if someone does the
860 * expedited grace period for us, just leave.
861 */
862 while (!mutex_trylock(&sync_rcu_preempt_exp_mutex)) {
863 if (ULONG_CMP_LT(snap,
864 ACCESS_ONCE(sync_rcu_preempt_exp_count))) {
865 put_online_cpus();
866 goto mb_ret; /* Others did our work for us. */
867 }
c701d5d9 868 if (trycount++ < 10) {
d9a3da06 869 udelay(trycount * num_online_cpus());
c701d5d9 870 } else {
1943c89d 871 put_online_cpus();
3705b88d 872 wait_rcu_gp(call_rcu);
873 return;
874 }
d9a3da06 875 }
876 if (ULONG_CMP_LT(snap, ACCESS_ONCE(sync_rcu_preempt_exp_count))) {
877 put_online_cpus();
d9a3da06 878 goto unlock_mb_ret; /* Others did our work for us. */
1943c89d 879 }
d9a3da06 880
12f5f524 881 /* force all RCU readers onto ->blkd_tasks lists. */
882 synchronize_sched_expedited();
883
884 /* Initialize ->expmask for all non-leaf rcu_node structures. */
885 rcu_for_each_nonleaf_node_breadth_first(rsp, rnp) {
1943c89d 886 raw_spin_lock_irqsave(&rnp->lock, flags);
d9a3da06 887 rnp->expmask = rnp->qsmaskinit;
1943c89d 888 raw_spin_unlock_irqrestore(&rnp->lock, flags);
889 }
890
12f5f524 891 /* Snapshot current state of ->blkd_tasks lists. */
892 rcu_for_each_leaf_node(rsp, rnp)
893 sync_rcu_preempt_exp_init(rsp, rnp);
894 if (NUM_RCU_NODES > 1)
895 sync_rcu_preempt_exp_init(rsp, rcu_get_root(rsp));
896
1943c89d 897 put_online_cpus();
d9a3da06 898
12f5f524 899 /* Wait for snapshotted ->blkd_tasks lists to drain. */
900 rnp = rcu_get_root(rsp);
901 wait_event(sync_rcu_preempt_exp_wq,
902 sync_rcu_preempt_exp_done(rnp));
903
904 /* Clean up and exit. */
905 smp_mb(); /* ensure expedited GP seen before counter increment. */
906 ACCESS_ONCE(sync_rcu_preempt_exp_count)++;
907unlock_mb_ret:
908 mutex_unlock(&sync_rcu_preempt_exp_mutex);
909mb_ret:
910 smp_mb(); /* ensure subsequent action seen after grace period. */
911}
912EXPORT_SYMBOL_GPL(synchronize_rcu_expedited);
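/*
 * Illustrative sketch (added for exposition, never compiled) of the
 * batching advice above: rather than paying for one expedited grace
 * period per retired element, unlink the whole batch first and then wait
 * once.  Reuses the hypothetical example_node and example_lock names from
 * the sketches earlier in this file.
 */
#if 0
/* Costly: one expedited grace period per retired element. */
static void example_retire_each(struct example_node **batch, int n)
{
	int i;

	for (i = 0; i < n; i++) {
		spin_lock(&example_lock);
		list_del_rcu(&batch[i]->list);
		spin_unlock(&example_lock);
		synchronize_rcu_expedited();	/* expensive, disturbs all CPUs */
		kfree(batch[i]);
	}
}

/* Cheaper: unlink everything, then a single grace period covers the batch. */
static void example_retire_batch(struct example_node **batch, int n)
{
	int i;

	spin_lock(&example_lock);
	for (i = 0; i < n; i++)
		list_del_rcu(&batch[i]->list);
	spin_unlock(&example_lock);

	synchronize_rcu();

	for (i = 0; i < n; i++)
		kfree(batch[i]);
}
#endif /* illustrative example */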
913
914/**
915 * rcu_barrier - Wait until all in-flight call_rcu() callbacks complete.
916 *
917 * Note that this primitive does not necessarily wait for an RCU grace period
918 * to complete. For example, if there are no RCU callbacks queued anywhere
919 * in the system, then rcu_barrier() is within its rights to return
920 * immediately, without waiting for anything, much less an RCU grace period.
921 */
922void rcu_barrier(void)
923{
037b64ed 924 _rcu_barrier(&rcu_preempt_state);
925}
926EXPORT_SYMBOL_GPL(rcu_barrier);
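/*
 * Illustrative sketch (added for exposition, never compiled): the
 * canonical use of rcu_barrier() on a module-unload path.  Once no new
 * callbacks can be posted, rcu_barrier() guarantees that every callback
 * already queued by call_rcu() has been invoked, so the code and data
 * those callbacks use may then be freed.  The helper names are
 * hypothetical.
 */
#if 0
static void __exit example_module_exit(void)
{
	example_stop_posting_callbacks();	/* ensure no further call_rcu() calls */
	rcu_barrier();				/* wait for all already-queued callbacks */
	example_free_everything();
}
#endif /* illustrative example */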
927
1eba8f84 928/*
6cc68793 929 * Initialize preemptible RCU's state structures.
930 */
931static void __init __rcu_init_preempt(void)
932{
394f99a9 933 rcu_init_one(&rcu_preempt_state, &rcu_preempt_data);
934}
935
936#else /* #ifdef CONFIG_TREE_PREEMPT_RCU */
937
938static struct rcu_state *rcu_state = &rcu_sched_state;
939
940/*
941 * Tell them what RCU they are running.
942 */
0e0fc1c2 943static void __init rcu_bootup_announce(void)
944{
945 printk(KERN_INFO "Hierarchical RCU implementation.\n");
26845c28 946 rcu_bootup_announce_oddness();
947}
948
949/*
950 * Return the number of RCU batches processed thus far for debug & stats.
951 */
952long rcu_batches_completed(void)
953{
954 return rcu_batches_completed_sched();
955}
956EXPORT_SYMBOL_GPL(rcu_batches_completed);
957
958/*
959 * Force a quiescent state for RCU, which, because there is no preemptible
960 * RCU, becomes the same as rcu-sched.
961 */
962void rcu_force_quiescent_state(void)
963{
964 rcu_sched_force_quiescent_state();
965}
966EXPORT_SYMBOL_GPL(rcu_force_quiescent_state);
967
968/*
969 * Because preemptible RCU does not exist, we never have to check for
970 * CPUs being in quiescent states.
971 */
972static void rcu_preempt_note_context_switch(int cpu)
973{
974}
975
fc2219d4 976/*
6cc68793 977 * Because preemptible RCU does not exist, there are never any preempted
978 * RCU readers.
979 */
27f4d280 980static int rcu_preempt_blocked_readers_cgp(struct rcu_node *rnp)
981{
982 return 0;
983}
984
985#ifdef CONFIG_HOTPLUG_CPU
986
987/* Because preemptible RCU does not exist, no quieting of tasks. */
d3f6bad3 988static void rcu_report_unblock_qs_rnp(struct rcu_node *rnp, unsigned long flags)
b668c9cf 989{
1304afb2 990 raw_spin_unlock_irqrestore(&rnp->lock, flags);
991}
992
993#endif /* #ifdef CONFIG_HOTPLUG_CPU */
994
1ed509a2 995/*
6cc68793 996 * Because preemptible RCU does not exist, we never have to check for
997 * tasks blocked within RCU read-side critical sections.
998 */
999static void rcu_print_detail_task_stall(struct rcu_state *rsp)
1000{
1001}
1002
f41d911f 1003/*
6cc68793 1004 * Because preemptible RCU does not exist, we never have to check for
1005 * tasks blocked within RCU read-side critical sections.
1006 */
9bc8b558 1007static int rcu_print_task_stall(struct rcu_node *rnp)
f41d911f 1008{
9bc8b558 1009 return 0;
1010}
1011
b0e165c0 1012/*
6cc68793 1013 * Because there is no preemptible RCU, there can be no readers blocked,
1014 * so there is no need to check for blocked tasks. So check only for
1015 * bogus qsmask values.
1016 */
1017static void rcu_preempt_check_blocked_tasks(struct rcu_node *rnp)
1018{
49e29126 1019 WARN_ON_ONCE(rnp->qsmask);
1020}
1021
1022#ifdef CONFIG_HOTPLUG_CPU
1023
dd5d19ba 1024/*
6cc68793 1025 * Because preemptible RCU does not exist, it never needs to migrate
1026 * tasks that were blocked within RCU read-side critical sections, and
1027 * such non-existent tasks cannot possibly have been blocking the current
1028 * grace period.
dd5d19ba 1029 */
1030static int rcu_preempt_offline_tasks(struct rcu_state *rsp,
1031 struct rcu_node *rnp,
1032 struct rcu_data *rdp)
dd5d19ba 1033{
237c80c5 1034 return 0;
1035}
1036
1037#endif /* #ifdef CONFIG_HOTPLUG_CPU */
1038
f41d911f 1039/*
6cc68793 1040 * Because preemptible RCU does not exist, it never has any callbacks
1041 * to check.
1042 */
1eba8f84 1043static void rcu_preempt_check_callbacks(int cpu)
1044{
1045}
1046
1047/*
1048 * Queue an RCU callback for lazy invocation after a grace period.
1049 * This will likely be later named something like "call_rcu_lazy()",
1050 * but this change will require some way of tagging the lazy RCU
1051 * callbacks in the list of pending callbacks. Until then, this
1052 * function may only be called from __kfree_rcu().
1053 *
1054 * Because there is no preemptible RCU, we use RCU-sched instead.
1055 */
1056void kfree_call_rcu(struct rcu_head *head,
1057 void (*func)(struct rcu_head *rcu))
1058{
3fbfbf7a 1059 __call_rcu(head, func, &rcu_sched_state, -1, 1);
1060}
1061EXPORT_SYMBOL_GPL(kfree_call_rcu);
1062
1063/*
1064 * Wait for an rcu-preempt grace period, but make it happen quickly.
6cc68793 1065 * But because preemptible RCU does not exist, map to rcu-sched.
1066 */
1067void synchronize_rcu_expedited(void)
1068{
1069 synchronize_sched_expedited();
1070}
1071EXPORT_SYMBOL_GPL(synchronize_rcu_expedited);
1072
1073#ifdef CONFIG_HOTPLUG_CPU
1074
1075/*
6cc68793 1076 * Because preemptible RCU does not exist, there is never any need to
1077 * report on tasks preempted in RCU read-side critical sections during
1078 * expedited RCU grace periods.
1079 */
1080static void rcu_report_exp_rnp(struct rcu_state *rsp, struct rcu_node *rnp,
1081 bool wake)
d9a3da06 1082{
1083}
1084
1085#endif /* #ifdef CONFIG_HOTPLUG_CPU */
1086
e74f4c45 1087/*
6cc68793 1088 * Because preemptible RCU does not exist, rcu_barrier() is just
1089 * another name for rcu_barrier_sched().
1090 */
1091void rcu_barrier(void)
1092{
1093 rcu_barrier_sched();
1094}
1095EXPORT_SYMBOL_GPL(rcu_barrier);
1096
1eba8f84 1097/*
6cc68793 1098 * Because preemptible RCU does not exist, it need not be initialized.
1099 */
1100static void __init __rcu_init_preempt(void)
1101{
1102}
1103
f41d911f 1104#endif /* #else #ifdef CONFIG_TREE_PREEMPT_RCU */
8bd93a2c 1105
1106#ifdef CONFIG_RCU_BOOST
1107
1108#include "rtmutex_common.h"
1109
1110#ifdef CONFIG_RCU_TRACE
1111
1112static void rcu_initiate_boost_trace(struct rcu_node *rnp)
1113{
1114 if (list_empty(&rnp->blkd_tasks))
1115 rnp->n_balk_blkd_tasks++;
1116 else if (rnp->exp_tasks == NULL && rnp->gp_tasks == NULL)
1117 rnp->n_balk_exp_gp_tasks++;
1118 else if (rnp->gp_tasks != NULL && rnp->boost_tasks != NULL)
1119 rnp->n_balk_boost_tasks++;
1120 else if (rnp->gp_tasks != NULL && rnp->qsmask != 0)
1121 rnp->n_balk_notblocked++;
1122 else if (rnp->gp_tasks != NULL &&
a9f4793d 1123 ULONG_CMP_LT(jiffies, rnp->boost_time))
1124 rnp->n_balk_notyet++;
1125 else
1126 rnp->n_balk_nos++;
1127}
1128
1129#else /* #ifdef CONFIG_RCU_TRACE */
1130
1131static void rcu_initiate_boost_trace(struct rcu_node *rnp)
1132{
1133}
1134
1135#endif /* #else #ifdef CONFIG_RCU_TRACE */
1136
1137static void rcu_wake_cond(struct task_struct *t, int status)
1138{
1139 /*
1140 * If the thread is yielding, only wake it when this
1141 * is invoked from idle
1142 */
1143 if (status != RCU_KTHREAD_YIELDING || is_idle_task(current))
1144 wake_up_process(t);
1145}
1146
1147/*
1148 * Carry out RCU priority boosting on the task indicated by ->exp_tasks
1149 * or ->boost_tasks, advancing the pointer to the next task in the
1150 * ->blkd_tasks list.
1151 *
1152 * Note that irqs must be enabled: boosting the task can block.
1153 * Returns 1 if there are more tasks needing to be boosted.
1154 */
1155static int rcu_boost(struct rcu_node *rnp)
1156{
1157 unsigned long flags;
1158 struct rt_mutex mtx;
1159 struct task_struct *t;
1160 struct list_head *tb;
1161
1162 if (rnp->exp_tasks == NULL && rnp->boost_tasks == NULL)
1163 return 0; /* Nothing left to boost. */
1164
1165 raw_spin_lock_irqsave(&rnp->lock, flags);
1166
1167 /*
1168 * Recheck under the lock: all tasks in need of boosting
1169 * might exit their RCU read-side critical sections on their own.
1170 */
1171 if (rnp->exp_tasks == NULL && rnp->boost_tasks == NULL) {
1172 raw_spin_unlock_irqrestore(&rnp->lock, flags);
1173 return 0;
1174 }
1175
1176 /*
1177 * Preferentially boost tasks blocking expedited grace periods.
1178 * This cannot starve the normal grace periods because a second
1179 * expedited grace period must boost all blocked tasks, including
1180 * those blocking the pre-existing normal grace period.
1181 */
0ea1f2eb 1182 if (rnp->exp_tasks != NULL) {
27f4d280 1183 tb = rnp->exp_tasks;
1184 rnp->n_exp_boosts++;
1185 } else {
27f4d280 1186 tb = rnp->boost_tasks;
1187 rnp->n_normal_boosts++;
1188 }
1189 rnp->n_tasks_boosted++;
1190
1191 /*
1192 * We boost task t by manufacturing an rt_mutex that appears to
1193 * be held by task t. We leave a pointer to that rt_mutex where
1194 * task t can find it, and task t will release the mutex when it
1195 * exits its outermost RCU read-side critical section. Then
1196 * simply acquiring this artificial rt_mutex will boost task
1197 * t's priority. (Thanks to tglx for suggesting this approach!)
1198 *
1199 * Note that task t must acquire rnp->lock to remove itself from
1200 * the ->blkd_tasks list, which it will do from exit() if from
1201 * nowhere else. We therefore are guaranteed that task t will
1202 * stay around at least until we drop rnp->lock. Note that
1203 * rnp->lock also resolves races between our priority boosting
1204 * and task t's exiting its outermost RCU read-side critical
1205 * section.
1206 */
1207 t = container_of(tb, struct task_struct, rcu_node_entry);
1208 rt_mutex_init_proxy_locked(&mtx, t);
1209 t->rcu_boost_mutex = &mtx;
1210 raw_spin_unlock_irqrestore(&rnp->lock, flags);
1211 rt_mutex_lock(&mtx); /* Side effect: boosts task t's priority. */
1212 rt_mutex_unlock(&mtx); /* Keep lockdep happy. */
1213
1214 return ACCESS_ONCE(rnp->exp_tasks) != NULL ||
1215 ACCESS_ONCE(rnp->boost_tasks) != NULL;
1216}
1217
1218/*
1219 * Priority-boosting kthread. One per leaf rcu_node and one for the
1220 * root rcu_node.
1221 */
1222static int rcu_boost_kthread(void *arg)
1223{
1224 struct rcu_node *rnp = (struct rcu_node *)arg;
1225 int spincnt = 0;
1226 int more2boost;
1227
385680a9 1228 trace_rcu_utilization("Start boost kthread@init");
27f4d280 1229 for (;;) {
d71df90e 1230 rnp->boost_kthread_status = RCU_KTHREAD_WAITING;
385680a9 1231 trace_rcu_utilization("End boost kthread@rcu_wait");
08bca60a 1232 rcu_wait(rnp->boost_tasks || rnp->exp_tasks);
385680a9 1233 trace_rcu_utilization("Start boost kthread@rcu_wait");
d71df90e 1234 rnp->boost_kthread_status = RCU_KTHREAD_RUNNING;
1235 more2boost = rcu_boost(rnp);
1236 if (more2boost)
1237 spincnt++;
1238 else
1239 spincnt = 0;
1240 if (spincnt > 10) {
5d01bbd1 1241 rnp->boost_kthread_status = RCU_KTHREAD_YIELDING;
385680a9 1242 trace_rcu_utilization("End boost kthread@rcu_yield");
5d01bbd1 1243 schedule_timeout_interruptible(2);
385680a9 1244 trace_rcu_utilization("Start boost kthread@rcu_yield");
1245 spincnt = 0;
1246 }
1247 }
1217ed1b 1248 /* NOTREACHED */
385680a9 1249 trace_rcu_utilization("End boost kthread@notreached");
1250 return 0;
1251}
1252
1253/*
1254 * Check to see if it is time to start boosting RCU readers that are
1255 * blocking the current grace period, and, if so, tell the per-rcu_node
1256 * kthread to start boosting them. If there is an expedited grace
1257 * period in progress, it is always time to boost.
1258 *
1259 * The caller must hold rnp->lock, which this function releases.
1260 * The ->boost_kthread_task is immortal, so we don't need to worry
1261 * about it going away.
27f4d280 1262 */
1217ed1b 1263static void rcu_initiate_boost(struct rcu_node *rnp, unsigned long flags)
1264{
1265 struct task_struct *t;
1266
1267 if (!rcu_preempt_blocked_readers_cgp(rnp) && rnp->exp_tasks == NULL) {
1268 rnp->n_balk_exp_gp_tasks++;
1217ed1b 1269 raw_spin_unlock_irqrestore(&rnp->lock, flags);
27f4d280 1270 return;
0ea1f2eb 1271 }
1272 if (rnp->exp_tasks != NULL ||
1273 (rnp->gp_tasks != NULL &&
1274 rnp->boost_tasks == NULL &&
1275 rnp->qsmask == 0 &&
1276 ULONG_CMP_GE(jiffies, rnp->boost_time))) {
1277 if (rnp->exp_tasks == NULL)
1278 rnp->boost_tasks = rnp->gp_tasks;
1217ed1b 1279 raw_spin_unlock_irqrestore(&rnp->lock, flags);
27f4d280 1280 t = rnp->boost_kthread_task;
1281 if (t)
1282 rcu_wake_cond(t, rnp->boost_kthread_status);
1217ed1b 1283 } else {
0ea1f2eb 1284 rcu_initiate_boost_trace(rnp);
1285 raw_spin_unlock_irqrestore(&rnp->lock, flags);
1286 }
1287}
1288
1289/*
1290 * Wake up the per-CPU kthread to invoke RCU callbacks.
1291 */
1292static void invoke_rcu_callbacks_kthread(void)
1293{
1294 unsigned long flags;
1295
1296 local_irq_save(flags);
1297 __this_cpu_write(rcu_cpu_has_work, 1);
1eb52121 1298 if (__this_cpu_read(rcu_cpu_kthread_task) != NULL &&
1299 current != __this_cpu_read(rcu_cpu_kthread_task)) {
1300 rcu_wake_cond(__this_cpu_read(rcu_cpu_kthread_task),
1301 __this_cpu_read(rcu_cpu_kthread_status));
1302 }
1303 local_irq_restore(flags);
1304}
1305
1306/*
1307 * Is the current CPU running the RCU-callbacks kthread?
1308 * Caller must have preemption disabled.
1309 */
1310static bool rcu_is_callbacks_kthread(void)
1311{
1312 return __get_cpu_var(rcu_cpu_kthread_task) == current;
1313}
1314
1315#define RCU_BOOST_DELAY_JIFFIES DIV_ROUND_UP(CONFIG_RCU_BOOST_DELAY * HZ, 1000)
1316
1317/*
1318 * Do priority-boost accounting for the start of a new grace period.
1319 */
1320static void rcu_preempt_boost_start_gp(struct rcu_node *rnp)
1321{
1322 rnp->boost_time = jiffies + RCU_BOOST_DELAY_JIFFIES;
1323}
1324
1325/*
1326 * Create an RCU-boost kthread for the specified node if one does not
1327 * already exist. We only create this kthread for preemptible RCU.
1328 * Returns zero if all is well, a negated errno otherwise.
1329 */
1330static int __cpuinit rcu_spawn_one_boost_kthread(struct rcu_state *rsp,
5d01bbd1 1331 struct rcu_node *rnp)
27f4d280 1332{
5d01bbd1 1333 int rnp_index = rnp - &rsp->node[0];
1334 unsigned long flags;
1335 struct sched_param sp;
1336 struct task_struct *t;
1337
1338 if (&rcu_preempt_state != rsp)
1339 return 0;
1340
1341 if (!rcu_scheduler_fully_active || rnp->qsmaskinit == 0)
1342 return 0;
1343
a46e0899 1344 rsp->boost = 1;
1345 if (rnp->boost_kthread_task != NULL)
1346 return 0;
1347 t = kthread_create(rcu_boost_kthread, (void *)rnp,
5b61b0ba 1348 "rcub/%d", rnp_index);
1349 if (IS_ERR(t))
1350 return PTR_ERR(t);
1351 raw_spin_lock_irqsave(&rnp->lock, flags);
1352 rnp->boost_kthread_task = t;
1353 raw_spin_unlock_irqrestore(&rnp->lock, flags);
5b61b0ba 1354 sp.sched_priority = RCU_BOOST_PRIO;
27f4d280 1355 sched_setscheduler_nocheck(t, SCHED_FIFO, &sp);
9a432736 1356 wake_up_process(t); /* get to TASK_INTERRUPTIBLE quickly. */
1357 return 0;
1358}
1359
1360static void rcu_kthread_do_work(void)
1361{
1362 rcu_do_batch(&rcu_sched_state, &__get_cpu_var(rcu_sched_data));
1363 rcu_do_batch(&rcu_bh_state, &__get_cpu_var(rcu_bh_data));
1364 rcu_preempt_do_callbacks();
1365}
1366
62ab7072 1367static void rcu_cpu_kthread_setup(unsigned int cpu)
f8b7fc6b 1368{
f8b7fc6b 1369 struct sched_param sp;
f8b7fc6b 1370
1371 sp.sched_priority = RCU_KTHREAD_PRIO;
1372 sched_setscheduler_nocheck(current, SCHED_FIFO, &sp);
1373}
1374
62ab7072 1375static void rcu_cpu_kthread_park(unsigned int cpu)
f8b7fc6b 1376{
62ab7072 1377 per_cpu(rcu_cpu_kthread_status, cpu) = RCU_KTHREAD_OFFCPU;
1378}
1379
62ab7072 1380static int rcu_cpu_kthread_should_run(unsigned int cpu)
f8b7fc6b 1381{
62ab7072 1382 return __get_cpu_var(rcu_cpu_has_work);
1383}
1384
1385/*
1386 * Per-CPU kernel thread that invokes RCU callbacks. This replaces the
1387 * RCU softirq used in flavors and configurations of RCU that do not
1388 * support RCU priority boosting.
f8b7fc6b 1389 */
62ab7072 1390static void rcu_cpu_kthread(unsigned int cpu)
f8b7fc6b 1391{
1392 unsigned int *statusp = &__get_cpu_var(rcu_cpu_kthread_status);
1393 char work, *workp = &__get_cpu_var(rcu_cpu_has_work);
1394 int spincnt;
f8b7fc6b 1395
62ab7072 1396 for (spincnt = 0; spincnt < 10; spincnt++) {
385680a9 1397 trace_rcu_utilization("Start CPU kthread@rcu_wait");
f8b7fc6b 1398 local_bh_disable();
f8b7fc6b 1399 *statusp = RCU_KTHREAD_RUNNING;
1400 this_cpu_inc(rcu_cpu_kthread_loops);
1401 local_irq_disable();
1402 work = *workp;
1403 *workp = 0;
62ab7072 1404 local_irq_enable();
1405 if (work)
1406 rcu_kthread_do_work();
1407 local_bh_enable();
1408 if (*workp == 0) {
1409 trace_rcu_utilization("End CPU kthread@rcu_wait");
1410 *statusp = RCU_KTHREAD_WAITING;
1411 return;
1412 }
1413 }
1414 *statusp = RCU_KTHREAD_YIELDING;
1415 trace_rcu_utilization("Start CPU kthread@rcu_yield");
1416 schedule_timeout_interruptible(2);
1417 trace_rcu_utilization("End CPU kthread@rcu_yield");
1418 *statusp = RCU_KTHREAD_WAITING;
1419}
1420
1421/*
1422 * Set the per-rcu_node kthread's affinity to cover all CPUs that are
1423 * served by the rcu_node in question. The CPU hotplug lock is still
1424 * held, so the value of rnp->qsmaskinit will be stable.
1425 *
1426 * We don't include outgoingcpu in the affinity set, use -1 if there is
1427 * no outgoing CPU. If there are no CPUs left in the affinity set,
1428 * this function allows the kthread to execute on any CPU.
1429 */
5d01bbd1 1430static void rcu_boost_kthread_setaffinity(struct rcu_node *rnp, int outgoingcpu)
f8b7fc6b 1431{
1432 struct task_struct *t = rnp->boost_kthread_task;
1433 unsigned long mask = rnp->qsmaskinit;
1434 cpumask_var_t cm;
1435 int cpu;
f8b7fc6b 1436
5d01bbd1 1437 if (!t)
f8b7fc6b 1438 return;
5d01bbd1 1439 if (!zalloc_cpumask_var(&cm, GFP_KERNEL))
f8b7fc6b 1440 return;
1441 for (cpu = rnp->grplo; cpu <= rnp->grphi; cpu++, mask >>= 1)
1442 if ((mask & 0x1) && cpu != outgoingcpu)
1443 cpumask_set_cpu(cpu, cm);
1444 if (cpumask_weight(cm) == 0) {
1445 cpumask_setall(cm);
1446 for (cpu = rnp->grplo; cpu <= rnp->grphi; cpu++)
1447 cpumask_clear_cpu(cpu, cm);
1448 WARN_ON_ONCE(cpumask_weight(cm) == 0);
1449 }
5d01bbd1 1450 set_cpus_allowed_ptr(t, cm);
1451 free_cpumask_var(cm);
1452}
1453
1454static struct smp_hotplug_thread rcu_cpu_thread_spec = {
1455 .store = &rcu_cpu_kthread_task,
1456 .thread_should_run = rcu_cpu_kthread_should_run,
1457 .thread_fn = rcu_cpu_kthread,
1458 .thread_comm = "rcuc/%u",
1459 .setup = rcu_cpu_kthread_setup,
1460 .park = rcu_cpu_kthread_park,
1461};
1462
1463/*
1464 * Spawn all kthreads -- called as soon as the scheduler is running.
1465 */
1466static int __init rcu_spawn_kthreads(void)
1467{
f8b7fc6b 1468 struct rcu_node *rnp;
5d01bbd1 1469 int cpu;
f8b7fc6b 1470
b0d30417 1471 rcu_scheduler_fully_active = 1;
62ab7072 1472 for_each_possible_cpu(cpu)
f8b7fc6b 1473 per_cpu(rcu_cpu_has_work, cpu) = 0;
62ab7072 1474 BUG_ON(smpboot_register_percpu_thread(&rcu_cpu_thread_spec));
f8b7fc6b 1475 rnp = rcu_get_root(rcu_state);
5d01bbd1 1476 (void)rcu_spawn_one_boost_kthread(rcu_state, rnp);
1477 if (NUM_RCU_NODES > 1) {
1478 rcu_for_each_leaf_node(rcu_state, rnp)
5d01bbd1 1479 (void)rcu_spawn_one_boost_kthread(rcu_state, rnp);
1480 }
1481 return 0;
1482}
1483early_initcall(rcu_spawn_kthreads);
1484
1485static void __cpuinit rcu_prepare_kthreads(int cpu)
1486{
1487 struct rcu_data *rdp = per_cpu_ptr(rcu_state->rda, cpu);
1488 struct rcu_node *rnp = rdp->mynode;
1489
1490 /* Fire up the incoming CPU's kthread and leaf rcu_node kthread. */
62ab7072 1491 if (rcu_scheduler_fully_active)
5d01bbd1 1492 (void)rcu_spawn_one_boost_kthread(rcu_state, rnp);
1493}
1494
1495#else /* #ifdef CONFIG_RCU_BOOST */
1496
1217ed1b 1497static void rcu_initiate_boost(struct rcu_node *rnp, unsigned long flags)
27f4d280 1498{
1217ed1b 1499 raw_spin_unlock_irqrestore(&rnp->lock, flags);
1500}
1501
a46e0899 1502static void invoke_rcu_callbacks_kthread(void)
27f4d280 1503{
a46e0899 1504 WARN_ON_ONCE(1);
1505}
1506
1507static bool rcu_is_callbacks_kthread(void)
1508{
1509 return false;
1510}
1511
1512static void rcu_preempt_boost_start_gp(struct rcu_node *rnp)
1513{
1514}
1515
5d01bbd1 1516static void rcu_boost_kthread_setaffinity(struct rcu_node *rnp, int outgoingcpu)
1517{
1518}
1519
1520static int __init rcu_scheduler_really_started(void)
1521{
1522 rcu_scheduler_fully_active = 1;
1523 return 0;
1524}
1525early_initcall(rcu_scheduler_really_started);
1526
1527static void __cpuinit rcu_prepare_kthreads(int cpu)
1528{
1529}
1530
1531#endif /* #else #ifdef CONFIG_RCU_BOOST */
1532
1533#if !defined(CONFIG_RCU_FAST_NO_HZ)
1534
1535/*
1536 * Check to see if any future RCU-related work will need to be done
1537 * by the current CPU, even if none need be done immediately, returning
1538 * 1 if so. This function is part of the RCU implementation; it is -not-
1539 * an exported member of the RCU API.
1540 *
1541 * Because we do not have RCU_FAST_NO_HZ, just check whether this CPU needs
1542 * any flavor of RCU.
8bd93a2c 1543 */
aa9b1630 1544int rcu_needs_cpu(int cpu, unsigned long *delta_jiffies)
8bd93a2c 1545{
aa9b1630 1546 *delta_jiffies = ULONG_MAX;
c0f4dfd4 1547 return rcu_cpu_has_callbacks(cpu, NULL);
1548}
1549
1550/*
1551 * Because we do not have RCU_FAST_NO_HZ, don't bother cleaning up
1552 * after it.
1553 */
1554static void rcu_cleanup_after_idle(int cpu)
1555{
1556}
1557
aea1b35e 1558/*
a858af28 1559 * Do the idle-entry grace-period work, which, because CONFIG_RCU_FAST_NO_HZ=n,
aea1b35e
PM
1560 * is nothing.
1561 */
1562static void rcu_prepare_for_idle(int cpu)
1563{
1564}
1565
c57afe80
PM
1566/*
1567 * Don't bother keeping a running count of the number of RCU callbacks
1568 * posted because CONFIG_RCU_FAST_NO_HZ=n.
1569 */
1570static void rcu_idle_count_callbacks_posted(void)
1571{
1572}
1573
8bd93a2c
PM
1574#else /* #if !defined(CONFIG_RCU_FAST_NO_HZ) */
1575
f23f7fa1
PM
1576/*
1577 * This code is invoked when a CPU goes idle, at which point we want
1578 * to have the CPU do everything required for RCU so that it can enter
1579 * the energy-efficient dyntick-idle mode. This is handled by a
1580 * state machine implemented by rcu_prepare_for_idle() below.
1581 *
 1582 * The following two preprocessor symbols control this state machine:
1583 *
f23f7fa1
PM
1584 * RCU_IDLE_GP_DELAY gives the number of jiffies that a CPU is permitted
1585 * to sleep in dyntick-idle mode with RCU callbacks pending. This
1586 * is sized to be roughly one RCU grace period. Those energy-efficiency
1587 * benchmarkers who might otherwise be tempted to set this to a large
1588 * number, be warned: Setting RCU_IDLE_GP_DELAY too high can hang your
1589 * system. And if you are -that- concerned about energy efficiency,
1590 * just power the system down and be done with it!
778d250a
PM
1591 * RCU_IDLE_LAZY_GP_DELAY gives the number of jiffies that a CPU is
1592 * permitted to sleep in dyntick-idle mode with only lazy RCU
1593 * callbacks pending. Setting this too high can OOM your system.
f23f7fa1
PM
1594 *
1595 * The values below work well in practice. If future workloads require
1596 * adjustment, they can be converted into kernel config parameters, though
1597 * making the state machine smarter might be a better option.
1598 */
e84c48ae 1599#define RCU_IDLE_GP_DELAY 4 /* Roughly one grace period. */
778d250a 1600#define RCU_IDLE_LAZY_GP_DELAY (6 * HZ) /* Roughly six seconds. */
f23f7fa1 1601
5e44ce35
PM
1602static int rcu_idle_gp_delay = RCU_IDLE_GP_DELAY;
1603module_param(rcu_idle_gp_delay, int, 0644);
1604static int rcu_idle_lazy_gp_delay = RCU_IDLE_LAZY_GP_DELAY;
1605module_param(rcu_idle_lazy_gp_delay, int, 0644);
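/*
 * Editor's note -- usage sketch, not part of the original file.  Both
 * delays are expressed in jiffies and, because they are module
 * parameters with mode 0644, can be adjusted at runtime as well as at
 * boot.  The exact sysfs path depends on how this file is built in;
 * something like the following is typical (the path is an assumption):
 *
 *	echo 8 > /sys/module/rcutree/parameters/rcu_idle_gp_delay
 *
 * Raising either value trades latency of callback invocation (and, in
 * the lazy case, of memory reclamation) for longer idle sojourns.
 */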
486e2593 1606
9d2ad243 1607extern int tick_nohz_enabled;
486e2593
PM
1608
1609/*
c0f4dfd4
PM
1610 * Try to advance callbacks for all flavors of RCU on the current CPU.
1611 * Afterwards, if there are any callbacks ready for immediate invocation,
1612 * return true.
486e2593 1613 */
c0f4dfd4 1614static bool rcu_try_advance_all_cbs(void)
486e2593 1615{
c0f4dfd4
PM
1616 bool cbs_ready = false;
1617 struct rcu_data *rdp;
1618 struct rcu_node *rnp;
1619 struct rcu_state *rsp;
486e2593 1620
c0f4dfd4
PM
1621 for_each_rcu_flavor(rsp) {
1622 rdp = this_cpu_ptr(rsp->rda);
1623 rnp = rdp->mynode;
486e2593 1624
c0f4dfd4
PM
1625 /*
1626 * Don't bother checking unless a grace period has
1627 * completed since we last checked and there are
1628 * callbacks not yet ready to invoke.
1629 */
1630 if (rdp->completed != rnp->completed &&
1631 rdp->nxttail[RCU_DONE_TAIL] != rdp->nxttail[RCU_NEXT_TAIL])
1632 rcu_process_gp_end(rsp, rdp);
486e2593 1633
c0f4dfd4
PM
1634 if (cpu_has_callbacks_ready_to_invoke(rdp))
1635 cbs_ready = true;
1636 }
1637 return cbs_ready;
486e2593
PM
1638}
1639
aa9b1630 1640/*
c0f4dfd4
PM
1641 * Allow the CPU to enter dyntick-idle mode unless it has callbacks ready
1642 * to invoke. If the CPU has callbacks, try to advance them. Tell the
1643 * caller to set the timeout based on whether or not there are non-lazy
1644 * callbacks.
aa9b1630 1645 *
c0f4dfd4 1646 * The caller must have disabled interrupts.
aa9b1630 1647 */
c0f4dfd4 1648int rcu_needs_cpu(int cpu, unsigned long *dj)
aa9b1630
PM
1649{
1650 struct rcu_dynticks *rdtp = &per_cpu(rcu_dynticks, cpu);
1651
c0f4dfd4
PM
1652 /* Snapshot to detect later posting of non-lazy callback. */
1653 rdtp->nonlazy_posted_snap = rdtp->nonlazy_posted;
1654
aa9b1630 1655 /* If no callbacks, RCU doesn't need the CPU. */
c0f4dfd4
PM
1656 if (!rcu_cpu_has_callbacks(cpu, &rdtp->all_lazy)) {
1657 *dj = ULONG_MAX;
aa9b1630
PM
1658 return 0;
1659 }
c0f4dfd4
PM
1660
1661 /* Attempt to advance callbacks. */
1662 if (rcu_try_advance_all_cbs()) {
1663 /* Some ready to invoke, so initiate later invocation. */
1664 invoke_rcu_core();
aa9b1630
PM
1665 return 1;
1666 }
c0f4dfd4
PM
1667 rdtp->last_accelerate = jiffies;
1668
1669 /* Request timer delay depending on laziness, and round. */
6faf7283 1670 if (!rdtp->all_lazy) {
c0f4dfd4
PM
1671 *dj = round_up(rcu_idle_gp_delay + jiffies,
1672 rcu_idle_gp_delay) - jiffies;
e84c48ae 1673 } else {
c0f4dfd4 1674 *dj = round_jiffies(rcu_idle_lazy_gp_delay + jiffies) - jiffies;
e84c48ae 1675 }
aa9b1630
PM
1676 return 0;
1677}
1678
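/*
 * Editor's note -- worked example, not part of the original file.  The
 * round_up() above aligns the non-lazy wakeup to a multiple of
 * rcu_idle_gp_delay so that idle CPUs tend to wake at the same jiffy
 * and can share a single grace period.  With rcu_idle_gp_delay = 4:
 *
 *	jiffies = 1002:  round_up(1002 + 4, 4) - 1002 = 1008 - 1002 = 6
 *	jiffies = 1004:  round_up(1004 + 4, 4) - 1004 = 1008 - 1004 = 4
 *
 * Both CPUs are thus due to wake at jiffy 1008 despite having entered
 * idle at different times.
 */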
21e52e15 1679/*
c0f4dfd4
PM
1680 * Prepare a CPU for idle from an RCU perspective. The first major task
1681 * is to sense whether nohz mode has been enabled or disabled via sysfs.
1682 * The second major task is to check to see if a non-lazy callback has
1683 * arrived at a CPU that previously had only lazy callbacks. The third
1684 * major task is to accelerate (that is, assign grace-period numbers to)
1685 * any recently arrived callbacks.
aea1b35e
PM
1686 *
1687 * The caller must have disabled interrupts.
8bd93a2c 1688 */
aea1b35e 1689static void rcu_prepare_for_idle(int cpu)
8bd93a2c 1690{
c0f4dfd4 1691 struct rcu_data *rdp;
5955f7ee 1692 struct rcu_dynticks *rdtp = &per_cpu(rcu_dynticks, cpu);
c0f4dfd4
PM
1693 struct rcu_node *rnp;
1694 struct rcu_state *rsp;
9d2ad243
PM
1695 int tne;
1696
1697 /* Handle nohz enablement switches conservatively. */
1698 tne = ACCESS_ONCE(tick_nohz_enabled);
1699 if (tne != rdtp->tick_nohz_enabled_snap) {
c0f4dfd4 1700 if (rcu_cpu_has_callbacks(cpu, NULL))
9d2ad243
PM
1701 invoke_rcu_core(); /* force nohz to see update. */
1702 rdtp->tick_nohz_enabled_snap = tne;
1703 return;
1704 }
1705 if (!tne)
1706 return;
f511fc62 1707
c0f4dfd4 1708 /* If this is a no-CBs CPU, no callbacks, just return. */
534c97b0 1709 if (rcu_is_nocb_cpu(cpu))
9a0c6fef 1710 return;
9a0c6fef 1711
c57afe80 1712 /*
c0f4dfd4
PM
1713 * If a non-lazy callback arrived at a CPU having only lazy
1714 * callbacks, invoke RCU core for the side-effect of recalculating
1715 * idle duration on re-entry to idle.
c57afe80 1716 */
c0f4dfd4
PM
1717 if (rdtp->all_lazy &&
1718 rdtp->nonlazy_posted != rdtp->nonlazy_posted_snap) {
1719 invoke_rcu_core();
c57afe80
PM
1720 return;
1721 }
c57afe80 1722
3084f2f8 1723 /*
c0f4dfd4
PM
1724 * If we have not yet accelerated this jiffy, accelerate all
1725 * callbacks on this CPU.
3084f2f8 1726 */
c0f4dfd4 1727 if (rdtp->last_accelerate == jiffies)
aea1b35e 1728 return;
c0f4dfd4
PM
1729 rdtp->last_accelerate = jiffies;
1730 for_each_rcu_flavor(rsp) {
1731 rdp = per_cpu_ptr(rsp->rda, cpu);
1732 if (!*rdp->nxttail[RCU_DONE_TAIL])
1733 continue;
1734 rnp = rdp->mynode;
1735 raw_spin_lock(&rnp->lock); /* irqs already disabled. */
1736 rcu_accelerate_cbs(rsp, rnp, rdp);
1737 raw_spin_unlock(&rnp->lock); /* irqs remain disabled. */
77e38ed3 1738 }
c0f4dfd4 1739}
3084f2f8 1740
c0f4dfd4
PM
1741/*
1742 * Clean up for exit from idle. Attempt to advance callbacks based on
1743 * any grace periods that elapsed while the CPU was idle, and if any
1744 * callbacks are now ready to invoke, initiate invocation.
1745 */
1746static void rcu_cleanup_after_idle(int cpu)
1747{
1748 struct rcu_data *rdp;
1749 struct rcu_state *rsp;
a47cd880 1750
534c97b0 1751 if (rcu_is_nocb_cpu(cpu))
aea1b35e 1752 return;
c0f4dfd4
PM
1753 rcu_try_advance_all_cbs();
1754 for_each_rcu_flavor(rsp) {
1755 rdp = per_cpu_ptr(rsp->rda, cpu);
1756 if (cpu_has_callbacks_ready_to_invoke(rdp))
1757 invoke_rcu_core();
c701d5d9 1758 }
8bd93a2c
PM
1759}
1760
c57afe80 1761/*
98248a0e
PM
1762 * Keep a running count of the number of non-lazy callbacks posted
1763 * on this CPU. This running counter (which is never decremented) allows
1764 * rcu_prepare_for_idle() to detect when something out of the idle loop
1765 * posts a callback, even if an equal number of callbacks are invoked.
1766 * Of course, callbacks should only be posted from within a trace event
1767 * designed to be called from idle or from within RCU_NONIDLE().
c57afe80
PM
1768 */
1769static void rcu_idle_count_callbacks_posted(void)
1770{
5955f7ee 1771 __this_cpu_add(rcu_dynticks.nonlazy_posted, 1);
c57afe80
PM
1772}
1773
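/*
 * Editor's note -- usage sketch, not part of the original file.  As the
 * comment above says, code that must post a callback from within the
 * idle loop should wrap the call in RCU_NONIDLE() so that RCU briefly
 * exits its idle state; the structure and callback names here are
 * assumptions for illustration only:
 *
 *	RCU_NONIDLE(call_rcu(&my_obj->rcu, my_obj_free_cb));
 *
 * This keeps the call_rcu() legal in idle and lets the non-lazy
 * accounting above see the posting from a context RCU is watching.
 */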
b626c1b6
PM
1774/*
1775 * Data for flushing lazy RCU callbacks at OOM time.
1776 */
1777static atomic_t oom_callback_count;
1778static DECLARE_WAIT_QUEUE_HEAD(oom_callback_wq);
1779
1780/*
1781 * RCU OOM callback -- decrement the outstanding count and deliver the
1782 * wake-up if we are the last one.
1783 */
1784static void rcu_oom_callback(struct rcu_head *rhp)
1785{
1786 if (atomic_dec_and_test(&oom_callback_count))
1787 wake_up(&oom_callback_wq);
1788}
1789
1790/*
1791 * Post an rcu_oom_notify callback on the current CPU if it has at
1792 * least one lazy callback. This will unnecessarily post callbacks
1793 * to CPUs that already have a non-lazy callback at the end of their
1794 * callback list, but this is an infrequent operation, so accept some
1795 * extra overhead to keep things simple.
1796 */
1797static void rcu_oom_notify_cpu(void *unused)
1798{
1799 struct rcu_state *rsp;
1800 struct rcu_data *rdp;
1801
1802 for_each_rcu_flavor(rsp) {
1803 rdp = __this_cpu_ptr(rsp->rda);
1804 if (rdp->qlen_lazy != 0) {
1805 atomic_inc(&oom_callback_count);
1806 rsp->call(&rdp->oom_head, rcu_oom_callback);
1807 }
1808 }
1809}
1810
1811/*
1812 * If low on memory, ensure that each CPU has a non-lazy callback.
1813 * This will wake up CPUs that have only lazy callbacks, in turn
1814 * ensuring that they free up the corresponding memory in a timely manner.
1815 * Because an uncertain amount of memory will be freed in some uncertain
1816 * timeframe, we do not claim to have freed anything.
1817 */
1818static int rcu_oom_notify(struct notifier_block *self,
1819 unsigned long notused, void *nfreed)
1820{
1821 int cpu;
1822
1823 /* Wait for callbacks from earlier instance to complete. */
1824 wait_event(oom_callback_wq, atomic_read(&oom_callback_count) == 0);
1825
1826 /*
1827 * Prevent premature wakeup: ensure that all increments happen
1828 * before there is a chance of the counter reaching zero.
1829 */
1830 atomic_set(&oom_callback_count, 1);
1831
1832 get_online_cpus();
1833 for_each_online_cpu(cpu) {
1834 smp_call_function_single(cpu, rcu_oom_notify_cpu, NULL, 1);
1835 cond_resched();
1836 }
1837 put_online_cpus();
1838
1839 /* Unconditionally decrement: no need to wake ourselves up. */
1840 atomic_dec(&oom_callback_count);
1841
1842 return NOTIFY_OK;
1843}
1844
1845static struct notifier_block rcu_oom_nb = {
1846 .notifier_call = rcu_oom_notify
1847};
1848
1849static int __init rcu_register_oom_notifier(void)
1850{
1851 register_oom_notifier(&rcu_oom_nb);
1852 return 0;
1853}
1854early_initcall(rcu_register_oom_notifier);
1855
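/*
 * Editor's note -- background sketch, not part of the original file.
 * "Lazy" callbacks are those known to do nothing but free memory, most
 * commonly those queued via kfree_rcu(); the names below are assumptions
 * for illustration only:
 *
 *	kfree_rcu(old_p, rcu_head_field);	(lazy: frees memory only)
 *	call_rcu(&p->rh, some_callback);	(non-lazy)
 *
 * A CPU with only lazy callbacks may sleep for up to
 * rcu_idle_lazy_gp_delay, so under memory pressure the notifier above
 * posts a non-lazy callback on each such CPU, switching it to the short
 * non-lazy timeout so the pending frees happen sooner.
 */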
8bd93a2c 1856#endif /* #else #if !defined(CONFIG_RCU_FAST_NO_HZ) */
a858af28
PM
1857
1858#ifdef CONFIG_RCU_CPU_STALL_INFO
1859
1860#ifdef CONFIG_RCU_FAST_NO_HZ
1861
1862static void print_cpu_stall_fast_no_hz(char *cp, int cpu)
1863{
5955f7ee 1864 struct rcu_dynticks *rdtp = &per_cpu(rcu_dynticks, cpu);
c0f4dfd4 1865 unsigned long nlpd = rdtp->nonlazy_posted - rdtp->nonlazy_posted_snap;
a858af28 1866
c0f4dfd4
PM
1867 sprintf(cp, "last_accelerate: %04lx/%04lx, nonlazy_posted: %ld, %c%c",
1868 rdtp->last_accelerate & 0xffff, jiffies & 0xffff,
1869 ulong2long(nlpd),
1870 rdtp->all_lazy ? 'L' : '.',
1871 rdtp->tick_nohz_enabled_snap ? '.' : 'D');
a858af28
PM
1872}
1873
1874#else /* #ifdef CONFIG_RCU_FAST_NO_HZ */
1875
1876static void print_cpu_stall_fast_no_hz(char *cp, int cpu)
1877{
1c17e4d4 1878 *cp = '\0';
a858af28
PM
1879}
1880
1881#endif /* #else #ifdef CONFIG_RCU_FAST_NO_HZ */
1882
1883/* Initiate the stall-info list. */
1884static void print_cpu_stall_info_begin(void)
1885{
1886 printk(KERN_CONT "\n");
1887}
1888
1889/*
1890 * Print out diagnostic information for the specified stalled CPU.
1891 *
1892 * If the specified CPU is aware of the current RCU grace period
1893 * (flavor specified by rsp), then print the number of scheduling
1894 * clock interrupts the CPU has taken during the time that it has
1895 * been aware. Otherwise, print the number of RCU grace periods
1896 * that this CPU is ignorant of, for example, "1" if the CPU was
1897 * aware of the previous grace period.
1898 *
1899 * Also print out idle and (if CONFIG_RCU_FAST_NO_HZ) idle-entry info.
1900 */
1901static void print_cpu_stall_info(struct rcu_state *rsp, int cpu)
1902{
1903 char fast_no_hz[72];
1904 struct rcu_data *rdp = per_cpu_ptr(rsp->rda, cpu);
1905 struct rcu_dynticks *rdtp = rdp->dynticks;
1906 char *ticks_title;
1907 unsigned long ticks_value;
1908
1909 if (rsp->gpnum == rdp->gpnum) {
1910 ticks_title = "ticks this GP";
1911 ticks_value = rdp->ticks_this_gp;
1912 } else {
1913 ticks_title = "GPs behind";
1914 ticks_value = rsp->gpnum - rdp->gpnum;
1915 }
1916 print_cpu_stall_fast_no_hz(fast_no_hz, cpu);
6231069b 1917 printk(KERN_ERR "\t%d: (%lu %s) idle=%03x/%llx/%d softirq=%u/%u %s\n",
a858af28
PM
1918 cpu, ticks_value, ticks_title,
1919 atomic_read(&rdtp->dynticks) & 0xfff,
1920 rdtp->dynticks_nesting, rdtp->dynticks_nmi_nesting,
6231069b 1921 rdp->softirq_snap, kstat_softirqs_cpu(RCU_SOFTIRQ, cpu),
a858af28
PM
1922 fast_no_hz);
1923}
1924
1925/* Terminate the stall-info list. */
1926static void print_cpu_stall_info_end(void)
1927{
1928 printk(KERN_ERR "\t");
1929}
1930
1931/* Zero ->ticks_this_gp for all flavors of RCU. */
1932static void zero_cpu_stall_ticks(struct rcu_data *rdp)
1933{
1934 rdp->ticks_this_gp = 0;
6231069b 1935 rdp->softirq_snap = kstat_softirqs_cpu(RCU_SOFTIRQ, smp_processor_id());
a858af28
PM
1936}
1937
1938/* Increment ->ticks_this_gp for all flavors of RCU. */
1939static void increment_cpu_stall_ticks(void)
1940{
115f7a7c
PM
1941 struct rcu_state *rsp;
1942
1943 for_each_rcu_flavor(rsp)
1944 __this_cpu_ptr(rsp->rda)->ticks_this_gp++;
a858af28
PM
1945}
1946
1947#else /* #ifdef CONFIG_RCU_CPU_STALL_INFO */
1948
1949static void print_cpu_stall_info_begin(void)
1950{
1951 printk(KERN_CONT " {");
1952}
1953
1954static void print_cpu_stall_info(struct rcu_state *rsp, int cpu)
1955{
1956 printk(KERN_CONT " %d", cpu);
1957}
1958
1959static void print_cpu_stall_info_end(void)
1960{
1961 printk(KERN_CONT "} ");
1962}
1963
1964static void zero_cpu_stall_ticks(struct rcu_data *rdp)
1965{
1966}
1967
1968static void increment_cpu_stall_ticks(void)
1969{
1970}
1971
1972#endif /* #else #ifdef CONFIG_RCU_CPU_STALL_INFO */
3fbfbf7a
PM
1973
1974#ifdef CONFIG_RCU_NOCB_CPU
1975
1976/*
 1977 * Offload callback processing from the set of CPUs specified at
 1978 * boot time by rcu_nocb_mask. For each CPU in the set, there is a
1979 * kthread created that pulls the callbacks from the corresponding CPU,
1980 * waits for a grace period to elapse, and invokes the callbacks.
1981 * The no-CBs CPUs do a wake_up() on their kthread when they insert
1982 * a callback into any empty list, unless the rcu_nocb_poll boot parameter
1983 * has been specified, in which case each kthread actively polls its
1984 * CPU. (Which isn't so great for energy efficiency, but which does
1985 * reduce RCU's overhead on that CPU.)
1986 *
1987 * This is intended to be used in conjunction with Frederic Weisbecker's
1988 * adaptive-idle work, which would seriously reduce OS jitter on CPUs
1989 * running CPU-bound user-mode computations.
1990 *
1991 * Offloading of callback processing could also in theory be used as
1992 * an energy-efficiency measure because CPUs with no RCU callbacks
1993 * queued are more aggressive about entering dyntick-idle mode.
1994 */
1995
1996
1997/* Parse the boot-time rcu_nocb_mask CPU list from the kernel parameters. */
1998static int __init rcu_nocb_setup(char *str)
1999{
2000 alloc_bootmem_cpumask_var(&rcu_nocb_mask);
2001 have_rcu_nocb_mask = true;
2002 cpulist_parse(str, rcu_nocb_mask);
2003 return 1;
2004}
2005__setup("rcu_nocbs=", rcu_nocb_setup);
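/*
 * Editor's note -- usage sketch, not part of the original file.  The
 * offloaded set is given as a standard cpulist on the kernel command
 * line, optionally together with the polling mode parsed just below,
 * for example:
 *
 *	rcu_nocbs=1-7 rcu_nocb_poll
 *
 * This offloads callback invocation for CPUs 1-7 to "rcuo" kthreads and
 * has those kthreads poll rather than wait for wake-ups.
 */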
2006
1b0048a4
PG
2007static int __init parse_rcu_nocb_poll(char *arg)
2008{
2009 rcu_nocb_poll = 1;
2010 return 0;
2011}
2012early_param("rcu_nocb_poll", parse_rcu_nocb_poll);
2013
34ed6246 2014/*
dae6e64d
PM
2015 * Do any no-CBs CPUs need another grace period?
2016 *
2017 * Interrupts must be disabled. If the caller does not hold the root
 2018 * rcu_node structure's ->lock, the results are advisory only.
2019 */
2020static int rcu_nocb_needs_gp(struct rcu_state *rsp)
2021{
2022 struct rcu_node *rnp = rcu_get_root(rsp);
2023
8b425aa8 2024 return rnp->need_future_gp[(ACCESS_ONCE(rnp->completed) + 1) & 0x1];
dae6e64d
PM
2025}
2026
2027/*
0446be48
PM
2028 * Wake up any no-CBs CPUs' kthreads that were waiting on the just-ended
2029 * grace period.
dae6e64d 2030 */
0446be48 2031static void rcu_nocb_gp_cleanup(struct rcu_state *rsp, struct rcu_node *rnp)
dae6e64d 2032{
0446be48 2033 wake_up_all(&rnp->nocb_gp_wq[rnp->completed & 0x1]);
dae6e64d
PM
2034}
2035
2036/*
8b425aa8 2037 * Set the root rcu_node structure's ->need_future_gp field
dae6e64d
PM
2038 * based on the sum of those of all rcu_node structures. This does
2039 * double-count the root rcu_node structure's requests, but this
2040 * is necessary to handle the possibility of a rcu_nocb_kthread()
2041 * having awakened during the time that the rcu_node structures
2042 * were being updated for the end of the previous grace period.
34ed6246 2043 */
dae6e64d
PM
2044static void rcu_nocb_gp_set(struct rcu_node *rnp, int nrq)
2045{
8b425aa8 2046 rnp->need_future_gp[(rnp->completed + 1) & 0x1] += nrq;
dae6e64d
PM
2047}
2048
2049static void rcu_init_one_nocb(struct rcu_node *rnp)
34ed6246 2050{
dae6e64d
PM
2051 init_waitqueue_head(&rnp->nocb_gp_wq[0]);
2052 init_waitqueue_head(&rnp->nocb_gp_wq[1]);
34ed6246
PM
2053}
2054
3fbfbf7a 2055/* Is the specified CPU a no-CPUs CPU? */
d1e43fa5 2056bool rcu_is_nocb_cpu(int cpu)
3fbfbf7a
PM
2057{
2058 if (have_rcu_nocb_mask)
2059 return cpumask_test_cpu(cpu, rcu_nocb_mask);
2060 return false;
2061}
2062
2063/*
2064 * Enqueue the specified string of rcu_head structures onto the specified
2065 * CPU's no-CBs lists. The CPU is specified by rdp, the head of the
2066 * string by rhp, and the tail of the string by rhtp. The non-lazy/lazy
2067 * counts are supplied by rhcount and rhcount_lazy.
2068 *
 2069 * If warranted, also wake up the kthread servicing this CPU's queues.
2070 */
2071static void __call_rcu_nocb_enqueue(struct rcu_data *rdp,
2072 struct rcu_head *rhp,
2073 struct rcu_head **rhtp,
2074 int rhcount, int rhcount_lazy)
2075{
2076 int len;
2077 struct rcu_head **old_rhpp;
2078 struct task_struct *t;
2079
2080 /* Enqueue the callback on the nocb list and update counts. */
2081 old_rhpp = xchg(&rdp->nocb_tail, rhtp);
2082 ACCESS_ONCE(*old_rhpp) = rhp;
2083 atomic_long_add(rhcount, &rdp->nocb_q_count);
2084 atomic_long_add(rhcount_lazy, &rdp->nocb_q_count_lazy);
2085
2086 /* If we are not being polled and there is a kthread, awaken it ... */
2087 t = ACCESS_ONCE(rdp->nocb_kthread);
2088 if (rcu_nocb_poll | !t)
2089 return;
2090 len = atomic_long_read(&rdp->nocb_q_count);
2091 if (old_rhpp == &rdp->nocb_head) {
2092 wake_up(&rdp->nocb_wq); /* ... only if queue was empty ... */
2093 rdp->qlen_last_fqs_check = 0;
2094 } else if (len > rdp->qlen_last_fqs_check + qhimark) {
2095 wake_up_process(t); /* ... or if many callbacks queued. */
2096 rdp->qlen_last_fqs_check = LONG_MAX / 2;
2097 }
2098 return;
2099}
2100
2101/*
2102 * This is a helper for __call_rcu(), which invokes this when the normal
2103 * callback queue is inoperable. If this is not a no-CBs CPU, this
2104 * function returns failure back to __call_rcu(), which can complain
2105 * appropriately.
2106 *
2107 * Otherwise, this function queues the callback where the corresponding
2108 * "rcuo" kthread can find it.
2109 */
2110static bool __call_rcu_nocb(struct rcu_data *rdp, struct rcu_head *rhp,
2111 bool lazy)
2112{
2113
d1e43fa5 2114 if (!rcu_is_nocb_cpu(rdp->cpu))
3fbfbf7a
PM
2115 return 0;
2116 __call_rcu_nocb_enqueue(rdp, rhp, &rhp->next, 1, lazy);
21e7a608
PM
2117 if (__is_kfree_rcu_offset((unsigned long)rhp->func))
2118 trace_rcu_kfree_callback(rdp->rsp->name, rhp,
2119 (unsigned long)rhp->func,
2120 rdp->qlen_lazy, rdp->qlen);
2121 else
2122 trace_rcu_callback(rdp->rsp->name, rhp,
2123 rdp->qlen_lazy, rdp->qlen);
3fbfbf7a
PM
2124 return 1;
2125}
2126
2127/*
2128 * Adopt orphaned callbacks on a no-CBs CPU, or return 0 if this is
2129 * not a no-CBs CPU.
2130 */
2131static bool __maybe_unused rcu_nocb_adopt_orphan_cbs(struct rcu_state *rsp,
2132 struct rcu_data *rdp)
2133{
2134 long ql = rsp->qlen;
2135 long qll = rsp->qlen_lazy;
2136
2137 /* If this is not a no-CBs CPU, tell the caller to do it the old way. */
d1e43fa5 2138 if (!rcu_is_nocb_cpu(smp_processor_id()))
3fbfbf7a
PM
2139 return 0;
2140 rsp->qlen = 0;
2141 rsp->qlen_lazy = 0;
2142
2143 /* First, enqueue the donelist, if any. This preserves CB ordering. */
2144 if (rsp->orphan_donelist != NULL) {
2145 __call_rcu_nocb_enqueue(rdp, rsp->orphan_donelist,
2146 rsp->orphan_donetail, ql, qll);
2147 ql = qll = 0;
2148 rsp->orphan_donelist = NULL;
2149 rsp->orphan_donetail = &rsp->orphan_donelist;
2150 }
2151 if (rsp->orphan_nxtlist != NULL) {
2152 __call_rcu_nocb_enqueue(rdp, rsp->orphan_nxtlist,
2153 rsp->orphan_nxttail, ql, qll);
2154 ql = qll = 0;
2155 rsp->orphan_nxtlist = NULL;
2156 rsp->orphan_nxttail = &rsp->orphan_nxtlist;
2157 }
2158 return 1;
2159}
2160
2161/*
34ed6246
PM
2162 * If necessary, kick off a new grace period, and either way wait
2163 * for a subsequent grace period to complete.
3fbfbf7a 2164 */
34ed6246 2165static void rcu_nocb_wait_gp(struct rcu_data *rdp)
3fbfbf7a 2166{
34ed6246 2167 unsigned long c;
dae6e64d 2168 bool d;
34ed6246 2169 unsigned long flags;
34ed6246
PM
2170 struct rcu_node *rnp = rdp->mynode;
2171
2172 raw_spin_lock_irqsave(&rnp->lock, flags);
0446be48
PM
2173 c = rcu_start_future_gp(rnp, rdp);
2174 raw_spin_unlock_irqrestore(&rnp->lock, flags);
3fbfbf7a
PM
2175
2176 /*
34ed6246
PM
2177 * Wait for the grace period. Do so interruptibly to avoid messing
2178 * up the load average.
3fbfbf7a 2179 */
0446be48 2180 trace_rcu_future_gp(rnp, rdp, c, "StartWait");
34ed6246 2181 for (;;) {
dae6e64d
PM
2182 wait_event_interruptible(
2183 rnp->nocb_gp_wq[c & 0x1],
2184 (d = ULONG_CMP_GE(ACCESS_ONCE(rnp->completed), c)));
2185 if (likely(d))
34ed6246 2186 break;
dae6e64d 2187 flush_signals(current);
0446be48 2188 trace_rcu_future_gp(rnp, rdp, c, "ResumeWait");
34ed6246 2189 }
0446be48 2190 trace_rcu_future_gp(rnp, rdp, c, "EndWait");
34ed6246 2191 smp_mb(); /* Ensure that CB invocation happens after GP end. */
3fbfbf7a
PM
2192}
2193
2194/*
2195 * Per-rcu_data kthread, but only for no-CBs CPUs. Each kthread invokes
2196 * callbacks queued by the corresponding no-CBs CPU.
2197 */
2198static int rcu_nocb_kthread(void *arg)
2199{
2200 int c, cl;
2201 struct rcu_head *list;
2202 struct rcu_head *next;
2203 struct rcu_head **tail;
2204 struct rcu_data *rdp = arg;
2205
2206 /* Each pass through this loop invokes one batch of callbacks */
2207 for (;;) {
2208 /* If not polling, wait for next batch of callbacks. */
2209 if (!rcu_nocb_poll)
353af9c9 2210 wait_event_interruptible(rdp->nocb_wq, rdp->nocb_head);
3fbfbf7a
PM
2211 list = ACCESS_ONCE(rdp->nocb_head);
2212 if (!list) {
2213 schedule_timeout_interruptible(1);
353af9c9 2214 flush_signals(current);
3fbfbf7a
PM
2215 continue;
2216 }
2217
2218 /*
2219 * Extract queued callbacks, update counts, and wait
2220 * for a grace period to elapse.
2221 */
2222 ACCESS_ONCE(rdp->nocb_head) = NULL;
2223 tail = xchg(&rdp->nocb_tail, &rdp->nocb_head);
2224 c = atomic_long_xchg(&rdp->nocb_q_count, 0);
2225 cl = atomic_long_xchg(&rdp->nocb_q_count_lazy, 0);
2226 ACCESS_ONCE(rdp->nocb_p_count) += c;
2227 ACCESS_ONCE(rdp->nocb_p_count_lazy) += cl;
34ed6246 2228 rcu_nocb_wait_gp(rdp);
3fbfbf7a
PM
2229
2230 /* Each pass through the following loop invokes a callback. */
2231 trace_rcu_batch_start(rdp->rsp->name, cl, c, -1);
2232 c = cl = 0;
2233 while (list) {
2234 next = list->next;
2235 /* Wait for enqueuing to complete, if needed. */
2236 while (next == NULL && &list->next != tail) {
2237 schedule_timeout_interruptible(1);
2238 next = list->next;
2239 }
2240 debug_rcu_head_unqueue(list);
2241 local_bh_disable();
2242 if (__rcu_reclaim(rdp->rsp->name, list))
2243 cl++;
2244 c++;
2245 local_bh_enable();
2246 list = next;
2247 }
2248 trace_rcu_batch_end(rdp->rsp->name, c, !!list, 0, 0, 1);
2249 ACCESS_ONCE(rdp->nocb_p_count) -= c;
2250 ACCESS_ONCE(rdp->nocb_p_count_lazy) -= cl;
c635a4e1 2251 rdp->n_nocbs_invoked += c;
3fbfbf7a
PM
2252 }
2253 return 0;
2254}
2255
2256/* Initialize per-rcu_data variables for no-CBs CPUs. */
2257static void __init rcu_boot_init_nocb_percpu_data(struct rcu_data *rdp)
2258{
2259 rdp->nocb_tail = &rdp->nocb_head;
2260 init_waitqueue_head(&rdp->nocb_wq);
2261}
2262
2263/* Create a kthread for each RCU flavor for each no-CBs CPU. */
2264static void __init rcu_spawn_nocb_kthreads(struct rcu_state *rsp)
2265{
2266 int cpu;
2267 struct rcu_data *rdp;
2268 struct task_struct *t;
2269
2270 if (rcu_nocb_mask == NULL)
2271 return;
2272 for_each_cpu(cpu, rcu_nocb_mask) {
2273 rdp = per_cpu_ptr(rsp->rda, cpu);
a4889858
PM
2274 t = kthread_run(rcu_nocb_kthread, rdp,
2275 "rcuo%c/%d", rsp->abbr, cpu);
3fbfbf7a
PM
2276 BUG_ON(IS_ERR(t));
2277 ACCESS_ONCE(rdp->nocb_kthread) = t;
2278 }
2279}
2280
2281/* Prevent __call_rcu() from enqueuing callbacks on no-CBs CPUs */
34ed6246 2282static bool init_nocb_callback_list(struct rcu_data *rdp)
3fbfbf7a
PM
2283{
2284 if (rcu_nocb_mask == NULL ||
2285 !cpumask_test_cpu(rdp->cpu, rcu_nocb_mask))
34ed6246 2286 return false;
3fbfbf7a 2287 rdp->nxttail[RCU_NEXT_TAIL] = NULL;
34ed6246 2288 return true;
3fbfbf7a
PM
2289}
2290
34ed6246
PM
2291#else /* #ifdef CONFIG_RCU_NOCB_CPU */
2292
dae6e64d
PM
2293static int rcu_nocb_needs_gp(struct rcu_state *rsp)
2294{
2295 return 0;
3fbfbf7a
PM
2296}
2297
0446be48 2298static void rcu_nocb_gp_cleanup(struct rcu_state *rsp, struct rcu_node *rnp)
3fbfbf7a 2299{
3fbfbf7a
PM
2300}
2301
dae6e64d
PM
2302static void rcu_nocb_gp_set(struct rcu_node *rnp, int nrq)
2303{
2304}
2305
2306static void rcu_init_one_nocb(struct rcu_node *rnp)
2307{
2308}
3fbfbf7a 2309
3fbfbf7a
PM
2310static bool __call_rcu_nocb(struct rcu_data *rdp, struct rcu_head *rhp,
2311 bool lazy)
2312{
2313 return 0;
2314}
2315
2316static bool __maybe_unused rcu_nocb_adopt_orphan_cbs(struct rcu_state *rsp,
2317 struct rcu_data *rdp)
2318{
2319 return 0;
2320}
2321
3fbfbf7a
PM
2322static void __init rcu_boot_init_nocb_percpu_data(struct rcu_data *rdp)
2323{
2324}
2325
2326static void __init rcu_spawn_nocb_kthreads(struct rcu_state *rsp)
2327{
2328}
2329
34ed6246 2330static bool init_nocb_callback_list(struct rcu_data *rdp)
3fbfbf7a 2331{
34ed6246 2332 return false;
3fbfbf7a
PM
2333}
2334
2335#endif /* #else #ifdef CONFIG_RCU_NOCB_CPU */
65d798f0
PM
2336
2337/*
2338 * An adaptive-ticks CPU can potentially execute in kernel mode for an
2339 * arbitrarily long period of time with the scheduling-clock tick turned
2340 * off. RCU will be paying attention to this CPU because it is in the
2341 * kernel, but the CPU cannot be guaranteed to be executing the RCU state
2342 * machine because the scheduling-clock tick has been disabled. Therefore,
2343 * if an adaptive-ticks CPU is failing to respond to the current grace
2344 * period and has not be idle from an RCU perspective, kick it.
2345 */
2346static void rcu_kick_nohz_cpu(int cpu)
2347{
2348#ifdef CONFIG_NO_HZ_FULL
2349 if (tick_nohz_full_cpu(cpu))
2350 smp_send_reschedule(cpu);
2351#endif /* #ifdef CONFIG_NO_HZ_FULL */
2352}