rcu: add grace-period age and more kthread state to tracing
kernel/rcutree.c
1 /*
2 * Read-Copy Update mechanism for mutual exclusion
3 *
4 * This program is free software; you can redistribute it and/or modify
5 * it under the terms of the GNU General Public License as published by
6 * the Free Software Foundation; either version 2 of the License, or
7 * (at your option) any later version.
8 *
9 * This program is distributed in the hope that it will be useful,
10 * but WITHOUT ANY WARRANTY; without even the implied warranty of
11 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
12 * GNU General Public License for more details.
13 *
14 * You should have received a copy of the GNU General Public License
15 * along with this program; if not, write to the Free Software
16 * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
17 *
18 * Copyright IBM Corporation, 2008
19 *
20 * Authors: Dipankar Sarma <dipankar@in.ibm.com>
21 * Manfred Spraul <manfred@colorfullife.com>
22 * Paul E. McKenney <paulmck@linux.vnet.ibm.com> Hierarchical version
23 *
24 * Based on the original work by Paul McKenney <paulmck@us.ibm.com>
25 * and inputs from Rusty Russell, Andrea Arcangeli and Andi Kleen.
26 *
27 * For detailed explanation of Read-Copy Update mechanism see -
28 * Documentation/RCU
29 */
30 #include <linux/types.h>
31 #include <linux/kernel.h>
32 #include <linux/init.h>
33 #include <linux/spinlock.h>
34 #include <linux/smp.h>
35 #include <linux/rcupdate.h>
36 #include <linux/interrupt.h>
37 #include <linux/sched.h>
38 #include <linux/nmi.h>
39 #include <asm/atomic.h>
40 #include <linux/bitops.h>
41 #include <linux/module.h>
42 #include <linux/completion.h>
43 #include <linux/moduleparam.h>
44 #include <linux/percpu.h>
45 #include <linux/notifier.h>
46 #include <linux/cpu.h>
47 #include <linux/mutex.h>
48 #include <linux/time.h>
49 #include <linux/kernel_stat.h>
50 #include <linux/wait.h>
51 #include <linux/kthread.h>
52
53 #include "rcutree.h"
54
55 /* Data structures. */
56
57 static struct lock_class_key rcu_node_class[NUM_RCU_LVLS];
58
59 #define RCU_STATE_INITIALIZER(structname) { \
60 .level = { &structname.node[0] }, \
61 .levelcnt = { \
62 NUM_RCU_LVL_0, /* root of hierarchy. */ \
63 NUM_RCU_LVL_1, \
64 NUM_RCU_LVL_2, \
65 NUM_RCU_LVL_3, \
66 NUM_RCU_LVL_4, /* == MAX_RCU_LVLS */ \
67 }, \
68 .signaled = RCU_GP_IDLE, \
69 .gpnum = -300, \
70 .completed = -300, \
71 .onofflock = __RAW_SPIN_LOCK_UNLOCKED(&structname.onofflock), \
72 .fqslock = __RAW_SPIN_LOCK_UNLOCKED(&structname.fqslock), \
73 .n_force_qs = 0, \
74 .n_force_qs_ngp = 0, \
75 .name = #structname, \
76 }
77
78 struct rcu_state rcu_sched_state = RCU_STATE_INITIALIZER(rcu_sched_state);
79 DEFINE_PER_CPU(struct rcu_data, rcu_sched_data);
80
81 struct rcu_state rcu_bh_state = RCU_STATE_INITIALIZER(rcu_bh_state);
82 DEFINE_PER_CPU(struct rcu_data, rcu_bh_data);
83
84 static struct rcu_state *rcu_state;
85
86 int rcu_scheduler_active __read_mostly;
87 EXPORT_SYMBOL_GPL(rcu_scheduler_active);
88
89 /*
90 * Control variables for per-CPU and per-rcu_node kthreads. These
91 * handle all flavors of RCU.
92 */
93 static DEFINE_PER_CPU(struct task_struct *, rcu_cpu_kthread_task);
94 DEFINE_PER_CPU(unsigned int, rcu_cpu_kthread_status);
95 DEFINE_PER_CPU(int, rcu_cpu_kthread_cpu);
96 static DEFINE_PER_CPU(wait_queue_head_t, rcu_cpu_wq);
97 DEFINE_PER_CPU(char, rcu_cpu_has_work);
98 static char rcu_kthreads_spawnable;
99
100 static void rcu_node_kthread_setaffinity(struct rcu_node *rnp, int outgoingcpu);
101 static void invoke_rcu_cpu_kthread(void);
102
103 #define RCU_KTHREAD_PRIO 1 /* RT priority for per-CPU kthreads. */
104
105 /*
106 * Track the rcutorture test sequence number and the update version
107 * number within a given test. The rcutorture_testseq is incremented
108 * on every rcutorture module load and unload, so has an odd value
109 * when a test is running. The rcutorture_vernum is set to zero
110 * when rcutorture starts and is incremented on each rcutorture update.
111 * These variables enable correlating rcutorture output with the
112 * RCU tracing information.
113 */
114 unsigned long rcutorture_testseq;
115 unsigned long rcutorture_vernum;
116
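/*
 * Editor's illustration, not part of the original file: one way tracing
 * code could emit these two counters so that debugfs output can be
 * matched against rcutorture console messages.  The function name is
 * hypothetical; only printk() and the two variables above are real.
 */
static void __maybe_unused print_rcutorture_correlation(void)
{
	printk(KERN_INFO "rcutorture: testseq=%lu vernum=%lu\n",
	       rcutorture_testseq, rcutorture_vernum);
}
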
117 /*
118 * Return true if an RCU grace period is in progress. The ACCESS_ONCE()s
119 * permit this function to be invoked without holding the root rcu_node
120 * structure's ->lock, but of course results can be subject to change.
121 */
122 static int rcu_gp_in_progress(struct rcu_state *rsp)
123 {
124 return ACCESS_ONCE(rsp->completed) != ACCESS_ONCE(rsp->gpnum);
125 }
126
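/*
 * Editor's sketch, not part of the original file: tracing code can sample
 * ->gpnum and ->completed in the same lock-free manner to report how far
 * the grace-period counters have advanced; a race merely yields a
 * momentarily stale value.  The helper name is hypothetical.
 */
static unsigned long __maybe_unused rcu_gp_number_delta(struct rcu_state *rsp)
{
	return ACCESS_ONCE(rsp->gpnum) - ACCESS_ONCE(rsp->completed);
}
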
127 /*
128 * Note a quiescent state. Because we do not need to know
129 * how many quiescent states passed, only whether there was at least
130 * one since the start of the grace period, this just sets a flag.
131 */
132 void rcu_sched_qs(int cpu)
133 {
134 struct rcu_data *rdp = &per_cpu(rcu_sched_data, cpu);
135
136 rdp->passed_quiesc_completed = rdp->gpnum - 1;
137 barrier();
138 rdp->passed_quiesc = 1;
139 }
140
141 void rcu_bh_qs(int cpu)
142 {
143 struct rcu_data *rdp = &per_cpu(rcu_bh_data, cpu);
144
145 rdp->passed_quiesc_completed = rdp->gpnum - 1;
146 barrier();
147 rdp->passed_quiesc = 1;
148 }
149
150 /*
151 * Note a context switch. This is a quiescent state for RCU-sched,
152 * and requires special handling for preemptible RCU.
153 */
154 void rcu_note_context_switch(int cpu)
155 {
156 rcu_sched_qs(cpu);
157 rcu_preempt_note_context_switch(cpu);
158 }
159
160 #ifdef CONFIG_NO_HZ
161 DEFINE_PER_CPU(struct rcu_dynticks, rcu_dynticks) = {
162 .dynticks_nesting = 1,
163 .dynticks = ATOMIC_INIT(1),
164 };
165 #endif /* #ifdef CONFIG_NO_HZ */
166
167 static int blimit = 10; /* Maximum callbacks per rcu_do_batch(). */
168 static int qhimark = 10000; /* If this many pending, ignore blimit. */
169 static int qlowmark = 100; /* Once only this many pending, use blimit. */
170
171 module_param(blimit, int, 0);
172 module_param(qhimark, int, 0);
173 module_param(qlowmark, int, 0);
174
175 int rcu_cpu_stall_suppress __read_mostly;
176 module_param(rcu_cpu_stall_suppress, int, 0644);
177
178 static void force_quiescent_state(struct rcu_state *rsp, int relaxed);
179 static int rcu_pending(int cpu);
180
181 /*
182 * Return the number of RCU-sched batches processed thus far for debug & stats.
183 */
184 long rcu_batches_completed_sched(void)
185 {
186 return rcu_sched_state.completed;
187 }
188 EXPORT_SYMBOL_GPL(rcu_batches_completed_sched);
189
190 /*
191 * Return the number of RCU BH batches processed thus far for debug & stats.
192 */
193 long rcu_batches_completed_bh(void)
194 {
195 return rcu_bh_state.completed;
196 }
197 EXPORT_SYMBOL_GPL(rcu_batches_completed_bh);
198
199 /*
200 * Force a quiescent state for RCU BH.
201 */
202 void rcu_bh_force_quiescent_state(void)
203 {
204 force_quiescent_state(&rcu_bh_state, 0);
205 }
206 EXPORT_SYMBOL_GPL(rcu_bh_force_quiescent_state);
207
208 /*
209 * Record the number of times rcutorture tests have been initiated and
210 * terminated. This information allows the debugfs tracing stats to be
211 * correlated to the rcutorture messages, even when the rcutorture module
212 * is being repeatedly loaded and unloaded. This is why we cannot
213 * store this state in rcutorture itself.
214 */
215 void rcutorture_record_test_transition(void)
216 {
217 rcutorture_testseq++;
218 rcutorture_vernum = 0;
219 }
220 EXPORT_SYMBOL_GPL(rcutorture_record_test_transition);
221
222 /*
223 * Record the number of writer passes through the current rcutorture test.
224 * This is also used to correlate debugfs tracing stats with the rcutorture
225 * messages.
226 */
227 void rcutorture_record_progress(unsigned long vernum)
228 {
229 rcutorture_vernum++;
230 }
231 EXPORT_SYMBOL_GPL(rcutorture_record_progress);
232
233 /*
234 * Force a quiescent state for RCU-sched.
235 */
236 void rcu_sched_force_quiescent_state(void)
237 {
238 force_quiescent_state(&rcu_sched_state, 0);
239 }
240 EXPORT_SYMBOL_GPL(rcu_sched_force_quiescent_state);
241
242 /*
243 * Does the CPU have callbacks ready to be invoked?
244 */
245 static int
246 cpu_has_callbacks_ready_to_invoke(struct rcu_data *rdp)
247 {
248 return &rdp->nxtlist != rdp->nxttail[RCU_DONE_TAIL];
249 }
250
251 /*
252 * Does the current CPU require an as-yet-unscheduled grace period?
253 */
254 static int
255 cpu_needs_another_gp(struct rcu_state *rsp, struct rcu_data *rdp)
256 {
257 return *rdp->nxttail[RCU_DONE_TAIL] && !rcu_gp_in_progress(rsp);
258 }
259
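/*
 * Editor's sketch, not part of the original file: ->nxtlist is a single
 * linked list of callbacks segmented by the ->nxttail[] array of tail
 * pointers, ordered RCU_DONE_TAIL, RCU_WAIT_TAIL, RCU_NEXT_READY_TAIL,
 * RCU_NEXT_TAIL.  A segment is empty exactly when its tail pointer equals
 * the preceding segment's tail pointer (or the list head for the first
 * segment).  The hypothetical helper below only restates that invariant.
 */
static int __maybe_unused rcu_cb_segment_empty(struct rcu_data *rdp, int seg)
{
	struct rcu_head **prev_tail;

	prev_tail = (seg == 0) ? &rdp->nxtlist : rdp->nxttail[seg - 1];
	return rdp->nxttail[seg] == prev_tail;
}
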
260 /*
261 * Return the root node of the specified rcu_state structure.
262 */
263 static struct rcu_node *rcu_get_root(struct rcu_state *rsp)
264 {
265 return &rsp->node[0];
266 }
267
268 #ifdef CONFIG_SMP
269
270 /*
271 * If the specified CPU is offline, tell the caller that it is in
272 * a quiescent state. Otherwise, whack it with a reschedule IPI.
273 * Grace periods can end up waiting on an offline CPU when that
274 * CPU is in the process of coming online -- it will be added to the
275 * rcu_node bitmasks before it actually makes it online. The same thing
276 * can happen while a CPU is in the process of going offline. Because this
277 * race is quite rare, we check for it after detecting that the grace
278 * period has been delayed rather than checking each and every CPU
279 * each and every time we start a new grace period.
280 */
281 static int rcu_implicit_offline_qs(struct rcu_data *rdp)
282 {
283 /*
284 * If the CPU is offline, it is in a quiescent state. We can
285 * trust its state not to change because interrupts are disabled.
286 */
287 if (cpu_is_offline(rdp->cpu)) {
288 rdp->offline_fqs++;
289 return 1;
290 }
291
292 /* If preemptable RCU, no point in sending reschedule IPI. */
293 if (rdp->preemptable)
294 return 0;
295
296 /* The CPU is online, so send it a reschedule IPI. */
297 if (rdp->cpu != smp_processor_id())
298 smp_send_reschedule(rdp->cpu);
299 else
300 set_need_resched();
301 rdp->resched_ipi++;
302 return 0;
303 }
304
305 #endif /* #ifdef CONFIG_SMP */
306
307 #ifdef CONFIG_NO_HZ
308
309 /**
310 * rcu_enter_nohz - inform RCU that current CPU is entering nohz
311 *
312 * Enter nohz mode, in other words, -leave- the mode in which RCU
313 * read-side critical sections can occur. (Though RCU read-side
314 * critical sections can occur in irq handlers in nohz mode, a possibility
315 * handled by rcu_irq_enter() and rcu_irq_exit()).
316 */
317 void rcu_enter_nohz(void)
318 {
319 unsigned long flags;
320 struct rcu_dynticks *rdtp;
321
322 local_irq_save(flags);
323 rdtp = &__get_cpu_var(rcu_dynticks);
324 if (--rdtp->dynticks_nesting) {
325 local_irq_restore(flags);
326 return;
327 }
328 /* CPUs seeing atomic_inc() must see prior RCU read-side crit sects */
329 smp_mb__before_atomic_inc(); /* See above. */
330 atomic_inc(&rdtp->dynticks);
331 smp_mb__after_atomic_inc(); /* Force ordering with next sojourn. */
332 WARN_ON_ONCE(atomic_read(&rdtp->dynticks) & 0x1);
333 local_irq_restore(flags);
334
335 /* If the interrupt queued a callback, get out of dyntick mode. */
336 if (in_irq() &&
337 (__get_cpu_var(rcu_sched_data).nxtlist ||
338 __get_cpu_var(rcu_bh_data).nxtlist ||
339 rcu_preempt_needs_cpu(smp_processor_id())))
340 set_need_resched();
341 }
342
343 /*
344 * rcu_exit_nohz - inform RCU that current CPU is leaving nohz
345 *
346 * Exit nohz mode, in other words, -enter- the mode in which RCU
347 * read-side critical sections normally occur.
348 */
349 void rcu_exit_nohz(void)
350 {
351 unsigned long flags;
352 struct rcu_dynticks *rdtp;
353
354 local_irq_save(flags);
355 rdtp = &__get_cpu_var(rcu_dynticks);
356 if (rdtp->dynticks_nesting++) {
357 local_irq_restore(flags);
358 return;
359 }
360 smp_mb__before_atomic_inc(); /* Force ordering w/previous sojourn. */
361 atomic_inc(&rdtp->dynticks);
362 /* CPUs seeing atomic_inc() must see later RCU read-side crit sects */
363 smp_mb__after_atomic_inc(); /* See above. */
364 WARN_ON_ONCE(!(atomic_read(&rdtp->dynticks) & 0x1));
365 local_irq_restore(flags);
366 }
367
368 /**
369 * rcu_nmi_enter - inform RCU of entry to NMI context
370 *
371 * If the CPU was idle with dynamic ticks active, and there is no
372 * irq handler running, this updates rdtp->dynticks to let the
373 * RCU grace-period handling know that the CPU is active.
374 */
375 void rcu_nmi_enter(void)
376 {
377 struct rcu_dynticks *rdtp = &__get_cpu_var(rcu_dynticks);
378
379 if (rdtp->dynticks_nmi_nesting == 0 &&
380 (atomic_read(&rdtp->dynticks) & 0x1))
381 return;
382 rdtp->dynticks_nmi_nesting++;
383 smp_mb__before_atomic_inc(); /* Force delay from prior write. */
384 atomic_inc(&rdtp->dynticks);
385 /* CPUs seeing atomic_inc() must see later RCU read-side crit sects */
386 smp_mb__after_atomic_inc(); /* See above. */
387 WARN_ON_ONCE(!(atomic_read(&rdtp->dynticks) & 0x1));
388 }
389
390 /**
391 * rcu_nmi_exit - inform RCU of exit from NMI context
392 *
393 * If the CPU was idle with dynamic ticks active, and there is no
394 * irq handler running, this updates rdtp->dynticks to let the
395 * RCU grace-period handling know that the CPU is no longer active.
396 */
397 void rcu_nmi_exit(void)
398 {
399 struct rcu_dynticks *rdtp = &__get_cpu_var(rcu_dynticks);
400
401 if (rdtp->dynticks_nmi_nesting == 0 ||
402 --rdtp->dynticks_nmi_nesting != 0)
403 return;
404 /* CPUs seeing atomic_inc() must see prior RCU read-side crit sects */
405 smp_mb__before_atomic_inc(); /* See above. */
406 atomic_inc(&rdtp->dynticks);
407 smp_mb__after_atomic_inc(); /* Force delay to next write. */
408 WARN_ON_ONCE(atomic_read(&rdtp->dynticks) & 0x1);
409 }
410
411 /**
412 * rcu_irq_enter - inform RCU of entry to hard irq context
413 *
414 * If the CPU was idle with dynamic ticks active, this updates the
415 * rdtp->dynticks to let the RCU handling know that the CPU is active.
416 */
417 void rcu_irq_enter(void)
418 {
419 rcu_exit_nohz();
420 }
421
422 /**
423 * rcu_irq_exit - inform RCU of exit from hard irq context
424 *
425 * If the CPU was idle with dynamic ticks active, update rdtp->dynticks
426 * to let the RCU handling know that the CPU is going back to idle
427 * with no ticks.
428 */
429 void rcu_irq_exit(void)
430 {
431 rcu_enter_nohz();
432 }
433
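/*
 * Editor's sketch, not part of the original file: the generic hard-irq
 * entry/exit paths bracket handler execution with these two calls so that
 * a dyntick-idle CPU is treated as non-idle while the handler runs.  This
 * is a simplified stand-in for the real irq_enter()/irq_exit() code.
 */
static void __maybe_unused example_hardirq_path(void (*handler)(void))
{
	rcu_irq_enter();	/* CPU may have been in dyntick-idle mode. */
	handler();		/* RCU read-side critical sections now legal. */
	rcu_irq_exit();		/* Possibly return to dyntick-idle mode. */
}
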
434 #ifdef CONFIG_SMP
435
436 /*
437 * Snapshot the specified CPU's dynticks counter so that we can later
438 * credit the CPU with an implicit quiescent state. This implementation
439 * always returns 0; rcu_implicit_dynticks_qs() performs the actual check.
440 */
441 static int dyntick_save_progress_counter(struct rcu_data *rdp)
442 {
443 rdp->dynticks_snap = atomic_add_return(0, &rdp->dynticks->dynticks);
444 return 0;
445 }
446
447 /*
448 * Return true if the specified CPU has passed through a quiescent
449 * state by virtue of being in or having passed through a dynticks
450 * idle state since the last call to dyntick_save_progress_counter()
451 * for this same CPU.
452 */
453 static int rcu_implicit_dynticks_qs(struct rcu_data *rdp)
454 {
455 unsigned long curr;
456 unsigned long snap;
457
458 curr = (unsigned long)atomic_add_return(0, &rdp->dynticks->dynticks);
459 snap = (unsigned long)rdp->dynticks_snap;
460
461 /*
462 * If the CPU passed through or entered a dynticks idle phase with
463 * no active irq/NMI handlers, then we can safely pretend that the CPU
464 * already acknowledged the request to pass through a quiescent
465 * state. Either way, that CPU cannot possibly be in an RCU
466 * read-side critical section that started before the beginning
467 * of the current RCU grace period.
468 */
469 if ((curr & 0x1) == 0 || ULONG_CMP_GE(curr, snap + 2)) {
470 rdp->dynticks_fqs++;
471 return 1;
472 }
473
474 /* Go check for the CPU being offline. */
475 return rcu_implicit_offline_qs(rdp);
476 }
477
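/*
 * Editor's sketch, not part of the original file: ->dynticks is even while
 * the CPU is in dyntick-idle mode and odd otherwise, and every transition
 * increments it.  Relative to the snapshot taken by
 * dyntick_save_progress_counter(), "currently even" or "advanced by at
 * least two" both prove an intervening extended quiescent state; for
 * example, snap == 5 and curr == 7 means the CPU went idle (6) and came
 * back (7).  The hypothetical helper below only restates the test above.
 */
static int __maybe_unused dynticks_passed_quiescent(unsigned long snap,
						    unsigned long curr)
{
	return (curr & 0x1) == 0 || ULONG_CMP_GE(curr, snap + 2);
}
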
478 #endif /* #ifdef CONFIG_SMP */
479
480 #else /* #ifdef CONFIG_NO_HZ */
481
482 #ifdef CONFIG_SMP
483
484 static int dyntick_save_progress_counter(struct rcu_data *rdp)
485 {
486 return 0;
487 }
488
489 static int rcu_implicit_dynticks_qs(struct rcu_data *rdp)
490 {
491 return rcu_implicit_offline_qs(rdp);
492 }
493
494 #endif /* #ifdef CONFIG_SMP */
495
496 #endif /* #else #ifdef CONFIG_NO_HZ */
497
498 int rcu_cpu_stall_suppress __read_mostly;
499
500 static void record_gp_stall_check_time(struct rcu_state *rsp)
501 {
502 rsp->gp_start = jiffies;
503 rsp->jiffies_stall = jiffies + RCU_SECONDS_TILL_STALL_CHECK;
504 }
505
506 static void print_other_cpu_stall(struct rcu_state *rsp)
507 {
508 int cpu;
509 long delta;
510 unsigned long flags;
511 struct rcu_node *rnp = rcu_get_root(rsp);
512
513 /* Only let one CPU complain about others per time interval. */
514
515 raw_spin_lock_irqsave(&rnp->lock, flags);
516 delta = jiffies - rsp->jiffies_stall;
517 if (delta < RCU_STALL_RAT_DELAY || !rcu_gp_in_progress(rsp)) {
518 raw_spin_unlock_irqrestore(&rnp->lock, flags);
519 return;
520 }
521 rsp->jiffies_stall = jiffies + RCU_SECONDS_TILL_STALL_RECHECK;
522
523 /*
524 * Now rat on any tasks that got kicked up to the root rcu_node
525 * due to CPU offlining.
526 */
527 rcu_print_task_stall(rnp);
528 raw_spin_unlock_irqrestore(&rnp->lock, flags);
529
530 /*
531 * OK, time to rat on our buddy...
532 * See Documentation/RCU/stallwarn.txt for info on how to debug
533 * RCU CPU stall warnings.
534 */
535 printk(KERN_ERR "INFO: %s detected stalls on CPUs/tasks: {",
536 rsp->name);
537 rcu_for_each_leaf_node(rsp, rnp) {
538 raw_spin_lock_irqsave(&rnp->lock, flags);
539 rcu_print_task_stall(rnp);
540 raw_spin_unlock_irqrestore(&rnp->lock, flags);
541 if (rnp->qsmask == 0)
542 continue;
543 for (cpu = 0; cpu <= rnp->grphi - rnp->grplo; cpu++)
544 if (rnp->qsmask & (1UL << cpu))
545 printk(" %d", rnp->grplo + cpu);
546 }
547 printk("} (detected by %d, t=%ld jiffies)\n",
548 smp_processor_id(), (long)(jiffies - rsp->gp_start));
549 trigger_all_cpu_backtrace();
550
551 /* If so configured, complain about tasks blocking the grace period. */
552
553 rcu_print_detail_task_stall(rsp);
554
555 force_quiescent_state(rsp, 0); /* Kick them all. */
556 }
557
558 static void print_cpu_stall(struct rcu_state *rsp)
559 {
560 unsigned long flags;
561 struct rcu_node *rnp = rcu_get_root(rsp);
562
563 /*
564 * OK, time to rat on ourselves...
565 * See Documentation/RCU/stallwarn.txt for info on how to debug
566 * RCU CPU stall warnings.
567 */
568 printk(KERN_ERR "INFO: %s detected stall on CPU %d (t=%lu jiffies)\n",
569 rsp->name, smp_processor_id(), jiffies - rsp->gp_start);
570 trigger_all_cpu_backtrace();
571
572 raw_spin_lock_irqsave(&rnp->lock, flags);
573 if (ULONG_CMP_GE(jiffies, rsp->jiffies_stall))
574 rsp->jiffies_stall =
575 jiffies + RCU_SECONDS_TILL_STALL_RECHECK;
576 raw_spin_unlock_irqrestore(&rnp->lock, flags);
577
578 set_need_resched(); /* kick ourselves to get things going. */
579 }
580
581 static void check_cpu_stall(struct rcu_state *rsp, struct rcu_data *rdp)
582 {
583 long delta;
584 struct rcu_node *rnp;
585
586 if (rcu_cpu_stall_suppress)
587 return;
588 delta = jiffies - ACCESS_ONCE(rsp->jiffies_stall);
589 rnp = rdp->mynode;
590 if ((ACCESS_ONCE(rnp->qsmask) & rdp->grpmask) && delta >= 0) {
591
592 /* We haven't checked in, so go dump stack. */
593 print_cpu_stall(rsp);
594
595 } else if (rcu_gp_in_progress(rsp) && delta >= RCU_STALL_RAT_DELAY) {
596
597 /* They had two time units to dump stack, so complain. */
598 print_other_cpu_stall(rsp);
599 }
600 }
601
602 static int rcu_panic(struct notifier_block *this, unsigned long ev, void *ptr)
603 {
604 rcu_cpu_stall_suppress = 1;
605 return NOTIFY_DONE;
606 }
607
608 /**
609 * rcu_cpu_stall_reset - prevent further stall warnings in current grace period
610 *
611 * Set the stall-warning timeout way off into the future, thus preventing
612 * any RCU CPU stall-warning messages from appearing in the current set of
613 * RCU grace periods.
614 *
615 * The caller must disable hard irqs.
616 */
617 void rcu_cpu_stall_reset(void)
618 {
619 rcu_sched_state.jiffies_stall = jiffies + ULONG_MAX / 2;
620 rcu_bh_state.jiffies_stall = jiffies + ULONG_MAX / 2;
621 rcu_preempt_stall_reset();
622 }
623
624 static struct notifier_block rcu_panic_block = {
625 .notifier_call = rcu_panic,
626 };
627
628 static void __init check_cpu_stall_init(void)
629 {
630 atomic_notifier_chain_register(&panic_notifier_list, &rcu_panic_block);
631 }
632
633 /*
634 * Update CPU-local rcu_data state to record the newly noticed grace period.
635 * This is used both when we started the grace period and when we notice
636 * that someone else started the grace period. The caller must hold the
637 * ->lock of the leaf rcu_node structure corresponding to the current CPU,
638 * and must have irqs disabled.
639 */
640 static void __note_new_gpnum(struct rcu_state *rsp, struct rcu_node *rnp, struct rcu_data *rdp)
641 {
642 if (rdp->gpnum != rnp->gpnum) {
643 /*
644 * If the current grace period is waiting for this CPU,
645 * set up to detect a quiescent state, otherwise don't
646 * go looking for one.
647 */
648 rdp->gpnum = rnp->gpnum;
649 if (rnp->qsmask & rdp->grpmask) {
650 rdp->qs_pending = 1;
651 rdp->passed_quiesc = 0;
652 } else
653 rdp->qs_pending = 0;
654 }
655 }
656
657 static void note_new_gpnum(struct rcu_state *rsp, struct rcu_data *rdp)
658 {
659 unsigned long flags;
660 struct rcu_node *rnp;
661
662 local_irq_save(flags);
663 rnp = rdp->mynode;
664 if (rdp->gpnum == ACCESS_ONCE(rnp->gpnum) || /* outside lock. */
665 !raw_spin_trylock(&rnp->lock)) { /* irqs already off, so later. */
666 local_irq_restore(flags);
667 return;
668 }
669 __note_new_gpnum(rsp, rnp, rdp);
670 raw_spin_unlock_irqrestore(&rnp->lock, flags);
671 }
672
673 /*
674 * Did someone else start a new RCU grace period since we last
675 * checked? Update local state appropriately if so. Must be called
676 * on the CPU corresponding to rdp.
677 */
678 static int
679 check_for_new_grace_period(struct rcu_state *rsp, struct rcu_data *rdp)
680 {
681 unsigned long flags;
682 int ret = 0;
683
684 local_irq_save(flags);
685 if (rdp->gpnum != rsp->gpnum) {
686 note_new_gpnum(rsp, rdp);
687 ret = 1;
688 }
689 local_irq_restore(flags);
690 return ret;
691 }
692
693 /*
694 * Advance this CPU's callbacks, but only if the current grace period
695 * has ended. This may be called only from the CPU to whom the rdp
696 * belongs. In addition, the corresponding leaf rcu_node structure's
697 * ->lock must be held by the caller, with irqs disabled.
698 */
699 static void
700 __rcu_process_gp_end(struct rcu_state *rsp, struct rcu_node *rnp, struct rcu_data *rdp)
701 {
702 /* Did another grace period end? */
703 if (rdp->completed != rnp->completed) {
704
705 /* Advance callbacks. No harm if list empty. */
706 rdp->nxttail[RCU_DONE_TAIL] = rdp->nxttail[RCU_WAIT_TAIL];
707 rdp->nxttail[RCU_WAIT_TAIL] = rdp->nxttail[RCU_NEXT_READY_TAIL];
708 rdp->nxttail[RCU_NEXT_READY_TAIL] = rdp->nxttail[RCU_NEXT_TAIL];
709
710 /* Remember that we saw this grace-period completion. */
711 rdp->completed = rnp->completed;
712
713 /*
714 * If we were in an extended quiescent state, we may have
715 * missed some grace periods that other CPUs handled on
716 * our behalf. Catch up with this state to avoid noting
717 * spurious new grace periods. If another grace period
718 * has started, then rnp->gpnum will have advanced, so
719 * we will detect this later on.
720 */
721 if (ULONG_CMP_LT(rdp->gpnum, rdp->completed))
722 rdp->gpnum = rdp->completed;
723
724 /*
725 * If RCU does not need a quiescent state from this CPU,
726 * then make sure that this CPU doesn't go looking for one.
727 */
728 if ((rnp->qsmask & rdp->grpmask) == 0)
729 rdp->qs_pending = 0;
730 }
731 }
732
733 /*
734 * Advance this CPU's callbacks, but only if the current grace period
735 * has ended. This may be called only from the CPU to whom the rdp
736 * belongs.
737 */
738 static void
739 rcu_process_gp_end(struct rcu_state *rsp, struct rcu_data *rdp)
740 {
741 unsigned long flags;
742 struct rcu_node *rnp;
743
744 local_irq_save(flags);
745 rnp = rdp->mynode;
746 if (rdp->completed == ACCESS_ONCE(rnp->completed) || /* outside lock. */
747 !raw_spin_trylock(&rnp->lock)) { /* irqs already off, so later. */
748 local_irq_restore(flags);
749 return;
750 }
751 __rcu_process_gp_end(rsp, rnp, rdp);
752 raw_spin_unlock_irqrestore(&rnp->lock, flags);
753 }
754
755 /*
756 * Do per-CPU grace-period initialization for the running CPU. The caller
757 * must hold the lock of the leaf rcu_node structure corresponding to
758 * this CPU.
759 */
760 static void
761 rcu_start_gp_per_cpu(struct rcu_state *rsp, struct rcu_node *rnp, struct rcu_data *rdp)
762 {
763 /* Prior grace period ended, so advance callbacks for current CPU. */
764 __rcu_process_gp_end(rsp, rnp, rdp);
765
766 /*
767 * Because this CPU just now started the new grace period, we know
768 * that all of its callbacks will be covered by this upcoming grace
769 * period, even the ones that were registered arbitrarily recently.
770 * Therefore, advance all outstanding callbacks to RCU_WAIT_TAIL.
771 *
772 * Other CPUs cannot be sure exactly when the grace period started.
773 * Therefore, their recently registered callbacks must pass through
774 * an additional RCU_NEXT_READY stage, so that they will be handled
775 * by the next RCU grace period.
776 */
777 rdp->nxttail[RCU_NEXT_READY_TAIL] = rdp->nxttail[RCU_NEXT_TAIL];
778 rdp->nxttail[RCU_WAIT_TAIL] = rdp->nxttail[RCU_NEXT_TAIL];
779
780 /* Set state so that this CPU will detect the next quiescent state. */
781 __note_new_gpnum(rsp, rnp, rdp);
782 }
783
784 /*
785 * Start a new RCU grace period if warranted, re-initializing the hierarchy
786 * in preparation for detecting the next grace period. The caller must hold
787 * the root node's ->lock, which is released before return. Hard irqs must
788 * be disabled.
789 */
790 static void
791 rcu_start_gp(struct rcu_state *rsp, unsigned long flags)
792 __releases(rcu_get_root(rsp)->lock)
793 {
794 struct rcu_data *rdp = this_cpu_ptr(rsp->rda);
795 struct rcu_node *rnp = rcu_get_root(rsp);
796
797 if (!cpu_needs_another_gp(rsp, rdp) || rsp->fqs_active) {
798 if (cpu_needs_another_gp(rsp, rdp))
799 rsp->fqs_need_gp = 1;
800 if (rnp->completed == rsp->completed) {
801 raw_spin_unlock_irqrestore(&rnp->lock, flags);
802 return;
803 }
804 raw_spin_unlock(&rnp->lock); /* irqs remain disabled. */
805
806 /*
807 * Propagate new ->completed value to rcu_node structures
808 * so that other CPUs don't have to wait until the start
809 * of the next grace period to process their callbacks.
810 */
811 rcu_for_each_node_breadth_first(rsp, rnp) {
812 raw_spin_lock(&rnp->lock); /* irqs already disabled. */
813 rnp->completed = rsp->completed;
814 raw_spin_unlock(&rnp->lock); /* irqs remain disabled. */
815 }
816 local_irq_restore(flags);
817 return;
818 }
819
820 /* Advance to a new grace period and initialize state. */
821 rsp->gpnum++;
822 WARN_ON_ONCE(rsp->signaled == RCU_GP_INIT);
823 rsp->signaled = RCU_GP_INIT; /* Hold off force_quiescent_state. */
824 rsp->jiffies_force_qs = jiffies + RCU_JIFFIES_TILL_FORCE_QS;
825 record_gp_stall_check_time(rsp);
826
827 /* Special-case the common single-level case. */
828 if (NUM_RCU_NODES == 1) {
829 rcu_preempt_check_blocked_tasks(rnp);
830 rnp->qsmask = rnp->qsmaskinit;
831 rnp->gpnum = rsp->gpnum;
832 rnp->completed = rsp->completed;
833 rsp->signaled = RCU_SIGNAL_INIT; /* force_quiescent_state OK. */
834 rcu_start_gp_per_cpu(rsp, rnp, rdp);
835 rcu_preempt_boost_start_gp(rnp);
836 raw_spin_unlock_irqrestore(&rnp->lock, flags);
837 return;
838 }
839
840 raw_spin_unlock(&rnp->lock); /* leave irqs disabled. */
841
842
843 /* Exclude any concurrent CPU-hotplug operations. */
844 raw_spin_lock(&rsp->onofflock); /* irqs already disabled. */
845
846 /*
847 * Set the quiescent-state-needed bits in all the rcu_node
848 * structures for all currently online CPUs in breadth-first
849 * order, starting from the root rcu_node structure. This
850 * operation relies on the layout of the hierarchy within the
851 * rsp->node[] array. Note that other CPUs will access only
852 * the leaves of the hierarchy, which still indicate that no
853 * grace period is in progress, at least until the corresponding
854 * leaf node has been initialized. In addition, we have excluded
855 * CPU-hotplug operations.
856 *
857 * Note that the grace period cannot complete until we finish
858 * the initialization process, as there will be at least one
859 * qsmask bit set in the root node until that time, namely the
860 * one corresponding to this CPU, due to the fact that we have
861 * irqs disabled.
862 */
863 rcu_for_each_node_breadth_first(rsp, rnp) {
864 raw_spin_lock(&rnp->lock); /* irqs already disabled. */
865 rcu_preempt_check_blocked_tasks(rnp);
866 rnp->qsmask = rnp->qsmaskinit;
867 rnp->gpnum = rsp->gpnum;
868 rnp->completed = rsp->completed;
869 if (rnp == rdp->mynode)
870 rcu_start_gp_per_cpu(rsp, rnp, rdp);
871 rcu_preempt_boost_start_gp(rnp);
872 raw_spin_unlock(&rnp->lock); /* irqs remain disabled. */
873 }
874
875 rnp = rcu_get_root(rsp);
876 raw_spin_lock(&rnp->lock); /* irqs already disabled. */
877 rsp->signaled = RCU_SIGNAL_INIT; /* force_quiescent_state now OK. */
878 raw_spin_unlock(&rnp->lock); /* irqs remain disabled. */
879 raw_spin_unlock_irqrestore(&rsp->onofflock, flags);
880 }
881
882 /*
883 * Report a full set of quiescent states to the specified rcu_state
884 * data structure. This involves cleaning up after the prior grace
885 * period and letting rcu_start_gp() start up the next grace period
886 * if one is needed. Note that the caller must hold rnp->lock, as
887 * required by rcu_start_gp(), which will release it.
888 */
889 static void rcu_report_qs_rsp(struct rcu_state *rsp, unsigned long flags)
890 __releases(rcu_get_root(rsp)->lock)
891 {
892 unsigned long gp_duration;
893
894 WARN_ON_ONCE(!rcu_gp_in_progress(rsp));
895
896 /*
897 * Ensure that all grace-period and pre-grace-period activity
898 * is seen before the assignment to rsp->completed.
899 */
900 smp_mb(); /* See above block comment. */
901 gp_duration = jiffies - rsp->gp_start;
902 if (gp_duration > rsp->gp_max)
903 rsp->gp_max = gp_duration;
904 rsp->completed = rsp->gpnum;
905 rsp->signaled = RCU_GP_IDLE;
906 rcu_start_gp(rsp, flags); /* releases root node's rnp->lock. */
907 }
908
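/*
 * Editor's sketch, not part of the original file: the commit message for
 * this file mentions adding grace-period age to the tracing output.  Given
 * ->gp_start recorded by record_gp_stall_check_time() and ->gp_max updated
 * just above, one way such an age could be computed is shown below; the
 * helper name is hypothetical.
 */
static unsigned long __maybe_unused rcu_gp_age_jiffies(struct rcu_state *rsp)
{
	if (!rcu_gp_in_progress(rsp))
		return 0;		/* No grace period currently in flight. */
	return jiffies - rsp->gp_start;	/* Age of the current grace period. */
}
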
909 /*
910 * Similar to rcu_report_qs_rdp(), for which it is a helper function.
911 * Allows quiescent states for a group of CPUs to be reported at one go
912 * to the specified rcu_node structure, though all the CPUs in the group
913 * must be represented by the same rcu_node structure (which need not be
914 * a leaf rcu_node structure, though it often will be). That structure's
915 * lock must be held upon entry, and it is released before return.
916 */
917 static void
918 rcu_report_qs_rnp(unsigned long mask, struct rcu_state *rsp,
919 struct rcu_node *rnp, unsigned long flags)
920 __releases(rnp->lock)
921 {
922 struct rcu_node *rnp_c;
923
924 /* Walk up the rcu_node hierarchy. */
925 for (;;) {
926 if (!(rnp->qsmask & mask)) {
927
928 /* Our bit has already been cleared, so done. */
929 raw_spin_unlock_irqrestore(&rnp->lock, flags);
930 return;
931 }
932 rnp->qsmask &= ~mask;
933 if (rnp->qsmask != 0 || rcu_preempt_blocked_readers_cgp(rnp)) {
934
935 /* Other bits still set at this level, so done. */
936 raw_spin_unlock_irqrestore(&rnp->lock, flags);
937 return;
938 }
939 mask = rnp->grpmask;
940 if (rnp->parent == NULL) {
941
942 /* No more levels. Exit loop holding root lock. */
943
944 break;
945 }
946 raw_spin_unlock_irqrestore(&rnp->lock, flags);
947 rnp_c = rnp;
948 rnp = rnp->parent;
949 raw_spin_lock_irqsave(&rnp->lock, flags);
950 WARN_ON_ONCE(rnp_c->qsmask);
951 }
952
953 /*
954 * Get here if we are the last CPU to pass through a quiescent
955 * state for this grace period. Invoke rcu_report_qs_rsp()
956 * to clean up and start the next grace period if one is needed.
957 */
958 rcu_report_qs_rsp(rsp, flags); /* releases rnp->lock. */
959 }
960
961 /*
962 * Record a quiescent state for the specified CPU to that CPU's rcu_data
963 * structure. This must be either called from the specified CPU, or
964 * called when the specified CPU is known to be offline (and when it is
965 * also known that no other CPU is concurrently trying to help the offline
966 * CPU). The lastcomp argument is used to make sure we are still in the
967 * grace period of interest. We don't want to end the current grace period
968 * based on quiescent states detected in an earlier grace period!
969 */
970 static void
971 rcu_report_qs_rdp(int cpu, struct rcu_state *rsp, struct rcu_data *rdp, long lastcomp)
972 {
973 unsigned long flags;
974 unsigned long mask;
975 struct rcu_node *rnp;
976
977 rnp = rdp->mynode;
978 raw_spin_lock_irqsave(&rnp->lock, flags);
979 if (lastcomp != rnp->completed) {
980
981 /*
982 * Someone beat us to it for this grace period, so leave.
983 * The race with GP start is resolved by the fact that we
984 * hold the leaf rcu_node lock, so that the per-CPU bits
985 * cannot yet be initialized -- so we would simply find our
986 * CPU's bit already cleared in rcu_report_qs_rnp() if this
987 * race occurred.
988 */
989 rdp->passed_quiesc = 0; /* try again later! */
990 raw_spin_unlock_irqrestore(&rnp->lock, flags);
991 return;
992 }
993 mask = rdp->grpmask;
994 if ((rnp->qsmask & mask) == 0) {
995 raw_spin_unlock_irqrestore(&rnp->lock, flags);
996 } else {
997 rdp->qs_pending = 0;
998
999 /*
1000 * This GP can't end until this CPU checks in, so all of our
1001 * callbacks can be processed during the next GP.
1002 */
1003 rdp->nxttail[RCU_NEXT_READY_TAIL] = rdp->nxttail[RCU_NEXT_TAIL];
1004
1005 rcu_report_qs_rnp(mask, rsp, rnp, flags); /* rlses rnp->lock */
1006 }
1007 }
1008
1009 /*
1010 * Check to see if there is a new grace period of which this CPU
1011 * is not yet aware, and if so, set up local rcu_data state for it.
1012 * Otherwise, see if this CPU has just passed through its first
1013 * quiescent state for this grace period, and record that fact if so.
1014 */
1015 static void
1016 rcu_check_quiescent_state(struct rcu_state *rsp, struct rcu_data *rdp)
1017 {
1018 /* If there is now a new grace period, record and return. */
1019 if (check_for_new_grace_period(rsp, rdp))
1020 return;
1021
1022 /*
1023 * Does this CPU still need to do its part for current grace period?
1024 * If no, return and let the other CPUs do their part as well.
1025 */
1026 if (!rdp->qs_pending)
1027 return;
1028
1029 /*
1030 * Was there a quiescent state since the beginning of the grace
1031 * period? If no, then exit and wait for the next call.
1032 */
1033 if (!rdp->passed_quiesc)
1034 return;
1035
1036 /*
1037 * Tell RCU we are done (but rcu_report_qs_rdp() will be the
1038 * judge of that).
1039 */
1040 rcu_report_qs_rdp(rdp->cpu, rsp, rdp, rdp->passed_quiesc_completed);
1041 }
1042
1043 #ifdef CONFIG_HOTPLUG_CPU
1044
1045 /*
1046 * Move a dying CPU's RCU callbacks to an online CPU's callback list.
1047 * Synchronization is not required because this function executes
1048 * in stop_machine() context.
1049 */
1050 static void rcu_send_cbs_to_online(struct rcu_state *rsp)
1051 {
1052 int i;
1053 /* The dying CPU has already been cleared from cpu_online_mask. */
1054 int receive_cpu = cpumask_any(cpu_online_mask);
1055 struct rcu_data *rdp = this_cpu_ptr(rsp->rda);
1056 struct rcu_data *receive_rdp = per_cpu_ptr(rsp->rda, receive_cpu);
1057
1058 if (rdp->nxtlist == NULL)
1059 return; /* irqs disabled, so comparison is stable. */
1060
1061 *receive_rdp->nxttail[RCU_NEXT_TAIL] = rdp->nxtlist;
1062 receive_rdp->nxttail[RCU_NEXT_TAIL] = rdp->nxttail[RCU_NEXT_TAIL];
1063 receive_rdp->qlen += rdp->qlen;
1064 receive_rdp->n_cbs_adopted += rdp->qlen;
1065 rdp->n_cbs_orphaned += rdp->qlen;
1066
1067 rdp->nxtlist = NULL;
1068 for (i = 0; i < RCU_NEXT_SIZE; i++)
1069 rdp->nxttail[i] = &rdp->nxtlist;
1070 rdp->qlen = 0;
1071 }
1072
1073 /*
1074 * Remove the outgoing CPU from the bitmasks in the rcu_node hierarchy
1075 * and move all callbacks from the outgoing CPU to the current one.
1076 * There can only be one CPU hotplug operation at a time, so no other
1077 * CPU can be attempting to update rcu_cpu_kthread_task.
1078 */
1079 static void __rcu_offline_cpu(int cpu, struct rcu_state *rsp)
1080 {
1081 unsigned long flags;
1082 unsigned long mask;
1083 int need_report = 0;
1084 struct rcu_data *rdp = per_cpu_ptr(rsp->rda, cpu);
1085 struct rcu_node *rnp;
1086 struct task_struct *t;
1087
1088 /* Stop the CPU's kthread. */
1089 t = per_cpu(rcu_cpu_kthread_task, cpu);
1090 if (t != NULL) {
1091 per_cpu(rcu_cpu_kthread_task, cpu) = NULL;
1092 kthread_stop(t);
1093 }
1094
1095 /* Exclude any attempts to start a new grace period. */
1096 raw_spin_lock_irqsave(&rsp->onofflock, flags);
1097
1098 /* Remove the outgoing CPU from the masks in the rcu_node hierarchy. */
1099 rnp = rdp->mynode; /* this is the outgoing CPU's rnp. */
1100 mask = rdp->grpmask; /* rnp->grplo is constant. */
1101 do {
1102 raw_spin_lock(&rnp->lock); /* irqs already disabled. */
1103 rnp->qsmaskinit &= ~mask;
1104 if (rnp->qsmaskinit != 0) {
1105 if (rnp != rdp->mynode)
1106 raw_spin_unlock(&rnp->lock); /* irqs remain disabled. */
1107 break;
1108 }
1109 if (rnp == rdp->mynode)
1110 need_report = rcu_preempt_offline_tasks(rsp, rnp, rdp);
1111 else
1112 raw_spin_unlock(&rnp->lock); /* irqs remain disabled. */
1113 mask = rnp->grpmask;
1114 rnp = rnp->parent;
1115 } while (rnp != NULL);
1116
1117 /*
1118 * We still hold the leaf rcu_node structure lock here, and
1119 * irqs are still disabled. The reason for this subterfuge is
1120 * that invoking rcu_report_unblock_qs_rnp() with ->onofflock
1121 * held leads to deadlock.
1122 */
1123 raw_spin_unlock(&rsp->onofflock); /* irqs remain disabled. */
1124 rnp = rdp->mynode;
1125 if (need_report & RCU_OFL_TASKS_NORM_GP)
1126 rcu_report_unblock_qs_rnp(rnp, flags);
1127 else
1128 raw_spin_unlock_irqrestore(&rnp->lock, flags);
1129 if (need_report & RCU_OFL_TASKS_EXP_GP)
1130 rcu_report_exp_rnp(rsp, rnp);
1131
1132 /*
1133 * If there are no more online CPUs for this rcu_node structure,
1134 * kill the rcu_node structure's kthread. Otherwise, adjust its
1135 * affinity.
1136 */
1137 t = rnp->node_kthread_task;
1138 if (t != NULL &&
1139 rnp->qsmaskinit == 0) {
1140 raw_spin_lock_irqsave(&rnp->lock, flags);
1141 rnp->node_kthread_task = NULL;
1142 raw_spin_unlock_irqrestore(&rnp->lock, flags);
1143 kthread_stop(t);
1144 rcu_stop_boost_kthread(rnp);
1145 } else
1146 rcu_node_kthread_setaffinity(rnp, -1);
1147 }
1148
1149 /*
1150 * Remove the specified CPU from the RCU hierarchy and move any pending
1151 * callbacks that it might have to the current CPU. This code assumes
1152 * that at least one CPU in the system will remain running at all times.
1153 * Any attempt to offline -all- CPUs is likely to strand RCU callbacks.
1154 */
1155 static void rcu_offline_cpu(int cpu)
1156 {
1157 __rcu_offline_cpu(cpu, &rcu_sched_state);
1158 __rcu_offline_cpu(cpu, &rcu_bh_state);
1159 rcu_preempt_offline_cpu(cpu);
1160 }
1161
1162 #else /* #ifdef CONFIG_HOTPLUG_CPU */
1163
1164 static void rcu_send_cbs_to_online(struct rcu_state *rsp)
1165 {
1166 }
1167
1168 static void rcu_offline_cpu(int cpu)
1169 {
1170 }
1171
1172 #endif /* #else #ifdef CONFIG_HOTPLUG_CPU */
1173
1174 /*
1175 * Invoke any RCU callbacks that have made it to the end of their grace
1176 * period. Throttle as specified by rdp->blimit.
1177 */
1178 static void rcu_do_batch(struct rcu_state *rsp, struct rcu_data *rdp)
1179 {
1180 unsigned long flags;
1181 struct rcu_head *next, *list, **tail;
1182 int count;
1183
1184 /* If no callbacks are ready, just return. */
1185 if (!cpu_has_callbacks_ready_to_invoke(rdp))
1186 return;
1187
1188 /*
1189 * Extract the list of ready callbacks, disabling to prevent
1190 * races with call_rcu() from interrupt handlers.
1191 */
1192 local_irq_save(flags);
1193 list = rdp->nxtlist;
1194 rdp->nxtlist = *rdp->nxttail[RCU_DONE_TAIL];
1195 *rdp->nxttail[RCU_DONE_TAIL] = NULL;
1196 tail = rdp->nxttail[RCU_DONE_TAIL];
1197 for (count = RCU_NEXT_SIZE - 1; count >= 0; count--)
1198 if (rdp->nxttail[count] == rdp->nxttail[RCU_DONE_TAIL])
1199 rdp->nxttail[count] = &rdp->nxtlist;
1200 local_irq_restore(flags);
1201
1202 /* Invoke callbacks. */
1203 count = 0;
1204 while (list) {
1205 next = list->next;
1206 prefetch(next);
1207 debug_rcu_head_unqueue(list);
1208 list->func(list);
1209 list = next;
1210 if (++count >= rdp->blimit)
1211 break;
1212 }
1213
1214 local_irq_save(flags);
1215
1216 /* Update count, and requeue any remaining callbacks. */
1217 rdp->qlen -= count;
1218 rdp->n_cbs_invoked += count;
1219 if (list != NULL) {
1220 *tail = rdp->nxtlist;
1221 rdp->nxtlist = list;
1222 for (count = 0; count < RCU_NEXT_SIZE; count++)
1223 if (&rdp->nxtlist == rdp->nxttail[count])
1224 rdp->nxttail[count] = tail;
1225 else
1226 break;
1227 }
1228
1229 /* Reinstate batch limit if we have worked down the excess. */
1230 if (rdp->blimit == LONG_MAX && rdp->qlen <= qlowmark)
1231 rdp->blimit = blimit;
1232
1233 /* Reset ->qlen_last_fqs_check trigger if enough CBs have drained. */
1234 if (rdp->qlen == 0 && rdp->qlen_last_fqs_check != 0) {
1235 rdp->qlen_last_fqs_check = 0;
1236 rdp->n_force_qs_snap = rsp->n_force_qs;
1237 } else if (rdp->qlen < rdp->qlen_last_fqs_check - qhimark)
1238 rdp->qlen_last_fqs_check = rdp->qlen;
1239
1240 local_irq_restore(flags);
1241
1242 /* Wake up this CPU's RCU kthread again if callbacks remain. */
1243 if (cpu_has_callbacks_ready_to_invoke(rdp))
1244 invoke_rcu_cpu_kthread();
1245 }
1246
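/*
 * Editor's sketch, not part of the original file: rcu_do_batch() is where
 * callbacks queued by call_rcu() finally run.  A typical caller embeds an
 * rcu_head in its own structure, as in the hypothetical example below.
 */
struct example_node {
	int data;
	struct rcu_head rcu;
};

static void example_cb(struct rcu_head *head)
{
	struct example_node *p = container_of(head, struct example_node, rcu);

	/* Invoked from rcu_do_batch() after a full grace period elapses. */
	p->data = 0;	/* Stand-in for freeing or otherwise reclaiming p. */
}

static void __maybe_unused example_defer_reclaim(struct example_node *p)
{
	call_rcu(&p->rcu, example_cb);	/* Queue onto this CPU's ->nxtlist. */
}
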
1247 /*
1248 * Check to see if this CPU is in a non-context-switch quiescent state
1249 * (user mode or idle loop for rcu, non-softirq execution for rcu_bh).
1250 * Also wake up this CPU's RCU kthread if any RCU work is pending.
1251 *
1252 * This function must be called with hardirqs disabled. It is normally
1253 * invoked from the scheduling-clock interrupt. If rcu_pending returns
1254 * false, there is no point in invoking rcu_check_callbacks().
1255 */
1256 void rcu_check_callbacks(int cpu, int user)
1257 {
1258 if (user ||
1259 (idle_cpu(cpu) && rcu_scheduler_active &&
1260 !in_softirq() && hardirq_count() <= (1 << HARDIRQ_SHIFT))) {
1261
1262 /*
1263 * Get here if this CPU took its interrupt from user
1264 * mode or from the idle loop, and if this is not a
1265 * nested interrupt. In this case, the CPU is in
1266 * a quiescent state, so note it.
1267 *
1268 * No memory barrier is required here because both
1269 * rcu_sched_qs() and rcu_bh_qs() reference only CPU-local
1270 * variables that other CPUs neither access nor modify,
1271 * at least not while the corresponding CPU is online.
1272 */
1273
1274 rcu_sched_qs(cpu);
1275 rcu_bh_qs(cpu);
1276
1277 } else if (!in_softirq()) {
1278
1279 /*
1280 * Get here if this CPU did not take its interrupt from
1281 * softirq, in other words, if it is not interrupting
1282 * an rcu_bh read-side critical section. This is therefore an rcu_bh
1283 * quiescent state, so note it.
1284 */
1285
1286 rcu_bh_qs(cpu);
1287 }
1288 rcu_preempt_check_callbacks(cpu);
1289 if (rcu_pending(cpu))
1290 invoke_rcu_cpu_kthread();
1291 }
1292
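/*
 * Editor's sketch, not part of the original file: rcu_check_callbacks() is
 * normally invoked from the scheduling-clock interrupt with hardirqs
 * disabled.  The hypothetical hook below is a simplified stand-in for the
 * real timer-tick caller (update_process_times() in the timer code).
 */
static void __maybe_unused example_tick_hook(int user_tick)
{
	rcu_check_callbacks(smp_processor_id(), user_tick);
}
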
1293 #ifdef CONFIG_SMP
1294
1295 /*
1296 * Scan the leaf rcu_node structures, processing dyntick state for any that
1297 * have not yet encountered a quiescent state, using the function specified.
1298 * Also initiate boosting for any threads blocked on the root rcu_node.
1299 *
1300 * The caller must have suppressed start of new grace periods.
1301 */
1302 static void force_qs_rnp(struct rcu_state *rsp, int (*f)(struct rcu_data *))
1303 {
1304 unsigned long bit;
1305 int cpu;
1306 unsigned long flags;
1307 unsigned long mask;
1308 struct rcu_node *rnp;
1309
1310 rcu_for_each_leaf_node(rsp, rnp) {
1311 mask = 0;
1312 raw_spin_lock_irqsave(&rnp->lock, flags);
1313 if (!rcu_gp_in_progress(rsp)) {
1314 raw_spin_unlock_irqrestore(&rnp->lock, flags);
1315 return;
1316 }
1317 if (rnp->qsmask == 0) {
1318 rcu_initiate_boost(rnp);
1319 raw_spin_unlock_irqrestore(&rnp->lock, flags);
1320 continue;
1321 }
1322 cpu = rnp->grplo;
1323 bit = 1;
1324 for (; cpu <= rnp->grphi; cpu++, bit <<= 1) {
1325 if ((rnp->qsmask & bit) != 0 &&
1326 f(per_cpu_ptr(rsp->rda, cpu)))
1327 mask |= bit;
1328 }
1329 if (mask != 0) {
1330
1331 /* rcu_report_qs_rnp() releases rnp->lock. */
1332 rcu_report_qs_rnp(mask, rsp, rnp, flags);
1333 continue;
1334 }
1335 raw_spin_unlock_irqrestore(&rnp->lock, flags);
1336 }
1337 rnp = rcu_get_root(rsp);
1338 raw_spin_lock_irqsave(&rnp->lock, flags);
1339 if (rnp->qsmask == 0)
1340 rcu_initiate_boost(rnp);
1341 raw_spin_unlock_irqrestore(&rnp->lock, flags);
1342 }
1343
1344 /*
1345 * Force quiescent states on reluctant CPUs, and also detect which
1346 * CPUs are in dyntick-idle mode.
1347 */
1348 static void force_quiescent_state(struct rcu_state *rsp, int relaxed)
1349 {
1350 unsigned long flags;
1351 struct rcu_node *rnp = rcu_get_root(rsp);
1352
1353 if (!rcu_gp_in_progress(rsp))
1354 return; /* No grace period in progress, nothing to force. */
1355 if (!raw_spin_trylock_irqsave(&rsp->fqslock, flags)) {
1356 rsp->n_force_qs_lh++; /* Inexact, can lose counts. Tough! */
1357 return; /* Someone else is already on the job. */
1358 }
1359 if (relaxed && ULONG_CMP_GE(rsp->jiffies_force_qs, jiffies))
1360 goto unlock_fqs_ret; /* no emergency and done recently. */
1361 rsp->n_force_qs++;
1362 raw_spin_lock(&rnp->lock); /* irqs already disabled */
1363 rsp->jiffies_force_qs = jiffies + RCU_JIFFIES_TILL_FORCE_QS;
1364 if (!rcu_gp_in_progress(rsp)) {
1365 rsp->n_force_qs_ngp++;
1366 raw_spin_unlock(&rnp->lock); /* irqs remain disabled */
1367 goto unlock_fqs_ret; /* no GP in progress, time updated. */
1368 }
1369 rsp->fqs_active = 1;
1370 switch (rsp->signaled) {
1371 case RCU_GP_IDLE:
1372 case RCU_GP_INIT:
1373
1374 break; /* grace period idle or initializing, ignore. */
1375
1376 case RCU_SAVE_DYNTICK:
1377 if (RCU_SIGNAL_INIT != RCU_SAVE_DYNTICK)
1378 break; /* So gcc recognizes the dead code. */
1379
1380 raw_spin_unlock(&rnp->lock); /* irqs remain disabled */
1381
1382 /* Record dyntick-idle state. */
1383 force_qs_rnp(rsp, dyntick_save_progress_counter);
1384 raw_spin_lock(&rnp->lock); /* irqs already disabled */
1385 if (rcu_gp_in_progress(rsp))
1386 rsp->signaled = RCU_FORCE_QS;
1387 break;
1388
1389 case RCU_FORCE_QS:
1390
1391 /* Check dyntick-idle state, send IPI to laggarts. */
1392 raw_spin_unlock(&rnp->lock); /* irqs remain disabled */
1393 force_qs_rnp(rsp, rcu_implicit_dynticks_qs);
1394
1395 /* Leave state in case more forcing is required. */
1396
1397 raw_spin_lock(&rnp->lock); /* irqs already disabled */
1398 break;
1399 }
1400 rsp->fqs_active = 0;
1401 if (rsp->fqs_need_gp) {
1402 raw_spin_unlock(&rsp->fqslock); /* irqs remain disabled */
1403 rsp->fqs_need_gp = 0;
1404 rcu_start_gp(rsp, flags); /* releases rnp->lock */
1405 return;
1406 }
1407 raw_spin_unlock(&rnp->lock); /* irqs remain disabled */
1408 unlock_fqs_ret:
1409 raw_spin_unlock_irqrestore(&rsp->fqslock, flags);
1410 }
1411
1412 #else /* #ifdef CONFIG_SMP */
1413
1414 static void force_quiescent_state(struct rcu_state *rsp, int relaxed)
1415 {
1416 set_need_resched();
1417 }
1418
1419 #endif /* #else #ifdef CONFIG_SMP */
1420
1421 /*
1422 * This does the RCU core processing work for the
1423 * specified rcu_state and rcu_data structures. This may be called
1424 * only from the CPU to whom the rdp belongs.
1425 */
1426 static void
1427 __rcu_process_callbacks(struct rcu_state *rsp, struct rcu_data *rdp)
1428 {
1429 unsigned long flags;
1430
1431 WARN_ON_ONCE(rdp->beenonline == 0);
1432
1433 /*
1434 * If an RCU GP has gone long enough, go check for dyntick
1435 * idle CPUs and, if needed, send resched IPIs.
1436 */
1437 if (ULONG_CMP_LT(ACCESS_ONCE(rsp->jiffies_force_qs), jiffies))
1438 force_quiescent_state(rsp, 1);
1439
1440 /*
1441 * Advance callbacks in response to end of earlier grace
1442 * period that some other CPU ended.
1443 */
1444 rcu_process_gp_end(rsp, rdp);
1445
1446 /* Update RCU state based on any recent quiescent states. */
1447 rcu_check_quiescent_state(rsp, rdp);
1448
1449 /* Does this CPU require a not-yet-started grace period? */
1450 if (cpu_needs_another_gp(rsp, rdp)) {
1451 raw_spin_lock_irqsave(&rcu_get_root(rsp)->lock, flags);
1452 rcu_start_gp(rsp, flags); /* releases above lock */
1453 }
1454
1455 /* If there are callbacks ready, invoke them. */
1456 rcu_do_batch(rsp, rdp);
1457 }
1458
1459 /*
1460 * Do RCU core processing for the current CPU.
1461 */
1462 static void rcu_process_callbacks(void)
1463 {
1464 __rcu_process_callbacks(&rcu_sched_state,
1465 &__get_cpu_var(rcu_sched_data));
1466 __rcu_process_callbacks(&rcu_bh_state, &__get_cpu_var(rcu_bh_data));
1467 rcu_preempt_process_callbacks();
1468
1469 /* If we are the last CPU on the way to dyntick-idle mode, accelerate it. */
1470 rcu_needs_cpu_flush();
1471 }
1472
1473 /*
1474 * Wake up the current CPU's kthread. This replaces raise_softirq()
1475 * in earlier versions of RCU. Note that because we are running on
1476 * the current CPU with interrupts disabled, the rcu_cpu_kthread_task
1477 * cannot disappear out from under us.
1478 */
1479 static void invoke_rcu_cpu_kthread(void)
1480 {
1481 unsigned long flags;
1482 wait_queue_head_t *q;
1483 int cpu;
1484
1485 local_irq_save(flags);
1486 cpu = smp_processor_id();
1487 per_cpu(rcu_cpu_has_work, cpu) = 1;
1488 if (per_cpu(rcu_cpu_kthread_task, cpu) == NULL) {
1489 local_irq_restore(flags);
1490 return;
1491 }
1492 q = &per_cpu(rcu_cpu_wq, cpu);
1493 wake_up(q);
1494 local_irq_restore(flags);
1495 }
1496
1497 /*
1498 * Wake up the specified per-rcu_node-structure kthread.
1499 * The caller must hold ->lock.
1500 */
1501 static void invoke_rcu_node_kthread(struct rcu_node *rnp)
1502 {
1503 struct task_struct *t;
1504
1505 t = rnp->node_kthread_task;
1506 if (t != NULL)
1507 wake_up_process(t);
1508 }
1509
1510 /*
1511 * Set the specified CPU's kthread to run RT or not, as specified by
1512 * the to_rt argument. The CPU-hotplug locks are held, so the task
1513 * is not going away.
1514 */
1515 static void rcu_cpu_kthread_setrt(int cpu, int to_rt)
1516 {
1517 int policy;
1518 struct sched_param sp;
1519 struct task_struct *t;
1520
1521 t = per_cpu(rcu_cpu_kthread_task, cpu);
1522 if (t == NULL)
1523 return;
1524 if (to_rt) {
1525 policy = SCHED_FIFO;
1526 sp.sched_priority = RCU_KTHREAD_PRIO;
1527 } else {
1528 policy = SCHED_NORMAL;
1529 sp.sched_priority = 0;
1530 }
1531 sched_setscheduler_nocheck(t, policy, &sp);
1532 }
1533
1534 /*
1535 * Timer handler to initiate the waking up of per-CPU kthreads that
1536 * have yielded the CPU due to excess numbers of RCU callbacks.
1537 * We wake up the per-rcu_node kthread, which in turn will wake up
1538 * the booster kthread.
1539 */
1540 static void rcu_cpu_kthread_timer(unsigned long arg)
1541 {
1542 unsigned long flags;
1543 struct rcu_data *rdp = per_cpu_ptr(rcu_state->rda, arg);
1544 struct rcu_node *rnp = rdp->mynode;
1545
1546 raw_spin_lock_irqsave(&rnp->lock, flags);
1547 rnp->wakemask |= rdp->grpmask;
1548 invoke_rcu_node_kthread(rnp);
1549 raw_spin_unlock_irqrestore(&rnp->lock, flags);
1550 }
1551
1552 /*
1553 * Drop to non-real-time priority and yield, but only after posting a
1554 * timer that will cause us to regain our real-time priority if we
1555 * remain preempted. Either way, we restore our real-time priority
1556 * before returning.
1557 */
1558 static void rcu_yield(void (*f)(unsigned long), unsigned long arg)
1559 {
1560 struct sched_param sp;
1561 struct timer_list yield_timer;
1562
1563 setup_timer_on_stack(&yield_timer, f, arg);
1564 mod_timer(&yield_timer, jiffies + 2);
1565 sp.sched_priority = 0;
1566 sched_setscheduler_nocheck(current, SCHED_NORMAL, &sp);
1567 schedule();
1568 sp.sched_priority = RCU_KTHREAD_PRIO;
1569 sched_setscheduler_nocheck(current, SCHED_FIFO, &sp);
1570 del_timer(&yield_timer);
1571 }
1572
1573 /*
1574 * Handle cases where the rcu_cpu_kthread() ends up on the wrong CPU.
1575 * This can happen while the corresponding CPU is either coming online
1576 * or going offline. We cannot wait until the CPU is fully online
1577 * before starting the kthread, because the various notifier functions
1578 * can wait for RCU grace periods. So we park rcu_cpu_kthread() until
1579 * the corresponding CPU is online.
1580 *
1581 * Return 1 if the kthread needs to stop, 0 otherwise.
1582 *
1583 * Caller must disable bh. This function can momentarily enable it.
1584 */
1585 static int rcu_cpu_kthread_should_stop(int cpu)
1586 {
1587 while (cpu_is_offline(cpu) ||
1588 !cpumask_equal(&current->cpus_allowed, cpumask_of(cpu)) ||
1589 smp_processor_id() != cpu) {
1590 if (kthread_should_stop())
1591 return 1;
1592 per_cpu(rcu_cpu_kthread_status, cpu) = RCU_KTHREAD_OFFCPU;
1593 per_cpu(rcu_cpu_kthread_cpu, cpu) = raw_smp_processor_id();
1594 local_bh_enable();
1595 schedule_timeout_uninterruptible(1);
1596 if (!cpumask_equal(&current->cpus_allowed, cpumask_of(cpu)))
1597 set_cpus_allowed_ptr(current, cpumask_of(cpu));
1598 local_bh_disable();
1599 }
1600 per_cpu(rcu_cpu_kthread_cpu, cpu) = cpu;
1601 return 0;
1602 }
1603
1604 /*
1605 * Per-CPU kernel thread that invokes RCU callbacks. This replaces the
1606 * earlier RCU softirq.
1607 */
1608 static int rcu_cpu_kthread(void *arg)
1609 {
1610 int cpu = (int)(long)arg;
1611 unsigned long flags;
1612 int spincnt = 0;
1613 unsigned int *statusp = &per_cpu(rcu_cpu_kthread_status, cpu);
1614 wait_queue_head_t *wqp = &per_cpu(rcu_cpu_wq, cpu);
1615 char work;
1616 char *workp = &per_cpu(rcu_cpu_has_work, cpu);
1617
1618 for (;;) {
1619 *statusp = RCU_KTHREAD_WAITING;
1620 wait_event_interruptible(*wqp,
1621 *workp != 0 || kthread_should_stop());
1622 local_bh_disable();
1623 if (rcu_cpu_kthread_should_stop(cpu)) {
1624 local_bh_enable();
1625 break;
1626 }
1627 *statusp = RCU_KTHREAD_RUNNING;
1628 local_irq_save(flags);
1629 work = *workp;
1630 *workp = 0;
1631 local_irq_restore(flags);
1632 if (work)
1633 rcu_process_callbacks();
1634 local_bh_enable();
1635 if (*workp != 0)
1636 spincnt++;
1637 else
1638 spincnt = 0;
1639 if (spincnt > 10) {
1640 *statusp = RCU_KTHREAD_YIELDING;
1641 rcu_yield(rcu_cpu_kthread_timer, (unsigned long)cpu);
1642 spincnt = 0;
1643 }
1644 }
1645 *statusp = RCU_KTHREAD_STOPPED;
1646 return 0;
1647 }
1648
1649 /*
1650 * Spawn a per-CPU kthread, setting up affinity and priority.
1651 * Because the CPU hotplug lock is held, no other CPU will be attempting
1652 * to manipulate rcu_cpu_kthread_task. There might be another CPU
1653 * attempting to access it during boot, but the locking in kthread_bind()
1654 * will enforce sufficient ordering.
1655 */
1656 static int __cpuinit rcu_spawn_one_cpu_kthread(int cpu)
1657 {
1658 struct sched_param sp;
1659 struct task_struct *t;
1660
1661 if (!rcu_kthreads_spawnable ||
1662 per_cpu(rcu_cpu_kthread_task, cpu) != NULL)
1663 return 0;
1664 t = kthread_create(rcu_cpu_kthread, (void *)(long)cpu, "rcuc%d", cpu);
1665 if (IS_ERR(t))
1666 return PTR_ERR(t);
1667 kthread_bind(t, cpu);
1668 per_cpu(rcu_cpu_kthread_cpu, cpu) = cpu;
1669 WARN_ON_ONCE(per_cpu(rcu_cpu_kthread_task, cpu) != NULL);
1670 per_cpu(rcu_cpu_kthread_task, cpu) = t;
1671 wake_up_process(t);
1672 sp.sched_priority = RCU_KTHREAD_PRIO;
1673 sched_setscheduler_nocheck(t, SCHED_FIFO, &sp);
1674 return 0;
1675 }
1676
1677 /*
1678 * Per-rcu_node kthread, which is in charge of waking up the per-CPU
1679 * kthreads when needed. We ignore requests to wake up kthreads
1680 * for offline CPUs, which is OK because force_quiescent_state()
1681 * takes care of this case.
1682 */
1683 static int rcu_node_kthread(void *arg)
1684 {
1685 int cpu;
1686 unsigned long flags;
1687 unsigned long mask;
1688 struct rcu_node *rnp = (struct rcu_node *)arg;
1689 struct sched_param sp;
1690 struct task_struct *t;
1691
1692 for (;;) {
1693 rnp->node_kthread_status = RCU_KTHREAD_WAITING;
1694 wait_event_interruptible(rnp->node_wq, rnp->wakemask != 0 ||
1695 kthread_should_stop());
1696 if (kthread_should_stop())
1697 break;
1698 rnp->node_kthread_status = RCU_KTHREAD_RUNNING;
1699 raw_spin_lock_irqsave(&rnp->lock, flags);
1700 mask = rnp->wakemask;
1701 rnp->wakemask = 0;
1702 rcu_initiate_boost(rnp);
1703 raw_spin_unlock_irqrestore(&rnp->lock, flags);
1704 for (cpu = rnp->grplo; cpu <= rnp->grphi; cpu++, mask >>= 1) {
1705 if ((mask & 0x1) == 0)
1706 continue;
1707 preempt_disable();
1708 t = per_cpu(rcu_cpu_kthread_task, cpu);
1709 if (!cpu_online(cpu) || t == NULL) {
1710 preempt_enable();
1711 continue;
1712 }
1713 per_cpu(rcu_cpu_has_work, cpu) = 1;
1714 sp.sched_priority = RCU_KTHREAD_PRIO;
1715 sched_setscheduler_nocheck(t, SCHED_FIFO, &sp);
1716 preempt_enable();
1717 }
1718 }
1719 rnp->node_kthread_status = RCU_KTHREAD_STOPPED;
1720 return 0;
1721 }
1722
1723 /*
1724 * Set the per-rcu_node kthread's affinity to cover all CPUs that are
1725 * served by the rcu_node in question. The CPU hotplug lock is still
1726 * held, so the value of rnp->qsmaskinit will be stable.
1727 *
1728 * We don't include outgoingcpu in the affinity set; the caller passes
1729 * -1 if there is no outgoing CPU. If there are no CPUs left in the
1730 * affinity set, this function allows the kthread to execute on any CPU.
1731 */
1732 static void rcu_node_kthread_setaffinity(struct rcu_node *rnp, int outgoingcpu)
1733 {
1734 cpumask_var_t cm;
1735 int cpu;
1736 unsigned long mask = rnp->qsmaskinit;
1737
1738 if (rnp->node_kthread_task == NULL || mask == 0)
1739 return;
1740 if (!alloc_cpumask_var(&cm, GFP_KERNEL))
1741 return;
1742 cpumask_clear(cm);
1743 for (cpu = rnp->grplo; cpu <= rnp->grphi; cpu++, mask >>= 1)
1744 if ((mask & 0x1) && cpu != outgoingcpu)
1745 cpumask_set_cpu(cpu, cm);
1746 if (cpumask_weight(cm) == 0) {
1747 cpumask_setall(cm);
1748 for (cpu = rnp->grplo; cpu <= rnp->grphi; cpu++)
1749 cpumask_clear_cpu(cpu, cm);
1750 WARN_ON_ONCE(cpumask_weight(cm) == 0);
1751 }
1752 set_cpus_allowed_ptr(rnp->node_kthread_task, cm);
1753 rcu_boost_kthread_setaffinity(rnp, cm);
1754 free_cpumask_var(cm);
1755 }
1756
1757 /*
1758 * Spawn a per-rcu_node kthread, setting priority and affinity.
1759 * Called during boot before online/offline can happen, or, during
1760 * runtime, with the main CPU-hotplug locks held, so only one of
1761 * these can be executing at a time.
1762 */
1763 static int __cpuinit rcu_spawn_one_node_kthread(struct rcu_state *rsp,
1764 struct rcu_node *rnp)
1765 {
1766 unsigned long flags;
1767 int rnp_index = rnp - &rsp->node[0];
1768 struct sched_param sp;
1769 struct task_struct *t;
1770
1771 if (!rcu_kthreads_spawnable ||
1772 rnp->qsmaskinit == 0)
1773 return 0;
1774 if (rnp->node_kthread_task == NULL) {
1775 t = kthread_create(rcu_node_kthread, (void *)rnp,
1776 "rcun%d", rnp_index);
1777 if (IS_ERR(t))
1778 return PTR_ERR(t);
1779 raw_spin_lock_irqsave(&rnp->lock, flags);
1780 rnp->node_kthread_task = t;
1781 raw_spin_unlock_irqrestore(&rnp->lock, flags);
1782 wake_up_process(t);
1783 sp.sched_priority = 99;
1784 sched_setscheduler_nocheck(t, SCHED_FIFO, &sp);
1785 }
1786 return rcu_spawn_one_boost_kthread(rsp, rnp, rnp_index);
1787 }
1788
1789 /*
1790 * Spawn all kthreads -- called as soon as the scheduler is running.
1791 */
1792 static int __init rcu_spawn_kthreads(void)
1793 {
1794 int cpu;
1795 struct rcu_node *rnp;
1796
1797 rcu_kthreads_spawnable = 1;
1798 for_each_possible_cpu(cpu) {
1799 init_waitqueue_head(&per_cpu(rcu_cpu_wq, cpu));
1800 per_cpu(rcu_cpu_has_work, cpu) = 0;
1801 if (cpu_online(cpu))
1802 (void)rcu_spawn_one_cpu_kthread(cpu);
1803 }
1804 rnp = rcu_get_root(rcu_state);
1805 init_waitqueue_head(&rnp->node_wq);
1806 rcu_init_boost_waitqueue(rnp);
1807 (void)rcu_spawn_one_node_kthread(rcu_state, rnp);
1808 if (NUM_RCU_NODES > 1)
1809 rcu_for_each_leaf_node(rcu_state, rnp) {
1810 init_waitqueue_head(&rnp->node_wq);
1811 rcu_init_boost_waitqueue(rnp);
1812 (void)rcu_spawn_one_node_kthread(rcu_state, rnp);
1813 }
1814 return 0;
1815 }
1816 early_initcall(rcu_spawn_kthreads);
1817
1818 static void
1819 __call_rcu(struct rcu_head *head, void (*func)(struct rcu_head *rcu),
1820 struct rcu_state *rsp)
1821 {
1822 unsigned long flags;
1823 struct rcu_data *rdp;
1824
1825 debug_rcu_head_queue(head);
1826 head->func = func;
1827 head->next = NULL;
1828
1829 smp_mb(); /* Ensure RCU update seen before callback registry. */
1830
1831 /*
1832 * Opportunistically note grace-period endings and beginnings.
1833 * Note that we might see a beginning right after we see an
1834 * end, but never vice versa, since this CPU has to pass through
1835 * a quiescent state betweentimes.
1836 */
1837 local_irq_save(flags);
1838 rdp = this_cpu_ptr(rsp->rda);
1839
1840 /* Add the callback to our list. */
1841 *rdp->nxttail[RCU_NEXT_TAIL] = head;
1842 rdp->nxttail[RCU_NEXT_TAIL] = &head->next;
1843
1844 /*
1845 * Force the grace period if too many callbacks or too long waiting.
1846 * Enforce hysteresis, and don't invoke force_quiescent_state()
1847 * if some other CPU has recently done so. Also, don't bother
1848 * invoking force_quiescent_state() if the newly enqueued callback
1849 * is the only one waiting for a grace period to complete.
1850 */
1851 if (unlikely(++rdp->qlen > rdp->qlen_last_fqs_check + qhimark)) {
1852
1853 /* Are we ignoring a completed grace period? */
1854 rcu_process_gp_end(rsp, rdp);
1855 check_for_new_grace_period(rsp, rdp);
1856
1857 /* Start a new grace period if one not already started. */
1858 if (!rcu_gp_in_progress(rsp)) {
1859 unsigned long nestflag;
1860 struct rcu_node *rnp_root = rcu_get_root(rsp);
1861
1862 raw_spin_lock_irqsave(&rnp_root->lock, nestflag);
1863 rcu_start_gp(rsp, nestflag); /* releases rnp_root->lock */
1864 } else {
1865 /* Give the grace period a kick. */
1866 rdp->blimit = LONG_MAX;
1867 if (rsp->n_force_qs == rdp->n_force_qs_snap &&
1868 *rdp->nxttail[RCU_DONE_TAIL] != head)
1869 force_quiescent_state(rsp, 0);
1870 rdp->n_force_qs_snap = rsp->n_force_qs;
1871 rdp->qlen_last_fqs_check = rdp->qlen;
1872 }
1873 } else if (ULONG_CMP_LT(ACCESS_ONCE(rsp->jiffies_force_qs), jiffies))
1874 force_quiescent_state(rsp, 1);
1875 local_irq_restore(flags);
1876 }
1877
1878 /*
1879 * Queue an RCU-sched callback for invocation after a grace period.
1880 */
1881 void call_rcu_sched(struct rcu_head *head, void (*func)(struct rcu_head *rcu))
1882 {
1883 __call_rcu(head, func, &rcu_sched_state);
1884 }
1885 EXPORT_SYMBOL_GPL(call_rcu_sched);
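/*
 * Illustrative usage sketch, not part of this file: a typical caller of
 * call_rcu_sched() unlinks an element from an RCU-protected list and
 * defers its kfree() until all pre-existing preempt-disabled readers
 * have finished.  The names struct foo, foo_reclaim(), and remove_foo()
 * are hypothetical, and the updater-side locking is omitted for brevity.
 *
 *	struct foo {
 *		struct list_head list;
 *		int data;
 *		struct rcu_head rcu;
 *	};
 *
 *	static void foo_reclaim(struct rcu_head *head)
 *	{
 *		struct foo *fp = container_of(head, struct foo, rcu);
 *
 *		kfree(fp);
 *	}
 *
 *	static void remove_foo(struct foo *fp)
 *	{
 *		list_del_rcu(&fp->list);
 *		call_rcu_sched(&fp->rcu, foo_reclaim);
 *	}
 */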
1886
1887 /*
1888 * Queue an RCU callback for invocation after a quicker grace period.
1889 */
1890 void call_rcu_bh(struct rcu_head *head, void (*func)(struct rcu_head *rcu))
1891 {
1892 __call_rcu(head, func, &rcu_bh_state);
1893 }
1894 EXPORT_SYMBOL_GPL(call_rcu_bh);
1895
1896 /**
1897 * synchronize_sched - wait until an rcu-sched grace period has elapsed.
1898 *
1899 * Control will return to the caller some time after a full rcu-sched
1900 * grace period has elapsed, in other words after all currently executing
1901 * rcu-sched read-side critical sections have completed. These read-side
1902 * critical sections are delimited by rcu_read_lock_sched() and
1903 * rcu_read_unlock_sched(), and may be nested. Note that preempt_disable(),
1904 * local_irq_disable(), and so on may be used in place of
1905 * rcu_read_lock_sched().
1906 *
1907 * This means that all preempt_disable code sequences, including NMI and
1908 * hardware-interrupt handlers, in progress on entry will have completed
1909 * before this primitive returns. However, this does not guarantee that
1910 * softirq handlers will have completed, since in some kernels, these
1911 * handlers can run in process context, and can block.
1912 *
1913 * This primitive provides the guarantees made by the (now removed)
1914 * synchronize_kernel() API. In contrast, synchronize_rcu() only
1915 * guarantees that rcu_read_lock() sections will have completed.
1916 * In "classic RCU", these two guarantees happen to be one and
1917 * the same, but can differ in realtime RCU implementations.
1918 */
1919 void synchronize_sched(void)
1920 {
1921 struct rcu_synchronize rcu;
1922
1923 if (rcu_blocking_is_gp())
1924 return;
1925
1926 init_rcu_head_on_stack(&rcu.head);
1927 init_completion(&rcu.completion);
1928 /* Will wake me after RCU finished. */
1929 call_rcu_sched(&rcu.head, wakeme_after_rcu);
1930 /* Wait for it. */
1931 wait_for_completion(&rcu.completion);
1932 destroy_rcu_head_on_stack(&rcu.head);
1933 }
1934 EXPORT_SYMBOL_GPL(synchronize_sched);
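/*
 * Illustrative usage sketch, not part of this file: when an updater may
 * block, it can skip call_rcu_sched() and wait directly, publishing the
 * new version and then letting synchronize_sched() flush out every
 * reader that might still see the old one.  gbl_foo, struct foo, and
 * update_foo() are hypothetical names; updater serialization is omitted.
 *
 *	static struct foo *gbl_foo;
 *
 *	static void update_foo(struct foo *new_fp)
 *	{
 *		struct foo *old_fp = gbl_foo;
 *
 *		rcu_assign_pointer(gbl_foo, new_fp);
 *		synchronize_sched();
 *		kfree(old_fp);
 *	}
 */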
1935
1936 /**
1937 * synchronize_rcu_bh - wait until an rcu_bh grace period has elapsed.
1938 *
1939 * Control will return to the caller some time after a full rcu_bh grace
1940 * period has elapsed, in other words after all currently executing rcu_bh
1941 * read-side critical sections have completed. RCU read-side critical
1942 * sections are delimited by rcu_read_lock_bh() and rcu_read_unlock_bh(),
1943 * and may be nested.
1944 */
1945 void synchronize_rcu_bh(void)
1946 {
1947 struct rcu_synchronize rcu;
1948
1949 if (rcu_blocking_is_gp())
1950 return;
1951
1952 init_rcu_head_on_stack(&rcu.head);
1953 init_completion(&rcu.completion);
1954 /* Will wake me after RCU finished. */
1955 call_rcu_bh(&rcu.head, wakeme_after_rcu);
1956 /* Wait for it. */
1957 wait_for_completion(&rcu.completion);
1958 destroy_rcu_head_on_stack(&rcu.head);
1959 }
1960 EXPORT_SYMBOL_GPL(synchronize_rcu_bh);
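/*
 * Illustrative sketch, not part of this file: rcu_bh readers sit between
 * rcu_read_lock_bh() and rcu_read_unlock_bh(), so synchronize_rcu_bh()
 * is the blocking counterpart for data touched from softirq context.
 * bh_data, struct bh_item, and process() are hypothetical names.
 *
 *	static struct bh_item *bh_data;
 *
 *	Reader, typically in a softirq handler:
 *		rcu_read_lock_bh();
 *		p = rcu_dereference_bh(bh_data);
 *		if (p)
 *			process(p);
 *		rcu_read_unlock_bh();
 *
 *	Updater, in a context that may sleep:
 *		old = bh_data;
 *		rcu_assign_pointer(bh_data, new);
 *		synchronize_rcu_bh();
 *		kfree(old);
 */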
1961
1962 /*
1963 * Check to see if there is any immediate RCU-related work to be done
1964 * by the current CPU, for the specified type of RCU, returning 1 if so.
1965 * The checks are in order of increasing expense: checks that can be
1966 * carried out against CPU-local state are performed first. However,
1967 * we must check for CPU stalls first, else we might not get a chance.
1968 */
1969 static int __rcu_pending(struct rcu_state *rsp, struct rcu_data *rdp)
1970 {
1971 struct rcu_node *rnp = rdp->mynode;
1972
1973 rdp->n_rcu_pending++;
1974
1975 /* Check for CPU stalls, if enabled. */
1976 check_cpu_stall(rsp, rdp);
1977
1978 /* Is the RCU core waiting for a quiescent state from this CPU? */
1979 if (rdp->qs_pending && !rdp->passed_quiesc) {
1980
1981 /*
1982 * If force_quiescent_state() is coming soon and this CPU
1983 * needs a quiescent state, and this is either RCU-sched
1984 * or RCU-bh, force a local reschedule.
1985 */
1986 rdp->n_rp_qs_pending++;
1987 if (!rdp->preemptable &&
1988 ULONG_CMP_LT(ACCESS_ONCE(rsp->jiffies_force_qs) - 1,
1989 jiffies))
1990 set_need_resched();
1991 } else if (rdp->qs_pending && rdp->passed_quiesc) {
1992 rdp->n_rp_report_qs++;
1993 return 1;
1994 }
1995
1996 /* Does this CPU have callbacks ready to invoke? */
1997 if (cpu_has_callbacks_ready_to_invoke(rdp)) {
1998 rdp->n_rp_cb_ready++;
1999 return 1;
2000 }
2001
2002 /* Has RCU gone idle with this CPU needing another grace period? */
2003 if (cpu_needs_another_gp(rsp, rdp)) {
2004 rdp->n_rp_cpu_needs_gp++;
2005 return 1;
2006 }
2007
2008 /* Has another RCU grace period completed? */
2009 if (ACCESS_ONCE(rnp->completed) != rdp->completed) { /* outside lock */
2010 rdp->n_rp_gp_completed++;
2011 return 1;
2012 }
2013
2014 /* Has a new RCU grace period started? */
2015 if (ACCESS_ONCE(rnp->gpnum) != rdp->gpnum) { /* outside lock */
2016 rdp->n_rp_gp_started++;
2017 return 1;
2018 }
2019
2020 /* Has an RCU GP gone long enough to send resched IPIs, etc.? */
2021 if (rcu_gp_in_progress(rsp) &&
2022 ULONG_CMP_LT(ACCESS_ONCE(rsp->jiffies_force_qs), jiffies)) {
2023 rdp->n_rp_need_fqs++;
2024 return 1;
2025 }
2026
2027 /* nothing to do */
2028 rdp->n_rp_need_nothing++;
2029 return 0;
2030 }
2031
2032 /*
2033 * Check to see if there is any immediate RCU-related work to be done
2034 * by the current CPU, returning 1 if so. This function is part of the
2035 * RCU implementation; it is -not- an exported member of the RCU API.
2036 */
2037 static int rcu_pending(int cpu)
2038 {
2039 return __rcu_pending(&rcu_sched_state, &per_cpu(rcu_sched_data, cpu)) ||
2040 __rcu_pending(&rcu_bh_state, &per_cpu(rcu_bh_data, cpu)) ||
2041 rcu_preempt_pending(cpu);
2042 }
2043
2044 /*
2045 * Check to see if any future RCU-related work will need to be done
2046 * by the current CPU, even if none need be done immediately, returning
2047 * 1 if so.
2048 */
2049 static int rcu_needs_cpu_quick_check(int cpu)
2050 {
2051 /* RCU callbacks either ready or pending? */
2052 return per_cpu(rcu_sched_data, cpu).nxtlist ||
2053 per_cpu(rcu_bh_data, cpu).nxtlist ||
2054 rcu_preempt_needs_cpu(cpu);
2055 }
2056
2057 static DEFINE_PER_CPU(struct rcu_head, rcu_barrier_head) = {NULL};
2058 static atomic_t rcu_barrier_cpu_count;
2059 static DEFINE_MUTEX(rcu_barrier_mutex);
2060 static struct completion rcu_barrier_completion;
2061
2062 static void rcu_barrier_callback(struct rcu_head *notused)
2063 {
2064 if (atomic_dec_and_test(&rcu_barrier_cpu_count))
2065 complete(&rcu_barrier_completion);
2066 }
2067
2068 /*
2069 * Called with preemption disabled, and from cross-cpu IRQ context.
2070 */
2071 static void rcu_barrier_func(void *type)
2072 {
2073 int cpu = smp_processor_id();
2074 struct rcu_head *head = &per_cpu(rcu_barrier_head, cpu);
2075 void (*call_rcu_func)(struct rcu_head *head,
2076 void (*func)(struct rcu_head *head));
2077
2078 atomic_inc(&rcu_barrier_cpu_count);
2079 call_rcu_func = type;
2080 call_rcu_func(head, rcu_barrier_callback);
2081 }
2082
2083 /*
2084 * Orchestrate the specified type of RCU barrier, waiting for all
2085 * RCU callbacks of the specified type to complete.
2086 */
2087 static void _rcu_barrier(struct rcu_state *rsp,
2088 void (*call_rcu_func)(struct rcu_head *head,
2089 void (*func)(struct rcu_head *head)))
2090 {
2091 BUG_ON(in_interrupt());
2092 /* Take mutex to serialize concurrent rcu_barrier() requests. */
2093 mutex_lock(&rcu_barrier_mutex);
2094 init_completion(&rcu_barrier_completion);
2095 /*
2096 * Initialize rcu_barrier_cpu_count to 1, then invoke
2097 * rcu_barrier_func() on each CPU, so that each CPU also has
2098 * incremented rcu_barrier_cpu_count. Only then is it safe to
2099 * decrement rcu_barrier_cpu_count -- otherwise the first CPU
2100 * might complete its grace period before all of the other CPUs
2101 * did their increment, causing this function to return too
2102 * early. Note that on_each_cpu() disables irqs, which prevents
2103 * any CPUs from coming online or going offline until each online
2104 * CPU has queued its RCU-barrier callback.
2105 */
2106 atomic_set(&rcu_barrier_cpu_count, 1);
2107 on_each_cpu(rcu_barrier_func, (void *)call_rcu_func, 1);
2108 if (atomic_dec_and_test(&rcu_barrier_cpu_count))
2109 complete(&rcu_barrier_completion);
2110 wait_for_completion(&rcu_barrier_completion);
2111 mutex_unlock(&rcu_barrier_mutex);
2112 }
2113
2114 /**
2115 * rcu_barrier_bh - Wait until all in-flight call_rcu_bh() callbacks complete.
2116 */
2117 void rcu_barrier_bh(void)
2118 {
2119 _rcu_barrier(&rcu_bh_state, call_rcu_bh);
2120 }
2121 EXPORT_SYMBOL_GPL(rcu_barrier_bh);
2122
2123 /**
2124 * rcu_barrier_sched - Wait for in-flight call_rcu_sched() callbacks.
2125 */
2126 void rcu_barrier_sched(void)
2127 {
2128 _rcu_barrier(&rcu_sched_state, call_rcu_sched);
2129 }
2130 EXPORT_SYMBOL_GPL(rcu_barrier_sched);
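/*
 * Illustrative sketch, not part of this file: a module that posted
 * callbacks with call_rcu_sched() must drain them before unloading, or
 * the callbacks could run after the module text is gone.  The exit
 * handler below is hypothetical (my_exit(), my_cache) and assumes that
 * new callback posting has already been stopped.
 *
 *	static void __exit my_exit(void)
 *	{
 *		rcu_barrier_sched();
 *		kmem_cache_destroy(my_cache);
 *	}
 *	module_exit(my_exit);
 */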
2131
2132 /*
2133 * Do boot-time initialization of a CPU's per-CPU RCU data.
2134 */
2135 static void __init
2136 rcu_boot_init_percpu_data(int cpu, struct rcu_state *rsp)
2137 {
2138 unsigned long flags;
2139 int i;
2140 struct rcu_data *rdp = per_cpu_ptr(rsp->rda, cpu);
2141 struct rcu_node *rnp = rcu_get_root(rsp);
2142
2143 /* Set up local state, ensuring consistent view of global state. */
2144 raw_spin_lock_irqsave(&rnp->lock, flags);
2145 rdp->grpmask = 1UL << (cpu - rdp->mynode->grplo);
2146 rdp->nxtlist = NULL;
2147 for (i = 0; i < RCU_NEXT_SIZE; i++)
2148 rdp->nxttail[i] = &rdp->nxtlist;
2149 rdp->qlen = 0;
2150 #ifdef CONFIG_NO_HZ
2151 rdp->dynticks = &per_cpu(rcu_dynticks, cpu);
2152 #endif /* #ifdef CONFIG_NO_HZ */
2153 rdp->cpu = cpu;
2154 raw_spin_unlock_irqrestore(&rnp->lock, flags);
2155 }
2156
2157 /*
2158 * Initialize a CPU's per-CPU RCU data. Note that only one online or
2159 * offline event can be happening at a given time. Note also that we
2160 * can accept some slop in the rsp->completed access due to the fact
2161 * that this CPU cannot possibly have any RCU callbacks in flight yet.
2162 */
2163 static void __cpuinit
2164 rcu_init_percpu_data(int cpu, struct rcu_state *rsp, int preemptable)
2165 {
2166 unsigned long flags;
2167 unsigned long mask;
2168 struct rcu_data *rdp = per_cpu_ptr(rsp->rda, cpu);
2169 struct rcu_node *rnp = rcu_get_root(rsp);
2170
2171 /* Set up local state, ensuring consistent view of global state. */
2172 raw_spin_lock_irqsave(&rnp->lock, flags);
2173 rdp->passed_quiesc = 0; /* We could be racing with new GP, */
2174 rdp->qs_pending = 1; /* so set up to respond to current GP. */
2175 rdp->beenonline = 1; /* We have now been online. */
2176 rdp->preemptable = preemptable;
2177 rdp->qlen_last_fqs_check = 0;
2178 rdp->n_force_qs_snap = rsp->n_force_qs;
2179 rdp->blimit = blimit;
2180 raw_spin_unlock(&rnp->lock); /* irqs remain disabled. */
2181
2182 /*
2183 * A new grace period might start here. If so, we won't be part
2184 * of it, but that is OK, as we are currently in a quiescent state.
2185 */
2186
2187 /* Exclude any attempts to start a new GP on large systems. */
2188 raw_spin_lock(&rsp->onofflock); /* irqs already disabled. */
2189
2190 /* Add CPU to rcu_node bitmasks. */
2191 rnp = rdp->mynode;
2192 mask = rdp->grpmask;
2193 do {
2194 /* Exclude any attempts to start a new GP on small systems. */
2195 raw_spin_lock(&rnp->lock); /* irqs already disabled. */
2196 rnp->qsmaskinit |= mask;
2197 mask = rnp->grpmask;
2198 if (rnp == rdp->mynode) {
2199 rdp->gpnum = rnp->completed; /* if GP in progress... */
2200 rdp->completed = rnp->completed;
2201 rdp->passed_quiesc_completed = rnp->completed - 1;
2202 }
2203 raw_spin_unlock(&rnp->lock); /* irqs already disabled. */
2204 rnp = rnp->parent;
2205 } while (rnp != NULL && !(rnp->qsmaskinit & mask));
2206
2207 raw_spin_unlock_irqrestore(&rsp->onofflock, flags);
2208 }
2209
2210 static void __cpuinit rcu_online_cpu(int cpu)
2211 {
2212 rcu_init_percpu_data(cpu, &rcu_sched_state, 0);
2213 rcu_init_percpu_data(cpu, &rcu_bh_state, 0);
2214 rcu_preempt_init_percpu_data(cpu);
2215 }
2216
2217 static void __cpuinit rcu_online_kthreads(int cpu)
2218 {
2219 struct rcu_data *rdp = per_cpu_ptr(rcu_state->rda, cpu);
2220 struct rcu_node *rnp = rdp->mynode;
2221
2222 /* Fire up the incoming CPU's kthread and leaf rcu_node kthread. */
2223 if (rcu_kthreads_spawnable) {
2224 (void)rcu_spawn_one_cpu_kthread(cpu);
2225 if (rnp->node_kthread_task == NULL)
2226 (void)rcu_spawn_one_node_kthread(rcu_state, rnp);
2227 }
2228 }
2229
2230 /*
2231 * Handle CPU online/offline notification events.
2232 */
2233 static int __cpuinit rcu_cpu_notify(struct notifier_block *self,
2234 unsigned long action, void *hcpu)
2235 {
2236 long cpu = (long)hcpu;
2237 struct rcu_data *rdp = per_cpu_ptr(rcu_state->rda, cpu);
2238 struct rcu_node *rnp = rdp->mynode;
2239
2240 switch (action) {
2241 case CPU_UP_PREPARE:
2242 case CPU_UP_PREPARE_FROZEN:
2243 rcu_online_cpu(cpu);
2244 rcu_online_kthreads(cpu);
2245 break;
2246 case CPU_ONLINE:
2247 case CPU_DOWN_FAILED:
2248 rcu_node_kthread_setaffinity(rnp, -1);
2249 rcu_cpu_kthread_setrt(cpu, 1);
2250 break;
2251 case CPU_DOWN_PREPARE:
2252 rcu_node_kthread_setaffinity(rnp, cpu);
2253 rcu_cpu_kthread_setrt(cpu, 0);
2254 break;
2255 case CPU_DYING:
2256 case CPU_DYING_FROZEN:
2257 /*
2258 * The whole machine is "stopped" except this CPU, so we can
2259 * touch any data without introducing corruption. We send the
2260 * dying CPU's callbacks to an arbitrarily chosen online CPU.
2261 */
2262 rcu_send_cbs_to_online(&rcu_bh_state);
2263 rcu_send_cbs_to_online(&rcu_sched_state);
2264 rcu_preempt_send_cbs_to_online();
2265 break;
2266 case CPU_DEAD:
2267 case CPU_DEAD_FROZEN:
2268 case CPU_UP_CANCELED:
2269 case CPU_UP_CANCELED_FROZEN:
2270 rcu_offline_cpu(cpu);
2271 break;
2272 default:
2273 break;
2274 }
2275 return NOTIFY_OK;
2276 }
2277
2278 /*
2279 * This function is invoked towards the end of the scheduler's initialization
2280 * process. Before this is called, the idle task might contain
2281 * RCU read-side critical sections (during which time, this idle
2282 * task is booting the system). After this function is called, the
2283 * idle tasks are prohibited from containing RCU read-side critical
2284 * sections. This function also enables RCU lockdep checking.
2285 */
2286 void rcu_scheduler_starting(void)
2287 {
2288 WARN_ON(num_online_cpus() != 1);
2289 WARN_ON(nr_context_switches() > 0);
2290 rcu_scheduler_active = 1;
2291 }
2292
2293 /*
2294 * Compute the per-level fanout, either using the exact fanout specified
2295 * or balancing the tree, depending on CONFIG_RCU_FANOUT_EXACT.
2296 */
2297 #ifdef CONFIG_RCU_FANOUT_EXACT
2298 static void __init rcu_init_levelspread(struct rcu_state *rsp)
2299 {
2300 int i;
2301
2302 for (i = NUM_RCU_LVLS - 1; i > 0; i--)
2303 rsp->levelspread[i] = CONFIG_RCU_FANOUT;
2304 rsp->levelspread[0] = RCU_FANOUT_LEAF;
2305 }
2306 #else /* #ifdef CONFIG_RCU_FANOUT_EXACT */
2307 static void __init rcu_init_levelspread(struct rcu_state *rsp)
2308 {
2309 int ccur;
2310 int cprv;
2311 int i;
2312
2313 cprv = NR_CPUS;
2314 for (i = NUM_RCU_LVLS - 1; i >= 0; i--) {
2315 ccur = rsp->levelcnt[i];
2316 rsp->levelspread[i] = (cprv + ccur - 1) / ccur;
2317 cprv = ccur;
2318 }
2319 }
2320 #endif /* #else #ifdef CONFIG_RCU_FANOUT_EXACT */
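/*
 * Worked example of the balanced (non-exact) computation above, under
 * the illustrative assumption of NR_CPUS == 64, NUM_RCU_LVLS == 2, and
 * levelcnt == { 1, 4 }.  The loop walks from the leaves toward the root
 * with cprv starting at 64:
 *
 *	i == 1:	levelspread[1] = (64 + 4 - 1) / 4 = 16	(CPUs per leaf node)
 *	i == 0:	levelspread[0] = ( 4 + 1 - 1) / 1 =  4	(leaf nodes under root)
 *
 * so the fanout is spread evenly instead of packing CONFIG_RCU_FANOUT
 * children into every node as the CONFIG_RCU_FANOUT_EXACT variant does.
 */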
2321
2322 /*
2323 * Helper function for rcu_init() that initializes one rcu_state structure.
2324 */
2325 static void __init rcu_init_one(struct rcu_state *rsp,
2326 struct rcu_data __percpu *rda)
2327 {
2328 static char *buf[] = { "rcu_node_level_0",
2329 "rcu_node_level_1",
2330 "rcu_node_level_2",
2331 "rcu_node_level_3" }; /* Match MAX_RCU_LVLS */
2332 int cpustride = 1;
2333 int i;
2334 int j;
2335 struct rcu_node *rnp;
2336
2337 BUILD_BUG_ON(MAX_RCU_LVLS > ARRAY_SIZE(buf)); /* Fix buf[] init! */
2338
2339 /* Initialize the level-tracking arrays. */
2340
2341 for (i = 1; i < NUM_RCU_LVLS; i++)
2342 rsp->level[i] = rsp->level[i - 1] + rsp->levelcnt[i - 1];
2343 rcu_init_levelspread(rsp);
2344
2345 /* Initialize the elements themselves, starting from the leaves. */
2346
2347 for (i = NUM_RCU_LVLS - 1; i >= 0; i--) {
2348 cpustride *= rsp->levelspread[i];
2349 rnp = rsp->level[i];
2350 for (j = 0; j < rsp->levelcnt[i]; j++, rnp++) {
2351 raw_spin_lock_init(&rnp->lock);
2352 lockdep_set_class_and_name(&rnp->lock,
2353 &rcu_node_class[i], buf[i]);
2354 rnp->gpnum = 0;
2355 rnp->qsmask = 0;
2356 rnp->qsmaskinit = 0;
2357 rnp->grplo = j * cpustride;
2358 rnp->grphi = (j + 1) * cpustride - 1;
2359 if (rnp->grphi >= NR_CPUS)
2360 rnp->grphi = NR_CPUS - 1;
2361 if (i == 0) {
2362 rnp->grpnum = 0;
2363 rnp->grpmask = 0;
2364 rnp->parent = NULL;
2365 } else {
2366 rnp->grpnum = j % rsp->levelspread[i - 1];
2367 rnp->grpmask = 1UL << rnp->grpnum;
2368 rnp->parent = rsp->level[i - 1] +
2369 j / rsp->levelspread[i - 1];
2370 }
2371 rnp->level = i;
2372 INIT_LIST_HEAD(&rnp->blkd_tasks);
2373 }
2374 }
2375
2376 rsp->rda = rda;
2377 rnp = rsp->level[NUM_RCU_LVLS - 1];
2378 for_each_possible_cpu(i) {
2379 while (i > rnp->grphi)
2380 rnp++;
2381 per_cpu_ptr(rsp->rda, i)->mynode = rnp;
2382 rcu_boot_init_percpu_data(i, rsp);
2383 }
2384 }
2385
2386 void __init rcu_init(void)
2387 {
2388 int cpu;
2389
2390 rcu_bootup_announce();
2391 rcu_init_one(&rcu_sched_state, &rcu_sched_data);
2392 rcu_init_one(&rcu_bh_state, &rcu_bh_data);
2393 __rcu_init_preempt();
2394
2395 /*
2396 * We don't need protection against CPU-hotplug here because
2397 * this is called early in boot, before either interrupts
2398 * or the scheduler are operational.
2399 */
2400 cpu_notifier(rcu_cpu_notify, 0);
2401 for_each_online_cpu(cpu)
2402 rcu_cpu_notify(NULL, CPU_UP_PREPARE, (void *)(long)cpu);
2403 check_cpu_stall_init();
2404 }
2405
2406 #include "rcutree_plugin.h"