kernel/rcuclassic.c (Linux 2.6.28-rc6)
/*
 * Read-Copy Update mechanism for mutual exclusion
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
 *
 * Copyright IBM Corporation, 2001
 *
 * Authors: Dipankar Sarma <dipankar@in.ibm.com>
 *          Manfred Spraul <manfred@colorfullife.com>
 *
 * Based on the original work by Paul McKenney <paulmck@us.ibm.com>
 * and inputs from Rusty Russell, Andrea Arcangeli and Andi Kleen.
 * Papers:
 * http://www.rdrop.com/users/paulmck/paper/rclockpdcsproof.pdf
 * http://lse.sourceforge.net/locking/rclock_OLS.2001.05.01c.sc.pdf (OLS2001)
 *
 * For detailed explanation of Read-Copy Update mechanism see -
 *		Documentation/RCU
 *
 */
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/spinlock.h>
#include <linux/smp.h>
#include <linux/rcupdate.h>
#include <linux/interrupt.h>
#include <linux/sched.h>
#include <asm/atomic.h>
#include <linux/bitops.h>
#include <linux/module.h>
#include <linux/completion.h>
#include <linux/moduleparam.h>
#include <linux/percpu.h>
#include <linux/notifier.h>
#include <linux/cpu.h>
#include <linux/mutex.h>
#include <linux/time.h>

#ifdef CONFIG_DEBUG_LOCK_ALLOC
static struct lock_class_key rcu_lock_key;
struct lockdep_map rcu_lock_map =
	STATIC_LOCKDEP_MAP_INIT("rcu_read_lock", &rcu_lock_key);
EXPORT_SYMBOL_GPL(rcu_lock_map);
#endif


/* Definition for rcupdate control block. */
static struct rcu_ctrlblk rcu_ctrlblk = {
	.cur = -300,
	.completed = -300,
	.pending = -300,
	.lock = __SPIN_LOCK_UNLOCKED(&rcu_ctrlblk.lock),
	.cpumask = CPU_MASK_NONE,
};
static struct rcu_ctrlblk rcu_bh_ctrlblk = {
	.cur = -300,
	.completed = -300,
	.pending = -300,
	.lock = __SPIN_LOCK_UNLOCKED(&rcu_bh_ctrlblk.lock),
	.cpumask = CPU_MASK_NONE,
};

DEFINE_PER_CPU(struct rcu_data, rcu_data) = { 0L };
DEFINE_PER_CPU(struct rcu_data, rcu_bh_data) = { 0L };

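/*
 * Tuning knobs (see also the module_param() statements at the end of this
 * file).  blimit bounds how many callbacks rcu_do_batch() invokes per pass;
 * once a CPU's callback queue exceeds qhimark, batch limiting is lifted and
 * a quiescent state is forced; batch limiting resumes once the queue drains
 * below qlowmark.
 */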
static int blimit = 10;
static int qhimark = 10000;
static int qlowmark = 100;

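/*
 * When a CPU's callback queue grows past qhimark, try to hurry the current
 * grace period along: mark ourselves as needing a reschedule and (on SMP)
 * send reschedule IPIs to the other CPUs still holding up the grace period,
 * so that they pass through a context switch, and hence a quiescent state,
 * sooner.
 */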
#ifdef CONFIG_SMP
static void force_quiescent_state(struct rcu_data *rdp,
			struct rcu_ctrlblk *rcp)
{
	int cpu;
	cpumask_t cpumask;
	unsigned long flags;

	set_need_resched();
	spin_lock_irqsave(&rcp->lock, flags);
	if (unlikely(!rcp->signaled)) {
		rcp->signaled = 1;
		/*
		 * Don't send an IPI to ourselves.  With irqs disabled,
		 * rdp->cpu is the current cpu.
		 *
		 * cpu_online_map is updated by _cpu_down()
		 * using __stop_machine().  Since we are in an irqs-disabled
		 * section, __stop_machine() is not executing, hence
		 * cpu_online_map is stable.
		 *
		 * However, a cpu might have been offlined _just_ before
		 * we disabled irqs while entering here.
		 * And the rcu subsystem might not yet have handled the CPU_DEAD
		 * notification, leaving the offlined cpu's bit
		 * set in rcp->cpumask.
		 *
		 * Hence cpumask = (rcp->cpumask & cpu_online_map) to prevent
		 * sending smp_send_reschedule() to an offlined CPU.
		 */
		cpus_and(cpumask, rcp->cpumask, cpu_online_map);
		cpu_clear(rdp->cpu, cpumask);
		for_each_cpu_mask_nr(cpu, cpumask)
			smp_send_reschedule(cpu);
	}
	spin_unlock_irqrestore(&rcp->lock, flags);
}
#else
static inline void force_quiescent_state(struct rcu_data *rdp,
			struct rcu_ctrlblk *rcp)
{
	set_need_resched();
}
#endif

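/*
 * A CPU's pending callbacks live on rdp->nxtlist, a singly linked list
 * split into segments by the nxttail[0..2] pointers: new callbacks are
 * always appended at *nxttail[2], and the segment [nxtlist, *nxttail[0])
 * holds callbacks whose grace period has already elapsed and which are
 * therefore ready to be moved to rdp->donelist.  See the struct rcu_data
 * comment (include/linux/rcuclassic.h) for the precise invariants.
 */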
static void __call_rcu(struct rcu_head *head, struct rcu_ctrlblk *rcp,
				struct rcu_data *rdp)
{
	long batch;

	head->next = NULL;
	smp_mb(); /* Read of rcp->cur must happen after any change by caller. */

	/*
	 * Determine the batch number of this callback.
	 *
	 * Using ACCESS_ONCE to avoid the following error when gcc eliminates
	 * the local variable "batch" and emits code like this:
	 *   1) rdp->batch = rcp->cur + 1		# gets old value
	 *   ......
	 *   2) rcu_batch_after(rcp->cur + 1, rdp->batch)	# gets new value
	 * In that case [*nxttail[0], *nxttail[1]) may contain callbacks
	 * whose batch# == rdp->batch, violating the invariants described
	 * in the struct rcu_data comment.
	 */
	batch = ACCESS_ONCE(rcp->cur) + 1;

	if (rdp->nxtlist && rcu_batch_after(batch, rdp->batch)) {
		/* process callbacks */
		rdp->nxttail[0] = rdp->nxttail[1];
		rdp->nxttail[1] = rdp->nxttail[2];
		if (rcu_batch_after(batch - 1, rdp->batch))
			rdp->nxttail[0] = rdp->nxttail[2];
	}

	rdp->batch = batch;
	*rdp->nxttail[2] = head;
	rdp->nxttail[2] = &head->next;

	if (unlikely(++rdp->qlen > qhimark)) {
		rdp->blimit = INT_MAX;
		force_quiescent_state(rdp, &rcu_ctrlblk);
	}
}

#ifdef CONFIG_RCU_CPU_STALL_DETECTOR

static void record_gp_stall_check_time(struct rcu_ctrlblk *rcp)
{
	rcp->gp_start = jiffies;
	rcp->jiffies_stall = jiffies + RCU_SECONDS_TILL_STALL_CHECK;
}

static void print_other_cpu_stall(struct rcu_ctrlblk *rcp)
{
	int cpu;
	long delta;
	unsigned long flags;

	/* Only let one CPU complain about others per time interval. */

	spin_lock_irqsave(&rcp->lock, flags);
	delta = jiffies - rcp->jiffies_stall;
	if (delta < 2 || rcp->cur != rcp->completed) {
		spin_unlock_irqrestore(&rcp->lock, flags);
		return;
	}
	rcp->jiffies_stall = jiffies + RCU_SECONDS_TILL_STALL_RECHECK;
	spin_unlock_irqrestore(&rcp->lock, flags);

	/* OK, time to rat on our buddy... */

	printk(KERN_ERR "RCU detected CPU stalls:");
	for_each_possible_cpu(cpu) {
		if (cpu_isset(cpu, rcp->cpumask))
			printk(" %d", cpu);
	}
	printk(" (detected by %d, t=%ld jiffies)\n",
	       smp_processor_id(), (long)(jiffies - rcp->gp_start));
}

static void print_cpu_stall(struct rcu_ctrlblk *rcp)
{
	unsigned long flags;

	printk(KERN_ERR "RCU detected CPU %d stall (t=%lu/%lu jiffies)\n",
			smp_processor_id(), jiffies,
			jiffies - rcp->gp_start);
	dump_stack();
	spin_lock_irqsave(&rcp->lock, flags);
	if ((long)(jiffies - rcp->jiffies_stall) >= 0)
		rcp->jiffies_stall =
			jiffies + RCU_SECONDS_TILL_STALL_RECHECK;
	spin_unlock_irqrestore(&rcp->lock, flags);
	set_need_resched();  /* kick ourselves to get things going. */
}

static void check_cpu_stall(struct rcu_ctrlblk *rcp)
{
	long delta;

	delta = jiffies - rcp->jiffies_stall;
	if (cpu_isset(smp_processor_id(), rcp->cpumask) && delta >= 0) {

		/* We haven't checked in, so go dump stack. */
		print_cpu_stall(rcp);

	} else if (rcp->cur != rcp->completed && delta >= 2) {

		/* They had a couple of jiffies to dump stack, so complain. */
		print_other_cpu_stall(rcp);
	}
}

#else /* #ifdef CONFIG_RCU_CPU_STALL_DETECTOR */

static void record_gp_stall_check_time(struct rcu_ctrlblk *rcp)
{
}

static inline void check_cpu_stall(struct rcu_ctrlblk *rcp)
{
}

#endif /* #else #ifdef CONFIG_RCU_CPU_STALL_DETECTOR */

/**
 * call_rcu - Queue an RCU callback for invocation after a grace period.
 * @head: structure to be used for queueing the RCU updates.
 * @func: actual update function to be invoked after the grace period
 *
 * The update function will be invoked some time after a full grace
 * period elapses, in other words after all currently executing RCU
 * read-side critical sections have completed.  RCU read-side critical
 * sections are delimited by rcu_read_lock() and rcu_read_unlock(),
 * and may be nested.
 */
void call_rcu(struct rcu_head *head,
				void (*func)(struct rcu_head *rcu))
{
	unsigned long flags;

	head->func = func;
	local_irq_save(flags);
	__call_rcu(head, &rcu_ctrlblk, &__get_cpu_var(rcu_data));
	local_irq_restore(flags);
}
EXPORT_SYMBOL_GPL(call_rcu);
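
/*
 * Illustrative sketch, not part of the original file: the usual call_rcu()
 * usage pattern.  A data structure embeds a struct rcu_head, the updater
 * publishes a new version with rcu_assign_pointer(), and the old version is
 * freed from the callback once a grace period has elapsed.  "struct foo",
 * foo_reclaim() and foo_replace() are hypothetical names used only for this
 * example; kfree() would additionally need <linux/slab.h>.
 */
#if 0
struct foo {
	int data;
	struct rcu_head rcu;
};

static void foo_reclaim(struct rcu_head *head)
{
	struct foo *fp = container_of(head, struct foo, rcu);

	kfree(fp);
}

/* Caller holds the update-side lock protecting *slot. */
static void foo_replace(struct foo **slot, struct foo *new_fp)
{
	struct foo *old_fp = *slot;

	rcu_assign_pointer(*slot, new_fp);
	call_rcu(&old_fp->rcu, foo_reclaim);
}
#endif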

/**
 * call_rcu_bh - Queue an RCU callback for invocation after a quicker grace period.
 * @head: structure to be used for queueing the RCU updates.
 * @func: actual update function to be invoked after the grace period
 *
 * The update function will be invoked some time after a full grace
 * period elapses, in other words after all currently executing RCU
 * read-side critical sections have completed.  call_rcu_bh() assumes
 * that the read-side critical sections end on completion of a softirq
 * handler.  This means that read-side critical sections in process
 * context must not be interrupted by softirqs.  This interface is to be
 * used when most of the read-side critical sections are in softirq context.
 * RCU read-side critical sections are delimited by rcu_read_lock() and
 * rcu_read_unlock(), if in interrupt context, or by rcu_read_lock_bh()
 * and rcu_read_unlock_bh(), if in process context.  These may be nested.
 */
void call_rcu_bh(struct rcu_head *head,
				void (*func)(struct rcu_head *rcu))
{
	unsigned long flags;

	head->func = func;
	local_irq_save(flags);
	__call_rcu(head, &rcu_bh_ctrlblk, &__get_cpu_var(rcu_bh_data));
	local_irq_restore(flags);
}
EXPORT_SYMBOL_GPL(call_rcu_bh);

/*
 * Return the number of RCU batches processed thus far.  Useful
 * for debug and statistics.
 */
long rcu_batches_completed(void)
{
	return rcu_ctrlblk.completed;
}
EXPORT_SYMBOL_GPL(rcu_batches_completed);

/*
 * Return the number of RCU bottom-half (rcu_bh) batches processed thus
 * far.  Useful for debug and statistics.
 */
long rcu_batches_completed_bh(void)
{
	return rcu_bh_ctrlblk.completed;
}
EXPORT_SYMBOL_GPL(rcu_batches_completed_bh);

/* Raises the softirq for processing rcu_callbacks. */
static inline void raise_rcu_softirq(void)
{
	raise_softirq(RCU_SOFTIRQ);
}

/*
 * Invoke the completed RCU callbacks.  They are expected to be in
 * a per-cpu list.
 */
static void rcu_do_batch(struct rcu_data *rdp)
{
	unsigned long flags;
	struct rcu_head *next, *list;
	int count = 0;

	list = rdp->donelist;
	while (list) {
		next = list->next;
		prefetch(next);
		list->func(list);
		list = next;
		if (++count >= rdp->blimit)
			break;
	}
	rdp->donelist = list;

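	/*
	 * The callbacks above ran with interrupts enabled, so update ->qlen
	 * with interrupts disabled: __call_rcu() running from an interrupt
	 * handler on this CPU also modifies ->qlen.
	 */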
	local_irq_save(flags);
	rdp->qlen -= count;
	local_irq_restore(flags);
	if (rdp->blimit == INT_MAX && rdp->qlen <= qlowmark)
		rdp->blimit = blimit;

	if (!rdp->donelist)
		rdp->donetail = &rdp->donelist;
	else
		raise_rcu_softirq();
}

/*
 * Grace period handling:
 * The grace period handling consists of two steps:
 * - A new grace period is started.
 *   This is done by rcu_start_batch.  The start is not broadcast to
 *   all cpus; they must pick it up by comparing rcp->cur with
 *   rdp->quiescbatch.  All cpus are recorded in the
 *   rcu_ctrlblk.cpumask bitmap.
 * - All cpus must go through a quiescent state.
 *   Since the start of the grace period is not broadcast, at least two
 *   calls to rcu_check_quiescent_state are required:
 *   The first call just notices that a new grace period is running.  The
 *   following calls check whether there was a quiescent state since the
 *   beginning of the grace period.  If so, they clear the cpu from
 *   rcu_ctrlblk.cpumask.  If the bitmap is empty, the grace period is
 *   completed, and cpu_quiet() calls rcu_start_batch() to start the next
 *   grace period (if necessary).
 */

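/*
 * Illustrative walk-through (not in the original file), starting from the
 * initial counter value of -300: a call_rcu() on some CPU eventually bumps
 * rcp->pending to -299 from __rcu_process_callbacks(); rcu_start_batch()
 * then advances rcp->cur to -299 and seeds rcp->cpumask with the online
 * CPUs that are not in nohz idle.  Each of those CPUs notices
 * rdp->quiescbatch != rcp->cur, arms qs_pending, and after passing through
 * a quiescent state clears itself from rcp->cpumask via cpu_quiet().  When
 * the mask empties, rcp->completed becomes -299 and the callbacks queued
 * for that batch become eligible for invocation.
 */
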
/*
 * Register a new batch of callbacks, and start it up if there is currently no
 * active batch and the batch to be registered has not already occurred.
 * Caller must hold rcu_ctrlblk.lock.
 */
static void rcu_start_batch(struct rcu_ctrlblk *rcp)
{
	if (rcp->cur != rcp->pending &&
			rcp->completed == rcp->cur) {
		rcp->cur++;
		record_gp_stall_check_time(rcp);

		/*
		 * Accessing nohz_cpu_mask before incrementing rcp->cur needs
		 * a barrier.  Otherwise it can cause tickless idle CPUs to be
		 * included in rcp->cpumask, which will extend grace periods
		 * unnecessarily.
		 */
		smp_mb();
		cpus_andnot(rcp->cpumask, cpu_online_map, nohz_cpu_mask);

		rcp->signaled = 0;
	}
}

/*
 * cpu went through a quiescent state since the beginning of the grace period.
 * Clear it from the cpu mask and complete the grace period if it was the last
 * cpu.  Start another grace period if someone has further entries pending.
 */
static void cpu_quiet(int cpu, struct rcu_ctrlblk *rcp)
{
	cpu_clear(cpu, rcp->cpumask);
	if (cpus_empty(rcp->cpumask)) {
		/* batch completed ! */
		rcp->completed = rcp->cur;
		rcu_start_batch(rcp);
	}
}

/*
 * Check if the cpu has gone through a quiescent state (say context
 * switch).  If so, and if it hasn't already done so in this RCU
 * quiescent cycle, then indicate that it has done so.
 */
static void rcu_check_quiescent_state(struct rcu_ctrlblk *rcp,
					struct rcu_data *rdp)
{
	unsigned long flags;

	if (rdp->quiescbatch != rcp->cur) {
		/* start new grace period: */
		rdp->qs_pending = 1;
		rdp->passed_quiesc = 0;
		rdp->quiescbatch = rcp->cur;
		return;
	}

	/* Grace period already completed for this cpu?
	 * qs_pending is checked instead of the actual bitmap to avoid
	 * cacheline thrashing.
	 */
	if (!rdp->qs_pending)
		return;

	/*
	 * Was there a quiescent state since the beginning of the grace
	 * period?  If not, then exit and wait for the next call.
	 */
	if (!rdp->passed_quiesc)
		return;
	rdp->qs_pending = 0;

	spin_lock_irqsave(&rcp->lock, flags);
	/*
	 * rdp->quiescbatch/rcp->cur and the cpu bitmap can come out of sync
	 * during cpu startup.  Ignore the quiescent state.
	 */
	if (likely(rdp->quiescbatch == rcp->cur))
		cpu_quiet(rdp->cpu, rcp);

	spin_unlock_irqrestore(&rcp->lock, flags);
}


#ifdef CONFIG_HOTPLUG_CPU

/* Warning! Helper for rcu_offline_cpu.  Do not use elsewhere without reviewing
 * the locking requirements: the list it's pulling from has to belong to a cpu
 * which is dead and hence not processing interrupts.
 */
static void rcu_move_batch(struct rcu_data *this_rdp, struct rcu_head *list,
				struct rcu_head **tail, long batch)
{
	unsigned long flags;

	if (list) {
		local_irq_save(flags);
		this_rdp->batch = batch;
		*this_rdp->nxttail[2] = list;
		this_rdp->nxttail[2] = tail;
		local_irq_restore(flags);
	}
}

static void __rcu_offline_cpu(struct rcu_data *this_rdp,
				struct rcu_ctrlblk *rcp, struct rcu_data *rdp)
{
	unsigned long flags;

	/*
	 * If the cpu going offline owns the grace period,
	 * we can block indefinitely waiting for it, so flush
	 * it here.
	 */
	spin_lock_irqsave(&rcp->lock, flags);
	if (rcp->cur != rcp->completed)
		cpu_quiet(rdp->cpu, rcp);
	rcu_move_batch(this_rdp, rdp->donelist, rdp->donetail, rcp->cur + 1);
	rcu_move_batch(this_rdp, rdp->nxtlist, rdp->nxttail[2], rcp->cur + 1);
	spin_unlock(&rcp->lock);

	this_rdp->qlen += rdp->qlen;
	local_irq_restore(flags);
}

static void rcu_offline_cpu(int cpu)
{
	struct rcu_data *this_rdp = &get_cpu_var(rcu_data);
	struct rcu_data *this_bh_rdp = &get_cpu_var(rcu_bh_data);

	__rcu_offline_cpu(this_rdp, &rcu_ctrlblk,
			  &per_cpu(rcu_data, cpu));
	__rcu_offline_cpu(this_bh_rdp, &rcu_bh_ctrlblk,
			  &per_cpu(rcu_bh_data, cpu));
	put_cpu_var(rcu_data);
	put_cpu_var(rcu_bh_data);
}

#else

static void rcu_offline_cpu(int cpu)
{
}

#endif


/*
 * This does the RCU processing work from softirq context.
 */
static void __rcu_process_callbacks(struct rcu_ctrlblk *rcp,
					struct rcu_data *rdp)
{
	unsigned long flags;
	long completed_snap;

	if (rdp->nxtlist) {
		local_irq_save(flags);
		completed_snap = ACCESS_ONCE(rcp->completed);

		/*
		 * Temporarily move the other grace-period-completed entries
		 * into [rdp->nxtlist, *rdp->nxttail[0]).
		 */
		if (!rcu_batch_before(completed_snap, rdp->batch))
			rdp->nxttail[0] = rdp->nxttail[1] = rdp->nxttail[2];
		else if (!rcu_batch_before(completed_snap, rdp->batch - 1))
			rdp->nxttail[0] = rdp->nxttail[1];

		/*
		 * The grace period for the entries in
		 * [rdp->nxtlist, *rdp->nxttail[0]) has completed, so
		 * move these entries to the donelist.
		 */
		if (rdp->nxttail[0] != &rdp->nxtlist) {
			*rdp->donetail = rdp->nxtlist;
			rdp->donetail = rdp->nxttail[0];
			rdp->nxtlist = *rdp->nxttail[0];
			*rdp->donetail = NULL;

			if (rdp->nxttail[1] == rdp->nxttail[0])
				rdp->nxttail[1] = &rdp->nxtlist;
			if (rdp->nxttail[2] == rdp->nxttail[0])
				rdp->nxttail[2] = &rdp->nxtlist;
			rdp->nxttail[0] = &rdp->nxtlist;
		}

		local_irq_restore(flags);

		if (rcu_batch_after(rdp->batch, rcp->pending)) {
			unsigned long flags2;

			/* and start it/schedule start if it's a new batch */
			spin_lock_irqsave(&rcp->lock, flags2);
			if (rcu_batch_after(rdp->batch, rcp->pending)) {
				rcp->pending = rdp->batch;
				rcu_start_batch(rcp);
			}
			spin_unlock_irqrestore(&rcp->lock, flags2);
		}
	}

	rcu_check_quiescent_state(rcp, rdp);
	if (rdp->donelist)
		rcu_do_batch(rdp);
}

static void rcu_process_callbacks(struct softirq_action *unused)
{
	/*
	 * Memory references from any prior RCU read-side critical sections
	 * executed by the interrupted code must be seen before any RCU
	 * grace-period manipulations below.
	 */

	smp_mb(); /* See above block comment. */

	__rcu_process_callbacks(&rcu_ctrlblk, &__get_cpu_var(rcu_data));
	__rcu_process_callbacks(&rcu_bh_ctrlblk, &__get_cpu_var(rcu_bh_data));

	/*
	 * Memory references from any later RCU read-side critical sections
	 * executed by the interrupted code must be seen after any RCU
	 * grace-period manipulations above.
	 */

	smp_mb(); /* See above block comment. */
}

static int __rcu_pending(struct rcu_ctrlblk *rcp, struct rcu_data *rdp)
{
	/* Check for CPU stalls, if enabled. */
	check_cpu_stall(rcp);

	if (rdp->nxtlist) {
		long completed_snap = ACCESS_ONCE(rcp->completed);

		/*
		 * This cpu has pending rcu entries and the grace period
		 * for them has completed.
		 */
		if (!rcu_batch_before(completed_snap, rdp->batch))
			return 1;
		if (!rcu_batch_before(completed_snap, rdp->batch - 1) &&
				rdp->nxttail[0] != rdp->nxttail[1])
			return 1;
		if (rdp->nxttail[0] != &rdp->nxtlist)
			return 1;

		/*
		 * This cpu has pending rcu entries and the new batch
		 * for them has not been started or scheduled to start.
		 */
		if (rcu_batch_after(rdp->batch, rcp->pending))
			return 1;
	}

	/* This cpu has finished callbacks to invoke */
	if (rdp->donelist)
		return 1;

	/* The rcu core waits for a quiescent state from the cpu */
	if (rdp->quiescbatch != rcp->cur || rdp->qs_pending)
		return 1;

	/* nothing to do */
	return 0;
}

/*
 * Check to see if there is any immediate RCU-related work to be done
 * by the current CPU, returning 1 if so.  This function is part of the
 * RCU implementation; it is -not- an exported member of the RCU API.
 */
int rcu_pending(int cpu)
{
	return __rcu_pending(&rcu_ctrlblk, &per_cpu(rcu_data, cpu)) ||
		__rcu_pending(&rcu_bh_ctrlblk, &per_cpu(rcu_bh_data, cpu));
}

/*
 * Check to see if any future RCU-related work will need to be done
 * by the current CPU, even if none need be done immediately, returning
 * 1 if so.  This function is part of the RCU implementation; it is -not-
 * an exported member of the RCU API.
 */
int rcu_needs_cpu(int cpu)
{
	struct rcu_data *rdp = &per_cpu(rcu_data, cpu);
	struct rcu_data *rdp_bh = &per_cpu(rcu_bh_data, cpu);

	return !!rdp->nxtlist || !!rdp_bh->nxtlist || rcu_pending(cpu);
}
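
/*
 * Note (not in the original file): rcu_pending() is consulted from the
 * scheduler-tick path to decide whether rcu_check_callbacks() needs to run,
 * and rcu_needs_cpu() is consulted by the nohz code before it stops the
 * per-CPU tick, so that a CPU with queued callbacks keeps ticking until
 * they have been invoked.
 */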

/*
 * Top-level function driving RCU grace-period detection, normally
 * invoked from the scheduler-clock interrupt.  This function simply
 * increments counters that are read only from softirq by this same
 * CPU, so there are no memory barriers required.
 */
void rcu_check_callbacks(int cpu, int user)
{
	if (user ||
	    (idle_cpu(cpu) && !in_softirq() &&
				hardirq_count() <= (1 << HARDIRQ_SHIFT))) {

		/*
		 * Get here if this CPU took its interrupt from user
		 * mode or from the idle loop, and if this is not a
		 * nested interrupt.  In this case, the CPU is in
		 * a quiescent state, so count it.
		 *
		 * Also do a memory barrier.  This is needed to handle
		 * the case where writes from a preempt-disable section
		 * of code get reordered into schedule() by this CPU's
		 * write buffer.  The memory barrier makes sure that
		 * the rcu_qsctr_inc() and rcu_bh_qsctr_inc() are seen
		 * by other CPUs to happen after any such write.
		 */

		smp_mb();  /* See above block comment. */
		rcu_qsctr_inc(cpu);
		rcu_bh_qsctr_inc(cpu);

	} else if (!in_softirq()) {

		/*
		 * Get here if this CPU did not take its interrupt from
		 * softirq, in other words, if it is not interrupting
		 * an rcu_bh read-side critical section.  This is therefore
		 * a quiescent state for rcu_bh, so count it.  The memory
		 * barrier is needed for the same reason as the above one.
		 */

		smp_mb();  /* See above block comment. */
		rcu_bh_qsctr_inc(cpu);
	}
	raise_rcu_softirq();
}
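
/*
 * Note (not in the original file): rcu_check_callbacks() is invoked from
 * update_process_times() in the scheduler-tick path, with "user" indicating
 * whether the tick interrupted user mode.  rcu_qsctr_inc() and
 * rcu_bh_qsctr_inc() (include/linux/rcuclassic.h) simply set the per-CPU
 * ->passed_quiesc flag that rcu_check_quiescent_state() above turns into a
 * cpu_quiet() call.
 */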

static void rcu_init_percpu_data(int cpu, struct rcu_ctrlblk *rcp,
						struct rcu_data *rdp)
{
	unsigned long flags;

	spin_lock_irqsave(&rcp->lock, flags);
	memset(rdp, 0, sizeof(*rdp));
	rdp->nxttail[0] = rdp->nxttail[1] = rdp->nxttail[2] = &rdp->nxtlist;
	rdp->donetail = &rdp->donelist;
	rdp->quiescbatch = rcp->completed;
	rdp->qs_pending = 0;
	rdp->cpu = cpu;
	rdp->blimit = blimit;
	spin_unlock_irqrestore(&rcp->lock, flags);
}

static void __cpuinit rcu_online_cpu(int cpu)
{
	struct rcu_data *rdp = &per_cpu(rcu_data, cpu);
	struct rcu_data *bh_rdp = &per_cpu(rcu_bh_data, cpu);

	rcu_init_percpu_data(cpu, &rcu_ctrlblk, rdp);
	rcu_init_percpu_data(cpu, &rcu_bh_ctrlblk, bh_rdp);
	open_softirq(RCU_SOFTIRQ, rcu_process_callbacks);
}

static int __cpuinit rcu_cpu_notify(struct notifier_block *self,
				unsigned long action, void *hcpu)
{
	long cpu = (long)hcpu;

	switch (action) {
	case CPU_UP_PREPARE:
	case CPU_UP_PREPARE_FROZEN:
		rcu_online_cpu(cpu);
		break;
	case CPU_DEAD:
	case CPU_DEAD_FROZEN:
		rcu_offline_cpu(cpu);
		break;
	default:
		break;
	}
	return NOTIFY_OK;
}

static struct notifier_block __cpuinitdata rcu_nb = {
	.notifier_call	= rcu_cpu_notify,
};

/*
 * Initializes the RCU mechanism.  Assumed to be called early, that is,
 * before the local timer (SMP) or the jiffies timer (uniprocessor) is
 * set up.  Note that rcu_qsctr and friends are implicitly
 * initialized due to the choice of ``0'' for RCU_CTR_INVALID.
 */
void __init __rcu_init(void)
{
#ifdef CONFIG_RCU_CPU_STALL_DETECTOR
	printk(KERN_INFO "RCU-based detection of stalled CPUs is enabled.\n");
#endif /* #ifdef CONFIG_RCU_CPU_STALL_DETECTOR */
	rcu_cpu_notify(&rcu_nb, CPU_UP_PREPARE,
			(void *)(long)smp_processor_id());
	/* Register notifier for non-boot CPUs */
	register_cpu_notifier(&rcu_nb);
}

module_param(blimit, int, 0);
module_param(qhimark, int, 0);
module_param(qlowmark, int, 0);
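
/*
 * Note (not in the original file): since this code is built into the kernel,
 * the three parameters above are expected to be settable on the kernel
 * command line with a "rcuclassic." prefix, e.g. rcuclassic.blimit=20;
 * with permissions of 0 they do not appear under /sys/module.
 */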