kernel/rcutiny_plugin.h

/*
 * Read-Copy Update mechanism for mutual exclusion, the Bloatwatch edition
 * Internal non-public definitions that provide either classic
 * or preemptible semantics.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
 *
 * Copyright (c) 2010 Linaro
 *
 * Author: Paul E. McKenney <paulmck@linux.vnet.ibm.com>
 */

#include <linux/kthread.h>
#include <linux/module.h>
#include <linux/debugfs.h>
#include <linux/seq_file.h>

/* Global control variables for rcupdate callback mechanism. */
struct rcu_ctrlblk {
	struct rcu_head *rcucblist;	/* List of pending callbacks (CBs). */
	struct rcu_head **donetail;	/* ->next pointer of last "done" CB. */
	struct rcu_head **curtail;	/* ->next pointer of last CB. */
	RCU_TRACE(long qlen);		/* Number of pending CBs. */
	RCU_TRACE(char *name);		/* Name of RCU type. */
};
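
/*
 * For illustration, one possible layout of the above list: with CB1 and
 * CB2 already past their grace period and CB3 still waiting for one,
 *
 *	rcucblist --> CB1 --> CB2 --> CB3 --> NULL
 *	                       ^       ^
 *	                   donetail   curtail
 *	              (&CB2->next)  (&CB3->next)
 *
 * When no callbacks are queued, both tail pointers reference the
 * ->rcucblist field itself, so enqueuing is always the same two-step
 * pointer update regardless of list state.
 */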

/* Definition for rcupdate control block. */
static struct rcu_ctrlblk rcu_sched_ctrlblk = {
	.donetail	= &rcu_sched_ctrlblk.rcucblist,
	.curtail	= &rcu_sched_ctrlblk.rcucblist,
	RCU_TRACE(.name = "rcu_sched")
};

static struct rcu_ctrlblk rcu_bh_ctrlblk = {
	.donetail	= &rcu_bh_ctrlblk.rcucblist,
	.curtail	= &rcu_bh_ctrlblk.rcucblist,
	RCU_TRACE(.name = "rcu_bh")
};

#ifdef CONFIG_DEBUG_LOCK_ALLOC
int rcu_scheduler_active __read_mostly;
EXPORT_SYMBOL_GPL(rcu_scheduler_active);
#endif /* #ifdef CONFIG_DEBUG_LOCK_ALLOC */

#ifdef CONFIG_TINY_PREEMPT_RCU

#include <linux/delay.h>

/* Global control variables for preemptible RCU. */
struct rcu_preempt_ctrlblk {
	struct rcu_ctrlblk rcb;	/* curtail: ->next ptr of last CB for GP. */
	struct rcu_head **nexttail;
				/* Tasks blocked in a preemptible RCU */
				/*  read-side critical section while a */
				/*  preemptible-RCU grace period is in */
				/*  progress must wait for a later grace */
				/*  period.  This pointer points to the */
				/*  ->next pointer of the last task that */
				/*  must wait for a later grace period, or */
				/*  to &->rcb.rcucblist if there is no */
				/*  such task. */
	struct list_head blkd_tasks;
				/* Tasks blocked in RCU read-side critical */
				/*  section.  Tasks are placed at the head */
				/*  of this list and age towards the tail. */
	struct list_head *gp_tasks;
				/* Pointer to the first task blocking the */
				/*  current grace period, or NULL if there */
				/*  is no such task. */
	struct list_head *exp_tasks;
				/* Pointer to first task blocking the */
				/*  current expedited grace period, or NULL */
				/*  if there is no such task.  If there */
				/*  is no current expedited grace period, */
				/*  then there cannot be any such task. */
#ifdef CONFIG_RCU_BOOST
	struct list_head *boost_tasks;
				/* Pointer to first task that needs to be */
				/*  priority-boosted, or NULL if no priority */
				/*  boosting is needed.  If there is no */
				/*  current or expedited grace period, there */
				/*  can be no such task. */
#endif /* #ifdef CONFIG_RCU_BOOST */
	u8 gpnum;		/* Current grace period. */
	u8 gpcpu;		/* Last grace period blocked by the CPU. */
	u8 completed;		/* Last grace period completed. */
				/*  If all three are equal, RCU is idle. */
#ifdef CONFIG_RCU_BOOST
	unsigned long boost_time; /* When to start boosting (jiffies). */
#endif /* #ifdef CONFIG_RCU_BOOST */
#ifdef CONFIG_RCU_TRACE
	unsigned long n_grace_periods;
#ifdef CONFIG_RCU_BOOST
	unsigned long n_tasks_boosted;
				/* Total number of tasks boosted. */
	unsigned long n_exp_boosts;
				/* Number of tasks boosted for expedited GP. */
	unsigned long n_normal_boosts;
				/* Number of tasks boosted for normal GP. */
	unsigned long n_balk_blkd_tasks;
				/* Refused to boost: no blocked tasks. */
	unsigned long n_balk_exp_gp_tasks;
				/* Refused to boost: nothing blocking GP. */
	unsigned long n_balk_boost_tasks;
				/* Refused to boost: already boosting. */
	unsigned long n_balk_notyet;
				/* Refused to boost: not yet time. */
	unsigned long n_balk_nos;
				/* Refused to boost: not sure why, though. */
				/*  This can happen due to race conditions. */
#endif /* #ifdef CONFIG_RCU_BOOST */
#endif /* #ifdef CONFIG_RCU_TRACE */
};
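
/*
 * For illustration, one possible state of ->blkd_tasks, with tasks
 * entering at the head and aging toward the tail:
 *
 *	head --> T4 --> T3 --> T2 --> T1 --> tail
 *	                 ^             ^
 *	             gp_tasks      exp_tasks
 *
 * Every task from ->gp_tasks through the tail blocks the current grace
 * period, and every task from ->exp_tasks through the tail blocks the
 * current expedited grace period; a NULL pointer means no such blocker.
 */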

static struct rcu_preempt_ctrlblk rcu_preempt_ctrlblk = {
	.rcb.donetail = &rcu_preempt_ctrlblk.rcb.rcucblist,
	.rcb.curtail = &rcu_preempt_ctrlblk.rcb.rcucblist,
	.nexttail = &rcu_preempt_ctrlblk.rcb.rcucblist,
	.blkd_tasks = LIST_HEAD_INIT(rcu_preempt_ctrlblk.blkd_tasks),
	RCU_TRACE(.rcb.name = "rcu_preempt")
};

static int rcu_preempted_readers_exp(void);
static void rcu_report_exp_done(void);

/*
 * Return true if the CPU has not yet responded to the current grace period.
 */
static int rcu_cpu_blocking_cur_gp(void)
{
	return rcu_preempt_ctrlblk.gpcpu != rcu_preempt_ctrlblk.gpnum;
}

/*
 * Check for a running RCU reader.  Because there is only one CPU,
 * there can be but one running RCU reader at a time.  ;-)
 */
static int rcu_preempt_running_reader(void)
{
	return current->rcu_read_lock_nesting;
}

/*
 * Check for preempted RCU readers blocking any grace period.
 * If the caller needs a reliable answer, it must disable hard irqs.
 */
static int rcu_preempt_blocked_readers_any(void)
{
	return !list_empty(&rcu_preempt_ctrlblk.blkd_tasks);
}

/*
 * Check for preempted RCU readers blocking the current grace period.
 * If the caller needs a reliable answer, it must disable hard irqs.
 */
static int rcu_preempt_blocked_readers_cgp(void)
{
	return rcu_preempt_ctrlblk.gp_tasks != NULL;
}

/*
 * Return true if another preemptible-RCU grace period is needed.
 */
static int rcu_preempt_needs_another_gp(void)
{
	return *rcu_preempt_ctrlblk.rcb.curtail != NULL;
}

/*
 * Return true if a preemptible-RCU grace period is in progress.
 * The caller must disable hardirqs.
 */
static int rcu_preempt_gp_in_progress(void)
{
	return rcu_preempt_ctrlblk.completed != rcu_preempt_ctrlblk.gpnum;
}

/*
 * Advance a ->blkd_tasks-list pointer to the next entry, returning
 * NULL instead if at the end of the list.
 */
static struct list_head *rcu_next_node_entry(struct task_struct *t)
{
	struct list_head *np;

	np = t->rcu_node_entry.next;
	if (np == &rcu_preempt_ctrlblk.blkd_tasks)
		np = NULL;
	return np;
}

#ifdef CONFIG_RCU_TRACE

#ifdef CONFIG_RCU_BOOST
static void rcu_initiate_boost_trace(void);
#endif /* #ifdef CONFIG_RCU_BOOST */

/*
 * Dump additional statistics for TINY_PREEMPT_RCU.
 */
static void show_tiny_preempt_stats(struct seq_file *m)
{
	seq_printf(m, "rcu_preempt: qlen=%ld gp=%lu g%u/p%u/c%u tasks=%c%c%c\n",
		   rcu_preempt_ctrlblk.rcb.qlen,
		   rcu_preempt_ctrlblk.n_grace_periods,
		   rcu_preempt_ctrlblk.gpnum,
		   rcu_preempt_ctrlblk.gpcpu,
		   rcu_preempt_ctrlblk.completed,
		   "T."[list_empty(&rcu_preempt_ctrlblk.blkd_tasks)],
		   "N."[!rcu_preempt_ctrlblk.gp_tasks],
		   "E."[!rcu_preempt_ctrlblk.exp_tasks]);
#ifdef CONFIG_RCU_BOOST
	seq_printf(m, "%sttb=%c ntb=%lu neb=%lu nnb=%lu j=%04x bt=%04x\n",
		   "             ",
		   "B."[!rcu_preempt_ctrlblk.boost_tasks],
		   rcu_preempt_ctrlblk.n_tasks_boosted,
		   rcu_preempt_ctrlblk.n_exp_boosts,
		   rcu_preempt_ctrlblk.n_normal_boosts,
		   (int)(jiffies & 0xffff),
		   (int)(rcu_preempt_ctrlblk.boost_time & 0xffff));
	seq_printf(m, "%s: nt=%lu egt=%lu bt=%lu ny=%lu nos=%lu\n",
		   "    balk",
		   rcu_preempt_ctrlblk.n_balk_blkd_tasks,
		   rcu_preempt_ctrlblk.n_balk_exp_gp_tasks,
		   rcu_preempt_ctrlblk.n_balk_boost_tasks,
		   rcu_preempt_ctrlblk.n_balk_notyet,
		   rcu_preempt_ctrlblk.n_balk_nos);
#endif /* #ifdef CONFIG_RCU_BOOST */
}

#endif /* #ifdef CONFIG_RCU_TRACE */

#ifdef CONFIG_RCU_BOOST

#include "rtmutex_common.h"

#define RCU_BOOST_PRIO CONFIG_RCU_BOOST_PRIO

/* Controls for rcu_kthread() kthread. */
static struct task_struct *rcu_kthread_task;
static DECLARE_WAIT_QUEUE_HEAD(rcu_kthread_wq);
static unsigned long have_rcu_kthread_work;

/*
 * Carry out RCU priority boosting on the task indicated by ->boost_tasks,
 * and advance ->boost_tasks to the next task in the ->blkd_tasks list.
 */
static int rcu_boost(void)
{
	unsigned long flags;
	struct rt_mutex mtx;
	struct task_struct *t;
	struct list_head *tb;

	if (rcu_preempt_ctrlblk.boost_tasks == NULL &&
	    rcu_preempt_ctrlblk.exp_tasks == NULL)
		return 0;  /* Nothing to boost. */

	raw_local_irq_save(flags);

	/*
	 * Recheck with irqs disabled: all tasks in need of boosting
	 * might exit their RCU read-side critical sections on their own
	 * if we are preempted just before disabling irqs.
	 */
	if (rcu_preempt_ctrlblk.boost_tasks == NULL &&
	    rcu_preempt_ctrlblk.exp_tasks == NULL) {
		raw_local_irq_restore(flags);
		return 0;
	}

	/*
	 * Preferentially boost tasks blocking expedited grace periods.
	 * This cannot starve the normal grace periods because a second
	 * expedited grace period must boost all blocked tasks, including
	 * those blocking the pre-existing normal grace period.
	 */
	if (rcu_preempt_ctrlblk.exp_tasks != NULL) {
		tb = rcu_preempt_ctrlblk.exp_tasks;
		RCU_TRACE(rcu_preempt_ctrlblk.n_exp_boosts++);
	} else {
		tb = rcu_preempt_ctrlblk.boost_tasks;
		RCU_TRACE(rcu_preempt_ctrlblk.n_normal_boosts++);
	}
	RCU_TRACE(rcu_preempt_ctrlblk.n_tasks_boosted++);

	/*
	 * We boost task t by manufacturing an rt_mutex that appears to
	 * be held by task t.  We leave a pointer to that rt_mutex where
	 * task t can find it, and task t will release the mutex when it
	 * exits its outermost RCU read-side critical section.  Then
	 * simply acquiring this artificial rt_mutex will boost task
	 * t's priority.  (Thanks to tglx for suggesting this approach!)
	 */
	t = container_of(tb, struct task_struct, rcu_node_entry);
	rt_mutex_init_proxy_locked(&mtx, t);
	t->rcu_boost_mutex = &mtx;
	t->rcu_read_unlock_special |= RCU_READ_UNLOCK_BOOSTED;
	raw_local_irq_restore(flags);
	rt_mutex_lock(&mtx);
	rt_mutex_unlock(&mtx);  /* Keep lockdep happy. */

	return ACCESS_ONCE(rcu_preempt_ctrlblk.boost_tasks) != NULL ||
	       ACCESS_ONCE(rcu_preempt_ctrlblk.exp_tasks) != NULL;
}

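/*
 * For illustration, the sequence of events in a successful boost:
 *
 *	1. Reader task T is preempted within an RCU read-side critical
 *	   section and is queued on ->blkd_tasks.
 *	2. rcu_boost() proxy-locks the on-stack mtx on T's behalf via
 *	   rt_mutex_init_proxy_locked(), making T its apparent holder.
 *	3. The caller (rcu_kthread(), running SCHED_FIFO) then blocks in
 *	   rt_mutex_lock(&mtx), so priority inheritance boosts T.
 *	4. T eventually runs, exits its critical section, and releases
 *	   the mutex from rcu_read_unlock_special(), deboosting itself.
 */
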
/*
 * Check to see if it is now time to start boosting RCU readers blocking
 * the current grace period, and, if so, tell the rcu_kthread_task to
 * start boosting them.  If there is an expedited boost in progress,
 * we wait for it to complete.
 *
 * If there are no blocked readers blocking the current grace period,
 * return 0 to let the caller know, otherwise return 1.  Note that this
 * return value is independent of whether or not boosting was done.
 */
static int rcu_initiate_boost(void)
{
	if (!rcu_preempt_blocked_readers_cgp() &&
	    rcu_preempt_ctrlblk.exp_tasks == NULL) {
		RCU_TRACE(rcu_preempt_ctrlblk.n_balk_exp_gp_tasks++);
		return 0;
	}
	if (rcu_preempt_ctrlblk.exp_tasks != NULL ||
	    (rcu_preempt_ctrlblk.gp_tasks != NULL &&
	     rcu_preempt_ctrlblk.boost_tasks == NULL &&
	     ULONG_CMP_GE(jiffies, rcu_preempt_ctrlblk.boost_time))) {
		if (rcu_preempt_ctrlblk.exp_tasks == NULL)
			rcu_preempt_ctrlblk.boost_tasks =
				rcu_preempt_ctrlblk.gp_tasks;
		invoke_rcu_callbacks();
	} else
		RCU_TRACE(rcu_initiate_boost_trace());
	return 1;
}

#define RCU_BOOST_DELAY_JIFFIES DIV_ROUND_UP(CONFIG_RCU_BOOST_DELAY * HZ, 1000)
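
/*
 * Worked example (hypothetical values): CONFIG_RCU_BOOST_DELAY=500 with
 * HZ=100 gives DIV_ROUND_UP(500 * 100, 1000) = 50 jiffies, i.e. boosting
 * becomes eligible half a second after the grace period starts.
 */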

/*
 * Do priority-boost accounting for the start of a new grace period.
 */
static void rcu_preempt_boost_start_gp(void)
{
	rcu_preempt_ctrlblk.boost_time = jiffies + RCU_BOOST_DELAY_JIFFIES;
}

#else /* #ifdef CONFIG_RCU_BOOST */

/*
 * If there is no RCU priority boosting, we don't initiate boosting,
 * but we do indicate whether there are blocked readers blocking the
 * current grace period.
 */
static int rcu_initiate_boost(void)
{
	return rcu_preempt_blocked_readers_cgp();
}

/*
 * If there is no RCU priority boosting, nothing to do at grace-period start.
 */
static void rcu_preempt_boost_start_gp(void)
{
}

#endif /* #else #ifdef CONFIG_RCU_BOOST */

/*
 * Record a preemptible-RCU quiescent state for the specified CPU.  Note
 * that this just means that the task currently running on the CPU is
 * in a quiescent state.  There might be any number of tasks blocked
 * while in an RCU read-side critical section.
 *
 * Unlike the other rcu_*_qs() functions, callers to this function
 * must disable irqs in order to protect the assignment to
 * ->rcu_read_unlock_special.
 *
 * Because this is a single-CPU implementation, the only way a grace
 * period can end is if the CPU is in a quiescent state.  The reason is
 * that a blocked preemptible-RCU reader can exit its critical section
 * only if the CPU is running it at the time.  Therefore, when the
 * last task blocking the current grace period exits its RCU read-side
 * critical section, neither the CPU nor blocked tasks will be stopping
 * the current grace period.  (In contrast, SMP implementations
 * might have CPUs running in RCU read-side critical sections that
 * block later grace periods -- but this is not possible given only
 * one CPU.)
 */
static void rcu_preempt_cpu_qs(void)
{
	/* Record both CPU and task as having responded to current GP. */
	rcu_preempt_ctrlblk.gpcpu = rcu_preempt_ctrlblk.gpnum;
	current->rcu_read_unlock_special &= ~RCU_READ_UNLOCK_NEED_QS;

	/* If there is no GP then there is nothing more to do. */
	if (!rcu_preempt_gp_in_progress())
		return;
	/*
	 * Check up on boosting.  If there are readers blocking the
	 * current grace period, leave.
	 */
	if (rcu_initiate_boost())
		return;

	/* Advance callbacks. */
	rcu_preempt_ctrlblk.completed = rcu_preempt_ctrlblk.gpnum;
	rcu_preempt_ctrlblk.rcb.donetail = rcu_preempt_ctrlblk.rcb.curtail;
	rcu_preempt_ctrlblk.rcb.curtail = rcu_preempt_ctrlblk.nexttail;

	/* If there are no blocked readers, next GP is done instantly. */
	if (!rcu_preempt_blocked_readers_any())
		rcu_preempt_ctrlblk.rcb.donetail = rcu_preempt_ctrlblk.nexttail;

	/* If there are done callbacks, cause them to be invoked. */
	if (*rcu_preempt_ctrlblk.rcb.donetail != NULL)
		invoke_rcu_callbacks();
}

/*
 * Start a new RCU grace period if warranted.  Hard irqs must be disabled.
 */
static void rcu_preempt_start_gp(void)
{
	if (!rcu_preempt_gp_in_progress() && rcu_preempt_needs_another_gp()) {

		/* Official start of GP. */
		rcu_preempt_ctrlblk.gpnum++;
		RCU_TRACE(rcu_preempt_ctrlblk.n_grace_periods++);

		/* Any blocked RCU readers block new GP. */
		if (rcu_preempt_blocked_readers_any())
			rcu_preempt_ctrlblk.gp_tasks =
				rcu_preempt_ctrlblk.blkd_tasks.next;

		/* Set up for RCU priority boosting. */
		rcu_preempt_boost_start_gp();

		/* If there is no running reader, CPU is done with GP. */
		if (!rcu_preempt_running_reader())
			rcu_preempt_cpu_qs();
	}
}

/*
 * We have entered the scheduler, and the current task might soon be
 * context-switched away from.  If this task is in an RCU read-side
 * critical section, we will no longer be able to rely on the CPU to
 * record that fact, so we enqueue the task on the blkd_tasks list.
 * If the task started after the current grace period began, as recorded
 * by ->gpcpu, we enqueue at the beginning of the list.  Otherwise, we
 * enqueue before the element referenced by ->gp_tasks (or at the tail
 * if ->gp_tasks is NULL) and point ->gp_tasks at the newly added element.
 * The task will dequeue itself when it exits the outermost enclosing
 * RCU read-side critical section.  Therefore, the current grace period
 * cannot be permitted to complete until the ->gp_tasks pointer becomes
 * NULL.
 *
 * Caller must disable preemption.
 */
void rcu_preempt_note_context_switch(void)
{
	struct task_struct *t = current;
	unsigned long flags;

	local_irq_save(flags); /* must exclude scheduler_tick(). */
	if (rcu_preempt_running_reader() &&
	    (t->rcu_read_unlock_special & RCU_READ_UNLOCK_BLOCKED) == 0) {

		/* Possibly blocking in an RCU read-side critical section. */
		t->rcu_read_unlock_special |= RCU_READ_UNLOCK_BLOCKED;

		/*
		 * If this CPU has already checked in, then this task
		 * will hold up the next grace period rather than the
		 * current grace period.  Queue the task accordingly.
		 * If the task is queued for the current grace period
		 * (i.e., this CPU has not yet passed through a quiescent
		 * state for the current grace period), then as long
		 * as that task remains queued, the current grace period
		 * cannot end.
		 */
		list_add(&t->rcu_node_entry, &rcu_preempt_ctrlblk.blkd_tasks);
		if (rcu_cpu_blocking_cur_gp())
			rcu_preempt_ctrlblk.gp_tasks = &t->rcu_node_entry;
	}

	/*
	 * Either we were not in an RCU read-side critical section to
	 * begin with, or we have now recorded that critical section
	 * globally.  Either way, we can now note a quiescent state
	 * for this CPU.  Again, if we were in an RCU read-side critical
	 * section, and if that critical section was blocking the current
	 * grace period, then the fact that the task has been enqueued
	 * means that current grace period continues to be blocked.
	 */
	rcu_preempt_cpu_qs();
	local_irq_restore(flags);
}

/*
 * Tiny-preemptible RCU implementation for rcu_read_lock().
 * Just increment ->rcu_read_lock_nesting, shared state will be updated
 * if we block.
 */
void __rcu_read_lock(void)
{
	current->rcu_read_lock_nesting++;
	barrier();  /* needed if we ever invoke rcu_read_lock in rcutiny.c */
}
EXPORT_SYMBOL_GPL(__rcu_read_lock);

/*
 * Handle special cases during rcu_read_unlock(), such as needing to
 * notify RCU core processing or task having blocked during the RCU
 * read-side critical section.
 */
static void rcu_read_unlock_special(struct task_struct *t)
{
	int empty;
	int empty_exp;
	unsigned long flags;
	struct list_head *np;
	int special;

	/*
	 * NMI handlers cannot block and cannot safely manipulate state.
	 * They therefore cannot possibly be special, so just leave.
	 */
	if (in_nmi())
		return;

	local_irq_save(flags);

	/*
	 * If RCU core is waiting for this CPU to exit critical section,
	 * let it know that we have done so.
	 */
	special = t->rcu_read_unlock_special;
	if (special & RCU_READ_UNLOCK_NEED_QS)
		rcu_preempt_cpu_qs();

	/* Hardware IRQ handlers cannot block. */
	if (in_irq()) {
		local_irq_restore(flags);
		return;
	}

	/* Clean up if blocked during RCU read-side critical section. */
	if (special & RCU_READ_UNLOCK_BLOCKED) {
		t->rcu_read_unlock_special &= ~RCU_READ_UNLOCK_BLOCKED;

		/*
		 * Remove this task from the ->blkd_tasks list and adjust
		 * any pointers that might have been referencing it.
		 */
		empty = !rcu_preempt_blocked_readers_cgp();
		empty_exp = rcu_preempt_ctrlblk.exp_tasks == NULL;
		np = rcu_next_node_entry(t);
		list_del_init(&t->rcu_node_entry);
		if (&t->rcu_node_entry == rcu_preempt_ctrlblk.gp_tasks)
			rcu_preempt_ctrlblk.gp_tasks = np;
		if (&t->rcu_node_entry == rcu_preempt_ctrlblk.exp_tasks)
			rcu_preempt_ctrlblk.exp_tasks = np;
#ifdef CONFIG_RCU_BOOST
		if (&t->rcu_node_entry == rcu_preempt_ctrlblk.boost_tasks)
			rcu_preempt_ctrlblk.boost_tasks = np;
#endif /* #ifdef CONFIG_RCU_BOOST */

		/*
		 * If this was the last task on the current list, and if
		 * we aren't waiting on the CPU, report the quiescent state
		 * and start a new grace period if needed.
		 */
		if (!empty && !rcu_preempt_blocked_readers_cgp()) {
			rcu_preempt_cpu_qs();
			rcu_preempt_start_gp();
		}

		/*
		 * If this was the last task on the expedited lists,
		 * then we need to wake up the waiting task.
		 */
		if (!empty_exp && rcu_preempt_ctrlblk.exp_tasks == NULL)
			rcu_report_exp_done();
	}
#ifdef CONFIG_RCU_BOOST
	/* Unboost self if was boosted. */
	if (special & RCU_READ_UNLOCK_BOOSTED) {
		t->rcu_read_unlock_special &= ~RCU_READ_UNLOCK_BOOSTED;
		rt_mutex_unlock(t->rcu_boost_mutex);
		t->rcu_boost_mutex = NULL;
	}
#endif /* #ifdef CONFIG_RCU_BOOST */
	local_irq_restore(flags);
}

/*
 * Tiny-preemptible RCU implementation for rcu_read_unlock().
 * Decrement ->rcu_read_lock_nesting.  If the result is zero (outermost
 * rcu_read_unlock()) and ->rcu_read_unlock_special is non-zero, then
 * invoke rcu_read_unlock_special() to clean up after a context switch
 * in an RCU read-side critical section and other special cases.
 */
void __rcu_read_unlock(void)
{
	struct task_struct *t = current;

	barrier();  /* needed if we ever invoke rcu_read_unlock in rcutiny.c */
	--t->rcu_read_lock_nesting;
	barrier();  /* decrement before load of ->rcu_read_unlock_special */
	if (t->rcu_read_lock_nesting == 0 &&
	    unlikely(ACCESS_ONCE(t->rcu_read_unlock_special)))
		rcu_read_unlock_special(t);
#ifdef CONFIG_PROVE_LOCKING
	WARN_ON_ONCE(t->rcu_read_lock_nesting < 0);
#endif /* #ifdef CONFIG_PROVE_LOCKING */
}
EXPORT_SYMBOL_GPL(__rcu_read_unlock);
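
/*
 * For illustration, a typical reader (hypothetical names: gp,
 * do_something_with()):
 *
 *	rcu_read_lock();
 *	p = rcu_dereference(gp);
 *	if (p)
 *		do_something_with(p);
 *	rcu_read_unlock();
 *
 * On TINY_PREEMPT_RCU the lock/unlock pair reduces to the nesting-count
 * increment and decrement above, plus the special-case check on unlock.
 */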

/*
 * Check for a quiescent state from the current CPU.  When a task blocks,
 * the task is recorded in the rcu_preempt_ctrlblk structure, which is
 * checked elsewhere.  This is called from the scheduling-clock interrupt.
 *
 * Caller must disable hard irqs.
 */
static void rcu_preempt_check_callbacks(void)
{
	struct task_struct *t = current;

	if (rcu_preempt_gp_in_progress() &&
	    (!rcu_preempt_running_reader() ||
	     !rcu_cpu_blocking_cur_gp()))
		rcu_preempt_cpu_qs();
	if (&rcu_preempt_ctrlblk.rcb.rcucblist !=
	    rcu_preempt_ctrlblk.rcb.donetail)
		invoke_rcu_callbacks();
	if (rcu_preempt_gp_in_progress() &&
	    rcu_cpu_blocking_cur_gp() &&
	    rcu_preempt_running_reader())
		t->rcu_read_unlock_special |= RCU_READ_UNLOCK_NEED_QS;
}

/*
 * TINY_PREEMPT_RCU has an extra callback-list tail pointer to
 * update, so this is invoked from rcu_process_callbacks() to
 * handle that case.  Of course, it is invoked for all flavors of
 * RCU, but RCU callbacks can appear only on one of the lists, and
 * neither ->nexttail nor ->donetail can possibly be NULL, so there
 * is no need for an explicit check.
 */
static void rcu_preempt_remove_callbacks(struct rcu_ctrlblk *rcp)
{
	if (rcu_preempt_ctrlblk.nexttail == rcp->donetail)
		rcu_preempt_ctrlblk.nexttail = &rcp->rcucblist;
}

/*
 * Process callbacks for preemptible RCU.
 */
static void rcu_preempt_process_callbacks(void)
{
	__rcu_process_callbacks(&rcu_preempt_ctrlblk.rcb);
}

/*
 * Queue a preemptible-RCU callback for invocation after a grace period.
 */
void call_rcu(struct rcu_head *head, void (*func)(struct rcu_head *rcu))
{
	unsigned long flags;

	debug_rcu_head_queue(head);
	head->func = func;
	head->next = NULL;

	local_irq_save(flags);
	*rcu_preempt_ctrlblk.nexttail = head;
	rcu_preempt_ctrlblk.nexttail = &head->next;
	RCU_TRACE(rcu_preempt_ctrlblk.rcb.qlen++);
	rcu_preempt_start_gp();  /* checks to see if GP needed. */
	local_irq_restore(flags);
}
EXPORT_SYMBOL_GPL(call_rcu);
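
/*
 * For illustration, typical call_rcu() usage (hypothetical names:
 * struct foo, foo_free_rcu(), p):
 *
 *	struct foo {
 *		struct rcu_head rh;
 *		int data;
 *	};
 *
 *	static void foo_free_rcu(struct rcu_head *rh)
 *	{
 *		kfree(container_of(rh, struct foo, rh));
 *	}
 *
 *	call_rcu(&p->rh, foo_free_rcu);
 *
 * Here p is not freed until a full grace period has elapsed, by which
 * time no reader can still hold a reference to it.
 */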

/*
 * synchronize_rcu - wait until a grace period has elapsed.
 *
 * Control will return to the caller some time after a full grace
 * period has elapsed, in other words after all currently executing RCU
 * read-side critical sections have completed.  RCU read-side critical
 * sections are delimited by rcu_read_lock() and rcu_read_unlock(),
 * and may be nested.
 */
void synchronize_rcu(void)
{
#ifdef CONFIG_DEBUG_LOCK_ALLOC
	if (!rcu_scheduler_active)
		return;
#endif /* #ifdef CONFIG_DEBUG_LOCK_ALLOC */

	WARN_ON_ONCE(rcu_preempt_running_reader());
	if (!rcu_preempt_blocked_readers_any())
		return;

	/* Once we get past the fastpath checks, same code as rcu_barrier(). */
	rcu_barrier();
}
EXPORT_SYMBOL_GPL(synchronize_rcu);
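
/*
 * For illustration, the classic updater pattern built on this function
 * (hypothetical names: gp, gp_lock, new):
 *
 *	spin_lock(&gp_lock);
 *	old = gp;
 *	rcu_assign_pointer(gp, new);
 *	spin_unlock(&gp_lock);
 *	synchronize_rcu();
 *	kfree(old);
 *
 * The kfree() is safe because synchronize_rcu() does not return until
 * all readers that might have seen old have finished.
 */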

static DECLARE_WAIT_QUEUE_HEAD(sync_rcu_preempt_exp_wq);
static unsigned long sync_rcu_preempt_exp_count;
static DEFINE_MUTEX(sync_rcu_preempt_exp_mutex);

/*
 * Return non-zero if there are any tasks in RCU read-side critical
 * sections blocking the current preemptible-RCU expedited grace period.
 * If there is no preemptible-RCU expedited grace period currently in
 * progress, returns zero unconditionally.
 */
static int rcu_preempted_readers_exp(void)
{
	return rcu_preempt_ctrlblk.exp_tasks != NULL;
}

/*
 * Report the exit from RCU read-side critical section for the last task
 * that queued itself during or before the current expedited preemptible-RCU
 * grace period.
 */
static void rcu_report_exp_done(void)
{
	wake_up(&sync_rcu_preempt_exp_wq);
}

/*
 * Wait for an rcu-preempt grace period, but expedite it.  The basic idea
 * is to rely on the fact that there is but one CPU, and that it is
 * illegal for a task to invoke synchronize_rcu_expedited() while in a
 * preemptible-RCU read-side critical section.  Therefore, any such
 * critical sections must correspond to blocked tasks, which must therefore
 * be on the ->blkd_tasks list.  So just record the current head of the
 * list in the ->exp_tasks pointer, and wait for all tasks including and
 * after the task pointed to by ->exp_tasks to drain.
 */
void synchronize_rcu_expedited(void)
{
	unsigned long flags;
	struct rcu_preempt_ctrlblk *rpcp = &rcu_preempt_ctrlblk;
	unsigned long snap;

	barrier();  /* ensure prior action seen before grace period. */

	WARN_ON_ONCE(rcu_preempt_running_reader());

	/*
	 * Acquire lock so that there is only one preemptible RCU grace
	 * period in flight.  Of course, if someone does the expedited
	 * grace period for us while we are acquiring the lock, just leave.
	 */
	snap = sync_rcu_preempt_exp_count + 1;
	mutex_lock(&sync_rcu_preempt_exp_mutex);
	if (ULONG_CMP_LT(snap, sync_rcu_preempt_exp_count))
		goto unlock_mb_ret;  /* Others did our work for us. */

	local_irq_save(flags);

	/*
	 * All RCU readers have to already be on blkd_tasks because
	 * we cannot legally be executing in an RCU read-side critical
	 * section.
	 */

	/* Snapshot current head of ->blkd_tasks list. */
	rpcp->exp_tasks = rpcp->blkd_tasks.next;
	if (rpcp->exp_tasks == &rpcp->blkd_tasks)
		rpcp->exp_tasks = NULL;

	/* Wait for tail of ->blkd_tasks list to drain. */
	if (!rcu_preempted_readers_exp())
		local_irq_restore(flags);
	else {
		rcu_initiate_boost();
		local_irq_restore(flags);
		wait_event(sync_rcu_preempt_exp_wq,
			   !rcu_preempted_readers_exp());
	}

	/* Clean up and exit. */
	barrier();  /* ensure expedited GP seen before counter increment. */
	sync_rcu_preempt_exp_count++;
unlock_mb_ret:
	mutex_unlock(&sync_rcu_preempt_exp_mutex);
	barrier();  /* ensure subsequent action seen after grace period. */
}
EXPORT_SYMBOL_GPL(synchronize_rcu_expedited);

/*
 * Does preemptible RCU need the CPU to stay out of dynticks mode?
 */
int rcu_preempt_needs_cpu(void)
{
	if (!rcu_preempt_running_reader())
		rcu_preempt_cpu_qs();
	return rcu_preempt_ctrlblk.rcb.rcucblist != NULL;
}

/*
 * Check for a task exiting while in a preemptible-RCU read-side
 * critical section, clean up if so.  No need to issue warnings,
 * as debug_check_no_locks_held() already does this if lockdep
 * is enabled.
 */
void exit_rcu(void)
{
	struct task_struct *t = current;

	if (t->rcu_read_lock_nesting == 0)
		return;
	t->rcu_read_lock_nesting = 1;
	__rcu_read_unlock();
}

#else /* #ifdef CONFIG_TINY_PREEMPT_RCU */

#ifdef CONFIG_RCU_TRACE

/*
 * Because preemptible RCU does not exist, it is not necessary to
 * dump out its statistics.
 */
static void show_tiny_preempt_stats(struct seq_file *m)
{
}

#endif /* #ifdef CONFIG_RCU_TRACE */

/*
 * Because preemptible RCU does not exist, it never has any callbacks
 * to check.
 */
static void rcu_preempt_check_callbacks(void)
{
}

/*
 * Because preemptible RCU does not exist, it never has any callbacks
 * to remove.
 */
static void rcu_preempt_remove_callbacks(struct rcu_ctrlblk *rcp)
{
}

/*
 * Because preemptible RCU does not exist, it never has any callbacks
 * to process.
 */
static void rcu_preempt_process_callbacks(void)
{
}

#endif /* #else #ifdef CONFIG_TINY_PREEMPT_RCU */

#ifdef CONFIG_RCU_BOOST

/*
 * Wake up rcu_kthread() to process callbacks now eligible for invocation
 * or to boost readers.
 */
static void invoke_rcu_callbacks(void)
{
	have_rcu_kthread_work = 1;
	wake_up(&rcu_kthread_wq);
}

#ifdef CONFIG_RCU_TRACE

/*
 * Is the current CPU running the RCU-callbacks kthread?
 * Caller must have preemption disabled.
 */
static bool rcu_is_callbacks_kthread(void)
{
	return rcu_kthread_task == current;
}

#endif /* #ifdef CONFIG_RCU_TRACE */

/*
 * This kthread invokes RCU callbacks whose grace periods have
 * elapsed.  It is awakened as needed, and takes the place of the
 * RCU_SOFTIRQ that is used for this purpose when boosting is disabled.
 * This is a kthread, but it is never stopped, at least not until
 * the system goes down.
 */
static int rcu_kthread(void *arg)
{
	unsigned long work;
	unsigned long morework;
	unsigned long flags;

	for (;;) {
		wait_event_interruptible(rcu_kthread_wq,
					 have_rcu_kthread_work != 0);
		morework = rcu_boost();
		local_irq_save(flags);
		work = have_rcu_kthread_work;
		have_rcu_kthread_work = morework;
		local_irq_restore(flags);
		if (work)
			rcu_process_callbacks(NULL);
		schedule_timeout_interruptible(1); /* Leave CPU for others. */
	}

	return 0;  /* Not reached, but needed to shut gcc up. */
}

/*
 * Spawn the kthread that invokes RCU callbacks.
 */
static int __init rcu_spawn_kthreads(void)
{
	struct sched_param sp;

	rcu_kthread_task = kthread_run(rcu_kthread, NULL, "rcu_kthread");
	sp.sched_priority = RCU_BOOST_PRIO;
	sched_setscheduler_nocheck(rcu_kthread_task, SCHED_FIFO, &sp);
	return 0;
}
early_initcall(rcu_spawn_kthreads);

#else /* #ifdef CONFIG_RCU_BOOST */

/*
 * Start up softirq processing of callbacks.
 */
void invoke_rcu_callbacks(void)
{
	raise_softirq(RCU_SOFTIRQ);
}

#ifdef CONFIG_RCU_TRACE

/*
 * There is no callback kthread, so this thread is never it.
 */
static bool rcu_is_callbacks_kthread(void)
{
	return false;
}

#endif /* #ifdef CONFIG_RCU_TRACE */

void rcu_init(void)
{
	open_softirq(RCU_SOFTIRQ, rcu_process_callbacks);
}

#endif /* #else #ifdef CONFIG_RCU_BOOST */

#ifdef CONFIG_DEBUG_LOCK_ALLOC
#include <linux/kernel_stat.h>

/*
 * During boot, we forgive RCU lockdep issues.  After this function is
 * invoked, we start taking RCU lockdep issues seriously.
 */
void __init rcu_scheduler_starting(void)
{
	WARN_ON(nr_context_switches() > 0);
	rcu_scheduler_active = 1;
}

#endif /* #ifdef CONFIG_DEBUG_LOCK_ALLOC */

#ifdef CONFIG_RCU_TRACE

#ifdef CONFIG_RCU_BOOST

static void rcu_initiate_boost_trace(void)
{
	if (list_empty(&rcu_preempt_ctrlblk.blkd_tasks))
		rcu_preempt_ctrlblk.n_balk_blkd_tasks++;
	else if (rcu_preempt_ctrlblk.gp_tasks == NULL &&
		 rcu_preempt_ctrlblk.exp_tasks == NULL)
		rcu_preempt_ctrlblk.n_balk_exp_gp_tasks++;
	else if (rcu_preempt_ctrlblk.boost_tasks != NULL)
		rcu_preempt_ctrlblk.n_balk_boost_tasks++;
	else if (!ULONG_CMP_GE(jiffies, rcu_preempt_ctrlblk.boost_time))
		rcu_preempt_ctrlblk.n_balk_notyet++;
	else
		rcu_preempt_ctrlblk.n_balk_nos++;
}

#endif /* #ifdef CONFIG_RCU_BOOST */

static void rcu_trace_sub_qlen(struct rcu_ctrlblk *rcp, int n)
{
	unsigned long flags;

	raw_local_irq_save(flags);
	rcp->qlen -= n;
	raw_local_irq_restore(flags);
}

/*
 * Dump statistics for TINY_RCU, such as they are.
 */
static int show_tiny_stats(struct seq_file *m, void *unused)
{
	show_tiny_preempt_stats(m);
	seq_printf(m, "rcu_sched: qlen: %ld\n", rcu_sched_ctrlblk.qlen);
	seq_printf(m, "rcu_bh: qlen: %ld\n", rcu_bh_ctrlblk.qlen);
	return 0;
}

static int show_tiny_stats_open(struct inode *inode, struct file *file)
{
	return single_open(file, show_tiny_stats, NULL);
}

static const struct file_operations show_tiny_stats_fops = {
	.owner		= THIS_MODULE,
	.open		= show_tiny_stats_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};

static struct dentry *rcudir;

static int __init rcutiny_trace_init(void)
{
	struct dentry *retval;

	rcudir = debugfs_create_dir("rcu", NULL);
	if (!rcudir)
		goto free_out;
	retval = debugfs_create_file("rcudata", 0444, rcudir,
				     NULL, &show_tiny_stats_fops);
	if (!retval)
		goto free_out;
	return 0;
free_out:
	debugfs_remove_recursive(rcudir);
	return 1;
}

static void __exit rcutiny_trace_cleanup(void)
{
	debugfs_remove_recursive(rcudir);
}

module_init(rcutiny_trace_init);
module_exit(rcutiny_trace_cleanup);

MODULE_AUTHOR("Paul E. McKenney");
MODULE_DESCRIPTION("Read-Copy Update tracing for tiny implementation");
MODULE_LICENSE("GPL");

#endif /* #ifdef CONFIG_RCU_TRACE */