/*
 * Read-Copy Update mechanism for mutual exclusion, the Bloatwatch edition.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
 *
 * Copyright IBM Corporation, 2008
 *
 * Author: Paul E. McKenney <paulmck@linux.vnet.ibm.com>
 *
 * For detailed explanation of Read-Copy Update mechanism see -
 *        Documentation/RCU
 */
#include <linux/completion.h>
#include <linux/interrupt.h>
#include <linux/notifier.h>
#include <linux/rcupdate.h>
#include <linux/kernel.h>
#include <linux/export.h>
#include <linux/mutex.h>
#include <linux/sched.h>
#include <linux/types.h>
#include <linux/init.h>
#include <linux/time.h>
#include <linux/cpu.h>
#include <linux/prefetch.h>

#ifdef CONFIG_RCU_TRACE
#include <trace/events/rcu.h>
#endif /* #ifdef CONFIG_RCU_TRACE */

#include "rcu.h"

/* Forward declarations for rcutiny_plugin.h. */
struct rcu_ctrlblk;
static void invoke_rcu_callbacks(void);
static void __rcu_process_callbacks(struct rcu_ctrlblk *rcp);
static void rcu_process_callbacks(struct softirq_action *unused);
static void __call_rcu(struct rcu_head *head,
                       void (*func)(struct rcu_head *rcu),
                       struct rcu_ctrlblk *rcp);

#include "rcutiny_plugin.h"

/* Start with nonzero nesting so that RCU treats the booting CPU as non-idle. */
static long long rcu_dynticks_nesting = LLONG_MAX / 2;

/* Common code for rcu_idle_enter() and rcu_irq_exit(), see kernel/rcutree.c. */
static void rcu_idle_enter_common(void)
{
        if (rcu_dynticks_nesting) {
                RCU_TRACE(trace_rcu_dyntick("--=", rcu_dynticks_nesting));
                return;
        }
        RCU_TRACE(trace_rcu_dyntick("Start", rcu_dynticks_nesting));
        if (!idle_cpu(smp_processor_id())) {
                WARN_ON_ONCE(1);        /* must be idle task! */
                RCU_TRACE(trace_rcu_dyntick("Error on entry: not idle task",
                                            rcu_dynticks_nesting));
                ftrace_dump(DUMP_ALL);
        }
        rcu_sched_qs(0); /* implies rcu_bh_qs(0) */
}

/*
 * Enter idle, which is an extended quiescent state if we have fully
 * entered that mode (i.e., if the new value of dynticks_nesting is zero).
 */
void rcu_idle_enter(void)
{
        unsigned long flags;

        local_irq_save(flags);
        rcu_dynticks_nesting = 0;
        rcu_idle_enter_common();
        local_irq_restore(flags);
}

/*
 * Exit an interrupt handler towards idle.
 */
void rcu_irq_exit(void)
{
        unsigned long flags;

        local_irq_save(flags);
        rcu_dynticks_nesting--;
        WARN_ON_ONCE(rcu_dynticks_nesting < 0);
        rcu_idle_enter_common();
        local_irq_restore(flags);
}

/* Common code for rcu_idle_exit() and rcu_irq_enter(), see kernel/rcutree.c. */
static void rcu_idle_exit_common(long long oldval)
{
        if (oldval) {
                RCU_TRACE(trace_rcu_dyntick("++=", rcu_dynticks_nesting));
                return;
        }
        RCU_TRACE(trace_rcu_dyntick("End", oldval));
        if (!idle_cpu(smp_processor_id())) {
                WARN_ON_ONCE(1);        /* must be idle task! */
                RCU_TRACE(trace_rcu_dyntick("Error on exit: not idle task",
                                            oldval));
                ftrace_dump(DUMP_ALL);
        }
}

/*
 * Exit idle, so that we are no longer in an extended quiescent state.
 */
void rcu_idle_exit(void)
{
        unsigned long flags;
        long long oldval;

        local_irq_save(flags);
        oldval = rcu_dynticks_nesting;
        WARN_ON_ONCE(oldval != 0);
        rcu_dynticks_nesting = LLONG_MAX / 2;
        rcu_idle_exit_common(oldval);
        local_irq_restore(flags);
}

/*
 * Enter an interrupt handler, moving away from idle.
 */
void rcu_irq_enter(void)
{
        unsigned long flags;
        long long oldval;

        local_irq_save(flags);
        oldval = rcu_dynticks_nesting;
        rcu_dynticks_nesting++;
        WARN_ON_ONCE(rcu_dynticks_nesting == 0);
        rcu_idle_exit_common(oldval);
        local_irq_restore(flags);
}

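/*
 * A minimal usage sketch, not part of this file: roughly how an
 * architecture's idle loop is expected to bracket its low-power wait
 * with rcu_idle_enter()/rcu_idle_exit().  The function example_cpu_idle()
 * and the cpu_relax() wait are hypothetical stand-ins for arch code.
 */
#if 0   /* illustration only */
static void example_cpu_idle(void)
{
        for (;;) {
                rcu_idle_enter();       /* RCU may now ignore this CPU. */
                while (!need_resched())
                        cpu_relax();    /* arch would halt/wait here */
                rcu_idle_exit();        /* non-idle again; RCU is watching */
                schedule();
        }
}
#endif
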
#ifdef CONFIG_PROVE_RCU

/*
 * Test whether RCU thinks that the current CPU is idle.
 */
int rcu_is_cpu_idle(void)
{
        return !rcu_dynticks_nesting;
}

#endif /* #ifdef CONFIG_PROVE_RCU */

/*
 * Test whether the current CPU was interrupted from idle.  Nested
 * interrupts don't count, we must be running at the first interrupt
 * level.
 */
int rcu_is_cpu_rrupt_from_idle(void)
{
        return rcu_dynticks_nesting <= 1;
}

/*
 * Helper function for rcu_sched_qs() and rcu_bh_qs().  Callers must
 * have irqs disabled to avoid confusion due to interrupt handlers
 * invoking call_rcu().
 */
static int rcu_qsctr_help(struct rcu_ctrlblk *rcp)
{
        if (rcp->rcucblist != NULL &&
            rcp->donetail != rcp->curtail) {
                rcp->donetail = rcp->curtail;
                return 1;
        }

        return 0;
}

/*
 * Record an rcu quiescent state.  And an rcu_bh quiescent state while we
 * are at it, given that any rcu quiescent state is also an rcu_bh
 * quiescent state.  Use "+" instead of "||" to defeat short circuiting.
 */
void rcu_sched_qs(int cpu)
{
        unsigned long flags;

        local_irq_save(flags);
        if (rcu_qsctr_help(&rcu_sched_ctrlblk) +
            rcu_qsctr_help(&rcu_bh_ctrlblk))
                invoke_rcu_callbacks();
        local_irq_restore(flags);
}

/*
 * Record an rcu_bh quiescent state.
 */
void rcu_bh_qs(int cpu)
{
        unsigned long flags;

        local_irq_save(flags);
        if (rcu_qsctr_help(&rcu_bh_ctrlblk))
                invoke_rcu_callbacks();
        local_irq_restore(flags);
}

/*
 * Check to see if the scheduling-clock interrupt came from an extended
 * quiescent state, and, if so, tell RCU about it.  This function must
 * be called from hardirq context.  It is normally called from the
 * scheduling-clock interrupt.
 */
void rcu_check_callbacks(int cpu, int user)
{
        if (user || rcu_is_cpu_rrupt_from_idle())
                rcu_sched_qs(cpu);
        else if (!in_softirq())
                rcu_bh_qs(cpu);
        rcu_preempt_check_callbacks();
}

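/*
 * Illustration only: rcu_check_callbacks() is invoked from the
 * scheduling-clock path.  A paraphrased sketch of kernel/timer.c's
 * update_process_times(); the exact body differs by kernel version.
 */
#if 0   /* illustration only */
void update_process_times(int user_tick)
{
        int cpu = smp_processor_id();

        /* ... time accounting elided ... */
        rcu_check_callbacks(cpu, user_tick);    /* hardirq context */
        /* ... scheduler tick elided ... */
}
#endif
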
/*
 * Invoke the RCU callbacks on the specified rcu_ctrlblk structure
 * whose grace period has elapsed.
 */
static void __rcu_process_callbacks(struct rcu_ctrlblk *rcp)
{
        char *rn = NULL;
        struct rcu_head *next, *list;
        unsigned long flags;
        RCU_TRACE(int cb_count = 0);

        /* If no RCU callbacks ready to invoke, just return. */
        if (&rcp->rcucblist == rcp->donetail) {
                RCU_TRACE(trace_rcu_batch_start(rcp->name, 0, -1));
                RCU_TRACE(trace_rcu_batch_end(rcp->name, 0));
                return;
        }

        /* Move the ready-to-invoke callbacks to a local list. */
        local_irq_save(flags);
        RCU_TRACE(trace_rcu_batch_start(rcp->name, 0, -1));
        list = rcp->rcucblist;
        rcp->rcucblist = *rcp->donetail;
        *rcp->donetail = NULL;
        if (rcp->curtail == rcp->donetail)
                rcp->curtail = &rcp->rcucblist;
        rcu_preempt_remove_callbacks(rcp);
        rcp->donetail = &rcp->rcucblist;
        local_irq_restore(flags);

        /* Invoke the callbacks on the local list. */
        RCU_TRACE(rn = rcp->name);
        while (list) {
                next = list->next;
                prefetch(next);
                debug_rcu_head_unqueue(list);
                local_bh_disable();
                __rcu_reclaim(rn, list);
                local_bh_enable();
                list = next;
                RCU_TRACE(cb_count++);
        }
        RCU_TRACE(rcu_trace_sub_qlen(rcp, cb_count));
        RCU_TRACE(trace_rcu_batch_end(rcp->name, cb_count));
}

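/*
 * Illustration only: each rcu_ctrlblk keeps its callbacks on one singly
 * linked list with two tail pointers.  With callbacks A and B already
 * past their grace period and C still waiting:
 *
 *      ->rcucblist -> A -> B -> C -> NULL
 *          ->donetail == &B->next  (A and B are ready to invoke)
 *          ->curtail  == &C->next  (call_rcu() appends here)
 *
 * rcu_qsctr_help() ends a grace period by advancing ->donetail to
 * ->curtail, and __rcu_process_callbacks() above snips off and invokes
 * everything up to *->donetail.
 */
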
static void rcu_process_callbacks(struct softirq_action *unused)
{
        __rcu_process_callbacks(&rcu_sched_ctrlblk);
        __rcu_process_callbacks(&rcu_bh_ctrlblk);
        rcu_preempt_process_callbacks();
}

/*
 * Wait for a grace period to elapse.  But it is illegal to invoke
 * synchronize_sched() from within an RCU read-side critical section.
 * Therefore, any legal call to synchronize_sched() is a quiescent
 * state, and so on a UP system, synchronize_sched() need do nothing.
 * Ditto for synchronize_rcu_bh().  (But Lai Jiangshan points out the
 * benefits of doing might_sleep() to reduce latency.)
 *
 * Cool, huh?  (Due to Josh Triplett.)
 *
 * But we want to make this a static inline later.  The cond_resched()
 * currently makes this problematic.
 */
void synchronize_sched(void)
{
        cond_resched();
}
EXPORT_SYMBOL_GPL(synchronize_sched);

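/*
 * Usage sketch, not part of this file: the classic update-side pattern
 * that synchronize_sched() enables.  "struct foo", foo_update(), and the
 * pointer gp are hypothetical, and kfree() would need <linux/slab.h>.
 * Readers would access gp via rcu_dereference_sched() under
 * rcu_read_lock_sched() (or with preemption otherwise disabled).
 */
#if 0   /* illustration only */
struct foo {
        int a;
};
static struct foo *gp;  /* read elsewhere via rcu_dereference_sched() */

void foo_update(struct foo *new_fp)
{
        struct foo *old_fp = gp;

        rcu_assign_pointer(gp, new_fp); /* publish the new version */
        synchronize_sched();            /* wait for pre-existing readers */
        kfree(old_fp);                  /* now safe to reclaim the old one */
}
#endif
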
/*
 * Helper function for call_rcu() and call_rcu_bh().
 */
static void __call_rcu(struct rcu_head *head,
                       void (*func)(struct rcu_head *rcu),
                       struct rcu_ctrlblk *rcp)
{
        unsigned long flags;

        debug_rcu_head_queue(head);
        head->func = func;
        head->next = NULL;

        local_irq_save(flags);
        *rcp->curtail = head;
        rcp->curtail = &head->next;
        RCU_TRACE(rcp->qlen++);
        local_irq_restore(flags);
}

/*
 * Post an RCU callback to be invoked after the end of an RCU-sched grace
 * period.  But since we have but one CPU, that would be after any
 * quiescent state.
 */
void call_rcu_sched(struct rcu_head *head, void (*func)(struct rcu_head *rcu))
{
        __call_rcu(head, func, &rcu_sched_ctrlblk);
}
EXPORT_SYMBOL_GPL(call_rcu_sched);

/*
 * Post an RCU bottom-half callback to be invoked after any subsequent
 * quiescent state.
 */
void call_rcu_bh(struct rcu_head *head, void (*func)(struct rcu_head *rcu))
{
        __call_rcu(head, func, &rcu_bh_ctrlblk);
}
EXPORT_SYMBOL_GPL(call_rcu_bh);
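
/*
 * Usage sketch, not part of this file: callers embed an rcu_head in
 * their own structure and recover it with container_of() in the
 * callback.  "struct foo" and foo_reclaim() are hypothetical, and
 * kfree() would need <linux/slab.h>.
 */
#if 0   /* illustration only */
struct foo {
        int data;
        struct rcu_head rcu;
};

static void foo_reclaim(struct rcu_head *rcu)
{
        struct foo *fp = container_of(rcu, struct foo, rcu);

        kfree(fp);
}

/* After unlinking fp from all RCU-protected structures: */
/*      call_rcu_sched(&fp->rcu, foo_reclaim); */
#endif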