/*
 * Read-Copy Update mechanism for mutual exclusion (tree-based version)
 * Internal non-public definitions that provide either classic
 * or preemptible semantics.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
 *
 * Copyright Red Hat, 2009
 * Copyright IBM Corporation, 2009
 *
 * Author: Ingo Molnar <mingo@elte.hu>
 *         Paul E. McKenney <paulmck@linux.vnet.ibm.com>
 */

#include <linux/delay.h>
#include <linux/stop_machine.h>

/*
 * Check the RCU kernel configuration parameters and print informative
 * messages about anything out of the ordinary.  If you like #ifdef, you
 * will love this function.
 */
static void __init rcu_bootup_announce_oddness(void)
{
#ifdef CONFIG_RCU_TRACE
        printk(KERN_INFO "\tRCU debugfs-based tracing is enabled.\n");
#endif
#if (defined(CONFIG_64BIT) && CONFIG_RCU_FANOUT != 64) || (!defined(CONFIG_64BIT) && CONFIG_RCU_FANOUT != 32)
        printk(KERN_INFO "\tCONFIG_RCU_FANOUT set to non-default value of %d\n",
               CONFIG_RCU_FANOUT);
#endif
#ifdef CONFIG_RCU_FANOUT_EXACT
        printk(KERN_INFO "\tHierarchical RCU autobalancing is disabled.\n");
#endif
#ifdef CONFIG_RCU_FAST_NO_HZ
        printk(KERN_INFO
               "\tRCU dyntick-idle grace-period acceleration is enabled.\n");
#endif
#ifdef CONFIG_PROVE_RCU
        printk(KERN_INFO "\tRCU lockdep checking is enabled.\n");
#endif
#ifdef CONFIG_RCU_TORTURE_TEST_RUNNABLE
        printk(KERN_INFO "\tRCU torture testing starts during boot.\n");
#endif
#if defined(CONFIG_TREE_PREEMPT_RCU) && !defined(CONFIG_RCU_CPU_STALL_VERBOSE)
        printk(KERN_INFO "\tVerbose stalled-CPUs detection is disabled.\n");
#endif
#if NUM_RCU_LVL_4 != 0
        printk(KERN_INFO "\tExperimental four-level hierarchy is enabled.\n");
#endif
}

#ifdef CONFIG_TREE_PREEMPT_RCU

struct rcu_state rcu_preempt_state = RCU_STATE_INITIALIZER(rcu_preempt_state);
DEFINE_PER_CPU(struct rcu_data, rcu_preempt_data);
static struct rcu_state *rcu_state = &rcu_preempt_state;

static int rcu_preempted_readers_exp(struct rcu_node *rnp);

/*
 * Tell them what RCU they are running.
 */
static void __init rcu_bootup_announce(void)
{
        printk(KERN_INFO "Preemptible hierarchical RCU implementation.\n");
        rcu_bootup_announce_oddness();
}

/*
 * Return the number of RCU-preempt batches processed thus far
 * for debug and statistics.
 */
long rcu_batches_completed_preempt(void)
{
        return rcu_preempt_state.completed;
}
EXPORT_SYMBOL_GPL(rcu_batches_completed_preempt);

/*
 * Return the number of RCU batches processed thus far for debug & stats.
 */
long rcu_batches_completed(void)
{
        return rcu_batches_completed_preempt();
}
EXPORT_SYMBOL_GPL(rcu_batches_completed);

/*
 * Force a quiescent state for preemptible RCU.
 */
void rcu_force_quiescent_state(void)
{
        force_quiescent_state(&rcu_preempt_state, 0);
}
EXPORT_SYMBOL_GPL(rcu_force_quiescent_state);

/*
 * Record a preemptible-RCU quiescent state for the specified CPU.  Note
 * that this just means that the task currently running on the CPU is
 * not in a quiescent state.  There might be any number of tasks blocked
 * while in an RCU read-side critical section.
 *
 * Unlike the other rcu_*_qs() functions, callers to this function
 * must disable irqs in order to protect the assignment to
 * ->rcu_read_unlock_special.
 */
static void rcu_preempt_qs(int cpu)
{
        struct rcu_data *rdp = &per_cpu(rcu_preempt_data, cpu);

        rdp->passed_quiesc_completed = rdp->gpnum - 1;
        barrier();
        rdp->passed_quiesc = 1;
        current->rcu_read_unlock_special &= ~RCU_READ_UNLOCK_NEED_QS;
}

/*
 * We have entered the scheduler, and the current task might soon be
 * context-switched away from.  If this task is in an RCU read-side
 * critical section, we will no longer be able to rely on the CPU to
 * record that fact, so we enqueue the task on the blkd_tasks list.
 * The task will dequeue itself when it exits the outermost enclosing
 * RCU read-side critical section.  Therefore, the current grace period
 * cannot be permitted to complete until the blkd_tasks list entries
 * predating the current grace period drain, in other words, until
 * rnp->gp_tasks becomes NULL.
 *
 * Caller must disable preemption.
 */
static void rcu_preempt_note_context_switch(int cpu)
{
        struct task_struct *t = current;
        unsigned long flags;
        struct rcu_data *rdp;
        struct rcu_node *rnp;

        if (t->rcu_read_lock_nesting &&
            (t->rcu_read_unlock_special & RCU_READ_UNLOCK_BLOCKED) == 0) {

                /* Possibly blocking in an RCU read-side critical section. */
                rdp = per_cpu_ptr(rcu_preempt_state.rda, cpu);
                rnp = rdp->mynode;
                raw_spin_lock_irqsave(&rnp->lock, flags);
                t->rcu_read_unlock_special |= RCU_READ_UNLOCK_BLOCKED;
                t->rcu_blocked_node = rnp;

                /*
                 * If this CPU has already checked in, then this task
                 * will hold up the next grace period rather than the
                 * current grace period.  Queue the task accordingly.
                 * If the task is queued for the current grace period
                 * (i.e., this CPU has not yet passed through a quiescent
                 * state for the current grace period), then as long
                 * as that task remains queued, the current grace period
                 * cannot end.  Note that there is some uncertainty as
                 * to exactly when the current grace period started.
                 * We take a conservative approach, which can result
                 * in unnecessarily waiting on tasks that started very
                 * slightly after the current grace period began.  C'est
                 * la vie!!!
                 *
                 * But first, note that the current CPU must still be
                 * on line!
                 */
                WARN_ON_ONCE((rdp->grpmask & rnp->qsmaskinit) == 0);
                WARN_ON_ONCE(!list_empty(&t->rcu_node_entry));
                if ((rnp->qsmask & rdp->grpmask) && rnp->gp_tasks != NULL) {
                        list_add(&t->rcu_node_entry, rnp->gp_tasks->prev);
                        rnp->gp_tasks = &t->rcu_node_entry;
#ifdef CONFIG_RCU_BOOST
                        if (rnp->boost_tasks != NULL)
                                rnp->boost_tasks = rnp->gp_tasks;
#endif /* #ifdef CONFIG_RCU_BOOST */
                } else {
                        list_add(&t->rcu_node_entry, &rnp->blkd_tasks);
                        if (rnp->qsmask & rdp->grpmask)
                                rnp->gp_tasks = &t->rcu_node_entry;
                }
                raw_spin_unlock_irqrestore(&rnp->lock, flags);
        }

        /*
         * Either we were not in an RCU read-side critical section to
         * begin with, or we have now recorded that critical section
         * globally.  Either way, we can now note a quiescent state
         * for this CPU.  Again, if we were in an RCU read-side critical
         * section, and if that critical section was blocking the current
         * grace period, then the fact that the task has been enqueued
         * means that we continue to block the current grace period.
         */
        local_irq_save(flags);
        rcu_preempt_qs(cpu);
        local_irq_restore(flags);
}

/*
 * Tree-preemptible RCU implementation for rcu_read_lock().
 * Just increment ->rcu_read_lock_nesting, shared state will be updated
 * if we block.
 */
void __rcu_read_lock(void)
{
        current->rcu_read_lock_nesting++;
        barrier();  /* needed if we ever invoke rcu_read_lock in rcutree.c */
}
EXPORT_SYMBOL_GPL(__rcu_read_lock);

/*
 * Check for preempted RCU readers blocking the current grace period
 * for the specified rcu_node structure.  If the caller needs a reliable
 * answer, it must hold the rcu_node's ->lock.
 */
static int rcu_preempt_blocked_readers_cgp(struct rcu_node *rnp)
{
        return rnp->gp_tasks != NULL;
}

/*
 * Record a quiescent state for all tasks that were previously queued
 * on the specified rcu_node structure and that were blocking the current
 * RCU grace period.  The caller must hold the specified rnp->lock with
 * irqs disabled, and this lock is released upon return, but irqs remain
 * disabled.
 */
static void rcu_report_unblock_qs_rnp(struct rcu_node *rnp, unsigned long flags)
        __releases(rnp->lock)
{
        unsigned long mask;
        struct rcu_node *rnp_p;

        if (rnp->qsmask != 0 || rcu_preempt_blocked_readers_cgp(rnp)) {
                raw_spin_unlock_irqrestore(&rnp->lock, flags);
                return;  /* Still need more quiescent states! */
        }

        rnp_p = rnp->parent;
        if (rnp_p == NULL) {
                /*
                 * Either there is only one rcu_node in the tree,
                 * or tasks were kicked up to root rcu_node due to
                 * CPUs going offline.
                 */
                rcu_report_qs_rsp(&rcu_preempt_state, flags);
                return;
        }

        /* Report up the rest of the hierarchy. */
        mask = rnp->grpmask;
        raw_spin_unlock(&rnp->lock);    /* irqs remain disabled. */
        raw_spin_lock(&rnp_p->lock);    /* irqs already disabled. */
        rcu_report_qs_rnp(mask, &rcu_preempt_state, rnp_p, flags);
}

/*
 * Advance a ->blkd_tasks-list pointer to the next entry, returning NULL
 * instead if at the end of the list.
 */
static struct list_head *rcu_next_node_entry(struct task_struct *t,
                                             struct rcu_node *rnp)
{
        struct list_head *np;

        np = t->rcu_node_entry.next;
        if (np == &rnp->blkd_tasks)
                np = NULL;
        return np;
}

/*
 * Handle special cases during rcu_read_unlock(), such as needing to
 * notify RCU core processing or task having blocked during the RCU
 * read-side critical section.
 */
static noinline void rcu_read_unlock_special(struct task_struct *t)
{
        int empty;
        int empty_exp;
        unsigned long flags;
        struct list_head *np;
        struct rcu_node *rnp;
        int special;

        /* NMI handlers cannot block and cannot safely manipulate state. */
        if (in_nmi())
                return;

        local_irq_save(flags);

        /*
         * If RCU core is waiting for this CPU to exit critical section,
         * let it know that we have done so.
         */
        special = t->rcu_read_unlock_special;
        if (special & RCU_READ_UNLOCK_NEED_QS) {
                rcu_preempt_qs(smp_processor_id());
        }

        /* Hardware IRQ handlers cannot block. */
        if (in_irq()) {
                local_irq_restore(flags);
                return;
        }

        /* Clean up if blocked during RCU read-side critical section. */
        if (special & RCU_READ_UNLOCK_BLOCKED) {
                t->rcu_read_unlock_special &= ~RCU_READ_UNLOCK_BLOCKED;

                /*
                 * Remove this task from the list it blocked on.  The
                 * task can migrate while we acquire the lock, but at
                 * most one time.  So at most two passes through loop.
                 */
                for (;;) {
                        rnp = t->rcu_blocked_node;
                        raw_spin_lock(&rnp->lock);  /* irqs already disabled. */
                        if (rnp == t->rcu_blocked_node)
                                break;
                        raw_spin_unlock(&rnp->lock);  /* irqs remain disabled. */
                }
                empty = !rcu_preempt_blocked_readers_cgp(rnp);
                empty_exp = !rcu_preempted_readers_exp(rnp);
                smp_mb(); /* ensure expedited fastpath sees end of RCU c-s. */
                np = rcu_next_node_entry(t, rnp);
                list_del_init(&t->rcu_node_entry);
                if (&t->rcu_node_entry == rnp->gp_tasks)
                        rnp->gp_tasks = np;
                if (&t->rcu_node_entry == rnp->exp_tasks)
                        rnp->exp_tasks = np;
#ifdef CONFIG_RCU_BOOST
                if (&t->rcu_node_entry == rnp->boost_tasks)
                        rnp->boost_tasks = np;
                /* Snapshot and clear ->rcu_boosted with rcu_node lock held. */
                if (t->rcu_boosted) {
                        special |= RCU_READ_UNLOCK_BOOSTED;
                        t->rcu_boosted = 0;
                }
#endif /* #ifdef CONFIG_RCU_BOOST */
                t->rcu_blocked_node = NULL;

                /*
                 * If this was the last task on the current list, and if
                 * we aren't waiting on any CPUs, report the quiescent state.
                 * Note that rcu_report_unblock_qs_rnp() releases rnp->lock.
                 */
                if (empty)
                        raw_spin_unlock_irqrestore(&rnp->lock, flags);
                else
                        rcu_report_unblock_qs_rnp(rnp, flags);

#ifdef CONFIG_RCU_BOOST
                /* Unboost if we were boosted. */
                if (special & RCU_READ_UNLOCK_BOOSTED) {
                        rt_mutex_unlock(t->rcu_boost_mutex);
                        t->rcu_boost_mutex = NULL;
                }
#endif /* #ifdef CONFIG_RCU_BOOST */

                /*
                 * If this was the last task on the expedited lists,
                 * then we need to report up the rcu_node hierarchy.
                 */
                if (!empty_exp && !rcu_preempted_readers_exp(rnp))
                        rcu_report_exp_rnp(&rcu_preempt_state, rnp);
        } else {
                local_irq_restore(flags);
        }
}

/*
 * Tree-preemptible RCU implementation for rcu_read_unlock().
 * Decrement ->rcu_read_lock_nesting.  If the result is zero (outermost
 * rcu_read_unlock()) and ->rcu_read_unlock_special is non-zero, then
 * invoke rcu_read_unlock_special() to clean up after a context switch
 * in an RCU read-side critical section and other special cases.
 */
void __rcu_read_unlock(void)
{
        struct task_struct *t = current;

        barrier();  /* needed if we ever invoke rcu_read_unlock in rcutree.c */
        if (--t->rcu_read_lock_nesting == 0) {
                barrier();  /* decr before ->rcu_read_unlock_special load */
                if (unlikely(ACCESS_ONCE(t->rcu_read_unlock_special)))
                        rcu_read_unlock_special(t);
        }
#ifdef CONFIG_PROVE_LOCKING
        WARN_ON_ONCE(ACCESS_ONCE(t->rcu_read_lock_nesting) < 0);
#endif /* #ifdef CONFIG_PROVE_LOCKING */
}
EXPORT_SYMBOL_GPL(__rcu_read_unlock);

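/*
 * Usage sketch (illustrative; "gp" and do_something_with() are
 * hypothetical): the primitives above are normally reached through the
 * rcu_read_lock() and rcu_read_unlock() wrappers.  A minimal reader:
 *
 *      rcu_read_lock();
 *      p = rcu_dereference(gp);
 *      if (p != NULL)
 *              do_something_with(p);
 *      rcu_read_unlock();
 *
 * Under TREE_PREEMPT_RCU the reader may be preempted anywhere in this
 * critical section; the blkd_tasks queuing above is what keeps the
 * grace period from ending until such preempted readers complete.
 */
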
#ifdef CONFIG_RCU_CPU_STALL_VERBOSE

/*
 * Dump detailed information for all tasks blocking the current RCU
 * grace period on the specified rcu_node structure.
 */
static void rcu_print_detail_task_stall_rnp(struct rcu_node *rnp)
{
        unsigned long flags;
        struct task_struct *t;

        if (!rcu_preempt_blocked_readers_cgp(rnp))
                return;
        raw_spin_lock_irqsave(&rnp->lock, flags);
        t = list_entry(rnp->gp_tasks,
                       struct task_struct, rcu_node_entry);
        list_for_each_entry_continue(t, &rnp->blkd_tasks, rcu_node_entry)
                sched_show_task(t);
        raw_spin_unlock_irqrestore(&rnp->lock, flags);
}

/*
 * Dump detailed information for all tasks blocking the current RCU
 * grace period.
 */
static void rcu_print_detail_task_stall(struct rcu_state *rsp)
{
        struct rcu_node *rnp = rcu_get_root(rsp);

        rcu_print_detail_task_stall_rnp(rnp);
        rcu_for_each_leaf_node(rsp, rnp)
                rcu_print_detail_task_stall_rnp(rnp);
}

#else /* #ifdef CONFIG_RCU_CPU_STALL_VERBOSE */

static void rcu_print_detail_task_stall(struct rcu_state *rsp)
{
}

#endif /* #else #ifdef CONFIG_RCU_CPU_STALL_VERBOSE */

/*
 * Scan the current list of tasks blocked within RCU read-side critical
 * sections, printing out the tid of each.
 */
static void rcu_print_task_stall(struct rcu_node *rnp)
{
        struct task_struct *t;

        if (!rcu_preempt_blocked_readers_cgp(rnp))
                return;
        t = list_entry(rnp->gp_tasks,
                       struct task_struct, rcu_node_entry);
        list_for_each_entry_continue(t, &rnp->blkd_tasks, rcu_node_entry)
                printk(" P%d", t->pid);
}

/*
 * Suppress preemptible RCU's CPU stall warnings by pushing the
 * time of the next stall-warning message comfortably far into the
 * future.
 */
static void rcu_preempt_stall_reset(void)
{
        rcu_preempt_state.jiffies_stall = jiffies + ULONG_MAX / 2;
}

/*
 * Check that the list of blocked tasks for the newly completed grace
 * period is in fact empty.  It is a serious bug to complete a grace
 * period that still has RCU readers blocked!  This function must be
 * invoked -before- updating this rnp's ->gpnum, and the rnp's ->lock
 * must be held by the caller.
 *
 * Also, if there are blocked tasks on the list, they automatically
 * block the newly created grace period, so set up ->gp_tasks accordingly.
 */
static void rcu_preempt_check_blocked_tasks(struct rcu_node *rnp)
{
        WARN_ON_ONCE(rcu_preempt_blocked_readers_cgp(rnp));
        if (!list_empty(&rnp->blkd_tasks))
                rnp->gp_tasks = rnp->blkd_tasks.next;
        WARN_ON_ONCE(rnp->qsmask);
}

#ifdef CONFIG_HOTPLUG_CPU

/*
 * Handle tasklist migration for case in which all CPUs covered by the
 * specified rcu_node have gone offline.  Move them up to the root
 * rcu_node.  The reason for not just moving them to the immediate
 * parent is to remove the need for rcu_read_unlock_special() to
 * make more than two attempts to acquire the target rcu_node's lock.
 *
 * Returns 1 if there was previously a task blocking the current grace
 * period on the specified rcu_node structure.
 *
 * The caller must hold rnp->lock with irqs disabled.
 */
static int rcu_preempt_offline_tasks(struct rcu_state *rsp,
                                     struct rcu_node *rnp,
                                     struct rcu_data *rdp)
{
        struct list_head *lp;
        struct list_head *lp_root;
        int retval = 0;
        struct rcu_node *rnp_root = rcu_get_root(rsp);
        struct task_struct *t;

        if (rnp == rnp_root) {
                WARN_ONCE(1, "Last CPU thought to be offlined?");
                return 0;  /* Shouldn't happen: at least one CPU online. */
        }

        /* If we are on an internal node, complain bitterly. */
        WARN_ON_ONCE(rnp != rdp->mynode);

        /*
         * Move tasks up to root rcu_node.  Don't try to get fancy for
         * this corner-case operation -- just put this node's tasks
         * at the head of the root node's list, and update the root node's
         * ->gp_tasks and ->exp_tasks pointers to those of this node's,
         * if non-NULL.  This might result in waiting for more tasks than
         * absolutely necessary, but this is a good performance/complexity
         * tradeoff.
         */
        if (rcu_preempt_blocked_readers_cgp(rnp))
                retval |= RCU_OFL_TASKS_NORM_GP;
        if (rcu_preempted_readers_exp(rnp))
                retval |= RCU_OFL_TASKS_EXP_GP;
        lp = &rnp->blkd_tasks;
        lp_root = &rnp_root->blkd_tasks;
        while (!list_empty(lp)) {
                t = list_entry(lp->next, typeof(*t), rcu_node_entry);
                raw_spin_lock(&rnp_root->lock); /* irqs already disabled */
                list_del(&t->rcu_node_entry);
                t->rcu_blocked_node = rnp_root;
                list_add(&t->rcu_node_entry, lp_root);
                if (&t->rcu_node_entry == rnp->gp_tasks)
                        rnp_root->gp_tasks = rnp->gp_tasks;
                if (&t->rcu_node_entry == rnp->exp_tasks)
                        rnp_root->exp_tasks = rnp->exp_tasks;
#ifdef CONFIG_RCU_BOOST
                if (&t->rcu_node_entry == rnp->boost_tasks)
                        rnp_root->boost_tasks = rnp->boost_tasks;
#endif /* #ifdef CONFIG_RCU_BOOST */
                raw_spin_unlock(&rnp_root->lock); /* irqs still disabled */
        }

#ifdef CONFIG_RCU_BOOST
        /* In case root is being boosted and leaf is not. */
        raw_spin_lock(&rnp_root->lock); /* irqs already disabled */
        if (rnp_root->boost_tasks != NULL &&
            rnp_root->boost_tasks != rnp_root->gp_tasks)
                rnp_root->boost_tasks = rnp_root->gp_tasks;
        raw_spin_unlock(&rnp_root->lock); /* irqs still disabled */
#endif /* #ifdef CONFIG_RCU_BOOST */

        rnp->gp_tasks = NULL;
        rnp->exp_tasks = NULL;
        return retval;
}

/*
 * Do CPU-offline processing for preemptible RCU.
 */
static void rcu_preempt_offline_cpu(int cpu)
{
        __rcu_offline_cpu(cpu, &rcu_preempt_state);
}

#endif /* #ifdef CONFIG_HOTPLUG_CPU */

/*
 * Check for a quiescent state from the current CPU.  When a task blocks,
 * the task is recorded in the corresponding CPU's rcu_node structure,
 * which is checked elsewhere.
 *
 * Caller must disable hard irqs.
 */
static void rcu_preempt_check_callbacks(int cpu)
{
        struct task_struct *t = current;

        if (t->rcu_read_lock_nesting == 0) {
                rcu_preempt_qs(cpu);
                return;
        }
        if (per_cpu(rcu_preempt_data, cpu).qs_pending)
                t->rcu_read_unlock_special |= RCU_READ_UNLOCK_NEED_QS;
}

/*
 * Process callbacks for preemptible RCU.
 */
static void rcu_preempt_process_callbacks(void)
{
        __rcu_process_callbacks(&rcu_preempt_state,
                                &__get_cpu_var(rcu_preempt_data));
}

#ifdef CONFIG_RCU_BOOST

static void rcu_preempt_do_callbacks(void)
{
        rcu_do_batch(&rcu_preempt_state, &__get_cpu_var(rcu_preempt_data));
}

#endif /* #ifdef CONFIG_RCU_BOOST */

/*
 * Queue a preemptible-RCU callback for invocation after a grace period.
 */
void call_rcu(struct rcu_head *head, void (*func)(struct rcu_head *rcu))
{
        __call_rcu(head, func, &rcu_preempt_state);
}
EXPORT_SYMBOL_GPL(call_rcu);

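/*
 * Usage sketch (illustrative; struct foo, foo_reclaim(), gp, gp_lock,
 * old, and new are hypothetical): a caller embeds a struct rcu_head in
 * the protected structure, publishes a replacement, and lets call_rcu()
 * free the old version once a grace period has elapsed:
 *
 *      struct foo {
 *              int data;
 *              struct rcu_head rcu;
 *      };
 *
 *      static void foo_reclaim(struct rcu_head *head)
 *      {
 *              kfree(container_of(head, struct foo, rcu));
 *      }
 *
 *      spin_lock(&gp_lock);
 *      old = gp;
 *      rcu_assign_pointer(gp, new);
 *      spin_unlock(&gp_lock);
 *      call_rcu(&old->rcu, foo_reclaim);
 */
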
/**
 * synchronize_rcu - wait until a grace period has elapsed.
 *
 * Control will return to the caller some time after a full grace
 * period has elapsed, in other words after all currently executing RCU
 * read-side critical sections have completed.  Note, however, that
 * upon return from synchronize_rcu(), the caller might well be executing
 * concurrently with new RCU read-side critical sections that began while
 * synchronize_rcu() was waiting.  RCU read-side critical sections are
 * delimited by rcu_read_lock() and rcu_read_unlock(), and may be nested.
 */
void synchronize_rcu(void)
{
        struct rcu_synchronize rcu;

        if (!rcu_scheduler_active)
                return;

        init_rcu_head_on_stack(&rcu.head);
        init_completion(&rcu.completion);
        /* Will wake me after RCU finished. */
        call_rcu(&rcu.head, wakeme_after_rcu);
        /* Wait for it. */
        wait_for_completion(&rcu.completion);
        destroy_rcu_head_on_stack(&rcu.head);
}
EXPORT_SYMBOL_GPL(synchronize_rcu);

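/*
 * Usage sketch (illustrative; p and list_lock are hypothetical): the
 * classic update pattern unlinks an element, waits for pre-existing
 * readers with synchronize_rcu(), and only then frees the element:
 *
 *      spin_lock(&list_lock);
 *      list_del_rcu(&p->list);
 *      spin_unlock(&list_lock);
 *      synchronize_rcu();   (readers that might still see p drain here)
 *      kfree(p);
 */
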
static DECLARE_WAIT_QUEUE_HEAD(sync_rcu_preempt_exp_wq);
static long sync_rcu_preempt_exp_count;
static DEFINE_MUTEX(sync_rcu_preempt_exp_mutex);

/*
 * Return non-zero if there are any tasks in RCU read-side critical
 * sections blocking the current preemptible-RCU expedited grace period.
 * If there is no preemptible-RCU expedited grace period currently in
 * progress, returns zero unconditionally.
 */
static int rcu_preempted_readers_exp(struct rcu_node *rnp)
{
        return rnp->exp_tasks != NULL;
}

/*
 * Return non-zero if there is no RCU expedited grace period in progress
 * for the specified rcu_node structure, in other words, if all CPUs and
 * tasks covered by the specified rcu_node structure have done their bit
 * for the current expedited grace period.  Works only for preemptible
 * RCU -- other RCU implementations use other means.
 *
 * Caller must hold sync_rcu_preempt_exp_mutex.
 */
static int sync_rcu_preempt_exp_done(struct rcu_node *rnp)
{
        return !rcu_preempted_readers_exp(rnp) &&
               ACCESS_ONCE(rnp->expmask) == 0;
}

/*
 * Report the exit from RCU read-side critical section for the last task
 * that queued itself during or before the current expedited preemptible-RCU
 * grace period.  This event is reported either to the rcu_node structure on
 * which the task was queued or to one of that rcu_node structure's ancestors,
 * recursively up the tree.  (Calm down, calm down, we do the recursion
 * iteratively!)
 *
 * Caller must hold sync_rcu_preempt_exp_mutex.
 */
static void rcu_report_exp_rnp(struct rcu_state *rsp, struct rcu_node *rnp)
{
        unsigned long flags;
        unsigned long mask;

        raw_spin_lock_irqsave(&rnp->lock, flags);
        for (;;) {
                if (!sync_rcu_preempt_exp_done(rnp)) {
                        raw_spin_unlock_irqrestore(&rnp->lock, flags);
                        break;
                }
                if (rnp->parent == NULL) {
                        raw_spin_unlock_irqrestore(&rnp->lock, flags);
                        wake_up(&sync_rcu_preempt_exp_wq);
                        break;
                }
                mask = rnp->grpmask;
                raw_spin_unlock(&rnp->lock); /* irqs remain disabled */
                rnp = rnp->parent;
                raw_spin_lock(&rnp->lock); /* irqs already disabled */
                rnp->expmask &= ~mask;
        }
}

/*
 * Snapshot the tasks blocking the newly started preemptible-RCU expedited
 * grace period for the specified rcu_node structure.  If there are no such
 * tasks, report it up the rcu_node hierarchy.
 *
 * Caller must hold sync_rcu_preempt_exp_mutex and rsp->onofflock.
 */
static void
sync_rcu_preempt_exp_init(struct rcu_state *rsp, struct rcu_node *rnp)
{
        unsigned long flags;
        int must_wait = 0;

        raw_spin_lock_irqsave(&rnp->lock, flags);
        if (list_empty(&rnp->blkd_tasks))
                raw_spin_unlock_irqrestore(&rnp->lock, flags);
        else {
                rnp->exp_tasks = rnp->blkd_tasks.next;
                rcu_initiate_boost(rnp, flags);  /* releases rnp->lock */
                must_wait = 1;
        }
        if (!must_wait)
                rcu_report_exp_rnp(rsp, rnp);
}

/*
 * Wait for an rcu-preempt grace period, but expedite it.  The basic idea
 * is to invoke synchronize_sched_expedited() to push all the tasks to
 * the ->blkd_tasks lists and wait for those lists to drain.
 */
void synchronize_rcu_expedited(void)
{
        unsigned long flags;
        struct rcu_node *rnp;
        struct rcu_state *rsp = &rcu_preempt_state;
        long snap;
        int trycount = 0;

        smp_mb(); /* Caller's modifications seen first by other CPUs. */
        snap = ACCESS_ONCE(sync_rcu_preempt_exp_count) + 1;
        smp_mb(); /* Above access cannot bleed into critical section. */

        /*
         * Acquire lock, falling back to synchronize_rcu() if too many
         * lock-acquisition failures.  Of course, if someone does the
         * expedited grace period for us, just leave.
         */
        while (!mutex_trylock(&sync_rcu_preempt_exp_mutex)) {
                if (trycount++ < 10)
                        udelay(trycount * num_online_cpus());
                else {
                        synchronize_rcu();
                        return;
                }
                if ((ACCESS_ONCE(sync_rcu_preempt_exp_count) - snap) > 0)
                        goto mb_ret; /* Others did our work for us. */
        }
        if ((ACCESS_ONCE(sync_rcu_preempt_exp_count) - snap) > 0)
                goto unlock_mb_ret; /* Others did our work for us. */

        /* Force all RCU readers onto ->blkd_tasks lists. */
        synchronize_sched_expedited();

        raw_spin_lock_irqsave(&rsp->onofflock, flags);

        /* Initialize ->expmask for all non-leaf rcu_node structures. */
        rcu_for_each_nonleaf_node_breadth_first(rsp, rnp) {
                raw_spin_lock(&rnp->lock); /* irqs already disabled. */
                rnp->expmask = rnp->qsmaskinit;
                raw_spin_unlock(&rnp->lock); /* irqs remain disabled. */
        }

        /* Snapshot current state of ->blkd_tasks lists. */
        rcu_for_each_leaf_node(rsp, rnp)
                sync_rcu_preempt_exp_init(rsp, rnp);
        if (NUM_RCU_NODES > 1)
                sync_rcu_preempt_exp_init(rsp, rcu_get_root(rsp));

        raw_spin_unlock_irqrestore(&rsp->onofflock, flags);

        /* Wait for snapshotted ->blkd_tasks lists to drain. */
        rnp = rcu_get_root(rsp);
        wait_event(sync_rcu_preempt_exp_wq,
                   sync_rcu_preempt_exp_done(rnp));

        /* Clean up and exit. */
        smp_mb(); /* ensure expedited GP seen before counter increment. */
        ACCESS_ONCE(sync_rcu_preempt_exp_count)++;
unlock_mb_ret:
        mutex_unlock(&sync_rcu_preempt_exp_mutex);
mb_ret:
        smp_mb(); /* ensure subsequent action seen after grace period. */
}
EXPORT_SYMBOL_GPL(synchronize_rcu_expedited);

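/*
 * Usage sketch (illustrative; remove_entry() is hypothetical):
 * synchronize_rcu_expedited() is a drop-in replacement for
 * synchronize_rcu() that trades extra CPU overhead and disruption for
 * much lower grace-period latency, so it is best reserved for rare,
 * latency-sensitive update paths:
 *
 *      remove_entry(p);
 *      synchronize_rcu_expedited();
 *      kfree(p);
 */
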
/*
 * Check to see if there is any immediate preemptible-RCU-related work
 * to be done.
 */
static int rcu_preempt_pending(int cpu)
{
        return __rcu_pending(&rcu_preempt_state,
                             &per_cpu(rcu_preempt_data, cpu));
}

/*
 * Does preemptible RCU need the CPU to stay out of dynticks mode?
 */
static int rcu_preempt_needs_cpu(int cpu)
{
        return !!per_cpu(rcu_preempt_data, cpu).nxtlist;
}

/**
 * rcu_barrier - Wait until all in-flight call_rcu() callbacks complete.
 */
void rcu_barrier(void)
{
        _rcu_barrier(&rcu_preempt_state, call_rcu);
}
EXPORT_SYMBOL_GPL(rcu_barrier);

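/*
 * Usage sketch (illustrative; foo_exit() and foo_cache are
 * hypothetical): unlike synchronize_rcu(), which waits only for
 * pre-existing readers, rcu_barrier() waits for all previously queued
 * call_rcu() callbacks to be invoked, which is what a module-unload
 * path needs before its callback code and data structures vanish:
 *
 *      static void __exit foo_exit(void)
 *      {
 *              rcu_barrier();
 *              kmem_cache_destroy(foo_cache);
 *      }
 */
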
f41d911f | 841 | /* |
6cc68793 | 842 | * Initialize preemptible RCU's per-CPU data. |
f41d911f PM |
843 | */ |
844 | static void __cpuinit rcu_preempt_init_percpu_data(int cpu) | |
845 | { | |
846 | rcu_init_percpu_data(cpu, &rcu_preempt_state, 1); | |
847 | } | |
848 | ||
e74f4c45 | 849 | /* |
6cc68793 | 850 | * Move preemptible RCU's callbacks from dying CPU to other online CPU. |
e74f4c45 | 851 | */ |
29494be7 | 852 | static void rcu_preempt_send_cbs_to_online(void) |
e74f4c45 | 853 | { |
29494be7 | 854 | rcu_send_cbs_to_online(&rcu_preempt_state); |
e74f4c45 PM |
855 | } |
856 | ||
1eba8f84 | 857 | /* |
6cc68793 | 858 | * Initialize preemptible RCU's state structures. |
1eba8f84 PM |
859 | */ |
860 | static void __init __rcu_init_preempt(void) | |
861 | { | |
394f99a9 | 862 | rcu_init_one(&rcu_preempt_state, &rcu_preempt_data); |
1eba8f84 PM |
863 | } |
864 | ||
f41d911f | 865 | /* |
6cc68793 | 866 | * Check for a task exiting while in a preemptible-RCU read-side |
f41d911f PM |
867 | * critical section, clean up if so. No need to issue warnings, |
868 | * as debug_check_no_locks_held() already does this if lockdep | |
869 | * is enabled. | |
870 | */ | |
871 | void exit_rcu(void) | |
872 | { | |
873 | struct task_struct *t = current; | |
874 | ||
875 | if (t->rcu_read_lock_nesting == 0) | |
876 | return; | |
877 | t->rcu_read_lock_nesting = 1; | |
13491a0e | 878 | __rcu_read_unlock(); |
f41d911f PM |
879 | } |
880 | ||
881 | #else /* #ifdef CONFIG_TREE_PREEMPT_RCU */ | |
882 | ||
27f4d280 PM |
883 | static struct rcu_state *rcu_state = &rcu_sched_state; |
884 | ||
f41d911f PM |
885 | /* |
886 | * Tell them what RCU they are running. | |
887 | */ | |
0e0fc1c2 | 888 | static void __init rcu_bootup_announce(void) |
f41d911f PM |
889 | { |
890 | printk(KERN_INFO "Hierarchical RCU implementation.\n"); | |
26845c28 | 891 | rcu_bootup_announce_oddness(); |
f41d911f PM |
892 | } |
893 | ||
894 | /* | |
895 | * Return the number of RCU batches processed thus far for debug & stats. | |
896 | */ | |
897 | long rcu_batches_completed(void) | |
898 | { | |
899 | return rcu_batches_completed_sched(); | |
900 | } | |
901 | EXPORT_SYMBOL_GPL(rcu_batches_completed); | |
902 | ||
bf66f18e PM |
903 | /* |
904 | * Force a quiescent state for RCU, which, because there is no preemptible | |
905 | * RCU, becomes the same as rcu-sched. | |
906 | */ | |
907 | void rcu_force_quiescent_state(void) | |
908 | { | |
909 | rcu_sched_force_quiescent_state(); | |
910 | } | |
911 | EXPORT_SYMBOL_GPL(rcu_force_quiescent_state); | |
912 | ||
f41d911f | 913 | /* |
6cc68793 | 914 | * Because preemptible RCU does not exist, we never have to check for |
f41d911f PM |
915 | * CPUs being in quiescent states. |
916 | */ | |
c3422bea | 917 | static void rcu_preempt_note_context_switch(int cpu) |
f41d911f PM |
918 | { |
919 | } | |
920 | ||
fc2219d4 | 921 | /* |
6cc68793 | 922 | * Because preemptible RCU does not exist, there are never any preempted |
fc2219d4 PM |
923 | * RCU readers. |
924 | */ | |
27f4d280 | 925 | static int rcu_preempt_blocked_readers_cgp(struct rcu_node *rnp) |
fc2219d4 PM |
926 | { |
927 | return 0; | |
928 | } | |
929 | ||
b668c9cf PM |
930 | #ifdef CONFIG_HOTPLUG_CPU |
931 | ||
932 | /* Because preemptible RCU does not exist, no quieting of tasks. */ | |
d3f6bad3 | 933 | static void rcu_report_unblock_qs_rnp(struct rcu_node *rnp, unsigned long flags) |
b668c9cf | 934 | { |
1304afb2 | 935 | raw_spin_unlock_irqrestore(&rnp->lock, flags); |
b668c9cf PM |
936 | } |
937 | ||
938 | #endif /* #ifdef CONFIG_HOTPLUG_CPU */ | |
939 | ||
1ed509a2 | 940 | /* |
6cc68793 | 941 | * Because preemptible RCU does not exist, we never have to check for |
1ed509a2 PM |
942 | * tasks blocked within RCU read-side critical sections. |
943 | */ | |
944 | static void rcu_print_detail_task_stall(struct rcu_state *rsp) | |
945 | { | |
946 | } | |
947 | ||
f41d911f | 948 | /* |
6cc68793 | 949 | * Because preemptible RCU does not exist, we never have to check for |
f41d911f PM |
950 | * tasks blocked within RCU read-side critical sections. |
951 | */ | |
952 | static void rcu_print_task_stall(struct rcu_node *rnp) | |
953 | { | |
954 | } | |
955 | ||
53d84e00 PM |
956 | /* |
957 | * Because preemptible RCU does not exist, there is no need to suppress | |
958 | * its CPU stall warnings. | |
959 | */ | |
960 | static void rcu_preempt_stall_reset(void) | |
961 | { | |
962 | } | |
963 | ||
b0e165c0 | 964 | /* |
6cc68793 | 965 | * Because there is no preemptible RCU, there can be no readers blocked, |
49e29126 PM |
966 | * so there is no need to check for blocked tasks. So check only for |
967 | * bogus qsmask values. | |
b0e165c0 PM |
968 | */ |
969 | static void rcu_preempt_check_blocked_tasks(struct rcu_node *rnp) | |
970 | { | |
49e29126 | 971 | WARN_ON_ONCE(rnp->qsmask); |
b0e165c0 PM |
972 | } |
973 | ||
33f76148 PM |
974 | #ifdef CONFIG_HOTPLUG_CPU |
975 | ||
dd5d19ba | 976 | /* |
6cc68793 | 977 | * Because preemptible RCU does not exist, it never needs to migrate |
237c80c5 PM |
978 | * tasks that were blocked within RCU read-side critical sections, and |
979 | * such non-existent tasks cannot possibly have been blocking the current | |
980 | * grace period. | |
dd5d19ba | 981 | */ |
237c80c5 PM |
982 | static int rcu_preempt_offline_tasks(struct rcu_state *rsp, |
983 | struct rcu_node *rnp, | |
984 | struct rcu_data *rdp) | |
dd5d19ba | 985 | { |
237c80c5 | 986 | return 0; |
dd5d19ba PM |
987 | } |
988 | ||
33f76148 | 989 | /* |
6cc68793 | 990 | * Because preemptible RCU does not exist, it never needs CPU-offline |
33f76148 PM |
991 | * processing. |
992 | */ | |
993 | static void rcu_preempt_offline_cpu(int cpu) | |
994 | { | |
995 | } | |
996 | ||
997 | #endif /* #ifdef CONFIG_HOTPLUG_CPU */ | |
998 | ||
f41d911f | 999 | /* |
6cc68793 | 1000 | * Because preemptible RCU does not exist, it never has any callbacks |
f41d911f PM |
1001 | * to check. |
1002 | */ | |
1eba8f84 | 1003 | static void rcu_preempt_check_callbacks(int cpu) |
f41d911f PM |
1004 | { |
1005 | } | |
1006 | ||
1007 | /* | |
6cc68793 | 1008 | * Because preemptible RCU does not exist, it never has any callbacks |
f41d911f PM |
1009 | * to process. |
1010 | */ | |
1eba8f84 | 1011 | static void rcu_preempt_process_callbacks(void) |
f41d911f PM |
1012 | { |
1013 | } | |
1014 | ||
019129d5 PM |
1015 | /* |
1016 | * Wait for an rcu-preempt grace period, but make it happen quickly. | |
6cc68793 | 1017 | * But because preemptible RCU does not exist, map to rcu-sched. |
019129d5 PM |
1018 | */ |
1019 | void synchronize_rcu_expedited(void) | |
1020 | { | |
1021 | synchronize_sched_expedited(); | |
1022 | } | |
1023 | EXPORT_SYMBOL_GPL(synchronize_rcu_expedited); | |
1024 | ||
d9a3da06 PM |
1025 | #ifdef CONFIG_HOTPLUG_CPU |
1026 | ||
1027 | /* | |
6cc68793 | 1028 | * Because preemptible RCU does not exist, there is never any need to |
d9a3da06 PM |
1029 | * report on tasks preempted in RCU read-side critical sections during |
1030 | * expedited RCU grace periods. | |
1031 | */ | |
1032 | static void rcu_report_exp_rnp(struct rcu_state *rsp, struct rcu_node *rnp) | |
1033 | { | |
1034 | return; | |
1035 | } | |
1036 | ||
1037 | #endif /* #ifdef CONFIG_HOTPLUG_CPU */ | |
1038 | ||
f41d911f | 1039 | /* |
6cc68793 | 1040 | * Because preemptible RCU does not exist, it never has any work to do. |
f41d911f PM |
1041 | */ |
1042 | static int rcu_preempt_pending(int cpu) | |
1043 | { | |
1044 | return 0; | |
1045 | } | |
1046 | ||
1047 | /* | |
6cc68793 | 1048 | * Because preemptible RCU does not exist, it never needs any CPU. |
f41d911f PM |
1049 | */ |
1050 | static int rcu_preempt_needs_cpu(int cpu) | |
1051 | { | |
1052 | return 0; | |
1053 | } | |
1054 | ||
e74f4c45 | 1055 | /* |
6cc68793 | 1056 | * Because preemptible RCU does not exist, rcu_barrier() is just |
e74f4c45 PM |
1057 | * another name for rcu_barrier_sched(). |
1058 | */ | |
1059 | void rcu_barrier(void) | |
1060 | { | |
1061 | rcu_barrier_sched(); | |
1062 | } | |
1063 | EXPORT_SYMBOL_GPL(rcu_barrier); | |
1064 | ||
f41d911f | 1065 | /* |
6cc68793 | 1066 | * Because preemptible RCU does not exist, there is no per-CPU |
f41d911f PM |
1067 | * data to initialize. |
1068 | */ | |
1069 | static void __cpuinit rcu_preempt_init_percpu_data(int cpu) | |
1070 | { | |
1071 | } | |
1072 | ||
e74f4c45 | 1073 | /* |
6cc68793 | 1074 | * Because there is no preemptible RCU, there are no callbacks to move. |
e74f4c45 | 1075 | */ |
29494be7 | 1076 | static void rcu_preempt_send_cbs_to_online(void) |
e74f4c45 PM |
1077 | { |
1078 | } | |
1079 | ||
1eba8f84 | 1080 | /* |
6cc68793 | 1081 | * Because preemptible RCU does not exist, it need not be initialized. |
1eba8f84 PM |
1082 | */ |
1083 | static void __init __rcu_init_preempt(void) | |
1084 | { | |
1085 | } | |
1086 | ||
f41d911f | 1087 | #endif /* #else #ifdef CONFIG_TREE_PREEMPT_RCU */ |
8bd93a2c | 1088 | |
27f4d280 PM |
1089 | #ifdef CONFIG_RCU_BOOST |
1090 | ||
1091 | #include "rtmutex_common.h" | |
1092 | ||
0ea1f2eb PM |
1093 | #ifdef CONFIG_RCU_TRACE |
1094 | ||
1095 | static void rcu_initiate_boost_trace(struct rcu_node *rnp) | |
1096 | { | |
1097 | if (list_empty(&rnp->blkd_tasks)) | |
1098 | rnp->n_balk_blkd_tasks++; | |
1099 | else if (rnp->exp_tasks == NULL && rnp->gp_tasks == NULL) | |
1100 | rnp->n_balk_exp_gp_tasks++; | |
1101 | else if (rnp->gp_tasks != NULL && rnp->boost_tasks != NULL) | |
1102 | rnp->n_balk_boost_tasks++; | |
1103 | else if (rnp->gp_tasks != NULL && rnp->qsmask != 0) | |
1104 | rnp->n_balk_notblocked++; | |
1105 | else if (rnp->gp_tasks != NULL && | |
a9f4793d | 1106 | ULONG_CMP_LT(jiffies, rnp->boost_time)) |
0ea1f2eb PM |
1107 | rnp->n_balk_notyet++; |
1108 | else | |
1109 | rnp->n_balk_nos++; | |
1110 | } | |
1111 | ||
1112 | #else /* #ifdef CONFIG_RCU_TRACE */ | |
1113 | ||
1114 | static void rcu_initiate_boost_trace(struct rcu_node *rnp) | |
1115 | { | |
1116 | } | |
1117 | ||
1118 | #endif /* #else #ifdef CONFIG_RCU_TRACE */ | |
1119 | ||
27f4d280 PM |
1120 | /* |
1121 | * Carry out RCU priority boosting on the task indicated by ->exp_tasks | |
1122 | * or ->boost_tasks, advancing the pointer to the next task in the | |
1123 | * ->blkd_tasks list. | |
1124 | * | |
1125 | * Note that irqs must be enabled: boosting the task can block. | |
1126 | * Returns 1 if there are more tasks needing to be boosted. | |
1127 | */ | |
1128 | static int rcu_boost(struct rcu_node *rnp) | |
1129 | { | |
1130 | unsigned long flags; | |
1131 | struct rt_mutex mtx; | |
1132 | struct task_struct *t; | |
1133 | struct list_head *tb; | |
1134 | ||
1135 | if (rnp->exp_tasks == NULL && rnp->boost_tasks == NULL) | |
1136 | return 0; /* Nothing left to boost. */ | |
1137 | ||
1138 | raw_spin_lock_irqsave(&rnp->lock, flags); | |
1139 | ||
1140 | /* | |
1141 | * Recheck under the lock: all tasks in need of boosting | |
1142 | * might exit their RCU read-side critical sections on their own. | |
1143 | */ | |
1144 | if (rnp->exp_tasks == NULL && rnp->boost_tasks == NULL) { | |
1145 | raw_spin_unlock_irqrestore(&rnp->lock, flags); | |
1146 | return 0; | |
1147 | } | |
1148 | ||
1149 | /* | |
1150 | * Preferentially boost tasks blocking expedited grace periods. | |
1151 | * This cannot starve the normal grace periods because a second | |
1152 | * expedited grace period must boost all blocked tasks, including | |
1153 | * those blocking the pre-existing normal grace period. | |
1154 | */ | |
0ea1f2eb | 1155 | if (rnp->exp_tasks != NULL) { |
27f4d280 | 1156 | tb = rnp->exp_tasks; |
0ea1f2eb PM |
1157 | rnp->n_exp_boosts++; |
1158 | } else { | |
27f4d280 | 1159 | tb = rnp->boost_tasks; |
0ea1f2eb PM |
1160 | rnp->n_normal_boosts++; |
1161 | } | |
1162 | rnp->n_tasks_boosted++; | |
27f4d280 PM |
1163 | |
1164 | /* | |
1165 | * We boost task t by manufacturing an rt_mutex that appears to | |
1166 | * be held by task t. We leave a pointer to that rt_mutex where | |
1167 | * task t can find it, and task t will release the mutex when it | |
1168 | * exits its outermost RCU read-side critical section. Then | |
1169 | * simply acquiring this artificial rt_mutex will boost task | |
1170 | * t's priority. (Thanks to tglx for suggesting this approach!) | |
1171 | * | |
1172 | * Note that task t must acquire rnp->lock to remove itself from | |
1173 | * the ->blkd_tasks list, which it will do from exit() if from | |
1174 | * nowhere else. We therefore are guaranteed that task t will | |
1175 | * stay around at least until we drop rnp->lock. Note that | |
1176 | * rnp->lock also resolves races between our priority boosting | |
1177 | * and task t's exiting its outermost RCU read-side critical | |
1178 | * section. | |
1179 | */ | |
1180 | t = container_of(tb, struct task_struct, rcu_node_entry); | |
1181 | rt_mutex_init_proxy_locked(&mtx, t); | |
1182 | t->rcu_boost_mutex = &mtx; | |
7765be2f | 1183 | t->rcu_boosted = 1; |
27f4d280 PM |
1184 | raw_spin_unlock_irqrestore(&rnp->lock, flags); |
1185 | rt_mutex_lock(&mtx); /* Side effect: boosts task t's priority. */ | |
1186 | rt_mutex_unlock(&mtx); /* Keep lockdep happy. */ | |
1187 | ||
1188 | return rnp->exp_tasks != NULL || rnp->boost_tasks != NULL; | |
1189 | } | |
1190 | ||
1191 | /* | |
1192 | * Timer handler to initiate waking up of boost kthreads that | |
1193 | * have yielded the CPU due to excessive numbers of tasks to | |
1194 | * boost. We wake up the per-rcu_node kthread, which in turn | |
1195 | * will wake up the booster kthread. | |
1196 | */ | |
1197 | static void rcu_boost_kthread_timer(unsigned long arg) | |
1198 | { | |
1217ed1b | 1199 | invoke_rcu_node_kthread((struct rcu_node *)arg); |
27f4d280 PM |
1200 | } |
1201 | ||
1202 | /* | |
1203 | * Priority-boosting kthread. One per leaf rcu_node and one for the | |
1204 | * root rcu_node. | |
1205 | */ | |
1206 | static int rcu_boost_kthread(void *arg) | |
1207 | { | |
1208 | struct rcu_node *rnp = (struct rcu_node *)arg; | |
1209 | int spincnt = 0; | |
1210 | int more2boost; | |
1211 | ||
1212 | for (;;) { | |
d71df90e | 1213 | rnp->boost_kthread_status = RCU_KTHREAD_WAITING; |
08bca60a | 1214 | rcu_wait(rnp->boost_tasks || rnp->exp_tasks); |
d71df90e | 1215 | rnp->boost_kthread_status = RCU_KTHREAD_RUNNING; |
27f4d280 PM |
1216 | more2boost = rcu_boost(rnp); |
1217 | if (more2boost) | |
1218 | spincnt++; | |
1219 | else | |
1220 | spincnt = 0; | |
1221 | if (spincnt > 10) { | |
1222 | rcu_yield(rcu_boost_kthread_timer, (unsigned long)rnp); | |
1223 | spincnt = 0; | |
1224 | } | |
1225 | } | |
1217ed1b | 1226 | /* NOTREACHED */ |
27f4d280 PM |
1227 | return 0; |
1228 | } | |
1229 | ||
1230 | /* | |
1231 | * Check to see if it is time to start boosting RCU readers that are | |
1232 | * blocking the current grace period, and, if so, tell the per-rcu_node | |
1233 | * kthread to start boosting them. If there is an expedited grace | |
1234 | * period in progress, it is always time to boost. | |
1235 | * | |
1217ed1b PM |
1236 | * The caller must hold rnp->lock, which this function releases, |
1237 | * but irqs remain disabled. The ->boost_kthread_task is immortal, | |
1238 | * so we don't need to worry about it going away. | |
27f4d280 | 1239 | */ |
1217ed1b | 1240 | static void rcu_initiate_boost(struct rcu_node *rnp, unsigned long flags) |
27f4d280 PM |
1241 | { |
1242 | struct task_struct *t; | |
1243 | ||
0ea1f2eb PM |
1244 | if (!rcu_preempt_blocked_readers_cgp(rnp) && rnp->exp_tasks == NULL) { |
1245 | rnp->n_balk_exp_gp_tasks++; | |
1217ed1b | 1246 | raw_spin_unlock_irqrestore(&rnp->lock, flags); |
27f4d280 | 1247 | return; |
0ea1f2eb | 1248 | } |
27f4d280 PM |
1249 | if (rnp->exp_tasks != NULL || |
1250 | (rnp->gp_tasks != NULL && | |
1251 | rnp->boost_tasks == NULL && | |
1252 | rnp->qsmask == 0 && | |
1253 | ULONG_CMP_GE(jiffies, rnp->boost_time))) { | |
1254 | if (rnp->exp_tasks == NULL) | |
1255 | rnp->boost_tasks = rnp->gp_tasks; | |
1217ed1b | 1256 | raw_spin_unlock_irqrestore(&rnp->lock, flags); |
27f4d280 PM |
1257 | t = rnp->boost_kthread_task; |
1258 | if (t != NULL) | |
1259 | wake_up_process(t); | |
1217ed1b | 1260 | } else { |
0ea1f2eb | 1261 | rcu_initiate_boost_trace(rnp); |
1217ed1b PM |
1262 | raw_spin_unlock_irqrestore(&rnp->lock, flags); |
1263 | } | |
27f4d280 PM |
1264 | } |
1265 | ||
a46e0899 PM |
1266 | /* |
1267 | * Wake up the per-CPU kthread to invoke RCU callbacks. | |
1268 | */ | |
1269 | static void invoke_rcu_callbacks_kthread(void) | |
1270 | { | |
1271 | unsigned long flags; | |
1272 | ||
1273 | local_irq_save(flags); | |
1274 | __this_cpu_write(rcu_cpu_has_work, 1); | |
1275 | if (__this_cpu_read(rcu_cpu_kthread_task) == NULL) { | |
1276 | local_irq_restore(flags); | |
1277 | return; | |
1278 | } | |
1279 | wake_up_process(__this_cpu_read(rcu_cpu_kthread_task)); | |
1280 | local_irq_restore(flags); | |
1281 | } | |
1282 | ||
0f962a5e PM |
1283 | /* |
1284 | * Set the affinity of the boost kthread. The CPU-hotplug locks are | |
1285 | * held, so no one should be messing with the existence of the boost | |
1286 | * kthread. | |
1287 | */ | |
27f4d280 PM |
1288 | static void rcu_boost_kthread_setaffinity(struct rcu_node *rnp, |
1289 | cpumask_var_t cm) | |
1290 | { | |
27f4d280 PM |
1291 | struct task_struct *t; |
1292 | ||
27f4d280 PM |
1293 | t = rnp->boost_kthread_task; |
1294 | if (t != NULL) | |
1295 | set_cpus_allowed_ptr(rnp->boost_kthread_task, cm); | |
27f4d280 PM |
1296 | } |
1297 | ||
1298 | #define RCU_BOOST_DELAY_JIFFIES DIV_ROUND_UP(CONFIG_RCU_BOOST_DELAY * HZ, 1000) | |
1299 | ||
1300 | /* | |
1301 | * Do priority-boost accounting for the start of a new grace period. | |
1302 | */ | |
1303 | static void rcu_preempt_boost_start_gp(struct rcu_node *rnp) | |
1304 | { | |
1305 | rnp->boost_time = jiffies + RCU_BOOST_DELAY_JIFFIES; | |
1306 | } | |
1307 | ||
27f4d280 PM |
1308 | /* |
1309 | * Create an RCU-boost kthread for the specified node if one does not | |
1310 | * already exist. We only create this kthread for preemptible RCU. | |
1311 | * Returns zero if all is well, a negated errno otherwise. | |
1312 | */ | |
1313 | static int __cpuinit rcu_spawn_one_boost_kthread(struct rcu_state *rsp, | |
1314 | struct rcu_node *rnp, | |
1315 | int rnp_index) | |
1316 | { | |
1317 | unsigned long flags; | |
1318 | struct sched_param sp; | |
1319 | struct task_struct *t; | |
1320 | ||
1321 | if (&rcu_preempt_state != rsp) | |
1322 | return 0; | |
a46e0899 | 1323 | rsp->boost = 1; |
27f4d280 PM |
1324 | if (rnp->boost_kthread_task != NULL) |
1325 | return 0; | |
1326 | t = kthread_create(rcu_boost_kthread, (void *)rnp, | |
1327 | "rcub%d", rnp_index); | |
1328 | if (IS_ERR(t)) | |
1329 | return PTR_ERR(t); | |
1330 | raw_spin_lock_irqsave(&rnp->lock, flags); | |
1331 | rnp->boost_kthread_task = t; | |
1332 | raw_spin_unlock_irqrestore(&rnp->lock, flags); | |
27f4d280 PM |
1333 | sp.sched_priority = RCU_KTHREAD_PRIO; |
1334 | sched_setscheduler_nocheck(t, SCHED_FIFO, &sp); | |
9a432736 | 1335 | wake_up_process(t); /* get to TASK_INTERRUPTIBLE quickly. */ |
27f4d280 PM |
1336 | return 0; |
1337 | } | |
1338 | ||
f8b7fc6b PM |
1339 | #ifdef CONFIG_HOTPLUG_CPU |
1340 | ||
1341 | /* | |
1342 | * Stop the RCU's per-CPU kthread when its CPU goes offline,. | |
1343 | */ | |
1344 | static void rcu_stop_cpu_kthread(int cpu) | |
1345 | { | |
1346 | struct task_struct *t; | |
1347 | ||
1348 | /* Stop the CPU's kthread. */ | |
1349 | t = per_cpu(rcu_cpu_kthread_task, cpu); | |
1350 | if (t != NULL) { | |
1351 | per_cpu(rcu_cpu_kthread_task, cpu) = NULL; | |
1352 | kthread_stop(t); | |
1353 | } | |
1354 | } | |
1355 | ||
1356 | #endif /* #ifdef CONFIG_HOTPLUG_CPU */ | |
1357 | ||
1358 | static void rcu_kthread_do_work(void) | |
1359 | { | |
1360 | rcu_do_batch(&rcu_sched_state, &__get_cpu_var(rcu_sched_data)); | |
1361 | rcu_do_batch(&rcu_bh_state, &__get_cpu_var(rcu_bh_data)); | |
1362 | rcu_preempt_do_callbacks(); | |
1363 | } | |
1364 | ||
1365 | /* | |
1366 | * Wake up the specified per-rcu_node-structure kthread. | |
1367 | * Because the per-rcu_node kthreads are immortal, we don't need | |
1368 | * to do anything to keep them alive. | |
1369 | */ | |
1370 | static void invoke_rcu_node_kthread(struct rcu_node *rnp) | |
1371 | { | |
1372 | struct task_struct *t; | |
1373 | ||
1374 | t = rnp->node_kthread_task; | |
1375 | if (t != NULL) | |
1376 | wake_up_process(t); | |
1377 | } | |
1378 | ||
1379 | /* | |
1380 | * Set the specified CPU's kthread to run RT or not, as specified by | |
1381 | * the to_rt argument. The CPU-hotplug locks are held, so the task | |
1382 | * is not going away. | |
1383 | */ | |
1384 | static void rcu_cpu_kthread_setrt(int cpu, int to_rt) | |
1385 | { | |
1386 | int policy; | |
1387 | struct sched_param sp; | |
1388 | struct task_struct *t; | |
1389 | ||
1390 | t = per_cpu(rcu_cpu_kthread_task, cpu); | |
1391 | if (t == NULL) | |
1392 | return; | |
1393 | if (to_rt) { | |
1394 | policy = SCHED_FIFO; | |
1395 | sp.sched_priority = RCU_KTHREAD_PRIO; | |
1396 | } else { | |
1397 | policy = SCHED_NORMAL; | |
1398 | sp.sched_priority = 0; | |
1399 | } | |
1400 | sched_setscheduler_nocheck(t, policy, &sp); | |
1401 | } | |
1402 | ||
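/*
 * Sketch of a plausible caller of rcu_cpu_kthread_setrt() (hypothetical
 * CPU-hotplug notifier fragment, not code from this file): run the
 * kthread at RT priority while its CPU is fully online, and drop it to
 * SCHED_NORMAL while the CPU is going down so that the offline path is
 * not competing with an RT task.
 *
 *	case CPU_ONLINE:
 *	case CPU_DOWN_FAILED:
 *		rcu_node_kthread_setaffinity(rnp, -1);
 *		rcu_cpu_kthread_setrt(cpu, 1);
 *		break;
 *	case CPU_DOWN_PREPARE:
 *		rcu_node_kthread_setaffinity(rnp, cpu);
 *		rcu_cpu_kthread_setrt(cpu, 0);
 *		break;
 */
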
1403 | /* | |
1404 | * Timer handler to initiate the waking up of per-CPU kthreads that | |
1405 | * have yielded the CPU due to excess numbers of RCU callbacks. | |
1406 | * We wake up the per-rcu_node kthread, which in turn will wake up | |
1407 | * the booster kthread. | |
1408 | */ | |
1409 | static void rcu_cpu_kthread_timer(unsigned long arg) | |
1410 | { | |
1411 | struct rcu_data *rdp = per_cpu_ptr(rcu_state->rda, arg); | |
1412 | struct rcu_node *rnp = rdp->mynode; | |
1413 | ||
1414 | atomic_or(rdp->grpmask, &rnp->wakemask); | |
1415 | invoke_rcu_node_kthread(rnp); | |
1416 | } | |
1417 | ||
1418 | /* | |
1419 | * Drop to non-real-time priority and yield, but only after posting a | |
1420 | * timer that will cause us to regain our real-time priority if we | |
1421 | * remain preempted. Either way, we restore our real-time priority | |
1422 | * before returning. | |
1423 | */ | |
1424 | static void rcu_yield(void (*f)(unsigned long), unsigned long arg) | |
1425 | { | |
1426 | struct sched_param sp; | |
1427 | struct timer_list yield_timer; | |
1428 | ||
1429 | setup_timer_on_stack(&yield_timer, f, arg); | |
1430 | mod_timer(&yield_timer, jiffies + 2); | |
1431 | sp.sched_priority = 0; | |
1432 | sched_setscheduler_nocheck(current, SCHED_NORMAL, &sp); | |
1433 | set_user_nice(current, 19); | |
1434 | schedule(); | |
1435 | sp.sched_priority = RCU_KTHREAD_PRIO; | |
1436 | sched_setscheduler_nocheck(current, SCHED_FIFO, &sp); | |
1437 | del_timer(&yield_timer); | |
1438 | } | |
1439 | ||
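/*
 * Illustrative timeline for rcu_yield(), assuming this kthread stays
 * preempted after dropping its priority:
 *
 *	t+0:	post yield_timer for t+2, drop to SCHED_NORMAL and
 *		nice 19, then schedule() away.
 *	t+2:	yield_timer fires; the handler (rcu_cpu_kthread_timer()
 *		for the per-CPU kthreads) sets this CPU's bit in
 *		rnp->wakemask, and the per-rcu_node kthread restores
 *		our SCHED_FIFO priority.
 *	later:	we run again, re-assert SCHED_FIFO locally, and delete
 *		the timer (a no-op if it has already fired).
 *
 * If we instead get the CPU back before t+2, we simply restore RT
 * priority and delete the still-pending timer.
 */
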
1440 | /* | |
1441 | * Handle cases where the rcu_cpu_kthread() ends up on the wrong CPU. | |
1442 | * This can happen while the corresponding CPU is either coming online | |
1443 | * or going offline. We cannot wait until the CPU is fully online | |
1444 | * before starting the kthread, because the various notifier functions | |
1445 | * can wait for RCU grace periods. So we park rcu_cpu_kthread() until | |
1446 | * the corresponding CPU is online. | |
1447 | * | |
1448 | * Return 1 if the kthread needs to stop, 0 otherwise. | |
1449 | * | |
1450 | * Caller must disable bh. This function can momentarily enable it. | |
1451 | */ | |
1452 | static int rcu_cpu_kthread_should_stop(int cpu) | |
1453 | { | |
1454 | while (cpu_is_offline(cpu) || | |
1455 | !cpumask_equal(¤t->cpus_allowed, cpumask_of(cpu)) || | |
1456 | smp_processor_id() != cpu) { | |
1457 | if (kthread_should_stop()) | |
1458 | return 1; | |
1459 | per_cpu(rcu_cpu_kthread_status, cpu) = RCU_KTHREAD_OFFCPU; | |
1460 | per_cpu(rcu_cpu_kthread_cpu, cpu) = raw_smp_processor_id(); | |
1461 | local_bh_enable(); | |
1462 | schedule_timeout_uninterruptible(1); | |
1463 | if (!cpumask_equal(¤t->cpus_allowed, cpumask_of(cpu))) | |
1464 | set_cpus_allowed_ptr(current, cpumask_of(cpu)); | |
1465 | local_bh_disable(); | |
1466 | } | |
1467 | per_cpu(rcu_cpu_kthread_cpu, cpu) = cpu; | |
1468 | return 0; | |
1469 | } | |
1470 | ||
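/*
 * Example of the parking loop above: suppose rcuc3 was spawned while
 * CPU 3 was still coming online, so it was never bound and first runs
 * on some other CPU.  Then cpu_is_offline(3), the affinity check, or
 * the smp_processor_id() check fails, and the kthread sleeps in
 * one-jiffy steps, re-asserting its affinity each pass, until CPU 3 is
 * online and the scheduler has actually migrated it there.
 */
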
1471 | /* | |
1472 | * Per-CPU kernel thread that invokes RCU callbacks. This replaces the | |
1473 | * earlier RCU softirq. | |
1474 | */ | |
1475 | static int rcu_cpu_kthread(void *arg) | |
1476 | { | |
1477 | int cpu = (int)(long)arg; | |
1478 | unsigned long flags; | |
1479 | int spincnt = 0; | |
1480 | unsigned int *statusp = &per_cpu(rcu_cpu_kthread_status, cpu); | |
1481 | char work; | |
1482 | char *workp = &per_cpu(rcu_cpu_has_work, cpu); | |
1483 | ||
1484 | for (;;) { | |
1485 | *statusp = RCU_KTHREAD_WAITING; | |
1486 | rcu_wait(*workp != 0 || kthread_should_stop()); | |
1487 | local_bh_disable(); | |
1488 | if (rcu_cpu_kthread_should_stop(cpu)) { | |
1489 | local_bh_enable(); | |
1490 | break; | |
1491 | } | |
1492 | *statusp = RCU_KTHREAD_RUNNING; | |
1493 | per_cpu(rcu_cpu_kthread_loops, cpu)++; | |
1494 | local_irq_save(flags); | |
1495 | work = *workp; | |
1496 | *workp = 0; | |
1497 | local_irq_restore(flags); | |
1498 | if (work) | |
1499 | rcu_kthread_do_work(); | |
1500 | local_bh_enable(); | |
1501 | if (*workp != 0) | |
1502 | spincnt++; | |
1503 | else | |
1504 | spincnt = 0; | |
1505 | if (spincnt > 10) { | |
1506 | *statusp = RCU_KTHREAD_YIELDING; | |
1507 | rcu_yield(rcu_cpu_kthread_timer, (unsigned long)cpu); | |
1508 | spincnt = 0; | |
1509 | } | |
1510 | } | |
1511 | *statusp = RCU_KTHREAD_STOPPED; | |
1512 | return 0; | |
1513 | } | |
1514 | ||
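/*
 * The spincnt heuristic in rcu_cpu_kthread(): if more than ten
 * consecutive passes through the loop each found further work already
 * posted (*workp != 0), this SCHED_FIFO kthread is in danger of
 * monopolizing its CPU, so it yields via rcu_yield() and relies on the
 * timer path above to restore its RT priority shortly thereafter.
 */
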
1515 | /* | |
1516 | * Spawn a per-CPU kthread, setting up affinity and priority. | |
1517 | * Because the CPU hotplug lock is held, no other CPU will be attempting | |
1518 | * to manipulate rcu_cpu_kthread_task. There might be another CPU | |
1519 | * attempting to access it during boot, but the locking in kthread_bind() | |
1520 | * will enforce sufficient ordering. | |
1521 | * | |
1522 | * Please note that we cannot simply refuse to wake up the per-CPU | |
1523 | * kthread because kthreads are created in TASK_UNINTERRUPTIBLE state, | |
1524 | * which can result in softlockup complaints if the task ends up being | |
1525 | * idle for more than a couple of minutes. | |
1526 | * | |
1527 | * However, please note also that we cannot bind the per-CPU kthread to its | |
1528 | * CPU until that CPU is fully online. We also cannot wait until the | |
1529 | * CPU is fully online before we create its per-CPU kthread, as this would | |
1530 | * deadlock the system when CPU notifiers tried waiting for grace | |
1531 | * periods. So we bind the per-CPU kthread to its CPU only if the CPU | |
1532 | * is online. If its CPU is not yet fully online, then the code in | |
1533 | * rcu_cpu_kthread() will wait until it is fully online, and then do | |
1534 | * the binding. | |
1535 | */ | |
1536 | static int __cpuinit rcu_spawn_one_cpu_kthread(int cpu) | |
1537 | { | |
1538 | struct sched_param sp; | |
1539 | struct task_struct *t; | |
1540 | ||
b0d30417 | 1541 | if (!rcu_scheduler_fully_active || |
f8b7fc6b PM |
1542 | per_cpu(rcu_cpu_kthread_task, cpu) != NULL) |
1543 | return 0; | |
1544 | t = kthread_create(rcu_cpu_kthread, (void *)(long)cpu, "rcuc%d", cpu); | |
1545 | if (IS_ERR(t)) | |
1546 | return PTR_ERR(t); | |
1547 | if (cpu_online(cpu)) | |
1548 | kthread_bind(t, cpu); | |
1549 | per_cpu(rcu_cpu_kthread_cpu, cpu) = cpu; | |
1550 | WARN_ON_ONCE(per_cpu(rcu_cpu_kthread_task, cpu) != NULL); | |
1551 | sp.sched_priority = RCU_KTHREAD_PRIO; | |
1552 | sched_setscheduler_nocheck(t, SCHED_FIFO, &sp); | |
1553 | per_cpu(rcu_cpu_kthread_task, cpu) = t; | |
1554 | wake_up_process(t); /* Get to TASK_INTERRUPTIBLE quickly. */ | |
1555 | return 0; | |
1556 | } | |
1557 | ||
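/*
 * Illustrative onlining sequence for the binding rules above: an early
 * notifier-time call (e.g. via rcu_prepare_kthreads(), defined later in
 * this file) spawns rcuc3 while cpu_online(3) is still false, so
 * kthread_bind() is skipped; rcuc3 then parks itself in
 * rcu_cpu_kthread_should_stop() until CPU 3 comes online and performs
 * the binding itself via set_cpus_allowed_ptr().
 */
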
1558 | /* | |
1559 | * Per-rcu_node kthread, which is in charge of waking up the per-CPU | |
1560 | * kthreads when needed. We ignore requests to wake up kthreads | |
1561 | * for offline CPUs, which is OK because force_quiescent_state() | |
1562 | * takes care of this case. | |
1563 | */ | |
1564 | static int rcu_node_kthread(void *arg) | |
1565 | { | |
1566 | int cpu; | |
1567 | unsigned long flags; | |
1568 | unsigned long mask; | |
1569 | struct rcu_node *rnp = (struct rcu_node *)arg; | |
1570 | struct sched_param sp; | |
1571 | struct task_struct *t; | |
1572 | ||
1573 | for (;;) { | |
1574 | rnp->node_kthread_status = RCU_KTHREAD_WAITING; | |
1575 | rcu_wait(atomic_read(&rnp->wakemask) != 0); | |
1576 | rnp->node_kthread_status = RCU_KTHREAD_RUNNING; | |
1577 | raw_spin_lock_irqsave(&rnp->lock, flags); | |
1578 | mask = atomic_xchg(&rnp->wakemask, 0); | |
1579 | rcu_initiate_boost(rnp, flags); /* releases rnp->lock. */ | |
1580 | for (cpu = rnp->grplo; cpu <= rnp->grphi; cpu++, mask >>= 1) { | |
1581 | if ((mask & 0x1) == 0) | |
1582 | continue; | |
1583 | preempt_disable(); | |
1584 | t = per_cpu(rcu_cpu_kthread_task, cpu); | |
1585 | if (!cpu_online(cpu) || t == NULL) { | |
1586 | preempt_enable(); | |
1587 | continue; | |
1588 | } | |
1589 | per_cpu(rcu_cpu_has_work, cpu) = 1; | |
1590 | sp.sched_priority = RCU_KTHREAD_PRIO; | |
1591 | sched_setscheduler_nocheck(t, SCHED_FIFO, &sp); | |
1592 | preempt_enable(); | |
1593 | } | |
1594 | } | |
1595 | /* NOTREACHED */ | |
1596 | rnp->node_kthread_status = RCU_KTHREAD_STOPPED; | |
1597 | return 0; | |
1598 | } | |
1599 | ||
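/*
 * Worked example of the wakemask scan above (values illustrative): for
 * a leaf rcu_node with grplo=4, grphi=7 and a fetched wakemask of 0x5
 * (binary 0101), the loop visits CPUs 4 through 7, shifting the mask
 * right each pass, so it kicks the kthreads for CPUs 4 and 6 and skips
 * CPUs 5 and 7.
 */
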
1600 | /* | |
1601 | * Set the per-rcu_node kthread's affinity to cover all CPUs that are | |
1602 | * served by the rcu_node in question. The CPU hotplug lock is still | |
1603 | * held, so the value of rnp->qsmaskinit will be stable. | |
1604 | * | |
1605 | * We don't include outgoingcpu in the affinity set; use -1 if there is |
1606 | * no outgoing CPU. If there are no CPUs left in the affinity set, | |
1607 | * this function allows the kthread to execute on any CPU. | |
1608 | */ | |
1609 | static void rcu_node_kthread_setaffinity(struct rcu_node *rnp, int outgoingcpu) | |
1610 | { | |
1611 | cpumask_var_t cm; | |
1612 | int cpu; | |
1613 | unsigned long mask = rnp->qsmaskinit; | |
1614 | ||
1615 | if (rnp->node_kthread_task == NULL) | |
1616 | return; | |
1617 | if (!alloc_cpumask_var(&cm, GFP_KERNEL)) | |
1618 | return; | |
1619 | cpumask_clear(cm); | |
1620 | for (cpu = rnp->grplo; cpu <= rnp->grphi; cpu++, mask >>= 1) | |
1621 | if ((mask & 0x1) && cpu != outgoingcpu) | |
1622 | cpumask_set_cpu(cpu, cm); | |
1623 | if (cpumask_weight(cm) == 0) { | |
1624 | cpumask_setall(cm); | |
1625 | for (cpu = rnp->grplo; cpu <= rnp->grphi; cpu++) | |
1626 | cpumask_clear_cpu(cpu, cm); | |
1627 | WARN_ON_ONCE(cpumask_weight(cm) == 0); | |
1628 | } | |
1629 | set_cpus_allowed_ptr(rnp->node_kthread_task, cm); | |
1630 | rcu_boost_kthread_setaffinity(rnp, cm); | |
1631 | free_cpumask_var(cm); | |
1632 | } | |
1633 | ||
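/*
 * Worked example for the affinity computation above (values
 * illustrative): with grplo=0, grphi=3, qsmaskinit=0xf, and
 * outgoingcpu=2, the resulting mask is {0,1,3}.  The
 * cpumask_weight()==0 fallback fires only when the outgoing CPU was
 * the last one covered by this rcu_node; in that case the kthread is
 * permitted on every CPU *except* those in this rcu_node's range, on
 * the theory that any other CPU beats a soon-to-be-dead one.
 */
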
1634 | /* | |
1635 | * Spawn a per-rcu_node kthread, setting priority and affinity. | |
1636 | * Called during boot before online/offline can happen, or, if | |
1637 | * during runtime, with the main CPU-hotplug locks held. So only | |
1638 | * one of these can be executing at a time. | |
1639 | */ | |
1640 | static int __cpuinit rcu_spawn_one_node_kthread(struct rcu_state *rsp, | |
1641 | struct rcu_node *rnp) | |
1642 | { | |
1643 | unsigned long flags; | |
1644 | int rnp_index = rnp - &rsp->node[0]; | |
1645 | struct sched_param sp; | |
1646 | struct task_struct *t; | |
1647 | ||
b0d30417 | 1648 | if (!rcu_scheduler_fully_active || |
f8b7fc6b PM |
1649 | rnp->qsmaskinit == 0) |
1650 | return 0; | |
1651 | if (rnp->node_kthread_task == NULL) { | |
1652 | t = kthread_create(rcu_node_kthread, (void *)rnp, | |
1653 | "rcun%d", rnp_index); | |
1654 | if (IS_ERR(t)) | |
1655 | return PTR_ERR(t); | |
1656 | raw_spin_lock_irqsave(&rnp->lock, flags); | |
1657 | rnp->node_kthread_task = t; | |
1658 | raw_spin_unlock_irqrestore(&rnp->lock, flags); | |
1659 | sp.sched_priority = 99; /* Highest SCHED_FIFO priority. */ |
1660 | sched_setscheduler_nocheck(t, SCHED_FIFO, &sp); |
1661 | wake_up_process(t); /* Get to TASK_INTERRUPTIBLE quickly. */ |
1662 | } | |
1663 | return rcu_spawn_one_boost_kthread(rsp, rnp, rnp_index); | |
1664 | } | |
1665 | ||
1666 | /* | |
1667 | * Spawn all kthreads -- called as soon as the scheduler is running. | |
1668 | */ | |
1669 | static int __init rcu_spawn_kthreads(void) | |
1670 | { | |
1671 | int cpu; | |
1672 | struct rcu_node *rnp; | |
1673 | ||
b0d30417 | 1674 | rcu_scheduler_fully_active = 1; |
f8b7fc6b PM |
1675 | for_each_possible_cpu(cpu) { |
1676 | per_cpu(rcu_cpu_has_work, cpu) = 0; | |
1677 | if (cpu_online(cpu)) | |
1678 | (void)rcu_spawn_one_cpu_kthread(cpu); | |
1679 | } | |
1680 | rnp = rcu_get_root(rcu_state); | |
1681 | (void)rcu_spawn_one_node_kthread(rcu_state, rnp); | |
1682 | if (NUM_RCU_NODES > 1) { | |
1683 | rcu_for_each_leaf_node(rcu_state, rnp) | |
1684 | (void)rcu_spawn_one_node_kthread(rcu_state, rnp); | |
1685 | } | |
1686 | return 0; | |
1687 | } | |
1688 | early_initcall(rcu_spawn_kthreads); | |
1689 | ||
1690 | static void __cpuinit rcu_prepare_kthreads(int cpu) | |
1691 | { | |
1692 | struct rcu_data *rdp = per_cpu_ptr(rcu_state->rda, cpu); | |
1693 | struct rcu_node *rnp = rdp->mynode; | |
1694 | ||
1695 | /* Fire up the incoming CPU's kthread and leaf rcu_node kthread. */ | |
b0d30417 | 1696 | if (rcu_scheduler_fully_active) { |
f8b7fc6b PM |
1697 | (void)rcu_spawn_one_cpu_kthread(cpu); |
1698 | if (rnp->node_kthread_task == NULL) | |
1699 | (void)rcu_spawn_one_node_kthread(rcu_state, rnp); | |
1700 | } | |
1701 | } | |
1702 | ||
27f4d280 PM |
1703 | #else /* #ifdef CONFIG_RCU_BOOST */ |
1704 | ||
1217ed1b | 1705 | static void rcu_initiate_boost(struct rcu_node *rnp, unsigned long flags) |
27f4d280 | 1706 | { |
1217ed1b | 1707 | raw_spin_unlock_irqrestore(&rnp->lock, flags); |
27f4d280 PM |
1708 | } |
1709 | ||
a46e0899 | 1710 | static void invoke_rcu_callbacks_kthread(void) |
27f4d280 | 1711 | { |
a46e0899 | 1712 | WARN_ON_ONCE(1); |
27f4d280 PM |
1713 | } |
1714 | ||
1715 | static void rcu_preempt_boost_start_gp(struct rcu_node *rnp) | |
1716 | { | |
1717 | } | |
1718 | ||
f8b7fc6b PM |
1719 | #ifdef CONFIG_HOTPLUG_CPU |
1720 | ||
1721 | static void rcu_stop_cpu_kthread(int cpu) | |
1722 | { | |
1723 | } | |
1724 | ||
1725 | #endif /* #ifdef CONFIG_HOTPLUG_CPU */ | |
1726 | ||
1727 | static void rcu_node_kthread_setaffinity(struct rcu_node *rnp, int outgoingcpu) | |
1728 | { | |
1729 | } | |
1730 | ||
1731 | static void rcu_cpu_kthread_setrt(int cpu, int to_rt) | |
1732 | { | |
1733 | } | |
1734 | ||
b0d30417 PM |
1735 | static int __init rcu_scheduler_really_started(void) |
1736 | { | |
1737 | rcu_scheduler_fully_active = 1; | |
1738 | return 0; | |
1739 | } | |
1740 | early_initcall(rcu_scheduler_really_started); | |
1741 | ||
f8b7fc6b PM |
1742 | static void __cpuinit rcu_prepare_kthreads(int cpu) |
1743 | { | |
1744 | } | |
1745 | ||
27f4d280 PM |
1746 | #endif /* #else #ifdef CONFIG_RCU_BOOST */ |
1747 | ||
7b27d547 LJ |
1748 | #ifndef CONFIG_SMP |
1749 | ||
1750 | void synchronize_sched_expedited(void) | |
1751 | { | |
1752 | cond_resched(); | |
1753 | } | |
1754 | EXPORT_SYMBOL_GPL(synchronize_sched_expedited); | |
1755 | ||
1756 | #else /* #ifndef CONFIG_SMP */ | |
1757 | ||
e27fc964 TH |
1758 | static atomic_t sync_sched_expedited_started = ATOMIC_INIT(0); |
1759 | static atomic_t sync_sched_expedited_done = ATOMIC_INIT(0); | |
7b27d547 LJ |
1760 | |
1761 | static int synchronize_sched_expedited_cpu_stop(void *data) | |
1762 | { | |
1763 | /* | |
1764 | * There must be a full memory barrier on each affected CPU | |
1765 | * between the time that try_stop_cpus() is called and the | |
1766 | * time that it returns. | |
1767 | * | |
1768 | * In the current initial implementation of cpu_stop, the | |
1769 | * above condition is already met when the control reaches | |
1770 | * this point and the following smp_mb() is not strictly | |
1771 | * necessary. Do smp_mb() anyway for documentation and | |
1772 | * robustness against future implementation changes. | |
1773 | */ | |
1774 | smp_mb(); /* See above comment block. */ | |
1775 | return 0; | |
1776 | } | |
1777 | ||
1778 | /* | |
1779 | * Wait for an rcu-sched grace period to elapse, but use a "big hammer" |
1780 | * approach to force the grace period to end quickly. This consumes |
1781 | * significant time on all CPUs, and is thus not recommended for | |
1782 | * any sort of common-case code. | |
1783 | * | |
1784 | * Note that it is illegal to call this function while holding any | |
1785 | * lock that is acquired by a CPU-hotplug notifier. Failing to | |
1786 | * observe this restriction will result in deadlock. | |
db3a8920 | 1787 | * |
e27fc964 TH |
1788 | * This implementation can be thought of as an application of ticket |
1789 | * locking to RCU, with sync_sched_expedited_started and | |
1790 | * sync_sched_expedited_done taking on the roles of the halves | |
1791 | * of the ticket-lock word. Each task atomically increments | |
1792 | * sync_sched_expedited_started upon entry, snapshotting the old value, | |
1793 | * then attempts to stop all the CPUs. If this succeeds, then each | |
1794 | * CPU will have executed a context switch, resulting in an RCU-sched | |
1795 | * grace period. We are then done, so we use atomic_cmpxchg() to | |
1796 | * update sync_sched_expedited_done to match our snapshot -- but | |
1797 | * only if someone else has not already advanced past our snapshot. | |
1798 | * | |
1799 | * On the other hand, if try_stop_cpus() fails, we check the value | |
1800 | * of sync_sched_expedited_done. If it has advanced past our | |
1801 | * initial snapshot, then someone else must have forced a grace period | |
1802 | * some time after we took our snapshot. In this case, our work is | |
1803 | * done for us, and we can simply return. Otherwise, we try again, | |
1804 | * but keep our initial snapshot for purposes of checking for someone | |
1805 | * doing our work for us. | |
1806 | * | |
1807 | * If we fail too many times in a row, we fall back to synchronize_sched(). | |
7b27d547 LJ |
1808 | */ |
1809 | void synchronize_sched_expedited(void) | |
1810 | { | |
e27fc964 | 1811 | int firstsnap, s, snap, trycount = 0; |
7b27d547 | 1812 | |
e27fc964 TH |
1813 | /* Note that atomic_inc_return() implies a full memory barrier. */ |
1814 | firstsnap = snap = atomic_inc_return(&sync_sched_expedited_started); | |
7b27d547 | 1815 | get_online_cpus(); |
e27fc964 TH |
1816 | |
1817 | /* | |
1818 | * Each pass through the following loop attempts to force a | |
1819 | * context switch on each CPU. | |
1820 | */ | |
7b27d547 LJ |
1821 | while (try_stop_cpus(cpu_online_mask, |
1822 | synchronize_sched_expedited_cpu_stop, | |
1823 | NULL) == -EAGAIN) { | |
1824 | put_online_cpus(); | |
e27fc964 TH |
1825 | |
1826 | /* No joy, try again later. Or just synchronize_sched(). */ | |
7b27d547 LJ |
1827 | if (trycount++ < 10) |
1828 | udelay(trycount * num_online_cpus()); | |
1829 | else { | |
1830 | synchronize_sched(); | |
1831 | return; | |
1832 | } | |
e27fc964 TH |
1833 | |
1834 | /* Check to see if someone else did our work for us. */ | |
1835 | s = atomic_read(&sync_sched_expedited_done); | |
1836 | if (UINT_CMP_GE((unsigned)s, (unsigned)firstsnap)) { | |
7b27d547 LJ |
1837 | smp_mb(); /* ensure test happens before caller kfree */ |
1838 | return; | |
1839 | } | |
e27fc964 TH |
1840 | |
1841 | /* | |
1842 | * Refetching sync_sched_expedited_started allows later | |
1843 | * callers to piggyback on our grace period. We subtract | |
1844 | * 1 to get the same token that the last incrementer got. | |
1845 | * We retry after they started, so our grace period works | |
1846 | * for them, and they started after our first try, so their | |
1847 | * grace period works for us. | |
1848 | */ | |
7b27d547 | 1849 | get_online_cpus(); |
e27fc964 TH |
1850 | snap = atomic_read(&sync_sched_expedited_started) - 1; |
1851 | smp_mb(); /* ensure read is before try_stop_cpus(). */ | |
7b27d547 | 1852 | } |
e27fc964 TH |
1853 | |
1854 | /* | |
1855 | * Everyone up to our most recent fetch is covered by our grace | |
1856 | * period. Update the counter, but only if our work is still | |
1857 | * relevant -- which it won't be if someone who started later | |
1858 | * than we did beat us to the punch. | |
1859 | */ | |
1860 | do { | |
1861 | s = atomic_read(&sync_sched_expedited_done); | |
1862 | if (UINT_CMP_GE((unsigned)s, (unsigned)snap)) { | |
1863 | smp_mb(); /* ensure test happens before caller kfree */ | |
1864 | break; | |
1865 | } | |
1866 | } while (atomic_cmpxchg(&sync_sched_expedited_done, s, snap) != s); | |
1867 | ||
7b27d547 LJ |
1868 | put_online_cpus(); |
1869 | } | |
1870 | EXPORT_SYMBOL_GPL(synchronize_sched_expedited); | |
1871 | ||
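/*
 * Condensed sketch of the ticket protocol above (C-flavored pseudocode,
 * not a drop-in replacement: stop_all_cpus_succeeded() stands in for
 * the try_stop_cpus() call, "started"/"done" abbreviate the two
 * counters, and the memory barriers, trycount fallback, and CPU-hotplug
 * locking are omitted for clarity):
 *
 *	firstsnap = snap = atomic_inc_return(&started);
 *	for (;;) {
 *		if (stop_all_cpus_succeeded())
 *			break;			/* We forced the GP. */
 *		if (UINT_CMP_GE(atomic_read(&done), firstsnap))
 *			return;			/* Someone did our work. */
 *		snap = atomic_read(&started) - 1; /* Cover later arrivals. */
 *	}
 *	do {
 *		s = atomic_read(&done);
 *		if (UINT_CMP_GE(s, snap))
 *			break;		/* Later GP already counted. */
 *	} while (atomic_cmpxchg(&done, s, snap) != s);
 *
 * UINT_CMP_GE() does the comparisons modulo counter wraparound, which
 * is why the counters need not be reset.
 */
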
1872 | #endif /* #else #ifndef CONFIG_SMP */ | |
1873 | ||
8bd93a2c PM |
1874 | #if !defined(CONFIG_RCU_FAST_NO_HZ) |
1875 | ||
1876 | /* | |
1877 | * Check to see if any future RCU-related work will need to be done | |
1878 | * by the current CPU, even if none need be done immediately, returning | |
1879 | * 1 if so. This function is part of the RCU implementation; it is -not- | |
1880 | * an exported member of the RCU API. | |
1881 | * | |
1882 | * Because we have preemptible RCU, just check whether this CPU needs | |
1883 | * any flavor of RCU. Do not chew up lots of CPU cycles with preemption | |
1884 | * disabled in a most-likely vain attempt to cause RCU not to need this CPU. | |
1885 | */ | |
1886 | int rcu_needs_cpu(int cpu) | |
1887 | { | |
1888 | return rcu_needs_cpu_quick_check(cpu); | |
1889 | } | |
1890 | ||
a47cd880 PM |
1891 | /* |
1892 | * Check to see if we need to continue a callback-flush operation to |
1893 | * allow the last CPU to enter dyntick-idle mode. But fast dyntick-idle |
1894 | * entry is not configured, so we never need to. |
1895 | */ | |
1896 | static void rcu_needs_cpu_flush(void) | |
1897 | { | |
1898 | } | |
1899 | ||
8bd93a2c PM |
1900 | #else /* #if !defined(CONFIG_RCU_FAST_NO_HZ) */ |
1901 | ||
1902 | #define RCU_NEEDS_CPU_FLUSHES 5 | |
a47cd880 | 1903 | static DEFINE_PER_CPU(int, rcu_dyntick_drain); |
71da8132 | 1904 | static DEFINE_PER_CPU(unsigned long, rcu_dyntick_holdoff); |
8bd93a2c PM |
1905 | |
1906 | /* | |
1907 | * Check to see if any future RCU-related work will need to be done | |
1908 | * by the current CPU, even if none need be done immediately, returning | |
1909 | * 1 if so. This function is part of the RCU implementation; it is -not- | |
1910 | * an exported member of the RCU API. | |
1911 | * | |
1912 | * Because we are not supporting preemptible RCU, attempt to accelerate | |
1913 | * any current grace periods so that RCU no longer needs this CPU, but | |
1914 | * only if all other CPUs are already in dynticks-idle mode. This will | |
1915 | * allow the CPU cores to be powered down immediately, as opposed to after | |
1916 | * waiting many milliseconds for grace periods to elapse. | |
a47cd880 PM |
1917 | * |
1918 | * Because it is not legal to invoke rcu_process_callbacks() with irqs | |
1919 | * disabled, we do one pass of force_quiescent_state(), then do an |
a46e0899 | 1920 | * invoke_rcu_core() to cause rcu_process_callbacks() to be invoked |
27f4d280 | 1921 | * later. The per-cpu rcu_dyntick_drain variable controls the sequencing. |
8bd93a2c PM |
1922 | */ |
1923 | int rcu_needs_cpu(int cpu) | |
1924 | { | |
a47cd880 | 1925 | int c = 0; |
77e38ed3 | 1926 | int snap; |
8bd93a2c PM |
1927 | int thatcpu; |
1928 | ||
622ea685 PM |
1929 | /* Check for being in the holdoff period. */ |
1930 | if (per_cpu(rcu_dyntick_holdoff, cpu) == jiffies) | |
1931 | return rcu_needs_cpu_quick_check(cpu); | |
1932 | ||
8bd93a2c | 1933 | /* Don't bother unless we are the last non-dyntick-idle CPU. */ |
77e38ed3 PM |
1934 | for_each_online_cpu(thatcpu) { |
1935 | if (thatcpu == cpu) | |
1936 | continue; | |
23b5c8fa PM |
1937 | snap = atomic_add_return(0, &per_cpu(rcu_dynticks, |
1938 | thatcpu).dynticks); | |
77e38ed3 | 1939 | smp_mb(); /* Order sampling of snap with end of grace period. */ |
23b5c8fa | 1940 | if ((snap & 0x1) != 0) { |
a47cd880 | 1941 | per_cpu(rcu_dyntick_drain, cpu) = 0; |
71da8132 | 1942 | per_cpu(rcu_dyntick_holdoff, cpu) = jiffies - 1; |
8bd93a2c | 1943 | return rcu_needs_cpu_quick_check(cpu); |
8bd93a2c | 1944 | } |
77e38ed3 | 1945 | } |
a47cd880 PM |
1946 | |
1947 | /* Check and update the rcu_dyntick_drain sequencing. */ | |
1948 | if (per_cpu(rcu_dyntick_drain, cpu) <= 0) { | |
1949 | /* First time through, initialize the counter. */ | |
1950 | per_cpu(rcu_dyntick_drain, cpu) = RCU_NEEDS_CPU_FLUSHES; | |
1951 | } else if (--per_cpu(rcu_dyntick_drain, cpu) <= 0) { | |
1952 | /* We have hit the limit, so time to give up. */ | |
71da8132 | 1953 | per_cpu(rcu_dyntick_holdoff, cpu) = jiffies; |
a47cd880 PM |
1954 | return rcu_needs_cpu_quick_check(cpu); |
1955 | } | |
1956 | ||
1957 | /* Do one step pushing remaining RCU callbacks through. */ | |
1958 | if (per_cpu(rcu_sched_data, cpu).nxtlist) { | |
1959 | rcu_sched_qs(cpu); | |
1960 | force_quiescent_state(&rcu_sched_state, 0); | |
1961 | c = c || per_cpu(rcu_sched_data, cpu).nxtlist; | |
1962 | } | |
1963 | if (per_cpu(rcu_bh_data, cpu).nxtlist) { | |
1964 | rcu_bh_qs(cpu); | |
1965 | force_quiescent_state(&rcu_bh_state, 0); | |
1966 | c = c || per_cpu(rcu_bh_data, cpu).nxtlist; | |
8bd93a2c PM |
1967 | } |
1968 | ||
1969 | /* If RCU callbacks are still pending, RCU still needs this CPU. */ | |
622ea685 | 1970 | if (c) |
a46e0899 | 1971 | invoke_rcu_core(); |
8bd93a2c PM |
1972 | return c; |
1973 | } | |
1974 | ||
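/*
 * Illustrative trace of the rcu_dyntick_drain sequencing above: the
 * first rcu_needs_cpu() call on the last non-dyntick-idle CPU sets the
 * counter to RCU_NEEDS_CPU_FLUSHES (5) and does one flush pass, and
 * each of the next four calls decrements the counter and flushes again.
 * The sixth call drives the counter to zero, records the current
 * jiffies value in rcu_dyntick_holdoff, and gives up; any further calls
 * during that same jiffy take the rcu_needs_cpu_quick_check() early
 * exit rather than retrying.
 */
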
a47cd880 PM |
1975 | /* |
1976 | * Check to see if we need to continue a callback-flush operation to |
1977 | * allow the last CPU to enter dyntick-idle mode. | |
1978 | */ | |
1979 | static void rcu_needs_cpu_flush(void) | |
1980 | { | |
1981 | int cpu = smp_processor_id(); | |
71da8132 | 1982 | unsigned long flags; |
a47cd880 PM |
1983 | |
1984 | if (per_cpu(rcu_dyntick_drain, cpu) <= 0) | |
1985 | return; | |
71da8132 | 1986 | local_irq_save(flags); |
a47cd880 | 1987 | (void)rcu_needs_cpu(cpu); |
71da8132 | 1988 | local_irq_restore(flags); |
a47cd880 PM |
1989 | } |
1990 | ||
8bd93a2c | 1991 | #endif /* #else #if !defined(CONFIG_RCU_FAST_NO_HZ) */ |