/*
 * kernel/smp.c - Generic helpers for SMP IPI calls
 *
 * (C) Jens Axboe <jens.axboe@oracle.com> 2008
 */
#include <linux/rcupdate.h>
#include <linux/rculist.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/percpu.h>
#include <linux/init.h>
#include <linux/gfp.h>
#include <linux/smp.h>
#include <linux/cpu.h>

#ifdef CONFIG_USE_GENERIC_SMP_HELPERS
static struct {
        struct list_head        queue;
        raw_spinlock_t          lock;
} call_function __cacheline_aligned_in_smp =
        {
                .queue          = LIST_HEAD_INIT(call_function.queue),
                .lock           = __RAW_SPIN_LOCK_UNLOCKED(call_function.lock),
        };

enum {
        CSD_FLAG_LOCK           = 0x01,
};

struct call_function_data {
        struct call_single_data csd;
        atomic_t                refs;
        cpumask_var_t           cpumask;
};

static DEFINE_PER_CPU_SHARED_ALIGNED(struct call_function_data, cfd_data);

struct call_single_queue {
        struct list_head        list;
        raw_spinlock_t          lock;
};

static DEFINE_PER_CPU_SHARED_ALIGNED(struct call_single_queue, call_single_queue);

static int
hotplug_cfd(struct notifier_block *nfb, unsigned long action, void *hcpu)
{
        long cpu = (long)hcpu;
        struct call_function_data *cfd = &per_cpu(cfd_data, cpu);

        switch (action) {
        case CPU_UP_PREPARE:
        case CPU_UP_PREPARE_FROZEN:
                if (!zalloc_cpumask_var_node(&cfd->cpumask, GFP_KERNEL,
                                cpu_to_node(cpu)))
                        return notifier_from_errno(-ENOMEM);
                break;

#ifdef CONFIG_HOTPLUG_CPU
        case CPU_UP_CANCELED:
        case CPU_UP_CANCELED_FROZEN:

        case CPU_DEAD:
        case CPU_DEAD_FROZEN:
                free_cpumask_var(cfd->cpumask);
                break;
#endif
        }

        return NOTIFY_OK;
}

static struct notifier_block __cpuinitdata hotplug_cfd_notifier = {
        .notifier_call          = hotplug_cfd,
};

static int __cpuinit init_call_single_data(void)
{
        void *cpu = (void *)(long)smp_processor_id();
        int i;

        for_each_possible_cpu(i) {
                struct call_single_queue *q = &per_cpu(call_single_queue, i);

                raw_spin_lock_init(&q->lock);
                INIT_LIST_HEAD(&q->list);
        }

        hotplug_cfd(&hotplug_cfd_notifier, CPU_UP_PREPARE, cpu);
        register_cpu_notifier(&hotplug_cfd_notifier);

        return 0;
}
early_initcall(init_call_single_data);

/*
 * csd_lock/csd_unlock are used to serialize access to per-cpu csd resources.
 *
 * For non-synchronous ipi calls the csd can still be in use by the
 * previous function call. For multi-cpu calls it's even more interesting
 * as we'll have to ensure no other cpu is observing our csd.
 */
static void csd_lock_wait(struct call_single_data *data)
{
        while (data->flags & CSD_FLAG_LOCK)
                cpu_relax();
}

static void csd_lock(struct call_single_data *data)
{
        csd_lock_wait(data);
        data->flags = CSD_FLAG_LOCK;

        /*
         * prevent CPU from reordering the above assignment
         * to ->flags with any subsequent assignments to other
         * fields of the specified call_single_data structure:
         */
        smp_mb();
}

static void csd_unlock(struct call_single_data *data)
{
        WARN_ON(!(data->flags & CSD_FLAG_LOCK));

        /*
         * ensure we're all done before releasing data:
         */
        smp_mb();

        data->flags &= ~CSD_FLAG_LOCK;
}

/*
 * Insert a previously allocated call_single_data element
 * for execution on the given CPU. data must already have
 * ->func, ->info, and ->flags set.
 */
static
void generic_exec_single(int cpu, struct call_single_data *data, int wait)
{
        struct call_single_queue *dst = &per_cpu(call_single_queue, cpu);
        unsigned long flags;
        int ipi;

        raw_spin_lock_irqsave(&dst->lock, flags);
        ipi = list_empty(&dst->list);
        list_add_tail(&data->list, &dst->list);
        raw_spin_unlock_irqrestore(&dst->lock, flags);

        /*
         * The list addition must be visible before the IPI is sent, so that
         * the handler finds the entry when it locks the list to pull it off;
         * the normal cache coherency rules implied by spinlocks guarantee
         * this.
         *
         * If IPIs can go out of order to the cache coherency protocol
         * in an architecture, sufficient synchronisation should be added
         * to arch code to make it appear to obey cache coherency WRT
         * locking and barrier primitives. Generic code isn't really
         * equipped to do the right thing...
         */
        if (ipi)
                arch_send_call_function_single_ipi(cpu);

        if (wait)
                csd_lock_wait(data);
}

/*
 * Invoked by arch to handle an IPI for call function. Must be called with
 * interrupts disabled.
 */
void generic_smp_call_function_interrupt(void)
{
        struct call_function_data *data;
        int cpu = smp_processor_id();

        /*
         * Shouldn't receive this interrupt on a cpu that is not yet online.
         */
        WARN_ON_ONCE(!cpu_online(cpu));

        /*
         * Ensure entry is visible on call_function_queue after we have
         * entered the IPI. See comment in smp_call_function_many.
         * If we don't have this, then we may miss an entry on the list
         * and never get another IPI to process it.
         */
        smp_mb();

        /*
         * It's ok to use list_for_each_rcu() here even though we may
         * delete 'pos', since list_del_rcu() doesn't clear ->next
         */
        list_for_each_entry_rcu(data, &call_function.queue, csd.list) {
                int refs;

                if (!cpumask_test_and_clear_cpu(cpu, data->cpumask))
                        continue;

                data->csd.func(data->csd.info);

                refs = atomic_dec_return(&data->refs);
                WARN_ON(refs < 0);
                if (!refs) {
                        raw_spin_lock(&call_function.lock);
                        list_del_rcu(&data->csd.list);
                        raw_spin_unlock(&call_function.lock);
                }

                if (refs)
                        continue;

                csd_unlock(&data->csd);
        }
}

/*
 * Invoked by arch to handle an IPI for call function single. Must be
 * called from the arch with interrupts disabled.
 */
void generic_smp_call_function_single_interrupt(void)
{
        struct call_single_queue *q = &__get_cpu_var(call_single_queue);
        unsigned int data_flags;
        LIST_HEAD(list);

        /*
         * Shouldn't receive this interrupt on a cpu that is not yet online.
         */
        WARN_ON_ONCE(!cpu_online(smp_processor_id()));

        raw_spin_lock(&q->lock);
        list_replace_init(&q->list, &list);
        raw_spin_unlock(&q->lock);

        while (!list_empty(&list)) {
                struct call_single_data *data;

                data = list_entry(list.next, struct call_single_data, list);
                list_del(&data->list);

                /*
                 * 'data' can be invalid after this call if flags == 0
                 * (when called through generic_exec_single()),
                 * so save them away before making the call:
                 */
                data_flags = data->flags;

                data->func(data->info);

                /*
                 * Unlocked CSDs are valid through generic_exec_single():
                 */
                if (data_flags & CSD_FLAG_LOCK)
                        csd_unlock(data);
        }
}
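
/*
 * Illustrative sketch (not part of the original file): the two handlers above
 * are meant to be called from architecture IPI code with interrupts disabled.
 * The vector names and the entry function below are hypothetical; the real
 * wiring lives in each architecture's IPI handling, next to its
 * arch_send_call_function_*_ipi() implementations.
 */
#if 0
void example_arch_handle_ipi(int ipinr)
{
        switch (ipinr) {
        case EXAMPLE_IPI_CALL_FUNC:             /* hypothetical vector number */
                generic_smp_call_function_interrupt();
                break;
        case EXAMPLE_IPI_CALL_FUNC_SINGLE:      /* hypothetical vector number */
                generic_smp_call_function_single_interrupt();
                break;
        }
}
#endif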

static DEFINE_PER_CPU_SHARED_ALIGNED(struct call_single_data, csd_data);

/*
 * smp_call_function_single - Run a function on a specific CPU
 * @cpu: The CPU to run @func on.
 * @func: The function to run. This must be fast and non-blocking.
 * @info: An arbitrary pointer to pass to the function.
 * @wait: If true, wait until the function has completed on the specified CPU.
 *
 * Returns 0 on success, else a negative status code.
 */
int smp_call_function_single(int cpu, smp_call_func_t func, void *info,
                             int wait)
{
        struct call_single_data d = {
                .flags = 0,
        };
        unsigned long flags;
        int this_cpu;
        int err = 0;

        /*
         * prevent preemption and reschedule on another processor,
         * as well as CPU removal
         */
        this_cpu = get_cpu();

        /*
         * Can deadlock when called with interrupts disabled.
         * We allow CPUs that are not yet online though, as no one else can
         * send an smp call function interrupt to this cpu and as such
         * deadlocks can't happen.
         */
        WARN_ON_ONCE(cpu_online(this_cpu) && irqs_disabled()
                     && !oops_in_progress);

        if (cpu == this_cpu) {
                local_irq_save(flags);
                func(info);
                local_irq_restore(flags);
        } else {
                if ((unsigned)cpu < nr_cpu_ids && cpu_online(cpu)) {
                        struct call_single_data *data = &d;

                        if (!wait)
                                data = &__get_cpu_var(csd_data);

                        csd_lock(data);

                        data->func = func;
                        data->info = info;
                        generic_exec_single(cpu, data, wait);
                } else {
                        err = -ENXIO;   /* CPU not online */
                }
        }

        put_cpu();

        return err;
}
EXPORT_SYMBOL(smp_call_function_single);
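
/*
 * Illustrative sketch (not part of the original file): a minimal caller of
 * smp_call_function_single(). The function and variable names below are
 * hypothetical. @func runs in interrupt context on the target CPU, so it
 * must be fast and must not sleep.
 */
#if 0
static void example_remote_func(void *info)
{
        unsigned int *cpu_seen = info;

        *cpu_seen = smp_processor_id();         /* record which CPU ran us */
}

static unsigned int example_run_on(int cpu)
{
        unsigned int cpu_seen = 0;

        /* wait == 1: return only after the target CPU has run the function */
        smp_call_function_single(cpu, example_remote_func, &cpu_seen, 1);
        return cpu_seen;
}
#endif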

/*
 * smp_call_function_any - Run a function on any of the given cpus
 * @mask: The mask of cpus it can run on.
 * @func: The function to run. This must be fast and non-blocking.
 * @info: An arbitrary pointer to pass to the function.
 * @wait: If true, wait until function has completed.
 *
 * Returns 0 on success, else a negative status code (if no cpus were online).
 * Note that @wait will be implicitly turned on in case of allocation failures,
 * since we fall back to on-stack allocation.
 *
 * Selection preference:
 *      1) current cpu if in @mask
 *      2) any cpu of current node if in @mask
 *      3) any other online cpu in @mask
 */
int smp_call_function_any(const struct cpumask *mask,
                          smp_call_func_t func, void *info, int wait)
{
        unsigned int cpu;
        const struct cpumask *nodemask;
        int ret;

        /* Try for same CPU (cheapest) */
        cpu = get_cpu();
        if (cpumask_test_cpu(cpu, mask))
                goto call;

        /* Try for same node. */
        nodemask = cpumask_of_node(cpu_to_node(cpu));
        for (cpu = cpumask_first_and(nodemask, mask); cpu < nr_cpu_ids;
             cpu = cpumask_next_and(cpu, nodemask, mask)) {
                if (cpu_online(cpu))
                        goto call;
        }

        /* Any online will do: smp_call_function_single handles nr_cpu_ids. */
        cpu = cpumask_any_and(mask, cpu_online_mask);
call:
        ret = smp_call_function_single(cpu, func, info, wait);
        put_cpu();
        return ret;
}
EXPORT_SYMBOL_GPL(smp_call_function_any);
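
/*
 * Illustrative sketch (not part of the original file): smp_call_function_any()
 * is useful when the work only has to run on *some* CPU in a mask, for
 * example one CPU that shares a resource. The mask and function names below
 * are hypothetical.
 */
#if 0
static void example_poke_device(void *info)
{
        /* fast, non-blocking work that any CPU in the mask may perform */
}

static int example_poke_from_any(const struct cpumask *allowed_cpus)
{
        /* Prefers the current CPU, then its node, then any online CPU. */
        return smp_call_function_any(allowed_cpus, example_poke_device, NULL, 1);
}
#endif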

/**
 * __smp_call_function_single(): Run a function on a specific CPU
 * @cpu: The CPU to run on.
 * @data: Pre-allocated and set-up data structure
 * @wait: If true, wait until function has completed on specified CPU.
 *
 * Like smp_call_function_single(), but allow caller to pass in a
 * pre-allocated data structure. Useful for embedding @data inside
 * other structures, for instance.
 */
void __smp_call_function_single(int cpu, struct call_single_data *data,
                                int wait)
{
        unsigned int this_cpu;
        unsigned long flags;

        this_cpu = get_cpu();
        /*
         * Can deadlock when called with interrupts disabled.
         * We allow CPUs that are not yet online though, as no one else can
         * send an smp call function interrupt to this cpu and as such
         * deadlocks can't happen.
         */
        WARN_ON_ONCE(cpu_online(smp_processor_id()) && wait && irqs_disabled()
                     && !oops_in_progress);

        if (cpu == this_cpu) {
                local_irq_save(flags);
                data->func(data->info);
                local_irq_restore(flags);
        } else {
                csd_lock(data);
                generic_exec_single(cpu, data, wait);
        }
        put_cpu();
}
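
/*
 * Illustrative sketch (not part of the original file): embedding a
 * call_single_data inside another object so that no allocation is needed at
 * call time, as the kernel-doc above suggests. The structure and function
 * names are hypothetical.
 */
#if 0
struct example_work {
        struct call_single_data csd;    /* must stay valid until it has run */
        int                     payload;
};

static void example_work_func(void *info)
{
        struct example_work *w = info;

        /* process w->payload on the target CPU, in IPI context */
}

static void example_queue_work_on(int cpu, struct example_work *w)
{
        w->csd.func = example_work_func;
        w->csd.info = w;
        w->csd.flags = 0;

        /* wait == 0: returns immediately; w may not be reused until done */
        __smp_call_function_single(cpu, &w->csd, 0);
}
#endif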

/**
 * smp_call_function_many(): Run a function on a set of other CPUs.
 * @mask: The set of cpus to run on (only runs on online subset).
 * @func: The function to run. This must be fast and non-blocking.
 * @info: An arbitrary pointer to pass to the function.
 * @wait: If true, wait (atomically) until function has completed
 *        on other CPUs.
 *
 * If @wait is true, then returns once @func has returned.
 *
 * You must not call this function with disabled interrupts or from a
 * hardware interrupt handler or from a bottom half handler. Preemption
 * must be disabled when calling this function.
 */
void smp_call_function_many(const struct cpumask *mask,
                            smp_call_func_t func, void *info, bool wait)
{
        struct call_function_data *data;
        unsigned long flags;
        int cpu, next_cpu, this_cpu = smp_processor_id();

        /*
         * Can deadlock when called with interrupts disabled.
         * We allow CPUs that are not yet online though, as no one else can
         * send an smp call function interrupt to this cpu and as such
         * deadlocks can't happen.
         */
        WARN_ON_ONCE(cpu_online(this_cpu) && irqs_disabled()
                     && !oops_in_progress);

        /* So, what's a CPU they want? Ignoring this one. */
        cpu = cpumask_first_and(mask, cpu_online_mask);
        if (cpu == this_cpu)
                cpu = cpumask_next_and(cpu, mask, cpu_online_mask);

        /* No online cpus? We're done. */
        if (cpu >= nr_cpu_ids)
                return;

        /* Do we have another CPU which isn't us? */
        next_cpu = cpumask_next_and(cpu, mask, cpu_online_mask);
        if (next_cpu == this_cpu)
                next_cpu = cpumask_next_and(next_cpu, mask, cpu_online_mask);

        /* Fastpath: do that cpu by itself. */
        if (next_cpu >= nr_cpu_ids) {
                smp_call_function_single(cpu, func, info, wait);
                return;
        }

        data = &__get_cpu_var(cfd_data);
        csd_lock(&data->csd);

        data->csd.func = func;
        data->csd.info = info;
        cpumask_and(data->cpumask, mask, cpu_online_mask);
        cpumask_clear_cpu(this_cpu, data->cpumask);
        atomic_set(&data->refs, cpumask_weight(data->cpumask));

        raw_spin_lock_irqsave(&call_function.lock, flags);
        /*
         * Place entry at the _HEAD_ of the list, so that any cpu still
         * observing the entry in generic_smp_call_function_interrupt()
         * will not miss any other list entries:
         */
        list_add_rcu(&data->csd.list, &call_function.queue);
        raw_spin_unlock_irqrestore(&call_function.lock, flags);

        /*
         * Make the list addition visible before sending the ipi.
         * (IPIs must obey or appear to obey normal Linux cache
         * coherency rules -- see comment in generic_exec_single).
         */
        smp_mb();

        /* Send a message to all CPUs in the map */
        arch_send_call_function_ipi_mask(data->cpumask);

        /* Optionally wait for the CPUs to complete */
        if (wait)
                csd_lock_wait(&data->csd);
}
EXPORT_SYMBOL(smp_call_function_many);
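
/*
 * Illustrative sketch (not part of the original file): a typical
 * smp_call_function_many() caller, broadcasting fast local work to a set of
 * CPUs. Names are hypothetical; preemption must be disabled around the call,
 * as the kernel-doc above requires, and the calling CPU is never included.
 */
#if 0
static void example_flush_local_state(void *info)
{
        /* runs on each selected CPU, in IPI context */
}

static void example_flush_on_cpus(const struct cpumask *cpus)
{
        preempt_disable();
        /* Runs on every online CPU in @cpus except the calling CPU. */
        smp_call_function_many(cpus, example_flush_local_state, NULL, true);
        /* The calling CPU has to do its own share, if it is in @cpus. */
        if (cpumask_test_cpu(smp_processor_id(), cpus))
                example_flush_local_state(NULL);
        preempt_enable();
}
#endif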

/**
 * smp_call_function(): Run a function on all other CPUs.
 * @func: The function to run. This must be fast and non-blocking.
 * @info: An arbitrary pointer to pass to the function.
 * @wait: If true, wait (atomically) until function has completed
 *        on other CPUs.
 *
 * Returns 0.
 *
 * If @wait is true, then returns once @func has returned; otherwise
 * it returns just before the target cpu calls @func.
 *
 * You must not call this function with disabled interrupts or from a
 * hardware interrupt handler or from a bottom half handler.
 */
int smp_call_function(smp_call_func_t func, void *info, int wait)
{
        preempt_disable();
        smp_call_function_many(cpu_online_mask, func, info, wait);
        preempt_enable();

        return 0;
}
EXPORT_SYMBOL(smp_call_function);
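
/*
 * Illustrative sketch (not part of the original file): smp_call_function()
 * excludes the calling CPU, so callers that need the work done everywhere
 * usually pair it with a local call (or simply use on_each_cpu() below).
 * The function names are hypothetical.
 */
#if 0
static void example_sync_state(void *info)
{
        /* fast, non-blocking work */
}

static void example_sync_all_cpus(void)
{
        /* wait == 1: all other CPUs have run the function before we return */
        smp_call_function(example_sync_state, NULL, 1);
        example_sync_state(NULL);               /* and this CPU's share */
}
#endif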

void ipi_call_lock(void)
{
        raw_spin_lock(&call_function.lock);
}

void ipi_call_unlock(void)
{
        raw_spin_unlock(&call_function.lock);
}

void ipi_call_lock_irq(void)
{
        raw_spin_lock_irq(&call_function.lock);
}

void ipi_call_unlock_irq(void)
{
        raw_spin_unlock_irq(&call_function.lock);
}
#endif /* CONFIG_USE_GENERIC_SMP_HELPERS */

/*
 * Call a function on all processors
 */
int on_each_cpu(void (*func) (void *info), void *info, int wait)
{
        int ret = 0;

        preempt_disable();
        ret = smp_call_function(func, info, wait);
        local_irq_disable();
        func(info);
        local_irq_enable();
        preempt_enable();
        return ret;
}
EXPORT_SYMBOL(on_each_cpu);
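
/*
 * Illustrative sketch (not part of the original file): on_each_cpu() runs
 * @func on every online CPU, including the caller (locally with interrupts
 * disabled), so it is the simplest way to broadcast fast, non-blocking work.
 * The function names below are hypothetical.
 */
#if 0
static void example_reset_local_stats(void *info)
{
        /* runs on every online CPU, with interrupts disabled locally */
}

static void example_reset_all_stats(void)
{
        /* wait == 1: returns only after every CPU has run the function */
        on_each_cpu(example_reset_local_stats, NULL, 1);
}
#endif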