/*
 *  linux/arch/arm/kernel/smp.c
 *
 *  Copyright (C) 2002 ARM Limited, All Rights Reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#include <linux/module.h>
#include <linux/delay.h>
#include <linux/init.h>
#include <linux/spinlock.h>
#include <linux/sched.h>
#include <linux/interrupt.h>
#include <linux/cache.h>
#include <linux/profile.h>
#include <linux/errno.h>
#include <linux/ftrace.h>
#include <linux/mm.h>
#include <linux/err.h>
#include <linux/cpu.h>
#include <linux/smp.h>
#include <linux/seq_file.h>
#include <linux/irq.h>
#include <linux/percpu.h>
#include <linux/clockchips.h>
#include <linux/completion.h>

#include <asm/atomic.h>
#include <asm/cacheflush.h>
#include <asm/cpu.h>
#include <asm/cputype.h>
#include <asm/mmu_context.h>
#include <asm/pgtable.h>
#include <asm/pgalloc.h>
#include <asm/processor.h>
#include <asm/sections.h>
#include <asm/tlbflush.h>
#include <asm/ptrace.h>
#include <asm/localtimer.h>

/*
 * as from 2.5, kernels no longer have an init_tasks structure
 * so we need some other way of telling a new secondary core
 * where to place its SVC stack
 */
struct secondary_data secondary_data;

enum ipi_msg_type {
        IPI_TIMER = 2,
        IPI_RESCHEDULE,
        IPI_CALL_FUNC,
        IPI_CALL_FUNC_SINGLE,
        IPI_CPU_STOP,
};

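/*
 * Bring the secondary CPU "cpu" online: make sure it has an idle thread,
 * build temporary page tables (including a 1:1 kernel mapping) so it can
 * safely enable its MMU, hand it its stack and pgdir via secondary_data,
 * then ask the platform code (boot_secondary) to start it and wait for
 * it to appear online.
 */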
int __cpuinit __cpu_up(unsigned int cpu)
{
        struct cpuinfo_arm *ci = &per_cpu(cpu_data, cpu);
        struct task_struct *idle = ci->idle;
        pgd_t *pgd;
        int ret;

        /*
         * Spawn a new process manually, if not already done.
         * Grab a pointer to its task struct so we can mess with it
         */
        if (!idle) {
                idle = fork_idle(cpu);
                if (IS_ERR(idle)) {
                        printk(KERN_ERR "CPU%u: fork() failed\n", cpu);
                        return PTR_ERR(idle);
                }
                ci->idle = idle;
        } else {
                /*
                 * Since this idle thread is being re-used, call
                 * init_idle() to reinitialize the thread structure.
                 */
                init_idle(idle, cpu);
        }

        /*
         * Allocate initial page tables to allow the new CPU to
         * enable the MMU safely.  This essentially means a set
         * of our "standard" page tables, with the addition of
         * a 1:1 mapping for the physical address of the kernel.
         */
        pgd = pgd_alloc(&init_mm);
        if (!pgd)
                return -ENOMEM;

        if (PHYS_OFFSET != PAGE_OFFSET) {
#ifndef CONFIG_HOTPLUG_CPU
                identity_mapping_add(pgd, __pa(__init_begin), __pa(__init_end));
#endif
                identity_mapping_add(pgd, __pa(_stext), __pa(_etext));
                identity_mapping_add(pgd, __pa(_sdata), __pa(_edata));
        }

        /*
         * We need to tell the secondary core where to find
         * its stack and the page tables.
         */
        secondary_data.stack = task_stack_page(idle) + THREAD_START_SP;
        secondary_data.pgdir = virt_to_phys(pgd);
        __cpuc_flush_dcache_area(&secondary_data, sizeof(secondary_data));
        outer_clean_range(__pa(&secondary_data), __pa(&secondary_data + 1));

        /*
         * Now bring the CPU into our world.
         */
        ret = boot_secondary(cpu, idle);
        if (ret == 0) {
                unsigned long timeout;

                /*
                 * CPU was successfully started, wait for it
                 * to come online or time out.
                 */
                timeout = jiffies + HZ;
                while (time_before(jiffies, timeout)) {
                        if (cpu_online(cpu))
                                break;

                        udelay(10);
                        barrier();
                }

                if (!cpu_online(cpu)) {
                        pr_crit("CPU%u: failed to come online\n", cpu);
                        ret = -EIO;
                }
        } else {
                pr_err("CPU%u: failed to boot: %d\n", cpu, ret);
        }

        secondary_data.stack = NULL;
        secondary_data.pgdir = 0;

        if (PHYS_OFFSET != PAGE_OFFSET) {
#ifndef CONFIG_HOTPLUG_CPU
                identity_mapping_del(pgd, __pa(__init_begin), __pa(__init_end));
#endif
                identity_mapping_del(pgd, __pa(_stext), __pa(_etext));
                identity_mapping_del(pgd, __pa(_sdata), __pa(_edata));
        }

        pgd_free(&init_mm, pgd);

        return ret;
}

#ifdef CONFIG_HOTPLUG_CPU
static void percpu_timer_stop(void);

/*
 * __cpu_disable runs on the processor to be shutdown.
 */
int __cpu_disable(void)
{
        unsigned int cpu = smp_processor_id();
        struct task_struct *p;
        int ret;

        ret = platform_cpu_disable(cpu);
        if (ret)
                return ret;

        /*
         * Take this CPU offline.  Once we clear this, we can't return,
         * and we must not schedule until we're ready to give up the cpu.
         */
        set_cpu_online(cpu, false);

        /*
         * OK - migrate IRQs away from this CPU
         */
        migrate_irqs();

        /*
         * Stop the local timer for this CPU.
         */
        percpu_timer_stop();

        /*
         * Flush user cache and TLB mappings, and then remove this CPU
         * from the vm mask set of all processes.
         */
        flush_cache_all();
        local_flush_tlb_all();

        read_lock(&tasklist_lock);
        for_each_process(p) {
                if (p->mm)
                        cpumask_clear_cpu(cpu, mm_cpumask(p->mm));
        }
        read_unlock(&tasklist_lock);

        return 0;
}

static DECLARE_COMPLETION(cpu_died);

/*
 * called on the thread which is asking for a CPU to be shutdown -
 * waits until shutdown has completed, or it is timed out.
 */
void __cpu_die(unsigned int cpu)
{
        if (!wait_for_completion_timeout(&cpu_died, msecs_to_jiffies(5000))) {
                pr_err("CPU%u: cpu didn't die\n", cpu);
                return;
        }
        printk(KERN_NOTICE "CPU%u: shutdown\n", cpu);

        if (!platform_cpu_kill(cpu))
                printk("CPU%u: unable to kill\n", cpu);
}

/*
 * Called from the idle thread for the CPU which has been shutdown.
 *
 * Note that we disable IRQs here, but do not re-enable them
 * before returning to the caller.  This is also the behaviour
 * of the other hotplug-cpu capable cores, so presumably coming
 * out of idle fixes this.
 */
void __ref cpu_die(void)
{
        unsigned int cpu = smp_processor_id();

        idle_task_exit();

        local_irq_disable();
        mb();

        /* Tell __cpu_die() that this CPU is now safe to dispose of */
        complete(&cpu_died);

        /*
         * actual CPU shutdown procedure is at least platform (if not
         * CPU) specific.
         */
        platform_cpu_die(cpu);

        /*
         * Do not return to the idle loop - jump back to the secondary
         * cpu initialisation.  There's some initialisation which needs
         * to be repeated to undo the effects of taking the CPU offline.
         */
        __asm__("mov    sp, %0\n"
        "       mov     fp, #0\n"
        "       b       secondary_start_kernel"
                :
                : "r" (task_stack_page(current) + THREAD_SIZE - 8));
}
#endif /* CONFIG_HOTPLUG_CPU */

/*
 * Called by both boot and secondaries to move global data into
 * per-processor storage.
 */
static void __cpuinit smp_store_cpu_info(unsigned int cpuid)
{
        struct cpuinfo_arm *cpu_info = &per_cpu(cpu_data, cpuid);

        cpu_info->loops_per_jiffy = loops_per_jiffy;
}

/*
 * This is the secondary CPU boot entry.  We're using this CPU's
 * idle thread stack, but a set of temporary page tables.
 */
asmlinkage void __cpuinit secondary_start_kernel(void)
{
        struct mm_struct *mm = &init_mm;
        unsigned int cpu = smp_processor_id();

        printk("CPU%u: Booted secondary processor\n", cpu);

        /*
         * All kernel threads share the same mm context; grab a
         * reference and switch to it.
         */
        atomic_inc(&mm->mm_count);
        current->active_mm = mm;
        cpumask_set_cpu(cpu, mm_cpumask(mm));
        cpu_switch_mm(mm->pgd, mm);
        enter_lazy_tlb(mm, current);
        local_flush_tlb_all();

        cpu_init();
        preempt_disable();
        trace_hardirqs_off();

        /*
         * Give the platform a chance to do its own initialisation.
         */
        platform_secondary_init(cpu);

        /*
         * Enable local interrupts.
         */
        notify_cpu_starting(cpu);
        local_irq_enable();
        local_fiq_enable();

        /*
         * Setup the percpu timer for this CPU.
         */
        percpu_timer_setup();

        calibrate_delay();

        smp_store_cpu_info(cpu);

        /*
         * OK, now it's safe to let the boot CPU continue
         */
        set_cpu_online(cpu, true);

        /*
         * OK, it's off to the idle thread for us
         */
        cpu_idle();
}

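/*
 * Called by the generic SMP startup code once all requested secondary
 * CPUs have been brought up; report the combined BogoMIPS rating.
 */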
void __init smp_cpus_done(unsigned int max_cpus)
{
        int cpu;
        unsigned long bogosum = 0;

        for_each_online_cpu(cpu)
                bogosum += per_cpu(cpu_data, cpu).loops_per_jiffy;

        printk(KERN_INFO "SMP: Total of %d processors activated "
               "(%lu.%02lu BogoMIPS).\n",
               num_online_cpus(),
               bogosum / (500000/HZ),
               (bogosum / (5000/HZ)) % 100);
}

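/*
 * Record the boot CPU's idle thread (current, i.e. the swapper task)
 * in its per-CPU cpu_data.
 */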
void __init smp_prepare_boot_cpu(void)
{
        unsigned int cpu = smp_processor_id();

        per_cpu(cpu_data, cpu).idle = current;
}

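/*
 * Runs once on the boot CPU before any secondary is started: clamp
 * max_cpus to the number of possible cores and, if more than one CPU
 * is to be booted, set up the boot CPU's timer and let the platform
 * code do its own preparation (e.g. SCU setup, secondary entry point).
 */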
void __init smp_prepare_cpus(unsigned int max_cpus)
{
        unsigned int ncores = num_possible_cpus();

        smp_store_cpu_info(smp_processor_id());

        /*
         * are we trying to boot more cores than exist?
         */
        if (max_cpus > ncores)
                max_cpus = ncores;

        if (max_cpus > 1) {
                /*
                 * Enable the local timer or broadcast device for the
                 * boot CPU, but only if we have more than one CPU.
                 */
                percpu_timer_setup();

                /*
                 * Initialise the SCU if there are more than one CPU
                 * and let them know where to start.
                 */
                platform_smp_prepare_cpus(max_cpus);
        }
}

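/*
 * Back-ends for the generic smp_call_function() machinery: raise the
 * corresponding IPI on the target CPU(s).  smp_cross_call() itself is
 * typically provided by the platform/interrupt-controller code.
 */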
void arch_send_call_function_ipi_mask(const struct cpumask *mask)
{
        smp_cross_call(mask, IPI_CALL_FUNC);
}

void arch_send_call_function_single_ipi(int cpu)
{
        smp_cross_call(cpumask_of(cpu), IPI_CALL_FUNC_SINGLE);
}

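/*
 * Human-readable names for each IPI, indexed from IPI_TIMER.
 * show_ipi_list() prints the per-CPU receive counts alongside these
 * names (e.g. for /proc/interrupts).
 */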
static const char *ipi_types[NR_IPI] = {
#define S(x,s)  [x - IPI_TIMER] = s
        S(IPI_TIMER, "Timer broadcast interrupts"),
        S(IPI_RESCHEDULE, "Rescheduling interrupts"),
        S(IPI_CALL_FUNC, "Function call interrupts"),
        S(IPI_CALL_FUNC_SINGLE, "Single function call interrupts"),
        S(IPI_CPU_STOP, "CPU stop interrupts"),
};

void show_ipi_list(struct seq_file *p, int prec)
{
        unsigned int cpu, i;

        for (i = 0; i < NR_IPI; i++) {
                seq_printf(p, "%*s%u: ", prec - 1, "IPI", i);

                for_each_present_cpu(cpu)
                        seq_printf(p, "%10u ",
                                   __get_irq_stat(cpu, ipi_irqs[i]));

                seq_printf(p, " %s\n", ipi_types[i]);
        }
}

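/*
 * Total number of IPIs (and, with CONFIG_LOCAL_TIMERS, local timer
 * interrupts) taken by the given CPU; used when summing per-CPU
 * interrupt statistics.
 */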
u64 smp_irq_stat_cpu(unsigned int cpu)
{
        u64 sum = 0;
        int i;

        for (i = 0; i < NR_IPI; i++)
                sum += __get_irq_stat(cpu, ipi_irqs[i]);

#ifdef CONFIG_LOCAL_TIMERS
        sum += __get_irq_stat(cpu, local_timer_irqs);
#endif

        return sum;
}

/*
 * Timer (local or broadcast) support
 */
static DEFINE_PER_CPU(struct clock_event_device, percpu_clockevent);

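/*
 * Run this CPU's clock event handler in interrupt context.  Reached
 * both from a broadcast IPI_TIMER and from the local timer path below.
 */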
static void ipi_timer(void)
{
        struct clock_event_device *evt = &__get_cpu_var(percpu_clockevent);
        irq_enter();
        evt->event_handler(evt);
        irq_exit();
}

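/*
 * Per-CPU (private) timer handling.  do_local_timer() is entered from
 * the low-level interrupt code; local_timer_ack() is expected to
 * acknowledge the timer and return non-zero when a tick really
 * occurred, in which case the clock event handler is run.
 */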
#ifdef CONFIG_LOCAL_TIMERS
asmlinkage void __exception_irq_entry do_local_timer(struct pt_regs *regs)
{
        struct pt_regs *old_regs = set_irq_regs(regs);
        int cpu = smp_processor_id();

        if (local_timer_ack()) {
                __inc_irq_stat(cpu, local_timer_irqs);
                ipi_timer();
        }

        set_irq_regs(old_regs);
}

void show_local_irqs(struct seq_file *p, int prec)
{
        unsigned int cpu;

        seq_printf(p, "%*s: ", prec, "LOC");

        for_each_present_cpu(cpu)
                seq_printf(p, "%10u ", __get_irq_stat(cpu, local_timer_irqs));

        seq_printf(p, " Local timer interrupts\n");
}
#endif

#ifdef CONFIG_GENERIC_CLOCKEVENTS_BROADCAST
static void smp_timer_broadcast(const struct cpumask *mask)
{
        smp_cross_call(mask, IPI_TIMER);
}
#else
#define smp_timer_broadcast     NULL
#endif

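/*
 * Dummy clock event device, used when a CPU has no usable local timer.
 * It generates no interrupts of its own; ticks are delivered by the
 * broadcast mechanism via IPI_TIMER, so set_mode is a no-op.
 */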
static void broadcast_timer_set_mode(enum clock_event_mode mode,
        struct clock_event_device *evt)
{
}

static void __cpuinit broadcast_timer_setup(struct clock_event_device *evt)
{
        evt->name       = "dummy_timer";
        evt->features   = CLOCK_EVT_FEAT_ONESHOT |
                          CLOCK_EVT_FEAT_PERIODIC |
                          CLOCK_EVT_FEAT_DUMMY;
        evt->rating     = 400;
        evt->mult       = 1;
        evt->set_mode   = broadcast_timer_set_mode;

        clockevents_register_device(evt);
}

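/*
 * Register this CPU's clock event device: prefer a real local timer
 * via local_timer_setup(); if that fails, fall back to the dummy
 * broadcast-driven device above.
 */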
void __cpuinit percpu_timer_setup(void)
{
        unsigned int cpu = smp_processor_id();
        struct clock_event_device *evt = &per_cpu(percpu_clockevent, cpu);

        evt->cpumask = cpumask_of(cpu);
        evt->broadcast = smp_timer_broadcast;

        if (local_timer_setup(evt))
                broadcast_timer_setup(evt);
}

#ifdef CONFIG_HOTPLUG_CPU
/*
 * The generic clock events code purposely does not stop the local timer
 * on CPU_DEAD/CPU_DEAD_FROZEN hotplug events, so we have to do it
 * manually here.
 */
static void percpu_timer_stop(void)
{
        unsigned int cpu = smp_processor_id();
        struct clock_event_device *evt = &per_cpu(percpu_clockevent, cpu);

        evt->set_mode(CLOCK_EVT_MODE_UNUSED, evt);
}
#endif

static DEFINE_SPINLOCK(stop_lock);

/*
 * ipi_cpu_stop - handle IPI from smp_send_stop()
 */
static void ipi_cpu_stop(unsigned int cpu)
{
        if (system_state == SYSTEM_BOOTING ||
            system_state == SYSTEM_RUNNING) {
                spin_lock(&stop_lock);
                printk(KERN_CRIT "CPU%u: stopping\n", cpu);
                dump_stack();
                spin_unlock(&stop_lock);
        }

        set_cpu_online(cpu, false);

        local_fiq_disable();
        local_irq_disable();

        while (1)
                cpu_relax();
}

/*
 * Main handler for inter-processor interrupts
 */
asmlinkage void __exception_irq_entry do_IPI(int ipinr, struct pt_regs *regs)
{
        unsigned int cpu = smp_processor_id();
        struct pt_regs *old_regs = set_irq_regs(regs);

        if (ipinr >= IPI_TIMER && ipinr < IPI_TIMER + NR_IPI)
                __inc_irq_stat(cpu, ipi_irqs[ipinr - IPI_TIMER]);

        switch (ipinr) {
        case IPI_TIMER:
                ipi_timer();
                break;

        case IPI_RESCHEDULE:
                /*
                 * nothing more to do - everything is
                 * done on the interrupt return path
                 */
                break;

        case IPI_CALL_FUNC:
                generic_smp_call_function_interrupt();
                break;

        case IPI_CALL_FUNC_SINGLE:
                generic_smp_call_function_single_interrupt();
                break;

        case IPI_CPU_STOP:
                ipi_cpu_stop(cpu);
                break;

        default:
                printk(KERN_CRIT "CPU%u: Unknown IPI message 0x%x\n",
                       cpu, ipinr);
                break;
        }
        set_irq_regs(old_regs);
}

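/*
 * Ask CPU "cpu" to reschedule.  The IPI handler does nothing; simply
 * taking the interrupt is enough to make the target CPU re-evaluate
 * its run queue on the return path.
 */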
void smp_send_reschedule(int cpu)
{
        smp_cross_call(cpumask_of(cpu), IPI_RESCHEDULE);
}

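/*
 * Stop all other CPUs (e.g. on shutdown or panic): send IPI_CPU_STOP
 * to every other online CPU and wait up to one second for them to go
 * offline.
 */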
void smp_send_stop(void)
{
        unsigned long timeout;

        if (num_online_cpus() > 1) {
                cpumask_t mask = cpu_online_map;
                cpu_clear(smp_processor_id(), mask);

                smp_cross_call(&mask, IPI_CPU_STOP);
        }

        /* Wait up to one second for other CPUs to stop */
        timeout = USEC_PER_SEC;
        while (num_online_cpus() > 1 && timeout--)
                udelay(1);

        if (num_online_cpus() > 1)
                pr_warning("SMP: failed to stop secondary CPUs\n");
}

/*
 * not supported here
 */
int setup_profiling_timer(unsigned int multiplier)
{
        return -EINVAL;
}