/*
 *  linux/arch/arm/kernel/smp.c
 *
 *  Copyright (C) 2002 ARM Limited, All Rights Reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#include <linux/module.h>
#include <linux/delay.h>
#include <linux/init.h>
#include <linux/spinlock.h>
#include <linux/sched.h>
#include <linux/interrupt.h>
#include <linux/cache.h>
#include <linux/profile.h>
#include <linux/errno.h>
#include <linux/mm.h>
#include <linux/err.h>
#include <linux/cpu.h>
#include <linux/seq_file.h>
#include <linux/irq.h>
#include <linux/percpu.h>
#include <linux/clockchips.h>
#include <linux/completion.h>
#include <linux/cpufreq.h>

#include <linux/atomic.h>
#include <asm/cacheflush.h>
#include <asm/cputype.h>
#include <asm/exception.h>
#include <asm/idmap.h>
#include <asm/topology.h>
#include <asm/mmu_context.h>
#include <asm/pgtable.h>
#include <asm/pgalloc.h>
#include <asm/processor.h>
#include <asm/sections.h>
#include <asm/tlbflush.h>
#include <asm/ptrace.h>
#include <asm/localtimer.h>
#include <asm/smp_plat.h>

#include <asm/mach/arch.h>
#include <linux/mt_sched_mon.h>
/*******************************************************************************
 * 20131225 marc.huang                                                         *
 *******************************************************************************/
#include <linux/mtk_ram_console.h>
/******************************************************************************/
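/*
 * mtk_ram_console.h provides aee_rr_rec_hoplug(), used throughout this file
 * to record a numbered hotplug progress step in the persistent ram console.
 * If a CPU hangs while coming up or going down, the last recorded step
 * localises the failure after reboot. (Purpose inferred from usage below.)
 */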
#define CREATE_TRACE_POINTS
#include <trace/events/ipi.h>
/*
 * as from 2.5, kernels no longer have an init_tasks structure
 * so we need some other way of telling a new secondary core
 * where to place its SVC stack
 */
struct secondary_data secondary_data;
/*
 * control for which core is the next to come out of the secondary
 * boot "holding pen"
 */
volatile int __cpuinitdata pen_release = -1;
static DECLARE_COMPLETION(cpu_running);
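/*
 * cpu_running is completed by the incoming CPU at the end of
 * secondary_start_kernel(); __cpu_up() waits on it (with a one second
 * timeout) before checking whether the CPU actually came online.
 */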
static struct smp_operations smp_ops;
void __init smp_set_ops(struct smp_operations *ops)
{
	if (ops)
		smp_ops = *ops;
}
int __cpuinit __cpu_up(unsigned int cpu, struct task_struct *idle)
{
	int ret;

	/*
	 * We need to tell the secondary core where to find
	 * its stack and the page tables.
	 */
	secondary_data.stack = task_stack_page(idle) + THREAD_START_SP;
	secondary_data.pgdir = virt_to_phys(idmap_pgd);
	secondary_data.swapper_pg_dir = virt_to_phys(swapper_pg_dir);
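	/*
	 * The incoming core reads secondary_data before its MMU and caches
	 * are enabled, so the values must be cleaned out of this CPU's
	 * cache (and the outer cache, if present) to main memory first.
	 */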
	__cpuc_flush_dcache_area(&secondary_data, sizeof(secondary_data));
	outer_clean_range(__pa(&secondary_data), __pa(&secondary_data + 1));
	/*
	 * Now bring the CPU into our world.
	 */
	ret = boot_secondary(cpu, idle);
	if (ret == 0) {
		/*
		 * CPU was successfully started, wait for it
		 * to come online or time out.
		 */
		wait_for_completion_timeout(&cpu_running,
					    msecs_to_jiffies(1000));

		if (!cpu_online(cpu)) {
			pr_crit("CPU%u: failed to come online\n", cpu);
			ret = -EIO;
		}
	} else {
		pr_err("CPU%u: failed to boot: %d\n", cpu, ret);
	}
	secondary_data.stack = NULL;
	secondary_data.pgdir = 0;

	return ret;
}
/* platform specific SMP operations */
void __init smp_init_cpus(void)
{
	if (smp_ops.smp_init_cpus)
		smp_ops.smp_init_cpus();
}
int __cpuinit boot_secondary(unsigned int cpu, struct task_struct *idle)
{
	if (smp_ops.smp_boot_secondary)
		return smp_ops.smp_boot_secondary(cpu, idle);
	return -ENOSYS;
}
#ifdef CONFIG_HOTPLUG_CPU
static void percpu_timer_stop(void);

static int platform_cpu_kill(unsigned int cpu)
{
	if (smp_ops.cpu_kill)
		return smp_ops.cpu_kill(cpu);
	return 1;
}
static int platform_cpu_disable(unsigned int cpu)
{
	if (smp_ops.cpu_disable)
		return smp_ops.cpu_disable(cpu);

	/*
	 * By default, allow disabling all CPUs except the first one,
	 * since this is special on a lot of platforms, e.g. because
	 * of clock tick interrupts.
	 */
	return cpu == 0 ? -EPERM : 0;
}
/*
 * __cpu_disable runs on the processor to be shutdown.
 */
int __cpuinit __cpu_disable(void)
{
	unsigned int cpu = smp_processor_id();
	int ret;

	ret = platform_cpu_disable(cpu);
	if (ret)
		return ret;

	/*
	 * Take this CPU offline.  Once we clear this, we can't return,
	 * and we must not schedule until we're ready to give up the cpu.
	 */
	set_cpu_online(cpu, false);

	/*
	 * OK - migrate IRQs away from this CPU
	 */
	migrate_irqs();

	/*
	 * Stop the local timer for this CPU.
	 */
	percpu_timer_stop();

	/*
	 * Flush user cache and TLB mappings, and then remove this CPU
	 * from the vm mask set of all processes.
	 *
	 * Caches are flushed to the Level of Unification Inner Shareable
	 * to write-back dirty lines to unified caches shared by all CPUs.
	 */
	flush_cache_louis();
	local_flush_tlb_all();

	clear_tasks_mm_cpumask(cpu);

	return 0;
}
static DECLARE_COMPLETION(cpu_died);
/*
 * called on the thread which is asking for a CPU to be shutdown -
 * waits until shutdown has completed, or it is timed out.
 */
void __cpuinit __cpu_die(unsigned int cpu)
{
	if (!wait_for_completion_timeout(&cpu_died, msecs_to_jiffies(5000))) {
		pr_err("CPU%u: cpu didn't die\n", cpu);
		return;
	}
	printk(KERN_NOTICE "CPU%u: shutdown\n", cpu);

	/*
	 * platform_cpu_kill() is generally expected to do the powering off
	 * and/or cutting of clocks to the dying CPU.  Optionally, this may
	 * be done by the CPU which is dying in preference to supporting
	 * this call, but that means there is _no_ synchronisation between
	 * the requesting CPU and the dying CPU actually losing power.
	 */
	if (!platform_cpu_kill(cpu))
		printk("CPU%u: unable to kill\n", cpu);
}
/*
 * Called from the idle thread for the CPU which has been shutdown.
 *
 * Note that we disable IRQs here, but do not re-enable them
 * before returning to the caller. This is also the behaviour
 * of the other hotplug-cpu capable cores, so presumably coming
 * out of idle fixes this.
 */
void __ref cpu_die(void)
{
	unsigned int cpu = smp_processor_id();
	aee_rr_rec_hoplug(cpu, 51, 0);

	idle_task_exit();
	aee_rr_rec_hoplug(cpu, 52, 0);

	local_irq_disable();
	aee_rr_rec_hoplug(cpu, 53, 0);

	/*
	 * Flush the data out of the L1 cache for this CPU.  This must be
	 * before the completion to ensure that data is safely written out
	 * before platform_cpu_kill() gets called - which may disable
	 * *this* CPU and power down its cache.
	 */
	flush_cache_louis();
	aee_rr_rec_hoplug(cpu, 54, 0);

	/*
	 * Tell __cpu_die() that this CPU is now safe to dispose of.  Once
	 * this returns, power and/or clocks can be removed at any point
	 * from this CPU and its cache by platform_cpu_kill().
	 */
	complete(&cpu_died);
	aee_rr_rec_hoplug(cpu, 55, 0);

	/*
	 * Ensure that the cache lines associated with that completion are
	 * written out.  This covers the case where _this_ CPU is doing the
	 * powering down, to ensure that the completion is visible to the
	 * CPU waiting for this one.
	 */
	flush_cache_louis();
	aee_rr_rec_hoplug(cpu, 56, 0);

	/*
	 * The actual CPU shutdown procedure is at least platform (if not
	 * CPU) specific.  This may remove power, or it may simply spin.
	 *
	 * Platforms are generally expected *NOT* to return from this call,
	 * although there are some which do because they have no way to
	 * power down the CPU.  These platforms are the _only_ reason we
	 * have a return path which uses the fragment of assembly below.
	 *
	 * The return path should not be used for platforms which can
	 * power off the CPU.
	 */
	if (smp_ops.cpu_die)
		smp_ops.cpu_die(cpu);

	/*
	 * Do not return to the idle loop - jump back to the secondary
	 * cpu initialisation.  There's some initialisation which needs
	 * to be repeated to undo the effects of taking the CPU offline.
	 */
	__asm__("mov	sp, %0\n"
	"	mov	fp, #0\n"
	"	b	secondary_start_kernel"
		:
		: "r" (task_stack_page(current) + THREAD_SIZE - 8));
}
#endif /* CONFIG_HOTPLUG_CPU */
/*
 * Called by both boot and secondaries to move global data into
 * per-processor storage.
 */
static void __cpuinit smp_store_cpu_info(unsigned int cpuid)
{
	struct cpuinfo_arm *cpu_info = &per_cpu(cpu_data, cpuid);

	cpu_info->loops_per_jiffy = loops_per_jiffy;
	cpu_info->cpuid = read_cpuid_id();
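	/*
	 * Record this CPU's position in the topology (core and cluster
	 * sibling masks) for use by the scheduler.
	 */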
	store_cpu_topology(cpuid);
}
static void percpu_timer_setup(void);
/*
 * This is the secondary CPU boot entry.  We're using this CPUs
 * idle thread stack, but a set of temporary page tables.
 */
asmlinkage void __cpuinit secondary_start_kernel(void)
{
	struct mm_struct *mm = &init_mm;
	unsigned int cpu = 0;
	aee_rr_rec_hoplug(cpu, 1, 0);

	/*
	 * The identity mapping is uncached (strongly ordered), so
	 * switch away from it before attempting any exclusive accesses.
	 */
	cpu_switch_mm(mm->pgd, mm);
	local_flush_bp_all();
	enter_lazy_tlb(mm, current);
	local_flush_tlb_all();
	aee_rr_rec_hoplug(cpu, 2, 0);

	/*
	 * All kernel threads share the same mm context; grab a
	 * reference and switch to it.
	 */
	cpu = smp_processor_id();
	aee_rr_rec_hoplug(cpu, 3, 0);
	atomic_inc(&mm->mm_count);
	current->active_mm = mm;
	cpumask_set_cpu(cpu, mm_cpumask(mm));
	aee_rr_rec_hoplug(cpu, 4, 0);

	cpu_init();
	aee_rr_rec_hoplug(cpu, 5, 0);

	printk("CPU%u: Booted secondary processor\n", cpu);

	preempt_disable();
	aee_rr_rec_hoplug(cpu, 6, 0);
	trace_hardirqs_off();
	aee_rr_rec_hoplug(cpu, 7, 0);

	/*
	 * Give the platform a chance to do its own initialisation.
	 */
	if (smp_ops.smp_secondary_init)
		smp_ops.smp_secondary_init(cpu);
	aee_rr_rec_hoplug(cpu, 8, 0);

	notify_cpu_starting(cpu);
	aee_rr_rec_hoplug(cpu, 9, 0);

	calibrate_delay();
	aee_rr_rec_hoplug(cpu, 10, 0);

	smp_store_cpu_info(cpu);
	aee_rr_rec_hoplug(cpu, 11, 0);

	/*
	 * OK, now it's safe to let the boot CPU continue.  Wait for
	 * the CPU migration code to notice that the CPU is online
	 * before we continue - which happens after __cpu_up returns.
	 */
	set_cpu_online(cpu, true);
	aee_rr_rec_hoplug(cpu, 12, 0);
	complete(&cpu_running);
	aee_rr_rec_hoplug(cpu, 13, 0);

	/*
	 * Setup the percpu timer for this CPU.
	 */
	percpu_timer_setup();
	aee_rr_rec_hoplug(cpu, 14, 0);

	local_irq_enable();
	aee_rr_rec_hoplug(cpu, 15, 0);
	local_fiq_enable();
	aee_rr_rec_hoplug(cpu, 16, 0);

	/*
	 * OK, it's off to the idle thread for us
	 */
	cpu_startup_entry(CPUHP_ONLINE);
	aee_rr_rec_hoplug(cpu, 17, 0);
}
void __init smp_cpus_done(unsigned int max_cpus)
{
	int cpu;
	unsigned long bogosum = 0;

	for_each_online_cpu(cpu)
		bogosum += per_cpu(cpu_data, cpu).loops_per_jiffy;
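	/*
	 * One BogoMIPS is loops_per_jiffy * HZ / 500000, so dividing the
	 * sum by (500000/HZ) yields the integer part, and dividing by
	 * (5000/HZ) modulo 100 yields the two-digit fractional part.
	 */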
	printk(KERN_INFO "SMP: Total of %d processors activated "
	       "(%lu.%02lu BogoMIPS).\n",
	       num_online_cpus(),
	       bogosum / (500000/HZ),
	       (bogosum / (5000/HZ)) % 100);
}
void __init smp_prepare_boot_cpu(void)
{
	set_my_cpu_offset(per_cpu_offset(smp_processor_id()));
}
void __init smp_prepare_cpus(unsigned int max_cpus)
{
	unsigned int ncores = num_possible_cpus();

	init_cpu_topology();

	smp_store_cpu_info(smp_processor_id());

	/*
	 * are we trying to boot more cores than exist?
	 */
	if (max_cpus > ncores)
		max_cpus = ncores;
	if (ncores > 1 && max_cpus) {
		/*
		 * Enable the local timer or broadcast device for the
		 * boot CPU, but only if we have more than one CPU.
		 */
		percpu_timer_setup();

		/*
		 * Initialise the present map, which describes the set of CPUs
		 * actually populated at the present time. A platform should
		 * re-initialize the map in the platforms smp_prepare_cpus()
		 * if present != possible (e.g. physical hotplug).
		 */
		init_cpu_present(cpu_possible_mask);

		/*
		 * Initialise the SCU if there are more than one CPU
		 * and let them know where to start.
		 */
		if (smp_ops.smp_prepare_cpus)
			smp_ops.smp_prepare_cpus(max_cpus);
	}
}
static void (*__smp_cross_call)(const struct cpumask *, unsigned int);
void __init set_smp_cross_call(void (*fn)(const struct cpumask *, unsigned int))
{
	if (!__smp_cross_call)
		__smp_cross_call = fn;
}
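/*
 * Human-readable IPI names, used both by show_ipi_list() for
 * /proc/interrupts and by the IPI tracepoints.
 */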
static const char *ipi_types[NR_IPI] __tracepoint_string = {
#define S(x,s)	[x] = s
	S(IPI_WAKEUP, "CPU wakeup interrupts"),
	S(IPI_TIMER, "Timer broadcast interrupts"),
	S(IPI_RESCHEDULE, "Rescheduling interrupts"),
	S(IPI_CALL_FUNC, "Function call interrupts"),
	S(IPI_CALL_FUNC_SINGLE, "Single function call interrupts"),
	S(IPI_CPU_STOP, "CPU stop interrupts"),
	S(IPI_CPU_BACKTRACE, "CPU backtrace"),
};
static void smp_cross_call(const struct cpumask *target, unsigned int ipinr)
{
	trace_ipi_raise(target, ipi_types[ipinr]);
	__smp_cross_call(target, ipinr);
}
void show_ipi_list(struct seq_file *p, int prec)
{
	unsigned int cpu, i;

	for (i = 0; i < NR_IPI; i++) {
		seq_printf(p, "%*s%u: ", prec - 1, "IPI", i);

		for_each_online_cpu(cpu)
			seq_printf(p, "%10u ",
				   __get_irq_stat(cpu, ipi_irqs[i]));

		seq_printf(p, " %s\n", ipi_types[i]);
	}
}
u64 smp_irq_stat_cpu(unsigned int cpu)
{
	u64 sum = 0;
	int i;

	for (i = 0; i < NR_IPI; i++)
		sum += __get_irq_stat(cpu, ipi_irqs[i]);

	return sum;
}
/*
 * Timer (local or broadcast) support
 */
static DEFINE_PER_CPU(struct clock_event_device, percpu_clockevent);
void arch_send_call_function_ipi_mask(const struct cpumask *mask)
{
	smp_cross_call(mask, IPI_CALL_FUNC);
}
void arch_send_wakeup_ipi_mask(const struct cpumask *mask)
{
	smp_cross_call(mask, IPI_WAKEUP);
}
void arch_send_call_function_single_ipi(int cpu)
{
	smp_cross_call(cpumask_of(cpu), IPI_CALL_FUNC_SINGLE);
}
#ifdef CONFIG_GENERIC_CLOCKEVENTS_BROADCAST
void tick_broadcast(const struct cpumask *mask)
{
	smp_cross_call(mask, IPI_TIMER);
}
#endif
static void broadcast_timer_set_mode(enum clock_event_mode mode,
	struct clock_event_device *evt)
{
}
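/*
 * When no usable per-CPU hardware timer exists, a "dummy" clock event
 * device is registered instead; it raises no interrupts of its own and
 * relies on the broadcast mechanism (IPI_TIMER) to deliver ticks.
 */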
static void __cpuinit broadcast_timer_setup(struct clock_event_device *evt)
{
	evt->name	= "dummy_timer";
	evt->features	= CLOCK_EVT_FEAT_ONESHOT |
			  CLOCK_EVT_FEAT_PERIODIC |
			  CLOCK_EVT_FEAT_DUMMY;
	evt->rating	= 100;
	evt->mult	= 1;
	evt->set_mode	= broadcast_timer_set_mode;

	clockevents_register_device(evt);
}
static struct local_timer_ops *lt_ops;
#ifdef CONFIG_LOCAL_TIMERS
int local_timer_register(struct local_timer_ops *ops)
{
	if (!is_smp() || !setup_max_cpus)
		return -ENXIO;

	if (lt_ops)
		return -EBUSY;

	lt_ops = ops;
	return 0;
}
#endif
static void __cpuinit percpu_timer_setup(void)
{
	unsigned int cpu = smp_processor_id();
	struct clock_event_device *evt = &per_cpu(percpu_clockevent, cpu);

	evt->cpumask = cpumask_of(cpu);
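	/*
	 * Prefer the registered hardware local timer; if none was
	 * registered, or its setup fails, fall back to the broadcast
	 * dummy device.
	 */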
	if (!lt_ops || lt_ops->setup(evt))
		broadcast_timer_setup(evt);
}
#ifdef CONFIG_HOTPLUG_CPU
/*
 * The generic clock events code purposely does not stop the local timer
 * on CPU_DEAD/CPU_DEAD_FROZEN hotplug events, so we have to do it
 * ourselves here.
 */
static void percpu_timer_stop(void)
{
	unsigned int cpu = smp_processor_id();
	struct clock_event_device *evt = &per_cpu(percpu_clockevent, cpu);

	if (lt_ops)
		lt_ops->stop(evt);
}
#endif
static DEFINE_RAW_SPINLOCK(stop_lock);
/*
 * ipi_cpu_stop - handle IPI from smp_send_stop()
 */
static void ipi_cpu_stop(unsigned int cpu)
{
	if (system_state == SYSTEM_BOOTING ||
	    system_state == SYSTEM_RUNNING) {
		raw_spin_lock(&stop_lock);
		printk(KERN_CRIT "CPU%u: stopping\n", cpu);
		dump_stack();
		raw_spin_unlock(&stop_lock);
	}

	set_cpu_online(cpu, false);

	local_fiq_disable();
	local_irq_disable();

	while (1)
		cpu_relax();
}
static cpumask_t backtrace_mask;
static DEFINE_RAW_SPINLOCK(backtrace_lock);

/* "in progress" flag of arch_trigger_all_cpu_backtrace */
static unsigned long backtrace_flag;
void smp_send_all_cpu_backtrace(void)
{
	unsigned int this_cpu = smp_processor_id();
	int i;

	if (test_and_set_bit(0, &backtrace_flag))
		/*
		 * If there is already a trigger_all_cpu_backtrace() in progress
		 * (backtrace_flag == 1), don't output double cpu dump infos.
		 */
		return;

	cpumask_copy(&backtrace_mask, cpu_online_mask);
	cpu_clear(this_cpu, backtrace_mask);

	pr_info("Backtrace for cpu %d (current):\n", this_cpu);
	dump_stack();

	pr_info("\nsending IPI to all other CPUs:\n");
	smp_cross_call(&backtrace_mask, IPI_CPU_BACKTRACE);

	/* Wait for up to 10 seconds for all other CPUs to do the backtrace */
	for (i = 0; i < 10 * 1000; i++) {
		if (cpumask_empty(&backtrace_mask))
			break;
		mdelay(1);
	}

	clear_bit(0, &backtrace_flag);
	smp_mb__after_clear_bit();
}
/*
 * ipi_cpu_backtrace - handle IPI from smp_send_all_cpu_backtrace()
 */
static void ipi_cpu_backtrace(unsigned int cpu, struct pt_regs *regs)
{
	if (cpu_isset(cpu, backtrace_mask)) {
		raw_spin_lock(&backtrace_lock);
		pr_warning("IPI backtrace for cpu %d\n", cpu);
		show_regs(regs);
		raw_spin_unlock(&backtrace_lock);
		cpu_clear(cpu, backtrace_mask);
	}
}
/*
 * Main handler for inter-processor interrupts
 */
asmlinkage void __exception_irq_entry do_IPI(int ipinr, struct pt_regs *regs)
{
	handle_IPI(ipinr, regs);
}
void handle_IPI(int ipinr, struct pt_regs *regs)
{
	unsigned int cpu = smp_processor_id();
	struct pt_regs *old_regs = set_irq_regs(regs);
	if ((unsigned)ipinr < NR_IPI) {
		trace_ipi_entry(ipi_types[ipinr]);
		__inc_irq_stat(cpu, ipi_irqs[ipinr]);
	}
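	/*
	 * Cases that can kick off further work (timer ticks, function
	 * calls, stop) run inside irq_enter()/irq_exit() so softirqs and
	 * RCU see this as interrupt context; the MTK mt_trace_ISR hooks
	 * bracket each handler for ISR duration monitoring.
	 */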
	switch (ipinr) {
	case IPI_WAKEUP:
		mt_trace_ISR_start(ipinr);
		mt_trace_ISR_end(ipinr);
		break;

#ifdef CONFIG_GENERIC_CLOCKEVENTS_BROADCAST
	case IPI_TIMER:
		irq_enter();
		mt_trace_ISR_start(ipinr);
		tick_receive_broadcast();
		mt_trace_ISR_end(ipinr);
		irq_exit();
		break;
#endif

	case IPI_RESCHEDULE:
		scheduler_ipi();
		break;

	case IPI_CALL_FUNC:
		irq_enter();
		mt_trace_ISR_start(ipinr);
		generic_smp_call_function_interrupt();
		mt_trace_ISR_end(ipinr);
		irq_exit();
		break;

	case IPI_CALL_FUNC_SINGLE:
		irq_enter();
		mt_trace_ISR_start(ipinr);
		generic_smp_call_function_single_interrupt();
		mt_trace_ISR_end(ipinr);
		irq_exit();
		break;

	case IPI_CPU_STOP:
		irq_enter();
		mt_trace_ISR_start(ipinr);
		ipi_cpu_stop(cpu);
		mt_trace_ISR_end(ipinr);
		irq_exit();
		break;

	case IPI_CPU_BACKTRACE:
		mt_trace_ISR_start(ipinr);
		ipi_cpu_backtrace(cpu, regs);
		mt_trace_ISR_end(ipinr);
		break;

	default:
		mt_trace_ISR_start(ipinr);
		printk(KERN_CRIT "CPU%u: Unknown IPI message 0x%x\n",
		       cpu, ipinr);
		mt_trace_ISR_end(ipinr);
		break;
	}

	if ((unsigned)ipinr < NR_IPI)
		trace_ipi_exit(ipi_types[ipinr]);
	set_irq_regs(old_regs);
}
void smp_send_reschedule(int cpu)
{
	smp_cross_call(cpumask_of(cpu), IPI_RESCHEDULE);
}
void smp_send_stop(void)
{
	unsigned long timeout;
	struct cpumask mask;

	cpumask_copy(&mask, cpu_online_mask);
	cpumask_clear_cpu(smp_processor_id(), &mask);
	if (!cpumask_empty(&mask))
		smp_cross_call(&mask, IPI_CPU_STOP);

	/* Wait up to one second for other CPUs to stop */
	timeout = USEC_PER_SEC;
	while (num_online_cpus() > 1 && timeout--)
		udelay(1);

	if (num_online_cpus() > 1)
		pr_warning("SMP: failed to stop secondary CPUs\n");
}
/*
 * not supported here
 */
int setup_profiling_timer(unsigned int multiplier)
{
	return -EINVAL;
}
#ifdef CONFIG_CPU_FREQ

static DEFINE_PER_CPU(unsigned long, l_p_j_ref);
static DEFINE_PER_CPU(unsigned long, l_p_j_ref_freq);
static unsigned long global_l_p_j_ref;
static unsigned long global_l_p_j_ref_freq;
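/*
 * loops_per_jiffy scales linearly with CPU clock frequency, so delay
 * loops drift whenever cpufreq changes the clock.  The *_ref variables
 * capture lpj and the frequency at which it was first calibrated;
 * cpufreq_scale(ref, ref_freq, new_freq) then rescales lpj so udelay()
 * stays roughly accurate across frequency transitions.
 */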
static int cpufreq_callback(struct notifier_block *nb,
					unsigned long val, void *data)
{
	struct cpufreq_freqs *freq = data;
	int cpu = freq->cpu;

	if (freq->flags & CPUFREQ_CONST_LOOPS)
		return NOTIFY_OK;

	if (!per_cpu(l_p_j_ref, cpu)) {
		per_cpu(l_p_j_ref, cpu) =
			per_cpu(cpu_data, cpu).loops_per_jiffy;
		per_cpu(l_p_j_ref_freq, cpu) = freq->old;
		if (!global_l_p_j_ref) {
			global_l_p_j_ref = loops_per_jiffy;
			global_l_p_j_ref_freq = freq->old;
		}
	}

	if ((val == CPUFREQ_PRECHANGE  && freq->old < freq->new) ||
	    (val == CPUFREQ_POSTCHANGE && freq->old > freq->new) ||
	    (val == CPUFREQ_RESUMECHANGE || val == CPUFREQ_SUSPENDCHANGE)) {
		loops_per_jiffy = cpufreq_scale(global_l_p_j_ref,
						global_l_p_j_ref_freq,
						freq->new);
		per_cpu(cpu_data, cpu).loops_per_jiffy =
			cpufreq_scale(per_cpu(l_p_j_ref, cpu),
					per_cpu(l_p_j_ref_freq, cpu),
					freq->new);
	}
	return NOTIFY_OK;
}

static struct notifier_block cpufreq_notifier = {
	.notifier_call  = cpufreq_callback,
};

static int __init register_cpufreq_notifier(void)
{
	return cpufreq_register_notifier(&cpufreq_notifier,
						CPUFREQ_TRANSITION_NOTIFIER);
}
core_initcall(register_cpufreq_notifier);

#endif