/*
 * SMP initialisation and IPI support
 * Based on arch/arm/kernel/smp.c
 *
 * Copyright (C) 2012 ARM Ltd.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program.  If not, see <http://www.gnu.org/licenses/>.
 */
#include <linux/delay.h>
#include <linux/init.h>
#include <linux/spinlock.h>
#include <linux/sched.h>
#include <linux/interrupt.h>
#include <linux/cache.h>
#include <linux/profile.h>
#include <linux/errno.h>
#include <linux/mm.h>
#include <linux/err.h>
#include <linux/cpu.h>
#include <linux/smp.h>
#include <linux/seq_file.h>
#include <linux/irq.h>
#include <linux/percpu.h>
#include <linux/clockchips.h>
#include <linux/completion.h>
#include <linux/of.h>

#include <asm/atomic.h>
#include <asm/cacheflush.h>
#include <asm/cputype.h>
#include <asm/cpu_ops.h>
#include <asm/mmu_context.h>
#include <asm/pgtable.h>
#include <asm/pgalloc.h>
#include <asm/processor.h>
#include <asm/smp_plat.h>
#include <asm/sections.h>
#include <asm/tlbflush.h>
#include <asm/ptrace.h>

#include <linux/mt_sched_mon.h>

#define CREATE_TRACE_POINTS
#include <trace/events/ipi.h>
/*
 * as from 2.5, kernels no longer have an init_tasks structure
 * so we need some other way of telling a new secondary core
 * where to place its SVC stack
 */
struct secondary_data secondary_data;

enum ipi_msg_type {
	IPI_RESCHEDULE,
	IPI_CALL_FUNC,
	IPI_CALL_FUNC_SINGLE,
	IPI_CPU_STOP,
};
/*
 * Boot a secondary CPU, and assign it the specified idle task.
 * This also gives us the initial stack to use for this CPU.
 */
static int __cpuinit boot_secondary(unsigned int cpu, struct task_struct *idle)
{
	if (cpu_ops[cpu]->cpu_boot)
		return cpu_ops[cpu]->cpu_boot(cpu);

	return -EOPNOTSUPP;
}
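/*
 * Note: cpu_ops[cpu] is selected by the cpu node's "enable-method"
 * property (see cpu_read_ops() in smp_init_cpus() below), typically
 * "psci" or "spin-table" on arm64. The PSCI backend's cpu_boot method,
 * for example, issues a CPU_ON call to firmware with the target CPU's
 * MPIDR and the physical address of the secondary entry point.
 */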
static DECLARE_COMPLETION(cpu_running);
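/*
 * cpu_running is completed by the secondary CPU from
 * secondary_start_kernel() once it has marked itself online, which is
 * what __cpu_up() below waits for with a one second timeout.
 */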
int __cpuinit __cpu_up(unsigned int cpu, struct task_struct *idle)
{
	int ret;

	/*
	 * We need to tell the secondary core where to find its stack and the
	 * page tables.
	 */
	secondary_data.stack = task_stack_page(idle) + THREAD_START_SP;
	__flush_dcache_area(&secondary_data, sizeof(secondary_data));

	/*
	 * Now bring the CPU into our world.
	 */
	ret = boot_secondary(cpu, idle);
	if (ret == 0) {
		/*
		 * CPU was successfully started, wait for it to come online or
		 * time out.
		 */
		wait_for_completion_timeout(&cpu_running,
					    msecs_to_jiffies(1000));

		if (!cpu_online(cpu)) {
			pr_crit("CPU%u: failed to come online\n", cpu);
			ret = -EIO;
		}
	} else {
		pr_err("CPU%u: failed to boot: %d\n", cpu, ret);
	}

	secondary_data.stack = NULL;

	return ret;
}
static void __cpuinit smp_store_cpu_info(unsigned int cpuid)
{
	store_cpu_topology(cpuid);
}
/*
 * This is the secondary CPU boot entry. We're using this CPU's
 * idle thread stack, but a set of temporary page tables.
 */
asmlinkage void __cpuinit secondary_start_kernel(void)
{
	struct mm_struct *mm = &init_mm;
	unsigned int cpu = smp_processor_id();

	printk("CPU%u: Booted secondary processor\n", cpu);

	/*
	 * All kernel threads share the same mm context; grab a
	 * reference and switch to it.
	 */
	atomic_inc(&mm->mm_count);
	current->active_mm = mm;
	cpumask_set_cpu(cpu, mm_cpumask(mm));

	set_my_cpu_offset(per_cpu_offset(smp_processor_id()));

	/*
	 * TTBR0 is only used for the identity mapping at this stage. Make it
	 * point to zero page to avoid speculatively fetching new entries.
	 */
	cpu_set_reserved_ttbr0();
	flush_tlb_all();

	preempt_disable();
	trace_hardirqs_off();

	if (cpu_ops[cpu]->cpu_postboot)
		cpu_ops[cpu]->cpu_postboot();

	/*
	 * Log the CPU info before it is marked online and might get read.
	 */
	cpuinfo_store_cpu();

	/*
	 * OK, now it's safe to let the boot CPU continue.  Wait for
	 * the CPU migration code to notice that the CPU is online
	 * before we continue.
	 */
	set_cpu_online(cpu, true);
	complete(&cpu_running);

	smp_store_cpu_info(cpu);

	/*
	 * Enable GIC and timers.
	 */
	notify_cpu_starting(cpu);

	local_irq_enable();
	local_async_enable();

	/*
	 * OK, it's off to the idle thread for us
	 */
	cpu_startup_entry(CPUHP_ONLINE);
}
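/*
 * CPU hotplug teardown is split between the CPU requesting the unplug,
 * which runs __cpu_disable() and then __cpu_die(), and the dying CPU
 * itself, which calls cpu_die() from its idle thread.
 */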
#ifdef CONFIG_HOTPLUG_CPU
static int op_cpu_disable(unsigned int cpu)
{
	/*
	 * If we don't have a cpu_die method, abort before we reach the point
	 * of no return. CPU0 may not have a cpu_ops, so test for it.
	 */
	if (!cpu_ops[cpu] || !cpu_ops[cpu]->cpu_die)
		return -EOPNOTSUPP;

	/*
	 * We may need to abort a hot unplug for some other mechanism-specific
	 * reason.
	 */
	if (cpu_ops[cpu]->cpu_disable)
		return cpu_ops[cpu]->cpu_disable(cpu);

	return 0;
}
/*
 * __cpu_disable runs on the processor to be shut down.
 */
int __cpu_disable(void)
{
	unsigned int cpu = smp_processor_id();
	int ret;

	ret = op_cpu_disable(cpu);
	if (ret)
		return ret;

	/*
	 * Take this CPU offline. Once we clear this, we can't return,
	 * and we must not schedule until we're ready to give up the cpu.
	 */
	set_cpu_online(cpu, false);

	/*
	 * OK - migrate IRQs away from this CPU
	 */
	migrate_irqs();

	/*
	 * Remove this CPU from the vm mask set of all processes.
	 */
	clear_tasks_mm_cpumask(cpu);

	return 0;
}
static int op_cpu_kill(unsigned int cpu)
{
	/*
	 * If we have no means of synchronising with the dying CPU, then assume
	 * that it is really dead. We can only wait for an arbitrary length of
	 * time and hope that it's dead, so let's skip the wait and just hope.
	 */
	if (!cpu_ops[cpu]->cpu_kill)
		return 1;

	return cpu_ops[cpu]->cpu_kill(cpu);
}
static DECLARE_COMPLETION(cpu_died);

/*
 * called on the thread which is asking for a CPU to be shut down -
 * waits until shutdown has completed, or it is timed out.
 */
void __cpu_die(unsigned int cpu)
{
	if (!wait_for_completion_timeout(&cpu_died, msecs_to_jiffies(5000))) {
		pr_crit("CPU%u: cpu didn't die\n", cpu);
		return;
	}
	pr_notice("CPU%u: shutdown\n", cpu);

	/*
	 * Now that the dying CPU is beyond the point of no return w.r.t.
	 * in-kernel synchronisation, try to get the firmware to help us to
	 * verify that it has really left the kernel before we consider
	 * clobbering anything it might still be using.
	 */
	if (!op_cpu_kill(cpu))
		pr_warn("CPU%d may not have shut down cleanly\n", cpu);
}
/*
 * Called from the idle thread for the CPU which has been shut down.
 *
 * Note that we disable IRQs here, but do not re-enable them
 * before returning to the caller. This is also the behaviour
 * of the other hotplug-cpu capable cores, so presumably coming
 * out of idle fixes this.
 */
void cpu_die(void)
{
	unsigned int cpu = smp_processor_id();

	idle_task_exit();

	local_irq_disable();

	/* Tell __cpu_die() that this CPU is now safe to dispose of */
	complete(&cpu_died);

	/*
	 * Actually shut down the CPU. This must never fail. The specific hotplug
	 * mechanism must perform all required cache maintenance to ensure that
	 * no dirty lines are lost in the process of shutting down the CPU.
	 */
	cpu_ops[cpu]->cpu_die(cpu);

	BUG();
}
#endif
void __init smp_cpus_done(unsigned int max_cpus)
{
	unsigned long bogosum = loops_per_jiffy * num_online_cpus();

	pr_info("SMP: Total of %d processors activated (%lu.%02lu BogoMIPS).\n",
		num_online_cpus(), bogosum / (500000/HZ),
		(bogosum / (5000/HZ)) % 100);
}
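/*
 * BogoMIPS arithmetic above: loops_per_jiffy * HZ is the calibrated
 * number of delay loops per second, and one BogoMIPS is 500000 loops/s,
 * so bogosum / (500000/HZ) gives the integer part and
 * (bogosum / (5000/HZ)) % 100 the two decimal digits. For example, with
 * loops_per_jiffy = 2000000, HZ = 100 and four CPUs online, bogosum is
 * 8000000 and the line printed reads "... (1600.00 BogoMIPS)".
 */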
void __init smp_prepare_boot_cpu(void)
{
	set_my_cpu_offset(per_cpu_offset(smp_processor_id()));
}
/*
 * Enumerate the possible CPU set from the device tree and build the
 * cpu logical map array containing MPIDR values related to logical
 * cpus. Assumes that cpu_logical_map(0) has already been initialized.
 */
void __init smp_init_cpus(void)
{
	struct device_node *dn = NULL;
	unsigned int i, cpu = 1;
	bool bootcpu_valid = false;

	while ((dn = of_find_node_by_type(dn, "cpu"))) {
		const u32 *cell;
		u64 hwid;

		/*
		 * A cpu node with missing "reg" property is
		 * considered invalid to build a cpu_logical_map
		 * entry.
		 */
		cell = of_get_property(dn, "reg", NULL);
		if (!cell) {
			pr_err("%s: missing reg property\n", dn->full_name);
			goto next;
		}
		hwid = of_read_number(cell, of_n_addr_cells(dn));

		/*
		 * Non affinity bits must be set to 0 in the DT
		 */
		if (hwid & ~MPIDR_HWID_BITMASK) {
			pr_err("%s: invalid reg property\n", dn->full_name);
			goto next;
		}

		/*
		 * Duplicate MPIDRs are a recipe for disaster. Scan
		 * all initialized entries and check for
		 * duplicates. If any is found just ignore the cpu.
		 * cpu_logical_map was initialized to INVALID_HWID to
		 * avoid matching valid MPIDR values.
		 */
		for (i = 1; (i < cpu) && (i < NR_CPUS); i++) {
			if (cpu_logical_map(i) == hwid) {
				pr_err("%s: duplicate cpu reg properties in the DT\n",
					dn->full_name);
				goto next;
			}
		}

		/*
		 * The numbering scheme requires that the boot CPU
		 * must be assigned logical id 0. Record it so that
		 * the logical map built from DT is validated and can
		 * be used.
		 */
		if (hwid == cpu_logical_map(0)) {
			if (bootcpu_valid) {
				pr_err("%s: duplicate boot cpu reg property in DT\n",
					dn->full_name);
				goto next;
			}

			bootcpu_valid = true;

			/*
			 * cpu_logical_map has already been
			 * initialized and the boot cpu doesn't need
			 * the enable-method so continue without
			 * incrementing cpu.
			 */
			continue;
		}

		if (cpu >= NR_CPUS)
			goto next;

		if (cpu_read_ops(dn, cpu) != 0)
			goto next;

		if (cpu_ops[cpu]->cpu_init(dn, cpu))
			goto next;

		pr_debug("cpu logical map 0x%llx\n", hwid);
		cpu_logical_map(cpu) = hwid;
next:
		cpu++;
	}

	/* sanity check */
	if (cpu > NR_CPUS)
		pr_warning("no. of cores (%d) greater than configured maximum of %d - clipping\n",
			   cpu, NR_CPUS);

	if (!bootcpu_valid) {
		pr_err("DT missing boot CPU MPIDR, not enabling secondaries\n");
		return;
	}

	/*
	 * All the cpus that made it to the cpu_logical_map have been
	 * validated so set them as possible cpus.
	 */
	for (i = 0; i < NR_CPUS; i++)
		if (cpu_logical_map(i) != INVALID_HWID)
			set_cpu_possible(i, true);
}
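/*
 * Illustrative cpu node for the parsing above (names and values are an
 * example only):
 *
 *	cpu@0 {
 *		device_type = "cpu";
 *		compatible = "arm,cortex-a53";
 *		reg = <0x0 0x0>;
 *		enable-method = "psci";
 *	};
 *
 * "reg" carries the MPIDR affinity fields checked against
 * MPIDR_HWID_BITMASK; the boot CPU is the node whose value matches
 * cpu_logical_map(0).
 */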
void __init smp_prepare_cpus(unsigned int max_cpus)
{
	int err;
	unsigned int cpu, ncores = num_possible_cpus();

	init_cpu_topology();

	smp_store_cpu_info(smp_processor_id());

	/*
	 * are we trying to boot more cores than exist?
	 */
	if (max_cpus > ncores)
		max_cpus = ncores;

	/* Don't bother if we're effectively UP */
	if (max_cpus <= 1)
		return;

	/*
	 * Initialise the present map (which describes the set of CPUs
	 * actually populated at the present time) and release the
	 * secondaries from the bootloader.
	 *
	 * Make sure we online at most (max_cpus - 1) additional CPUs.
	 */
	max_cpus--;
	for_each_possible_cpu(cpu) {
		if (max_cpus == 0)
			break;

		if (cpu == smp_processor_id())
			continue;

		if (!cpu_ops[cpu])
			continue;

		err = cpu_ops[cpu]->cpu_prepare(cpu);
		if (err)
			continue;

		set_cpu_present(cpu, true);
		max_cpus--;
	}
}
static void (*__smp_cross_call)(const struct cpumask *, unsigned int);

void __init set_smp_cross_call(void (*fn)(const struct cpumask *, unsigned int))
{
	__smp_cross_call = fn;
}
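/*
 * Example (from the GIC driver of this era): the interrupt controller
 * registers its SGI trigger during init, e.g.
 *
 *	set_smp_cross_call(gic_raise_softirq);
 *
 * after which smp_cross_call() turns each IPI into a software-generated
 * interrupt on the target CPUs.
 */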
static const char *ipi_types[NR_IPI] __tracepoint_string = {
#define S(x,s)	[x] = s
	S(IPI_RESCHEDULE, "Rescheduling interrupts"),
	S(IPI_CALL_FUNC, "Function call interrupts"),
	S(IPI_CALL_FUNC_SINGLE, "Single function call interrupts"),
	S(IPI_CPU_STOP, "CPU stop interrupts"),
};
static void smp_cross_call(const struct cpumask *target, unsigned int ipinr)
{
	trace_ipi_raise(target, ipi_types[ipinr]);
	__smp_cross_call(target, ipinr);
}
void show_ipi_list(struct seq_file *p, int prec)
{
	unsigned int cpu, i;

	for (i = 0; i < NR_IPI; i++) {
		seq_printf(p, "%*s%u:%s", prec - 1, "IPI", i,
			   prec >= 4 ? " " : "");
		for_each_present_cpu(cpu)
			seq_printf(p, "%10u ",
				   __get_irq_stat(cpu, ipi_irqs[i]));
		seq_printf(p, "      %s\n", ipi_types[i]);
	}
}
u64 smp_irq_stat_cpu(unsigned int cpu)
{
	u64 sum = 0;
	int i;

	for (i = 0; i < NR_IPI; i++)
		sum += __get_irq_stat(cpu, ipi_irqs[i]);

	return sum;
}
void arch_send_call_function_ipi_mask(const struct cpumask *mask)
{
	smp_cross_call(mask, IPI_CALL_FUNC);
}

void arch_send_call_function_single_ipi(int cpu)
{
	smp_cross_call(cpumask_of(cpu), IPI_CALL_FUNC_SINGLE);
}
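/*
 * These two hooks are how the generic kernel/smp.c code delivers
 * cross-CPU function calls: e.g. smp_call_function_single(cpu, fn, info,
 * wait) queues fn and ends up in arch_send_call_function_single_ipi(),
 * with the receiving side handled in handle_IPI() below.
 */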
static DEFINE_RAW_SPINLOCK(stop_lock);

/*
 * ipi_cpu_stop - handle IPI from smp_send_stop()
 */
static void ipi_cpu_stop(unsigned int cpu)
{
	if (system_state == SYSTEM_BOOTING ||
	    system_state == SYSTEM_RUNNING) {
		raw_spin_lock(&stop_lock);
		pr_crit("CPU%u: stopping\n", cpu);
		dump_stack();
		raw_spin_unlock(&stop_lock);
	}

	set_cpu_online(cpu, false);

	local_irq_disable();

	while (1)
		cpu_relax();
}
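/*
 * A stopped CPU never returns from ipi_cpu_stop(): it parks in the
 * cpu_relax() loop with interrupts disabled, having first left
 * cpu_online_mask so that smp_send_stop() can see it go.
 */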
/*
 * Main handler for inter-processor interrupts
 */
void handle_IPI(int ipinr, struct pt_regs *regs)
{
	unsigned int cpu = smp_processor_id();
	struct pt_regs *old_regs = set_irq_regs(regs);

	if ((unsigned)ipinr < NR_IPI) {
		trace_ipi_entry(ipi_types[ipinr]);
		__inc_irq_stat(cpu, ipi_irqs[ipinr]);
	}

	switch (ipinr) {
	case IPI_RESCHEDULE:
		scheduler_ipi();
		break;

	case IPI_CALL_FUNC:
		irq_enter();
		mt_trace_ISR_start(ipinr);
		generic_smp_call_function_interrupt();
		mt_trace_ISR_end(ipinr);
		irq_exit();
		break;

	case IPI_CALL_FUNC_SINGLE:
		irq_enter();
		mt_trace_ISR_start(ipinr);
		generic_smp_call_function_single_interrupt();
		mt_trace_ISR_end(ipinr);
		irq_exit();
		break;

	case IPI_CPU_STOP:
		irq_enter();
		mt_trace_ISR_start(ipinr);
		ipi_cpu_stop(cpu);
		mt_trace_ISR_end(ipinr);
		irq_exit();
		break;

	default:
		pr_crit("CPU%u: Unknown IPI message 0x%x\n", cpu, ipinr);
		break;
	}

	if ((unsigned)ipinr < NR_IPI)
		trace_ipi_exit(ipi_types[ipinr]);
	set_irq_regs(old_regs);
}
void smp_send_reschedule(int cpu)
{
	smp_cross_call(cpumask_of(cpu), IPI_RESCHEDULE);
}
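/*
 * smp_send_reschedule() is the scheduler's kick: the target CPU takes
 * IPI_RESCHEDULE and runs scheduler_ipi() in handle_IPI() above, which
 * is enough to make it re-evaluate its runqueue on the IRQ return path.
 */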
void smp_send_stop(void)
{
	unsigned long timeout;

	if (num_online_cpus() > 1) {
		cpumask_t mask;

		cpumask_copy(&mask, cpu_online_mask);
		cpu_clear(smp_processor_id(), mask);

		smp_cross_call(&mask, IPI_CPU_STOP);
	}

	/* Wait up to one second for other CPUs to stop */
	timeout = USEC_PER_SEC;
	while (num_online_cpus() > 1 && timeout--)
		udelay(1);

	if (num_online_cpus() > 1)
		pr_warning("SMP: failed to stop secondary CPUs\n");
}
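/*
 * smp_send_stop() is used on the shutdown path (e.g. from
 * machine_shutdown()) to park all other CPUs before the final machine
 * state change; it only waits up to one second rather than forever,
 * since the system may already be unstable at that point.
 */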
/*
 * not supported here
 */
int setup_profiling_timer(unsigned int multiplier)
{
	return -EINVAL;
}