/*
 * SMP initialisation and IPI support
 * Based on arch/arm/kernel/smp.c
 *
 * Copyright (C) 2012 ARM Ltd.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program.  If not, see <http://www.gnu.org/licenses/>.
 */

#include <linux/delay.h>
#include <linux/init.h>
#include <linux/spinlock.h>
#include <linux/sched.h>
#include <linux/interrupt.h>
#include <linux/cache.h>
#include <linux/profile.h>
#include <linux/errno.h>
#include <linux/mm.h>
#include <linux/err.h>
#include <linux/cpu.h>
#include <linux/smp.h>
#include <linux/seq_file.h>
#include <linux/irq.h>
#include <linux/percpu.h>
#include <linux/clockchips.h>
#include <linux/completion.h>
#include <linux/of.h>

#include <asm/atomic.h>
#include <asm/cacheflush.h>
#include <asm/cputype.h>
#include <asm/cpu_ops.h>
#include <asm/mmu_context.h>
#include <asm/pgtable.h>
#include <asm/pgalloc.h>
#include <asm/processor.h>
#include <asm/smp_plat.h>
#include <asm/sections.h>
#include <asm/tlbflush.h>
#include <asm/ptrace.h>

#include <linux/mt_sched_mon.h>

#define CREATE_TRACE_POINTS
#include <trace/events/ipi.h>

/*
 * as from 2.5, kernels no longer have an init_tasks structure
 * so we need some other way of telling a new secondary core
 * where to place its SVC stack
 */
struct secondary_data secondary_data;

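/*
 * IPI message types handled by handle_IPI(). The order here indexes the
 * per-CPU IPI counters and the ipi_types[] name table used by
 * show_ipi_list().
 */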
enum ipi_msg_type {
	IPI_RESCHEDULE,
	IPI_CALL_FUNC,
	IPI_CALL_FUNC_SINGLE,
	IPI_CPU_STOP,
};

/*
 * Boot a secondary CPU, and assign it the specified idle task.
 * This also gives us the initial stack to use for this CPU.
 */
static int __cpuinit boot_secondary(unsigned int cpu, struct task_struct *idle)
{
	if (cpu_ops[cpu]->cpu_boot)
		return cpu_ops[cpu]->cpu_boot(cpu);

	return -EOPNOTSUPP;
}

static DECLARE_COMPLETION(cpu_running);

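/*
 * Bring a secondary CPU into the kernel. Runs on the requesting CPU: it
 * publishes the idle task's stack via secondary_data, asks the CPU's
 * enable method to boot it, then waits up to one second for it to come
 * online.
 */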
int __cpuinit __cpu_up(unsigned int cpu, struct task_struct *idle)
{
	int ret;

	/*
	 * We need to tell the secondary core where to find its stack and the
	 * page tables.
	 */
	secondary_data.stack = task_stack_page(idle) + THREAD_START_SP;
	__flush_dcache_area(&secondary_data, sizeof(secondary_data));

	/*
	 * Now bring the CPU into our world.
	 */
	ret = boot_secondary(cpu, idle);
	if (ret == 0) {
		/*
		 * CPU was successfully started, wait for it to come online or
		 * time out.
		 */
		wait_for_completion_timeout(&cpu_running,
					    msecs_to_jiffies(1000));

		if (!cpu_online(cpu)) {
			pr_crit("CPU%u: failed to come online\n", cpu);
			ret = -EIO;
		}
	} else {
		pr_err("CPU%u: failed to boot: %d\n", cpu, ret);
	}

	secondary_data.stack = NULL;

	return ret;
}

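/* Cache this CPU's topology (cluster/core sibling info) for the scheduler. */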
static void __cpuinit smp_store_cpu_info(unsigned int cpuid)
{
	store_cpu_topology(cpuid);
}

/*
 * This is the secondary CPU boot entry. We're using this CPU's
 * idle thread stack, but a set of temporary page tables.
 */
asmlinkage void __cpuinit secondary_start_kernel(void)
{
	struct mm_struct *mm = &init_mm;
	unsigned int cpu = smp_processor_id();

	printk("CPU%u: Booted secondary processor\n", cpu);

	/*
	 * All kernel threads share the same mm context; grab a
	 * reference and switch to it.
	 */
	atomic_inc(&mm->mm_count);
	current->active_mm = mm;
	cpumask_set_cpu(cpu, mm_cpumask(mm));

	set_my_cpu_offset(per_cpu_offset(smp_processor_id()));

	/*
	 * TTBR0 is only used for the identity mapping at this stage. Make it
	 * point to zero page to avoid speculatively fetching new entries.
	 */
	cpu_set_reserved_ttbr0();
	flush_tlb_all();

	preempt_disable();
	trace_hardirqs_off();

	if (cpu_ops[cpu]->cpu_postboot)
		cpu_ops[cpu]->cpu_postboot();

	/*
	 * OK, now it's safe to let the boot CPU continue. Wait for
	 * the CPU migration code to notice that the CPU is online
	 * before we continue.
	 */
	set_cpu_online(cpu, true);
	complete(&cpu_running);

	smp_store_cpu_info(cpu);

	/*
	 * Enable GIC and timers.
	 */
	notify_cpu_starting(cpu);

	local_dbg_enable();
	local_irq_enable();
	local_fiq_enable();

	/*
	 * OK, it's off to the idle thread for us
	 */
	cpu_startup_entry(CPUHP_ONLINE);
}

#ifdef CONFIG_HOTPLUG_CPU
static int op_cpu_disable(unsigned int cpu)
{
	/*
	 * If we don't have a cpu_die method, abort before we reach the point
	 * of no return. CPU0 may not have a cpu_ops, so test for it.
	 */
	if (!cpu_ops[cpu] || !cpu_ops[cpu]->cpu_die)
		return -EOPNOTSUPP;

	/*
	 * We may need to abort a hot unplug for some other mechanism-specific
	 * reason.
	 */
	if (cpu_ops[cpu]->cpu_disable)
		return cpu_ops[cpu]->cpu_disable(cpu);

	return 0;
}

/*
 * __cpu_disable runs on the processor to be shut down.
 */
int __cpu_disable(void)
{
	unsigned int cpu = smp_processor_id();
	int ret;

	ret = op_cpu_disable(cpu);
	if (ret)
		return ret;

	/*
	 * Take this CPU offline. Once we clear this, we can't return,
	 * and we must not schedule until we're ready to give up the cpu.
	 */
	set_cpu_online(cpu, false);

	/*
	 * OK - migrate IRQs away from this CPU
	 */
	migrate_irqs();

	/*
	 * Remove this CPU from the vm mask set of all processes.
	 */
	clear_tasks_mm_cpumask(cpu);

	return 0;
}

static int op_cpu_kill(unsigned int cpu)
{
	/*
	 * If we have no means of synchronising with the dying CPU, then assume
	 * that it is really dead. We can only wait for an arbitrary length of
	 * time and hope that it's dead, so let's skip the wait and just hope.
	 */
	if (!cpu_ops[cpu]->cpu_kill)
		return 1;

	return cpu_ops[cpu]->cpu_kill(cpu);
}

static DECLARE_COMPLETION(cpu_died);

/*
 * Called on the thread which is asking for a CPU to be shut down -
 * waits until shutdown has completed, or it is timed out.
 */
void __cpu_die(unsigned int cpu)
{
	if (!wait_for_completion_timeout(&cpu_died, msecs_to_jiffies(5000))) {
		pr_crit("CPU%u: cpu didn't die\n", cpu);
		return;
	}
	pr_notice("CPU%u: shutdown\n", cpu);

	/*
	 * Now that the dying CPU is beyond the point of no return w.r.t.
	 * in-kernel synchronisation, try to get the firmware to help us to
	 * verify that it has really left the kernel before we consider
	 * clobbering anything it might still be using.
	 */
	if (!op_cpu_kill(cpu))
		pr_warn("CPU%d may not have shut down cleanly\n", cpu);
}

/*
 * Called from the idle thread for the CPU which has been shut down.
 *
 * Note that we disable IRQs here, but do not re-enable them
 * before returning to the caller. This is also the behaviour
 * of the other hotplug-cpu capable cores, so presumably coming
 * out of idle fixes this.
 */
void cpu_die(void)
{
	unsigned int cpu = smp_processor_id();

	idle_task_exit();

	local_irq_disable();

	/* Tell __cpu_die() that this CPU is now safe to dispose of */
	complete(&cpu_died);

	/*
	 * Actually shut down the CPU. This must never fail. The specific
	 * hotplug mechanism must perform all required cache maintenance to
	 * ensure that no dirty lines are lost in the process of shutting
	 * down the CPU.
	 */
	cpu_ops[cpu]->cpu_die(cpu);

	BUG();
}
#endif

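/* Called by the generic SMP code once all secondary CPUs have been brought up. */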
void __init smp_cpus_done(unsigned int max_cpus)
{
	unsigned long bogosum = loops_per_jiffy * num_online_cpus();

	pr_info("SMP: Total of %d processors activated (%lu.%02lu BogoMIPS).\n",
		num_online_cpus(), bogosum / (500000/HZ),
		(bogosum / (5000/HZ)) % 100);
}

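/* Set up the per-cpu offset register for the boot CPU. */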
void __init smp_prepare_boot_cpu(void)
{
	set_my_cpu_offset(per_cpu_offset(smp_processor_id()));
}

/*
 * Enumerate the possible CPU set from the device tree and build the
 * cpu logical map array containing MPIDR values related to logical
 * cpus. Assumes that cpu_logical_map(0) has already been initialized.
 */
void __init smp_init_cpus(void)
{
	struct device_node *dn = NULL;
	unsigned int i, cpu = 1;
	bool bootcpu_valid = false;

	while ((dn = of_find_node_by_type(dn, "cpu"))) {
		const u32 *cell;
		u64 hwid;

		/*
		 * A cpu node with missing "reg" property is
		 * considered invalid to build a cpu_logical_map
		 * entry.
		 */
		cell = of_get_property(dn, "reg", NULL);
		if (!cell) {
			pr_err("%s: missing reg property\n", dn->full_name);
			goto next;
		}
		hwid = of_read_number(cell, of_n_addr_cells(dn));

		/*
		 * Non affinity bits must be set to 0 in the DT
		 */
		if (hwid & ~MPIDR_HWID_BITMASK) {
			pr_err("%s: invalid reg property\n", dn->full_name);
			goto next;
		}

		/*
		 * Duplicate MPIDRs are a recipe for disaster. Scan
		 * all initialized entries and check for
		 * duplicates. If any is found just ignore the cpu.
		 * cpu_logical_map was initialized to INVALID_HWID to
		 * avoid matching valid MPIDR values.
		 */
		for (i = 1; (i < cpu) && (i < NR_CPUS); i++) {
			if (cpu_logical_map(i) == hwid) {
				pr_err("%s: duplicate cpu reg properties in the DT\n",
					dn->full_name);
				goto next;
			}
		}

		/*
		 * The numbering scheme requires that the boot CPU
		 * must be assigned logical id 0. Record it so that
		 * the logical map built from DT is validated and can
		 * be used.
		 */
		if (hwid == cpu_logical_map(0)) {
			if (bootcpu_valid) {
				pr_err("%s: duplicate boot cpu reg property in DT\n",
					dn->full_name);
				goto next;
			}

			bootcpu_valid = true;

			/*
			 * cpu_logical_map has already been
			 * initialized and the boot cpu doesn't need
			 * the enable-method so continue without
			 * incrementing cpu.
			 */
			continue;
		}

		if (cpu >= NR_CPUS)
			goto next;

		if (cpu_read_ops(dn, cpu) != 0)
			goto next;

		if (cpu_ops[cpu]->cpu_init(dn, cpu))
			goto next;

		pr_debug("cpu logical map 0x%llx\n", hwid);
		cpu_logical_map(cpu) = hwid;
next:
		cpu++;
	}

	/* sanity check */
	if (cpu > NR_CPUS)
		pr_warning("no. of cores (%d) greater than configured maximum of %d - clipping\n",
			   cpu, NR_CPUS);

	if (!bootcpu_valid) {
		pr_err("DT missing boot CPU MPIDR, not enabling secondaries\n");
		return;
	}

	/*
	 * All the cpus that made it to the cpu_logical_map have been
	 * validated so set them as possible cpus.
	 */
	for (i = 0; i < NR_CPUS; i++)
		if (cpu_logical_map(i) != INVALID_HWID)
			set_cpu_possible(i, true);
}

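/*
 * Mark as present every possible secondary CPU whose enable method can
 * prepare it, honouring the max_cpus limit passed in.
 */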
void __init smp_prepare_cpus(unsigned int max_cpus)
{
	int err;
	unsigned int cpu, ncores = num_possible_cpus();

	init_cpu_topology();

	smp_store_cpu_info(smp_processor_id());

	/*
	 * are we trying to boot more cores than exist?
	 */
	if (max_cpus > ncores)
		max_cpus = ncores;

	/* Don't bother if we're effectively UP */
	if (max_cpus <= 1)
		return;

	/*
	 * Initialise the present map (which describes the set of CPUs
	 * actually populated at the present time) and release the
	 * secondaries from the bootloader.
	 *
	 * Make sure we online at most (max_cpus - 1) additional CPUs.
	 */
	max_cpus--;
	for_each_possible_cpu(cpu) {
		if (max_cpus == 0)
			break;

		if (cpu == smp_processor_id())
			continue;

		if (!cpu_ops[cpu])
			continue;

		err = cpu_ops[cpu]->cpu_prepare(cpu);
		if (err)
			continue;

		set_cpu_present(cpu, true);
		max_cpus--;
	}
}

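/*
 * IPI raise hook, registered by the interrupt controller driver via
 * set_smp_cross_call().
 */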
6fa3eb70 | 472 | static void (*__smp_cross_call)(const struct cpumask *, unsigned int); |
08e875c1 CM |
473 | |
474 | void __init set_smp_cross_call(void (*fn)(const struct cpumask *, unsigned int)) | |
475 | { | |
6fa3eb70 | 476 | __smp_cross_call = fn; |
08e875c1 CM |
477 | } |
478 | ||
static const char *ipi_types[NR_IPI] __tracepoint_string = {
#define S(x,s)	[x] = s
	S(IPI_RESCHEDULE, "Rescheduling interrupts"),
	S(IPI_CALL_FUNC, "Function call interrupts"),
	S(IPI_CALL_FUNC_SINGLE, "Single function call interrupts"),
	S(IPI_CPU_STOP, "CPU stop interrupts"),
};

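/* Trace the outgoing IPI, then raise it via the registered hook. */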
static void smp_cross_call(const struct cpumask *target, unsigned int ipinr)
{
	trace_ipi_raise(target, ipi_types[ipinr]);
	__smp_cross_call(target, ipinr);
}

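/* Print the per-CPU IPI counts (one row per IPI type) for /proc/interrupts. */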
void show_ipi_list(struct seq_file *p, int prec)
{
	unsigned int cpu, i;

	for (i = 0; i < NR_IPI; i++) {
		seq_printf(p, "%*s%u:%s", prec - 1, "IPI", i,
			   prec >= 4 ? " " : "");
		for_each_present_cpu(cpu)
			seq_printf(p, "%10u ",
				   __get_irq_stat(cpu, ipi_irqs[i]));
		seq_printf(p, " %s\n", ipi_types[i]);
	}
}

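/* Total number of IPIs handled by @cpu, summed across all IPI types. */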
u64 smp_irq_stat_cpu(unsigned int cpu)
{
	u64 sum = 0;
	int i;

	for (i = 0; i < NR_IPI; i++)
		sum += __get_irq_stat(cpu, ipi_irqs[i]);

	return sum;
}

void arch_send_call_function_ipi_mask(const struct cpumask *mask)
{
	smp_cross_call(mask, IPI_CALL_FUNC);
}

void arch_send_call_function_single_ipi(int cpu)
{
	smp_cross_call(cpumask_of(cpu), IPI_CALL_FUNC_SINGLE);
}

static DEFINE_RAW_SPINLOCK(stop_lock);

/*
 * ipi_cpu_stop - handle IPI from smp_send_stop()
 */
static void ipi_cpu_stop(unsigned int cpu)
{
	if (system_state == SYSTEM_BOOTING ||
	    system_state == SYSTEM_RUNNING) {
		raw_spin_lock(&stop_lock);
		pr_crit("CPU%u: stopping\n", cpu);
		dump_stack();
		raw_spin_unlock(&stop_lock);
	}

	set_cpu_online(cpu, false);

	local_fiq_disable();
	local_irq_disable();

	while (1)
		cpu_relax();
}

/*
 * Main handler for inter-processor interrupts
 */
void handle_IPI(int ipinr, struct pt_regs *regs)
{
	unsigned int cpu = smp_processor_id();
	struct pt_regs *old_regs = set_irq_regs(regs);

	if ((unsigned)ipinr < NR_IPI) {
		trace_ipi_entry(ipi_types[ipinr]);
		__inc_irq_stat(cpu, ipi_irqs[ipinr]);
	}

	switch (ipinr) {
	case IPI_RESCHEDULE:
		scheduler_ipi();
		break;

	case IPI_CALL_FUNC:
		irq_enter();
		mt_trace_ISR_start(ipinr);
		generic_smp_call_function_interrupt();
		mt_trace_ISR_end(ipinr);
		irq_exit();
		break;

	case IPI_CALL_FUNC_SINGLE:
		irq_enter();
		mt_trace_ISR_start(ipinr);
		generic_smp_call_function_single_interrupt();
		mt_trace_ISR_end(ipinr);
		irq_exit();
		break;

	case IPI_CPU_STOP:
		irq_enter();
		mt_trace_ISR_start(ipinr);
		ipi_cpu_stop(cpu);
		mt_trace_ISR_end(ipinr);
		irq_exit();
		break;

	default:
		pr_crit("CPU%u: Unknown IPI message 0x%x\n", cpu, ipinr);
		break;
	}

	if ((unsigned)ipinr < NR_IPI)
		trace_ipi_exit(ipi_types[ipinr]);
	set_irq_regs(old_regs);
}

void smp_send_reschedule(int cpu)
{
	smp_cross_call(cpumask_of(cpu), IPI_RESCHEDULE);
}

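/*
 * Stop all other CPUs (e.g. on panic or reboot): send IPI_CPU_STOP to every
 * other online CPU and wait up to one second for them to go offline.
 */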
void smp_send_stop(void)
{
	unsigned long timeout;

	if (num_online_cpus() > 1) {
		cpumask_t mask;

		cpumask_copy(&mask, cpu_online_mask);
		cpu_clear(smp_processor_id(), mask);

		smp_cross_call(&mask, IPI_CPU_STOP);
	}

	/* Wait up to one second for other CPUs to stop */
	timeout = USEC_PER_SEC;
	while (num_online_cpus() > 1 && timeout--)
		udelay(1);

	if (num_online_cpus() > 1)
		pr_warning("SMP: failed to stop secondary CPUs\n");
}

/*
 * not supported here
 */
int setup_profiling_timer(unsigned int multiplier)
{
	return -EINVAL;
}