/*
 * Copyright (C) 2004, 2007-2010, 2011-2012 Synopsys, Inc. (www.synopsys.com)
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * RajeshwarR: Dec 11, 2007
 *   -- Added support for Inter Processor Interrupts
 *
 * Vineetg: Nov 1st, 2007
 *   -- Initial Write (Borrowed heavily from ARM)
 */

#include <linux/spinlock.h>
#include <linux/sched.h>
#include <linux/interrupt.h>
#include <linux/profile.h>
#include <linux/mm.h>
#include <linux/cpu.h>
#include <linux/irq.h>
#include <linux/atomic.h>
#include <linux/cpumask.h>
#include <linux/reboot.h>
#include <linux/irqdomain.h>
#include <asm/processor.h>
#include <asm/setup.h>
#include <asm/mach_desc.h>

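/*
 * Fallback locks when LLSC (LLOCK/SCOND) instructions are not available:
 * atomics and bitops then serialize on these global spinlocks (used by
 * their non-LLSC implementations in asm/atomic.h and asm/bitops.h)
 */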
#ifndef CONFIG_ARC_HAS_LLSC
arch_spinlock_t smp_atomic_ops_lock = __ARCH_SPIN_LOCK_UNLOCKED;
arch_spinlock_t smp_bitops_lock = __ARCH_SPIN_LOCK_UNLOCKED;
#endif

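/*
 * Weak default instance: a platform port can provide its own (strong)
 * plat_smp_ops to hook up SoC specific kick/IPI support. A minimal
 * override might look like this (hypothetical my_plat_* names):
 *
 *	struct plat_smp_ops plat_smp_ops = {
 *		.info		= "MyPlat SMP",
 *		.cpu_kick	= my_plat_cpu_kick,
 *		.ipi_send	= my_plat_ipi_send,
 *		.ipi_clear	= my_plat_ipi_clear,
 *	};
 */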
struct plat_smp_ops __weak plat_smp_ops;

/* XXX: per cpu ? Only needed once in early secondary boot */
struct task_struct *secondary_idle_tsk;

/* Called from start_kernel */
void __init smp_prepare_boot_cpu(void)
{
}

/*
 * Called from setup_arch() before calling setup_processor()
 *
 * - Initialise the CPU possible map early - this describes the CPUs
 *   which may be present or become present in the system.
 * - Call early smp init hook. This can initialize a specific multi-core
 *   IP which is, say, common to several platforms (hence not part of the
 *   platform specific init_early() hook)
 */
void __init smp_init_cpus(void)
{
	unsigned int i;

	for (i = 0; i < NR_CPUS; i++)
		set_cpu_possible(i, true);

	if (plat_smp_ops.init_early_smp)
		plat_smp_ops.init_early_smp();
}

/* called from init() => process 1 */
void __init smp_prepare_cpus(unsigned int max_cpus)
{
	int i;

	/*
	 * if platform didn't set the present map already, do it now
	 * boot cpu is set to present already by init/main.c
	 */
	if (num_present_cpus() <= 1) {
		for (i = 0; i < max_cpus; i++)
			set_cpu_present(i, true);
	}
}

void __init smp_cpus_done(unsigned int max_cpus)
{

}

/*
 * Default smp boot helper for Run-on-reset case where all cores start off
 * together. Non-masters need to wait for Master to start running.
 * This is implemented using a flag in memory, which Non-masters spin-wait on.
 * Master sets it to cpu-id of core to "ungate" it.
 */
static volatile int wake_flag;

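/*
 * The accessors below bypass the data cache on ARCv2: a non-master polling
 * a cached copy of @wake_flag could keep reading a stale value and never
 * observe the master's write, so the flag is read/written uncached there.
 */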
#ifdef CONFIG_ISA_ARCOMPACT

#define __boot_read(f)		f
#define __boot_write(f, v)	f = v

#else

#define __boot_read(f)		arc_read_uncached_32(&f)
#define __boot_write(f, v)	arc_write_uncached_32(&f, v)

#endif

static void arc_default_smp_cpu_kick(int cpu, unsigned long pc)
{
	BUG_ON(cpu == 0);

	__boot_write(wake_flag, cpu);
}

void arc_platform_smp_wait_to_boot(int cpu)
{
	/* for halt-on-reset, we've waited already */
	if (IS_ENABLED(CONFIG_ARC_SMP_HALT_ON_RESET))
		return;

	while (__boot_read(wake_flag) != cpu)
		;

	__boot_write(wake_flag, 0);
}

const char *arc_platform_smp_cpuinfo(void)
{
	return plat_smp_ops.info ? : "";
}

/*
 * The very first "C" code executed by secondary
 * Called from asm stub in head.S
 * "current"/R25 already setup by low level boot code
 */
void start_kernel_secondary(void)
{
	struct mm_struct *mm = &init_mm;
	unsigned int cpu = smp_processor_id();

	/* MMU, Caches, Vector Table, Interrupts etc */
	setup_processor();

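	/* pin init_mm and adopt it as this CPU's active address space */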
	atomic_inc(&mm->mm_users);
	atomic_inc(&mm->mm_count);
	current->active_mm = mm;
	cpumask_set_cpu(cpu, mm_cpumask(mm));

	/* Some SMP H/w setup - for each cpu */
	if (plat_smp_ops.init_per_cpu)
		plat_smp_ops.init_per_cpu(cpu);

	if (machine_desc->init_per_cpu)
		machine_desc->init_per_cpu(cpu);

	notify_cpu_starting(cpu);
	set_cpu_online(cpu, true);

	pr_info("## CPU%u LIVE ##: Executing Code...\n", cpu);

	local_irq_enable();
	preempt_disable();
	cpu_startup_entry(CPUHP_AP_ONLINE_IDLE);
}

/*
 * Called from kernel_init() -> smp_init() - for each CPU
 *
 * At this point, Secondary Processor is "HALT"ed:
 * - It booted, but was halted in head.S
 * - It was configured to halt-on-reset
 * So need to wake it up.
 *
 * Essential requirements being where to run from (PC) and stack (SP)
 */
int __cpu_up(unsigned int cpu, struct task_struct *idle)
{
	unsigned long wait_till;

	secondary_idle_tsk = idle;

	pr_info("Idle Task [%d] %p\n", cpu, idle);
	pr_info("Trying to bring up CPU%u ...\n", cpu);

	if (plat_smp_ops.cpu_kick)
		plat_smp_ops.cpu_kick(cpu,
				(unsigned long)first_lines_of_secondary);
	else
		arc_default_smp_cpu_kick(cpu, (unsigned long)NULL);

	/* wait for 1 sec after kicking the secondary */
	wait_till = jiffies + HZ;
	while (time_before(jiffies, wait_till)) {
		if (cpu_online(cpu))
			break;
	}

	if (!cpu_online(cpu)) {
		pr_info("Timeout: CPU%u FAILED to come up !!!\n", cpu);
		return -1;
	}

	secondary_idle_tsk = NULL;

	return 0;
}

/*
 * not supported here
 */
int setup_profiling_timer(unsigned int multiplier)
{
	return -EINVAL;
}

/*****************************************************************************/
/*              Inter Processor Interrupt Handling                           */
/*****************************************************************************/

enum ipi_msg_type {
	IPI_EMPTY = 0,
	IPI_RESCHEDULE = 1,
	IPI_CALL_FUNC,
	IPI_CPU_STOP,
};

/*
 * In arches with an IRQ for each msg type (above), the receiver can use the
 * IRQ-id to figure out what msg was sent. For those which don't (ARC has a
 * dedicated IPI IRQ), the msg-type needs to be conveyed via per-cpu data
 */

static DEFINE_PER_CPU(unsigned long, ipi_data);

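/*
 * e.g. ipi_data = 0b0110 means IPI_RESCHEDULE (bit 1) and IPI_CALL_FUNC
 * (bit 2) are both pending; do_IPI() below drains the bits with __ffs()
 */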
static void ipi_send_msg_one(int cpu, enum ipi_msg_type msg)
{
	unsigned long __percpu *ipi_data_ptr = per_cpu_ptr(&ipi_data, cpu);
	unsigned long old, new;
	unsigned long flags;

	pr_debug("%d Sending msg [%d] to %d\n", smp_processor_id(), msg, cpu);

	local_irq_save(flags);

	/*
	 * Atomically write new msg bit (in case others are writing too),
	 * and read back old value
	 */
	do {
		new = old = ACCESS_ONCE(*ipi_data_ptr);
		new |= 1U << msg;
	} while (cmpxchg(ipi_data_ptr, old, new) != old);

	/*
	 * Call the platform specific IPI kick function, but avoid if possible:
	 * Only do so if there's no pending msg from other concurrent sender(s).
	 * Otherwise, receiver will see this msg as well when it takes the
	 * IPI corresponding to that msg. This is true, even if it is already in
	 * IPI handler, because !@old means it has not yet dequeued the msg(s)
	 * so @new msg can be a free-loader
	 */
	if (plat_smp_ops.ipi_send && !old)
		plat_smp_ops.ipi_send(cpu);

	local_irq_restore(flags);
}

static void ipi_send_msg(const struct cpumask *callmap, enum ipi_msg_type msg)
{
	unsigned int cpu;

	for_each_cpu(cpu, callmap)
		ipi_send_msg_one(cpu, msg);
}

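/* arch hooks below are called by the generic scheduler and smp core */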
void smp_send_reschedule(int cpu)
{
	ipi_send_msg_one(cpu, IPI_RESCHEDULE);
}

void smp_send_stop(void)
{
	struct cpumask targets;

	cpumask_copy(&targets, cpu_online_mask);
	cpumask_clear_cpu(smp_processor_id(), &targets);
	ipi_send_msg(&targets, IPI_CPU_STOP);
}

void arch_send_call_function_single_ipi(int cpu)
{
	ipi_send_msg_one(cpu, IPI_CALL_FUNC);
}

void arch_send_call_function_ipi_mask(const struct cpumask *mask)
{
	ipi_send_msg(mask, IPI_CALL_FUNC);
}

/*
 * ipi_cpu_stop - handle IPI from smp_send_stop()
 */
static void ipi_cpu_stop(void)
{
	machine_halt();
}

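/* Dispatch a single dequeued msg; returns non-zero for an unknown msg type */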
static inline int __do_IPI(unsigned long msg)
{
	int rc = 0;

	switch (msg) {
	case IPI_RESCHEDULE:
		scheduler_ipi();
		break;

	case IPI_CALL_FUNC:
		generic_smp_call_function_interrupt();
		break;

	case IPI_CPU_STOP:
		ipi_cpu_stop();
		break;

	default:
		rc = 1;
	}

	return rc;
}

/*
 * arch-common ISR to handle Inter Processor Interrupts
 * Has hooks for platform specific IPI
 */
irqreturn_t do_IPI(int irq, void *dev_id)
{
	unsigned long pending;
	unsigned long __maybe_unused copy;

	pr_debug("IPI [%ld] received on cpu %d\n",
		 *this_cpu_ptr(&ipi_data), smp_processor_id());

	if (plat_smp_ops.ipi_clear)
		plat_smp_ops.ipi_clear(irq);

	/*
	 * "dequeue" the msg corresponding to this IPI (and possibly other
	 * piggybacked msg from elided IPIs: see ipi_send_msg_one() above)
	 */
	copy = pending = xchg(this_cpu_ptr(&ipi_data), 0);

	do {
		unsigned long msg = __ffs(pending);
		int rc;

		rc = __do_IPI(msg);
		if (rc)
			pr_info("IPI with bogus msg %ld in %ld\n", msg, copy);
		pending &= ~(1U << msg);
	} while (pending);

	return IRQ_HANDLED;
}

/*
 * API called by platform code to hookup arch-common ISR to their IPI IRQ
 *
 * Note: If IPI is provided by platform (vs. say ARC MCIP), their intc setup/map
 * function needs to call irq_set_percpu_devid() for the IPI IRQ, otherwise
 * request_percpu_irq() below will fail
 */
369 | */ |
370 | static DEFINE_PER_CPU(int, ipi_dev); | |
7e512219 | 371 | |
int smp_ipi_irq_setup(int cpu, irq_hw_number_t hwirq)
{
	int *dev = per_cpu_ptr(&ipi_dev, cpu);
	unsigned int virq = irq_find_mapping(NULL, hwirq);

	if (!virq)
		panic("Cannot find virq for root domain and hwirq=%lu", hwirq);

	/* Boot cpu calls request, all call enable */
	if (!cpu) {
		int rc;

		rc = request_percpu_irq(virq, do_IPI, "IPI Interrupt", dev);
		if (rc)
			panic("Percpu IRQ request failed for %u\n", virq);
	}

	enable_percpu_irq(virq, 0);

	return 0;
}