arch/sparc64/kernel/smp.c
1 /* smp.c: Sparc64 SMP support.
2 *
3 * Copyright (C) 1997, 2007 David S. Miller (davem@davemloft.net)
4 */
5
6 #include <linux/module.h>
7 #include <linux/kernel.h>
8 #include <linux/sched.h>
9 #include <linux/mm.h>
10 #include <linux/pagemap.h>
11 #include <linux/threads.h>
12 #include <linux/smp.h>
13 #include <linux/interrupt.h>
14 #include <linux/kernel_stat.h>
15 #include <linux/delay.h>
16 #include <linux/init.h>
17 #include <linux/spinlock.h>
18 #include <linux/fs.h>
19 #include <linux/seq_file.h>
20 #include <linux/cache.h>
21 #include <linux/jiffies.h>
22 #include <linux/profile.h>
23 #include <linux/bootmem.h>
24
25 #include <asm/head.h>
26 #include <asm/ptrace.h>
27 #include <asm/atomic.h>
28 #include <asm/tlbflush.h>
29 #include <asm/mmu_context.h>
30 #include <asm/cpudata.h>
31 #include <asm/hvtramp.h>
32 #include <asm/io.h>
33
34 #include <asm/irq.h>
35 #include <asm/irq_regs.h>
36 #include <asm/page.h>
37 #include <asm/pgtable.h>
38 #include <asm/oplib.h>
39 #include <asm/uaccess.h>
40 #include <asm/timer.h>
41 #include <asm/starfire.h>
42 #include <asm/tlb.h>
43 #include <asm/sections.h>
44 #include <asm/prom.h>
45 #include <asm/mdesc.h>
46 #include <asm/ldc.h>
47 #include <asm/hypervisor.h>
48
49 extern void calibrate_delay(void);
50
51 int sparc64_multi_core __read_mostly;
52
53 cpumask_t cpu_possible_map __read_mostly = CPU_MASK_NONE;
54 cpumask_t cpu_online_map __read_mostly = CPU_MASK_NONE;
55 DEFINE_PER_CPU(cpumask_t, cpu_sibling_map) = CPU_MASK_NONE;
56 cpumask_t cpu_core_map[NR_CPUS] __read_mostly =
57 { [0 ... NR_CPUS-1] = CPU_MASK_NONE };
58
59 EXPORT_SYMBOL(cpu_possible_map);
60 EXPORT_SYMBOL(cpu_online_map);
61 EXPORT_PER_CPU_SYMBOL(cpu_sibling_map);
62 EXPORT_SYMBOL(cpu_core_map);
63
64 static cpumask_t smp_commenced_mask;
65
66 void smp_info(struct seq_file *m)
67 {
68 int i;
69
70 seq_printf(m, "State:\n");
71 for_each_online_cpu(i)
72 seq_printf(m, "CPU%d:\t\tonline\n", i);
73 }
74
75 void smp_bogo(struct seq_file *m)
76 {
77 int i;
78
79 for_each_online_cpu(i)
80 seq_printf(m,
81 "Cpu%dClkTck\t: %016lx\n",
82 i, cpu_data(i).clock_tick);
83 }
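/* The clock_tick value is roughly the CPU clock rate in Hz, emitted as a
 * zero-padded 64-bit hex number.  For illustration (hypothetical part), a
 * 1.2 GHz cpu would show up as:
 *
 *	Cpu0ClkTck	: 0000000047868c00
 *
 * since 1,200,000,000 == 0x47868c00.
 */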
84
85 static __cacheline_aligned_in_smp DEFINE_SPINLOCK(call_lock);
86
87 extern void setup_sparc64_timer(void);
88
89 static volatile unsigned long callin_flag = 0;
90
91 void __devinit smp_callin(void)
92 {
93 int cpuid = hard_smp_processor_id();
94
95 __local_per_cpu_offset = __per_cpu_offset(cpuid);
96
97 if (tlb_type == hypervisor)
98 sun4v_ktsb_register();
99
100 __flush_tlb_all();
101
102 setup_sparc64_timer();
103
104 if (cheetah_pcache_forced_on)
105 cheetah_enable_pcache();
106
107 local_irq_enable();
108
109 callin_flag = 1;
110 __asm__ __volatile__("membar #Sync\n\t"
111 "flush %%g6" : : : "memory");
112
113 /* Clear this or we will die instantly when we
114 * schedule back to this idler...
115 */
116 current_thread_info()->new_child = 0;
117
118 /* Attach to the address space of init_task. */
119 atomic_inc(&init_mm.mm_count);
120 current->active_mm = &init_mm;
121
122 while (!cpu_isset(cpuid, smp_commenced_mask))
123 rmb();
124
125 spin_lock(&call_lock);
126 cpu_set(cpuid, cpu_online_map);
127 spin_unlock(&call_lock);
128
129 /* idle thread is expected to have preempt disabled */
130 preempt_disable();
131 }
132
133 void cpu_panic(void)
134 {
135 printk("CPU[%d]: Returns from cpu_idle!\n", smp_processor_id());
136 panic("SMP bolixed\n");
137 }
138
139 /* This tick register synchronization scheme is taken entirely from
140 * the ia64 port, see arch/ia64/kernel/smpboot.c for details and credit.
141 *
142 * The only change I've made is to rework it so that the master
143 * initiates the synchronization instead of the slave. -DaveM
144 */
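/* Roughly, the handshake implemented by get_delta() and
 * smp_synchronize_one_tick() below looks like this (a sketch of the code,
 * not a separate protocol):
 *
 *	slave (client)				master
 *	--------------------------		--------------------------
 *	t0 = get_tick();
 *	go[MASTER] = 1;		----->		sees go[MASTER], clears it,
 *						go[SLAVE] = get_tick();
 *	tm = go[SLAVE];		<-----
 *	go[SLAVE] = 0;
 *	t1 = get_tick();
 *
 * get_delta() then treats the midpoint of t0..t1 as the moment the master
 * sampled tm.
 */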
145
146 #define MASTER 0
147 #define SLAVE (SMP_CACHE_BYTES/sizeof(unsigned long))
148
149 #define NUM_ROUNDS 64 /* magic value */
150 #define NUM_ITERS 5 /* likewise */
151
152 static DEFINE_SPINLOCK(itc_sync_lock);
153 static unsigned long go[SLAVE + 1];
154
155 #define DEBUG_TICK_SYNC 0
156
157 static inline long get_delta (long *rt, long *master)
158 {
159 unsigned long best_t0 = 0, best_t1 = ~0UL, best_tm = 0;
160 unsigned long tcenter, t0, t1, tm;
161 unsigned long i;
162
163 for (i = 0; i < NUM_ITERS; i++) {
164 t0 = tick_ops->get_tick();
165 go[MASTER] = 1;
166 membar_storeload();
167 while (!(tm = go[SLAVE]))
168 rmb();
169 go[SLAVE] = 0;
170 wmb();
171 t1 = tick_ops->get_tick();
172
173 if (t1 - t0 < best_t1 - best_t0)
174 best_t0 = t0, best_t1 = t1, best_tm = tm;
175 }
176
177 *rt = best_t1 - best_t0;
178 *master = best_tm - best_t0;
179
180 /* average best_t0 and best_t1 without overflow: */
181 tcenter = (best_t0/2 + best_t1/2);
182 if (best_t0 % 2 + best_t1 % 2 == 2)
183 tcenter++;
184 return tcenter - best_tm;
185 }
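/* Worked example of the arithmetic above (made-up numbers): if the best
 * round trip observed was t0 = 1000, t1 = 1040 on the slave and the master
 * replied with tm = 1520, then tcenter = 1020 and get_delta() returns
 * 1020 - 1520 = -500, i.e. the slave's tick appears to lag the master's by
 * about 500 cycles, assuming the master's sample landed near the middle of
 * the round trip.  *rt is the round-trip time (40) and *master is the
 * master's timestamp relative to t0 (520).
 */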
186
187 void smp_synchronize_tick_client(void)
188 {
189 long i, delta, adj, adjust_latency = 0, done = 0;
190 unsigned long flags, rt, master_time_stamp, bound;
191 #if DEBUG_TICK_SYNC
192 struct {
193 long rt; /* roundtrip time */
194 long master; /* master's timestamp */
195 long diff; /* difference between midpoint and master's timestamp */
196 long lat; /* estimate of itc adjustment latency */
197 } t[NUM_ROUNDS];
198 #endif
199
200 go[MASTER] = 1;
201
202 while (go[MASTER])
203 rmb();
204
205 local_irq_save(flags);
206 {
207 for (i = 0; i < NUM_ROUNDS; i++) {
208 delta = get_delta(&rt, &master_time_stamp);
209 if (delta == 0) {
210 done = 1; /* let's lock on to this... */
211 bound = rt;
212 }
213
214 if (!done) {
215 if (i > 0) {
216 adjust_latency += -delta;
217 adj = -delta + adjust_latency/4;
218 } else
219 adj = -delta;
220
221 tick_ops->add_tick(adj);
222 }
223 #if DEBUG_TICK_SYNC
224 t[i].rt = rt;
225 t[i].master = master_time_stamp;
226 t[i].diff = delta;
227 t[i].lat = adjust_latency/4;
228 #endif
229 }
230 }
231 local_irq_restore(flags);
232
233 #if DEBUG_TICK_SYNC
234 for (i = 0; i < NUM_ROUNDS; i++)
235 printk("rt=%5ld master=%5ld diff=%5ld adjlat=%5ld\n",
236 t[i].rt, t[i].master, t[i].diff, t[i].lat);
237 #endif
238
239 printk(KERN_INFO "CPU %d: synchronized TICK with master CPU "
240 "(last diff %ld cycles, maxerr %lu cycles)\n",
241 smp_processor_id(), delta, rt);
242 }
243
244 static void smp_start_sync_tick_client(int cpu);
245
246 static void smp_synchronize_one_tick(int cpu)
247 {
248 unsigned long flags, i;
249
250 go[MASTER] = 0;
251
252 smp_start_sync_tick_client(cpu);
253
254 /* wait for client to be ready */
255 while (!go[MASTER])
256 rmb();
257
258 /* now let the client proceed into his loop */
259 go[MASTER] = 0;
260 membar_storeload();
261
262 spin_lock_irqsave(&itc_sync_lock, flags);
263 {
264 for (i = 0; i < NUM_ROUNDS*NUM_ITERS; i++) {
265 while (!go[MASTER])
266 rmb();
267 go[MASTER] = 0;
268 wmb();
269 go[SLAVE] = tick_ops->get_tick();
270 membar_storeload();
271 }
272 }
273 spin_unlock_irqrestore(&itc_sync_lock, flags);
274 }
275
276 #if defined(CONFIG_SUN_LDOMS) && defined(CONFIG_HOTPLUG_CPU)
277 /* XXX Put this in some common place. XXX */
278 static unsigned long kimage_addr_to_ra(void *p)
279 {
280 unsigned long val = (unsigned long) p;
281
282 return kern_base + (val - KERNBASE);
283 }
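/* This assumes p points into the kernel image, which is mapped at virtual
 * address KERNBASE and sits at real address kern_base, so the real address
 * is the same offset from kern_base:
 *
 *	RA = kern_base + (VA - KERNBASE)
 *
 * e.g. (hypothetical values) with KERNBASE = 0x400000 and
 * kern_base = 0x20400000, a symbol at 0x404000 translates to RA 0x20404000.
 */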
284
285 static void ldom_startcpu_cpuid(unsigned int cpu, unsigned long thread_reg)
286 {
287 extern unsigned long sparc64_ttable_tl0;
288 extern unsigned long kern_locked_tte_data;
289 extern int bigkernel;
290 struct hvtramp_descr *hdesc;
291 unsigned long trampoline_ra;
292 struct trap_per_cpu *tb;
293 u64 tte_vaddr, tte_data;
294 unsigned long hv_err;
295
296 hdesc = kzalloc(sizeof(*hdesc), GFP_KERNEL);
297 if (!hdesc) {
298 printk(KERN_ERR "ldom_startcpu_cpuid: Cannot allocate "
299 "hvtramp_descr.\n");
300 return;
301 }
302
303 hdesc->cpu = cpu;
304 hdesc->num_mappings = (bigkernel ? 2 : 1);
305
306 tb = &trap_block[cpu];
307 tb->hdesc = hdesc;
308
309 hdesc->fault_info_va = (unsigned long) &tb->fault_info;
310 hdesc->fault_info_pa = kimage_addr_to_ra(&tb->fault_info);
311
312 hdesc->thread_reg = thread_reg;
313
314 tte_vaddr = (unsigned long) KERNBASE;
315 tte_data = kern_locked_tte_data;
316
317 hdesc->maps[0].vaddr = tte_vaddr;
318 hdesc->maps[0].tte = tte_data;
319 if (bigkernel) {
320 tte_vaddr += 0x400000;
321 tte_data += 0x400000;
322 hdesc->maps[1].vaddr = tte_vaddr;
323 hdesc->maps[1].tte = tte_data;
324 }
325
326 trampoline_ra = kimage_addr_to_ra(hv_cpu_startup);
327
328 hv_err = sun4v_cpu_start(cpu, trampoline_ra,
329 kimage_addr_to_ra(&sparc64_ttable_tl0),
330 __pa(hdesc));
331 if (hv_err)
332 printk(KERN_ERR "ldom_startcpu_cpuid: sun4v_cpu_start() "
333 "gives error %lu\n", hv_err);
334 }
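/* Note that hdesc is not freed here: it is hung off the target cpu's trap
 * block (tb->hdesc) and smp_boot_one_cpu() kfree()s it once the new cpu has
 * called in (or has been declared stuck).
 */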
335 #endif
336
337 extern unsigned long sparc64_cpu_startup;
338
339 /* The OBP cpu startup callback truncates the 3rd arg cookie to
340 * 32 bits (I think), so to be safe we have it read the pointer
341 * contained here; that way we work on >4GB machines. -DaveM
342 */
343 static struct thread_info *cpu_new_thread = NULL;
344
345 static int __devinit smp_boot_one_cpu(unsigned int cpu)
346 {
347 struct trap_per_cpu *tb = &trap_block[cpu];
348 unsigned long entry =
349 (unsigned long)(&sparc64_cpu_startup);
350 unsigned long cookie =
351 (unsigned long)(&cpu_new_thread);
352 struct task_struct *p;
353 int timeout, ret;
354
355 p = fork_idle(cpu);
356 if (IS_ERR(p))
357 return PTR_ERR(p);
358 callin_flag = 0;
359 cpu_new_thread = task_thread_info(p);
360
361 if (tlb_type == hypervisor) {
362 #if defined(CONFIG_SUN_LDOMS) && defined(CONFIG_HOTPLUG_CPU)
363 if (ldom_domaining_enabled)
364 ldom_startcpu_cpuid(cpu,
365 (unsigned long) cpu_new_thread);
366 else
367 #endif
368 prom_startcpu_cpuid(cpu, entry, cookie);
369 } else {
370 struct device_node *dp = of_find_node_by_cpuid(cpu);
371
372 prom_startcpu(dp->node, entry, cookie);
373 }
374
375 for (timeout = 0; timeout < 50000; timeout++) {
376 if (callin_flag)
377 break;
378 udelay(100);
379 }
380
381 if (callin_flag) {
382 ret = 0;
383 } else {
384 printk("Processor %d is stuck.\n", cpu);
385 ret = -ENODEV;
386 }
387 cpu_new_thread = NULL;
388
389 if (tb->hdesc) {
390 kfree(tb->hdesc);
391 tb->hdesc = NULL;
392 }
393
394 return ret;
395 }
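/* The callin handshake above gives the new cpu up to 50000 * 100us, i.e.
 * about five seconds, to set callin_flag from smp_callin() before we give
 * up and report it stuck.
 */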
396
397 static void spitfire_xcall_helper(u64 data0, u64 data1, u64 data2, u64 pstate, unsigned long cpu)
398 {
399 u64 result, target;
400 int stuck, tmp;
401
402 if (this_is_starfire) {
403 /* map to real upaid */
404 cpu = (((cpu & 0x3c) << 1) |
405 ((cpu & 0x40) >> 4) |
406 (cpu & 0x3));
407 }
408
409 target = (cpu << 14) | 0x70;
410 again:
411 /* Ok, this is the real Spitfire Errata #54.
412 * One must read back from a UDB internal register
413 * after writes to the UDB interrupt dispatch, but
414 * before the membar Sync for that write.
415 * So we use the high UDB control register (ASI 0x7f,
416 * ADDR 0x20) for the dummy read. -DaveM
417 */
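/* A rough map of the asm below: %0 starts out as 0x40, the first interrupt
 * dispatch data register offset; data0/data1/data2 are stored at offsets
 * 0x40, 0x50 and 0x60 of ASI_INTR_W (%8 is the 0x10 stride); the store of
 * %g0 to [%7] (the per-cpu dispatch register, (upaid << 14) | 0x70) launches
 * the vector; and the ldxa from ASI 0x7f, address 0x20 is the errata #54
 * dummy read described above.
 */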
418 tmp = 0x40;
419 __asm__ __volatile__(
420 "wrpr %1, %2, %%pstate\n\t"
421 "stxa %4, [%0] %3\n\t"
422 "stxa %5, [%0+%8] %3\n\t"
423 "add %0, %8, %0\n\t"
424 "stxa %6, [%0+%8] %3\n\t"
425 "membar #Sync\n\t"
426 "stxa %%g0, [%7] %3\n\t"
427 "membar #Sync\n\t"
428 "mov 0x20, %%g1\n\t"
429 "ldxa [%%g1] 0x7f, %%g0\n\t"
430 "membar #Sync"
431 : "=r" (tmp)
432 : "r" (pstate), "i" (PSTATE_IE), "i" (ASI_INTR_W),
433 "r" (data0), "r" (data1), "r" (data2), "r" (target),
434 "r" (0x10), "0" (tmp)
435 : "g1");
436
437 /* NOTE: PSTATE_IE is still clear. */
438 stuck = 100000;
439 do {
440 __asm__ __volatile__("ldxa [%%g0] %1, %0"
441 : "=r" (result)
442 : "i" (ASI_INTR_DISPATCH_STAT));
443 if (result == 0) {
444 __asm__ __volatile__("wrpr %0, 0x0, %%pstate"
445 : : "r" (pstate));
446 return;
447 }
448 stuck -= 1;
449 if (stuck == 0)
450 break;
451 } while (result & 0x1);
452 __asm__ __volatile__("wrpr %0, 0x0, %%pstate"
453 : : "r" (pstate));
454 if (stuck == 0) {
455 printk("CPU[%d]: mondo stuckage result[%016lx]\n",
456 smp_processor_id(), result);
457 } else {
458 udelay(2);
459 goto again;
460 }
461 }
462
463 static inline void spitfire_xcall_deliver(u64 data0, u64 data1, u64 data2, cpumask_t mask)
464 {
465 u64 pstate;
466 int i;
467
468 __asm__ __volatile__("rdpr %%pstate, %0" : "=r" (pstate));
469 for_each_cpu_mask(i, mask)
470 spitfire_xcall_helper(data0, data1, data2, pstate, i);
471 }
472
473 /* Cheetah now allows us to send the whole 64 bytes of data in the interrupt
474 * packet, but we have no use for that. However we do take advantage of
475 * the new pipelining feature (ie. dispatch to multiple cpus simultaneously).
476 */
477 static void cheetah_xcall_deliver(u64 data0, u64 data1, u64 data2, cpumask_t mask)
478 {
479 u64 pstate, ver;
480 int nack_busy_id, is_jbus, need_more;
481
482 if (cpus_empty(mask))
483 return;
484
485 /* Unfortunately, someone at Sun had the brilliant idea to make the
486 * busy/nack fields hard-coded by ITID number for this Ultra-III
487 * derivative processor.
488 */
489 __asm__ ("rdpr %%ver, %0" : "=r" (ver));
490 is_jbus = ((ver >> 32) == __JALAPENO_ID ||
491 (ver >> 32) == __SERRANO_ID);
492
493 __asm__ __volatile__("rdpr %%pstate, %0" : "=r" (pstate));
494
495 retry:
496 need_more = 0;
497 __asm__ __volatile__("wrpr %0, %1, %%pstate\n\t"
498 : : "r" (pstate), "i" (PSTATE_IE));
499
500 /* Setup the dispatch data registers. */
501 __asm__ __volatile__("stxa %0, [%3] %6\n\t"
502 "stxa %1, [%4] %6\n\t"
503 "stxa %2, [%5] %6\n\t"
504 "membar #Sync\n\t"
505 : /* no outputs */
506 : "r" (data0), "r" (data1), "r" (data2),
507 "r" (0x40), "r" (0x50), "r" (0x60),
508 "i" (ASI_INTR_W));
509
510 nack_busy_id = 0;
511 {
512 int i;
513
514 for_each_cpu_mask(i, mask) {
515 u64 target = (i << 14) | 0x70;
516
517 if (!is_jbus)
518 target |= (nack_busy_id << 24);
519 __asm__ __volatile__(
520 "stxa %%g0, [%0] %1\n\t"
521 "membar #Sync\n\t"
522 : /* no outputs */
523 : "r" (target), "i" (ASI_INTR_W));
524 nack_busy_id++;
525 if (nack_busy_id == 32) {
526 need_more = 1;
527 break;
528 }
529 }
530 }
531
532 /* Now, poll for completion. */
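/* ASI_INTR_DISPATCH_STAT packs two bits per dispatch slot: the low bit of
 * each pair is BUSY and the high bit is NACK, so 0x5555555555555555 masks
 * all the busy bits and (0x2UL << (2 * slot)) below picks out one NACK bit.
 * An all-zero status means every target accepted the vector.
 */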
533 {
534 u64 dispatch_stat;
535 long stuck;
536
537 stuck = 100000 * nack_busy_id;
538 do {
539 __asm__ __volatile__("ldxa [%%g0] %1, %0"
540 : "=r" (dispatch_stat)
541 : "i" (ASI_INTR_DISPATCH_STAT));
542 if (dispatch_stat == 0UL) {
543 __asm__ __volatile__("wrpr %0, 0x0, %%pstate"
544 : : "r" (pstate));
545 if (unlikely(need_more)) {
546 int i, cnt = 0;
547 for_each_cpu_mask(i, mask) {
548 cpu_clear(i, mask);
549 cnt++;
550 if (cnt == 32)
551 break;
552 }
553 goto retry;
554 }
555 return;
556 }
557 if (!--stuck)
558 break;
559 } while (dispatch_stat & 0x5555555555555555UL);
560
561 __asm__ __volatile__("wrpr %0, 0x0, %%pstate"
562 : : "r" (pstate));
563
564 if ((dispatch_stat & ~(0x5555555555555555UL)) == 0) {
565 /* Busy bits will not clear, continue instead
566 * of freezing up on this cpu.
567 */
568 printk("CPU[%d]: mondo stuckage result[%016lx]\n",
569 smp_processor_id(), dispatch_stat);
570 } else {
571 int i, this_busy_nack = 0;
572
573 /* Delay some random time with interrupts enabled
574 * to prevent deadlock.
575 */
576 udelay(2 * nack_busy_id);
577
578 /* Clear out the mask bits for cpus which did not
579 * NACK us.
580 */
581 for_each_cpu_mask(i, mask) {
582 u64 check_mask;
583
584 if (is_jbus)
585 check_mask = (0x2UL << (2*i));
586 else
587 check_mask = (0x2UL <<
588 this_busy_nack);
589 if ((dispatch_stat & check_mask) == 0)
590 cpu_clear(i, mask);
591 this_busy_nack += 2;
592 if (this_busy_nack == 64)
593 break;
594 }
595
596 goto retry;
597 }
598 }
599 }
600
601 /* Multi-cpu list version. */
602 static void hypervisor_xcall_deliver(u64 data0, u64 data1, u64 data2, cpumask_t mask)
603 {
604 struct trap_per_cpu *tb;
605 u16 *cpu_list;
606 u64 *mondo;
607 cpumask_t error_mask;
608 unsigned long flags, status;
609 int cnt, retries, this_cpu, prev_sent, i;
610
611 if (cpus_empty(mask))
612 return;
613
614 /* We have to do this whole thing with interrupts fully disabled.
615 * Otherwise if we send an xcall from interrupt context it will
616 * corrupt both our mondo block and cpu list state.
617 *
618 * One consequence of this is that we cannot use timeout mechanisms
619 * that depend upon interrupts being delivered locally. So, for
620 * example, we cannot sample jiffies and expect it to advance.
621 *
622 * Fortunately, udelay() uses %stick/%tick so we can use that.
623 */
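/* The layout the hypervisor expects is simple: the mondo block at
 * tb->cpu_mondo_block_pa carries the three 64-bit mondo words (data0-2),
 * and tb->cpu_list_pa points at an array of u16 cpu ids.  On return the
 * hypervisor overwrites each successfully delivered entry with 0xffff,
 * which is what the retry logic below keys off of.
 */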
624 local_irq_save(flags);
625
626 this_cpu = smp_processor_id();
627 tb = &trap_block[this_cpu];
628
629 mondo = __va(tb->cpu_mondo_block_pa);
630 mondo[0] = data0;
631 mondo[1] = data1;
632 mondo[2] = data2;
633 wmb();
634
635 cpu_list = __va(tb->cpu_list_pa);
636
637 /* Setup the initial cpu list. */
638 cnt = 0;
639 for_each_cpu_mask(i, mask)
640 cpu_list[cnt++] = i;
641
642 cpus_clear(error_mask);
643 retries = 0;
644 prev_sent = 0;
645 do {
646 int forward_progress, n_sent;
647
648 status = sun4v_cpu_mondo_send(cnt,
649 tb->cpu_list_pa,
650 tb->cpu_mondo_block_pa);
651
652 /* HV_EOK means all cpus received the xcall, we're done. */
653 if (likely(status == HV_EOK))
654 break;
655
656 /* First, see if we made any forward progress.
657 *
658 * The hypervisor indicates successful sends by setting
659 * cpu list entries to the value 0xffff.
660 */
661 n_sent = 0;
662 for (i = 0; i < cnt; i++) {
663 if (likely(cpu_list[i] == 0xffff))
664 n_sent++;
665 }
666
667 forward_progress = 0;
668 if (n_sent > prev_sent)
669 forward_progress = 1;
670
671 prev_sent = n_sent;
672
673 /* If we get a HV_ECPUERROR, then one or more of the cpus
674 * in the list are in error state. Use the cpu_state()
675 * hypervisor call to find out which cpus are in error state.
676 */
677 if (unlikely(status == HV_ECPUERROR)) {
678 for (i = 0; i < cnt; i++) {
679 long err;
680 u16 cpu;
681
682 cpu = cpu_list[i];
683 if (cpu == 0xffff)
684 continue;
685
686 err = sun4v_cpu_state(cpu);
687 if (err >= 0 &&
688 err == HV_CPU_STATE_ERROR) {
689 cpu_list[i] = 0xffff;
690 cpu_set(cpu, error_mask);
691 }
692 }
693 } else if (unlikely(status != HV_EWOULDBLOCK))
694 goto fatal_mondo_error;
695
696 /* Don't bother rewriting the CPU list, just leave the
697 * 0xffff and non-0xffff entries in there and the
698 * hypervisor will do the right thing.
699 *
700 * Only advance timeout state if we didn't make any
701 * forward progress.
702 */
703 if (unlikely(!forward_progress)) {
704 if (unlikely(++retries > 10000))
705 goto fatal_mondo_timeout;
706
707 /* Delay a little bit to let other cpus catch up
708 * on their cpu mondo queue work.
709 */
710 udelay(2 * cnt);
711 }
712 } while (1);
713
714 local_irq_restore(flags);
715
716 if (unlikely(!cpus_empty(error_mask)))
717 goto fatal_mondo_cpu_error;
718
719 return;
720
721 fatal_mondo_cpu_error:
722 printk(KERN_CRIT "CPU[%d]: SUN4V mondo cpu error, some target cpus "
723 "were in error state\n",
724 this_cpu);
725 printk(KERN_CRIT "CPU[%d]: Error mask [ ", this_cpu);
726 for_each_cpu_mask(i, error_mask)
727 printk("%d ", i);
728 printk("]\n");
729 return;
730
731 fatal_mondo_timeout:
732 local_irq_restore(flags);
733 printk(KERN_CRIT "CPU[%d]: SUN4V mondo timeout, no forward "
734 " progress after %d retries.\n",
735 this_cpu, retries);
736 goto dump_cpu_list_and_out;
737
738 fatal_mondo_error:
739 local_irq_restore(flags);
740 printk(KERN_CRIT "CPU[%d]: Unexpected SUN4V mondo error %lu\n",
741 this_cpu, status);
742 printk(KERN_CRIT "CPU[%d]: Args were cnt(%d) cpulist_pa(%lx) "
743 "mondo_block_pa(%lx)\n",
744 this_cpu, cnt, tb->cpu_list_pa, tb->cpu_mondo_block_pa);
745
746 dump_cpu_list_and_out:
747 printk(KERN_CRIT "CPU[%d]: CPU list [ ", this_cpu);
748 for (i = 0; i < cnt; i++)
749 printk("%u ", cpu_list[i]);
750 printk("]\n");
751 }
752
753 /* Send cross call to all processors mentioned in MASK
754 * except self.
755 */
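/* data0 packs the MMU context in its upper 32 bits and the low 32 bits of
 * the xcall handler's address in its lower half; data0 plus the two extra
 * data words are then handed to whichever delivery routine matches the
 * tlb_type.
 */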
756 static void smp_cross_call_masked(unsigned long *func, u32 ctx, u64 data1, u64 data2, cpumask_t mask)
757 {
758 u64 data0 = (((u64)ctx)<<32 | (((u64)func) & 0xffffffff));
759 int this_cpu = get_cpu();
760
761 cpus_and(mask, mask, cpu_online_map);
762 cpu_clear(this_cpu, mask);
763
764 if (tlb_type == spitfire)
765 spitfire_xcall_deliver(data0, data1, data2, mask);
766 else if (tlb_type == cheetah || tlb_type == cheetah_plus)
767 cheetah_xcall_deliver(data0, data1, data2, mask);
768 else
769 hypervisor_xcall_deliver(data0, data1, data2, mask);
770 /* NOTE: Caller runs local copy on master. */
771
772 put_cpu();
773 }
774
775 extern unsigned long xcall_sync_tick;
776
777 static void smp_start_sync_tick_client(int cpu)
778 {
779 cpumask_t mask = cpumask_of_cpu(cpu);
780
781 smp_cross_call_masked(&xcall_sync_tick,
782 0, 0, 0, mask);
783 }
784
785 /* Send cross call to all processors except self. */
786 #define smp_cross_call(func, ctx, data1, data2) \
787 smp_cross_call_masked(func, ctx, data1, data2, cpu_online_map)
788
789 struct call_data_struct {
790 void (*func) (void *info);
791 void *info;
792 atomic_t finished;
793 int wait;
794 };
795
796 static struct call_data_struct *call_data;
797
798 extern unsigned long xcall_call_function;
799
800 /**
801 * smp_call_function(): Run a function on all other CPUs.
802 * @func: The function to run. This must be fast and non-blocking.
803 * @info: An arbitrary pointer to pass to the function.
804 * @nonatomic: currently unused.
805 * @wait: If true, wait (atomically) until function has completed on other CPUs.
806 *
807 * Returns 0 on success, else a negative status code. Does not return until
808 * remote CPUs are nearly ready to execute <<func>>, are executing it, or have executed it.
809 *
810 * You must not call this function with disabled interrupts or from a
811 * hardware interrupt handler or from a bottom half handler.
812 */
813 static int smp_call_function_mask(void (*func)(void *info), void *info,
814 int nonatomic, int wait, cpumask_t mask)
815 {
816 struct call_data_struct data;
817 int cpus;
818
819 /* Can deadlock when called with interrupts disabled */
820 WARN_ON(irqs_disabled());
821
822 data.func = func;
823 data.info = info;
824 atomic_set(&data.finished, 0);
825 data.wait = wait;
826
827 spin_lock(&call_lock);
828
829 cpu_clear(smp_processor_id(), mask);
830 cpus = cpus_weight(mask);
831 if (!cpus)
832 goto out_unlock;
833
834 call_data = &data;
835 mb();
836
837 smp_cross_call_masked(&xcall_call_function, 0, 0, 0, mask);
838
839 /* Wait for response */
840 while (atomic_read(&data.finished) != cpus)
841 cpu_relax();
842
843 out_unlock:
844 spin_unlock(&call_lock);
845
846 return 0;
847 }
848
849 int smp_call_function(void (*func)(void *info), void *info,
850 int nonatomic, int wait)
851 {
852 return smp_call_function_mask(func, info, nonatomic, wait,
853 cpu_online_map);
854 }
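/* Example usage (hypothetical helper, for illustration only): run a handler
 * on every other online cpu from process context and wait for completion:
 *
 *	static void bump_counter(void *info)
 *	{
 *		atomic_inc((atomic_t *) info);
 *	}
 *
 *	...
 *	smp_call_function(bump_counter, &some_counter, 0, 1);
 *
 * As documented above, this must not be called with interrupts disabled or
 * from interrupt/bottom-half context, since the caller spins under
 * call_lock waiting for the targets to respond.
 */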
855
856 void smp_call_function_client(int irq, struct pt_regs *regs)
857 {
858 void (*func) (void *info) = call_data->func;
859 void *info = call_data->info;
860
861 clear_softint(1 << irq);
862 if (call_data->wait) {
863 /* let initiator proceed only after completion */
864 func(info);
865 atomic_inc(&call_data->finished);
866 } else {
867 /* let initiator proceed after getting data */
868 atomic_inc(&call_data->finished);
869 func(info);
870 }
871 }
872
873 static void tsb_sync(void *info)
874 {
875 struct trap_per_cpu *tp = &trap_block[raw_smp_processor_id()];
876 struct mm_struct *mm = info;
877
878 /* It is not valid to test "current->active_mm == mm" here.
879 *
880 * The value of "current" is not changed atomically with
881 * switch_mm(). But that's OK, we just need to check the
882 * current cpu's trap block PGD physical address.
883 */
884 if (tp->pgd_paddr == __pa(mm->pgd))
885 tsb_context_switch(mm);
886 }
887
888 void smp_tsb_sync(struct mm_struct *mm)
889 {
890 smp_call_function_mask(tsb_sync, mm, 0, 1, mm->cpu_vm_mask);
891 }
892
893 extern unsigned long xcall_flush_tlb_mm;
894 extern unsigned long xcall_flush_tlb_pending;
895 extern unsigned long xcall_flush_tlb_kernel_range;
896 extern unsigned long xcall_report_regs;
897 extern unsigned long xcall_receive_signal;
898 extern unsigned long xcall_new_mmu_context_version;
899
900 #ifdef DCACHE_ALIASING_POSSIBLE
901 extern unsigned long xcall_flush_dcache_page_cheetah;
902 #endif
903 extern unsigned long xcall_flush_dcache_page_spitfire;
904
905 #ifdef CONFIG_DEBUG_DCFLUSH
906 extern atomic_t dcpage_flushes;
907 extern atomic_t dcpage_flushes_xcall;
908 #endif
909
910 static inline void __local_flush_dcache_page(struct page *page)
911 {
912 #ifdef DCACHE_ALIASING_POSSIBLE
913 __flush_dcache_page(page_address(page),
914 ((tlb_type == spitfire) &&
915 page_mapping(page) != NULL));
916 #else
917 if (page_mapping(page) != NULL &&
918 tlb_type == spitfire)
919 __flush_icache_page(__pa(page_address(page)));
920 #endif
921 }
922
923 void smp_flush_dcache_page_impl(struct page *page, int cpu)
924 {
925 cpumask_t mask = cpumask_of_cpu(cpu);
926 int this_cpu;
927
928 if (tlb_type == hypervisor)
929 return;
930
931 #ifdef CONFIG_DEBUG_DCFLUSH
932 atomic_inc(&dcpage_flushes);
933 #endif
934
935 this_cpu = get_cpu();
936
937 if (cpu == this_cpu) {
938 __local_flush_dcache_page(page);
939 } else if (cpu_online(cpu)) {
940 void *pg_addr = page_address(page);
941 u64 data0;
942
943 if (tlb_type == spitfire) {
944 data0 =
945 ((u64)&xcall_flush_dcache_page_spitfire);
946 if (page_mapping(page) != NULL)
947 data0 |= ((u64)1 << 32);
948 spitfire_xcall_deliver(data0,
949 __pa(pg_addr),
950 (u64) pg_addr,
951 mask);
952 } else if (tlb_type == cheetah || tlb_type == cheetah_plus) {
953 #ifdef DCACHE_ALIASING_POSSIBLE
954 data0 =
955 ((u64)&xcall_flush_dcache_page_cheetah);
956 cheetah_xcall_deliver(data0,
957 __pa(pg_addr),
958 0, mask);
959 #endif
960 }
961 #ifdef CONFIG_DEBUG_DCFLUSH
962 atomic_inc(&dcpage_flushes_xcall);
963 #endif
964 }
965
966 put_cpu();
967 }
968
969 void flush_dcache_page_all(struct mm_struct *mm, struct page *page)
970 {
971 void *pg_addr = page_address(page);
972 cpumask_t mask = cpu_online_map;
973 u64 data0;
974 int this_cpu;
975
976 if (tlb_type == hypervisor)
977 return;
978
979 this_cpu = get_cpu();
980
981 cpu_clear(this_cpu, mask);
982
983 #ifdef CONFIG_DEBUG_DCFLUSH
984 atomic_inc(&dcpage_flushes);
985 #endif
986 if (cpus_empty(mask))
987 goto flush_self;
988 if (tlb_type == spitfire) {
989 data0 = ((u64)&xcall_flush_dcache_page_spitfire);
990 if (page_mapping(page) != NULL)
991 data0 |= ((u64)1 << 32);
992 spitfire_xcall_deliver(data0,
993 __pa(pg_addr),
994 (u64) pg_addr,
995 mask);
996 } else if (tlb_type == cheetah || tlb_type == cheetah_plus) {
997 #ifdef DCACHE_ALIASING_POSSIBLE
998 data0 = ((u64)&xcall_flush_dcache_page_cheetah);
999 cheetah_xcall_deliver(data0,
1000 __pa(pg_addr),
1001 0, mask);
1002 #endif
1003 }
1004 #ifdef CONFIG_DEBUG_DCFLUSH
1005 atomic_inc(&dcpage_flushes_xcall);
1006 #endif
1007 flush_self:
1008 __local_flush_dcache_page(page);
1009
1010 put_cpu();
1011 }
1012
1013 static void __smp_receive_signal_mask(cpumask_t mask)
1014 {
1015 smp_cross_call_masked(&xcall_receive_signal, 0, 0, 0, mask);
1016 }
1017
1018 void smp_receive_signal(int cpu)
1019 {
1020 cpumask_t mask = cpumask_of_cpu(cpu);
1021
1022 if (cpu_online(cpu))
1023 __smp_receive_signal_mask(mask);
1024 }
1025
1026 void smp_receive_signal_client(int irq, struct pt_regs *regs)
1027 {
1028 clear_softint(1 << irq);
1029 }
1030
1031 void smp_new_mmu_context_version_client(int irq, struct pt_regs *regs)
1032 {
1033 struct mm_struct *mm;
1034 unsigned long flags;
1035
1036 clear_softint(1 << irq);
1037
1038 /* See if we need to allocate a new TLB context because
1039 * the version of the one we are using is now out of date.
1040 */
1041 mm = current->active_mm;
1042 if (unlikely(!mm || (mm == &init_mm)))
1043 return;
1044
1045 spin_lock_irqsave(&mm->context.lock, flags);
1046
1047 if (unlikely(!CTX_VALID(mm->context)))
1048 get_new_mmu_context(mm);
1049
1050 spin_unlock_irqrestore(&mm->context.lock, flags);
1051
1052 load_secondary_context(mm);
1053 __flush_tlb_mm(CTX_HWBITS(mm->context),
1054 SECONDARY_CONTEXT);
1055 }
1056
1057 void smp_new_mmu_context_version(void)
1058 {
1059 smp_cross_call(&xcall_new_mmu_context_version, 0, 0, 0);
1060 }
1061
1062 void smp_report_regs(void)
1063 {
1064 smp_cross_call(&xcall_report_regs, 0, 0, 0);
1065 }
1066
1067 /* We know that the window frames of the user have been flushed
1068 * to the stack before we get here because all callers of us
1069 * are flush_tlb_*() routines, and these run after flush_cache_*()
1070 * which performs the flushw.
1071 *
1072 * The SMP TLB coherency scheme we use works as follows:
1073 *
1074 * 1) mm->cpu_vm_mask is a bit mask of which cpus an address
1075 * space has (potentially) executed on, this is the heuristic
1076 * we use to avoid doing cross calls.
1077 *
1078 * Also, for flushing from kswapd and also for clones, we
1079 * use cpu_vm_mask as the list of cpus to make run the TLB.
1080 *
1081 * 2) TLB context numbers are shared globally across all processors
1082 * in the system, this allows us to play several games to avoid
1083 * cross calls.
1084 *
1085 * One invariant is that when a cpu switches to a process, and
1086 * that process's tsk->active_mm->cpu_vm_mask does not have the
1087 * current cpu's bit set, that tlb context is flushed locally.
1088 *
1089 * If the address space is non-shared (i.e. mm->mm_users == 1) we avoid
1090 * cross calls when we want to flush the currently running process's
1091 * tlb state. This is done by clearing all cpu bits except the current
1092 * processor's in current->active_mm->cpu_vm_mask and performing the
1093 * flush locally only. This will force any subsequent cpus which run
1094 * this task to flush the context from the local tlb if the process
1095 * migrates to another cpu (again).
1096 *
1097 * 3) For shared address spaces (threads) and swapping we bite the
1098 * bullet for most cases and perform the cross call (but only to
1099 * the cpus listed in cpu_vm_mask).
1100 *
1101 * The performance gain from "optimizing" away the cross call for threads is
1102 * questionable (in theory the big win for threads is the massive sharing of
1103 * address space state across processors).
1104 */
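/* The non-shared fast path described above is visible directly in
 * smp_flush_tlb_mm() below: when mm_users == 1 the mask is collapsed to
 * just the local cpu,
 *
 *	mm->cpu_vm_mask = cpumask_of_cpu(cpu);
 *
 * and only the local __flush_tlb_mm() runs; any cpu that later picks the
 * task up will find its bit missing from cpu_vm_mask and flush that
 * context locally at switch_mm() time.
 */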
1105
1106 /* This currently is only used by the hugetlb arch pre-fault
1107 * hook on UltraSPARC-III+ and later when changing the pagesize
1108 * bits of the context register for an address space.
1109 */
1110 void smp_flush_tlb_mm(struct mm_struct *mm)
1111 {
1112 u32 ctx = CTX_HWBITS(mm->context);
1113 int cpu = get_cpu();
1114
1115 if (atomic_read(&mm->mm_users) == 1) {
1116 mm->cpu_vm_mask = cpumask_of_cpu(cpu);
1117 goto local_flush_and_out;
1118 }
1119
1120 smp_cross_call_masked(&xcall_flush_tlb_mm,
1121 ctx, 0, 0,
1122 mm->cpu_vm_mask);
1123
1124 local_flush_and_out:
1125 __flush_tlb_mm(ctx, SECONDARY_CONTEXT);
1126
1127 put_cpu();
1128 }
1129
1130 void smp_flush_tlb_pending(struct mm_struct *mm, unsigned long nr, unsigned long *vaddrs)
1131 {
1132 u32 ctx = CTX_HWBITS(mm->context);
1133 int cpu = get_cpu();
1134
1135 if (mm == current->active_mm && atomic_read(&mm->mm_users) == 1)
1136 mm->cpu_vm_mask = cpumask_of_cpu(cpu);
1137 else
1138 smp_cross_call_masked(&xcall_flush_tlb_pending,
1139 ctx, nr, (unsigned long) vaddrs,
1140 mm->cpu_vm_mask);
1141
1142 __flush_tlb_pending(ctx, nr, vaddrs);
1143
1144 put_cpu();
1145 }
1146
1147 void smp_flush_tlb_kernel_range(unsigned long start, unsigned long end)
1148 {
1149 start &= PAGE_MASK;
1150 end = PAGE_ALIGN(end);
1151 if (start != end) {
1152 smp_cross_call(&xcall_flush_tlb_kernel_range,
1153 0, start, end);
1154
1155 __flush_tlb_kernel_range(start, end);
1156 }
1157 }
1158
1159 /* CPU capture. */
1160 /* #define CAPTURE_DEBUG */
1161 extern unsigned long xcall_capture;
1162
1163 static atomic_t smp_capture_depth = ATOMIC_INIT(0);
1164 static atomic_t smp_capture_registry = ATOMIC_INIT(0);
1165 static unsigned long penguins_are_doing_time;
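/* The capture protocol implemented by the routines below: the first caller
 * of smp_capture() raises penguins_are_doing_time and cross calls
 * xcall_capture; every other cpu lands in smp_penguin_jailcell(), bumps
 * smp_capture_registry, enters the PROM world via prom_world(1) and spins
 * until the smp_release() that drops smp_capture_depth back to zero clears
 * penguins_are_doing_time.  The initiator waits until the registry count
 * equals num_online_cpus() before proceeding.
 */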
1166
1167 void smp_capture(void)
1168 {
1169 int result = atomic_add_ret(1, &smp_capture_depth);
1170
1171 if (result == 1) {
1172 int ncpus = num_online_cpus();
1173
1174 #ifdef CAPTURE_DEBUG
1175 printk("CPU[%d]: Sending penguins to jail...",
1176 smp_processor_id());
1177 #endif
1178 penguins_are_doing_time = 1;
1179 membar_storestore_loadstore();
1180 atomic_inc(&smp_capture_registry);
1181 smp_cross_call(&xcall_capture, 0, 0, 0);
1182 while (atomic_read(&smp_capture_registry) != ncpus)
1183 rmb();
1184 #ifdef CAPTURE_DEBUG
1185 printk("done\n");
1186 #endif
1187 }
1188 }
1189
1190 void smp_release(void)
1191 {
1192 if (atomic_dec_and_test(&smp_capture_depth)) {
1193 #ifdef CAPTURE_DEBUG
1194 printk("CPU[%d]: Giving pardon to "
1195 "imprisoned penguins\n",
1196 smp_processor_id());
1197 #endif
1198 penguins_are_doing_time = 0;
1199 membar_storeload_storestore();
1200 atomic_dec(&smp_capture_registry);
1201 }
1202 }
1203
1204 /* Imprisoned penguins run with %pil == 15, but PSTATE_IE set, so they
1205 * can service tlb flush xcalls...
1206 */
1207 extern void prom_world(int);
1208
1209 void smp_penguin_jailcell(int irq, struct pt_regs *regs)
1210 {
1211 clear_softint(1 << irq);
1212
1213 preempt_disable();
1214
1215 __asm__ __volatile__("flushw");
1216 prom_world(1);
1217 atomic_inc(&smp_capture_registry);
1218 membar_storeload_storestore();
1219 while (penguins_are_doing_time)
1220 rmb();
1221 atomic_dec(&smp_capture_registry);
1222 prom_world(0);
1223
1224 preempt_enable();
1225 }
1226
1227 /* /proc/profile writes can call this, don't __init it please. */
1228 int setup_profiling_timer(unsigned int multiplier)
1229 {
1230 return -EINVAL;
1231 }
1232
1233 void __init smp_prepare_cpus(unsigned int max_cpus)
1234 {
1235 }
1236
1237 void __devinit smp_prepare_boot_cpu(void)
1238 {
1239 }
1240
1241 void __devinit smp_fill_in_sib_core_maps(void)
1242 {
1243 unsigned int i;
1244
1245 for_each_present_cpu(i) {
1246 unsigned int j;
1247
1248 cpus_clear(cpu_core_map[i]);
1249 if (cpu_data(i).core_id == 0) {
1250 cpu_set(i, cpu_core_map[i]);
1251 continue;
1252 }
1253
1254 for_each_present_cpu(j) {
1255 if (cpu_data(i).core_id ==
1256 cpu_data(j).core_id)
1257 cpu_set(j, cpu_core_map[i]);
1258 }
1259 }
1260
1261 for_each_present_cpu(i) {
1262 unsigned int j;
1263
1264 cpus_clear(per_cpu(cpu_sibling_map, i));
1265 if (cpu_data(i).proc_id == -1) {
1266 cpu_set(i, per_cpu(cpu_sibling_map, i));
1267 continue;
1268 }
1269
1270 for_each_present_cpu(j) {
1271 if (cpu_data(i).proc_id ==
1272 cpu_data(j).proc_id)
1273 cpu_set(j, per_cpu(cpu_sibling_map, i));
1274 }
1275 }
1276 }
1277
1278 int __cpuinit __cpu_up(unsigned int cpu)
1279 {
1280 int ret = smp_boot_one_cpu(cpu);
1281
1282 if (!ret) {
1283 cpu_set(cpu, smp_commenced_mask);
1284 while (!cpu_isset(cpu, cpu_online_map))
1285 mb();
1286 if (!cpu_isset(cpu, cpu_online_map)) {
1287 ret = -ENODEV;
1288 } else {
1289 /* On SUN4V, writes to %tick and %stick are
1290 * not allowed.
1291 */
1292 if (tlb_type != hypervisor)
1293 smp_synchronize_one_tick(cpu);
1294 }
1295 }
1296 return ret;
1297 }
1298
1299 #ifdef CONFIG_HOTPLUG_CPU
1300 void cpu_play_dead(void)
1301 {
1302 int cpu = smp_processor_id();
1303 unsigned long pstate;
1304
1305 idle_task_exit();
1306
1307 if (tlb_type == hypervisor) {
1308 struct trap_per_cpu *tb = &trap_block[cpu];
1309
1310 sun4v_cpu_qconf(HV_CPU_QUEUE_CPU_MONDO,
1311 tb->cpu_mondo_pa, 0);
1312 sun4v_cpu_qconf(HV_CPU_QUEUE_DEVICE_MONDO,
1313 tb->dev_mondo_pa, 0);
1314 sun4v_cpu_qconf(HV_CPU_QUEUE_RES_ERROR,
1315 tb->resum_mondo_pa, 0);
1316 sun4v_cpu_qconf(HV_CPU_QUEUE_NONRES_ERROR,
1317 tb->nonresum_mondo_pa, 0);
1318 }
1319
1320 cpu_clear(cpu, smp_commenced_mask);
1321 membar_safe("#Sync");
1322
1323 local_irq_disable();
1324
1325 __asm__ __volatile__(
1326 "rdpr %%pstate, %0\n\t"
1327 "wrpr %0, %1, %%pstate"
1328 : "=r" (pstate)
1329 : "i" (PSTATE_IE));
1330
1331 while (1)
1332 barrier();
1333 }
1334
1335 int __cpu_disable(void)
1336 {
1337 int cpu = smp_processor_id();
1338 cpuinfo_sparc *c;
1339 int i;
1340
1341 for_each_cpu_mask(i, cpu_core_map[cpu])
1342 cpu_clear(cpu, cpu_core_map[i]);
1343 cpus_clear(cpu_core_map[cpu]);
1344
1345 for_each_cpu_mask(i, per_cpu(cpu_sibling_map, cpu))
1346 cpu_clear(cpu, per_cpu(cpu_sibling_map, i));
1347 cpus_clear(per_cpu(cpu_sibling_map, cpu));
1348
1349 c = &cpu_data(cpu);
1350
1351 c->core_id = 0;
1352 c->proc_id = -1;
1353
1354 spin_lock(&call_lock);
1355 cpu_clear(cpu, cpu_online_map);
1356 spin_unlock(&call_lock);
1357
1358 smp_wmb();
1359
1360 /* Make sure no interrupts point to this cpu. */
1361 fixup_irqs();
1362
1363 local_irq_enable();
1364 mdelay(1);
1365 local_irq_disable();
1366
1367 return 0;
1368 }
1369
1370 void __cpu_die(unsigned int cpu)
1371 {
1372 int i;
1373
1374 for (i = 0; i < 100; i++) {
1375 smp_rmb();
1376 if (!cpu_isset(cpu, smp_commenced_mask))
1377 break;
1378 msleep(100);
1379 }
1380 if (cpu_isset(cpu, smp_commenced_mask)) {
1381 printk(KERN_ERR "CPU %u didn't die...\n", cpu);
1382 } else {
1383 #if defined(CONFIG_SUN_LDOMS)
1384 unsigned long hv_err;
1385 int limit = 100;
1386
1387 do {
1388 hv_err = sun4v_cpu_stop(cpu);
1389 if (hv_err == HV_EOK) {
1390 cpu_clear(cpu, cpu_present_map);
1391 break;
1392 }
1393 } while (--limit > 0);
1394 if (limit <= 0) {
1395 printk(KERN_ERR "sun4v_cpu_stop() fails err=%lu\n",
1396 hv_err);
1397 }
1398 #endif
1399 }
1400 }
1401 #endif
1402
1403 void __init smp_cpus_done(unsigned int max_cpus)
1404 {
1405 }
1406
1407 void smp_send_reschedule(int cpu)
1408 {
1409 smp_receive_signal(cpu);
1410 }
1411
1412 /* This is a nop because we capture all other cpus
1413 * anyway when making the PROM active.
1414 */
1415 void smp_send_stop(void)
1416 {
1417 }
1418
1419 unsigned long __per_cpu_base __read_mostly;
1420 unsigned long __per_cpu_shift __read_mostly;
1421
1422 EXPORT_SYMBOL(__per_cpu_base);
1423 EXPORT_SYMBOL(__per_cpu_shift);
1424
1425 void __init real_setup_per_cpu_areas(void)
1426 {
1427 unsigned long goal, size, i;
1428 char *ptr;
1429
1430 /* Copy section for each CPU (we discard the original) */
1431 goal = PERCPU_ENOUGH_ROOM;
1432
1433 __per_cpu_shift = PAGE_SHIFT;
1434 for (size = PAGE_SIZE; size < goal; size <<= 1UL)
1435 __per_cpu_shift++;
1436
1437 ptr = alloc_bootmem_pages(size * NR_CPUS);
1438
1439 __per_cpu_base = ptr - __per_cpu_start;
1440
1441 for (i = 0; i < NR_CPUS; i++, ptr += size)
1442 memcpy(ptr, __per_cpu_start, __per_cpu_end - __per_cpu_start);
1443
1444 /* Setup %g5 for the boot cpu. */
1445 __local_per_cpu_offset = __per_cpu_offset(smp_processor_id());
1446 }
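/* A sketch of the sizing above (illustrative numbers): if PERCPU_ENOUGH_ROOM
 * were 40KB on this 8KB-page architecture, `size' would grow 8K -> 16K ->
 * 32K -> 64K and __per_cpu_shift would end up at 16, so each cpu's copy of
 * the per-cpu section lives at ptr + (cpu << 16) and __per_cpu_offset(cpu),
 * defined elsewhere, works out to __per_cpu_base + (cpu << __per_cpu_shift).
 */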