/*
 * linux/arch/m32r/kernel/smp.c
 *
 * M32R SMP support routines.
 *
 * Copyright (c) 2001, 2002 Hitoshi Yamamoto
 *
 * Taken from i386 version.
 *   (c) 1995 Alan Cox, Building #3 <alan@redhat.com>
 *   (c) 1998-99, 2000 Ingo Molnar <mingo@redhat.com>
 *
 * This code is released under the GNU General Public License version 2 or
 * later.
 */

#undef DEBUG_SMP

#include <linux/irq.h>
#include <linux/interrupt.h>
#include <linux/sched.h>
#include <linux/spinlock.h>
#include <linux/mm.h>
#include <linux/smp.h>
#include <linux/profile.h>
#include <linux/cpu.h>

#include <asm/cacheflush.h>
#include <asm/pgalloc.h>
#include <asm/atomic.h>
#include <asm/io.h>
#include <asm/mmu_context.h>
#include <asm/m32r.h>
#include <asm/tlbflush.h>

/*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*/
/*                     Data structures and variables                        */
/*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*/

/*
 * For flush_cache_all()
 */
static DEFINE_SPINLOCK(flushcache_lock);
static volatile unsigned long flushcache_cpumask = 0;

/*
 * For flush_tlb_others()
 */
static volatile cpumask_t flush_cpumask;
static struct mm_struct *flush_mm;
static struct vm_area_struct *flush_vma;
static volatile unsigned long flush_va;
static DEFINE_SPINLOCK(tlbstate_lock);
#define FLUSH_ALL 0xffffffff
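
/*
 * flush_mm/flush_vma/flush_va describe the pending flush_tlb_others()
 * request: they are written under tlbstate_lock and read by
 * smp_invalidate_interrupt() on the targeted CPUs.  A flush_va equal to
 * FLUSH_ALL means "drop the whole context" rather than a single page.
 */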

DECLARE_PER_CPU(int, prof_multiplier);
DECLARE_PER_CPU(int, prof_old_multiplier);
DECLARE_PER_CPU(int, prof_counter);

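/*
 * One lock per IPI type, defined elsewhere; send_IPI_mask_phys() takes
 * ipi_lock[ipi_num] to serialize writes to the corresponding IPICRi
 * register.
 */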
extern spinlock_t ipi_lock[];

/*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*/
/*                          Function Prototypes                             */
/*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*/

void smp_reschedule_interrupt(void);
void smp_flush_cache_all_interrupt(void);

static void flush_tlb_all_ipi(void *);
static void flush_tlb_others(cpumask_t, struct mm_struct *,
        struct vm_area_struct *, unsigned long);

void smp_invalidate_interrupt(void);

static void stop_this_cpu(void *);

void smp_ipi_timer_interrupt(struct pt_regs *);
void smp_local_timer_interrupt(void);

static void send_IPI_allbutself(int, int);
static void send_IPI_mask(const struct cpumask *, int, int);

/*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*/
/*                     Rescheduling request Routines                        */
/*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*/

/*==========================================================================*
 * Name:         smp_send_reschedule
 *
 * Description:  This routine requests another CPU to reschedule.
 *               1. Send 'RESCHEDULE_IPI' to the target CPU, requesting it
 *                  to execute 'smp_reschedule_interrupt()'.
 *
 * Born on Date: 2002.02.05
 *
 * Arguments:    cpu_id - Target CPU ID
 *
 * Returns:      void (cannot fail)
 *
 * Modification log:
 * Date       Who Description
 * ---------- --- --------------------------------------------------------
 *
 *==========================================================================*/
void smp_send_reschedule(int cpu_id)
{
        WARN_ON(cpu_is_offline(cpu_id));
        send_IPI_mask(cpumask_of(cpu_id), RESCHEDULE_IPI, 1);
}

/*==========================================================================*
 * Name:         smp_reschedule_interrupt
 *
 * Description:  This routine executes on the CPU which received
 *               'RESCHEDULE_IPI'.
 *
 * Born on Date: 2002.02.05
 *
 * Arguments:    NONE
 *
 * Returns:      void (cannot fail)
 *
 * Modification log:
 * Date       Who Description
 * ---------- --- --------------------------------------------------------
 *
 *==========================================================================*/
void smp_reschedule_interrupt(void)
{
        scheduler_ipi();
}

/*==========================================================================*
 * Name:         smp_flush_cache_all
 *
 * Description:  This routine sends an 'INVALIDATE_CACHE_IPI' to all other
 *               CPUs in the system.
 *
 * Born on Date: 2003-05-28
 *
 * Arguments:    NONE
 *
 * Returns:      void (cannot fail)
 *
 * Modification log:
 * Date       Who Description
 * ---------- --- --------------------------------------------------------
 *
 *==========================================================================*/
void smp_flush_cache_all(void)
{
        cpumask_t cpumask;
        unsigned long *mask;

        preempt_disable();
        cpumask_copy(&cpumask, cpu_online_mask);
        cpumask_clear_cpu(smp_processor_id(), &cpumask);
        spin_lock(&flushcache_lock);
        mask = cpumask_bits(&cpumask);
        atomic_set_mask(*mask, (atomic_t *)&flushcache_cpumask);
        send_IPI_mask(&cpumask, INVALIDATE_CACHE_IPI, 0);
        _flush_cache_copyback_all();
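        /*
         * Each targeted CPU clears its bit in flushcache_cpumask from
         * smp_flush_cache_all_interrupt(); spin until they have all done so.
         */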
        while (flushcache_cpumask)
                mb();
        spin_unlock(&flushcache_lock);
        preempt_enable();
}

void smp_flush_cache_all_interrupt(void)
{
        _flush_cache_copyback_all();
        clear_bit(smp_processor_id(), &flushcache_cpumask);
}

/*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*/
/*                      TLB flush request Routines                          */
/*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*/

/*==========================================================================*
 * Name:         smp_flush_tlb_all
 *
 * Description:  This routine flushes all processes' TLBs.
 *               1. Flush the local TLB with '__flush_tlb_all()'.
 *               2. Request the other CPUs to execute 'flush_tlb_all_ipi()'.
 *
 * Born on Date: 2002.02.05
 *
 * Arguments:    NONE
 *
 * Returns:      void (cannot fail)
 *
 * Modification log:
 * Date       Who Description
 * ---------- --- --------------------------------------------------------
 *
 *==========================================================================*/
void smp_flush_tlb_all(void)
{
        unsigned long flags;

        preempt_disable();
        local_irq_save(flags);
        __flush_tlb_all();
        local_irq_restore(flags);
        smp_call_function(flush_tlb_all_ipi, NULL, 1);
        preempt_enable();
}

/*==========================================================================*
 * Name:         flush_tlb_all_ipi
 *
 * Description:  This routine flushes the local TLB.
 *               1. Execute '__flush_tlb_all()'.
 *
 * Born on Date: 2002.02.05
 *
 * Arguments:    *info - not used
 *
 * Returns:      void (cannot fail)
 *
 * Modification log:
 * Date       Who Description
 * ---------- --- --------------------------------------------------------
 *
 *==========================================================================*/
static void flush_tlb_all_ipi(void *info)
{
        __flush_tlb_all();
}

/*==========================================================================*
 * Name:         smp_flush_tlb_mm
 *
 * Description:  This routine flushes the specified mm context's TLB entries.
 *
 * Born on Date: 2002.02.05
 *
 * Arguments:    *mm - a pointer to the mm struct whose TLB is flushed
 *
 * Returns:      void (cannot fail)
 *
 * Modification log:
 * Date       Who Description
 * ---------- --- --------------------------------------------------------
 *
 *==========================================================================*/
void smp_flush_tlb_mm(struct mm_struct *mm)
{
        int cpu_id;
        cpumask_t cpu_mask;
        unsigned long *mmc;
        unsigned long flags;

        preempt_disable();
        cpu_id = smp_processor_id();
        mmc = &mm->context[cpu_id];
        cpumask_copy(&cpu_mask, mm_cpumask(mm));
        cpumask_clear_cpu(cpu_id, &cpu_mask);

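        /*
         * Invalidate this CPU's context for the mm.  If the mm is the one
         * we are currently running, activate a fresh context right away;
         * otherwise just drop this CPU from the mm's cpumask.
         */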
        if (*mmc != NO_CONTEXT) {
                local_irq_save(flags);
                *mmc = NO_CONTEXT;
                if (mm == current->mm)
                        activate_context(mm);
                else
                        cpumask_clear_cpu(cpu_id, mm_cpumask(mm));
                local_irq_restore(flags);
        }
        if (!cpumask_empty(&cpu_mask))
                flush_tlb_others(cpu_mask, mm, NULL, FLUSH_ALL);

        preempt_enable();
}

/*==========================================================================*
 * Name:         smp_flush_tlb_range
 *
 * Description:  This routine flushes a range of pages.
 *
 * Born on Date: 2002.02.05
 *
 * Arguments:    *vma - a pointer to the vma struct whose mm is flushed
 *               start - not used
 *               end - not used
 *
 * Returns:      void (cannot fail)
 *
 * Modification log:
 * Date       Who Description
 * ---------- --- --------------------------------------------------------
 *
 *==========================================================================*/
void smp_flush_tlb_range(struct vm_area_struct *vma, unsigned long start,
        unsigned long end)
{
        smp_flush_tlb_mm(vma->vm_mm);
}

/*==========================================================================*
 * Name:         smp_flush_tlb_page
 *
 * Description:  This routine flushes one page.
 *
 * Born on Date: 2002.02.05
 *
 * Arguments:    *vma - a pointer to the vma struct that includes va
 *               va - virtual address to be flushed
 *
 * Returns:      void (cannot fail)
 *
 * Modification log:
 * Date       Who Description
 * ---------- --- --------------------------------------------------------
 *
 *==========================================================================*/
void smp_flush_tlb_page(struct vm_area_struct *vma, unsigned long va)
{
        struct mm_struct *mm = vma->vm_mm;
        int cpu_id;
        cpumask_t cpu_mask;
        unsigned long *mmc;
        unsigned long flags;

        preempt_disable();
        cpu_id = smp_processor_id();
        mmc = &mm->context[cpu_id];
        cpumask_copy(&cpu_mask, mm_cpumask(mm));
        cpumask_clear_cpu(cpu_id, &cpu_mask);

#ifdef DEBUG_SMP
        if (!mm)
                BUG();
#endif

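        /*
         * Tag the page address with this mm's ASID so that only the
         * matching TLB entry is invalidated.
         */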
        if (*mmc != NO_CONTEXT) {
                local_irq_save(flags);
                va &= PAGE_MASK;
                va |= (*mmc & MMU_CONTEXT_ASID_MASK);
                __flush_tlb_page(va);
                local_irq_restore(flags);
        }
        if (!cpumask_empty(&cpu_mask))
                flush_tlb_others(cpu_mask, mm, vma, va);

        preempt_enable();
}

/*==========================================================================*
 * Name:         flush_tlb_others
 *
 * Description:  This routine requests other CPUs to flush their TLBs.
 *               1. Set up the parameters.
 *               2. Send 'INVALIDATE_TLB_IPI' to the target CPUs, requesting
 *                  them to execute 'smp_invalidate_interrupt()'.
 *               3. Wait until the other CPUs have finished.
 *
 * Born on Date: 2002.02.05
 *
 * Arguments:    cpumask - bitmap of target CPUs
 *               *mm - a pointer to the mm struct for flush TLB
 *               *vma - a pointer to the vma struct that includes va
 *               va - virtual address to be flushed
 *
 * Returns:      void (cannot fail)
 *
 * Modification log:
 * Date       Who Description
 * ---------- --- --------------------------------------------------------
 *
 *==========================================================================*/
static void flush_tlb_others(cpumask_t cpumask, struct mm_struct *mm,
        struct vm_area_struct *vma, unsigned long va)
{
        unsigned long *mask;
#ifdef DEBUG_SMP
        unsigned long flags;
        __save_flags(flags);
        if (!(flags & 0x0040))  /* Interrupt Disable NONONO */
                BUG();
#endif /* DEBUG_SMP */

        /*
         * A couple of (to be removed) sanity checks:
         *
         * - we do not send IPIs to not-yet booted CPUs.
         * - current CPU must not be in mask
         * - mask must exist :)
         */
        BUG_ON(cpumask_empty(&cpumask));

        BUG_ON(cpumask_test_cpu(smp_processor_id(), &cpumask));
        BUG_ON(!mm);

        /* If a CPU which we ran on has gone down, OK. */
        cpumask_and(&cpumask, &cpumask, cpu_online_mask);
        if (cpumask_empty(&cpumask))
                return;

        /*
         * i'm not happy about this global shared spinlock in the
         * MM hot path, but we'll see how contended it is.
         * Temporarily this turns IRQs off, so that lockups are
         * detected by the NMI watchdog.
         */
        spin_lock(&tlbstate_lock);

        flush_mm = mm;
        flush_vma = vma;
        flush_va = va;
        mask = cpumask_bits(&cpumask);
        atomic_set_mask(*mask, (atomic_t *)&flush_cpumask);

        /*
         * We have to send the IPI only to
         * CPUs affected.
         */
        send_IPI_mask(&cpumask, INVALIDATE_TLB_IPI, 0);

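        /*
         * Each target clears its bit in flush_cpumask from
         * smp_invalidate_interrupt(); spin until all of them have done so.
         */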
        while (!cpumask_empty((cpumask_t*)&flush_cpumask)) {
                /* nothing. lockup detection does not belong here */
                mb();
        }

        flush_mm = NULL;
        flush_vma = NULL;
        flush_va = 0;
        spin_unlock(&tlbstate_lock);
}

/*==========================================================================*
 * Name:         smp_invalidate_interrupt
 *
 * Description:  This routine executes on the CPU which received
 *               'INVALIDATE_TLB_IPI'.
 *               1. Flush the local TLB.
 *               2. Report that the TLB flush has finished.
 *
 * Born on Date: 2002.02.05
 *
 * Arguments:    NONE
 *
 * Returns:      void (cannot fail)
 *
 * Modification log:
 * Date       Who Description
 * ---------- --- --------------------------------------------------------
 *
 *==========================================================================*/
void smp_invalidate_interrupt(void)
{
        int cpu_id = smp_processor_id();
        unsigned long *mmc = &flush_mm->context[cpu_id];

        if (!cpumask_test_cpu(cpu_id, &flush_cpumask))
                return;

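        /*
         * FLUSH_ALL means the whole context goes: drop our ASID and either
         * re-activate the context (if it is the one we are running) or leave
         * the mm's cpumask.  Otherwise flush just the one ASID-tagged page.
         */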
        if (flush_va == FLUSH_ALL) {
                *mmc = NO_CONTEXT;
                if (flush_mm == current->active_mm)
                        activate_context(flush_mm);
                else
                        cpumask_clear_cpu(cpu_id, mm_cpumask(flush_mm));
        } else {
                unsigned long va = flush_va;

                if (*mmc != NO_CONTEXT) {
                        va &= PAGE_MASK;
                        va |= (*mmc & MMU_CONTEXT_ASID_MASK);
                        __flush_tlb_page(va);
                }
        }
        cpumask_clear_cpu(cpu_id, (cpumask_t*)&flush_cpumask);
}

/*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*/
/*                      Stop CPU request Routines                           */
/*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*/

/*==========================================================================*
 * Name:         smp_send_stop
 *
 * Description:  This routine requests all other CPUs to stop.
 *               1. Request the other CPUs to execute 'stop_this_cpu()'.
 *
 * Born on Date: 2002.02.05
 *
 * Arguments:    NONE
 *
 * Returns:      void (cannot fail)
 *
 * Modification log:
 * Date       Who Description
 * ---------- --- --------------------------------------------------------
 *
 *==========================================================================*/
void smp_send_stop(void)
{
        smp_call_function(stop_this_cpu, NULL, 0);
}

/*==========================================================================*
 * Name:         stop_this_cpu
 *
 * Description:  This routine halts the calling CPU.
 *
 * Born on Date: 2002.02.05
 *
 * Arguments:    NONE
 *
 * Returns:      void (cannot fail)
 *
 * Modification log:
 * Date       Who Description
 * ---------- --- --------------------------------------------------------
 *
 *==========================================================================*/
static void stop_this_cpu(void *dummy)
{
        int cpu_id = smp_processor_id();

        /*
         * Remove this CPU:
         */
        set_cpu_online(cpu_id, false);

        /*
         * PSW IE = 1;
         * IMASK = 0;
         * goto SLEEP
         */
        local_irq_disable();
        outl(0, M32R_ICU_IMASK_PORTL);
        inl(M32R_ICU_IMASK_PORTL);      /* dummy read */
        local_irq_enable();

        for ( ; ; );
}

void arch_send_call_function_ipi_mask(const struct cpumask *mask)
{
        send_IPI_mask(mask, CALL_FUNCTION_IPI, 0);
}

void arch_send_call_function_single_ipi(int cpu)
{
        send_IPI_mask(cpumask_of(cpu), CALL_FUNC_SINGLE_IPI, 0);
}

/*==========================================================================*
 * Name:         smp_call_function_interrupt
 *
 * Description:  This routine executes on the CPU which received
 *               'CALL_FUNCTION_IPI'.
 *
 * Born on Date: 2002.02.05
 *
 * Arguments:    NONE
 *
 * Returns:      void (cannot fail)
 *
 * Modification log:
 * Date       Who Description
 * ---------- --- --------------------------------------------------------
 *
 *==========================================================================*/
void smp_call_function_interrupt(void)
{
        irq_enter();
        generic_smp_call_function_interrupt();
        irq_exit();
}

void smp_call_function_single_interrupt(void)
{
        irq_enter();
        generic_smp_call_function_single_interrupt();
        irq_exit();
}

/*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*/
/*                             Timer Routines                               */
/*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*/

/*==========================================================================*
 * Name:         smp_send_timer
 *
 * Description:  This routine sends a 'LOCAL_TIMER_IPI' to all other CPUs
 *               in the system.
 *
 * Born on Date: 2002.02.05
 *
 * Arguments:    NONE
 *
 * Returns:      void (cannot fail)
 *
 * Modification log:
 * Date       Who Description
 * ---------- --- --------------------------------------------------------
 *
 *==========================================================================*/
void smp_send_timer(void)
{
        send_IPI_allbutself(LOCAL_TIMER_IPI, 1);
}

/*==========================================================================*
 * Name:         smp_ipi_timer_interrupt
 *
 * Description:  This routine executes on the CPU which received
 *               'LOCAL_TIMER_IPI'.
 *
 * Born on Date: 2002.02.05
 *
 * Arguments:    *regs - a pointer to the saved register info
 *
 * Returns:      void (cannot fail)
 *
 * Modification log:
 * Date       Who Description
 * ---------- --- --------------------------------------------------------
 *
 *==========================================================================*/
void smp_ipi_timer_interrupt(struct pt_regs *regs)
{
        struct pt_regs *old_regs;
        old_regs = set_irq_regs(regs);
        irq_enter();
        smp_local_timer_interrupt();
        irq_exit();
        set_irq_regs(old_regs);
}

/*==========================================================================*
 * Name:         smp_local_timer_interrupt
 *
 * Description:  Local timer interrupt handler. It does both profiling and
 *               process statistics/rescheduling.
 *               We do profiling in every local tick, statistics/rescheduling
 *               happen only every 'profiling multiplier' ticks. The default
 *               multiplier is 1 and it can be changed by writing the new
 *               multiplier value into /proc/profile.
 *
 * Born on Date: 2002.02.05
 *
 * Arguments:    *regs - a pointer to the saved register info
 *
 * Returns:      void (cannot fail)
 *
 * Original:     arch/i386/kernel/apic.c
 *
 * Modification log:
 * Date       Who Description
 * ---------- --- --------------------------------------------------------
 * 2003-06-24 hy  use per_cpu structure.
 *==========================================================================*/
void smp_local_timer_interrupt(void)
{
        int user = user_mode(get_irq_regs());
        int cpu_id = smp_processor_id();

        /*
         * The profiling function is SMP safe. (nothing can mess
         * around with "current", and the profiling counters are
         * updated with atomic operations). This is especially
         * useful with a profiling multiplier != 1
         */

        profile_tick(CPU_PROFILING);

        if (--per_cpu(prof_counter, cpu_id) <= 0) {
                /*
                 * The multiplier may have changed since the last time we got
                 * to this point as a result of the user writing to
                 * /proc/profile. In this case we need to adjust the APIC
                 * timer accordingly.
                 *
                 * Interrupts are already masked off at this point.
                 */
                per_cpu(prof_counter, cpu_id)
                        = per_cpu(prof_multiplier, cpu_id);
                if (per_cpu(prof_counter, cpu_id)
                    != per_cpu(prof_old_multiplier, cpu_id))
                {
                        per_cpu(prof_old_multiplier, cpu_id)
                                = per_cpu(prof_counter, cpu_id);
                }

                update_process_times(user);
        }
}

/*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*/
/*                            Send IPI Routines                             */
/*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*/

/*==========================================================================*
 * Name:         send_IPI_allbutself
 *
 * Description:  This routine sends an IPI to all other CPUs in the system.
 *
 * Born on Date: 2002.02.05
 *
 * Arguments:    ipi_num - Number of IPI
 *               try - 0 : Send the IPI unconditionally; if the previous IPI
 *                         is still pending, wait until it has been accepted.
 *                    !0 : Do not send the IPI if the target CPU has not yet
 *                         accepted the previous one.
 *
 * Returns:      void (cannot fail)
 *
 * Modification log:
 * Date       Who Description
 * ---------- --- --------------------------------------------------------
 *
 *==========================================================================*/
static void send_IPI_allbutself(int ipi_num, int try)
{
        cpumask_t cpumask;

        cpumask_copy(&cpumask, cpu_online_mask);
        cpumask_clear_cpu(smp_processor_id(), &cpumask);

        send_IPI_mask(&cpumask, ipi_num, try);
}

/*==========================================================================*
 * Name:         send_IPI_mask
 *
 * Description:  This routine sends an IPI to the specified CPUs.
 *
 * Born on Date: 2002.02.05
 *
 * Arguments:    cpu_mask - Bitmap of target CPUs logical ID
 *               ipi_num - Number of IPI
 *               try - 0 : Send the IPI unconditionally; if the previous IPI
 *                         is still pending, wait until it has been accepted.
 *                    !0 : Do not send the IPI if the target CPU has not yet
 *                         accepted the previous one.
 *
 * Returns:      void (cannot fail)
 *
 * Modification log:
 * Date       Who Description
 * ---------- --- --------------------------------------------------------
 *
 *==========================================================================*/
static void send_IPI_mask(const struct cpumask *cpumask, int ipi_num, int try)
{
        cpumask_t physid_mask, tmp;
        int cpu_id, phys_id;
        int num_cpus = num_online_cpus();

        if (num_cpus <= 1)      /* NO MP */
                return;

        cpumask_and(&tmp, cpumask, cpu_online_mask);
        BUG_ON(!cpumask_equal(cpumask, &tmp));

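        /*
         * Translate the logical CPU ids into physical ids; CPUs without a
         * physical id mapping (cpu_to_physid() == -1) are simply skipped.
         */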
        cpumask_clear(&physid_mask);
        for_each_cpu(cpu_id, cpumask) {
                if ((phys_id = cpu_to_physid(cpu_id)) != -1)
                        cpumask_set_cpu(phys_id, &physid_mask);
        }

        send_IPI_mask_phys(&physid_mask, ipi_num, try);
}

/*==========================================================================*
 * Name:         send_IPI_mask_phys
 *
 * Description:  This routine sends an IPI to other CPUs in the system.
 *
 * Born on Date: 2002.02.05
 *
 * Arguments:    cpu_mask - Bitmap of target CPUs physical ID
 *               ipi_num - Number of IPI
 *               try - 0 : Send the IPI unconditionally; if the previous IPI
 *                         is still pending, wait until it has been accepted.
 *                    !0 : Do not send the IPI if the target CPU has not yet
 *                         accepted the previous one.
 *
 * Returns:      IPICRi register value.
 *
 * Modification log:
 * Date       Who Description
 * ---------- --- --------------------------------------------------------
 *
 *==========================================================================*/
unsigned long send_IPI_mask_phys(const cpumask_t *physid_mask, int ipi_num,
        int try)
{
        spinlock_t *ipilock;
        volatile unsigned long *ipicr_addr;
        unsigned long ipicr_val;
        unsigned long my_physid_mask;
        unsigned long mask = cpumask_bits(physid_mask)[0];

        if (mask & ~physids_coerce(phys_cpu_present_map))
                BUG();
        if (ipi_num >= NR_IPIS || ipi_num < 0)
                BUG();

        mask <<= IPI_SHIFT;
        ipilock = &ipi_lock[ipi_num];
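        /* Each IPI number has its own IPICRi register, laid out 4 bytes apart. */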
        ipicr_addr = (volatile unsigned long *)(M32R_ICU_IPICR_ADDR
                + (ipi_num << 2));
        my_physid_mask = ~(1 << smp_processor_id());

        /*
         * lock ipi_lock[i]
         * check IPICRi == 0
         * write IPICRi (send IPIi)
         * unlock ipi_lock[i]
         */
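        /*
         * In the asm below: if 'try' is non-zero and IPICRi still shows a
         * pending IPI for another CPU, fall through to label 3 without
         * sending; otherwise spin at label 1 until the register clears and
         * then store the new mask at label 2.
         */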
        spin_lock(ipilock);
        __asm__ __volatile__ (
                ";; CHECK IPICRi == 0           \n\t"
                ".fillinsn                      \n"
                "1:                             \n\t"
                "ld     %0, @%1                 \n\t"
                "and    %0, %4                  \n\t"
                "beqz   %0, 2f                  \n\t"
                "bnez   %3, 3f                  \n\t"
                "bra    1b                      \n\t"
                ";; WRITE IPICRi (send IPIi)    \n\t"
                ".fillinsn                      \n"
                "2:                             \n\t"
                "st     %2, @%1                 \n\t"
                ".fillinsn                      \n"
                "3:                             \n\t"
                : "=&r"(ipicr_val)
                : "r"(ipicr_addr), "r"(mask), "r"(try), "r"(my_physid_mask)
                : "memory"
        );
        spin_unlock(ipilock);

        return ipicr_val;
}