/*
 * arch/sh/kernel/smp.c
 *
 * SMP support for the SuperH processors.
 *
 * Copyright (C) 2002 - 2008 Paul Mundt
 * Copyright (C) 2006 - 2007 Akio Idehara
 *
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 */
#include <linux/err.h>
#include <linux/cache.h>
#include <linux/cpumask.h>
#include <linux/delay.h>
#include <linux/init.h>
#include <linux/spinlock.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/cpu.h>
#include <linux/interrupt.h>
#include <asm/atomic.h>
#include <asm/processor.h>
#include <asm/system.h>
#include <asm/mmu_context.h>
#include <asm/smp.h>
#include <asm/cacheflush.h>
#include <asm/sections.h>

int __cpu_number_map[NR_CPUS];          /* Map physical to logical */
int __cpu_logical_map[NR_CPUS];         /* Map logical to physical */

static inline void __init smp_store_cpu_info(unsigned int cpu)
{
        struct sh_cpuinfo *c = cpu_data + cpu;

        c->loops_per_jiffy = loops_per_jiffy;
}

void __init smp_prepare_cpus(unsigned int max_cpus)
{
        unsigned int cpu = smp_processor_id();

        init_new_context(current, &init_mm);
        current_thread_info()->cpu = cpu;
        plat_prepare_cpus(max_cpus);

#ifndef CONFIG_HOTPLUG_CPU
        /* Without CPU hotplug, every possible CPU is present from boot. */
        cpu_present_map = cpu_possible_map;
#endif
}

void __devinit smp_prepare_boot_cpu(void)
{
        unsigned int cpu = smp_processor_id();

        /* The boot CPU always becomes logical CPU 0. */
        __cpu_number_map[0] = cpu;
        __cpu_logical_map[0] = cpu;

        cpu_set(cpu, cpu_online_map);
        cpu_set(cpu, cpu_possible_map);
}

asmlinkage void __cpuinit start_secondary(void)
{
        unsigned int cpu;
        struct mm_struct *mm = &init_mm;

        atomic_inc(&mm->mm_count);
        atomic_inc(&mm->mm_users);
        current->active_mm = mm;
        BUG_ON(current->mm);
        enter_lazy_tlb(mm, current);

        per_cpu_trap_init();

        preempt_disable();

        notify_cpu_starting(smp_processor_id());

        local_irq_enable();

        cpu = smp_processor_id();

        /* Enable local timers */
        local_timer_setup(cpu);
        calibrate_delay();

        smp_store_cpu_info(cpu);

        cpu_set(cpu, cpu_online_map);

        cpu_idle();
}
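
/*
 * Startup parameters for a secondary CPU, filled in by __cpu_up() below
 * and consumed by the secondary entry code in head.S.
 */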
extern struct {
        unsigned long sp;
        unsigned long bss_start;
        unsigned long bss_end;
        void *start_kernel_fn;
        void *cpu_init_fn;
        void *thread_info;
} stack_start;

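/*
 * Bring one secondary CPU up: fork its idle thread, hand stack and entry
 * details to it through stack_start, kick it via plat_start_cpu(), then
 * wait up to a second (HZ jiffies) for it to mark itself online.
 */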
int __cpuinit __cpu_up(unsigned int cpu)
{
        struct task_struct *tsk;
        unsigned long timeout;

        tsk = fork_idle(cpu);
        if (IS_ERR(tsk)) {
                printk(KERN_ERR "Failed forking idle task for cpu %d\n", cpu);
                return PTR_ERR(tsk);
        }

        /* Fill in data in head.S for secondary cpus */
        stack_start.sp = tsk->thread.sp;
        stack_start.thread_info = tsk->stack;
        stack_start.bss_start = 0; /* don't clear bss for secondary cpus */
        stack_start.start_kernel_fn = start_secondary;

        flush_cache_all();

        plat_start_cpu(cpu, (unsigned long)_stext);

        timeout = jiffies + HZ;
        while (time_before(jiffies, timeout)) {
                if (cpu_online(cpu))
                        break;

                udelay(10);
        }

        if (cpu_online(cpu))
                return 0;

        return -ENOENT;
}

void __init smp_cpus_done(unsigned int max_cpus)
{
        unsigned long bogosum = 0;
        int cpu;

        for_each_online_cpu(cpu)
                bogosum += cpu_data[cpu].loops_per_jiffy;

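        /*
         * One BogoMIPS is 500000 delay-loop iterations per second, and
         * loops_per_jiffy counts iterations per 1/HZ second, so
         * bogosum / (500000/HZ) gives the integer part and
         * (bogosum / (5000/HZ)) % 100 the two fractional digits.
         */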
        printk(KERN_INFO "SMP: Total of %d processors activated "
               "(%lu.%02lu BogoMIPS).\n", num_online_cpus(),
               bogosum / (500000/HZ),
               (bogosum / (5000/HZ)) % 100);
}

void smp_send_reschedule(int cpu)
{
        plat_send_ipi(cpu, SMP_MSG_RESCHEDULE);
}

static void stop_this_cpu(void *unused)
{
        cpu_clear(smp_processor_id(), cpu_online_map);
        local_irq_disable();

        for (;;)
                cpu_relax();
}

void smp_send_stop(void)
{
        smp_call_function(stop_this_cpu, NULL, 0);
}

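/*
 * Hooks for the generic cross-call code: kernel/smp.c raises function-call
 * IPIs through these, and smp_message_recv() below feeds them back into
 * the generic_smp_call_function*_interrupt() handlers.
 */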
void arch_send_call_function_ipi(cpumask_t mask)
{
        int cpu;

        for_each_cpu_mask(cpu, mask)
                plat_send_ipi(cpu, SMP_MSG_FUNCTION);
}

void arch_send_call_function_single_ipi(int cpu)
{
        plat_send_ipi(cpu, SMP_MSG_FUNCTION_SINGLE);
}

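/*
 * Relay a timer tick to every CPU in the mask; the receiving CPUs run
 * their local timer handler via ipi_timer() below.
 */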
void smp_timer_broadcast(const struct cpumask *mask)
{
        int cpu;

        for_each_cpu(cpu, mask)
                plat_send_ipi(cpu, SMP_MSG_TIMER);
}

static void ipi_timer(void)
{
        irq_enter();
        local_timer_interrupt();
        irq_exit();
}

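/*
 * Central IPI dispatch. The platform's IPI interrupt handler decodes the
 * message type from its interrupt controller and passes it in here. A
 * minimal sketch of such a handler, assuming a hypothetical IPI_IRQ_BASE
 * with one vector per message type:
 *
 *      static irqreturn_t ipi_interrupt_handler(int irq, void *dev_id)
 *      {
 *              smp_message_recv(irq - IPI_IRQ_BASE);
 *              return IRQ_HANDLED;
 *      }
 */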
void smp_message_recv(unsigned int msg)
{
        switch (msg) {
        case SMP_MSG_FUNCTION:
                generic_smp_call_function_interrupt();
                break;
        case SMP_MSG_RESCHEDULE:
                /*
                 * Nothing to do: simply taking the IPI forces a
                 * reschedule check on the interrupt return path.
                 */
                break;
        case SMP_MSG_FUNCTION_SINGLE:
                generic_smp_call_function_single_interrupt();
                break;
        case SMP_MSG_TIMER:
                ipi_timer();
                break;
        default:
                printk(KERN_WARNING "SMP %d: %s(): unknown IPI %d\n",
                       smp_processor_id(), __func__, msg);
                break;
        }
}

/* Not really SMP stuff ... */
int setup_profiling_timer(unsigned int multiplier)
{
        return 0;
}

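/*
 * Cross-CPU TLB maintenance: each flush_tlb_*() entry point below runs
 * its local_flush_tlb_*() counterpart on every CPU that needs it, using
 * IPIs (via smp_call_function()/on_each_cpu()) to reach remote CPUs.
 */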
static void flush_tlb_all_ipi(void *info)
{
        local_flush_tlb_all();
}

void flush_tlb_all(void)
{
        on_each_cpu(flush_tlb_all_ipi, NULL, 1);
}

static void flush_tlb_mm_ipi(void *mm)
{
        local_flush_tlb_mm((struct mm_struct *)mm);
}

/*
 * The following TLB flush calls are invoked when old translations are
 * being torn down, or PTE attributes are changing. For single-threaded
 * address spaces, a new context is obtained on the current CPU, and TLB
 * context on the other CPUs is invalidated to force a new context
 * allocation at switch_mm time, should the mm ever be used on other
 * CPUs. For multithreaded address spaces, inter-CPU interrupts have to
 * be sent. Another case where inter-CPU interrupts are required is when
 * the target mm might be active on another CPU (e.g. debuggers doing
 * the flushes on behalf of debuggees, kswapd stealing pages from
 * another process, etc.).
 * Kanoj 07/00.
 */

void flush_tlb_mm(struct mm_struct *mm)
{
        preempt_disable();

        if ((atomic_read(&mm->mm_users) != 1) || (current->mm != mm)) {
                smp_call_function(flush_tlb_mm_ipi, (void *)mm, 1);
        } else {
                /*
                 * mm is not in use elsewhere: just invalidate the other
                 * CPUs' contexts so they allocate a fresh one at
                 * switch_mm() time.
                 */
                int i;
                for (i = 0; i < num_online_cpus(); i++)
                        if (smp_processor_id() != i)
                                cpu_context(i, mm) = 0;
        }
        local_flush_tlb_mm(mm);

        preempt_enable();
}

struct flush_tlb_data {
        struct vm_area_struct *vma;
        unsigned long addr1;
        unsigned long addr2;
};

static void flush_tlb_range_ipi(void *info)
{
        struct flush_tlb_data *fd = (struct flush_tlb_data *)info;

        local_flush_tlb_range(fd->vma, fd->addr1, fd->addr2);
}

void flush_tlb_range(struct vm_area_struct *vma,
                     unsigned long start, unsigned long end)
{
        struct mm_struct *mm = vma->vm_mm;

        preempt_disable();
        if ((atomic_read(&mm->mm_users) != 1) || (current->mm != mm)) {
                struct flush_tlb_data fd;

                fd.vma = vma;
                fd.addr1 = start;
                fd.addr2 = end;
                smp_call_function(flush_tlb_range_ipi, (void *)&fd, 1);
        } else {
                int i;
                for (i = 0; i < num_online_cpus(); i++)
                        if (smp_processor_id() != i)
                                cpu_context(i, mm) = 0;
        }
        local_flush_tlb_range(vma, start, end);
        preempt_enable();
}

static void flush_tlb_kernel_range_ipi(void *info)
{
        struct flush_tlb_data *fd = (struct flush_tlb_data *)info;

        local_flush_tlb_kernel_range(fd->addr1, fd->addr2);
}

void flush_tlb_kernel_range(unsigned long start, unsigned long end)
{
        struct flush_tlb_data fd;

        fd.addr1 = start;
        fd.addr2 = end;
        on_each_cpu(flush_tlb_kernel_range_ipi, (void *)&fd, 1);
}

static void flush_tlb_page_ipi(void *info)
{
        struct flush_tlb_data *fd = (struct flush_tlb_data *)info;

        local_flush_tlb_page(fd->vma, fd->addr1);
}

void flush_tlb_page(struct vm_area_struct *vma, unsigned long page)
{
        preempt_disable();
        if ((atomic_read(&vma->vm_mm->mm_users) != 1) ||
            (current->mm != vma->vm_mm)) {
                struct flush_tlb_data fd;

                fd.vma = vma;
                fd.addr1 = page;
                smp_call_function(flush_tlb_page_ipi, (void *)&fd, 1);
        } else {
                int i;
                for (i = 0; i < num_online_cpus(); i++)
                        if (smp_processor_id() != i)
                                cpu_context(i, vma->vm_mm) = 0;
        }
        local_flush_tlb_page(vma, page);
        preempt_enable();
}

static void flush_tlb_one_ipi(void *info)
{
        struct flush_tlb_data *fd = (struct flush_tlb_data *)info;
        local_flush_tlb_one(fd->addr1, fd->addr2);
}

void flush_tlb_one(unsigned long asid, unsigned long vaddr)
{
        struct flush_tlb_data fd;

        fd.addr1 = asid;
        fd.addr2 = vaddr;

        smp_call_function(flush_tlb_one_ipi, (void *)&fd, 1);
        local_flush_tlb_one(asid, vaddr);
}