/* arch/cris/arch-v32/kernel/smp.c */
#include <linux/types.h>
#include <asm/delay.h>
#include <irq.h>
#include <hwregs/intr_vect.h>
#include <hwregs/intr_vect_defs.h>
#include <asm/tlbflush.h>
#include <asm/mmu_context.h>
#include <hwregs/asm/mmu_defs_asm.h>
#include <hwregs/supp_reg.h>
#include <linux/atomic.h>

#include <linux/err.h>
#include <linux/init.h>
#include <linux/timex.h>
#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/cpumask.h>
#include <linux/interrupt.h>
#include <linux/module.h>

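/*
 * IPI message types. These are bit flags: send_ipi() ORs them into the
 * target CPU's rw_ipi vector register, so several messages can be
 * pending at once and are all handled by one run of the IPI handler.
 */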
#define IPI_SCHEDULE 1
#define IPI_CALL 2
#define IPI_FLUSH_TLB 4
#define IPI_BOOT 8

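/*
 * Sentinel passed in place of a real mm/vma pointer to request a flush
 * of all TLB entries rather than a specific mapping.
 */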
#define FLUSH_ALL (void *)0xffffffff

/* Vector of locks used for various atomic operations */
spinlock_t cris_atomic_locks[] = {
	[0 ... LOCK_COUNT - 1] = __SPIN_LOCK_UNLOCKED(cris_atomic_locks)
};

/* CPU masks */
cpumask_t phys_cpu_present_map = CPU_MASK_NONE;
EXPORT_SYMBOL(phys_cpu_present_map);

/* Variables used during SMP boot */
volatile int cpu_now_booting = 0;
volatile struct thread_info *smp_init_current_idle_thread;

/* Variables used during IPI */
static DEFINE_SPINLOCK(call_lock);
static DEFINE_SPINLOCK(tlbstate_lock);

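/*
 * Descriptor for a pending smp_call_function() request. call_lock
 * serializes callers, so a single shared slot is enough.
 */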
struct call_data_struct {
	void (*func) (void *info);
	void *info;
	int wait;
};

static struct call_data_struct *call_data;

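/* Arguments for a pending TLB-flush IPI, protected by tlbstate_lock. */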
static struct mm_struct *flush_mm;
static struct vm_area_struct *flush_vma;
static unsigned long flush_addr;

/* Per-CPU interrupt controller register blocks, indexed by CPU id */
static unsigned long irq_regs[NR_CPUS] = {
	regi_irq,
	regi_irq2
};

static irqreturn_t crisv32_ipi_interrupt(int irq, void *dev_id);
static int send_ipi(int vector, int wait, cpumask_t cpu_mask);
static struct irqaction irq_ipi = {
	.handler = crisv32_ipi_interrupt,
	.flags = IRQF_DISABLED,
	.name = "ipi",
};

extern void cris_mmu_init(void);
extern void cris_timer_init(void);

/* SMP initialization */
void __init smp_prepare_cpus(unsigned int max_cpus)
{
	int i;

	/* From now on we can expect IPIs so set them up */
	setup_irq(IPI_INTR_VECT, &irq_ipi);

	/* Mark all possible CPUs as present */
	for (i = 0; i < max_cpus; i++)
		cpumask_set_cpu(i, &phys_cpu_present_map);
}

void smp_prepare_boot_cpu(void)
{
	/* PGD pointer has moved after per_cpu initialization so
	 * update the MMU.
	 */
	pgd_t **pgd;
	pgd = (pgd_t **)&per_cpu(current_pgd, smp_processor_id());

	/* Write the PGD to both the instruction and data MMU banks. */
	SUPP_BANK_SEL(1);
	SUPP_REG_WR(RW_MM_TLB_PGD, pgd);
	SUPP_BANK_SEL(2);
	SUPP_REG_WR(RW_MM_TLB_PGD, pgd);

	set_cpu_online(0, true);
	cpumask_set_cpu(0, &phys_cpu_present_map);
	set_cpu_possible(0, true);
}

void __init smp_cpus_done(unsigned int max_cpus)
{
}

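/*
 * Boot handshake: the boot CPU publishes the new CPU's idle thread and
 * id in shared variables, kicks it with IPI_BOOT, then polls until the
 * secondary marks itself online from smp_callin().
 */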
/* Bring one CPU online. */
static int __init
smp_boot_one_cpu(int cpuid, struct task_struct *idle)
{
	unsigned timeout;
	cpumask_t cpu_mask;

	cpumask_clear(&cpu_mask);
	task_thread_info(idle)->cpu = cpuid;

	/* Information to the CPU that is about to boot */
	smp_init_current_idle_thread = task_thread_info(idle);
	cpu_now_booting = cpuid;

	/* Kick it. The CPU must be marked online before the IPI is sent,
	 * since send_ipi() only targets CPUs in cpu_online_mask. */
	set_cpu_online(cpuid, true);
	cpumask_set_cpu(cpuid, &cpu_mask);
	send_ipi(IPI_BOOT, 0, cpu_mask);
	set_cpu_online(cpuid, false);

	/* Wait for CPU to come online */
	for (timeout = 0; timeout < 10000; timeout++) {
		if (cpu_online(cpuid)) {
			cpu_now_booting = 0;
			smp_init_current_idle_thread = NULL;
			return 0; /* CPU online */
		}
		udelay(100);
		barrier();
	}

	printk(KERN_CRIT "SMP: CPU:%d is stuck.\n", cpuid);
	return -1;
}

/* Secondary CPUs start executing C code here. Set up CPU-specific
 * state such as the local timer and the MMU. */
void __init smp_callin(void)
{
	extern void cpu_idle(void);

	int cpu = cpu_now_booting;
	reg_intr_vect_rw_mask vect_mask = {0};

	/* Initialise the idle task for this CPU */
	atomic_inc(&init_mm.mm_count);
	current->active_mm = &init_mm;

	/* Set up MMU */
	cris_mmu_init();
	__flush_tlb_all();

	/* Setup local timer. */
	cris_timer_init();

	/* Enable IRQ and idle */
	REG_WR(intr_vect, irq_regs[cpu], rw_mask, vect_mask);
	crisv32_unmask_irq(IPI_INTR_VECT);
	crisv32_unmask_irq(TIMER0_INTR_VECT);
	preempt_disable();
	notify_cpu_starting(cpu);
	local_irq_enable();

	set_cpu_online(cpu, true);
	cpu_idle();
}

/* Stop execution on this CPU. */
void stop_this_cpu(void *dummy)
{
	local_irq_disable();
	asm volatile("halt");
}

/* Other calls */
void smp_send_stop(void)
{
	smp_call_function(stop_this_cpu, NULL, 0);
}

int setup_profiling_timer(unsigned int multiplier)
{
	return -EINVAL;
}

/* cache_decay_ticks is used by the scheduler to decide if a process
 * is "hot" on one CPU. A higher value means a higher penalty to move
 * a process to another CPU. Our cache is rather small so we report
 * 1 tick.
 */
unsigned long cache_decay_ticks = 1;

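/* Arch hook called by the generic CPU hotplug code to bring a secondary online. */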
int __cpuinit __cpu_up(unsigned int cpu, struct task_struct *tidle)
{
	smp_boot_one_cpu(cpu, tidle);
	return cpu_online(cpu) ? 0 : -ENOSYS;
}

void smp_send_reschedule(int cpu)
{
	cpumask_t cpu_mask;
	cpumask_clear(&cpu_mask);
	cpumask_set_cpu(cpu, &cpu_mask);
	send_ipi(IPI_SCHEDULE, 0, cpu_mask);
}

/* TLB flushing
 *
 * Flush needs to be done on the local CPU and on any other CPU that
 * may have the same mapping. mm->cpu_vm_mask is used to keep track
 * of which CPUs a specific process has executed on.
 */
void flush_tlb_common(struct mm_struct *mm, struct vm_area_struct *vma,
		      unsigned long addr)
{
	unsigned long flags;
	cpumask_t cpu_mask;

	spin_lock_irqsave(&tlbstate_lock, flags);
	cpu_mask = (mm == FLUSH_ALL ? cpu_all_mask : *mm_cpumask(mm));
	cpumask_clear_cpu(smp_processor_id(), &cpu_mask);
	flush_mm = mm;
	flush_vma = vma;
	flush_addr = addr;
	send_ipi(IPI_FLUSH_TLB, 1, cpu_mask);
	spin_unlock_irqrestore(&tlbstate_lock, flags);
}

void flush_tlb_all(void)
{
	__flush_tlb_all();
	flush_tlb_common(FLUSH_ALL, FLUSH_ALL, 0);
}

void flush_tlb_mm(struct mm_struct *mm)
{
	__flush_tlb_mm(mm);
	flush_tlb_common(mm, FLUSH_ALL, 0);
	/* No more mappings in other CPUs */
	cpumask_clear(mm_cpumask(mm));
	cpumask_set_cpu(smp_processor_id(), mm_cpumask(mm));
}

void flush_tlb_page(struct vm_area_struct *vma, unsigned long addr)
{
	__flush_tlb_page(vma, addr);
	flush_tlb_common(vma->vm_mm, vma, addr);
}

/* Inter-processor interrupts
 *
 * The IPIs are used for:
 * * Forcing a schedule on a CPU
 * * Flushing the TLB on other CPUs
 * * Calling a function on other CPUs
 */

int send_ipi(int vector, int wait, cpumask_t cpu_mask)
{
	int i = 0;
	/* Seed the vector field from CPU 0's rw_ipi register. */
	reg_intr_vect_rw_ipi ipi = REG_RD(intr_vect, irq_regs[i], rw_ipi);
	int ret = 0;

	/* Calculate CPUs to send to. */
	cpumask_and(&cpu_mask, &cpu_mask, cpu_online_mask);

	/* Send the IPI. */
	for_each_cpu(i, &cpu_mask) {
		ipi.vector |= vector;
		REG_WR(intr_vect, irq_regs[i], rw_ipi, ipi);
	}

	/* Wait for the IPI to finish on the other CPUs. */
	if (wait) {
		for_each_cpu(i, &cpu_mask) {
			int j;
			for (j = 0; j < 1000; j++) {
				ipi = REG_RD(intr_vect, irq_regs[i], rw_ipi);
				if (!ipi.vector)
					break;
				udelay(100);
			}

			/* Timeout? */
			if (ipi.vector) {
				printk("SMP call timeout from %d to %d\n",
				       smp_processor_id(), i);
				ret = -ETIMEDOUT;
				dump_stack();
			}
		}
	}
	return ret;
}

/*
 * You must not call this function with disabled interrupts or from a
 * hardware interrupt handler or from a bottom half handler.
 */
int smp_call_function(void (*func)(void *info), void *info, int wait)
{
	cpumask_t cpu_mask;
	struct call_data_struct data;
	int ret;

	cpumask_setall(&cpu_mask);
	cpumask_clear_cpu(smp_processor_id(), &cpu_mask);

	WARN_ON(irqs_disabled());

	data.func = func;
	data.info = info;
	data.wait = wait;

	spin_lock(&call_lock);
	call_data = &data;
	ret = send_ipi(IPI_CALL, wait, cpu_mask);
	spin_unlock(&call_lock);

	return ret;
}

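/*
 * IPI handler. Multiple IPI messages may have been OR'ed into the
 * vector register while the interrupt was pending; handle all of them,
 * then clear the register to acknowledge.
 */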
irqreturn_t crisv32_ipi_interrupt(int irq, void *dev_id)
{
	void (*func) (void *info) = call_data->func;
	void *info = call_data->info;
	reg_intr_vect_rw_ipi ipi;

	ipi = REG_RD(intr_vect, irq_regs[smp_processor_id()], rw_ipi);

	if (ipi.vector & IPI_SCHEDULE) {
		scheduler_ipi();
	}
	if (ipi.vector & IPI_CALL) {
		func(info);
	}
	if (ipi.vector & IPI_FLUSH_TLB) {
		if (flush_mm == FLUSH_ALL)
			__flush_tlb_all();
		else if (flush_vma == FLUSH_ALL)
			__flush_tlb_mm(flush_mm);
		else
			__flush_tlb_page(flush_vma, flush_addr);
	}

	ipi.vector = 0;
	REG_WR(intr_vect, irq_regs[smp_processor_id()], rw_ipi, ipi);

	return IRQ_HANDLED;
}