Commit | Line | Data |
---|---|---|
4a907dec | 1 | /* irq.c: UltraSparc IRQ handling/init/registry. |
1da177e4 | 2 | * |
227c3311 | 3 | * Copyright (C) 1997, 2007, 2008 David S. Miller (davem@davemloft.net) |
1da177e4 LT |
4 | * Copyright (C) 1998 Eddie C. Dost (ecd@skynet.be) |
5 | * Copyright (C) 1998 Jakub Jelinek (jj@ultra.linux.cz) | |
6 | */ | |
7 | ||
1da177e4 LT |
8 | #include <linux/module.h> |
9 | #include <linux/sched.h> | |
9843099f | 10 | #include <linux/linkage.h> |
1da177e4 LT |
11 | #include <linux/ptrace.h> |
12 | #include <linux/errno.h> | |
13 | #include <linux/kernel_stat.h> | |
14 | #include <linux/signal.h> | |
15 | #include <linux/mm.h> | |
16 | #include <linux/interrupt.h> | |
17 | #include <linux/slab.h> | |
18 | #include <linux/random.h> | |
19 | #include <linux/init.h> | |
20 | #include <linux/delay.h> | |
21 | #include <linux/proc_fs.h> | |
22 | #include <linux/seq_file.h> | |
9960e9e8 | 23 | #include <linux/ftrace.h> |
e18e2a00 | 24 | #include <linux/irq.h> |
2e2dc1d7 | 25 | #include <linux/kmemleak.h> |
1da177e4 LT |
26 | |
27 | #include <asm/ptrace.h> | |
28 | #include <asm/processor.h> | |
29 | #include <asm/atomic.h> | |
30 | #include <asm/system.h> | |
31 | #include <asm/irq.h> | |
2e457ef6 | 32 | #include <asm/io.h> |
1da177e4 LT |
33 | #include <asm/iommu.h> |
34 | #include <asm/upa.h> | |
35 | #include <asm/oplib.h> | |
25c7581b | 36 | #include <asm/prom.h> |
1da177e4 LT |
37 | #include <asm/timer.h> |
38 | #include <asm/smp.h> | |
39 | #include <asm/starfire.h> | |
40 | #include <asm/uaccess.h> | |
41 | #include <asm/cache.h> | |
42 | #include <asm/cpudata.h> | |
63b61452 | 43 | #include <asm/auxio.h> |
92704a1c | 44 | #include <asm/head.h> |
4a907dec | 45 | #include <asm/hypervisor.h> |
42d5f99b | 46 | #include <asm/cacheflush.h> |
1da177e4 | 47 | |
d91aa123 | 48 | #include "entry.h" |
280ff974 | 49 | #include "cpumap.h" |
ec687886 | 50 | #include "kstack.h" |
e18e2a00 DM |
51 | |
52 | #define NUM_IVECS (IMAP_INR + 1) | |
d91aa123 | 53 | |
10397e40 | 54 | struct ino_bucket *ivector_table; |
eb2d8d60 | 55 | unsigned long ivector_table_pa; |
1da177e4 | 56 | |
42d5f99b DM |
/* On several sun4u processors, it is illegal to mix bypass and
 * non-bypass accesses.  Therefore we access all INO buckets
 * using bypass accesses only.
 */

/* Load the __irq_chain_pa link field of the ino_bucket whose
 * physical address is 'bucket_pa', using an ASI_PHYS_USE_EC
 * (MMU-bypass, physically addressed) load.
 */
static unsigned long bucket_get_chain_pa(unsigned long bucket_pa)
{
	unsigned long ret;

	__asm__ __volatile__("ldxa [%1] %2, %0"
			     : "=&r" (ret)
			     : "r" (bucket_pa +
				    offsetof(struct ino_bucket,
					     __irq_chain_pa)),
			       "i" (ASI_PHYS_USE_EC));

	return ret;
}
74 | ||
/* Zero the __irq_chain_pa link of the bucket at physical address
 * 'bucket_pa' (bypass store), unhooking it from the pending chain.
 */
static void bucket_clear_chain_pa(unsigned long bucket_pa)
{
	__asm__ __volatile__("stxa %%g0, [%0] %1"
			     : /* no outputs */
			     : "r" (bucket_pa +
				    offsetof(struct ino_bucket,
					     __irq_chain_pa)),
			       "i" (ASI_PHYS_USE_EC));
}
84 | ||
/* Read the virtual IRQ number stored in the bucket at physical
 * address 'bucket_pa' (32-bit bypass load of the __irq field).
 */
static unsigned int bucket_get_irq(unsigned long bucket_pa)
{
	unsigned int ret;

	__asm__ __volatile__("lduwa [%1] %2, %0"
			     : "=&r" (ret)
			     : "r" (bucket_pa +
				    offsetof(struct ino_bucket,
					     __irq)),
			       "i" (ASI_PHYS_USE_EC));

	return ret;
}
98 | ||
/* Store virtual IRQ number 'irq' into the bucket at physical
 * address 'bucket_pa' (32-bit bypass store of the __irq field).
 */
static void bucket_set_irq(unsigned long bucket_pa, unsigned int irq)
{
	__asm__ __volatile__("stwa %0, [%1] %2"
			     : /* no outputs */
			     : "r" (irq),
			       "r" (bucket_pa +
				    offsetof(struct ino_bucket,
					     __irq)),
			       "i" (ASI_PHYS_USE_EC));
}
109 | ||
/* Address of the per-cpu pending-IVEC worklist head (a physical
 * address value) in the cpu's trap_block; consumed by handler_irq().
 */
#define irq_work_pa(__cpu)	&(trap_block[(__cpu)].irq_worklist_pa)
/* Virtual IRQ allocation table.  Slot 0 is reserved as the "no IRQ"
 * value (irq_alloc() starts scanning at 1 and returns 0 on failure).
 * Each in-use slot records which (dev_handle, dev_ino) pair the
 * virtual IRQ was handed out for.
 */
static struct {
	unsigned int dev_handle;
	unsigned int dev_ino;
	unsigned int in_use;
} irq_table[NR_IRQS];
/* Serializes irq_table slot allocation and release. */
static DEFINE_SPINLOCK(irq_alloc_lock);
8047e247 | 118 | |
fe41493f | 119 | unsigned char irq_alloc(unsigned int dev_handle, unsigned int dev_ino) |
8047e247 | 120 | { |
759f89e0 | 121 | unsigned long flags; |
8047e247 DM |
122 | unsigned char ent; |
123 | ||
124 | BUILD_BUG_ON(NR_IRQS >= 256); | |
125 | ||
fe41493f | 126 | spin_lock_irqsave(&irq_alloc_lock, flags); |
759f89e0 | 127 | |
35a17eb6 | 128 | for (ent = 1; ent < NR_IRQS; ent++) { |
fe41493f | 129 | if (!irq_table[ent].in_use) |
35a17eb6 DM |
130 | break; |
131 | } | |
8047e247 DM |
132 | if (ent >= NR_IRQS) { |
133 | printk(KERN_ERR "IRQ: Out of virtual IRQs.\n"); | |
759f89e0 DM |
134 | ent = 0; |
135 | } else { | |
fe41493f SR |
136 | irq_table[ent].dev_handle = dev_handle; |
137 | irq_table[ent].dev_ino = dev_ino; | |
138 | irq_table[ent].in_use = 1; | |
8047e247 DM |
139 | } |
140 | ||
fe41493f | 141 | spin_unlock_irqrestore(&irq_alloc_lock, flags); |
8047e247 DM |
142 | |
143 | return ent; | |
144 | } | |
145 | ||
#ifdef CONFIG_PCI_MSI
/* Return virtual IRQ 'irq' to the free pool.  Out-of-range values
 * are ignored.  Only the PCI MSI code frees virtual IRQs, hence
 * the config guard.
 */
void irq_free(unsigned int irq)
{
	unsigned long flags;

	if (irq >= NR_IRQS)
		return;

	spin_lock_irqsave(&irq_alloc_lock, flags);

	irq_table[irq].in_use = 0;

	spin_unlock_irqrestore(&irq_alloc_lock, flags);
}
#endif
8047e247 | 161 | |
/*
 * /proc/interrupts printing:
 */

/* seq_file show callback for /proc/interrupts.  'v' points at the
 * current row index: rows 0..NR_IRQS-1 print one line per active
 * IRQ (per-cpu counts, chip name, action names); the extra row at
 * i == NR_IRQS prints the per-cpu NMI counts.
 */
int show_interrupts(struct seq_file *p, void *v)
{
	int i = *(loff_t *) v, j;
	struct irqaction * action;
	unsigned long flags;

	/* Column header, emitted once before the first row. */
	if (i == 0) {
		seq_printf(p, " ");
		for_each_online_cpu(j)
			seq_printf(p, "CPU%d ",j);
		seq_putc(p, '\n');
	}

	if (i < NR_IRQS) {
		raw_spin_lock_irqsave(&irq_desc[i].lock, flags);
		action = irq_desc[i].action;
		if (!action)
			goto skip;	/* no handler: print nothing for this row */
		seq_printf(p, "%3d: ",i);
#ifndef CONFIG_SMP
		seq_printf(p, "%10u ", kstat_irqs(i));
#else
		for_each_online_cpu(j)
			seq_printf(p, "%10u ", kstat_irqs_cpu(i, j));
#endif
		seq_printf(p, " %9s", irq_desc[i].irq_data.chip->name);
		seq_printf(p, " %s", action->name);

		/* Shared IRQs: list every chained action's name. */
		for (action=action->next; action; action = action->next)
			seq_printf(p, ", %s", action->name);

		seq_putc(p, '\n');
skip:
		raw_spin_unlock_irqrestore(&irq_desc[i].lock, flags);
	} else if (i == NR_IRQS) {
		seq_printf(p, "NMI: ");
		for_each_online_cpu(j)
			seq_printf(p, "%10u ", cpu_data(j).__nmi_count);
		seq_printf(p, " Non-maskable interrupts\n");
	}
	return 0;
}
208 | ||
ebd8c56c DM |
/* Compute the interrupt target ID field value to program into the
 * IMAP register at 'imap' so the interrupt is delivered to 'cpuid'.
 * The encoding depends on the bus/CPU type:
 *  - Starfire:          translated via starfire_translate(), UPA format
 *  - Jalapeno/Serrano:  JBUS TID format
 *  - other cheetah(+):  Safari agent/node ID format
 *  - everything else:   plain UPA TID format
 */
static unsigned int sun4u_compute_tid(unsigned long imap, unsigned long cpuid)
{
	unsigned int tid;

	if (this_is_starfire) {
		tid = starfire_translate(imap, cpuid);
		tid <<= IMAP_TID_SHIFT;
		tid &= IMAP_TID_UPA;
	} else {
		if (tlb_type == cheetah || tlb_type == cheetah_plus) {
			unsigned long ver;

			/* The %ver register distinguishes the JBUS parts
			 * (Jalapeno/Serrano) from Safari-bus cheetahs.
			 */
			__asm__ ("rdpr %%ver, %0" : "=r" (ver));
			if ((ver >> 32UL) == __JALAPENO_ID ||
			    (ver >> 32UL) == __SERRANO_ID) {
				tid = cpuid << IMAP_TID_SHIFT;
				tid &= IMAP_TID_JBUS;
			} else {
				/* Safari: cpuid splits into agent (low 5
				 * bits) and node (next 5 bits) IDs.
				 */
				unsigned int a = cpuid & 0x1f;
				unsigned int n = (cpuid >> 5) & 0x1f;

				tid = ((a << IMAP_AID_SHIFT) |
				       (n << IMAP_NID_SHIFT));
				tid &= (IMAP_AID_SAFARI |
					IMAP_NID_SAFARI);
			}
		} else {
			tid = cpuid << IMAP_TID_SHIFT;
			tid &= IMAP_TID_UPA;
		}
	}

	return tid;
}
243 | ||
e18e2a00 DM |
/* Per-IRQ chip state.  On sun4u, iclr/imap hold the physical
 * addresses of the interrupt clear and mapping registers; on sun4v
 * both are set to ~0UL because all programming is done through
 * hypervisor calls instead of register accesses.
 */
struct irq_handler_data {
	unsigned long iclr;
	unsigned long imap;

	/* Optional bus-specific hook invoked from pre_flow_handler()
	 * before the normal flow handler runs; set up together with
	 * arg1/arg2 by irq_install_pre_handler().
	 */
	void (*pre_handler)(unsigned int, void *, void *);
	void *arg1;
	void *arg2;
};
1da177e4 | 252 | |
#ifdef CONFIG_SMP
/* Pick the cpu that should receive 'irq' given the requested
 * 'affinity' mask.  If the mask covers all online cpus, fall back
 * to the cpumap-based spread (map_to_cpu); otherwise take the first
 * online cpu in the mask, or the cpumap choice if none are online.
 */
static int irq_choose_cpu(unsigned int irq, const struct cpumask *affinity)
{
	cpumask_t mask;
	int cpuid;

	cpumask_copy(&mask, affinity);
	if (cpus_equal(mask, cpu_online_map)) {
		cpuid = map_to_cpu(irq);
	} else {
		cpumask_t tmp;

		cpus_and(tmp, cpu_online_map, mask);
		cpuid = cpus_empty(tmp) ? map_to_cpu(irq) : first_cpu(tmp);
	}

	return cpuid;
}
#else
/* UP: everything targets the (only) processor. */
#define irq_choose_cpu(irq, affinity) \
	real_hard_smp_processor_id()
#endif
1da177e4 | 275 | |
/* irq_chip ->irq_enable for sun4u: route the interrupt to a chosen
 * cpu by rewriting the IMAP TID field, set the Valid bit, then idle
 * the ICLR state machine so a pending interrupt can be re-sent.
 */
static void sun4u_irq_enable(struct irq_data *data)
{
	struct irq_handler_data *handler_data = data->handler_data;

	if (likely(handler_data)) {
		unsigned long cpuid, imap, val;
		unsigned int tid;

		cpuid = irq_choose_cpu(data->irq, data->affinity);
		imap = handler_data->imap;

		tid = sun4u_compute_tid(imap, cpuid);

		/* Clear all possible TID encodings before OR-ing in
		 * the new one; which fields exist depends on bus type.
		 */
		val = upa_readq(imap);
		val &= ~(IMAP_TID_UPA | IMAP_TID_JBUS |
			 IMAP_AID_SAFARI | IMAP_NID_SAFARI);
		val |= tid | IMAP_VALID;
		upa_writeq(val, imap);
		upa_writeq(ICLR_IDLE, handler_data->iclr);
	}
}
297 | ||
4832b992 SR |
/* irq_chip ->irq_set_affinity for sun4u.  Same IMAP reprogramming
 * as sun4u_irq_enable(), but targeting a cpu chosen from the caller
 * supplied 'mask' instead of data->affinity.  Always reports success.
 */
static int sun4u_set_affinity(struct irq_data *data,
			       const struct cpumask *mask, bool force)
{
	struct irq_handler_data *handler_data = data->handler_data;

	if (likely(handler_data)) {
		unsigned long cpuid, imap, val;
		unsigned int tid;

		cpuid = irq_choose_cpu(data->irq, mask);
		imap = handler_data->imap;

		tid = sun4u_compute_tid(imap, cpuid);

		val = upa_readq(imap);
		val &= ~(IMAP_TID_UPA | IMAP_TID_JBUS |
			 IMAP_AID_SAFARI | IMAP_NID_SAFARI);
		val |= tid | IMAP_VALID;
		upa_writeq(val, imap);
		upa_writeq(ICLR_IDLE, handler_data->iclr);
	}

	return 0;
}
322 | ||
d0cac39e DM |
/* Don't do anything.  The desc->status check for IRQ_DISABLED in
 * handler_irq() will skip the handler call and that will leave the
 * interrupt in the sent state.  The next ->enable() call will hit the
 * ICLR register to reset the state machine.
 *
 * This scheme is necessary, instead of clearing the Valid bit in the
 * IMAP register, to handle the case of IMAP registers being shared by
 * multiple INOs (and thus ICLR registers).  Since we use a different
 * virtual IRQ for each shared IMAP instance, the generic code thinks
 * there is only one user so it prematurely calls ->disable() on
 * free_irq().
 *
 * We have to provide an explicit ->disable() method instead of using
 * NULL to get the default.  The reason is that if the generic code
 * sees that, it also hooks up a default ->shutdown method which
 * invokes ->mask() which we do not want.  See irq_chip_set_defaults().
 */
static void sun4u_irq_disable(struct irq_data *data)
{
	/* Intentionally empty -- see the comment above. */
}
343 | ||
/* irq_chip ->irq_eoi for sun4u: write ICLR_IDLE to reset the
 * interrupt state machine so the next interrupt can be delivered.
 */
static void sun4u_irq_eoi(struct irq_data *data)
{
	struct irq_handler_data *handler_data = data->handler_data;

	if (likely(handler_data))
		upa_writeq(ICLR_IDLE, handler_data->iclr);
}
351 | ||
/* irq_chip ->irq_enable for sun4v: set the interrupt's target cpu,
 * idle its state machine, and enable it -- all via hypervisor calls
 * keyed by the system interrupt number (sysino) stored in irq_table.
 * Hypervisor errors are logged but not propagated.
 */
static void sun4v_irq_enable(struct irq_data *data)
{
	unsigned int ino = irq_table[data->irq].dev_ino;
	unsigned long cpuid = irq_choose_cpu(data->irq, data->affinity);
	int err;

	err = sun4v_intr_settarget(ino, cpuid);
	if (err != HV_EOK)
		printk(KERN_ERR "sun4v_intr_settarget(%x,%lu): "
		       "err(%d)\n", ino, cpuid, err);
	err = sun4v_intr_setstate(ino, HV_INTR_STATE_IDLE);
	if (err != HV_EOK)
		printk(KERN_ERR "sun4v_intr_setstate(%x): "
		       "err(%d)\n", ino, err);
	err = sun4v_intr_setenabled(ino, HV_INTR_ENABLED);
	if (err != HV_EOK)
		printk(KERN_ERR "sun4v_intr_setenabled(%x): err(%d)\n",
		       ino, err);
}
371 | ||
4832b992 SR |
/* irq_chip ->irq_set_affinity for sun4v: retarget the interrupt via
 * a hypervisor call.  Errors are logged; always reports success.
 */
static int sun4v_set_affinity(struct irq_data *data,
			       const struct cpumask *mask, bool force)
{
	unsigned int ino = irq_table[data->irq].dev_ino;
	unsigned long cpuid = irq_choose_cpu(data->irq, mask);
	int err;

	err = sun4v_intr_settarget(ino, cpuid);
	if (err != HV_EOK)
		printk(KERN_ERR "sun4v_intr_settarget(%x,%lu): "
		       "err(%d)\n", ino, cpuid, err);

	return 0;
}
386 | ||
/* irq_chip ->irq_disable for sun4v: disable delivery via hypervisor
 * call; errors are logged but not propagated.
 */
static void sun4v_irq_disable(struct irq_data *data)
{
	unsigned int ino = irq_table[data->irq].dev_ino;
	int err;

	err = sun4v_intr_setenabled(ino, HV_INTR_DISABLED);
	if (err != HV_EOK)
		printk(KERN_ERR "sun4v_intr_setenabled(%x): "
		       "err(%d)\n", ino, err);
}
1da177e4 | 397 | |
/* irq_chip ->irq_eoi for sun4v: return the interrupt state machine
 * to IDLE so the next interrupt can be delivered.
 */
static void sun4v_irq_eoi(struct irq_data *data)
{
	unsigned int ino = irq_table[data->irq].dev_ino;
	int err;

	err = sun4v_intr_setstate(ino, HV_INTR_STATE_IDLE);
	if (err != HV_EOK)
		printk(KERN_ERR "sun4v_intr_setstate(%x): "
		       "err(%d)\n", ino, err);
}
408 | ||
/* ->irq_enable for sun4v *virtual* interrupts (cookie-based vintr
 * API, addressed by (dev_handle, dev_ino) rather than sysino):
 * set target cpu, idle the state machine, then mark it valid.
 * Hypervisor errors are logged but not propagated.
 */
static void sun4v_virq_enable(struct irq_data *data)
{
	unsigned long cpuid, dev_handle, dev_ino;
	int err;

	cpuid = irq_choose_cpu(data->irq, data->affinity);

	dev_handle = irq_table[data->irq].dev_handle;
	dev_ino = irq_table[data->irq].dev_ino;

	err = sun4v_vintr_set_target(dev_handle, dev_ino, cpuid);
	if (err != HV_EOK)
		printk(KERN_ERR "sun4v_vintr_set_target(%lx,%lx,%lu): "
		       "err(%d)\n",
		       dev_handle, dev_ino, cpuid, err);
	err = sun4v_vintr_set_state(dev_handle, dev_ino,
				    HV_INTR_STATE_IDLE);
	if (err != HV_EOK)
		printk(KERN_ERR "sun4v_vintr_set_state(%lx,%lx,"
		       "HV_INTR_STATE_IDLE): err(%d)\n",
		       dev_handle, dev_ino, err);
	err = sun4v_vintr_set_valid(dev_handle, dev_ino,
				    HV_INTR_ENABLED);
	if (err != HV_EOK)
		printk(KERN_ERR "sun4v_vintr_set_state(%lx,%lx,"
		       "HV_INTR_ENABLED): err(%d)\n",
		       dev_handle, dev_ino, err);
}
437 | ||
4832b992 SR |
/* ->irq_set_affinity for sun4v virtual interrupts: retarget via the
 * vintr hypervisor call.  Errors are logged; always reports success.
 */
static int sun4v_virt_set_affinity(struct irq_data *data,
				    const struct cpumask *mask, bool force)
{
	unsigned long cpuid, dev_handle, dev_ino;
	int err;

	cpuid = irq_choose_cpu(data->irq, mask);

	dev_handle = irq_table[data->irq].dev_handle;
	dev_ino = irq_table[data->irq].dev_ino;

	err = sun4v_vintr_set_target(dev_handle, dev_ino, cpuid);
	if (err != HV_EOK)
		printk(KERN_ERR "sun4v_vintr_set_target(%lx,%lx,%lu): "
		       "err(%d)\n",
		       dev_handle, dev_ino, cpuid, err);

	return 0;
}
457 | ||
/* ->irq_disable for sun4v virtual interrupts: mark the interrupt
 * invalid via the vintr hypervisor call; errors are logged only.
 */
static void sun4v_virq_disable(struct irq_data *data)
{
	unsigned long dev_handle, dev_ino;
	int err;

	dev_handle = irq_table[data->irq].dev_handle;
	dev_ino = irq_table[data->irq].dev_ino;

	err = sun4v_vintr_set_valid(dev_handle, dev_ino,
				    HV_INTR_DISABLED);
	if (err != HV_EOK)
		printk(KERN_ERR "sun4v_vintr_set_state(%lx,%lx,"
		       "HV_INTR_DISABLED): err(%d)\n",
		       dev_handle, dev_ino, err);
}
473 | ||
/* ->irq_eoi for sun4v virtual interrupts: return the state machine
 * to IDLE so the next interrupt can be delivered; errors logged only.
 */
static void sun4v_virq_eoi(struct irq_data *data)
{
	unsigned long dev_handle, dev_ino;
	int err;

	dev_handle = irq_table[data->irq].dev_handle;
	dev_ino = irq_table[data->irq].dev_ino;

	err = sun4v_vintr_set_state(dev_handle, dev_ino,
				    HV_INTR_STATE_IDLE);
	if (err != HV_EOK)
		printk(KERN_ERR "sun4v_vintr_set_state(%lx,%lx,"
		       "HV_INTR_STATE_IDLE): err(%d)\n",
		       dev_handle, dev_ino, err);
}
489 | ||
/* irq_chip for direct-register (sun4u) interrupts. */
static struct irq_chip sun4u_irq = {
	.name			= "sun4u",
	.irq_enable		= sun4u_irq_enable,
	.irq_disable		= sun4u_irq_disable,
	.irq_eoi		= sun4u_irq_eoi,
	.irq_set_affinity	= sun4u_set_affinity,
	.flags			= IRQCHIP_EOI_IF_HANDLED,
};
088dd1f8 | 498 | |
/* irq_chip for sun4v sysino-based (hypervisor) interrupts. */
static struct irq_chip sun4v_irq = {
	.name			= "sun4v",
	.irq_enable		= sun4v_irq_enable,
	.irq_disable		= sun4v_irq_disable,
	.irq_eoi		= sun4v_irq_eoi,
	.irq_set_affinity	= sun4v_set_affinity,
	.flags			= IRQCHIP_EOI_IF_HANDLED,
};
1da177e4 | 507 | |
/* irq_chip for sun4v cookie-based virtual interrupts (vintr API). */
static struct irq_chip sun4v_virq = {
	.name			= "vsun4v",
	.irq_enable		= sun4v_virq_enable,
	.irq_disable		= sun4v_virq_disable,
	.irq_eoi		= sun4v_virq_eoi,
	.irq_set_affinity	= sun4v_virt_set_affinity,
	.flags			= IRQCHIP_EOI_IF_HANDLED,
};
516 | ||
/* Preflow handler trampoline: fetch the per-IRQ handler data and
 * invoke the bus-specific pre_handler with the ino and the two
 * arguments registered via irq_install_pre_handler().
 */
static void pre_flow_handler(struct irq_data *d)
{
	struct irq_handler_data *handler_data = irq_data_get_irq_handler_data(d);
	unsigned int ino = irq_table[d->irq].dev_ino;

	handler_data->pre_handler(ino, handler_data->arg1, handler_data->arg2);
}
524 | ||
/* Register a bus-specific hook to run before the flow handler of
 * 'irq'.  Stores func/arg1/arg2 in the IRQ's handler data and wires
 * pre_flow_handler() in as the preflow handler.  The handler data
 * must already have been attached (see build_irq and friends).
 */
void irq_install_pre_handler(int irq,
			     void (*func)(unsigned int, void *, void *),
			     void *arg1, void *arg2)
{
	struct irq_handler_data *handler_data = get_irq_data(irq);

	handler_data->pre_handler = func;
	handler_data->arg1 = arg1;
	handler_data->arg2 = arg2;

	__irq_set_preflow_handler(irq, pre_flow_handler);
}
1da177e4 | 537 | |
e18e2a00 DM |
/* Build (or look up) the virtual IRQ for a sun4u interrupt source
 * described by its ICLR/IMAP register addresses.  The INO is read
 * from the IMAP register (plus 'inofixup'); the first caller for a
 * given INO allocates the virtual IRQ, hooks up the sun4u chip, and
 * attaches a freshly allocated irq_handler_data.
 *
 * Returns the virtual IRQ number.  sun4u only (BUG on hypervisor).
 */
unsigned int build_irq(int inofixup, unsigned long iclr, unsigned long imap)
{
	struct ino_bucket *bucket;
	struct irq_handler_data *handler_data;
	unsigned int irq;
	int ino;

	BUG_ON(tlb_type == hypervisor);

	ino = (upa_readq(imap) & (IMAP_IGN | IMAP_INO)) + inofixup;
	bucket = &ivector_table[ino];
	irq = bucket_get_irq(__pa(bucket));
	if (!irq) {
		irq = irq_alloc(0, ino);
		bucket_set_irq(__pa(bucket), irq);
		set_irq_chip_and_handler_name(irq,
					      &sun4u_irq,
					      handle_fasteoi_irq,
					      "IVEC");
	}

	/* Already fully initialized by an earlier call. */
	handler_data = get_irq_data(irq);
	if (unlikely(handler_data))
		goto out;

	handler_data = kzalloc(sizeof(struct irq_handler_data), GFP_ATOMIC);
	if (unlikely(!handler_data)) {
		prom_printf("IRQ: kzalloc(irq_handler_data) failed.\n");
		prom_halt();
	}
	set_irq_data(irq, handler_data);

	handler_data->imap  = imap;
	handler_data->iclr  = iclr;

out:
	return irq;
}
1da177e4 | 576 | |
4a907dec DM |
/* Common construction of a sun4v sysino-based virtual IRQ: look up
 * (or allocate) the IRQ for 'sysino', attach 'chip', and allocate
 * the handler data on first use.  sun4v only (BUG otherwise).
 */
static unsigned int sun4v_build_common(unsigned long sysino,
				       struct irq_chip *chip)
{
	struct ino_bucket *bucket;
	struct irq_handler_data *handler_data;
	unsigned int irq;

	BUG_ON(tlb_type != hypervisor);

	bucket = &ivector_table[sysino];
	irq = bucket_get_irq(__pa(bucket));
	if (!irq) {
		irq = irq_alloc(0, sysino);
		bucket_set_irq(__pa(bucket), irq);
		set_irq_chip_and_handler_name(irq, chip,
					      handle_fasteoi_irq,
					      "IVEC");
	}

	/* Already fully initialized by an earlier call. */
	handler_data = get_irq_data(irq);
	if (unlikely(handler_data))
		goto out;

	handler_data = kzalloc(sizeof(struct irq_handler_data), GFP_ATOMIC);
	if (unlikely(!handler_data)) {
		prom_printf("IRQ: kzalloc(irq_handler_data) failed.\n");
		prom_halt();
	}
	set_irq_data(irq, handler_data);

	/* Catch accidental accesses to these things.  IMAP/ICLR handling
	 * is done by hypervisor calls on sun4v platforms, not by direct
	 * register accesses.
	 */
	handler_data->imap = ~0UL;
	handler_data->iclr = ~0UL;

out:
	return irq;
}
1da177e4 | 617 | |
4a907dec DM |
618 | unsigned int sun4v_build_irq(u32 devhandle, unsigned int devino) |
619 | { | |
620 | unsigned long sysino = sun4v_devino_to_sysino(devhandle, devino); | |
621 | ||
622 | return sun4v_build_common(sysino, &sun4v_irq); | |
623 | } | |
624 | ||
625 | unsigned int sun4v_build_virq(u32 devhandle, unsigned int devino) | |
626 | { | |
cae78728 | 627 | struct irq_handler_data *handler_data; |
b80e6998 | 628 | unsigned long hv_err, cookie; |
b7c2a757 DM |
629 | struct ino_bucket *bucket; |
630 | struct irq_desc *desc; | |
fe41493f | 631 | unsigned int irq; |
b80e6998 DM |
632 | |
633 | bucket = kzalloc(sizeof(struct ino_bucket), GFP_ATOMIC); | |
634 | if (unlikely(!bucket)) | |
635 | return 0; | |
25ad403f DM |
636 | |
637 | /* The only reference we store to the IRQ bucket is | |
638 | * by physical address which kmemleak can't see, tell | |
639 | * it that this object explicitly is not a leak and | |
640 | * should be scanned. | |
641 | */ | |
642 | kmemleak_not_leak(bucket); | |
643 | ||
42d5f99b DM |
644 | __flush_dcache_range((unsigned long) bucket, |
645 | ((unsigned long) bucket + | |
646 | sizeof(struct ino_bucket))); | |
b80e6998 | 647 | |
fe41493f SR |
648 | irq = irq_alloc(devhandle, devino); |
649 | bucket_set_irq(__pa(bucket), irq); | |
8d57d3ad | 650 | |
fe41493f | 651 | set_irq_chip_and_handler_name(irq, &sun4v_virq, |
8d57d3ad DM |
652 | handle_fasteoi_irq, |
653 | "IVEC"); | |
4a907dec | 654 | |
cae78728 SR |
655 | handler_data = kzalloc(sizeof(struct irq_handler_data), GFP_ATOMIC); |
656 | if (unlikely(!handler_data)) | |
b80e6998 | 657 | return 0; |
4a907dec | 658 | |
b7c2a757 DM |
659 | /* In order to make the LDC channel startup sequence easier, |
660 | * especially wrt. locking, we do not let request_irq() enable | |
661 | * the interrupt. | |
662 | */ | |
fe41493f | 663 | desc = irq_desc + irq; |
b7c2a757 DM |
664 | desc->status |= IRQ_NOAUTOEN; |
665 | ||
fe41493f | 666 | set_irq_data(irq, handler_data); |
4a907dec | 667 | |
b80e6998 DM |
668 | /* Catch accidental accesses to these things. IMAP/ICLR handling |
669 | * is done by hypervisor calls on sun4v platforms, not by direct | |
670 | * register accesses. | |
671 | */ | |
cae78728 SR |
672 | handler_data->imap = ~0UL; |
673 | handler_data->iclr = ~0UL; | |
b80e6998 DM |
674 | |
675 | cookie = ~__pa(bucket); | |
676 | hv_err = sun4v_vintr_set_cookie(devhandle, devino, cookie); | |
4a907dec DM |
677 | if (hv_err) { |
678 | prom_printf("IRQ: Fatal, cannot set cookie for [%x:%x] " | |
679 | "err=%lu\n", devhandle, devino, hv_err); | |
680 | prom_halt(); | |
681 | } | |
682 | ||
fe41493f | 683 | return irq; |
4a907dec DM |
684 | } |
685 | ||
fe41493f | 686 | void ack_bad_irq(unsigned int irq) |
e18e2a00 | 687 | { |
fe41493f | 688 | unsigned int ino = irq_table[irq].dev_ino; |
ab66a50e | 689 | |
77182300 DM |
690 | if (!ino) |
691 | ino = 0xdeadbeef; | |
6a76267f | 692 | |
fe41493f SR |
693 | printk(KERN_CRIT "Unexpected IRQ from ino[%x] irq[%u]\n", |
694 | ino, irq); | |
1da177e4 LT |
695 | } |
696 | ||
4f70f7a9 DM |
/* Per-cpu alternate stacks used by handler_irq() / do_softirq()
 * when switching off the task stack (see set_hardirq_stack and the
 * manual %sp swap below).
 */
void *hardirq_stack[NR_CPUS];
void *softirq_stack[NR_CPUS];
699 | ||
/* Top-level device interrupt dispatch, entered from the trap table
 * with 'pil' identifying the softint being serviced.  Atomically
 * detaches this cpu's pending-IVEC bucket chain (with interrupts
 * disabled via %pstate), then walks the chain on the hard-IRQ stack,
 * handing each bucket's virtual IRQ to the generic IRQ layer.
 */
void __irq_entry handler_irq(int pil, struct pt_regs *regs)
{
	unsigned long pstate, bucket_pa;
	struct pt_regs *old_regs;
	void *orig_sp;

	clear_softint(1 << pil);

	old_regs = set_irq_regs(regs);
	irq_enter();

	/* Grab an atomic snapshot of the pending IVECs. */
	__asm__ __volatile__("rdpr %%pstate, %0\n\t"
			     "wrpr %0, %3, %%pstate\n\t"
			     "ldx [%2], %1\n\t"
			     "stx %%g0, [%2]\n\t"
			     "wrpr %0, 0x0, %%pstate\n\t"
			     : "=&r" (pstate), "=&r" (bucket_pa)
			     : "r" (irq_work_pa(smp_processor_id())),
			       "i" (PSTATE_IE)
			     : "memory");

	orig_sp = set_hardirq_stack();

	/* Walk the detached chain.  All bucket fields are accessed by
	 * physical address; grab the next link before clearing it.
	 */
	while (bucket_pa) {
		unsigned long next_pa;
		unsigned int irq;

		next_pa = bucket_get_chain_pa(bucket_pa);
		irq = bucket_get_irq(bucket_pa);
		bucket_clear_chain_pa(bucket_pa);

		generic_handle_irq(irq);

		bucket_pa = next_pa;
	}

	restore_hardirq_stack(orig_sp);

	irq_exit();
	set_irq_regs(old_regs);
}
742 | ||
4f70f7a9 DM |
/* Arch do_softirq(): run pending softirqs on this cpu's dedicated
 * softirq stack.  The stack pointer is swapped by hand around the
 * __do_softirq() call; 192 bytes are reserved for the minimal
 * stack frame and STACK_BIAS applies to all sparc64 %sp values.
 */
void do_softirq(void)
{
	unsigned long flags;

	/* Nested invocation: the interrupted context will run them. */
	if (in_interrupt())
		return;

	local_irq_save(flags);

	if (local_softirq_pending()) {
		void *orig_sp, *sp = softirq_stack[smp_processor_id()];

		sp += THREAD_SIZE - 192 - STACK_BIAS;

		__asm__ __volatile__("mov %%sp, %0\n\t"
				     "mov %1, %%sp"
				     : "=&r" (orig_sp)
				     : "r" (sp));
		__do_softirq();
		__asm__ __volatile__("mov %0, %%sp"
				     : : "r" (orig_sp));
	}

	local_irq_restore(flags);
}
768 | ||
e0204409 DM |
#ifdef CONFIG_HOTPLUG_CPU
/* CPU hotplug support: re-run ->irq_set_affinity for every active,
 * non-per-cpu IRQ so each gets re-routed against the current online
 * map, then mask the tick timer interrupt via tick_ops.
 */
void fixup_irqs(void)
{
	unsigned int irq;

	for (irq = 0; irq < NR_IRQS; irq++) {
		unsigned long flags;

		raw_spin_lock_irqsave(&irq_desc[irq].lock, flags);
		if (irq_desc[irq].action &&
		    !(irq_desc[irq].status & IRQ_PER_CPU)) {
			struct irq_data *data = irq_get_irq_data(irq);

			if (data->chip->irq_set_affinity)
				data->chip->irq_set_affinity(data,
							     data->affinity,
							     false);
		}
		raw_spin_unlock_irqrestore(&irq_desc[irq].lock, flags);
	}

	tick_ops->disable_irq();
}
#endif
793 | ||
cdd5186f DM |
/* Register layout of the PROM "counter-timer" node: two
 * count/limit pairs (see map_prom_timers / kill_prom_timer).
 */
struct sun5_timer {
	u64 count0;
	u64 limit0;
	u64 count1;
	u64 limit1;
};
1da177e4 | 800 | |
/* Mapped PROM timer registers (NULL if the node is absent), and the
 * saved limit values from before kill_prom_timer() zeroed them.
 */
static struct sun5_timer *prom_timers;
static u64 prom_limit0, prom_limit1;
803 | ||
804 | static void map_prom_timers(void) | |
805 | { | |
25c7581b | 806 | struct device_node *dp; |
6a23acf3 | 807 | const unsigned int *addr; |
1da177e4 LT |
808 | |
809 | /* PROM timer node hangs out in the top level of device siblings... */ | |
25c7581b DM |
810 | dp = of_find_node_by_path("/"); |
811 | dp = dp->child; | |
812 | while (dp) { | |
813 | if (!strcmp(dp->name, "counter-timer")) | |
814 | break; | |
815 | dp = dp->sibling; | |
816 | } | |
1da177e4 LT |
817 | |
818 | /* Assume if node is not present, PROM uses different tick mechanism | |
819 | * which we should not care about. | |
820 | */ | |
25c7581b | 821 | if (!dp) { |
1da177e4 LT |
822 | prom_timers = (struct sun5_timer *) 0; |
823 | return; | |
824 | } | |
825 | ||
826 | /* If PROM is really using this, it must be mapped by him. */ | |
25c7581b DM |
827 | addr = of_get_property(dp, "address", NULL); |
828 | if (!addr) { | |
1da177e4 LT |
829 | prom_printf("PROM does not have timer mapped, trying to continue.\n"); |
830 | prom_timers = (struct sun5_timer *) 0; | |
831 | return; | |
832 | } | |
833 | prom_timers = (struct sun5_timer *) ((unsigned long)addr[0]); | |
834 | } | |
835 | ||
/* Silence the PROM's periodic timer: save the limit registers for
 * posterity, zero them, and consume any interrupt packet the timer
 * already posted (reads/clears the interrupt receive registers).
 * No-op if map_prom_timers() found no timer node.
 */
static void kill_prom_timer(void)
{
	if (!prom_timers)
		return;

	/* Save them away for later. */
	prom_limit0 = prom_timers->limit0;
	prom_limit1 = prom_timers->limit1;

	/* Just as in sun4c/sun4m PROM uses timer which ticks at IRQ 14.
	 * We turn both off here just to be paranoid.
	 */
	prom_timers->limit0 = 0;
	prom_timers->limit1 = 0;

	/* Wheee, eat the interrupt packet too... */
	__asm__ __volatile__(
	" mov 0x40, %%g2\n"
	" ldxa [%%g0] %0, %%g1\n"
	" ldxa [%%g2] %1, %%g1\n"
	" stxa %%g0, [%%g0] %0\n"
	" membar #Sync\n"
	: /* no outputs */
	: "i" (ASI_INTR_RECEIVE), "i" (ASI_INTR_R)
	: "g1", "g2");
}
862 | ||
9843099f | 863 | void notrace init_irqwork_curcpu(void) |
1da177e4 | 864 | { |
1da177e4 LT |
865 | int cpu = hard_smp_processor_id(); |
866 | ||
eb2d8d60 | 867 | trap_block[cpu].irq_worklist_pa = 0UL; |
1da177e4 LT |
868 | } |
869 | ||
5cbc3073 DM |
870 | /* Please be very careful with register_one_mondo() and |
871 | * sun4v_register_mondo_queues(). | |
872 | * | |
873 | * On SMP this gets invoked from the CPU trampoline before | |
874 | * the cpu has fully taken over the trap table from OBP, | |
875 | * and it's kernel stack + %g6 thread register state is | |
876 | * not fully cooked yet. | |
877 | * | |
878 | * Therefore you cannot make any OBP calls, not even prom_printf, | |
879 | * from these two routines. | |
880 | */ | |
bd4352ca | 881 | static void __cpuinit notrace register_one_mondo(unsigned long paddr, unsigned long type, unsigned long qmask) |
ac29c11d | 882 | { |
5cbc3073 | 883 | unsigned long num_entries = (qmask + 1) / 64; |
94f8762d DM |
884 | unsigned long status; |
885 | ||
886 | status = sun4v_cpu_qconf(type, paddr, num_entries); | |
887 | if (status != HV_EOK) { | |
888 | prom_printf("SUN4V: sun4v_cpu_qconf(%lu:%lx:%lu) failed, " | |
889 | "err %lu\n", type, paddr, num_entries, status); | |
ac29c11d DM |
890 | prom_halt(); |
891 | } | |
892 | } | |
893 | ||
9843099f | 894 | void __cpuinit notrace sun4v_register_mondo_queues(int this_cpu) |
5b0c0572 | 895 | { |
b5a37e96 DM |
896 | struct trap_per_cpu *tb = &trap_block[this_cpu]; |
897 | ||
5cbc3073 DM |
898 | register_one_mondo(tb->cpu_mondo_pa, HV_CPU_QUEUE_CPU_MONDO, |
899 | tb->cpu_mondo_qmask); | |
900 | register_one_mondo(tb->dev_mondo_pa, HV_CPU_QUEUE_DEVICE_MONDO, | |
901 | tb->dev_mondo_qmask); | |
902 | register_one_mondo(tb->resum_mondo_pa, HV_CPU_QUEUE_RES_ERROR, | |
903 | tb->resum_qmask); | |
904 | register_one_mondo(tb->nonresum_mondo_pa, HV_CPU_QUEUE_NONRES_ERROR, | |
905 | tb->nonresum_qmask); | |
b5a37e96 DM |
906 | } |
907 | ||
14a2ff6e DM |
908 | /* Each queue region must be a power of 2 multiple of 64 bytes in |
909 | * size. The base real address must be aligned to the size of the | |
910 | * region. Thus, an 8KB queue must be 8KB aligned, for example. | |
911 | */ | |
912 | static void __init alloc_one_queue(unsigned long *pa_ptr, unsigned long qmask) | |
b5a37e96 | 913 | { |
5cbc3073 | 914 | unsigned long size = PAGE_ALIGN(qmask + 1); |
14a2ff6e DM |
915 | unsigned long order = get_order(size); |
916 | unsigned long p; | |
5b0c0572 | 917 | |
14a2ff6e | 918 | p = __get_free_pages(GFP_KERNEL, order); |
5cbc3073 | 919 | if (!p) { |
14a2ff6e | 920 | prom_printf("SUN4V: Error, cannot allocate queue.\n"); |
5b0c0572 DM |
921 | prom_halt(); |
922 | } | |
923 | ||
5cbc3073 | 924 | *pa_ptr = __pa(p); |
5b0c0572 DM |
925 | } |
926 | ||
b434e719 | 927 | static void __init init_cpu_send_mondo_info(struct trap_per_cpu *tb) |
1d2f1f90 DM |
928 | { |
929 | #ifdef CONFIG_SMP | |
14a2ff6e | 930 | unsigned long page; |
1d2f1f90 DM |
931 | |
932 | BUILD_BUG_ON((NR_CPUS * sizeof(u16)) > (PAGE_SIZE - 64)); | |
933 | ||
14a2ff6e | 934 | page = get_zeroed_page(GFP_KERNEL); |
1d2f1f90 DM |
935 | if (!page) { |
936 | prom_printf("SUN4V: Error, cannot allocate cpu mondo page.\n"); | |
937 | prom_halt(); | |
938 | } | |
939 | ||
940 | tb->cpu_mondo_block_pa = __pa(page); | |
941 | tb->cpu_list_pa = __pa(page + 64); | |
942 | #endif | |
943 | } | |
944 | ||
b434e719 DM |
945 | /* Allocate mondo and error queues for all possible cpus. */ |
946 | static void __init sun4v_init_mondo_queues(void) | |
ac29c11d | 947 | { |
b434e719 | 948 | int cpu; |
ac29c11d | 949 | |
b434e719 DM |
950 | for_each_possible_cpu(cpu) { |
951 | struct trap_per_cpu *tb = &trap_block[cpu]; | |
1d2f1f90 | 952 | |
14a2ff6e DM |
953 | alloc_one_queue(&tb->cpu_mondo_pa, tb->cpu_mondo_qmask); |
954 | alloc_one_queue(&tb->dev_mondo_pa, tb->dev_mondo_qmask); | |
955 | alloc_one_queue(&tb->resum_mondo_pa, tb->resum_qmask); | |
956 | alloc_one_queue(&tb->resum_kernel_buf_pa, tb->resum_qmask); | |
957 | alloc_one_queue(&tb->nonresum_mondo_pa, tb->nonresum_qmask); | |
958 | alloc_one_queue(&tb->nonresum_kernel_buf_pa, | |
959 | tb->nonresum_qmask); | |
43f58923 DM |
960 | } |
961 | } | |
962 | ||
963 | static void __init init_send_mondo_info(void) | |
964 | { | |
965 | int cpu; | |
966 | ||
967 | for_each_possible_cpu(cpu) { | |
968 | struct trap_per_cpu *tb = &trap_block[cpu]; | |
1d2f1f90 | 969 | |
b434e719 | 970 | init_cpu_send_mondo_info(tb); |
72aff53f | 971 | } |
ac29c11d DM |
972 | } |
973 | ||
e18e2a00 DM |
/* Placeholder irqaction installed on irq_desc[0] by init_IRQ() so the
 * timer shows up by name; no handler is attached here.
 */
static struct irqaction timer_irq_action = {
	.name = "timer",
};
977 | ||
1da177e4 LT |
/* Only invoked on boot processor.
 *
 * Takes interrupt delivery over from the PROM: maps and kills the PROM
 * tick timer, allocates the ivector table (and, on sun4v, the mondo and
 * error queues), clears stale softints, and finally enables PSTATE.IE.
 * The statement order below is load-bearing — do not reorder.
 */
void __init init_IRQ(void)
{
	unsigned long size;

	map_prom_timers();
	kill_prom_timer();

	/* One ino_bucket per possible interrupt vector; the table is
	 * accessed by physical address from trap-level code, hence the
	 * dcache flush and the __pa() below.
	 */
	size = sizeof(struct ino_bucket) * NUM_IVECS;
	ivector_table = kzalloc(size, GFP_KERNEL);
	if (!ivector_table) {
		prom_printf("Fatal error, cannot allocate ivector_table\n");
		prom_halt();
	}
	__flush_dcache_range((unsigned long) ivector_table,
			     ((unsigned long) ivector_table) + size);

	ivector_table_pa = __pa(ivector_table);

	if (tlb_type == hypervisor)
		sun4v_init_mondo_queues();

	init_send_mondo_info();

	if (tlb_type == hypervisor) {
		/* Load up the boot cpu's entries.  */
		sun4v_register_mondo_queues(hard_smp_processor_id());
	}

	/* We need to clear any IRQ's pending in the soft interrupt
	 * registers, a spurious one could be left around from the
	 * PROM timer which we just disabled.
	 */
	clear_softint(get_softint());

	/* Now that ivector table is initialized, it is safe
	 * to receive IRQ vector traps.  We will normally take
	 * one or two right now, in case some device PROM used
	 * to boot us wants to speak to us.  We just ignore them.
	 *
	 * The asm sets PSTATE.IE, enabling interrupts on this cpu.
	 */
	__asm__ __volatile__("rdpr	%%pstate, %%g1\n\t"
			     "or	%%g1, %0, %%g1\n\t"
			     "wrpr	%%g1, 0x0, %%pstate"
			     : /* No outputs */
			     : "i" (PSTATE_IE)
			     : "g1");

	irq_desc[0].action = &timer_irq_action;
}