/*
 * Derived from arch/i386/kernel/irq.c
 * Copyright (C) 1992 Linus Torvalds
 * Adapted from arch/i386 by Gary Thomas
 * Copyright (C) 1995-1996 Gary Thomas (gdt@linuxppc.org)
 * Updated and modified by Cort Dougan <cort@fsmlabs.com>
 * Copyright (C) 1996-2001 Cort Dougan
 * Adapted for Power Macintosh by Paul Mackerras
 * Copyright (C) 1996 Paul Mackerras (paulus@cs.anu.edu.au)
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 *
 * This file contains the code used by various IRQ handling routines:
 * asking for different IRQs should be done through these routines
 * instead of just grabbing them.  Thus setups with different IRQ numbers
 * shouldn't result in any weird surprises, and installing new handlers
 * should be easier.
 *
 * The MPC8xx has an interrupt mask in the SIU.  If a bit is set, the
 * interrupt is _enabled_.  As expected, IRQ0 is bit 0 in the 32-bit
 * mask register (of which only 16 are defined), hence the weird shifting
 * and complement of the cached_irq_mask.  I want to be able to stuff
 * this right into the SIU SMASK register.
 * Many of the prep/chrp functions are conditionally compiled on CONFIG_8xx
 * to reduce code space and undefined function references.
 */

#undef DEBUG

#include <linux/module.h>
#include <linux/threads.h>
#include <linux/kernel_stat.h>
#include <linux/signal.h>
#include <linux/sched.h>
#include <linux/ptrace.h>
#include <linux/ioport.h>
#include <linux/interrupt.h>
#include <linux/timex.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/delay.h>
#include <linux/irq.h>
#include <linux/seq_file.h>
#include <linux/cpumask.h>
#include <linux/profile.h>
#include <linux/bitops.h>
#include <linux/list.h>
#include <linux/radix-tree.h>
#include <linux/mutex.h>
#include <linux/bootmem.h>
#include <linux/pci.h>
#include <linux/debugfs.h>

#include <asm/uaccess.h>
#include <asm/system.h>
#include <asm/io.h>
#include <asm/pgtable.h>
#include <asm/irq.h>
#include <asm/cache.h>
#include <asm/prom.h>
#include <asm/ptrace.h>
#include <asm/machdep.h>
#include <asm/udbg.h>
#ifdef CONFIG_PPC64
#include <asm/paca.h>
#include <asm/firmware.h>
#include <asm/lv1call.h>
#endif

int __irq_offset_value;
static int ppc_spurious_interrupts;

#ifdef CONFIG_PPC32
EXPORT_SYMBOL(__irq_offset_value);
atomic_t ppc_n_lost_interrupts;

#ifdef CONFIG_TAU_INT
extern int tau_initialized;
extern int tau_interrupts(int);
#endif
#endif /* CONFIG_PPC32 */

#ifdef CONFIG_PPC64
EXPORT_SYMBOL(irq_desc);

int distribute_irqs = 1;

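/*
 * Interrupt soft-disabling on ppc64 is lazy: local_irq_disable() only
 * clears the soft_enabled byte in the per-cpu paca (always reachable
 * through r13), while the hardware EE bit is left alone until an
 * interrupt actually arrives while soft-disabled.  The helpers below
 * read and write those paca bytes with single instructions, so the
 * access cannot be torn apart by preemption to another cpu.
 */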
static inline notrace unsigned long get_hard_enabled(void)
{
	unsigned long enabled;

	__asm__ __volatile__("lbz %0,%1(13)"
	: "=r" (enabled) : "i" (offsetof(struct paca_struct, hard_enabled)));

	return enabled;
}

static inline notrace void set_soft_enabled(unsigned long enable)
{
	__asm__ __volatile__("stb %0,%1(13)"
	: : "r" (enable), "i" (offsetof(struct paca_struct, soft_enabled)));
}

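/*
 * raw_local_irq_restore() below backs the generic local_irq_restore().
 * A minimal usage sketch (illustrative driver-style code):
 *
 *	unsigned long flags;
 *
 *	local_irq_save(flags);
 *	...critical section...
 *	local_irq_restore(flags);	<- may end up here to re-enable
 */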
notrace void raw_local_irq_restore(unsigned long en)
{
	/*
	 * get_paca()->soft_enabled = en;
	 * Is it ever valid to use local_irq_restore(0) when soft_enabled is 1?
	 * That was allowed before, and in such a case we do need to take care
	 * that gcc will set soft_enabled directly via r13, not choose to use
	 * an intermediate register, lest we're preempted to a different cpu.
	 */
	set_soft_enabled(en);
	if (!en)
		return;

	if (firmware_has_feature(FW_FEATURE_ISERIES)) {
		/*
		 * Do we need to disable preemption here?  Not really: in the
		 * unlikely event that we're preempted to a different cpu in
		 * between getting r13, loading its lppaca_ptr, and loading
		 * its any_int, we might call iseries_handle_interrupts without
		 * an interrupt pending on the new cpu, but that's no disaster,
		 * is it?  And the business of preempting us off the old cpu
		 * would itself involve a local_irq_restore which handles the
		 * interrupt to that cpu.
		 *
		 * But use "local_paca->lppaca_ptr" instead of "get_lppaca()"
		 * to avoid any preemption checking added into get_paca().
		 */
		if (local_paca->lppaca_ptr->int_dword.any_int)
			iseries_handle_interrupts();
	}

	/*
	 * if (get_paca()->hard_enabled) return;
	 * But again we need to take care that gcc gets hard_enabled directly
	 * via r13, not choose to use an intermediate register, lest we're
	 * preempted to a different cpu in between the two instructions.
	 */
	if (get_hard_enabled())
		return;

	/*
	 * Need to hard-enable interrupts here.  Since currently disabled,
	 * no need to take further asm precautions against preemption; but
	 * use local_paca instead of get_paca() to avoid preemption checking.
	 */
	local_paca->hard_enabled = en;
	if ((int)mfspr(SPRN_DEC) < 0)
		mtspr(SPRN_DEC, 1);

	/*
	 * Force the delivery of pending soft-disabled interrupts on PS3.
	 * Any HV call will have this side effect.
	 */
	if (firmware_has_feature(FW_FEATURE_PS3_LV1)) {
		u64 tmp;
		lv1_get_version_info(&tmp);
	}

	__hard_irq_enable();
}
EXPORT_SYMBOL(raw_local_irq_restore);
#endif /* CONFIG_PPC64 */

int show_interrupts(struct seq_file *p, void *v)
{
	int i = *(loff_t *)v, j;
	struct irqaction *action;
	struct irq_desc *desc;
	unsigned long flags;

	if (i == 0) {
		seq_puts(p, " ");
		for_each_online_cpu(j)
			seq_printf(p, "CPU%d ", j);
		seq_putc(p, '\n');
	}

	if (i < NR_IRQS) {
		desc = get_irq_desc(i);
		spin_lock_irqsave(&desc->lock, flags);
		action = desc->action;
		if (!action || !action->handler)
			goto skip;
		seq_printf(p, "%3d: ", i);
#ifdef CONFIG_SMP
		for_each_online_cpu(j)
			seq_printf(p, "%10u ", kstat_irqs_cpu(i, j));
#else
		seq_printf(p, "%10u ", kstat_irqs(i));
#endif /* CONFIG_SMP */
		if (desc->chip)
			seq_printf(p, " %s ", desc->chip->typename);
		else
			seq_puts(p, " None ");
		seq_printf(p, "%s", (desc->status & IRQ_LEVEL) ? "Level " : "Edge ");
		seq_printf(p, " %s", action->name);
		for (action = action->next; action; action = action->next)
			seq_printf(p, ", %s", action->name);
		seq_putc(p, '\n');
skip:
		spin_unlock_irqrestore(&desc->lock, flags);
	} else if (i == NR_IRQS) {
#if defined(CONFIG_PPC32) && defined(CONFIG_TAU_INT)
		if (tau_initialized) {
			seq_puts(p, "TAU: ");
			for_each_online_cpu(j)
				seq_printf(p, "%10u ", tau_interrupts(j));
			seq_puts(p, " PowerPC Thermal Assist (cpu temp)\n");
		}
#endif /* CONFIG_PPC32 && CONFIG_TAU_INT */
		seq_printf(p, "BAD: %10u\n", ppc_spurious_interrupts);
	}
	return 0;
}
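
/*
 * An illustrative /proc/interrupts line produced by the code above
 * (counts, chip and device names made up):
 *
 *	 16:      12345      67890  MPIC  Level  eth0
 */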

#ifdef CONFIG_HOTPLUG_CPU
/* Re-route interrupts so none is left targeting only CPUs absent from "map" */
void fixup_irqs(cpumask_t map)
{
	unsigned int irq;
	static int warned;

	for_each_irq(irq) {
		cpumask_t mask;

		if (irq_desc[irq].status & IRQ_PER_CPU)
			continue;

		cpumask_and(&mask, irq_desc[irq].affinity, &map);
		if (any_online_cpu(mask) == NR_CPUS) {
			printk("Breaking affinity for irq %i\n", irq);
			mask = map;
		}
		if (irq_desc[irq].chip->set_affinity)
			irq_desc[irq].chip->set_affinity(irq, &mask);
		else if (irq_desc[irq].action && !(warned++))
			printk("Cannot set affinity for irq %i\n", irq);
	}

	local_irq_enable();
	mdelay(1);
	local_irq_disable();
}
#endif

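/*
 * With CONFIG_IRQSTACKS, each CPU owns dedicated hardirq/softirq stacks
 * (hardirq_ctx/softirq_ctx, set up in irq_ctx_init() below), so nested
 * interrupt handling cannot overflow the interrupted task's kernel
 * stack; handle_one_irq() switches to that stack, adjusting ksp_limit
 * accordingly, before calling the handler.
 */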
#ifdef CONFIG_IRQSTACKS
static inline void handle_one_irq(unsigned int irq)
{
	struct thread_info *curtp, *irqtp;
	unsigned long saved_sp_limit;
	struct irq_desc *desc;
	void *handler;

	/* Switch to the irq stack to handle this */
	curtp = current_thread_info();
	irqtp = hardirq_ctx[smp_processor_id()];

	if (curtp == irqtp) {
		/* We're already on the irq stack, just handle it */
		generic_handle_irq(irq);
		return;
	}

	desc = irq_desc + irq;
	saved_sp_limit = current->thread.ksp_limit;

	handler = desc->handle_irq;
	if (handler == NULL)
		handler = &__do_IRQ;

	irqtp->task = curtp->task;
	irqtp->flags = 0;

	/* Copy the softirq bits in preempt_count so that the
	 * softirq checks work in the hardirq context. */
	irqtp->preempt_count = (irqtp->preempt_count & ~SOFTIRQ_MASK) |
			       (curtp->preempt_count & SOFTIRQ_MASK);

	current->thread.ksp_limit = (unsigned long)irqtp +
				    _ALIGN_UP(sizeof(struct thread_info), 16);

	call_handle_irq(irq, desc, irqtp, handler);
	current->thread.ksp_limit = saved_sp_limit;
	irqtp->task = NULL;

	/* Set any flag that may have been set on the
	 * alternate stack
	 */
	if (irqtp->flags)
		set_bits(irqtp->flags, &curtp->flags);
}
#else
static inline void handle_one_irq(unsigned int irq)
{
	generic_handle_irq(irq);
}
#endif

static inline void check_stack_overflow(void)
{
#ifdef CONFIG_DEBUG_STACKOVERFLOW
	long sp;

	sp = __get_SP() & (THREAD_SIZE-1);

	/* check for stack overflow: is there less than 2KB free? */
	if (unlikely(sp < (sizeof(struct thread_info) + 2048))) {
		printk("do_IRQ: stack overflow: %ld\n",
		       sp - sizeof(struct thread_info));
		dump_stack();
	}
#endif
}

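/*
 * Common interrupt entry point: every external interrupt funnels
 * through do_IRQ(), which accounts the hardirq context via
 * irq_enter()/irq_exit(), asks the platform (ppc_md.get_irq) which
 * interrupt fired, and dispatches it through handle_one_irq().
 */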
void do_IRQ(struct pt_regs *regs)
{
	struct pt_regs *old_regs = set_irq_regs(regs);
	unsigned int irq;

	irq_enter();

	check_stack_overflow();

	irq = ppc_md.get_irq();

	if (irq != NO_IRQ && irq != NO_IRQ_IGNORE)
		handle_one_irq(irq);
	else if (irq != NO_IRQ_IGNORE)
		/* That's not SMP safe ... but who cares ? */
		ppc_spurious_interrupts++;

	irq_exit();
	set_irq_regs(old_regs);

#ifdef CONFIG_PPC_ISERIES
	if (firmware_has_feature(FW_FEATURE_ISERIES) &&
			get_lppaca()->int_dword.fields.decr_int) {
		get_lppaca()->int_dword.fields.decr_int = 0;
		/* Signal a fake decrementer interrupt */
		timer_interrupt(regs);
	}
#endif
}

void __init init_IRQ(void)
{
	if (ppc_md.init_IRQ)
		ppc_md.init_IRQ();

	exc_lvl_ctx_init();

	irq_ctx_init();
}

#if defined(CONFIG_BOOKE) || defined(CONFIG_40x)
struct thread_info *critirq_ctx[NR_CPUS] __read_mostly;
struct thread_info *dbgirq_ctx[NR_CPUS] __read_mostly;
struct thread_info *mcheckirq_ctx[NR_CPUS] __read_mostly;

void exc_lvl_ctx_init(void)
{
	struct thread_info *tp;
	int i;

	for_each_possible_cpu(i) {
		memset((void *)critirq_ctx[i], 0, THREAD_SIZE);
		tp = critirq_ctx[i];
		tp->cpu = i;
		tp->preempt_count = 0;

#ifdef CONFIG_BOOKE
		memset((void *)dbgirq_ctx[i], 0, THREAD_SIZE);
		tp = dbgirq_ctx[i];
		tp->cpu = i;
		tp->preempt_count = 0;

		memset((void *)mcheckirq_ctx[i], 0, THREAD_SIZE);
		tp = mcheckirq_ctx[i];
		tp->cpu = i;
		tp->preempt_count = HARDIRQ_OFFSET;
#endif
	}
}
#endif

#ifdef CONFIG_IRQSTACKS
struct thread_info *softirq_ctx[NR_CPUS] __read_mostly;
struct thread_info *hardirq_ctx[NR_CPUS] __read_mostly;

void irq_ctx_init(void)
{
	struct thread_info *tp;
	int i;

	for_each_possible_cpu(i) {
		memset((void *)softirq_ctx[i], 0, THREAD_SIZE);
		tp = softirq_ctx[i];
		tp->cpu = i;
		tp->preempt_count = 0;

		memset((void *)hardirq_ctx[i], 0, THREAD_SIZE);
		tp = hardirq_ctx[i];
		tp->cpu = i;
		tp->preempt_count = HARDIRQ_OFFSET;
	}
}

static inline void do_softirq_onstack(void)
{
	struct thread_info *curtp, *irqtp;
	unsigned long saved_sp_limit = current->thread.ksp_limit;

	curtp = current_thread_info();
	irqtp = softirq_ctx[smp_processor_id()];
	irqtp->task = curtp->task;
	current->thread.ksp_limit = (unsigned long)irqtp +
				    _ALIGN_UP(sizeof(struct thread_info), 16);
	call_do_softirq(irqtp);
	current->thread.ksp_limit = saved_sp_limit;
	irqtp->task = NULL;
}

#else
#define do_softirq_onstack()	__do_softirq()
#endif /* CONFIG_IRQSTACKS */

void do_softirq(void)
{
	unsigned long flags;

	if (in_interrupt())
		return;

	local_irq_save(flags);

	if (local_softirq_pending())
		do_softirq_onstack();

	local_irq_restore(flags);
}

/*
 * IRQ controller and virtual interrupts
 */

static LIST_HEAD(irq_hosts);
static DEFINE_SPINLOCK(irq_big_lock);
static unsigned int revmap_trees_allocated;
static DEFINE_MUTEX(revmap_trees_mutex);
struct irq_map_entry irq_map[NR_IRQS];
static unsigned int irq_virq_count = NR_IRQS;
static struct irq_host *irq_default_host;

irq_hw_number_t virq_to_hw(unsigned int virq)
{
	return irq_map[virq].hwirq;
}
EXPORT_SYMBOL_GPL(virq_to_hw);

static int default_irq_host_match(struct irq_host *h, struct device_node *np)
{
	return h->of_node != NULL && h->of_node == np;
}

struct irq_host *irq_alloc_host(struct device_node *of_node,
				unsigned int revmap_type,
				unsigned int revmap_arg,
				struct irq_host_ops *ops,
				irq_hw_number_t inval_irq)
{
	struct irq_host *host;
	unsigned int size = sizeof(struct irq_host);
	unsigned int i;
	unsigned int *rmap;
	unsigned long flags;

	/* Allocate structure and revmap table if using linear mapping */
	if (revmap_type == IRQ_HOST_MAP_LINEAR)
		size += revmap_arg * sizeof(unsigned int);
	host = zalloc_maybe_bootmem(size, GFP_KERNEL);
	if (host == NULL)
		return NULL;

	/* Fill structure */
	host->revmap_type = revmap_type;
	host->inval_irq = inval_irq;
	host->ops = ops;
	host->of_node = of_node_get(of_node);

	if (host->ops->match == NULL)
		host->ops->match = default_irq_host_match;

	spin_lock_irqsave(&irq_big_lock, flags);

	/* If it's a legacy controller, check for duplicates and
	 * mark it as allocated (we use the irq 0 host pointer for that)
	 */
	if (revmap_type == IRQ_HOST_MAP_LEGACY) {
		if (irq_map[0].host != NULL) {
			spin_unlock_irqrestore(&irq_big_lock, flags);
			/* If we are early boot, we can't free the structure,
			 * too bad...
			 * this will be fixed once slab is made available early
			 * instead of the current cruft
			 */
			if (mem_init_done)
				kfree(host);
			return NULL;
		}
		irq_map[0].host = host;
	}

	list_add(&host->link, &irq_hosts);
	spin_unlock_irqrestore(&irq_big_lock, flags);

	/* Additional setups per revmap type */
	switch (revmap_type) {
	case IRQ_HOST_MAP_LEGACY:
		/* 0 is always the invalid number for legacy */
		host->inval_irq = 0;
		/* setup us as the host for all legacy interrupts */
		for (i = 1; i < NUM_ISA_INTERRUPTS; i++) {
			irq_map[i].hwirq = i;
			smp_wmb();
			irq_map[i].host = host;
			smp_wmb();

			/* Clear norequest flags */
			get_irq_desc(i)->status &= ~IRQ_NOREQUEST;

			/* Legacy flags are left to default at this point,
			 * one can then use irq_create_mapping() to
			 * explicitly change them
			 */
			ops->map(host, i, i);
		}
		break;
	case IRQ_HOST_MAP_LINEAR:
		rmap = (unsigned int *)(host + 1);
		for (i = 0; i < revmap_arg; i++)
			rmap[i] = NO_IRQ;
		host->revmap_data.linear.size = revmap_arg;
		smp_wmb();
		host->revmap_data.linear.revmap = rmap;
		break;
	default:
		break;
	}

	pr_debug("irq: Allocated host of type %d @0x%p\n", revmap_type, host);

	return host;
}

struct irq_host *irq_find_host(struct device_node *node)
{
	struct irq_host *h, *found = NULL;
	unsigned long flags;

	/* We might want to match the legacy controller last since
	 * it might potentially be set to match all interrupts in
	 * the absence of a device node. This isn't a problem so far
	 * yet though...
	 */
	spin_lock_irqsave(&irq_big_lock, flags);
	list_for_each_entry(h, &irq_hosts, link)
		if (h->ops->match(h, node)) {
			found = h;
			break;
		}
	spin_unlock_irqrestore(&irq_big_lock, flags);
	return found;
}
EXPORT_SYMBOL_GPL(irq_find_host);

void irq_set_default_host(struct irq_host *host)
{
	pr_debug("irq: Default host set to @0x%p\n", host);

	irq_default_host = host;
}

void irq_set_virq_count(unsigned int count)
{
	pr_debug("irq: Trying to set virq count to %d\n", count);

	BUG_ON(count < NUM_ISA_INTERRUPTS);
	if (count < NR_IRQS)
		irq_virq_count = count;
}

static int irq_setup_virq(struct irq_host *host, unsigned int virq,
			  irq_hw_number_t hwirq)
{
	/* Clear IRQ_NOREQUEST flag */
	get_irq_desc(virq)->status &= ~IRQ_NOREQUEST;

	/* map it */
	smp_wmb();
	irq_map[virq].hwirq = hwirq;
	smp_mb();

	if (host->ops->map(host, virq, hwirq)) {
		pr_debug("irq: -> mapping failed, freeing\n");
		irq_free_virt(virq, 1);
		return -1;
	}

	return 0;
}

unsigned int irq_create_direct_mapping(struct irq_host *host)
{
	unsigned int virq;

	if (host == NULL)
		host = irq_default_host;

	BUG_ON(host == NULL);
	WARN_ON(host->revmap_type != IRQ_HOST_MAP_NOMAP);

	virq = irq_alloc_virt(host, 1, 0);
	if (virq == NO_IRQ) {
		pr_debug("irq: create_direct virq allocation failed\n");
		return NO_IRQ;
	}

	pr_debug("irq: create_direct obtained virq %d\n", virq);

	if (irq_setup_virq(host, virq, virq))
		return NO_IRQ;

	return virq;
}

unsigned int irq_create_mapping(struct irq_host *host,
				irq_hw_number_t hwirq)
{
	unsigned int virq, hint;

	pr_debug("irq: irq_create_mapping(0x%p, 0x%lx)\n", host, hwirq);

	/* Look for default host if necessary */
	if (host == NULL)
		host = irq_default_host;
	if (host == NULL) {
		printk(KERN_WARNING "irq_create_mapping called for"
		       " NULL host, hwirq=%lx\n", hwirq);
		WARN_ON(1);
		return NO_IRQ;
	}
	pr_debug("irq: -> using host @%p\n", host);

	/* Check if mapping already exists, if it does, call
	 * host->ops->map() to update the flags
	 */
	virq = irq_find_mapping(host, hwirq);
	if (virq != NO_IRQ) {
		if (host->ops->remap)
			host->ops->remap(host, virq, hwirq);
		pr_debug("irq: -> existing mapping on virq %d\n", virq);
		return virq;
	}

	/* Get a virtual interrupt number */
	if (host->revmap_type == IRQ_HOST_MAP_LEGACY) {
		/* Handle legacy */
		virq = (unsigned int)hwirq;
		if (virq == 0 || virq >= NUM_ISA_INTERRUPTS)
			return NO_IRQ;
		return virq;
	} else {
		/* Allocate a virtual interrupt number */
		hint = hwirq % irq_virq_count;
		virq = irq_alloc_virt(host, 1, hint);
		if (virq == NO_IRQ) {
			pr_debug("irq: -> virq allocation failed\n");
			return NO_IRQ;
		}
	}

	if (irq_setup_virq(host, virq, hwirq))
		return NO_IRQ;

	printk(KERN_DEBUG "irq: irq %lu on host %s mapped to virtual irq %u\n",
	       hwirq, host->of_node ? host->of_node->full_name : "null", virq);

	return virq;
}
EXPORT_SYMBOL_GPL(irq_create_mapping);
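
/*
 * Illustrative use by a PIC driver (host and chip names hypothetical):
 * once an irq_host exists for the controller, each hardware interrupt
 * number is converted to a virtual one before use:
 *
 *	virq = irq_create_mapping(my_pic_host, hwirq);
 *	if (virq != NO_IRQ)
 *		set_irq_chip_and_handler(virq, &my_pic_chip, handle_level_irq);
 */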

unsigned int irq_create_of_mapping(struct device_node *controller,
				   u32 *intspec, unsigned int intsize)
{
	struct irq_host *host;
	irq_hw_number_t hwirq;
	unsigned int type = IRQ_TYPE_NONE;
	unsigned int virq;

	if (controller == NULL)
		host = irq_default_host;
	else
		host = irq_find_host(controller);
	if (host == NULL) {
		printk(KERN_WARNING "irq: no irq host found for %s !\n",
		       controller->full_name);
		return NO_IRQ;
	}

	/* If host has no translation, then we assume interrupt line */
	if (host->ops->xlate == NULL)
		hwirq = intspec[0];
	else {
		if (host->ops->xlate(host, controller, intspec, intsize,
				     &hwirq, &type))
			return NO_IRQ;
	}

	/* Create mapping */
	virq = irq_create_mapping(host, hwirq);
	if (virq == NO_IRQ)
		return virq;

	/* Set type if specified and different from the current one */
	if (type != IRQ_TYPE_NONE &&
	    type != (get_irq_desc(virq)->status & IRQF_TRIGGER_MASK))
		set_irq_type(virq, type);
	return virq;
}
EXPORT_SYMBOL_GPL(irq_create_of_mapping);

unsigned int irq_of_parse_and_map(struct device_node *dev, int index)
{
	struct of_irq oirq;

	if (of_irq_map_one(dev, index, &oirq))
		return NO_IRQ;

	return irq_create_of_mapping(oirq.controller, oirq.specifier,
				     oirq.size);
}
EXPORT_SYMBOL_GPL(irq_of_parse_and_map);
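
/*
 * Typical driver-side usage (illustrative, handler and device names
 * hypothetical): resolve a device-tree interrupt specifier to a virq,
 * then request it:
 *
 *	virq = irq_of_parse_and_map(np, 0);
 *	if (virq != NO_IRQ)
 *		rc = request_irq(virq, my_handler, 0, "mydev", dev);
 */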

void irq_dispose_mapping(unsigned int virq)
{
	struct irq_host *host;
	irq_hw_number_t hwirq;

	if (virq == NO_IRQ)
		return;

	host = irq_map[virq].host;
	WARN_ON(host == NULL);
	if (host == NULL)
		return;

	/* Never unmap legacy interrupts */
	if (host->revmap_type == IRQ_HOST_MAP_LEGACY)
		return;

	/* remove chip and handler */
	set_irq_chip_and_handler(virq, NULL, NULL);

	/* Make sure it's completed */
	synchronize_irq(virq);

	/* Tell the PIC about it */
	if (host->ops->unmap)
		host->ops->unmap(host, virq);
	smp_mb();

	/* Clear reverse map */
	hwirq = irq_map[virq].hwirq;
	switch (host->revmap_type) {
	case IRQ_HOST_MAP_LINEAR:
		if (hwirq < host->revmap_data.linear.size)
			host->revmap_data.linear.revmap[hwirq] = NO_IRQ;
		break;
	case IRQ_HOST_MAP_TREE:
		/*
		 * Check if radix tree allocated yet, if not then nothing to
		 * remove.
		 */
		smp_rmb();
		if (revmap_trees_allocated < 1)
			break;
		mutex_lock(&revmap_trees_mutex);
		radix_tree_delete(&host->revmap_data.tree, hwirq);
		mutex_unlock(&revmap_trees_mutex);
		break;
	}

	/* Destroy map */
	smp_mb();
	irq_map[virq].hwirq = host->inval_irq;

	/* Set some flags */
	get_irq_desc(virq)->status |= IRQ_NOREQUEST;

	/* Free it */
	irq_free_virt(virq, 1);
}
EXPORT_SYMBOL_GPL(irq_dispose_mapping);

unsigned int irq_find_mapping(struct irq_host *host,
			      irq_hw_number_t hwirq)
{
	unsigned int i;
	unsigned int hint = hwirq % irq_virq_count;

	/* Look for default host if necessary */
	if (host == NULL)
		host = irq_default_host;
	if (host == NULL)
		return NO_IRQ;

	/* legacy -> bail early */
	if (host->revmap_type == IRQ_HOST_MAP_LEGACY)
		return hwirq;

	/* Slow path does a linear search of the map */
	if (hint < NUM_ISA_INTERRUPTS)
		hint = NUM_ISA_INTERRUPTS;
	i = hint;
	do {
		if (irq_map[i].host == host &&
		    irq_map[i].hwirq == hwirq)
			return i;
		i++;
		if (i >= irq_virq_count)
			i = NUM_ISA_INTERRUPTS;
	} while (i != hint);
	return NO_IRQ;
}
EXPORT_SYMBOL_GPL(irq_find_mapping);

unsigned int irq_radix_revmap_lookup(struct irq_host *host,
				     irq_hw_number_t hwirq)
{
	struct irq_map_entry *ptr;
	unsigned int virq;

	WARN_ON(host->revmap_type != IRQ_HOST_MAP_TREE);

	/*
	 * Check if the radix tree exists and has been initialized.
	 * If not, we fall back to slow mode.
	 */
	if (revmap_trees_allocated < 2)
		return irq_find_mapping(host, hwirq);

	/* Now try to resolve */
	/*
	 * No rcu_read_lock(ing) needed, the ptr returned can't go under us
	 * as it's referencing an entry in the static irq_map table.
	 */
	ptr = radix_tree_lookup(&host->revmap_data.tree, hwirq);

	/*
	 * If found in radix tree, then fine.
	 * Else fall back to linear lookup - this should not happen in practice
	 * as it means that we failed to insert the node in the radix tree.
	 */
	if (ptr)
		virq = ptr - irq_map;
	else
		virq = irq_find_mapping(host, hwirq);

	return virq;
}

void irq_radix_revmap_insert(struct irq_host *host, unsigned int virq,
			     irq_hw_number_t hwirq)
{
	WARN_ON(host->revmap_type != IRQ_HOST_MAP_TREE);

	/*
	 * Check if the radix tree exists yet.
	 * If not, then the irq will be inserted into the tree when it gets
	 * initialized.
	 */
	smp_rmb();
	if (revmap_trees_allocated < 1)
		return;

	if (virq != NO_IRQ) {
		mutex_lock(&revmap_trees_mutex);
		radix_tree_insert(&host->revmap_data.tree, hwirq,
				  &irq_map[virq]);
		mutex_unlock(&revmap_trees_mutex);
	}
}

unsigned int irq_linear_revmap(struct irq_host *host,
			       irq_hw_number_t hwirq)
{
	unsigned int *revmap;

	WARN_ON(host->revmap_type != IRQ_HOST_MAP_LINEAR);

	/* Check revmap bounds */
	if (unlikely(hwirq >= host->revmap_data.linear.size))
		return irq_find_mapping(host, hwirq);

	/* Check if revmap was allocated */
	revmap = host->revmap_data.linear.revmap;
	if (unlikely(revmap == NULL))
		return irq_find_mapping(host, hwirq);

	/* Fill up revmap with slow path if no mapping found */
	if (unlikely(revmap[hwirq] == NO_IRQ))
		revmap[hwirq] = irq_find_mapping(host, hwirq);

	return revmap[hwirq];
}
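
/*
 * irq_linear_revmap() above is the fast path intended for interrupt
 * time (e.g. cascade demultiplexers): a bounds check plus one array
 * load, falling back to the linear search in irq_find_mapping() only
 * to populate a missing entry.
 */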

unsigned int irq_alloc_virt(struct irq_host *host,
			    unsigned int count,
			    unsigned int hint)
{
	unsigned long flags;
	unsigned int i, j, found = NO_IRQ;

	if (count == 0 || count > (irq_virq_count - NUM_ISA_INTERRUPTS))
		return NO_IRQ;

	spin_lock_irqsave(&irq_big_lock, flags);

	/* Use hint for 1 interrupt if any */
	if (count == 1 && hint >= NUM_ISA_INTERRUPTS &&
	    hint < irq_virq_count && irq_map[hint].host == NULL) {
		found = hint;
		goto hint_found;
	}

	/* Look for count consecutive numbers in the allocatable
	 * (non-legacy) space
	 */
	for (i = NUM_ISA_INTERRUPTS, j = 0; i < irq_virq_count; i++) {
		if (irq_map[i].host != NULL)
			j = 0;
		else
			j++;

		if (j == count) {
			found = i - count + 1;
			break;
		}
	}
	if (found == NO_IRQ) {
		spin_unlock_irqrestore(&irq_big_lock, flags);
		return NO_IRQ;
	}
 hint_found:
	for (i = found; i < (found + count); i++) {
		irq_map[i].hwirq = host->inval_irq;
		smp_wmb();
		irq_map[i].host = host;
	}
	spin_unlock_irqrestore(&irq_big_lock, flags);
	return found;
}

void irq_free_virt(unsigned int virq, unsigned int count)
{
	unsigned long flags;
	unsigned int i;

	WARN_ON(virq < NUM_ISA_INTERRUPTS);
	WARN_ON(count == 0 || (virq + count) > irq_virq_count);

	spin_lock_irqsave(&irq_big_lock, flags);
	for (i = virq; i < (virq + count); i++) {
		struct irq_host *host;

		if (i < NUM_ISA_INTERRUPTS ||
		    (virq + count) > irq_virq_count)
			continue;

		host = irq_map[i].host;
		irq_map[i].hwirq = host->inval_irq;
		smp_wmb();
		irq_map[i].host = NULL;
	}
	spin_unlock_irqrestore(&irq_big_lock, flags);
}

void irq_early_init(void)
{
	unsigned int i;

	for (i = 0; i < NR_IRQS; i++)
		get_irq_desc(i)->status |= IRQ_NOREQUEST;
}

/* We need to create the radix trees late */
static int irq_late_init(void)
{
	struct irq_host *h;
	unsigned int i;

	/*
	 * No mutual exclusion with respect to accessors of the tree is needed
	 * here as the synchronization is done via the state variable
	 * revmap_trees_allocated.
	 */
	list_for_each_entry(h, &irq_hosts, link) {
		if (h->revmap_type == IRQ_HOST_MAP_TREE)
			INIT_RADIX_TREE(&h->revmap_data.tree, GFP_KERNEL);
	}

	/*
	 * Make sure the radix trees inits are visible before setting
	 * the flag
	 */
	smp_wmb();
	revmap_trees_allocated = 1;

	/*
	 * Insert the reverse mapping for those interrupts already present
	 * in irq_map[].
	 */
	mutex_lock(&revmap_trees_mutex);
	for (i = 0; i < irq_virq_count; i++) {
		if (irq_map[i].host &&
		    (irq_map[i].host->revmap_type == IRQ_HOST_MAP_TREE))
			radix_tree_insert(&irq_map[i].host->revmap_data.tree,
					  irq_map[i].hwirq, &irq_map[i]);
	}
	mutex_unlock(&revmap_trees_mutex);

	/*
	 * Make sure the radix trees insertions are visible before setting
	 * the flag
	 */
	smp_wmb();
	revmap_trees_allocated = 2;

	return 0;
}
arch_initcall(irq_late_init);
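
/*
 * revmap_trees_allocated acts as a small state machine: 0 - the radix
 * trees do not exist yet; 1 - they are initialized and inserts are
 * allowed; 2 - pre-existing mappings have been inserted too, so lookups
 * may trust the tree.  The smp_wmb()/smp_rmb() pairs here and in the
 * accessors order tree updates against these transitions.
 */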

#ifdef CONFIG_VIRQ_DEBUG
static int virq_debug_show(struct seq_file *m, void *private)
{
	unsigned long flags;
	struct irq_desc *desc;
	const char *p;
	char none[] = "none";
	int i;

	seq_printf(m, "%-5s %-7s %-15s %s\n", "virq", "hwirq",
		   "chip name", "host name");

	for (i = 1; i < NR_IRQS; i++) {
		desc = get_irq_desc(i);
		spin_lock_irqsave(&desc->lock, flags);

		if (desc->action && desc->action->handler) {
			seq_printf(m, "%5d ", i);
			seq_printf(m, "0x%05lx ", virq_to_hw(i));

			if (desc->chip && desc->chip->typename)
				p = desc->chip->typename;
			else
				p = none;
			seq_printf(m, "%-15s ", p);

			if (irq_map[i].host && irq_map[i].host->of_node)
				p = irq_map[i].host->of_node->full_name;
			else
				p = none;
			seq_printf(m, "%s\n", p);
		}

		spin_unlock_irqrestore(&desc->lock, flags);
	}

	return 0;
}

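/*
 * Illustrative output of the virq_mapping debugfs file (values and
 * names made up):
 *
 *	virq  hwirq   chip name       host name
 *	   16 0x00010 MPIC            /soc/pic@40000
 */
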
static int virq_debug_open(struct inode *inode, struct file *file)
{
	return single_open(file, virq_debug_show, inode->i_private);
}

static const struct file_operations virq_debug_fops = {
	.open = virq_debug_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = single_release,
};

static int __init irq_debugfs_init(void)
{
	if (debugfs_create_file("virq_mapping", S_IRUGO, powerpc_debugfs_root,
				NULL, &virq_debug_fops) == NULL)
		return -ENOMEM;

	return 0;
}
__initcall(irq_debugfs_init);
#endif /* CONFIG_VIRQ_DEBUG */

#ifdef CONFIG_PPC64
static int __init setup_noirqdistrib(char *str)
{
	distribute_irqs = 0;
	return 1;
}

__setup("noirqdistrib", setup_noirqdistrib);
#endif /* CONFIG_PPC64 */