/*
 * Derived from arch/i386/kernel/irq.c
 * Copyright (C) 1992 Linus Torvalds
 * Adapted from arch/i386 by Gary Thomas
 * Copyright (C) 1995-1996 Gary Thomas (gdt@linuxppc.org)
 * Updated and modified by Cort Dougan <cort@fsmlabs.com>
 * Copyright (C) 1996-2001 Cort Dougan
 * Adapted for Power Macintosh by Paul Mackerras
 * Copyright (C) 1996 Paul Mackerras (paulus@cs.anu.edu.au)
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 *
 * This file contains the code used by various IRQ handling routines:
 * asking for different IRQs should be done through these routines
 * instead of just grabbing them. Thus setups with different IRQ numbers
 * shouldn't result in any weird surprises, and installing new handlers
 * should be easier.
 *
 * The MPC8xx has an interrupt mask in the SIU.  If a bit is set, the
 * interrupt is _enabled_.  As expected, IRQ0 is bit 0 in the 32-bit
 * mask register (of which only 16 are defined), hence the weird shifting
 * and complement of the cached_irq_mask.  I want to be able to stuff
 * this right into the SIU SMASK register.
 * Many of the prep/chrp functions are conditionally compiled on CONFIG_8xx
 * to reduce code space and undefined function references.
 */

#undef DEBUG
#include <linux/module.h>
#include <linux/threads.h>
#include <linux/kernel_stat.h>
#include <linux/signal.h>
#include <linux/sched.h>
#include <linux/ptrace.h>
#include <linux/ioport.h>
#include <linux/interrupt.h>
#include <linux/timex.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/delay.h>
#include <linux/irq.h>
#include <linux/seq_file.h>
#include <linux/cpumask.h>
#include <linux/profile.h>
#include <linux/bitops.h>
#include <linux/list.h>
#include <linux/radix-tree.h>
#include <linux/mutex.h>
#include <linux/bootmem.h>
#include <linux/pci.h>
#include <linux/debugfs.h>

#include <asm/uaccess.h>
#include <asm/system.h>
#include <asm/io.h>
#include <asm/pgtable.h>
#include <asm/irq.h>
#include <asm/cache.h>
#include <asm/prom.h>
#include <asm/ptrace.h>
#include <asm/machdep.h>
#include <asm/udbg.h>
#ifdef CONFIG_PPC64
#include <asm/paca.h>
#include <asm/firmware.h>
#include <asm/lv1call.h>
#endif
int __irq_offset_value;
static int ppc_spurious_interrupts;

#ifdef CONFIG_PPC32
EXPORT_SYMBOL(__irq_offset_value);
atomic_t ppc_n_lost_interrupts;

#ifndef CONFIG_PPC_MERGE
#define NR_MASK_WORDS   ((NR_IRQS + 31) / 32)
unsigned long ppc_cached_irq_mask[NR_MASK_WORDS];
#endif

#ifdef CONFIG_TAU_INT
extern int tau_initialized;
extern int tau_interrupts(int);
#endif
#endif /* CONFIG_PPC32 */

#if defined(CONFIG_SMP) && !defined(CONFIG_PPC_MERGE)
extern atomic_t ipi_recv;
extern atomic_t ipi_sent;
#endif

#ifdef CONFIG_PPC64
EXPORT_SYMBOL(irq_desc);

int distribute_irqs = 1;
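/*
 * 64-bit uses lazy interrupt disabling: the paca keeps a soft_enabled flag
 * saying whether interrupts are logically enabled and a hard_enabled flag
 * saying whether they are actually enabled in the hardware.  The helpers
 * below access those paca fields with a single load/store relative to r13
 * so that being preempted between two instructions cannot make us touch
 * another cpu's paca.
 */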
static inline unsigned long get_hard_enabled(void)
{
        unsigned long enabled;

        __asm__ __volatile__("lbz %0,%1(13)"
        : "=r" (enabled) : "i" (offsetof(struct paca_struct, hard_enabled)));

        return enabled;
}

static inline void set_soft_enabled(unsigned long enable)
{
        __asm__ __volatile__("stb %0,%1(13)"
        : : "r" (enable), "i" (offsetof(struct paca_struct, soft_enabled)));
}
void raw_local_irq_restore(unsigned long en)
{
        /*
         * get_paca()->soft_enabled = en;
         * Is it ever valid to use local_irq_restore(0) when soft_enabled is 1?
         * That was allowed before, and in such a case we do need to take care
         * that gcc will set soft_enabled directly via r13, not choose to use
         * an intermediate register, lest we're preempted to a different cpu.
         */
        set_soft_enabled(en);
        if (!en)
                return;

        if (firmware_has_feature(FW_FEATURE_ISERIES)) {
                /*
                 * Do we need to disable preemption here?  Not really: in the
                 * unlikely event that we're preempted to a different cpu in
                 * between getting r13, loading its lppaca_ptr, and loading
                 * its any_int, we might call iseries_handle_interrupts without
                 * an interrupt pending on the new cpu, but that's no disaster,
                 * is it?  And the business of preempting us off the old cpu
                 * would itself involve a local_irq_restore which handles the
                 * interrupt to that cpu.
                 *
                 * But use "local_paca->lppaca_ptr" instead of "get_lppaca()"
                 * to avoid any preemption checking added into get_paca().
                 */
                if (local_paca->lppaca_ptr->int_dword.any_int)
                        iseries_handle_interrupts();
        }

        /*
         * if (get_paca()->hard_enabled) return;
         * But again we need to take care that gcc gets hard_enabled directly
         * via r13, not choose to use an intermediate register, lest we're
         * preempted to a different cpu in between the two instructions.
         */
        if (get_hard_enabled())
                return;

        /*
         * Need to hard-enable interrupts here.  Since currently disabled,
         * no need to take further asm precautions against preemption; but
         * use local_paca instead of get_paca() to avoid preemption checking.
         */
        local_paca->hard_enabled = en;
        if ((int)mfspr(SPRN_DEC) < 0)
                mtspr(SPRN_DEC, 1);

        /*
         * Force the delivery of pending soft-disabled interrupts on PS3.
         * Any HV call will have this side effect.
         */
        if (firmware_has_feature(FW_FEATURE_PS3_LV1)) {
                u64 tmp;
                lv1_get_version_info(&tmp);
        }

        __hard_irq_enable();
}
EXPORT_SYMBOL(raw_local_irq_restore);
#endif /* CONFIG_PPC64 */
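/*
 * /proc/interrupts support: one row per interrupt with per-cpu counts,
 * the controller and action names, plus (where configured) the TAU and
 * IPI statistics and the spurious interrupt count at the end.
 */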
int show_interrupts(struct seq_file *p, void *v)
{
        int i = *(loff_t *)v, j;
        struct irqaction *action;
        irq_desc_t *desc;
        unsigned long flags;

        if (i == 0) {
                seq_puts(p, " ");
                for_each_online_cpu(j)
                        seq_printf(p, "CPU%d ", j);
                seq_putc(p, '\n');
        }

        if (i < NR_IRQS) {
                desc = get_irq_desc(i);
                spin_lock_irqsave(&desc->lock, flags);
                action = desc->action;
                if (!action || !action->handler)
                        goto skip;
                seq_printf(p, "%3d: ", i);
#ifdef CONFIG_SMP
                for_each_online_cpu(j)
                        seq_printf(p, "%10u ", kstat_cpu(j).irqs[i]);
#else
                seq_printf(p, "%10u ", kstat_irqs(i));
#endif /* CONFIG_SMP */
                if (desc->chip)
                        seq_printf(p, " %s ", desc->chip->typename);
                else
                        seq_puts(p, " None ");
                seq_printf(p, "%s", (desc->status & IRQ_LEVEL) ? "Level " : "Edge ");
                seq_printf(p, " %s", action->name);
                for (action = action->next; action; action = action->next)
                        seq_printf(p, ", %s", action->name);
                seq_putc(p, '\n');
skip:
                spin_unlock_irqrestore(&desc->lock, flags);
        } else if (i == NR_IRQS) {
#ifdef CONFIG_PPC32
#ifdef CONFIG_TAU_INT
                if (tau_initialized) {
                        seq_puts(p, "TAU: ");
                        for_each_online_cpu(j)
                                seq_printf(p, "%10u ", tau_interrupts(j));
                        seq_puts(p, " PowerPC Thermal Assist (cpu temp)\n");
                }
#endif
#if defined(CONFIG_SMP) && !defined(CONFIG_PPC_MERGE)
                /* should this be per processor send/receive? */
                seq_printf(p, "IPI (recv/sent): %10u/%u\n",
                           atomic_read(&ipi_recv), atomic_read(&ipi_sent));
#endif
#endif /* CONFIG_PPC32 */
                seq_printf(p, "BAD: %10u\n", ppc_spurious_interrupts);
        }
        return 0;
}
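/*
 * When a cpu goes offline, redirect any interrupt whose affinity mask no
 * longer intersects the remaining online cpus (passed in as @map) so that
 * it keeps being serviced.
 */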
#ifdef CONFIG_HOTPLUG_CPU
void fixup_irqs(cpumask_t map)
{
        unsigned int irq;
        static int warned;

        for_each_irq(irq) {
                cpumask_t mask;

                if (irq_desc[irq].status & IRQ_PER_CPU)
                        continue;

                cpus_and(mask, irq_desc[irq].affinity, map);
                if (any_online_cpu(mask) == NR_CPUS) {
                        printk("Breaking affinity for irq %i\n", irq);
                        mask = map;
                }
                if (irq_desc[irq].chip->set_affinity)
                        irq_desc[irq].chip->set_affinity(irq, mask);
                else if (irq_desc[irq].action && !(warned++))
                        printk("Cannot set affinity for irq %i\n", irq);
        }

        local_irq_enable();
        mdelay(1);
        local_irq_disable();
}
#endif
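/*
 * Common external interrupt entry point: ask the platform which interrupt
 * is pending, switch to the per-cpu hard irq stack when configured, and
 * invoke the flow handler for that interrupt.
 */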
void do_IRQ(struct pt_regs *regs)
{
        struct pt_regs *old_regs = set_irq_regs(regs);
        unsigned int irq;
#ifdef CONFIG_IRQSTACKS
        struct thread_info *curtp, *irqtp;
#endif

        irq_enter();

#ifdef CONFIG_DEBUG_STACKOVERFLOW
        /* Debugging check for stack overflow: is there less than 2KB free? */
        {
                long sp;

                sp = __get_SP() & (THREAD_SIZE-1);

                if (unlikely(sp < (sizeof(struct thread_info) + 2048))) {
                        printk("do_IRQ: stack overflow: %ld\n",
                                sp - sizeof(struct thread_info));
                        dump_stack();
                }
        }
#endif

        /*
         * Every platform is required to implement ppc_md.get_irq.
         * This function will either return an irq number or NO_IRQ to
         * indicate there are no more pending.
         * The value NO_IRQ_IGNORE is for buggy hardware and means that this
         * IRQ has already been handled. -- Tom
         */
        irq = ppc_md.get_irq();

        if (irq != NO_IRQ && irq != NO_IRQ_IGNORE) {
#ifdef CONFIG_IRQSTACKS
                /* Switch to the irq stack to handle this */
                curtp = current_thread_info();
                irqtp = hardirq_ctx[smp_processor_id()];
                if (curtp != irqtp) {
                        struct irq_desc *desc = irq_desc + irq;
                        void *handler = desc->handle_irq;
                        unsigned long saved_sp_limit = current->thread.ksp_limit;
                        if (handler == NULL)
                                handler = &__do_IRQ;
                        irqtp->task = curtp->task;
                        irqtp->flags = 0;

                        /* Copy the softirq bits in preempt_count so that the
                         * softirq checks work in the hardirq context.
                         */
                        irqtp->preempt_count =
                                (irqtp->preempt_count & ~SOFTIRQ_MASK) |
                                (curtp->preempt_count & SOFTIRQ_MASK);

                        current->thread.ksp_limit = (unsigned long)irqtp +
                                _ALIGN_UP(sizeof(struct thread_info), 16);
                        call_handle_irq(irq, desc, irqtp, handler);
                        current->thread.ksp_limit = saved_sp_limit;
                        irqtp->task = NULL;

                        /* Set any flag that may have been set on the
                         * alternate stack
                         */
                        if (irqtp->flags)
                                set_bits(irqtp->flags, &curtp->flags);
                } else
#endif
                        generic_handle_irq(irq);
        } else if (irq != NO_IRQ_IGNORE)
                /* That's not SMP safe ... but who cares ? */
                ppc_spurious_interrupts++;

        irq_exit();
        set_irq_regs(old_regs);

#ifdef CONFIG_PPC_ISERIES
        if (firmware_has_feature(FW_FEATURE_ISERIES) &&
                        get_lppaca()->int_dword.fields.decr_int) {
                get_lppaca()->int_dword.fields.decr_int = 0;
                /* Signal a fake decrementer interrupt */
                timer_interrupt(regs);
        }
#endif
}

void __init init_IRQ(void)
{
        if (ppc_md.init_IRQ)
                ppc_md.init_IRQ();

        exc_lvl_ctx_init();

        irq_ctx_init();
}
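/*
 * 40x and BookE take critical (and, on BookE, debug and machine check)
 * exceptions on dedicated stacks; initialise the thread_info living at
 * the bottom of each of those stacks.
 */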
#if defined(CONFIG_BOOKE) || defined(CONFIG_40x)
struct thread_info *critirq_ctx[NR_CPUS] __read_mostly;
struct thread_info *dbgirq_ctx[NR_CPUS] __read_mostly;
struct thread_info *mcheckirq_ctx[NR_CPUS] __read_mostly;

void exc_lvl_ctx_init(void)
{
        struct thread_info *tp;
        int i;

        for_each_possible_cpu(i) {
                memset((void *)critirq_ctx[i], 0, THREAD_SIZE);
                tp = critirq_ctx[i];
                tp->cpu = i;
                tp->preempt_count = 0;

#ifdef CONFIG_BOOKE
                memset((void *)dbgirq_ctx[i], 0, THREAD_SIZE);
                tp = dbgirq_ctx[i];
                tp->cpu = i;
                tp->preempt_count = 0;

                memset((void *)mcheckirq_ctx[i], 0, THREAD_SIZE);
                tp = mcheckirq_ctx[i];
                tp->cpu = i;
                tp->preempt_count = HARDIRQ_OFFSET;
#endif
        }
}
#endif
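/*
 * Optional dedicated per-cpu stacks for hard and soft interrupt
 * processing, so interrupt handling does not eat into the kernel stack
 * of whatever task happened to be running.
 */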
#ifdef CONFIG_IRQSTACKS
struct thread_info *softirq_ctx[NR_CPUS] __read_mostly;
struct thread_info *hardirq_ctx[NR_CPUS] __read_mostly;

void irq_ctx_init(void)
{
        struct thread_info *tp;
        int i;

        for_each_possible_cpu(i) {
                memset((void *)softirq_ctx[i], 0, THREAD_SIZE);
                tp = softirq_ctx[i];
                tp->cpu = i;
                tp->preempt_count = 0;

                memset((void *)hardirq_ctx[i], 0, THREAD_SIZE);
                tp = hardirq_ctx[i];
                tp->cpu = i;
                tp->preempt_count = HARDIRQ_OFFSET;
        }
}

static inline void do_softirq_onstack(void)
{
        struct thread_info *curtp, *irqtp;
        unsigned long saved_sp_limit = current->thread.ksp_limit;

        curtp = current_thread_info();
        irqtp = softirq_ctx[smp_processor_id()];
        irqtp->task = curtp->task;
        current->thread.ksp_limit = (unsigned long)irqtp +
                                    _ALIGN_UP(sizeof(struct thread_info), 16);
        call_do_softirq(irqtp);
        current->thread.ksp_limit = saved_sp_limit;
        irqtp->task = NULL;
}

#else
#define do_softirq_onstack()    __do_softirq()
#endif /* CONFIG_IRQSTACKS */
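/* Run any pending softirqs, on the dedicated softirq stack when one is
 * configured.
 */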
void do_softirq(void)
{
        unsigned long flags;

        if (in_interrupt())
                return;

        local_irq_save(flags);

        if (local_softirq_pending())
                do_softirq_onstack();

        local_irq_restore(flags);
}


/*
 * IRQ controller and virtual interrupts
 */

#ifdef CONFIG_PPC_MERGE

static LIST_HEAD(irq_hosts);
static DEFINE_SPINLOCK(irq_big_lock);
static DEFINE_PER_CPU(unsigned int, irq_radix_reader);
static unsigned int irq_radix_writer;
struct irq_map_entry irq_map[NR_IRQS];
static unsigned int irq_virq_count = NR_IRQS;
static struct irq_host *irq_default_host;
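/*
 * irq_map[] ties every linux virtual irq number to the irq_host
 * (interrupt controller) owning it and to its hardware irq number on
 * that controller.  The first NUM_ISA_INTERRUPTS entries are reserved
 * for legacy controllers, which need a fixed 1:1 mapping.
 */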
irq_hw_number_t virq_to_hw(unsigned int virq)
{
        return irq_map[virq].hwirq;
}
EXPORT_SYMBOL_GPL(virq_to_hw);

static int default_irq_host_match(struct irq_host *h, struct device_node *np)
{
        return h->of_node != NULL && h->of_node == np;
}
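/*
 * Allocate and register an irq_host for the controller described by
 * @of_node.  Linear hosts get their reverse map array allocated right
 * behind the structure; legacy hosts immediately claim and map the ISA
 * interrupt range.
 */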
struct irq_host *irq_alloc_host(struct device_node *of_node,
                                unsigned int revmap_type,
                                unsigned int revmap_arg,
                                struct irq_host_ops *ops,
                                irq_hw_number_t inval_irq)
{
        struct irq_host *host;
        unsigned int size = sizeof(struct irq_host);
        unsigned int i;
        unsigned int *rmap;
        unsigned long flags;

        /* Allocate structure and revmap table if using linear mapping */
        if (revmap_type == IRQ_HOST_MAP_LINEAR)
                size += revmap_arg * sizeof(unsigned int);
        host = zalloc_maybe_bootmem(size, GFP_KERNEL);
        if (host == NULL)
                return NULL;

        /* Fill structure */
        host->revmap_type = revmap_type;
        host->inval_irq = inval_irq;
        host->ops = ops;
        host->of_node = of_node;

        if (host->ops->match == NULL)
                host->ops->match = default_irq_host_match;

        spin_lock_irqsave(&irq_big_lock, flags);

        /* If it's a legacy controller, check for duplicates and
         * mark it as allocated (we use the irq 0 host pointer for that)
         */
        if (revmap_type == IRQ_HOST_MAP_LEGACY) {
                if (irq_map[0].host != NULL) {
                        spin_unlock_irqrestore(&irq_big_lock, flags);
                        /* If we are early boot, we can't free the structure,
                         * too bad...
                         * this will be fixed once slab is made available early
                         * instead of the current cruft
                         */
                        if (mem_init_done)
                                kfree(host);
                        return NULL;
                }
                irq_map[0].host = host;
        }

        list_add(&host->link, &irq_hosts);
        spin_unlock_irqrestore(&irq_big_lock, flags);

        /* Additional setups per revmap type */
        switch(revmap_type) {
        case IRQ_HOST_MAP_LEGACY:
                /* 0 is always the invalid number for legacy */
                host->inval_irq = 0;
                /* setup us as the host for all legacy interrupts */
                for (i = 1; i < NUM_ISA_INTERRUPTS; i++) {
                        irq_map[i].hwirq = i;
                        smp_wmb();
                        irq_map[i].host = host;
                        smp_wmb();

                        /* Clear norequest flags */
                        get_irq_desc(i)->status &= ~IRQ_NOREQUEST;

                        /* Legacy flags are left to default at this point,
                         * one can then use irq_create_mapping() to
                         * explicitly change them
                         */
                        ops->map(host, i, i);
                }
                break;
        case IRQ_HOST_MAP_LINEAR:
                rmap = (unsigned int *)(host + 1);
                for (i = 0; i < revmap_arg; i++)
                        rmap[i] = NO_IRQ;
                host->revmap_data.linear.size = revmap_arg;
                smp_wmb();
                host->revmap_data.linear.revmap = rmap;
                break;
        default:
                break;
        }

        pr_debug("irq: Allocated host of type %d @0x%p\n", revmap_type, host);

        return host;
}
struct irq_host *irq_find_host(struct device_node *node)
{
        struct irq_host *h, *found = NULL;
        unsigned long flags;

        /* We might want to match the legacy controller last since
         * it might potentially be set to match all interrupts in
         * the absence of a device node. This isn't a problem so far
         * yet though...
         */
        spin_lock_irqsave(&irq_big_lock, flags);
        list_for_each_entry(h, &irq_hosts, link)
                if (h->ops->match(h, node)) {
                        found = h;
                        break;
                }
        spin_unlock_irqrestore(&irq_big_lock, flags);
        return found;
}
EXPORT_SYMBOL_GPL(irq_find_host);

void irq_set_default_host(struct irq_host *host)
{
        pr_debug("irq: Default host set to @0x%p\n", host);

        irq_default_host = host;
}

void irq_set_virq_count(unsigned int count)
{
        pr_debug("irq: Trying to set virq count to %d\n", count);

        BUG_ON(count < NUM_ISA_INTERRUPTS);
        if (count < NR_IRQS)
                irq_virq_count = count;
}
/* The radix tree is not lockless safe! We use a brlock-type mechanism
 * for now, until we can use a lockless radix tree
 */
static void irq_radix_wrlock(unsigned long *flags)
{
        unsigned int cpu, ok;

        spin_lock_irqsave(&irq_big_lock, *flags);
        irq_radix_writer = 1;
        smp_mb();
        do {
                barrier();
                ok = 1;
                for_each_possible_cpu(cpu) {
                        if (per_cpu(irq_radix_reader, cpu)) {
                                ok = 0;
                                break;
                        }
                }
                if (!ok)
                        cpu_relax();
        } while(!ok);
}

static void irq_radix_wrunlock(unsigned long flags)
{
        smp_wmb();
        irq_radix_writer = 0;
        spin_unlock_irqrestore(&irq_big_lock, flags);
}

static void irq_radix_rdlock(unsigned long *flags)
{
        local_irq_save(*flags);
        __get_cpu_var(irq_radix_reader) = 1;
        smp_mb();
        if (likely(irq_radix_writer == 0))
                return;
        __get_cpu_var(irq_radix_reader) = 0;
        smp_wmb();
        spin_lock(&irq_big_lock);
        __get_cpu_var(irq_radix_reader) = 1;
        spin_unlock(&irq_big_lock);
}

static void irq_radix_rdunlock(unsigned long flags)
{
        __get_cpu_var(irq_radix_reader) = 0;
        local_irq_restore(flags);
}
static int irq_setup_virq(struct irq_host *host, unsigned int virq,
                          irq_hw_number_t hwirq)
{
        /* Clear IRQ_NOREQUEST flag */
        get_irq_desc(virq)->status &= ~IRQ_NOREQUEST;

        /* map it */
        smp_wmb();
        irq_map[virq].hwirq = hwirq;
        smp_mb();

        if (host->ops->map(host, virq, hwirq)) {
                pr_debug("irq: -> mapping failed, freeing\n");
                irq_free_virt(virq, 1);
                return -1;
        }

        return 0;
}

unsigned int irq_create_direct_mapping(struct irq_host *host)
{
        unsigned int virq;

        if (host == NULL)
                host = irq_default_host;

        BUG_ON(host == NULL);
        WARN_ON(host->revmap_type != IRQ_HOST_MAP_NOMAP);

        virq = irq_alloc_virt(host, 1, 0);
        if (virq == NO_IRQ) {
                pr_debug("irq: create_direct virq allocation failed\n");
                return NO_IRQ;
        }

        pr_debug("irq: create_direct obtained virq %d\n", virq);

        if (irq_setup_virq(host, virq, virq))
                return NO_IRQ;

        return virq;
}
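/*
 * Map a hardware interrupt on the given host to a linux virtual irq,
 * reusing an existing mapping if one is already installed.
 */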
unsigned int irq_create_mapping(struct irq_host *host,
                                irq_hw_number_t hwirq)
{
        unsigned int virq, hint;

        pr_debug("irq: irq_create_mapping(0x%p, 0x%lx)\n", host, hwirq);

        /* Look for default host if necessary */
        if (host == NULL)
                host = irq_default_host;
        if (host == NULL) {
                printk(KERN_WARNING "irq_create_mapping called for"
                       " NULL host, hwirq=%lx\n", hwirq);
                WARN_ON(1);
                return NO_IRQ;
        }
        pr_debug("irq: -> using host @%p\n", host);

        /* Check if a mapping already exists, if it does, call
         * host->ops->map() to update the flags
         */
        virq = irq_find_mapping(host, hwirq);
        if (virq != NO_IRQ) {
                if (host->ops->remap)
                        host->ops->remap(host, virq, hwirq);
                pr_debug("irq: -> existing mapping on virq %d\n", virq);
                return virq;
        }

        /* Get a virtual interrupt number */
        if (host->revmap_type == IRQ_HOST_MAP_LEGACY) {
                /* Handle legacy */
                virq = (unsigned int)hwirq;
                if (virq == 0 || virq >= NUM_ISA_INTERRUPTS)
                        return NO_IRQ;
                return virq;
        } else {
                /* Allocate a virtual interrupt number */
                hint = hwirq % irq_virq_count;
                virq = irq_alloc_virt(host, 1, hint);
                if (virq == NO_IRQ) {
                        pr_debug("irq: -> virq allocation failed\n");
                        return NO_IRQ;
                }
        }
        pr_debug("irq: -> obtained virq %d\n", virq);

        if (irq_setup_virq(host, virq, hwirq))
                return NO_IRQ;

        return virq;
}
EXPORT_SYMBOL_GPL(irq_create_mapping);
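/*
 * Translate a device tree interrupt specifier into a hardware irq number
 * via the host's xlate() hook, create the mapping and apply the trigger
 * type that the specifier requested.
 */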
unsigned int irq_create_of_mapping(struct device_node *controller,
                                   u32 *intspec, unsigned int intsize)
{
        struct irq_host *host;
        irq_hw_number_t hwirq;
        unsigned int type = IRQ_TYPE_NONE;
        unsigned int virq;

        if (controller == NULL)
                host = irq_default_host;
        else
                host = irq_find_host(controller);
        if (host == NULL) {
                printk(KERN_WARNING "irq: no irq host found for %s !\n",
                       controller->full_name);
                return NO_IRQ;
        }

        /* If host has no translation, then we assume interrupt line */
        if (host->ops->xlate == NULL)
                hwirq = intspec[0];
        else {
                if (host->ops->xlate(host, controller, intspec, intsize,
                                     &hwirq, &type))
                        return NO_IRQ;
        }

        /* Create mapping */
        virq = irq_create_mapping(host, hwirq);
        if (virq == NO_IRQ)
                return virq;

        /* Set type if specified and different than the current one */
        if (type != IRQ_TYPE_NONE &&
            type != (get_irq_desc(virq)->status & IRQF_TRIGGER_MASK))
                set_irq_type(virq, type);
        return virq;
}
EXPORT_SYMBOL_GPL(irq_create_of_mapping);

unsigned int irq_of_parse_and_map(struct device_node *dev, int index)
{
        struct of_irq oirq;

        if (of_irq_map_one(dev, index, &oirq))
                return NO_IRQ;

        return irq_create_of_mapping(oirq.controller, oirq.specifier,
                                     oirq.size);
}
EXPORT_SYMBOL_GPL(irq_of_parse_and_map);
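/*
 * Tear down the mapping for @virq: detach the flow handler, notify the
 * host's unmap() hook, clear the reverse map entry and return the virq
 * to the allocator.
 */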
void irq_dispose_mapping(unsigned int virq)
{
        struct irq_host *host;
        irq_hw_number_t hwirq;
        unsigned long flags;

        if (virq == NO_IRQ)
                return;

        host = irq_map[virq].host;
        WARN_ON(host == NULL);
        if (host == NULL)
                return;

        /* Never unmap legacy interrupts */
        if (host->revmap_type == IRQ_HOST_MAP_LEGACY)
                return;

        /* remove chip and handler */
        set_irq_chip_and_handler(virq, NULL, NULL);

        /* Make sure it's completed */
        synchronize_irq(virq);

        /* Tell the PIC about it */
        if (host->ops->unmap)
                host->ops->unmap(host, virq);
        smp_mb();

        /* Clear reverse map */
        hwirq = irq_map[virq].hwirq;
        switch(host->revmap_type) {
        case IRQ_HOST_MAP_LINEAR:
                if (hwirq < host->revmap_data.linear.size)
                        host->revmap_data.linear.revmap[hwirq] = NO_IRQ;
                break;
        case IRQ_HOST_MAP_TREE:
                /* Check if radix tree allocated yet */
                if (host->revmap_data.tree.gfp_mask == 0)
                        break;
                irq_radix_wrlock(&flags);
                radix_tree_delete(&host->revmap_data.tree, hwirq);
                irq_radix_wrunlock(flags);
                break;
        }

        /* Destroy map */
        smp_mb();
        irq_map[virq].hwirq = host->inval_irq;

        /* Set some flags */
        get_irq_desc(virq)->status |= IRQ_NOREQUEST;

        /* Free it */
        irq_free_virt(virq, 1);
}
EXPORT_SYMBOL_GPL(irq_dispose_mapping);
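/*
 * Slow-path reverse mapping: a linear scan of irq_map[] for the given
 * host and hardware irq, starting from a hint derived from the hwirq.
 */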
unsigned int irq_find_mapping(struct irq_host *host,
                              irq_hw_number_t hwirq)
{
        unsigned int i;
        unsigned int hint = hwirq % irq_virq_count;

        /* Look for default host if necessary */
        if (host == NULL)
                host = irq_default_host;
        if (host == NULL)
                return NO_IRQ;

        /* legacy -> bail early */
        if (host->revmap_type == IRQ_HOST_MAP_LEGACY)
                return hwirq;

        /* Slow path does a linear search of the map */
        if (hint < NUM_ISA_INTERRUPTS)
                hint = NUM_ISA_INTERRUPTS;
        i = hint;
        do {
                if (irq_map[i].host == host &&
                    irq_map[i].hwirq == hwirq)
                        return i;
                i++;
                if (i >= irq_virq_count)
                        i = NUM_ISA_INTERRUPTS;
        } while(i != hint);
        return NO_IRQ;
}
EXPORT_SYMBOL_GPL(irq_find_mapping);
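/*
 * Radix tree based reverse mapping, meant for hosts with a large, sparse
 * hardware irq space.  Falls back to the linear scan until the tree has
 * been initialised, and inserts the entry on the first successful lookup.
 */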
unsigned int irq_radix_revmap(struct irq_host *host,
                              irq_hw_number_t hwirq)
{
        struct radix_tree_root *tree;
        struct irq_map_entry *ptr;
        unsigned int virq;
        unsigned long flags;

        WARN_ON(host->revmap_type != IRQ_HOST_MAP_TREE);

        /* Check if the radix tree exists yet. We test the value of
         * the gfp_mask for that. Sneaky but saves another int in the
         * structure. If not, we fall back to slow mode
         */
        tree = &host->revmap_data.tree;
        if (tree->gfp_mask == 0)
                return irq_find_mapping(host, hwirq);

        /* Now try to resolve */
        irq_radix_rdlock(&flags);
        ptr = radix_tree_lookup(tree, hwirq);
        irq_radix_rdunlock(flags);

        /* Found it, return */
        if (ptr) {
                virq = ptr - irq_map;
                return virq;
        }

        /* If not there, try to insert it */
        virq = irq_find_mapping(host, hwirq);
        if (virq != NO_IRQ) {
                irq_radix_wrlock(&flags);
                radix_tree_insert(tree, hwirq, &irq_map[virq]);
                irq_radix_wrunlock(flags);
        }
        return virq;
}
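/*
 * Array based reverse mapping for hosts with a small, dense hardware irq
 * space; missing entries are filled in (and cached) from the slow path.
 */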
unsigned int irq_linear_revmap(struct irq_host *host,
                               irq_hw_number_t hwirq)
{
        unsigned int *revmap;

        WARN_ON(host->revmap_type != IRQ_HOST_MAP_LINEAR);

        /* Check revmap bounds */
        if (unlikely(hwirq >= host->revmap_data.linear.size))
                return irq_find_mapping(host, hwirq);

        /* Check if revmap was allocated */
        revmap = host->revmap_data.linear.revmap;
        if (unlikely(revmap == NULL))
                return irq_find_mapping(host, hwirq);

        /* Fill up revmap with slow path if no mapping found */
        if (unlikely(revmap[hwirq] == NO_IRQ))
                revmap[hwirq] = irq_find_mapping(host, hwirq);

        return revmap[hwirq];
}
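/*
 * Allocate @count consecutive virtual irq numbers for @host from the
 * non-legacy part of the virq space, preferring @hint when a single
 * number is requested and that slot is free.
 */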
unsigned int irq_alloc_virt(struct irq_host *host,
                            unsigned int count,
                            unsigned int hint)
{
        unsigned long flags;
        unsigned int i, j, found = NO_IRQ;

        if (count == 0 || count > (irq_virq_count - NUM_ISA_INTERRUPTS))
                return NO_IRQ;

        spin_lock_irqsave(&irq_big_lock, flags);

        /* Use hint for 1 interrupt if any */
        if (count == 1 && hint >= NUM_ISA_INTERRUPTS &&
            hint < irq_virq_count && irq_map[hint].host == NULL) {
                found = hint;
                goto hint_found;
        }

        /* Look for count consecutive numbers in the allocatable
         * (non-legacy) space
         */
        for (i = NUM_ISA_INTERRUPTS, j = 0; i < irq_virq_count; i++) {
                if (irq_map[i].host != NULL)
                        j = 0;
                else
                        j++;

                if (j == count) {
                        found = i - count + 1;
                        break;
                }
        }
        if (found == NO_IRQ) {
                spin_unlock_irqrestore(&irq_big_lock, flags);
                return NO_IRQ;
        }
 hint_found:
        for (i = found; i < (found + count); i++) {
                irq_map[i].hwirq = host->inval_irq;
                smp_wmb();
                irq_map[i].host = host;
        }
        spin_unlock_irqrestore(&irq_big_lock, flags);
        return found;
}
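/*
 * Give @count virtual irq numbers starting at @virq back to the
 * allocator, detaching them from their host.
 */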
void irq_free_virt(unsigned int virq, unsigned int count)
{
        unsigned long flags;
        unsigned int i;

        WARN_ON(virq < NUM_ISA_INTERRUPTS);
        WARN_ON(count == 0 || (virq + count) > irq_virq_count);

        spin_lock_irqsave(&irq_big_lock, flags);
        for (i = virq; i < (virq + count); i++) {
                struct irq_host *host;

                if (i < NUM_ISA_INTERRUPTS ||
                    (virq + count) > irq_virq_count)
                        continue;

                host = irq_map[i].host;
                irq_map[i].hwirq = host->inval_irq;
                smp_wmb();
                irq_map[i].host = NULL;
        }
        spin_unlock_irqrestore(&irq_big_lock, flags);
}

void irq_early_init(void)
{
        unsigned int i;

        for (i = 0; i < NR_IRQS; i++)
                get_irq_desc(i)->status |= IRQ_NOREQUEST;
}

/* We need to create the radix trees late */
static int irq_late_init(void)
{
        struct irq_host *h;
        unsigned long flags;

        irq_radix_wrlock(&flags);
        list_for_each_entry(h, &irq_hosts, link) {
                if (h->revmap_type == IRQ_HOST_MAP_TREE)
                        INIT_RADIX_TREE(&h->revmap_data.tree, GFP_ATOMIC);
        }
        irq_radix_wrunlock(flags);

        return 0;
}
arch_initcall(irq_late_init);
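/*
 * Debugfs file listing every active virq together with its hardware irq
 * number, chip name and owning host, to help debug the remapping layer.
 */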
#ifdef CONFIG_VIRQ_DEBUG
static int virq_debug_show(struct seq_file *m, void *private)
{
        unsigned long flags;
        irq_desc_t *desc;
        const char *p;
        char none[] = "none";
        int i;

        seq_printf(m, "%-5s %-7s %-15s %s\n", "virq", "hwirq",
                   "chip name", "host name");

        for (i = 1; i < NR_IRQS; i++) {
                desc = get_irq_desc(i);
                spin_lock_irqsave(&desc->lock, flags);

                if (desc->action && desc->action->handler) {
                        seq_printf(m, "%5d ", i);
                        seq_printf(m, "0x%05lx ", virq_to_hw(i));

                        if (desc->chip && desc->chip->typename)
                                p = desc->chip->typename;
                        else
                                p = none;
                        seq_printf(m, "%-15s ", p);

                        if (irq_map[i].host && irq_map[i].host->of_node)
                                p = irq_map[i].host->of_node->full_name;
                        else
                                p = none;
                        seq_printf(m, "%s\n", p);
                }

                spin_unlock_irqrestore(&desc->lock, flags);
        }

        return 0;
}

static int virq_debug_open(struct inode *inode, struct file *file)
{
        return single_open(file, virq_debug_show, inode->i_private);
}

static const struct file_operations virq_debug_fops = {
        .open = virq_debug_open,
        .read = seq_read,
        .llseek = seq_lseek,
        .release = single_release,
};
static int __init irq_debugfs_init(void)
{
        if (debugfs_create_file("virq_mapping", S_IRUGO, powerpc_debugfs_root,
                                NULL, &virq_debug_fops) == NULL)
                return -ENOMEM;

        return 0;
}
__initcall(irq_debugfs_init);
#endif /* CONFIG_VIRQ_DEBUG */

#endif /* CONFIG_PPC_MERGE */
#ifdef CONFIG_PPC64
static int __init setup_noirqdistrib(char *str)
{
        distribute_irqs = 0;
        return 1;
}

__setup("noirqdistrib", setup_noirqdistrib);
#endif /* CONFIG_PPC64 */