powerpc: Add timer, performance monitor and machine check counts to /proc/interrupts
arch/powerpc/kernel/irq.c
/*
 * Derived from arch/i386/kernel/irq.c
 * Copyright (C) 1992 Linus Torvalds
 * Adapted from arch/i386 by Gary Thomas
 * Copyright (C) 1995-1996 Gary Thomas (gdt@linuxppc.org)
 * Updated and modified by Cort Dougan <cort@fsmlabs.com>
 * Copyright (C) 1996-2001 Cort Dougan
 * Adapted for Power Macintosh by Paul Mackerras
 * Copyright (C) 1996 Paul Mackerras (paulus@cs.anu.edu.au)
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 *
 * This file contains the code used by various IRQ handling routines:
 * asking for different IRQs should be done through these routines
 * instead of just grabbing them. Thus setups with different IRQ numbers
 * shouldn't result in any weird surprises, and installing new handlers
 * should be easier.
 *
 * The MPC8xx has an interrupt mask in the SIU. If a bit is set, the
 * interrupt is _enabled_. As expected, IRQ0 is bit 0 in the 32-bit
 * mask register (of which only 16 are defined), hence the weird shifting
 * and complement of the cached_irq_mask. I want to be able to stuff
 * this right into the SIU SMASK register.
 * Many of the prep/chrp functions are conditionally compiled on CONFIG_8xx
 * to reduce code space and undefined function references.
 */

#undef DEBUG

#include <linux/module.h>
#include <linux/threads.h>
#include <linux/kernel_stat.h>
#include <linux/signal.h>
#include <linux/sched.h>
#include <linux/ptrace.h>
#include <linux/ioport.h>
#include <linux/interrupt.h>
#include <linux/timex.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/delay.h>
#include <linux/irq.h>
#include <linux/seq_file.h>
#include <linux/cpumask.h>
#include <linux/profile.h>
#include <linux/bitops.h>
#include <linux/list.h>
#include <linux/radix-tree.h>
#include <linux/mutex.h>
#include <linux/bootmem.h>
#include <linux/pci.h>
#include <linux/debugfs.h>
#include <linux/perf_event.h>

#include <asm/uaccess.h>
#include <asm/system.h>
#include <asm/io.h>
#include <asm/pgtable.h>
#include <asm/irq.h>
#include <asm/cache.h>
#include <asm/prom.h>
#include <asm/ptrace.h>
#include <asm/machdep.h>
#include <asm/udbg.h>
#ifdef CONFIG_PPC64
#include <asm/paca.h>
#include <asm/firmware.h>
#include <asm/lv1call.h>
#endif
#define CREATE_TRACE_POINTS
#include <asm/trace.h>

DEFINE_PER_CPU_SHARED_ALIGNED(irq_cpustat_t, irq_stat);
EXPORT_PER_CPU_SYMBOL(irq_stat);

int __irq_offset_value;
static int ppc_spurious_interrupts;

#ifdef CONFIG_PPC32
EXPORT_SYMBOL(__irq_offset_value);
atomic_t ppc_n_lost_interrupts;

#ifdef CONFIG_TAU_INT
extern int tau_initialized;
extern int tau_interrupts(int);
#endif
#endif /* CONFIG_PPC32 */

#ifdef CONFIG_PPC64

#ifndef CONFIG_SPARSE_IRQ
EXPORT_SYMBOL(irq_desc);
#endif

int distribute_irqs = 1;

static inline notrace unsigned long get_hard_enabled(void)
{
        unsigned long enabled;

        __asm__ __volatile__("lbz %0,%1(13)"
        : "=r" (enabled) : "i" (offsetof(struct paca_struct, hard_enabled)));

        return enabled;
}

static inline notrace void set_soft_enabled(unsigned long enable)
{
        __asm__ __volatile__("stb %0,%1(13)"
        : : "r" (enable), "i" (offsetof(struct paca_struct, soft_enabled)));
}

notrace void raw_local_irq_restore(unsigned long en)
{
        /*
         * get_paca()->soft_enabled = en;
         * Is it ever valid to use local_irq_restore(0) when soft_enabled is 1?
         * That was allowed before, and in such a case we do need to take care
         * that gcc will set soft_enabled directly via r13, not choose to use
         * an intermediate register, lest we're preempted to a different cpu.
         */
        set_soft_enabled(en);
        if (!en)
                return;

#ifdef CONFIG_PPC_STD_MMU_64
        if (firmware_has_feature(FW_FEATURE_ISERIES)) {
                /*
                 * Do we need to disable preemption here? Not really: in the
                 * unlikely event that we're preempted to a different cpu in
                 * between getting r13, loading its lppaca_ptr, and loading
                 * its any_int, we might call iseries_handle_interrupts without
                 * an interrupt pending on the new cpu, but that's no disaster,
                 * is it? And the business of preempting us off the old cpu
                 * would itself involve a local_irq_restore which handles the
                 * interrupt to that cpu.
                 *
                 * But use "local_paca->lppaca_ptr" instead of "get_lppaca()"
                 * to avoid any preemption checking added into get_paca().
                 */
                if (local_paca->lppaca_ptr->int_dword.any_int)
                        iseries_handle_interrupts();
        }
#endif /* CONFIG_PPC_STD_MMU_64 */

        if (test_perf_event_pending()) {
                clear_perf_event_pending();
                perf_event_do_pending();
        }

        /*
         * if (get_paca()->hard_enabled) return;
         * But again we need to take care that gcc gets hard_enabled directly
         * via r13, not choose to use an intermediate register, lest we're
         * preempted to a different cpu in between the two instructions.
         */
        if (get_hard_enabled())
                return;

        /*
         * Need to hard-enable interrupts here. Since currently disabled,
         * no need to take further asm precautions against preemption; but
         * use local_paca instead of get_paca() to avoid preemption checking.
         */
        local_paca->hard_enabled = en;
        if ((int)mfspr(SPRN_DEC) < 0)
                mtspr(SPRN_DEC, 1);

        /*
         * Force the delivery of pending soft-disabled interrupts on PS3.
         * Any HV call will have this side effect.
         */
        if (firmware_has_feature(FW_FEATURE_PS3_LV1)) {
                u64 tmp;
                lv1_get_version_info(&tmp);
        }

        __hard_irq_enable();
}
EXPORT_SYMBOL(raw_local_irq_restore);
#endif /* CONFIG_PPC64 */

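/*
 * Usage sketch (editorial note, not part of the original source): under
 * the lazy-disable scheme above, local_irq_disable() only clears the
 * paca soft_enabled byte; MSR:EE typically stays set until an interrupt
 * actually arrives. local_irq_restore() then funnels into
 * raw_local_irq_restore() to replay anything held pending:
 *
 *      unsigned long flags;
 *
 *      local_irq_save(flags);          (soft-disable: clears soft_enabled)
 *      ... critical section ...
 *      local_irq_restore(flags);       (hard-enables again and replays any
 *                                       pending decrementer/perf events)
 */
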
static int show_other_interrupts(struct seq_file *p, int prec)
{
        int j;

#if defined(CONFIG_PPC32) && defined(CONFIG_TAU_INT)
        if (tau_initialized) {
                seq_printf(p, "%*s: ", prec, "TAU");
                for_each_online_cpu(j)
                        seq_printf(p, "%10u ", tau_interrupts(j));
                seq_puts(p, " PowerPC Thermal Assist (cpu temp)\n");
        }
#endif /* CONFIG_PPC32 && CONFIG_TAU_INT */

        seq_printf(p, "%*s: ", prec, "LOC");
        for_each_online_cpu(j)
                seq_printf(p, "%10u ", per_cpu(irq_stat, j).timer_irqs);
        seq_printf(p, " Local timer interrupts\n");

        seq_printf(p, "%*s: ", prec, "CNT");
        for_each_online_cpu(j)
                seq_printf(p, "%10u ", per_cpu(irq_stat, j).pmu_irqs);
        seq_printf(p, " Performance monitoring interrupts\n");

        seq_printf(p, "%*s: ", prec, "MCE");
        for_each_online_cpu(j)
                seq_printf(p, "%10u ", per_cpu(irq_stat, j).mce_exceptions);
        seq_printf(p, " Machine check exceptions\n");

        seq_printf(p, "%*s: %10u\n", prec, "BAD", ppc_spurious_interrupts);

        return 0;
}

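/*
 * Example output (editorial illustration, values made up): with the rows
 * above, the tail of /proc/interrupts on a two-cpu system looks roughly
 * like:
 *
 *      LOC:       2510       2713   Local timer interrupts
 *      CNT:          0          0   Performance monitoring interrupts
 *      MCE:          0          0   Machine check exceptions
 *      BAD:          5
 */
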
int show_interrupts(struct seq_file *p, void *v)
{
        unsigned long flags, any_count = 0;
        int i = *(loff_t *) v, j, prec;
        struct irqaction *action;
        struct irq_desc *desc;

        if (i > nr_irqs)
                return 0;

        for (prec = 3, j = 1000; prec < 10 && j <= nr_irqs; ++prec)
                j *= 10;

        if (i == nr_irqs)
                return show_other_interrupts(p, prec);

        /* print header */
        if (i == 0) {
                seq_printf(p, "%*s", prec + 8, "");
                for_each_online_cpu(j)
                        seq_printf(p, "CPU%-8d", j);
                seq_putc(p, '\n');
        }

        desc = irq_to_desc(i);
        if (!desc)
                return 0;

        raw_spin_lock_irqsave(&desc->lock, flags);
        for_each_online_cpu(j)
                any_count |= kstat_irqs_cpu(i, j);
        action = desc->action;
        if (!action && !any_count)
                goto out;

        seq_printf(p, "%*d: ", prec, i);
        for_each_online_cpu(j)
                seq_printf(p, "%10u ", kstat_irqs_cpu(i, j));

        if (desc->chip)
                seq_printf(p, " %-16s", desc->chip->name);
        else
                seq_printf(p, " %-16s", "None");
        seq_printf(p, " %-8s", (desc->status & IRQ_LEVEL) ? "Level" : "Edge");

        if (action) {
                seq_printf(p, " %s", action->name);
                while ((action = action->next) != NULL)
                        seq_printf(p, ", %s", action->name);
        }

        seq_putc(p, '\n');
out:
        raw_spin_unlock_irqrestore(&desc->lock, flags);
        return 0;
}

/*
 * /proc/stat helpers
 */
u64 arch_irq_stat_cpu(unsigned int cpu)
{
        u64 sum = per_cpu(irq_stat, cpu).timer_irqs;

        sum += per_cpu(irq_stat, cpu).pmu_irqs;
        sum += per_cpu(irq_stat, cpu).mce_exceptions;

        return sum;
}

u64 arch_irq_stat(void)
{
        u64 sum = ppc_spurious_interrupts;

        return sum;
}

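/*
 * Editorial note: these two helpers are the arch hooks consulted by the
 * generic /proc/stat code, so the per-cpu timer/PMU/MCE counts above are
 * also folded into the system-wide "intr" line, not just
 * /proc/interrupts.
 */
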
#ifdef CONFIG_HOTPLUG_CPU
void fixup_irqs(cpumask_t map)
{
        struct irq_desc *desc;
        unsigned int irq;
        static int warned;

        for_each_irq(irq) {
                cpumask_t mask;

                desc = irq_to_desc(irq);
                if (desc && desc->status & IRQ_PER_CPU)
                        continue;

                cpumask_and(&mask, desc->affinity, &map);
                if (any_online_cpu(mask) == NR_CPUS) {
                        printk("Breaking affinity for irq %i\n", irq);
                        mask = map;
                }
                if (desc->chip->set_affinity)
                        desc->chip->set_affinity(irq, &mask);
                else if (desc->action && !(warned++))
                        printk("Cannot set affinity for irq %i\n", irq);
        }

        local_irq_enable();
        mdelay(1);
        local_irq_disable();
}
#endif

#ifdef CONFIG_IRQSTACKS
static inline void handle_one_irq(unsigned int irq)
{
        struct thread_info *curtp, *irqtp;
        unsigned long saved_sp_limit;
        struct irq_desc *desc;

        /* Switch to the irq stack to handle this */
        curtp = current_thread_info();
        irqtp = hardirq_ctx[smp_processor_id()];

        if (curtp == irqtp) {
                /* We're already on the irq stack, just handle it */
                generic_handle_irq(irq);
                return;
        }

        desc = irq_to_desc(irq);
        saved_sp_limit = current->thread.ksp_limit;

        irqtp->task = curtp->task;
        irqtp->flags = 0;

        /* Copy the softirq bits in preempt_count so that the
         * softirq checks work in the hardirq context. */
        irqtp->preempt_count = (irqtp->preempt_count & ~SOFTIRQ_MASK) |
                (curtp->preempt_count & SOFTIRQ_MASK);

        current->thread.ksp_limit = (unsigned long)irqtp +
                _ALIGN_UP(sizeof(struct thread_info), 16);

        call_handle_irq(irq, desc, irqtp, desc->handle_irq);
        current->thread.ksp_limit = saved_sp_limit;
        irqtp->task = NULL;

        /* Set any flag that may have been set on the
         * alternate stack
         */
        if (irqtp->flags)
                set_bits(irqtp->flags, &curtp->flags);
}
#else
static inline void handle_one_irq(unsigned int irq)
{
        generic_handle_irq(irq);
}
#endif

static inline void check_stack_overflow(void)
{
#ifdef CONFIG_DEBUG_STACKOVERFLOW
        long sp;

        sp = __get_SP() & (THREAD_SIZE-1);

        /* check for stack overflow: is there less than 2KB free? */
        if (unlikely(sp < (sizeof(struct thread_info) + 2048))) {
                printk("do_IRQ: stack overflow: %ld\n",
                        sp - sizeof(struct thread_info));
                dump_stack();
        }
#endif
}

void do_IRQ(struct pt_regs *regs)
{
        struct pt_regs *old_regs = set_irq_regs(regs);
        unsigned int irq;

        trace_irq_entry(regs);

        irq_enter();

        check_stack_overflow();

        irq = ppc_md.get_irq();

        if (irq != NO_IRQ && irq != NO_IRQ_IGNORE)
                handle_one_irq(irq);
        else if (irq != NO_IRQ_IGNORE)
                /* That's not SMP safe ... but who cares ? */
                ppc_spurious_interrupts++;

        irq_exit();
        set_irq_regs(old_regs);

#ifdef CONFIG_PPC_ISERIES
        if (firmware_has_feature(FW_FEATURE_ISERIES) &&
                        get_lppaca()->int_dword.fields.decr_int) {
                get_lppaca()->int_dword.fields.decr_int = 0;
                /* Signal a fake decrementer interrupt */
                timer_interrupt(regs);
        }
#endif

        trace_irq_exit(regs);
}

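/*
 * Flow summary (editorial note): every external interrupt funnels
 * through do_IRQ() above. The exception vector calls it,
 * ppc_md.get_irq() asks the platform PIC which virtual irq fired, and
 * handle_one_irq() switches to the per-cpu hardirq stack (when
 * CONFIG_IRQSTACKS is set) before running the flow handler.
 */
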
void __init init_IRQ(void)
{
        if (ppc_md.init_IRQ)
                ppc_md.init_IRQ();

        exc_lvl_ctx_init();

        irq_ctx_init();
}

#if defined(CONFIG_BOOKE) || defined(CONFIG_40x)
struct thread_info *critirq_ctx[NR_CPUS] __read_mostly;
struct thread_info *dbgirq_ctx[NR_CPUS] __read_mostly;
struct thread_info *mcheckirq_ctx[NR_CPUS] __read_mostly;

void exc_lvl_ctx_init(void)
{
        struct thread_info *tp;
        int i;

        for_each_possible_cpu(i) {
                memset((void *)critirq_ctx[i], 0, THREAD_SIZE);
                tp = critirq_ctx[i];
                tp->cpu = i;
                tp->preempt_count = 0;

#ifdef CONFIG_BOOKE
                memset((void *)dbgirq_ctx[i], 0, THREAD_SIZE);
                tp = dbgirq_ctx[i];
                tp->cpu = i;
                tp->preempt_count = 0;

                memset((void *)mcheckirq_ctx[i], 0, THREAD_SIZE);
                tp = mcheckirq_ctx[i];
                tp->cpu = i;
                tp->preempt_count = HARDIRQ_OFFSET;
#endif
        }
}
#endif

#ifdef CONFIG_IRQSTACKS
struct thread_info *softirq_ctx[NR_CPUS] __read_mostly;
struct thread_info *hardirq_ctx[NR_CPUS] __read_mostly;

void irq_ctx_init(void)
{
        struct thread_info *tp;
        int i;

        for_each_possible_cpu(i) {
                memset((void *)softirq_ctx[i], 0, THREAD_SIZE);
                tp = softirq_ctx[i];
                tp->cpu = i;
                tp->preempt_count = 0;

                memset((void *)hardirq_ctx[i], 0, THREAD_SIZE);
                tp = hardirq_ctx[i];
                tp->cpu = i;
                tp->preempt_count = HARDIRQ_OFFSET;
        }
}

static inline void do_softirq_onstack(void)
{
        struct thread_info *curtp, *irqtp;
        unsigned long saved_sp_limit = current->thread.ksp_limit;

        curtp = current_thread_info();
        irqtp = softirq_ctx[smp_processor_id()];
        irqtp->task = curtp->task;
        current->thread.ksp_limit = (unsigned long)irqtp +
                _ALIGN_UP(sizeof(struct thread_info), 16);
        call_do_softirq(irqtp);
        current->thread.ksp_limit = saved_sp_limit;
        irqtp->task = NULL;
}

#else
#define do_softirq_onstack()    __do_softirq()
#endif /* CONFIG_IRQSTACKS */

void do_softirq(void)
{
        unsigned long flags;

        if (in_interrupt())
                return;

        local_irq_save(flags);

        if (local_softirq_pending())
                do_softirq_onstack();

        local_irq_restore(flags);
}

/*
 * IRQ controller and virtual interrupts
 */

static LIST_HEAD(irq_hosts);
static DEFINE_SPINLOCK(irq_big_lock);
static unsigned int revmap_trees_allocated;
static DEFINE_MUTEX(revmap_trees_mutex);
struct irq_map_entry irq_map[NR_IRQS];
static unsigned int irq_virq_count = NR_IRQS;
static struct irq_host *irq_default_host;

irq_hw_number_t virq_to_hw(unsigned int virq)
{
        return irq_map[virq].hwirq;
}
EXPORT_SYMBOL_GPL(virq_to_hw);

static int default_irq_host_match(struct irq_host *h, struct device_node *np)
{
        return h->of_node != NULL && h->of_node == np;
}

struct irq_host *irq_alloc_host(struct device_node *of_node,
                                unsigned int revmap_type,
                                unsigned int revmap_arg,
                                struct irq_host_ops *ops,
                                irq_hw_number_t inval_irq)
{
        struct irq_host *host;
        unsigned int size = sizeof(struct irq_host);
        unsigned int i;
        unsigned int *rmap;
        unsigned long flags;

        /* Allocate structure and revmap table if using linear mapping */
        if (revmap_type == IRQ_HOST_MAP_LINEAR)
                size += revmap_arg * sizeof(unsigned int);
        host = zalloc_maybe_bootmem(size, GFP_KERNEL);
        if (host == NULL)
                return NULL;

        /* Fill structure */
        host->revmap_type = revmap_type;
        host->inval_irq = inval_irq;
        host->ops = ops;
        host->of_node = of_node_get(of_node);

        if (host->ops->match == NULL)
                host->ops->match = default_irq_host_match;

        spin_lock_irqsave(&irq_big_lock, flags);

        /* If it's a legacy controller, check for duplicates and
         * mark it as allocated (we use the irq 0 host pointer for that)
         */
        if (revmap_type == IRQ_HOST_MAP_LEGACY) {
                if (irq_map[0].host != NULL) {
                        spin_unlock_irqrestore(&irq_big_lock, flags);
                        /* If we are early boot, we can't free the structure,
                         * too bad...
                         * this will be fixed once slab is made available early
                         * instead of the current cruft
                         */
                        if (mem_init_done)
                                kfree(host);
                        return NULL;
                }
                irq_map[0].host = host;
        }

        list_add(&host->link, &irq_hosts);
        spin_unlock_irqrestore(&irq_big_lock, flags);

        /* Additional setups per revmap type */
        switch (revmap_type) {
        case IRQ_HOST_MAP_LEGACY:
                /* 0 is always the invalid number for legacy */
                host->inval_irq = 0;
                /* setup us as the host for all legacy interrupts */
                for (i = 1; i < NUM_ISA_INTERRUPTS; i++) {
                        irq_map[i].hwirq = i;
                        smp_wmb();
                        irq_map[i].host = host;
                        smp_wmb();

                        /* Clear norequest flags */
                        irq_to_desc(i)->status &= ~IRQ_NOREQUEST;

                        /* Legacy flags are left to default at this point,
                         * one can then use irq_create_mapping() to
                         * explicitly change them
                         */
                        ops->map(host, i, i);
                }
                break;
        case IRQ_HOST_MAP_LINEAR:
                rmap = (unsigned int *)(host + 1);
                for (i = 0; i < revmap_arg; i++)
                        rmap[i] = NO_IRQ;
                host->revmap_data.linear.size = revmap_arg;
                smp_wmb();
                host->revmap_data.linear.revmap = rmap;
                break;
        default:
                break;
        }

        pr_debug("irq: Allocated host of type %d @0x%p\n", revmap_type, host);

        return host;
}

struct irq_host *irq_find_host(struct device_node *node)
{
        struct irq_host *h, *found = NULL;
        unsigned long flags;

        /* We might want to match the legacy controller last since
         * it might potentially be set to match all interrupts in
         * the absence of a device node. This isn't a problem so far
         * yet though...
         */
        spin_lock_irqsave(&irq_big_lock, flags);
        list_for_each_entry(h, &irq_hosts, link)
                if (h->ops->match(h, node)) {
                        found = h;
                        break;
                }
        spin_unlock_irqrestore(&irq_big_lock, flags);
        return found;
}
EXPORT_SYMBOL_GPL(irq_find_host);

void irq_set_default_host(struct irq_host *host)
{
        pr_debug("irq: Default host set to @0x%p\n", host);

        irq_default_host = host;
}

void irq_set_virq_count(unsigned int count)
{
        pr_debug("irq: Trying to set virq count to %d\n", count);

        BUG_ON(count < NUM_ISA_INTERRUPTS);
        if (count < NR_IRQS)
                irq_virq_count = count;
}

static int irq_setup_virq(struct irq_host *host, unsigned int virq,
                          irq_hw_number_t hwirq)
{
        struct irq_desc *desc;

        desc = irq_to_desc_alloc_node(virq, 0);
        if (!desc) {
                pr_debug("irq: -> allocating desc failed\n");
                goto error;
        }

        /* Clear IRQ_NOREQUEST flag */
        desc->status &= ~IRQ_NOREQUEST;

        /* map it */
        smp_wmb();
        irq_map[virq].hwirq = hwirq;
        smp_mb();

        if (host->ops->map(host, virq, hwirq)) {
                pr_debug("irq: -> mapping failed, freeing\n");
                goto error;
        }

        return 0;

error:
        irq_free_virt(virq, 1);
        return -1;
}

unsigned int irq_create_direct_mapping(struct irq_host *host)
{
        unsigned int virq;

        if (host == NULL)
                host = irq_default_host;

        BUG_ON(host == NULL);
        WARN_ON(host->revmap_type != IRQ_HOST_MAP_NOMAP);

        virq = irq_alloc_virt(host, 1, 0);
        if (virq == NO_IRQ) {
                pr_debug("irq: create_direct virq allocation failed\n");
                return NO_IRQ;
        }

        pr_debug("irq: create_direct obtained virq %d\n", virq);

        if (irq_setup_virq(host, virq, virq))
                return NO_IRQ;

        return virq;
}

unsigned int irq_create_mapping(struct irq_host *host,
                                irq_hw_number_t hwirq)
{
        unsigned int virq, hint;

        pr_debug("irq: irq_create_mapping(0x%p, 0x%lx)\n", host, hwirq);

        /* Look for default host if necessary */
        if (host == NULL)
                host = irq_default_host;
        if (host == NULL) {
                printk(KERN_WARNING "irq_create_mapping called for"
                       " NULL host, hwirq=%lx\n", hwirq);
                WARN_ON(1);
                return NO_IRQ;
        }
        pr_debug("irq: -> using host @%p\n", host);

        /* Check if mapping already exists; if it does, call
         * host->ops->map() to update the flags
         */
        virq = irq_find_mapping(host, hwirq);
        if (virq != NO_IRQ) {
                if (host->ops->remap)
                        host->ops->remap(host, virq, hwirq);
                pr_debug("irq: -> existing mapping on virq %d\n", virq);
                return virq;
        }

        /* Get a virtual interrupt number */
        if (host->revmap_type == IRQ_HOST_MAP_LEGACY) {
                /* Handle legacy */
                virq = (unsigned int)hwirq;
                if (virq == 0 || virq >= NUM_ISA_INTERRUPTS)
                        return NO_IRQ;
                return virq;
        } else {
                /* Allocate a virtual interrupt number */
                hint = hwirq % irq_virq_count;
                virq = irq_alloc_virt(host, 1, hint);
                if (virq == NO_IRQ) {
                        pr_debug("irq: -> virq allocation failed\n");
                        return NO_IRQ;
                }
        }

        if (irq_setup_virq(host, virq, hwirq))
                return NO_IRQ;

        printk(KERN_DEBUG "irq: irq %lu on host %s mapped to virtual irq %u\n",
                hwirq, host->of_node ? host->of_node->full_name : "null", virq);

        return virq;
}
EXPORT_SYMBOL_GPL(irq_create_mapping);

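/*
 * Usage sketch (editorial addition; "my_pic_host_ops", "my_handler" and
 * the size 64 are made-up examples): a platform PIC driver normally
 * allocates a host once at boot, then creates virq mappings for hardware
 * interrupt lines as devices are discovered, either directly as below or
 * via the device-tree helpers that follow.
 *
 *      host = irq_alloc_host(np, IRQ_HOST_MAP_LINEAR, 64,
 *                            &my_pic_host_ops, 0);
 *      virq = irq_create_mapping(host, hwirq);
 *      if (virq != NO_IRQ)
 *              request_irq(virq, my_handler, 0, "my-pic", NULL);
 */
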
unsigned int irq_create_of_mapping(struct device_node *controller,
                                   const u32 *intspec, unsigned int intsize)
{
        struct irq_host *host;
        irq_hw_number_t hwirq;
        unsigned int type = IRQ_TYPE_NONE;
        unsigned int virq;

        if (controller == NULL)
                host = irq_default_host;
        else
                host = irq_find_host(controller);
        if (host == NULL) {
                printk(KERN_WARNING "irq: no irq host found for %s !\n",
                       controller->full_name);
                return NO_IRQ;
        }

        /* If host has no translation, then we assume interrupt line */
        if (host->ops->xlate == NULL)
                hwirq = intspec[0];
        else {
                if (host->ops->xlate(host, controller, intspec, intsize,
                                     &hwirq, &type))
                        return NO_IRQ;
        }

        /* Create mapping */
        virq = irq_create_mapping(host, hwirq);
        if (virq == NO_IRQ)
                return virq;

        /* Set type if specified and different than the current one */
        if (type != IRQ_TYPE_NONE &&
            type != (irq_to_desc(virq)->status & IRQF_TRIGGER_MASK))
                set_irq_type(virq, type);
        return virq;
}
EXPORT_SYMBOL_GPL(irq_create_of_mapping);

unsigned int irq_of_parse_and_map(struct device_node *dev, int index)
{
        struct of_irq oirq;

        if (of_irq_map_one(dev, index, &oirq))
                return NO_IRQ;

        return irq_create_of_mapping(oirq.controller, oirq.specifier,
                                     oirq.size);
}
EXPORT_SYMBOL_GPL(irq_of_parse_and_map);

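/*
 * Typical driver-side use (editorial sketch; "my_isr", "my_dev" and
 * "my-device" are hypothetical names): a driver with an OF node resolves
 * and claims its interrupt via
 *
 *      virq = irq_of_parse_and_map(dev->node, 0);
 *      if (virq == NO_IRQ)
 *              return -EINVAL;
 *      ret = request_irq(virq, my_isr, 0, "my-device", my_dev);
 *
 * and releases it on teardown with free_irq() followed by
 * irq_dispose_mapping() below.
 */
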
void irq_dispose_mapping(unsigned int virq)
{
        struct irq_host *host;
        irq_hw_number_t hwirq;

        if (virq == NO_IRQ)
                return;

        host = irq_map[virq].host;
        WARN_ON(host == NULL);
        if (host == NULL)
                return;

        /* Never unmap legacy interrupts */
        if (host->revmap_type == IRQ_HOST_MAP_LEGACY)
                return;

        /* remove chip and handler */
        set_irq_chip_and_handler(virq, NULL, NULL);

        /* Make sure it's completed */
        synchronize_irq(virq);

        /* Tell the PIC about it */
        if (host->ops->unmap)
                host->ops->unmap(host, virq);
        smp_mb();

        /* Clear reverse map */
        hwirq = irq_map[virq].hwirq;
        switch (host->revmap_type) {
        case IRQ_HOST_MAP_LINEAR:
                if (hwirq < host->revmap_data.linear.size)
                        host->revmap_data.linear.revmap[hwirq] = NO_IRQ;
                break;
        case IRQ_HOST_MAP_TREE:
                /*
                 * Check if radix tree allocated yet, if not then nothing to
                 * remove.
                 */
                smp_rmb();
                if (revmap_trees_allocated < 1)
                        break;
                mutex_lock(&revmap_trees_mutex);
                radix_tree_delete(&host->revmap_data.tree, hwirq);
                mutex_unlock(&revmap_trees_mutex);
                break;
        }

        /* Destroy map */
        smp_mb();
        irq_map[virq].hwirq = host->inval_irq;

        /* Set some flags */
        irq_to_desc(virq)->status |= IRQ_NOREQUEST;

        /* Free it */
        irq_free_virt(virq, 1);
}
EXPORT_SYMBOL_GPL(irq_dispose_mapping);

unsigned int irq_find_mapping(struct irq_host *host,
                              irq_hw_number_t hwirq)
{
        unsigned int i;
        unsigned int hint = hwirq % irq_virq_count;

        /* Look for default host if necessary */
        if (host == NULL)
                host = irq_default_host;
        if (host == NULL)
                return NO_IRQ;

        /* legacy -> bail early */
        if (host->revmap_type == IRQ_HOST_MAP_LEGACY)
                return hwirq;

        /* Slow path does a linear search of the map */
        if (hint < NUM_ISA_INTERRUPTS)
                hint = NUM_ISA_INTERRUPTS;
        i = hint;
        do {
                if (irq_map[i].host == host &&
                    irq_map[i].hwirq == hwirq)
                        return i;
                i++;
                if (i >= irq_virq_count)
                        i = NUM_ISA_INTERRUPTS;
        } while (i != hint);
        return NO_IRQ;
}
EXPORT_SYMBOL_GPL(irq_find_mapping);

unsigned int irq_radix_revmap_lookup(struct irq_host *host,
                                     irq_hw_number_t hwirq)
{
        struct irq_map_entry *ptr;
        unsigned int virq;

        WARN_ON(host->revmap_type != IRQ_HOST_MAP_TREE);

        /*
         * Check if the radix tree exists and has been initialized.
         * If not, we fall back to slow mode
         */
        if (revmap_trees_allocated < 2)
                return irq_find_mapping(host, hwirq);

        /* Now try to resolve */
        /*
         * No rcu_read_lock(ing) needed, the ptr returned can't go under us
         * as it's referencing an entry in the static irq_map table.
         */
        ptr = radix_tree_lookup(&host->revmap_data.tree, hwirq);

        /*
         * If found in radix tree, then fine.
         * Else fall back to linear lookup - this should not happen in
         * practice as it means that we failed to insert the node in the
         * radix tree.
         */
        if (ptr)
                virq = ptr - irq_map;
        else
                virq = irq_find_mapping(host, hwirq);

        return virq;
}

void irq_radix_revmap_insert(struct irq_host *host, unsigned int virq,
                             irq_hw_number_t hwirq)
{
        WARN_ON(host->revmap_type != IRQ_HOST_MAP_TREE);

        /*
         * Check if the radix tree exists yet.
         * If not, then the irq will be inserted into the tree when it gets
         * initialized.
         */
        smp_rmb();
        if (revmap_trees_allocated < 1)
                return;

        if (virq != NO_IRQ) {
                mutex_lock(&revmap_trees_mutex);
                radix_tree_insert(&host->revmap_data.tree, hwirq,
                                  &irq_map[virq]);
                mutex_unlock(&revmap_trees_mutex);
        }
}

unsigned int irq_linear_revmap(struct irq_host *host,
                               irq_hw_number_t hwirq)
{
        unsigned int *revmap;

        WARN_ON(host->revmap_type != IRQ_HOST_MAP_LINEAR);

        /* Check revmap bounds */
        if (unlikely(hwirq >= host->revmap_data.linear.size))
                return irq_find_mapping(host, hwirq);

        /* Check if revmap was allocated */
        revmap = host->revmap_data.linear.revmap;
        if (unlikely(revmap == NULL))
                return irq_find_mapping(host, hwirq);

        /* Fill up revmap with slow path if no mapping found */
        if (unlikely(revmap[hwirq] == NO_IRQ))
                revmap[hwirq] = irq_find_mapping(host, hwirq);

        return revmap[hwirq];
}

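/*
 * Illustrative sketch (editorial addition; "my_host", "my_pic_read_pending"
 * and the handler name are hypothetical): a cascaded PIC's flow handler
 * typically uses irq_linear_revmap() on its hot path, since it is just a
 * bounds check plus an array lookup rather than the linear search above.
 *
 *      static void my_cascade_handler(unsigned int irq, struct irq_desc *desc)
 *      {
 *              irq_hw_number_t hw = my_pic_read_pending();
 *              unsigned int cascade_virq = irq_linear_revmap(my_host, hw);
 *
 *              if (cascade_virq != NO_IRQ)
 *                      generic_handle_irq(cascade_virq);
 *              desc->chip->eoi(irq);
 *      }
 */
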
unsigned int irq_alloc_virt(struct irq_host *host,
                            unsigned int count,
                            unsigned int hint)
{
        unsigned long flags;
        unsigned int i, j, found = NO_IRQ;

        if (count == 0 || count > (irq_virq_count - NUM_ISA_INTERRUPTS))
                return NO_IRQ;

        spin_lock_irqsave(&irq_big_lock, flags);

        /* Use hint for 1 interrupt if any */
        if (count == 1 && hint >= NUM_ISA_INTERRUPTS &&
            hint < irq_virq_count && irq_map[hint].host == NULL) {
                found = hint;
                goto hint_found;
        }

        /* Look for count consecutive numbers in the allocatable
         * (non-legacy) space
         */
        for (i = NUM_ISA_INTERRUPTS, j = 0; i < irq_virq_count; i++) {
                if (irq_map[i].host != NULL)
                        j = 0;
                else
                        j++;

                if (j == count) {
                        found = i - count + 1;
                        break;
                }
        }
        if (found == NO_IRQ) {
                spin_unlock_irqrestore(&irq_big_lock, flags);
                return NO_IRQ;
        }
 hint_found:
        for (i = found; i < (found + count); i++) {
                irq_map[i].hwirq = host->inval_irq;
                smp_wmb();
                irq_map[i].host = host;
        }
        spin_unlock_irqrestore(&irq_big_lock, flags);
        return found;
}

1057
1058void irq_free_virt(unsigned int virq, unsigned int count)
1da177e4
LT
1059{
1060 unsigned long flags;
0ebfff14 1061 unsigned int i;
1da177e4 1062
0ebfff14
BH
1063 WARN_ON (virq < NUM_ISA_INTERRUPTS);
1064 WARN_ON (count == 0 || (virq + count) > irq_virq_count);
1da177e4 1065
0ebfff14
BH
1066 spin_lock_irqsave(&irq_big_lock, flags);
1067 for (i = virq; i < (virq + count); i++) {
1068 struct irq_host *host;
1da177e4 1069
0ebfff14
BH
1070 if (i < NUM_ISA_INTERRUPTS ||
1071 (virq + count) > irq_virq_count)
1072 continue;
1da177e4 1073
0ebfff14
BH
1074 host = irq_map[i].host;
1075 irq_map[i].hwirq = host->inval_irq;
1076 smp_wmb();
1077 irq_map[i].host = NULL;
1078 }
1079 spin_unlock_irqrestore(&irq_big_lock, flags);
1da177e4 1080}
0ebfff14 1081
int arch_early_irq_init(void)
{
        struct irq_desc *desc;
        int i;

        for (i = 0; i < NR_IRQS; i++) {
                desc = irq_to_desc(i);
                if (desc)
                        desc->status |= IRQ_NOREQUEST;
        }

        return 0;
}

int arch_init_chip_data(struct irq_desc *desc, int node)
{
        desc->status |= IRQ_NOREQUEST;
        return 0;
}

/* We need to create the radix trees late */
static int irq_late_init(void)
{
        struct irq_host *h;
        unsigned int i;

        /*
         * No mutual exclusion with respect to accessors of the tree is needed
         * here as the synchronization is done via the state variable
         * revmap_trees_allocated.
         */
        list_for_each_entry(h, &irq_hosts, link) {
                if (h->revmap_type == IRQ_HOST_MAP_TREE)
                        INIT_RADIX_TREE(&h->revmap_data.tree, GFP_KERNEL);
        }

        /*
         * Make sure the radix tree inits are visible before setting
         * the flag
         */
        smp_wmb();
        revmap_trees_allocated = 1;

        /*
         * Insert the reverse mapping for those interrupts already present
         * in irq_map[].
         */
        mutex_lock(&revmap_trees_mutex);
        for (i = 0; i < irq_virq_count; i++) {
                if (irq_map[i].host &&
                    (irq_map[i].host->revmap_type == IRQ_HOST_MAP_TREE))
                        radix_tree_insert(&irq_map[i].host->revmap_data.tree,
                                          irq_map[i].hwirq, &irq_map[i]);
        }
        mutex_unlock(&revmap_trees_mutex);

        /*
         * Make sure the radix tree insertions are visible before setting
         * the flag
         */
        smp_wmb();
        revmap_trees_allocated = 2;

        return 0;
}
arch_initcall(irq_late_init);

#ifdef CONFIG_VIRQ_DEBUG
static int virq_debug_show(struct seq_file *m, void *private)
{
        unsigned long flags;
        struct irq_desc *desc;
        const char *p;
        char none[] = "none";
        int i;

        seq_printf(m, "%-5s %-7s %-15s %s\n", "virq", "hwirq",
                   "chip name", "host name");

        for (i = 1; i < nr_irqs; i++) {
                desc = irq_to_desc(i);
                if (!desc)
                        continue;

                raw_spin_lock_irqsave(&desc->lock, flags);

                if (desc->action && desc->action->handler) {
                        seq_printf(m, "%5d ", i);
                        seq_printf(m, "0x%05lx ", virq_to_hw(i));

                        if (desc->chip && desc->chip->name)
                                p = desc->chip->name;
                        else
                                p = none;
                        seq_printf(m, "%-15s ", p);

                        if (irq_map[i].host && irq_map[i].host->of_node)
                                p = irq_map[i].host->of_node->full_name;
                        else
                                p = none;
                        seq_printf(m, "%s\n", p);
                }

                raw_spin_unlock_irqrestore(&desc->lock, flags);
        }

        return 0;
}

static int virq_debug_open(struct inode *inode, struct file *file)
{
        return single_open(file, virq_debug_show, inode->i_private);
}

static const struct file_operations virq_debug_fops = {
        .open = virq_debug_open,
        .read = seq_read,
        .llseek = seq_lseek,
        .release = single_release,
};

static int __init irq_debugfs_init(void)
{
        if (debugfs_create_file("virq_mapping", S_IRUGO, powerpc_debugfs_root,
                                NULL, &virq_debug_fops) == NULL)
                return -ENOMEM;

        return 0;
}
__initcall(irq_debugfs_init);
#endif /* CONFIG_VIRQ_DEBUG */

#ifdef CONFIG_PPC64
static int __init setup_noirqdistrib(char *str)
{
        distribute_irqs = 0;
        return 1;
}

__setup("noirqdistrib", setup_noirqdistrib);
#endif /* CONFIG_PPC64 */