Commit | Line | Data |
---|---|---|
1da177e4 LT |
/*
 * arch/ppc/kernel/irq.c
 *
 * Derived from arch/i386/kernel/irq.c
 * Copyright (C) 1992 Linus Torvalds
 * Adapted from arch/i386 by Gary Thomas
 * Copyright (C) 1995-1996 Gary Thomas (gdt@linuxppc.org)
 * Updated and modified by Cort Dougan <cort@fsmlabs.com>
 * Copyright (C) 1996-2001 Cort Dougan
 * Adapted for Power Macintosh by Paul Mackerras
 * Copyright (C) 1996 Paul Mackerras (paulus@cs.anu.edu.au)
 * Amiga/APUS changes by Jesper Skov (jskov@cygnus.co.uk).
 *
 * This file contains the code used by various IRQ handling routines:
 * asking for different IRQs should be done through these routines
 * instead of just grabbing them. Thus setups with different IRQ numbers
 * shouldn't result in any weird surprises, and installing new handlers
 * should be easier.
 *
 * The MPC8xx has an interrupt mask in the SIU. If a bit is set, the
 * interrupt is _enabled_. As expected, IRQ0 is bit 0 in the 32-bit
 * mask register (of which only 16 are defined), hence the weird shifting
 * and complement of the cached_irq_mask. I want to be able to stuff
 * this right into the SIU SMASK register.
 * Many of the prep/chrp functions are conditionally compiled on CONFIG_8xx
 * to reduce code space and undefined function references.
 */
28 | ||
29 | #include <linux/errno.h> | |
30 | #include <linux/module.h> | |
31 | #include <linux/threads.h> | |
32 | #include <linux/kernel_stat.h> | |
33 | #include <linux/signal.h> | |
34 | #include <linux/sched.h> | |
35 | #include <linux/ptrace.h> | |
36 | #include <linux/ioport.h> | |
37 | #include <linux/interrupt.h> | |
38 | #include <linux/timex.h> | |
39 | #include <linux/config.h> | |
40 | #include <linux/init.h> | |
41 | #include <linux/slab.h> | |
42 | #include <linux/pci.h> | |
43 | #include <linux/delay.h> | |
44 | #include <linux/irq.h> | |
45 | #include <linux/proc_fs.h> | |
46 | #include <linux/random.h> | |
47 | #include <linux/seq_file.h> | |
48 | #include <linux/cpumask.h> | |
49 | #include <linux/profile.h> | |
50 | #include <linux/bitops.h> | |
51 | ||
52 | #include <asm/uaccess.h> | |
53 | #include <asm/system.h> | |
54 | #include <asm/io.h> | |
55 | #include <asm/pgtable.h> | |
56 | #include <asm/irq.h> | |
57 | #include <asm/cache.h> | |
58 | #include <asm/prom.h> | |
59 | #include <asm/ptrace.h> | |
b60fc8bb | 60 | #include <asm/machdep.h> |
1da177e4 LT |
61 | |
/* One 32-bit mask word for every 32 IRQ lines. */
#define NR_MASK_WORDS ((NR_IRQS + 31) / 32)

/* IPI counters; defined by the SMP message-passing code elsewhere. */
extern atomic_t ipi_recv;
extern atomic_t ipi_sent;

#define MAXCOUNT 10000000

/* Interrupts for which get_irq() found nothing pending (see do_IRQ). */
int ppc_spurious_interrupts = 0;
struct irqaction *ppc_irq_action[NR_IRQS];
unsigned long ppc_cached_irq_mask[NR_MASK_WORDS];
unsigned long ppc_lost_interrupts[NR_MASK_WORDS];
atomic_t ppc_n_lost_interrupts;

#ifdef CONFIG_TAU_INT
/* Thermal Assist Unit state/statistics, provided by the TAU driver. */
extern int tau_initialized;
extern int tau_interrupts(int);
#endif
79 | ||
80 | int show_interrupts(struct seq_file *p, void *v) | |
81 | { | |
82 | int i = *(loff_t *) v, j; | |
83 | struct irqaction * action; | |
84 | unsigned long flags; | |
85 | ||
86 | if (i == 0) { | |
87 | seq_puts(p, " "); | |
88 | for (j=0; j<NR_CPUS; j++) | |
89 | if (cpu_online(j)) | |
90 | seq_printf(p, "CPU%d ", j); | |
91 | seq_putc(p, '\n'); | |
92 | } | |
93 | ||
94 | if (i < NR_IRQS) { | |
95 | spin_lock_irqsave(&irq_desc[i].lock, flags); | |
96 | action = irq_desc[i].action; | |
97 | if ( !action || !action->handler ) | |
98 | goto skip; | |
99 | seq_printf(p, "%3d: ", i); | |
100 | #ifdef CONFIG_SMP | |
101 | for (j = 0; j < NR_CPUS; j++) | |
102 | if (cpu_online(j)) | |
103 | seq_printf(p, "%10u ", | |
104 | kstat_cpu(j).irqs[i]); | |
105 | #else | |
106 | seq_printf(p, "%10u ", kstat_irqs(i)); | |
107 | #endif /* CONFIG_SMP */ | |
108 | if (irq_desc[i].handler) | |
109 | seq_printf(p, " %s ", irq_desc[i].handler->typename); | |
110 | else | |
111 | seq_puts(p, " None "); | |
112 | seq_printf(p, "%s", (irq_desc[i].status & IRQ_LEVEL) ? "Level " : "Edge "); | |
113 | seq_printf(p, " %s", action->name); | |
114 | for (action = action->next; action; action = action->next) | |
115 | seq_printf(p, ", %s", action->name); | |
116 | seq_putc(p, '\n'); | |
117 | skip: | |
118 | spin_unlock_irqrestore(&irq_desc[i].lock, flags); | |
119 | } else if (i == NR_IRQS) { | |
120 | #ifdef CONFIG_TAU_INT | |
121 | if (tau_initialized){ | |
122 | seq_puts(p, "TAU: "); | |
123 | for (j = 0; j < NR_CPUS; j++) | |
124 | if (cpu_online(j)) | |
125 | seq_printf(p, "%10u ", tau_interrupts(j)); | |
126 | seq_puts(p, " PowerPC Thermal Assist (cpu temp)\n"); | |
127 | } | |
128 | #endif | |
129 | #ifdef CONFIG_SMP | |
130 | /* should this be per processor send/receive? */ | |
131 | seq_printf(p, "IPI (recv/sent): %10u/%u\n", | |
132 | atomic_read(&ipi_recv), atomic_read(&ipi_sent)); | |
133 | #endif | |
134 | seq_printf(p, "BAD: %10u\n", ppc_spurious_interrupts); | |
135 | } | |
136 | return 0; | |
137 | } | |
138 | ||
139 | void do_IRQ(struct pt_regs *regs) | |
140 | { | |
141 | int irq, first = 1; | |
142 | irq_enter(); | |
143 | ||
144 | /* | |
145 | * Every platform is required to implement ppc_md.get_irq. | |
146 | * This function will either return an irq number or -1 to | |
147 | * indicate there are no more pending. But the first time | |
148 | * through the loop this means there wasn't and IRQ pending. | |
149 | * The value -2 is for buggy hardware and means that this IRQ | |
150 | * has already been handled. -- Tom | |
151 | */ | |
152 | while ((irq = ppc_md.get_irq(regs)) >= 0) { | |
153 | __do_IRQ(irq, regs); | |
154 | first = 0; | |
155 | } | |
156 | if (irq != -2 && first) | |
157 | /* That's not SMP safe ... but who cares ? */ | |
158 | ppc_spurious_interrupts++; | |
159 | irq_exit(); | |
160 | } | |
161 | ||
162 | void __init init_IRQ(void) | |
163 | { | |
164 | ppc_md.init_IRQ(); | |
165 | } |