/*
 * Derived from arch/i386/kernel/irq.c
 * Copyright (C) 1992 Linus Torvalds
 * Adapted from arch/i386 by Gary Thomas
 * Copyright (C) 1995-1996 Gary Thomas (gdt@linuxppc.org)
 * Updated and modified by Cort Dougan <cort@fsmlabs.com>
 * Copyright (C) 1996-2001 Cort Dougan
 * Adapted for Power Macintosh by Paul Mackerras
 * Copyright (C) 1996 Paul Mackerras (paulus@cs.anu.edu.au)
 * Amiga/APUS changes by Jesper Skov (jskov@cygnus.co.uk).
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 *
 * This file contains the code used by various IRQ handling routines:
 * asking for different IRQs should be done through these routines
 * instead of just grabbing them. Thus setups with different IRQ numbers
 * shouldn't result in any weird surprises, and installing new handlers
 * should be easier.
 *
 * The MPC8xx has an interrupt mask in the SIU. If a bit is set, the
 * interrupt is _enabled_. As expected, IRQ0 is bit 0 in the 32-bit
 * mask register (of which only 16 are defined), hence the weird shifting
 * and complement of the cached_irq_mask. I want to be able to stuff
 * this right into the SIU SMASK register.
 * Many of the prep/chrp functions are conditionally compiled on CONFIG_8xx
 * to reduce code space and undefined function references.
 */

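/*
 * Illustrative sketch only (not used by this file): under the PowerPC
 * big-endian bit-numbering convention, "IRQ0 is bit 0" means the
 * most-significant bit of the register, hence the shifting mentioned
 * above. The register-write helper here is hypothetical:
 *
 *	static void example_8xx_unmask_irq(unsigned int irq)
 *	{
 *		ppc_cached_irq_mask[0] |= (1UL << (31 - irq));
 *		example_siu_write_smask(ppc_cached_irq_mask[0]);
 *	}
 */
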
#include <linux/module.h>
#include <linux/threads.h>
#include <linux/kernel_stat.h>
#include <linux/signal.h>
#include <linux/sched.h>
#include <linux/ptrace.h>
#include <linux/ioport.h>
#include <linux/interrupt.h>
#include <linux/timex.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/delay.h>
#include <linux/irq.h>
#include <linux/seq_file.h>
#include <linux/cpumask.h>
#include <linux/profile.h>
#include <linux/bitops.h>
#include <linux/pci.h>

#include <asm/uaccess.h>
#include <asm/system.h>
#include <asm/io.h>
#include <asm/pgtable.h>
#include <asm/irq.h>
#include <asm/cache.h>
#include <asm/prom.h>
#include <asm/ptrace.h>
#include <asm/machdep.h>
#ifdef CONFIG_PPC_ISERIES
#include <asm/paca.h>
#endif

int __irq_offset_value;
static int ppc_spurious_interrupts;

#ifdef CONFIG_PPC32
EXPORT_SYMBOL(__irq_offset_value);
atomic_t ppc_n_lost_interrupts;

#ifndef CONFIG_PPC_MERGE
#define NR_MASK_WORDS	((NR_IRQS + 31) / 32)
unsigned long ppc_cached_irq_mask[NR_MASK_WORDS];
#endif

#ifdef CONFIG_TAU_INT
extern int tau_initialized;
extern int tau_interrupts(int);
#endif
#endif /* CONFIG_PPC32 */

#if defined(CONFIG_SMP) && !defined(CONFIG_PPC_MERGE)
extern atomic_t ipi_recv;
extern atomic_t ipi_sent;
#endif

#ifdef CONFIG_PPC64
EXPORT_SYMBOL(irq_desc);

int distribute_irqs = 1;
u64 ppc64_interrupt_controller;
#endif /* CONFIG_PPC64 */

int show_interrupts(struct seq_file *p, void *v)
{
	int i = *(loff_t *)v, j;
	struct irqaction *action;
	irq_desc_t *desc;
	unsigned long flags;

	if (i == 0) {
		seq_puts(p, "           ");
		for_each_online_cpu(j)
			seq_printf(p, "CPU%d       ", j);
		seq_putc(p, '\n');
	}

	if (i < NR_IRQS) {
		desc = get_irq_desc(i);
		spin_lock_irqsave(&desc->lock, flags);
		action = desc->action;
		if (!action || !action->handler)
			goto skip;
		seq_printf(p, "%3d: ", i);
#ifdef CONFIG_SMP
		for_each_online_cpu(j)
			seq_printf(p, "%10u ", kstat_cpu(j).irqs[i]);
#else
		seq_printf(p, "%10u ", kstat_irqs(i));
#endif /* CONFIG_SMP */
		if (desc->chip)
			seq_printf(p, " %s ", desc->chip->typename);
		else
			seq_puts(p, "  None      ");
		seq_printf(p, "%s", (desc->status & IRQ_LEVEL) ? "Level " : "Edge  ");
		seq_printf(p, "    %s", action->name);
		for (action = action->next; action; action = action->next)
			seq_printf(p, ", %s", action->name);
		seq_putc(p, '\n');
skip:
		spin_unlock_irqrestore(&desc->lock, flags);
	} else if (i == NR_IRQS) {
#ifdef CONFIG_PPC32
#ifdef CONFIG_TAU_INT
		if (tau_initialized) {
			seq_puts(p, "TAU: ");
			for_each_online_cpu(j)
				seq_printf(p, "%10u ", tau_interrupts(j));
			seq_puts(p, "  PowerPC             Thermal Assist (cpu temp)\n");
		}
#endif
#if defined(CONFIG_SMP) && !defined(CONFIG_PPC_MERGE)
		/* should this be per processor send/receive? */
		seq_printf(p, "IPI (recv/sent): %10u/%u\n",
				atomic_read(&ipi_recv), atomic_read(&ipi_sent));
#endif
#endif /* CONFIG_PPC32 */
		seq_printf(p, "BAD: %10u\n", ppc_spurious_interrupts);
	}
	return 0;
}

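/*
 * For reference, show_interrupts() above backs /proc/interrupts. Given
 * the format strings it uses, a typical body line looks roughly like
 * this (counts, chip name and action name are illustrative):
 *
 *	 16:     123456       7890   MPIC   Level     eth0
 *	BAD:          0
 */
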
#ifdef CONFIG_HOTPLUG_CPU
void fixup_irqs(cpumask_t map)
{
	unsigned int irq;
	static int warned;

	for_each_irq(irq) {
		cpumask_t mask;

		if (irq_desc[irq].status & IRQ_PER_CPU)
			continue;

		cpus_and(mask, irq_desc[irq].affinity, map);
		if (any_online_cpu(mask) == NR_CPUS) {
			printk("Breaking affinity for irq %i\n", irq);
			mask = map;
		}
		if (irq_desc[irq].chip->set_affinity)
			irq_desc[irq].chip->set_affinity(irq, mask);
		else if (irq_desc[irq].action && !(warned++))
			printk("Cannot set affinity for irq %i\n", irq);
	}

	local_irq_enable();
	mdelay(1);
	local_irq_disable();
}
#endif

void do_IRQ(struct pt_regs *regs)
{
	int irq;
#ifdef CONFIG_IRQSTACKS
	struct thread_info *curtp, *irqtp;
#endif

	irq_enter();

#ifdef CONFIG_DEBUG_STACKOVERFLOW
	/* Debugging check for stack overflow: is there less than 2KB free? */
	{
		long sp;

		sp = __get_SP() & (THREAD_SIZE-1);

		if (unlikely(sp < (sizeof(struct thread_info) + 2048))) {
			printk("do_IRQ: stack overflow: %ld\n",
				sp - sizeof(struct thread_info));
			dump_stack();
		}
	}
#endif

	/*
	 * Every platform is required to implement ppc_md.get_irq.
	 * This function will either return an irq number or -1 to
	 * indicate there are no more pending.
	 * The value -2 is for buggy hardware and means that this IRQ
	 * has already been handled. -- Tom
	 */
	irq = ppc_md.get_irq(regs);

	if (irq >= 0) {
#ifdef CONFIG_IRQSTACKS
		/* Switch to the irq stack to handle this */
		curtp = current_thread_info();
		irqtp = hardirq_ctx[smp_processor_id()];
		if (curtp != irqtp) {
			struct irq_desc *desc = irq_desc + irq;
			void *handler = desc->handle_irq;
			if (handler == NULL)
				handler = &__do_IRQ;
			irqtp->task = curtp->task;
			irqtp->flags = 0;
			call_handle_irq(irq, desc, regs, irqtp, handler);
			irqtp->task = NULL;
			if (irqtp->flags)
				set_bits(irqtp->flags, &curtp->flags);
		} else
#endif
			generic_handle_irq(irq, regs);
	} else if (irq != -2)
		/* That's not SMP safe ... but who cares? */
		ppc_spurious_interrupts++;

	irq_exit();

#ifdef CONFIG_PPC_ISERIES
	if (get_lppaca()->int_dword.fields.decr_int) {
		get_lppaca()->int_dword.fields.decr_int = 0;
		/* Signal a fake decrementer interrupt */
		timer_interrupt(regs);
	}
#endif
}
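
/*
 * Illustrative sketch only: a minimal platform ppc_md.get_irq hook
 * following the contract described in do_IRQ() above. The PIC read
 * helper and its sentinel value are hypothetical:
 *
 *	static int example_get_irq(struct pt_regs *regs)
 *	{
 *		int irq = example_pic_read_pending();
 *
 *		if (irq == EXAMPLE_PIC_NONE_PENDING)
 *			return -1;	// no more pending interrupts
 *		return irq;		// -2 would mean "already handled"
 *	}
 */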

void __init init_IRQ(void)
{
	ppc_md.init_IRQ();
#ifdef CONFIG_PPC64
	irq_ctx_init();
#endif
}

#ifdef CONFIG_PPC64
/*
 * Virtual IRQ mapping code, used on systems with XICS interrupt controllers.
 */

#define UNDEFINED_IRQ 0xffffffff
unsigned int virt_irq_to_real_map[NR_IRQS];

/*
 * Don't use virtual irqs 0, 1, 2 for devices.
 * The pcnet32 driver considers interrupt numbers < 2 to be invalid,
 * and 2 is the XICS IPI interrupt.
 * We limit virtual irqs to __irq_offset_value less than virt_irq_max so
 * that when we offset them we don't end up with an interrupt
 * number >= virt_irq_max.
 */
#define MIN_VIRT_IRQ	3

unsigned int virt_irq_max;
static unsigned int max_virt_irq;
static unsigned int nr_virt_irqs;

void
virt_irq_init(void)
{
	int i;

	if ((virt_irq_max == 0) || (virt_irq_max > (NR_IRQS - 1)))
		virt_irq_max = NR_IRQS - 1;
	max_virt_irq = virt_irq_max - __irq_offset_value;
	nr_virt_irqs = max_virt_irq - MIN_VIRT_IRQ + 1;

	for (i = 0; i < NR_IRQS; i++)
		virt_irq_to_real_map[i] = UNDEFINED_IRQ;
}

/* Create a mapping for a real_irq if it doesn't already exist.
 * Return the virtual irq as a convenience.
 */
int virt_irq_create_mapping(unsigned int real_irq)
{
	unsigned int virq, first_virq;
	static int warned;

	if (ppc64_interrupt_controller == IC_OPEN_PIC)
		return real_irq;	/* no mapping for openpic (for now) */

	if (ppc64_interrupt_controller == IC_CELL_PIC)
		return real_irq;	/* no mapping for iic either */

	/* don't map interrupts < MIN_VIRT_IRQ */
	if (real_irq < MIN_VIRT_IRQ) {
		virt_irq_to_real_map[real_irq] = real_irq;
		return real_irq;
	}

	/* map to a number between MIN_VIRT_IRQ and max_virt_irq */
	virq = real_irq;
	if (virq > max_virt_irq)
		virq = (virq % nr_virt_irqs) + MIN_VIRT_IRQ;

	/* search for this number or a free slot */
	first_virq = virq;
	while (virt_irq_to_real_map[virq] != UNDEFINED_IRQ) {
		if (virt_irq_to_real_map[virq] == real_irq)
			return virq;
		if (++virq > max_virt_irq)
			virq = MIN_VIRT_IRQ;
		if (virq == first_virq)
			goto nospace;	/* oops, no free slots */
	}

	virt_irq_to_real_map[virq] = real_irq;
	return virq;

 nospace:
	if (!warned) {
		printk(KERN_CRIT "Interrupt table is full\n");
		printk(KERN_CRIT "Increase virt_irq_max (currently %d) "
				"in your kernel sources and rebuild.\n", virt_irq_max);
		warned = 1;
	}
	return NO_IRQ;
}
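
/*
 * Worked example for the mapping above (numbers illustrative): with
 * NR_IRQS = 512, __irq_offset_value = 16 and virt_irq_max left at its
 * default of 511, virt_irq_init() gives max_virt_irq = 495 and
 * nr_virt_irqs = 493. A real_irq of 1000 exceeds max_virt_irq, so it
 * first hashes to (1000 % 493) + 3 = 17, then probes linearly from
 * there for a matching or free slot, wrapping at max_virt_irq.
 */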

/*
 * In most cases this will get a hit on the very first slot checked in
 * the virt_irq_to_real_map. Only when there are a large number of
 * IRQs will this be expensive.
 */
unsigned int real_irq_to_virt_slowpath(unsigned int real_irq)
{
	unsigned int virq;
	unsigned int first_virq;

	virq = real_irq;

	if (virq > max_virt_irq)
		virq = (virq % nr_virt_irqs) + MIN_VIRT_IRQ;

	first_virq = virq;

	do {
		if (virt_irq_to_real_map[virq] == real_irq)
			return virq;

		virq++;

		if (virq >= max_virt_irq)
			virq = 0;

	} while (first_virq != virq);

	return NO_IRQ;
}
#endif /* CONFIG_PPC64 */

#ifdef CONFIG_IRQSTACKS
struct thread_info *softirq_ctx[NR_CPUS] __read_mostly;
struct thread_info *hardirq_ctx[NR_CPUS] __read_mostly;

void irq_ctx_init(void)
{
	struct thread_info *tp;
	int i;

	for_each_possible_cpu(i) {
		memset((void *)softirq_ctx[i], 0, THREAD_SIZE);
		tp = softirq_ctx[i];
		tp->cpu = i;
		tp->preempt_count = SOFTIRQ_OFFSET;

		memset((void *)hardirq_ctx[i], 0, THREAD_SIZE);
		tp = hardirq_ctx[i];
		tp->cpu = i;
		tp->preempt_count = HARDIRQ_OFFSET;
	}
}

static inline void do_softirq_onstack(void)
{
	struct thread_info *curtp, *irqtp;

	curtp = current_thread_info();
	irqtp = softirq_ctx[smp_processor_id()];
	irqtp->task = curtp->task;
	call_do_softirq(irqtp);
	irqtp->task = NULL;
}

#else
#define do_softirq_onstack()	__do_softirq()
#endif /* CONFIG_IRQSTACKS */

void do_softirq(void)
{
	unsigned long flags;

	if (in_interrupt())
		return;

	local_irq_save(flags);

	if (local_softirq_pending()) {
		account_system_vtime(current);
		local_bh_disable();
		do_softirq_onstack();
		account_system_vtime(current);
		__local_bh_enable();
	}

	local_irq_restore(flags);
}
EXPORT_SYMBOL(do_softirq);

#ifdef CONFIG_PCI_MSI
int pci_enable_msi(struct pci_dev *pdev)
{
	if (ppc_md.enable_msi)
		return ppc_md.enable_msi(pdev);
	else
		return -1;
}

void pci_disable_msi(struct pci_dev *pdev)
{
	if (ppc_md.disable_msi)
		ppc_md.disable_msi(pdev);
}

void pci_scan_msi_device(struct pci_dev *dev) {}
int pci_enable_msix(struct pci_dev *dev, struct msix_entry *entries, int nvec) { return -1; }
void pci_disable_msix(struct pci_dev *dev) {}
void msi_remove_pci_irq_vectors(struct pci_dev *dev) {}
void disable_msi_mode(struct pci_dev *dev, int pos, int type) {}
void pci_no_msi(void) {}

#endif

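/*
 * Illustrative sketch only: a platform opts in to MSI by filling in the
 * machdep hooks consulted above, typically in its machine description
 * setup code (handler names here are hypothetical):
 *
 *	ppc_md.enable_msi  = example_platform_enable_msi;
 *	ppc_md.disable_msi = example_platform_disable_msi;
 */
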
#ifdef CONFIG_PPC64
static int __init setup_noirqdistrib(char *str)
{
	distribute_irqs = 0;
	return 1;
}

__setup("noirqdistrib", setup_noirqdistrib);
#endif /* CONFIG_PPC64 */
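
/*
 * Usage note: booting with "noirqdistrib" on the kernel command line
 * runs the __setup handler above, clearing distribute_irqs so that
 * interrupt controllers which consult it keep interrupts on the boot
 * CPU instead of spreading them across all online CPUs.
 */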