/*
 * Code to handle x86 style IRQs plus some generic interrupt stuff.
 *
 * Copyright (C) 1992 Linus Torvalds
 * Copyright (C) 1994, 1995, 1996, 1997, 1998 Ralf Baechle
 * Copyright (C) 1999 SuSE GmbH (Philipp Rumpf, prumpf@tux.org)
 * Copyright (C) 1999-2000 Grant Grundler
 * Copyright (c) 2005 Matthew Wilcox
 *
 *    This program is free software; you can redistribute it and/or modify
 *    it under the terms of the GNU General Public License as published by
 *    the Free Software Foundation; either version 2, or (at your option)
 *    any later version.
 *
 *    This program is distributed in the hope that it will be useful,
 *    but WITHOUT ANY WARRANTY; without even the implied warranty of
 *    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 *    GNU General Public License for more details.
 *
 *    You should have received a copy of the GNU General Public License
 *    along with this program; if not, write to the Free Software
 *    Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
 */
#include <linux/bitops.h>
#include <linux/errno.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/kernel_stat.h>
#include <linux/seq_file.h>
#include <linux/spinlock.h>
#include <linux/types.h>
#include <asm/io.h>

#include <asm/smp.h>
#undef PARISC_IRQ_CR16_COUNTS
extern irqreturn_t timer_interrupt(int, void *, struct pt_regs *);
extern irqreturn_t ipi_interrupt(int, void *, struct pt_regs *);
#define EIEM_MASK(irq)	(1UL << (CPU_IRQ_MAX - irq))
/* Bits in EIEM correlate with cpu_irq_action[].
** Numbered *Big Endian*! (ie bit 0 is MSB)
*/
static volatile unsigned long cpu_eiem = 0;

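/*
 * Editor's worked example (assuming, as the scan loop in do_cpu_irq_mask()
 * below does, that TIMER_IRQ owns EIR bit 0 and that the CPU IRQ region is
 * BITS_PER_LONG entries wide): EIEM_MASK(TIMER_IRQ) is then
 * 1UL << (BITS_PER_LONG - 1), i.e. the MSB of the word, and each successive
 * IRQ number moves one bit toward the LSB.
 */
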
static void cpu_disable_irq(unsigned int irq)
{
	unsigned long eirr_bit = EIEM_MASK(irq);

	cpu_eiem &= ~eirr_bit;
	/* Do nothing on the other CPUs.  If they get this interrupt,
	 * the & cpu_eiem in do_cpu_irq_mask() ensures they won't
	 * handle it, and the set_eiem() at the bottom will ensure it
	 * then gets disabled */
}

static void cpu_enable_irq(unsigned int irq)
{
	unsigned long eirr_bit = EIEM_MASK(irq);

	cpu_eiem |= eirr_bit;

	/* FIXME: while our interrupts aren't nested, we cannot reset
	 * the eiem mask if we're already in an interrupt.  Once we
	 * implement nested interrupts, this can go away
	 */
	if (!in_interrupt())
		set_eiem(cpu_eiem);

	/* This is just a simple NOP IPI.  But what it does is cause
	 * all the other CPUs to do a set_eiem(cpu_eiem) at the end
	 * of the interrupt handler */
	smp_send_all_nop();
}

static unsigned int cpu_startup_irq(unsigned int irq)
{
	cpu_enable_irq(irq);
	return 0;
}

void no_ack_irq(unsigned int irq) { }
void no_end_irq(unsigned int irq) { }

#ifdef CONFIG_SMP
int cpu_check_affinity(unsigned int irq, cpumask_t *dest)
{
	int cpu_dest;

	/* timer and ipi have to always be received on all CPUs */
	if (irq == TIMER_IRQ || irq == IPI_IRQ) {
		/* Bad linux design decision.  The mask has already
		 * been set; we must reset it */
		irq_desc[irq].affinity = CPU_MASK_ALL;
		return -EINVAL;
	}

	/* whatever mask they set, we just allow one CPU */
	cpu_dest = first_cpu(*dest);
	*dest = cpumask_of_cpu(cpu_dest);

	return 0;
}

static void cpu_set_affinity_irq(unsigned int irq, cpumask_t dest)
{
	if (cpu_check_affinity(irq, &dest))
		return;

	irq_desc[irq].affinity = dest;
}
#endif

static struct hw_interrupt_type cpu_interrupt_type = {
	.typename	= "CPU",
	.startup	= cpu_startup_irq,
	.shutdown	= cpu_disable_irq,
	.enable		= cpu_enable_irq,
	.disable	= cpu_disable_irq,
	.ack		= no_ack_irq,
	.end		= no_end_irq,
#ifdef CONFIG_SMP
	.set_affinity	= cpu_set_affinity_irq,
#endif
	/* XXX: Needs to be written.  We managed without it so far, but
	 * we really ought to write it.
	 */
	.retrigger	= NULL,
};

int show_interrupts(struct seq_file *p, void *v)
{
	int i = *(loff_t *) v, j;
	unsigned long flags;

	if (i == 0) {
		seq_puts(p, "    ");
		for_each_online_cpu(j)
			seq_printf(p, "       CPU%d", j);

#ifdef PARISC_IRQ_CR16_COUNTS
		seq_printf(p, " [min/avg/max] (CPU cycle counts)");
#endif
		seq_putc(p, '\n');
	}

	if (i < NR_IRQS) {
		struct irqaction *action;

		spin_lock_irqsave(&irq_desc[i].lock, flags);
		action = irq_desc[i].action;
		if (!action)
			goto skip;
		seq_printf(p, "%3d: ", i);
#ifdef CONFIG_SMP
		for_each_online_cpu(j)
			seq_printf(p, "%10u ", kstat_cpu(j).irqs[i]);
#else
		seq_printf(p, "%10u ", kstat_irqs(i));
#endif

		seq_printf(p, " %14s", irq_desc[i].chip->typename);
#ifndef PARISC_IRQ_CR16_COUNTS
		seq_printf(p, "  %s", action->name);

		while ((action = action->next))
			seq_printf(p, ", %s", action->name);
#else
		for ( ;action; action = action->next) {
			unsigned int k, avg, min, max;

			min = max = action->cr16_hist[0];

			for (avg = k = 0; k < PARISC_CR16_HIST_SIZE; k++) {
				int hist = action->cr16_hist[k];

				if (hist) {
					avg += hist;
				} else
					break;

				if (hist > max) max = hist;
				if (hist < min) min = hist;
			}

			/* guard against k == 0 (empty histogram) */
			avg = k ? avg / k : 0;
			seq_printf(p, " %s[%d/%d/%d]", action->name,
					min, avg, max);
		}
#endif

		seq_putc(p, '\n');
 skip:
		spin_unlock_irqrestore(&irq_desc[i].lock, flags);
	}

	return 0;
}

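/*
 * Editor's note: with the default (non-CR16) format above, the
 * /proc/interrupts output looks roughly like this (counts and IRQ
 * numbers invented for illustration):
 *
 *            CPU0       CPU1
 *   0:     240923     239871            CPU  timer
 *   1:      53214      51980            CPU  IPI
 */
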
/*
** The following form a "set": Virtual IRQ, Transaction Address, Trans Data.
** Respectively, these map to IRQ region+EIRR, Processor HPA, EIRR bit.
**
** To use txn_XXX() interfaces, get a Virtual IRQ first.
** Then use that to get the Transaction address and data.
*/

int cpu_claim_irq(unsigned int irq, struct hw_interrupt_type *type, void *data)
{
	if (irq_desc[irq].action)
		return -EBUSY;
	if (irq_desc[irq].chip != &cpu_interrupt_type)
		return -EBUSY;

	if (type) {
		irq_desc[irq].chip = type;
		irq_desc[irq].chip_data = data;
		cpu_interrupt_type.enable(irq);
	}
	return 0;
}

int txn_claim_irq(int irq)
{
	return cpu_claim_irq(irq, NULL, NULL) ? -1 : irq;
}

/*
 * The bits_wide parameter accommodates the limitations of the HW/SW which
 * use these bits:
 * Legacy PA I/O (GSC/NIO): 5 bits (architected EIM register)
 * V-class (EPIC):          6 bits
 * N/L/A-class (iosapic):   8 bits
 * PCI 2.2 MSI:            16 bits
 * Some PCI devices:       32 bits (Symbios SCSI/ATM/HyperFabric)
 *
 * On the service provider side:
 * o PA 1.1 (and PA2.0 narrow mode)     5-bits (width of EIR register)
 * o PA 2.0 wide mode                   6-bits (per processor)
 * o IA64                               8-bits (0-256 total)
 *
 * So a Legacy PA I/O device on a PA 2.0 box can't use all the bits supported
 * by the processor...and the N/L-class I/O subsystem supports more bits than
 * PA2.0 has.  The first case is the problem.
 */
int txn_alloc_irq(unsigned int bits_wide)
{
	int irq;

	/* never return irq 0 cause that's the interval timer */
	for (irq = CPU_IRQ_BASE + 1; irq <= CPU_IRQ_MAX; irq++) {
		if (cpu_claim_irq(irq, NULL, NULL) < 0)
			continue;
		if ((irq - CPU_IRQ_BASE) >= (1 << bits_wide))
			continue;
		return irq;
	}

	/* unlikely, but be prepared */
	return -1;
}

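/*
 * Editor's note: for example, txn_alloc_irq(5) on behalf of a legacy GSC
 * device can only return IRQs in the range CPU_IRQ_BASE+1 .. CPU_IRQ_BASE+31,
 * since such a device can encode just 5 bits of EIRR data in its EIM
 * register; wider I/O subsystems pass a larger bits_wide.
 */
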
unsigned long txn_affinity_addr(unsigned int irq, int cpu)
{
#ifdef CONFIG_SMP
	irq_desc[irq].affinity = cpumask_of_cpu(cpu);
#endif

	return cpu_data[cpu].txn_addr;
}

unsigned long txn_alloc_addr(unsigned int virt_irq)
{
	static int next_cpu = -1;

	next_cpu++; /* assign to "next" CPU we want this bugger on */

	/* skip CPUs that are not online or have no txn_addr */
	while ((next_cpu < NR_CPUS) &&
	       (!cpu_data[next_cpu].txn_addr || !cpu_online(next_cpu)))
		next_cpu++;

	if (next_cpu >= NR_CPUS)
		next_cpu = 0;	/* nothing else, assign monarch */

	return txn_affinity_addr(virt_irq, next_cpu);
}

unsigned int txn_alloc_data(unsigned int virt_irq)
{
	return virt_irq - CPU_IRQ_BASE;
}

/* ONLY called from entry.S:intr_extint() */
void do_cpu_irq_mask(struct pt_regs *regs)
{
	unsigned long eirr_val;

	irq_enter();

	/*
	 * Don't allow TIMER or IPI nested interrupts.
	 * Allowing any single interrupt to nest can lead to that CPU
	 * handling interrupts with all enabled interrupts unmasked.
	 */
	set_eiem(0UL);

	/* 1) only process IRQs that are enabled/unmasked (cpu_eiem)
	 * 2) We loop here on EIRR contents in order to avoid
	 *    nested interrupts or having to take another interrupt
	 *    when we could have just handled it right away.
	 */
	for (;;) {
		unsigned long bit = (1UL << (BITS_PER_LONG - 1));
		unsigned int irq;

		eirr_val = mfctl(23) & cpu_eiem;	/* read EIRR, mask disabled bits */
		if (!eirr_val)
			break;

		mtctl(eirr_val, 23); /* reset bits we are going to process */

		/* Work our way from MSb to LSb...same order we alloc EIRs */
		for (irq = TIMER_IRQ; eirr_val && bit; bit>>=1, irq++) {
#ifdef CONFIG_SMP
			cpumask_t dest = irq_desc[irq].affinity;
#endif
			if (!(bit & eirr_val))
				continue;

			/* clear bit in mask - can exit loop sooner */
			eirr_val &= ~bit;

#ifdef CONFIG_SMP
			/* FIXME: because generic set affinity mucks
			 * with the affinity before sending it to us
			 * we can get the situation where the affinity is
			 * wrong for our CPU type interrupts */
			if (irq != TIMER_IRQ && irq != IPI_IRQ &&
			    !cpu_isset(smp_processor_id(), dest)) {
				int cpu = first_cpu(dest);

				printk(KERN_DEBUG "redirecting irq %d from CPU %d to %d\n",
					irq, smp_processor_id(), cpu);
				gsc_writel(irq + CPU_IRQ_BASE,
					cpu_data[cpu].hpa);
				continue;
			}
#endif
			__do_IRQ(irq, regs);
		}
	}

	set_eiem(cpu_eiem);	/* restore original mask */
	irq_exit();
}

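/*
 * Editor's trace of one pass through the loop above (hypothetical values,
 * assuming a 64-bit EIEM/EIRR with TIMER_IRQ on the MSB): if only the MSB
 * is pending and enabled, eirr_val == 1UL << 63; the mtctl() write-back
 * acknowledges that bit in the EIRR, the inner scan dispatches
 * __do_IRQ(TIMER_IRQ, regs) on its first iteration and then stops because
 * eirr_val has been cleared; the outer loop re-reads CR23 and breaks if
 * nothing new arrived while we were handling the timer.
 */
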
static struct irqaction timer_action = {
	.handler = timer_interrupt,
	.name = "timer",
	.flags = SA_INTERRUPT,
};

#ifdef CONFIG_SMP
static struct irqaction ipi_action = {
	.handler = ipi_interrupt,
	.name = "IPI",
	.flags = SA_INTERRUPT,
};
#endif

static void claim_cpu_irqs(void)
{
	int i;

	for (i = CPU_IRQ_BASE; i <= CPU_IRQ_MAX; i++) {
		irq_desc[i].chip = &cpu_interrupt_type;
	}

	irq_desc[TIMER_IRQ].action = &timer_action;
	irq_desc[TIMER_IRQ].status |= IRQ_PER_CPU;
#ifdef CONFIG_SMP
	irq_desc[IPI_IRQ].action = &ipi_action;
	irq_desc[IPI_IRQ].status = IRQ_PER_CPU;
#endif
}

void __init init_IRQ(void)
{
	local_irq_disable();	/* PARANOID - should already be disabled */
	mtctl(~0UL, 23);	/* EIRR : clear all pending external intr */
	claim_cpu_irqs();
#ifdef CONFIG_SMP
	cpu_eiem = EIEM_MASK(IPI_IRQ) | EIEM_MASK(TIMER_IRQ);
#else
	cpu_eiem = EIEM_MASK(TIMER_IRQ);
#endif
	set_eiem(cpu_eiem);	/* EIEM : enable all external intr */
}

void ack_bad_irq(unsigned int irq)
{
	printk("unexpected IRQ %d\n", irq);
}