[IA64] Check if irq is sharable
arch/ia64/kernel/irq_ia64.c (GitHub/mt8127/android_kernel_alcatel_ttab.git)
/*
 * linux/arch/ia64/kernel/irq_ia64.c
 *
 * Copyright (C) 1998-2001 Hewlett-Packard Co
 *      Stephane Eranian <eranian@hpl.hp.com>
 *      David Mosberger-Tang <davidm@hpl.hp.com>
 *
 *  6/10/99: Updated to bring in sync with x86 version to facilitate
 *           support for SMP and different interrupt controllers.
 *
 * 09/15/00 Goutham Rao <goutham.rao@intel.com> Implemented pci_irq_to_vector
 *                      PCI to vector allocation routine.
 * 04/14/2004 Ashok Raj <ashok.raj@intel.com>
 *                      Added CPU Hotplug handling for IPF.
 */

#include <linux/module.h>

#include <linux/jiffies.h>
#include <linux/errno.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/ioport.h>
#include <linux/kernel_stat.h>
#include <linux/slab.h>
#include <linux/ptrace.h>
#include <linux/random.h>       /* for rand_initialize_irq() */
#include <linux/signal.h>
#include <linux/smp.h>
#include <linux/threads.h>
#include <linux/bitops.h>
#include <linux/irq.h>

#include <asm/delay.h>
#include <asm/intrinsics.h>
#include <asm/io.h>
#include <asm/hw_irq.h>
#include <asm/machvec.h>
#include <asm/pgtable.h>
#include <asm/system.h>
#include <asm/tlbflush.h>

#ifdef CONFIG_PERFMON
# include <asm/perfmon.h>
#endif

#define IRQ_DEBUG       0

/* These can be overridden in platform_irq_init */
int ia64_first_device_vector = IA64_DEF_FIRST_DEVICE_VECTOR;
int ia64_last_device_vector = IA64_DEF_LAST_DEVICE_VECTOR;

/* default base addr of IPI table */
void __iomem *ipi_base_addr = ((void __iomem *)
                               (__IA64_UNCACHED_OFFSET | IA64_IPI_DEFAULT_BASE_ADDR));

/*
 * Legacy IRQ to IA-64 vector translation table.
 */
__u8 isa_irq_to_vector_map[16] = {
        /* 8259 IRQ translation, first 16 entries */
        0x2f, 0x20, 0x2e, 0x2d, 0x2c, 0x2b, 0x2a, 0x29,
        0x28, 0x27, 0x26, 0x25, 0x24, 0x23, 0x22, 0x21
};
EXPORT_SYMBOL(isa_irq_to_vector_map);
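
/*
 * Illustrative note (an assumption, not something defined in this file):
 * isa_irq_to_vector() in <asm/hw_irq.h> is expected to index this table
 * directly, so with the entries above a legacy 8259 IRQ resolves to its
 * IA-64 vector, e.g.
 *
 *      ia64_vector vec = isa_irq_to_vector(1);         yields 0x20
 */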

static unsigned long ia64_vector_mask[BITS_TO_LONGS(IA64_MAX_DEVICE_VECTORS)];

int
assign_irq_vector (int irq)
{
        int pos, vector;
 again:
        pos = find_first_zero_bit(ia64_vector_mask, IA64_NUM_DEVICE_VECTORS);
        vector = IA64_FIRST_DEVICE_VECTOR + pos;
        if (vector > IA64_LAST_DEVICE_VECTOR)
                return -ENOSPC;
        if (test_and_set_bit(pos, ia64_vector_mask))
                goto again;
        return vector;
}

void
free_irq_vector (int vector)
{
        int pos;

        if (vector < IA64_FIRST_DEVICE_VECTOR || vector > IA64_LAST_DEVICE_VECTOR)
                return;

        pos = vector - IA64_FIRST_DEVICE_VECTOR;
        if (!test_and_clear_bit(pos, ia64_vector_mask))
                printk(KERN_WARNING "%s: double free!\n", __FUNCTION__);
}

int
reserve_irq_vector (int vector)
{
        int pos;

        if (vector < IA64_FIRST_DEVICE_VECTOR ||
            vector > IA64_LAST_DEVICE_VECTOR)
                return -EINVAL;

        pos = vector - IA64_FIRST_DEVICE_VECTOR;
        return test_and_set_bit(pos, ia64_vector_mask);
}
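
/*
 * Usage sketch for the vector bitmap allocator above (hypothetical caller;
 * it mirrors what create_irq()/destroy_irq() below do for MSI):
 *
 *      int vector = assign_irq_vector(AUTO_ASSIGN);
 *      if (vector < 0)
 *              return vector;          (-ENOSPC: no free device vector)
 *      ...
 *      free_irq_vector(vector);
 */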

/*
 * Dynamic IRQ allocation and deallocation for MSI
 */
int create_irq(void)
{
        int vector = assign_irq_vector(AUTO_ASSIGN);

        if (vector >= 0)
                dynamic_irq_init(vector);

        return vector;
}

void destroy_irq(unsigned int irq)
{
        dynamic_irq_cleanup(irq);
        free_irq_vector(irq);
}
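
/*
 * Sketch of the expected caller (an assumption: the real call sites live in
 * the generic MSI layer, not in this file):
 *
 *      int irq = create_irq();
 *      if (irq < 0)
 *              return irq;
 *      ... program the MSI address/data pair for "irq" ...
 *      destroy_irq(irq);               (on teardown)
 */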

#ifdef CONFIG_SMP
# define IS_RESCHEDULE(vec)             (vec == IA64_IPI_RESCHEDULE)
# define IS_LOCAL_TLB_FLUSH(vec)        (vec == IA64_IPI_LOCAL_TLB_FLUSH)
#else
# define IS_RESCHEDULE(vec)             (0)
# define IS_LOCAL_TLB_FLUSH(vec)        (0)
#endif
/*
 * This is where the IVT branches when we get an external interrupt.
 * It then dispatches to the correct hardware IRQ handler via a
 * function pointer.
 */
void
ia64_handle_irq (ia64_vector vector, struct pt_regs *regs)
{
        struct pt_regs *old_regs = set_irq_regs(regs);
        unsigned long saved_tpr;

#if IRQ_DEBUG
        {
                unsigned long bsp, sp;

                /*
                 * Note: if the interrupt happened while executing in
                 * the context switch routine (ia64_switch_to), we may
                 * get a spurious stack overflow here.  This is
                 * because the register and the memory stack are not
                 * switched atomically.
                 */
                bsp = ia64_getreg(_IA64_REG_AR_BSP);
                sp = ia64_getreg(_IA64_REG_SP);

                if ((sp - bsp) < 1024) {
                        static unsigned char count;
                        static long last_time;

                        if (jiffies - last_time > 5*HZ)
                                count = 0;
                        if (++count < 5) {
                                last_time = jiffies;
                                printk("ia64_handle_irq: DANGER: less than "
                                       "1KB of free stack space!!\n"
                                       "(bsp=0x%lx, sp=0x%lx)\n", bsp, sp);
                        }
                }
        }
#endif /* IRQ_DEBUG */

        /*
         * Always set TPR to limit maximum interrupt nesting depth to
         * 16 (without this, it would be ~240, which could easily lead
         * to kernel stack overflows).
         */
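        /*
         * Informal sketch (an assumption about the IA-64 TPR layout rather
         * than a statement from the comment above): writing the raw vector
         * number into CR.TPR sets TPR.mic (bits 7:4) to the vector's
         * priority class, vector/16, which masks further interrupts of the
         * same or lower class.  E.g. vector 0x30 masks classes 0-3 while it
         * is handled; with 16 classes, handlers nest at most ~16 deep.
         */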
        irq_enter();
        saved_tpr = ia64_getreg(_IA64_REG_CR_TPR);
        ia64_srlz_d();
        while (vector != IA64_SPURIOUS_INT_VECTOR) {
                if (unlikely(IS_LOCAL_TLB_FLUSH(vector))) {
                        smp_local_flush_tlb();
                        kstat_this_cpu.irqs[vector]++;
                } else if (unlikely(IS_RESCHEDULE(vector)))
                        kstat_this_cpu.irqs[vector]++;
                else {
                        ia64_setreg(_IA64_REG_CR_TPR, vector);
                        ia64_srlz_d();

                        generic_handle_irq(local_vector_to_irq(vector));

                        /*
                         * Disable interrupts and send EOI:
                         */
                        local_irq_disable();
                        ia64_setreg(_IA64_REG_CR_TPR, saved_tpr);
                }
                ia64_eoi();
                vector = ia64_get_ivr();
        }
        /*
         * This must be done *after* the ia64_eoi().  For example, the keyboard softirq
         * handler needs to be able to wait for further keyboard interrupts, which can't
         * come through until ia64_eoi() has been done.
         */
        irq_exit();
        set_irq_regs(old_regs);
}

#ifdef CONFIG_HOTPLUG_CPU
/*
 * This function emulates interrupt processing when a CPU is about to be
 * brought down.
 */
void ia64_process_pending_intr(void)
{
        ia64_vector vector;
        unsigned long saved_tpr;
        extern unsigned int vectors_in_migration[NR_IRQS];

        vector = ia64_get_ivr();

        irq_enter();
        saved_tpr = ia64_getreg(_IA64_REG_CR_TPR);
        ia64_srlz_d();

        /*
         * Perform normal interrupt style processing
         */
        while (vector != IA64_SPURIOUS_INT_VECTOR) {
                if (unlikely(IS_LOCAL_TLB_FLUSH(vector))) {
                        smp_local_flush_tlb();
                        kstat_this_cpu.irqs[vector]++;
                } else if (unlikely(IS_RESCHEDULE(vector)))
                        kstat_this_cpu.irqs[vector]++;
                else {
                        struct pt_regs *old_regs = set_irq_regs(NULL);

                        ia64_setreg(_IA64_REG_CR_TPR, vector);
                        ia64_srlz_d();

                        /*
                         * Call the normal interrupt path as it would have
                         * been invoked from a real interrupt handler,
                         * passing NULL for pt_regs.  This could probably
                         * share code with ia64_handle_irq().
                         */
                        vectors_in_migration[local_vector_to_irq(vector)] = 0;
                        generic_handle_irq(local_vector_to_irq(vector));
                        set_irq_regs(old_regs);

                        /*
                         * Disable interrupts and send EOI
                         */
                        local_irq_disable();
                        ia64_setreg(_IA64_REG_CR_TPR, saved_tpr);
                }
                ia64_eoi();
                vector = ia64_get_ivr();
        }
        irq_exit();
}
#endif

#ifdef CONFIG_SMP

static irqreturn_t dummy_handler (int irq, void *dev_id)
{
        BUG();
}
extern irqreturn_t handle_IPI (int irq, void *dev_id);

static struct irqaction ipi_irqaction = {
        .handler =      handle_IPI,
        .flags =        IRQF_DISABLED,
        .name =         "IPI"
};

static struct irqaction resched_irqaction = {
        .handler =      dummy_handler,
        .flags =        IRQF_DISABLED,
        .name =         "resched"
};

static struct irqaction tlb_irqaction = {
        .handler =      dummy_handler,
        .flags =        IRQF_DISABLED,
        .name =         "tlb_flush"
};

#endif

void
register_percpu_irq (ia64_vector vec, struct irqaction *action)
{
        irq_desc_t *desc;
        unsigned int irq;

        for (irq = 0; irq < NR_IRQS; ++irq)
                if (irq_to_vector(irq) == vec) {
                        desc = irq_desc + irq;
                        desc->status |= IRQ_PER_CPU;
                        desc->chip = &irq_type_ia64_lsapic;
                        if (action)
                                setup_irq(irq, action);
                }
}

void __init
init_IRQ (void)
{
        register_percpu_irq(IA64_SPURIOUS_INT_VECTOR, NULL);
#ifdef CONFIG_SMP
        register_percpu_irq(IA64_IPI_VECTOR, &ipi_irqaction);
        register_percpu_irq(IA64_IPI_RESCHEDULE, &resched_irqaction);
        register_percpu_irq(IA64_IPI_LOCAL_TLB_FLUSH, &tlb_irqaction);
#endif
#ifdef CONFIG_PERFMON
        pfm_init_percpu();
#endif
        platform_irq_init();
}

void
ia64_send_ipi (int cpu, int vector, int delivery_mode, int redirect)
{
        void __iomem *ipi_addr;
        unsigned long ipi_data;
        unsigned long phys_cpu_id;

#ifdef CONFIG_SMP
        phys_cpu_id = cpu_physical_id(cpu);
#else
        phys_cpu_id = (ia64_getreg(_IA64_REG_CR_LID) >> 16) & 0xffff;
#endif

        /*
         * The target cpu number is encoded as an 8-bit ID and an 8-bit EID.
         */

        ipi_data = (delivery_mode << 8) | (vector & 0xff);
        ipi_addr = ipi_base_addr + ((phys_cpu_id << 4) | ((redirect & 1) << 3));

        writeq(ipi_data, ipi_addr);
}
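
/*
 * Example call (an assumption about typical usage, not part of this file):
 * the SMP code is expected to raise a cross-CPU interrupt roughly as
 *
 *      ia64_send_ipi(cpu, IA64_IPI_VECTOR, IA64_IPI_DM_INT, 0);
 *
 * i.e. deliver the IPI vector to "cpu" in INT mode with no redirection.
 */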