/*
 * linux/kernel/irq/chip.c
 *
 * Copyright (C) 1992, 1998-2006 Linus Torvalds, Ingo Molnar
 * Copyright (C) 2005-2006, Thomas Gleixner, Russell King
 *
 * This file contains the core interrupt handling code, for irq-chip
 * based architectures.
 *
 * Detailed information is available in Documentation/DocBook/genericirq
 */

#include <linux/irq.h>
#include <linux/msi.h>
#include <linux/module.h>
#include <linux/interrupt.h>
#include <linux/kernel_stat.h>

#include "internals.h"

/**
 * dynamic_irq_init - initialize a dynamically allocated irq
 * @irq: irq number to initialize
 */
void dynamic_irq_init(unsigned int irq)
{
	struct irq_desc *desc;
	unsigned long flags;

	if (irq >= NR_IRQS) {
		printk(KERN_ERR "Trying to initialize invalid IRQ%d\n", irq);
		WARN_ON(1);
		return;
	}

	/* Ensure we don't have left over values from a previous use of this irq */
	desc = irq_desc + irq;
	spin_lock_irqsave(&desc->lock, flags);
	desc->status = IRQ_DISABLED;
	desc->chip = &no_irq_chip;
	desc->handle_irq = handle_bad_irq;
	desc->depth = 1;
	desc->msi_desc = NULL;
	desc->handler_data = NULL;
	desc->chip_data = NULL;
	desc->action = NULL;
	desc->irq_count = 0;
	desc->irqs_unhandled = 0;
#ifdef CONFIG_SMP
	desc->affinity = CPU_MASK_ALL;
#endif
	spin_unlock_irqrestore(&desc->lock, flags);
}

/**
 * dynamic_irq_cleanup - cleanup a dynamically allocated irq
 * @irq: irq number to cleanup
 */
void dynamic_irq_cleanup(unsigned int irq)
{
	struct irq_desc *desc;
	unsigned long flags;

	if (irq >= NR_IRQS) {
		printk(KERN_ERR "Trying to cleanup invalid IRQ%d\n", irq);
		WARN_ON(1);
		return;
	}

	desc = irq_desc + irq;
	spin_lock_irqsave(&desc->lock, flags);
	if (desc->action) {
		spin_unlock_irqrestore(&desc->lock, flags);
		printk(KERN_ERR "Destroying IRQ%d without calling free_irq\n",
			irq);
		WARN_ON(1);
		return;
	}
	desc->msi_desc = NULL;
	desc->handler_data = NULL;
	desc->chip_data = NULL;
	desc->handle_irq = handle_bad_irq;
	desc->chip = &no_irq_chip;
	spin_unlock_irqrestore(&desc->lock, flags);
}

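/*
 * Example (illustrative sketch, not part of this file): architectures that
 * hand out interrupt numbers dynamically, e.g. for MSI, typically pair these
 * two helpers in their create_irq()/destroy_irq() implementations. The
 * find_free_irq() helper below is hypothetical.
 *
 *	int create_irq(void)
 *	{
 *		int irq = find_free_irq();	// hypothetical allocator
 *
 *		if (irq < 0)
 *			return -ENOSPC;
 *		dynamic_irq_init(irq);		// reset the descriptor to a known state
 *		return irq;
 *	}
 *
 *	void destroy_irq(unsigned int irq)
 *	{
 *		dynamic_irq_cleanup(irq);	// detach chip/handler data again
 *	}
 */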

/**
 * set_irq_chip - set the irq chip for an irq
 * @irq: irq number
 * @chip: pointer to irq chip description structure
 */
int set_irq_chip(unsigned int irq, struct irq_chip *chip)
{
	struct irq_desc *desc;
	unsigned long flags;

	if (irq >= NR_IRQS) {
		printk(KERN_ERR "Trying to install chip for IRQ%d\n", irq);
		WARN_ON(1);
		return -EINVAL;
	}

	if (!chip)
		chip = &no_irq_chip;

	desc = irq_desc + irq;
	spin_lock_irqsave(&desc->lock, flags);
	irq_chip_set_defaults(chip);
	desc->chip = chip;
	spin_unlock_irqrestore(&desc->lock, flags);

	return 0;
}
EXPORT_SYMBOL(set_irq_chip);

/**
 * set_irq_type - set the irq type for an irq
 * @irq: irq number
 * @type: interrupt type - see include/linux/interrupt.h
 */
int set_irq_type(unsigned int irq, unsigned int type)
{
	struct irq_desc *desc;
	unsigned long flags;
	int ret = -ENXIO;

	if (irq >= NR_IRQS) {
		printk(KERN_ERR "Trying to set irq type for IRQ%d\n", irq);
		return -ENODEV;
	}

	desc = irq_desc + irq;
	if (desc->chip->set_type) {
		spin_lock_irqsave(&desc->lock, flags);
		ret = desc->chip->set_type(irq, type);
		spin_unlock_irqrestore(&desc->lock, flags);
	}
	return ret;
}
EXPORT_SYMBOL(set_irq_type);

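/*
 * Example (illustrative sketch, not part of this file): a board or driver
 * that knows its line is rising-edge triggered can configure the trigger
 * before requesting the interrupt. The IRQ_TYPE_EDGE_RISING constant is
 * assumed to be the trigger flag provided by <linux/irq.h> on this kernel,
 * and gpio_irq is a hypothetical interrupt number.
 *
 *	if (set_irq_type(gpio_irq, IRQ_TYPE_EDGE_RISING) < 0)
 *		printk(KERN_WARNING "could not set trigger for IRQ%d\n", gpio_irq);
 *
 * If the chip behind the irq has no ->set_type() callback, -ENXIO is
 * returned and the trigger has to be configured by other means.
 */
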
/**
 * set_irq_data - set irq data for an irq
 * @irq: Interrupt number
 * @data: Pointer to interrupt specific data
 *
 * Set the hardware irq controller data for an irq
 */
int set_irq_data(unsigned int irq, void *data)
{
	struct irq_desc *desc;
	unsigned long flags;

	if (irq >= NR_IRQS) {
		printk(KERN_ERR
		       "Trying to install controller data for IRQ%d\n", irq);
		return -EINVAL;
	}

	desc = irq_desc + irq;
	spin_lock_irqsave(&desc->lock, flags);
	desc->handler_data = data;
	spin_unlock_irqrestore(&desc->lock, flags);
	return 0;
}
EXPORT_SYMBOL(set_irq_data);

/**
 * set_irq_msi - set MSI descriptor data for an irq
 * @irq: Interrupt number
 * @entry: Pointer to MSI descriptor data
 *
 * Set the MSI descriptor entry for an irq
 */
int set_irq_msi(unsigned int irq, struct msi_desc *entry)
{
	struct irq_desc *desc;
	unsigned long flags;

	if (irq >= NR_IRQS) {
		printk(KERN_ERR
		       "Trying to install msi data for IRQ%d\n", irq);
		return -EINVAL;
	}
	desc = irq_desc + irq;
	spin_lock_irqsave(&desc->lock, flags);
	desc->msi_desc = entry;
	if (entry)
		entry->irq = irq;
	spin_unlock_irqrestore(&desc->lock, flags);
	return 0;
}

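/*
 * Example (illustrative sketch, not part of this file): arch code that wires
 * up an MSI vector typically allocates an irq, attaches the MSI descriptor
 * and installs an edge-flow handler for it. msi_chip, msidesc and the
 * create_irq() allocator are assumptions of the sketch.
 *
 *	irq = create_irq();			// hypothetical dynamic allocation
 *	if (irq < 0)
 *		return irq;
 *	set_irq_msi(irq, msidesc);		// link descriptor and irq number
 *	set_irq_chip_and_handler(irq, &msi_chip, handle_edge_irq);
 */
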
/**
 * set_irq_chip_data - set irq chip data for an irq
 * @irq: Interrupt number
 * @data: Pointer to chip specific data
 *
 * Set the hardware irq chip data for an irq
 */
int set_irq_chip_data(unsigned int irq, void *data)
{
	struct irq_desc *desc = irq_desc + irq;
	unsigned long flags;

	if (irq >= NR_IRQS || !desc->chip) {
		printk(KERN_ERR "BUG: bad set_irq_chip_data(IRQ#%d)\n", irq);
		return -EINVAL;
	}

	spin_lock_irqsave(&desc->lock, flags);
	desc->chip_data = data;
	spin_unlock_irqrestore(&desc->lock, flags);

	return 0;
}
EXPORT_SYMBOL(set_irq_chip_data);

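/*
 * Example (illustrative sketch, not part of this file): a driver for a
 * multi-instance interrupt controller can stash its per-instance state in
 * chip_data when it registers the irqs, and fetch it back from its chip
 * callbacks. struct my_gpio_bank, the register layout and the
 * get_irq_chip_data() accessor from <linux/irq.h> are assumptions here.
 *
 *	set_irq_chip_data(irq, bank);		// bank: struct my_gpio_bank *
 *
 *	static void my_gpio_mask(unsigned int irq)
 *	{
 *		struct my_gpio_bank *bank = get_irq_chip_data(irq);
 *
 *		writel(1 << (irq - bank->irq_base), bank->base + MASK_REG);
 *	}
 */
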
/*
 * default enable function
 */
static void default_enable(unsigned int irq)
{
	struct irq_desc *desc = irq_desc + irq;

	desc->chip->unmask(irq);
	desc->status &= ~IRQ_MASKED;
}

/*
 * default disable function
 */
static void default_disable(unsigned int irq)
{
}

/*
 * default startup function
 */
static unsigned int default_startup(unsigned int irq)
{
	irq_desc[irq].chip->enable(irq);

	return 0;
}

/*
 * Fixup enable/disable function pointers
 */
void irq_chip_set_defaults(struct irq_chip *chip)
{
	if (!chip->enable)
		chip->enable = default_enable;
	if (!chip->disable)
		chip->disable = default_disable;
	if (!chip->startup)
		chip->startup = default_startup;
	if (!chip->shutdown)
		chip->shutdown = chip->disable;
	if (!chip->name)
		chip->name = chip->typename;
	if (!chip->end)
		chip->end = dummy_irq_chip.end;
}

static inline void mask_ack_irq(struct irq_desc *desc, int irq)
{
	if (desc->chip->mask_ack)
		desc->chip->mask_ack(irq);
	else {
		desc->chip->mask(irq);
		desc->chip->ack(irq);
	}
}

/**
 * handle_simple_irq - Simple and software-decoded IRQs.
 * @irq: the interrupt number
 * @desc: the interrupt description structure for this irq
 *
 * Simple interrupts are either sent from a demultiplexing interrupt
 * handler or come from hardware, where no interrupt hardware control
 * is necessary.
 *
 * Note: The caller is expected to handle the ack, clear, mask and
 * unmask issues if necessary.
 */
void fastcall
handle_simple_irq(unsigned int irq, struct irq_desc *desc)
{
	struct irqaction *action;
	irqreturn_t action_ret;
	const unsigned int cpu = smp_processor_id();

	spin_lock(&desc->lock);

	if (unlikely(desc->status & IRQ_INPROGRESS))
		goto out_unlock;
	kstat_cpu(cpu).irqs[irq]++;

	action = desc->action;
	if (unlikely(!action || (desc->status & IRQ_DISABLED))) {
		if (desc->chip->mask)
			desc->chip->mask(irq);
		desc->status &= ~(IRQ_REPLAY | IRQ_WAITING);
		desc->status |= IRQ_PENDING;
		goto out_unlock;
	}

	desc->status &= ~(IRQ_REPLAY | IRQ_WAITING | IRQ_PENDING);
	desc->status |= IRQ_INPROGRESS;
	spin_unlock(&desc->lock);

	action_ret = handle_IRQ_event(irq, action);
	if (!noirqdebug)
		note_interrupt(irq, desc, action_ret);

	spin_lock(&desc->lock);
	desc->status &= ~IRQ_INPROGRESS;
out_unlock:
	spin_unlock(&desc->lock);
}

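/*
 * Example (illustrative sketch, not part of this file): handle_simple_irq is
 * the natural flow handler for the sub-interrupts of a demultiplexing
 * ("chained") handler, which performs the hardware handshake itself. The
 * status register layout, the MY_* names and desc_handle_irq() (the small
 * helper in <linux/irq.h> that invokes desc->handle_irq) are assumptions.
 *
 *	static void my_demux_handler(unsigned int irq, struct irq_desc *desc)
 *	{
 *		unsigned long pending = readl(MY_INT_STATUS);	// hypothetical register
 *
 *		desc->chip->ack(irq);				// handshake the parent line
 *		while (pending) {
 *			unsigned int child = __ffs(pending);
 *
 *			pending &= ~(1UL << child);
 *			desc_handle_irq(MY_IRQ_BASE + child,
 *					irq_desc + MY_IRQ_BASE + child);
 *		}
 *	}
 *
 *	// parent wired up as a chained handler, children use handle_simple_irq:
 *	set_irq_chained_handler(parent_irq, my_demux_handler);
 *	set_irq_chip_and_handler(MY_IRQ_BASE + i, &dummy_irq_chip, handle_simple_irq);
 */
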
/**
 * handle_level_irq - Level type irq handler
 * @irq: the interrupt number
 * @desc: the interrupt description structure for this irq
 *
 * Level type interrupts are active as long as the hardware line has
 * the active level. This may require masking the interrupt and unmasking
 * it after the associated handler has acknowledged the device, so the
 * interrupt line is back to inactive.
 */
void fastcall
handle_level_irq(unsigned int irq, struct irq_desc *desc)
{
	unsigned int cpu = smp_processor_id();
	struct irqaction *action;
	irqreturn_t action_ret;

	spin_lock(&desc->lock);
	mask_ack_irq(desc, irq);

	if (unlikely(desc->status & IRQ_INPROGRESS))
		goto out_unlock;
	desc->status &= ~(IRQ_REPLAY | IRQ_WAITING);
	kstat_cpu(cpu).irqs[irq]++;

	/*
	 * If it's disabled or no action is available,
	 * keep it masked and get out of here
	 */
	action = desc->action;
	if (unlikely(!action || (desc->status & IRQ_DISABLED)))
		goto out_unlock;

	desc->status |= IRQ_INPROGRESS;
	spin_unlock(&desc->lock);

	action_ret = handle_IRQ_event(irq, action);
	if (!noirqdebug)
		note_interrupt(irq, desc, action_ret);

	spin_lock(&desc->lock);
	desc->status &= ~IRQ_INPROGRESS;
	if (!(desc->status & IRQ_DISABLED) && desc->chip->unmask)
		desc->chip->unmask(irq);
out_unlock:
	spin_unlock(&desc->lock);
}

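/*
 * Example (illustrative sketch, not part of this file): a level-triggered
 * interrupt controller only needs mask/unmask (and optionally ack) callbacks;
 * the flow is then wired up with handle_level_irq. The my_* names and the
 * register accesses behind them are assumptions of the sketch.
 *
 *	static struct irq_chip my_level_chip = {
 *		.name	= "MYPIC",
 *		.ack	= my_pic_mask,		// level flow: ack can simply mask
 *		.mask	= my_pic_mask,
 *		.unmask	= my_pic_unmask,
 *	};
 *
 *	set_irq_chip_and_handler(irq, &my_level_chip, handle_level_irq);
 */
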
/**
 * handle_fasteoi_irq - irq handler for transparent controllers
 * @irq: the interrupt number
 * @desc: the interrupt description structure for this irq
 *
 * Only a single callback will be issued to the chip: an ->eoi()
 * call when the interrupt has been serviced. This enables support
 * for modern forms of interrupt handlers, which handle the flow
 * details in hardware, transparently.
 */
void fastcall
handle_fasteoi_irq(unsigned int irq, struct irq_desc *desc)
{
	unsigned int cpu = smp_processor_id();
	struct irqaction *action;
	irqreturn_t action_ret;

	spin_lock(&desc->lock);

	if (unlikely(desc->status & IRQ_INPROGRESS))
		goto out;

	desc->status &= ~(IRQ_REPLAY | IRQ_WAITING);
	kstat_cpu(cpu).irqs[irq]++;

	/*
	 * If it's disabled or no action is available,
	 * then mask it and get out of here:
	 */
	action = desc->action;
	if (unlikely(!action || (desc->status & IRQ_DISABLED))) {
		desc->status |= IRQ_PENDING;
		if (desc->chip->mask)
			desc->chip->mask(irq);
		goto out;
	}

	desc->status |= IRQ_INPROGRESS;
	desc->status &= ~IRQ_PENDING;
	spin_unlock(&desc->lock);

	action_ret = handle_IRQ_event(irq, action);
	if (!noirqdebug)
		note_interrupt(irq, desc, action_ret);

	spin_lock(&desc->lock);
	desc->status &= ~IRQ_INPROGRESS;
out:
	desc->chip->eoi(irq);

	spin_unlock(&desc->lock);
}

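/*
 * Example (illustrative sketch, not part of this file): controllers that only
 * need a single end-of-interrupt notification per serviced interrupt provide
 * an ->eoi() callback and use handle_fasteoi_irq. The my_* names are
 * assumptions of the sketch.
 *
 *	static struct irq_chip my_eoi_chip = {
 *		.name	= "MYEOI",
 *		.mask	= my_mask,
 *		.unmask	= my_unmask,
 *		.eoi	= my_eoi,		// the only call made per handled irq
 *	};
 *
 *	set_irq_chip_and_handler(irq, &my_eoi_chip, handle_fasteoi_irq);
 */
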
/**
 * handle_edge_irq - edge type IRQ handler
 * @irq: the interrupt number
 * @desc: the interrupt description structure for this irq
 *
 * The interrupt occurs on the falling and/or rising edge of a hardware
 * signal. The occurrence is latched into the irq controller hardware
 * and must be acked in order to be reenabled. After the ack another
 * interrupt can happen on the same source even before the first one
 * is handled by the associated event handler. If this happens it
 * might be necessary to disable (mask) the interrupt depending on the
 * controller hardware. This requires reenabling the interrupt inside
 * of the loop which handles the interrupts which have arrived while
 * the handler was running. If all pending interrupts are handled, the
 * loop is left.
 */
void fastcall
handle_edge_irq(unsigned int irq, struct irq_desc *desc)
{
	const unsigned int cpu = smp_processor_id();

	spin_lock(&desc->lock);

	desc->status &= ~(IRQ_REPLAY | IRQ_WAITING);

	/*
	 * If we're currently running this IRQ, or it's disabled,
	 * we shouldn't process the IRQ. Mark it pending, handle
	 * the necessary masking and go out
	 */
	if (unlikely((desc->status & (IRQ_INPROGRESS | IRQ_DISABLED)) ||
		     !desc->action)) {
		desc->status |= (IRQ_PENDING | IRQ_MASKED);
		mask_ack_irq(desc, irq);
		goto out_unlock;
	}

	kstat_cpu(cpu).irqs[irq]++;

	/* Start handling the irq */
	desc->chip->ack(irq);

	/* Mark the IRQ currently in progress. */
	desc->status |= IRQ_INPROGRESS;

	do {
		struct irqaction *action = desc->action;
		irqreturn_t action_ret;

		if (unlikely(!action)) {
			desc->chip->mask(irq);
			goto out_unlock;
		}

		/*
		 * When another irq arrived while we were handling
		 * one, we could have masked the irq.
		 * Reenable it, if it was not disabled in the meantime.
		 */
		if (unlikely((desc->status &
			      (IRQ_PENDING | IRQ_MASKED | IRQ_DISABLED)) ==
			     (IRQ_PENDING | IRQ_MASKED))) {
			desc->chip->unmask(irq);
			desc->status &= ~IRQ_MASKED;
		}

		desc->status &= ~IRQ_PENDING;
		spin_unlock(&desc->lock);
		action_ret = handle_IRQ_event(irq, action);
		if (!noirqdebug)
			note_interrupt(irq, desc, action_ret);
		spin_lock(&desc->lock);

	} while ((desc->status & (IRQ_PENDING | IRQ_DISABLED)) == IRQ_PENDING);

	desc->status &= ~IRQ_INPROGRESS;
out_unlock:
	spin_unlock(&desc->lock);
}

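/*
 * Example (illustrative sketch, not part of this file): an edge-triggered
 * controller latches events, so the chip must be able to ack the latch and
 * to mask/unmask the line while the flow handler replays pending edges. The
 * my_* names are assumptions of the sketch.
 *
 *	static struct irq_chip my_edge_chip = {
 *		.name	= "MYEDGE",
 *		.ack	= my_ack,		// clear the latched edge
 *		.mask	= my_mask,
 *		.unmask	= my_unmask,
 *	};
 *
 *	set_irq_chip_and_handler(irq, &my_edge_chip, handle_edge_irq);
 */
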
#ifdef CONFIG_SMP
/**
 * handle_percpu_irq - Per CPU local irq handler
 * @irq: the interrupt number
 * @desc: the interrupt description structure for this irq
 *
 * Per CPU interrupts on SMP machines without locking requirements
 */
void fastcall
handle_percpu_irq(unsigned int irq, struct irq_desc *desc)
{
	irqreturn_t action_ret;

	kstat_this_cpu.irqs[irq]++;

	if (desc->chip->ack)
		desc->chip->ack(irq);

	action_ret = handle_IRQ_event(irq, desc->action);
	if (!noirqdebug)
		note_interrupt(irq, desc, action_ret);

	if (desc->chip->eoi)
		desc->chip->eoi(irq);
}

#endif /* CONFIG_SMP */

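/*
 * Example (illustrative sketch, not part of this file): local timer or IPI
 * style interrupts that are private to each CPU can be wired up with the
 * per-CPU flow handler; no descriptor lock is taken for them. Arch code
 * typically also marks the descriptor with IRQ_PER_CPU. LOCAL_TIMER_IRQ and
 * my_percpu_chip are assumptions of the sketch.
 *
 *	set_irq_chip_and_handler(LOCAL_TIMER_IRQ, &my_percpu_chip, handle_percpu_irq);
 *	irq_desc[LOCAL_TIMER_IRQ].status |= IRQ_PER_CPU;
 */
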
void
__set_irq_handler(unsigned int irq, irq_flow_handler_t handle, int is_chained,
		  const char *name)
{
	struct irq_desc *desc;
	unsigned long flags;

	if (irq >= NR_IRQS) {
		printk(KERN_ERR
		       "Trying to install type control for IRQ%d\n", irq);
		return;
	}

	desc = irq_desc + irq;

	if (!handle)
		handle = handle_bad_irq;
	else if (desc->chip == &no_irq_chip) {
		printk(KERN_WARNING "Trying to install %sinterrupt handler "
		       "for IRQ%d\n", is_chained ? "chained " : "", irq);
		/*
		 * Some ARM implementations install a handler for really dumb
		 * interrupt hardware without setting an irq_chip. This worked
		 * with the ARM no_irq_chip but the check in setup_irq would
		 * prevent us from setting up the interrupt at all. Switch it
		 * to dummy_irq_chip for easy transition.
		 */
		desc->chip = &dummy_irq_chip;
	}

	spin_lock_irqsave(&desc->lock, flags);

	/* Uninstall? */
	if (handle == handle_bad_irq) {
		if (desc->chip != &no_irq_chip)
			mask_ack_irq(desc, irq);
		desc->status |= IRQ_DISABLED;
		desc->depth = 1;
	}
	desc->handle_irq = handle;
	desc->name = name;

	if (handle != handle_bad_irq && is_chained) {
		desc->status &= ~IRQ_DISABLED;
		desc->status |= IRQ_NOREQUEST | IRQ_NOPROBE;
		desc->depth = 0;
		desc->chip->unmask(irq);
	}
	spin_unlock_irqrestore(&desc->lock, flags);
}

void
set_irq_chip_and_handler(unsigned int irq, struct irq_chip *chip,
			 irq_flow_handler_t handle)
{
	set_irq_chip(irq, chip);
	__set_irq_handler(irq, handle, 0, NULL);
}

void
set_irq_chip_and_handler_name(unsigned int irq, struct irq_chip *chip,
			      irq_flow_handler_t handle, const char *name)
{
	set_irq_chip(irq, chip);
	__set_irq_handler(irq, handle, 0, name);
}
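
/*
 * Example (illustrative sketch, not part of this file): putting the pieces
 * together, controller setup code registers a chip and flow handler per irq,
 * and an ordinary driver then requests the irq as usual. The MY_*/my_* names
 * and the device structure are assumptions of the sketch.
 *
 *	// controller setup (e.g. in arch/platform code):
 *	for (i = 0; i < MY_NR_IRQS; i++)
 *		set_irq_chip_and_handler_name(MY_IRQ_BASE + i, &my_level_chip,
 *					      handle_level_irq, "level");
 *
 *	// driver side, unchanged by any of the above:
 *	static irqreturn_t my_driver_isr(int irq, void *dev_id)
 *	{
 *		struct my_device *dev = dev_id;
 *
 *		// ... service the device ...
 *		return IRQ_HANDLED;
 *	}
 *
 *	err = request_irq(MY_IRQ_BASE + 3, my_driver_isr, IRQF_SHARED,
 *			  "my-device", dev);
 */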