genirq: Move IRQ_REPLAY and IRQ_WAITING to core
/*
 * linux/kernel/irq/chip.c
 *
 * Copyright (C) 1992, 1998-2006 Linus Torvalds, Ingo Molnar
 * Copyright (C) 2005-2006, Thomas Gleixner, Russell King
 *
 * This file contains the core interrupt handling code, for irq-chip
 * based architectures.
 *
 * Detailed information is available in Documentation/DocBook/genericirq
 */

#include <linux/irq.h>
#include <linux/msi.h>
#include <linux/module.h>
#include <linux/interrupt.h>
#include <linux/kernel_stat.h>

#include "internals.h"

/**
 * irq_set_chip - set the irq chip for an irq
 * @irq:	irq number
 * @chip:	pointer to irq chip description structure
 */
int irq_set_chip(unsigned int irq, struct irq_chip *chip)
{
	struct irq_desc *desc = irq_to_desc(irq);
	unsigned long flags;

	if (!desc) {
		WARN(1, KERN_ERR "Trying to install chip for IRQ%d\n", irq);
		return -EINVAL;
	}

	if (!chip)
		chip = &no_irq_chip;

	raw_spin_lock_irqsave(&desc->lock, flags);
	irq_chip_set_defaults(chip);
	desc->irq_data.chip = chip;
	raw_spin_unlock_irqrestore(&desc->lock, flags);

	return 0;
}
EXPORT_SYMBOL(irq_set_chip);
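
/*
 * Illustrative sketch (not part of this commit): a hypothetical driver
 * installing a chip that uses the new irq_* callbacks. "foo_irq_chip",
 * foo_mask()/foo_unmask()/foo_ack() and FOO_IRQ_BASE are made-up names
 * used only for the example; the block is compiled out.
 */
#if 0
static struct irq_chip foo_irq_chip = {
	.name		= "FOO",
	.irq_mask	= foo_mask,	/* void foo_mask(struct irq_data *d) */
	.irq_unmask	= foo_unmask,
	.irq_ack	= foo_ack,
};

static void foo_init_irqs(void)
{
	irq_set_chip(FOO_IRQ_BASE, &foo_irq_chip);
}
#endif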

/**
 * irq_set_irq_type - set the irq trigger type for an irq
 * @irq:	irq number
 * @type:	IRQ_TYPE_{LEVEL,EDGE}_* value - see include/linux/irq.h
 */
int irq_set_irq_type(unsigned int irq, unsigned int type)
{
	struct irq_desc *desc = irq_to_desc(irq);
	unsigned long flags;
	int ret = -ENXIO;

	if (!desc) {
		printk(KERN_ERR "Trying to set irq type for IRQ%d\n", irq);
		return -ENODEV;
	}

	type &= IRQ_TYPE_SENSE_MASK;
	if (type == IRQ_TYPE_NONE)
		return 0;

	chip_bus_lock(desc);
	raw_spin_lock_irqsave(&desc->lock, flags);
	ret = __irq_set_trigger(desc, irq, type);
	raw_spin_unlock_irqrestore(&desc->lock, flags);
	chip_bus_sync_unlock(desc);
	return ret;
}
EXPORT_SYMBOL(irq_set_irq_type);
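
/*
 * Illustrative sketch (not part of this commit): a driver asking for a
 * falling-edge trigger on a GPIO interrupt. gpio_to_irq() and the GPIO
 * number are assumptions for the example; the block is compiled out.
 */
#if 0
static int foo_setup_wake_pin(unsigned int gpio)
{
	return irq_set_irq_type(gpio_to_irq(gpio), IRQ_TYPE_EDGE_FALLING);
}
#endif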

/**
 * irq_set_handler_data - set irq handler data for an irq
 * @irq:	Interrupt number
 * @data:	Pointer to interrupt specific data
 *
 * Set the hardware irq controller data for an irq
 */
int irq_set_handler_data(unsigned int irq, void *data)
{
	struct irq_desc *desc = irq_to_desc(irq);
	unsigned long flags;

	if (!desc) {
		printk(KERN_ERR
		       "Trying to install controller data for IRQ%d\n", irq);
		return -EINVAL;
	}

	raw_spin_lock_irqsave(&desc->lock, flags);
	desc->irq_data.handler_data = data;
	raw_spin_unlock_irqrestore(&desc->lock, flags);
	return 0;
}
EXPORT_SYMBOL(irq_set_handler_data);
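
/*
 * Illustrative sketch (not part of this commit): handler data is typically
 * used by a chained demultiplex handler to find its controller state.
 * "struct foo_bank" and foo_demux_handler() are hypothetical; the block is
 * compiled out.
 */
#if 0
static void foo_demux_handler(unsigned int irq, struct irq_desc *desc)
{
	struct foo_bank *bank = desc->irq_data.handler_data;

	/* ... read bank->status and dispatch the child interrupts ... */
}

static void foo_wire_demux(unsigned int parent_irq, struct foo_bank *bank)
{
	irq_set_handler_data(parent_irq, bank);
	__set_irq_handler(parent_irq, foo_demux_handler, 1, "foo-demux");
}
#endif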

/**
 * irq_set_msi_desc - set MSI descriptor data for an irq
 * @irq:	Interrupt number
 * @entry:	Pointer to MSI descriptor data
 *
 * Set the MSI descriptor entry for an irq
 */
int irq_set_msi_desc(unsigned int irq, struct msi_desc *entry)
{
	struct irq_desc *desc = irq_to_desc(irq);
	unsigned long flags;

	if (!desc) {
		printk(KERN_ERR
		       "Trying to install msi data for IRQ%d\n", irq);
		return -EINVAL;
	}

	raw_spin_lock_irqsave(&desc->lock, flags);
	desc->irq_data.msi_desc = entry;
	if (entry)
		entry->irq = irq;
	raw_spin_unlock_irqrestore(&desc->lock, flags);
	return 0;
}

/**
 * irq_set_chip_data - set irq chip data for an irq
 * @irq:	Interrupt number
 * @data:	Pointer to chip specific data
 *
 * Set the hardware irq chip data for an irq
 */
int irq_set_chip_data(unsigned int irq, void *data)
{
	struct irq_desc *desc = irq_to_desc(irq);
	unsigned long flags;

	if (!desc) {
		printk(KERN_ERR
		       "Trying to install chip data for IRQ%d\n", irq);
		return -EINVAL;
	}

	if (!desc->irq_data.chip) {
		printk(KERN_ERR "BUG: bad set_irq_chip_data(IRQ#%d)\n", irq);
		return -EINVAL;
	}

	raw_spin_lock_irqsave(&desc->lock, flags);
	desc->irq_data.chip_data = data;
	raw_spin_unlock_irqrestore(&desc->lock, flags);

	return 0;
}
EXPORT_SYMBOL(irq_set_chip_data);
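
/*
 * Illustrative sketch (not part of this commit): chip data usually carries
 * per-controller state (e.g. a register base) that the irq_chip callbacks
 * fetch back via irq_data_get_irq_chip_data(). The register names and
 * FOO_IRQ_BASE are hypothetical; the block is compiled out.
 */
#if 0
static void foo_mask(struct irq_data *d)
{
	void __iomem *base = irq_data_get_irq_chip_data(d);

	writel(BIT(d->irq - FOO_IRQ_BASE), base + FOO_MASK_SET);
}

static void foo_register_irq(unsigned int irq, void __iomem *base)
{
	irq_set_chip_data(irq, base);
}
#endif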

struct irq_data *irq_get_irq_data(unsigned int irq)
{
	struct irq_desc *desc = irq_to_desc(irq);

	return desc ? &desc->irq_data : NULL;
}
EXPORT_SYMBOL_GPL(irq_get_irq_data);

int irq_startup(struct irq_desc *desc)
{
	desc->status &= ~IRQ_DISABLED;
	desc->depth = 0;

	if (desc->irq_data.chip->irq_startup) {
		int ret = desc->irq_data.chip->irq_startup(&desc->irq_data);
		desc->status &= ~IRQ_MASKED;
		return ret;
	}

	irq_enable(desc);
	return 0;
}

void irq_shutdown(struct irq_desc *desc)
{
	desc->status |= IRQ_DISABLED;
	desc->depth = 1;
	if (desc->irq_data.chip->irq_shutdown)
		desc->irq_data.chip->irq_shutdown(&desc->irq_data);
	if (desc->irq_data.chip->irq_disable)
		desc->irq_data.chip->irq_disable(&desc->irq_data);
	else
		desc->irq_data.chip->irq_mask(&desc->irq_data);
	desc->status |= IRQ_MASKED;
}

void irq_enable(struct irq_desc *desc)
{
	desc->status &= ~IRQ_DISABLED;
	if (desc->irq_data.chip->irq_enable)
		desc->irq_data.chip->irq_enable(&desc->irq_data);
	else
		desc->irq_data.chip->irq_unmask(&desc->irq_data);
	desc->status &= ~IRQ_MASKED;
}

void irq_disable(struct irq_desc *desc)
{
	desc->status |= IRQ_DISABLED;
	if (desc->irq_data.chip->irq_disable) {
		desc->irq_data.chip->irq_disable(&desc->irq_data);
		desc->status |= IRQ_MASKED;
	}
}

#ifndef CONFIG_GENERIC_HARDIRQS_NO_DEPRECATED
/* Temporary migration helpers */
static void compat_irq_mask(struct irq_data *data)
{
	data->chip->mask(data->irq);
}

static void compat_irq_unmask(struct irq_data *data)
{
	data->chip->unmask(data->irq);
}

static void compat_irq_ack(struct irq_data *data)
{
	data->chip->ack(data->irq);
}

static void compat_irq_mask_ack(struct irq_data *data)
{
	data->chip->mask_ack(data->irq);
}

static void compat_irq_eoi(struct irq_data *data)
{
	data->chip->eoi(data->irq);
}

static void compat_irq_enable(struct irq_data *data)
{
	data->chip->enable(data->irq);
}

static void compat_irq_disable(struct irq_data *data)
{
	data->chip->disable(data->irq);
}

static void compat_irq_shutdown(struct irq_data *data)
{
	data->chip->shutdown(data->irq);
}

static unsigned int compat_irq_startup(struct irq_data *data)
{
	return data->chip->startup(data->irq);
}

static int compat_irq_set_affinity(struct irq_data *data,
				   const struct cpumask *dest, bool force)
{
	return data->chip->set_affinity(data->irq, dest);
}

static int compat_irq_set_type(struct irq_data *data, unsigned int type)
{
	return data->chip->set_type(data->irq, type);
}

static int compat_irq_set_wake(struct irq_data *data, unsigned int on)
{
	return data->chip->set_wake(data->irq, on);
}

static int compat_irq_retrigger(struct irq_data *data)
{
	return data->chip->retrigger(data->irq);
}

static void compat_bus_lock(struct irq_data *data)
{
	data->chip->bus_lock(data->irq);
}

static void compat_bus_sync_unlock(struct irq_data *data)
{
	data->chip->bus_sync_unlock(data->irq);
}
#endif

/*
 * Fixup enable/disable function pointers
 */
void irq_chip_set_defaults(struct irq_chip *chip)
{
#ifndef CONFIG_GENERIC_HARDIRQS_NO_DEPRECATED
	if (chip->enable)
		chip->irq_enable = compat_irq_enable;
	if (chip->disable)
		chip->irq_disable = compat_irq_disable;
	if (chip->shutdown)
		chip->irq_shutdown = compat_irq_shutdown;
	if (chip->startup)
		chip->irq_startup = compat_irq_startup;
	if (!chip->end)
		chip->end = dummy_irq_chip.end;
	if (chip->bus_lock)
		chip->irq_bus_lock = compat_bus_lock;
	if (chip->bus_sync_unlock)
		chip->irq_bus_sync_unlock = compat_bus_sync_unlock;
	if (chip->mask)
		chip->irq_mask = compat_irq_mask;
	if (chip->unmask)
		chip->irq_unmask = compat_irq_unmask;
	if (chip->ack)
		chip->irq_ack = compat_irq_ack;
	if (chip->mask_ack)
		chip->irq_mask_ack = compat_irq_mask_ack;
	if (chip->eoi)
		chip->irq_eoi = compat_irq_eoi;
	if (chip->set_affinity)
		chip->irq_set_affinity = compat_irq_set_affinity;
	if (chip->set_type)
		chip->irq_set_type = compat_irq_set_type;
	if (chip->set_wake)
		chip->irq_set_wake = compat_irq_set_wake;
	if (chip->retrigger)
		chip->irq_retrigger = compat_irq_retrigger;
#endif
}
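
/*
 * Illustrative sketch (not part of this commit): a not-yet-converted chip
 * that still fills in the deprecated callbacks. When irq_set_chip() runs
 * irq_chip_set_defaults() on it, the compat_* wrappers above are installed
 * so the core can keep calling the new irq_* methods. Names are
 * hypothetical; the block is compiled out.
 */
#if 0
static struct irq_chip legacy_chip = {
	.name		= "LEGACY",
	.mask		= legacy_mask,		/* old style: takes an irq number */
	.unmask		= legacy_unmask,
	.ack		= legacy_ack,
};

static void legacy_init(unsigned int irq)
{
	/* wraps .mask/.unmask/.ack via compat_irq_*() */
	irq_set_chip(irq, &legacy_chip);
}
#endif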

static inline void mask_ack_irq(struct irq_desc *desc)
{
	if (desc->irq_data.chip->irq_mask_ack)
		desc->irq_data.chip->irq_mask_ack(&desc->irq_data);
	else {
		desc->irq_data.chip->irq_mask(&desc->irq_data);
		if (desc->irq_data.chip->irq_ack)
			desc->irq_data.chip->irq_ack(&desc->irq_data);
	}
	desc->status |= IRQ_MASKED;
}

static inline void mask_irq(struct irq_desc *desc)
{
	if (desc->irq_data.chip->irq_mask) {
		desc->irq_data.chip->irq_mask(&desc->irq_data);
		desc->status |= IRQ_MASKED;
	}
}

static inline void unmask_irq(struct irq_desc *desc)
{
	if (desc->irq_data.chip->irq_unmask) {
		desc->irq_data.chip->irq_unmask(&desc->irq_data);
		desc->status &= ~IRQ_MASKED;
	}
}

/*
 * handle_nested_irq - Handle a nested irq from an irq thread
 * @irq:	the interrupt number
 *
 * Handle interrupts which are nested into a threaded interrupt
 * handler. The handler function is called inside the calling
 * thread's context.
 */
void handle_nested_irq(unsigned int irq)
{
	struct irq_desc *desc = irq_to_desc(irq);
	struct irqaction *action;
	irqreturn_t action_ret;

	might_sleep();

	raw_spin_lock_irq(&desc->lock);

	kstat_incr_irqs_this_cpu(irq, desc);

	action = desc->action;
	if (unlikely(!action || (desc->status & IRQ_DISABLED)))
		goto out_unlock;

	irq_compat_set_progress(desc);
	desc->istate |= IRQS_INPROGRESS;
	raw_spin_unlock_irq(&desc->lock);

	action_ret = action->thread_fn(action->irq, action->dev_id);
	if (!noirqdebug)
		note_interrupt(irq, desc, action_ret);

	raw_spin_lock_irq(&desc->lock);
	desc->istate &= ~IRQS_INPROGRESS;
	irq_compat_clr_progress(desc);

out_unlock:
	raw_spin_unlock_irq(&desc->lock);
}
EXPORT_SYMBOL_GPL(handle_nested_irq);
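
/*
 * Illustrative sketch (not part of this commit): a typical user is an I2C
 * GPIO expander whose parent interrupt is requested with a threaded
 * handler; the thread reads the expander's status register (which may
 * sleep) and dispatches the child interrupts with handle_nested_irq().
 * "struct expander" and its fields are hypothetical; compiled out.
 */
#if 0
static irqreturn_t expander_irq_thread(int irq, void *dev_id)
{
	struct expander *chip = dev_id;
	unsigned long pending = expander_read_status(chip);	/* may sleep */
	int bit;

	for_each_set_bit(bit, &pending, chip->ngpio)
		handle_nested_irq(chip->irq_base + bit);

	return IRQ_HANDLED;
}

static int expander_setup_irq(struct expander *chip, int parent_irq)
{
	return request_threaded_irq(parent_irq, NULL, expander_irq_thread,
				    IRQF_ONESHOT, "expander", chip);
}
#endif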

static bool irq_check_poll(struct irq_desc *desc)
{
	if (!(desc->istate & IRQS_POLL_INPROGRESS))
		return false;
	return irq_wait_for_poll(desc);
}

/**
 * handle_simple_irq - Simple and software-decoded IRQs.
 * @irq:	the interrupt number
 * @desc:	the interrupt description structure for this irq
 *
 * Simple interrupts are either sent from a demultiplexing interrupt
 * handler or come from hardware, where no interrupt hardware control
 * is necessary.
 *
 * Note: The caller is expected to handle the ack, clear, mask and
 * unmask issues if necessary.
 */
void
handle_simple_irq(unsigned int irq, struct irq_desc *desc)
{
	raw_spin_lock(&desc->lock);

	if (unlikely(desc->istate & IRQS_INPROGRESS))
		if (!irq_check_poll(desc))
			goto out_unlock;

	desc->istate &= ~(IRQS_REPLAY | IRQS_WAITING);
	kstat_incr_irqs_this_cpu(irq, desc);

	if (unlikely(!desc->action || (desc->status & IRQ_DISABLED)))
		goto out_unlock;

	handle_irq_event(desc);

out_unlock:
	raw_spin_unlock(&desc->lock);
}
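
/*
 * Illustrative sketch (not part of this commit): handle_simple_irq() is the
 * usual flow handler for the software-decoded children of a demultiplexing
 * handler. FOO_* constants and foo_read_pending() are hypothetical; the
 * block is compiled out.
 */
#if 0
static void foo_child_irq_init(unsigned int child_irq)
{
	set_irq_chip_and_handler(child_irq, &dummy_irq_chip, handle_simple_irq);
}

static void foo_demux(unsigned int irq, struct irq_desc *desc)
{
	unsigned long status = foo_read_pending();
	int bit;

	for_each_set_bit(bit, &status, FOO_NR_CHILD_IRQS)
		generic_handle_irq(FOO_CHILD_IRQ_BASE + bit);
}
#endif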

/**
 * handle_level_irq - Level type irq handler
 * @irq:	the interrupt number
 * @desc:	the interrupt description structure for this irq
 *
 * Level type interrupts are active as long as the hardware line has
 * the active level. This may require masking the interrupt and unmasking
 * it after the associated handler has acknowledged the device, so that
 * the interrupt line is back to inactive.
 */
void
handle_level_irq(unsigned int irq, struct irq_desc *desc)
{
	raw_spin_lock(&desc->lock);
	mask_ack_irq(desc);

	if (unlikely(desc->istate & IRQS_INPROGRESS))
		if (!irq_check_poll(desc))
			goto out_unlock;

	desc->istate &= ~(IRQS_REPLAY | IRQS_WAITING);
	kstat_incr_irqs_this_cpu(irq, desc);

	/*
	 * If it's disabled or no action is available,
	 * keep it masked and get out of here
	 */
	if (unlikely(!desc->action || (desc->status & IRQ_DISABLED)))
		goto out_unlock;

	handle_irq_event(desc);

	if (!(desc->status & IRQ_DISABLED) && !(desc->istate & IRQS_ONESHOT))
		unmask_irq(desc);
out_unlock:
	raw_spin_unlock(&desc->lock);
}
EXPORT_SYMBOL_GPL(handle_level_irq);
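
/*
 * Illustrative sketch (not part of this commit): wiring a level-triggered
 * line to this flow handler, as an interrupt controller driver typically
 * does at init time. foo_irq_chip is hypothetical; compiled out.
 */
#if 0
static void foo_pic_init_one(unsigned int irq)
{
	set_irq_chip_and_handler_name(irq, &foo_irq_chip,
				      handle_level_irq, "level");
}
#endif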

/**
 * handle_fasteoi_irq - irq handler for transparent controllers
 * @irq:	the interrupt number
 * @desc:	the interrupt description structure for this irq
 *
 * Only a single callback will be issued to the chip: an ->eoi()
 * call when the interrupt has been serviced. This enables support
 * for modern forms of interrupt handlers, which handle the flow
 * details in hardware, transparently.
 */
void
handle_fasteoi_irq(unsigned int irq, struct irq_desc *desc)
{
	raw_spin_lock(&desc->lock);

	if (unlikely(desc->istate & IRQS_INPROGRESS))
		if (!irq_check_poll(desc))
			goto out;

	desc->istate &= ~(IRQS_REPLAY | IRQS_WAITING);
	kstat_incr_irqs_this_cpu(irq, desc);

	/*
	 * If it's disabled or no action is available,
	 * then mask it and get out of here:
	 */
	if (unlikely(!desc->action || (desc->status & IRQ_DISABLED))) {
		desc->status |= IRQ_PENDING;
		mask_irq(desc);
		goto out;
	}
	handle_irq_event(desc);
out:
	desc->irq_data.chip->irq_eoi(&desc->irq_data);
	raw_spin_unlock(&desc->lock);
}
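
/*
 * Illustrative sketch (not part of this commit): the fasteoi flow suits
 * controllers that only need an end-of-interrupt callback after service.
 * The chip and its callbacks are hypothetical; compiled out.
 */
#if 0
static struct irq_chip fasteoi_chip = {
	.name		= "FASTEOI",
	.irq_mask	= foo_mask,
	.irq_unmask	= foo_unmask,
	.irq_eoi	= foo_eoi,	/* required by handle_fasteoi_irq() */
};

static void foo_fasteoi_init(unsigned int irq)
{
	set_irq_chip_and_handler(irq, &fasteoi_chip, handle_fasteoi_irq);
}
#endif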

/**
 * handle_edge_irq - edge type IRQ handler
 * @irq:	the interrupt number
 * @desc:	the interrupt description structure for this irq
 *
 * Interrupts occur on the falling and/or rising edge of a hardware
 * signal. The occurrence is latched into the irq controller hardware
 * and must be acked in order to be re-enabled. After the ack another
 * interrupt can happen on the same source even before the first one
 * is handled by the associated event handler. If this happens it
 * might be necessary to disable (mask) the interrupt depending on the
 * controller hardware. This requires re-enabling the interrupt inside
 * of the loop which handles the interrupts which have arrived while
 * the handler was running. If all pending interrupts are handled, the
 * loop is left.
 */
void
handle_edge_irq(unsigned int irq, struct irq_desc *desc)
{
	raw_spin_lock(&desc->lock);

	desc->istate &= ~(IRQS_REPLAY | IRQS_WAITING);
	/*
	 * If we're currently running this IRQ, or it's disabled,
	 * we shouldn't process the IRQ. Mark it pending, handle
	 * the necessary masking and go out
	 */
	if (unlikely((desc->istate & (IRQS_INPROGRESS) ||
		      (desc->status & IRQ_DISABLED) || !desc->action))) {
		if (!irq_check_poll(desc)) {
			desc->status |= IRQ_PENDING;
			mask_ack_irq(desc);
			goto out_unlock;
		}
	}
	kstat_incr_irqs_this_cpu(irq, desc);

	/* Start handling the irq */
	desc->irq_data.chip->irq_ack(&desc->irq_data);

	do {
		if (unlikely(!desc->action)) {
			mask_irq(desc);
			goto out_unlock;
		}

		/*
		 * When another irq arrived while we were handling
		 * one, we could have masked the irq.
		 * Re-enable it, if it was not disabled in the meantime.
		 */
		if (unlikely((desc->status &
			      (IRQ_PENDING | IRQ_MASKED | IRQ_DISABLED)) ==
			     (IRQ_PENDING | IRQ_MASKED))) {
			unmask_irq(desc);
		}

		handle_irq_event(desc);

	} while ((desc->status & (IRQ_PENDING | IRQ_DISABLED)) == IRQ_PENDING);

out_unlock:
	raw_spin_unlock(&desc->lock);
}
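
/*
 * Illustrative sketch (not part of this commit): an edge-triggered GPIO
 * line whose controller latches the event and needs an ack, wired to this
 * flow handler. foo_irq_chip is hypothetical; compiled out.
 */
#if 0
static void foo_gpio_irq_init(unsigned int irq)
{
	set_irq_chip_and_handler_name(irq, &foo_irq_chip,
				      handle_edge_irq, "edge");
	irq_set_irq_type(irq, IRQ_TYPE_EDGE_BOTH);
}
#endif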

/**
 * handle_percpu_irq - Per CPU local irq handler
 * @irq:	the interrupt number
 * @desc:	the interrupt description structure for this irq
 *
 * Per CPU interrupts on SMP machines without locking requirements
 */
void
handle_percpu_irq(unsigned int irq, struct irq_desc *desc)
{
	struct irq_chip *chip = irq_desc_get_chip(desc);

	kstat_incr_irqs_this_cpu(irq, desc);

	if (chip->irq_ack)
		chip->irq_ack(&desc->irq_data);

	handle_irq_event_percpu(desc, desc->action);

	if (chip->irq_eoi)
		chip->irq_eoi(&desc->irq_data);
}

void
__set_irq_handler(unsigned int irq, irq_flow_handler_t handle, int is_chained,
		  const char *name)
{
	struct irq_desc *desc = irq_to_desc(irq);
	unsigned long flags;

	if (!desc) {
		printk(KERN_ERR
		       "Trying to install type control for IRQ%d\n", irq);
		return;
	}

	if (!handle)
		handle = handle_bad_irq;
	else if (desc->irq_data.chip == &no_irq_chip) {
		printk(KERN_WARNING "Trying to install %sinterrupt handler "
		       "for IRQ%d\n", is_chained ? "chained " : "", irq);
		/*
		 * Some ARM implementations install a handler for really dumb
		 * interrupt hardware without setting an irq_chip. This worked
		 * with the ARM no_irq_chip but the check in setup_irq would
		 * prevent us from setting up the interrupt at all. Switch it
		 * to dummy_irq_chip for easy transition.
		 */
		desc->irq_data.chip = &dummy_irq_chip;
	}

	chip_bus_lock(desc);
	raw_spin_lock_irqsave(&desc->lock, flags);

	/* Uninstall? */
	if (handle == handle_bad_irq) {
		if (desc->irq_data.chip != &no_irq_chip)
			mask_ack_irq(desc);
		desc->status |= IRQ_DISABLED;
		desc->depth = 1;
	}
	desc->handle_irq = handle;
	desc->name = name;

	if (handle != handle_bad_irq && is_chained) {
		desc->status |= IRQ_NOREQUEST | IRQ_NOPROBE;
		irq_startup(desc);
	}
	raw_spin_unlock_irqrestore(&desc->lock, flags);
	chip_bus_sync_unlock(desc);
}
EXPORT_SYMBOL_GPL(__set_irq_handler);

void
set_irq_chip_and_handler(unsigned int irq, struct irq_chip *chip,
			 irq_flow_handler_t handle)
{
	irq_set_chip(irq, chip);
	__set_irq_handler(irq, handle, 0, NULL);
}

void
set_irq_chip_and_handler_name(unsigned int irq, struct irq_chip *chip,
			      irq_flow_handler_t handle, const char *name)
{
	irq_set_chip(irq, chip);
	__set_irq_handler(irq, handle, 0, name);
}
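
/*
 * Illustrative sketch (not part of this commit): the combined helpers are
 * the usual entry point for interrupt controller drivers that set up a
 * whole bank of irqs in one loop. FOO_* constants and foo_irq_chip are
 * hypothetical; the block is compiled out.
 */
#if 0
static void foo_pic_init(void)
{
	unsigned int irq;

	for (irq = FOO_IRQ_BASE; irq < FOO_IRQ_BASE + FOO_NR_IRQS; irq++)
		set_irq_chip_and_handler_name(irq, &foo_irq_chip,
					      handle_level_irq, "foo");
}
#endif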

void irq_modify_status(unsigned int irq, unsigned long clr, unsigned long set)
{
	struct irq_desc *desc = irq_to_desc(irq);
	unsigned long flags;

	if (!desc)
		return;

	/* Sanitize flags */
	set &= IRQF_MODIFY_MASK;
	clr &= IRQF_MODIFY_MASK;

	raw_spin_lock_irqsave(&desc->lock, flags);
	desc->status &= ~clr;
	desc->status |= set;
	raw_spin_unlock_irqrestore(&desc->lock, flags);
}
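
/*
 * Illustrative sketch (not part of this commit): irq_modify_status() is the
 * building block behind the noprobe/norequest style helpers; a driver could
 * mark a line as neither probeable nor requestable like this. Compiled out.
 */
#if 0
static void foo_reserve_irq(unsigned int irq)
{
	irq_modify_status(irq, 0, IRQ_NOREQUEST | IRQ_NOPROBE);
}
#endif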