/*
 * linux/kernel/irq/chip.c
 *
 * Copyright (C) 1992, 1998-2006 Linus Torvalds, Ingo Molnar
 * Copyright (C) 2005-2006, Thomas Gleixner, Russell King
 *
 * This file contains the core interrupt handling code, for irq-chip
 * based architectures.
 *
 * Detailed information is available in Documentation/DocBook/genericirq
 */
13 #include <linux/irq.h>
14 #include <linux/msi.h>
15 #include <linux/module.h>
16 #include <linux/interrupt.h>
17 #include <linux/kernel_stat.h>
19 #include "internals.h"
22 * irq_set_chip - set the irq chip for an irq
24 * @chip: pointer to irq chip description structure
26 int irq_set_chip(unsigned int irq
, struct irq_chip
*chip
)
29 struct irq_desc
*desc
= irq_get_desc_lock(irq
, &flags
);
37 irq_chip_set_defaults(chip
);
38 desc
->irq_data
.chip
= chip
;
39 irq_put_desc_unlock(desc
, flags
);
41 * For !CONFIG_SPARSE_IRQ make the irq show up in
42 * allocated_irqs. For the CONFIG_SPARSE_IRQ case, it is
43 * already marked, and this call is harmless.
48 EXPORT_SYMBOL(irq_set_chip
);
51 * irq_set_type - set the irq trigger type for an irq
53 * @type: IRQ_TYPE_{LEVEL,EDGE}_* value - see include/linux/irq.h
55 int irq_set_irq_type(unsigned int irq
, unsigned int type
)
58 struct irq_desc
*desc
= irq_get_desc_buslock(irq
, &flags
);
64 type
&= IRQ_TYPE_SENSE_MASK
;
65 if (type
!= IRQ_TYPE_NONE
)
66 ret
= __irq_set_trigger(desc
, irq
, type
);
67 irq_put_desc_busunlock(desc
, flags
);
70 EXPORT_SYMBOL(irq_set_irq_type
);
73 * irq_set_handler_data - set irq handler data for an irq
74 * @irq: Interrupt number
75 * @data: Pointer to interrupt specific data
77 * Set the hardware irq controller data for an irq
79 int irq_set_handler_data(unsigned int irq
, void *data
)
82 struct irq_desc
*desc
= irq_get_desc_lock(irq
, &flags
);
86 desc
->irq_data
.handler_data
= data
;
87 irq_put_desc_unlock(desc
, flags
);
90 EXPORT_SYMBOL(irq_set_handler_data
);
93 * irq_set_msi_desc - set MSI descriptor data for an irq
94 * @irq: Interrupt number
95 * @entry: Pointer to MSI descriptor data
97 * Set the MSI descriptor entry for an irq
99 int irq_set_msi_desc(unsigned int irq
, struct msi_desc
*entry
)
102 struct irq_desc
*desc
= irq_get_desc_lock(irq
, &flags
);
106 desc
->irq_data
.msi_desc
= entry
;
109 irq_put_desc_unlock(desc
, flags
);
114 * irq_set_chip_data - set irq chip data for an irq
115 * @irq: Interrupt number
116 * @data: Pointer to chip specific data
118 * Set the hardware irq chip data for an irq
120 int irq_set_chip_data(unsigned int irq
, void *data
)
123 struct irq_desc
*desc
= irq_get_desc_lock(irq
, &flags
);
127 desc
->irq_data
.chip_data
= data
;
128 irq_put_desc_unlock(desc
, flags
);
131 EXPORT_SYMBOL(irq_set_chip_data
);
133 struct irq_data
*irq_get_irq_data(unsigned int irq
)
135 struct irq_desc
*desc
= irq_to_desc(irq
);
137 return desc
? &desc
->irq_data
: NULL
;
139 EXPORT_SYMBOL_GPL(irq_get_irq_data
);
141 static void irq_state_clr_disabled(struct irq_desc
*desc
)
143 desc
->istate
&= ~IRQS_DISABLED
;
144 irqd_clear(&desc
->irq_data
, IRQD_IRQ_DISABLED
);
145 irq_compat_clr_disabled(desc
);
148 static void irq_state_set_disabled(struct irq_desc
*desc
)
150 desc
->istate
|= IRQS_DISABLED
;
151 irqd_set(&desc
->irq_data
, IRQD_IRQ_DISABLED
);
152 irq_compat_set_disabled(desc
);
155 static void irq_state_clr_masked(struct irq_desc
*desc
)
157 desc
->istate
&= ~IRQS_MASKED
;
158 irq_compat_clr_masked(desc
);
161 static void irq_state_set_masked(struct irq_desc
*desc
)
163 desc
->istate
|= IRQS_MASKED
;
164 irq_compat_set_masked(desc
);
167 int irq_startup(struct irq_desc
*desc
)
169 irq_state_clr_disabled(desc
);
172 if (desc
->irq_data
.chip
->irq_startup
) {
173 int ret
= desc
->irq_data
.chip
->irq_startup(&desc
->irq_data
);
174 irq_state_clr_masked(desc
);
182 void irq_shutdown(struct irq_desc
*desc
)
184 irq_state_set_disabled(desc
);
186 if (desc
->irq_data
.chip
->irq_shutdown
)
187 desc
->irq_data
.chip
->irq_shutdown(&desc
->irq_data
);
188 if (desc
->irq_data
.chip
->irq_disable
)
189 desc
->irq_data
.chip
->irq_disable(&desc
->irq_data
);
191 desc
->irq_data
.chip
->irq_mask(&desc
->irq_data
);
192 irq_state_set_masked(desc
);
195 void irq_enable(struct irq_desc
*desc
)
197 irq_state_clr_disabled(desc
);
198 if (desc
->irq_data
.chip
->irq_enable
)
199 desc
->irq_data
.chip
->irq_enable(&desc
->irq_data
);
201 desc
->irq_data
.chip
->irq_unmask(&desc
->irq_data
);
202 irq_state_clr_masked(desc
);
205 void irq_disable(struct irq_desc
*desc
)
207 irq_state_set_disabled(desc
);
208 if (desc
->irq_data
.chip
->irq_disable
) {
209 desc
->irq_data
.chip
->irq_disable(&desc
->irq_data
);
210 irq_state_set_masked(desc
);
214 #ifndef CONFIG_GENERIC_HARDIRQS_NO_DEPRECATED
215 /* Temporary migration helpers */
216 static void compat_irq_mask(struct irq_data
*data
)
218 data
->chip
->mask(data
->irq
);
221 static void compat_irq_unmask(struct irq_data
*data
)
223 data
->chip
->unmask(data
->irq
);
226 static void compat_irq_ack(struct irq_data
*data
)
228 data
->chip
->ack(data
->irq
);
231 static void compat_irq_mask_ack(struct irq_data
*data
)
233 data
->chip
->mask_ack(data
->irq
);
236 static void compat_irq_eoi(struct irq_data
*data
)
238 data
->chip
->eoi(data
->irq
);
241 static void compat_irq_enable(struct irq_data
*data
)
243 data
->chip
->enable(data
->irq
);
246 static void compat_irq_disable(struct irq_data
*data
)
248 data
->chip
->disable(data
->irq
);
251 static void compat_irq_shutdown(struct irq_data
*data
)
253 data
->chip
->shutdown(data
->irq
);
256 static unsigned int compat_irq_startup(struct irq_data
*data
)
258 return data
->chip
->startup(data
->irq
);
261 static int compat_irq_set_affinity(struct irq_data
*data
,
262 const struct cpumask
*dest
, bool force
)
264 return data
->chip
->set_affinity(data
->irq
, dest
);
267 static int compat_irq_set_type(struct irq_data
*data
, unsigned int type
)
269 return data
->chip
->set_type(data
->irq
, type
);
272 static int compat_irq_set_wake(struct irq_data
*data
, unsigned int on
)
274 return data
->chip
->set_wake(data
->irq
, on
);
277 static int compat_irq_retrigger(struct irq_data
*data
)
279 return data
->chip
->retrigger(data
->irq
);
282 static void compat_bus_lock(struct irq_data
*data
)
284 data
->chip
->bus_lock(data
->irq
);
287 static void compat_bus_sync_unlock(struct irq_data
*data
)
289 data
->chip
->bus_sync_unlock(data
->irq
);
294 * Fixup enable/disable function pointers
296 void irq_chip_set_defaults(struct irq_chip
*chip
)
298 #ifndef CONFIG_GENERIC_HARDIRQS_NO_DEPRECATED
300 chip
->irq_enable
= compat_irq_enable
;
302 chip
->irq_disable
= compat_irq_disable
;
304 chip
->irq_shutdown
= compat_irq_shutdown
;
306 chip
->irq_startup
= compat_irq_startup
;
308 chip
->end
= dummy_irq_chip
.end
;
310 chip
->irq_bus_lock
= compat_bus_lock
;
311 if (chip
->bus_sync_unlock
)
312 chip
->irq_bus_sync_unlock
= compat_bus_sync_unlock
;
314 chip
->irq_mask
= compat_irq_mask
;
316 chip
->irq_unmask
= compat_irq_unmask
;
318 chip
->irq_ack
= compat_irq_ack
;
320 chip
->irq_mask_ack
= compat_irq_mask_ack
;
322 chip
->irq_eoi
= compat_irq_eoi
;
323 if (chip
->set_affinity
)
324 chip
->irq_set_affinity
= compat_irq_set_affinity
;
326 chip
->irq_set_type
= compat_irq_set_type
;
328 chip
->irq_set_wake
= compat_irq_set_wake
;
330 chip
->irq_retrigger
= compat_irq_retrigger
;
334 static inline void mask_ack_irq(struct irq_desc
*desc
)
336 if (desc
->irq_data
.chip
->irq_mask_ack
)
337 desc
->irq_data
.chip
->irq_mask_ack(&desc
->irq_data
);
339 desc
->irq_data
.chip
->irq_mask(&desc
->irq_data
);
340 if (desc
->irq_data
.chip
->irq_ack
)
341 desc
->irq_data
.chip
->irq_ack(&desc
->irq_data
);
343 irq_state_set_masked(desc
);
346 void mask_irq(struct irq_desc
*desc
)
348 if (desc
->irq_data
.chip
->irq_mask
) {
349 desc
->irq_data
.chip
->irq_mask(&desc
->irq_data
);
350 irq_state_set_masked(desc
);
354 void unmask_irq(struct irq_desc
*desc
)
356 if (desc
->irq_data
.chip
->irq_unmask
) {
357 desc
->irq_data
.chip
->irq_unmask(&desc
->irq_data
);
358 irq_state_clr_masked(desc
);
363 * handle_nested_irq - Handle a nested irq from a irq thread
364 * @irq: the interrupt number
366 * Handle interrupts which are nested into a threaded interrupt
367 * handler. The handler function is called inside the calling
370 void handle_nested_irq(unsigned int irq
)
372 struct irq_desc
*desc
= irq_to_desc(irq
);
373 struct irqaction
*action
;
374 irqreturn_t action_ret
;
378 raw_spin_lock_irq(&desc
->lock
);
380 kstat_incr_irqs_this_cpu(irq
, desc
);
382 action
= desc
->action
;
383 if (unlikely(!action
|| (desc
->istate
& IRQS_DISABLED
)))
386 irq_compat_set_progress(desc
);
387 desc
->istate
|= IRQS_INPROGRESS
;
388 raw_spin_unlock_irq(&desc
->lock
);
390 action_ret
= action
->thread_fn(action
->irq
, action
->dev_id
);
392 note_interrupt(irq
, desc
, action_ret
);
394 raw_spin_lock_irq(&desc
->lock
);
395 desc
->istate
&= ~IRQS_INPROGRESS
;
396 irq_compat_clr_progress(desc
);
399 raw_spin_unlock_irq(&desc
->lock
);
401 EXPORT_SYMBOL_GPL(handle_nested_irq
);
403 static bool irq_check_poll(struct irq_desc
*desc
)
405 if (!(desc
->istate
& IRQS_POLL_INPROGRESS
))
407 return irq_wait_for_poll(desc
);
411 * handle_simple_irq - Simple and software-decoded IRQs.
412 * @irq: the interrupt number
413 * @desc: the interrupt description structure for this irq
415 * Simple interrupts are either sent from a demultiplexing interrupt
416 * handler or come from hardware, where no interrupt hardware control
419 * Note: The caller is expected to handle the ack, clear, mask and
420 * unmask issues if necessary.
423 handle_simple_irq(unsigned int irq
, struct irq_desc
*desc
)
425 raw_spin_lock(&desc
->lock
);
427 if (unlikely(desc
->istate
& IRQS_INPROGRESS
))
428 if (!irq_check_poll(desc
))
431 desc
->istate
&= ~(IRQS_REPLAY
| IRQS_WAITING
);
432 kstat_incr_irqs_this_cpu(irq
, desc
);
434 if (unlikely(!desc
->action
|| (desc
->istate
& IRQS_DISABLED
)))
437 handle_irq_event(desc
);
440 raw_spin_unlock(&desc
->lock
);
444 * handle_level_irq - Level type irq handler
445 * @irq: the interrupt number
446 * @desc: the interrupt description structure for this irq
448 * Level type interrupts are active as long as the hardware line has
449 * the active level. This may require to mask the interrupt and unmask
450 * it after the associated handler has acknowledged the device, so the
451 * interrupt line is back to inactive.
454 handle_level_irq(unsigned int irq
, struct irq_desc
*desc
)
456 raw_spin_lock(&desc
->lock
);
459 if (unlikely(desc
->istate
& IRQS_INPROGRESS
))
460 if (!irq_check_poll(desc
))
463 desc
->istate
&= ~(IRQS_REPLAY
| IRQS_WAITING
);
464 kstat_incr_irqs_this_cpu(irq
, desc
);
467 * If its disabled or no action available
468 * keep it masked and get out of here
470 if (unlikely(!desc
->action
|| (desc
->istate
& IRQS_DISABLED
)))
473 handle_irq_event(desc
);
475 if (!(desc
->istate
& (IRQS_DISABLED
| IRQS_ONESHOT
)))
478 raw_spin_unlock(&desc
->lock
);
480 EXPORT_SYMBOL_GPL(handle_level_irq
);
#ifdef CONFIG_IRQ_PREFLOW_FASTEOI
/* Invoke the optional per-descriptor preflow handler (fasteoi flow) */
static inline void preflow_handler(struct irq_desc *desc)
{
	if (desc->preflow_handler)
		desc->preflow_handler(&desc->irq_data);
}
#else
static inline void preflow_handler(struct irq_desc *desc) { }
#endif
493 * handle_fasteoi_irq - irq handler for transparent controllers
494 * @irq: the interrupt number
495 * @desc: the interrupt description structure for this irq
497 * Only a single callback will be issued to the chip: an ->eoi()
498 * call when the interrupt has been serviced. This enables support
499 * for modern forms of interrupt handlers, which handle the flow
500 * details in hardware, transparently.
503 handle_fasteoi_irq(unsigned int irq
, struct irq_desc
*desc
)
505 raw_spin_lock(&desc
->lock
);
507 if (unlikely(desc
->istate
& IRQS_INPROGRESS
))
508 if (!irq_check_poll(desc
))
511 desc
->istate
&= ~(IRQS_REPLAY
| IRQS_WAITING
);
512 kstat_incr_irqs_this_cpu(irq
, desc
);
515 * If its disabled or no action available
516 * then mask it and get out of here:
518 if (unlikely(!desc
->action
|| (desc
->istate
& IRQS_DISABLED
))) {
519 irq_compat_set_pending(desc
);
520 desc
->istate
|= IRQS_PENDING
;
525 if (desc
->istate
& IRQS_ONESHOT
)
528 preflow_handler(desc
);
529 handle_irq_event(desc
);
532 desc
->irq_data
.chip
->irq_eoi(&desc
->irq_data
);
534 raw_spin_unlock(&desc
->lock
);
537 if (!(desc
->irq_data
.chip
->flags
& IRQCHIP_EOI_IF_HANDLED
))
543 * handle_edge_irq - edge type IRQ handler
544 * @irq: the interrupt number
545 * @desc: the interrupt description structure for this irq
547 * Interrupt occures on the falling and/or rising edge of a hardware
548 * signal. The occurence is latched into the irq controller hardware
549 * and must be acked in order to be reenabled. After the ack another
550 * interrupt can happen on the same source even before the first one
551 * is handled by the associated event handler. If this happens it
552 * might be necessary to disable (mask) the interrupt depending on the
553 * controller hardware. This requires to reenable the interrupt inside
554 * of the loop which handles the interrupts which have arrived while
555 * the handler was running. If all pending interrupts are handled, the
559 handle_edge_irq(unsigned int irq
, struct irq_desc
*desc
)
561 raw_spin_lock(&desc
->lock
);
563 desc
->istate
&= ~(IRQS_REPLAY
| IRQS_WAITING
);
565 * If we're currently running this IRQ, or its disabled,
566 * we shouldn't process the IRQ. Mark it pending, handle
567 * the necessary masking and go out
569 if (unlikely((desc
->istate
& (IRQS_DISABLED
| IRQS_INPROGRESS
) ||
571 if (!irq_check_poll(desc
)) {
572 irq_compat_set_pending(desc
);
573 desc
->istate
|= IRQS_PENDING
;
578 kstat_incr_irqs_this_cpu(irq
, desc
);
580 /* Start handling the irq */
581 desc
->irq_data
.chip
->irq_ack(&desc
->irq_data
);
584 if (unlikely(!desc
->action
)) {
590 * When another irq arrived while we were handling
591 * one, we could have masked the irq.
592 * Renable it, if it was not disabled in meantime.
594 if (unlikely(desc
->istate
& IRQS_PENDING
)) {
595 if (!(desc
->istate
& IRQS_DISABLED
) &&
596 (desc
->istate
& IRQS_MASKED
))
600 handle_irq_event(desc
);
602 } while ((desc
->istate
& IRQS_PENDING
) &&
603 !(desc
->istate
& IRQS_DISABLED
));
606 raw_spin_unlock(&desc
->lock
);
610 * handle_percpu_irq - Per CPU local irq handler
611 * @irq: the interrupt number
612 * @desc: the interrupt description structure for this irq
614 * Per CPU interrupts on SMP machines without locking requirements
617 handle_percpu_irq(unsigned int irq
, struct irq_desc
*desc
)
619 struct irq_chip
*chip
= irq_desc_get_chip(desc
);
621 kstat_incr_irqs_this_cpu(irq
, desc
);
624 chip
->irq_ack(&desc
->irq_data
);
626 handle_irq_event_percpu(desc
, desc
->action
);
629 chip
->irq_eoi(&desc
->irq_data
);
633 __irq_set_handler(unsigned int irq
, irq_flow_handler_t handle
, int is_chained
,
637 struct irq_desc
*desc
= irq_get_desc_buslock(irq
, &flags
);
643 handle
= handle_bad_irq
;
645 if (WARN_ON(desc
->irq_data
.chip
== &no_irq_chip
))
650 if (handle
== handle_bad_irq
) {
651 if (desc
->irq_data
.chip
!= &no_irq_chip
)
653 irq_state_set_disabled(desc
);
656 desc
->handle_irq
= handle
;
659 if (handle
!= handle_bad_irq
&& is_chained
) {
660 irq_settings_set_noprobe(desc
);
661 irq_settings_set_norequest(desc
);
665 irq_put_desc_busunlock(desc
, flags
);
667 EXPORT_SYMBOL_GPL(__irq_set_handler
);
670 irq_set_chip_and_handler_name(unsigned int irq
, struct irq_chip
*chip
,
671 irq_flow_handler_t handle
, const char *name
)
673 irq_set_chip(irq
, chip
);
674 __irq_set_handler(irq
, handle
, 0, name
);
677 void irq_modify_status(unsigned int irq
, unsigned long clr
, unsigned long set
)
680 struct irq_desc
*desc
= irq_get_desc_lock(irq
, &flags
);
684 irq_settings_clr_and_set(desc
, clr
, set
);
686 irqd_clear(&desc
->irq_data
, IRQD_NO_BALANCING
| IRQD_PER_CPU
|
687 IRQD_TRIGGER_MASK
| IRQD_LEVEL
| IRQD_MOVE_PCNTXT
);
688 if (irq_settings_has_no_balance_set(desc
))
689 irqd_set(&desc
->irq_data
, IRQD_NO_BALANCING
);
690 if (irq_settings_is_per_cpu(desc
))
691 irqd_set(&desc
->irq_data
, IRQD_PER_CPU
);
692 if (irq_settings_can_move_pcntxt(desc
))
693 irqd_set(&desc
->irq_data
, IRQD_MOVE_PCNTXT
);
695 irqd_set(&desc
->irq_data
, irq_settings_get_trigger_mask(desc
));
697 irq_put_desc_unlock(desc
, flags
);
701 * irq_cpu_online - Invoke all irq_cpu_online functions.
703 * Iterate through all irqs and invoke the chip.irq_cpu_online()
706 void irq_cpu_online(void)
708 struct irq_desc
*desc
;
709 struct irq_chip
*chip
;
713 for_each_active_irq(irq
) {
714 desc
= irq_to_desc(irq
);
718 raw_spin_lock_irqsave(&desc
->lock
, flags
);
720 chip
= irq_data_get_irq_chip(&desc
->irq_data
);
722 if (chip
&& chip
->irq_cpu_online
)
723 chip
->irq_cpu_online(&desc
->irq_data
);
725 raw_spin_unlock_irqrestore(&desc
->lock
, flags
);
730 * irq_cpu_offline - Invoke all irq_cpu_offline functions.
732 * Iterate through all irqs and invoke the chip.irq_cpu_offline()
735 void irq_cpu_offline(void)
737 struct irq_desc
*desc
;
738 struct irq_chip
*chip
;
742 for_each_active_irq(irq
) {
743 desc
= irq_to_desc(irq
);
747 raw_spin_lock_irqsave(&desc
->lock
, flags
);
749 chip
= irq_data_get_irq_chip(&desc
->irq_data
);
751 if (chip
&& chip
->irq_cpu_offline
)
752 chip
->irq_cpu_offline(&desc
->irq_data
);
754 raw_spin_unlock_irqrestore(&desc
->lock
, flags
);