#ifndef _LINUX_INTERRUPT_H
#define _LINUX_INTERRUPT_H

#include <linux/kernel.h>
#include <linux/linkage.h>
#include <linux/bitops.h>
#include <linux/preempt.h>
#include <linux/cpumask.h>
#include <linux/irqreturn.h>
#include <linux/irqnr.h>
#include <linux/hardirq.h>
#include <linux/irqflags.h>
#include <linux/smp.h>
#include <linux/percpu.h>
#include <linux/hrtimer.h>

#include <asm/atomic.h>
#include <asm/ptrace.h>
#include <asm/system.h>
/*
 * These correspond to the IORESOURCE_IRQ_* defines in
 * linux/ioport.h to select the interrupt line behaviour. When
 * requesting an interrupt without specifying an IRQF_TRIGGER, the
 * setting should be assumed to be "as already configured", which
 * may be as per machine or firmware initialisation.
 */
#define IRQF_TRIGGER_NONE	0x00000000
#define IRQF_TRIGGER_RISING	0x00000001
#define IRQF_TRIGGER_FALLING	0x00000002
#define IRQF_TRIGGER_HIGH	0x00000004
#define IRQF_TRIGGER_LOW	0x00000008
#define IRQF_TRIGGER_MASK	(IRQF_TRIGGER_HIGH | IRQF_TRIGGER_LOW | \
				 IRQF_TRIGGER_RISING | IRQF_TRIGGER_FALLING)
#define IRQF_TRIGGER_PROBE	0x00000010
/*
 * These flags are used only by the kernel as part of the
 * irq handling routines.
 *
 * IRQF_DISABLED - keep irqs disabled when calling the action handler.
 *                 DEPRECATED. This flag is a NOOP and scheduled to be removed
 * IRQF_SAMPLE_RANDOM - irq is used to feed the random generator
 * IRQF_SHARED - allow sharing the irq among several devices
 * IRQF_PROBE_SHARED - set by callers when they expect sharing mismatches to occur
 * IRQF_TIMER - Flag to mark this interrupt as a timer interrupt
 * IRQF_PERCPU - Interrupt is per cpu
 * IRQF_NOBALANCING - Flag to exclude this interrupt from irq balancing
 * IRQF_IRQPOLL - Interrupt is used for polling (only the interrupt that is
 *                registered first on a shared interrupt line is considered, for
 *                performance reasons)
 * IRQF_ONESHOT - Interrupt is not re-enabled after the hardirq handler finished.
 *                Used by threaded interrupts which need to keep the
 *                irq line disabled until the threaded handler has been run.
 */
#define IRQF_DISABLED		0x00000020
#define IRQF_SAMPLE_RANDOM	0x00000040
#define IRQF_SHARED		0x00000080
#define IRQF_PROBE_SHARED	0x00000100
#define IRQF_TIMER		0x00000200
#define IRQF_PERCPU		0x00000400
#define IRQF_NOBALANCING	0x00000800
#define IRQF_IRQPOLL		0x00001000
#define IRQF_ONESHOT		0x00002000
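
/*
 * Example (illustrative sketch, not part of this header): a driver for an
 * active-low, shared interrupt line might combine the flags above when
 * requesting its irq.  The handler, name and cookie below are hypothetical.
 *
 *	ret = request_irq(irq, my_handler,
 *			  IRQF_TRIGGER_LOW | IRQF_SHARED,
 *			  "my-device", my_dev);
 *	if (ret)
 *		return ret;
 *
 * With IRQF_SHARED the same my_dev cookie must later be passed to
 * free_irq() so that the right action is removed from the shared line.
 */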
/*
 * Bits used by threaded handlers:
 * IRQTF_RUNTHREAD - signals that the interrupt handler thread should run
 * IRQTF_DIED      - handler thread died
 * IRQTF_WARNED    - warning "IRQ_WAKE_THREAD w/o thread_fn" has been printed
 * IRQTF_AFFINITY  - irq thread is requested to adjust affinity
 */
enum {
	IRQTF_RUNTHREAD,
	IRQTF_DIED,
	IRQTF_WARNED,
	IRQTF_AFFINITY,
};

/*
 * These values can be returned by request_any_context_irq() and
 * describe the context the interrupt will be run in.
 *
 * IRQC_IS_HARDIRQ - interrupt runs in hardirq context
 * IRQC_IS_NESTED - interrupt runs in a nested threaded context
 */
enum {
	IRQC_IS_HARDIRQ	= 0,
	IRQC_IS_NESTED,
};
typedef irqreturn_t (*irq_handler_t)(int, void *);
/**
 * struct irqaction - per interrupt action descriptor
 * @handler:	interrupt handler function
 * @flags:	flags (see IRQF_* above)
 * @name:	name of the device
 * @dev_id:	cookie to identify the device
 * @next:	pointer to the next irqaction for shared interrupts
 * @irq:	interrupt number
 * @dir:	pointer to the proc/irq/NN/name entry
 * @thread_fn:	interrupt handler function for threaded interrupts
 * @thread:	thread pointer for threaded interrupts
 * @thread_flags:	flags related to @thread
 */
struct irqaction {
	irq_handler_t handler;
	unsigned long flags;
	const char *name;
	void *dev_id;
	struct irqaction *next;
	int irq;
	struct proc_dir_entry *dir;
	irq_handler_t thread_fn;
	struct task_struct *thread;
	unsigned long thread_flags;
};
extern irqreturn_t no_action(int cpl, void *dev_id);
#ifdef CONFIG_GENERIC_HARDIRQS
extern int __must_check
request_threaded_irq(unsigned int irq, irq_handler_t handler,
		     irq_handler_t thread_fn,
		     unsigned long flags, const char *name, void *dev);

static inline int __must_check
request_irq(unsigned int irq, irq_handler_t handler, unsigned long flags,
	    const char *name, void *dev)
{
	return request_threaded_irq(irq, handler, NULL, flags, name, dev);
}
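
/*
 * Example (illustrative sketch): a typical split handler for a slow bus
 * device.  The hardirq part only checks/acks the interrupt and returns
 * IRQ_WAKE_THREAD; the real work runs in the threaded handler.  All names
 * here are hypothetical.
 *
 *	static irqreturn_t my_quick_check(int irq, void *dev_id)
 *	{
 *		if (!my_device_raised_irq(dev_id))
 *			return IRQ_NONE;
 *		return IRQ_WAKE_THREAD;
 *	}
 *
 *	static irqreturn_t my_thread_fn(int irq, void *dev_id)
 *	{
 *		my_device_handle_event(dev_id);
 *		return IRQ_HANDLED;
 *	}
 *
 *	err = request_threaded_irq(irq, my_quick_check, my_thread_fn,
 *				   IRQF_ONESHOT, "my-device", my_dev);
 *
 * IRQF_ONESHOT keeps the line masked until my_thread_fn() has finished.
 */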
extern int __must_check
request_any_context_irq(unsigned int irq, irq_handler_t handler,
			unsigned long flags, const char *name, void *dev_id);

extern void exit_irq_thread(void);
#else
extern int __must_check
request_irq(unsigned int irq, irq_handler_t handler, unsigned long flags,
	    const char *name, void *dev);
/*
 * Special function to avoid ifdeffery in kernel/irq/devres.c which
 * gets magically built by GENERIC_HARDIRQS=n architectures (sparc,
 * m68k). I really love these $@%#!* obvious Makefile references:
 * ../../../kernel/irq/devres.o
 */
static inline int __must_check
request_threaded_irq(unsigned int irq, irq_handler_t handler,
		     irq_handler_t thread_fn,
		     unsigned long flags, const char *name, void *dev)
{
	return request_irq(irq, handler, flags, name, dev);
}
static inline int __must_check
request_any_context_irq(unsigned int irq, irq_handler_t handler,
			unsigned long flags, const char *name, void *dev_id)
{
	return request_irq(irq, handler, flags, name, dev_id);
}

static inline void exit_irq_thread(void) { }
#endif
extern void free_irq(unsigned int, void *);

struct device;
extern int __must_check
devm_request_threaded_irq(struct device *dev, unsigned int irq,
			  irq_handler_t handler, irq_handler_t thread_fn,
			  unsigned long irqflags, const char *devname,
			  void *dev_id);
static inline int __must_check
devm_request_irq(struct device *dev, unsigned int irq, irq_handler_t handler,
		 unsigned long irqflags, const char *devname, void *dev_id)
{
	return devm_request_threaded_irq(dev, irq, handler, NULL, irqflags,
					 devname, dev_id);
}
extern void devm_free_irq(struct device *dev, unsigned int irq, void *dev_id);
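
/*
 * Example (illustrative sketch): with the devm_* variants the irq is
 * released automatically when the device is detached, so no explicit
 * free_irq() is needed in the error or remove paths.  Names are
 * hypothetical.
 *
 *	static int my_probe(struct platform_device *pdev)
 *	{
 *		...
 *		err = devm_request_irq(&pdev->dev, irq, my_handler, 0,
 *				       dev_name(&pdev->dev), my_dev);
 *		if (err)
 *			return err;
 *		...
 *	}
 */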
/*
 * On lockdep we don't want to enable hardirqs in hardirq
 * context. Use local_irq_enable_in_hardirq() to annotate
 * kernel code that has to do this nevertheless (pretty much
 * the only valid case is for old/broken hardware that is
 * insanely slow).
 *
 * NOTE: in theory this might break fragile code that relies
 * on hardirq delivery - in practice we don't seem to have such
 * places left. So the only effect should be slightly increased
 * irqs-off latencies.
 */
#ifdef CONFIG_LOCKDEP
# define local_irq_enable_in_hardirq()	do { } while (0)
#else
# define local_irq_enable_in_hardirq()	local_irq_enable()
#endif
extern void disable_irq_nosync(unsigned int irq);
extern void disable_irq(unsigned int irq);
extern void enable_irq(unsigned int irq);
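
/*
 * Example (illustrative sketch): disable_irq() waits for any running
 * handler of this irq to complete, so it can be used to fence off the
 * handler while reconfiguring the device.  The helper below is
 * hypothetical.
 *
 *	disable_irq(my_dev->irq);
 *	my_device_reprogram(my_dev);
 *	enable_irq(my_dev->irq);
 *
 * Use disable_irq_nosync() instead when waiting is not allowed; it does
 * not wait for handlers running on other CPUs to finish.
 */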
/* The following three functions are for the core kernel use only. */
#ifdef CONFIG_GENERIC_HARDIRQS
extern void suspend_device_irqs(void);
extern void resume_device_irqs(void);
#ifdef CONFIG_PM_SLEEP
extern int check_wakeup_irqs(void);
#else
static inline int check_wakeup_irqs(void) { return 0; }
#endif
#else
static inline void suspend_device_irqs(void) { };
static inline void resume_device_irqs(void) { };
static inline int check_wakeup_irqs(void) { return 0; }
#endif
#if defined(CONFIG_SMP) && defined(CONFIG_GENERIC_HARDIRQS)

extern cpumask_var_t irq_default_affinity;

extern int irq_set_affinity(unsigned int irq, const struct cpumask *cpumask);
extern int irq_can_set_affinity(unsigned int irq);
extern int irq_select_affinity(unsigned int irq);
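
/*
 * Example (illustrative sketch): pin an interrupt to CPU 0 when the
 * platform allows it.  Error handling is elided; my_irq is hypothetical.
 *
 *	if (irq_can_set_affinity(my_irq))
 *		irq_set_affinity(my_irq, cpumask_of(0));
 */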
#else /* CONFIG_SMP */

static inline int irq_set_affinity(unsigned int irq, const struct cpumask *m)
{
	return -EINVAL;
}

static inline int irq_can_set_affinity(unsigned int irq)
{
	return 0;
}

static inline int irq_select_affinity(unsigned int irq)  { return 0; }

#endif /* CONFIG_SMP && CONFIG_GENERIC_HARDIRQS */
#ifdef CONFIG_GENERIC_HARDIRQS
/*
 * Special lockdep variants of irq disabling/enabling.
 * These should be used for locking constructs that know that a
 * particular irq context is disabled, and which are the only
 * irq-context user of a lock, so that it's safe to take the lock
 * in the irq-disabled section without disabling hardirqs.
 *
 * On !CONFIG_LOCKDEP they are equivalent to the normal
 * irq disable/enable methods.
 */
static inline void disable_irq_nosync_lockdep(unsigned int irq)
{
	disable_irq_nosync(irq);
#ifdef CONFIG_LOCKDEP
	local_irq_disable();
#endif
}
static inline void disable_irq_nosync_lockdep_irqsave(unsigned int irq, unsigned long *flags)
{
	disable_irq_nosync(irq);
#ifdef CONFIG_LOCKDEP
	local_irq_save(*flags);
#endif
}
static inline void disable_irq_lockdep(unsigned int irq)
{
	disable_irq(irq);
#ifdef CONFIG_LOCKDEP
	local_irq_disable();
#endif
}
static inline void enable_irq_lockdep(unsigned int irq)
{
#ifdef CONFIG_LOCKDEP
	local_irq_enable();
#endif
	enable_irq(irq);
}
static inline void enable_irq_lockdep_irqrestore(unsigned int irq, unsigned long *flags)
{
#ifdef CONFIG_LOCKDEP
	local_irq_restore(*flags);
#endif
	enable_irq(irq);
}
/* IRQ wakeup (PM) control: */
extern int set_irq_wake(unsigned int irq, unsigned int on);

static inline int enable_irq_wake(unsigned int irq)
{
	return set_irq_wake(irq, 1);
}

static inline int disable_irq_wake(unsigned int irq)
{
	return set_irq_wake(irq, 0);
}
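
/*
 * Example (illustrative sketch): a driver whose interrupt should wake the
 * system from suspend typically brackets its suspend/resume callbacks with
 * these helpers.  Names are hypothetical.
 *
 *	static int my_suspend(struct device *dev)
 *	{
 *		if (device_may_wakeup(dev))
 *			enable_irq_wake(my_dev->irq);
 *		return 0;
 *	}
 *
 *	static int my_resume(struct device *dev)
 *	{
 *		if (device_may_wakeup(dev))
 *			disable_irq_wake(my_dev->irq);
 *		return 0;
 *	}
 */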
#else /* !CONFIG_GENERIC_HARDIRQS */
/*
 * NOTE: non-genirq architectures, if they want to support the lock
 * validator, need to define the methods below in their asm/irq.h
 * files, under an #ifdef CONFIG_LOCKDEP section.
 */
#ifndef CONFIG_LOCKDEP
#  define disable_irq_nosync_lockdep(irq)	disable_irq_nosync(irq)
#  define disable_irq_nosync_lockdep_irqsave(irq, flags) \
						disable_irq_nosync(irq)
#  define disable_irq_lockdep(irq)		disable_irq(irq)
#  define enable_irq_lockdep(irq)		enable_irq(irq)
#  define enable_irq_lockdep_irqrestore(irq, flags) \
						enable_irq(irq)
# endif

static inline int enable_irq_wake(unsigned int irq)
{
	return 0;
}

static inline int disable_irq_wake(unsigned int irq)
{
	return 0;
}
#endif /* CONFIG_GENERIC_HARDIRQS */
#ifndef __ARCH_SET_SOFTIRQ_PENDING
#define set_softirq_pending(x) (local_softirq_pending() = (x))
#define or_softirq_pending(x)  (local_softirq_pending() |= (x))
#endif
/* Some architectures might implement lazy enabling/disabling of
 * interrupts. In some cases, such as stop_machine, we might want
 * to ensure that after a local_irq_disable(), interrupts have
 * really been disabled in hardware. Such architectures need to
 * implement the following hook.
 */
#ifndef hard_irq_disable
#define hard_irq_disable()	do { } while (0)
#endif
/* PLEASE avoid allocating new softirqs unless you really need very high
   frequency threaded job scheduling. For almost all purposes tasklets
   are more than enough. E.g. all serial device BHs et al. should be
   converted to tasklets, not to softirqs.
 */

enum
{
	HI_SOFTIRQ=0,
	TIMER_SOFTIRQ,
	NET_TX_SOFTIRQ,
	NET_RX_SOFTIRQ,
	BLOCK_SOFTIRQ,
	BLOCK_IOPOLL_SOFTIRQ,
	TASKLET_SOFTIRQ,
	SCHED_SOFTIRQ,
	HRTIMER_SOFTIRQ,
	RCU_SOFTIRQ,	/* Preferably RCU should always be the last softirq */

	NR_SOFTIRQS
};

/* map softirq index to softirq name. update 'softirq_to_name' in
 * kernel/softirq.c when adding a new softirq.
 */
extern char *softirq_to_name[NR_SOFTIRQS];
/* softirq mask and active fields moved to irq_cpustat_t in
 * asm/hardirq.h to get better cache usage.  KAO
 */

struct softirq_action
{
	void	(*action)(struct softirq_action *);
};
asmlinkage void do_softirq(void);
asmlinkage void __do_softirq(void);
extern void open_softirq(int nr, void (*action)(struct softirq_action *));
extern void softirq_init(void);
#define __raise_softirq_irqoff(nr) do { or_softirq_pending(1UL << (nr)); } while (0)
extern void raise_softirq_irqoff(unsigned int nr);
extern void raise_softirq(unsigned int nr);
extern void wakeup_softirqd(void);
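
/*
 * Example (illustrative sketch): how a subsystem with its own softirq slot
 * would wire it up.  MY_SOFTIRQ stands for one of the enum entries above;
 * the action function is hypothetical.
 *
 *	static void my_softirq_action(struct softirq_action *h)
 *	{
 *		... drain the per-cpu work queued for this softirq ...
 *	}
 *
 *	... once, at init time ...
 *	open_softirq(MY_SOFTIRQ, my_softirq_action);
 *
 *	... later, typically from irq context, to request execution ...
 *	raise_softirq(MY_SOFTIRQ);
 *
 * raise_softirq_irqoff() may be used instead when interrupts are already
 * disabled.
 */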
/* This is the worklist that queues up per-cpu softirq work.
 *
 * send_remote_softirq() adds work to these lists, and
 * the softirq handler itself dequeues from them.  The queues
 * are protected by disabling local cpu interrupts and they must
 * only be accessed by the local cpu that they are for.
 */
DECLARE_PER_CPU(struct list_head [NR_SOFTIRQS], softirq_work_list);
/* Try to send a softirq to a remote cpu.  If this cannot be done, the
 * work will be queued to the local cpu.
 */
extern void send_remote_softirq(struct call_single_data *cp, int cpu, int softirq);
/* Like send_remote_softirq(), but the caller must disable local cpu interrupts
 * and compute the current cpu, passed in as 'this_cpu'.
 */
extern void __send_remote_softirq(struct call_single_data *cp, int cpu,
				  int this_cpu, int softirq);
/* Tasklets --- multithreaded analogue of BHs.

   The main feature distinguishing them from generic softirqs: a tasklet
   runs on only one CPU at a time.

   The main feature distinguishing them from BHs: different tasklets
   may be run simultaneously on different CPUs.

   Properties:
   * If tasklet_schedule() is called, then the tasklet is guaranteed
     to be executed on some cpu at least once after this.
   * If the tasklet is already scheduled, but its execution has not yet
     started, it will be executed only once.
   * If this tasklet is already running on another CPU (or schedule is called
     from the tasklet itself), it is rescheduled for later.
   * A tasklet is strictly serialized wrt itself, but not
     wrt other tasklets. If a client needs inter-tasklet synchronization,
     it must provide it with spinlocks.
 */
struct tasklet_struct
{
	struct tasklet_struct *next;
	unsigned long state;
	atomic_t count;
	void (*func)(unsigned long);
	unsigned long data;
};
#define DECLARE_TASKLET(name, func, data) \
struct tasklet_struct name = { NULL, 0, ATOMIC_INIT(0), func, data }

#define DECLARE_TASKLET_DISABLED(name, func, data) \
struct tasklet_struct name = { NULL, 0, ATOMIC_INIT(1), func, data }
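
/*
 * Example (illustrative sketch): a statically declared tasklet scheduled
 * from a hard interrupt handler.  All names are hypothetical.
 *
 *	static void my_tasklet_fn(unsigned long data)
 *	{
 *		... bottom-half work, runs in softirq (BH) context ...
 *	}
 *
 *	static DECLARE_TASKLET(my_tasklet, my_tasklet_fn, 0);
 *
 *	static irqreturn_t my_irq_handler(int irq, void *dev_id)
 *	{
 *		... ack the hardware ...
 *		tasklet_schedule(&my_tasklet);
 *		return IRQ_HANDLED;
 *	}
 */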
enum
{
	TASKLET_STATE_SCHED,	/* Tasklet is scheduled for execution */
	TASKLET_STATE_RUN	/* Tasklet is running (SMP only) */
};
#ifdef CONFIG_SMP
static inline int tasklet_trylock(struct tasklet_struct *t)
{
	return !test_and_set_bit(TASKLET_STATE_RUN, &(t)->state);
}

static inline void tasklet_unlock(struct tasklet_struct *t)
{
	smp_mb__before_clear_bit();
	clear_bit(TASKLET_STATE_RUN, &(t)->state);
}

static inline void tasklet_unlock_wait(struct tasklet_struct *t)
{
	while (test_bit(TASKLET_STATE_RUN, &(t)->state)) { barrier(); }
}
#else
#define tasklet_trylock(t) 1
#define tasklet_unlock_wait(t) do { } while (0)
#define tasklet_unlock(t) do { } while (0)
#endif
extern void __tasklet_schedule(struct tasklet_struct *t);

static inline void tasklet_schedule(struct tasklet_struct *t)
{
	if (!test_and_set_bit(TASKLET_STATE_SCHED, &t->state))
		__tasklet_schedule(t);
}
extern void __tasklet_hi_schedule(struct tasklet_struct *t);

static inline void tasklet_hi_schedule(struct tasklet_struct *t)
{
	if (!test_and_set_bit(TASKLET_STATE_SCHED, &t->state))
		__tasklet_hi_schedule(t);
}
extern void __tasklet_hi_schedule_first(struct tasklet_struct *t);

/*
 * This version avoids touching any other tasklets. Needed for kmemcheck
 * in order not to take any page faults while enqueueing this tasklet;
 * consider VERY carefully whether you really need this or
 * tasklet_hi_schedule()...
 */
static inline void tasklet_hi_schedule_first(struct tasklet_struct *t)
{
	if (!test_and_set_bit(TASKLET_STATE_SCHED, &t->state))
		__tasklet_hi_schedule_first(t);
}
static inline void tasklet_disable_nosync(struct tasklet_struct *t)
{
	atomic_inc(&t->count);
	smp_mb__after_atomic_inc();
}
static inline void tasklet_disable(struct tasklet_struct *t)
{
	tasklet_disable_nosync(t);
	tasklet_unlock_wait(t);
	smp_mb();
}
static inline void tasklet_enable(struct tasklet_struct *t)
{
	smp_mb__before_atomic_dec();
	atomic_dec(&t->count);
}
static inline void tasklet_hi_enable(struct tasklet_struct *t)
{
	smp_mb__before_atomic_dec();
	atomic_dec(&t->count);
}
extern void tasklet_kill(struct tasklet_struct *t);
extern void tasklet_kill_immediate(struct tasklet_struct *t, unsigned int cpu);
extern void tasklet_init(struct tasklet_struct *t,
			 void (*func)(unsigned long), unsigned long data);
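
/*
 * Example (illustrative sketch): a dynamically initialised tasklet that is
 * embedded in a driver structure and torn down on removal.  Names are
 * hypothetical.
 *
 *	tasklet_init(&my_dev->rx_tasklet, my_rx_tasklet_fn,
 *		     (unsigned long)my_dev);
 *	...
 *	tasklet_schedule(&my_dev->rx_tasklet);	... from the irq handler ...
 *	...
 *	tasklet_kill(&my_dev->rx_tasklet);	... on device removal ...
 *
 * tasklet_kill() waits for a scheduled/running tasklet to finish, so it
 * must not be called from interrupt context.
 */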
struct tasklet_hrtimer {
	struct hrtimer		timer;
	struct tasklet_struct	tasklet;
	enum hrtimer_restart	(*function)(struct hrtimer *);
};
extern void
tasklet_hrtimer_init(struct tasklet_hrtimer *ttimer,
		     enum hrtimer_restart (*function)(struct hrtimer *),
		     clockid_t which_clock, enum hrtimer_mode mode);
static inline
int tasklet_hrtimer_start(struct tasklet_hrtimer *ttimer, ktime_t time,
			  const enum hrtimer_mode mode)
{
	return hrtimer_start(&ttimer->timer, time, mode);
}
static inline
void tasklet_hrtimer_cancel(struct tasklet_hrtimer *ttimer)
{
	hrtimer_cancel(&ttimer->timer);
	tasklet_kill(&ttimer->tasklet);
}
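
/*
 * Example (illustrative sketch): a tasklet-deferred hrtimer whose callback
 * runs in tasklet (softirq) context rather than hardirq context.  Names
 * are hypothetical.
 *
 *	static enum hrtimer_restart my_timer_fn(struct hrtimer *timer)
 *	{
 *		... periodic bottom-half work ...
 *		return HRTIMER_NORESTART;
 *	}
 *
 *	tasklet_hrtimer_init(&my_dev->ttimer, my_timer_fn,
 *			     CLOCK_MONOTONIC, HRTIMER_MODE_REL);
 *	tasklet_hrtimer_start(&my_dev->ttimer, ktime_set(0, NSEC_PER_MSEC),
 *			      HRTIMER_MODE_REL);
 *	...
 *	tasklet_hrtimer_cancel(&my_dev->ttimer);
 */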
/*
 * Autoprobing for irqs:
 *
 * probe_irq_on() and probe_irq_off() provide robust primitives
 * for accurate IRQ probing during kernel initialization.  They are
 * reasonably simple to use, are not "fooled" by spurious interrupts,
 * and, unlike other attempts at IRQ probing, they do not get hung on
 * stuck interrupts (such as unused PS2 mouse interfaces on ASUS boards).
 *
 * For reasonably foolproof probing, use them as follows:
 *
 * 1. clear and/or mask the device's internal interrupt.
 * 2. sti();
 * 3. irqs = probe_irq_on();      // "take over" all unassigned idle IRQs
 * 4. enable the device and cause it to trigger an interrupt.
 * 5. wait for the device to interrupt, using non-intrusive polling or a delay.
 * 6. irq = probe_irq_off(irqs);  // get IRQ number, 0=none, negative=multiple
 * 7. service the device to clear its pending interrupt.
 * 8. loop again if paranoia is required.
 *
 * probe_irq_on() returns a mask of allocated irqs.
 *
 * probe_irq_off() takes the mask as a parameter,
 * and returns the irq number which occurred,
 * or zero if none occurred, or a negative irq number
 * if more than one irq occurred.
 */
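
/*
 * Example (illustrative sketch) following the steps above.  The device
 * helpers are hypothetical.
 *
 *	unsigned long irqs;
 *	int irq;
 *
 *	my_device_mask_irq(dev);		// step 1
 *	irqs = probe_irq_on();			// step 3
 *	my_device_trigger_test_irq(dev);	// step 4
 *	udelay(100);				// step 5
 *	irq = probe_irq_off(irqs);		// step 6
 *	my_device_ack_irq(dev);			// step 7
 *	if (irq <= 0)
 *		... none or multiple irqs seen: fail or retry (step 8) ...
 */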
#if defined(CONFIG_GENERIC_HARDIRQS) && !defined(CONFIG_GENERIC_IRQ_PROBE)
static inline unsigned long probe_irq_on(void)
{
	return 0;
}
static inline int probe_irq_off(unsigned long val)
{
	return 0;
}
static inline unsigned int probe_irq_mask(unsigned long val)
{
	return 0;
}
#else
extern unsigned long probe_irq_on(void);	/* returns 0 on failure */
extern int probe_irq_off(unsigned long);	/* returns 0 or negative on failure */
extern unsigned int probe_irq_mask(unsigned long);	/* returns mask of ISA interrupts */
#endif
#ifdef CONFIG_PROC_FS
/* Initialize /proc/irq/ */
extern void init_irq_proc(void);
#else
static inline void init_irq_proc(void)
{
}
#endif

struct seq_file;
int show_interrupts(struct seq_file *p, void *v);
struct irq_desc;

extern int early_irq_init(void);
extern int arch_probe_nr_irqs(void);
extern int arch_early_irq_init(void);
extern int arch_init_chip_data(struct irq_desc *desc, int node);

#endif