/* interrupt.h */
#ifndef _LINUX_INTERRUPT_H
#define _LINUX_INTERRUPT_H

#include <linux/kernel.h>
#include <linux/linkage.h>
#include <linux/bitops.h>
#include <linux/preempt.h>
#include <linux/cpumask.h>
#include <linux/irqreturn.h>
#include <linux/irqnr.h>
#include <linux/hardirq.h>
#include <linux/irqflags.h>
#include <linux/hrtimer.h>
#include <linux/kref.h>
#include <linux/workqueue.h>

#include <linux/atomic.h>
#include <asm/ptrace.h>

/*
 * These correspond to the IORESOURCE_IRQ_* defines in
 * linux/ioport.h to select the interrupt line behaviour. When
 * requesting an interrupt without specifying an IRQF_TRIGGER, the
 * setting should be assumed to be "as already configured", which
 * may be as per machine or firmware initialisation.
 */
#define IRQF_TRIGGER_NONE	0x00000000
#define IRQF_TRIGGER_RISING	0x00000001
#define IRQF_TRIGGER_FALLING	0x00000002
#define IRQF_TRIGGER_HIGH	0x00000004
#define IRQF_TRIGGER_LOW	0x00000008
#define IRQF_TRIGGER_MASK	(IRQF_TRIGGER_HIGH | IRQF_TRIGGER_LOW | \
				 IRQF_TRIGGER_RISING | IRQF_TRIGGER_FALLING)
#define IRQF_TRIGGER_PROBE	0x00000010

/*
 * These flags are used only by the kernel as part of the
 * irq handling routines.
 *
 * IRQF_DISABLED - keep irqs disabled when calling the action handler.
 *                 DEPRECATED. This flag is a NOOP and scheduled to be removed.
 * IRQF_SHARED - allow sharing the irq among several devices
 * IRQF_PROBE_SHARED - set by callers when they expect sharing mismatches to occur
 * IRQF_TIMER - Flag to mark this interrupt as a timer interrupt
 * IRQF_PERCPU - Interrupt is per cpu
 * IRQF_NOBALANCING - Flag to exclude this interrupt from irq balancing
 * IRQF_IRQPOLL - Interrupt is used for polling (only the interrupt that is
 *                registered first in a shared interrupt is considered for
 *                performance reasons)
 * IRQF_ONESHOT - Interrupt is not re-enabled after the hardirq handler finished.
 *                Used by threaded interrupts which need to keep the
 *                irq line disabled until the threaded handler has been run.
 * IRQF_NO_SUSPEND - Do not disable this IRQ during suspend
 * IRQF_FORCE_RESUME - Force enable it on resume even if IRQF_NO_SUSPEND is set
 * IRQF_NO_THREAD - Interrupt cannot be threaded
 * IRQF_EARLY_RESUME - Resume IRQ early during syscore instead of at device
 *                resume time.
 */
#define IRQF_DISABLED		0x00000020
#define IRQF_SHARED		0x00000080
#define IRQF_PROBE_SHARED	0x00000100
#define __IRQF_TIMER		0x00000200
#define IRQF_PERCPU		0x00000400
#define IRQF_NOBALANCING	0x00000800
#define IRQF_IRQPOLL		0x00001000
#define IRQF_ONESHOT		0x00002000
#define IRQF_NO_SUSPEND		0x00004000
#define IRQF_FORCE_RESUME	0x00008000
#define IRQF_NO_THREAD		0x00010000
#define IRQF_EARLY_RESUME	0x00020000

#define IRQF_TIMER		(__IRQF_TIMER | IRQF_NO_SUSPEND | IRQF_NO_THREAD)

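/*
 * Illustrative sketch (not part of the upstream header): how a driver
 * might combine a trigger flag with a behaviour flag when requesting a
 * line via request_irq() (declared below). "eth_interrupt" and
 * "netdev" are hypothetical names.
 *
 *	err = request_irq(irq, eth_interrupt,
 *			  IRQF_TRIGGER_FALLING | IRQF_SHARED,
 *			  "eth0", netdev);
 *	if (err)
 *		return err;
 */
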
/*
 * These values can be returned by request_any_context_irq() and
 * describe the context the interrupt will be run in.
 *
 * IRQC_IS_HARDIRQ - interrupt runs in hardirq context
 * IRQC_IS_NESTED - interrupt runs in a nested threaded context
 */
enum {
	IRQC_IS_HARDIRQ	= 0,
	IRQC_IS_NESTED,
};

typedef irqreturn_t (*irq_handler_t)(int, void *);

/**
 * struct irqaction - per interrupt action descriptor
 * @handler:	interrupt handler function
 * @name:	name of the device
 * @dev_id:	cookie to identify the device
 * @percpu_dev_id:	cookie to identify the device
 * @next:	pointer to the next irqaction for shared interrupts
 * @irq:	interrupt number
 * @flags:	flags (see IRQF_* above)
 * @thread_fn:	interrupt handler function for threaded interrupts
 * @thread:	thread pointer for threaded interrupts
 * @thread_flags:	flags related to @thread
 * @thread_mask:	bitmask for keeping track of @thread activity
 * @dir:	pointer to the proc/irq/NN/name entry
 */
struct irqaction {
	irq_handler_t		handler;
	void			*dev_id;
	void __percpu		*percpu_dev_id;
	struct irqaction	*next;
	irq_handler_t		thread_fn;
	struct task_struct	*thread;
	unsigned int		irq;
	unsigned int		flags;
	unsigned long		thread_flags;
	unsigned long		thread_mask;
	const char		*name;
#ifdef CONFIG_MTPROF_IRQ_DURATION
	unsigned long long	duration;
	unsigned long		count;
	unsigned long long	dur_max;
	unsigned long long	dur_min;
#endif
	struct proc_dir_entry	*dir;
} ____cacheline_internodealigned_in_smp;

extern irqreturn_t no_action(int cpl, void *dev_id);

#ifdef CONFIG_GENERIC_HARDIRQS
extern int __must_check
request_threaded_irq(unsigned int irq, irq_handler_t handler,
		     irq_handler_t thread_fn,
		     unsigned long flags, const char *name, void *dev);

static inline int __must_check
request_irq(unsigned int irq, irq_handler_t handler, unsigned long flags,
	    const char *name, void *dev)
{
	return request_threaded_irq(irq, handler, NULL, flags, name, dev);
}

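/*
 * Illustrative sketch (not part of the upstream header): registering a
 * threaded handler. The hardirq part acknowledges the device and
 * returns IRQ_WAKE_THREAD; the slow work then runs in the thread.
 * IRQF_ONESHOT keeps the line masked until the thread finishes.
 * "quick_check" and "slow_work" are hypothetical names.
 *
 *	err = request_threaded_irq(irq, quick_check, slow_work,
 *				   IRQF_ONESHOT, "mydev", dev);
 */
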
extern int __must_check
request_any_context_irq(unsigned int irq, irq_handler_t handler,
			unsigned long flags, const char *name, void *dev_id);

extern int __must_check
request_percpu_irq(unsigned int irq, irq_handler_t handler,
		   const char *devname, void __percpu *percpu_dev_id);
#else

extern int __must_check
request_irq(unsigned int irq, irq_handler_t handler, unsigned long flags,
	    const char *name, void *dev);

/*
 * Special function to avoid ifdeffery in kernel/irq/devres.c which
 * gets magically built by GENERIC_HARDIRQS=n architectures (sparc,
 * m68k). I really love these $@%#!* obvious Makefile references:
 * ../../../kernel/irq/devres.o
 */
static inline int __must_check
request_threaded_irq(unsigned int irq, irq_handler_t handler,
		     irq_handler_t thread_fn,
		     unsigned long flags, const char *name, void *dev)
{
	return request_irq(irq, handler, flags, name, dev);
}

static inline int __must_check
request_any_context_irq(unsigned int irq, irq_handler_t handler,
			unsigned long flags, const char *name, void *dev_id)
{
	return request_irq(irq, handler, flags, name, dev_id);
}

static inline int __must_check
request_percpu_irq(unsigned int irq, irq_handler_t handler,
		   const char *devname, void __percpu *percpu_dev_id)
{
	return request_irq(irq, handler, 0, devname, percpu_dev_id);
}
#endif

extern void free_irq(unsigned int, void *);
extern void free_percpu_irq(unsigned int, void __percpu *);

struct device;

extern int __must_check
devm_request_threaded_irq(struct device *dev, unsigned int irq,
			  irq_handler_t handler, irq_handler_t thread_fn,
			  unsigned long irqflags, const char *devname,
			  void *dev_id);

static inline int __must_check
devm_request_irq(struct device *dev, unsigned int irq, irq_handler_t handler,
		 unsigned long irqflags, const char *devname, void *dev_id)
{
	return devm_request_threaded_irq(dev, irq, handler, NULL, irqflags,
					 devname, dev_id);
}

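/*
 * Illustrative sketch (not part of the upstream header): the devm_
 * variant ties the IRQ's lifetime to the device, so probe() needs no
 * matching free_irq() on the error or remove paths. "my_probe",
 * "my_handler" and "priv" are hypothetical names.
 *
 *	static int my_probe(struct platform_device *pdev)
 *	{
 *		...
 *		return devm_request_irq(&pdev->dev, irq, my_handler,
 *					0, "mydev", priv);
 *	}
 */
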
extern void devm_free_irq(struct device *dev, unsigned int irq, void *dev_id);

/*
 * On lockdep we don't want to enable hardirqs in hardirq
 * context. Use local_irq_enable_in_hardirq() to annotate
 * kernel code that has to do this nevertheless (pretty much
 * the only valid case is for old/broken hardware that is
 * insanely slow).
 *
 * NOTE: in theory this might break fragile code that relies
 * on hardirq delivery - in practice we don't seem to have such
 * places left. So the only effect should be slightly increased
 * irqs-off latencies.
 */
#ifdef CONFIG_LOCKDEP
# define local_irq_enable_in_hardirq()	do { } while (0)
#else
# define local_irq_enable_in_hardirq()	local_irq_enable()
#endif

extern void disable_irq_nosync(unsigned int irq);
extern void disable_irq(unsigned int irq);
extern void disable_percpu_irq(unsigned int irq);
extern void enable_irq(unsigned int irq);
extern void enable_percpu_irq(unsigned int irq, unsigned int type);

/* The following three functions are for the core kernel use only. */
#ifdef CONFIG_GENERIC_HARDIRQS
extern void suspend_device_irqs(void);
extern void resume_device_irqs(void);
#ifdef CONFIG_PM_SLEEP
extern int check_wakeup_irqs(void);
#else
static inline int check_wakeup_irqs(void) { return 0; }
#endif
#else
static inline void suspend_device_irqs(void) { };
static inline void resume_device_irqs(void) { };
static inline int check_wakeup_irqs(void) { return 0; }
#endif

#if defined(CONFIG_SMP) && defined(CONFIG_GENERIC_HARDIRQS)

extern cpumask_var_t irq_default_affinity;

/* Internal implementation. Use the helpers below. */
extern int __irq_set_affinity(unsigned int irq, const struct cpumask *cpumask,
			      bool force);

/**
 * irq_set_affinity - Set the irq affinity of a given irq
 * @irq:	Interrupt to set affinity
 * @cpumask:	cpumask
 *
 * Fails if cpumask does not contain an online CPU
 */
static inline int
irq_set_affinity(unsigned int irq, const struct cpumask *cpumask)
{
	return __irq_set_affinity(irq, cpumask, false);
}

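/*
 * Illustrative sketch (not part of the upstream header): steering an
 * interrupt to CPU 1, e.g. to keep it off a latency-sensitive core.
 * irq_can_set_affinity() is declared below.
 *
 *	if (irq_can_set_affinity(irq))
 *		irq_set_affinity(irq, cpumask_of(1));
 */
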
/**
 * irq_force_affinity - Force the irq affinity of a given irq
 * @irq:	Interrupt to set affinity
 * @cpumask:	cpumask
 *
 * Same as irq_set_affinity, but without checking the mask against
 * online cpus.
 *
 * Solely for low level cpu hotplug code, where we need to make per
 * cpu interrupts affine before the cpu becomes online.
 */
static inline int
irq_force_affinity(unsigned int irq, const struct cpumask *cpumask)
{
	return __irq_set_affinity(irq, cpumask, true);
}

extern int irq_can_set_affinity(unsigned int irq);
extern int irq_select_affinity(unsigned int irq);

extern int irq_set_affinity_hint(unsigned int irq, const struct cpumask *m);

/**
 * struct irq_affinity_notify - context for notification of IRQ affinity changes
 * @irq:	Interrupt to which notification applies
 * @kref:	Reference count, for internal use
 * @work:	Work item, for internal use
 * @notify:	Function to be called on change. This will be
 *		called in process context.
 * @release:	Function to be called on release. This will be
 *		called in process context. Once registered, the
 *		structure must only be freed when this function is
 *		called or later.
 */
struct irq_affinity_notify {
	unsigned int irq;
	struct kref kref;
	struct work_struct work;
	void (*notify)(struct irq_affinity_notify *, const cpumask_t *mask);
	void (*release)(struct kref *ref);
};

extern int
irq_set_affinity_notifier(unsigned int irq, struct irq_affinity_notify *notify);

#else /* CONFIG_SMP */

static inline int irq_set_affinity(unsigned int irq, const struct cpumask *m)
{
	return -EINVAL;
}

static inline int irq_force_affinity(unsigned int irq, const struct cpumask *cpumask)
{
	return 0;
}

static inline int irq_can_set_affinity(unsigned int irq)
{
	return 0;
}

static inline int irq_select_affinity(unsigned int irq)  { return 0; }

static inline int irq_set_affinity_hint(unsigned int irq,
					const struct cpumask *m)
{
	return -EINVAL;
}
#endif /* CONFIG_SMP && CONFIG_GENERIC_HARDIRQS */

#ifdef CONFIG_GENERIC_HARDIRQS
/*
 * Special lockdep variants of irq disabling/enabling.
 * These should be used for locking constructs that
 * know that a particular irq context is disabled,
 * and is the only irq-context user of a lock,
 * so that it's safe to take the lock in the
 * irq-disabled section without disabling hardirqs.
 *
 * On !CONFIG_LOCKDEP they are equivalent to the normal
 * irq disable/enable methods.
 */
static inline void disable_irq_nosync_lockdep(unsigned int irq)
{
	disable_irq_nosync(irq);
#ifdef CONFIG_LOCKDEP
	local_irq_disable();
#endif
}

static inline void disable_irq_nosync_lockdep_irqsave(unsigned int irq, unsigned long *flags)
{
	disable_irq_nosync(irq);
#ifdef CONFIG_LOCKDEP
	local_irq_save(*flags);
#endif
}

static inline void disable_irq_lockdep(unsigned int irq)
{
	disable_irq(irq);
#ifdef CONFIG_LOCKDEP
	local_irq_disable();
#endif
}

static inline void enable_irq_lockdep(unsigned int irq)
{
#ifdef CONFIG_LOCKDEP
	local_irq_enable();
#endif
	enable_irq(irq);
}

static inline void enable_irq_lockdep_irqrestore(unsigned int irq, unsigned long *flags)
{
#ifdef CONFIG_LOCKDEP
	local_irq_restore(*flags);
#endif
	enable_irq(irq);
}

/* IRQ wakeup (PM) control: */
extern int irq_set_irq_wake(unsigned int irq, unsigned int on);

static inline int enable_irq_wake(unsigned int irq)
{
	return irq_set_irq_wake(irq, 1);
}

static inline int disable_irq_wake(unsigned int irq)
{
	return irq_set_irq_wake(irq, 0);
}

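/*
 * Illustrative sketch (not part of the upstream header): a driver's
 * suspend hook marking its interrupt as a wakeup source when the
 * device may wake the system. "my_suspend" and "struct my_priv" are
 * hypothetical names.
 *
 *	static int my_suspend(struct device *dev)
 *	{
 *		struct my_priv *priv = dev_get_drvdata(dev);
 *
 *		if (device_may_wakeup(dev))
 *			enable_irq_wake(priv->irq);
 *		return 0;
 *	}
 */
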
#else	/* !CONFIG_GENERIC_HARDIRQS */
/*
 * NOTE: non-genirq architectures, if they want to support the lock
 * validator need to define the methods below in their asm/irq.h
 * files, under an #ifdef CONFIG_LOCKDEP section.
 */
#ifndef CONFIG_LOCKDEP
# define disable_irq_nosync_lockdep(irq)	disable_irq_nosync(irq)
# define disable_irq_nosync_lockdep_irqsave(irq, flags) \
						disable_irq_nosync(irq)
# define disable_irq_lockdep(irq)		disable_irq(irq)
# define enable_irq_lockdep(irq)		enable_irq(irq)
# define enable_irq_lockdep_irqrestore(irq, flags) \
						enable_irq(irq)
# endif

static inline int enable_irq_wake(unsigned int irq)
{
	return 0;
}

static inline int disable_irq_wake(unsigned int irq)
{
	return 0;
}
#endif	/* CONFIG_GENERIC_HARDIRQS */


#ifdef CONFIG_IRQ_FORCED_THREADING
extern bool force_irqthreads;
#else
#define force_irqthreads	(0)
#endif

#ifndef __ARCH_SET_SOFTIRQ_PENDING
#define set_softirq_pending(x)	(local_softirq_pending() = (x))
#define or_softirq_pending(x)	(local_softirq_pending() |= (x))
#endif

/* Some architectures might implement lazy enabling/disabling of
 * interrupts. In some cases, such as stop_machine, we might want
 * to ensure that after a local_irq_disable(), interrupts have
 * really been disabled in hardware. Such architectures need to
 * implement the following hook.
 */
#ifndef hard_irq_disable
#define hard_irq_disable()	do { } while(0)
#endif

/* PLEASE avoid allocating new softirqs unless you need _really_ high
   frequency threaded job scheduling. For almost all purposes
   tasklets are more than enough, e.g. all serial device BHs et
   al. should be converted to tasklets, not to softirqs.
 */

enum
{
	HI_SOFTIRQ=0,
	TIMER_SOFTIRQ,
	NET_TX_SOFTIRQ,
	NET_RX_SOFTIRQ,
	BLOCK_SOFTIRQ,
	BLOCK_IOPOLL_SOFTIRQ,
	TASKLET_SOFTIRQ,
	SCHED_SOFTIRQ,
	HRTIMER_SOFTIRQ,
	RCU_SOFTIRQ,	/* Preferably RCU should always be the last softirq */

	NR_SOFTIRQS
};

#define SOFTIRQ_STOP_IDLE_MASK (~(1 << RCU_SOFTIRQ))

/* map softirq index to softirq name. update 'softirq_to_name' in
 * kernel/softirq.c when adding a new softirq.
 */
extern char *softirq_to_name[NR_SOFTIRQS];

/* softirq mask and active fields moved to irq_cpustat_t in
 * asm/hardirq.h to get better cache usage.  KAO
 */

struct softirq_action
{
	void	(*action)(struct softirq_action *);
};

asmlinkage void do_softirq(void);
asmlinkage void __do_softirq(void);
extern void open_softirq(int nr, void (*action)(struct softirq_action *));
extern void softirq_init(void);
extern void __raise_softirq_irqoff(unsigned int nr);

extern void raise_softirq_irqoff(unsigned int nr);
extern void raise_softirq(unsigned int nr);

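/*
 * Illustrative sketch (not part of the upstream header): how core code
 * wires up a softirq: register the action once at init time, then
 * raise it from interrupt context. "my_softirq_action" is a
 * hypothetical name, and real users occupy fixed slots in the enum
 * above (TASKLET_SOFTIRQ is shown only as an example slot; it is
 * actually owned by the tasklet machinery).
 *
 *	static void my_softirq_action(struct softirq_action *h)
 *	{
 *		...
 *	}
 *
 *	open_softirq(TASKLET_SOFTIRQ, my_softirq_action);
 *	...
 *	raise_softirq(TASKLET_SOFTIRQ);
 */
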
DECLARE_PER_CPU(struct task_struct *, ksoftirqd);

static inline struct task_struct *this_cpu_ksoftirqd(void)
{
	return this_cpu_read(ksoftirqd);
}

/* Tasklets --- multithreaded analogue of BHs.

   The main feature distinguishing them from generic softirqs: a given
   tasklet runs on only one CPU at a time.

   The main feature distinguishing them from BHs: different tasklets
   may be run simultaneously on different CPUs.

   Properties:
   * If tasklet_schedule() is called, then the tasklet is guaranteed
     to be executed on some cpu at least once after this.
   * If the tasklet is already scheduled, but its execution has not yet
     started, it will be executed only once.
   * If this tasklet is already running on another CPU (or schedule is called
     from the tasklet itself), it is rescheduled for later.
   * A tasklet is strictly serialized wrt itself, but not
     wrt other tasklets. If a client needs some inter-task synchronization,
     it must provide it with spinlocks.
 */

struct tasklet_struct
{
	struct tasklet_struct *next;
	unsigned long state;
	atomic_t count;
	void (*func)(unsigned long);
	unsigned long data;
};

#define DECLARE_TASKLET(name, func, data) \
struct tasklet_struct name = { NULL, 0, ATOMIC_INIT(0), func, data }

#define DECLARE_TASKLET_DISABLED(name, func, data) \
struct tasklet_struct name = { NULL, 0, ATOMIC_INIT(1), func, data }


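/*
 * Illustrative sketch (not part of the upstream header): declaring a
 * tasklet and scheduling it from a hardirq handler so the bulk of the
 * work runs later in softirq context. "my_tasklet_fn" and "my_irq"
 * are hypothetical names.
 *
 *	static void my_tasklet_fn(unsigned long data)
 *	{
 *		...
 *	}
 *	static DECLARE_TASKLET(my_tasklet, my_tasklet_fn, 0);
 *
 *	static irqreturn_t my_irq(int irq, void *dev_id)
 *	{
 *		tasklet_schedule(&my_tasklet);
 *		return IRQ_HANDLED;
 *	}
 */
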
enum
{
	TASKLET_STATE_SCHED,	/* Tasklet is scheduled for execution */
	TASKLET_STATE_RUN	/* Tasklet is running (SMP only) */
};

#ifdef CONFIG_SMP
static inline int tasklet_trylock(struct tasklet_struct *t)
{
	return !test_and_set_bit(TASKLET_STATE_RUN, &(t)->state);
}

static inline void tasklet_unlock(struct tasklet_struct *t)
{
	smp_mb__before_clear_bit();
	clear_bit(TASKLET_STATE_RUN, &(t)->state);
}

static inline void tasklet_unlock_wait(struct tasklet_struct *t)
{
	while (test_bit(TASKLET_STATE_RUN, &(t)->state)) { barrier(); }
}
#else
#define tasklet_trylock(t) 1
#define tasklet_unlock_wait(t) do { } while (0)
#define tasklet_unlock(t) do { } while (0)
#endif

extern void __tasklet_schedule(struct tasklet_struct *t);

static inline void tasklet_schedule(struct tasklet_struct *t)
{
	if (!test_and_set_bit(TASKLET_STATE_SCHED, &t->state))
		__tasklet_schedule(t);
}

extern void __tasklet_hi_schedule(struct tasklet_struct *t);

static inline void tasklet_hi_schedule(struct tasklet_struct *t)
{
	if (!test_and_set_bit(TASKLET_STATE_SCHED, &t->state))
		__tasklet_hi_schedule(t);
}

extern void __tasklet_hi_schedule_first(struct tasklet_struct *t);

/*
 * This version avoids touching any other tasklets. Needed for kmemcheck
 * in order not to take any page faults while enqueueing this tasklet;
 * consider VERY carefully whether you really need this or
 * tasklet_hi_schedule()...
 */
static inline void tasklet_hi_schedule_first(struct tasklet_struct *t)
{
	if (!test_and_set_bit(TASKLET_STATE_SCHED, &t->state))
		__tasklet_hi_schedule_first(t);
}


static inline void tasklet_disable_nosync(struct tasklet_struct *t)
{
	atomic_inc(&t->count);
	smp_mb__after_atomic_inc();
}

static inline void tasklet_disable(struct tasklet_struct *t)
{
	tasklet_disable_nosync(t);
	tasklet_unlock_wait(t);
	smp_mb();
}

static inline void tasklet_enable(struct tasklet_struct *t)
{
	smp_mb__before_atomic_dec();
	atomic_dec(&t->count);
}

static inline void tasklet_hi_enable(struct tasklet_struct *t)
{
	smp_mb__before_atomic_dec();
	atomic_dec(&t->count);
}

extern void tasklet_kill(struct tasklet_struct *t);
extern void tasklet_kill_immediate(struct tasklet_struct *t, unsigned int cpu);
extern void tasklet_init(struct tasklet_struct *t,
			 void (*func)(unsigned long), unsigned long data);

struct tasklet_hrtimer {
	struct hrtimer		timer;
	struct tasklet_struct	tasklet;
	enum hrtimer_restart	(*function)(struct hrtimer *);
};

extern void
tasklet_hrtimer_init(struct tasklet_hrtimer *ttimer,
		     enum hrtimer_restart (*function)(struct hrtimer *),
		     clockid_t which_clock, enum hrtimer_mode mode);

static inline
int tasklet_hrtimer_start(struct tasklet_hrtimer *ttimer, ktime_t time,
			  const enum hrtimer_mode mode)
{
	return hrtimer_start(&ttimer->timer, time, mode);
}

static inline
void tasklet_hrtimer_cancel(struct tasklet_hrtimer *ttimer)
{
	hrtimer_cancel(&ttimer->timer);
	tasklet_kill(&ttimer->tasklet);
}

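/*
 * Illustrative sketch (not part of the upstream header): arming a
 * tasklet_hrtimer so its callback runs from tasklet context rather
 * than hard interrupt context. "my_timer_fn" and "ttimer" are
 * hypothetical names.
 *
 *	static enum hrtimer_restart my_timer_fn(struct hrtimer *t)
 *	{
 *		...
 *		return HRTIMER_NORESTART;
 *	}
 *
 *	tasklet_hrtimer_init(&ttimer, my_timer_fn,
 *			     CLOCK_MONOTONIC, HRTIMER_MODE_REL);
 *	tasklet_hrtimer_start(&ttimer, ms_to_ktime(10), HRTIMER_MODE_REL);
 */
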
/*
 * Autoprobing for irqs:
 *
 * probe_irq_on() and probe_irq_off() provide robust primitives
 * for accurate IRQ probing during kernel initialization.  They are
 * reasonably simple to use, are not "fooled" by spurious interrupts,
 * and, unlike other attempts at IRQ probing, they do not get hung on
 * stuck interrupts (such as unused PS2 mouse interfaces on ASUS boards).
 *
 * For reasonably foolproof probing, use them as follows:
 *
 * 1. clear and/or mask the device's internal interrupt.
 * 2. sti();
 * 3. irqs = probe_irq_on();	// "take over" all unassigned idle IRQs
 * 4. enable the device and cause it to trigger an interrupt.
 * 5. wait for the device to interrupt, using non-intrusive polling or a delay.
 * 6. irq = probe_irq_off(irqs);	// get IRQ number, 0=none, negative=multiple
 * 7. service the device to clear its pending interrupt.
 * 8. loop again if paranoia is required.
 *
 * probe_irq_on() returns a mask of allocated irq's.
 *
 * probe_irq_off() takes the mask as a parameter,
 * and returns the irq number which occurred,
 * or zero if none occurred, or a negative irq number
 * if more than one irq occurred.
 */

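/*
 * Illustrative sketch (not part of the upstream header): the probing
 * steps above as code. "dev_trigger_irq" and "dev_ack_irq" stand in
 * for device-specific register pokes and are hypothetical.
 *
 *	unsigned long mask;
 *	int irq;
 *
 *	mask = probe_irq_on();
 *	dev_trigger_irq();		// make the device raise its interrupt
 *	mdelay(20);			// give it time to arrive
 *	irq = probe_irq_off(mask);	// 0 = none, negative = multiple
 *	dev_ack_irq();			// clear the pending interrupt
 */
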
#if defined(CONFIG_GENERIC_HARDIRQS) && !defined(CONFIG_GENERIC_IRQ_PROBE)
static inline unsigned long probe_irq_on(void)
{
	return 0;
}
static inline int probe_irq_off(unsigned long val)
{
	return 0;
}
static inline unsigned int probe_irq_mask(unsigned long val)
{
	return 0;
}
#else
extern unsigned long probe_irq_on(void);	/* returns 0 on failure */
extern int probe_irq_off(unsigned long);	/* returns 0 or negative on failure */
extern unsigned int probe_irq_mask(unsigned long);	/* returns mask of ISA interrupts */
#endif

#ifdef CONFIG_PROC_FS
/* Initialize /proc/irq/ */
extern void init_irq_proc(void);
#else
static inline void init_irq_proc(void)
{
}
#endif

struct seq_file;
int show_interrupts(struct seq_file *p, void *v);
int arch_show_interrupts(struct seq_file *p, int prec);

extern int early_irq_init(void);
extern int arch_probe_nr_irqs(void);
extern int arch_early_irq_init(void);

#endif