/* interrupt.h */
#ifndef _LINUX_INTERRUPT_H
#define _LINUX_INTERRUPT_H

#include <linux/kernel.h>
#include <linux/linkage.h>
#include <linux/bitops.h>
#include <linux/preempt.h>
#include <linux/cpumask.h>
#include <linux/irqreturn.h>
#include <linux/irqnr.h>
#include <linux/hardirq.h>
#include <linux/sched.h>
#include <linux/irqflags.h>
#include <linux/smp.h>
#include <linux/percpu.h>

#include <asm/atomic.h>
#include <asm/ptrace.h>
#include <asm/system.h>

/*
 * These correspond to the IORESOURCE_IRQ_* defines in
 * linux/ioport.h to select the interrupt line behaviour. When
 * requesting an interrupt without specifying an IRQF_TRIGGER, the
 * setting should be assumed to be "as already configured", which
 * may be as per machine or firmware initialisation.
 */
#define IRQF_TRIGGER_NONE	0x00000000
#define IRQF_TRIGGER_RISING	0x00000001
#define IRQF_TRIGGER_FALLING	0x00000002
#define IRQF_TRIGGER_HIGH	0x00000004
#define IRQF_TRIGGER_LOW	0x00000008
#define IRQF_TRIGGER_MASK	(IRQF_TRIGGER_HIGH | IRQF_TRIGGER_LOW | \
				 IRQF_TRIGGER_RISING | IRQF_TRIGGER_FALLING)
#define IRQF_TRIGGER_PROBE	0x00000010
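
/*
 * Example (illustrative sketch only, not part of this header): a driver
 * requesting a falling-edge interrupt.  foo_interrupt(), foo->irq and
 * the "foo" name are assumptions made up for illustration.
 *
 *	ret = request_irq(foo->irq, foo_interrupt,
 *			  IRQF_TRIGGER_FALLING, "foo", foo);
 *	if (ret)
 *		return ret;
 */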

/*
 * These flags are used only by the kernel as part of the
 * irq handling routines.
 *
 * IRQF_DISABLED - keep irqs disabled when calling the action handler
 * IRQF_SAMPLE_RANDOM - irq is used to feed the random generator
 * IRQF_SHARED - allow sharing the irq among several devices
 * IRQF_PROBE_SHARED - set by callers when they expect sharing mismatches to occur
 * IRQF_TIMER - Flag to mark this interrupt as timer interrupt
 * IRQF_PERCPU - Interrupt is per cpu
 * IRQF_NOBALANCING - Flag to exclude this interrupt from irq balancing
 * IRQF_IRQPOLL - Interrupt is used for polling (only the interrupt that is
 *                registered first in a shared interrupt is considered for
 *                performance reasons)
 */
#define IRQF_DISABLED		0x00000020
#define IRQF_SAMPLE_RANDOM	0x00000040
#define IRQF_SHARED		0x00000080
#define IRQF_PROBE_SHARED	0x00000100
#define IRQF_TIMER		0x00000200
#define IRQF_PERCPU		0x00000400
#define IRQF_NOBALANCING	0x00000800
#define IRQF_IRQPOLL		0x00001000
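
/*
 * Example (illustrative sketch only): requesting a shared interrupt.
 * With IRQF_SHARED the dev_id cookie must be unique and non-NULL so
 * that free_irq() can tell the actions on the line apart.  All "foo"
 * names below are assumptions for illustration.
 *
 *	ret = request_irq(foo->irq, foo_interrupt, IRQF_SHARED,
 *			  "foo", foo);
 *	if (ret)
 *		return ret;
 */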

/*
 * Bits used by threaded handlers:
 * IRQTF_RUNTHREAD - signals that the interrupt handler thread should run
 * IRQTF_DIED      - handler thread died
 * IRQTF_WARNED    - warning "IRQ_WAKE_THREAD w/o thread_fn" has been printed
 */
enum {
	IRQTF_RUNTHREAD,
	IRQTF_DIED,
	IRQTF_WARNED,
};

typedef irqreturn_t (*irq_handler_t)(int, void *);

/**
 * struct irqaction - per interrupt action descriptor
 * @handler:	interrupt handler function
 * @flags:	flags (see IRQF_* above)
 * @mask:	no comment as it is useless and about to be removed
 * @name:	name of the device
 * @dev_id:	cookie to identify the device
 * @next:	pointer to the next irqaction for shared interrupts
 * @irq:	interrupt number
 * @dir:	pointer to the proc/irq/NN/name entry
 * @thread_fn:	interrupt handler function for threaded interrupts
 * @thread:	thread pointer for threaded interrupts
 * @thread_flags:	flags related to @thread
 */
struct irqaction {
	irq_handler_t handler;
	unsigned long flags;
	cpumask_t mask;
	const char *name;
	void *dev_id;
	struct irqaction *next;
	int irq;
	struct proc_dir_entry *dir;
	irq_handler_t thread_fn;
	struct task_struct *thread;
	unsigned long thread_flags;
};

extern irqreturn_t no_action(int cpl, void *dev_id);

#ifdef CONFIG_GENERIC_HARDIRQS
extern int __must_check
request_threaded_irq(unsigned int irq, irq_handler_t handler,
		     irq_handler_t thread_fn,
		     unsigned long flags, const char *name, void *dev);

static inline int __must_check
request_irq(unsigned int irq, irq_handler_t handler, unsigned long flags,
	    const char *name, void *dev)
{
	return request_threaded_irq(irq, handler, NULL, flags, name, dev);
}

extern void exit_irq_thread(void);
#else

extern int __must_check
request_irq(unsigned int irq, irq_handler_t handler, unsigned long flags,
	    const char *name, void *dev);

/*
 * Special function to avoid ifdeffery in kernel/irq/devres.c which
 * gets magically built by GENERIC_HARDIRQS=n architectures (sparc,
 * m68k). I really love these $@%#!* obvious Makefile references:
 * ../../../kernel/irq/devres.o
 */
static inline int __must_check
request_threaded_irq(unsigned int irq, irq_handler_t handler,
		     irq_handler_t thread_fn,
		     unsigned long flags, const char *name, void *dev)
{
	return request_irq(irq, handler, flags, name, dev);
}

static inline void exit_irq_thread(void) { }
#endif
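
/*
 * Example (illustrative sketch only): splitting handling between a
 * hard-irq quick check and a threaded handler via request_threaded_irq().
 * The primary handler returns IRQ_WAKE_THREAD to have thread_fn run in
 * process context.  struct foo_dev and all foo_* names are assumptions
 * made up for illustration.
 *
 *	static irqreturn_t foo_quick_check(int irq, void *dev_id)
 *	{
 *		struct foo_dev *foo = dev_id;
 *
 *		if (!foo_irq_pending(foo))
 *			return IRQ_NONE;
 *		foo_mask_irq(foo);
 *		return IRQ_WAKE_THREAD;
 *	}
 *
 *	static irqreturn_t foo_thread_fn(int irq, void *dev_id)
 *	{
 *		struct foo_dev *foo = dev_id;
 *
 *		foo_process_events(foo);
 *		foo_unmask_irq(foo);
 *		return IRQ_HANDLED;
 *	}
 *
 *	ret = request_threaded_irq(foo->irq, foo_quick_check,
 *				   foo_thread_fn, 0, "foo", foo);
 */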

extern void free_irq(unsigned int, void *);

struct device;

extern int __must_check
devm_request_threaded_irq(struct device *dev, unsigned int irq,
			  irq_handler_t handler, irq_handler_t thread_fn,
			  unsigned long irqflags, const char *devname,
			  void *dev_id);

static inline int __must_check
devm_request_irq(struct device *dev, unsigned int irq, irq_handler_t handler,
		 unsigned long irqflags, const char *devname, void *dev_id)
{
	return devm_request_threaded_irq(dev, irq, handler, NULL, irqflags,
					 devname, dev_id);
}

extern void devm_free_irq(struct device *dev, unsigned int irq, void *dev_id);
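
/*
 * Example (illustrative sketch only): a managed request from a driver's
 * probe() routine.  The irq is released automatically when the device
 * is detached, so no matching devm_free_irq() is normally needed.
 * Names are assumptions for illustration.
 *
 *	ret = devm_request_irq(&pdev->dev, foo->irq, foo_interrupt,
 *			       0, "foo", foo);
 *	if (ret)
 *		return ret;
 */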

/*
 * On lockdep we don't want to enable hardirqs in hardirq
 * context. Use local_irq_enable_in_hardirq() to annotate
 * kernel code that has to do this nevertheless (pretty much
 * the only valid case is for old/broken hardware that is
 * insanely slow).
 *
 * NOTE: in theory this might break fragile code that relies
 * on hardirq delivery - in practice we don't seem to have such
 * places left. So the only effect should be slightly increased
 * irqs-off latencies.
 */
#ifdef CONFIG_LOCKDEP
# define local_irq_enable_in_hardirq()	do { } while (0)
#else
# define local_irq_enable_in_hardirq()	local_irq_enable()
#endif

extern void disable_irq_nosync(unsigned int irq);
extern void disable_irq(unsigned int irq);
extern void enable_irq(unsigned int irq);
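
/*
 * Example (illustrative sketch only): fencing off a device interrupt
 * around a reconfiguration step.  disable_irq() waits for any running
 * handler to complete, so it must not be called from the handler of
 * the irq being disabled.  foo_reprogram_hardware() is a hypothetical
 * helper.
 *
 *	disable_irq(foo->irq);
 *	foo_reprogram_hardware(foo);
 *	enable_irq(foo->irq);
 */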

/* The following three functions are for the core kernel use only. */
#ifdef CONFIG_GENERIC_HARDIRQS
extern void suspend_device_irqs(void);
extern void resume_device_irqs(void);
#ifdef CONFIG_PM_SLEEP
extern int check_wakeup_irqs(void);
#else
static inline int check_wakeup_irqs(void) { return 0; }
#endif
#else
static inline void suspend_device_irqs(void) { };
static inline void resume_device_irqs(void) { };
static inline int check_wakeup_irqs(void) { return 0; }
#endif

#if defined(CONFIG_SMP) && defined(CONFIG_GENERIC_HARDIRQS)

extern cpumask_var_t irq_default_affinity;

extern int irq_set_affinity(unsigned int irq, const struct cpumask *cpumask);
extern int irq_can_set_affinity(unsigned int irq);
extern int irq_select_affinity(unsigned int irq);

#else /* CONFIG_SMP */

static inline int irq_set_affinity(unsigned int irq, const struct cpumask *m)
{
	return -EINVAL;
}

static inline int irq_can_set_affinity(unsigned int irq)
{
	return 0;
}

static inline int irq_select_affinity(unsigned int irq)  { return 0; }

#endif /* CONFIG_SMP && CONFIG_GENERIC_HARDIRQS */
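
/*
 * Example (illustrative sketch only): pinning an interrupt to CPU 0
 * after checking that affinity can be set at all.  foo->irq is an
 * assumption for illustration.
 *
 *	if (irq_can_set_affinity(foo->irq))
 *		irq_set_affinity(foo->irq, cpumask_of(0));
 */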

#ifdef CONFIG_GENERIC_HARDIRQS
/*
 * Special lockdep variants of irq disabling/enabling.
 * These should be used by locking constructs that know
 * that a particular irq context is disabled and is the
 * only irq-context user of a lock, so that it is safe to
 * take the lock in the irq-disabled section without
 * disabling hardirqs.
 *
 * On !CONFIG_LOCKDEP they are equivalent to the normal
 * irq disable/enable methods.
 */
static inline void disable_irq_nosync_lockdep(unsigned int irq)
{
	disable_irq_nosync(irq);
#ifdef CONFIG_LOCKDEP
	local_irq_disable();
#endif
}

static inline void disable_irq_nosync_lockdep_irqsave(unsigned int irq, unsigned long *flags)
{
	disable_irq_nosync(irq);
#ifdef CONFIG_LOCKDEP
	local_irq_save(*flags);
#endif
}

static inline void disable_irq_lockdep(unsigned int irq)
{
	disable_irq(irq);
#ifdef CONFIG_LOCKDEP
	local_irq_disable();
#endif
}

static inline void enable_irq_lockdep(unsigned int irq)
{
#ifdef CONFIG_LOCKDEP
	local_irq_enable();
#endif
	enable_irq(irq);
}

static inline void enable_irq_lockdep_irqrestore(unsigned int irq, unsigned long *flags)
{
#ifdef CONFIG_LOCKDEP
	local_irq_restore(*flags);
#endif
	enable_irq(irq);
}

/* IRQ wakeup (PM) control: */
extern int set_irq_wake(unsigned int irq, unsigned int on);

static inline int enable_irq_wake(unsigned int irq)
{
	return set_irq_wake(irq, 1);
}

static inline int disable_irq_wake(unsigned int irq)
{
	return set_irq_wake(irq, 0);
}

#else /* !CONFIG_GENERIC_HARDIRQS */
/*
 * NOTE: non-genirq architectures, if they want to support the lock
 * validator need to define the methods below in their asm/irq.h
 * files, under an #ifdef CONFIG_LOCKDEP section.
 */
#ifndef CONFIG_LOCKDEP
# define disable_irq_nosync_lockdep(irq)	disable_irq_nosync(irq)
# define disable_irq_nosync_lockdep_irqsave(irq, flags) \
						disable_irq_nosync(irq)
# define disable_irq_lockdep(irq)		disable_irq(irq)
# define enable_irq_lockdep(irq)		enable_irq(irq)
# define enable_irq_lockdep_irqrestore(irq, flags) \
						enable_irq(irq)
# endif

static inline int enable_irq_wake(unsigned int irq)
{
	return 0;
}

static inline int disable_irq_wake(unsigned int irq)
{
	return 0;
}
#endif /* CONFIG_GENERIC_HARDIRQS */
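
/*
 * Example (illustrative sketch only): marking a device interrupt as a
 * wakeup source from a driver's suspend hook.  device_may_wakeup() is
 * the standard PM core check; the foo_* names are assumptions.
 *
 *	static int foo_suspend(struct device *dev)
 *	{
 *		struct foo_dev *foo = dev_get_drvdata(dev);
 *
 *		if (device_may_wakeup(dev))
 *			enable_irq_wake(foo->irq);
 *		return 0;
 *	}
 */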

#ifndef __ARCH_SET_SOFTIRQ_PENDING
#define set_softirq_pending(x) (local_softirq_pending() = (x))
#define or_softirq_pending(x)  (local_softirq_pending() |= (x))
#endif

/* Some architectures might implement lazy enabling/disabling of
 * interrupts. In some cases, such as stop_machine, we might want
 * to ensure that after a local_irq_disable(), interrupts have
 * really been disabled in hardware. Such architectures need to
 * implement the following hook.
 */
#ifndef hard_irq_disable
#define hard_irq_disable()	do { } while(0)
#endif

/* Please avoid allocating new softirqs unless you really need
 * high-frequency threaded job scheduling. For almost all purposes
 * tasklets are more than enough. E.g. all serial device BHs et
 * al. should be converted to tasklets, not to softirqs.
 */

enum
{
	HI_SOFTIRQ=0,
	TIMER_SOFTIRQ,
	NET_TX_SOFTIRQ,
	NET_RX_SOFTIRQ,
	BLOCK_SOFTIRQ,
	TASKLET_SOFTIRQ,
	SCHED_SOFTIRQ,
	HRTIMER_SOFTIRQ,
	RCU_SOFTIRQ,	/* Preferably RCU should always be the last softirq */

	NR_SOFTIRQS
};

/* map softirq index to softirq name. update 'softirq_to_name' in
 * kernel/softirq.c when adding a new softirq.
 */
extern char *softirq_to_name[NR_SOFTIRQS];

/* softirq mask and active fields moved to irq_cpustat_t in
 * asm/hardirq.h to get better cache usage.  KAO
 */

struct softirq_action
{
	void	(*action)(struct softirq_action *);
};

asmlinkage void do_softirq(void);
asmlinkage void __do_softirq(void);
extern void open_softirq(int nr, void (*action)(struct softirq_action *));
extern void softirq_init(void);
#define __raise_softirq_irqoff(nr) do { or_softirq_pending(1UL << (nr)); } while (0)
extern void raise_softirq_irqoff(unsigned int nr);
extern void raise_softirq(unsigned int nr);
extern void wakeup_softirqd(void);
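
/*
 * Example (illustrative sketch only): how a softirq slot is wired up
 * and raised.  open_softirq() registers the action during boot (this
 * is what kernel/softirq.c does for TASKLET_SOFTIRQ); raise_softirq()
 * marks it pending so it runs on irq exit or in ksoftirqd.
 *
 *	open_softirq(TASKLET_SOFTIRQ, tasklet_action);
 *	...
 *	raise_softirq(TASKLET_SOFTIRQ);
 */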

/* This is the worklist that queues up per-cpu softirq work.
 *
 * send_remote_softirq() adds work to these lists, and
 * the softirq handler itself dequeues from them. The queues
 * are protected by disabling local cpu interrupts and they must
 * only be accessed by the local cpu that they are for.
 */
DECLARE_PER_CPU(struct list_head [NR_SOFTIRQS], softirq_work_list);

/* Try to send a softirq to a remote cpu.  If this cannot be done, the
 * work will be queued to the local cpu.
 */
extern void send_remote_softirq(struct call_single_data *cp, int cpu, int softirq);

/* Like send_remote_softirq(), but the caller must disable local cpu interrupts
 * and compute the current cpu, passed in as 'this_cpu'.
 */
extern void __send_remote_softirq(struct call_single_data *cp, int cpu,
				  int this_cpu, int softirq);

/* Tasklets --- multithreaded analogue of BHs.

   The main feature distinguishing them from generic softirqs: a tasklet
   runs on only one CPU at a time.

   The main feature distinguishing them from BHs: different tasklets
   may be run simultaneously on different CPUs.

   Properties:
   * If tasklet_schedule() is called, then the tasklet is guaranteed
     to be executed on some cpu at least once after this.
   * If the tasklet is already scheduled, but its execution has not yet
     started, it will be executed only once.
   * If this tasklet is already running on another CPU (or schedule is
     called from the tasklet itself), it is rescheduled for later.
   * A tasklet is strictly serialized wrt itself, but not wrt other
     tasklets. If a client needs some inter-tasklet synchronization,
     it must be done with spinlocks.
 */

struct tasklet_struct
{
	struct tasklet_struct *next;
	unsigned long state;
	atomic_t count;
	void (*func)(unsigned long);
	unsigned long data;
};

#define DECLARE_TASKLET(name, func, data) \
struct tasklet_struct name = { NULL, 0, ATOMIC_INIT(0), func, data }

#define DECLARE_TASKLET_DISABLED(name, func, data) \
struct tasklet_struct name = { NULL, 0, ATOMIC_INIT(1), func, data }
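
/*
 * Example (illustrative sketch only): a statically declared tasklet.
 * foo_do_tasklet() and foo are assumptions for illustration; the data
 * argument is handed back to the function verbatim.
 *
 *	static void foo_do_tasklet(unsigned long data);
 *	static DECLARE_TASKLET(foo_tasklet, foo_do_tasklet,
 *			       (unsigned long)&foo);
 */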

enum
{
	TASKLET_STATE_SCHED,	/* Tasklet is scheduled for execution */
	TASKLET_STATE_RUN	/* Tasklet is running (SMP only) */
};

#ifdef CONFIG_SMP
static inline int tasklet_trylock(struct tasklet_struct *t)
{
	return !test_and_set_bit(TASKLET_STATE_RUN, &(t)->state);
}

static inline void tasklet_unlock(struct tasklet_struct *t)
{
	smp_mb__before_clear_bit();
	clear_bit(TASKLET_STATE_RUN, &(t)->state);
}

static inline void tasklet_unlock_wait(struct tasklet_struct *t)
{
	while (test_bit(TASKLET_STATE_RUN, &(t)->state)) { barrier(); }
}
#else
#define tasklet_trylock(t) 1
#define tasklet_unlock_wait(t) do { } while (0)
#define tasklet_unlock(t) do { } while (0)
#endif

extern void __tasklet_schedule(struct tasklet_struct *t);

static inline void tasklet_schedule(struct tasklet_struct *t)
{
	if (!test_and_set_bit(TASKLET_STATE_SCHED, &t->state))
		__tasklet_schedule(t);
}

extern void __tasklet_hi_schedule(struct tasklet_struct *t);

static inline void tasklet_hi_schedule(struct tasklet_struct *t)
{
	if (!test_and_set_bit(TASKLET_STATE_SCHED, &t->state))
		__tasklet_hi_schedule(t);
}

extern void __tasklet_hi_schedule_first(struct tasklet_struct *t);

/*
 * This version avoids touching any other tasklets. Needed for kmemcheck
 * in order not to take any page faults while enqueueing this tasklet;
 * consider VERY carefully whether you really need this or
 * tasklet_hi_schedule()...
 */
static inline void tasklet_hi_schedule_first(struct tasklet_struct *t)
{
	if (!test_and_set_bit(TASKLET_STATE_SCHED, &t->state))
		__tasklet_hi_schedule_first(t);
}

static inline void tasklet_disable_nosync(struct tasklet_struct *t)
{
	atomic_inc(&t->count);
	smp_mb__after_atomic_inc();
}

static inline void tasklet_disable(struct tasklet_struct *t)
{
	tasklet_disable_nosync(t);
	tasklet_unlock_wait(t);
	smp_mb();
}

static inline void tasklet_enable(struct tasklet_struct *t)
{
	smp_mb__before_atomic_dec();
	atomic_dec(&t->count);
}

static inline void tasklet_hi_enable(struct tasklet_struct *t)
{
	smp_mb__before_atomic_dec();
	atomic_dec(&t->count);
}

extern void tasklet_kill(struct tasklet_struct *t);
extern void tasklet_kill_immediate(struct tasklet_struct *t, unsigned int cpu);
extern void tasklet_init(struct tasklet_struct *t,
			 void (*func)(unsigned long), unsigned long data);
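
/*
 * Example (illustrative sketch only): dynamic tasklet setup and
 * teardown in a hypothetical driver.  tasklet_kill() must be called
 * before the tasklet's containing object goes away.  The foo names
 * are assumptions for illustration.
 *
 *	tasklet_init(&foo->tasklet, foo_do_tasklet, (unsigned long)foo);
 *	...
 *	tasklet_schedule(&foo->tasklet);	(typically from the irq handler)
 *	...
 *	tasklet_kill(&foo->tasklet);		(on shutdown/remove)
 */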

/*
 * Autoprobing for irqs:
 *
 * probe_irq_on() and probe_irq_off() provide robust primitives
 * for accurate IRQ probing during kernel initialization.  They are
 * reasonably simple to use, are not "fooled" by spurious interrupts,
 * and, unlike other attempts at IRQ probing, they do not get hung on
 * stuck interrupts (such as unused PS2 mouse interfaces on ASUS boards).
 *
 * For reasonably foolproof probing, use them as follows:
 *
 * 1. clear and/or mask the device's internal interrupt.
 * 2. sti();
 * 3. irqs = probe_irq_on();      // "take over" all unassigned idle IRQs
 * 4. enable the device and cause it to trigger an interrupt.
 * 5. wait for the device to interrupt, using non-intrusive polling or a delay.
 * 6. irq = probe_irq_off(irqs);  // get IRQ number, 0=none, negative=multiple
 * 7. service the device to clear its pending interrupt.
 * 8. loop again if paranoia is required.
 *
 * probe_irq_on() returns a mask of allocated irqs.
 *
 * probe_irq_off() takes the mask as a parameter,
 * and returns the irq number which occurred,
 * or zero if none occurred, or a negative irq number
 * if more than one irq occurred.
 */

#if defined(CONFIG_GENERIC_HARDIRQS) && !defined(CONFIG_GENERIC_IRQ_PROBE)
static inline unsigned long probe_irq_on(void)
{
	return 0;
}
static inline int probe_irq_off(unsigned long val)
{
	return 0;
}
static inline unsigned int probe_irq_mask(unsigned long val)
{
	return 0;
}
#else
extern unsigned long probe_irq_on(void);	/* returns 0 on failure */
extern int probe_irq_off(unsigned long);	/* returns 0 or negative on failure */
extern unsigned int probe_irq_mask(unsigned long);	/* returns mask of ISA interrupts */
#endif
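
/*
 * Example (illustrative sketch only): the probing recipe above in code.
 * foo_trigger_interrupt() stands in for whatever device poke makes the
 * hardware raise its irq; the delay length is an assumption.
 *
 *	unsigned long mask;
 *	int irq;
 *
 *	mask = probe_irq_on();
 *	foo_trigger_interrupt(foo);
 *	mdelay(10);
 *	irq = probe_irq_off(mask);
 *	if (irq <= 0)
 *		return -ENODEV;		(none, or more than one, seen)
 */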

#ifdef CONFIG_PROC_FS
/* Initialize /proc/irq/ */
extern void init_irq_proc(void);
#else
static inline void init_irq_proc(void)
{
}
#endif

#if defined(CONFIG_GENERIC_HARDIRQS) && defined(CONFIG_DEBUG_SHIRQ)
extern void debug_poll_all_shared_irqs(void);
#else
static inline void debug_poll_all_shared_irqs(void) { }
#endif

int show_interrupts(struct seq_file *p, void *v);

struct irq_desc;

extern int early_irq_init(void);
extern int arch_probe_nr_irqs(void);
extern int arch_early_irq_init(void);
extern int arch_init_chip_data(struct irq_desc *desc, int node);

#endif /* _LINUX_INTERRUPT_H */