/*
 * Copyright (C) 2000 - 2007 Jeff Dike (jdike@{addtoit,linux.intel}.com)
 * Licensed under the GPL
 * Derived (i.e. mostly copied) from arch/i386/kernel/irq.c:
 *	Copyright (C) 1992, 1998 Linus Torvalds, Ingo Molnar
 */

#include "linux/cpumask.h"
#include "linux/hardirq.h"
#include "linux/interrupt.h"
#include "linux/kernel_stat.h"
#include "linux/module.h"
#include "linux/sched.h"
#include "linux/seq_file.h"
#include "linux/slab.h"
#include "as-layout.h"
#include "kern_util.h"
#include "os.h"

/*
 * Generic, controller-independent functions:
 */

int show_interrupts(struct seq_file *p, void *v)
{
	int i = *(loff_t *) v, j;
	struct irqaction *action;
	unsigned long flags;

	if (i == 0) {
		seq_printf(p, "           ");
		for_each_online_cpu(j)
			seq_printf(p, "CPU%d       ", j);
		seq_putc(p, '\n');
	}

	if (i < NR_IRQS) {
		raw_spin_lock_irqsave(&irq_desc[i].lock, flags);
		action = irq_desc[i].action;
		if (!action)
			goto skip;
		seq_printf(p, "%3d: ", i);
#ifndef CONFIG_SMP
		seq_printf(p, "%10u ", kstat_irqs(i));
#else
		for_each_online_cpu(j)
			seq_printf(p, "%10u ", kstat_irqs_cpu(i, j));
#endif
		seq_printf(p, " %14s", irq_desc[i].chip->typename);
		seq_printf(p, "  %s", action->name);

		for (action = action->next; action; action = action->next)
			seq_printf(p, ", %s", action->name);

		seq_putc(p, '\n');
skip:
		raw_spin_unlock_irqrestore(&irq_desc[i].lock, flags);
	} else if (i == NR_IRQS)
		seq_putc(p, '\n');

	return 0;
}
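
/*
 * On a uniprocessor UML instance the code above renders /proc/interrupts
 * roughly like this (the counts and device names below are made up for
 * illustration; only the column layout follows from the format strings):
 *
 *	           CPU0
 *	  0:      5724      SIGVTALRM  timer
 *	  2:       103          SIGIO  console
 *	  9:        17          SIGIO  eth0
 */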

/*
 * This list is accessed under irq_lock, except in sigio_handler,
 * where it is safe from being modified.  IRQ handlers won't change it -
 * if an IRQ source has vanished, it will be freed by free_irqs just
 * before returning from sigio_handler.  That will process a separate
 * list of irqs to free, with its own locking, coming back here to
 * remove list elements, taking the irq_lock to do so.
 */
static struct irq_fd *active_fds = NULL;
static struct irq_fd **last_irq_ptr = &active_fds;
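
/*
 * For reference, the irq_fd nodes chained on this list carry roughly the
 * fields used below (this is a sketch inferred from the initializer in
 * activate_fd; the authoritative definition lives in the shared UML
 * headers):
 *
 *	struct irq_fd {
 *		struct irq_fd *next;	- singly linked via last_irq_ptr
 *		void *id;		- dev_id cookie from registration
 *		int fd;			- host file descriptor being polled
 *		int type;		- IRQ_READ or IRQ_WRITE
 *		int irq;		- guest IRQ number to raise
 *		int events;		- UM_POLLIN/UM_POLLPRI/UM_POLLOUT mask
 *		int current_events;	- events seen on the last poll pass
 *	};
 */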

extern void free_irqs(void);

void sigio_handler(int sig, struct uml_pt_regs *regs)
{
	struct irq_fd *irq_fd;
	int n;

	if (smp_sigio_handler())
		return;

	while (1) {
		n = os_waiting_for_events(active_fds);
		if (n <= 0) {
			if (n == -EINTR)
				continue;
			else
				break;
		}

		for (irq_fd = active_fds; irq_fd != NULL;
		     irq_fd = irq_fd->next) {
			if (irq_fd->current_events != 0) {
				irq_fd->current_events = 0;
				do_IRQ(irq_fd->irq, regs);
			}
		}
	}

	free_irqs();
}

static DEFINE_SPINLOCK(irq_lock);

static int activate_fd(int irq, int fd, int type, void *dev_id)
{
	struct pollfd *tmp_pfd;
	struct irq_fd *new_fd, *irq_fd;
	unsigned long flags;
	int events, err, n;

	err = os_set_fd_async(fd);
	if (err < 0)
		goto out;

	err = -ENOMEM;
	new_fd = kmalloc(sizeof(struct irq_fd), GFP_KERNEL);
	if (new_fd == NULL)
		goto out;

	if (type == IRQ_READ)
		events = UM_POLLIN | UM_POLLPRI;
	else
		events = UM_POLLOUT;
	*new_fd = ((struct irq_fd) { .next		= NULL,
				     .id		= dev_id,
				     .fd		= fd,
				     .type		= type,
				     .irq		= irq,
				     .events		= events,
				     .current_events	= 0 });

	err = -EBUSY;
	spin_lock_irqsave(&irq_lock, flags);
	for (irq_fd = active_fds; irq_fd != NULL; irq_fd = irq_fd->next) {
		if ((irq_fd->fd == fd) && (irq_fd->type == type)) {
			printk(KERN_ERR "Registering fd %d twice\n", fd);
			printk(KERN_ERR "Irqs : %d, %d\n", irq_fd->irq, irq);
			printk(KERN_ERR "Ids : 0x%p, 0x%p\n", irq_fd->id,
			       dev_id);
			goto out_unlock;
		}
	}

	if (type == IRQ_WRITE)
		fd = -1;

	tmp_pfd = NULL;
	n = 0;

	while (1) {
		n = os_create_pollfd(fd, events, tmp_pfd, n);
		if (n == 0)
			break;

		/*
		 * n > 0 means the new pollfd didn't fit into the current
		 * pollfds array, and tmp_pfd is either NULL or too small
		 * to hold the enlarged array; n is the minimum size
		 * needed.
		 *
		 * We have to drop the lock here in order to call kmalloc,
		 * which might sleep.  If something else comes in and
		 * changes the pollfds array so that the new pollfd still
		 * doesn't fit, we free tmp_pfd and try again.
		 */
		spin_unlock_irqrestore(&irq_lock, flags);
		kfree(tmp_pfd);

		tmp_pfd = kmalloc(n, GFP_KERNEL);
		if (tmp_pfd == NULL)
			goto out_kfree;

		spin_lock_irqsave(&irq_lock, flags);
	}

	*last_irq_ptr = new_fd;
	last_irq_ptr = &new_fd->next;

	spin_unlock_irqrestore(&irq_lock, flags);

	/*
	 * This calls activate_fd, so it has to be outside the critical
	 * section.
	 */
	maybe_sigio_broken(fd, (type == IRQ_READ));

	return 0;

 out_unlock:
	spin_unlock_irqrestore(&irq_lock, flags);
 out_kfree:
	kfree(new_fd);
 out:
	return err;
}
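
/*
 * The loop above is an instance of a common kernel idiom: you can't
 * sleep (and therefore can't kmalloc with GFP_KERNEL) while holding a
 * spinlock, so the lock is dropped for the allocation and the state is
 * re-checked once it is retaken.  A minimal sketch of the idiom, with
 * hypothetical names:
 *
 *	spin_lock_irqsave(&lock, flags);
 *	while ((n = try_to_insert(item)) > 0) {	- n is the size needed
 *		spin_unlock_irqrestore(&lock, flags);
 *		kfree(buf);
 *		buf = kmalloc(n, GFP_KERNEL);	- may sleep
 *		if (buf == NULL)
 *			goto fail;
 *		spin_lock_irqsave(&lock, flags);	- retake, re-check
 *	}
 *	spin_unlock_irqrestore(&lock, flags);
 */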

static void free_irq_by_cb(int (*test)(struct irq_fd *, void *), void *arg)
{
	unsigned long flags;

	spin_lock_irqsave(&irq_lock, flags);
	os_free_irq_by_cb(test, arg, active_fds, &last_irq_ptr);
	spin_unlock_irqrestore(&irq_lock, flags);
}

struct irq_and_dev {
	int irq;
	void *dev;
};

static int same_irq_and_dev(struct irq_fd *irq, void *d)
{
	struct irq_and_dev *data = d;

	return ((irq->irq == data->irq) && (irq->id == data->dev));
}

static void free_irq_by_irq_and_dev(unsigned int irq, void *dev)
{
	struct irq_and_dev data = ((struct irq_and_dev) { .irq = irq,
							  .dev = dev });

	free_irq_by_cb(same_irq_and_dev, &data);
}

static int same_fd(struct irq_fd *irq, void *fd)
{
	return (irq->fd == *((int *)fd));
}

void free_irq_by_fd(int fd)
{
	free_irq_by_cb(same_fd, &fd);
}
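
/*
 * free_irq_by_cb takes an arbitrary predicate over (irq_fd, key), so
 * removal by a new key is just another test function.  For instance, a
 * hypothetical removal by dev_id alone would look like:
 *
 *	static int same_dev(struct irq_fd *irq, void *dev)
 *	{
 *		return irq->id == dev;
 *	}
 *
 *	free_irq_by_cb(same_dev, my_dev_id);
 */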

/* Must be called with irq_lock held */
static struct irq_fd *find_irq_by_fd(int fd, int irqnum, int *index_out)
{
	struct irq_fd *irq;
	int i = 0;
	int fdi;

	for (irq = active_fds; irq != NULL; irq = irq->next) {
		if ((irq->fd == fd) && (irq->irq == irqnum))
			break;
		i++;
	}
	if (irq == NULL) {
		printk(KERN_ERR "find_irq_by_fd doesn't have descriptor %d\n",
		       fd);
		goto out;
	}
	fdi = os_get_pollfd(i);
	if ((fdi != -1) && (fdi != fd)) {
		printk(KERN_ERR "find_irq_by_fd - mismatch between active_fds "
		       "and pollfds, fd %d vs %d, need %d\n", irq->fd,
		       fdi, fd);
		irq = NULL;
		goto out;
	}
	*index_out = i;
 out:
	return irq;
}

void reactivate_fd(int fd, int irqnum)
{
	struct irq_fd *irq;
	unsigned long flags;
	int i;

	spin_lock_irqsave(&irq_lock, flags);
	irq = find_irq_by_fd(fd, irqnum, &i);
	if (irq == NULL) {
		spin_unlock_irqrestore(&irq_lock, flags);
		return;
	}
	os_set_pollfd(i, irq->fd);
	spin_unlock_irqrestore(&irq_lock, flags);

	add_sigio_fd(fd);
}

void deactivate_fd(int fd, int irqnum)
{
	struct irq_fd *irq;
	unsigned long flags;
	int i;

	spin_lock_irqsave(&irq_lock, flags);
	irq = find_irq_by_fd(fd, irqnum, &i);
	if (irq == NULL) {
		spin_unlock_irqrestore(&irq_lock, flags);
		return;
	}

	os_set_pollfd(i, -1);
	spin_unlock_irqrestore(&irq_lock, flags);

	ignore_sigio_fd(fd);
}

/*
 * Called just before shutdown in order to provide a clean exec
 * environment in case the system is rebooting.  No locking because
 * that would cause a pointless shutdown hang if something hadn't
 * released the lock.
 */
int deactivate_all_fds(void)
{
	struct irq_fd *irq;
	int err;

	for (irq = active_fds; irq != NULL; irq = irq->next) {
		err = os_clear_fd_async(irq->fd);
		if (err)
			return err;
	}
	/* If there is a signal already queued, after unblocking ignore it */
	os_set_ioignore();

	return 0;
}

/*
 * do_IRQ handles all normal device IRQs (the special
 * SMP cross-CPU interrupts have their own specific
 * handlers).
 */
unsigned int do_IRQ(int irq, struct uml_pt_regs *regs)
{
	struct pt_regs *old_regs = set_irq_regs((struct pt_regs *)regs);
	irq_enter();
	__do_IRQ(irq);
	irq_exit();
	set_irq_regs(old_regs);
	return 1;
}

int um_request_irq(unsigned int irq, int fd, int type,
		   irq_handler_t handler,
		   unsigned long irqflags, const char *devname,
		   void *dev_id)
{
	int err;

	if (fd != -1) {
		err = activate_fd(irq, fd, type, dev_id);
		if (err)
			return err;
	}

	return request_irq(irq, handler, irqflags, devname, dev_id);
}

EXPORT_SYMBOL(um_request_irq);
EXPORT_SYMBOL(reactivate_fd);
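
/*
 * A typical caller wires a host file descriptor to a guest IRQ in one
 * shot: activate_fd starts polling the fd, and request_irq installs the
 * handler.  A sketch, with hypothetical IRQ number, handler, and device
 * name:
 *
 *	err = um_request_irq(MY_DEV_IRQ, fd, IRQ_READ, my_interrupt,
 *			     IRQF_SHARED, "my_dev", dev);
 *	if (err)
 *		printk(KERN_ERR "my_dev: IRQ registration failed, err = %d\n",
 *		       err);
 *
 * Passing fd == -1 skips the fd plumbing and behaves like plain
 * request_irq.
 */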

/*
 * irq_chip must define (startup || enable) &&
 * (shutdown || disable) && end
 */
static void dummy(unsigned int irq)
{
}

/* This is used for everything other than the timer. */
static struct irq_chip normal_irq_type = {
	.typename = "SIGIO",
	.release = free_irq_by_irq_and_dev,
	.disable = dummy,
	.enable = dummy,
	.ack = dummy,
	.end = dummy
};

static struct irq_chip SIGVTALRM_irq_type = {
	.typename = "SIGVTALRM",
	.release = free_irq_by_irq_and_dev,
	.shutdown = dummy, /* never called */
	.disable = dummy,
	.enable = dummy,
	.ack = dummy,
	.end = dummy
};

void __init init_IRQ(void)
{
	int i;

	irq_desc[TIMER_IRQ].status = IRQ_DISABLED;
	irq_desc[TIMER_IRQ].action = NULL;
	irq_desc[TIMER_IRQ].depth = 1;
	irq_desc[TIMER_IRQ].chip = &SIGVTALRM_irq_type;
	enable_irq(TIMER_IRQ);
	for (i = 1; i < NR_IRQS; i++) {
		irq_desc[i].status = IRQ_DISABLED;
		irq_desc[i].action = NULL;
		irq_desc[i].depth = 1;
		irq_desc[i].chip = &normal_irq_type;
		enable_irq(i);
	}
}

/*
 * IRQ stack entry and exit:
 *
 * Unlike i386, UML doesn't receive IRQs on the normal kernel stack
 * and switch over to the IRQ stack after some preparation.  We use
 * sigaltstack to receive signals on a separate stack from the start.
 * These two functions make sure the rest of the kernel won't be too
 * upset by being on a different stack.  The IRQ stack has a
 * thread_info structure at the bottom so that current et al continue
 * to work.
 *
 * to_irq_stack copies the current task's thread_info to the IRQ stack
 * thread_info and sets the task's stack to point to the IRQ stack.
 *
 * from_irq_stack copies the thread_info struct back (flags may have
 * been modified) and resets the task's stack pointer.
 *
 * Tricky bits -
 *
 * What happens when two signals race each other?  UML doesn't block
 * signals with sigprocmask or sa_mask, so a second signal could
 * arrive while a previous one is still setting up the thread_info.
 *
 * There are three cases -
 *     The first interrupt on the stack - sets up the thread_info and
 * handles the interrupt
 *     A nested interrupt interrupting the copying of the thread_info -
 * can't handle the interrupt, as the stack is in an unknown state
 *     A nested interrupt not interrupting the copying of the
 * thread_info - doesn't do any setup, just handles the interrupt
 *
 * The first job is to figure out whether we interrupted stack setup.
 * This is done by xchging the signal mask with pending_mask.
 * If the value that comes back is zero, then there is no setup in
 * progress, and the interrupt can be handled.  If the value is
 * non-zero, then there is stack setup in progress.  In order to have
 * the interrupt handled, we leave our signal in the mask, and it will
 * be handled by the upper handler after it has set up the stack.
 *
 * Next is to figure out whether we are the outer handler or a nested
 * one.  As part of setting up the stack, thread_info->real_thread is
 * set to non-NULL (and is reset to NULL on exit).  This is the
 * nesting indicator.  If it is non-NULL, then the stack is already
 * set up and the handler can run.
 */

static unsigned long pending_mask;
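
/*
 * A concrete (hypothetical) trace of the protocol with two racing
 * signals, using bit 0x2 for the outer signal and 0x4 for the nested
 * one:
 *
 *	outer:	xchg(&pending_mask, 0x2) returns 0, so it owns the
 *		stack and starts copying the thread_info
 *	nested:	xchg(&pending_mask, 0x4) returns 0x2 - setup is in
 *		progress, so it folds the bits together until xchg
 *		returns what it stored (pending_mask ends up 0x6), then
 *		returns 1, leaving its bit for the outer handler
 *	outer:	setup done; xchg(&pending_mask, 0) returns 0x6, so
 *		*mask_out carries both bits and both signals get handled
 */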

unsigned long to_irq_stack(unsigned long *mask_out)
{
	struct thread_info *ti;
	unsigned long mask, old;
	int nested;

	mask = xchg(&pending_mask, *mask_out);
	if (mask != 0) {
		/*
		 * If any interrupts come in at this point, we want to
		 * make sure that their bits aren't lost by our
		 * putting our bit in.  So, this loop accumulates bits
		 * until xchg returns the same value that we put in.
		 * When that happens, there were no new interrupts,
		 * and pending_mask contains a bit for each interrupt
		 * that came in.
		 */
		old = *mask_out;
		do {
			old |= mask;
			mask = xchg(&pending_mask, old);
		} while (mask != old);
		return 1;
	}

	ti = current_thread_info();
	nested = (ti->real_thread != NULL);
	if (!nested) {
		struct task_struct *task;
		struct thread_info *tti;

		task = cpu_tasks[ti->cpu].task;
		tti = task_thread_info(task);

		*ti = *tti;
		ti->real_thread = tti;
		task->stack = ti;
	}

	mask = xchg(&pending_mask, 0);
	*mask_out |= mask | nested;
	return 0;
}

unsigned long from_irq_stack(int nested)
{
	struct thread_info *ti, *to;
	unsigned long mask;

	ti = current_thread_info();

	pending_mask = 1;

	to = ti->real_thread;
	current->stack = to;
	ti->real_thread = NULL;
	*to = *ti;

	mask = xchg(&pending_mask, 0);
	return mask & ~1;
}
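
/*
 * A sketch of how a low-level signal handler might drive this pair,
 * following the protocol described above (the real caller lives on the
 * os-Linux side of UML; handle_one_signal and the loop shape here are
 * illustrative, not the actual code):
 *
 *	unsigned long pending = 1UL << sig;
 *
 *	if (to_irq_stack(&pending))
 *		return;			- nested during setup; the outer
 *					  handler will pick up our bit
 *	nested = pending & 1;		- bit 0 is the nesting indicator
 *	pending &= ~1;
 *	while (pending) {		- handle every accumulated signal
 *		sig = ffs(pending) - 1;
 *		pending &= ~(1UL << sig);
 *		handle_one_signal(sig);
 *	}
 *	if (!nested)
 *		pending = from_irq_stack(nested);
 *
 * A real caller would also loop around to handle any bits that
 * from_irq_stack reports arrived during teardown.
 */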
518