kernel/irq/irqdesc.c
/*
 * Copyright (C) 1992, 1998-2006 Linus Torvalds, Ingo Molnar
 * Copyright (C) 2005-2006, Thomas Gleixner, Russell King
 *
 * This file contains the interrupt descriptor management code.
 *
 * Detailed information is available in Documentation/DocBook/genericirq
 */
#include <linux/irq.h>
#include <linux/slab.h>
#include <linux/export.h>
#include <linux/interrupt.h>
#include <linux/kernel_stat.h>
#include <linux/radix-tree.h>
#include <linux/bitmap.h>

#include "internals.h"

/*
 * lockdep: we want to handle all irq_desc locks as a single lock-class:
 */
static struct lock_class_key irq_desc_lock_class;

#if defined(CONFIG_SMP)
static void __init init_irq_default_affinity(void)
{
	alloc_cpumask_var(&irq_default_affinity, GFP_NOWAIT);
	cpumask_setall(irq_default_affinity);
}
#else
static void __init init_irq_default_affinity(void)
{
}
#endif

#ifdef CONFIG_SMP
static int alloc_masks(struct irq_desc *desc, gfp_t gfp, int node)
{
	if (!zalloc_cpumask_var_node(&desc->irq_data.affinity, gfp, node))
		return -ENOMEM;

#ifdef CONFIG_GENERIC_PENDING_IRQ
	if (!zalloc_cpumask_var_node(&desc->pending_mask, gfp, node)) {
		free_cpumask_var(desc->irq_data.affinity);
		return -ENOMEM;
	}
#endif
	return 0;
}

static void desc_smp_init(struct irq_desc *desc, int node)
{
	desc->irq_data.node = node;
	cpumask_copy(desc->irq_data.affinity, irq_default_affinity);
#ifdef CONFIG_GENERIC_PENDING_IRQ
	cpumask_clear(desc->pending_mask);
#endif
}

static inline int desc_node(struct irq_desc *desc)
{
	return desc->irq_data.node;
}

#else
static inline int
alloc_masks(struct irq_desc *desc, gfp_t gfp, int node) { return 0; }
static inline void desc_smp_init(struct irq_desc *desc, int node) { }
static inline int desc_node(struct irq_desc *desc) { return 0; }
#endif

static void desc_set_defaults(unsigned int irq, struct irq_desc *desc, int node,
			      struct module *owner)
{
	int cpu;

	desc->irq_data.irq = irq;
	desc->irq_data.chip = &no_irq_chip;
	desc->irq_data.chip_data = NULL;
	desc->irq_data.handler_data = NULL;
	desc->irq_data.msi_desc = NULL;
	irq_settings_clr_and_set(desc, ~0, _IRQ_DEFAULT_INIT_FLAGS);
	irqd_set(&desc->irq_data, IRQD_IRQ_DISABLED);
	desc->handle_irq = handle_bad_irq;
	desc->depth = 1;
	desc->irq_count = 0;
	desc->irqs_unhandled = 0;
	desc->name = NULL;
	desc->owner = owner;
	for_each_possible_cpu(cpu)
		*per_cpu_ptr(desc->kstat_irqs, cpu) = 0;
	desc_smp_init(desc, node);
}

int nr_irqs = NR_IRQS;
EXPORT_SYMBOL_GPL(nr_irqs);

static DEFINE_MUTEX(sparse_irq_lock);
static DECLARE_BITMAP(allocated_irqs, IRQ_BITMAP_BITS);

#ifdef CONFIG_SPARSE_IRQ

static RADIX_TREE(irq_desc_tree, GFP_KERNEL);

static void irq_insert_desc(unsigned int irq, struct irq_desc *desc)
{
	radix_tree_insert(&irq_desc_tree, irq, desc);
}

struct irq_desc *irq_to_desc(unsigned int irq)
{
	return radix_tree_lookup(&irq_desc_tree, irq);
}
EXPORT_SYMBOL(irq_to_desc);

static void delete_irq_desc(unsigned int irq)
{
	radix_tree_delete(&irq_desc_tree, irq);
}

#ifdef CONFIG_SMP
static void free_masks(struct irq_desc *desc)
{
#ifdef CONFIG_GENERIC_PENDING_IRQ
	free_cpumask_var(desc->pending_mask);
#endif
	free_cpumask_var(desc->irq_data.affinity);
}
#else
static inline void free_masks(struct irq_desc *desc) { }
#endif

static struct irq_desc *alloc_desc(int irq, int node, struct module *owner)
{
	struct irq_desc *desc;
	gfp_t gfp = GFP_KERNEL;

	desc = kzalloc_node(sizeof(*desc), gfp, node);
	if (!desc)
		return NULL;
	/* allocate based on nr_cpu_ids */
	desc->kstat_irqs = alloc_percpu(unsigned int);
	if (!desc->kstat_irqs)
		goto err_desc;

	if (alloc_masks(desc, gfp, node))
		goto err_kstat;

	raw_spin_lock_init(&desc->lock);
	lockdep_set_class(&desc->lock, &irq_desc_lock_class);

	desc_set_defaults(irq, desc, node, owner);

	return desc;

err_kstat:
	free_percpu(desc->kstat_irqs);
err_desc:
	kfree(desc);
	return NULL;
}

static void free_desc(unsigned int irq)
{
	struct irq_desc *desc = irq_to_desc(irq);

	unregister_irq_proc(irq, desc);

	mutex_lock(&sparse_irq_lock);
	delete_irq_desc(irq);
	mutex_unlock(&sparse_irq_lock);

	free_masks(desc);
	free_percpu(desc->kstat_irqs);
	kfree(desc);
}

static int alloc_descs(unsigned int start, unsigned int cnt, int node,
		       struct module *owner)
{
	struct irq_desc *desc;
	int i;

	for (i = 0; i < cnt; i++) {
		desc = alloc_desc(start + i, node, owner);
		if (!desc)
			goto err;
		mutex_lock(&sparse_irq_lock);
		irq_insert_desc(start + i, desc);
		mutex_unlock(&sparse_irq_lock);
	}
	return start;

err:
	/* Unwind the partially populated range and release the bitmap bits */
	for (i--; i >= 0; i--)
		free_desc(start + i);

	mutex_lock(&sparse_irq_lock);
	bitmap_clear(allocated_irqs, start, cnt);
	mutex_unlock(&sparse_irq_lock);
	return -ENOMEM;
}

static int irq_expand_nr_irqs(unsigned int nr)
{
	/* nr_irqs can only grow up to the compile-time bitmap size */
	if (nr > IRQ_BITMAP_BITS)
		return -ENOMEM;
	nr_irqs = nr;
	return 0;
}

int __init early_irq_init(void)
{
	int i, initcnt, node = first_online_node;
	struct irq_desc *desc;

	init_irq_default_affinity();

	/* Let arch update nr_irqs and return the nr of preallocated irqs */
	initcnt = arch_probe_nr_irqs();
	printk(KERN_INFO "NR_IRQS:%d nr_irqs:%d %d\n", NR_IRQS, nr_irqs, initcnt);

	if (WARN_ON(nr_irqs > IRQ_BITMAP_BITS))
		nr_irqs = IRQ_BITMAP_BITS;

	if (WARN_ON(initcnt > IRQ_BITMAP_BITS))
		initcnt = IRQ_BITMAP_BITS;

	if (initcnt > nr_irqs)
		nr_irqs = initcnt;

	for (i = 0; i < initcnt; i++) {
		desc = alloc_desc(i, node, NULL);
		set_bit(i, allocated_irqs);
		irq_insert_desc(i, desc);
	}
	return arch_early_irq_init();
}

#else /* !CONFIG_SPARSE_IRQ */

struct irq_desc irq_desc[NR_IRQS] __cacheline_aligned_in_smp = {
	[0 ... NR_IRQS-1] = {
		.handle_irq	= handle_bad_irq,
		.depth		= 1,
		.lock		= __RAW_SPIN_LOCK_UNLOCKED(irq_desc->lock),
	}
};

int __init early_irq_init(void)
{
	int count, i, node = first_online_node;
	struct irq_desc *desc;

	init_irq_default_affinity();

	printk(KERN_INFO "NR_IRQS:%d\n", NR_IRQS);

	desc = irq_desc;
	count = ARRAY_SIZE(irq_desc);

	for (i = 0; i < count; i++) {
		desc[i].kstat_irqs = alloc_percpu(unsigned int);
		alloc_masks(&desc[i], GFP_KERNEL, node);
		raw_spin_lock_init(&desc[i].lock);
		lockdep_set_class(&desc[i].lock, &irq_desc_lock_class);
		desc_set_defaults(i, &desc[i], node, NULL);
	}
	return arch_early_irq_init();
}

struct irq_desc *irq_to_desc(unsigned int irq)
{
	return (irq < NR_IRQS) ? irq_desc + irq : NULL;
}

static void free_desc(unsigned int irq)
{
	dynamic_irq_cleanup(irq);
}

static inline int alloc_descs(unsigned int start, unsigned int cnt, int node,
			      struct module *owner)
{
	u32 i;

	for (i = 0; i < cnt; i++) {
		struct irq_desc *desc = irq_to_desc(start + i);

		desc->owner = owner;
	}
	return start;
}

static int irq_expand_nr_irqs(unsigned int nr)
{
	/* With a static irq_desc[] array the irq space cannot grow */
	return -ENOMEM;
}

#endif /* !CONFIG_SPARSE_IRQ */

/**
 * generic_handle_irq - Invoke the handler for a particular irq
 * @irq: The irq number to handle
 */
int generic_handle_irq(unsigned int irq)
{
	struct irq_desc *desc = irq_to_desc(irq);

	if (!desc)
		return -EINVAL;
	generic_handle_irq_desc(irq, desc);
	return 0;
}
EXPORT_SYMBOL_GPL(generic_handle_irq);
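
/*
 * Example (illustrative sketch, not part of this file): a typical caller
 * is a chained demultiplexing flow handler that reads its controller's
 * status register, maps each pending bit to a Linux irq number and hands
 * it to generic_handle_irq(). The register layout and the demo_* names
 * below are hypothetical.
 *
 *	static void demo_demux_handler(unsigned int irq, struct irq_desc *desc)
 *	{
 *		u32 pending = readl(demo_ctrl_base + DEMO_STATUS);
 *
 *		while (pending) {
 *			unsigned int bit = __ffs(pending);
 *
 *			generic_handle_irq(demo_irq_base + bit);
 *			pending &= ~BIT(bit);
 *		}
 *	}
 */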

/* Dynamic interrupt handling */

/**
 * irq_free_descs - free irq descriptors
 * @from: Start of descriptor range
 * @cnt: Number of consecutive irqs to free
 */
void irq_free_descs(unsigned int from, unsigned int cnt)
{
	int i;

	if (from >= nr_irqs || (from + cnt) > nr_irqs)
		return;

	for (i = 0; i < cnt; i++)
		free_desc(from + i);

	mutex_lock(&sparse_irq_lock);
	bitmap_clear(allocated_irqs, from, cnt);
	mutex_unlock(&sparse_irq_lock);
}
EXPORT_SYMBOL_GPL(irq_free_descs);

/**
 * irq_alloc_descs - allocate and initialize a range of irq descriptors
 * @irq: Allocate for specific irq number if irq >= 0
 * @from: Start the search from this irq number
 * @cnt: Number of consecutive irqs to allocate
 * @node: Preferred node on which the irq descriptor should be allocated
 * @owner: Owning module (can be NULL)
 *
 * Returns the first irq number of the allocated range or a negative
 * error code.
 */
int __ref
__irq_alloc_descs(int irq, unsigned int from, unsigned int cnt, int node,
		  struct module *owner)
{
	int start, ret;

	if (!cnt)
		return -EINVAL;

	if (irq >= 0) {
		if (from > irq)
			return -EINVAL;
		from = irq;
	}

	mutex_lock(&sparse_irq_lock);

	start = bitmap_find_next_zero_area(allocated_irqs, IRQ_BITMAP_BITS,
					   from, cnt, 0);
	ret = -EEXIST;
	if (irq >= 0 && start != irq)
		goto err;

	if (start + cnt > nr_irqs) {
		ret = irq_expand_nr_irqs(start + cnt);
		if (ret)
			goto err;
	}

	bitmap_set(allocated_irqs, start, cnt);
	mutex_unlock(&sparse_irq_lock);
	return alloc_descs(start, cnt, node, owner);

err:
	mutex_unlock(&sparse_irq_lock);
	return ret;
}
EXPORT_SYMBOL_GPL(__irq_alloc_descs);
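
/*
 * Example (illustrative sketch): allocating a block of interrupt numbers
 * and releasing them again. The irq_alloc_descs() wrapper used here is
 * the usual way to reach __irq_alloc_descs() (it supplies THIS_MODULE as
 * @owner); DEMO_NR_IRQS is a placeholder.
 *
 *	int base;
 *
 *	// Let the core pick a free range of DEMO_NR_IRQS descriptors,
 *	// searching upwards from irq 0, preferring the local node.
 *	base = irq_alloc_descs(-1, 0, DEMO_NR_IRQS, numa_node_id());
 *	if (base < 0)
 *		return base;
 *
 *	// ... install chip and handlers for base..base + DEMO_NR_IRQS - 1 ...
 *
 *	// Teardown: release the whole range again.
 *	irq_free_descs(base, DEMO_NR_IRQS);
 */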

/**
 * irq_reserve_irqs - mark irqs allocated
 * @from: mark from irq number
 * @cnt: number of irqs to mark
 *
 * Returns 0 on success or an appropriate error code
 */
int irq_reserve_irqs(unsigned int from, unsigned int cnt)
{
	unsigned int start;
	int ret = 0;

	if (!cnt || (from + cnt) > nr_irqs)
		return -EINVAL;

	mutex_lock(&sparse_irq_lock);
	start = bitmap_find_next_zero_area(allocated_irqs, nr_irqs, from, cnt, 0);
	if (start == from)
		bitmap_set(allocated_irqs, start, cnt);
	else
		ret = -EEXIST;
	mutex_unlock(&sparse_irq_lock);
	return ret;
}

/**
 * irq_get_next_irq - get next allocated irq number
 * @offset: where to start the search
 *
 * Returns the next allocated irq number at or after @offset, or nr_irqs
 * if none is found.
 */
unsigned int irq_get_next_irq(unsigned int offset)
{
	return find_next_bit(allocated_irqs, nr_irqs, offset);
}
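
/*
 * Example (illustrative sketch): walking every allocated irq number, the
 * way /proc/interrupts-style reporting code can iterate the sparse space.
 *
 *	unsigned int irq;
 *
 *	for (irq = irq_get_next_irq(0); irq < nr_irqs;
 *	     irq = irq_get_next_irq(irq + 1))
 *		pr_info("irq %u is allocated\n", irq);
 */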

/*
 * Look up the descriptor for @irq, optionally type-check it against
 * @check (per-cpu-devid vs. global) and return it with desc->lock held
 * and interrupts disabled, taking the chip bus lock first if @bus.
 */
struct irq_desc *
__irq_get_desc_lock(unsigned int irq, unsigned long *flags, bool bus,
		    unsigned int check)
{
	struct irq_desc *desc = irq_to_desc(irq);

	if (desc) {
		if (check & _IRQ_DESC_CHECK) {
			if ((check & _IRQ_DESC_PERCPU) &&
			    !irq_settings_is_per_cpu_devid(desc))
				return NULL;

			if (!(check & _IRQ_DESC_PERCPU) &&
			    irq_settings_is_per_cpu_devid(desc))
				return NULL;
		}

		if (bus)
			chip_bus_lock(desc);
		raw_spin_lock_irqsave(&desc->lock, *flags);
	}
	return desc;
}

void __irq_put_desc_unlock(struct irq_desc *desc, unsigned long flags, bool bus)
{
	raw_spin_unlock_irqrestore(&desc->lock, flags);
	if (bus)
		chip_bus_sync_unlock(desc);
}
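
/*
 * Example (illustrative sketch): the usual get/put pairing, normally
 * reached through the irq_get_desc_buslock()/irq_put_desc_busunlock()
 * helpers in internals.h. Everything between the two calls runs with
 * desc->lock held and interrupts disabled.
 *
 *	unsigned long flags;
 *	struct irq_desc *desc;
 *
 *	desc = __irq_get_desc_lock(irq, &flags, true, IRQ_GET_DESC_CHECK_GLOBAL);
 *	if (!desc)
 *		return -EINVAL;
 *	// ... modify the descriptor, e.g. desc->name ...
 *	__irq_put_desc_unlock(desc, flags, true);
 */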

/*
 * Mark @irq as a per cpu interrupt, i.e. one that carries a per cpu
 * dev_id and is requested with request_percpu_irq().
 */
int irq_set_percpu_devid(unsigned int irq)
{
	struct irq_desc *desc = irq_to_desc(irq);

	if (!desc)
		return -EINVAL;

	if (desc->percpu_enabled)
		return -EINVAL;

	desc->percpu_enabled = kzalloc(sizeof(*desc->percpu_enabled), GFP_KERNEL);

	if (!desc->percpu_enabled)
		return -ENOMEM;

	irq_set_percpu_devid_flags(irq);
	return 0;
}
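
/*
 * Example (illustrative sketch): per cpu interrupts such as ARM local
 * timer PPIs are marked this way before being requested with
 * request_percpu_irq(). The demo_* names are placeholders.
 *
 *	static DEFINE_PER_CPU(struct demo_dev, demo_dev);
 *
 *	err = irq_set_percpu_devid(irq);
 *	if (!err)
 *		err = request_percpu_irq(irq, demo_percpu_handler,
 *					 "demo-timer", &demo_dev);
 */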

/**
 * dynamic_irq_cleanup - cleanup a dynamically allocated irq
 * @irq: irq number to clean up
 */
void dynamic_irq_cleanup(unsigned int irq)
{
	struct irq_desc *desc = irq_to_desc(irq);
	unsigned long flags;

	raw_spin_lock_irqsave(&desc->lock, flags);
	desc_set_defaults(irq, desc, desc_node(desc), NULL);
	raw_spin_unlock_irqrestore(&desc->lock, flags);
}

/* Interrupt count for @irq on a particular cpu, 0 if unavailable */
unsigned int kstat_irqs_cpu(unsigned int irq, int cpu)
{
	struct irq_desc *desc = irq_to_desc(irq);

	return desc && desc->kstat_irqs ?
			*per_cpu_ptr(desc->kstat_irqs, cpu) : 0;
}

/* Total interrupt count for @irq, summed over all possible cpus */
unsigned int kstat_irqs(unsigned int irq)
{
	struct irq_desc *desc = irq_to_desc(irq);
	int cpu;
	int sum = 0;

	if (!desc || !desc->kstat_irqs)
		return 0;
	for_each_possible_cpu(cpu)
		sum += *per_cpu_ptr(desc->kstat_irqs, cpu);
	return sum;
}
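
/*
 * Example (illustrative sketch): combining the two accessors for a given
 * irq number, as statistics or debug code might. The printout format is
 * made up.
 *
 *	unsigned int cpu, total = kstat_irqs(irq);
 *
 *	pr_info("irq %u fired %u times\n", irq, total);
 *	for_each_online_cpu(cpu)
 *		pr_info("  cpu%u: %u\n", cpu, kstat_irqs_cpu(irq, cpu));
 */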