workqueue: fix ghost PENDING flag while doing MQ IO
[GitHub/mt8127/android_kernel_alcatel_ttab.git] / kernel / irq / proc.c
1 /*
2 * linux/kernel/irq/proc.c
3 *
4 * Copyright (C) 1992, 1998-2004 Linus Torvalds, Ingo Molnar
5 *
6 * This file contains the /proc/irq/ handling code.
7 */
8
9 #include <linux/irq.h>
10 #include <linux/gfp.h>
11 #include <linux/proc_fs.h>
12 #include <linux/seq_file.h>
13 #include <linux/interrupt.h>
14 #include <linux/kernel_stat.h>
15 #include <linux/mutex.h>
16
17 #include "internals.h"
18
/* Root of the /proc/irq/ tree; created once in init_irq_proc(). */
static struct proc_dir_entry *root_irq_dir;
20
21 #ifdef CONFIG_SMP
22
23 static int show_irq_affinity(int type, struct seq_file *m, void *v)
24 {
25 struct irq_desc *desc = irq_to_desc((long)m->private);
26 const struct cpumask *mask = desc->irq_data.affinity;
27
28 #ifdef CONFIG_GENERIC_PENDING_IRQ
29 if (irqd_is_setaffinity_pending(&desc->irq_data))
30 mask = desc->pending_mask;
31 #endif
32 if (type)
33 seq_cpumask_list(m, mask);
34 else
35 seq_cpumask(m, mask);
36 seq_putc(m, '\n');
37 return 0;
38 }
39
40 static int irq_affinity_hint_proc_show(struct seq_file *m, void *v)
41 {
42 struct irq_desc *desc = irq_to_desc((long)m->private);
43 unsigned long flags;
44 cpumask_var_t mask;
45
46 if (!zalloc_cpumask_var(&mask, GFP_KERNEL))
47 return -ENOMEM;
48
49 raw_spin_lock_irqsave(&desc->lock, flags);
50 if (desc->affinity_hint)
51 cpumask_copy(mask, desc->affinity_hint);
52 raw_spin_unlock_irqrestore(&desc->lock, flags);
53
54 seq_cpumask(m, mask);
55 seq_putc(m, '\n');
56 free_cpumask_var(mask);
57
58 return 0;
59 }
60
/*
 * Architectures may veto user-supplied affinity masks by providing
 * is_affinity_mask_valid(); default is to accept any mask.
 */
#ifndef is_affinity_mask_valid
#define is_affinity_mask_valid(val) 1
#endif

/* Non-zero makes write_irq_affinity() reject all affinity writes. */
int no_irq_affinity;
/* /proc/irq/<n>/smp_affinity read: hex bitmask format. */
static int irq_affinity_proc_show(struct seq_file *m, void *v)
{
	return show_irq_affinity(0, m, v);
}

/* /proc/irq/<n>/smp_affinity_list read: CPU list format (e.g. "0-3,8"). */
static int irq_affinity_list_proc_show(struct seq_file *m, void *v)
{
	return show_irq_affinity(1, m, v);
}
75
76
77 static ssize_t write_irq_affinity(int type, struct file *file,
78 const char __user *buffer, size_t count, loff_t *pos)
79 {
80 unsigned int irq = (int)(long)PDE_DATA(file_inode(file));
81 cpumask_var_t new_value;
82 int err;
83
84 if (!irq_can_set_affinity(irq) || no_irq_affinity)
85 return -EIO;
86
87 if (!alloc_cpumask_var(&new_value, GFP_KERNEL))
88 return -ENOMEM;
89
90 if (type)
91 err = cpumask_parselist_user(buffer, count, new_value);
92 else
93 err = cpumask_parse_user(buffer, count, new_value);
94 if (err)
95 goto free_cpumask;
96
97 if (!is_affinity_mask_valid(new_value)) {
98 err = -EINVAL;
99 goto free_cpumask;
100 }
101
102 /*
103 * Do not allow disabling IRQs completely - it's a too easy
104 * way to make the system unusable accidentally :-) At least
105 * one online CPU still has to be targeted.
106 */
107 if (!cpumask_intersects(new_value, cpu_online_mask)) {
108 /* Special case for empty set - allow the architecture
109 code to set default SMP affinity. */
110 err = irq_select_affinity_usr(irq, new_value) ? -EINVAL : count;
111 } else {
112 irq_set_affinity(irq, new_value);
113 err = count;
114 }
115
116 free_cpumask:
117 free_cpumask_var(new_value);
118 return err;
119 }
120
/* /proc/irq/<n>/smp_affinity write: hex bitmask input. */
static ssize_t irq_affinity_proc_write(struct file *file,
		const char __user *buffer, size_t count, loff_t *pos)
{
	return write_irq_affinity(0, file, buffer, count, pos);
}

/* /proc/irq/<n>/smp_affinity_list write: CPU list input. */
static ssize_t irq_affinity_list_proc_write(struct file *file,
		const char __user *buffer, size_t count, loff_t *pos)
{
	return write_irq_affinity(1, file, buffer, count, pos);
}
132
/* open handlers: bind each seq_file show routine to the IRQ number
 * stashed in the proc entry's data pointer. */
static int irq_affinity_proc_open(struct inode *inode, struct file *file)
{
	return single_open(file, irq_affinity_proc_show, PDE_DATA(inode));
}

static int irq_affinity_list_proc_open(struct inode *inode, struct file *file)
{
	return single_open(file, irq_affinity_list_proc_show, PDE_DATA(inode));
}

static int irq_affinity_hint_proc_open(struct inode *inode, struct file *file)
{
	return single_open(file, irq_affinity_hint_proc_show, PDE_DATA(inode));
}
147
/* /proc/irq/<n>/smp_affinity: read/write (created 0600). */
static const struct file_operations irq_affinity_proc_fops = {
	.open		= irq_affinity_proc_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
	.write		= irq_affinity_proc_write,
};

/* /proc/irq/<n>/affinity_hint: read-only (created 0400). */
static const struct file_operations irq_affinity_hint_proc_fops = {
	.open		= irq_affinity_hint_proc_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};

/* /proc/irq/<n>/smp_affinity_list: read/write (created 0600). */
static const struct file_operations irq_affinity_list_proc_fops = {
	.open		= irq_affinity_list_proc_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
	.write		= irq_affinity_list_proc_write,
};
170
/* Show /proc/irq/default_smp_affinity as a hex cpumask. */
static int default_affinity_show(struct seq_file *m, void *v)
{
	seq_cpumask(m, irq_default_affinity);
	seq_putc(m, '\n');
	return 0;
}
177
178 static ssize_t default_affinity_write(struct file *file,
179 const char __user *buffer, size_t count, loff_t *ppos)
180 {
181 cpumask_var_t new_value;
182 int err;
183
184 if (!alloc_cpumask_var(&new_value, GFP_KERNEL))
185 return -ENOMEM;
186
187 err = cpumask_parse_user(buffer, count, new_value);
188 if (err)
189 goto out;
190
191 if (!is_affinity_mask_valid(new_value)) {
192 err = -EINVAL;
193 goto out;
194 }
195
196 /*
197 * Do not allow disabling IRQs completely - it's a too easy
198 * way to make the system unusable accidentally :-) At least
199 * one online CPU still has to be targeted.
200 */
201 if (!cpumask_intersects(new_value, cpu_online_mask)) {
202 err = -EINVAL;
203 goto out;
204 }
205
206 cpumask_copy(irq_default_affinity, new_value);
207 err = count;
208
209 out:
210 free_cpumask_var(new_value);
211 return err;
212 }
213
static int default_affinity_open(struct inode *inode, struct file *file)
{
	return single_open(file, default_affinity_show, PDE_DATA(inode));
}

/* /proc/irq/default_smp_affinity: read/write (created 0600). */
static const struct file_operations default_affinity_proc_fops = {
	.open		= default_affinity_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
	.write		= default_affinity_write,
};
226
/* Show /proc/irq/<n>/node: the descriptor's node field (presumably the
 * NUMA node the IRQ is associated with — confirm against irq_data docs). */
static int irq_node_proc_show(struct seq_file *m, void *v)
{
	struct irq_desc *desc = irq_to_desc((long) m->private);

	seq_printf(m, "%d\n", desc->irq_data.node);
	return 0;
}

static int irq_node_proc_open(struct inode *inode, struct file *file)
{
	return single_open(file, irq_node_proc_show, PDE_DATA(inode));
}

/* /proc/irq/<n>/node: read-only (created 0444). */
static const struct file_operations irq_node_proc_fops = {
	.open		= irq_node_proc_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};
246 #endif
247
/* Show /proc/irq/<n>/spurious: total count, unhandled count and the
 * time (ms since boot, from jiffies) of the last unhandled interrupt. */
static int irq_spurious_proc_show(struct seq_file *m, void *v)
{
	struct irq_desc *desc = irq_to_desc((long) m->private);

	seq_printf(m, "count %u\n" "unhandled %u\n" "last_unhandled %u ms\n",
		   desc->irq_count, desc->irqs_unhandled,
		   jiffies_to_msecs(desc->last_unhandled));
	return 0;
}

static int irq_spurious_proc_open(struct inode *inode, struct file *file)
{
	return single_open(file, irq_spurious_proc_show, PDE_DATA(inode));
}

/* /proc/irq/<n>/spurious: read-only (created 0444). */
static const struct file_operations irq_spurious_proc_fops = {
	.open		= irq_spurious_proc_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};
269
270 #define MAX_NAMELEN 128
271
272 static int name_unique(unsigned int irq, struct irqaction *new_action)
273 {
274 struct irq_desc *desc = irq_to_desc(irq);
275 struct irqaction *action;
276 unsigned long flags;
277 int ret = 1;
278
279 raw_spin_lock_irqsave(&desc->lock, flags);
280 for (action = desc->action ; action; action = action->next) {
281 if ((action != new_action) && action->name &&
282 !strcmp(new_action->name, action->name)) {
283 ret = 0;
284 break;
285 }
286 }
287 raw_spin_unlock_irqrestore(&desc->lock, flags);
288 return ret;
289 }
290
291 void register_handler_proc(unsigned int irq, struct irqaction *action)
292 {
293 char name [MAX_NAMELEN];
294 struct irq_desc *desc = irq_to_desc(irq);
295
296 if (!desc->dir || action->dir || !action->name ||
297 !name_unique(irq, action))
298 return;
299
300 memset(name, 0, MAX_NAMELEN);
301 snprintf(name, MAX_NAMELEN, "%s", action->name);
302
303 /* create /proc/irq/1234/handler/ */
304 action->dir = proc_mkdir(name, desc->dir);
305 }
306
307 #undef MAX_NAMELEN
308
309 #define MAX_NAMELEN 10
310
311 void register_irq_proc(unsigned int irq, struct irq_desc *desc)
312 {
313 static DEFINE_MUTEX(register_lock);
314 char name [MAX_NAMELEN];
315
316 if (!root_irq_dir || (desc->irq_data.chip == &no_irq_chip))
317 return;
318
319 /*
320 * irq directories are registered only when a handler is
321 * added, not when the descriptor is created, so multiple
322 * tasks might try to register at the same time.
323 */
324 mutex_lock(&register_lock);
325
326 if (desc->dir)
327 goto out_unlock;
328
329 memset(name, 0, MAX_NAMELEN);
330 sprintf(name, "%d", irq);
331
332 /* create /proc/irq/1234 */
333 desc->dir = proc_mkdir(name, root_irq_dir);
334 if (!desc->dir)
335 goto out_unlock;
336
337 #ifdef CONFIG_SMP
338 /* create /proc/irq/<irq>/smp_affinity */
339 proc_create_data("smp_affinity", 0600, desc->dir,
340 &irq_affinity_proc_fops, (void *)(long)irq);
341
342 /* create /proc/irq/<irq>/affinity_hint */
343 proc_create_data("affinity_hint", 0400, desc->dir,
344 &irq_affinity_hint_proc_fops, (void *)(long)irq);
345
346 /* create /proc/irq/<irq>/smp_affinity_list */
347 proc_create_data("smp_affinity_list", 0600, desc->dir,
348 &irq_affinity_list_proc_fops, (void *)(long)irq);
349
350 proc_create_data("node", 0444, desc->dir,
351 &irq_node_proc_fops, (void *)(long)irq);
352 #endif
353
354 proc_create_data("spurious", 0444, desc->dir,
355 &irq_spurious_proc_fops, (void *)(long)irq);
356
357 out_unlock:
358 mutex_unlock(&register_lock);
359 }
360
361 void unregister_irq_proc(unsigned int irq, struct irq_desc *desc)
362 {
363 char name [MAX_NAMELEN];
364
365 if (!root_irq_dir || !desc->dir)
366 return;
367 #ifdef CONFIG_SMP
368 remove_proc_entry("smp_affinity", desc->dir);
369 remove_proc_entry("affinity_hint", desc->dir);
370 remove_proc_entry("smp_affinity_list", desc->dir);
371 remove_proc_entry("node", desc->dir);
372 #endif
373 remove_proc_entry("spurious", desc->dir);
374
375 memset(name, 0, MAX_NAMELEN);
376 sprintf(name, "%u", irq);
377 remove_proc_entry(name, root_irq_dir);
378 }
379
380 #undef MAX_NAMELEN
381
/* Remove /proc/irq/<irq>/<name>/ for @action. action->dir may be NULL if
 * register_handler_proc() never created it — presumably proc_remove()
 * tolerates that; confirm against the proc_fs API. */
void unregister_handler_proc(unsigned int irq, struct irqaction *action)
{
	proc_remove(action->dir);
}
386
/* Create /proc/irq/default_smp_affinity (0600) on SMP builds; no-op on UP. */
static void register_default_affinity_proc(void)
{
#ifdef CONFIG_SMP
	proc_create("irq/default_smp_affinity", 0600, NULL,
		    &default_affinity_proc_fops);
#endif
}
394
395 void init_irq_proc(void)
396 {
397 unsigned int irq;
398 struct irq_desc *desc;
399
400 /* create /proc/irq */
401 root_irq_dir = proc_mkdir("irq", NULL);
402 if (!root_irq_dir)
403 return;
404
405 register_default_affinity_proc();
406
407 /*
408 * Create entries for all existing IRQs.
409 */
410 for_each_irq_desc(irq, desc) {
411 if (!desc)
412 continue;
413
414 register_irq_proc(irq, desc);
415 }
416 }
417
418 #ifdef CONFIG_GENERIC_IRQ_SHOW
419
/*
 * Weak default for the architecture hook that appends extra rows after
 * the per-IRQ lines in /proc/interrupts; arches override it to add
 * their own counters. The stub prints nothing.
 */
int __weak arch_show_interrupts(struct seq_file *p, int prec)
{
	return 0;
}
424
/* Number of IRQ rows shown; arches may define a different bound. */
#ifndef ACTUAL_NR_IRQS
# define ACTUAL_NR_IRQS nr_irqs
#endif
428
/*
 * seq_file show handler for /proc/interrupts: a header row, one row per
 * IRQ (per-CPU counts, chip name, optionally trigger type, descriptor
 * name and action names), then arch-specific lines one index past the
 * last IRQ.
 */
int show_interrupts(struct seq_file *p, void *v)
{
	/* Width of the IRQ-number column; computed when row 0 is printed
	 * and reused by later rows of the same traversal. */
	static int prec;

	unsigned long flags, any_count = 0;
	int i = *(loff_t *) v, j;
	struct irqaction *action;
	struct irq_desc *desc;

	if (i > ACTUAL_NR_IRQS)
		return 0;

	/* The index one past the last IRQ carries the arch-specific rows. */
	if (i == ACTUAL_NR_IRQS)
		return arch_show_interrupts(p, prec);

	/* print header and calculate the width of the first column */
	if (i == 0) {
		/* prec grows by one per decimal digit of nr_irqs above 3. */
		for (prec = 3, j = 1000; prec < 10 && j <= nr_irqs; ++prec)
			j *= 10;

		seq_printf(p, "%*s", prec + 8, "");
		for_each_online_cpu(j)
			seq_printf(p, "CPU%-8d", j);
		seq_putc(p, '\n');
	}

	desc = irq_to_desc(i);
	if (!desc)
		return 0;

	raw_spin_lock_irqsave(&desc->lock, flags);
	/* OR the per-CPU counts — only "has it ever fired?" matters here. */
	for_each_online_cpu(j)
		any_count |= kstat_irqs_cpu(i, j);
	action = desc->action;
	/* Suppress rows for IRQs with no handler that never fired. */
	if (!action && !any_count)
		goto out;

	seq_printf(p, "%*d: ", prec, i);
	for_each_online_cpu(j)
		seq_printf(p, "%10u ", kstat_irqs_cpu(i, j));

	/* Chip column: custom printer, chip name, "-" (unnamed) or "None". */
	if (desc->irq_data.chip) {
		if (desc->irq_data.chip->irq_print_chip)
			desc->irq_data.chip->irq_print_chip(&desc->irq_data, p);
		else if (desc->irq_data.chip->name)
			seq_printf(p, " %8s", desc->irq_data.chip->name);
		else
			seq_printf(p, " %8s", "-");
	} else {
		seq_printf(p, " %8s", "None");
	}
#ifdef CONFIG_GENERIC_IRQ_SHOW_LEVEL
	seq_printf(p, " %-8s", irqd_is_level_type(&desc->irq_data) ? "Level" : "Edge");
#endif
	if (desc->name)
		seq_printf(p, "-%-8s", desc->name);

	/* Comma-separated names of all actions sharing this IRQ. */
	if (action) {
		seq_printf(p, " %s", action->name);
		while ((action = action->next) != NULL)
			seq_printf(p, ", %s", action->name);
	}

	seq_putc(p, '\n');
out:
	raw_spin_unlock_irqrestore(&desc->lock, flags);
	return 0;
}
497 #endif