/*
 *	linux/arch/alpha/kernel/irq.c
 *
 *	Copyright (C) 1995 Linus Torvalds
 *
 * This file contains the code used by various IRQ handling routines:
 * asking for different IRQs should be done through these routines
 * instead of just grabbing them. Thus setups with different IRQ numbers
 * shouldn't result in any weird surprises, and installing new handlers
 * should be easier.
 */

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/errno.h>
#include <linux/kernel_stat.h>
#include <linux/signal.h>
#include <linux/sched.h>
#include <linux/ptrace.h>
#include <linux/interrupt.h>
#include <linux/random.h>
#include <linux/init.h>
#include <linux/irq.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
#include <linux/profile.h>
#include <linux/bitops.h>

#include <asm/io.h>
#include <asm/uaccess.h>

volatile unsigned long irq_err_count;
DEFINE_PER_CPU(unsigned long, irq_pmi_count);

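/*
 * Catch-all for stray interrupt vectors: bump the error counter that
 * shows up on the ERR line of /proc/interrupts and complain loudly.
 */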
void ack_bad_irq(unsigned int irq)
{
        irq_err_count++;
        printk(KERN_CRIT "Unexpected IRQ trap at vector %u\n", irq);
}

#ifdef CONFIG_SMP
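/*
 * Tracks IRQs whose affinity was set explicitly (e.g. from user space);
 * irq_select_affinity() leaves those alone.
 */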
static char irq_user_affinity[NR_IRQS];

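/*
 * Pick a CPU for a newly set up IRQ by round-robining across the CPUs
 * allowed by irq_default_affinity, skipping IRQs whose affinity the
 * user has already pinned.  Returns 0 if an affinity was assigned,
 * 1 if the IRQ was left untouched.
 */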
int irq_select_affinity(unsigned int irq)
{
        struct irq_data *data = irq_get_irq_data(irq);
        struct irq_chip *chip;
        static int last_cpu;
        int cpu = last_cpu + 1;

        if (!data)
                return 1;
        chip = irq_data_get_irq_chip(data);

        if (!chip->irq_set_affinity || irq_user_affinity[irq])
                return 1;

        while (!cpu_possible(cpu) ||
               !cpumask_test_cpu(cpu, irq_default_affinity))
                cpu = (cpu < (NR_CPUS - 1) ? cpu + 1 : 0);
        last_cpu = cpu;

        cpumask_copy(data->affinity, cpumask_of(cpu));
        chip->irq_set_affinity(data, cpumask_of(cpu), false);
        return 0;
}
#endif /* CONFIG_SMP */

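/*
 * Arch hook for /proc/interrupts: append the Alpha-specific IPI,
 * performance-monitor, and error counters below the per-IRQ lines.
 */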
int arch_show_interrupts(struct seq_file *p, int prec)
{
        int j;

#ifdef CONFIG_SMP
        seq_puts(p, "IPI: ");
        for_each_online_cpu(j)
                seq_printf(p, "%10lu ", cpu_data[j].ipi_count);
        seq_putc(p, '\n');
#endif
        seq_puts(p, "PMI: ");
        for_each_online_cpu(j)
                seq_printf(p, "%10lu ", per_cpu(irq_pmi_count, j));
        seq_puts(p, "          Performance Monitoring\n");
        seq_printf(p, "ERR: %10lu\n", irq_err_count);
        return 0;
}

/*
 * handle_irq handles all normal device IRQs (the special SMP
 * cross-CPU interrupts have their own specific handlers).
 */

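/* Cap on how often handle_irq() complains about out-of-range vectors. */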
#define MAX_ILLEGAL_IRQS 16

void
handle_irq(int irq)
{
        /*
         * We ack quickly, we don't want the irq controller
         * thinking we're snobs just because some other CPU has
         * disabled global interrupts (we have already done the
         * INT_ACK cycles, it's too late to try to pretend to the
         * controller that we aren't taking the interrupt).
         */
        static unsigned int illegal_count = 0;
        struct irq_desc *desc = irq_to_desc(irq);

        if (!desc || ((unsigned) irq >= ACTUAL_NR_IRQS &&
            illegal_count < MAX_ILLEGAL_IRQS)) {
                irq_err_count++;
                illegal_count++;
                printk(KERN_CRIT "device_interrupt: invalid interrupt %d\n",
                       irq);
                return;
        }

        irq_enter();
        generic_handle_irq_desc(irq, desc);
        irq_exit();
}
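
/*
 * Illustration only, not part of the original file: a minimal sketch of
 * how a platform's machine-vector entry point might feed decoded vectors
 * into handle_irq().  The function name and the 0x800 base / 0x10 spacing
 * are assumptions modelled on the SRM console convention used by several
 * of the sys_*.c platform files, not code that exists here.
 *
 *	static void
 *	example_srm_device_interrupt(unsigned long vector)
 *	{
 *		int irq = (vector - 0x800) >> 4;	(hypothetical mapping)
 *		handle_irq(irq);
 *	}
 */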