Add CONFIG_HAVE_SPARSE_IRQ to allow using a condensed irq_desc array.
Get rid of irq_desc[] array assumptions.
Preallocate 32 irq_desc entries; irq_to_desc() will try to get more when
they run out.
( No change in functionality is expected anywhere, except the odd build
failure where we missed a code site or where a crossing commit introduces
new irq_desc[] usage. )
v2: according to Eric, change get_irq_desc() to irq_to_desc()
Signed-off-by: Yinghai Lu <yhlu.kernel@gmail.com>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
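
The conversion at call sites is mechanical; a minimal before/after sketch
of the pattern (illustrative only, not one specific hunk from this patch):

	/* before: direct indexing assumes a flat irq_desc[] array */
	irq_desc[irq].status |= IRQ_LEVEL;

	/* after: go through the lookup helper, which works for both the
	 * flat array and the sparse list */
	struct irq_desc *desc = irq_to_desc(irq);

	desc->status |= IRQ_LEVEL;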
config HAVE_DYN_ARRAY
def_bool n
+
+config HAVE_SPARSE_IRQ
+ def_bool n
+
select HAVE_GENERIC_DMA_COHERENT if X86_32
select HAVE_EFFICIENT_UNALIGNED_ACCESS
select HAVE_DYN_ARRAY
+ select HAVE_SPARSE_IRQ if X86_64
config ARCH_DEFCONFIG
string
struct irq_pin_list *entry = irq_2_pin + irq;
unsigned int apicid_value;
cpumask_t tmp;
+ struct irq_desc *desc;
cpus_and(tmp, cpumask, cpu_online_map);
if (cpus_empty(tmp))
break;
entry = irq_2_pin + entry->next;
}
- irq_desc[irq].affinity = cpumask;
+ desc = irq_to_desc(irq);
+ desc->affinity = cpumask;
spin_unlock_irqrestore(&ioapic_lock, flags);
}
static inline void rotate_irqs_among_cpus(unsigned long useful_load_threshold)
{
int i, j;
+ struct irq_desc *desc;
for_each_online_cpu(i) {
for (j = 0; j < nr_irqs; j++) {
- if (!irq_desc[j].action)
+ desc = irq_to_desc(j);
+ if (!desc->action)
continue;
/* Is it a significant load ? */
if (IRQ_DELTA(CPU_TO_PACKAGEINDEX(i), j) <
unsigned long tmp_cpu_irq;
unsigned long imbalance = 0;
cpumask_t allowed_mask, target_cpu_mask, tmp;
+ struct irq_desc *desc;
for_each_possible_cpu(i) {
int package_index;
for (j = 0; j < nr_irqs; j++) {
unsigned long value_now, delta;
/* Is this an active IRQ or balancing disabled ? */
- if (!irq_desc[j].action || irq_balancing_disabled(j))
+ desc = irq_to_desc(j);
+ if (!desc->action || irq_balancing_disabled(j))
continue;
if (package_index == i)
IRQ_DELTA(package_index, j) = 0;
selected_irq = -1;
for (j = 0; j < nr_irqs; j++) {
/* Is this an active IRQ? */
- if (!irq_desc[j].action)
+ desc = irq_to_desc(j);
+ if (!desc->action)
continue;
if (imbalance <= IRQ_DELTA(max_loaded, j))
continue;
int i;
unsigned long prev_balance_time = jiffies;
long time_remaining = balanced_irq_interval;
+ struct irq_desc *desc;
/* push everything to CPU 0 to give us a starting point. */
for (i = 0 ; i < nr_irqs ; i++) {
- irq_desc[i].pending_mask = cpumask_of_cpu(0);
+ desc = irq_to_desc(i);
+ desc->pending_mask = cpumask_of_cpu(0);
set_pending_irq(i, cpumask_of_cpu(0));
}
static void ioapic_register_intr(int irq, int vector, unsigned long trigger)
{
+ struct irq_desc *desc;
+
+ desc = irq_to_desc(irq);
if ((trigger == IOAPIC_AUTO && IO_APIC_irq_trigger(irq)) ||
trigger == IOAPIC_LEVEL) {
- irq_desc[irq].status |= IRQ_LEVEL;
+ desc->status |= IRQ_LEVEL;
set_irq_chip_and_handler_name(irq, &ioapic_chip,
handle_fasteoi_irq, "fasteoi");
} else {
- irq_desc[irq].status &= ~IRQ_LEVEL;
+ desc->status &= ~IRQ_LEVEL;
set_irq_chip_and_handler_name(irq, &ioapic_chip,
handle_edge_irq, "edge");
}
static inline void init_IO_APIC_traps(void)
{
int irq;
+ struct irq_desc *desc;
/*
* NOTE! The local APIC isn't very good at handling
*/
if (irq < 16)
make_8259A_irq(irq);
- else
+ else {
+ desc = irq_to_desc(irq);
/* Strange. Oh, well.. */
- irq_desc[irq].chip = &no_irq_chip;
+ desc->chip = &no_irq_chip;
+ }
}
}
}
static void lapic_register_intr(int irq, int vector)
{
- irq_desc[irq].status &= ~IRQ_LEVEL;
+ struct irq_desc *desc;
+
+ desc = irq_to_desc(irq);
+ desc->status &= ~IRQ_LEVEL;
set_irq_chip_and_handler_name(irq, &lapic_chip, handle_edge_irq,
"edge");
set_intr_gate(vector, interrupt[irq]);
unsigned int dest;
cpumask_t tmp;
int vector;
+ struct irq_desc *desc;
cpus_and(tmp, mask, cpu_online_map);
if (cpus_empty(tmp))
msg.address_lo |= MSI_ADDR_DEST_ID(dest);
write_msi_msg(irq, &msg);
- irq_desc[irq].affinity = mask;
+ desc = irq_to_desc(irq);
+ desc->affinity = mask;
}
#endif /* CONFIG_SMP */
{
unsigned int dest;
cpumask_t tmp;
+ struct irq_desc *desc;
cpus_and(tmp, mask, cpu_online_map);
if (cpus_empty(tmp))
dest = cpu_mask_to_apicid(mask);
target_ht_irq(irq, dest);
- irq_desc[irq].affinity = mask;
+ desc = irq_to_desc(irq);
+ desc->affinity = mask;
}
#endif
unsigned long flags;
unsigned int dest;
cpumask_t tmp;
+ struct irq_desc *desc;
cpus_and(tmp, mask, cpu_online_map);
if (cpus_empty(tmp))
*/
dest = SET_APIC_LOGICAL_ID(dest);
+ desc = irq_to_desc(irq);
spin_lock_irqsave(&ioapic_lock, flags);
__target_IO_APIC_irq(irq, dest, cfg->vector);
- irq_desc[irq].affinity = mask;
+ desc->affinity = mask;
spin_unlock_irqrestore(&ioapic_lock, flags);
}
#endif
static void ioapic_register_intr(int irq, unsigned long trigger)
{
+ struct irq_desc *desc;
+
+ desc = irq_to_desc(irq);
if (trigger)
- irq_desc[irq].status |= IRQ_LEVEL;
+ desc->status |= IRQ_LEVEL;
else
- irq_desc[irq].status &= ~IRQ_LEVEL;
+ desc->status &= ~IRQ_LEVEL;
#ifdef CONFIG_INTR_REMAP
if (irq_remapped(irq)) {
- irq_desc[irq].status |= IRQ_MOVE_PCNTXT;
+ desc->status |= IRQ_MOVE_PCNTXT;
if (trigger)
set_irq_chip_and_handler_name(irq, &ir_ioapic_chip,
handle_fasteoi_irq,
static void migrate_ioapic_irq(int irq, cpumask_t mask)
{
struct irq_cfg *cfg = irq_cfg + irq;
- struct irq_desc *desc = irq_desc + irq;
+ struct irq_desc *desc;
cpumask_t tmp, cleanup_mask;
struct irte irte;
- int modify_ioapic_rte = desc->status & IRQ_LEVEL;
+ int modify_ioapic_rte;
unsigned int dest;
unsigned long flags;
cpus_and(tmp, cfg->domain, mask);
dest = cpu_mask_to_apicid(tmp);
+ desc = irq_to_desc(irq);
+ modify_ioapic_rte = desc->status & IRQ_LEVEL;
if (modify_ioapic_rte) {
spin_lock_irqsave(&ioapic_lock, flags);
__target_IO_APIC_irq(irq, dest, cfg->vector);
cfg->move_in_progress = 0;
}
- irq_desc[irq].affinity = mask;
+ desc->affinity = mask;
}
static int migrate_irq_remapped_level(int irq)
{
int ret = -1;
+ struct irq_desc *desc = irq_to_desc(irq);
mask_IO_APIC_irq(irq);
}
/* everything is clear. we have right of way */
- migrate_ioapic_irq(irq, irq_desc[irq].pending_mask);
+ migrate_ioapic_irq(irq, desc->pending_mask);
ret = 0;
- irq_desc[irq].status &= ~IRQ_MOVE_PENDING;
- cpus_clear(irq_desc[irq].pending_mask);
+ desc->status &= ~IRQ_MOVE_PENDING;
+ cpus_clear(desc->pending_mask);
unmask:
unmask_IO_APIC_irq(irq);
int irq;
for (irq = 0; irq < nr_irqs; irq++) {
- struct irq_desc *desc = irq_desc + irq;
+ struct irq_desc *desc = irq_to_desc(irq);
if (desc->status & IRQ_MOVE_PENDING) {
unsigned long flags;
continue;
}
- desc->chip->set_affinity(irq,
- irq_desc[irq].pending_mask);
+ desc->chip->set_affinity(irq, desc->pending_mask);
spin_unlock_irqrestore(&desc->lock, flags);
}
}
*/
static void set_ir_ioapic_affinity_irq(unsigned int irq, cpumask_t mask)
{
- if (irq_desc[irq].status & IRQ_LEVEL) {
- irq_desc[irq].status |= IRQ_MOVE_PENDING;
- irq_desc[irq].pending_mask = mask;
+ struct irq_desc *desc = irq_to_desc(irq);
+
+ if (desc->status & IRQ_LEVEL) {
+ desc->status |= IRQ_MOVE_PENDING;
+ desc->pending_mask = mask;
migrate_irq_remapped_level(irq);
return;
}
if (irq >= nr_irqs)
continue;
- desc = irq_desc + irq;
+ desc = irq_to_desc(irq);
cfg = irq_cfg + irq;
spin_lock(&desc->lock);
if (!cfg->move_cleanup_count)
irq_complete_move(irq);
#ifdef CONFIG_GENERIC_PENDING_IRQ
/* If we are moving the irq we need to mask it */
- if (unlikely(irq_desc[irq].status & IRQ_MOVE_PENDING)) {
+ if (unlikely(irq_to_desc(irq)->status & IRQ_MOVE_PENDING)) {
do_unmask_irq = 1;
mask_IO_APIC_irq(irq);
}
static inline void init_IO_APIC_traps(void)
{
int irq;
+ struct irq_desc *desc;
/*
* NOTE! The local APIC isn't very good at handling
*/
if (irq < 16)
make_8259A_irq(irq);
- else
+ else {
+ desc = irq_to_desc(irq);
/* Strange. Oh, well.. */
- irq_desc[irq].chip = &no_irq_chip;
+ desc->chip = &no_irq_chip;
+ }
}
}
}
static void lapic_register_intr(int irq)
{
- irq_desc[irq].status &= ~IRQ_LEVEL;
+ struct irq_desc *desc;
+
+ desc = irq_to_desc(irq);
+ desc->status &= ~IRQ_LEVEL;
set_irq_chip_and_handler_name(irq, &lapic_chip, handle_edge_irq,
"edge");
}
struct msi_msg msg;
unsigned int dest;
cpumask_t tmp;
+ struct irq_desc *desc;
cpus_and(tmp, mask, cpu_online_map);
if (cpus_empty(tmp))
msg.address_lo |= MSI_ADDR_DEST_ID(dest);
write_msi_msg(irq, &msg);
- irq_desc[irq].affinity = mask;
+ desc = irq_to_desc(irq);
+ desc->affinity = mask;
}
#ifdef CONFIG_INTR_REMAP
unsigned int dest;
cpumask_t tmp, cleanup_mask;
struct irte irte;
+ struct irq_desc *desc;
cpus_and(tmp, mask, cpu_online_map);
if (cpus_empty(tmp))
cfg->move_in_progress = 0;
}
- irq_desc[irq].affinity = mask;
+ desc = irq_to_desc(irq);
+ desc->affinity = mask;
}
#endif
#endif /* CONFIG_SMP */
#ifdef CONFIG_INTR_REMAP
if (irq_remapped(irq)) {
- struct irq_desc *desc = irq_desc + irq;
+ struct irq_desc *desc = irq_to_desc(irq);
/*
* irq migration in process context
*/
struct msi_msg msg;
unsigned int dest;
cpumask_t tmp;
+ struct irq_desc *desc;
cpus_and(tmp, mask, cpu_online_map);
if (cpus_empty(tmp))
msg.address_lo |= MSI_ADDR_DEST_ID(dest);
dmar_msi_write(irq, &msg);
- irq_desc[irq].affinity = mask;
+ desc = irq_to_desc(irq);
+ desc->affinity = mask;
}
#endif /* CONFIG_SMP */
struct irq_cfg *cfg = irq_cfg + irq;
unsigned int dest;
cpumask_t tmp;
+ struct irq_desc *desc;
cpus_and(tmp, mask, cpu_online_map);
if (cpus_empty(tmp))
dest = cpu_mask_to_apicid(tmp);
target_ht_irq(irq, dest, cfg->vector);
- irq_desc[irq].affinity = mask;
+ desc = irq_to_desc(irq);
+ desc->affinity = mask;
}
#endif
struct pt_regs *old_regs;
/* high bit used in ret_from_ code */
int overflow, irq = ~regs->orig_ax;
- struct irq_desc *desc = irq_desc + irq;
+ struct irq_desc *desc = irq_to_desc(irq);
if (unlikely((unsigned)irq >= nr_irqs)) {
printk(KERN_EMERG "%s: cannot handle IRQ %d\n",
if (i < nr_irqs) {
unsigned any_count = 0;
+ struct irq_desc *desc = irq_to_desc(i);
- spin_lock_irqsave(&irq_desc[i].lock, flags);
+ spin_lock_irqsave(&desc->lock, flags);
#ifndef CONFIG_SMP
any_count = kstat_irqs(i);
#else
for_each_online_cpu(j)
any_count |= kstat_cpu(j).irqs[i];
#endif
- action = irq_desc[i].action;
+ action = desc->action;
if (!action && !any_count)
goto skip;
seq_printf(p, "%3d: ",i);
for_each_online_cpu(j)
seq_printf(p, "%10u ", kstat_cpu(j).irqs[i]);
#endif
- seq_printf(p, " %8s", irq_desc[i].chip->name);
- seq_printf(p, "-%-8s", irq_desc[i].name);
+ seq_printf(p, " %8s", desc->chip->name);
+ seq_printf(p, "-%-8s", desc->name);
if (action) {
seq_printf(p, " %s", action->name);
seq_putc(p, '\n');
skip:
- spin_unlock_irqrestore(&irq_desc[i].lock, flags);
+ spin_unlock_irqrestore(&desc->lock, flags);
} else if (i == nr_irqs) {
seq_printf(p, "NMI: ");
for_each_online_cpu(j)
for (irq = 0; irq < nr_irqs; irq++) {
cpumask_t mask;
+ struct irq_desc *desc;
+
if (irq == 2)
continue;
- cpus_and(mask, irq_desc[irq].affinity, map);
+ desc = irq_to_desc(irq);
+ cpus_and(mask, desc->affinity, map);
if (any_online_cpu(mask) == NR_CPUS) {
printk("Breaking affinity for irq %i\n", irq);
mask = map;
}
- if (irq_desc[irq].chip->set_affinity)
- irq_desc[irq].chip->set_affinity(irq, mask);
- else if (irq_desc[irq].action && !(warned++))
+ if (desc->chip->set_affinity)
+ desc->chip->set_affinity(irq, mask);
+ else if (desc->action && !(warned++))
printk("Cannot set affinity for irq %i\n", irq);
}
if (i < nr_irqs) {
unsigned any_count = 0;
+ struct irq_desc *desc = irq_to_desc(i);
- spin_lock_irqsave(&irq_desc[i].lock, flags);
+ spin_lock_irqsave(&desc->lock, flags);
#ifndef CONFIG_SMP
any_count = kstat_irqs(i);
#else
for_each_online_cpu(j)
any_count |= kstat_cpu(j).irqs[i];
#endif
- action = irq_desc[i].action;
+ action = desc->action;
if (!action && !any_count)
goto skip;
seq_printf(p, "%3d: ",i);
for_each_online_cpu(j)
seq_printf(p, "%10u ", kstat_cpu(j).irqs[i]);
#endif
- seq_printf(p, " %8s", irq_desc[i].chip->name);
- seq_printf(p, "-%-8s", irq_desc[i].name);
+ seq_printf(p, " %8s", desc->chip->name);
+ seq_printf(p, "-%-8s", desc->name);
if (action) {
seq_printf(p, " %s", action->name);
}
seq_putc(p, '\n');
skip:
- spin_unlock_irqrestore(&irq_desc[i].lock, flags);
+ spin_unlock_irqrestore(&desc->lock, flags);
} else if (i == nr_irqs) {
seq_printf(p, "NMI: ");
for_each_online_cpu(j)
cpumask_t mask;
int break_affinity = 0;
int set_affinity = 1;
+ struct irq_desc *desc;
if (irq == 2)
continue;
+ desc = irq_to_desc(irq);
/* interrupt's are disabled at this point */
- spin_lock(&irq_desc[irq].lock);
+ spin_lock(&desc->lock);
if (!irq_has_action(irq) ||
- cpus_equal(irq_desc[irq].affinity, map)) {
- spin_unlock(&irq_desc[irq].lock);
+ cpus_equal(desc->affinity, map)) {
+ spin_unlock(&desc->lock);
continue;
}
- cpus_and(mask, irq_desc[irq].affinity, map);
+ cpus_and(mask, desc->affinity, map);
if (cpus_empty(mask)) {
break_affinity = 1;
mask = map;
}
- if (irq_desc[irq].chip->mask)
- irq_desc[irq].chip->mask(irq);
+ if (desc->chip->mask)
+ desc->chip->mask(irq);
- if (irq_desc[irq].chip->set_affinity)
- irq_desc[irq].chip->set_affinity(irq, mask);
+ if (desc->chip->set_affinity)
+ desc->chip->set_affinity(irq, mask);
else if (!(warned++))
set_affinity = 0;
- if (irq_desc[irq].chip->unmask)
- irq_desc[irq].chip->unmask(irq);
+ if (desc->chip->unmask)
+ desc->chip->unmask(irq);
- spin_unlock(&irq_desc[irq].lock);
+ spin_unlock(&desc->lock);
if (break_affinity && set_affinity)
printk("Broke affinity for irq %i\n", irq);
init_8259A(0);
for (i = 0; i < nr_irqs; i++) {
- irq_desc[i].status = IRQ_DISABLED;
- irq_desc[i].action = NULL;
- irq_desc[i].depth = 1;
+ struct irq_desc *desc = irq_to_desc(i);
+
+ desc->status = IRQ_DISABLED;
+ desc->action = NULL;
+ desc->depth = 1;
if (i < 16) {
/*
/*
* 'high' PCI IRQs filled in on demand
*/
- irq_desc[i].chip = &no_irq_chip;
+ desc->chip = &no_irq_chip;
}
}
}
static unsigned int startup_cobalt_irq(unsigned int irq)
{
unsigned long flags;
+ struct irq_desc *desc = irq_to_desc(irq);
spin_lock_irqsave(&cobalt_lock, flags);
- if ((irq_desc[irq].status & (IRQ_DISABLED | IRQ_INPROGRESS | IRQ_WAITING)))
- irq_desc[irq].status &= ~(IRQ_DISABLED | IRQ_INPROGRESS | IRQ_WAITING);
+ if ((desc->status & (IRQ_DISABLED | IRQ_INPROGRESS | IRQ_WAITING)))
+ desc->status &= ~(IRQ_DISABLED | IRQ_INPROGRESS | IRQ_WAITING);
enable_cobalt_irq(irq);
spin_unlock_irqrestore(&cobalt_lock, flags);
return 0;
static void end_cobalt_irq(unsigned int irq)
{
unsigned long flags;
+ struct irq_desc *desc = irq_to_desc(irq);
spin_lock_irqsave(&cobalt_lock, flags);
- if (!(irq_desc[irq].status & (IRQ_DISABLED | IRQ_INPROGRESS)))
+ if (!(desc->status & (IRQ_DISABLED | IRQ_INPROGRESS)))
enable_cobalt_irq(irq);
spin_unlock_irqrestore(&cobalt_lock, flags);
}
spin_unlock_irqrestore(&i8259A_lock, flags);
- desc = irq_desc + realirq;
+ desc = irq_to_desc(realirq);
/*
* handle this 'virtual interrupt' as a Cobalt one now.
int i;
for (i = 0; i < CO_IRQ_APIC0 + CO_APIC_LAST + 1; i++) {
- irq_desc[i].status = IRQ_DISABLED;
- irq_desc[i].action = 0;
- irq_desc[i].depth = 1;
+ struct irq_desc *desc = irq_to_desc(i);
+
+ desc->status = IRQ_DISABLED;
+ desc->action = 0;
+ desc->depth = 1;
if (i == 0) {
- irq_desc[i].chip = &cobalt_irq_type;
+ desc->chip = &cobalt_irq_type;
}
else if (i == CO_IRQ_IDE0) {
- irq_desc[i].chip = &cobalt_irq_type;
+ desc->chip = &cobalt_irq_type;
}
else if (i == CO_IRQ_IDE1) {
- irq_desc[i].chip = &cobalt_irq_type;
+ desc->chip = &cobalt_irq_type;
}
else if (i == CO_IRQ_8259) {
- irq_desc[i].chip = &piix4_master_irq_type;
+ desc->chip = &piix4_master_irq_type;
}
else if (i < CO_IRQ_APIC0) {
- irq_desc[i].chip = &piix4_virtual_irq_type;
+ desc->chip = &piix4_virtual_irq_type;
}
else if (IS_CO_APIC(i)) {
- irq_desc[i].chip = &cobalt_irq_type;
+ desc->chip = &cobalt_irq_type;
}
}
* the interrupt off to another CPU */
static void before_handle_vic_irq(unsigned int irq)
{
- irq_desc_t *desc = irq_desc + irq;
+ irq_desc_t *desc = irq_to_desc(irq);
__u8 cpu = smp_processor_id();
_raw_spin_lock(&vic_irq_lock);
/* Finish the VIC interrupt: basically mask */
static void after_handle_vic_irq(unsigned int irq)
{
- irq_desc_t *desc = irq_desc + irq;
+ irq_desc_t *desc = irq_to_desc(irq);
_raw_spin_lock(&vic_irq_lock);
{
if (!is_out) {
int irq = gpio_to_irq(gpio);
- struct irq_desc *desc = irq_desc + irq;
+ struct irq_desc *desc = irq_to_desc(irq);
/* This races with request_irq(), set_irq_type(),
* and set_irq_wake() ... but those are "rare".
irqnr = asic->irq_base +
(ASIC3_GPIOS_PER_BANK * bank)
+ i;
- desc = irq_desc + irqnr;
+ desc = irq_to_desc(irqnr);
desc->handle_irq(irqnr, desc);
if (asic->irq_bothedge[bank] & bit)
asic3_irq_flip_edge(asic, base,
for (i = ASIC3_NUM_GPIOS; i < ASIC3_NR_IRQS; i++) {
/* They start at bit 4 and go up */
if (status & (1 << (i - ASIC3_NUM_GPIOS + 4))) {
- desc = irq_desc + asic->irq_base + i;
+ desc = irq_to_desc(asic->irq_base + i);
desc->handle_irq(asic->irq_base + i,
desc);
}
/* Run irq handler */
pr_debug("got IRQ %d\n", irqpin);
irq = ei->irq_start + irqpin;
- desc = &irq_desc[irq];
+ desc = irq_to_desc(irq);
desc->handle_irq(irq, desc);
}
}
static void dino_disable_irq(unsigned int irq)
{
- struct dino_device *dino_dev = irq_desc[irq].chip_data;
+ struct irq_desc *desc = irq_to_desc(irq);
+ struct dino_device *dino_dev = desc->chip_data;
int local_irq = gsc_find_local_irq(irq, dino_dev->global_irq, DINO_LOCAL_IRQS);
DBG(KERN_WARNING "%s(0x%p, %d)\n", __func__, dino_dev, irq);
static void dino_enable_irq(unsigned int irq)
{
- struct dino_device *dino_dev = irq_desc[irq].chip_data;
+ struct irq_desc *desc = irq_to_desc(irq);
+ struct dino_device *dino_dev = desc->chip_data;
int local_irq = gsc_find_local_irq(irq, dino_dev->global_irq, DINO_LOCAL_IRQS);
u32 tmp;
}
/* Reserve IRQ2 */
- irq_desc[2].action = &irq2_action;
+ irq_to_desc(2)->action = &irq2_action;
for (i = 0; i < 16; i++) {
- irq_desc[i].chip = &eisa_interrupt_type;
+ irq_to_desc(i)->chip = &eisa_interrupt_type;
}
EISA_bus = 1;
static void gsc_asic_disable_irq(unsigned int irq)
{
- struct gsc_asic *irq_dev = irq_desc[irq].chip_data;
+ struct irq_desc *desc = irq_to_desc(irq);
+ struct gsc_asic *irq_dev = desc->chip_data;
int local_irq = gsc_find_local_irq(irq, irq_dev->global_irq, 32);
u32 imr;
static void gsc_asic_enable_irq(unsigned int irq)
{
- struct gsc_asic *irq_dev = irq_desc[irq].chip_data;
+ struct irq_desc *desc = irq_to_desc(irq);
+ struct gsc_asic *irq_dev = desc->chip_data;
int local_irq = gsc_find_local_irq(irq, irq_dev->global_irq, 32);
u32 imr;
int gsc_assign_irq(struct hw_interrupt_type *type, void *data)
{
static int irq = GSC_IRQ_BASE;
+ struct irq_desc *desc;
if (irq > GSC_IRQ_MAX)
return NO_IRQ;
- irq_desc[irq].chip = type;
- irq_desc[irq].chip_data = data;
+ desc = irq_to_desc(irq);
+ desc->chip = type;
+ desc->chip_data = data;
return irq++;
}
static struct vector_info *iosapic_get_vector(unsigned int irq)
{
- return irq_desc[irq].chip_data;
+ struct irq_desc *desc = irq_to_desc(irq);
+
+ return desc->chip_data;
}
static void iosapic_disable_irq(unsigned int irq)
#endif
for (i = 0; i < 16; i++) {
- irq_desc[i].chip = &superio_interrupt_type;
+ struct irq_desc *desc = irq_to_desc(i);
+
+ desc->chip = &superio_interrupt_type;
}
/*
*/
static void hs_map_irq(hs_socket_t *sp, unsigned int irq)
{
+ struct irq_desc *desc;
+
DPRINTK("hs_map_irq(sock=%d irq=%d)\n", sp->number, irq);
if (irq >= HS_NUM_MAPPED_IRQS)
return;
+ desc = irq_to_desc(irq);
hs_mapped_irq[irq].sock = sp;
/* insert ourselves as the irq controller */
- hs_mapped_irq[irq].old_handler = irq_desc[irq].chip;
- irq_desc[irq].chip = &hd64465_ss_irq_type;
+ hs_mapped_irq[irq].old_handler = desc->chip;
+ desc->chip = &hd64465_ss_irq_type;
}
*/
static void hs_unmap_irq(hs_socket_t *sp, unsigned int irq)
{
+ struct irq_desc *desc;
+
DPRINTK("hs_unmap_irq(sock=%d irq=%d)\n", sp->number, irq);
if (irq >= HS_NUM_MAPPED_IRQS)
return;
+ desc = irq_to_desc(irq);
/* restore the original irq controller */
- irq_desc[irq].chip = hs_mapped_irq[irq].old_handler;
+ desc->chip = hs_mapped_irq[irq].old_handler;
}
/*============================================================*/
BUG_ON(irq == -1);
#ifdef CONFIG_SMP
- irq_desc[irq].affinity = cpumask_of_cpu(cpu);
+ irq_to_desc(irq)->affinity = cpumask_of_cpu(cpu);
#endif
__clear_bit(chn, cpu_evtchn_mask[cpu_evtchn[chn]]);
#ifdef CONFIG_SMP
int i;
/* By default all event channels notify CPU#0. */
- for (i = 0; i < nr_irqs; i++)
- irq_desc[i].affinity = cpumask_of_cpu(0);
+ for (i = 0; i < nr_irqs; i++) {
+ struct irq_desc *desc = irq_to_desc(i);
+ desc->affinity = cpumask_of_cpu(0);
+ }
#endif
memset(cpu_evtchn, 0, sizeof(cpu_evtchn));
* @name: flow handler name for /proc/interrupts output
*/
struct irq_desc {
+ unsigned int irq;
+#ifdef CONFIG_HAVE_SPARSE_IRQ
+ struct irq_desc *next;
+#endif
irq_flow_handler_t handle_irq;
struct irq_chip *chip;
struct msi_desc *msi_desc;
const char *name;
} ____cacheline_internodealigned_in_smp;
-#ifdef CONFIG_HAVE_DYN_ARRAY
-extern struct irq_desc *irq_desc;
-#else
+extern struct irq_desc *irq_to_desc(unsigned int irq);
+#ifndef CONFIG_HAVE_DYN_ARRAY
+/* could be removed if we get rid of all irq_desc references */
extern struct irq_desc irq_desc[NR_IRQS];
#endif
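
One property of the sparse variant worth noting (an observation about this
patch, not a quoted comment from it): irq_to_desc() doubles as an allocator,
since a lookup miss claims a free slot or grows the list. The common
iteration pattern used throughout this patch therefore instantiates a
descriptor for every IRQ number it touches; a sketch of that pattern:

	struct irq_desc *desc;
	int j;

	for (j = 0; j < nr_irqs; j++) {
		desc = irq_to_desc(j);	/* claims a slot for j on first use */
		if (!desc->action)
			continue;
		/* per-irq work goes here */
	}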
static inline int irq_balancing_disabled(unsigned int irq)
{
- return irq_desc[irq].status & IRQ_NO_BALANCING_MASK;
+ struct irq_desc *desc;
+
+ desc = irq_to_desc(irq);
+ return desc->status & IRQ_NO_BALANCING_MASK;
}
/* Handle irq action chains: */
*/
static inline void generic_handle_irq(unsigned int irq)
{
- struct irq_desc *desc = irq_desc + irq;
+ struct irq_desc *desc = irq_to_desc(irq);
#ifdef CONFIG_GENERIC_HARDIRQS_NO__DO_IRQ
desc->handle_irq(irq, desc);
static inline void __set_irq_handler_unlocked(int irq,
irq_flow_handler_t handler)
{
- irq_desc[irq].handle_irq = handler;
+ struct irq_desc *desc;
+
+ desc = irq_to_desc(irq);
+ desc->handle_irq = handler;
}
/*
/* Test to see if a driver has successfully requested an irq */
static inline int irq_has_action(unsigned int irq)
{
- struct irq_desc *desc = irq_desc + irq;
+ struct irq_desc *desc = irq_to_desc(irq);
return desc->action != NULL;
}
extern int set_irq_type(unsigned int irq, unsigned int type);
extern int set_irq_msi(unsigned int irq, struct msi_desc *entry);
-#define get_irq_chip(irq) (irq_desc[irq].chip)
-#define get_irq_chip_data(irq) (irq_desc[irq].chip_data)
-#define get_irq_data(irq) (irq_desc[irq].handler_data)
-#define get_irq_msi(irq) (irq_desc[irq].msi_desc)
+#define get_irq_chip(irq) (irq_to_desc(irq)->chip)
+#define get_irq_chip_data(irq) (irq_to_desc(irq)->chip_data)
+#define get_irq_data(irq) (irq_to_desc(irq)->handler_data)
+#define get_irq_msi(irq) (irq_to_desc(irq)->msi_desc)
#endif /* CONFIG_GENERIC_HARDIRQS */
* flush such a longstanding irq before considering it as spurious.
*/
for (i = nr_irqs-1; i > 0; i--) {
- desc = irq_desc + i;
+ desc = irq_to_desc(i);
spin_lock_irq(&desc->lock);
if (!desc->action && !(desc->status & IRQ_NOPROBE)) {
* happened in the previous stage, it may have masked itself)
*/
for (i = nr_irqs-1; i > 0; i--) {
- desc = irq_desc + i;
+ desc = irq_to_desc(i);
spin_lock_irq(&desc->lock);
if (!desc->action && !(desc->status & IRQ_NOPROBE)) {
for (i = 0; i < nr_irqs; i++) {
unsigned int status;
- desc = irq_desc + i;
+ desc = irq_to_desc(i);
spin_lock_irq(&desc->lock);
status = desc->status;
mask = 0;
for (i = 0; i < nr_irqs; i++) {
- struct irq_desc *desc = irq_desc + i;
+ struct irq_desc *desc = irq_to_desc(i);
unsigned int status;
spin_lock_irq(&desc->lock);
int i, irq_found = 0, nr_of_irqs = 0;
for (i = 0; i < nr_irqs; i++) {
- struct irq_desc *desc = irq_desc + i;
+ struct irq_desc *desc = irq_to_desc(i);
unsigned int status;
spin_lock_irq(&desc->lock);
}
/* Ensure we don't have left over values from a previous use of this irq */
- desc = irq_desc + irq;
+ desc = irq_to_desc(irq);
spin_lock_irqsave(&desc->lock, flags);
desc->status = IRQ_DISABLED;
desc->chip = &no_irq_chip;
return;
}
- desc = irq_desc + irq;
+ desc = irq_to_desc(irq);
spin_lock_irqsave(&desc->lock, flags);
if (desc->action) {
spin_unlock_irqrestore(&desc->lock, flags);
if (!chip)
chip = &no_irq_chip;
- desc = irq_desc + irq;
+ desc = irq_to_desc(irq);
spin_lock_irqsave(&desc->lock, flags);
irq_chip_set_defaults(chip);
desc->chip = chip;
return -ENODEV;
}
- desc = irq_desc + irq;
+ desc = irq_to_desc(irq);
if (type == IRQ_TYPE_NONE)
return 0;
return -EINVAL;
}
- desc = irq_desc + irq;
+ desc = irq_to_desc(irq);
spin_lock_irqsave(&desc->lock, flags);
desc->handler_data = data;
spin_unlock_irqrestore(&desc->lock, flags);
"Trying to install msi data for IRQ%d\n", irq);
return -EINVAL;
}
- desc = irq_desc + irq;
+ desc = irq_to_desc(irq);
spin_lock_irqsave(&desc->lock, flags);
desc->msi_desc = entry;
if (entry)
*/
int set_irq_chip_data(unsigned int irq, void *data)
{
- struct irq_desc *desc = irq_desc + irq;
+ struct irq_desc *desc;
unsigned long flags;
+ desc = irq_to_desc(irq);
if (irq >= nr_irqs || !desc->chip) {
printk(KERN_ERR "BUG: bad set_irq_chip_data(IRQ#%d)\n", irq);
return -EINVAL;
*/
static void default_enable(unsigned int irq)
{
- struct irq_desc *desc = irq_desc + irq;
+ struct irq_desc *desc;
+ desc = irq_to_desc(irq);
desc->chip->unmask(irq);
desc->status &= ~IRQ_MASKED;
}
*/
static unsigned int default_startup(unsigned int irq)
{
- irq_desc[irq].chip->enable(irq);
+ struct irq_desc *desc;
+
+ desc = irq_to_desc(irq);
+ desc->chip->enable(irq);
return 0;
}
*/
static void default_shutdown(unsigned int irq)
{
- struct irq_desc *desc = irq_desc + irq;
+ struct irq_desc *desc;
+ desc = irq_to_desc(irq);
desc->chip->mask(irq);
desc->status |= IRQ_MASKED;
}
return;
}
- desc = irq_desc + irq;
+ desc = irq_to_desc(irq);
if (!handle)
handle = handle_bad_irq;
return;
}
- desc = irq_desc + irq;
+ desc = irq_to_desc(irq);
spin_lock_irqsave(&desc->lock, flags);
desc->status |= IRQ_NOPROBE;
return;
}
- desc = irq_desc + irq;
+ desc = irq_to_desc(irq);
spin_lock_irqsave(&desc->lock, flags);
desc->status &= ~IRQ_NOPROBE;
#include "internals.h"
+#ifdef CONFIG_TRACE_IRQFLAGS
+
+/*
+ * lockdep: we want to handle all irq_desc locks as a single lock-class:
+ */
+static struct lock_class_key irq_desc_lock_class;
+#endif
+
/**
* handle_bad_irq - handle spurious and unhandled irqs
* @irq: the interrupt number
EXPORT_SYMBOL_GPL(nr_irqs);
#ifdef CONFIG_HAVE_DYN_ARRAY
-static struct irq_desc irq_desc_init __initdata = {
+static struct irq_desc irq_desc_init = {
+ .irq = -1U,
.status = IRQ_DISABLED,
.chip = &no_irq_chip,
.handle_irq = handle_bad_irq,
#endif
};
+
+static void init_one_irq_desc(struct irq_desc *desc)
+{
+ memcpy(desc, &irq_desc_init, sizeof(struct irq_desc));
+#ifdef CONFIG_TRACE_IRQFLAGS
+ lockdep_set_class(&desc->lock, &irq_desc_lock_class);
+#endif
+}
+
+#ifdef CONFIG_HAVE_SPARSE_IRQ
+static int nr_irq_desc = 32;
+
+static int __init parse_nr_irq_desc(char *arg)
+{
+ if (arg)
+ nr_irq_desc = simple_strtoul(arg, NULL, 0);
+ return 0;
+}
+
+early_param("nr_irq_desc", parse_nr_irq_desc);
+
static void __init init_work(void *data)
{
struct dyn_array *da = data;
desc = *da->name;
for (i = 0; i < *da->nr; i++)
- memcpy(&desc[i], &irq_desc_init, sizeof(struct irq_desc));
+ init_one_irq_desc(&desc[i]);
+
+ for (i = 1; i < *da->nr; i++)
+ desc[i-1].next = &desc[i];
}
-struct irq_desc *irq_desc;
+static struct irq_desc *sparse_irqs;
+DEFINE_DYN_ARRAY(sparse_irqs, sizeof(struct irq_desc), nr_irq_desc, PAGE_SIZE, init_work);
+
+extern int after_bootmem;
+extern void *__alloc_bootmem_nopanic(unsigned long size,
+ unsigned long align,
+ unsigned long goal);
+struct irq_desc *irq_to_desc(unsigned int irq)
+{
+ struct irq_desc *desc, *desc_pri;
+ int i;
+ int count = 0;
+
+ BUG_ON(irq == -1U);
+
+ desc_pri = desc = &sparse_irqs[0];
+ while (desc) {
+ if (desc->irq == irq)
+ return desc;
+
+ if (desc->irq == -1U) {
+ desc->irq = irq;
+ return desc;
+ }
+ desc_pri = desc;
+ desc = desc->next;
+ count++;
+ }
+
+	/*
+	 * we ran out of the preallocated entries, allocate more
+	 */
+	printk(KERN_DEBUG "trying to get %d more irq_desc entries\n", nr_irq_desc);
+
+ if (after_bootmem)
+ desc = kzalloc(sizeof(struct irq_desc)*nr_irq_desc, GFP_ATOMIC);
+ else
+ desc = __alloc_bootmem_nopanic(sizeof(struct irq_desc)*nr_irq_desc, PAGE_SIZE, 0);
+
+ if (!desc)
+	panic("please boot with nr_irq_desc=%d\n", count * 2);
+
+ for (i = 0; i < nr_irq_desc; i++)
+ init_one_irq_desc(&desc[i]);
+
+ for (i = 1; i < nr_irq_desc; i++)
+ desc[i-1].next = &desc[i];
+
+ desc->irq = irq;
+ desc_pri->next = desc;
+
+ return desc;
+}
+#else
+static void __init init_work(void *data)
+{
+ struct dyn_array *da = data;
+ int i;
+ struct irq_desc *desc;
+
+ desc = *da->name;
+
+ for (i = 0; i < *da->nr; i++)
+ init_one_irq_desc(&desc[i]);
+
+}
+static struct irq_desc *irq_desc;
DEFINE_DYN_ARRAY(irq_desc, sizeof(struct irq_desc), nr_irqs, PAGE_SIZE, init_work);
+#endif
+
#else
struct irq_desc irq_desc[NR_IRQS] __cacheline_aligned_in_smp = {
.chip = &no_irq_chip,
.handle_irq = handle_bad_irq,
.depth = 1,
.lock = __SPIN_LOCK_UNLOCKED(irq_desc->lock),
#ifdef CONFIG_SMP
.affinity = CPU_MASK_ALL
#endif
}
};
+
+#endif
+
+#ifndef CONFIG_HAVE_SPARSE_IRQ
+struct irq_desc *irq_to_desc(unsigned int irq)
+{
+ if (irq < nr_irqs)
+ return &irq_desc[irq];
+
+ return NULL;
+}
#endif
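
If the preallocated pool gets exhausted often, its size can be raised from
the kernel command line via the early_param() added above; the value here
is only an example:

	nr_irq_desc=64

Each exhaustion otherwise allocates another nr_irq_desc-sized block
(kzalloc() once the page allocator is up, bootmem before that) and links
it onto the list.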
/*
*/
static void ack_bad(unsigned int irq)
{
- print_irq_desc(irq, irq_desc + irq);
+ struct irq_desc *desc;
+
+ desc = irq_to_desc(irq);
+ print_irq_desc(irq, desc);
ack_bad_irq(irq);
}
*/
unsigned int __do_IRQ(unsigned int irq)
{
- struct irq_desc *desc = irq_desc + irq;
+ struct irq_desc *desc = irq_to_desc(irq);
struct irqaction *action;
unsigned int status;
}
#endif
-#ifdef CONFIG_TRACE_IRQFLAGS
-
-/*
- * lockdep: we want to handle all irq_desc locks as a single lock-class:
- */
-static struct lock_class_key irq_desc_lock_class;
+#ifdef CONFIG_TRACE_IRQFLAGS
void early_init_irq_lock_class(void)
{
+#ifndef CONFIG_HAVE_DYN_ARRAY
int i;
for (i = 0; i < nr_irqs; i++)
lockdep_set_class(&irq_desc[i].lock, &irq_desc_lock_class);
+#endif
}
-
#endif
+
*/
void synchronize_irq(unsigned int irq)
{
- struct irq_desc *desc = irq_desc + irq;
+ struct irq_desc *desc = irq_to_desc(irq);
unsigned int status;
if (irq >= nr_irqs)
*/
int irq_can_set_affinity(unsigned int irq)
{
- struct irq_desc *desc = irq_desc + irq;
+ struct irq_desc *desc = irq_to_desc(irq);
if (CHECK_IRQ_PER_CPU(desc->status) || !desc->chip ||
!desc->chip->set_affinity)
*/
int irq_set_affinity(unsigned int irq, cpumask_t cpumask)
{
- struct irq_desc *desc = irq_desc + irq;
+ struct irq_desc *desc = irq_to_desc(irq);
if (!desc->chip->set_affinity)
return -EINVAL;
int irq_select_affinity(unsigned int irq)
{
cpumask_t mask;
+ struct irq_desc *desc;
if (!irq_can_set_affinity(irq))
return 0;
cpus_and(mask, cpu_online_map, irq_default_affinity);
- irq_desc[irq].affinity = mask;
- irq_desc[irq].chip->set_affinity(irq, mask);
+ desc = irq_to_desc(irq);
+ desc->affinity = mask;
+ desc->chip->set_affinity(irq, mask);
set_balance_irq_affinity(irq, mask);
return 0;
*/
void disable_irq_nosync(unsigned int irq)
{
- struct irq_desc *desc = irq_desc + irq;
+ struct irq_desc *desc = irq_to_desc(irq);
unsigned long flags;
if (irq >= nr_irqs)
*/
void disable_irq(unsigned int irq)
{
- struct irq_desc *desc = irq_desc + irq;
+ struct irq_desc *desc = irq_to_desc(irq);
if (irq >= nr_irqs)
return;
*/
void enable_irq(unsigned int irq)
{
- struct irq_desc *desc = irq_desc + irq;
+ struct irq_desc *desc = irq_to_desc(irq);
unsigned long flags;
if (irq >= nr_irqs)
static int set_irq_wake_real(unsigned int irq, unsigned int on)
{
- struct irq_desc *desc = irq_desc + irq;
+ struct irq_desc *desc = irq_to_desc(irq);
int ret = -ENXIO;
if (desc->chip->set_wake)
*/
int set_irq_wake(unsigned int irq, unsigned int on)
{
- struct irq_desc *desc = irq_desc + irq;
+ struct irq_desc *desc = irq_to_desc(irq);
unsigned long flags;
int ret = 0;
*/
int can_request_irq(unsigned int irq, unsigned long irqflags)
{
+ struct irq_desc *desc = irq_to_desc(irq);
struct irqaction *action;
- if (irq >= nr_irqs || irq_desc[irq].status & IRQ_NOREQUEST)
+ if (irq >= nr_irqs || desc->status & IRQ_NOREQUEST)
return 0;
- action = irq_desc[irq].action;
+ action = desc->action;
if (action)
if (irqflags & action->flags & IRQF_SHARED)
action = NULL;
*/
int setup_irq(unsigned int irq, struct irqaction *new)
{
- struct irq_desc *desc = irq_desc + irq;
+ struct irq_desc *desc = irq_to_desc(irq);
struct irqaction *old, **p;
const char *old_name = NULL;
unsigned long flags;
if (irq >= nr_irqs)
return;
- desc = irq_desc + irq;
+ desc = irq_to_desc(irq);
spin_lock_irqsave(&desc->lock, flags);
p = &desc->action;
for (;;) {
{
struct irqaction *action;
int retval;
+ struct irq_desc *desc;
#ifdef CONFIG_LOCKDEP
/*
return -EINVAL;
if (irq >= nr_irqs)
return -EINVAL;
- if (irq_desc[irq].status & IRQ_NOREQUEST)
+ desc = irq_to_desc(irq);
+ if (desc->status & IRQ_NOREQUEST)
return -EINVAL;
if (!handler)
return -EINVAL;
void set_pending_irq(unsigned int irq, cpumask_t mask)
{
- struct irq_desc *desc = irq_desc + irq;
+ struct irq_desc *desc = irq_to_desc(irq);
unsigned long flags;
spin_lock_irqsave(&desc->lock, flags);
desc->status |= IRQ_MOVE_PENDING;
- irq_desc[irq].pending_mask = mask;
+ desc->pending_mask = mask;
spin_unlock_irqrestore(&desc->lock, flags);
}
void move_masked_irq(int irq)
{
- struct irq_desc *desc = irq_desc + irq;
+ struct irq_desc *desc = irq_to_desc(irq);
cpumask_t tmp;
if (likely(!(desc->status & IRQ_MOVE_PENDING)))
desc->status &= ~IRQ_MOVE_PENDING;
- if (unlikely(cpus_empty(irq_desc[irq].pending_mask)))
+ if (unlikely(cpus_empty(desc->pending_mask)))
return;
if (!desc->chip->set_affinity)
assert_spin_locked(&desc->lock);
- cpus_and(tmp, irq_desc[irq].pending_mask, cpu_online_map);
+ cpus_and(tmp, desc->pending_mask, cpu_online_map);
/*
* If there was a valid mask to work with, please
if (likely(!cpus_empty(tmp))) {
desc->chip->set_affinity(irq,tmp);
}
- cpus_clear(irq_desc[irq].pending_mask);
+ cpus_clear(desc->pending_mask);
}
void move_native_irq(int irq)
{
- struct irq_desc *desc = irq_desc + irq;
+ struct irq_desc *desc = irq_to_desc(irq);
if (likely(!(desc->status & IRQ_MOVE_PENDING)))
return;
static int irq_affinity_proc_show(struct seq_file *m, void *v)
{
- struct irq_desc *desc = irq_desc + (long)m->private;
+ struct irq_desc *desc = irq_to_desc((long)m->private);
cpumask_t *mask = &desc->affinity;
#ifdef CONFIG_GENERIC_PENDING_IRQ
cpumask_t new_value;
int err;
- if (!irq_desc[irq].chip->set_affinity || no_irq_affinity ||
+ if (!irq_to_desc(irq)->chip->set_affinity || no_irq_affinity ||
irq_balancing_disabled(irq))
return -EIO;
static int irq_spurious_read(char *page, char **start, off_t off,
int count, int *eof, void *data)
{
- struct irq_desc *d = &irq_desc[(long) data];
+ struct irq_desc *desc = irq_to_desc((long) data);
return sprintf(page, "count %u\n"
"unhandled %u\n"
"last_unhandled %u ms\n",
- d->irq_count,
- d->irqs_unhandled,
- jiffies_to_msecs(d->last_unhandled));
+ desc->irq_count,
+ desc->irqs_unhandled,
+ jiffies_to_msecs(desc->last_unhandled));
}
#define MAX_NAMELEN 128
static int name_unique(unsigned int irq, struct irqaction *new_action)
{
- struct irq_desc *desc = irq_desc + irq;
+ struct irq_desc *desc = irq_to_desc(irq);
struct irqaction *action;
unsigned long flags;
int ret = 1;
void register_handler_proc(unsigned int irq, struct irqaction *action)
{
char name [MAX_NAMELEN];
+ struct irq_desc *desc = irq_to_desc(irq);
- if (!irq_desc[irq].dir || action->dir || !action->name ||
+ if (!desc->dir || action->dir || !action->name ||
!name_unique(irq, action))
return;
snprintf(name, MAX_NAMELEN, "%s", action->name);
/* create /proc/irq/1234/handler/ */
- action->dir = proc_mkdir(name, irq_desc[irq].dir);
+ action->dir = proc_mkdir(name, desc->dir);
}
#undef MAX_NAMELEN
{
char name [MAX_NAMELEN];
struct proc_dir_entry *entry;
+ struct irq_desc *desc = irq_to_desc(irq);
- if (!root_irq_dir ||
- (irq_desc[irq].chip == &no_irq_chip) ||
- irq_desc[irq].dir)
+ if (!root_irq_dir || (desc->chip == &no_irq_chip) || desc->dir)
return;
memset(name, 0, MAX_NAMELEN);
sprintf(name, "%d", irq);
/* create /proc/irq/1234 */
- irq_desc[irq].dir = proc_mkdir(name, root_irq_dir);
+ desc->dir = proc_mkdir(name, root_irq_dir);
#ifdef CONFIG_SMP
/* create /proc/irq/<irq>/smp_affinity */
- proc_create_data("smp_affinity", 0600, irq_desc[irq].dir,
+ proc_create_data("smp_affinity", 0600, desc->dir,
&irq_affinity_proc_fops, (void *)(long)irq);
#endif
- entry = create_proc_entry("spurious", 0444, irq_desc[irq].dir);
+ entry = create_proc_entry("spurious", 0444, desc->dir);
if (entry) {
entry->data = (void *)(long)irq;
entry->read_proc = irq_spurious_read;
void unregister_handler_proc(unsigned int irq, struct irqaction *action)
{
- if (action->dir)
- remove_proc_entry(action->dir->name, irq_desc[irq].dir);
+ if (action->dir) {
+ struct irq_desc *desc = irq_to_desc(irq);
+ remove_proc_entry(action->dir->name, desc->dir);
+ }
}
void register_default_affinity_proc(void)
while (!bitmap_empty(irqs_resend, nr_irqs)) {
irq = find_first_bit(irqs_resend, nr_irqs);
clear_bit(irq, irqs_resend);
- desc = irq_desc + irq;
+ desc = irq_to_desc(irq);
local_irq_disable();
desc->handle_irq(irq, desc);
local_irq_enable();
int ok = 0;
for (i = 1; i < nr_irqs; i++) {
- struct irq_desc *desc = irq_desc + i;
+ struct irq_desc *desc;
if (i == irq) /* Already tried */
continue;
+ desc = irq_to_desc(i);
if (try_one_irq(i, desc))
ok = 1;
}
{
int i;
for (i = 1; i < nr_irqs; i++) {
- struct irq_desc *desc = irq_desc + i;
+ struct irq_desc *desc = irq_to_desc(i);
unsigned int status;
/* Racy but it doesn't matter */