struct irq_pin_list *entry;
unsigned int apicid_value;
cpumask_t tmp;
- struct irq_desc *desc;
cfg = irq_cfg(irq);
break;
entry = entry->next;
}
- desc = irq_to_desc(irq);
- desc->affinity = cpumask;
+ irq_to_desc(irq)->affinity = cpumask;
spin_unlock_irqrestore(&ioapic_lock, flags);
}
int vector, offset;
struct irq_cfg *cfg;
- BUG_ON((unsigned)irq >= nr_irqs);
-
cfg = irq_cfg(irq);
if (cfg->vector > 0)
return cfg->vector;
{
struct irq_desc *desc;
- desc = irq_to_desc(irq);
+ /* first time to use this irq_desc */
+ if (irq < 16)
+ desc = irq_to_desc(irq);
+ else
+ desc = irq_to_desc_alloc(irq);
+
if ((trigger == IOAPIC_AUTO && IO_APIC_irq_trigger(irq)) ||
trigger == IOAPIC_LEVEL) {
desc->status |= IRQ_LEVEL;
/*
* Dynamic irq allocate and deallocation
*/
-int create_irq(void)
+unsigned int create_irq_nr(unsigned int irq_want)
{
/* Allocate an unused irq */
- int irq, new, vector = 0;
+ unsigned int irq, new, vector = 0;
unsigned long flags;
struct irq_cfg *cfg_new;
- irq = -ENOSPC;
+ /* only can use bus/dev/fn.. when per_cpu vector is used */
+ irq_want = nr_irqs - 1;
+
+ irq = 0;
spin_lock_irqsave(&vector_lock, flags);
- for (new = (nr_irqs - 1); new >= 0; new--) {
+ for (new = (nr_irqs - 1); new > 0; new--) {
if (platform_legacy_irq(new))
continue;
cfg_new = irq_cfg(new);
}
spin_unlock_irqrestore(&vector_lock, flags);
- if (irq >= 0) {
+ if (irq > 0) {
set_intr_gate(vector, interrupt[irq]);
dynamic_irq_init(irq);
}
return irq;
}
+/*
+ * Legacy entry point: allocate any unused dynamic irq.  Passes the top of
+ * the irq range as the hint; note create_irq_nr() currently overrides the
+ * hint to nr_irqs - 1 anyway (see the "bus/dev/fn" comment there).
+ */
+int create_irq(void)
+{
+	return create_irq_nr(nr_irqs - 1);
+}
+
void destroy_irq(unsigned int irq)
{
unsigned long flags;
unsigned int dest;
cpumask_t tmp;
int vector;
- struct irq_desc *desc;
cpus_and(tmp, mask, cpu_online_map);
if (cpus_empty(tmp))
msg.address_lo |= MSI_ADDR_DEST_ID(dest);
write_msi_msg(irq, &msg);
- desc = irq_to_desc(irq);
- desc->affinity = mask;
+ irq_to_desc(irq)->affinity = mask;
}
#endif /* CONFIG_SMP */
.retrigger = ioapic_retrigger_irq,
};
+/*
+ * Derive a per-device irq hint from the PCI location:
+ * bus number in bits 27..20, devfn in bits 19..12, low 12 bits zero
+ * (left free so callers can add a per-device sub-index).
+ */
+static unsigned int build_irq_for_pci_dev(struct pci_dev *dev)
+{
+	unsigned int bus_bits = (unsigned int)dev->bus->number << 8;
+
+	return (bus_bits | dev->devfn) << 12;
+}
+
int arch_setup_msi_irq(struct pci_dev *dev, struct msi_desc *desc)
{
struct msi_msg msg;
int irq, ret;
- irq = create_irq();
- if (irq < 0)
- return irq;
+
+ unsigned int irq_want;
+
+ irq_want = build_irq_for_pci_dev(dev) + 0x100;
+
+ irq = create_irq_nr(irq_want);
+
+ if (irq == 0)
+ return -1;
ret = msi_compose_msg(dev, irq, &msg);
if (ret < 0) {
{
unsigned int dest;
cpumask_t tmp;
- struct irq_desc *desc;
cpus_and(tmp, mask, cpu_online_map);
if (cpus_empty(tmp))
dest = cpu_mask_to_apicid(mask);
target_ht_irq(irq, dest);
- desc = irq_to_desc(irq);
- desc->affinity = mask;
+ irq_to_desc(irq)->affinity = mask;
}
#endif
struct pt_regs *old_regs;
/* high bit used in ret_from_ code */
int overflow, irq = ~regs->orig_ax;
- struct irq_desc *desc = irq_to_desc(irq);
+ struct irq_desc *desc;
- if (unlikely((unsigned)irq >= nr_irqs)) {
+ desc = irq_to_desc(irq);
+ if (unlikely(!desc)) {
printk(KERN_EMERG "%s: cannot handle IRQ %d\n",
__func__, irq);
BUG();
int i = *(loff_t *) v, j;
struct irqaction * action;
unsigned long flags;
+ unsigned int entries;
+ struct irq_desc *desc;
+ int tail = 0;
+
+#ifdef CONFIG_HAVE_SPARSE_IRQ
+ desc = (struct irq_desc *)v;
+ entries = -1U;
+ i = desc->irq;
+ if (!desc->next)
+ tail = 1;
+#else
+ entries = nr_irqs - 1;
+ i = *(loff_t *) v;
+ if (i == nr_irqs)
+ tail = 1;
+ else
+ desc = irq_to_desc(i);
+#endif
if (i == 0) {
seq_printf(p, " ");
seq_putc(p, '\n');
}
- if (i < nr_irqs) {
+ if (i <= entries) {
unsigned any_count = 0;
- struct irq_desc *desc = irq_to_desc(i);
spin_lock_irqsave(&desc->lock, flags);
#ifndef CONFIG_SMP
action = desc->action;
if (!action && !any_count)
goto skip;
- seq_printf(p, "%3d: ",i);
+ seq_printf(p, "%#x: ",i);
#ifndef CONFIG_SMP
seq_printf(p, "%10u ", kstat_irqs(i));
#else
seq_putc(p, '\n');
skip:
spin_unlock_irqrestore(&desc->lock, flags);
- } else if (i == nr_irqs) {
+ }
+
+ if (tail) {
seq_printf(p, "NMI: ");
for_each_online_cpu(j)
seq_printf(p, "%10u ", nmi_count(j));
{
unsigned int irq;
static int warned;
+ struct irq_desc *desc;
- for (irq = 0; irq < nr_irqs; irq++) {
+ for_each_irq_desc(irq, desc) {
cpumask_t mask;
- struct irq_desc *desc;
if (irq == 2)
continue;
- desc = irq_to_desc(irq);
cpus_and(mask, desc->affinity, map);
if (any_online_cpu(mask) == NR_CPUS) {
printk("Breaking affinity for irq %i\n", irq);