extern struct legacy_pic *legacy_pic;
extern struct legacy_pic null_legacy_pic;
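+/* Number of legacy IRQs handled by the installed legacy PIC driver */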
+static inline int nr_legacy_irqs(void)
+{
+ return legacy_pic->nr_legacy_irqs;
+}
+
#endif /* _ASM_X86_I8259_H */
#include <asm/io.h>
#include <asm/mpspec.h>
#include <asm/smp.h>
+#include <asm/i8259.h> /* for nr_legacy_irqs() */
#include "sleep.h" /* To include x86_acpi_suspend_lowlevel */
static int __initdata acpi_force = 0;
static unsigned int gsi_to_irq(unsigned int gsi)
{
- unsigned int irq = gsi + NR_IRQS_LEGACY;
+ unsigned int irq = gsi + nr_legacy_irqs();
unsigned int i;
- for (i = 0; i < NR_IRQS_LEGACY; i++) {
+ for (i = 0; i < nr_legacy_irqs(); i++) {
if (isa_irq_to_gsi[i] == gsi) {
return i;
}
* except on truly weird platforms that have
 * non-ISA irqs in the first 16 gsis.
*/
- if (gsi >= NR_IRQS_LEGACY)
+ if (gsi >= nr_legacy_irqs())
irq = gsi;
else
irq = gsi_top + gsi;
* otherwise there will be more than one entry with the same GSI
* and acpi_isa_irq_to_gsi() may give wrong result.
*/
- if (gsi < NR_IRQS_LEGACY && isa_irq_to_gsi[gsi] == gsi)
+ if (gsi < nr_legacy_irqs() && isa_irq_to_gsi[gsi] == gsi)
isa_irq_to_gsi[gsi] = ACPI_INVALID_GSI;
isa_irq_to_gsi[bus_irq] = gsi;
}
int acpi_isa_irq_to_gsi(unsigned isa_irq, u32 *gsi)
{
- if (isa_irq < NR_IRQS_LEGACY &&
+ if (isa_irq < nr_legacy_irqs() &&
isa_irq_to_gsi[isa_irq] != ACPI_INVALID_GSI) {
*gsi = isa_irq_to_gsi[isa_irq];
return 0;
 * Use the default configuration for the IRQs 0-15 unless
 * overridden by (MADT) interrupt source override entries.
*/
- for (i = 0; i < NR_IRQS_LEGACY; i++) {
+ for (i = 0; i < nr_legacy_irqs(); i++) {
int ioapic, pin;
unsigned int dstapic;
int idx;
return mp_ioapic_gsi_routing(ioapic)->gsi_base + pin;
}
-/* Initialize all legacy IRQs and all pins on the first IOAPIC at boot */
+/*
+ * Initialize all legacy IRQs and all pins on the first IOAPIC
+ * if we have a legacy interrupt controller. The kernel boot option "pirq="
+ * may rely on non-legacy pins on the first IOAPIC.
+ */
static inline int mp_init_irq_at_boot(int ioapic, int irq)
{
- return ioapic == 0 || (irq >= 0 && irq < NR_IRQS_LEGACY);
+ if (!nr_legacy_irqs())
+ return 0;
+
+ return ioapic == 0 || (irq >= 0 && irq < nr_legacy_irqs());
}
int nr_ioapics;
struct irq_cfg *cfg;
int count, node, i;
- if (!legacy_pic->nr_legacy_irqs)
+ if (!nr_legacy_irqs())
io_apic_irqs = ~0UL;
for_each_ioapic(i) {
* For legacy IRQ's, start with assigning irq0 to irq15 to
* IRQ0_VECTOR to IRQ15_VECTOR for all cpu's.
*/
- if (i < legacy_pic->nr_legacy_irqs) {
+ if (i < nr_legacy_irqs()) {
cfg[i].vector = IRQ0_VECTOR + i;
cpumask_setall(cfg[i].domain);
}
*/
static int EISA_ELCR(unsigned int irq)
{
- if (irq < legacy_pic->nr_legacy_irqs) {
+ if (irq < nr_legacy_irqs()) {
unsigned int port = 0x4d0 + (irq >> 3);
return (inb(port) >> (irq & 7)) & 1;
}
} else {
u32 gsi = gsi_cfg->gsi_base + pin;
- if (gsi >= NR_IRQS_LEGACY)
+ if (gsi >= nr_legacy_irqs())
irq = gsi;
else
irq = gsi_top + gsi;
}
ioapic_register_intr(irq, cfg, attr->trigger);
- if (irq < legacy_pic->nr_legacy_irqs)
+ if (irq < nr_legacy_irqs())
legacy_pic->mask(irq);
ioapic_write_entry(attr->ioapic, attr->ioapic_pin, entry);
unsigned int v;
unsigned long flags;
- if (!legacy_pic->nr_legacy_irqs)
+ if (!nr_legacy_irqs())
return;
printk(KERN_DEBUG "\nprinting PIC contents\n");
int i8259_apic, i8259_pin;
int apic, pin;
- if (!legacy_pic->nr_legacy_irqs)
+ if (!nr_legacy_irqs())
return;
for_each_ioapic_pin(apic, pin) {
*/
clear_IO_APIC();
- if (!legacy_pic->nr_legacy_irqs)
+ if (!nr_legacy_irqs())
return;
x86_io_apic_ops.disable();
unsigned long flags;
raw_spin_lock_irqsave(&ioapic_lock, flags);
- if (irq < legacy_pic->nr_legacy_irqs) {
+ if (irq < nr_legacy_irqs()) {
legacy_pic->mask(irq);
if (legacy_pic->irq_pending(irq))
was_pending = 1;
* so default to an old-fashioned 8259
* interrupt if we can..
*/
- if (irq < legacy_pic->nr_legacy_irqs)
+ if (irq < nr_legacy_irqs())
legacy_pic->make_irq(irq);
else
/* Strange. Oh, well.. */
/*
* calling enable_IO_APIC() is moved to setup_local_APIC for BP
*/
- io_apic_irqs = legacy_pic->nr_legacy_irqs ? ~PIC_IRQS : ~0UL;
+ io_apic_irqs = nr_legacy_irqs() ? ~PIC_IRQS : ~0UL;
apic_printk(APIC_VERBOSE, "ENABLING IO-APIC IRQs\n");
/*
sync_Arb_IDs();
setup_IO_APIC_irqs();
init_IO_APIC_traps();
- if (legacy_pic->nr_legacy_irqs)
+ if (nr_legacy_irqs())
check_timer();
}
unsigned int arch_dynirq_lower_bound(unsigned int from)
{
- unsigned int min = gsi_top + NR_IRQS_LEGACY;
+ unsigned int min = gsi_top + nr_legacy_irqs();
return from < min ? min : from;
}
if (nr_irqs > (NR_VECTORS * nr_cpu_ids))
nr_irqs = NR_VECTORS * nr_cpu_ids;
- nr = (gsi_top + NR_IRQS_LEGACY) + 8 * nr_cpu_ids;
+ nr = (gsi_top + nr_legacy_irqs()) + 8 * nr_cpu_ids;
#if defined(CONFIG_PCI_MSI) || defined(CONFIG_HT_IRQ)
/*
* for MSI and HT dyn irq
*/
- nr += (gsi_top + NR_IRQS_LEGACY) * 16;
+ nr += gsi_top * 16;
#endif
if (nr < nr_irqs)
nr_irqs = nr;
- return NR_IRQS_LEGACY;
+ return nr_legacy_irqs();
}
int io_apic_set_pci_routing(struct device *dev, int irq,
#include <asm/apic.h>
#include <asm/pci_x86.h>
#include <asm/setup.h>
+#include <asm/i8259.h> /* for nr_legacy_irqs() */
__initdata u64 initial_dtb;
char __initdata cmd_line[COMMAND_LINE_SIZE];
struct irq_domain *id;
struct mp_ioapic_gsi *gsi_cfg;
int ret;
- int num;
+ int num, legacy_irqs = nr_legacy_irqs();
gsi_cfg = mp_ioapic_gsi_routing(ioapic_num);
num = gsi_cfg->gsi_end - gsi_cfg->gsi_base + 1;
BUG_ON(!id);
if (gsi_cfg->gsi_base == 0) {
/*
- * The first NR_IRQS_LEGACY irq descs are allocated in
+ * The first nr_legacy_irqs() irq descs are allocated in
 * early_irq_init() and need just a mapping. The remaining irqs
 * need both a desc allocation and a mapping. All of them are
 * preallocated and assigned so we can keep the 1:1 mapping that
 * the ioapic expects.
*/
- irq_domain_associate_many(id, 0, 0, NR_IRQS_LEGACY);
+ irq_domain_associate_many(id, 0, 0, legacy_irqs);
- if (num > NR_IRQS_LEGACY) {
- ret = irq_create_strict_mappings(id, NR_IRQS_LEGACY,
- NR_IRQS_LEGACY, num - NR_IRQS_LEGACY);
+ if (num > legacy_irqs) {
+ ret = irq_create_strict_mappings(id, legacy_irqs,
+ legacy_irqs, num - legacy_irqs);
if (ret)
pr_err("Error creating mapping for the "
"remaining IRQs: %d\n", ret);
#endif
legacy_pic->init(0);
- for (i = 0; i < legacy_pic->nr_legacy_irqs; i++)
+ for (i = 0; i < nr_legacy_irqs(); i++)
irq_set_chip_and_handler_name(i, chip, handle_level_irq, name);
}
* then this vector space can be freed and re-used dynamically as the
* irq's migrate etc.
*/
- for (i = 0; i < legacy_pic->nr_legacy_irqs; i++)
+ for (i = 0; i < nr_legacy_irqs(); i++)
per_cpu(vector_irq, 0)[IRQ0_VECTOR + i] = i;
x86_init.irqs.intr_init();
* legacy PIC, for the new cpu that is coming online, setup the static
* legacy vector to irq mapping:
*/
- for (irq = 0; irq < legacy_pic->nr_legacy_irqs; irq++)
+ for (irq = 0; irq < nr_legacy_irqs(); irq++)
per_cpu(vector_irq, cpu)[IRQ0_VECTOR + irq] = irq;
#endif
#include <xen/features.h>
#include <xen/events.h>
#include <asm/xen/pci.h>
+#include <asm/i8259.h> /* for nr_legacy_irqs() */
static int xen_pcifront_enable_irq(struct pci_dev *dev)
{
/* In PV DomU the Xen PCI backend puts the PIRQ in the interrupt line.*/
pirq = gsi;
- if (gsi < NR_IRQS_LEGACY)
+ if (gsi < nr_legacy_irqs())
share = 0;
rc = xen_bind_pirq_gsi_to_irq(gsi, pirq, share, "pcifront");
xen_setup_acpi_sci();
__acpi_register_gsi = acpi_register_gsi_xen;
/* Pre-allocate legacy irqs */
- for (irq = 0; irq < NR_IRQS_LEGACY; irq++) {
+ for (irq = 0; irq < nr_legacy_irqs(); irq++) {
int trigger, polarity;
if (acpi_get_override_irq(irq, &trigger, &polarity) == -1)
true /* Map GSI to PIRQ */);
}
if (0 == nr_ioapics) {
- for (irq = 0; irq < NR_IRQS_LEGACY; irq++)
+ for (irq = 0; irq < nr_legacy_irqs(); irq++)
xen_bind_pirq_gsi_to_irq(irq, irq, 0, "xt-pic");
}
return 0;