arch/x86_64/kernel/io_apic.c
1 /*
2 * Intel IO-APIC support for multi-Pentium hosts.
3 *
4 * Copyright (C) 1997, 1998, 1999, 2000 Ingo Molnar, Hajnalka Szabo
5 *
6 * Many thanks to Stig Venaas for trying out countless experimental
7 * patches and reporting/debugging problems patiently!
8 *
9 * (c) 1999, Multiple IO-APIC support, developed by
10 * Ken-ichi Yaku <yaku@css1.kbnes.nec.co.jp> and
11 * Hidemi Kishimoto <kisimoto@css1.kbnes.nec.co.jp>,
12 * further tested and cleaned up by Zach Brown <zab@redhat.com>
13 * and Ingo Molnar <mingo@redhat.com>
14 *
15 * Fixes
16 * Maciej W. Rozycki : Bits for genuine 82489DX APICs;
17 * thanks to Eric Gilmore
18 * and Rolf G. Tews
19 * for testing these extensively
20 * Paul Diefenbaugh : Added full ACPI support
21 */
22
23 #include <linux/mm.h>
24 #include <linux/interrupt.h>
25 #include <linux/init.h>
26 #include <linux/delay.h>
27 #include <linux/sched.h>
28 #include <linux/config.h>
29 #include <linux/smp_lock.h>
30 #include <linux/mc146818rtc.h>
31 #include <linux/acpi.h>
32 #include <linux/sysdev.h>
33
34 #include <asm/io.h>
35 #include <asm/smp.h>
36 #include <asm/desc.h>
37 #include <asm/proto.h>
38 #include <asm/mach_apic.h>
39 #include <asm/acpi.h>
40 #include <asm/dma.h>
41
42 #define __apicdebuginit __init
43
44 int sis_apic_bug; /* not actually supported, dummy for compile */
45
46 static int no_timer_check;
47
48 int disable_timer_pin_1 __initdata;
49
50 /* Where, if anywhere, the i8259 is connected in external int mode */
51 static struct { int pin, apic; } ioapic_i8259 = { -1, -1 };
52
53 static DEFINE_SPINLOCK(ioapic_lock);
54
55 /*
56 * # of IRQ routing registers
57 */
58 int nr_ioapic_registers[MAX_IO_APICS];
59
60 /*
61 * Rough estimation of how many shared IRQs there are, can
62 * be changed anytime.
63 */
64 #define MAX_PLUS_SHARED_IRQS NR_IRQ_VECTORS
65 #define PIN_MAP_SIZE (MAX_PLUS_SHARED_IRQS + NR_IRQS)
66
67 /*
68 * This is performance-critical, we want to do it O(1)
69 *
70 * the indexing order of this array favors 1:1 mappings
71 * between pins and IRQs.
72 */
73
74 static struct irq_pin_list {
75 short apic, pin, next;
76 } irq_2_pin[PIN_MAP_SIZE];
77
78 int vector_irq[NR_VECTORS] __read_mostly = { [0 ... NR_VECTORS - 1] = -1};
79 #ifdef CONFIG_PCI_MSI
80 #define vector_to_irq(vector) \
81 (platform_legacy_irq(vector) ? vector : vector_irq[vector])
82 #else
83 #define vector_to_irq(vector) (vector)
84 #endif
85
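/*
 * __DO_ACTION walks the irq_2_pin chain for 'irq' and, for every
 * (apic, pin) pair it finds, reads the 32-bit word at register
 * 0x10 + R + pin*2, applies ACTION to it and writes it back,
 * running FINAL once after the last pin.  R picks the half of the
 * 64-bit redirection entry: 0 is the low word (mask/vector),
 * 1 is the high word (destination).
 */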
86 #define __DO_ACTION(R, ACTION, FINAL) \
87 \
88 { \
89 int pin; \
90 struct irq_pin_list *entry = irq_2_pin + irq; \
91 \
92 BUG_ON(irq >= NR_IRQS); \
93 for (;;) { \
94 unsigned int reg; \
95 pin = entry->pin; \
96 if (pin == -1) \
97 break; \
98 reg = io_apic_read(entry->apic, 0x10 + R + pin*2); \
99 reg ACTION; \
100 io_apic_modify(entry->apic, reg); \
101 if (!entry->next) \
102 break; \
103 entry = irq_2_pin + entry->next; \
104 } \
105 FINAL; \
106 }
107
108 #ifdef CONFIG_SMP
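/*
 * Rewrite the destination field (the high word of every redirection
 * entry mapped to 'irq').  The requested mask is first restricted to
 * online CPUs; if the intersection is empty, TARGET_CPUS is used.
 */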
109 static void set_ioapic_affinity_irq(unsigned int irq, cpumask_t mask)
110 {
111 unsigned long flags;
112 unsigned int dest;
113 cpumask_t tmp;
114
115 cpus_and(tmp, mask, cpu_online_map);
116 if (cpus_empty(tmp))
117 tmp = TARGET_CPUS;
118
119 cpus_and(mask, tmp, CPU_MASK_ALL);
120
121 dest = cpu_mask_to_apicid(mask);
122
123 /*
124 * Only the high 8 bits are valid.
125 */
126 dest = SET_APIC_LOGICAL_ID(dest);
127
128 spin_lock_irqsave(&ioapic_lock, flags);
129 __DO_ACTION(1, = dest, )
130 set_irq_info(irq, mask);
131 spin_unlock_irqrestore(&ioapic_lock, flags);
132 }
133 #endif
134
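/* gsi_2_irq[] maps an ACPI GSI to the compacted Linux IRQ picked by
 * gsi_irq_sharing(); 0xFF marks a GSI that has not been assigned yet. */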
135 static u8 gsi_2_irq[NR_IRQ_VECTORS] = { [0 ... NR_IRQ_VECTORS-1] = 0xFF };
136
137 /*
138 * The common case is 1:1 IRQ<->pin mappings. Sometimes there are
139 * shared ISA-space IRQs, so we have to support them. We are super
140 * fast in the common case, and fast for shared ISA-space IRQs.
141 */
142 static void add_pin_to_irq(unsigned int irq, int apic, int pin)
143 {
144 static int first_free_entry = NR_IRQS;
145 struct irq_pin_list *entry = irq_2_pin + irq;
146
147 BUG_ON(irq >= NR_IRQS);
148 while (entry->next)
149 entry = irq_2_pin + entry->next;
150
151 if (entry->pin != -1) {
152 entry->next = first_free_entry;
153 entry = irq_2_pin + entry->next;
154 if (++first_free_entry >= PIN_MAP_SIZE)
155 panic("io_apic.c: ran out of irq_2_pin entries!");
156 }
157 entry->apic = apic;
158 entry->pin = pin;
159 }
160
161
162 #define DO_ACTION(name,R,ACTION, FINAL) \
163 \
164 static void name##_IO_APIC_irq (unsigned int irq) \
165 __DO_ACTION(R, ACTION, FINAL)
166
167 DO_ACTION( __mask, 0, |= 0x00010000, io_apic_sync(entry->apic) )
168 /* mask = 1 */
169 DO_ACTION( __unmask, 0, &= 0xfffeffff, )
170 /* mask = 0 */
171
172 static void mask_IO_APIC_irq (unsigned int irq)
173 {
174 unsigned long flags;
175
176 spin_lock_irqsave(&ioapic_lock, flags);
177 __mask_IO_APIC_irq(irq);
178 spin_unlock_irqrestore(&ioapic_lock, flags);
179 }
180
181 static void unmask_IO_APIC_irq (unsigned int irq)
182 {
183 unsigned long flags;
184
185 spin_lock_irqsave(&ioapic_lock, flags);
186 __unmask_IO_APIC_irq(irq);
187 spin_unlock_irqrestore(&ioapic_lock, flags);
188 }
189
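/*
 * Redirection entries are 64 bits wide but the IO-APIC register window
 * is 32 bits, so every entry is accessed as two words at indices
 * 0x10 + 2*pin (low half) and 0x11 + 2*pin (high half); that is why
 * the struct is cast to an int pointer below.
 */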
190 static void clear_IO_APIC_pin(unsigned int apic, unsigned int pin)
191 {
192 struct IO_APIC_route_entry entry;
193 unsigned long flags;
194
195 /* Check delivery_mode to be sure we're not clearing an SMI pin */
196 spin_lock_irqsave(&ioapic_lock, flags);
197 *(((int*)&entry) + 0) = io_apic_read(apic, 0x10 + 2 * pin);
198 *(((int*)&entry) + 1) = io_apic_read(apic, 0x11 + 2 * pin);
199 spin_unlock_irqrestore(&ioapic_lock, flags);
200 if (entry.delivery_mode == dest_SMI)
201 return;
202 /*
203 * Disable it in the IO-APIC irq-routing table:
204 */
205 memset(&entry, 0, sizeof(entry));
206 entry.mask = 1;
207 spin_lock_irqsave(&ioapic_lock, flags);
208 io_apic_write(apic, 0x10 + 2 * pin, *(((int *)&entry) + 0));
209 io_apic_write(apic, 0x11 + 2 * pin, *(((int *)&entry) + 1));
210 spin_unlock_irqrestore(&ioapic_lock, flags);
211 }
212
213 static void clear_IO_APIC (void)
214 {
215 int apic, pin;
216
217 for (apic = 0; apic < nr_ioapics; apic++)
218 for (pin = 0; pin < nr_ioapic_registers[apic]; pin++)
219 clear_IO_APIC_pin(apic, pin);
220 }
221
222 /*
223  * support for broken MP BIOSes, enables hand-redirection of PIRQ0-7 to
224 * specific CPU-side IRQs.
225 */
226
227 #define MAX_PIRQS 8
228 static int pirq_entries [MAX_PIRQS];
229 static int pirqs_enabled;
230 int skip_ioapic_setup;
231 int ioapic_force;
232
233 /* dummy parsing: see setup.c */
234
235 static int __init disable_ioapic_setup(char *str)
236 {
237 skip_ioapic_setup = 1;
238 return 1;
239 }
240
241 static int __init enable_ioapic_setup(char *str)
242 {
243 ioapic_force = 1;
244 skip_ioapic_setup = 0;
245 return 1;
246 }
247
248 __setup("noapic", disable_ioapic_setup);
249 __setup("apic", enable_ioapic_setup);
250
251 #include <asm/pci-direct.h>
252 #include <linux/pci_ids.h>
253 #include <linux/pci.h>
254
255 /* Temporary Hack. Nvidia and VIA boards currently only work with IO-APIC
256 off. Check for an Nvidia or VIA PCI bridge and turn it off.
257 Use pci direct infrastructure because this runs before the PCI subsystem.
258
259    Can be overridden with "apic"
260
261 And another hack to disable the IOMMU on VIA chipsets.
262
263 Kludge-O-Rama. */
264 void __init check_ioapic(void)
265 {
266 int num,slot,func;
267 /* Poor man's PCI discovery */
268 for (num = 0; num < 32; num++) {
269 for (slot = 0; slot < 32; slot++) {
270 for (func = 0; func < 8; func++) {
271 u32 class;
272 u32 vendor;
273 u8 type;
274 class = read_pci_config(num,slot,func,
275 PCI_CLASS_REVISION);
276 if (class == 0xffffffff)
277 break;
278
279 if ((class >> 16) != PCI_CLASS_BRIDGE_PCI)
280 continue;
281
282 vendor = read_pci_config(num, slot, func,
283 PCI_VENDOR_ID);
284 vendor &= 0xffff;
285 switch (vendor) {
286 case PCI_VENDOR_ID_VIA:
287 #ifdef CONFIG_GART_IOMMU
288 if ((end_pfn > MAX_DMA32_PFN ||
289 force_iommu) &&
290 !iommu_aperture_allowed) {
291 printk(KERN_INFO
292 "Looks like a VIA chipset. Disabling IOMMU. Overwrite with \"iommu=allowed\"\n");
293 iommu_aperture_disabled = 1;
294 }
295 #endif
296 return;
297 case PCI_VENDOR_ID_NVIDIA:
298 #ifdef CONFIG_ACPI
299 /* All timer overrides on Nvidia
300 seem to be wrong. Skip them. */
301 acpi_skip_timer_override = 1;
302 printk(KERN_INFO
303 "Nvidia board detected. Ignoring ACPI timer override.\n");
304 #endif
305 /* RED-PEN skip them on mptables too? */
306 return;
307 }
308
309 /* No multi-function device? */
310 type = read_pci_config_byte(num,slot,func,
311 PCI_HEADER_TYPE);
312 if (!(type & 0x80))
313 break;
314 }
315 }
316 }
317 }
318
319 static int __init ioapic_pirq_setup(char *str)
320 {
321 int i, max;
322 int ints[MAX_PIRQS+1];
323
324 get_options(str, ARRAY_SIZE(ints), ints);
325
326 for (i = 0; i < MAX_PIRQS; i++)
327 pirq_entries[i] = -1;
328
329 pirqs_enabled = 1;
330 apic_printk(APIC_VERBOSE, "PIRQ redirection, working around broken MP-BIOS.\n");
331 max = MAX_PIRQS;
332 if (ints[0] < MAX_PIRQS)
333 max = ints[0];
334
335 for (i = 0; i < max; i++) {
336 apic_printk(APIC_VERBOSE, "... PIRQ%d -> IRQ %d\n", i, ints[i+1]);
337 /*
338 * PIRQs are mapped upside down, usually.
339 */
340 pirq_entries[MAX_PIRQS-i-1] = ints[i+1];
341 }
342 return 1;
343 }
344
345 __setup("pirq=", ioapic_pirq_setup);
346
347 /*
348 * Find the IRQ entry number of a certain pin.
349 */
350 static int find_irq_entry(int apic, int pin, int type)
351 {
352 int i;
353
354 for (i = 0; i < mp_irq_entries; i++)
355 if (mp_irqs[i].mpc_irqtype == type &&
356 (mp_irqs[i].mpc_dstapic == mp_ioapics[apic].mpc_apicid ||
357 mp_irqs[i].mpc_dstapic == MP_APIC_ALL) &&
358 mp_irqs[i].mpc_dstirq == pin)
359 return i;
360
361 return -1;
362 }
363
364 /*
365 * Find the pin to which IRQ[irq] (ISA) is connected
366 */
367 static int __init find_isa_irq_pin(int irq, int type)
368 {
369 int i;
370
371 for (i = 0; i < mp_irq_entries; i++) {
372 int lbus = mp_irqs[i].mpc_srcbus;
373
374 if ((mp_bus_id_to_type[lbus] == MP_BUS_ISA ||
375 mp_bus_id_to_type[lbus] == MP_BUS_EISA ||
376 mp_bus_id_to_type[lbus] == MP_BUS_MCA) &&
377 (mp_irqs[i].mpc_irqtype == type) &&
378 (mp_irqs[i].mpc_srcbusirq == irq))
379
380 return mp_irqs[i].mpc_dstirq;
381 }
382 return -1;
383 }
384
385 static int __init find_isa_irq_apic(int irq, int type)
386 {
387 int i;
388
389 for (i = 0; i < mp_irq_entries; i++) {
390 int lbus = mp_irqs[i].mpc_srcbus;
391
392 if ((mp_bus_id_to_type[lbus] == MP_BUS_ISA ||
393 mp_bus_id_to_type[lbus] == MP_BUS_EISA ||
394 mp_bus_id_to_type[lbus] == MP_BUS_MCA) &&
395 (mp_irqs[i].mpc_irqtype == type) &&
396 (mp_irqs[i].mpc_srcbusirq == irq))
397 break;
398 }
399 if (i < mp_irq_entries) {
400 int apic;
401 for(apic = 0; apic < nr_ioapics; apic++) {
402 if (mp_ioapics[apic].mpc_apicid == mp_irqs[i].mpc_dstapic)
403 return apic;
404 }
405 }
406
407 return -1;
408 }
409
410 /*
411 * Find a specific PCI IRQ entry.
412 * Not an __init, possibly needed by modules
413 */
414 static int pin_2_irq(int idx, int apic, int pin);
415
416 int IO_APIC_get_PCI_irq_vector(int bus, int slot, int pin)
417 {
418 int apic, i, best_guess = -1;
419
420 apic_printk(APIC_DEBUG, "querying PCI -> IRQ mapping bus:%d, slot:%d, pin:%d.\n",
421 bus, slot, pin);
422 if (mp_bus_id_to_pci_bus[bus] == -1) {
423 apic_printk(APIC_VERBOSE, "PCI BIOS passed nonexistent PCI bus %d!\n", bus);
424 return -1;
425 }
426 for (i = 0; i < mp_irq_entries; i++) {
427 int lbus = mp_irqs[i].mpc_srcbus;
428
429 for (apic = 0; apic < nr_ioapics; apic++)
430 if (mp_ioapics[apic].mpc_apicid == mp_irqs[i].mpc_dstapic ||
431 mp_irqs[i].mpc_dstapic == MP_APIC_ALL)
432 break;
433
434 if ((mp_bus_id_to_type[lbus] == MP_BUS_PCI) &&
435 !mp_irqs[i].mpc_irqtype &&
436 (bus == lbus) &&
437 (slot == ((mp_irqs[i].mpc_srcbusirq >> 2) & 0x1f))) {
438 int irq = pin_2_irq(i,apic,mp_irqs[i].mpc_dstirq);
439
440 if (!(apic || IO_APIC_IRQ(irq)))
441 continue;
442
443 if (pin == (mp_irqs[i].mpc_srcbusirq & 3))
444 return irq;
445 /*
446 * Use the first all-but-pin matching entry as a
447 * best-guess fuzzy result for broken mptables.
448 */
449 if (best_guess < 0)
450 best_guess = irq;
451 }
452 }
453 BUG_ON(best_guess >= NR_IRQS);
454 return best_guess;
455 }
456
457 /*
458 * EISA Edge/Level control register, ELCR
459 */
460 static int EISA_ELCR(unsigned int irq)
461 {
462 if (irq < 16) {
463 unsigned int port = 0x4d0 + (irq >> 3);
464 return (inb(port) >> (irq & 7)) & 1;
465 }
466 apic_printk(APIC_VERBOSE, "Broken MPtable reports ISA irq %d\n", irq);
467 return 0;
468 }
469
470 /* EISA interrupts are always polarity zero and can be edge or level
471 * trigger depending on the ELCR value. If an interrupt is listed as
472 * EISA conforming in the MP table, that means its trigger type must
473 * be read in from the ELCR */
474
475 #define default_EISA_trigger(idx) (EISA_ELCR(mp_irqs[idx].mpc_srcbusirq))
476 #define default_EISA_polarity(idx) (0)
477
478 /* ISA interrupts are always polarity zero edge triggered,
479 * when listed as conforming in the MP table. */
480
481 #define default_ISA_trigger(idx) (0)
482 #define default_ISA_polarity(idx) (0)
483
484 /* PCI interrupts are always polarity one level triggered,
485 * when listed as conforming in the MP table. */
486
487 #define default_PCI_trigger(idx) (1)
488 #define default_PCI_polarity(idx) (1)
489
490 /* MCA interrupts are always polarity zero level triggered,
491 * when listed as conforming in the MP table. */
492
493 #define default_MCA_trigger(idx) (1)
494 #define default_MCA_polarity(idx) (0)
495
496 static int __init MPBIOS_polarity(int idx)
497 {
498 int bus = mp_irqs[idx].mpc_srcbus;
499 int polarity;
500
501 /*
502 * Determine IRQ line polarity (high active or low active):
503 */
504 switch (mp_irqs[idx].mpc_irqflag & 3)
505 {
506 case 0: /* conforms, ie. bus-type dependent polarity */
507 {
508 switch (mp_bus_id_to_type[bus])
509 {
510 case MP_BUS_ISA: /* ISA pin */
511 {
512 polarity = default_ISA_polarity(idx);
513 break;
514 }
515 case MP_BUS_EISA: /* EISA pin */
516 {
517 polarity = default_EISA_polarity(idx);
518 break;
519 }
520 case MP_BUS_PCI: /* PCI pin */
521 {
522 polarity = default_PCI_polarity(idx);
523 break;
524 }
525 case MP_BUS_MCA: /* MCA pin */
526 {
527 polarity = default_MCA_polarity(idx);
528 break;
529 }
530 default:
531 {
532 printk(KERN_WARNING "broken BIOS!!\n");
533 polarity = 1;
534 break;
535 }
536 }
537 break;
538 }
539 case 1: /* high active */
540 {
541 polarity = 0;
542 break;
543 }
544 case 2: /* reserved */
545 {
546 printk(KERN_WARNING "broken BIOS!!\n");
547 polarity = 1;
548 break;
549 }
550 case 3: /* low active */
551 {
552 polarity = 1;
553 break;
554 }
555 default: /* invalid */
556 {
557 printk(KERN_WARNING "broken BIOS!!\n");
558 polarity = 1;
559 break;
560 }
561 }
562 return polarity;
563 }
564
565 static int MPBIOS_trigger(int idx)
566 {
567 int bus = mp_irqs[idx].mpc_srcbus;
568 int trigger;
569
570 /*
571 * Determine IRQ trigger mode (edge or level sensitive):
572 */
573 switch ((mp_irqs[idx].mpc_irqflag>>2) & 3)
574 {
575 case 0: /* conforms, ie. bus-type dependent */
576 {
577 switch (mp_bus_id_to_type[bus])
578 {
579 case MP_BUS_ISA: /* ISA pin */
580 {
581 trigger = default_ISA_trigger(idx);
582 break;
583 }
584 case MP_BUS_EISA: /* EISA pin */
585 {
586 trigger = default_EISA_trigger(idx);
587 break;
588 }
589 case MP_BUS_PCI: /* PCI pin */
590 {
591 trigger = default_PCI_trigger(idx);
592 break;
593 }
594 case MP_BUS_MCA: /* MCA pin */
595 {
596 trigger = default_MCA_trigger(idx);
597 break;
598 }
599 default:
600 {
601 printk(KERN_WARNING "broken BIOS!!\n");
602 trigger = 1;
603 break;
604 }
605 }
606 break;
607 }
608 case 1: /* edge */
609 {
610 trigger = 0;
611 break;
612 }
613 case 2: /* reserved */
614 {
615 printk(KERN_WARNING "broken BIOS!!\n");
616 trigger = 1;
617 break;
618 }
619 case 3: /* level */
620 {
621 trigger = 1;
622 break;
623 }
624 default: /* invalid */
625 {
626 printk(KERN_WARNING "broken BIOS!!\n");
627 trigger = 0;
628 break;
629 }
630 }
631 return trigger;
632 }
633
634 static inline int irq_polarity(int idx)
635 {
636 return MPBIOS_polarity(idx);
637 }
638
639 static inline int irq_trigger(int idx)
640 {
641 return MPBIOS_trigger(idx);
642 }
643
644 static int next_irq = 16;
645
646 /*
647 * gsi_irq_sharing -- Name overload! "irq" can be either a legacy IRQ
648 * in the range 0-15, a linux IRQ in the range 0-223, or a GSI number
649 * from ACPI, which can reach 800 in large boxen.
650 *
651 * Compact the sparse GSI space into a sequential IRQ series and reuse
652 * vectors if possible.
653 */
654 int gsi_irq_sharing(int gsi)
655 {
656 int i, tries, vector;
657
658 BUG_ON(gsi >= NR_IRQ_VECTORS);
659
660 if (platform_legacy_irq(gsi))
661 return gsi;
662
663 if (gsi_2_irq[gsi] != 0xFF)
664 return (int)gsi_2_irq[gsi];
665
666 tries = NR_IRQS;
667 try_again:
668 vector = assign_irq_vector(gsi);
669
670 /*
671 * Sharing vectors means sharing IRQs, so scan irq_vectors for previous
672 * use of vector and if found, return that IRQ. However, we never want
673 * to share legacy IRQs, which usually have a different trigger mode
674 * than PCI.
675 */
676 for (i = 0; i < NR_IRQS; i++)
677 if (IO_APIC_VECTOR(i) == vector)
678 break;
679 if (platform_legacy_irq(i)) {
680 if (--tries >= 0) {
681 IO_APIC_VECTOR(i) = 0;
682 goto try_again;
683 }
684 panic("gsi_irq_sharing: didn't find an IRQ using vector 0x%02X for GSI %d", vector, gsi);
685 }
686 if (i < NR_IRQS) {
687 gsi_2_irq[gsi] = i;
688 printk(KERN_INFO "GSI %d sharing vector 0x%02X and IRQ %d\n",
689 gsi, vector, i);
690 return i;
691 }
692
693 i = next_irq++;
694 BUG_ON(i >= NR_IRQS);
695 gsi_2_irq[gsi] = i;
696 IO_APIC_VECTOR(i) = vector;
697 printk(KERN_INFO "GSI %d assigned vector 0x%02X and IRQ %d\n",
698 gsi, vector, i);
699 return i;
700 }
701
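/*
 * Translate MP-table entry 'idx' on (apic, pin) into a Linux IRQ.
 * ISA/EISA/MCA pins keep their source-bus IRQ number; PCI pins get a
 * global number built by adding up the pin counts of all preceding
 * IO-APICs, which is then compacted via gsi_irq_sharing().
 */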
702 static int pin_2_irq(int idx, int apic, int pin)
703 {
704 int irq, i;
705 int bus = mp_irqs[idx].mpc_srcbus;
706
707 /*
708 * Debugging check, we are in big trouble if this message pops up!
709 */
710 if (mp_irqs[idx].mpc_dstirq != pin)
711 printk(KERN_ERR "broken BIOS or MPTABLE parser, ayiee!!\n");
712
713 switch (mp_bus_id_to_type[bus])
714 {
715 case MP_BUS_ISA: /* ISA pin */
716 case MP_BUS_EISA:
717 case MP_BUS_MCA:
718 {
719 irq = mp_irqs[idx].mpc_srcbusirq;
720 break;
721 }
722 case MP_BUS_PCI: /* PCI pin */
723 {
724 /*
725 * PCI IRQs are mapped in order
726 */
727 i = irq = 0;
728 while (i < apic)
729 irq += nr_ioapic_registers[i++];
730 irq += pin;
731 irq = gsi_irq_sharing(irq);
732 break;
733 }
734 default:
735 {
736 printk(KERN_ERR "unknown bus type %d.\n",bus);
737 irq = 0;
738 break;
739 }
740 }
741 BUG_ON(irq >= NR_IRQS);
742
743 /*
744 * PCI IRQ command line redirection. Yes, limits are hardcoded.
745 */
746 if ((pin >= 16) && (pin <= 23)) {
747 if (pirq_entries[pin-16] != -1) {
748 if (!pirq_entries[pin-16]) {
749 apic_printk(APIC_VERBOSE, "disabling PIRQ%d\n", pin-16);
750 } else {
751 irq = pirq_entries[pin-16];
752 apic_printk(APIC_VERBOSE, "using PIRQ%d -> IRQ %d\n",
753 pin-16, irq);
754 }
755 }
756 }
757 BUG_ON(irq >= NR_IRQS);
758 return irq;
759 }
760
761 static inline int IO_APIC_irq_trigger(int irq)
762 {
763 int apic, idx, pin;
764
765 for (apic = 0; apic < nr_ioapics; apic++) {
766 for (pin = 0; pin < nr_ioapic_registers[apic]; pin++) {
767 idx = find_irq_entry(apic,pin,mp_INT);
768 if ((idx != -1) && (irq == pin_2_irq(idx,apic,pin)))
769 return irq_trigger(idx);
770 }
771 }
772 /*
773 * nonexistent IRQs are edge default
774 */
775 return 0;
776 }
777
778 /* irq_vector[] is indexed by the sum of all RTEs in all I/O APICs. */
779 u8 irq_vector[NR_IRQ_VECTORS] __read_mostly = { FIRST_DEVICE_VECTOR , 0 };
780
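/*
 * Device vectors are handed out in steps of 8 from FIRST_DEVICE_VECTOR,
 * skipping IA32_SYSCALL_VECTOR.  Once FIRST_SYSTEM_VECTOR is reached the
 * allocator wraps back to FIRST_DEVICE_VECTOR with a new offset, at
 * which point vectors start being shared between IRQs.
 */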
781 int assign_irq_vector(int irq)
782 {
783 static int current_vector = FIRST_DEVICE_VECTOR, offset = 0;
784
785 BUG_ON(irq != AUTO_ASSIGN && (unsigned)irq >= NR_IRQ_VECTORS);
786 if (irq != AUTO_ASSIGN && IO_APIC_VECTOR(irq) > 0)
787 return IO_APIC_VECTOR(irq);
788 next:
789 current_vector += 8;
790 if (current_vector == IA32_SYSCALL_VECTOR)
791 goto next;
792
793 if (current_vector >= FIRST_SYSTEM_VECTOR) {
794 /* If we run out of vectors on large boxen, must share them. */
795 offset = (offset + 1) % 8;
796 current_vector = FIRST_DEVICE_VECTOR + offset;
797 }
798
799 vector_irq[current_vector] = irq;
800 if (irq != AUTO_ASSIGN)
801 IO_APIC_VECTOR(irq) = current_vector;
802
803 return current_vector;
804 }
805
806 extern void (*interrupt[NR_IRQS])(void);
807 static struct hw_interrupt_type ioapic_level_type;
808 static struct hw_interrupt_type ioapic_edge_type;
809
810 #define IOAPIC_AUTO -1
811 #define IOAPIC_EDGE 0
812 #define IOAPIC_LEVEL 1
813
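/*
 * Attach the level- or edge-triggered handler for this interrupt.  With
 * vector-based indexing (CONFIG_PCI_MSI) the irq_desc[] slot for a
 * non-legacy IRQ is the vector itself; otherwise it is the IRQ number.
 */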
814 static inline void ioapic_register_intr(int irq, int vector, unsigned long trigger)
815 {
816 if (use_pci_vector() && !platform_legacy_irq(irq)) {
817 if ((trigger == IOAPIC_AUTO && IO_APIC_irq_trigger(irq)) ||
818 trigger == IOAPIC_LEVEL)
819 irq_desc[vector].handler = &ioapic_level_type;
820 else
821 irq_desc[vector].handler = &ioapic_edge_type;
822 set_intr_gate(vector, interrupt[vector]);
823 } else {
824 if ((trigger == IOAPIC_AUTO && IO_APIC_irq_trigger(irq)) ||
825 trigger == IOAPIC_LEVEL)
826 irq_desc[irq].handler = &ioapic_level_type;
827 else
828 irq_desc[irq].handler = &ioapic_edge_type;
829 set_intr_gate(vector, interrupt[irq]);
830 }
831 }
832
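/*
 * Walk every pin of every IO-APIC: build a routing entry from the
 * MP-table trigger/polarity, assign a vector and handler for pins that
 * map to an IO-APIC IRQ, and program the entry, writing the high word
 * (destination) before the low word that carries mask and vector.
 */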
833 static void __init setup_IO_APIC_irqs(void)
834 {
835 struct IO_APIC_route_entry entry;
836 int apic, pin, idx, irq, first_notcon = 1, vector;
837 unsigned long flags;
838
839 apic_printk(APIC_VERBOSE, KERN_DEBUG "init IO_APIC IRQs\n");
840
841 for (apic = 0; apic < nr_ioapics; apic++) {
842 for (pin = 0; pin < nr_ioapic_registers[apic]; pin++) {
843
844 /*
845 * add it to the IO-APIC irq-routing table:
846 */
847 memset(&entry,0,sizeof(entry));
848
849 entry.delivery_mode = INT_DELIVERY_MODE;
850 entry.dest_mode = INT_DEST_MODE;
851 entry.mask = 0; /* enable IRQ */
852 entry.dest.logical.logical_dest = cpu_mask_to_apicid(TARGET_CPUS);
853
854 idx = find_irq_entry(apic,pin,mp_INT);
855 if (idx == -1) {
856 if (first_notcon) {
857 apic_printk(APIC_VERBOSE, KERN_DEBUG " IO-APIC (apicid-pin) %d-%d", mp_ioapics[apic].mpc_apicid, pin);
858 first_notcon = 0;
859 } else
860 apic_printk(APIC_VERBOSE, ", %d-%d", mp_ioapics[apic].mpc_apicid, pin);
861 continue;
862 }
863
864 entry.trigger = irq_trigger(idx);
865 entry.polarity = irq_polarity(idx);
866
867 if (irq_trigger(idx)) {
868 entry.trigger = 1;
869 entry.mask = 1;
870 entry.dest.logical.logical_dest = cpu_mask_to_apicid(TARGET_CPUS);
871 }
872
873 irq = pin_2_irq(idx, apic, pin);
874 add_pin_to_irq(irq, apic, pin);
875
876 if (!apic && !IO_APIC_IRQ(irq))
877 continue;
878
879 if (IO_APIC_IRQ(irq)) {
880 vector = assign_irq_vector(irq);
881 entry.vector = vector;
882
883 ioapic_register_intr(irq, vector, IOAPIC_AUTO);
884 if (!apic && (irq < 16))
885 disable_8259A_irq(irq);
886 }
887 spin_lock_irqsave(&ioapic_lock, flags);
888 io_apic_write(apic, 0x11+2*pin, *(((int *)&entry)+1));
889 io_apic_write(apic, 0x10+2*pin, *(((int *)&entry)+0));
890 set_native_irq_info(irq, TARGET_CPUS);
891 spin_unlock_irqrestore(&ioapic_lock, flags);
892 }
893 }
894
895 if (!first_notcon)
896 apic_printk(APIC_VERBOSE," not connected.\n");
897 }
898
899 /*
900 * Set up the 8259A-master output pin as broadcast to all
901 * CPUs.
902 */
903 static void __init setup_ExtINT_IRQ0_pin(unsigned int apic, unsigned int pin, int vector)
904 {
905 struct IO_APIC_route_entry entry;
906 unsigned long flags;
907
908 memset(&entry,0,sizeof(entry));
909
910 disable_8259A_irq(0);
911
912 /* mask LVT0 */
913 apic_write(APIC_LVT0, APIC_LVT_MASKED | APIC_DM_EXTINT);
914
915 /*
916 * We use logical delivery to get the timer IRQ
917 * to the first CPU.
918 */
919 entry.dest_mode = INT_DEST_MODE;
920 entry.mask = 0; /* unmask IRQ now */
921 entry.dest.logical.logical_dest = cpu_mask_to_apicid(TARGET_CPUS);
922 entry.delivery_mode = INT_DELIVERY_MODE;
923 entry.polarity = 0;
924 entry.trigger = 0;
925 entry.vector = vector;
926
927 /*
928 * The timer IRQ doesn't have to know that behind the
929  * scenes we have an 8259A-master in AEOI mode ...
930 */
931 irq_desc[0].handler = &ioapic_edge_type;
932
933 /*
934 * Add it to the IO-APIC irq-routing table:
935 */
936 spin_lock_irqsave(&ioapic_lock, flags);
937 io_apic_write(apic, 0x11+2*pin, *(((int *)&entry)+1));
938 io_apic_write(apic, 0x10+2*pin, *(((int *)&entry)+0));
939 spin_unlock_irqrestore(&ioapic_lock, flags);
940
941 enable_8259A_irq(0);
942 }
943
944 void __init UNEXPECTED_IO_APIC(void)
945 {
946 }
947
948 void __apicdebuginit print_IO_APIC(void)
949 {
950 int apic, i;
951 union IO_APIC_reg_00 reg_00;
952 union IO_APIC_reg_01 reg_01;
953 union IO_APIC_reg_02 reg_02;
954 unsigned long flags;
955
956 if (apic_verbosity == APIC_QUIET)
957 return;
958
959 printk(KERN_DEBUG "number of MP IRQ sources: %d.\n", mp_irq_entries);
960 for (i = 0; i < nr_ioapics; i++)
961 printk(KERN_DEBUG "number of IO-APIC #%d registers: %d.\n",
962 mp_ioapics[i].mpc_apicid, nr_ioapic_registers[i]);
963
964 /*
965 * We are a bit conservative about what we expect. We have to
966 * know about every hardware change ASAP.
967 */
968 printk(KERN_INFO "testing the IO APIC.......................\n");
969
970 for (apic = 0; apic < nr_ioapics; apic++) {
971
972 spin_lock_irqsave(&ioapic_lock, flags);
973 reg_00.raw = io_apic_read(apic, 0);
974 reg_01.raw = io_apic_read(apic, 1);
975 if (reg_01.bits.version >= 0x10)
976 reg_02.raw = io_apic_read(apic, 2);
977 spin_unlock_irqrestore(&ioapic_lock, flags);
978
979 printk("\n");
980 printk(KERN_DEBUG "IO APIC #%d......\n", mp_ioapics[apic].mpc_apicid);
981 printk(KERN_DEBUG ".... register #00: %08X\n", reg_00.raw);
982 printk(KERN_DEBUG "....... : physical APIC id: %02X\n", reg_00.bits.ID);
983 if (reg_00.bits.__reserved_1 || reg_00.bits.__reserved_2)
984 UNEXPECTED_IO_APIC();
985
986 printk(KERN_DEBUG ".... register #01: %08X\n", *(int *)&reg_01);
987 printk(KERN_DEBUG "....... : max redirection entries: %04X\n", reg_01.bits.entries);
988 if ( (reg_01.bits.entries != 0x0f) && /* older (Neptune) boards */
989 (reg_01.bits.entries != 0x17) && /* typical ISA+PCI boards */
990 (reg_01.bits.entries != 0x1b) && /* Compaq Proliant boards */
991 (reg_01.bits.entries != 0x1f) && /* dual Xeon boards */
992 (reg_01.bits.entries != 0x22) && /* bigger Xeon boards */
993 (reg_01.bits.entries != 0x2E) &&
994 (reg_01.bits.entries != 0x3F) &&
995 (reg_01.bits.entries != 0x03)
996 )
997 UNEXPECTED_IO_APIC();
998
999 printk(KERN_DEBUG "....... : PRQ implemented: %X\n", reg_01.bits.PRQ);
1000 printk(KERN_DEBUG "....... : IO APIC version: %04X\n", reg_01.bits.version);
1001 if ( (reg_01.bits.version != 0x01) && /* 82489DX IO-APICs */
1002 (reg_01.bits.version != 0x02) && /* 82801BA IO-APICs (ICH2) */
1003 (reg_01.bits.version != 0x10) && /* oldest IO-APICs */
1004 (reg_01.bits.version != 0x11) && /* Pentium/Pro IO-APICs */
1005 (reg_01.bits.version != 0x13) && /* Xeon IO-APICs */
1006 (reg_01.bits.version != 0x20) /* Intel P64H (82806 AA) */
1007 )
1008 UNEXPECTED_IO_APIC();
1009 if (reg_01.bits.__reserved_1 || reg_01.bits.__reserved_2)
1010 UNEXPECTED_IO_APIC();
1011
1012 if (reg_01.bits.version >= 0x10) {
1013 printk(KERN_DEBUG ".... register #02: %08X\n", reg_02.raw);
1014 printk(KERN_DEBUG "....... : arbitration: %02X\n", reg_02.bits.arbitration);
1015 if (reg_02.bits.__reserved_1 || reg_02.bits.__reserved_2)
1016 UNEXPECTED_IO_APIC();
1017 }
1018
1019 printk(KERN_DEBUG ".... IRQ redirection table:\n");
1020
1021 printk(KERN_DEBUG " NR Log Phy Mask Trig IRR Pol"
1022 " Stat Dest Deli Vect: \n");
1023
1024 for (i = 0; i <= reg_01.bits.entries; i++) {
1025 struct IO_APIC_route_entry entry;
1026
1027 spin_lock_irqsave(&ioapic_lock, flags);
1028 *(((int *)&entry)+0) = io_apic_read(apic, 0x10+i*2);
1029 *(((int *)&entry)+1) = io_apic_read(apic, 0x11+i*2);
1030 spin_unlock_irqrestore(&ioapic_lock, flags);
1031
1032 printk(KERN_DEBUG " %02x %03X %02X ",
1033 i,
1034 entry.dest.logical.logical_dest,
1035 entry.dest.physical.physical_dest
1036 );
1037
1038 printk("%1d %1d %1d %1d %1d %1d %1d %02X\n",
1039 entry.mask,
1040 entry.trigger,
1041 entry.irr,
1042 entry.polarity,
1043 entry.delivery_status,
1044 entry.dest_mode,
1045 entry.delivery_mode,
1046 entry.vector
1047 );
1048 }
1049 }
1050 if (use_pci_vector())
1051 printk(KERN_INFO "Using vector-based indexing\n");
1052 printk(KERN_DEBUG "IRQ to pin mappings:\n");
1053 for (i = 0; i < NR_IRQS; i++) {
1054 struct irq_pin_list *entry = irq_2_pin + i;
1055 if (entry->pin < 0)
1056 continue;
1057 if (use_pci_vector() && !platform_legacy_irq(i))
1058 printk(KERN_DEBUG "IRQ%d ", IO_APIC_VECTOR(i));
1059 else
1060 printk(KERN_DEBUG "IRQ%d ", i);
1061 for (;;) {
1062 printk("-> %d:%d", entry->apic, entry->pin);
1063 if (!entry->next)
1064 break;
1065 entry = irq_2_pin + entry->next;
1066 }
1067 printk("\n");
1068 }
1069
1070 printk(KERN_INFO ".................................... done.\n");
1071
1072 return;
1073 }
1074
1075 #if 0
1076
1077 static __apicdebuginit void print_APIC_bitfield (int base)
1078 {
1079 unsigned int v;
1080 int i, j;
1081
1082 if (apic_verbosity == APIC_QUIET)
1083 return;
1084
1085 printk(KERN_DEBUG "0123456789abcdef0123456789abcdef\n" KERN_DEBUG);
1086 for (i = 0; i < 8; i++) {
1087 v = apic_read(base + i*0x10);
1088 for (j = 0; j < 32; j++) {
1089 if (v & (1<<j))
1090 printk("1");
1091 else
1092 printk("0");
1093 }
1094 printk("\n");
1095 }
1096 }
1097
1098 void __apicdebuginit print_local_APIC(void * dummy)
1099 {
1100 unsigned int v, ver, maxlvt;
1101
1102 if (apic_verbosity == APIC_QUIET)
1103 return;
1104
1105 printk("\n" KERN_DEBUG "printing local APIC contents on CPU#%d/%d:\n",
1106 smp_processor_id(), hard_smp_processor_id());
1107 v = apic_read(APIC_ID);
1108 printk(KERN_INFO "... APIC ID: %08x (%01x)\n", v, GET_APIC_ID(v));
1109 v = apic_read(APIC_LVR);
1110 printk(KERN_INFO "... APIC VERSION: %08x\n", v);
1111 ver = GET_APIC_VERSION(v);
1112 maxlvt = get_maxlvt();
1113
1114 v = apic_read(APIC_TASKPRI);
1115 printk(KERN_DEBUG "... APIC TASKPRI: %08x (%02x)\n", v, v & APIC_TPRI_MASK);
1116
1117 v = apic_read(APIC_ARBPRI);
1118 printk(KERN_DEBUG "... APIC ARBPRI: %08x (%02x)\n", v,
1119 v & APIC_ARBPRI_MASK);
1120 v = apic_read(APIC_PROCPRI);
1121 printk(KERN_DEBUG "... APIC PROCPRI: %08x\n", v);
1122
1123 v = apic_read(APIC_EOI);
1124 printk(KERN_DEBUG "... APIC EOI: %08x\n", v);
1125 v = apic_read(APIC_RRR);
1126 printk(KERN_DEBUG "... APIC RRR: %08x\n", v);
1127 v = apic_read(APIC_LDR);
1128 printk(KERN_DEBUG "... APIC LDR: %08x\n", v);
1129 v = apic_read(APIC_DFR);
1130 printk(KERN_DEBUG "... APIC DFR: %08x\n", v);
1131 v = apic_read(APIC_SPIV);
1132 printk(KERN_DEBUG "... APIC SPIV: %08x\n", v);
1133
1134 printk(KERN_DEBUG "... APIC ISR field:\n");
1135 print_APIC_bitfield(APIC_ISR);
1136 printk(KERN_DEBUG "... APIC TMR field:\n");
1137 print_APIC_bitfield(APIC_TMR);
1138 printk(KERN_DEBUG "... APIC IRR field:\n");
1139 print_APIC_bitfield(APIC_IRR);
1140
1141 v = apic_read(APIC_ESR);
1142 printk(KERN_DEBUG "... APIC ESR: %08x\n", v);
1143
1144 v = apic_read(APIC_ICR);
1145 printk(KERN_DEBUG "... APIC ICR: %08x\n", v);
1146 v = apic_read(APIC_ICR2);
1147 printk(KERN_DEBUG "... APIC ICR2: %08x\n", v);
1148
1149 v = apic_read(APIC_LVTT);
1150 printk(KERN_DEBUG "... APIC LVTT: %08x\n", v);
1151
1152 if (maxlvt > 3) { /* PC is LVT#4. */
1153 v = apic_read(APIC_LVTPC);
1154 printk(KERN_DEBUG "... APIC LVTPC: %08x\n", v);
1155 }
1156 v = apic_read(APIC_LVT0);
1157 printk(KERN_DEBUG "... APIC LVT0: %08x\n", v);
1158 v = apic_read(APIC_LVT1);
1159 printk(KERN_DEBUG "... APIC LVT1: %08x\n", v);
1160
1161 if (maxlvt > 2) { /* ERR is LVT#3. */
1162 v = apic_read(APIC_LVTERR);
1163 printk(KERN_DEBUG "... APIC LVTERR: %08x\n", v);
1164 }
1165
1166 v = apic_read(APIC_TMICT);
1167 printk(KERN_DEBUG "... APIC TMICT: %08x\n", v);
1168 v = apic_read(APIC_TMCCT);
1169 printk(KERN_DEBUG "... APIC TMCCT: %08x\n", v);
1170 v = apic_read(APIC_TDCR);
1171 printk(KERN_DEBUG "... APIC TDCR: %08x\n", v);
1172 printk("\n");
1173 }
1174
1175 void print_all_local_APICs (void)
1176 {
1177 on_each_cpu(print_local_APIC, NULL, 1, 1);
1178 }
1179
1180 void __apicdebuginit print_PIC(void)
1181 {
1182 unsigned int v;
1183 unsigned long flags;
1184
1185 if (apic_verbosity == APIC_QUIET)
1186 return;
1187
1188 printk(KERN_DEBUG "\nprinting PIC contents\n");
1189
1190 spin_lock_irqsave(&i8259A_lock, flags);
1191
1192 v = inb(0xa1) << 8 | inb(0x21);
1193 printk(KERN_DEBUG "... PIC IMR: %04x\n", v);
1194
1195 v = inb(0xa0) << 8 | inb(0x20);
1196 printk(KERN_DEBUG "... PIC IRR: %04x\n", v);
1197
1198 outb(0x0b,0xa0);
1199 outb(0x0b,0x20);
1200 v = inb(0xa0) << 8 | inb(0x20);
1201 outb(0x0a,0xa0);
1202 outb(0x0a,0x20);
1203
1204 spin_unlock_irqrestore(&i8259A_lock, flags);
1205
1206 printk(KERN_DEBUG "... PIC ISR: %04x\n", v);
1207
1208 v = inb(0x4d1) << 8 | inb(0x4d0);
1209 printk(KERN_DEBUG "... PIC ELCR: %04x\n", v);
1210 }
1211
1212 #endif /* 0 */
1213
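/*
 * Early IO-APIC bring-up: reset the irq_2_pin map, read how many
 * redirection entries each IO-APIC has, look for a pin that the i8259
 * drives in ExtINT mode, and finally mask every pin so nothing set up
 * by the firmware is left active.
 */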
1214 static void __init enable_IO_APIC(void)
1215 {
1216 union IO_APIC_reg_01 reg_01;
1217 int i8259_apic, i8259_pin;
1218 int i, apic;
1219 unsigned long flags;
1220
1221 for (i = 0; i < PIN_MAP_SIZE; i++) {
1222 irq_2_pin[i].pin = -1;
1223 irq_2_pin[i].next = 0;
1224 }
1225 if (!pirqs_enabled)
1226 for (i = 0; i < MAX_PIRQS; i++)
1227 pirq_entries[i] = -1;
1228
1229 /*
1230 * The number of IO-APIC IRQ registers (== #pins):
1231 */
1232 for (apic = 0; apic < nr_ioapics; apic++) {
1233 spin_lock_irqsave(&ioapic_lock, flags);
1234 reg_01.raw = io_apic_read(apic, 1);
1235 spin_unlock_irqrestore(&ioapic_lock, flags);
1236 nr_ioapic_registers[apic] = reg_01.bits.entries+1;
1237 }
1238 for(apic = 0; apic < nr_ioapics; apic++) {
1239 int pin;
1240 /* See if any of the pins is in ExtINT mode */
1241 for (pin = 0; pin < nr_ioapic_registers[apic]; pin++) {
1242 struct IO_APIC_route_entry entry;
1243 spin_lock_irqsave(&ioapic_lock, flags);
1244 *(((int *)&entry) + 0) = io_apic_read(apic, 0x10 + 2 * pin);
1245 *(((int *)&entry) + 1) = io_apic_read(apic, 0x11 + 2 * pin);
1246 spin_unlock_irqrestore(&ioapic_lock, flags);
1247
1248
1249 /* If the interrupt line is enabled and in ExtInt mode
1250 * I have found the pin where the i8259 is connected.
1251 */
1252 if ((entry.mask == 0) && (entry.delivery_mode == dest_ExtINT)) {
1253 ioapic_i8259.apic = apic;
1254 ioapic_i8259.pin = pin;
1255 goto found_i8259;
1256 }
1257 }
1258 }
1259 found_i8259:
1260 	/* Look to see if the MP table has reported the ExtINT */
1261 i8259_pin = find_isa_irq_pin(0, mp_ExtINT);
1262 i8259_apic = find_isa_irq_apic(0, mp_ExtINT);
1263 /* Trust the MP table if nothing is setup in the hardware */
1264 if ((ioapic_i8259.pin == -1) && (i8259_pin >= 0)) {
1265 printk(KERN_WARNING "ExtINT not setup in hardware but reported by MP table\n");
1266 ioapic_i8259.pin = i8259_pin;
1267 ioapic_i8259.apic = i8259_apic;
1268 }
1269 /* Complain if the MP table and the hardware disagree */
1270 if (((ioapic_i8259.apic != i8259_apic) || (ioapic_i8259.pin != i8259_pin)) &&
1271 (i8259_pin >= 0) && (ioapic_i8259.pin >= 0))
1272 {
1273 printk(KERN_WARNING "ExtINT in hardware and MP table differ\n");
1274 }
1275
1276 /*
1277 * Do not trust the IO-APIC being empty at bootup
1278 */
1279 clear_IO_APIC();
1280 }
1281
1282 /*
1283 * Not an __init, needed by the reboot code
1284 */
1285 void disable_IO_APIC(void)
1286 {
1287 /*
1288 * Clear the IO-APIC before rebooting:
1289 */
1290 clear_IO_APIC();
1291
1292 /*
1293 * If the i8259 is routed through an IOAPIC
1294 * Put that IOAPIC in virtual wire mode
1295 * so legacy interrupts can be delivered.
1296 */
1297 if (ioapic_i8259.pin != -1) {
1298 struct IO_APIC_route_entry entry;
1299 unsigned long flags;
1300
1301 memset(&entry, 0, sizeof(entry));
1302 entry.mask = 0; /* Enabled */
1303 entry.trigger = 0; /* Edge */
1304 entry.irr = 0;
1305 entry.polarity = 0; /* High */
1306 entry.delivery_status = 0;
1307 entry.dest_mode = 0; /* Physical */
1308 entry.delivery_mode = dest_ExtINT; /* ExtInt */
1309 entry.vector = 0;
1310 entry.dest.physical.physical_dest =
1311 GET_APIC_ID(apic_read(APIC_ID));
1312
1313 /*
1314 * Add it to the IO-APIC irq-routing table:
1315 */
1316 spin_lock_irqsave(&ioapic_lock, flags);
1317 io_apic_write(ioapic_i8259.apic, 0x11+2*ioapic_i8259.pin,
1318 *(((int *)&entry)+1));
1319 io_apic_write(ioapic_i8259.apic, 0x10+2*ioapic_i8259.pin,
1320 *(((int *)&entry)+0));
1321 spin_unlock_irqrestore(&ioapic_lock, flags);
1322 }
1323
1324 disconnect_bsp_APIC(ioapic_i8259.pin != -1);
1325 }
1326
1327 /*
1328 * function to set the IO-APIC physical IDs based on the
1329 * values stored in the MPC table.
1330 *
1331 * by Matt Domsch <Matt_Domsch@dell.com> Tue Dec 21 12:25:05 CST 1999
1332 */
1333
1334 static void __init setup_ioapic_ids_from_mpc (void)
1335 {
1336 union IO_APIC_reg_00 reg_00;
1337 int apic;
1338 int i;
1339 unsigned char old_id;
1340 unsigned long flags;
1341
1342 /*
1343 * Set the IOAPIC ID to the value stored in the MPC table.
1344 */
1345 for (apic = 0; apic < nr_ioapics; apic++) {
1346
1347 /* Read the register 0 value */
1348 spin_lock_irqsave(&ioapic_lock, flags);
1349 reg_00.raw = io_apic_read(apic, 0);
1350 spin_unlock_irqrestore(&ioapic_lock, flags);
1351
1352 old_id = mp_ioapics[apic].mpc_apicid;
1353
1354
1355 printk(KERN_INFO "Using IO-APIC %d\n", mp_ioapics[apic].mpc_apicid);
1356
1357
1358 /*
1359 * We need to adjust the IRQ routing table
1360 * if the ID changed.
1361 */
1362 if (old_id != mp_ioapics[apic].mpc_apicid)
1363 for (i = 0; i < mp_irq_entries; i++)
1364 if (mp_irqs[i].mpc_dstapic == old_id)
1365 mp_irqs[i].mpc_dstapic
1366 = mp_ioapics[apic].mpc_apicid;
1367
1368 /*
1369 * Read the right value from the MPC table and
1370 * write it into the ID register.
1371 */
1372 apic_printk(APIC_VERBOSE,KERN_INFO "...changing IO-APIC physical APIC ID to %d ...",
1373 mp_ioapics[apic].mpc_apicid);
1374
1375 reg_00.bits.ID = mp_ioapics[apic].mpc_apicid;
1376 spin_lock_irqsave(&ioapic_lock, flags);
1377 io_apic_write(apic, 0, reg_00.raw);
1378 spin_unlock_irqrestore(&ioapic_lock, flags);
1379
1380 /*
1381 * Sanity check
1382 */
1383 spin_lock_irqsave(&ioapic_lock, flags);
1384 reg_00.raw = io_apic_read(apic, 0);
1385 spin_unlock_irqrestore(&ioapic_lock, flags);
1386 if (reg_00.bits.ID != mp_ioapics[apic].mpc_apicid)
1387 printk("could not set ID!\n");
1388 else
1389 apic_printk(APIC_VERBOSE," ok.\n");
1390 }
1391 }
1392
1393 /*
1394 * There is a nasty bug in some older SMP boards, their mptable lies
1395 * about the timer IRQ. We do the following to work around the situation:
1396 *
1397 * - timer IRQ defaults to IO-APIC IRQ
1398 * - if this function detects that timer IRQs are defunct, then we fall
1399 * back to ISA timer IRQs
1400 */
1401 static int __init timer_irq_works(void)
1402 {
1403 unsigned long t1 = jiffies;
1404
1405 local_irq_enable();
1406 /* Let ten ticks pass... */
1407 mdelay((10 * 1000) / HZ);
1408
1409 /*
1410 * Expect a few ticks at least, to be sure some possible
1411 	 * glue logic does not lock up after the first one or
1412 	 * two ticks in a non-ExtINT mode.  Also the local APIC
1413 * might have cached one ExtINT interrupt. Finally, at
1414 * least one tick may be lost due to delays.
1415 */
1416
1417 /* jiffies wrap? */
1418 if (jiffies - t1 > 4)
1419 return 1;
1420 return 0;
1421 }
1422
1423 /*
1424 * In the SMP+IOAPIC case it might happen that there are an unspecified
1425 * number of pending IRQ events unhandled. These cases are very rare,
1426 * so we 'resend' these IRQs via IPIs, to the same CPU. It's much
1427 * better to do it this way as thus we do not have to be aware of
1428 * 'pending' interrupts in the IRQ path, except at this point.
1429 */
1430 /*
1431 * Edge triggered needs to resend any interrupt
1432 * that was delayed but this is now handled in the device
1433 * independent code.
1434 */
1435
1436 /*
1437  * Starting up an edge-triggered IO-APIC interrupt is
1438  * nasty - we need to make sure that we get the edge.
1439  * If it is already asserted for some reason, we need to
1440  * return 1 to indicate that it was pending.
1441 *
1442 * This is not complete - we should be able to fake
1443 * an edge even if it isn't on the 8259A...
1444 */
1445
1446 static unsigned int startup_edge_ioapic_irq(unsigned int irq)
1447 {
1448 int was_pending = 0;
1449 unsigned long flags;
1450
1451 spin_lock_irqsave(&ioapic_lock, flags);
1452 if (irq < 16) {
1453 disable_8259A_irq(irq);
1454 if (i8259A_irq_pending(irq))
1455 was_pending = 1;
1456 }
1457 __unmask_IO_APIC_irq(irq);
1458 spin_unlock_irqrestore(&ioapic_lock, flags);
1459
1460 return was_pending;
1461 }
1462
1463 /*
1464 * Once we have recorded IRQ_PENDING already, we can mask the
1465 * interrupt for real. This prevents IRQ storms from unhandled
1466 * devices.
1467 */
1468 static void ack_edge_ioapic_irq(unsigned int irq)
1469 {
1470 move_irq(irq);
1471 if ((irq_desc[irq].status & (IRQ_PENDING | IRQ_DISABLED))
1472 == (IRQ_PENDING | IRQ_DISABLED))
1473 mask_IO_APIC_irq(irq);
1474 ack_APIC_irq();
1475 }
1476
1477 /*
1478 * Level triggered interrupts can just be masked,
1479 * and shutting down and starting up the interrupt
1480 * is the same as enabling and disabling them -- except
1481 * with a startup need to return a "was pending" value.
1482 *
1483 * Level triggered interrupts are special because we
1484 * do not touch any IO-APIC register while handling
1485 * them. We ack the APIC in the end-IRQ handler, not
1486 * in the start-IRQ-handler. Protection against reentrance
1487 * from the same interrupt is still provided, both by the
1488 * generic IRQ layer and by the fact that an unacked local
1489 * APIC does not accept IRQs.
1490 */
1491 static unsigned int startup_level_ioapic_irq (unsigned int irq)
1492 {
1493 unmask_IO_APIC_irq(irq);
1494
1495 return 0; /* don't check for pending */
1496 }
1497
1498 static void end_level_ioapic_irq (unsigned int irq)
1499 {
1500 move_irq(irq);
1501 ack_APIC_irq();
1502 }
1503
1504 #ifdef CONFIG_PCI_MSI
1505 static unsigned int startup_edge_ioapic_vector(unsigned int vector)
1506 {
1507 int irq = vector_to_irq(vector);
1508
1509 return startup_edge_ioapic_irq(irq);
1510 }
1511
1512 static void ack_edge_ioapic_vector(unsigned int vector)
1513 {
1514 int irq = vector_to_irq(vector);
1515
1516 move_native_irq(vector);
1517 ack_edge_ioapic_irq(irq);
1518 }
1519
1520 static unsigned int startup_level_ioapic_vector (unsigned int vector)
1521 {
1522 int irq = vector_to_irq(vector);
1523
1524 return startup_level_ioapic_irq (irq);
1525 }
1526
1527 static void end_level_ioapic_vector (unsigned int vector)
1528 {
1529 int irq = vector_to_irq(vector);
1530
1531 move_native_irq(vector);
1532 end_level_ioapic_irq(irq);
1533 }
1534
1535 static void mask_IO_APIC_vector (unsigned int vector)
1536 {
1537 int irq = vector_to_irq(vector);
1538
1539 mask_IO_APIC_irq(irq);
1540 }
1541
1542 static void unmask_IO_APIC_vector (unsigned int vector)
1543 {
1544 int irq = vector_to_irq(vector);
1545
1546 unmask_IO_APIC_irq(irq);
1547 }
1548
1549 #ifdef CONFIG_SMP
1550 static void set_ioapic_affinity_vector (unsigned int vector,
1551 cpumask_t cpu_mask)
1552 {
1553 int irq = vector_to_irq(vector);
1554
1555 set_native_irq_info(vector, cpu_mask);
1556 set_ioapic_affinity_irq(irq, cpu_mask);
1557 }
1558 #endif // CONFIG_SMP
1559 #endif // CONFIG_PCI_MSI
1560
1561 /*
1562 * Level and edge triggered IO-APIC interrupts need different handling,
1563 * so we use two separate IRQ descriptors. Edge triggered IRQs can be
1564 * handled with the level-triggered descriptor, but that one has slightly
1565 * more overhead. Level-triggered interrupts cannot be handled with the
1566 * edge-triggered handler, without risking IRQ storms and other ugly
1567 * races.
1568 */
1569
1570 static struct hw_interrupt_type ioapic_edge_type __read_mostly = {
1571 .typename = "IO-APIC-edge",
1572 .startup = startup_edge_ioapic,
1573 .shutdown = shutdown_edge_ioapic,
1574 .enable = enable_edge_ioapic,
1575 .disable = disable_edge_ioapic,
1576 .ack = ack_edge_ioapic,
1577 .end = end_edge_ioapic,
1578 #ifdef CONFIG_SMP
1579 .set_affinity = set_ioapic_affinity,
1580 #endif
1581 };
1582
1583 static struct hw_interrupt_type ioapic_level_type __read_mostly = {
1584 .typename = "IO-APIC-level",
1585 .startup = startup_level_ioapic,
1586 .shutdown = shutdown_level_ioapic,
1587 .enable = enable_level_ioapic,
1588 .disable = disable_level_ioapic,
1589 .ack = mask_and_ack_level_ioapic,
1590 .end = end_level_ioapic,
1591 #ifdef CONFIG_SMP
1592 .set_affinity = set_ioapic_affinity,
1593 #endif
1594 };
1595
1596 static inline void init_IO_APIC_traps(void)
1597 {
1598 int irq;
1599
1600 /*
1601 * NOTE! The local APIC isn't very good at handling
1602 * multiple interrupts at the same interrupt level.
1603 * As the interrupt level is determined by taking the
1604 * vector number and shifting that right by 4, we
1605 * want to spread these out a bit so that they don't
1606 * all fall in the same interrupt level.
1607 *
1608 * Also, we've got to be careful not to trash gate
1609 * 0x80, because int 0x80 is hm, kind of importantish. ;)
1610 */
1611 for (irq = 0; irq < NR_IRQS ; irq++) {
1612 int tmp = irq;
1613 if (use_pci_vector()) {
1614 if (!platform_legacy_irq(tmp))
1615 if ((tmp = vector_to_irq(tmp)) == -1)
1616 continue;
1617 }
1618 if (IO_APIC_IRQ(tmp) && !IO_APIC_VECTOR(tmp)) {
1619 /*
1620 * Hmm.. We don't have an entry for this,
1621 * so default to an old-fashioned 8259
1622 * interrupt if we can..
1623 */
1624 if (irq < 16)
1625 make_8259A_irq(irq);
1626 else
1627 /* Strange. Oh, well.. */
1628 irq_desc[irq].handler = &no_irq_type;
1629 }
1630 }
1631 }
1632
1633 static void enable_lapic_irq (unsigned int irq)
1634 {
1635 unsigned long v;
1636
1637 v = apic_read(APIC_LVT0);
1638 apic_write(APIC_LVT0, v & ~APIC_LVT_MASKED);
1639 }
1640
1641 static void disable_lapic_irq (unsigned int irq)
1642 {
1643 unsigned long v;
1644
1645 v = apic_read(APIC_LVT0);
1646 apic_write(APIC_LVT0, v | APIC_LVT_MASKED);
1647 }
1648
1649 static void ack_lapic_irq (unsigned int irq)
1650 {
1651 ack_APIC_irq();
1652 }
1653
1654 static void end_lapic_irq (unsigned int i) { /* nothing */ }
1655
1656 static struct hw_interrupt_type lapic_irq_type __read_mostly = {
1657 .typename = "local-APIC-edge",
1658 .startup = NULL, /* startup_irq() not used for IRQ0 */
1659 .shutdown = NULL, /* shutdown_irq() not used for IRQ0 */
1660 .enable = enable_lapic_irq,
1661 .disable = disable_lapic_irq,
1662 .ack = ack_lapic_irq,
1663 .end = end_lapic_irq,
1664 };
1665
1666 static void setup_nmi (void)
1667 {
1668 /*
1669 * Dirty trick to enable the NMI watchdog ...
1670 * We put the 8259A master into AEOI mode and
1671 * unmask on all local APICs LVT0 as NMI.
1672 *
1673 * The idea to use the 8259A in AEOI mode ('8259A Virtual Wire')
1674 * is from Maciej W. Rozycki - so we do not have to EOI from
1675 * the NMI handler or the timer interrupt.
1676 */
1677 printk(KERN_INFO "activating NMI Watchdog ...");
1678
1679 enable_NMI_through_LVT0(NULL);
1680
1681 printk(" done.\n");
1682 }
1683
1684 /*
1685  * This looks a bit hackish but it's about the only way of sending
1686 * a few INTA cycles to 8259As and any associated glue logic. ICR does
1687 * not support the ExtINT mode, unfortunately. We need to send these
1688 * cycles as some i82489DX-based boards have glue logic that keeps the
1689 * 8259A interrupt line asserted until INTA. --macro
1690 */
1691 static inline void unlock_ExtINT_logic(void)
1692 {
1693 int apic, pin, i;
1694 struct IO_APIC_route_entry entry0, entry1;
1695 unsigned char save_control, save_freq_select;
1696 unsigned long flags;
1697
1698 pin = find_isa_irq_pin(8, mp_INT);
1699 apic = find_isa_irq_apic(8, mp_INT);
1700 if (pin == -1)
1701 return;
1702
1703 spin_lock_irqsave(&ioapic_lock, flags);
1704 *(((int *)&entry0) + 1) = io_apic_read(apic, 0x11 + 2 * pin);
1705 *(((int *)&entry0) + 0) = io_apic_read(apic, 0x10 + 2 * pin);
1706 spin_unlock_irqrestore(&ioapic_lock, flags);
1707 clear_IO_APIC_pin(apic, pin);
1708
1709 memset(&entry1, 0, sizeof(entry1));
1710
1711 entry1.dest_mode = 0; /* physical delivery */
1712 entry1.mask = 0; /* unmask IRQ now */
1713 entry1.dest.physical.physical_dest = hard_smp_processor_id();
1714 entry1.delivery_mode = dest_ExtINT;
1715 entry1.polarity = entry0.polarity;
1716 entry1.trigger = 0;
1717 entry1.vector = 0;
1718
1719 spin_lock_irqsave(&ioapic_lock, flags);
1720 io_apic_write(apic, 0x11 + 2 * pin, *(((int *)&entry1) + 1));
1721 io_apic_write(apic, 0x10 + 2 * pin, *(((int *)&entry1) + 0));
1722 spin_unlock_irqrestore(&ioapic_lock, flags);
1723
1724 save_control = CMOS_READ(RTC_CONTROL);
1725 save_freq_select = CMOS_READ(RTC_FREQ_SELECT);
1726 CMOS_WRITE((save_freq_select & ~RTC_RATE_SELECT) | 0x6,
1727 RTC_FREQ_SELECT);
1728 CMOS_WRITE(save_control | RTC_PIE, RTC_CONTROL);
1729
1730 i = 100;
1731 while (i-- > 0) {
1732 mdelay(10);
1733 if ((CMOS_READ(RTC_INTR_FLAGS) & RTC_PF) == RTC_PF)
1734 i -= 10;
1735 }
1736
1737 CMOS_WRITE(save_control, RTC_CONTROL);
1738 CMOS_WRITE(save_freq_select, RTC_FREQ_SELECT);
1739 clear_IO_APIC_pin(apic, pin);
1740
1741 spin_lock_irqsave(&ioapic_lock, flags);
1742 io_apic_write(apic, 0x11 + 2 * pin, *(((int *)&entry0) + 1));
1743 io_apic_write(apic, 0x10 + 2 * pin, *(((int *)&entry0) + 0));
1744 spin_unlock_irqrestore(&ioapic_lock, flags);
1745 }
1746
1747 /*
1748 * This code may look a bit paranoid, but it's supposed to cooperate with
1749 * a wide range of boards and BIOS bugs. Fortunately only the timer IRQ
1750 * is so screwy. Thanks to Brian Perkins for testing/hacking this beast
1751 * fanatically on his truly buggy board.
1752 */
1753 static inline void check_timer(void)
1754 {
1755 int apic1, pin1, apic2, pin2;
1756 int vector;
1757
1758 /*
1759 * get/set the timer IRQ vector:
1760 */
1761 disable_8259A_irq(0);
1762 vector = assign_irq_vector(0);
1763 set_intr_gate(vector, interrupt[0]);
1764
1765 /*
1766 * Subtle, code in do_timer_interrupt() expects an AEOI
1767 * mode for the 8259A whenever interrupts are routed
1768 * through I/O APICs. Also IRQ0 has to be enabled in
1769 * the 8259A which implies the virtual wire has to be
1770 * disabled in the local APIC.
1771 */
1772 apic_write(APIC_LVT0, APIC_LVT_MASKED | APIC_DM_EXTINT);
1773 init_8259A(1);
1774 enable_8259A_irq(0);
1775
1776 pin1 = find_isa_irq_pin(0, mp_INT);
1777 apic1 = find_isa_irq_apic(0, mp_INT);
1778 pin2 = ioapic_i8259.pin;
1779 apic2 = ioapic_i8259.apic;
1780
1781 apic_printk(APIC_VERBOSE,KERN_INFO "..TIMER: vector=0x%02X apic1=%d pin1=%d apic2=%d pin2=%d\n",
1782 vector, apic1, pin1, apic2, pin2);
1783
1784 if (pin1 != -1) {
1785 /*
1786 * Ok, does IRQ0 through the IOAPIC work?
1787 */
1788 unmask_IO_APIC_irq(0);
1789 if (!no_timer_check && timer_irq_works()) {
1790 nmi_watchdog_default();
1791 if (nmi_watchdog == NMI_IO_APIC) {
1792 disable_8259A_irq(0);
1793 setup_nmi();
1794 enable_8259A_irq(0);
1795 }
1796 if (disable_timer_pin_1 > 0)
1797 clear_IO_APIC_pin(0, pin1);
1798 return;
1799 }
1800 clear_IO_APIC_pin(apic1, pin1);
1801 apic_printk(APIC_QUIET,KERN_ERR "..MP-BIOS bug: 8254 timer not "
1802 "connected to IO-APIC\n");
1803 }
1804
1805 apic_printk(APIC_VERBOSE,KERN_INFO "...trying to set up timer (IRQ0) "
1806 "through the 8259A ... ");
1807 if (pin2 != -1) {
1808 apic_printk(APIC_VERBOSE,"\n..... (found apic %d pin %d) ...",
1809 apic2, pin2);
1810 /*
1811 * legacy devices should be connected to IO APIC #0
1812 */
1813 setup_ExtINT_IRQ0_pin(apic2, pin2, vector);
1814 if (timer_irq_works()) {
1815 printk("works.\n");
1816 nmi_watchdog_default();
1817 if (nmi_watchdog == NMI_IO_APIC) {
1818 setup_nmi();
1819 }
1820 return;
1821 }
1822 /*
1823 * Cleanup, just in case ...
1824 */
1825 clear_IO_APIC_pin(apic2, pin2);
1826 }
1827 printk(" failed.\n");
1828
1829 if (nmi_watchdog) {
1830 printk(KERN_WARNING "timer doesn't work through the IO-APIC - disabling NMI Watchdog!\n");
1831 nmi_watchdog = 0;
1832 }
1833
1834 apic_printk(APIC_VERBOSE, KERN_INFO "...trying to set up timer as Virtual Wire IRQ...");
1835
1836 disable_8259A_irq(0);
1837 irq_desc[0].handler = &lapic_irq_type;
1838 apic_write(APIC_LVT0, APIC_DM_FIXED | vector); /* Fixed mode */
1839 enable_8259A_irq(0);
1840
1841 if (timer_irq_works()) {
1842 apic_printk(APIC_QUIET, " works.\n");
1843 return;
1844 }
1845 apic_write(APIC_LVT0, APIC_LVT_MASKED | APIC_DM_FIXED | vector);
1846 apic_printk(APIC_VERBOSE," failed.\n");
1847
1848 apic_printk(APIC_VERBOSE, KERN_INFO "...trying to set up timer as ExtINT IRQ...");
1849
1850 init_8259A(0);
1851 make_8259A_irq(0);
1852 apic_write(APIC_LVT0, APIC_DM_EXTINT);
1853
1854 unlock_ExtINT_logic();
1855
1856 if (timer_irq_works()) {
1857 apic_printk(APIC_VERBOSE," works.\n");
1858 return;
1859 }
1860 apic_printk(APIC_VERBOSE," failed :(.\n");
1861 panic("IO-APIC + timer doesn't work! Try using the 'noapic' kernel parameter\n");
1862 }
1863
1864 static int __init notimercheck(char *s)
1865 {
1866 no_timer_check = 1;
1867 return 1;
1868 }
1869 __setup("no_timer_check", notimercheck);
1870
1871 /*
1872 *
1873  * IRQs that are handled by the PIC in the MPS IOAPIC case.
1874  * - IRQ2 is the cascade IRQ, and cannot be an io-apic IRQ.
1875 * Linux doesn't really care, as it's not actually used
1876 * for any interrupt handling anyway.
1877 */
1878 #define PIC_IRQS (1<<2)
1879
1880 void __init setup_IO_APIC(void)
1881 {
1882 enable_IO_APIC();
1883
1884 if (acpi_ioapic)
1885 io_apic_irqs = ~0; /* all IRQs go through IOAPIC */
1886 else
1887 io_apic_irqs = ~PIC_IRQS;
1888
1889 apic_printk(APIC_VERBOSE, "ENABLING IO-APIC IRQs\n");
1890
1891 /*
1892 * Set up the IO-APIC IRQ routing table.
1893 */
1894 if (!acpi_ioapic)
1895 setup_ioapic_ids_from_mpc();
1896 sync_Arb_IDs();
1897 setup_IO_APIC_irqs();
1898 init_IO_APIC_traps();
1899 check_timer();
1900 if (!acpi_ioapic)
1901 print_IO_APIC();
1902 }
1903
1904 struct sysfs_ioapic_data {
1905 struct sys_device dev;
1906 struct IO_APIC_route_entry entry[0];
1907 };
1908 static struct sysfs_ioapic_data * mp_ioapic_data[MAX_IO_APICS];
1909
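/*
 * Suspend/resume support: ioapic_suspend() snapshots every redirection
 * entry into mp_ioapic_data[], and ioapic_resume() restores the APIC ID
 * and replays the saved entries, again as two 32-bit halves each.
 */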
1910 static int ioapic_suspend(struct sys_device *dev, pm_message_t state)
1911 {
1912 struct IO_APIC_route_entry *entry;
1913 struct sysfs_ioapic_data *data;
1914 unsigned long flags;
1915 int i;
1916
1917 data = container_of(dev, struct sysfs_ioapic_data, dev);
1918 entry = data->entry;
1919 spin_lock_irqsave(&ioapic_lock, flags);
1920 for (i = 0; i < nr_ioapic_registers[dev->id]; i ++, entry ++ ) {
1921 *(((int *)entry) + 1) = io_apic_read(dev->id, 0x11 + 2 * i);
1922 *(((int *)entry) + 0) = io_apic_read(dev->id, 0x10 + 2 * i);
1923 }
1924 spin_unlock_irqrestore(&ioapic_lock, flags);
1925
1926 return 0;
1927 }
1928
1929 static int ioapic_resume(struct sys_device *dev)
1930 {
1931 struct IO_APIC_route_entry *entry;
1932 struct sysfs_ioapic_data *data;
1933 unsigned long flags;
1934 union IO_APIC_reg_00 reg_00;
1935 int i;
1936
1937 data = container_of(dev, struct sysfs_ioapic_data, dev);
1938 entry = data->entry;
1939
1940 spin_lock_irqsave(&ioapic_lock, flags);
1941 reg_00.raw = io_apic_read(dev->id, 0);
1942 if (reg_00.bits.ID != mp_ioapics[dev->id].mpc_apicid) {
1943 reg_00.bits.ID = mp_ioapics[dev->id].mpc_apicid;
1944 io_apic_write(dev->id, 0, reg_00.raw);
1945 }
1946 for (i = 0; i < nr_ioapic_registers[dev->id]; i ++, entry ++ ) {
1947 io_apic_write(dev->id, 0x11+2*i, *(((int *)entry)+1));
1948 io_apic_write(dev->id, 0x10+2*i, *(((int *)entry)+0));
1949 }
1950 spin_unlock_irqrestore(&ioapic_lock, flags);
1951
1952 return 0;
1953 }
1954
1955 static struct sysdev_class ioapic_sysdev_class = {
1956 set_kset_name("ioapic"),
1957 .suspend = ioapic_suspend,
1958 .resume = ioapic_resume,
1959 };
1960
1961 static int __init ioapic_init_sysfs(void)
1962 {
1963 struct sys_device * dev;
1964 int i, size, error = 0;
1965
1966 error = sysdev_class_register(&ioapic_sysdev_class);
1967 if (error)
1968 return error;
1969
1970 for (i = 0; i < nr_ioapics; i++ ) {
1971 size = sizeof(struct sys_device) + nr_ioapic_registers[i]
1972 * sizeof(struct IO_APIC_route_entry);
1973 mp_ioapic_data[i] = kmalloc(size, GFP_KERNEL);
1974 if (!mp_ioapic_data[i]) {
1975 printk(KERN_ERR "Can't suspend/resume IOAPIC %d\n", i);
1976 continue;
1977 }
1978 memset(mp_ioapic_data[i], 0, size);
1979 dev = &mp_ioapic_data[i]->dev;
1980 dev->id = i;
1981 dev->cls = &ioapic_sysdev_class;
1982 error = sysdev_register(dev);
1983 if (error) {
1984 kfree(mp_ioapic_data[i]);
1985 mp_ioapic_data[i] = NULL;
1986 printk(KERN_ERR "Can't suspend/resume IOAPIC %d\n", i);
1987 continue;
1988 }
1989 }
1990
1991 return 0;
1992 }
1993
1994 device_initcall(ioapic_init_sysfs);
1995
1996 /* --------------------------------------------------------------------------
1997 ACPI-based IOAPIC Configuration
1998 -------------------------------------------------------------------------- */
1999
2000 #ifdef CONFIG_ACPI
2001
2002 #define IO_APIC_MAX_ID 0xFE
2003
2004 int __init io_apic_get_version (int ioapic)
2005 {
2006 union IO_APIC_reg_01 reg_01;
2007 unsigned long flags;
2008
2009 spin_lock_irqsave(&ioapic_lock, flags);
2010 reg_01.raw = io_apic_read(ioapic, 1);
2011 spin_unlock_irqrestore(&ioapic_lock, flags);
2012
2013 return reg_01.bits.version;
2014 }
2015
2016
2017 int __init io_apic_get_redir_entries (int ioapic)
2018 {
2019 union IO_APIC_reg_01 reg_01;
2020 unsigned long flags;
2021
2022 spin_lock_irqsave(&ioapic_lock, flags);
2023 reg_01.raw = io_apic_read(ioapic, 1);
2024 spin_unlock_irqrestore(&ioapic_lock, flags);
2025
2026 return reg_01.bits.entries;
2027 }
2028
2029
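/*
 * Used for ACPI-based IO-APIC configuration: program a single IO-APIC
 * pin for a PCI interrupt.  The entry is written masked; it is unmasked
 * later when a driver actually requests the IRQ.
 */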
2030 int io_apic_set_pci_routing (int ioapic, int pin, int irq, int triggering, int polarity)
2031 {
2032 struct IO_APIC_route_entry entry;
2033 unsigned long flags;
2034
2035 if (!IO_APIC_IRQ(irq)) {
2036 apic_printk(APIC_QUIET,KERN_ERR "IOAPIC[%d]: Invalid reference to IRQ 0\n",
2037 ioapic);
2038 return -EINVAL;
2039 }
2040
2041 /*
2042 * Generate a PCI IRQ routing entry and program the IOAPIC accordingly.
2043 * Note that we mask (disable) IRQs now -- these get enabled when the
2044 * corresponding device driver registers for this IRQ.
2045 */
2046
2047 memset(&entry,0,sizeof(entry));
2048
2049 entry.delivery_mode = INT_DELIVERY_MODE;
2050 entry.dest_mode = INT_DEST_MODE;
2051 entry.dest.logical.logical_dest = cpu_mask_to_apicid(TARGET_CPUS);
2052 entry.trigger = triggering;
2053 entry.polarity = polarity;
2054 entry.mask = 1; /* Disabled (masked) */
2055
2056 irq = gsi_irq_sharing(irq);
2057 /*
2058 * IRQs < 16 are already in the irq_2_pin[] map
2059 */
2060 if (irq >= 16)
2061 add_pin_to_irq(irq, ioapic, pin);
2062
2063 entry.vector = assign_irq_vector(irq);
2064
2065 apic_printk(APIC_VERBOSE,KERN_DEBUG "IOAPIC[%d]: Set PCI routing entry (%d-%d -> 0x%x -> "
2066 "IRQ %d Mode:%i Active:%i)\n", ioapic,
2067 mp_ioapics[ioapic].mpc_apicid, pin, entry.vector, irq,
2068 triggering, polarity);
2069
2070 ioapic_register_intr(irq, entry.vector, triggering);
2071
2072 if (!ioapic && (irq < 16))
2073 disable_8259A_irq(irq);
2074
2075 spin_lock_irqsave(&ioapic_lock, flags);
2076 io_apic_write(ioapic, 0x11+2*pin, *(((int *)&entry)+1));
2077 io_apic_write(ioapic, 0x10+2*pin, *(((int *)&entry)+0));
2078 set_native_irq_info(use_pci_vector() ? entry.vector : irq, TARGET_CPUS);
2079 spin_unlock_irqrestore(&ioapic_lock, flags);
2080
2081 return 0;
2082 }
2083
2084 #endif /* CONFIG_ACPI */
2085
2086
2087 /*
2088  * This function is currently only a helper for the SMP boot process, where
2089  * we need to reprogram the ioredtbls to cater for the CPUs which have come online,
2090  * so the mask in all cases should simply be TARGET_CPUS
2091 */
2092 #ifdef CONFIG_SMP
2093 void __init setup_ioapic_dest(void)
2094 {
2095 int pin, ioapic, irq, irq_entry;
2096
2097 if (skip_ioapic_setup == 1)
2098 return;
2099
2100 for (ioapic = 0; ioapic < nr_ioapics; ioapic++) {
2101 for (pin = 0; pin < nr_ioapic_registers[ioapic]; pin++) {
2102 irq_entry = find_irq_entry(ioapic, pin, mp_INT);
2103 if (irq_entry == -1)
2104 continue;
2105 irq = pin_2_irq(irq_entry, ioapic, pin);
2106 set_ioapic_affinity_irq(irq, TARGET_CPUS);
2107 }
2108
2109 }
2110 }
2111 #endif