[PATCH] x86_64: Automatically enable apicmaintimer on ATI boards
arch/x86_64/kernel/io_apic.c
1 /*
2 * Intel IO-APIC support for multi-Pentium hosts.
3 *
4 * Copyright (C) 1997, 1998, 1999, 2000 Ingo Molnar, Hajnalka Szabo
5 *
6 * Many thanks to Stig Venaas for trying out countless experimental
7 * patches and reporting/debugging problems patiently!
8 *
9 * (c) 1999, Multiple IO-APIC support, developed by
10 * Ken-ichi Yaku <yaku@css1.kbnes.nec.co.jp> and
11 * Hidemi Kishimoto <kisimoto@css1.kbnes.nec.co.jp>,
12 * further tested and cleaned up by Zach Brown <zab@redhat.com>
13 * and Ingo Molnar <mingo@redhat.com>
14 *
15 * Fixes
16 * Maciej W. Rozycki : Bits for genuine 82489DX APICs;
17 * thanks to Eric Gilmore
18 * and Rolf G. Tews
19 * for testing these extensively
20 * Paul Diefenbaugh : Added full ACPI support
21 */
22
23 #include <linux/mm.h>
24 #include <linux/interrupt.h>
25 #include <linux/init.h>
26 #include <linux/delay.h>
27 #include <linux/sched.h>
28 #include <linux/config.h>
29 #include <linux/smp_lock.h>
30 #include <linux/mc146818rtc.h>
31 #include <linux/acpi.h>
32 #include <linux/sysdev.h>
33
34 #include <asm/io.h>
35 #include <asm/smp.h>
36 #include <asm/desc.h>
37 #include <asm/proto.h>
38 #include <asm/mach_apic.h>
39 #include <asm/acpi.h>
40 #include <asm/dma.h>
41
42 #define __apicdebuginit __init
43
44 int sis_apic_bug; /* not actually supported, dummy for compile */
45
46 static int no_timer_check;
47
48 int disable_timer_pin_1 __initdata;
49
50 /* Where, if anywhere, the i8259 is connected in external int mode */
51 static struct { int pin, apic; } ioapic_i8259 = { -1, -1 };
52
53 static DEFINE_SPINLOCK(ioapic_lock);
54
55 /*
56 * # of IRQ routing registers
57 */
58 int nr_ioapic_registers[MAX_IO_APICS];
59
60 /*
61 * Rough estimation of how many shared IRQs there are, can
62 * be changed anytime.
63 */
64 #define MAX_PLUS_SHARED_IRQS NR_IRQ_VECTORS
65 #define PIN_MAP_SIZE (MAX_PLUS_SHARED_IRQS + NR_IRQS)
66
67 /*
68 * This is performance-critical, we want to do it O(1)
69 *
70 * the indexing order of this array favors 1:1 mappings
71 * between pins and IRQs.
72 */
73
74 static struct irq_pin_list {
75 short apic, pin, next;
76 } irq_2_pin[PIN_MAP_SIZE];
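/*
 * Layout note (not from the original file): entries 0..NR_IRQS-1 of
 * irq_2_pin[] are head entries indexed directly by IRQ number, while
 * entries NR_IRQS..PIN_MAP_SIZE-1 are overflow slots handed out through
 * first_free_entry in add_pin_to_irq() when one IRQ is shared by
 * several IO-APIC pins; pin == -1 marks an unused head and a zero
 * next field terminates a chain.
 */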
77
78 int vector_irq[NR_VECTORS] __read_mostly = { [0 ... NR_VECTORS - 1] = -1};
79 #ifdef CONFIG_PCI_MSI
80 #define vector_to_irq(vector) \
81 (platform_legacy_irq(vector) ? vector : vector_irq[vector])
82 #else
83 #define vector_to_irq(vector) (vector)
84 #endif
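/*
 * Note (not from the original file): with CONFIG_PCI_MSI the IRQ
 * descriptors are indexed by vector, so vector_to_irq() maps a vector
 * back to its IRQ through vector_irq[], while legacy IRQs 0-15
 * (platform_legacy_irq()) stay identity mapped; without MSI the macro
 * is a plain identity mapping.
 */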
85
86 #define __DO_ACTION(R, ACTION, FINAL) \
87 \
88 { \
89 int pin; \
90 struct irq_pin_list *entry = irq_2_pin + irq; \
91 \
92 BUG_ON(irq >= NR_IRQS); \
93 for (;;) { \
94 unsigned int reg; \
95 pin = entry->pin; \
96 if (pin == -1) \
97 break; \
98 reg = io_apic_read(entry->apic, 0x10 + R + pin*2); \
99 reg ACTION; \
100 io_apic_modify(entry->apic, reg); \
101 if (!entry->next) \
102 break; \
103 entry = irq_2_pin + entry->next; \
104 } \
105 FINAL; \
106 }
107
108 #ifdef CONFIG_SMP
109 static void set_ioapic_affinity_irq(unsigned int irq, cpumask_t mask)
110 {
111 unsigned long flags;
112 unsigned int dest;
113 cpumask_t tmp;
114
115 cpus_and(tmp, mask, cpu_online_map);
116 if (cpus_empty(tmp))
117 tmp = TARGET_CPUS;
118
119 cpus_and(mask, tmp, CPU_MASK_ALL);
120
121 dest = cpu_mask_to_apicid(mask);
122
123 /*
124 * Only the high 8 bits are valid.
125 */
126 dest = SET_APIC_LOGICAL_ID(dest);
127
128 spin_lock_irqsave(&ioapic_lock, flags);
129 __DO_ACTION(1, = dest, )
130 set_irq_info(irq, mask);
131 spin_unlock_irqrestore(&ioapic_lock, flags);
132 }
133 #endif
134
135 static u8 gsi_2_irq[NR_IRQ_VECTORS] = { [0 ... NR_IRQ_VECTORS-1] = 0xFF };
136
137 /*
138 * The common case is 1:1 IRQ<->pin mappings. Sometimes there are
139 * shared ISA-space IRQs, so we have to support them. We are super
140 * fast in the common case, and fast for shared ISA-space IRQs.
141 */
142 static void add_pin_to_irq(unsigned int irq, int apic, int pin)
143 {
144 static int first_free_entry = NR_IRQS;
145 struct irq_pin_list *entry = irq_2_pin + irq;
146
147 BUG_ON(irq >= NR_IRQS);
148 while (entry->next)
149 entry = irq_2_pin + entry->next;
150
151 if (entry->pin != -1) {
152 entry->next = first_free_entry;
153 entry = irq_2_pin + entry->next;
154 if (++first_free_entry >= PIN_MAP_SIZE)
155 panic("io_apic.c: ran out of irq_2_pin entries!");
156 }
157 entry->apic = apic;
158 entry->pin = pin;
159 }
160
161
162 #define DO_ACTION(name,R,ACTION, FINAL) \
163 \
164 static void name##_IO_APIC_irq (unsigned int irq) \
165 __DO_ACTION(R, ACTION, FINAL)
166
167 DO_ACTION( __mask, 0, |= 0x00010000, io_apic_sync(entry->apic) )
168 /* mask = 1 */
169 DO_ACTION( __unmask, 0, &= 0xfffeffff, )
170 /* mask = 0 */
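/*
 * Illustrative expansion (not part of the original file, hypothetical
 * function name): DO_ACTION() above wraps __DO_ACTION() into a per-IRQ
 * helper.  Roughly, __mask_IO_APIC_irq(irq) expands to a walk over every
 * pin chained to the IRQ, setting the mask bit (bit 16) in the low dword
 * of each redirection entry and finally syncing the IO-APIC; callers
 * such as mask_IO_APIC_irq() hold ioapic_lock around it.
 */
#if 0
static void __mask_IO_APIC_irq_expanded(unsigned int irq)
{
	int pin;
	struct irq_pin_list *entry = irq_2_pin + irq;

	BUG_ON(irq >= NR_IRQS);
	for (;;) {
		unsigned int reg;
		pin = entry->pin;
		if (pin == -1)
			break;
		reg = io_apic_read(entry->apic, 0x10 + 0 + pin*2);
		reg |= 0x00010000;
		io_apic_modify(entry->apic, reg);
		if (!entry->next)
			break;
		entry = irq_2_pin + entry->next;
	}
	io_apic_sync(entry->apic);
}
#endif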
171
172 static void mask_IO_APIC_irq (unsigned int irq)
173 {
174 unsigned long flags;
175
176 spin_lock_irqsave(&ioapic_lock, flags);
177 __mask_IO_APIC_irq(irq);
178 spin_unlock_irqrestore(&ioapic_lock, flags);
179 }
180
181 static void unmask_IO_APIC_irq (unsigned int irq)
182 {
183 unsigned long flags;
184
185 spin_lock_irqsave(&ioapic_lock, flags);
186 __unmask_IO_APIC_irq(irq);
187 spin_unlock_irqrestore(&ioapic_lock, flags);
188 }
189
190 static void clear_IO_APIC_pin(unsigned int apic, unsigned int pin)
191 {
192 struct IO_APIC_route_entry entry;
193 unsigned long flags;
194
195 /* Check delivery_mode to be sure we're not clearing an SMI pin */
196 spin_lock_irqsave(&ioapic_lock, flags);
197 *(((int*)&entry) + 0) = io_apic_read(apic, 0x10 + 2 * pin);
198 *(((int*)&entry) + 1) = io_apic_read(apic, 0x11 + 2 * pin);
199 spin_unlock_irqrestore(&ioapic_lock, flags);
200 if (entry.delivery_mode == dest_SMI)
201 return;
202 /*
203 * Disable it in the IO-APIC irq-routing table:
204 */
205 memset(&entry, 0, sizeof(entry));
206 entry.mask = 1;
207 spin_lock_irqsave(&ioapic_lock, flags);
208 io_apic_write(apic, 0x10 + 2 * pin, *(((int *)&entry) + 0));
209 io_apic_write(apic, 0x11 + 2 * pin, *(((int *)&entry) + 1));
210 spin_unlock_irqrestore(&ioapic_lock, flags);
211 }
212
213 static void clear_IO_APIC (void)
214 {
215 int apic, pin;
216
217 for (apic = 0; apic < nr_ioapics; apic++)
218 for (pin = 0; pin < nr_ioapic_registers[apic]; pin++)
219 clear_IO_APIC_pin(apic, pin);
220 }
221
222 /*
223 * support for broken MP BIOSs, enables hand-redirection of PIRQ0-7 to
224 * specific CPU-side IRQs.
225 */
226
227 #define MAX_PIRQS 8
228 static int pirq_entries [MAX_PIRQS];
229 static int pirqs_enabled;
230 int skip_ioapic_setup;
231 int ioapic_force;
232
233 /* dummy parsing: see setup.c */
234
235 static int __init disable_ioapic_setup(char *str)
236 {
237 skip_ioapic_setup = 1;
238 return 1;
239 }
240
241 static int __init enable_ioapic_setup(char *str)
242 {
243 ioapic_force = 1;
244 skip_ioapic_setup = 0;
245 return 1;
246 }
247
248 __setup("noapic", disable_ioapic_setup);
249 __setup("apic", enable_ioapic_setup);
250
251 #include <asm/pci-direct.h>
252 #include <linux/pci_ids.h>
253 #include <linux/pci.h>
254
255 /* Temporary Hack. Nvidia and VIA boards currently only work with IO-APIC
256 off. Check for an Nvidia or VIA PCI bridge and turn it off.
257 Use pci direct infrastructure because this runs before the PCI subsystem.
258
259 Can be overridden with "apic"
260
261 And another hack to disable the IOMMU on VIA chipsets.
262
263 Kludge-O-Rama. */
264 void __init check_ioapic(void)
265 {
266 int num,slot,func;
267 /* Poor man's PCI discovery */
268 for (num = 0; num < 32; num++) {
269 for (slot = 0; slot < 32; slot++) {
270 for (func = 0; func < 8; func++) {
271 u32 class;
272 u32 vendor;
273 u8 type;
274 class = read_pci_config(num,slot,func,
275 PCI_CLASS_REVISION);
276 if (class == 0xffffffff)
277 break;
278
279 if ((class >> 16) != PCI_CLASS_BRIDGE_PCI)
280 continue;
281
282 vendor = read_pci_config(num, slot, func,
283 PCI_VENDOR_ID);
284 vendor &= 0xffff;
285 switch (vendor) {
286 case PCI_VENDOR_ID_VIA:
287 #ifdef CONFIG_GART_IOMMU
288 if ((end_pfn > MAX_DMA32_PFN ||
289 force_iommu) &&
290 !iommu_aperture_allowed) {
291 printk(KERN_INFO
292 "Looks like a VIA chipset. Disabling IOMMU. Overwrite with \"iommu=allowed\"\n");
293 iommu_aperture_disabled = 1;
294 }
295 #endif
296 return;
297 case PCI_VENDOR_ID_NVIDIA:
298 #ifdef CONFIG_ACPI
299 /* All timer overrides on Nvidia
300 seem to be wrong. Skip them. */
301 acpi_skip_timer_override = 1;
302 printk(KERN_INFO
303 "Nvidia board detected. Ignoring ACPI timer override.\n");
304 #endif
305 /* RED-PEN skip them on mptables too? */
306 return;
307 case PCI_VENDOR_ID_ATI:
308 if (apic_runs_main_timer != 0)
309 break;
310 printk(KERN_INFO
311 "ATI board detected. Using APIC/PM timer.\n");
312 apic_runs_main_timer = 1;
313 nohpet = 1;
314 return;
315 }
316
317 /* No multi-function device? */
318 type = read_pci_config_byte(num,slot,func,
319 PCI_HEADER_TYPE);
320 if (!(type & 0x80))
321 break;
322 }
323 }
324 }
325 }
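/*
 * Background note (a sketch of assumptions, not from the original file):
 * read_pci_config()/read_pci_config_byte() talk to configuration space
 * directly (configuration mechanism #1 via ports 0xCF8/0xCFC on this
 * hardware), which is why the scan above can run before the PCI
 * subsystem is initialised.  A class value of 0xffffffff means no device
 * answered at that address, and the (type & 0x80) test checks the
 * multi-function bit of the header type so single-function devices stop
 * the function loop early.
 */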
326
327 static int __init ioapic_pirq_setup(char *str)
328 {
329 int i, max;
330 int ints[MAX_PIRQS+1];
331
332 get_options(str, ARRAY_SIZE(ints), ints);
333
334 for (i = 0; i < MAX_PIRQS; i++)
335 pirq_entries[i] = -1;
336
337 pirqs_enabled = 1;
338 apic_printk(APIC_VERBOSE, "PIRQ redirection, working around broken MP-BIOS.\n");
339 max = MAX_PIRQS;
340 if (ints[0] < MAX_PIRQS)
341 max = ints[0];
342
343 for (i = 0; i < max; i++) {
344 apic_printk(APIC_VERBOSE, "... PIRQ%d -> IRQ %d\n", i, ints[i+1]);
345 /*
346 * PIRQs are mapped upside down, usually.
347 */
348 pirq_entries[MAX_PIRQS-i-1] = ints[i+1];
349 }
350 return 1;
351 }
352
353 __setup("pirq=", ioapic_pirq_setup);
354
355 /*
356 * Find the IRQ entry number of a certain pin.
357 */
358 static int find_irq_entry(int apic, int pin, int type)
359 {
360 int i;
361
362 for (i = 0; i < mp_irq_entries; i++)
363 if (mp_irqs[i].mpc_irqtype == type &&
364 (mp_irqs[i].mpc_dstapic == mp_ioapics[apic].mpc_apicid ||
365 mp_irqs[i].mpc_dstapic == MP_APIC_ALL) &&
366 mp_irqs[i].mpc_dstirq == pin)
367 return i;
368
369 return -1;
370 }
371
372 /*
373 * Find the pin to which IRQ[irq] (ISA) is connected
374 */
375 static int __init find_isa_irq_pin(int irq, int type)
376 {
377 int i;
378
379 for (i = 0; i < mp_irq_entries; i++) {
380 int lbus = mp_irqs[i].mpc_srcbus;
381
382 if ((mp_bus_id_to_type[lbus] == MP_BUS_ISA ||
383 mp_bus_id_to_type[lbus] == MP_BUS_EISA ||
384 mp_bus_id_to_type[lbus] == MP_BUS_MCA) &&
385 (mp_irqs[i].mpc_irqtype == type) &&
386 (mp_irqs[i].mpc_srcbusirq == irq))
387
388 return mp_irqs[i].mpc_dstirq;
389 }
390 return -1;
391 }
392
393 static int __init find_isa_irq_apic(int irq, int type)
394 {
395 int i;
396
397 for (i = 0; i < mp_irq_entries; i++) {
398 int lbus = mp_irqs[i].mpc_srcbus;
399
400 if ((mp_bus_id_to_type[lbus] == MP_BUS_ISA ||
401 mp_bus_id_to_type[lbus] == MP_BUS_EISA ||
402 mp_bus_id_to_type[lbus] == MP_BUS_MCA) &&
403 (mp_irqs[i].mpc_irqtype == type) &&
404 (mp_irqs[i].mpc_srcbusirq == irq))
405 break;
406 }
407 if (i < mp_irq_entries) {
408 int apic;
409 for(apic = 0; apic < nr_ioapics; apic++) {
410 if (mp_ioapics[apic].mpc_apicid == mp_irqs[i].mpc_dstapic)
411 return apic;
412 }
413 }
414
415 return -1;
416 }
417
418 /*
419 * Find a specific PCI IRQ entry.
420 * Not an __init, possibly needed by modules
421 */
422 static int pin_2_irq(int idx, int apic, int pin);
423
424 int IO_APIC_get_PCI_irq_vector(int bus, int slot, int pin)
425 {
426 int apic, i, best_guess = -1;
427
428 apic_printk(APIC_DEBUG, "querying PCI -> IRQ mapping bus:%d, slot:%d, pin:%d.\n",
429 bus, slot, pin);
430 if (mp_bus_id_to_pci_bus[bus] == -1) {
431 apic_printk(APIC_VERBOSE, "PCI BIOS passed nonexistent PCI bus %d!\n", bus);
432 return -1;
433 }
434 for (i = 0; i < mp_irq_entries; i++) {
435 int lbus = mp_irqs[i].mpc_srcbus;
436
437 for (apic = 0; apic < nr_ioapics; apic++)
438 if (mp_ioapics[apic].mpc_apicid == mp_irqs[i].mpc_dstapic ||
439 mp_irqs[i].mpc_dstapic == MP_APIC_ALL)
440 break;
441
442 if ((mp_bus_id_to_type[lbus] == MP_BUS_PCI) &&
443 !mp_irqs[i].mpc_irqtype &&
444 (bus == lbus) &&
445 (slot == ((mp_irqs[i].mpc_srcbusirq >> 2) & 0x1f))) {
446 int irq = pin_2_irq(i,apic,mp_irqs[i].mpc_dstirq);
447
448 if (!(apic || IO_APIC_IRQ(irq)))
449 continue;
450
451 if (pin == (mp_irqs[i].mpc_srcbusirq & 3))
452 return irq;
453 /*
454 * Use the first all-but-pin matching entry as a
455 * best-guess fuzzy result for broken mptables.
456 */
457 if (best_guess < 0)
458 best_guess = irq;
459 }
460 }
461 BUG_ON(best_guess >= NR_IRQS);
462 return best_guess;
463 }
464
465 /*
466 * EISA Edge/Level control register, ELCR
467 */
468 static int EISA_ELCR(unsigned int irq)
469 {
470 if (irq < 16) {
471 unsigned int port = 0x4d0 + (irq >> 3);
472 return (inb(port) >> (irq & 7)) & 1;
473 }
474 apic_printk(APIC_VERBOSE, "Broken MPtable reports ISA irq %d\n", irq);
475 return 0;
476 }
477
478 /* EISA interrupts are always polarity zero and can be edge or level
479 * trigger depending on the ELCR value. If an interrupt is listed as
480 * EISA conforming in the MP table, that means its trigger type must
481 * be read in from the ELCR */
482
483 #define default_EISA_trigger(idx) (EISA_ELCR(mp_irqs[idx].mpc_srcbusirq))
484 #define default_EISA_polarity(idx) (0)
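/*
 * Worked example (not from the original file): for an EISA interrupt on
 * bus IRQ 9, EISA_ELCR(9) reads port 0x4d0 + (9 >> 3) = 0x4d1 and tests
 * bit 9 & 7 = 1.  A set bit means the line is level triggered, a clear
 * bit means edge triggered, which is exactly what default_EISA_trigger()
 * returns; EISA polarity is always active high (0).
 */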
485
486 /* ISA interrupts are always polarity zero edge triggered,
487 * when listed as conforming in the MP table. */
488
489 #define default_ISA_trigger(idx) (0)
490 #define default_ISA_polarity(idx) (0)
491
492 /* PCI interrupts are always polarity one level triggered,
493 * when listed as conforming in the MP table. */
494
495 #define default_PCI_trigger(idx) (1)
496 #define default_PCI_polarity(idx) (1)
497
498 /* MCA interrupts are always polarity zero level triggered,
499 * when listed as conforming in the MP table. */
500
501 #define default_MCA_trigger(idx) (1)
502 #define default_MCA_polarity(idx) (0)
503
504 static int __init MPBIOS_polarity(int idx)
505 {
506 int bus = mp_irqs[idx].mpc_srcbus;
507 int polarity;
508
509 /*
510 * Determine IRQ line polarity (high active or low active):
511 */
512 switch (mp_irqs[idx].mpc_irqflag & 3)
513 {
514 case 0: /* conforms, ie. bus-type dependent polarity */
515 {
516 switch (mp_bus_id_to_type[bus])
517 {
518 case MP_BUS_ISA: /* ISA pin */
519 {
520 polarity = default_ISA_polarity(idx);
521 break;
522 }
523 case MP_BUS_EISA: /* EISA pin */
524 {
525 polarity = default_EISA_polarity(idx);
526 break;
527 }
528 case MP_BUS_PCI: /* PCI pin */
529 {
530 polarity = default_PCI_polarity(idx);
531 break;
532 }
533 case MP_BUS_MCA: /* MCA pin */
534 {
535 polarity = default_MCA_polarity(idx);
536 break;
537 }
538 default:
539 {
540 printk(KERN_WARNING "broken BIOS!!\n");
541 polarity = 1;
542 break;
543 }
544 }
545 break;
546 }
547 case 1: /* high active */
548 {
549 polarity = 0;
550 break;
551 }
552 case 2: /* reserved */
553 {
554 printk(KERN_WARNING "broken BIOS!!\n");
555 polarity = 1;
556 break;
557 }
558 case 3: /* low active */
559 {
560 polarity = 1;
561 break;
562 }
563 default: /* invalid */
564 {
565 printk(KERN_WARNING "broken BIOS!!\n");
566 polarity = 1;
567 break;
568 }
569 }
570 return polarity;
571 }
572
573 static int MPBIOS_trigger(int idx)
574 {
575 int bus = mp_irqs[idx].mpc_srcbus;
576 int trigger;
577
578 /*
579 * Determine IRQ trigger mode (edge or level sensitive):
580 */
581 switch ((mp_irqs[idx].mpc_irqflag>>2) & 3)
582 {
583 case 0: /* conforms, ie. bus-type dependent */
584 {
585 switch (mp_bus_id_to_type[bus])
586 {
587 case MP_BUS_ISA: /* ISA pin */
588 {
589 trigger = default_ISA_trigger(idx);
590 break;
591 }
592 case MP_BUS_EISA: /* EISA pin */
593 {
594 trigger = default_EISA_trigger(idx);
595 break;
596 }
597 case MP_BUS_PCI: /* PCI pin */
598 {
599 trigger = default_PCI_trigger(idx);
600 break;
601 }
602 case MP_BUS_MCA: /* MCA pin */
603 {
604 trigger = default_MCA_trigger(idx);
605 break;
606 }
607 default:
608 {
609 printk(KERN_WARNING "broken BIOS!!\n");
610 trigger = 1;
611 break;
612 }
613 }
614 break;
615 }
616 case 1: /* edge */
617 {
618 trigger = 0;
619 break;
620 }
621 case 2: /* reserved */
622 {
623 printk(KERN_WARNING "broken BIOS!!\n");
624 trigger = 1;
625 break;
626 }
627 case 3: /* level */
628 {
629 trigger = 1;
630 break;
631 }
632 default: /* invalid */
633 {
634 printk(KERN_WARNING "broken BIOS!!\n");
635 trigger = 0;
636 break;
637 }
638 }
639 return trigger;
640 }
641
642 static inline int irq_polarity(int idx)
643 {
644 return MPBIOS_polarity(idx);
645 }
646
647 static inline int irq_trigger(int idx)
648 {
649 return MPBIOS_trigger(idx);
650 }
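/*
 * Note (not from the original file): mpc_irqflag packs both settings as
 * laid out by the MP specification -- bits 0-1 carry the polarity
 * (00 = conforms to bus, 01 = active high, 11 = active low) and bits 2-3
 * the trigger mode (00 = conforms to bus, 01 = edge, 11 = level), which
 * is why MPBIOS_polarity() masks with 3 and MPBIOS_trigger() shifts
 * right by 2 first.
 */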
651
652 static int next_irq = 16;
653
654 /*
655 * gsi_irq_sharing -- Name overload! "irq" can be either a legacy IRQ
656 * in the range 0-15, a linux IRQ in the range 0-223, or a GSI number
657 * from ACPI, which can reach 800 in large boxen.
658 *
659 * Compact the sparse GSI space into a sequential IRQ series and reuse
660 * vectors if possible.
661 */
662 int gsi_irq_sharing(int gsi)
663 {
664 int i, tries, vector;
665
666 BUG_ON(gsi >= NR_IRQ_VECTORS);
667
668 if (platform_legacy_irq(gsi))
669 return gsi;
670
671 if (gsi_2_irq[gsi] != 0xFF)
672 return (int)gsi_2_irq[gsi];
673
674 tries = NR_IRQS;
675 try_again:
676 vector = assign_irq_vector(gsi);
677
678 /*
679 * Sharing vectors means sharing IRQs, so scan irq_vectors for previous
680 * use of vector and if found, return that IRQ. However, we never want
681 * to share legacy IRQs, which usually have a different trigger mode
682 * than PCI.
683 */
684 for (i = 0; i < NR_IRQS; i++)
685 if (IO_APIC_VECTOR(i) == vector)
686 break;
687 if (platform_legacy_irq(i)) {
688 if (--tries >= 0) {
689 IO_APIC_VECTOR(i) = 0;
690 goto try_again;
691 }
692 panic("gsi_irq_sharing: didn't find an IRQ using vector 0x%02X for GSI %d", vector, gsi);
693 }
694 if (i < NR_IRQS) {
695 gsi_2_irq[gsi] = i;
696 printk(KERN_INFO "GSI %d sharing vector 0x%02X and IRQ %d\n",
697 gsi, vector, i);
698 return i;
699 }
700
701 i = next_irq++;
702 BUG_ON(i >= NR_IRQS);
703 gsi_2_irq[gsi] = i;
704 IO_APIC_VECTOR(i) = vector;
705 printk(KERN_INFO "GSI %d assigned vector 0x%02X and IRQ %d\n",
706 gsi, vector, i);
707 return i;
708 }
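/*
 * Illustrative example (hypothetical numbers, not from the original
 * file): the first non-legacy GSI seen -- say GSI 40 -- gets a fresh
 * vector from assign_irq_vector() and is handed IRQ 16, since next_irq
 * starts at 16; the next distinct GSI gets IRQ 17, and so on.  If a
 * later GSI is assigned a vector already recorded for some non-legacy
 * IRQ, it reuses that IRQ instead of consuming a new one, which is how
 * a GSI space reaching into the hundreds is folded into NR_IRQS.
 */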
709
710 static int pin_2_irq(int idx, int apic, int pin)
711 {
712 int irq, i;
713 int bus = mp_irqs[idx].mpc_srcbus;
714
715 /*
716 * Debugging check, we are in big trouble if this message pops up!
717 */
718 if (mp_irqs[idx].mpc_dstirq != pin)
719 printk(KERN_ERR "broken BIOS or MPTABLE parser, ayiee!!\n");
720
721 switch (mp_bus_id_to_type[bus])
722 {
723 case MP_BUS_ISA: /* ISA pin */
724 case MP_BUS_EISA:
725 case MP_BUS_MCA:
726 {
727 irq = mp_irqs[idx].mpc_srcbusirq;
728 break;
729 }
730 case MP_BUS_PCI: /* PCI pin */
731 {
732 /*
733 * PCI IRQs are mapped in order
734 */
735 i = irq = 0;
736 while (i < apic)
737 irq += nr_ioapic_registers[i++];
738 irq += pin;
739 irq = gsi_irq_sharing(irq);
740 break;
741 }
742 default:
743 {
744 printk(KERN_ERR "unknown bus type %d.\n",bus);
745 irq = 0;
746 break;
747 }
748 }
749 BUG_ON(irq >= NR_IRQS);
750
751 /*
752 * PCI IRQ command line redirection. Yes, limits are hardcoded.
753 */
754 if ((pin >= 16) && (pin <= 23)) {
755 if (pirq_entries[pin-16] != -1) {
756 if (!pirq_entries[pin-16]) {
757 apic_printk(APIC_VERBOSE, "disabling PIRQ%d\n", pin-16);
758 } else {
759 irq = pirq_entries[pin-16];
760 apic_printk(APIC_VERBOSE, "using PIRQ%d -> IRQ %d\n",
761 pin-16, irq);
762 }
763 }
764 }
765 BUG_ON(irq >= NR_IRQS);
766 return irq;
767 }
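/*
 * Worked example (hypothetical topology, not from the original file):
 * with two IO-APICs of 24 pins each, a PCI interrupt on pin 5 of the
 * second IO-APIC (apic == 1) starts from irq = nr_ioapic_registers[0]
 * = 24, adds the pin to give GSI 29, and then lets gsi_irq_sharing()
 * map that GSI onto a Linux IRQ number.
 */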
768
769 static inline int IO_APIC_irq_trigger(int irq)
770 {
771 int apic, idx, pin;
772
773 for (apic = 0; apic < nr_ioapics; apic++) {
774 for (pin = 0; pin < nr_ioapic_registers[apic]; pin++) {
775 idx = find_irq_entry(apic,pin,mp_INT);
776 if ((idx != -1) && (irq == pin_2_irq(idx,apic,pin)))
777 return irq_trigger(idx);
778 }
779 }
780 /*
781 * nonexistent IRQs are edge default
782 */
783 return 0;
784 }
785
786 /* irq_vectors is indexed by the sum of all RTEs in all I/O APICs. */
787 u8 irq_vector[NR_IRQ_VECTORS] __read_mostly = { FIRST_DEVICE_VECTOR , 0 };
788
789 int assign_irq_vector(int irq)
790 {
791 static int current_vector = FIRST_DEVICE_VECTOR, offset = 0;
792
793 BUG_ON(irq != AUTO_ASSIGN && (unsigned)irq >= NR_IRQ_VECTORS);
794 if (irq != AUTO_ASSIGN && IO_APIC_VECTOR(irq) > 0)
795 return IO_APIC_VECTOR(irq);
796 next:
797 current_vector += 8;
798 if (current_vector == IA32_SYSCALL_VECTOR)
799 goto next;
800
801 if (current_vector >= FIRST_SYSTEM_VECTOR) {
802 /* If we run out of vectors on large boxen, must share them. */
803 offset = (offset + 1) % 8;
804 current_vector = FIRST_DEVICE_VECTOR + offset;
805 }
806
807 vector_irq[current_vector] = irq;
808 if (irq != AUTO_ASSIGN)
809 IO_APIC_VECTOR(irq) = current_vector;
810
811 return current_vector;
812 }
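/*
 * Illustrative allocation pattern (not from the original file): vectors
 * are handed out in steps of 8 starting at FIRST_DEVICE_VECTOR, which
 * spreads successive allocations across interrupt priority levels (the
 * level is the vector shifted right by 4, as the comment in
 * init_IO_APIC_traps() notes).  IA32_SYSCALL_VECTOR is skipped, and once
 * FIRST_SYSTEM_VECTOR is reached the search restarts at
 * FIRST_DEVICE_VECTOR + offset with the offset bumped by one, so vectors
 * start being shared on very large configurations.
 */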
813
814 extern void (*interrupt[NR_IRQS])(void);
815 static struct hw_interrupt_type ioapic_level_type;
816 static struct hw_interrupt_type ioapic_edge_type;
817
818 #define IOAPIC_AUTO -1
819 #define IOAPIC_EDGE 0
820 #define IOAPIC_LEVEL 1
821
822 static inline void ioapic_register_intr(int irq, int vector, unsigned long trigger)
823 {
824 if (use_pci_vector() && !platform_legacy_irq(irq)) {
825 if ((trigger == IOAPIC_AUTO && IO_APIC_irq_trigger(irq)) ||
826 trigger == IOAPIC_LEVEL)
827 irq_desc[vector].handler = &ioapic_level_type;
828 else
829 irq_desc[vector].handler = &ioapic_edge_type;
830 set_intr_gate(vector, interrupt[vector]);
831 } else {
832 if ((trigger == IOAPIC_AUTO && IO_APIC_irq_trigger(irq)) ||
833 trigger == IOAPIC_LEVEL)
834 irq_desc[irq].handler = &ioapic_level_type;
835 else
836 irq_desc[irq].handler = &ioapic_edge_type;
837 set_intr_gate(vector, interrupt[irq]);
838 }
839 }
840
841 static void __init setup_IO_APIC_irqs(void)
842 {
843 struct IO_APIC_route_entry entry;
844 int apic, pin, idx, irq, first_notcon = 1, vector;
845 unsigned long flags;
846
847 apic_printk(APIC_VERBOSE, KERN_DEBUG "init IO_APIC IRQs\n");
848
849 for (apic = 0; apic < nr_ioapics; apic++) {
850 for (pin = 0; pin < nr_ioapic_registers[apic]; pin++) {
851
852 /*
853 * add it to the IO-APIC irq-routing table:
854 */
855 memset(&entry,0,sizeof(entry));
856
857 entry.delivery_mode = INT_DELIVERY_MODE;
858 entry.dest_mode = INT_DEST_MODE;
859 entry.mask = 0; /* enable IRQ */
860 entry.dest.logical.logical_dest = cpu_mask_to_apicid(TARGET_CPUS);
861
862 idx = find_irq_entry(apic,pin,mp_INT);
863 if (idx == -1) {
864 if (first_notcon) {
865 apic_printk(APIC_VERBOSE, KERN_DEBUG " IO-APIC (apicid-pin) %d-%d", mp_ioapics[apic].mpc_apicid, pin);
866 first_notcon = 0;
867 } else
868 apic_printk(APIC_VERBOSE, ", %d-%d", mp_ioapics[apic].mpc_apicid, pin);
869 continue;
870 }
871
872 entry.trigger = irq_trigger(idx);
873 entry.polarity = irq_polarity(idx);
874
875 if (irq_trigger(idx)) {
876 entry.trigger = 1;
877 entry.mask = 1;
878 entry.dest.logical.logical_dest = cpu_mask_to_apicid(TARGET_CPUS);
879 }
880
881 irq = pin_2_irq(idx, apic, pin);
882 add_pin_to_irq(irq, apic, pin);
883
884 if (!apic && !IO_APIC_IRQ(irq))
885 continue;
886
887 if (IO_APIC_IRQ(irq)) {
888 vector = assign_irq_vector(irq);
889 entry.vector = vector;
890
891 ioapic_register_intr(irq, vector, IOAPIC_AUTO);
892 if (!apic && (irq < 16))
893 disable_8259A_irq(irq);
894 }
895 spin_lock_irqsave(&ioapic_lock, flags);
896 io_apic_write(apic, 0x11+2*pin, *(((int *)&entry)+1));
897 io_apic_write(apic, 0x10+2*pin, *(((int *)&entry)+0));
898 set_native_irq_info(irq, TARGET_CPUS);
899 spin_unlock_irqrestore(&ioapic_lock, flags);
900 }
901 }
902
903 if (!first_notcon)
904 apic_printk(APIC_VERBOSE," not connected.\n");
905 }
906
907 /*
908 * Set up the 8259A-master output pin as broadcast to all
909 * CPUs.
910 */
911 static void __init setup_ExtINT_IRQ0_pin(unsigned int apic, unsigned int pin, int vector)
912 {
913 struct IO_APIC_route_entry entry;
914 unsigned long flags;
915
916 memset(&entry,0,sizeof(entry));
917
918 disable_8259A_irq(0);
919
920 /* mask LVT0 */
921 apic_write(APIC_LVT0, APIC_LVT_MASKED | APIC_DM_EXTINT);
922
923 /*
924 * We use logical delivery to get the timer IRQ
925 * to the first CPU.
926 */
927 entry.dest_mode = INT_DEST_MODE;
928 entry.mask = 0; /* unmask IRQ now */
929 entry.dest.logical.logical_dest = cpu_mask_to_apicid(TARGET_CPUS);
930 entry.delivery_mode = INT_DELIVERY_MODE;
931 entry.polarity = 0;
932 entry.trigger = 0;
933 entry.vector = vector;
934
935 /*
936 * The timer IRQ doesn't have to know that behind the
937 * scenes we have an 8259A-master in AEOI mode ...
938 */
939 irq_desc[0].handler = &ioapic_edge_type;
940
941 /*
942 * Add it to the IO-APIC irq-routing table:
943 */
944 spin_lock_irqsave(&ioapic_lock, flags);
945 io_apic_write(apic, 0x11+2*pin, *(((int *)&entry)+1));
946 io_apic_write(apic, 0x10+2*pin, *(((int *)&entry)+0));
947 spin_unlock_irqrestore(&ioapic_lock, flags);
948
949 enable_8259A_irq(0);
950 }
951
952 void __init UNEXPECTED_IO_APIC(void)
953 {
954 }
955
956 void __apicdebuginit print_IO_APIC(void)
957 {
958 int apic, i;
959 union IO_APIC_reg_00 reg_00;
960 union IO_APIC_reg_01 reg_01;
961 union IO_APIC_reg_02 reg_02;
962 unsigned long flags;
963
964 if (apic_verbosity == APIC_QUIET)
965 return;
966
967 printk(KERN_DEBUG "number of MP IRQ sources: %d.\n", mp_irq_entries);
968 for (i = 0; i < nr_ioapics; i++)
969 printk(KERN_DEBUG "number of IO-APIC #%d registers: %d.\n",
970 mp_ioapics[i].mpc_apicid, nr_ioapic_registers[i]);
971
972 /*
973 * We are a bit conservative about what we expect. We have to
974 * know about every hardware change ASAP.
975 */
976 printk(KERN_INFO "testing the IO APIC.......................\n");
977
978 for (apic = 0; apic < nr_ioapics; apic++) {
979
980 spin_lock_irqsave(&ioapic_lock, flags);
981 reg_00.raw = io_apic_read(apic, 0);
982 reg_01.raw = io_apic_read(apic, 1);
983 if (reg_01.bits.version >= 0x10)
984 reg_02.raw = io_apic_read(apic, 2);
985 spin_unlock_irqrestore(&ioapic_lock, flags);
986
987 printk("\n");
988 printk(KERN_DEBUG "IO APIC #%d......\n", mp_ioapics[apic].mpc_apicid);
989 printk(KERN_DEBUG ".... register #00: %08X\n", reg_00.raw);
990 printk(KERN_DEBUG "....... : physical APIC id: %02X\n", reg_00.bits.ID);
991 if (reg_00.bits.__reserved_1 || reg_00.bits.__reserved_2)
992 UNEXPECTED_IO_APIC();
993
994 printk(KERN_DEBUG ".... register #01: %08X\n", *(int *)&reg_01);
995 printk(KERN_DEBUG "....... : max redirection entries: %04X\n", reg_01.bits.entries);
996 if ( (reg_01.bits.entries != 0x0f) && /* older (Neptune) boards */
997 (reg_01.bits.entries != 0x17) && /* typical ISA+PCI boards */
998 (reg_01.bits.entries != 0x1b) && /* Compaq Proliant boards */
999 (reg_01.bits.entries != 0x1f) && /* dual Xeon boards */
1000 (reg_01.bits.entries != 0x22) && /* bigger Xeon boards */
1001 (reg_01.bits.entries != 0x2E) &&
1002 (reg_01.bits.entries != 0x3F) &&
1003 (reg_01.bits.entries != 0x03)
1004 )
1005 UNEXPECTED_IO_APIC();
1006
1007 printk(KERN_DEBUG "....... : PRQ implemented: %X\n", reg_01.bits.PRQ);
1008 printk(KERN_DEBUG "....... : IO APIC version: %04X\n", reg_01.bits.version);
1009 if ( (reg_01.bits.version != 0x01) && /* 82489DX IO-APICs */
1010 (reg_01.bits.version != 0x02) && /* 82801BA IO-APICs (ICH2) */
1011 (reg_01.bits.version != 0x10) && /* oldest IO-APICs */
1012 (reg_01.bits.version != 0x11) && /* Pentium/Pro IO-APICs */
1013 (reg_01.bits.version != 0x13) && /* Xeon IO-APICs */
1014 (reg_01.bits.version != 0x20) /* Intel P64H (82806 AA) */
1015 )
1016 UNEXPECTED_IO_APIC();
1017 if (reg_01.bits.__reserved_1 || reg_01.bits.__reserved_2)
1018 UNEXPECTED_IO_APIC();
1019
1020 if (reg_01.bits.version >= 0x10) {
1021 printk(KERN_DEBUG ".... register #02: %08X\n", reg_02.raw);
1022 printk(KERN_DEBUG "....... : arbitration: %02X\n", reg_02.bits.arbitration);
1023 if (reg_02.bits.__reserved_1 || reg_02.bits.__reserved_2)
1024 UNEXPECTED_IO_APIC();
1025 }
1026
1027 printk(KERN_DEBUG ".... IRQ redirection table:\n");
1028
1029 printk(KERN_DEBUG " NR Log Phy Mask Trig IRR Pol"
1030 " Stat Dest Deli Vect: \n");
1031
1032 for (i = 0; i <= reg_01.bits.entries; i++) {
1033 struct IO_APIC_route_entry entry;
1034
1035 spin_lock_irqsave(&ioapic_lock, flags);
1036 *(((int *)&entry)+0) = io_apic_read(apic, 0x10+i*2);
1037 *(((int *)&entry)+1) = io_apic_read(apic, 0x11+i*2);
1038 spin_unlock_irqrestore(&ioapic_lock, flags);
1039
1040 printk(KERN_DEBUG " %02x %03X %02X ",
1041 i,
1042 entry.dest.logical.logical_dest,
1043 entry.dest.physical.physical_dest
1044 );
1045
1046 printk("%1d %1d %1d %1d %1d %1d %1d %02X\n",
1047 entry.mask,
1048 entry.trigger,
1049 entry.irr,
1050 entry.polarity,
1051 entry.delivery_status,
1052 entry.dest_mode,
1053 entry.delivery_mode,
1054 entry.vector
1055 );
1056 }
1057 }
1058 if (use_pci_vector())
1059 printk(KERN_INFO "Using vector-based indexing\n");
1060 printk(KERN_DEBUG "IRQ to pin mappings:\n");
1061 for (i = 0; i < NR_IRQS; i++) {
1062 struct irq_pin_list *entry = irq_2_pin + i;
1063 if (entry->pin < 0)
1064 continue;
1065 if (use_pci_vector() && !platform_legacy_irq(i))
1066 printk(KERN_DEBUG "IRQ%d ", IO_APIC_VECTOR(i));
1067 else
1068 printk(KERN_DEBUG "IRQ%d ", i);
1069 for (;;) {
1070 printk("-> %d:%d", entry->apic, entry->pin);
1071 if (!entry->next)
1072 break;
1073 entry = irq_2_pin + entry->next;
1074 }
1075 printk("\n");
1076 }
1077
1078 printk(KERN_INFO ".................................... done.\n");
1079
1080 return;
1081 }
1082
1083 #if 0
1084
1085 static __apicdebuginit void print_APIC_bitfield (int base)
1086 {
1087 unsigned int v;
1088 int i, j;
1089
1090 if (apic_verbosity == APIC_QUIET)
1091 return;
1092
1093 printk(KERN_DEBUG "0123456789abcdef0123456789abcdef\n" KERN_DEBUG);
1094 for (i = 0; i < 8; i++) {
1095 v = apic_read(base + i*0x10);
1096 for (j = 0; j < 32; j++) {
1097 if (v & (1<<j))
1098 printk("1");
1099 else
1100 printk("0");
1101 }
1102 printk("\n");
1103 }
1104 }
1105
1106 void __apicdebuginit print_local_APIC(void * dummy)
1107 {
1108 unsigned int v, ver, maxlvt;
1109
1110 if (apic_verbosity == APIC_QUIET)
1111 return;
1112
1113 printk("\n" KERN_DEBUG "printing local APIC contents on CPU#%d/%d:\n",
1114 smp_processor_id(), hard_smp_processor_id());
1115 v = apic_read(APIC_ID);
1116 printk(KERN_INFO "... APIC ID: %08x (%01x)\n", v, GET_APIC_ID(v));
1117 v = apic_read(APIC_LVR);
1118 printk(KERN_INFO "... APIC VERSION: %08x\n", v);
1119 ver = GET_APIC_VERSION(v);
1120 maxlvt = get_maxlvt();
1121
1122 v = apic_read(APIC_TASKPRI);
1123 printk(KERN_DEBUG "... APIC TASKPRI: %08x (%02x)\n", v, v & APIC_TPRI_MASK);
1124
1125 v = apic_read(APIC_ARBPRI);
1126 printk(KERN_DEBUG "... APIC ARBPRI: %08x (%02x)\n", v,
1127 v & APIC_ARBPRI_MASK);
1128 v = apic_read(APIC_PROCPRI);
1129 printk(KERN_DEBUG "... APIC PROCPRI: %08x\n", v);
1130
1131 v = apic_read(APIC_EOI);
1132 printk(KERN_DEBUG "... APIC EOI: %08x\n", v);
1133 v = apic_read(APIC_RRR);
1134 printk(KERN_DEBUG "... APIC RRR: %08x\n", v);
1135 v = apic_read(APIC_LDR);
1136 printk(KERN_DEBUG "... APIC LDR: %08x\n", v);
1137 v = apic_read(APIC_DFR);
1138 printk(KERN_DEBUG "... APIC DFR: %08x\n", v);
1139 v = apic_read(APIC_SPIV);
1140 printk(KERN_DEBUG "... APIC SPIV: %08x\n", v);
1141
1142 printk(KERN_DEBUG "... APIC ISR field:\n");
1143 print_APIC_bitfield(APIC_ISR);
1144 printk(KERN_DEBUG "... APIC TMR field:\n");
1145 print_APIC_bitfield(APIC_TMR);
1146 printk(KERN_DEBUG "... APIC IRR field:\n");
1147 print_APIC_bitfield(APIC_IRR);
1148
1149 v = apic_read(APIC_ESR);
1150 printk(KERN_DEBUG "... APIC ESR: %08x\n", v);
1151
1152 v = apic_read(APIC_ICR);
1153 printk(KERN_DEBUG "... APIC ICR: %08x\n", v);
1154 v = apic_read(APIC_ICR2);
1155 printk(KERN_DEBUG "... APIC ICR2: %08x\n", v);
1156
1157 v = apic_read(APIC_LVTT);
1158 printk(KERN_DEBUG "... APIC LVTT: %08x\n", v);
1159
1160 if (maxlvt > 3) { /* PC is LVT#4. */
1161 v = apic_read(APIC_LVTPC);
1162 printk(KERN_DEBUG "... APIC LVTPC: %08x\n", v);
1163 }
1164 v = apic_read(APIC_LVT0);
1165 printk(KERN_DEBUG "... APIC LVT0: %08x\n", v);
1166 v = apic_read(APIC_LVT1);
1167 printk(KERN_DEBUG "... APIC LVT1: %08x\n", v);
1168
1169 if (maxlvt > 2) { /* ERR is LVT#3. */
1170 v = apic_read(APIC_LVTERR);
1171 printk(KERN_DEBUG "... APIC LVTERR: %08x\n", v);
1172 }
1173
1174 v = apic_read(APIC_TMICT);
1175 printk(KERN_DEBUG "... APIC TMICT: %08x\n", v);
1176 v = apic_read(APIC_TMCCT);
1177 printk(KERN_DEBUG "... APIC TMCCT: %08x\n", v);
1178 v = apic_read(APIC_TDCR);
1179 printk(KERN_DEBUG "... APIC TDCR: %08x\n", v);
1180 printk("\n");
1181 }
1182
1183 void print_all_local_APICs (void)
1184 {
1185 on_each_cpu(print_local_APIC, NULL, 1, 1);
1186 }
1187
1188 void __apicdebuginit print_PIC(void)
1189 {
1190 unsigned int v;
1191 unsigned long flags;
1192
1193 if (apic_verbosity == APIC_QUIET)
1194 return;
1195
1196 printk(KERN_DEBUG "\nprinting PIC contents\n");
1197
1198 spin_lock_irqsave(&i8259A_lock, flags);
1199
1200 v = inb(0xa1) << 8 | inb(0x21);
1201 printk(KERN_DEBUG "... PIC IMR: %04x\n", v);
1202
1203 v = inb(0xa0) << 8 | inb(0x20);
1204 printk(KERN_DEBUG "... PIC IRR: %04x\n", v);
1205
1206 outb(0x0b,0xa0);
1207 outb(0x0b,0x20);
1208 v = inb(0xa0) << 8 | inb(0x20);
1209 outb(0x0a,0xa0);
1210 outb(0x0a,0x20);
1211
1212 spin_unlock_irqrestore(&i8259A_lock, flags);
1213
1214 printk(KERN_DEBUG "... PIC ISR: %04x\n", v);
1215
1216 v = inb(0x4d1) << 8 | inb(0x4d0);
1217 printk(KERN_DEBUG "... PIC ELCR: %04x\n", v);
1218 }
1219
1220 #endif /* 0 */
1221
1222 static void __init enable_IO_APIC(void)
1223 {
1224 union IO_APIC_reg_01 reg_01;
1225 int i8259_apic, i8259_pin;
1226 int i, apic;
1227 unsigned long flags;
1228
1229 for (i = 0; i < PIN_MAP_SIZE; i++) {
1230 irq_2_pin[i].pin = -1;
1231 irq_2_pin[i].next = 0;
1232 }
1233 if (!pirqs_enabled)
1234 for (i = 0; i < MAX_PIRQS; i++)
1235 pirq_entries[i] = -1;
1236
1237 /*
1238 * The number of IO-APIC IRQ registers (== #pins):
1239 */
1240 for (apic = 0; apic < nr_ioapics; apic++) {
1241 spin_lock_irqsave(&ioapic_lock, flags);
1242 reg_01.raw = io_apic_read(apic, 1);
1243 spin_unlock_irqrestore(&ioapic_lock, flags);
1244 nr_ioapic_registers[apic] = reg_01.bits.entries+1;
1245 }
1246 for(apic = 0; apic < nr_ioapics; apic++) {
1247 int pin;
1248 /* See if any of the pins is in ExtINT mode */
1249 for (pin = 0; pin < nr_ioapic_registers[apic]; pin++) {
1250 struct IO_APIC_route_entry entry;
1251 spin_lock_irqsave(&ioapic_lock, flags);
1252 *(((int *)&entry) + 0) = io_apic_read(apic, 0x10 + 2 * pin);
1253 *(((int *)&entry) + 1) = io_apic_read(apic, 0x11 + 2 * pin);
1254 spin_unlock_irqrestore(&ioapic_lock, flags);
1255
1256
1257 /* If the interrupt line is enabled and in ExtInt mode
1258 * I have found the pin where the i8259 is connected.
1259 */
1260 if ((entry.mask == 0) && (entry.delivery_mode == dest_ExtINT)) {
1261 ioapic_i8259.apic = apic;
1262 ioapic_i8259.pin = pin;
1263 goto found_i8259;
1264 }
1265 }
1266 }
1267 found_i8259:
1268 /* See if the MP table has reported the ExtINT */
1269 i8259_pin = find_isa_irq_pin(0, mp_ExtINT);
1270 i8259_apic = find_isa_irq_apic(0, mp_ExtINT);
1271 /* Trust the MP table if nothing is setup in the hardware */
1272 if ((ioapic_i8259.pin == -1) && (i8259_pin >= 0)) {
1273 printk(KERN_WARNING "ExtINT not setup in hardware but reported by MP table\n");
1274 ioapic_i8259.pin = i8259_pin;
1275 ioapic_i8259.apic = i8259_apic;
1276 }
1277 /* Complain if the MP table and the hardware disagree */
1278 if (((ioapic_i8259.apic != i8259_apic) || (ioapic_i8259.pin != i8259_pin)) &&
1279 (i8259_pin >= 0) && (ioapic_i8259.pin >= 0))
1280 {
1281 printk(KERN_WARNING "ExtINT in hardware and MP table differ\n");
1282 }
1283
1284 /*
1285 * Do not trust the IO-APIC being empty at bootup
1286 */
1287 clear_IO_APIC();
1288 }
1289
1290 /*
1291 * Not an __init, needed by the reboot code
1292 */
1293 void disable_IO_APIC(void)
1294 {
1295 /*
1296 * Clear the IO-APIC before rebooting:
1297 */
1298 clear_IO_APIC();
1299
1300 /*
1301 * If the i8259 is routed through an IOAPIC
1302 * Put that IOAPIC in virtual wire mode
1303 * so legacy interrupts can be delivered.
1304 */
1305 if (ioapic_i8259.pin != -1) {
1306 struct IO_APIC_route_entry entry;
1307 unsigned long flags;
1308
1309 memset(&entry, 0, sizeof(entry));
1310 entry.mask = 0; /* Enabled */
1311 entry.trigger = 0; /* Edge */
1312 entry.irr = 0;
1313 entry.polarity = 0; /* High */
1314 entry.delivery_status = 0;
1315 entry.dest_mode = 0; /* Physical */
1316 entry.delivery_mode = dest_ExtINT; /* ExtInt */
1317 entry.vector = 0;
1318 entry.dest.physical.physical_dest =
1319 GET_APIC_ID(apic_read(APIC_ID));
1320
1321 /*
1322 * Add it to the IO-APIC irq-routing table:
1323 */
1324 spin_lock_irqsave(&ioapic_lock, flags);
1325 io_apic_write(ioapic_i8259.apic, 0x11+2*ioapic_i8259.pin,
1326 *(((int *)&entry)+1));
1327 io_apic_write(ioapic_i8259.apic, 0x10+2*ioapic_i8259.pin,
1328 *(((int *)&entry)+0));
1329 spin_unlock_irqrestore(&ioapic_lock, flags);
1330 }
1331
1332 disconnect_bsp_APIC(ioapic_i8259.pin != -1);
1333 }
1334
1335 /*
1336 * function to set the IO-APIC physical IDs based on the
1337 * values stored in the MPC table.
1338 *
1339 * by Matt Domsch <Matt_Domsch@dell.com> Tue Dec 21 12:25:05 CST 1999
1340 */
1341
1342 static void __init setup_ioapic_ids_from_mpc (void)
1343 {
1344 union IO_APIC_reg_00 reg_00;
1345 int apic;
1346 int i;
1347 unsigned char old_id;
1348 unsigned long flags;
1349
1350 /*
1351 * Set the IOAPIC ID to the value stored in the MPC table.
1352 */
1353 for (apic = 0; apic < nr_ioapics; apic++) {
1354
1355 /* Read the register 0 value */
1356 spin_lock_irqsave(&ioapic_lock, flags);
1357 reg_00.raw = io_apic_read(apic, 0);
1358 spin_unlock_irqrestore(&ioapic_lock, flags);
1359
1360 old_id = mp_ioapics[apic].mpc_apicid;
1361
1362
1363 printk(KERN_INFO "Using IO-APIC %d\n", mp_ioapics[apic].mpc_apicid);
1364
1365
1366 /*
1367 * We need to adjust the IRQ routing table
1368 * if the ID changed.
1369 */
1370 if (old_id != mp_ioapics[apic].mpc_apicid)
1371 for (i = 0; i < mp_irq_entries; i++)
1372 if (mp_irqs[i].mpc_dstapic == old_id)
1373 mp_irqs[i].mpc_dstapic
1374 = mp_ioapics[apic].mpc_apicid;
1375
1376 /*
1377 * Read the right value from the MPC table and
1378 * write it into the ID register.
1379 */
1380 apic_printk(APIC_VERBOSE,KERN_INFO "...changing IO-APIC physical APIC ID to %d ...",
1381 mp_ioapics[apic].mpc_apicid);
1382
1383 reg_00.bits.ID = mp_ioapics[apic].mpc_apicid;
1384 spin_lock_irqsave(&ioapic_lock, flags);
1385 io_apic_write(apic, 0, reg_00.raw);
1386 spin_unlock_irqrestore(&ioapic_lock, flags);
1387
1388 /*
1389 * Sanity check
1390 */
1391 spin_lock_irqsave(&ioapic_lock, flags);
1392 reg_00.raw = io_apic_read(apic, 0);
1393 spin_unlock_irqrestore(&ioapic_lock, flags);
1394 if (reg_00.bits.ID != mp_ioapics[apic].mpc_apicid)
1395 printk("could not set ID!\n");
1396 else
1397 apic_printk(APIC_VERBOSE," ok.\n");
1398 }
1399 }
1400
1401 /*
1402 * There is a nasty bug in some older SMP boards: their mptable lies
1403 * about the timer IRQ. We do the following to work around the situation:
1404 *
1405 * - timer IRQ defaults to IO-APIC IRQ
1406 * - if this function detects that timer IRQs are defunct, then we fall
1407 * back to ISA timer IRQs
1408 */
1409 static int __init timer_irq_works(void)
1410 {
1411 unsigned long t1 = jiffies;
1412
1413 local_irq_enable();
1414 /* Let ten ticks pass... */
1415 mdelay((10 * 1000) / HZ);
1416
1417 /*
1418 * Expect a few ticks at least, to be sure some possible
1419 * glue logic does not lock up after one or two first
1420 * ticks in a non-ExtINT mode. Also the local APIC
1421 * might have cached one ExtINT interrupt. Finally, at
1422 * least one tick may be lost due to delays.
1423 */
1424
1425 /* jiffies wrap? */
1426 if (jiffies - t1 > 4)
1427 return 1;
1428 return 0;
1429 }
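/*
 * Worked numbers (not from the original file): mdelay((10 * 1000) / HZ)
 * busy-waits for roughly ten timer periods, and the jiffies - t1 > 4
 * test then demands that at least five ticks were actually delivered in
 * that window -- enough slack for a cached ExtINT interrupt or a lost
 * tick, as the comment above explains.
 */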
1430
1431 /*
1432 * In the SMP+IOAPIC case it might happen that there are an unspecified
1433 * number of pending IRQ events unhandled. These cases are very rare,
1434 * so we 'resend' these IRQs via IPIs, to the same CPU. It's much
1435 * better to do it this way as thus we do not have to be aware of
1436 * 'pending' interrupts in the IRQ path, except at this point.
1437 */
1438 /*
1439 * Edge triggered needs to resend any interrupt
1440 * that was delayed but this is now handled in the device
1441 * independent code.
1442 */
1443
1444 /*
1445 * Starting up an edge-triggered IO-APIC interrupt is
1446 * nasty - we need to make sure that we get the edge.
1447 * If it is already asserted for some reason, we need to
1448 * return 1 to indicate that it was pending.
1449 *
1450 * This is not complete - we should be able to fake
1451 * an edge even if it isn't on the 8259A...
1452 */
1453
1454 static unsigned int startup_edge_ioapic_irq(unsigned int irq)
1455 {
1456 int was_pending = 0;
1457 unsigned long flags;
1458
1459 spin_lock_irqsave(&ioapic_lock, flags);
1460 if (irq < 16) {
1461 disable_8259A_irq(irq);
1462 if (i8259A_irq_pending(irq))
1463 was_pending = 1;
1464 }
1465 __unmask_IO_APIC_irq(irq);
1466 spin_unlock_irqrestore(&ioapic_lock, flags);
1467
1468 return was_pending;
1469 }
1470
1471 /*
1472 * Once we have recorded IRQ_PENDING already, we can mask the
1473 * interrupt for real. This prevents IRQ storms from unhandled
1474 * devices.
1475 */
1476 static void ack_edge_ioapic_irq(unsigned int irq)
1477 {
1478 move_irq(irq);
1479 if ((irq_desc[irq].status & (IRQ_PENDING | IRQ_DISABLED))
1480 == (IRQ_PENDING | IRQ_DISABLED))
1481 mask_IO_APIC_irq(irq);
1482 ack_APIC_irq();
1483 }
1484
1485 /*
1486 * Level triggered interrupts can just be masked,
1487 * and shutting down and starting up the interrupt
1488 * is the same as enabling and disabling them -- except
1489 * with a startup need to return a "was pending" value.
1490 *
1491 * Level triggered interrupts are special because we
1492 * do not touch any IO-APIC register while handling
1493 * them. We ack the APIC in the end-IRQ handler, not
1494 * in the start-IRQ-handler. Protection against reentrance
1495 * from the same interrupt is still provided, both by the
1496 * generic IRQ layer and by the fact that an unacked local
1497 * APIC does not accept IRQs.
1498 */
1499 static unsigned int startup_level_ioapic_irq (unsigned int irq)
1500 {
1501 unmask_IO_APIC_irq(irq);
1502
1503 return 0; /* don't check for pending */
1504 }
1505
1506 static void end_level_ioapic_irq (unsigned int irq)
1507 {
1508 move_irq(irq);
1509 ack_APIC_irq();
1510 }
1511
1512 #ifdef CONFIG_PCI_MSI
1513 static unsigned int startup_edge_ioapic_vector(unsigned int vector)
1514 {
1515 int irq = vector_to_irq(vector);
1516
1517 return startup_edge_ioapic_irq(irq);
1518 }
1519
1520 static void ack_edge_ioapic_vector(unsigned int vector)
1521 {
1522 int irq = vector_to_irq(vector);
1523
1524 move_native_irq(vector);
1525 ack_edge_ioapic_irq(irq);
1526 }
1527
1528 static unsigned int startup_level_ioapic_vector (unsigned int vector)
1529 {
1530 int irq = vector_to_irq(vector);
1531
1532 return startup_level_ioapic_irq (irq);
1533 }
1534
1535 static void end_level_ioapic_vector (unsigned int vector)
1536 {
1537 int irq = vector_to_irq(vector);
1538
1539 move_native_irq(vector);
1540 end_level_ioapic_irq(irq);
1541 }
1542
1543 static void mask_IO_APIC_vector (unsigned int vector)
1544 {
1545 int irq = vector_to_irq(vector);
1546
1547 mask_IO_APIC_irq(irq);
1548 }
1549
1550 static void unmask_IO_APIC_vector (unsigned int vector)
1551 {
1552 int irq = vector_to_irq(vector);
1553
1554 unmask_IO_APIC_irq(irq);
1555 }
1556
1557 #ifdef CONFIG_SMP
1558 static void set_ioapic_affinity_vector (unsigned int vector,
1559 cpumask_t cpu_mask)
1560 {
1561 int irq = vector_to_irq(vector);
1562
1563 set_native_irq_info(vector, cpu_mask);
1564 set_ioapic_affinity_irq(irq, cpu_mask);
1565 }
1566 #endif // CONFIG_SMP
1567 #endif // CONFIG_PCI_MSI
1568
1569 /*
1570 * Level and edge triggered IO-APIC interrupts need different handling,
1571 * so we use two separate IRQ descriptors. Edge triggered IRQs can be
1572 * handled with the level-triggered descriptor, but that one has slightly
1573 * more overhead. Level-triggered interrupts cannot be handled with the
1574 * edge-triggered handler, without risking IRQ storms and other ugly
1575 * races.
1576 */
1577
1578 static struct hw_interrupt_type ioapic_edge_type __read_mostly = {
1579 .typename = "IO-APIC-edge",
1580 .startup = startup_edge_ioapic,
1581 .shutdown = shutdown_edge_ioapic,
1582 .enable = enable_edge_ioapic,
1583 .disable = disable_edge_ioapic,
1584 .ack = ack_edge_ioapic,
1585 .end = end_edge_ioapic,
1586 #ifdef CONFIG_SMP
1587 .set_affinity = set_ioapic_affinity,
1588 #endif
1589 };
1590
1591 static struct hw_interrupt_type ioapic_level_type __read_mostly = {
1592 .typename = "IO-APIC-level",
1593 .startup = startup_level_ioapic,
1594 .shutdown = shutdown_level_ioapic,
1595 .enable = enable_level_ioapic,
1596 .disable = disable_level_ioapic,
1597 .ack = mask_and_ack_level_ioapic,
1598 .end = end_level_ioapic,
1599 #ifdef CONFIG_SMP
1600 .set_affinity = set_ioapic_affinity,
1601 #endif
1602 };
1603
1604 static inline void init_IO_APIC_traps(void)
1605 {
1606 int irq;
1607
1608 /*
1609 * NOTE! The local APIC isn't very good at handling
1610 * multiple interrupts at the same interrupt level.
1611 * As the interrupt level is determined by taking the
1612 * vector number and shifting that right by 4, we
1613 * want to spread these out a bit so that they don't
1614 * all fall in the same interrupt level.
1615 *
1616 * Also, we've got to be careful not to trash gate
1617 * 0x80, because int 0x80 is hm, kind of importantish. ;)
1618 */
1619 for (irq = 0; irq < NR_IRQS ; irq++) {
1620 int tmp = irq;
1621 if (use_pci_vector()) {
1622 if (!platform_legacy_irq(tmp))
1623 if ((tmp = vector_to_irq(tmp)) == -1)
1624 continue;
1625 }
1626 if (IO_APIC_IRQ(tmp) && !IO_APIC_VECTOR(tmp)) {
1627 /*
1628 * Hmm.. We don't have an entry for this,
1629 * so default to an old-fashioned 8259
1630 * interrupt if we can..
1631 */
1632 if (irq < 16)
1633 make_8259A_irq(irq);
1634 else
1635 /* Strange. Oh, well.. */
1636 irq_desc[irq].handler = &no_irq_type;
1637 }
1638 }
1639 }
1640
1641 static void enable_lapic_irq (unsigned int irq)
1642 {
1643 unsigned long v;
1644
1645 v = apic_read(APIC_LVT0);
1646 apic_write(APIC_LVT0, v & ~APIC_LVT_MASKED);
1647 }
1648
1649 static void disable_lapic_irq (unsigned int irq)
1650 {
1651 unsigned long v;
1652
1653 v = apic_read(APIC_LVT0);
1654 apic_write(APIC_LVT0, v | APIC_LVT_MASKED);
1655 }
1656
1657 static void ack_lapic_irq (unsigned int irq)
1658 {
1659 ack_APIC_irq();
1660 }
1661
1662 static void end_lapic_irq (unsigned int i) { /* nothing */ }
1663
1664 static struct hw_interrupt_type lapic_irq_type __read_mostly = {
1665 .typename = "local-APIC-edge",
1666 .startup = NULL, /* startup_irq() not used for IRQ0 */
1667 .shutdown = NULL, /* shutdown_irq() not used for IRQ0 */
1668 .enable = enable_lapic_irq,
1669 .disable = disable_lapic_irq,
1670 .ack = ack_lapic_irq,
1671 .end = end_lapic_irq,
1672 };
1673
1674 static void setup_nmi (void)
1675 {
1676 /*
1677 * Dirty trick to enable the NMI watchdog ...
1678 * We put the 8259A master into AEOI mode and
1679 * unmask on all local APICs LVT0 as NMI.
1680 *
1681 * The idea to use the 8259A in AEOI mode ('8259A Virtual Wire')
1682 * is from Maciej W. Rozycki - so we do not have to EOI from
1683 * the NMI handler or the timer interrupt.
1684 */
1685 printk(KERN_INFO "activating NMI Watchdog ...");
1686
1687 enable_NMI_through_LVT0(NULL);
1688
1689 printk(" done.\n");
1690 }
1691
1692 /*
1693 * This looks a bit hackish but it's about the only way of sending
1694 * a few INTA cycles to 8259As and any associated glue logic. ICR does
1695 * not support the ExtINT mode, unfortunately. We need to send these
1696 * cycles as some i82489DX-based boards have glue logic that keeps the
1697 * 8259A interrupt line asserted until INTA. --macro
1698 */
1699 static inline void unlock_ExtINT_logic(void)
1700 {
1701 int apic, pin, i;
1702 struct IO_APIC_route_entry entry0, entry1;
1703 unsigned char save_control, save_freq_select;
1704 unsigned long flags;
1705
1706 pin = find_isa_irq_pin(8, mp_INT);
1707 apic = find_isa_irq_apic(8, mp_INT);
1708 if (pin == -1)
1709 return;
1710
1711 spin_lock_irqsave(&ioapic_lock, flags);
1712 *(((int *)&entry0) + 1) = io_apic_read(apic, 0x11 + 2 * pin);
1713 *(((int *)&entry0) + 0) = io_apic_read(apic, 0x10 + 2 * pin);
1714 spin_unlock_irqrestore(&ioapic_lock, flags);
1715 clear_IO_APIC_pin(apic, pin);
1716
1717 memset(&entry1, 0, sizeof(entry1));
1718
1719 entry1.dest_mode = 0; /* physical delivery */
1720 entry1.mask = 0; /* unmask IRQ now */
1721 entry1.dest.physical.physical_dest = hard_smp_processor_id();
1722 entry1.delivery_mode = dest_ExtINT;
1723 entry1.polarity = entry0.polarity;
1724 entry1.trigger = 0;
1725 entry1.vector = 0;
1726
1727 spin_lock_irqsave(&ioapic_lock, flags);
1728 io_apic_write(apic, 0x11 + 2 * pin, *(((int *)&entry1) + 1));
1729 io_apic_write(apic, 0x10 + 2 * pin, *(((int *)&entry1) + 0));
1730 spin_unlock_irqrestore(&ioapic_lock, flags);
1731
1732 save_control = CMOS_READ(RTC_CONTROL);
1733 save_freq_select = CMOS_READ(RTC_FREQ_SELECT);
1734 CMOS_WRITE((save_freq_select & ~RTC_RATE_SELECT) | 0x6,
1735 RTC_FREQ_SELECT);
1736 CMOS_WRITE(save_control | RTC_PIE, RTC_CONTROL);
1737
1738 i = 100;
1739 while (i-- > 0) {
1740 mdelay(10);
1741 if ((CMOS_READ(RTC_INTR_FLAGS) & RTC_PF) == RTC_PF)
1742 i -= 10;
1743 }
1744
1745 CMOS_WRITE(save_control, RTC_CONTROL);
1746 CMOS_WRITE(save_freq_select, RTC_FREQ_SELECT);
1747 clear_IO_APIC_pin(apic, pin);
1748
1749 spin_lock_irqsave(&ioapic_lock, flags);
1750 io_apic_write(apic, 0x11 + 2 * pin, *(((int *)&entry0) + 1));
1751 io_apic_write(apic, 0x10 + 2 * pin, *(((int *)&entry0) + 0));
1752 spin_unlock_irqrestore(&ioapic_lock, flags);
1753 }
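/*
 * Note (a sketch of what the code above relies on, not from the
 * original file): the RTC is temporarily programmed for its periodic
 * interrupt (rate select 0x6, about 1 kHz) with RTC_PIE enabled, so the
 * i8259 line wired to pin 8 fires and the resulting INTA cycles release
 * any glue logic still asserting the 8259A output; the saved control
 * and frequency-select values are then restored and the pin is cleared
 * again.
 */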
1754
1755 /*
1756 * This code may look a bit paranoid, but it's supposed to cooperate with
1757 * a wide range of boards and BIOS bugs. Fortunately only the timer IRQ
1758 * is so screwy. Thanks to Brian Perkins for testing/hacking this beast
1759 * fanatically on his truly buggy board.
1760 */
1761 static inline void check_timer(void)
1762 {
1763 int apic1, pin1, apic2, pin2;
1764 int vector;
1765
1766 /*
1767 * get/set the timer IRQ vector:
1768 */
1769 disable_8259A_irq(0);
1770 vector = assign_irq_vector(0);
1771 set_intr_gate(vector, interrupt[0]);
1772
1773 /*
1774 * Subtle, code in do_timer_interrupt() expects an AEOI
1775 * mode for the 8259A whenever interrupts are routed
1776 * through I/O APICs. Also IRQ0 has to be enabled in
1777 * the 8259A which implies the virtual wire has to be
1778 * disabled in the local APIC.
1779 */
1780 apic_write(APIC_LVT0, APIC_LVT_MASKED | APIC_DM_EXTINT);
1781 init_8259A(1);
1782 enable_8259A_irq(0);
1783
1784 pin1 = find_isa_irq_pin(0, mp_INT);
1785 apic1 = find_isa_irq_apic(0, mp_INT);
1786 pin2 = ioapic_i8259.pin;
1787 apic2 = ioapic_i8259.apic;
1788
1789 apic_printk(APIC_VERBOSE,KERN_INFO "..TIMER: vector=0x%02X apic1=%d pin1=%d apic2=%d pin2=%d\n",
1790 vector, apic1, pin1, apic2, pin2);
1791
1792 if (pin1 != -1) {
1793 /*
1794 * Ok, does IRQ0 through the IOAPIC work?
1795 */
1796 unmask_IO_APIC_irq(0);
1797 if (!no_timer_check && timer_irq_works()) {
1798 nmi_watchdog_default();
1799 if (nmi_watchdog == NMI_IO_APIC) {
1800 disable_8259A_irq(0);
1801 setup_nmi();
1802 enable_8259A_irq(0);
1803 }
1804 if (disable_timer_pin_1 > 0)
1805 clear_IO_APIC_pin(0, pin1);
1806 return;
1807 }
1808 clear_IO_APIC_pin(apic1, pin1);
1809 apic_printk(APIC_QUIET,KERN_ERR "..MP-BIOS bug: 8254 timer not "
1810 "connected to IO-APIC\n");
1811 }
1812
1813 apic_printk(APIC_VERBOSE,KERN_INFO "...trying to set up timer (IRQ0) "
1814 "through the 8259A ... ");
1815 if (pin2 != -1) {
1816 apic_printk(APIC_VERBOSE,"\n..... (found apic %d pin %d) ...",
1817 apic2, pin2);
1818 /*
1819 * legacy devices should be connected to IO APIC #0
1820 */
1821 setup_ExtINT_IRQ0_pin(apic2, pin2, vector);
1822 if (timer_irq_works()) {
1823 printk("works.\n");
1824 nmi_watchdog_default();
1825 if (nmi_watchdog == NMI_IO_APIC) {
1826 setup_nmi();
1827 }
1828 return;
1829 }
1830 /*
1831 * Cleanup, just in case ...
1832 */
1833 clear_IO_APIC_pin(apic2, pin2);
1834 }
1835 printk(" failed.\n");
1836
1837 if (nmi_watchdog) {
1838 printk(KERN_WARNING "timer doesn't work through the IO-APIC - disabling NMI Watchdog!\n");
1839 nmi_watchdog = 0;
1840 }
1841
1842 apic_printk(APIC_VERBOSE, KERN_INFO "...trying to set up timer as Virtual Wire IRQ...");
1843
1844 disable_8259A_irq(0);
1845 irq_desc[0].handler = &lapic_irq_type;
1846 apic_write(APIC_LVT0, APIC_DM_FIXED | vector); /* Fixed mode */
1847 enable_8259A_irq(0);
1848
1849 if (timer_irq_works()) {
1850 apic_printk(APIC_QUIET, " works.\n");
1851 return;
1852 }
1853 apic_write(APIC_LVT0, APIC_LVT_MASKED | APIC_DM_FIXED | vector);
1854 apic_printk(APIC_VERBOSE," failed.\n");
1855
1856 apic_printk(APIC_VERBOSE, KERN_INFO "...trying to set up timer as ExtINT IRQ...");
1857
1858 init_8259A(0);
1859 make_8259A_irq(0);
1860 apic_write(APIC_LVT0, APIC_DM_EXTINT);
1861
1862 unlock_ExtINT_logic();
1863
1864 if (timer_irq_works()) {
1865 apic_printk(APIC_VERBOSE," works.\n");
1866 return;
1867 }
1868 apic_printk(APIC_VERBOSE," failed :(.\n");
1869 panic("IO-APIC + timer doesn't work! Try using the 'noapic' kernel parameter\n");
1870 }
1871
1872 static int __init notimercheck(char *s)
1873 {
1874 no_timer_check = 1;
1875 return 1;
1876 }
1877 __setup("no_timer_check", notimercheck);
1878
1879 /*
1880 *
1881 * IRQs that are handled by the PIC in the MPS IOAPIC case.
1882 * - IRQ2 is the cascade IRQ, and cannot be an io-apic IRQ.
1883 * Linux doesn't really care, as it's not actually used
1884 * for any interrupt handling anyway.
1885 */
1886 #define PIC_IRQS (1<<2)
1887
1888 void __init setup_IO_APIC(void)
1889 {
1890 enable_IO_APIC();
1891
1892 if (acpi_ioapic)
1893 io_apic_irqs = ~0; /* all IRQs go through IOAPIC */
1894 else
1895 io_apic_irqs = ~PIC_IRQS;
1896
1897 apic_printk(APIC_VERBOSE, "ENABLING IO-APIC IRQs\n");
1898
1899 /*
1900 * Set up the IO-APIC IRQ routing table.
1901 */
1902 if (!acpi_ioapic)
1903 setup_ioapic_ids_from_mpc();
1904 sync_Arb_IDs();
1905 setup_IO_APIC_irqs();
1906 init_IO_APIC_traps();
1907 check_timer();
1908 if (!acpi_ioapic)
1909 print_IO_APIC();
1910 }
1911
1912 struct sysfs_ioapic_data {
1913 struct sys_device dev;
1914 struct IO_APIC_route_entry entry[0];
1915 };
1916 static struct sysfs_ioapic_data * mp_ioapic_data[MAX_IO_APICS];
1917
1918 static int ioapic_suspend(struct sys_device *dev, pm_message_t state)
1919 {
1920 struct IO_APIC_route_entry *entry;
1921 struct sysfs_ioapic_data *data;
1922 unsigned long flags;
1923 int i;
1924
1925 data = container_of(dev, struct sysfs_ioapic_data, dev);
1926 entry = data->entry;
1927 spin_lock_irqsave(&ioapic_lock, flags);
1928 for (i = 0; i < nr_ioapic_registers[dev->id]; i ++, entry ++ ) {
1929 *(((int *)entry) + 1) = io_apic_read(dev->id, 0x11 + 2 * i);
1930 *(((int *)entry) + 0) = io_apic_read(dev->id, 0x10 + 2 * i);
1931 }
1932 spin_unlock_irqrestore(&ioapic_lock, flags);
1933
1934 return 0;
1935 }
1936
1937 static int ioapic_resume(struct sys_device *dev)
1938 {
1939 struct IO_APIC_route_entry *entry;
1940 struct sysfs_ioapic_data *data;
1941 unsigned long flags;
1942 union IO_APIC_reg_00 reg_00;
1943 int i;
1944
1945 data = container_of(dev, struct sysfs_ioapic_data, dev);
1946 entry = data->entry;
1947
1948 spin_lock_irqsave(&ioapic_lock, flags);
1949 reg_00.raw = io_apic_read(dev->id, 0);
1950 if (reg_00.bits.ID != mp_ioapics[dev->id].mpc_apicid) {
1951 reg_00.bits.ID = mp_ioapics[dev->id].mpc_apicid;
1952 io_apic_write(dev->id, 0, reg_00.raw);
1953 }
1954 for (i = 0; i < nr_ioapic_registers[dev->id]; i ++, entry ++ ) {
1955 io_apic_write(dev->id, 0x11+2*i, *(((int *)entry)+1));
1956 io_apic_write(dev->id, 0x10+2*i, *(((int *)entry)+0));
1957 }
1958 spin_unlock_irqrestore(&ioapic_lock, flags);
1959
1960 return 0;
1961 }
1962
1963 static struct sysdev_class ioapic_sysdev_class = {
1964 set_kset_name("ioapic"),
1965 .suspend = ioapic_suspend,
1966 .resume = ioapic_resume,
1967 };
1968
1969 static int __init ioapic_init_sysfs(void)
1970 {
1971 struct sys_device * dev;
1972 int i, size, error = 0;
1973
1974 error = sysdev_class_register(&ioapic_sysdev_class);
1975 if (error)
1976 return error;
1977
1978 for (i = 0; i < nr_ioapics; i++ ) {
1979 size = sizeof(struct sys_device) + nr_ioapic_registers[i]
1980 * sizeof(struct IO_APIC_route_entry);
1981 mp_ioapic_data[i] = kmalloc(size, GFP_KERNEL);
1982 if (!mp_ioapic_data[i]) {
1983 printk(KERN_ERR "Can't suspend/resume IOAPIC %d\n", i);
1984 continue;
1985 }
1986 memset(mp_ioapic_data[i], 0, size);
1987 dev = &mp_ioapic_data[i]->dev;
1988 dev->id = i;
1989 dev->cls = &ioapic_sysdev_class;
1990 error = sysdev_register(dev);
1991 if (error) {
1992 kfree(mp_ioapic_data[i]);
1993 mp_ioapic_data[i] = NULL;
1994 printk(KERN_ERR "Can't suspend/resume IOAPIC %d\n", i);
1995 continue;
1996 }
1997 }
1998
1999 return 0;
2000 }
2001
2002 device_initcall(ioapic_init_sysfs);
2003
2004 /* --------------------------------------------------------------------------
2005 ACPI-based IOAPIC Configuration
2006 -------------------------------------------------------------------------- */
2007
2008 #ifdef CONFIG_ACPI
2009
2010 #define IO_APIC_MAX_ID 0xFE
2011
2012 int __init io_apic_get_version (int ioapic)
2013 {
2014 union IO_APIC_reg_01 reg_01;
2015 unsigned long flags;
2016
2017 spin_lock_irqsave(&ioapic_lock, flags);
2018 reg_01.raw = io_apic_read(ioapic, 1);
2019 spin_unlock_irqrestore(&ioapic_lock, flags);
2020
2021 return reg_01.bits.version;
2022 }
2023
2024
2025 int __init io_apic_get_redir_entries (int ioapic)
2026 {
2027 union IO_APIC_reg_01 reg_01;
2028 unsigned long flags;
2029
2030 spin_lock_irqsave(&ioapic_lock, flags);
2031 reg_01.raw = io_apic_read(ioapic, 1);
2032 spin_unlock_irqrestore(&ioapic_lock, flags);
2033
2034 return reg_01.bits.entries;
2035 }
2036
2037
2038 int io_apic_set_pci_routing (int ioapic, int pin, int irq, int triggering, int polarity)
2039 {
2040 struct IO_APIC_route_entry entry;
2041 unsigned long flags;
2042
2043 if (!IO_APIC_IRQ(irq)) {
2044 apic_printk(APIC_QUIET,KERN_ERR "IOAPIC[%d]: Invalid reference to IRQ 0\n",
2045 ioapic);
2046 return -EINVAL;
2047 }
2048
2049 /*
2050 * Generate a PCI IRQ routing entry and program the IOAPIC accordingly.
2051 * Note that we mask (disable) IRQs now -- these get enabled when the
2052 * corresponding device driver registers for this IRQ.
2053 */
2054
2055 memset(&entry,0,sizeof(entry));
2056
2057 entry.delivery_mode = INT_DELIVERY_MODE;
2058 entry.dest_mode = INT_DEST_MODE;
2059 entry.dest.logical.logical_dest = cpu_mask_to_apicid(TARGET_CPUS);
2060 entry.trigger = triggering;
2061 entry.polarity = polarity;
2062 entry.mask = 1; /* Disabled (masked) */
2063
2064 irq = gsi_irq_sharing(irq);
2065 /*
2066 * IRQs < 16 are already in the irq_2_pin[] map
2067 */
2068 if (irq >= 16)
2069 add_pin_to_irq(irq, ioapic, pin);
2070
2071 entry.vector = assign_irq_vector(irq);
2072
2073 apic_printk(APIC_VERBOSE,KERN_DEBUG "IOAPIC[%d]: Set PCI routing entry (%d-%d -> 0x%x -> "
2074 "IRQ %d Mode:%i Active:%i)\n", ioapic,
2075 mp_ioapics[ioapic].mpc_apicid, pin, entry.vector, irq,
2076 triggering, polarity);
2077
2078 ioapic_register_intr(irq, entry.vector, triggering);
2079
2080 if (!ioapic && (irq < 16))
2081 disable_8259A_irq(irq);
2082
2083 spin_lock_irqsave(&ioapic_lock, flags);
2084 io_apic_write(ioapic, 0x11+2*pin, *(((int *)&entry)+1));
2085 io_apic_write(ioapic, 0x10+2*pin, *(((int *)&entry)+0));
2086 set_native_irq_info(use_pci_vector() ? entry.vector : irq, TARGET_CPUS);
2087 spin_unlock_irqrestore(&ioapic_lock, flags);
2088
2089 return 0;
2090 }
2091
2092 #endif /* CONFIG_ACPI */
2093
2094
2095 /*
2096 * This function is currently only a helper for the i386 SMP boot process, where
2097 * we need to reprogram the ioredtbls to cater for the CPUs which have come online,
2098 * so the mask in all cases should simply be TARGET_CPUS.
2099 */
2100 #ifdef CONFIG_SMP
2101 void __init setup_ioapic_dest(void)
2102 {
2103 int pin, ioapic, irq, irq_entry;
2104
2105 if (skip_ioapic_setup == 1)
2106 return;
2107
2108 for (ioapic = 0; ioapic < nr_ioapics; ioapic++) {
2109 for (pin = 0; pin < nr_ioapic_registers[ioapic]; pin++) {
2110 irq_entry = find_irq_entry(ioapic, pin, mp_INT);
2111 if (irq_entry == -1)
2112 continue;
2113 irq = pin_2_irq(irq_entry, ioapic, pin);
2114 set_ioapic_affinity_irq(irq, TARGET_CPUS);
2115 }
2116
2117 }
2118 }
2119 #endif