arch/i386/kernel/acpi/boot.c
1 /*
2 * boot.c - Architecture-Specific Low-Level ACPI Boot Support
3 *
4 * Copyright (C) 2001, 2002 Paul Diefenbaugh <paul.s.diefenbaugh@intel.com>
5 * Copyright (C) 2001 Jun Nakajima <jun.nakajima@intel.com>
6 *
7 * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
8 *
9 * This program is free software; you can redistribute it and/or modify
10 * it under the terms of the GNU General Public License as published by
11 * the Free Software Foundation; either version 2 of the License, or
12 * (at your option) any later version.
13 *
14 * This program is distributed in the hope that it will be useful,
15 * but WITHOUT ANY WARRANTY; without even the implied warranty of
16 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
17 * GNU General Public License for more details.
18 *
19 * You should have received a copy of the GNU General Public License
20 * along with this program; if not, write to the Free Software
21 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
22 *
23 * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
24 */
25
26 #include <linux/init.h>
27 #include <linux/acpi.h>
28 #include <linux/efi.h>
29 #include <linux/cpumask.h>
30 #include <linux/module.h>
31 #include <linux/dmi.h>
32 #include <linux/irq.h>
33 #include <linux/bootmem.h>
34 #include <linux/ioport.h>
35
36 #include <asm/pgtable.h>
37 #include <asm/io_apic.h>
38 #include <asm/apic.h>
39 #include <asm/io.h>
40 #include <asm/mpspec.h>
41
42 static int __initdata acpi_force = 0;
43
44 #ifdef CONFIG_ACPI
45 int acpi_disabled = 0;
46 #else
47 int acpi_disabled = 1;
48 #endif
49 EXPORT_SYMBOL(acpi_disabled);
50
51 #ifdef CONFIG_X86_64
52
53 #include <asm/proto.h>
54
55 static inline int acpi_madt_oem_check(char *oem_id, char *oem_table_id) { return 0; }
56
57
58 #else /* X86 */
59
60 #ifdef CONFIG_X86_LOCAL_APIC
61 #include <mach_apic.h>
62 #include <mach_mpparse.h>
63 #endif /* CONFIG_X86_LOCAL_APIC */
64
65 #endif /* X86 */
66
67 #define BAD_MADT_ENTRY(entry, end) ( \
68 (!entry) || (unsigned long)entry + sizeof(*entry) > end || \
69 ((acpi_table_entry_header *)entry)->length < sizeof(*entry))
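/*
 * Reading the macro above, an MADT entry is rejected when (a) the pointer
 * is NULL, (b) the fixed-size part of the entry would run past the end of
 * the mapped table, or (c) the length recorded in the entry's own header
 * is smaller than the C structure it is being interpreted as -- e.g. a
 * hypothetical LAPIC entry whose header claims a 4-byte length.
 */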
70
71 #define PREFIX "ACPI: "
72
73 int acpi_noirq __initdata; /* skip ACPI IRQ initialization */
74 int acpi_pci_disabled __initdata; /* skip ACPI PCI scan and IRQ initialization */
75 int acpi_ht __initdata = 1; /* enable HT */
76
77 int acpi_lapic;
78 int acpi_ioapic;
79 int acpi_strict;
80 EXPORT_SYMBOL(acpi_strict);
81
82 acpi_interrupt_flags acpi_sci_flags __initdata;
83 int acpi_sci_override_gsi __initdata;
84 int acpi_skip_timer_override __initdata;
85
86 #ifdef CONFIG_X86_LOCAL_APIC
87 static u64 acpi_lapic_addr __initdata = APIC_DEFAULT_PHYS_BASE;
88 #endif
89
90 #ifndef __HAVE_ARCH_CMPXCHG
91 #warning ACPI requires the CMPXCHG instruction (i486 or later hardware)
92 #endif
93
94 #define MAX_MADT_ENTRIES 256
95 u8 x86_acpiid_to_apicid[MAX_MADT_ENTRIES] =
96 {[0 ... MAX_MADT_ENTRIES - 1] = 0xff };
97 EXPORT_SYMBOL(x86_acpiid_to_apicid);
98
99 /* --------------------------------------------------------------------------
100 Boot-time Configuration
101 -------------------------------------------------------------------------- */
102
103 /*
104 * The default interrupt routing model is PIC (8259). This gets
105 * overridden if IOAPICs are enumerated (below).
106 */
107 enum acpi_irq_model_id acpi_irq_model = ACPI_IRQ_MODEL_PIC;
108
109 #ifdef CONFIG_X86_64
110
111 /* rely on all ACPI tables being in the direct mapping */
112 char *__acpi_map_table(unsigned long phys_addr, unsigned long size)
113 {
114 if (!phys_addr || !size)
115 return NULL;
116
117 if (phys_addr+size <= (end_pfn_map << PAGE_SHIFT) + PAGE_SIZE)
118 return __va(phys_addr);
119
120 return NULL;
121 }
122
123 #else
124
125 /*
126 * Temporarily use the fixmap area starting from FIX_ACPI_END to map the
127 * target physical address. The problem is that set_fixmap()
128 * provides a single page, and it is possible that a single page is
129 * not sufficient.
130 * By using this area, we can map up to (FIX_ACPI_END - FIX_ACPI_BEGIN + 1)
131 * pages temporarily, i.e. until the next __acpi_map_table() call.
132 *
133 * Important Safety Note: the fixed ACPI page indices are *subtracted*
134 * from the fixed base. That's why we start at FIX_ACPI_END and
135 * count idx down while incrementing the phys address.
136 */
137 char *__acpi_map_table(unsigned long phys, unsigned long size)
138 {
139 unsigned long base, offset, mapped_size;
140 int idx;
141
142 if (phys + size < 8 * 1024 * 1024)
143 return __va(phys);
144
145 offset = phys & (PAGE_SIZE - 1);
146 mapped_size = PAGE_SIZE - offset;
147 set_fixmap(FIX_ACPI_END, phys);
148 base = fix_to_virt(FIX_ACPI_END);
149
150 /*
151 * Most cases can be covered by the below.
152 */
153 idx = FIX_ACPI_END;
154 while (mapped_size < size) {
155 if (--idx < FIX_ACPI_BEGIN)
156 return NULL; /* cannot handle this */
157 phys += PAGE_SIZE;
158 set_fixmap(idx, phys);
159 mapped_size += PAGE_SIZE;
160 }
161
162 return ((unsigned char *)base + offset);
163 }
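/*
 * A worked example of the function above (purely illustrative, assuming
 * PAGE_SIZE == 4096 and at least two FIX_ACPI_* slots): mapping a table
 * at phys == 0x7fe0ffe0 with size == 0x30 gives
 *
 *	offset      = 0x7fe0ffe0 & 0xfff = 0xfe0
 *	mapped_size = 0x1000 - 0xfe0     = 0x20
 *
 * Since mapped_size < size, the loop maps one more page at index
 * FIX_ACPI_END - 1, after which mapped_size = 0x1020 covers the table.
 * The caller gets fix_to_virt(FIX_ACPI_END) + 0xfe0 and must be done with
 * the table before the next __acpi_map_table() call reuses these slots.
 */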
164 #endif
165
166 #ifdef CONFIG_PCI_MMCONFIG
167 /* The physical address of the MMCONFIG aperture. Set from ACPI tables. */
168 struct acpi_table_mcfg_config *pci_mmcfg_config;
169 int pci_mmcfg_config_num;
170
171 int __init acpi_parse_mcfg(unsigned long phys_addr, unsigned long size)
172 {
173 struct acpi_table_mcfg *mcfg;
174 unsigned long i;
175 int config_size;
176
177 if (!phys_addr || !size)
178 return -EINVAL;
179
180 mcfg = (struct acpi_table_mcfg *)__acpi_map_table(phys_addr, size);
181 if (!mcfg) {
182 printk(KERN_WARNING PREFIX "Unable to map MCFG\n");
183 return -ENODEV;
184 }
185
186 /* how many config structures do we have */
187 pci_mmcfg_config_num = 0;
188 i = size - sizeof(struct acpi_table_mcfg);
189 while (i >= sizeof(struct acpi_table_mcfg_config)) {
190 ++pci_mmcfg_config_num;
191 i -= sizeof(struct acpi_table_mcfg_config);
192 }
193 if (pci_mmcfg_config_num == 0) {
194 printk(KERN_ERR PREFIX "MMCONFIG has no entries\n");
195 return -ENODEV;
196 }
197
198 config_size = pci_mmcfg_config_num * sizeof(*pci_mmcfg_config);
199 pci_mmcfg_config = kmalloc(config_size, GFP_KERNEL);
200 if (!pci_mmcfg_config) {
201 printk(KERN_WARNING PREFIX
202 "No memory for MCFG config tables\n");
203 return -ENOMEM;
204 }
205
206 memcpy(pci_mmcfg_config, &mcfg->config, config_size);
207 for (i = 0; i < pci_mmcfg_config_num; ++i) {
208 if (mcfg->config[i].base_reserved) {
209 printk(KERN_ERR PREFIX
210 "MMCONFIG not in low 4GB of memory\n");
211 kfree(pci_mmcfg_config);
212 pci_mmcfg_config_num = 0;
213 return -ENODEV;
214 }
215 }
216
217 return 0;
218 }
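/*
 * As a sanity check on the arithmetic above (illustrative only): an MCFG
 * table whose total size is
 *
 *	sizeof(struct acpi_table_mcfg) + 2 * sizeof(struct acpi_table_mcfg_config)
 *
 * yields pci_mmcfg_config_num == 2, and config_size is then exactly the
 * space needed to copy both entries out of the table before it is unmapped.
 */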
219 #endif /* CONFIG_PCI_MMCONFIG */
220
221 #ifdef CONFIG_X86_LOCAL_APIC
222 static int __init acpi_parse_madt(unsigned long phys_addr, unsigned long size)
223 {
224 struct acpi_table_madt *madt = NULL;
225
226 if (!phys_addr || !size || !cpu_has_apic)
227 return -EINVAL;
228
229 madt = (struct acpi_table_madt *)__acpi_map_table(phys_addr, size);
230 if (!madt) {
231 printk(KERN_WARNING PREFIX "Unable to map MADT\n");
232 return -ENODEV;
233 }
234
235 if (madt->lapic_address) {
236 acpi_lapic_addr = (u64) madt->lapic_address;
237
238 printk(KERN_DEBUG PREFIX "Local APIC address 0x%08x\n",
239 madt->lapic_address);
240 }
241
242 acpi_madt_oem_check(madt->header.oem_id, madt->header.oem_table_id);
243
244 return 0;
245 }
246
247 static int __init
248 acpi_parse_lapic(acpi_table_entry_header * header, const unsigned long end)
249 {
250 struct acpi_table_lapic *processor = NULL;
251
252 processor = (struct acpi_table_lapic *)header;
253
254 if (BAD_MADT_ENTRY(processor, end))
255 return -EINVAL;
256
257 acpi_table_print_madt_entry(header);
258
259 /* Record local apic id only when enabled */
260 if (processor->flags.enabled)
261 x86_acpiid_to_apicid[processor->acpi_id] = processor->id;
262
263 /*
264 * We need to register disabled CPUs as well, to permit
265 * counting them. This allows us to size
266 * cpus_possible_map more accurately, so that we do not
267 * have to preallocate memory for all NR_CPUS
268 * when we use CPU hotplug.
269 */
270 mp_register_lapic(processor->id, /* APIC ID */
271 processor->flags.enabled); /* Enabled? */
272
273 return 0;
274 }
275
276 static int __init
277 acpi_parse_lapic_addr_ovr(acpi_table_entry_header * header,
278 const unsigned long end)
279 {
280 struct acpi_table_lapic_addr_ovr *lapic_addr_ovr = NULL;
281
282 lapic_addr_ovr = (struct acpi_table_lapic_addr_ovr *)header;
283
284 if (BAD_MADT_ENTRY(lapic_addr_ovr, end))
285 return -EINVAL;
286
287 acpi_lapic_addr = lapic_addr_ovr->address;
288
289 return 0;
290 }
291
292 static int __init
293 acpi_parse_lapic_nmi(acpi_table_entry_header * header, const unsigned long end)
294 {
295 struct acpi_table_lapic_nmi *lapic_nmi = NULL;
296
297 lapic_nmi = (struct acpi_table_lapic_nmi *)header;
298
299 if (BAD_MADT_ENTRY(lapic_nmi, end))
300 return -EINVAL;
301
302 acpi_table_print_madt_entry(header);
303
304 if (lapic_nmi->lint != 1)
305 printk(KERN_WARNING PREFIX "NMI not connected to LINT 1!\n");
306
307 return 0;
308 }
309
310 #endif /*CONFIG_X86_LOCAL_APIC */
311
312 #ifdef CONFIG_X86_IO_APIC
313
314 static int __init
315 acpi_parse_ioapic(acpi_table_entry_header * header, const unsigned long end)
316 {
317 struct acpi_table_ioapic *ioapic = NULL;
318
319 ioapic = (struct acpi_table_ioapic *)header;
320
321 if (BAD_MADT_ENTRY(ioapic, end))
322 return -EINVAL;
323
324 acpi_table_print_madt_entry(header);
325
326 mp_register_ioapic(ioapic->id,
327 ioapic->address, ioapic->global_irq_base);
328
329 return 0;
330 }
331
332 /*
333 * Parse Interrupt Source Override for the ACPI SCI
334 */
335 static void acpi_sci_ioapic_setup(u32 gsi, u16 polarity, u16 trigger)
336 {
337 if (trigger == 0) /* compatible SCI trigger is level */
338 trigger = 3;
339
340 if (polarity == 0) /* compatible SCI polarity is low */
341 polarity = 3;
342
343 /* Command-line over-ride via acpi_sci= */
344 if (acpi_sci_flags.trigger)
345 trigger = acpi_sci_flags.trigger;
346
347 if (acpi_sci_flags.polarity)
348 polarity = acpi_sci_flags.polarity;
349
350 /*
351 * mp_config_acpi_legacy_irqs() has already set up IRQs < 16.
352 * If GSI is < 16, this will update its flags,
353 * else it will create a new mp_irqs[] entry.
354 */
355 mp_override_legacy_irq(gsi, polarity, trigger, gsi);
356
357 /*
358 * stash over-ride to indicate we've been here
359 * and for later update of acpi_fadt
360 */
361 acpi_sci_override_gsi = gsi;
362 return;
363 }
364
365 static int __init
366 acpi_parse_int_src_ovr(acpi_table_entry_header * header,
367 const unsigned long end)
368 {
369 struct acpi_table_int_src_ovr *intsrc = NULL;
370
371 intsrc = (struct acpi_table_int_src_ovr *)header;
372
373 if (BAD_MADT_ENTRY(intsrc, end))
374 return -EINVAL;
375
376 acpi_table_print_madt_entry(header);
377
378 if (intsrc->bus_irq == acpi_fadt.sci_int) {
379 acpi_sci_ioapic_setup(intsrc->global_irq,
380 intsrc->flags.polarity,
381 intsrc->flags.trigger);
382 return 0;
383 }
384
385 if (acpi_skip_timer_override &&
386 intsrc->bus_irq == 0 && intsrc->global_irq == 2) {
387 printk(PREFIX "BIOS IRQ0 pin2 override ignored.\n");
388 return 0;
389 }
390
391 mp_override_legacy_irq(intsrc->bus_irq,
392 intsrc->flags.polarity,
393 intsrc->flags.trigger, intsrc->global_irq);
394
395 return 0;
396 }
397
398 static int __init
399 acpi_parse_nmi_src(acpi_table_entry_header * header, const unsigned long end)
400 {
401 struct acpi_table_nmi_src *nmi_src = NULL;
402
403 nmi_src = (struct acpi_table_nmi_src *)header;
404
405 if (BAD_MADT_ENTRY(nmi_src, end))
406 return -EINVAL;
407
408 acpi_table_print_madt_entry(header);
409
410 /* TBD: Support nmi_src entries? */
411
412 return 0;
413 }
414
415 #endif /* CONFIG_X86_IO_APIC */
416
417 /*
418 * acpi_pic_sci_set_trigger()
419 *
420 * use ELCR to set PIC-mode trigger type for SCI
421 *
422 * If a PIC-mode SCI is not recognized or gives spurious IRQ7s,
423 * it may require edge trigger -- use "acpi_sci=edge".
424 *
425 * Ports 0x4d0-0x4d1 are ELCR1 and ELCR2, the Edge/Level Control Registers
426 * for the 8259 PIC. bit[n] = 1 means irq[n] is level-triggered, otherwise edge.
427 * ELCR1 covers IRQs 0-7 (IRQ 0, 1, 2 must be 0)
428 * ELCR2 covers IRQs 8-15 (IRQ 8, 13 must be 0)
429 */
430
431 void __init acpi_pic_sci_set_trigger(unsigned int irq, u16 trigger)
432 {
433 unsigned int mask = 1 << irq;
434 unsigned int old, new;
435
436 /* Read the old ELCR mask */
437 old = inb(0x4d0) | (inb(0x4d1) << 8);
438
439 /*
440 * If we use ACPI to set PCI IRQs, then we should clear the ELCR,
441 * since we will set it correctly as we enable the PCI IRQ
442 * routing.
443 */
444 new = acpi_noirq ? old : 0;
445
446 /*
447 * Update the SCI information in the ELCR; it isn't in the PCI
448 * routing tables.
449 */
450 switch (trigger) {
451 case 1: /* Edge - clear */
452 new &= ~mask;
453 break;
454 case 3: /* Level - set */
455 new |= mask;
456 break;
457 }
458
459 if (old == new)
460 return;
461
462 printk(PREFIX "setting ELCR to %04x (from %04x)\n", new, old);
463 outb(new, 0x4d0);
464 outb(new >> 8, 0x4d1);
465 }
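/*
 * A minimal sketch of going the other way (reading, rather than setting,
 * the ELCR); this helper is hypothetical and not used in this file:
 *
 *	static int elcr_irq_is_level(unsigned int irq)
 *	{
 *		unsigned int elcr = inb(0x4d0) | (inb(0x4d1) << 8);
 *
 *		return (elcr >> irq) & 1;
 *	}
 *
 * A return value of 1 means the IRQ is level-triggered, 0 means edge.
 */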
466
467 int acpi_gsi_to_irq(u32 gsi, unsigned int *irq)
468 {
469 *irq = gsi;
470 return 0;
471 }
472
473 /*
474 * success: return IRQ number (>=0)
475 * failure: return < 0
476 */
477 int acpi_register_gsi(u32 gsi, int triggering, int polarity)
478 {
479 unsigned int irq;
480 unsigned int plat_gsi = gsi;
481
482 #ifdef CONFIG_PCI
483 /*
484 * Make sure all (legacy) PCI IRQs are set as level-triggered.
485 */
486 if (acpi_irq_model == ACPI_IRQ_MODEL_PIC) {
487 extern void eisa_set_level_irq(unsigned int irq);
488
489 if (triggering == ACPI_LEVEL_SENSITIVE)
490 eisa_set_level_irq(gsi);
491 }
492 #endif
493
494 #ifdef CONFIG_X86_IO_APIC
495 if (acpi_irq_model == ACPI_IRQ_MODEL_IOAPIC) {
496 plat_gsi = mp_register_gsi(gsi, triggering, polarity);
497 }
498 #endif
499 acpi_gsi_to_irq(plat_gsi, &irq);
500 return irq;
501 }
502
503 EXPORT_SYMBOL(acpi_register_gsi);
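/*
 * A hypothetical caller (illustrative sketch only; my_handler, "mydev" and
 * dev are placeholders) that has read a level-triggered, active-low GSI
 * from an ACPI resource would typically do:
 *
 *	int irq = acpi_register_gsi(gsi, ACPI_LEVEL_SENSITIVE,
 *				    ACPI_ACTIVE_LOW);
 *	if (irq < 0)
 *		return irq;
 *	return request_irq(irq, my_handler, IRQF_SHARED, "mydev", dev);
 */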
504
505 /*
506 * ACPI based hotplug support for CPU
507 */
508 #ifdef CONFIG_ACPI_HOTPLUG_CPU
509 int acpi_map_lsapic(acpi_handle handle, int *pcpu)
510 {
511 struct acpi_buffer buffer = { ACPI_ALLOCATE_BUFFER, NULL };
512 union acpi_object *obj;
513 struct acpi_table_lapic *lapic;
514 cpumask_t tmp_map, new_map;
515 u8 physid;
516 int cpu;
517
518 if (ACPI_FAILURE(acpi_evaluate_object(handle, "_MAT", NULL, &buffer)))
519 return -EINVAL;
520
521 if (!buffer.length || !buffer.pointer)
522 return -EINVAL;
523
524 obj = buffer.pointer;
525 if (obj->type != ACPI_TYPE_BUFFER ||
526 obj->buffer.length < sizeof(*lapic)) {
527 kfree(buffer.pointer);
528 return -EINVAL;
529 }
530
531 lapic = (struct acpi_table_lapic *)obj->buffer.pointer;
532
533 if ((lapic->header.type != ACPI_MADT_LAPIC) ||
534 (!lapic->flags.enabled)) {
535 kfree(buffer.pointer);
536 return -EINVAL;
537 }
538
539 physid = lapic->id;
540
541 kfree(buffer.pointer);
542 buffer.length = ACPI_ALLOCATE_BUFFER;
543 buffer.pointer = NULL;
544
545 tmp_map = cpu_present_map;
546 mp_register_lapic(physid, lapic->flags.enabled);
547
548 /*
549 * If mp_register_lapic successfully generates a new logical cpu
550 * number, then the following will get us exactly what was mapped
551 */
552 cpus_andnot(new_map, cpu_present_map, tmp_map);
553 if (cpus_empty(new_map)) {
554 printk(KERN_ERR "Unable to map lapic to logical cpu number\n");
555 return -EINVAL;
556 }
557
558 cpu = first_cpu(new_map);
559
560 *pcpu = cpu;
561 return 0;
562 }
563
564 EXPORT_SYMBOL(acpi_map_lsapic);
565
566 int acpi_unmap_lsapic(int cpu)
567 {
568 int i;
569
570 for_each_possible_cpu(i) {
571 if (x86_acpiid_to_apicid[i] == x86_cpu_to_apicid[cpu]) {
572 x86_acpiid_to_apicid[i] = -1;
573 break;
574 }
575 }
576 x86_cpu_to_apicid[cpu] = -1;
577 cpu_clear(cpu, cpu_present_map);
578 num_processors--;
579
580 return (0);
581 }
582
583 EXPORT_SYMBOL(acpi_unmap_lsapic);
584 #endif /* CONFIG_ACPI_HOTPLUG_CPU */
585
586 int acpi_register_ioapic(acpi_handle handle, u64 phys_addr, u32 gsi_base)
587 {
588 /* TBD */
589 return -EINVAL;
590 }
591
592 EXPORT_SYMBOL(acpi_register_ioapic);
593
594 int acpi_unregister_ioapic(acpi_handle handle, u32 gsi_base)
595 {
596 /* TBD */
597 return -EINVAL;
598 }
599
600 EXPORT_SYMBOL(acpi_unregister_ioapic);
601
602 static unsigned long __init
603 acpi_scan_rsdp(unsigned long start, unsigned long length)
604 {
605 unsigned long offset = 0;
606 unsigned long sig_len = sizeof("RSD PTR ") - 1;
607
608 /*
609 * Scan all 16-byte boundaries of the physical memory region for the
610 * RSDP signature.
611 */
612 for (offset = 0; offset < length; offset += 16) {
613 if (strncmp((char *)(phys_to_virt(start) + offset), "RSD PTR ", sig_len))
614 continue;
615 return (start + offset);
616 }
617
618 return 0;
619 }
620
621 static int __init acpi_parse_sbf(unsigned long phys_addr, unsigned long size)
622 {
623 struct acpi_table_sbf *sb;
624
625 if (!phys_addr || !size)
626 return -EINVAL;
627
628 sb = (struct acpi_table_sbf *)__acpi_map_table(phys_addr, size);
629 if (!sb) {
630 printk(KERN_WARNING PREFIX "Unable to map SBF\n");
631 return -ENODEV;
632 }
633
634 sbf_port = sb->sbf_cmos; /* Save CMOS port */
635
636 return 0;
637 }
638
639 #ifdef CONFIG_HPET_TIMER
640
641 static int __init acpi_parse_hpet(unsigned long phys, unsigned long size)
642 {
643 struct acpi_table_hpet *hpet_tbl;
644 struct resource *hpet_res;
645 resource_size_t res_start;
646
647 if (!phys || !size)
648 return -EINVAL;
649
650 hpet_tbl = (struct acpi_table_hpet *)__acpi_map_table(phys, size);
651 if (!hpet_tbl) {
652 printk(KERN_WARNING PREFIX "Unable to map HPET\n");
653 return -ENODEV;
654 }
655
656 if (hpet_tbl->addr.space_id != ACPI_SPACE_MEM) {
657 printk(KERN_WARNING PREFIX "HPET timers must be located in "
658 "memory.\n");
659 return -1;
660 }
661
662 #define HPET_RESOURCE_NAME_SIZE 9
663 hpet_res = alloc_bootmem(sizeof(*hpet_res) + HPET_RESOURCE_NAME_SIZE);
664 if (hpet_res) {
665 memset(hpet_res, 0, sizeof(*hpet_res));
666 hpet_res->name = (void *)&hpet_res[1];
667 hpet_res->flags = IORESOURCE_MEM | IORESOURCE_BUSY;
668 snprintf((char *)hpet_res->name, HPET_RESOURCE_NAME_SIZE,
669 "HPET %u", hpet_tbl->number);
670 hpet_res->end = (1 * 1024) - 1;
671 }
672
673 #ifdef CONFIG_X86_64
674 vxtime.hpet_address = hpet_tbl->addr.addrl |
675 ((long)hpet_tbl->addr.addrh << 32);
676
677 printk(KERN_INFO PREFIX "HPET id: %#x base: %#lx\n",
678 hpet_tbl->id, vxtime.hpet_address);
679
680 res_start = vxtime.hpet_address;
681 #else /* X86 */
682 {
683 extern unsigned long hpet_address;
684
685 hpet_address = hpet_tbl->addr.addrl;
686 printk(KERN_INFO PREFIX "HPET id: %#x base: %#lx\n",
687 hpet_tbl->id, hpet_address);
688
689 res_start = hpet_address;
690 }
691 #endif /* X86 */
692
693 if (hpet_res) {
694 hpet_res->start = res_start;
695 hpet_res->end += res_start;
696 insert_resource(&iomem_resource, hpet_res);
697 }
698
699 return 0;
700 }
701 #else
702 #define acpi_parse_hpet NULL
703 #endif
704
705 #ifdef CONFIG_X86_PM_TIMER
706 extern u32 pmtmr_ioport;
707 #endif
708
709 static int __init acpi_parse_fadt(unsigned long phys, unsigned long size)
710 {
711 struct fadt_descriptor *fadt = NULL;
712
713 fadt = (struct fadt_descriptor *)__acpi_map_table(phys, size);
714 if (!fadt) {
715 printk(KERN_WARNING PREFIX "Unable to map FADT\n");
716 return 0;
717 }
718 /* initialize sci_int early for INT_SRC_OVR MADT parsing */
719 acpi_fadt.sci_int = fadt->sci_int;
720
721 /* initialize rev and apic_phys_dest_mode for x86_64 genapic */
722 acpi_fadt.revision = fadt->revision;
723 acpi_fadt.force_apic_physical_destination_mode =
724 fadt->force_apic_physical_destination_mode;
725
726 #ifdef CONFIG_X86_PM_TIMER
727 /* detect the location of the ACPI PM Timer */
728 if (fadt->revision >= FADT2_REVISION_ID) {
729 /* FADT rev. 2 */
730 if (fadt->xpm_tmr_blk.address_space_id !=
731 ACPI_ADR_SPACE_SYSTEM_IO)
732 return 0;
733
734 pmtmr_ioport = fadt->xpm_tmr_blk.address;
735 /*
736 * "X" fields are optional extensions to the original V1.0
737 * fields, so we must selectively expand V1.0 fields if the
738 * corresponding X field is zero.
739 */
740 if (!pmtmr_ioport)
741 pmtmr_ioport = fadt->V1_pm_tmr_blk;
742 } else {
743 /* FADT rev. 1 */
744 pmtmr_ioport = fadt->V1_pm_tmr_blk;
745 }
746 if (pmtmr_ioport)
747 printk(KERN_INFO PREFIX "PM-Timer IO Port: %#x\n",
748 pmtmr_ioport);
749 #endif
750 return 0;
751 }
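/*
 * Example of the fallback above (illustrative values only): a rev-2 FADT
 * that leaves xpm_tmr_blk.address at 0 but reports V1_pm_tmr_blk == 0x408
 * still ends up with pmtmr_ioport == 0x408; only an X block that is not in
 * SYSTEM_IO space makes us give up on the PM timer entirely.
 */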
752
753 unsigned long __init acpi_find_rsdp(void)
754 {
755 unsigned long rsdp_phys = 0;
756
757 if (efi_enabled) {
758 if (efi.acpi20 != EFI_INVALID_TABLE_ADDR)
759 return efi.acpi20;
760 else if (efi.acpi != EFI_INVALID_TABLE_ADDR)
761 return efi.acpi;
762 }
763 /*
764 * Scan memory looking for the RSDP signature. First search the first
765 * KB of low memory, then search upper memory (0xE0000-0xFFFFF).
766 */
767 rsdp_phys = acpi_scan_rsdp(0, 0x400);
768 if (!rsdp_phys)
769 rsdp_phys = acpi_scan_rsdp(0xE0000, 0x20000);
770
771 return rsdp_phys;
772 }
773
774 #ifdef CONFIG_X86_LOCAL_APIC
775 /*
776 * Parse LAPIC entries in MADT
777 * returns 0 on success, < 0 on error
778 */
779 static int __init acpi_parse_madt_lapic_entries(void)
780 {
781 int count;
782
783 if (!cpu_has_apic)
784 return -ENODEV;
785
786 /*
787 * Note that the LAPIC address is obtained from the MADT (32-bit value)
788 * and (optionally) overridden by a LAPIC_ADDR_OVR entry (64-bit value).
789 */
790
791 count =
792 acpi_table_parse_madt(ACPI_MADT_LAPIC_ADDR_OVR,
793 acpi_parse_lapic_addr_ovr, 0);
794 if (count < 0) {
795 printk(KERN_ERR PREFIX
796 "Error parsing LAPIC address override entry\n");
797 return count;
798 }
799
800 mp_register_lapic_address(acpi_lapic_addr);
801
802 count = acpi_table_parse_madt(ACPI_MADT_LAPIC, acpi_parse_lapic,
803 MAX_APICS);
804 if (!count) {
805 printk(KERN_ERR PREFIX "No LAPIC entries present\n");
806 /* TBD: Cleanup to allow fallback to MPS */
807 return -ENODEV;
808 } else if (count < 0) {
809 printk(KERN_ERR PREFIX "Error parsing LAPIC entry\n");
810 /* TBD: Cleanup to allow fallback to MPS */
811 return count;
812 }
813
814 count =
815 acpi_table_parse_madt(ACPI_MADT_LAPIC_NMI, acpi_parse_lapic_nmi, 0);
816 if (count < 0) {
817 printk(KERN_ERR PREFIX "Error parsing LAPIC NMI entry\n");
818 /* TBD: Cleanup to allow fallback to MPS */
819 return count;
820 }
821 return 0;
822 }
823 #endif /* CONFIG_X86_LOCAL_APIC */
824
825 #ifdef CONFIG_X86_IO_APIC
826 /*
827 * Parse IOAPIC related entries in MADT
828 * returns 0 on success, < 0 on error
829 */
830 static int __init acpi_parse_madt_ioapic_entries(void)
831 {
832 int count;
833
834 /*
835 * The ACPI interpreter is required to complete interrupt setup,
836 * so if it is off, don't enumerate the io-apics with ACPI.
837 * If MPS is present, it will handle them;
838 * otherwise the system will stay in PIC mode.
839 */
840 if (acpi_disabled || acpi_noirq) {
841 return -ENODEV;
842 }
843
844 if (!cpu_has_apic)
845 return -ENODEV;
846
847 /*
848 * if "noapic" boot option, don't look for IO-APICs
849 */
850 if (skip_ioapic_setup) {
851 printk(KERN_INFO PREFIX "Skipping IOAPIC probe "
852 "due to 'noapic' option.\n");
853 return -ENODEV;
854 }
855
856 count =
857 acpi_table_parse_madt(ACPI_MADT_IOAPIC, acpi_parse_ioapic,
858 MAX_IO_APICS);
859 if (!count) {
860 printk(KERN_ERR PREFIX "No IOAPIC entries present\n");
861 return -ENODEV;
862 } else if (count < 0) {
863 printk(KERN_ERR PREFIX "Error parsing IOAPIC entry\n");
864 return count;
865 }
866
867 count =
868 acpi_table_parse_madt(ACPI_MADT_INT_SRC_OVR, acpi_parse_int_src_ovr,
869 NR_IRQ_VECTORS);
870 if (count < 0) {
871 printk(KERN_ERR PREFIX
872 "Error parsing interrupt source overrides entry\n");
873 /* TBD: Cleanup to allow fallback to MPS */
874 return count;
875 }
876
877 /*
878 * If the BIOS did not supply an INT_SRC_OVR for the SCI,
879 * pretend we got one so we can set the SCI flags.
880 */
881 if (!acpi_sci_override_gsi)
882 acpi_sci_ioapic_setup(acpi_fadt.sci_int, 0, 0);
883
884 /* Fill in identity legacy mappings where there is no override */
885 mp_config_acpi_legacy_irqs();
886
887 count =
888 acpi_table_parse_madt(ACPI_MADT_NMI_SRC, acpi_parse_nmi_src,
889 NR_IRQ_VECTORS);
890 if (count < 0) {
891 printk(KERN_ERR PREFIX "Error parsing NMI SRC entry\n");
892 /* TBD: Cleanup to allow fallback to MPS */
893 return count;
894 }
895
896 return 0;
897 }
898 #else
899 static inline int acpi_parse_madt_ioapic_entries(void)
900 {
901 return -1;
902 }
903 #endif /* !CONFIG_X86_IO_APIC */
904
905 static void __init acpi_process_madt(void)
906 {
907 #ifdef CONFIG_X86_LOCAL_APIC
908 int count, error;
909
910 count = acpi_table_parse(ACPI_APIC, acpi_parse_madt);
911 if (count >= 1) {
912
913 /*
914 * Parse MADT LAPIC entries
915 */
916 error = acpi_parse_madt_lapic_entries();
917 if (!error) {
918 acpi_lapic = 1;
919
920 #ifdef CONFIG_X86_GENERICARCH
921 generic_bigsmp_probe();
922 #endif
923 /*
924 * Parse MADT IO-APIC entries
925 */
926 error = acpi_parse_madt_ioapic_entries();
927 if (!error) {
928 acpi_irq_model = ACPI_IRQ_MODEL_IOAPIC;
929 acpi_irq_balance_set(NULL);
930 acpi_ioapic = 1;
931
932 smp_found_config = 1;
933 clustered_apic_check();
934 }
935 }
936 if (error == -EINVAL) {
937 /*
938 * Dell Precision Workstation 410, 610 come here.
939 */
940 printk(KERN_ERR PREFIX
941 "Invalid BIOS MADT, disabling ACPI\n");
942 disable_acpi();
943 }
944 }
945 #endif
946 return;
947 }
948
949 #ifdef __i386__
950
951 static int __init disable_acpi_irq(struct dmi_system_id *d)
952 {
953 if (!acpi_force) {
954 printk(KERN_NOTICE "%s detected: force use of acpi=noirq\n",
955 d->ident);
956 acpi_noirq_set();
957 }
958 return 0;
959 }
960
961 static int __init disable_acpi_pci(struct dmi_system_id *d)
962 {
963 if (!acpi_force) {
964 printk(KERN_NOTICE "%s detected: force use of pci=noacpi\n",
965 d->ident);
966 acpi_disable_pci();
967 }
968 return 0;
969 }
970
971 static int __init dmi_disable_acpi(struct dmi_system_id *d)
972 {
973 if (!acpi_force) {
974 printk(KERN_NOTICE "%s detected: acpi off\n", d->ident);
975 disable_acpi();
976 } else {
977 printk(KERN_NOTICE
978 "Warning: DMI blacklist says broken, but acpi forced\n");
979 }
980 return 0;
981 }
982
983 /*
984 * Limit ACPI to CPU enumeration for HT
985 */
986 static int __init force_acpi_ht(struct dmi_system_id *d)
987 {
988 if (!acpi_force) {
989 printk(KERN_NOTICE "%s detected: force use of acpi=ht\n",
990 d->ident);
991 disable_acpi();
992 acpi_ht = 1;
993 } else {
994 printk(KERN_NOTICE
995 "Warning: acpi=force overrules DMI blacklist: acpi=ht\n");
996 }
997 return 0;
998 }
999
1000 /*
1001 * If your system is blacklisted here, but you find that acpi=force
1002 * works for you, please contact acpi-devel@sourceforge.net
1003 */
1004 static struct dmi_system_id __initdata acpi_dmi_table[] = {
1005 /*
1006 * Boxes that need ACPI disabled
1007 */
1008 {
1009 .callback = dmi_disable_acpi,
1010 .ident = "IBM Thinkpad",
1011 .matches = {
1012 DMI_MATCH(DMI_BOARD_VENDOR, "IBM"),
1013 DMI_MATCH(DMI_BOARD_NAME, "2629H1G"),
1014 },
1015 },
1016
1017 /*
1018 * Boxes that need acpi=ht
1019 */
1020 {
1021 .callback = force_acpi_ht,
1022 .ident = "FSC Primergy T850",
1023 .matches = {
1024 DMI_MATCH(DMI_SYS_VENDOR, "FUJITSU SIEMENS"),
1025 DMI_MATCH(DMI_PRODUCT_NAME, "PRIMERGY T850"),
1026 },
1027 },
1028 {
1029 .callback = force_acpi_ht,
1030 .ident = "DELL GX240",
1031 .matches = {
1032 DMI_MATCH(DMI_BOARD_VENDOR, "Dell Computer Corporation"),
1033 DMI_MATCH(DMI_BOARD_NAME, "OptiPlex GX240"),
1034 },
1035 },
1036 {
1037 .callback = force_acpi_ht,
1038 .ident = "HP VISUALIZE NT Workstation",
1039 .matches = {
1040 DMI_MATCH(DMI_BOARD_VENDOR, "Hewlett-Packard"),
1041 DMI_MATCH(DMI_PRODUCT_NAME, "HP VISUALIZE NT Workstation"),
1042 },
1043 },
1044 {
1045 .callback = force_acpi_ht,
1046 .ident = "Compaq Workstation W8000",
1047 .matches = {
1048 DMI_MATCH(DMI_SYS_VENDOR, "Compaq"),
1049 DMI_MATCH(DMI_PRODUCT_NAME, "Workstation W8000"),
1050 },
1051 },
1052 {
1053 .callback = force_acpi_ht,
1054 .ident = "ASUS P4B266",
1055 .matches = {
1056 DMI_MATCH(DMI_BOARD_VENDOR, "ASUSTeK Computer INC."),
1057 DMI_MATCH(DMI_BOARD_NAME, "P4B266"),
1058 },
1059 },
1060 {
1061 .callback = force_acpi_ht,
1062 .ident = "ASUS P2B-DS",
1063 .matches = {
1064 DMI_MATCH(DMI_BOARD_VENDOR, "ASUSTeK Computer INC."),
1065 DMI_MATCH(DMI_BOARD_NAME, "P2B-DS"),
1066 },
1067 },
1068 {
1069 .callback = force_acpi_ht,
1070 .ident = "ASUS CUR-DLS",
1071 .matches = {
1072 DMI_MATCH(DMI_BOARD_VENDOR, "ASUSTeK Computer INC."),
1073 DMI_MATCH(DMI_BOARD_NAME, "CUR-DLS"),
1074 },
1075 },
1076 {
1077 .callback = force_acpi_ht,
1078 .ident = "ABIT i440BX-W83977",
1079 .matches = {
1080 DMI_MATCH(DMI_BOARD_VENDOR, "ABIT <http://www.abit.com>"),
1081 DMI_MATCH(DMI_BOARD_NAME, "i440BX-W83977 (BP6)"),
1082 },
1083 },
1084 {
1085 .callback = force_acpi_ht,
1086 .ident = "IBM Bladecenter",
1087 .matches = {
1088 DMI_MATCH(DMI_BOARD_VENDOR, "IBM"),
1089 DMI_MATCH(DMI_BOARD_NAME, "IBM eServer BladeCenter HS20"),
1090 },
1091 },
1092 {
1093 .callback = force_acpi_ht,
1094 .ident = "IBM eServer xSeries 360",
1095 .matches = {
1096 DMI_MATCH(DMI_BOARD_VENDOR, "IBM"),
1097 DMI_MATCH(DMI_BOARD_NAME, "eServer xSeries 360"),
1098 },
1099 },
1100 {
1101 .callback = force_acpi_ht,
1102 .ident = "IBM eserver xSeries 330",
1103 .matches = {
1104 DMI_MATCH(DMI_BOARD_VENDOR, "IBM"),
1105 DMI_MATCH(DMI_BOARD_NAME, "eserver xSeries 330"),
1106 },
1107 },
1108 {
1109 .callback = force_acpi_ht,
1110 .ident = "IBM eserver xSeries 440",
1111 .matches = {
1112 DMI_MATCH(DMI_BOARD_VENDOR, "IBM"),
1113 DMI_MATCH(DMI_PRODUCT_NAME, "eserver xSeries 440"),
1114 },
1115 },
1116
1117 /*
1118 * Boxes that need ACPI PCI IRQ routing disabled
1119 */
1120 {
1121 .callback = disable_acpi_irq,
1122 .ident = "ASUS A7V",
1123 .matches = {
1124 DMI_MATCH(DMI_BOARD_VENDOR, "ASUSTeK Computer INC"),
1125 DMI_MATCH(DMI_BOARD_NAME, "<A7V>"),
1126 /* newer BIOS, Revision 1011, does work */
1127 DMI_MATCH(DMI_BIOS_VERSION,
1128 "ASUS A7V ACPI BIOS Revision 1007"),
1129 },
1130 },
1131
1132 /*
1133 * Boxes that need ACPI PCI IRQ routing and PCI scan disabled
1134 */
1135 { /* _BBN 0 bug */
1136 .callback = disable_acpi_pci,
1137 .ident = "ASUS PR-DLS",
1138 .matches = {
1139 DMI_MATCH(DMI_BOARD_VENDOR, "ASUSTeK Computer INC."),
1140 DMI_MATCH(DMI_BOARD_NAME, "PR-DLS"),
1141 DMI_MATCH(DMI_BIOS_VERSION,
1142 "ASUS PR-DLS ACPI BIOS Revision 1010"),
1143 DMI_MATCH(DMI_BIOS_DATE, "03/21/2003")
1144 },
1145 },
1146 {
1147 .callback = disable_acpi_pci,
1148 .ident = "Acer TravelMate 36x Laptop",
1149 .matches = {
1150 DMI_MATCH(DMI_SYS_VENDOR, "Acer"),
1151 DMI_MATCH(DMI_PRODUCT_NAME, "TravelMate 360"),
1152 },
1153 },
1154 {}
1155 };
1156
1157 #endif /* __i386__ */
1158
1159 /*
1160 * acpi_boot_table_init() and acpi_boot_init()
1161 * called from setup_arch(), always.
1162 * 1. checksums all tables
1163 * 2. enumerates lapics
1164 * 3. enumerates io-apics
1165 *
1166 * acpi_table_init() is separate to allow reading SRAT without
1167 * other side effects.
1168 *
1169 * side effects of acpi_boot_init:
1170 * acpi_lapic = 1 if LAPIC found
1171 * acpi_ioapic = 1 if IOAPIC found
1172 * if (acpi_lapic && acpi_ioapic) smp_found_config = 1;
1173 * if acpi_blacklisted() acpi_disabled = 1;
1174 * acpi_irq_model=...
1175 * ...
1176 *
1177 * return value: (currently ignored)
1178 * 0: success
1179 * !0: failure
1180 */
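/*
 * Rough call sequence, as a sketch (simplified, not the literal
 * setup_arch() code):
 *
 *	setup_arch()
 *		acpi_boot_table_init();		map/checksum tables, DMI quirks
 *		...
 *		acpi_boot_init();		parse FADT, MADT, HPET; sets
 *						acpi_lapic, acpi_ioapic,
 *						acpi_irq_model
 */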
1181
1182 int __init acpi_boot_table_init(void)
1183 {
1184 int error;
1185
1186 #ifdef __i386__
1187 dmi_check_system(acpi_dmi_table);
1188 #endif
1189
1190 /*
1191 * If acpi_disabled, bail out
1192 * One exception: acpi=ht continues far enough to enumerate LAPICs
1193 */
1194 if (acpi_disabled && !acpi_ht)
1195 return 1;
1196
1197 /*
1198 * Initialize the ACPI boot-time table parser.
1199 */
1200 error = acpi_table_init();
1201 if (error) {
1202 disable_acpi();
1203 return error;
1204 }
1205
1206 acpi_table_parse(ACPI_BOOT, acpi_parse_sbf);
1207
1208 /*
1209 * blacklist may disable ACPI entirely
1210 */
1211 error = acpi_blacklisted();
1212 if (error) {
1213 if (acpi_force) {
1214 printk(KERN_WARNING PREFIX "acpi=force override\n");
1215 } else {
1216 printk(KERN_WARNING PREFIX "Disabling ACPI support\n");
1217 disable_acpi();
1218 return error;
1219 }
1220 }
1221
1222 return 0;
1223 }
1224
1225 int __init acpi_boot_init(void)
1226 {
1227 /*
1228 * If acpi_disabled, bail out
1229 * One exception: acpi=ht continues far enough to enumerate LAPICs
1230 */
1231 if (acpi_disabled && !acpi_ht)
1232 return 1;
1233
1234 acpi_table_parse(ACPI_BOOT, acpi_parse_sbf);
1235
1236 /*
1237 * set sci_int and PM timer address
1238 */
1239 acpi_table_parse(ACPI_FADT, acpi_parse_fadt);
1240
1241 /*
1242 * Process the Multiple APIC Description Table (MADT), if present
1243 */
1244 acpi_process_madt();
1245
1246 acpi_table_parse(ACPI_HPET, acpi_parse_hpet);
1247
1248 return 0;
1249 }
1250
1251 static int __init parse_acpi(char *arg)
1252 {
1253 if (!arg)
1254 return -EINVAL;
1255
1256 /* "acpi=off" disables both ACPI table parsing and interpreter */
1257 if (strcmp(arg, "off") == 0) {
1258 disable_acpi();
1259 }
1260 /* acpi=force to over-ride black-list */
1261 else if (strcmp(arg, "force") == 0) {
1262 acpi_force = 1;
1263 acpi_ht = 1;
1264 acpi_disabled = 0;
1265 }
1266 /* acpi=strict disables out-of-spec workarounds */
1267 else if (strcmp(arg, "strict") == 0) {
1268 acpi_strict = 1;
1269 }
1270 /* Limit ACPI just to boot-time to enable HT */
1271 else if (strcmp(arg, "ht") == 0) {
1272 if (!acpi_force)
1273 disable_acpi();
1274 acpi_ht = 1;
1275 }
1276 /* "acpi=noirq" disables ACPI interrupt routing */
1277 else if (strcmp(arg, "noirq") == 0) {
1278 acpi_noirq_set();
1279 } else {
1280 /* Core will printk when we return error. */
1281 return -EINVAL;
1282 }
1283 return 0;
1284 }
1285 early_param("acpi", parse_acpi);
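/*
 * Summary of the "acpi=" values handled above (illustrative command lines):
 *
 *	acpi=off	disable ACPI table parsing and the interpreter
 *	acpi=force	enable ACPI even if the platform is blacklisted
 *	acpi=strict	disable out-of-spec workarounds
 *	acpi=ht		limit ACPI to boot-time LAPIC (HT) enumeration
 *	acpi=noirq	keep ACPI but skip its interrupt routing
 */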
1286
1287 /* FIXME: Using pci= for an ACPI parameter is a travesty. */
1288 static int __init parse_pci(char *arg)
1289 {
1290 if (arg && strcmp(arg, "noacpi") == 0)
1291 acpi_disable_pci();
1292 return 0;
1293 }
1294 early_param("pci", parse_pci);
1295
1296 #ifdef CONFIG_X86_IO_APIC
1297 static int __init parse_acpi_skip_timer_override(char *arg)
1298 {
1299 acpi_skip_timer_override = 1;
1300 return 0;
1301 }
1302 early_param("acpi_skip_timer_override", parse_acpi_skip_timer_override);
1303 #endif /* CONFIG_X86_IO_APIC */
1304
1305 static int __init setup_acpi_sci(char *s)
1306 {
1307 if (!s)
1308 return -EINVAL;
1309 if (!strcmp(s, "edge"))
1310 acpi_sci_flags.trigger = 1;
1311 else if (!strcmp(s, "level"))
1312 acpi_sci_flags.trigger = 3;
1313 else if (!strcmp(s, "high"))
1314 acpi_sci_flags.polarity = 1;
1315 else if (!strcmp(s, "low"))
1316 acpi_sci_flags.polarity = 3;
1317 else
1318 return -EINVAL;
1319 return 0;
1320 }
1321 early_param("acpi_sci", setup_acpi_sci);
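/*
 * Example (illustrative): booting with "acpi_sci=edge" sets
 * acpi_sci_flags.trigger to 1, which acpi_sci_ioapic_setup() then uses to
 * override the compatible default of level/low (trigger = 3, polarity = 3)
 * when the SCI's mp_irqs[] entry is written.
 */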