[PATCH] x86_64: Reject SRAT tables that don't cover all memory
[GitHub/mt8127/android_kernel_alcatel_ttab.git] / arch / x86_64 / kernel / setup.c
CommitLineData
1da177e4
LT
1/*
2 * linux/arch/x86-64/kernel/setup.c
3 *
4 * Copyright (C) 1995 Linus Torvalds
5 *
6 * Nov 2001 Dave Jones <davej@suse.de>
7 * Forked from i386 setup code.
8 *
9 * $Id$
10 */
11
12/*
13 * This file handles the architecture-dependent parts of initialization
14 */
15
16#include <linux/errno.h>
17#include <linux/sched.h>
18#include <linux/kernel.h>
19#include <linux/mm.h>
20#include <linux/stddef.h>
21#include <linux/unistd.h>
22#include <linux/ptrace.h>
23#include <linux/slab.h>
24#include <linux/user.h>
25#include <linux/a.out.h>
26#include <linux/tty.h>
27#include <linux/ioport.h>
28#include <linux/delay.h>
29#include <linux/config.h>
30#include <linux/init.h>
31#include <linux/initrd.h>
32#include <linux/highmem.h>
33#include <linux/bootmem.h>
34#include <linux/module.h>
35#include <asm/processor.h>
36#include <linux/console.h>
37#include <linux/seq_file.h>
aac04b32 38#include <linux/crash_dump.h>
1da177e4
LT
39#include <linux/root_dev.h>
40#include <linux/pci.h>
41#include <linux/acpi.h>
42#include <linux/kallsyms.h>
43#include <linux/edd.h>
bbfceef4 44#include <linux/mmzone.h>
5f5609df 45#include <linux/kexec.h>
95235ca2 46#include <linux/cpufreq.h>
e9928674 47#include <linux/dmi.h>
bbfceef4 48
1da177e4
LT
49#include <asm/mtrr.h>
50#include <asm/uaccess.h>
51#include <asm/system.h>
52#include <asm/io.h>
53#include <asm/smp.h>
54#include <asm/msr.h>
55#include <asm/desc.h>
56#include <video/edid.h>
57#include <asm/e820.h>
58#include <asm/dma.h>
59#include <asm/mpspec.h>
60#include <asm/mmu_context.h>
61#include <asm/bootsetup.h>
62#include <asm/proto.h>
63#include <asm/setup.h>
64#include <asm/mach_apic.h>
65#include <asm/numa.h>
2bc0414e 66#include <asm/sections.h>
1da177e4
LT
67
/*
 * Machine setup..
 */

/* CPU data for the boot processor; filled in by early_identify_cpu(). */
struct cpuinfo_x86 boot_cpu_data __read_mostly;

/* CR4 feature bits to enable on each CPU (set up elsewhere during boot). */
unsigned long mmu_cr4_features;

/* Non-zero once ACPI support has been disabled (e.g. "acpi=off"). */
int acpi_disabled;
EXPORT_SYMBOL(acpi_disabled);
#ifdef CONFIG_ACPI
extern int __initdata acpi_ht;
extern acpi_interrupt_flags acpi_sci_flags;
/* Set by "acpi=force" to keep ACPI on despite blacklist/DMI decisions. */
int __initdata acpi_force = 0;
#endif

/* NOTE(review): written by the SRAT/ACPI NUMA parser elsewhere; tested
 * as "> 0" in srat_detect_node() below — confirm exact semantics there. */
int acpi_numa __initdata;

/* Boot loader ID as an integer, for the benefit of proc_dointvec */
int bootloader_type;

/* Video mode the kernel was booted in, taken from the boot header. */
unsigned long saved_video_mode;

#ifdef CONFIG_SWIOTLB
int swiotlb;
EXPORT_SYMBOL(swiotlb);
#endif
95
/*
 * Setup options
 */

/* Opaque copy of the BIOS drive information block from the boot data. */
struct drive_info_struct { char dummy[32]; } drive_info;
/* Console/video parameters copied from the boot header in setup_arch(). */
struct screen_info screen_info;
struct sys_desc_table_struct {
        unsigned short length;
        unsigned char table[0];         /* variable-length trailing data */
};

/* Monitor EDID block copied from the boot header. */
struct edid_info edid_info;
/* The BIOS-provided (or user-overridden) physical memory map. */
struct e820map e820;

extern int root_mountflags;

/* Parsed command line, filled in by parse_cmdline_early(). */
char command_line[COMMAND_LINE_SIZE];
/*
 * Legacy ISA I/O port ranges present on every PC-compatible system.
 * Claimed at boot (see setup_arch()) so drivers cannot grab them.
 */
struct resource standard_io_resources[] = {
        { .name = "dma1", .start = 0x00, .end = 0x1f,
                .flags = IORESOURCE_BUSY | IORESOURCE_IO },
        { .name = "pic1", .start = 0x20, .end = 0x21,
                .flags = IORESOURCE_BUSY | IORESOURCE_IO },
        { .name = "timer0", .start = 0x40, .end = 0x43,
                .flags = IORESOURCE_BUSY | IORESOURCE_IO },
        { .name = "timer1", .start = 0x50, .end = 0x53,
                .flags = IORESOURCE_BUSY | IORESOURCE_IO },
        { .name = "keyboard", .start = 0x60, .end = 0x6f,
                .flags = IORESOURCE_BUSY | IORESOURCE_IO },
        { .name = "dma page reg", .start = 0x80, .end = 0x8f,
                .flags = IORESOURCE_BUSY | IORESOURCE_IO },
        { .name = "pic2", .start = 0xa0, .end = 0xa1,
                .flags = IORESOURCE_BUSY | IORESOURCE_IO },
        { .name = "dma2", .start = 0xc0, .end = 0xdf,
                .flags = IORESOURCE_BUSY | IORESOURCE_IO },
        { .name = "fpu", .start = 0xf0, .end = 0xff,
                .flags = IORESOURCE_BUSY | IORESOURCE_IO }
};

/* Number of entries in standard_io_resources[]. */
#define STANDARD_IO_RESOURCES \
        (sizeof standard_io_resources / sizeof standard_io_resources[0])

/* Flag combination shared by the RAM-backed resources below. */
#define IORESOURCE_RAM (IORESOURCE_BUSY | IORESOURCE_MEM)
138
/* Resource describing the kernel's data segment; start/end are filled
 * in from _etext/_edata in setup_arch() before registration. */
struct resource data_resource = {
        .name = "Kernel data",
        .start = 0,
        .end = 0,
        .flags = IORESOURCE_RAM,
};
/* Resource describing the kernel's text segment; filled in from
 * _text/_etext in setup_arch(). */
struct resource code_resource = {
        .name = "Kernel code",
        .start = 0,
        .end = 0,
        .flags = IORESOURCE_RAM,
};
151
/* Flag combination shared by the ROM resources below. */
#define IORESOURCE_ROM (IORESOURCE_BUSY | IORESOURCE_READONLY | IORESOURCE_MEM)

/* Fixed legacy system BIOS ROM window. */
static struct resource system_rom_resource = {
        .name = "System ROM",
        .start = 0xf0000,
        .end = 0xfffff,
        .flags = IORESOURCE_ROM,
};

/* Optional BIOS extension ROM window; registered by probe_roms() only
 * when a valid signature/checksum is found there. */
static struct resource extension_rom_resource = {
        .name = "Extension ROM",
        .start = 0xe0000,
        .end = 0xeffff,
        .flags = IORESOURCE_ROM,
};

/* Slots for adapter (option) ROMs discovered by probe_roms(); the first
 * entry's start doubles as the scan limit for the video ROM search. */
static struct resource adapter_rom_resources[] = {
        { .name = "Adapter ROM", .start = 0xc8000, .end = 0,
                .flags = IORESOURCE_ROM },
        { .name = "Adapter ROM", .start = 0, .end = 0,
                .flags = IORESOURCE_ROM },
        { .name = "Adapter ROM", .start = 0, .end = 0,
                .flags = IORESOURCE_ROM },
        { .name = "Adapter ROM", .start = 0, .end = 0,
                .flags = IORESOURCE_ROM },
        { .name = "Adapter ROM", .start = 0, .end = 0,
                .flags = IORESOURCE_ROM },
        { .name = "Adapter ROM", .start = 0, .end = 0,
                .flags = IORESOURCE_ROM }
};

/* Number of entries in adapter_rom_resources[]. */
#define ADAPTER_ROM_RESOURCES \
        (sizeof adapter_rom_resources / sizeof adapter_rom_resources[0])

/* Default VGA BIOS ROM window; start/end refined by probe_roms(). */
static struct resource video_rom_resource = {
        .name = "Video ROM",
        .start = 0xc0000,
        .end = 0xc7fff,
        .flags = IORESOURCE_ROM,
};

/* Legacy VGA framebuffer aperture. */
static struct resource video_ram_resource = {
        .name = "Video RAM area",
        .start = 0xa0000,
        .end = 0xbffff,
        .flags = IORESOURCE_RAM,
};

/* True when the two-byte expansion-ROM signature 0x55 0xAA is present. */
#define romsignature(x) (*(unsigned short *)(x) == 0xaa55)
201
202static int __init romchecksum(unsigned char *rom, unsigned long length)
203{
204 unsigned char *p, sum = 0;
205
206 for (p = rom; p < rom + length; p++)
207 sum += *p;
208 return sum == 0;
209}
210
/*
 * Scan the legacy ISA ROM area (0xc0000-0xfffff) for the video BIOS,
 * the BIOS extension ROM and adapter (option) ROMs, registering each
 * valid image found as an iomem resource.  ROM images are identified
 * by the 0xAA55 signature and validated by an 8-bit checksum.
 */
static void __init probe_roms(void)
{
        unsigned long start, length, upper;
        unsigned char *rom;
        int i;

        /* video rom */
        upper = adapter_rom_resources[0].start;
        for (start = video_rom_resource.start; start < upper; start += 2048) {
                rom = isa_bus_to_virt(start);
                if (!romsignature(rom))
                        continue;

                video_rom_resource.start = start;

                /* 0 < length <= 0x7f * 512, historically */
                length = rom[2] * 512;

                /* if checksum okay, trust length byte */
                if (length && romchecksum(rom, length))
                        video_rom_resource.end = start + length - 1;

                request_resource(&iomem_resource, &video_rom_resource);
                break;
        }

        /* Continue the adapter scan after the video ROM, rounded up to
         * the next 2K boundary, but never before the default window. */
        start = (video_rom_resource.end + 1 + 2047) & ~2047UL;
        if (start < upper)
                start = upper;

        /* system rom */
        request_resource(&iomem_resource, &system_rom_resource);
        upper = system_rom_resource.start;

        /* check for extension rom (ignore length byte!) */
        rom = isa_bus_to_virt(extension_rom_resource.start);
        if (romsignature(rom)) {
                length = extension_rom_resource.end - extension_rom_resource.start + 1;
                if (romchecksum(rom, length)) {
                        request_resource(&iomem_resource, &extension_rom_resource);
                        upper = extension_rom_resource.start;
                }
        }

        /* check for adapter roms on 2k boundaries */
        for (i = 0; i < ADAPTER_ROM_RESOURCES && start < upper; start += 2048) {
                rom = isa_bus_to_virt(start);
                if (!romsignature(rom))
                        continue;

                /* 0 < length <= 0x7f * 512, historically */
                length = rom[2] * 512;

                /* but accept any length that fits if checksum okay */
                if (!length || start + length > upper || !romchecksum(rom, length))
                        continue;

                adapter_rom_resources[i].start = start;
                adapter_rom_resources[i].end = start + length - 1;
                request_resource(&iomem_resource, &adapter_rom_resources[i]);

                /* i advances only on a hit; start is aligned down here and
                 * then bumped by the loop's += 2048 to the next boundary. */
                start = adapter_rom_resources[i++].end & ~2047UL;
        }
}
275
/*
 * Very early kernel command line parsing.
 *
 * Walks the boot command line one character at a time; whenever the
 * previous character was a space, 'from' points at the start of a new
 * option and the memcmp() chain below tries to recognize it.  The raw
 * line is preserved in saved_command_line for /proc/cmdline, and the
 * (length-limited) text is copied to command_line, which is returned
 * to the caller through *cmdline_p.
 */
static __init void parse_cmdline_early (char ** cmdline_p)
{
        char c = ' ', *to = command_line, *from = COMMAND_LINE;
        int len = 0;
        int userdef = 0;        /* set when the user overrides the e820 map */

        /* Save unparsed command line copy for /proc/cmdline */
        memcpy(saved_command_line, COMMAND_LINE, COMMAND_LINE_SIZE);
        saved_command_line[COMMAND_LINE_SIZE-1] = '\0';

        for (;;) {
                if (c != ' ')
                        goto next_char;

#ifdef CONFIG_SMP
                /*
                 * If the BIOS enumerates physical processors before logical,
                 * maxcpus=N at enumeration-time can be used to disable HT.
                 */
                /* NOTE(review): this "else" pairs with "if (c != ' ')"
                 * above when CONFIG_SMP is enabled. */
                else if (!memcmp(from, "maxcpus=", 8)) {
                        extern unsigned int maxcpus;

                        maxcpus = simple_strtoul(from + 8, NULL, 0);
                }
#endif
#ifdef CONFIG_ACPI
                /* "acpi=off" disables both ACPI table parsing and interpreter init */
                if (!memcmp(from, "acpi=off", 8))
                        disable_acpi();

                if (!memcmp(from, "acpi=force", 10)) {
                        /* add later when we do DMI horrors: */
                        acpi_force = 1;
                        acpi_disabled = 0;
                }

                /* acpi=ht just means: do ACPI MADT parsing
                   at bootup, but don't enable the full ACPI interpreter */
                if (!memcmp(from, "acpi=ht", 7)) {
                        if (!acpi_force)
                                disable_acpi();
                        acpi_ht = 1;
                }
                else if (!memcmp(from, "pci=noacpi", 10))
                        acpi_disable_pci();
                else if (!memcmp(from, "acpi=noirq", 10))
                        acpi_noirq_set();

                else if (!memcmp(from, "acpi_sci=edge", 13))
                        acpi_sci_flags.trigger = 1;
                else if (!memcmp(from, "acpi_sci=level", 14))
                        acpi_sci_flags.trigger = 3;
                else if (!memcmp(from, "acpi_sci=high", 13))
                        acpi_sci_flags.polarity = 1;
                else if (!memcmp(from, "acpi_sci=low", 12))
                        acpi_sci_flags.polarity = 3;

                /* acpi=strict disables out-of-spec workarounds */
                else if (!memcmp(from, "acpi=strict", 11)) {
                        acpi_strict = 1;
                }
#ifdef CONFIG_X86_IO_APIC
                else if (!memcmp(from, "acpi_skip_timer_override", 24))
                        acpi_skip_timer_override = 1;
#endif
#endif

                if (!memcmp(from, "disable_timer_pin_1", 19))
                        disable_timer_pin_1 = 1;
                if (!memcmp(from, "enable_timer_pin_1", 18))
                        disable_timer_pin_1 = -1;

                if (!memcmp(from, "nolapic", 7) ||
                    !memcmp(from, "disableapic", 11))
                        disable_apic = 1;

                if (!memcmp(from, "noapic", 6))
                        skip_ioapic_setup = 1;

                /* Make sure to not confuse with apic= */
                if (!memcmp(from, "apic", 4) &&
                        (from[4] == ' ' || from[4] == 0)) {
                        skip_ioapic_setup = 0;
                        ioapic_force = 1;
                }

                if (!memcmp(from, "mem=", 4))
                        parse_memopt(from+4, &from);

                if (!memcmp(from, "memmap=", 7)) {
                        /* exactmap option is for used defined memory */
                        if (!memcmp(from+7, "exactmap", 8)) {
#ifdef CONFIG_CRASH_DUMP
                                /* If we are doing a crash dump, we
                                 * still need to know the real mem
                                 * size before original memory map is
                                 * reset.
                                 */
                                saved_max_pfn = e820_end_of_ram();
#endif
                                from += 8+7;
                                end_pfn_map = 0;
                                e820.nr_map = 0;
                                userdef = 1;
                        }
                        else {
                                parse_memmapopt(from+7, &from);
                                userdef = 1;
                        }
                }

#ifdef CONFIG_NUMA
                if (!memcmp(from, "numa=", 5))
                        numa_setup(from+5);
#endif

#ifdef CONFIG_GART_IOMMU
                if (!memcmp(from,"iommu=",6)) {
                        iommu_setup(from+6);
                }
#endif

                if (!memcmp(from,"oops=panic", 10))
                        panic_on_oops = 1;

                if (!memcmp(from, "noexec=", 7))
                        nonx_setup(from + 7);

#ifdef CONFIG_KEXEC
                /* crashkernel=size@addr specifies the location to reserve for
                 * a crash kernel.  By reserving this memory we guarantee
                 * that linux never set's it up as a DMA target.
                 * Useful for holding code to do something appropriate
                 * after a kernel panic.
                 */
                else if (!memcmp(from, "crashkernel=", 12)) {
                        unsigned long size, base;
                        size = memparse(from+12, &from);
                        if (*from == '@') {
                                base = memparse(from+1, &from);
                                /* FIXME: Do I want a sanity check
                                 * to validate the memory range?
                                 */
                                crashk_res.start = base;
                                crashk_res.end = base + size - 1;
                        }
                }
#endif

#ifdef CONFIG_PROC_VMCORE
                /* elfcorehdr= specifies the location of elf core header
                 * stored by the crashed kernel. This option will be passed
                 * by kexec loader to the capture kernel.
                 */
                else if(!memcmp(from, "elfcorehdr=", 11))
                        elfcorehdr_addr = memparse(from+11, &from);
#endif
        next_char:
                c = *(from++);
                if (!c)
                        break;
                if (COMMAND_LINE_SIZE <= ++len)
                        break;
                *(to++) = c;
        }
        if (userdef) {
                printk(KERN_INFO "user-defined physical RAM map:\n");
                e820_print_map("user");
        }
        *to = '\0';
        *cmdline_p = command_line;
}
448
#ifndef CONFIG_NUMA
/*
 * Set up the boot-time (bootmem) allocator for the non-NUMA case:
 * find a free e820 area for the bootmem bitmap, initialize it, free
 * all e820-usable RAM into it, then reserve the bitmap itself.
 */
static void __init
contig_initmem_init(unsigned long start_pfn, unsigned long end_pfn)
{
        unsigned long bootmap_size, bootmap;

        bootmap_size = bootmem_bootmap_pages(end_pfn)<<PAGE_SHIFT;
        bootmap = find_e820_area(0, end_pfn<<PAGE_SHIFT, bootmap_size);
        if (bootmap == -1L)
                panic("Cannot find bootmem map of size %ld\n",bootmap_size);
        bootmap_size = init_bootmem(bootmap >> PAGE_SHIFT, end_pfn);
        e820_bootmem_free(NODE_DATA(0), 0, end_pfn << PAGE_SHIFT);
        reserve_bootmem(bootmap, bootmap_size);
}
#endif
464
/* Use inline assembly to define this because the nops are defined
   as inline assembly strings in the include files and we cannot
   get them easily into strings. */
asm("\t.data\nk8nops: "
    K8_NOP1 K8_NOP2 K8_NOP3 K8_NOP4 K8_NOP5 K8_NOP6
    K8_NOP7 K8_NOP8);

extern unsigned char k8nops[];
/* k8_nops[k] points at a k-byte K8 nop sequence inside the packed
 * k8nops blob above (entries are laid out back to back, lengths 1..8);
 * used by apply_alternatives() to pad patched instructions. */
static unsigned char *k8_nops[ASM_NOP_MAX+1] = {
     NULL,
     k8nops,
     k8nops + 1,
     k8nops + 1 + 2,
     k8nops + 1 + 2 + 3,
     k8nops + 1 + 2 + 3 + 4,
     k8nops + 1 + 2 + 3 + 4 + 5,
     k8nops + 1 + 2 + 3 + 4 + 5 + 6,
     k8nops + 1 + 2 + 3 + 4 + 5 + 6 + 7,
};
484
/* Replace instructions with better alternatives for this CPU type.

   This runs before SMP is initialized to avoid SMP problems with
   self modifying code. This implies that assymetric systems where
   APs have less capabilities than the boot processor are not handled.
   In this case boot with "noreplacement". */
void apply_alternatives(void *start, void *end)
{
        struct alt_instr *a;
        int diff, i, k;
        /* Walk the alt_instr records in [start, end). */
        for (a = start; (void *)a < end; a++) {
                /* Only patch when this CPU has the required feature. */
                if (!boot_cpu_has(a->cpuid))
                        continue;

                BUG_ON(a->replacementlen > a->instrlen);
                __inline_memcpy(a->instr, a->replacement, a->replacementlen);
                diff = a->instrlen - a->replacementlen;

                /* Pad the rest with nops */
                for (i = a->replacementlen; diff > 0; diff -= k, i += k) {
                        k = diff;
                        if (k > ASM_NOP_MAX)
                                k = ASM_NOP_MAX;
                        __inline_memcpy(a->instr + i, k8_nops[k], k);
                }
        }
}
512
/* Non-zero when the user booted with "noreplacement". */
static int no_replacement __initdata = 0;

/*
 * Patch the kernel's alternative-instruction sections for the boot CPU,
 * unless disabled on the command line.
 */
void __init alternative_instructions(void)
{
        extern struct alt_instr __alt_instructions[], __alt_instructions_end[];
        if (no_replacement)
                return;
        apply_alternatives(__alt_instructions, __alt_instructions_end);
}
522
523static int __init noreplacement_setup(char *s)
524{
525 no_replacement = 1;
526 return 0;
527}
528
529__setup("noreplacement", noreplacement_setup);
530
#if defined(CONFIG_EDD) || defined(CONFIG_EDD_MODULE)
struct edd edd;
#ifdef CONFIG_EDD_MODULE
EXPORT_SYMBOL(edd);
#endif
/**
 * copy_edd() - Copy the BIOS EDD information
 * from boot_params into a safe place.
 *
 */
static inline void copy_edd(void)
{
     memcpy(edd.mbr_signature, EDD_MBR_SIGNATURE, sizeof(edd.mbr_signature));
     memcpy(edd.edd_info, EDD_BUF, sizeof(edd.edd_info));
     edd.mbr_signature_nr = EDD_MBR_SIG_NR;
     edd.edd_info_nr = EDD_NR;
}
#else
/* No EDD support configured: nothing to copy. */
static inline void copy_edd(void)
{
}
#endif
553
554#define EBDA_ADDR_POINTER 0x40E
555static void __init reserve_ebda_region(void)
556{
557 unsigned int addr;
558 /**
559 * there is a real-mode segmented pointer pointing to the
560 * 4K EBDA area at 0x40E
561 */
562 addr = *(unsigned short *)phys_to_virt(EBDA_ADDR_POINTER);
563 addr <<= 4;
564 if (addr)
565 reserve_bootmem_generic(addr, PAGE_SIZE);
566}
567
/*
 * Main architecture-specific boot-time setup.  Ordering matters
 * throughout: memory map and page tables are established first, then
 * ACPI/NUMA discovery, then bootmem reservations, then resources.
 * Returns the final command line to init/main.c through *cmdline_p.
 */
void __init setup_arch(char **cmdline_p)
{
        unsigned long kernel_end;

        /* Copy parameters handed over by the boot loader. */
        ROOT_DEV = old_decode_dev(ORIG_ROOT_DEV);
        drive_info = DRIVE_INFO;
        screen_info = SCREEN_INFO;
        edid_info = EDID_INFO;
        saved_video_mode = SAVED_VIDEO_MODE;
        bootloader_type = LOADER_TYPE;

#ifdef CONFIG_BLK_DEV_RAM
        rd_image_start = RAMDISK_FLAGS & RAMDISK_IMAGE_START_MASK;
        rd_prompt = ((RAMDISK_FLAGS & RAMDISK_PROMPT_FLAG) != 0);
        rd_doload = ((RAMDISK_FLAGS & RAMDISK_LOAD_FLAG) != 0);
#endif
        setup_memory_region();
        copy_edd();

        if (!MOUNT_ROOT_RDONLY)
                root_mountflags &= ~MS_RDONLY;
        init_mm.start_code = (unsigned long) &_text;
        init_mm.end_code = (unsigned long) &_etext;
        init_mm.end_data = (unsigned long) &_edata;
        init_mm.brk = (unsigned long) &_end;

        code_resource.start = virt_to_phys(&_text);
        code_resource.end = virt_to_phys(&_etext)-1;
        data_resource.start = virt_to_phys(&_etext);
        data_resource.end = virt_to_phys(&_edata)-1;

        parse_cmdline_early(cmdline_p);

        early_identify_cpu(&boot_cpu_data);

        /*
         * partially used pages are not usable - thus
         * we are rounding upwards:
         */
        end_pfn = e820_end_of_ram();

        check_efer();

        init_memory_mapping(0, (end_pfn_map << PAGE_SHIFT));

        zap_low_mappings(0);

#ifdef CONFIG_ACPI
        /*
         * Initialize the ACPI boot-time table parser (gets the RSDP and SDT).
         * Call this early for SRAT node setup.
         */
        acpi_boot_table_init();
#endif

#ifdef CONFIG_ACPI_NUMA
        /*
         * Parse SRAT to discover nodes.
         */
        acpi_numa_init();
#endif

#ifdef CONFIG_NUMA
        numa_initmem_init(0, end_pfn);
#else
        contig_initmem_init(0, end_pfn);
#endif

        /* Reserve direct mapping */
        reserve_bootmem_generic(table_start << PAGE_SHIFT,
                                (table_end - table_start) << PAGE_SHIFT);

        /* reserve kernel */
        kernel_end = round_up(__pa_symbol(&_end),PAGE_SIZE);
        reserve_bootmem_generic(HIGH_MEMORY, kernel_end - HIGH_MEMORY);

        /*
         * reserve physical page 0 - it's a special BIOS page on many boxes,
         * enabling clean reboots, SMP operation, laptop functions.
         */
        reserve_bootmem_generic(0, PAGE_SIZE);

        /* reserve ebda region */
        reserve_ebda_region();

#ifdef CONFIG_SMP
        /*
         * But first pinch a few for the stack/trampoline stuff
         * FIXME: Don't need the extra page at 4K, but need to fix
         * trampoline before removing it. (see the GDT stuff)
         */
        reserve_bootmem_generic(PAGE_SIZE, PAGE_SIZE);

        /* Reserve SMP trampoline */
        reserve_bootmem_generic(SMP_TRAMPOLINE_BASE, PAGE_SIZE);
#endif

#ifdef CONFIG_ACPI_SLEEP
        /*
         * Reserve low memory region for sleep support.
         */
        acpi_reserve_bootmem();
#endif
#ifdef CONFIG_X86_LOCAL_APIC
        /*
         * Find and reserve possible boot-time SMP configuration:
         */
        find_smp_config();
#endif
#ifdef CONFIG_BLK_DEV_INITRD
        if (LOADER_TYPE && INITRD_START) {
                if (INITRD_START + INITRD_SIZE <= (end_pfn << PAGE_SHIFT)) {
                        reserve_bootmem_generic(INITRD_START, INITRD_SIZE);
                        initrd_start =
                                INITRD_START ? INITRD_START + PAGE_OFFSET : 0;
                        initrd_end = initrd_start+INITRD_SIZE;
                }
                else {
                        printk(KERN_ERR "initrd extends beyond end of memory "
                            "(0x%08lx > 0x%08lx)\ndisabling initrd\n",
                            (unsigned long)(INITRD_START + INITRD_SIZE),
                            (unsigned long)(end_pfn << PAGE_SHIFT));
                        initrd_start = 0;
                }
        }
#endif
#ifdef CONFIG_KEXEC
        if (crashk_res.start != crashk_res.end) {
                reserve_bootmem(crashk_res.start,
                        crashk_res.end - crashk_res.start + 1);
        }
#endif

        paging_init();

        check_ioapic();

#ifdef CONFIG_ACPI
        /*
         * Read APIC and some other early information from ACPI tables.
         */
        acpi_boot_init();
#endif

#ifdef CONFIG_X86_LOCAL_APIC
        /*
         * get boot-time SMP configuration:
         */
        if (smp_found_config)
                get_smp_config();
        init_apic_mappings();
#endif

        /*
         * Request address space for all standard RAM and ROM resources
         * and also for regions reported as reserved by the e820.
         */
        probe_roms();
        e820_reserve_resources();

        request_resource(&iomem_resource, &video_ram_resource);

        {
        unsigned i;
        /* request I/O space for devices used on all i[345]86 PCs */
        for (i = 0; i < STANDARD_IO_RESOURCES; i++)
                request_resource(&ioport_resource, &standard_io_resources[i]);
        }

        e820_setup_gap();

#ifdef CONFIG_GART_IOMMU
        iommu_hole_init();
#endif

#ifdef CONFIG_VT
#if defined(CONFIG_VGA_CONSOLE)
        conswitchp = &vga_con;
#elif defined(CONFIG_DUMMY_CONSOLE)
        conswitchp = &dummy_con;
#endif
#endif
}
751
/*
 * Read the 48-byte processor brand string from CPUID leaves
 * 0x80000002-0x80000004 into c->x86_model_id.  Returns 1 on success,
 * 0 when the CPU does not implement those leaves.
 */
static int __cpuinit get_model_name(struct cpuinfo_x86 *c)
{
        unsigned int *v;

        if (c->extended_cpuid_level < 0x80000004)
                return 0;

        /* Each leaf fills 16 bytes of the model string. */
        v = (unsigned int *) c->x86_model_id;
        cpuid(0x80000002, &v[0], &v[1], &v[2], &v[3]);
        cpuid(0x80000003, &v[4], &v[5], &v[6], &v[7]);
        cpuid(0x80000004, &v[8], &v[9], &v[10], &v[11]);
        c->x86_model_id[48] = 0;        /* ensure NUL termination */
        return 1;
}
766
767
/*
 * Query the AMD-style extended CPUID leaves for cache/TLB geometry,
 * print the findings and record cache size, TLB size and address
 * widths in *c.
 */
static void __cpuinit display_cacheinfo(struct cpuinfo_x86 *c)
{
        unsigned int n, dummy, eax, ebx, ecx, edx;

        n = c->extended_cpuid_level;

        if (n >= 0x80000005) {
                cpuid(0x80000005, &dummy, &ebx, &ecx, &edx);
                printk(KERN_INFO "CPU: L1 I Cache: %dK (%d bytes/line), D cache %dK (%d bytes/line)\n",
                        edx>>24, edx&0xFF, ecx>>24, ecx&0xFF);
                /* Provisional: L1 D+I total, replaced by L2 size below
                 * when leaf 0x80000006 is available. */
                c->x86_cache_size=(ecx>>24)+(edx>>24);
                /* On K8 L1 TLB is inclusive, so don't count it */
                c->x86_tlbsize = 0;
        }

        if (n >= 0x80000006) {
                cpuid(0x80000006, &dummy, &ebx, &ecx, &edx);
                ecx = cpuid_ecx(0x80000006);
                c->x86_cache_size = ecx >> 16;
                c->x86_tlbsize += ((ebx >> 16) & 0xfff) + (ebx & 0xfff);

                printk(KERN_INFO "CPU: L2 Cache: %dK (%d bytes/line)\n",
                c->x86_cache_size, ecx & 0xFF);
        }

        if (n >= 0x80000007)
                cpuid(0x80000007, &dummy, &dummy, &dummy, &c->x86_power);
        if (n >= 0x80000008) {
                /* Physical/virtual address widths. */
                cpuid(0x80000008, &eax, &dummy, &dummy, &dummy);
                c->x86_virt_bits = (eax >> 8) & 0xff;
                c->x86_phys_bits = eax & 0xff;
        }
}
801
#ifdef CONFIG_NUMA
/*
 * Find an online NUMA node "near" the given APIC id: scan downward
 * through lower APIC ids first, then upward, returning the first
 * mapped, online node.  Falls back to any online node.
 */
static int nearby_node(int apicid)
{
        int probe, node;

        for (probe = apicid - 1; probe >= 0; probe--) {
                node = apicid_to_node[probe];
                if (node != NUMA_NO_NODE && node_online(node))
                        return node;
        }
        for (probe = apicid + 1; probe < MAX_LOCAL_APIC; probe++) {
                node = apicid_to_node[probe];
                if (node != NUMA_NO_NODE && node_online(node))
                        return node;
        }
        /* Shouldn't happen: no populated neighbour at all. */
        return first_node(node_online_map);
}
#endif
819
/*
 * On a AMD dual core setup the lower bits of the APIC id distingush the cores.
 * Assumes number of cores is a power of two.
 */
static void __init amd_detect_cmp(struct cpuinfo_x86 *c)
{
#ifdef CONFIG_SMP
        int cpu = smp_processor_id();
        unsigned bits;
#ifdef CONFIG_NUMA
        int node = 0;
        unsigned apicid = phys_proc_id[cpu];
#endif

        /* Number of low APIC-id bits needed to number the cores. */
        bits = 0;
        while ((1 << bits) < c->x86_max_cores)
                bits++;

        /* Low order bits define the core id (index of core in socket) */
        cpu_core_id[cpu] = phys_proc_id[cpu] & ((1 << bits)-1);
        /* Convert the APIC ID into the socket ID */
        phys_proc_id[cpu] >>= bits;

#ifdef CONFIG_NUMA
        node = phys_proc_id[cpu];
        if (apicid_to_node[apicid] != NUMA_NO_NODE)
                node = apicid_to_node[apicid];
        if (!node_online(node)) {
                /* Two possibilities here:
                   - The CPU is missing memory and no node was created.
                   In that case try picking one from a nearby CPU
                   - The APIC IDs differ from the HyperTransport node IDs
                   which the K8 northbridge parsing fills in.
                   Assume they are all increased by a constant offset,
                   but in the same order as the HT nodeids.
                   If that doesn't result in a usable node fall back to the
                   path for the previous case. */
                int ht_nodeid = apicid - (phys_proc_id[0] << bits);
                if (ht_nodeid >= 0 &&
                        apicid_to_node[ht_nodeid] != NUMA_NO_NODE)
                        node = apicid_to_node[ht_nodeid];
                /* Pick a nearby node */
                if (!node_online(node))
                        node = nearby_node(apicid);
        }
        numa_set_node(cpu, node);

        printk(KERN_INFO "CPU %d(%d) -> Node %d -> Core %d\n",
                        cpu, c->x86_max_cores, node, cpu_core_id[cpu]);
#endif
#endif
}
1da177e4
LT
872
/*
 * AMD-specific CPU setup: apply K8 errata workarounds, fix up feature
 * bits, read the model name and cache info, and detect multi-core
 * topology.  Returns the result of get_model_name() (1 when a brand
 * string was read, 0 otherwise).
 */
static int __init init_amd(struct cpuinfo_x86 *c)
{
        int r;
        int level;

#ifdef CONFIG_SMP
        unsigned long value;

        /*
         * Disable TLB flush filter by setting HWCR.FFDIS on K8
         * bit 6 of msr C001_0015
         *
         * Errata 63 for SH-B3 steppings
         * Errata 122 for all steppings (F+ have it disabled by default)
         */
        if (c->x86 == 15) {
                rdmsrl(MSR_K8_HWCR, value);
                value |= 1 << 6;
                wrmsrl(MSR_K8_HWCR, value);
        }
#endif

        /* Bit 31 in normal CPUID used for nonstandard 3DNow ID;
           3DNow is IDd by bit 31 in extended CPUID (1*32+31) anyway */
        clear_bit(0*32+31, &c->x86_capability);

        /* C-stepping K8? */
        level = cpuid_eax(1);
        if ((level >= 0x0f48 && level < 0x0f50) || level >= 0x0f58)
                set_bit(X86_FEATURE_K8_C, &c->x86_capability);

        r = get_model_name(c);
        if (!r) {
                switch (c->x86) {
                case 15:
                        /* Should distinguish Models here, but this is only
                           a fallback anyways. */
                        strcpy(c->x86_model_id, "Hammer");
                        break;
                }
        }
        display_cacheinfo(c);

        /* c->x86_power is 8000_0007 edx. Bit 8 is constant TSC */
        if (c->x86_power & (1<<8))
                set_bit(X86_FEATURE_CONSTANT_TSC, &c->x86_capability);

        if (c->extended_cpuid_level >= 0x80000008) {
                c->x86_max_cores = (cpuid_ecx(0x80000008) & 0xff) + 1;
                /* amd_detect_cmp() assumes a power-of-two core count. */
                if (c->x86_max_cores & (c->x86_max_cores - 1))
                        c->x86_max_cores = 1;

                amd_detect_cmp(c);
        }

        return r;
}
930
/*
 * Detect Hyper-Threading / sibling topology from CPUID leaf 1 and
 * derive this CPU's physical package id and core id.  No-op (beyond
 * setting c->apicid) when HT is absent or the CPU reports legacy
 * core enumeration (CMP_LEGACY).
 */
static void __cpuinit detect_ht(struct cpuinfo_x86 *c)
{
#ifdef CONFIG_SMP
        u32 eax, ebx, ecx, edx;
        int index_msb, core_bits;
        int cpu = smp_processor_id();

        cpuid(1, &eax, &ebx, &ecx, &edx);

        c->apicid = phys_pkg_id(0);

        if (!cpu_has(c, X86_FEATURE_HT) || cpu_has(c, X86_FEATURE_CMP_LEGACY))
                return;

        /* EBX[23:16] = logical processors per physical package. */
        smp_num_siblings = (ebx & 0xff0000) >> 16;

        if (smp_num_siblings == 1) {
                printk(KERN_INFO "CPU: Hyper-Threading is disabled\n");
        } else if (smp_num_siblings > 1 ) {

                if (smp_num_siblings > NR_CPUS) {
                        printk(KERN_WARNING "CPU: Unsupported number of the siblings %d", smp_num_siblings);
                        smp_num_siblings = 1;
                        return;
                }

                index_msb = get_count_order(smp_num_siblings);
                phys_proc_id[cpu] = phys_pkg_id(index_msb);

                printk(KERN_INFO "CPU: Physical Processor ID: %d\n",
                       phys_proc_id[cpu]);

                /* Siblings per core = total siblings / cores per package. */
                smp_num_siblings = smp_num_siblings / c->x86_max_cores;

                index_msb = get_count_order(smp_num_siblings) ;

                core_bits = get_count_order(c->x86_max_cores);

                cpu_core_id[cpu] = phys_pkg_id(index_msb) &
                                               ((1 << core_bits) - 1);

                if (c->x86_max_cores > 1)
                        printk(KERN_INFO "CPU: Processor Core ID: %d\n",
                               cpu_core_id[cpu]);
        }
#endif
}
978
3dd9d514
AK
979/*
980 * find out the number of processor cores on the die
981 */
e6982c67 982static int __cpuinit intel_num_cpu_cores(struct cpuinfo_x86 *c)
3dd9d514
AK
983{
984 unsigned int eax;
985
986 if (c->cpuid_level < 4)
987 return 1;
988
989 __asm__("cpuid"
990 : "=a" (eax)
991 : "0" (4), "c" (0)
992 : "bx", "dx");
993
994 if (eax & 0x1f)
995 return ((eax >> 26) + 1);
996 else
997 return 1;
998}
999
/*
 * Assign the current CPU to the NUMA node recorded for its APIC id by
 * SRAT parsing.  Unlike the AMD path (amd_detect_cmp), no fallback
 * heuristics are applied: unmapped APIC ids simply land on node 0.
 */
static void srat_detect_node(void)
{
#ifdef CONFIG_NUMA
        unsigned node;
        int cpu = smp_processor_id();

        /* Don't do the funky fallback heuristics the AMD version employs
           for now. */
        node = apicid_to_node[hard_smp_processor_id()];
        if (node == NUMA_NO_NODE)
                node = 0;
        numa_set_node(cpu, node);

        if (acpi_numa > 0)
                printk(KERN_INFO "CPU %d -> Node %d\n", cpu, node);
#endif
}
1017
/*
 * Intel-specific CPU setup: cache info, address widths (with a CPUID
 * erratum workaround), constant-TSC detection, core count and NUMA
 * node assignment.
 */
static void __cpuinit init_intel(struct cpuinfo_x86 *c)
{
        /* Cache sizes */
        unsigned n;

        init_intel_cacheinfo(c);
        n = c->extended_cpuid_level;
        if (n >= 0x80000008) {
                unsigned eax = cpuid_eax(0x80000008);
                c->x86_virt_bits = (eax >> 8) & 0xff;
                c->x86_phys_bits = eax & 0xff;
                /* CPUID workaround for Intel 0F34 CPU */
                if (c->x86_vendor == X86_VENDOR_INTEL &&
                    c->x86 == 0xF && c->x86_model == 0x3 &&
                    c->x86_mask == 0x4)
                        c->x86_phys_bits = 36;
        }

        if (c->x86 == 15)
                c->x86_cache_alignment = c->x86_clflush_size * 2;
        /* Family 0xf model >= 3 and family 6 model >= 0x0e have a TSC
         * that ticks at a constant rate regardless of P-state. */
        if ((c->x86 == 0xf && c->x86_model >= 0x03) ||
            (c->x86 == 0x6 && c->x86_model >= 0x0e))
                set_bit(X86_FEATURE_CONSTANT_TSC, &c->x86_capability);
        c->x86_max_cores = intel_num_cpu_cores(c);

        srat_detect_node();
}
1045
672289e9 1046static void __cpuinit get_cpu_vendor(struct cpuinfo_x86 *c)
1da177e4
LT
1047{
1048 char *v = c->x86_vendor_id;
1049
1050 if (!strcmp(v, "AuthenticAMD"))
1051 c->x86_vendor = X86_VENDOR_AMD;
1052 else if (!strcmp(v, "GenuineIntel"))
1053 c->x86_vendor = X86_VENDOR_INTEL;
1054 else
1055 c->x86_vendor = X86_VENDOR_UNKNOWN;
1056}
1057
/* Lookup record mapping a (vendor, family) pair to per-model name
 * strings, indexed by the 4-bit model number. */
struct cpu_model_info {
        int vendor;
        int family;
        char *model_names[16];
};
1063
/* Do some early cpuid on the boot CPU to get some parameter that are
   needed before check_bugs. Everything advanced is in identify_cpu
   below. */
void __cpuinit early_identify_cpu(struct cpuinfo_x86 *c)
{
        u32 tfms;

        /* Reset everything to sane defaults before probing. */
        c->loops_per_jiffy = loops_per_jiffy;
        c->x86_cache_size = -1;
        c->x86_vendor = X86_VENDOR_UNKNOWN;
        c->x86_model = c->x86_mask = 0; /* So far unknown... */
        c->x86_vendor_id[0] = '\0'; /* Unset */
        c->x86_model_id[0] = '\0';  /* Unset */
        c->x86_clflush_size = 64;
        c->x86_cache_alignment = c->x86_clflush_size;
        c->x86_max_cores = 1;
        c->extended_cpuid_level = 0;
        memset(&c->x86_capability, 0, sizeof c->x86_capability);

        /* Get vendor name */
        cpuid(0x00000000, (unsigned int *)&c->cpuid_level,
              (unsigned int *)&c->x86_vendor_id[0],
              (unsigned int *)&c->x86_vendor_id[8],
              (unsigned int *)&c->x86_vendor_id[4]);

        get_cpu_vendor(c);

        /* Initialize the standard set of capabilities */
        /* Note that the vendor-specific code below might override */

        /* Intel-defined flags: level 0x00000001 */
        if (c->cpuid_level >= 0x00000001) {
                __u32 misc;
                cpuid(0x00000001, &tfms, &misc, &c->x86_capability[4],
                      &c->x86_capability[0]);
                /* Decode family/model/stepping, folding in the extended
                 * family and model fields where applicable. */
                c->x86 = (tfms >> 8) & 0xf;
                c->x86_model = (tfms >> 4) & 0xf;
                c->x86_mask = tfms & 0xf;
                if (c->x86 == 0xf)
                        c->x86 += (tfms >> 20) & 0xff;
                if (c->x86 >= 0x6)
                        c->x86_model += ((tfms >> 16) & 0xF) << 4;
                /* CLFLUSH feature (bit 19): line size from EBX[15:8]. */
                if (c->x86_capability[0] & (1<<19))
                        c->x86_clflush_size = ((misc >> 8) & 0xff) * 8;
        } else {
                /* Have CPUID level 0 only - unheard of */
                c->x86 = 4;
        }

#ifdef CONFIG_SMP
        /* Initial APIC id from CPUID.1:EBX[31:24]. */
        phys_proc_id[smp_processor_id()] = (cpuid_ebx(1) >> 24) & 0xff;
#endif
}
1117
/*
 * This does the hard work of actually picking apart the CPU stuff...
 * Runs on every CPU; c == &boot_cpu_data the first time through.
 */
void __cpuinit identify_cpu(struct cpuinfo_x86 *c)
{
	int i;
	u32 xlvl;

	early_identify_cpu(c);

	/* AMD-defined flags: level 0x80000001 */
	xlvl = cpuid_eax(0x80000000);
	c->extended_cpuid_level = xlvl;
	/* Top half must read 0x8000xxxx or the extended range is absent. */
	if ((xlvl & 0xffff0000) == 0x80000000) {
		if (xlvl >= 0x80000001) {
			c->x86_capability[1] = cpuid_edx(0x80000001);
			c->x86_capability[6] = cpuid_ecx(0x80000001);
		}
		if (xlvl >= 0x80000004)
			get_model_name(c); /* Default name */
	}

	/* Transmeta-defined flags: level 0x80860001 */
	xlvl = cpuid_eax(0x80860000);
	if ((xlvl & 0xffff0000) == 0x80860000) {
		/* Don't set x86_cpuid_level here for now to not confuse. */
		if (xlvl >= 0x80860001)
			c->x86_capability[2] = cpuid_edx(0x80860001);
	}

	/*
	 * Vendor-specific initialization.  In this section we
	 * canonicalize the feature flags, meaning if there are
	 * features a certain CPU supports which CPUID doesn't
	 * tell us, CPUID claiming incorrect flags, or other bugs,
	 * we handle them here.
	 *
	 * At the end of this section, c->x86_capability better
	 * indicate the features this CPU genuinely supports!
	 */
	switch (c->x86_vendor) {
	case X86_VENDOR_AMD:
		init_amd(c);
		break;

	case X86_VENDOR_INTEL:
		init_intel(c);
		break;

	case X86_VENDOR_UNKNOWN:
	default:
		display_cacheinfo(c);
		break;
	}

	select_idle_routine(c);
	detect_ht(c);

	/*
	 * On SMP, boot_cpu_data holds the common feature set between
	 * all CPUs; so make sure that we indicate which features are
	 * common between the CPUs.  The first time this routine gets
	 * executed, c == &boot_cpu_data.
	 */
	if (c != &boot_cpu_data) {
		/* AND the already accumulated flags with these */
		for (i = 0 ; i < NCAPINTS ; i++)
			boot_cpu_data.x86_capability[i] &= c->x86_capability[i];
	}

#ifdef CONFIG_X86_MCE
	mcheck_init(c);
#endif
	/* The boot CPU and secondary CPUs need different MTRR setup paths. */
	if (c == &boot_cpu_data)
		mtrr_bp_init();
	else
		mtrr_ap_init();
#ifdef CONFIG_NUMA
	numa_add_cpu(smp_processor_id());
#endif
}
1199
1200
e6982c67 1201void __cpuinit print_cpu_info(struct cpuinfo_x86 *c)
1da177e4
LT
1202{
1203 if (c->x86_model_id[0])
1204 printk("%s", c->x86_model_id);
1205
1206 if (c->x86_mask || c->cpuid_level >= 0)
1207 printk(" stepping %02x\n", c->x86_mask);
1208 else
1209 printk("\n");
1210}
1211
/*
 * Get CPU information for use by the procfs.
 */

static int show_cpuinfo(struct seq_file *m, void *v)
{
	struct cpuinfo_x86 *c = v;

	/*
	 * These flag bits must match the definitions in <asm/cpufeature.h>.
	 * NULL means this bit is undefined or reserved; either way it doesn't
	 * have meaning as far as Linux is concerned. Note that it's important
	 * to realize there is a difference between this table and CPUID -- if
	 * applications want to get the raw CPUID data, they should access
	 * /dev/cpu/<cpu_nr>/cpuid instead.
	 *
	 * The table is indexed by absolute bit position: word n of
	 * x86_capability covers entries [n*32, n*32+31].
	 */
	static char *x86_cap_flags[] = {
		/* Intel-defined */
		"fpu", "vme", "de", "pse", "tsc", "msr", "pae", "mce",
		"cx8", "apic", NULL, "sep", "mtrr", "pge", "mca", "cmov",
		"pat", "pse36", "pn", "clflush", NULL, "dts", "acpi", "mmx",
		"fxsr", "sse", "sse2", "ss", "ht", "tm", "ia64", NULL,

		/* AMD-defined */
		NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
		NULL, NULL, NULL, "syscall", NULL, NULL, NULL, NULL,
		NULL, NULL, NULL, NULL, "nx", NULL, "mmxext", NULL,
		NULL, "fxsr_opt", "rdtscp", NULL, NULL, "lm", "3dnowext", "3dnow",

		/* Transmeta-defined */
		"recovery", "longrun", NULL, "lrti", NULL, NULL, NULL, NULL,
		NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
		NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
		NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,

		/* Other (Linux-defined) */
		"cxmmx", NULL, "cyrix_arr", "centaur_mcr", NULL,
		"constant_tsc", NULL, NULL,
		NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
		NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
		NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,

		/* Intel-defined (#2) */
		"pni", NULL, NULL, "monitor", "ds_cpl", "vmx", NULL, "est",
		"tm2", NULL, "cid", NULL, NULL, "cx16", "xtpr", NULL,
		NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
		NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,

		/* VIA/Cyrix/Centaur-defined */
		NULL, NULL, "rng", "rng_en", NULL, NULL, "ace", "ace_en",
		NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
		NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
		NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,

		/* AMD-defined (#2) */
		"lahf_lm", "cmp_legacy", "svm", NULL, "cr8_legacy", NULL, NULL, NULL,
		NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
		NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
		NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
	};
	/* Bits of c->x86_power (CPUID 0x80000007 EDX on AMD). */
	static char *x86_power_flags[] = {
		"ts",	/* temperature sensor */
		"fid",	/* frequency id control */
		"vid",	/* voltage id control */
		"ttp",	/* thermal trip */
		"tm",
		"stc",
		NULL,
		/* nothing */	/* constant_tsc - moved to flags */
	};


#ifdef CONFIG_SMP
	/* Offline CPUs contribute no /proc/cpuinfo entry. */
	if (!cpu_online(c-cpu_data))
		return 0;
#endif

	seq_printf(m,"processor\t: %u\n"
		     "vendor_id\t: %s\n"
		     "cpu family\t: %d\n"
		     "model\t\t: %d\n"
		     "model name\t: %s\n",
		     (unsigned)(c-cpu_data),
		     c->x86_vendor_id[0] ? c->x86_vendor_id : "unknown",
		     c->x86,
		     (int)c->x86_model,
		     c->x86_model_id[0] ? c->x86_model_id : "unknown");

	if (c->x86_mask || c->cpuid_level >= 0)
		seq_printf(m, "stepping\t: %d\n", c->x86_mask);
	else
		seq_printf(m, "stepping\t: unknown\n");

	if (cpu_has(c,X86_FEATURE_TSC)) {
		/* Prefer the cpufreq-reported frequency; fall back to the
		   boot-time TSC calibration if cpufreq has no data. */
		unsigned int freq = cpufreq_quick_get((unsigned)(c-cpu_data));
		if (!freq)
			freq = cpu_khz;
		seq_printf(m, "cpu MHz\t\t: %u.%03u\n",
			     freq / 1000, (freq % 1000));
	}

	/* Cache size */
	if (c->x86_cache_size >= 0)
		seq_printf(m, "cache size\t: %d KB\n", c->x86_cache_size);

#ifdef CONFIG_SMP
	/* Topology lines only make sense with HT siblings or multi-core. */
	if (smp_num_siblings * c->x86_max_cores > 1) {
		int cpu = c - cpu_data;
		seq_printf(m, "physical id\t: %d\n", phys_proc_id[cpu]);
		seq_printf(m, "siblings\t: %d\n", cpus_weight(cpu_core_map[cpu]));
		seq_printf(m, "core id\t\t: %d\n", cpu_core_id[cpu]);
		seq_printf(m, "cpu cores\t: %d\n", c->booted_cores);
	}
#endif

	seq_printf(m,
	        "fpu\t\t: yes\n"
	        "fpu_exception\t: yes\n"
	        "cpuid level\t: %d\n"
	        "wp\t\t: yes\n"
	        "flags\t\t:",
		   c->cpuid_level);

	{
		int i;
		/* Walk every capability bit; print only the named ones. */
		for ( i = 0 ; i < 32*NCAPINTS ; i++ )
			if ( test_bit(i, &c->x86_capability) &&
			     x86_cap_flags[i] != NULL )
				seq_printf(m, " %s", x86_cap_flags[i]);
	}

	seq_printf(m, "\nbogomips\t: %lu.%02lu\n",
		   c->loops_per_jiffy/(500000/HZ),
		   (c->loops_per_jiffy/(5000/HZ)) % 100);

	if (c->x86_tlbsize > 0)
		seq_printf(m, "TLB size\t: %d 4K pages\n", c->x86_tlbsize);
	seq_printf(m, "clflush size\t: %d\n", c->x86_clflush_size);
	seq_printf(m, "cache_alignment\t: %d\n", c->x86_cache_alignment);

	seq_printf(m, "address sizes\t: %u bits physical, %u bits virtual\n",
		   c->x86_phys_bits, c->x86_virt_bits);

	seq_printf(m, "power management:");
	{
		unsigned i;
		for (i = 0; i < 32; i++)
			if (c->x86_power & (1 << i)) {
				if (i < ARRAY_SIZE(x86_power_flags) &&
					x86_power_flags[i])
					seq_printf(m, "%s%s",
						x86_power_flags[i][0]?" ":"",
						x86_power_flags[i]);
				else
					/* Unnamed bit: print its index. */
					seq_printf(m, " [%d]", i);
			}
	}

	seq_printf(m, "\n\n");

	return 0;
}
1374
1375static void *c_start(struct seq_file *m, loff_t *pos)
1376{
1377 return *pos < NR_CPUS ? cpu_data + *pos : NULL;
1378}
1379
1380static void *c_next(struct seq_file *m, void *v, loff_t *pos)
1381{
1382 ++*pos;
1383 return c_start(m, pos);
1384}
1385
/* seq_file iterator teardown: nothing to release for cpuinfo. */
static void c_stop(struct seq_file *m, void *v)
{
}
1389
1390struct seq_operations cpuinfo_op = {
1391 .start =c_start,
1392 .next = c_next,
1393 .stop = c_stop,
1394 .show = show_cpuinfo,
1395};
e9928674
AK
1396
1397static int __init run_dmi_scan(void)
1398{
1399 dmi_scan_machine();
1400 return 0;
1401}
1402core_initcall(run_dmi_scan);
1403