arch/x86/kernel/setup_64.c
/*
 * Copyright (C) 1995 Linus Torvalds
 */

/*
 * This file handles the architecture-dependent parts of initialization
 */

#include <linux/errno.h>
#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/stddef.h>
#include <linux/unistd.h>
#include <linux/ptrace.h>
#include <linux/slab.h>
#include <linux/user.h>
#include <linux/a.out.h>
#include <linux/screen_info.h>
#include <linux/ioport.h>
#include <linux/delay.h>
#include <linux/init.h>
#include <linux/initrd.h>
#include <linux/highmem.h>
#include <linux/bootmem.h>
#include <linux/module.h>
#include <asm/processor.h>
#include <linux/console.h>
#include <linux/seq_file.h>
#include <linux/crash_dump.h>
#include <linux/root_dev.h>
#include <linux/pci.h>
#include <linux/efi.h>
#include <linux/acpi.h>
#include <linux/kallsyms.h>
#include <linux/edd.h>
#include <linux/mmzone.h>
#include <linux/kexec.h>
#include <linux/cpufreq.h>
#include <linux/dmi.h>
#include <linux/dma-mapping.h>
#include <linux/ctype.h>
#include <linux/uaccess.h>

#include <asm/mtrr.h>
#include <asm/uaccess.h>
#include <asm/system.h>
#include <asm/vsyscall.h>
#include <asm/io.h>
#include <asm/smp.h>
#include <asm/msr.h>
#include <asm/desc.h>
#include <video/edid.h>
#include <asm/e820.h>
#include <asm/dma.h>
#include <asm/mpspec.h>
#include <asm/mmu_context.h>
#include <asm/proto.h>
#include <asm/setup.h>
#include <asm/mach_apic.h>
#include <asm/numa.h>
#include <asm/sections.h>
#include <asm/dmi.h>
#include <asm/cacheflush.h>
#include <asm/mce.h>
#include <asm/ds.h>

#ifdef CONFIG_PARAVIRT
#include <asm/paravirt.h>
#else
#define ARCH_SETUP
#endif

/*
 * Machine setup..
 */

struct cpuinfo_x86 boot_cpu_data __read_mostly;
EXPORT_SYMBOL(boot_cpu_data);

unsigned long mmu_cr4_features;

/* Boot loader ID as an integer, for the benefit of proc_dointvec */
int bootloader_type;

unsigned long saved_video_mode;

int force_mwait __cpuinitdata;

/*
 * Early DMI memory
 */
int dmi_alloc_index;
char dmi_alloc_data[DMI_MAX_DATA];

/*
 * Setup options
 */
struct screen_info screen_info;
EXPORT_SYMBOL(screen_info);
struct sys_desc_table_struct {
        unsigned short length;
        unsigned char table[0];
};

struct edid_info edid_info;
EXPORT_SYMBOL_GPL(edid_info);

extern int root_mountflags;

char __initdata command_line[COMMAND_LINE_SIZE];

struct resource standard_io_resources[] = {
        { .name = "dma1", .start = 0x00, .end = 0x1f,
                .flags = IORESOURCE_BUSY | IORESOURCE_IO },
        { .name = "pic1", .start = 0x20, .end = 0x21,
                .flags = IORESOURCE_BUSY | IORESOURCE_IO },
        { .name = "timer0", .start = 0x40, .end = 0x43,
                .flags = IORESOURCE_BUSY | IORESOURCE_IO },
        { .name = "timer1", .start = 0x50, .end = 0x53,
                .flags = IORESOURCE_BUSY | IORESOURCE_IO },
        { .name = "keyboard", .start = 0x60, .end = 0x6f,
                .flags = IORESOURCE_BUSY | IORESOURCE_IO },
        { .name = "dma page reg", .start = 0x80, .end = 0x8f,
                .flags = IORESOURCE_BUSY | IORESOURCE_IO },
        { .name = "pic2", .start = 0xa0, .end = 0xa1,
                .flags = IORESOURCE_BUSY | IORESOURCE_IO },
        { .name = "dma2", .start = 0xc0, .end = 0xdf,
                .flags = IORESOURCE_BUSY | IORESOURCE_IO },
        { .name = "fpu", .start = 0xf0, .end = 0xff,
                .flags = IORESOURCE_BUSY | IORESOURCE_IO }
};

#define IORESOURCE_RAM (IORESOURCE_BUSY | IORESOURCE_MEM)

static struct resource data_resource = {
        .name = "Kernel data",
        .start = 0,
        .end = 0,
        .flags = IORESOURCE_RAM,
};
static struct resource code_resource = {
        .name = "Kernel code",
        .start = 0,
        .end = 0,
        .flags = IORESOURCE_RAM,
};
static struct resource bss_resource = {
        .name = "Kernel bss",
        .start = 0,
        .end = 0,
        .flags = IORESOURCE_RAM,
};

static void __cpuinit early_identify_cpu(struct cpuinfo_x86 *c);

#ifdef CONFIG_PROC_VMCORE
/* elfcorehdr= specifies the location of elf core header
 * stored by the crashed kernel. This option will be passed
 * by kexec loader to the capture kernel.
 */
static int __init setup_elfcorehdr(char *arg)
{
        char *end;
        if (!arg)
                return -EINVAL;
        elfcorehdr_addr = memparse(arg, &end);
        return end > arg ? 0 : -EINVAL;
}
early_param("elfcorehdr", setup_elfcorehdr);
#endif

#ifndef CONFIG_NUMA
static void __init
contig_initmem_init(unsigned long start_pfn, unsigned long end_pfn)
{
        unsigned long bootmap_size, bootmap;

        bootmap_size = bootmem_bootmap_pages(end_pfn)<<PAGE_SHIFT;
        bootmap = find_e820_area(0, end_pfn<<PAGE_SHIFT, bootmap_size);
        if (bootmap == -1L)
                panic("Cannot find bootmem map of size %ld\n", bootmap_size);
        bootmap_size = init_bootmem(bootmap >> PAGE_SHIFT, end_pfn);
        e820_register_active_regions(0, start_pfn, end_pfn);
        free_bootmem_with_active_regions(0, end_pfn);
        reserve_bootmem(bootmap, bootmap_size);
}
#endif

#if defined(CONFIG_EDD) || defined(CONFIG_EDD_MODULE)
struct edd edd;
#ifdef CONFIG_EDD_MODULE
EXPORT_SYMBOL(edd);
#endif
/**
 * copy_edd() - Copy the BIOS EDD information
 *              from boot_params into a safe place.
 *
 */
static inline void copy_edd(void)
{
        memcpy(edd.mbr_signature, boot_params.edd_mbr_sig_buffer,
               sizeof(edd.mbr_signature));
        memcpy(edd.edd_info, boot_params.eddbuf, sizeof(edd.edd_info));
        edd.mbr_signature_nr = boot_params.edd_mbr_sig_buf_entries;
        edd.edd_info_nr = boot_params.eddbuf_entries;
}
#else
static inline void copy_edd(void)
{
}
#endif

#ifdef CONFIG_KEXEC
static void __init reserve_crashkernel(void)
{
        unsigned long long free_mem;
        unsigned long long crash_size, crash_base;
        int ret;

        free_mem =
                ((unsigned long long)max_low_pfn - min_low_pfn) << PAGE_SHIFT;

        ret = parse_crashkernel(boot_command_line, free_mem,
                                &crash_size, &crash_base);
        if (ret == 0 && crash_size) {
                if (crash_base > 0) {
                        printk(KERN_INFO "Reserving %ldMB of memory at %ldMB "
                               "for crashkernel (System RAM: %ldMB)\n",
                               (unsigned long)(crash_size >> 20),
                               (unsigned long)(crash_base >> 20),
                               (unsigned long)(free_mem >> 20));
                        crashk_res.start = crash_base;
                        crashk_res.end = crash_base + crash_size - 1;
                        reserve_bootmem(crash_base, crash_size);
                } else
                        printk(KERN_INFO "crashkernel reservation failed - "
                               "you have to specify a base address\n");
        }
}
#else
static inline void __init reserve_crashkernel(void)
{}
#endif

#define EBDA_ADDR_POINTER 0x40E

unsigned __initdata ebda_addr;
unsigned __initdata ebda_size;

static void discover_ebda(void)
{
        /*
         * there is a real-mode segmented pointer pointing to the
         * 4K EBDA area at 0x40E
         */
        ebda_addr = *(unsigned short *)__va(EBDA_ADDR_POINTER);
        /*
         * There can be some situations, like paravirtualized guests,
         * in which there is no available ebda information. In such
         * case, just skip it
         */
        if (!ebda_addr) {
                ebda_size = 0;
                return;
        }

        ebda_addr <<= 4;

        ebda_size = *(unsigned short *)__va(ebda_addr);

        /* Round EBDA up to pages */
        if (ebda_size == 0)
                ebda_size = 1;
        ebda_size <<= 10;
        ebda_size = round_up(ebda_size + (ebda_addr & ~PAGE_MASK), PAGE_SIZE);
        if (ebda_size > 64*1024)
                ebda_size = 64*1024;
}

/* Overridden in paravirt.c if CONFIG_PARAVIRT */
void __attribute__((weak)) memory_setup(void)
{
        machine_specific_memory_setup();
}

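/*
 * setup_arch - architecture-specific boot-time setup.
 *
 * Copies boot data out of boot_params, builds the direct memory mapping,
 * reserves the bootmem regions the kernel relies on (kernel image, EBDA,
 * SMP trampoline, initrd, crashkernel), and parses the ACPI and MP tables
 * before the rest of start_kernel() continues.
 */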
void __init setup_arch(char **cmdline_p)
{
        unsigned i;

        printk(KERN_INFO "Command line: %s\n", boot_command_line);

        ROOT_DEV = old_decode_dev(boot_params.hdr.root_dev);
        screen_info = boot_params.screen_info;
        edid_info = boot_params.edid_info;
        saved_video_mode = boot_params.hdr.vid_mode;
        bootloader_type = boot_params.hdr.type_of_loader;

#ifdef CONFIG_BLK_DEV_RAM
        rd_image_start = boot_params.hdr.ram_size & RAMDISK_IMAGE_START_MASK;
        rd_prompt = ((boot_params.hdr.ram_size & RAMDISK_PROMPT_FLAG) != 0);
        rd_doload = ((boot_params.hdr.ram_size & RAMDISK_LOAD_FLAG) != 0);
#endif
#ifdef CONFIG_EFI
        if (!strncmp((char *)&boot_params.efi_info.efi_loader_signature,
                     "EL64", 4))
                efi_enabled = 1;
#endif

        ARCH_SETUP

        memory_setup();
        copy_edd();

        if (!boot_params.hdr.root_flags)
                root_mountflags &= ~MS_RDONLY;
        init_mm.start_code = (unsigned long) &_text;
        init_mm.end_code = (unsigned long) &_etext;
        init_mm.end_data = (unsigned long) &_edata;
        init_mm.brk = (unsigned long) &_end;

        code_resource.start = virt_to_phys(&_text);
        code_resource.end = virt_to_phys(&_etext)-1;
        data_resource.start = virt_to_phys(&_etext);
        data_resource.end = virt_to_phys(&_edata)-1;
        bss_resource.start = virt_to_phys(&__bss_start);
        bss_resource.end = virt_to_phys(&__bss_stop)-1;

        early_identify_cpu(&boot_cpu_data);

        strlcpy(command_line, boot_command_line, COMMAND_LINE_SIZE);
        *cmdline_p = command_line;

        parse_early_param();

        finish_e820_parsing();

        e820_register_active_regions(0, 0, -1UL);
        /*
         * partially used pages are not usable - thus
         * we are rounding upwards:
         */
        end_pfn = e820_end_of_ram();
        num_physpages = end_pfn;

        check_efer();

        discover_ebda();

        init_memory_mapping(0, (end_pfn_map << PAGE_SHIFT));
        if (efi_enabled)
                efi_init();

        dmi_scan_machine();

        io_delay_init();

#ifdef CONFIG_SMP
        /* setup to use the static apicid table during kernel startup */
        x86_cpu_to_apicid_ptr = (void *)&x86_cpu_to_apicid_init;
#endif

#ifdef CONFIG_ACPI
        /*
         * Initialize the ACPI boot-time table parser (gets the RSDP and SDT).
         * Call this early for SRAT node setup.
         */
        acpi_boot_table_init();
#endif

        /* How many end-of-memory variables you have, grandma! */
        max_low_pfn = end_pfn;
        max_pfn = end_pfn;
        high_memory = (void *)__va(end_pfn * PAGE_SIZE - 1) + 1;

        /* Remove active ranges so rediscovery with NUMA-awareness happens */
        remove_all_active_ranges();

#ifdef CONFIG_ACPI_NUMA
        /*
         * Parse SRAT to discover nodes.
         */
        acpi_numa_init();
#endif

#ifdef CONFIG_NUMA
        numa_initmem_init(0, end_pfn);
#else
        contig_initmem_init(0, end_pfn);
#endif

        /* Reserve direct mapping */
        reserve_bootmem_generic(table_start << PAGE_SHIFT,
                                (table_end - table_start) << PAGE_SHIFT);

        /* reserve kernel */
        reserve_bootmem_generic(__pa_symbol(&_text),
                                __pa_symbol(&_end) - __pa_symbol(&_text));

        /*
         * reserve physical page 0 - it's a special BIOS page on many boxes,
         * enabling clean reboots, SMP operation, laptop functions.
         */
        reserve_bootmem_generic(0, PAGE_SIZE);

        /* reserve ebda region */
        if (ebda_addr)
                reserve_bootmem_generic(ebda_addr, ebda_size);
#ifdef CONFIG_NUMA
        /* reserve nodemap region */
        if (nodemap_addr)
                reserve_bootmem_generic(nodemap_addr, nodemap_size);
#endif

#ifdef CONFIG_SMP
        /* Reserve SMP trampoline */
        reserve_bootmem_generic(SMP_TRAMPOLINE_BASE, 2*PAGE_SIZE);
#endif

#ifdef CONFIG_ACPI_SLEEP
        /*
         * Reserve low memory region for sleep support.
         */
        acpi_reserve_bootmem();
#endif

        if (efi_enabled) {
                efi_map_memmap();
                efi_reserve_bootmem();
        }

        /*
         * Find and reserve possible boot-time SMP configuration:
         */
        find_smp_config();
#ifdef CONFIG_BLK_DEV_INITRD
        if (boot_params.hdr.type_of_loader && boot_params.hdr.ramdisk_image) {
                unsigned long ramdisk_image = boot_params.hdr.ramdisk_image;
                unsigned long ramdisk_size = boot_params.hdr.ramdisk_size;
                unsigned long ramdisk_end = ramdisk_image + ramdisk_size;
                unsigned long end_of_mem = end_pfn << PAGE_SHIFT;

                if (ramdisk_end <= end_of_mem) {
                        reserve_bootmem_generic(ramdisk_image, ramdisk_size);
                        initrd_start = ramdisk_image + PAGE_OFFSET;
                        initrd_end = initrd_start+ramdisk_size;
                } else {
                        printk(KERN_ERR "initrd extends beyond end of memory "
                               "(0x%08lx > 0x%08lx)\ndisabling initrd\n",
                               ramdisk_end, end_of_mem);
                        initrd_start = 0;
                }
        }
#endif
        reserve_crashkernel();
        paging_init();
        map_vsyscall();

        early_quirks();

        /*
         * set this early, so we don't allocate cpu0
         * if MADT list doesn't list BSP first
         * mpparse.c/MP_processor_info() allocates logical cpu numbers.
         */
        cpu_set(0, cpu_present_map);
#ifdef CONFIG_ACPI
        /*
         * Read APIC and some other early information from ACPI tables.
         */
        acpi_boot_init();
#endif

        init_cpu_to_node();

        /*
         * get boot-time SMP configuration:
         */
        if (smp_found_config)
                get_smp_config();
        init_apic_mappings();
        ioapic_init_mappings();

        /*
         * We trust e820 completely. No explicit ROM probing in memory.
         */
        e820_reserve_resources(&code_resource, &data_resource, &bss_resource);
        e820_mark_nosave_regions();

        /* request I/O space for devices used on all i[345]86 PCs */
        for (i = 0; i < ARRAY_SIZE(standard_io_resources); i++)
                request_resource(&ioport_resource, &standard_io_resources[i]);

        e820_setup_gap();

#ifdef CONFIG_VT
#if defined(CONFIG_VGA_CONSOLE)
        if (!efi_enabled || (efi_mem_type(0xa0000) != EFI_CONVENTIONAL_MEMORY))
                conswitchp = &vga_con;
#elif defined(CONFIG_DUMMY_CONSOLE)
        conswitchp = &dummy_con;
#endif
#endif
}

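/* Fill c->x86_model_id with the brand string from CPUID leaves 0x80000002-0x80000004. */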
static int __cpuinit get_model_name(struct cpuinfo_x86 *c)
{
        unsigned int *v;

        if (c->extended_cpuid_level < 0x80000004)
                return 0;

        v = (unsigned int *) c->x86_model_id;
        cpuid(0x80000002, &v[0], &v[1], &v[2], &v[3]);
        cpuid(0x80000003, &v[4], &v[5], &v[6], &v[7]);
        cpuid(0x80000004, &v[8], &v[9], &v[10], &v[11]);
        c->x86_model_id[48] = 0;
        return 1;
}


static void __cpuinit display_cacheinfo(struct cpuinfo_x86 *c)
{
        unsigned int n, dummy, eax, ebx, ecx, edx;

        n = c->extended_cpuid_level;

        if (n >= 0x80000005) {
                cpuid(0x80000005, &dummy, &ebx, &ecx, &edx);
                printk(KERN_INFO "CPU: L1 I Cache: %dK (%d bytes/line), "
                       "D cache %dK (%d bytes/line)\n",
                       edx>>24, edx&0xFF, ecx>>24, ecx&0xFF);
                c->x86_cache_size = (ecx>>24) + (edx>>24);
                /* On K8 L1 TLB is inclusive, so don't count it */
                c->x86_tlbsize = 0;
        }

        if (n >= 0x80000006) {
                cpuid(0x80000006, &dummy, &ebx, &ecx, &edx);
                ecx = cpuid_ecx(0x80000006);
                c->x86_cache_size = ecx >> 16;
                c->x86_tlbsize += ((ebx >> 16) & 0xfff) + (ebx & 0xfff);

                printk(KERN_INFO "CPU: L2 Cache: %dK (%d bytes/line)\n",
                       c->x86_cache_size, ecx & 0xFF);
        }
        if (n >= 0x80000008) {
                cpuid(0x80000008, &eax, &dummy, &dummy, &dummy);
                c->x86_virt_bits = (eax >> 8) & 0xff;
                c->x86_phys_bits = eax & 0xff;
        }
}

#ifdef CONFIG_NUMA
static int nearby_node(int apicid)
{
        int i, node;

        for (i = apicid - 1; i >= 0; i--) {
                node = apicid_to_node[i];
                if (node != NUMA_NO_NODE && node_online(node))
                        return node;
        }
        for (i = apicid + 1; i < MAX_LOCAL_APIC; i++) {
                node = apicid_to_node[i];
                if (node != NUMA_NO_NODE && node_online(node))
                        return node;
        }
        return first_node(node_online_map); /* Shouldn't happen */
}
#endif

/*
 * On an AMD dual core setup the lower bits of the APIC ID distinguish the cores.
 * Assumes number of cores is a power of two.
 */
static void __init amd_detect_cmp(struct cpuinfo_x86 *c)
{
#ifdef CONFIG_SMP
        unsigned bits;
#ifdef CONFIG_NUMA
        int cpu = smp_processor_id();
        int node = 0;
        unsigned apicid = hard_smp_processor_id();
#endif
        bits = c->x86_coreid_bits;

        /* Low order bits define the core id (index of core in socket) */
        c->cpu_core_id = c->phys_proc_id & ((1 << bits)-1);
        /* Convert the APIC ID into the socket ID */
        c->phys_proc_id = phys_pkg_id(bits);

#ifdef CONFIG_NUMA
        node = c->phys_proc_id;
        if (apicid_to_node[apicid] != NUMA_NO_NODE)
                node = apicid_to_node[apicid];
        if (!node_online(node)) {
                /* Two possibilities here:
                   - The CPU is missing memory and no node was created.
                     In that case try picking one from a nearby CPU
                   - The APIC IDs differ from the HyperTransport node IDs
                     which the K8 northbridge parsing fills in.
                     Assume they are all increased by a constant offset,
                     but in the same order as the HT nodeids.
                     If that doesn't result in a usable node fall back to the
                     path for the previous case. */

                int ht_nodeid = apicid - (cpu_data(0).phys_proc_id << bits);

                if (ht_nodeid >= 0 &&
                    apicid_to_node[ht_nodeid] != NUMA_NO_NODE)
                        node = apicid_to_node[ht_nodeid];
                /* Pick a nearby node */
                if (!node_online(node))
                        node = nearby_node(apicid);
        }
        numa_set_node(cpu, node);

        printk(KERN_INFO "CPU %d/%x -> Node %d\n", cpu, apicid, node);
#endif
#endif
}

static void __cpuinit early_init_amd_mc(struct cpuinfo_x86 *c)
{
#ifdef CONFIG_SMP
        unsigned bits, ecx;

        /* Multi core CPU? */
        if (c->extended_cpuid_level < 0x80000008)
                return;

        ecx = cpuid_ecx(0x80000008);

        c->x86_max_cores = (ecx & 0xff) + 1;

        /* CPU telling us the core id bits shift? */
        bits = (ecx >> 12) & 0xF;

        /* Otherwise recompute */
        if (bits == 0) {
                while ((1 << bits) < c->x86_max_cores)
                        bits++;
        }

        c->x86_coreid_bits = bits;

#endif
}

#define ENABLE_C1E_MASK         0x18000000
#define CPUID_PROCESSOR_SIGNATURE       1
#define CPUID_XFAM              0x0ff00000
#define CPUID_XFAM_K8           0x00000000
#define CPUID_XFAM_10H          0x00100000
#define CPUID_XFAM_11H          0x00200000
#define CPUID_XMOD              0x000f0000
#define CPUID_XMOD_REV_F        0x00040000

/* AMD systems with C1E don't have a working lAPIC timer. Check for that. */
static __cpuinit int amd_apic_timer_broken(void)
{
        u32 lo, hi, eax = cpuid_eax(CPUID_PROCESSOR_SIGNATURE);

        switch (eax & CPUID_XFAM) {
        case CPUID_XFAM_K8:
                if ((eax & CPUID_XMOD) < CPUID_XMOD_REV_F)
                        break;
        case CPUID_XFAM_10H:
        case CPUID_XFAM_11H:
                rdmsr(MSR_K8_ENABLE_C1E, lo, hi);
                if (lo & ENABLE_C1E_MASK)
                        return 1;
                break;
        default:
                /* err on the side of caution */
                return 1;
        }
        return 0;
}

static void __cpuinit early_init_amd(struct cpuinfo_x86 *c)
{
        early_init_amd_mc(c);

        /* c->x86_power is 8000_0007 edx. Bit 8 is constant TSC */
        if (c->x86_power & (1<<8))
                set_cpu_cap(c, X86_FEATURE_CONSTANT_TSC);
}

static void __cpuinit init_amd(struct cpuinfo_x86 *c)
{
        unsigned level;

#ifdef CONFIG_SMP
        unsigned long value;

        /*
         * Disable TLB flush filter by setting HWCR.FFDIS on K8
         * bit 6 of msr C001_0015
         *
         * Errata 63 for SH-B3 steppings
         * Errata 122 for all steppings (F+ have it disabled by default)
         */
        if (c->x86 == 15) {
                rdmsrl(MSR_K8_HWCR, value);
                value |= 1 << 6;
                wrmsrl(MSR_K8_HWCR, value);
        }
#endif

        /* Bit 31 in normal CPUID used for nonstandard 3DNow ID;
           3DNow is IDd by bit 31 in extended CPUID (1*32+31) anyway */
        clear_bit(0*32+31, (unsigned long *)&c->x86_capability);

        /* On C+ stepping K8 rep microcode works well for copy/memset */
        level = cpuid_eax(1);
        if (c->x86 == 15 && ((level >= 0x0f48 && level < 0x0f50) ||
                             level >= 0x0f58))
                set_cpu_cap(c, X86_FEATURE_REP_GOOD);
        if (c->x86 == 0x10 || c->x86 == 0x11)
                set_cpu_cap(c, X86_FEATURE_REP_GOOD);

        /* Enable workaround for FXSAVE leak */
        if (c->x86 >= 6)
                set_cpu_cap(c, X86_FEATURE_FXSAVE_LEAK);

        level = get_model_name(c);
        if (!level) {
                switch (c->x86) {
                case 15:
                        /* Should distinguish Models here, but this is only
                           a fallback anyways. */
                        strcpy(c->x86_model_id, "Hammer");
                        break;
                }
        }
        display_cacheinfo(c);

        /* Multi core CPU? */
        if (c->extended_cpuid_level >= 0x80000008)
                amd_detect_cmp(c);

        if (c->extended_cpuid_level >= 0x80000006 &&
            (cpuid_edx(0x80000006) & 0xf000))
                num_cache_leaves = 4;
        else
                num_cache_leaves = 3;

        if (c->x86 == 0xf || c->x86 == 0x10 || c->x86 == 0x11)
                set_cpu_cap(c, X86_FEATURE_K8);

        /* MFENCE stops RDTSC speculation */
        set_cpu_cap(c, X86_FEATURE_MFENCE_RDTSC);

        /* Family 10 doesn't support C states in MWAIT so don't use it */
        if (c->x86 == 0x10 && !force_mwait)
                clear_cpu_cap(c, X86_FEATURE_MWAIT);

        if (amd_apic_timer_broken())
                disable_apic_timer = 1;
}

void __cpuinit detect_ht(struct cpuinfo_x86 *c)
{
#ifdef CONFIG_SMP
        u32 eax, ebx, ecx, edx;
        int index_msb, core_bits;

        cpuid(1, &eax, &ebx, &ecx, &edx);


        if (!cpu_has(c, X86_FEATURE_HT))
                return;
        if (cpu_has(c, X86_FEATURE_CMP_LEGACY))
                goto out;

        smp_num_siblings = (ebx & 0xff0000) >> 16;

        if (smp_num_siblings == 1) {
                printk(KERN_INFO "CPU: Hyper-Threading is disabled\n");
        } else if (smp_num_siblings > 1) {

                if (smp_num_siblings > NR_CPUS) {
                        printk(KERN_WARNING "CPU: Unsupported number of "
                               "siblings %d", smp_num_siblings);
                        smp_num_siblings = 1;
                        return;
                }

                index_msb = get_count_order(smp_num_siblings);
                c->phys_proc_id = phys_pkg_id(index_msb);

                smp_num_siblings = smp_num_siblings / c->x86_max_cores;

                index_msb = get_count_order(smp_num_siblings);

                core_bits = get_count_order(c->x86_max_cores);

                c->cpu_core_id = phys_pkg_id(index_msb) &
                                 ((1 << core_bits) - 1);
        }
out:
        if ((c->x86_max_cores * smp_num_siblings) > 1) {
                printk(KERN_INFO "CPU: Physical Processor ID: %d\n",
                       c->phys_proc_id);
                printk(KERN_INFO "CPU: Processor Core ID: %d\n",
                       c->cpu_core_id);
        }

#endif
}

/*
 * find out the number of processor cores on the die
 */
static int __cpuinit intel_num_cpu_cores(struct cpuinfo_x86 *c)
{
        unsigned int eax, t;

        if (c->cpuid_level < 4)
                return 1;

        cpuid_count(4, 0, &eax, &t, &t, &t);

        if (eax & 0x1f)
                return ((eax >> 26) + 1);
        else
                return 1;
}

static void srat_detect_node(void)
{
#ifdef CONFIG_NUMA
        unsigned node;
        int cpu = smp_processor_id();
        int apicid = hard_smp_processor_id();

        /* Don't do the funky fallback heuristics the AMD version employs
           for now. */
        node = apicid_to_node[apicid];
        if (node == NUMA_NO_NODE)
                node = first_node(node_online_map);
        numa_set_node(cpu, node);

        printk(KERN_INFO "CPU %d/%x -> Node %d\n", cpu, apicid, node);
#endif
}

static void __cpuinit early_init_intel(struct cpuinfo_x86 *c)
{
        if ((c->x86 == 0xf && c->x86_model >= 0x03) ||
            (c->x86 == 0x6 && c->x86_model >= 0x0e))
                set_bit(X86_FEATURE_CONSTANT_TSC, &c->x86_capability);
}

static void __cpuinit init_intel(struct cpuinfo_x86 *c)
{
        /* Cache sizes */
        unsigned n;

        init_intel_cacheinfo(c);
        if (c->cpuid_level > 9) {
                unsigned eax = cpuid_eax(10);
                /* Check for version and the number of counters */
                if ((eax & 0xff) && (((eax>>8) & 0xff) > 1))
                        set_cpu_cap(c, X86_FEATURE_ARCH_PERFMON);
        }

        if (cpu_has_ds) {
                unsigned int l1, l2;
                rdmsr(MSR_IA32_MISC_ENABLE, l1, l2);
                if (!(l1 & (1<<11)))
                        set_cpu_cap(c, X86_FEATURE_BTS);
                if (!(l1 & (1<<12)))
                        set_cpu_cap(c, X86_FEATURE_PEBS);
        }


        if (cpu_has_bts)
                ds_init_intel(c);

        n = c->extended_cpuid_level;
        if (n >= 0x80000008) {
                unsigned eax = cpuid_eax(0x80000008);
                c->x86_virt_bits = (eax >> 8) & 0xff;
                c->x86_phys_bits = eax & 0xff;
                /* CPUID workaround for Intel 0F34 CPU */
                if (c->x86_vendor == X86_VENDOR_INTEL &&
                    c->x86 == 0xF && c->x86_model == 0x3 &&
                    c->x86_mask == 0x4)
                        c->x86_phys_bits = 36;
        }

        if (c->x86 == 15)
                c->x86_cache_alignment = c->x86_clflush_size * 2;
        if ((c->x86 == 0xf && c->x86_model >= 0x03) ||
            (c->x86 == 0x6 && c->x86_model >= 0x0e))
                set_cpu_cap(c, X86_FEATURE_CONSTANT_TSC);
        if (c->x86 == 6)
                set_cpu_cap(c, X86_FEATURE_REP_GOOD);
        set_cpu_cap(c, X86_FEATURE_LFENCE_RDTSC);
        c->x86_max_cores = intel_num_cpu_cores(c);

        srat_detect_node();
}

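/* Map the CPUID vendor-id string onto the X86_VENDOR_* constants. */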
static void __cpuinit get_cpu_vendor(struct cpuinfo_x86 *c)
{
        char *v = c->x86_vendor_id;

        if (!strcmp(v, "AuthenticAMD"))
                c->x86_vendor = X86_VENDOR_AMD;
        else if (!strcmp(v, "GenuineIntel"))
                c->x86_vendor = X86_VENDOR_INTEL;
        else
                c->x86_vendor = X86_VENDOR_UNKNOWN;
}

struct cpu_model_info {
        int vendor;
        int family;
        char *model_names[16];
};

/* Do some early cpuid on the boot CPU to get some parameters that are
   needed before check_bugs. Everything advanced is in identify_cpu
   below. */
static void __cpuinit early_identify_cpu(struct cpuinfo_x86 *c)
{
        u32 tfms, xlvl;

        c->loops_per_jiffy = loops_per_jiffy;
        c->x86_cache_size = -1;
        c->x86_vendor = X86_VENDOR_UNKNOWN;
        c->x86_model = c->x86_mask = 0; /* So far unknown... */
        c->x86_vendor_id[0] = '\0'; /* Unset */
        c->x86_model_id[0] = '\0';  /* Unset */
        c->x86_clflush_size = 64;
        c->x86_cache_alignment = c->x86_clflush_size;
        c->x86_max_cores = 1;
        c->x86_coreid_bits = 0;
        c->extended_cpuid_level = 0;
        memset(&c->x86_capability, 0, sizeof c->x86_capability);

        /* Get vendor name */
        cpuid(0x00000000, (unsigned int *)&c->cpuid_level,
              (unsigned int *)&c->x86_vendor_id[0],
              (unsigned int *)&c->x86_vendor_id[8],
              (unsigned int *)&c->x86_vendor_id[4]);

        get_cpu_vendor(c);

        /* Initialize the standard set of capabilities */
        /* Note that the vendor-specific code below might override */

        /* Intel-defined flags: level 0x00000001 */
        if (c->cpuid_level >= 0x00000001) {
                __u32 misc;
                cpuid(0x00000001, &tfms, &misc, &c->x86_capability[4],
                      &c->x86_capability[0]);
                c->x86 = (tfms >> 8) & 0xf;
                c->x86_model = (tfms >> 4) & 0xf;
                c->x86_mask = tfms & 0xf;
                if (c->x86 == 0xf)
                        c->x86 += (tfms >> 20) & 0xff;
                if (c->x86 >= 0x6)
                        c->x86_model += ((tfms >> 16) & 0xF) << 4;
                if (c->x86_capability[0] & (1<<19))
                        c->x86_clflush_size = ((misc >> 8) & 0xff) * 8;
        } else {
                /* Have CPUID level 0 only - unheard of */
                c->x86 = 4;
        }

#ifdef CONFIG_SMP
        c->phys_proc_id = (cpuid_ebx(1) >> 24) & 0xff;
#endif
        /* AMD-defined flags: level 0x80000001 */
        xlvl = cpuid_eax(0x80000000);
        c->extended_cpuid_level = xlvl;
        if ((xlvl & 0xffff0000) == 0x80000000) {
                if (xlvl >= 0x80000001) {
                        c->x86_capability[1] = cpuid_edx(0x80000001);
                        c->x86_capability[6] = cpuid_ecx(0x80000001);
                }
                if (xlvl >= 0x80000004)
                        get_model_name(c); /* Default name */
        }

        /* Transmeta-defined flags: level 0x80860001 */
        xlvl = cpuid_eax(0x80860000);
        if ((xlvl & 0xffff0000) == 0x80860000) {
                /* Don't set x86_cpuid_level here for now to not confuse. */
                if (xlvl >= 0x80860001)
                        c->x86_capability[2] = cpuid_edx(0x80860001);
        }

        c->extended_cpuid_level = cpuid_eax(0x80000000);
        if (c->extended_cpuid_level >= 0x80000007)
                c->x86_power = cpuid_edx(0x80000007);

        switch (c->x86_vendor) {
        case X86_VENDOR_AMD:
                early_init_amd(c);
                break;
        }

}

/*
 * This does the hard work of actually picking apart the CPU stuff...
 */
void __cpuinit identify_cpu(struct cpuinfo_x86 *c)
{
        int i;

        early_identify_cpu(c);

        init_scattered_cpuid_features(c);

        c->apicid = phys_pkg_id(0);

        /*
         * Vendor-specific initialization. In this section we
         * canonicalize the feature flags, meaning if there are
         * features a certain CPU supports which CPUID doesn't
         * tell us, CPUID claiming incorrect flags, or other bugs,
         * we handle them here.
         *
         * At the end of this section, c->x86_capability better
         * indicate the features this CPU genuinely supports!
         */
        switch (c->x86_vendor) {
        case X86_VENDOR_AMD:
                init_amd(c);
                break;

        case X86_VENDOR_INTEL:
                init_intel(c);
                break;

        case X86_VENDOR_UNKNOWN:
        default:
                display_cacheinfo(c);
                break;
        }

        select_idle_routine(c);
        detect_ht(c);

        /*
         * On SMP, boot_cpu_data holds the common feature set between
         * all CPUs; so make sure that we indicate which features are
         * common between the CPUs. The first time this routine gets
         * executed, c == &boot_cpu_data.
         */
        if (c != &boot_cpu_data) {
                /* AND the already accumulated flags with these */
                for (i = 0; i < NCAPINTS; i++)
                        boot_cpu_data.x86_capability[i] &= c->x86_capability[i];
        }

#ifdef CONFIG_X86_MCE
        mcheck_init(c);
#endif
        if (c != &boot_cpu_data)
                mtrr_ap_init();
#ifdef CONFIG_NUMA
        numa_add_cpu(smp_processor_id());
#endif

        switch (c->x86_vendor) {
        case X86_VENDOR_AMD:
                early_init_amd(c);
                break;
        case X86_VENDOR_INTEL:
                early_init_intel(c);
                break;
        }
}

void __cpuinit print_cpu_info(struct cpuinfo_x86 *c)
{
        if (c->x86_model_id[0])
                printk(KERN_INFO "%s", c->x86_model_id);

        if (c->x86_mask || c->cpuid_level >= 0)
                printk(KERN_CONT " stepping %02x\n", c->x86_mask);
        else
                printk(KERN_CONT "\n");
}

/*
 * Get CPU information for use by the procfs.
 */

static int show_cpuinfo(struct seq_file *m, void *v)
{
        struct cpuinfo_x86 *c = v;
        int cpu = 0, i;

        /*
         * These flag bits must match the definitions in <asm/cpufeature.h>.
         * NULL means this bit is undefined or reserved; either way it doesn't
         * have meaning as far as Linux is concerned. Note that it's important
         * to realize there is a difference between this table and CPUID -- if
         * applications want to get the raw CPUID data, they should access
         * /dev/cpu/<cpu_nr>/cpuid instead.
         */
        static const char *const x86_cap_flags[] = {
                /* Intel-defined */
                "fpu", "vme", "de", "pse", "tsc", "msr", "pae", "mce",
                "cx8", "apic", NULL, "sep", "mtrr", "pge", "mca", "cmov",
                "pat", "pse36", "pn", "clflush", NULL, "dts", "acpi", "mmx",
                "fxsr", "sse", "sse2", "ss", "ht", "tm", "ia64", "pbe",

                /* AMD-defined */
                NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
                NULL, NULL, NULL, "syscall", NULL, NULL, NULL, NULL,
                NULL, NULL, NULL, NULL, "nx", NULL, "mmxext", NULL,
                NULL, "fxsr_opt", "pdpe1gb", "rdtscp", NULL, "lm",
                "3dnowext", "3dnow",

                /* Transmeta-defined */
                "recovery", "longrun", NULL, "lrti", NULL, NULL, NULL, NULL,
                NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
                NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
                NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,

                /* Other (Linux-defined) */
                "cxmmx", "k6_mtrr", "cyrix_arr", "centaur_mcr",
                NULL, NULL, NULL, NULL,
                "constant_tsc", "up", NULL, "arch_perfmon",
                "pebs", "bts", NULL, "sync_rdtsc",
                "rep_good", NULL, NULL, NULL, NULL, NULL, NULL, NULL,
                NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,

                /* Intel-defined (#2) */
                "pni", NULL, NULL, "monitor", "ds_cpl", "vmx", "smx", "est",
                "tm2", "ssse3", "cid", NULL, NULL, "cx16", "xtpr", NULL,
                NULL, NULL, "dca", "sse4_1", "sse4_2", NULL, NULL, "popcnt",
                NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,

                /* VIA/Cyrix/Centaur-defined */
                NULL, NULL, "rng", "rng_en", NULL, NULL, "ace", "ace_en",
                "ace2", "ace2_en", "phe", "phe_en", "pmm", "pmm_en", NULL, NULL,
                NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
                NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,

                /* AMD-defined (#2) */
                "lahf_lm", "cmp_legacy", "svm", "extapic",
                "cr8_legacy", "abm", "sse4a", "misalignsse",
                "3dnowprefetch", "osvw", "ibs", "sse5",
                "skinit", "wdt", NULL, NULL,
                NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
                NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,

                /* Auxiliary (Linux-defined) */
                "ida", NULL, NULL, NULL, NULL, NULL, NULL, NULL,
                NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
                NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
                NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
        };
        static const char *const x86_power_flags[] = {
                "ts",   /* temperature sensor */
                "fid",  /* frequency id control */
                "vid",  /* voltage id control */
                "ttp",  /* thermal trip */
                "tm",
                "stc",
                "100mhzsteps",
                "hwpstate",
                "",     /* tsc invariant mapped to constant_tsc */
                /* nothing */
        };


#ifdef CONFIG_SMP
        cpu = c->cpu_index;
#endif

        seq_printf(m, "processor\t: %u\n"
                   "vendor_id\t: %s\n"
                   "cpu family\t: %d\n"
                   "model\t\t: %d\n"
                   "model name\t: %s\n",
                   (unsigned)cpu,
                   c->x86_vendor_id[0] ? c->x86_vendor_id : "unknown",
                   c->x86,
                   (int)c->x86_model,
                   c->x86_model_id[0] ? c->x86_model_id : "unknown");

        if (c->x86_mask || c->cpuid_level >= 0)
                seq_printf(m, "stepping\t: %d\n", c->x86_mask);
        else
                seq_printf(m, "stepping\t: unknown\n");

        if (cpu_has(c, X86_FEATURE_TSC)) {
                unsigned int freq = cpufreq_quick_get((unsigned)cpu);

                if (!freq)
                        freq = cpu_khz;
                seq_printf(m, "cpu MHz\t\t: %u.%03u\n",
                           freq / 1000, (freq % 1000));
        }

        /* Cache size */
        if (c->x86_cache_size >= 0)
                seq_printf(m, "cache size\t: %d KB\n", c->x86_cache_size);

#ifdef CONFIG_SMP
        if (smp_num_siblings * c->x86_max_cores > 1) {
                seq_printf(m, "physical id\t: %d\n", c->phys_proc_id);
                seq_printf(m, "siblings\t: %d\n",
                           cpus_weight(per_cpu(cpu_core_map, cpu)));
                seq_printf(m, "core id\t\t: %d\n", c->cpu_core_id);
                seq_printf(m, "cpu cores\t: %d\n", c->booted_cores);
        }
#endif

        seq_printf(m,
                   "fpu\t\t: yes\n"
                   "fpu_exception\t: yes\n"
                   "cpuid level\t: %d\n"
                   "wp\t\t: yes\n"
                   "flags\t\t:",
                   c->cpuid_level);

        for (i = 0; i < 32*NCAPINTS; i++)
                if (cpu_has(c, i) && x86_cap_flags[i] != NULL)
                        seq_printf(m, " %s", x86_cap_flags[i]);

        seq_printf(m, "\nbogomips\t: %lu.%02lu\n",
                   c->loops_per_jiffy/(500000/HZ),
                   (c->loops_per_jiffy/(5000/HZ)) % 100);

        if (c->x86_tlbsize > 0)
                seq_printf(m, "TLB size\t: %d 4K pages\n", c->x86_tlbsize);
        seq_printf(m, "clflush size\t: %d\n", c->x86_clflush_size);
        seq_printf(m, "cache_alignment\t: %d\n", c->x86_cache_alignment);

        seq_printf(m, "address sizes\t: %u bits physical, %u bits virtual\n",
                   c->x86_phys_bits, c->x86_virt_bits);

        seq_printf(m, "power management:");
        for (i = 0; i < 32; i++) {
                if (c->x86_power & (1 << i)) {
                        if (i < ARRAY_SIZE(x86_power_flags) &&
                            x86_power_flags[i])
                                seq_printf(m, "%s%s",
                                           x86_power_flags[i][0]?" ":"",
                                           x86_power_flags[i]);
                        else
                                seq_printf(m, " [%d]", i);
                }
        }

        seq_printf(m, "\n\n");

        return 0;
}

static void *c_start(struct seq_file *m, loff_t *pos)
{
        if (*pos == 0)  /* just in case, cpu 0 is not the first */
                *pos = first_cpu(cpu_online_map);
        if ((*pos) < NR_CPUS && cpu_online(*pos))
                return &cpu_data(*pos);
        return NULL;
}

static void *c_next(struct seq_file *m, void *v, loff_t *pos)
{
        *pos = next_cpu(*pos, cpu_online_map);
        return c_start(m, pos);
}

static void c_stop(struct seq_file *m, void *v)
{
}

struct seq_operations cpuinfo_op = {
        .start = c_start,
        .next = c_next,
        .stop = c_stop,
        .show = show_cpuinfo,
};