x86: remove the now unused X86_FEATURE_SYNC_RDTSC
[GitHub/mt8127/android_kernel_alcatel_ttab.git] / arch / x86 / kernel / setup_64.c
CommitLineData
1da177e4 1/*
1da177e4 2 * Copyright (C) 1995 Linus Torvalds
1da177e4
LT
3 */
4
5/*
6 * This file handles the architecture-dependent parts of initialization
7 */
8
9#include <linux/errno.h>
10#include <linux/sched.h>
11#include <linux/kernel.h>
12#include <linux/mm.h>
13#include <linux/stddef.h>
14#include <linux/unistd.h>
15#include <linux/ptrace.h>
16#include <linux/slab.h>
17#include <linux/user.h>
18#include <linux/a.out.h>
894673ee 19#include <linux/screen_info.h>
1da177e4
LT
20#include <linux/ioport.h>
21#include <linux/delay.h>
1da177e4
LT
22#include <linux/init.h>
23#include <linux/initrd.h>
24#include <linux/highmem.h>
25#include <linux/bootmem.h>
26#include <linux/module.h>
27#include <asm/processor.h>
28#include <linux/console.h>
29#include <linux/seq_file.h>
aac04b32 30#include <linux/crash_dump.h>
1da177e4
LT
31#include <linux/root_dev.h>
32#include <linux/pci.h>
5b83683f 33#include <linux/efi.h>
1da177e4
LT
34#include <linux/acpi.h>
35#include <linux/kallsyms.h>
36#include <linux/edd.h>
bbfceef4 37#include <linux/mmzone.h>
5f5609df 38#include <linux/kexec.h>
95235ca2 39#include <linux/cpufreq.h>
e9928674 40#include <linux/dmi.h>
17a941d8 41#include <linux/dma-mapping.h>
681558fd 42#include <linux/ctype.h>
746ef0cd 43#include <linux/uaccess.h>
bbfceef4 44
1da177e4
LT
45#include <asm/mtrr.h>
46#include <asm/uaccess.h>
47#include <asm/system.h>
e4026440 48#include <asm/vsyscall.h>
1da177e4
LT
49#include <asm/io.h>
50#include <asm/smp.h>
51#include <asm/msr.h>
52#include <asm/desc.h>
53#include <video/edid.h>
54#include <asm/e820.h>
55#include <asm/dma.h>
56#include <asm/mpspec.h>
57#include <asm/mmu_context.h>
1da177e4
LT
58#include <asm/proto.h>
59#include <asm/setup.h>
60#include <asm/mach_apic.h>
61#include <asm/numa.h>
2bc0414e 62#include <asm/sections.h>
f2d3efed 63#include <asm/dmi.h>
00bf4098 64#include <asm/cacheflush.h>
af7a78e9 65#include <asm/mce.h>
eee3af4a 66#include <asm/ds.h>
1da177e4 67
746ef0cd
GOC
68#ifdef CONFIG_PARAVIRT
69#include <asm/paravirt.h>
70#else
71#define ARCH_SETUP
72#endif
73
1da177e4
LT
74/*
75 * Machine setup..
76 */
77
6c231b7b 78struct cpuinfo_x86 boot_cpu_data __read_mostly;
2ee60e17 79EXPORT_SYMBOL(boot_cpu_data);
1da177e4
LT
80
81unsigned long mmu_cr4_features;
82
1da177e4
LT
83/* Boot loader ID as an integer, for the benefit of proc_dointvec */
84int bootloader_type;
85
86unsigned long saved_video_mode;
87
f039b754
AK
88int force_mwait __cpuinitdata;
89
04e1ba85 90/*
f2d3efed
AK
91 * Early DMI memory
92 */
93int dmi_alloc_index;
94char dmi_alloc_data[DMI_MAX_DATA];
95
1da177e4
LT
96/*
97 * Setup options
98 */
1da177e4 99struct screen_info screen_info;
2ee60e17 100EXPORT_SYMBOL(screen_info);
1da177e4
LT
101struct sys_desc_table_struct {
102 unsigned short length;
103 unsigned char table[0];
104};
105
106struct edid_info edid_info;
ba70710e 107EXPORT_SYMBOL_GPL(edid_info);
1da177e4
LT
108
109extern int root_mountflags;
1da177e4 110
adf48856 111char __initdata command_line[COMMAND_LINE_SIZE];
1da177e4
LT
112
/*
 * Legacy PC I/O port ranges present on all i[345]86 machines
 * (DMA controllers, PICs, PIT timers, keyboard controller, FPU).
 * setup_arch() requests each of these against ioport_resource.
 */
113struct resource standard_io_resources[] = {
114 { .name = "dma1", .start = 0x00, .end = 0x1f,
115 .flags = IORESOURCE_BUSY | IORESOURCE_IO },
116 { .name = "pic1", .start = 0x20, .end = 0x21,
117 .flags = IORESOURCE_BUSY | IORESOURCE_IO },
118 { .name = "timer0", .start = 0x40, .end = 0x43,
119 .flags = IORESOURCE_BUSY | IORESOURCE_IO },
120 { .name = "timer1", .start = 0x50, .end = 0x53,
121 .flags = IORESOURCE_BUSY | IORESOURCE_IO },
122 { .name = "keyboard", .start = 0x60, .end = 0x6f,
123 .flags = IORESOURCE_BUSY | IORESOURCE_IO },
124 { .name = "dma page reg", .start = 0x80, .end = 0x8f,
125 .flags = IORESOURCE_BUSY | IORESOURCE_IO },
126 { .name = "pic2", .start = 0xa0, .end = 0xa1,
127 .flags = IORESOURCE_BUSY | IORESOURCE_IO },
128 { .name = "dma2", .start = 0xc0, .end = 0xdf,
129 .flags = IORESOURCE_BUSY | IORESOURCE_IO },
130 { .name = "fpu", .start = 0xf0, .end = 0xff,
131 .flags = IORESOURCE_BUSY | IORESOURCE_IO }
132};
133
1da177e4
LT
134#define IORESOURCE_RAM (IORESOURCE_BUSY | IORESOURCE_MEM)
135
/*
 * Descriptors for the kernel's own text/data/bss memory regions.
 * start/end are filled in by setup_arch() from the linker symbols
 * (_text, _etext, _edata, __bss_start, __bss_stop).
 */
c9cce83d 136static struct resource data_resource = {
1da177e4
LT
137 .name = "Kernel data",
138 .start = 0,
139 .end = 0,
140 .flags = IORESOURCE_RAM,
141};
c9cce83d 142static struct resource code_resource = {
1da177e4
LT
143 .name = "Kernel code",
144 .start = 0,
145 .end = 0,
146 .flags = IORESOURCE_RAM,
147};
c9cce83d 148static struct resource bss_resource = {
00bf4098
BW
149 .name = "Kernel bss",
150 .start = 0,
151 .end = 0,
152 .flags = IORESOURCE_RAM,
153};
1da177e4 154
8c61b900
TG
155static void __cpuinit early_identify_cpu(struct cpuinfo_x86 *c);
156
2c8c0e6b
AK
157#ifdef CONFIG_PROC_VMCORE
158/* elfcorehdr= specifies the location of elf core header
159 * stored by the crashed kernel. This option will be passed
160 * by kexec loader to the capture kernel.
161 */
162static int __init setup_elfcorehdr(char *arg)
681558fd 163{
2c8c0e6b
AK
164 char *end;
165 if (!arg)
166 return -EINVAL;
/* memparse() accepts k/m/g suffixes; 'end' points past the parsed text */
167 elfcorehdr_addr = memparse(arg, &end);
/* end == arg means nothing was parsed: reject empty/garbage values */
168 return end > arg ? 0 : -EINVAL;
681558fd 169}
2c8c0e6b 170early_param("elfcorehdr", setup_elfcorehdr);
e2c03888
AK
171#endif
172
2b97690f 173#ifndef CONFIG_NUMA
/*
 * Flat (non-NUMA) bootmem setup: place the bootmem bitmap in a free
 * e820 region, register all RAM in [start_pfn, end_pfn) as active,
 * then reserve the bitmap itself so it is not handed out.
 */
bbfceef4
MT
174static void __init
175contig_initmem_init(unsigned long start_pfn, unsigned long end_pfn)
1da177e4 176{
bbfceef4
MT
177 unsigned long bootmap_size, bootmap;
178
bbfceef4
MT
179 bootmap_size = bootmem_bootmap_pages(end_pfn)<<PAGE_SHIFT;
180 bootmap = find_e820_area(0, end_pfn<<PAGE_SHIFT, bootmap_size);
/* find_e820_area() returns -1L when no suitable hole exists */
181 if (bootmap == -1L)
04e1ba85 182 panic("Cannot find bootmem map of size %ld\n", bootmap_size);
bbfceef4 183 bootmap_size = init_bootmem(bootmap >> PAGE_SHIFT, end_pfn);
5cb248ab
MG
184 e820_register_active_regions(0, start_pfn, end_pfn);
185 free_bootmem_with_active_regions(0, end_pfn);
bbfceef4 186 reserve_bootmem(bootmap, bootmap_size);
04e1ba85 187}
1da177e4
LT
188#endif
189
1da177e4
LT
190#if defined(CONFIG_EDD) || defined(CONFIG_EDD_MODULE)
191struct edd edd;
192#ifdef CONFIG_EDD_MODULE
193EXPORT_SYMBOL(edd);
194#endif
195/**
196 * copy_edd() - Copy the BIOS EDD information
197 * from boot_params into a safe place.
198 *
199 */
200static inline void copy_edd(void)
201{
/* copy the MBR signatures and EDD info blocks out of boot_params */
30c82645
PA
202 memcpy(edd.mbr_signature, boot_params.edd_mbr_sig_buffer,
203 sizeof(edd.mbr_signature));
204 memcpy(edd.edd_info, boot_params.eddbuf, sizeof(edd.edd_info));
205 edd.mbr_signature_nr = boot_params.edd_mbr_sig_buf_entries;
206 edd.edd_info_nr = boot_params.eddbuf_entries;
1da177e4
LT
207}
208#else
/* EDD support compiled out: keep a no-op so setup_arch() can call it */
209static inline void copy_edd(void)
210{
211}
212#endif
213
5c3391f9
BW
214#ifdef CONFIG_KEXEC
/*
 * Parse the crashkernel= option and, when a base address was given,
 * reserve that memory range for the kexec crash (capture) kernel.
 */
215static void __init reserve_crashkernel(void)
216{
217 unsigned long long free_mem;
218 unsigned long long crash_size, crash_base;
219 int ret;
220
04e1ba85
TG
221 free_mem =
222 ((unsigned long long)max_low_pfn - min_low_pfn) << PAGE_SHIFT;
5c3391f9
BW
223
224 ret = parse_crashkernel(boot_command_line, free_mem,
225 &crash_size, &crash_base);
226 if (ret == 0 && crash_size) {
/* this code only honours an explicit base; auto-placement is rejected */
227 if (crash_base > 0) {
228 printk(KERN_INFO "Reserving %ldMB of memory at %ldMB "
229 "for crashkernel (System RAM: %ldMB)\n",
230 (unsigned long)(crash_size >> 20),
231 (unsigned long)(crash_base >> 20),
232 (unsigned long)(free_mem >> 20));
233 crashk_res.start = crash_base;
234 crashk_res.end = crash_base + crash_size - 1;
235 reserve_bootmem(crash_base, crash_size);
236 } else
237 printk(KERN_INFO "crashkernel reservation failed - "
238 "you have to specify a base address\n");
239 }
240}
241#else
242static inline void __init reserve_crashkernel(void)
243{}
244#endif
245
/* BIOS data area word holding the real-mode segment of the EBDA */
1da177e4 246#define EBDA_ADDR_POINTER 0x40E
ac71d12c
AK
247
248unsigned __initdata ebda_addr;
249unsigned __initdata ebda_size;
250
/*
 * Locate the Extended BIOS Data Area and compute its page-rounded
 * size so setup_arch() can reserve it from bootmem.
 */
251static void discover_ebda(void)
1da177e4 252{
ac71d12c 253 /*
04e1ba85 254 * there is a real-mode segmented pointer pointing to the
1da177e4
LT
255 * 4K EBDA area at 0x40E
256 */
bdb96a66 257 ebda_addr = *(unsigned short *)__va(EBDA_ADDR_POINTER);
746ef0cd
GOC
258 /*
259 * There can be some situations, like paravirtualized guests,
260 * in which there is no available ebda information. In such
261 * case, just skip it
262 */
263 if (!ebda_addr) {
264 ebda_size = 0;
265 return;
266 }
267
/* convert real-mode segment value to a physical address */
ac71d12c
AK
268 ebda_addr <<= 4;
269
/* first word of the EBDA is its size in KiB */
bdb96a66 270 ebda_size = *(unsigned short *)__va(ebda_addr);
ac71d12c
AK
271
272 /* Round EBDA up to pages */
273 if (ebda_size == 0)
274 ebda_size = 1;
275 ebda_size <<= 10;
276 ebda_size = round_up(ebda_size + (ebda_addr & ~PAGE_MASK), PAGE_SIZE);
/* clamp to the architectural 64K maximum for the EBDA */
277 if (ebda_size > 64*1024)
278 ebda_size = 64*1024;
1da177e4
LT
279}
280
746ef0cd
GOC
281/* Overridden in paravirt.c if CONFIG_PARAVIRT */
/* Default (weak) implementation: just use the native e820 memory setup. */
282void __attribute__((weak)) memory_setup(void)
283{
284 machine_specific_memory_setup();
285}
286
1da177e4
LT
/*
 * Architecture-specific boot-time initialization for x86-64:
 * copies data out of boot_params, sets up the e820 memory map and
 * bootmem allocator, reserves special regions (kernel image, EBDA,
 * SMP trampoline, initrd, crashkernel), and probes ACPI/SMP/APIC
 * configuration. Ordering here is significant.
 */
287void __init setup_arch(char **cmdline_p)
288{
04e1ba85
TG
289 unsigned i;
290
adf48856 291 printk(KERN_INFO "Command line: %s\n", boot_command_line);
43c85c9c 292
/* pull boot-loader-provided data out of the boot_params block */
30c82645
PA
293 ROOT_DEV = old_decode_dev(boot_params.hdr.root_dev);
294 screen_info = boot_params.screen_info;
295 edid_info = boot_params.edid_info;
296 saved_video_mode = boot_params.hdr.vid_mode;
297 bootloader_type = boot_params.hdr.type_of_loader;
1da177e4
LT
298
299#ifdef CONFIG_BLK_DEV_RAM
30c82645
PA
300 rd_image_start = boot_params.hdr.ram_size & RAMDISK_IMAGE_START_MASK;
301 rd_prompt = ((boot_params.hdr.ram_size & RAMDISK_PROMPT_FLAG) != 0);
302 rd_doload = ((boot_params.hdr.ram_size & RAMDISK_LOAD_FLAG) != 0);
1da177e4 303#endif
5b83683f
HY
304#ifdef CONFIG_EFI
305 if (!strncmp((char *)&boot_params.efi_info.efi_loader_signature,
306 "EL64", 4))
307 efi_enabled = 1;
308#endif
746ef0cd
GOC
309
/* ARCH_SETUP is a paravirt hook; it expands to nothing without CONFIG_PARAVIRT */
310 ARCH_SETUP
311
312 memory_setup();
1da177e4
LT
313 copy_edd();
314
30c82645 315 if (!boot_params.hdr.root_flags)
1da177e4
LT
316 root_mountflags &= ~MS_RDONLY;
317 init_mm.start_code = (unsigned long) &_text;
318 init_mm.end_code = (unsigned long) &_etext;
319 init_mm.end_data = (unsigned long) &_edata;
320 init_mm.brk = (unsigned long) &_end;
321
/* describe the kernel image sections for /proc/iomem via linker symbols */
e3ebadd9
LT
322 code_resource.start = virt_to_phys(&_text);
323 code_resource.end = virt_to_phys(&_etext)-1;
324 data_resource.start = virt_to_phys(&_etext);
325 data_resource.end = virt_to_phys(&_edata)-1;
00bf4098
BW
326 bss_resource.start = virt_to_phys(&__bss_start);
327 bss_resource.end = virt_to_phys(&__bss_stop)-1;
1da177e4 328
1da177e4
LT
329 early_identify_cpu(&boot_cpu_data);
330
adf48856 331 strlcpy(command_line, boot_command_line, COMMAND_LINE_SIZE);
2c8c0e6b
AK
332 *cmdline_p = command_line;
333
334 parse_early_param();
335
336 finish_e820_parsing();
9ca33eb6 337
5cb248ab 338 e820_register_active_regions(0, 0, -1UL);
1da177e4
LT
339 /*
340 * partially used pages are not usable - thus
341 * we are rounding upwards:
342 */
343 end_pfn = e820_end_of_ram();
caff0710 344 num_physpages = end_pfn;
1da177e4
LT
345
346 check_efer();
347
ac71d12c
AK
348 discover_ebda();
349
1da177e4 350 init_memory_mapping(0, (end_pfn_map << PAGE_SHIFT));
5b83683f
HY
351 if (efi_enabled)
352 efi_init();
1da177e4 353
f2d3efed
AK
354 dmi_scan_machine();
355
b02aae9c
RH
356 io_delay_init();
357
71fff5e6
MT
358#ifdef CONFIG_SMP
359 /* setup to use the static apicid table during kernel startup */
360 x86_cpu_to_apicid_ptr = (void *)&x86_cpu_to_apicid_init;
361#endif
362
888ba6c6 363#ifdef CONFIG_ACPI
1da177e4
LT
364 /*
365 * Initialize the ACPI boot-time table parser (gets the RSDP and SDT).
366 * Call this early for SRAT node setup.
367 */
368 acpi_boot_table_init();
369#endif
370
caff0710
JB
371 /* How many end-of-memory variables you have, grandma! */
372 max_low_pfn = end_pfn;
373 max_pfn = end_pfn;
374 high_memory = (void *)__va(end_pfn * PAGE_SIZE - 1) + 1;
375
5cb248ab
MG
376 /* Remove active ranges so rediscovery with NUMA-awareness happens */
377 remove_all_active_ranges();
378
1da177e4
LT
379#ifdef CONFIG_ACPI_NUMA
380 /*
381 * Parse SRAT to discover nodes.
382 */
383 acpi_numa_init();
384#endif
385
2b97690f 386#ifdef CONFIG_NUMA
04e1ba85 387 numa_initmem_init(0, end_pfn);
1da177e4 388#else
bbfceef4 389 contig_initmem_init(0, end_pfn);
1da177e4
LT
390#endif
391
392 /* Reserve direct mapping */
04e1ba85 393 reserve_bootmem_generic(table_start << PAGE_SHIFT,
1da177e4
LT
394 (table_end - table_start) << PAGE_SHIFT);
395
396 /* reserve kernel */
ceee8822
AK
397 reserve_bootmem_generic(__pa_symbol(&_text),
398 __pa_symbol(&_end) - __pa_symbol(&_text));
1da177e4
LT
399
400 /*
401 * reserve physical page 0 - it's a special BIOS page on many boxes,
402 * enabling clean reboots, SMP operation, laptop functions.
403 */
404 reserve_bootmem_generic(0, PAGE_SIZE);
405
406 /* reserve ebda region */
ac71d12c
AK
407 if (ebda_addr)
408 reserve_bootmem_generic(ebda_addr, ebda_size);
076422d2
AS
409#ifdef CONFIG_NUMA
410 /* reserve nodemap region */
411 if (nodemap_addr)
412 reserve_bootmem_generic(nodemap_addr, nodemap_size);
413#endif
1da177e4
LT
414
415#ifdef CONFIG_SMP
1da177e4 416 /* Reserve SMP trampoline */
90b1c208 417 reserve_bootmem_generic(SMP_TRAMPOLINE_BASE, 2*PAGE_SIZE);
1da177e4
LT
418#endif
419
673d5b43 420#ifdef CONFIG_ACPI_SLEEP
1da177e4 421 /*
04e1ba85 422 * Reserve low memory region for sleep support.
1da177e4 423 */
04e1ba85
TG
424 acpi_reserve_bootmem();
425#endif
5b83683f
HY
426
427 if (efi_enabled) {
428 efi_map_memmap();
429 efi_reserve_bootmem();
430 }
431
04e1ba85
TG
432 /*
433 * Find and reserve possible boot-time SMP configuration:
434 */
1da177e4 435 find_smp_config();
1da177e4 436#ifdef CONFIG_BLK_DEV_INITRD
/* keep the boot-loader-placed initrd only if it fits below end of RAM */
30c82645
PA
437 if (boot_params.hdr.type_of_loader && boot_params.hdr.ramdisk_image) {
438 unsigned long ramdisk_image = boot_params.hdr.ramdisk_image;
439 unsigned long ramdisk_size = boot_params.hdr.ramdisk_size;
440 unsigned long ramdisk_end = ramdisk_image + ramdisk_size;
441 unsigned long end_of_mem = end_pfn << PAGE_SHIFT;
442
443 if (ramdisk_end <= end_of_mem) {
444 reserve_bootmem_generic(ramdisk_image, ramdisk_size);
445 initrd_start = ramdisk_image + PAGE_OFFSET;
446 initrd_end = initrd_start+ramdisk_size;
447 } else {
1da177e4 448 printk(KERN_ERR "initrd extends beyond end of memory "
30c82645
PA
449 "(0x%08lx > 0x%08lx)\ndisabling initrd\n",
450 ramdisk_end, end_of_mem);
1da177e4
LT
451 initrd_start = 0;
452 }
453 }
454#endif
5c3391f9 455 reserve_crashkernel();
1da177e4 456 paging_init();
e4026440 457 map_vsyscall();
1da177e4 458
dfa4698c 459 early_quirks();
1da177e4 460
51f62e18
AR
461 /*
462 * set this early, so we dont allocate cpu0
463 * if MADT list doesnt list BSP first
464 * mpparse.c/MP_processor_info() allocates logical cpu numbers.
465 */
466 cpu_set(0, cpu_present_map);
888ba6c6 467#ifdef CONFIG_ACPI
1da177e4
LT
468 /*
469 * Read APIC and some other early information from ACPI tables.
470 */
471 acpi_boot_init();
472#endif
473
05b3cbd8
RT
474 init_cpu_to_node();
475
1da177e4
LT
476 /*
477 * get boot-time SMP configuration:
478 */
479 if (smp_found_config)
480 get_smp_config();
481 init_apic_mappings();
3e35a0e5 482 ioapic_init_mappings();
1da177e4
LT
483
484 /*
fc986db4 485 * We trust e820 completely. No explicit ROM probing in memory.
04e1ba85 486 */
c9cce83d 487 e820_reserve_resources(&code_resource, &data_resource, &bss_resource);
e8eff5ac 488 e820_mark_nosave_regions();
1da177e4 489
1da177e4 490 /* request I/O space for devices used on all i[345]86 PCs */
9d0ef4fd 491 for (i = 0; i < ARRAY_SIZE(standard_io_resources); i++)
1da177e4 492 request_resource(&ioport_resource, &standard_io_resources[i]);
1da177e4 493
a1e97782 494 e820_setup_gap();
1da177e4 495
1da177e4
LT
496#ifdef CONFIG_VT
497#if defined(CONFIG_VGA_CONSOLE)
5b83683f
HY
498 if (!efi_enabled || (efi_mem_type(0xa0000) != EFI_CONVENTIONAL_MEMORY))
499 conswitchp = &vga_con;
1da177e4
LT
500#elif defined(CONFIG_DUMMY_CONSOLE)
501 conswitchp = &dummy_con;
502#endif
503#endif
504}
505
/*
 * Fill c->x86_model_id with the 48-byte brand string from CPUID
 * leaves 0x80000002..0x80000004. Returns 1 on success, 0 when the
 * CPU does not implement those leaves.
 */
e6982c67 506static int __cpuinit get_model_name(struct cpuinfo_x86 *c)
1da177e4
LT
507{
508 unsigned int *v;
509
ebfcaa96 510 if (c->extended_cpuid_level < 0x80000004)
1da177e4
LT
511 return 0;
512
/* each leaf returns 16 bytes of the string in eax/ebx/ecx/edx */
513 v = (unsigned int *) c->x86_model_id;
514 cpuid(0x80000002, &v[0], &v[1], &v[2], &v[3]);
515 cpuid(0x80000003, &v[4], &v[5], &v[6], &v[7]);
516 cpuid(0x80000004, &v[8], &v[9], &v[10], &v[11]);
/* force NUL termination in case the CPU did not supply one */
517 c->x86_model_id[48] = 0;
518 return 1;
519}
520
521
/*
 * Read L1/L2 cache and TLB geometry plus address-size information
 * from the AMD-style extended CPUID leaves 0x80000005..0x80000008
 * and record them in *c, printing the cache sizes.
 */
e6982c67 522static void __cpuinit display_cacheinfo(struct cpuinfo_x86 *c)
1da177e4
LT
523{
524 unsigned int n, dummy, eax, ebx, ecx, edx;
525
ebfcaa96 526 n = c->extended_cpuid_level;
1da177e4
LT
527
528 if (n >= 0x80000005) {
529 cpuid(0x80000005, &dummy, &ebx, &ecx, &edx);
04e1ba85
TG
530 printk(KERN_INFO "CPU: L1 I Cache: %dK (%d bytes/line), "
531 "D cache %dK (%d bytes/line)\n",
532 edx>>24, edx&0xFF, ecx>>24, ecx&0xFF);
533 c->x86_cache_size = (ecx>>24) + (edx>>24);
1da177e4
LT
534 /* On K8 L1 TLB is inclusive, so don't count it */
535 c->x86_tlbsize = 0;
536 }
537
538 if (n >= 0x80000006) {
539 cpuid(0x80000006, &dummy, &ebx, &ecx, &edx);
540 ecx = cpuid_ecx(0x80000006);
/* L2 size in KB is in ecx[31:16]; overrides the L1 sum above */
541 c->x86_cache_size = ecx >> 16;
542 c->x86_tlbsize += ((ebx >> 16) & 0xfff) + (ebx & 0xfff);
543
544 printk(KERN_INFO "CPU: L2 Cache: %dK (%d bytes/line)\n",
545 c->x86_cache_size, ecx & 0xFF);
546 }
547
/* 0x80000007 edx = advanced power management feature bits */
548 if (n >= 0x80000007)
04e1ba85 549 cpuid(0x80000007, &dummy, &dummy, &dummy, &c->x86_power);
1da177e4 550 if (n >= 0x80000008) {
04e1ba85 551 cpuid(0x80000008, &eax, &dummy, &dummy, &dummy);
1da177e4
LT
552 c->x86_virt_bits = (eax >> 8) & 0xff;
553 c->x86_phys_bits = eax & 0xff;
554 }
555}
556
3f098c26
AK
557#ifdef CONFIG_NUMA
/*
 * Find an online NUMA node for a CPU whose own APIC id has no node
 * mapping, by scanning neighbouring APIC ids downwards then upwards.
 */
558static int nearby_node(int apicid)
559{
04e1ba85
TG
560 int i, node;
561
3f098c26 562 for (i = apicid - 1; i >= 0; i--) {
04e1ba85 563 node = apicid_to_node[i];
3f098c26
AK
564 if (node != NUMA_NO_NODE && node_online(node))
565 return node;
566 }
567 for (i = apicid + 1; i < MAX_LOCAL_APIC; i++) {
04e1ba85 568 node = apicid_to_node[i];
3f098c26
AK
569 if (node != NUMA_NO_NODE && node_online(node))
570 return node;
571 }
572 return first_node(node_online_map); /* Shouldn't happen */
573}
574#endif
575
63518644
AK
576/*
577 * On a AMD dual core setup the lower bits of the APIC id distingush the cores.
578 * Assumes number of cores is a power of two.
579 */
580static void __init amd_detect_cmp(struct cpuinfo_x86 *c)
581{
582#ifdef CONFIG_SMP
b41e2939 583 unsigned bits;
3f098c26 584#ifdef CONFIG_NUMA
f3fa8ebc 585 int cpu = smp_processor_id();
3f098c26 586 int node = 0;
60c1bc82 587 unsigned apicid = hard_smp_processor_id();
3f098c26 588#endif
/* core-id bit width was computed earlier by early_init_amd() */
a860b63c 589 bits = c->x86_coreid_bits;
b41e2939
AK
590
591 /* Low order bits define the core id (index of core in socket) */
f3fa8ebc 592 c->cpu_core_id = c->phys_proc_id & ((1 << bits)-1);
b41e2939 593 /* Convert the APIC ID into the socket ID */
f3fa8ebc 594 c->phys_proc_id = phys_pkg_id(bits);
63518644
AK
595
596#ifdef CONFIG_NUMA
04e1ba85
TG
597 node = c->phys_proc_id;
598 if (apicid_to_node[apicid] != NUMA_NO_NODE)
599 node = apicid_to_node[apicid];
600 if (!node_online(node)) {
601 /* Two possibilities here:
602 - The CPU is missing memory and no node was created.
603 In that case try picking one from a nearby CPU
604 - The APIC IDs differ from the HyperTransport node IDs
605 which the K8 northbridge parsing fills in.
606 Assume they are all increased by a constant offset,
607 but in the same order as the HT nodeids.
608 If that doesn't result in a usable node fall back to the
609 path for the previous case. */
610
92cb7612 611 int ht_nodeid = apicid - (cpu_data(0).phys_proc_id << bits);
04e1ba85
TG
612
613 if (ht_nodeid >= 0 &&
614 apicid_to_node[ht_nodeid] != NUMA_NO_NODE)
615 node = apicid_to_node[ht_nodeid];
616 /* Pick a nearby node */
617 if (!node_online(node))
618 node = nearby_node(apicid);
619 }
69d81fcd 620 numa_set_node(cpu, node);
3f098c26 621
e42f9437 622 printk(KERN_INFO "CPU %d/%x -> Node %d\n", cpu, apicid, node);
63518644 623#endif
63518644
AK
624#endif
625}
1da177e4 626
a860b63c
YL
/*
 * Early AMD setup: read the core count and the number of APIC-id
 * bits used for the core id (CPUID 0x80000008 ecx) into
 * c->x86_max_cores / c->x86_coreid_bits for later use by
 * amd_detect_cmp().
 */
627static void __cpuinit early_init_amd(struct cpuinfo_x86 *c)
628{
629#ifdef CONFIG_SMP
630 unsigned bits, ecx;
631
632 /* Multi core CPU? */
633 if (c->extended_cpuid_level < 0x80000008)
634 return;
635
636 ecx = cpuid_ecx(0x80000008);
637
638 c->x86_max_cores = (ecx & 0xff) + 1;
639
640 /* CPU telling us the core id bits shift? */
641 bits = (ecx >> 12) & 0xF;
642
643 /* Otherwise recompute */
644 if (bits == 0) {
/* smallest power-of-two width that covers x86_max_cores */
645 while ((1 << bits) < c->x86_max_cores)
646 bits++;
647 }
648
649 c->x86_coreid_bits = bits;
650
651#endif
652}
653
fb79d22e
TG
/* masks for decoding the CPUID(1) signature and the K8 C1E MSR */
654#define ENABLE_C1E_MASK 0x18000000
655#define CPUID_PROCESSOR_SIGNATURE 1
656#define CPUID_XFAM 0x0ff00000
657#define CPUID_XFAM_K8 0x00000000
658#define CPUID_XFAM_10H 0x00100000
659#define CPUID_XFAM_11H 0x00200000
660#define CPUID_XMOD 0x000f0000
661#define CPUID_XMOD_REV_F 0x00040000
662
663/* AMD systems with C1E don't have a working lAPIC timer. Check for that. */
664static __cpuinit int amd_apic_timer_broken(void)
665{
04e1ba85
TG
666 u32 lo, hi, eax = cpuid_eax(CPUID_PROCESSOR_SIGNATURE);
667
fb79d22e
TG
668 switch (eax & CPUID_XFAM) {
669 case CPUID_XFAM_K8:
670 if ((eax & CPUID_XMOD) < CPUID_XMOD_REV_F)
671 break;
/* intentional fallthrough: rev F+ K8 gets the C1E MSR check too */
672 case CPUID_XFAM_10H:
673 case CPUID_XFAM_11H:
674 rdmsr(MSR_K8_ENABLE_C1E, lo, hi);
675 if (lo & ENABLE_C1E_MASK)
676 return 1;
677 break;
678 default:
679 /* err on the side of caution */
680 return 1;
681 }
682 return 0;
683}
684
/*
 * AMD-specific CPU setup: applies K8 errata workarounds, fixes up
 * feature bits (REP_GOOD, FXSAVE_LEAK, CONSTANT_TSC, MFENCE_RDTSC,
 * K8, MWAIT), detects multi-core topology and cache info, and
 * disables the local APIC timer where C1E makes it unreliable.
 */
ed77504b 685static void __cpuinit init_amd(struct cpuinfo_x86 *c)
1da177e4 686{
7bcd3f34 687 unsigned level;
1da177e4 688
bc5e8fdf
LT
689#ifdef CONFIG_SMP
690 unsigned long value;
691
7d318d77
AK
692 /*
693 * Disable TLB flush filter by setting HWCR.FFDIS on K8
694 * bit 6 of msr C001_0015
04e1ba85 695 *
7d318d77
AK
696 * Errata 63 for SH-B3 steppings
697 * Errata 122 for all steppings (F+ have it disabled by default)
698 */
699 if (c->x86 == 15) {
700 rdmsrl(MSR_K8_HWCR, value);
701 value |= 1 << 6;
702 wrmsrl(MSR_K8_HWCR, value);
703 }
bc5e8fdf
LT
704#endif
705
1da177e4
LT
706 /* Bit 31 in normal CPUID used for nonstandard 3DNow ID;
707 3DNow is IDd by bit 31 in extended CPUID (1*32+31) anyway */
5548fecd 708 clear_bit(0*32+31, (unsigned long *)&c->x86_capability);
04e1ba85 709
7bcd3f34
AK
710 /* On C+ stepping K8 rep microcode works well for copy/memset */
711 level = cpuid_eax(1);
04e1ba85
TG
712 if (c->x86 == 15 && ((level >= 0x0f48 && level < 0x0f50) ||
713 level >= 0x0f58))
53756d37 714 set_cpu_cap(c, X86_FEATURE_REP_GOOD);
99741faa 715 if (c->x86 == 0x10 || c->x86 == 0x11)
53756d37 716 set_cpu_cap(c, X86_FEATURE_REP_GOOD);
7bcd3f34 717
18bd057b
AK
718 /* Enable workaround for FXSAVE leak */
719 if (c->x86 >= 6)
53756d37 720 set_cpu_cap(c, X86_FEATURE_FXSAVE_LEAK);
18bd057b 721
/* get_model_name() returns 0 when the brand-string leaves are absent */
e42f9437
RS
722 level = get_model_name(c);
723 if (!level) {
04e1ba85 724 switch (c->x86) {
1da177e4
LT
725 case 15:
726 /* Should distinguish Models here, but this is only
727 a fallback anyways. */
728 strcpy(c->x86_model_id, "Hammer");
04e1ba85
TG
729 break;
730 }
731 }
1da177e4
LT
732 display_cacheinfo(c);
733
130951cc
AK
734 /* c->x86_power is 8000_0007 edx. Bit 8 is constant TSC */
735 if (c->x86_power & (1<<8))
53756d37 736 set_cpu_cap(c, X86_FEATURE_CONSTANT_TSC);
130951cc 737
faee9a5d
AK
738 /* Multi core CPU? */
739 if (c->extended_cpuid_level >= 0x80000008)
63518644 740 amd_detect_cmp(c);
1da177e4 741
/* pick the number of cache levels reported through the cacheinfo code */
67cddd94
AK
742 if (c->extended_cpuid_level >= 0x80000006 &&
743 (cpuid_edx(0x80000006) & 0xf000))
744 num_cache_leaves = 4;
745 else
746 num_cache_leaves = 3;
2049336f 747
0bd8acd1 748 if (c->x86 == 0xf || c->x86 == 0x10 || c->x86 == 0x11)
53756d37 749 set_cpu_cap(c, X86_FEATURE_K8);
0bd8acd1 750
de421863
AK
751 /* MFENCE stops RDTSC speculation */
752 set_cpu_cap(c, X86_FEATURE_MFENCE_RDTSC);
f039b754
AK
753
754 /* Family 10 doesn't support C states in MWAIT so don't use it */
755 if (c->x86 == 0x10 && !force_mwait)
53756d37 756 clear_cpu_cap(c, X86_FEATURE_MWAIT);
fb79d22e
TG
757
758 if (amd_apic_timer_broken())
759 disable_apic_timer = 1;
1da177e4
LT
760}
761
/*
 * Detect Hyper-Threading / sibling topology from CPUID(1) ebx and
 * derive c->phys_proc_id and c->cpu_core_id. No-op on UP kernels or
 * when the HT feature bit is absent; CMP_LEGACY parts skip straight
 * to the reporting at 'out'.
 */
1a53905a 762void __cpuinit detect_ht(struct cpuinfo_x86 *c)
1da177e4
LT
763{
764#ifdef CONFIG_SMP
04e1ba85
TG
765 u32 eax, ebx, ecx, edx;
766 int index_msb, core_bits;
94605eff
SS
767
768 cpuid(1, &eax, &ebx, &ecx, &edx);
769
94605eff 770
e42f9437 771 if (!cpu_has(c, X86_FEATURE_HT))
1da177e4 772 return;
04e1ba85 773 if (cpu_has(c, X86_FEATURE_CMP_LEGACY))
e42f9437 774 goto out;
1da177e4 775
/* logical processor count per package lives in ebx[23:16] */
1da177e4 776 smp_num_siblings = (ebx & 0xff0000) >> 16;
94605eff 777
1da177e4
LT
778 if (smp_num_siblings == 1) {
779 printk(KERN_INFO "CPU: Hyper-Threading is disabled\n");
04e1ba85 780 } else if (smp_num_siblings > 1) {
94605eff 781
1da177e4 782 if (smp_num_siblings > NR_CPUS) {
04e1ba85
TG
783 printk(KERN_WARNING "CPU: Unsupported number of "
784 "siblings %d", smp_num_siblings);
1da177e4
LT
785 smp_num_siblings = 1;
786 return;
787 }
94605eff
SS
788
789 index_msb = get_count_order(smp_num_siblings);
f3fa8ebc 790 c->phys_proc_id = phys_pkg_id(index_msb);
3dd9d514 791
/* reduce siblings-per-package to threads-per-core */
94605eff 792 smp_num_siblings = smp_num_siblings / c->x86_max_cores;
3dd9d514 793
04e1ba85 794 index_msb = get_count_order(smp_num_siblings);
94605eff
SS
795
796 core_bits = get_count_order(c->x86_max_cores);
3dd9d514 797
f3fa8ebc 798 c->cpu_core_id = phys_pkg_id(index_msb) &
94605eff 799 ((1 << core_bits) - 1);
1da177e4 800 }
e42f9437
RS
801out:
802 if ((c->x86_max_cores * smp_num_siblings) > 1) {
04e1ba85
TG
803 printk(KERN_INFO "CPU: Physical Processor ID: %d\n",
804 c->phys_proc_id);
805 printk(KERN_INFO "CPU: Processor Core ID: %d\n",
806 c->cpu_core_id);
e42f9437
RS
807 }
808
1da177e4
LT
809#endif
810}
811
3dd9d514
AK
812/*
813 * find out the number of processor cores on the die
814 */
e6982c67 815static int __cpuinit intel_num_cpu_cores(struct cpuinfo_x86 *c)
3dd9d514 816{
2bbc419f 817 unsigned int eax, t;
3dd9d514
AK
818
/* CPUID leaf 4 (deterministic cache params) is needed for the count */
819 if (c->cpuid_level < 4)
820 return 1;
821
2bbc419f 822 cpuid_count(4, 0, &eax, &t, &t, &t);
3dd9d514
AK
823
/* eax[31:26] holds (cores on die - 1); low bits nonzero = leaf valid */
824 if (eax & 0x1f)
825 return ((eax >> 26) + 1);
826 else
827 return 1;
828}
829
df0cc26b
AK
/*
 * Assign the current CPU to a NUMA node using the SRAT-derived
 * apicid_to_node table; fall back to the first online node when
 * the APIC id has no mapping. No-op without CONFIG_NUMA.
 */
830static void srat_detect_node(void)
831{
832#ifdef CONFIG_NUMA
ddea7be0 833 unsigned node;
df0cc26b 834 int cpu = smp_processor_id();
e42f9437 835 int apicid = hard_smp_processor_id();
df0cc26b
AK
836
837 /* Don't do the funky fallback heuristics the AMD version employs
838 for now. */
e42f9437 839 node = apicid_to_node[apicid];
df0cc26b 840 if (node == NUMA_NO_NODE)
0d015324 841 node = first_node(node_online_map);
69d81fcd 842 numa_set_node(cpu, node);
df0cc26b 843
c31fbb1a 844 printk(KERN_INFO "CPU %d/%x -> Node %d\n", cpu, apicid, node);
df0cc26b
AK
845#endif
846}
847
/*
 * Intel-specific CPU setup: cache info, arch perfmon detection,
 * BTS/PEBS debug-store features, address sizes (with the 0F34
 * CPUID workaround), CONSTANT_TSC/REP_GOOD fixups, core count and
 * NUMA node assignment.
 */
e6982c67 848static void __cpuinit init_intel(struct cpuinfo_x86 *c)
1da177e4
LT
849{
850 /* Cache sizes */
851 unsigned n;
852
853 init_intel_cacheinfo(c);
/* CPUID leaf 10 describes the architectural performance monitoring unit */
04e1ba85 854 if (c->cpuid_level > 9) {
0080e667
VP
855 unsigned eax = cpuid_eax(10);
856 /* Check for version and the number of counters */
857 if ((eax & 0xff) && (((eax>>8) & 0xff) > 1))
53756d37 858 set_cpu_cap(c, X86_FEATURE_ARCH_PERFMON);
0080e667
VP
859 }
860
/* MISC_ENABLE bits 11/12 clear => BTS/PEBS are not unavailable */
36b2a8d5
SE
861 if (cpu_has_ds) {
862 unsigned int l1, l2;
863 rdmsr(MSR_IA32_MISC_ENABLE, l1, l2);
ee58fad5 864 if (!(l1 & (1<<11)))
53756d37 865 set_cpu_cap(c, X86_FEATURE_BTS);
36b2a8d5 866 if (!(l1 & (1<<12)))
53756d37 867 set_cpu_cap(c, X86_FEATURE_PEBS);
36b2a8d5
SE
868 }
869
eee3af4a
MM
870
871 if (cpu_has_bts)
872 ds_init_intel(c);
873
ebfcaa96 874 n = c->extended_cpuid_level;
1da177e4
LT
875 if (n >= 0x80000008) {
876 unsigned eax = cpuid_eax(0x80000008);
877 c->x86_virt_bits = (eax >> 8) & 0xff;
878 c->x86_phys_bits = eax & 0xff;
af9c142d
SL
879 /* CPUID workaround for Intel 0F34 CPU */
880 if (c->x86_vendor == X86_VENDOR_INTEL &&
881 c->x86 == 0xF && c->x86_model == 0x3 &&
882 c->x86_mask == 0x4)
883 c->x86_phys_bits = 36;
1da177e4
LT
884 }
885
886 if (c->x86 == 15)
887 c->x86_cache_alignment = c->x86_clflush_size * 2;
39b3a791
AK
888 if ((c->x86 == 0xf && c->x86_model >= 0x03) ||
889 (c->x86 == 0x6 && c->x86_model >= 0x0e))
53756d37 890 set_cpu_cap(c, X86_FEATURE_CONSTANT_TSC);
27fbe5b2 891 if (c->x86 == 6)
53756d37 892 set_cpu_cap(c, X86_FEATURE_REP_GOOD);
707fa8ed 893 set_cpu_cap(c, X86_FEATURE_LFENCE_RDTSC);
04e1ba85 894 c->x86_max_cores = intel_num_cpu_cores(c);
df0cc26b
AK
895
896 srat_detect_node();
1da177e4
LT
897}
898
/* Map the 12-byte CPUID vendor string to an X86_VENDOR_* constant. */
672289e9 899static void __cpuinit get_cpu_vendor(struct cpuinfo_x86 *c)
1da177e4
LT
900{
901 char *v = c->x86_vendor_id;
902
903 if (!strcmp(v, "AuthenticAMD"))
904 c->x86_vendor = X86_VENDOR_AMD;
905 else if (!strcmp(v, "GenuineIntel"))
906 c->x86_vendor = X86_VENDOR_INTEL;
907 else
908 c->x86_vendor = X86_VENDOR_UNKNOWN;
909}
910
/* Simple (vendor, family) -> model-name lookup record. */
911struct cpu_model_info {
912 int vendor;
913 int family;
914 char *model_names[16];
915};
916
917/* Do some early cpuid on the boot CPU to get some parameter that are
918 needed before check_bugs. Everything advanced is in identify_cpu
919 below. */
8c61b900 920static void __cpuinit early_identify_cpu(struct cpuinfo_x86 *c)
1da177e4 921{
a860b63c 922 u32 tfms, xlvl;
1da177e4
LT
923
/* reset *c to known defaults before probing */
924 c->loops_per_jiffy = loops_per_jiffy;
925 c->x86_cache_size = -1;
926 c->x86_vendor = X86_VENDOR_UNKNOWN;
927 c->x86_model = c->x86_mask = 0; /* So far unknown... */
928 c->x86_vendor_id[0] = '\0'; /* Unset */
929 c->x86_model_id[0] = '\0'; /* Unset */
930 c->x86_clflush_size = 64;
931 c->x86_cache_alignment = c->x86_clflush_size;
94605eff 932 c->x86_max_cores = 1;
a860b63c 933 c->x86_coreid_bits = 0;
ebfcaa96 934 c->extended_cpuid_level = 0;
1da177e4
LT
935 memset(&c->x86_capability, 0, sizeof c->x86_capability);
936
937 /* Get vendor name */
/* CPUID(0): max level in eax, vendor string in ebx,edx,ecx order */
938 cpuid(0x00000000, (unsigned int *)&c->cpuid_level,
939 (unsigned int *)&c->x86_vendor_id[0],
940 (unsigned int *)&c->x86_vendor_id[8],
941 (unsigned int *)&c->x86_vendor_id[4]);
04e1ba85 942
1da177e4
LT
943 get_cpu_vendor(c);
944
945 /* Initialize the standard set of capabilities */
946 /* Note that the vendor-specific code below might override */
947
948 /* Intel-defined flags: level 0x00000001 */
949 if (c->cpuid_level >= 0x00000001) {
950 __u32 misc;
951 cpuid(0x00000001, &tfms, &misc, &c->x86_capability[4],
952 &c->x86_capability[0]);
953 c->x86 = (tfms >> 8) & 0xf;
954 c->x86_model = (tfms >> 4) & 0xf;
955 c->x86_mask = tfms & 0xf;
/* family 0xf / family >= 6 use the extended family/model fields */
f5f786d0 956 if (c->x86 == 0xf)
1da177e4 957 c->x86 += (tfms >> 20) & 0xff;
f5f786d0 958 if (c->x86 >= 0x6)
1da177e4 959 c->x86_model += ((tfms >> 16) & 0xF) << 4;
/* bit 19 = CLFLUSH; misc[15:8] is the line size in 8-byte units */
04e1ba85 960 if (c->x86_capability[0] & (1<<19))
1da177e4 961 c->x86_clflush_size = ((misc >> 8) & 0xff) * 8;
1da177e4
LT
962 } else {
963 /* Have CPUID level 0 only - unheard of */
964 c->x86 = 4;
965 }
a158608b
AK
966
967#ifdef CONFIG_SMP
f3fa8ebc 968 c->phys_proc_id = (cpuid_ebx(1) >> 24) & 0xff;
a158608b 969#endif
1da177e4
LT
970 /* AMD-defined flags: level 0x80000001 */
971 xlvl = cpuid_eax(0x80000000);
ebfcaa96 972 c->extended_cpuid_level = xlvl;
1da177e4
LT
973 if ((xlvl & 0xffff0000) == 0x80000000) {
974 if (xlvl >= 0x80000001) {
975 c->x86_capability[1] = cpuid_edx(0x80000001);
5b7abc6f 976 c->x86_capability[6] = cpuid_ecx(0x80000001);
1da177e4
LT
977 }
978 if (xlvl >= 0x80000004)
979 get_model_name(c); /* Default name */
980 }
981
982 /* Transmeta-defined flags: level 0x80860001 */
983 xlvl = cpuid_eax(0x80860000);
984 if ((xlvl & 0xffff0000) == 0x80860000) {
985 /* Don't set x86_cpuid_level here for now to not confuse. */
986 if (xlvl >= 0x80860001)
987 c->x86_capability[2] = cpuid_edx(0x80860001);
988 }
989
/* only AMD needs vendor-specific work this early */
a860b63c
YL
990 switch (c->x86_vendor) {
991 case X86_VENDOR_AMD:
992 early_init_amd(c);
993 break;
994 }
995
996}
997
998/*
999 * This does the hard work of actually picking apart the CPU stuff...
1000 */
void __cpuinit identify_cpu(struct cpuinfo_x86 *c) /* full per-CPU identification */
1001void __cpuinit identify_cpu(struct cpuinfo_x86 *c)
1002{
1003 int i;
1004
1005 early_identify_cpu(c);
1006
1d67953f
VP
1007 init_scattered_cpuid_features(c);
1008
1e9f28fa
SS
1009 c->apicid = phys_pkg_id(0);
1010
1da177e4
LT
1011 /*
1012 * Vendor-specific initialization. In this section we
1013 * canonicalize the feature flags, meaning if there are
1014 * features a certain CPU supports which CPUID doesn't
1015 * tell us, CPUID claiming incorrect flags, or other bugs,
1016 * we handle them here.
1017 *
1018 * At the end of this section, c->x86_capability better
1019 * indicate the features this CPU genuinely supports!
1020 */
1021 switch (c->x86_vendor) {
1022 case X86_VENDOR_AMD:
1023 init_amd(c);
1024 break;
1025
1026 case X86_VENDOR_INTEL:
1027 init_intel(c);
1028 break;
1029
1030 case X86_VENDOR_UNKNOWN:
1031 default:
1032 display_cacheinfo(c);
1033 break;
1034 }
1035
1036 select_idle_routine(c);
04e1ba85 1037 detect_ht(c);
1da177e4
LT
1038
1039 /*
1040 * On SMP, boot_cpu_data holds the common feature set between
1041 * all CPUs; so make sure that we indicate which features are
1042 * common between the CPUs. The first time this routine gets
1043 * executed, c == &boot_cpu_data.
1044 */
1045 if (c != &boot_cpu_data) {
1046 /* AND the already accumulated flags with these */
04e1ba85 1047 for (i = 0; i < NCAPINTS; i++)
1da177e4
LT
1048 boot_cpu_data.x86_capability[i] &= c->x86_capability[i];
1049 }
1050
1051#ifdef CONFIG_X86_MCE
1052 mcheck_init(c);
1053#endif
/* MTRR AP setup only for secondary CPUs, not the boot CPU */
8bd99481 1054 if (c != &boot_cpu_data)
3b520b23 1055 mtrr_ap_init();
1da177e4 1056#ifdef CONFIG_NUMA
3019e8eb 1057 numa_add_cpu(smp_processor_id());
1da177e4
LT
1058#endif
1059}
1da177e4 1060
e6982c67 1061void __cpuinit print_cpu_info(struct cpuinfo_x86 *c)
1da177e4
LT
1062{
1063 if (c->x86_model_id[0])
04e1ba85 1064 printk(KERN_INFO "%s", c->x86_model_id);
1da177e4 1065
04e1ba85
TG
1066 if (c->x86_mask || c->cpuid_level >= 0)
1067 printk(KERN_CONT " stepping %02x\n", c->x86_mask);
1da177e4 1068 else
04e1ba85 1069 printk(KERN_CONT "\n");
1da177e4
LT
1070}
1071
1072/*
1073 * Get CPU information for use by the procfs.
1074 */
1075
1076static int show_cpuinfo(struct seq_file *m, void *v)
1077{
1078 struct cpuinfo_x86 *c = v;
04e1ba85 1079 int cpu = 0, i;
1da177e4 1080
04e1ba85 1081 /*
1da177e4
LT
1082 * These flag bits must match the definitions in <asm/cpufeature.h>.
1083 * NULL means this bit is undefined or reserved; either way it doesn't
1084 * have meaning as far as Linux is concerned. Note that it's important
1085 * to realize there is a difference between this table and CPUID -- if
1086 * applications want to get the raw CPUID data, they should access
1087 * /dev/cpu/<cpu_nr>/cpuid instead.
1088 */
121d7bf5 1089 static const char *const x86_cap_flags[] = {
1da177e4 1090 /* Intel-defined */
04e1ba85
TG
1091 "fpu", "vme", "de", "pse", "tsc", "msr", "pae", "mce",
1092 "cx8", "apic", NULL, "sep", "mtrr", "pge", "mca", "cmov",
1093 "pat", "pse36", "pn", "clflush", NULL, "dts", "acpi", "mmx",
1094 "fxsr", "sse", "sse2", "ss", "ht", "tm", "ia64", "pbe",
1da177e4
LT
1095
1096 /* AMD-defined */
3c3b73b6 1097 NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
1da177e4
LT
1098 NULL, NULL, NULL, "syscall", NULL, NULL, NULL, NULL,
1099 NULL, NULL, NULL, NULL, "nx", NULL, "mmxext", NULL,
f790cd30
AK
1100 NULL, "fxsr_opt", "pdpe1gb", "rdtscp", NULL, "lm",
1101 "3dnowext", "3dnow",
1da177e4
LT
1102
1103 /* Transmeta-defined */
1104 "recovery", "longrun", NULL, "lrti", NULL, NULL, NULL, NULL,
1105 NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
1106 NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
1107 NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
1108
1109 /* Other (Linux-defined) */
ec481536
PA
1110 "cxmmx", "k6_mtrr", "cyrix_arr", "centaur_mcr",
1111 NULL, NULL, NULL, NULL,
1112 "constant_tsc", "up", NULL, "arch_perfmon",
1113 "pebs", "bts", NULL, "sync_rdtsc",
1114 "rep_good", NULL, NULL, NULL, NULL, NULL, NULL, NULL,
1da177e4
LT
1115 NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
1116
1117 /* Intel-defined (#2) */
9d95dd84 1118 "pni", NULL, NULL, "monitor", "ds_cpl", "vmx", "smx", "est",
dcf10307 1119 "tm2", "ssse3", "cid", NULL, NULL, "cx16", "xtpr", NULL,
e1054b39 1120 NULL, NULL, "dca", "sse4_1", "sse4_2", NULL, NULL, "popcnt",
1da177e4
LT
1121 NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
1122
5b7abc6f
PA
1123 /* VIA/Cyrix/Centaur-defined */
1124 NULL, NULL, "rng", "rng_en", NULL, NULL, "ace", "ace_en",
ec481536 1125 "ace2", "ace2_en", "phe", "phe_en", "pmm", "pmm_en", NULL, NULL,
5b7abc6f
PA
1126 NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
1127 NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
1128
1da177e4 1129 /* AMD-defined (#2) */
e1054b39
PA
1130 "lahf_lm", "cmp_legacy", "svm", "extapic",
1131 "cr8_legacy", "abm", "sse4a", "misalignsse",
1132 "3dnowprefetch", "osvw", "ibs", "sse5",
1133 "skinit", "wdt", NULL, NULL,
1da177e4 1134 NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
5b7abc6f 1135 NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
1d67953f
VP
1136
1137 /* Auxiliary (Linux-defined) */
1138 "ida", NULL, NULL, NULL, NULL, NULL, NULL, NULL,
1139 NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
1140 NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
1141 NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
1da177e4 1142 };
121d7bf5 1143 static const char *const x86_power_flags[] = {
1da177e4
LT
1144 "ts", /* temperature sensor */
1145 "fid", /* frequency id control */
1146 "vid", /* voltage id control */
1147 "ttp", /* thermal trip */
1148 "tm",
3f98bc49 1149 "stc",
f790cd30
AK
1150 "100mhzsteps",
1151 "hwpstate",
d824395c
JR
1152 "", /* tsc invariant mapped to constant_tsc */
1153 /* nothing */
1da177e4
LT
1154 };
1155
1156
1157#ifdef CONFIG_SMP
92cb7612 1158 cpu = c->cpu_index;
1da177e4
LT
1159#endif
1160
04e1ba85
TG
1161 seq_printf(m, "processor\t: %u\n"
1162 "vendor_id\t: %s\n"
1163 "cpu family\t: %d\n"
1164 "model\t\t: %d\n"
1165 "model name\t: %s\n",
1166 (unsigned)cpu,
1167 c->x86_vendor_id[0] ? c->x86_vendor_id : "unknown",
1168 c->x86,
1169 (int)c->x86_model,
1170 c->x86_model_id[0] ? c->x86_model_id : "unknown");
1171
1da177e4
LT
1172 if (c->x86_mask || c->cpuid_level >= 0)
1173 seq_printf(m, "stepping\t: %d\n", c->x86_mask);
1174 else
1175 seq_printf(m, "stepping\t: unknown\n");
04e1ba85
TG
1176
1177 if (cpu_has(c, X86_FEATURE_TSC)) {
92cb7612 1178 unsigned int freq = cpufreq_quick_get((unsigned)cpu);
04e1ba85 1179
95235ca2
VP
1180 if (!freq)
1181 freq = cpu_khz;
1da177e4 1182 seq_printf(m, "cpu MHz\t\t: %u.%03u\n",
04e1ba85 1183 freq / 1000, (freq % 1000));
1da177e4
LT
1184 }
1185
1186 /* Cache size */
04e1ba85 1187 if (c->x86_cache_size >= 0)
1da177e4 1188 seq_printf(m, "cache size\t: %d KB\n", c->x86_cache_size);
04e1ba85 1189
1da177e4 1190#ifdef CONFIG_SMP
94605eff 1191 if (smp_num_siblings * c->x86_max_cores > 1) {
f3fa8ebc 1192 seq_printf(m, "physical id\t: %d\n", c->phys_proc_id);
08357611
MT
1193 seq_printf(m, "siblings\t: %d\n",
1194 cpus_weight(per_cpu(cpu_core_map, cpu)));
f3fa8ebc 1195 seq_printf(m, "core id\t\t: %d\n", c->cpu_core_id);
94605eff 1196 seq_printf(m, "cpu cores\t: %d\n", c->booted_cores);
db468681 1197 }
04e1ba85 1198#endif
1da177e4
LT
1199
1200 seq_printf(m,
04e1ba85
TG
1201 "fpu\t\t: yes\n"
1202 "fpu_exception\t: yes\n"
1203 "cpuid level\t: %d\n"
1204 "wp\t\t: yes\n"
1205 "flags\t\t:",
1da177e4
LT
1206 c->cpuid_level);
1207
04e1ba85
TG
1208 for (i = 0; i < 32*NCAPINTS; i++)
1209 if (cpu_has(c, i) && x86_cap_flags[i] != NULL)
1210 seq_printf(m, " %s", x86_cap_flags[i]);
1211
1da177e4
LT
1212 seq_printf(m, "\nbogomips\t: %lu.%02lu\n",
1213 c->loops_per_jiffy/(500000/HZ),
1214 (c->loops_per_jiffy/(5000/HZ)) % 100);
1215
04e1ba85 1216 if (c->x86_tlbsize > 0)
1da177e4
LT
1217 seq_printf(m, "TLB size\t: %d 4K pages\n", c->x86_tlbsize);
1218 seq_printf(m, "clflush size\t: %d\n", c->x86_clflush_size);
1219 seq_printf(m, "cache_alignment\t: %d\n", c->x86_cache_alignment);
1220
04e1ba85 1221 seq_printf(m, "address sizes\t: %u bits physical, %u bits virtual\n",
1da177e4
LT
1222 c->x86_phys_bits, c->x86_virt_bits);
1223
1224 seq_printf(m, "power management:");
04e1ba85
TG
1225 for (i = 0; i < 32; i++) {
1226 if (c->x86_power & (1 << i)) {
1227 if (i < ARRAY_SIZE(x86_power_flags) &&
1228 x86_power_flags[i])
1229 seq_printf(m, "%s%s",
1230 x86_power_flags[i][0]?" ":"",
1231 x86_power_flags[i]);
1232 else
1233 seq_printf(m, " [%d]", i);
1234 }
1da177e4 1235 }
1da177e4 1236
d31ddaa1 1237 seq_printf(m, "\n\n");
1da177e4
LT
1238
1239 return 0;
1240}
1241
1242static void *c_start(struct seq_file *m, loff_t *pos)
1243{
92cb7612 1244 if (*pos == 0) /* just in case, cpu 0 is not the first */
c0c52d28
AH
1245 *pos = first_cpu(cpu_online_map);
1246 if ((*pos) < NR_CPUS && cpu_online(*pos))
92cb7612
MT
1247 return &cpu_data(*pos);
1248 return NULL;
1da177e4
LT
1249}
1250
1251static void *c_next(struct seq_file *m, void *v, loff_t *pos)
1252{
c0c52d28 1253 *pos = next_cpu(*pos, cpu_online_map);
1da177e4
LT
1254 return c_start(m, pos);
1255}
1256
/* seq_file iterator teardown: nothing to release. */
static void c_stop(struct seq_file *m, void *v)
{
}
1260
/* seq_file operations backing /proc/cpuinfo */
struct seq_operations cpuinfo_op = {
	.start = c_start,
	.next = c_next,
	.stop = c_stop,
	.show = show_cpuinfo,
};