x86: implement support to synchronize RDTSC through MFENCE on AMD CPUs
[GitHub/mt8127/android_kernel_alcatel_ttab.git] / arch / x86 / kernel / setup_64.c
CommitLineData
1da177e4 1/*
1da177e4 2 * Copyright (C) 1995 Linus Torvalds
1da177e4
LT
3 */
4
5/*
6 * This file handles the architecture-dependent parts of initialization
7 */
8
9#include <linux/errno.h>
10#include <linux/sched.h>
11#include <linux/kernel.h>
12#include <linux/mm.h>
13#include <linux/stddef.h>
14#include <linux/unistd.h>
15#include <linux/ptrace.h>
16#include <linux/slab.h>
17#include <linux/user.h>
18#include <linux/a.out.h>
894673ee 19#include <linux/screen_info.h>
1da177e4
LT
20#include <linux/ioport.h>
21#include <linux/delay.h>
1da177e4
LT
22#include <linux/init.h>
23#include <linux/initrd.h>
24#include <linux/highmem.h>
25#include <linux/bootmem.h>
26#include <linux/module.h>
27#include <asm/processor.h>
28#include <linux/console.h>
29#include <linux/seq_file.h>
aac04b32 30#include <linux/crash_dump.h>
1da177e4
LT
31#include <linux/root_dev.h>
32#include <linux/pci.h>
5b83683f 33#include <linux/efi.h>
1da177e4
LT
34#include <linux/acpi.h>
35#include <linux/kallsyms.h>
36#include <linux/edd.h>
bbfceef4 37#include <linux/mmzone.h>
5f5609df 38#include <linux/kexec.h>
95235ca2 39#include <linux/cpufreq.h>
e9928674 40#include <linux/dmi.h>
17a941d8 41#include <linux/dma-mapping.h>
681558fd 42#include <linux/ctype.h>
746ef0cd 43#include <linux/uaccess.h>
bbfceef4 44
1da177e4
LT
45#include <asm/mtrr.h>
46#include <asm/uaccess.h>
47#include <asm/system.h>
48#include <asm/io.h>
49#include <asm/smp.h>
50#include <asm/msr.h>
51#include <asm/desc.h>
52#include <video/edid.h>
53#include <asm/e820.h>
54#include <asm/dma.h>
55#include <asm/mpspec.h>
56#include <asm/mmu_context.h>
1da177e4
LT
57#include <asm/proto.h>
58#include <asm/setup.h>
59#include <asm/mach_apic.h>
60#include <asm/numa.h>
2bc0414e 61#include <asm/sections.h>
f2d3efed 62#include <asm/dmi.h>
00bf4098 63#include <asm/cacheflush.h>
af7a78e9 64#include <asm/mce.h>
eee3af4a 65#include <asm/ds.h>
1da177e4 66
746ef0cd
GOC
67#ifdef CONFIG_PARAVIRT
68#include <asm/paravirt.h>
69#else
70#define ARCH_SETUP
71#endif
72
1da177e4
LT
73/*
74 * Machine setup..
75 */
76
6c231b7b 77struct cpuinfo_x86 boot_cpu_data __read_mostly;
2ee60e17 78EXPORT_SYMBOL(boot_cpu_data);
1da177e4
LT
79
80unsigned long mmu_cr4_features;
81
1da177e4
LT
82/* Boot loader ID as an integer, for the benefit of proc_dointvec */
83int bootloader_type;
84
85unsigned long saved_video_mode;
86
f039b754
AK
87int force_mwait __cpuinitdata;
88
04e1ba85 89/*
f2d3efed
AK
90 * Early DMI memory
91 */
92int dmi_alloc_index;
93char dmi_alloc_data[DMI_MAX_DATA];
94
1da177e4
LT
95/*
96 * Setup options
97 */
1da177e4 98struct screen_info screen_info;
2ee60e17 99EXPORT_SYMBOL(screen_info);
1da177e4
LT
100struct sys_desc_table_struct {
101 unsigned short length;
102 unsigned char table[0];
103};
104
105struct edid_info edid_info;
ba70710e 106EXPORT_SYMBOL_GPL(edid_info);
1da177e4
LT
107
108extern int root_mountflags;
1da177e4 109
adf48856 110char __initdata command_line[COMMAND_LINE_SIZE];
1da177e4
LT
111
112struct resource standard_io_resources[] = {
113 { .name = "dma1", .start = 0x00, .end = 0x1f,
114 .flags = IORESOURCE_BUSY | IORESOURCE_IO },
115 { .name = "pic1", .start = 0x20, .end = 0x21,
116 .flags = IORESOURCE_BUSY | IORESOURCE_IO },
117 { .name = "timer0", .start = 0x40, .end = 0x43,
118 .flags = IORESOURCE_BUSY | IORESOURCE_IO },
119 { .name = "timer1", .start = 0x50, .end = 0x53,
120 .flags = IORESOURCE_BUSY | IORESOURCE_IO },
121 { .name = "keyboard", .start = 0x60, .end = 0x6f,
122 .flags = IORESOURCE_BUSY | IORESOURCE_IO },
123 { .name = "dma page reg", .start = 0x80, .end = 0x8f,
124 .flags = IORESOURCE_BUSY | IORESOURCE_IO },
125 { .name = "pic2", .start = 0xa0, .end = 0xa1,
126 .flags = IORESOURCE_BUSY | IORESOURCE_IO },
127 { .name = "dma2", .start = 0xc0, .end = 0xdf,
128 .flags = IORESOURCE_BUSY | IORESOURCE_IO },
129 { .name = "fpu", .start = 0xf0, .end = 0xff,
130 .flags = IORESOURCE_BUSY | IORESOURCE_IO }
131};
132
1da177e4
LT
133#define IORESOURCE_RAM (IORESOURCE_BUSY | IORESOURCE_MEM)
134
c9cce83d 135static struct resource data_resource = {
1da177e4
LT
136 .name = "Kernel data",
137 .start = 0,
138 .end = 0,
139 .flags = IORESOURCE_RAM,
140};
c9cce83d 141static struct resource code_resource = {
1da177e4
LT
142 .name = "Kernel code",
143 .start = 0,
144 .end = 0,
145 .flags = IORESOURCE_RAM,
146};
c9cce83d 147static struct resource bss_resource = {
00bf4098
BW
148 .name = "Kernel bss",
149 .start = 0,
150 .end = 0,
151 .flags = IORESOURCE_RAM,
152};
1da177e4 153
8c61b900
TG
154static void __cpuinit early_identify_cpu(struct cpuinfo_x86 *c);
155
2c8c0e6b
AK
156#ifdef CONFIG_PROC_VMCORE
157/* elfcorehdr= specifies the location of elf core header
158 * stored by the crashed kernel. This option will be passed
159 * by kexec loader to the capture kernel.
160 */
161static int __init setup_elfcorehdr(char *arg)
681558fd 162{
2c8c0e6b
AK
163 char *end;
164 if (!arg)
165 return -EINVAL;
166 elfcorehdr_addr = memparse(arg, &end);
167 return end > arg ? 0 : -EINVAL;
681558fd 168}
2c8c0e6b 169early_param("elfcorehdr", setup_elfcorehdr);
e2c03888
AK
170#endif
171
2b97690f 172#ifndef CONFIG_NUMA
bbfceef4
MT
173static void __init
174contig_initmem_init(unsigned long start_pfn, unsigned long end_pfn)
1da177e4 175{
bbfceef4
MT
176 unsigned long bootmap_size, bootmap;
177
bbfceef4
MT
178 bootmap_size = bootmem_bootmap_pages(end_pfn)<<PAGE_SHIFT;
179 bootmap = find_e820_area(0, end_pfn<<PAGE_SHIFT, bootmap_size);
180 if (bootmap == -1L)
04e1ba85 181 panic("Cannot find bootmem map of size %ld\n", bootmap_size);
bbfceef4 182 bootmap_size = init_bootmem(bootmap >> PAGE_SHIFT, end_pfn);
5cb248ab
MG
183 e820_register_active_regions(0, start_pfn, end_pfn);
184 free_bootmem_with_active_regions(0, end_pfn);
bbfceef4 185 reserve_bootmem(bootmap, bootmap_size);
04e1ba85 186}
1da177e4
LT
187#endif
188
1da177e4
LT
#if defined(CONFIG_EDD) || defined(CONFIG_EDD_MODULE)
struct edd edd;
#ifdef CONFIG_EDD_MODULE
EXPORT_SYMBOL(edd);
#endif
/**
 * copy_edd() - Copy the BIOS EDD information
 * from boot_params into a safe place.
 *
 * boot_params lives in early boot data that may be reclaimed later,
 * so the MBR signatures and EDD info records are snapshotted into the
 * static 'edd' object before that happens.
 */
static inline void copy_edd(void)
{
	memcpy(edd.mbr_signature, boot_params.edd_mbr_sig_buffer,
	       sizeof(edd.mbr_signature));
	memcpy(edd.edd_info, boot_params.eddbuf, sizeof(edd.edd_info));
	edd.mbr_signature_nr = boot_params.edd_mbr_sig_buf_entries;
	edd.edd_info_nr = boot_params.eddbuf_entries;
}
#else
/* EDD not configured: keep a no-op so callers need no #ifdefs. */
static inline void copy_edd(void)
{
}
#endif
212
5c3391f9
BW
213#ifdef CONFIG_KEXEC
214static void __init reserve_crashkernel(void)
215{
216 unsigned long long free_mem;
217 unsigned long long crash_size, crash_base;
218 int ret;
219
04e1ba85
TG
220 free_mem =
221 ((unsigned long long)max_low_pfn - min_low_pfn) << PAGE_SHIFT;
5c3391f9
BW
222
223 ret = parse_crashkernel(boot_command_line, free_mem,
224 &crash_size, &crash_base);
225 if (ret == 0 && crash_size) {
226 if (crash_base > 0) {
227 printk(KERN_INFO "Reserving %ldMB of memory at %ldMB "
228 "for crashkernel (System RAM: %ldMB)\n",
229 (unsigned long)(crash_size >> 20),
230 (unsigned long)(crash_base >> 20),
231 (unsigned long)(free_mem >> 20));
232 crashk_res.start = crash_base;
233 crashk_res.end = crash_base + crash_size - 1;
234 reserve_bootmem(crash_base, crash_size);
235 } else
236 printk(KERN_INFO "crashkernel reservation failed - "
237 "you have to specify a base address\n");
238 }
239}
240#else
241static inline void __init reserve_crashkernel(void)
242{}
243#endif
244
1da177e4 245#define EBDA_ADDR_POINTER 0x40E
ac71d12c
AK
246
247unsigned __initdata ebda_addr;
248unsigned __initdata ebda_size;
249
/*
 * Locate the Extended BIOS Data Area and record its physical address
 * and page-rounded size in ebda_addr/ebda_size so setup_arch() can
 * reserve it.
 */
static void discover_ebda(void)
{
	/*
	 * there is a real-mode segmented pointer pointing to the
	 * 4K EBDA area at 0x40E
	 */
	ebda_addr = *(unsigned short *)__va(EBDA_ADDR_POINTER);
	/*
	 * There can be some situations, like paravirtualized guests,
	 * in which there is no available ebda information. In such
	 * case, just skip it
	 */
	if (!ebda_addr) {
		ebda_size = 0;
		return;
	}

	/* Real-mode segment -> linear address. */
	ebda_addr <<= 4;

	/* The first word of the EBDA holds its length in KiB. */
	ebda_size = *(unsigned short *)__va(ebda_addr);

	/* Round EBDA up to pages */
	if (ebda_size == 0)
		ebda_size = 1;
	ebda_size <<= 10;
	ebda_size = round_up(ebda_size + (ebda_addr & ~PAGE_MASK), PAGE_SIZE);
	/* Clamp to the architectural 64K EBDA maximum. */
	if (ebda_size > 64*1024)
		ebda_size = 64*1024;
}
279
746ef0cd
GOC
/* Overridden in paravirt.c if CONFIG_PARAVIRT */
void __attribute__((weak)) memory_setup(void)
{
	/* Default: derive the e820 memory map from firmware data. */
	machine_specific_memory_setup();
}
285
1da177e4
LT
/*
 * Architecture-specific boot-time setup for x86-64.  Runs once on the
 * boot CPU, very early: decodes boot_params, builds the memory map,
 * sets up bootmem and all early reservations, and parses ACPI/MP
 * tables.  Statement order here is load-bearing — many steps consume
 * state produced by the previous one.
 */
void __init setup_arch(char **cmdline_p)
{
	unsigned i;

	printk(KERN_INFO "Command line: %s\n", boot_command_line);

	/* Pull the pieces of boot_params we need into kernel globals. */
	ROOT_DEV = old_decode_dev(boot_params.hdr.root_dev);
	screen_info = boot_params.screen_info;
	edid_info = boot_params.edid_info;
	saved_video_mode = boot_params.hdr.vid_mode;
	bootloader_type = boot_params.hdr.type_of_loader;

#ifdef CONFIG_BLK_DEV_RAM
	rd_image_start = boot_params.hdr.ram_size & RAMDISK_IMAGE_START_MASK;
	rd_prompt = ((boot_params.hdr.ram_size & RAMDISK_PROMPT_FLAG) != 0);
	rd_doload = ((boot_params.hdr.ram_size & RAMDISK_LOAD_FLAG) != 0);
#endif
#ifdef CONFIG_EFI
	/* "EL64" signature means we were booted by a 64-bit EFI loader. */
	if (!strncmp((char *)&boot_params.efi_info.efi_loader_signature,
		     "EL64", 4))
		efi_enabled = 1;
#endif

	ARCH_SETUP

	/* Build the e820 map (possibly paravirt-overridden). */
	memory_setup();
	copy_edd();

	if (!boot_params.hdr.root_flags)
		root_mountflags &= ~MS_RDONLY;
	init_mm.start_code = (unsigned long) &_text;
	init_mm.end_code = (unsigned long) &_etext;
	init_mm.end_data = (unsigned long) &_edata;
	init_mm.brk = (unsigned long) &_end;

	/* Describe the kernel image for /proc/iomem. */
	code_resource.start = virt_to_phys(&_text);
	code_resource.end = virt_to_phys(&_etext)-1;
	data_resource.start = virt_to_phys(&_etext);
	data_resource.end = virt_to_phys(&_edata)-1;
	bss_resource.start = virt_to_phys(&__bss_start);
	bss_resource.end = virt_to_phys(&__bss_stop)-1;

	early_identify_cpu(&boot_cpu_data);

	strlcpy(command_line, boot_command_line, COMMAND_LINE_SIZE);
	*cmdline_p = command_line;

	parse_early_param();

	finish_e820_parsing();

	e820_register_active_regions(0, 0, -1UL);
	/*
	 * partially used pages are not usable - thus
	 * we are rounding upwards:
	 */
	end_pfn = e820_end_of_ram();
	num_physpages = end_pfn;

	check_efer();

	/* Must run before the EBDA reservation below. */
	discover_ebda();

	init_memory_mapping(0, (end_pfn_map << PAGE_SHIFT));
	if (efi_enabled)
		efi_init();

	dmi_scan_machine();

	io_delay_init();

#ifdef CONFIG_SMP
	/* setup to use the static apicid table during kernel startup */
	x86_cpu_to_apicid_ptr = (void *)&x86_cpu_to_apicid_init;
#endif

#ifdef CONFIG_ACPI
	/*
	 * Initialize the ACPI boot-time table parser (gets the RSDP and SDT).
	 * Call this early for SRAT node setup.
	 */
	acpi_boot_table_init();
#endif

	/* How many end-of-memory variables you have, grandma! */
	max_low_pfn = end_pfn;
	max_pfn = end_pfn;
	high_memory = (void *)__va(end_pfn * PAGE_SIZE - 1) + 1;

	/* Remove active ranges so rediscovery with NUMA-awareness happens */
	remove_all_active_ranges();

#ifdef CONFIG_ACPI_NUMA
	/*
	 * Parse SRAT to discover nodes.
	 */
	acpi_numa_init();
#endif

#ifdef CONFIG_NUMA
	numa_initmem_init(0, end_pfn);
#else
	contig_initmem_init(0, end_pfn);
#endif

	/* Reserve direct mapping */
	reserve_bootmem_generic(table_start << PAGE_SHIFT,
				(table_end - table_start) << PAGE_SHIFT);

	/* reserve kernel */
	reserve_bootmem_generic(__pa_symbol(&_text),
				__pa_symbol(&_end) - __pa_symbol(&_text));

	/*
	 * reserve physical page 0 - it's a special BIOS page on many boxes,
	 * enabling clean reboots, SMP operation, laptop functions.
	 */
	reserve_bootmem_generic(0, PAGE_SIZE);

	/* reserve ebda region */
	if (ebda_addr)
		reserve_bootmem_generic(ebda_addr, ebda_size);
#ifdef CONFIG_NUMA
	/* reserve nodemap region */
	if (nodemap_addr)
		reserve_bootmem_generic(nodemap_addr, nodemap_size);
#endif

#ifdef CONFIG_SMP
	/* Reserve SMP trampoline */
	reserve_bootmem_generic(SMP_TRAMPOLINE_BASE, 2*PAGE_SIZE);
#endif

#ifdef CONFIG_ACPI_SLEEP
	/*
	 * Reserve low memory region for sleep support.
	 */
	acpi_reserve_bootmem();
#endif

	if (efi_enabled) {
		efi_map_memmap();
		efi_reserve_bootmem();
	}

	/*
	 * Find and reserve possible boot-time SMP configuration:
	 */
	find_smp_config();
#ifdef CONFIG_BLK_DEV_INITRD
	if (boot_params.hdr.type_of_loader && boot_params.hdr.ramdisk_image) {
		unsigned long ramdisk_image = boot_params.hdr.ramdisk_image;
		unsigned long ramdisk_size = boot_params.hdr.ramdisk_size;
		unsigned long ramdisk_end = ramdisk_image + ramdisk_size;
		unsigned long end_of_mem = end_pfn << PAGE_SHIFT;

		/* Only keep the initrd if it fits inside usable RAM. */
		if (ramdisk_end <= end_of_mem) {
			reserve_bootmem_generic(ramdisk_image, ramdisk_size);
			initrd_start = ramdisk_image + PAGE_OFFSET;
			initrd_end = initrd_start+ramdisk_size;
		} else {
			printk(KERN_ERR "initrd extends beyond end of memory "
			       "(0x%08lx > 0x%08lx)\ndisabling initrd\n",
			       ramdisk_end, end_of_mem);
			initrd_start = 0;
		}
	}
#endif
	reserve_crashkernel();
	paging_init();

	early_quirks();

	/*
	 * set this early, so we dont allocate cpu0
	 * if MADT list doesnt list BSP first
	 * mpparse.c/MP_processor_info() allocates logical cpu numbers.
	 */
	cpu_set(0, cpu_present_map);
#ifdef CONFIG_ACPI
	/*
	 * Read APIC and some other early information from ACPI tables.
	 */
	acpi_boot_init();
#endif

	init_cpu_to_node();

	/*
	 * get boot-time SMP configuration:
	 */
	if (smp_found_config)
		get_smp_config();
	init_apic_mappings();
	ioapic_init_mappings();

	/*
	 * We trust e820 completely. No explicit ROM probing in memory.
	 */
	e820_reserve_resources(&code_resource, &data_resource, &bss_resource);
	e820_mark_nosave_regions();

	/* request I/O space for devices used on all i[345]86 PCs */
	for (i = 0; i < ARRAY_SIZE(standard_io_resources); i++)
		request_resource(&ioport_resource, &standard_io_resources[i]);

	e820_setup_gap();

#ifdef CONFIG_VT
#if defined(CONFIG_VGA_CONSOLE)
	if (!efi_enabled || (efi_mem_type(0xa0000) != EFI_CONVENTIONAL_MEMORY))
		conswitchp = &vga_con;
#elif defined(CONFIG_DUMMY_CONSOLE)
	conswitchp = &dummy_con;
#endif
#endif
}
503
e6982c67 504static int __cpuinit get_model_name(struct cpuinfo_x86 *c)
1da177e4
LT
505{
506 unsigned int *v;
507
ebfcaa96 508 if (c->extended_cpuid_level < 0x80000004)
1da177e4
LT
509 return 0;
510
511 v = (unsigned int *) c->x86_model_id;
512 cpuid(0x80000002, &v[0], &v[1], &v[2], &v[3]);
513 cpuid(0x80000003, &v[4], &v[5], &v[6], &v[7]);
514 cpuid(0x80000004, &v[8], &v[9], &v[10], &v[11]);
515 c->x86_model_id[48] = 0;
516 return 1;
517}
518
519
e6982c67 520static void __cpuinit display_cacheinfo(struct cpuinfo_x86 *c)
1da177e4
LT
521{
522 unsigned int n, dummy, eax, ebx, ecx, edx;
523
ebfcaa96 524 n = c->extended_cpuid_level;
1da177e4
LT
525
526 if (n >= 0x80000005) {
527 cpuid(0x80000005, &dummy, &ebx, &ecx, &edx);
04e1ba85
TG
528 printk(KERN_INFO "CPU: L1 I Cache: %dK (%d bytes/line), "
529 "D cache %dK (%d bytes/line)\n",
530 edx>>24, edx&0xFF, ecx>>24, ecx&0xFF);
531 c->x86_cache_size = (ecx>>24) + (edx>>24);
1da177e4
LT
532 /* On K8 L1 TLB is inclusive, so don't count it */
533 c->x86_tlbsize = 0;
534 }
535
536 if (n >= 0x80000006) {
537 cpuid(0x80000006, &dummy, &ebx, &ecx, &edx);
538 ecx = cpuid_ecx(0x80000006);
539 c->x86_cache_size = ecx >> 16;
540 c->x86_tlbsize += ((ebx >> 16) & 0xfff) + (ebx & 0xfff);
541
542 printk(KERN_INFO "CPU: L2 Cache: %dK (%d bytes/line)\n",
543 c->x86_cache_size, ecx & 0xFF);
544 }
545
546 if (n >= 0x80000007)
04e1ba85 547 cpuid(0x80000007, &dummy, &dummy, &dummy, &c->x86_power);
1da177e4 548 if (n >= 0x80000008) {
04e1ba85 549 cpuid(0x80000008, &eax, &dummy, &dummy, &dummy);
1da177e4
LT
550 c->x86_virt_bits = (eax >> 8) & 0xff;
551 c->x86_phys_bits = eax & 0xff;
552 }
553}
554
3f098c26
AK
#ifdef CONFIG_NUMA
/*
 * Find an online NUMA node "near" the given APIC id by scanning the
 * apicid_to_node table first downwards, then upwards.  Used as a
 * fallback when the CPU's own node is offline or unknown.
 */
static int nearby_node(int apicid)
{
	int i, node;

	/* Prefer lower APIC ids ... */
	for (i = apicid - 1; i >= 0; i--) {
		node = apicid_to_node[i];
		if (node != NUMA_NO_NODE && node_online(node))
			return node;
	}
	/* ... then higher ones. */
	for (i = apicid + 1; i < MAX_LOCAL_APIC; i++) {
		node = apicid_to_node[i];
		if (node != NUMA_NO_NODE && node_online(node))
			return node;
	}
	return first_node(node_online_map); /* Shouldn't happen */
}
#endif
573
63518644
AK
/*
 * On a AMD dual core setup the lower bits of the APIC id distingush the cores.
 * Assumes number of cores is a power of two.
 *
 * Splits the APIC id into core id + socket id using the core-id bit
 * width computed in early_init_amd(), and (under NUMA) binds this CPU
 * to a node derived from the APIC id with several fallbacks.
 */
static void __init amd_detect_cmp(struct cpuinfo_x86 *c)
{
#ifdef CONFIG_SMP
	unsigned bits;
#ifdef CONFIG_NUMA
	int cpu = smp_processor_id();
	int node = 0;
	unsigned apicid = hard_smp_processor_id();
#endif
	/* Bit width of the core-id field, set by early_init_amd(). */
	bits = c->x86_coreid_bits;

	/* Low order bits define the core id (index of core in socket) */
	c->cpu_core_id = c->phys_proc_id & ((1 << bits)-1);
	/* Convert the APIC ID into the socket ID */
	c->phys_proc_id = phys_pkg_id(bits);

#ifdef CONFIG_NUMA
	node = c->phys_proc_id;
	if (apicid_to_node[apicid] != NUMA_NO_NODE)
		node = apicid_to_node[apicid];
	if (!node_online(node)) {
		/* Two possibilities here:
		   - The CPU is missing memory and no node was created.
		   In that case try picking one from a nearby CPU
		   - The APIC IDs differ from the HyperTransport node IDs
		   which the K8 northbridge parsing fills in.
		   Assume they are all increased by a constant offset,
		   but in the same order as the HT nodeids.
		   If that doesn't result in a usable node fall back to the
		   path for the previous case. */

		int ht_nodeid = apicid - (cpu_data(0).phys_proc_id << bits);

		if (ht_nodeid >= 0 &&
		    apicid_to_node[ht_nodeid] != NUMA_NO_NODE)
			node = apicid_to_node[ht_nodeid];
		/* Pick a nearby node */
		if (!node_online(node))
			node = nearby_node(apicid);
	}
	numa_set_node(cpu, node);

	printk(KERN_INFO "CPU %d/%x -> Node %d\n", cpu, apicid, node);
#endif
#endif
}
1da177e4 624
a860b63c
YL
625static void __cpuinit early_init_amd(struct cpuinfo_x86 *c)
626{
627#ifdef CONFIG_SMP
628 unsigned bits, ecx;
629
630 /* Multi core CPU? */
631 if (c->extended_cpuid_level < 0x80000008)
632 return;
633
634 ecx = cpuid_ecx(0x80000008);
635
636 c->x86_max_cores = (ecx & 0xff) + 1;
637
638 /* CPU telling us the core id bits shift? */
639 bits = (ecx >> 12) & 0xF;
640
641 /* Otherwise recompute */
642 if (bits == 0) {
643 while ((1 << bits) < c->x86_max_cores)
644 bits++;
645 }
646
647 c->x86_coreid_bits = bits;
648
649#endif
650}
651
fb79d22e
TG
652#define ENABLE_C1E_MASK 0x18000000
653#define CPUID_PROCESSOR_SIGNATURE 1
654#define CPUID_XFAM 0x0ff00000
655#define CPUID_XFAM_K8 0x00000000
656#define CPUID_XFAM_10H 0x00100000
657#define CPUID_XFAM_11H 0x00200000
658#define CPUID_XMOD 0x000f0000
659#define CPUID_XMOD_REV_F 0x00040000
660
661/* AMD systems with C1E don't have a working lAPIC timer. Check for that. */
662static __cpuinit int amd_apic_timer_broken(void)
663{
04e1ba85
TG
664 u32 lo, hi, eax = cpuid_eax(CPUID_PROCESSOR_SIGNATURE);
665
fb79d22e
TG
666 switch (eax & CPUID_XFAM) {
667 case CPUID_XFAM_K8:
668 if ((eax & CPUID_XMOD) < CPUID_XMOD_REV_F)
669 break;
670 case CPUID_XFAM_10H:
671 case CPUID_XFAM_11H:
672 rdmsr(MSR_K8_ENABLE_C1E, lo, hi);
673 if (lo & ENABLE_C1E_MASK)
674 return 1;
675 break;
676 default:
677 /* err on the side of caution */
678 return 1;
679 }
680 return 0;
681}
682
/*
 * Full AMD-specific CPU setup: errata workarounds, feature-bit fixups,
 * model name, cache info, core topology, and the C1E lAPIC-timer check.
 */
static void __cpuinit init_amd(struct cpuinfo_x86 *c)
{
	unsigned level;

#ifdef CONFIG_SMP
	unsigned long value;

	/*
	 * Disable TLB flush filter by setting HWCR.FFDIS on K8
	 * bit 6 of msr C001_0015
	 *
	 * Errata 63 for SH-B3 steppings
	 * Errata 122 for all steppings (F+ have it disabled by default)
	 */
	if (c->x86 == 15) {
		rdmsrl(MSR_K8_HWCR, value);
		value |= 1 << 6;
		wrmsrl(MSR_K8_HWCR, value);
	}
#endif

	/* Bit 31 in normal CPUID used for nonstandard 3DNow ID;
	   3DNow is IDd by bit 31 in extended CPUID (1*32+31) anyway */
	clear_bit(0*32+31, (unsigned long *)&c->x86_capability);

	/* On C+ stepping K8 rep microcode works well for copy/memset */
	level = cpuid_eax(1);
	if (c->x86 == 15 && ((level >= 0x0f48 && level < 0x0f50) ||
			     level >= 0x0f58))
		set_cpu_cap(c, X86_FEATURE_REP_GOOD);
	/* Family 10h/11h always have good rep microcode. */
	if (c->x86 == 0x10 || c->x86 == 0x11)
		set_cpu_cap(c, X86_FEATURE_REP_GOOD);

	/* Enable workaround for FXSAVE leak */
	if (c->x86 >= 6)
		set_cpu_cap(c, X86_FEATURE_FXSAVE_LEAK);

	level = get_model_name(c);
	if (!level) {
		switch (c->x86) {
		case 15:
			/* Should distinguish Models here, but this is only
			   a fallback anyways. */
			strcpy(c->x86_model_id, "Hammer");
			break;
		}
	}
	display_cacheinfo(c);

	/* c->x86_power is 8000_0007 edx. Bit 8 is constant TSC */
	if (c->x86_power & (1<<8))
		set_cpu_cap(c, X86_FEATURE_CONSTANT_TSC);

	/* Multi core CPU? */
	if (c->extended_cpuid_level >= 0x80000008)
		amd_detect_cmp(c);

	/* Leaf 0x80000006 EDX[15:12] nonzero means an L3 is present. */
	if (c->extended_cpuid_level >= 0x80000006 &&
	    (cpuid_edx(0x80000006) & 0xf000))
		num_cache_leaves = 4;
	else
		num_cache_leaves = 3;

	if (c->x86 == 0xf || c->x86 == 0x10 || c->x86 == 0x11)
		set_cpu_cap(c, X86_FEATURE_K8);

	/* MFENCE stops RDTSC speculation */
	set_cpu_cap(c, X86_FEATURE_MFENCE_RDTSC);

	/* Family 10 doesn't support C states in MWAIT so don't use it */
	if (c->x86 == 0x10 && !force_mwait)
		clear_cpu_cap(c, X86_FEATURE_MWAIT);

	if (amd_apic_timer_broken())
		disable_apic_timer = 1;
}
759
/*
 * Detect Hyper-Threading / sibling topology from CPUID leaf 1 and
 * fill in phys_proc_id and cpu_core_id.  Skipped entirely on CPUs
 * reporting CMP_LEGACY (AMD handles topology in amd_detect_cmp()).
 */
void __cpuinit detect_ht(struct cpuinfo_x86 *c)
{
#ifdef CONFIG_SMP
	u32 eax, ebx, ecx, edx;
	int index_msb, core_bits;

	cpuid(1, &eax, &ebx, &ecx, &edx);


	if (!cpu_has(c, X86_FEATURE_HT))
		return;
	if (cpu_has(c, X86_FEATURE_CMP_LEGACY))
		goto out;

	/* EBX[23:16] = logical processors per physical package. */
	smp_num_siblings = (ebx & 0xff0000) >> 16;

	if (smp_num_siblings == 1) {
		printk(KERN_INFO "CPU: Hyper-Threading is disabled\n");
	} else if (smp_num_siblings > 1) {

		if (smp_num_siblings > NR_CPUS) {
			printk(KERN_WARNING "CPU: Unsupported number of "
			       "siblings %d", smp_num_siblings);
			smp_num_siblings = 1;
			return;
		}

		/* High bits of the APIC id select the physical package. */
		index_msb = get_count_order(smp_num_siblings);
		c->phys_proc_id = phys_pkg_id(index_msb);

		/* Threads per core = siblings / cores. */
		smp_num_siblings = smp_num_siblings / c->x86_max_cores;

		index_msb = get_count_order(smp_num_siblings);

		core_bits = get_count_order(c->x86_max_cores);

		c->cpu_core_id = phys_pkg_id(index_msb) &
					       ((1 << core_bits) - 1);
	}
out:
	if ((c->x86_max_cores * smp_num_siblings) > 1) {
		printk(KERN_INFO "CPU: Physical Processor ID: %d\n",
		       c->phys_proc_id);
		printk(KERN_INFO "CPU: Processor Core ID: %d\n",
		       c->cpu_core_id);
	}

#endif
}
809
3dd9d514
AK
810/*
811 * find out the number of processor cores on the die
812 */
e6982c67 813static int __cpuinit intel_num_cpu_cores(struct cpuinfo_x86 *c)
3dd9d514 814{
2bbc419f 815 unsigned int eax, t;
3dd9d514
AK
816
817 if (c->cpuid_level < 4)
818 return 1;
819
2bbc419f 820 cpuid_count(4, 0, &eax, &t, &t, &t);
3dd9d514
AK
821
822 if (eax & 0x1f)
823 return ((eax >> 26) + 1);
824 else
825 return 1;
826}
827
df0cc26b
AK
/*
 * Bind the current CPU to the NUMA node recorded for its APIC id by
 * SRAT parsing; fall back to the first online node when unknown.
 */
static void srat_detect_node(void)
{
#ifdef CONFIG_NUMA
	unsigned node;
	int cpu = smp_processor_id();
	int apicid = hard_smp_processor_id();

	/* Don't do the funky fallback heuristics the AMD version employs
	   for now. */
	node = apicid_to_node[apicid];
	if (node == NUMA_NO_NODE)
		node = first_node(node_online_map);
	numa_set_node(cpu, node);

	printk(KERN_INFO "CPU %d/%x -> Node %d\n", cpu, apicid, node);
#endif
}
845
/*
 * Intel-specific CPU setup: cache info, perfmon/BTS/PEBS detection,
 * address-width quirks, and per-family feature-bit fixups.
 */
static void __cpuinit init_intel(struct cpuinfo_x86 *c)
{
	/* Cache sizes */
	unsigned n;

	init_intel_cacheinfo(c);
	if (c->cpuid_level > 9) {
		unsigned eax = cpuid_eax(10);
		/* Check for version and the number of counters */
		if ((eax & 0xff) && (((eax>>8) & 0xff) > 1))
			set_cpu_cap(c, X86_FEATURE_ARCH_PERFMON);
	}

	if (cpu_has_ds) {
		unsigned int l1, l2;
		rdmsr(MSR_IA32_MISC_ENABLE, l1, l2);
		/* MISC_ENABLE bit 11 clear -> BTS available,
		   bit 12 clear -> PEBS available. */
		if (!(l1 & (1<<11)))
			set_cpu_cap(c, X86_FEATURE_BTS);
		if (!(l1 & (1<<12)))
			set_cpu_cap(c, X86_FEATURE_PEBS);
	}


	if (cpu_has_bts)
		ds_init_intel(c);

	n = c->extended_cpuid_level;
	if (n >= 0x80000008) {
		unsigned eax = cpuid_eax(0x80000008);
		c->x86_virt_bits = (eax >> 8) & 0xff;
		c->x86_phys_bits = eax & 0xff;
		/* CPUID workaround for Intel 0F34 CPU */
		if (c->x86_vendor == X86_VENDOR_INTEL &&
		    c->x86 == 0xF && c->x86_model == 0x3 &&
		    c->x86_mask == 0x4)
			c->x86_phys_bits = 36;
	}

	if (c->x86 == 15)
		c->x86_cache_alignment = c->x86_clflush_size * 2;
	if ((c->x86 == 0xf && c->x86_model >= 0x03) ||
	    (c->x86 == 0x6 && c->x86_model >= 0x0e))
		set_cpu_cap(c, X86_FEATURE_CONSTANT_TSC);
	if (c->x86 == 6)
		set_cpu_cap(c, X86_FEATURE_REP_GOOD);
	/* Only P4 (family 15) keeps RDTSC ordered without a barrier. */
	if (c->x86 == 15)
		set_cpu_cap(c, X86_FEATURE_SYNC_RDTSC);
	else
		clear_cpu_cap(c, X86_FEATURE_SYNC_RDTSC);
	c->x86_max_cores = intel_num_cpu_cores(c);

	srat_detect_node();
}
899
672289e9 900static void __cpuinit get_cpu_vendor(struct cpuinfo_x86 *c)
1da177e4
LT
901{
902 char *v = c->x86_vendor_id;
903
904 if (!strcmp(v, "AuthenticAMD"))
905 c->x86_vendor = X86_VENDOR_AMD;
906 else if (!strcmp(v, "GenuineIntel"))
907 c->x86_vendor = X86_VENDOR_INTEL;
908 else
909 c->x86_vendor = X86_VENDOR_UNKNOWN;
910}
911
912struct cpu_model_info {
913 int vendor;
914 int family;
915 char *model_names[16];
916};
917
/* Do some early cpuid on the boot CPU to get some parameter that are
   needed before check_bugs. Everything advanced is in identify_cpu
   below. */
static void __cpuinit early_identify_cpu(struct cpuinfo_x86 *c)
{
	u32 tfms, xlvl;

	/* Reset *c to known defaults before probing. */
	c->loops_per_jiffy = loops_per_jiffy;
	c->x86_cache_size = -1;
	c->x86_vendor = X86_VENDOR_UNKNOWN;
	c->x86_model = c->x86_mask = 0;	/* So far unknown... */
	c->x86_vendor_id[0] = '\0'; /* Unset */
	c->x86_model_id[0] = '\0';  /* Unset */
	c->x86_clflush_size = 64;
	c->x86_cache_alignment = c->x86_clflush_size;
	c->x86_max_cores = 1;
	c->x86_coreid_bits = 0;
	c->extended_cpuid_level = 0;
	memset(&c->x86_capability, 0, sizeof c->x86_capability);

	/* Get vendor name */
	cpuid(0x00000000, (unsigned int *)&c->cpuid_level,
	      (unsigned int *)&c->x86_vendor_id[0],
	      (unsigned int *)&c->x86_vendor_id[8],
	      (unsigned int *)&c->x86_vendor_id[4]);

	get_cpu_vendor(c);

	/* Initialize the standard set of capabilities */
	/* Note that the vendor-specific code below might override */

	/* Intel-defined flags: level 0x00000001 */
	if (c->cpuid_level >= 0x00000001) {
		__u32 misc;
		cpuid(0x00000001, &tfms, &misc, &c->x86_capability[4],
		      &c->x86_capability[0]);
		c->x86 = (tfms >> 8) & 0xf;
		c->x86_model = (tfms >> 4) & 0xf;
		c->x86_mask = tfms & 0xf;
		/* Extended family/model fields apply for family 0xf / >= 6. */
		if (c->x86 == 0xf)
			c->x86 += (tfms >> 20) & 0xff;
		if (c->x86 >= 0x6)
			c->x86_model += ((tfms >> 16) & 0xF) << 4;
		/* Feature bit 19 = CLFLUSH; misc byte 1 = line size / 8. */
		if (c->x86_capability[0] & (1<<19))
			c->x86_clflush_size = ((misc >> 8) & 0xff) * 8;
	} else {
		/* Have CPUID level 0 only - unheard of */
		c->x86 = 4;
	}

#ifdef CONFIG_SMP
	c->phys_proc_id = (cpuid_ebx(1) >> 24) & 0xff;
#endif
	/* AMD-defined flags: level 0x80000001 */
	xlvl = cpuid_eax(0x80000000);
	c->extended_cpuid_level = xlvl;
	if ((xlvl & 0xffff0000) == 0x80000000) {
		if (xlvl >= 0x80000001) {
			c->x86_capability[1] = cpuid_edx(0x80000001);
			c->x86_capability[6] = cpuid_ecx(0x80000001);
		}
		if (xlvl >= 0x80000004)
			get_model_name(c); /* Default name */
	}

	/* Transmeta-defined flags: level 0x80860001 */
	xlvl = cpuid_eax(0x80860000);
	if ((xlvl & 0xffff0000) == 0x80860000) {
		/* Don't set x86_cpuid_level here for now to not confuse. */
		if (xlvl >= 0x80860001)
			c->x86_capability[2] = cpuid_edx(0x80860001);
	}

	/* Early vendor hooks (currently only AMD needs one). */
	switch (c->x86_vendor) {
	case X86_VENDOR_AMD:
		early_init_amd(c);
		break;
	}

}
998
/*
 * This does the hard work of actually picking apart the CPU stuff...
 * Runs once per CPU; dispatches to the vendor-specific init routine
 * and accumulates the feature set common to all CPUs into
 * boot_cpu_data.
 */
void __cpuinit identify_cpu(struct cpuinfo_x86 *c)
{
	int i;

	early_identify_cpu(c);

	init_scattered_cpuid_features(c);

	c->apicid = phys_pkg_id(0);

	/*
	 * Vendor-specific initialization. In this section we
	 * canonicalize the feature flags, meaning if there are
	 * features a certain CPU supports which CPUID doesn't
	 * tell us, CPUID claiming incorrect flags, or other bugs,
	 * we handle them here.
	 *
	 * At the end of this section, c->x86_capability better
	 * indicate the features this CPU genuinely supports!
	 */
	switch (c->x86_vendor) {
	case X86_VENDOR_AMD:
		init_amd(c);
		break;

	case X86_VENDOR_INTEL:
		init_intel(c);
		break;

	case X86_VENDOR_UNKNOWN:
	default:
		display_cacheinfo(c);
		break;
	}

	select_idle_routine(c);
	detect_ht(c);

	/*
	 * On SMP, boot_cpu_data holds the common feature set between
	 * all CPUs; so make sure that we indicate which features are
	 * common between the CPUs. The first time this routine gets
	 * executed, c == &boot_cpu_data.
	 */
	if (c != &boot_cpu_data) {
		/* AND the already accumulated flags with these */
		for (i = 0; i < NCAPINTS; i++)
			boot_cpu_data.x86_capability[i] &= c->x86_capability[i];
	}

#ifdef CONFIG_X86_MCE
	mcheck_init(c);
#endif
	/* APs (not the boot CPU) replicate the BSP's MTRR setup. */
	if (c != &boot_cpu_data)
		mtrr_ap_init();
#ifdef CONFIG_NUMA
	numa_add_cpu(smp_processor_id());
#endif
}
1da177e4 1061
e6982c67 1062void __cpuinit print_cpu_info(struct cpuinfo_x86 *c)
1da177e4
LT
1063{
1064 if (c->x86_model_id[0])
04e1ba85 1065 printk(KERN_INFO "%s", c->x86_model_id);
1da177e4 1066
04e1ba85
TG
1067 if (c->x86_mask || c->cpuid_level >= 0)
1068 printk(KERN_CONT " stepping %02x\n", c->x86_mask);
1da177e4 1069 else
04e1ba85 1070 printk(KERN_CONT "\n");
1da177e4
LT
1071}
1072
1073/*
1074 * Get CPU information for use by the procfs.
1075 */
1076
1077static int show_cpuinfo(struct seq_file *m, void *v)
1078{
1079 struct cpuinfo_x86 *c = v;
04e1ba85 1080 int cpu = 0, i;
1da177e4 1081
04e1ba85 1082 /*
1da177e4
LT
1083 * These flag bits must match the definitions in <asm/cpufeature.h>.
1084 * NULL means this bit is undefined or reserved; either way it doesn't
1085 * have meaning as far as Linux is concerned. Note that it's important
1086 * to realize there is a difference between this table and CPUID -- if
1087 * applications want to get the raw CPUID data, they should access
1088 * /dev/cpu/<cpu_nr>/cpuid instead.
1089 */
121d7bf5 1090 static const char *const x86_cap_flags[] = {
1da177e4 1091 /* Intel-defined */
04e1ba85
TG
1092 "fpu", "vme", "de", "pse", "tsc", "msr", "pae", "mce",
1093 "cx8", "apic", NULL, "sep", "mtrr", "pge", "mca", "cmov",
1094 "pat", "pse36", "pn", "clflush", NULL, "dts", "acpi", "mmx",
1095 "fxsr", "sse", "sse2", "ss", "ht", "tm", "ia64", "pbe",
1da177e4
LT
1096
1097 /* AMD-defined */
3c3b73b6 1098 NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
1da177e4
LT
1099 NULL, NULL, NULL, "syscall", NULL, NULL, NULL, NULL,
1100 NULL, NULL, NULL, NULL, "nx", NULL, "mmxext", NULL,
f790cd30
AK
1101 NULL, "fxsr_opt", "pdpe1gb", "rdtscp", NULL, "lm",
1102 "3dnowext", "3dnow",
1da177e4
LT
1103
1104 /* Transmeta-defined */
1105 "recovery", "longrun", NULL, "lrti", NULL, NULL, NULL, NULL,
1106 NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
1107 NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
1108 NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
1109
1110 /* Other (Linux-defined) */
ec481536
PA
1111 "cxmmx", "k6_mtrr", "cyrix_arr", "centaur_mcr",
1112 NULL, NULL, NULL, NULL,
1113 "constant_tsc", "up", NULL, "arch_perfmon",
1114 "pebs", "bts", NULL, "sync_rdtsc",
1115 "rep_good", NULL, NULL, NULL, NULL, NULL, NULL, NULL,
1da177e4
LT
1116 NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
1117
1118 /* Intel-defined (#2) */
9d95dd84 1119 "pni", NULL, NULL, "monitor", "ds_cpl", "vmx", "smx", "est",
dcf10307 1120 "tm2", "ssse3", "cid", NULL, NULL, "cx16", "xtpr", NULL,
e1054b39 1121 NULL, NULL, "dca", "sse4_1", "sse4_2", NULL, NULL, "popcnt",
1da177e4
LT
1122 NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
1123
5b7abc6f
PA
1124 /* VIA/Cyrix/Centaur-defined */
1125 NULL, NULL, "rng", "rng_en", NULL, NULL, "ace", "ace_en",
ec481536 1126 "ace2", "ace2_en", "phe", "phe_en", "pmm", "pmm_en", NULL, NULL,
5b7abc6f
PA
1127 NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
1128 NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
1129
1da177e4 1130 /* AMD-defined (#2) */
e1054b39
PA
1131 "lahf_lm", "cmp_legacy", "svm", "extapic",
1132 "cr8_legacy", "abm", "sse4a", "misalignsse",
1133 "3dnowprefetch", "osvw", "ibs", "sse5",
1134 "skinit", "wdt", NULL, NULL,
1da177e4 1135 NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
5b7abc6f 1136 NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
1d67953f
VP
1137
1138 /* Auxiliary (Linux-defined) */
1139 "ida", NULL, NULL, NULL, NULL, NULL, NULL, NULL,
1140 NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
1141 NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
1142 NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
1da177e4 1143 };
121d7bf5 1144 static const char *const x86_power_flags[] = {
1da177e4
LT
1145 "ts", /* temperature sensor */
1146 "fid", /* frequency id control */
1147 "vid", /* voltage id control */
1148 "ttp", /* thermal trip */
1149 "tm",
3f98bc49 1150 "stc",
f790cd30
AK
1151 "100mhzsteps",
1152 "hwpstate",
d824395c
JR
1153 "", /* tsc invariant mapped to constant_tsc */
1154 /* nothing */
1da177e4
LT
1155 };
1156
1157
1158#ifdef CONFIG_SMP
92cb7612 1159 cpu = c->cpu_index;
1da177e4
LT
1160#endif
1161
04e1ba85
TG
1162 seq_printf(m, "processor\t: %u\n"
1163 "vendor_id\t: %s\n"
1164 "cpu family\t: %d\n"
1165 "model\t\t: %d\n"
1166 "model name\t: %s\n",
1167 (unsigned)cpu,
1168 c->x86_vendor_id[0] ? c->x86_vendor_id : "unknown",
1169 c->x86,
1170 (int)c->x86_model,
1171 c->x86_model_id[0] ? c->x86_model_id : "unknown");
1172
1da177e4
LT
1173 if (c->x86_mask || c->cpuid_level >= 0)
1174 seq_printf(m, "stepping\t: %d\n", c->x86_mask);
1175 else
1176 seq_printf(m, "stepping\t: unknown\n");
04e1ba85
TG
1177
1178 if (cpu_has(c, X86_FEATURE_TSC)) {
92cb7612 1179 unsigned int freq = cpufreq_quick_get((unsigned)cpu);
04e1ba85 1180
95235ca2
VP
1181 if (!freq)
1182 freq = cpu_khz;
1da177e4 1183 seq_printf(m, "cpu MHz\t\t: %u.%03u\n",
04e1ba85 1184 freq / 1000, (freq % 1000));
1da177e4
LT
1185 }
1186
1187 /* Cache size */
04e1ba85 1188 if (c->x86_cache_size >= 0)
1da177e4 1189 seq_printf(m, "cache size\t: %d KB\n", c->x86_cache_size);
04e1ba85 1190
1da177e4 1191#ifdef CONFIG_SMP
94605eff 1192 if (smp_num_siblings * c->x86_max_cores > 1) {
f3fa8ebc 1193 seq_printf(m, "physical id\t: %d\n", c->phys_proc_id);
08357611
MT
1194 seq_printf(m, "siblings\t: %d\n",
1195 cpus_weight(per_cpu(cpu_core_map, cpu)));
f3fa8ebc 1196 seq_printf(m, "core id\t\t: %d\n", c->cpu_core_id);
94605eff 1197 seq_printf(m, "cpu cores\t: %d\n", c->booted_cores);
db468681 1198 }
04e1ba85 1199#endif
1da177e4
LT
1200
1201 seq_printf(m,
04e1ba85
TG
1202 "fpu\t\t: yes\n"
1203 "fpu_exception\t: yes\n"
1204 "cpuid level\t: %d\n"
1205 "wp\t\t: yes\n"
1206 "flags\t\t:",
1da177e4
LT
1207 c->cpuid_level);
1208
04e1ba85
TG
1209 for (i = 0; i < 32*NCAPINTS; i++)
1210 if (cpu_has(c, i) && x86_cap_flags[i] != NULL)
1211 seq_printf(m, " %s", x86_cap_flags[i]);
1212
1da177e4
LT
1213 seq_printf(m, "\nbogomips\t: %lu.%02lu\n",
1214 c->loops_per_jiffy/(500000/HZ),
1215 (c->loops_per_jiffy/(5000/HZ)) % 100);
1216
04e1ba85 1217 if (c->x86_tlbsize > 0)
1da177e4
LT
1218 seq_printf(m, "TLB size\t: %d 4K pages\n", c->x86_tlbsize);
1219 seq_printf(m, "clflush size\t: %d\n", c->x86_clflush_size);
1220 seq_printf(m, "cache_alignment\t: %d\n", c->x86_cache_alignment);
1221
04e1ba85 1222 seq_printf(m, "address sizes\t: %u bits physical, %u bits virtual\n",
1da177e4
LT
1223 c->x86_phys_bits, c->x86_virt_bits);
1224
1225 seq_printf(m, "power management:");
04e1ba85
TG
1226 for (i = 0; i < 32; i++) {
1227 if (c->x86_power & (1 << i)) {
1228 if (i < ARRAY_SIZE(x86_power_flags) &&
1229 x86_power_flags[i])
1230 seq_printf(m, "%s%s",
1231 x86_power_flags[i][0]?" ":"",
1232 x86_power_flags[i]);
1233 else
1234 seq_printf(m, " [%d]", i);
1235 }
1da177e4 1236 }
1da177e4 1237
d31ddaa1 1238 seq_printf(m, "\n\n");
1da177e4
LT
1239
1240 return 0;
1241}
1242
1243static void *c_start(struct seq_file *m, loff_t *pos)
1244{
92cb7612 1245 if (*pos == 0) /* just in case, cpu 0 is not the first */
c0c52d28
AH
1246 *pos = first_cpu(cpu_online_map);
1247 if ((*pos) < NR_CPUS && cpu_online(*pos))
92cb7612
MT
1248 return &cpu_data(*pos);
1249 return NULL;
1da177e4
LT
1250}
1251
1252static void *c_next(struct seq_file *m, void *v, loff_t *pos)
1253{
c0c52d28 1254 *pos = next_cpu(*pos, cpu_online_map);
1da177e4
LT
1255 return c_start(m, pos);
1256}
1257
/* seq_file stop: nothing to release for /proc/cpuinfo iteration. */
static void c_stop(struct seq_file *m, void *v)
{
}
1261
1262struct seq_operations cpuinfo_op = {
04e1ba85 1263 .start = c_start,
1da177e4
LT
1264 .next = c_next,
1265 .stop = c_stop,
1266 .show = show_cpuinfo,
1267};