x86: use reboot_type on EFI 32
arch/x86/kernel/setup_64.c
/*
 * Copyright (C) 1995 Linus Torvalds
 */

/*
 * This file handles the architecture-dependent parts of initialization
 */

#include <linux/errno.h>
#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/stddef.h>
#include <linux/unistd.h>
#include <linux/ptrace.h>
#include <linux/slab.h>
#include <linux/user.h>
#include <linux/a.out.h>
#include <linux/screen_info.h>
#include <linux/ioport.h>
#include <linux/delay.h>
#include <linux/init.h>
#include <linux/initrd.h>
#include <linux/highmem.h>
#include <linux/bootmem.h>
#include <linux/module.h>
#include <asm/processor.h>
#include <linux/console.h>
#include <linux/seq_file.h>
#include <linux/crash_dump.h>
#include <linux/root_dev.h>
#include <linux/pci.h>
#include <linux/efi.h>
#include <linux/acpi.h>
#include <linux/kallsyms.h>
#include <linux/edd.h>
#include <linux/mmzone.h>
#include <linux/kexec.h>
#include <linux/cpufreq.h>
#include <linux/dmi.h>
#include <linux/dma-mapping.h>
#include <linux/ctype.h>
#include <linux/uaccess.h>

#include <asm/mtrr.h>
#include <asm/uaccess.h>
#include <asm/system.h>
#include <asm/vsyscall.h>
#include <asm/io.h>
#include <asm/smp.h>
#include <asm/msr.h>
#include <asm/desc.h>
#include <video/edid.h>
#include <asm/e820.h>
#include <asm/dma.h>
#include <asm/gart.h>
#include <asm/mpspec.h>
#include <asm/mmu_context.h>
#include <asm/proto.h>
#include <asm/setup.h>
#include <asm/mach_apic.h>
#include <asm/numa.h>
#include <asm/sections.h>
#include <asm/dmi.h>
#include <asm/cacheflush.h>
#include <asm/mce.h>
#include <asm/ds.h>
#include <asm/topology.h>

#ifdef CONFIG_PARAVIRT
#include <asm/paravirt.h>
#else
#define ARCH_SETUP
#endif

/*
 * Machine setup..
 */

struct cpuinfo_x86 boot_cpu_data __read_mostly;
EXPORT_SYMBOL(boot_cpu_data);

__u32 cleared_cpu_caps[NCAPINTS] __cpuinitdata;

unsigned long mmu_cr4_features;

/* Boot loader ID as an integer, for the benefit of proc_dointvec */
int bootloader_type;

unsigned long saved_video_mode;

int force_mwait __cpuinitdata;

/*
 * Early DMI memory
 */
int dmi_alloc_index;
char dmi_alloc_data[DMI_MAX_DATA];

/*
 * Setup options
 */
struct screen_info screen_info;
EXPORT_SYMBOL(screen_info);
struct sys_desc_table_struct {
	unsigned short length;
	unsigned char table[0];
};

struct edid_info edid_info;
EXPORT_SYMBOL_GPL(edid_info);

extern int root_mountflags;

char __initdata command_line[COMMAND_LINE_SIZE];

struct resource standard_io_resources[] = {
	{ .name = "dma1", .start = 0x00, .end = 0x1f,
		.flags = IORESOURCE_BUSY | IORESOURCE_IO },
	{ .name = "pic1", .start = 0x20, .end = 0x21,
		.flags = IORESOURCE_BUSY | IORESOURCE_IO },
	{ .name = "timer0", .start = 0x40, .end = 0x43,
		.flags = IORESOURCE_BUSY | IORESOURCE_IO },
	{ .name = "timer1", .start = 0x50, .end = 0x53,
		.flags = IORESOURCE_BUSY | IORESOURCE_IO },
	{ .name = "keyboard", .start = 0x60, .end = 0x6f,
		.flags = IORESOURCE_BUSY | IORESOURCE_IO },
	{ .name = "dma page reg", .start = 0x80, .end = 0x8f,
		.flags = IORESOURCE_BUSY | IORESOURCE_IO },
	{ .name = "pic2", .start = 0xa0, .end = 0xa1,
		.flags = IORESOURCE_BUSY | IORESOURCE_IO },
	{ .name = "dma2", .start = 0xc0, .end = 0xdf,
		.flags = IORESOURCE_BUSY | IORESOURCE_IO },
	{ .name = "fpu", .start = 0xf0, .end = 0xff,
		.flags = IORESOURCE_BUSY | IORESOURCE_IO }
};

#define IORESOURCE_RAM (IORESOURCE_BUSY | IORESOURCE_MEM)

static struct resource data_resource = {
	.name = "Kernel data",
	.start = 0,
	.end = 0,
	.flags = IORESOURCE_RAM,
};
static struct resource code_resource = {
	.name = "Kernel code",
	.start = 0,
	.end = 0,
	.flags = IORESOURCE_RAM,
};
static struct resource bss_resource = {
	.name = "Kernel bss",
	.start = 0,
	.end = 0,
	.flags = IORESOURCE_RAM,
};

static void __cpuinit early_identify_cpu(struct cpuinfo_x86 *c);

#ifdef CONFIG_PROC_VMCORE
/* elfcorehdr= specifies the location of elf core header
 * stored by the crashed kernel. This option will be passed
 * by kexec loader to the capture kernel.
 */
static int __init setup_elfcorehdr(char *arg)
{
	char *end;
	if (!arg)
		return -EINVAL;
	elfcorehdr_addr = memparse(arg, &end);
	return end > arg ? 0 : -EINVAL;
}
early_param("elfcorehdr", setup_elfcorehdr);
#endif
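
/*
 * Illustrative usage (not from this file): the kexec/kdump loader adds
 * something like "elfcorehdr=0x12345000" to the capture kernel's command
 * line; memparse() also accepts K/M/G suffixes, so "elfcorehdr=128M"
 * would parse as 0x8000000.
 */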

#ifndef CONFIG_NUMA
static void __init
contig_initmem_init(unsigned long start_pfn, unsigned long end_pfn)
{
	unsigned long bootmap_size, bootmap;

	bootmap_size = bootmem_bootmap_pages(end_pfn)<<PAGE_SHIFT;
	bootmap = find_e820_area(0, end_pfn<<PAGE_SHIFT, bootmap_size);
	if (bootmap == -1L)
		panic("Cannot find bootmem map of size %ld\n", bootmap_size);
	bootmap_size = init_bootmem(bootmap >> PAGE_SHIFT, end_pfn);
	e820_register_active_regions(0, start_pfn, end_pfn);
	free_bootmem_with_active_regions(0, end_pfn);
	reserve_bootmem(bootmap, bootmap_size);
}
#endif

#if defined(CONFIG_EDD) || defined(CONFIG_EDD_MODULE)
struct edd edd;
#ifdef CONFIG_EDD_MODULE
EXPORT_SYMBOL(edd);
#endif
/**
 * copy_edd() - Copy the BIOS EDD information
 *              from boot_params into a safe place.
 *
 */
static inline void copy_edd(void)
{
	memcpy(edd.mbr_signature, boot_params.edd_mbr_sig_buffer,
	       sizeof(edd.mbr_signature));
	memcpy(edd.edd_info, boot_params.eddbuf, sizeof(edd.edd_info));
	edd.mbr_signature_nr = boot_params.edd_mbr_sig_buf_entries;
	edd.edd_info_nr = boot_params.eddbuf_entries;
}
#else
static inline void copy_edd(void)
{
}
#endif

#ifdef CONFIG_KEXEC
static void __init reserve_crashkernel(void)
{
	unsigned long long free_mem;
	unsigned long long crash_size, crash_base;
	int ret;

	free_mem =
		((unsigned long long)max_low_pfn - min_low_pfn) << PAGE_SHIFT;

	ret = parse_crashkernel(boot_command_line, free_mem,
				&crash_size, &crash_base);
	if (ret == 0 && crash_size) {
		if (crash_base > 0) {
			printk(KERN_INFO "Reserving %ldMB of memory at %ldMB "
			       "for crashkernel (System RAM: %ldMB)\n",
			       (unsigned long)(crash_size >> 20),
			       (unsigned long)(crash_base >> 20),
			       (unsigned long)(free_mem >> 20));
			crashk_res.start = crash_base;
			crashk_res.end = crash_base + crash_size - 1;
			reserve_bootmem(crash_base, crash_size);
		} else
			printk(KERN_INFO "crashkernel reservation failed - "
			       "you have to specify a base address\n");
	}
}
#else
static inline void __init reserve_crashkernel(void)
{}
#endif
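
/*
 * Illustrative usage (standard kdump syntax, stated as an assumption):
 * booting with "crashkernel=128M@16M" makes parse_crashkernel() return
 * crash_size = 128 << 20 and crash_base = 16 << 20, reserving 128MB at
 * physical 16MB for the capture kernel.
 */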

/* Overridden in paravirt.c if CONFIG_PARAVIRT */
void __attribute__((weak)) __init memory_setup(void)
{
	machine_specific_memory_setup();
}

void __init setup_arch(char **cmdline_p)
{
	unsigned i;

	printk(KERN_INFO "Command line: %s\n", boot_command_line);

	ROOT_DEV = old_decode_dev(boot_params.hdr.root_dev);
	screen_info = boot_params.screen_info;
	edid_info = boot_params.edid_info;
	saved_video_mode = boot_params.hdr.vid_mode;
	bootloader_type = boot_params.hdr.type_of_loader;

#ifdef CONFIG_BLK_DEV_RAM
	rd_image_start = boot_params.hdr.ram_size & RAMDISK_IMAGE_START_MASK;
	rd_prompt = ((boot_params.hdr.ram_size & RAMDISK_PROMPT_FLAG) != 0);
	rd_doload = ((boot_params.hdr.ram_size & RAMDISK_LOAD_FLAG) != 0);
#endif
#ifdef CONFIG_EFI
	if (!strncmp((char *)&boot_params.efi_info.efi_loader_signature,
		     "EL64", 4))
		efi_enabled = 1;
#endif
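	/*
	 * Note (assumption from the x86 boot protocol, not this file): the
	 * loader signature is "EL64" for 64-bit EFI and "EL32" for 32-bit
	 * EFI, so this 64-bit kernel enables EFI only under a matching
	 * 64-bit loader.
	 */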

	ARCH_SETUP

	memory_setup();

	copy_edd();

	if (!boot_params.hdr.root_flags)
		root_mountflags &= ~MS_RDONLY;
	init_mm.start_code = (unsigned long) &_text;
	init_mm.end_code = (unsigned long) &_etext;
	init_mm.end_data = (unsigned long) &_edata;
	init_mm.brk = (unsigned long) &_end;

	code_resource.start = virt_to_phys(&_text);
	code_resource.end = virt_to_phys(&_etext)-1;
	data_resource.start = virt_to_phys(&_etext);
	data_resource.end = virt_to_phys(&_edata)-1;
	bss_resource.start = virt_to_phys(&__bss_start);
	bss_resource.end = virt_to_phys(&__bss_stop)-1;

	early_identify_cpu(&boot_cpu_data);

	strlcpy(command_line, boot_command_line, COMMAND_LINE_SIZE);
	*cmdline_p = command_line;

	parse_early_param();

	finish_e820_parsing();

	early_gart_iommu_check();

	e820_register_active_regions(0, 0, -1UL);
	/*
	 * partially used pages are not usable - thus
	 * we are rounding upwards:
	 */
	end_pfn = e820_end_of_ram();
	/* update e820 for memory not covered by WB MTRRs */
	mtrr_bp_init();
	if (mtrr_trim_uncached_memory(end_pfn)) {
		e820_register_active_regions(0, 0, -1UL);
		end_pfn = e820_end_of_ram();
	}

	num_physpages = end_pfn;

	check_efer();

	init_memory_mapping(0, (end_pfn_map << PAGE_SHIFT));
	if (efi_enabled)
		efi_init();

	dmi_scan_machine();

	io_delay_init();

#ifdef CONFIG_SMP
	/* setup to use the early static init tables during kernel startup */
	x86_cpu_to_apicid_early_ptr = (void *)x86_cpu_to_apicid_init;
	x86_bios_cpu_apicid_early_ptr = (void *)x86_bios_cpu_apicid_init;
#ifdef CONFIG_NUMA
	x86_cpu_to_node_map_early_ptr = (void *)x86_cpu_to_node_map_init;
#endif
#endif

#ifdef CONFIG_ACPI
	/*
	 * Initialize the ACPI boot-time table parser (gets the RSDP and SDT).
	 * Call this early for SRAT node setup.
	 */
	acpi_boot_table_init();
#endif

	/* How many end-of-memory variables you have, grandma! */
	max_low_pfn = end_pfn;
	max_pfn = end_pfn;
	high_memory = (void *)__va(end_pfn * PAGE_SIZE - 1) + 1;

	/* Remove active ranges so rediscovery with NUMA-awareness happens */
	remove_all_active_ranges();

#ifdef CONFIG_ACPI_NUMA
	/*
	 * Parse SRAT to discover nodes.
	 */
	acpi_numa_init();
#endif

#ifdef CONFIG_NUMA
	numa_initmem_init(0, end_pfn);
#else
	contig_initmem_init(0, end_pfn);
#endif

	early_res_to_bootmem();

#ifdef CONFIG_ACPI_SLEEP
	/*
	 * Reserve low memory region for sleep support.
	 */
	acpi_reserve_bootmem();
#endif

	if (efi_enabled) {
		efi_map_memmap();
		efi_reserve_bootmem();
	}

	/*
	 * Find and reserve possible boot-time SMP configuration:
	 */
	find_smp_config();
#ifdef CONFIG_BLK_DEV_INITRD
	if (boot_params.hdr.type_of_loader && boot_params.hdr.ramdisk_image) {
		unsigned long ramdisk_image = boot_params.hdr.ramdisk_image;
		unsigned long ramdisk_size = boot_params.hdr.ramdisk_size;
		unsigned long ramdisk_end = ramdisk_image + ramdisk_size;
		unsigned long end_of_mem = end_pfn << PAGE_SHIFT;

		if (ramdisk_end <= end_of_mem) {
			reserve_bootmem_generic(ramdisk_image, ramdisk_size);
			initrd_start = ramdisk_image + PAGE_OFFSET;
			initrd_end = initrd_start+ramdisk_size;
		} else {
			/* Assumes everything on node 0 */
			free_bootmem(ramdisk_image, ramdisk_size);
			printk(KERN_ERR "initrd extends beyond end of memory "
			       "(0x%08lx > 0x%08lx)\ndisabling initrd\n",
			       ramdisk_end, end_of_mem);
			initrd_start = 0;
		}
	}
#endif
	reserve_crashkernel();
	paging_init();
	map_vsyscall();

	early_quirks();

#ifdef CONFIG_ACPI
	/*
	 * Read APIC and some other early information from ACPI tables.
	 */
	acpi_boot_init();
#endif

	init_cpu_to_node();

	/*
	 * get boot-time SMP configuration:
	 */
	if (smp_found_config)
		get_smp_config();
	init_apic_mappings();
	ioapic_init_mappings();

	/*
	 * We trust e820 completely. No explicit ROM probing in memory.
	 */
	e820_reserve_resources(&code_resource, &data_resource, &bss_resource);
	e820_mark_nosave_regions();

	/* request I/O space for devices used on all i[345]86 PCs */
	for (i = 0; i < ARRAY_SIZE(standard_io_resources); i++)
		request_resource(&ioport_resource, &standard_io_resources[i]);

	e820_setup_gap();

#ifdef CONFIG_VT
#if defined(CONFIG_VGA_CONSOLE)
	if (!efi_enabled || (efi_mem_type(0xa0000) != EFI_CONVENTIONAL_MEMORY))
		conswitchp = &vga_con;
#elif defined(CONFIG_DUMMY_CONSOLE)
	conswitchp = &dummy_con;
#endif
#endif
}

static int __cpuinit get_model_name(struct cpuinfo_x86 *c)
{
	unsigned int *v;

	if (c->extended_cpuid_level < 0x80000004)
		return 0;

	v = (unsigned int *) c->x86_model_id;
	cpuid(0x80000002, &v[0], &v[1], &v[2], &v[3]);
	cpuid(0x80000003, &v[4], &v[5], &v[6], &v[7]);
	cpuid(0x80000004, &v[8], &v[9], &v[10], &v[11]);
	c->x86_model_id[48] = 0;
	return 1;
}

static void __cpuinit display_cacheinfo(struct cpuinfo_x86 *c)
{
	unsigned int n, dummy, eax, ebx, ecx, edx;

	n = c->extended_cpuid_level;

	if (n >= 0x80000005) {
		cpuid(0x80000005, &dummy, &ebx, &ecx, &edx);
		printk(KERN_INFO "CPU: L1 I Cache: %dK (%d bytes/line), "
		       "D cache %dK (%d bytes/line)\n",
		       edx>>24, edx&0xFF, ecx>>24, ecx&0xFF);
		c->x86_cache_size = (ecx>>24) + (edx>>24);
		/* On K8 L1 TLB is inclusive, so don't count it */
		c->x86_tlbsize = 0;
	}

	if (n >= 0x80000006) {
		cpuid(0x80000006, &dummy, &ebx, &ecx, &edx);
		ecx = cpuid_ecx(0x80000006);
		c->x86_cache_size = ecx >> 16;
		c->x86_tlbsize += ((ebx >> 16) & 0xfff) + (ebx & 0xfff);

		printk(KERN_INFO "CPU: L2 Cache: %dK (%d bytes/line)\n",
		       c->x86_cache_size, ecx & 0xFF);
	}
	if (n >= 0x80000008) {
		cpuid(0x80000008, &eax, &dummy, &dummy, &dummy);
		c->x86_virt_bits = (eax >> 8) & 0xff;
		c->x86_phys_bits = eax & 0xff;
	}
}
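
/*
 * Worked decode of the leaf 0x80000006 fields used above (labels per
 * AMD's CPUID conventions, stated as an assumption): ECX[31:16] is the
 * L2 size in KB and ECX[7:0] the line size, so ECX = 0x04008140 would
 * print "L2 Cache: 1024K (64 bytes/line)".
 */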

#ifdef CONFIG_NUMA
static int nearby_node(int apicid)
{
	int i, node;

	for (i = apicid - 1; i >= 0; i--) {
		node = apicid_to_node[i];
		if (node != NUMA_NO_NODE && node_online(node))
			return node;
	}
	for (i = apicid + 1; i < MAX_LOCAL_APIC; i++) {
		node = apicid_to_node[i];
		if (node != NUMA_NO_NODE && node_online(node))
			return node;
	}
	return first_node(node_online_map); /* Shouldn't happen */
}
#endif

/*
 * On an AMD dual core setup the lower bits of the APIC id distinguish the
 * cores.  Assumes number of cores is a power of two.
 */
static void __cpuinit amd_detect_cmp(struct cpuinfo_x86 *c)
{
#ifdef CONFIG_SMP
	unsigned bits;
#ifdef CONFIG_NUMA
	int cpu = smp_processor_id();
	int node = 0;
	unsigned apicid = hard_smp_processor_id();
#endif
	bits = c->x86_coreid_bits;

	/* Low order bits define the core id (index of core in socket) */
	c->cpu_core_id = c->phys_proc_id & ((1 << bits)-1);
	/* Convert the APIC ID into the socket ID */
	c->phys_proc_id = phys_pkg_id(bits);

#ifdef CONFIG_NUMA
	node = c->phys_proc_id;
	if (apicid_to_node[apicid] != NUMA_NO_NODE)
		node = apicid_to_node[apicid];
	if (!node_online(node)) {
		/* Two possibilities here:
		   - The CPU is missing memory and no node was created.
		     In that case try picking one from a nearby CPU
		   - The APIC IDs differ from the HyperTransport node IDs
		     which the K8 northbridge parsing fills in.
		     Assume they are all increased by a constant offset,
		     but in the same order as the HT nodeids.
		     If that doesn't result in a usable node fall back to the
		     path for the previous case. */

		int ht_nodeid = apicid - (cpu_data(0).phys_proc_id << bits);

		if (ht_nodeid >= 0 &&
		    apicid_to_node[ht_nodeid] != NUMA_NO_NODE)
			node = apicid_to_node[ht_nodeid];
		/* Pick a nearby node */
		if (!node_online(node))
			node = nearby_node(apicid);
	}
	numa_set_node(cpu, node);

	printk(KERN_INFO "CPU %d/%x -> Node %d\n", cpu, apicid, node);
#endif
#endif
}
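
/*
 * Worked example (illustrative): on a dual-core K8 with
 * x86_coreid_bits = 1 and initial APIC ID 5 (binary 101), the core id
 * is 5 & ((1 << 1) - 1) = 1, and phys_pkg_id(1) shifts the low bit
 * away to yield socket (physical package) id 2.
 */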

static void __cpuinit early_init_amd_mc(struct cpuinfo_x86 *c)
{
#ifdef CONFIG_SMP
	unsigned bits, ecx;

	/* Multi core CPU? */
	if (c->extended_cpuid_level < 0x80000008)
		return;

	ecx = cpuid_ecx(0x80000008);

	c->x86_max_cores = (ecx & 0xff) + 1;

	/* CPU telling us the core id bits shift? */
	bits = (ecx >> 12) & 0xF;

	/* Otherwise recompute */
	if (bits == 0) {
		while ((1 << bits) < c->x86_max_cores)
			bits++;
	}

	c->x86_coreid_bits = bits;
#endif
}

#define ENABLE_C1E_MASK		0x18000000
#define CPUID_PROCESSOR_SIGNATURE	1
#define CPUID_XFAM		0x0ff00000
#define CPUID_XFAM_K8		0x00000000
#define CPUID_XFAM_10H		0x00100000
#define CPUID_XFAM_11H		0x00200000
#define CPUID_XMOD		0x000f0000
#define CPUID_XMOD_REV_F	0x00040000

/* AMD systems with C1E don't have a working lAPIC timer. Check for that. */
static __cpuinit int amd_apic_timer_broken(void)
{
	u32 lo, hi, eax = cpuid_eax(CPUID_PROCESSOR_SIGNATURE);

	switch (eax & CPUID_XFAM) {
	case CPUID_XFAM_K8:
		if ((eax & CPUID_XMOD) < CPUID_XMOD_REV_F)
			break;
	case CPUID_XFAM_10H:
	case CPUID_XFAM_11H:
		rdmsr(MSR_K8_ENABLE_C1E, lo, hi);
		if (lo & ENABLE_C1E_MASK)
			return 1;
		break;
	default:
		/* err on the side of caution */
		return 1;
	}
	return 0;
}

static void __cpuinit early_init_amd(struct cpuinfo_x86 *c)
{
	early_init_amd_mc(c);

	/* c->x86_power is 8000_0007 edx. Bit 8 is constant TSC */
	if (c->x86_power & (1<<8))
		set_cpu_cap(c, X86_FEATURE_CONSTANT_TSC);
}

static void __cpuinit init_amd(struct cpuinfo_x86 *c)
{
	unsigned level;

#ifdef CONFIG_SMP
	unsigned long value;

	/*
	 * Disable TLB flush filter by setting HWCR.FFDIS on K8
	 * bit 6 of msr C001_0015
	 *
	 * Errata 63 for SH-B3 steppings
	 * Errata 122 for all steppings (F+ have it disabled by default)
	 */
	if (c->x86 == 15) {
		rdmsrl(MSR_K8_HWCR, value);
		value |= 1 << 6;
		wrmsrl(MSR_K8_HWCR, value);
	}
#endif

	/* Bit 31 in normal CPUID used for nonstandard 3DNow ID;
	   3DNow is IDd by bit 31 in extended CPUID (1*32+31) anyway */
	clear_bit(0*32+31, (unsigned long *)&c->x86_capability);

	/* On C+ stepping K8 rep microcode works well for copy/memset */
	level = cpuid_eax(1);
	if (c->x86 == 15 && ((level >= 0x0f48 && level < 0x0f50) ||
			     level >= 0x0f58))
		set_cpu_cap(c, X86_FEATURE_REP_GOOD);
	if (c->x86 == 0x10 || c->x86 == 0x11)
		set_cpu_cap(c, X86_FEATURE_REP_GOOD);

	/* Enable workaround for FXSAVE leak */
	if (c->x86 >= 6)
		set_cpu_cap(c, X86_FEATURE_FXSAVE_LEAK);

	level = get_model_name(c);
	if (!level) {
		switch (c->x86) {
		case 15:
			/* Should distinguish Models here, but this is only
			   a fallback anyways. */
			strcpy(c->x86_model_id, "Hammer");
			break;
		}
	}
	display_cacheinfo(c);

	/* Multi core CPU? */
	if (c->extended_cpuid_level >= 0x80000008)
		amd_detect_cmp(c);

	if (c->extended_cpuid_level >= 0x80000006 &&
	    (cpuid_edx(0x80000006) & 0xf000))
		num_cache_leaves = 4;
	else
		num_cache_leaves = 3;

	if (c->x86 == 0xf || c->x86 == 0x10 || c->x86 == 0x11)
		set_cpu_cap(c, X86_FEATURE_K8);

	/* MFENCE stops RDTSC speculation */
	set_cpu_cap(c, X86_FEATURE_MFENCE_RDTSC);

	if (amd_apic_timer_broken())
		disable_apic_timer = 1;
}

void __cpuinit detect_ht(struct cpuinfo_x86 *c)
{
#ifdef CONFIG_SMP
	u32 eax, ebx, ecx, edx;
	int index_msb, core_bits;

	cpuid(1, &eax, &ebx, &ecx, &edx);

	if (!cpu_has(c, X86_FEATURE_HT))
		return;
	if (cpu_has(c, X86_FEATURE_CMP_LEGACY))
		goto out;

	smp_num_siblings = (ebx & 0xff0000) >> 16;

	if (smp_num_siblings == 1) {
		printk(KERN_INFO "CPU: Hyper-Threading is disabled\n");
	} else if (smp_num_siblings > 1) {

		if (smp_num_siblings > NR_CPUS) {
			printk(KERN_WARNING "CPU: Unsupported number of "
			       "siblings %d", smp_num_siblings);
			smp_num_siblings = 1;
			return;
		}

		index_msb = get_count_order(smp_num_siblings);
		c->phys_proc_id = phys_pkg_id(index_msb);

		smp_num_siblings = smp_num_siblings / c->x86_max_cores;

		index_msb = get_count_order(smp_num_siblings);

		core_bits = get_count_order(c->x86_max_cores);

		c->cpu_core_id = phys_pkg_id(index_msb) &
					     ((1 << core_bits) - 1);
	}
out:
	if ((c->x86_max_cores * smp_num_siblings) > 1) {
		printk(KERN_INFO "CPU: Physical Processor ID: %d\n",
		       c->phys_proc_id);
		printk(KERN_INFO "CPU: Processor Core ID: %d\n",
		       c->cpu_core_id);
	}
#endif
}
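
/*
 * Worked example (illustrative): CPUID.1 EBX[23:16] = 4 on a dual-core
 * HT package means 4 logical CPUs per package; with x86_max_cores = 2
 * the sibling count per core becomes 4 / 2 = 2, so one APIC ID bit
 * selects the thread and the next bit selects the core.
 */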

/*
 * find out the number of processor cores on the die
 */
static int __cpuinit intel_num_cpu_cores(struct cpuinfo_x86 *c)
{
	unsigned int eax, t;

	if (c->cpuid_level < 4)
		return 1;

	cpuid_count(4, 0, &eax, &t, &t, &t);

	if (eax & 0x1f)
		return ((eax >> 26) + 1);
	else
		return 1;
}
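
/*
 * Decode note (Intel leaf 4 semantics, stated as an assumption):
 * EAX[4:0] is the cache type of subleaf 0 (0 = no leaf 4 data) and
 * EAX[31:26] is "maximum cores per package" minus one, so
 * EAX = 0x04000121 yields (0x04000121 >> 26) + 1 = 2 cores.
 */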

static void srat_detect_node(void)
{
#ifdef CONFIG_NUMA
	unsigned node;
	int cpu = smp_processor_id();
	int apicid = hard_smp_processor_id();

	/* Don't do the funky fallback heuristics the AMD version employs
	   for now. */
	node = apicid_to_node[apicid];
	if (node == NUMA_NO_NODE)
		node = first_node(node_online_map);
	numa_set_node(cpu, node);

	printk(KERN_INFO "CPU %d/%x -> Node %d\n", cpu, apicid, node);
#endif
}

static void __cpuinit early_init_intel(struct cpuinfo_x86 *c)
{
	if ((c->x86 == 0xf && c->x86_model >= 0x03) ||
	    (c->x86 == 0x6 && c->x86_model >= 0x0e))
		set_bit(X86_FEATURE_CONSTANT_TSC, &c->x86_capability);
}

static void __cpuinit init_intel(struct cpuinfo_x86 *c)
{
	/* Cache sizes */
	unsigned n;

	init_intel_cacheinfo(c);
	if (c->cpuid_level > 9) {
		unsigned eax = cpuid_eax(10);
		/* Check for version and the number of counters */
		if ((eax & 0xff) && (((eax>>8) & 0xff) > 1))
			set_cpu_cap(c, X86_FEATURE_ARCH_PERFMON);
	}

	if (cpu_has_ds) {
		unsigned int l1, l2;
		rdmsr(MSR_IA32_MISC_ENABLE, l1, l2);
		if (!(l1 & (1<<11)))
			set_cpu_cap(c, X86_FEATURE_BTS);
		if (!(l1 & (1<<12)))
			set_cpu_cap(c, X86_FEATURE_PEBS);
	}

	if (cpu_has_bts)
		ds_init_intel(c);

	n = c->extended_cpuid_level;
	if (n >= 0x80000008) {
		unsigned eax = cpuid_eax(0x80000008);
		c->x86_virt_bits = (eax >> 8) & 0xff;
		c->x86_phys_bits = eax & 0xff;
		/* CPUID workaround for Intel 0F34 CPU */
		if (c->x86_vendor == X86_VENDOR_INTEL &&
		    c->x86 == 0xF && c->x86_model == 0x3 &&
		    c->x86_mask == 0x4)
			c->x86_phys_bits = 36;
	}

	if (c->x86 == 15)
		c->x86_cache_alignment = c->x86_clflush_size * 2;
	if ((c->x86 == 0xf && c->x86_model >= 0x03) ||
	    (c->x86 == 0x6 && c->x86_model >= 0x0e))
		set_cpu_cap(c, X86_FEATURE_CONSTANT_TSC);
	if (c->x86 == 6)
		set_cpu_cap(c, X86_FEATURE_REP_GOOD);
	set_cpu_cap(c, X86_FEATURE_LFENCE_RDTSC);
	c->x86_max_cores = intel_num_cpu_cores(c);

	srat_detect_node();
}

static void __cpuinit get_cpu_vendor(struct cpuinfo_x86 *c)
{
	char *v = c->x86_vendor_id;

	if (!strcmp(v, "AuthenticAMD"))
		c->x86_vendor = X86_VENDOR_AMD;
	else if (!strcmp(v, "GenuineIntel"))
		c->x86_vendor = X86_VENDOR_INTEL;
	else
		c->x86_vendor = X86_VENDOR_UNKNOWN;
}

/* Do some early cpuid on the boot CPU to get some parameters that are
   needed before check_bugs. Everything advanced is in identify_cpu
   below. */
static void __cpuinit early_identify_cpu(struct cpuinfo_x86 *c)
{
	u32 tfms, xlvl;

	c->loops_per_jiffy = loops_per_jiffy;
	c->x86_cache_size = -1;
	c->x86_vendor = X86_VENDOR_UNKNOWN;
	c->x86_model = c->x86_mask = 0;	/* So far unknown... */
	c->x86_vendor_id[0] = '\0'; /* Unset */
	c->x86_model_id[0] = '\0';  /* Unset */
	c->x86_clflush_size = 64;
	c->x86_cache_alignment = c->x86_clflush_size;
	c->x86_max_cores = 1;
	c->x86_coreid_bits = 0;
	c->extended_cpuid_level = 0;
	memset(&c->x86_capability, 0, sizeof c->x86_capability);

	/* Get vendor name */
	cpuid(0x00000000, (unsigned int *)&c->cpuid_level,
	      (unsigned int *)&c->x86_vendor_id[0],
	      (unsigned int *)&c->x86_vendor_id[8],
	      (unsigned int *)&c->x86_vendor_id[4]);

	get_cpu_vendor(c);

	/* Initialize the standard set of capabilities */
	/* Note that the vendor-specific code below might override */

	/* Intel-defined flags: level 0x00000001 */
	if (c->cpuid_level >= 0x00000001) {
		__u32 misc;
		cpuid(0x00000001, &tfms, &misc, &c->x86_capability[4],
		      &c->x86_capability[0]);
		c->x86 = (tfms >> 8) & 0xf;
		c->x86_model = (tfms >> 4) & 0xf;
		c->x86_mask = tfms & 0xf;
		if (c->x86 == 0xf)
			c->x86 += (tfms >> 20) & 0xff;
		if (c->x86 >= 0x6)
			c->x86_model += ((tfms >> 16) & 0xF) << 4;
		if (c->x86_capability[0] & (1<<19))
			c->x86_clflush_size = ((misc >> 8) & 0xff) * 8;
	} else {
		/* Have CPUID level 0 only - unheard of */
		c->x86 = 4;
	}

#ifdef CONFIG_SMP
	c->phys_proc_id = (cpuid_ebx(1) >> 24) & 0xff;
#endif
	/* AMD-defined flags: level 0x80000001 */
	xlvl = cpuid_eax(0x80000000);
	c->extended_cpuid_level = xlvl;
	if ((xlvl & 0xffff0000) == 0x80000000) {
		if (xlvl >= 0x80000001) {
			c->x86_capability[1] = cpuid_edx(0x80000001);
			c->x86_capability[6] = cpuid_ecx(0x80000001);
		}
		if (xlvl >= 0x80000004)
			get_model_name(c); /* Default name */
	}

	/* Transmeta-defined flags: level 0x80860001 */
	xlvl = cpuid_eax(0x80860000);
	if ((xlvl & 0xffff0000) == 0x80860000) {
		/* Don't set x86_cpuid_level here for now to not confuse. */
		if (xlvl >= 0x80860001)
			c->x86_capability[2] = cpuid_edx(0x80860001);
	}

	c->extended_cpuid_level = cpuid_eax(0x80000000);
	if (c->extended_cpuid_level >= 0x80000007)
		c->x86_power = cpuid_edx(0x80000007);

	switch (c->x86_vendor) {
	case X86_VENDOR_AMD:
		early_init_amd(c);
		break;
	case X86_VENDOR_INTEL:
		early_init_intel(c);
		break;
	}
}
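
/*
 * Worked example (illustrative): CPUID.1 EAX = 0x000006fb decodes as
 * base family 6, base model 0xf, stepping 0xb; family >= 6 pulls in
 * the extended model bits EAX[19:16] (0 here), leaving x86 = 6,
 * x86_model = 0xf, x86_mask = 0xb.
 */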

/*
 * This does the hard work of actually picking apart the CPU stuff...
 */
void __cpuinit identify_cpu(struct cpuinfo_x86 *c)
{
	int i;

	early_identify_cpu(c);

	init_scattered_cpuid_features(c);

	c->apicid = phys_pkg_id(0);

	/*
	 * Vendor-specific initialization. In this section we
	 * canonicalize the feature flags, meaning if there are
	 * features a certain CPU supports which CPUID doesn't
	 * tell us, CPUID claiming incorrect flags, or other bugs,
	 * we handle them here.
	 *
	 * At the end of this section, c->x86_capability better
	 * indicate the features this CPU genuinely supports!
	 */
	switch (c->x86_vendor) {
	case X86_VENDOR_AMD:
		init_amd(c);
		break;

	case X86_VENDOR_INTEL:
		init_intel(c);
		break;

	case X86_VENDOR_UNKNOWN:
	default:
		display_cacheinfo(c);
		break;
	}

	detect_ht(c);

	/*
	 * On SMP, boot_cpu_data holds the common feature set between
	 * all CPUs; so make sure that we indicate which features are
	 * common between the CPUs. The first time this routine gets
	 * executed, c == &boot_cpu_data.
	 */
	if (c != &boot_cpu_data) {
		/* AND the already accumulated flags with these */
		for (i = 0; i < NCAPINTS; i++)
			boot_cpu_data.x86_capability[i] &= c->x86_capability[i];
	}

	/* Clear all flags overridden by options */
	for (i = 0; i < NCAPINTS; i++)
		c->x86_capability[i] ^= cleared_cpu_caps[i];

#ifdef CONFIG_X86_MCE
	mcheck_init(c);
#endif
	select_idle_routine(c);

	if (c != &boot_cpu_data)
		mtrr_ap_init();
#ifdef CONFIG_NUMA
	numa_add_cpu(smp_processor_id());
#endif
}

static __init int setup_noclflush(char *arg)
{
	setup_clear_cpu_cap(X86_FEATURE_CLFLSH);
	return 1;
}
__setup("noclflush", setup_noclflush);

void __cpuinit print_cpu_info(struct cpuinfo_x86 *c)
{
	if (c->x86_model_id[0])
		printk(KERN_INFO "%s", c->x86_model_id);

	if (c->x86_mask || c->cpuid_level >= 0)
		printk(KERN_CONT " stepping %02x\n", c->x86_mask);
	else
		printk(KERN_CONT "\n");
}

static __init int setup_disablecpuid(char *arg)
{
	int bit;
	if (get_option(&arg, &bit) && bit < NCAPINTS*32)
		setup_clear_cpu_cap(bit);
	else
		return 0;
	return 1;
}
__setup("clearcpuid=", setup_disablecpuid);
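
/*
 * Illustrative usage (assumption, matching get_option()'s integer
 * parsing): "clearcpuid=154" clears capability word 4 bit 26
 * (154 = 4*32 + 26); identify_cpu() then XORs it out of
 * x86_capability, so the flag never appears in /proc/cpuinfo.
 */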

/*
 * Get CPU information for use by the procfs.
 */

static int show_cpuinfo(struct seq_file *m, void *v)
{
	struct cpuinfo_x86 *c = v;
	int cpu = 0, i;

	/*
	 * These flag bits must match the definitions in <asm/cpufeature.h>.
	 * NULL means this bit is undefined or reserved; either way it doesn't
	 * have meaning as far as Linux is concerned. Note that it's important
	 * to realize there is a difference between this table and CPUID -- if
	 * applications want to get the raw CPUID data, they should access
	 * /dev/cpu/<cpu_nr>/cpuid instead.
	 */
	static const char *const x86_cap_flags[] = {
		/* Intel-defined */
		"fpu", "vme", "de", "pse", "tsc", "msr", "pae", "mce",
		"cx8", "apic", NULL, "sep", "mtrr", "pge", "mca", "cmov",
		"pat", "pse36", "pn", "clflush", NULL, "dts", "acpi", "mmx",
		"fxsr", "sse", "sse2", "ss", "ht", "tm", "ia64", "pbe",

		/* AMD-defined */
		NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
		NULL, NULL, NULL, "syscall", NULL, NULL, NULL, NULL,
		NULL, NULL, NULL, NULL, "nx", NULL, "mmxext", NULL,
		NULL, "fxsr_opt", "pdpe1gb", "rdtscp", NULL, "lm",
		"3dnowext", "3dnow",

		/* Transmeta-defined */
		"recovery", "longrun", NULL, "lrti", NULL, NULL, NULL, NULL,
		NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
		NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
		NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,

		/* Other (Linux-defined) */
		"cxmmx", "k6_mtrr", "cyrix_arr", "centaur_mcr",
		NULL, NULL, NULL, NULL,
		"constant_tsc", "up", NULL, "arch_perfmon",
		"pebs", "bts", NULL, "sync_rdtsc",
		"rep_good", NULL, NULL, NULL, NULL, NULL, NULL, NULL,
		NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,

		/* Intel-defined (#2) */
		"pni", NULL, NULL, "monitor", "ds_cpl", "vmx", "smx", "est",
		"tm2", "ssse3", "cid", NULL, NULL, "cx16", "xtpr", NULL,
		NULL, NULL, "dca", "sse4_1", "sse4_2", NULL, NULL, "popcnt",
		NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,

		/* VIA/Cyrix/Centaur-defined */
		NULL, NULL, "rng", "rng_en", NULL, NULL, "ace", "ace_en",
		"ace2", "ace2_en", "phe", "phe_en", "pmm", "pmm_en", NULL, NULL,
		NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
		NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,

		/* AMD-defined (#2) */
		"lahf_lm", "cmp_legacy", "svm", "extapic",
		"cr8_legacy", "abm", "sse4a", "misalignsse",
		"3dnowprefetch", "osvw", "ibs", "sse5",
		"skinit", "wdt", NULL, NULL,
		NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
		NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,

		/* Auxiliary (Linux-defined) */
		"ida", NULL, NULL, NULL, NULL, NULL, NULL, NULL,
		NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
		NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
		NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
	};
	static const char *const x86_power_flags[] = {
		"ts",	/* temperature sensor */
		"fid",	/* frequency id control */
		"vid",	/* voltage id control */
		"ttp",	/* thermal trip */
		"tm",
		"stc",
		"100mhzsteps",
		"hwpstate",
		"",	/* tsc invariant mapped to constant_tsc */
		/* nothing */
	};

#ifdef CONFIG_SMP
	cpu = c->cpu_index;
#endif

	seq_printf(m, "processor\t: %u\n"
		   "vendor_id\t: %s\n"
		   "cpu family\t: %d\n"
		   "model\t\t: %d\n"
		   "model name\t: %s\n",
		   (unsigned)cpu,
		   c->x86_vendor_id[0] ? c->x86_vendor_id : "unknown",
		   c->x86,
		   (int)c->x86_model,
		   c->x86_model_id[0] ? c->x86_model_id : "unknown");

	if (c->x86_mask || c->cpuid_level >= 0)
		seq_printf(m, "stepping\t: %d\n", c->x86_mask);
	else
		seq_printf(m, "stepping\t: unknown\n");

	if (cpu_has(c, X86_FEATURE_TSC)) {
		unsigned int freq = cpufreq_quick_get((unsigned)cpu);

		if (!freq)
			freq = cpu_khz;
		seq_printf(m, "cpu MHz\t\t: %u.%03u\n",
			   freq / 1000, (freq % 1000));
	}

	/* Cache size */
	if (c->x86_cache_size >= 0)
		seq_printf(m, "cache size\t: %d KB\n", c->x86_cache_size);

#ifdef CONFIG_SMP
	if (smp_num_siblings * c->x86_max_cores > 1) {
		seq_printf(m, "physical id\t: %d\n", c->phys_proc_id);
		seq_printf(m, "siblings\t: %d\n",
			   cpus_weight(per_cpu(cpu_core_map, cpu)));
		seq_printf(m, "core id\t\t: %d\n", c->cpu_core_id);
		seq_printf(m, "cpu cores\t: %d\n", c->booted_cores);
	}
#endif

	seq_printf(m,
		   "fpu\t\t: yes\n"
		   "fpu_exception\t: yes\n"
		   "cpuid level\t: %d\n"
		   "wp\t\t: yes\n"
		   "flags\t\t:",
		   c->cpuid_level);

	for (i = 0; i < 32*NCAPINTS; i++)
		if (cpu_has(c, i) && x86_cap_flags[i] != NULL)
			seq_printf(m, " %s", x86_cap_flags[i]);

	seq_printf(m, "\nbogomips\t: %lu.%02lu\n",
		   c->loops_per_jiffy/(500000/HZ),
		   (c->loops_per_jiffy/(5000/HZ)) % 100);

	if (c->x86_tlbsize > 0)
		seq_printf(m, "TLB size\t: %d 4K pages\n", c->x86_tlbsize);
	seq_printf(m, "clflush size\t: %d\n", c->x86_clflush_size);
	seq_printf(m, "cache_alignment\t: %d\n", c->x86_cache_alignment);

	seq_printf(m, "address sizes\t: %u bits physical, %u bits virtual\n",
		   c->x86_phys_bits, c->x86_virt_bits);

	seq_printf(m, "power management:");
	for (i = 0; i < 32; i++) {
		if (c->x86_power & (1 << i)) {
			if (i < ARRAY_SIZE(x86_power_flags) &&
			    x86_power_flags[i])
				seq_printf(m, "%s%s",
					   x86_power_flags[i][0]?" ":"",
					   x86_power_flags[i]);
			else
				seq_printf(m, " [%d]", i);
		}
	}

	seq_printf(m, "\n\n");

	return 0;
}

static void *c_start(struct seq_file *m, loff_t *pos)
{
	if (*pos == 0)	/* just in case, cpu 0 is not the first */
		*pos = first_cpu(cpu_online_map);
	if ((*pos) < NR_CPUS && cpu_online(*pos))
		return &cpu_data(*pos);
	return NULL;
}

static void *c_next(struct seq_file *m, void *v, loff_t *pos)
{
	*pos = next_cpu(*pos, cpu_online_map);
	return c_start(m, pos);
}

static void c_stop(struct seq_file *m, void *v)
{
}

const struct seq_operations cpuinfo_op = {
	.start = c_start,
	.next = c_next,
	.stop = c_stop,
	.show = show_cpuinfo,
};