/*
 * Copyright (C) 1995 Linus Torvalds
 */

/*
 * This file handles the architecture-dependent parts of initialization
 */

#include <linux/errno.h>
#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/stddef.h>
#include <linux/unistd.h>
#include <linux/ptrace.h>
#include <linux/slab.h>
#include <linux/user.h>
#include <linux/a.out.h>
#include <linux/screen_info.h>
#include <linux/ioport.h>
#include <linux/delay.h>
#include <linux/init.h>
#include <linux/initrd.h>
#include <linux/highmem.h>
#include <linux/bootmem.h>
#include <linux/module.h>
#include <asm/processor.h>
#include <linux/console.h>
#include <linux/seq_file.h>
#include <linux/crash_dump.h>
#include <linux/root_dev.h>
#include <linux/pci.h>
#include <linux/acpi.h>
#include <linux/kallsyms.h>
#include <linux/edd.h>
#include <linux/mmzone.h>
#include <linux/kexec.h>
#include <linux/cpufreq.h>
#include <linux/dmi.h>
#include <linux/dma-mapping.h>
#include <linux/ctype.h>

#include <asm/mtrr.h>
#include <asm/uaccess.h>
#include <asm/system.h>
#include <asm/io.h>
#include <asm/smp.h>
#include <asm/msr.h>
#include <asm/desc.h>
#include <video/edid.h>
#include <asm/e820.h>
#include <asm/dma.h>
#include <asm/mpspec.h>
#include <asm/mmu_context.h>
#include <asm/proto.h>
#include <asm/setup.h>
#include <asm/mach_apic.h>
#include <asm/numa.h>
#include <asm/sections.h>
#include <asm/dmi.h>
#include <asm/cacheflush.h>
#include <asm/mce.h>
#include <asm/ds.h>

/*
 * Machine setup..
 */

struct cpuinfo_x86 boot_cpu_data __read_mostly;
EXPORT_SYMBOL(boot_cpu_data);

unsigned long mmu_cr4_features;

/* Boot loader ID as an integer, for the benefit of proc_dointvec */
int bootloader_type;

unsigned long saved_video_mode;

int force_mwait __cpuinitdata;

/*
 * Early DMI memory
 */
int dmi_alloc_index;
char dmi_alloc_data[DMI_MAX_DATA];

/*
 * Setup options
 */
struct screen_info screen_info;
EXPORT_SYMBOL(screen_info);
struct sys_desc_table_struct {
	unsigned short length;
	unsigned char table[0];
};

struct edid_info edid_info;
EXPORT_SYMBOL_GPL(edid_info);

extern int root_mountflags;

char __initdata command_line[COMMAND_LINE_SIZE];

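/*
 * Legacy PC I/O port ranges (DMA controllers, PICs, PIT channels,
 * keyboard controller, DMA page registers, FPU error port).
 * setup_arch() requests each of these against ioport_resource, so
 * they show up as reserved entries in /proc/ioports.
 */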
struct resource standard_io_resources[] = {
	{ .name = "dma1", .start = 0x00, .end = 0x1f,
		.flags = IORESOURCE_BUSY | IORESOURCE_IO },
	{ .name = "pic1", .start = 0x20, .end = 0x21,
		.flags = IORESOURCE_BUSY | IORESOURCE_IO },
	{ .name = "timer0", .start = 0x40, .end = 0x43,
		.flags = IORESOURCE_BUSY | IORESOURCE_IO },
	{ .name = "timer1", .start = 0x50, .end = 0x53,
		.flags = IORESOURCE_BUSY | IORESOURCE_IO },
	{ .name = "keyboard", .start = 0x60, .end = 0x6f,
		.flags = IORESOURCE_BUSY | IORESOURCE_IO },
	{ .name = "dma page reg", .start = 0x80, .end = 0x8f,
		.flags = IORESOURCE_BUSY | IORESOURCE_IO },
	{ .name = "pic2", .start = 0xa0, .end = 0xa1,
		.flags = IORESOURCE_BUSY | IORESOURCE_IO },
	{ .name = "dma2", .start = 0xc0, .end = 0xdf,
		.flags = IORESOURCE_BUSY | IORESOURCE_IO },
	{ .name = "fpu", .start = 0xf0, .end = 0xff,
		.flags = IORESOURCE_BUSY | IORESOURCE_IO }
};

#define IORESOURCE_RAM (IORESOURCE_BUSY | IORESOURCE_MEM)

static struct resource data_resource = {
	.name = "Kernel data",
	.start = 0,
	.end = 0,
	.flags = IORESOURCE_RAM,
};
static struct resource code_resource = {
	.name = "Kernel code",
	.start = 0,
	.end = 0,
	.flags = IORESOURCE_RAM,
};
static struct resource bss_resource = {
	.name = "Kernel bss",
	.start = 0,
	.end = 0,
	.flags = IORESOURCE_RAM,
};

static void __cpuinit early_identify_cpu(struct cpuinfo_x86 *c);

#ifdef CONFIG_PROC_VMCORE
/* elfcorehdr= specifies the location of elf core header
 * stored by the crashed kernel. This option will be passed
 * by kexec loader to the capture kernel.
 */
static int __init setup_elfcorehdr(char *arg)
{
	char *end;
	if (!arg)
		return -EINVAL;
	elfcorehdr_addr = memparse(arg, &end);
	return end > arg ? 0 : -EINVAL;
}
early_param("elfcorehdr", setup_elfcorehdr);
#endif

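/*
 * Non-NUMA boot-time memory setup: size the bootmem bitmap for all of
 * RAM, find a free e820 slot for it below end_pfn, hand every
 * e820-usable page to the bootmem allocator, and finally mark the
 * bitmap itself as reserved.
 */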
#ifndef CONFIG_NUMA
static void __init
contig_initmem_init(unsigned long start_pfn, unsigned long end_pfn)
{
	unsigned long bootmap_size, bootmap;

	bootmap_size = bootmem_bootmap_pages(end_pfn)<<PAGE_SHIFT;
	bootmap = find_e820_area(0, end_pfn<<PAGE_SHIFT, bootmap_size);
	if (bootmap == -1L)
		panic("Cannot find bootmem map of size %ld\n", bootmap_size);
	bootmap_size = init_bootmem(bootmap >> PAGE_SHIFT, end_pfn);
	e820_register_active_regions(0, start_pfn, end_pfn);
	free_bootmem_with_active_regions(0, end_pfn);
	reserve_bootmem(bootmap, bootmap_size);
}
#endif

#if defined(CONFIG_EDD) || defined(CONFIG_EDD_MODULE)
struct edd edd;
#ifdef CONFIG_EDD_MODULE
EXPORT_SYMBOL(edd);
#endif
/**
 * copy_edd() - Copy the BIOS EDD information
 * from boot_params into a safe place.
 *
 */
static inline void copy_edd(void)
{
	memcpy(edd.mbr_signature, boot_params.edd_mbr_sig_buffer,
	       sizeof(edd.mbr_signature));
	memcpy(edd.edd_info, boot_params.eddbuf, sizeof(edd.edd_info));
	edd.mbr_signature_nr = boot_params.edd_mbr_sig_buf_entries;
	edd.edd_info_nr = boot_params.eddbuf_entries;
}
#else
static inline void copy_edd(void)
{
}
#endif

#ifdef CONFIG_KEXEC
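/*
 * Reserve memory for a kdump capture kernel.  parse_crashkernel()
 * interprets the crashkernel= boot option (typically a size followed
 * by @offset); on x86_64 an explicit base address is required here,
 * otherwise the reservation is refused below.
 */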
static void __init reserve_crashkernel(void)
{
	unsigned long long free_mem;
	unsigned long long crash_size, crash_base;
	int ret;

	free_mem =
		((unsigned long long)max_low_pfn - min_low_pfn) << PAGE_SHIFT;

	ret = parse_crashkernel(boot_command_line, free_mem,
				&crash_size, &crash_base);
	if (ret == 0 && crash_size) {
		if (crash_base > 0) {
			printk(KERN_INFO "Reserving %ldMB of memory at %ldMB "
			       "for crashkernel (System RAM: %ldMB)\n",
			       (unsigned long)(crash_size >> 20),
			       (unsigned long)(crash_base >> 20),
			       (unsigned long)(free_mem >> 20));
			crashk_res.start = crash_base;
			crashk_res.end = crash_base + crash_size - 1;
			reserve_bootmem(crash_base, crash_size);
		} else
			printk(KERN_INFO "crashkernel reservation failed - "
			       "you have to specify a base address\n");
	}
}
#else
static inline void __init reserve_crashkernel(void)
{}
#endif

#define EBDA_ADDR_POINTER 0x40E

unsigned __initdata ebda_addr;
unsigned __initdata ebda_size;

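/*
 * The word at 0x40E in the BIOS data area holds the real-mode segment
 * of the Extended BIOS Data Area, so shifting it left by 4 yields the
 * physical address.  The value at the start of the EBDA is its size in
 * KiB, hence the shift by 10 below, after which the reservation is
 * rounded up to whole pages and capped at 64KB.
 */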
static void discover_ebda(void)
{
	/*
	 * there is a real-mode segmented pointer pointing to the
	 * 4K EBDA area at 0x40E
	 */
	ebda_addr = *(unsigned short *)__va(EBDA_ADDR_POINTER);
	ebda_addr <<= 4;

	ebda_size = *(unsigned short *)__va(ebda_addr);

	/* Round EBDA up to pages */
	if (ebda_size == 0)
		ebda_size = 1;
	ebda_size <<= 10;
	ebda_size = round_up(ebda_size + (ebda_addr & ~PAGE_MASK), PAGE_SIZE);
	if (ebda_size > 64*1024)
		ebda_size = 64*1024;
}

void __init setup_arch(char **cmdline_p)
{
	unsigned i;

	printk(KERN_INFO "Command line: %s\n", boot_command_line);

	ROOT_DEV = old_decode_dev(boot_params.hdr.root_dev);
	screen_info = boot_params.screen_info;
	edid_info = boot_params.edid_info;
	saved_video_mode = boot_params.hdr.vid_mode;
	bootloader_type = boot_params.hdr.type_of_loader;

#ifdef CONFIG_BLK_DEV_RAM
	rd_image_start = boot_params.hdr.ram_size & RAMDISK_IMAGE_START_MASK;
	rd_prompt = ((boot_params.hdr.ram_size & RAMDISK_PROMPT_FLAG) != 0);
	rd_doload = ((boot_params.hdr.ram_size & RAMDISK_LOAD_FLAG) != 0);
#endif
	setup_memory_region();
	copy_edd();

	if (!boot_params.hdr.root_flags)
		root_mountflags &= ~MS_RDONLY;
	init_mm.start_code = (unsigned long) &_text;
	init_mm.end_code = (unsigned long) &_etext;
	init_mm.end_data = (unsigned long) &_edata;
	init_mm.brk = (unsigned long) &_end;

	code_resource.start = virt_to_phys(&_text);
	code_resource.end = virt_to_phys(&_etext)-1;
	data_resource.start = virt_to_phys(&_etext);
	data_resource.end = virt_to_phys(&_edata)-1;
	bss_resource.start = virt_to_phys(&__bss_start);
	bss_resource.end = virt_to_phys(&__bss_stop)-1;

	early_identify_cpu(&boot_cpu_data);

	strlcpy(command_line, boot_command_line, COMMAND_LINE_SIZE);
	*cmdline_p = command_line;

	parse_early_param();

	finish_e820_parsing();

	e820_register_active_regions(0, 0, -1UL);
	/*
	 * partially used pages are not usable - thus
	 * we are rounding upwards:
	 */
	end_pfn = e820_end_of_ram();
	num_physpages = end_pfn;

	check_efer();

	discover_ebda();

	init_memory_mapping(0, (end_pfn_map << PAGE_SHIFT));

	dmi_scan_machine();

	io_delay_init();

#ifdef CONFIG_SMP
	/* setup to use the static apicid table during kernel startup */
	x86_cpu_to_apicid_ptr = (void *)&x86_cpu_to_apicid_init;
#endif

#ifdef CONFIG_ACPI
	/*
	 * Initialize the ACPI boot-time table parser (gets the RSDP and SDT).
	 * Call this early for SRAT node setup.
	 */
	acpi_boot_table_init();
#endif

	/* How many end-of-memory variables you have, grandma! */
	max_low_pfn = end_pfn;
	max_pfn = end_pfn;
	high_memory = (void *)__va(end_pfn * PAGE_SIZE - 1) + 1;

	/* Remove active ranges so rediscovery with NUMA-awareness happens */
	remove_all_active_ranges();

#ifdef CONFIG_ACPI_NUMA
	/*
	 * Parse SRAT to discover nodes.
	 */
	acpi_numa_init();
#endif

#ifdef CONFIG_NUMA
	numa_initmem_init(0, end_pfn);
#else
	contig_initmem_init(0, end_pfn);
#endif

	/* Reserve direct mapping */
	reserve_bootmem_generic(table_start << PAGE_SHIFT,
				(table_end - table_start) << PAGE_SHIFT);

	/* reserve kernel */
	reserve_bootmem_generic(__pa_symbol(&_text),
				__pa_symbol(&_end) - __pa_symbol(&_text));

	/*
	 * reserve physical page 0 - it's a special BIOS page on many boxes,
	 * enabling clean reboots, SMP operation, laptop functions.
	 */
	reserve_bootmem_generic(0, PAGE_SIZE);

	/* reserve ebda region */
	if (ebda_addr)
		reserve_bootmem_generic(ebda_addr, ebda_size);
#ifdef CONFIG_NUMA
	/* reserve nodemap region */
	if (nodemap_addr)
		reserve_bootmem_generic(nodemap_addr, nodemap_size);
#endif

#ifdef CONFIG_SMP
	/* Reserve SMP trampoline */
	reserve_bootmem_generic(SMP_TRAMPOLINE_BASE, 2*PAGE_SIZE);
#endif

#ifdef CONFIG_ACPI_SLEEP
	/*
	 * Reserve low memory region for sleep support.
	 */
	acpi_reserve_bootmem();
#endif
	/*
	 * Find and reserve possible boot-time SMP configuration:
	 */
	find_smp_config();
#ifdef CONFIG_BLK_DEV_INITRD
	if (boot_params.hdr.type_of_loader && boot_params.hdr.ramdisk_image) {
		unsigned long ramdisk_image = boot_params.hdr.ramdisk_image;
		unsigned long ramdisk_size = boot_params.hdr.ramdisk_size;
		unsigned long ramdisk_end = ramdisk_image + ramdisk_size;
		unsigned long end_of_mem = end_pfn << PAGE_SHIFT;

		if (ramdisk_end <= end_of_mem) {
			reserve_bootmem_generic(ramdisk_image, ramdisk_size);
			initrd_start = ramdisk_image + PAGE_OFFSET;
			initrd_end = initrd_start+ramdisk_size;
		} else {
			printk(KERN_ERR "initrd extends beyond end of memory "
			       "(0x%08lx > 0x%08lx)\ndisabling initrd\n",
			       ramdisk_end, end_of_mem);
			initrd_start = 0;
		}
	}
#endif
	reserve_crashkernel();
	paging_init();

	early_quirks();

	/*
	 * set this early, so we don't allocate cpu0
	 * if MADT list doesn't list BSP first
	 * mpparse.c/MP_processor_info() allocates logical cpu numbers.
	 */
	cpu_set(0, cpu_present_map);
#ifdef CONFIG_ACPI
	/*
	 * Read APIC and some other early information from ACPI tables.
	 */
	acpi_boot_init();
#endif

	init_cpu_to_node();

	/*
	 * get boot-time SMP configuration:
	 */
	if (smp_found_config)
		get_smp_config();
	init_apic_mappings();
	ioapic_init_mappings();

	/*
	 * We trust e820 completely. No explicit ROM probing in memory.
	 */
	e820_reserve_resources(&code_resource, &data_resource, &bss_resource);
	e820_mark_nosave_regions();

	/* request I/O space for devices used on all i[345]86 PCs */
	for (i = 0; i < ARRAY_SIZE(standard_io_resources); i++)
		request_resource(&ioport_resource, &standard_io_resources[i]);

	e820_setup_gap();

#ifdef CONFIG_VT
#if defined(CONFIG_VGA_CONSOLE)
	conswitchp = &vga_con;
#elif defined(CONFIG_DUMMY_CONSOLE)
	conswitchp = &dummy_con;
#endif
#endif
}

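/*
 * CPUID leaves 0x80000002-0x80000004 each return 16 bytes of the
 * processor brand string, which together fill the 48-byte
 * x86_model_id buffer read below.
 */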
static int __cpuinit get_model_name(struct cpuinfo_x86 *c)
{
	unsigned int *v;

	if (c->extended_cpuid_level < 0x80000004)
		return 0;

	v = (unsigned int *) c->x86_model_id;
	cpuid(0x80000002, &v[0], &v[1], &v[2], &v[3]);
	cpuid(0x80000003, &v[4], &v[5], &v[6], &v[7]);
	cpuid(0x80000004, &v[8], &v[9], &v[10], &v[11]);
	c->x86_model_id[48] = 0;
	return 1;
}


static void __cpuinit display_cacheinfo(struct cpuinfo_x86 *c)
{
	unsigned int n, dummy, eax, ebx, ecx, edx;

	n = c->extended_cpuid_level;

	if (n >= 0x80000005) {
		cpuid(0x80000005, &dummy, &ebx, &ecx, &edx);
		printk(KERN_INFO "CPU: L1 I Cache: %dK (%d bytes/line), "
		       "D cache %dK (%d bytes/line)\n",
		       edx>>24, edx&0xFF, ecx>>24, ecx&0xFF);
		c->x86_cache_size = (ecx>>24) + (edx>>24);
		/* On K8 L1 TLB is inclusive, so don't count it */
		c->x86_tlbsize = 0;
	}

	if (n >= 0x80000006) {
		cpuid(0x80000006, &dummy, &ebx, &ecx, &edx);
		ecx = cpuid_ecx(0x80000006);
		c->x86_cache_size = ecx >> 16;
		c->x86_tlbsize += ((ebx >> 16) & 0xfff) + (ebx & 0xfff);

		printk(KERN_INFO "CPU: L2 Cache: %dK (%d bytes/line)\n",
		       c->x86_cache_size, ecx & 0xFF);
	}

	if (n >= 0x80000007)
		cpuid(0x80000007, &dummy, &dummy, &dummy, &c->x86_power);
	if (n >= 0x80000008) {
		cpuid(0x80000008, &eax, &dummy, &dummy, &dummy);
		c->x86_virt_bits = (eax >> 8) & 0xff;
		c->x86_phys_bits = eax & 0xff;
	}
}

#ifdef CONFIG_NUMA
static int nearby_node(int apicid)
{
	int i, node;

	for (i = apicid - 1; i >= 0; i--) {
		node = apicid_to_node[i];
		if (node != NUMA_NO_NODE && node_online(node))
			return node;
	}
	for (i = apicid + 1; i < MAX_LOCAL_APIC; i++) {
		node = apicid_to_node[i];
		if (node != NUMA_NO_NODE && node_online(node))
			return node;
	}
	return first_node(node_online_map); /* Shouldn't happen */
}
#endif

/*
 * On an AMD dual core setup the lower bits of the APIC id distinguish
 * the cores.  Assumes number of cores is a power of two.
 */
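/*
 * Example: with two cores per package (c->x86_coreid_bits == 1), an
 * initial APIC id of 5 decodes to core id 1 (5 & 1) in physical
 * package 2 (5 >> 1).
 */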
static void __init amd_detect_cmp(struct cpuinfo_x86 *c)
{
#ifdef CONFIG_SMP
	unsigned bits;
#ifdef CONFIG_NUMA
	int cpu = smp_processor_id();
	int node = 0;
	unsigned apicid = hard_smp_processor_id();
#endif
	bits = c->x86_coreid_bits;

	/* Low order bits define the core id (index of core in socket) */
	c->cpu_core_id = c->phys_proc_id & ((1 << bits)-1);
	/* Convert the APIC ID into the socket ID */
	c->phys_proc_id = phys_pkg_id(bits);

#ifdef CONFIG_NUMA
	node = c->phys_proc_id;
	if (apicid_to_node[apicid] != NUMA_NO_NODE)
		node = apicid_to_node[apicid];
	if (!node_online(node)) {
		/* Two possibilities here:
		   - The CPU is missing memory and no node was created.
		     In that case try picking one from a nearby CPU
		   - The APIC IDs differ from the HyperTransport node IDs
		     which the K8 northbridge parsing fills in.
		     Assume they are all increased by a constant offset,
		     but in the same order as the HT nodeids.
		     If that doesn't result in a usable node fall back to the
		     path for the previous case. */

		int ht_nodeid = apicid - (cpu_data(0).phys_proc_id << bits);

		if (ht_nodeid >= 0 &&
		    apicid_to_node[ht_nodeid] != NUMA_NO_NODE)
			node = apicid_to_node[ht_nodeid];
		/* Pick a nearby node */
		if (!node_online(node))
			node = nearby_node(apicid);
	}
	numa_set_node(cpu, node);

	printk(KERN_INFO "CPU %d/%x -> Node %d\n", cpu, apicid, node);
#endif
#endif
}

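/*
 * CPUID 0x80000008 ECX reports the number of physical cores per package
 * (bits 7:0, minus one) and, on newer parts, the width of the core id
 * field in the APIC id (bits 15:12); when that width is reported as
 * zero it is recomputed from the core count below.
 */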
static void __cpuinit early_init_amd(struct cpuinfo_x86 *c)
{
#ifdef CONFIG_SMP
	unsigned bits, ecx;

	/* Multi core CPU? */
	if (c->extended_cpuid_level < 0x80000008)
		return;

	ecx = cpuid_ecx(0x80000008);

	c->x86_max_cores = (ecx & 0xff) + 1;

	/* CPU telling us the core id bits shift? */
	bits = (ecx >> 12) & 0xF;

	/* Otherwise recompute */
	if (bits == 0) {
		while ((1 << bits) < c->x86_max_cores)
			bits++;
	}

	c->x86_coreid_bits = bits;

#endif
}

#define ENABLE_C1E_MASK			0x18000000
#define CPUID_PROCESSOR_SIGNATURE	1
#define CPUID_XFAM			0x0ff00000
#define CPUID_XFAM_K8			0x00000000
#define CPUID_XFAM_10H			0x00100000
#define CPUID_XFAM_11H			0x00200000
#define CPUID_XMOD			0x000f0000
#define CPUID_XMOD_REV_F		0x00040000

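/*
 * In the CPUID leaf 1 signature, the extended family lives in bits
 * 27:20 and the extended model in bits 19:16; the masks above pick
 * those fields out so the K8/Fam10h/Fam11h cases can be told apart.
 */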
/* AMD systems with C1E don't have a working lAPIC timer. Check for that. */
static __cpuinit int amd_apic_timer_broken(void)
{
	u32 lo, hi, eax = cpuid_eax(CPUID_PROCESSOR_SIGNATURE);

	switch (eax & CPUID_XFAM) {
	case CPUID_XFAM_K8:
		if ((eax & CPUID_XMOD) < CPUID_XMOD_REV_F)
			break;
	case CPUID_XFAM_10H:
	case CPUID_XFAM_11H:
		rdmsr(MSR_K8_ENABLE_C1E, lo, hi);
		if (lo & ENABLE_C1E_MASK)
			return 1;
		break;
	default:
		/* err on the side of caution */
		return 1;
	}
	return 0;
}

static void __cpuinit init_amd(struct cpuinfo_x86 *c)
{
	unsigned level;

#ifdef CONFIG_SMP
	unsigned long value;

	/*
	 * Disable TLB flush filter by setting HWCR.FFDIS on K8
	 * bit 6 of msr C001_0015
	 *
	 * Errata 63 for SH-B3 steppings
	 * Errata 122 for all steppings (F+ have it disabled by default)
	 */
	if (c->x86 == 15) {
		rdmsrl(MSR_K8_HWCR, value);
		value |= 1 << 6;
		wrmsrl(MSR_K8_HWCR, value);
	}
#endif

	/* Bit 31 in normal CPUID used for nonstandard 3DNow ID;
	   3DNow is IDd by bit 31 in extended CPUID (1*32+31) anyway */
	clear_bit(0*32+31, (unsigned long *)&c->x86_capability);

	/* On C+ stepping K8 rep microcode works well for copy/memset */
	level = cpuid_eax(1);
	if (c->x86 == 15 && ((level >= 0x0f48 && level < 0x0f50) ||
			     level >= 0x0f58))
		set_cpu_cap(c, X86_FEATURE_REP_GOOD);
	if (c->x86 == 0x10 || c->x86 == 0x11)
		set_cpu_cap(c, X86_FEATURE_REP_GOOD);

	/* Enable workaround for FXSAVE leak */
	if (c->x86 >= 6)
		set_cpu_cap(c, X86_FEATURE_FXSAVE_LEAK);

	level = get_model_name(c);
	if (!level) {
		switch (c->x86) {
		case 15:
			/* Should distinguish Models here, but this is only
			   a fallback anyways. */
			strcpy(c->x86_model_id, "Hammer");
			break;
		}
	}
	display_cacheinfo(c);

	/* c->x86_power is 8000_0007 edx. Bit 8 is constant TSC */
	if (c->x86_power & (1<<8))
		set_cpu_cap(c, X86_FEATURE_CONSTANT_TSC);

	/* Multi core CPU? */
	if (c->extended_cpuid_level >= 0x80000008)
		amd_detect_cmp(c);

	if (c->extended_cpuid_level >= 0x80000006 &&
	    (cpuid_edx(0x80000006) & 0xf000))
		num_cache_leaves = 4;
	else
		num_cache_leaves = 3;

	if (c->x86 == 0xf || c->x86 == 0x10 || c->x86 == 0x11)
		set_cpu_cap(c, X86_FEATURE_K8);

	/* RDTSC can be speculated around */
	clear_cpu_cap(c, X86_FEATURE_SYNC_RDTSC);

	/* Family 10 doesn't support C states in MWAIT so don't use it */
	if (c->x86 == 0x10 && !force_mwait)
		clear_cpu_cap(c, X86_FEATURE_MWAIT);

	if (amd_apic_timer_broken())
		disable_apic_timer = 1;
}

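/*
 * CPUID leaf 1 reports the number of logical processors per physical
 * package in EBX bits 23:16; detect_ht() derives the sibling count and
 * the core/package split of the APIC id from that value.
 */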
static void __cpuinit detect_ht(struct cpuinfo_x86 *c)
{
#ifdef CONFIG_SMP
	u32 eax, ebx, ecx, edx;
	int index_msb, core_bits;

	cpuid(1, &eax, &ebx, &ecx, &edx);

	if (!cpu_has(c, X86_FEATURE_HT))
		return;
	if (cpu_has(c, X86_FEATURE_CMP_LEGACY))
		goto out;

	smp_num_siblings = (ebx & 0xff0000) >> 16;

	if (smp_num_siblings == 1) {
		printk(KERN_INFO "CPU: Hyper-Threading is disabled\n");
	} else if (smp_num_siblings > 1) {

		if (smp_num_siblings > NR_CPUS) {
			printk(KERN_WARNING "CPU: Unsupported number of "
			       "siblings %d", smp_num_siblings);
			smp_num_siblings = 1;
			return;
		}

		index_msb = get_count_order(smp_num_siblings);
		c->phys_proc_id = phys_pkg_id(index_msb);

		smp_num_siblings = smp_num_siblings / c->x86_max_cores;

		index_msb = get_count_order(smp_num_siblings);

		core_bits = get_count_order(c->x86_max_cores);

		c->cpu_core_id = phys_pkg_id(index_msb) &
			((1 << core_bits) - 1);
	}
out:
	if ((c->x86_max_cores * smp_num_siblings) > 1) {
		printk(KERN_INFO "CPU: Physical Processor ID: %d\n",
		       c->phys_proc_id);
		printk(KERN_INFO "CPU: Processor Core ID: %d\n",
		       c->cpu_core_id);
	}

#endif
}

/*
 * find out the number of processor cores on the die
 */
static int __cpuinit intel_num_cpu_cores(struct cpuinfo_x86 *c)
{
	unsigned int eax, t;

	if (c->cpuid_level < 4)
		return 1;

	cpuid_count(4, 0, &eax, &t, &t, &t);

	if (eax & 0x1f)
		return ((eax >> 26) + 1);
	else
		return 1;
}

static void srat_detect_node(void)
{
#ifdef CONFIG_NUMA
	unsigned node;
	int cpu = smp_processor_id();
	int apicid = hard_smp_processor_id();

	/* Don't do the funky fallback heuristics the AMD version employs
	   for now. */
	node = apicid_to_node[apicid];
	if (node == NUMA_NO_NODE)
		node = first_node(node_online_map);
	numa_set_node(cpu, node);

	printk(KERN_INFO "CPU %d/%x -> Node %d\n", cpu, apicid, node);
#endif
}

static void __cpuinit init_intel(struct cpuinfo_x86 *c)
{
	/* Cache sizes */
	unsigned n;

	init_intel_cacheinfo(c);
	if (c->cpuid_level > 9) {
		unsigned eax = cpuid_eax(10);
		/* Check for version and the number of counters */
		if ((eax & 0xff) && (((eax>>8) & 0xff) > 1))
			set_cpu_cap(c, X86_FEATURE_ARCH_PERFMON);
	}

	if (cpu_has_ds) {
		unsigned int l1, l2;
		rdmsr(MSR_IA32_MISC_ENABLE, l1, l2);
		if (!(l1 & (1<<11)))
			set_cpu_cap(c, X86_FEATURE_BTS);
		if (!(l1 & (1<<12)))
			set_cpu_cap(c, X86_FEATURE_PEBS);
	}

	if (cpu_has_bts)
		ds_init_intel(c);

	n = c->extended_cpuid_level;
	if (n >= 0x80000008) {
		unsigned eax = cpuid_eax(0x80000008);
		c->x86_virt_bits = (eax >> 8) & 0xff;
		c->x86_phys_bits = eax & 0xff;
		/* CPUID workaround for Intel 0F34 CPU */
		if (c->x86_vendor == X86_VENDOR_INTEL &&
		    c->x86 == 0xF && c->x86_model == 0x3 &&
		    c->x86_mask == 0x4)
			c->x86_phys_bits = 36;
	}

	if (c->x86 == 15)
		c->x86_cache_alignment = c->x86_clflush_size * 2;
	if ((c->x86 == 0xf && c->x86_model >= 0x03) ||
	    (c->x86 == 0x6 && c->x86_model >= 0x0e))
		set_cpu_cap(c, X86_FEATURE_CONSTANT_TSC);
	if (c->x86 == 6)
		set_cpu_cap(c, X86_FEATURE_REP_GOOD);
	if (c->x86 == 15)
		set_cpu_cap(c, X86_FEATURE_SYNC_RDTSC);
	else
		clear_cpu_cap(c, X86_FEATURE_SYNC_RDTSC);
	c->x86_max_cores = intel_num_cpu_cores(c);

	srat_detect_node();
}

static void __cpuinit get_cpu_vendor(struct cpuinfo_x86 *c)
{
	char *v = c->x86_vendor_id;

	if (!strcmp(v, "AuthenticAMD"))
		c->x86_vendor = X86_VENDOR_AMD;
	else if (!strcmp(v, "GenuineIntel"))
		c->x86_vendor = X86_VENDOR_INTEL;
	else
		c->x86_vendor = X86_VENDOR_UNKNOWN;
}

struct cpu_model_info {
	int vendor;
	int family;
	char *model_names[16];
};

/* Do some early cpuid on the boot CPU to get some parameters that are
   needed before check_bugs. Everything advanced is in identify_cpu
   below. */
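/*
 * CPUID leaf 1 EAX layout: stepping in bits 3:0, model in 7:4, family
 * in 11:8, extended model in 19:16 and extended family in 27:20.  For
 * family 0xf parts the extended family is added in, and from family 6
 * on the extended model is prepended; e.g. EAX = 0x00000f41 decodes to
 * family 15, model 4, stepping 1.
 */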
static void __cpuinit early_identify_cpu(struct cpuinfo_x86 *c)
{
	u32 tfms, xlvl;

	c->loops_per_jiffy = loops_per_jiffy;
	c->x86_cache_size = -1;
	c->x86_vendor = X86_VENDOR_UNKNOWN;
	c->x86_model = c->x86_mask = 0;	/* So far unknown... */
	c->x86_vendor_id[0] = '\0'; /* Unset */
	c->x86_model_id[0] = '\0';  /* Unset */
	c->x86_clflush_size = 64;
	c->x86_cache_alignment = c->x86_clflush_size;
	c->x86_max_cores = 1;
	c->x86_coreid_bits = 0;
	c->extended_cpuid_level = 0;
	memset(&c->x86_capability, 0, sizeof c->x86_capability);

	/* Get vendor name */
	cpuid(0x00000000, (unsigned int *)&c->cpuid_level,
	      (unsigned int *)&c->x86_vendor_id[0],
	      (unsigned int *)&c->x86_vendor_id[8],
	      (unsigned int *)&c->x86_vendor_id[4]);

	get_cpu_vendor(c);

	/* Initialize the standard set of capabilities */
	/* Note that the vendor-specific code below might override */

	/* Intel-defined flags: level 0x00000001 */
	if (c->cpuid_level >= 0x00000001) {
		__u32 misc;
		cpuid(0x00000001, &tfms, &misc, &c->x86_capability[4],
		      &c->x86_capability[0]);
		c->x86 = (tfms >> 8) & 0xf;
		c->x86_model = (tfms >> 4) & 0xf;
		c->x86_mask = tfms & 0xf;
		if (c->x86 == 0xf)
			c->x86 += (tfms >> 20) & 0xff;
		if (c->x86 >= 0x6)
			c->x86_model += ((tfms >> 16) & 0xF) << 4;
		if (c->x86_capability[0] & (1<<19))
			c->x86_clflush_size = ((misc >> 8) & 0xff) * 8;
	} else {
		/* Have CPUID level 0 only - unheard of */
		c->x86 = 4;
	}

#ifdef CONFIG_SMP
	c->phys_proc_id = (cpuid_ebx(1) >> 24) & 0xff;
#endif
	/* AMD-defined flags: level 0x80000001 */
	xlvl = cpuid_eax(0x80000000);
	c->extended_cpuid_level = xlvl;
	if ((xlvl & 0xffff0000) == 0x80000000) {
		if (xlvl >= 0x80000001) {
			c->x86_capability[1] = cpuid_edx(0x80000001);
			c->x86_capability[6] = cpuid_ecx(0x80000001);
		}
		if (xlvl >= 0x80000004)
			get_model_name(c); /* Default name */
	}

	/* Transmeta-defined flags: level 0x80860001 */
	xlvl = cpuid_eax(0x80860000);
	if ((xlvl & 0xffff0000) == 0x80860000) {
		/* Don't set x86_cpuid_level here for now to not confuse. */
		if (xlvl >= 0x80860001)
			c->x86_capability[2] = cpuid_edx(0x80860001);
	}

	switch (c->x86_vendor) {
	case X86_VENDOR_AMD:
		early_init_amd(c);
		break;
	}

}

/*
 * This does the hard work of actually picking apart the CPU stuff...
 */
void __cpuinit identify_cpu(struct cpuinfo_x86 *c)
{
	int i;

	early_identify_cpu(c);

	init_scattered_cpuid_features(c);

	c->apicid = phys_pkg_id(0);

	/*
	 * Vendor-specific initialization. In this section we
	 * canonicalize the feature flags, meaning if there are
	 * features a certain CPU supports which CPUID doesn't
	 * tell us, CPUID claiming incorrect flags, or other bugs,
	 * we handle them here.
	 *
	 * At the end of this section, c->x86_capability better
	 * indicate the features this CPU genuinely supports!
	 */
	switch (c->x86_vendor) {
	case X86_VENDOR_AMD:
		init_amd(c);
		break;

	case X86_VENDOR_INTEL:
		init_intel(c);
		break;

	case X86_VENDOR_UNKNOWN:
	default:
		display_cacheinfo(c);
		break;
	}

	select_idle_routine(c);
	detect_ht(c);

	/*
	 * On SMP, boot_cpu_data holds the common feature set between
	 * all CPUs; so make sure that we indicate which features are
	 * common between the CPUs. The first time this routine gets
	 * executed, c == &boot_cpu_data.
	 */
	if (c != &boot_cpu_data) {
		/* AND the already accumulated flags with these */
		for (i = 0; i < NCAPINTS; i++)
			boot_cpu_data.x86_capability[i] &= c->x86_capability[i];
	}

#ifdef CONFIG_X86_MCE
	mcheck_init(c);
#endif
	if (c != &boot_cpu_data)
		mtrr_ap_init();
#ifdef CONFIG_NUMA
	numa_add_cpu(smp_processor_id());
#endif
}

void __cpuinit print_cpu_info(struct cpuinfo_x86 *c)
{
	if (c->x86_model_id[0])
		printk(KERN_INFO "%s", c->x86_model_id);

	if (c->x86_mask || c->cpuid_level >= 0)
		printk(KERN_CONT " stepping %02x\n", c->x86_mask);
	else
		printk(KERN_CONT "\n");
}

/*
 * Get CPU information for use by the procfs.
 */

static int show_cpuinfo(struct seq_file *m, void *v)
{
	struct cpuinfo_x86 *c = v;
	int cpu = 0, i;

	/*
	 * These flag bits must match the definitions in <asm/cpufeature.h>.
	 * NULL means this bit is undefined or reserved; either way it doesn't
	 * have meaning as far as Linux is concerned. Note that it's important
	 * to realize there is a difference between this table and CPUID -- if
	 * applications want to get the raw CPUID data, they should access
	 * /dev/cpu/<cpu_nr>/cpuid instead.
	 */
	static const char *const x86_cap_flags[] = {
		/* Intel-defined */
		"fpu", "vme", "de", "pse", "tsc", "msr", "pae", "mce",
		"cx8", "apic", NULL, "sep", "mtrr", "pge", "mca", "cmov",
		"pat", "pse36", "pn", "clflush", NULL, "dts", "acpi", "mmx",
		"fxsr", "sse", "sse2", "ss", "ht", "tm", "ia64", "pbe",

		/* AMD-defined */
		NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
		NULL, NULL, NULL, "syscall", NULL, NULL, NULL, NULL,
		NULL, NULL, NULL, NULL, "nx", NULL, "mmxext", NULL,
		NULL, "fxsr_opt", "pdpe1gb", "rdtscp", NULL, "lm",
		"3dnowext", "3dnow",

		/* Transmeta-defined */
		"recovery", "longrun", NULL, "lrti", NULL, NULL, NULL, NULL,
		NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
		NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
		NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,

		/* Other (Linux-defined) */
		"cxmmx", "k6_mtrr", "cyrix_arr", "centaur_mcr",
		NULL, NULL, NULL, NULL,
		"constant_tsc", "up", NULL, "arch_perfmon",
		"pebs", "bts", NULL, "sync_rdtsc",
		"rep_good", NULL, NULL, NULL, NULL, NULL, NULL, NULL,
		NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,

		/* Intel-defined (#2) */
		"pni", NULL, NULL, "monitor", "ds_cpl", "vmx", "smx", "est",
		"tm2", "ssse3", "cid", NULL, NULL, "cx16", "xtpr", NULL,
		NULL, NULL, "dca", "sse4_1", "sse4_2", NULL, NULL, "popcnt",
		NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,

		/* VIA/Cyrix/Centaur-defined */
		NULL, NULL, "rng", "rng_en", NULL, NULL, "ace", "ace_en",
		"ace2", "ace2_en", "phe", "phe_en", "pmm", "pmm_en", NULL, NULL,
		NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
		NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,

		/* AMD-defined (#2) */
		"lahf_lm", "cmp_legacy", "svm", "extapic",
		"cr8_legacy", "abm", "sse4a", "misalignsse",
		"3dnowprefetch", "osvw", "ibs", "sse5",
		"skinit", "wdt", NULL, NULL,
		NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
		NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,

		/* Auxiliary (Linux-defined) */
		"ida", NULL, NULL, NULL, NULL, NULL, NULL, NULL,
		NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
		NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
		NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
	};
	static const char *const x86_power_flags[] = {
		"ts",	/* temperature sensor */
		"fid",	/* frequency id control */
		"vid",	/* voltage id control */
		"ttp",	/* thermal trip */
		"tm",
		"stc",
		"100mhzsteps",
		"hwpstate",
		"",	/* tsc invariant mapped to constant_tsc */
		/* nothing */
	};


#ifdef CONFIG_SMP
	cpu = c->cpu_index;
#endif

	seq_printf(m, "processor\t: %u\n"
		   "vendor_id\t: %s\n"
		   "cpu family\t: %d\n"
		   "model\t\t: %d\n"
		   "model name\t: %s\n",
		   (unsigned)cpu,
		   c->x86_vendor_id[0] ? c->x86_vendor_id : "unknown",
		   c->x86,
		   (int)c->x86_model,
		   c->x86_model_id[0] ? c->x86_model_id : "unknown");

	if (c->x86_mask || c->cpuid_level >= 0)
		seq_printf(m, "stepping\t: %d\n", c->x86_mask);
	else
		seq_printf(m, "stepping\t: unknown\n");

	if (cpu_has(c, X86_FEATURE_TSC)) {
		unsigned int freq = cpufreq_quick_get((unsigned)cpu);

		if (!freq)
			freq = cpu_khz;
		seq_printf(m, "cpu MHz\t\t: %u.%03u\n",
			   freq / 1000, (freq % 1000));
	}

	/* Cache size */
	if (c->x86_cache_size >= 0)
		seq_printf(m, "cache size\t: %d KB\n", c->x86_cache_size);

#ifdef CONFIG_SMP
	if (smp_num_siblings * c->x86_max_cores > 1) {
		seq_printf(m, "physical id\t: %d\n", c->phys_proc_id);
		seq_printf(m, "siblings\t: %d\n",
			   cpus_weight(per_cpu(cpu_core_map, cpu)));
		seq_printf(m, "core id\t\t: %d\n", c->cpu_core_id);
		seq_printf(m, "cpu cores\t: %d\n", c->booted_cores);
	}
#endif

	seq_printf(m,
		   "fpu\t\t: yes\n"
		   "fpu_exception\t: yes\n"
		   "cpuid level\t: %d\n"
		   "wp\t\t: yes\n"
		   "flags\t\t:",
		   c->cpuid_level);

	for (i = 0; i < 32*NCAPINTS; i++)
		if (cpu_has(c, i) && x86_cap_flags[i] != NULL)
			seq_printf(m, " %s", x86_cap_flags[i]);

	seq_printf(m, "\nbogomips\t: %lu.%02lu\n",
		   c->loops_per_jiffy/(500000/HZ),
		   (c->loops_per_jiffy/(5000/HZ)) % 100);

	if (c->x86_tlbsize > 0)
		seq_printf(m, "TLB size\t: %d 4K pages\n", c->x86_tlbsize);
	seq_printf(m, "clflush size\t: %d\n", c->x86_clflush_size);
	seq_printf(m, "cache_alignment\t: %d\n", c->x86_cache_alignment);

	seq_printf(m, "address sizes\t: %u bits physical, %u bits virtual\n",
		   c->x86_phys_bits, c->x86_virt_bits);

	seq_printf(m, "power management:");
	for (i = 0; i < 32; i++) {
		if (c->x86_power & (1 << i)) {
			if (i < ARRAY_SIZE(x86_power_flags) &&
			    x86_power_flags[i])
				seq_printf(m, "%s%s",
					   x86_power_flags[i][0]?" ":"",
					   x86_power_flags[i]);
			else
				seq_printf(m, " [%d]", i);
		}
	}

	seq_printf(m, "\n\n");

	return 0;
}

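/*
 * seq_file iterator for /proc/cpuinfo: c_start() resolves *pos to the
 * cpu_data entry of an online CPU, c_next() advances through
 * cpu_online_map, and show_cpuinfo() formats one CPU per call.
 */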
static void *c_start(struct seq_file *m, loff_t *pos)
{
	if (*pos == 0)	/* just in case, cpu 0 is not the first */
		*pos = first_cpu(cpu_online_map);
	if ((*pos) < NR_CPUS && cpu_online(*pos))
		return &cpu_data(*pos);
	return NULL;
}

static void *c_next(struct seq_file *m, void *v, loff_t *pos)
{
	*pos = next_cpu(*pos, cpu_online_map);
	return c_start(m, pos);
}

static void c_stop(struct seq_file *m, void *v)
{
}

struct seq_operations cpuinfo_op = {
	.start = c_start,
	.next = c_next,
	.stop = c_stop,
	.show = show_cpuinfo,
};