/*
 * Copyright (C) 1995 Linus Torvalds
 */

/*
 * This file handles the architecture-dependent parts of initialization
 */

#include <linux/errno.h>
#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/stddef.h>
#include <linux/unistd.h>
#include <linux/ptrace.h>
#include <linux/slab.h>
#include <linux/user.h>
#include <linux/a.out.h>
#include <linux/screen_info.h>
#include <linux/ioport.h>
#include <linux/delay.h>
#include <linux/init.h>
#include <linux/initrd.h>
#include <linux/highmem.h>
#include <linux/bootmem.h>
#include <linux/module.h>
#include <asm/processor.h>
#include <linux/console.h>
#include <linux/seq_file.h>
#include <linux/crash_dump.h>
#include <linux/root_dev.h>
#include <linux/pci.h>
#include <linux/acpi.h>
#include <linux/kallsyms.h>
#include <linux/edd.h>
#include <linux/mmzone.h>
#include <linux/kexec.h>
#include <linux/cpufreq.h>
#include <linux/dmi.h>
#include <linux/dma-mapping.h>
#include <linux/ctype.h>

#include <asm/mtrr.h>
#include <asm/uaccess.h>
#include <asm/system.h>
#include <asm/io.h>
#include <asm/smp.h>
#include <asm/msr.h>
#include <asm/desc.h>
#include <video/edid.h>
#include <asm/e820.h>
#include <asm/dma.h>
#include <asm/mpspec.h>
#include <asm/mmu_context.h>
#include <asm/proto.h>
#include <asm/setup.h>
#include <asm/mach_apic.h>
#include <asm/numa.h>
#include <asm/sections.h>
#include <asm/dmi.h>
#include <asm/cacheflush.h>
#include <asm/mce.h>

/*
 * Machine setup..
 */

struct cpuinfo_x86 boot_cpu_data __read_mostly;
EXPORT_SYMBOL(boot_cpu_data);

unsigned long mmu_cr4_features;

/* Boot loader ID as an integer, for the benefit of proc_dointvec */
int bootloader_type;

unsigned long saved_video_mode;

int force_mwait __cpuinitdata;

/*
 * Early DMI memory
 */
int dmi_alloc_index;
char dmi_alloc_data[DMI_MAX_DATA];

/*
 * Setup options
 */
struct screen_info screen_info;
EXPORT_SYMBOL(screen_info);
struct sys_desc_table_struct {
	unsigned short length;
	unsigned char table[0];
};

struct edid_info edid_info;
EXPORT_SYMBOL_GPL(edid_info);

extern int root_mountflags;

char __initdata command_line[COMMAND_LINE_SIZE];

struct resource standard_io_resources[] = {
	{ .name = "dma1", .start = 0x00, .end = 0x1f,
		.flags = IORESOURCE_BUSY | IORESOURCE_IO },
	{ .name = "pic1", .start = 0x20, .end = 0x21,
		.flags = IORESOURCE_BUSY | IORESOURCE_IO },
	{ .name = "timer0", .start = 0x40, .end = 0x43,
		.flags = IORESOURCE_BUSY | IORESOURCE_IO },
	{ .name = "timer1", .start = 0x50, .end = 0x53,
		.flags = IORESOURCE_BUSY | IORESOURCE_IO },
	{ .name = "keyboard", .start = 0x60, .end = 0x6f,
		.flags = IORESOURCE_BUSY | IORESOURCE_IO },
	{ .name = "dma page reg", .start = 0x80, .end = 0x8f,
		.flags = IORESOURCE_BUSY | IORESOURCE_IO },
	{ .name = "pic2", .start = 0xa0, .end = 0xa1,
		.flags = IORESOURCE_BUSY | IORESOURCE_IO },
	{ .name = "dma2", .start = 0xc0, .end = 0xdf,
		.flags = IORESOURCE_BUSY | IORESOURCE_IO },
	{ .name = "fpu", .start = 0xf0, .end = 0xff,
		.flags = IORESOURCE_BUSY | IORESOURCE_IO }
};

#define IORESOURCE_RAM (IORESOURCE_BUSY | IORESOURCE_MEM)

static struct resource data_resource = {
	.name = "Kernel data",
	.start = 0,
	.end = 0,
	.flags = IORESOURCE_RAM,
};
static struct resource code_resource = {
	.name = "Kernel code",
	.start = 0,
	.end = 0,
	.flags = IORESOURCE_RAM,
};
static struct resource bss_resource = {
	.name = "Kernel bss",
	.start = 0,
	.end = 0,
	.flags = IORESOURCE_RAM,
};

static void __cpuinit early_identify_cpu(struct cpuinfo_x86 *c);

#ifdef CONFIG_PROC_VMCORE
/* elfcorehdr= specifies the location of elf core header
 * stored by the crashed kernel. This option will be passed
 * by kexec loader to the capture kernel.
 */
static int __init setup_elfcorehdr(char *arg)
{
	char *end;
	if (!arg)
		return -EINVAL;
	elfcorehdr_addr = memparse(arg, &end);
	return end > arg ? 0 : -EINVAL;
}
early_param("elfcorehdr", setup_elfcorehdr);
#endif

#ifndef CONFIG_NUMA
static void __init
contig_initmem_init(unsigned long start_pfn, unsigned long end_pfn)
{
	unsigned long bootmap_size, bootmap;

	bootmap_size = bootmem_bootmap_pages(end_pfn)<<PAGE_SHIFT;
	bootmap = find_e820_area(0, end_pfn<<PAGE_SHIFT, bootmap_size);
	if (bootmap == -1L)
		panic("Cannot find bootmem map of size %ld\n", bootmap_size);
	bootmap_size = init_bootmem(bootmap >> PAGE_SHIFT, end_pfn);
	e820_register_active_regions(0, start_pfn, end_pfn);
	free_bootmem_with_active_regions(0, end_pfn);
	reserve_bootmem(bootmap, bootmap_size);
}
#endif

#if defined(CONFIG_EDD) || defined(CONFIG_EDD_MODULE)
struct edd edd;
#ifdef CONFIG_EDD_MODULE
EXPORT_SYMBOL(edd);
#endif
/**
 * copy_edd() - Copy the BIOS EDD information
 * from boot_params into a safe place.
 *
 */
static inline void copy_edd(void)
{
	memcpy(edd.mbr_signature, boot_params.edd_mbr_sig_buffer,
	       sizeof(edd.mbr_signature));
	memcpy(edd.edd_info, boot_params.eddbuf, sizeof(edd.edd_info));
	edd.mbr_signature_nr = boot_params.edd_mbr_sig_buf_entries;
	edd.edd_info_nr = boot_params.eddbuf_entries;
}
#else
static inline void copy_edd(void)
{
}
#endif

#ifdef CONFIG_KEXEC
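/* Reserve memory for the kdump capture kernel as requested via the crashkernel= boot option. */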
static void __init reserve_crashkernel(void)
{
	unsigned long long free_mem;
	unsigned long long crash_size, crash_base;
	int ret;

	free_mem = ((unsigned long long)max_low_pfn - min_low_pfn) << PAGE_SHIFT;

	ret = parse_crashkernel(boot_command_line, free_mem,
			&crash_size, &crash_base);
	if (ret == 0 && crash_size) {
		if (crash_base > 0) {
			printk(KERN_INFO "Reserving %ldMB of memory at %ldMB "
					"for crashkernel (System RAM: %ldMB)\n",
					(unsigned long)(crash_size >> 20),
					(unsigned long)(crash_base >> 20),
					(unsigned long)(free_mem >> 20));
			crashk_res.start = crash_base;
			crashk_res.end = crash_base + crash_size - 1;
			reserve_bootmem(crash_base, crash_size);
		} else
			printk(KERN_INFO "crashkernel reservation failed - "
					"you have to specify a base address\n");
	}
}
#else
static inline void __init reserve_crashkernel(void)
{}
#endif

#define EBDA_ADDR_POINTER 0x40E

unsigned __initdata ebda_addr;
unsigned __initdata ebda_size;

static void discover_ebda(void)
{
	/*
	 * there is a real-mode segmented pointer pointing to the
	 * 4K EBDA area at 0x40E
	 */
	ebda_addr = *(unsigned short *)__va(EBDA_ADDR_POINTER);
	ebda_addr <<= 4;

	ebda_size = *(unsigned short *)__va(ebda_addr);

	/* Round EBDA up to pages */
	if (ebda_size == 0)
		ebda_size = 1;
	ebda_size <<= 10;
	ebda_size = round_up(ebda_size + (ebda_addr & ~PAGE_MASK), PAGE_SIZE);
	if (ebda_size > 64*1024)
		ebda_size = 64*1024;
}

void __init setup_arch(char **cmdline_p)
{
	printk(KERN_INFO "Command line: %s\n", boot_command_line);

	ROOT_DEV = old_decode_dev(boot_params.hdr.root_dev);
	screen_info = boot_params.screen_info;
	edid_info = boot_params.edid_info;
	saved_video_mode = boot_params.hdr.vid_mode;
	bootloader_type = boot_params.hdr.type_of_loader;

#ifdef CONFIG_BLK_DEV_RAM
	rd_image_start = boot_params.hdr.ram_size & RAMDISK_IMAGE_START_MASK;
	rd_prompt = ((boot_params.hdr.ram_size & RAMDISK_PROMPT_FLAG) != 0);
	rd_doload = ((boot_params.hdr.ram_size & RAMDISK_LOAD_FLAG) != 0);
#endif
	setup_memory_region();
	copy_edd();

	if (!boot_params.hdr.root_flags)
		root_mountflags &= ~MS_RDONLY;
	init_mm.start_code = (unsigned long) &_text;
	init_mm.end_code = (unsigned long) &_etext;
	init_mm.end_data = (unsigned long) &_edata;
	init_mm.brk = (unsigned long) &_end;

	code_resource.start = virt_to_phys(&_text);
	code_resource.end = virt_to_phys(&_etext)-1;
	data_resource.start = virt_to_phys(&_etext);
	data_resource.end = virt_to_phys(&_edata)-1;
	bss_resource.start = virt_to_phys(&__bss_start);
	bss_resource.end = virt_to_phys(&__bss_stop)-1;

	early_identify_cpu(&boot_cpu_data);

	strlcpy(command_line, boot_command_line, COMMAND_LINE_SIZE);
	*cmdline_p = command_line;

	parse_early_param();

	finish_e820_parsing();

	e820_register_active_regions(0, 0, -1UL);
	/*
	 * partially used pages are not usable - thus
	 * we are rounding upwards:
	 */
	end_pfn = e820_end_of_ram();
	num_physpages = end_pfn;

	check_efer();

	discover_ebda();

	init_memory_mapping(0, (end_pfn_map << PAGE_SHIFT));

	dmi_scan_machine();

	io_delay_init();

#ifdef CONFIG_SMP
	/* setup to use the static apicid table during kernel startup */
	x86_cpu_to_apicid_ptr = (void *)&x86_cpu_to_apicid_init;
#endif

#ifdef CONFIG_ACPI
	/*
	 * Initialize the ACPI boot-time table parser (gets the RSDP and SDT).
	 * Call this early for SRAT node setup.
	 */
	acpi_boot_table_init();
#endif

	/* How many end-of-memory variables you have, grandma! */
	max_low_pfn = end_pfn;
	max_pfn = end_pfn;
	high_memory = (void *)__va(end_pfn * PAGE_SIZE - 1) + 1;

	/* Remove active ranges so rediscovery with NUMA-awareness happens */
	remove_all_active_ranges();

#ifdef CONFIG_ACPI_NUMA
	/*
	 * Parse SRAT to discover nodes.
	 */
	acpi_numa_init();
#endif

#ifdef CONFIG_NUMA
	numa_initmem_init(0, end_pfn);
#else
	contig_initmem_init(0, end_pfn);
#endif

	/* Reserve direct mapping */
	reserve_bootmem_generic(table_start << PAGE_SHIFT,
				(table_end - table_start) << PAGE_SHIFT);

	/* reserve kernel */
	reserve_bootmem_generic(__pa_symbol(&_text),
				__pa_symbol(&_end) - __pa_symbol(&_text));

	/*
	 * reserve physical page 0 - it's a special BIOS page on many boxes,
	 * enabling clean reboots, SMP operation, laptop functions.
	 */
	reserve_bootmem_generic(0, PAGE_SIZE);

	/* reserve ebda region */
	if (ebda_addr)
		reserve_bootmem_generic(ebda_addr, ebda_size);
#ifdef CONFIG_NUMA
	/* reserve nodemap region */
	if (nodemap_addr)
		reserve_bootmem_generic(nodemap_addr, nodemap_size);
#endif

#ifdef CONFIG_SMP
	/* Reserve SMP trampoline */
	reserve_bootmem_generic(SMP_TRAMPOLINE_BASE, 2*PAGE_SIZE);
#endif

#ifdef CONFIG_ACPI_SLEEP
	/*
	 * Reserve low memory region for sleep support.
	 */
	acpi_reserve_bootmem();
#endif
	/*
	 * Find and reserve possible boot-time SMP configuration:
	 */
	find_smp_config();
#ifdef CONFIG_BLK_DEV_INITRD
	if (boot_params.hdr.type_of_loader && boot_params.hdr.ramdisk_image) {
		unsigned long ramdisk_image = boot_params.hdr.ramdisk_image;
		unsigned long ramdisk_size = boot_params.hdr.ramdisk_size;
		unsigned long ramdisk_end = ramdisk_image + ramdisk_size;
		unsigned long end_of_mem = end_pfn << PAGE_SHIFT;

		if (ramdisk_end <= end_of_mem) {
			reserve_bootmem_generic(ramdisk_image, ramdisk_size);
			initrd_start = ramdisk_image + PAGE_OFFSET;
			initrd_end = initrd_start+ramdisk_size;
		} else {
			printk(KERN_ERR "initrd extends beyond end of memory "
			       "(0x%08lx > 0x%08lx)\ndisabling initrd\n",
			       ramdisk_end, end_of_mem);
			initrd_start = 0;
		}
	}
#endif
	reserve_crashkernel();
	paging_init();

	early_quirks();

	/*
	 * set this early, so we don't allocate cpu0
	 * if MADT list doesn't list BSP first
	 * mpparse.c/MP_processor_info() allocates logical cpu numbers.
	 */
	cpu_set(0, cpu_present_map);
#ifdef CONFIG_ACPI
	/*
	 * Read APIC and some other early information from ACPI tables.
	 */
	acpi_boot_init();
#endif

	init_cpu_to_node();

	/*
	 * get boot-time SMP configuration:
	 */
	if (smp_found_config)
		get_smp_config();
	init_apic_mappings();
	ioapic_init_mappings();

	/*
	 * We trust e820 completely. No explicit ROM probing in memory.
	 */
	e820_reserve_resources(&code_resource, &data_resource, &bss_resource);
	e820_mark_nosave_regions();

	{
	unsigned i;
	/* request I/O space for devices used on all i[345]86 PCs */
	for (i = 0; i < ARRAY_SIZE(standard_io_resources); i++)
		request_resource(&ioport_resource, &standard_io_resources[i]);
	}

	e820_setup_gap();

#ifdef CONFIG_VT
#if defined(CONFIG_VGA_CONSOLE)
	conswitchp = &vga_con;
#elif defined(CONFIG_DUMMY_CONSOLE)
	conswitchp = &dummy_con;
#endif
#endif
}

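/* Fill c->x86_model_id with the brand string from extended CPUID leaves 0x80000002..0x80000004. */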
static int __cpuinit get_model_name(struct cpuinfo_x86 *c)
{
	unsigned int *v;

	if (c->extended_cpuid_level < 0x80000004)
		return 0;

	v = (unsigned int *) c->x86_model_id;
	cpuid(0x80000002, &v[0], &v[1], &v[2], &v[3]);
	cpuid(0x80000003, &v[4], &v[5], &v[6], &v[7]);
	cpuid(0x80000004, &v[8], &v[9], &v[10], &v[11]);
	c->x86_model_id[48] = 0;
	return 1;
}


static void __cpuinit display_cacheinfo(struct cpuinfo_x86 *c)
{
	unsigned int n, dummy, eax, ebx, ecx, edx;

	n = c->extended_cpuid_level;

	if (n >= 0x80000005) {
		cpuid(0x80000005, &dummy, &ebx, &ecx, &edx);
		printk(KERN_INFO "CPU: L1 I Cache: %dK (%d bytes/line), D cache %dK (%d bytes/line)\n",
			edx>>24, edx&0xFF, ecx>>24, ecx&0xFF);
		c->x86_cache_size = (ecx>>24)+(edx>>24);
		/* On K8 L1 TLB is inclusive, so don't count it */
		c->x86_tlbsize = 0;
	}

	if (n >= 0x80000006) {
		cpuid(0x80000006, &dummy, &ebx, &ecx, &edx);
		ecx = cpuid_ecx(0x80000006);
		c->x86_cache_size = ecx >> 16;
		c->x86_tlbsize += ((ebx >> 16) & 0xfff) + (ebx & 0xfff);

		printk(KERN_INFO "CPU: L2 Cache: %dK (%d bytes/line)\n",
			c->x86_cache_size, ecx & 0xFF);
	}

	if (n >= 0x80000007)
		cpuid(0x80000007, &dummy, &dummy, &dummy, &c->x86_power);
	if (n >= 0x80000008) {
		cpuid(0x80000008, &eax, &dummy, &dummy, &dummy);
		c->x86_virt_bits = (eax >> 8) & 0xff;
		c->x86_phys_bits = eax & 0xff;
	}
}

#ifdef CONFIG_NUMA
static int nearby_node(int apicid)
{
	int i;
	for (i = apicid - 1; i >= 0; i--) {
		int node = apicid_to_node[i];
		if (node != NUMA_NO_NODE && node_online(node))
			return node;
	}
	for (i = apicid + 1; i < MAX_LOCAL_APIC; i++) {
		int node = apicid_to_node[i];
		if (node != NUMA_NO_NODE && node_online(node))
			return node;
	}
	return first_node(node_online_map); /* Shouldn't happen */
}
#endif

/*
 * On an AMD dual core setup the lower bits of the APIC id distinguish the cores.
 * Assumes number of cores is a power of two.
 */
static void __init amd_detect_cmp(struct cpuinfo_x86 *c)
{
#ifdef CONFIG_SMP
	unsigned bits;
#ifdef CONFIG_NUMA
	int cpu = smp_processor_id();
	int node = 0;
	unsigned apicid = hard_smp_processor_id();
#endif
	unsigned ecx = cpuid_ecx(0x80000008);

	c->x86_max_cores = (ecx & 0xff) + 1;

	/* CPU telling us the core id bits shift? */
	bits = (ecx >> 12) & 0xF;

	/* Otherwise recompute */
	if (bits == 0) {
		while ((1 << bits) < c->x86_max_cores)
			bits++;
	}

	/* Low order bits define the core id (index of core in socket) */
	c->cpu_core_id = c->phys_proc_id & ((1 << bits)-1);
	/* Convert the APIC ID into the socket ID */
	c->phys_proc_id = phys_pkg_id(bits);

#ifdef CONFIG_NUMA
	node = c->phys_proc_id;
	if (apicid_to_node[apicid] != NUMA_NO_NODE)
		node = apicid_to_node[apicid];
	if (!node_online(node)) {
		/* Two possibilities here:
		   - The CPU is missing memory and no node was created.
		     In that case try picking one from a nearby CPU
		   - The APIC IDs differ from the HyperTransport node IDs
		     which the K8 northbridge parsing fills in.
		     Assume they are all increased by a constant offset,
		     but in the same order as the HT nodeids.
		     If that doesn't result in a usable node fall back to the
		     path for the previous case. */
		int ht_nodeid = apicid - (cpu_data(0).phys_proc_id << bits);
		if (ht_nodeid >= 0 &&
		    apicid_to_node[ht_nodeid] != NUMA_NO_NODE)
			node = apicid_to_node[ht_nodeid];
		/* Pick a nearby node */
		if (!node_online(node))
			node = nearby_node(apicid);
	}
	numa_set_node(cpu, node);

	printk(KERN_INFO "CPU %d/%x -> Node %d\n", cpu, apicid, node);
#endif
#endif
}

#define ENABLE_C1E_MASK 0x18000000
#define CPUID_PROCESSOR_SIGNATURE 1
#define CPUID_XFAM 0x0ff00000
#define CPUID_XFAM_K8 0x00000000
#define CPUID_XFAM_10H 0x00100000
#define CPUID_XFAM_11H 0x00200000
#define CPUID_XMOD 0x000f0000
#define CPUID_XMOD_REV_F 0x00040000

/* AMD systems with C1E don't have a working lAPIC timer. Check for that. */
static __cpuinit int amd_apic_timer_broken(void)
{
	u32 lo, hi;
	u32 eax = cpuid_eax(CPUID_PROCESSOR_SIGNATURE);
	switch (eax & CPUID_XFAM) {
	case CPUID_XFAM_K8:
		if ((eax & CPUID_XMOD) < CPUID_XMOD_REV_F)
			break;
	case CPUID_XFAM_10H:
	case CPUID_XFAM_11H:
		rdmsr(MSR_K8_ENABLE_C1E, lo, hi);
		if (lo & ENABLE_C1E_MASK)
			return 1;
		break;
	default:
		/* err on the side of caution */
		return 1;
	}
	return 0;
}

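/* AMD-specific setup: K8 errata workarounds, feature-bit fixups and multi-core topology detection. */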
static void __cpuinit init_amd(struct cpuinfo_x86 *c)
{
	unsigned level;

#ifdef CONFIG_SMP
	unsigned long value;

	/*
	 * Disable TLB flush filter by setting HWCR.FFDIS on K8
	 * bit 6 of msr C001_0015
	 *
	 * Errata 63 for SH-B3 steppings
	 * Errata 122 for all steppings (F+ have it disabled by default)
	 */
	if (c->x86 == 15) {
		rdmsrl(MSR_K8_HWCR, value);
		value |= 1 << 6;
		wrmsrl(MSR_K8_HWCR, value);
	}
#endif

	/* Bit 31 in normal CPUID used for nonstandard 3DNow ID;
	   3DNow is IDd by bit 31 in extended CPUID (1*32+31) anyway */
	clear_bit(0*32+31, &c->x86_capability);

	/* On C+ stepping K8 rep microcode works well for copy/memset */
	level = cpuid_eax(1);
	if (c->x86 == 15 && ((level >= 0x0f48 && level < 0x0f50) || level >= 0x0f58))
		set_bit(X86_FEATURE_REP_GOOD, &c->x86_capability);
	if (c->x86 == 0x10 || c->x86 == 0x11)
		set_bit(X86_FEATURE_REP_GOOD, &c->x86_capability);

	/* Enable workaround for FXSAVE leak */
	if (c->x86 >= 6)
		set_bit(X86_FEATURE_FXSAVE_LEAK, &c->x86_capability);

	level = get_model_name(c);
	if (!level) {
		switch (c->x86) {
		case 15:
			/* Should distinguish Models here, but this is only
			   a fallback anyways. */
			strcpy(c->x86_model_id, "Hammer");
			break;
		}
	}
	display_cacheinfo(c);

	/* c->x86_power is 8000_0007 edx. Bit 8 is constant TSC */
	if (c->x86_power & (1<<8))
		set_bit(X86_FEATURE_CONSTANT_TSC, &c->x86_capability);

	/* Multi core CPU? */
	if (c->extended_cpuid_level >= 0x80000008)
		amd_detect_cmp(c);

	if (c->extended_cpuid_level >= 0x80000006 &&
	    (cpuid_edx(0x80000006) & 0xf000))
		num_cache_leaves = 4;
	else
		num_cache_leaves = 3;

	if (c->x86 == 0xf || c->x86 == 0x10 || c->x86 == 0x11)
		set_bit(X86_FEATURE_K8, &c->x86_capability);

	/* RDTSC can be speculated around */
	clear_bit(X86_FEATURE_SYNC_RDTSC, &c->x86_capability);

	/* Family 10 doesn't support C states in MWAIT so don't use it */
	if (c->x86 == 0x10 && !force_mwait)
		clear_bit(X86_FEATURE_MWAIT, &c->x86_capability);

	if (amd_apic_timer_broken())
		disable_apic_timer = 1;
}

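/* Detect Hyper-Threading siblings and derive the physical package and core IDs from CPUID leaf 1. */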
static void __cpuinit detect_ht(struct cpuinfo_x86 *c)
{
#ifdef CONFIG_SMP
	u32 eax, ebx, ecx, edx;
	int index_msb, core_bits;

	cpuid(1, &eax, &ebx, &ecx, &edx);


	if (!cpu_has(c, X86_FEATURE_HT))
		return;
	if (cpu_has(c, X86_FEATURE_CMP_LEGACY))
		goto out;

	smp_num_siblings = (ebx & 0xff0000) >> 16;

	if (smp_num_siblings == 1) {
		printk(KERN_INFO "CPU: Hyper-Threading is disabled\n");
	} else if (smp_num_siblings > 1) {

		if (smp_num_siblings > NR_CPUS) {
			printk(KERN_WARNING "CPU: Unsupported number of siblings %d", smp_num_siblings);
			smp_num_siblings = 1;
			return;
		}

		index_msb = get_count_order(smp_num_siblings);
		c->phys_proc_id = phys_pkg_id(index_msb);

		smp_num_siblings = smp_num_siblings / c->x86_max_cores;

		index_msb = get_count_order(smp_num_siblings);

		core_bits = get_count_order(c->x86_max_cores);

		c->cpu_core_id = phys_pkg_id(index_msb) &
				 ((1 << core_bits) - 1);
	}
out:
	if ((c->x86_max_cores * smp_num_siblings) > 1) {
		printk(KERN_INFO "CPU: Physical Processor ID: %d\n", c->phys_proc_id);
		printk(KERN_INFO "CPU: Processor Core ID: %d\n", c->cpu_core_id);
	}

#endif
}

/*
 * find out the number of processor cores on the die
 */
static int __cpuinit intel_num_cpu_cores(struct cpuinfo_x86 *c)
{
	unsigned int eax, t;

	if (c->cpuid_level < 4)
		return 1;

	cpuid_count(4, 0, &eax, &t, &t, &t);

	if (eax & 0x1f)
		return ((eax >> 26) + 1);
	else
		return 1;
}

static void srat_detect_node(void)
{
#ifdef CONFIG_NUMA
	unsigned node;
	int cpu = smp_processor_id();
	int apicid = hard_smp_processor_id();

	/* Don't do the funky fallback heuristics the AMD version employs
	   for now. */
	node = apicid_to_node[apicid];
	if (node == NUMA_NO_NODE)
		node = first_node(node_online_map);
	numa_set_node(cpu, node);

	printk(KERN_INFO "CPU %d/%x -> Node %d\n", cpu, apicid, node);
#endif
}

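/* Intel-specific setup: architectural perfmon, BTS/PEBS, address sizes and TSC-related feature bits. */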
static void __cpuinit init_intel(struct cpuinfo_x86 *c)
{
	/* Cache sizes */
	unsigned n;

	init_intel_cacheinfo(c);
	if (c->cpuid_level > 9) {
		unsigned eax = cpuid_eax(10);
		/* Check for version and the number of counters */
		if ((eax & 0xff) && (((eax>>8) & 0xff) > 1))
			set_bit(X86_FEATURE_ARCH_PERFMON, &c->x86_capability);
	}

	if (cpu_has_ds) {
		unsigned int l1, l2;
		rdmsr(MSR_IA32_MISC_ENABLE, l1, l2);
		if (!(l1 & (1<<11)))
			set_bit(X86_FEATURE_BTS, c->x86_capability);
		if (!(l1 & (1<<12)))
			set_bit(X86_FEATURE_PEBS, c->x86_capability);
	}

	n = c->extended_cpuid_level;
	if (n >= 0x80000008) {
		unsigned eax = cpuid_eax(0x80000008);
		c->x86_virt_bits = (eax >> 8) & 0xff;
		c->x86_phys_bits = eax & 0xff;
		/* CPUID workaround for Intel 0F34 CPU */
		if (c->x86_vendor == X86_VENDOR_INTEL &&
		    c->x86 == 0xF && c->x86_model == 0x3 &&
		    c->x86_mask == 0x4)
			c->x86_phys_bits = 36;
	}

	if (c->x86 == 15)
		c->x86_cache_alignment = c->x86_clflush_size * 2;
	if ((c->x86 == 0xf && c->x86_model >= 0x03) ||
	    (c->x86 == 0x6 && c->x86_model >= 0x0e))
		set_bit(X86_FEATURE_CONSTANT_TSC, &c->x86_capability);
	if (c->x86 == 6)
		set_bit(X86_FEATURE_REP_GOOD, &c->x86_capability);
	if (c->x86 == 15)
		set_bit(X86_FEATURE_SYNC_RDTSC, &c->x86_capability);
	else
		clear_bit(X86_FEATURE_SYNC_RDTSC, &c->x86_capability);
	c->x86_max_cores = intel_num_cpu_cores(c);

	srat_detect_node();
}

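/* Map the CPUID vendor string onto an X86_VENDOR_* constant. */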
static void __cpuinit get_cpu_vendor(struct cpuinfo_x86 *c)
{
	char *v = c->x86_vendor_id;

	if (!strcmp(v, "AuthenticAMD"))
		c->x86_vendor = X86_VENDOR_AMD;
	else if (!strcmp(v, "GenuineIntel"))
		c->x86_vendor = X86_VENDOR_INTEL;
	else
		c->x86_vendor = X86_VENDOR_UNKNOWN;
}

struct cpu_model_info {
	int vendor;
	int family;
	char *model_names[16];
};

/* Do some early cpuid on the boot CPU to get some parameters that are
   needed before check_bugs. Everything advanced is in identify_cpu
   below. */
static void __cpuinit early_identify_cpu(struct cpuinfo_x86 *c)
{
	u32 tfms;

	c->loops_per_jiffy = loops_per_jiffy;
	c->x86_cache_size = -1;
	c->x86_vendor = X86_VENDOR_UNKNOWN;
	c->x86_model = c->x86_mask = 0;	/* So far unknown... */
	c->x86_vendor_id[0] = '\0'; /* Unset */
	c->x86_model_id[0] = '\0';  /* Unset */
	c->x86_clflush_size = 64;
	c->x86_cache_alignment = c->x86_clflush_size;
	c->x86_max_cores = 1;
	c->extended_cpuid_level = 0;
	memset(&c->x86_capability, 0, sizeof c->x86_capability);

	/* Get vendor name */
	cpuid(0x00000000, (unsigned int *)&c->cpuid_level,
	      (unsigned int *)&c->x86_vendor_id[0],
	      (unsigned int *)&c->x86_vendor_id[8],
	      (unsigned int *)&c->x86_vendor_id[4]);

	get_cpu_vendor(c);

	/* Initialize the standard set of capabilities */
	/* Note that the vendor-specific code below might override */

	/* Intel-defined flags: level 0x00000001 */
	if (c->cpuid_level >= 0x00000001) {
		__u32 misc;
		cpuid(0x00000001, &tfms, &misc, &c->x86_capability[4],
		      &c->x86_capability[0]);
		c->x86 = (tfms >> 8) & 0xf;
		c->x86_model = (tfms >> 4) & 0xf;
		c->x86_mask = tfms & 0xf;
		if (c->x86 == 0xf)
			c->x86 += (tfms >> 20) & 0xff;
		if (c->x86 >= 0x6)
			c->x86_model += ((tfms >> 16) & 0xF) << 4;
		if (c->x86_capability[0] & (1<<19))
			c->x86_clflush_size = ((misc >> 8) & 0xff) * 8;
	} else {
		/* Have CPUID level 0 only - unheard of */
		c->x86 = 4;
	}

#ifdef CONFIG_SMP
	c->phys_proc_id = (cpuid_ebx(1) >> 24) & 0xff;
#endif
}

/*
 * This does the hard work of actually picking apart the CPU stuff...
 */
void __cpuinit identify_cpu(struct cpuinfo_x86 *c)
{
	int i;
	u32 xlvl;

	early_identify_cpu(c);

	/* AMD-defined flags: level 0x80000001 */
	xlvl = cpuid_eax(0x80000000);
	c->extended_cpuid_level = xlvl;
	if ((xlvl & 0xffff0000) == 0x80000000) {
		if (xlvl >= 0x80000001) {
			c->x86_capability[1] = cpuid_edx(0x80000001);
			c->x86_capability[6] = cpuid_ecx(0x80000001);
		}
		if (xlvl >= 0x80000004)
			get_model_name(c); /* Default name */
	}

	/* Transmeta-defined flags: level 0x80860001 */
	xlvl = cpuid_eax(0x80860000);
	if ((xlvl & 0xffff0000) == 0x80860000) {
		/* Don't set x86_cpuid_level here for now to not confuse. */
		if (xlvl >= 0x80860001)
			c->x86_capability[2] = cpuid_edx(0x80860001);
	}

	init_scattered_cpuid_features(c);

	c->apicid = phys_pkg_id(0);

	/*
	 * Vendor-specific initialization. In this section we
	 * canonicalize the feature flags, meaning if there are
	 * features a certain CPU supports which CPUID doesn't
	 * tell us, CPUID claiming incorrect flags, or other bugs,
	 * we handle them here.
	 *
	 * At the end of this section, c->x86_capability better
	 * indicate the features this CPU genuinely supports!
	 */
	switch (c->x86_vendor) {
	case X86_VENDOR_AMD:
		init_amd(c);
		break;

	case X86_VENDOR_INTEL:
		init_intel(c);
		break;

	case X86_VENDOR_UNKNOWN:
	default:
		display_cacheinfo(c);
		break;
	}

	select_idle_routine(c);
	detect_ht(c);

	/*
	 * On SMP, boot_cpu_data holds the common feature set between
	 * all CPUs; so make sure that we indicate which features are
	 * common between the CPUs. The first time this routine gets
	 * executed, c == &boot_cpu_data.
	 */
	if (c != &boot_cpu_data) {
		/* AND the already accumulated flags with these */
		for (i = 0 ; i < NCAPINTS ; i++)
			boot_cpu_data.x86_capability[i] &= c->x86_capability[i];
	}

#ifdef CONFIG_X86_MCE
	mcheck_init(c);
#endif
	if (c != &boot_cpu_data)
		mtrr_ap_init();
#ifdef CONFIG_NUMA
	numa_add_cpu(smp_processor_id());
#endif
}


void __cpuinit print_cpu_info(struct cpuinfo_x86 *c)
{
	if (c->x86_model_id[0])
		printk("%s", c->x86_model_id);

	if (c->x86_mask || c->cpuid_level >= 0)
		printk(" stepping %02x\n", c->x86_mask);
	else
		printk("\n");
}

/*
 * Get CPU information for use by the procfs.
 */

static int show_cpuinfo(struct seq_file *m, void *v)
{
	struct cpuinfo_x86 *c = v;
	int cpu = 0;

	/*
	 * These flag bits must match the definitions in <asm/cpufeature.h>.
	 * NULL means this bit is undefined or reserved; either way it doesn't
	 * have meaning as far as Linux is concerned. Note that it's important
	 * to realize there is a difference between this table and CPUID -- if
	 * applications want to get the raw CPUID data, they should access
	 * /dev/cpu/<cpu_nr>/cpuid instead.
	 */
	static const char *const x86_cap_flags[] = {
		/* Intel-defined */
		"fpu", "vme", "de", "pse", "tsc", "msr", "pae", "mce",
		"cx8", "apic", NULL, "sep", "mtrr", "pge", "mca", "cmov",
		"pat", "pse36", "pn", "clflush", NULL, "dts", "acpi", "mmx",
		"fxsr", "sse", "sse2", "ss", "ht", "tm", "ia64", "pbe",

		/* AMD-defined */
		NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
		NULL, NULL, NULL, "syscall", NULL, NULL, NULL, NULL,
		NULL, NULL, NULL, NULL, "nx", NULL, "mmxext", NULL,
		NULL, "fxsr_opt", "pdpe1gb", "rdtscp", NULL, "lm",
		"3dnowext", "3dnow",

		/* Transmeta-defined */
		"recovery", "longrun", NULL, "lrti", NULL, NULL, NULL, NULL,
		NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
		NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
		NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,

		/* Other (Linux-defined) */
		"cxmmx", "k6_mtrr", "cyrix_arr", "centaur_mcr",
		NULL, NULL, NULL, NULL,
		"constant_tsc", "up", NULL, "arch_perfmon",
		"pebs", "bts", NULL, "sync_rdtsc",
		"rep_good", NULL, NULL, NULL, NULL, NULL, NULL, NULL,
		NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,

		/* Intel-defined (#2) */
		"pni", NULL, NULL, "monitor", "ds_cpl", "vmx", "smx", "est",
		"tm2", "ssse3", "cid", NULL, NULL, "cx16", "xtpr", NULL,
		NULL, NULL, "dca", "sse4_1", "sse4_2", NULL, NULL, "popcnt",
		NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,

		/* VIA/Cyrix/Centaur-defined */
		NULL, NULL, "rng", "rng_en", NULL, NULL, "ace", "ace_en",
		"ace2", "ace2_en", "phe", "phe_en", "pmm", "pmm_en", NULL, NULL,
		NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
		NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,

		/* AMD-defined (#2) */
		"lahf_lm", "cmp_legacy", "svm", "extapic",
		"cr8_legacy", "abm", "sse4a", "misalignsse",
		"3dnowprefetch", "osvw", "ibs", "sse5",
		"skinit", "wdt", NULL, NULL,
		NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
		NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,

		/* Auxiliary (Linux-defined) */
		"ida", NULL, NULL, NULL, NULL, NULL, NULL, NULL,
		NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
		NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
		NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
	};
	static const char *const x86_power_flags[] = {
		"ts",	/* temperature sensor */
		"fid",	/* frequency id control */
		"vid",	/* voltage id control */
		"ttp",	/* thermal trip */
		"tm",
		"stc",
		"100mhzsteps",
		"hwpstate",
		"",	/* tsc invariant mapped to constant_tsc */
		/* nothing */
	};


#ifdef CONFIG_SMP
	cpu = c->cpu_index;
#endif

	seq_printf(m, "processor\t: %u\n"
		   "vendor_id\t: %s\n"
		   "cpu family\t: %d\n"
		   "model\t\t: %d\n"
		   "model name\t: %s\n",
		   (unsigned)cpu,
		   c->x86_vendor_id[0] ? c->x86_vendor_id : "unknown",
		   c->x86,
		   (int)c->x86_model,
		   c->x86_model_id[0] ? c->x86_model_id : "unknown");

	if (c->x86_mask || c->cpuid_level >= 0)
		seq_printf(m, "stepping\t: %d\n", c->x86_mask);
	else
		seq_printf(m, "stepping\t: unknown\n");

	if (cpu_has(c, X86_FEATURE_TSC)) {
		unsigned int freq = cpufreq_quick_get((unsigned)cpu);
		if (!freq)
			freq = cpu_khz;
		seq_printf(m, "cpu MHz\t\t: %u.%03u\n",
			   freq / 1000, (freq % 1000));
	}

	/* Cache size */
	if (c->x86_cache_size >= 0)
		seq_printf(m, "cache size\t: %d KB\n", c->x86_cache_size);

#ifdef CONFIG_SMP
	if (smp_num_siblings * c->x86_max_cores > 1) {
		seq_printf(m, "physical id\t: %d\n", c->phys_proc_id);
		seq_printf(m, "siblings\t: %d\n",
			   cpus_weight(per_cpu(cpu_core_map, cpu)));
		seq_printf(m, "core id\t\t: %d\n", c->cpu_core_id);
		seq_printf(m, "cpu cores\t: %d\n", c->booted_cores);
	}
#endif

	seq_printf(m,
		   "fpu\t\t: yes\n"
		   "fpu_exception\t: yes\n"
		   "cpuid level\t: %d\n"
		   "wp\t\t: yes\n"
		   "flags\t\t:",
		   c->cpuid_level);

	{
		int i;
		for ( i = 0 ; i < 32*NCAPINTS ; i++ )
			if (cpu_has(c, i) && x86_cap_flags[i] != NULL)
				seq_printf(m, " %s", x86_cap_flags[i]);
	}

	seq_printf(m, "\nbogomips\t: %lu.%02lu\n",
		   c->loops_per_jiffy/(500000/HZ),
		   (c->loops_per_jiffy/(5000/HZ)) % 100);

	if (c->x86_tlbsize > 0)
		seq_printf(m, "TLB size\t: %d 4K pages\n", c->x86_tlbsize);
	seq_printf(m, "clflush size\t: %d\n", c->x86_clflush_size);
	seq_printf(m, "cache_alignment\t: %d\n", c->x86_cache_alignment);

	seq_printf(m, "address sizes\t: %u bits physical, %u bits virtual\n",
		   c->x86_phys_bits, c->x86_virt_bits);

	seq_printf(m, "power management:");
	{
		unsigned i;
		for (i = 0; i < 32; i++)
			if (c->x86_power & (1 << i)) {
				if (i < ARRAY_SIZE(x86_power_flags) &&
				    x86_power_flags[i])
					seq_printf(m, "%s%s",
						   x86_power_flags[i][0] ? " " : "",
						   x86_power_flags[i]);
				else
					seq_printf(m, " [%d]", i);
			}
	}

	seq_printf(m, "\n\n");

	return 0;
}

static void *c_start(struct seq_file *m, loff_t *pos)
{
	if (*pos == 0)	/* just in case, cpu 0 is not the first */
		*pos = first_cpu(cpu_online_map);
	if ((*pos) < NR_CPUS && cpu_online(*pos))
		return &cpu_data(*pos);
	return NULL;
}

static void *c_next(struct seq_file *m, void *v, loff_t *pos)
{
	*pos = next_cpu(*pos, cpu_online_map);
	return c_start(m, pos);
}

static void c_stop(struct seq_file *m, void *v)
{
}

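/* seq_file operations backing /proc/cpuinfo */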
struct seq_operations cpuinfo_op = {
	.start = c_start,
	.next = c_next,
	.stop = c_stop,
	.show = show_cpuinfo,
};