/*
 * arch/x86/kernel/setup_64.c
 * (recovered from a gitweb/blame export; annotation residue removed)
 */
1da177e4 1/*
1da177e4 2 * Copyright (C) 1995 Linus Torvalds
1da177e4
LT
3 */
4
5/*
6 * This file handles the architecture-dependent parts of initialization
7 */
8
9#include <linux/errno.h>
10#include <linux/sched.h>
11#include <linux/kernel.h>
12#include <linux/mm.h>
13#include <linux/stddef.h>
14#include <linux/unistd.h>
15#include <linux/ptrace.h>
16#include <linux/slab.h>
17#include <linux/user.h>
18#include <linux/a.out.h>
894673ee 19#include <linux/screen_info.h>
1da177e4
LT
20#include <linux/ioport.h>
21#include <linux/delay.h>
1da177e4
LT
22#include <linux/init.h>
23#include <linux/initrd.h>
24#include <linux/highmem.h>
25#include <linux/bootmem.h>
26#include <linux/module.h>
27#include <asm/processor.h>
28#include <linux/console.h>
29#include <linux/seq_file.h>
aac04b32 30#include <linux/crash_dump.h>
1da177e4
LT
31#include <linux/root_dev.h>
32#include <linux/pci.h>
33#include <linux/acpi.h>
34#include <linux/kallsyms.h>
35#include <linux/edd.h>
bbfceef4 36#include <linux/mmzone.h>
5f5609df 37#include <linux/kexec.h>
95235ca2 38#include <linux/cpufreq.h>
e9928674 39#include <linux/dmi.h>
17a941d8 40#include <linux/dma-mapping.h>
681558fd 41#include <linux/ctype.h>
bbfceef4 42
1da177e4
LT
43#include <asm/mtrr.h>
44#include <asm/uaccess.h>
45#include <asm/system.h>
46#include <asm/io.h>
47#include <asm/smp.h>
48#include <asm/msr.h>
49#include <asm/desc.h>
50#include <video/edid.h>
51#include <asm/e820.h>
52#include <asm/dma.h>
53#include <asm/mpspec.h>
54#include <asm/mmu_context.h>
1da177e4
LT
55#include <asm/proto.h>
56#include <asm/setup.h>
57#include <asm/mach_apic.h>
58#include <asm/numa.h>
2bc0414e 59#include <asm/sections.h>
f2d3efed 60#include <asm/dmi.h>
1da177e4
LT
61
62/*
63 * Machine setup..
64 */
65
6c231b7b 66struct cpuinfo_x86 boot_cpu_data __read_mostly;
2ee60e17 67EXPORT_SYMBOL(boot_cpu_data);
1da177e4
LT
68
69unsigned long mmu_cr4_features;
70
1da177e4
LT
71/* Boot loader ID as an integer, for the benefit of proc_dointvec */
72int bootloader_type;
73
74unsigned long saved_video_mode;
75
f039b754
AK
76int force_mwait __cpuinitdata;
77
f2d3efed
AK
78/*
79 * Early DMI memory
80 */
81int dmi_alloc_index;
82char dmi_alloc_data[DMI_MAX_DATA];
83
1da177e4
LT
84/*
85 * Setup options
86 */
1da177e4 87struct screen_info screen_info;
2ee60e17 88EXPORT_SYMBOL(screen_info);
1da177e4
LT
89struct sys_desc_table_struct {
90 unsigned short length;
91 unsigned char table[0];
92};
93
94struct edid_info edid_info;
ba70710e 95EXPORT_SYMBOL_GPL(edid_info);
1da177e4
LT
96
97extern int root_mountflags;
1da177e4 98
adf48856 99char __initdata command_line[COMMAND_LINE_SIZE];
1da177e4
LT
100
101struct resource standard_io_resources[] = {
102 { .name = "dma1", .start = 0x00, .end = 0x1f,
103 .flags = IORESOURCE_BUSY | IORESOURCE_IO },
104 { .name = "pic1", .start = 0x20, .end = 0x21,
105 .flags = IORESOURCE_BUSY | IORESOURCE_IO },
106 { .name = "timer0", .start = 0x40, .end = 0x43,
107 .flags = IORESOURCE_BUSY | IORESOURCE_IO },
108 { .name = "timer1", .start = 0x50, .end = 0x53,
109 .flags = IORESOURCE_BUSY | IORESOURCE_IO },
110 { .name = "keyboard", .start = 0x60, .end = 0x6f,
111 .flags = IORESOURCE_BUSY | IORESOURCE_IO },
112 { .name = "dma page reg", .start = 0x80, .end = 0x8f,
113 .flags = IORESOURCE_BUSY | IORESOURCE_IO },
114 { .name = "pic2", .start = 0xa0, .end = 0xa1,
115 .flags = IORESOURCE_BUSY | IORESOURCE_IO },
116 { .name = "dma2", .start = 0xc0, .end = 0xdf,
117 .flags = IORESOURCE_BUSY | IORESOURCE_IO },
118 { .name = "fpu", .start = 0xf0, .end = 0xff,
119 .flags = IORESOURCE_BUSY | IORESOURCE_IO }
120};
121
1da177e4
LT
122#define IORESOURCE_RAM (IORESOURCE_BUSY | IORESOURCE_MEM)
123
124struct resource data_resource = {
125 .name = "Kernel data",
126 .start = 0,
127 .end = 0,
128 .flags = IORESOURCE_RAM,
129};
130struct resource code_resource = {
131 .name = "Kernel code",
132 .start = 0,
133 .end = 0,
134 .flags = IORESOURCE_RAM,
135};
136
#ifdef CONFIG_PROC_VMCORE
/*
 * elfcorehdr= gives the physical address of the ELF core header
 * left behind by a crashed kernel.  The kexec loader passes it on
 * the command line of the capture kernel.
 */
static int __init setup_elfcorehdr(char *arg)
{
	char *tail;

	if (!arg)
		return -EINVAL;

	elfcorehdr_addr = memparse(arg, &tail);
	/* Succeed only if memparse consumed at least one character. */
	return (tail > arg) ? 0 : -EINVAL;
}
early_param("elfcorehdr", setup_elfcorehdr);
#endif
152
2b97690f 153#ifndef CONFIG_NUMA
bbfceef4
MT
154static void __init
155contig_initmem_init(unsigned long start_pfn, unsigned long end_pfn)
1da177e4 156{
bbfceef4
MT
157 unsigned long bootmap_size, bootmap;
158
bbfceef4
MT
159 bootmap_size = bootmem_bootmap_pages(end_pfn)<<PAGE_SHIFT;
160 bootmap = find_e820_area(0, end_pfn<<PAGE_SHIFT, bootmap_size);
161 if (bootmap == -1L)
162 panic("Cannot find bootmem map of size %ld\n",bootmap_size);
163 bootmap_size = init_bootmem(bootmap >> PAGE_SHIFT, end_pfn);
5cb248ab
MG
164 e820_register_active_regions(0, start_pfn, end_pfn);
165 free_bootmem_with_active_regions(0, end_pfn);
bbfceef4 166 reserve_bootmem(bootmap, bootmap_size);
1da177e4
LT
167}
168#endif
169
1da177e4
LT
#if defined(CONFIG_EDD) || defined(CONFIG_EDD_MODULE)
struct edd edd;
#ifdef CONFIG_EDD_MODULE
EXPORT_SYMBOL(edd);
#endif
/**
 * copy_edd() - Copy the BIOS Enhanced Disk Drive information out of
 * boot_params into the permanent 'edd' structure before the boot
 * parameter area may be recycled.
 */
static inline void copy_edd(void)
{
	memcpy(edd.mbr_signature, boot_params.edd_mbr_sig_buffer,
	       sizeof(edd.mbr_signature));
	memcpy(edd.edd_info, boot_params.eddbuf, sizeof(edd.edd_info));
	edd.mbr_signature_nr = boot_params.edd_mbr_sig_buf_entries;
	edd.edd_info_nr = boot_params.eddbuf_entries;
}
#else
/* EDD support compiled out: nothing to preserve. */
static inline void copy_edd(void)
{
}
#endif
193
5c3391f9
BW
194#ifdef CONFIG_KEXEC
195static void __init reserve_crashkernel(void)
196{
197 unsigned long long free_mem;
198 unsigned long long crash_size, crash_base;
199 int ret;
200
201 free_mem = ((unsigned long long)max_low_pfn - min_low_pfn) << PAGE_SHIFT;
202
203 ret = parse_crashkernel(boot_command_line, free_mem,
204 &crash_size, &crash_base);
205 if (ret == 0 && crash_size) {
206 if (crash_base > 0) {
207 printk(KERN_INFO "Reserving %ldMB of memory at %ldMB "
208 "for crashkernel (System RAM: %ldMB)\n",
209 (unsigned long)(crash_size >> 20),
210 (unsigned long)(crash_base >> 20),
211 (unsigned long)(free_mem >> 20));
212 crashk_res.start = crash_base;
213 crashk_res.end = crash_base + crash_size - 1;
214 reserve_bootmem(crash_base, crash_size);
215 } else
216 printk(KERN_INFO "crashkernel reservation failed - "
217 "you have to specify a base address\n");
218 }
219}
220#else
221static inline void __init reserve_crashkernel(void)
222{}
223#endif
224
1da177e4 225#define EBDA_ADDR_POINTER 0x40E
ac71d12c
AK
226
227unsigned __initdata ebda_addr;
228unsigned __initdata ebda_size;
229
230static void discover_ebda(void)
1da177e4 231{
ac71d12c 232 /*
1da177e4
LT
233 * there is a real-mode segmented pointer pointing to the
234 * 4K EBDA area at 0x40E
235 */
bdb96a66 236 ebda_addr = *(unsigned short *)__va(EBDA_ADDR_POINTER);
ac71d12c
AK
237 ebda_addr <<= 4;
238
bdb96a66 239 ebda_size = *(unsigned short *)__va(ebda_addr);
ac71d12c
AK
240
241 /* Round EBDA up to pages */
242 if (ebda_size == 0)
243 ebda_size = 1;
244 ebda_size <<= 10;
245 ebda_size = round_up(ebda_size + (ebda_addr & ~PAGE_MASK), PAGE_SIZE);
246 if (ebda_size > 64*1024)
247 ebda_size = 64*1024;
1da177e4
LT
248}
249
250void __init setup_arch(char **cmdline_p)
251{
adf48856 252 printk(KERN_INFO "Command line: %s\n", boot_command_line);
43c85c9c 253
30c82645
PA
254 ROOT_DEV = old_decode_dev(boot_params.hdr.root_dev);
255 screen_info = boot_params.screen_info;
256 edid_info = boot_params.edid_info;
257 saved_video_mode = boot_params.hdr.vid_mode;
258 bootloader_type = boot_params.hdr.type_of_loader;
1da177e4
LT
259
260#ifdef CONFIG_BLK_DEV_RAM
30c82645
PA
261 rd_image_start = boot_params.hdr.ram_size & RAMDISK_IMAGE_START_MASK;
262 rd_prompt = ((boot_params.hdr.ram_size & RAMDISK_PROMPT_FLAG) != 0);
263 rd_doload = ((boot_params.hdr.ram_size & RAMDISK_LOAD_FLAG) != 0);
1da177e4
LT
264#endif
265 setup_memory_region();
266 copy_edd();
267
30c82645 268 if (!boot_params.hdr.root_flags)
1da177e4
LT
269 root_mountflags &= ~MS_RDONLY;
270 init_mm.start_code = (unsigned long) &_text;
271 init_mm.end_code = (unsigned long) &_etext;
272 init_mm.end_data = (unsigned long) &_edata;
273 init_mm.brk = (unsigned long) &_end;
274
e3ebadd9
LT
275 code_resource.start = virt_to_phys(&_text);
276 code_resource.end = virt_to_phys(&_etext)-1;
277 data_resource.start = virt_to_phys(&_etext);
278 data_resource.end = virt_to_phys(&_edata)-1;
1da177e4 279
1da177e4
LT
280 early_identify_cpu(&boot_cpu_data);
281
adf48856 282 strlcpy(command_line, boot_command_line, COMMAND_LINE_SIZE);
2c8c0e6b
AK
283 *cmdline_p = command_line;
284
285 parse_early_param();
286
287 finish_e820_parsing();
9ca33eb6 288
5cb248ab 289 e820_register_active_regions(0, 0, -1UL);
1da177e4
LT
290 /*
291 * partially used pages are not usable - thus
292 * we are rounding upwards:
293 */
294 end_pfn = e820_end_of_ram();
caff0710 295 num_physpages = end_pfn;
1da177e4
LT
296
297 check_efer();
298
ac71d12c
AK
299 discover_ebda();
300
1da177e4
LT
301 init_memory_mapping(0, (end_pfn_map << PAGE_SHIFT));
302
f2d3efed
AK
303 dmi_scan_machine();
304
71fff5e6
MT
305#ifdef CONFIG_SMP
306 /* setup to use the static apicid table during kernel startup */
307 x86_cpu_to_apicid_ptr = (void *)&x86_cpu_to_apicid_init;
308#endif
309
888ba6c6 310#ifdef CONFIG_ACPI
1da177e4
LT
311 /*
312 * Initialize the ACPI boot-time table parser (gets the RSDP and SDT).
313 * Call this early for SRAT node setup.
314 */
315 acpi_boot_table_init();
316#endif
317
caff0710
JB
318 /* How many end-of-memory variables you have, grandma! */
319 max_low_pfn = end_pfn;
320 max_pfn = end_pfn;
321 high_memory = (void *)__va(end_pfn * PAGE_SIZE - 1) + 1;
322
5cb248ab
MG
323 /* Remove active ranges so rediscovery with NUMA-awareness happens */
324 remove_all_active_ranges();
325
1da177e4
LT
326#ifdef CONFIG_ACPI_NUMA
327 /*
328 * Parse SRAT to discover nodes.
329 */
330 acpi_numa_init();
331#endif
332
2b97690f 333#ifdef CONFIG_NUMA
1da177e4
LT
334 numa_initmem_init(0, end_pfn);
335#else
bbfceef4 336 contig_initmem_init(0, end_pfn);
1da177e4
LT
337#endif
338
339 /* Reserve direct mapping */
340 reserve_bootmem_generic(table_start << PAGE_SHIFT,
341 (table_end - table_start) << PAGE_SHIFT);
342
343 /* reserve kernel */
ceee8822
AK
344 reserve_bootmem_generic(__pa_symbol(&_text),
345 __pa_symbol(&_end) - __pa_symbol(&_text));
1da177e4
LT
346
347 /*
348 * reserve physical page 0 - it's a special BIOS page on many boxes,
349 * enabling clean reboots, SMP operation, laptop functions.
350 */
351 reserve_bootmem_generic(0, PAGE_SIZE);
352
353 /* reserve ebda region */
ac71d12c
AK
354 if (ebda_addr)
355 reserve_bootmem_generic(ebda_addr, ebda_size);
076422d2
AS
356#ifdef CONFIG_NUMA
357 /* reserve nodemap region */
358 if (nodemap_addr)
359 reserve_bootmem_generic(nodemap_addr, nodemap_size);
360#endif
1da177e4
LT
361
362#ifdef CONFIG_SMP
1da177e4 363 /* Reserve SMP trampoline */
90b1c208 364 reserve_bootmem_generic(SMP_TRAMPOLINE_BASE, 2*PAGE_SIZE);
1da177e4
LT
365#endif
366
673d5b43 367#ifdef CONFIG_ACPI_SLEEP
1da177e4
LT
368 /*
369 * Reserve low memory region for sleep support.
370 */
371 acpi_reserve_bootmem();
372#endif
1da177e4
LT
373 /*
374 * Find and reserve possible boot-time SMP configuration:
375 */
376 find_smp_config();
1da177e4 377#ifdef CONFIG_BLK_DEV_INITRD
30c82645
PA
378 if (boot_params.hdr.type_of_loader && boot_params.hdr.ramdisk_image) {
379 unsigned long ramdisk_image = boot_params.hdr.ramdisk_image;
380 unsigned long ramdisk_size = boot_params.hdr.ramdisk_size;
381 unsigned long ramdisk_end = ramdisk_image + ramdisk_size;
382 unsigned long end_of_mem = end_pfn << PAGE_SHIFT;
383
384 if (ramdisk_end <= end_of_mem) {
385 reserve_bootmem_generic(ramdisk_image, ramdisk_size);
386 initrd_start = ramdisk_image + PAGE_OFFSET;
387 initrd_end = initrd_start+ramdisk_size;
388 } else {
1da177e4 389 printk(KERN_ERR "initrd extends beyond end of memory "
30c82645
PA
390 "(0x%08lx > 0x%08lx)\ndisabling initrd\n",
391 ramdisk_end, end_of_mem);
1da177e4
LT
392 initrd_start = 0;
393 }
394 }
395#endif
5c3391f9 396 reserve_crashkernel();
1da177e4
LT
397 paging_init();
398
f157cbb1 399#ifdef CONFIG_PCI
dfa4698c 400 early_quirks();
f157cbb1 401#endif
1da177e4 402
51f62e18
AR
403 /*
404 * set this early, so we dont allocate cpu0
405 * if MADT list doesnt list BSP first
406 * mpparse.c/MP_processor_info() allocates logical cpu numbers.
407 */
408 cpu_set(0, cpu_present_map);
888ba6c6 409#ifdef CONFIG_ACPI
1da177e4
LT
410 /*
411 * Read APIC and some other early information from ACPI tables.
412 */
413 acpi_boot_init();
414#endif
415
05b3cbd8
RT
416 init_cpu_to_node();
417
1da177e4
LT
418 /*
419 * get boot-time SMP configuration:
420 */
421 if (smp_found_config)
422 get_smp_config();
423 init_apic_mappings();
1da177e4
LT
424
425 /*
fc986db4
AK
426 * We trust e820 completely. No explicit ROM probing in memory.
427 */
1da177e4 428 e820_reserve_resources();
e8eff5ac 429 e820_mark_nosave_regions();
1da177e4 430
1da177e4
LT
431 {
432 unsigned i;
433 /* request I/O space for devices used on all i[345]86 PCs */
9d0ef4fd 434 for (i = 0; i < ARRAY_SIZE(standard_io_resources); i++)
1da177e4
LT
435 request_resource(&ioport_resource, &standard_io_resources[i]);
436 }
437
a1e97782 438 e820_setup_gap();
1da177e4 439
1da177e4
LT
440#ifdef CONFIG_VT
441#if defined(CONFIG_VGA_CONSOLE)
442 conswitchp = &vga_con;
443#elif defined(CONFIG_DUMMY_CONSOLE)
444 conswitchp = &dummy_con;
445#endif
446#endif
447}
448
e6982c67 449static int __cpuinit get_model_name(struct cpuinfo_x86 *c)
1da177e4
LT
450{
451 unsigned int *v;
452
ebfcaa96 453 if (c->extended_cpuid_level < 0x80000004)
1da177e4
LT
454 return 0;
455
456 v = (unsigned int *) c->x86_model_id;
457 cpuid(0x80000002, &v[0], &v[1], &v[2], &v[3]);
458 cpuid(0x80000003, &v[4], &v[5], &v[6], &v[7]);
459 cpuid(0x80000004, &v[8], &v[9], &v[10], &v[11]);
460 c->x86_model_id[48] = 0;
461 return 1;
462}
463
464
e6982c67 465static void __cpuinit display_cacheinfo(struct cpuinfo_x86 *c)
1da177e4
LT
466{
467 unsigned int n, dummy, eax, ebx, ecx, edx;
468
ebfcaa96 469 n = c->extended_cpuid_level;
1da177e4
LT
470
471 if (n >= 0x80000005) {
472 cpuid(0x80000005, &dummy, &ebx, &ecx, &edx);
473 printk(KERN_INFO "CPU: L1 I Cache: %dK (%d bytes/line), D cache %dK (%d bytes/line)\n",
474 edx>>24, edx&0xFF, ecx>>24, ecx&0xFF);
475 c->x86_cache_size=(ecx>>24)+(edx>>24);
476 /* On K8 L1 TLB is inclusive, so don't count it */
477 c->x86_tlbsize = 0;
478 }
479
480 if (n >= 0x80000006) {
481 cpuid(0x80000006, &dummy, &ebx, &ecx, &edx);
482 ecx = cpuid_ecx(0x80000006);
483 c->x86_cache_size = ecx >> 16;
484 c->x86_tlbsize += ((ebx >> 16) & 0xfff) + (ebx & 0xfff);
485
486 printk(KERN_INFO "CPU: L2 Cache: %dK (%d bytes/line)\n",
487 c->x86_cache_size, ecx & 0xFF);
488 }
489
490 if (n >= 0x80000007)
491 cpuid(0x80000007, &dummy, &dummy, &dummy, &c->x86_power);
492 if (n >= 0x80000008) {
493 cpuid(0x80000008, &eax, &dummy, &dummy, &dummy);
494 c->x86_virt_bits = (eax >> 8) & 0xff;
495 c->x86_phys_bits = eax & 0xff;
496 }
497}
498
3f098c26
AK
#ifdef CONFIG_NUMA
/* Return the online NUMA node whose APIC id lies closest to @apicid. */
static int nearby_node(int apicid)
{
	int i, node;

	/* Look downwards first, then upwards, for a mapped online node. */
	for (i = apicid - 1; i >= 0; i--) {
		node = apicid_to_node[i];
		if (node != NUMA_NO_NODE && node_online(node))
			return node;
	}
	for (i = apicid + 1; i < MAX_LOCAL_APIC; i++) {
		node = apicid_to_node[i];
		if (node != NUMA_NO_NODE && node_online(node))
			return node;
	}
	return first_node(node_online_map); /* Shouldn't happen */
}
#endif
516
63518644
AK
517/*
518 * On a AMD dual core setup the lower bits of the APIC id distingush the cores.
519 * Assumes number of cores is a power of two.
520 */
521static void __init amd_detect_cmp(struct cpuinfo_x86 *c)
522{
523#ifdef CONFIG_SMP
b41e2939 524 unsigned bits;
3f098c26 525#ifdef CONFIG_NUMA
f3fa8ebc 526 int cpu = smp_processor_id();
3f098c26 527 int node = 0;
60c1bc82 528 unsigned apicid = hard_smp_processor_id();
3f098c26 529#endif
faee9a5d 530 unsigned ecx = cpuid_ecx(0x80000008);
b41e2939 531
faee9a5d 532 c->x86_max_cores = (ecx & 0xff) + 1;
b41e2939 533
faee9a5d
AK
534 /* CPU telling us the core id bits shift? */
535 bits = (ecx >> 12) & 0xF;
536
537 /* Otherwise recompute */
538 if (bits == 0) {
539 while ((1 << bits) < c->x86_max_cores)
540 bits++;
541 }
b41e2939
AK
542
543 /* Low order bits define the core id (index of core in socket) */
f3fa8ebc 544 c->cpu_core_id = c->phys_proc_id & ((1 << bits)-1);
b41e2939 545 /* Convert the APIC ID into the socket ID */
f3fa8ebc 546 c->phys_proc_id = phys_pkg_id(bits);
63518644
AK
547
548#ifdef CONFIG_NUMA
f3fa8ebc 549 node = c->phys_proc_id;
3f098c26
AK
550 if (apicid_to_node[apicid] != NUMA_NO_NODE)
551 node = apicid_to_node[apicid];
552 if (!node_online(node)) {
553 /* Two possibilities here:
554 - The CPU is missing memory and no node was created.
555 In that case try picking one from a nearby CPU
556 - The APIC IDs differ from the HyperTransport node IDs
557 which the K8 northbridge parsing fills in.
558 Assume they are all increased by a constant offset,
559 but in the same order as the HT nodeids.
560 If that doesn't result in a usable node fall back to the
561 path for the previous case. */
92cb7612 562 int ht_nodeid = apicid - (cpu_data(0).phys_proc_id << bits);
3f098c26
AK
563 if (ht_nodeid >= 0 &&
564 apicid_to_node[ht_nodeid] != NUMA_NO_NODE)
565 node = apicid_to_node[ht_nodeid];
566 /* Pick a nearby node */
567 if (!node_online(node))
568 node = nearby_node(apicid);
569 }
69d81fcd 570 numa_set_node(cpu, node);
3f098c26 571
e42f9437 572 printk(KERN_INFO "CPU %d/%x -> Node %d\n", cpu, apicid, node);
63518644 573#endif
63518644
AK
574#endif
575}
1da177e4 576
fb79d22e
TG
577#define ENABLE_C1E_MASK 0x18000000
578#define CPUID_PROCESSOR_SIGNATURE 1
579#define CPUID_XFAM 0x0ff00000
580#define CPUID_XFAM_K8 0x00000000
581#define CPUID_XFAM_10H 0x00100000
582#define CPUID_XFAM_11H 0x00200000
583#define CPUID_XMOD 0x000f0000
584#define CPUID_XMOD_REV_F 0x00040000
585
586/* AMD systems with C1E don't have a working lAPIC timer. Check for that. */
587static __cpuinit int amd_apic_timer_broken(void)
588{
589 u32 lo, hi;
590 u32 eax = cpuid_eax(CPUID_PROCESSOR_SIGNATURE);
591 switch (eax & CPUID_XFAM) {
592 case CPUID_XFAM_K8:
593 if ((eax & CPUID_XMOD) < CPUID_XMOD_REV_F)
594 break;
595 case CPUID_XFAM_10H:
596 case CPUID_XFAM_11H:
597 rdmsr(MSR_K8_ENABLE_C1E, lo, hi);
598 if (lo & ENABLE_C1E_MASK)
599 return 1;
600 break;
601 default:
602 /* err on the side of caution */
603 return 1;
604 }
605 return 0;
606}
607
ed77504b 608static void __cpuinit init_amd(struct cpuinfo_x86 *c)
1da177e4 609{
7bcd3f34 610 unsigned level;
1da177e4 611
bc5e8fdf
LT
612#ifdef CONFIG_SMP
613 unsigned long value;
614
7d318d77
AK
615 /*
616 * Disable TLB flush filter by setting HWCR.FFDIS on K8
617 * bit 6 of msr C001_0015
618 *
619 * Errata 63 for SH-B3 steppings
620 * Errata 122 for all steppings (F+ have it disabled by default)
621 */
622 if (c->x86 == 15) {
623 rdmsrl(MSR_K8_HWCR, value);
624 value |= 1 << 6;
625 wrmsrl(MSR_K8_HWCR, value);
626 }
bc5e8fdf
LT
627#endif
628
1da177e4
LT
629 /* Bit 31 in normal CPUID used for nonstandard 3DNow ID;
630 3DNow is IDd by bit 31 in extended CPUID (1*32+31) anyway */
631 clear_bit(0*32+31, &c->x86_capability);
632
7bcd3f34
AK
633 /* On C+ stepping K8 rep microcode works well for copy/memset */
634 level = cpuid_eax(1);
635 if (c->x86 == 15 && ((level >= 0x0f48 && level < 0x0f50) || level >= 0x0f58))
636 set_bit(X86_FEATURE_REP_GOOD, &c->x86_capability);
99741faa 637 if (c->x86 == 0x10 || c->x86 == 0x11)
5b74e3ab 638 set_bit(X86_FEATURE_REP_GOOD, &c->x86_capability);
7bcd3f34 639
18bd057b
AK
640 /* Enable workaround for FXSAVE leak */
641 if (c->x86 >= 6)
642 set_bit(X86_FEATURE_FXSAVE_LEAK, &c->x86_capability);
643
e42f9437
RS
644 level = get_model_name(c);
645 if (!level) {
1da177e4
LT
646 switch (c->x86) {
647 case 15:
648 /* Should distinguish Models here, but this is only
649 a fallback anyways. */
650 strcpy(c->x86_model_id, "Hammer");
651 break;
652 }
653 }
654 display_cacheinfo(c);
655
130951cc
AK
656 /* c->x86_power is 8000_0007 edx. Bit 8 is constant TSC */
657 if (c->x86_power & (1<<8))
658 set_bit(X86_FEATURE_CONSTANT_TSC, &c->x86_capability);
659
faee9a5d
AK
660 /* Multi core CPU? */
661 if (c->extended_cpuid_level >= 0x80000008)
63518644 662 amd_detect_cmp(c);
1da177e4 663
67cddd94
AK
664 if (c->extended_cpuid_level >= 0x80000006 &&
665 (cpuid_edx(0x80000006) & 0xf000))
666 num_cache_leaves = 4;
667 else
668 num_cache_leaves = 3;
2049336f 669
0bd8acd1
AK
670 if (c->x86 == 0xf || c->x86 == 0x10 || c->x86 == 0x11)
671 set_bit(X86_FEATURE_K8, &c->x86_capability);
672
61677965
AK
673 /* RDTSC can be speculated around */
674 clear_bit(X86_FEATURE_SYNC_RDTSC, &c->x86_capability);
f039b754
AK
675
676 /* Family 10 doesn't support C states in MWAIT so don't use it */
677 if (c->x86 == 0x10 && !force_mwait)
678 clear_bit(X86_FEATURE_MWAIT, &c->x86_capability);
fb79d22e
TG
679
680 if (amd_apic_timer_broken())
681 disable_apic_timer = 1;
1da177e4
LT
682}
683
e6982c67 684static void __cpuinit detect_ht(struct cpuinfo_x86 *c)
1da177e4
LT
685{
686#ifdef CONFIG_SMP
687 u32 eax, ebx, ecx, edx;
94605eff 688 int index_msb, core_bits;
94605eff
SS
689
690 cpuid(1, &eax, &ebx, &ecx, &edx);
691
94605eff 692
e42f9437 693 if (!cpu_has(c, X86_FEATURE_HT))
1da177e4 694 return;
e42f9437
RS
695 if (cpu_has(c, X86_FEATURE_CMP_LEGACY))
696 goto out;
1da177e4 697
1da177e4 698 smp_num_siblings = (ebx & 0xff0000) >> 16;
94605eff 699
1da177e4
LT
700 if (smp_num_siblings == 1) {
701 printk(KERN_INFO "CPU: Hyper-Threading is disabled\n");
94605eff
SS
702 } else if (smp_num_siblings > 1 ) {
703
1da177e4
LT
704 if (smp_num_siblings > NR_CPUS) {
705 printk(KERN_WARNING "CPU: Unsupported number of the siblings %d", smp_num_siblings);
706 smp_num_siblings = 1;
707 return;
708 }
94605eff
SS
709
710 index_msb = get_count_order(smp_num_siblings);
f3fa8ebc 711 c->phys_proc_id = phys_pkg_id(index_msb);
3dd9d514 712
94605eff 713 smp_num_siblings = smp_num_siblings / c->x86_max_cores;
3dd9d514 714
94605eff
SS
715 index_msb = get_count_order(smp_num_siblings) ;
716
717 core_bits = get_count_order(c->x86_max_cores);
3dd9d514 718
f3fa8ebc 719 c->cpu_core_id = phys_pkg_id(index_msb) &
94605eff 720 ((1 << core_bits) - 1);
1da177e4 721 }
e42f9437
RS
722out:
723 if ((c->x86_max_cores * smp_num_siblings) > 1) {
724 printk(KERN_INFO "CPU: Physical Processor ID: %d\n", c->phys_proc_id);
725 printk(KERN_INFO "CPU: Processor Core ID: %d\n", c->cpu_core_id);
726 }
727
1da177e4
LT
728#endif
729}
730
3dd9d514
AK
731/*
732 * find out the number of processor cores on the die
733 */
e6982c67 734static int __cpuinit intel_num_cpu_cores(struct cpuinfo_x86 *c)
3dd9d514 735{
2bbc419f 736 unsigned int eax, t;
3dd9d514
AK
737
738 if (c->cpuid_level < 4)
739 return 1;
740
2bbc419f 741 cpuid_count(4, 0, &eax, &t, &t, &t);
3dd9d514
AK
742
743 if (eax & 0x1f)
744 return ((eax >> 26) + 1);
745 else
746 return 1;
747}
748
df0cc26b
AK
/* Bind the current CPU to the NUMA node the SRAT mapped its APIC id to. */
static void srat_detect_node(void)
{
#ifdef CONFIG_NUMA
	unsigned node;
	int cpu = smp_processor_id();
	int apicid = hard_smp_processor_id();

	/* Don't do the funky fallback heuristics the AMD version employs
	   for now. */
	node = apicid_to_node[apicid];
	if (node == NUMA_NO_NODE)
		node = first_node(node_online_map);
	numa_set_node(cpu, node);

	printk(KERN_INFO "CPU %d/%x -> Node %d\n", cpu, apicid, node);
#endif
}
766
e6982c67 767static void __cpuinit init_intel(struct cpuinfo_x86 *c)
1da177e4
LT
768{
769 /* Cache sizes */
770 unsigned n;
771
772 init_intel_cacheinfo(c);
0080e667
VP
773 if (c->cpuid_level > 9 ) {
774 unsigned eax = cpuid_eax(10);
775 /* Check for version and the number of counters */
776 if ((eax & 0xff) && (((eax>>8) & 0xff) > 1))
777 set_bit(X86_FEATURE_ARCH_PERFMON, &c->x86_capability);
778 }
779
36b2a8d5
SE
780 if (cpu_has_ds) {
781 unsigned int l1, l2;
782 rdmsr(MSR_IA32_MISC_ENABLE, l1, l2);
ee58fad5
SE
783 if (!(l1 & (1<<11)))
784 set_bit(X86_FEATURE_BTS, c->x86_capability);
36b2a8d5
SE
785 if (!(l1 & (1<<12)))
786 set_bit(X86_FEATURE_PEBS, c->x86_capability);
787 }
788
ebfcaa96 789 n = c->extended_cpuid_level;
1da177e4
LT
790 if (n >= 0x80000008) {
791 unsigned eax = cpuid_eax(0x80000008);
792 c->x86_virt_bits = (eax >> 8) & 0xff;
793 c->x86_phys_bits = eax & 0xff;
af9c142d
SL
794 /* CPUID workaround for Intel 0F34 CPU */
795 if (c->x86_vendor == X86_VENDOR_INTEL &&
796 c->x86 == 0xF && c->x86_model == 0x3 &&
797 c->x86_mask == 0x4)
798 c->x86_phys_bits = 36;
1da177e4
LT
799 }
800
801 if (c->x86 == 15)
802 c->x86_cache_alignment = c->x86_clflush_size * 2;
39b3a791
AK
803 if ((c->x86 == 0xf && c->x86_model >= 0x03) ||
804 (c->x86 == 0x6 && c->x86_model >= 0x0e))
c29601e9 805 set_bit(X86_FEATURE_CONSTANT_TSC, &c->x86_capability);
27fbe5b2
AK
806 if (c->x86 == 6)
807 set_bit(X86_FEATURE_REP_GOOD, &c->x86_capability);
f3d73707
AV
808 if (c->x86 == 15)
809 set_bit(X86_FEATURE_SYNC_RDTSC, &c->x86_capability);
810 else
811 clear_bit(X86_FEATURE_SYNC_RDTSC, &c->x86_capability);
94605eff 812 c->x86_max_cores = intel_num_cpu_cores(c);
df0cc26b
AK
813
814 srat_detect_node();
1da177e4
LT
815}
816
672289e9 817static void __cpuinit get_cpu_vendor(struct cpuinfo_x86 *c)
1da177e4
LT
818{
819 char *v = c->x86_vendor_id;
820
821 if (!strcmp(v, "AuthenticAMD"))
822 c->x86_vendor = X86_VENDOR_AMD;
823 else if (!strcmp(v, "GenuineIntel"))
824 c->x86_vendor = X86_VENDOR_INTEL;
825 else
826 c->x86_vendor = X86_VENDOR_UNKNOWN;
827}
828
/* Vendor/family keyed table entry mapping model numbers to names. */
struct cpu_model_info {
	int vendor;
	int family;
	char *model_names[16];
};
834
835/* Do some early cpuid on the boot CPU to get some parameter that are
836 needed before check_bugs. Everything advanced is in identify_cpu
837 below. */
e6982c67 838void __cpuinit early_identify_cpu(struct cpuinfo_x86 *c)
1da177e4
LT
839{
840 u32 tfms;
841
842 c->loops_per_jiffy = loops_per_jiffy;
843 c->x86_cache_size = -1;
844 c->x86_vendor = X86_VENDOR_UNKNOWN;
845 c->x86_model = c->x86_mask = 0; /* So far unknown... */
846 c->x86_vendor_id[0] = '\0'; /* Unset */
847 c->x86_model_id[0] = '\0'; /* Unset */
848 c->x86_clflush_size = 64;
849 c->x86_cache_alignment = c->x86_clflush_size;
94605eff 850 c->x86_max_cores = 1;
ebfcaa96 851 c->extended_cpuid_level = 0;
1da177e4
LT
852 memset(&c->x86_capability, 0, sizeof c->x86_capability);
853
854 /* Get vendor name */
855 cpuid(0x00000000, (unsigned int *)&c->cpuid_level,
856 (unsigned int *)&c->x86_vendor_id[0],
857 (unsigned int *)&c->x86_vendor_id[8],
858 (unsigned int *)&c->x86_vendor_id[4]);
859
860 get_cpu_vendor(c);
861
862 /* Initialize the standard set of capabilities */
863 /* Note that the vendor-specific code below might override */
864
865 /* Intel-defined flags: level 0x00000001 */
866 if (c->cpuid_level >= 0x00000001) {
867 __u32 misc;
868 cpuid(0x00000001, &tfms, &misc, &c->x86_capability[4],
869 &c->x86_capability[0]);
870 c->x86 = (tfms >> 8) & 0xf;
871 c->x86_model = (tfms >> 4) & 0xf;
872 c->x86_mask = tfms & 0xf;
f5f786d0 873 if (c->x86 == 0xf)
1da177e4 874 c->x86 += (tfms >> 20) & 0xff;
f5f786d0 875 if (c->x86 >= 0x6)
1da177e4 876 c->x86_model += ((tfms >> 16) & 0xF) << 4;
1da177e4
LT
877 if (c->x86_capability[0] & (1<<19))
878 c->x86_clflush_size = ((misc >> 8) & 0xff) * 8;
1da177e4
LT
879 } else {
880 /* Have CPUID level 0 only - unheard of */
881 c->x86 = 4;
882 }
a158608b
AK
883
884#ifdef CONFIG_SMP
f3fa8ebc 885 c->phys_proc_id = (cpuid_ebx(1) >> 24) & 0xff;
92cb7612 886 c->cpu_index = 0;
a158608b 887#endif
1da177e4
LT
888}
889
890/*
891 * This does the hard work of actually picking apart the CPU stuff...
892 */
e6982c67 893void __cpuinit identify_cpu(struct cpuinfo_x86 *c)
1da177e4
LT
894{
895 int i;
896 u32 xlvl;
897
898 early_identify_cpu(c);
899
900 /* AMD-defined flags: level 0x80000001 */
901 xlvl = cpuid_eax(0x80000000);
ebfcaa96 902 c->extended_cpuid_level = xlvl;
1da177e4
LT
903 if ((xlvl & 0xffff0000) == 0x80000000) {
904 if (xlvl >= 0x80000001) {
905 c->x86_capability[1] = cpuid_edx(0x80000001);
5b7abc6f 906 c->x86_capability[6] = cpuid_ecx(0x80000001);
1da177e4
LT
907 }
908 if (xlvl >= 0x80000004)
909 get_model_name(c); /* Default name */
910 }
911
912 /* Transmeta-defined flags: level 0x80860001 */
913 xlvl = cpuid_eax(0x80860000);
914 if ((xlvl & 0xffff0000) == 0x80860000) {
915 /* Don't set x86_cpuid_level here for now to not confuse. */
916 if (xlvl >= 0x80860001)
917 c->x86_capability[2] = cpuid_edx(0x80860001);
918 }
919
1d67953f
VP
920 init_scattered_cpuid_features(c);
921
1e9f28fa
SS
922 c->apicid = phys_pkg_id(0);
923
1da177e4
LT
924 /*
925 * Vendor-specific initialization. In this section we
926 * canonicalize the feature flags, meaning if there are
927 * features a certain CPU supports which CPUID doesn't
928 * tell us, CPUID claiming incorrect flags, or other bugs,
929 * we handle them here.
930 *
931 * At the end of this section, c->x86_capability better
932 * indicate the features this CPU genuinely supports!
933 */
934 switch (c->x86_vendor) {
935 case X86_VENDOR_AMD:
936 init_amd(c);
937 break;
938
939 case X86_VENDOR_INTEL:
940 init_intel(c);
941 break;
942
943 case X86_VENDOR_UNKNOWN:
944 default:
945 display_cacheinfo(c);
946 break;
947 }
948
949 select_idle_routine(c);
950 detect_ht(c);
1da177e4
LT
951
952 /*
953 * On SMP, boot_cpu_data holds the common feature set between
954 * all CPUs; so make sure that we indicate which features are
955 * common between the CPUs. The first time this routine gets
956 * executed, c == &boot_cpu_data.
957 */
958 if (c != &boot_cpu_data) {
959 /* AND the already accumulated flags with these */
960 for (i = 0 ; i < NCAPINTS ; i++)
961 boot_cpu_data.x86_capability[i] &= c->x86_capability[i];
962 }
963
964#ifdef CONFIG_X86_MCE
965 mcheck_init(c);
966#endif
8bd99481 967 if (c != &boot_cpu_data)
3b520b23 968 mtrr_ap_init();
1da177e4 969#ifdef CONFIG_NUMA
3019e8eb 970 numa_add_cpu(smp_processor_id());
1da177e4
LT
971#endif
972}
973
974
e6982c67 975void __cpuinit print_cpu_info(struct cpuinfo_x86 *c)
1da177e4
LT
976{
977 if (c->x86_model_id[0])
978 printk("%s", c->x86_model_id);
979
980 if (c->x86_mask || c->cpuid_level >= 0)
981 printk(" stepping %02x\n", c->x86_mask);
982 else
983 printk("\n");
984}
985
986/*
987 * Get CPU information for use by the procfs.
988 */
989
/*
 * show_cpuinfo - seq_file .show handler that renders one CPU's entry
 * in /proc/cpuinfo.
 *
 * @m: destination seq_file
 * @v: iterator cookie from c_start()/c_next(); actually a
 *     struct cpuinfo_x86 * for the CPU being shown
 *
 * Returns 0 always; an offline CPU is silently skipped (still 0).
 *
 * NOTE(review): the exact output format here is user-visible ABI —
 * userspace parses /proc/cpuinfo — so field names, tabs and ordering
 * must not change.
 */
static int show_cpuinfo(struct seq_file *m, void *v)
{
	struct cpuinfo_x86 *c = v;
	int cpu = 0;

	/*
	 * These flag bits must match the definitions in <asm/cpufeature.h>.
	 * NULL means this bit is undefined or reserved; either way it doesn't
	 * have meaning as far as Linux is concerned. Note that it's important
	 * to realize there is a difference between this table and CPUID -- if
	 * applications want to get the raw CPUID data, they should access
	 * /dev/cpu/<cpu_nr>/cpuid instead.
	 *
	 * The table index IS the feature-bit number (word * 32 + bit), so
	 * entries may only ever be renamed in place, never reordered.
	 */
	static const char *const x86_cap_flags[] = {
		/* Intel-defined */
		"fpu", "vme", "de", "pse", "tsc", "msr", "pae", "mce",
		"cx8", "apic", NULL, "sep", "mtrr", "pge", "mca", "cmov",
		"pat", "pse36", "pn", "clflush", NULL, "dts", "acpi", "mmx",
		"fxsr", "sse", "sse2", "ss", "ht", "tm", "ia64", "pbe",

		/* AMD-defined */
		NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
		NULL, NULL, NULL, "syscall", NULL, NULL, NULL, NULL,
		NULL, NULL, NULL, NULL, "nx", NULL, "mmxext", NULL,
		NULL, "fxsr_opt", "pdpe1gb", "rdtscp", NULL, "lm",
		"3dnowext", "3dnow",

		/* Transmeta-defined */
		"recovery", "longrun", NULL, "lrti", NULL, NULL, NULL, NULL,
		NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
		NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
		NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,

		/* Other (Linux-defined) */
		"cxmmx", "k6_mtrr", "cyrix_arr", "centaur_mcr",
		NULL, NULL, NULL, NULL,
		"constant_tsc", "up", NULL, "arch_perfmon",
		"pebs", "bts", NULL, "sync_rdtsc",
		"rep_good", NULL, NULL, NULL, NULL, NULL, NULL, NULL,
		NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,

		/* Intel-defined (#2) */
		"pni", NULL, NULL, "monitor", "ds_cpl", "vmx", "smx", "est",
		"tm2", "ssse3", "cid", NULL, NULL, "cx16", "xtpr", NULL,
		NULL, NULL, "dca", NULL, NULL, NULL, NULL, "popcnt",
		NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,

		/* VIA/Cyrix/Centaur-defined */
		NULL, NULL, "rng", "rng_en", NULL, NULL, "ace", "ace_en",
		"ace2", "ace2_en", "phe", "phe_en", "pmm", "pmm_en", NULL, NULL,
		NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
		NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,

		/* AMD-defined (#2) */
		"lahf_lm", "cmp_legacy", "svm", "extapic", "cr8_legacy",
		"altmovcr8", "abm", "sse4a",
		"misalignsse", "3dnowprefetch",
		"osvw", "ibs", NULL, NULL, NULL, NULL,
		NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
		NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,

		/* Auxiliary (Linux-defined) */
		"ida", NULL, NULL, NULL, NULL, NULL, NULL, NULL,
		NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
		NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
		NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
	};
	/* Bit names for c->x86_power (CPUID 0x80000007 EDX), index == bit. */
	static const char *const x86_power_flags[] = {
		"ts",	/* temperature sensor */
		"fid",  /* frequency id control */
		"vid",  /* voltage id control */
		"ttp",  /* thermal trip */
		"tm",
		"stc",
		"100mhzsteps",
		"hwpstate",
		"",	/* tsc invariant mapped to constant_tsc */
		/* nothing */
	};


#ifdef CONFIG_SMP
	/* Offline CPUs produce no output at all. */
	if (!cpu_online(c->cpu_index))
		return 0;
	cpu = c->cpu_index;
#endif

	seq_printf(m,"processor\t: %u\n"
		     "vendor_id\t: %s\n"
		     "cpu family\t: %d\n"
		     "model\t\t: %d\n"
		     "model name\t: %s\n",
		     (unsigned)cpu,
		     c->x86_vendor_id[0] ? c->x86_vendor_id : "unknown",
		     c->x86,
		     (int)c->x86_model,
		     c->x86_model_id[0] ? c->x86_model_id : "unknown");

	if (c->x86_mask || c->cpuid_level >= 0)
		seq_printf(m, "stepping\t: %d\n", c->x86_mask);
	else
		seq_printf(m, "stepping\t: unknown\n");

	if (cpu_has(c,X86_FEATURE_TSC)) {
		/* Prefer the cpufreq-reported frequency; fall back to the
		   boot-time calibrated cpu_khz when cpufreq has none. */
		unsigned int freq = cpufreq_quick_get((unsigned)cpu);
		if (!freq)
			freq = cpu_khz;
		seq_printf(m, "cpu MHz\t\t: %u.%03u\n",
			     freq / 1000, (freq % 1000));
	}

	/* Cache size */
	if (c->x86_cache_size >= 0)
		seq_printf(m, "cache size\t: %d KB\n", c->x86_cache_size);

#ifdef CONFIG_SMP
	/* Topology lines are only shown when there is real SMT/multicore. */
	if (smp_num_siblings * c->x86_max_cores > 1) {
		seq_printf(m, "physical id\t: %d\n", c->phys_proc_id);
		seq_printf(m, "siblings\t: %d\n",
			       cpus_weight(per_cpu(cpu_core_map, cpu)));
		seq_printf(m, "core id\t\t: %d\n", c->cpu_core_id);
		seq_printf(m, "cpu cores\t: %d\n", c->booted_cores);
	}
#endif

	/* x86-64 always has an FPU and write-protect support. */
	seq_printf(m,
	        "fpu\t\t: yes\n"
	        "fpu_exception\t: yes\n"
	        "cpuid level\t: %d\n"
	        "wp\t\t: yes\n"
	        "flags\t\t:",
		   c->cpuid_level);

	{
		int i;
		/* Walk every capability bit; unnamed (NULL) bits are hidden. */
		for ( i = 0 ; i < 32*NCAPINTS ; i++ )
			if (cpu_has(c, i) && x86_cap_flags[i] != NULL)
				seq_printf(m, " %s", x86_cap_flags[i]);
	}

	seq_printf(m, "\nbogomips\t: %lu.%02lu\n",
		   c->loops_per_jiffy/(500000/HZ),
		   (c->loops_per_jiffy/(5000/HZ)) % 100);

	if (c->x86_tlbsize > 0)
		seq_printf(m, "TLB size\t: %d 4K pages\n", c->x86_tlbsize);
	seq_printf(m, "clflush size\t: %d\n", c->x86_clflush_size);
	seq_printf(m, "cache_alignment\t: %d\n", c->x86_cache_alignment);

	seq_printf(m, "address sizes\t: %u bits physical, %u bits virtual\n",
		   c->x86_phys_bits, c->x86_virt_bits);

	seq_printf(m, "power management:");
	{
		unsigned i;
		for (i = 0; i < 32; i++)
			if (c->x86_power & (1 << i)) {
				/* Named bits print their flag (the empty
				   string for the tsc-invariant bit prints
				   nothing); unknown bits print " [n]". */
				if (i < ARRAY_SIZE(x86_power_flags) &&
				    x86_power_flags[i])
					seq_printf(m, "%s%s",
						   x86_power_flags[i][0]?" ":"",
						   x86_power_flags[i]);
				else
					seq_printf(m, " [%d]", i);
			}
	}

	/* Blank line separates consecutive per-CPU entries. */
	seq_printf(m, "\n\n");

	return 0;
}
1161
1162static void *c_start(struct seq_file *m, loff_t *pos)
1163{
92cb7612
MT
1164 if (*pos == 0) /* just in case, cpu 0 is not the first */
1165 *pos = first_cpu(cpu_possible_map);
1166 if ((*pos) < NR_CPUS && cpu_possible(*pos))
1167 return &cpu_data(*pos);
1168 return NULL;
1da177e4
LT
1169}
1170
/*
 * c_next - seq_file .next handler: advance to the next possible CPU
 * and delegate to c_start() to translate the position (or terminate).
 */
static void *c_next(struct seq_file *m, void *v, loff_t *pos)
{
	*pos = next_cpu(*pos, cpu_possible_map);
	return c_start(m, pos);
}
1176
/*
 * c_stop - seq_file .stop handler; nothing to release for this iterator.
 */
static void c_stop(struct seq_file *m, void *v)
{
}
1180
1181struct seq_operations cpuinfo_op = {
1182 .start =c_start,
1183 .next = c_next,
1184 .stop = c_stop,
1185 .show = show_cpuinfo,
1186};