[PATCH] i386: replace intermediate array-size definitions with ARRAY_SIZE()
[GitHub/mt8127/android_kernel_alcatel_ttab.git] / arch / x86_64 / kernel / setup.c
CommitLineData
1da177e4
LT
1/*
2 * linux/arch/x86-64/kernel/setup.c
3 *
4 * Copyright (C) 1995 Linus Torvalds
5 *
6 * Nov 2001 Dave Jones <davej@suse.de>
7 * Forked from i386 setup code.
1da177e4
LT
8 */
9
10/*
11 * This file handles the architecture-dependent parts of initialization
12 */
13
14#include <linux/errno.h>
15#include <linux/sched.h>
16#include <linux/kernel.h>
17#include <linux/mm.h>
18#include <linux/stddef.h>
19#include <linux/unistd.h>
20#include <linux/ptrace.h>
21#include <linux/slab.h>
22#include <linux/user.h>
23#include <linux/a.out.h>
894673ee 24#include <linux/screen_info.h>
1da177e4
LT
25#include <linux/ioport.h>
26#include <linux/delay.h>
1da177e4
LT
27#include <linux/init.h>
28#include <linux/initrd.h>
29#include <linux/highmem.h>
30#include <linux/bootmem.h>
31#include <linux/module.h>
32#include <asm/processor.h>
33#include <linux/console.h>
34#include <linux/seq_file.h>
aac04b32 35#include <linux/crash_dump.h>
1da177e4
LT
36#include <linux/root_dev.h>
37#include <linux/pci.h>
38#include <linux/acpi.h>
39#include <linux/kallsyms.h>
40#include <linux/edd.h>
bbfceef4 41#include <linux/mmzone.h>
5f5609df 42#include <linux/kexec.h>
95235ca2 43#include <linux/cpufreq.h>
e9928674 44#include <linux/dmi.h>
17a941d8 45#include <linux/dma-mapping.h>
681558fd 46#include <linux/ctype.h>
bbfceef4 47
1da177e4
LT
48#include <asm/mtrr.h>
49#include <asm/uaccess.h>
50#include <asm/system.h>
51#include <asm/io.h>
52#include <asm/smp.h>
53#include <asm/msr.h>
54#include <asm/desc.h>
55#include <video/edid.h>
56#include <asm/e820.h>
57#include <asm/dma.h>
58#include <asm/mpspec.h>
59#include <asm/mmu_context.h>
60#include <asm/bootsetup.h>
61#include <asm/proto.h>
62#include <asm/setup.h>
63#include <asm/mach_apic.h>
64#include <asm/numa.h>
2bc0414e 65#include <asm/sections.h>
f2d3efed 66#include <asm/dmi.h>
1da177e4
LT
67
68/*
69 * Machine setup..
70 */
71
6c231b7b 72struct cpuinfo_x86 boot_cpu_data __read_mostly;
2ee60e17 73EXPORT_SYMBOL(boot_cpu_data);
1da177e4
LT
74
75unsigned long mmu_cr4_features;
76
1da177e4
LT
77/* Boot loader ID as an integer, for the benefit of proc_dointvec */
78int bootloader_type;
79
80unsigned long saved_video_mode;
81
f2d3efed
AK
82/*
83 * Early DMI memory
84 */
85int dmi_alloc_index;
86char dmi_alloc_data[DMI_MAX_DATA];
87
1da177e4
LT
88/*
89 * Setup options
90 */
1da177e4 91struct screen_info screen_info;
2ee60e17 92EXPORT_SYMBOL(screen_info);
1da177e4
LT
93struct sys_desc_table_struct {
94 unsigned short length;
95 unsigned char table[0];
96};
97
98struct edid_info edid_info;
ba70710e 99EXPORT_SYMBOL_GPL(edid_info);
1da177e4
LT
100
101extern int root_mountflags;
1da177e4
LT
102
103char command_line[COMMAND_LINE_SIZE];
104
105struct resource standard_io_resources[] = {
106 { .name = "dma1", .start = 0x00, .end = 0x1f,
107 .flags = IORESOURCE_BUSY | IORESOURCE_IO },
108 { .name = "pic1", .start = 0x20, .end = 0x21,
109 .flags = IORESOURCE_BUSY | IORESOURCE_IO },
110 { .name = "timer0", .start = 0x40, .end = 0x43,
111 .flags = IORESOURCE_BUSY | IORESOURCE_IO },
112 { .name = "timer1", .start = 0x50, .end = 0x53,
113 .flags = IORESOURCE_BUSY | IORESOURCE_IO },
114 { .name = "keyboard", .start = 0x60, .end = 0x6f,
115 .flags = IORESOURCE_BUSY | IORESOURCE_IO },
116 { .name = "dma page reg", .start = 0x80, .end = 0x8f,
117 .flags = IORESOURCE_BUSY | IORESOURCE_IO },
118 { .name = "pic2", .start = 0xa0, .end = 0xa1,
119 .flags = IORESOURCE_BUSY | IORESOURCE_IO },
120 { .name = "dma2", .start = 0xc0, .end = 0xdf,
121 .flags = IORESOURCE_BUSY | IORESOURCE_IO },
122 { .name = "fpu", .start = 0xf0, .end = 0xff,
123 .flags = IORESOURCE_BUSY | IORESOURCE_IO }
124};
125
126#define STANDARD_IO_RESOURCES \
127 (sizeof standard_io_resources / sizeof standard_io_resources[0])
128
129#define IORESOURCE_RAM (IORESOURCE_BUSY | IORESOURCE_MEM)
130
131struct resource data_resource = {
132 .name = "Kernel data",
133 .start = 0,
134 .end = 0,
135 .flags = IORESOURCE_RAM,
136};
137struct resource code_resource = {
138 .name = "Kernel code",
139 .start = 0,
140 .end = 0,
141 .flags = IORESOURCE_RAM,
142};
143
144#define IORESOURCE_ROM (IORESOURCE_BUSY | IORESOURCE_READONLY | IORESOURCE_MEM)
145
146static struct resource system_rom_resource = {
147 .name = "System ROM",
148 .start = 0xf0000,
149 .end = 0xfffff,
150 .flags = IORESOURCE_ROM,
151};
152
153static struct resource extension_rom_resource = {
154 .name = "Extension ROM",
155 .start = 0xe0000,
156 .end = 0xeffff,
157 .flags = IORESOURCE_ROM,
158};
159
160static struct resource adapter_rom_resources[] = {
161 { .name = "Adapter ROM", .start = 0xc8000, .end = 0,
162 .flags = IORESOURCE_ROM },
163 { .name = "Adapter ROM", .start = 0, .end = 0,
164 .flags = IORESOURCE_ROM },
165 { .name = "Adapter ROM", .start = 0, .end = 0,
166 .flags = IORESOURCE_ROM },
167 { .name = "Adapter ROM", .start = 0, .end = 0,
168 .flags = IORESOURCE_ROM },
169 { .name = "Adapter ROM", .start = 0, .end = 0,
170 .flags = IORESOURCE_ROM },
171 { .name = "Adapter ROM", .start = 0, .end = 0,
172 .flags = IORESOURCE_ROM }
173};
174
175#define ADAPTER_ROM_RESOURCES \
176 (sizeof adapter_rom_resources / sizeof adapter_rom_resources[0])
177
178static struct resource video_rom_resource = {
179 .name = "Video ROM",
180 .start = 0xc0000,
181 .end = 0xc7fff,
182 .flags = IORESOURCE_ROM,
183};
184
185static struct resource video_ram_resource = {
186 .name = "Video RAM area",
187 .start = 0xa0000,
188 .end = 0xbffff,
189 .flags = IORESOURCE_RAM,
190};
191
192#define romsignature(x) (*(unsigned short *)(x) == 0xaa55)
193
194static int __init romchecksum(unsigned char *rom, unsigned long length)
195{
196 unsigned char *p, sum = 0;
197
198 for (p = rom; p < rom + length; p++)
199 sum += *p;
200 return sum == 0;
201}
202
203static void __init probe_roms(void)
204{
205 unsigned long start, length, upper;
206 unsigned char *rom;
207 int i;
208
209 /* video rom */
210 upper = adapter_rom_resources[0].start;
211 for (start = video_rom_resource.start; start < upper; start += 2048) {
212 rom = isa_bus_to_virt(start);
213 if (!romsignature(rom))
214 continue;
215
216 video_rom_resource.start = start;
217
218 /* 0 < length <= 0x7f * 512, historically */
219 length = rom[2] * 512;
220
221 /* if checksum okay, trust length byte */
222 if (length && romchecksum(rom, length))
223 video_rom_resource.end = start + length - 1;
224
225 request_resource(&iomem_resource, &video_rom_resource);
226 break;
227 }
228
229 start = (video_rom_resource.end + 1 + 2047) & ~2047UL;
230 if (start < upper)
231 start = upper;
232
233 /* system rom */
234 request_resource(&iomem_resource, &system_rom_resource);
235 upper = system_rom_resource.start;
236
237 /* check for extension rom (ignore length byte!) */
238 rom = isa_bus_to_virt(extension_rom_resource.start);
239 if (romsignature(rom)) {
240 length = extension_rom_resource.end - extension_rom_resource.start + 1;
241 if (romchecksum(rom, length)) {
242 request_resource(&iomem_resource, &extension_rom_resource);
243 upper = extension_rom_resource.start;
244 }
245 }
246
247 /* check for adapter roms on 2k boundaries */
248 for (i = 0; i < ADAPTER_ROM_RESOURCES && start < upper; start += 2048) {
249 rom = isa_bus_to_virt(start);
250 if (!romsignature(rom))
251 continue;
252
253 /* 0 < length <= 0x7f * 512, historically */
254 length = rom[2] * 512;
255
256 /* but accept any length that fits if checksum okay */
257 if (!length || start + length > upper || !romchecksum(rom, length))
258 continue;
259
260 adapter_rom_resources[i].start = start;
261 adapter_rom_resources[i].end = start + length - 1;
262 request_resource(&iomem_resource, &adapter_rom_resources[i]);
263
264 start = adapter_rom_resources[i++].end & ~2047UL;
265 }
266}
267
#ifdef CONFIG_PROC_VMCORE
/*
 * elfcorehdr= specifies the location of the ELF core header stored by
 * the crashed kernel.  The option is passed by the kexec loader to the
 * capture kernel.
 */
static int __init setup_elfcorehdr(char *arg)
{
	char *endp;

	if (!arg)
		return -EINVAL;
	elfcorehdr_addr = memparse(arg, &endp);
	return endp > arg ? 0 : -EINVAL;
}
early_param("elfcorehdr", setup_elfcorehdr);
#endif
283
2b97690f 284#ifndef CONFIG_NUMA
bbfceef4
MT
285static void __init
286contig_initmem_init(unsigned long start_pfn, unsigned long end_pfn)
1da177e4 287{
bbfceef4
MT
288 unsigned long bootmap_size, bootmap;
289
bbfceef4
MT
290 bootmap_size = bootmem_bootmap_pages(end_pfn)<<PAGE_SHIFT;
291 bootmap = find_e820_area(0, end_pfn<<PAGE_SHIFT, bootmap_size);
292 if (bootmap == -1L)
293 panic("Cannot find bootmem map of size %ld\n",bootmap_size);
294 bootmap_size = init_bootmem(bootmap >> PAGE_SHIFT, end_pfn);
5cb248ab
MG
295 e820_register_active_regions(0, start_pfn, end_pfn);
296 free_bootmem_with_active_regions(0, end_pfn);
bbfceef4 297 reserve_bootmem(bootmap, bootmap_size);
1da177e4
LT
298}
299#endif
300
1da177e4
LT
#if defined(CONFIG_EDD) || defined(CONFIG_EDD_MODULE)
struct edd edd;
#ifdef CONFIG_EDD_MODULE
EXPORT_SYMBOL(edd);
#endif
/**
 * copy_edd() - Copy the BIOS EDD information
 * from boot_params into a safe place.
 *
 */
static inline void copy_edd(void)
{
	memcpy(edd.mbr_signature, EDD_MBR_SIGNATURE, sizeof(edd.mbr_signature));
	memcpy(edd.edd_info, EDD_BUF, sizeof(edd.edd_info));
	edd.mbr_signature_nr = EDD_MBR_SIG_NR;
	edd.edd_info_nr = EDD_NR;
}
#else
/* No EDD support configured: nothing to copy */
static inline void copy_edd(void)
{
}
#endif
323
324#define EBDA_ADDR_POINTER 0x40E
ac71d12c
AK
325
326unsigned __initdata ebda_addr;
327unsigned __initdata ebda_size;
328
329static void discover_ebda(void)
1da177e4 330{
ac71d12c 331 /*
1da177e4
LT
332 * there is a real-mode segmented pointer pointing to the
333 * 4K EBDA area at 0x40E
334 */
ac71d12c
AK
335 ebda_addr = *(unsigned short *)EBDA_ADDR_POINTER;
336 ebda_addr <<= 4;
337
338 ebda_size = *(unsigned short *)(unsigned long)ebda_addr;
339
340 /* Round EBDA up to pages */
341 if (ebda_size == 0)
342 ebda_size = 1;
343 ebda_size <<= 10;
344 ebda_size = round_up(ebda_size + (ebda_addr & ~PAGE_MASK), PAGE_SIZE);
345 if (ebda_size > 64*1024)
346 ebda_size = 64*1024;
1da177e4
LT
347}
348
349void __init setup_arch(char **cmdline_p)
350{
43c85c9c
AK
351 printk(KERN_INFO "Command line: %s\n", saved_command_line);
352
1da177e4 353 ROOT_DEV = old_decode_dev(ORIG_ROOT_DEV);
1da177e4
LT
354 screen_info = SCREEN_INFO;
355 edid_info = EDID_INFO;
356 saved_video_mode = SAVED_VIDEO_MODE;
357 bootloader_type = LOADER_TYPE;
358
359#ifdef CONFIG_BLK_DEV_RAM
360 rd_image_start = RAMDISK_FLAGS & RAMDISK_IMAGE_START_MASK;
361 rd_prompt = ((RAMDISK_FLAGS & RAMDISK_PROMPT_FLAG) != 0);
362 rd_doload = ((RAMDISK_FLAGS & RAMDISK_LOAD_FLAG) != 0);
363#endif
364 setup_memory_region();
365 copy_edd();
366
367 if (!MOUNT_ROOT_RDONLY)
368 root_mountflags &= ~MS_RDONLY;
369 init_mm.start_code = (unsigned long) &_text;
370 init_mm.end_code = (unsigned long) &_etext;
371 init_mm.end_data = (unsigned long) &_edata;
372 init_mm.brk = (unsigned long) &_end;
373
374 code_resource.start = virt_to_phys(&_text);
375 code_resource.end = virt_to_phys(&_etext)-1;
376 data_resource.start = virt_to_phys(&_etext);
377 data_resource.end = virt_to_phys(&_edata)-1;
378
1da177e4
LT
379 early_identify_cpu(&boot_cpu_data);
380
2c8c0e6b
AK
381 strlcpy(command_line, saved_command_line, COMMAND_LINE_SIZE);
382 *cmdline_p = command_line;
383
384 parse_early_param();
385
386 finish_e820_parsing();
9ca33eb6 387
5cb248ab 388 e820_register_active_regions(0, 0, -1UL);
1da177e4
LT
389 /*
390 * partially used pages are not usable - thus
391 * we are rounding upwards:
392 */
393 end_pfn = e820_end_of_ram();
caff0710 394 num_physpages = end_pfn;
1da177e4
LT
395
396 check_efer();
397
ac71d12c
AK
398 discover_ebda();
399
1da177e4
LT
400 init_memory_mapping(0, (end_pfn_map << PAGE_SHIFT));
401
f2d3efed
AK
402 dmi_scan_machine();
403
f6c2e333
SS
404 zap_low_mappings(0);
405
888ba6c6 406#ifdef CONFIG_ACPI
1da177e4
LT
407 /*
408 * Initialize the ACPI boot-time table parser (gets the RSDP and SDT).
409 * Call this early for SRAT node setup.
410 */
411 acpi_boot_table_init();
412#endif
413
caff0710
JB
414 /* How many end-of-memory variables you have, grandma! */
415 max_low_pfn = end_pfn;
416 max_pfn = end_pfn;
417 high_memory = (void *)__va(end_pfn * PAGE_SIZE - 1) + 1;
418
5cb248ab
MG
419 /* Remove active ranges so rediscovery with NUMA-awareness happens */
420 remove_all_active_ranges();
421
1da177e4
LT
422#ifdef CONFIG_ACPI_NUMA
423 /*
424 * Parse SRAT to discover nodes.
425 */
426 acpi_numa_init();
427#endif
428
2b97690f 429#ifdef CONFIG_NUMA
1da177e4
LT
430 numa_initmem_init(0, end_pfn);
431#else
bbfceef4 432 contig_initmem_init(0, end_pfn);
1da177e4
LT
433#endif
434
435 /* Reserve direct mapping */
436 reserve_bootmem_generic(table_start << PAGE_SHIFT,
437 (table_end - table_start) << PAGE_SHIFT);
438
439 /* reserve kernel */
ceee8822
AK
440 reserve_bootmem_generic(__pa_symbol(&_text),
441 __pa_symbol(&_end) - __pa_symbol(&_text));
1da177e4
LT
442
443 /*
444 * reserve physical page 0 - it's a special BIOS page on many boxes,
445 * enabling clean reboots, SMP operation, laptop functions.
446 */
447 reserve_bootmem_generic(0, PAGE_SIZE);
448
449 /* reserve ebda region */
ac71d12c
AK
450 if (ebda_addr)
451 reserve_bootmem_generic(ebda_addr, ebda_size);
1da177e4
LT
452
453#ifdef CONFIG_SMP
454 /*
455 * But first pinch a few for the stack/trampoline stuff
456 * FIXME: Don't need the extra page at 4K, but need to fix
457 * trampoline before removing it. (see the GDT stuff)
458 */
459 reserve_bootmem_generic(PAGE_SIZE, PAGE_SIZE);
460
461 /* Reserve SMP trampoline */
462 reserve_bootmem_generic(SMP_TRAMPOLINE_BASE, PAGE_SIZE);
463#endif
464
465#ifdef CONFIG_ACPI_SLEEP
466 /*
467 * Reserve low memory region for sleep support.
468 */
469 acpi_reserve_bootmem();
470#endif
1da177e4
LT
471 /*
472 * Find and reserve possible boot-time SMP configuration:
473 */
474 find_smp_config();
1da177e4
LT
475#ifdef CONFIG_BLK_DEV_INITRD
476 if (LOADER_TYPE && INITRD_START) {
477 if (INITRD_START + INITRD_SIZE <= (end_pfn << PAGE_SHIFT)) {
478 reserve_bootmem_generic(INITRD_START, INITRD_SIZE);
479 initrd_start =
480 INITRD_START ? INITRD_START + PAGE_OFFSET : 0;
481 initrd_end = initrd_start+INITRD_SIZE;
482 }
483 else {
484 printk(KERN_ERR "initrd extends beyond end of memory "
485 "(0x%08lx > 0x%08lx)\ndisabling initrd\n",
486 (unsigned long)(INITRD_START + INITRD_SIZE),
487 (unsigned long)(end_pfn << PAGE_SHIFT));
488 initrd_start = 0;
489 }
490 }
491#endif
5f5609df
EB
492#ifdef CONFIG_KEXEC
493 if (crashk_res.start != crashk_res.end) {
00212fef 494 reserve_bootmem_generic(crashk_res.start,
5f5609df
EB
495 crashk_res.end - crashk_res.start + 1);
496 }
497#endif
0d317fb7 498
1da177e4
LT
499 paging_init();
500
f157cbb1 501#ifdef CONFIG_PCI
dfa4698c 502 early_quirks();
f157cbb1 503#endif
1da177e4 504
51f62e18
AR
505 /*
506 * set this early, so we dont allocate cpu0
507 * if MADT list doesnt list BSP first
508 * mpparse.c/MP_processor_info() allocates logical cpu numbers.
509 */
510 cpu_set(0, cpu_present_map);
888ba6c6 511#ifdef CONFIG_ACPI
1da177e4
LT
512 /*
513 * Read APIC and some other early information from ACPI tables.
514 */
515 acpi_boot_init();
516#endif
517
05b3cbd8
RT
518 init_cpu_to_node();
519
1da177e4
LT
520 /*
521 * get boot-time SMP configuration:
522 */
523 if (smp_found_config)
524 get_smp_config();
525 init_apic_mappings();
1da177e4
LT
526
527 /*
528 * Request address space for all standard RAM and ROM resources
529 * and also for regions reported as reserved by the e820.
530 */
531 probe_roms();
532 e820_reserve_resources();
e8eff5ac 533 e820_mark_nosave_regions();
1da177e4
LT
534
535 request_resource(&iomem_resource, &video_ram_resource);
536
537 {
538 unsigned i;
539 /* request I/O space for devices used on all i[345]86 PCs */
540 for (i = 0; i < STANDARD_IO_RESOURCES; i++)
541 request_resource(&ioport_resource, &standard_io_resources[i]);
542 }
543
a1e97782 544 e820_setup_gap();
1da177e4 545
1da177e4
LT
546#ifdef CONFIG_VT
547#if defined(CONFIG_VGA_CONSOLE)
548 conswitchp = &vga_con;
549#elif defined(CONFIG_DUMMY_CONSOLE)
550 conswitchp = &dummy_con;
551#endif
552#endif
553}
554
e6982c67 555static int __cpuinit get_model_name(struct cpuinfo_x86 *c)
1da177e4
LT
556{
557 unsigned int *v;
558
ebfcaa96 559 if (c->extended_cpuid_level < 0x80000004)
1da177e4
LT
560 return 0;
561
562 v = (unsigned int *) c->x86_model_id;
563 cpuid(0x80000002, &v[0], &v[1], &v[2], &v[3]);
564 cpuid(0x80000003, &v[4], &v[5], &v[6], &v[7]);
565 cpuid(0x80000004, &v[8], &v[9], &v[10], &v[11]);
566 c->x86_model_id[48] = 0;
567 return 1;
568}
569
570
e6982c67 571static void __cpuinit display_cacheinfo(struct cpuinfo_x86 *c)
1da177e4
LT
572{
573 unsigned int n, dummy, eax, ebx, ecx, edx;
574
ebfcaa96 575 n = c->extended_cpuid_level;
1da177e4
LT
576
577 if (n >= 0x80000005) {
578 cpuid(0x80000005, &dummy, &ebx, &ecx, &edx);
579 printk(KERN_INFO "CPU: L1 I Cache: %dK (%d bytes/line), D cache %dK (%d bytes/line)\n",
580 edx>>24, edx&0xFF, ecx>>24, ecx&0xFF);
581 c->x86_cache_size=(ecx>>24)+(edx>>24);
582 /* On K8 L1 TLB is inclusive, so don't count it */
583 c->x86_tlbsize = 0;
584 }
585
586 if (n >= 0x80000006) {
587 cpuid(0x80000006, &dummy, &ebx, &ecx, &edx);
588 ecx = cpuid_ecx(0x80000006);
589 c->x86_cache_size = ecx >> 16;
590 c->x86_tlbsize += ((ebx >> 16) & 0xfff) + (ebx & 0xfff);
591
592 printk(KERN_INFO "CPU: L2 Cache: %dK (%d bytes/line)\n",
593 c->x86_cache_size, ecx & 0xFF);
594 }
595
596 if (n >= 0x80000007)
597 cpuid(0x80000007, &dummy, &dummy, &dummy, &c->x86_power);
598 if (n >= 0x80000008) {
599 cpuid(0x80000008, &eax, &dummy, &dummy, &dummy);
600 c->x86_virt_bits = (eax >> 8) & 0xff;
601 c->x86_phys_bits = eax & 0xff;
602 }
603}
604
#ifdef CONFIG_NUMA
/*
 * Return the online node mapped to the APIC id numerically closest
 * to @apicid, searching downwards first, then upwards.
 */
static int nearby_node(int apicid)
{
	int i;

	for (i = apicid - 1; i >= 0; i--) {
		int node = apicid_to_node[i];
		if (node != NUMA_NO_NODE && node_online(node))
			return node;
	}
	for (i = apicid + 1; i < MAX_LOCAL_APIC; i++) {
		int node = apicid_to_node[i];
		if (node != NUMA_NO_NODE && node_online(node))
			return node;
	}
	return first_node(node_online_map); /* Shouldn't happen */
}
#endif
622
63518644
AK
623/*
624 * On a AMD dual core setup the lower bits of the APIC id distingush the cores.
625 * Assumes number of cores is a power of two.
626 */
627static void __init amd_detect_cmp(struct cpuinfo_x86 *c)
628{
629#ifdef CONFIG_SMP
b41e2939 630 unsigned bits;
3f098c26 631#ifdef CONFIG_NUMA
f3fa8ebc 632 int cpu = smp_processor_id();
3f098c26 633 int node = 0;
60c1bc82 634 unsigned apicid = hard_smp_processor_id();
3f098c26 635#endif
faee9a5d 636 unsigned ecx = cpuid_ecx(0x80000008);
b41e2939 637
faee9a5d 638 c->x86_max_cores = (ecx & 0xff) + 1;
b41e2939 639
faee9a5d
AK
640 /* CPU telling us the core id bits shift? */
641 bits = (ecx >> 12) & 0xF;
642
643 /* Otherwise recompute */
644 if (bits == 0) {
645 while ((1 << bits) < c->x86_max_cores)
646 bits++;
647 }
b41e2939
AK
648
649 /* Low order bits define the core id (index of core in socket) */
f3fa8ebc 650 c->cpu_core_id = c->phys_proc_id & ((1 << bits)-1);
b41e2939 651 /* Convert the APIC ID into the socket ID */
f3fa8ebc 652 c->phys_proc_id = phys_pkg_id(bits);
63518644
AK
653
654#ifdef CONFIG_NUMA
f3fa8ebc 655 node = c->phys_proc_id;
3f098c26
AK
656 if (apicid_to_node[apicid] != NUMA_NO_NODE)
657 node = apicid_to_node[apicid];
658 if (!node_online(node)) {
659 /* Two possibilities here:
660 - The CPU is missing memory and no node was created.
661 In that case try picking one from a nearby CPU
662 - The APIC IDs differ from the HyperTransport node IDs
663 which the K8 northbridge parsing fills in.
664 Assume they are all increased by a constant offset,
665 but in the same order as the HT nodeids.
666 If that doesn't result in a usable node fall back to the
667 path for the previous case. */
f3fa8ebc 668 int ht_nodeid = apicid - (cpu_data[0].phys_proc_id << bits);
3f098c26
AK
669 if (ht_nodeid >= 0 &&
670 apicid_to_node[ht_nodeid] != NUMA_NO_NODE)
671 node = apicid_to_node[ht_nodeid];
672 /* Pick a nearby node */
673 if (!node_online(node))
674 node = nearby_node(apicid);
675 }
69d81fcd 676 numa_set_node(cpu, node);
3f098c26 677
e42f9437 678 printk(KERN_INFO "CPU %d/%x -> Node %d\n", cpu, apicid, node);
63518644 679#endif
63518644
AK
680#endif
681}
1da177e4 682
ed77504b 683static void __cpuinit init_amd(struct cpuinfo_x86 *c)
1da177e4 684{
7bcd3f34 685 unsigned level;
1da177e4 686
bc5e8fdf
LT
687#ifdef CONFIG_SMP
688 unsigned long value;
689
7d318d77
AK
690 /*
691 * Disable TLB flush filter by setting HWCR.FFDIS on K8
692 * bit 6 of msr C001_0015
693 *
694 * Errata 63 for SH-B3 steppings
695 * Errata 122 for all steppings (F+ have it disabled by default)
696 */
697 if (c->x86 == 15) {
698 rdmsrl(MSR_K8_HWCR, value);
699 value |= 1 << 6;
700 wrmsrl(MSR_K8_HWCR, value);
701 }
bc5e8fdf
LT
702#endif
703
1da177e4
LT
704 /* Bit 31 in normal CPUID used for nonstandard 3DNow ID;
705 3DNow is IDd by bit 31 in extended CPUID (1*32+31) anyway */
706 clear_bit(0*32+31, &c->x86_capability);
707
7bcd3f34
AK
708 /* On C+ stepping K8 rep microcode works well for copy/memset */
709 level = cpuid_eax(1);
710 if (c->x86 == 15 && ((level >= 0x0f48 && level < 0x0f50) || level >= 0x0f58))
711 set_bit(X86_FEATURE_REP_GOOD, &c->x86_capability);
712
18bd057b
AK
713 /* Enable workaround for FXSAVE leak */
714 if (c->x86 >= 6)
715 set_bit(X86_FEATURE_FXSAVE_LEAK, &c->x86_capability);
716
e42f9437
RS
717 level = get_model_name(c);
718 if (!level) {
1da177e4
LT
719 switch (c->x86) {
720 case 15:
721 /* Should distinguish Models here, but this is only
722 a fallback anyways. */
723 strcpy(c->x86_model_id, "Hammer");
724 break;
725 }
726 }
727 display_cacheinfo(c);
728
130951cc
AK
729 /* c->x86_power is 8000_0007 edx. Bit 8 is constant TSC */
730 if (c->x86_power & (1<<8))
731 set_bit(X86_FEATURE_CONSTANT_TSC, &c->x86_capability);
732
faee9a5d
AK
733 /* Multi core CPU? */
734 if (c->extended_cpuid_level >= 0x80000008)
63518644 735 amd_detect_cmp(c);
1da177e4 736
240cd6a8
AK
737 /* Fix cpuid4 emulation for more */
738 num_cache_leaves = 3;
2049336f
AK
739
740 /* When there is only one core no need to synchronize RDTSC */
741 if (num_possible_cpus() == 1)
742 set_bit(X86_FEATURE_SYNC_RDTSC, &c->x86_capability);
743 else
744 clear_bit(X86_FEATURE_SYNC_RDTSC, &c->x86_capability);
1da177e4
LT
745}
746
e6982c67 747static void __cpuinit detect_ht(struct cpuinfo_x86 *c)
1da177e4
LT
748{
749#ifdef CONFIG_SMP
750 u32 eax, ebx, ecx, edx;
94605eff 751 int index_msb, core_bits;
94605eff
SS
752
753 cpuid(1, &eax, &ebx, &ecx, &edx);
754
94605eff 755
e42f9437 756 if (!cpu_has(c, X86_FEATURE_HT))
1da177e4 757 return;
e42f9437
RS
758 if (cpu_has(c, X86_FEATURE_CMP_LEGACY))
759 goto out;
1da177e4 760
1da177e4 761 smp_num_siblings = (ebx & 0xff0000) >> 16;
94605eff 762
1da177e4
LT
763 if (smp_num_siblings == 1) {
764 printk(KERN_INFO "CPU: Hyper-Threading is disabled\n");
94605eff
SS
765 } else if (smp_num_siblings > 1 ) {
766
1da177e4
LT
767 if (smp_num_siblings > NR_CPUS) {
768 printk(KERN_WARNING "CPU: Unsupported number of the siblings %d", smp_num_siblings);
769 smp_num_siblings = 1;
770 return;
771 }
94605eff
SS
772
773 index_msb = get_count_order(smp_num_siblings);
f3fa8ebc 774 c->phys_proc_id = phys_pkg_id(index_msb);
3dd9d514 775
94605eff 776 smp_num_siblings = smp_num_siblings / c->x86_max_cores;
3dd9d514 777
94605eff
SS
778 index_msb = get_count_order(smp_num_siblings) ;
779
780 core_bits = get_count_order(c->x86_max_cores);
3dd9d514 781
f3fa8ebc 782 c->cpu_core_id = phys_pkg_id(index_msb) &
94605eff 783 ((1 << core_bits) - 1);
1da177e4 784 }
e42f9437
RS
785out:
786 if ((c->x86_max_cores * smp_num_siblings) > 1) {
787 printk(KERN_INFO "CPU: Physical Processor ID: %d\n", c->phys_proc_id);
788 printk(KERN_INFO "CPU: Processor Core ID: %d\n", c->cpu_core_id);
789 }
790
1da177e4
LT
791#endif
792}
793
3dd9d514
AK
794/*
795 * find out the number of processor cores on the die
796 */
e6982c67 797static int __cpuinit intel_num_cpu_cores(struct cpuinfo_x86 *c)
3dd9d514 798{
2bbc419f 799 unsigned int eax, t;
3dd9d514
AK
800
801 if (c->cpuid_level < 4)
802 return 1;
803
2bbc419f 804 cpuid_count(4, 0, &eax, &t, &t, &t);
3dd9d514
AK
805
806 if (eax & 0x1f)
807 return ((eax >> 26) + 1);
808 else
809 return 1;
810}
811
/* Bind the current CPU to the NUMA node its APIC id maps to (SRAT). */
static void srat_detect_node(void)
{
#ifdef CONFIG_NUMA
	unsigned node;
	int cpu = smp_processor_id();
	int apicid = hard_smp_processor_id();

	/* Don't do the funky fallback heuristics the AMD version employs
	   for now. */
	node = apicid_to_node[apicid];
	if (node == NUMA_NO_NODE)
		node = first_node(node_online_map);
	numa_set_node(cpu, node);

	printk(KERN_INFO "CPU %d/%x -> Node %d\n", cpu, apicid, node);
#endif
}
829
e6982c67 830static void __cpuinit init_intel(struct cpuinfo_x86 *c)
1da177e4
LT
831{
832 /* Cache sizes */
833 unsigned n;
834
835 init_intel_cacheinfo(c);
0080e667
VP
836 if (c->cpuid_level > 9 ) {
837 unsigned eax = cpuid_eax(10);
838 /* Check for version and the number of counters */
839 if ((eax & 0xff) && (((eax>>8) & 0xff) > 1))
840 set_bit(X86_FEATURE_ARCH_PERFMON, &c->x86_capability);
841 }
842
ebfcaa96 843 n = c->extended_cpuid_level;
1da177e4
LT
844 if (n >= 0x80000008) {
845 unsigned eax = cpuid_eax(0x80000008);
846 c->x86_virt_bits = (eax >> 8) & 0xff;
847 c->x86_phys_bits = eax & 0xff;
af9c142d
SL
848 /* CPUID workaround for Intel 0F34 CPU */
849 if (c->x86_vendor == X86_VENDOR_INTEL &&
850 c->x86 == 0xF && c->x86_model == 0x3 &&
851 c->x86_mask == 0x4)
852 c->x86_phys_bits = 36;
1da177e4
LT
853 }
854
855 if (c->x86 == 15)
856 c->x86_cache_alignment = c->x86_clflush_size * 2;
39b3a791
AK
857 if ((c->x86 == 0xf && c->x86_model >= 0x03) ||
858 (c->x86 == 0x6 && c->x86_model >= 0x0e))
c29601e9 859 set_bit(X86_FEATURE_CONSTANT_TSC, &c->x86_capability);
27fbe5b2
AK
860 if (c->x86 == 6)
861 set_bit(X86_FEATURE_REP_GOOD, &c->x86_capability);
c818a181 862 set_bit(X86_FEATURE_SYNC_RDTSC, &c->x86_capability);
94605eff 863 c->x86_max_cores = intel_num_cpu_cores(c);
df0cc26b
AK
864
865 srat_detect_node();
1da177e4
LT
866}
867
672289e9 868static void __cpuinit get_cpu_vendor(struct cpuinfo_x86 *c)
1da177e4
LT
869{
870 char *v = c->x86_vendor_id;
871
872 if (!strcmp(v, "AuthenticAMD"))
873 c->x86_vendor = X86_VENDOR_AMD;
874 else if (!strcmp(v, "GenuineIntel"))
875 c->x86_vendor = X86_VENDOR_INTEL;
876 else
877 c->x86_vendor = X86_VENDOR_UNKNOWN;
878}
879
880struct cpu_model_info {
881 int vendor;
882 int family;
883 char *model_names[16];
884};
885
886/* Do some early cpuid on the boot CPU to get some parameter that are
887 needed before check_bugs. Everything advanced is in identify_cpu
888 below. */
e6982c67 889void __cpuinit early_identify_cpu(struct cpuinfo_x86 *c)
1da177e4
LT
890{
891 u32 tfms;
892
893 c->loops_per_jiffy = loops_per_jiffy;
894 c->x86_cache_size = -1;
895 c->x86_vendor = X86_VENDOR_UNKNOWN;
896 c->x86_model = c->x86_mask = 0; /* So far unknown... */
897 c->x86_vendor_id[0] = '\0'; /* Unset */
898 c->x86_model_id[0] = '\0'; /* Unset */
899 c->x86_clflush_size = 64;
900 c->x86_cache_alignment = c->x86_clflush_size;
94605eff 901 c->x86_max_cores = 1;
ebfcaa96 902 c->extended_cpuid_level = 0;
1da177e4
LT
903 memset(&c->x86_capability, 0, sizeof c->x86_capability);
904
905 /* Get vendor name */
906 cpuid(0x00000000, (unsigned int *)&c->cpuid_level,
907 (unsigned int *)&c->x86_vendor_id[0],
908 (unsigned int *)&c->x86_vendor_id[8],
909 (unsigned int *)&c->x86_vendor_id[4]);
910
911 get_cpu_vendor(c);
912
913 /* Initialize the standard set of capabilities */
914 /* Note that the vendor-specific code below might override */
915
916 /* Intel-defined flags: level 0x00000001 */
917 if (c->cpuid_level >= 0x00000001) {
918 __u32 misc;
919 cpuid(0x00000001, &tfms, &misc, &c->x86_capability[4],
920 &c->x86_capability[0]);
921 c->x86 = (tfms >> 8) & 0xf;
922 c->x86_model = (tfms >> 4) & 0xf;
923 c->x86_mask = tfms & 0xf;
f5f786d0 924 if (c->x86 == 0xf)
1da177e4 925 c->x86 += (tfms >> 20) & 0xff;
f5f786d0 926 if (c->x86 >= 0x6)
1da177e4 927 c->x86_model += ((tfms >> 16) & 0xF) << 4;
1da177e4
LT
928 if (c->x86_capability[0] & (1<<19))
929 c->x86_clflush_size = ((misc >> 8) & 0xff) * 8;
1da177e4
LT
930 } else {
931 /* Have CPUID level 0 only - unheard of */
932 c->x86 = 4;
933 }
a158608b
AK
934
935#ifdef CONFIG_SMP
f3fa8ebc 936 c->phys_proc_id = (cpuid_ebx(1) >> 24) & 0xff;
a158608b 937#endif
1da177e4
LT
938}
939
940/*
941 * This does the hard work of actually picking apart the CPU stuff...
942 */
e6982c67 943void __cpuinit identify_cpu(struct cpuinfo_x86 *c)
1da177e4
LT
944{
945 int i;
946 u32 xlvl;
947
948 early_identify_cpu(c);
949
950 /* AMD-defined flags: level 0x80000001 */
951 xlvl = cpuid_eax(0x80000000);
ebfcaa96 952 c->extended_cpuid_level = xlvl;
1da177e4
LT
953 if ((xlvl & 0xffff0000) == 0x80000000) {
954 if (xlvl >= 0x80000001) {
955 c->x86_capability[1] = cpuid_edx(0x80000001);
5b7abc6f 956 c->x86_capability[6] = cpuid_ecx(0x80000001);
1da177e4
LT
957 }
958 if (xlvl >= 0x80000004)
959 get_model_name(c); /* Default name */
960 }
961
962 /* Transmeta-defined flags: level 0x80860001 */
963 xlvl = cpuid_eax(0x80860000);
964 if ((xlvl & 0xffff0000) == 0x80860000) {
965 /* Don't set x86_cpuid_level here for now to not confuse. */
966 if (xlvl >= 0x80860001)
967 c->x86_capability[2] = cpuid_edx(0x80860001);
968 }
969
1e9f28fa
SS
970 c->apicid = phys_pkg_id(0);
971
1da177e4
LT
972 /*
973 * Vendor-specific initialization. In this section we
974 * canonicalize the feature flags, meaning if there are
975 * features a certain CPU supports which CPUID doesn't
976 * tell us, CPUID claiming incorrect flags, or other bugs,
977 * we handle them here.
978 *
979 * At the end of this section, c->x86_capability better
980 * indicate the features this CPU genuinely supports!
981 */
982 switch (c->x86_vendor) {
983 case X86_VENDOR_AMD:
984 init_amd(c);
985 break;
986
987 case X86_VENDOR_INTEL:
988 init_intel(c);
989 break;
990
991 case X86_VENDOR_UNKNOWN:
992 default:
993 display_cacheinfo(c);
994 break;
995 }
996
997 select_idle_routine(c);
998 detect_ht(c);
1da177e4
LT
999
1000 /*
1001 * On SMP, boot_cpu_data holds the common feature set between
1002 * all CPUs; so make sure that we indicate which features are
1003 * common between the CPUs. The first time this routine gets
1004 * executed, c == &boot_cpu_data.
1005 */
1006 if (c != &boot_cpu_data) {
1007 /* AND the already accumulated flags with these */
1008 for (i = 0 ; i < NCAPINTS ; i++)
1009 boot_cpu_data.x86_capability[i] &= c->x86_capability[i];
1010 }
1011
1012#ifdef CONFIG_X86_MCE
1013 mcheck_init(c);
1014#endif
3b520b23
SL
1015 if (c == &boot_cpu_data)
1016 mtrr_bp_init();
1017 else
1018 mtrr_ap_init();
1da177e4 1019#ifdef CONFIG_NUMA
3019e8eb 1020 numa_add_cpu(smp_processor_id());
1da177e4
LT
1021#endif
1022}
1023
1024
e6982c67 1025void __cpuinit print_cpu_info(struct cpuinfo_x86 *c)
1da177e4
LT
1026{
1027 if (c->x86_model_id[0])
1028 printk("%s", c->x86_model_id);
1029
1030 if (c->x86_mask || c->cpuid_level >= 0)
1031 printk(" stepping %02x\n", c->x86_mask);
1032 else
1033 printk("\n");
1034}
1035
1036/*
1037 * Get CPU information for use by the procfs.
1038 */
1039
1040static int show_cpuinfo(struct seq_file *m, void *v)
1041{
1042 struct cpuinfo_x86 *c = v;
1043
1044 /*
1045 * These flag bits must match the definitions in <asm/cpufeature.h>.
1046 * NULL means this bit is undefined or reserved; either way it doesn't
1047 * have meaning as far as Linux is concerned. Note that it's important
1048 * to realize there is a difference between this table and CPUID -- if
1049 * applications want to get the raw CPUID data, they should access
1050 * /dev/cpu/<cpu_nr>/cpuid instead.
1051 */
1052 static char *x86_cap_flags[] = {
1053 /* Intel-defined */
1054 "fpu", "vme", "de", "pse", "tsc", "msr", "pae", "mce",
1055 "cx8", "apic", NULL, "sep", "mtrr", "pge", "mca", "cmov",
1056 "pat", "pse36", "pn", "clflush", NULL, "dts", "acpi", "mmx",
1057 "fxsr", "sse", "sse2", "ss", "ht", "tm", "ia64", NULL,
1058
1059 /* AMD-defined */
3c3b73b6 1060 NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
1da177e4
LT
1061 NULL, NULL, NULL, "syscall", NULL, NULL, NULL, NULL,
1062 NULL, NULL, NULL, NULL, "nx", NULL, "mmxext", NULL,
7b0e8501 1063 NULL, "fxsr_opt", NULL, "rdtscp", NULL, "lm", "3dnowext", "3dnow",
1da177e4
LT
1064
1065 /* Transmeta-defined */
1066 "recovery", "longrun", NULL, "lrti", NULL, NULL, NULL, NULL,
1067 NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
1068 NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
1069 NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
1070
1071 /* Other (Linux-defined) */
622dcaf9 1072 "cxmmx", NULL, "cyrix_arr", "centaur_mcr", NULL,
c29601e9 1073 "constant_tsc", NULL, NULL,
d167a518 1074 "up", NULL, NULL, NULL, NULL, NULL, NULL, NULL,
1da177e4
LT
1075 NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
1076 NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
1077
1078 /* Intel-defined (#2) */
9d95dd84 1079 "pni", NULL, NULL, "monitor", "ds_cpl", "vmx", "smx", "est",
dcf10307
DJ
1080 "tm2", "ssse3", "cid", NULL, NULL, "cx16", "xtpr", NULL,
1081 NULL, NULL, "dca", NULL, NULL, NULL, NULL, NULL,
1da177e4
LT
1082 NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
1083
5b7abc6f
PA
1084 /* VIA/Cyrix/Centaur-defined */
1085 NULL, NULL, "rng", "rng_en", NULL, NULL, "ace", "ace_en",
1086 NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
1087 NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
1088 NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
1089
1da177e4 1090 /* AMD-defined (#2) */
3f98bc49 1091 "lahf_lm", "cmp_legacy", "svm", NULL, "cr8_legacy", NULL, NULL, NULL,
1da177e4
LT
1092 NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
1093 NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
5b7abc6f 1094 NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
1da177e4
LT
1095 };
1096 static char *x86_power_flags[] = {
1097 "ts", /* temperature sensor */
1098 "fid", /* frequency id control */
1099 "vid", /* voltage id control */
1100 "ttp", /* thermal trip */
1101 "tm",
3f98bc49
AK
1102 "stc",
1103 NULL,
39b3a791 1104 /* nothing */ /* constant_tsc - moved to flags */
1da177e4
LT
1105 };
1106
1107
1108#ifdef CONFIG_SMP
1109 if (!cpu_online(c-cpu_data))
1110 return 0;
1111#endif
1112
1113 seq_printf(m,"processor\t: %u\n"
1114 "vendor_id\t: %s\n"
1115 "cpu family\t: %d\n"
1116 "model\t\t: %d\n"
1117 "model name\t: %s\n",
1118 (unsigned)(c-cpu_data),
1119 c->x86_vendor_id[0] ? c->x86_vendor_id : "unknown",
1120 c->x86,
1121 (int)c->x86_model,
1122 c->x86_model_id[0] ? c->x86_model_id : "unknown");
1123
1124 if (c->x86_mask || c->cpuid_level >= 0)
1125 seq_printf(m, "stepping\t: %d\n", c->x86_mask);
1126 else
1127 seq_printf(m, "stepping\t: unknown\n");
1128
1129 if (cpu_has(c,X86_FEATURE_TSC)) {
95235ca2
VP
1130 unsigned int freq = cpufreq_quick_get((unsigned)(c-cpu_data));
1131 if (!freq)
1132 freq = cpu_khz;
1da177e4 1133 seq_printf(m, "cpu MHz\t\t: %u.%03u\n",
95235ca2 1134 freq / 1000, (freq % 1000));
1da177e4
LT
1135 }
1136
1137 /* Cache size */
1138 if (c->x86_cache_size >= 0)
1139 seq_printf(m, "cache size\t: %d KB\n", c->x86_cache_size);
1140
1141#ifdef CONFIG_SMP
94605eff 1142 if (smp_num_siblings * c->x86_max_cores > 1) {
db468681 1143 int cpu = c - cpu_data;
f3fa8ebc 1144 seq_printf(m, "physical id\t: %d\n", c->phys_proc_id);
94605eff 1145 seq_printf(m, "siblings\t: %d\n", cpus_weight(cpu_core_map[cpu]));
f3fa8ebc 1146 seq_printf(m, "core id\t\t: %d\n", c->cpu_core_id);
94605eff 1147 seq_printf(m, "cpu cores\t: %d\n", c->booted_cores);
db468681 1148 }
1da177e4
LT
1149#endif
1150
1151 seq_printf(m,
1152 "fpu\t\t: yes\n"
1153 "fpu_exception\t: yes\n"
1154 "cpuid level\t: %d\n"
1155 "wp\t\t: yes\n"
1156 "flags\t\t:",
1157 c->cpuid_level);
1158
1159 {
1160 int i;
1161 for ( i = 0 ; i < 32*NCAPINTS ; i++ )
3d1712c9 1162 if (cpu_has(c, i) && x86_cap_flags[i] != NULL)
1da177e4
LT
1163 seq_printf(m, " %s", x86_cap_flags[i]);
1164 }
1165
1166 seq_printf(m, "\nbogomips\t: %lu.%02lu\n",
1167 c->loops_per_jiffy/(500000/HZ),
1168 (c->loops_per_jiffy/(5000/HZ)) % 100);
1169
1170 if (c->x86_tlbsize > 0)
1171 seq_printf(m, "TLB size\t: %d 4K pages\n", c->x86_tlbsize);
1172 seq_printf(m, "clflush size\t: %d\n", c->x86_clflush_size);
1173 seq_printf(m, "cache_alignment\t: %d\n", c->x86_cache_alignment);
1174
1175 seq_printf(m, "address sizes\t: %u bits physical, %u bits virtual\n",
1176 c->x86_phys_bits, c->x86_virt_bits);
1177
1178 seq_printf(m, "power management:");
1179 {
1180 unsigned i;
1181 for (i = 0; i < 32; i++)
1182 if (c->x86_power & (1 << i)) {
3f98bc49
AK
1183 if (i < ARRAY_SIZE(x86_power_flags) &&
1184 x86_power_flags[i])
1185 seq_printf(m, "%s%s",
1186 x86_power_flags[i][0]?" ":"",
1187 x86_power_flags[i]);
1da177e4
LT
1188 else
1189 seq_printf(m, " [%d]", i);
1190 }
1191 }
1da177e4 1192
d31ddaa1 1193 seq_printf(m, "\n\n");
1da177e4
LT
1194
1195 return 0;
1196}
1197
1198static void *c_start(struct seq_file *m, loff_t *pos)
1199{
1200 return *pos < NR_CPUS ? cpu_data + *pos : NULL;
1201}
1202
1203static void *c_next(struct seq_file *m, void *v, loff_t *pos)
1204{
1205 ++*pos;
1206 return c_start(m, pos);
1207}
1208
/* seq_file iterator stop: nothing to release for the static cpu_data array */
static void c_stop(struct seq_file *m, void *v)
{
}
1212
/* seq_file operations backing /proc/cpuinfo; wired up by fs/proc */
struct seq_operations cpuinfo_op = {
	.start =c_start,
	.next =	c_next,
	.stop =	c_stop,
	.show =	show_cpuinfo,
};
e9928674 1219
9c63f873 1220#if defined(CONFIG_INPUT_PCSPKR) || defined(CONFIG_INPUT_PCSPKR_MODULE)
160bd18e
MP
1221#include <linux/platform_device.h>
1222static __init int add_pcspkr(void)
1223{
1224 struct platform_device *pd;
1225 int ret;
1226
1227 pd = platform_device_alloc("pcspkr", -1);
1228 if (!pd)
1229 return -ENOMEM;
1230
1231 ret = platform_device_add(pd);
1232 if (ret)
1233 platform_device_put(pd);
1234
1235 return ret;
1236}
1237device_initcall(add_pcspkr);
1238#endif