x86: change bios_cpu_apicid to percpu data variable
arch/x86/kernel/setup_64.c
/*
 * Copyright (C) 1995 Linus Torvalds
 */

/*
 * This file handles the architecture-dependent parts of initialization
 */

#include <linux/errno.h>
#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/stddef.h>
#include <linux/unistd.h>
#include <linux/ptrace.h>
#include <linux/slab.h>
#include <linux/user.h>
#include <linux/a.out.h>
#include <linux/screen_info.h>
#include <linux/ioport.h>
#include <linux/delay.h>
#include <linux/init.h>
#include <linux/initrd.h>
#include <linux/highmem.h>
#include <linux/bootmem.h>
#include <linux/module.h>
#include <asm/processor.h>
#include <linux/console.h>
#include <linux/seq_file.h>
#include <linux/crash_dump.h>
#include <linux/root_dev.h>
#include <linux/pci.h>
#include <linux/efi.h>
#include <linux/acpi.h>
#include <linux/kallsyms.h>
#include <linux/edd.h>
#include <linux/mmzone.h>
#include <linux/kexec.h>
#include <linux/cpufreq.h>
#include <linux/dmi.h>
#include <linux/dma-mapping.h>
#include <linux/ctype.h>
#include <linux/uaccess.h>

#include <asm/mtrr.h>
#include <asm/uaccess.h>
#include <asm/system.h>
#include <asm/vsyscall.h>
#include <asm/io.h>
#include <asm/smp.h>
#include <asm/msr.h>
#include <asm/desc.h>
#include <video/edid.h>
#include <asm/e820.h>
#include <asm/dma.h>
#include <asm/gart.h>
#include <asm/mpspec.h>
#include <asm/mmu_context.h>
#include <asm/proto.h>
#include <asm/setup.h>
#include <asm/mach_apic.h>
#include <asm/numa.h>
#include <asm/sections.h>
#include <asm/dmi.h>
#include <asm/cacheflush.h>
#include <asm/mce.h>
#include <asm/ds.h>
#include <asm/topology.h>

#ifdef CONFIG_PARAVIRT
#include <asm/paravirt.h>
#else
#define ARCH_SETUP
#endif

/*
 * Machine setup..
 */

struct cpuinfo_x86 boot_cpu_data __read_mostly;
EXPORT_SYMBOL(boot_cpu_data);

unsigned long mmu_cr4_features;

/* Boot loader ID as an integer, for the benefit of proc_dointvec */
int bootloader_type;

unsigned long saved_video_mode;

int force_mwait __cpuinitdata;

/*
 * Early DMI memory
 */
int dmi_alloc_index;
char dmi_alloc_data[DMI_MAX_DATA];

/*
 * Setup options
 */
struct screen_info screen_info;
EXPORT_SYMBOL(screen_info);
struct sys_desc_table_struct {
	unsigned short length;
	unsigned char table[0];
};

struct edid_info edid_info;
EXPORT_SYMBOL_GPL(edid_info);

extern int root_mountflags;

char __initdata command_line[COMMAND_LINE_SIZE];

struct resource standard_io_resources[] = {
	{ .name = "dma1", .start = 0x00, .end = 0x1f,
		.flags = IORESOURCE_BUSY | IORESOURCE_IO },
	{ .name = "pic1", .start = 0x20, .end = 0x21,
		.flags = IORESOURCE_BUSY | IORESOURCE_IO },
	{ .name = "timer0", .start = 0x40, .end = 0x43,
		.flags = IORESOURCE_BUSY | IORESOURCE_IO },
	{ .name = "timer1", .start = 0x50, .end = 0x53,
		.flags = IORESOURCE_BUSY | IORESOURCE_IO },
	{ .name = "keyboard", .start = 0x60, .end = 0x6f,
		.flags = IORESOURCE_BUSY | IORESOURCE_IO },
	{ .name = "dma page reg", .start = 0x80, .end = 0x8f,
		.flags = IORESOURCE_BUSY | IORESOURCE_IO },
	{ .name = "pic2", .start = 0xa0, .end = 0xa1,
		.flags = IORESOURCE_BUSY | IORESOURCE_IO },
	{ .name = "dma2", .start = 0xc0, .end = 0xdf,
		.flags = IORESOURCE_BUSY | IORESOURCE_IO },
	{ .name = "fpu", .start = 0xf0, .end = 0xff,
		.flags = IORESOURCE_BUSY | IORESOURCE_IO }
};

#define IORESOURCE_RAM (IORESOURCE_BUSY | IORESOURCE_MEM)

static struct resource data_resource = {
	.name = "Kernel data",
	.start = 0,
	.end = 0,
	.flags = IORESOURCE_RAM,
};
static struct resource code_resource = {
	.name = "Kernel code",
	.start = 0,
	.end = 0,
	.flags = IORESOURCE_RAM,
};
static struct resource bss_resource = {
	.name = "Kernel bss",
	.start = 0,
	.end = 0,
	.flags = IORESOURCE_RAM,
};

static void __cpuinit early_identify_cpu(struct cpuinfo_x86 *c);

#ifdef CONFIG_PROC_VMCORE
/* elfcorehdr= specifies the location of the ELF core header
 * stored by the crashed kernel. This option is passed by the
 * kexec loader to the capture kernel.
 */
static int __init setup_elfcorehdr(char *arg)
{
	char *end;
	if (!arg)
		return -EINVAL;
	elfcorehdr_addr = memparse(arg, &end);
	return end > arg ? 0 : -EINVAL;
}
early_param("elfcorehdr", setup_elfcorehdr);
#endif

#ifndef CONFIG_NUMA
static void __init
contig_initmem_init(unsigned long start_pfn, unsigned long end_pfn)
{
	unsigned long bootmap_size, bootmap;

	bootmap_size = bootmem_bootmap_pages(end_pfn)<<PAGE_SHIFT;
	bootmap = find_e820_area(0, end_pfn<<PAGE_SHIFT, bootmap_size);
	if (bootmap == -1L)
		panic("Cannot find bootmem map of size %ld\n", bootmap_size);
	bootmap_size = init_bootmem(bootmap >> PAGE_SHIFT, end_pfn);
	e820_register_active_regions(0, start_pfn, end_pfn);
	free_bootmem_with_active_regions(0, end_pfn);
	reserve_bootmem(bootmap, bootmap_size);
}
#endif

#if defined(CONFIG_EDD) || defined(CONFIG_EDD_MODULE)
struct edd edd;
#ifdef CONFIG_EDD_MODULE
EXPORT_SYMBOL(edd);
#endif
/**
 * copy_edd() - Copy the BIOS EDD information
 * from boot_params into a safe place.
 *
 */
static inline void copy_edd(void)
{
	memcpy(edd.mbr_signature, boot_params.edd_mbr_sig_buffer,
	       sizeof(edd.mbr_signature));
	memcpy(edd.edd_info, boot_params.eddbuf, sizeof(edd.edd_info));
	edd.mbr_signature_nr = boot_params.edd_mbr_sig_buf_entries;
	edd.edd_info_nr = boot_params.eddbuf_entries;
}
#else
static inline void copy_edd(void)
{
}
#endif

#ifdef CONFIG_KEXEC
static void __init reserve_crashkernel(void)
{
	unsigned long long free_mem;
	unsigned long long crash_size, crash_base;
	int ret;

	free_mem =
		((unsigned long long)max_low_pfn - min_low_pfn) << PAGE_SHIFT;

	ret = parse_crashkernel(boot_command_line, free_mem,
				&crash_size, &crash_base);
	if (ret == 0 && crash_size) {
		if (crash_base > 0) {
			printk(KERN_INFO "Reserving %ldMB of memory at %ldMB "
			       "for crashkernel (System RAM: %ldMB)\n",
			       (unsigned long)(crash_size >> 20),
			       (unsigned long)(crash_base >> 20),
			       (unsigned long)(free_mem >> 20));
			crashk_res.start = crash_base;
			crashk_res.end = crash_base + crash_size - 1;
			reserve_bootmem(crash_base, crash_size);
		} else
			printk(KERN_INFO "crashkernel reservation failed - "
			       "you have to specify a base address\n");
	}
}
#else
static inline void __init reserve_crashkernel(void)
{}
#endif

#define EBDA_ADDR_POINTER 0x40E

unsigned __initdata ebda_addr;
unsigned __initdata ebda_size;

static void __init discover_ebda(void)
{
	/*
	 * there is a real-mode segmented pointer pointing to the
	 * 4K EBDA area at 0x40E
	 */
	ebda_addr = *(unsigned short *)__va(EBDA_ADDR_POINTER);
	/*
	 * In some situations, such as paravirtualized guests, no EBDA
	 * information is available. In that case, just skip it.
	 */
	if (!ebda_addr) {
		ebda_size = 0;
		return;
	}

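	/* the pointer holds a real-mode segment; shift left 4 to form the
	   linear address */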
	ebda_addr <<= 4;

	ebda_size = *(unsigned short *)__va(ebda_addr);

	/* Round EBDA up to pages */
	if (ebda_size == 0)
		ebda_size = 1;
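	/* the size word at the start of the EBDA is in KiB; convert it to
	   bytes */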
	ebda_size <<= 10;
	ebda_size = round_up(ebda_size + (ebda_addr & ~PAGE_MASK), PAGE_SIZE);
	if (ebda_size > 64*1024)
		ebda_size = 64*1024;
}

/* Overridden in paravirt.c if CONFIG_PARAVIRT */
void __attribute__((weak)) __init memory_setup(void)
{
	machine_specific_memory_setup();
}

void __init setup_arch(char **cmdline_p)
{
	unsigned i;

	printk(KERN_INFO "Command line: %s\n", boot_command_line);

	ROOT_DEV = old_decode_dev(boot_params.hdr.root_dev);
	screen_info = boot_params.screen_info;
	edid_info = boot_params.edid_info;
	saved_video_mode = boot_params.hdr.vid_mode;
	bootloader_type = boot_params.hdr.type_of_loader;

#ifdef CONFIG_BLK_DEV_RAM
	rd_image_start = boot_params.hdr.ram_size & RAMDISK_IMAGE_START_MASK;
	rd_prompt = ((boot_params.hdr.ram_size & RAMDISK_PROMPT_FLAG) != 0);
	rd_doload = ((boot_params.hdr.ram_size & RAMDISK_LOAD_FLAG) != 0);
#endif
#ifdef CONFIG_EFI
	if (!strncmp((char *)&boot_params.efi_info.efi_loader_signature,
		     "EL64", 4))
		efi_enabled = 1;
#endif

	ARCH_SETUP

	memory_setup();
	copy_edd();

	if (!boot_params.hdr.root_flags)
		root_mountflags &= ~MS_RDONLY;
	init_mm.start_code = (unsigned long) &_text;
	init_mm.end_code = (unsigned long) &_etext;
	init_mm.end_data = (unsigned long) &_edata;
	init_mm.brk = (unsigned long) &_end;

	code_resource.start = virt_to_phys(&_text);
	code_resource.end = virt_to_phys(&_etext)-1;
	data_resource.start = virt_to_phys(&_etext);
	data_resource.end = virt_to_phys(&_edata)-1;
	bss_resource.start = virt_to_phys(&__bss_start);
	bss_resource.end = virt_to_phys(&__bss_stop)-1;

	early_identify_cpu(&boot_cpu_data);

	strlcpy(command_line, boot_command_line, COMMAND_LINE_SIZE);
	*cmdline_p = command_line;

	parse_early_param();

	finish_e820_parsing();

	early_gart_iommu_check();

	e820_register_active_regions(0, 0, -1UL);
	/*
	 * partially used pages are not usable - thus
	 * we are rounding upwards:
	 */
	end_pfn = e820_end_of_ram();
	num_physpages = end_pfn;

	check_efer();

	discover_ebda();

	init_memory_mapping(0, (end_pfn_map << PAGE_SHIFT));
	if (efi_enabled)
		efi_init();

	dmi_scan_machine();

	io_delay_init();

#ifdef CONFIG_SMP
	/* setup to use the early static init tables during kernel startup */
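	/* NB: per the patch title, these static tables only seed the
	 * per-CPU data; once the per-cpu areas are set up, the values are
	 * copied into the real percpu variables and these early pointers
	 * are not used again */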
	x86_cpu_to_apicid_early_ptr = (void *)&x86_cpu_to_apicid_init;
#ifdef CONFIG_NUMA
	x86_cpu_to_node_map_early_ptr = (void *)&x86_cpu_to_node_map_init;
#endif
	x86_bios_cpu_apicid_early_ptr = (void *)&x86_bios_cpu_apicid_init;
#endif

#ifdef CONFIG_ACPI
	/*
	 * Initialize the ACPI boot-time table parser (gets the RSDP and SDT).
	 * Call this early for SRAT node setup.
	 */
	acpi_boot_table_init();
#endif

	/* How many end-of-memory variables you have, grandma! */
	max_low_pfn = end_pfn;
	max_pfn = end_pfn;
	high_memory = (void *)__va(end_pfn * PAGE_SIZE - 1) + 1;

	/* Remove active ranges so rediscovery with NUMA-awareness happens */
	remove_all_active_ranges();

#ifdef CONFIG_ACPI_NUMA
	/*
	 * Parse SRAT to discover nodes.
	 */
	acpi_numa_init();
#endif

#ifdef CONFIG_NUMA
	numa_initmem_init(0, end_pfn);
#else
	contig_initmem_init(0, end_pfn);
#endif

	/* Reserve direct mapping */
	reserve_bootmem_generic(table_start << PAGE_SHIFT,
				(table_end - table_start) << PAGE_SHIFT);

	/* reserve kernel */
	reserve_bootmem_generic(__pa_symbol(&_text),
				__pa_symbol(&_end) - __pa_symbol(&_text));

	/*
	 * reserve physical page 0 - it's a special BIOS page on many boxes,
	 * enabling clean reboots, SMP operation, laptop functions.
	 */
	reserve_bootmem_generic(0, PAGE_SIZE);

	/* reserve ebda region */
	if (ebda_addr)
		reserve_bootmem_generic(ebda_addr, ebda_size);
#ifdef CONFIG_NUMA
	/* reserve nodemap region */
	if (nodemap_addr)
		reserve_bootmem_generic(nodemap_addr, nodemap_size);
#endif

#ifdef CONFIG_SMP
	/* Reserve SMP trampoline */
	reserve_bootmem_generic(SMP_TRAMPOLINE_BASE, 2*PAGE_SIZE);
#endif

#ifdef CONFIG_ACPI_SLEEP
	/*
	 * Reserve low memory region for sleep support.
	 */
	acpi_reserve_bootmem();
#endif

	if (efi_enabled) {
		efi_map_memmap();
		efi_reserve_bootmem();
	}

	/*
	 * Find and reserve possible boot-time SMP configuration:
	 */
	find_smp_config();
#ifdef CONFIG_BLK_DEV_INITRD
	if (boot_params.hdr.type_of_loader && boot_params.hdr.ramdisk_image) {
		unsigned long ramdisk_image = boot_params.hdr.ramdisk_image;
		unsigned long ramdisk_size = boot_params.hdr.ramdisk_size;
		unsigned long ramdisk_end = ramdisk_image + ramdisk_size;
		unsigned long end_of_mem = end_pfn << PAGE_SHIFT;

		if (ramdisk_end <= end_of_mem) {
			reserve_bootmem_generic(ramdisk_image, ramdisk_size);
			initrd_start = ramdisk_image + PAGE_OFFSET;
			initrd_end = initrd_start+ramdisk_size;
		} else {
			printk(KERN_ERR "initrd extends beyond end of memory "
			       "(0x%08lx > 0x%08lx)\ndisabling initrd\n",
			       ramdisk_end, end_of_mem);
			initrd_start = 0;
		}
	}
#endif
	reserve_crashkernel();
	paging_init();
	map_vsyscall();

	early_quirks();

	/*
	 * Set this early, so we don't allocate cpu0 if the MADT list
	 * doesn't list the BSP first.
	 * mpparse.c/MP_processor_info() allocates logical cpu numbers.
	 */
	cpu_set(0, cpu_present_map);
#ifdef CONFIG_ACPI
	/*
	 * Read APIC and some other early information from ACPI tables.
	 */
	acpi_boot_init();
#endif

	init_cpu_to_node();

	/*
	 * get boot-time SMP configuration:
	 */
	if (smp_found_config)
		get_smp_config();
	init_apic_mappings();
	ioapic_init_mappings();

	/*
	 * We trust e820 completely. No explicit ROM probing in memory.
	 */
	e820_reserve_resources(&code_resource, &data_resource, &bss_resource);
	e820_mark_nosave_regions();

	/* request I/O space for devices used on all i[345]86 PCs */
	for (i = 0; i < ARRAY_SIZE(standard_io_resources); i++)
		request_resource(&ioport_resource, &standard_io_resources[i]);

	e820_setup_gap();

#ifdef CONFIG_VT
#if defined(CONFIG_VGA_CONSOLE)
	if (!efi_enabled || (efi_mem_type(0xa0000) != EFI_CONVENTIONAL_MEMORY))
		conswitchp = &vga_con;
#elif defined(CONFIG_DUMMY_CONSOLE)
	conswitchp = &dummy_con;
#endif
#endif
}

static int __cpuinit get_model_name(struct cpuinfo_x86 *c)
{
	unsigned int *v;

	if (c->extended_cpuid_level < 0x80000004)
		return 0;

	v = (unsigned int *) c->x86_model_id;
	cpuid(0x80000002, &v[0], &v[1], &v[2], &v[3]);
	cpuid(0x80000003, &v[4], &v[5], &v[6], &v[7]);
	cpuid(0x80000004, &v[8], &v[9], &v[10], &v[11]);
	c->x86_model_id[48] = 0;
	return 1;
}


static void __cpuinit display_cacheinfo(struct cpuinfo_x86 *c)
{
	unsigned int n, dummy, eax, ebx, ecx, edx;

	n = c->extended_cpuid_level;

	if (n >= 0x80000005) {
		cpuid(0x80000005, &dummy, &ebx, &ecx, &edx);
		printk(KERN_INFO "CPU: L1 I Cache: %dK (%d bytes/line), "
		       "D cache %dK (%d bytes/line)\n",
		       edx>>24, edx&0xFF, ecx>>24, ecx&0xFF);
		c->x86_cache_size = (ecx>>24) + (edx>>24);
		/* On K8 L1 TLB is inclusive, so don't count it */
		c->x86_tlbsize = 0;
	}

	if (n >= 0x80000006) {
		cpuid(0x80000006, &dummy, &ebx, &ecx, &edx);
		ecx = cpuid_ecx(0x80000006);
		c->x86_cache_size = ecx >> 16;
		c->x86_tlbsize += ((ebx >> 16) & 0xfff) + (ebx & 0xfff);

		printk(KERN_INFO "CPU: L2 Cache: %dK (%d bytes/line)\n",
		       c->x86_cache_size, ecx & 0xFF);
	}
	if (n >= 0x80000008) {
		cpuid(0x80000008, &eax, &dummy, &dummy, &dummy);
		c->x86_virt_bits = (eax >> 8) & 0xff;
		c->x86_phys_bits = eax & 0xff;
	}
}

#ifdef CONFIG_NUMA
static int nearby_node(int apicid)
{
	int i, node;

	for (i = apicid - 1; i >= 0; i--) {
		node = apicid_to_node[i];
		if (node != NUMA_NO_NODE && node_online(node))
			return node;
	}
	for (i = apicid + 1; i < MAX_LOCAL_APIC; i++) {
		node = apicid_to_node[i];
		if (node != NUMA_NO_NODE && node_online(node))
			return node;
	}
	return first_node(node_online_map); /* Shouldn't happen */
}
#endif

/*
 * On an AMD dual-core setup the lower bits of the APIC ID distinguish
 * the cores. Assumes the number of cores is a power of two.
 */
static void __init amd_detect_cmp(struct cpuinfo_x86 *c)
{
#ifdef CONFIG_SMP
	unsigned bits;
#ifdef CONFIG_NUMA
	int cpu = smp_processor_id();
	int node = 0;
	unsigned apicid = hard_smp_processor_id();
#endif
	bits = c->x86_coreid_bits;

	/* Low order bits define the core id (index of core in socket) */
	c->cpu_core_id = c->phys_proc_id & ((1 << bits)-1);
	/* Convert the APIC ID into the socket ID */
	c->phys_proc_id = phys_pkg_id(bits);

#ifdef CONFIG_NUMA
	node = c->phys_proc_id;
	if (apicid_to_node[apicid] != NUMA_NO_NODE)
		node = apicid_to_node[apicid];
	if (!node_online(node)) {
		/* Two possibilities here:
		   - The CPU is missing memory and no node was created.
		     In that case try picking one from a nearby CPU
		   - The APIC IDs differ from the HyperTransport node IDs
		     which the K8 northbridge parsing fills in.
		     Assume they are all increased by a constant offset,
		     but in the same order as the HT nodeids.
		     If that doesn't result in a usable node fall back to the
		     path for the previous case. */

		int ht_nodeid = apicid - (cpu_data(0).phys_proc_id << bits);

		if (ht_nodeid >= 0 &&
		    apicid_to_node[ht_nodeid] != NUMA_NO_NODE)
			node = apicid_to_node[ht_nodeid];
		/* Pick a nearby node */
		if (!node_online(node))
			node = nearby_node(apicid);
	}
	numa_set_node(cpu, node);

	printk(KERN_INFO "CPU %d/%x -> Node %d\n", cpu, apicid, node);
#endif
#endif
}

static void __cpuinit early_init_amd_mc(struct cpuinfo_x86 *c)
{
#ifdef CONFIG_SMP
	unsigned bits, ecx;

	/* Multi core CPU? */
	if (c->extended_cpuid_level < 0x80000008)
		return;

	ecx = cpuid_ecx(0x80000008);

	c->x86_max_cores = (ecx & 0xff) + 1;

	/* CPU telling us the core id bits shift? */
	bits = (ecx >> 12) & 0xF;

	/* Otherwise recompute */
	if (bits == 0) {
		while ((1 << bits) < c->x86_max_cores)
			bits++;
	}

	c->x86_coreid_bits = bits;

#endif
}

#define ENABLE_C1E_MASK		0x18000000
#define CPUID_PROCESSOR_SIGNATURE	1
#define CPUID_XFAM		0x0ff00000
#define CPUID_XFAM_K8		0x00000000
#define CPUID_XFAM_10H		0x00100000
#define CPUID_XFAM_11H		0x00200000
#define CPUID_XMOD		0x000f0000
#define CPUID_XMOD_REV_F	0x00040000

/* AMD systems with C1E don't have a working lAPIC timer. Check for that. */
static __cpuinit int amd_apic_timer_broken(void)
{
	u32 lo, hi, eax = cpuid_eax(CPUID_PROCESSOR_SIGNATURE);

	switch (eax & CPUID_XFAM) {
	case CPUID_XFAM_K8:
		if ((eax & CPUID_XMOD) < CPUID_XMOD_REV_F)
			break;
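		/* fall through: revision F and later K8s are handled like
		   families 10h/11h */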
	case CPUID_XFAM_10H:
	case CPUID_XFAM_11H:
		rdmsr(MSR_K8_ENABLE_C1E, lo, hi);
		if (lo & ENABLE_C1E_MASK)
			return 1;
		break;
	default:
		/* err on the side of caution */
		return 1;
	}
	return 0;
}

static void __cpuinit early_init_amd(struct cpuinfo_x86 *c)
{
	early_init_amd_mc(c);

	/* c->x86_power is 8000_0007 edx. Bit 8 is constant TSC */
	if (c->x86_power & (1<<8))
		set_cpu_cap(c, X86_FEATURE_CONSTANT_TSC);
}

static void __cpuinit init_amd(struct cpuinfo_x86 *c)
{
	unsigned level;

#ifdef CONFIG_SMP
	unsigned long value;

	/*
	 * Disable TLB flush filter by setting HWCR.FFDIS on K8
	 * bit 6 of msr C001_0015
	 *
	 * Errata 63 for SH-B3 steppings
	 * Errata 122 for all steppings (F+ have it disabled by default)
	 */
	if (c->x86 == 15) {
		rdmsrl(MSR_K8_HWCR, value);
		value |= 1 << 6;
		wrmsrl(MSR_K8_HWCR, value);
	}
#endif

	/* Bit 31 in normal CPUID is used for a nonstandard 3DNow! ID;
	   3DNow! is identified by bit 31 in extended CPUID (1*32+31) anyway */
	clear_bit(0*32+31, (unsigned long *)&c->x86_capability);

	/* On C+ stepping K8 rep microcode works well for copy/memset */
	level = cpuid_eax(1);
	if (c->x86 == 15 && ((level >= 0x0f48 && level < 0x0f50) ||
			     level >= 0x0f58))
		set_cpu_cap(c, X86_FEATURE_REP_GOOD);
	if (c->x86 == 0x10 || c->x86 == 0x11)
		set_cpu_cap(c, X86_FEATURE_REP_GOOD);

	/* Enable workaround for FXSAVE leak */
	if (c->x86 >= 6)
		set_cpu_cap(c, X86_FEATURE_FXSAVE_LEAK);

	level = get_model_name(c);
	if (!level) {
		switch (c->x86) {
		case 15:
			/* Should distinguish models here, but this is only
			   a fallback anyway. */
			strcpy(c->x86_model_id, "Hammer");
			break;
		}
	}
	display_cacheinfo(c);

	/* Multi core CPU? */
	if (c->extended_cpuid_level >= 0x80000008)
		amd_detect_cmp(c);

	if (c->extended_cpuid_level >= 0x80000006 &&
	    (cpuid_edx(0x80000006) & 0xf000))
		num_cache_leaves = 4;
	else
		num_cache_leaves = 3;

	if (c->x86 == 0xf || c->x86 == 0x10 || c->x86 == 0x11)
		set_cpu_cap(c, X86_FEATURE_K8);

	/* MFENCE stops RDTSC speculation */
	set_cpu_cap(c, X86_FEATURE_MFENCE_RDTSC);

	/* Family 10 doesn't support C states in MWAIT so don't use it */
	if (c->x86 == 0x10 && !force_mwait)
		clear_cpu_cap(c, X86_FEATURE_MWAIT);

	if (amd_apic_timer_broken())
		disable_apic_timer = 1;
}

void __cpuinit detect_ht(struct cpuinfo_x86 *c)
{
#ifdef CONFIG_SMP
	u32 eax, ebx, ecx, edx;
	int index_msb, core_bits;

	cpuid(1, &eax, &ebx, &ecx, &edx);


	if (!cpu_has(c, X86_FEATURE_HT))
		return;
	if (cpu_has(c, X86_FEATURE_CMP_LEGACY))
		goto out;

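	/* CPUID leaf 1, EBX[23:16]: number of logical processors per
	   physical package */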
	smp_num_siblings = (ebx & 0xff0000) >> 16;

	if (smp_num_siblings == 1) {
		printk(KERN_INFO "CPU: Hyper-Threading is disabled\n");
	} else if (smp_num_siblings > 1) {

		if (smp_num_siblings > NR_CPUS) {
			printk(KERN_WARNING "CPU: Unsupported number of "
			       "siblings %d", smp_num_siblings);
			smp_num_siblings = 1;
			return;
		}

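		/* get_count_order() is a ceiling log2: the number of low
		   APIC ID bits that the siblings span */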
		index_msb = get_count_order(smp_num_siblings);
		c->phys_proc_id = phys_pkg_id(index_msb);

		smp_num_siblings = smp_num_siblings / c->x86_max_cores;

		index_msb = get_count_order(smp_num_siblings);

		core_bits = get_count_order(c->x86_max_cores);

		c->cpu_core_id = phys_pkg_id(index_msb) &
				 ((1 << core_bits) - 1);
	}
out:
	if ((c->x86_max_cores * smp_num_siblings) > 1) {
		printk(KERN_INFO "CPU: Physical Processor ID: %d\n",
		       c->phys_proc_id);
		printk(KERN_INFO "CPU: Processor Core ID: %d\n",
		       c->cpu_core_id);
	}

#endif
}

/*
 * find out the number of processor cores on the die
 */
static int __cpuinit intel_num_cpu_cores(struct cpuinfo_x86 *c)
{
	unsigned int eax, t;

	if (c->cpuid_level < 4)
		return 1;

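	/* CPUID leaf 4, subleaf 0: EAX[4:0] is the cache type (zero means
	   the leaf carries no information); EAX[31:26] is the maximum core
	   ID in the package, hence the +1 below for the core count */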
	cpuid_count(4, 0, &eax, &t, &t, &t);

	if (eax & 0x1f)
		return ((eax >> 26) + 1);
	else
		return 1;
}

static void srat_detect_node(void)
{
#ifdef CONFIG_NUMA
	unsigned node;
	int cpu = smp_processor_id();
	int apicid = hard_smp_processor_id();

	/* For now, don't do the funky fallback heuristics that the
	   AMD version employs. */
	node = apicid_to_node[apicid];
	if (node == NUMA_NO_NODE)
		node = first_node(node_online_map);
	numa_set_node(cpu, node);

	printk(KERN_INFO "CPU %d/%x -> Node %d\n", cpu, apicid, node);
#endif
}

static void __cpuinit early_init_intel(struct cpuinfo_x86 *c)
{
	if ((c->x86 == 0xf && c->x86_model >= 0x03) ||
	    (c->x86 == 0x6 && c->x86_model >= 0x0e))
		set_bit(X86_FEATURE_CONSTANT_TSC, &c->x86_capability);
}

static void __cpuinit init_intel(struct cpuinfo_x86 *c)
{
	/* Cache sizes */
	unsigned n;

	init_intel_cacheinfo(c);
	if (c->cpuid_level > 9) {
		unsigned eax = cpuid_eax(10);
		/* Check for version and the number of counters */
		if ((eax & 0xff) && (((eax>>8) & 0xff) > 1))
			set_cpu_cap(c, X86_FEATURE_ARCH_PERFMON);
	}

	if (cpu_has_ds) {
		unsigned int l1, l2;
		rdmsr(MSR_IA32_MISC_ENABLE, l1, l2);
		if (!(l1 & (1<<11)))
			set_cpu_cap(c, X86_FEATURE_BTS);
		if (!(l1 & (1<<12)))
			set_cpu_cap(c, X86_FEATURE_PEBS);
	}


	if (cpu_has_bts)
		ds_init_intel(c);

	n = c->extended_cpuid_level;
	if (n >= 0x80000008) {
		unsigned eax = cpuid_eax(0x80000008);
		c->x86_virt_bits = (eax >> 8) & 0xff;
		c->x86_phys_bits = eax & 0xff;
		/* CPUID workaround for Intel 0F34 CPU */
		if (c->x86_vendor == X86_VENDOR_INTEL &&
		    c->x86 == 0xF && c->x86_model == 0x3 &&
		    c->x86_mask == 0x4)
			c->x86_phys_bits = 36;
	}

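	/* NetBurst (family 15) prefetches two 64-byte lines at a time, so
	   use twice the CLFLUSH line size as the cache alignment */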
	if (c->x86 == 15)
		c->x86_cache_alignment = c->x86_clflush_size * 2;
	if ((c->x86 == 0xf && c->x86_model >= 0x03) ||
	    (c->x86 == 0x6 && c->x86_model >= 0x0e))
		set_cpu_cap(c, X86_FEATURE_CONSTANT_TSC);
	if (c->x86 == 6)
		set_cpu_cap(c, X86_FEATURE_REP_GOOD);
	set_cpu_cap(c, X86_FEATURE_LFENCE_RDTSC);
	c->x86_max_cores = intel_num_cpu_cores(c);

	srat_detect_node();
}

static void __cpuinit get_cpu_vendor(struct cpuinfo_x86 *c)
{
	char *v = c->x86_vendor_id;

	if (!strcmp(v, "AuthenticAMD"))
		c->x86_vendor = X86_VENDOR_AMD;
	else if (!strcmp(v, "GenuineIntel"))
		c->x86_vendor = X86_VENDOR_INTEL;
	else
		c->x86_vendor = X86_VENDOR_UNKNOWN;
}

struct cpu_model_info {
	int vendor;
	int family;
	char *model_names[16];
};

/* Do some early cpuid calls on the boot CPU to get the parameters that
   are needed before check_bugs. Everything more advanced is in
   identify_cpu below. */
static void __cpuinit early_identify_cpu(struct cpuinfo_x86 *c)
{
	u32 tfms, xlvl;

	c->loops_per_jiffy = loops_per_jiffy;
	c->x86_cache_size = -1;
	c->x86_vendor = X86_VENDOR_UNKNOWN;
	c->x86_model = c->x86_mask = 0;	/* So far unknown... */
	c->x86_vendor_id[0] = '\0'; /* Unset */
	c->x86_model_id[0] = '\0';  /* Unset */
	c->x86_clflush_size = 64;
	c->x86_cache_alignment = c->x86_clflush_size;
	c->x86_max_cores = 1;
	c->x86_coreid_bits = 0;
	c->extended_cpuid_level = 0;
	memset(&c->x86_capability, 0, sizeof c->x86_capability);

	/* Get vendor name */
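	/* leaf 0 returns the vendor string in EBX, EDX, ECX order, hence
	   the 0/8/4 byte offsets below */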
	cpuid(0x00000000, (unsigned int *)&c->cpuid_level,
	      (unsigned int *)&c->x86_vendor_id[0],
	      (unsigned int *)&c->x86_vendor_id[8],
	      (unsigned int *)&c->x86_vendor_id[4]);

	get_cpu_vendor(c);

	/* Initialize the standard set of capabilities */
	/* Note that the vendor-specific code below might override */

	/* Intel-defined flags: level 0x00000001 */
	if (c->cpuid_level >= 0x00000001) {
		__u32 misc;
		cpuid(0x00000001, &tfms, &misc, &c->x86_capability[4],
		      &c->x86_capability[0]);
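		/* EAX layout: [3:0] stepping, [7:4] model, [11:8] family,
		   [19:16] extended model, [27:20] extended family */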
		c->x86 = (tfms >> 8) & 0xf;
		c->x86_model = (tfms >> 4) & 0xf;
		c->x86_mask = tfms & 0xf;
		if (c->x86 == 0xf)
			c->x86 += (tfms >> 20) & 0xff;
		if (c->x86 >= 0x6)
			c->x86_model += ((tfms >> 16) & 0xF) << 4;
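		/* EDX bit 19 = CLFSH; EBX[15:8] gives the CLFLUSH line size
		   in units of 8 bytes */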
		if (c->x86_capability[0] & (1<<19))
			c->x86_clflush_size = ((misc >> 8) & 0xff) * 8;
	} else {
		/* Have CPUID level 0 only - unheard of */
		c->x86 = 4;
	}

#ifdef CONFIG_SMP
	c->phys_proc_id = (cpuid_ebx(1) >> 24) & 0xff;
#endif
	/* AMD-defined flags: level 0x80000001 */
	xlvl = cpuid_eax(0x80000000);
	c->extended_cpuid_level = xlvl;
	if ((xlvl & 0xffff0000) == 0x80000000) {
		if (xlvl >= 0x80000001) {
			c->x86_capability[1] = cpuid_edx(0x80000001);
			c->x86_capability[6] = cpuid_ecx(0x80000001);
		}
		if (xlvl >= 0x80000004)
			get_model_name(c); /* Default name */
	}

	/* Transmeta-defined flags: level 0x80860001 */
	xlvl = cpuid_eax(0x80860000);
	if ((xlvl & 0xffff0000) == 0x80860000) {
		/* Don't set x86_cpuid_level here for now, to avoid confusion. */
		if (xlvl >= 0x80860001)
			c->x86_capability[2] = cpuid_edx(0x80860001);
	}

	c->extended_cpuid_level = cpuid_eax(0x80000000);
	if (c->extended_cpuid_level >= 0x80000007)
		c->x86_power = cpuid_edx(0x80000007);

	switch (c->x86_vendor) {
	case X86_VENDOR_AMD:
		early_init_amd(c);
		break;
	}

}

/*
 * This does the hard work of actually picking apart the CPU stuff...
 */
void __cpuinit identify_cpu(struct cpuinfo_x86 *c)
{
	int i;

	early_identify_cpu(c);

	init_scattered_cpuid_features(c);

	c->apicid = phys_pkg_id(0);

	/*
	 * Vendor-specific initialization. In this section we
	 * canonicalize the feature flags, meaning if there are
	 * features a certain CPU supports which CPUID doesn't
	 * tell us, CPUID claiming incorrect flags, or other bugs,
	 * we handle them here.
	 *
	 * At the end of this section, c->x86_capability better
	 * indicate the features this CPU genuinely supports!
	 */
	switch (c->x86_vendor) {
	case X86_VENDOR_AMD:
		init_amd(c);
		break;

	case X86_VENDOR_INTEL:
		init_intel(c);
		break;

	case X86_VENDOR_UNKNOWN:
	default:
		display_cacheinfo(c);
		break;
	}

	select_idle_routine(c);
	detect_ht(c);

	/*
	 * On SMP, boot_cpu_data holds the common feature set between
	 * all CPUs; so make sure that we indicate which features are
	 * common between the CPUs. The first time this routine gets
	 * executed, c == &boot_cpu_data.
	 */
	if (c != &boot_cpu_data) {
		/* AND the already accumulated flags with these */
		for (i = 0; i < NCAPINTS; i++)
			boot_cpu_data.x86_capability[i] &= c->x86_capability[i];
	}

#ifdef CONFIG_X86_MCE
	mcheck_init(c);
#endif
	if (c != &boot_cpu_data)
		mtrr_ap_init();
#ifdef CONFIG_NUMA
	numa_add_cpu(smp_processor_id());
#endif

	switch (c->x86_vendor) {
	case X86_VENDOR_AMD:
		early_init_amd(c);
		break;
	case X86_VENDOR_INTEL:
		early_init_intel(c);
		break;
	}
}

void __cpuinit print_cpu_info(struct cpuinfo_x86 *c)
{
	if (c->x86_model_id[0])
		printk(KERN_INFO "%s", c->x86_model_id);

	if (c->x86_mask || c->cpuid_level >= 0)
		printk(KERN_CONT " stepping %02x\n", c->x86_mask);
	else
		printk(KERN_CONT "\n");
}

/*
 * Get CPU information for use by the procfs.
 */

static int show_cpuinfo(struct seq_file *m, void *v)
{
	struct cpuinfo_x86 *c = v;
	int cpu = 0, i;

	/*
	 * These flag bits must match the definitions in <asm/cpufeature.h>.
	 * NULL means this bit is undefined or reserved; either way it doesn't
	 * have meaning as far as Linux is concerned. Note that it's important
	 * to realize there is a difference between this table and CPUID -- if
	 * applications want to get the raw CPUID data, they should access
	 * /dev/cpu/<cpu_nr>/cpuid instead.
	 */
	static const char *const x86_cap_flags[] = {
		/* Intel-defined */
		"fpu", "vme", "de", "pse", "tsc", "msr", "pae", "mce",
		"cx8", "apic", NULL, "sep", "mtrr", "pge", "mca", "cmov",
		"pat", "pse36", "pn", "clflush", NULL, "dts", "acpi", "mmx",
		"fxsr", "sse", "sse2", "ss", "ht", "tm", "ia64", "pbe",

		/* AMD-defined */
		NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
		NULL, NULL, NULL, "syscall", NULL, NULL, NULL, NULL,
		NULL, NULL, NULL, NULL, "nx", NULL, "mmxext", NULL,
		NULL, "fxsr_opt", "pdpe1gb", "rdtscp", NULL, "lm",
		"3dnowext", "3dnow",

		/* Transmeta-defined */
		"recovery", "longrun", NULL, "lrti", NULL, NULL, NULL, NULL,
		NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
		NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
		NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,

		/* Other (Linux-defined) */
		"cxmmx", "k6_mtrr", "cyrix_arr", "centaur_mcr",
		NULL, NULL, NULL, NULL,
		"constant_tsc", "up", NULL, "arch_perfmon",
		"pebs", "bts", NULL, "sync_rdtsc",
		"rep_good", NULL, NULL, NULL, NULL, NULL, NULL, NULL,
		NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,

		/* Intel-defined (#2) */
		"pni", NULL, NULL, "monitor", "ds_cpl", "vmx", "smx", "est",
		"tm2", "ssse3", "cid", NULL, NULL, "cx16", "xtpr", NULL,
		NULL, NULL, "dca", "sse4_1", "sse4_2", NULL, NULL, "popcnt",
		NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,

		/* VIA/Cyrix/Centaur-defined */
		NULL, NULL, "rng", "rng_en", NULL, NULL, "ace", "ace_en",
		"ace2", "ace2_en", "phe", "phe_en", "pmm", "pmm_en", NULL, NULL,
		NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
		NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,

		/* AMD-defined (#2) */
		"lahf_lm", "cmp_legacy", "svm", "extapic",
		"cr8_legacy", "abm", "sse4a", "misalignsse",
		"3dnowprefetch", "osvw", "ibs", "sse5",
		"skinit", "wdt", NULL, NULL,
		NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
		NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,

		/* Auxiliary (Linux-defined) */
		"ida", NULL, NULL, NULL, NULL, NULL, NULL, NULL,
		NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
		NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
		NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
	};
	static const char *const x86_power_flags[] = {
		"ts",	/* temperature sensor */
		"fid",	/* frequency id control */
		"vid",	/* voltage id control */
		"ttp",	/* thermal trip */
		"tm",
		"stc",
		"100mhzsteps",
		"hwpstate",
		"",	/* tsc invariant mapped to constant_tsc */
		/* nothing */
	};


#ifdef CONFIG_SMP
	cpu = c->cpu_index;
#endif

	seq_printf(m, "processor\t: %u\n"
		   "vendor_id\t: %s\n"
		   "cpu family\t: %d\n"
		   "model\t\t: %d\n"
		   "model name\t: %s\n",
		   (unsigned)cpu,
		   c->x86_vendor_id[0] ? c->x86_vendor_id : "unknown",
		   c->x86,
		   (int)c->x86_model,
		   c->x86_model_id[0] ? c->x86_model_id : "unknown");

	if (c->x86_mask || c->cpuid_level >= 0)
		seq_printf(m, "stepping\t: %d\n", c->x86_mask);
	else
		seq_printf(m, "stepping\t: unknown\n");

	if (cpu_has(c, X86_FEATURE_TSC)) {
		unsigned int freq = cpufreq_quick_get((unsigned)cpu);

		if (!freq)
			freq = cpu_khz;
		seq_printf(m, "cpu MHz\t\t: %u.%03u\n",
			   freq / 1000, (freq % 1000));
	}

	/* Cache size */
	if (c->x86_cache_size >= 0)
		seq_printf(m, "cache size\t: %d KB\n", c->x86_cache_size);

#ifdef CONFIG_SMP
	if (smp_num_siblings * c->x86_max_cores > 1) {
		seq_printf(m, "physical id\t: %d\n", c->phys_proc_id);
		seq_printf(m, "siblings\t: %d\n",
			   cpus_weight(per_cpu(cpu_core_map, cpu)));
		seq_printf(m, "core id\t\t: %d\n", c->cpu_core_id);
		seq_printf(m, "cpu cores\t: %d\n", c->booted_cores);
	}
#endif

	seq_printf(m,
		   "fpu\t\t: yes\n"
		   "fpu_exception\t: yes\n"
		   "cpuid level\t: %d\n"
		   "wp\t\t: yes\n"
		   "flags\t\t:",
		   c->cpuid_level);

	for (i = 0; i < 32*NCAPINTS; i++)
		if (cpu_has(c, i) && x86_cap_flags[i] != NULL)
			seq_printf(m, " %s", x86_cap_flags[i]);

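	/* BogoMIPS = loops_per_jiffy * HZ / 500000, printed with two
	   decimal places */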
	seq_printf(m, "\nbogomips\t: %lu.%02lu\n",
		   c->loops_per_jiffy/(500000/HZ),
		   (c->loops_per_jiffy/(5000/HZ)) % 100);

	if (c->x86_tlbsize > 0)
		seq_printf(m, "TLB size\t: %d 4K pages\n", c->x86_tlbsize);
	seq_printf(m, "clflush size\t: %d\n", c->x86_clflush_size);
	seq_printf(m, "cache_alignment\t: %d\n", c->x86_cache_alignment);

	seq_printf(m, "address sizes\t: %u bits physical, %u bits virtual\n",
		   c->x86_phys_bits, c->x86_virt_bits);

	seq_printf(m, "power management:");
	for (i = 0; i < 32; i++) {
		if (c->x86_power & (1 << i)) {
			if (i < ARRAY_SIZE(x86_power_flags) &&
			    x86_power_flags[i])
				seq_printf(m, "%s%s",
					   x86_power_flags[i][0]?" ":"",
					   x86_power_flags[i]);
			else
				seq_printf(m, " [%d]", i);
		}
	}

	seq_printf(m, "\n\n");

	return 0;
}

static void *c_start(struct seq_file *m, loff_t *pos)
{
	if (*pos == 0)	/* just in case, cpu 0 is not the first */
		*pos = first_cpu(cpu_online_map);
	if ((*pos) < NR_CPUS && cpu_online(*pos))
		return &cpu_data(*pos);
	return NULL;
}

static void *c_next(struct seq_file *m, void *v, loff_t *pos)
{
	*pos = next_cpu(*pos, cpu_online_map);
	return c_start(m, pos);
}

static void c_stop(struct seq_file *m, void *v)
{
}

struct seq_operations cpuinfo_op = {
	.start = c_start,
	.next = c_next,
	.stop = c_stop,
	.show = show_cpuinfo,
};