/*
 *  Copyright (C) 1995  Linus Torvalds
 */

/*
 * This file handles the architecture-dependent parts of initialization
 */

#include <linux/errno.h>
#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/stddef.h>
#include <linux/unistd.h>
#include <linux/ptrace.h>
#include <linux/slab.h>
#include <linux/user.h>
#include <linux/a.out.h>
#include <linux/screen_info.h>
#include <linux/ioport.h>
#include <linux/delay.h>
#include <linux/init.h>
#include <linux/initrd.h>
#include <linux/highmem.h>
#include <linux/bootmem.h>
#include <linux/module.h>
#include <asm/processor.h>
#include <linux/console.h>
#include <linux/seq_file.h>
#include <linux/crash_dump.h>
#include <linux/root_dev.h>
#include <linux/pci.h>
#include <linux/acpi.h>
#include <linux/kallsyms.h>
#include <linux/edd.h>
#include <linux/mmzone.h>
#include <linux/kexec.h>
#include <linux/cpufreq.h>
#include <linux/dmi.h>
#include <linux/dma-mapping.h>
#include <linux/ctype.h>

#include <asm/uaccess.h>
#include <asm/system.h>
#include <video/edid.h>
#include <asm/mpspec.h>
#include <asm/mmu_context.h>
#include <asm/proto.h>
#include <asm/setup.h>
#include <asm/mach_apic.h>
#include <asm/sections.h>
#include <asm/cacheflush.h>

struct cpuinfo_x86 boot_cpu_data __read_mostly;
EXPORT_SYMBOL(boot_cpu_data);

unsigned long mmu_cr4_features;

/* Boot loader ID as an integer, for the benefit of proc_dointvec */
int bootloader_type;

unsigned long saved_video_mode;

int force_mwait __cpuinitdata;

char dmi_alloc_data[DMI_MAX_DATA];

struct screen_info screen_info;
EXPORT_SYMBOL(screen_info);

struct sys_desc_table_struct {
	unsigned short length;
	unsigned char table[0];
};

struct edid_info edid_info;
EXPORT_SYMBOL_GPL(edid_info);

extern int root_mountflags;

char __initdata command_line[COMMAND_LINE_SIZE];

struct resource standard_io_resources[] = {
	{ .name = "dma1", .start = 0x00, .end = 0x1f,
		.flags = IORESOURCE_BUSY | IORESOURCE_IO },
	{ .name = "pic1", .start = 0x20, .end = 0x21,
		.flags = IORESOURCE_BUSY | IORESOURCE_IO },
	{ .name = "timer0", .start = 0x40, .end = 0x43,
		.flags = IORESOURCE_BUSY | IORESOURCE_IO },
	{ .name = "timer1", .start = 0x50, .end = 0x53,
		.flags = IORESOURCE_BUSY | IORESOURCE_IO },
	{ .name = "keyboard", .start = 0x60, .end = 0x6f,
		.flags = IORESOURCE_BUSY | IORESOURCE_IO },
	{ .name = "dma page reg", .start = 0x80, .end = 0x8f,
		.flags = IORESOURCE_BUSY | IORESOURCE_IO },
	{ .name = "pic2", .start = 0xa0, .end = 0xa1,
		.flags = IORESOURCE_BUSY | IORESOURCE_IO },
	{ .name = "dma2", .start = 0xc0, .end = 0xdf,
		.flags = IORESOURCE_BUSY | IORESOURCE_IO },
	{ .name = "fpu", .start = 0xf0, .end = 0xff,
		.flags = IORESOURCE_BUSY | IORESOURCE_IO }
};

#define IORESOURCE_RAM (IORESOURCE_BUSY | IORESOURCE_MEM)

static struct resource data_resource = {
	.name = "Kernel data",
	.flags = IORESOURCE_RAM,
};
static struct resource code_resource = {
	.name = "Kernel code",
	.flags = IORESOURCE_RAM,
};
static struct resource bss_resource = {
	.name = "Kernel bss",
	.flags = IORESOURCE_RAM,
};

static void __cpuinit early_identify_cpu(struct cpuinfo_x86 *c);

#ifdef CONFIG_PROC_VMCORE
/* elfcorehdr= specifies the location of elf core header
 * stored by the crashed kernel. This option will be passed
 * by kexec loader to the capture kernel.
 */
static int __init setup_elfcorehdr(char *arg)
{
	char *end;

	if (!arg)
		return -EINVAL;
	elfcorehdr_addr = memparse(arg, &end);
	return end > arg ? 0 : -EINVAL;
}
early_param("elfcorehdr", setup_elfcorehdr);
#endif

#ifndef CONFIG_NUMA
static void __init
contig_initmem_init(unsigned long start_pfn, unsigned long end_pfn)
{
	unsigned long bootmap_size, bootmap;

	bootmap_size = bootmem_bootmap_pages(end_pfn)<<PAGE_SHIFT;
	bootmap = find_e820_area(0, end_pfn<<PAGE_SHIFT, bootmap_size);
	if (bootmap == -1L)
		panic("Cannot find bootmem map of size %ld\n", bootmap_size);
	bootmap_size = init_bootmem(bootmap >> PAGE_SHIFT, end_pfn);
	e820_register_active_regions(0, start_pfn, end_pfn);
	free_bootmem_with_active_regions(0, end_pfn);
	reserve_bootmem(bootmap, bootmap_size);
}
#endif

#if defined(CONFIG_EDD) || defined(CONFIG_EDD_MODULE)
struct edd edd;
#ifdef CONFIG_EDD_MODULE
EXPORT_SYMBOL(edd);
#endif
/**
 * copy_edd() - Copy the BIOS EDD information
 *              from boot_params into a safe place.
 */
static inline void copy_edd(void)
{
	memcpy(edd.mbr_signature, boot_params.edd_mbr_sig_buffer,
	       sizeof(edd.mbr_signature));
	memcpy(edd.edd_info, boot_params.eddbuf, sizeof(edd.edd_info));
	edd.mbr_signature_nr = boot_params.edd_mbr_sig_buf_entries;
	edd.edd_info_nr = boot_params.eddbuf_entries;
}
#else
static inline void copy_edd(void)
{
}
#endif

#ifdef CONFIG_KEXEC
static void __init reserve_crashkernel(void)
{
	unsigned long long free_mem;
	unsigned long long crash_size, crash_base;
	int ret;

	free_mem =
		((unsigned long long)max_low_pfn - min_low_pfn) << PAGE_SHIFT;

	ret = parse_crashkernel(boot_command_line, free_mem,
				&crash_size, &crash_base);
	if (ret == 0 && crash_size) {
		if (crash_base > 0) {
			printk(KERN_INFO "Reserving %ldMB of memory at %ldMB "
			       "for crashkernel (System RAM: %ldMB)\n",
			       (unsigned long)(crash_size >> 20),
			       (unsigned long)(crash_base >> 20),
			       (unsigned long)(free_mem >> 20));
			crashk_res.start = crash_base;
			crashk_res.end   = crash_base + crash_size - 1;
			reserve_bootmem(crash_base, crash_size);
		} else
			printk(KERN_INFO "crashkernel reservation failed - "
			       "you have to specify a base address\n");
	}
}
#else
static inline void __init reserve_crashkernel(void)
{}
#endif
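
/*
 * The reservation above is driven by the "crashkernel=" boot option,
 * e.g. "crashkernel=128M@16M" asks for 128MB of RAM at the 16MB physical
 * mark; a kdump kernel loaded there with kexec can then run after a crash.
 * (Values are illustrative; any suitably sized, available region works.)
 */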

#define EBDA_ADDR_POINTER 0x40E

unsigned __initdata ebda_addr;
unsigned __initdata ebda_size;

static void discover_ebda(void)
{
	/*
	 * there is a real-mode segmented pointer pointing to the
	 * 4K EBDA area at 0x40E
	 */
	ebda_addr = *(unsigned short *)__va(EBDA_ADDR_POINTER);
	ebda_addr <<= 4;

	ebda_size = *(unsigned short *)__va(ebda_addr);

	/* Round EBDA up to pages */
	if (ebda_size == 0)
		ebda_size = 1;
	ebda_size <<= 10;
	ebda_size = round_up(ebda_size + (ebda_addr & ~PAGE_MASK), PAGE_SIZE);
	if (ebda_size > 64*1024)
		ebda_size = 64*1024;
}
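
/*
 * Worked example: if the BIOS stores the segment 0x9FC0 at 0x40E, the EBDA
 * sits at physical 0x9FC0 << 4 = 0x9FC00; its first byte conventionally
 * holds the EBDA size in KB, which the code above converts to bytes (<< 10)
 * and caps at 64KB.
 */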

void __init setup_arch(char **cmdline_p)
{
	unsigned i;

	printk(KERN_INFO "Command line: %s\n", boot_command_line);

	ROOT_DEV = old_decode_dev(boot_params.hdr.root_dev);
	screen_info = boot_params.screen_info;
	edid_info = boot_params.edid_info;
	saved_video_mode = boot_params.hdr.vid_mode;
	bootloader_type = boot_params.hdr.type_of_loader;

#ifdef CONFIG_BLK_DEV_RAM
	rd_image_start = boot_params.hdr.ram_size & RAMDISK_IMAGE_START_MASK;
	rd_prompt = ((boot_params.hdr.ram_size & RAMDISK_PROMPT_FLAG) != 0);
	rd_doload = ((boot_params.hdr.ram_size & RAMDISK_LOAD_FLAG) != 0);
#endif
	setup_memory_region();
	copy_edd();

	if (!boot_params.hdr.root_flags)
		root_mountflags &= ~MS_RDONLY;
	init_mm.start_code = (unsigned long) &_text;
	init_mm.end_code = (unsigned long) &_etext;
	init_mm.end_data = (unsigned long) &_edata;
	init_mm.brk = (unsigned long) &_end;

	code_resource.start = virt_to_phys(&_text);
	code_resource.end = virt_to_phys(&_etext)-1;
	data_resource.start = virt_to_phys(&_etext);
	data_resource.end = virt_to_phys(&_edata)-1;
	bss_resource.start = virt_to_phys(&__bss_start);
	bss_resource.end = virt_to_phys(&__bss_stop)-1;

	early_identify_cpu(&boot_cpu_data);

	strlcpy(command_line, boot_command_line, COMMAND_LINE_SIZE);
	*cmdline_p = command_line;

	finish_e820_parsing();

	e820_register_active_regions(0, 0, -1UL);
	/*
	 * partially used pages are not usable - thus
	 * we are rounding upwards:
	 */
	end_pfn = e820_end_of_ram();
	num_physpages = end_pfn;

	discover_ebda();

	init_memory_mapping(0, (end_pfn_map << PAGE_SHIFT));

#ifdef CONFIG_SMP
	/* setup to use the static apicid table during kernel startup */
	x86_cpu_to_apicid_ptr = (void *)&x86_cpu_to_apicid_init;
#endif

#ifdef CONFIG_ACPI
	/*
	 * Initialize the ACPI boot-time table parser (gets the RSDP and SDT).
	 * Call this early for SRAT node setup.
	 */
	acpi_boot_table_init();
#endif

	/* How many end-of-memory variables you have, grandma! */
	max_low_pfn = end_pfn;
	high_memory = (void *)__va(end_pfn * PAGE_SIZE - 1) + 1;

	/* Remove active ranges so rediscovery with NUMA-awareness happens */
	remove_all_active_ranges();

#ifdef CONFIG_ACPI_NUMA
	/*
	 * Parse SRAT to discover nodes.
	 */
	acpi_numa_init();
#endif

#ifdef CONFIG_NUMA
	numa_initmem_init(0, end_pfn);
#else
	contig_initmem_init(0, end_pfn);
#endif

	/* Reserve direct mapping */
	reserve_bootmem_generic(table_start << PAGE_SHIFT,
				(table_end - table_start) << PAGE_SHIFT);

	/* reserve kernel */
	reserve_bootmem_generic(__pa_symbol(&_text),
				__pa_symbol(&_end) - __pa_symbol(&_text));

	/*
	 * reserve physical page 0 - it's a special BIOS page on many boxes,
	 * enabling clean reboots, SMP operation, laptop functions.
	 */
	reserve_bootmem_generic(0, PAGE_SIZE);

	/* reserve ebda region */
	if (ebda_addr)
		reserve_bootmem_generic(ebda_addr, ebda_size);

#ifdef CONFIG_NUMA
	/* reserve nodemap region */
	if (nodemap_addr)
		reserve_bootmem_generic(nodemap_addr, nodemap_size);
#endif

#ifdef CONFIG_SMP
	/* Reserve SMP trampoline */
	reserve_bootmem_generic(SMP_TRAMPOLINE_BASE, 2*PAGE_SIZE);
#endif

#ifdef CONFIG_ACPI_SLEEP
	/*
	 * Reserve low memory region for sleep support.
	 */
	acpi_reserve_bootmem();
#endif
	/*
	 * Find and reserve possible boot-time SMP configuration:
	 */
	find_smp_config();
#ifdef CONFIG_BLK_DEV_INITRD
	if (boot_params.hdr.type_of_loader && boot_params.hdr.ramdisk_image) {
		unsigned long ramdisk_image = boot_params.hdr.ramdisk_image;
		unsigned long ramdisk_size  = boot_params.hdr.ramdisk_size;
		unsigned long ramdisk_end   = ramdisk_image + ramdisk_size;
		unsigned long end_of_mem    = end_pfn << PAGE_SHIFT;

		if (ramdisk_end <= end_of_mem) {
			reserve_bootmem_generic(ramdisk_image, ramdisk_size);
			initrd_start = ramdisk_image + PAGE_OFFSET;
			initrd_end = initrd_start+ramdisk_size;
		} else {
			printk(KERN_ERR "initrd extends beyond end of memory "
			       "(0x%08lx > 0x%08lx)\ndisabling initrd\n",
			       ramdisk_end, end_of_mem);
			initrd_start = 0;
		}
	}
#endif
	reserve_crashkernel();

	/*
	 * set this early, so we dont allocate cpu0
	 * if MADT list doesnt list BSP first
	 * mpparse.c/MP_processor_info() allocates logical cpu numbers.
	 */
	cpu_set(0, cpu_present_map);
#ifdef CONFIG_ACPI
	/*
	 * Read APIC and some other early information from ACPI tables.
	 */
	acpi_boot_init();
#endif

	/*
	 * get boot-time SMP configuration:
	 */
	if (smp_found_config)
		get_smp_config();
	init_apic_mappings();
	ioapic_init_mappings();

	/*
	 * We trust e820 completely. No explicit ROM probing in memory.
	 */
	e820_reserve_resources(&code_resource, &data_resource, &bss_resource);
	e820_mark_nosave_regions();

	/* request I/O space for devices used on all i[345]86 PCs */
	for (i = 0; i < ARRAY_SIZE(standard_io_resources); i++)
		request_resource(&ioport_resource, &standard_io_resources[i]);

#if defined(CONFIG_VGA_CONSOLE)
	conswitchp = &vga_con;
#elif defined(CONFIG_DUMMY_CONSOLE)
	conswitchp = &dummy_con;
#endif
}

static int __cpuinit get_model_name(struct cpuinfo_x86 *c)
{
	unsigned int *v;

	if (c->extended_cpuid_level < 0x80000004)
		return 0;

	v = (unsigned int *) c->x86_model_id;
	cpuid(0x80000002, &v[0], &v[1], &v[2], &v[3]);
	cpuid(0x80000003, &v[4], &v[5], &v[6], &v[7]);
	cpuid(0x80000004, &v[8], &v[9], &v[10], &v[11]);
	c->x86_model_id[48] = 0;
	return 1;
}

static void __cpuinit display_cacheinfo(struct cpuinfo_x86 *c)
{
	unsigned int n, dummy, eax, ebx, ecx, edx;

	n = c->extended_cpuid_level;

	if (n >= 0x80000005) {
		cpuid(0x80000005, &dummy, &ebx, &ecx, &edx);
		printk(KERN_INFO "CPU: L1 I Cache: %dK (%d bytes/line), "
		       "D cache %dK (%d bytes/line)\n",
		       edx>>24, edx&0xFF, ecx>>24, ecx&0xFF);
		c->x86_cache_size = (ecx>>24) + (edx>>24);
		/* On K8 L1 TLB is inclusive, so don't count it */
		c->x86_tlbsize = 0;
	}

	if (n >= 0x80000006) {
		cpuid(0x80000006, &dummy, &ebx, &ecx, &edx);
		ecx = cpuid_ecx(0x80000006);
		c->x86_cache_size = ecx >> 16;
		c->x86_tlbsize += ((ebx >> 16) & 0xfff) + (ebx & 0xfff);

		printk(KERN_INFO "CPU: L2 Cache: %dK (%d bytes/line)\n",
		       c->x86_cache_size, ecx & 0xFF);
	}

	if (n >= 0x80000007)
		cpuid(0x80000007, &dummy, &dummy, &dummy, &c->x86_power);
	if (n >= 0x80000008) {
		cpuid(0x80000008, &eax, &dummy, &dummy, &dummy);
		c->x86_virt_bits = (eax >> 8) & 0xff;
		c->x86_phys_bits = eax & 0xff;
	}
}
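
/*
 * Example of the 0x80000006 ECX decoding above: ecx = 0x02004140 describes
 * an L2 cache of 0x0200 = 512 KB (bits 31-16) with 0x40 = 64 bytes per line
 * (bits 7-0), printed as "CPU: L2 Cache: 512K (64 bytes/line)".
 */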

static int nearby_node(int apicid)
{
	int i, node;

	for (i = apicid - 1; i >= 0; i--) {
		node = apicid_to_node[i];
		if (node != NUMA_NO_NODE && node_online(node))
			return node;
	}
	for (i = apicid + 1; i < MAX_LOCAL_APIC; i++) {
		node = apicid_to_node[i];
		if (node != NUMA_NO_NODE && node_online(node))
			return node;
	}
	return first_node(node_online_map); /* Shouldn't happen */
}

/*
 * On an AMD dual core setup the lower bits of the APIC id distinguish the
 * cores.  Assumes number of cores is a power of two.
 */
static void __init amd_detect_cmp(struct cpuinfo_x86 *c)
{
	unsigned bits;
	int node;
	int cpu = smp_processor_id();
	unsigned apicid = hard_smp_processor_id();

	bits = c->x86_coreid_bits;

	/* Low order bits define the core id (index of core in socket) */
	c->cpu_core_id = c->phys_proc_id & ((1 << bits)-1);
	/* Convert the APIC ID into the socket ID */
	c->phys_proc_id = phys_pkg_id(bits);

	node = c->phys_proc_id;
	if (apicid_to_node[apicid] != NUMA_NO_NODE)
		node = apicid_to_node[apicid];
	if (!node_online(node)) {
		/* Two possibilities here:
		   - The CPU is missing memory and no node was created.
		     In that case try picking one from a nearby CPU
		   - The APIC IDs differ from the HyperTransport node IDs
		     which the K8 northbridge parsing fills in.
		     Assume they are all increased by a constant offset,
		     but in the same order as the HT nodeids.
		     If that doesn't result in a usable node fall back to the
		     path for the previous case. */
		int ht_nodeid = apicid - (cpu_data(0).phys_proc_id << bits);

		if (ht_nodeid >= 0 &&
		    apicid_to_node[ht_nodeid] != NUMA_NO_NODE)
			node = apicid_to_node[ht_nodeid];
		/* Pick a nearby node */
		if (!node_online(node))
			node = nearby_node(apicid);
	}
	numa_set_node(cpu, node);

	printk(KERN_INFO "CPU %d/%x -> Node %d\n", cpu, apicid, node);
}
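
/*
 * Example of the core/socket split above: with bits = 1 (two cores per
 * package) and an initial APIC id of 5, the core id is 5 & 1 = 1 and the
 * socket id becomes 5 >> 1 = 2, i.e. core 1 of physical package 2.
 */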

static void __cpuinit early_init_amd(struct cpuinfo_x86 *c)
{
	unsigned bits, ecx;

	/* Multi core CPU? */
	if (c->extended_cpuid_level < 0x80000008)
		return;

	ecx = cpuid_ecx(0x80000008);

	c->x86_max_cores = (ecx & 0xff) + 1;

	/* CPU telling us the core id bits shift? */
	bits = (ecx >> 12) & 0xF;

	/* Otherwise recompute */
	if (bits == 0) {
		while ((1 << bits) < c->x86_max_cores)
			bits++;
	}

	c->x86_coreid_bits = bits;
}
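
/*
 * Example of the 0x80000008 ECX decoding above: ecx = 0x00003003 reports
 * (0x03 & 0xff) + 1 = 4 cores and a core id field of (ecx >> 12) & 0xF = 3
 * bits, so the low 3 bits of the APIC id select the core within the package.
 */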

#define ENABLE_C1E_MASK			0x18000000
#define CPUID_PROCESSOR_SIGNATURE	1
#define CPUID_XFAM			0x0ff00000
#define CPUID_XFAM_K8			0x00000000
#define CPUID_XFAM_10H			0x00100000
#define CPUID_XFAM_11H			0x00200000
#define CPUID_XMOD			0x000f0000
#define CPUID_XMOD_REV_F		0x00040000

/* AMD systems with C1E don't have a working lAPIC timer. Check for that. */
static __cpuinit int amd_apic_timer_broken(void)
{
	u32 lo, hi, eax = cpuid_eax(CPUID_PROCESSOR_SIGNATURE);

	switch (eax & CPUID_XFAM) {
	case CPUID_XFAM_K8:
		if ((eax & CPUID_XMOD) < CPUID_XMOD_REV_F)
			break;
	case CPUID_XFAM_10H:
	case CPUID_XFAM_11H:
		rdmsr(MSR_K8_ENABLE_C1E, lo, hi);
		if (lo & ENABLE_C1E_MASK)
			return 1;
		break;
	default:
		/* err on the side of caution */
		return 1;
	}
	return 0;
}

static void __cpuinit init_amd(struct cpuinfo_x86 *c)
{
	unsigned level;
	unsigned long value;

	/*
	 * Disable TLB flush filter by setting HWCR.FFDIS on K8
	 * bit 6 of msr C001_0015
	 *
	 * Errata 63 for SH-B3 steppings
	 * Errata 122 for all steppings (F+ have it disabled by default)
	 */
	if (c->x86 == 15) {
		rdmsrl(MSR_K8_HWCR, value);
		value |= 1 << 6;
		wrmsrl(MSR_K8_HWCR, value);
	}

	/* Bit 31 in normal CPUID used for nonstandard 3DNow ID;
	   3DNow is IDd by bit 31 in extended CPUID (1*32+31) anyway */
	clear_bit(0*32+31, (unsigned long *)&c->x86_capability);

	/* On C+ stepping K8 rep microcode works well for copy/memset */
	level = cpuid_eax(1);
	if (c->x86 == 15 && ((level >= 0x0f48 && level < 0x0f50) ||
			     level >= 0x0f58))
		set_cpu_cap(c, X86_FEATURE_REP_GOOD);
	if (c->x86 == 0x10 || c->x86 == 0x11)
		set_cpu_cap(c, X86_FEATURE_REP_GOOD);

	/* Enable workaround for FXSAVE leak */
	set_cpu_cap(c, X86_FEATURE_FXSAVE_LEAK);

	level = get_model_name(c);
	if (!level) {
		/* Should distinguish Models here, but this is only
		   a fallback anyways. */
		strcpy(c->x86_model_id, "Hammer");
	}
	display_cacheinfo(c);

	/* c->x86_power is 8000_0007 edx. Bit 8 is constant TSC */
	if (c->x86_power & (1<<8))
		set_cpu_cap(c, X86_FEATURE_CONSTANT_TSC);

	/* Multi core CPU? */
	if (c->extended_cpuid_level >= 0x80000008)
		amd_detect_cmp(c);

	if (c->extended_cpuid_level >= 0x80000006 &&
	    (cpuid_edx(0x80000006) & 0xf000))
		num_cache_leaves = 4;
	else
		num_cache_leaves = 3;

	if (c->x86 == 0xf || c->x86 == 0x10 || c->x86 == 0x11)
		set_cpu_cap(c, X86_FEATURE_K8);

	/* RDTSC can be speculated around */
	clear_cpu_cap(c, X86_FEATURE_SYNC_RDTSC);

	/* Family 10 doesn't support C states in MWAIT so don't use it */
	if (c->x86 == 0x10 && !force_mwait)
		clear_cpu_cap(c, X86_FEATURE_MWAIT);

	if (amd_apic_timer_broken())
		disable_apic_timer = 1;
}

static void __cpuinit detect_ht(struct cpuinfo_x86 *c)
{
	u32 eax, ebx, ecx, edx;
	int index_msb, core_bits;

	cpuid(1, &eax, &ebx, &ecx, &edx);

	if (!cpu_has(c, X86_FEATURE_HT))
		return;
	if (cpu_has(c, X86_FEATURE_CMP_LEGACY))
		goto out;

	smp_num_siblings = (ebx & 0xff0000) >> 16;

	if (smp_num_siblings == 1) {
		printk(KERN_INFO "CPU: Hyper-Threading is disabled\n");
	} else if (smp_num_siblings > 1) {

		if (smp_num_siblings > NR_CPUS) {
			printk(KERN_WARNING "CPU: Unsupported number of "
			       "siblings %d", smp_num_siblings);
			smp_num_siblings = 1;
			return;
		}

		index_msb = get_count_order(smp_num_siblings);
		c->phys_proc_id = phys_pkg_id(index_msb);

		smp_num_siblings = smp_num_siblings / c->x86_max_cores;

		index_msb = get_count_order(smp_num_siblings);

		core_bits = get_count_order(c->x86_max_cores);

		c->cpu_core_id = phys_pkg_id(index_msb) &
				 ((1 << core_bits) - 1);
	}
out:
	if ((c->x86_max_cores * smp_num_siblings) > 1) {
		printk(KERN_INFO "CPU: Physical Processor ID: %d\n",
		       c->phys_proc_id);
		printk(KERN_INFO "CPU: Processor Core ID: %d\n",
		       c->cpu_core_id);
	}
}

/*
 * find out the number of processor cores on the die
 */
static int __cpuinit intel_num_cpu_cores(struct cpuinfo_x86 *c)
{
	unsigned int eax, t;

	if (c->cpuid_level < 4)
		return 1;

	cpuid_count(4, 0, &eax, &t, &t, &t);

	if (eax & 0x1f)
		return ((eax >> 26) + 1);
	else
		return 1;
}
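
/*
 * Example: CPUID leaf 4, subleaf 0 returns in EAX bits 31-26 the maximum
 * number of addressable cores in the package minus one, so eax = 0x04000121
 * means (1 + 1) = 2 cores on the die.
 */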

static void srat_detect_node(void)
{
	unsigned node;
	int cpu = smp_processor_id();
	int apicid = hard_smp_processor_id();

	/* Don't do the funky fallback heuristics the AMD version employs
	   for now. */
	node = apicid_to_node[apicid];
	if (node == NUMA_NO_NODE)
		node = first_node(node_online_map);
	numa_set_node(cpu, node);

	printk(KERN_INFO "CPU %d/%x -> Node %d\n", cpu, apicid, node);
}

static void __cpuinit init_intel(struct cpuinfo_x86 *c)
{
	unsigned n;

	init_intel_cacheinfo(c);
	if (c->cpuid_level > 9) {
		unsigned eax = cpuid_eax(10);
		/* Check for version and the number of counters */
		if ((eax & 0xff) && (((eax>>8) & 0xff) > 1))
			set_cpu_cap(c, X86_FEATURE_ARCH_PERFMON);
	}

	if (cpu_has_ds) {
		unsigned int l1, l2;

		rdmsr(MSR_IA32_MISC_ENABLE, l1, l2);
		if (!(l1 & (1<<11)))
			set_cpu_cap(c, X86_FEATURE_BTS);
		if (!(l1 & (1<<12)))
			set_cpu_cap(c, X86_FEATURE_PEBS);
	}

	n = c->extended_cpuid_level;
	if (n >= 0x80000008) {
		unsigned eax = cpuid_eax(0x80000008);

		c->x86_virt_bits = (eax >> 8) & 0xff;
		c->x86_phys_bits = eax & 0xff;
		/* CPUID workaround for Intel 0F34 CPU */
		if (c->x86_vendor == X86_VENDOR_INTEL &&
		    c->x86 == 0xF && c->x86_model == 0x3 &&
		    c->x86_mask == 0x4)
			c->x86_phys_bits = 36;
	}

	if (c->x86 == 15)
		c->x86_cache_alignment = c->x86_clflush_size * 2;
	if ((c->x86 == 0xf && c->x86_model >= 0x03) ||
	    (c->x86 == 0x6 && c->x86_model >= 0x0e))
		set_cpu_cap(c, X86_FEATURE_CONSTANT_TSC);
	if (c->x86 == 6)
		set_cpu_cap(c, X86_FEATURE_REP_GOOD);
	if (c->x86 == 15)
		set_cpu_cap(c, X86_FEATURE_SYNC_RDTSC);
	else
		clear_cpu_cap(c, X86_FEATURE_SYNC_RDTSC);
	c->x86_max_cores = intel_num_cpu_cores(c);

	srat_detect_node();
}

static void __cpuinit get_cpu_vendor(struct cpuinfo_x86 *c)
{
	char *v = c->x86_vendor_id;

	if (!strcmp(v, "AuthenticAMD"))
		c->x86_vendor = X86_VENDOR_AMD;
	else if (!strcmp(v, "GenuineIntel"))
		c->x86_vendor = X86_VENDOR_INTEL;
	else
		c->x86_vendor = X86_VENDOR_UNKNOWN;
}

struct cpu_model_info {
	int vendor;
	int family;
	char *model_names[16];
};

/* Do some early cpuid on the boot CPU to get some parameters that are
   needed before check_bugs. Everything advanced is in identify_cpu
   below. */
static void __cpuinit early_identify_cpu(struct cpuinfo_x86 *c)
{
	u32 tfms, xlvl;

	c->loops_per_jiffy = loops_per_jiffy;
	c->x86_cache_size = -1;
	c->x86_vendor = X86_VENDOR_UNKNOWN;
	c->x86_model = c->x86_mask = 0;	/* So far unknown... */
	c->x86_vendor_id[0] = '\0'; /* Unset */
	c->x86_model_id[0] = '\0';  /* Unset */
	c->x86_clflush_size = 64;
	c->x86_cache_alignment = c->x86_clflush_size;
	c->x86_max_cores = 1;
	c->x86_coreid_bits = 0;
	c->extended_cpuid_level = 0;
	memset(&c->x86_capability, 0, sizeof c->x86_capability);

	/* Get vendor name */
	cpuid(0x00000000, (unsigned int *)&c->cpuid_level,
	      (unsigned int *)&c->x86_vendor_id[0],
	      (unsigned int *)&c->x86_vendor_id[8],
	      (unsigned int *)&c->x86_vendor_id[4]);

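	/*
	 * CPUID leaf 0 returns the vendor string in EBX, EDX, ECX order, so
	 * EBX fills bytes 0-3, EDX bytes 4-7 and ECX bytes 8-11 above:
	 * "Genu" + "ineI" + "ntel" = "GenuineIntel".
	 */
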
	/* Initialize the standard set of capabilities */
	/* Note that the vendor-specific code below might override */

	/* Intel-defined flags: level 0x00000001 */
	if (c->cpuid_level >= 0x00000001) {
		__u32 misc;

		cpuid(0x00000001, &tfms, &misc, &c->x86_capability[4],
		      &c->x86_capability[0]);
		c->x86 = (tfms >> 8) & 0xf;
		c->x86_model = (tfms >> 4) & 0xf;
		c->x86_mask = tfms & 0xf;
		if (c->x86 == 0xf)
			c->x86 += (tfms >> 20) & 0xff;
		if (c->x86 >= 0x6)
			c->x86_model += ((tfms >> 16) & 0xF) << 4;
		if (c->x86_capability[0] & (1<<19))
			c->x86_clflush_size = ((misc >> 8) & 0xff) * 8;
	} else {
		/* Have CPUID level 0 only - unheard of */
		c->x86 = 4;
	}

	c->phys_proc_id = (cpuid_ebx(1) >> 24) & 0xff;
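
	/*
	 * Worked example of the decoding above: tfms = 0x000006fb (a Core 2)
	 * gives family (tfms >> 8) & 0xf = 6, base model (tfms >> 4) & 0xf =
	 * 0xf plus extended model 0 << 4, i.e. model 15, and stepping
	 * tfms & 0xf = 11. Feature bit 19 (clflush) makes misc bits 15-8
	 * valid as the CLFLUSH line size in 8-byte units.
	 */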

	/* AMD-defined flags: level 0x80000001 */
	xlvl = cpuid_eax(0x80000000);
	c->extended_cpuid_level = xlvl;
	if ((xlvl & 0xffff0000) == 0x80000000) {
		if (xlvl >= 0x80000001) {
			c->x86_capability[1] = cpuid_edx(0x80000001);
			c->x86_capability[6] = cpuid_ecx(0x80000001);
		}
		if (xlvl >= 0x80000004)
			get_model_name(c); /* Default name */
	}

	/* Transmeta-defined flags: level 0x80860001 */
	xlvl = cpuid_eax(0x80860000);
	if ((xlvl & 0xffff0000) == 0x80860000) {
		/* Don't set x86_cpuid_level here for now to not confuse. */
		if (xlvl >= 0x80860001)
			c->x86_capability[2] = cpuid_edx(0x80860001);
	}

	switch (c->x86_vendor) {
	case X86_VENDOR_AMD:
		early_init_amd(c);
		break;
	}
}

/*
 * This does the hard work of actually picking apart the CPU stuff...
 */
void __cpuinit identify_cpu(struct cpuinfo_x86 *c)
{
	int i;

	early_identify_cpu(c);

	init_scattered_cpuid_features(c);

	c->apicid = phys_pkg_id(0);

	/*
	 * Vendor-specific initialization.  In this section we
	 * canonicalize the feature flags, meaning if there are
	 * features a certain CPU supports which CPUID doesn't
	 * tell us, CPUID claiming incorrect flags, or other bugs,
	 * we handle them here.
	 *
	 * At the end of this section, c->x86_capability better
	 * indicate the features this CPU genuinely supports!
	 */
	switch (c->x86_vendor) {
	case X86_VENDOR_AMD:
		init_amd(c);
		break;

	case X86_VENDOR_INTEL:
		init_intel(c);
		break;

	case X86_VENDOR_UNKNOWN:
	default:
		display_cacheinfo(c);
		break;
	}

	select_idle_routine(c);
	detect_ht(c);

	/*
	 * On SMP, boot_cpu_data holds the common feature set between
	 * all CPUs; so make sure that we indicate which features are
	 * common between the CPUs.  The first time this routine gets
	 * executed, c == &boot_cpu_data.
	 */
	if (c != &boot_cpu_data) {
		/* AND the already accumulated flags with these */
		for (i = 0; i < NCAPINTS; i++)
			boot_cpu_data.x86_capability[i] &= c->x86_capability[i];
	}

#ifdef CONFIG_X86_MCE
	mcheck_init(c);
#endif
	if (c != &boot_cpu_data)
		mtrr_ap_init();

	numa_add_cpu(smp_processor_id());
}

void __cpuinit print_cpu_info(struct cpuinfo_x86 *c)
{
	if (c->x86_model_id[0])
		printk(KERN_INFO "%s", c->x86_model_id);

	if (c->x86_mask || c->cpuid_level >= 0)
		printk(KERN_CONT " stepping %02x\n", c->x86_mask);
	else
		printk(KERN_CONT "\n");
}

/*
 * Get CPU information for use by the procfs.
 */
static int show_cpuinfo(struct seq_file *m, void *v)
{
	struct cpuinfo_x86 *c = v;
	int cpu = 0, i;

	/*
	 * These flag bits must match the definitions in <asm/cpufeature.h>.
	 * NULL means this bit is undefined or reserved; either way it doesn't
	 * have meaning as far as Linux is concerned. Note that it's important
	 * to realize there is a difference between this table and CPUID -- if
	 * applications want to get the raw CPUID data, they should access
	 * /dev/cpu/<cpu_nr>/cpuid instead.
	 */
	static const char *const x86_cap_flags[] = {
		/* Intel-defined */
		"fpu", "vme", "de", "pse", "tsc", "msr", "pae", "mce",
		"cx8", "apic", NULL, "sep", "mtrr", "pge", "mca", "cmov",
		"pat", "pse36", "pn", "clflush", NULL, "dts", "acpi", "mmx",
		"fxsr", "sse", "sse2", "ss", "ht", "tm", "ia64", "pbe",

		/* AMD-defined */
		NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
		NULL, NULL, NULL, "syscall", NULL, NULL, NULL, NULL,
		NULL, NULL, NULL, NULL, "nx", NULL, "mmxext", NULL,
		NULL, "fxsr_opt", "pdpe1gb", "rdtscp", NULL, "lm",
		"3dnowext", "3dnow",

		/* Transmeta-defined */
		"recovery", "longrun", NULL, "lrti", NULL, NULL, NULL, NULL,
		NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
		NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
		NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,

		/* Other (Linux-defined) */
		"cxmmx", "k6_mtrr", "cyrix_arr", "centaur_mcr",
		NULL, NULL, NULL, NULL,
		"constant_tsc", "up", NULL, "arch_perfmon",
		"pebs", "bts", NULL, "sync_rdtsc",
		"rep_good", NULL, NULL, NULL, NULL, NULL, NULL, NULL,
		NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,

		/* Intel-defined (#2) */
		"pni", NULL, NULL, "monitor", "ds_cpl", "vmx", "smx", "est",
		"tm2", "ssse3", "cid", NULL, NULL, "cx16", "xtpr", NULL,
		NULL, NULL, "dca", "sse4_1", "sse4_2", NULL, NULL, "popcnt",
		NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,

		/* VIA/Cyrix/Centaur-defined */
		NULL, NULL, "rng", "rng_en", NULL, NULL, "ace", "ace_en",
		"ace2", "ace2_en", "phe", "phe_en", "pmm", "pmm_en", NULL, NULL,
		NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
		NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,

		/* AMD-defined (#2) */
		"lahf_lm", "cmp_legacy", "svm", "extapic",
		"cr8_legacy", "abm", "sse4a", "misalignsse",
		"3dnowprefetch", "osvw", "ibs", "sse5",
		"skinit", "wdt", NULL, NULL,
		NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
		NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,

		/* Auxiliary (Linux-defined) */
		"ida", NULL, NULL, NULL, NULL, NULL, NULL, NULL,
		NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
		NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
		NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
	};

	static const char *const x86_power_flags[] = {
		"ts",	/* temperature sensor */
		"fid",	/* frequency id control */
		"vid",	/* voltage id control */
		"ttp",	/* thermal trip */
		"tm",
		"stc",
		"100mhzsteps",
		"hwpstate",
		"",	/* tsc invariant mapped to constant_tsc */
	};

	seq_printf(m, "processor\t: %u\n"
		   "vendor_id\t: %s\n"
		   "cpu family\t: %d\n"
		   "model\t\t: %d\n"
		   "model name\t: %s\n",
		   (unsigned)cpu,
		   c->x86_vendor_id[0] ? c->x86_vendor_id : "unknown",
		   c->x86,
		   (int)c->x86_model,
		   c->x86_model_id[0] ? c->x86_model_id : "unknown");

	if (c->x86_mask || c->cpuid_level >= 0)
		seq_printf(m, "stepping\t: %d\n", c->x86_mask);
	else
		seq_printf(m, "stepping\t: unknown\n");

	if (cpu_has(c, X86_FEATURE_TSC)) {
		unsigned int freq = cpufreq_quick_get((unsigned)cpu);

		if (!freq)
			freq = cpu_khz;
		seq_printf(m, "cpu MHz\t\t: %u.%03u\n",
			   freq / 1000, (freq % 1000));
	}

	if (c->x86_cache_size >= 0)
		seq_printf(m, "cache size\t: %d KB\n", c->x86_cache_size);

	if (smp_num_siblings * c->x86_max_cores > 1) {
		seq_printf(m, "physical id\t: %d\n", c->phys_proc_id);
		seq_printf(m, "siblings\t: %d\n",
			   cpus_weight(per_cpu(cpu_core_map, cpu)));
		seq_printf(m, "core id\t\t: %d\n", c->cpu_core_id);
		seq_printf(m, "cpu cores\t: %d\n", c->booted_cores);
	}

	seq_printf(m,
		   "fpu\t\t: yes\n"
		   "fpu_exception\t: yes\n"
		   "cpuid level\t: %d\n"
		   "wp\t\t: yes\n"
		   "flags\t\t:",
		   c->cpuid_level);

	for (i = 0; i < 32*NCAPINTS; i++)
		if (cpu_has(c, i) && x86_cap_flags[i] != NULL)
			seq_printf(m, " %s", x86_cap_flags[i]);

	seq_printf(m, "\nbogomips\t: %lu.%02lu\n",
		   c->loops_per_jiffy/(500000/HZ),
		   (c->loops_per_jiffy/(5000/HZ)) % 100);
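
	/*
	 * loops_per_jiffy counts delay-loop iterations per timer tick, so the
	 * value printed here is loops_per_jiffy * HZ / 500000; e.g. with
	 * HZ = 250 and loops_per_jiffy = 4000000 this reads
	 * "bogomips : 2000.00".
	 */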

	if (c->x86_tlbsize > 0)
		seq_printf(m, "TLB size\t: %d 4K pages\n", c->x86_tlbsize);
	seq_printf(m, "clflush size\t: %d\n", c->x86_clflush_size);
	seq_printf(m, "cache_alignment\t: %d\n", c->x86_cache_alignment);

	seq_printf(m, "address sizes\t: %u bits physical, %u bits virtual\n",
		   c->x86_phys_bits, c->x86_virt_bits);

	seq_printf(m, "power management:");
	for (i = 0; i < 32; i++) {
		if (c->x86_power & (1 << i)) {
			if (i < ARRAY_SIZE(x86_power_flags) &&
			    x86_power_flags[i])
				seq_printf(m, "%s%s",
					   x86_power_flags[i][0] ? " " : "",
					   x86_power_flags[i]);
			else
				seq_printf(m, " [%d]", i);
		}
	}

	seq_printf(m, "\n\n");

	return 0;
}

static void *c_start(struct seq_file *m, loff_t *pos)
{
	if (*pos == 0)	/* just in case, cpu 0 is not the first */
		*pos = first_cpu(cpu_online_map);
	if ((*pos) < NR_CPUS && cpu_online(*pos))
		return &cpu_data(*pos);
	return NULL;
}

static void *c_next(struct seq_file *m, void *v, loff_t *pos)
{
	*pos = next_cpu(*pos, cpu_online_map);
	return c_start(m, pos);
}

static void c_stop(struct seq_file *m, void *v)
{
}

struct seq_operations cpuinfo_op = {
	.start = c_start,
	.next = c_next,
	.stop = c_stop,
	.show = show_cpuinfo,
};