Commit | Line | Data |
---|---|---|
1da177e4 LT |
1 | #include <linux/init.h> |
2 | #include <linux/kernel.h> | |
3 | ||
4 | #include <linux/string.h> | |
5 | #include <linux/bitops.h> | |
6 | #include <linux/smp.h> | |
83ce4009 | 7 | #include <linux/sched.h> |
1da177e4 | 8 | #include <linux/thread_info.h> |
53e86b91 | 9 | #include <linux/module.h> |
8bdbd962 | 10 | #include <linux/uaccess.h> |
1da177e4 LT |
11 | |
12 | #include <asm/processor.h> | |
d72b1b4f | 13 | #include <asm/pgtable.h> |
1da177e4 | 14 | #include <asm/msr.h> |
73bdb73f | 15 | #include <asm/bugs.h> |
1f442d70 | 16 | #include <asm/cpu.h> |
1da177e4 | 17 | |
185f3b9d | 18 | #ifdef CONFIG_X86_64 |
8bdbd962 | 19 | #include <linux/topology.h> |
185f3b9d YL |
20 | #include <asm/numa_64.h> |
21 | #endif | |
22 | ||
1da177e4 LT |
23 | #include "cpu.h" |
24 | ||
25 | #ifdef CONFIG_X86_LOCAL_APIC | |
26 | #include <asm/mpspec.h> | |
27 | #include <asm/apic.h> | |
1da177e4 LT |
28 | #endif |
29 | ||
03ae5768 | 30 | static void __cpuinit early_init_intel(struct cpuinfo_x86 *c) |
1da177e4 | 31 | { |
99fb4d34 | 32 | /* Unmask CPUID levels if masked: */ |
30a0fb94 | 33 | if (c->x86 > 6 || (c->x86 == 6 && c->x86_model >= 0xd)) { |
99fb4d34 | 34 | u64 misc_enable; |
066941bd | 35 | |
99fb4d34 IM |
36 | rdmsrl(MSR_IA32_MISC_ENABLE, misc_enable); |
37 | ||
38 | if (misc_enable & MSR_IA32_MISC_ENABLE_LIMIT_CPUID) { | |
39 | misc_enable &= ~MSR_IA32_MISC_ENABLE_LIMIT_CPUID; | |
40 | wrmsrl(MSR_IA32_MISC_ENABLE, misc_enable); | |
41 | c->cpuid_level = cpuid_eax(0); | |
42 | } | |
066941bd PA |
43 | } |
44 | ||
2b16a235 AK |
45 | if ((c->x86 == 0xf && c->x86_model >= 0x03) || |
46 | (c->x86 == 0x6 && c->x86_model >= 0x0e)) | |
47 | set_cpu_cap(c, X86_FEATURE_CONSTANT_TSC); | |
185f3b9d | 48 | |
7a0fc404 PA |
49 | /* |
50 | * Atom erratum AAE44/AAF40/AAG38/AAH41: | |
51 | * | |
52 | * A race condition between speculative fetches and invalidating | |
53 | * a large page. This is worked around in microcode, but we | |
54 | * need the microcode to have already been loaded... so if it is | |
55 | * not, recommend a BIOS update and disable large pages. | |
56 | */ | |
57 | if (c->x86 == 6 && c->x86_model == 0x1c && c->x86_mask <= 2) { | |
58 | u32 ucode, junk; | |
59 | ||
60 | wrmsr(MSR_IA32_UCODE_REV, 0, 0); | |
61 | sync_core(); | |
62 | rdmsr(MSR_IA32_UCODE_REV, junk, ucode); | |
63 | ||
64 | if (ucode < 0x20e) { | |
65 | printk(KERN_WARNING "Atom PSE erratum detected, BIOS microcode update recommended\n"); | |
66 | clear_cpu_cap(c, X86_FEATURE_PSE); | |
67 | } | |
68 | } | |
69 | ||
185f3b9d YL |
70 | #ifdef CONFIG_X86_64 |
71 | set_cpu_cap(c, X86_FEATURE_SYSENTER32); | |
72 | #else | |
73 | /* Netburst reports 64 bytes clflush size, but does IO in 128 bytes */ | |
74 | if (c->x86 == 15 && c->x86_cache_alignment == 64) | |
75 | c->x86_cache_alignment = 128; | |
76 | #endif | |
40fb1715 | 77 | |
13c6c532 JB |
78 | /* CPUID workaround for 0F33/0F34 CPU */ |
79 | if (c->x86 == 0xF && c->x86_model == 0x3 | |
80 | && (c->x86_mask == 0x3 || c->x86_mask == 0x4)) | |
81 | c->x86_phys_bits = 36; | |
82 | ||
40fb1715 VP |
83 | /* |
84 | * c->x86_power is 8000_0007 edx. Bit 8 is TSC runs at constant rate | |
83ce4009 IM |
85 | * with P/T states and does not stop in deep C-states. |
86 | * | |
87 | * It is also reliable across cores and sockets. (but not across | |
88 | * cabinets - we turn it off in that case explicitly.) | |
40fb1715 VP |
89 | */ |
90 | if (c->x86_power & (1 << 8)) { | |
91 | set_cpu_cap(c, X86_FEATURE_CONSTANT_TSC); | |
92 | set_cpu_cap(c, X86_FEATURE_NONSTOP_TSC); | |
14be1f74 DS |
93 | if (!check_tsc_unstable()) |
94 | sched_clock_stable = 1; | |
40fb1715 VP |
95 | } |
96 | ||
75a04811 PA |
97 | /* |
98 | * There is a known erratum on Pentium III and Core Solo | |
99 | * and Core Duo CPUs. | |
100 | * " Page with PAT set to WC while associated MTRR is UC | |
101 | * may consolidate to UC " | |
102 | * Because of this erratum, it is better to stick with | |
103 | * setting WC in MTRR rather than using PAT on these CPUs. | |
104 | * | |
105 | * Enable PAT WC only on P4, Core 2 or later CPUs. | |
106 | */ | |
107 | if (c->x86 == 6 && c->x86_model < 15) | |
108 | clear_cpu_cap(c, X86_FEATURE_PAT); | |
f8561296 VN |
109 | |
110 | #ifdef CONFIG_KMEMCHECK | |
111 | /* | |
112 | * P4s have a "fast strings" feature which causes single- | |
113 | * stepping REP instructions to only generate a #DB on | |
114 | * cache-line boundaries. | |
115 | * | |
116 | * Ingo Molnar reported a Pentium D (model 6) and a Xeon | |
117 | * (model 2) with the same problem. | |
118 | */ | |
119 | if (c->x86 == 15) { | |
120 | u64 misc_enable; | |
121 | ||
122 | rdmsrl(MSR_IA32_MISC_ENABLE, misc_enable); | |
123 | ||
124 | if (misc_enable & MSR_IA32_MISC_ENABLE_FAST_STRING) { | |
125 | printk(KERN_INFO "kmemcheck: Disabling fast string operations\n"); | |
126 | ||
127 | misc_enable &= ~MSR_IA32_MISC_ENABLE_FAST_STRING; | |
128 | wrmsrl(MSR_IA32_MISC_ENABLE, misc_enable); | |
129 | } | |
130 | } | |
131 | #endif | |
1da177e4 LT |
132 | } |
133 | ||
185f3b9d | 134 | #ifdef CONFIG_X86_32 |
1da177e4 LT |
135 | /* |
136 | * Early probe support logic for ppro memory erratum #50 | |
137 | * | |
138 | * This is called before we do cpu ident work | |
139 | */ | |
65eb6b43 | 140 | |
3bc9b76b | 141 | int __cpuinit ppro_with_ram_bug(void) |
1da177e4 LT |
142 | { |
143 | /* Uses data from early_cpu_detect now */ | |
144 | if (boot_cpu_data.x86_vendor == X86_VENDOR_INTEL && | |
145 | boot_cpu_data.x86 == 6 && | |
146 | boot_cpu_data.x86_model == 1 && | |
147 | boot_cpu_data.x86_mask < 8) { | |
148 | printk(KERN_INFO "Pentium Pro with Errata#50 detected. Taking evasive action.\n"); | |
149 | return 1; | |
150 | } | |
151 | return 0; | |
152 | } | |
65eb6b43 | 153 | |
#ifdef CONFIG_X86_F00F_BUG
/*
 * Work around the F0 0F bug by remapping the IDT read-only: a locked
 * cmpxchg8b on the IDT then faults instead of hanging the machine.
 */
static void __cpuinit trap_init_f00f_bug(void)
{
	/* Map the IDT read-only at a fixmap slot first... */
	__set_fixmap(FIX_F00F_IDT, __pa(&idt_table), PAGE_KERNEL_RO);

	/*
	 * ...then update the IDT descriptor and reload the IDT so that
	 * it uses the read-only mapped virtual address.
	 */
	idt_descr.address = fix_to_virt(FIX_F00F_IDT);
	load_idt(&idt_descr);
}
#endif
167 | ||
1f442d70 YL |
168 | static void __cpuinit intel_smp_check(struct cpuinfo_x86 *c) |
169 | { | |
170 | #ifdef CONFIG_SMP | |
171 | /* calling is from identify_secondary_cpu() ? */ | |
172 | if (c->cpu_index == boot_cpu_id) | |
173 | return; | |
174 | ||
175 | /* | |
176 | * Mask B, Pentium, but not Pentium MMX | |
177 | */ | |
178 | if (c->x86 == 5 && | |
179 | c->x86_mask >= 1 && c->x86_mask <= 4 && | |
180 | c->x86_model <= 3) { | |
181 | /* | |
182 | * Remember we have B step Pentia with bugs | |
183 | */ | |
184 | WARN_ONCE(1, "WARNING: SMP operation may be unreliable" | |
185 | "with B stepping processors.\n"); | |
186 | } | |
187 | #endif | |
188 | } | |
189 | ||
4052704d | 190 | static void __cpuinit intel_workarounds(struct cpuinfo_x86 *c) |
1da177e4 LT |
191 | { |
192 | unsigned long lo, hi; | |
193 | ||
4052704d YL |
194 | #ifdef CONFIG_X86_F00F_BUG |
195 | /* | |
196 | * All current models of Pentium and Pentium with MMX technology CPUs | |
8bdbd962 AC |
197 | * have the F0 0F bug, which lets nonprivileged users lock up the |
198 | * system. | |
4052704d YL |
199 | * Note that the workaround only should be initialized once... |
200 | */ | |
201 | c->f00f_bug = 0; | |
202 | if (!paravirt_enabled() && c->x86 == 5) { | |
203 | static int f00f_workaround_enabled; | |
204 | ||
205 | c->f00f_bug = 1; | |
206 | if (!f00f_workaround_enabled) { | |
207 | trap_init_f00f_bug(); | |
208 | printk(KERN_NOTICE "Intel Pentium with F0 0F bug - workaround enabled.\n"); | |
209 | f00f_workaround_enabled = 1; | |
210 | } | |
211 | } | |
212 | #endif | |
213 | ||
214 | /* | |
215 | * SEP CPUID bug: Pentium Pro reports SEP but doesn't have it until | |
216 | * model 3 mask 3 | |
217 | */ | |
218 | if ((c->x86<<8 | c->x86_model<<4 | c->x86_mask) < 0x633) | |
219 | clear_cpu_cap(c, X86_FEATURE_SEP); | |
220 | ||
221 | /* | |
222 | * P4 Xeon errata 037 workaround. | |
223 | * Hardware prefetcher may cause stale data to be loaded into the cache. | |
224 | */ | |
1da177e4 | 225 | if ((c->x86 == 15) && (c->x86_model == 1) && (c->x86_mask == 1)) { |
65eb6b43 | 226 | rdmsr(MSR_IA32_MISC_ENABLE, lo, hi); |
ecab22aa | 227 | if ((lo & MSR_IA32_MISC_ENABLE_PREFETCH_DISABLE) == 0) { |
1da177e4 LT |
228 | printk (KERN_INFO "CPU: C0 stepping P4 Xeon detected.\n"); |
229 | printk (KERN_INFO "CPU: Disabling hardware prefetching (Errata 037)\n"); | |
ecab22aa | 230 | lo |= MSR_IA32_MISC_ENABLE_PREFETCH_DISABLE; |
8bdbd962 | 231 | wrmsr(MSR_IA32_MISC_ENABLE, lo, hi); |
1da177e4 LT |
232 | } |
233 | } | |
1da177e4 | 234 | |
4052704d YL |
235 | /* |
236 | * See if we have a good local APIC by checking for buggy Pentia, | |
237 | * i.e. all B steppings and the C2 stepping of P54C when using their | |
238 | * integrated APIC (see 11AP erratum in "Pentium Processor | |
239 | * Specification Update"). | |
240 | */ | |
241 | if (cpu_has_apic && (c->x86<<8 | c->x86_model<<4) == 0x520 && | |
242 | (c->x86_mask < 0x6 || c->x86_mask == 0xb)) | |
243 | set_cpu_cap(c, X86_FEATURE_11AP); | |
185f3b9d | 244 | |
185f3b9d | 245 | |
4052704d | 246 | #ifdef CONFIG_X86_INTEL_USERCOPY |
185f3b9d | 247 | /* |
4052704d | 248 | * Set up the preferred alignment for movsl bulk memory moves |
185f3b9d | 249 | */ |
4052704d YL |
250 | switch (c->x86) { |
251 | case 4: /* 486: untested */ | |
252 | break; | |
253 | case 5: /* Old Pentia: untested */ | |
254 | break; | |
255 | case 6: /* PII/PIII only like movsl with 8-byte alignment */ | |
256 | movsl_mask.mask = 7; | |
257 | break; | |
258 | case 15: /* P4 is OK down to 8-byte alignment */ | |
259 | movsl_mask.mask = 7; | |
260 | break; | |
261 | } | |
185f3b9d | 262 | #endif |
4052704d YL |
263 | |
264 | #ifdef CONFIG_X86_NUMAQ | |
265 | numaq_tsc_disable(); | |
266 | #endif | |
1f442d70 YL |
267 | |
268 | intel_smp_check(c); | |
4052704d YL |
269 | } |
270 | #else | |
271 | static void __cpuinit intel_workarounds(struct cpuinfo_x86 *c) | |
272 | { | |
273 | } | |
185f3b9d YL |
274 | #endif |
275 | ||
2759c328 | 276 | static void __cpuinit srat_detect_node(struct cpuinfo_x86 *c) |
185f3b9d YL |
277 | { |
278 | #if defined(CONFIG_NUMA) && defined(CONFIG_X86_64) | |
279 | unsigned node; | |
280 | int cpu = smp_processor_id(); | |
2759c328 | 281 | int apicid = cpu_has_apic ? hard_smp_processor_id() : c->apicid; |
185f3b9d YL |
282 | |
283 | /* Don't do the funky fallback heuristics the AMD version employs | |
284 | for now. */ | |
285 | node = apicid_to_node[apicid]; | |
d9c2d5ac | 286 | if (node == NUMA_NO_NODE) |
185f3b9d | 287 | node = first_node(node_online_map); |
d9c2d5ac YL |
288 | else if (!node_online(node)) { |
289 | /* reuse the value from init_cpu_to_node() */ | |
290 | node = cpu_to_node(cpu); | |
291 | } | |
185f3b9d | 292 | numa_set_node(cpu, node); |
185f3b9d YL |
293 | #endif |
294 | } | |
295 | ||
3dd9d514 AK |
296 | /* |
297 | * find out the number of processor cores on the die | |
298 | */ | |
f69feff7 | 299 | static int __cpuinit intel_num_cpu_cores(struct cpuinfo_x86 *c) |
3dd9d514 | 300 | { |
f2ab4461 | 301 | unsigned int eax, ebx, ecx, edx; |
3dd9d514 AK |
302 | |
303 | if (c->cpuid_level < 4) | |
304 | return 1; | |
305 | ||
f2ab4461 ZA |
306 | /* Intel has a non-standard dependency on %ecx for this CPUID level. */ |
307 | cpuid_count(4, 0, &eax, &ebx, &ecx, &edx); | |
3dd9d514 | 308 | if (eax & 0x1f) |
8bdbd962 | 309 | return (eax >> 26) + 1; |
3dd9d514 AK |
310 | else |
311 | return 1; | |
312 | } | |
313 | ||
e38e05a8 SY |
314 | static void __cpuinit detect_vmx_virtcap(struct cpuinfo_x86 *c) |
315 | { | |
316 | /* Intel VMX MSR indicated features */ | |
317 | #define X86_VMX_FEATURE_PROC_CTLS_TPR_SHADOW 0x00200000 | |
318 | #define X86_VMX_FEATURE_PROC_CTLS_VNMI 0x00400000 | |
319 | #define X86_VMX_FEATURE_PROC_CTLS_2ND_CTLS 0x80000000 | |
320 | #define X86_VMX_FEATURE_PROC_CTLS2_VIRT_APIC 0x00000001 | |
321 | #define X86_VMX_FEATURE_PROC_CTLS2_EPT 0x00000002 | |
322 | #define X86_VMX_FEATURE_PROC_CTLS2_VPID 0x00000020 | |
323 | ||
324 | u32 vmx_msr_low, vmx_msr_high, msr_ctl, msr_ctl2; | |
325 | ||
326 | clear_cpu_cap(c, X86_FEATURE_TPR_SHADOW); | |
327 | clear_cpu_cap(c, X86_FEATURE_VNMI); | |
328 | clear_cpu_cap(c, X86_FEATURE_FLEXPRIORITY); | |
329 | clear_cpu_cap(c, X86_FEATURE_EPT); | |
330 | clear_cpu_cap(c, X86_FEATURE_VPID); | |
331 | ||
332 | rdmsr(MSR_IA32_VMX_PROCBASED_CTLS, vmx_msr_low, vmx_msr_high); | |
333 | msr_ctl = vmx_msr_high | vmx_msr_low; | |
334 | if (msr_ctl & X86_VMX_FEATURE_PROC_CTLS_TPR_SHADOW) | |
335 | set_cpu_cap(c, X86_FEATURE_TPR_SHADOW); | |
336 | if (msr_ctl & X86_VMX_FEATURE_PROC_CTLS_VNMI) | |
337 | set_cpu_cap(c, X86_FEATURE_VNMI); | |
338 | if (msr_ctl & X86_VMX_FEATURE_PROC_CTLS_2ND_CTLS) { | |
339 | rdmsr(MSR_IA32_VMX_PROCBASED_CTLS2, | |
340 | vmx_msr_low, vmx_msr_high); | |
341 | msr_ctl2 = vmx_msr_high | vmx_msr_low; | |
342 | if ((msr_ctl2 & X86_VMX_FEATURE_PROC_CTLS2_VIRT_APIC) && | |
343 | (msr_ctl & X86_VMX_FEATURE_PROC_CTLS_TPR_SHADOW)) | |
344 | set_cpu_cap(c, X86_FEATURE_FLEXPRIORITY); | |
345 | if (msr_ctl2 & X86_VMX_FEATURE_PROC_CTLS2_EPT) | |
346 | set_cpu_cap(c, X86_FEATURE_EPT); | |
347 | if (msr_ctl2 & X86_VMX_FEATURE_PROC_CTLS2_VPID) | |
348 | set_cpu_cap(c, X86_FEATURE_VPID); | |
349 | } | |
350 | } | |
351 | ||
3bc9b76b | 352 | static void __cpuinit init_intel(struct cpuinfo_x86 *c) |
1da177e4 LT |
353 | { |
354 | unsigned int l2 = 0; | |
1da177e4 | 355 | |
2b16a235 AK |
356 | early_init_intel(c); |
357 | ||
4052704d | 358 | intel_workarounds(c); |
1da177e4 | 359 | |
345077cd SS |
360 | /* |
361 | * Detect the extended topology information if available. This | |
362 | * will reinitialise the initial_apicid which will be used | |
363 | * in init_intel_cacheinfo() | |
364 | */ | |
365 | detect_extended_topology(c); | |
366 | ||
1da177e4 | 367 | l2 = init_intel_cacheinfo(c); |
65eb6b43 | 368 | if (c->cpuid_level > 9) { |
0080e667 VP |
369 | unsigned eax = cpuid_eax(10); |
370 | /* Check for version and the number of counters */ | |
371 | if ((eax & 0xff) && (((eax>>8) & 0xff) > 1)) | |
d0e95ebd | 372 | set_cpu_cap(c, X86_FEATURE_ARCH_PERFMON); |
0080e667 | 373 | } |
1da177e4 | 374 | |
4052704d YL |
375 | if (cpu_has_xmm2) |
376 | set_cpu_cap(c, X86_FEATURE_LFENCE_RDTSC); | |
377 | if (cpu_has_ds) { | |
378 | unsigned int l1; | |
379 | rdmsr(MSR_IA32_MISC_ENABLE, l1, l2); | |
380 | if (!(l1 & (1<<11))) | |
381 | set_cpu_cap(c, X86_FEATURE_BTS); | |
382 | if (!(l1 & (1<<12))) | |
383 | set_cpu_cap(c, X86_FEATURE_PEBS); | |
4052704d | 384 | } |
1da177e4 | 385 | |
e736ad54 PV |
386 | if (c->x86 == 6 && c->x86_model == 29 && cpu_has_clflush) |
387 | set_cpu_cap(c, X86_FEATURE_CLFLUSH_MONITOR); | |
388 | ||
4052704d YL |
389 | #ifdef CONFIG_X86_64 |
390 | if (c->x86 == 15) | |
391 | c->x86_cache_alignment = c->x86_clflush_size * 2; | |
392 | if (c->x86 == 6) | |
393 | set_cpu_cap(c, X86_FEATURE_REP_GOOD); | |
394 | #else | |
65eb6b43 PC |
395 | /* |
396 | * Names for the Pentium II/Celeron processors | |
397 | * detectable only by also checking the cache size. | |
398 | * Dixon is NOT a Celeron. | |
399 | */ | |
1da177e4 | 400 | if (c->x86 == 6) { |
4052704d YL |
401 | char *p = NULL; |
402 | ||
1da177e4 LT |
403 | switch (c->x86_model) { |
404 | case 5: | |
405 | if (c->x86_mask == 0) { | |
406 | if (l2 == 0) | |
407 | p = "Celeron (Covington)"; | |
408 | else if (l2 == 256) | |
409 | p = "Mobile Pentium II (Dixon)"; | |
410 | } | |
411 | break; | |
65eb6b43 | 412 | |
1da177e4 LT |
413 | case 6: |
414 | if (l2 == 128) | |
415 | p = "Celeron (Mendocino)"; | |
416 | else if (c->x86_mask == 0 || c->x86_mask == 5) | |
417 | p = "Celeron-A"; | |
418 | break; | |
65eb6b43 | 419 | |
1da177e4 LT |
420 | case 8: |
421 | if (l2 == 128) | |
422 | p = "Celeron (Coppermine)"; | |
423 | break; | |
424 | } | |
1da177e4 | 425 | |
4052704d YL |
426 | if (p) |
427 | strcpy(c->x86_model_id, p); | |
1da177e4 | 428 | } |
1da177e4 | 429 | |
185f3b9d YL |
430 | if (c->x86 == 15) |
431 | set_cpu_cap(c, X86_FEATURE_P4); | |
432 | if (c->x86 == 6) | |
433 | set_cpu_cap(c, X86_FEATURE_P3); | |
f4166c54 | 434 | #endif |
185f3b9d | 435 | |
185f3b9d YL |
436 | if (!cpu_has(c, X86_FEATURE_XTOPOLOGY)) { |
437 | /* | |
438 | * let's use the legacy cpuid vector 0x1 and 0x4 for topology | |
439 | * detection. | |
440 | */ | |
441 | c->x86_max_cores = intel_num_cpu_cores(c); | |
442 | #ifdef CONFIG_X86_32 | |
443 | detect_ht(c); | |
444 | #endif | |
445 | } | |
446 | ||
447 | /* Work around errata */ | |
2759c328 | 448 | srat_detect_node(c); |
e38e05a8 SY |
449 | |
450 | if (cpu_has(c, X86_FEATURE_VMX)) | |
451 | detect_vmx_virtcap(c); | |
42ed458a | 452 | } |
1da177e4 | 453 | |
#ifdef CONFIG_X86_32
static unsigned int __cpuinit intel_size_cache(struct cpuinfo_x86 *c, unsigned int size)
{
	/*
	 * Intel PIII Tualatin. This comes in two flavours.
	 * One has 256kb of cache, the other 512. We have no way
	 * to determine which, so we use a boottime override
	 * for the 512kb model, and assume 256 otherwise.
	 */
	if (c->x86 == 6 && c->x86_model == 11 && size == 0)
		return 256;

	return size;
}
#endif
1da177e4 | 468 | |
02dde8b4 | 469 | static const struct cpu_dev __cpuinitconst intel_cpu_dev = { |
1da177e4 | 470 | .c_vendor = "Intel", |
65eb6b43 | 471 | .c_ident = { "GenuineIntel" }, |
185f3b9d | 472 | #ifdef CONFIG_X86_32 |
1da177e4 | 473 | .c_models = { |
65eb6b43 PC |
474 | { .vendor = X86_VENDOR_INTEL, .family = 4, .model_names = |
475 | { | |
476 | [0] = "486 DX-25/33", | |
477 | [1] = "486 DX-50", | |
478 | [2] = "486 SX", | |
479 | [3] = "486 DX/2", | |
480 | [4] = "486 SL", | |
481 | [5] = "486 SX/2", | |
482 | [7] = "486 DX/2-WB", | |
483 | [8] = "486 DX/4", | |
1da177e4 LT |
484 | [9] = "486 DX/4-WB" |
485 | } | |
486 | }, | |
487 | { .vendor = X86_VENDOR_INTEL, .family = 5, .model_names = | |
65eb6b43 PC |
488 | { |
489 | [0] = "Pentium 60/66 A-step", | |
490 | [1] = "Pentium 60/66", | |
1da177e4 | 491 | [2] = "Pentium 75 - 200", |
65eb6b43 | 492 | [3] = "OverDrive PODP5V83", |
1da177e4 | 493 | [4] = "Pentium MMX", |
65eb6b43 | 494 | [7] = "Mobile Pentium 75 - 200", |
1da177e4 LT |
495 | [8] = "Mobile Pentium MMX" |
496 | } | |
497 | }, | |
498 | { .vendor = X86_VENDOR_INTEL, .family = 6, .model_names = | |
65eb6b43 | 499 | { |
1da177e4 | 500 | [0] = "Pentium Pro A-step", |
65eb6b43 PC |
501 | [1] = "Pentium Pro", |
502 | [3] = "Pentium II (Klamath)", | |
503 | [4] = "Pentium II (Deschutes)", | |
504 | [5] = "Pentium II (Deschutes)", | |
1da177e4 | 505 | [6] = "Mobile Pentium II", |
65eb6b43 PC |
506 | [7] = "Pentium III (Katmai)", |
507 | [8] = "Pentium III (Coppermine)", | |
1da177e4 LT |
508 | [10] = "Pentium III (Cascades)", |
509 | [11] = "Pentium III (Tualatin)", | |
510 | } | |
511 | }, | |
512 | { .vendor = X86_VENDOR_INTEL, .family = 15, .model_names = | |
513 | { | |
514 | [0] = "Pentium 4 (Unknown)", | |
515 | [1] = "Pentium 4 (Willamette)", | |
516 | [2] = "Pentium 4 (Northwood)", | |
517 | [4] = "Pentium 4 (Foster)", | |
518 | [5] = "Pentium 4 (Foster)", | |
519 | } | |
520 | }, | |
521 | }, | |
185f3b9d YL |
522 | .c_size_cache = intel_size_cache, |
523 | #endif | |
03ae5768 | 524 | .c_early_init = early_init_intel, |
1da177e4 | 525 | .c_init = init_intel, |
10a434fc | 526 | .c_x86_vendor = X86_VENDOR_INTEL, |
1da177e4 LT |
527 | }; |
528 | ||
10a434fc | 529 | cpu_dev_register(intel_cpu_dev); |
1da177e4 | 530 |