if (c->x86_vendor != X86_VENDOR_UNKNOWN &&
cpu_devs[c->x86_vendor]->c_early_init)
cpu_devs[c->x86_vendor]->c_early_init(c);
+}
- early_get_cap(c);
+/*
+ * The NOPL instruction is supposed to exist on all CPUs with
+ * family >= 6; unfortunately, that's not true in practice because
+ * of early VIA chips and (more importantly) broken virtualizers that
+ * are not easy to detect. In the latter case it doesn't even *fail*
+ * reliably, so probing for it doesn't even work. Disable it completely
+ * unless we can find a reliable way to detect all the broken cases.
+ */
+static void __cpuinit detect_nopl(struct cpuinfo_x86 *c)
+{
+ clear_cpu_cap(c, X86_FEATURE_NOPL);
}
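
Why the explicit clear matters: X86_FEATURE_NOPL sits in the Linux-defined word 3 of x86_capability, which no CPUID leaf ever rewrites, so the bit would otherwise keep whatever value it inherited. A minimal sketch of the accessors involved, assuming the cpufeature.h macro shapes of this era (simplified, not part of this patch):

	/* Synthetic bits use the same bitmap helpers as CPUID-derived ones */
	#define set_cpu_cap(c, bit)	set_bit(bit, (unsigned long *)((c)->x86_capability))
	#define clear_cpu_cap(c, bit)	clear_bit(bit, (unsigned long *)((c)->x86_capability))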
+ /*
+ * The NOPL instruction is supposed to exist on all CPUs with
+ * family >= 6; unfortunately, that's not true in practice because
+ * of early VIA chips and (more importantly) broken virtualizers that
+ * are not easy to detect. Hence, probe for it based on first
+ * principles.
+ */
+ static void __cpuinit detect_nopl(struct cpuinfo_x86 *c)
+ {
+ const u32 nopl_signature = 0x888c53b1; /* Random number */
+ u32 has_nopl = nopl_signature;
+
+ clear_cpu_cap(c, X86_FEATURE_NOPL);
+ if (c->x86 >= 6) {
+ asm volatile("\n"
+ "1: .byte 0x0f,0x1f,0xc0\n" /* nopl %eax */
+ "2:\n"
+ " .section .fixup,\"ax\"\n"
+ "3: xor %0,%0\n"
+ " jmp 2b\n"
+ " .previous\n"
+ _ASM_EXTABLE(1b,3b)
+ : "+a" (has_nopl));
+
+ if (has_nopl == nopl_signature)
+ set_cpu_cap(c, X86_FEATURE_NOPL);
+ }
+ }
+
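The probe above leans on the kernel exception table: _ASM_EXTABLE(1b,3b) registers a fixup, so if the 3-byte NOPL raises #UD the trap handler lands at 3:, zeroes the signature, and the feature bit stays clear. The same experiment can be run in userspace with a SIGILL handler standing in for the fixup; a self-contained sketch (my construction, not from this series):

	#include <setjmp.h>
	#include <signal.h>
	#include <stdio.h>

	static sigjmp_buf nopl_env;

	static void sigill(int sig)
	{
		siglongjmp(nopl_env, 1);	/* skip the faulting instruction */
	}

	int main(void)
	{
		int has_nopl = 0;

		signal(SIGILL, sigill);
		if (sigsetjmp(nopl_env, 1) == 0) {
			/* 0f 1f c0 = nopl %eax; raises #UD where unsupported */
			asm volatile(".byte 0x0f, 0x1f, 0xc0");
			has_nopl = 1;
		}
		printf("nopl: %ssupported\n", has_nopl ? "" : "not ");
		return 0;
	}

Note this still cannot distinguish the broken virtualizers the first comment warns about, where NOPL neither executes correctly nor faults reliably.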
static void __cpuinit generic_identify(struct cpuinfo_x86 *c)
{
u32 tfms, xlvl;
#define X86_FEATURE_CYRIX_ARR (3*32+ 2) /* Cyrix ARRs (= MTRRs) */
#define X86_FEATURE_CENTAUR_MCR (3*32+ 3) /* Centaur MCRs (= MTRRs) */
/* cpu types for specific tunings: */
- #define X86_FEATURE_K8 (3*32+ 4) /* Opteron, Athlon64 */
- #define X86_FEATURE_K7 (3*32+ 5) /* Athlon */
- #define X86_FEATURE_P3 (3*32+ 6) /* P3 */
- #define X86_FEATURE_P4 (3*32+ 7) /* P4 */
+ #define X86_FEATURE_K8 (3*32+ 4) /* "" Opteron, Athlon64 */
+ #define X86_FEATURE_K7 (3*32+ 5) /* "" Athlon */
+ #define X86_FEATURE_P3 (3*32+ 6) /* "" P3 */
+ #define X86_FEATURE_P4 (3*32+ 7) /* "" P4 */
#define X86_FEATURE_CONSTANT_TSC (3*32+ 8) /* TSC ticks at a constant rate */
#define X86_FEATURE_UP (3*32+ 9) /* smp kernel running on up */
- #define X86_FEATURE_FXSAVE_LEAK (3*32+10) /* FXSAVE leaks FOP/FIP/FOP */
+ #define X86_FEATURE_FXSAVE_LEAK (3*32+10) /* "" FXSAVE leaks FOP/FIP/FOP */
#define X86_FEATURE_ARCH_PERFMON (3*32+11) /* Intel Architectural PerfMon */
#define X86_FEATURE_PEBS (3*32+12) /* Precise-Event Based Sampling */
#define X86_FEATURE_BTS (3*32+13) /* Branch Trace Store */
- #define X86_FEATURE_SYSCALL32 (3*32+14) /* syscall in ia32 userspace */
- #define X86_FEATURE_SYSENTER32 (3*32+15) /* sysenter in ia32 userspace */
- #define X86_FEATURE_REP_GOOD (3*32+16) /* rep microcode works well on this CPU */
- #define X86_FEATURE_MFENCE_RDTSC (3*32+17) /* Mfence synchronizes RDTSC */
- #define X86_FEATURE_LFENCE_RDTSC (3*32+18) /* Lfence synchronizes RDTSC */
- #define X86_FEATURE_11AP (3*32+19) /* Bad local APIC aka 11AP */
+ #define X86_FEATURE_SYSCALL32 (3*32+14) /* "" syscall in ia32 userspace */
+ #define X86_FEATURE_SYSENTER32 (3*32+15) /* "" sysenter in ia32 userspace */
+ #define X86_FEATURE_REP_GOOD (3*32+16) /* rep microcode works well */
+ #define X86_FEATURE_MFENCE_RDTSC (3*32+17) /* "" Mfence synchronizes RDTSC */
+ #define X86_FEATURE_LFENCE_RDTSC (3*32+18) /* "" Lfence synchronizes RDTSC */
+ #define X86_FEATURE_11AP (3*32+19) /* "" Bad local APIC aka 11AP */
+ #define X86_FEATURE_NOPL (3*32+20) /* The NOPL (0F 1F) instructions */
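
The "" markers added throughout this hunk are what keep kernel-internal bits out of /proc/cpuinfo: the flag-name table x86_cap_flags[] is generated from these comments, and a comment beginning with "" yields a NULL name that the printing loop skips. Roughly the shape of that loop in arch/x86/kernel/cpu/proc.c (simplified; generation script and loop details assumed):

	for (i = 0; i < 32 * NCAPINTS; i++)
		if (cpu_has(c, i) && x86_cap_flags[i] != NULL)
			seq_printf(m, " %s", x86_cap_flags[i]);

Because the new NOPL entry carries a real comment rather than a "" one, a "nopl" flag shows up in /proc/cpuinfo on CPUs where the probe succeeds.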
/* Intel-defined CPU features, CPUID level 0x00000001 (ecx), word 4 */