x86/asm: Get rid of __read_cr4_safe()
author		Andy Lutomirski <luto@kernel.org>
Thu, 29 Sep 2016 19:48:12 +0000 (12:48 -0700)
committer	Thomas Gleixner <tglx@linutronix.de>
Fri, 30 Sep 2016 10:40:12 +0000 (12:40 +0200)
We use __read_cr4() vs __read_cr4_safe() inconsistently.  On
CR4-less CPUs, all CR4 bits are effectively clear, so we can make
the code simpler and more robust by making __read_cr4() always fix
up faults on 32-bit kernels.

This may fix some bugs on old 486-like CPUs, but I don't have any
easy way to test that.
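
Concretely, on 32-bit the patched native_read_cr4() now reads CR4 under an
exception table fixup, so the read simply returns 0 on CPUs that fault on
"mov %cr4" (excerpted from the special_insns.h hunk below):

	unsigned long val;
	/* If the mov at label 1 faults, the fixup jumps to label 2 and
	 * val keeps the 0 preloaded via the "0" (0) input constraint. */
	asm volatile("1: mov %%cr4, %0\n"
		     "2:\n"
		     _ASM_EXTABLE(1b, 2b)
		     : "=r" (val), "=m" (__force_order) : "0" (0));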

Signed-off-by: Andy Lutomirski <luto@kernel.org>
Cc: Brian Gerst <brgerst@gmail.com>
Cc: Borislav Petkov <bp@alien8.de>
Cc: david@saggiorato.net
Link: http://lkml.kernel.org/r/ea647033d357d9ce2ad2bbde5a631045f5052fb6.1475178370.git.luto@kernel.org
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
arch/x86/include/asm/paravirt.h
arch/x86/include/asm/paravirt_types.h
arch/x86/include/asm/special_insns.h
arch/x86/include/asm/tlbflush.h
arch/x86/kernel/paravirt.c
arch/x86/kernel/process_32.c
arch/x86/kernel/setup.c
arch/x86/power/cpu.c
arch/x86/xen/enlighten.c

diff --git a/arch/x86/include/asm/paravirt.h b/arch/x86/include/asm/paravirt.h
index 2970d22d77663f5299a8d49af690c3cf1aa7df60..91b6f4eed3fd1af6a248d552ac856b70171f192d 100644
--- a/arch/x86/include/asm/paravirt.h
+++ b/arch/x86/include/asm/paravirt.h
@@ -80,10 +80,6 @@ static inline unsigned long __read_cr4(void)
 {
        return PVOP_CALL0(unsigned long, pv_cpu_ops.read_cr4);
 }
-static inline unsigned long __read_cr4_safe(void)
-{
-       return PVOP_CALL0(unsigned long, pv_cpu_ops.read_cr4_safe);
-}
 
 static inline void __write_cr4(unsigned long x)
 {
diff --git a/arch/x86/include/asm/paravirt_types.h b/arch/x86/include/asm/paravirt_types.h
index 7fa9e7740ba3db18d04c8405f549183edf60e8a5..fcf243f077ace9b3b6bee909ce296d19d26de4d2 100644
--- a/arch/x86/include/asm/paravirt_types.h
+++ b/arch/x86/include/asm/paravirt_types.h
@@ -108,7 +108,6 @@ struct pv_cpu_ops {
        unsigned long (*read_cr0)(void);
        void (*write_cr0)(unsigned long);
 
-       unsigned long (*read_cr4_safe)(void);
        unsigned long (*read_cr4)(void);
        void (*write_cr4)(unsigned long);
 
diff --git a/arch/x86/include/asm/special_insns.h b/arch/x86/include/asm/special_insns.h
index 587d7914ea4b56a539d9877b1526139285165419..19a2224f9e16fc42a06828d9a49498337e525f38 100644
--- a/arch/x86/include/asm/special_insns.h
+++ b/arch/x86/include/asm/special_insns.h
@@ -59,22 +59,19 @@ static inline void native_write_cr3(unsigned long val)
 static inline unsigned long native_read_cr4(void)
 {
        unsigned long val;
-       asm volatile("mov %%cr4,%0\n\t" : "=r" (val), "=m" (__force_order));
-       return val;
-}
-
-static inline unsigned long native_read_cr4_safe(void)
-{
-       unsigned long val;
-       /* This could fault if %cr4 does not exist. In x86_64, a cr4 always
-        * exists, so it will never fail. */
 #ifdef CONFIG_X86_32
+       /*
+        * This could fault if CR4 does not exist.  Non-existent CR4
+        * is functionally equivalent to CR4 == 0.  Keep it simple and pretend
+        * that CR4 == 0 on CPUs that don't have CR4.
+        */
        asm volatile("1: mov %%cr4, %0\n"
                     "2:\n"
                     _ASM_EXTABLE(1b, 2b)
                     : "=r" (val), "=m" (__force_order) : "0" (0));
 #else
-       val = native_read_cr4();
+       /* CR4 always exists on x86_64. */
+       asm volatile("mov %%cr4,%0\n\t" : "=r" (val), "=m" (__force_order));
 #endif
        return val;
 }
@@ -182,11 +179,6 @@ static inline unsigned long __read_cr4(void)
        return native_read_cr4();
 }
 
-static inline unsigned long __read_cr4_safe(void)
-{
-       return native_read_cr4_safe();
-}
-
 static inline void __write_cr4(unsigned long x)
 {
        native_write_cr4(x);
diff --git a/arch/x86/include/asm/tlbflush.h b/arch/x86/include/asm/tlbflush.h
index dee8a70382ba711b8830970f520e5073aad95ed4..6fa85944af83d8ddbbad3a344a31a7920e64e6d0 100644
--- a/arch/x86/include/asm/tlbflush.h
+++ b/arch/x86/include/asm/tlbflush.h
@@ -81,7 +81,7 @@ DECLARE_PER_CPU_SHARED_ALIGNED(struct tlb_state, cpu_tlbstate);
 /* Initialize cr4 shadow for this CPU. */
 static inline void cr4_init_shadow(void)
 {
-       this_cpu_write(cpu_tlbstate.cr4, __read_cr4_safe());
+       this_cpu_write(cpu_tlbstate.cr4, __read_cr4());
 }
 
 /* Set in this cpu's CR4. */
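
The shadow seeded here is what the cr4_set_bits()/cr4_clear_bits() helpers in
the same header operate on; roughly (paraphrased sketch, not part of this
patch):

	static inline void cr4_set_bits(unsigned long mask)
	{
		unsigned long cr4;

		/* Use the cached value seeded by cr4_init_shadow() ... */
		cr4 = this_cpu_read(cpu_tlbstate.cr4);
		if ((cr4 | mask) != cr4) {
			cr4 |= mask;
			/* ... keep the shadow in sync and write the real CR4. */
			this_cpu_write(cpu_tlbstate.cr4, cr4);
			__write_cr4(cr4);
		}
	}

This is why the shadow must be initialized from a CR4 read that behaves
sanely (reads as 0) even on CPUs without CR4.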
diff --git a/arch/x86/kernel/paravirt.c b/arch/x86/kernel/paravirt.c
index bef340082d207f51acad6e4e3501aaffe15b95cb..bbf3d5933eaaf7efe4ab30003ab09e1ee5d78575 100644
--- a/arch/x86/kernel/paravirt.c
+++ b/arch/x86/kernel/paravirt.c
@@ -332,7 +332,6 @@ __visible struct pv_cpu_ops pv_cpu_ops = {
        .read_cr0 = native_read_cr0,
        .write_cr0 = native_write_cr0,
        .read_cr4 = native_read_cr4,
-       .read_cr4_safe = native_read_cr4_safe,
        .write_cr4 = native_write_cr4,
 #ifdef CONFIG_X86_64
        .read_cr8 = native_read_cr8,
diff --git a/arch/x86/kernel/process_32.c b/arch/x86/kernel/process_32.c
index 404efdfa083b45c9bf719181ab434d15121ed4c0..bd7be8efdc4ce7ae49f155b7f701f6d6d800293f 100644
--- a/arch/x86/kernel/process_32.c
+++ b/arch/x86/kernel/process_32.c
@@ -90,7 +90,7 @@ void __show_regs(struct pt_regs *regs, int all)
        cr0 = read_cr0();
        cr2 = read_cr2();
        cr3 = read_cr3();
-       cr4 = __read_cr4_safe();
+       cr4 = __read_cr4();
        printk(KERN_DEFAULT "CR0: %08lx CR2: %08lx CR3: %08lx CR4: %08lx\n",
                        cr0, cr2, cr3, cr4);
 
diff --git a/arch/x86/kernel/setup.c b/arch/x86/kernel/setup.c
index 87f2330cc805e603748aa11bb807b53d7bf4b8c5..3aabfdcbcb525e4d9fb22d9ab2771a1451e736fc 100644
--- a/arch/x86/kernel/setup.c
+++ b/arch/x86/kernel/setup.c
@@ -1137,7 +1137,7 @@ void __init setup_arch(char **cmdline_p)
         * auditing all the early-boot CR4 manipulation would be needed to
         * rule it out.
         */
-       mmu_cr4_features = __read_cr4_safe();
+       mmu_cr4_features = __read_cr4();
 
        memblock_set_current_limit(get_max_mapped());
 
diff --git a/arch/x86/power/cpu.c b/arch/x86/power/cpu.c
index b12c26e2e309541c7afdb587835d48589da5a4fd..53cace2ec0e2888baa5937edce817d00bd09de64 100644
--- a/arch/x86/power/cpu.c
+++ b/arch/x86/power/cpu.c
@@ -130,7 +130,7 @@ static void __save_processor_state(struct saved_context *ctxt)
        ctxt->cr0 = read_cr0();
        ctxt->cr2 = read_cr2();
        ctxt->cr3 = read_cr3();
-       ctxt->cr4 = __read_cr4_safe();
+       ctxt->cr4 = __read_cr4();
 #ifdef CONFIG_X86_64
        ctxt->cr8 = read_cr8();
 #endif
diff --git a/arch/x86/xen/enlighten.c b/arch/x86/xen/enlighten.c
index b86ebb1a9a7f45cff133b5e99b90e34734252d16..e2cf8fcea6bb274eeae5bc0b09b9693e4215d08a 100644
--- a/arch/x86/xen/enlighten.c
+++ b/arch/x86/xen/enlighten.c
@@ -1237,7 +1237,6 @@ static const struct pv_cpu_ops xen_cpu_ops __initconst = {
        .write_cr0 = xen_write_cr0,
 
        .read_cr4 = native_read_cr4,
-       .read_cr4_safe = native_read_cr4_safe,
        .write_cr4 = xen_write_cr4,
 
 #ifdef CONFIG_X86_64