[PATCH] x86: more asm cleanups
author	Zachary Amsden <zach@vmware.com>
Sat, 3 Sep 2005 22:56:42 +0000 (15:56 -0700)
committer	Linus Torvalds <torvalds@evo.osdl.org>
Mon, 5 Sep 2005 07:06:12 +0000 (00:06 -0700)
Some more assembler cleanups I noticed along the way.

Open-coded cpuid is replaced with cpuid_count(), cli/hlt sequences with
local_irq_disable()/halt(), lidtl/lgdtl with load_idt()/load_gdt(), and the
exception-fixup asm in the msr driver with rdmsr_safe()/wrmsr_safe().
rdmsr_safe() is added to include/asm-i386/msr.h to match the existing
wrmsr_safe().

Signed-off-by: Zachary Amsden <zach@vmware.com>
Cc: "H. Peter Anvin" <hpa@zytor.com>
Signed-off-by: Andrew Morton <akpm@osdl.org>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
arch/i386/kernel/cpu/intel.c
arch/i386/kernel/crash.c
arch/i386/kernel/machine_kexec.c
arch/i386/kernel/msr.c
arch/i386/kernel/process.c
arch/i386/mach-voyager/voyager_basic.c
arch/i386/mach-voyager/voyager_smp.c
include/asm-i386/msr.h

diff --git a/arch/i386/kernel/cpu/intel.c b/arch/i386/kernel/cpu/intel.c
index a2c33c1a46c5c9a620c3d33c5372ead800120a44..43601de0f6331415aaa8b19594112a3de080ae4f 100644
--- a/arch/i386/kernel/cpu/intel.c
+++ b/arch/i386/kernel/cpu/intel.c
@@ -82,16 +82,13 @@ static void __devinit Intel_errata_workarounds(struct cpuinfo_x86 *c)
  */
 static int __devinit num_cpu_cores(struct cpuinfo_x86 *c)
 {
-       unsigned int eax;
+       unsigned int eax, ebx, ecx, edx;
 
        if (c->cpuid_level < 4)
                return 1;
 
-       __asm__("cpuid"
-               : "=a" (eax)
-               : "0" (4), "c" (0)
-               : "bx", "dx");
-
+       /* Intel has a non-standard dependency on %ecx for this CPUID level. */
+       cpuid_count(4, 0, &eax, &ebx, &ecx, &edx);
        if (eax & 0x1f)
                return ((eax >> 26) + 1);
        else
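For reference, cpuid_count() is the existing helper in <asm-i386/processor.h>
that feeds the sub-leaf index in through %ecx, i.e. exactly what the removed
asm did by hand.  Roughly (an approximate sketch of the helper as it stands
around this point in the tree, not part of this patch):

/* Some CPUID levels (here level 4) want a sub-leaf ("count") in %ecx. */
static inline void cpuid_count(int op, int count, int *eax, int *ebx,
			       int *ecx, int *edx)
{
	__asm__("cpuid"
		: "=a" (*eax), "=b" (*ebx), "=c" (*ecx), "=d" (*edx)
		: "0" (op), "c" (count));
}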
diff --git a/arch/i386/kernel/crash.c b/arch/i386/kernel/crash.c
index e5fab12f79261a7856d298ac92765d1efa4d075b..913be77bb8446569d99175f0ff6556567bea531b 100644
--- a/arch/i386/kernel/crash.c
+++ b/arch/i386/kernel/crash.c
@@ -153,7 +153,7 @@ static int crash_nmi_callback(struct pt_regs *regs, int cpu)
        disable_local_APIC();
        atomic_dec(&waiting_for_crash_ipi);
        /* Assume hlt works */
-       __asm__("hlt");
+       halt();
        for(;;);
 
        return 1;
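halt() here, and local_irq_disable() in the hunks further down, are thin
wrappers around the instructions that were previously open-coded.  On i386
they expand to roughly the following (approximate, for reference only, not
part of this patch):

/* <asm-i386/system.h>, approximately */
#define local_irq_disable()	__asm__ __volatile__("cli" : : : "memory")
#define halt()			__asm__ __volatile__("hlt" : : : "memory")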
diff --git a/arch/i386/kernel/machine_kexec.c b/arch/i386/kernel/machine_kexec.c
index f19f6d34bcbff9506c1a619a34ff50e7c246bd74..a912fed4848273381ce5d0d3025dc81a1c7deff4 100644
--- a/arch/i386/kernel/machine_kexec.c
+++ b/arch/i386/kernel/machine_kexec.c
@@ -93,10 +93,7 @@ static void set_idt(void *newidt, __u16 limit)
        curidt.size    = limit;
        curidt.address = (unsigned long)newidt;
 
-       __asm__ __volatile__ (
-               "lidtl %0\n"
-               : : "m" (curidt)
-               );
+       load_idt(&curidt);
 };
 
 
@@ -108,10 +105,7 @@ static void set_gdt(void *newgdt, __u16 limit)
        curgdt.size    = limit;
        curgdt.address = (unsigned long)newgdt;
 
-       __asm__ __volatile__ (
-               "lgdtl %0\n"
-               : : "m" (curgdt)
-               );
+       load_gdt(&curgdt);
 };
 
 static void load_segments(void)
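load_idt()/load_gdt() take a pointer to the same { size, address } descriptor
that set_idt()/set_gdt() fill in; something like the following should already
be in <asm-i386/desc.h> at this point (approximate sketch, not part of this
patch; the exact definitions may differ):

/* Load a descriptor-table register from a { size, address } descriptor. */
#define load_gdt(dtr)	__asm__ __volatile__("lgdt %0" : : "m" (*(dtr)))
#define load_idt(dtr)	__asm__ __volatile__("lidt %0" : : "m" (*(dtr)))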
diff --git a/arch/i386/kernel/msr.c b/arch/i386/kernel/msr.c
index b2f03c39a6fed5eebcd381776ae0948e3f280d4a..03100d6fc5d6304522553d82d73f51b33f3e3f60 100644
--- a/arch/i386/kernel/msr.c
+++ b/arch/i386/kernel/msr.c
@@ -46,23 +46,13 @@
 
 static struct class *msr_class;
 
-/* Note: "err" is handled in a funny way below.  Otherwise one version
-   of gcc or another breaks. */
-
 static inline int wrmsr_eio(u32 reg, u32 eax, u32 edx)
 {
        int err;
 
-       asm volatile ("1:       wrmsr\n"
-                     "2:\n"
-                     ".section .fixup,\"ax\"\n"
-                     "3:       movl %4,%0\n"
-                     " jmp 2b\n"
-                     ".previous\n"
-                     ".section __ex_table,\"a\"\n"
-                     " .align 4\n" "   .long 1b,3b\n" ".previous":"=&bDS" (err)
-                     :"a"(eax), "d"(edx), "c"(reg), "i"(-EIO), "0"(0));
-
+       err = wrmsr_safe(reg, eax, edx);
+       if (err)
+               err = -EIO;
        return err;
 }
 
@@ -70,18 +60,9 @@ static inline int rdmsr_eio(u32 reg, u32 *eax, u32 *edx)
 {
        int err;
 
-       asm volatile ("1:       rdmsr\n"
-                     "2:\n"
-                     ".section .fixup,\"ax\"\n"
-                     "3:       movl %4,%0\n"
-                     " jmp 2b\n"
-                     ".previous\n"
-                     ".section __ex_table,\"a\"\n"
-                     " .align 4\n"
-                     " .long 1b,3b\n"
-                     ".previous":"=&bDS" (err), "=a"(*eax), "=d"(*edx)
-                     :"c"(reg), "i"(-EIO), "0"(0));
-
+       err = rdmsr_safe(reg, eax, edx);
+       if (err)
+               err = -EIO;
        return err;
 }
 
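For reference, the wrmsr_safe() that the new wrmsr_eio() relies on already
lives in <asm-i386/msr.h> (its tail shows up as context in the msr.h hunk
below).  It returns non-zero when the wrmsr faults, which the driver maps to
-EIO.  Approximately (not part of this patch):

/* wrmsr with exception handling (approximate, for reference only) */
#define wrmsr_safe(msr,a,b) ({ int ret__;			\
	asm volatile("2: wrmsr ; xorl %0,%0\n"			\
		     "1:\n\t"					\
		     ".section .fixup,\"ax\"\n\t"		\
		     "3:  movl %4,%0 ; jmp 1b\n\t"		\
		     ".previous\n\t"				\
		     ".section __ex_table,\"a\"\n"		\
		     "   .align 4\n\t"				\
		     "   .long 2b,3b\n\t"			\
		     ".previous"				\
		     : "=a" (ret__)				\
		     : "c" (msr), "0" (a), "d" (b), "i" (-EFAULT));\
	ret__; })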
diff --git a/arch/i386/kernel/process.c b/arch/i386/kernel/process.c
index 9d94995e967292ced08f96dfa21e656f234797cd..66099780039388c80367c99f01431c1c7957cd72 100644
--- a/arch/i386/kernel/process.c
+++ b/arch/i386/kernel/process.c
@@ -164,7 +164,7 @@ static inline void play_dead(void)
         */
        local_irq_disable();
        while (1)
-               __asm__ __volatile__("hlt":::"memory");
+               halt();
 }
 #else
 static inline void play_dead(void)
diff --git a/arch/i386/mach-voyager/voyager_basic.c b/arch/i386/mach-voyager/voyager_basic.c
index c6384061328a5d72acaed91f216c26fef86a8c91..cc69875d979b9445b3ebcc0549906656ee3f2767 100644
--- a/arch/i386/mach-voyager/voyager_basic.c
+++ b/arch/i386/mach-voyager/voyager_basic.c
@@ -234,10 +234,9 @@ voyager_power_off(void)
 #endif
        }
        /* and wait for it to happen */
-       for(;;) {
-               __asm("cli");
-               __asm("hlt");
-       }
+       local_irq_disable();
+       for(;;)
+               halt();
 }
 
 /* copied from process.c */
@@ -278,10 +277,9 @@ machine_restart(char *cmd)
                outb(basebd | 0x08, VOYAGER_MC_SETUP);
                outb(0x02, catbase + 0x21);
        }
-       for(;;) {
-               asm("cli");
-               asm("hlt");
-       }
+       local_irq_disable();
+       for(;;)
+               halt();
 }
 
 void
diff --git a/arch/i386/mach-voyager/voyager_smp.c b/arch/i386/mach-voyager/voyager_smp.c
index 0e1f4208b07ce46ecc8d883dc3f2121fb07f5c92..16790b7986136375ff64905a26cc7ce6fe3c8c97 100644
--- a/arch/i386/mach-voyager/voyager_smp.c
+++ b/arch/i386/mach-voyager/voyager_smp.c
@@ -1015,7 +1015,7 @@ smp_stop_cpu_function(void *dummy)
        cpu_clear(smp_processor_id(), cpu_online_map);
        local_irq_disable();
        for(;;)
-              __asm__("hlt");
+               halt();
 }
 
 static DEFINE_SPINLOCK(call_lock);
diff --git a/include/asm-i386/msr.h b/include/asm-i386/msr.h
index c76fce8badbbb32333691d293b4c1b172b7f3e58..62b76cd96957da8ddfca0a0d1a1508b373be8ab5 100644
--- a/include/asm-i386/msr.h
+++ b/include/asm-i386/msr.h
@@ -47,6 +47,21 @@ static inline void wrmsrl (unsigned long msr, unsigned long long val)
                     : "c" (msr), "0" (a), "d" (b), "i" (-EFAULT));\
        ret__; })
 
+/* rdmsr with exception handling */
+#define rdmsr_safe(msr,a,b) ({ int ret__;                                              \
+       asm volatile("2: rdmsr ; xorl %0,%0\n"                                          \
+                    "1:\n\t"                                                           \
+                    ".section .fixup,\"ax\"\n\t"                                       \
+                    "3:  movl %4,%0 ; jmp 1b\n\t"                                      \
+                    ".previous\n\t"                                                    \
+                    ".section __ex_table,\"a\"\n"                                      \
+                    "   .align 4\n\t"                                                  \
+                    "   .long  2b,3b\n\t"                                              \
+                    ".previous"                                                        \
+                    : "=r" (ret__), "=a" (*(a)), "=d" (*(b))                           \
+                    : "c" (msr), "i" (-EFAULT));\
+       ret__; })
+
 #define rdtsc(low,high) \
      __asm__ __volatile__("rdtsc" : "=a" (low), "=d" (high))
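A hypothetical caller, just to illustrate how the new rdmsr_safe() is meant to
be used (the function name is made up for this example; MSR index 0x17 is
IA32_PLATFORM_ID):

/* Example only: probe an MSR that may not exist; 0 on success, else -EIO. */
static int probe_platform_id(u32 *lo, u32 *hi)
{
	/* rdmsr_safe() returns non-zero if the rdmsr raised #GP. */
	if (rdmsr_safe(0x17, lo, hi))
		return -EIO;
	return 0;
}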