x86: merge mmu_context.h
author    Brian Gerst <brgerst@gmail.com>
          Wed, 21 Jan 2009 08:26:06 +0000 (17:26 +0900)
committer Tejun Heo <tj@kernel.org>
          Wed, 21 Jan 2009 08:26:06 +0000 (17:26 +0900)
Impact: cleanup

tj: * changed cpu to unsigned, as was done in mmu_context_64.h, since the
      cpu id is officially an unsigned int
    * added missing ';' to 32bit version of deactivate_mm()

Signed-off-by: Brian Gerst <brgerst@gmail.com>
Signed-off-by: Tejun Heo <tj@kernel.org>
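
A note on the second tj fix above: the merged deactivate_mm() wraps its body in
do { ... } while (0), so each statement inside the braces (presumably the
loadsegment() call in the 32-bit variant) needs its own terminating ';', while
the macro deliberately omits the ';' after "while (0)" so the call site supplies
it and the macro nests cleanly in if/else. A minimal, self-contained
illustration of the pattern follows; RESET_STATE is a hypothetical stand-in,
not kernel code:

#include <stdio.h>

/* Hypothetical stand-in for a multi-statement macro such as deactivate_mm(). */
#define RESET_STATE(x)                                          \
do {                                                            \
        (x) = 0;                 /* needs its own ';' */        \
        printf("state reset\n");                                \
} while (0)                      /* no trailing ';' here -- the caller adds it */

int main(void)
{
        int flag = 1;

        if (flag)
                RESET_STATE(flag);      /* expands to do { ... } while (0); */
        else
                printf("flag already clear\n");

        return 0;
}
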
arch/x86/include/asm/mmu_context.h
arch/x86/include/asm/mmu_context_32.h [deleted file]
arch/x86/include/asm/mmu_context_64.h [deleted file]
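
For readers new to these hooks: switch_mm() and enter_lazy_tlb() are called
from the scheduler's context-switch path; a user task gets a real page-table
switch, while a kernel thread borrows the previous mm and enters lazy TLB
mode. The sketch below illustrates that call pattern, loosely modeled on
context_switch() in kernel/sched.c of this era; locking, tracing and the
low-level switch_to() step are omitted, and the function name is invented
for illustration:

/* Simplified sketch only -- not the actual kernel/sched.c implementation. */
static void context_switch_sketch(struct rq *rq, struct task_struct *prev,
                                  struct task_struct *next)
{
        struct mm_struct *mm = next->mm;
        struct mm_struct *oldmm = prev->active_mm;

        if (unlikely(!mm)) {
                /* kernel thread: borrow the previous mm and go lazy */
                next->active_mm = oldmm;
                atomic_inc(&oldmm->mm_count);
                enter_lazy_tlb(oldmm, next);
        } else {
                /* user task: switch CR3, cpu_vm_mask, TLB state and LDT */
                switch_mm(oldmm, mm, next);
        }

        if (unlikely(!prev->mm)) {
                /* previous task was a kernel thread: drop the borrowed mm */
                prev->active_mm = NULL;
                rq->prev_mm = oldmm;
        }

        /* ... the low-level register/stack switch (switch_to()) follows ... */
}
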

diff --git a/arch/x86/include/asm/mmu_context.h b/arch/x86/include/asm/mmu_context.h
index 8aeeb3fd73db8a90369b60adb747719cdeb8f58d..52948df9cd1da52955cca670469800598319314f 100644
--- a/arch/x86/include/asm/mmu_context.h
+++ b/arch/x86/include/asm/mmu_context.h
@@ -21,11 +21,54 @@ static inline void paravirt_activate_mm(struct mm_struct *prev,
 int init_new_context(struct task_struct *tsk, struct mm_struct *mm);
 void destroy_context(struct mm_struct *mm);
 
-#ifdef CONFIG_X86_32
-# include "mmu_context_32.h"
-#else
-# include "mmu_context_64.h"
+
+static inline void enter_lazy_tlb(struct mm_struct *mm, struct task_struct *tsk)
+{
+#ifdef CONFIG_SMP
+       if (percpu_read(cpu_tlbstate.state) == TLBSTATE_OK)
+               percpu_write(cpu_tlbstate.state, TLBSTATE_LAZY);
+#endif
+}
+
+static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next,
+                            struct task_struct *tsk)
+{
+       unsigned cpu = smp_processor_id();
+
+       if (likely(prev != next)) {
+               /* stop flush ipis for the previous mm */
+               cpu_clear(cpu, prev->cpu_vm_mask);
+#ifdef CONFIG_SMP
+               percpu_write(cpu_tlbstate.state, TLBSTATE_OK);
+               percpu_write(cpu_tlbstate.active_mm, next);
 #endif
+               cpu_set(cpu, next->cpu_vm_mask);
+
+               /* Re-load page tables */
+               load_cr3(next->pgd);
+
+               /*
+                * load the LDT, if the LDT is different:
+                */
+               if (unlikely(prev->context.ldt != next->context.ldt))
+                       load_LDT_nolock(&next->context);
+       }
+#ifdef CONFIG_SMP
+       else {
+               percpu_write(cpu_tlbstate.state, TLBSTATE_OK);
+               BUG_ON(percpu_read(cpu_tlbstate.active_mm) != next);
+
+               if (!cpu_test_and_set(cpu, next->cpu_vm_mask)) {
+                       /* We were in lazy tlb mode and leave_mm disabled
+                        * tlb flush IPI delivery. We must reload CR3
+                        * to make sure to use no freed page tables.
+                        */
+                       load_cr3(next->pgd);
+                       load_LDT_nolock(&next->context);
+               }
+       }
+#endif
+}
 
 #define activate_mm(prev, next)                        \
 do {                                           \
@@ -33,5 +76,17 @@ do {                                         \
        switch_mm((prev), (next), NULL);        \
 } while (0);
 
+#ifdef CONFIG_X86_32
+#define deactivate_mm(tsk, mm)                 \
+do {                                           \
+       loadsegment(gs, 0);                     \
+} while (0)
+#else
+#define deactivate_mm(tsk, mm)                 \
+do {                                           \
+       load_gs_index(0);                       \
+       loadsegment(fs, 0);                     \
+} while (0)
+#endif
 
 #endif /* _ASM_X86_MMU_CONTEXT_H */
diff --git a/arch/x86/include/asm/mmu_context_32.h b/arch/x86/include/asm/mmu_context_32.h
deleted file mode 100644
index 08b5345..0000000
--- a/arch/x86/include/asm/mmu_context_32.h
+++ /dev/null
@@ -1,55 +0,0 @@
-#ifndef _ASM_X86_MMU_CONTEXT_32_H
-#define _ASM_X86_MMU_CONTEXT_32_H
-
-static inline void enter_lazy_tlb(struct mm_struct *mm, struct task_struct *tsk)
-{
-#ifdef CONFIG_SMP
-       if (percpu_read(cpu_tlbstate.state) == TLBSTATE_OK)
-               percpu_write(cpu_tlbstate.state, TLBSTATE_LAZY);
-#endif
-}
-
-static inline void switch_mm(struct mm_struct *prev,
-                            struct mm_struct *next,
-                            struct task_struct *tsk)
-{
-       int cpu = smp_processor_id();
-
-       if (likely(prev != next)) {
-               /* stop flush ipis for the previous mm */
-               cpu_clear(cpu, prev->cpu_vm_mask);
-#ifdef CONFIG_SMP
-               percpu_write(cpu_tlbstate.state, TLBSTATE_OK);
-               percpu_write(cpu_tlbstate.active_mm, next);
-#endif
-               cpu_set(cpu, next->cpu_vm_mask);
-
-               /* Re-load page tables */
-               load_cr3(next->pgd);
-
-               /*
-                * load the LDT, if the LDT is different:
-                */
-               if (unlikely(prev->context.ldt != next->context.ldt))
-                       load_LDT_nolock(&next->context);
-       }
-#ifdef CONFIG_SMP
-       else {
-               percpu_write(cpu_tlbstate.state, TLBSTATE_OK);
-               BUG_ON(percpu_read(cpu_tlbstate.active_mm) != next);
-
-               if (!cpu_test_and_set(cpu, next->cpu_vm_mask)) {
-                       /* We were in lazy tlb mode and leave_mm disabled
-                        * tlb flush IPI delivery. We must reload %cr3.
-                        */
-                       load_cr3(next->pgd);
-                       load_LDT_nolock(&next->context);
-               }
-       }
-#endif
-}
-
-#define deactivate_mm(tsk, mm)                 \
-       asm("movl %0,%%gs": :"r" (0));
-
-#endif /* _ASM_X86_MMU_CONTEXT_32_H */
diff --git a/arch/x86/include/asm/mmu_context_64.h b/arch/x86/include/asm/mmu_context_64.h
deleted file mode 100644
index c457250..0000000
--- a/arch/x86/include/asm/mmu_context_64.h
+++ /dev/null
@@ -1,52 +0,0 @@
-#ifndef _ASM_X86_MMU_CONTEXT_64_H
-#define _ASM_X86_MMU_CONTEXT_64_H
-
-static inline void enter_lazy_tlb(struct mm_struct *mm, struct task_struct *tsk)
-{
-#ifdef CONFIG_SMP
-       if (percpu_read(cpu_tlbstate.state) == TLBSTATE_OK)
-               percpu_write(cpu_tlbstate.state, TLBSTATE_LAZY);
-#endif
-}
-
-static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next,
-                            struct task_struct *tsk)
-{
-       unsigned cpu = smp_processor_id();
-       if (likely(prev != next)) {
-               /* stop flush ipis for the previous mm */
-               cpu_clear(cpu, prev->cpu_vm_mask);
-#ifdef CONFIG_SMP
-               percpu_write(cpu_tlbstate.state, TLBSTATE_OK);
-               percpu_write(cpu_tlbstate.active_mm, next);
-#endif
-               cpu_set(cpu, next->cpu_vm_mask);
-               load_cr3(next->pgd);
-
-               if (unlikely(next->context.ldt != prev->context.ldt))
-                       load_LDT_nolock(&next->context);
-       }
-#ifdef CONFIG_SMP
-       else {
-               percpu_write(cpu_tlbstate.state, TLBSTATE_OK);
-               BUG_ON(percpu_read(cpu_tlbstate.active_mm) != next);
-
-               if (!cpu_test_and_set(cpu, next->cpu_vm_mask)) {
-                       /* We were in lazy tlb mode and leave_mm disabled
-                        * tlb flush IPI delivery. We must reload CR3
-                        * to make sure to use no freed page tables.
-                        */
-                       load_cr3(next->pgd);
-                       load_LDT_nolock(&next->context);
-               }
-       }
-#endif
-}
-
-#define deactivate_mm(tsk, mm)                 \
-do {                                           \
-       load_gs_index(0);                       \
-       asm volatile("movl %0,%%fs"::"r"(0));   \
-} while (0)
-
-#endif /* _ASM_X86_MMU_CONTEXT_64_H */