powerpc: mmu_gather rework
authorPeter Zijlstra <a.p.zijlstra@chello.nl>
Wed, 25 May 2011 00:11:48 +0000 (17:11 -0700)
committerLinus Torvalds <torvalds@linux-foundation.org>
Wed, 25 May 2011 15:39:13 +0000 (08:39 -0700)
Fix up powerpc to the new mmu_gather stuff.

PPC has an extra batching queue to RCU free the actual pagetable
allocations; use the ARCH extensions for that for now.

For the ppc64_tlb_batch, which tracks the vaddrs to unhash from the
hardware hash-table, keep using per-cpu arrays but flush on context switch
and use a TLF bit to track the lazy_mmu state.

Signed-off-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
Acked-by: Benjamin Herrenschmidt <benh@kernel.crashing.org>
Cc: David Miller <davem@davemloft.net>
Cc: Martin Schwidefsky <schwidefsky@de.ibm.com>
Cc: Russell King <rmk@arm.linux.org.uk>
Cc: Paul Mundt <lethal@linux-sh.org>
Cc: Jeff Dike <jdike@addtoit.com>
Cc: Richard Weinberger <richard@nod.at>
Cc: Tony Luck <tony.luck@intel.com>
Cc: KAMEZAWA Hiroyuki <kamezawa.hiroyu@jp.fujitsu.com>
Cc: Hugh Dickins <hughd@google.com>
Cc: Mel Gorman <mel@csn.ul.ie>
Cc: KOSAKI Motohiro <kosaki.motohiro@jp.fujitsu.com>
Cc: Nick Piggin <npiggin@kernel.dk>
Cc: Namhyung Kim <namhyung@gmail.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
arch/powerpc/include/asm/pgalloc.h
arch/powerpc/include/asm/thread_info.h
arch/powerpc/include/asm/tlb.h
arch/powerpc/kernel/process.c
arch/powerpc/mm/pgtable.c
arch/powerpc/mm/tlb_hash32.c
arch/powerpc/mm/tlb_hash64.c
arch/powerpc/mm/tlb_nohash.c

index abe8532bd14e4dfc27693cfadce3d49a78380881..df1b4cbb2e70c0a263cd3e676374c0568f703c89 100644 (file)
@@ -32,13 +32,13 @@ static inline void pte_free(struct mm_struct *mm, pgtable_t ptepage)
 
 #ifdef CONFIG_SMP
 extern void pgtable_free_tlb(struct mmu_gather *tlb, void *table, unsigned shift);
-extern void pte_free_finish(void);
+extern void pte_free_finish(struct mmu_gather *tlb);
 #else /* CONFIG_SMP */
 static inline void pgtable_free_tlb(struct mmu_gather *tlb, void *table, unsigned shift)
 {
        pgtable_free(table, shift);
 }
-static inline void pte_free_finish(void) { }
+static inline void pte_free_finish(struct mmu_gather *tlb) { }
 #endif /* !CONFIG_SMP */
 
 static inline void __pte_free_tlb(struct mmu_gather *tlb, struct page *ptepage,
index d8529ef13b2329352a6b85a92adb2574ed768607..37c353e8af7c783fba4b8510e259b714bcc05f55 100644 (file)
@@ -139,10 +139,12 @@ static inline struct thread_info *current_thread_info(void)
 #define TLF_NAPPING            0       /* idle thread enabled NAP mode */
 #define TLF_SLEEPING           1       /* suspend code enabled SLEEP mode */
 #define TLF_RESTORE_SIGMASK    2       /* Restore signal mask in do_signal */
+#define TLF_LAZY_MMU           3       /* tlb_batch is active */
 
 #define _TLF_NAPPING           (1 << TLF_NAPPING)
 #define _TLF_SLEEPING          (1 << TLF_SLEEPING)
 #define _TLF_RESTORE_SIGMASK   (1 << TLF_RESTORE_SIGMASK)
+#define _TLF_LAZY_MMU          (1 << TLF_LAZY_MMU)
 
 #ifndef __ASSEMBLY__
 #define HAVE_SET_RESTORE_SIGMASK       1
index e2b428b0f7babd3626e880be36b45a682a95b1ab..8f0ed7adcd1211daf5b80e6f052516ce1805fe96 100644 (file)
 #define tlb_start_vma(tlb, vma)        do { } while (0)
 #define tlb_end_vma(tlb, vma)  do { } while (0)
 
+#define HAVE_ARCH_MMU_GATHER 1
+
+struct pte_freelist_batch;
+
+struct arch_mmu_gather {
+       struct pte_freelist_batch *batch;
+};
+
+#define ARCH_MMU_GATHER_INIT (struct arch_mmu_gather){ .batch = NULL, }
+
 extern void tlb_flush(struct mmu_gather *tlb);
 
 /* Get the generic bits... */
index 095043d79946cfbfd8c9f15e56584ef393192456..91e52df3d81d19ec7a22bba96fee3ea2d12b4002 100644 (file)
@@ -395,6 +395,9 @@ struct task_struct *__switch_to(struct task_struct *prev,
        struct thread_struct *new_thread, *old_thread;
        unsigned long flags;
        struct task_struct *last;
+#ifdef CONFIG_PPC_BOOK3S_64
+       struct ppc64_tlb_batch *batch;
+#endif
 
 #ifdef CONFIG_SMP
        /* avoid complexity of lazy save/restore of fpu
@@ -513,7 +516,17 @@ struct task_struct *__switch_to(struct task_struct *prev,
                old_thread->accum_tb += (current_tb - start_tb);
                new_thread->start_tb = current_tb;
        }
-#endif
+#endif /* CONFIG_PPC64 */
+
+#ifdef CONFIG_PPC_BOOK3S_64
+       batch = &__get_cpu_var(ppc64_tlb_batch);
+       if (batch->active) {
+               current_thread_info()->local_flags |= _TLF_LAZY_MMU;
+               if (batch->index)
+                       __flush_tlb_pending(batch);
+               batch->active = 0;
+       }
+#endif /* CONFIG_PPC_BOOK3S_64 */
 
        local_irq_save(flags);
 
@@ -528,6 +541,14 @@ struct task_struct *__switch_to(struct task_struct *prev,
        hard_irq_disable();
        last = _switch(old_thread, new_thread);
 
+#ifdef CONFIG_PPC_BOOK3S_64
+       if (current_thread_info()->local_flags & _TLF_LAZY_MMU) {
+               current_thread_info()->local_flags &= ~_TLF_LAZY_MMU;
+               batch = &__get_cpu_var(ppc64_tlb_batch);
+               batch->active = 1;
+       }
+#endif /* CONFIG_PPC_BOOK3S_64 */
+
        local_irq_restore(flags);
 
        return last;
index 6a3997f98dfb90a2f3d01da4b53156c2098a38f8..6e72788598f8e49e624b827555884a21e3fb5b8d 100644 (file)
@@ -33,8 +33,6 @@
 
 #include "mmu_decl.h"
 
-DEFINE_PER_CPU(struct mmu_gather, mmu_gathers);
-
 #ifdef CONFIG_SMP
 
 /*
@@ -43,7 +41,6 @@ DEFINE_PER_CPU(struct mmu_gather, mmu_gathers);
  * freeing a page table page that is being walked without locks
  */
 
-static DEFINE_PER_CPU(struct pte_freelist_batch *, pte_freelist_cur);
 static unsigned long pte_freelist_forced_free;
 
 struct pte_freelist_batch
@@ -97,12 +94,10 @@ static void pte_free_submit(struct pte_freelist_batch *batch)
 
 void pgtable_free_tlb(struct mmu_gather *tlb, void *table, unsigned shift)
 {
-       /* This is safe since tlb_gather_mmu has disabled preemption */
-       struct pte_freelist_batch **batchp = &__get_cpu_var(pte_freelist_cur);
+       struct pte_freelist_batch **batchp = &tlb->arch.batch;
        unsigned long pgf;
 
-       if (atomic_read(&tlb->mm->mm_users) < 2 ||
-           cpumask_equal(mm_cpumask(tlb->mm), cpumask_of(smp_processor_id()))){
+       if (atomic_read(&tlb->mm->mm_users) < 2) {
                pgtable_free(table, shift);
                return;
        }
@@ -124,10 +119,9 @@ void pgtable_free_tlb(struct mmu_gather *tlb, void *table, unsigned shift)
        }
 }
 
-void pte_free_finish(void)
+void pte_free_finish(struct mmu_gather *tlb)
 {
-       /* This is safe since tlb_gather_mmu has disabled preemption */
-       struct pte_freelist_batch **batchp = &__get_cpu_var(pte_freelist_cur);
+       struct pte_freelist_batch **batchp = &tlb->arch.batch;
 
        if (*batchp == NULL)
                return;
index 690566b66e8ea233cd0f40f4850d6ad8f1bc307d..d555cdb06bc8c0fa52cc96e816ccb02257d1a9ad 100644 (file)
@@ -73,7 +73,7 @@ void tlb_flush(struct mmu_gather *tlb)
        }
 
        /* Push out batch of freed page tables */
-       pte_free_finish();
+       pte_free_finish(tlb);
 }
 
 /*
index c14d09f614f362ef67e04744bc5db8f62b8947cc..5c94ca34cd79d7adacfb83acac7f96b3f043d8b7 100644 (file)
@@ -155,7 +155,7 @@ void __flush_tlb_pending(struct ppc64_tlb_batch *batch)
 
 void tlb_flush(struct mmu_gather *tlb)
 {
-       struct ppc64_tlb_batch *tlbbatch = &__get_cpu_var(ppc64_tlb_batch);
+       struct ppc64_tlb_batch *tlbbatch = &get_cpu_var(ppc64_tlb_batch);
 
        /* If there's a TLB batch pending, then we must flush it because the
         * pages are going to be freed and we really don't want to have a CPU
@@ -164,8 +164,10 @@ void tlb_flush(struct mmu_gather *tlb)
        if (tlbbatch->index)
                __flush_tlb_pending(tlbbatch);
 
+       put_cpu_var(ppc64_tlb_batch);
+
        /* Push out batch of freed page tables */
-       pte_free_finish();
+       pte_free_finish(tlb);
 }
 
 /**
index 2a030d89bbc6e65a847a29f6c4bf012f2e6188b0..8eaf67d320436740c400db25cb2778f9a0c183d9 100644 (file)
@@ -301,7 +301,7 @@ void tlb_flush(struct mmu_gather *tlb)
        flush_tlb_mm(tlb->mm);
 
        /* Push out batch of freed page tables */
-       pte_free_finish();
+       pte_free_finish(tlb);
 }
 
 /*