[PATCH] ppc64: More hugepage fixes
Author:     Benjamin Herrenschmidt <benh@kernel.crashing.org>
AuthorDate: Wed, 28 Sep 2005 04:45:45 +0000 (21:45 -0700)
Commit:     Linus Torvalds <torvalds@g5.osdl.org>
CommitDate: Wed, 28 Sep 2005 14:46:42 +0000 (07:46 -0700)
My previous patch fixing invalidation of huge PTEs wasn't good enough: we
still had an issue if a PTE invalidation batch contained both small and
large pages.  This patch fixes that by making sure the batch is flushed
whenever the page size fed to it changes (see the sketch below).

Signed-off-by: Benjamin Herrenschmidt <benh@kernel.crashing.org>
Signed-off-by: Andrew Morton <akpm@osdl.org>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
arch/ppc64/mm/hash_native.c
arch/ppc64/mm/tlb.c
include/asm-ppc64/tlbflush.h
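
For illustration, here is a minimal userspace sketch of the batching rule
this patch enforces.  The struct and helper names (tlb_batch,
queue_invalidate, flush_batch) are invented for this example; in the kernel
the logic lives in hpte_update() and flush_tlb_pending().

#include <stdio.h>

#define BATCH_NR 4

struct tlb_batch {
	unsigned long context;          /* MMU context of queued entries */
	unsigned int  large;            /* 1 if queued entries are huge pages */
	unsigned long addr[BATCH_NR];
	int           index;            /* number of queued invalidations */
};

/* Stand-in for flush_tlb_pending(): invalidate and empty the batch. */
static void flush_batch(struct tlb_batch *b)
{
	printf("flushing %d %s-page entries\n", b->index,
	       b->large ? "huge" : "small");
	b->index = 0;
}

static void queue_invalidate(struct tlb_batch *b, unsigned long context,
			     unsigned long addr, unsigned int huge)
{
	/*
	 * The fix: flush not only when the context changes mid-stream,
	 * but also when the page size differs from what the batch already
	 * holds, so a batch never mixes small and large pages.
	 */
	if (b->index != 0 && (context != b->context || b->large != huge))
		flush_batch(b);

	if (b->index == 0) {            /* starting a fresh batch */
		b->context = context;
		b->large = huge;
	}
	b->addr[b->index++] = addr;

	if (b->index == BATCH_NR)       /* batch full */
		flush_batch(b);
}

int main(void)
{
	struct tlb_batch b = { 0 };

	queue_invalidate(&b, 1, 0x1000, 0);     /* small page */
	queue_invalidate(&b, 1, 0x2000, 0);     /* small page, same batch */
	queue_invalidate(&b, 1, 0x10000000, 1); /* huge page: forces a flush */
	flush_batch(&b);
	return 0;
}

Keeping a single large flag per batch is enough because the batch is
flushed eagerly on any size change, so native_flush_hash_range() can pass
one page size to __tlbie() for every entry it invalidates.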

diff --git a/arch/ppc64/mm/hash_native.c b/arch/ppc64/mm/hash_native.c
index eb1bbb5b6c160bcf3b4e2e1a8f630951c3209be8..bfd385b7713c7c53063226cb01e48260f7896f30 100644
--- a/arch/ppc64/mm/hash_native.c
+++ b/arch/ppc64/mm/hash_native.c
@@ -343,7 +343,7 @@ static void native_flush_hash_range(unsigned long context,
        hpte_t *hptep;
        unsigned long hpte_v;
        struct ppc64_tlb_batch *batch = &__get_cpu_var(ppc64_tlb_batch);
-       unsigned long large;
+       unsigned long large = batch->large;
 
        local_irq_save(flags);
 
@@ -356,7 +356,6 @@ static void native_flush_hash_range(unsigned long context,
 
                va = (vsid << 28) | (batch->addr[i] & 0x0fffffff);
                batch->vaddr[j] = va;
-               large = pte_huge(batch->pte[i]);
                if (large)
                        vpn = va >> HPAGE_SHIFT;
                else
@@ -406,7 +405,7 @@ static void native_flush_hash_range(unsigned long context,
                asm volatile("ptesync":::"memory");
 
                for (i = 0; i < j; i++)
-                       __tlbie(batch->vaddr[i], 0);
+                       __tlbie(batch->vaddr[i], large);
 
                asm volatile("eieio; tlbsync; ptesync":::"memory");
 
diff --git a/arch/ppc64/mm/tlb.c b/arch/ppc64/mm/tlb.c
index d8a6593a13f0ad98fd0b64310b514551b02047e4..21fbffb23a43df153b114f36bba67f5f27ab4b52 100644
--- a/arch/ppc64/mm/tlb.c
+++ b/arch/ppc64/mm/tlb.c
@@ -143,7 +143,8 @@ void hpte_update(struct mm_struct *mm, unsigned long addr,
         * up scanning and resetting referenced bits then our batch context
         * will change mid stream.
         */
-       if (unlikely(i != 0 && context != batch->context)) {
+       if (i != 0 && (context != batch->context ||
+                      batch->large != pte_huge(pte))) {
                flush_tlb_pending();
                i = 0;
        }
@@ -151,6 +152,7 @@ void hpte_update(struct mm_struct *mm, unsigned long addr,
        if (i == 0) {
                batch->context = context;
                batch->mm = mm;
+               batch->large = pte_huge(pte);
        }
        batch->pte[i] = __pte(pte);
        batch->addr[i] = addr;
diff --git a/include/asm-ppc64/tlbflush.h b/include/asm-ppc64/tlbflush.h
index 45411a67e0825f3e4685bfeebd5d6724a0ab6ab3..74271d7c1d161f9577f9ddb6d3be88ebc351a7bc 100644
--- a/include/asm-ppc64/tlbflush.h
+++ b/include/asm-ppc64/tlbflush.h
@@ -25,6 +25,7 @@ struct ppc64_tlb_batch {
        pte_t pte[PPC64_TLB_BATCH_NR];
        unsigned long addr[PPC64_TLB_BATCH_NR];
        unsigned long vaddr[PPC64_TLB_BATCH_NR];
+       unsigned int large;
 };
 DECLARE_PER_CPU(struct ppc64_tlb_batch, ppc64_tlb_batch);