ARC: [mm] Zero page optimization
author: Vineet Gupta <vgupta@synopsys.com>
Sun, 19 May 2013 10:21:03 +0000 (15:51 +0530)
committer: Vineet Gupta <vgupta@synopsys.com>
Sat, 22 Jun 2013 13:53:18 +0000 (19:23 +0530)
Signed-off-by: Vineet Gupta <vgupta@synopsys.com>
arch/arc/mm/tlb.c

index f60807c2683d9afa494505653bd2006fa4672092..1c91dbc8ddd86f4351d74eadb08916c3365b6fbe 100644 (file)
@@ -433,9 +433,14 @@ void update_mmu_cache(struct vm_area_struct *vma, unsigned long vaddr_unaligned,
 {
        unsigned long vaddr = vaddr_unaligned & PAGE_MASK;
        unsigned long paddr = pte_val(*ptep) & PAGE_MASK;
+       struct page *page = pfn_to_page(pte_pfn(*ptep));
 
        create_tlb(vma, vaddr, ptep);
 
+       if (page == ZERO_PAGE(0)) {
+               return;
+       }
+
        /*
         * Exec page : Independent of aliasing/page-color considerations,
         *             since icache doesn't snoop dcache on ARC, any dirty
@@ -447,7 +452,6 @@ void update_mmu_cache(struct vm_area_struct *vma, unsigned long vaddr_unaligned,
         */
        if ((vma->vm_flags & VM_EXEC) ||
             addr_not_cache_congruent(paddr, vaddr)) {
-               struct page *page = pfn_to_page(pte_pfn(*ptep));
 
                int dirty = test_and_clear_bit(PG_arch_1, &page->flags);
                if (dirty) {