mm: Count the number of pages affected in change_protection()
diff --git a/mm/hugetlb.c b/mm/hugetlb.c
index 8536741f069b2449bbaab9e752431fb9eb4527a6..712895eda0d0ee7d363c67ba0ba7dab830ec6e3d 100644
--- a/mm/hugetlb.c
+++ b/mm/hugetlb.c
@@ -671,6 +671,11 @@ static void prep_compound_gigantic_page(struct page *page, unsigned long order)
        }
 }
 
+/*
+ * PageHuge() only returns true for hugetlbfs pages, but not for normal or
+ * transparent huge pages.  See the PageTransHuge() documentation for more
+ * details.
+ */
 int PageHuge(struct page *page)
 {
        compound_page_dtor *dtor;
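
The comment added above draws the distinction between the two kinds of huge pages. The sketch below (illustrative only, not part of the patch, with hypothetical helper names) shows how a caller could rely on that distinction, since PageTransHuge() is true for any compound head page and therefore also for hugetlbfs pages.

/*
 * Hypothetical helpers, not in the patch: PageHuge() identifies hugetlbfs
 * pages only, so an anonymous THP head is a compound head page that is
 * not a hugetlbfs page.
 */
static inline bool page_is_hugetlbfs(struct page *page)
{
	return PageHuge(page);
}

static inline bool page_is_anon_thp(struct page *page)
{
	return PageTransHuge(page) && !PageHuge(page);
}
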
@@ -2355,13 +2360,15 @@ void __unmap_hugepage_range(struct mmu_gather *tlb, struct vm_area_struct *vma,
        struct page *page;
        struct hstate *h = hstate_vma(vma);
        unsigned long sz = huge_page_size(h);
+       const unsigned long mmun_start = start; /* For mmu_notifiers */
+       const unsigned long mmun_end   = end;   /* For mmu_notifiers */
 
        WARN_ON(!is_vm_hugetlb_page(vma));
        BUG_ON(start & ~huge_page_mask(h));
        BUG_ON(end & ~huge_page_mask(h));
 
        tlb_start_vma(tlb, vma);
-       mmu_notifier_invalidate_range_start(mm, start, end);
+       mmu_notifier_invalidate_range_start(mm, mmun_start, mmun_end);
 again:
        spin_lock(&mm->page_table_lock);
        for (address = start; address < end; address += sz) {
@@ -2425,7 +2432,7 @@ again:
                if (address < end && !ref_page)
                        goto again;
        }
-       mmu_notifier_invalidate_range_end(mm, start, end);
+       mmu_notifier_invalidate_range_end(mm, mmun_start, mmun_end);
        tlb_end_vma(tlb, vma);
 }
 
@@ -2480,7 +2487,8 @@ static int unmap_ref_private(struct mm_struct *mm, struct vm_area_struct *vma,
         * from page cache lookup which is in HPAGE_SIZE units.
         */
        address = address & huge_page_mask(h);
-       pgoff = vma_hugecache_offset(h, vma, address);
+       pgoff = ((address - vma->vm_start) >> PAGE_SHIFT) +
+                       vma->vm_pgoff;
        mapping = vma->vm_file->f_dentry->d_inode->i_mapping;
 
        /*
@@ -2524,6 +2532,8 @@ static int hugetlb_cow(struct mm_struct *mm, struct vm_area_struct *vma,
        struct page *old_page, *new_page;
        int avoidcopy;
        int outside_reserve = 0;
+       unsigned long mmun_start;       /* For mmu_notifiers */
+       unsigned long mmun_end;         /* For mmu_notifiers */
 
        old_page = pte_page(pte);
 
@@ -2610,6 +2620,9 @@ retry_avoidcopy:
                            pages_per_huge_page(h));
        __SetPageUptodate(new_page);
 
+       mmun_start = address & huge_page_mask(h);
+       mmun_end = mmun_start + huge_page_size(h);
+       mmu_notifier_invalidate_range_start(mm, mmun_start, mmun_end);
        /*
         * Retake the page_table_lock to check for racing updates
         * before the page tables are altered
@@ -2618,9 +2631,6 @@ retry_avoidcopy:
        ptep = huge_pte_offset(mm, address & huge_page_mask(h));
        if (likely(pte_same(huge_ptep_get(ptep), pte))) {
                /* Break COW */
-               mmu_notifier_invalidate_range_start(mm,
-                       address & huge_page_mask(h),
-                       (address & huge_page_mask(h)) + huge_page_size(h));
                huge_ptep_clear_flush(vma, address, ptep);
                set_huge_pte_at(mm, address, ptep,
                                make_huge_pte(vma, new_page, 1));
@@ -2628,10 +2638,11 @@ retry_avoidcopy:
                hugepage_add_new_anon_rmap(new_page, vma, address);
                /* Make the old page be freed below */
                new_page = old_page;
-               mmu_notifier_invalidate_range_end(mm,
-                       address & huge_page_mask(h),
-                       (address & huge_page_mask(h)) + huge_page_size(h));
        }
+       spin_unlock(&mm->page_table_lock);
+       mmu_notifier_invalidate_range_end(mm, mmun_start, mmun_end);
+       /* Caller expects lock to be held */
+       spin_lock(&mm->page_table_lock);
        page_cache_release(new_page);
        page_cache_release(old_page);
        return 0;
@@ -3003,7 +3014,7 @@ same_page:
        return i ? i : -EFAULT;
 }
 
-void hugetlb_change_protection(struct vm_area_struct *vma,
+unsigned long hugetlb_change_protection(struct vm_area_struct *vma,
                unsigned long address, unsigned long end, pgprot_t newprot)
 {
        struct mm_struct *mm = vma->vm_mm;
@@ -3011,6 +3022,7 @@ void hugetlb_change_protection(struct vm_area_struct *vma,
        pte_t *ptep;
        pte_t pte;
        struct hstate *h = hstate_vma(vma);
+       unsigned long pages = 0;
 
        BUG_ON(address >= end);
        flush_cache_range(vma, address, end);
@@ -3021,12 +3033,15 @@ void hugetlb_change_protection(struct vm_area_struct *vma,
                ptep = huge_pte_offset(mm, address);
                if (!ptep)
                        continue;
-               if (huge_pmd_unshare(mm, &address, ptep))
+               if (huge_pmd_unshare(mm, &address, ptep)) {
+                       pages++;
                        continue;
+               }
                if (!huge_pte_none(huge_ptep_get(ptep))) {
                        pte = huge_ptep_get_and_clear(mm, address, ptep);
                        pte = pte_mkhuge(pte_modify(pte, newprot));
                        set_huge_pte_at(mm, address, ptep, pte);
+                       pages++;
                }
        }
        spin_unlock(&mm->page_table_lock);
@@ -3038,6 +3053,8 @@ void hugetlb_change_protection(struct vm_area_struct *vma,
         */
        flush_tlb_range(vma, start, end);
        mutex_unlock(&vma->vm_file->f_mapping->i_mmap_mutex);
+
+       return pages << h->order;
 }
 
 int hugetlb_reserve_pages(struct inode *inode,
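
The new return value is in units of base pages: each huge PTE that is unshared or rewritten is counted once and then scaled by "pages << h->order", so one huge page reports 2^order small pages back to change_protection(). A minimal standalone sketch of that arithmetic follows; it is not kernel code, and the 4 KiB base page, 2 MiB huge page, and address range are assumptions chosen purely for illustration.

#include <stdio.h>

int main(void)
{
	unsigned long base_shift = 12;                 /* assumed 4 KiB base page     */
	unsigned long huge_shift = 21;                 /* assumed 2 MiB huge page     */
	unsigned long order = huge_shift - base_shift; /* h->order would be 9 here    */

	unsigned long start = 0x40000000UL;            /* hypothetical, huge-aligned  */
	unsigned long end   = start + (4UL << 20);     /* 4 MiB range                 */
	unsigned long pages = 0;
	unsigned long addr;

	/* One iteration per huge PTE, mirroring the loop in
	 * hugetlb_change_protection(); assume every PTE is present and modified. */
	for (addr = start; addr < end; addr += 1UL << huge_shift)
		pages++;

	/* Same scaling as "return pages << h->order;" in the patch. */
	printf("huge PTEs changed: %lu, base pages reported: %lu\n",
	       pages, pages << order);                /* prints 2 and 1024           */
	return 0;
}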