mm/hugetlb: add migration entry check in __unmap_hugepage_range
diff --git a/mm/hugetlb.c b/mm/hugetlb.c
index e2bfbf73a551d0747fbea1b69200dd773e36754e..d9bc87ca062bf8caaaa7c5a0be44cbef2c0e0a5d 100644
--- a/mm/hugetlb.c
+++ b/mm/hugetlb.c
@@ -21,6 +21,7 @@
 #include <linux/rmap.h>
 #include <linux/swap.h>
 #include <linux/swapops.h>
+#include <linux/page-isolation.h>
 
 #include <asm/page.h>
 #include <asm/pgtable.h>
@@ -517,9 +518,16 @@ static struct page *dequeue_huge_page_node(struct hstate *h, int nid)
 {
        struct page *page;
 
-       if (list_empty(&h->hugepage_freelists[nid]))
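+       /* skip free hugepages whose pageblock is isolated */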
+       list_for_each_entry(page, &h->hugepage_freelists[nid], lru)
+               if (!is_migrate_isolate_page(page))
+                       break;
+       /*
+        * If no non-isolated free hugepage was found on the list,
+        * the allocation fails.
+        */
+       if (&h->hugepage_freelists[nid] == &page->lru)
                return NULL;
-       page = list_entry(h->hugepage_freelists[nid].next, struct page, lru);
        list_move(&page->lru, &h->hugepage_activelist);
        set_page_refcounted(page);
        h->free_huge_pages--;
@@ -690,6 +698,48 @@ int PageHuge(struct page *page)
 }
 EXPORT_SYMBOL_GPL(PageHuge);
 
+/*
+ * PageHeadHuge() returns true only for the head page of a hugetlbfs
+ * page, not for normal or transparent huge pages.
+ */
+int PageHeadHuge(struct page *page_head)
+{
+       compound_page_dtor *dtor;
+
+       if (!PageHead(page_head))
+               return 0;
+
+       dtor = get_compound_page_dtor(page_head);
+
+       return dtor == free_huge_page;
+}
+EXPORT_SYMBOL_GPL(PageHeadHuge);
+
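+/*
+ * Return the index of @page within its mapping in units of base pages,
+ * scaling the head page's index by the compound order for hugetlb pages.
+ */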
+pgoff_t __basepage_index(struct page *page)
+{
+       struct page *page_head = compound_head(page);
+       pgoff_t index = page_index(page_head);
+       unsigned long compound_idx;
+
+       if (!PageHuge(page_head))
+               return page_index(page);
+
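+       /*
+        * Gigantic pages can span non-contiguous memmap sections, so
+        * compute the tail offset from pfns, not pointer arithmetic.
+        */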
+       if (compound_order(page_head) >= MAX_ORDER)
+               compound_idx = page_to_pfn(page) - page_to_pfn(page_head);
+       else
+               compound_idx = page - page_head;
+
+       return (index << compound_order(page_head)) + compound_idx;
+}
+
 static struct page *alloc_fresh_huge_page_node(struct hstate *h, int nid)
 {
        struct page *page;
@@ -1059,6 +1109,8 @@ static void return_unused_surplus_pages(struct hstate *h,
        while (nr_pages--) {
                if (!free_pool_huge_page(h, &node_states[N_MEMORY], 1))
                        break;
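+               /* briefly drop hugetlb_lock if a reschedule is needed */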
+               cond_resched_lock(&hugetlb_lock);
        }
 }
 
@@ -1446,6 +1498,8 @@ static unsigned long set_max_huge_pages(struct hstate *h, unsigned long count,
        while (min_count < persistent_huge_pages(h)) {
                if (!free_pool_huge_page(h, nodes_allowed, 0))
                        break;
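+               /* briefly drop hugetlb_lock if a reschedule is needed */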
+               cond_resched_lock(&hugetlb_lock);
        }
        while (count < persistent_huge_pages(h)) {
                if (!adjust_pool_surplus(h, nodes_allowed, 1))
@@ -2285,6 +2339,33 @@ static void set_huge_ptep_writable(struct vm_area_struct *vma,
                update_mmu_cache(vma, address, ptep);
 }
 
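+/* Return 1 if @pte is a swap entry for a hugepage under migration. */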
+static int is_hugetlb_entry_migration(pte_t pte)
+{
+       swp_entry_t swp;
+
+       if (huge_pte_none(pte) || pte_present(pte))
+               return 0;
+       swp = pte_to_swp_entry(pte);
+       if (non_swap_entry(swp) && is_migration_entry(swp))
+               return 1;
+       else
+               return 0;
+}
+
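+/* Return 1 if @pte records a hardware-poisoned hugepage. */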
+static int is_hugetlb_entry_hwpoisoned(pte_t pte)
+{
+       swp_entry_t swp;
+
+       if (huge_pte_none(pte) || pte_present(pte))
+               return 0;
+       swp = pte_to_swp_entry(pte);
+       if (non_swap_entry(swp) && is_hwpoison_entry(swp))
+               return 1;
+       else
+               return 0;
+}
 
 int copy_hugetlb_page_range(struct mm_struct *dst, struct mm_struct *src,
                            struct vm_area_struct *vma)
@@ -2312,7 +2393,25 @@ int copy_hugetlb_page_range(struct mm_struct *dst, struct mm_struct *src,
 
                spin_lock(&dst->page_table_lock);
                spin_lock_nested(&src->page_table_lock, SINGLE_DEPTH_NESTING);
-               if (!huge_pte_none(huge_ptep_get(src_pte))) {
+               entry = huge_ptep_get(src_pte);
+               if (huge_pte_none(entry)) { /* skip none entry */
+                       ;
+               } else if (unlikely(is_hugetlb_entry_migration(entry) ||
+                                   is_hugetlb_entry_hwpoisoned(entry))) {
+                       swp_entry_t swp_entry = pte_to_swp_entry(entry);
+
+                       if (is_write_migration_entry(swp_entry) && cow) {
+                               /*
+                                * COW mappings require pages in both
+                                * parent and child to be read-only.
+                                */
+                               make_migration_entry_read(&swp_entry);
+                               entry = swp_entry_to_pte(swp_entry);
+                               set_huge_pte_at(src, addr, src_pte, entry);
+                       }
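+                       /* propagate the migration/hwpoison entry to the child */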
+                       set_huge_pte_at(dst, addr, dst_pte, entry);
+               } else {
                        if (cow)
                                huge_ptep_set_wrprotect(src, addr, src_pte);
                        entry = huge_ptep_get(src_pte);
@@ -2330,32 +2429,6 @@ nomem:
        return -ENOMEM;
 }
 
-static int is_hugetlb_entry_migration(pte_t pte)
-{
-       swp_entry_t swp;
-
-       if (huge_pte_none(pte) || pte_present(pte))
-               return 0;
-       swp = pte_to_swp_entry(pte);
-       if (non_swap_entry(swp) && is_migration_entry(swp))
-               return 1;
-       else
-               return 0;
-}
-
-static int is_hugetlb_entry_hwpoisoned(pte_t pte)
-{
-       swp_entry_t swp;
-
-       if (huge_pte_none(pte) || pte_present(pte))
-               return 0;
-       swp = pte_to_swp_entry(pte);
-       if (non_swap_entry(swp) && is_hwpoison_entry(swp))
-               return 1;
-       else
-               return 0;
-}
-
 void __unmap_hugepage_range(struct mmu_gather *tlb, struct vm_area_struct *vma,
                            unsigned long start, unsigned long end,
                            struct page *ref_page)
@@ -2392,9 +2465,10 @@ again:
                        continue;
 
                /*
-                * HWPoisoned hugepage is already unmapped and dropped reference
+                * A migrating or HWPoisoned hugepage is already unmapped
+                * and its refcount dropped, so just clear the pte here.
                 */
-               if (unlikely(is_hugetlb_entry_hwpoisoned(pte))) {
+               if (unlikely(!pte_present(pte))) {
                        huge_pte_clear(mm, address, ptep);
                        continue;
                }
@@ -2473,7 +2547,8 @@ void unmap_hugepage_range(struct vm_area_struct *vma, unsigned long start,
 
        mm = vma->vm_mm;
 
-       tlb_gather_mmu(&tlb, mm, 0);
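+       /* gather TLB flushes only for the range being unmapped */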
+       tlb_gather_mmu(&tlb, mm, start, end);
        __unmap_hugepage_range(&tlb, vma, start, end, ref_page);
        tlb_finish_mmu(&tlb, start, end);
 }