Merge branch 'android-4.14-p' into android-exynos-4.14-ww-9610-minor_up-dev
index 7c0e5b1bbcd4c5fcceeac86c63380673381cb2e9..d973fa1a844b2c9a90cda9335edf76a138dc019f 100644
--- a/mm/gup.c
+++ b/mm/gup.c
 #include <asm/pgtable.h>
 #include <asm/tlbflush.h>
 
+#include <linux/migrate.h>
+#include <linux/mm_inline.h>
+#include <linux/mmu_notifier.h>
+
 #include "internal.h"
 
+#ifdef CONFIG_CMA
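+/*
+ * Allocation callback handed to migrate_pages(): the replacement page is a
+ * plain GFP_HIGHUSER allocation, i.e. without __GFP_MOVABLE, so the pinned
+ * data ends up outside CMA/movable pageblocks.
+ */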
+static struct page *__alloc_nonmovable_userpage(struct page *page,
+                               unsigned long private, int **result)
+{
+       return alloc_page(GFP_HIGHUSER);
+}
+
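+/*
+ * A page only needs to be migrated out of CMA before pinning when the caller
+ * takes a reference (FOLL_GET) and asked for CMA migration (FOLL_CMA), the
+ * page really sits in a CMA pageblock, and the VMA is not a stack that is
+ * still being set up.  Pages not yet on the LRU are drained from the per-cpu
+ * pagevecs first via migrate_prep_local().
+ */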
+static bool __need_migrate_cma_page(struct page *page,
+                               struct vm_area_struct *vma,
+                               unsigned long start, unsigned int flags)
+{
+       if (!(flags & FOLL_GET) || !(flags & FOLL_CMA))
+               return false;
+
+       if (!is_migrate_cma_page(page))
+               return false;
+
+       if ((vma->vm_flags & VM_STACK_INCOMPLETE_SETUP) ==
+                                       VM_STACK_INCOMPLETE_SETUP)
+               return false;
+
+       if (!PageLRU(page)) {
+               migrate_prep_local();
+               if (WARN_ON(!PageLRU(page))) {
+                       __dump_page(page, "non-lru cma page");
+                       return false;
+               }
+       }
+
+       return true;
+}
+
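+/*
+ * Pull the page off its LRU list under the zone LRU lock so that migration
+ * owns it exclusively.  Returns -EBUSY if isolation fails, e.g. because
+ * compaction already isolated the page.
+ */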
+static int __isolate_cma_pinpage(struct page *page)
+{
+       struct zone *zone = page_zone(page);
+       struct lruvec *lruvec;
+
+       spin_lock_irq(zone_lru_lock(zone));
+       if (__isolate_lru_page(page, 0) != 0) {
+               spin_unlock_irq(zone_lru_lock(zone));
+               dump_page(page, "failed to isolate lru page");
+               return -EBUSY;
+       }
+
+       lruvec = mem_cgroup_page_lruvec(page, zone->zone_pgdat);
+       del_page_from_lru_list(page, lruvec, page_lru(page));
+       spin_unlock_irq(zone_lru_lock(zone));
+
+       return 0;
+}
+
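+/*
+ * Migrate a single pinned CMA page to a freshly allocated non-movable page,
+ * retrying migrate_pages() up to five times.  On failure the isolated page is
+ * put back on the LRU and -EFAULT is returned.
+ */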
+static int __migrate_cma_pinpage(struct page *page, struct vm_area_struct *vma)
+{
+       struct list_head migratepages;
+       int tries = 0;
+       int ret = 0;
+
+       INIT_LIST_HEAD(&migratepages);
+
+       list_add(&page->lru, &migratepages);
+       inc_node_page_state(page, NR_ISOLATED_ANON + page_is_file_cache(page));
+
+       while (!list_empty(&migratepages) && tries++ < 5) {
+               ret = migrate_pages(&migratepages, __alloc_nonmovable_userpage,
+                                       NULL, 0, MIGRATE_SYNC, MR_CMA);
+       }
+
+       if (ret) {
+               putback_movable_pages(&migratepages);
+               pr_err("%s: migration failed %p[%#lx]\n", __func__,
+                                       page, page_to_pfn(page));
+               return -EFAULT;
+       }
+
+       return 0;
+}
+#else
+static bool __need_migrate_cma_page(struct page *page,
+                               struct vm_area_struct *vma,
+                               unsigned long start, unsigned int flags)
+{
+       return false;
+}
+static int __migrate_cma_pinpage(struct page *page, struct vm_area_struct *vma)
+{
+       return 0;
+}
+#endif
+
 static struct page *no_page_table(struct vm_area_struct *vma,
                unsigned int flags)
 {
@@ -139,6 +233,35 @@ retry:
                }
        }
 
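+       /*
+        * When a CMA page is about to be pinned, isolate it and migrate its
+        * contents to a non-movable page first, so that long-term pins do not
+        * block future CMA allocations.
+        */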
+       if (__need_migrate_cma_page(page, vma, address, flags)) {
+               if (__isolate_cma_pinpage(page)) {
+                       pr_warn("%s: failed to migrate a CMA page; it raced with compaction\n",
+                                       __func__);
+                       WARN(1, "Please retry get_user_pages()\n");
+                       page = ERR_PTR(-EBUSY);
+                       goto out;
+               }
+               pte_unmap_unlock(ptep, ptl);
+               if (__migrate_cma_pinpage(page, vma)) {
+                       ptep = pte_offset_map_lock(mm, pmd, address, &ptl);
+               } else {
+                       struct page *old_page = page;
+
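+                       /*
+                        * Wait for any migration entry installed in the PTE to
+                        * be resolved, then re-read the PTE under the lock and
+                        * pick up the page that now backs this address.
+                        * set_pte_at_notify() rewrites the same value so MMU
+                        * notifier users observe the change.
+                        */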
+                       migration_entry_wait(mm, pmd, address);
+                       ptep = pte_offset_map_lock(mm, pmd, address, &ptl);
+                       update_mmu_cache(vma, address, ptep);
+                       pte = *ptep;
+                       set_pte_at_notify(mm, address, ptep, pte);
+                       page = vm_normal_page(vma, address, pte);
+                       BUG_ON(!page);
+
+                       pr_debug("cma: cma page %p[%#lx] migrated to new "
+                                       "page %p[%#lx]\n", old_page,
+                                       page_to_pfn(old_page),
+                                       page, page_to_pfn(page));
+               }
+       }
+
        if (flags & FOLL_SPLIT && PageTransCompound(page)) {
                int ret;
                get_page(page);
@@ -660,6 +783,9 @@ static long __get_user_pages(struct task_struct *tsk, struct mm_struct *mm,
        if (!(gup_flags & FOLL_FORCE))
                gup_flags |= FOLL_NUMA;
 
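+       /*
+        * CMA pinning may need to migrate pages; drain the per-cpu LRU
+        * pagevecs on all CPUs up front so isolation can succeed.
+        */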
+       if (gup_flags & FOLL_CMA)
+               migrate_prep();
+
        do {
                struct page *page;
                unsigned int foll_flags = gup_flags;