[COMMON] mm: cma page migration for pinned pages
author Jinsung Yang <jsgood.yang@samsung.com>
Tue, 3 Mar 2015 05:24:46 +0000 (14:24 +0900)
committer hskang <hs1218.kang@samsung.com>
Mon, 27 Aug 2018 07:21:17 +0000 (16:21 +0900)
If the changes from the patch 'cma: redirect page allocation to CMA' are
applied, many user pages can be allocated from CMA pageblocks. However,
pages pinned by __get_user_pages() can then cause CMA migration failures.
This patch adds support for migrating such pages out of CMA pageblocks at
page pinning time when gup_flags contains FOLL_CMA.

Change-Id: I70389f1ddee9697af653c4a50eab161925113979
Signed-off-by: Jinsung Yang <jsgood.yang@samsung.com>
Signed-off-by: Cho KyongHo <pullip.cho@samsung.com>
include/linux/mm.h
mm/gup.c

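For context, here is a minimal caller-side sketch (not part of this patch; the function name, locking, and buffer handling are illustrative assumptions) showing how a long-term pinning path could pass FOLL_CMA so that CMA pages are migrated before they are pinned:

/*
 * Hedged sketch only: a hypothetical driver pinning a user buffer with
 * FOLL_CMA set, assuming the gup_flags-based get_user_pages() prototype
 * (v4.9+). example_pin_user_buffer() is not part of the patch below.
 */
#include <linux/mm.h>
#include <linux/sched.h>

static long example_pin_user_buffer(unsigned long uaddr,
				    unsigned long nr_pages,
				    struct page **pages)
{
	/*
	 * FOLL_CMA asks follow_page_pte() to migrate a page that sits in a
	 * MIGRATE_CMA pageblock to a non-movable page before it is pinned,
	 * so the long-lived pin cannot block a later CMA allocation.
	 */
	unsigned int gup_flags = FOLL_WRITE | FOLL_CMA;
	long pinned;

	down_read(&current->mm->mmap_sem);
	/* get_user_pages() adds FOLL_GET itself when a pages array is given. */
	pinned = get_user_pages(uaddr, nr_pages, gup_flags, pages, NULL);
	up_read(&current->mm->mmap_sem);

	return pinned;
}

The migration itself happens inside follow_page_pte(), so existing get_user_pages() callers are unaffected unless they opt in with FOLL_CMA.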
index 5b13b4e1fa0c101689ecbe1334062ad16e55bd9a..f0d5c842a8eb63d485813e550ffceff46d0c772d 100644
--- a/include/linux/mm.h
+++ b/include/linux/mm.h
@@ -2386,6 +2386,7 @@ static inline struct page *follow_page(struct vm_area_struct *vma,
 #define FOLL_REMOTE    0x2000  /* we are working on non-current tsk/mm */
 #define FOLL_COW       0x4000  /* internal GUP flag */
 #define FOLL_ANON      0x8000  /* don't do file mappings */
+#define FOLL_CMA       0x80000 /* migrate if the page is from cma pageblock */
 
 static inline int vm_fault_to_errno(int vm_fault, int foll_flags)
 {
index 4cc8a6ff0f56c889e34dd452e045034c4ff9a474..b6f72ca9dcea1a5b0873c3d69d1902a46b524e2f 100644
--- a/mm/gup.c
+++ b/mm/gup.c
 #include <asm/pgtable.h>
 #include <asm/tlbflush.h>
 
+#include <linux/migrate.h>
+#include <linux/mm_inline.h>
+#include <linux/mmu_notifier.h>
+#include <asm/tlbflush.h>
+
 #include "internal.h"
 
+#ifdef CONFIG_CMA
+static struct page *__alloc_nonmovable_userpage(struct page *page,
+                               unsigned long private, int **result)
+{
+       return alloc_page(GFP_HIGHUSER);
+}
+
+static bool __need_migrate_cma_page(struct page *page,
+                               struct vm_area_struct *vma,
+                               unsigned long start, unsigned int flags)
+{
+       if (!(flags & FOLL_GET))
+               return false;
+
+       if (get_pageblock_migratetype(page) != MIGRATE_CMA)
+               return false;
+
+       if ((vma->vm_flags & VM_STACK_INCOMPLETE_SETUP) ==
+                                       VM_STACK_INCOMPLETE_SETUP)
+               return false;
+
+       if (!(flags & FOLL_CMA))
+               return false;
+
+       migrate_prep_local();
+
+       if (!PageLRU(page))
+               return false;
+
+       return true;
+}
+
+static int __migrate_cma_pinpage(struct page *page, struct vm_area_struct *vma)
+{
+       struct zone *zone = page_zone(page);
+       struct list_head migratepages;
+       struct lruvec *lruvec;
+       int tries = 0;
+       int ret = 0;
+
+       INIT_LIST_HEAD(&migratepages);
+
+       if (__isolate_lru_page(page, 0) != 0) {
+               dump_page(page, "failed to isolate lru page");
+               return -EFAULT;
+       }
+
+       spin_lock_irq(zone_lru_lock(zone));
+       lruvec = mem_cgroup_page_lruvec(page, zone->zone_pgdat);
+       del_page_from_lru_list(page, lruvec, page_lru(page));
+       spin_unlock_irq(zone_lru_lock(zone));
+
+       list_add(&page->lru, &migratepages);
+       inc_node_page_state(page, NR_ISOLATED_ANON + page_is_file_cache(page));
+
+       while (!list_empty(&migratepages) && tries++ < 5) {
+               ret = migrate_pages(&migratepages, __alloc_nonmovable_userpage,
+                                       NULL, 0, MIGRATE_SYNC, MR_CMA);
+       }
+
+       if (ret < 0) {
+               putback_movable_pages(&migratepages);
+               pr_err("%s: migration failed %p[%#lx]\n", __func__,
+                                       page, page_to_pfn(page));
+               return -EFAULT;
+       }
+
+       return 0;
+}
+#else
+static bool __need_migrate_cma_page(struct page *page,
+                               struct vm_area_struct *vma,
+                               unsigned long start, unsigned int flags)
+{
+       return false;
+}
+static int __migrate_cma_pinpage(struct page *page, struct vm_area_struct *vma)
+{
+       return 0;
+}
+#endif
+
 static struct page *no_page_table(struct vm_area_struct *vma,
                unsigned int flags)
 {
@@ -139,6 +226,28 @@ retry:
                }
        }
 
+       if (__need_migrate_cma_page(page, vma, address, flags)) {
+               pte_unmap_unlock(ptep, ptl);
+               if (__migrate_cma_pinpage(page, vma)) {
+                       ptep = pte_offset_map_lock(mm, pmd, address, &ptl);
+               } else {
+                       struct page *old_page = page;
+
+                       migration_entry_wait(mm, pmd, address);
+                       ptep = pte_offset_map_lock(mm, pmd, address, &ptl);
+                       update_mmu_cache(vma, address, ptep);
+                       pte = *ptep;
+                       set_pte_at_notify(mm, address, ptep, pte);
+                       page = vm_normal_page(vma, address, pte);
+                       BUG_ON(!page);
+
+                       pr_debug("cma: cma page %p[%#lx] migrated to new page %p[%#lx]\n",
+                                       old_page,
+                                       page_to_pfn(old_page),
+                                       page, page_to_pfn(page));
+               }
+       }
+
        if (flags & FOLL_SPLIT && PageTransCompound(page)) {
                int ret;
                get_page(page);