mm: rename FOLL_MLOCK to FOLL_POPULATE
author	Kirill A. Shutemov <kirill.shutemov@linux.intel.com>
	Tue, 14 Apr 2015 22:44:37 +0000 (15:44 -0700)
committer	Linus Torvalds <torvalds@linux-foundation.org>
	Tue, 14 Apr 2015 23:48:59 +0000 (16:48 -0700)
After commit a1fde08c74e9 ("VM: skip the stack guard page lookup in
get_user_pages only for mlock") FOLL_MLOCK has lost its original
meaning: we don't necessarily mlock the page if the flag is set -- we
also take VM_LOCKED into consideration.

Since we use the same codepath for __mm_populate(), let's rename
FOLL_MLOCK to FOLL_POPULATE.
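
To illustrate the semantics the new name reflects (a standalone sketch, not
kernel code; FOLL_POPULATE, FOLL_TOUCH and VM_LOCKED values are taken from the
headers, everything else is simplified): the flag by itself only asks that the
range be faulted in, and the page is treated as mlocked only when the VMA also
carries VM_LOCKED.

	/* Standalone sketch of the FOLL_POPULATE semantics after this rename.
	 * Not kernel code; struct and helper names are illustrative only.
	 */
	#include <stdbool.h>
	#include <stdio.h>

	#define FOLL_TOUCH    0x02
	#define FOLL_POPULATE 0x40   /* fault in page (was FOLL_MLOCK) */
	#define VM_LOCKED     0x2000 /* VMA is mlock()ed */

	struct vma { unsigned long vm_flags; };

	/* Mirrors the check in follow_page_pte()/follow_trans_huge_pmd():
	 * the page is put on the mlocked path only when the caller passed
	 * FOLL_POPULATE *and* the VMA itself is VM_LOCKED.
	 */
	static bool should_mlock_page(unsigned int gup_flags, const struct vma *vma)
	{
		return (gup_flags & FOLL_POPULATE) && (vma->vm_flags & VM_LOCKED);
	}

	int main(void)
	{
		struct vma plain  = { .vm_flags = 0 };
		struct vma locked = { .vm_flags = VM_LOCKED };
		unsigned int flags = FOLL_TOUCH | FOLL_POPULATE; /* as __mm_populate() passes */

		printf("plain VMA:  mlock page? %d\n", should_mlock_page(flags, &plain));  /* 0 */
		printf("locked VMA: mlock page? %d\n", should_mlock_page(flags, &locked)); /* 1 */
		return 0;
	}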

Signed-off-by: Kirill A. Shutemov <kirill.shutemov@linux.intel.com>
Acked-by: Linus Torvalds <torvalds@linux-foundation.org>
Acked-by: David Rientjes <rientjes@google.com>
Cc: Michel Lespinasse <walken@google.com>
Cc: Rik van Riel <riel@redhat.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
include/linux/mm.h
mm/gup.c
mm/huge_memory.c
mm/mlock.c

index 47a93928b90fff85339fd09d47761549db576b58..cccbbba12b9d474d18c6cea81923c98f2fbbb33f 100644
--- a/include/linux/mm.h
+++ b/include/linux/mm.h
@@ -2109,7 +2109,7 @@ static inline struct page *follow_page(struct vm_area_struct *vma,
 #define FOLL_FORCE     0x10    /* get_user_pages read/write w/o permission */
 #define FOLL_NOWAIT    0x20    /* if a disk transfer is needed, start the IO
                                 * and return without waiting upon it */
-#define FOLL_MLOCK     0x40    /* mark page as mlocked */
+#define FOLL_POPULATE  0x40    /* fault in page */
 #define FOLL_SPLIT     0x80    /* don't return transhuge pages, split them */
 #define FOLL_HWPOISON  0x100   /* check page is hwpoisoned */
 #define FOLL_NUMA      0x200   /* force NUMA hinting page fault */
index a6e24e246f8688af7664966e66d99dbd038b8066..1b114ba9aebf4c512342f5591b71b6c5a9f27a33 100644
--- a/mm/gup.c
+++ b/mm/gup.c
@@ -92,7 +92,7 @@ retry:
                 */
                mark_page_accessed(page);
        }
-       if ((flags & FOLL_MLOCK) && (vma->vm_flags & VM_LOCKED)) {
+       if ((flags & FOLL_POPULATE) && (vma->vm_flags & VM_LOCKED)) {
                /*
                 * The preliminary mapping check is mainly to avoid the
                 * pointless overhead of lock_page on the ZERO_PAGE
@@ -265,8 +265,8 @@ static int faultin_page(struct task_struct *tsk, struct vm_area_struct *vma,
        unsigned int fault_flags = 0;
        int ret;
 
-       /* For mlock, just skip the stack guard page. */
-       if ((*flags & FOLL_MLOCK) &&
+       /* For mm_populate(), just skip the stack guard page. */
+       if ((*flags & FOLL_POPULATE) &&
                        (stack_guard_page_start(vma, address) ||
                         stack_guard_page_end(vma, address + PAGE_SIZE)))
                return -ENOENT;
index 6817b0350c71c43b0f89a4972ce19c51a2408a2b..10a4b6cea0d10a27f3792b8a61f1a415b911f9a0 100644
--- a/mm/huge_memory.c
+++ b/mm/huge_memory.c
@@ -1231,7 +1231,7 @@ struct page *follow_trans_huge_pmd(struct vm_area_struct *vma,
                                          pmd, _pmd,  1))
                        update_mmu_cache_pmd(vma, addr, pmd);
        }
-       if ((flags & FOLL_MLOCK) && (vma->vm_flags & VM_LOCKED)) {
+       if ((flags & FOLL_POPULATE) && (vma->vm_flags & VM_LOCKED)) {
                if (page->mapping && trylock_page(page)) {
                        lru_add_drain();
                        if (page->mapping)
index 8a54cd214925872a66d4d1cc36b69ec6c6047324..f756e28b33fc2ca253fec3e3b66c2c90f7f72797 100644
--- a/mm/mlock.c
+++ b/mm/mlock.c
@@ -237,7 +237,7 @@ long __mlock_vma_pages_range(struct vm_area_struct *vma,
        VM_BUG_ON_VMA(end   > vma->vm_end, vma);
        VM_BUG_ON_MM(!rwsem_is_locked(&mm->mmap_sem), mm);
 
-       gup_flags = FOLL_TOUCH | FOLL_MLOCK;
+       gup_flags = FOLL_TOUCH | FOLL_POPULATE;
        /*
         * We want to touch writable mappings with a write fault in order
         * to break COW, except for shared mappings because these don't COW