mm: unexport __get_user_pages_unlocked()
author: Lorenzo Stoakes <lstoakes@gmail.com>
Wed, 14 Dec 2016 23:06:55 +0000 (15:06 -0800)
committer: Linus Torvalds <torvalds@linux-foundation.org>
Thu, 15 Dec 2016 00:04:09 +0000 (16:04 -0800)
Unexport the low-level __get_user_pages_unlocked() function and replace
invocations with calls to more appropriate higher-level functions.

In hva_to_pfn_slow() we are able to replace __get_user_pages_unlocked()
with get_user_pages_unlocked() since we can now pass gup_flags.

In async_pf_execute() and process_vm_rw_single_vec() we need to pass
different tsk, mm arguments so get_user_pages_remote() is the sane
replacement in these cases (having added manual acquisition and release
of mmap_sem).

Additionally get_user_pages_remote() reintroduces use of the FOLL_TOUCH
flag.  However, this flag was originally silently dropped by commit
1e9877902dc7 ("mm/gup: Introduce get_user_pages_remote()"), so this
appears to have been unintentional and reintroducing it is therefore not
an issue.

[akpm@linux-foundation.org: coding-style fixes]
Link: http://lkml.kernel.org/r/20161027095141.2569-3-lstoakes@gmail.com
Signed-off-by: Lorenzo Stoakes <lstoakes@gmail.com>
Acked-by: Michal Hocko <mhocko@suse.com>
Cc: Jan Kara <jack@suse.cz>
Cc: Hugh Dickins <hughd@google.com>
Cc: Dave Hansen <dave.hansen@linux.intel.com>
Cc: Rik van Riel <riel@redhat.com>
Cc: Mel Gorman <mgorman@techsingularity.net>
Cc: Paolo Bonzini <pbonzini@redhat.com>
Cc: Radim Krcmar <rkrcmar@redhat.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
include/linux/mm.h
mm/gup.c
mm/nommu.c
mm/process_vm_access.c
virt/kvm/async_pf.c
virt/kvm/kvm_main.c

index cc154454675ad79b7de1b816e7df25e4211fb7e0..7b2d14ed3815c89a971307536b5b7b569efdf648 100644 (file)
@@ -1280,9 +1280,6 @@ long get_user_pages(unsigned long start, unsigned long nr_pages,
                            struct vm_area_struct **vmas);
 long get_user_pages_locked(unsigned long start, unsigned long nr_pages,
                    unsigned int gup_flags, struct page **pages, int *locked);
-long __get_user_pages_unlocked(struct task_struct *tsk, struct mm_struct *mm,
-                              unsigned long start, unsigned long nr_pages,
-                              struct page **pages, unsigned int gup_flags);
 long get_user_pages_unlocked(unsigned long start, unsigned long nr_pages,
                    struct page **pages, unsigned int gup_flags);
 int get_user_pages_fast(unsigned long start, int nr_pages, int write,
index b64c907aa4f06e14ce8eb3ed54d16749a273e80c..55315555489d02c411534a958f1a31f79c9fafb5 100644 (file)
--- a/mm/gup.c
+++ b/mm/gup.c
@@ -865,9 +865,10 @@ EXPORT_SYMBOL(get_user_pages_locked);
  * caller if required (just like with __get_user_pages). "FOLL_GET"
  * is set implicitly if "pages" is non-NULL.
  */
-__always_inline long __get_user_pages_unlocked(struct task_struct *tsk, struct mm_struct *mm,
-                                              unsigned long start, unsigned long nr_pages,
-                                              struct page **pages, unsigned int gup_flags)
+static __always_inline long __get_user_pages_unlocked(struct task_struct *tsk,
+               struct mm_struct *mm, unsigned long start,
+               unsigned long nr_pages, struct page **pages,
+               unsigned int gup_flags)
 {
        long ret;
        int locked = 1;
@@ -879,7 +880,6 @@ __always_inline long __get_user_pages_unlocked(struct task_struct *tsk, struct m
                up_read(&mm->mmap_sem);
        return ret;
 }
-EXPORT_SYMBOL(__get_user_pages_unlocked);
 
 /*
  * get_user_pages_unlocked() is suitable to replace the form:
index 9720e0bab0298fcec3855d7300934eb9e6223b4c..c299a7fbca70ffe3228dce7c0713a862bd151855 100644 (file)
@@ -176,9 +176,10 @@ long get_user_pages_locked(unsigned long start, unsigned long nr_pages,
 }
 EXPORT_SYMBOL(get_user_pages_locked);
 
-long __get_user_pages_unlocked(struct task_struct *tsk, struct mm_struct *mm,
-                              unsigned long start, unsigned long nr_pages,
-                              struct page **pages, unsigned int gup_flags)
+static long __get_user_pages_unlocked(struct task_struct *tsk,
+                       struct mm_struct *mm, unsigned long start,
+                       unsigned long nr_pages, struct page **pages,
+                       unsigned int gup_flags)
 {
        long ret;
        down_read(&mm->mmap_sem);
@@ -187,7 +188,6 @@ long __get_user_pages_unlocked(struct task_struct *tsk, struct mm_struct *mm,
        up_read(&mm->mmap_sem);
        return ret;
 }
-EXPORT_SYMBOL(__get_user_pages_unlocked);
 
 long get_user_pages_unlocked(unsigned long start, unsigned long nr_pages,
                             struct page **pages, unsigned int gup_flags)
index be8dc8d1edb95b34d8c6b7fbf34321e597e981e1..84d0c7eada2b50f2828033334c5a10df7a6a90a5 100644 (file)
@@ -88,7 +88,7 @@ static int process_vm_rw_single_vec(unsigned long addr,
        ssize_t rc = 0;
        unsigned long max_pages_per_loop = PVM_MAX_KMALLOC_PAGES
                / sizeof(struct pages *);
-       unsigned int flags = FOLL_REMOTE;
+       unsigned int flags = 0;
 
        /* Work out address and page range required */
        if (len == 0)
@@ -100,15 +100,19 @@ static int process_vm_rw_single_vec(unsigned long addr,
 
        while (!rc && nr_pages && iov_iter_count(iter)) {
                int pages = min(nr_pages, max_pages_per_loop);
+               int locked = 1;
                size_t bytes;
 
                /*
                 * Get the pages we're interested in.  We must
-                * add FOLL_REMOTE because task/mm might not
+                * access remotely because task/mm might not
                 * current/current->mm
                 */
-               pages = __get_user_pages_unlocked(task, mm, pa, pages,
-                                                 process_pages, flags);
+               down_read(&mm->mmap_sem);
+               pages = get_user_pages_remote(task, mm, pa, pages, flags,
+                                             process_pages, NULL, &locked);
+               if (locked)
+                       up_read(&mm->mmap_sem);
                if (pages <= 0)
                        return -EFAULT;
 
index efeceb0a222dd8a793cd8d7e5a7770b7799851c3..3815e940fbeacb444bffc22c581f127d98730a59 100644 (file)
@@ -76,16 +76,20 @@ static void async_pf_execute(struct work_struct *work)
        struct kvm_vcpu *vcpu = apf->vcpu;
        unsigned long addr = apf->addr;
        gva_t gva = apf->gva;
+       int locked = 1;
 
        might_sleep();
 
        /*
         * This work is run asynchromously to the task which owns
         * mm and might be done in another context, so we must
-        * use FOLL_REMOTE.
+        * access remotely.
         */
-       __get_user_pages_unlocked(NULL, mm, addr, 1, NULL,
-                       FOLL_WRITE | FOLL_REMOTE);
+       down_read(&mm->mmap_sem);
+       get_user_pages_remote(NULL, mm, addr, 1, FOLL_WRITE, NULL, NULL,
+                       &locked);
+       if (locked)
+               up_read(&mm->mmap_sem);
 
        kvm_async_page_present_sync(vcpu, apf);
 
index 823544c166be8eb424eca23faa14bad8745065a3..de102cae7125b5d2515aad0b9e660c53e520c1e4 100644 (file)
@@ -1418,13 +1418,12 @@ static int hva_to_pfn_slow(unsigned long addr, bool *async, bool write_fault,
                npages = get_user_page_nowait(addr, write_fault, page);
                up_read(&current->mm->mmap_sem);
        } else {
-               unsigned int flags = FOLL_TOUCH | FOLL_HWPOISON;
+               unsigned int flags = FOLL_HWPOISON;
 
                if (write_fault)
                        flags |= FOLL_WRITE;
 
-               npages = __get_user_pages_unlocked(current, current->mm, addr, 1,
-                                                  page, flags);
+               npages = get_user_pages_unlocked(addr, 1, page, flags);
        }
        if (npages != 1)
                return npages;