mm: make vm_mmap killable
author Michal Hocko <mhocko@suse.com>
Mon, 23 May 2016 23:25:30 +0000 (16:25 -0700)
committer Linus Torvalds <torvalds@linux-foundation.org>
Tue, 24 May 2016 00:04:14 +0000 (17:04 -0700)
All the callers of vm_mmap seem to check for failure already and bail
out in one way or another on error, which means that we can switch it
to the killable version of vm_mmap_pgoff and return -EINTR if the
current task gets killed while waiting for mmap_sem.  This also means
that vm_mmap_pgoff can be killable by default and the additional
parameter can be dropped.
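
As an aside (not part of this patch), the caller pattern this relies on
looks roughly like the sketch below; the map_region() helper and its
mapping parameters are made up for illustration:

	#include <linux/err.h>
	#include <linux/fs.h>
	#include <linux/mm.h>
	#include <linux/mman.h>

	/*
	 * Hypothetical caller sketch: vm_mmap() returns the mapped address
	 * on success or a negative errno encoded as an unsigned long on
	 * failure; with this change -EINTR is one of the possible errors,
	 * returned when the task is killed while sleeping on mmap_sem.
	 */
	static int map_region(struct file *file, unsigned long len)
	{
		unsigned long addr;

		addr = vm_mmap(file, 0, len, PROT_READ | PROT_WRITE,
			       MAP_SHARED, 0);
		if (IS_ERR_VALUE(addr))
			return (int)addr;	/* bail out, possibly with -EINTR */

		return 0;
	}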

This will help in OOM conditions where the OOM victim might be stuck
waiting for mmap_sem for write, which in turn can block the oom_reaper,
which relies on taking mmap_sem for read to make forward progress and
reclaim the victim's address space.

Please note that load_elf_binary ignores the vm_mmap error in the
current->personality & MMAP_PAGE_ZERO case, but that shouldn't be a
problem because the address is not used anywhere and we never return
to userspace if we get killed.

Signed-off-by: Michal Hocko <mhocko@suse.com>
Acked-by: Vlastimil Babka <vbabka@suse.cz>
Cc: "Kirill A. Shutemov" <kirill.shutemov@linux.intel.com>
Cc: Mel Gorman <mgorman@suse.de>
Cc: Oleg Nesterov <oleg@redhat.com>
Cc: Andrea Arcangeli <aarcange@redhat.com>
Cc: Al Viro <viro@zeniv.linux.org.uk>
Cc: Johannes Weiner <hannes@cmpxchg.org>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
include/linux/mm.h
mm/internal.h
mm/mmap.c
mm/nommu.c
mm/util.c

index b530c99e8e8163222e128985bbef09d9c218cc05..d5eb8dddd7c012f1822d5f280f5cd362fce9e899 100644
--- a/include/linux/mm.h
+++ b/include/linux/mm.h
@@ -2013,7 +2013,7 @@ static inline void mm_populate(unsigned long addr, unsigned long len) {}
 /* These take the mm semaphore themselves */
 extern unsigned long vm_brk(unsigned long, unsigned long);
 extern int vm_munmap(unsigned long, size_t);
-extern unsigned long vm_mmap(struct file *, unsigned long,
+extern unsigned long __must_check vm_mmap(struct file *, unsigned long,
         unsigned long, unsigned long,
         unsigned long, unsigned long);
 
index bff7fd702331b594d7cd0927e8288b27eabddee2..a37e5b6f9d25ca1cfd31938eebee0f90054ed4de 100644
--- a/mm/internal.h
+++ b/mm/internal.h
@@ -444,8 +444,7 @@ extern u32 hwpoison_filter_enable;
 
 extern unsigned long  __must_check vm_mmap_pgoff(struct file *, unsigned long,
         unsigned long, unsigned long,
-        unsigned long, unsigned long,
-        bool);
+        unsigned long, unsigned long);
 
 extern void set_pageblock_order(void);
 unsigned long reclaim_clean_pages_from_list(struct zone *zone,
index 11e1f2ca72af0cd2c218d50378a11dcc98b14a83..420088682d4ac8d96ceb0f7eb5993d8984758773 100644
--- a/mm/mmap.c
+++ b/mm/mmap.c
@@ -1333,7 +1333,7 @@ SYSCALL_DEFINE6(mmap_pgoff, unsigned long, addr, unsigned long, len,
 
        flags &= ~(MAP_EXECUTABLE | MAP_DENYWRITE);
 
-       retval = vm_mmap_pgoff(file, addr, len, prot, flags, pgoff, true);
+       retval = vm_mmap_pgoff(file, addr, len, prot, flags, pgoff);
 out_fput:
        if (file)
                fput(file);
index b74512746aae82588421eef1f87588a3536709a5..c8bd59a03c71563b73c146d35e00550b8119d4c0 100644
--- a/mm/nommu.c
+++ b/mm/nommu.c
@@ -1446,7 +1446,7 @@ SYSCALL_DEFINE6(mmap_pgoff, unsigned long, addr, unsigned long, len,
 
        flags &= ~(MAP_EXECUTABLE | MAP_DENYWRITE);
 
-       retval = vm_mmap_pgoff(file, addr, len, prot, flags, pgoff, true);
+       retval = vm_mmap_pgoff(file, addr, len, prot, flags, pgoff);
 
        if (file)
                fput(file);
index 03b23774685081d77391505816bc00221d1a1ea8..917e0e3d0f8ee3a05edf78a665eace20635b3aa0 100644
--- a/mm/util.c
+++ b/mm/util.c
@@ -289,7 +289,7 @@ EXPORT_SYMBOL_GPL(get_user_pages_fast);
 
 unsigned long vm_mmap_pgoff(struct file *file, unsigned long addr,
        unsigned long len, unsigned long prot,
-       unsigned long flag, unsigned long pgoff, bool killable)
+       unsigned long flag, unsigned long pgoff)
 {
        unsigned long ret;
        struct mm_struct *mm = current->mm;
@@ -297,12 +297,8 @@ unsigned long vm_mmap_pgoff(struct file *file, unsigned long addr,
 
        ret = security_mmap_file(file, prot, flag);
        if (!ret) {
-               if (killable) {
-                       if (down_write_killable(&mm->mmap_sem))
-                               return -EINTR;
-               } else {
-                       down_write(&mm->mmap_sem);
-               }
+               if (down_write_killable(&mm->mmap_sem))
+                       return -EINTR;
                ret = do_mmap_pgoff(file, addr, len, prot, flag, pgoff,
                                    &populate);
                up_write(&mm->mmap_sem);
@@ -312,7 +308,6 @@ unsigned long vm_mmap_pgoff(struct file *file, unsigned long addr,
        return ret;
 }
 
-/* XXX are all callers checking an error */
 unsigned long vm_mmap(struct file *file, unsigned long addr,
        unsigned long len, unsigned long prot,
        unsigned long flag, unsigned long offset)
@@ -322,7 +317,7 @@ unsigned long vm_mmap(struct file *file, unsigned long addr,
        if (unlikely(offset_in_page(offset)))
                return -EINVAL;
 
-       return vm_mmap_pgoff(file, addr, len, prot, flag, offset >> PAGE_SHIFT, false);
+       return vm_mmap_pgoff(file, addr, len, prot, flag, offset >> PAGE_SHIFT);
 }
 EXPORT_SYMBOL(vm_mmap);
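
For reference, a minimal made-up illustration of what the new
__must_check annotation on vm_mmap() buys: a caller that drops the
return value now gets a -Wunused-result warning at build time.

	#include <linux/mm.h>
	#include <linux/mman.h>

	/* bad_caller() is hypothetical; with vm_mmap() marked __must_check,
	 * ignoring its return value triggers a compiler warning. */
	static void bad_caller(struct file *file)
	{
		vm_mmap(file, 0, PAGE_SIZE, PROT_READ, MAP_PRIVATE, 0);
	}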