mm: memcg: enable memcg OOM killer only for user faults
author     Johannes Weiner <hannes@cmpxchg.org>
           Thu, 12 Sep 2013 22:13:42 +0000 (15:13 -0700)
committer  Greg Kroah-Hartman <gregkh@linuxfoundation.org>
           Fri, 21 Nov 2014 17:22:56 +0000 (09:22 -0800)
commit 519e52473ebe9db5cdef44670d5a97f1fd53d721 upstream.

System calls and kernel faults (uaccess, gup) can handle an
out-of-memory situation gracefully and just return -ENOMEM.

Enable the memcg OOM killer only for user faults, where it's really the
only option available.
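
For illustration only (not part of this commit): with the memcg OOM
killer disabled for the current task, a kernel-internal charge site
simply sees -ENOMEM from a failed charge and can unwind.  In the
sketch below, add_to_page_cache_lru() is a real charging call, but the
wrapper function is hypothetical:

	/*
	 * Hypothetical sketch: a syscall path tolerating a failed
	 * memcg charge.  With current->memcg_oom.may_oom clear, the
	 * charge fails with -ENOMEM instead of invoking the OOM killer.
	 */
	static int example_add_page(struct page *page,
				    struct address_space *mapping,
				    pgoff_t index)
	{
		int err;

		err = add_to_page_cache_lru(page, mapping, index, GFP_KERNEL);
		if (err)
			return err;	/* propagate -ENOMEM gracefully */
		return 0;
	}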

Signed-off-by: Johannes Weiner <hannes@cmpxchg.org>
Acked-by: Michal Hocko <mhocko@suse.cz>
Cc: David Rientjes <rientjes@google.com>
Cc: KAMEZAWA Hiroyuki <kamezawa.hiroyu@jp.fujitsu.com>
Cc: azurIt <azurit@pobox.sk>
Cc: KOSAKI Motohiro <kosaki.motohiro@jp.fujitsu.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
Signed-off-by: Cong Wang <xiyou.wangcong@gmail.com>
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
include/linux/memcontrol.h
include/linux/sched.h
mm/filemap.c
mm/memcontrol.c
mm/memory.c

index d6183f06d8c182951fac67c17a2c05d2ce9f20fa..2c911c95b1ac17304e39969a3d70cc55a6d0a9f2 100644
@@ -124,6 +124,37 @@ extern void mem_cgroup_print_oom_info(struct mem_cgroup *memcg,
 extern void mem_cgroup_replace_page_cache(struct page *oldpage,
                                        struct page *newpage);
 
+/**
+ * mem_cgroup_toggle_oom - toggle the memcg OOM killer for the current task
+ * @new: true to enable, false to disable
+ *
+ * Toggle whether a failed memcg charge should invoke the OOM killer
+ * or just return -ENOMEM.  Returns the previous toggle state.
+ */
+static inline bool mem_cgroup_toggle_oom(bool new)
+{
+       bool old;
+
+       old = current->memcg_oom.may_oom;
+       current->memcg_oom.may_oom = new;
+
+       return old;
+}
+
+static inline void mem_cgroup_enable_oom(void)
+{
+       bool old = mem_cgroup_toggle_oom(true);
+
+       WARN_ON(old == true);
+}
+
+static inline void mem_cgroup_disable_oom(void)
+{
+       bool old = mem_cgroup_toggle_oom(false);
+
+       WARN_ON(old == false);
+}
+
 #ifdef CONFIG_MEMCG_SWAP
 extern int do_swap_account;
 #endif
@@ -347,6 +378,19 @@ static inline void mem_cgroup_end_update_page_stat(struct page *page,
 {
 }
 
+static inline bool mem_cgroup_toggle_oom(bool new)
+{
+       return false;
+}
+
+static inline void mem_cgroup_enable_oom(void)
+{
+}
+
+static inline void mem_cgroup_disable_oom(void)
+{
+}
+
 static inline void mem_cgroup_inc_page_stat(struct page *page,
                                            enum mem_cgroup_page_stat_item idx)
 {
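
mem_cgroup_toggle_oom() deliberately returns the previous state so
that callers can nest: a section that must not OOM saves the old value
and restores it afterwards instead of unconditionally re-enabling.  A
minimal sketch of that pattern (do_optional_work() is a hypothetical
placeholder, not a real kernel function):

	static void example_optional_allocation(void)
	{
		bool memcg_oom = mem_cgroup_toggle_oom(false);

		/* charges in here fail with -ENOMEM, never OOM-kill */
		do_optional_work();

		/* restore the saved state, don't assume it was true */
		mem_cgroup_toggle_oom(memcg_oom);
	}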
index f87e9a8d364ffb72e4b2d7f461dbab24e8241b46..42a58ce480bcf466537e3361cd0b475dec32d972 100644
@@ -1411,6 +1411,9 @@ struct task_struct {
                unsigned long memsw_nr_pages; /* uncharged mem+swap usage */
        } memcg_batch;
        unsigned int memcg_kmem_skip_account;
+       struct memcg_oom_info {
+               unsigned int may_oom:1;
+       } memcg_oom;
 #endif
 #ifdef CONFIG_HAVE_HW_BREAKPOINT
        atomic_t ptrace_bp_refcnt;
index 7905fe721aa8ab3db06c957c9f2cc63cea1fee5f..c466f4c449e9e96dc9e2c0f836d5857e9efe6e2d 100644
@@ -1614,6 +1614,7 @@ int filemap_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
        struct inode *inode = mapping->host;
        pgoff_t offset = vmf->pgoff;
        struct page *page;
+       bool memcg_oom;
        pgoff_t size;
        int ret = 0;
 
@@ -1622,7 +1623,11 @@ int filemap_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
                return VM_FAULT_SIGBUS;
 
        /*
-        * Do we have something in the page cache already?
+        * Do we have something in the page cache already?  Either
+        * way, try readahead, but disable the memcg OOM killer for it
+        * as readahead is optional and no errors are propagated up
+        * the fault stack.  The OOM killer is enabled while trying to
+        * instantiate the faulting page individually below.
         */
        page = find_get_page(mapping, offset);
        if (likely(page) && !(vmf->flags & FAULT_FLAG_TRIED)) {
@@ -1630,10 +1635,14 @@ int filemap_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
                 * We found the page, so try async readahead before
                 * waiting for the lock.
                 */
+               memcg_oom = mem_cgroup_toggle_oom(false);
                do_async_mmap_readahead(vma, ra, file, page, offset);
+               mem_cgroup_toggle_oom(memcg_oom);
        } else if (!page) {
                /* No page in the page cache at all */
+               memcg_oom = mem_cgroup_toggle_oom(false);
                do_sync_mmap_readahead(vma, ra, file, offset);
+               mem_cgroup_toggle_oom(memcg_oom);
                count_vm_event(PGMAJFAULT);
                mem_cgroup_count_vm_event(vma->vm_mm, PGMAJFAULT);
                ret = VM_FAULT_MAJOR;
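
The resulting nesting for a user fault through this path, sketched as
the intended may_oom states (assuming FAULT_FLAG_USER is set; see the
memory.c hunk below):

	/*
	 * handle_mm_fault()                  may_oom = 1
	 *   filemap_fault()
	 *     mem_cgroup_toggle_oom(false)   may_oom = 0, old = 1
	 *     do_sync_mmap_readahead()       charges fail with -ENOMEM
	 *     mem_cgroup_toggle_oom(old)     may_oom = 1 again
	 *     (faulting page instantiated)   charge may invoke OOM killer
	 */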
index f45e21ab9cea8531acd4bde77e9b4e3b63866f96..c0607d27ce870c3598d6d82b9c6dc25fd3fd5d55 100644
@@ -2613,7 +2613,7 @@ static int mem_cgroup_do_charge(struct mem_cgroup *memcg, gfp_t gfp_mask,
                return CHARGE_RETRY;
 
        /* If we don't need to call oom-killer at all, return immediately */
-       if (!oom_check)
+       if (!oom_check || !current->memcg_oom.may_oom)
                return CHARGE_NOMEM;
        /* check OOM */
        if (!mem_cgroup_handle_oom(mem_over_limit, gfp_mask, get_order(csize)))
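
Both conditions now have to hold before the OOM path is entered; the
new test is equivalent to the following hypothetical helper
(illustrative only, not added by the patch):

	static inline bool charge_may_oom(bool oom_check)
	{
		/* kernel-context charges keep may_oom clear, get -ENOMEM */
		return oom_check && current->memcg_oom.may_oom;
	}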
index ebe0f285c0e7094b431c726f752aa354f61eff80..59f450c5c0a3d8f87577021805f2f61a651ab10d 100644
@@ -3754,22 +3754,14 @@ unlock:
 /*
  * By the time we get here, we already hold the mm semaphore
  */
-int handle_mm_fault(struct mm_struct *mm, struct vm_area_struct *vma,
-               unsigned long address, unsigned int flags)
+static int __handle_mm_fault(struct mm_struct *mm, struct vm_area_struct *vma,
+                            unsigned long address, unsigned int flags)
 {
        pgd_t *pgd;
        pud_t *pud;
        pmd_t *pmd;
        pte_t *pte;
 
-       __set_current_state(TASK_RUNNING);
-
-       count_vm_event(PGFAULT);
-       mem_cgroup_count_vm_event(mm, PGFAULT);
-
-       /* do counter updates before entering really critical section. */
-       check_sync_rss_stat(current);
-
        if (unlikely(is_vm_hugetlb_page(vma)))
                return hugetlb_fault(mm, vma, address, flags);
 
@@ -3850,6 +3842,34 @@ retry:
        return handle_pte_fault(mm, vma, address, pte, pmd, flags);
 }
 
+int handle_mm_fault(struct mm_struct *mm, struct vm_area_struct *vma,
+                   unsigned long address, unsigned int flags)
+{
+       int ret;
+
+       __set_current_state(TASK_RUNNING);
+
+       count_vm_event(PGFAULT);
+       mem_cgroup_count_vm_event(mm, PGFAULT);
+
+       /* do counter updates before entering really critical section. */
+       check_sync_rss_stat(current);
+
+       /*
+        * Enable the memcg OOM handling for faults triggered in user
+        * space.  Kernel faults are handled more gracefully.
+        */
+       if (flags & FAULT_FLAG_USER)
+               mem_cgroup_enable_oom();
+
+       ret = __handle_mm_fault(mm, vma, address, flags);
+
+       if (flags & FAULT_FLAG_USER)
+               mem_cgroup_disable_oom();
+
+       return ret;
+}
+
 #ifndef __PAGETABLE_PUD_FOLDED
 /*
  * Allocate page upper directory.
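
handle_mm_fault() only brackets the fault with enable/disable when the
architecture passes FAULT_FLAG_USER, which the per-arch fault handlers
set for faults taken in user mode (wired up separately in this patch
series).  A hedged sketch of such a call site, with hypothetical
function and parameter names:

	static int example_do_page_fault(struct pt_regs *regs,
					 struct mm_struct *mm,
					 struct vm_area_struct *vma,
					 unsigned long address)
	{
		unsigned int flags = FAULT_FLAG_ALLOW_RETRY | FAULT_FLAG_KILLABLE;

		/* only user-mode faults may trigger the memcg OOM killer */
		if (user_mode(regs))
			flags |= FAULT_FLAG_USER;

		return handle_mm_fault(mm, vma, address, flags);
	}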