diff --git a/mm/oom_kill.c b/mm/oom_kill.c
index 2958fd8e7c9abcfcf6cc7b38e2dac34133905b22..46bf2ed5594c73a86cbbe61fec40669bcfea74a3 100644
--- a/mm/oom_kill.c
+++ b/mm/oom_kill.c
@@ -34,6 +34,7 @@
 #include <linux/ptrace.h>
 #include <linux/freezer.h>
 #include <linux/ftrace.h>
+#include <linux/ratelimit.h>
 
 #define CREATE_TRACE_POINTS
 #include <trace/events/oom.h>
@@ -309,7 +310,7 @@ static enum oom_constraint constrained_alloc(struct zonelist *zonelist,
  */
 static struct task_struct *select_bad_process(unsigned int *ppoints,
                unsigned long totalpages, struct mem_cgroup *memcg,
-               const nodemask_t *nodemask)
+               const nodemask_t *nodemask, bool force_kill)
 {
        struct task_struct *g, *p;
        struct task_struct *chosen = NULL;
@@ -335,7 +336,8 @@ static struct task_struct *select_bad_process(unsigned int *ppoints,
                if (test_tsk_thread_flag(p, TIF_MEMDIE)) {
                        if (unlikely(frozen(p)))
                                __thaw_task(p);
-                       return ERR_PTR(-1UL);
+                       if (!force_kill)
+                               return ERR_PTR(-1UL);
                }
                if (!p->mm)
                        continue;
@@ -353,7 +355,7 @@ static struct task_struct *select_bad_process(unsigned int *ppoints,
                        if (p == current) {
                                chosen = p;
                                *ppoints = 1000;
-                       } else {
+                       } else if (!force_kill) {
                                /*
                                 * If this task is not being ptraced on exit,
                                 * then wait for it to finish before killing
@@ -434,66 +436,18 @@ static void dump_header(struct task_struct *p, gfp_t gfp_mask, int order,
 }
 
 #define K(x) ((x) << (PAGE_SHIFT-10))
-static int oom_kill_task(struct task_struct *p)
-{
-       struct task_struct *q;
-       struct mm_struct *mm;
-
-       p = find_lock_task_mm(p);
-       if (!p)
-               return 1;
-
-       /* mm cannot be safely dereferenced after task_unlock(p) */
-       mm = p->mm;
-
-       pr_err("Killed process %d (%s) total-vm:%lukB, anon-rss:%lukB, file-rss:%lukB\n",
-               task_pid_nr(p), p->comm, K(p->mm->total_vm),
-               K(get_mm_counter(p->mm, MM_ANONPAGES)),
-               K(get_mm_counter(p->mm, MM_FILEPAGES)));
-       task_unlock(p);
-
-       /*
-        * Kill all user processes sharing p->mm in other thread groups, if any.
-        * They don't get access to memory reserves or a higher scheduler
-        * priority, though, to avoid depletion of all memory or task
-        * starvation.  This prevents mm->mmap_sem livelock when an oom killed
-        * task cannot exit because it requires the semaphore and its contended
-        * by another thread trying to allocate memory itself.  That thread will
-        * now get access to memory reserves since it has a pending fatal
-        * signal.
-        */
-       for_each_process(q)
-               if (q->mm == mm && !same_thread_group(q, p) &&
-                   !(q->flags & PF_KTHREAD)) {
-                       if (q->signal->oom_score_adj == OOM_SCORE_ADJ_MIN)
-                               continue;
-
-                       task_lock(q);   /* Protect ->comm from prctl() */
-                       pr_err("Kill process %d (%s) sharing same memory\n",
-                               task_pid_nr(q), q->comm);
-                       task_unlock(q);
-                       force_sig(SIGKILL, q);
-               }
-
-       set_tsk_thread_flag(p, TIF_MEMDIE);
-       force_sig(SIGKILL, p);
-
-       return 0;
-}
-#undef K
-
-static int oom_kill_process(struct task_struct *p, gfp_t gfp_mask, int order,
-                           unsigned int points, unsigned long totalpages,
-                           struct mem_cgroup *memcg, nodemask_t *nodemask,
-                           const char *message)
+static void oom_kill_process(struct task_struct *p, gfp_t gfp_mask, int order,
+                            unsigned int points, unsigned long totalpages,
+                            struct mem_cgroup *memcg, nodemask_t *nodemask,
+                            const char *message)
 {
        struct task_struct *victim = p;
        struct task_struct *child;
        struct task_struct *t = p;
+       struct mm_struct *mm;
        unsigned int victim_points = 0;
-
-       if (printk_ratelimit())
-               dump_header(p, gfp_mask, order, memcg, nodemask);
+       static DEFINE_RATELIMIT_STATE(oom_rs, DEFAULT_RATELIMIT_INTERVAL,
+                                             DEFAULT_RATELIMIT_BURST);
 
        /*
         * If the task is already exiting, don't alarm the sysadmin or kill
@@ -501,9 +455,12 @@ static int oom_kill_process(struct task_struct *p, gfp_t gfp_mask, int order,
         */
        if (p->flags & PF_EXITING) {
                set_tsk_thread_flag(p, TIF_MEMDIE);
-               return 0;
+               return;
        }
 
+       if (__ratelimit(&oom_rs))
+               dump_header(p, gfp_mask, order, memcg, nodemask);
+
        task_lock(p);
        pr_err("%s: Kill process %d (%s) score %d or sacrifice child\n",
                message, task_pid_nr(p), p->comm, points);
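
The added include of linux/ratelimit.h and the DEFINE_RATELIMIT_STATE / __ratelimit() pair above replace the global printk_ratelimit() check, so the OOM header dump is throttled by its own bucket instead of competing with every other rate-limited printk in the kernel. A minimal sketch of that pattern, with a hypothetical function name that is not part of this patch:

```c
#include <linux/kernel.h>
#include <linux/ratelimit.h>

/*
 * Per-callsite ratelimit bucket: admits a burst of
 * DEFAULT_RATELIMIT_BURST messages every DEFAULT_RATELIMIT_INTERVAL
 * jiffies, independent of the shared printk_ratelimit() state.
 */
static void report_condition(void)
{
	static DEFINE_RATELIMIT_STATE(my_rs, DEFAULT_RATELIMIT_INTERVAL,
					     DEFAULT_RATELIMIT_BURST);

	/* __ratelimit() returns nonzero when this message may go through */
	if (__ratelimit(&my_rs))
		pr_err("expensive diagnostic output\n");
}
```
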
@@ -533,8 +490,44 @@ static int oom_kill_process(struct task_struct *p, gfp_t gfp_mask, int order,
                }
        } while_each_thread(p, t);
 
-       return oom_kill_task(victim);
+       victim = find_lock_task_mm(victim);
+       if (!victim)
+               return;
+
+       /* mm cannot safely be dereferenced after task_unlock(victim) */
+       mm = victim->mm;
+       pr_err("Killed process %d (%s) total-vm:%lukB, anon-rss:%lukB, file-rss:%lukB\n",
+               task_pid_nr(victim), victim->comm, K(victim->mm->total_vm),
+               K(get_mm_counter(victim->mm, MM_ANONPAGES)),
+               K(get_mm_counter(victim->mm, MM_FILEPAGES)));
+       task_unlock(victim);
+
+       /*
+        * Kill all user processes sharing victim->mm in other thread groups, if
+        * any.  They don't get access to memory reserves, though, to avoid
+        * depletion of all memory.  This prevents mm->mmap_sem livelock when an
+        * oom killed thread cannot exit because it requires the semaphore and
+        * it's contended by another thread trying to allocate memory itself.
+        * That thread will now get access to memory reserves since it has a
+        * pending fatal signal.
+        */
+       for_each_process(p)
+               if (p->mm == mm && !same_thread_group(p, victim) &&
+                   !(p->flags & PF_KTHREAD)) {
+                       if (p->signal->oom_score_adj == OOM_SCORE_ADJ_MIN)
+                               continue;
+
+                       task_lock(p);   /* Protect ->comm from prctl() */
+                       pr_err("Kill process %d (%s) sharing same memory\n",
+                               task_pid_nr(p), p->comm);
+                       task_unlock(p);
+                       do_send_sig_info(SIGKILL, SEND_SIG_FORCED, p, true);
+               }
+
+       set_tsk_thread_flag(victim, TIF_MEMDIE);
+       do_send_sig_info(SIGKILL, SEND_SIG_FORCED, victim, true);
 }
+#undef K
 
 /*
  * Determines whether the kernel must panic because of the panic_on_oom sysctl.
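
The comment above the for_each_process() loop leans on one allocator-side fact, stated in the comment itself: a task that has TIF_MEMDIE set, or that now carries a forced SIGKILL, is allowed to dip into memory reserves so it can complete its allocation, drop mm->mmap_sem and exit. A simplified, assumed illustration of that condition follows; the helper name is invented and this is not the actual page allocator code:

```c
#include <linux/sched.h>

/*
 * Assumed simplification: an OOM victim (or a task with a pending fatal
 * signal) may bypass the normal watermark checks so it can allocate the
 * little it still needs to release mm->mmap_sem and exit, which is what
 * unblocks the thread contending for the semaphore.
 */
static bool oom_victim_may_use_reserves(struct task_struct *tsk)
{
	return test_tsk_thread_flag(tsk, TIF_MEMDIE) ||
	       fatal_signal_pending(tsk);
}
```
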
@@ -561,7 +554,8 @@ static void check_panic_on_oom(enum oom_constraint constraint, gfp_t gfp_mask,
 }
 
 #ifdef CONFIG_CGROUP_MEM_RES_CTLR
-void mem_cgroup_out_of_memory(struct mem_cgroup *memcg, gfp_t gfp_mask)
+void mem_cgroup_out_of_memory(struct mem_cgroup *memcg, gfp_t gfp_mask,
+                             int order)
 {
        unsigned long limit;
        unsigned int points = 0;
@@ -577,18 +571,13 @@ void mem_cgroup_out_of_memory(struct mem_cgroup *memcg, gfp_t gfp_mask)
                return;
        }
 
-       check_panic_on_oom(CONSTRAINT_MEMCG, gfp_mask, 0, NULL);
+       check_panic_on_oom(CONSTRAINT_MEMCG, gfp_mask, order, NULL);
        limit = mem_cgroup_get_limit(memcg) >> PAGE_SHIFT;
        read_lock(&tasklist_lock);
-retry:
-       p = select_bad_process(&points, limit, memcg, NULL);
-       if (!p || PTR_ERR(p) == -1UL)
-               goto out;
-
-       if (oom_kill_process(p, gfp_mask, 0, points, limit, memcg, NULL,
-                               "Memory cgroup out of memory"))
-               goto retry;
-out:
+       p = select_bad_process(&points, limit, memcg, NULL, false);
+       if (p && PTR_ERR(p) != -1UL)
+               oom_kill_process(p, gfp_mask, order, points, limit, memcg, NULL,
+                                "Memory cgroup out of memory");
        read_unlock(&tasklist_lock);
 }
 #endif
@@ -700,6 +689,7 @@ static void clear_system_oom(void)
  * @gfp_mask: memory allocation flags
  * @order: amount of memory being requested as a power of 2
  * @nodemask: nodemask passed to page allocator
+ * @force_kill: true if a task must be killed, even if others are exiting
  *
  * If we run out of memory, we have the choice between either
  * killing a random task (bad), letting the system crash (worse)
@@ -707,7 +697,7 @@ static void clear_system_oom(void)
  * don't have to be perfect here, we just have to be good.
  */
 void out_of_memory(struct zonelist *zonelist, gfp_t gfp_mask,
-               int order, nodemask_t *nodemask)
+               int order, nodemask_t *nodemask, bool force_kill)
 {
        const nodemask_t *mpol_mask;
        struct task_struct *p;
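
With force_kill threaded through, a caller can demand that a victim be killed even when select_bad_process() would otherwise back off because some task already has TIF_MEMDIE set or is mid-exit. A hedged sketch of such a caller, for example a SysRq-style manual trigger; the function name and zonelist choice are illustrative assumptions, not part of this patch:

```c
#include <linux/oom.h>
#include <linux/gfp.h>

/*
 * Hypothetical manual trigger: force the OOM killer to pick and kill a
 * victim now, even if another task is already exiting (force_kill = true).
 */
static void manual_oom_kill(void)
{
	out_of_memory(node_zonelist(0, GFP_KERNEL), GFP_KERNEL, 0, NULL, true);
}
```
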
@@ -745,33 +735,25 @@ void out_of_memory(struct zonelist *zonelist, gfp_t gfp_mask,
        if (sysctl_oom_kill_allocating_task &&
            !oom_unkillable_task(current, NULL, nodemask) &&
            current->mm) {
-               /*
-                * oom_kill_process() needs tasklist_lock held.  If it returns
-                * non-zero, current could not be killed so we must fallback to
-                * the tasklist scan.
-                */
-               if (!oom_kill_process(current, gfp_mask, order, 0, totalpages,
-                               NULL, nodemask,
-                               "Out of memory (oom_kill_allocating_task)"))
-                       goto out;
-       }
-
-retry:
-       p = select_bad_process(&points, totalpages, NULL, mpol_mask);
-       if (PTR_ERR(p) == -1UL)
+               oom_kill_process(current, gfp_mask, order, 0, totalpages, NULL,
+                                nodemask,
+                                "Out of memory (oom_kill_allocating_task)");
                goto out;
+       }
 
+       p = select_bad_process(&points, totalpages, NULL, mpol_mask,
+                              force_kill);
        /* Found nothing?!?! Either we hang forever, or we panic. */
        if (!p) {
                dump_header(NULL, gfp_mask, order, NULL, mpol_mask);
                read_unlock(&tasklist_lock);
                panic("Out of memory and no killable processes...\n");
        }
-
-       if (oom_kill_process(p, gfp_mask, order, points, totalpages, NULL,
-                               nodemask, "Out of memory"))
-               goto retry;
-       killed = 1;
+       if (PTR_ERR(p) != -1UL) {
+               oom_kill_process(p, gfp_mask, order, points, totalpages, NULL,
+                                nodemask, "Out of memory");
+               killed = 1;
+       }
 out:
        read_unlock(&tasklist_lock);
 
@@ -792,7 +774,7 @@ out:
 void pagefault_out_of_memory(void)
 {
        if (try_set_system_oom()) {
-               out_of_memory(NULL, 0, 0, NULL);
+               out_of_memory(NULL, 0, 0, NULL, false);
                clear_system_oom();
        }
        if (!test_thread_flag(TIF_MEMDIE))