Merge tag 'v3.10.56' into update
diff --git a/mm/oom_kill.c b/mm/oom_kill.c
index c629dfc8211d2cc58380bcd0114a768e4e083c92..50cbdb5de6eb373adb66aa0b4d07e6562e6e2c09 100644
@@ -47,19 +47,21 @@ static DEFINE_SPINLOCK(zone_scan_lock);
 #ifdef CONFIG_NUMA
 /**
  * has_intersects_mems_allowed() - check task eligibility for kill
- * @tsk: task struct of which task to consider
+ * @start: task struct whose thread group to consider
  * @mask: nodemask passed to page allocator for mempolicy ooms
  *
  * Task eligibility is determined by whether or not a candidate thread of
  * @start shares the same mempolicy nodes as current if it is bound by such
  * a policy, and whether or not it has the same set of allowed cpuset nodes.
  */
-static bool has_intersects_mems_allowed(struct task_struct *tsk,
+static bool has_intersects_mems_allowed(struct task_struct *start,
                                        const nodemask_t *mask)
 {
-       struct task_struct *start = tsk;
+       struct task_struct *tsk;
+       bool ret = false;
 
-       do {
+       rcu_read_lock();
+       for_each_thread(start, tsk) {
                if (mask) {
                        /*
                         * If this is a mempolicy constrained oom, tsk's
@@ -67,19 +69,20 @@ static bool has_intersects_mems_allowed(struct task_struct *tsk,
                         * mempolicy intersects current, otherwise it may be
                         * needlessly killed.
                         */
-                       if (mempolicy_nodemask_intersects(tsk, mask))
-                               return true;
+                       ret = mempolicy_nodemask_intersects(tsk, mask);
                } else {
                        /*
                         * This is not a mempolicy constrained oom, so only
                         * check the mems of tsk's cpuset.
                         */
-                       if (cpuset_mems_allowed_intersects(current, tsk))
-                               return true;
+                       ret = cpuset_mems_allowed_intersects(current, tsk);
                }
-       } while_each_thread(start, tsk);
+               if (ret)
+                       break;
+       }
+       rcu_read_unlock();
 
-       return false;
+       return ret;
 }
 #else
 static bool has_intersects_mems_allowed(struct task_struct *tsk,
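The conversion above is the template for the rest of the commit: the open-coded
do { ... } while_each_thread() loop becomes for_each_thread(), which walks the
thread group's signal->thread_head list and must run under rcu_read_lock().
Upstream made this switch because while_each_thread() is racy and can loop
forever if the walk races with exit_group(). A minimal sketch of the new
pattern, with a hypothetical thread_is_eligible() predicate standing in for the
mempolicy and cpuset checks:

        #include <linux/rcupdate.h>
        #include <linux/sched.h>

        /* Hypothetical per-thread predicate, standing in for the
         * mempolicy/cpuset intersection tests in the real code. */
        static bool thread_is_eligible(struct task_struct *tsk);

        static bool any_thread_eligible(struct task_struct *start)
        {
                struct task_struct *tsk;
                bool ret = false;

                rcu_read_lock();
                for_each_thread(start, tsk) {
                        if (thread_is_eligible(tsk)) {
                                ret = true;
                                break;
                        }
                }
                rcu_read_unlock();

                return ret;
        }

Accumulating into ret and breaking, rather than returning from inside the loop,
keeps a single rcu_read_unlock() exit path, which is why the hunk rewrites the
early returns as assignments.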
@@ -97,16 +100,21 @@ static bool has_intersects_mems_allowed(struct task_struct *tsk,
  */
 struct task_struct *find_lock_task_mm(struct task_struct *p)
 {
-       struct task_struct *t = p;
+       struct task_struct *t;
 
-       do {
+       rcu_read_lock();
+
+       for_each_thread(p, t) {
                task_lock(t);
                if (likely(t->mm))
-                       return t;
+                       goto found;
                task_unlock(t);
-       } while_each_thread(p, t);
+       }
+       t = NULL;
+found:
+       rcu_read_unlock();
 
-       return NULL;
+       return t;
 }
 
 /* return true if the task is not adequate as a candidate victim task. */
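find_lock_task_mm() now takes rcu_read_lock() itself rather than relying on its
callers (note the matching removal in oom_kill_process() below). Its contract
is unchanged: it returns a thread of @p that still has an mm, with task_lock()
held on that thread, or NULL if the whole group has already detached its mm;
the caller owns the unlock. A hedged usage sketch (group_rss_sketch() is an
invented name):

        #include <linux/mm.h>
        #include <linux/oom.h>
        #include <linux/sched.h>

        /* Sketch: read the group's RSS via whichever thread still owns
         * the mm. Returns 0 if every thread has passed exit_mm(). */
        static unsigned long group_rss_sketch(struct task_struct *p)
        {
                struct task_struct *t;
                unsigned long rss = 0;

                t = find_lock_task_mm(p);
                if (t) {
                        rss = get_mm_rss(t->mm); /* t->mm pinned by task_lock(t) */
                        task_unlock(t);
                }
                return rss;
        }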
@@ -301,7 +309,7 @@ static struct task_struct *select_bad_process(unsigned int *ppoints,
        unsigned long chosen_points = 0;
 
        rcu_read_lock();
-       do_each_thread(g, p) {
+       for_each_process_thread(g, p) {
                unsigned int points;
 
                switch (oom_scan_process_thread(p, totalpages, nodemask,
@@ -323,7 +331,7 @@ static struct task_struct *select_bad_process(unsigned int *ppoints,
                        chosen = p;
                        chosen_points = points;
                }
-       } while_each_thread(g, p);
+       }
        if (chosen)
                get_task_struct(chosen);
        rcu_read_unlock();
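The victim-selection walk swaps the do_each_thread()/while_each_thread() pair
for the single for_each_process_thread() macro, which iterates every thread of
every process and, like for_each_thread(), relies on the caller holding
rcu_read_lock(). A sketch of the same shape, with a hypothetical score_task()
standing in for oom_badness():

        /* Hypothetical scoring function, standing in for oom_badness(). */
        static unsigned int score_task(struct task_struct *p);

        static struct task_struct *pick_worst_sketch(void)
        {
                struct task_struct *g, *p, *chosen = NULL;
                unsigned int chosen_points = 0;

                rcu_read_lock();
                for_each_process_thread(g, p) {
                        unsigned int points = score_task(p);

                        if (points > chosen_points) {
                                chosen = p;
                                chosen_points = points;
                        }
                }
                if (chosen)
                        get_task_struct(chosen); /* pin before dropping RCU */
                rcu_read_unlock();

                return chosen;
        }

get_task_struct() pins the chosen task before the RCU section ends, exactly as
the real code does above.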
@@ -406,7 +414,7 @@ void oom_kill_process(struct task_struct *p, gfp_t gfp_mask, int order,
 {
        struct task_struct *victim = p;
        struct task_struct *child;
-       struct task_struct *t = p;
+       struct task_struct *t;
        struct mm_struct *mm;
        unsigned int victim_points = 0;
        static DEFINE_RATELIMIT_STATE(oom_rs, DEFAULT_RATELIMIT_INTERVAL,
@@ -437,7 +445,7 @@ void oom_kill_process(struct task_struct *p, gfp_t gfp_mask, int order,
         * still freeing memory.
         */
        read_lock(&tasklist_lock);
-       do {
+       for_each_thread(p, t) {
                list_for_each_entry(child, &t->children, sibling) {
                        unsigned int child_points;
 
@@ -466,13 +474,11 @@ void oom_kill_process(struct task_struct *p, gfp_t gfp_mask, int order,
                                get_task_struct(victim);
                        }
                }
-       } while_each_thread(p, t);
+       }
        read_unlock(&tasklist_lock);
 
-       rcu_read_lock();
        p = find_lock_task_mm(victim);
        if (!p) {
-               rcu_read_unlock();
                put_task_struct(victim);
                return;
        } else if (victim != p) {
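Two things happen in the hunks above. First, the child-preference scan now
nests list_for_each_entry() over each thread's ->children list inside
for_each_thread(), with read_lock(&tasklist_lock) keeping the child lists
stable. Second, the caller-side rcu_read_lock()/rcu_read_unlock() around
find_lock_task_mm() is dropped, since the helper now handles RCU internally.
A sketch of the nested scan, with a hypothetical child_score():

        /* Hypothetical child scoring, standing in for oom_badness(). */
        static unsigned int child_score(struct task_struct *child);

        static struct task_struct *worst_child_sketch(struct task_struct *p)
        {
                struct task_struct *t, *child, *worst = NULL;
                unsigned int best = 0;

                read_lock(&tasklist_lock);
                for_each_thread(p, t) {
                        list_for_each_entry(child, &t->children, sibling) {
                                unsigned int points = child_score(child);

                                if (points > best) {
                                        best = points;
                                        worst = child;
                                }
                        }
                }
                if (worst)
                        get_task_struct(worst); /* caller must put_task_struct() */
                read_unlock(&tasklist_lock);

                return worst;
        }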
@@ -498,6 +504,7 @@ void oom_kill_process(struct task_struct *p, gfp_t gfp_mask, int order,
         * That thread will now get access to memory reserves since it has a
         * pending fatal signal.
         */
+       rcu_read_lock();
        for_each_process(p)
                if (p->mm == mm && !same_thread_group(p, victim) &&
                    !(p->flags & PF_KTHREAD)) {
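The rcu_read_lock() re-added here is the counterpart of the one deleted above:
with find_lock_task_mm() managing its own RCU section, the final sweep over
every process sharing the victim's mm needs an explicit one. A sketch of that
sweep's shape (kill_mm_sharers_sketch() is an invented name; the real function
also logs each kill and applies further checks before signalling):

        /* Sketch: processes sharing @mm with @victim (e.g. via
         * clone(CLONE_VM) without CLONE_THREAD) must also be killed, or
         * the victim's memory cannot actually be freed. Kernel threads
         * are skipped since they may only be borrowing a user mm. */
        static void kill_mm_sharers_sketch(struct mm_struct *mm,
                                           struct task_struct *victim)
        {
                struct task_struct *p;

                rcu_read_lock();
                for_each_process(p) {
                        if (p->mm == mm && !same_thread_group(p, victim) &&
                            !(p->flags & PF_KTHREAD))
                                do_send_sig_info(SIGKILL, SEND_SIG_FORCED,
                                                 p, true);
                }
                rcu_read_unlock();
        }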