sched: Rework check_for_tasks()
Author: Kirill Tkhai <ktkhai@parallels.com>
Wed, 25 Jun 2014 08:19:55 +0000 (12:19 +0400)
Committer: Ingo Molnar <mingo@kernel.org>
Sat, 5 Jul 2014 09:17:45 +0000 (11:17 +0200)
1) Iterate through all threads in the system.
   Check all threads, not only group leaders.

2) Check p->on_rq instead of p->state and cputime.
   A preempted task in a !TASK_RUNNING state, or a
   just-created task, may still be queued, and we
   want such tasks to be reported too.

3) Use read_lock() instead of write_lock().
   This function does not modify any structures,
   so read_lock() is sufficient.

Signed-off-by: Kirill Tkhai <ktkhai@parallels.com>
Reviewed-by: Srikar Dronamraju <srikar@linux.vnet.ibm.com>
Cc: Andrew Morton <akpm@linux-foundation.org>
Cc: Ben Segall <bsegall@google.com>
Cc: Fabian Frederick <fabf@skynet.be>
Cc: Gautham R. Shenoy <ego@linux.vnet.ibm.com>
Cc: Konstantin Khorenko <khorenko@parallels.com>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Michael wang <wangyun@linux.vnet.ibm.com>
Cc: Mike Galbraith <umgwanakikbuti@gmail.com>
Cc: Paul Gortmaker <paul.gortmaker@windriver.com>
Cc: Paul Turner <pjt@google.com>
Cc: Rafael J. Wysocki <rafael.j.wysocki@intel.com>
Cc: Srivatsa S. Bhat <srivatsa.bhat@linux.vnet.ibm.com>
Cc: Todd E Brandt <todd.e.brandt@linux.intel.com>
Cc: Toshi Kani <toshi.kani@hp.com>
Signed-off-by: Peter Zijlstra <peterz@infradead.org>
Link: http://lkml.kernel.org/r/1403684395.3462.44.camel@tkhai
Signed-off-by: Ingo Molnar <mingo@kernel.org>
kernel/cpu.c

index a343bde710b1025d6519dd6efc7d890bdff86118..81e2a388a0f687eeb472bf33a4e24dd763ef34cf 100644 (file)
@@ -274,21 +274,28 @@ void clear_tasks_mm_cpumask(int cpu)
        rcu_read_unlock();
 }
 
-static inline void check_for_tasks(int cpu)
+static inline void check_for_tasks(int dead_cpu)
 {
-       struct task_struct *p;
-       cputime_t utime, stime;
+       struct task_struct *g, *p;
 
-       write_lock_irq(&tasklist_lock);
-       for_each_process(p) {
-               task_cputime(p, &utime, &stime);
-               if (task_cpu(p) == cpu && p->state == TASK_RUNNING &&
-                   (utime || stime))
-                       pr_warn("Task %s (pid = %d) is on cpu %d (state = %ld, flags = %x)\n",
-                               p->comm, task_pid_nr(p), cpu,
-                               p->state, p->flags);
-       }
-       write_unlock_irq(&tasklist_lock);
+       read_lock_irq(&tasklist_lock);
+       do_each_thread(g, p) {
+               if (!p->on_rq)
+                       continue;
+               /*
+                * We do the check with unlocked task_rq(p)->lock.
+                * Order the reading to do not warn about a task,
+                * which was running on this cpu in the past, and
+                * it's just been woken on another cpu.
+                */
+               rmb();
+               if (task_cpu(p) != dead_cpu)
+                       continue;
+
+               pr_warn("Task %s (pid=%d) is on cpu %d (state=%ld, flags=%x)\n",
+                       p->comm, task_pid_nr(p), dead_cpu, p->state, p->flags);
+       } while_each_thread(g, p);
+       read_unlock_irq(&tasklist_lock);
 }
 
 struct take_cpu_down_param {