tracing: Don't assume the possible cpu list has contiguous numbers
author	KOSAKI Motohiro <kosaki.motohiro@jp.fujitsu.com>
Tue, 10 Mar 2009 01:49:53 +0000 (10:49 +0900)
committer	Ingo Molnar <mingo@elte.hu>
Tue, 10 Mar 2009 09:20:30 +0000 (10:20 +0100)
"for (++cpu ; cpu < num_possible_cpus(); cpu++)" statement assumes
possible cpus have continuous number - but that's a wrong assumption.

Instead, cpumask_next() should be used.
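A minimal sketch of the two iteration patterns (not part of the patch;
do_something() is just an illustrative placeholder, while cpumask_next(),
cpu_possible_mask and nr_cpu_ids are the standard kernel symbols):

	/*
	 * Wrong: num_possible_cpus() is a count of possible cpus, not an
	 * upper bound on cpu ids.  With a sparse possible map this loop
	 * can stop before the last possible cpu and can visit ids that
	 * are not possible at all.
	 */
	for (++cpu; cpu < num_possible_cpus(); cpu++)
		do_something(cpu);

	/*
	 * Right: walk cpu_possible_mask explicitly.  cpumask_next()
	 * returns the next possible cpu after 'cpu', or a value
	 * >= nr_cpu_ids when there is none left.
	 */
	for (cpu = cpumask_next(cpu, cpu_possible_mask);
	     cpu < nr_cpu_ids;
	     cpu = cpumask_next(cpu, cpu_possible_mask))
		do_something(cpu);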

Signed-off-by: KOSAKI Motohiro <kosaki.motohiro@jp.fujitsu.com>
Cc: Lai Jiangshan <laijs@cn.fujitsu.com>
Cc: Steven Rostedt <srostedt@redhat.com>
Cc: Frederic Weisbecker <fweisbec@gmail.com>
LKML-Reference: <20090310104437.A480.A69D9226@jp.fujitsu.com>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
kernel/trace/trace_workqueue.c

index 46c8dc896bd319c508d1e75c0e3420b038249151..739fdacf873bd84564992cdb1fe90698ed3d9ae5 100644 (file)
@@ -91,7 +91,7 @@ static void probe_workqueue_creation(struct task_struct *wq_thread, int cpu)
        struct cpu_workqueue_stats *cws;
        unsigned long flags;
 
-       WARN_ON(cpu < 0 || cpu >= num_possible_cpus());
+       WARN_ON(cpu < 0);
 
        /* Workqueues are sometimes created in atomic context */
        cws = kzalloc(sizeof(struct cpu_workqueue_stats), GFP_ATOMIC);
@@ -175,12 +175,12 @@ static void *workqueue_stat_next(void *prev, int idx)
        spin_lock_irqsave(&workqueue_cpu_stat(cpu)->lock, flags);
        if (list_is_last(&prev_cws->list, &workqueue_cpu_stat(cpu)->list)) {
                spin_unlock_irqrestore(&workqueue_cpu_stat(cpu)->lock, flags);
-               for (++cpu ; cpu < num_possible_cpus(); cpu++) {
-                       ret = workqueue_stat_start_cpu(cpu);
-                       if (ret)
-                               return ret;
-               }
-               return NULL;
+               do {
+                       cpu = cpumask_next(cpu, cpu_possible_mask);
+                       if (cpu >= nr_cpu_ids)
+                               return NULL;
+               } while (!(ret = workqueue_stat_start_cpu(cpu)));
+               return ret;
        }
        spin_unlock_irqrestore(&workqueue_cpu_stat(cpu)->lock, flags);