[PATCH] sched: remove lb_stopbalance counter
authorChen, Kenneth W <kenneth.w.chen@intel.com>
Sun, 10 Dec 2006 10:20:35 +0000 (02:20 -0800)
committerLinus Torvalds <torvalds@woody.osdl.org>
Sun, 10 Dec 2006 17:55:43 +0000 (09:55 -0800)
Remove the scheduler stats lb_stopbalance counter.  This counter can be
derived from the remaining counters as lb_balanced - lb_nobusyg -
lb_nobusyq, so there is no need to keep a separate counter for a value we
can compute.
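For reference, a minimal sketch of how a consumer could reconstruct the
dropped value from the remaining per-idle-type counters, based on the
relation stated above (hypothetical helper, not part of this patch; assumes
the sched_domain schedstat fields from include/linux/sched.h):

	/* Derive the removed counter: per the description above,
	 * lb_stopbalance == lb_balanced - lb_nobusyg - lb_nobusyq.
	 */
	static inline unsigned long derived_lb_stopbalance(struct sched_domain *sd,
							   int itype)
	{
		return sd->lb_balanced[itype] -
		       sd->lb_nobusyg[itype] -
		       sd->lb_nobusyq[itype];
	}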

Signed-off-by: Ken Chen <kenneth.w.chen@intel.com>
Signed-off-by: Suresh Siddha <suresh.b.siddha@intel.com>
Acked-by: Ingo Molnar <mingo@elte.hu>
Signed-off-by: Andrew Morton <akpm@osdl.org>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
include/linux/sched.h
kernel/sched.c

index 72d6927d29ed7c065124e3ffe6f4efb2dd47ce51..ea92e5c890894694533ea42a0d95e2148ff301c9 100644 (file)
@@ -707,7 +707,6 @@ struct sched_domain {
        unsigned long lb_hot_gained[MAX_IDLE_TYPES];
        unsigned long lb_nobusyg[MAX_IDLE_TYPES];
        unsigned long lb_nobusyq[MAX_IDLE_TYPES];
-       unsigned long lb_stopbalance[MAX_IDLE_TYPES];
 
        /* Active load balancing */
        unsigned long alb_cnt;
index 4e453431c61a56e0f2493a51905823d2d73734cf..66e44b5b53d279d7dc406d546cd4295b261cfa84 100644 (file)
@@ -428,7 +428,7 @@ static inline void task_rq_unlock(struct rq *rq, unsigned long *flags)
  * bump this up when changing the output format or the meaning of an existing
  * format, so that tools can adapt (or abort)
  */
-#define SCHEDSTAT_VERSION 13
+#define SCHEDSTAT_VERSION 14
 
 static int show_schedstat(struct seq_file *seq, void *v)
 {
@@ -466,7 +466,7 @@ static int show_schedstat(struct seq_file *seq, void *v)
                        seq_printf(seq, "domain%d %s", dcnt++, mask_str);
                        for (itype = SCHED_IDLE; itype < MAX_IDLE_TYPES;
                                        itype++) {
-                               seq_printf(seq, " %lu %lu %lu %lu %lu %lu %lu %lu %lu",
+                               seq_printf(seq, " %lu %lu %lu %lu %lu %lu %lu %lu",
                                    sd->lb_cnt[itype],
                                    sd->lb_balanced[itype],
                                    sd->lb_failed[itype],
@@ -474,8 +474,7 @@ static int show_schedstat(struct seq_file *seq, void *v)
                                    sd->lb_gained[itype],
                                    sd->lb_hot_gained[itype],
                                    sd->lb_nobusyq[itype],
-                                   sd->lb_nobusyg[itype],
-                                   sd->lb_stopbalance[itype]);
+                                   sd->lb_nobusyg[itype]);
                        }
                        seq_printf(seq, " %lu %lu %lu %lu %lu %lu %lu %lu %lu %lu %lu %lu\n",
                            sd->alb_cnt, sd->alb_failed, sd->alb_pushed,
@@ -2596,10 +2595,8 @@ redo:
        group = find_busiest_group(sd, this_cpu, &imbalance, idle, &sd_idle,
                                   &cpus, balance);
 
-       if (*balance == 0) {
-               schedstat_inc(sd, lb_stopbalance[idle]);
+       if (*balance == 0)
                goto out_balanced;
-       }
 
        if (!group) {
                schedstat_inc(sd, lb_nobusyg[idle]);