/proc/stat: fix scalability of irq sum of all cpu
authorKAMEZAWA Hiroyuki <kamezawa.hiroyu@jp.fujitsu.com>
Wed, 27 Oct 2010 22:34:15 +0000 (15:34 -0700)
committerLinus Torvalds <torvalds@linux-foundation.org>
Thu, 28 Oct 2010 01:03:13 +0000 (18:03 -0700)
In /proc/stat, the number of per-IRQ events is shown by summing each
irq's events over all cpus.  But we can make use of kstat_irqs().

kstat_irqs() does the same calculation.  If !CONFIG_GENERIC_HARDIRQ,
it's not a big cost. (Both the number of cpus and the number of irqs are small.)

If a system is very big and CONFIG_GENERIC_HARDIRQ, it does

for_each_irq()
for_each_cpu()
- look up a radix tree
- read desc->irq_stat[cpu]
This seems inefficient. This patch adds kstat_irqs() for
CONFIG_GENERIC_HARDIRQ and changes the calculation to

for_each_irq()
look up radix tree
for_each_cpu()
- read desc->irq_stat[cpu]

This reduces cost.

A test on a (4096 cpus, 256 nodes, 4592 irqs) host (by Jack Steiner)

%time cat /proc/stat > /dev/null

Before Patch:  2.459 sec
After Patch :   .561 sec

[akpm@linux-foundation.org: unexport kstat_irqs, coding-style tweaks]
[akpm@linux-foundation.org: fix unused variable 'per_irq_sum']
Signed-off-by: KAMEZAWA Hiroyuki <kamezawa.hiroyu@jp.fujitsu.com>
Tested-by: Jack Steiner <steiner@sgi.com>
Acked-by: Jack Steiner <steiner@sgi.com>
Cc: Yinghai Lu <yinghai@kernel.org>
Cc: Ingo Molnar <mingo@elte.hu>
Cc: Thomas Gleixner <tglx@linutronix.de>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
fs/proc/stat.c
include/linux/kernel_stat.h
kernel/irq/irqdesc.c

index b80c620565bfcdda5cf7f5e1df16ed34f7d33518..e15a19c93baefa87ed4fd7ebbe57a532a5c80270 100644 (file)
@@ -31,7 +31,6 @@ static int show_stat(struct seq_file *p, void *v)
        u64 sum_softirq = 0;
        unsigned int per_softirq_sums[NR_SOFTIRQS] = {0};
        struct timespec boottime;
-       unsigned int per_irq_sum;
 
        user = nice = system = idle = iowait =
                irq = softirq = steal = cputime64_zero;
@@ -108,13 +107,8 @@ static int show_stat(struct seq_file *p, void *v)
        seq_printf(p, "intr %llu", (unsigned long long)sum);
 
        /* sum again ? it could be updated? */
-       for_each_irq_nr(j) {
-               per_irq_sum = 0;
-               for_each_possible_cpu(i)
-                       per_irq_sum += kstat_irqs_cpu(j, i);
-
-               seq_printf(p, " %u", per_irq_sum);
-       }
+       for_each_irq_nr(j)
+               seq_printf(p, " %u", kstat_irqs(j));
 
        seq_printf(p,
                "\nctxt %llu\n"
index 8b9b8908553029a6ca4efcf320bff8e22236a953..ad54c846911b91a169b903f7b1f6fee24d4320a2 100644 (file)
@@ -86,6 +86,7 @@ static inline unsigned int kstat_softirqs_cpu(unsigned int irq, int cpu)
 /*
  * Number of interrupts per specific IRQ source, since bootup
  */
+#ifndef CONFIG_GENERIC_HARDIRQS
 static inline unsigned int kstat_irqs(unsigned int irq)
 {
        unsigned int sum = 0;
@@ -96,6 +97,9 @@ static inline unsigned int kstat_irqs(unsigned int irq)
 
        return sum;
 }
+#else
+extern unsigned int kstat_irqs(unsigned int irq);
+#endif
 
 /*
  * Number of interrupts per cpu, since bootup
index 9d917ff726759a931443e3b03693a558cc65b377..9988d03797f5660dea26f417d9002fae94fd2798 100644 (file)
@@ -393,3 +393,18 @@ unsigned int kstat_irqs_cpu(unsigned int irq, int cpu)
        struct irq_desc *desc = irq_to_desc(irq);
        return desc ? desc->kstat_irqs[cpu] : 0;
 }
+
+#ifdef CONFIG_GENERIC_HARDIRQS
+unsigned int kstat_irqs(unsigned int irq)
+{
+       struct irq_desc *desc = irq_to_desc(irq);
+       int cpu;
+       int sum = 0;
+
+       if (!desc)
+               return 0;
+       for_each_possible_cpu(cpu)
+               sum += desc->kstat_irqs[cpu];
+       return sum;
+}
+#endif /* CONFIG_GENERIC_HARDIRQS */