[IA64] wider use of for_each_cpu_mask() in arch/ia64
author    hawkes@sgi.com <hawkes@sgi.com>
Mon, 10 Oct 2005 15:43:26 +0000 (08:43 -0700)
committer Tony Luck <tony.luck@intel.com>
Tue, 25 Oct 2005 22:10:08 +0000 (15:10 -0700)
In arch/ia64, convert the explicit for-loops over NR_CPUS into the
general for_each_cpu() or for_each_online_cpu() constructs, as
appropriate.  This widens the scope of potential future optimizations
of the general constructs and takes advantage of the existing
optimizations of first_cpu() and next_cpu().
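
For context, a rough sketch (paraphrased from include/linux/cpumask.h
of this era, not copied verbatim) of how the generic iterators reduce
to first_cpu()/next_cpu(), which is why any optimization of those
helpers now benefits every converted loop:

	#define for_each_cpu_mask(cpu, mask)			\
		for ((cpu) = first_cpu(mask);			\
		     (cpu) < NR_CPUS;				\
		     (cpu) = next_cpu((cpu), (mask)))

	/* iterate over every possible CPU (replaces open-coded cpu_possible() tests) */
	#define for_each_cpu(cpu)	  for_each_cpu_mask((cpu), cpu_possible_map)
	/* iterate over every online CPU (replaces open-coded cpu_online() tests) */
	#define for_each_online_cpu(cpu)  for_each_cpu_mask((cpu), cpu_online_map)

A converted caller, such as send_IPI_all() below, then walks only the
set bits of the mask instead of testing every index up to NR_CPUS:

	for_each_online_cpu(i) {
		send_IPI_single(i, op);
	}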

Signed-off-by: John Hawkes <hawkes@sgi.com>
Signed-off-by: Tony Luck <tony.luck@intel.com>
arch/ia64/kernel/irq.c
arch/ia64/kernel/module.c
arch/ia64/kernel/smp.c
arch/ia64/kernel/smpboot.c
arch/ia64/mm/tlb.c

diff --git a/arch/ia64/kernel/irq.c b/arch/ia64/kernel/irq.c
index 205d980282612e8a4f41ccd97858df3ac5da5dde..d33244c3275914f8ecdfeb769c1a9aad92710eff 100644
@@ -57,9 +57,9 @@ int show_interrupts(struct seq_file *p, void *v)
 
        if (i == 0) {
                seq_printf(p, "           ");
-               for (j=0; j<NR_CPUS; j++)
-                       if (cpu_online(j))
-                               seq_printf(p, "CPU%d       ",j);
+               for_each_online_cpu(j) {
+                       seq_printf(p, "CPU%d       ",j);
+               }
                seq_putc(p, '\n');
        }
 
@@ -72,9 +72,9 @@ int show_interrupts(struct seq_file *p, void *v)
 #ifndef CONFIG_SMP
                seq_printf(p, "%10u ", kstat_irqs(i));
 #else
-               for (j = 0; j < NR_CPUS; j++)
-                       if (cpu_online(j))
-                               seq_printf(p, "%10u ", kstat_cpu(j).irqs[i]);
+               for_each_online_cpu(j) {
+                       seq_printf(p, "%10u ", kstat_cpu(j).irqs[i]);
+               }
 #endif
                seq_printf(p, " %14s", irq_desc[i].handler->typename);
                seq_printf(p, "  %s", action->name);
diff --git a/arch/ia64/kernel/module.c b/arch/ia64/kernel/module.c
index f1aca7cffd120879e446e703a8783f93c36f6654..7a2f0a798d121c67a338b6f34cc9a59090f2505b 100644
@@ -947,8 +947,8 @@ void
 percpu_modcopy (void *pcpudst, const void *src, unsigned long size)
 {
        unsigned int i;
-       for (i = 0; i < NR_CPUS; i++)
-               if (cpu_possible(i))
-                       memcpy(pcpudst + __per_cpu_offset[i], src, size);
+       for_each_cpu(i) {
+               memcpy(pcpudst + __per_cpu_offset[i], src, size);
+       }
 }
 #endif /* CONFIG_SMP */
diff --git a/arch/ia64/kernel/smp.c b/arch/ia64/kernel/smp.c
index 0166a984709536238d8f97641992038ce17cd27a..657ac99a451cf8d550e10d5fa52af12f130b3588 100644
@@ -185,8 +185,8 @@ send_IPI_allbutself (int op)
 {
        unsigned int i;
 
-       for (i = 0; i < NR_CPUS; i++) {
-               if (cpu_online(i) && i != smp_processor_id())
+       for_each_online_cpu(i) {
+               if (i != smp_processor_id())
                        send_IPI_single(i, op);
        }
 }
@@ -199,9 +199,9 @@ send_IPI_all (int op)
 {
        int i;
 
-       for (i = 0; i < NR_CPUS; i++)
-               if (cpu_online(i))
-                       send_IPI_single(i, op);
+       for_each_online_cpu(i) {
+               send_IPI_single(i, op);
+       }
 }
 
 /*
diff --git a/arch/ia64/kernel/smpboot.c b/arch/ia64/kernel/smpboot.c
index 7d72c0d872b312ad4c5e4406dca78cb5e24b616b..400a4898712492da0bdbe28ea27aa525f7a842ef 100644
@@ -694,9 +694,9 @@ smp_cpus_done (unsigned int dummy)
         * Allow the user to impress friends.
         */
 
-       for (cpu = 0; cpu < NR_CPUS; cpu++)
-               if (cpu_online(cpu))
-                       bogosum += cpu_data(cpu)->loops_per_jiffy;
+       for_each_online_cpu(cpu) {
+               bogosum += cpu_data(cpu)->loops_per_jiffy;
+       }
 
        printk(KERN_INFO "Total of %d processors activated (%lu.%02lu BogoMIPS).\n",
               (int)num_online_cpus(), bogosum/(500000/HZ), (bogosum/(5000/HZ))%100);
diff --git a/arch/ia64/mm/tlb.c b/arch/ia64/mm/tlb.c
index 464557e4ed8241f5755782730495a4e434446c59..987fb754d6ad58daabc62ba3742f73e82f02b53d 100644
@@ -77,9 +77,10 @@ wrap_mmu_context (struct mm_struct *mm)
        /* can't call flush_tlb_all() here because of race condition with O(1) scheduler [EF] */
        {
                int cpu = get_cpu(); /* prevent preemption/migration */
-               for (i = 0; i < NR_CPUS; ++i)
-                       if (cpu_online(i) && (i != cpu))
+               for_each_online_cpu(i) {
+                       if (i != cpu)
                                per_cpu(ia64_need_tlb_flush, i) = 1;
+               }
                put_cpu();
        }
        local_flush_tlb_all();