[PATCH] for_each_possible_cpu: powerpc
authorKAMEZAWA Hiroyuki <kamezawa.hiroyu@jp.fujitsu.com>
Tue, 28 Mar 2006 22:50:51 +0000 (14:50 -0800)
committerPaul Mackerras <paulus@samba.org>
Wed, 29 Mar 2006 02:44:15 +0000 (13:44 +1100)
for_each_cpu() actually iterates across all possible CPUs.  We've had mistakes
in the past where people were using for_each_cpu() where they should have been
iterating across only online or present CPUs.  This is inefficient and
possibly buggy.

We're renaming for_each_cpu() to for_each_possible_cpu() to avoid this in the
future.

This patch replaces for_each_cpu() with for_each_possible_cpu() throughout arch/powerpc and include/asm-powerpc.

Signed-off-by: KAMEZAWA Hiroyuki <kamezawa.hiroyu@jp.fujitsu.com>
Signed-off-by: Andrew Morton <akpm@osdl.org>
Signed-off-by: Paul Mackerras <paulus@samba.org>
14 files changed:
arch/powerpc/kernel/irq.c
arch/powerpc/kernel/lparcfg.c
arch/powerpc/kernel/rtas.c
arch/powerpc/kernel/setup-common.c
arch/powerpc/kernel/setup_32.c
arch/powerpc/kernel/setup_64.c
arch/powerpc/kernel/smp.c
arch/powerpc/kernel/sysfs.c
arch/powerpc/kernel/time.c
arch/powerpc/mm/stab.c
arch/powerpc/platforms/cell/interrupt.c
arch/powerpc/platforms/cell/pervasive.c
arch/powerpc/platforms/pseries/xics.c
include/asm-powerpc/percpu.h

index 771a59cbd213168219f42e71f529511298ca163e..bb5c9501234c8364c855cf11d7e363983603ef61 100644 (file)
@@ -379,7 +379,7 @@ void irq_ctx_init(void)
        struct thread_info *tp;
        int i;
 
-       for_each_cpu(i) {
+       for_each_possible_cpu(i) {
                memset((void *)softirq_ctx[i], 0, THREAD_SIZE);
                tp = softirq_ctx[i];
                tp->cpu = i;
index e789fef4eb8a1dc8f3dd210d884f98787fb7a211..1b73508ecb2bde2e7b5f72777ff0ad0b45a4fe33 100644 (file)
@@ -56,7 +56,7 @@ static unsigned long get_purr(void)
        unsigned long sum_purr = 0;
        int cpu;
 
-       for_each_cpu(cpu) {
+       for_each_possible_cpu(cpu) {
                sum_purr += lppaca[cpu].emulated_time_base;
 
 #ifdef PURR_DEBUG
@@ -222,7 +222,7 @@ static unsigned long get_purr(void)
        int cpu;
        struct cpu_usage *cu;
 
-       for_each_cpu(cpu) {
+       for_each_possible_cpu(cpu) {
                cu = &per_cpu(cpu_usage_array, cpu);
                sum_purr += cu->current_tb;
        }
index 4b78ee0e58679ceef55d1e2b22c665fe95d7d13d..06636c927a7ec4e2f3049fcd87f8219a37472e20 100644 (file)
@@ -593,7 +593,7 @@ static void rtas_percpu_suspend_me(void *info)
                data->waiting = 0;
                data->args->args[data->args->nargs] =
                        rtas_call(ibm_suspend_me_token, 0, 1, NULL);
-               for_each_cpu(i)
+               for_each_possible_cpu(i)
                        plpar_hcall_norets(H_PROD,i);
        } else {
                data->waiting = -EBUSY;
@@ -626,7 +626,7 @@ static int rtas_ibm_suspend_me(struct rtas_args *args)
        /* Prod each CPU.  This won't hurt, and will wake
         * anyone we successfully put to sleep with H_Join
         */
-       for_each_cpu(i)
+       for_each_possible_cpu(i)
                plpar_hcall_norets(H_PROD, i);
 
        return data.waiting;
index 3473cb9cb0ab47da334c184d23796b18722e055f..c607f3b9ca17494ce7a1f93ddac8d811ce66b997 100644 (file)
@@ -431,7 +431,7 @@ void __init smp_setup_cpu_maps(void)
        /*
         * Do the sibling map; assume only two threads per processor.
         */
-       for_each_cpu(cpu) {
+       for_each_possible_cpu(cpu) {
                cpu_set(cpu, cpu_sibling_map[cpu]);
                if (cpu_has_feature(CPU_FTR_SMT))
                        cpu_set(cpu ^ 0x1, cpu_sibling_map[cpu]);
index ae9c33d7073171ee0b2e72cda33930c8f35f1621..a72bf5dceeee587875f6a4ad1aa41fc9fcc6df8a 100644 (file)
@@ -226,7 +226,7 @@ int __init ppc_init(void)
        if ( ppc_md.progress ) ppc_md.progress("             ", 0xffff);
 
        /* register CPU devices */
-       for_each_cpu(i)
+       for_each_possible_cpu(i)
                register_cpu(&cpu_devices[i], i, NULL);
 
        /* call platform init */
index 05b152299396599fa1be0af60e4c6ce69a8efe72..59aa92cd6fa4eabab53ee3bc8d5381e4a3f095a0 100644 (file)
@@ -474,7 +474,7 @@ static void __init irqstack_early_init(void)
         * interrupt stacks must be under 256MB, we cannot afford to take
         * SLB misses on them.
         */
-       for_each_cpu(i) {
+       for_each_possible_cpu(i) {
                softirq_ctx[i] = (struct thread_info *)
                        __va(lmb_alloc_base(THREAD_SIZE,
                                            THREAD_SIZE, 0x10000000));
@@ -507,7 +507,7 @@ static void __init emergency_stack_init(void)
         */
        limit = min(0x10000000UL, lmb.rmo_size);
 
-       for_each_cpu(i)
+       for_each_possible_cpu(i)
                paca[i].emergency_sp =
                __va(lmb_alloc_base(HW_PAGE_SIZE, 128, limit)) + HW_PAGE_SIZE;
 }
@@ -624,7 +624,7 @@ void __init setup_per_cpu_areas(void)
                size = PERCPU_ENOUGH_ROOM;
 #endif
 
-       for_each_cpu(i) {
+       for_each_possible_cpu(i) {
                ptr = alloc_bootmem_node(NODE_DATA(cpu_to_node(i)), size);
                if (!ptr)
                        panic("Cannot allocate cpu data for CPU %d\n", i);
index 805eaedbc3084e3ea1c22d594705c6acb24fb514..530f7dba0bd2968adf9f49a1db63dbf3a0d53a57 100644 (file)
@@ -362,7 +362,7 @@ void __init smp_prepare_cpus(unsigned int max_cpus)
  
        smp_space_timers(max_cpus);
 
-       for_each_cpu(cpu)
+       for_each_possible_cpu(cpu)
                if (cpu != boot_cpuid)
                        smp_create_idle(cpu);
 }
index aca2f09cd8429463d77f0ee26bece1364d096419..73560ef6f802e8942b04e1fc97da641092cc9e8e 100644 (file)
@@ -74,7 +74,7 @@ static int __init smt_setup(void)
        val = (unsigned int *)get_property(options, "ibm,smt-snooze-delay",
                                           NULL);
        if (!smt_snooze_cmdline && val) {
-               for_each_cpu(cpu)
+               for_each_possible_cpu(cpu)
                        per_cpu(smt_snooze_delay, cpu) = *val;
        }
 
@@ -93,7 +93,7 @@ static int __init setup_smt_snooze_delay(char *str)
        smt_snooze_cmdline = 1;
 
        if (get_option(&str, &snooze)) {
-               for_each_cpu(cpu)
+               for_each_possible_cpu(cpu)
                        per_cpu(smt_snooze_delay, cpu) = snooze;
        }
 
@@ -347,7 +347,7 @@ static int __init topology_init(void)
 
        register_cpu_notifier(&sysfs_cpu_nb);
 
-       for_each_cpu(cpu) {
+       for_each_possible_cpu(cpu) {
                struct cpu *c = &per_cpu(cpu_devices, cpu);
 
 #ifdef CONFIG_NUMA
index 4a27218a086cb018bd5a6c31d8cfdf96b5434c98..24e3ad756de02ce124bb2ff05ea4eb9b04c9a033 100644 (file)
@@ -261,7 +261,7 @@ void snapshot_timebases(void)
 
        if (!cpu_has_feature(CPU_FTR_PURR))
                return;
-       for_each_cpu(cpu)
+       for_each_possible_cpu(cpu)
                spin_lock_init(&per_cpu(cpu_purr_data, cpu).lock);
        on_each_cpu(snapshot_tb_and_purr, NULL, 0, 1);
 }
@@ -751,7 +751,7 @@ void __init smp_space_timers(unsigned int max_cpus)
         * systems works better if the two threads' timebase interrupts
         * are staggered by half a jiffy with respect to each other.
         */
-       for_each_cpu(i) {
+       for_each_possible_cpu(i) {
                if (i == boot_cpuid)
                        continue;
                if (i == (boot_cpuid ^ 1))
index 91d25fb27f8940973fe15d67ff6f76a3c28acaa2..4a9291d9fef8a2b9511d581b2bd17deb3ab8e3c1 100644 (file)
@@ -239,7 +239,7 @@ void stabs_alloc(void)
        if (cpu_has_feature(CPU_FTR_SLB))
                return;
 
-       for_each_cpu(cpu) {
+       for_each_possible_cpu(cpu) {
                unsigned long newstab;
 
                if (cpu == 0)
index ae62f5d5c31b7e85db6987f4c3a97fd542048583..978be1c30c1b54988ccdf9a02f13cdad09932cf2 100644 (file)
@@ -364,7 +364,7 @@ void iic_init_IRQ(void)
                setup_iic_hardcoded();
 
        irq_offset = 0;
-       for_each_cpu(cpu) {
+       for_each_possible_cpu(cpu) {
                iic = &per_cpu(iic, cpu);
                if (iic->regs)
                        out_be64(&iic->regs->prio, 0xff);
index 58baeb52f6fcbf831532e704cc5c197f6ac50116..7eed8c624517018cd33298552b29cc709ef8eb4e 100644 (file)
@@ -217,7 +217,7 @@ void __init cell_pervasive_init(void)
        if (!cpu_has_feature(CPU_FTR_PAUSE_ZERO))
                return;
 
-       for_each_cpu(cpu) {
+       for_each_possible_cpu(cpu) {
                p = &cbe_pervasive[cpu];
                ret = cbe_find_pmd_mmio(cpu, p);
                if (ret)
index c60d3ff25a2f40e6b09cfb668460b0fb0b5d56f0..4864cb32be250478de4705809c4403f8ca2eb630 100644 (file)
@@ -541,7 +541,7 @@ nextnode:
                ops = &pSeriesLP_ops;
        else {
 #ifdef CONFIG_SMP
-               for_each_cpu(i) {
+               for_each_possible_cpu(i) {
                        int hard_id;
 
                        /* FIXME: Do this dynamically! --RR */
index 464301cd0d0300800e4889befa923a78e214fead..184a7a4d2fdfe15fb957a55fe828c5945dc2d5a7 100644 (file)
@@ -27,7 +27,7 @@
 #define percpu_modcopy(pcpudst, src, size)                     \
 do {                                                           \
        unsigned int __i;                                       \
-       for_each_cpu(__i)                                       \
+       for_each_possible_cpu(__i)                              \
                memcpy((pcpudst)+__per_cpu_offset(__i),         \
                       (src), (size));                          \
 } while (0)