x86: clean up non-smp usage of cpu maps
author Mike Travis <travis@sgi.com>
Tue, 25 Mar 2008 22:06:51 +0000 (15:06 -0700)
committer Ingo Molnar <mingo@elte.hu>
Thu, 17 Apr 2008 15:41:34 +0000 (17:41 +0200)
Clean up references to the early cpu maps for the non-SMP configuration and
remove some functions that are called only in SMP configurations.
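
In UP builds the early boot arrays are no longer referenced at all: the early
pointers are defined to NULL and early_cpu_to_node() falls back to
cpu_to_node().  A condensed sketch of the !SMP fallbacks, assembled from the
hunks below purely for illustration (all names appear in this patch):

	/* include/asm-x86/smp.h */
	#ifdef CONFIG_SMP
	extern u16 x86_cpu_to_apicid_init[];
	extern void *x86_cpu_to_apicid_early_ptr;
	#else
	#define x86_cpu_to_apicid_early_ptr NULL
	#endif

	/* include/asm-x86/topology.h */
	#ifndef CONFIG_SMP
	#define early_cpu_to_node(cpu)	cpu_to_node(cpu)
	#endif

Because the fallback macros expand to NULL, early-map checks such as
"if (x86_cpu_to_node_map_early_ptr) ..." become constant-false on UP and can
be optimized away, so the call sites need no extra #ifdefs.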

Cc: Andi Kleen <ak@suse.de>
Cc: Christoph Lameter <clameter@sgi.com>
Signed-off-by: Mike Travis <travis@sgi.com>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
arch/x86/kernel/setup.c
arch/x86/mm/numa_64.c
include/asm-x86/smp.h
include/asm-x86/topology.h

diff --git a/arch/x86/kernel/setup.c b/arch/x86/kernel/setup.c
index 1179aa06cdbfd4bce187c8a3b8c01a1468b15af4..dc7940955b7a0febacf97498ae9ee0bbfed7e03f 100644
--- a/arch/x86/kernel/setup.c
+++ b/arch/x86/kernel/setup.c
@@ -10,7 +10,7 @@
 #include <asm/setup.h>
 #include <asm/topology.h>
 
-#ifdef CONFIG_HAVE_SETUP_PER_CPU_AREA
+#if defined(CONFIG_HAVE_SETUP_PER_CPU_AREA) && defined(CONFIG_SMP)
 /*
  * Copy data used in early init routines from the initial arrays to the
  * per cpu data areas.  These arrays then become expendable and the
@@ -21,21 +21,12 @@ static void __init setup_per_cpu_maps(void)
        int cpu;
 
        for_each_possible_cpu(cpu) {
-#ifdef CONFIG_SMP
-               if (per_cpu_offset(cpu)) {
-#endif
-                       per_cpu(x86_cpu_to_apicid, cpu) =
-                                               x86_cpu_to_apicid_init[cpu];
-                       per_cpu(x86_bios_cpu_apicid, cpu) =
+               per_cpu(x86_cpu_to_apicid, cpu) = x86_cpu_to_apicid_init[cpu];
+               per_cpu(x86_bios_cpu_apicid, cpu) =
                                                x86_bios_cpu_apicid_init[cpu];
 #ifdef CONFIG_NUMA
-                       per_cpu(x86_cpu_to_node_map, cpu) =
+               per_cpu(x86_cpu_to_node_map, cpu) =
                                                x86_cpu_to_node_map_init[cpu];
-#endif
-#ifdef CONFIG_SMP
-               } else
-                       printk(KERN_NOTICE "per_cpu_offset zero for cpu %d\n",
-                                                                       cpu);
 #endif
        }
 
@@ -72,17 +63,20 @@ void __init setup_per_cpu_areas(void)
 
        /* Copy section for each CPU (we discard the original) */
        size = PERCPU_ENOUGH_ROOM;
-
        printk(KERN_INFO "PERCPU: Allocating %lu bytes of per cpu data\n",
                          size);
-       for_each_cpu_mask(i, cpu_possible_map) {
+
+       for_each_possible_cpu(i) {
                char *ptr;
 #ifndef CONFIG_NEED_MULTIPLE_NODES
                ptr = alloc_bootmem_pages(size);
 #else
                int node = early_cpu_to_node(i);
-               if (!node_online(node) || !NODE_DATA(node))
+               if (!node_online(node) || !NODE_DATA(node)) {
                        ptr = alloc_bootmem_pages(size);
+                       printk(KERN_INFO
+                              "cpu %d has no node or node-local memory\n", i);
+               }
                else
                        ptr = alloc_bootmem_pages_node(NODE_DATA(node), size);
 #endif
@@ -96,7 +90,7 @@ void __init setup_per_cpu_areas(void)
                memcpy(ptr, __per_cpu_start, __per_cpu_end - __per_cpu_start);
        }
 
-       /* setup percpu data maps early */
+       /* Setup percpu data maps */
        setup_per_cpu_maps();
 }
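
The loop in setup_per_cpu_areas() above also switches from
for_each_cpu_mask(i, cpu_possible_map) to the generic for_each_possible_cpu()
helper.  At this point in the tree the two are equivalent; roughly (a sketch,
not the exact include/linux/cpumask.h definition):

	#define for_each_possible_cpu(cpu)	for_each_cpu_mask((cpu), cpu_possible_map)

so the conversion is a readability cleanup rather than a behavioural change.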
 
diff --git a/arch/x86/mm/numa_64.c b/arch/x86/mm/numa_64.c
index 18267a02e67ae442535e5d19d0e8c100b2b469cc..2ea56f48f29b506c3a180bfd79b3bdb62bf5fd2c 100644
--- a/arch/x86/mm/numa_64.c
+++ b/arch/x86/mm/numa_64.c
@@ -31,13 +31,15 @@ bootmem_data_t plat_node_bdata[MAX_NUMNODES];
 
 struct memnode memnode;
 
+#ifdef CONFIG_SMP
 int x86_cpu_to_node_map_init[NR_CPUS] = {
        [0 ... NR_CPUS-1] = NUMA_NO_NODE
 };
 void *x86_cpu_to_node_map_early_ptr;
+EXPORT_SYMBOL(x86_cpu_to_node_map_early_ptr);
+#endif
 DEFINE_PER_CPU(int, x86_cpu_to_node_map) = NUMA_NO_NODE;
 EXPORT_PER_CPU_SYMBOL(x86_cpu_to_node_map);
-EXPORT_SYMBOL(x86_cpu_to_node_map_early_ptr);
 
 s16 apicid_to_node[MAX_LOCAL_APIC] __cpuinitdata = {
        [0 ... MAX_LOCAL_APIC-1] = NUMA_NO_NODE
diff --git a/include/asm-x86/smp.h b/include/asm-x86/smp.h
index 654724c58f5bf8dc416e7a644f1b70b85db1980c..d973c11688ceb03d5fe6a1f5a9530111ae562db6 100644
--- a/include/asm-x86/smp.h
+++ b/include/asm-x86/smp.h
@@ -29,10 +29,15 @@ extern int smp_num_siblings;
 extern unsigned int num_processors;
 extern cpumask_t cpu_initialized;
 
+#ifdef CONFIG_SMP
 extern u16 x86_cpu_to_apicid_init[];
 extern u16 x86_bios_cpu_apicid_init[];
 extern void *x86_cpu_to_apicid_early_ptr;
 extern void *x86_bios_cpu_apicid_early_ptr;
+#else
+#define x86_cpu_to_apicid_early_ptr NULL
+#define x86_bios_cpu_apicid_early_ptr NULL
+#endif
 
 DECLARE_PER_CPU(cpumask_t, cpu_sibling_map);
 DECLARE_PER_CPU(cpumask_t, cpu_core_map);
diff --git a/include/asm-x86/topology.h b/include/asm-x86/topology.h
index 8d1a1f3d21b4ff5b971ca42995065a1155d89968..81a29eb08ac4113f5a5f5d57752fef9b390ae153 100644
--- a/include/asm-x86/topology.h
+++ b/include/asm-x86/topology.h
@@ -38,8 +38,13 @@ extern int cpu_to_node_map[];
 #endif
 
 DECLARE_PER_CPU(int, x86_cpu_to_node_map);
+
+#ifdef CONFIG_SMP
 extern int x86_cpu_to_node_map_init[];
 extern void *x86_cpu_to_node_map_early_ptr;
+#else
+#define x86_cpu_to_node_map_early_ptr NULL
+#endif
 
 extern cpumask_t node_to_cpumask_map[];
 
@@ -54,6 +59,8 @@ static inline int cpu_to_node(int cpu)
 }
 
 #else /* CONFIG_X86_64 */
+
+#ifdef CONFIG_SMP
 static inline int early_cpu_to_node(int cpu)
 {
        int *cpu_to_node_map = x86_cpu_to_node_map_early_ptr;
@@ -65,6 +72,9 @@ static inline int early_cpu_to_node(int cpu)
        else
                return NUMA_NO_NODE;
 }
+#else
+#define        early_cpu_to_node(cpu)  cpu_to_node(cpu)
+#endif
 
 static inline int cpu_to_node(int cpu)
 {
@@ -76,10 +86,7 @@ static inline int cpu_to_node(int cpu)
                return ((int *)x86_cpu_to_node_map_early_ptr)[cpu];
        }
 #endif
-       if (per_cpu_offset(cpu))
-               return per_cpu(x86_cpu_to_node_map, cpu);
-       else
-               return NUMA_NO_NODE;
+       return per_cpu(x86_cpu_to_node_map, cpu);
 }
 #endif /* CONFIG_X86_64 */
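
A note on the cpu_to_node() hunk above: the per_cpu_offset() guard is dropped,
presumably because every possible cpu gets a per-cpu area in
setup_per_cpu_areas(), and code that runs before that point is expected to use
early_cpu_to_node() instead.  The intended split is roughly (illustrative
sketch only, not code added by this patch):

	/* before setup_per_cpu_areas() has populated the per-cpu maps */
	int node = early_cpu_to_node(cpu);

	/* afterwards, the per-cpu copy is authoritative */
	node = cpu_to_node(cpu);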