[IA64] Fix section mismatch in contig.c version of per_cpu_init()
author    Tony Luck <tony.luck@intel.com>
Tue, 6 Nov 2007 23:14:45 +0000 (15:14 -0800)
committer Tony Luck <tony.luck@intel.com>
Tue, 6 Nov 2007 23:14:45 +0000 (15:14 -0800)
There is a section mismatch warning when building CONFIG_FLATMEM=y
kernels that also have CONFIG_HOTPLUG_CPU=y:

WARNING: vmlinux.o(.text+0x5a902): Section mismatch: reference to \
.init.text:__alloc_bootmem (between 'per_cpu_init' and 'count_pages')

The issue occurs because per_cpu_init() in mm/contig.c is
marked __cpuinit (which is #define'd to nothing in a CPU
hotplug configuration, so the function stays in .text) but
calls __alloc_bootmem() (which is an __init function).  The
usage is actually safe because the __alloc_bootmem() call is
inside an "if (first_time)" test, so the call is only made
while it is still legal to do so.
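
For illustration only (not part of the patch), a minimal
user-space sketch of the pattern modpost complains about; the
fake_* names are made up for the example and only mimic the
real per_cpu_init()/__alloc_bootmem() pairing:

    /* Minimal sketch, not kernel code: with CONFIG_HOTPLUG_CPU=y the
     * __cpuinit annotation expands to nothing, so the caller stays in
     * .text while the callee is placed in .init.text -- exactly the
     * cross-section reference modpost flags above.
     */
    #define __init    __attribute__((__section__(".init.text")))
    #define __cpuinit /* empty when CONFIG_HOTPLUG_CPU=y */

    static void * __init fake_alloc_bootmem(unsigned long size)
    {
            (void)size;
            return (void *)0;       /* stands in for __alloc_bootmem() */
    }

    void * __cpuinit fake_per_cpu_init(void)
    {
            static int first_time = 1;

            if (first_time) {       /* runtime guard keeps the call legal...   */
                    first_time = 0;
                    return fake_alloc_bootmem(64);  /* ...but modpost still
                                                     * sees .text -> .init.text */
            }
            return (void *)0;
    }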

But the warning is irritating.  Move the allocation to
find_memory().

Signed-off-by: Tony Luck <tony.luck@intel.com>
arch/ia64/mm/contig.c

index d3c538be466c8e82d5e76719230de21fa3121d79..7e9c275ea148579b8b792583f497ae15e21927f3 100644 (file)
@@ -146,6 +146,46 @@ find_bootmap_location (unsigned long start, unsigned long end, void *arg)
        return 0;
 }
 
+#ifdef CONFIG_SMP
+static void *cpu_data;
+/**
+ * per_cpu_init - setup per-cpu variables
+ *
+ * Allocate and setup per-cpu data areas.
+ */
+void * __cpuinit
+per_cpu_init (void)
+{
+       int cpu;
+       static int first_time=1;
+
+       /*
+        * get_free_pages() cannot be used before cpu_init() done.  BSP
+        * allocates "NR_CPUS" pages for all CPUs to avoid that AP calls
+        * get_zeroed_page().
+        */
+       if (first_time) {
+               first_time=0;
+               for (cpu = 0; cpu < NR_CPUS; cpu++) {
+                       memcpy(cpu_data, __phys_per_cpu_start, __per_cpu_end - __per_cpu_start);
+                       __per_cpu_offset[cpu] = (char *) cpu_data - __per_cpu_start;
+                       cpu_data += PERCPU_PAGE_SIZE;
+                       per_cpu(local_per_cpu_offset, cpu) = __per_cpu_offset[cpu];
+               }
+       }
+       return __per_cpu_start + __per_cpu_offset[smp_processor_id()];
+}
+
+static inline void
+alloc_per_cpu_data(void)
+{
+       cpu_data = __alloc_bootmem(PERCPU_PAGE_SIZE * NR_CPUS,
+                                  PERCPU_PAGE_SIZE, __pa(MAX_DMA_ADDRESS));
+}
+#else
+#define alloc_per_cpu_data() do { } while (0)
+#endif /* CONFIG_SMP */
+
 /**
  * find_memory - setup memory map
  *
@@ -182,41 +222,9 @@ find_memory (void)
 
        find_initrd();
 
+       alloc_per_cpu_data();
 }
 
-#ifdef CONFIG_SMP
-/**
- * per_cpu_init - setup per-cpu variables
- *
- * Allocate and setup per-cpu data areas.
- */
-void * __cpuinit
-per_cpu_init (void)
-{
-       void *cpu_data;
-       int cpu;
-       static int first_time=1;
-
-       /*
-        * get_free_pages() cannot be used before cpu_init() done.  BSP
-        * allocates "NR_CPUS" pages for all CPUs to avoid that AP calls
-        * get_zeroed_page().
-        */
-       if (first_time) {
-               first_time=0;
-               cpu_data = __alloc_bootmem(PERCPU_PAGE_SIZE * NR_CPUS,
-                                          PERCPU_PAGE_SIZE, __pa(MAX_DMA_ADDRESS));
-               for (cpu = 0; cpu < NR_CPUS; cpu++) {
-                       memcpy(cpu_data, __phys_per_cpu_start, __per_cpu_end - __per_cpu_start);
-                       __per_cpu_offset[cpu] = (char *) cpu_data - __per_cpu_start;
-                       cpu_data += PERCPU_PAGE_SIZE;
-                       per_cpu(local_per_cpu_offset, cpu) = __per_cpu_offset[cpu];
-               }
-       }
-       return __per_cpu_start + __per_cpu_offset[smp_processor_id()];
-}
-#endif /* CONFIG_SMP */
-
 static int
 count_pages (u64 start, u64 end, void *arg)
 {