	return allocation;
}
-void __init remap_numa_kva(void)
+static void __init remap_numa_kva(void)
{
	void *vaddr;
	unsigned long pfn;
		allocate_pgdat(nid);
	}
+	remap_numa_kva();
+
	printk(KERN_DEBUG "High memory starts at vaddr %08lx\n",
			(ulong) pfn_to_kaddr(highstart_pfn));
	for_each_online_node(nid)
		propagate_e820_map_node(nid);
-	memset(NODE_DATA(0), 0, sizeof(struct pglist_data));
+	for_each_online_node(nid)
+		memset(NODE_DATA(nid), 0, sizeof(struct pglist_data));
+
	NODE_DATA(0)->bdata = &node0_bdata;
	setup_bootmem_allocator();
}
	paravirt_pagetable_setup_start(pgd_base);
-	remap_numa_kva();
	/*
	 * Fixed mappings, only the page table structure has to be
	 * created - mappings will be set by set_fixmap():
	after_init_bootmem = 1;
}
-/*
- * The node 0 pgdat is initialized before all of these because
- * it's needed for bootmem. node>0 pgdats have their virtual
- * space allocated before the pagetables are in place to access
- * them, so they can't be cleared then.
- *
- * This should all compile down to nothing when NUMA is off.
- */
-static void __init remapped_pgdat_init(void)
-{
-	int nid;
-
-	for_each_online_node(nid) {
-		if (nid != 0)
-			memset(NODE_DATA(nid), 0, sizeof(struct pglist_data));
-	}
-}
-
static void __init find_early_table_space(unsigned long end)
{
	unsigned long puds, pmds, tables, start;
	/*
	 * NOTE: at this point the bootmem allocator is fully available.
	 */
-	remapped_pgdat_init();
	sparse_init();
	zone_sizes_init();
extern void numa_remove_cpu(int cpu);
#ifdef CONFIG_NUMA
-extern void __init remap_numa_kva(void);
extern void set_highmem_pages_init(void);
-#else
-static inline void remap_numa_kva(void)
-{
-}
#endif
#endif /* _ASM_X86_32_NUMA_H */
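For readers following the change outside the full tree, here is a minimal C sketch of the consolidated flow the hunks above leave behind: remap_numa_kva() becomes static to numa_32.c and is called right after the per-node pgdats are allocated, and every online node's pgdat (node 0 included) is cleared in that same path, which is why the separate remapped_pgdat_init() pass and the !CONFIG_NUMA stub can go away. The enclosing function name numa_setup_memory_sketch() is illustrative only (the hunks do not show the real function's name), and this is an ordering sketch against the kernel identifiers visible in the patch, not standalone-buildable code.

/* Illustrative sketch; the real enclosing function is not named in the hunks above. */
static void __init numa_setup_memory_sketch(void)
{
	int nid;

	for_each_online_node(nid) {
		/* ... per-node remap-space bookkeeping elided ... */
		allocate_pgdat(nid);
	}
	/* Now static in numa_32.c; sets up the remapped KVA mappings here. */
	remap_numa_kva();

	printk(KERN_DEBUG "High memory starts at vaddr %08lx\n",
			(ulong) pfn_to_kaddr(highstart_pfn));
	for_each_online_node(nid)
		propagate_e820_map_node(nid);

	/*
	 * With remap_numa_kva() called earlier in this same path, every
	 * node's pgdat is addressable and can be cleared in one loop, so
	 * the late remapped_pgdat_init() pass (which skipped node 0) is
	 * no longer needed.
	 */
	for_each_online_node(nid)
		memset(NODE_DATA(nid), 0, sizeof(struct pglist_data));

	NODE_DATA(0)->bdata = &node0_bdata;
	setup_bootmem_allocator();
}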