#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/bootmem.h>
#include <linux/percpu.h>
#include <asm/percpu.h>
#include <asm/sections.h>
#include <asm/processor.h>
#include <asm/setup.h>
#include <asm/topology.h>
#include <asm/mpspec.h>
#include <asm/apicdef.h>
#ifdef CONFIG_X86_LOCAL_APIC
unsigned int num_processors;
unsigned disabled_cpus __cpuinitdata;
/* Processor that is doing the boot up */
unsigned int boot_cpu_physical_apicid = -1U;
EXPORT_SYMBOL(boot_cpu_physical_apicid);

/* Bitmask of physically existing CPUs */
physid_mask_t phys_cpu_present_map;
#endif
/* map cpu index to physical APIC ID */
DEFINE_EARLY_PER_CPU(u16, x86_cpu_to_apicid, BAD_APICID);
DEFINE_EARLY_PER_CPU(u16, x86_bios_cpu_apicid, BAD_APICID);
EXPORT_EARLY_PER_CPU_SYMBOL(x86_cpu_to_apicid);
EXPORT_EARLY_PER_CPU_SYMBOL(x86_bios_cpu_apicid);
#if defined(CONFIG_NUMA) && defined(CONFIG_X86_64)
#define X86_64_NUMA	1

/* map cpu index to node index */
DEFINE_EARLY_PER_CPU(int, x86_cpu_to_node_map, NUMA_NO_NODE);
EXPORT_EARLY_PER_CPU_SYMBOL(x86_cpu_to_node_map);

/* which logical CPUs are on which nodes */
cpumask_t *node_to_cpumask_map;
EXPORT_SYMBOL(node_to_cpumask_map);

/* setup node_to_cpumask_map */
static void __init setup_node_to_cpumask_map(void);
#else
static inline void setup_node_to_cpumask_map(void) { }
#endif
#if defined(CONFIG_HAVE_SETUP_PER_CPU_AREA) && defined(CONFIG_X86_SMP)
/*
 * Copy data used in early init routines from the initial arrays to the
 * per cpu data areas.  These arrays then become expendable and the
 * *_early_ptr's are zeroed indicating that the static arrays are gone.
 */
static void __init setup_per_cpu_maps(void)
{
        int cpu;

        for_each_possible_cpu(cpu) {
                per_cpu(x86_cpu_to_apicid, cpu) =
                                early_per_cpu_map(x86_cpu_to_apicid, cpu);
                per_cpu(x86_bios_cpu_apicid, cpu) =
                                early_per_cpu_map(x86_bios_cpu_apicid, cpu);
#ifdef X86_64_NUMA
                per_cpu(x86_cpu_to_node_map, cpu) =
                                early_per_cpu_map(x86_cpu_to_node_map, cpu);
#endif
        }

        /* indicate the early static arrays will soon be gone */
        early_per_cpu_ptr(x86_cpu_to_apicid) = NULL;
        early_per_cpu_ptr(x86_bios_cpu_apicid) = NULL;
#ifdef X86_64_NUMA
        early_per_cpu_ptr(x86_cpu_to_node_map) = NULL;
#endif
}
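/*
 * From this point on the boot-time arrays can be discarded; a later lookup
 * would typically read the per cpu copy directly, e.g.
 * per_cpu(x86_cpu_to_apicid, cpu) (illustrative usage, not code from this
 * file).
 */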
#ifdef CONFIG_HAVE_CPUMASK_OF_CPU_MAP
cpumask_t *cpumask_of_cpu_map __read_mostly;
EXPORT_SYMBOL(cpumask_of_cpu_map);

/* requires nr_cpu_ids to be initialized */
static void __init setup_cpumask_of_cpu(void)
{
        int i;

        /* alloc_bootmem zeroes memory */
        cpumask_of_cpu_map = alloc_bootmem_low(sizeof(cpumask_t) * nr_cpu_ids);
        for (i = 0; i < nr_cpu_ids; i++)
                cpu_set(i, cpumask_of_cpu_map[i]);
}
#else
static inline void setup_cpumask_of_cpu(void) { }
#endif
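/*
 * cpumask_of_cpu_map[] holds one cpumask_t per possible cpu with only that
 * cpu's bit set, so code that needs "a mask containing just cpu N" can use
 * a pointer into this table instead of building a mask on the stack
 * (presumably consumed by the cpumask_of_cpu() machinery elsewhere; that
 * helper lives outside this file).
 */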
#ifdef CONFIG_X86_32
/*
 * Great future not-so-futuristic plan: make i386 and x86_64 do it
 * the same way
 */
unsigned long __per_cpu_offset[NR_CPUS] __read_mostly;
EXPORT_SYMBOL(__per_cpu_offset);
static inline void setup_cpu_pda_map(void) { }

#elif !defined(CONFIG_SMP)
static inline void setup_cpu_pda_map(void) { }

#else /* CONFIG_SMP && CONFIG_X86_64 */
/*
 * Allocate cpu_pda pointer table and array via alloc_bootmem.
 */
static void __init setup_cpu_pda_map(void)
{
        char *pda;
        struct x8664_pda **new_cpu_pda;
        unsigned long size;
        int cpu;

        size = roundup(sizeof(struct x8664_pda), cache_line_size());

        /* allocate cpu_pda array and pointer table */
        {
                unsigned long tsize = nr_cpu_ids * sizeof(void *);
                unsigned long asize = size * (nr_cpu_ids - 1);

                tsize = roundup(tsize, cache_line_size());
                new_cpu_pda = alloc_bootmem(tsize + asize);
                pda = (char *)new_cpu_pda + tsize;
        }

        /* initialize pointer table to static pda's */
        for_each_possible_cpu(cpu) {
                if (cpu == 0) {
                        /* leave boot cpu pda in place */
                        new_cpu_pda[0] = cpu_pda(0);
                        continue;
                }
                new_cpu_pda[cpu] = (struct x8664_pda *)pda;
                new_cpu_pda[cpu]->in_bootmem = 1;
                pda += size;
        }

        /* point to new pointer table */
        _cpu_pda = new_cpu_pda;
}
#endif
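/*
 * Resulting bootmem layout: a cache-line aligned pointer table with
 * nr_cpu_ids entries, followed by (nr_cpu_ids - 1) cache-line aligned pda
 * structures.  cpu 0 keeps its statically allocated pda; every other pda
 * is marked in_bootmem so later code knows where it came from.
 */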
/*
 * Declare PDA itself and support (irqstack,tss,pgd) as per cpu data.
 * Always point %gs to its beginning
 */
void __init setup_per_cpu_areas(void)
{
        ssize_t size = PERCPU_ENOUGH_ROOM;
        char *ptr;
        int cpu;

#ifdef CONFIG_HOTPLUG_CPU
        prefill_possible_map();
#else
        nr_cpu_ids = num_processors;
#endif

        /* Setup cpu_pda map */
        setup_cpu_pda_map();

        /* Copy section for each CPU (we discard the original) */
        size = PERCPU_ENOUGH_ROOM;
        printk(KERN_INFO "PERCPU: Allocating %lu bytes of per cpu data\n",
                          size);

        for_each_possible_cpu(cpu) {
#ifndef CONFIG_NEED_MULTIPLE_NODES
                ptr = alloc_bootmem_pages(size);
#else
                int node = early_cpu_to_node(cpu);
                if (!node_online(node) || !NODE_DATA(node)) {
                        ptr = alloc_bootmem_pages(size);
                        printk(KERN_INFO
                               "cpu %d has no node %d or node-local memory\n",
                                cpu, node);
                }
                else
                        ptr = alloc_bootmem_pages_node(NODE_DATA(node), size);
#endif
                per_cpu_offset(cpu) = ptr - __per_cpu_start;
                memcpy(ptr, __per_cpu_start, __per_cpu_end - __per_cpu_start);
        }

        printk(KERN_DEBUG "NR_CPUS: %d, nr_cpu_ids: %d, nr_node_ids %d\n",
                NR_CPUS, nr_cpu_ids, nr_node_ids);

        /* Setup percpu data maps */
        setup_per_cpu_maps();

        /* Setup node to cpumask map */
        setup_node_to_cpumask_map();

        /* Setup cpumask_of_cpu map */
        setup_cpumask_of_cpu();
}

#endif
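/*
 * Boot-time ordering as implemented above: fix nr_cpu_ids, switch to the
 * dynamically allocated pda table, give every possible cpu its own copy of
 * the per cpu section (node-local memory when the node is online), record
 * each copy's offset in per_cpu_offset(), and only then migrate the early
 * apicid/node maps and build the node/cpu mask tables.
 */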
#ifdef X86_64_NUMA

/*
 * Allocate node_to_cpumask_map based on number of available nodes
 * Requires node_possible_map to be valid.
 *
 * Note: node_to_cpumask() is not valid until after this is done.
 */
static void __init setup_node_to_cpumask_map(void)
{
        unsigned int node, num = 0;
        cpumask_t *map;

        /* setup nr_node_ids if not done yet */
        if (nr_node_ids == MAX_NUMNODES) {
                for_each_node_mask(node, node_possible_map)
                        num = node;
                nr_node_ids = num + 1;
        }

        /* allocate the map */
        map = alloc_bootmem_low(nr_node_ids * sizeof(cpumask_t));

        Dprintk(KERN_DEBUG "Node to cpumask map at %p for %d nodes\n",
                map, nr_node_ids);

        /* node_to_cpumask() will now work */
        node_to_cpumask_map = map;
}
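/*
 * nr_node_ids ends up as the highest node number found in node_possible_map
 * plus one, and the map gets one (zeroed) cpumask_t per node id;
 * numa_add_cpu()/numa_remove_cpu() below set and clear bits in it as cpus
 * come and go.
 */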
void __cpuinit numa_set_node(int cpu, int node)
{
        int *cpu_to_node_map = early_per_cpu_ptr(x86_cpu_to_node_map);

        if (cpu_pda(cpu) && node != NUMA_NO_NODE)
                cpu_pda(cpu)->nodenumber = node;

        if (cpu_to_node_map)
                cpu_to_node_map[cpu] = node;

        else if (per_cpu_offset(cpu))
                per_cpu(x86_cpu_to_node_map, cpu) = node;

        else
                Dprintk(KERN_INFO "Setting node for non-present cpu %d\n", cpu);
}

void __cpuinit numa_clear_node(int cpu)
{
        numa_set_node(cpu, NUMA_NO_NODE);
}
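/*
 * numa_set_node() has to cope with three stages of boot: while the early
 * static map is still live it writes there, once the cpu's per cpu area
 * exists it writes the per cpu variable, and for a cpu with neither (not
 * present) it only logs the request.
 */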
#ifndef CONFIG_DEBUG_PER_CPU_MAPS

void __cpuinit numa_add_cpu(int cpu)
{
        cpu_set(cpu, node_to_cpumask_map[early_cpu_to_node(cpu)]);
}

void __cpuinit numa_remove_cpu(int cpu)
{
        cpu_clear(cpu, node_to_cpumask_map[cpu_to_node(cpu)]);
}
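/*
 * These fast versions trust node_to_cpumask_map and the cpu-to-node maps;
 * the CONFIG_DEBUG_PER_CPU_MAPS variants below add NULL-map checks and
 * debug logging for the same operations.
 */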
#else /* CONFIG_DEBUG_PER_CPU_MAPS */

/*
 * --------- debug versions of the numa functions ---------
 */
static void __cpuinit numa_set_cpumask(int cpu, int enable)
{
        int node = cpu_to_node(cpu);
        cpumask_t *mask;
        char buf[64];

        if (node_to_cpumask_map == NULL) {
                printk(KERN_ERR "node_to_cpumask_map NULL\n");
                dump_stack();
                return;
        }

        mask = &node_to_cpumask_map[node];
        if (enable)
                cpu_set(cpu, *mask);
        else
                cpu_clear(cpu, *mask);

        cpulist_scnprintf(buf, sizeof(buf), *mask);
        printk(KERN_DEBUG "%s cpu %d node %d: mask now %s\n",
                enable ? "numa_add_cpu" : "numa_remove_cpu", cpu, node, buf);
}

void __cpuinit numa_add_cpu(int cpu)
{
        numa_set_cpumask(cpu, 1);
}

void __cpuinit numa_remove_cpu(int cpu)
{
        numa_set_cpumask(cpu, 0);
}
int cpu_to_node(int cpu)
{
        if (early_per_cpu_ptr(x86_cpu_to_node_map)) {
                printk(KERN_WARNING
                        "cpu_to_node(%d): usage too early!\n", cpu);
                dump_stack();
                return early_per_cpu_ptr(x86_cpu_to_node_map)[cpu];
        }
        return per_cpu(x86_cpu_to_node_map, cpu);
}
EXPORT_SYMBOL(cpu_to_node);
/*
 * Same function as cpu_to_node() but used if called before the
 * per_cpu areas are setup.
 */
int early_cpu_to_node(int cpu)
{
        if (early_per_cpu_ptr(x86_cpu_to_node_map))
                return early_per_cpu_ptr(x86_cpu_to_node_map)[cpu];

        if (!per_cpu_offset(cpu)) {
                printk(KERN_WARNING
                        "early_cpu_to_node(%d): no per_cpu area!\n", cpu);
                dump_stack();
                return NUMA_NO_NODE;
        }
        return per_cpu(x86_cpu_to_node_map, cpu);
}
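/*
 * early_cpu_to_node() is the variant that is safe during early boot: it is
 * what setup_per_cpu_areas() uses above to pick a node for the bootmem
 * allocation, while the debug cpu_to_node() complains (but still answers)
 * if it is called before the early map has been handed over.
 */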
/*
 * Returns a pointer to the bitmask of CPUs on Node 'node'.
 */
cpumask_t *_node_to_cpumask_ptr(int node)
{
        if (node_to_cpumask_map == NULL) {
                printk(KERN_WARNING
                        "_node_to_cpumask_ptr(%d): no node_to_cpumask_map!\n",
                        node);
                dump_stack();
                return &cpu_online_map;
        }
        BUG_ON(node >= nr_node_ids);
        return &node_to_cpumask_map[node];
}
EXPORT_SYMBOL(_node_to_cpumask_ptr);
/*
 * Returns a bitmask of CPUs on Node 'node'.
 */
cpumask_t node_to_cpumask(int node)
{
        if (node_to_cpumask_map == NULL) {
                printk(KERN_WARNING
                        "node_to_cpumask(%d): no node_to_cpumask_map!\n", node);
                dump_stack();
                return cpu_online_map;
        }
        BUG_ON(node >= nr_node_ids);
        return node_to_cpumask_map[node];
}
EXPORT_SYMBOL(node_to_cpumask);

/*
 * --------- end of debug versions of the numa functions ---------
 */

#endif /* CONFIG_DEBUG_PER_CPU_MAPS */

#endif /* X86_64_NUMA */