x86: don't return invalid pointers from node_to_cpumask()
arch/x86/kernel/setup.c
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/bootmem.h>
#include <linux/percpu.h>
#include <asm/smp.h>
#include <asm/percpu.h>
#include <asm/sections.h>
#include <asm/processor.h>
#include <asm/setup.h>
#include <asm/topology.h>
#include <asm/mpspec.h>
#include <asm/apicdef.h>

#ifdef CONFIG_X86_LOCAL_APIC
unsigned int num_processors;
unsigned disabled_cpus __cpuinitdata;
/* Processor that is doing the boot up */
unsigned int boot_cpu_physical_apicid = -1U;
EXPORT_SYMBOL(boot_cpu_physical_apicid);

/* Bitmask of physically existing CPUs */
physid_mask_t phys_cpu_present_map;
#endif

/* map cpu index to physical APIC ID */
DEFINE_EARLY_PER_CPU(u16, x86_cpu_to_apicid, BAD_APICID);
DEFINE_EARLY_PER_CPU(u16, x86_bios_cpu_apicid, BAD_APICID);
EXPORT_EARLY_PER_CPU_SYMBOL(x86_cpu_to_apicid);
EXPORT_EARLY_PER_CPU_SYMBOL(x86_bios_cpu_apicid);
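
/*
 * The early maps above live in static arrays so they can be consulted
 * before the per-cpu areas exist; setup_per_cpu_maps() below copies
 * them into per-cpu storage and NULLs the early pointers.
 */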

#if defined(CONFIG_NUMA) && defined(CONFIG_X86_64)
#define X86_64_NUMA 1

/* map cpu index to node index */
DEFINE_EARLY_PER_CPU(int, x86_cpu_to_node_map, NUMA_NO_NODE);
EXPORT_EARLY_PER_CPU_SYMBOL(x86_cpu_to_node_map);

/* which logical CPUs are on which nodes */
cpumask_t *node_to_cpumask_map;
EXPORT_SYMBOL(node_to_cpumask_map);
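
/*
 * node_to_cpumask_map stays NULL until setup_node_to_cpumask_map()
 * allocates it from bootmem, which is why the debug accessors further
 * down check it against NULL before dereferencing.
 */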

/* setup node_to_cpumask_map */
static void __init setup_node_to_cpumask_map(void);

#else
static inline void setup_node_to_cpumask_map(void) { }
#endif

#if defined(CONFIG_HAVE_SETUP_PER_CPU_AREA) && defined(CONFIG_X86_SMP)
/*
 * Copy data used in early init routines from the initial arrays to the
 * per cpu data areas.  These arrays then become expendable and the
 * *_early_ptr's are zeroed indicating that the static arrays are gone.
 */
static void __init setup_per_cpu_maps(void)
{
	int cpu;

	for_each_possible_cpu(cpu) {
		per_cpu(x86_cpu_to_apicid, cpu) =
			early_per_cpu_map(x86_cpu_to_apicid, cpu);
		per_cpu(x86_bios_cpu_apicid, cpu) =
			early_per_cpu_map(x86_bios_cpu_apicid, cpu);
#ifdef X86_64_NUMA
		per_cpu(x86_cpu_to_node_map, cpu) =
			early_per_cpu_map(x86_cpu_to_node_map, cpu);
#endif
	}

	/* indicate the early static arrays will soon be gone */
	early_per_cpu_ptr(x86_cpu_to_apicid) = NULL;
	early_per_cpu_ptr(x86_bios_cpu_apicid) = NULL;
#ifdef X86_64_NUMA
	early_per_cpu_ptr(x86_cpu_to_node_map) = NULL;
#endif
}
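
/*
 * Readers go through the early_per_cpu() accessor, which (in this
 * kernel's asm/percpu.h) is expected to look roughly like the sketch
 * below: try the early pointer first, and fall back to the per-cpu
 * copy once the pointer has been zeroed above.
 *
 *	#define early_per_cpu(_name, _cpu)		\
 *		(early_per_cpu_ptr(_name) ?		\
 *		 early_per_cpu_ptr(_name)[_cpu] :	\
 *		 per_cpu(_name, _cpu))
 */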

#ifdef CONFIG_HAVE_CPUMASK_OF_CPU_MAP
cpumask_t *cpumask_of_cpu_map __read_mostly;
EXPORT_SYMBOL(cpumask_of_cpu_map);

/* requires nr_cpu_ids to be initialized */
static void __init setup_cpumask_of_cpu(void)
{
	int i;

	/* alloc_bootmem zeroes memory */
	cpumask_of_cpu_map = alloc_bootmem_low(sizeof(cpumask_t) * nr_cpu_ids);
	for (i = 0; i < nr_cpu_ids; i++)
		cpu_set(i, cpumask_of_cpu_map[i]);
}
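
/*
 * After this loop, entry i of the map is a mask containing only cpu i,
 * so cpumask_of_cpu(i) can hand out a pointer to a prebuilt mask
 * instead of each caller assembling a single-cpu cpumask_t on the
 * stack (an assumption about how CONFIG_HAVE_CPUMASK_OF_CPU_MAP
 * consumers use this map).
 */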
#else
static inline void setup_cpumask_of_cpu(void) { }
#endif

#ifdef CONFIG_X86_32
/*
 * Great future not-so-futuristic plan: make i386 and x86_64 do it
 * the same way
 */
unsigned long __per_cpu_offset[NR_CPUS] __read_mostly;
EXPORT_SYMBOL(__per_cpu_offset);
static inline void setup_cpu_pda_map(void) { }

#elif !defined(CONFIG_SMP)
static inline void setup_cpu_pda_map(void) { }

#else /* CONFIG_SMP && CONFIG_X86_64 */

/*
 * Allocate cpu_pda pointer table and array via alloc_bootmem.
 */
static void __init setup_cpu_pda_map(void)
{
	char *pda;
	struct x8664_pda **new_cpu_pda;
	unsigned long size;
	int cpu;

	size = roundup(sizeof(struct x8664_pda), cache_line_size());

	/* allocate cpu_pda array and pointer table */
	{
		unsigned long tsize = nr_cpu_ids * sizeof(void *);
		unsigned long asize = size * (nr_cpu_ids - 1);

		tsize = roundup(tsize, cache_line_size());
		new_cpu_pda = alloc_bootmem(tsize + asize);
		pda = (char *)new_cpu_pda + tsize;
	}
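
	/*
	 * Layout of the single bootmem allocation made above:
	 * nr_cpu_ids pointers first, then (nr_cpu_ids - 1) cache-line
	 * aligned pda entries.  One pda fewer is needed because the
	 * boot cpu keeps its static pda (see the loop below).
	 */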

	/* initialize pointer table to static pda's */
	for_each_possible_cpu(cpu) {
		if (cpu == 0) {
			/* leave boot cpu pda in place */
			new_cpu_pda[0] = cpu_pda(0);
			continue;
		}
		new_cpu_pda[cpu] = (struct x8664_pda *)pda;
		new_cpu_pda[cpu]->in_bootmem = 1;
		pda += size;
	}

	/* point to new pointer table */
	_cpu_pda = new_cpu_pda;
}
#endif

/*
 * Great future plan:
 * Declare PDA itself and support (irqstack,tss,pgd) as per cpu data.
 * Always point %gs to its beginning
 */
void __init setup_per_cpu_areas(void)
{
	ssize_t size = PERCPU_ENOUGH_ROOM;
	char *ptr;
	int cpu;

#ifdef CONFIG_HOTPLUG_CPU
	prefill_possible_map();
#else
	nr_cpu_ids = num_processors;
#endif

	/* Setup cpu_pda map */
	setup_cpu_pda_map();

	/* Copy section for each CPU (we discard the original) */
	printk(KERN_INFO "PERCPU: Allocating %zd bytes of per cpu data\n",
	       size);

	for_each_possible_cpu(cpu) {
#ifndef CONFIG_NEED_MULTIPLE_NODES
		ptr = alloc_bootmem_pages(size);
#else
		int node = early_cpu_to_node(cpu);
		if (!node_online(node) || !NODE_DATA(node)) {
			ptr = alloc_bootmem_pages(size);
			printk(KERN_INFO
			       "cpu %d has no node %d or node-local memory\n",
			       cpu, node);
		} else
			ptr = alloc_bootmem_pages_node(NODE_DATA(node), size);
#endif
		per_cpu_offset(cpu) = ptr - __per_cpu_start;
		memcpy(ptr, __per_cpu_start, __per_cpu_end - __per_cpu_start);
	}

	printk(KERN_DEBUG "NR_CPUS: %d, nr_cpu_ids: %d, nr_node_ids: %d\n",
	       NR_CPUS, nr_cpu_ids, nr_node_ids);

	/* Setup percpu data maps */
	setup_per_cpu_maps();

	/* Setup node to cpumask map */
	setup_node_to_cpumask_map();

	/* Setup cpumask_of_cpu map */
	setup_cpumask_of_cpu();
}

#endif

#ifdef X86_64_NUMA

/*
 * Allocate node_to_cpumask_map based on number of available nodes
 * Requires node_possible_map to be valid.
 *
 * Note: node_to_cpumask() is not valid until after this is done.
 */
static void __init setup_node_to_cpumask_map(void)
{
	unsigned int node, num = 0;
	cpumask_t *map;

	/* setup nr_node_ids if not done yet */
	if (nr_node_ids == MAX_NUMNODES) {
		for_each_node_mask(node, node_possible_map)
			num = node;
		nr_node_ids = num + 1;
	}

	/* allocate the map */
	map = alloc_bootmem_low(nr_node_ids * sizeof(cpumask_t));

	Dprintk(KERN_DEBUG "Node to cpumask map at %p for %d nodes\n",
		map, nr_node_ids);

	/* node_to_cpumask() will now work */
	node_to_cpumask_map = map;
}

void __cpuinit numa_set_node(int cpu, int node)
{
	int *cpu_to_node_map = early_per_cpu_ptr(x86_cpu_to_node_map);

	if (cpu_pda(cpu) && node != NUMA_NO_NODE)
		cpu_pda(cpu)->nodenumber = node;

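	/*
	 * Store the mapping wherever it currently lives: in the early
	 * static array if that is still around, else in this cpu's
	 * per-cpu area if one has been set up; otherwise the cpu is
	 * not present yet and the value has nowhere to go.
	 */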
	if (cpu_to_node_map)
		cpu_to_node_map[cpu] = node;

	else if (per_cpu_offset(cpu))
		per_cpu(x86_cpu_to_node_map, cpu) = node;

	else
		Dprintk(KERN_INFO "Setting node for non-present cpu %d\n", cpu);
}

void __cpuinit numa_clear_node(int cpu)
{
	numa_set_node(cpu, NUMA_NO_NODE);
}

#ifndef CONFIG_DEBUG_PER_CPU_MAPS

void __cpuinit numa_add_cpu(int cpu)
{
	cpu_set(cpu, node_to_cpumask_map[early_cpu_to_node(cpu)]);
}

void __cpuinit numa_remove_cpu(int cpu)
{
	cpu_clear(cpu, node_to_cpumask_map[cpu_to_node(cpu)]);
}

#else /* CONFIG_DEBUG_PER_CPU_MAPS */

/*
 * --------- debug versions of the numa functions ---------
 */
static void __cpuinit numa_set_cpumask(int cpu, int enable)
{
	int node = cpu_to_node(cpu);
	cpumask_t *mask;
	char buf[64];

	if (node_to_cpumask_map == NULL) {
		printk(KERN_ERR "node_to_cpumask_map NULL\n");
		dump_stack();
		return;
	}

	mask = &node_to_cpumask_map[node];
	if (enable)
		cpu_set(cpu, *mask);
	else
		cpu_clear(cpu, *mask);

	cpulist_scnprintf(buf, sizeof(buf), *mask);
	printk(KERN_DEBUG "%s cpu %d node %d: mask now %s\n",
	       enable ? "numa_add_cpu" : "numa_remove_cpu", cpu, node, buf);
}

void __cpuinit numa_add_cpu(int cpu)
{
	numa_set_cpumask(cpu, 1);
}

void __cpuinit numa_remove_cpu(int cpu)
{
	numa_set_cpumask(cpu, 0);
}

int cpu_to_node(int cpu)
{
	if (early_per_cpu_ptr(x86_cpu_to_node_map)) {
		printk(KERN_WARNING
		       "cpu_to_node(%d): usage too early!\n", cpu);
		dump_stack();
		return early_per_cpu_ptr(x86_cpu_to_node_map)[cpu];
	}
	return per_cpu(x86_cpu_to_node_map, cpu);
}
EXPORT_SYMBOL(cpu_to_node);

/*
 * Same function as cpu_to_node() but used if called before the
 * per_cpu areas are setup.
 */
int early_cpu_to_node(int cpu)
{
	if (early_per_cpu_ptr(x86_cpu_to_node_map))
		return early_per_cpu_ptr(x86_cpu_to_node_map)[cpu];

	if (!per_cpu_offset(cpu)) {
		printk(KERN_WARNING
		       "early_cpu_to_node(%d): no per_cpu area!\n", cpu);
		dump_stack();
		return NUMA_NO_NODE;
	}
	return per_cpu(x86_cpu_to_node_map, cpu);
}

/*
 * Returns a pointer to the bitmask of CPUs on Node 'node'.
 */
cpumask_t *_node_to_cpumask_ptr(int node)
{
	if (node_to_cpumask_map == NULL) {
		printk(KERN_WARNING
		       "_node_to_cpumask_ptr(%d): no node_to_cpumask_map!\n",
		       node);
		dump_stack();
		return &cpu_online_map;
	}
	BUG_ON(node >= nr_node_ids);
	return &node_to_cpumask_map[node];
}
EXPORT_SYMBOL(_node_to_cpumask_ptr);
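
/*
 * Rationale for the fallback above (and in node_to_cpumask() below):
 * a caller that asks before the map is allocated gets a warning and a
 * usable, if overly broad, mask rather than a NULL dereference, while
 * the BUG_ON catches node ids beyond nr_node_ids, for which the map
 * has no entry.
 */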

/*
 * Returns a bitmask of CPUs on Node 'node'.
 */
cpumask_t node_to_cpumask(int node)
{
	if (node_to_cpumask_map == NULL) {
		printk(KERN_WARNING
		       "node_to_cpumask(%d): no node_to_cpumask_map!\n", node);
		dump_stack();
		return cpu_online_map;
	}
	BUG_ON(node >= nr_node_ids);
	return node_to_cpumask_map[node];
}
EXPORT_SYMBOL(node_to_cpumask);
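
/*
 * Typical caller pattern (illustrative sketch only, not code from this
 * file; do_something() is a hypothetical stand-in for the caller's
 * per-cpu work):
 *
 *	cpumask_t mask = node_to_cpumask(node);
 *	int cpu;
 *
 *	for_each_cpu_mask(cpu, mask)
 *		do_something(cpu);
 */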

/*
 * --------- end of debug versions of the numa functions ---------
 */

#endif /* CONFIG_DEBUG_PER_CPU_MAPS */

#endif /* X86_64_NUMA */