powerpc/numa: Use ibm,architecture-vec-5 to detect form 1 affinity
arch/powerpc/mm/numa.c
/*
 * pSeries NUMA support
 *
 * Copyright (C) 2002 Anton Blanchard <anton@au.ibm.com>, IBM
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */
#include <linux/threads.h>
#include <linux/bootmem.h>
#include <linux/init.h>
#include <linux/mm.h>
#include <linux/mmzone.h>
#include <linux/module.h>
#include <linux/nodemask.h>
#include <linux/cpu.h>
#include <linux/notifier.h>
#include <linux/lmb.h>
#include <linux/of.h>
#include <linux/pfn.h>
#include <asm/sparsemem.h>
#include <asm/prom.h>
#include <asm/system.h>
#include <asm/smp.h>

static int numa_enabled = 1;

static char *cmdline __initdata;

static int numa_debug;
#define dbg(args...) if (numa_debug) { printk(KERN_INFO args); }

int numa_cpu_lookup_table[NR_CPUS];
cpumask_var_t node_to_cpumask_map[MAX_NUMNODES];
struct pglist_data *node_data[MAX_NUMNODES];

EXPORT_SYMBOL(numa_cpu_lookup_table);
EXPORT_SYMBOL(node_to_cpumask_map);
EXPORT_SYMBOL(node_data);

static int min_common_depth;
static int n_mem_addr_cells, n_mem_size_cells;

/*
 * Allocate node_to_cpumask_map based on number of available nodes
 * Requires node_possible_map to be valid.
 *
 * Note: node_to_cpumask() is not valid until after this is done.
 */
static void __init setup_node_to_cpumask_map(void)
{
	unsigned int node, num = 0;

	/* setup nr_node_ids if not done yet */
	if (nr_node_ids == MAX_NUMNODES) {
		for_each_node_mask(node, node_possible_map)
			num = node;
		nr_node_ids = num + 1;
	}

	/* allocate the map */
	for (node = 0; node < nr_node_ids; node++)
		alloc_bootmem_cpumask_var(&node_to_cpumask_map[node]);

	/* cpumask_of_node() will now work */
	dbg("Node to cpumask map for %d nodes\n", nr_node_ids);
}

static int __cpuinit fake_numa_create_new_node(unsigned long end_pfn,
						unsigned int *nid)
{
	unsigned long long mem;
	char *p = cmdline;
	static unsigned int fake_nid;
	static unsigned long long curr_boundary;

	/*
	 * Modify node id, iff we started creating NUMA nodes
	 * We want to continue from where we left off the last time
	 */
	if (fake_nid)
		*nid = fake_nid;
	/*
	 * In case there are no more arguments to parse, the
	 * node_id should be the same as the last fake node id
	 * (we've handled this above).
	 */
	if (!p)
		return 0;

	mem = memparse(p, &p);
	if (!mem)
		return 0;

	if (mem < curr_boundary)
		return 0;

	curr_boundary = mem;

	if ((end_pfn << PAGE_SHIFT) > mem) {
		/*
		 * Skip commas and spaces
		 */
		while (*p == ',' || *p == ' ' || *p == '\t')
			p++;

		cmdline = p;
		fake_nid++;
		*nid = fake_nid;
		dbg("created new fake_node with id %d\n", fake_nid);
		return 1;
	}
	return 0;
}
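
/*
 * For example (values are hypothetical): booting with
 * numa=fake=1G,4G makes the calls above carve memory into three
 * fake nodes as the memory scan walks upwards: node 0 below 1G,
 * node 1 from 1G up to 4G, and node 2 for everything above 4G.
 * Boundaries are parsed with memparse(), so the usual K/M/G
 * suffixes work, and they must be given in increasing order
 * (smaller values fail the curr_boundary check above).
 */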

/*
 * get_active_region_work_fn - A helper function for get_node_active_region
 * Returns datax set to the start_pfn and end_pfn if they contain
 * the initial value of datax->start_pfn between them
 * @start_pfn: start page(inclusive) of region to check
 * @end_pfn: end page(exclusive) of region to check
 * @datax: comes in with ->start_pfn set to value to search for and
 *	goes out with active range if it contains it
 * Returns 1 if search value is in range else 0
 */
static int __init get_active_region_work_fn(unsigned long start_pfn,
					unsigned long end_pfn, void *datax)
{
	struct node_active_region *data;
	data = (struct node_active_region *)datax;

	if (start_pfn <= data->start_pfn && end_pfn > data->start_pfn) {
		data->start_pfn = start_pfn;
		data->end_pfn = end_pfn;
		return 1;
	}
	return 0;
}

/*
 * get_node_active_region - Return active region containing start_pfn
 * Active range returned is empty if none found.
 * @start_pfn: The page to return the region for.
 * @node_ar: Returned set to the active region containing start_pfn
 */
static void __init get_node_active_region(unsigned long start_pfn,
					struct node_active_region *node_ar)
{
	int nid = early_pfn_to_nid(start_pfn);

	node_ar->nid = nid;
	node_ar->start_pfn = start_pfn;
	node_ar->end_pfn = start_pfn;
	work_with_active_regions(nid, get_active_region_work_fn, node_ar);
}

static void __cpuinit map_cpu_to_node(int cpu, int node)
{
	numa_cpu_lookup_table[cpu] = node;

	dbg("adding cpu %d to node %d\n", cpu, node);

	if (!(cpumask_test_cpu(cpu, node_to_cpumask_map[node])))
		cpumask_set_cpu(cpu, node_to_cpumask_map[node]);
}

#ifdef CONFIG_HOTPLUG_CPU
static void unmap_cpu_from_node(unsigned long cpu)
{
	int node = numa_cpu_lookup_table[cpu];

	dbg("removing cpu %lu from node %d\n", cpu, node);

	if (cpumask_test_cpu(cpu, node_to_cpumask_map[node])) {
		cpumask_clear_cpu(cpu, node_to_cpumask_map[node]);
	} else {
		printk(KERN_ERR "WARNING: cpu %lu not found in node %d\n",
		       cpu, node);
	}
}
#endif /* CONFIG_HOTPLUG_CPU */

/* must hold reference to node during call */
static const int *of_get_associativity(struct device_node *dev)
{
	return of_get_property(dev, "ibm,associativity", NULL);
}

/*
 * Returns the property linux,drconf-usable-memory if
 * it exists (the property exists only in kexec/kdump kernels,
 * added by kexec-tools)
 */
static const u32 *of_get_usable_memory(struct device_node *memory)
{
	const u32 *prop;
	u32 len;
	prop = of_get_property(memory, "linux,drconf-usable-memory", &len);
	if (!prop || len < sizeof(unsigned int))
		return 0;
	return prop;
}

/* Returns nid in the range [0..MAX_NUMNODES-1], or -1 if no useful numa
 * info is found.
 */
static int of_node_to_nid_single(struct device_node *device)
{
	int nid = -1;
	const unsigned int *tmp;

	if (min_common_depth == -1)
		goto out;

	tmp = of_get_associativity(device);
	if (!tmp)
		goto out;

	if (tmp[0] >= min_common_depth)
		nid = tmp[min_common_depth];

	/* POWER4 LPAR uses 0xffff as invalid node */
	if (nid == 0xffff || nid >= MAX_NUMNODES)
		nid = -1;
out:
	return nid;
}
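
/*
 * As an illustration (hypothetical values), a cpu node might carry:
 *	ibm,associativity = <4 0 0 1 5>;
 * The first cell (4) is the number of associativity domains that
 * follow.  With min_common_depth == 4, the lookup above reads
 * tmp[4] == 5, i.e. the device lives in node 5.
 */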

/* Walk the device tree upwards, looking for an associativity id */
int of_node_to_nid(struct device_node *device)
{
	struct device_node *tmp;
	int nid = -1;

	of_node_get(device);
	while (device) {
		nid = of_node_to_nid_single(device);
		if (nid != -1)
			break;

		tmp = device;
		device = of_get_parent(tmp);
		of_node_put(tmp);
	}
	of_node_put(device);

	return nid;
}
EXPORT_SYMBOL_GPL(of_node_to_nid);

/*
 * In theory, the "ibm,associativity" property may contain multiple
 * associativity lists because a resource may be multiply connected
 * into the machine. This resource then has different associativity
 * characteristics relative to its multiple connections. We ignore
 * this for now. We also assume that all cpu and memory sets have
 * their distances represented at a common level. This won't be
 * true for hierarchical NUMA.
 *
 * In any case the ibm,associativity-reference-points should give
 * the correct depth for a normal NUMA system.
 *
 * - Dave Hansen <haveblue@us.ibm.com>
 */
static int __init find_min_common_depth(void)
{
	int depth, index;
	const unsigned int *ref_points;
	struct device_node *rtas_root;
	unsigned int len;
	struct device_node *chosen;
	const char *vec5;

	rtas_root = of_find_node_by_path("/rtas");

	if (!rtas_root)
		return -1;

	/*
	 * this property is 2 32-bit integers, each representing a level of
	 * depth in the associativity nodes. The first is for an SMP
	 * configuration (should be all 0's) and the second is for a normal
	 * NUMA configuration.
	 */
	index = 1;
	ref_points = of_get_property(rtas_root,
			"ibm,associativity-reference-points", &len);

	/*
	 * For form 1 affinity information we want the first field
	 */
#define VEC5_AFFINITY_BYTE	5
#define VEC5_AFFINITY		0x80
	chosen = of_find_node_by_path("/chosen");
	if (chosen) {
		vec5 = of_get_property(chosen, "ibm,architecture-vec-5", NULL);
		if (vec5 && (vec5[VEC5_AFFINITY_BYTE] & VEC5_AFFINITY)) {
			dbg("Using form 1 affinity\n");
			index = 0;
		}
	}

	if ((len >= 2 * sizeof(unsigned int)) && ref_points) {
		depth = ref_points[index];
	} else {
		dbg("NUMA: ibm,associativity-reference-points not found.\n");
		depth = -1;
	}
	of_node_put(rtas_root);

	return depth;
}
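
/*
 * For instance, given a (hypothetical) property
 *	ibm,associativity-reference-points = <0x4 0x4>;
 * a legacy platform uses ref_points[1] as the depth, while a
 * platform whose ibm,architecture-vec-5 has the 0x80 bit set in
 * byte 5 has negotiated form 1 affinity, so the first entry,
 * ref_points[0], is used instead.
 */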

static void __init get_n_mem_cells(int *n_addr_cells, int *n_size_cells)
{
	struct device_node *memory = NULL;

	memory = of_find_node_by_type(memory, "memory");
	if (!memory)
		panic("numa.c: No memory nodes found!");

	*n_addr_cells = of_n_addr_cells(memory);
	*n_size_cells = of_n_size_cells(memory);
	of_node_put(memory);
}

static unsigned long __devinit read_n_cells(int n, const unsigned int **buf)
{
	unsigned long result = 0;

	while (n--) {
		result = (result << 32) | **buf;
		(*buf)++;
	}
	return result;
}
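
/*
 * Worked example: with n == 2 and *buf pointing at the cells
 * { 0x00000001, 0x80000000 }, read_n_cells() returns 0x180000000
 * (6GB) and leaves *buf advanced past both cells, ready for the
 * next value in the property.
 */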

struct of_drconf_cell {
	u64	base_addr;
	u32	drc_index;
	u32	reserved;
	u32	aa_index;
	u32	flags;
};

#define DRCONF_MEM_ASSIGNED	0x00000008
#define DRCONF_MEM_AI_INVALID	0x00000040
#define DRCONF_MEM_RESERVED	0x00000080

/*
 * Read the next lmb list entry from the ibm,dynamic-memory property
 * and return the information in the provided of_drconf_cell structure.
 */
static void read_drconf_cell(struct of_drconf_cell *drmem, const u32 **cellp)
{
	const u32 *cp;

	drmem->base_addr = read_n_cells(n_mem_addr_cells, cellp);

	cp = *cellp;
	drmem->drc_index = cp[0];
	drmem->reserved = cp[1];
	drmem->aa_index = cp[2];
	drmem->flags = cp[3];

	*cellp = cp + 4;
}

/*
 * Retrieve and validate the ibm,dynamic-memory property of the device tree.
 *
 * The layout of the ibm,dynamic-memory property is a number N of lmb
 * list entries followed by N lmb list entries. Each lmb list entry
 * contains information as laid out in the of_drconf_cell struct above.
 */
static int of_get_drconf_memory(struct device_node *memory, const u32 **dm)
{
	const u32 *prop;
	u32 len, entries;

	prop = of_get_property(memory, "ibm,dynamic-memory", &len);
	if (!prop || len < sizeof(unsigned int))
		return 0;

	entries = *prop++;

	/* Now that we know the number of entries, revalidate the size
	 * of the property read in to ensure we have everything
	 */
	if (len < (entries * (n_mem_addr_cells + 4) + 1) * sizeof(unsigned int))
		return 0;

	*dm = prop;
	return entries;
}
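
/*
 * Sketch of the layout (hypothetical values), assuming
 * n_mem_addr_cells == 2, for a property describing two LMBs:
 *	ibm,dynamic-memory = <2
 *		0x0 0x00000000  drc0 0 aa0 flags0
 *		0x0 0x10000000  drc1 0 aa1 flags1>;
 * i.e. a count followed by two entries of n_mem_addr_cells + 4
 * cells each, which is exactly what the length check above
 * verifies.
 */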

/*
 * Retrieve and validate the ibm,lmb-size property for drconf memory
 * from the device tree.
 */
static u64 of_get_lmb_size(struct device_node *memory)
{
	const u32 *prop;
	u32 len;

	prop = of_get_property(memory, "ibm,lmb-size", &len);
	if (!prop || len < sizeof(unsigned int))
		return 0;

	return read_n_cells(n_mem_size_cells, &prop);
}

struct assoc_arrays {
	u32	n_arrays;
	u32	array_sz;
	const u32 *arrays;
};

/*
 * Retrieve and validate the list of associativity arrays for drconf
 * memory from the ibm,associativity-lookup-arrays property of the
 * device tree.
 *
 * The layout of the ibm,associativity-lookup-arrays property is a number N
 * indicating the number of associativity arrays, followed by a number M
 * indicating the size of each associativity array, followed by a list
 * of N associativity arrays.
 */
static int of_get_assoc_arrays(struct device_node *memory,
			       struct assoc_arrays *aa)
{
	const u32 *prop;
	u32 len;

	prop = of_get_property(memory, "ibm,associativity-lookup-arrays", &len);
	if (!prop || len < 2 * sizeof(unsigned int))
		return -1;

	aa->n_arrays = *prop++;
	aa->array_sz = *prop++;

	/* Now that we know the number of arrays and size of each array,
	 * revalidate the size of the property read in.
	 */
	if (len < (aa->n_arrays * aa->array_sz + 2) * sizeof(unsigned int))
		return -1;

	aa->arrays = prop;
	return 0;
}
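
/*
 * For example, the (hypothetical) property
 *	ibm,associativity-lookup-arrays = <2 4  0 0 0 0  0 0 1 1>;
 * describes N == 2 arrays of M == 4 cells each; each drconf LMB
 * then selects one of the two arrays through its aa_index field.
 */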

/*
 * This is like of_node_to_nid_single() for memory represented in the
 * ibm,dynamic-reconfiguration-memory node.
 */
static int of_drconf_to_nid_single(struct of_drconf_cell *drmem,
				   struct assoc_arrays *aa)
{
	int default_nid = 0;
	int nid = default_nid;
	int index;

	if (min_common_depth > 0 && min_common_depth <= aa->array_sz &&
	    !(drmem->flags & DRCONF_MEM_AI_INVALID) &&
	    drmem->aa_index < aa->n_arrays) {
		index = drmem->aa_index * aa->array_sz + min_common_depth - 1;
		nid = aa->arrays[index];

		if (nid == 0xffff || nid >= MAX_NUMNODES)
			nid = default_nid;
	}

	return nid;
}
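
/*
 * Continuing the lookup-arrays example above: an LMB with
 * aa_index == 1 and min_common_depth == 4 reads
 * aa->arrays[1 * 4 + 4 - 1], the last cell of the second array,
 * and so lands in node 1.
 */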

/*
 * Figure out to which domain a cpu belongs and stick it there.
 * Return the id of the domain used.
 */
static int __cpuinit numa_setup_cpu(unsigned long lcpu)
{
	int nid = 0;
	struct device_node *cpu = of_get_cpu_node(lcpu, NULL);

	if (!cpu) {
		WARN_ON(1);
		goto out;
	}

	nid = of_node_to_nid_single(cpu);

	if (nid < 0 || !node_online(nid))
		nid = first_online_node;
out:
	map_cpu_to_node(lcpu, nid);

	of_node_put(cpu);

	return nid;
}

static int __cpuinit cpu_numa_callback(struct notifier_block *nfb,
				       unsigned long action,
				       void *hcpu)
{
	unsigned long lcpu = (unsigned long)hcpu;
	int ret = NOTIFY_DONE;

	switch (action) {
	case CPU_UP_PREPARE:
	case CPU_UP_PREPARE_FROZEN:
		numa_setup_cpu(lcpu);
		ret = NOTIFY_OK;
		break;
#ifdef CONFIG_HOTPLUG_CPU
	case CPU_DEAD:
	case CPU_DEAD_FROZEN:
	case CPU_UP_CANCELED:
	case CPU_UP_CANCELED_FROZEN:
		unmap_cpu_from_node(lcpu);
		ret = NOTIFY_OK;
		break;
#endif
	}
	return ret;
}

/*
 * Check and possibly modify a memory region to enforce the memory limit.
 *
 * Returns the size the region should have to enforce the memory limit.
 * This will either be the original value of size, a truncated value,
 * or zero. If the returned value of size is 0 the region should be
 * discarded as it lies wholly above the memory limit.
 */
static unsigned long __init numa_enforce_memory_limit(unsigned long start,
						      unsigned long size)
{
	/*
	 * We use lmb_end_of_DRAM() in here instead of memory_limit because
	 * we've already adjusted it for the limit and it takes care of
	 * having memory holes below the limit. Also, in the case of
	 * iommu_is_off, memory_limit is not set but is implicitly enforced.
	 */

	if (start + size <= lmb_end_of_DRAM())
		return size;

	if (start >= lmb_end_of_DRAM())
		return 0;

	return lmb_end_of_DRAM() - start;
}
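
/*
 * Example: if lmb_end_of_DRAM() is 2G (say the kernel was booted
 * with mem=2G), a region covering [1.5G, 2.5G) is trimmed to 512MB
 * and a region starting at or above 2G is discarded (returns 0).
 */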

/*
 * Reads the counter for a given entry in
 * linux,drconf-usable-memory property
 */
static inline int __init read_usm_ranges(const u32 **usm)
{
	/*
	 * For each lmb in ibm,dynamic-memory a corresponding
	 * entry in linux,drconf-usable-memory property contains
	 * a counter followed by that many (base, size) pairs.
	 * read the counter from linux,drconf-usable-memory
	 */
	return read_n_cells(n_mem_size_cells, usm);
}
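
/*
 * E.g. an entry of <1 base size> says exactly one (base, size)
 * pair follows for the corresponding LMB, while a counter of 0
 * marks that LMB as entirely unusable in this (kdump) kernel.
 */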

/*
 * Extract NUMA information from the ibm,dynamic-reconfiguration-memory
 * node. This assumes n_mem_{addr,size}_cells have been set.
 */
static void __init parse_drconf_memory(struct device_node *memory)
{
	const u32 *dm, *usm;
	unsigned int n, rc, ranges, is_kexec_kdump = 0;
	unsigned long lmb_size, base, size, sz;
	int nid;
	struct assoc_arrays aa;

	n = of_get_drconf_memory(memory, &dm);
	if (!n)
		return;

	lmb_size = of_get_lmb_size(memory);
	if (!lmb_size)
		return;

	rc = of_get_assoc_arrays(memory, &aa);
	if (rc)
		return;

	/* check if this is a kexec/kdump kernel */
	usm = of_get_usable_memory(memory);
	if (usm != NULL)
		is_kexec_kdump = 1;

	for (; n != 0; --n) {
		struct of_drconf_cell drmem;

		read_drconf_cell(&drmem, &dm);

		/* skip this block if the reserved bit is set in flags (0x80)
		   or if the block is not assigned to this partition (0x8) */
		if ((drmem.flags & DRCONF_MEM_RESERVED)
		    || !(drmem.flags & DRCONF_MEM_ASSIGNED))
			continue;

		base = drmem.base_addr;
		size = lmb_size;
		ranges = 1;

		if (is_kexec_kdump) {
			ranges = read_usm_ranges(&usm);
			if (!ranges) /* there are no (base, size) pairs */
				continue;
		}
		do {
			if (is_kexec_kdump) {
				base = read_n_cells(n_mem_addr_cells, &usm);
				size = read_n_cells(n_mem_size_cells, &usm);
			}
			nid = of_drconf_to_nid_single(&drmem, &aa);
			fake_numa_create_new_node(
				((base + size) >> PAGE_SHIFT), &nid);
			node_set_online(nid);
			sz = numa_enforce_memory_limit(base, size);
			if (sz)
				add_active_range(nid, base >> PAGE_SHIFT,
						 (base >> PAGE_SHIFT)
						 + (sz >> PAGE_SHIFT));
		} while (--ranges);
	}
}

static int __init parse_numa_properties(void)
{
	struct device_node *cpu = NULL;
	struct device_node *memory = NULL;
	int default_nid = 0;
	unsigned long i;

	if (numa_enabled == 0) {
		printk(KERN_WARNING "NUMA disabled by user\n");
		return -1;
	}

	min_common_depth = find_min_common_depth();

	if (min_common_depth < 0)
		return min_common_depth;

	dbg("NUMA associativity depth for CPU/Memory: %d\n", min_common_depth);

	/*
	 * Even though we connect cpus to numa domains later in SMP
	 * init, we need to know the node ids now. This is because
	 * each node to be onlined must have NODE_DATA etc backing it.
	 */
	for_each_present_cpu(i) {
		int nid;

		cpu = of_get_cpu_node(i, NULL);
		BUG_ON(!cpu);
		nid = of_node_to_nid_single(cpu);
		of_node_put(cpu);

		/*
		 * Don't fall back to default_nid yet -- we will plug
		 * cpus into nodes once the memory scan has discovered
		 * the topology.
		 */
		if (nid < 0)
			continue;
		node_set_online(nid);
	}

	get_n_mem_cells(&n_mem_addr_cells, &n_mem_size_cells);
	memory = NULL;
	while ((memory = of_find_node_by_type(memory, "memory")) != NULL) {
		unsigned long start;
		unsigned long size;
		int nid;
		int ranges;
		const unsigned int *memcell_buf;
		unsigned int len;

		memcell_buf = of_get_property(memory,
			"linux,usable-memory", &len);
		if (!memcell_buf || len <= 0)
			memcell_buf = of_get_property(memory, "reg", &len);
		if (!memcell_buf || len <= 0)
			continue;

		/* ranges in cell */
		ranges = (len >> 2) / (n_mem_addr_cells + n_mem_size_cells);
new_range:
		/* these are order-sensitive, and modify the buffer pointer */
		start = read_n_cells(n_mem_addr_cells, &memcell_buf);
		size = read_n_cells(n_mem_size_cells, &memcell_buf);

		/*
		 * Assumption: either all memory nodes or none will
		 * have associativity properties. If none, then
		 * everything goes to default_nid.
		 */
		nid = of_node_to_nid_single(memory);
		if (nid < 0)
			nid = default_nid;

		fake_numa_create_new_node(((start + size) >> PAGE_SHIFT), &nid);
		node_set_online(nid);

		if (!(size = numa_enforce_memory_limit(start, size))) {
			if (--ranges)
				goto new_range;
			else
				continue;
		}

		add_active_range(nid, start >> PAGE_SHIFT,
				(start >> PAGE_SHIFT) + (size >> PAGE_SHIFT));

		if (--ranges)
			goto new_range;
	}

	/*
	 * Now do the same thing for each LMB listed in the ibm,dynamic-memory
	 * property in the ibm,dynamic-reconfiguration-memory node.
	 */
	memory = of_find_node_by_path("/ibm,dynamic-reconfiguration-memory");
	if (memory)
		parse_drconf_memory(memory);

	return 0;
}

static void __init setup_nonnuma(void)
{
	unsigned long top_of_ram = lmb_end_of_DRAM();
	unsigned long total_ram = lmb_phys_mem_size();
	unsigned long start_pfn, end_pfn;
	unsigned int i, nid = 0;

	printk(KERN_DEBUG "Top of RAM: 0x%lx, Total RAM: 0x%lx\n",
	       top_of_ram, total_ram);
	printk(KERN_DEBUG "Memory hole size: %ldMB\n",
	       (top_of_ram - total_ram) >> 20);

	for (i = 0; i < lmb.memory.cnt; ++i) {
		start_pfn = lmb.memory.region[i].base >> PAGE_SHIFT;
		end_pfn = start_pfn + lmb_size_pages(&lmb.memory, i);

		fake_numa_create_new_node(end_pfn, &nid);
		add_active_range(nid, start_pfn, end_pfn);
		node_set_online(nid);
	}
}

void __init dump_numa_cpu_topology(void)
{
	unsigned int node;
	unsigned int cpu, count;

	if (min_common_depth == -1 || !numa_enabled)
		return;

	for_each_online_node(node) {
		printk(KERN_DEBUG "Node %d CPUs:", node);

		count = 0;
		/*
		 * If we used a CPU iterator here we would miss printing
		 * the holes in the cpumap.
		 */
		for (cpu = 0; cpu < nr_cpu_ids; cpu++) {
			if (cpumask_test_cpu(cpu,
					     node_to_cpumask_map[node])) {
				if (count == 0)
					printk(" %u", cpu);
				++count;
			} else {
				if (count > 1)
					printk("-%u", cpu - 1);
				count = 0;
			}
		}

		if (count > 1)
			printk("-%u", nr_cpu_ids - 1);
		printk("\n");
	}
}

static void __init dump_numa_memory_topology(void)
{
	unsigned int node;
	unsigned int count;

	if (min_common_depth == -1 || !numa_enabled)
		return;

	for_each_online_node(node) {
		unsigned long i;

		printk(KERN_DEBUG "Node %d Memory:", node);

		count = 0;

		for (i = 0; i < lmb_end_of_DRAM();
		     i += (1 << SECTION_SIZE_BITS)) {
			if (early_pfn_to_nid(i >> PAGE_SHIFT) == node) {
				if (count == 0)
					printk(" 0x%lx", i);
				++count;
			} else {
				if (count > 0)
					printk("-0x%lx", i);
				count = 0;
			}
		}

		if (count > 0)
			printk("-0x%lx", i);
		printk("\n");
	}
}

/*
 * Allocate some memory, satisfying the lmb or bootmem allocator where
 * required. nid is the preferred node and end is the physical address of
 * the highest address in the node.
 *
 * Returns the virtual address of the memory.
 */
static void __init *careful_zallocation(int nid, unsigned long size,
					unsigned long align,
					unsigned long end_pfn)
{
	void *ret;
	int new_nid;
	unsigned long ret_paddr;

	ret_paddr = __lmb_alloc_base(size, align, end_pfn << PAGE_SHIFT);

	/* retry over all memory */
	if (!ret_paddr)
		ret_paddr = __lmb_alloc_base(size, align, lmb_end_of_DRAM());

	if (!ret_paddr)
		panic("numa.c: cannot allocate %lu bytes for node %d",
		      size, nid);

	ret = __va(ret_paddr);

	/*
	 * We initialize the nodes in numeric order: 0, 1, 2...
	 * and hand over control from the LMB allocator to the
	 * bootmem allocator.  If this function is called for
	 * node 5, then we know that all nodes <5 are using the
	 * bootmem allocator instead of the LMB allocator.
	 *
	 * So, check the nid from which this allocation came
	 * and double check to see if we need to use bootmem
	 * instead of the LMB.  We don't free the LMB memory
	 * since it would be useless.
	 */
	new_nid = early_pfn_to_nid(ret_paddr >> PAGE_SHIFT);
	if (new_nid < nid) {
		ret = __alloc_bootmem_node(NODE_DATA(new_nid),
					   size, align, 0);

		dbg("alloc_bootmem %p %lx\n", ret, size);
	}

	memset(ret, 0, size);
	return ret;
}

static struct notifier_block __cpuinitdata ppc64_numa_nb = {
	.notifier_call = cpu_numa_callback,
	.priority = 1 /* Must run before sched domains notifier. */
};

static void mark_reserved_regions_for_nid(int nid)
{
	struct pglist_data *node = NODE_DATA(nid);
	int i;

	for (i = 0; i < lmb.reserved.cnt; i++) {
		unsigned long physbase = lmb.reserved.region[i].base;
		unsigned long size = lmb.reserved.region[i].size;
		unsigned long start_pfn = physbase >> PAGE_SHIFT;
		unsigned long end_pfn = PFN_UP(physbase + size);
		struct node_active_region node_ar;
		unsigned long node_end_pfn = node->node_start_pfn +
					     node->node_spanned_pages;

		/*
		 * Check to make sure that this lmb.reserved area is
		 * within the bounds of the node that we care about.
		 * Checking the nid of the start and end points is not
		 * sufficient because the reserved area could span the
		 * entire node.
		 */
		if (end_pfn <= node->node_start_pfn ||
		    start_pfn >= node_end_pfn)
			continue;

		get_node_active_region(start_pfn, &node_ar);
		while (start_pfn < end_pfn &&
		       node_ar.start_pfn < node_ar.end_pfn) {
			unsigned long reserve_size = size;
			/*
			 * if reserved region extends past active region
			 * then trim size to active region
			 */
			if (end_pfn > node_ar.end_pfn)
				reserve_size = (node_ar.end_pfn << PAGE_SHIFT)
					- physbase;
			/*
			 * Only worry about *this* node, others may not
			 * yet have valid NODE_DATA().
			 */
			if (node_ar.nid == nid) {
				dbg("reserve_bootmem %lx %lx nid=%d\n",
					physbase, reserve_size, node_ar.nid);
				reserve_bootmem_node(NODE_DATA(node_ar.nid),
						physbase, reserve_size,
						BOOTMEM_DEFAULT);
			}
			/*
			 * if reserved region is contained in the active region
			 * then done.
			 */
			if (end_pfn <= node_ar.end_pfn)
				break;

			/*
			 * reserved region extends past the active region
			 * get next active region that contains this
			 * reserved region
			 */
			start_pfn = node_ar.end_pfn;
			physbase = start_pfn << PAGE_SHIFT;
			size = size - reserve_size;
			get_node_active_region(start_pfn, &node_ar);
		}
	}
}

void __init do_init_bootmem(void)
{
	int nid;

	min_low_pfn = 0;
	max_low_pfn = lmb_end_of_DRAM() >> PAGE_SHIFT;
	max_pfn = max_low_pfn;

	if (parse_numa_properties())
		setup_nonnuma();
	else
		dump_numa_memory_topology();

	for_each_online_node(nid) {
		unsigned long start_pfn, end_pfn;
		void *bootmem_vaddr;
		unsigned long bootmap_pages;

		get_pfn_range_for_nid(nid, &start_pfn, &end_pfn);

		/*
		 * Allocate the node structure node local if possible
		 *
		 * Be careful moving this around, as it relies on all
		 * previous nodes' bootmem to be initialized and have
		 * all reserved areas marked.
		 */
		NODE_DATA(nid) = careful_zallocation(nid,
					sizeof(struct pglist_data),
					SMP_CACHE_BYTES, end_pfn);

		dbg("node %d\n", nid);
		dbg("NODE_DATA() = %p\n", NODE_DATA(nid));

		NODE_DATA(nid)->bdata = &bootmem_node_data[nid];
		NODE_DATA(nid)->node_start_pfn = start_pfn;
		NODE_DATA(nid)->node_spanned_pages = end_pfn - start_pfn;

		if (NODE_DATA(nid)->node_spanned_pages == 0)
			continue;

		dbg("start_paddr = %lx\n", start_pfn << PAGE_SHIFT);
		dbg("end_paddr = %lx\n", end_pfn << PAGE_SHIFT);

		bootmap_pages = bootmem_bootmap_pages(end_pfn - start_pfn);
		bootmem_vaddr = careful_zallocation(nid,
					bootmap_pages << PAGE_SHIFT,
					PAGE_SIZE, end_pfn);

		dbg("bootmap_vaddr = %p\n", bootmem_vaddr);

		init_bootmem_node(NODE_DATA(nid),
				  __pa(bootmem_vaddr) >> PAGE_SHIFT,
				  start_pfn, end_pfn);

		free_bootmem_with_active_regions(nid, end_pfn);
		/*
		 * Be very careful about moving this around.  Future
		 * calls to careful_zallocation() depend on this getting
		 * done correctly.
		 */
		mark_reserved_regions_for_nid(nid);
		sparse_memory_present_with_active_regions(nid);
	}

	init_bootmem_done = 1;

	/*
	 * Now bootmem is initialised we can create the node to cpumask
	 * lookup tables and setup the cpu callback to populate them.
	 */
	setup_node_to_cpumask_map();

	register_cpu_notifier(&ppc64_numa_nb);
	cpu_numa_callback(&ppc64_numa_nb, CPU_UP_PREPARE,
			  (void *)(unsigned long)boot_cpuid);
}

void __init paging_init(void)
{
	unsigned long max_zone_pfns[MAX_NR_ZONES];
	memset(max_zone_pfns, 0, sizeof(max_zone_pfns));
	max_zone_pfns[ZONE_DMA] = lmb_end_of_DRAM() >> PAGE_SHIFT;
	free_area_init_nodes(max_zone_pfns);
}

static int __init early_numa(char *p)
{
	if (!p)
		return 0;

	if (strstr(p, "off"))
		numa_enabled = 0;

	if (strstr(p, "debug"))
		numa_debug = 1;

	p = strstr(p, "fake=");
	if (p)
		cmdline = p + strlen("fake=");

	return 0;
}
early_param("numa", early_numa);
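
/*
 * Forms of the option accepted by early_numa() above:
 *	numa=off	disable NUMA
 *	numa=debug	enable the dbg() messages in this file
 *	numa=fake=1G,4G	create fake nodes at the given boundaries
 *			(see fake_numa_create_new_node())
 * Because the parsing is strstr() based, the options can be
 * combined, e.g. numa=debug,fake=1G.
 */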

#ifdef CONFIG_MEMORY_HOTPLUG
/*
 * Find the node associated with a hot added memory section for
 * memory represented in the device tree by the property
 * ibm,dynamic-reconfiguration-memory/ibm,dynamic-memory.
 */
static int hot_add_drconf_scn_to_nid(struct device_node *memory,
				     unsigned long scn_addr)
{
	const u32 *dm;
	unsigned int drconf_cell_cnt, rc;
	unsigned long lmb_size;
	struct assoc_arrays aa;
	int nid = -1;

	drconf_cell_cnt = of_get_drconf_memory(memory, &dm);
	if (!drconf_cell_cnt)
		return -1;

	lmb_size = of_get_lmb_size(memory);
	if (!lmb_size)
		return -1;

	rc = of_get_assoc_arrays(memory, &aa);
	if (rc)
		return -1;

	for (; drconf_cell_cnt != 0; --drconf_cell_cnt) {
		struct of_drconf_cell drmem;

		read_drconf_cell(&drmem, &dm);

		/* skip this block if it is reserved or not assigned to
		 * this partition */
		if ((drmem.flags & DRCONF_MEM_RESERVED)
		    || !(drmem.flags & DRCONF_MEM_ASSIGNED))
			continue;

		if ((scn_addr < drmem.base_addr)
		    || (scn_addr >= (drmem.base_addr + lmb_size)))
			continue;

		nid = of_drconf_to_nid_single(&drmem, &aa);
		break;
	}

	return nid;
}

/*
 * Find the node associated with a hot added memory section for memory
 * represented in the device tree as a node (i.e. memory@XXXX) for
 * each lmb.
 */
int hot_add_node_scn_to_nid(unsigned long scn_addr)
{
	struct device_node *memory = NULL;
	int nid = -1;

	while ((memory = of_find_node_by_type(memory, "memory")) != NULL) {
		unsigned long start, size;
		int ranges;
		const unsigned int *memcell_buf;
		unsigned int len;

		memcell_buf = of_get_property(memory, "reg", &len);
		if (!memcell_buf || len <= 0)
			continue;

		/* ranges in cell */
		ranges = (len >> 2) / (n_mem_addr_cells + n_mem_size_cells);

		while (ranges--) {
			start = read_n_cells(n_mem_addr_cells, &memcell_buf);
			size = read_n_cells(n_mem_size_cells, &memcell_buf);

			if ((scn_addr < start) || (scn_addr >= (start + size)))
				continue;

			nid = of_node_to_nid_single(memory);
			break;
		}

		of_node_put(memory);
		if (nid >= 0)
			break;
	}

	return nid;
}

/*
 * Find the node associated with a hot added memory section.  Section
 * corresponds to a SPARSEMEM section, not an LMB.  It is assumed that
 * sections are fully contained within a single LMB.
 */
int hot_add_scn_to_nid(unsigned long scn_addr)
{
	struct device_node *memory = NULL;
	int nid, found = 0;

	if (!numa_enabled || (min_common_depth < 0))
		return first_online_node;

	memory = of_find_node_by_path("/ibm,dynamic-reconfiguration-memory");
	if (memory) {
		nid = hot_add_drconf_scn_to_nid(memory, scn_addr);
		of_node_put(memory);
	} else {
		nid = hot_add_node_scn_to_nid(scn_addr);
	}

	if (nid < 0 || !node_online(nid))
		nid = first_online_node;

	if (NODE_DATA(nid)->node_spanned_pages)
		return nid;

	for_each_online_node(nid) {
		if (NODE_DATA(nid)->node_spanned_pages) {
			found = 1;
			break;
		}
	}

	BUG_ON(!found);
	return nid;
}

#endif /* CONFIG_MEMORY_HOTPLUG */