[PATCH] ppc64: prep for NUMA sparsemem rework 2
arch/powerpc/mm/numa.c
/*
 * pSeries NUMA support
 *
 * Copyright (C) 2002 Anton Blanchard <anton@au.ibm.com>, IBM
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */
#include <linux/threads.h>
#include <linux/bootmem.h>
#include <linux/init.h>
#include <linux/mm.h>
#include <linux/mmzone.h>
#include <linux/module.h>
#include <linux/nodemask.h>
#include <linux/cpu.h>
#include <linux/notifier.h>
#include <asm/lmb.h>
#include <asm/machdep.h>
#include <asm/abs_addr.h>
#include <asm/system.h>
#include <asm/smp.h>

static int numa_enabled = 1;

static int numa_debug;
#define dbg(args...) if (numa_debug) { printk(KERN_INFO args); }

#ifdef DEBUG_NUMA
#define ARRAY_INITIALISER -1
#else
#define ARRAY_INITIALISER 0
#endif

int numa_cpu_lookup_table[NR_CPUS] = { [ 0 ... (NR_CPUS - 1)] =
	ARRAY_INITIALISER};
char *numa_memory_lookup_table;
cpumask_t numa_cpumask_lookup_table[MAX_NUMNODES];

struct pglist_data *node_data[MAX_NUMNODES];
bootmem_data_t __initdata plat_node_bdata[MAX_NUMNODES];
static int min_common_depth;

/*
 * We need somewhere to store start/span for each node until we have
 * allocated the real node_data structures.
 */
static struct {
	unsigned long node_start_pfn;
	unsigned long node_end_pfn;
	unsigned long node_present_pages;
} init_node_data[MAX_NUMNODES] __initdata;

EXPORT_SYMBOL(node_data);
EXPORT_SYMBOL(numa_cpu_lookup_table);
EXPORT_SYMBOL(numa_memory_lookup_table);
EXPORT_SYMBOL(numa_cpumask_lookup_table);

static inline void map_cpu_to_node(int cpu, int node)
{
	numa_cpu_lookup_table[cpu] = node;
	if (!(cpu_isset(cpu, numa_cpumask_lookup_table[node]))) {
		cpu_set(cpu, numa_cpumask_lookup_table[node]);
	}
}

#ifdef CONFIG_HOTPLUG_CPU
static void unmap_cpu_from_node(unsigned long cpu)
{
	int node = numa_cpu_lookup_table[cpu];

	dbg("removing cpu %lu from node %d\n", cpu, node);

	if (cpu_isset(cpu, numa_cpumask_lookup_table[node])) {
		cpu_clear(cpu, numa_cpumask_lookup_table[node]);
	} else {
		printk(KERN_ERR "WARNING: cpu %lu not found in node %d\n",
		       cpu, node);
	}
}
#endif /* CONFIG_HOTPLUG_CPU */

static struct device_node * __devinit find_cpu_node(unsigned int cpu)
{
	unsigned int hw_cpuid = get_hard_smp_processor_id(cpu);
	struct device_node *cpu_node = NULL;
	unsigned int *interrupt_server, *reg;
	int len;

	while ((cpu_node = of_find_node_by_type(cpu_node, "cpu")) != NULL) {
		/* Try interrupt server first */
		interrupt_server = (unsigned int *)get_property(cpu_node,
					"ibm,ppc-interrupt-server#s", &len);

		len = len / sizeof(u32);

		if (interrupt_server && (len > 0)) {
			while (len--) {
				if (interrupt_server[len] == hw_cpuid)
					return cpu_node;
			}
		} else {
			reg = (unsigned int *)get_property(cpu_node,
							   "reg", &len);
			if (reg && (len > 0) && (reg[0] == hw_cpuid))
				return cpu_node;
		}
	}

	return NULL;
}

/* must hold reference to node during call */
static int *of_get_associativity(struct device_node *dev)
{
	return (unsigned int *)get_property(dev, "ibm,associativity", NULL);
}

static int of_node_numa_domain(struct device_node *device)
{
	int numa_domain;
	unsigned int *tmp;

	if (min_common_depth == -1)
		return 0;

	tmp = of_get_associativity(device);
	if (tmp && (tmp[0] >= min_common_depth)) {
		numa_domain = tmp[min_common_depth];
	} else {
		dbg("WARNING: no NUMA information for %s\n",
		    device->full_name);
		numa_domain = 0;
	}
	return numa_domain;
}

/*
 * In theory, the "ibm,associativity" property may contain multiple
 * associativity lists because a resource may be multiply connected
 * into the machine. This resource then has different associativity
 * characteristics relative to its multiple connections. We ignore
 * this for now. We also assume that all cpu and memory sets have
 * their distances represented at a common level. This won't be
 * true for hierarchical NUMA.
 *
 * In any case the ibm,associativity-reference-points should give
 * the correct depth for a normal NUMA system.
 *
 * - Dave Hansen <haveblue@us.ibm.com>
 */
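
/*
 * Illustrative example (hypothetical values): a cpu node with
 * ibm,associativity = <4 0 0 1 2> has four associativity levels
 * (the first cell is the count). With min_common_depth == 3, its
 * NUMA domain is tmp[3] == 1 in of_node_numa_domain() above.
 */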
static int __init find_min_common_depth(void)
{
	int depth;
	unsigned int *ref_points;
	struct device_node *rtas_root;
	unsigned int len;

	rtas_root = of_find_node_by_path("/rtas");

	if (!rtas_root)
		return -1;

	/*
	 * this property is 2 32-bit integers, each representing a level of
	 * depth in the associativity nodes. The first is for an SMP
	 * configuration (should be all 0's) and the second is for a normal
	 * NUMA configuration.
	 */
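	/*
	 * Hypothetical example: ibm,associativity-reference-points =
	 * <0x0 0x3> would select the second cell and give depth 3 below.
	 */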
	ref_points = (unsigned int *)get_property(rtas_root,
			"ibm,associativity-reference-points", &len);

	/* len is in bytes; we read the second cell, so need two of them */
	if (ref_points && (len >= 2 * sizeof(unsigned int))) {
		depth = ref_points[1];
	} else {
		dbg("WARNING: could not find NUMA "
		    "associativity reference point\n");
		depth = -1;
	}
	of_node_put(rtas_root);

	return depth;
}

static int __init get_mem_addr_cells(void)
{
	struct device_node *memory = NULL;
	int rc;

	memory = of_find_node_by_type(memory, "memory");
	if (!memory)
		return 0; /* it won't matter */

	rc = prom_n_addr_cells(memory);
	return rc;
}

static int __init get_mem_size_cells(void)
{
	struct device_node *memory = NULL;
	int rc;

	memory = of_find_node_by_type(memory, "memory");
	if (!memory)
		return 0; /* it won't matter */
	rc = prom_n_size_cells(memory);
	return rc;
}

static unsigned long read_n_cells(int n, unsigned int **buf)
{
	unsigned long result = 0;

	while (n--) {
		result = (result << 32) | **buf;
		(*buf)++;
	}
	return result;
}
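
/*
 * Worked example (hypothetical cells): with n == 2 and *buf pointing
 * at { 0x00000001, 0x00000000 }, read_n_cells() returns 0x100000000
 * and leaves *buf advanced past both cells, ready for the next field.
 */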

/*
 * Figure out to which domain a cpu belongs and stick it there.
 * Return the id of the domain used.
 */
static int numa_setup_cpu(unsigned long lcpu)
{
	int numa_domain = 0;
	struct device_node *cpu = find_cpu_node(lcpu);

	if (!cpu) {
		WARN_ON(1);
		goto out;
	}

	numa_domain = of_node_numa_domain(cpu);

	if (numa_domain >= num_online_nodes()) {
		/*
		 * POWER4 LPAR uses 0xffff as invalid node,
		 * don't warn in this case.
		 */
		if (numa_domain != 0xffff)
			printk(KERN_ERR "WARNING: cpu %lu "
			       "maps to invalid NUMA node %d\n",
			       lcpu, numa_domain);
		numa_domain = 0;
	}
out:
	node_set_online(numa_domain);

	map_cpu_to_node(lcpu, numa_domain);

	of_node_put(cpu);

	return numa_domain;
}

static int cpu_numa_callback(struct notifier_block *nfb,
			     unsigned long action,
			     void *hcpu)
{
	unsigned long lcpu = (unsigned long)hcpu;
	int ret = NOTIFY_DONE;

	switch (action) {
	case CPU_UP_PREPARE:
		if (min_common_depth == -1 || !numa_enabled)
			map_cpu_to_node(lcpu, 0);
		else
			numa_setup_cpu(lcpu);
		ret = NOTIFY_OK;
		break;
#ifdef CONFIG_HOTPLUG_CPU
	case CPU_DEAD:
	case CPU_UP_CANCELED:
		unmap_cpu_from_node(lcpu);
		ret = NOTIFY_OK;
		break;
#endif
	}
	return ret;
}

/*
 * Check and possibly modify a memory region to enforce the memory limit.
 *
 * Returns the size the region should have to enforce the memory limit.
 * This will either be the original value of size, a truncated value,
 * or zero. If the returned value of size is 0 the region should be
 * discarded as it lies wholly above the memory limit.
 */
static unsigned long __init numa_enforce_memory_limit(unsigned long start, unsigned long size)
{
	/*
	 * We use lmb_end_of_DRAM() in here instead of memory_limit because
	 * we've already adjusted it for the limit and it takes care of
	 * having memory holes below the limit.
	 */

	if (!memory_limit)
		return size;

	if (start + size <= lmb_end_of_DRAM())
		return size;

	if (start >= lmb_end_of_DRAM())
		return 0;

	return lmb_end_of_DRAM() - start;
}
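
/*
 * Hypothetical example: with a memory_limit in effect and
 * lmb_end_of_DRAM() == 0x40000000, a region at start 0x30000000 with
 * size 0x20000000 is truncated to 0x10000000, while a region starting
 * at or above 0x40000000 returns 0 and should be discarded.
 */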

static int __init parse_numa_properties(void)
{
	struct device_node *cpu = NULL;
	struct device_node *memory = NULL;
	int addr_cells, size_cells;
	int max_domain = 0;
	long entries = lmb_end_of_DRAM() >> MEMORY_INCREMENT_SHIFT;
	unsigned long i;

	if (numa_enabled == 0) {
		printk(KERN_WARNING "NUMA disabled by user\n");
		return -1;
	}

	numa_memory_lookup_table =
		(char *)abs_to_virt(lmb_alloc(entries * sizeof(char), 1));
	memset(numa_memory_lookup_table, 0, entries * sizeof(char));

	for (i = 0; i < entries ; i++)
		numa_memory_lookup_table[i] = ARRAY_INITIALISER;

	min_common_depth = find_min_common_depth();

	dbg("NUMA associativity depth for CPU/Memory: %d\n", min_common_depth);
	if (min_common_depth < 0)
		return min_common_depth;

	max_domain = numa_setup_cpu(boot_cpuid);

	/*
	 * Even though we connect cpus to numa domains later in SMP init,
	 * we need to know the maximum node id now. This is because each
	 * node id must have NODE_DATA etc backing it.
	 * As a result of hotplug we could still have cpus appear later on
	 * with larger node ids. In that case we force the cpu into node 0.
	 */
	for_each_cpu(i) {
		int numa_domain;

		cpu = find_cpu_node(i);

		if (cpu) {
			numa_domain = of_node_numa_domain(cpu);
			of_node_put(cpu);

			if (numa_domain < MAX_NUMNODES &&
			    max_domain < numa_domain)
				max_domain = numa_domain;
		}
	}

	addr_cells = get_mem_addr_cells();
	size_cells = get_mem_size_cells();
	memory = NULL;
	while ((memory = of_find_node_by_type(memory, "memory")) != NULL) {
		unsigned long start;
		unsigned long size;
		int numa_domain;
		int ranges;
		unsigned int *memcell_buf;
		unsigned int len;

		memcell_buf = (unsigned int *)get_property(memory, "reg", &len);
		if (!memcell_buf || len <= 0)
			continue;

		ranges = memory->n_addrs;
new_range:
		/* these are order-sensitive, and modify the buffer pointer */
		start = read_n_cells(addr_cells, &memcell_buf);
		size = read_n_cells(size_cells, &memcell_buf);

		start = _ALIGN_DOWN(start, MEMORY_INCREMENT);
		size = _ALIGN_UP(size, MEMORY_INCREMENT);

		numa_domain = of_node_numa_domain(memory);

		if (numa_domain >= MAX_NUMNODES) {
			if (numa_domain != 0xffff)
				printk(KERN_ERR "WARNING: memory at %lx maps "
				       "to invalid NUMA node %d\n", start,
				       numa_domain);
			numa_domain = 0;
		}

		if (max_domain < numa_domain)
			max_domain = numa_domain;

		if (!(size = numa_enforce_memory_limit(start, size))) {
			if (--ranges)
				goto new_range;
			else
				continue;
		}

		/*
		 * Initialize new node struct, or add to an existing one.
		 */
		if (init_node_data[numa_domain].node_end_pfn) {
			if ((start / PAGE_SIZE) <
			    init_node_data[numa_domain].node_start_pfn)
				init_node_data[numa_domain].node_start_pfn =
					start / PAGE_SIZE;
			if (((start / PAGE_SIZE) + (size / PAGE_SIZE)) >
			    init_node_data[numa_domain].node_end_pfn)
				init_node_data[numa_domain].node_end_pfn =
					(start / PAGE_SIZE) +
					(size / PAGE_SIZE);

			init_node_data[numa_domain].node_present_pages +=
				size / PAGE_SIZE;
		} else {
			node_set_online(numa_domain);

			init_node_data[numa_domain].node_start_pfn =
				start / PAGE_SIZE;
			init_node_data[numa_domain].node_end_pfn =
				init_node_data[numa_domain].node_start_pfn +
				size / PAGE_SIZE;
			init_node_data[numa_domain].node_present_pages =
				size / PAGE_SIZE;
		}

		for (i = start ; i < (start+size); i += MEMORY_INCREMENT)
			numa_memory_lookup_table[i >> MEMORY_INCREMENT_SHIFT] =
				numa_domain;

		if (--ranges)
			goto new_range;
	}

	for (i = 0; i <= max_domain; i++)
		node_set_online(i);

	return 0;
}

static void __init setup_nonnuma(void)
{
	unsigned long top_of_ram = lmb_end_of_DRAM();
	unsigned long total_ram = lmb_phys_mem_size();
	unsigned long i;

	printk(KERN_INFO "Top of RAM: 0x%lx, Total RAM: 0x%lx\n",
	       top_of_ram, total_ram);
	printk(KERN_INFO "Memory hole size: %ldMB\n",
	       (top_of_ram - total_ram) >> 20);

	if (!numa_memory_lookup_table) {
		long entries = top_of_ram >> MEMORY_INCREMENT_SHIFT;
		numa_memory_lookup_table =
			(char *)abs_to_virt(lmb_alloc(entries * sizeof(char), 1));
		memset(numa_memory_lookup_table, 0, entries * sizeof(char));
		for (i = 0; i < entries ; i++)
			numa_memory_lookup_table[i] = ARRAY_INITIALISER;
	}

	map_cpu_to_node(boot_cpuid, 0);

	node_set_online(0);

	init_node_data[0].node_start_pfn = 0;
	init_node_data[0].node_end_pfn = lmb_end_of_DRAM() / PAGE_SIZE;
	init_node_data[0].node_present_pages = total_ram / PAGE_SIZE;

	for (i = 0 ; i < top_of_ram; i += MEMORY_INCREMENT)
		numa_memory_lookup_table[i >> MEMORY_INCREMENT_SHIFT] = 0;
}

static void __init dump_numa_topology(void)
{
	unsigned int node;
	unsigned int count;

	if (min_common_depth == -1 || !numa_enabled)
		return;

	for_each_online_node(node) {
		unsigned long i;

		printk(KERN_INFO "Node %d Memory:", node);

		count = 0;

		for (i = 0; i < lmb_end_of_DRAM(); i += MEMORY_INCREMENT) {
			if (numa_memory_lookup_table[i >> MEMORY_INCREMENT_SHIFT] == node) {
				if (count == 0)
					printk(" 0x%lx", i);
				++count;
			} else {
				if (count > 0)
					printk("-0x%lx", i);
				count = 0;
			}
		}

		if (count > 0)
			printk("-0x%lx", i);
		printk("\n");
	}
	return;
}
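
/*
 * Example output (hypothetical layout): a node owning the first 512MB
 * plus 256MB starting at 1GB prints
 *
 *   Node 0 Memory: 0x0-0x20000000 0x40000000-0x50000000
 *
 * i.e. half-open ranges built from MEMORY_INCREMENT-sized granules.
 */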

/*
 * Allocate some memory, satisfying the lmb or bootmem allocator where
 * required. nid is the preferred node and end is the physical address of
 * the highest address in the node.
 *
 * Returns the physical address of the memory.
 */
static unsigned long careful_allocation(int nid, unsigned long size,
					unsigned long align, unsigned long end)
{
	unsigned long ret = lmb_alloc_base(size, align, end);

	/* retry over all memory */
	if (!ret)
		ret = lmb_alloc_base(size, align, lmb_end_of_DRAM());

	if (!ret)
		panic("numa.c: cannot allocate %lu bytes on node %d",
		      size, nid);

	/*
	 * If the memory came from a previously allocated node, we must
	 * retry with the bootmem allocator.
	 */
	if (pa_to_nid(ret) < nid) {
		nid = pa_to_nid(ret);
		ret = (unsigned long)__alloc_bootmem_node(NODE_DATA(nid),
				size, align, 0);

		if (!ret)
			panic("numa.c: cannot allocate %lu bytes on node %d",
			      size, nid);

		ret = virt_to_abs(ret);

		dbg("alloc_bootmem %lx %lx\n", ret, size);
	}

	return ret;
}
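
/*
 * Fallback sketch (hypothetical numbers): a request for 0x1000 bytes
 * below end == 0x50000000 on nid 2 may fall back to memory on node 1
 * when node 2 is exhausted; since lower-numbered nodes already have
 * bootmem running, the allocation is redone via __alloc_bootmem_node()
 * so the lmb and bootmem allocators never hand out the same region.
 */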

void __init do_init_bootmem(void)
{
	int nid;
	int addr_cells, size_cells;
	struct device_node *memory = NULL;
	static struct notifier_block ppc64_numa_nb = {
		.notifier_call = cpu_numa_callback,
		.priority = 1 /* Must run before sched domains notifier. */
	};

	min_low_pfn = 0;
	max_low_pfn = lmb_end_of_DRAM() >> PAGE_SHIFT;
	max_pfn = max_low_pfn;

	if (parse_numa_properties())
		setup_nonnuma();
	else
		dump_numa_topology();

	register_cpu_notifier(&ppc64_numa_nb);

	for_each_online_node(nid) {
		unsigned long start_paddr, end_paddr;
		int i;
		unsigned long bootmem_paddr;
		unsigned long bootmap_pages;

		start_paddr = init_node_data[nid].node_start_pfn * PAGE_SIZE;
		end_paddr = init_node_data[nid].node_end_pfn * PAGE_SIZE;

		/* Allocate the node structure node local if possible */
		NODE_DATA(nid) = (struct pglist_data *)careful_allocation(nid,
					sizeof(struct pglist_data),
					SMP_CACHE_BYTES, end_paddr);
		NODE_DATA(nid) = abs_to_virt(NODE_DATA(nid));
		memset(NODE_DATA(nid), 0, sizeof(struct pglist_data));

		dbg("node %d\n", nid);
		dbg("NODE_DATA() = %p\n", NODE_DATA(nid));

		NODE_DATA(nid)->bdata = &plat_node_bdata[nid];
		NODE_DATA(nid)->node_start_pfn =
			init_node_data[nid].node_start_pfn;
		NODE_DATA(nid)->node_spanned_pages =
			end_paddr - start_paddr;

		if (NODE_DATA(nid)->node_spanned_pages == 0)
			continue;

		dbg("start_paddr = %lx\n", start_paddr);
		dbg("end_paddr = %lx\n", end_paddr);

		bootmap_pages = bootmem_bootmap_pages((end_paddr - start_paddr) >> PAGE_SHIFT);

		bootmem_paddr = careful_allocation(nid,
				bootmap_pages << PAGE_SHIFT,
				PAGE_SIZE, end_paddr);
		memset(abs_to_virt(bootmem_paddr), 0,
		       bootmap_pages << PAGE_SHIFT);
		dbg("bootmap_paddr = %lx\n", bootmem_paddr);

		init_bootmem_node(NODE_DATA(nid), bootmem_paddr >> PAGE_SHIFT,
				  start_paddr >> PAGE_SHIFT,
				  end_paddr >> PAGE_SHIFT);

		/*
		 * We need to do another scan of all memory sections to
		 * associate memory with the correct node.
		 */
		addr_cells = get_mem_addr_cells();
		size_cells = get_mem_size_cells();
		memory = NULL;
		while ((memory = of_find_node_by_type(memory, "memory")) != NULL) {
			unsigned long mem_start, mem_size;
			int numa_domain, ranges;
			unsigned int *memcell_buf;
			unsigned int len;

			memcell_buf = (unsigned int *)get_property(memory, "reg", &len);
			if (!memcell_buf || len <= 0)
				continue;

			ranges = memory->n_addrs;	/* ranges in cell */
new_range:
			mem_start = read_n_cells(addr_cells, &memcell_buf);
			mem_size = read_n_cells(size_cells, &memcell_buf);
			if (numa_enabled) {
				numa_domain = of_node_numa_domain(memory);
				if (numa_domain >= MAX_NUMNODES)
					numa_domain = 0;
			} else
				numa_domain = 0;

			if (numa_domain != nid)
				continue;

			mem_size = numa_enforce_memory_limit(mem_start, mem_size);
			if (mem_size) {
				dbg("free_bootmem %lx %lx\n", mem_start, mem_size);
				free_bootmem_node(NODE_DATA(nid), mem_start, mem_size);
			}

			if (--ranges)	/* process all ranges in cell */
				goto new_range;
		}

		/*
		 * Mark reserved regions on this node
		 */
		for (i = 0; i < lmb.reserved.cnt; i++) {
			unsigned long physbase = lmb.reserved.region[i].base;
			unsigned long size = lmb.reserved.region[i].size;

			if (pa_to_nid(physbase) != nid &&
			    pa_to_nid(physbase+size-1) != nid)
				continue;

			if (physbase < end_paddr &&
			    (physbase+size) > start_paddr) {
				/* overlaps */
				if (physbase < start_paddr) {
					size -= start_paddr - physbase;
					physbase = start_paddr;
				}

				if (size > end_paddr - physbase)
					size = end_paddr - physbase;

				dbg("reserve_bootmem %lx %lx\n", physbase,
				    size);
				reserve_bootmem_node(NODE_DATA(nid), physbase,
						     size);
			}
		}

		/*
		 * This loop may look familiar, but we have to do it again
		 * after marking our reserved memory to mark memory present
		 * for sparsemem.
		 */
		addr_cells = get_mem_addr_cells();
		size_cells = get_mem_size_cells();
		memory = NULL;
		while ((memory = of_find_node_by_type(memory, "memory")) != NULL) {
			unsigned long mem_start, mem_size;
			int numa_domain, ranges;
			unsigned int *memcell_buf;
			unsigned int len;

			memcell_buf = (unsigned int *)get_property(memory, "reg", &len);
			if (!memcell_buf || len <= 0)
				continue;

			ranges = memory->n_addrs;	/* ranges in cell */
new_range2:
			mem_start = read_n_cells(addr_cells, &memcell_buf);
			mem_size = read_n_cells(size_cells, &memcell_buf);
			if (numa_enabled) {
				numa_domain = of_node_numa_domain(memory);
				if (numa_domain >= MAX_NUMNODES)
					numa_domain = 0;
			} else
				numa_domain = 0;

			if (numa_domain != nid)
				continue;

			mem_size = numa_enforce_memory_limit(mem_start, mem_size);
			memory_present(numa_domain, mem_start >> PAGE_SHIFT,
				       (mem_start + mem_size) >> PAGE_SHIFT);

			if (--ranges)	/* process all ranges in cell */
				goto new_range2;
		}

	}
}

void __init paging_init(void)
{
	unsigned long zones_size[MAX_NR_ZONES];
	unsigned long zholes_size[MAX_NR_ZONES];
	int nid;

	memset(zones_size, 0, sizeof(zones_size));
	memset(zholes_size, 0, sizeof(zholes_size));

	for_each_online_node(nid) {
		unsigned long start_pfn;
		unsigned long end_pfn;

		start_pfn = init_node_data[nid].node_start_pfn;
		end_pfn = init_node_data[nid].node_end_pfn;

		zones_size[ZONE_DMA] = end_pfn - start_pfn;
		zholes_size[ZONE_DMA] = zones_size[ZONE_DMA] -
			init_node_data[nid].node_present_pages;

		dbg("free_area_init node %d %lx %lx (hole: %lx)\n", nid,
		    zones_size[ZONE_DMA], start_pfn, zholes_size[ZONE_DMA]);

		free_area_init_node(nid, NODE_DATA(nid), zones_size,
				    start_pfn, zholes_size);
	}
}
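
/*
 * Hypothetical example: a node spanning pfns 0x1000-0x3000 with only
 * 0x1800 pages present gets zones_size[ZONE_DMA] == 0x2000 and
 * zholes_size[ZONE_DMA] == 0x800; free_area_init_node() subtracts the
 * holes when accounting the node's present pages.
 */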

static int __init early_numa(char *p)
{
	if (!p)
		return 0;

	if (strstr(p, "off"))
		numa_enabled = 0;

	if (strstr(p, "debug"))
		numa_debug = 1;

	return 0;
}
early_param("numa", early_numa);