/*
 * Copyright (C) 2002 Anton Blanchard <anton@au.ibm.com>, IBM
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */
#include <linux/threads.h>
#include <linux/bootmem.h>
#include <linux/init.h>
#include <linux/mmzone.h>
#include <linux/export.h>
#include <linux/nodemask.h>
#include <linux/cpu.h>
#include <linux/notifier.h>
#include <linux/memblock.h>
#include <linux/pfn.h>
#include <linux/cpuset.h>
#include <linux/node.h>
#include <linux/stop_machine.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
#include <linux/uaccess.h>
#include <linux/slab.h>
#include <asm/cputhreads.h>
#include <asm/sparsemem.h>
#include <asm/topology.h>
#include <asm/firmware.h>
#include <asm/hvcall.h>
#include <asm/setup.h>
static int numa_enabled = 1;

static char *cmdline __initdata;

static int numa_debug;
#define dbg(args...) if (numa_debug) { printk(KERN_INFO args); }

int numa_cpu_lookup_table[NR_CPUS];
cpumask_var_t node_to_cpumask_map[MAX_NUMNODES];
struct pglist_data *node_data[MAX_NUMNODES];

EXPORT_SYMBOL(numa_cpu_lookup_table);
EXPORT_SYMBOL(node_to_cpumask_map);
EXPORT_SYMBOL(node_data);

static int min_common_depth;
static int n_mem_addr_cells, n_mem_size_cells;
static int form1_affinity;

#define MAX_DISTANCE_REF_POINTS 4
static int distance_ref_points_depth;
static const unsigned int *distance_ref_points;
static int distance_lookup_table[MAX_NUMNODES][MAX_DISTANCE_REF_POINTS];
/*
 * Allocate node_to_cpumask_map based on number of available nodes
 * Requires node_possible_map to be valid.
 *
 * Note: cpumask_of_node() is not valid until after this is done.
 */
static void __init setup_node_to_cpumask_map(void)
{
	unsigned int node;

	/* setup nr_node_ids if not done yet */
	if (nr_node_ids == MAX_NUMNODES)
		setup_nr_node_ids();

	/* allocate the map */
	for (node = 0; node < nr_node_ids; node++)
		alloc_bootmem_cpumask_var(&node_to_cpumask_map[node]);

	/* cpumask_of_node() will now work */
	dbg("Node to cpumask map for %d nodes\n", nr_node_ids);
}
static int __init fake_numa_create_new_node(unsigned long end_pfn,
					    unsigned int *nid)
{
	unsigned long long mem;
	char *p = cmdline;
	static unsigned int fake_nid;
	static unsigned long long curr_boundary;

	/*
	 * Modify node id, iff we started creating NUMA nodes
	 * We want to continue from where we left off the last time
	 */
	if (fake_nid)
		*nid = fake_nid;
	/*
	 * In case there are no more arguments to parse, the
	 * node_id should be the same as the last fake node id
	 * (we've handled this above).
	 */
	if (!p)
		return 0;

	mem = memparse(p, &p);
	if (!mem)
		return 0;

	if (mem < curr_boundary)
		return 0;

	curr_boundary = mem;

	if ((end_pfn << PAGE_SHIFT) > mem) {
		/*
		 * Skip commas and spaces
		 */
		while (*p == ',' || *p == ' ' || *p == '\t')
			p++;

		cmdline = p;
		fake_nid++;
		*nid = fake_nid;
		dbg("created new fake_node with id %d\n", fake_nid);
		return 1;
	}
	return 0;
}
/*
 * get_node_active_region - Return active region containing pfn
 * Active range returned is empty if none found.
 * @pfn: The page to return the region for
 * @node_ar: Returned set to the active region containing @pfn
 */
static void __init get_node_active_region(unsigned long pfn,
					  struct node_active_region *node_ar)
{
	unsigned long start_pfn, end_pfn;
	int i, nid;

	for_each_mem_pfn_range(i, MAX_NUMNODES, &start_pfn, &end_pfn, &nid) {
		if (pfn >= start_pfn && pfn < end_pfn) {
			node_ar->nid = nid;
			node_ar->start_pfn = start_pfn;
			node_ar->end_pfn = end_pfn;
			break;
		}
	}
}
static void reset_numa_cpu_lookup_table(void)
{
	unsigned int cpu;

	for_each_possible_cpu(cpu)
		numa_cpu_lookup_table[cpu] = -1;
}
static void update_numa_cpu_lookup_table(unsigned int cpu, int node)
{
	numa_cpu_lookup_table[cpu] = node;
}
static void map_cpu_to_node(int cpu, int node)
{
	update_numa_cpu_lookup_table(cpu, node);

	dbg("adding cpu %d to node %d\n", cpu, node);

	if (!(cpumask_test_cpu(cpu, node_to_cpumask_map[node])))
		cpumask_set_cpu(cpu, node_to_cpumask_map[node]);
}
#if defined(CONFIG_HOTPLUG_CPU) || defined(CONFIG_PPC_SPLPAR)
static void unmap_cpu_from_node(unsigned long cpu)
{
	int node = numa_cpu_lookup_table[cpu];

	dbg("removing cpu %lu from node %d\n", cpu, node);

	if (cpumask_test_cpu(cpu, node_to_cpumask_map[node])) {
		cpumask_clear_cpu(cpu, node_to_cpumask_map[node]);
	} else {
		printk(KERN_ERR "WARNING: cpu %lu not found in node %d\n",
		       cpu, node);
	}
}
#endif /* CONFIG_HOTPLUG_CPU || CONFIG_PPC_SPLPAR */
/* must hold reference to node during call */
static const int *of_get_associativity(struct device_node *dev)
{
	return of_get_property(dev, "ibm,associativity", NULL);
}
/*
 * Returns the property linux,drconf-usable-memory if
 * it exists (the property exists only in kexec/kdump kernels,
 * added by kexec-tools)
 */
static const u32 *of_get_usable_memory(struct device_node *memory)
{
	const u32 *prop;
	u32 len;

	prop = of_get_property(memory, "linux,drconf-usable-memory", &len);
	if (!prop || len < sizeof(unsigned int))
		return NULL;
	return prop;
}
int __node_distance(int a, int b)
{
	int i;
	int distance = LOCAL_DISTANCE;

	if (!form1_affinity)
		return ((a == b) ? LOCAL_DISTANCE : REMOTE_DISTANCE);

	for (i = 0; i < distance_ref_points_depth; i++) {
		if (distance_lookup_table[a][i] == distance_lookup_table[b][i])
			break;

		/* Double the distance for each NUMA level */
		distance *= 2;
	}

	return distance;
}
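/*
 * Worked example (illustrative values, not from the original source):
 * with form 1 affinity and LOCAL_DISTANCE == 10, two nodes whose
 * distance_lookup_table entries differ at levels 0 and 1 but match at
 * level 2 take two doublings: 10 -> 20 -> 40, so __node_distance()
 * returns 40.
 */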
static void initialize_distance_lookup_table(int nid,
		const unsigned int *associativity)
{
	int i;

	if (!form1_affinity)
		return;

	for (i = 0; i < distance_ref_points_depth; i++) {
		distance_lookup_table[nid][i] =
			associativity[distance_ref_points[i]];
	}
}
/* Returns nid in the range [0..MAX_NUMNODES-1], or -1 if no useful numa
 * info is found.
 */
static int associativity_to_nid(const unsigned int *associativity)
{
	int nid = -1;

	if (min_common_depth == -1)
		goto out;

	if (associativity[0] >= min_common_depth)
		nid = associativity[min_common_depth];

	/* POWER4 LPAR uses 0xffff as invalid node */
	if (nid == 0xffff || nid >= MAX_NUMNODES)
		nid = -1;

	if (nid > 0 && associativity[0] >= distance_ref_points_depth)
		initialize_distance_lookup_table(nid, associativity);

out:
	return nid;
}
/* Returns the nid associated with the given device tree node,
 * or -1 if not found.
 */
static int of_node_to_nid_single(struct device_node *device)
{
	int nid = -1;
	const unsigned int *tmp;

	tmp = of_get_associativity(device);
	if (tmp)
		nid = associativity_to_nid(tmp);
	return nid;
}
/* Walk the device tree upwards, looking for an associativity id */
int of_node_to_nid(struct device_node *device)
{
	struct device_node *tmp;
	int nid = -1;

	of_node_get(device);
	while (device) {
		nid = of_node_to_nid_single(device);
		if (nid != -1)
			break;

		tmp = device;
		device = of_get_parent(tmp);
		of_node_put(tmp);
	}
	of_node_put(device);

	return nid;
}
EXPORT_SYMBOL_GPL(of_node_to_nid);
static int __init find_min_common_depth(void)
{
	int depth;
	struct device_node *root;

	if (firmware_has_feature(FW_FEATURE_OPAL))
		root = of_find_node_by_path("/ibm,opal");
	else
		root = of_find_node_by_path("/rtas");
	if (!root)
		root = of_find_node_by_path("/");

	/*
	 * This property is a set of 32-bit integers, each representing
	 * an index into the ibm,associativity nodes.
	 *
	 * With form 0 affinity the first integer is for an SMP configuration
	 * (should be all 0's) and the second is for a normal NUMA
	 * configuration. We have only one level of NUMA.
	 *
	 * With form 1 affinity the first integer is the most significant
	 * NUMA boundary and the following are progressively less significant
	 * boundaries. There can be more than one level of NUMA.
	 */
	distance_ref_points = of_get_property(root,
					"ibm,associativity-reference-points",
					&distance_ref_points_depth);

	if (!distance_ref_points) {
		dbg("NUMA: ibm,associativity-reference-points not found.\n");
		goto err;
	}

	distance_ref_points_depth /= sizeof(int);

	if (firmware_has_feature(FW_FEATURE_OPAL) ||
	    firmware_has_feature(FW_FEATURE_TYPE1_AFFINITY)) {
		dbg("Using form 1 affinity\n");
		form1_affinity = 1;
	}

	if (form1_affinity) {
		depth = distance_ref_points[0];
	} else {
		if (distance_ref_points_depth < 2) {
			printk(KERN_WARNING "NUMA: "
				"short ibm,associativity-reference-points\n");
			goto err;
		}

		depth = distance_ref_points[1];
	}

	/*
	 * Warn and cap if the hardware supports more than
	 * MAX_DISTANCE_REF_POINTS domains.
	 */
	if (distance_ref_points_depth > MAX_DISTANCE_REF_POINTS) {
		printk(KERN_WARNING "NUMA: distance array capped at "
			"%d entries\n", MAX_DISTANCE_REF_POINTS);
		distance_ref_points_depth = MAX_DISTANCE_REF_POINTS;
	}

	of_node_put(root);
	return depth;

err:
	of_node_put(root);
	return -1;
}
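/*
 * Illustrative device tree value (hypothetical, for reference only):
 * a form 1 machine might expose
 *	ibm,associativity-reference-points = <0x4 0x2>;
 * meaning index 4 of each ibm,associativity array marks the most
 * significant NUMA boundary, and find_min_common_depth() would
 * return 4.
 */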
static void __init get_n_mem_cells(int *n_addr_cells, int *n_size_cells)
{
	struct device_node *memory = NULL;

	memory = of_find_node_by_type(memory, "memory");
	if (!memory)
		panic("numa.c: No memory nodes found!");

	*n_addr_cells = of_n_addr_cells(memory);
	*n_size_cells = of_n_size_cells(memory);
	of_node_put(memory);
}
static unsigned long read_n_cells(int n, const unsigned int **buf)
{
	unsigned long result = 0;

	while (n--) {
		result = (result << 32) | **buf;
		(*buf)++;
	}
	return result;
}
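/*
 * For illustration (hypothetical cell values): with n == 2 and *buf
 * pointing at the cells { 0x00000001, 0x80000000 }, read_n_cells()
 * returns 0x180000000 and leaves *buf advanced past both cells.
 */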
/*
 * Read the next memblock list entry from the ibm,dynamic-memory property
 * and return the information in the provided of_drconf_cell structure.
 */
static void read_drconf_cell(struct of_drconf_cell *drmem, const u32 **cellp)
{
	const u32 *cp;

	drmem->base_addr = read_n_cells(n_mem_addr_cells, cellp);

	cp = *cellp;
	drmem->drc_index = cp[0];
	drmem->reserved = cp[1];
	drmem->aa_index = cp[2];
	drmem->flags = cp[3];

	*cellp = cp + 4;
}
/*
 * Retrieve and validate the ibm,dynamic-memory property of the device tree.
 *
 * The layout of the ibm,dynamic-memory property is a number N of memblock
 * list entries followed by N memblock list entries.  Each memblock list entry
 * contains information as laid out in the of_drconf_cell struct above.
 */
static int of_get_drconf_memory(struct device_node *memory, const u32 **dm)
{
	const u32 *prop;
	u32 len, entries;

	prop = of_get_property(memory, "ibm,dynamic-memory", &len);
	if (!prop || len < sizeof(unsigned int))
		return 0;

	entries = *prop++;

	/* Now that we know the number of entries, revalidate the size
	 * of the property read in to ensure we have everything
	 */
	if (len < (entries * (n_mem_addr_cells + 4) + 1) * sizeof(unsigned int))
		return 0;

	*dm = prop;
	return entries;
}
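/*
 * Sizing example (assumed values): with n_mem_addr_cells == 2, a
 * property describing 3 LMBs occupies 1 + 3 * (2 + 4) == 19 cells --
 * one count cell plus, per entry, two address cells and the four
 * fixed cells read by read_drconf_cell() above.
 */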
/*
 * Retrieve and validate the ibm,lmb-size property for drconf memory
 * from the device tree.
 */
static u64 of_get_lmb_size(struct device_node *memory)
{
	const u32 *prop;
	u32 len;

	prop = of_get_property(memory, "ibm,lmb-size", &len);
	if (!prop || len < sizeof(unsigned int))
		return 0;

	return read_n_cells(n_mem_size_cells, &prop);
}
struct assoc_arrays {
	u32 n_arrays;
	u32 array_sz;
	const u32 *arrays;
};

/*
 * Retrieve and validate the list of associativity arrays for drconf
 * memory from the ibm,associativity-lookup-arrays property of the
 * device tree.
 *
 * The layout of the ibm,associativity-lookup-arrays property is a number N
 * indicating the number of associativity arrays, followed by a number M
 * indicating the size of each associativity array, followed by a list
 * of N associativity arrays.
 */
static int of_get_assoc_arrays(struct device_node *memory,
			       struct assoc_arrays *aa)
{
	const u32 *prop;
	u32 len;

	prop = of_get_property(memory, "ibm,associativity-lookup-arrays", &len);
	if (!prop || len < 2 * sizeof(unsigned int))
		return -1;

	aa->n_arrays = *prop++;
	aa->array_sz = *prop++;

	/* Now that we know the number of arrays and size of each array,
	 * revalidate the size of the property read in.
	 */
	if (len < (aa->n_arrays * aa->array_sz + 2) * sizeof(unsigned int))
		return -1;

	aa->arrays = prop;
	return 0;
}
/*
 * This is like of_node_to_nid_single() for memory represented in the
 * ibm,dynamic-reconfiguration-memory node.
 */
static int of_drconf_to_nid_single(struct of_drconf_cell *drmem,
				   struct assoc_arrays *aa)
{
	int default_nid = 0;
	int nid = default_nid;
	int index;

	if (min_common_depth > 0 && min_common_depth <= aa->array_sz &&
	    !(drmem->flags & DRCONF_MEM_AI_INVALID) &&
	    drmem->aa_index < aa->n_arrays) {
		index = drmem->aa_index * aa->array_sz + min_common_depth - 1;
		nid = aa->arrays[index];

		if (nid == 0xffff || nid >= MAX_NUMNODES)
			nid = default_nid;
	}

	return nid;
}
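/*
 * Index arithmetic, with hypothetical values: for array_sz == 5,
 * min_common_depth == 4 and aa_index == 2, the node id is taken from
 * aa->arrays[2 * 5 + 4 - 1] == aa->arrays[13], i.e. the fourth level
 * of the third associativity array.
 */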
/*
 * Figure out to which domain a cpu belongs and stick it there.
 * Return the id of the domain used.
 */
static int __cpuinit numa_setup_cpu(unsigned long lcpu)
{
	int nid;
	struct device_node *cpu;

	/*
	 * If a valid cpu-to-node mapping is already available, use it
	 * directly instead of querying the firmware, since it represents
	 * the most recent mapping notified to us by the platform (eg: VPHN).
	 */
	if ((nid = numa_cpu_lookup_table[lcpu]) >= 0) {
		map_cpu_to_node(lcpu, nid);
		return nid;
	}

	cpu = of_get_cpu_node(lcpu, NULL);
	if (!cpu) {
		WARN_ON(1);
		nid = 0;
		goto out;
	}

	nid = of_node_to_nid_single(cpu);

	if (nid < 0 || !node_online(nid))
		nid = first_online_node;
out:
	map_cpu_to_node(lcpu, nid);
	of_node_put(cpu);

	return nid;
}
static int __cpuinit cpu_numa_callback(struct notifier_block *nfb,
				       unsigned long action,
				       void *hcpu)
{
	unsigned long lcpu = (unsigned long)hcpu;
	int ret = NOTIFY_DONE;

	switch (action) {
	case CPU_UP_PREPARE:
	case CPU_UP_PREPARE_FROZEN:
		numa_setup_cpu(lcpu);
		ret = NOTIFY_OK;
		break;
#ifdef CONFIG_HOTPLUG_CPU
	case CPU_DEAD:
	case CPU_DEAD_FROZEN:
	case CPU_UP_CANCELED:
	case CPU_UP_CANCELED_FROZEN:
		unmap_cpu_from_node(lcpu);
		ret = NOTIFY_OK;
		break;
#endif
	}
	return ret;
}
/*
 * Check and possibly modify a memory region to enforce the memory limit.
 *
 * Returns the size the region should have to enforce the memory limit.
 * This will either be the original value of size, a truncated value,
 * or zero. If the returned value of size is 0 the region should be
 * discarded as it lies wholly above the memory limit.
 */
static unsigned long __init numa_enforce_memory_limit(unsigned long start,
						      unsigned long size)
{
	/*
	 * We use memblock_end_of_DRAM() in here instead of memory_limit because
	 * we've already adjusted it for the limit and it takes care of
	 * having memory holes below the limit.  Also, in the case of
	 * iommu_is_off, memory_limit is not set but is implicitly enforced.
	 */

	if (start + size <= memblock_end_of_DRAM())
		return size;

	if (start >= memblock_end_of_DRAM())
		return 0;

	return memblock_end_of_DRAM() - start;
}
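/*
 * Example (hypothetical addresses): if memblock_end_of_DRAM() is
 * 0x80000000, a region starting at 0x7c000000 with size 0x08000000 is
 * trimmed to 0x04000000, and a region starting at or above 0x80000000
 * is discarded (size 0).
 */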
/*
 * Reads the counter for a given entry in
 * linux,drconf-usable-memory property
 */
static inline int __init read_usm_ranges(const u32 **usm)
{
	/*
	 * For each lmb in ibm,dynamic-memory a corresponding
	 * entry in linux,drconf-usable-memory property contains
	 * a counter followed by that many (base, size) pairs.
	 * read the counter from linux,drconf-usable-memory
	 */
	return read_n_cells(n_mem_size_cells, usm);
}
/*
 * Extract NUMA information from the ibm,dynamic-reconfiguration-memory
 * node.  This assumes n_mem_{addr,size}_cells have been set.
 */
static void __init parse_drconf_memory(struct device_node *memory)
{
	const u32 *uninitialized_var(dm), *usm;
	unsigned int n, rc, ranges, is_kexec_kdump = 0;
	unsigned long lmb_size, base, size, sz;
	int nid;
	struct assoc_arrays aa = { .arrays = NULL };

	n = of_get_drconf_memory(memory, &dm);
	if (!n)
		return;

	lmb_size = of_get_lmb_size(memory);
	if (!lmb_size)
		return;

	rc = of_get_assoc_arrays(memory, &aa);
	if (rc)
		return;

	/* check if this is a kexec/kdump kernel */
	usm = of_get_usable_memory(memory);
	if (usm != NULL)
		is_kexec_kdump = 1;

	for (; n != 0; --n) {
		struct of_drconf_cell drmem;

		read_drconf_cell(&drmem, &dm);

		/* skip this block if the reserved bit is set in flags (0x80)
		   or if the block is not assigned to this partition (0x8) */
		if ((drmem.flags & DRCONF_MEM_RESERVED)
		    || !(drmem.flags & DRCONF_MEM_ASSIGNED))
			continue;

		base = drmem.base_addr;
		size = lmb_size;
		ranges = 1;

		if (is_kexec_kdump) {
			ranges = read_usm_ranges(&usm);
			if (!ranges) /* there are no (base, size) pairs */
				continue;
		}
		do {
			if (is_kexec_kdump) {
				base = read_n_cells(n_mem_addr_cells, &usm);
				size = read_n_cells(n_mem_size_cells, &usm);
			}
			nid = of_drconf_to_nid_single(&drmem, &aa);
			fake_numa_create_new_node(
				((base + size) >> PAGE_SHIFT), &nid);
			node_set_online(nid);
			sz = numa_enforce_memory_limit(base, size);
			if (sz)
				memblock_set_node(base, sz, nid);
		} while (--ranges);
	}
}
static int __init parse_numa_properties(void)
{
	struct device_node *memory;
	int default_nid = 0;
	unsigned long i;

	if (numa_enabled == 0) {
		printk(KERN_WARNING "NUMA disabled by user\n");
		return -1;
	}

	min_common_depth = find_min_common_depth();

	if (min_common_depth < 0)
		return min_common_depth;

	dbg("NUMA associativity depth for CPU/Memory: %d\n", min_common_depth);

	/*
	 * Even though we connect cpus to numa domains later in SMP
	 * init, we need to know the node ids now. This is because
	 * each node to be onlined must have NODE_DATA etc backing it.
	 */
	for_each_present_cpu(i) {
		struct device_node *cpu;
		int nid;

		cpu = of_get_cpu_node(i, NULL);
		BUG_ON(!cpu);
		nid = of_node_to_nid_single(cpu);
		of_node_put(cpu);

		/*
		 * Don't fall back to default_nid yet -- we will plug
		 * cpus into nodes once the memory scan has discovered
		 * the topology.
		 */
		if (nid < 0)
			continue;
		node_set_online(nid);
	}

	get_n_mem_cells(&n_mem_addr_cells, &n_mem_size_cells);

	for_each_node_by_type(memory, "memory") {
		unsigned long start;
		unsigned long size;
		int nid;
		int ranges;
		const unsigned int *memcell_buf;
		unsigned int len;

		memcell_buf = of_get_property(memory,
			"linux,usable-memory", &len);
		if (!memcell_buf || len <= 0)
			memcell_buf = of_get_property(memory, "reg", &len);
		if (!memcell_buf || len <= 0)
			continue;

		/* ranges in cell */
		ranges = (len >> 2) / (n_mem_addr_cells + n_mem_size_cells);
new_range:
		/* these are order-sensitive, and modify the buffer pointer */
		start = read_n_cells(n_mem_addr_cells, &memcell_buf);
		size = read_n_cells(n_mem_size_cells, &memcell_buf);

		/*
		 * Assumption: either all memory nodes or none will
		 * have associativity properties.  If none, then
		 * everything goes to default_nid.
		 */
		nid = of_node_to_nid_single(memory);
		if (nid < 0)
			nid = default_nid;

		fake_numa_create_new_node(((start + size) >> PAGE_SHIFT), &nid);
		node_set_online(nid);

		if (!(size = numa_enforce_memory_limit(start, size))) {
			if (--ranges)
				goto new_range;
			else
				continue;
		}

		memblock_set_node(start, size, nid);

		if (--ranges)
			goto new_range;
	}

	/*
	 * Now do the same thing for each MEMBLOCK listed in the
	 * ibm,dynamic-memory property in the
	 * ibm,dynamic-reconfiguration-memory node.
	 */
	memory = of_find_node_by_path("/ibm,dynamic-reconfiguration-memory");
	if (memory)
		parse_drconf_memory(memory);

	return 0;
}
static void __init setup_nonnuma(void)
{
	unsigned long top_of_ram = memblock_end_of_DRAM();
	unsigned long total_ram = memblock_phys_mem_size();
	unsigned long start_pfn, end_pfn;
	unsigned int nid = 0;
	struct memblock_region *reg;

	printk(KERN_DEBUG "Top of RAM: 0x%lx, Total RAM: 0x%lx\n",
	       top_of_ram, total_ram);
	printk(KERN_DEBUG "Memory hole size: %ldMB\n",
	       (top_of_ram - total_ram) >> 20);

	for_each_memblock(memory, reg) {
		start_pfn = memblock_region_memory_base_pfn(reg);
		end_pfn = memblock_region_memory_end_pfn(reg);

		fake_numa_create_new_node(end_pfn, &nid);
		memblock_set_node(PFN_PHYS(start_pfn),
				  PFN_PHYS(end_pfn - start_pfn), nid);
		node_set_online(nid);
	}
}
void __init dump_numa_cpu_topology(void)
{
	unsigned int node;
	unsigned int cpu, count;

	if (min_common_depth == -1 || !numa_enabled)
		return;

	for_each_online_node(node) {
		printk(KERN_DEBUG "Node %d CPUs:", node);

		count = 0;
		/*
		 * If we used a CPU iterator here we would miss printing
		 * the holes in the cpumap.
		 */
		for (cpu = 0; cpu < nr_cpu_ids; cpu++) {
			if (cpumask_test_cpu(cpu,
					node_to_cpumask_map[node])) {
				if (count == 0)
					printk(" %u", cpu);
				++count;
			} else {
				if (count > 1)
					printk("-%u", cpu - 1);
				count = 0;
			}
		}

		if (count > 1)
			printk("-%u", nr_cpu_ids - 1);
		printk("\n");
	}
}
static void __init dump_numa_memory_topology(void)
{
	unsigned int node;
	unsigned int count;

	if (min_common_depth == -1 || !numa_enabled)
		return;

	for_each_online_node(node) {
		unsigned long i;

		printk(KERN_DEBUG "Node %d Memory:", node);

		count = 0;

		for (i = 0; i < memblock_end_of_DRAM();
		     i += (1 << SECTION_SIZE_BITS)) {
			if (early_pfn_to_nid(i >> PAGE_SHIFT) == node) {
				if (count == 0)
					printk(" 0x%lx", i);
				++count;
			} else {
				if (count > 0)
					printk("-0x%lx", i);
				count = 0;
			}
		}

		if (count > 0)
			printk("-0x%lx", i);
		printk("\n");
	}
}
/*
 * Allocate some memory, satisfying the memblock or bootmem allocator where
 * required. nid is the preferred node and end is the physical address of
 * the highest address in the node.
 *
 * Returns the virtual address of the memory.
 */
static void __init *careful_zallocation(int nid, unsigned long size,
					unsigned long align,
					unsigned long end_pfn)
{
	void *ret;
	int new_nid;
	unsigned long ret_paddr;

	ret_paddr = __memblock_alloc_base(size, align, end_pfn << PAGE_SHIFT);

	/* retry over all memory */
	if (!ret_paddr)
		ret_paddr = __memblock_alloc_base(size, align,
						  memblock_end_of_DRAM());

	if (!ret_paddr)
		panic("numa.c: cannot allocate %lu bytes for node %d",
		      size, nid);

	ret = __va(ret_paddr);

	/*
	 * We initialize the nodes in numeric order: 0, 1, 2...
	 * and hand over control from the MEMBLOCK allocator to the
	 * bootmem allocator.  If this function is called for
	 * node 5, then we know that all nodes <5 are using the
	 * bootmem allocator instead of the MEMBLOCK allocator.
	 *
	 * So, check the nid from which this allocation came
	 * and double check to see if we need to use bootmem
	 * instead of the MEMBLOCK.  We don't free the MEMBLOCK memory
	 * since it would be useless.
	 */
	new_nid = early_pfn_to_nid(ret_paddr >> PAGE_SHIFT);
	if (new_nid < nid) {
		ret = __alloc_bootmem_node(NODE_DATA(new_nid),
				size, align, 0);

		dbg("alloc_bootmem %p %lx\n", ret, size);
	}

	memset(ret, 0, size);
	return ret;
}
static struct notifier_block __cpuinitdata ppc64_numa_nb = {
	.notifier_call = cpu_numa_callback,
	.priority = 1 /* Must run before sched domains notifier. */
};
static void __init mark_reserved_regions_for_nid(int nid)
{
	struct pglist_data *node = NODE_DATA(nid);
	struct memblock_region *reg;

	for_each_memblock(reserved, reg) {
		unsigned long physbase = reg->base;
		unsigned long size = reg->size;
		unsigned long start_pfn = physbase >> PAGE_SHIFT;
		unsigned long end_pfn = PFN_UP(physbase + size);
		struct node_active_region node_ar;
		unsigned long node_end_pfn = node->node_start_pfn +
					     node->node_spanned_pages;

		/*
		 * Check to make sure that this memblock.reserved area is
		 * within the bounds of the node that we care about.
		 * Checking the nid of the start and end points is not
		 * sufficient because the reserved area could span the
		 * entire node.
		 */
		if (end_pfn <= node->node_start_pfn ||
		    start_pfn >= node_end_pfn)
			continue;

		get_node_active_region(start_pfn, &node_ar);
		while (start_pfn < end_pfn &&
			node_ar.start_pfn < node_ar.end_pfn) {
			unsigned long reserve_size = size;
			/*
			 * if reserved region extends past active region
			 * then trim size to active region
			 */
			if (end_pfn > node_ar.end_pfn)
				reserve_size = (node_ar.end_pfn << PAGE_SHIFT)
					- physbase;
			/*
			 * Only worry about *this* node, others may not
			 * yet have valid NODE_DATA().
			 */
			if (node_ar.nid == nid) {
				dbg("reserve_bootmem %lx %lx nid=%d\n",
					physbase, reserve_size, node_ar.nid);
				reserve_bootmem_node(NODE_DATA(node_ar.nid),
						physbase, reserve_size,
						BOOTMEM_DEFAULT);
			}
			/*
			 * if reserved region is contained in the active region
			 * then done.
			 */
			if (end_pfn <= node_ar.end_pfn)
				break;

			/*
			 * reserved region extends past the active region
			 *   get next active region that contains this
			 *   reserved region
			 */
			start_pfn = node_ar.end_pfn;
			physbase = start_pfn << PAGE_SHIFT;
			size = size - reserve_size;
			get_node_active_region(start_pfn, &node_ar);
		}
	}
}
void __init do_init_bootmem(void)
{
	int nid;

	max_low_pfn = memblock_end_of_DRAM() >> PAGE_SHIFT;
	max_pfn = max_low_pfn;

	if (parse_numa_properties())
		setup_nonnuma();
	else
		dump_numa_memory_topology();

	for_each_online_node(nid) {
		unsigned long start_pfn, end_pfn;
		void *bootmem_vaddr;
		unsigned long bootmap_pages;

		get_pfn_range_for_nid(nid, &start_pfn, &end_pfn);

		/*
		 * Allocate the node structure node local if possible
		 *
		 * Be careful moving this around, as it relies on all
		 * previous nodes' bootmem to be initialized and have
		 * all reserved areas marked.
		 */
		NODE_DATA(nid) = careful_zallocation(nid,
					sizeof(struct pglist_data),
					SMP_CACHE_BYTES, end_pfn);

		dbg("node %d\n", nid);
		dbg("NODE_DATA() = %p\n", NODE_DATA(nid));

		NODE_DATA(nid)->bdata = &bootmem_node_data[nid];
		NODE_DATA(nid)->node_start_pfn = start_pfn;
		NODE_DATA(nid)->node_spanned_pages = end_pfn - start_pfn;

		if (NODE_DATA(nid)->node_spanned_pages == 0)
			continue;

		dbg("start_paddr = %lx\n", start_pfn << PAGE_SHIFT);
		dbg("end_paddr = %lx\n", end_pfn << PAGE_SHIFT);

		bootmap_pages = bootmem_bootmap_pages(end_pfn - start_pfn);
		bootmem_vaddr = careful_zallocation(nid,
					bootmap_pages << PAGE_SHIFT,
					PAGE_SIZE, end_pfn);

		dbg("bootmap_vaddr = %p\n", bootmem_vaddr);

		init_bootmem_node(NODE_DATA(nid),
				  __pa(bootmem_vaddr) >> PAGE_SHIFT,
				  start_pfn, end_pfn);

		free_bootmem_with_active_regions(nid, end_pfn);
		/*
		 * Be very careful about moving this around.  Future
		 * calls to careful_zallocation() depend on this getting
		 * done correctly.
		 */
		mark_reserved_regions_for_nid(nid);
		sparse_memory_present_with_active_regions(nid);
	}

	init_bootmem_done = 1;

	/*
	 * Now bootmem is initialised we can create the node to cpumask
	 * lookup tables and setup the cpu callback to populate them.
	 */
	setup_node_to_cpumask_map();

	reset_numa_cpu_lookup_table();
	register_cpu_notifier(&ppc64_numa_nb);
	cpu_numa_callback(&ppc64_numa_nb, CPU_UP_PREPARE,
			  (void *)(unsigned long)boot_cpuid);
}
void __init paging_init(void)
{
	unsigned long max_zone_pfns[MAX_NR_ZONES];
	memset(max_zone_pfns, 0, sizeof(max_zone_pfns));
	max_zone_pfns[ZONE_DMA] = memblock_end_of_DRAM() >> PAGE_SHIFT;
	free_area_init_nodes(max_zone_pfns);
}
static int __init early_numa(char *p)
{
	if (!p)
		return 0;

	if (strstr(p, "off"))
		numa_enabled = 0;

	if (strstr(p, "debug"))
		numa_debug = 1;

	p = strstr(p, "fake=");
	if (p)
		cmdline = p + strlen("fake=");

	return 0;
}
early_param("numa", early_numa);
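/*
 * Usage sketch (values are examples only): booting with
 * "numa=fake=512M,1G" asks fake_numa_create_new_node() to close fake
 * nodes at the 512M and 1G boundaries, "numa=debug" enables the dbg()
 * output above, and "numa=off" disables NUMA entirely.
 */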
#ifdef CONFIG_MEMORY_HOTPLUG
/*
 * Find the node associated with a hot added memory section for
 * memory represented in the device tree by the property
 * ibm,dynamic-reconfiguration-memory/ibm,dynamic-memory.
 */
static int hot_add_drconf_scn_to_nid(struct device_node *memory,
				     unsigned long scn_addr)
{
	const u32 *dm;
	unsigned int drconf_cell_cnt, rc;
	unsigned long lmb_size;
	struct assoc_arrays aa;
	int nid = -1;

	drconf_cell_cnt = of_get_drconf_memory(memory, &dm);
	if (!drconf_cell_cnt)
		return -1;

	lmb_size = of_get_lmb_size(memory);
	if (!lmb_size)
		return -1;

	rc = of_get_assoc_arrays(memory, &aa);
	if (rc)
		return -1;

	for (; drconf_cell_cnt != 0; --drconf_cell_cnt) {
		struct of_drconf_cell drmem;

		read_drconf_cell(&drmem, &dm);

		/* skip this block if it is reserved or not assigned to
		 * this partition */
		if ((drmem.flags & DRCONF_MEM_RESERVED)
		    || !(drmem.flags & DRCONF_MEM_ASSIGNED))
			continue;

		if ((scn_addr < drmem.base_addr)
		    || (scn_addr >= (drmem.base_addr + lmb_size)))
			continue;

		nid = of_drconf_to_nid_single(&drmem, &aa);
		break;
	}

	return nid;
}
/*
 * Find the node associated with a hot added memory section for memory
 * represented in the device tree as a node (i.e. memory@XXXX) for
 * each memblock.
 */
int hot_add_node_scn_to_nid(unsigned long scn_addr)
{
	struct device_node *memory;
	int nid = -1;

	for_each_node_by_type(memory, "memory") {
		unsigned long start, size;
		int ranges;
		const unsigned int *memcell_buf;
		unsigned int len;

		memcell_buf = of_get_property(memory, "reg", &len);
		if (!memcell_buf || len <= 0)
			continue;

		/* ranges in cell */
		ranges = (len >> 2) / (n_mem_addr_cells + n_mem_size_cells);

		while (ranges--) {
			start = read_n_cells(n_mem_addr_cells, &memcell_buf);
			size = read_n_cells(n_mem_size_cells, &memcell_buf);

			if ((scn_addr < start) || (scn_addr >= (start + size)))
				continue;

			nid = of_node_to_nid_single(memory);
			break;
		}

		if (nid >= 0)
			break;
	}

	of_node_put(memory);

	return nid;
}
/*
 * Find the node associated with a hot added memory section.  Section
 * corresponds to a SPARSEMEM section, not a MEMBLOCK.  It is assumed that
 * sections are fully contained within a single MEMBLOCK.
 */
int hot_add_scn_to_nid(unsigned long scn_addr)
{
	struct device_node *memory = NULL;
	int nid, found = 0;

	if (!numa_enabled || (min_common_depth < 0))
		return first_online_node;

	memory = of_find_node_by_path("/ibm,dynamic-reconfiguration-memory");
	if (memory) {
		nid = hot_add_drconf_scn_to_nid(memory, scn_addr);
		of_node_put(memory);
	} else {
		nid = hot_add_node_scn_to_nid(scn_addr);
	}

	if (nid < 0 || !node_online(nid))
		nid = first_online_node;

	if (NODE_DATA(nid)->node_spanned_pages)
		return nid;

	for_each_online_node(nid) {
		if (NODE_DATA(nid)->node_spanned_pages) {
			found = 1;
			break;
		}
	}

	BUG_ON(!found);
	return nid;
}
static u64 hot_add_drconf_memory_max(void)
{
	struct device_node *memory = NULL;
	unsigned int drconf_cell_cnt = 0;
	u64 lmb_size = 0;
	const u32 *dm = NULL;

	memory = of_find_node_by_path("/ibm,dynamic-reconfiguration-memory");
	if (memory) {
		drconf_cell_cnt = of_get_drconf_memory(memory, &dm);
		lmb_size = of_get_lmb_size(memory);
		of_node_put(memory);
	}
	return lmb_size * drconf_cell_cnt;
}
/*
 * memory_hotplug_max - return max address of memory that may be added
 *
 * This is currently only used on systems that support drconfig memory
 * hotplug.
 */
u64 memory_hotplug_max(void)
{
	return max(hot_add_drconf_memory_max(), memblock_end_of_DRAM());
}
#endif /* CONFIG_MEMORY_HOTPLUG */
/* Virtual Processor Home Node (VPHN) support */
#ifdef CONFIG_PPC_SPLPAR
struct topology_update_data {
	struct topology_update_data *next;
	unsigned int cpu;
	int old_nid;
	int new_nid;
};

static u8 vphn_cpu_change_counts[NR_CPUS][MAX_DISTANCE_REF_POINTS];
static cpumask_t cpu_associativity_changes_mask;
static int vphn_enabled;
static int prrn_enabled;
static void reset_topology_timer(void);
/*
 * Store the current values of the associativity change counters in the
 * hypervisor.
 */
static void setup_cpu_associativity_change_counters(void)
{
	int cpu;

	/* The VPHN feature supports a maximum of 8 reference points */
	BUILD_BUG_ON(MAX_DISTANCE_REF_POINTS > 8);

	for_each_possible_cpu(cpu) {
		int i;
		u8 *counts = vphn_cpu_change_counts[cpu];
		volatile u8 *hypervisor_counts = lppaca[cpu].vphn_assoc_counts;

		for (i = 0; i < distance_ref_points_depth; i++)
			counts[i] = hypervisor_counts[i];
	}
}
/*
 * The hypervisor maintains a set of 8 associativity change counters in
 * the VPA of each cpu that correspond to the associativity levels in the
 * ibm,associativity-reference-points property. When an associativity
 * level changes, the corresponding counter is incremented.
 *
 * Set a bit in cpu_associativity_changes_mask for each cpu whose home
 * node associativity levels have changed.
 *
 * Returns the number of cpus with unhandled associativity changes.
 */
static int update_cpu_associativity_changes_mask(void)
{
	int cpu;
	cpumask_t *changes = &cpu_associativity_changes_mask;

	for_each_possible_cpu(cpu) {
		int i, changed = 0;
		u8 *counts = vphn_cpu_change_counts[cpu];
		volatile u8 *hypervisor_counts = lppaca[cpu].vphn_assoc_counts;

		for (i = 0; i < distance_ref_points_depth; i++) {
			if (hypervisor_counts[i] != counts[i]) {
				counts[i] = hypervisor_counts[i];
				changed = 1;
			}
		}
		if (changed) {
			cpumask_or(changes, changes, cpu_sibling_mask(cpu));
			cpu = cpu_last_thread_sibling(cpu);
		}
	}

	return cpumask_weight(changes);
}
/*
 * 6 64-bit registers unpacked into 12 32-bit associativity values. To form
 * the complete property we have to add the length in the first cell.
 */
#define VPHN_ASSOC_BUFSIZE (6*sizeof(u64)/sizeof(u32) + 1)
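/*
 * Arithmetic check: 6 * sizeof(u64) / sizeof(u32) + 1 == 6 * 8 / 4 + 1
 * == 13 cells, i.e. twelve 32-bit values plus the leading length cell.
 */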
/*
 * Convert the associativity domain numbers returned from the hypervisor
 * to the sequence they would appear in the ibm,associativity property.
 */
static int vphn_unpack_associativity(const long *packed, unsigned int *unpacked)
{
	int i, nr_assoc_doms = 0;
	const u16 *field = (const u16 *) packed;

#define VPHN_FIELD_UNUSED	(0xffff)
#define VPHN_FIELD_MSB		(0x8000)
#define VPHN_FIELD_MASK		(~VPHN_FIELD_MSB)

	for (i = 1; i < VPHN_ASSOC_BUFSIZE; i++) {
		if (*field == VPHN_FIELD_UNUSED) {
			/* All significant fields processed, and remaining
			 * fields contain the reserved value of all 1's.
			 * Just store them.
			 */
			unpacked[i] = *((u32 *)field);
			field += 2;
		} else if (*field & VPHN_FIELD_MSB) {
			/* Data is in the lower 15 bits of this field */
			unpacked[i] = *field & VPHN_FIELD_MASK;
			field++;
			nr_assoc_doms++;
		} else {
			/* Data is in the lower 15 bits of this field
			 * concatenated with the next 16 bit field
			 */
			unpacked[i] = *((u32 *)field);
			field += 2;
			nr_assoc_doms++;
		}
	}

	/* The first cell contains the length of the property */
	unpacked[0] = nr_assoc_doms;

	return nr_assoc_doms;
}
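/*
 * Unpacking example (hypothetical field values): a 16-bit field of
 * 0x8002 has the MSB set, so it unpacks to the single domain number
 * 0x0002; a field of 0x0001 followed by 0x0002 has the MSB clear, so
 * the two fields concatenate into the 32-bit domain number 0x00010002;
 * 0xffff marks the remaining fields as unused.
 */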
/*
 * Retrieve the new associativity information for a virtual processor's
 * home node.
 */
static long hcall_vphn(unsigned long cpu, unsigned int *associativity)
{
	long rc;
	long retbuf[PLPAR_HCALL9_BUFSIZE] = {0};
	u64 flags = 1;
	int hwcpu = get_hard_smp_processor_id(cpu);

	rc = plpar_hcall9(H_HOME_NODE_ASSOCIATIVITY, retbuf, flags, hwcpu);
	vphn_unpack_associativity(retbuf, associativity);

	return rc;
}
static long vphn_get_associativity(unsigned long cpu,
				   unsigned int *associativity)
{
	long rc;

	rc = hcall_vphn(cpu, associativity);

	switch (rc) {
	case H_FUNCTION:
		printk(KERN_INFO
			"VPHN is not supported. Disabling polling...\n");
		stop_topology_update();
		break;
	case H_HARDWARE:
		printk(KERN_ERR
			"hcall_vphn() experienced a hardware fault "
			"preventing VPHN. Disabling polling...\n");
		stop_topology_update();
		break;
	}

	return rc;
}
/*
 * Update the CPU maps and sysfs entries for a single CPU when its NUMA
 * characteristics change. This function doesn't perform any locking and is
 * only safe to call from stop_machine().
 */
static int update_cpu_topology(void *data)
{
	struct topology_update_data *update;
	unsigned long cpu;

	if (!data)
		return -EINVAL;

	cpu = smp_processor_id();

	for (update = data; update; update = update->next) {
		if (cpu != update->cpu)
			continue;

		unmap_cpu_from_node(update->cpu);
		map_cpu_to_node(update->cpu, update->new_nid);
	}

	return 0;
}
static int update_lookup_table(void *data)
{
	struct topology_update_data *update;

	if (!data)
		return -EINVAL;

	/*
	 * Upon topology update, the numa-cpu lookup table needs to be updated
	 * for all threads in the core, including offline CPUs, to ensure that
	 * future hotplug operations respect the cpu-to-node associativity
	 * properly.
	 */
	for (update = data; update; update = update->next) {
		int nid, base, j;

		nid = update->new_nid;
		base = cpu_first_thread_sibling(update->cpu);

		for (j = 0; j < threads_per_core; j++)
			update_numa_cpu_lookup_table(base + j, nid);
	}

	return 0;
}
/*
 * Update the node maps and sysfs entries for each cpu whose home node
 * has changed. Returns 1 when the topology has changed, and 0 otherwise.
 */
int arch_update_cpu_topology(void)
{
	unsigned int cpu, sibling, changed = 0;
	struct topology_update_data *updates, *ud;
	unsigned int associativity[VPHN_ASSOC_BUFSIZE] = {0};
	cpumask_t updated_cpus;
	struct device *dev;
	int weight, new_nid, i = 0;

	weight = cpumask_weight(&cpu_associativity_changes_mask);
	if (!weight)
		return 0;

	updates = kzalloc(weight * (sizeof(*updates)), GFP_KERNEL);
	if (!updates)
		return 0;

	cpumask_clear(&updated_cpus);

	for_each_cpu(cpu, &cpu_associativity_changes_mask) {
		/*
		 * If siblings aren't flagged for changes, updates list
		 * will be too short. Skip on this update and set for next
		 * update.
		 */
		if (!cpumask_subset(cpu_sibling_mask(cpu),
					&cpu_associativity_changes_mask)) {
			pr_info("Sibling bits not set for associativity "
					"change, cpu%d\n", cpu);
			cpumask_or(&cpu_associativity_changes_mask,
					&cpu_associativity_changes_mask,
					cpu_sibling_mask(cpu));
			cpu = cpu_last_thread_sibling(cpu);
			continue;
		}

		/* Use associativity from first thread for all siblings */
		vphn_get_associativity(cpu, associativity);
		new_nid = associativity_to_nid(associativity);
		if (new_nid < 0 || !node_online(new_nid))
			new_nid = first_online_node;

		if (new_nid == numa_cpu_lookup_table[cpu]) {
			cpumask_andnot(&cpu_associativity_changes_mask,
					&cpu_associativity_changes_mask,
					cpu_sibling_mask(cpu));
			cpu = cpu_last_thread_sibling(cpu);
			continue;
		}

		for_each_cpu(sibling, cpu_sibling_mask(cpu)) {
			ud = &updates[i++];
			ud->cpu = sibling;
			ud->new_nid = new_nid;
			ud->old_nid = numa_cpu_lookup_table[sibling];
			cpumask_set_cpu(sibling, &updated_cpus);
			if (i < weight)
				ud->next = &updates[i];
		}
		cpu = cpu_last_thread_sibling(cpu);
	}

	stop_machine(update_cpu_topology, &updates[0], &updated_cpus);

	/*
	 * Update the numa-cpu lookup table with the new mappings, even for
	 * offline CPUs. It is best to perform this update from the stop-
	 * machine context.
	 */
	stop_machine(update_lookup_table, &updates[0],
		     cpumask_of(raw_smp_processor_id()));

	for (ud = &updates[0]; ud; ud = ud->next) {
		unregister_cpu_under_node(ud->cpu, ud->old_nid);
		register_cpu_under_node(ud->cpu, ud->new_nid);

		dev = get_cpu_device(ud->cpu);
		if (dev)
			kobject_uevent(&dev->kobj, KOBJ_CHANGE);
		cpumask_clear_cpu(ud->cpu, &cpu_associativity_changes_mask);
		changed = 1;
	}

	kfree(updates);
	return changed;
}
static void topology_work_fn(struct work_struct *work)
{
	rebuild_sched_domains();
}
static DECLARE_WORK(topology_work, topology_work_fn);

void topology_schedule_update(void)
{
	schedule_work(&topology_work);
}
static void topology_timer_fn(unsigned long ignored)
{
	if (prrn_enabled && cpumask_weight(&cpu_associativity_changes_mask))
		topology_schedule_update();
	else if (vphn_enabled) {
		if (update_cpu_associativity_changes_mask() > 0)
			topology_schedule_update();
		reset_topology_timer();
	}
}
static struct timer_list topology_timer =
	TIMER_INITIALIZER(topology_timer_fn, 0, 0);

static void reset_topology_timer(void)
{
	topology_timer.data = 0;
	topology_timer.expires = jiffies + 60 * HZ;
	mod_timer(&topology_timer, topology_timer.expires);
}
static void stage_topology_update(int core_id)
{
	cpumask_or(&cpu_associativity_changes_mask,
		&cpu_associativity_changes_mask, cpu_sibling_mask(core_id));
	reset_topology_timer();
}
static int dt_update_callback(struct notifier_block *nb,
				unsigned long action, void *data)
{
	struct of_prop_reconfig *update;
	int rc = NOTIFY_DONE;

	switch (action) {
	case OF_RECONFIG_UPDATE_PROPERTY:
		update = (struct of_prop_reconfig *)data;
		if (!of_prop_cmp(update->dn->type, "cpu") &&
		    !of_prop_cmp(update->prop->name, "ibm,associativity")) {
			u32 core_id;
			of_property_read_u32(update->dn, "reg", &core_id);
			stage_topology_update(core_id);
			rc = NOTIFY_OK;
		}
		break;
	}

	return rc;
}
static struct notifier_block dt_update_nb = {
	.notifier_call = dt_update_callback,
};
/*
 * Start polling for associativity changes.
 */
int start_topology_update(void)
{
	int rc = 0;

	if (firmware_has_feature(FW_FEATURE_PRRN)) {
		if (!prrn_enabled) {
			prrn_enabled = 1;
			vphn_enabled = 0;
			rc = of_reconfig_notifier_register(&dt_update_nb);
		}
	} else if (firmware_has_feature(FW_FEATURE_VPHN) &&
		   get_lppaca()->shared_proc) {
		if (!vphn_enabled) {
			prrn_enabled = 0;
			vphn_enabled = 1;
			setup_cpu_associativity_change_counters();
			init_timer_deferrable(&topology_timer);
			reset_topology_timer();
		}
	}

	return rc;
}
/*
 * Disable polling for VPHN associativity changes.
 */
int stop_topology_update(void)
{
	int rc = 0;

	if (prrn_enabled) {
		prrn_enabled = 0;
		rc = of_reconfig_notifier_unregister(&dt_update_nb);
	} else if (vphn_enabled) {
		vphn_enabled = 0;
		rc = del_timer_sync(&topology_timer);
	}

	return rc;
}
int prrn_is_enabled(void)
{
	return prrn_enabled;
}
static int topology_read(struct seq_file *file, void *v)
{
	if (vphn_enabled || prrn_enabled)
		seq_puts(file, "on\n");
	else
		seq_puts(file, "off\n");

	return 0;
}
static int topology_open(struct inode *inode, struct file *file)
{
	return single_open(file, topology_read, NULL);
}
static ssize_t topology_write(struct file *file, const char __user *buf,
			      size_t count, loff_t *off)
{
	char kbuf[4]; /* "on" or "off" plus null. */
	int read_len;

	read_len = count < 3 ? count : 3;
	if (copy_from_user(kbuf, buf, read_len))
		return -EINVAL;

	kbuf[read_len] = '\0';

	if (!strncmp(kbuf, "on", 2))
		start_topology_update();
	else if (!strncmp(kbuf, "off", 3))
		stop_topology_update();
	else
		return -EINVAL;

	return count;
}
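/*
 * The /proc interface below can be driven from user space, e.g.
 * (illustrative shell commands):
 *	echo on  > /proc/powerpc/topology_updates
 *	echo off > /proc/powerpc/topology_updates
 * and reading the file reports whether polling is "on" or "off".
 */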
static const struct file_operations topology_ops = {
	.read = seq_read,
	.write = topology_write,
	.open = topology_open,
	.release = single_release
};

static int topology_update_init(void)
{
	start_topology_update();
	proc_create("powerpc/topology_updates", 0644, NULL, &topology_ops);

	return 0;
}
device_initcall(topology_update_init);
#endif /* CONFIG_PPC_SPLPAR */