/*
 * NUMA emulation
 */
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/topology.h>
#include <linux/memblock.h>
#include <asm/dma.h>

#include "numa_internal.h"

static int emu_nid_to_phys[MAX_NUMNODES] __cpuinitdata;
static char *emu_cmdline __initdata;

void __init numa_emu_cmdline(char *str)
{
        emu_cmdline = str;
}

static int __init emu_find_memblk_by_nid(int nid, const struct numa_meminfo *mi)
{
        int i;

        for (i = 0; i < mi->nr_blks; i++)
                if (mi->blk[i].nid == nid)
                        return i;
        return -ENOENT;
}

/*
 * Sets up nid to range from @start to @end.  The return value is -errno if
 * something went wrong, 0 otherwise.
 */
static int __init emu_setup_memblk(struct numa_meminfo *ei,
                                   struct numa_meminfo *pi,
                                   int nid, int phys_blk, u64 size)
{
        struct numa_memblk *eb = &ei->blk[ei->nr_blks];
        struct numa_memblk *pb = &pi->blk[phys_blk];

        if (ei->nr_blks >= NR_NODE_MEMBLKS) {
                pr_err("NUMA: Too many emulated memblks, failing emulation\n");
                return -EINVAL;
        }

        ei->nr_blks++;
        eb->start = pb->start;
        eb->end = pb->start + size;
        eb->nid = nid;

        if (emu_nid_to_phys[nid] == NUMA_NO_NODE)
                emu_nid_to_phys[nid] = pb->nid;

        pb->start += size;
        if (pb->start >= pb->end) {
                WARN_ON_ONCE(pb->start > pb->end);
                numa_remove_memblk_from(phys_blk, pi);
        }

        printk(KERN_INFO "Faking node %d at %016Lx-%016Lx (%LuMB)\n", nid,
               eb->start, eb->end, (eb->end - eb->start) >> 20);
        return 0;
}
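
/*
 * Illustrative example (hypothetical sizes): carving a 512MB emulated node
 * out of a physical block spanning 0-2GB leaves the physical block covering
 * 512MB-2GB; once repeated carving exhausts the block, it is dropped from
 * the physical meminfo via numa_remove_memblk_from().
 */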

/*
 * Sets up nr_nodes fake nodes interleaved over physical nodes ranging from addr
 * to max_addr.  The return value is the number of nodes allocated.
 */
static int __init split_nodes_interleave(struct numa_meminfo *ei,
                                         struct numa_meminfo *pi,
                                         u64 addr, u64 max_addr, int nr_nodes)
{
        nodemask_t physnode_mask = NODE_MASK_NONE;
        u64 size;
        int big;
        int nid = 0;
        int i, ret;

        if (nr_nodes <= 0)
                return -1;
        if (nr_nodes > MAX_NUMNODES) {
                pr_info("numa=fake=%d too large, reducing to %d\n",
                        nr_nodes, MAX_NUMNODES);
                nr_nodes = MAX_NUMNODES;
        }

        size = (max_addr - addr - memblock_x86_hole_size(addr, max_addr)) / nr_nodes;
        /*
         * Calculate the number of big nodes that can be allocated as a result
         * of consolidating the remainder.
         */
        big = ((size & ~FAKE_NODE_MIN_HASH_MASK) * nr_nodes) /
                FAKE_NODE_MIN_SIZE;

        size &= FAKE_NODE_MIN_HASH_MASK;
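        /*
         * Worked example (hypothetical numbers, assuming the usual 32MB
         * FAKE_NODE_MIN_SIZE): with ~1000MB of non-hole memory and
         * nr_nodes = 8, size starts at 125MB; the 29MB-per-node remainder
         * is consolidated into big = (29MB * 8) / 32MB = 7 "big" nodes and
         * size is rounded down to 96MB, so seven nodes get 128MB and the
         * last gets 96MB.
         */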
        if (!size) {
                pr_err("Not enough memory for each node.  "
                        "NUMA emulation disabled.\n");
                return -1;
        }

        for (i = 0; i < pi->nr_blks; i++)
                node_set(pi->blk[i].nid, physnode_mask);

        /*
         * Continue to fill physical nodes with fake nodes until there is no
         * memory left on any of them.
         */
        while (nodes_weight(physnode_mask)) {
                for_each_node_mask(i, physnode_mask) {
                        u64 dma32_end = PFN_PHYS(MAX_DMA32_PFN);
                        u64 start, limit, end;
                        int phys_blk;

                        phys_blk = emu_find_memblk_by_nid(i, pi);
                        if (phys_blk < 0) {
                                node_clear(i, physnode_mask);
                                continue;
                        }
                        start = pi->blk[phys_blk].start;
                        limit = pi->blk[phys_blk].end;
                        end = start + size;

                        if (nid < big)
                                end += FAKE_NODE_MIN_SIZE;

                        /*
                         * Continue to add memory to this fake node if its
                         * non-reserved memory is less than the per-node size.
                         */
                        while (end - start -
                               memblock_x86_hole_size(start, end) < size) {
                                end += FAKE_NODE_MIN_SIZE;
                                if (end > limit) {
                                        end = limit;
                                        break;
                                }
                        }

                        /*
                         * If there won't be at least FAKE_NODE_MIN_SIZE of
                         * non-reserved memory in ZONE_DMA32 for the next node,
                         * this one must extend to the boundary.
                         */
                        if (end < dma32_end && dma32_end - end -
                            memblock_x86_hole_size(end, dma32_end) < FAKE_NODE_MIN_SIZE)
                                end = dma32_end;

                        /*
                         * If there won't be enough non-reserved memory for the
                         * next node, this one must extend to the end of the
                         * physical node.
                         */
                        if (limit - end -
                            memblock_x86_hole_size(end, limit) < size)
                                end = limit;

                        ret = emu_setup_memblk(ei, pi, nid++ % nr_nodes,
                                               phys_blk,
                                               min(end, limit) - start);
                        if (ret < 0)
                                return ret;
                }
        }
        return 0;
}

/*
 * Returns the end address of a node so that there is at least `size' amount of
 * non-reserved memory or `max_addr' is reached.
 */
static u64 __init find_end_of_node(u64 start, u64 max_addr, u64 size)
{
        u64 end = start + size;

        while (end - start - memblock_x86_hole_size(start, end) < size) {
                end += FAKE_NODE_MIN_SIZE;
                if (end > max_addr) {
                        end = max_addr;
                        break;
                }
        }
        return end;
}

/*
 * Sets up fake nodes of `size' interleaved over physical nodes ranging from
 * `addr' to `max_addr'.  The return value is the number of nodes allocated.
 */
static int __init split_nodes_size_interleave(struct numa_meminfo *ei,
                                              struct numa_meminfo *pi,
                                              u64 addr, u64 max_addr, u64 size)
{
        nodemask_t physnode_mask = NODE_MASK_NONE;
        u64 min_size;
        int nid = 0;
        int i, ret;

        if (!size)
                return -1;
        /*
         * The limit on emulated nodes is MAX_NUMNODES, so the size per node is
         * increased accordingly if the requested size is too small.  This
         * creates a uniform distribution of node sizes across the entire
         * machine (but not necessarily over physical nodes).
         */
        min_size = (max_addr - addr - memblock_x86_hole_size(addr, max_addr)) /
                                                MAX_NUMNODES;
        min_size = max(min_size, FAKE_NODE_MIN_SIZE);
        if ((min_size & FAKE_NODE_MIN_HASH_MASK) < min_size)
                min_size = (min_size + FAKE_NODE_MIN_SIZE) &
                        FAKE_NODE_MIN_HASH_MASK;
        if (size < min_size) {
                pr_err("Fake node size %LuMB too small, increasing to %LuMB\n",
                        size >> 20, min_size >> 20);
                size = min_size;
        }
        size &= FAKE_NODE_MIN_HASH_MASK;
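        /*
         * Worked example (hypothetical numbers, assuming 32MB
         * FAKE_NODE_MIN_SIZE and a MAX_NUMNODES of 64): on a machine with
         * ~4GB of non-hole memory, min_size is 4096MB / 64 = 64MB, already a
         * multiple of 32MB, so a "numa=fake=32M" request would be bumped up
         * to 64MB nodes before splitting begins.
         */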

        for (i = 0; i < pi->nr_blks; i++)
                node_set(pi->blk[i].nid, physnode_mask);

        /*
         * Fill physical nodes with fake nodes of size until there is no memory
         * left on any of them.
         */
        while (nodes_weight(physnode_mask)) {
                for_each_node_mask(i, physnode_mask) {
                        u64 dma32_end = MAX_DMA32_PFN << PAGE_SHIFT;
                        u64 start, limit, end;
                        int phys_blk;

                        phys_blk = emu_find_memblk_by_nid(i, pi);
                        if (phys_blk < 0) {
                                node_clear(i, physnode_mask);
                                continue;
                        }
                        start = pi->blk[phys_blk].start;
                        limit = pi->blk[phys_blk].end;

                        end = find_end_of_node(start, limit, size);
                        /*
                         * If there won't be at least FAKE_NODE_MIN_SIZE of
                         * non-reserved memory in ZONE_DMA32 for the next node,
                         * this one must extend to the boundary.
                         */
                        if (end < dma32_end && dma32_end - end -
                            memblock_x86_hole_size(end, dma32_end) < FAKE_NODE_MIN_SIZE)
                                end = dma32_end;

                        /*
                         * If there won't be enough non-reserved memory for the
                         * next node, this one must extend to the end of the
                         * physical node.
                         */
                        if (limit - end -
                            memblock_x86_hole_size(end, limit) < size)
                                end = limit;

                        ret = emu_setup_memblk(ei, pi, nid++ % MAX_NUMNODES,
                                               phys_blk,
                                               min(end, limit) - start);
                        if (ret < 0)
                                return ret;
                }
        }
        return 0;
}

/**
 * numa_emulation - Emulate NUMA nodes
 * @numa_meminfo: NUMA configuration to massage
 * @numa_dist_cnt: The size of the physical NUMA distance table
 *
 * Emulate NUMA nodes according to the numa=fake kernel parameter.
 * @numa_meminfo contains the physical memory configuration and is modified
 * to reflect the emulated configuration on success.  @numa_dist_cnt is
 * used to determine the size of the physical distance table.
 *
 * On success, the following modifications are made.
 *
 * - @numa_meminfo is updated to reflect the emulated nodes.
 *
 * - __apicid_to_node[] is updated such that APIC IDs are mapped to the
 *   emulated nodes.
 *
 * - NUMA distance table is rebuilt to represent distances between emulated
 *   nodes.  The distances are determined considering how emulated nodes
 *   are mapped to physical nodes and match the actual distances.
 *
 * - emu_nid_to_phys[] reflects how emulated nodes are mapped to physical
 *   nodes.  This is used by numa_add_cpu() and numa_remove_cpu().
 *
 * If emulation is not enabled or fails, emu_nid_to_phys[] is filled with
 * identity mapping and no other modification is made.
 */
void __init numa_emulation(struct numa_meminfo *numa_meminfo, int numa_dist_cnt)
{
        static struct numa_meminfo ei __initdata;
        static struct numa_meminfo pi __initdata;
        const u64 max_addr = max_pfn << PAGE_SHIFT;
        u8 *phys_dist = NULL;
        size_t phys_size = numa_dist_cnt * numa_dist_cnt * sizeof(phys_dist[0]);
        int max_emu_nid, dfl_phys_nid;
        int i, j, ret;

        if (!emu_cmdline)
                goto no_emu;

        memset(&ei, 0, sizeof(ei));
        pi = *numa_meminfo;

        for (i = 0; i < MAX_NUMNODES; i++)
                emu_nid_to_phys[i] = NUMA_NO_NODE;

        /*
         * If the numa=fake command-line contains a 'M' or 'G', it represents
         * the fixed node size.  Otherwise, if it is just a single number N,
         * split the system RAM into N fake nodes.
         */
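        /*
         * Example (illustrative values only): "numa=fake=8" interleaves
         * eight equally sized emulated nodes across the physical nodes,
         * while "numa=fake=512M" carves out as many 512MB emulated nodes as
         * the physical memory map allows.
         */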
        if (strchr(emu_cmdline, 'M') || strchr(emu_cmdline, 'G')) {
                u64 size;

                size = memparse(emu_cmdline, &emu_cmdline);
                ret = split_nodes_size_interleave(&ei, &pi, 0, max_addr, size);
        } else {
                unsigned long n;

                n = simple_strtoul(emu_cmdline, NULL, 0);
                ret = split_nodes_interleave(&ei, &pi, 0, max_addr, n);
        }

        if (ret < 0)
                goto no_emu;

        if (numa_cleanup_meminfo(&ei) < 0) {
                pr_warning("NUMA: Warning: constructed meminfo invalid, disabling emulation\n");
                goto no_emu;
        }

        /* copy the physical distance table */
        if (numa_dist_cnt) {
                u64 phys;

                phys = memblock_find_in_range(0,
                                              (u64)max_pfn_mapped << PAGE_SHIFT,
                                              phys_size, PAGE_SIZE);
                if (phys == MEMBLOCK_ERROR) {
                        pr_warning("NUMA: Warning: can't allocate copy of distance table, disabling emulation\n");
                        goto no_emu;
                }
                memblock_x86_reserve_range(phys, phys + phys_size, "TMP NUMA DIST");
                phys_dist = __va(phys);

                for (i = 0; i < numa_dist_cnt; i++)
                        for (j = 0; j < numa_dist_cnt; j++)
                                phys_dist[i * numa_dist_cnt + j] =
                                        node_distance(i, j);
        }

        /*
         * Determine the max emulated nid and the default phys nid to use
         * for unmapped nodes.
         */
        max_emu_nid = 0;
        dfl_phys_nid = NUMA_NO_NODE;
        for (i = 0; i < ARRAY_SIZE(emu_nid_to_phys); i++) {
                if (emu_nid_to_phys[i] != NUMA_NO_NODE) {
                        max_emu_nid = i;
                        if (dfl_phys_nid == NUMA_NO_NODE)
                                dfl_phys_nid = emu_nid_to_phys[i];
                }
        }
        if (dfl_phys_nid == NUMA_NO_NODE) {
                pr_warning("NUMA: Warning: can't determine default physical node, disabling emulation\n");
                goto no_emu;
        }

        /* commit */
        *numa_meminfo = ei;

        /*
         * Transform __apicid_to_node table to use emulated nids by
         * reverse-mapping phys_nid.  The maps should always exist but fall
         * back to zero just in case.
         */
        for (i = 0; i < ARRAY_SIZE(__apicid_to_node); i++) {
                if (__apicid_to_node[i] == NUMA_NO_NODE)
                        continue;
                for (j = 0; j < ARRAY_SIZE(emu_nid_to_phys); j++)
                        if (__apicid_to_node[i] == emu_nid_to_phys[j])
                                break;
                __apicid_to_node[i] = j < ARRAY_SIZE(emu_nid_to_phys) ? j : 0;
        }

        /* make sure all emulated nodes are mapped to a physical node */
        for (i = 0; i < ARRAY_SIZE(emu_nid_to_phys); i++)
                if (emu_nid_to_phys[i] == NUMA_NO_NODE)
                        emu_nid_to_phys[i] = dfl_phys_nid;

        /* transform distance table */
        numa_reset_distance();
        for (i = 0; i < max_emu_nid + 1; i++) {
                for (j = 0; j < max_emu_nid + 1; j++) {
                        int physi = emu_nid_to_phys[i];
                        int physj = emu_nid_to_phys[j];
                        int dist;

                        if (physi >= numa_dist_cnt || physj >= numa_dist_cnt)
                                dist = physi == physj ?
                                        LOCAL_DISTANCE : REMOTE_DISTANCE;
                        else
                                dist = phys_dist[physi * numa_dist_cnt + physj];

                        numa_set_distance(i, j, dist);
                }
        }
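
        /*
         * Illustrative example (hypothetical values): with two physical
         * nodes at distances 10 (local) and 21 (remote), and four emulated
         * nodes mapped 0,1 -> phys 0 and 2,3 -> phys 1, the rebuilt table
         * reads 10 between emulated 0 and 1, 10 between 2 and 3, and 21
         * between any pair that spans the two physical nodes.
         */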

        /* free the copied physical distance table */
        if (phys_dist)
                memblock_x86_free_range(__pa(phys_dist), __pa(phys_dist) + phys_size);
        return;

no_emu:
        /* No emulation.  Build identity emu_nid_to_phys[] for numa_add_cpu() */
        for (i = 0; i < ARRAY_SIZE(emu_nid_to_phys); i++)
                emu_nid_to_phys[i] = i;
}

#ifndef CONFIG_DEBUG_PER_CPU_MAPS
void __cpuinit numa_add_cpu(int cpu)
{
        int physnid, nid;

        nid = early_cpu_to_node(cpu);
        BUG_ON(nid == NUMA_NO_NODE || !node_online(nid));

        physnid = emu_nid_to_phys[nid];

        /*
         * Map the cpu to each emulated node that is allocated on the physical
         * node of the cpu's apic id.
         */
        for_each_online_node(nid)
                if (emu_nid_to_phys[nid] == physnid)
                        cpumask_set_cpu(cpu, node_to_cpumask_map[nid]);
}

void __cpuinit numa_remove_cpu(int cpu)
{
        int i;

        for_each_online_node(i)
                cpumask_clear_cpu(cpu, node_to_cpumask_map[i]);
}
#else	/* !CONFIG_DEBUG_PER_CPU_MAPS */
static void __cpuinit numa_set_cpumask(int cpu, bool enable)
{
        int nid, physnid;

        nid = early_cpu_to_node(cpu);
        if (nid == NUMA_NO_NODE) {
                /* early_cpu_to_node() already emits a warning and trace */
                return;
        }

        physnid = emu_nid_to_phys[nid];

        for_each_online_node(nid) {
                if (emu_nid_to_phys[nid] != physnid)
                        continue;

                debug_cpumask_set_cpu(cpu, nid, enable);
        }
}

void __cpuinit numa_add_cpu(int cpu)
{
        numa_set_cpumask(cpu, true);
}

void __cpuinit numa_remove_cpu(int cpu)
{
        numa_set_cpumask(cpu, false);
}
#endif	/* !CONFIG_DEBUG_PER_CPU_MAPS */