page_poison: play nicely with KASAN
mm/nobootmem.c
// SPDX-License-Identifier: GPL-2.0
/*
 *  bootmem - A boot-time physical memory allocator and configurator
 *
 *  Copyright (C) 1999 Ingo Molnar
 *                1999 Kanoj Sarcar, SGI
 *                2008 Johannes Weiner
 *
 * Access to this subsystem has to be serialized externally (which is true
 * for the boot process anyway).
 */
#include <linux/init.h>
#include <linux/pfn.h>
#include <linux/slab.h>
#include <linux/export.h>
#include <linux/kmemleak.h>
#include <linux/range.h>
#include <linux/memblock.h>
#include <linux/bootmem.h>

#include <asm/bug.h>
#include <asm/io.h>

#include "internal.h"

#ifndef CONFIG_HAVE_MEMBLOCK
#error CONFIG_HAVE_MEMBLOCK not defined
#endif

#ifndef CONFIG_NEED_MULTIPLE_NODES
struct pglist_data __refdata contig_page_data;
EXPORT_SYMBOL(contig_page_data);
#endif

unsigned long max_low_pfn;
unsigned long min_low_pfn;
unsigned long max_pfn;
unsigned long long max_possible_pfn;

static void * __init __alloc_memory_core_early(int nid, u64 size, u64 align,
                                        u64 goal, u64 limit)
{
        void *ptr;
        u64 addr;
        ulong flags = choose_memblock_flags();

        if (limit > memblock.current_limit)
                limit = memblock.current_limit;

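        /*
         * Added note: look for a free range; if mirrored memory was
         * requested but none can be found, drop the MEMBLOCK_MIRROR
         * requirement and retry.
         */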
again:
        addr = memblock_find_in_range_node(size, align, goal, limit, nid,
                                           flags);
        if (!addr && (flags & MEMBLOCK_MIRROR)) {
                flags &= ~MEMBLOCK_MIRROR;
                pr_warn("Could not allocate %pap bytes of mirrored memory\n",
                        &size);
                goto again;
        }
        if (!addr)
                return NULL;

        if (memblock_reserve(addr, size))
                return NULL;

        ptr = phys_to_virt(addr);
        memset(ptr, 0, size);
        /*
         * The min_count is set to 0 so that bootmem allocated blocks
         * are never reported as leaks.
         */
        kmemleak_alloc(ptr, size, 0, 0);
        return ptr;
}

/**
 * free_bootmem_late - free bootmem pages directly to page allocator
 * @addr: starting address of the range
 * @size: size of the range in bytes
 *
 * This is only useful when the bootmem allocator has already been torn
 * down, but we are still initializing the system. Pages are given directly
 * to the page allocator; no bootmem metadata is updated because it is gone.
 */
void __init free_bootmem_late(unsigned long addr, unsigned long size)
{
        unsigned long cursor, end;

        kmemleak_free_part_phys(addr, size);

        cursor = PFN_UP(addr);
        end = PFN_DOWN(addr + size);

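        /*
         * Added note: only whole pages can be released to the page
         * allocator, so the start is rounded up and the end rounded down;
         * partial pages at either edge stay reserved.
         */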
        for (; cursor < end; cursor++) {
                __free_pages_bootmem(pfn_to_page(cursor), cursor, 0);
                totalram_pages++;
        }
}

static void __init __free_pages_memory(unsigned long start, unsigned long end)
{
        int order;

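        /*
         * Added note: release in the largest buddy-aligned chunks possible.
         * __ffs(start) gives the largest power-of-two alignment of the
         * start pfn (capped at MAX_ORDER - 1); the inner loop below then
         * shrinks the chunk until it fits before @end.
         */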
        while (start < end) {
                order = min(MAX_ORDER - 1UL, __ffs(start));

                while (start + (1UL << order) > end)
                        order--;

                __free_pages_bootmem(pfn_to_page(start), start, order);

                start += (1UL << order);
        }
}

static unsigned long __init __free_memory_core(phys_addr_t start,
                                 phys_addr_t end)
{
        unsigned long start_pfn = PFN_UP(start);
        unsigned long end_pfn = min_t(unsigned long,
                                      PFN_DOWN(end), max_low_pfn);

        if (start_pfn >= end_pfn)
                return 0;

        __free_pages_memory(start_pfn, end_pfn);

        return end_pfn - start_pfn;
}

static unsigned long __init free_low_memory_core_early(void)
{
        unsigned long count = 0;
        phys_addr_t start, end;
        u64 i;

        memblock_clear_hotplug(0, -1);

        for_each_reserved_mem_region(i, &start, &end)
                reserve_bootmem_region(start, end);

        /*
         * We need to use NUMA_NO_NODE instead of NODE_DATA(0)->node_id
         * because in some cases, e.g. when node 0 has no RAM installed,
         * low memory will be on node 1.
         */
        for_each_free_mem_range(i, NUMA_NO_NODE, MEMBLOCK_NONE, &start, &end,
                                NULL)
                count += __free_memory_core(start, end);

        return count;
}

static int reset_managed_pages_done __initdata;

void reset_node_managed_pages(pg_data_t *pgdat)
{
        struct zone *z;

        for (z = pgdat->node_zones; z < pgdat->node_zones + MAX_NR_ZONES; z++)
                z->managed_pages = 0;
}

void __init reset_all_zones_managed_pages(void)
{
        struct pglist_data *pgdat;

        if (reset_managed_pages_done)
                return;

        for_each_online_pgdat(pgdat)
                reset_node_managed_pages(pgdat);

        reset_managed_pages_done = 1;
}

/**
 * free_all_bootmem - release free pages to the buddy allocator
 *
 * Returns the number of pages actually released.
 */
unsigned long __init free_all_bootmem(void)
{
        unsigned long pages;

        reset_all_zones_managed_pages();

        pages = free_low_memory_core_early();
        totalram_pages += pages;

        return pages;
}
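
/*
 * Usage sketch (illustrative, not part of the original file): architectures
 * hand their remaining boot memory to the buddy allocator once, typically
 * from their mem_init():
 *
 *      void __init mem_init(void)
 *      {
 *              ...
 *              free_all_bootmem();
 *      }
 */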

/**
 * free_bootmem_node - mark a page range as usable
 * @pgdat: node the range resides on
 * @physaddr: starting address of the range
 * @size: size of the range in bytes
 *
 * Partial pages will be considered reserved and left as they are.
 *
 * The range must reside completely on the specified node.
 */
void __init free_bootmem_node(pg_data_t *pgdat, unsigned long physaddr,
                              unsigned long size)
{
        memblock_free(physaddr, size);
}

/**
 * free_bootmem - mark a page range as usable
 * @addr: starting address of the range
 * @size: size of the range in bytes
 *
 * Partial pages will be considered reserved and left as they are.
 *
 * The range must be contiguous but may span node boundaries.
 */
void __init free_bootmem(unsigned long addr, unsigned long size)
{
        memblock_free(addr, size);
}

static void * __init ___alloc_bootmem_nopanic(unsigned long size,
                                        unsigned long align,
                                        unsigned long goal,
                                        unsigned long limit)
{
        void *ptr;

        if (WARN_ON_ONCE(slab_is_available()))
                return kzalloc(size, GFP_NOWAIT);

restart:

        ptr = __alloc_memory_core_early(NUMA_NO_NODE, size, align, goal, limit);

        if (ptr)
                return ptr;

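        /*
         * Added note: the preferred address (@goal) could not be satisfied;
         * retry once with no address preference at all.
         */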
        if (goal != 0) {
                goal = 0;
                goto restart;
        }

        return NULL;
}

/**
 * __alloc_bootmem_nopanic - allocate boot memory without panicking
 * @size: size of the request in bytes
 * @align: alignment of the region
 * @goal: preferred starting address of the region
 *
 * The goal is dropped if it cannot be satisfied and the allocation will
 * fall back to memory below @goal.
 *
 * Allocation may happen on any node in the system.
 *
 * Returns NULL on failure.
 */
void * __init __alloc_bootmem_nopanic(unsigned long size, unsigned long align,
                                        unsigned long goal)
{
        unsigned long limit = -1UL;

        return ___alloc_bootmem_nopanic(size, align, goal, limit);
}

static void * __init ___alloc_bootmem(unsigned long size, unsigned long align,
                                        unsigned long goal, unsigned long limit)
{
        void *mem = ___alloc_bootmem_nopanic(size, align, goal, limit);

        if (mem)
                return mem;
        /*
         * Whoops, we cannot satisfy the allocation request.
         */
        pr_alert("bootmem alloc of %lu bytes failed!\n", size);
        panic("Out of memory");
        return NULL;
}

/**
 * __alloc_bootmem - allocate boot memory
 * @size: size of the request in bytes
 * @align: alignment of the region
 * @goal: preferred starting address of the region
 *
 * The goal is dropped if it cannot be satisfied and the allocation will
 * fall back to memory below @goal.
 *
 * Allocation may happen on any node in the system.
 *
 * The function panics if the request cannot be satisfied.
 */
void * __init __alloc_bootmem(unsigned long size, unsigned long align,
                              unsigned long goal)
{
        unsigned long limit = -1UL;

        return ___alloc_bootmem(size, align, goal, limit);
}
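
/*
 * Usage sketch (illustrative; "hash" and "nr_entries" are made-up names):
 * early callers that cannot proceed without the memory rely on the panic
 * path instead of checking for NULL, e.g.
 *
 *      hash = __alloc_bootmem(nr_entries * sizeof(*hash), SMP_CACHE_BYTES,
 *                             __pa(MAX_DMA_ADDRESS));
 *
 * __pa(MAX_DMA_ADDRESS) is the conventional goal used by many callers so
 * that the DMA zone is kept free where possible.
 */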

void * __init ___alloc_bootmem_node_nopanic(pg_data_t *pgdat,
                                                   unsigned long size,
                                                   unsigned long align,
                                                   unsigned long goal,
                                                   unsigned long limit)
{
        void *ptr;

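        /*
         * Added note: the fallback order is (1) the requested node with the
         * preferred @goal, (2) any node with @goal, (3) retry both with the
         * goal dropped.
         */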
again:
        ptr = __alloc_memory_core_early(pgdat->node_id, size, align,
                                        goal, limit);
        if (ptr)
                return ptr;

        ptr = __alloc_memory_core_early(NUMA_NO_NODE, size, align,
                                        goal, limit);
        if (ptr)
                return ptr;

        if (goal) {
                goal = 0;
                goto again;
        }

        return NULL;
}

void * __init __alloc_bootmem_node_nopanic(pg_data_t *pgdat, unsigned long size,
                                   unsigned long align, unsigned long goal)
{
        if (WARN_ON_ONCE(slab_is_available()))
                return kzalloc_node(size, GFP_NOWAIT, pgdat->node_id);

        return ___alloc_bootmem_node_nopanic(pgdat, size, align, goal, 0);
}

static void * __init ___alloc_bootmem_node(pg_data_t *pgdat, unsigned long size,
                                    unsigned long align, unsigned long goal,
                                    unsigned long limit)
{
        void *ptr;

        ptr = ___alloc_bootmem_node_nopanic(pgdat, size, align, goal, limit);
        if (ptr)
                return ptr;

        pr_alert("bootmem alloc of %lu bytes failed!\n", size);
        panic("Out of memory");
        return NULL;
}

/**
 * __alloc_bootmem_node - allocate boot memory from a specific node
 * @pgdat: node to allocate from
 * @size: size of the request in bytes
 * @align: alignment of the region
 * @goal: preferred starting address of the region
 *
 * The goal is dropped if it cannot be satisfied and the allocation will
 * fall back to memory below @goal.
 *
 * Allocation may fall back to any node in the system if the specified node
 * cannot hold the requested memory.
 *
 * The function panics if the request cannot be satisfied.
 */
void * __init __alloc_bootmem_node(pg_data_t *pgdat, unsigned long size,
                                   unsigned long align, unsigned long goal)
{
        if (WARN_ON_ONCE(slab_is_available()))
                return kzalloc_node(size, GFP_NOWAIT, pgdat->node_id);

        return ___alloc_bootmem_node(pgdat, size, align, goal, 0);
}

void * __init __alloc_bootmem_node_high(pg_data_t *pgdat, unsigned long size,
                                   unsigned long align, unsigned long goal)
{
        return __alloc_bootmem_node(pgdat, size, align, goal);
}
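
/*
 * Added note: in this memblock-backed implementation there is no separate
 * highmem bootmem arena, so the _high variant above is simply an alias for
 * __alloc_bootmem_node().
 */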

/**
 * __alloc_bootmem_low - allocate low boot memory
 * @size: size of the request in bytes
 * @align: alignment of the region
 * @goal: preferred starting address of the region
 *
 * The goal is dropped if it cannot be satisfied and the allocation will
 * fall back to memory below @goal.
 *
 * Allocation may happen on any node in the system.
 *
 * The function panics if the request cannot be satisfied.
 */
void * __init __alloc_bootmem_low(unsigned long size, unsigned long align,
                                  unsigned long goal)
{
        return ___alloc_bootmem(size, align, goal, ARCH_LOW_ADDRESS_LIMIT);
}
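
/*
 * Added note: ARCH_LOW_ADDRESS_LIMIT is defined in <linux/bootmem.h> and
 * defaults to 0xffffffffUL (the first 4 GiB); architectures with a stricter
 * notion of "low" memory override it.
 */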

void * __init __alloc_bootmem_low_nopanic(unsigned long size,
                                          unsigned long align,
                                          unsigned long goal)
{
        return ___alloc_bootmem_nopanic(size, align, goal,
                                        ARCH_LOW_ADDRESS_LIMIT);
}

/**
 * __alloc_bootmem_low_node - allocate low boot memory from a specific node
 * @pgdat: node to allocate from
 * @size: size of the request in bytes
 * @align: alignment of the region
 * @goal: preferred starting address of the region
 *
 * The goal is dropped if it cannot be satisfied and the allocation will
 * fall back to memory below @goal.
 *
 * Allocation may fall back to any node in the system if the specified node
 * cannot hold the requested memory.
 *
 * The function panics if the request cannot be satisfied.
 */
void * __init __alloc_bootmem_low_node(pg_data_t *pgdat, unsigned long size,
                                       unsigned long align, unsigned long goal)
{
        if (WARN_ON_ONCE(slab_is_available()))
                return kzalloc_node(size, GFP_NOWAIT, pgdat->node_id);

        return ___alloc_bootmem_node(pgdat, size, align, goal,
                                     ARCH_LOW_ADDRESS_LIMIT);
}