memblock: Kill sentinel entries at the end of static region arrays
mm/memblock.c (LineageOS/android_kernel_samsung_universal7580.git)
/*
 * Procedures for maintaining information about logical memory blocks.
 *
 * Peter Bergner, IBM Corp.	June 2001.
 * Copyright (C) 2001 Peter Bergner.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */

#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/init.h>
#include <linux/bitops.h>
#include <linux/poison.h>
#include <linux/pfn.h>
#include <linux/debugfs.h>
#include <linux/seq_file.h>
#include <linux/memblock.h>

struct memblock memblock __initdata_memblock;

int memblock_debug __initdata_memblock;
int memblock_can_resize __initdata_memblock;
static struct memblock_region memblock_memory_init_regions[INIT_MEMBLOCK_REGIONS] __initdata_memblock;
static struct memblock_region memblock_reserved_init_regions[INIT_MEMBLOCK_REGIONS] __initdata_memblock;
/* inline so we don't get a warning when pr_debug is compiled out */
static inline const char *memblock_type_name(struct memblock_type *type)
{
	if (type == &memblock.memory)
		return "memory";
	else if (type == &memblock.reserved)
		return "reserved";
	else
		return "unknown";
}

/*
 * Address comparison utilities
 */
static unsigned long __init_memblock memblock_addrs_overlap(phys_addr_t base1, phys_addr_t size1,
				       phys_addr_t base2, phys_addr_t size2)
{
	return ((base1 < (base2 + size2)) && (base2 < (base1 + size1)));
}

static long __init_memblock memblock_overlaps_region(struct memblock_type *type,
					phys_addr_t base, phys_addr_t size)
{
	unsigned long i;

	for (i = 0; i < type->cnt; i++) {
		phys_addr_t rgnbase = type->regions[i].base;
		phys_addr_t rgnsize = type->regions[i].size;
		if (memblock_addrs_overlap(base, size, rgnbase, rgnsize))
			break;
	}

	return (i < type->cnt) ? i : -1;
}

/*
 * Find, allocate, deallocate or reserve unreserved regions. All allocations
 * are top-down.
 */

static phys_addr_t __init_memblock memblock_find_region(phys_addr_t start, phys_addr_t end,
					  phys_addr_t size, phys_addr_t align)
{
	phys_addr_t base, res_base;
	long j;

	/* In case a huge size is requested */
	if (end < size)
		return 0;

	base = round_down(end - size, align);

	/* Prevent allocations returning 0 as it's also used to
	 * indicate an allocation failure
	 */
	if (start == 0)
		start = PAGE_SIZE;

	while (start <= base) {
		j = memblock_overlaps_region(&memblock.reserved, base, size);
		if (j < 0)
			return base;
		res_base = memblock.reserved.regions[j].base;
		if (res_base < size)
			break;
		base = round_down(res_base - size, align);
	}

	return 0;
}
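
/*
 * A worked example of the top-down scan above (values are hypothetical):
 * searching [0, 0x10000) for 0x1000 bytes at 0x1000 alignment starts at
 * base = 0xf000.  If a reserved region [0xe000-0x10000) overlaps, base is
 * pulled down to round_down(0xe000 - 0x1000, 0x1000) = 0xd000 and the scan
 * repeats until no reservation overlaps or base drops below start.
 */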

/*
 * Find a free area with specified alignment in a specific range.
 */
phys_addr_t __init_memblock memblock_find_in_range(phys_addr_t start, phys_addr_t end,
					phys_addr_t size, phys_addr_t align)
{
	long i;

	BUG_ON(0 == size);

	/* Pump up max_addr */
	if (end == MEMBLOCK_ALLOC_ACCESSIBLE)
		end = memblock.current_limit;

	/* We do a top-down search, this tends to limit memory
	 * fragmentation by keeping early boot allocs near the
	 * top of memory
	 */
	for (i = memblock.memory.cnt - 1; i >= 0; i--) {
		phys_addr_t memblockbase = memblock.memory.regions[i].base;
		phys_addr_t memblocksize = memblock.memory.regions[i].size;
		phys_addr_t bottom, top, found;

		if (memblocksize < size)
			continue;
		if ((memblockbase + memblocksize) <= start)
			break;
		bottom = max(memblockbase, start);
		top = min(memblockbase + memblocksize, end);
		if (bottom >= top)
			continue;
		found = memblock_find_region(bottom, top, size, align);
		if (found)
			return found;
	}
	return 0;
}
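
/*
 * A minimal usage sketch (illustrative only): memblock_find_in_range() does
 * not mark the range as used, so a typical early-boot caller pairs it with
 * an explicit reservation:
 *
 *	phys_addr_t addr = memblock_find_in_range(0, MEMBLOCK_ALLOC_ACCESSIBLE,
 *						   PAGE_SIZE, PAGE_SIZE);
 *	if (addr)
 *		memblock_reserve(addr, PAGE_SIZE);
 */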

/*
 * Free memblock.reserved.regions
 */
int __init_memblock memblock_free_reserved_regions(void)
{
	if (memblock.reserved.regions == memblock_reserved_init_regions)
		return 0;

	return memblock_free(__pa(memblock.reserved.regions),
		 sizeof(struct memblock_region) * memblock.reserved.max);
}

/*
 * Reserve memblock.reserved.regions
 */
int __init_memblock memblock_reserve_reserved_regions(void)
{
	if (memblock.reserved.regions == memblock_reserved_init_regions)
		return 0;

	return memblock_reserve(__pa(memblock.reserved.regions),
		 sizeof(struct memblock_region) * memblock.reserved.max);
}

static void __init_memblock memblock_remove_region(struct memblock_type *type, unsigned long r)
{
	memmove(&type->regions[r], &type->regions[r + 1],
		(type->cnt - (r + 1)) * sizeof(type->regions[r]));
	type->cnt--;

	/* Special case for empty arrays */
	if (type->cnt == 0) {
		type->cnt = 1;
		type->regions[0].base = 0;
		type->regions[0].size = 0;
		memblock_set_region_node(&type->regions[0], MAX_NUMNODES);
	}
}

static int __init_memblock memblock_double_array(struct memblock_type *type)
{
	struct memblock_region *new_array, *old_array;
	phys_addr_t old_size, new_size, addr;
	int use_slab = slab_is_available();

	/* We don't allow resizing until we know about the reserved regions
	 * of memory that aren't suitable for allocation
	 */
	if (!memblock_can_resize)
		return -1;

	/* Calculate new doubled size */
	old_size = type->max * sizeof(struct memblock_region);
	new_size = old_size << 1;

	/* Try to find some space for it.
	 *
	 * WARNING: We assume that either slab_is_available() and we use it or
	 * we use MEMBLOCK for allocations. That means that this is unsafe to use
	 * when bootmem is currently active (unless bootmem itself is implemented
	 * on top of MEMBLOCK which isn't the case yet)
	 *
	 * This should however not be an issue for now, as we currently only
	 * call into MEMBLOCK while it's still active, or much later when slab is
	 * active for memory hotplug operations
	 */
	if (use_slab) {
		new_array = kmalloc(new_size, GFP_KERNEL);
		addr = new_array ? __pa(new_array) : 0;
	} else
		addr = memblock_find_in_range(0, MEMBLOCK_ALLOC_ACCESSIBLE, new_size, sizeof(phys_addr_t));
	if (!addr) {
		pr_err("memblock: Failed to double %s array from %ld to %ld entries!\n",
		       memblock_type_name(type), type->max, type->max * 2);
		return -1;
	}
	new_array = __va(addr);

	memblock_dbg("memblock: %s array is doubled to %ld at [%#010llx-%#010llx]",
		     memblock_type_name(type), type->max * 2, (u64)addr, (u64)addr + new_size - 1);

	/* Found space, we now need to move the array over before
	 * we add the reserved region since it may be our reserved
	 * array itself that is full.
	 */
	memcpy(new_array, type->regions, old_size);
	memset(new_array + type->max, 0, old_size);
	old_array = type->regions;
	type->regions = new_array;
	type->max <<= 1;

	/* If we use SLAB that's it, we are done */
	if (use_slab)
		return 0;

	/* Add the new reserved region now. Should not fail! */
	BUG_ON(memblock_reserve(addr, new_size));

	/* If the array wasn't our static init one, then free it. We only do
	 * that before SLAB is available as later on, we don't know whether
	 * to use kfree or free_bootmem_pages(). Shouldn't be a big deal
	 * anyway.
	 */
	if (old_array != memblock_memory_init_regions &&
	    old_array != memblock_reserved_init_regions)
		memblock_free(__pa(old_array), old_size);

	return 0;
}
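
/*
 * An illustrative growth sequence (assuming the usual INIT_MEMBLOCK_REGIONS
 * value of 128): each successful call doubles type->max, so the static
 * 128-entry array is replaced by a 256-entry one, then 512, and so on.
 * Whether the replacement comes from kmalloc() or from memblock itself is
 * decided solely by slab_is_available() at the time of the call.
 */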

/**
 * memblock_merge_regions - merge neighboring compatible regions
 * @type: memblock type to scan
 *
 * Scan @type and merge neighboring compatible regions.
 */
static void __init_memblock memblock_merge_regions(struct memblock_type *type)
{
	int i = 0;

	/* cnt never goes below 1 */
	while (i < type->cnt - 1) {
		struct memblock_region *this = &type->regions[i];
		struct memblock_region *next = &type->regions[i + 1];

		if (this->base + this->size != next->base ||
		    memblock_get_region_node(this) !=
		    memblock_get_region_node(next)) {
			BUG_ON(this->base + this->size > next->base);
			i++;
			continue;
		}

		this->size += next->size;
		memmove(next, next + 1, (type->cnt - (i + 1)) * sizeof(*next));
		type->cnt--;
	}
}
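
/*
 * A worked example (values are hypothetical): with regions 0:[0x0-0x1000)
 * and 1:[0x1000-0x3000) on the same node, one pass folds them into a single
 * region [0x0-0x3000) and decrements type->cnt; a gap or a differing node
 * id between neighbours leaves them untouched.
 */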

/**
 * memblock_insert_region - insert new memblock region
 * @type: memblock type to insert into
 * @idx: index for the insertion point
 * @base: base address of the new region
 * @size: size of the new region
 *
 * Insert new memblock region [@base,@base+@size) into @type at @idx.
 * @type must already have extra room to accommodate the new region.
 */
static void __init_memblock memblock_insert_region(struct memblock_type *type,
						   int idx, phys_addr_t base,
						   phys_addr_t size, int nid)
{
	struct memblock_region *rgn = &type->regions[idx];

	BUG_ON(type->cnt >= type->max);
	memmove(rgn + 1, rgn, (type->cnt - idx) * sizeof(*rgn));
	rgn->base = base;
	rgn->size = size;
	memblock_set_region_node(rgn, nid);
	type->cnt++;
}

/**
 * memblock_add_region - add new memblock region
 * @type: memblock type to add new region into
 * @base: base address of the new region
 * @size: size of the new region
 *
 * Add new memblock region [@base,@base+@size) into @type.  The new region
 * is allowed to overlap with existing ones - overlaps don't affect already
 * existing regions.  @type is guaranteed to be minimal (all neighbouring
 * compatible regions are merged) after the addition.
 *
 * RETURNS:
 * 0 on success, -errno on failure.
 */
static int __init_memblock memblock_add_region(struct memblock_type *type,
					       phys_addr_t base, phys_addr_t size)
{
	bool insert = false;
	phys_addr_t obase = base, end = base + size;
	int i, nr_new;

	/* special case for empty array */
	if (type->regions[0].size == 0) {
		WARN_ON(type->cnt != 1);
		type->regions[0].base = base;
		type->regions[0].size = size;
		memblock_set_region_node(&type->regions[0], MAX_NUMNODES);
		return 0;
	}
repeat:
	/*
	 * The following is executed twice.  Once with %false @insert and
	 * then with %true.  The first counts the number of regions needed
	 * to accommodate the new area.  The second actually inserts them.
	 */
	base = obase;
	nr_new = 0;

	for (i = 0; i < type->cnt; i++) {
		struct memblock_region *rgn = &type->regions[i];
		phys_addr_t rbase = rgn->base;
		phys_addr_t rend = rbase + rgn->size;

		if (rbase >= end)
			break;
		if (rend <= base)
			continue;
		/*
		 * @rgn overlaps.  If it separates the lower part of new
		 * area, insert that portion.
		 */
		if (rbase > base) {
			nr_new++;
			if (insert)
				memblock_insert_region(type, i++, base,
						       rbase - base, MAX_NUMNODES);
		}
		/* area below @rend is dealt with, forget about it */
		base = min(rend, end);
	}

	/* insert the remaining portion */
	if (base < end) {
		nr_new++;
		if (insert)
			memblock_insert_region(type, i, base, end - base,
					       MAX_NUMNODES);
	}

	/*
	 * If this was the first round, resize array and repeat for actual
	 * insertions; otherwise, merge and return.
	 */
	if (!insert) {
		while (type->cnt + nr_new > type->max)
			if (memblock_double_array(type) < 0)
				return -ENOMEM;
		insert = true;
		goto repeat;
	} else {
		memblock_merge_regions(type);
		return 0;
	}
}
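
/*
 * A worked example of the two-pass scheme (values are hypothetical): adding
 * [0x2000-0x6000) to a type that already holds [0x3000-0x4000) needs two new
 * regions, [0x2000-0x3000) and [0x4000-0x6000).  The first pass only counts
 * them (nr_new == 2) and grows the array if needed; the second pass inserts
 * them, and memblock_merge_regions() then collapses the three adjacent
 * regions into a single [0x2000-0x6000) entry.
 */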

int __init_memblock memblock_add(phys_addr_t base, phys_addr_t size)
{
	return memblock_add_region(&memblock.memory, base, size);
}

static int __init_memblock __memblock_remove(struct memblock_type *type,
					     phys_addr_t base, phys_addr_t size)
{
	phys_addr_t end = base + size;
	int i;

	/* Walk through the array for collisions */
	for (i = 0; i < type->cnt; i++) {
		struct memblock_region *rgn = &type->regions[i];
		phys_addr_t rend = rgn->base + rgn->size;

		/* Nothing more to do, exit */
		if (rgn->base > end || rgn->size == 0)
			break;

		/* If we fully enclose the block, drop it */
		if (base <= rgn->base && end >= rend) {
			memblock_remove_region(type, i--);
			continue;
		}

		/* If we are fully enclosed within a block
		 * then we need to split it and we are done
		 */
		if (base > rgn->base && end < rend) {
			rgn->size = base - rgn->base;
			if (!memblock_add_region(type, end, rend - end))
				return 0;
			/* Failure to split is bad, we at least
			 * restore the block before erroring
			 */
			rgn->size = rend - rgn->base;
			WARN_ON(1);
			return -1;
		}

		/* Check if we need to trim the bottom of a block */
		if (rgn->base < end && rend > end) {
			rgn->size -= end - rgn->base;
			rgn->base = end;
			break;
		}

		/* And check if we need to trim the top of a block */
		if (base < rend)
			rgn->size -= rend - base;

	}
	return 0;
}
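
/*
 * The four cases above with hypothetical values, removing from a single
 * region [0x0-0x4000):
 *  - remove [0x0-0x4000):    fully encloses it, the region is dropped;
 *  - remove [0x1000-0x2000): falls inside it, it is split into
 *    [0x0-0x1000) and [0x2000-0x4000);
 *  - remove [0x0-0x1000):    trims the bottom, leaving [0x1000-0x4000);
 *  - remove [0x3000-0x5000): trims the top, leaving [0x0-0x3000).
 */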

int __init_memblock memblock_remove(phys_addr_t base, phys_addr_t size)
{
	return __memblock_remove(&memblock.memory, base, size);
}

int __init_memblock memblock_free(phys_addr_t base, phys_addr_t size)
{
	memblock_dbg("   memblock_free: [%#016llx-%#016llx] %pF\n",
		     (unsigned long long)base,
		     (unsigned long long)base + size,
		     (void *)_RET_IP_);

	return __memblock_remove(&memblock.reserved, base, size);
}

int __init_memblock memblock_reserve(phys_addr_t base, phys_addr_t size)
{
	struct memblock_type *_rgn = &memblock.reserved;

	memblock_dbg("memblock_reserve: [%#016llx-%#016llx] %pF\n",
		     (unsigned long long)base,
		     (unsigned long long)base + size,
		     (void *)_RET_IP_);
	BUG_ON(0 == size);

	return memblock_add_region(_rgn, base, size);
}

/**
 * __next_free_mem_range - next function for for_each_free_mem_range()
 * @idx: pointer to u64 loop variable
 * @nid: node selector, %MAX_NUMNODES for all nodes
 * @p_start: ptr to phys_addr_t for start address of the range, can be %NULL
 * @p_end: ptr to phys_addr_t for end address of the range, can be %NULL
 * @p_nid: ptr to int for nid of the range, can be %NULL
 *
 * Find the first free area from *@idx which matches @nid, fill the out
 * parameters, and update *@idx for the next iteration.  The lower 32bit of
 * *@idx contains index into memory region and the upper 32bit indexes the
 * areas before each reserved region.  For example, if reserved regions
 * look like the following,
 *
 *	0:[0-16), 1:[32-48), 2:[128-130)
 *
 * The upper 32bit indexes the following regions.
 *
 *	0:[0-0), 1:[16-32), 2:[48-128), 3:[130-MAX)
 *
 * As both region arrays are sorted, the function advances the two indices
 * in lockstep and returns each intersection.
 */
void __init_memblock __next_free_mem_range(u64 *idx, int nid,
					   phys_addr_t *out_start,
					   phys_addr_t *out_end, int *out_nid)
{
	struct memblock_type *mem = &memblock.memory;
	struct memblock_type *rsv = &memblock.reserved;
	int mi = *idx & 0xffffffff;
	int ri = *idx >> 32;

	for ( ; mi < mem->cnt; mi++) {
		struct memblock_region *m = &mem->regions[mi];
		phys_addr_t m_start = m->base;
		phys_addr_t m_end = m->base + m->size;

		/* only memory regions are associated with nodes, check it */
		if (nid != MAX_NUMNODES && nid != memblock_get_region_node(m))
			continue;

		/* scan areas before each reservation for intersection */
		for ( ; ri < rsv->cnt + 1; ri++) {
			struct memblock_region *r = &rsv->regions[ri];
			phys_addr_t r_start = ri ? r[-1].base + r[-1].size : 0;
			phys_addr_t r_end = ri < rsv->cnt ? r->base : ULLONG_MAX;

			/* if ri advanced past mi, break out to advance mi */
			if (r_start >= m_end)
				break;
			/* if the two regions intersect, we're done */
			if (m_start < r_end) {
				if (out_start)
					*out_start = max(m_start, r_start);
				if (out_end)
					*out_end = min(m_end, r_end);
				if (out_nid)
					*out_nid = memblock_get_region_node(m);
				/*
				 * The region which ends first is advanced
				 * for the next iteration.
				 */
				if (m_end <= r_end)
					mi++;
				else
					ri++;
				*idx = (u32)mi | (u64)ri << 32;
				return;
			}
		}
	}

	/* signal end of iteration */
	*idx = ULLONG_MAX;
}
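
/*
 * A minimal usage sketch (illustrative; it assumes the for_each_free_mem_range()
 * wrapper that <linux/memblock.h> builds on top of this iterator): walk every
 * free range on any node and print it.
 *
 *	u64 i;
 *	phys_addr_t start, end;
 *	int nid;
 *
 *	for_each_free_mem_range(i, MAX_NUMNODES, &start, &end, &nid)
 *		pr_info("free: [%#llx-%#llx) node %d\n",
 *			(unsigned long long)start,
 *			(unsigned long long)end, nid);
 */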

#ifdef CONFIG_HAVE_MEMBLOCK_NODE_MAP
/*
 * Common iterator interface used to define for_each_mem_range().
 */
void __init_memblock __next_mem_pfn_range(int *idx, int nid,
					  unsigned long *out_start_pfn,
					  unsigned long *out_end_pfn, int *out_nid)
{
	struct memblock_type *type = &memblock.memory;
	struct memblock_region *r;

	while (++*idx < type->cnt) {
		r = &type->regions[*idx];

		if (PFN_UP(r->base) >= PFN_DOWN(r->base + r->size))
			continue;
		if (nid == MAX_NUMNODES || nid == r->nid)
			break;
	}
	if (*idx >= type->cnt) {
		*idx = -1;
		return;
	}

	if (out_start_pfn)
		*out_start_pfn = PFN_UP(r->base);
	if (out_end_pfn)
		*out_end_pfn = PFN_DOWN(r->base + r->size);
	if (out_nid)
		*out_nid = r->nid;
}

/**
 * memblock_set_node - set node ID on memblock regions
 * @base: base of area to set node ID for
 * @size: size of area to set node ID for
 * @nid: node ID to set
 *
 * Set the nid of memblock memory regions in [@base,@base+@size) to @nid.
 * Regions which cross the area boundaries are split as necessary.
 *
 * RETURNS:
 * 0 on success, -errno on failure.
 */
int __init_memblock memblock_set_node(phys_addr_t base, phys_addr_t size,
				      int nid)
{
	struct memblock_type *type = &memblock.memory;
	phys_addr_t end = base + size;
	int i;

	/* we'll create at most two more regions */
	while (type->cnt + 2 > type->max)
		if (memblock_double_array(type) < 0)
			return -ENOMEM;

	for (i = 0; i < type->cnt; i++) {
		struct memblock_region *rgn = &type->regions[i];
		phys_addr_t rbase = rgn->base;
		phys_addr_t rend = rbase + rgn->size;

		if (rbase >= end)
			break;
		if (rend <= base)
			continue;

		if (rbase < base) {
			/*
			 * @rgn intersects from below.  Split and continue
			 * to process the next region - the new top half.
			 */
			rgn->base = base;
			rgn->size = rend - rgn->base;
			memblock_insert_region(type, i, rbase, base - rbase,
					       rgn->nid);
		} else if (rend > end) {
			/*
			 * @rgn intersects from above.  Split and redo the
			 * current region - the new bottom half.
			 */
			rgn->base = end;
			rgn->size = rend - rgn->base;
			memblock_insert_region(type, i--, rbase, end - rbase,
					       rgn->nid);
		} else {
			/* @rgn is fully contained, set ->nid */
			rgn->nid = nid;
		}
	}

	memblock_merge_regions(type);
	return 0;
}
#endif /* CONFIG_HAVE_MEMBLOCK_NODE_MAP */
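
/*
 * A worked example for memblock_set_node() (values are hypothetical): setting
 * nid 1 on [0x1000-0x3000) within a single region [0x0-0x4000) splits it into
 * [0x0-0x1000), [0x1000-0x3000) with nid 1, and [0x3000-0x4000); the two
 * outer pieces keep their previous node id.
 */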

phys_addr_t __init __memblock_alloc_base(phys_addr_t size, phys_addr_t align, phys_addr_t max_addr)
{
	phys_addr_t found;

	/* We align the size to limit fragmentation. Without this, a lot of
	 * small allocs quickly eat up the whole reserve array on sparc
	 */
	size = round_up(size, align);

	found = memblock_find_in_range(0, max_addr, size, align);
	if (found && !memblock_reserve(found, size))
		return found;

	return 0;
}

phys_addr_t __init memblock_alloc_base(phys_addr_t size, phys_addr_t align, phys_addr_t max_addr)
{
	phys_addr_t alloc;

	alloc = __memblock_alloc_base(size, align, max_addr);

	if (alloc == 0)
		panic("ERROR: Failed to allocate 0x%llx bytes below 0x%llx.\n",
		      (unsigned long long) size, (unsigned long long) max_addr);

	return alloc;
}

phys_addr_t __init memblock_alloc(phys_addr_t size, phys_addr_t align)
{
	return memblock_alloc_base(size, align, MEMBLOCK_ALLOC_ACCESSIBLE);
}
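
/*
 * A minimal usage sketch (illustrative only): early-boot code typically grabs
 * a physically contiguous, aligned buffer and converts the returned physical
 * address itself.  Note that memblock_alloc() goes through
 * memblock_alloc_base(), which panics instead of returning 0 on failure.
 *
 *	phys_addr_t pa = memblock_alloc(PAGE_SIZE, PAGE_SIZE);
 *	void *buf = __va(pa);
 */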
95f72d1e 675
95f72d1e 676
6ed311b2 677/*
34e18455 678 * Additional node-local top-down allocators.
c196f76f
BH
679 *
680 * WARNING: Only available after early_node_map[] has been populated,
681 * on some architectures, that is after all the calls to add_active_range()
682 * have been done to populate it.
6ed311b2 683 */
95f72d1e 684
34e18455
TH
685static phys_addr_t __init memblock_nid_range_rev(phys_addr_t start,
686 phys_addr_t end, int *nid)
c3f72b57 687{
c196f76f 688#ifdef CONFIG_ARCH_POPULATES_NODE_MAP
c196f76f
BH
689 unsigned long start_pfn, end_pfn;
690 int i;
691
b2fea988 692 for_each_mem_pfn_range(i, MAX_NUMNODES, &start_pfn, &end_pfn, nid)
34e18455
TH
693 if (end > PFN_PHYS(start_pfn) && end <= PFN_PHYS(end_pfn))
694 return max(start, PFN_PHYS(start_pfn));
c196f76f 695#endif
c3f72b57 696 *nid = 0;
34e18455 697 return start;
c3f72b57
BH
698}
699
e6498040
TH
700phys_addr_t __init memblock_find_in_range_node(phys_addr_t start,
701 phys_addr_t end,
2898cc4c
BH
702 phys_addr_t size,
703 phys_addr_t align, int nid)
95f72d1e 704{
e6498040
TH
705 struct memblock_type *mem = &memblock.memory;
706 int i;
95f72d1e 707
e6498040 708 BUG_ON(0 == size);
95f72d1e 709
e6498040
TH
710 /* Pump up max_addr */
711 if (end == MEMBLOCK_ALLOC_ACCESSIBLE)
712 end = memblock.current_limit;
95f72d1e 713
e6498040
TH
714 for (i = mem->cnt - 1; i >= 0; i--) {
715 struct memblock_region *r = &mem->regions[i];
716 phys_addr_t base = max(start, r->base);
717 phys_addr_t top = min(end, r->base + r->size);
718
719 while (base < top) {
720 phys_addr_t tbase, ret;
721 int tnid;
722
723 tbase = memblock_nid_range_rev(base, top, &tnid);
724 if (nid == MAX_NUMNODES || tnid == nid) {
725 ret = memblock_find_region(tbase, top, size, align);
726 if (ret)
727 return ret;
728 }
729 top = tbase;
95f72d1e 730 }
95f72d1e 731 }
e6498040 732
1f5026a7 733 return 0;
95f72d1e
YL
734}

phys_addr_t __init memblock_alloc_nid(phys_addr_t size, phys_addr_t align, int nid)
{
	phys_addr_t found;

	/*
	 * We align the size to limit fragmentation. Without this, a lot of
	 * small allocs quickly eat up the whole reserve array on sparc
	 */
	size = round_up(size, align);

	found = memblock_find_in_range_node(0, MEMBLOCK_ALLOC_ACCESSIBLE,
					    size, align, nid);
	if (found && !memblock_reserve(found, size))
		return found;

	return 0;
}

phys_addr_t __init memblock_alloc_try_nid(phys_addr_t size, phys_addr_t align, int nid)
{
	phys_addr_t res = memblock_alloc_nid(size, align, nid);

	if (res)
		return res;
	return memblock_alloc_base(size, align, MEMBLOCK_ALLOC_ACCESSIBLE);
}


/*
 * Remaining API functions
 */

/* You must call memblock_analyze() before this. */
phys_addr_t __init memblock_phys_mem_size(void)
{
	return memblock.memory_size;
}

/* lowest address */
phys_addr_t __init_memblock memblock_start_of_DRAM(void)
{
	return memblock.memory.regions[0].base;
}

phys_addr_t __init_memblock memblock_end_of_DRAM(void)
{
	int idx = memblock.memory.cnt - 1;

	return (memblock.memory.regions[idx].base + memblock.memory.regions[idx].size);
}

/* You must call memblock_analyze() after this. */
void __init memblock_enforce_memory_limit(phys_addr_t memory_limit)
{
	unsigned long i;
	phys_addr_t limit;
	struct memblock_region *p;

	if (!memory_limit)
		return;

	/* Truncate the memblock regions to satisfy the memory limit. */
	limit = memory_limit;
	for (i = 0; i < memblock.memory.cnt; i++) {
		if (limit > memblock.memory.regions[i].size) {
			limit -= memblock.memory.regions[i].size;
			continue;
		}

		memblock.memory.regions[i].size = limit;
		memblock.memory.cnt = i + 1;
		break;
	}

	memory_limit = memblock_end_of_DRAM();

	/* And truncate any reserves above the limit also. */
	for (i = 0; i < memblock.reserved.cnt; i++) {
		p = &memblock.reserved.regions[i];

		if (p->base > memory_limit)
			p->size = 0;
		else if ((p->base + p->size) > memory_limit)
			p->size = memory_limit - p->base;

		if (p->size == 0) {
			memblock_remove_region(&memblock.reserved, i);
			i--;
		}
	}
}
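
/*
 * A worked example (values are hypothetical): with memory regions
 * [0x00000000-0x20000000) and [0x40000000-0x60000000) and a 768MB limit, the
 * first region uses up 512MB of the budget and the second is shrunk to
 * [0x40000000-0x50000000).  Reserved regions that then sit entirely above
 * memblock_end_of_DRAM() are deleted, and any region straddling it is clipped.
 */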

static int __init_memblock memblock_search(struct memblock_type *type, phys_addr_t addr)
{
	unsigned int left = 0, right = type->cnt;

	do {
		unsigned int mid = (right + left) / 2;

		if (addr < type->regions[mid].base)
			right = mid;
		else if (addr >= (type->regions[mid].base +
				  type->regions[mid].size))
			left = mid + 1;
		else
			return mid;
	} while (left < right);
	return -1;
}
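
/*
 * A worked example (values are hypothetical): with sorted, non-overlapping
 * regions 0:[0x0-0x1000), 1:[0x2000-0x3000) and 2:[0x5000-0x6000), looking up
 * 0x2800 probes mid = 1, finds 0x2000 <= 0x2800 < 0x3000 and returns 1;
 * looking up 0x4000 narrows the window to nothing and returns -1.
 */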

int __init memblock_is_reserved(phys_addr_t addr)
{
	return memblock_search(&memblock.reserved, addr) != -1;
}

int __init_memblock memblock_is_memory(phys_addr_t addr)
{
	return memblock_search(&memblock.memory, addr) != -1;
}

int __init_memblock memblock_is_region_memory(phys_addr_t base, phys_addr_t size)
{
	int idx = memblock_search(&memblock.memory, base);

	if (idx == -1)
		return 0;
	return memblock.memory.regions[idx].base <= base &&
		(memblock.memory.regions[idx].base +
		 memblock.memory.regions[idx].size) >= (base + size);
}

int __init_memblock memblock_is_region_reserved(phys_addr_t base, phys_addr_t size)
{
	return memblock_overlaps_region(&memblock.reserved, base, size) >= 0;
}


void __init_memblock memblock_set_current_limit(phys_addr_t limit)
{
	memblock.current_limit = limit;
}

static void __init_memblock memblock_dump(struct memblock_type *type, char *name)
{
	unsigned long long base, size;
	int i;

	pr_info(" %s.cnt  = 0x%lx\n", name, type->cnt);

	for (i = 0; i < type->cnt; i++) {
		struct memblock_region *rgn = &type->regions[i];
		char nid_buf[32] = "";

		base = rgn->base;
		size = rgn->size;
#ifdef CONFIG_HAVE_MEMBLOCK_NODE_MAP
		if (memblock_get_region_node(rgn) != MAX_NUMNODES)
			snprintf(nid_buf, sizeof(nid_buf), " on node %d",
				 memblock_get_region_node(rgn));
#endif
		pr_info(" %s[%#x]\t[%#016llx-%#016llx], %#llx bytes%s\n",
			name, i, base, base + size - 1, size, nid_buf);
	}
}

void __init_memblock __memblock_dump_all(void)
{
	pr_info("MEMBLOCK configuration:\n");
	pr_info(" memory size = 0x%llx\n", (unsigned long long)memblock.memory_size);

	memblock_dump(&memblock.memory, "memory");
	memblock_dump(&memblock.reserved, "reserved");
}

void __init memblock_analyze(void)
{
	int i;

	memblock.memory_size = 0;

	for (i = 0; i < memblock.memory.cnt; i++)
		memblock.memory_size += memblock.memory.regions[i].size;

	/* We allow resizing from there */
	memblock_can_resize = 1;
}

void __init memblock_init(void)
{
	static int init_done __initdata = 0;

	if (init_done)
		return;
	init_done = 1;

	/* Hookup the initial arrays */
	memblock.memory.regions		= memblock_memory_init_regions;
	memblock.memory.max		= INIT_MEMBLOCK_REGIONS;
	memblock.reserved.regions	= memblock_reserved_init_regions;
	memblock.reserved.max		= INIT_MEMBLOCK_REGIONS;

	/* Create a dummy zero size MEMBLOCK which will get coalesced away later.
	 * This simplifies the memblock_add() code below...
	 */
	memblock.memory.regions[0].base = 0;
	memblock.memory.regions[0].size = 0;
	memblock_set_region_node(&memblock.memory.regions[0], MAX_NUMNODES);
	memblock.memory.cnt = 1;

	/* Ditto. */
	memblock.reserved.regions[0].base = 0;
	memblock.reserved.regions[0].size = 0;
	memblock_set_region_node(&memblock.reserved.regions[0], MAX_NUMNODES);
	memblock.reserved.cnt = 1;

	memblock.current_limit = MEMBLOCK_ALLOC_ANYWHERE;
}
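
/*
 * A sketch of the expected boot-time call order (based on the comments in
 * this file): the architecture code calls memblock_init(), registers RAM with
 * memblock_add() and firmware/kernel areas with memblock_reserve(), and only
 * then calls memblock_analyze(), after which memory_size is valid and the
 * region arrays may be resized.
 */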

static int __init early_memblock(char *p)
{
	if (p && strstr(p, "debug"))
		memblock_debug = 1;
	return 0;
}
early_param("memblock", early_memblock);
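
/*
 * Usage note: booting with "memblock=debug" on the kernel command line sets
 * memblock_debug above, which turns the memblock_dbg() calls in this file
 * into real log output.
 */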

#if defined(CONFIG_DEBUG_FS) && !defined(CONFIG_ARCH_DISCARD_MEMBLOCK)

static int memblock_debug_show(struct seq_file *m, void *private)
{
	struct memblock_type *type = m->private;
	struct memblock_region *reg;
	int i;

	for (i = 0; i < type->cnt; i++) {
		reg = &type->regions[i];
		seq_printf(m, "%4d: ", i);
		if (sizeof(phys_addr_t) == 4)
			seq_printf(m, "0x%08lx..0x%08lx\n",
				   (unsigned long)reg->base,
				   (unsigned long)(reg->base + reg->size - 1));
		else
			seq_printf(m, "0x%016llx..0x%016llx\n",
				   (unsigned long long)reg->base,
				   (unsigned long long)(reg->base + reg->size - 1));
	}
	return 0;
}

static int memblock_debug_open(struct inode *inode, struct file *file)
{
	return single_open(file, memblock_debug_show, inode->i_private);
}

static const struct file_operations memblock_debug_fops = {
	.open = memblock_debug_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = single_release,
};

static int __init memblock_init_debugfs(void)
{
	struct dentry *root = debugfs_create_dir("memblock", NULL);
	if (!root)
		return -ENXIO;
	debugfs_create_file("memory", S_IRUGO, root, &memblock.memory, &memblock_debug_fops);
	debugfs_create_file("reserved", S_IRUGO, root, &memblock.reserved, &memblock_debug_fops);

	return 0;
}
__initcall(memblock_init_debugfs);

#endif /* CONFIG_DEBUG_FS */