/*
 * Based on arch/arm/mm/init.c
 *
 * Copyright (C) 1995-2005 Russell King
 * Copyright (C) 2012 ARM Ltd.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program.  If not, see <http://www.gnu.org/licenses/>.
 */
#include <linux/kernel.h>
#include <linux/export.h>
#include <linux/errno.h>
#include <linux/swap.h>
#include <linux/init.h>
#include <linux/bootmem.h>
#include <linux/mman.h>
#include <linux/nodemask.h>
#include <linux/initrd.h>
#include <linux/gfp.h>
#include <linux/memblock.h>
#include <linux/sort.h>
#include <linux/of_fdt.h>
#include <linux/dma-mapping.h>
#include <linux/mrdump.h>

#include <asm/sections.h>
#include <asm/setup.h>
#include <asm/sizes.h>

#include <mach/mtk_memcfg.h>
static unsigned long phys_initrd_start __initdata = 0;
static unsigned long phys_initrd_size __initdata = 0;

phys_addr_t memstart_addr __read_mostly = 0;
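/*
 * Record where the boot loader placed the initrd, as reported by the device
 * tree or the "initrd=" parameter; arm64_memblock_init() reserves it later.
 */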
void __init early_init_dt_setup_initrd_arch(unsigned long start,
					    unsigned long end)
{
	phys_initrd_start = start;
	phys_initrd_size = end - start;
}
static int __init early_initrd(char *p)
{
	unsigned long start, size;
	char *endp;

	start = memparse(p, &endp);
	if (*endp == ',') {
		size = memparse(endp + 1, NULL);

		phys_initrd_start = start;
		phys_initrd_size = size;
	}
	return 0;
}
early_param("initrd", early_initrd);
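/*
 * Compute the size of each memory zone and the holes within it from the
 * memblock regions: ZONE_DMA covers the 32-bit addressable range when
 * enabled, ZONE_NORMAL takes the rest, and the result is handed to
 * free_area_init_node().
 */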
static void __init zone_sizes_init(unsigned long min, unsigned long max)
{
	struct memblock_region *reg;
	unsigned long zone_size[MAX_NR_ZONES], zhole_size[MAX_NR_ZONES];
	unsigned long max_dma = min;

	memset(zone_size, 0, sizeof(zone_size));

	/* 4GB maximum for 32-bit only capable devices */
	if (IS_ENABLED(CONFIG_ZONE_DMA)) {
		unsigned long max_dma_phys =
			(unsigned long)dma_to_phys(NULL, DMA_BIT_MASK(32) + 1);
		max_dma = max(min, min(max, max_dma_phys >> PAGE_SHIFT));
		zone_size[ZONE_DMA] = max_dma - min;
	}
	zone_size[ZONE_NORMAL] = max - max_dma;

	memcpy(zhole_size, zone_size, sizeof(zhole_size));

	for_each_memblock(memory, reg) {
		unsigned long start = memblock_region_memory_base_pfn(reg);
		unsigned long end = memblock_region_memory_end_pfn(reg);

		if (start >= max)
			continue;

		if (IS_ENABLED(CONFIG_ZONE_DMA) && start < max_dma) {
			unsigned long dma_end = min(end, max_dma);
			zhole_size[ZONE_DMA] -= dma_end - start;
		}

		if (end > max_dma) {
			unsigned long normal_end = min(end, max);
			unsigned long normal_start = max(start, max_dma);
			zhole_size[ZONE_NORMAL] -= normal_end - normal_start;
		}
	}

	free_area_init_node(0, zone_size, min, zhole_size);
}
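/*
 * A pfn is valid only if it falls inside a memblock memory region; the
 * PFN_MASK check first rejects values with bits above the physical range.
 */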
#ifdef CONFIG_HAVE_ARCH_PFN_VALID
#define PFN_MASK ((1UL << (64 - PAGE_SHIFT)) - 1)

int pfn_valid(unsigned long pfn)
{
	return (pfn & PFN_MASK) == pfn && memblock_is_memory(pfn << PAGE_SHIFT);
}
EXPORT_SYMBOL(pfn_valid);
#endif
#ifndef CONFIG_SPARSEMEM
static void arm64_memory_present(void)
{
}
#else
static void arm64_memory_present(void)
{
	struct memblock_region *reg;

	for_each_memblock(memory, reg)
		memory_present(0, memblock_region_memory_base_pfn(reg),
			       memblock_region_memory_end_pfn(reg));
}
#endif
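/*
 * arm64_memblock_steal() carves a contiguous region out of memblock before
 * the linear map and mem_map are built, so the stolen range stays invisible
 * to the rest of the kernel. Stealing is only permitted until
 * arm64_memblock_init() completes.
 */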
static bool arm64_memblock_steal_permitted = true;

phys_addr_t __init arm64_memblock_steal(phys_addr_t size, phys_addr_t align)
{
	phys_addr_t phys;

	BUG_ON(!arm64_memblock_steal_permitted);

	phys = memblock_alloc_base(size, align, MEMBLOCK_ALLOC_ANYWHERE);
	memblock_free(phys, size);
	memblock_remove(phys, size);

	MTK_MEMCFG_LOG_AND_PRINTK(KERN_ALERT "[PHY layout]%ps : 0x%08llx - 0x%08llx (0x%08llx)\n",
			__builtin_return_address(0), (unsigned long long)phys,
			(unsigned long long)phys + size - 1,
			(unsigned long long)size);

	return phys;
}
#ifdef CONFIG_MTK_COMBO_CHIP
extern void mtk_wcn_consys_memory_reserve(void);
void __weak mtk_wcn_consys_memory_reserve(void)
{
	printk(KERN_ERR "weak reserve function: %s", __FUNCTION__);
}
#endif
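/*
 * Reserve everything that must survive into the page allocator: the kernel
 * image, the initrd, the boot page tables, the DTB and its memreserve map,
 * and the MediaTek platform carve-outs (CCCI, connectivity, RAM console,
 * mrdump).
 */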
void __init arm64_memblock_init(void)
{
	u64 *reserve_map, base, size;

	/* Register the kernel text, kernel data and initrd with memblock */
	memblock_reserve(__pa(_text), _end - _text);
#ifdef CONFIG_BLK_DEV_INITRD
	if (phys_initrd_size) {
		memblock_reserve(phys_initrd_start, phys_initrd_size);

		/* Now convert initrd to virtual addresses */
		initrd_start = __phys_to_virt(phys_initrd_start);
		initrd_end = initrd_start + phys_initrd_size;
	}
#endif

	/*
	 * Reserve the page tables.  These are already in use,
	 * and can only be in node 0.
	 */
	memblock_reserve(__pa(swapper_pg_dir), SWAPPER_DIR_SIZE);
	memblock_reserve(__pa(idmap_pg_dir), IDMAP_DIR_SIZE);

	/* Reserve the dtb region */
	memblock_reserve(virt_to_phys(initial_boot_params),
			 be32_to_cpu(initial_boot_params->totalsize));

	/*
	 * Process the reserve map.  This will probably overlap the initrd
	 * and dtb locations which are already reserved, but overlapping
	 * doesn't hurt anything
	 */
	reserve_map = ((void *)initial_boot_params) +
			be32_to_cpu(initial_boot_params->off_mem_rsvmap);
	while (1) {
		base = be64_to_cpup(reserve_map++);
		size = be64_to_cpup(reserve_map++);
		if (!size)
			break;
		memblock_reserve(base, size);
	}

#ifdef CONFIG_MTK_ECCCI_DRIVER
	{
		extern void ccci_md_mem_reserve(void);
		ccci_md_mem_reserve();
	}
#endif

#ifdef CONFIG_MTK_COMBO_CHIP
	mtk_wcn_consys_memory_reserve();
#endif

#if defined(CONFIG_MTK_RAM_CONSOLE_USING_DRAM)
	memblock_reserve(CONFIG_MTK_RAM_CONSOLE_DRAM_ADDR,
			 CONFIG_MTK_RAM_CONSOLE_DRAM_SIZE);
#endif

	mrdump_reserve_memory();

	mrdump_mini_reserve_memory();

	arm64_memblock_steal_permitted = false;
	early_init_fdt_scan_reserved_mem();
	memblock_allow_resize();
}
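/*
 * With all reservations in place, register memory with sparsemem, size the
 * zones, and record the top of memory in high_memory and max_pfn.
 */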
void __init bootmem_init(void)
{
	unsigned long min, max;

	min = PFN_UP(memblock_start_of_DRAM());
	max = PFN_DOWN(memblock_end_of_DRAM());

	/*
	 * Sparsemem tries to allocate bootmem in memory_present(), so must be
	 * done after the fixed reservations.
	 */
	arm64_memory_present();

	sparse_init();
	zone_sizes_init(min, max);

	high_memory = __va((max << PAGE_SHIFT) - 1) + 1;
	max_pfn = max_low_pfn = max;
}
/*
 * Poison init memory with an undefined instruction (0x0).
 */
static inline void poison_init_mem(void *s, size_t count)
{
	memset(s, 0, count);
}
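/*
 * Without a vmemmap, struct pages covering holes between memory banks still
 * occupy space in mem_map; the helpers below give those pages back to bootmem.
 */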
#ifndef CONFIG_SPARSEMEM_VMEMMAP
static inline void free_memmap(unsigned long start_pfn, unsigned long end_pfn)
{
	struct page *start_pg, *end_pg;
	unsigned long pg, pgend;

	/*
	 * Convert start_pfn/end_pfn to a struct page pointer.
	 */
	start_pg = pfn_to_page(start_pfn - 1) + 1;
	end_pg = pfn_to_page(end_pfn - 1) + 1;

	/*
	 * Convert to physical addresses, and round start upwards and end
	 * downwards.
	 */
	pg = (unsigned long)PAGE_ALIGN(__pa(start_pg));
	pgend = (unsigned long)__pa(end_pg) & PAGE_MASK;

	/*
	 * If there are free pages between these, free the section of the
	 * memmap array.
	 */
	if (pg < pgend)
		free_bootmem(pg, pgend - pg);
}
/*
 * The mem_map array can get very big. Free the unused area of the memory map.
 */
static void __init free_unused_memmap(void)
{
	unsigned long start, prev_end = 0;
	struct memblock_region *reg;

	for_each_memblock(memory, reg) {
		start = __phys_to_pfn(reg->base);

#ifdef CONFIG_SPARSEMEM
		/*
		 * Take care not to free memmap entries that don't exist due
		 * to SPARSEMEM sections which aren't present.
		 */
		start = min(start, ALIGN(prev_end, PAGES_PER_SECTION));
#endif
		/*
		 * If we had a previous bank, and there is a space between the
		 * current bank and the previous, free it.
		 */
		if (prev_end && prev_end < start)
			free_memmap(prev_end, start);

		/*
		 * Align up here since the VM subsystem insists that the
		 * memmap entries are valid from the bank end aligned to
		 * MAX_ORDER_NR_PAGES.
		 */
		prev_end = ALIGN(__phys_to_pfn(reg->base + reg->size),
				 MAX_ORDER_NR_PAGES);
	}

#ifdef CONFIG_SPARSEMEM
	if (!IS_ALIGNED(prev_end, PAGES_PER_SECTION))
		free_memmap(prev_end, ALIGN(prev_end, PAGES_PER_SECTION));
#endif
}
#endif	/* !CONFIG_SPARSEMEM_VMEMMAP */
/*
 * mem_init() marks the free areas in the mem_map and tells us how much memory
 * is free.  This is done after various parts of the system have claimed their
 * memory after the kernel image.
 */
void __init mem_init(void)
{
	unsigned long reserved_pages, free_pages;
	struct memblock_region *reg;

	arm64_swiotlb_init();

	max_mapnr = pfn_to_page(max_pfn + PHYS_PFN_OFFSET) - mem_map;

#ifndef CONFIG_SPARSEMEM_VMEMMAP
	/* this will put all unused low memory onto the freelists */
	free_unused_memmap();
#endif

	totalram_pages += free_all_bootmem();

	reserved_pages = free_pages = 0;
	/* Count reserved and free pages across all memblock regions. */
	for_each_memblock(memory, reg) {
		unsigned int pfn1, pfn2;
		struct page *page, *end;

		pfn1 = __phys_to_pfn(reg->base);
		pfn2 = pfn1 + __phys_to_pfn(reg->size);

		page = pfn_to_page(pfn1);
		end = pfn_to_page(pfn2 - 1) + 1;

		do {
			if (PageReserved(page))
				reserved_pages++;
			else if (!page_count(page))
				free_pages++;
			page++;
		} while (page < end);
	}
	/*
	 * Since our memory may not be contiguous, calculate the real number
	 * of pages we have in this system.
	 */
	pr_info("Memory:");
	num_physpages = 0;
	for_each_memblock(memory, reg) {
		unsigned long pages = memblock_region_memory_end_pfn(reg) -
			memblock_region_memory_base_pfn(reg);
		num_physpages += pages;
		printk(" %ldMB", pages >> (20 - PAGE_SHIFT));
	}
	printk(" = %luMB total\n", num_physpages >> (20 - PAGE_SHIFT));
	pr_notice("Memory: %luk/%luk available, %luk reserved\n",
		  nr_free_pages() << (PAGE_SHIFT-10),
		  free_pages << (PAGE_SHIFT-10),
		  reserved_pages << (PAGE_SHIFT-10));
#define MLK(b, t) b, t, ((t) - (b)) >> 10
#define MLM(b, t) b, t, ((t) - (b)) >> 20
#define MLK_ROUNDUP(b, t) b, t, DIV_ROUND_UP(((t) - (b)), SZ_1K)

	MTK_MEMCFG_LOG_AND_PRINTK(KERN_NOTICE "Virtual kernel memory layout:\n"
		   "    vmalloc : 0x%16lx - 0x%16lx   (%6ld MB)\n"
#ifdef CONFIG_SPARSEMEM_VMEMMAP
		   "    vmemmap : 0x%16lx - 0x%16lx   (%6ld MB)\n"
#endif
		   "    modules : 0x%16lx - 0x%16lx   (%6ld MB)\n"
		   "    memory  : 0x%16lx - 0x%16lx   (%6ld MB)\n"
		   "      .init : 0x%p" " - 0x%p" "   (%6ld kB)\n"
		   "      .text : 0x%p" " - 0x%p" "   (%6ld kB)\n"
		   "      .data : 0x%p" " - 0x%p" "   (%6ld kB)\n",
		   MLM(VMALLOC_START, VMALLOC_END),
#ifdef CONFIG_SPARSEMEM_VMEMMAP
		   MLM((unsigned long)virt_to_page(PAGE_OFFSET),
		       (unsigned long)virt_to_page(high_memory)),
#endif
		   MLM(MODULES_VADDR, MODULES_END),
		   MLM(PAGE_OFFSET, (unsigned long)high_memory),
		   MLK_ROUNDUP(__init_begin, __init_end),
		   MLK_ROUNDUP(_text, _etext),
		   MLK_ROUNDUP(_sdata, _edata));

#undef MLK
#undef MLM
#undef MLK_ROUNDUP
	/*
	 * Check boundaries twice: Some fundamental inconsistencies can be
	 * detected at build time already.
	 */
#ifdef CONFIG_COMPAT
	BUILD_BUG_ON(TASK_SIZE_32 > TASK_SIZE_64);
#endif
	BUILD_BUG_ON(TASK_SIZE_64 > MODULES_VADDR);
	BUG_ON(TASK_SIZE_64 > MODULES_VADDR);

	if (PAGE_SIZE >= 16384 && num_physpages <= 128) {
		extern int sysctl_overcommit_memory;
		/*
		 * On a machine this small we won't get anywhere without
		 * overcommit, so turn it on by default.
		 */
		sysctl_overcommit_memory = OVERCOMMIT_ALWAYS;
	}
}
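/*
 * Poison and release the .init sections once boot has finished with them.
 */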
void free_initmem(void)
{
	poison_init_mem(__init_begin, __init_end - __init_begin);
	free_initmem_default(0);
}
#ifdef CONFIG_BLK_DEV_INITRD

static int keep_initrd;

void free_initrd_mem(unsigned long start, unsigned long end)
{
	if (!keep_initrd) {
		poison_init_mem((void *)start, PAGE_ALIGN(end) - start);
		free_reserved_area(start, end, 0, "initrd");
	}
}
static int __init keepinitrd_setup(char *__unused)
{
	keep_initrd = 1;
	return 1;
}

__setup("keepinitrd", keepinitrd_setup);

#endif /* CONFIG_BLK_DEV_INITRD */