/*
 * Based on arch/arm/mm/mmu.c
 *
 * Copyright (C) 1995-2005 Russell King
 * Copyright (C) 2012 ARM Ltd.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program.  If not, see <http://www.gnu.org/licenses/>.
 */

#include <linux/export.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/init.h>
#include <linux/mman.h>
#include <linux/nodemask.h>
#include <linux/memblock.h>
#include <linux/fs.h>
#include <linux/io.h>
#include <linux/slab.h>
#include <linux/stop_machine.h>

#include <asm/cputype.h>
#include <asm/fixmap.h>
#include <asm/sections.h>
#include <asm/setup.h>
#include <asm/sizes.h>
#include <asm/tlb.h>
#include <asm/memblock.h>
#include <asm/mmu_context.h>

#include "mm.h"
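
/*
 * TCR_EL1.T0SZ value used for the identity mapping; the early boot code
 * may widen the idmap (i.e. lower T0SZ) if the kernel is loaded at a
 * physical address that the default VA_BITS range cannot cover.
 */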
u64 idmap_t0sz = TCR_T0SZ(VA_BITS);

/*
 * Empty_zero_page is a special page that is used for zero-initialized data
 * and COW.
 */
struct page *empty_zero_page;
EXPORT_SYMBOL(empty_zero_page);
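
/*
 * Select the memory attributes for a raw physical mapping (e.g. /dev/mem):
 * pfns that are not RAM are mapped as device memory, O_SYNC mappings get
 * write-combine, and anything else keeps the vma's existing attributes.
 */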
pgprot_t phys_mem_access_prot(struct file *file, unsigned long pfn,
			      unsigned long size, pgprot_t vma_prot)
{
	if (!pfn_valid(pfn))
		return pgprot_noncached(vma_prot);
	else if (file->f_flags & O_SYNC)
		return pgprot_writecombine(vma_prot);
	return vma_prot;
}
EXPORT_SYMBOL(phys_mem_access_prot);
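
/*
 * Boot-time page-table allocator: naturally aligned, zeroed memory taken
 * directly from memblock, for use before the page allocator is available.
 */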
static void __init *early_alloc(unsigned long sz)
{
	void *ptr = __va(memblock_alloc(sz, sz));
	BUG_ON(!ptr);
	memset(ptr, 0, sz);
	return ptr;
}

/*
 * remap a PMD into pages
 */
static void split_pmd(pmd_t *pmd, pte_t *pte)
{
	unsigned long pfn = pmd_pfn(*pmd);
	int i = 0;

	do {
		/*
		 * Need to have the least restrictive permissions available
		 * permissions will be fixed up later
		 */
		set_pte(pte, pfn_pte(pfn, PAGE_KERNEL_EXEC));
		pfn++;
	} while (pte++, i++, i < PTRS_PER_PTE);
}
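
/*
 * Populate the pte level for [addr, end): allocate a new pte table if the
 * pmd is empty (or split an existing section), then write one page entry
 * per PAGE_SIZE step.
 */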
static void alloc_init_pte(pmd_t *pmd, unsigned long addr,
			   unsigned long end, unsigned long pfn,
			   pgprot_t prot,
			   void *(*alloc)(unsigned long size))
{
	pte_t *pte;

	if (pmd_none(*pmd) || pmd_sect(*pmd)) {
		pte = alloc(PTRS_PER_PTE * sizeof(pte_t));
		if (pmd_sect(*pmd))
			split_pmd(pmd, pte);
		__pmd_populate(pmd, __pa(pte), PMD_TYPE_TABLE);
		flush_tlb_all();
	}
	BUG_ON(pmd_bad(*pmd));

	pte = pte_offset_kernel(pmd, addr);
	do {
		set_pte(pte, pfn_pte(pfn, prot));
		pfn++;
	} while (pte++, addr += PAGE_SIZE, addr != end);
}
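
/*
 * remap a PUD into pmds: the counterpart of split_pmd() one level up,
 * rebuilding a 1GB block as PTRS_PER_PMD section entries with the same
 * output address and attributes.
 */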
static void split_pud(pud_t *old_pud, pmd_t *pmd)
{
	unsigned long addr = pud_pfn(*old_pud) << PAGE_SHIFT;
	pgprot_t prot = __pgprot(pud_val(*old_pud) ^ addr);
	int i = 0;

	do {
		set_pmd(pmd, __pmd(addr | pgprot_val(prot)));
		addr += PMD_SIZE;
	} while (pmd++, i++, i < PTRS_PER_PMD);
}
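
/*
 * Populate the pmd level, using 2MB section mappings whenever addr, next
 * and phys are all section aligned, and falling back to alloc_init_pte()
 * otherwise. A table entry superseded by a section is freed back to
 * memblock.
 */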
static void alloc_init_pmd(struct mm_struct *mm, pud_t *pud,
			   unsigned long addr, unsigned long end,
			   phys_addr_t phys, pgprot_t prot,
			   void *(*alloc)(unsigned long size))
{
	pmd_t *pmd;
	unsigned long next;

	/*
	 * Check for initial section mappings in the pgd/pud and remove them.
	 */
	if (pud_none(*pud) || pud_sect(*pud)) {
		pmd = alloc(PTRS_PER_PMD * sizeof(pmd_t));
		if (pud_sect(*pud)) {
			/*
			 * need to have the 1G of mappings continue to be
			 * present
			 */
			split_pud(pud, pmd);
		}
		pud_populate(mm, pud, pmd);
		flush_tlb_all();
	}
	BUG_ON(pud_bad(*pud));

	pmd = pmd_offset(pud, addr);
	do {
		next = pmd_addr_end(addr, end);
		/* try section mapping first */
		if (((addr | next | phys) & ~SECTION_MASK) == 0) {
			pmd_t old_pmd = *pmd;
			set_pmd(pmd, __pmd(phys |
					   pgprot_val(mk_sect_prot(prot))));
			/*
			 * Check for previous table entries created during
			 * boot (__create_page_tables) and flush them.
			 */
			if (!pmd_none(old_pmd)) {
				flush_tlb_all();
				if (pmd_table(old_pmd)) {
					phys_addr_t table = __pa(pte_offset_map(&old_pmd, 0));
					if (!WARN_ON_ONCE(slab_is_available()))
						memblock_free(table, PAGE_SIZE);
				}
			}
		} else {
			alloc_init_pte(pmd, addr, next, __phys_to_pfn(phys),
				       prot, alloc);
		}
		phys += next - addr;
	} while (pmd++, addr = next, addr != end);
}

static inline bool use_1G_block(unsigned long addr, unsigned long next,
				phys_addr_t phys)
{
	if (PAGE_SHIFT != 12)
		return false;

	if (((addr | next | phys) & ~PUD_MASK) != 0)
		return false;

	return true;
}
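
/*
 * Populate the pud level: put down a 1GB block where use_1G_block()
 * allows it, otherwise descend into alloc_init_pmd().
 */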
static void alloc_init_pud(struct mm_struct *mm, pgd_t *pgd,
			   unsigned long addr, unsigned long end,
			   phys_addr_t phys, pgprot_t prot,
			   void *(*alloc)(unsigned long size))
{
	pud_t *pud;
	unsigned long next;

	if (pgd_none(*pgd)) {
		pud = alloc(PTRS_PER_PUD * sizeof(pud_t));
		pgd_populate(mm, pgd, pud);
	}
	BUG_ON(pgd_bad(*pgd));

	pud = pud_offset(pgd, addr);
	do {
		next = pud_addr_end(addr, end);

		/*
		 * For 4K granule only, attempt to put down a 1GB block
		 */
		if (use_1G_block(addr, next, phys)) {
			pud_t old_pud = *pud;
			set_pud(pud, __pud(phys |
					   pgprot_val(mk_sect_prot(prot))));

			/*
			 * If we have an old value for a pud, it will
			 * be pointing to a pmd table that we no longer
			 * need (from swapper_pg_dir).
			 *
			 * Look up the old pmd table and free it.
			 */
			if (!pud_none(old_pud)) {
				flush_tlb_all();
				if (pud_table(old_pud)) {
					phys_addr_t table = __pa(pmd_offset(&old_pud, 0));
					if (!WARN_ON_ONCE(slab_is_available()))
						memblock_free(table, PAGE_SIZE);
				}
			}
		} else {
			alloc_init_pmd(mm, pud, addr, next, phys, prot, alloc);
		}
		phys += next - addr;
	} while (pud++, addr = next, addr != end);
}

/*
 * Create the page directory entries and any necessary page tables for the
 * mapping specified by 'phys', 'virt' and 'size'.
 */
static void __create_mapping(struct mm_struct *mm, pgd_t *pgd,
			     phys_addr_t phys, unsigned long virt,
			     phys_addr_t size, pgprot_t prot,
			     void *(*alloc)(unsigned long size))
{
	unsigned long addr, length, end, next;

	addr = virt & PAGE_MASK;
	length = PAGE_ALIGN(size + (virt & ~PAGE_MASK));

	end = addr + length;
	do {
		next = pgd_addr_end(addr, end);
		alloc_init_pud(mm, pgd, addr, next, phys, prot, alloc);
		phys += next - addr;
	} while (pgd++, addr = next, addr != end);
}
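
/*
 * Page-table allocator for the create_pgd_mapping()/create_mapping_late()
 * paths, used once the normal page allocator is up.
 */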
static void *late_alloc(unsigned long size)
{
	void *ptr;

	BUG_ON(size > PAGE_SIZE);
	ptr = (void *)__get_free_page(PGALLOC_GFP);
	BUG_ON(!ptr);
	return ptr;
}
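
/*
 * Install a kernel mapping via the early memblock allocator; only
 * addresses at or above VMALLOC_START are accepted.
 */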
static void __ref create_mapping(phys_addr_t phys, unsigned long virt,
				 phys_addr_t size, pgprot_t prot)
{
	if (virt < VMALLOC_START) {
		pr_warn("BUG: not creating mapping for %pa at 0x%016lx - outside kernel range\n",
			&phys, virt);
		return;
	}
	__create_mapping(&init_mm, pgd_offset_k(virt & PAGE_MASK), phys, virt,
			 size, prot, early_alloc);
}

void __init create_pgd_mapping(struct mm_struct *mm, phys_addr_t phys,
			       unsigned long virt, phys_addr_t size,
			       pgprot_t prot)
{
	__create_mapping(mm, pgd_offset(mm, virt), phys, virt, size, prot,
			 late_alloc);
}

static void create_mapping_late(phys_addr_t phys, unsigned long virt,
				phys_addr_t size, pgprot_t prot)
{
	if (virt < VMALLOC_START) {
		pr_warn("BUG: not creating mapping for %pa at 0x%016lx - outside kernel range\n",
			&phys, virt);
		return;
	}

	__create_mapping(&init_mm, pgd_offset_k(virt & PAGE_MASK),
			 phys, virt, size, prot, late_alloc);
}
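
/*
 * With DEBUG_RODATA, the linear mapping of each memblock is split around
 * the kernel image so that only [_stext, __init_end) (rounded out to
 * section boundaries for now) is mapped executable.
 */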
#ifdef CONFIG_DEBUG_RODATA
static void __init __map_memblock(phys_addr_t start, phys_addr_t end)
{
	/*
	 * Set up the executable regions using the existing section mappings
	 * for now. This will get more fine grained later once all memory
	 * is mapped
	 */
	unsigned long kernel_x_start = round_down(__pa(_stext), SECTION_SIZE);
	unsigned long kernel_x_end = round_up(__pa(__init_end), SECTION_SIZE);

	if (end < kernel_x_start) {
		create_mapping(start, __phys_to_virt(start),
			       end - start, PAGE_KERNEL);
	} else if (start >= kernel_x_end) {
		create_mapping(start, __phys_to_virt(start),
			       end - start, PAGE_KERNEL);
	} else {
		if (start < kernel_x_start)
			create_mapping(start, __phys_to_virt(start),
				       kernel_x_start - start,
				       PAGE_KERNEL);
		create_mapping(kernel_x_start,
			       __phys_to_virt(kernel_x_start),
			       kernel_x_end - kernel_x_start,
			       PAGE_KERNEL_EXEC);
		if (kernel_x_end < end)
			create_mapping(kernel_x_end,
				       __phys_to_virt(kernel_x_end),
				       end - kernel_x_end,
				       PAGE_KERNEL);
	}
}
#else
static void __init __map_memblock(phys_addr_t start, phys_addr_t end)
{
	create_mapping(start, __phys_to_virt(start), end - start,
		       PAGE_KERNEL_EXEC);
}
#endif

static void __init map_mem(void)
{
	struct memblock_region *reg;
	phys_addr_t limit;

	/*
	 * Temporarily limit the memblock range. We need to do this as
	 * create_mapping requires puds, pmds and ptes to be allocated from
	 * memory addressable from the initial direct kernel mapping.
	 *
	 * The initial direct kernel mapping, located at swapper_pg_dir, gives
	 * us PUD_SIZE (4K pages) or PMD_SIZE (64K pages) memory starting from
	 * PHYS_OFFSET (which must be aligned to 2MB as per
	 * Documentation/arm64/booting.txt).
	 */
	if (IS_ENABLED(CONFIG_ARM64_64K_PAGES))
		limit = PHYS_OFFSET + PMD_SIZE;
	else
		limit = PHYS_OFFSET + PUD_SIZE;
	memblock_set_current_limit(limit);

	/* map all the memory banks */
	for_each_memblock(memory, reg) {
		phys_addr_t start = reg->base;
		phys_addr_t end = start + reg->size;

		if (start >= end)
			break;

#ifndef CONFIG_ARM64_64K_PAGES
		/*
		 * For the first memory bank align the start address and
		 * current memblock limit to prevent create_mapping() from
		 * allocating pte page tables from unmapped memory.
		 * When 64K pages are enabled, the pte page table for the
		 * first PGDIR_SIZE is already present in swapper_pg_dir.
		 */
		if (start < limit)
			start = ALIGN(start, PMD_SIZE);
		if (end < limit) {
			limit = end & PMD_MASK;
			memblock_set_current_limit(limit);
		}
#endif
		__map_memblock(start, end);
	}

	/* Limit no longer required. */
	memblock_set_current_limit(MEMBLOCK_ALLOC_ANYWHERE);
}
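
/*
 * With all memory mapped, the section-aligned slack around the executable
 * window can be remapped at page granularity as non-executable PAGE_KERNEL.
 */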
void __init fixup_executable(void)
{
#ifdef CONFIG_DEBUG_RODATA
	/* now that we are actually fully mapped, make the start/end more fine grained */
	if (!IS_ALIGNED((unsigned long)_stext, SECTION_SIZE)) {
		unsigned long aligned_start = round_down(__pa(_stext),
							 SECTION_SIZE);

		create_mapping(aligned_start, __phys_to_virt(aligned_start),
			       __pa(_stext) - aligned_start,
			       PAGE_KERNEL);
	}

	if (!IS_ALIGNED((unsigned long)__init_end, SECTION_SIZE)) {
		unsigned long aligned_end = round_up(__pa(__init_end),
						     SECTION_SIZE);
		create_mapping(__pa(__init_end), (unsigned long)__init_end,
			       aligned_end - __pa(__init_end),
			       PAGE_KERNEL);
	}
#endif
}

#ifdef CONFIG_DEBUG_RODATA
void mark_rodata_ro(void)
{
	create_mapping_late(__pa(_stext), (unsigned long)_stext,
			    (unsigned long)_etext - (unsigned long)_stext,
			    PAGE_KERNEL_EXEC | PTE_RDONLY);
}
#endif

void fixup_init(void)
{
	create_mapping_late(__pa(__init_begin), (unsigned long)__init_begin,
			    (unsigned long)__init_end - (unsigned long)__init_begin,
			    PAGE_KERNEL);
}

/*
 * paging_init() sets up the page tables, initialises the zone memory
 * maps and sets up the zero page.
 */
void __init paging_init(void)
{
	void *zero_page;

	map_mem();
	fixup_executable();

	/* allocate the zero page. */
	zero_page = early_alloc(PAGE_SIZE);

	bootmem_init();

	empty_zero_page = virt_to_page(zero_page);

	/*
	 * TTBR0 is only used for the identity mapping at this stage. Make it
	 * point to zero page to avoid speculatively fetching new entries.
	 */
	cpu_set_reserved_ttbr0();
	flush_tlb_all();
	cpu_set_default_tcr_t0sz();
}

/*
 * Enable the identity mapping to allow the MMU disabling.
 */
void setup_mm_for_reboot(void)
{
	cpu_set_reserved_ttbr0();
	flush_tlb_all();
	cpu_set_idmap_tcr_t0sz();
	cpu_switch_mm(idmap_pg_dir, &init_mm);
}

/*
 * Check whether a kernel address is valid (derived from arch/x86/).
 */
int kern_addr_valid(unsigned long addr)
{
	pgd_t *pgd;
	pud_t *pud;
	pmd_t *pmd;
	pte_t *pte;

	if ((((long)addr) >> VA_BITS) != -1UL)
		return 0;

	pgd = pgd_offset_k(addr);
	if (pgd_none(*pgd))
		return 0;

	pud = pud_offset(pgd, addr);
	if (pud_none(*pud))
		return 0;

	if (pud_sect(*pud))
		return pfn_valid(pud_pfn(*pud));

	pmd = pmd_offset(pud, addr);
	if (pmd_none(*pmd))
		return 0;

	if (pmd_sect(*pmd))
		return pfn_valid(pmd_pfn(*pmd));

	pte = pte_offset_kernel(pmd, addr);
	if (pte_none(*pte))
		return 0;

	return pfn_valid(pte_pfn(*pte));
}
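
/*
 * vmemmap backing for the struct page array: base pages under 64K pages,
 * 2MB section mappings under 4K pages.
 */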
#ifdef CONFIG_SPARSEMEM_VMEMMAP
#ifdef CONFIG_ARM64_64K_PAGES
int __meminit vmemmap_populate(unsigned long start, unsigned long end, int node)
{
	return vmemmap_populate_basepages(start, end, node);
}
#else	/* !CONFIG_ARM64_64K_PAGES */
int __meminit vmemmap_populate(unsigned long start, unsigned long end, int node)
{
	unsigned long addr = start;
	unsigned long next;
	pgd_t *pgd;
	pud_t *pud;
	pmd_t *pmd;

	do {
		next = pmd_addr_end(addr, end);

		pgd = vmemmap_pgd_populate(addr, node);
		if (!pgd)
			return -ENOMEM;

		pud = vmemmap_pud_populate(pgd, addr, node);
		if (!pud)
			return -ENOMEM;

		pmd = pmd_offset(pud, addr);
		if (pmd_none(*pmd)) {
			void *p = vmemmap_alloc_block_buf(PMD_SIZE, node);

			if (!p)
				return -ENOMEM;

			set_pmd(pmd, __pmd(__pa(p) | PROT_SECT_NORMAL));
		} else
			vmemmap_verify((pte_t *)pmd, node, addr, next);
	} while (addr = next, addr != end);

	return 0;
}
#endif	/* CONFIG_ARM64_64K_PAGES */

void vmemmap_free(unsigned long start, unsigned long end)
{
}
#endif	/* CONFIG_SPARSEMEM_VMEMMAP */
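
/*
 * Statically allocated page tables backing the fixmap region; one table
 * per level suffices on the assumption that the fixmap fits within a
 * single entry at each upper level.
 */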
static pte_t bm_pte[PTRS_PER_PTE] __page_aligned_bss;
#if CONFIG_PGTABLE_LEVELS > 2
static pmd_t bm_pmd[PTRS_PER_PMD] __page_aligned_bss;
#endif
#if CONFIG_PGTABLE_LEVELS > 3
static pud_t bm_pud[PTRS_PER_PUD] __page_aligned_bss;
#endif

static inline pud_t *fixmap_pud(unsigned long addr)
{
	pgd_t *pgd = pgd_offset_k(addr);

	BUG_ON(pgd_none(*pgd) || pgd_bad(*pgd));

	return pud_offset(pgd, addr);
}

static inline pmd_t *fixmap_pmd(unsigned long addr)
{
	pud_t *pud = fixmap_pud(addr);

	BUG_ON(pud_none(*pud) || pud_bad(*pud));

	return pmd_offset(pud, addr);
}

static inline pte_t *fixmap_pte(unsigned long addr)
{
	pmd_t *pmd = fixmap_pmd(addr);

	BUG_ON(pmd_none(*pmd) || pmd_bad(*pmd));

	return pte_offset_kernel(pmd, addr);
}
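
/*
 * Hook the bm_* tables into the initial page tables so that fixmap slots
 * (and hence early ioremap users) work before paging_init() has run.
 */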
void __init early_fixmap_init(void)
{
	pgd_t *pgd;
	pud_t *pud;
	pmd_t *pmd;
	unsigned long addr = FIXADDR_START;

	pgd = pgd_offset_k(addr);
	pgd_populate(&init_mm, pgd, bm_pud);
	pud = pud_offset(pgd, addr);
	pud_populate(&init_mm, pud, bm_pmd);
	pmd = pmd_offset(pud, addr);
	pmd_populate_kernel(&init_mm, pmd, bm_pte);

	/*
	 * The boot-ioremap range spans multiple pmds, for which
	 * we are not prepared:
	 */
	BUILD_BUG_ON((__fix_to_virt(FIX_BTMAP_BEGIN) >> PMD_SHIFT)
		     != (__fix_to_virt(FIX_BTMAP_END) >> PMD_SHIFT));

	if ((pmd != fixmap_pmd(fix_to_virt(FIX_BTMAP_BEGIN)))
	     || pmd != fixmap_pmd(fix_to_virt(FIX_BTMAP_END))) {
		WARN_ON(1);
		pr_warn("pmd %p != %p, %p\n",
			pmd, fixmap_pmd(fix_to_virt(FIX_BTMAP_BEGIN)),
			fixmap_pmd(fix_to_virt(FIX_BTMAP_END)));
		pr_warn("fix_to_virt(FIX_BTMAP_BEGIN): %08lx\n",
			fix_to_virt(FIX_BTMAP_BEGIN));
		pr_warn("fix_to_virt(FIX_BTMAP_END):   %08lx\n",
			fix_to_virt(FIX_BTMAP_END));

		pr_warn("FIX_BTMAP_END:       %d\n", FIX_BTMAP_END);
		pr_warn("FIX_BTMAP_BEGIN:     %d\n", FIX_BTMAP_BEGIN);
	}
}
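
/*
 * Install or clear the pte for a single fixmap slot; clearing also
 * flushes the stale TLB entry for that address.
 */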
void __set_fixmap(enum fixed_addresses idx,
		  phys_addr_t phys, pgprot_t flags)
{
	unsigned long addr = __fix_to_virt(idx);
	pte_t *pte;

	BUG_ON(idx <= FIX_HOLE || idx >= __end_of_fixed_addresses);

	pte = fixmap_pte(addr);

	if (pgprot_val(flags)) {
		set_pte(pte, pfn_pte(phys >> PAGE_SHIFT, flags));
	} else {
		pte_clear(&init_mm, addr, pte);
		flush_tlb_kernel_range(addr, addr + PAGE_SIZE);
	}
}