x86/boot/64: Rename init_level4_pgt and early_level4_pgt
author     Kirill A. Shutemov <kirill.shutemov@linux.intel.com>
           Tue, 6 Jun 2017 11:31:27 +0000 (14:31 +0300)
committer  Ingo Molnar <mingo@kernel.org>
           Tue, 13 Jun 2017 06:56:55 +0000 (08:56 +0200)
With CONFIG_X86_5LEVEL=y, level 4 is no longer the top level of the page tables.

Let's give these variables more generic names: init_top_pgt and
early_top_pgt.
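
For illustration only (not part of this patch): with the generic name, code
that indexes the kernel's top-level page table no longer needs to know whether
that table is a level-4 or a level-5 structure. A minimal sketch, reusing the
pgd_index()/__PAGE_OFFSET helpers that appear in the hunks below; the
kernel_top_entry() helper is hypothetical and exists only for this example:

    #include <asm/pgtable.h>   /* pgd_t, pgd_index(), init_top_pgt declaration */

    /*
     * Hypothetical helper: fetch the top-level entry covering a virtual
     * address. Works the same whether 4- or 5-level paging is in use,
     * which is the point of the rename.
     */
    static inline pgd_t kernel_top_entry(unsigned long vaddr)
    {
            return init_top_pgt[pgd_index(vaddr)];
    }

    /* e.g. the direct-mapping entry, as init_trampoline_default() reads it: */
    /* pgd_t e = kernel_top_entry(__PAGE_OFFSET); */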

Signed-off-by: Kirill A. Shutemov <kirill.shutemov@linux.intel.com>
Reviewed-by: Juergen Gross <jgross@suse.com>
Cc: Andrew Morton <akpm@linux-foundation.org>
Cc: Andy Lutomirski <luto@amacapital.net>
Cc: Andy Lutomirski <luto@kernel.org>
Cc: Borislav Petkov <bp@alien8.de>
Cc: Brian Gerst <brgerst@gmail.com>
Cc: Dave Hansen <dave.hansen@intel.com>
Cc: Denys Vlasenko <dvlasenk@redhat.com>
Cc: H. Peter Anvin <hpa@zytor.com>
Cc: Josh Poimboeuf <jpoimboe@redhat.com>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: linux-arch@vger.kernel.org
Cc: linux-mm@kvack.org
Link: http://lkml.kernel.org/r/20170606113133.22974-9-kirill.shutemov@linux.intel.com
Signed-off-by: Ingo Molnar <mingo@kernel.org>
arch/x86/include/asm/pgtable.h
arch/x86/include/asm/pgtable_64.h
arch/x86/kernel/espfix_64.c
arch/x86/kernel/head64.c
arch/x86/kernel/head_64.S
arch/x86/kernel/machine_kexec_64.c
arch/x86/mm/dump_pagetables.c
arch/x86/mm/kasan_init_64.c
arch/x86/realmode/init.c
arch/x86/xen/mmu_pv.c
arch/x86/xen/xen-pvh.S

diff --git a/arch/x86/include/asm/pgtable.h b/arch/x86/include/asm/pgtable.h
index 942482ac36a843ffb5a3099b78083b284802811e..77037b6f1caa22f622f50d67ea6cebd00f76f685 100644
--- a/arch/x86/include/asm/pgtable.h
+++ b/arch/x86/include/asm/pgtable.h
@@ -922,7 +922,7 @@ extern pgd_t trampoline_pgd_entry;
 static inline void __meminit init_trampoline_default(void)
 {
        /* Default trampoline pgd value */
-       trampoline_pgd_entry = init_level4_pgt[pgd_index(__PAGE_OFFSET)];
+       trampoline_pgd_entry = init_top_pgt[pgd_index(__PAGE_OFFSET)];
 }
 # ifdef CONFIG_RANDOMIZE_MEMORY
 void __meminit init_trampoline(void);
diff --git a/arch/x86/include/asm/pgtable_64.h b/arch/x86/include/asm/pgtable_64.h
index 12ea31274eb66940c4ba4cec589f631e79b3ab8a..affcb2a9c563546f73136106b34153b342d3385d 100644
--- a/arch/x86/include/asm/pgtable_64.h
+++ b/arch/x86/include/asm/pgtable_64.h
@@ -20,9 +20,9 @@ extern pmd_t level2_kernel_pgt[512];
 extern pmd_t level2_fixmap_pgt[512];
 extern pmd_t level2_ident_pgt[512];
 extern pte_t level1_fixmap_pgt[512];
-extern pgd_t init_level4_pgt[];
+extern pgd_t init_top_pgt[];
 
-#define swapper_pg_dir init_level4_pgt
+#define swapper_pg_dir init_top_pgt
 
 extern void paging_init(void);
 
diff --git a/arch/x86/kernel/espfix_64.c b/arch/x86/kernel/espfix_64.c
index 8e598a1ad986c91d5a13222a41bc87183cd20d06..6b91e2eb8d3f8a5b8ad1a57c7a2a47d0a3b4440d 100644
--- a/arch/x86/kernel/espfix_64.c
+++ b/arch/x86/kernel/espfix_64.c
@@ -125,7 +125,7 @@ void __init init_espfix_bsp(void)
        p4d_t *p4d;
 
        /* Install the espfix pud into the kernel page directory */
-       pgd = &init_level4_pgt[pgd_index(ESPFIX_BASE_ADDR)];
+       pgd = &init_top_pgt[pgd_index(ESPFIX_BASE_ADDR)];
        p4d = p4d_alloc(&init_mm, pgd, ESPFIX_BASE_ADDR);
        p4d_populate(&init_mm, p4d, espfix_pud_page);
 
diff --git a/arch/x86/kernel/head64.c b/arch/x86/kernel/head64.c
index 1f2a499929c3f18cfe7d13e6b418ba59c6adb554..71ca01b6cc590541629eba42a2c796f340626eae 100644
--- a/arch/x86/kernel/head64.c
+++ b/arch/x86/kernel/head64.c
@@ -33,7 +33,7 @@
 /*
  * Manage page tables very early on.
  */
-extern pgd_t early_level4_pgt[PTRS_PER_PGD];
+extern pgd_t early_top_pgt[PTRS_PER_PGD];
 extern pmd_t early_dynamic_pgts[EARLY_DYNAMIC_PAGE_TABLES][PTRS_PER_PMD];
 static unsigned int __initdata next_early_pgt;
 pmdval_t early_pmd_flags = __PAGE_KERNEL_LARGE & ~(_PAGE_GLOBAL | _PAGE_NX);
@@ -67,7 +67,7 @@ void __init __startup_64(unsigned long physaddr)
 
        /* Fixup the physical addresses in the page table */
 
-       pgd = fixup_pointer(&early_level4_pgt, physaddr);
+       pgd = fixup_pointer(&early_top_pgt, physaddr);
        pgd[pgd_index(__START_KERNEL_map)] += load_delta;
 
        pud = fixup_pointer(&level3_kernel_pgt, physaddr);
@@ -124,9 +124,9 @@ void __init __startup_64(unsigned long physaddr)
 /* Wipe all early page tables except for the kernel symbol map */
 static void __init reset_early_page_tables(void)
 {
-       memset(early_level4_pgt, 0, sizeof(pgd_t)*(PTRS_PER_PGD-1));
+       memset(early_top_pgt, 0, sizeof(pgd_t)*(PTRS_PER_PGD-1));
        next_early_pgt = 0;
-       write_cr3(__pa_nodebug(early_level4_pgt));
+       write_cr3(__pa_nodebug(early_top_pgt));
 }
 
 /* Create a new PMD entry */
@@ -138,12 +138,11 @@ int __init early_make_pgtable(unsigned long address)
        pmdval_t pmd, *pmd_p;
 
        /* Invalid address or early pgt is done ?  */
-       if (physaddr >= MAXMEM ||
-           read_cr3_pa() != __pa_nodebug(early_level4_pgt))
+       if (physaddr >= MAXMEM || read_cr3_pa() != __pa_nodebug(early_top_pgt))
                return -1;
 
 again:
-       pgd_p = &early_level4_pgt[pgd_index(address)].pgd;
+       pgd_p = &early_top_pgt[pgd_index(address)].pgd;
        pgd = *pgd_p;
 
        /*
@@ -240,7 +239,7 @@ asmlinkage __visible void __init x86_64_start_kernel(char * real_mode_data)
 
        clear_bss();
 
-       clear_page(init_level4_pgt);
+       clear_page(init_top_pgt);
 
        kasan_early_init();
 
@@ -255,8 +254,8 @@ asmlinkage __visible void __init x86_64_start_kernel(char * real_mode_data)
         */
        load_ucode_bsp();
 
-       /* set init_level4_pgt kernel high mapping*/
-       init_level4_pgt[511] = early_level4_pgt[511];
+       /* set init_top_pgt kernel high mapping*/
+       init_top_pgt[511] = early_top_pgt[511];
 
        x86_64_start_reservations(real_mode_data);
 }
diff --git a/arch/x86/kernel/head_64.S b/arch/x86/kernel/head_64.S
index 1432d530fa35e3cbfab4235e462e725c0c9b2db3..0ae0bad4d4d52794011632763812f9886d64869e 100644
--- a/arch/x86/kernel/head_64.S
+++ b/arch/x86/kernel/head_64.S
@@ -77,7 +77,7 @@ startup_64:
        call    __startup_64
        popq    %rsi
 
-       movq    $(early_level4_pgt - __START_KERNEL_map), %rax
+       movq    $(early_top_pgt - __START_KERNEL_map), %rax
        jmp 1f
 ENTRY(secondary_startup_64)
        /*
@@ -97,7 +97,7 @@ ENTRY(secondary_startup_64)
        /* Sanitize CPU configuration */
        call verify_cpu
 
-       movq    $(init_level4_pgt - __START_KERNEL_map), %rax
+       movq    $(init_top_pgt - __START_KERNEL_map), %rax
 1:
 
        /* Enable PAE mode and PGE */
@@ -328,7 +328,7 @@ GLOBAL(name)
        .endr
 
        __INITDATA
-NEXT_PAGE(early_level4_pgt)
+NEXT_PAGE(early_top_pgt)
        .fill   511,8,0
        .quad   level3_kernel_pgt - __START_KERNEL_map + _PAGE_TABLE
 
@@ -338,14 +338,14 @@ NEXT_PAGE(early_dynamic_pgts)
        .data
 
 #ifndef CONFIG_XEN
-NEXT_PAGE(init_level4_pgt)
+NEXT_PAGE(init_top_pgt)
        .fill   512,8,0
 #else
-NEXT_PAGE(init_level4_pgt)
+NEXT_PAGE(init_top_pgt)
        .quad   level3_ident_pgt - __START_KERNEL_map + _KERNPG_TABLE
-       .org    init_level4_pgt + L4_PAGE_OFFSET*8, 0
+       .org    init_top_pgt + L4_PAGE_OFFSET*8, 0
        .quad   level3_ident_pgt - __START_KERNEL_map + _KERNPG_TABLE
-       .org    init_level4_pgt + L4_START_KERNEL*8, 0
+       .org    init_top_pgt + L4_START_KERNEL*8, 0
        /* (2^48-(2*1024*1024*1024))/(2^39) = 511 */
        .quad   level3_kernel_pgt - __START_KERNEL_map + _PAGE_TABLE
 
diff --git a/arch/x86/kernel/machine_kexec_64.c b/arch/x86/kernel/machine_kexec_64.c
index 6f5ca4ebe6e5dd834781c678ffd39e4afd688395..cb0a30473c2310b76695c73ec6fad3cd1e7b051f 100644
--- a/arch/x86/kernel/machine_kexec_64.c
+++ b/arch/x86/kernel/machine_kexec_64.c
@@ -347,7 +347,7 @@ void machine_kexec(struct kimage *image)
 void arch_crash_save_vmcoreinfo(void)
 {
        VMCOREINFO_NUMBER(phys_base);
-       VMCOREINFO_SYMBOL(init_level4_pgt);
+       VMCOREINFO_SYMBOL(init_top_pgt);
 
 #ifdef CONFIG_NUMA
        VMCOREINFO_SYMBOL(node_data);
diff --git a/arch/x86/mm/dump_pagetables.c b/arch/x86/mm/dump_pagetables.c
index bce6990b1d81214d76ece4a0bb2b3baf628677d9..0470826d2bdca2b04bbcba902ae42f8aed728cb5 100644
--- a/arch/x86/mm/dump_pagetables.c
+++ b/arch/x86/mm/dump_pagetables.c
@@ -431,7 +431,7 @@ static void ptdump_walk_pgd_level_core(struct seq_file *m, pgd_t *pgd,
                                       bool checkwx)
 {
 #ifdef CONFIG_X86_64
-       pgd_t *start = (pgd_t *) &init_level4_pgt;
+       pgd_t *start = (pgd_t *) &init_top_pgt;
 #else
        pgd_t *start = swapper_pg_dir;
 #endif
diff --git a/arch/x86/mm/kasan_init_64.c b/arch/x86/mm/kasan_init_64.c
index 0c7d8129bed688696f5caffee90a8d9e1fb75c54..88215ac16b24bd3d721a2069eaa9a09e933ec883 100644
--- a/arch/x86/mm/kasan_init_64.c
+++ b/arch/x86/mm/kasan_init_64.c
@@ -12,7 +12,7 @@
 #include <asm/tlbflush.h>
 #include <asm/sections.h>
 
-extern pgd_t early_level4_pgt[PTRS_PER_PGD];
+extern pgd_t early_top_pgt[PTRS_PER_PGD];
 extern struct range pfn_mapped[E820_MAX_ENTRIES];
 
 static int __init map_range(struct range *range)
@@ -109,8 +109,8 @@ void __init kasan_early_init(void)
        for (i = 0; CONFIG_PGTABLE_LEVELS >= 5 && i < PTRS_PER_P4D; i++)
                kasan_zero_p4d[i] = __p4d(p4d_val);
 
-       kasan_map_early_shadow(early_level4_pgt);
-       kasan_map_early_shadow(init_level4_pgt);
+       kasan_map_early_shadow(early_top_pgt);
+       kasan_map_early_shadow(init_top_pgt);
 }
 
 void __init kasan_init(void)
@@ -121,8 +121,8 @@ void __init kasan_init(void)
        register_die_notifier(&kasan_die_notifier);
 #endif
 
-       memcpy(early_level4_pgt, init_level4_pgt, sizeof(early_level4_pgt));
-       load_cr3(early_level4_pgt);
+       memcpy(early_top_pgt, init_top_pgt, sizeof(early_top_pgt));
+       load_cr3(early_top_pgt);
        __flush_tlb_all();
 
        clear_pgds(KASAN_SHADOW_START, KASAN_SHADOW_END);
@@ -148,7 +148,7 @@ void __init kasan_init(void)
        kasan_populate_zero_shadow(kasan_mem_to_shadow((void *)MODULES_END),
                        (void *)KASAN_SHADOW_END);
 
-       load_cr3(init_level4_pgt);
+       load_cr3(init_top_pgt);
        __flush_tlb_all();
 
        /*
diff --git a/arch/x86/realmode/init.c b/arch/x86/realmode/init.c
index a163a90af4aa8f81db742cba00a833d5f9011857..cd4be19c36dc611f482ad5291d27365db47a92c5 100644
--- a/arch/x86/realmode/init.c
+++ b/arch/x86/realmode/init.c
@@ -102,7 +102,7 @@ static void __init setup_real_mode(void)
 
        trampoline_pgd = (u64 *) __va(real_mode_header->trampoline_pgd);
        trampoline_pgd[0] = trampoline_pgd_entry.pgd;
-       trampoline_pgd[511] = init_level4_pgt[511].pgd;
+       trampoline_pgd[511] = init_top_pgt[511].pgd;
 #endif
 }
 
diff --git a/arch/x86/xen/mmu_pv.c b/arch/x86/xen/mmu_pv.c
index 4f638309deea86856c8b11b31c45b00e06bd2b28..1d7a7213a3109fb3f21bba9a7a6789496b19b346 100644
--- a/arch/x86/xen/mmu_pv.c
+++ b/arch/x86/xen/mmu_pv.c
@@ -1465,8 +1465,8 @@ static void xen_write_cr3(unsigned long cr3)
  * At the start of the day - when Xen launches a guest, it has already
  * built pagetables for the guest. We diligently look over them
  * in xen_setup_kernel_pagetable and graft as appropriate them in the
- * init_level4_pgt and its friends. Then when we are happy we load
- * the new init_level4_pgt - and continue on.
+ * init_top_pgt and its friends. Then when we are happy we load
+ * the new init_top_pgt - and continue on.
  *
  * The generic code starts (start_kernel) and 'init_mem_mapping' sets
  * up the rest of the pagetables. When it has completed it loads the cr3.
@@ -1909,12 +1909,12 @@ void __init xen_setup_kernel_pagetable(pgd_t *pgd, unsigned long max_pfn)
        pt_end = pt_base + xen_start_info->nr_pt_frames;
 
        /* Zap identity mapping */
-       init_level4_pgt[0] = __pgd(0);
+       init_top_pgt[0] = __pgd(0);
 
        /* Pre-constructed entries are in pfn, so convert to mfn */
        /* L4[272] -> level3_ident_pgt  */
        /* L4[511] -> level3_kernel_pgt */
-       convert_pfn_mfn(init_level4_pgt);
+       convert_pfn_mfn(init_top_pgt);
 
        /* L3_i[0] -> level2_ident_pgt */
        convert_pfn_mfn(level3_ident_pgt);
@@ -1945,10 +1945,10 @@ void __init xen_setup_kernel_pagetable(pgd_t *pgd, unsigned long max_pfn)
        /* Copy the initial P->M table mappings if necessary. */
        i = pgd_index(xen_start_info->mfn_list);
        if (i && i < pgd_index(__START_KERNEL_map))
-               init_level4_pgt[i] = ((pgd_t *)xen_start_info->pt_base)[i];
+               init_top_pgt[i] = ((pgd_t *)xen_start_info->pt_base)[i];
 
        /* Make pagetable pieces RO */
-       set_page_prot(init_level4_pgt, PAGE_KERNEL_RO);
+       set_page_prot(init_top_pgt, PAGE_KERNEL_RO);
        set_page_prot(level3_ident_pgt, PAGE_KERNEL_RO);
        set_page_prot(level3_kernel_pgt, PAGE_KERNEL_RO);
        set_page_prot(level3_user_vsyscall, PAGE_KERNEL_RO);
@@ -1959,7 +1959,7 @@ void __init xen_setup_kernel_pagetable(pgd_t *pgd, unsigned long max_pfn)
 
        /* Pin down new L4 */
        pin_pagetable_pfn(MMUEXT_PIN_L4_TABLE,
-                         PFN_DOWN(__pa_symbol(init_level4_pgt)));
+                         PFN_DOWN(__pa_symbol(init_top_pgt)));
 
        /* Unpin Xen-provided one */
        pin_pagetable_pfn(MMUEXT_UNPIN_TABLE, PFN_DOWN(__pa(pgd)));
@@ -1969,7 +1969,7 @@ void __init xen_setup_kernel_pagetable(pgd_t *pgd, unsigned long max_pfn)
         * attach it to, so make sure we just set kernel pgd.
         */
        xen_mc_batch();
-       __xen_write_cr3(true, __pa(init_level4_pgt));
+       __xen_write_cr3(true, __pa(init_top_pgt));
        xen_mc_issue(PARAVIRT_LAZY_CPU);
 
        /* We can't that easily rip out L3 and L2, as the Xen pagetables are
diff --git a/arch/x86/xen/xen-pvh.S b/arch/x86/xen/xen-pvh.S
index 5e246716d58f1822f4dd3da028252adb8d6bff85..e1a5fbeae08d8a3bf3cb619c023bab096ad4ba2d 100644
--- a/arch/x86/xen/xen-pvh.S
+++ b/arch/x86/xen/xen-pvh.S
@@ -87,7 +87,7 @@ ENTRY(pvh_start_xen)
        wrmsr
 
        /* Enable pre-constructed page tables. */
-       mov $_pa(init_level4_pgt), %eax
+       mov $_pa(init_top_pgt), %eax
        mov %eax, %cr3
        mov $(X86_CR0_PG | X86_CR0_PE), %eax
        mov %eax, %cr0