x86, 64bit: Don't set max_pfn_mapped wrong value early on native path
authorYinghai Lu <yinghai@kernel.org>
Thu, 24 Jan 2013 20:19:54 +0000 (12:19 -0800)
committerH. Peter Anvin <hpa@linux.intel.com>
Tue, 29 Jan 2013 23:20:16 +0000 (15:20 -0800)
max_pfn_mapped is not set correctly until init_memory_mapping(),
so don't print its initial value on 64-bit.

Also need to use KERNEL_IMAGE_SIZE directly for highmap cleanup.

-v2: update comments about max_pfn_mapped according to Stefano Stabellini.

Signed-off-by: Yinghai Lu <yinghai@kernel.org>
Link: http://lkml.kernel.org/r/1359058816-7615-14-git-send-email-yinghai@kernel.org
Acked-by: Borislav Petkov <bp@suse.de>
Signed-off-by: H. Peter Anvin <hpa@linux.intel.com>
arch/x86/kernel/head64.c
arch/x86/kernel/setup.c
arch/x86/mm/init_64.c

index 816fc85c9bb3f2c82cf1b4858317773465df0819..f3b19685918e559ac7595e4b9d8cdf4b27a948db 100644 (file)
@@ -148,9 +148,6 @@ void __init x86_64_start_kernel(char * real_mode_data)
        /* clear bss before set_intr_gate with early_idt_handler */
        clear_bss();
 
-       /* XXX - this is wrong... we need to build page tables from scratch */
-       max_pfn_mapped = KERNEL_IMAGE_SIZE >> PAGE_SHIFT;
-
        for (i = 0; i < NUM_EXCEPTION_VECTORS; i++) {
 #ifdef CONFIG_EARLY_PRINTK
                set_intr_gate(i, &early_idt_handlers[i]);
index db9c41dae8d77d962adf61e9a74d85693e8d49ea..d58083a2e1584e88bb9df8135212e3a49c53b8d3 100644 (file)
@@ -996,8 +996,10 @@ void __init setup_arch(char **cmdline_p)
        setup_bios_corruption_check();
 #endif
 
+#ifdef CONFIG_X86_32
        printk(KERN_DEBUG "initial memory mapped: [mem 0x00000000-%#010lx]\n",
                        (max_pfn_mapped<<PAGE_SHIFT) - 1);
+#endif
 
        reserve_real_mode();
 
index 9fbb85c8d9302cb5c1bfe7688c171fa77ab2ca10..dc67337d9bf0f7335607ff14c6fbb20bc7f60ade 100644 (file)
@@ -378,10 +378,18 @@ void __init init_extra_mapping_uc(unsigned long phys, unsigned long size)
 void __init cleanup_highmap(void)
 {
        unsigned long vaddr = __START_KERNEL_map;
-       unsigned long vaddr_end = __START_KERNEL_map + (max_pfn_mapped << PAGE_SHIFT);
+       unsigned long vaddr_end = __START_KERNEL_map + KERNEL_IMAGE_SIZE;
        unsigned long end = roundup((unsigned long)_brk_end, PMD_SIZE) - 1;
        pmd_t *pmd = level2_kernel_pgt;
 
+       /*
+        * Native path, max_pfn_mapped is not set yet.
+        * Xen has valid max_pfn_mapped set in
+        *      arch/x86/xen/mmu.c:xen_setup_kernel_pagetable().
+        */
+       if (max_pfn_mapped)
+               vaddr_end = __START_KERNEL_map + (max_pfn_mapped << PAGE_SHIFT);
+
        for (; vaddr + PMD_SIZE - 1 < vaddr_end; pmd++, vaddr += PMD_SIZE) {
                if (pmd_none(*pmd))
                        continue;