Merge tag 'v3.8-rc5' into x86/mm
author     H. Peter Anvin <hpa@linux.intel.com>
           Sat, 26 Jan 2013 00:31:21 +0000 (16:31 -0800)
committer  H. Peter Anvin <hpa@linux.intel.com>
           Sat, 26 Jan 2013 00:31:21 +0000 (16:31 -0800)
The __pa() fixup series that follows touches KVM code that is not
present in the existing branch based on v3.7-rc5, so merge in the
current upstream from Linus.

Signed-off-by: H. Peter Anvin <hpa@linux.intel.com>
20 files changed:
arch/x86/include/asm/page.h
arch/x86/include/asm/page_32.h
arch/x86/include/asm/page_64.h
arch/x86/include/asm/page_64_types.h
arch/x86/include/asm/pgtable_64.h
arch/x86/kernel/acpi/sleep.c
arch/x86/kernel/apic/apic_numachip.c
arch/x86/kernel/cpu/intel.c
arch/x86/kernel/ftrace.c
arch/x86/kernel/head32.c
arch/x86/kernel/head64.c
arch/x86/kernel/setup.c
arch/x86/kernel/x8664_ksyms_64.c
arch/x86/lguest/boot.c
arch/x86/mm/init_64.c
arch/x86/mm/pageattr.c
arch/x86/mm/pgtable.c
arch/x86/mm/physaddr.c
arch/x86/platform/efi/efi.c
arch/x86/realmode/init.c

diff --git a/arch/x86/include/asm/page.h b/arch/x86/include/asm/page.h
index 8ca82839288a35e326f43fbac24a8384eb497652..3698a6a0a940cdcf607cab91c0c67b7231922213 100644
@@ -44,7 +44,8 @@ static inline void copy_user_page(void *to, void *from, unsigned long vaddr,
  * case properly. Once all supported versions of gcc understand it, we can
  * remove this Voodoo magic stuff. (i.e. once gcc3.x is deprecated)
  */
-#define __pa_symbol(x) __pa(__phys_reloc_hide((unsigned long)(x)))
+#define __pa_symbol(x) \
+       __phys_addr_symbol(__phys_reloc_hide((unsigned long)(x)))
 
 #define __va(x)                        ((void *)((unsigned long)(x)+PAGE_OFFSET))
 
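The net effect of the page.h change above is that __pa_symbol() no longer funnels through the general __pa()/__phys_addr() path but through a dedicated helper that only understands kernel-image addresses. As a rough stand-alone illustration of the two translations, assuming a 64-bit build, with placeholder constants and helper names (pa_symbol, pa, fake_phys_base) that merely stand in for the real __START_KERNEL_map, PAGE_OFFSET and phys_base:

    /* Illustration only -- not kernel code; constants are placeholders. */
    #include <stdio.h>

    #define FAKE_START_KERNEL_MAP 0xffffffff80000000UL
    #define FAKE_PAGE_OFFSET      0xffff880000000000UL
    static unsigned long fake_phys_base = 0x1000000UL;  /* relocated load base */

    /* __pa_symbol()-style translation: valid only for kernel-image symbols */
    static unsigned long pa_symbol(unsigned long x)
    {
            return x - FAKE_START_KERNEL_MAP + fake_phys_base;
    }

    /* __pa()-style translation: must also handle the direct mapping */
    static unsigned long pa(unsigned long x)
    {
            if (x >= FAKE_START_KERNEL_MAP)
                    return x - FAKE_START_KERNEL_MAP + fake_phys_base;
            return x - FAKE_PAGE_OFFSET;
    }

    int main(void)
    {
            unsigned long sym = FAKE_START_KERNEL_MAP + 0x2000; /* a "symbol" */
            unsigned long lin = FAKE_PAGE_OFFSET + 0x345000;    /* direct map */

            printf("pa_symbol(sym) = %#lx\n", pa_symbol(sym));  /* 0x1002000 */
            printf("pa(sym)        = %#lx\n", pa(sym));         /* 0x1002000 */
            printf("pa(lin)        = %#lx\n", pa(lin));         /* 0x345000  */
            return 0;
    }

Splitting the symbol case out lets CONFIG_DEBUG_VIRTUAL apply a tighter bounds check to symbols than to direct-map addresses, as the physaddr.c hunk below shows.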
diff --git a/arch/x86/include/asm/page_32.h b/arch/x86/include/asm/page_32.h
index da4e762406f7da5913080fb5394b9d5b1197965e..4d550d04b6099bf444e8291ffdfde9538a4e47df 100644
@@ -15,6 +15,7 @@ extern unsigned long __phys_addr(unsigned long);
 #else
 #define __phys_addr(x)         __phys_addr_nodebug(x)
 #endif
+#define __phys_addr_symbol(x)  __phys_addr(x)
 #define __phys_reloc_hide(x)   RELOC_HIDE((x), 0)
 
 #ifdef CONFIG_FLATMEM
diff --git a/arch/x86/include/asm/page_64.h b/arch/x86/include/asm/page_64.h
index 072694ed81a57317aff966a63ca778f2b2f4ba73..0f1ddee6a0ceb66f2d97fd42c4e53acd05300c3d 100644
@@ -3,4 +3,40 @@
 
 #include <asm/page_64_types.h>
 
+#ifndef __ASSEMBLY__
+
+/* duplicated to the one in bootmem.h */
+extern unsigned long max_pfn;
+extern unsigned long phys_base;
+
+static inline unsigned long __phys_addr_nodebug(unsigned long x)
+{
+       unsigned long y = x - __START_KERNEL_map;
+
+       /* use the carry flag to determine if x was < __START_KERNEL_map */
+       x = y + ((x > y) ? phys_base : (__START_KERNEL_map - PAGE_OFFSET));
+
+       return x;
+}
+
+#ifdef CONFIG_DEBUG_VIRTUAL
+extern unsigned long __phys_addr(unsigned long);
+extern unsigned long __phys_addr_symbol(unsigned long);
+#else
+#define __phys_addr(x)         __phys_addr_nodebug(x)
+#define __phys_addr_symbol(x) \
+       ((unsigned long)(x) - __START_KERNEL_map + phys_base)
+#endif
+
+#define __phys_reloc_hide(x)   (x)
+
+#ifdef CONFIG_FLATMEM
+#define pfn_valid(pfn)          ((pfn) < max_pfn)
+#endif
+
+void clear_page(void *page);
+void copy_page(void *to, void *from);
+
+#endif /* !__ASSEMBLY__ */
+
 #endif /* _ASM_X86_PAGE_64_H */
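The new __phys_addr_nodebug() above avoids an explicit range compare: the subtraction x - __START_KERNEL_map wraps around for addresses below __START_KERNEL_map, so the single test (x > y) distinguishes the two cases, and on x86 the compiler can reduce it to a check of the carry flag. A quick user-space sketch of just that trick, with an illustrative constant (MAP_BASE) and helper name standing in for the kernel's:

    /* Sketch of the wrap-around ("carry flag") test used by
     * __phys_addr_nodebug(); not kernel code, constants are illustrative. */
    #include <stdio.h>

    #define MAP_BASE 0xffffffff80000000UL  /* stands in for __START_KERNEL_map */

    static int is_kernel_image_addr(unsigned long x)
    {
            unsigned long y = x - MAP_BASE;

            /* if x < MAP_BASE the subtraction wraps, making y larger than x */
            return x > y;
    }

    int main(void)
    {
            printf("%d\n", is_kernel_image_addr(MAP_BASE + 0x1234));    /* 1 */
            printf("%d\n", is_kernel_image_addr(0xffff880000000000UL)); /* 0 */
            return 0;
    }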
diff --git a/arch/x86/include/asm/page_64_types.h b/arch/x86/include/asm/page_64_types.h
index 320f7bb95f762d1b5383895b7152ff9385000148..8b491e66eaa89cb93e37feefe7b78cb7d86abb04 100644
 #define KERNEL_IMAGE_SIZE      (512 * 1024 * 1024)
 #define KERNEL_IMAGE_START     _AC(0xffffffff80000000, UL)
 
-#ifndef __ASSEMBLY__
-void clear_page(void *page);
-void copy_page(void *to, void *from);
-
-/* duplicated to the one in bootmem.h */
-extern unsigned long max_pfn;
-extern unsigned long phys_base;
-
-extern unsigned long __phys_addr(unsigned long);
-#define __phys_reloc_hide(x)   (x)
-
-#define vmemmap ((struct page *)VMEMMAP_START)
-
-extern void init_extra_mapping_uc(unsigned long phys, unsigned long size);
-extern void init_extra_mapping_wb(unsigned long phys, unsigned long size);
-
-#endif /* !__ASSEMBLY__ */
-
-#ifdef CONFIG_FLATMEM
-#define pfn_valid(pfn)          ((pfn) < max_pfn)
-#endif
-
 #endif /* _ASM_X86_PAGE_64_DEFS_H */
diff --git a/arch/x86/include/asm/pgtable_64.h b/arch/x86/include/asm/pgtable_64.h
index 47356f9df82e411ca3aa4e0025611d37451064b1..b5d30ad3902249e66de408a98ede477738c74f4d 100644
@@ -183,6 +183,11 @@ extern void cleanup_highmap(void);
 
 #define __HAVE_ARCH_PTE_SAME
 
+#define vmemmap ((struct page *)VMEMMAP_START)
+
+extern void init_extra_mapping_uc(unsigned long phys, unsigned long size);
+extern void init_extra_mapping_wb(unsigned long phys, unsigned long size);
+
 #endif /* !__ASSEMBLY__ */
 
 #endif /* _ASM_X86_PGTABLE_64_H */
diff --git a/arch/x86/kernel/acpi/sleep.c b/arch/x86/kernel/acpi/sleep.c
index d5e0d717005abd1f9b610ff1ce39beaefaa080f0..0532f5d6e4efc66c264d1e7d47d44e615b833be3 100644
@@ -69,7 +69,7 @@ int acpi_suspend_lowlevel(void)
 
 #ifndef CONFIG_64BIT
        header->pmode_entry = (u32)&wakeup_pmode_return;
-       header->pmode_cr3 = (u32)__pa(&initial_page_table);
+       header->pmode_cr3 = (u32)__pa_symbol(initial_page_table);
        saved_magic = 0x12345678;
 #else /* CONFIG_64BIT */
 #ifdef CONFIG_SMP
diff --git a/arch/x86/kernel/apic/apic_numachip.c b/arch/x86/kernel/apic/apic_numachip.c
index 9c2aa89a11cbf8d4a124d25bdea25d726cedba17..9a9110918ca719b3a0d2b6a7917899d2762a24bb 100644
@@ -28,6 +28,7 @@
 #include <asm/apic.h>
 #include <asm/ipi.h>
 #include <asm/apic_flat_64.h>
+#include <asm/pgtable.h>
 
 static int numachip_system __read_mostly;
 
diff --git a/arch/x86/kernel/cpu/intel.c b/arch/x86/kernel/cpu/intel.c
index fcaabd0432c5dda0fa5e3c8f8b37473ee177c1d8..fdfefa27b94832fb672651d922c185ed4fa218d3 100644
@@ -168,7 +168,7 @@ int __cpuinit ppro_with_ram_bug(void)
 #ifdef CONFIG_X86_F00F_BUG
 static void __cpuinit trap_init_f00f_bug(void)
 {
-       __set_fixmap(FIX_F00F_IDT, __pa(&idt_table), PAGE_KERNEL_RO);
+       __set_fixmap(FIX_F00F_IDT, __pa_symbol(idt_table), PAGE_KERNEL_RO);
 
        /*
         * Update the IDT descriptor and reload the IDT so that
diff --git a/arch/x86/kernel/ftrace.c b/arch/x86/kernel/ftrace.c
index 1d414029f1d800ff67ae21bd2e74794ad06efa32..42a392a9fd02fd3c0e306e3a4f4da323444dc3fd 100644
@@ -89,7 +89,7 @@ do_ftrace_mod_code(unsigned long ip, const void *new_code)
         * kernel identity mapping to modify code.
         */
        if (within(ip, (unsigned long)_text, (unsigned long)_etext))
-               ip = (unsigned long)__va(__pa(ip));
+               ip = (unsigned long)__va(__pa_symbol(ip));
 
        return probe_kernel_write((void *)ip, new_code, MCOUNT_INSN_SIZE);
 }
@@ -279,7 +279,7 @@ static int ftrace_write(unsigned long ip, const char *val, int size)
         * kernel identity mapping to modify code.
         */
        if (within(ip, (unsigned long)_text, (unsigned long)_etext))
-               ip = (unsigned long)__va(__pa(ip));
+               ip = (unsigned long)__va(__pa_symbol(ip));
 
        return probe_kernel_write((void *)ip, val, size);
 }
diff --git a/arch/x86/kernel/head32.c b/arch/x86/kernel/head32.c
index c18f59d10101cefc82f6638bcca758a480b70cc3..e175548329915e23ddef8f6dae39fdfe68d73100 100644
@@ -30,8 +30,8 @@ static void __init i386_default_early_setup(void)
 
 void __init i386_start_kernel(void)
 {
-       memblock_reserve(__pa_symbol(&_text),
-                        __pa_symbol(&__bss_stop) - __pa_symbol(&_text));
+       memblock_reserve(__pa_symbol(_text),
+                        (unsigned long)__bss_stop - (unsigned long)_text);
 
 #ifdef CONFIG_BLK_DEV_INITRD
        /* Reserve INITRD */
diff --git a/arch/x86/kernel/head64.c b/arch/x86/kernel/head64.c
index 037df57a99ac34d5ba1a5cab5abaa4554e0689d0..7b215a50ec1e21efb7c520325d0d039c1ba24844 100644
@@ -97,8 +97,8 @@ void __init x86_64_start_reservations(char *real_mode_data)
 {
        copy_bootdata(__va(real_mode_data));
 
-       memblock_reserve(__pa_symbol(&_text),
-                        __pa_symbol(&__bss_stop) - __pa_symbol(&_text));
+       memblock_reserve(__pa_symbol(_text),
+                        (unsigned long)__bss_stop - (unsigned long)_text);
 
 #ifdef CONFIG_BLK_DEV_INITRD
        /* Reserve INITRD */
diff --git a/arch/x86/kernel/setup.c b/arch/x86/kernel/setup.c
index 00f6c1472b850472e5f9759dd5ad9613f6c026be..8354399b3aae21082d94cb6478cf29ca93e4ee84 100644
@@ -296,8 +296,8 @@ static void __init cleanup_highmap(void)
 static void __init reserve_brk(void)
 {
        if (_brk_end > _brk_start)
-               memblock_reserve(__pa(_brk_start),
-                                __pa(_brk_end) - __pa(_brk_start));
+               memblock_reserve(__pa_symbol(_brk_start),
+                                _brk_end - _brk_start);
 
        /* Mark brk area as locked down and no longer taking any
           new allocations */
@@ -835,12 +835,12 @@ void __init setup_arch(char **cmdline_p)
        init_mm.end_data = (unsigned long) _edata;
        init_mm.brk = _brk_end;
 
-       code_resource.start = virt_to_phys(_text);
-       code_resource.end = virt_to_phys(_etext)-1;
-       data_resource.start = virt_to_phys(_etext);
-       data_resource.end = virt_to_phys(_edata)-1;
-       bss_resource.start = virt_to_phys(&__bss_start);
-       bss_resource.end = virt_to_phys(&__bss_stop)-1;
+       code_resource.start = __pa_symbol(_text);
+       code_resource.end = __pa_symbol(_etext)-1;
+       data_resource.start = __pa_symbol(_etext);
+       data_resource.end = __pa_symbol(_edata)-1;
+       bss_resource.start = __pa_symbol(__bss_start);
+       bss_resource.end = __pa_symbol(__bss_stop)-1;
 
 #ifdef CONFIG_CMDLINE_BOOL
 #ifdef CONFIG_CMDLINE_OVERRIDE
diff --git a/arch/x86/kernel/x8664_ksyms_64.c b/arch/x86/kernel/x8664_ksyms_64.c
index 1330dd10295031544d08788320524fb917b889bc..b014d9414d085611db5d46cc8ee797d39a97ba90 100644
@@ -59,6 +59,9 @@ EXPORT_SYMBOL(memcpy);
 EXPORT_SYMBOL(__memcpy);
 EXPORT_SYMBOL(memmove);
 
+#ifndef CONFIG_DEBUG_VIRTUAL
+EXPORT_SYMBOL(phys_base);
+#endif
 EXPORT_SYMBOL(empty_zero_page);
 #ifndef CONFIG_PARAVIRT
 EXPORT_SYMBOL(native_load_gs_index);
diff --git a/arch/x86/lguest/boot.c b/arch/x86/lguest/boot.c
index df4176cdbb321e8223dfdffb056b35981c3c63f8..1cbd89ca5569f2ce5d76a4bed1f21d6d52ed63e5 100644
@@ -552,7 +552,8 @@ static void lguest_write_cr3(unsigned long cr3)
        current_cr3 = cr3;
 
        /* These two page tables are simple, linear, and used during boot */
-       if (cr3 != __pa(swapper_pg_dir) && cr3 != __pa(initial_page_table))
+       if (cr3 != __pa_symbol(swapper_pg_dir) &&
+           cr3 != __pa_symbol(initial_page_table))
                cr3_changed = true;
 }
 
diff --git a/arch/x86/mm/init_64.c b/arch/x86/mm/init_64.c
index 2ead3c8a4c8419da92a61fbba35eb695e5205dde..287c6d6a9ef1ff1ba140801f875443e092c8c550 100644
@@ -772,12 +772,10 @@ void set_kernel_text_ro(void)
 void mark_rodata_ro(void)
 {
        unsigned long start = PFN_ALIGN(_text);
-       unsigned long rodata_start =
-               ((unsigned long)__start_rodata + PAGE_SIZE - 1) & PAGE_MASK;
+       unsigned long rodata_start = PFN_ALIGN(__start_rodata);
        unsigned long end = (unsigned long) &__end_rodata_hpage_align;
-       unsigned long text_end = PAGE_ALIGN((unsigned long) &__stop___ex_table);
-       unsigned long rodata_end = PAGE_ALIGN((unsigned long) &__end_rodata);
-       unsigned long data_start = (unsigned long) &_sdata;
+       unsigned long text_end = PFN_ALIGN(&__stop___ex_table);
+       unsigned long rodata_end = PFN_ALIGN(&__end_rodata);
 
        printk(KERN_INFO "Write protecting the kernel read-only data: %luk\n",
               (end - start) >> 10);
@@ -802,12 +800,12 @@ void mark_rodata_ro(void)
 #endif
 
        free_init_pages("unused kernel memory",
-                       (unsigned long) page_address(virt_to_page(text_end)),
-                       (unsigned long)
-                                page_address(virt_to_page(rodata_start)));
+                       (unsigned long) __va(__pa_symbol(text_end)),
+                       (unsigned long) __va(__pa_symbol(rodata_start)));
+
        free_init_pages("unused kernel memory",
-                       (unsigned long) page_address(virt_to_page(rodata_end)),
-                       (unsigned long) page_address(virt_to_page(data_start)));
+                       (unsigned long) __va(__pa_symbol(rodata_end)),
+                       (unsigned long) __va(__pa_symbol(_sdata)));
 }
 
 #endif
diff --git a/arch/x86/mm/pageattr.c b/arch/x86/mm/pageattr.c
index a718e0d23503fdc4bb3149d4ad5c7046458f2a57..40f92f3aea542cc995726ff67fd4939c4a130a0e 100644
@@ -94,12 +94,12 @@ static inline void split_page_count(int level) { }
 
 static inline unsigned long highmap_start_pfn(void)
 {
-       return __pa(_text) >> PAGE_SHIFT;
+       return __pa_symbol(_text) >> PAGE_SHIFT;
 }
 
 static inline unsigned long highmap_end_pfn(void)
 {
-       return __pa(roundup(_brk_end, PMD_SIZE)) >> PAGE_SHIFT;
+       return __pa_symbol(roundup(_brk_end, PMD_SIZE)) >> PAGE_SHIFT;
 }
 
 #endif
@@ -276,8 +276,8 @@ static inline pgprot_t static_protections(pgprot_t prot, unsigned long address,
         * The .rodata section needs to be read-only. Using the pfn
         * catches all aliases.
         */
-       if (within(pfn, __pa((unsigned long)__start_rodata) >> PAGE_SHIFT,
-                  __pa((unsigned long)__end_rodata) >> PAGE_SHIFT))
+       if (within(pfn, __pa_symbol(__start_rodata) >> PAGE_SHIFT,
+                  __pa_symbol(__end_rodata) >> PAGE_SHIFT))
                pgprot_val(forbidden) |= _PAGE_RW;
 
 #if defined(CONFIG_X86_64) && defined(CONFIG_DEBUG_RODATA)
diff --git a/arch/x86/mm/pgtable.c b/arch/x86/mm/pgtable.c
index e27fbf887f3ba2b0fb41595710a9d8d9882aed1f..193350b51f90af74508a8ec97217085bf3acdbab 100644
@@ -334,7 +334,12 @@ int pmdp_set_access_flags(struct vm_area_struct *vma,
        if (changed && dirty) {
                *pmdp = entry;
                pmd_update_defer(vma->vm_mm, address, pmdp);
-               flush_tlb_range(vma, address, address + HPAGE_PMD_SIZE);
+               /*
+                * We had a write-protection fault here and changed the pmd
+                * to be more permissive. No need to flush the TLB for that,
+                * #PF is architecturally guaranteed to do that and in the
+                * worst-case we'll generate a spurious fault.
+                */
        }
 
        return changed;
diff --git a/arch/x86/mm/physaddr.c b/arch/x86/mm/physaddr.c
index d2e2735327b4b2ed8f65dd06895aabc71e67e4d3..c73feddd518d475f94756b84b453d992f66c36ab 100644
@@ -8,33 +8,54 @@
 
 #ifdef CONFIG_X86_64
 
+#ifdef CONFIG_DEBUG_VIRTUAL
 unsigned long __phys_addr(unsigned long x)
 {
-       if (x >= __START_KERNEL_map) {
-               x -= __START_KERNEL_map;
-               VIRTUAL_BUG_ON(x >= KERNEL_IMAGE_SIZE);
-               x += phys_base;
+       unsigned long y = x - __START_KERNEL_map;
+
+       /* use the carry flag to determine if x was < __START_KERNEL_map */
+       if (unlikely(x > y)) {
+               x = y + phys_base;
+
+               VIRTUAL_BUG_ON(y >= KERNEL_IMAGE_SIZE);
        } else {
-               VIRTUAL_BUG_ON(x < PAGE_OFFSET);
-               x -= PAGE_OFFSET;
-               VIRTUAL_BUG_ON(!phys_addr_valid(x));
+               x = y + (__START_KERNEL_map - PAGE_OFFSET);
+
+               /* carry flag will be set if starting x was >= PAGE_OFFSET */
+               VIRTUAL_BUG_ON((x > y) || !phys_addr_valid(x));
        }
+
        return x;
 }
 EXPORT_SYMBOL(__phys_addr);
 
+unsigned long __phys_addr_symbol(unsigned long x)
+{
+       unsigned long y = x - __START_KERNEL_map;
+
+       /* only check upper bounds since lower bounds will trigger carry */
+       VIRTUAL_BUG_ON(y >= KERNEL_IMAGE_SIZE);
+
+       return y + phys_base;
+}
+EXPORT_SYMBOL(__phys_addr_symbol);
+#endif
+
 bool __virt_addr_valid(unsigned long x)
 {
-       if (x >= __START_KERNEL_map) {
-               x -= __START_KERNEL_map;
-               if (x >= KERNEL_IMAGE_SIZE)
+       unsigned long y = x - __START_KERNEL_map;
+
+       /* use the carry flag to determine if x was < __START_KERNEL_map */
+       if (unlikely(x > y)) {
+               x = y + phys_base;
+
+               if (y >= KERNEL_IMAGE_SIZE)
                        return false;
-               x += phys_base;
        } else {
-               if (x < PAGE_OFFSET)
-                       return false;
-               x -= PAGE_OFFSET;
-               if (!phys_addr_valid(x))
+               x = y + (__START_KERNEL_map - PAGE_OFFSET);
+
+               /* carry flag will be set if starting x was >= PAGE_OFFSET */
+               if ((x > y) || !phys_addr_valid(x))
                        return false;
        }
 
diff --git a/arch/x86/platform/efi/efi.c b/arch/x86/platform/efi/efi.c
index ad4439145f858314dfe518cf7cd9336c5fe9c96d..1b600266265e4892eb06004642caf3e737699396 100644
@@ -410,8 +410,8 @@ void __init efi_reserve_boot_services(void)
                 * - Not within any part of the kernel
                 * - Not the bios reserved area
                */
-               if ((start+size >= virt_to_phys(_text)
-                               && start <= virt_to_phys(_end)) ||
+               if ((start+size >= __pa_symbol(_text)
+                               && start <= __pa_symbol(_end)) ||
                        !e820_all_mapped(start, start+size, E820_RAM) ||
                        memblock_is_region_reserved(start, size)) {
                        /* Could not reserve, skip it */
diff --git a/arch/x86/realmode/init.c b/arch/x86/realmode/init.c
index cbca565af5bd5113db16274000b003550f12e52b..80450261215c50b8c62b3fd559b0dd7a82f6275d 100644
@@ -62,9 +62,9 @@ void __init setup_real_mode(void)
                __va(real_mode_header->trampoline_header);
 
 #ifdef CONFIG_X86_32
-       trampoline_header->start = __pa(startup_32_smp);
+       trampoline_header->start = __pa_symbol(startup_32_smp);
        trampoline_header->gdt_limit = __BOOT_DS + 7;
-       trampoline_header->gdt_base = __pa(boot_gdt);
+       trampoline_header->gdt_base = __pa_symbol(boot_gdt);
 #else
        /*
         * Some AMD processors will #GP(0) if EFER.LMA is set in WRMSR
@@ -78,8 +78,8 @@ void __init setup_real_mode(void)
        *trampoline_cr4_features = read_cr4();
 
        trampoline_pgd = (u64 *) __va(real_mode_header->trampoline_pgd);
-       trampoline_pgd[0] = __pa(level3_ident_pgt) + _KERNPG_TABLE;
-       trampoline_pgd[511] = __pa(level3_kernel_pgt) + _KERNPG_TABLE;
+       trampoline_pgd[0] = __pa_symbol(level3_ident_pgt) + _KERNPG_TABLE;
+       trampoline_pgd[511] = __pa_symbol(level3_kernel_pgt) + _KERNPG_TABLE;
 #endif
 }