x86: fix EFI mapping
author	Huang, Ying <ying.huang@intel.com>
Mon, 4 Feb 2008 15:48:06 +0000 (16:48 +0100)
committer	Ingo Molnar <mingo@elte.hu>
Mon, 4 Feb 2008 15:48:06 +0000 (16:48 +0100)
This patch updates the EFI runtime memory mapping code, making the EFI
runtime service code areas explicitly executable via set_memory_x()
instead of relying on efi_ioremap() handing back executable mappings.
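
To make the net change easier to read than the interleaved hunk, the
post-patch runtime_code_page_mkexec() from arch/x86/kernel/efi.c is
sketched here in consolidated form (comments added; the return after
the NX check is assumed from surrounding context, it is not part of
the hunk itself):

    /* Sketch: mark every EFI runtime service code region executable. */
    static void __init runtime_code_page_mkexec(void)
    {
            efi_memory_desc_t *md;
            void *p;

            /* Nothing to do if NX page protection is not available. */
            if (!(__supported_pte_mask & _PAGE_NX))
                    return;

            /* Walk the EFI memory map, one descriptor at a time. */
            for (p = memmap.map; p < memmap.map_end; p += memmap.desc_size) {
                    md = p;

                    if (md->type != EFI_RUNTIME_SERVICES_CODE)
                            continue;

                    set_memory_x(md->virt_addr, md->num_pages << EFI_PAGE_SHIFT);
            }
    }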

Signed-off-by: Huang Ying <ying.huang@intel.com>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
arch/x86/kernel/efi.c
arch/x86/kernel/efi_64.c
include/asm-x86/efi.h

diff --git a/arch/x86/kernel/efi.c b/arch/x86/kernel/efi.c
index 1411324a625ca5ae0060d49afbdbc897b9e70c68..32dd62b36ff7e200807d1e8ea20651651bd41a9b 100644
--- a/arch/x86/kernel/efi.c
+++ b/arch/x86/kernel/efi.c
@@ -379,11 +379,9 @@ void __init efi_init(void)
 #endif
 }
 
-#if defined(CONFIG_X86_64) || defined(CONFIG_X86_PAE)
 static void __init runtime_code_page_mkexec(void)
 {
        efi_memory_desc_t *md;
-       unsigned long end;
        void *p;
 
        if (!(__supported_pte_mask & _PAGE_NX))
@@ -392,18 +390,13 @@ static void __init runtime_code_page_mkexec(void)
        /* Make EFI runtime service code area executable */
        for (p = memmap.map; p < memmap.map_end; p += memmap.desc_size) {
                md = p;
-               end = md->phys_addr + (md->num_pages << EFI_PAGE_SHIFT);
-               if (md->type == EFI_RUNTIME_SERVICES_CODE &&
-                   (end >> PAGE_SHIFT) <= max_pfn_mapped) {
-                       set_memory_x(md->virt_addr, md->num_pages);
-                       set_memory_uc(md->virt_addr, md->num_pages);
-               }
+
+               if (md->type != EFI_RUNTIME_SERVICES_CODE)
+                       continue;
+
+               set_memory_x(md->virt_addr, md->num_pages << EFI_PAGE_SHIFT);
        }
-       __flush_tlb_all();
 }
-#else
-static inline void __init runtime_code_page_mkexec(void) { }
-#endif
 
 /*
  * This function will switch the EFI runtime services to virtual mode.
@@ -417,30 +410,40 @@ void __init efi_enter_virtual_mode(void)
 {
        efi_memory_desc_t *md;
        efi_status_t status;
-       unsigned long end;
-       void *p;
+       unsigned long size;
+       u64 end, systab;
+       void *p, *va;
 
        efi.systab = NULL;
        for (p = memmap.map; p < memmap.map_end; p += memmap.desc_size) {
                md = p;
                if (!(md->attribute & EFI_MEMORY_RUNTIME))
                        continue;
-               end = md->phys_addr + (md->num_pages << EFI_PAGE_SHIFT);
-               if ((md->attribute & EFI_MEMORY_WB) &&
-                   ((end >> PAGE_SHIFT) <= max_pfn_mapped))
-                       md->virt_addr = (unsigned long)__va(md->phys_addr);
+
+               size = md->num_pages << EFI_PAGE_SHIFT;
+               end = md->phys_addr + size;
+
+               if ((end >> PAGE_SHIFT) <= max_pfn_mapped)
+                       va = __va(md->phys_addr);
                else
-                       md->virt_addr = (unsigned long)
-                               efi_ioremap(md->phys_addr,
-                                           md->num_pages << EFI_PAGE_SHIFT);
-               if (!md->virt_addr)
+                       va = efi_ioremap(md->phys_addr, size);
+
+               if (md->attribute & EFI_MEMORY_WB)
+                       set_memory_uc(md->virt_addr, size);
+
+               md->virt_addr = (u64) (unsigned long) va;
+
+               if (!va) {
                        printk(KERN_ERR PFX "ioremap of 0x%llX failed!\n",
                               (unsigned long long)md->phys_addr);
-               if ((md->phys_addr <= (unsigned long)efi_phys.systab) &&
-                   ((unsigned long)efi_phys.systab < end))
-                       efi.systab = (efi_system_table_t *)(unsigned long)
-                               (md->virt_addr - md->phys_addr +
-                                (unsigned long)efi_phys.systab);
+                       continue;
+               }
+
+               systab = (u64) (unsigned long) efi_phys.systab;
+               if (md->phys_addr <= systab && systab < end) {
+                       systab += md->virt_addr - md->phys_addr;
+                       efi.systab = (efi_system_table_t *) (unsigned long) systab;
+               }
        }
 
        BUG_ON(!efi.systab);
diff --git a/arch/x86/kernel/efi_64.c b/arch/x86/kernel/efi_64.c
index 674f2379480f99123aafef01ac0d4e34e5e330ee..09d5c23309342779ca955e641a408657a7395ec8 100644
--- a/arch/x86/kernel/efi_64.c
+++ b/arch/x86/kernel/efi_64.c
@@ -54,10 +54,10 @@ static void __init early_mapping_set_exec(unsigned long start,
                else
                        set_pte(kpte, __pte((pte_val(*kpte) | _PAGE_NX) & \
                                            __supported_pte_mask));
-               if (level == 4)
-                       start = (start + PMD_SIZE) & PMD_MASK;
-               else
+               if (level == PG_LEVEL_4K)
                        start = (start + PAGE_SIZE) & PAGE_MASK;
+               else
+                       start = (start + PMD_SIZE) & PMD_MASK;
        }
 }
 
@@ -109,23 +109,23 @@ void __init efi_reserve_bootmem(void)
                                memmap.nr_map * memmap.desc_size);
 }
 
-void __iomem * __init efi_ioremap(unsigned long offset,
-                                 unsigned long size)
+void __iomem * __init efi_ioremap(unsigned long phys_addr, unsigned long size)
 {
        static unsigned pages_mapped;
-       unsigned long last_addr;
        unsigned i, pages;
 
-       last_addr = offset + size - 1;
-       offset &= PAGE_MASK;
-       pages = (PAGE_ALIGN(last_addr) - offset) >> PAGE_SHIFT;
+       /* phys_addr and size must be page aligned */
+       if ((phys_addr & ~PAGE_MASK) || (size & ~PAGE_MASK))
+               return NULL;
+
+       pages = size >> PAGE_SHIFT;
        if (pages_mapped + pages > MAX_EFI_IO_PAGES)
                return NULL;
 
        for (i = 0; i < pages; i++) {
                __set_fixmap(FIX_EFI_IO_MAP_FIRST_PAGE - pages_mapped,
-                            offset, PAGE_KERNEL_EXEC_NOCACHE);
-               offset += PAGE_SIZE;
+                            phys_addr, PAGE_KERNEL);
+               phys_addr += PAGE_SIZE;
                pages_mapped++;
        }
 
diff --git a/include/asm-x86/efi.h b/include/asm-x86/efi.h
index 9c68a1f098d8cb12af04bacd1564909b179aca61..ea9734b74aca235b33e98a33fbe54a99feeb9fbd 100644
--- a/include/asm-x86/efi.h
+++ b/include/asm-x86/efi.h
@@ -33,7 +33,7 @@ extern unsigned long asmlinkage efi_call_phys(void *, ...);
 #define efi_call_virt6(f, a1, a2, a3, a4, a5, a6)      \
        efi_call_virt(f, a1, a2, a3, a4, a5, a6)
 
-#define efi_ioremap(addr, size)                        ioremap(addr, size)
+#define efi_ioremap(addr, size)                        ioremap_cache(addr, size)
 
 #else /* !CONFIG_X86_32 */
 
@@ -86,7 +86,7 @@ extern u64 efi_call6(void *fp, u64 arg1, u64 arg2, u64 arg3,
        efi_call6((void *)(efi.systab->runtime->f), (u64)(a1), (u64)(a2), \
                  (u64)(a3), (u64)(a4), (u64)(a5), (u64)(a6))
 
-extern void *efi_ioremap(unsigned long offset, unsigned long size);
+extern void *efi_ioremap(unsigned long addr, unsigned long size);
 
 #endif /* CONFIG_X86_32 */