x86/power/64: Always create temporary identity mapping correctly
author	Rafael J. Wysocki <rafael.j.wysocki@intel.com>
	Mon, 8 Aug 2016 13:31:31 +0000 (15:31 +0200)
committer	Rafael J. Wysocki <rafael.j.wysocki@intel.com>
	Mon, 8 Aug 2016 20:04:30 +0000 (22:04 +0200)
The low-level resume-from-hibernation code on x86-64 uses
kernel_ident_mapping_init() to create the temporary identity mapping,
but that function assumes that the offset between kernel virtual
addresses and physical addresses is aligned on the PGD level.

However, with a randomized identity mapping base, it may be aligned
only on the PUD level.  If that happens, the temporary identity
mapping created by set_up_temporary_mappings() will not reflect the
actual kernel identity mapping and the image restoration will fail as
a result (leading to a kernel panic most of the time).

To fix this problem, rework kernel_ident_mapping_init() to support
unaligned offsets between KVA and PA up to the PMD level and make
set_up_temporary_mappings() use it as appropriate.

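For illustration only (not part of the patch), below is a minimal
user-space sketch of the address arithmetic the reworked
kernel_ident_mapping_init() performs: the caller passes a physical
range [pstart, pend) plus a PMD-aligned offset, the walk indexes the
page tables by virtual address (phys + offset), and each large-page
entry stores the physical address (virt - offset).  The toy table,
page-size macros and demo values are assumptions made up for this
sketch, not kernel code.

/*
 * User-space sketch of the ident-mapping address arithmetic.
 * The offset only needs to be aligned at PMD (2 MB) granularity,
 * not at PGD/PUD granularity, which is the point of the rework.
 */
#include <stdio.h>
#include <stdint.h>

#define PMD_SHIFT	21			/* 2 MB large pages, as on x86-64 */
#define PMD_SIZE	(1ULL << PMD_SHIFT)
#define PMD_MASK	(~(PMD_SIZE - 1))

static void map_range(uint64_t *toy_pmd, unsigned int nr_entries,
		      uint64_t pstart, uint64_t pend, uint64_t offset)
{
	uint64_t vaddr = (pstart + offset) & PMD_MASK;	/* walk by virtual address */
	uint64_t vend  = pend + offset;

	for (; vaddr < vend; vaddr += PMD_SIZE) {
		unsigned int idx = (vaddr >> PMD_SHIFT) % nr_entries;

		/* The entry holds the *physical* address: vaddr - offset. */
		toy_pmd[idx] = (vaddr - offset) | 0x1;	/* pretend "present" bit */
		printf("virt %#llx -> phys %#llx\n",
		       (unsigned long long)vaddr,
		       (unsigned long long)(vaddr - offset));
	}
}

int main(void)
{
	uint64_t toy_pmd[512] = { 0 };

	/* Offset aligned at PMD granularity but not at PGD granularity. */
	map_range(toy_pmd, 512, 0x100000000ULL, 0x100600000ULL,
		  0x3ff00000000ULL);
	return 0;
}
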
Reported-and-tested-by: Thomas Garnier <thgarnie@google.com>
Reported-by: Borislav Petkov <bp@suse.de>
Suggested-by: Yinghai Lu <yinghai@kernel.org>
Signed-off-by: Rafael J. Wysocki <rafael.j.wysocki@intel.com>
Acked-by: Yinghai Lu <yinghai@kernel.org>
arch/x86/include/asm/init.h
arch/x86/mm/ident_map.c
arch/x86/power/hibernate_64.c

diff --git a/arch/x86/include/asm/init.h b/arch/x86/include/asm/init.h
index 223042086f4e9aa29498d3f159aaa2b757fa766a..737da62bfeb095e875912b18e5732b7ce04cb18e 100644
--- a/arch/x86/include/asm/init.h
+++ b/arch/x86/include/asm/init.h
@@ -5,10 +5,10 @@ struct x86_mapping_info {
        void *(*alloc_pgt_page)(void *); /* allocate buf for page table */
        void *context;                   /* context for alloc_pgt_page */
        unsigned long pmd_flag;          /* page flag for PMD entry */
-       bool kernel_mapping;             /* kernel mapping or ident mapping */
+       unsigned long offset;            /* ident mapping offset */
 };
 
 int kernel_ident_mapping_init(struct x86_mapping_info *info, pgd_t *pgd_page,
-                               unsigned long addr, unsigned long end);
+                               unsigned long pstart, unsigned long pend);
 
 #endif /* _ASM_X86_INIT_H */
diff --git a/arch/x86/mm/ident_map.c b/arch/x86/mm/ident_map.c
index ec21796ac5fd5a95f5e386af57a67d2e4ba51f61..4473cb4f8b906dcae083a4b29c38b72d9b7d56d3 100644
--- a/arch/x86/mm/ident_map.c
+++ b/arch/x86/mm/ident_map.c
@@ -3,15 +3,17 @@
  * included by both the compressed kernel and the regular kernel.
  */
 
-static void ident_pmd_init(unsigned long pmd_flag, pmd_t *pmd_page,
+static void ident_pmd_init(struct x86_mapping_info *info, pmd_t *pmd_page,
                           unsigned long addr, unsigned long end)
 {
        addr &= PMD_MASK;
        for (; addr < end; addr += PMD_SIZE) {
                pmd_t *pmd = pmd_page + pmd_index(addr);
 
-               if (!pmd_present(*pmd))
-                       set_pmd(pmd, __pmd(addr | pmd_flag));
+               if (pmd_present(*pmd))
+                       continue;
+
+               set_pmd(pmd, __pmd((addr - info->offset) | info->pmd_flag));
        }
 }
 
@@ -30,13 +32,13 @@ static int ident_pud_init(struct x86_mapping_info *info, pud_t *pud_page,
 
                if (pud_present(*pud)) {
                        pmd = pmd_offset(pud, 0);
-                       ident_pmd_init(info->pmd_flag, pmd, addr, next);
+                       ident_pmd_init(info, pmd, addr, next);
                        continue;
                }
                pmd = (pmd_t *)info->alloc_pgt_page(info->context);
                if (!pmd)
                        return -ENOMEM;
-               ident_pmd_init(info->pmd_flag, pmd, addr, next);
+               ident_pmd_init(info, pmd, addr, next);
                set_pud(pud, __pud(__pa(pmd) | _KERNPG_TABLE));
        }
 
@@ -44,14 +46,15 @@ static int ident_pud_init(struct x86_mapping_info *info, pud_t *pud_page,
 }
 
 int kernel_ident_mapping_init(struct x86_mapping_info *info, pgd_t *pgd_page,
-                             unsigned long addr, unsigned long end)
+                             unsigned long pstart, unsigned long pend)
 {
+       unsigned long addr = pstart + info->offset;
+       unsigned long end = pend + info->offset;
        unsigned long next;
        int result;
-       int off = info->kernel_mapping ? pgd_index(__PAGE_OFFSET) : 0;
 
        for (; addr < end; addr = next) {
-               pgd_t *pgd = pgd_page + pgd_index(addr) + off;
+               pgd_t *pgd = pgd_page + pgd_index(addr);
                pud_t *pud;
 
                next = (addr & PGDIR_MASK) + PGDIR_SIZE;
diff --git a/arch/x86/power/hibernate_64.c b/arch/x86/power/hibernate_64.c
index f0b5f2d402afb15f639be87f9581c44c7298bdef..a3e3ccc87138a7d44e6fe12114403b25a2d4affa 100644
--- a/arch/x86/power/hibernate_64.c
+++ b/arch/x86/power/hibernate_64.c
@@ -87,7 +87,7 @@ static int set_up_temporary_mappings(void)
        struct x86_mapping_info info = {
                .alloc_pgt_page = alloc_pgt_page,
                .pmd_flag       = __PAGE_KERNEL_LARGE_EXEC,
-               .kernel_mapping = true,
+               .offset         = __PAGE_OFFSET,
        };
        unsigned long mstart, mend;
        pgd_t *pgd;