x86/mm: Add sync_global_pgds() for configuration with 5-level paging
author Kirill A. Shutemov <kirill.shutemov@linux.intel.com>
Tue, 6 Jun 2017 11:31:29 +0000 (14:31 +0300)
committer Ingo Molnar <mingo@kernel.org>
Tue, 13 Jun 2017 06:56:56 +0000 (08:56 +0200)
This basically restores a slightly modified version of the original
sync_global_pgds() which we had before the folded p4d level was introduced.

The only modification is protection against 'addr' overflow.
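For context, the overflow that the new check guards against can be reproduced
with a minimal userspace sketch; ALIGN() and PGDIR_SIZE below are stand-ins
for the kernel definitions (with 5-level paging PGDIR_SHIFT is 48), and the
start address is just an illustrative value near the top of the address space:

	#include <stdio.h>

	/* Stand-ins for the kernel definitions, for illustration only. */
	#define PGDIR_SIZE	(1UL << 48)
	#define ALIGN(x, a)	(((x) + (a) - 1) & ~((a) - 1))

	int main(void)
	{
		/* An address inside the last PGD-sized slot of the 64-bit space. */
		unsigned long start = 0xffff800000000000UL;
		unsigned long addr  = ALIGN(start + 1, PGDIR_SIZE);

		/* The addition in ALIGN() wraps past ULONG_MAX, so addr ends up
		 * as 0 and 'addr < start' holds -- exactly the condition that
		 * the "Check for overflow" break in the loop catches. */
		printf("addr = %#lx, overflowed = %d\n", addr, addr < start);
		return 0;
	}

Without that break, the loop condition 'addr <= end' would remain true after
the wrap-around and the loop would never terminate.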

Signed-off-by: Kirill A. Shutemov <kirill.shutemov@linux.intel.com>
Cc: Andrew Morton <akpm@linux-foundation.org>
Cc: Andy Lutomirski <luto@amacapital.net>
Cc: Andy Lutomirski <luto@kernel.org>
Cc: Borislav Petkov <bp@alien8.de>
Cc: Brian Gerst <brgerst@gmail.com>
Cc: Dave Hansen <dave.hansen@intel.com>
Cc: Denys Vlasenko <dvlasenk@redhat.com>
Cc: H. Peter Anvin <hpa@zytor.com>
Cc: Josh Poimboeuf <jpoimboe@redhat.com>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: linux-arch@vger.kernel.org
Cc: linux-mm@kvack.org
Link: http://lkml.kernel.org/r/20170606113133.22974-11-kirill.shutemov@linux.intel.com
Signed-off-by: Ingo Molnar <mingo@kernel.org>
arch/x86/mm/init_64.c

index 95651dc58e0900d3717a462d4cfe05efffc9374e..7a9497ac468db32b215a50feaa348968b9837a4f 100644
@@ -92,6 +92,44 @@ __setup("noexec32=", nonx32_setup);
  * When memory was added make sure all the processes MM have
  * suitable PGD entries in the local PGD level page.
  */
+#ifdef CONFIG_X86_5LEVEL
+void sync_global_pgds(unsigned long start, unsigned long end)
+{
+       unsigned long addr;
+
+       for (addr = start; addr <= end; addr = ALIGN(addr + 1, PGDIR_SIZE)) {
+               const pgd_t *pgd_ref = pgd_offset_k(addr);
+               struct page *page;
+
+               /* Check for overflow */
+               if (addr < start)
+                       break;
+
+               if (pgd_none(*pgd_ref))
+                       continue;
+
+               spin_lock(&pgd_lock);
+               list_for_each_entry(page, &pgd_list, lru) {
+                       pgd_t *pgd;
+                       spinlock_t *pgt_lock;
+
+                       pgd = (pgd_t *)page_address(page) + pgd_index(addr);
+                       /* the pgt_lock only for Xen */
+                       pgt_lock = &pgd_page_get_mm(page)->page_table_lock;
+                       spin_lock(pgt_lock);
+
+                       if (!pgd_none(*pgd_ref) && !pgd_none(*pgd))
+                               BUG_ON(pgd_page_vaddr(*pgd) != pgd_page_vaddr(*pgd_ref));
+
+                       if (pgd_none(*pgd))
+                               set_pgd(pgd, *pgd_ref);
+
+                       spin_unlock(pgt_lock);
+               }
+               spin_unlock(&pgd_lock);
+       }
+}
+#else
 void sync_global_pgds(unsigned long start, unsigned long end)
 {
        unsigned long addr;
@@ -135,6 +173,7 @@ void sync_global_pgds(unsigned long start, unsigned long end)
                spin_unlock(&pgd_lock);
        }
 }
+#endif
 
 /*
  * NOTE: This function is marked __ref because it calls __init function