mm: introduce __p4d_alloc()
author	Kirill A. Shutemov <kirill.shutemov@linux.intel.com>
Thu, 9 Mar 2017 14:24:08 +0000 (17:24 +0300)
committer	Linus Torvalds <torvalds@linux-foundation.org>
Thu, 9 Mar 2017 19:48:48 +0000 (11:48 -0800)
For full 5-level paging we need a helper to allocate a p4d page table.

Signed-off-by: Kirill A. Shutemov <kirill.shutemov@linux.intel.com>
Acked-by: Michal Hocko <mhocko@suse.com>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
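
[Editor's note: for context, a minimal sketch of how a slow-path helper like
__p4d_alloc() is typically consumed.  The patch's comment says the fast path is
handled inline; the wrapper below is not part of this diff and is modeled on the
existing pud_alloc()/pmd_alloc() pattern, so the names p4d_alloc(), p4d_offset()
and the exact placement are assumptions, not the committed code.]

	/*
	 * Hypothetical inline fast path pairing with __p4d_alloc(),
	 * following the pud_alloc()/pmd_alloc() convention: only when
	 * the pgd entry is still empty do we take the locked slow path.
	 */
	static inline p4d_t *p4d_alloc(struct mm_struct *mm, pgd_t *pgd,
				       unsigned long address)
	{
		return (unlikely(pgd_none(*pgd)) && __p4d_alloc(mm, pgd, address)) ?
			NULL : p4d_offset(pgd, address);
	}

A page-table walk would then call p4d_alloc(mm, pgd, address) and usually return
immediately via p4d_offset(); __p4d_alloc() below is reached only on the first
fault that needs to populate the pgd entry.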
mm/memory.c

index 7f1c2163b3cea07514b52956533feb825f99f72f..235ba51b2fbf07ffeeeb6b70d6522b4b0addb3de 100644 (file)
@@ -3906,6 +3906,29 @@ int handle_mm_fault(struct vm_area_struct *vma, unsigned long address,
 }
 EXPORT_SYMBOL_GPL(handle_mm_fault);
 
+#ifndef __PAGETABLE_P4D_FOLDED
+/*
+ * Allocate p4d page table.
+ * We've already handled the fast-path in-line.
+ */
+int __p4d_alloc(struct mm_struct *mm, pgd_t *pgd, unsigned long address)
+{
+       p4d_t *new = p4d_alloc_one(mm, address);
+       if (!new)
+               return -ENOMEM;
+
+       smp_wmb(); /* See comment in __pte_alloc */
+
+       spin_lock(&mm->page_table_lock);
+       if (pgd_present(*pgd))          /* Another has populated it */
+               p4d_free(mm, new);
+       else
+               pgd_populate(mm, pgd, new);
+       spin_unlock(&mm->page_table_lock);
+       return 0;
+}
+#endif /* __PAGETABLE_P4D_FOLDED */
+
 #ifndef __PAGETABLE_PUD_FOLDED
 /*
  * Allocate page upper directory.