x86/mm/pageattr: Add a PMD pagetable populating function
author	Borislav Petkov <bp@suse.de>
	Thu, 31 Oct 2013 16:25:03 +0000 (17:25 +0100)
committer	Matt Fleming <matt.fleming@intel.com>
	Sat, 2 Nov 2013 11:09:20 +0000 (11:09 +0000)
Handle PMD-level mappings the same way as PUD-level ones: cover the 2M-aligned middle of the range with large PMD entries and fall back to 4K PTE mappings for any unaligned head and tail.

Signed-off-by: Borislav Petkov <bp@suse.de>
Signed-off-by: Matt Fleming <matt.fleming@intel.com>
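
The new populate_pmd() splits the requested virtual range into an unaligned 4K head, a run of 2M PMD mappings, and an unaligned 4K tail. As a rough illustration of that split (not kernel code), the following user-space sketch reuses the same arithmetic with locally defined PAGE_SHIFT/PMD_SIZE constants; the names and values are assumptions for x86-64 with 4K pages and 2M PMDs.

	/*
	 * User-space sketch of the head/body/tail split done by populate_pmd().
	 * PAGE_SHIFT/PMD_SIZE are local assumptions (x86-64, 4K pages, 2M PMDs),
	 * not the kernel's definitions.
	 */
	#include <stdio.h>

	#define PAGE_SHIFT	12
	#define PMD_SHIFT	21
	#define PMD_SIZE	(1UL << PMD_SHIFT)
	#define PMD_MASK	(~(PMD_SIZE - 1))

	static unsigned long min_ul(unsigned long a, unsigned long b)
	{
		return a < b ? a : b;
	}

	int main(void)
	{
		unsigned long start = 0x7f0000001000UL;	/* not 2M aligned */
		unsigned long num_pages = 2048;		/* 8M worth of 4K pages */
		unsigned long end = start + (num_pages << PAGE_SHIFT);
		unsigned long head = 0, body = 0, tail;

		/* Head: 4K pages up to the next 2M boundary (or the whole range). */
		if (start & (PMD_SIZE - 1)) {
			unsigned long pre_end = min_ul(end, (start + PMD_SIZE) & PMD_MASK);

			head = (pre_end - start) >> PAGE_SHIFT;
			start = pre_end;
		}

		/* Body: full 2M chunks, one large PMD entry each. */
		while (end - start >= PMD_SIZE) {
			body += PMD_SIZE >> PAGE_SHIFT;
			start += PMD_SIZE;
		}

		/* Tail: whatever is left gets 4K PTE mappings. */
		tail = (end - start) >> PAGE_SHIFT;

		printf("head %lu, body %lu (%lu PMDs), tail %lu 4K pages\n",
		       head, body, body / (PMD_SIZE >> PAGE_SHIFT), tail);
		return 0;
	}

For the example range above this prints "head 511, body 1536 (3 PMDs), tail 1 4K pages", i.e. 2048 pages in total, matching how populate_pmd() accounts cur_pages against num_pages in the patch below.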
arch/x86/mm/pageattr.c

index 81deca77b871d0896caa6bea29cf0cf90f6a7a11..968398b023c0d7c1b454c04e18b2b2f2fcb31a57 100644
@@ -666,6 +666,16 @@ static int split_large_page(pte_t *kpte, unsigned long address)
        return 0;
 }
 
+static int alloc_pte_page(pmd_t *pmd)
+{
+       pte_t *pte = (pte_t *)get_zeroed_page(GFP_KERNEL | __GFP_NOTRACK);
+       if (!pte)
+               return -1;
+
+       set_pmd(pmd, __pmd(__pa(pte) | _KERNPG_TABLE));
+       return 0;
+}
+
 static int alloc_pmd_page(pud_t *pud)
 {
        pmd_t *pmd = (pmd_t *)get_zeroed_page(GFP_KERNEL | __GFP_NOTRACK);
@@ -676,7 +686,77 @@ static int alloc_pmd_page(pud_t *pud)
        return 0;
 }
 
-#define populate_pmd(cpa, start, end, pages, pud, pgprot)      (-1)
+#define populate_pte(cpa, start, end, pages, pmd, pgprot)      do {} while (0)
+
+static int populate_pmd(struct cpa_data *cpa,
+                       unsigned long start, unsigned long end,
+                       unsigned num_pages, pud_t *pud, pgprot_t pgprot)
+{
+       unsigned int cur_pages = 0;
+       pmd_t *pmd;
+
+       /*
+        * Not on a 2M boundary?
+        */
+       if (start & (PMD_SIZE - 1)) {
+               unsigned long pre_end = start + (num_pages << PAGE_SHIFT);
+               unsigned long next_page = (start + PMD_SIZE) & PMD_MASK;
+
+               pre_end   = min_t(unsigned long, pre_end, next_page);
+               cur_pages = (pre_end - start) >> PAGE_SHIFT;
+               cur_pages = min_t(unsigned int, num_pages, cur_pages);
+
+               /*
+                * Need a PTE page?
+                */
+               pmd = pmd_offset(pud, start);
+               if (pmd_none(*pmd))
+                       if (alloc_pte_page(pmd))
+                               return -1;
+
+               populate_pte(cpa, start, pre_end, cur_pages, pmd, pgprot);
+
+               start = pre_end;
+       }
+
+       /*
+        * We mapped them all?
+        */
+       if (num_pages == cur_pages)
+               return cur_pages;
+
+       while (end - start >= PMD_SIZE) {
+
+               /*
+                * We cannot use a 1G page so allocate a PMD page if needed.
+                */
+               if (pud_none(*pud))
+                       if (alloc_pmd_page(pud))
+                               return -1;
+
+               pmd = pmd_offset(pud, start);
+
+               set_pmd(pmd, __pmd(cpa->pfn | _PAGE_PSE | massage_pgprot(pgprot)));
+
+               start     += PMD_SIZE;
+               cpa->pfn  += PMD_SIZE;
+               cur_pages += PMD_SIZE >> PAGE_SHIFT;
+       }
+
+       /*
+        * Map trailing 4K pages.
+        */
+       if (start < end) {
+               pmd = pmd_offset(pud, start);
+               if (pmd_none(*pmd))
+                       if (alloc_pte_page(pmd))
+                               return -1;
+
+               populate_pte(cpa, start, end, num_pages - cur_pages,
+                            pmd, pgprot);
+       }
+       return num_pages;
+}
 
 static int populate_pud(struct cpa_data *cpa, unsigned long start, pgd_t *pgd,
                        pgprot_t pgprot)