mm, dax: convert vmf_insert_pfn_pmd() to pfn_t
author	Dan Williams <dan.j.williams@intel.com>
Sat, 16 Jan 2016 00:56:43 +0000 (16:56 -0800)
committer	Linus Torvalds <torvalds@linux-foundation.org>
Sat, 16 Jan 2016 01:56:32 +0000 (17:56 -0800)
Similar to the conversion of vm_insert_mixed(), use pfn_t in
vmf_insert_pfn_pmd() to tag the resulting pmd with _PAGE_DEVMAP when the
pfn is backed by a devm_memremap_pages() mapping.
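
For illustration only (not part of this patch), a minimal caller-side
sketch of the new interface; 'raw_pfn', 'result' and the surrounding
fault-handler state (vma, address, pmd, write) are placeholders, and
PFN_MAP is assumed from the pfn_t flag set (only PFN_DEV appears in this
diff).  insert_pfn_pmd() checks pfn_t_devmap() and applies
pmd_mkdevmap() to the entry:

	/* build a devmap-flagged pfn_t and install it as a huge pmd */
	pfn_t pfn = __pfn_to_pfn_t(raw_pfn, PFN_DEV | PFN_MAP);

	result |= vmf_insert_pfn_pmd(vma, address, pmd, pfn, write);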

Signed-off-by: Dan Williams <dan.j.williams@intel.com>
Cc: Dave Hansen <dave@sr71.net>
Cc: Matthew Wilcox <willy@linux.intel.com>
Cc: Alexander Viro <viro@zeniv.linux.org.uk>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
arch/x86/include/asm/pgtable.h
arch/x86/mm/pat.c
fs/dax.c
include/asm-generic/pgtable.h
include/linux/huge_mm.h
include/linux/pfn_t.h
mm/huge_memory.c
mm/memory.c

index 4c668f15a53227c43ed0d547ba8d6c0faa9c42cd..6585a8b10fea5d393a25ef1ec0b3b3ce3aff7e90 100644
--- a/arch/x86/include/asm/pgtable.h
+++ b/arch/x86/include/asm/pgtable.h
@@ -286,6 +286,11 @@ static inline pmd_t pmd_mkdirty(pmd_t pmd)
        return pmd_set_flags(pmd, _PAGE_DIRTY | _PAGE_SOFT_DIRTY);
 }
 
+static inline pmd_t pmd_mkdevmap(pmd_t pmd)
+{
+       return pmd_set_flags(pmd, _PAGE_DEVMAP);
+}
+
 static inline pmd_t pmd_mkhuge(pmd_t pmd)
 {
        return pmd_set_flags(pmd, _PAGE_PSE);
index 031782e7423197ab4dbadb857eeed3349bcde396..f4ae536b0914db1db521feabe09ae0ab681a4b87 100644
--- a/arch/x86/mm/pat.c
+++ b/arch/x86/mm/pat.c
@@ -12,6 +12,7 @@
 #include <linux/debugfs.h>
 #include <linux/kernel.h>
 #include <linux/module.h>
+#include <linux/pfn_t.h>
 #include <linux/slab.h>
 #include <linux/mm.h>
 #include <linux/fs.h>
@@ -949,7 +950,7 @@ int track_pfn_remap(struct vm_area_struct *vma, pgprot_t *prot,
 }
 
 int track_pfn_insert(struct vm_area_struct *vma, pgprot_t *prot,
-                    unsigned long pfn)
+                    pfn_t pfn)
 {
        enum page_cache_mode pcm;
 
@@ -957,7 +958,7 @@ int track_pfn_insert(struct vm_area_struct *vma, pgprot_t *prot,
                return 0;
 
        /* Set prot based on lookup */
-       pcm = lookup_memtype((resource_size_t)pfn << PAGE_SHIFT);
+       pcm = lookup_memtype(pfn_t_to_phys(pfn));
        *prot = __pgprot((pgprot_val(vma->vm_page_prot) & (~_PAGE_CACHE_MASK)) |
                         cachemode2protval(pcm));
 
index 574763eed8a361444f555c3f2745a9f7eaf3181f..96ac3072463dfd9d7bb8a8a0f5f65e221f70049d 100644
--- a/fs/dax.c
+++ b/fs/dax.c
@@ -693,7 +693,7 @@ int __dax_pmd_fault(struct vm_area_struct *vma, unsigned long address,
                dax_unmap_atomic(bdev, &dax);
 
                result |= vmf_insert_pfn_pmd(vma, address, pmd,
-                               pfn_t_to_pfn(dax.pfn), write);
+                               dax.pfn, write);
        }
 
  out:
index af0a6cc81635e6aa02fb3aafe0b878b863be6b12..0b3c0d39ef753053bb26c1b9fb4979e706240a58 100644
--- a/include/asm-generic/pgtable.h
+++ b/include/asm-generic/pgtable.h
@@ -1,6 +1,8 @@
 #ifndef _ASM_GENERIC_PGTABLE_H
 #define _ASM_GENERIC_PGTABLE_H
 
+#include <linux/pfn.h>
+
 #ifndef __ASSEMBLY__
 #ifdef CONFIG_MMU
 
@@ -549,7 +551,7 @@ static inline int track_pfn_remap(struct vm_area_struct *vma, pgprot_t *prot,
  * by vm_insert_pfn().
  */
 static inline int track_pfn_insert(struct vm_area_struct *vma, pgprot_t *prot,
-                                  unsigned long pfn)
+                                  pfn_t pfn)
 {
        return 0;
 }
@@ -584,7 +586,7 @@ extern int track_pfn_remap(struct vm_area_struct *vma, pgprot_t *prot,
                           unsigned long pfn, unsigned long addr,
                           unsigned long size);
 extern int track_pfn_insert(struct vm_area_struct *vma, pgprot_t *prot,
-                           unsigned long pfn);
+                           pfn_t pfn);
 extern int track_pfn_copy(struct vm_area_struct *vma);
 extern void untrack_pfn(struct vm_area_struct *vma, unsigned long pfn,
                        unsigned long size);
index 0160201993d4a4247805cc5fcb98a1a3bf91a179..8ca35a131904db4a6fa39dca298d5636e24f7585 100644
--- a/include/linux/huge_mm.h
+++ b/include/linux/huge_mm.h
@@ -37,7 +37,7 @@ extern int change_huge_pmd(struct vm_area_struct *vma, pmd_t *pmd,
                        unsigned long addr, pgprot_t newprot,
                        int prot_numa);
 int vmf_insert_pfn_pmd(struct vm_area_struct *, unsigned long addr, pmd_t *,
-                       unsigned long pfn, bool write);
+                       pfn_t pfn, bool write);
 
 enum transparent_hugepage_flag {
        TRANSPARENT_HUGEPAGE_FLAG,
index bdaa275d7623e7b55b5d0b09dff43ff8e593c8c8..0703b5360d31b5fa599a541bce9881941a170281 100644
--- a/include/linux/pfn_t.h
+++ b/include/linux/pfn_t.h
@@ -77,6 +77,13 @@ static inline pte_t pfn_t_pte(pfn_t pfn, pgprot_t pgprot)
 }
 #endif
 
+#ifdef CONFIG_TRANSPARENT_HUGEPAGE
+static inline pmd_t pfn_t_pmd(pfn_t pfn, pgprot_t pgprot)
+{
+       return pfn_pmd(pfn_t_to_pfn(pfn), pgprot);
+}
+#endif
+
 #ifdef __HAVE_ARCH_PTE_DEVMAP
 static inline bool pfn_t_devmap(pfn_t pfn)
 {
@@ -90,5 +97,6 @@ static inline bool pfn_t_devmap(pfn_t pfn)
        return false;
 }
 pte_t pte_mkdevmap(pte_t pte);
+pmd_t pmd_mkdevmap(pmd_t pmd);
 #endif
 #endif /* _LINUX_PFN_T_H_ */
index 996e86dbeb435aefc2d88e215d5ebfde8cb86e9f..d93706013a5564a1fca71c005a598a1b24188c48 100644
--- a/mm/huge_memory.c
+++ b/mm/huge_memory.c
@@ -21,6 +21,7 @@
 #include <linux/kthread.h>
 #include <linux/khugepaged.h>
 #include <linux/freezer.h>
+#include <linux/pfn_t.h>
 #include <linux/mman.h>
 #include <linux/pagemap.h>
 #include <linux/debugfs.h>
@@ -931,14 +932,16 @@ int do_huge_pmd_anonymous_page(struct mm_struct *mm, struct vm_area_struct *vma,
 }
 
 static void insert_pfn_pmd(struct vm_area_struct *vma, unsigned long addr,
-               pmd_t *pmd, unsigned long pfn, pgprot_t prot, bool write)
+               pmd_t *pmd, pfn_t pfn, pgprot_t prot, bool write)
 {
        struct mm_struct *mm = vma->vm_mm;
        pmd_t entry;
        spinlock_t *ptl;
 
        ptl = pmd_lock(mm, pmd);
-       entry = pmd_mkhuge(pfn_pmd(pfn, prot));
+       entry = pmd_mkhuge(pfn_t_pmd(pfn, prot));
+       if (pfn_t_devmap(pfn))
+               entry = pmd_mkdevmap(entry);
        if (write) {
                entry = pmd_mkyoung(pmd_mkdirty(entry));
                entry = maybe_pmd_mkwrite(entry, vma);
@@ -949,7 +952,7 @@ static void insert_pfn_pmd(struct vm_area_struct *vma, unsigned long addr,
 }
 
 int vmf_insert_pfn_pmd(struct vm_area_struct *vma, unsigned long addr,
-                       pmd_t *pmd, unsigned long pfn, bool write)
+                       pmd_t *pmd, pfn_t pfn, bool write)
 {
        pgprot_t pgprot = vma->vm_page_prot;
        /*
@@ -961,7 +964,7 @@ int vmf_insert_pfn_pmd(struct vm_area_struct *vma, unsigned long addr,
        BUG_ON((vma->vm_flags & (VM_PFNMAP|VM_MIXEDMAP)) ==
                                                (VM_PFNMAP|VM_MIXEDMAP));
        BUG_ON((vma->vm_flags & VM_PFNMAP) && is_cow_mapping(vma->vm_flags));
-       BUG_ON((vma->vm_flags & VM_MIXEDMAP) && pfn_valid(pfn));
+       BUG_ON(!pfn_t_devmap(pfn));
 
        if (addr < vma->vm_start || addr >= vma->vm_end)
                return VM_FAULT_SIGBUS;
index 7f03652723ea91044db5b3a27f82335c24dc38a9..552ae3d694354f14eefb60fd347f8ea9c377c45e 100644
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -1567,7 +1567,7 @@ int vm_insert_pfn(struct vm_area_struct *vma, unsigned long addr,
 
        if (addr < vma->vm_start || addr >= vma->vm_end)
                return -EFAULT;
-       if (track_pfn_insert(vma, &pgprot, pfn))
+       if (track_pfn_insert(vma, &pgprot, __pfn_to_pfn_t(pfn, PFN_DEV)))
                return -EINVAL;
 
        ret = insert_pfn(vma, addr, __pfn_to_pfn_t(pfn, PFN_DEV), pgprot);