mm, dax: change pmd_fault() to take only vmf parameter
authorDave Jiang <dave.jiang@intel.com>
Wed, 22 Feb 2017 23:40:06 +0000 (15:40 -0800)
committerLinus Torvalds <torvalds@linux-foundation.org>
Thu, 23 Feb 2017 00:41:26 +0000 (16:41 -0800)
pmd_fault() and related functions really only need the vmf parameter since
the additional parameters are all included in the vmf struct.  Remove the
additional parameters and simplify pmd_fault() and friends.

Link: http://lkml.kernel.org/r/1484085142-2297-8-git-send-email-ross.zwisler@linux.intel.com
Signed-off-by: Dave Jiang <dave.jiang@intel.com>
Reviewed-by: Ross Zwisler <ross.zwisler@linux.intel.com>
Reviewed-by: Jan Kara <jack@suse.cz>
Cc: Dave Chinner <david@fromorbit.com>
Cc: Dave Jiang <dave.jiang@intel.com>
Cc: Matthew Wilcox <mawilcox@microsoft.com>
Cc: Steven Rostedt <rostedt@goodmis.org>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
drivers/dax/dax.c
fs/dax.c
fs/ext4/file.c
fs/xfs/xfs_file.c
include/linux/dax.h
include/linux/mm.h
include/trace/events/fs_dax.h
mm/memory.c

index a8833cc3569731a5bd05e2d56d5cae24e3f100ba..18e9875f627711b6970bece1a6270901dc6a1497 100644 (file)
@@ -472,8 +472,7 @@ static int dax_dev_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
        return rc;
 }
 
-static int __dax_dev_pmd_fault(struct dax_dev *dax_dev,
-               struct vm_area_struct *vma, struct vm_fault *vmf)
+static int __dax_dev_pmd_fault(struct dax_dev *dax_dev, struct vm_fault *vmf)
 {
        unsigned long pmd_addr = vmf->address & PMD_MASK;
        struct device *dev = &dax_dev->dev;
@@ -482,7 +481,7 @@ static int __dax_dev_pmd_fault(struct dax_dev *dax_dev,
        pgoff_t pgoff;
        pfn_t pfn;
 
-       if (check_vma(dax_dev, vma, __func__))
+       if (check_vma(dax_dev, vmf->vma, __func__))
                return VM_FAULT_SIGBUS;
 
        dax_region = dax_dev->region;
@@ -497,7 +496,7 @@ static int __dax_dev_pmd_fault(struct dax_dev *dax_dev,
                return VM_FAULT_SIGBUS;
        }
 
-       pgoff = linear_page_index(vma, pmd_addr);
+       pgoff = linear_page_index(vmf->vma, pmd_addr);
        phys = pgoff_to_phys(dax_dev, pgoff, PMD_SIZE);
        if (phys == -1) {
                dev_dbg(dev, "%s: phys_to_pgoff(%#lx) failed\n", __func__,
@@ -507,22 +506,23 @@ static int __dax_dev_pmd_fault(struct dax_dev *dax_dev,
 
        pfn = phys_to_pfn_t(phys, dax_region->pfn_flags);
 
-       return vmf_insert_pfn_pmd(vma, vmf->address, vmf->pmd, pfn,
+       return vmf_insert_pfn_pmd(vmf->vma, vmf->address, vmf->pmd, pfn,
                        vmf->flags & FAULT_FLAG_WRITE);
 }
 
-static int dax_dev_pmd_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
+static int dax_dev_pmd_fault(struct vm_fault *vmf)
 {
        int rc;
-       struct file *filp = vma->vm_file;
+       struct file *filp = vmf->vma->vm_file;
        struct dax_dev *dax_dev = filp->private_data;
 
        dev_dbg(&dax_dev->dev, "%s: %s: %s (%#lx - %#lx)\n", __func__,
                        current->comm, (vmf->flags & FAULT_FLAG_WRITE)
-                       ? "write" : "read", vma->vm_start, vma->vm_end);
+                       ? "write" : "read",
+                       vmf->vma->vm_start, vmf->vma->vm_end);
 
        rcu_read_lock();
-       rc = __dax_dev_pmd_fault(dax_dev, vma, vmf);
+       rc = __dax_dev_pmd_fault(dax_dev, vmf);
        rcu_read_unlock();
 
        return rc;
index 01fdbc86ee8c488406555fb950630568e335cb54..d800197aba34e5ec3a60499fc2a6c38697ac9a5d 100644 (file)
--- a/fs/dax.c
+++ b/fs/dax.c
@@ -1256,11 +1256,10 @@ EXPORT_SYMBOL_GPL(dax_iomap_fault);
  */
 #define PG_PMD_COLOUR  ((PMD_SIZE >> PAGE_SHIFT) - 1)
 
-static int dax_pmd_insert_mapping(struct vm_area_struct *vma, pmd_t *pmd,
-               struct vm_fault *vmf, unsigned long address,
-               struct iomap *iomap, loff_t pos, bool write, void **entryp)
+static int dax_pmd_insert_mapping(struct vm_fault *vmf, struct iomap *iomap,
+               loff_t pos, void **entryp)
 {
-       struct address_space *mapping = vma->vm_file->f_mapping;
+       struct address_space *mapping = vmf->vma->vm_file->f_mapping;
        struct block_device *bdev = iomap->bdev;
        struct inode *inode = mapping->host;
        struct blk_dax_ctl dax = {
@@ -1287,31 +1286,30 @@ static int dax_pmd_insert_mapping(struct vm_area_struct *vma, pmd_t *pmd,
                goto fallback;
        *entryp = ret;
 
-       trace_dax_pmd_insert_mapping(inode, vma, address, write, length,
-                       dax.pfn, ret);
-       return vmf_insert_pfn_pmd(vma, address, pmd, dax.pfn, write);
+       trace_dax_pmd_insert_mapping(inode, vmf, length, dax.pfn, ret);
+       return vmf_insert_pfn_pmd(vmf->vma, vmf->address, vmf->pmd,
+                       dax.pfn, vmf->flags & FAULT_FLAG_WRITE);
 
  unmap_fallback:
        dax_unmap_atomic(bdev, &dax);
 fallback:
-       trace_dax_pmd_insert_mapping_fallback(inode, vma, address, write,
-                       length, dax.pfn, ret);
+       trace_dax_pmd_insert_mapping_fallback(inode, vmf, length,
+                       dax.pfn, ret);
        return VM_FAULT_FALLBACK;
 }
 
-static int dax_pmd_load_hole(struct vm_area_struct *vma, pmd_t *pmd,
-               struct vm_fault *vmf, unsigned long address,
-               struct iomap *iomap, void **entryp)
+static int dax_pmd_load_hole(struct vm_fault *vmf, struct iomap *iomap,
+               void **entryp)
 {
-       struct address_space *mapping = vma->vm_file->f_mapping;
-       unsigned long pmd_addr = address & PMD_MASK;
+       struct address_space *mapping = vmf->vma->vm_file->f_mapping;
+       unsigned long pmd_addr = vmf->address & PMD_MASK;
        struct inode *inode = mapping->host;
        struct page *zero_page;
        void *ret = NULL;
        spinlock_t *ptl;
        pmd_t pmd_entry;
 
-       zero_page = mm_get_huge_zero_page(vma->vm_mm);
+       zero_page = mm_get_huge_zero_page(vmf->vma->vm_mm);
 
        if (unlikely(!zero_page))
                goto fallback;
@@ -1322,27 +1320,27 @@ static int dax_pmd_load_hole(struct vm_area_struct *vma, pmd_t *pmd,
                goto fallback;
        *entryp = ret;
 
-       ptl = pmd_lock(vma->vm_mm, pmd);
-       if (!pmd_none(*pmd)) {
+       ptl = pmd_lock(vmf->vma->vm_mm, vmf->pmd);
+       if (!pmd_none(*(vmf->pmd))) {
                spin_unlock(ptl);
                goto fallback;
        }
 
-       pmd_entry = mk_pmd(zero_page, vma->vm_page_prot);
+       pmd_entry = mk_pmd(zero_page, vmf->vma->vm_page_prot);
        pmd_entry = pmd_mkhuge(pmd_entry);
-       set_pmd_at(vma->vm_mm, pmd_addr, pmd, pmd_entry);
+       set_pmd_at(vmf->vma->vm_mm, pmd_addr, vmf->pmd, pmd_entry);
        spin_unlock(ptl);
-       trace_dax_pmd_load_hole(inode, vma, address, zero_page, ret);
+       trace_dax_pmd_load_hole(inode, vmf, zero_page, ret);
        return VM_FAULT_NOPAGE;
 
 fallback:
-       trace_dax_pmd_load_hole_fallback(inode, vma, address, zero_page, ret);
+       trace_dax_pmd_load_hole_fallback(inode, vmf, zero_page, ret);
        return VM_FAULT_FALLBACK;
 }
 
-int dax_iomap_pmd_fault(struct vm_area_struct *vma, struct vm_fault *vmf,
-               struct iomap_ops *ops)
+int dax_iomap_pmd_fault(struct vm_fault *vmf, struct iomap_ops *ops)
 {
+       struct vm_area_struct *vma = vmf->vma;
        struct address_space *mapping = vma->vm_file->f_mapping;
        unsigned long pmd_addr = vmf->address & PMD_MASK;
        bool write = vmf->flags & FAULT_FLAG_WRITE;
@@ -1363,7 +1361,7 @@ int dax_iomap_pmd_fault(struct vm_area_struct *vma, struct vm_fault *vmf,
        pgoff = linear_page_index(vma, pmd_addr);
        max_pgoff = (i_size_read(inode) - 1) >> PAGE_SHIFT;
 
-       trace_dax_pmd_fault(inode, vma, vmf, max_pgoff, 0);
+       trace_dax_pmd_fault(inode, vmf, max_pgoff, 0);
 
        /* Fall back to PTEs if we're going to COW */
        if (write && !(vma->vm_flags & VM_SHARED))
@@ -1409,15 +1407,13 @@ int dax_iomap_pmd_fault(struct vm_area_struct *vma, struct vm_fault *vmf,
 
        switch (iomap.type) {
        case IOMAP_MAPPED:
-               result = dax_pmd_insert_mapping(vma, vmf->pmd, vmf,
-                               vmf->address, &iomap, pos, write, &entry);
+               result = dax_pmd_insert_mapping(vmf, &iomap, pos, &entry);
                break;
        case IOMAP_UNWRITTEN:
        case IOMAP_HOLE:
                if (WARN_ON_ONCE(write))
                        goto unlock_entry;
-               result = dax_pmd_load_hole(vma, vmf->pmd, vmf, vmf->address,
-                               &iomap, &entry);
+               result = dax_pmd_load_hole(vmf, &iomap, &entry);
                break;
        default:
                WARN_ON_ONCE(1);
@@ -1447,7 +1443,7 @@ int dax_iomap_pmd_fault(struct vm_area_struct *vma, struct vm_fault *vmf,
                count_vm_event(THP_FAULT_FALLBACK);
        }
 out:
-       trace_dax_pmd_fault_done(inode, vma, vmf, max_pgoff, result);
+       trace_dax_pmd_fault_done(inode, vmf, max_pgoff, result);
        return result;
 }
 EXPORT_SYMBOL_GPL(dax_iomap_pmd_fault);
index d3f589b3602cc1c037de66d67d5d24d650e14407..13021a054fc080e4a75de2ecec11daa139cd7443 100644 (file)
@@ -274,19 +274,19 @@ static int ext4_dax_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
 }
 
 static int
-ext4_dax_pmd_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
+ext4_dax_pmd_fault(struct vm_fault *vmf)
 {
        int result;
-       struct inode *inode = file_inode(vma->vm_file);
+       struct inode *inode = file_inode(vmf->vma->vm_file);
        struct super_block *sb = inode->i_sb;
        bool write = vmf->flags & FAULT_FLAG_WRITE;
 
        if (write) {
                sb_start_pagefault(sb);
-               file_update_time(vma->vm_file);
+               file_update_time(vmf->vma->vm_file);
        }
        down_read(&EXT4_I(inode)->i_mmap_sem);
-       result = dax_iomap_pmd_fault(vma, vmf, &ext4_iomap_ops);
+       result = dax_iomap_pmd_fault(vmf, &ext4_iomap_ops);
        up_read(&EXT4_I(inode)->i_mmap_sem);
        if (write)
                sb_end_pagefault(sb);
index 44a8d2356e31801c4d4b71546b60cb14f8749892..9d8440b07b53d390431727e15d8b2bdaa3cac022 100644 (file)
@@ -1431,10 +1431,9 @@ xfs_filemap_fault(
  */
 STATIC int
 xfs_filemap_pmd_fault(
-       struct vm_area_struct   *vma,
        struct vm_fault         *vmf)
 {
-       struct inode            *inode = file_inode(vma->vm_file);
+       struct inode            *inode = file_inode(vmf->vma->vm_file);
        struct xfs_inode        *ip = XFS_I(inode);
        int                     ret;
 
@@ -1445,11 +1444,11 @@ xfs_filemap_pmd_fault(
 
        if (vmf->flags & FAULT_FLAG_WRITE) {
                sb_start_pagefault(inode->i_sb);
-               file_update_time(vma->vm_file);
+               file_update_time(vmf->vma->vm_file);
        }
 
        xfs_ilock(XFS_I(inode), XFS_MMAPLOCK_SHARED);
-       ret = dax_iomap_pmd_fault(vma, vmf, &xfs_iomap_ops);
+       ret = dax_iomap_pmd_fault(vmf, &xfs_iomap_ops);
        xfs_iunlock(XFS_I(inode), XFS_MMAPLOCK_SHARED);
 
        if (vmf->flags & FAULT_FLAG_WRITE)
index a829fee2b42bb864f59c8b696e80e21c8af39408..c1bd6ab5e97479d2af8914fc3df637a866d38db7 100644 (file)
@@ -71,15 +71,14 @@ static inline unsigned int dax_radix_order(void *entry)
                return PMD_SHIFT - PAGE_SHIFT;
        return 0;
 }
-int dax_iomap_pmd_fault(struct vm_area_struct *vma, struct vm_fault *vmf,
-               struct iomap_ops *ops);
+int dax_iomap_pmd_fault(struct vm_fault *vmf, struct iomap_ops *ops);
 #else
 static inline unsigned int dax_radix_order(void *entry)
 {
        return 0;
 }
-static inline int dax_iomap_pmd_fault(struct vm_area_struct *vma,
-               struct vm_fault *vmf, struct iomap_ops *ops)
+static inline int dax_iomap_pmd_fault(struct vm_fault *vmf,
+               struct iomap_ops *ops)
 {
        return VM_FAULT_FALLBACK;
 }
index 0961e95e904eb01fbc47ac6a5387b7604eaa0445..3787f047a0986e00c52d23c0328b0366e96bb102 100644 (file)
@@ -351,7 +351,7 @@ struct vm_operations_struct {
        void (*close)(struct vm_area_struct * area);
        int (*mremap)(struct vm_area_struct * area);
        int (*fault)(struct vm_area_struct *vma, struct vm_fault *vmf);
-       int (*pmd_fault)(struct vm_area_struct *vma, struct vm_fault *vmf);
+       int (*pmd_fault)(struct vm_fault *vmf);
        void (*map_pages)(struct vm_fault *vmf,
                        pgoff_t start_pgoff, pgoff_t end_pgoff);
 
index a98665bfb38f3f62e0dc573cde54666b54ecfd18..c566ddc87f73e5885841204d5ae7d160374545a6 100644 (file)
@@ -7,9 +7,9 @@
 #include <linux/tracepoint.h>
 
 DECLARE_EVENT_CLASS(dax_pmd_fault_class,
-       TP_PROTO(struct inode *inode, struct vm_area_struct *vma,
-               struct vm_fault *vmf, pgoff_t max_pgoff, int result),
-       TP_ARGS(inode, vma, vmf, max_pgoff, result),
+       TP_PROTO(struct inode *inode, struct vm_fault *vmf,
+               pgoff_t max_pgoff, int result),
+       TP_ARGS(inode, vmf, max_pgoff, result),
        TP_STRUCT__entry(
                __field(unsigned long, ino)
                __field(unsigned long, vm_start)
@@ -25,9 +25,9 @@ DECLARE_EVENT_CLASS(dax_pmd_fault_class,
        TP_fast_assign(
                __entry->dev = inode->i_sb->s_dev;
                __entry->ino = inode->i_ino;
-               __entry->vm_start = vma->vm_start;
-               __entry->vm_end = vma->vm_end;
-               __entry->vm_flags = vma->vm_flags;
+               __entry->vm_start = vmf->vma->vm_start;
+               __entry->vm_end = vmf->vma->vm_end;
+               __entry->vm_flags = vmf->vma->vm_flags;
                __entry->address = vmf->address;
                __entry->flags = vmf->flags;
                __entry->pgoff = vmf->pgoff;
@@ -52,19 +52,18 @@ DECLARE_EVENT_CLASS(dax_pmd_fault_class,
 
 #define DEFINE_PMD_FAULT_EVENT(name) \
 DEFINE_EVENT(dax_pmd_fault_class, name, \
-       TP_PROTO(struct inode *inode, struct vm_area_struct *vma, \
-               struct vm_fault *vmf, \
+       TP_PROTO(struct inode *inode, struct vm_fault *vmf, \
                pgoff_t max_pgoff, int result), \
-       TP_ARGS(inode, vma, vmf, max_pgoff, result))
+       TP_ARGS(inode, vmf, max_pgoff, result))
 
 DEFINE_PMD_FAULT_EVENT(dax_pmd_fault);
 DEFINE_PMD_FAULT_EVENT(dax_pmd_fault_done);
 
 DECLARE_EVENT_CLASS(dax_pmd_load_hole_class,
-       TP_PROTO(struct inode *inode, struct vm_area_struct *vma,
-               unsigned long address, struct page *zero_page,
+       TP_PROTO(struct inode *inode, struct vm_fault *vmf,
+               struct page *zero_page,
                void *radix_entry),
-       TP_ARGS(inode, vma, address, zero_page, radix_entry),
+       TP_ARGS(inode, vmf, zero_page, radix_entry),
        TP_STRUCT__entry(
                __field(unsigned long, ino)
                __field(unsigned long, vm_flags)
@@ -76,8 +75,8 @@ DECLARE_EVENT_CLASS(dax_pmd_load_hole_class,
        TP_fast_assign(
                __entry->dev = inode->i_sb->s_dev;
                __entry->ino = inode->i_ino;
-               __entry->vm_flags = vma->vm_flags;
-               __entry->address = address;
+               __entry->vm_flags = vmf->vma->vm_flags;
+               __entry->address = vmf->address;
                __entry->zero_page = zero_page;
                __entry->radix_entry = radix_entry;
        ),
@@ -95,19 +94,17 @@ DECLARE_EVENT_CLASS(dax_pmd_load_hole_class,
 
 #define DEFINE_PMD_LOAD_HOLE_EVENT(name) \
 DEFINE_EVENT(dax_pmd_load_hole_class, name, \
-       TP_PROTO(struct inode *inode, struct vm_area_struct *vma, \
-               unsigned long address, struct page *zero_page, \
-               void *radix_entry), \
-       TP_ARGS(inode, vma, address, zero_page, radix_entry))
+       TP_PROTO(struct inode *inode, struct vm_fault *vmf, \
+               struct page *zero_page, void *radix_entry), \
+       TP_ARGS(inode, vmf, zero_page, radix_entry))
 
 DEFINE_PMD_LOAD_HOLE_EVENT(dax_pmd_load_hole);
 DEFINE_PMD_LOAD_HOLE_EVENT(dax_pmd_load_hole_fallback);
 
 DECLARE_EVENT_CLASS(dax_pmd_insert_mapping_class,
-       TP_PROTO(struct inode *inode, struct vm_area_struct *vma,
-               unsigned long address, int write, long length, pfn_t pfn,
-               void *radix_entry),
-       TP_ARGS(inode, vma, address, write, length, pfn, radix_entry),
+       TP_PROTO(struct inode *inode, struct vm_fault *vmf,
+               long length, pfn_t pfn, void *radix_entry),
+       TP_ARGS(inode, vmf, length, pfn, radix_entry),
        TP_STRUCT__entry(
                __field(unsigned long, ino)
                __field(unsigned long, vm_flags)
@@ -121,9 +118,9 @@ DECLARE_EVENT_CLASS(dax_pmd_insert_mapping_class,
        TP_fast_assign(
                __entry->dev = inode->i_sb->s_dev;
                __entry->ino = inode->i_ino;
-               __entry->vm_flags = vma->vm_flags;
-               __entry->address = address;
-               __entry->write = write;
+               __entry->vm_flags = vmf->vma->vm_flags;
+               __entry->address = vmf->address;
+               __entry->write = vmf->flags & FAULT_FLAG_WRITE;
                __entry->length = length;
                __entry->pfn_val = pfn.val;
                __entry->radix_entry = radix_entry;
@@ -146,10 +143,9 @@ DECLARE_EVENT_CLASS(dax_pmd_insert_mapping_class,
 
 #define DEFINE_PMD_INSERT_MAPPING_EVENT(name) \
 DEFINE_EVENT(dax_pmd_insert_mapping_class, name, \
-       TP_PROTO(struct inode *inode, struct vm_area_struct *vma, \
-               unsigned long address, int write, long length, pfn_t pfn, \
-               void *radix_entry), \
-       TP_ARGS(inode, vma, address, write, length, pfn, radix_entry))
+       TP_PROTO(struct inode *inode, struct vm_fault *vmf, \
+               long length, pfn_t pfn, void *radix_entry), \
+       TP_ARGS(inode, vmf, length, pfn, radix_entry))
 
 DEFINE_PMD_INSERT_MAPPING_EVENT(dax_pmd_insert_mapping);
 DEFINE_PMD_INSERT_MAPPING_EVENT(dax_pmd_insert_mapping_fallback);
index 2376f8528800b3a412322e452158ce5781333751..ececdc4a2892e654c9e0ae8959e76b4810c02d4a 100644 (file)
@@ -3471,11 +3471,10 @@ out:
 
 static int create_huge_pmd(struct vm_fault *vmf)
 {
-       struct vm_area_struct *vma = vmf->vma;
-       if (vma_is_anonymous(vma))
+       if (vma_is_anonymous(vmf->vma))
                return do_huge_pmd_anonymous_page(vmf);
-       if (vma->vm_ops->pmd_fault)
-               return vma->vm_ops->pmd_fault(vma, vmf);
+       if (vmf->vma->vm_ops->pmd_fault)
+               return vmf->vma->vm_ops->pmd_fault(vmf);
        return VM_FAULT_FALLBACK;
 }
 
@@ -3484,7 +3483,7 @@ static int wp_huge_pmd(struct vm_fault *vmf, pmd_t orig_pmd)
        if (vma_is_anonymous(vmf->vma))
                return do_huge_pmd_wp_page(vmf, orig_pmd);
        if (vmf->vma->vm_ops->pmd_fault)
-               return vmf->vma->vm_ops->pmd_fault(vmf->vma, vmf);
+               return vmf->vma->vm_ops->pmd_fault(vmf);
 
        /* COW handled on pte level: split pmd */
        VM_BUG_ON_VMA(vmf->vma->vm_flags & VM_SHARED, vmf->vma);