file_inode(f)->i_mapping is f->f_mapping
author Al Viro <viro@zeniv.linux.org.uk>
Sat, 5 Dec 2015 04:45:44 +0000 (23:45 -0500)
committer Al Viro <viro@zeniv.linux.org.uk>
Sun, 29 May 2016 22:56:09 +0000 (18:56 -0400)
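For a file that has been through open(), ->f_mapping is already the
inode's ->i_mapping: the VFS caches it in the struct file at open time,
so file_inode(f)->i_mapping is just a slower spelling of f->f_mapping
(an extra pointer chase through ->f_inode).  A minimal sketch of the
invariant every hunk below relies on; the assignment paraphrases what
do_dentry_open() in fs/open.c does, and vfs_open_sketch() is a made-up
name for illustration:

        /* at open time the VFS caches the inode's mapping in the file */
        static void vfs_open_sketch(struct file *f, struct inode *inode)
        {
                f->f_inode = inode;
                f->f_mapping = inode->i_mapping;
        }

        /* hence, for any opened struct file *f: */
        struct address_space *a = file_inode(f)->i_mapping;     /* two loads */
        struct address_space *b = f->f_mapping;                 /* one load  */
        /* a == b; each hunk below converts a caller to the short form */

The conversion is purely mechanical: none of these callers wants the
inode for anything else, so each can use the cached pointer directly.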
Signed-off-by: Al Viro <viro@zeniv.linux.org.uk>
drivers/gpu/drm/armada/armada_gem.c
drivers/gpu/drm/drm_gem.c
drivers/gpu/drm/etnaviv/etnaviv_gem.c
drivers/gpu/drm/i915/i915_gem.c
drivers/gpu/drm/omapdrm/omap_gem.c
drivers/gpu/drm/ttm/ttm_tt.c
fs/hfs/inode.c
fs/hfsplus/inode.c
fs/nfs/dir.c
fs/ocfs2/aops.c
mm/hugetlb.c

diff --git a/drivers/gpu/drm/armada/armada_gem.c b/drivers/gpu/drm/armada/armada_gem.c
index 88e7fc7977213d323869d990275a634a70db8bf2..cb8f0347b934d4be4b385796a03352dca5e7b381 100644
@@ -231,7 +231,7 @@ struct armada_gem_object *armada_gem_alloc_object(struct drm_device *dev,
 
        obj->dev_addr = DMA_ERROR_CODE;
 
-       mapping = file_inode(obj->obj.filp)->i_mapping;
+       mapping = obj->obj.filp->f_mapping;
        mapping_set_gfp_mask(mapping, GFP_HIGHUSER | __GFP_RECLAIMABLE);
 
        DRM_DEBUG_DRIVER("alloc obj %p size %zu\n", obj, size);
@@ -441,7 +441,7 @@ armada_gem_prime_map_dma_buf(struct dma_buf_attachment *attach,
                if (sg_alloc_table(sgt, count, GFP_KERNEL))
                        goto free_sgt;
 
-               mapping = file_inode(dobj->obj.filp)->i_mapping;
+               mapping = dobj->obj.filp->f_mapping;
 
                for_each_sg(sgt->sgl, sg, count, i) {
                        struct page *page;
diff --git a/drivers/gpu/drm/drm_gem.c b/drivers/gpu/drm/drm_gem.c
index 32156060b9c96bb9ccfe0c0b06dc5057a0160fb6..ad89db36ca256dbe73e7c5b3e04ce42ad03e141c 100644
@@ -511,7 +511,7 @@ struct page **drm_gem_get_pages(struct drm_gem_object *obj)
        int i, npages;
 
        /* This is the shared memory object that backs the GEM resource */
-       mapping = file_inode(obj->filp)->i_mapping;
+       mapping = obj->filp->f_mapping;
 
        /* We already BUG_ON() for non-page-aligned sizes in
         * drm_gem_object_init(), so we should never hit this unless
diff --git a/drivers/gpu/drm/etnaviv/etnaviv_gem.c b/drivers/gpu/drm/etnaviv/etnaviv_gem.c
index df9bcbab922f359bd1837f5b3a5fde457e827457..8c6f750634af4bd9ed56e073411575f9f397b4c5 100644
@@ -660,7 +660,7 @@ static struct drm_gem_object *__etnaviv_gem_new(struct drm_device *dev,
                 * why this is required _and_ expected if you're
                 * going to pin these pages.
                 */
-               mapping = file_inode(obj->filp)->i_mapping;
+               mapping = obj->filp->f_mapping;
                mapping_set_gfp_mask(mapping, GFP_HIGHUSER);
        }
 
diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
index aad26851cee3c87514fc9f4162a7bff2ac88ec75..ed6117a0ee84689137ee7ca6e6625b5b5b093d1f 100644
@@ -151,7 +151,7 @@ i915_gem_get_aperture_ioctl(struct drm_device *dev, void *data,
 static int
 i915_gem_object_get_pages_phys(struct drm_i915_gem_object *obj)
 {
-       struct address_space *mapping = file_inode(obj->base.filp)->i_mapping;
+       struct address_space *mapping = obj->base.filp->f_mapping;
        char *vaddr = obj->phys_handle->vaddr;
        struct sg_table *st;
        struct scatterlist *sg;
@@ -218,7 +218,7 @@ i915_gem_object_put_pages_phys(struct drm_i915_gem_object *obj)
                obj->dirty = 0;
 
        if (obj->dirty) {
-               struct address_space *mapping = file_inode(obj->base.filp)->i_mapping;
+               struct address_space *mapping = obj->base.filp->f_mapping;
                char *vaddr = obj->phys_handle->vaddr;
                int i;
 
@@ -2155,7 +2155,7 @@ i915_gem_object_invalidate(struct drm_i915_gem_object *obj)
        if (obj->base.filp == NULL)
                return;
 
-       mapping = file_inode(obj->base.filp)->i_mapping,
+       mapping = obj->base.filp->f_mapping,
        invalidate_mapping_pages(mapping, 0, (loff_t)-1);
 }
 
@@ -2271,7 +2271,7 @@ i915_gem_object_get_pages_gtt(struct drm_i915_gem_object *obj)
         *
         * Fail silently without starting the shrinker
         */
-       mapping = file_inode(obj->base.filp)->i_mapping;
+       mapping = obj->base.filp->f_mapping;
        gfp = mapping_gfp_constraint(mapping, ~(__GFP_IO | __GFP_RECLAIM));
        gfp |= __GFP_NORETRY | __GFP_NOWARN;
        sg = st->sgl;
@@ -4522,7 +4522,7 @@ struct drm_i915_gem_object *i915_gem_alloc_object(struct drm_device *dev,
                mask |= __GFP_DMA32;
        }
 
-       mapping = file_inode(obj->base.filp)->i_mapping;
+       mapping = obj->base.filp->f_mapping;
        mapping_set_gfp_mask(mapping, mask);
 
        i915_gem_object_init(obj, &i915_gem_object_ops);
diff --git a/drivers/gpu/drm/omapdrm/omap_gem.c b/drivers/gpu/drm/omapdrm/omap_gem.c
index b97afc281778df99853c89da9e80bda2c0759e1c..dfb36f3c9c350da77db8f15aee7d2b02b82d3ed5 100644
@@ -1406,7 +1406,7 @@ struct drm_gem_object *omap_gem_new(struct drm_device *dev,
                if (ret)
                        goto err_free;
 
-               mapping = file_inode(obj->filp)->i_mapping;
+               mapping = obj->filp->f_mapping;
                mapping_set_gfp_mask(mapping, GFP_USER | __GFP_DMA32);
        }
 
diff --git a/drivers/gpu/drm/ttm/ttm_tt.c b/drivers/gpu/drm/ttm/ttm_tt.c
index 077ae9b2865dcda4bb8d9e774a8b4a5cf498bc84..97542c35d6efa4ab2adde3b19af61f20f95e31be 100644
@@ -298,7 +298,7 @@ int ttm_tt_swapin(struct ttm_tt *ttm)
        swap_storage = ttm->swap_storage;
        BUG_ON(swap_storage == NULL);
 
-       swap_space = file_inode(swap_storage)->i_mapping;
+       swap_space = swap_storage->f_mapping;
 
        for (i = 0; i < ttm->num_pages; ++i) {
                from_page = shmem_read_mapping_page(swap_space, i);
@@ -347,7 +347,7 @@ int ttm_tt_swapout(struct ttm_tt *ttm, struct file *persistent_swap_storage)
        } else
                swap_storage = persistent_swap_storage;
 
-       swap_space = file_inode(swap_storage)->i_mapping;
+       swap_space = swap_storage->f_mapping;
 
        for (i = 0; i < ttm->num_pages; ++i) {
                from_page = ttm->pages[i];
diff --git a/fs/hfs/inode.c b/fs/hfs/inode.c
index 8eed66af5b828d1f9bf1d791685f51d8a0aa0496..02a3845363f7d93601cb3b53b912e7df8f8179d5 100644
@@ -128,7 +128,7 @@ static ssize_t hfs_direct_IO(struct kiocb *iocb, struct iov_iter *iter)
 {
        struct file *file = iocb->ki_filp;
        struct address_space *mapping = file->f_mapping;
-       struct inode *inode = file_inode(file)->i_mapping->host;
+       struct inode *inode = mapping->host;
        size_t count = iov_iter_count(iter);
        ssize_t ret;
 
diff --git a/fs/hfsplus/inode.c b/fs/hfsplus/inode.c
index ef9fefe364a68781758b50a211a00a7635d0d6bc..19462d773fe2443c3c483249630c646dc1f3c3f9 100644
@@ -126,7 +126,7 @@ static ssize_t hfsplus_direct_IO(struct kiocb *iocb, struct iov_iter *iter)
 {
        struct file *file = iocb->ki_filp;
        struct address_space *mapping = file->f_mapping;
-       struct inode *inode = file_inode(file)->i_mapping->host;
+       struct inode *inode = mapping->host;
        size_t count = iov_iter_count(iter);
        ssize_t ret;
 
diff --git a/fs/nfs/dir.c b/fs/nfs/dir.c
index 8be0a2438a9390363b529cccd8c2aad0831c0f70..59efb6e46a5e752e86cdb1a456c1148b9e2dd735 100644
@@ -729,4 +729,4 @@ struct page *get_cache_page(nfs_readdir_descriptor_t *desc)
 {
-       return read_cache_page(file_inode(desc->file)->i_mapping,
+       return read_cache_page(desc->file->f_mapping,
                        desc->page_index, (filler_t *)nfs_readdir_filler, desc);
 }
diff --git a/fs/ocfs2/aops.c b/fs/ocfs2/aops.c
index c034edf3ef38ed8b14b1054cf6e8753a0c77d063..ab8e56c6f2e13b942c5627350fc586879d4a8b78 100644
@@ -2426,7 +2426,7 @@ static int ocfs2_dio_end_io(struct kiocb *iocb,
 static ssize_t ocfs2_direct_IO(struct kiocb *iocb, struct iov_iter *iter)
 {
        struct file *file = iocb->ki_filp;
-       struct inode *inode = file_inode(file)->i_mapping->host;
+       struct inode *inode = file->f_mapping->host;
        struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);
        get_block_t *get_block;
 
diff --git a/mm/hugetlb.c b/mm/hugetlb.c
index d26162e81feaa78b2fb4615839cbb580fc626f4b..b322c85c58c34698ffcaecb037d3b5f863bac4af 100644
@@ -3287,7 +3287,7 @@ static void unmap_ref_private(struct mm_struct *mm, struct vm_area_struct *vma,
        address = address & huge_page_mask(h);
        pgoff = ((address - vma->vm_start) >> PAGE_SHIFT) +
                        vma->vm_pgoff;
-       mapping = file_inode(vma->vm_file)->i_mapping;
+       mapping = vma->vm_file->f_mapping;
 
        /*
         * Take the mapping lock for the duration of the table walk. As