Merge branch 'xfs-4.8-split-dax-dio' into for-next
author: Dave Chinner <david@fromorbit.com>
Wed, 20 Jul 2016 01:54:37 +0000 (11:54 +1000)
committer: Dave Chinner <david@fromorbit.com>
Wed, 20 Jul 2016 01:54:37 +0000 (11:54 +1000)
1  2 
fs/xfs/xfs_aops.c
fs/xfs/xfs_file.c
fs/xfs/xfs_inode.h
fs/xfs/xfs_ioctl.c
fs/xfs/xfs_trace.h

index 80714ebd54c05f6c9264ca39cbbcdf7ecbb46be4,3ba0809e0be8a6f2f396def2d2d6419cc1a7c699..b3682774a07d3cfd04176558b3103f21cd22faf1
@@@ -1374,26 -1407,222 +1374,12 @@@ xfs_vm_direct_IO
        struct kiocb            *iocb,
        struct iov_iter         *iter)
  {
-       struct inode            *inode = iocb->ki_filp->f_mapping->host;
-       dio_iodone_t            *endio = NULL;
-       int                     flags = 0;
-       struct block_device     *bdev;
-       if (iov_iter_rw(iter) == WRITE) {
-               endio = xfs_end_io_direct_write;
-               flags = DIO_ASYNC_EXTEND;
-       }
-       if (IS_DAX(inode)) {
-               return dax_do_io(iocb, inode, iter,
-                                xfs_get_blocks_direct, endio, 0);
-       }
-       bdev = xfs_find_bdev_for_inode(inode);
-       return  __blockdev_direct_IO(iocb, inode, bdev, iter,
-                       xfs_get_blocks_direct, endio, NULL, flags);
+       /*
+        * We just need the method present so that open/fcntl allow direct I/O.
+        */
+       return -EINVAL;
  }
  
 -/*
 - * Punch out the delalloc blocks we have already allocated.
 - *
 - * Don't bother with xfs_setattr given that nothing can have made it to disk yet
 - * as the page is still locked at this point.
 - */
 -STATIC void
 -xfs_vm_kill_delalloc_range(
 -      struct inode            *inode,
 -      loff_t                  start,
 -      loff_t                  end)
 -{
 -      struct xfs_inode        *ip = XFS_I(inode);
 -      xfs_fileoff_t           start_fsb;
 -      xfs_fileoff_t           end_fsb;
 -      int                     error;
 -
 -      start_fsb = XFS_B_TO_FSB(ip->i_mount, start);
 -      end_fsb = XFS_B_TO_FSB(ip->i_mount, end);
 -      if (end_fsb <= start_fsb)
 -              return;
 -
 -      xfs_ilock(ip, XFS_ILOCK_EXCL);
 -      error = xfs_bmap_punch_delalloc_range(ip, start_fsb,
 -                                              end_fsb - start_fsb);
 -      if (error) {
 -              /* something screwed, just bail */
 -              if (!XFS_FORCED_SHUTDOWN(ip->i_mount)) {
 -                      xfs_alert(ip->i_mount,
 -              "xfs_vm_write_failed: unable to clean up ino %lld",
 -                                      ip->i_ino);
 -              }
 -      }
 -      xfs_iunlock(ip, XFS_ILOCK_EXCL);
 -}
 -
 -STATIC void
 -xfs_vm_write_failed(
 -      struct inode            *inode,
 -      struct page             *page,
 -      loff_t                  pos,
 -      unsigned                len)
 -{
 -      loff_t                  block_offset;
 -      loff_t                  block_start;
 -      loff_t                  block_end;
 -      loff_t                  from = pos & (PAGE_SIZE - 1);
 -      loff_t                  to = from + len;
 -      struct buffer_head      *bh, *head;
 -      struct xfs_mount        *mp = XFS_I(inode)->i_mount;
 -
 -      /*
 -       * The request pos offset might be 32 or 64 bit, this is all fine
 -       * on 64-bit platform.  However, for 64-bit pos request on 32-bit
 -       * platform, the high 32-bit will be masked off if we evaluate the
 -       * block_offset via (pos & PAGE_MASK) because the PAGE_MASK is
 -       * 0xfffff000 as an unsigned long, hence the result is incorrect
 -       * which could cause the following ASSERT failed in most cases.
 -       * In order to avoid this, we can evaluate the block_offset of the
 -       * start of the page by using shifts rather than masks the mismatch
 -       * problem.
 -       */
 -      block_offset = (pos >> PAGE_SHIFT) << PAGE_SHIFT;
 -
 -      ASSERT(block_offset + from == pos);
 -
 -      head = page_buffers(page);
 -      block_start = 0;
 -      for (bh = head; bh != head || !block_start;
 -           bh = bh->b_this_page, block_start = block_end,
 -                                 block_offset += bh->b_size) {
 -              block_end = block_start + bh->b_size;
 -
 -              /* skip buffers before the write */
 -              if (block_end <= from)
 -                      continue;
 -
 -              /* if the buffer is after the write, we're done */
 -              if (block_start >= to)
 -                      break;
 -
 -              /*
 -               * Process delalloc and unwritten buffers beyond EOF. We can
 -               * encounter unwritten buffers in the event that a file has
 -               * post-EOF unwritten extents and an extending write happens to
 -               * fail (e.g., an unaligned write that also involves a delalloc
 -               * to the same page).
 -               */
 -              if (!buffer_delay(bh) && !buffer_unwritten(bh))
 -                      continue;
 -
 -              if (!xfs_mp_fail_writes(mp) && !buffer_new(bh) &&
 -                  block_offset < i_size_read(inode))
 -                      continue;
 -
 -              if (buffer_delay(bh))
 -                      xfs_vm_kill_delalloc_range(inode, block_offset,
 -                                                 block_offset + bh->b_size);
 -
 -              /*
 -               * This buffer does not contain data anymore. make sure anyone
 -               * who finds it knows that for certain.
 -               */
 -              clear_buffer_delay(bh);
 -              clear_buffer_uptodate(bh);
 -              clear_buffer_mapped(bh);
 -              clear_buffer_new(bh);
 -              clear_buffer_dirty(bh);
 -              clear_buffer_unwritten(bh);
 -      }
 -
 -}
 -
 -/*
 - * This used to call block_write_begin(), but it unlocks and releases the page
 - * on error, and we need that page to be able to punch stale delalloc blocks out
 - * on failure. hence we copy-n-waste it here and call xfs_vm_write_failed() at
 - * the appropriate point.
 - */
 -STATIC int
 -xfs_vm_write_begin(
 -      struct file             *file,
 -      struct address_space    *mapping,
 -      loff_t                  pos,
 -      unsigned                len,
 -      unsigned                flags,
 -      struct page             **pagep,
 -      void                    **fsdata)
 -{
 -      pgoff_t                 index = pos >> PAGE_SHIFT;
 -      struct page             *page;
 -      int                     status;
 -      struct xfs_mount        *mp = XFS_I(mapping->host)->i_mount;
 -
 -      ASSERT(len <= PAGE_SIZE);
 -
 -      page = grab_cache_page_write_begin(mapping, index, flags);
 -      if (!page)
 -              return -ENOMEM;
 -
 -      status = __block_write_begin(page, pos, len, xfs_get_blocks);
 -      if (xfs_mp_fail_writes(mp))
 -              status = -EIO;
 -      if (unlikely(status)) {
 -              struct inode    *inode = mapping->host;
 -              size_t          isize = i_size_read(inode);
 -
 -              xfs_vm_write_failed(inode, page, pos, len);
 -              unlock_page(page);
 -
 -              /*
 -               * If the write is beyond EOF, we only want to kill blocks
 -               * allocated in this write, not blocks that were previously
 -               * written successfully.
 -               */
 -              if (xfs_mp_fail_writes(mp))
 -                      isize = 0;
 -              if (pos + len > isize) {
 -                      ssize_t start = max_t(ssize_t, pos, isize);
 -
 -                      truncate_pagecache_range(inode, start, pos + len);
 -              }
 -
 -              put_page(page);
 -              page = NULL;
 -      }
 -
 -      *pagep = page;
 -      return status;
 -}
 -
 -/*
 - * On failure, we only need to kill delalloc blocks beyond EOF in the range of
 - * this specific write because they will never be written. Previous writes
 - * beyond EOF where block allocation succeeded do not need to be trashed, so
 - * only new blocks from this write should be trashed. For blocks within
 - * EOF, generic_write_end() zeros them so they are safe to leave alone and be
 - * written with all the other valid data.
 - */
 -STATIC int
 -xfs_vm_write_end(
 -      struct file             *file,
 -      struct address_space    *mapping,
 -      loff_t                  pos,
 -      unsigned                len,
 -      unsigned                copied,
 -      struct page             *page,
 -      void                    *fsdata)
 -{
 -      int                     ret;
 -
 -      ASSERT(len <= PAGE_SIZE);
 -
 -      ret = generic_write_end(file, mapping, pos, len, copied, page, fsdata);
 -      if (unlikely(ret < len)) {
 -              struct inode    *inode = mapping->host;
 -              size_t          isize = i_size_read(inode);
 -              loff_t          to = pos + len;
 -
 -              if (to > isize) {
 -                      /* only kill blocks in this write beyond EOF */
 -                      if (pos > isize)
 -                              isize = pos;
 -                      xfs_vm_kill_delalloc_range(inode, isize, to);
 -                      truncate_pagecache_range(inode, isize, to);
 -              }
 -      }
 -      return ret;
 -}
 -
  STATIC sector_t
  xfs_vm_bmap(
        struct address_space    *mapping,
index 713991c227815bd3c07f82e6e04e5eeee245ab71,d97e8cb99a59e9acdd823c4b8809ec421fd243ac..0e7432558fc02fee95d977b5704319e4ef8c4045
@@@ -670,9 -958,8 +789,8 @@@ xfs_file_buffered_aio_write
        current->backing_dev_info = inode_to_bdi(inode);
  
  write_retry:
-       trace_xfs_file_buffered_write(ip, iov_iter_count(from),
-                                     iocb->ki_pos, 0);
+       trace_xfs_file_buffered_write(ip, iov_iter_count(from), iocb->ki_pos);
 -      ret = generic_perform_write(file, from, iocb->ki_pos);
 +      ret = iomap_file_buffered_write(iocb, from, &xfs_iomap_ops);
        if (likely(ret >= 0))
                iocb->ki_pos += ret;
  
Simple merge
Simple merge
Simple merge