xfs: xfs_buf_ioend and xfs_buf_iodone_work duplicate functionality
author Dave Chinner <dchinner@redhat.com>
Wed, 1 Oct 2014 23:04:22 +0000 (09:04 +1000)
committer Dave Chinner <david@fromorbit.com>
Wed, 1 Oct 2014 23:04:22 +0000 (09:04 +1000)
We do some work in xfs_buf_ioend, and some work in
xfs_buf_iodone_work, but much of that functionality is the same.
All of this work can be done in a single function, leaving the
callers to decide whether completion should run directly or be
deferred to a workqueue. Hence rename xfs_buf_iodone_work to
xfs_buf_ioend(), and add a new xfs_buf_ioend_async() for the places
that need async processing.
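
For reference, a minimal userspace sketch of the pattern this patch
settles on -- a single completion function plus a thin async wrapper.
A detached pthread stands in for the xfslogd workqueue here, and the
struct fields and names are illustrative, not the kernel's:

#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

struct buf {
	bool		read;		/* stands in for XBF_READ */
	bool		async;		/* stands in for XBF_ASYNC */
	int		error;
	bool		done;		/* stands in for XBF_DONE */
	pthread_t	work;		/* stands in for b_iodone_work */
};

/* All completion processing lives here, whatever context runs it. */
static void
buf_ioend(struct buf *bp)
{
	if (bp->read && !bp->error)
		printf("verify read data\n");	/* ->verify_read() */
	if (!bp->error)
		bp->done = true;
	printf("run iodone callback or wake the waiter\n");
}

static void *
buf_ioend_work(void *arg)
{
	buf_ioend(arg);
	return NULL;
}

/* Thin wrapper: identical work, just deferred to another context. */
static void
buf_ioend_async(struct buf *bp)
{
	pthread_create(&bp->work, NULL, buf_ioend_work, bp);
	pthread_detach(bp->work);
}

int
main(void)
{
	struct buf bp = { .read = true, .async = true };

	/* Submission path: errors and synchronous I/O complete inline. */
	if (bp.error || !bp.async)
		buf_ioend(&bp);
	else
		buf_ioend_async(&bp);

	pthread_exit(NULL);	/* let the detached worker finish first */
}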

Signed-off-by: Dave Chinner <dchinner@redhat.com>
Reviewed-by: Christoph Hellwig <hch@lst.de>
Signed-off-by: Dave Chinner <david@fromorbit.com>
fs/xfs/xfs_buf.c
fs/xfs/xfs_buf.h
fs/xfs/xfs_buf_item.c
fs/xfs/xfs_inode.c
fs/xfs/xfs_log.c
fs/xfs/xfs_log_recover.c

diff --git a/fs/xfs/xfs_buf.c b/fs/xfs/xfs_buf.c
index 48b1e2989ea41f7147031f5b15687c60f099386c..a046149e60992d605dcbd9ffd7ac9c5375ee15d4 100644
@@ -998,26 +998,30 @@ xfs_buf_wait_unpin(
  *     Buffer Utility Routines
  */
 
-STATIC void
-xfs_buf_iodone_work(
-       struct work_struct      *work)
+void
+xfs_buf_ioend(
+       struct xfs_buf  *bp)
 {
-       struct xfs_buf          *bp =
-               container_of(work, xfs_buf_t, b_iodone_work);
-       bool                    read = !!(bp->b_flags & XBF_READ);
+       bool            read = bp->b_flags & XBF_READ;
+
+       trace_xfs_buf_iodone(bp, _RET_IP_);
 
        bp->b_flags &= ~(XBF_READ | XBF_WRITE | XBF_READ_AHEAD);
 
-       /* only validate buffers that were read without errors */
-       if (read && bp->b_ops && !bp->b_error && (bp->b_flags & XBF_DONE))
+       /* Only validate buffers that were read without errors */
+       if (read && !bp->b_error && bp->b_ops) {
+               ASSERT(!bp->b_iodone);
                bp->b_ops->verify_read(bp);
+       }
+
+       if (!bp->b_error)
+               bp->b_flags |= XBF_DONE;
 
        if (bp->b_iodone)
                (*(bp->b_iodone))(bp);
        else if (bp->b_flags & XBF_ASYNC)
                xfs_buf_relse(bp);
        else {
-               ASSERT(read && bp->b_ops);
                complete(&bp->b_iowait);
 
                /* release the !XBF_ASYNC ref now we are done. */
@@ -1025,30 +1029,22 @@ xfs_buf_iodone_work(
        }
 }
 
-void
-xfs_buf_ioend(
-       struct xfs_buf  *bp,
-       int             schedule)
+static void
+xfs_buf_ioend_work(
+       struct work_struct      *work)
 {
-       bool            read = !!(bp->b_flags & XBF_READ);
-
-       trace_xfs_buf_iodone(bp, _RET_IP_);
+       struct xfs_buf          *bp =
+               container_of(work, xfs_buf_t, b_iodone_work);
 
-       if (bp->b_error == 0)
-               bp->b_flags |= XBF_DONE;
+       xfs_buf_ioend(bp);
+}
 
-       if (bp->b_iodone || (read && bp->b_ops) || (bp->b_flags & XBF_ASYNC)) {
-               if (schedule) {
-                       INIT_WORK(&bp->b_iodone_work, xfs_buf_iodone_work);
-                       queue_work(xfslogd_workqueue, &bp->b_iodone_work);
-               } else {
-                       xfs_buf_iodone_work(&bp->b_iodone_work);
-               }
-       } else {
-               bp->b_flags &= ~(XBF_READ | XBF_WRITE | XBF_READ_AHEAD);
-               complete(&bp->b_iowait);
-               xfs_buf_rele(bp);
-       }
+void
+xfs_buf_ioend_async(
+       struct xfs_buf  *bp)
+{
+       INIT_WORK(&bp->b_iodone_work, xfs_buf_ioend_work);
+       queue_work(xfslogd_workqueue, &bp->b_iodone_work);
 }
 
 void
@@ -1099,7 +1095,7 @@ xfs_bioerror(
        XFS_BUF_UNDONE(bp);
        xfs_buf_stale(bp);
 
-       xfs_buf_ioend(bp, 0);
+       xfs_buf_ioend(bp);
 
        return -EIO;
 }
@@ -1185,15 +1181,6 @@ xfs_bwrite(
        return error;
 }
 
-STATIC void
-_xfs_buf_ioend(
-       xfs_buf_t               *bp,
-       int                     schedule)
-{
-       if (atomic_dec_and_test(&bp->b_io_remaining) == 1)
-               xfs_buf_ioend(bp, schedule);
-}
-
 STATIC void
 xfs_buf_bio_end_io(
        struct bio              *bio,
@@ -1211,7 +1198,8 @@ xfs_buf_bio_end_io(
        if (!bp->b_error && xfs_buf_is_vmapped(bp) && (bp->b_flags & XBF_READ))
                invalidate_kernel_vmap_range(bp->b_addr, xfs_buf_vmap_len(bp));
 
-       _xfs_buf_ioend(bp, 1);
+       if (atomic_dec_and_test(&bp->b_io_remaining) == 1)
+               xfs_buf_ioend_async(bp);
        bio_put(bio);
 }
 
@@ -1423,15 +1411,17 @@ xfs_buf_iorequest(
        /*
         * If _xfs_buf_ioapply failed or we are doing synchronous IO that
         * completes extremely quickly, we can get back here with only the IO
-        * reference we took above. _xfs_buf_ioend will drop it to zero. Run
-        * completion processing synchronously so that we don't return to the
-        * caller with completion still pending. This avoids unnecessary context
-        * switches associated with the end_io workqueue.
+        * reference we took above. If we drop it to zero, run completion
+        * processing synchronously so that we don't return to the caller with
+        * completion still pending. This avoids unnecessary context switches
+        * associated with the end_io workqueue.
         */
-       if (bp->b_error || !(bp->b_flags & XBF_ASYNC))
-               _xfs_buf_ioend(bp, 0);
-       else
-               _xfs_buf_ioend(bp, 1);
+       if (atomic_dec_and_test(&bp->b_io_remaining) == 1) {
+               if (bp->b_error || !(bp->b_flags & XBF_ASYNC))
+                       xfs_buf_ioend(bp);
+               else
+                       xfs_buf_ioend_async(bp);
+       }
 
        xfs_buf_rele(bp);
 }
diff --git a/fs/xfs/xfs_buf.h b/fs/xfs/xfs_buf.h
index c753183900b369b9dbbb377194c3e7b0fc3c9343..4585c1595a987fb6d0b4f486098d68c229c01cea 100644
@@ -286,7 +286,7 @@ extern void xfs_buf_unlock(xfs_buf_t *);
 
 /* Buffer Read and Write Routines */
 extern int xfs_bwrite(struct xfs_buf *bp);
-extern void xfs_buf_ioend(xfs_buf_t *, int);
+extern void xfs_buf_ioend(struct xfs_buf *bp);
 extern void xfs_buf_ioerror(xfs_buf_t *, int);
 extern void xfs_buf_ioerror_alert(struct xfs_buf *, const char *func);
 extern void xfs_buf_iorequest(xfs_buf_t *);
diff --git a/fs/xfs/xfs_buf_item.c b/fs/xfs/xfs_buf_item.c
index 76007deed31fe4455d80956f5debf82faa8baf96..4fd41b58e6d2435d2b4b2250e9d6182d664c39f3 100644
@@ -491,7 +491,7 @@ xfs_buf_item_unpin(
                xfs_buf_ioerror(bp, -EIO);
                XFS_BUF_UNDONE(bp);
                xfs_buf_stale(bp);
-               xfs_buf_ioend(bp, 0);
+               xfs_buf_ioend(bp);
        }
 }
 
@@ -1115,7 +1115,7 @@ do_callbacks:
        xfs_buf_do_callbacks(bp);
        bp->b_fspriv = NULL;
        bp->b_iodone = NULL;
-       xfs_buf_ioend(bp, 0);
+       xfs_buf_ioend(bp);
 }
 
 /*
diff --git a/fs/xfs/xfs_inode.c b/fs/xfs/xfs_inode.c
index fea3c92fb3f0603b5ca44bb86a5a18dcaaac6240..00d210bbf3c38143a648686400e510436e88e1c9 100644
@@ -3056,7 +3056,7 @@ cluster_corrupt_out:
                        XFS_BUF_UNDONE(bp);
                        xfs_buf_stale(bp);
                        xfs_buf_ioerror(bp, -EIO);
-                       xfs_buf_ioend(bp, 0);
+                       xfs_buf_ioend(bp);
                } else {
                        xfs_buf_stale(bp);
                        xfs_buf_relse(bp);
diff --git a/fs/xfs/xfs_log.c b/fs/xfs/xfs_log.c
index 85f36f212641ba7ae6dfc4137dcdc99f4bec1f3d..3567396f4428dc1dbd5f34433cf9ca8d9ebd0eaf 100644
@@ -1678,7 +1678,7 @@ xlog_bdstrat(
        if (iclog->ic_state & XLOG_STATE_IOERROR) {
                xfs_buf_ioerror(bp, -EIO);
                xfs_buf_stale(bp);
-               xfs_buf_ioend(bp, 0);
+               xfs_buf_ioend(bp);
                /*
                 * It would seem logical to return EIO here, but we rely on
                 * the log state machine to propagate I/O errors instead of
diff --git a/fs/xfs/xfs_log_recover.c b/fs/xfs/xfs_log_recover.c
index 1fd5787add9924d7ac726329f2ee88c13a90422d..4ba19bf7da1f30adbe36225264c1cd540baf5ede 100644
@@ -383,7 +383,7 @@ xlog_recover_iodone(
                                        SHUTDOWN_META_IO_ERROR);
        }
        bp->b_iodone = NULL;
-       xfs_buf_ioend(bp, 0);
+       xfs_buf_ioend(bp);
 }
 
 /*