bp->b_flags &= ~(XBF_READ | XBF_WRITE | XBF_READ_AHEAD);
+ /*
+ * Pull in IO completion errors now. We are guaranteed to be running
+ * single threaded, so we don't need the lock to read b_io_error.
+ */
+ if (!bp->b_error && bp->b_io_error)
+ xfs_buf_ioerror(bp, bp->b_io_error);
+
/* Only validate buffers that were read without errors */
if (read && !bp->b_error && bp->b_ops) {
ASSERT(!bp->b_iodone);
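To make the two halves of this scheme concrete: each bio completion may run concurrently on a different CPU, so the first error is recorded into b_io_error under b_lock (see the next hunk), while xfs_buf_ioend() runs exactly once, after the last bio has completed, and can therefore read b_io_error without taking the lock. Below is a minimal userspace sketch of the same "record the first error under a lock, fold it in once at final completion" pattern using POSIX threads; every name in it (demo_buf, bio_complete, and so on) is illustrative and is not XFS code.

/*
 * Userspace sketch only: concurrent "bio" completions record the
 * first error under a lock; a single final completion folds it into
 * the visible error state locklessly.
 */
#include <pthread.h>
#include <stdio.h>

struct demo_buf {
	pthread_mutex_t	lock;		/* stands in for b_lock */
	int		io_error;	/* stands in for b_io_error */
	int		error;		/* stands in for b_error */
};

/* One of possibly many concurrent per-bio completions. */
static void *bio_complete(void *arg)
{
	struct demo_buf *bp = arg;
	int error = -5;			/* pretend this bio saw -EIO */

	if (error) {
		pthread_mutex_lock(&bp->lock);
		if (!bp->io_error)	/* keep only the first error */
			bp->io_error = error;
		pthread_mutex_unlock(&bp->lock);
	}
	return NULL;
}

int main(void)
{
	struct demo_buf bp = { .lock = PTHREAD_MUTEX_INITIALIZER };
	pthread_t threads[4];
	int i;

	for (i = 0; i < 4; i++)
		pthread_create(&threads[i], NULL, bio_complete, &bp);
	for (i = 0; i < 4; i++)
		pthread_join(threads[i], NULL);

	/*
	 * Final completion: all bios are done, nothing else can write
	 * io_error, so it may be read without the lock.
	 */
	if (!bp.error && bp.io_error)
		bp.error = bp.io_error;

	printf("final error: %d\n", bp.error);
	return 0;
}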
/*
 * don't overwrite existing errors - otherwise we can lose errors on
 * buffers that require multiple bios to complete.
 */
- if (!bp->b_error)
- xfs_buf_ioerror(bp, error);
+ if (error) {
+ spin_lock(&bp->b_lock);
+ if (!bp->b_io_error)
+ bp->b_io_error = error;
+ spin_unlock(&bp->b_lock);
+ }
if (!bp->b_error && xfs_buf_is_vmapped(bp) && (bp->b_flags & XBF_READ))
invalidate_kernel_vmap_range(bp->b_addr, xfs_buf_vmap_len(bp));
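Note the design choice in the hunk above: the spinlock is taken only inside the error branch, so completions of successful bios pay no locking cost at all. Writers of b_io_error serialize against each other on b_lock; the lockless read happens only later, in xfs_buf_ioend(), once no bio can still be in flight.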
if (bp->b_flags & XBF_WRITE)
xfs_buf_wait_unpin(bp);
+ /* clear the internal error state to avoid spurious errors */
+ bp->b_io_error = 0;
+
/*
* Take references to the buffer. For XBF_ASYNC buffers, holding a
* reference for as long as submission takes is all that is necessary
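The reset of b_io_error at submission matters for buffers that are resubmitted after a failure: if b_io_error survived across submissions, a stale error from an earlier I/O could be folded into the result of a later, successful one. A single-threaded sketch of that hazard follows; the names are again purely illustrative, not XFS code.

/*
 * Userspace sketch only: without the reset marked "the fix" below, a
 * stale io_error from a failed attempt would be reported again after
 * a successful retry.
 */
#include <stdio.h>

struct demo_buf {
	int io_error;	/* per-I/O error, set by completions */
	int error;	/* visible error, set at final completion */
};

static void submit(struct demo_buf *bp, int simulated_error)
{
	bp->io_error = 0;	/* the fix: forget any prior I/O's error */
	bp->error = 0;

	/* "I/O" runs: a failing bio records its error. */
	if (simulated_error && !bp->io_error)
		bp->io_error = simulated_error;

	/* Final completion folds io_error into the visible error. */
	if (!bp->error && bp->io_error)
		bp->error = bp->io_error;
}

int main(void)
{
	struct demo_buf bp = { 0 };

	submit(&bp, -5);	/* first attempt fails with -EIO */
	printf("first submit: %d\n", bp.error);

	submit(&bp, 0);		/* retry succeeds */
	printf("retry: %d\n", bp.error);	/* 0, not a stale -5 */
	return 0;
}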
struct list_head b_lru; /* lru list */
spinlock_t b_lock; /* internal state lock */
unsigned int b_state; /* internal state flags */
+ int b_io_error; /* internal IO error state */
wait_queue_head_t b_waiters; /* unpin waiters */
struct list_head b_list;
struct xfs_perag *b_pag; /* contains rbtree root */