struct xfs_buf *
xfs_buf_alloc(
struct xfs_buftarg *target,
- xfs_off_t range_base,
- size_t range_length,
+ xfs_daddr_t blkno,
+ size_t numblks,
xfs_buf_flags_t flags)
{
struct xfs_buf *bp;
sema_init(&bp->b_sema, 0); /* held, no waiters */
XB_SET_OWNER(bp);
bp->b_target = target;
- bp->b_file_offset = range_base;
+ bp->b_file_offset = blkno << BBSHIFT;
/*
* Set buffer_length and count_desired to the same value initially.
* I/O routines should use count_desired, which will be the same in
* most cases but may be reset (e.g. XFS recovery).
*/
- bp->b_buffer_length = bp->b_count_desired = range_length;
+ bp->b_buffer_length = bp->b_count_desired = numblks << BBSHIFT;
bp->b_flags = flags;
+
+ /*
+ * We do not set the block number here in the buffer because we have not
+ * finished initialising the buffer. We insert the buffer into the cache
+ * in this state, so this ensures that we are unable to do IO on a
+ * buffer that hasn't been fully initialised.
+ */
bp->b_bn = XFS_BUF_DADDR_NULL;
atomic_set(&bp->b_pin_count, 0);
init_waitqueue_head(&bp->b_waiters);
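Note: blkno and numblks are measured in 512-byte basic blocks; the byte values the buffer fields still carry are derived with << BBSHIFT. For orientation, the conversion helpers this relies on look roughly like the following (paraphrased from fs/xfs/xfs_linux.h, not part of this patch):

	#define BBSHIFT		9			/* one basic block is 512 bytes */
	#define BBSIZE		(1 << BBSHIFT)
	#define BBTOB(bbs)	((bbs) << BBSHIFT)	/* basic blocks to bytes */
	#define BTOBB(bytes)	(((__u64)(bytes) + BBSIZE - 1) >> BBSHIFT)
						/* bytes to basic blocks, rounded up */

In other words, xfs_buf_alloc(target, blkno, numblks, flags) covers the same byte range that the old xfs_buf_alloc(target, blkno << BBSHIFT, numblks << BBSHIFT, flags) call did.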
*/
xfs_buf_t *
_xfs_buf_find(
- xfs_buftarg_t *btp, /* block device target */
- xfs_off_t ioff, /* starting offset of range */
- size_t isize, /* length of range */
+ struct xfs_buftarg *btp,
+ xfs_daddr_t blkno,
+ size_t numblks,
xfs_buf_flags_t flags,
xfs_buf_t *new_bp)
{
- xfs_off_t range_base;
- size_t range_length;
+ xfs_off_t offset;
+ size_t numbytes;
struct xfs_perag *pag;
struct rb_node **rbp;
struct rb_node *parent;
xfs_buf_t *bp;
- range_base = (ioff << BBSHIFT);
- range_length = (isize << BBSHIFT);
+ offset = BBTOB(blkno);
+ numbytes = BBTOB(numblks);
/* Check for IOs smaller than the sector size / not sector aligned */
- ASSERT(!(range_length < (1 << btp->bt_sshift)));
- ASSERT(!(range_base & (xfs_off_t)btp->bt_smask));
+ ASSERT(!(numbytes < (1 << btp->bt_sshift)));
+ ASSERT(!(offset & (xfs_off_t)btp->bt_smask));
/* get tree root */
pag = xfs_perag_get(btp->bt_mount,
- xfs_daddr_to_agno(btp->bt_mount, ioff));
+ xfs_daddr_to_agno(btp->bt_mount, blkno));
/* walk tree */
spin_lock(&pag->pag_buf_lock);
parent = *rbp;
bp = rb_entry(parent, struct xfs_buf, b_rbnode);
- if (range_base < bp->b_file_offset)
+ if (offset < bp->b_file_offset)
rbp = &(*rbp)->rb_left;
- else if (range_base > bp->b_file_offset)
+ else if (offset > bp->b_file_offset)
rbp = &(*rbp)->rb_right;
else {
/*
* reallocating a busy extent. Skip this buffer and
* continue searching to the right for an exact match.
*/
- if (bp->b_buffer_length != range_length) {
+ if (bp->b_buffer_length != numbytes) {
ASSERT(bp->b_flags & XBF_STALE);
rbp = &(*rbp)->rb_right;
continue;
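A hedged worked example of the lookup conversion and the sector checks above (the numbers are illustrative, not taken from this patch):

	/* 512-byte-sector target: bt_sshift == 9, bt_smask == 0x1ff */
	xfs_daddr_t	blkno = 8;			/* basic blocks */
	size_t		numblks = 8;
	xfs_off_t	offset = BBTOB(blkno);		/* 8 << 9 == 4096 bytes */
	size_t		numbytes = BBTOB(numblks);	/* 4096 bytes */
	/* 4096 >= 512 and (4096 & 0x1ff) == 0, so both ASSERTs hold and the
	 * rbtree walk proceeds on the byte offset exactly as before. */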
*/
struct xfs_buf *
xfs_buf_get(
- xfs_buftarg_t *target,/* target for buffer */
- xfs_off_t ioff, /* starting offset of range */
- size_t isize, /* length of range */
+ xfs_buftarg_t *target,
+ xfs_daddr_t blkno,
+ size_t numblks,
xfs_buf_flags_t flags)
{
struct xfs_buf *bp;
struct xfs_buf *new_bp;
int error = 0;
- bp = _xfs_buf_find(target, ioff, isize, flags, NULL);
+ bp = _xfs_buf_find(target, blkno, numblks, flags, NULL);
if (likely(bp))
goto found;
- new_bp = xfs_buf_alloc(target, ioff << BBSHIFT, isize << BBSHIFT,
- flags);
+ new_bp = xfs_buf_alloc(target, blkno, numblks, flags);
if (unlikely(!new_bp))
return NULL;
return NULL;
}
- bp = _xfs_buf_find(target, ioff, isize, flags, new_bp);
+ bp = _xfs_buf_find(target, blkno, numblks, flags, new_bp);
if (!bp) {
xfs_buf_free(new_bp);
return NULL;
* Now we have a workable buffer, fill in the block number so
* that we can do IO on it.
*/
- bp->b_bn = ioff;
+ bp->b_bn = blkno;
bp->b_count_desired = bp->b_buffer_length;
found:
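Note that the cached lookup interfaces already took their offset and length in basic blocks (the old ioff/isize were shifted by BBSHIFT inside _xfs_buf_find), so for xfs_buf_get and xfs_buf_read only the parameter types and names change. A hypothetical caller, shown for illustration only, reading one filesystem block with the new signature:

	/* illustrative caller, not part of this patch */
	bp = xfs_buf_read(mp->m_ddev_targp, XFS_FSB_TO_DADDR(mp, fsbno),
			  XFS_FSB_TO_BB(mp, 1), XBF_TRYLOCK);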
xfs_buf_t *
xfs_buf_read(
xfs_buftarg_t *target,
- xfs_off_t ioff,
- size_t isize,
+ xfs_daddr_t blkno,
+ size_t numblks,
xfs_buf_flags_t flags)
{
xfs_buf_t *bp;
flags |= XBF_READ;
- bp = xfs_buf_get(target, ioff, isize, flags);
+ bp = xfs_buf_get(target, blkno, numblks, flags);
if (bp) {
trace_xfs_buf_read(bp, flags, _RET_IP_);
void
xfs_buf_readahead(
xfs_buftarg_t *target,
- xfs_off_t ioff,
- size_t isize)
+ xfs_daddr_t blkno,
+ size_t numblks)
{
if (bdi_read_congested(target->bt_bdi))
return;
- xfs_buf_read(target, ioff, isize,
+ xfs_buf_read(target, blkno, numblks,
XBF_TRYLOCK|XBF_ASYNC|XBF_READ_AHEAD|XBF_DONT_BLOCK);
}
*/
struct xfs_buf *
xfs_buf_read_uncached(
- struct xfs_mount *mp,
struct xfs_buftarg *target,
xfs_daddr_t daddr,
- size_t length,
+ size_t numblks,
int flags)
{
xfs_buf_t *bp;
int error;
- bp = xfs_buf_get_uncached(target, length, flags);
+ bp = xfs_buf_get_uncached(target, numblks, flags);
if (!bp)
return NULL;
XFS_BUF_SET_ADDR(bp, daddr);
XFS_BUF_READ(bp);
- xfsbdstrat(mp, bp);
+ xfsbdstrat(target->bt_mount, bp);
error = xfs_buf_iowait(bp);
if (error) {
xfs_buf_relse(bp);
void
xfs_buf_set_empty(
struct xfs_buf *bp,
- size_t len)
+ size_t numblks)
{
if (bp->b_pages)
_xfs_buf_free_pages(bp);
bp->b_page_count = 0;
bp->b_addr = NULL;
bp->b_file_offset = 0;
- bp->b_buffer_length = bp->b_count_desired = len;
+ bp->b_buffer_length = bp->b_count_desired = numblks << BBSHIFT;
bp->b_bn = XFS_BUF_DADDR_NULL;
bp->b_flags &= ~XBF_MAPPED;
}
xfs_buf_t *
xfs_buf_get_uncached(
struct xfs_buftarg *target,
- size_t len,
+ size_t numblks,
int flags)
{
- unsigned long page_count = PAGE_ALIGN(len) >> PAGE_SHIFT;
+ unsigned long page_count;
int error, i;
xfs_buf_t *bp;
- bp = xfs_buf_alloc(target, 0, len, 0);
+ bp = xfs_buf_alloc(target, 0, numblks, 0);
if (unlikely(bp == NULL))
goto fail;
+ page_count = PAGE_ALIGN(numblks << BBSHIFT) >> PAGE_SHIFT;
error = _xfs_buf_get_pages(bp, page_count, 0);
if (error)
goto fail_free_buf;
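The page count is now derived from the block count at the point of use. A quick sanity check of the arithmetic (illustrative values only): with 4 KiB pages, numblks = 8 is 8 << BBSHIFT = 4096 bytes, so the result matches what PAGE_ALIGN(len) gave for len = 4096 before:

	/* e.g. PAGE_SIZE == 4096 */
	page_count = PAGE_ALIGN(8 << BBSHIFT) >> PAGE_SHIFT;	/* == 1 */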
/* Finding and Reading Buffers */
-extern xfs_buf_t *_xfs_buf_find(xfs_buftarg_t *, xfs_off_t, size_t,
- xfs_buf_flags_t, xfs_buf_t *);
+struct xfs_buf *_xfs_buf_find(struct xfs_buftarg *target, xfs_daddr_t blkno,
+ size_t numblks, xfs_buf_flags_t flags,
+ struct xfs_buf *new_bp);
#define xfs_incore(buftarg,blkno,len,lockit) \
_xfs_buf_find(buftarg, blkno ,len, lockit, NULL)
-extern xfs_buf_t *xfs_buf_get(xfs_buftarg_t *, xfs_off_t, size_t,
- xfs_buf_flags_t);
-extern xfs_buf_t *xfs_buf_read(xfs_buftarg_t *, xfs_off_t, size_t,
- xfs_buf_flags_t);
-
-struct xfs_buf *xfs_buf_alloc(struct xfs_buftarg *, xfs_off_t, size_t,
- xfs_buf_flags_t);
-extern void xfs_buf_set_empty(struct xfs_buf *bp, size_t len);
-extern xfs_buf_t *xfs_buf_get_uncached(struct xfs_buftarg *, size_t, int);
-extern int xfs_buf_associate_memory(xfs_buf_t *, void *, size_t);
-extern void xfs_buf_hold(xfs_buf_t *);
-extern void xfs_buf_readahead(xfs_buftarg_t *, xfs_off_t, size_t);
-struct xfs_buf *xfs_buf_read_uncached(struct xfs_mount *mp,
- struct xfs_buftarg *target,
- xfs_daddr_t daddr, size_t length, int flags);
+struct xfs_buf *xfs_buf_get(struct xfs_buftarg *target, xfs_daddr_t blkno,
+ size_t numblks, xfs_buf_flags_t flags);
+struct xfs_buf *xfs_buf_read(struct xfs_buftarg *target, xfs_daddr_t blkno,
+ size_t numblks, xfs_buf_flags_t flags);
+void xfs_buf_readahead(struct xfs_buftarg *target, xfs_daddr_t blkno,
+ size_t numblks);
+
+struct xfs_buf *xfs_buf_get_empty(struct xfs_buftarg *target, size_t numblks);
+struct xfs_buf *xfs_buf_alloc(struct xfs_buftarg *target, xfs_daddr_t blkno,
+ size_t numblks, xfs_buf_flags_t flags);
+void xfs_buf_set_empty(struct xfs_buf *bp, size_t numblks);
+int xfs_buf_associate_memory(struct xfs_buf *bp, void *mem, size_t length);
+
+struct xfs_buf *xfs_buf_get_uncached(struct xfs_buftarg *target, size_t numblks,
+ int flags);
+struct xfs_buf *xfs_buf_read_uncached(struct xfs_buftarg *target,
+ xfs_daddr_t daddr, size_t numblks, int flags);
+void xfs_buf_hold(struct xfs_buf *bp);
/* Releasing Buffers */
extern void xfs_buf_free(xfs_buf_t *);
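The remaining hunks convert callers to block-based lengths. The patterns, summarised for orientation (a hedged digest of the changes below, not new code):

	BTOBB(byte_count)	/* byte lengths: sector_size, l_iclog_size, sb_blocksize */
	XFS_FSS_TO_BB(mp, 1)	/* one sector, expressed in basic blocks */
	XFS_FSB_TO_BB(mp, 1)	/* one filesystem block, in basic blocks */
	nbblks			/* counts already in basic blocks pass through unchanged */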
if ((error = xfs_sb_validate_fsb_count(&mp->m_sb, nb)))
return error;
dpct = pct - mp->m_sb.sb_imax_pct;
- bp = xfs_buf_read_uncached(mp, mp->m_ddev_targp,
+ bp = xfs_buf_read_uncached(mp->m_ddev_targp,
XFS_FSB_TO_BB(mp, nb) - XFS_FSS_TO_BB(mp, 1),
- BBTOB(XFS_FSS_TO_BB(mp, 1)), 0);
+ XFS_FSS_TO_BB(mp, 1), 0);
if (!bp)
return EIO;
xfs_buf_relse(bp);
xlog_get_iclog_buffer_size(mp, log);
error = ENOMEM;
- bp = xfs_buf_alloc(mp->m_logdev_targp, 0, log->l_iclog_size, 0);
+ bp = xfs_buf_alloc(mp->m_logdev_targp, 0, BTOBB(log->l_iclog_size), 0);
if (!bp)
goto out_free_log;
bp->b_iodone = xlog_iodone;
prev_iclog = iclog;
bp = xfs_buf_get_uncached(mp->m_logdev_targp,
- log->l_iclog_size, 0);
+ BTOBB(log->l_iclog_size), 0);
if (!bp)
goto out_free_iclog;
* always need to ensure that the extra buffer does not point to memory
* owned by another log buffer before we free it.
*/
- xfs_buf_set_empty(log->l_xbuf, log->l_iclog_size);
+ xfs_buf_set_empty(log->l_xbuf, BTOBB(log->l_iclog_size));
xfs_buf_free(log->l_xbuf);
iclog = log->l_iclog;
nbblks += log->l_sectBBsize;
nbblks = round_up(nbblks, log->l_sectBBsize);
- bp = xfs_buf_get_uncached(log->l_mp->m_logdev_targp, BBTOB(nbblks), 0);
+ bp = xfs_buf_get_uncached(log->l_mp->m_logdev_targp, nbblks, 0);
if (bp)
xfs_buf_unlock(bp);
return bp;
sector_size = xfs_getsize_buftarg(mp->m_ddev_targp);
reread:
- bp = xfs_buf_read_uncached(mp, mp->m_ddev_targp,
- XFS_SB_DADDR, sector_size, 0);
+ bp = xfs_buf_read_uncached(mp->m_ddev_targp, XFS_SB_DADDR,
+ BTOBB(sector_size), 0);
if (!bp) {
if (loud)
xfs_warn(mp, "SB buffer read failed");
xfs_warn(mp, "filesystem size mismatch detected");
return XFS_ERROR(EFBIG);
}
- bp = xfs_buf_read_uncached(mp, mp->m_ddev_targp,
+ bp = xfs_buf_read_uncached(mp->m_ddev_targp,
d - XFS_FSS_TO_BB(mp, 1),
- BBTOB(XFS_FSS_TO_BB(mp, 1)), 0);
+ XFS_FSS_TO_BB(mp, 1), 0);
if (!bp) {
xfs_warn(mp, "last sector read failed");
return EIO;
xfs_warn(mp, "log size mismatch detected");
return XFS_ERROR(EFBIG);
}
- bp = xfs_buf_read_uncached(mp, mp->m_logdev_targp,
+ bp = xfs_buf_read_uncached(mp->m_logdev_targp,
d - XFS_FSB_TO_BB(mp, 1),
- XFS_FSB_TO_B(mp, 1), 0);
+ XFS_FSB_TO_BB(mp, 1), 0);
if (!bp) {
xfs_warn(mp, "log device read failed");
return EIO;
/*
* Read in the last block of the device, make sure it exists.
*/
- bp = xfs_buf_read_uncached(mp, mp->m_rtdev_targp,
+ bp = xfs_buf_read_uncached(mp->m_rtdev_targp,
XFS_FSB_TO_BB(mp, nrblocks - 1),
- XFS_FSB_TO_B(mp, 1), 0);
+ XFS_FSB_TO_BB(mp, 1), 0);
if (!bp)
return EIO;
xfs_buf_relse(bp);
(unsigned long long) mp->m_sb.sb_rblocks);
return XFS_ERROR(EFBIG);
}
- bp = xfs_buf_read_uncached(mp, mp->m_rtdev_targp,
+ bp = xfs_buf_read_uncached(mp->m_rtdev_targp,
d - XFS_FSB_TO_BB(mp, 1),
- XFS_FSB_TO_B(mp, 1), 0);
+ XFS_FSB_TO_BB(mp, 1), 0);
if (!bp) {
xfs_warn(mp, "realtime device size check failed");
return EIO;
bp = xfs_buf_get_uncached(XFS_IS_REALTIME_INODE(ip) ?
mp->m_rtdev_targp : mp->m_ddev_targp,
- mp->m_sb.sb_blocksize, XBF_DONT_BLOCK);
+ BTOBB(mp->m_sb.sb_blocksize), XBF_DONT_BLOCK);
if (!bp)
return XFS_ERROR(ENOMEM);
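As a final check of this last conversion (illustrative numbers, not from the patch): for a 4096-byte sb_blocksize, the uncached buffer still spans exactly one filesystem block, as the old byte-based call did.

	/* illustrative only: sb_blocksize == 4096 */
	BTOBB(4096)	/* == (4096 + 511) >> 9 == 8 basic blocks */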