sema_init(&bp->b_sema, 0); /* held, no waiters */
XB_SET_OWNER(bp);
bp->b_target = target;
- bp->b_file_offset = blkno << BBSHIFT;
+
/*
* Set buffer_length and count_desired to the same value initially.
* I/O routines should use count_desired, which will be the same in
* most cases but may be reset as needed later.
*/
}
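
The assignment removed above stored nothing new: blkno << BBSHIFT is exactly BBTOB(blkno), the byte form of the 512-byte block number already kept in b_bn, so the byte offset can always be derived on demand. A minimal user-space sketch of that equivalence, using local copies of the XFS basic-block macros (BBSHIFT is 9; the uint64_t cast and the main() harness are illustrative additions, not the kernel definitions):

#include <assert.h>
#include <stdint.h>
#include <stdio.h>

#define BBSHIFT		9				/* 512-byte basic blocks */
#define BBTOB(bbs)	((uint64_t)(bbs) << BBSHIFT)	/* basic blocks -> bytes */

int main(void)
{
	uint64_t blkno = 123456;	/* hypothetical disk address (daddr) */

	/* The deleted b_file_offset was always derivable from the block number. */
	assert((blkno << BBSHIFT) == BBTOB(blkno));
	printf("blkno %llu -> byte offset %llu\n",
	       (unsigned long long)blkno, (unsigned long long)BBTOB(blkno));
	return 0;
}
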
use_alloc_page:
- end = bp->b_file_offset + bp->b_buffer_length;
- page_count = xfs_buf_btoc(end) - xfs_buf_btoct(bp->b_file_offset);
+ end = BBTOB(bp->b_bn) + bp->b_buffer_length;
+ page_count = xfs_buf_btoc(end) - xfs_buf_btoct(BBTOB(bp->b_bn));
error = _xfs_buf_get_pages(bp, page_count, flags);
if (unlikely(error))
return error;
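
The page count in the new hunk converts b_bn to a byte range first: xfs_buf_btoc() rounds a byte count up to whole pages while xfs_buf_btoct() truncates, so a buffer whose start or end straddles a page boundary still gets every page it touches. A standalone sketch of that arithmetic, with local round-up/truncate helpers standing in for those macros and purely illustrative page and buffer sizes:

#include <stdint.h>
#include <stdio.h>

#define PAGE_SHIFT	12
#define PAGE_SIZE	(1UL << PAGE_SHIFT)
#define BBSHIFT		9
#define BBTOB(bbs)	((uint64_t)(bbs) << BBSHIFT)

#define btoc(b)		(((b) + PAGE_SIZE - 1) >> PAGE_SHIFT)	/* round up to pages */
#define btoct(b)	((b) >> PAGE_SHIFT)			/* truncate to pages */

int main(void)
{
	uint64_t bn = 7;		/* byte 3584: not page aligned */
	uint64_t len = 4096;		/* one page worth of data */
	uint64_t start = BBTOB(bn);
	uint64_t end = start + len;

	/* The range straddles a page boundary, so two pages are needed. */
	printf("page_count = %llu\n",
	       (unsigned long long)(btoc(end) - btoct(start)));
	return 0;
}
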
xfs_buf_flags_t flags,
xfs_buf_t *new_bp)
{
- xfs_off_t offset;
size_t numbytes;
struct xfs_perag *pag;
struct rb_node **rbp;
struct rb_node *parent;
xfs_buf_t *bp;
- offset = BBTOB(blkno);
numbytes = BBTOB(numblks);
/* Check for IOs smaller than the sector size / not sector aligned */
ASSERT(!(numbytes < (1 << btp->bt_sshift)));
- ASSERT(!(offset & (xfs_off_t)btp->bt_smask));
+ ASSERT(!(BBTOB(blkno) & (xfs_off_t)btp->bt_smask));
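
bt_smask is the sector-size-minus-one mask of the buffer target, so the rewritten assertion checks that the byte form of blkno starts on a sector boundary without going through a cached offset. A sketch under the assumption of a hypothetical 4096-byte-sector target (bt_sshift = 12), where a valid blkno must be a multiple of 8 basic blocks:

#include <assert.h>
#include <stdint.h>

#define BBSHIFT		9
#define BBTOB(bbs)	((uint64_t)(bbs) << BBSHIFT)

int main(void)
{
	unsigned int bt_sshift = 12;			/* assumed 4096-byte sectors */
	uint64_t bt_smask = (1ULL << bt_sshift) - 1;	/* 0xfff */
	uint64_t numbytes = 8192;			/* two sectors of I/O */
	uint64_t blkno = 16;				/* 16 * 512 = 8192: sector aligned */

	assert(!(numbytes < (1ULL << bt_sshift)));	/* at least one full sector */
	assert(!(BBTOB(blkno) & bt_smask));		/* start is sector aligned */
	return 0;
}
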
/* get tree root */
pag = xfs_perag_get(btp->bt_mount,
	xfs_daddr_to_agno(btp->bt_mount, blkno));
parent = *rbp;
bp = rb_entry(parent, struct xfs_buf, b_rbnode);
- if (offset < bp->b_file_offset)
+ if (blkno < bp->b_bn)
rbp = &(*rbp)->rb_left;
- else if (offset > bp->b_file_offset)
+ else if (blkno > bp->b_bn)
rbp = &(*rbp)->rb_right;
else {
/*
- * found a block offset match. If the range doesn't
+ * found a block number match. If the range doesn't
* match, the only way this is allowed is if the buffer
* in the cache is stale and the transaction that made
* it stale has not yet committed. i.e. we are
bp->b_pages = NULL;
bp->b_page_count = 0;
bp->b_addr = NULL;
- bp->b_file_offset = 0;
bp->b_buffer_length = bp->b_count_desired = numblks << BBSHIFT;
bp->b_bn = XFS_BUF_DADDR_NULL;
bp->b_flags &= ~XBF_MAPPED;
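
With b_file_offset gone, an emptied or uncached buffer is identified purely by b_bn holding XFS_BUF_DADDR_NULL, which xfs_buf.h defines as ((xfs_daddr_t)(-1LL)), a value no real disk address can take. A small sketch of testing for that sentinel; the buf_unmapped() helper is hypothetical:

#include <assert.h>
#include <stdbool.h>
#include <stdint.h>

typedef int64_t xfs_daddr_t;

/* Mirrors the xfs_buf.h "no disk address" sentinel. */
#define XFS_BUF_DADDR_NULL	((xfs_daddr_t)(-1LL))

/* Hypothetical helper: is this buffer still unbound from a disk address? */
static bool buf_unmapped(xfs_daddr_t bn)
{
	return bn == XFS_BUF_DADDR_NULL;
}

int main(void)
{
	xfs_daddr_t b_bn = XFS_BUF_DADDR_NULL;	/* freshly emptied buffer */

	assert(buf_unmapped(b_bn));
	b_bn = 128;				/* later bound to a real daddr */
	assert(!buf_unmapped(b_bn));
	return 0;
}
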
* fast-path on locking.
*/
struct rb_node b_rbnode; /* rbtree node */
- xfs_off_t b_file_offset; /* offset in file */
+ xfs_daddr_t b_bn; /* block number for I/O */
size_t b_buffer_length;/* size of buffer in bytes */
atomic_t b_hold; /* reference count */
atomic_t b_lru_ref; /* lru reclaim ref count */
struct list_head b_list;
struct xfs_perag *b_pag; /* contains rbtree root */
xfs_buftarg_t *b_target; /* buffer target (device) */
- xfs_daddr_t b_bn; /* block number for I/O */
size_t b_count_desired;/* desired transfer size */
void *b_addr; /* virtual address of buffer */
struct work_struct b_iodone_work;
#define XFS_BUF_ADDR(bp) ((bp)->b_bn)
#define XFS_BUF_SET_ADDR(bp, bno) ((bp)->b_bn = (xfs_daddr_t)(bno))
-#define XFS_BUF_OFFSET(bp) ((bp)->b_file_offset)
-#define XFS_BUF_SET_OFFSET(bp, off) ((bp)->b_file_offset = (off))
#define XFS_BUF_COUNT(bp) ((bp)->b_count_desired)
#define XFS_BUF_SET_COUNT(bp, cnt) ((bp)->b_count_desired = (cnt))
#define XFS_BUF_SIZE(bp) ((bp)->b_buffer_length)
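
With XFS_BUF_OFFSET()/XFS_BUF_SET_OFFSET() deleted, a caller that still needs a byte offset derives it from the block number returned by XFS_BUF_ADDR(). A simplified stand-in (not the real struct xfs_buf) showing that conversion:

#include <stdint.h>
#include <stdio.h>

#define BBSHIFT			9
#define BBTOB(bbs)		((uint64_t)(bbs) << BBSHIFT)

typedef int64_t xfs_daddr_t;

struct fake_xfs_buf {
	xfs_daddr_t	b_bn;		/* block number for I/O */
};

#define XFS_BUF_ADDR(bp)	((bp)->b_bn)

int main(void)
{
	struct fake_xfs_buf buf = { .b_bn = 24 };

	/* Old callers read XFS_BUF_OFFSET(bp); now the daddr is converted on demand. */
	printf("byte offset = %llu\n",
	       (unsigned long long)BBTOB(XFS_BUF_ADDR(&buf)));
	return 0;
}
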