#endif
}
-static void
+void
xfs_agfl_write_verify(
struct xfs_buf *bp)
{
xfs_agfl_verify(bp);
}
-void
+static void
xfs_agfl_read_verify(
struct xfs_buf *bp)
{
}
}
-static void
+void
xfs_agf_write_verify(
struct xfs_buf *bp)
{
xfs_agf_verify(bp);
}
-void
+static void
xfs_agf_read_verify(
struct xfs_buf *bp)
{
xfs_extlen_t *len, /* output: length of extent */
int *stat); /* output: success/failure */
+void xfs_agf_write_verify(struct xfs_buf *bp);
+void xfs_agfl_write_verify(struct xfs_buf *bp);
+
#endif /* __XFS_ALLOC_H__ */
.init_ptr_from_cur = xfs_allocbt_init_ptr_from_cur,
.key_diff = xfs_allocbt_key_diff,
.read_verify = xfs_allocbt_read_verify,
+ .write_verify = xfs_allocbt_write_verify,
#ifdef DEBUG
.keys_inorder = xfs_allocbt_keys_inorder,
.recs_inorder = xfs_allocbt_recs_inorder,
XFS_ATTR_FORK);
if (error)
goto out;
- ASSERT(bp2 != NULL);
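+ /* carry the write verifier over from the original buffer */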
+ bp2->b_pre_io = bp1->b_pre_io;
memcpy(bp2->b_addr, bp1->b_addr, XFS_LBSIZE(dp->i_mount));
bp1 = NULL;
xfs_trans_log_buf(args->trans, bp2, 0, XFS_LBSIZE(dp->i_mount) - 1);
XFS_ATTR_FORK);
if (error)
return(error);
- ASSERT(bp != NULL);
+ bp->b_pre_io = xfs_attr_leaf_write_verify;
leaf = bp->b_addr;
memset((char *)leaf, 0, XFS_LBSIZE(dp->i_mount));
hdr = &leaf->hdr;
/*
* Fill in the child block.
*/
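+ /* the child is a bmbt block, so use the bmbt write verifier */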
+ abp->b_pre_io = xfs_bmbt_write_verify;
ablock = XFS_BUF_TO_BLOCK(abp);
ablock->bb_magic = cpu_to_be32(XFS_BMAP_MAGIC);
ablock->bb_level = 0;
ASSERT(args.len == 1);
*firstblock = args.fsbno;
bp = xfs_btree_get_bufl(args.mp, tp, args.fsbno, 0);
+ bp->b_pre_io = xfs_bmbt_write_verify;
memcpy(bp->b_addr, ifp->if_u1.if_data, ifp->if_bytes);
xfs_trans_log_buf(tp, bp, 0, ifp->if_bytes - 1);
xfs_bmap_forkoff_reset(args.mp, ip, whichfork);
}
}
-static void
+void
xfs_bmbt_write_verify(
struct xfs_buf *bp)
{
.init_ptr_from_cur = xfs_bmbt_init_ptr_from_cur,
.key_diff = xfs_bmbt_key_diff,
.read_verify = xfs_bmbt_read_verify,
+ .write_verify = xfs_bmbt_write_verify,
#ifdef DEBUG
.keys_inorder = xfs_bmbt_keys_inorder,
.recs_inorder = xfs_bmbt_recs_inorder,
extern int xfs_bmdr_maxrecs(struct xfs_mount *, int blocklen, int leaf);
extern int xfs_bmbt_maxrecs(struct xfs_mount *, int blocklen, int leaf);
extern void xfs_bmbt_read_verify(struct xfs_buf *bp);
+extern void xfs_bmbt_write_verify(struct xfs_buf *bp);
extern struct xfs_btree_cur *xfs_bmbt_init_cursor(struct xfs_mount *,
struct xfs_trans *, struct xfs_inode *, int);
if (!*bpp)
return ENOMEM;
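+ /* attach the write verifier supplied by this btree's ops */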
+ (*bpp)->b_pre_io = cur->bc_ops->write_verify;
*block = XFS_BUF_TO_BLOCK(*bpp);
return 0;
}
union xfs_btree_key *key);
void (*read_verify)(struct xfs_buf *bp);
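+ /* verify the contents of a buffer before it is written */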
+ void (*write_verify)(struct xfs_buf *bp);
+
#ifdef DEBUG
/* check that k1 is lower than k2 */
int (*keys_inorder)(struct xfs_btree_cur *cur,
xfs_trans_log_buf(tp, bp,
XFS_DA_LOGRANGE(node, &node->hdr, sizeof(node->hdr)));
+ bp->b_pre_io = xfs_da_node_write_verify;
*bpp = bp;
return(0);
}
}
memcpy(node, oldroot, size);
xfs_trans_log_buf(tp, bp, 0, size - 1);
+
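+ /* the new block is a copy of the old root, so reuse its write verifier */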
+ bp->b_pre_io = blk1->bp->b_pre_io;
blk1->bp = bp;
blk1->blkno = blkno;
/*
* Start converting it to block form.
*/
+ dbp->b_pre_io = xfs_dir2_block_write_verify;
hdr->magic = cpu_to_be32(XFS_DIR2_BLOCK_MAGIC);
needlog = 1;
needscan = 0;
kmem_free(sfp);
return error;
}
+ bp->b_pre_io = xfs_dir2_block_write_verify;
hdr = bp->b_addr;
hdr->magic = cpu_to_be32(XFS_DIR2_BLOCK_MAGIC);
/*
return 0;
}
-void
+static void
xfs_dir2_data_verify(
struct xfs_buf *bp)
{
}
}
-static void
+void
xfs_dir2_data_write_verify(
struct xfs_buf *bp)
{
xfs_dir2_data_verify(bp);
}
-void
+static void
xfs_dir2_data_read_verify(
struct xfs_buf *bp)
{
*/
error = xfs_da_get_buf(tp, dp, xfs_dir2_db_to_da(mp, blkno), -1, &bp,
XFS_DATA_FORK);
- if (error) {
+ if (error)
return error;
- }
- ASSERT(bp != NULL);
+ bp->b_pre_io = xfs_dir2_data_write_verify;
/*
* Initialize the header.
xfs_buf_ioend(bp, 0);
}
-static void
+void
xfs_dir2_leafn_write_verify(
struct xfs_buf *bp)
{
/*
* Fix up the block header, make it a data block.
*/
+ dbp->b_pre_io = xfs_dir2_data_write_verify;
hdr->magic = cpu_to_be32(XFS_DIR2_DATA_MAGIC);
if (needscan)
xfs_dir2_data_freescan(mp, hdr, &needlog);
* Get the buffer for the block.
*/
error = xfs_da_get_buf(tp, dp, xfs_dir2_db_to_da(mp, bno), -1, &bp,
- XFS_DATA_FORK);
- if (error) {
+ XFS_DATA_FORK);
+ if (error)
return error;
- }
- ASSERT(bp != NULL);
- leaf = bp->b_addr;
+
/*
* Initialize the header.
*/
+ leaf = bp->b_addr;
leaf->hdr.info.magic = cpu_to_be16(magic);
leaf->hdr.info.forw = 0;
leaf->hdr.info.back = 0;
* the block.
*/
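+ /*
+ * Attach the write verifier that matches the leaf format. Only
+ * LEAF1 blocks carry the best-free tail initialised below.
+ */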
if (magic == XFS_DIR2_LEAF1_MAGIC) {
+ bp->b_pre_io = xfs_dir2_leaf1_write_verify;
ltp = xfs_dir2_leaf_tail_p(mp, leaf);
ltp->bestcount = 0;
xfs_dir2_leaf_log_tail(tp, bp);
- }
+ } else
+ bp->b_pre_io = xfs_dir2_leafn_write_verify;
*bpp = bp;
return 0;
}
xfs_dir2_leaf_compact(args, lbp);
else
xfs_dir2_leaf_log_header(tp, lbp);
+
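+ /* the leaf is changing to LEAF1 format, so swap verifiers to match */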
+ lbp->b_pre_io = xfs_dir2_leaf1_write_verify;
leaf->hdr.info.magic = cpu_to_be16(XFS_DIR2_LEAF1_MAGIC);
+
/*
* Set up the leaf tail from the freespace block.
*/
/*
* Get the buffer for the new freespace block.
*/
- if ((error = xfs_da_get_buf(tp, dp, xfs_dir2_db_to_da(mp, fdb), -1, &fbp,
- XFS_DATA_FORK))) {
+ error = xfs_da_get_buf(tp, dp, xfs_dir2_db_to_da(mp, fdb), -1, &fbp,
+ XFS_DATA_FORK);
+ if (error)
return error;
- }
- ASSERT(fbp != NULL);
+ fbp->b_pre_io = xfs_dir2_free_write_verify;
+
free = fbp->b_addr;
leaf = lbp->b_addr;
ltp = xfs_dir2_leaf_tail_p(mp, leaf);
*to = cpu_to_be16(off);
}
free->hdr.nused = cpu_to_be32(n);
+
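+ /* the leaf is changing to LEAFN format, so swap verifiers to match */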
+ lbp->b_pre_io = xfs_dir2_leafn_write_verify;
leaf->hdr.info.magic = cpu_to_be16(XFS_DIR2_LEAFN_MAGIC);
+
/*
* Log everything.
*/
state->extrablk.index = (int)((char *)dep -
(char *)curbp->b_addr);
state->extrablk.magic = XFS_DIR2_DATA_MAGIC;
+ curbp->b_pre_io = xfs_dir2_data_write_verify;
if (cmp == XFS_CMP_EXACT)
return XFS_ERROR(EEXIST);
}
state->extrablk.index = -1;
state->extrablk.blkno = curdb;
state->extrablk.magic = XFS_DIR2_DATA_MAGIC;
+ curbp->b_pre_io = xfs_dir2_data_write_verify;
} else {
/* If the curbp is not the CI match block, drop it */
if (state->extrablk.bp != curbp)
/*
* Get a buffer for the new block.
*/
- if ((error = xfs_da_get_buf(tp, dp,
- xfs_dir2_db_to_da(mp, fbno),
- -1, &fbp, XFS_DATA_FORK))) {
+ error = xfs_da_get_buf(tp, dp,
+ xfs_dir2_db_to_da(mp, fbno),
+ -1, &fbp, XFS_DATA_FORK);
+ if (error)
return error;
- }
- ASSERT(fbp != NULL);
+ fbp->b_pre_io = xfs_dir2_free_write_verify;
/*
* Initialize the new block to be empty, and remember
#else
#define xfs_dir2_data_check(dp,bp)
#endif
+extern void xfs_dir2_data_write_verify(struct xfs_buf *bp);
extern int __xfs_dir2_data_check(struct xfs_inode *dp, struct xfs_buf *bp);
extern int xfs_dir2_data_read(struct xfs_trans *tp, struct xfs_inode *dp,
xfs_dablk_t bno, xfs_daddr_t mapped_bno, struct xfs_buf **bpp);
/* xfs_dir2_leaf.c */
extern void xfs_dir2_leafn_read_verify(struct xfs_buf *bp);
+extern void xfs_dir2_leafn_write_verify(struct xfs_buf *bp);
extern int xfs_dir2_leafn_read(struct xfs_trans *tp, struct xfs_inode *dp,
xfs_dablk_t fbno, xfs_daddr_t mappedbno, struct xfs_buf **bpp);
extern int xfs_dir2_block_to_leaf(struct xfs_da_args *args,
xfs_trans_log_buf(tp, bp, 0, BBTOB(q->qi_dqchunklen) - 1);
}
+static void
+xfs_dquot_buf_verify(
+ struct xfs_buf *bp)
+{
+ struct xfs_mount *mp = bp->b_target->bt_mount;
+ struct xfs_dqblk *d = (struct xfs_dqblk *)bp->b_addr;
+ struct xfs_disk_dquot *ddq;
+ xfs_dqid_t id = 0;
+ int i;
+
+ /*
+ * On the first read of the buffer, verify that each dquot is valid.
+ * We don't know what the id of the dquot is supposed to be, just that
+ * they should be increasing monotonically within the buffer. If the
+ * first id is corrupt, then it will fail on the second dquot in the
+ * buffer so corruptions could point to the wrong dquot in this case.
+ */
+ for (i = 0; i < mp->m_quotainfo->qi_dqperchunk; i++) {
+ int error;
+
+ ddq = &d[i].dd_diskdq;
+
+ if (i == 0)
+ id = be32_to_cpu(ddq->d_id);
+
+ error = xfs_qm_dqcheck(mp, ddq, id + i, 0, XFS_QMOPT_DOWARN,
+ "xfs_dquot_read_verify");
+ if (error) {
+ XFS_CORRUPTION_ERROR(__func__, XFS_ERRLEVEL_LOW, mp, d);
+ xfs_buf_ioerror(bp, EFSCORRUPTED);
+ break;
+ }
+ }
+}
+
+static void
+xfs_dquot_buf_write_verify(
+ struct xfs_buf *bp)
+{
+ xfs_dquot_buf_verify(bp);
+}
+
+void
+xfs_dquot_buf_read_verify(
+ struct xfs_buf *bp)
+{
+ xfs_dquot_buf_verify(bp);
+ bp->b_pre_io = xfs_dquot_buf_write_verify;
+ bp->b_iodone = NULL;
+ xfs_buf_ioend(bp, 0);
+}
/*
* Allocate a block and fill it with dquots.
error = xfs_buf_geterror(bp);
if (error)
goto error1;
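+ /* this buffer is never read, so add the write verifier directly */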
+ bp->b_pre_io = xfs_dquot_buf_write_verify;
/*
* Make a chunk of dquots out of this buffer and log
return (error);
}
-
-static void
-xfs_dquot_buf_verify(
- struct xfs_buf *bp)
-{
- struct xfs_mount *mp = bp->b_target->bt_mount;
- struct xfs_dqblk *d = (struct xfs_dqblk *)bp->b_addr;
- struct xfs_disk_dquot *ddq;
- xfs_dqid_t id = 0;
- int i;
-
- /*
- * On the first read of the buffer, verify that each dquot is valid.
- * We don't know what the id of the dquot is supposed to be, just that
- * they should be increasing monotonically within the buffer. If the
- * first id is corrupt, then it will fail on the second dquot in the
- * buffer so corruptions could point to the wrong dquot in this case.
- */
- for (i = 0; i < mp->m_quotainfo->qi_dqperchunk; i++) {
- int error;
-
- ddq = &d[i].dd_diskdq;
-
- if (i == 0)
- id = be32_to_cpu(ddq->d_id);
-
- error = xfs_qm_dqcheck(mp, ddq, id + i, 0, XFS_QMOPT_DOWARN,
- "xfs_dquot_read_verify");
- if (error) {
- XFS_CORRUPTION_ERROR(__func__, XFS_ERRLEVEL_LOW, mp, d);
- xfs_buf_ioerror(bp, EFSCORRUPTED);
- break;
- }
- }
-}
-
-static void
-xfs_dquot_buf_write_verify(
- struct xfs_buf *bp)
-{
- xfs_dquot_buf_verify(bp);
-}
-
-void
-xfs_dquot_buf_read_verify(
- struct xfs_buf *bp)
-{
- xfs_dquot_buf_verify(bp);
- bp->b_pre_io = xfs_dquot_buf_write_verify;
- bp->b_iodone = NULL;
- xfs_buf_ioend(bp, 0);
-}
STATIC int
xfs_qm_dqrepair(
struct xfs_mount *mp,
error = ENOMEM;
goto error0;
}
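+ /* this buffer is built in memory, never read, so add the write verifier here */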
+ bp->b_pre_io = xfs_agf_write_verify;
agf = XFS_BUF_TO_AGF(bp);
agf->agf_magicnum = cpu_to_be32(XFS_AGF_MAGIC);
error = ENOMEM;
goto error0;
}
+ bp->b_pre_io = xfs_agfl_write_verify;
agfl = XFS_BUF_TO_AGFL(bp);
for (bucket = 0; bucket < XFS_AGFL_SIZE(mp); bucket++)
error = ENOMEM;
goto error0;
}
+ bp->b_pre_io = xfs_agi_write_verify;
agi = XFS_BUF_TO_AGI(bp);
agi->agi_magicnum = cpu_to_be32(XFS_AGI_MAGIC);
bp = xfs_trans_get_buf(NULL, mp->m_ddev_targp,
XFS_AGB_TO_DADDR(mp, agno, XFS_SB_BLOCK(mp)),
XFS_FSS_TO_BB(mp, 1), 0);
- if (bp)
+ if (bp) {
xfs_buf_zero(bp, 0, BBTOB(bp->b_length));
- else
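+ /* secondary superblocks are only ever written, never read */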
+ bp->b_pre_io = xfs_sb_write_verify;
+ } else
error = ENOMEM;
}
* to log a whole cluster of inodes instead of all the
* individual transactions causing a lot of log traffic.
*/
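+ /* attach the write verifier before the new inodes are initialised */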
+ fbuf->b_pre_io = xfs_inode_buf_write_verify;
xfs_buf_zero(fbuf, 0, ninodes << mp->m_sb.sb_inodelog);
for (i = 0; i < ninodes; i++) {
int ioffset = i << mp->m_sb.sb_inodelog;
xfs_check_agi_unlinked(agi);
}
-static void
+void
xfs_agi_write_verify(
struct xfs_buf *bp)
{
xfs_agi_verify(bp);
}
-void
+static void
xfs_agi_read_verify(
struct xfs_buf *bp)
{
/*
* Get the data from the pointed-to record.
*/
extern int xfs_inobt_get_rec(struct xfs_btree_cur *cur,
xfs_inobt_rec_incore_t *rec, int *stat);
+void xfs_agi_write_verify(struct xfs_buf *bp);
+
#endif /* __XFS_IALLOC_H__ */
.init_ptr_from_cur = xfs_inobt_init_ptr_from_cur,
.key_diff = xfs_inobt_key_diff,
.read_verify = xfs_inobt_read_verify,
+ .write_verify = xfs_inobt_write_verify,
#ifdef DEBUG
.keys_inorder = xfs_inobt_keys_inorder,
.recs_inorder = xfs_inobt_recs_inorder,
xfs_inobp_check(mp, bp);
}
-static void
+void
xfs_inode_buf_write_verify(
struct xfs_buf *bp)
{
if (!bp)
return ENOMEM;
+
+ /*
+ * This buffer may not have been correctly initialised as we
+ * didn't read it from disk. That's not important because we are
+ * only using it to mark the buffer as stale in the log, and to
+ * attach stale cached inodes to it. That means it will never be
+ * dispatched for IO. If it is, we want to know about it, and we
+ * want it to fail. We can achieve this by adding a write
+ * verifier to the buffer.
+ */
+ bp->b_pre_io = xfs_inode_buf_write_verify;
+
/*
* Walk the inodes already attached to the buffer and mark them
* stale. These will all have the flush locks held, so an
int xfs_iread(struct xfs_mount *, struct xfs_trans *,
struct xfs_inode *, uint);
void xfs_inode_buf_read_verify(struct xfs_buf *);
+void xfs_inode_buf_write_verify(struct xfs_buf *);
void xfs_dinode_to_disk(struct xfs_dinode *,
struct xfs_icdinode *);
void xfs_idestroy_fork(struct xfs_inode *, int);
xfs_buf_ioerror(bp, error);
}
-static void
+void
xfs_sb_write_verify(
struct xfs_buf *bp)
{
#endif /* __KERNEL__ */
extern void xfs_sb_read_verify(struct xfs_buf *);
+extern void xfs_sb_write_verify(struct xfs_buf *);
extern void xfs_mod_sb(struct xfs_trans *, __int64_t);
extern int xfs_initialize_perag(struct xfs_mount *, xfs_agnumber_t,
xfs_agnumber_t *);