return 0;
}
-void
-xfs_agfl_read_verify(
+static void
+xfs_agfl_verify(
struct xfs_buf *bp)
{
#ifdef WHEN_CRCS_COME_ALONG
xfs_buf_ioerror(bp, EFSCORRUPTED);
}
#endif
+}
+
+static void
+xfs_agfl_write_verify(
+ struct xfs_buf *bp)
+{
+ xfs_agfl_verify(bp);
+}
+
+void
+xfs_agfl_read_verify(
+ struct xfs_buf *bp)
+{
+ xfs_agfl_verify(bp);
+ bp->b_pre_io = xfs_agfl_write_verify;
bp->b_iodone = NULL;
xfs_buf_ioend(bp, 0);
}
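The hunk above establishes the pattern the rest of this patch repeats: the old read verifier is split into a shared xfs_agfl_verify() helper plus a trivial xfs_agfl_write_verify() wrapper, and the new xfs_agfl_read_verify() runs the checks and then hangs the write wrapper off bp->b_pre_io. The consumer of b_pre_io is not part of this excerpt; the sketch below is an assumption about how the buffer write submission path would use the hook, not code from the patch.

/*
 * Illustrative sketch only -- not from this patch.  The assumption is
 * that, just before a buffer is submitted for write, the buffer layer
 * runs whatever verifier the last read (or buffer set-up) attached:
 */
static void
xfs_buf_presubmit_sketch(
	struct xfs_buf	*bp)
{
	if ((bp->b_flags & XBF_WRITE) && bp->b_pre_io)
		bp->b_pre_io(bp);	/* on corruption, sets b_error */
}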
}
static void
-xfs_agf_read_verify(
+xfs_agf_verify(
struct xfs_buf *bp)
{
struct xfs_mount *mp = bp->b_target->bt_mount;
XFS_CORRUPTION_ERROR(__func__, XFS_ERRLEVEL_LOW, mp, agf);
xfs_buf_ioerror(bp, EFSCORRUPTED);
}
+}
+
+static void
+xfs_agf_write_verify(
+ struct xfs_buf *bp)
+{
+ xfs_agf_verify(bp);
+}
+
+void
+xfs_agf_read_verify(
+ struct xfs_buf *bp)
+{
+ xfs_agf_verify(bp);
+ bp->b_pre_io = xfs_agf_write_verify;
bp->b_iodone = NULL;
xfs_buf_ioend(bp, 0);
}
return (__int64_t)be32_to_cpu(kp->ar_startblock) - rec->ar_startblock;
}
-void
-xfs_allocbt_read_verify(
+static void
+xfs_allocbt_verify(
struct xfs_buf *bp)
{
struct xfs_mount *mp = bp->b_target->bt_mount;
if (!sblock_ok) {
trace_xfs_btree_corrupt(bp, _RET_IP_);
- XFS_CORRUPTION_ERROR("xfs_allocbt_read_verify",
- XFS_ERRLEVEL_LOW, mp, block);
+ XFS_CORRUPTION_ERROR(__func__, XFS_ERRLEVEL_LOW, mp, block);
xfs_buf_ioerror(bp, EFSCORRUPTED);
}
+}
+
+static void
+xfs_allocbt_write_verify(
+ struct xfs_buf *bp)
+{
+ xfs_allocbt_verify(bp);
+}
+
+void
+xfs_allocbt_read_verify(
+ struct xfs_buf *bp)
+{
+ xfs_allocbt_verify(bp);
+ bp->b_pre_io = xfs_allocbt_write_verify;
bp->b_iodone = NULL;
xfs_buf_ioend(bp, 0);
}
xfs_mount_t *mp);
STATIC int xfs_attr_leaf_entsize(xfs_attr_leafblock_t *leaf, int index);
-void
+static void
xfs_attr_leaf_verify(
struct xfs_buf *bp)
{
XFS_CORRUPTION_ERROR(__func__, XFS_ERRLEVEL_LOW, mp, hdr);
xfs_buf_ioerror(bp, EFSCORRUPTED);
}
+}
+
+static void
+xfs_attr_leaf_write_verify(
+ struct xfs_buf *bp)
+{
+ xfs_attr_leaf_verify(bp);
+}
+
+void
+xfs_attr_leaf_read_verify(
+ struct xfs_buf *bp)
+{
+ xfs_attr_leaf_verify(bp);
+ bp->b_pre_io = xfs_attr_leaf_write_verify;
bp->b_iodone = NULL;
xfs_buf_ioend(bp, 0);
}
+
int
xfs_attr_leaf_read(
struct xfs_trans *tp,
struct xfs_buf **bpp)
{
return xfs_da_read_buf(tp, dp, bno, mappedbno, bpp,
- XFS_ATTR_FORK, xfs_attr_leaf_verify);
+ XFS_ATTR_FORK, xfs_attr_leaf_read_verify);
}
/*========================================================================
int xfs_attr_leaf_read(struct xfs_trans *tp, struct xfs_inode *dp,
xfs_dablk_t bno, xfs_daddr_t mappedbno,
struct xfs_buf **bpp);
-void xfs_attr_leaf_verify(struct xfs_buf *bp);
+void xfs_attr_leaf_read_verify(struct xfs_buf *bp);
#endif /* __XFS_ATTR_LEAF_H__ */
cur->bc_rec.b.br_startoff;
}
-void
-xfs_bmbt_read_verify(
+static void
+xfs_bmbt_verify(
struct xfs_buf *bp)
{
struct xfs_mount *mp = bp->b_target->bt_mount;
if (!lblock_ok) {
trace_xfs_btree_corrupt(bp, _RET_IP_);
- XFS_CORRUPTION_ERROR("xfs_bmbt_read_verify",
- XFS_ERRLEVEL_LOW, mp, block);
+ XFS_CORRUPTION_ERROR(__func__, XFS_ERRLEVEL_LOW, mp, block);
xfs_buf_ioerror(bp, EFSCORRUPTED);
}
+}
+
+static void
+xfs_bmbt_write_verify(
+ struct xfs_buf *bp)
+{
+ xfs_bmbt_verify(bp);
+}
+
+void
+xfs_bmbt_read_verify(
+ struct xfs_buf *bp)
+{
+ xfs_bmbt_verify(bp);
+ bp->b_pre_io = xfs_bmbt_write_verify;
bp->b_iodone = NULL;
xfs_buf_ioend(bp, 0);
}
STATIC void xfs_da_state_kill_altpath(xfs_da_state_t *state);
static void
-__xfs_da_node_verify(
+xfs_da_node_verify(
struct xfs_buf *bp)
{
struct xfs_mount *mp = bp->b_target->bt_mount;
xfs_buf_ioerror(bp, EFSCORRUPTED);
}
- bp->b_iodone = NULL;
- xfs_buf_ioend(bp, 0);
}
static void
-xfs_da_node_verify(
+xfs_da_node_write_verify(
+ struct xfs_buf *bp)
+{
+ xfs_da_node_verify(bp);
+}
+
+static void
+xfs_da_node_read_verify(
struct xfs_buf *bp)
{
struct xfs_mount *mp = bp->b_target->bt_mount;
switch (be16_to_cpu(info->magic)) {
case XFS_DA_NODE_MAGIC:
- __xfs_da_node_verify(bp);
- return;
+ xfs_da_node_verify(bp);
+ break;
case XFS_ATTR_LEAF_MAGIC:
- xfs_attr_leaf_verify(bp);
+ xfs_attr_leaf_read_verify(bp);
return;
case XFS_DIR2_LEAFN_MAGIC:
- xfs_dir2_leafn_verify(bp);
+ xfs_dir2_leafn_read_verify(bp);
return;
default:
+ XFS_CORRUPTION_ERROR(__func__, XFS_ERRLEVEL_LOW,
+ mp, info);
+ xfs_buf_ioerror(bp, EFSCORRUPTED);
break;
}
- XFS_CORRUPTION_ERROR(__func__, XFS_ERRLEVEL_LOW, mp, info);
- xfs_buf_ioerror(bp, EFSCORRUPTED);
-
+ bp->b_pre_io = xfs_da_node_write_verify;
bp->b_iodone = NULL;
xfs_buf_ioend(bp, 0);
}
int which_fork)
{
return xfs_da_read_buf(tp, dp, bno, mappedbno, bpp,
- which_fork, xfs_da_node_verify);
+ which_fork, xfs_da_node_read_verify);
}
/*========================================================================
xfs_da_blkinfo_onlychild_validate(bp->b_addr,
be16_to_cpu(oldroot->hdr.level));
+ /*
+ * This could be copying a leaf back into the root block in the case
+ * where only a single leaf block is left in the tree. Hence we have to
+ * update the pre_io pointer as well to match the buffer type change
+ * that could occur.
+ */
memcpy(root_blk->bp->b_addr, bp->b_addr, state->blocksize);
+ root_blk->bp->b_pre_io = bp->b_pre_io;
xfs_trans_log_buf(args->trans, root_blk->bp, 0, state->blocksize - 1);
error = xfs_da_shrink_inode(args, child, bp);
return(error);
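The comment in the hunk above is why the b_pre_io assignment has to follow the memcpy(): the write verifier is tied to the buffer's content type, so when a leaf block's contents are copied over the da-tree root block, the verifier must travel with them. A minimal sketch of that idea with a hypothetical helper name (only memcpy() and the b_pre_io field come from the patch):

/*
 * Hypothetical helper, for illustration only: whenever one buffer's
 * contents replace another's, carry the matching write verifier across
 * too, otherwise the stale verifier would reject the new contents at
 * write time.
 */
static void
xfs_buf_copy_with_verifier(
	struct xfs_buf	*dst,
	struct xfs_buf	*src,
	int		size)
{
	memcpy(dst->b_addr, src->b_addr, size);	/* new contents */
	dst->b_pre_io = src->b_pre_io;		/* matching write verifier */
}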
XFS_CORRUPTION_ERROR(__func__, XFS_ERRLEVEL_LOW, mp, hdr);
xfs_buf_ioerror(bp, EFSCORRUPTED);
}
+}
+
+static void
+xfs_dir2_block_write_verify(
+ struct xfs_buf *bp)
+{
+ xfs_dir2_block_verify(bp);
+}
+
+void
+xfs_dir2_block_read_verify(
+ struct xfs_buf *bp)
+{
+ xfs_dir2_block_verify(bp);
+ bp->b_pre_io = xfs_dir2_block_write_verify;
bp->b_iodone = NULL;
xfs_buf_ioend(bp, 0);
}
struct xfs_mount *mp = dp->i_mount;
return xfs_da_read_buf(tp, dp, mp->m_dirdatablk, -1, bpp,
- XFS_DATA_FORK, xfs_dir2_block_verify);
+ XFS_DATA_FORK, xfs_dir2_block_read_verify);
}
static void
XFS_CORRUPTION_ERROR(__func__, XFS_ERRLEVEL_LOW, mp, hdr);
xfs_buf_ioerror(bp, EFSCORRUPTED);
}
+}
+
+static void
+xfs_dir2_data_write_verify(
+ struct xfs_buf *bp)
+{
+ xfs_dir2_data_verify(bp);
+}
+
+void
+xfs_dir2_data_read_verify(
+ struct xfs_buf *bp)
+{
+ xfs_dir2_data_verify(bp);
+ bp->b_pre_io = xfs_dir2_data_write_verify;
bp->b_iodone = NULL;
xfs_buf_ioend(bp, 0);
}
+
int
xfs_dir2_data_read(
struct xfs_trans *tp,
struct xfs_buf **bpp)
{
return xfs_da_read_buf(tp, dp, bno, mapped_bno, bpp,
- XFS_DATA_FORK, xfs_dir2_data_verify);
+ XFS_DATA_FORK, xfs_dir2_data_read_verify);
}
int
xfs_daddr_t mapped_bno)
{
return xfs_da_reada_buf(tp, dp, bno, mapped_bno,
- XFS_DATA_FORK, xfs_dir2_data_verify);
+ XFS_DATA_FORK, xfs_dir2_data_read_verify);
}
/*
XFS_CORRUPTION_ERROR(__func__, XFS_ERRLEVEL_LOW, mp, hdr);
xfs_buf_ioerror(bp, EFSCORRUPTED);
}
+}
+
+static void
+xfs_dir2_leaf1_write_verify(
+ struct xfs_buf *bp)
+{
+ xfs_dir2_leaf_verify(bp, cpu_to_be16(XFS_DIR2_LEAF1_MAGIC));
+}
+
+static void
+xfs_dir2_leaf1_read_verify(
+ struct xfs_buf *bp)
+{
+ xfs_dir2_leaf_verify(bp, cpu_to_be16(XFS_DIR2_LEAF1_MAGIC));
+ bp->b_pre_io = xfs_dir2_leaf1_write_verify;
bp->b_iodone = NULL;
xfs_buf_ioend(bp, 0);
}
static void
-xfs_dir2_leaf1_verify(
- struct xfs_buf *bp)
+xfs_dir2_leafn_write_verify(
+ struct xfs_buf *bp)
{
- xfs_dir2_leaf_verify(bp, cpu_to_be16(XFS_DIR2_LEAF1_MAGIC));
+ xfs_dir2_leaf_verify(bp, cpu_to_be16(XFS_DIR2_LEAFN_MAGIC));
}
void
-xfs_dir2_leafn_verify(
- struct xfs_buf *bp)
+xfs_dir2_leafn_read_verify(
+ struct xfs_buf *bp)
{
xfs_dir2_leaf_verify(bp, cpu_to_be16(XFS_DIR2_LEAFN_MAGIC));
+ bp->b_pre_io = xfs_dir2_leafn_write_verify;
+ bp->b_iodone = NULL;
+ xfs_buf_ioend(bp, 0);
}
static int
struct xfs_buf **bpp)
{
return xfs_da_read_buf(tp, dp, fbno, mappedbno, bpp,
- XFS_DATA_FORK, xfs_dir2_leaf1_verify);
+ XFS_DATA_FORK, xfs_dir2_leaf1_read_verify);
}
int
struct xfs_buf **bpp)
{
return xfs_da_read_buf(tp, dp, fbno, mappedbno, bpp,
- XFS_DATA_FORK, xfs_dir2_leafn_verify);
+ XFS_DATA_FORK, xfs_dir2_leafn_read_verify);
}
/*
XFS_ERRLEVEL_LOW, mp, hdr);
xfs_buf_ioerror(bp, EFSCORRUPTED);
}
+}
+
+static void
+xfs_dir2_free_write_verify(
+ struct xfs_buf *bp)
+{
+ xfs_dir2_free_verify(bp);
+}
+
+void
+xfs_dir2_free_read_verify(
+ struct xfs_buf *bp)
+{
+ xfs_dir2_free_verify(bp);
+ bp->b_pre_io = xfs_dir2_free_write_verify;
bp->b_iodone = NULL;
xfs_buf_ioend(bp, 0);
}
+
static int
__xfs_dir2_free_read(
struct xfs_trans *tp,
struct xfs_buf **bpp)
{
return xfs_da_read_buf(tp, dp, fbno, mappedbno, bpp,
- XFS_DATA_FORK, xfs_dir2_free_verify);
+ XFS_DATA_FORK, xfs_dir2_free_read_verify);
}
int
xfs_dir2_data_aoff_t len, int *needlogp, int *needscanp);
/* xfs_dir2_leaf.c */
-extern void xfs_dir2_leafn_verify(struct xfs_buf *bp);
+extern void xfs_dir2_leafn_read_verify(struct xfs_buf *bp);
extern int xfs_dir2_leafn_read(struct xfs_trans *tp, struct xfs_inode *dp,
xfs_dablk_t fbno, xfs_daddr_t mappedbno, struct xfs_buf **bpp);
extern int xfs_dir2_block_to_leaf(struct xfs_da_args *args,
return (error);
}
-void
-xfs_dquot_read_verify(
+static void
+xfs_dquot_buf_verify(
struct xfs_buf *bp)
{
struct xfs_mount *mp = bp->b_target->bt_mount;
error = xfs_qm_dqcheck(mp, ddq, id + i, 0, XFS_QMOPT_DOWARN,
"xfs_dquot_read_verify");
if (error) {
- XFS_CORRUPTION_ERROR("xfs_dquot_read_verify",
- XFS_ERRLEVEL_LOW, mp, d);
+ XFS_CORRUPTION_ERROR(__func__, XFS_ERRLEVEL_LOW, mp, d);
xfs_buf_ioerror(bp, EFSCORRUPTED);
break;
}
}
+}
+
+static void
+xfs_dquot_buf_write_verify(
+ struct xfs_buf *bp)
+{
+ xfs_dquot_buf_verify(bp);
+}
+
+void
+xfs_dquot_buf_read_verify(
+ struct xfs_buf *bp)
+{
+ xfs_dquot_buf_verify(bp);
+ bp->b_pre_io = xfs_dquot_buf_write_verify;
bp->b_iodone = NULL;
xfs_buf_ioend(bp, 0);
}
/*
* Read the buffer without verification so we get the corrupted
- * buffer returned to us.
+ * buffer returned to us. Make sure we verify it on write, though.
*/
error = xfs_trans_read_buf(mp, tp, mp->m_ddev_targp, dqp->q_blkno,
mp->m_quotainfo->qi_dqchunklen,
ASSERT(*bpp == NULL);
return XFS_ERROR(error);
}
+ (*bpp)->b_pre_io = xfs_dquot_buf_write_verify;
ASSERT(xfs_buf_islocked(*bpp));
d = (struct xfs_dqblk *)(*bpp)->b_addr;
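For context on the hunk above: the dquot repair path deliberately passes no read verifier so that the corrupted buffer is still handed back, repairs the dquots in place, and only then attaches the write verifier. A simplified sketch of that flow, assuming the xfs_trans_read_buf() calling convention used elsewhere in this patch and that a NULL verifier means no read-time checks (the function name and the elided repair loop are placeholders, not the patch's code):

STATIC int
xfs_dqrepair_sketch(
	struct xfs_mount	*mp,
	struct xfs_trans	*tp,
	struct xfs_dquot	*dqp,
	struct xfs_buf		**bpp)
{
	int			error;

	/* No read verifier: the corrupted buffer must come back to us. */
	error = xfs_trans_read_buf(mp, tp, mp->m_ddev_targp, dqp->q_blkno,
				   mp->m_quotainfo->qi_dqchunklen,
				   0, bpp, NULL);
	if (error)
		return XFS_ERROR(error);

	/* ... repair each on-disk dquot in (*bpp)->b_addr here ... */

	/* The repaired contents get verified before being written back. */
	(*bpp)->b_pre_io = xfs_dquot_buf_write_verify;
	return 0;
}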
error = xfs_trans_read_buf(mp, tp, mp->m_ddev_targp,
dqp->q_blkno,
mp->m_quotainfo->qi_dqchunklen,
- 0, &bp, xfs_dquot_read_verify);
+ 0, &bp, xfs_dquot_buf_read_verify);
if (error == EFSCORRUPTED && (flags & XFS_QMOPT_DQREPAIR)) {
xfs_dqid_t firstid = (xfs_dqid_t)map.br_startoff *
extern int xfs_qm_dqread(struct xfs_mount *, xfs_dqid_t, uint,
uint, struct xfs_dquot **);
-extern void xfs_dquot_read_verify(struct xfs_buf *bp);
+extern void xfs_dquot_buf_read_verify(struct xfs_buf *bp);
extern void xfs_qm_dqdestroy(xfs_dquot_t *);
extern int xfs_qm_dqflush(struct xfs_dquot *, struct xfs_buf **);
extern void xfs_qm_dqunpin_wait(xfs_dquot_t *);
#endif
static void
-xfs_agi_read_verify(
+xfs_agi_verify(
struct xfs_buf *bp)
{
struct xfs_mount *mp = bp->b_target->bt_mount;
xfs_buf_ioerror(bp, EFSCORRUPTED);
}
xfs_check_agi_unlinked(agi);
+}
+
+static void
+xfs_agi_write_verify(
+ struct xfs_buf *bp)
+{
+ xfs_agi_verify(bp);
+}
+
+void
+xfs_agi_read_verify(
+ struct xfs_buf *bp)
+{
+ xfs_agi_verify(bp);
+ bp->b_pre_io = xfs_agi_write_verify;
bp->b_iodone = NULL;
xfs_buf_ioend(bp, 0);
}
}
-void
-xfs_inobt_read_verify(
+static void
+xfs_inobt_verify(
struct xfs_buf *bp)
{
struct xfs_mount *mp = bp->b_target->bt_mount;
if (!sblock_ok) {
trace_xfs_btree_corrupt(bp, _RET_IP_);
- XFS_CORRUPTION_ERROR("xfs_inobt_read_verify",
- XFS_ERRLEVEL_LOW, mp, block);
+ XFS_CORRUPTION_ERROR(__func__, XFS_ERRLEVEL_LOW, mp, block);
xfs_buf_ioerror(bp, EFSCORRUPTED);
}
+}
+
+static void
+xfs_inobt_write_verify(
+ struct xfs_buf *bp)
+{
+ xfs_inobt_verify(bp);
+}
+
+void
+xfs_inobt_read_verify(
+ struct xfs_buf *bp)
+{
+ xfs_inobt_verify(bp);
+ bp->b_pre_io = xfs_inobt_write_verify;
bp->b_iodone = NULL;
xfs_buf_ioend(bp, 0);
}
}
#endif
-void
+static void
xfs_inode_buf_verify(
struct xfs_buf *bp)
{
}
}
xfs_inobp_check(mp, bp);
+}
+
+static void
+xfs_inode_buf_write_verify(
+ struct xfs_buf *bp)
+{
+ xfs_inode_buf_verify(bp);
+}
+
+void
+xfs_inode_buf_read_verify(
+ struct xfs_buf *bp)
+{
+ xfs_inode_buf_verify(bp);
+ bp->b_pre_io = xfs_inode_buf_write_verify;
bp->b_iodone = NULL;
xfs_buf_ioend(bp, 0);
}
buf_flags |= XBF_UNMAPPED;
error = xfs_trans_read_buf(mp, tp, mp->m_ddev_targp, imap->im_blkno,
(int)imap->im_len, buf_flags, &bp,
- xfs_inode_buf_verify);
+ xfs_inode_buf_read_verify);
if (error) {
if (error == EAGAIN) {
ASSERT(buf_flags & XBF_TRYLOCK);
struct xfs_buf **, uint, uint);
int xfs_iread(struct xfs_mount *, struct xfs_trans *,
struct xfs_inode *, uint);
-void xfs_inode_buf_verify(struct xfs_buf *);
+void xfs_inode_buf_read_verify(struct xfs_buf *);
void xfs_dinode_to_disk(struct xfs_dinode *,
struct xfs_icdinode *);
void xfs_idestroy_fork(struct xfs_inode *, int);
& ~r.ir_free)
xfs_btree_reada_bufs(mp, agno,
agbno, nbcluster,
- xfs_inode_buf_verify);
+ xfs_inode_buf_read_verify);
}
irbp->ir_startino = r.ir_startino;
irbp->ir_freecount = r.ir_freecount;
}
}
-void
-xfs_sb_read_verify(
+static void
+xfs_sb_verify(
struct xfs_buf *bp)
{
struct xfs_mount *mp = bp->b_target->bt_mount;
error = xfs_mount_validate_sb(mp, &sb, bp->b_bn == XFS_SB_DADDR);
if (error)
xfs_buf_ioerror(bp, error);
+}
+
+static void
+xfs_sb_write_verify(
+ struct xfs_buf *bp)
+{
+ xfs_sb_verify(bp);
+}
+
+void
+xfs_sb_read_verify(
+ struct xfs_buf *bp)
+{
+ xfs_sb_verify(bp);
+ bp->b_pre_io = xfs_sb_write_verify;
bp->b_iodone = NULL;
xfs_buf_ioend(bp, 0);
}
error = xfs_trans_read_buf(mp, NULL, mp->m_ddev_targp,
XFS_FSB_TO_DADDR(mp, bno),
mp->m_quotainfo->qi_dqchunklen, 0, &bp,
- xfs_dquot_read_verify);
+ xfs_dquot_buf_read_verify);
if (error)
break;