struct xfs_rud_log_format {
__uint16_t rud_type; /* rud log item type */
__uint16_t rud_size; /* size of this item */
- __uint32_t rud_nextents; /* # of extents freed */
+ __uint32_t __pad; /* unused; keeps rud_rui_id at offset 8 (64-bit aligned) where rud_nextents was */
__uint64_t rud_rui_id; /* id of corresponding rui */
- struct xfs_map_extent rud_extents[1]; /* array of extents rmapped */
};
/*
struct xfs_ail *ailp = log->l_ailp;
rud_formatp = item->ri_buf[0].i_addr;
- ASSERT(item->ri_buf[0].i_len == (sizeof(struct xfs_rud_log_format) +
- ((rud_formatp->rud_nextents - 1) *
- sizeof(struct xfs_map_extent))));
+ ASSERT(item->ri_buf[0].i_len == sizeof(struct xfs_rud_log_format)); /* logged RUD is fixed-size now */
rui_id = rud_formatp->rud_rui_id;
/*
return container_of(lip, struct xfs_rud_log_item, rud_item);
}
-STATIC void
-xfs_rud_item_free(struct xfs_rud_log_item *rudp)
-{
- if (rudp->rud_format.rud_nextents > XFS_RUD_MAX_FAST_EXTENTS)
- kmem_free(rudp);
- else
- kmem_zone_free(xfs_rud_zone, rudp);
-}
-
-/*
- * This returns the number of iovecs needed to log the given rud item.
- * We only need 1 iovec for an rud item. It just logs the rud_log_format
- * structure.
- */
-static inline int
-xfs_rud_item_sizeof(
- struct xfs_rud_log_item *rudp)
-{
- return sizeof(struct xfs_rud_log_format) +
- (rudp->rud_format.rud_nextents - 1) *
- sizeof(struct xfs_map_extent);
-}
-
STATIC void
xfs_rud_item_size(
struct xfs_log_item *lip,
int *nbytes)
{
*nvecs += 1;
- *nbytes += xfs_rud_item_sizeof(RUD_ITEM(lip));
+ *nbytes += sizeof(struct xfs_rud_log_format); /* RUD payload no longer varies with extent count */
}
/*
struct xfs_rud_log_item *rudp = RUD_ITEM(lip);
struct xfs_log_iovec *vecp = NULL;
- ASSERT(rudp->rud_next_extent == rudp->rud_format.rud_nextents);
-
rudp->rud_format.rud_type = XFS_LI_RUD;
rudp->rud_format.rud_size = 1;
xlog_copy_iovec(lv, &vecp, XLOG_REG_TYPE_RUD_FORMAT, &rudp->rud_format,
- xfs_rud_item_sizeof(rudp));
+ sizeof(struct xfs_rud_log_format));
}
/*
if (lip->li_flags & XFS_LI_ABORTED) {
xfs_rui_release(rudp->rud_ruip);
- xfs_rud_item_free(rudp);
+ kmem_zone_free(xfs_rud_zone, rudp);
}
}
* aborted due to log I/O error).
*/
xfs_rui_release(rudp->rud_ruip);
- xfs_rud_item_free(rudp);
+ kmem_zone_free(xfs_rud_zone, rudp);
return (xfs_lsn_t)-1;
}
struct xfs_rud_log_item *
xfs_rud_init(
struct xfs_mount *mp,
- struct xfs_rui_log_item *ruip,
- uint nextents)
+ struct xfs_rui_log_item *ruip)
{
struct xfs_rud_log_item *rudp;
- uint size;
-
- ASSERT(nextents > 0);
- if (nextents > XFS_RUD_MAX_FAST_EXTENTS) {
- size = (uint)(sizeof(struct xfs_rud_log_item) +
- ((nextents - 1) * sizeof(struct xfs_map_extent)));
- rudp = kmem_zalloc(size, KM_SLEEP);
- } else {
- rudp = kmem_zone_zalloc(xfs_rud_zone, KM_SLEEP);
- }
+ rudp = kmem_zone_zalloc(xfs_rud_zone, KM_SLEEP); /* RUDs are fixed-size; always zone-allocate */
xfs_log_item_init(mp, &rudp->rud_item, XFS_LI_RUD, &xfs_rud_item_ops);
rudp->rud_ruip = ruip;
- rudp->rud_format.rud_nextents = nextents;
rudp->rud_format.rud_rui_id = ruip->rui_format.rui_id;
return rudp;
error = xfs_trans_alloc(mp, &M_RES(mp)->tr_itruncate, 0, 0, 0, &tp);
if (error)
return error;
- rudp = xfs_trans_get_rud(tp, ruip, ruip->rui_format.rui_nextents);
+ rudp = xfs_trans_get_rud(tp, ruip);
for (i = 0; i < ruip->rui_format.rui_nextents; i++) {
rmap = &(ruip->rui_format.rui_extents[i]);
struct xfs_rud_log_item {
struct xfs_log_item rud_item;
struct xfs_rui_log_item *rud_ruip;
- uint rud_next_extent;
struct xfs_rud_log_format rud_format;
};
-/*
- * Max number of extents in fast allocation path.
- */
-#define XFS_RUD_MAX_FAST_EXTENTS 16
-
extern struct kmem_zone *xfs_rui_zone;
extern struct kmem_zone *xfs_rud_zone;
struct xfs_rui_log_item *xfs_rui_init(struct xfs_mount *, uint);
struct xfs_rud_log_item *xfs_rud_init(struct xfs_mount *,
- struct xfs_rui_log_item *, uint);
+ struct xfs_rui_log_item *);
int xfs_rui_copy_format(struct xfs_log_iovec *buf,
struct xfs_rui_log_format *dst_rui_fmt);
void xfs_rui_item_free(struct xfs_rui_log_item *);
if (!xfs_icreate_zone)
goto out_destroy_ili_zone;
- xfs_rud_zone = kmem_zone_init((sizeof(struct xfs_rud_log_item) +
- ((XFS_RUD_MAX_FAST_EXTENTS - 1) *
- sizeof(struct xfs_map_extent))),
+ xfs_rud_zone = kmem_zone_init(sizeof(struct xfs_rud_log_item),
"xfs_rud_item");
if (!xfs_rud_zone)
goto out_destroy_icreate_zone;
void xfs_rmap_update_init_defer_op(void);
struct xfs_rud_log_item *xfs_trans_get_rud(struct xfs_trans *tp,
- struct xfs_rui_log_item *ruip, uint nextents);
+ struct xfs_rui_log_item *ruip);
int xfs_trans_log_finish_rmap_update(struct xfs_trans *tp,
struct xfs_rud_log_item *rudp, enum xfs_rmap_intent_type type,
__uint64_t owner, int whichfork, xfs_fileoff_t startoff,
xfs_trans_set_rmap_flags(rmap, type, whichfork, state);
}
-/*
- * This routine is called to allocate an "rmap update done"
- * log item that will hold nextents worth of extents. The
- * caller must use all nextents extents, because we are not
- * flexible about this at all.
- */
+/*
+ * Allocate an "rmap update done" log item and add it to the
+ * transaction.  The RUD no longer carries an extent array, so no
+ * extent count is needed.
+ */
struct xfs_rud_log_item *
xfs_trans_get_rud(
struct xfs_trans *tp,
- struct xfs_rui_log_item *ruip,
- uint nextents)
+ struct xfs_rui_log_item *ruip)
{
struct xfs_rud_log_item *rudp;
- ASSERT(tp != NULL);
- ASSERT(nextents > 0);
-
- rudp = xfs_rud_init(tp->t_mountp, ruip, nextents);
- ASSERT(rudp != NULL);
-
- /*
- * Get a log_item_desc to point at the new item.
- */
+ rudp = xfs_rud_init(tp->t_mountp, ruip);
xfs_trans_add_item(tp, &rudp->rud_item);
return rudp;
}
xfs_exntst_t state,
struct xfs_btree_cur **pcur)
{
- uint next_extent;
- struct xfs_map_extent *rmap;
int error;
error = xfs_rmap_finish_one(tp, type, owner, whichfork, startoff,
tp->t_flags |= XFS_TRANS_DIRTY;
rudp->rud_item.li_desc->lid_flags |= XFS_LID_DIRTY;
- next_extent = rudp->rud_next_extent;
- ASSERT(next_extent < rudp->rud_format.rud_nextents);
- rmap = &(rudp->rud_format.rud_extents[next_extent]);
- rmap->me_owner = owner;
- rmap->me_startblock = startblock;
- rmap->me_startoff = startoff;
- rmap->me_len = blockcount;
- xfs_trans_set_rmap_flags(rmap, type, whichfork, state);
- rudp->rud_next_extent++;
-
return error;
}
void *intent,
unsigned int count)
{
- return xfs_trans_get_rud(tp, intent, count);
+ return xfs_trans_get_rud(tp, intent);
}
/* Process a deferred rmap update. */