/* Likely gfs2_glock_dealloc() in fs/gfs2/glock.c (file and function names
 * here and below are inferred from context; the patch removes the glock's
 * separate gl_lvb pointer in favor of the one already embedded in its
 * dlm_lksb): when a non-address-space glock is freed, its LVB buffer is
 * now freed through the lksb. */
	if (gl->gl_ops->go_flags & GLOF_ASPACE) {
		kmem_cache_free(gfs2_glock_aspace_cachep, gl);
	} else {
-		kfree(gl->gl_lvb);
+		kfree(gl->gl_lksb.sb_lvbptr);
		kmem_cache_free(gfs2_glock_cachep, gl);
	}
}
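/* The next hunk looks like gfs2_glock_get(), also in fs/gfs2/glock.c: for
 * lock types flagged GLOF_LVB, the lock value block is allocated directly
 * into gl_lksb.sb_lvbptr instead of into the removed gl_lvb field and then
 * copied across. */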
		return -ENOMEM;

	memset(&gl->gl_lksb, 0, sizeof(struct dlm_lksb));
-	gl->gl_lvb = NULL;
	if (glops->go_flags & GLOF_LVB) {
-		gl->gl_lvb = kzalloc(GFS2_MIN_LVB_SIZE, GFP_KERNEL);
-		if (!gl->gl_lvb) {
+		gl->gl_lksb.sb_lvbptr = kzalloc(GFS2_MIN_LVB_SIZE, GFP_KERNEL);
+		if (!gl->gl_lksb.sb_lvbptr) {
			kmem_cache_free(cachep, gl);
			return -ENOMEM;
		}
-		gl->gl_lksb.sb_lvbptr = gl->gl_lvb;
	}

	atomic_inc(&sdp->sd_glock_disposal);
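/* Still gfs2_glock_get(), by the look of it: if search_bucket() finds that
 * a racing creator already inserted this glock, the duplicate is freed,
 * including its LVB, now reached through the lksb. */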
	tmp = search_bucket(hash, sdp, &name);
	if (tmp) {
		spin_unlock_bucket(hash);
-		kfree(gl->gl_lvb);
+		kfree(gl->gl_lksb.sb_lvbptr);
		kmem_cache_free(cachep, gl);
		atomic_dec(&sdp->sd_glock_disposal);
		gl = tmp;
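/* struct gfs2_glock in fs/gfs2/incore.h: the standalone gl_lvb pointer is
 * dropped, leaving gl_lksb.sb_lvbptr as the only reference to the lock
 * value block. */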
	ktime_t gl_dstamp;
	struct gfs2_lkstats gl_stats;
	struct dlm_lksb gl_lksb;
-	char *gl_lvb;
	unsigned long gl_tchange;
	void *gl_object;
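/* For reference, a sketch of the DLM lock status block embedded in each
 * glock as gl_lksb. This reproduces struct dlm_lksb from include/linux/dlm.h
 * of the same era (treat it as a reference sketch, not authoritative) only
 * to show why gl_lvb was redundant: the lksb already carries an LVB pointer.
 *
 *	struct dlm_lksb {
 *		int	 sb_status;	// completion status from the DLM
 *		__u32	 sb_lkid;	// lock ID assigned by the DLM
 *		char	 sb_flags;	// DLM_SBF_* result flags
 *		char	*sb_lvbptr;	// lock value block, if any
 *	};
 *
 * The following hunk appears to be gdlm_ast() in fs/gfs2/lock_dlm.c: when
 * the DLM reports the LVB contents invalid (DLM_SBF_VALNOTVALID), the
 * buffer is zeroed, guarded so glocks without an LVB skip the memset. */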
	gfs2_update_reply_times(gl);
	BUG_ON(gl->gl_lksb.sb_flags & DLM_SBF_DEMOTED);

-	if (gl->gl_lksb.sb_flags & DLM_SBF_VALNOTVALID && gl->gl_lvb)
-		memset(gl->gl_lvb, 0, GDLM_LVB_SIZE);
+	if ((gl->gl_lksb.sb_flags & DLM_SBF_VALNOTVALID) && gl->gl_lksb.sb_lvbptr)
+		memset(gl->gl_lksb.sb_lvbptr, 0, GDLM_LVB_SIZE);

	switch (gl->gl_lksb.sb_status) {
	case -DLM_EUNLOCK: /* Unlocked, so glock can be freed */
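/* Likely make_flags() in lock_dlm.c: DLM_LKF_VALBLK is requested only when
 * the glock actually owns an LVB buffer, now tested via the lksb pointer. */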
{
	u32 lkf = 0;

-	if (gl->gl_lvb)
+	if (gl->gl_lksb.sb_lvbptr)
		lkf |= DLM_LKF_VALBLK;

	if (gfs_flags & LM_FLAG_TRY)
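/* Likely gdlm_put_lock() in lock_dlm.c: the unmount-time optimization that
 * skips dlm_unlock() must not fire for an EX lock holding an LVB, because
 * the unlock request is what writes the LVB back to the DLM. */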
	/* don't want to skip dlm_unlock writing the lvb when lock is ex */
	if (test_bit(SDF_SKIP_DLM_UNLOCK, &sdp->sd_flags) &&
-	    gl->gl_lvb && gl->gl_state != LM_ST_EXCLUSIVE) {
+	    gl->gl_lksb.sb_lvbptr && (gl->gl_state != LM_ST_EXCLUSIVE)) {
		gfs2_glock_free(gl);
		return;
	}
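/* The fs/gfs2/quota.c hunks follow. This one appears to be update_qd(),
 * which rebuilds the gfs2_quota_lvb held in the quota glock's LVB after
 * reading the on-disk quota record. */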
	if (error < 0)
		return error;

-	qlvb = (struct gfs2_quota_lvb *)qd->qd_gl->gl_lvb;
+	qlvb = (struct gfs2_quota_lvb *)qd->qd_gl->gl_lksb.sb_lvbptr;
	qlvb->qb_magic = cpu_to_be32(GFS2_MAGIC);
	qlvb->__pad = 0;
	qlvb->qb_limit = q.qu_limit;
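/* Appears to be do_glock() in quota.c: with the quota glock held SHARED,
 * the cached qd_qb is copied out of the LVB and revalidated by its magic
 * number, forcing a refresh if it is stale. */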
	if (error)
		return error;

-	qd->qd_qb = *(struct gfs2_quota_lvb *)qd->qd_gl->gl_lvb;
+	qd->qd_qb = *(struct gfs2_quota_lvb *)qd->qd_gl->gl_lksb.sb_lvbptr;

	if (force_refresh || qd->qd_qb.qb_magic != cpu_to_be32(GFS2_MAGIC)) {
		gfs2_glock_dq_uninit(q_gh);
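/* Appears to be gfs2_get_dqblk() in quota.c: quota limits are reported to
 * the VFS straight out of the glock's LVB. */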
	if (error)
		goto out;

-	qlvb = (struct gfs2_quota_lvb *)qd->qd_gl->gl_lvb;
+	qlvb = (struct gfs2_quota_lvb *)qd->qd_gl->gl_lksb.sb_lvbptr;
	fdq->d_version = FS_DQUOT_VERSION;
	fdq->d_flags = (type == QUOTA_USER) ? FS_USER_QUOTA : FS_GROUP_QUOTA;
	fdq->d_id = from_kqid(&init_user_ns, qid);
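/* Likely read_rindex_entry() in fs/gfs2/rgrp.c: the resource group's
 * cached LVB view (rd_rgl) now points at the lksb's buffer. */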
		goto fail;

	rgd->rd_gl->gl_object = rgd;
-	rgd->rd_rgl = (struct gfs2_rgrp_lvb *)rgd->rd_gl->gl_lvb;
+	rgd->rd_rgl = (struct gfs2_rgrp_lvb *)rgd->rd_gl->gl_lksb.sb_lvbptr;
	rgd->rd_flags &= ~GFS2_RDF_UPTODATE;
	if (rgd->rd_data > sdp->sd_max_rg_data)
		sdp->sd_max_rg_data = rgd->rd_data;