{
struct gfs2_glock *gl = container_of(rcu, struct gfs2_glock, gl_rcu);
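+	/* rgrp glocks are aspace glocks that also carry an lvb
+	 * (GLOF_ASPACE | GLOF_LVB), so free the lvb before picking a
+	 * cache; kfree(NULL) is a no-op for glocks without one */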
+	kfree(gl->gl_lvb);
	if (gl->gl_ops->go_flags & GLOF_ASPACE)
		kmem_cache_free(gfs2_glock_aspace_cachep, gl);
	else
		kmem_cache_free(gfs2_glock_cachep, gl);
}
void gfs2_glock_free(struct gfs2_glock *gl)
if (sdp->sd_lockstruct.ls_ops->lm_lock) {
/* lock_dlm */
ret = sdp->sd_lockstruct.ls_ops->lm_lock(gl, target, lck_flags);
- GLOCK_BUG_ON(gl, ret);
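+		/* an lm_lock failure is fatal here; log the return code
+		 * so the cause survives in the console output */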
+ if (ret) {
+ printk(KERN_ERR "GFS2: lm_lock ret %d\n", ret);
+ GLOCK_BUG_ON(gl, 1);
+ }
} else { /* lock_nolock */
finish_xmote(gl, target);
if (queue_delayed_work(glock_workqueue, &gl->gl_work, 0) == 0)
if (!gl)
return -ENOMEM;
+ memset(&gl->gl_lksb, 0, sizeof(struct dlm_lksb));
+ gl->gl_lvb = NULL;
+
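+	/* only glock types flagged GLOF_LVB (quota and rgrp) carry a
+	 * dlm lock value block; allocate it up front and point the
+	 * lksb at it */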
+ if (glops->go_flags & GLOF_LVB) {
+ gl->gl_lvb = kzalloc(GFS2_MIN_LVB_SIZE, GFP_KERNEL);
+ if (!gl->gl_lvb) {
+ kmem_cache_free(cachep, gl);
+ return -ENOMEM;
+ }
+ gl->gl_lksb.sb_lvbptr = gl->gl_lvb;
+ }
+
atomic_inc(&sdp->sd_glock_disposal);
gl->gl_sbd = sdp;
gl->gl_flags = 0;
preempt_enable();
gl->gl_stats.stats[GFS2_LKS_DCOUNT] = 0;
gl->gl_stats.stats[GFS2_LKS_QCOUNT] = 0;
- memset(&gl->gl_lksb, 0, sizeof(struct dlm_lksb));
- memset(gl->gl_lvb, 0, 32 * sizeof(char));
- gl->gl_lksb.sb_lvbptr = gl->gl_lvb;
gl->gl_tchange = jiffies;
gl->gl_object = NULL;
gl->gl_hold_time = GL_GLOCK_DFT_HOLD;
tmp = search_bucket(hash, sdp, &name);
if (tmp) {
spin_unlock_bucket(hash);
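+		/* lost the insertion race; free our copy of the glock,
+		 * including any lvb it allocated */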
+ kfree(gl->gl_lvb);
kmem_cache_free(cachep, gl);
atomic_dec(&sdp->sd_glock_disposal);
gl = tmp;
.go_unlock = gfs2_rgrp_go_unlock,
.go_dump = gfs2_rgrp_dump,
.go_type = LM_TYPE_RGRP,
- .go_flags = GLOF_ASPACE,
+ .go_flags = GLOF_ASPACE | GLOF_LVB,
};
const struct gfs2_glock_operations gfs2_trans_glops = {
const struct gfs2_glock_operations gfs2_quota_glops = {
.go_type = LM_TYPE_QUOTA,
+ .go_flags = GLOF_LVB,
};
const struct gfs2_glock_operations gfs2_journal_glops = {
gfs2_update_reply_times(gl);
BUG_ON(gl->gl_lksb.sb_flags & DLM_SBF_DEMOTED);
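+	/* only glocks that own an lvb have anything to invalidate */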
- if (gl->gl_lksb.sb_flags & DLM_SBF_VALNOTVALID)
+	if ((gl->gl_lksb.sb_flags & DLM_SBF_VALNOTVALID) && gl->gl_lvb)
memset(gl->gl_lvb, 0, GDLM_LVB_SIZE);
switch (gl->gl_lksb.sb_status) {
static u32 make_flags(struct gfs2_glock *gl, const unsigned int gfs_flags,
const int req)
{
- u32 lkf = DLM_LKF_VALBLK;
- u32 lkid = gl->gl_lksb.sb_lkid;
+ u32 lkf = 0;
+
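+	/* request a lock value block from dlm only when this glock
+	 * has one allocated */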
+ if (gl->gl_lvb)
+ lkf |= DLM_LKF_VALBLK;
if (gfs_flags & LM_FLAG_TRY)
lkf |= DLM_LKF_NOQUEUE;
BUG();
}
- if (lkid != 0) {
+ if (gl->gl_lksb.sb_lkid != 0) {
lkf |= DLM_LKF_CONVERT;
if (test_bit(GLF_BLOCKING, &gl->gl_flags))
lkf |= DLM_LKF_QUECVT;
/* don't want to skip dlm_unlock writing the lvb when lock is ex */
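+	/* (a glock without an lvb has nothing to write back, so it is
+	 * always safe to skip) */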
if (test_bit(SDF_SKIP_DLM_UNLOCK, &sdp->sd_flags) &&
- gl->gl_state != LM_ST_EXCLUSIVE) {
+	    (!gl->gl_lvb || gl->gl_state != LM_ST_EXCLUSIVE)) {
gfs2_glock_free(gl);
return;
}