gfs2: gfs2_evict_inode: Put glocks asynchronously
authorAndreas Gruenbacher <agruenba@redhat.com>
Tue, 1 Aug 2017 16:45:23 +0000 (11:45 -0500)
committerBob Peterson <rpeterso@redhat.com>
Thu, 10 Aug 2017 15:45:21 +0000 (10:45 -0500)
gfs2_evict_inode is called to free inodes under memory pressure.  The
function calls into DLM when an inode's last cluster-wide reference goes
away (remote unlink) and to release the glock and associated DLM lock
before finally destroying the inode.  However, if DLM is blocked waiting
for memory to become available, calling into DLM again will deadlock.

Avoid that by decoupling releasing glocks from destroying inodes in that
case: with gfs2_glock_queue_put, glocks will be put asynchronously
in work queue context, when the associated inodes have likely already
been destroyed.

With this change, inodes can end up being unlinked, remote-unlink can be
triggered, and then the inode can be reallocated before all
remote-unlink callbacks are processed.  To detect that, revalidate the
link count in gfs2_evict_inode to make sure we're not deleting an
allocated, referenced inode.

Signed-off-by: Andreas Gruenbacher <agruenba@redhat.com>
Signed-off-by: Bob Peterson <rpeterso@redhat.com>
fs/gfs2/glock.c
fs/gfs2/glock.h
fs/gfs2/super.c

index 11d48b9640477e76109c5cc475ceb2d5fb2a8323..5ad757f0ce6057b8807ed40f3c993119ae95b0c1 100644 (file)
@@ -171,7 +171,7 @@ void gfs2_glock_free(struct gfs2_glock *gl)
  *
  */
 
-static void gfs2_glock_hold(struct gfs2_glock *gl)
+void gfs2_glock_hold(struct gfs2_glock *gl)
 {
        GLOCK_BUG_ON(gl, __lockref_is_dead(&gl->gl_lockref));
        lockref_get(&gl->gl_lockref);
@@ -264,6 +264,14 @@ static void __gfs2_glock_put(struct gfs2_glock *gl)
        sdp->sd_lockstruct.ls_ops->lm_put_lock(gl);
 }
 
+/*
+ * Cause the glock to be put in work queue context.
+ */
+void gfs2_glock_queue_put(struct gfs2_glock *gl)
+{
+       gfs2_glock_queue_work(gl, 0);
+}
+
 /**
  * gfs2_glock_put() - Decrement reference count on glock
  * @gl: The glock to put
index 526d2123f7587c274a9cabf8f4f9e17386cf7b0a..5e12220cc0c241100337723719e2bf14f026f836 100644 (file)
@@ -182,7 +182,9 @@ static inline struct address_space *gfs2_glock2aspace(struct gfs2_glock *gl)
 extern int gfs2_glock_get(struct gfs2_sbd *sdp, u64 number,
                          const struct gfs2_glock_operations *glops,
                          int create, struct gfs2_glock **glp);
+extern void gfs2_glock_hold(struct gfs2_glock *gl);
 extern void gfs2_glock_put(struct gfs2_glock *gl);
+extern void gfs2_glock_queue_put(struct gfs2_glock *gl);
 extern void gfs2_holder_init(struct gfs2_glock *gl, unsigned int state,
                             u16 flags, struct gfs2_holder *gh);
 extern void gfs2_holder_reinit(unsigned int state, u16 flags,
index 6c39bb1ec1005a7e4050f76f5ce31207d431194e..4089dbe617a6fe6755f600685897752a542d6100 100644 (file)
@@ -1501,6 +1501,22 @@ out_qs:
        return error;
 }
 
+/**
+ * gfs2_glock_put_eventually
+ * @gl:        The glock to put
+ *
+ * When under memory pressure, trigger a deferred glock put to make sure we
+ * won't call into DLM and deadlock.  Otherwise, put the glock directly.
+ */
+
+static void gfs2_glock_put_eventually(struct gfs2_glock *gl)
+{
+       if (current->flags & PF_MEMALLOC)
+               gfs2_glock_queue_put(gl);
+       else
+               gfs2_glock_put(gl);
+}
+
 /**
  * gfs2_evict_inode - Remove an inode from cache
  * @inode: The inode to evict
@@ -1564,6 +1580,12 @@ static void gfs2_evict_inode(struct inode *inode)
                        goto out_truncate;
        }
 
+       /*
+        * The inode may have been recreated in the meantime.
+        */
+       if (inode->i_nlink)
+               goto out_truncate;
+
 alloc_failed:
        if (gfs2_holder_initialized(&ip->i_iopen_gh) &&
            test_bit(HIF_HOLDER, &ip->i_iopen_gh.gh_iflags)) {
@@ -1653,12 +1675,16 @@ out:
        glock_clear_object(ip->i_gl, ip);
        wait_on_bit_io(&ip->i_flags, GIF_GLOP_PENDING, TASK_UNINTERRUPTIBLE);
        gfs2_glock_add_to_lru(ip->i_gl);
-       gfs2_glock_put(ip->i_gl);
+       gfs2_glock_put_eventually(ip->i_gl);
        ip->i_gl = NULL;
        if (gfs2_holder_initialized(&ip->i_iopen_gh)) {
-               glock_clear_object(ip->i_iopen_gh.gh_gl, ip);
+               struct gfs2_glock *gl = ip->i_iopen_gh.gh_gl;
+
+               glock_clear_object(gl, ip);
                ip->i_iopen_gh.gh_flags |= GL_NOCACHE;
+               gfs2_glock_hold(gl);
                gfs2_glock_dq_uninit(&ip->i_iopen_gh);
+               gfs2_glock_put_eventually(gl);
        }
 }