ocfs2_dlm: Fixes race between migrate and dirty
authorKurt Hackel <kurt.hackel@oracle.com>
Fri, 5 Jan 2007 23:00:17 +0000 (15:00 -0800)
committerMark Fasheh <mark.fasheh@oracle.com>
Wed, 7 Feb 2007 20:00:57 +0000 (12:00 -0800)
dlmthread was removing lockres' from the dirty list
and resetting the dirty flag before shuffling the list.
This patch retains the dirty state flag until the lists
are shuffled.

Signed-off-by: Kurt Hackel <kurt.hackel@oracle.com>
Signed-off-by: Sunil Mushran <Sunil.Mushran@oracle.com>
Signed-off-by: Mark Fasheh <mark.fasheh@oracle.com>
fs/ocfs2/dlm/dlmcommon.h
fs/ocfs2/dlm/dlmmaster.c
fs/ocfs2/dlm/dlmthread.c

index 04048bb1a1bdca4dd3e4529b7a3c6847d561ef28..e95ecb2aaf1493fd0caa523d9feed48ca46a6d40 100644 (file)
@@ -223,6 +223,7 @@ static inline void __dlm_set_joining_node(struct dlm_ctxt *dlm,
 #define DLM_LOCK_RES_IN_PROGRESS          0x00000010
 #define DLM_LOCK_RES_MIGRATING            0x00000020
 #define DLM_LOCK_RES_DROPPING_REF         0x00000040
+#define DLM_LOCK_RES_BLOCK_DIRTY          0x00001000
 
 /* max milliseconds to wait to sync up a network failure with a node death */
 #define DLM_NODE_DEATH_WAIT_MAX (5 * 1000)
index 251c48028ea341fe143aa1ec9bbe9742d6544233..a65a87726d6a30fee23de505d5e52938b0e2cea6 100644 (file)
@@ -2707,8 +2707,15 @@ static int dlm_mark_lockres_migrating(struct dlm_ctxt *dlm,
        __dlm_lockres_reserve_ast(res);
        spin_unlock(&res->spinlock);
 
-       /* now flush all the pending asts.. hang out for a bit */
+       /* now flush all the pending asts */
        dlm_kick_thread(dlm, res);
+       /* before waiting on DIRTY, block processes which may
+        * try to dirty the lockres before MIGRATING is set */
+       spin_lock(&res->spinlock);
+       BUG_ON(res->state & DLM_LOCK_RES_BLOCK_DIRTY);
+       res->state |= DLM_LOCK_RES_BLOCK_DIRTY;
+       spin_unlock(&res->spinlock);
+       /* now wait on any pending asts and the DIRTY state */
        wait_event(dlm->ast_wq, !dlm_lockres_is_dirty(dlm, res));
        dlm_lockres_release_ast(dlm, res);
 
@@ -2734,6 +2741,13 @@ again:
                mlog(0, "trying again...\n");
                goto again;
        }
+       /* now that we are sure the MIGRATING state is there, drop
+        * the unneeded state which blocked threads trying to DIRTY */
+       spin_lock(&res->spinlock);
+       BUG_ON(!(res->state & DLM_LOCK_RES_BLOCK_DIRTY));
+       BUG_ON(!(res->state & DLM_LOCK_RES_MIGRATING));
+       res->state &= ~DLM_LOCK_RES_BLOCK_DIRTY;
+       spin_unlock(&res->spinlock);
 
        /* did the target go down or die? */
        spin_lock(&dlm->spinlock);
index baa99979904c4f60767fa2234daeff3108559cd8..3b94e4dec351b8bbdfbe85137fb558aedbd5ef04 100644 (file)
@@ -95,7 +95,7 @@ int __dlm_lockres_has_locks(struct dlm_lock_resource *res)
 int __dlm_lockres_unused(struct dlm_lock_resource *res)
 {
        if (!__dlm_lockres_has_locks(res) &&
-           list_empty(&res->dirty)) {
+           (list_empty(&res->dirty) && !(res->state & DLM_LOCK_RES_DIRTY))) {
                /* try not to scan the bitmap unless the first two
                 * conditions are already true */
                int bit = find_next_bit(res->refmap, O2NM_MAX_NODES, 0);
@@ -455,12 +455,17 @@ void __dlm_dirty_lockres(struct dlm_ctxt *dlm, struct dlm_lock_resource *res)
        assert_spin_locked(&res->spinlock);
 
        /* don't shuffle secondary queues */
-       if ((res->owner == dlm->node_num) &&
-           !(res->state & DLM_LOCK_RES_DIRTY)) {
-               /* ref for dirty_list */
-               dlm_lockres_get(res);
-               list_add_tail(&res->dirty, &dlm->dirty_list);
-               res->state |= DLM_LOCK_RES_DIRTY;
+       if ((res->owner == dlm->node_num)) {
+               if (res->state & (DLM_LOCK_RES_MIGRATING |
+                                 DLM_LOCK_RES_BLOCK_DIRTY))
+                   return;
+
+               if (list_empty(&res->dirty)) {
+                       /* ref for dirty_list */
+                       dlm_lockres_get(res);
+                       list_add_tail(&res->dirty, &dlm->dirty_list);
+                       res->state |= DLM_LOCK_RES_DIRTY;
+               }
        }
 }
 
@@ -639,7 +644,7 @@ static int dlm_thread(void *data)
                        dlm_lockres_get(res);
 
                        spin_lock(&res->spinlock);
-                       res->state &= ~DLM_LOCK_RES_DIRTY;
+                       /* We clear the DLM_LOCK_RES_DIRTY state once we shuffle lists below */
                        list_del_init(&res->dirty);
                        spin_unlock(&res->spinlock);
                        spin_unlock(&dlm->spinlock);
@@ -663,10 +668,11 @@ static int dlm_thread(void *data)
                        /* it is now ok to move lockreses in these states
                         * to the dirty list, assuming that they will only be
                         * dirty for a short while. */
+                       BUG_ON(res->state & DLM_LOCK_RES_MIGRATING);
                        if (res->state & (DLM_LOCK_RES_IN_PROGRESS |
-                                         DLM_LOCK_RES_MIGRATING |
                                          DLM_LOCK_RES_RECOVERING)) {
                                /* move it to the tail and keep going */
+                               res->state &= ~DLM_LOCK_RES_DIRTY;
                                spin_unlock(&res->spinlock);
                                mlog(0, "delaying list shuffling for in-"
                                     "progress lockres %.*s, state=%d\n",
@@ -687,6 +693,7 @@ static int dlm_thread(void *data)
 
                        /* called while holding lockres lock */
                        dlm_shuffle_lists(dlm, res);
+                       res->state &= ~DLM_LOCK_RES_DIRTY;
                        spin_unlock(&res->spinlock);
 
                        dlm_lockres_calc_usage(dlm, res);
@@ -697,11 +704,8 @@ in_progress:
                        /* if the lock was in-progress, stick
                         * it on the back of the list */
                        if (delay) {
-                               /* ref for dirty_list */
-                               dlm_lockres_get(res);
                                spin_lock(&res->spinlock);
-                               list_add_tail(&res->dirty, &dlm->dirty_list);
-                               res->state |= DLM_LOCK_RES_DIRTY;
+                               __dlm_dirty_lockres(dlm, res);
                                spin_unlock(&res->spinlock);
                        }
                        dlm_lockres_put(res);