From 4345abb2c3b56eae9bcf4728dcdc0f584690c8aa Mon Sep 17 00:00:00 2001
From: Bobi Jam
Date: Wed, 25 Mar 2015 21:53:21 -0400
Subject: [PATCH] staging/lustre/mgc: detach MGC dev on error

lustre_start_mgc() creates the MGC device. If an error occurs later
in ll_fill_super(), the device is left attached, and every subsequent
mount fails, complaining that the MGC device already exists on the
client node.

It turns out that the device is held by MGC config llog data queued
on the MGC lock requeue thread, which keeps retrying to take its MGC
lock; in the normal case that llog reference is only released in
mgc_blocking_ast() when the filesystem is unmounted.

This patch makes mgc_precleanup() wake up the requeue thread to clean
up the config llog data.

It also makes mgc_setup() wait for mgc_requeue_thread() to start
before moving on.

Signed-off-by: Bobi Jam
Reviewed-on: http://review.whamcloud.com/11765
Intel-bug-id: https://jira.hpdd.intel.com/browse/LU-4943
Reviewed-by: Ryan Haasken
Signed-off-by: Oleg Drokin
Signed-off-by: Greg Kroah-Hartman
---
 .../staging/lustre/lustre/mgc/mgc_request.c | 36 +++++++++++++------
 1 file changed, 25 insertions(+), 11 deletions(-)

diff --git a/drivers/staging/lustre/lustre/mgc/mgc_request.c b/drivers/staging/lustre/lustre/mgc/mgc_request.c
index 3f00e775cbff..8496d2552d52 100644
--- a/drivers/staging/lustre/lustre/mgc/mgc_request.c
+++ b/drivers/staging/lustre/lustre/mgc/mgc_request.c
@@ -484,9 +484,11 @@ int lprocfs_mgc_rd_ir_state(struct seq_file *m, void *data)
 #define RQ_NOW 0x2
 #define RQ_LATER 0x4
 #define RQ_STOP 0x8
+#define RQ_PRECLEANUP 0x10
 static int rq_state;
 static wait_queue_head_t rq_waitq;
 static DECLARE_COMPLETION(rq_exit);
+static DECLARE_COMPLETION(rq_start);
 
 static void do_requeue(struct config_llog_data *cld)
 {
@@ -515,6 +517,8 @@ static void do_requeue(struct config_llog_data *cld)
 
 static int mgc_requeue_thread(void *data)
 {
+	bool first = true;
+
 	CDEBUG(D_MGC, "Starting requeue thread\n");
 
 	/* Keep trying failed locks periodically */
@@ -531,13 +535,19 @@ static int mgc_requeue_thread(void *data)
 		rq_state &= ~(RQ_NOW | RQ_LATER);
 		spin_unlock(&config_list_lock);
 
+		if (first) {
+			first = false;
+			complete(&rq_start);
+		}
+
 		/* Always wait a few seconds to allow the server who
 		   caused the lock revocation to finish its setup, plus some
 		   random so everyone doesn't try to reconnect at once. */
 		to = MGC_TIMEOUT_MIN_SECONDS * HZ;
 		to += rand * HZ / 100; /* rand is centi-seconds */
 		lwi = LWI_TIMEOUT(to, NULL, NULL);
-		l_wait_event(rq_waitq, rq_state & RQ_STOP, &lwi);
+		l_wait_event(rq_waitq, rq_state & (RQ_STOP | RQ_PRECLEANUP),
+			     &lwi);
 
 		/*
 		 * iterate & processing through the list. for each cld, process
@@ -550,6 +560,7 @@ static int mgc_requeue_thread(void *data)
 		cld_prev = NULL;
 
 		spin_lock(&config_list_lock);
+		rq_state &= ~RQ_PRECLEANUP;
 		list_for_each_entry(cld, &config_llog_list,
 				    cld_list_chain) {
 			if (!cld->cld_lostlock)
@@ -666,24 +677,26 @@ static atomic_t mgc_count = ATOMIC_INIT(0);
 static int mgc_precleanup(struct obd_device *obd, enum obd_cleanup_stage stage)
 {
 	int rc = 0;
+	int temp;
 
 	switch (stage) {
 	case OBD_CLEANUP_EARLY:
 		break;
 	case OBD_CLEANUP_EXPORTS:
 		if (atomic_dec_and_test(&mgc_count)) {
-			int running;
+			LASSERT(rq_state & RQ_RUNNING);
 			/* stop requeue thread */
-			spin_lock(&config_list_lock);
-			running = rq_state & RQ_RUNNING;
-			if (running)
-				rq_state |= RQ_STOP;
-			spin_unlock(&config_list_lock);
-			if (running) {
-				wake_up(&rq_waitq);
-				wait_for_completion(&rq_exit);
-			}
+			temp = RQ_STOP;
+		} else {
+			/* wakeup requeue thread to clean our cld */
+			temp = RQ_NOW | RQ_PRECLEANUP;
 		}
+		spin_lock(&config_list_lock);
+		rq_state |= temp;
+		spin_unlock(&config_list_lock);
+		wake_up(&rq_waitq);
+		if (temp & RQ_STOP)
+			wait_for_completion(&rq_exit);
 		obd_cleanup_client_import(obd);
 		rc = mgc_llog_fini(NULL, obd);
 		if (rc != 0)
@@ -742,6 +755,7 @@ static int mgc_setup(struct obd_device *obd, struct lustre_cfg *lcfg)
 		}
 		/* rc is the task_struct pointer of mgc_requeue_thread. */
 		rc = 0;
+		wait_for_completion(&rq_start);
 	}
 
 	return rc;
-- 
2.20.1
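
The handshake mgc_setup() now performs with mgc_requeue_thread() is the
standard completion pattern: the creator must not return until the new
thread has observably entered its service loop, otherwise an early
mount failure could try to detach the MGC device before the requeue
thread exists to drop the config llog references. The precleanup side
uses the complementary idiom: set flag bits (RQ_STOP, or
RQ_NOW | RQ_PRECLEANUP) in rq_state under config_list_lock, wake
rq_waitq, and let the thread consume RQ_PRECLEANUP before it walks the
llog list; only the last MGC sets RQ_STOP and then waits on rq_exit.

Below is a minimal userspace sketch of the startup handshake, offered
only as an illustration: a POSIX semaphore stands in for the kernel's
struct completion, and the names (startup_done, worker) are
hypothetical, not part of the patch.

	/* handshake.c - build with: cc -pthread handshake.c */
	#include <pthread.h>
	#include <semaphore.h>
	#include <stdbool.h>
	#include <stdio.h>

	static sem_t startup_done;	/* plays the role of rq_start */

	static void *worker(void *arg)
	{
		bool first = true;
		int i;

		for (i = 0; i < 3; i++) {  /* stands in for the requeue loop */
			if (first) {
				first = false;
				sem_post(&startup_done); /* ~ complete(&rq_start) */
			}
			printf("worker: iteration %d\n", i);
		}
		return NULL;
	}

	int main(void)
	{
		pthread_t tid;

		sem_init(&startup_done, 0, 0);
		if (pthread_create(&tid, NULL, worker, NULL) != 0)
			return 1;
		/* ~ wait_for_completion(&rq_start): do not proceed until
		 * the thread is known to be running. */
		sem_wait(&startup_done);
		printf("main: worker is up, safe to continue\n");
		pthread_join(tid, NULL);
		sem_destroy(&startup_done);
		return 0;
	}

The same one-shot signalling could be built from a mutex and condition
variable; a semaphore is used here only because, like a completion, it
keeps the "already signalled" state, so the waiter cannot miss a wakeup
that happens before it starts waiting.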