staging: lustre: osc: allow to call brw_commit() multiple times
author Jinshan Xiong <jinshan.xiong@intel.com>
Tue, 16 Aug 2016 20:18:34 +0000 (16:18 -0400)
committer Greg Kroah-Hartman <gregkh@linuxfoundation.org>
Sun, 21 Aug 2016 13:57:34 +0000 (15:57 +0200)
Sometimes the rq_commit_cb of a BRW RPC can be called twice if that RPC
has already been committed at reply time. This causes inaccurate
unstable page accounting and, in turn, an assertion failure.

Signed-off-by: Jinshan Xiong <jinshan.xiong@intel.com>
Intel-bug-id: https://jira.hpdd.intel.com/browse/LU-3274
Reviewed-on: http://review.whamcloud.com/8215
Reviewed-by: Prakash Surya <surya1@llnl.gov>
Reviewed-by: Andreas Dilger <andreas.dilger@intel.com>
Reviewed-by: Oleg Drokin <oleg.drokin@intel.com>
Signed-off-by: James Simmons <jsimmons@infradead.org>
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
drivers/staging/lustre/lustre/osc/osc_cache.c
drivers/staging/lustre/lustre/osc/osc_request.c

index 53b5d73f2cd14a3916c702a629d70247f0d2e51b..683b3c20380ad63049b43b2f658f208a81779fde 100644 (file)
@@ -1875,11 +1875,6 @@ void osc_dec_unstable_pages(struct ptlrpc_request *req)
        atomic_sub(page_count, &obd_unstable_pages);
        LASSERT(atomic_read(&obd_unstable_pages) >= 0);
 
-       spin_lock(&req->rq_lock);
-       req->rq_committed = 1;
-       req->rq_unstable  = 0;
-       spin_unlock(&req->rq_lock);
-
        wake_up_all(&cli->cl_cache->ccc_unstable_waitq);
 }
 
@@ -1909,27 +1904,21 @@ void osc_inc_unstable_pages(struct ptlrpc_request *req)
        LASSERT(atomic_read(&obd_unstable_pages) >= 0);
        atomic_add(page_count, &obd_unstable_pages);
 
-       spin_lock(&req->rq_lock);
-
        /*
         * If the request has already been committed (i.e. brw_commit
         * called via rq_commit_cb), we need to undo the unstable page
         * increments we just performed because rq_commit_cb wont be
-        * called again. Otherwise, just set the commit callback so the
-        * unstable page accounting is properly updated when the request
-        * is committed
+        * called again.
         */
-       if (req->rq_committed) {
+       spin_lock(&req->rq_lock);
+       if (unlikely(req->rq_committed)) {
                /* Drop lock before calling osc_dec_unstable_pages */
                spin_unlock(&req->rq_lock);
                osc_dec_unstable_pages(req);
-               spin_lock(&req->rq_lock);
        } else {
                req->rq_unstable = 1;
-               req->rq_commit_cb = osc_dec_unstable_pages;
+               spin_unlock(&req->rq_lock);
        }
-
-       spin_unlock(&req->rq_lock);
 }
 
 /* this must be called holding the loi list lock to give coverage to exit_cache,
index 536b868ff776b8ad38f93c23f628e4e875991785..a2d948f09c4d7c44a731c266292525f7f888555f 100644 (file)
@@ -1847,21 +1847,21 @@ static int brw_interpret(const struct lu_env *env,
 
 static void brw_commit(struct ptlrpc_request *req)
 {
-       spin_lock(&req->rq_lock);
        /*
         * If osc_inc_unstable_pages (via osc_extent_finish) races with
         * this called via the rq_commit_cb, I need to ensure
         * osc_dec_unstable_pages is still called. Otherwise unstable
         * pages may be leaked.
         */
-       if (req->rq_unstable) {
+       spin_lock(&req->rq_lock);
+       if (unlikely(req->rq_unstable)) {
+               req->rq_unstable = 0;
                spin_unlock(&req->rq_lock);
                osc_dec_unstable_pages(req);
-               spin_lock(&req->rq_lock);
        } else {
                req->rq_committed = 1;
+               spin_unlock(&req->rq_lock);
        }
-       spin_unlock(&req->rq_lock);
 }
 
 /**