block: fix lockdep warning on io_context release put_io_context()
authorTejun Heo <tj@kernel.org>
Sat, 11 Feb 2012 11:37:25 +0000 (12:37 +0100)
committerJens Axboe <axboe@kernel.dk>
Sat, 11 Feb 2012 11:37:25 +0000 (12:37 +0100)
11a3122f6c "block: strip out locking optimization in put_io_context()"
removed ioc_lock depth lockdep annotation along with locking
optimization; however, while recursing from put_io_context() is no
longer possible, ioc_release_fn() may still end up putting the last
reference of another ioc through elevator, which will grab ioc->lock
triggering spurious (as the ioc is always a different one) A-A deadlock
warning.

As this can only happen one time from ioc_release_fn(), using non-zero
subclass from ioc_release_fn() is enough.  Use subclass 1.

Signed-off-by: Tejun Heo <tj@kernel.org>
Signed-off-by: Jens Axboe <axboe@kernel.dk>
block/blk-ioc.c

index 9884fd7427fef7658f01b0d5cb6af129d9ecb645..8b782a63c29705661dc3ced3a13ead1dd4b42b22 100644 (file)
@@ -80,8 +80,15 @@ static void ioc_release_fn(struct work_struct *work)
        struct io_context *ioc = container_of(work, struct io_context,
                                              release_work);
        struct request_queue *last_q = NULL;
+       unsigned long flags;
 
-       spin_lock_irq(&ioc->lock);
+       /*
+        * Exiting icq may call into put_io_context() through elevator
+        * which will trigger lockdep warning.  The ioc's are guaranteed to
+        * be different, use a different locking subclass here.  Use
+        * irqsave variant as there's no spin_lock_irq_nested().
+        */
+       spin_lock_irqsave_nested(&ioc->lock, flags, 1);
 
        while (!hlist_empty(&ioc->icq_list)) {
                struct io_cq *icq = hlist_entry(ioc->icq_list.first,
@@ -103,15 +110,15 @@ static void ioc_release_fn(struct work_struct *work)
                         */
                        if (last_q) {
                                spin_unlock(last_q->queue_lock);
-                               spin_unlock_irq(&ioc->lock);
+                               spin_unlock_irqrestore(&ioc->lock, flags);
                                blk_put_queue(last_q);
                        } else {
-                               spin_unlock_irq(&ioc->lock);
+                               spin_unlock_irqrestore(&ioc->lock, flags);
                        }
 
                        last_q = this_q;
-                       spin_lock_irq(this_q->queue_lock);
-                       spin_lock(&ioc->lock);
+                       spin_lock_irqsave(this_q->queue_lock, flags);
+                       spin_lock_nested(&ioc->lock, 1);
                        continue;
                }
                ioc_exit_icq(icq);
@@ -119,10 +126,10 @@ static void ioc_release_fn(struct work_struct *work)
 
        if (last_q) {
                spin_unlock(last_q->queue_lock);
-               spin_unlock_irq(&ioc->lock);
+               spin_unlock_irqrestore(&ioc->lock, flags);
                blk_put_queue(last_q);
        } else {
-               spin_unlock_irq(&ioc->lock);
+               spin_unlock_irqrestore(&ioc->lock, flags);
        }
 
        kmem_cache_free(iocontext_cachep, ioc);