block: use an atomic_t for mq_freeze_depth
author		Christoph Hellwig <hch@lst.de>
		Thu, 7 May 2015 07:38:13 +0000 (09:38 +0200)
committer	Jens Axboe <axboe@fb.com>
		Tue, 19 May 2015 15:12:59 +0000 (09:12 -0600)
lockdep gets unhappy about irqs not being disabled when the queue_lock
is taken around mq_freeze_depth.  Instead of trying to fix that up,
just switch to an atomic_t and get rid of the lock.

Signed-off-by: Christoph Hellwig <hch@lst.de>
Signed-off-by: Jens Axboe <axboe@fb.com>
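
[Editor's note: a minimal userspace sketch of the pattern the patch adopts,
for readers outside the kernel tree.  The freeze depth becomes an atomic
counter and only the 0->1 and 1->0 transitions do the real work, so no
spinlock (and no irq disabling for lockdep to complain about) is needed.
C11 <stdatomic.h> stands in for the kernel's atomic_inc_return() and
atomic_dec_return(), and start_freeze()/finish_unfreeze() are hypothetical
stand-ins for percpu_ref_kill()/percpu_ref_reinit() plus the wakeup; this
is an illustration, not kernel code.]

	#include <assert.h>
	#include <stdatomic.h>
	#include <stdio.h>

	static atomic_int mq_freeze_depth;

	/* Hypothetical stand-ins for the percpu_ref/waitqueue work. */
	static void start_freeze(void)    { puts("kill usage counter"); }
	static void finish_unfreeze(void) { puts("reinit counter, wake waiters"); }

	static void freeze_queue_start(void)
	{
		/* atomic_inc_return() equivalent: fetch_add returns the old value */
		int freeze_depth = atomic_fetch_add(&mq_freeze_depth, 1) + 1;

		if (freeze_depth == 1)		/* first freezer does the real work */
			start_freeze();
	}

	static void unfreeze_queue(void)
	{
		/* atomic_dec_return() equivalent */
		int freeze_depth = atomic_fetch_sub(&mq_freeze_depth, 1) - 1;

		assert(freeze_depth >= 0);	/* mirrors the WARN_ON_ONCE() */
		if (freeze_depth == 0)		/* last unfreezer wakes everyone up */
			finish_unfreeze();
	}

	int main(void)
	{
		freeze_queue_start();	/* depth 0 -> 1: freezes */
		freeze_queue_start();	/* depth 1 -> 2: nested, no extra work */
		unfreeze_queue();	/* depth 2 -> 1: still frozen */
		unfreeze_queue();	/* depth 1 -> 0: unfreezes */
		return 0;
	}

[The design point is the same as in the patch below: nested freezes only
bump the counter, and only the outermost freeze/unfreeze touches the
percpu ref and the wait queue.]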
block/blk-mq.c
include/linux/blkdev.h

diff --git a/block/blk-mq.c b/block/blk-mq.c
index 31df474436997e9adc1865f1955b72d89dba862d..c382a34fe5ac61a939f411e810d1a6b1849e146b 100644
--- a/block/blk-mq.c
+++ b/block/blk-mq.c
@@ -89,7 +89,8 @@ static int blk_mq_queue_enter(struct request_queue *q, gfp_t gfp)
                        return -EBUSY;
 
                ret = wait_event_interruptible(q->mq_freeze_wq,
-                               !q->mq_freeze_depth || blk_queue_dying(q));
+                               !atomic_read(&q->mq_freeze_depth) ||
+                               blk_queue_dying(q));
                if (blk_queue_dying(q))
                        return -ENODEV;
                if (ret)
@@ -112,13 +113,10 @@ static void blk_mq_usage_counter_release(struct percpu_ref *ref)
 
 void blk_mq_freeze_queue_start(struct request_queue *q)
 {
-       bool freeze;
+       int freeze_depth;
 
-       spin_lock_irq(q->queue_lock);
-       freeze = !q->mq_freeze_depth++;
-       spin_unlock_irq(q->queue_lock);
-
-       if (freeze) {
+       freeze_depth = atomic_inc_return(&q->mq_freeze_depth);
+       if (freeze_depth == 1) {
                percpu_ref_kill(&q->mq_usage_counter);
                blk_mq_run_hw_queues(q, false);
        }
@@ -143,13 +141,11 @@ EXPORT_SYMBOL_GPL(blk_mq_freeze_queue);
 
 void blk_mq_unfreeze_queue(struct request_queue *q)
 {
-       bool wake;
+       int freeze_depth;
 
-       spin_lock_irq(q->queue_lock);
-       wake = !--q->mq_freeze_depth;
-       WARN_ON_ONCE(q->mq_freeze_depth < 0);
-       spin_unlock_irq(q->queue_lock);
-       if (wake) {
+       freeze_depth = atomic_dec_return(&q->mq_freeze_depth);
+       WARN_ON_ONCE(freeze_depth < 0);
+       if (!freeze_depth) {
                percpu_ref_reinit(&q->mq_usage_counter);
                wake_up_all(&q->mq_freeze_wq);
        }
@@ -2081,7 +2077,7 @@ void blk_mq_free_queue(struct request_queue *q)
 /* Basically redo blk_mq_init_queue with queue frozen */
 static void blk_mq_queue_reinit(struct request_queue *q)
 {
-       WARN_ON_ONCE(!q->mq_freeze_depth);
+       WARN_ON_ONCE(!atomic_read(&q->mq_freeze_depth));
 
        blk_mq_sysfs_unregister(q);
 
diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h
index 2da818a480975ea28cd82b7beeef9621982120c6..bc917956a6d0b82ab1f17f3159c5e898774047ff 100644
--- a/include/linux/blkdev.h
+++ b/include/linux/blkdev.h
@@ -444,7 +444,7 @@ struct request_queue {
        struct mutex            sysfs_lock;
 
        int                     bypass_depth;
-       int                     mq_freeze_depth;
+       atomic_t                mq_freeze_depth;
 
 #if defined(CONFIG_BLK_DEV_BSG)
        bsg_job_fn              *bsg_job_fn;