blk-mq: avoid synchronizing RCU inside blk_cleanup_queue()
[GitHub/moto-9609/android_kernel_motorola_exynos9610.git] / block / blk-core.c
index 048be4aa602446f93e3e981f1ece5659e3d7287b..6aa2bc4e96529fe00da224166d3c71fddb585f65 100644
@@ -333,11 +333,13 @@ EXPORT_SYMBOL(blk_stop_queue);
 void blk_sync_queue(struct request_queue *q)
 {
        del_timer_sync(&q->timeout);
+       cancel_work_sync(&q->timeout_work);
 
        if (q->mq_ops) {
                struct blk_mq_hw_ctx *hctx;
                int i;
 
+               cancel_delayed_work_sync(&q->requeue_work);
                queue_for_each_hw_ctx(q, hctx, i)
                        cancel_delayed_work_sync(&hctx->run_work);
        } else {
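
blk_sync_queue() now also cancels the queue's timeout work and, for blk-mq, the
requeue work, so no deferred work can still be running against a queue that is
being torn down. A minimal sketch of the same teardown pattern, using
illustrative my_dev/my_dev_destroy names that are not part of blk-core.c:

    #include <linux/slab.h>
    #include <linux/timer.h>
    #include <linux/workqueue.h>

    struct my_dev {
            struct timer_list timer;
            struct work_struct work;
            struct delayed_work dwork;
    };

    static void my_dev_destroy(struct my_dev *dev)
    {
            del_timer_sync(&dev->timer);            /* the timer may queue work, */
            cancel_work_sync(&dev->work);           /* so cancel the work after it */
            cancel_delayed_work_sync(&dev->dwork);
            kfree(dev);                             /* nothing can touch *dev now */
    }
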
@@ -529,6 +531,13 @@ static void __blk_drain_queue(struct request_queue *q, bool drain_all)
        }
 }
 
+void blk_drain_queue(struct request_queue *q)
+{
+       spin_lock_irq(q->queue_lock);
+       __blk_drain_queue(q, true);
+       spin_unlock_irq(q->queue_lock);
+}
+
 /**
  * blk_queue_bypass_start - enter queue bypass mode
  * @q: queue of interest
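
The new blk_drain_queue() helper only wraps __blk_drain_queue() together with
the queue_lock handling, so a caller outside this file can drain a legacy queue
without open-coding the locking; the actual call site is not part of this diff.
A hedged sketch of what such a caller might look like, assuming the helper is
made visible to the rest of the block layer (example_shutdown is illustrative):

    static void example_shutdown(struct request_queue *q)
    {
            if (!q->mq_ops)
                    blk_drain_queue(q);     /* takes and drops q->queue_lock itself */
    }
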
@@ -604,8 +613,8 @@ void blk_set_queue_dying(struct request_queue *q)
                spin_lock_irq(q->queue_lock);
                blk_queue_for_each_rl(rl, q) {
                        if (rl->rq_pool) {
-                               wake_up(&rl->wait[BLK_RW_SYNC]);
-                               wake_up(&rl->wait[BLK_RW_ASYNC]);
+                               wake_up_all(&rl->wait[BLK_RW_SYNC]);
+                               wake_up_all(&rl->wait[BLK_RW_ASYNC]);
                        }
                }
                spin_unlock_irq(q->queue_lock);
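
The switch to wake_up_all() matters because tasks blocked in get_request()
sleep exclusively on these wait queues: wake_up() wakes at most one exclusive
waiter, so marking the queue dying could leave the remaining sleepers waiting
forever. A minimal, illustrative sketch of that pattern (the example_* names
are not blk-core.c code):

    #include <linux/compiler.h>
    #include <linux/errno.h>
    #include <linux/sched.h>
    #include <linux/wait.h>

    static DECLARE_WAIT_QUEUE_HEAD(example_wq);
    static bool example_dead;

    static int example_waiter(void)
    {
            DEFINE_WAIT(wait);

            prepare_to_wait_exclusive(&example_wq, &wait, TASK_UNINTERRUPTIBLE);
            if (!READ_ONCE(example_dead))
                    schedule();
            finish_wait(&example_wq, &wait);

            return READ_ONCE(example_dead) ? -ENODEV : 0;
    }

    static void example_kill(void)
    {
            WRITE_ONCE(example_dead, true);
            wake_up_all(&example_wq);       /* wake_up() would reach one waiter only */
    }
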
@@ -653,11 +662,22 @@ void blk_cleanup_queue(struct request_queue *q)
         */
        blk_freeze_queue(q);
        spin_lock_irq(lock);
-       if (!q->mq_ops)
-               __blk_drain_queue(q, true);
        queue_flag_set(QUEUE_FLAG_DEAD, q);
        spin_unlock_irq(lock);
 
+       /*
+        * Make sure all in-flight dispatches have completed. blk_freeze_queue()
+        * only guarantees that all requests are completed; dispatch may still
+        * be in progress because requests are dispatched from more than one
+        * context.
+        *
+        * There is no need to quiesce a queue that isn't fully initialized
+        * yet, since blk_freeze_queue() is already sufficient for the
+        * passthrough-request case.
+        */
+       if (q->mq_ops && blk_queue_init_done(q))
+               blk_mq_quiesce_queue(q);
+
        /* for synchronous bio-based driver finish in-flight integrity i/o */
        blk_flush_integrity();
 
@@ -763,7 +783,6 @@ EXPORT_SYMBOL(blk_alloc_queue);
 int blk_queue_enter(struct request_queue *q, bool nowait)
 {
        while (true) {
-               int ret;
 
                if (percpu_ref_tryget_live(&q->q_usage_counter))
                        return 0;
@@ -780,13 +799,11 @@ int blk_queue_enter(struct request_queue *q, bool nowait)
                 */
                smp_rmb();
 
-               ret = wait_event_interruptible(q->mq_freeze_wq,
-                               !atomic_read(&q->mq_freeze_depth) ||
-                               blk_queue_dying(q));
+               wait_event(q->mq_freeze_wq,
+                          !atomic_read(&q->mq_freeze_depth) ||
+                          blk_queue_dying(q));
                if (blk_queue_dying(q))
                        return -ENODEV;
-               if (ret)
-                       return ret;
        }
 }
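
With the interruptible wait gone, blk_queue_enter() can no longer return
-ERESTARTSYS while the queue is merely frozen: it either succeeds or fails
with -ENODEV once the queue is dying, so callers have a single error case to
handle. A hedged sketch of a caller under that contract (example_submit is
illustrative, not a real blk-core entry point):

    #include <linux/bio.h>
    #include <linux/blkdev.h>

    static void example_submit(struct request_queue *q, struct bio *bio)
    {
            if (blk_queue_enter(q, false)) {
                    bio_io_error(bio);      /* only reached once the queue is dying */
                    return;
            }

            /* ... hand the bio to the queue ... */

            blk_queue_exit(q);
    }
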
 
@@ -844,6 +861,7 @@ struct request_queue *blk_alloc_queue_node(gfp_t gfp_mask, int node_id)
        setup_timer(&q->backing_dev_info->laptop_mode_wb_timer,
                    laptop_mode_timer_fn, (unsigned long) q);
        setup_timer(&q->timeout, blk_rq_timed_out_timer, (unsigned long) q);
+       INIT_WORK(&q->timeout_work, NULL);
        INIT_LIST_HEAD(&q->queue_head);
        INIT_LIST_HEAD(&q->timeout_list);
        INIT_LIST_HEAD(&q->icq_list);
@@ -1011,6 +1029,7 @@ out_exit_flush_rq:
                q->exit_rq_fn(q, q->fq->flush_rq);
 out_free_flush_queue:
        blk_free_flush_queue(q->fq);
+       q->fq = NULL;
        return -ENOMEM;
 }
 EXPORT_SYMBOL(blk_init_allocated_queue);
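
Clearing q->fq after blk_free_flush_queue() keeps the release path, which
frees the flush queue unconditionally later on, from freeing it a second time
once blk_init_allocated_queue() has already failed; blk_free_flush_queue()
simply ignores a NULL pointer. The same pattern in a hedged, generic form
(the example_* names and helpers are illustrative):

    #include <linux/errno.h>
    #include <linux/slab.h>

    struct example_ctx {
            void *buf;
    };

    static int example_register(struct example_ctx *ctx)
    {
            return -EIO;                    /* pretend the second init step fails */
    }

    static int example_init(struct example_ctx *ctx)
    {
            ctx->buf = kzalloc(512, GFP_KERNEL);
            if (!ctx->buf)
                    return -ENOMEM;

            if (example_register(ctx)) {
                    kfree(ctx->buf);
                    ctx->buf = NULL;        /* teardown below may still run later */
                    return -ENOMEM;
            }
            return 0;
    }

    static void example_teardown(struct example_ctx *ctx)
    {
            kfree(ctx->buf);                /* kfree(NULL) is a no-op */
            ctx->buf = NULL;
    }
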
@@ -2260,7 +2279,7 @@ blk_qc_t submit_bio(struct bio *bio)
                unsigned int count;
 
                if (unlikely(bio_op(bio) == REQ_OP_WRITE_SAME))
-                       count = queue_logical_block_size(bio->bi_disk->queue);
+                       count = queue_logical_block_size(bio->bi_disk->queue) >> 9;
                else
                        count = bio_sectors(bio);
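
queue_logical_block_size() returns bytes, while this counter is kept in
512-byte sectors like the bio_sectors() value in the other branch, hence the
">> 9". For example, on a queue with 4096-byte logical blocks the old code
charged each WRITE_SAME bio as 4096 sectors (2 MiB) of traffic instead of
4096 >> 9 = 8 sectors (4 KiB), which is the single block of payload such a
bio actually carries.
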
 
@@ -3048,6 +3067,8 @@ void blk_rq_bio_prep(struct request_queue *q, struct request *rq,
 {
        if (bio_has_data(bio))
                rq->nr_phys_segments = bio_phys_segments(q, bio);
+       else if (bio_op(bio) == REQ_OP_DISCARD)
+               rq->nr_phys_segments = 1;
 
        rq->__data_len = bio->bi_iter.bi_size;
        rq->bio = rq->biotail = bio;
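
A discard bio carries no data pages, so bio_has_data() is false and the
request's nr_phys_segments used to stay 0; counting it as one segment matches
how drivers consume a discard through a single special-payload range (compare
the __blk_rq_prep_clone hunk below). A hedged, illustrative driver-side check
of that invariant (example_map_discard is not real code):

    #include <linux/blkdev.h>
    #include <linux/scatterlist.h>

    static int example_map_discard(struct request *rq, struct scatterlist *sgl)
    {
            int nents = blk_rq_map_sg(rq->q, rq, sgl);

            WARN_ON_ONCE(nents != 1 || rq->nr_phys_segments != 1);
            return nents;
    }
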
@@ -3131,6 +3152,10 @@ static void __blk_rq_prep_clone(struct request *dst, struct request *src)
        dst->cpu = src->cpu;
        dst->__sector = blk_rq_pos(src);
        dst->__data_len = blk_rq_bytes(src);
+       if (src->rq_flags & RQF_SPECIAL_PAYLOAD) {
+               dst->rq_flags |= RQF_SPECIAL_PAYLOAD;
+               dst->special_vec = src->special_vec;
+       }
        dst->nr_phys_segments = src->nr_phys_segments;
        dst->ioprio = src->ioprio;
        dst->extra_len = src->extra_len;
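
blk_rq_prep_clone() is used by stacking drivers such as dm-multipath. Because
nr_phys_segments is copied just below, a cloned discard or write-zeroes
request would advertise a segment while the payload descriptor itself
(RQF_SPECIAL_PAYLOAD plus special_vec) stayed behind on the source request;
copying both keeps the clone self-consistent. A hedged, illustrative
consistency check (example_check_clone is not real code):

    #include <linux/blkdev.h>

    static void example_check_clone(struct request *src, struct request *dst)
    {
            if (!(src->rq_flags & RQF_SPECIAL_PAYLOAD))
                    return;

            WARN_ON_ONCE(!(dst->rq_flags & RQF_SPECIAL_PAYLOAD));
            WARN_ON_ONCE(dst->special_vec.bv_len != src->special_vec.bv_len);
    }
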
@@ -3438,9 +3463,11 @@ EXPORT_SYMBOL(blk_finish_plug);
  */
 void blk_pm_runtime_init(struct request_queue *q, struct device *dev)
 {
-       /* not support for RQF_PM and ->rpm_status in blk-mq yet */
-       if (q->mq_ops)
+       /* Don't enable runtime PM for blk-mq until it is ready */
+       if (q->mq_ops) {
+               pm_runtime_disable(dev);
                return;
+       }
 
        q->dev = dev;
        q->rpm_status = RPM_ACTIVE;
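
For blk-mq queues this function used to return silently, leaving whatever
runtime-PM setup the driver had done armed even though the block layer does
not manage RQF_PM requests or ->rpm_status there; calling pm_runtime_disable()
keeps such a device from being runtime-suspended underneath an unaware queue.
pm_runtime_disable() nests, so a matching pm_runtime_enable() would be needed
if blk-mq runtime PM support is wired up later. A hedged sketch of the
driver-side call pattern (example_probe_pm is illustrative):

    #include <linux/blkdev.h>
    #include <linux/pm_runtime.h>

    static void example_probe_pm(struct request_queue *q, struct device *dev)
    {
            pm_runtime_set_active(dev);
            pm_runtime_enable(dev);

            /* with blk-mq this now disables runtime PM again instead of returning */
            blk_pm_runtime_init(q, dev);
    }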