Merge tag 'sound-fix-4.11-rc1' of git://git.kernel.org/pub/scm/linux/kernel/git/tiwai/sound
[GitHub/LineageOS/android_kernel_motorola_exynos9610.git] / block / blk-flush.c
index 20b7c7a02f1cbdfe5a63c5918d3abd356bd4265b..0d5a9c1da1fc71db5706926e69e2a22b7491eea9 100644 (file)
@@ -74,6 +74,7 @@
 #include "blk.h"
 #include "blk-mq.h"
 #include "blk-mq-tag.h"
+#include "blk-mq-sched.h"
 
 /* FLUSH/FUA sequences */
 enum {
@@ -296,8 +297,14 @@ static bool blk_kick_flush(struct request_queue *q, struct blk_flush_queue *fq)
        if (fq->flush_pending_idx != fq->flush_running_idx || list_empty(pending))
                return false;
 
-       /* C2 and C3 */
+       /* C2 and C3
+        *
+        * For blk-mq + scheduling, we can risk having all driver tags
+        * assigned to empty flushes, and we deadlock if we are expecting
+        * other requests to make progress. Don't defer for that case.
+        */
        if (!list_empty(&fq->flush_data_in_flight) &&
+           !(q->mq_ops && q->elevator) &&
            time_before(jiffies,
                        fq->flush_pending_since + FLUSH_PENDING_TIMEOUT))
                return false;
@@ -326,7 +333,6 @@ static bool blk_kick_flush(struct request_queue *q, struct blk_flush_queue *fq)
                blk_mq_tag_set_rq(hctx, first_rq->tag, flush_rq);
        }
 
-       flush_rq->cmd_type = REQ_TYPE_FS;
        flush_rq->cmd_flags = REQ_OP_FLUSH | REQ_PREFLUSH;
        flush_rq->rq_flags |= RQF_FLUSH_SEQ;
        flush_rq->rq_disk = first_rq->rq_disk;
@@ -391,9 +397,10 @@ static void mq_flush_data_end_io(struct request *rq, int error)
         * the comment in flush_end_io().
         */
        spin_lock_irqsave(&fq->mq_flush_lock, flags);
-       if (blk_flush_complete_seq(rq, fq, REQ_FSEQ_DATA, error))
-               blk_mq_run_hw_queue(hctx, true);
+       blk_flush_complete_seq(rq, fq, REQ_FSEQ_DATA, error);
        spin_unlock_irqrestore(&fq->mq_flush_lock, flags);
+
+       blk_mq_run_hw_queue(hctx, true);
 }
 
 /**
@@ -453,9 +460,9 @@ void blk_insert_flush(struct request *rq)
         */
        if ((policy & REQ_FSEQ_DATA) &&
            !(policy & (REQ_FSEQ_PREFLUSH | REQ_FSEQ_POSTFLUSH))) {
-               if (q->mq_ops) {
-                       blk_mq_insert_request(rq, false, true, false);
-               } else
+               if (q->mq_ops)
+                       blk_mq_sched_insert_request(rq, false, true, false, false);
+               else
                        list_add_tail(&rq->queuelist, &q->queue_head);
                return;
        }
@@ -545,11 +552,10 @@ struct blk_flush_queue *blk_alloc_flush_queue(struct request_queue *q,
        if (!fq)
                goto fail;
 
-       if (q->mq_ops) {
+       if (q->mq_ops)
                spin_lock_init(&fq->mq_flush_lock);
-               rq_sz = round_up(rq_sz + cmd_size, cache_line_size());
-       }
 
+       rq_sz = round_up(rq_sz + cmd_size, cache_line_size());
        fq->flush_rq = kzalloc_node(rq_sz, GFP_KERNEL, node);
        if (!fq->flush_rq)
                goto fail_rq;