[RAMEN9610-21541]can: peak_usb: pcan_usb_pro: Fix info-leaks to USB devices
[GitHub/MotorolaMobilityLLC/kernel-slsi.git] / block / blk-mq.c
index 3f18cff80050331ece90da34aec20819ae2e3245..eac4448047366bb60db76788833f6e5b977dd467 100644 (file)
@@ -118,6 +118,25 @@ void blk_mq_in_flight(struct request_queue *q, struct hd_struct *part,
        blk_mq_queue_tag_busy_iter(q, blk_mq_check_inflight, &mi);
 }
 
+static void blk_mq_check_inflight_rw(struct blk_mq_hw_ctx *hctx,
+                                    struct request *rq, void *priv,
+                                    bool reserved)
+{
+       struct mq_inflight *mi = priv;
+
+       if (rq->part == mi->part)
+               mi->inflight[rq_data_dir(rq)]++;
+}
+
+void blk_mq_in_flight_rw(struct request_queue *q, struct hd_struct *part,
+                        unsigned int inflight[2])
+{
+       struct mq_inflight mi = { .part = part, .inflight = inflight, };
+
+       inflight[0] = inflight[1] = 0;
+       blk_mq_queue_tag_busy_iter(q, blk_mq_check_inflight_rw, &mi);
+}
+
 void blk_freeze_queue_start(struct request_queue *q)
 {
        int freeze_depth;
@@ -159,6 +178,8 @@ void blk_freeze_queue(struct request_queue *q)
         * exported to drivers as the only user for unfreeze is blk_mq.
         */
        blk_freeze_queue_start(q);
+       if (!q->mq_ops)
+               blk_drain_queue(q);
        blk_mq_freeze_queue_wait(q);
 }
 
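Note: blk_drain_queue() is not defined in this file; the companion blk-core.c/blk.h change (not shown here) is expected to provide it so that a legacy (!q->mq_ops) queue is actually drained before blk_mq_freeze_queue_wait() waits for q_usage_counter to reach zero. A sketch of what that helper plausibly looks like, assuming it simply wraps the existing __blk_drain_queue():

/*
 * Sketch only: assumed shape of the companion helper added outside this
 * file; it drains the legacy request_fn queue with the queue lock held.
 */
void blk_drain_queue(struct request_queue *q)
{
        spin_lock_irq(q->queue_lock);
        __blk_drain_queue(q, true);     /* drain_all = true */
        spin_unlock_irq(q->queue_lock);
}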
@@ -636,7 +657,6 @@ static void __blk_mq_requeue_request(struct request *rq)
 
        trace_block_rq_requeue(q, rq);
        wbt_requeue(q->rq_wb, &rq->issue_stat);
-       blk_mq_sched_requeue_request(rq);
 
        if (test_and_clear_bit(REQ_ATOM_STARTED, &rq->atomic_flags)) {
                if (q->dma_drain_size && blk_rq_bytes(rq))
@@ -648,6 +668,9 @@ void blk_mq_requeue_request(struct request *rq, bool kick_requeue_list)
 {
        __blk_mq_requeue_request(rq);
 
+       /* this request will be re-inserted into the I/O scheduler queue */
+       blk_mq_sched_requeue_request(rq);
+
        BUG_ON(blk_queued_rq(rq));
        blk_mq_add_to_requeue_list(rq, true, kick_requeue_list);
 }
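Moving the hook here means __blk_mq_requeue_request(), which is also used by the flush and direct-issue paths, no longer notifies the scheduler; only this driver-facing entry point re-inserts the request into the I/O scheduler queue. A minimal sketch of a driver retry path that relies on this (demo_retry_request() is a hypothetical name, not part of the patch):

static void demo_retry_request(struct request *rq)
{
        /*
         * Hand the request back to blk-mq.  After this patch it passes
         * through blk_mq_sched_requeue_request() here, so an attached
         * I/O scheduler sees it again before it is redispatched.
         */
        blk_mq_requeue_request(rq, true);       /* true: kick the requeue list */
}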
@@ -1139,9 +1162,27 @@ static void __blk_mq_run_hw_queue(struct blk_mq_hw_ctx *hctx)
        /*
         * We should be running this queue from one of the CPUs that
         * are mapped to it.
+        *
+        * There are at least two related races now between setting
+        * hctx->next_cpu from blk_mq_hctx_next_cpu() and running
+        * __blk_mq_run_hw_queue():
+        *
+        * - hctx->next_cpu is found offline in blk_mq_hctx_next_cpu(),
+        *   but later it comes back online; in that case the warning
+        *   is entirely harmless
+        *
+        * - hctx->next_cpu is found online in blk_mq_hctx_next_cpu(),
+        *   but later it goes offline; then the warning can't be
+        *   triggered, and we rely on the blk-mq timeout handler to
+        *   deal with requests dispatched to this hctx
         */
-       WARN_ON(!cpumask_test_cpu(raw_smp_processor_id(), hctx->cpumask) &&
-               cpu_online(hctx->next_cpu));
+       if (!cpumask_test_cpu(raw_smp_processor_id(), hctx->cpumask) &&
+               cpu_online(hctx->next_cpu)) {
+               printk(KERN_WARNING "run queue from wrong CPU %d, hctx %s\n",
+                       raw_smp_processor_id(),
+                       cpumask_empty(hctx->cpumask) ? "inactive": "active");
+               dump_stack();
+       }
 
        /*
         * We can't run the queue inline with ints disabled. Ensure that
@@ -1401,6 +1442,22 @@ void __blk_mq_insert_request(struct blk_mq_hw_ctx *hctx, struct request *rq,
        blk_mq_hctx_mark_pending(hctx, ctx);
 }
 
+/*
+ * Should only be used carefully, when the caller knows we want to
+ * bypass a potential IO scheduler on the target device.
+ */
+void blk_mq_request_bypass_insert(struct request *rq)
+{
+       struct blk_mq_ctx *ctx = rq->mq_ctx;
+       struct blk_mq_hw_ctx *hctx = blk_mq_map_queue(rq->q, ctx->cpu);
+
+       spin_lock(&hctx->lock);
+       list_add_tail(&rq->queuelist, &hctx->dispatch);
+       spin_unlock(&hctx->lock);
+
+       blk_mq_run_hw_queue(hctx, false);
+}
+
 void blk_mq_insert_requests(struct blk_mq_hw_ctx *hctx, struct blk_mq_ctx *ctx,
                            struct list_head *list)
 {
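blk_mq_request_bypass_insert() skips the per-cpu ctx and any I/O scheduler and puts the request straight on the hctx dispatch list. The intended user is the request-stacking path (blk_insert_cloned_request() in the matching blk-core.c change, not shown here); a sketch of that kind of caller, with demo_issue_clone() as a hypothetical placeholder:

static blk_status_t demo_issue_clone(struct request *clone)
{
        struct request_queue *q = clone->q;

        if (q->mq_ops) {
                /*
                 * The top-level device already ran its scheduler, so
                 * insert the clone directly onto the hctx dispatch list
                 * and keep any scheduler on the bottom device out of the
                 * picture.
                 */
                blk_mq_request_bypass_insert(clone);
                return BLK_STS_OK;
        }

        return BLK_STS_RESOURCE;        /* legacy path handled elsewhere */
}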
@@ -1455,7 +1512,7 @@ void blk_mq_flush_plug_list(struct blk_plug *plug, bool from_schedule)
                BUG_ON(!rq->q);
                if (rq->mq_ctx != this_ctx) {
                        if (this_ctx) {
-                               trace_block_unplug(this_q, depth, from_schedule);
+                               trace_block_unplug(this_q, depth, !from_schedule);
                                blk_mq_sched_insert_requests(this_q, this_ctx,
                                                                &ctx_list,
                                                                from_schedule);
@@ -1475,7 +1532,7 @@ void blk_mq_flush_plug_list(struct blk_plug *plug, bool from_schedule)
         * on 'ctx_list'. Do those.
         */
        if (this_ctx) {
-               trace_block_unplug(this_q, depth, from_schedule);
+               trace_block_unplug(this_q, depth, !from_schedule);
                blk_mq_sched_insert_requests(this_q, this_ctx, &ctx_list,
                                                from_schedule);
        }
@@ -1908,7 +1965,8 @@ static void blk_mq_exit_hctx(struct request_queue *q,
 {
        blk_mq_debugfs_unregister_hctx(hctx);
 
-       blk_mq_tag_idle(hctx);
+       if (blk_mq_hw_queue_mapped(hctx))
+               blk_mq_tag_idle(hctx);
 
        if (set->ops->exit_request)
                set->ops->exit_request(set, hctx->fq->flush_rq, hctx_idx);
@@ -2194,7 +2252,6 @@ static void blk_mq_del_queue_tag_set(struct request_queue *q)
 
        mutex_lock(&set->tag_list_lock);
        list_del_rcu(&q->tag_set_list);
-       INIT_LIST_HEAD(&q->tag_set_list);
        if (list_is_singular(&set->tag_list)) {
                /* just transitioned to unshared */
                set->flags &= ~BLK_MQ_F_TAG_SHARED;
@@ -2202,8 +2259,8 @@ static void blk_mq_del_queue_tag_set(struct request_queue *q)
                blk_mq_update_tag_set_depth(set, false);
        }
        mutex_unlock(&set->tag_list_lock);
-
        synchronize_rcu();
+       INIT_LIST_HEAD(&q->tag_set_list);
 }
 
 static void blk_mq_add_queue_tag_set(struct blk_mq_tag_set *set,
@@ -2294,6 +2351,9 @@ static void blk_mq_realloc_hw_ctxs(struct blk_mq_tag_set *set,
        struct blk_mq_hw_ctx **hctxs = q->queue_hw_ctx;
 
        blk_mq_sysfs_unregister(q);
+
+       /* protect against switching io scheduler  */
+       mutex_lock(&q->sysfs_lock);
        for (i = 0; i < set->nr_hw_queues; i++) {
                int node;
 
@@ -2338,6 +2398,7 @@ static void blk_mq_realloc_hw_ctxs(struct blk_mq_tag_set *set,
                }
        }
        q->nr_hw_queues = i;
+       mutex_unlock(&q->sysfs_lock);
        blk_mq_sysfs_register(q);
 }
 
@@ -2508,9 +2569,27 @@ static int blk_mq_alloc_rq_maps(struct blk_mq_tag_set *set)
 
 static int blk_mq_update_queue_map(struct blk_mq_tag_set *set)
 {
-       if (set->ops->map_queues)
+       if (set->ops->map_queues) {
+               int cpu;
+               /*
+                * transport .map_queues is usually done in the following
+                * way:
+                *
+                * for (queue = 0; queue < set->nr_hw_queues; queue++) {
+                *      mask = get_cpu_mask(queue)
+                *      for_each_cpu(cpu, mask)
+                *              set->mq_map[cpu] = queue;
+                * }
+                *
+                * When we need to remap, the table has to be cleared for
+                * killing stale mapping since one CPU may not be mapped
+                * to any hw queue.
+                */
+               for_each_possible_cpu(cpu)
+                       set->mq_map[cpu] = 0;
+
                return set->ops->map_queues(set);
-       else
+       } else
                return blk_mq_map_queues(set);
 }