diff --git a/block/blk-core.c b/block/blk-core.c
index 048be4aa6024..f68b4eee2d10 100644
--- a/block/blk-core.c
+++ b/block/blk-core.c
@@ -333,11 +333,13 @@ EXPORT_SYMBOL(blk_stop_queue);
 void blk_sync_queue(struct request_queue *q)
 {
 	del_timer_sync(&q->timeout);
+	cancel_work_sync(&q->timeout_work);
 
 	if (q->mq_ops) {
 		struct blk_mq_hw_ctx *hctx;
 		int i;
 
+		cancel_delayed_work_sync(&q->requeue_work);
 		queue_for_each_hw_ctx(q, hctx, i)
 			cancel_delayed_work_sync(&hctx->run_work);
 	} else {
@@ -529,6 +531,13 @@ static void __blk_drain_queue(struct request_queue *q, bool drain_all)
 	}
 }
 
+void blk_drain_queue(struct request_queue *q)
+{
+	spin_lock_irq(q->queue_lock);
+	__blk_drain_queue(q, true);
+	spin_unlock_irq(q->queue_lock);
+}
+
 /**
  * blk_queue_bypass_start - enter queue bypass mode
  * @q: queue of interest
@@ -604,8 +613,8 @@ void blk_set_queue_dying(struct request_queue *q)
 		spin_lock_irq(q->queue_lock);
 		blk_queue_for_each_rl(rl, q) {
 			if (rl->rq_pool) {
-				wake_up(&rl->wait[BLK_RW_SYNC]);
-				wake_up(&rl->wait[BLK_RW_ASYNC]);
+				wake_up_all(&rl->wait[BLK_RW_SYNC]);
+				wake_up_all(&rl->wait[BLK_RW_ASYNC]);
 			}
 		}
 		spin_unlock_irq(q->queue_lock);
@@ -653,11 +662,21 @@ void blk_cleanup_queue(struct request_queue *q)
 	 */
 	blk_freeze_queue(q);
 	spin_lock_irq(lock);
-	if (!q->mq_ops)
-		__blk_drain_queue(q, true);
 	queue_flag_set(QUEUE_FLAG_DEAD, q);
 	spin_unlock_irq(lock);
 
+	/*
+	 * Make sure all in-progress dispatches are completed: while
+	 * blk_freeze_queue() completes all requests, dispatch may still
+	 * be in progress since we dispatch requests from more than one
+	 * context.
+	 *
+	 * We rely on the driver to deal with the race in case queue
+	 * initialization isn't done.
+	 */
+	if (q->mq_ops && blk_queue_init_done(q))
+		blk_mq_quiesce_queue(q);
+
 	/* for synchronous bio-based driver finish in-flight integrity i/o */
 	blk_flush_integrity();
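Note on the wake_up_all() change above: get_request() sleepers on rl->wait[] are exclusive waiters, so a plain wake_up() rouses only one of them; once the queue is dying, every sleeper must wake and observe the dying flag or it sleeps forever. A minimal user-space analogy (pthreads standing in for the waitqueue; not kernel code, all names here are illustrative):

/* User-space analogy for wake_up() vs wake_up_all() on a dying queue.
 * pthread_cond_broadcast() plays the role of wake_up_all(). */
#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t rl_wait = PTHREAD_COND_INITIALIZER;
static bool queue_dying;

static void *waiter(void *arg)
{
	pthread_mutex_lock(&lock);
	while (!queue_dying)		/* models a get_request() sleeper */
		pthread_cond_wait(&rl_wait, &lock);
	pthread_mutex_unlock(&lock);
	printf("waiter %ld saw the dying flag and bailed out\n", (long)arg);
	return NULL;
}

int main(void)
{
	pthread_t t[4];

	for (long i = 0; i < 4; i++)
		pthread_create(&t[i], NULL, waiter, (void *)i);

	pthread_mutex_lock(&lock);
	queue_dying = true;
	/* A single pthread_cond_signal() here would wake only one waiter
	 * and strand the other three -- the bug the wake_up_all() switch
	 * avoids. */
	pthread_cond_broadcast(&rl_wait);
	pthread_mutex_unlock(&lock);

	for (int i = 0; i < 4; i++)
		pthread_join(t[i], NULL);
	return 0;
}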
@@ -763,7 +782,6 @@ EXPORT_SYMBOL(blk_alloc_queue);
 int blk_queue_enter(struct request_queue *q, bool nowait)
 {
 	while (true) {
-		int ret;
 
 		if (percpu_ref_tryget_live(&q->q_usage_counter))
 			return 0;
@@ -780,13 +798,11 @@ int blk_queue_enter(struct request_queue *q, bool nowait)
 		 */
 		smp_rmb();
 
-		ret = wait_event_interruptible(q->mq_freeze_wq,
-				!atomic_read(&q->mq_freeze_depth) ||
-				blk_queue_dying(q));
+		wait_event(q->mq_freeze_wq,
+			   !atomic_read(&q->mq_freeze_depth) ||
+			   blk_queue_dying(q));
 		if (blk_queue_dying(q))
 			return -ENODEV;
-		if (ret)
-			return ret;
 	}
 }
 
@@ -844,6 +860,7 @@ struct request_queue *blk_alloc_queue_node(gfp_t gfp_mask, int node_id)
 	setup_timer(&q->backing_dev_info->laptop_mode_wb_timer,
 		    laptop_mode_timer_fn, (unsigned long) q);
 	setup_timer(&q->timeout, blk_rq_timed_out_timer, (unsigned long) q);
+	INIT_WORK(&q->timeout_work, NULL);
 	INIT_LIST_HEAD(&q->queue_head);
 	INIT_LIST_HEAD(&q->timeout_list);
 	INIT_LIST_HEAD(&q->icq_list);
@@ -1011,6 +1028,7 @@ out_exit_flush_rq:
 		q->exit_rq_fn(q, q->fq->flush_rq);
 out_free_flush_queue:
 	blk_free_flush_queue(q->fq);
+	q->fq = NULL;
 	return -ENOMEM;
 }
 EXPORT_SYMBOL(blk_init_allocated_queue);
@@ -1783,6 +1801,9 @@ void blk_init_request_from_bio(struct request *req, struct bio *bio)
 	else
 		req->ioprio = IOPRIO_PRIO_VALUE(IOPRIO_CLASS_NONE, 0);
 	req->write_hint = bio->bi_write_hint;
+#ifdef CONFIG_CRYPTO_DISKCIPHER_DUN
+	req->__dun = bio->bi_iter.bi_dun;
+#endif
 	blk_rq_bio_prep(req->q, req, bio);
 }
 EXPORT_SYMBOL_GPL(blk_init_request_from_bio);
@@ -2260,7 +2281,7 @@ blk_qc_t submit_bio(struct bio *bio)
 		unsigned int count;
 
 		if (unlikely(bio_op(bio) == REQ_OP_WRITE_SAME))
-			count = queue_logical_block_size(bio->bi_disk->queue);
+			count = queue_logical_block_size(bio->bi_disk->queue) >> 9;
 		else
 			count = bio_sectors(bio);
 
@@ -2768,8 +2789,13 @@ bool blk_update_request(struct request *req, blk_status_t error,
 	req->__data_len -= total_bytes;
 
 	/* update sector only for requests with clear definition of sector */
-	if (!blk_rq_is_passthrough(req))
+	if (!blk_rq_is_passthrough(req)) {
 		req->__sector += total_bytes >> 9;
+#ifdef CONFIG_CRYPTO_DISKCIPHER_DUN
+		if (req->__dun)
+			req->__dun += total_bytes >> 12;
+#endif
+	}
 
 	/* mixed attributes always follow the first bio */
 	if (req->rq_flags & RQF_MIXED_MERGE) {
@@ -3048,6 +3074,8 @@ void blk_rq_bio_prep(struct request_queue *q, struct request *rq,
 {
 	if (bio_has_data(bio))
 		rq->nr_phys_segments = bio_phys_segments(q, bio);
+	else if (bio_op(bio) == REQ_OP_DISCARD)
+		rq->nr_phys_segments = 1;
 
 	rq->__data_len = bio->bi_iter.bi_size;
 	rq->bio = rq->biotail = bio;
@@ -3131,6 +3159,13 @@ static void __blk_rq_prep_clone(struct request *dst, struct request *src)
 	dst->cpu = src->cpu;
 	dst->__sector = blk_rq_pos(src);
 	dst->__data_len = blk_rq_bytes(src);
+#ifdef CONFIG_CRYPTO_DISKCIPHER_DUN
+	dst->__dun = blk_rq_dun(src);
+#endif
+	if (src->rq_flags & RQF_SPECIAL_PAYLOAD) {
+		dst->rq_flags |= RQF_SPECIAL_PAYLOAD;
+		dst->special_vec = src->special_vec;
+	}
 	dst->nr_phys_segments = src->nr_phys_segments;
 	dst->ioprio = src->ioprio;
 	dst->extra_len = src->extra_len;
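On the blk_update_request() hunk above: when a request partially completes, the sector cursor advances by total_bytes >> 9 (512-byte sectors), while the optional disk-cipher data-unit number advances by total_bytes >> 12, i.e. one DUN per 4096-byte crypto data unit. A stand-alone sketch of that bookkeeping (struct and field names are illustrative, modeled on the diff):

/* Stand-alone model of the partial-completion bookkeeping in
 * blk_update_request(): the sector cursor moves in 512-byte units,
 * the inline-crypto data-unit number (DUN) in 4096-byte units. */
#include <stdint.h>
#include <stdio.h>

struct req_model {
	uint64_t sector;	/* models req->__sector   */
	uint64_t dun;		/* models req->__dun      */
	uint64_t data_len;	/* models req->__data_len */
};

static void advance(struct req_model *rq, uint64_t total_bytes)
{
	rq->data_len -= total_bytes;
	rq->sector += total_bytes >> 9;		/* 512 B per sector */
	if (rq->dun)				/* mirrors the diff's guard */
		rq->dun += total_bytes >> 12;	/* 4096 B per data unit */
}

int main(void)
{
	struct req_model rq = { .sector = 2048, .dun = 256, .data_len = 32768 };

	advance(&rq, 8192);	/* complete the first 8 KiB */
	printf("sector=%llu dun=%llu left=%llu\n",
	       (unsigned long long)rq.sector,
	       (unsigned long long)rq.dun,
	       (unsigned long long)rq.data_len);
	/* prints: sector=2064 dun=258 left=24576 */
	return 0;
}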
@@ -3438,9 +3473,11 @@ EXPORT_SYMBOL(blk_finish_plug);
  */
 void blk_pm_runtime_init(struct request_queue *q, struct device *dev)
 {
-	/* not support for RQF_PM and ->rpm_status in blk-mq yet */
-	if (q->mq_ops)
+	/* Don't enable runtime PM for blk-mq until it is ready */
+	if (q->mq_ops) {
+		pm_runtime_disable(dev);
 		return;
+	}
 
 	q->dev = dev;
 	q->rpm_status = RPM_ACTIVE;
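For the blk_queue_enter() change further up: dropping wait_event_interruptible() means a submitter blocked on a frozen queue can no longer bail out with -EINTR; it sleeps until the queue unfreezes or is marked dying. A rough user-space model of that enter/freeze handshake (a mutex-protected int standing in for the percpu q_usage_counter; not the real API):

/* User-space model of blk_queue_enter()/freeze: enter() loops until it
 * can take a reference on an unfrozen queue, waiting uninterruptibly
 * (no -EINTR path), and fails only once the queue is dying. */
#include <pthread.h>
#include <errno.h>
#include <stdbool.h>

struct queue_model {
	pthread_mutex_t lock;
	pthread_cond_t freeze_wq;	/* models q->mq_freeze_wq    */
	int usage;			/* models q_usage_counter    */
	int freeze_depth;		/* models q->mq_freeze_depth */
	bool dying;
};

static int enter(struct queue_model *q)
{
	pthread_mutex_lock(&q->lock);
	for (;;) {
		if (q->dying) {
			pthread_mutex_unlock(&q->lock);
			return -ENODEV;		/* as in the patched code */
		}
		if (!q->freeze_depth) {		/* tryget_live succeeds */
			q->usage++;
			pthread_mutex_unlock(&q->lock);
			return 0;
		}
		/* wait_event(): sleep, re-check, no early interrupt return */
		pthread_cond_wait(&q->freeze_wq, &q->lock);
	}
}

static void exit_queue(struct queue_model *q)
{
	pthread_mutex_lock(&q->lock);
	q->usage--;
	pthread_cond_broadcast(&q->freeze_wq);
	pthread_mutex_unlock(&q->lock);
}

int main(void)
{
	struct queue_model q = {
		.lock = PTHREAD_MUTEX_INITIALIZER,
		.freeze_wq = PTHREAD_COND_INITIALIZER,
	};

	if (enter(&q) == 0)	/* unfrozen, not dying: succeeds at once */
		exit_queue(&q);
	return 0;
}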