if (!q)
return NULL;
- q->flush_rq = kzalloc(sizeof(struct request), GFP_KERNEL);
- if (!q->flush_rq)
+ if (blk_init_flush(q))
return NULL;
	if (blk_init_rl(&q->root_rl, q, GFP_KERNEL))
		goto fail;

	return q;

fail:
- kfree(q->flush_rq);
+ blk_exit_flush(q);
return NULL;
}
EXPORT_SYMBOL(blk_init_allocated_queue);
--- a/block/blk-flush.c
+++ b/block/blk-flush.c
}
EXPORT_SYMBOL(blkdev_issue_flush);
-int blk_mq_init_flush(struct request_queue *q)
+static int blk_mq_init_flush(struct request_queue *q)
{
	struct blk_mq_tag_set *set = q->tag_set;

	spin_lock_init(&q->mq_flush_lock);

	q->flush_rq = kzalloc(round_up(sizeof(struct request) +
				set->cmd_size, cache_line_size()),
				GFP_KERNEL);
	if (!q->flush_rq)
		return -ENOMEM;
	return 0;
}
+
+int blk_init_flush(struct request_queue *q)
+{
+ if (q->mq_ops)
+ return blk_mq_init_flush(q);
+
+ q->flush_rq = kzalloc(sizeof(struct request), GFP_KERNEL);
+ if (!q->flush_rq)
+ return -ENOMEM;
+
+ return 0;
+}
+
+void blk_exit_flush(struct request_queue *q)
+{
+ kfree(q->flush_rq);
+}
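
The new pair above hides the mq/legacy split behind a single init/exit entry point. As a rough illustration of the pattern (all names below are hypothetical; this is a user-space analogue, not kernel code):

#include <stdio.h>
#include <stdlib.h>

/*
 * Toy stand-in for struct request_queue: a non-NULL mq_ops selects the
 * multiqueue path, as blk_init_flush() does in the hunk above.
 */
struct queue {
	const void *mq_ops;
	void *flush_rq;
};

/* mq path: the real code sizes the allocation from set->cmd_size. */
static int mq_init_flush(struct queue *q)
{
	q->flush_rq = calloc(1, 256);
	return q->flush_rq ? 0 : -1;
}

/* Single entry point: callers no longer care which path they are on. */
static int init_flush(struct queue *q)
{
	if (q->mq_ops)
		return mq_init_flush(q);

	q->flush_rq = calloc(1, 128);
	return q->flush_rq ? 0 : -1;
}

/* Paired teardown: one call frees the state for both paths. */
static void exit_flush(struct queue *q)
{
	free(q->flush_rq);
	q->flush_rq = NULL;
}

int main(void)
{
	static const int dummy_mq_ops;	/* any non-NULL ops pointer will do */
	struct queue legacy = { 0 };
	struct queue mq = { .mq_ops = &dummy_mq_ops };

	if (init_flush(&legacy) || init_flush(&mq)) {
		exit_flush(&legacy);	/* free(NULL) is a harmless no-op */
		exit_flush(&mq);
		return 1;
	}
	puts("both queue flavours initialised through one entry point");
	exit_flush(&mq);
	exit_flush(&legacy);
	return 0;
}
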
--- a/block/blk-mq.c
+++ b/block/blk-mq.c
	blk_mq_add_queue_tag_set(set, q);
- if (blk_mq_init_flush(q))
+ if (blk_init_flush(q))
goto err_hw_queues;
blk_mq_map_swqueue(q);
--- a/block/blk-mq.h
+++ b/block/blk-mq.h
void __blk_mq_complete_request(struct request *rq);
void blk_mq_run_hw_queue(struct blk_mq_hw_ctx *hctx, bool async);
-int blk_mq_init_flush(struct request_queue *q);
void blk_mq_freeze_queue(struct request_queue *q);
void blk_mq_free_queue(struct request_queue *q);
void blk_mq_clone_flush_request(struct request *flush_rq,
		struct request *orig_rq);
--- a/block/blk-sysfs.c
+++ b/block/blk-sysfs.c
	if (q->queue_tags)
__blk_queue_free_tags(q);
+ blk_exit_flush(q);
+
if (q->mq_ops)
blk_mq_free_queue(q);
- kfree(q->flush_rq);
-
blk_trace_shutdown(q);
bdi_destroy(&q->backing_dev_info);
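
On the release side, the hunk above runs blk_exit_flush() before blk_mq_free_queue(), rather than kfree()ing flush_rq afterwards as the old code did; the likely intent is that teardown now mirrors setup order. A minimal user-space sketch of that ordering (hypothetical names, not kernel code):

#include <stdlib.h>

struct queue {
	void *flush_rq;
	void *hw_queues;
};

static void exit_flush(struct queue *q)
{
	free(q->flush_rq);
	q->flush_rq = NULL;
}

static void free_queue(struct queue *q)
{
	/* Final step: after this, no member of *q may be touched. */
	free(q->hw_queues);
	free(q);
}

int main(void)
{
	struct queue *q = calloc(1, sizeof(*q));

	if (!q)
		return 1;
	q->flush_rq = calloc(1, 128);
	q->hw_queues = calloc(1, 512);

	exit_flush(q);	/* flush state first, as blk_exit_flush() above */
	free_queue(q);	/* then the queue itself */
	return 0;
}
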
--- a/block/blk.h
+++ b/block/blk.h
	kobject_get(&q->kobj);
}
+int blk_init_flush(struct request_queue *q);
+void blk_exit_flush(struct request_queue *q);
+
int blk_init_rl(struct request_list *rl, struct request_queue *q,
gfp_t gfp_mask);
void blk_exit_rl(struct request_list *rl);