}
struct request_queue *blk_mq_init_queue(struct blk_mq_tag_set *set)
+{
+	struct request_queue *uninit_q, *q;
+
+	uninit_q = blk_alloc_queue_node(GFP_KERNEL, set->numa_node);
+	if (!uninit_q)
+		return ERR_PTR(-ENOMEM);
+
+	q = blk_mq_init_allocated_queue(set, uninit_q);
+	if (IS_ERR(q))
+		blk_cleanup_queue(uninit_q);
+
+	return q;
+}
+EXPORT_SYMBOL(blk_mq_init_queue);
+
+struct request_queue *blk_mq_init_allocated_queue(struct blk_mq_tag_set *set,
+						  struct request_queue *q)
{
	struct blk_mq_hw_ctx **hctxs;
	struct blk_mq_ctx __percpu *ctx;
-	struct request_queue *q;
	unsigned int *map;
	int i;
		hctxs[i]->queue_num = i;
	}
-	q = blk_alloc_queue_node(GFP_KERNEL, set->numa_node);
-	if (!q)
-		goto err_hctxs;
-
	/*
	 * Init percpu_ref in atomic mode so that it's faster to shutdown.
	 * See blk_register_queue() for details.
	 */
	if (percpu_ref_init(&q->mq_usage_counter, blk_mq_usage_counter_release,
			    PERCPU_REF_INIT_ATOMIC, GFP_KERNEL))
-		goto err_mq_usage;
+		goto err_hctxs;
	setup_timer(&q->timeout, blk_mq_rq_timer, (unsigned long) q);
	blk_queue_rq_timeout(q, 30000);
	blk_mq_init_cpu_queues(q, set->nr_hw_queues);
	if (blk_mq_init_hw_queues(q, set))
-		goto err_mq_usage;
+		goto err_hctxs;
	mutex_lock(&all_q_mutex);
	list_add_tail(&q->all_q_node, &all_q_list);
	return q;
-err_mq_usage:
-	blk_cleanup_queue(q);
err_hctxs:
	kfree(map);
	for (i = 0; i < set->nr_hw_queues; i++) {
	free_percpu(ctx);
	return ERR_PTR(-ENOMEM);
}
-EXPORT_SYMBOL(blk_mq_init_queue);
+EXPORT_SYMBOL(blk_mq_init_allocated_queue);
void blk_mq_free_queue(struct request_queue *q)
{
		<< BLK_MQ_F_ALLOC_POLICY_START_BIT)
struct request_queue *blk_mq_init_queue(struct blk_mq_tag_set *);
+struct request_queue *blk_mq_init_allocated_queue(struct blk_mq_tag_set *set,
+		struct request_queue *q);
void blk_mq_finish_init(struct request_queue *q);
int blk_mq_register_disk(struct gendisk *);
void blk_mq_unregister_disk(struct gendisk *);
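
The split above lets a caller allocate a request_queue itself and only then hand it to blk-mq for initialization, while blk_mq_init_queue() keeps the old allocate-and-initialize behaviour as a thin wrapper. A minimal sketch of a caller using the exported pair follows; the function name example_driver_init_queue and the "driver-specific setup" step are illustrative assumptions, not part of this patch.

/*
 * Illustrative only: a driver that wants to touch the queue before blk-mq
 * initializes it.  This mirrors what the new blk_mq_init_queue() wrapper
 * does, with room for driver-specific setup in between.
 */
static struct request_queue *example_driver_init_queue(struct blk_mq_tag_set *set)
{
	struct request_queue *q, *new_q;

	/* allocate an uninitialized queue on the tag set's NUMA node */
	new_q = blk_alloc_queue_node(GFP_KERNEL, set->numa_node);
	if (!new_q)
		return ERR_PTR(-ENOMEM);

	/* ... driver-specific setup of the not-yet-initialized queue ... */

	/* initialize the caller-allocated queue for blk-mq */
	q = blk_mq_init_allocated_queue(set, new_q);
	if (IS_ERR(q))
		blk_cleanup_queue(new_q);	/* caller cleans up its own allocation */

	return q;
}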