/*
 * blk-mq scheduling framework
 *
 * Copyright (C) 2016 Jens Axboe
 */
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/blk-mq.h>

#include <trace/events/block.h>

#include "blk.h"
#include "blk-mq.h"
#include "blk-mq-debugfs.h"
#include "blk-mq-sched.h"
#include "blk-mq-tag.h"
#include "blk-wbt.h"

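/*
 * Free the per-hctx scheduler data of every hardware queue on @q, calling
 * the elevator-supplied @exit callback first where data is present.
 */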
void blk_mq_sched_free_hctx_data(struct request_queue *q,
				 void (*exit)(struct blk_mq_hw_ctx *))
{
	struct blk_mq_hw_ctx *hctx;
	int i;

	queue_for_each_hw_ctx(q, hctx, i) {
		if (exit && hctx->sched_data)
			exit(hctx);
		kfree(hctx->sched_data);
		hctx->sched_data = NULL;
	}
}
EXPORT_SYMBOL_GPL(blk_mq_sched_free_hctx_data);

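/*
 * Look up (or create) the io_cq linking @bio's submitting io_context to
 * this queue and attach it to @rq. If no icq can be allocated, the request
 * simply proceeds without one.
 */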
void blk_mq_sched_assign_ioc(struct request *rq, struct bio *bio)
{
	struct request_queue *q = rq->q;
	struct io_context *ioc = rq_ioc(bio);
	struct io_cq *icq;

	spin_lock_irq(q->queue_lock);
	icq = ioc_lookup_icq(ioc, q);
	spin_unlock_irq(q->queue_lock);

	if (!icq) {
		icq = ioc_create_icq(ioc, q, GFP_ATOMIC);
		if (!icq)
			return;
	}
	get_io_context(icq->ioc);
	rq->elv.icq = icq;
}

/*
 * Mark a hardware queue as needing a restart. For shared queues, maintain
 * a count of how many hardware queues are marked for restart.
 */
static void blk_mq_sched_mark_restart_hctx(struct blk_mq_hw_ctx *hctx)
{
	if (test_bit(BLK_MQ_S_SCHED_RESTART, &hctx->state))
		return;

	if (hctx->flags & BLK_MQ_F_TAG_SHARED) {
		struct request_queue *q = hctx->queue;

		if (!test_and_set_bit(BLK_MQ_S_SCHED_RESTART, &hctx->state))
			atomic_inc(&q->shared_hctx_restart);
	} else
		set_bit(BLK_MQ_S_SCHED_RESTART, &hctx->state);
}

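/*
 * Clear the restart mark set by blk_mq_sched_mark_restart_hctx() (dropping
 * the shared-queue count if applicable) and re-run the hardware queue if
 * it still has pending work. Returns true if the queue was re-run.
 */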
static bool blk_mq_sched_restart_hctx(struct blk_mq_hw_ctx *hctx)
{
	if (!test_bit(BLK_MQ_S_SCHED_RESTART, &hctx->state))
		return false;

	if (hctx->flags & BLK_MQ_F_TAG_SHARED) {
		struct request_queue *q = hctx->queue;

		if (test_and_clear_bit(BLK_MQ_S_SCHED_RESTART, &hctx->state))
			atomic_dec(&q->shared_hctx_restart);
	} else
		clear_bit(BLK_MQ_S_SCHED_RESTART, &hctx->state);

	if (blk_mq_hctx_has_pending(hctx)) {
		blk_mq_run_hw_queue(hctx, true);
		return true;
	}

	return false;
}

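/*
 * Main dispatch path for a hardware queue: requeued requests on
 * hctx->dispatch are sent to the driver first; otherwise requests are
 * pulled from the I/O scheduler (if one is attached) or flushed straight
 * out of the software queues.
 */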
void blk_mq_sched_dispatch_requests(struct blk_mq_hw_ctx *hctx)
{
	struct request_queue *q = hctx->queue;
	struct elevator_queue *e = q->elevator;
	const bool has_sched_dispatch = e && e->type->ops.mq.dispatch_request;
	bool do_sched_dispatch = true;
	LIST_HEAD(rq_list);

	/* RCU or SRCU read lock is needed before checking quiesced flag */
	if (unlikely(blk_mq_hctx_stopped(hctx) || blk_queue_quiesced(q)))
		return;

	hctx->run++;

	/*
	 * If we have previous entries on our dispatch list, grab them first
	 * for fairer dispatch.
	 */
	if (!list_empty_careful(&hctx->dispatch)) {
		spin_lock(&hctx->lock);
		if (!list_empty(&hctx->dispatch))
			list_splice_init(&hctx->dispatch, &rq_list);
		spin_unlock(&hctx->lock);
	}

	/*
	 * Only ask the scheduler for requests if we didn't have residual
	 * requests from the dispatch list. This is to avoid the case where
	 * we only ever dispatch a fraction of the requests available because
	 * of low device queue depth. Once we pull requests out of the IO
	 * scheduler, we can no longer merge or sort them. So it's best to
	 * leave them there for as long as we can. Mark the hw queue as
	 * needing a restart in that case.
	 */
	if (!list_empty(&rq_list)) {
		blk_mq_sched_mark_restart_hctx(hctx);
		do_sched_dispatch = blk_mq_dispatch_rq_list(q, &rq_list);
	} else if (!has_sched_dispatch) {
		blk_mq_flush_busy_ctxs(hctx, &rq_list);
		blk_mq_dispatch_rq_list(q, &rq_list);
	}

	/*
	 * We want to dispatch from the scheduler if there was nothing
	 * on the dispatch list or we were able to dispatch from the
	 * dispatch list.
	 */
	if (do_sched_dispatch && has_sched_dispatch) {
		do {
			struct request *rq;

			rq = e->type->ops.mq.dispatch_request(hctx);
			if (!rq)
				break;
			list_add(&rq->queuelist, &rq_list);
		} while (blk_mq_dispatch_rq_list(q, &rq_list));
	}
}

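/*
 * Try to merge @bio into an existing request, as chosen by the elevator.
 * On a successful back or front merge we also try to collapse the two now
 * adjacent requests; if that succeeds, *merged_request is set to the
 * request that was merged away so the caller can free it. Illustrative
 * caller sketch (not from this file):
 *
 *	struct request *free = NULL;
 *
 *	if (blk_mq_sched_try_merge(q, bio, &free)) {
 *		if (free)
 *			blk_mq_free_request(free);
 *		return true;
 *	}
 */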
bool blk_mq_sched_try_merge(struct request_queue *q, struct bio *bio,
			    struct request **merged_request)
{
	struct request *rq;

	switch (elv_merge(q, &rq, bio)) {
	case ELEVATOR_BACK_MERGE:
		if (!blk_mq_sched_allow_merge(q, rq, bio))
			return false;
		if (!bio_attempt_back_merge(q, rq, bio))
			return false;
		*merged_request = attempt_back_merge(q, rq);
		if (!*merged_request)
			elv_merged_request(q, rq, ELEVATOR_BACK_MERGE);
		return true;
	case ELEVATOR_FRONT_MERGE:
		if (!blk_mq_sched_allow_merge(q, rq, bio))
			return false;
		if (!bio_attempt_front_merge(q, rq, bio))
			return false;
		*merged_request = attempt_front_merge(q, rq);
		if (!*merged_request)
			elv_merged_request(q, rq, ELEVATOR_FRONT_MERGE);
		return true;
	default:
		return false;
	}
}
EXPORT_SYMBOL_GPL(blk_mq_sched_try_merge);

/*
 * Walk our software queue in reverse, looking for entries we could
 * potentially merge with. Uses a hand-wavy stop count of 8 so we don't
 * spend too much time checking for merges.
 */
static bool blk_mq_attempt_merge(struct request_queue *q,
				 struct blk_mq_ctx *ctx, struct bio *bio)
{
	struct request *rq;
	int checked = 8;

	lockdep_assert_held(&ctx->lock);

	list_for_each_entry_reverse(rq, &ctx->rq_list, queuelist) {
		bool merged = false;

		if (!checked--)
			break;

		if (!blk_rq_merge_ok(rq, bio))
			continue;

		switch (blk_try_merge(rq, bio)) {
		case ELEVATOR_BACK_MERGE:
			if (blk_mq_sched_allow_merge(q, rq, bio))
				merged = bio_attempt_back_merge(q, rq, bio);
			break;
		case ELEVATOR_FRONT_MERGE:
			if (blk_mq_sched_allow_merge(q, rq, bio))
				merged = bio_attempt_front_merge(q, rq, bio);
			break;
		case ELEVATOR_DISCARD_MERGE:
			merged = bio_attempt_discard_merge(q, rq, bio);
			break;
		default:
			continue;
		}

		if (merged)
			ctx->rq_merged++;
		return merged;
	}

	return false;
}

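/*
 * Top-level bio merge hook: delegate to the elevator's bio_merge callback
 * if one exists, otherwise fall back to the per-software-queue scan above.
 */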
bool __blk_mq_sched_bio_merge(struct request_queue *q, struct bio *bio)
{
	struct elevator_queue *e = q->elevator;
	struct blk_mq_ctx *ctx = blk_mq_get_ctx(q);
	struct blk_mq_hw_ctx *hctx = blk_mq_map_queue(q, ctx->cpu);
	bool ret = false;

	if (e && e->type->ops.mq.bio_merge) {
		blk_mq_put_ctx(ctx);
		return e->type->ops.mq.bio_merge(hctx, bio);
	}

	if ((hctx->flags & BLK_MQ_F_SHOULD_MERGE) &&
			!list_empty_careful(&ctx->rq_list)) {
		/* default per sw-queue merge */
		spin_lock(&ctx->lock);
		ret = blk_mq_attempt_merge(q, ctx, bio);
		spin_unlock(&ctx->lock);
	}

	blk_mq_put_ctx(ctx);
	return ret;
}

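/* Ask the elevator core to merge @rq into an already-queued request. */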
bool blk_mq_sched_try_insert_merge(struct request_queue *q, struct request *rq)
{
	return rq_mergeable(rq) && elv_attempt_insert_merge(q, rq);
}
EXPORT_SYMBOL_GPL(blk_mq_sched_try_insert_merge);

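/* Tracing hook: called once a request has been queued with the scheduler. */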
void blk_mq_sched_request_inserted(struct request *rq)
{
	trace_block_rq_insert(rq->q, rq);
}
EXPORT_SYMBOL_GPL(blk_mq_sched_request_inserted);

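/*
 * Requests that already carry a driver tag (in practice flush requests)
 * skip the scheduler and go straight to the dispatch list; everything else
 * is marked RQF_SORTED for normal scheduler insertion. Returns true if the
 * request was bypassed.
 */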
static bool blk_mq_sched_bypass_insert(struct blk_mq_hw_ctx *hctx,
				       struct request *rq)
{
	if (rq->tag == -1) {
		rq->rq_flags |= RQF_SORTED;
		return false;
	}

	/*
	 * If we already have a real request tag, send directly to
	 * the dispatch list.
	 */
	spin_lock(&hctx->lock);
	list_add(&rq->queuelist, &hctx->dispatch);
	spin_unlock(&hctx->lock);
	return true;
}

/**
 * list_for_each_entry_rcu_rr - iterate in a round-robin fashion over rcu list
 * @pos: loop cursor.
 * @skip: the list element that will not be examined. Iteration starts at
 *	@skip->next.
 * @head: head of the list to examine. This list must have at least one
 *	element, namely @skip.
 * @member: name of the list_head structure within typeof(*pos).
 */
#define list_for_each_entry_rcu_rr(pos, skip, head, member)		\
	for ((pos) = (skip);						\
	     (pos = (pos)->member.next != (head) ? list_entry_rcu(	\
			(pos)->member.next, typeof(*pos), member) :	\
			list_entry_rcu((pos)->member.next->next, typeof(*pos), member)), \
	     (pos) != (skip); )

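/*
 * Illustrative use (this is the pattern in blk_mq_sched_restart() below):
 * scan every queue sharing a tag set, starting just after @queue:
 *
 *	rcu_read_lock();
 *	list_for_each_entry_rcu_rr(q, queue, &set->tag_list, tag_set_list) {
 *		...
 *	}
 *	rcu_read_unlock();
 */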
/*
 * Called after a driver tag has been freed to check whether a hctx needs to
 * be restarted. Restarts @hctx if its tag set is not shared. Restarts hardware
 * queues in a round-robin fashion if the tag set of @hctx is shared with other
 * hardware queues.
 */
void blk_mq_sched_restart(struct blk_mq_hw_ctx *const hctx)
{
	struct blk_mq_tags *const tags = hctx->tags;
	struct blk_mq_tag_set *const set = hctx->queue->tag_set;
	struct request_queue *const queue = hctx->queue, *q;
	struct blk_mq_hw_ctx *hctx2;
	unsigned int i, j;

	if (set->flags & BLK_MQ_F_TAG_SHARED) {
		/*
		 * If this is 0, then we know that no hardware queues
		 * have RESTART marked. We're done.
		 */
		if (!atomic_read(&queue->shared_hctx_restart))
			return;

		rcu_read_lock();
		list_for_each_entry_rcu_rr(q, queue, &set->tag_list,
					   tag_set_list) {
			queue_for_each_hw_ctx(q, hctx2, i)
				if (hctx2->tags == tags &&
				    blk_mq_sched_restart_hctx(hctx2))
					goto done;
		}
		j = hctx->queue_num + 1;
		for (i = 0; i < queue->nr_hw_queues; i++, j++) {
			if (j == queue->nr_hw_queues)
				j = 0;
			hctx2 = queue->queue_hw_ctx[j];
			if (hctx2->tags == tags &&
			    blk_mq_sched_restart_hctx(hctx2))
				break;
		}
done:
		rcu_read_unlock();
	} else {
		blk_mq_sched_restart_hctx(hctx);
	}
}

/*
 * Add flush/fua to the queue. If we fail to get a driver tag, then punt to
 * the requeue list. Requeue will re-invoke us from a context that's safe
 * to block from.
 */
static void blk_mq_sched_insert_flush(struct blk_mq_hw_ctx *hctx,
				      struct request *rq, bool can_block)
{
	if (blk_mq_get_driver_tag(rq, &hctx, can_block)) {
		blk_insert_flush(rq);
		blk_mq_run_hw_queue(hctx, true);
	} else
		blk_mq_add_to_requeue_list(rq, false, true);
}

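/*
 * Insert a single request: untagged flush/fua requests take the dedicated
 * flush path; tagged requests bypass the scheduler; everything else goes
 * to the elevator's insert hook, or to the software queue if there is no
 * elevator. Optionally kicks the hardware queue afterwards.
 */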
void blk_mq_sched_insert_request(struct request *rq, bool at_head,
				 bool run_queue, bool async, bool can_block)
{
	struct request_queue *q = rq->q;
	struct elevator_queue *e = q->elevator;
	struct blk_mq_ctx *ctx = rq->mq_ctx;
	struct blk_mq_hw_ctx *hctx = blk_mq_map_queue(q, ctx->cpu);

	if (rq->tag == -1 && op_is_flush(rq->cmd_flags)) {
		blk_mq_sched_insert_flush(hctx, rq, can_block);
		return;
	}

	if (e && blk_mq_sched_bypass_insert(hctx, rq))
		goto run;

	if (e && e->type->ops.mq.insert_requests) {
		LIST_HEAD(list);

		list_add(&rq->queuelist, &list);
		e->type->ops.mq.insert_requests(hctx, &list, at_head);
	} else {
		spin_lock(&ctx->lock);
		__blk_mq_insert_request(hctx, rq, at_head);
		spin_unlock(&ctx->lock);
	}

run:
	if (run_queue)
		blk_mq_run_hw_queue(hctx, async);
}

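/*
 * Batch variant of the above, used when inserting a whole list of requests
 * (e.g. when a plug list is flushed).
 */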
void blk_mq_sched_insert_requests(struct request_queue *q,
				  struct blk_mq_ctx *ctx,
				  struct list_head *list, bool run_queue_async)
{
	struct blk_mq_hw_ctx *hctx = blk_mq_map_queue(q, ctx->cpu);
	struct elevator_queue *e = hctx->queue->elevator;

	if (e) {
		struct request *rq, *next;

		/*
		 * We bypass requests that already have a driver tag assigned,
		 * which should only be flushes. Flushes are only ever inserted
		 * as single requests, so we shouldn't ever hit the
		 * WARN_ON_ONCE() below (but let's handle it just in case).
		 */
		list_for_each_entry_safe(rq, next, list, queuelist) {
			if (WARN_ON_ONCE(rq->tag != -1)) {
				list_del_init(&rq->queuelist);
				blk_mq_sched_bypass_insert(hctx, rq);
			}
		}
	}

	if (e && e->type->ops.mq.insert_requests)
		e->type->ops.mq.insert_requests(hctx, list, false);
	else
		blk_mq_insert_requests(hctx, ctx, list);

	blk_mq_run_hw_queue(hctx, run_queue_async);
}

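/* Free the scheduler tag map (and its requests) for one hardware queue. */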
static void blk_mq_sched_free_tags(struct blk_mq_tag_set *set,
				   struct blk_mq_hw_ctx *hctx,
				   unsigned int hctx_idx)
{
	if (hctx->sched_tags) {
		blk_mq_free_rqs(set, hctx->sched_tags, hctx_idx);
		blk_mq_free_rq_map(hctx->sched_tags);
		hctx->sched_tags = NULL;
	}
}

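/*
 * Allocate a scheduler tag map and request pool for one hardware queue,
 * sized by q->nr_requests.
 */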
static int blk_mq_sched_alloc_tags(struct request_queue *q,
				   struct blk_mq_hw_ctx *hctx,
				   unsigned int hctx_idx)
{
	struct blk_mq_tag_set *set = q->tag_set;
	int ret;

	hctx->sched_tags = blk_mq_alloc_rq_map(set, hctx_idx, q->nr_requests,
					       set->reserved_tags);
	if (!hctx->sched_tags)
		return -ENOMEM;

	ret = blk_mq_alloc_rqs(set, hctx->sched_tags, hctx_idx, q->nr_requests);
	if (ret)
		blk_mq_sched_free_tags(set, hctx, hctx_idx);

	return ret;
}

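/* Release scheduler tags for every hardware queue of @q. */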
static void blk_mq_sched_tags_teardown(struct request_queue *q)
{
	struct blk_mq_tag_set *set = q->tag_set;
	struct blk_mq_hw_ctx *hctx;
	int i;

	queue_for_each_hw_ctx(q, hctx, i)
		blk_mq_sched_free_tags(set, hctx, i);
}

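/*
 * Set up scheduler state for one hardware queue: allocate its tags, run
 * the elevator's per-hctx init hook, and register it with debugfs. A queue
 * without an elevator needs none of this.
 */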
int blk_mq_sched_init_hctx(struct request_queue *q, struct blk_mq_hw_ctx *hctx,
			   unsigned int hctx_idx)
{
	struct elevator_queue *e = q->elevator;
	int ret;

	if (!e)
		return 0;

	ret = blk_mq_sched_alloc_tags(q, hctx, hctx_idx);
	if (ret)
		return ret;

	if (e->type->ops.mq.init_hctx) {
		ret = e->type->ops.mq.init_hctx(hctx, hctx_idx);
		if (ret) {
			blk_mq_sched_free_tags(q->tag_set, hctx, hctx_idx);
			return ret;
		}
	}

	blk_mq_debugfs_register_sched_hctx(q, hctx);

	return 0;
}

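/* Undo blk_mq_sched_init_hctx(): debugfs, elevator per-hctx data, tags. */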
void blk_mq_sched_exit_hctx(struct request_queue *q, struct blk_mq_hw_ctx *hctx,
			    unsigned int hctx_idx)
{
	struct elevator_queue *e = q->elevator;

	if (!e)
		return;

	blk_mq_debugfs_unregister_sched_hctx(hctx);

	if (e->type->ops.mq.exit_hctx && hctx->sched_data) {
		e->type->ops.mq.exit_hctx(hctx, hctx_idx);
		hctx->sched_data = NULL;
	}

	blk_mq_sched_free_tags(q->tag_set, hctx, hctx_idx);
}

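/*
 * Attach elevator @e to @q: pick a scheduler queue depth, allocate per-hctx
 * scheduler tags, then run the elevator's init_sched and per-hctx init
 * hooks. A NULL @e means "no scheduler" and trivially succeeds.
 */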
int blk_mq_init_sched(struct request_queue *q, struct elevator_type *e)
{
	struct blk_mq_hw_ctx *hctx;
	struct elevator_queue *eq;
	unsigned int i;
	int ret;

	if (!e) {
		q->elevator = NULL;
		return 0;
	}

	/*
	 * Default to twice the smaller of the hardware queue depth and 128,
	 * since we don't split into sync/async like the old code did.
	 * Additionally, this is a per-hw-queue depth.
	 */
	q->nr_requests = 2 * min_t(unsigned int, q->tag_set->queue_depth,
				   BLKDEV_MAX_RQ);

	queue_for_each_hw_ctx(q, hctx, i) {
		ret = blk_mq_sched_alloc_tags(q, hctx, i);
		if (ret)
			goto err;
	}

	ret = e->ops.mq.init_sched(q, e);
	if (ret)
		goto err;

	blk_mq_debugfs_register_sched(q);

	queue_for_each_hw_ctx(q, hctx, i) {
		if (e->ops.mq.init_hctx) {
			ret = e->ops.mq.init_hctx(hctx, i);
			if (ret) {
				eq = q->elevator;
				blk_mq_exit_sched(q, eq);
				kobject_put(&eq->kobj);
				return ret;
			}
		}
		blk_mq_debugfs_register_sched_hctx(q, hctx);
	}

	return 0;

err:
	blk_mq_sched_tags_teardown(q);
	q->elevator = NULL;
	return ret;
}

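/* Detach the elevator from @q, undoing blk_mq_init_sched(). */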
void blk_mq_exit_sched(struct request_queue *q, struct elevator_queue *e)
{
	struct blk_mq_hw_ctx *hctx;
	unsigned int i;

	queue_for_each_hw_ctx(q, hctx, i) {
		blk_mq_debugfs_unregister_sched_hctx(hctx);
		if (e->type->ops.mq.exit_hctx && hctx->sched_data) {
			e->type->ops.mq.exit_hctx(hctx, i);
			hctx->sched_data = NULL;
		}
	}
	blk_mq_debugfs_unregister_sched(q);
	if (e->type->ops.mq.exit_sched)
		e->type->ops.mq.exit_sched(e);
	blk_mq_sched_tags_teardown(q);
	q->elevator = NULL;
}

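/*
 * Pick and initialize the default elevator for a new queue, serialized
 * against concurrent scheduler changes by q->sysfs_lock.
 */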
int blk_mq_sched_init(struct request_queue *q)
{
	int ret;

	mutex_lock(&q->sysfs_lock);
	ret = elevator_init(q, NULL);
	mutex_unlock(&q->sysfs_lock);

	return ret;
}