/*
 *  Deadline i/o scheduler.
 *
 *  Copyright (C) 2002 Jens Axboe <axboe@suse.de>
 */
#include <linux/kernel.h>
#include <linux/fs.h>
#include <linux/blkdev.h>
#include <linux/elevator.h>
#include <linux/bio.h>
#include <linux/config.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/init.h>
#include <linux/compiler.h>
#include <linux/hash.h>
#include <linux/rbtree.h>

/*
 * See Documentation/block/deadline-iosched.txt
 */

static const int read_expire = HZ / 2;  /* max time before a read is submitted. */
static const int write_expire = 5 * HZ; /* ditto for writes, these limits are SOFT! */
static const int writes_starved = 2;    /* max times reads can starve a write */
static const int fifo_batch = 16;       /* # of sequential requests treated as one
					   by the above parameters. For throughput. */
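
/*
 * The expire values are in jiffies, so read_expire = HZ / 2 means a queued
 * read should be serviced within roughly half a second of arrival. All four
 * defaults are runtime-tunable through the sysfs attributes defined at the
 * bottom of this file, e.g. (assuming a device named "sda"):
 *
 *	echo 100 > /sys/block/sda/queue/iosched/read_expire
 */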

static const int deadline_hash_shift = 5;
#define DL_HASH_BLOCK(sec)	((sec) >> 3)
#define DL_HASH_FN(sec)		(hash_long(DL_HASH_BLOCK((sec)), deadline_hash_shift))
#define DL_HASH_ENTRIES		(1 << deadline_hash_shift)
#define rq_hash_key(rq)		((rq)->sector + (rq)->nr_sectors)
#define ON_HASH(drq)		(!hlist_unhashed(&(drq)->hash))
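
/*
 * The hash is keyed on a request's *ending* sector (rq_hash_key), so a bio
 * beginning at sector S finds its back merge candidate with one lookup of
 * DL_HASH_FN(S). DL_HASH_BLOCK() coarsens sectors to 4KB blocks (8 sectors
 * of 512 bytes) before hash_long() spreads them across the DL_HASH_ENTRIES
 * (32) buckets.
 */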

struct deadline_data {
	/*
	 * run time data
	 */

	/*
	 * requests (deadline_rq s) are present on both sort_list and fifo_list
	 */
	struct rb_root sort_list[2];
	struct list_head fifo_list[2];

	/*
	 * next in sort order. read, write or both are NULL
	 */
	struct deadline_rq *next_drq[2];
	struct hlist_head *hash;	/* request hash */
	unsigned int batching;		/* number of sequential requests made */
	sector_t last_sector;		/* head position */
	unsigned int starved;		/* times reads have starved writes */

	/*
	 * settings that change how the i/o scheduler behaves
	 */
	int fifo_expire[2];
	int fifo_batch;
	int writes_starved;
	int front_merges;

	mempool_t *drq_pool;
};
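
/*
 * Every pending request is indexed twice: by start sector in a
 * per-direction red-black tree (sort_list, giving one-way elevator order)
 * and by queueing time in a per-direction FIFO (fifo_list, giving deadline
 * order). Both arrays are indexed by READ/WRITE as returned by
 * rq_data_dir().
 */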

/*
 * pre-request data.
 */
struct deadline_rq {
	/*
	 * rbtree index, key is the starting offset
	 */
	struct rb_node rb_node;
	sector_t rb_key;

	struct request *request;

	/*
	 * request hash, key is the ending offset (for back merge lookup)
	 */
	struct hlist_node hash;

	/*
	 * expire fifo
	 */
	struct list_head fifo;
	unsigned long expires;
};

static void deadline_move_request(struct deadline_data *dd, struct deadline_rq *drq);

static kmem_cache_t *drq_pool;

#define RQ_DATA(rq)	((struct deadline_rq *) (rq)->elevator_private)

/*
 * the back merge hash support functions
 */
static inline void __deadline_del_drq_hash(struct deadline_rq *drq)
{
	hlist_del_init(&drq->hash);
}

static inline void deadline_del_drq_hash(struct deadline_rq *drq)
{
	if (ON_HASH(drq))
		__deadline_del_drq_hash(drq);
}

static inline void
deadline_add_drq_hash(struct deadline_data *dd, struct deadline_rq *drq)
{
	struct request *rq = drq->request;

	BUG_ON(ON_HASH(drq));

	hlist_add_head(&drq->hash, &dd->hash[DL_HASH_FN(rq_hash_key(rq))]);
}

/*
 * move hot entry to front of chain
 */
static inline void
deadline_hot_drq_hash(struct deadline_data *dd, struct deadline_rq *drq)
{
	struct request *rq = drq->request;
	struct hlist_head *head = &dd->hash[DL_HASH_FN(rq_hash_key(rq))];

	if (ON_HASH(drq) && &drq->hash != head->first) {
		hlist_del(&drq->hash);
		hlist_add_head(&drq->hash, head);
	}
}

static struct request *
deadline_find_drq_hash(struct deadline_data *dd, sector_t offset)
{
	struct hlist_head *hash_list = &dd->hash[DL_HASH_FN(offset)];
	struct hlist_node *entry, *next;
	struct deadline_rq *drq;

	hlist_for_each_entry_safe(drq, entry, next, hash_list, hash) {
		struct request *__rq = drq->request;

		BUG_ON(!ON_HASH(drq));

		if (!rq_mergeable(__rq)) {
			__deadline_del_drq_hash(drq);
			continue;
		}

		if (rq_hash_key(__rq) == offset)
			return __rq;
	}

	return NULL;
}
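
/*
 * Note that deadline_find_drq_hash() doubles as lazy cleanup: requests that
 * have become unmergeable are dropped from the hash as the chain is walked,
 * which is why the _safe iterator variant is required.
 */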

/*
 * rb tree support functions
 */
#define rb_entry_drq(node)	rb_entry((node), struct deadline_rq, rb_node)
#define DRQ_RB_ROOT(dd, drq)	(&(dd)->sort_list[rq_data_dir((drq)->request)])
#define rq_rb_key(rq)		(rq)->sector

static struct deadline_rq *
__deadline_add_drq_rb(struct deadline_data *dd, struct deadline_rq *drq)
{
	struct rb_node **p = &DRQ_RB_ROOT(dd, drq)->rb_node;
	struct rb_node *parent = NULL;
	struct deadline_rq *__drq;

	while (*p) {
		parent = *p;
		__drq = rb_entry_drq(parent);

		if (drq->rb_key < __drq->rb_key)
			p = &(*p)->rb_left;
		else if (drq->rb_key > __drq->rb_key)
			p = &(*p)->rb_right;
		else
			return __drq;
	}

	rb_link_node(&drq->rb_node, parent, p);
	return NULL;
}

static void
deadline_add_drq_rb(struct deadline_data *dd, struct deadline_rq *drq)
{
	struct deadline_rq *__alias;

	drq->rb_key = rq_rb_key(drq->request);

retry:
	__alias = __deadline_add_drq_rb(dd, drq);
	if (!__alias) {
		rb_insert_color(&drq->rb_node, DRQ_RB_ROOT(dd, drq));
		return;
	}

	deadline_move_request(dd, __alias);
	goto retry;
}
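
/*
 * A non-NULL return from __deadline_add_drq_rb() is an "alias": a request
 * with the same start sector already in the tree. The resident request is
 * sent straight to the dispatch queue and the insert retried, so the tree
 * never holds duplicate keys.
 */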

static inline void
deadline_del_drq_rb(struct deadline_data *dd, struct deadline_rq *drq)
{
	const int data_dir = rq_data_dir(drq->request);

	if (dd->next_drq[data_dir] == drq) {
		struct rb_node *rbnext = rb_next(&drq->rb_node);

		dd->next_drq[data_dir] = NULL;
		if (rbnext)
			dd->next_drq[data_dir] = rb_entry_drq(rbnext);
	}

	BUG_ON(!RB_EMPTY_NODE(&drq->rb_node));
	rb_erase(&drq->rb_node, DRQ_RB_ROOT(dd, drq));
	RB_CLEAR_NODE(&drq->rb_node);
}
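
/*
 * Careful: in this kernel generation RB_EMPTY_NODE() is true for a node
 * that is *linked* into a tree (the sense was inverted relative to later
 * kernels), so the BUG_ON above asserts that drq is still on its sort_list
 * before rb_erase(); RB_CLEAR_NODE() then marks it unlinked again.
 */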

static struct request *
deadline_find_drq_rb(struct deadline_data *dd, sector_t sector, int data_dir)
{
	struct rb_node *n = dd->sort_list[data_dir].rb_node;
	struct deadline_rq *drq;

	while (n) {
		drq = rb_entry_drq(n);

		if (sector < drq->rb_key)
			n = n->rb_left;
		else if (sector > drq->rb_key)
			n = n->rb_right;
		else
			return drq->request;
	}

	return NULL;
}

/*
 * deadline_find_first_drq finds the first (lowest sector numbered) request
 * for the specified data_dir. Used to sweep back to the start of the disk
 * (1-way elevator) after we process the last (highest sector) request.
 */
static struct deadline_rq *
deadline_find_first_drq(struct deadline_data *dd, int data_dir)
{
	struct rb_node *n = dd->sort_list[data_dir].rb_node;

	for (;;) {
		if (n->rb_left == NULL)
			return rb_entry_drq(n);

		n = n->rb_left;
	}
}

/*
 * add drq to rbtree and fifo
 */
static void
deadline_add_request(struct request_queue *q, struct request *rq)
{
	struct deadline_data *dd = q->elevator->elevator_data;
	struct deadline_rq *drq = RQ_DATA(rq);

	const int data_dir = rq_data_dir(drq->request);

	deadline_add_drq_rb(dd, drq);
	/*
	 * set expire time and add to fifo list
	 */
	drq->expires = jiffies + dd->fifo_expire[data_dir];
	list_add_tail(&drq->fifo, &dd->fifo_list[data_dir]);

	if (rq_mergeable(rq))
		deadline_add_drq_hash(dd, drq);
}

/*
 * remove rq from rbtree, fifo, and hash
 */
static void deadline_remove_request(request_queue_t *q, struct request *rq)
{
	struct deadline_rq *drq = RQ_DATA(rq);
	struct deadline_data *dd = q->elevator->elevator_data;

	list_del_init(&drq->fifo);
	deadline_del_drq_rb(dd, drq);
	deadline_del_drq_hash(drq);
}

static int
deadline_merge(request_queue_t *q, struct request **req, struct bio *bio)
{
	struct deadline_data *dd = q->elevator->elevator_data;
	struct request *__rq;
	int ret;

	/*
	 * see if the merge hash can satisfy a back merge
	 */
	__rq = deadline_find_drq_hash(dd, bio->bi_sector);
	if (__rq) {
		BUG_ON(__rq->sector + __rq->nr_sectors != bio->bi_sector);

		if (elv_rq_merge_ok(__rq, bio)) {
			ret = ELEVATOR_BACK_MERGE;
			goto out;
		}
	}

	/*
	 * check for front merge
	 */
	if (dd->front_merges) {
		sector_t rb_key = bio->bi_sector + bio_sectors(bio);

		__rq = deadline_find_drq_rb(dd, rb_key, bio_data_dir(bio));
		if (__rq) {
			BUG_ON(rb_key != rq_rb_key(__rq));

			if (elv_rq_merge_ok(__rq, bio)) {
				ret = ELEVATOR_FRONT_MERGE;
				goto out;
			}
		}
	}

	return ELEVATOR_NO_MERGE;
out:
	if (ret)
		deadline_hot_drq_hash(dd, RQ_DATA(__rq));
	*req = __rq;
	return ret;
}
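
/*
 * Back merges (bio appended to an existing request) are found via the
 * end-sector hash; front merges (bio prepended) via the start-sector
 * rbtree, since the candidate is the request whose start sector equals the
 * bio's end sector. Front merges are much rarer, so the rbtree probe can
 * be switched off entirely with the front_merges tunable.
 */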

static void deadline_merged_request(request_queue_t *q, struct request *req)
{
	struct deadline_data *dd = q->elevator->elevator_data;
	struct deadline_rq *drq = RQ_DATA(req);

	/*
	 * hash always needs to be repositioned, key is end sector
	 */
	deadline_del_drq_hash(drq);
	deadline_add_drq_hash(dd, drq);

	/*
	 * if the merge was a front merge, we need to reposition request
	 */
	if (rq_rb_key(req) != drq->rb_key) {
		deadline_del_drq_rb(dd, drq);
		deadline_add_drq_rb(dd, drq);
	}
}

static void
deadline_merged_requests(request_queue_t *q, struct request *req,
			 struct request *next)
{
	struct deadline_data *dd = q->elevator->elevator_data;
	struct deadline_rq *drq = RQ_DATA(req);
	struct deadline_rq *dnext = RQ_DATA(next);

	BUG_ON(!drq);
	BUG_ON(!dnext);

	/*
	 * reposition drq (this is the merged request) in hash, and in rbtree
	 * in case of a front merge
	 */
	deadline_del_drq_hash(drq);
	deadline_add_drq_hash(dd, drq);

	if (rq_rb_key(req) != drq->rb_key) {
		deadline_del_drq_rb(dd, drq);
		deadline_add_drq_rb(dd, drq);
	}

	/*
	 * if dnext expires before drq, assign its expire time to drq
	 * and move into dnext position (dnext will be deleted) in fifo
	 */
	if (!list_empty(&drq->fifo) && !list_empty(&dnext->fifo)) {
		if (time_before(dnext->expires, drq->expires)) {
			list_move(&drq->fifo, &dnext->fifo);
			drq->expires = dnext->expires;
		}
	}

	/*
	 * kill knowledge of next, this one is a goner
	 */
	deadline_remove_request(q, next);
}
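
/*
 * Inheriting the earlier expiry when two queued requests merge keeps the
 * merge from silently relaxing a deadline: the surviving request is due no
 * later than the sooner of its two parts.
 */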

/*
 * move request from sort list to dispatch queue.
 */
static inline void
deadline_move_to_dispatch(struct deadline_data *dd, struct deadline_rq *drq)
{
	request_queue_t *q = drq->request->q;

	deadline_remove_request(q, drq->request);
	elv_dispatch_add_tail(q, drq->request);
}

/*
 * move an entry to dispatch queue
 */
static void
deadline_move_request(struct deadline_data *dd, struct deadline_rq *drq)
{
	const int data_dir = rq_data_dir(drq->request);
	struct rb_node *rbnext = rb_next(&drq->rb_node);

	dd->next_drq[READ] = NULL;
	dd->next_drq[WRITE] = NULL;

	if (rbnext)
		dd->next_drq[data_dir] = rb_entry_drq(rbnext);

	dd->last_sector = drq->request->sector + drq->request->nr_sectors;

	/*
	 * take it off the sort and fifo list, move
	 * to dispatch queue
	 */
	deadline_move_to_dispatch(dd, drq);
}

#define list_entry_fifo(ptr)	list_entry((ptr), struct deadline_rq, fifo)

/*
 * deadline_check_fifo returns 0 if there are no expired requests on the
 * fifo, 1 otherwise. Requires !list_empty(&dd->fifo_list[ddir])
 */
static inline int deadline_check_fifo(struct deadline_data *dd, int ddir)
{
	struct deadline_rq *drq = list_entry_fifo(dd->fifo_list[ddir].next);

	/*
	 * drq is expired!
	 */
	if (time_after(jiffies, drq->expires))
		return 1;

	return 0;
}
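
/*
 * Only the FIFO head needs checking: entries are appended in arrival order
 * with a fixed per-direction expiry offset, so deadlines are effectively
 * monotonic along the list and nothing behind an unexpired head can have
 * expired. time_after() keeps the comparison safe across jiffies wrap.
 */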

/*
 * deadline_dispatch_requests selects the best request according to
 * read/write expire, fifo_batch, etc
 */
static int deadline_dispatch_requests(request_queue_t *q, int force)
{
	struct deadline_data *dd = q->elevator->elevator_data;
	const int reads = !list_empty(&dd->fifo_list[READ]);
	const int writes = !list_empty(&dd->fifo_list[WRITE]);
	struct deadline_rq *drq;
	int data_dir;

	/*
	 * batches are currently reads XOR writes
	 */
	if (dd->next_drq[WRITE])
		drq = dd->next_drq[WRITE];
	else
		drq = dd->next_drq[READ];

	if (drq) {
		/* we have a "next request" */

		if (dd->last_sector != drq->request->sector)
			/* end the batch on a non sequential request */
			dd->batching += dd->fifo_batch;

		if (dd->batching < dd->fifo_batch)
			/* we are still entitled to batch */
			goto dispatch_request;
	}
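
	/*
	 * Adding fifo_batch instead of setting a flag is a compact way to
	 * end the batch: it guarantees the dd->batching < dd->fifo_batch
	 * test above fails, so a non-sequential "next request" falls
	 * through to direction selection below.
	 */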

	/*
	 * at this point we are not running a batch. select the appropriate
	 * data direction (read / write)
	 */

	if (reads) {
		BUG_ON(RB_EMPTY_ROOT(&dd->sort_list[READ]));

		if (writes && (dd->starved++ >= dd->writes_starved))
			goto dispatch_writes;

		data_dir = READ;

		goto dispatch_find_request;
	}

	/*
	 * there are either no reads or writes have been starved
	 */

	if (writes) {
dispatch_writes:
		BUG_ON(RB_EMPTY_ROOT(&dd->sort_list[WRITE]));

		dd->starved = 0;

		data_dir = WRITE;

		goto dispatch_find_request;
	}

	return 0;
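
	/*
	 * Reads are preferred, but every time a read pass is chosen while
	 * writes are pending, dd->starved is bumped; once it reaches
	 * writes_starved (default 2) a write pass is forced and the counter
	 * resets. This bounds how long a stream of reads can hold off the
	 * write FIFO.
	 */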

dispatch_find_request:
	/*
	 * we are not running a batch, find best request for selected data_dir
	 */
	if (deadline_check_fifo(dd, data_dir)) {
		/* An expired request exists - satisfy it */
		dd->batching = 0;
		drq = list_entry_fifo(dd->fifo_list[data_dir].next);

	} else if (dd->next_drq[data_dir]) {
		/*
		 * The last req was the same dir and we have a next request in
		 * sort order. No expired requests so continue on from here.
		 */
		drq = dd->next_drq[data_dir];
	} else {
		/*
		 * The last req was the other direction or we have run out of
		 * higher-sectored requests. Go back to the lowest sectored
		 * request (1 way elevator) and start a new batch.
		 */
		dd->batching = 0;
		drq = deadline_find_first_drq(dd, data_dir);
	}

dispatch_request:
	/*
	 * drq is the selected appropriate request.
	 */
	dd->batching++;
	deadline_move_request(dd, drq);

	return 1;
}
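
/*
 * An expired request is served from the FIFO head even when it lies far
 * from the current head position: deadline trades one long seek for
 * bounded latency, then resumes sequential batching from the chosen
 * request's position (deadline_move_request() re-seeds next_drq).
 */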

static int deadline_queue_empty(request_queue_t *q)
{
	struct deadline_data *dd = q->elevator->elevator_data;

	return list_empty(&dd->fifo_list[WRITE])
		&& list_empty(&dd->fifo_list[READ]);
}

static struct request *
deadline_former_request(request_queue_t *q, struct request *rq)
{
	struct deadline_rq *drq = RQ_DATA(rq);
	struct rb_node *rbprev = rb_prev(&drq->rb_node);

	if (rbprev)
		return rb_entry_drq(rbprev)->request;

	return NULL;
}

static struct request *
deadline_latter_request(request_queue_t *q, struct request *rq)
{
	struct deadline_rq *drq = RQ_DATA(rq);
	struct rb_node *rbnext = rb_next(&drq->rb_node);

	if (rbnext)
		return rb_entry_drq(rbnext)->request;

	return NULL;
}

static void deadline_exit_queue(elevator_t *e)
{
	struct deadline_data *dd = e->elevator_data;

	BUG_ON(!list_empty(&dd->fifo_list[READ]));
	BUG_ON(!list_empty(&dd->fifo_list[WRITE]));

	mempool_destroy(dd->drq_pool);
	kfree(dd->hash);
	kfree(dd);
}

/*
 * initialize elevator private data (deadline_data), and alloc a drq for
 * each request on the free lists
 */
static void *deadline_init_queue(request_queue_t *q, elevator_t *e)
{
	struct deadline_data *dd;
	int i;

	if (!drq_pool)
		return NULL;

	dd = kmalloc_node(sizeof(*dd), GFP_KERNEL, q->node);
	if (!dd)
		return NULL;
	memset(dd, 0, sizeof(*dd));

	dd->hash = kmalloc_node(sizeof(struct hlist_head)*DL_HASH_ENTRIES,
				GFP_KERNEL, q->node);
	if (!dd->hash) {
		kfree(dd);
		return NULL;
	}

	dd->drq_pool = mempool_create_node(BLKDEV_MIN_RQ, mempool_alloc_slab,
					   mempool_free_slab, drq_pool, q->node);
	if (!dd->drq_pool) {
		kfree(dd->hash);
		kfree(dd);
		return NULL;
	}

	for (i = 0; i < DL_HASH_ENTRIES; i++)
		INIT_HLIST_HEAD(&dd->hash[i]);

	INIT_LIST_HEAD(&dd->fifo_list[READ]);
	INIT_LIST_HEAD(&dd->fifo_list[WRITE]);
	dd->sort_list[READ] = RB_ROOT;
	dd->sort_list[WRITE] = RB_ROOT;
	dd->fifo_expire[READ] = read_expire;
	dd->fifo_expire[WRITE] = write_expire;
	dd->writes_starved = writes_starved;
	dd->front_merges = 1;
	dd->fifo_batch = fifo_batch;
	return dd;
}
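
/*
 * The allocations above all use the *_node variants so per-queue scheduler
 * data lands on the queue's NUMA node (q->node); the mempool keeps at
 * least BLKDEV_MIN_RQ deadline_rq objects in reserve so request setup can
 * make progress even under memory pressure.
 */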

static void deadline_put_request(request_queue_t *q, struct request *rq)
{
	struct deadline_data *dd = q->elevator->elevator_data;
	struct deadline_rq *drq = RQ_DATA(rq);

	mempool_free(drq, dd->drq_pool);
	rq->elevator_private = NULL;
}

static int
deadline_set_request(request_queue_t *q, struct request *rq, struct bio *bio,
		     gfp_t gfp_mask)
{
	struct deadline_data *dd = q->elevator->elevator_data;
	struct deadline_rq *drq;

	drq = mempool_alloc(dd->drq_pool, gfp_mask);
	if (drq) {
		memset(drq, 0, sizeof(*drq));
		RB_CLEAR_NODE(&drq->rb_node);
		drq->request = rq;

		INIT_HLIST_NODE(&drq->hash);

		INIT_LIST_HEAD(&drq->fifo);

		rq->elevator_private = drq;
		return 0;
	}

	return 1;
}

/*
 * sysfs parts below
 */

static ssize_t
deadline_var_show(int var, char *page)
{
	return sprintf(page, "%d\n", var);
}

static ssize_t
deadline_var_store(int *var, const char *page, size_t count)
{
	char *p = (char *) page;

	*var = simple_strtol(p, &p, 10);
	return count;
}

#define SHOW_FUNCTION(__FUNC, __VAR, __CONV)				\
static ssize_t __FUNC(elevator_t *e, char *page)			\
{									\
	struct deadline_data *dd = e->elevator_data;			\
	int __data = __VAR;						\
	if (__CONV)							\
		__data = jiffies_to_msecs(__data);			\
	return deadline_var_show(__data, (page));			\
}
SHOW_FUNCTION(deadline_read_expire_show, dd->fifo_expire[READ], 1);
SHOW_FUNCTION(deadline_write_expire_show, dd->fifo_expire[WRITE], 1);
SHOW_FUNCTION(deadline_writes_starved_show, dd->writes_starved, 0);
SHOW_FUNCTION(deadline_front_merges_show, dd->front_merges, 0);
SHOW_FUNCTION(deadline_fifo_batch_show, dd->fifo_batch, 0);
#undef SHOW_FUNCTION

#define STORE_FUNCTION(__FUNC, __PTR, MIN, MAX, __CONV)			\
static ssize_t __FUNC(elevator_t *e, const char *page, size_t count)	\
{									\
	struct deadline_data *dd = e->elevator_data;			\
	int __data;							\
	int ret = deadline_var_store(&__data, (page), count);		\
	if (__data < (MIN))						\
		__data = (MIN);						\
	else if (__data > (MAX))					\
		__data = (MAX);						\
	if (__CONV)							\
		*(__PTR) = msecs_to_jiffies(__data);			\
	else								\
		*(__PTR) = __data;					\
	return ret;							\
}
STORE_FUNCTION(deadline_read_expire_store, &dd->fifo_expire[READ], 0, INT_MAX, 1);
STORE_FUNCTION(deadline_write_expire_store, &dd->fifo_expire[WRITE], 0, INT_MAX, 1);
STORE_FUNCTION(deadline_writes_starved_store, &dd->writes_starved, INT_MIN, INT_MAX, 0);
STORE_FUNCTION(deadline_front_merges_store, &dd->front_merges, 0, 1, 0);
STORE_FUNCTION(deadline_fifo_batch_store, &dd->fifo_batch, 0, INT_MAX, 0);
#undef STORE_FUNCTION

#define DD_ATTR(name) \
	__ATTR(name, S_IRUGO|S_IWUSR, deadline_##name##_show, \
				      deadline_##name##_store)

static struct elv_fs_entry deadline_attrs[] = {
	DD_ATTR(read_expire),
	DD_ATTR(write_expire),
	DD_ATTR(writes_starved),
	DD_ATTR(front_merges),
	DD_ATTR(fifo_batch),
	__ATTR_NULL
};
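
/*
 * For reference, SHOW_FUNCTION(deadline_read_expire_show,
 * dd->fifo_expire[READ], 1) expands to roughly:
 *
 *	static ssize_t deadline_read_expire_show(elevator_t *e, char *page)
 *	{
 *		struct deadline_data *dd = e->elevator_data;
 *		int __data = dd->fifo_expire[READ];
 *		__data = jiffies_to_msecs(__data);
 *		return deadline_var_show(__data, (page));
 *	}
 *
 * so the sysfs files read and write milliseconds while the scheduler works
 * in jiffies internally (__CONV == 1 selects the conversion).
 */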

static struct elevator_type iosched_deadline = {
	.ops = {
		.elevator_merge_fn =		deadline_merge,
		.elevator_merged_fn =		deadline_merged_request,
		.elevator_merge_req_fn =	deadline_merged_requests,
		.elevator_dispatch_fn =		deadline_dispatch_requests,
		.elevator_add_req_fn =		deadline_add_request,
		.elevator_queue_empty_fn =	deadline_queue_empty,
		.elevator_former_req_fn =	deadline_former_request,
		.elevator_latter_req_fn =	deadline_latter_request,
		.elevator_set_req_fn =		deadline_set_request,
		.elevator_put_req_fn =		deadline_put_request,
		.elevator_init_fn =		deadline_init_queue,
		.elevator_exit_fn =		deadline_exit_queue,
	},

	.elevator_attrs = deadline_attrs,
	.elevator_name = "deadline",
	.elevator_owner = THIS_MODULE,
};

static int __init deadline_init(void)
{
	int ret;

	drq_pool = kmem_cache_create("deadline_drq", sizeof(struct deadline_rq),
				     0, 0, NULL, NULL);

	if (!drq_pool)
		return -ENOMEM;

	ret = elv_register(&iosched_deadline);
	if (ret)
		kmem_cache_destroy(drq_pool);

	return ret;
}

static void __exit deadline_exit(void)
{
	kmem_cache_destroy(drq_pool);
	elv_unregister(&iosched_deadline);
}

module_init(deadline_init);
module_exit(deadline_exit);

MODULE_AUTHOR("Jens Axboe");
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("deadline IO scheduler");
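
/*
 * With the module loaded, the scheduler can be selected per device at
 * runtime (again assuming a device named "sda"):
 *
 *	echo deadline > /sys/block/sda/queue/scheduler
 *
 * or made the system-wide default at boot with elevator=deadline.
 */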