/*
 *  Block device elevator/IO-scheduler.
 *
 *  Copyright (C) 2000 Andrea Arcangeli <andrea@suse.de> SuSE
 *
 * 30042000 Jens Axboe <axboe@suse.de> :
 *
 * Split the elevator a bit so that it is possible to choose a different
 * one or even write a new "plug in". There are three pieces:
 * - elevator_fn, inserts a new request in the queue list
 * - elevator_merge_fn, decides whether a new buffer can be merged with
 *   an existing request
 * - elevator_dequeue_fn, called when a request is taken off the active list
 *
 * 20082000 Dave Jones <davej@suse.de> :
 * Removed tests for max-bomb-segments, which was breaking elvtune
 * when run without -bN
 *
 * Jens:
 * - Rework again to work with bio instead of buffer_heads
 * - lose bi_dev comparisons; partition handling is correct now
 * - completely modularize elevator setup and teardown
 *
 */
#include <linux/kernel.h>
#include <linux/fs.h>
#include <linux/blkdev.h>
#include <linux/elevator.h>
#include <linux/bio.h>
#include <linux/config.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/init.h>
#include <linux/compiler.h>
#include <linux/delay.h>
#include <linux/blktrace_api.h>

#include <asm/uaccess.h>

static DEFINE_SPINLOCK(elv_list_lock);
static LIST_HEAD(elv_list);

/*
 * can we safely merge with this request?
 */
inline int elv_rq_merge_ok(struct request *rq, struct bio *bio)
{
        if (!rq_mergeable(rq))
                return 0;

        /*
         * different data direction or already started, don't merge
         */
        if (bio_data_dir(bio) != rq_data_dir(rq))
                return 0;

        /*
         * same device and no special stuff set, merge is ok
         */
        if (rq->rq_disk == bio->bi_bdev->bd_disk &&
            !rq->waiting && !rq->special)
                return 1;

        return 0;
}
EXPORT_SYMBOL(elv_rq_merge_ok);

static inline int elv_try_merge(struct request *__rq, struct bio *bio)
{
        int ret = ELEVATOR_NO_MERGE;

        /*
         * we can merge and sequence is ok, check if it's possible
         */
        if (elv_rq_merge_ok(__rq, bio)) {
                if (__rq->sector + __rq->nr_sectors == bio->bi_sector)
                        ret = ELEVATOR_BACK_MERGE;
                else if (__rq->sector - bio_sectors(bio) == bio->bi_sector)
                        ret = ELEVATOR_FRONT_MERGE;
        }

        return ret;
}
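
/*
 * Worked example (illustrative, not from the original source): if __rq
 * covers sectors [100, 108) (->sector == 100, ->nr_sectors == 8), a bio
 * starting at sector 108 continues it at the tail and yields
 * ELEVATOR_BACK_MERGE, while a 4-sector bio starting at sector 96 ends
 * exactly where the request begins (100 - 4 == 96) and yields
 * ELEVATOR_FRONT_MERGE.
 */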

static struct elevator_type *elevator_find(const char *name)
{
        struct elevator_type *e = NULL;
        struct list_head *entry;

        list_for_each(entry, &elv_list) {
                struct elevator_type *__e;

                __e = list_entry(entry, struct elevator_type, list);

                if (!strcmp(__e->elevator_name, name)) {
                        e = __e;
                        break;
                }
        }

        return e;
}

static void elevator_put(struct elevator_type *e)
{
        module_put(e->elevator_owner);
}

static struct elevator_type *elevator_get(const char *name)
{
        struct elevator_type *e;

        spin_lock_irq(&elv_list_lock);

        e = elevator_find(name);
        if (e && !try_module_get(e->elevator_owner))
                e = NULL;

        spin_unlock_irq(&elv_list_lock);

        return e;
}

static int elevator_attach(request_queue_t *q, struct elevator_queue *eq)
{
        int ret = 0;

        q->elevator = eq;

        if (eq->ops->elevator_init_fn)
                ret = eq->ops->elevator_init_fn(q, eq);

        return ret;
}

static char chosen_elevator[16];

static int __init elevator_setup(char *str)
{
        /*
         * Be backwards-compatible with previous kernels, so users
         * won't get the wrong elevator.
         */
        if (!strcmp(str, "as"))
                strcpy(chosen_elevator, "anticipatory");
        else
                strncpy(chosen_elevator, str, sizeof(chosen_elevator) - 1);
        return 0;
}

__setup("elevator=", elevator_setup);
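
/*
 * Usage note (illustrative): booting with "elevator=deadline" on the
 * kernel command line pre-selects the deadline scheduler for all queues;
 * the legacy shorthand "elevator=as" is rewritten to "anticipatory" above.
 */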

static struct kobj_type elv_ktype;

static elevator_t *elevator_alloc(struct elevator_type *e)
{
        elevator_t *eq = kmalloc(sizeof(elevator_t), GFP_KERNEL);
        if (eq) {
                memset(eq, 0, sizeof(*eq));
                eq->ops = &e->ops;
                eq->elevator_type = e;
                kobject_init(&eq->kobj);
                snprintf(eq->kobj.name, KOBJ_NAME_LEN, "%s", "iosched");
                eq->kobj.ktype = &elv_ktype;
                mutex_init(&eq->sysfs_lock);
        } else {
                elevator_put(e);
        }
        return eq;
}
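
/*
 * Reference-counting note (for readers): elevator_get() takes a module
 * reference on the elevator type; elevator_alloc() drops it via
 * elevator_put() if the allocation fails, and on success it is released
 * by elevator_release() below once the embedded kobject's refcount
 * reaches zero.
 */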

static void elevator_release(struct kobject *kobj)
{
        elevator_t *e = container_of(kobj, elevator_t, kobj);
        elevator_put(e->elevator_type);
        kfree(e);
}

int elevator_init(request_queue_t *q, char *name)
{
        struct elevator_type *e = NULL;
        struct elevator_queue *eq;
        int ret = 0;

        INIT_LIST_HEAD(&q->queue_head);
        q->last_merge = NULL;
        q->end_sector = 0;
        q->boundary_rq = NULL;

        if (name && !(e = elevator_get(name)))
                return -EINVAL;

        if (!e && *chosen_elevator && !(e = elevator_get(chosen_elevator)))
                printk("I/O scheduler %s not found\n", chosen_elevator);

        if (!e && !(e = elevator_get(CONFIG_DEFAULT_IOSCHED))) {
                printk("Default I/O scheduler not found, using no-op\n");
                e = elevator_get("noop");
        }

        eq = elevator_alloc(e);
        if (!eq)
                return -ENOMEM;

        ret = elevator_attach(q, eq);
        if (ret)
                kobject_put(&eq->kobj);

        return ret;
}
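
/*
 * Selection order used above: an explicit name passed by the driver wins,
 * then the "elevator=" boot parameter, then CONFIG_DEFAULT_IOSCHED, and
 * finally noop as the last resort.
 */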

void elevator_exit(elevator_t *e)
{
        mutex_lock(&e->sysfs_lock);
        if (e->ops->elevator_exit_fn)
                e->ops->elevator_exit_fn(e);
        e->ops = NULL;
        mutex_unlock(&e->sysfs_lock);

        kobject_put(&e->kobj);
}

/*
 * Insert rq into dispatch queue of q.  Queue lock must be held on
 * entry.  rq is sort-inserted into the dispatch queue, honouring the
 * scheduling boundary.  To be used by specific elevators.
 */
void elv_dispatch_sort(request_queue_t *q, struct request *rq)
{
        sector_t boundary;
        struct list_head *entry;

        if (q->last_merge == rq)
                q->last_merge = NULL;
        q->nr_sorted--;

        boundary = q->end_sector;

        list_for_each_prev(entry, &q->queue_head) {
                struct request *pos = list_entry_rq(entry);

                if (pos->flags & (REQ_SOFTBARRIER|REQ_HARDBARRIER|REQ_STARTED))
                        break;
                if (rq->sector >= boundary) {
                        if (pos->sector < boundary)
                                continue;
                } else {
                        if (pos->sector >= boundary)
                                break;
                }
                if (rq->sector >= pos->sector)
                        break;
        }

        list_add(&rq->queuelist, entry);
}
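
/*
 * Illustrative example: with q->end_sector at 500, a request for sector
 * 520 is sorted among the other requests at or above the boundary, while
 * a request for sector 80 lands after all of them, in the part of the
 * list that has wrapped around for the next sweep of the head.  Barriers
 * and already-started requests are never passed.
 */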

int elv_merge(request_queue_t *q, struct request **req, struct bio *bio)
{
        elevator_t *e = q->elevator;
        int ret;

        if (q->last_merge) {
                ret = elv_try_merge(q->last_merge, bio);
                if (ret != ELEVATOR_NO_MERGE) {
                        *req = q->last_merge;
                        return ret;
                }
        }

        if (e->ops->elevator_merge_fn)
                return e->ops->elevator_merge_fn(q, req, bio);

        return ELEVATOR_NO_MERGE;
}
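
/*
 * Note: elv_merge() first retries the cached q->last_merge hint before
 * consulting the scheduler's merge hook; elv_merged_request() and
 * elv_merge_requests() below refresh that hint after each merge.
 */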

void elv_merged_request(request_queue_t *q, struct request *rq)
{
        elevator_t *e = q->elevator;

        if (e->ops->elevator_merged_fn)
                e->ops->elevator_merged_fn(q, rq);

        q->last_merge = rq;
}

void elv_merge_requests(request_queue_t *q, struct request *rq,
                        struct request *next)
{
        elevator_t *e = q->elevator;

        if (e->ops->elevator_merge_req_fn)
                e->ops->elevator_merge_req_fn(q, rq, next);
        q->nr_sorted--;

        q->last_merge = rq;
}

void elv_requeue_request(request_queue_t *q, struct request *rq)
{
        elevator_t *e = q->elevator;

        /*
         * it already went through dequeue, we need to decrement the
         * in_flight count again
         */
        if (blk_account_rq(rq)) {
                q->in_flight--;
                if (blk_sorted_rq(rq) && e->ops->elevator_deactivate_req_fn)
                        e->ops->elevator_deactivate_req_fn(q, rq);
        }

        rq->flags &= ~REQ_STARTED;

        elv_insert(q, rq, ELEVATOR_INSERT_REQUEUE);
}

static void elv_drain_elevator(request_queue_t *q)
{
        static int printed;
        while (q->elevator->ops->elevator_dispatch_fn(q, 1))
                ;
        if (q->nr_sorted == 0)
                return;
        if (printed++ < 10) {
                printk(KERN_ERR "%s: forced dispatching is broken "
                       "(nr_sorted=%u), please report this\n",
                       q->elevator->elevator_type->elevator_name, q->nr_sorted);
        }
}

void elv_insert(request_queue_t *q, struct request *rq, int where)
{
        struct list_head *pos;
        unsigned ordseq;

        blk_add_trace_rq(q, rq, BLK_TA_INSERT);

        rq->q = q;

        switch (where) {
        case ELEVATOR_INSERT_FRONT:
                rq->flags |= REQ_SOFTBARRIER;

                list_add(&rq->queuelist, &q->queue_head);
                break;

        case ELEVATOR_INSERT_BACK:
                rq->flags |= REQ_SOFTBARRIER;
                elv_drain_elevator(q);
                list_add_tail(&rq->queuelist, &q->queue_head);
                /*
                 * We kick the queue here for the following reasons.
                 * - The elevator might have returned NULL previously
                 *   to delay requests and returned them now.  As the
                 *   queue wasn't empty before this request, ll_rw_blk
                 *   won't run the queue on return, resulting in a hang.
                 * - Usually, back inserted requests won't be merged
                 *   with anything.  There's no point in delaying queue
                 *   processing.
                 */
                blk_remove_plug(q);
                q->request_fn(q);
                break;

        case ELEVATOR_INSERT_SORT:
                BUG_ON(!blk_fs_request(rq));
                rq->flags |= REQ_SORTED;
                q->nr_sorted++;
                if (q->last_merge == NULL && rq_mergeable(rq))
                        q->last_merge = rq;
                /*
                 * Some ioscheds (cfq) run q->request_fn directly, so
                 * rq cannot be accessed after calling
                 * elevator_add_req_fn.
                 */
                q->elevator->ops->elevator_add_req_fn(q, rq);
                break;

        case ELEVATOR_INSERT_REQUEUE:
                /*
                 * If ordered flush isn't in progress, we do front
                 * insertion; otherwise, requests should be requeued
                 * in ordseq order.
                 */
                rq->flags |= REQ_SOFTBARRIER;

                if (q->ordseq == 0) {
                        list_add(&rq->queuelist, &q->queue_head);
                        break;
                }

                ordseq = blk_ordered_req_seq(rq);

                list_for_each(pos, &q->queue_head) {
                        struct request *pos_rq = list_entry_rq(pos);
                        if (ordseq <= blk_ordered_req_seq(pos_rq))
                                break;
                }

                list_add_tail(&rq->queuelist, pos);
                break;

        default:
                printk(KERN_ERR "%s: bad insertion point %d\n",
                       __FUNCTION__, where);
                BUG();
        }

        if (blk_queue_plugged(q)) {
                int nrq = q->rq.count[READ] + q->rq.count[WRITE]
                        - q->in_flight;

                if (nrq >= q->unplug_thresh)
                        __generic_unplug_device(q);
        }
}

void __elv_add_request(request_queue_t *q, struct request *rq, int where,
                       int plug)
{
        if (q->ordcolor)
                rq->flags |= REQ_ORDERED_COLOR;

        if (rq->flags & (REQ_SOFTBARRIER | REQ_HARDBARRIER)) {
                /*
                 * toggle ordered color
                 */
                if (blk_barrier_rq(rq))
                        q->ordcolor ^= 1;

                /*
                 * barriers implicitly indicate back insertion
                 */
                if (where == ELEVATOR_INSERT_SORT)
                        where = ELEVATOR_INSERT_BACK;

                /*
                 * this request is a scheduling boundary, update
                 * end_sector
                 */
                if (blk_fs_request(rq)) {
                        q->end_sector = rq_end_sector(rq);
                        q->boundary_rq = rq;
                }
        } else if (!(rq->flags & REQ_ELVPRIV) && where == ELEVATOR_INSERT_SORT)
                where = ELEVATOR_INSERT_BACK;

        if (plug)
                blk_plug_device(q);

        elv_insert(q, rq, where);
}
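
/*
 * __elv_add_request() expects q->queue_lock to be held by the caller;
 * elv_add_request() below is the wrapper that takes the lock itself.
 */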

void elv_add_request(request_queue_t *q, struct request *rq, int where,
                     int plug)
{
        unsigned long flags;

        spin_lock_irqsave(q->queue_lock, flags);
        __elv_add_request(q, rq, where, plug);
        spin_unlock_irqrestore(q->queue_lock, flags);
}

static inline struct request *__elv_next_request(request_queue_t *q)
{
        struct request *rq;

        while (1) {
                while (!list_empty(&q->queue_head)) {
                        rq = list_entry_rq(q->queue_head.next);
                        if (blk_do_ordered(q, &rq))
                                return rq;
                }

                if (!q->elevator->ops->elevator_dispatch_fn(q, 0))
                        return NULL;
        }
}

struct request *elv_next_request(request_queue_t *q)
{
        struct request *rq;
        int ret;

        while ((rq = __elv_next_request(q)) != NULL) {
                if (!(rq->flags & REQ_STARTED)) {
                        elevator_t *e = q->elevator;

                        /*
                         * This is the first time the device driver
                         * sees this request (possibly after
                         * requeueing).  Notify IO scheduler.
                         */
                        if (blk_sorted_rq(rq) &&
                            e->ops->elevator_activate_req_fn)
                                e->ops->elevator_activate_req_fn(q, rq);

                        /*
                         * just mark as started even if we don't start
                         * it, a request that has been delayed should
                         * not be passed by new incoming requests
                         */
                        rq->flags |= REQ_STARTED;
                        blk_add_trace_rq(q, rq, BLK_TA_ISSUE);
                }

                if (!q->boundary_rq || q->boundary_rq == rq) {
                        q->end_sector = rq_end_sector(rq);
                        q->boundary_rq = NULL;
                }

                if ((rq->flags & REQ_DONTPREP) || !q->prep_rq_fn)
                        break;

                ret = q->prep_rq_fn(q, rq);
                if (ret == BLKPREP_OK) {
                        break;
                } else if (ret == BLKPREP_DEFER) {
                        /*
                         * the request may have been (partially) prepped.
                         * we need to keep this request in the front to
                         * avoid resource deadlock.  REQ_STARTED will
                         * prevent other fs requests from passing this one.
                         */
                        rq = NULL;
                        break;
                } else if (ret == BLKPREP_KILL) {
                        int nr_bytes = rq->hard_nr_sectors << 9;

                        if (!nr_bytes)
                                nr_bytes = rq->data_len;

                        blkdev_dequeue_request(rq);
                        rq->flags |= REQ_QUIET;
                        end_that_request_chunk(rq, 0, nr_bytes);
                        end_that_request_last(rq, 0);
                } else {
                        printk(KERN_ERR "%s: bad return=%d\n", __FUNCTION__,
                               ret);
                        break;
                }
        }

        return rq;
}
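
/*
 * Illustrative (hypothetical) driver usage, not part of this file: a
 * driver's request_fn typically pulls requests off the queue like so:
 *
 *      static void example_request_fn(request_queue_t *q)
 *      {
 *              struct request *rq;
 *
 *              while ((rq = elv_next_request(q)) != NULL) {
 *                      blkdev_dequeue_request(rq);
 *                      ... hand rq to the hardware, complete it later ...
 *              }
 *      }
 *
 * A NULL return means the queue is empty or the head request was held
 * back by prep (BLKPREP_DEFER above).
 */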

void elv_dequeue_request(request_queue_t *q, struct request *rq)
{
        BUG_ON(list_empty(&rq->queuelist));

        list_del_init(&rq->queuelist);

        /*
         * the time frame between a request being removed from the lists
         * and until it is freed is accounted as io that is in progress
         * at the driver side.
         */
        if (blk_account_rq(rq))
                q->in_flight++;
}

int elv_queue_empty(request_queue_t *q)
{
        elevator_t *e = q->elevator;

        if (!list_empty(&q->queue_head))
                return 0;

        if (e->ops->elevator_queue_empty_fn)
                return e->ops->elevator_queue_empty_fn(q);

        return 1;
}

struct request *elv_latter_request(request_queue_t *q, struct request *rq)
{
        elevator_t *e = q->elevator;

        if (e->ops->elevator_latter_req_fn)
                return e->ops->elevator_latter_req_fn(q, rq);
        return NULL;
}

struct request *elv_former_request(request_queue_t *q, struct request *rq)
{
        elevator_t *e = q->elevator;

        if (e->ops->elevator_former_req_fn)
                return e->ops->elevator_former_req_fn(q, rq);
        return NULL;
}

int elv_set_request(request_queue_t *q, struct request *rq, struct bio *bio,
                    gfp_t gfp_mask)
{
        elevator_t *e = q->elevator;

        if (e->ops->elevator_set_req_fn)
                return e->ops->elevator_set_req_fn(q, rq, bio, gfp_mask);

        rq->elevator_private = NULL;
        return 0;
}

void elv_put_request(request_queue_t *q, struct request *rq)
{
        elevator_t *e = q->elevator;

        if (e->ops->elevator_put_req_fn)
                e->ops->elevator_put_req_fn(q, rq);
}

int elv_may_queue(request_queue_t *q, int rw, struct bio *bio)
{
        elevator_t *e = q->elevator;

        if (e->ops->elevator_may_queue_fn)
                return e->ops->elevator_may_queue_fn(q, rw, bio);

        return ELV_MQUEUE_MAY;
}

void elv_completed_request(request_queue_t *q, struct request *rq)
{
        elevator_t *e = q->elevator;

        /*
         * request is released from the driver, io must be done
         */
        if (blk_account_rq(rq)) {
                q->in_flight--;
                if (blk_sorted_rq(rq) && e->ops->elevator_completed_req_fn)
                        e->ops->elevator_completed_req_fn(q, rq);
        }

        /*
         * Check if the queue is waiting for fs requests to be
         * drained for flush sequence.
         */
        if (unlikely(q->ordseq)) {
                struct request *first_rq = list_entry_rq(q->queue_head.next);
                if (q->in_flight == 0 &&
                    blk_ordered_cur_seq(q) == QUEUE_ORDSEQ_DRAIN &&
                    blk_ordered_req_seq(first_rq) > QUEUE_ORDSEQ_DRAIN) {
                        blk_ordered_complete_seq(q, QUEUE_ORDSEQ_DRAIN, 0);
                        q->request_fn(q);
                }
        }
}

#define to_elv(atr) container_of((atr), struct elv_fs_entry, attr)

static ssize_t
elv_attr_show(struct kobject *kobj, struct attribute *attr, char *page)
{
        elevator_t *e = container_of(kobj, elevator_t, kobj);
        struct elv_fs_entry *entry = to_elv(attr);
        ssize_t error;

        if (!entry->show)
                return -EIO;

        mutex_lock(&e->sysfs_lock);
        error = e->ops ? entry->show(e, page) : -ENOENT;
        mutex_unlock(&e->sysfs_lock);
        return error;
}

static ssize_t
elv_attr_store(struct kobject *kobj, struct attribute *attr,
               const char *page, size_t length)
{
        elevator_t *e = container_of(kobj, elevator_t, kobj);
        struct elv_fs_entry *entry = to_elv(attr);
        ssize_t error;

        if (!entry->store)
                return -EIO;

        mutex_lock(&e->sysfs_lock);
        error = e->ops ? entry->store(e, page, length) : -ENOENT;
        mutex_unlock(&e->sysfs_lock);
        return error;
}

static struct sysfs_ops elv_sysfs_ops = {
        .show   = elv_attr_show,
        .store  = elv_attr_store,
};

static struct kobj_type elv_ktype = {
        .sysfs_ops      = &elv_sysfs_ops,
        .release        = elevator_release,
};

int elv_register_queue(struct request_queue *q)
{
        elevator_t *e = q->elevator;
        int error;

        e->kobj.parent = &q->kobj;

        error = kobject_add(&e->kobj);
        if (!error) {
                struct elv_fs_entry *attr = e->elevator_type->elevator_attrs;
                if (attr) {
                        while (attr->attr.name) {
                                if (sysfs_create_file(&e->kobj, &attr->attr))
                                        break;
                                attr++;
                        }
                }
                kobject_uevent(&e->kobj, KOBJ_ADD);
        }
        return error;
}

void elv_unregister_queue(struct request_queue *q)
{
        if (q) {
                elevator_t *e = q->elevator;
                kobject_uevent(&e->kobj, KOBJ_REMOVE);
                kobject_del(&e->kobj);
        }
}

int elv_register(struct elevator_type *e)
{
        spin_lock_irq(&elv_list_lock);
        BUG_ON(elevator_find(e->elevator_name));
        list_add_tail(&e->list, &elv_list);
        spin_unlock_irq(&elv_list_lock);

        printk(KERN_INFO "io scheduler %s registered", e->elevator_name);
        if (!strcmp(e->elevator_name, chosen_elevator) ||
            (!*chosen_elevator &&
             !strcmp(e->elevator_name, CONFIG_DEFAULT_IOSCHED)))
                printk(" (default)");
        printk("\n");
        return 0;
}
EXPORT_SYMBOL_GPL(elv_register);

void elv_unregister(struct elevator_type *e)
{
        struct task_struct *g, *p;

        /*
         * Iterate over every thread in the system and trim its io context.
         */
        if (e->ops.trim) {
                read_lock(&tasklist_lock);
                do_each_thread(g, p) {
                        task_lock(p);
                        e->ops.trim(p->io_context);
                        task_unlock(p);
                } while_each_thread(g, p);
                read_unlock(&tasklist_lock);
        }

        spin_lock_irq(&elv_list_lock);
        list_del_init(&e->list);
        spin_unlock_irq(&elv_list_lock);
}
EXPORT_SYMBOL_GPL(elv_unregister);

/*
 * switch to new_e io scheduler.  be careful not to introduce deadlocks -
 * we don't free the old io scheduler before we have allocated what we
 * need for the new one.  this way we have a chance of going back to the
 * old one, if the new one fails init for some reason.
 */
static int elevator_switch(request_queue_t *q, struct elevator_type *new_e)
{
        elevator_t *old_elevator, *e;

        /*
         * Allocate new elevator
         */
        e = elevator_alloc(new_e);
        if (!e)
                return 0;

        /*
         * Turn on BYPASS and drain all requests w/ elevator private data
         */
        spin_lock_irq(q->queue_lock);

        set_bit(QUEUE_FLAG_ELVSWITCH, &q->queue_flags);

        elv_drain_elevator(q);

        while (q->rq.elvpriv) {
                blk_remove_plug(q);
                q->request_fn(q);
                spin_unlock_irq(q->queue_lock);
                msleep(10);
                spin_lock_irq(q->queue_lock);
                elv_drain_elevator(q);
        }

        spin_unlock_irq(q->queue_lock);

        /*
         * unregister old elevator data
         */
        elv_unregister_queue(q);
        old_elevator = q->elevator;

        /*
         * attach and start new elevator
         */
        if (elevator_attach(q, e))
                goto fail;

        if (elv_register_queue(q))
                goto fail_register;

        /*
         * finally exit old elevator and turn off BYPASS.
         */
        elevator_exit(old_elevator);
        clear_bit(QUEUE_FLAG_ELVSWITCH, &q->queue_flags);
        return 1;

fail_register:
        /*
         * switch failed, exit the new io scheduler and reattach the old
         * one again (along with re-adding the sysfs dir)
         */
        elevator_exit(e);
        e = NULL;
fail:
        q->elevator = old_elevator;
        elv_register_queue(q);
        clear_bit(QUEUE_FLAG_ELVSWITCH, &q->queue_flags);
        if (e)
                kobject_put(&e->kobj);
        return 0;
}

ssize_t elv_iosched_store(request_queue_t *q, const char *name, size_t count)
{
        char elevator_name[ELV_NAME_MAX];
        size_t len;
        struct elevator_type *e;

        elevator_name[sizeof(elevator_name) - 1] = '\0';
        strncpy(elevator_name, name, sizeof(elevator_name) - 1);
        len = strlen(elevator_name);

        if (len && elevator_name[len - 1] == '\n')
                elevator_name[len - 1] = '\0';

        e = elevator_get(elevator_name);
        if (!e) {
                printk(KERN_ERR "elevator: type %s not found\n", elevator_name);
                return -EINVAL;
        }

        if (!strcmp(elevator_name, q->elevator->elevator_type->elevator_name)) {
                elevator_put(e);
                return count;
        }

        if (!elevator_switch(q, e))
                printk(KERN_ERR "elevator: switch to %s failed\n",
                       elevator_name);
        return count;
}
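
/*
 * Example (assuming sysfs is mounted): the active scheduler for a disk
 * can be changed at runtime through this store hook, e.g.:
 *
 *      # echo deadline > /sys/block/hda/queue/scheduler
 *
 * elv_iosched_show() below lists the registered schedulers with the
 * active one in square brackets, e.g. "noop [deadline] cfq".
 */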

ssize_t elv_iosched_show(request_queue_t *q, char *name)
{
        elevator_t *e = q->elevator;
        struct elevator_type *elv = e->elevator_type;
        struct list_head *entry;
        int len = 0;

        spin_lock_irq(q->queue_lock);
        list_for_each(entry, &elv_list) {
                struct elevator_type *__e;

                __e = list_entry(entry, struct elevator_type, list);
                if (!strcmp(elv->elevator_name, __e->elevator_name))
                        len += sprintf(name+len, "[%s] ", elv->elevator_name);
                else
                        len += sprintf(name+len, "%s ", __e->elevator_name);
        }
        spin_unlock_irq(q->queue_lock);

        len += sprintf(len+name, "\n");
        return len;
}

EXPORT_SYMBOL(elv_dispatch_sort);
EXPORT_SYMBOL(elv_add_request);
EXPORT_SYMBOL(__elv_add_request);
EXPORT_SYMBOL(elv_requeue_request);
EXPORT_SYMBOL(elv_next_request);
EXPORT_SYMBOL(elv_dequeue_request);
EXPORT_SYMBOL(elv_queue_empty);
EXPORT_SYMBOL(elv_completed_request);
EXPORT_SYMBOL(elevator_exit);
EXPORT_SYMBOL(elevator_init);