/*
 *  linux/drivers/block/elevator.c
 *
 *  Block device elevator/IO-scheduler.
 *
 *  Copyright (C) 2000 Andrea Arcangeli <andrea@suse.de> SuSE
 *
 * 30042000 Jens Axboe <axboe@suse.de> :
 *
 * Split the elevator a bit so that it is possible to choose a different
 * one or even write a new "plug in". There are three pieces:
 * - elevator_fn, inserts a new request in the queue list
 * - elevator_merge_fn, decides whether a new buffer can be merged with
 *   an existing request
 * - elevator_dequeue_fn, called when a request is taken off the active list
 *
 * 20082000 Dave Jones <davej@suse.de> :
 * Removed tests for max-bomb-segments, which was breaking elvtune
 * when run without -bN
 *
 * Jens:
 * - Rework again to work with bio instead of buffer_heads
 * - lose bi_dev comparisons, partition handling is now correct
 * - completely modularize elevator setup and teardown
 *
 */
#include <linux/kernel.h>
#include <linux/fs.h>
#include <linux/blkdev.h>
#include <linux/elevator.h>
#include <linux/bio.h>
#include <linux/config.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/init.h>
#include <linux/compiler.h>
#include <linux/delay.h>

#include <asm/uaccess.h>

static DEFINE_SPINLOCK(elv_list_lock);
static LIST_HEAD(elv_list);

/*
 * can we safely merge with this request?
 */
inline int elv_rq_merge_ok(struct request *rq, struct bio *bio)
{
	if (!rq_mergeable(rq))
		return 0;

	/*
	 * different data direction or already started, don't merge
	 */
	if (bio_data_dir(bio) != rq_data_dir(rq))
		return 0;

	/*
	 * same device and no special stuff set, merge is ok
	 */
	if (rq->rq_disk == bio->bi_bdev->bd_disk &&
	    !rq->waiting && !rq->special)
		return 1;

	return 0;
}
EXPORT_SYMBOL(elv_rq_merge_ok);

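/*
 * see if bio can be back- or front-merged with __rq based on the
 * sectors it covers: a back merge appends bio at the end of __rq,
 * a front merge prepends it
 */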
inline int elv_try_merge(struct request *__rq, struct bio *bio)
{
	int ret = ELEVATOR_NO_MERGE;

	/*
	 * we can merge and sequence is ok, check if it's possible
	 */
	if (elv_rq_merge_ok(__rq, bio)) {
		if (__rq->sector + __rq->nr_sectors == bio->bi_sector)
			ret = ELEVATOR_BACK_MERGE;
		else if (__rq->sector - bio_sectors(bio) == bio->bi_sector)
			ret = ELEVATOR_FRONT_MERGE;
	}

	return ret;
}
EXPORT_SYMBOL(elv_try_merge);

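/*
 * find a registered elevator type by name; the caller must hold
 * elv_list_lock
 */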
static struct elevator_type *elevator_find(const char *name)
{
	struct elevator_type *e = NULL;
	struct list_head *entry;

	list_for_each(entry, &elv_list) {
		struct elevator_type *__e;

		__e = list_entry(entry, struct elevator_type, list);

		if (!strcmp(__e->elevator_name, name)) {
			e = __e;
			break;
		}
	}

	return e;
}

static void elevator_put(struct elevator_type *e)
{
	module_put(e->elevator_owner);
}

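/*
 * look up an elevator type and take a reference on its owner module;
 * drop it with elevator_put() when done
 */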
static struct elevator_type *elevator_get(const char *name)
{
	struct elevator_type *e;

	spin_lock_irq(&elv_list_lock);

	e = elevator_find(name);
	if (e && !try_module_get(e->elevator_owner))
		e = NULL;

	spin_unlock_irq(&elv_list_lock);

	return e;
}

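/*
 * bind eq to type e's ops, make it q's elevator and run the
 * scheduler's own init hook
 */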
static int elevator_attach(request_queue_t *q, struct elevator_type *e,
			   struct elevator_queue *eq)
{
	int ret = 0;

	memset(eq, 0, sizeof(*eq));
	eq->ops = &e->ops;
	eq->elevator_type = e;

	q->elevator = eq;

	if (eq->ops->elevator_init_fn)
		ret = eq->ops->elevator_init_fn(q, eq);

	return ret;
}

static char chosen_elevator[16];

static void elevator_setup_default(void)
{
	struct elevator_type *e;

	/*
	 * check if default is set and exists
	 */
	if (chosen_elevator[0] && (e = elevator_get(chosen_elevator))) {
		elevator_put(e);
		return;
	}

#if defined(CONFIG_IOSCHED_AS)
	strcpy(chosen_elevator, "anticipatory");
#elif defined(CONFIG_IOSCHED_DEADLINE)
	strcpy(chosen_elevator, "deadline");
#elif defined(CONFIG_IOSCHED_CFQ)
	strcpy(chosen_elevator, "cfq");
#elif defined(CONFIG_IOSCHED_NOOP)
	strcpy(chosen_elevator, "noop");
#else
#error "You must build at least 1 IO scheduler into the kernel"
#endif
}

static int __init elevator_setup(char *str)
{
	strncpy(chosen_elevator, str, sizeof(chosen_elevator) - 1);
	return 0;
}

__setup("elevator=", elevator_setup);

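/*
 * initialize q's dispatch queue state and attach the elevator named
 * by 'name' (or the compiled-in/boot-selected default when name is
 * NULL)
 */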
int elevator_init(request_queue_t *q, char *name)
{
	struct elevator_type *e = NULL;
	struct elevator_queue *eq;
	int ret = 0;

	INIT_LIST_HEAD(&q->queue_head);
	q->last_merge = NULL;
	q->end_sector = 0;
	q->boundary_rq = NULL;

	elevator_setup_default();

	if (!name)
		name = chosen_elevator;

	e = elevator_get(name);
	if (!e)
		return -EINVAL;

	eq = kmalloc(sizeof(struct elevator_queue), GFP_KERNEL);
	if (!eq) {
		elevator_put(e);
		return -ENOMEM;
	}

	ret = elevator_attach(q, e, eq);
	if (ret) {
		kfree(eq);
		elevator_put(e);
	}

	return ret;
}

void elevator_exit(elevator_t *e)
{
	if (e->ops->elevator_exit_fn)
		e->ops->elevator_exit_fn(e);

	elevator_put(e->elevator_type);
	e->elevator_type = NULL;
	kfree(e);
}

/*
 * Insert rq into the dispatch queue of q. The queue lock must be held
 * on entry. rq is sort-inserted by sector, honouring the scheduling
 * boundary (q->end_sector). To be used by specific elevators.
 */
void elv_dispatch_sort(request_queue_t *q, struct request *rq)
{
	sector_t boundary;
	struct list_head *entry;

	if (q->last_merge == rq)
		q->last_merge = NULL;

	boundary = q->end_sector;

	list_for_each_prev(entry, &q->queue_head) {
		struct request *pos = list_entry_rq(entry);

		if (pos->flags & (REQ_SOFTBARRIER|REQ_HARDBARRIER|REQ_STARTED))
			break;
		if (rq->sector >= boundary) {
			if (pos->sector < boundary)
				continue;
		} else {
			if (pos->sector >= boundary)
				break;
		}
		if (rq->sector >= pos->sector)
			break;
	}

	list_add(&rq->queuelist, entry);
}

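/*
 * see if bio can be merged into a pending request: try the cached
 * last_merge hint first, then ask the io scheduler
 */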
int elv_merge(request_queue_t *q, struct request **req, struct bio *bio)
{
	elevator_t *e = q->elevator;
	int ret;

	if (q->last_merge) {
		ret = elv_try_merge(q->last_merge, bio);
		if (ret != ELEVATOR_NO_MERGE) {
			*req = q->last_merge;
			return ret;
		}
	}

	if (e->ops->elevator_merge_fn)
		return e->ops->elevator_merge_fn(q, req, bio);

	return ELEVATOR_NO_MERGE;
}

void elv_merged_request(request_queue_t *q, struct request *rq)
{
	elevator_t *e = q->elevator;

	if (e->ops->elevator_merged_fn)
		e->ops->elevator_merged_fn(q, rq);

	q->last_merge = rq;
}

void elv_merge_requests(request_queue_t *q, struct request *rq,
			struct request *next)
{
	elevator_t *e = q->elevator;

	if (e->ops->elevator_merge_req_fn)
		e->ops->elevator_merge_req_fn(q, rq, next);

	q->last_merge = rq;
}

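/*
 * a request the driver gave back is reinserted at the front of the
 * dispatch queue so it is reissued before anything else
 */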
void elv_requeue_request(request_queue_t *q, struct request *rq)
{
	elevator_t *e = q->elevator;

	/*
	 * it already went through dequeue, we need to decrement the
	 * in_flight count again
	 */
	if (blk_account_rq(rq)) {
		q->in_flight--;
		if (blk_sorted_rq(rq) && e->ops->elevator_deactivate_req_fn)
			e->ops->elevator_deactivate_req_fn(q, rq);
	}

	rq->flags &= ~REQ_STARTED;

	/*
	 * if this is the flush, requeue the original instead and drop the flush
	 */
	if (rq->flags & REQ_BAR_FLUSH) {
		clear_bit(QUEUE_FLAG_FLUSH, &q->queue_flags);
		rq = rq->end_io_data;
	}

	__elv_add_request(q, rq, ELEVATOR_INSERT_FRONT, 0);
}

void __elv_add_request(request_queue_t *q, struct request *rq, int where,
		       int plug)
{
	if (rq->flags & (REQ_SOFTBARRIER | REQ_HARDBARRIER)) {
		/*
		 * barriers implicitly indicate back insertion
		 */
		if (where == ELEVATOR_INSERT_SORT)
			where = ELEVATOR_INSERT_BACK;

		/*
		 * this request is a scheduling boundary, update end_sector
		 */
		if (blk_fs_request(rq)) {
			q->end_sector = rq_end_sector(rq);
			q->boundary_rq = rq;
		}
	} else if (!(rq->flags & REQ_ELVPRIV) && where == ELEVATOR_INSERT_SORT)
		where = ELEVATOR_INSERT_BACK;

	if (plug)
		blk_plug_device(q);

	rq->q = q;

	switch (where) {
	case ELEVATOR_INSERT_FRONT:
		rq->flags |= REQ_SOFTBARRIER;

		list_add(&rq->queuelist, &q->queue_head);
		break;

	case ELEVATOR_INSERT_BACK:
		rq->flags |= REQ_SOFTBARRIER;

		while (q->elevator->ops->elevator_dispatch_fn(q, 1))
			;
		list_add_tail(&rq->queuelist, &q->queue_head);
		/*
		 * We kick the queue here for the following reasons.
		 * - The elevator might have returned NULL previously
		 *   to delay requests and returned them now.  As the
		 *   queue wasn't empty before this request, ll_rw_blk
		 *   won't run the queue on return, resulting in a hang.
		 * - Usually, back inserted requests won't be merged
		 *   with anything.  There's no point in delaying queue
		 *   processing.
		 */
		blk_remove_plug(q);
		q->request_fn(q);
		break;

	case ELEVATOR_INSERT_SORT:
		BUG_ON(!blk_fs_request(rq));
		rq->flags |= REQ_SORTED;
		q->elevator->ops->elevator_add_req_fn(q, rq);
		if (q->last_merge == NULL && rq_mergeable(rq))
			q->last_merge = rq;
		break;

	default:
		printk(KERN_ERR "%s: bad insertion point %d\n",
		       __FUNCTION__, where);
		BUG();
	}

	if (blk_queue_plugged(q)) {
		int nrq = q->rq.count[READ] + q->rq.count[WRITE]
			- q->in_flight;

		if (nrq >= q->unplug_thresh)
			__generic_unplug_device(q);
	}
}

void elv_add_request(request_queue_t *q, struct request *rq, int where,
		     int plug)
{
	unsigned long flags;

	spin_lock_irqsave(q->queue_lock, flags);
	__elv_add_request(q, rq, where, plug);
	spin_unlock_irqrestore(q->queue_lock, flags);
}

static inline struct request *__elv_next_request(request_queue_t *q)
{
	struct request *rq;

	if (unlikely(list_empty(&q->queue_head) &&
		     !q->elevator->ops->elevator_dispatch_fn(q, 0)))
		return NULL;

	rq = list_entry_rq(q->queue_head.next);

	/*
	 * if this is a barrier write and the device has to issue a
	 * flush sequence to support it, check how far we are
	 */
	if (blk_fs_request(rq) && blk_barrier_rq(rq)) {
		BUG_ON(q->ordered == QUEUE_ORDERED_NONE);

		if (q->ordered == QUEUE_ORDERED_FLUSH &&
		    !blk_barrier_preflush(rq))
			rq = blk_start_pre_flush(q, rq);
	}

	return rq;
}

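/*
 * return the next request for the driver to process, marking it
 * started and running the queue's prep function; returns NULL if the
 * queue is empty or preparation was deferred
 */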
struct request *elv_next_request(request_queue_t *q)
{
	struct request *rq;
	int ret;

	while ((rq = __elv_next_request(q)) != NULL) {
		if (!(rq->flags & REQ_STARTED)) {
			elevator_t *e = q->elevator;

			/*
			 * This is the first time the device driver
			 * sees this request (possibly after
			 * requeueing).  Notify IO scheduler.
			 */
			if (blk_sorted_rq(rq) &&
			    e->ops->elevator_activate_req_fn)
				e->ops->elevator_activate_req_fn(q, rq);

			/*
			 * just mark as started even if we don't start
			 * it, a request that has been delayed should
			 * not be passed by new incoming requests
			 */
			rq->flags |= REQ_STARTED;
		}

		if (!q->boundary_rq || q->boundary_rq == rq) {
			q->end_sector = rq_end_sector(rq);
			q->boundary_rq = NULL;
		}

		if ((rq->flags & REQ_DONTPREP) || !q->prep_rq_fn)
			break;

		ret = q->prep_rq_fn(q, rq);
		if (ret == BLKPREP_OK) {
			break;
		} else if (ret == BLKPREP_DEFER) {
			/*
			 * the request may have been (partially) prepped.
			 * we need to keep this request in the front to
			 * avoid resource deadlock.  REQ_STARTED will
			 * prevent other fs requests from passing this one.
			 */
			rq = NULL;
			break;
		} else if (ret == BLKPREP_KILL) {
			int nr_bytes = rq->hard_nr_sectors << 9;

			if (!nr_bytes)
				nr_bytes = rq->data_len;

			blkdev_dequeue_request(rq);
			rq->flags |= REQ_QUIET;
			end_that_request_chunk(rq, 0, nr_bytes);
			end_that_request_last(rq);
		} else {
			printk(KERN_ERR "%s: bad return=%d\n", __FUNCTION__,
			       ret);
			break;
		}
	}

	return rq;
}

void elv_dequeue_request(request_queue_t *q, struct request *rq)
{
	BUG_ON(list_empty(&rq->queuelist));

	list_del_init(&rq->queuelist);

	/*
	 * the time frame between a request being removed from the lists
	 * and when it is freed is accounted as io that is in progress at
	 * the driver side.
	 */
	if (blk_account_rq(rq))
		q->in_flight++;
}

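/*
 * the queue is only empty if the dispatch list is empty and the io
 * scheduler has nothing pending either
 */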
int elv_queue_empty(request_queue_t *q)
{
	elevator_t *e = q->elevator;

	if (!list_empty(&q->queue_head))
		return 0;

	if (e->ops->elevator_queue_empty_fn)
		return e->ops->elevator_queue_empty_fn(q);

	return 1;
}

struct request *elv_latter_request(request_queue_t *q, struct request *rq)
{
	struct list_head *next;

	elevator_t *e = q->elevator;

	if (e->ops->elevator_latter_req_fn)
		return e->ops->elevator_latter_req_fn(q, rq);

	next = rq->queuelist.next;
	if (next != &q->queue_head && next != &rq->queuelist)
		return list_entry_rq(next);

	return NULL;
}

struct request *elv_former_request(request_queue_t *q, struct request *rq)
{
	struct list_head *prev;

	elevator_t *e = q->elevator;

	if (e->ops->elevator_former_req_fn)
		return e->ops->elevator_former_req_fn(q, rq);

	prev = rq->queuelist.prev;
	if (prev != &q->queue_head && prev != &rq->queuelist)
		return list_entry_rq(prev);

	return NULL;
}

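/*
 * allocate (set) and free (put) per-request private data on behalf of
 * the io scheduler
 */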
int elv_set_request(request_queue_t *q, struct request *rq, struct bio *bio,
		    gfp_t gfp_mask)
{
	elevator_t *e = q->elevator;

	if (e->ops->elevator_set_req_fn)
		return e->ops->elevator_set_req_fn(q, rq, bio, gfp_mask);

	rq->elevator_private = NULL;
	return 0;
}

void elv_put_request(request_queue_t *q, struct request *rq)
{
	elevator_t *e = q->elevator;

	if (e->ops->elevator_put_req_fn)
		e->ops->elevator_put_req_fn(q, rq);
}

int elv_may_queue(request_queue_t *q, int rw, struct bio *bio)
{
	elevator_t *e = q->elevator;

	if (e->ops->elevator_may_queue_fn)
		return e->ops->elevator_may_queue_fn(q, rw, bio);

	return ELV_MQUEUE_MAY;
}

void elv_completed_request(request_queue_t *q, struct request *rq)
{
	elevator_t *e = q->elevator;

	/*
	 * request is released from the driver, io must be done
	 */
	if (blk_account_rq(rq)) {
		q->in_flight--;
		if (blk_sorted_rq(rq) && e->ops->elevator_completed_req_fn)
			e->ops->elevator_completed_req_fn(q, rq);
	}
}

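/*
 * register the "iosched" sysfs directory below the queue's kobject
 */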
int elv_register_queue(struct request_queue *q)
{
	elevator_t *e = q->elevator;

	e->kobj.parent = kobject_get(&q->kobj);
	if (!e->kobj.parent)
		return -EBUSY;

	snprintf(e->kobj.name, KOBJ_NAME_LEN, "%s", "iosched");
	e->kobj.ktype = e->elevator_type->elevator_ktype;

	return kobject_register(&e->kobj);
}

void elv_unregister_queue(struct request_queue *q)
{
	if (q) {
		elevator_t *e = q->elevator;
		kobject_unregister(&e->kobj);
		kobject_put(&q->kobj);
	}
}

int elv_register(struct elevator_type *e)
{
	spin_lock_irq(&elv_list_lock);
	if (elevator_find(e->elevator_name))
		BUG();
	list_add_tail(&e->list, &elv_list);
	spin_unlock_irq(&elv_list_lock);

	printk(KERN_INFO "io scheduler %s registered", e->elevator_name);
	if (!strcmp(e->elevator_name, chosen_elevator))
		printk(" (default)");
	printk("\n");
	return 0;
}
EXPORT_SYMBOL_GPL(elv_register);

void elv_unregister(struct elevator_type *e)
{
	spin_lock_irq(&elv_list_lock);
	list_del_init(&e->list);
	spin_unlock_irq(&elv_list_lock);
}
EXPORT_SYMBOL_GPL(elv_unregister);

/*
 * switch to new_e io scheduler. be careful not to introduce deadlocks -
 * we don't free the old io scheduler before we have allocated what we
 * need for the new one. this way we have a chance of going back to the
 * old one, if the new one fails init for some reason.
 */
static void elevator_switch(request_queue_t *q, struct elevator_type *new_e)
{
	elevator_t *old_elevator, *e;

	/*
	 * Allocate new elevator
	 */
	e = kmalloc(sizeof(elevator_t), GFP_KERNEL);
	if (!e)
		goto error;

	/*
	 * Turn on BYPASS and drain all requests w/ elevator private data
	 */
	spin_lock_irq(q->queue_lock);

	set_bit(QUEUE_FLAG_ELVSWITCH, &q->queue_flags);

	while (q->elevator->ops->elevator_dispatch_fn(q, 1))
		;

	while (q->rq.elvpriv) {
		spin_unlock_irq(q->queue_lock);
		msleep(10);
		spin_lock_irq(q->queue_lock);
	}

	spin_unlock_irq(q->queue_lock);

	/*
	 * unregister old elevator data
	 */
	elv_unregister_queue(q);
	old_elevator = q->elevator;

	/*
	 * attach and start new elevator
	 */
	if (elevator_attach(q, new_e, e))
		goto fail;

	if (elv_register_queue(q))
		goto fail_register;

	/*
	 * finally exit old elevator and turn off BYPASS.
	 */
	elevator_exit(old_elevator);
	clear_bit(QUEUE_FLAG_ELVSWITCH, &q->queue_flags);
	return;

fail_register:
	/*
	 * switch failed, exit the new io scheduler and reattach the old
	 * one again (along with re-adding the sysfs dir)
	 */
	elevator_exit(e);
	e = NULL;
fail:
	q->elevator = old_elevator;
	elv_register_queue(q);
	clear_bit(QUEUE_FLAG_ELVSWITCH, &q->queue_flags);
	kfree(e);
error:
	elevator_put(new_e);
	printk(KERN_ERR "elevator: switch to %s failed\n", new_e->elevator_name);
}

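/*
 * sysfs handlers for the queue's scheduler attribute: store switches
 * to the named elevator at runtime, show lists the registered
 * elevators with the active one in brackets
 */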
ssize_t elv_iosched_store(request_queue_t *q, const char *name, size_t count)
{
	char elevator_name[ELV_NAME_MAX];
	struct elevator_type *e;

	memset(elevator_name, 0, sizeof(elevator_name));
	strncpy(elevator_name, name, sizeof(elevator_name) - 1);

	if (elevator_name[strlen(elevator_name) - 1] == '\n')
		elevator_name[strlen(elevator_name) - 1] = '\0';

	e = elevator_get(elevator_name);
	if (!e) {
		printk(KERN_ERR "elevator: type %s not found\n", elevator_name);
		return -EINVAL;
	}

	if (!strcmp(elevator_name, q->elevator->elevator_type->elevator_name)) {
		elevator_put(e);
		return count;
	}

	elevator_switch(q, e);
	return count;
}

ssize_t elv_iosched_show(request_queue_t *q, char *name)
{
	elevator_t *e = q->elevator;
	struct elevator_type *elv = e->elevator_type;
	struct list_head *entry;
	int len = 0;

	spin_lock_irq(q->queue_lock);
	list_for_each(entry, &elv_list) {
		struct elevator_type *__e;

		__e = list_entry(entry, struct elevator_type, list);
		if (!strcmp(elv->elevator_name, __e->elevator_name))
			len += sprintf(name+len, "[%s] ", elv->elevator_name);
		else
			len += sprintf(name+len, "%s ", __e->elevator_name);
	}
	spin_unlock_irq(q->queue_lock);

	len += sprintf(name+len, "\n");
	return len;
}

EXPORT_SYMBOL(elv_dispatch_sort);
EXPORT_SYMBOL(elv_add_request);
EXPORT_SYMBOL(__elv_add_request);
EXPORT_SYMBOL(elv_requeue_request);
EXPORT_SYMBOL(elv_next_request);
EXPORT_SYMBOL(elv_dequeue_request);
EXPORT_SYMBOL(elv_queue_empty);
EXPORT_SYMBOL(elv_completed_request);
EXPORT_SYMBOL(elevator_exit);
EXPORT_SYMBOL(elevator_init);