/*
 *  Block device elevator/IO-scheduler.
 *
 *  Copyright (C) 2000 Andrea Arcangeli <andrea@suse.de> SuSE
 *
 * 30042000 Jens Axboe <axboe@suse.de> :
 *
 * Split the elevator a bit so that it is possible to choose a different
 * one or even write a new "plug in". There are three pieces:
 * - elevator_fn, inserts a new request in the queue list
 * - elevator_merge_fn, decides whether a new buffer can be merged with
 *   an existing request
 * - elevator_dequeue_fn, called when a request is taken off the active list
 *
 * 20082000 Dave Jones <davej@suse.de> :
 * Removed tests for max-bomb-segments, which was breaking elvtune
 * when run without -bN
 *
 * Jens:
 * - Rework again to work with bio instead of buffer_heads
 * - lose bi_dev comparisons, partition handling is right now
 * - completely modularize elevator setup and teardown
 *
 */
#include <linux/kernel.h>
#include <linux/fs.h>
#include <linux/blkdev.h>
#include <linux/elevator.h>
#include <linux/bio.h>
#include <linux/config.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/init.h>
#include <linux/compiler.h>
#include <linux/delay.h>
#include <linux/blktrace_api.h>

#include <asm/uaccess.h>

static DEFINE_SPINLOCK(elv_list_lock);
static LIST_HEAD(elv_list);

/*
 * can we safely merge with this request?
 */
inline int elv_rq_merge_ok(struct request *rq, struct bio *bio)
{
        if (!rq_mergeable(rq))
                return 0;

        /*
         * different data direction, don't merge
         */
        if (bio_data_dir(bio) != rq_data_dir(rq))
                return 0;

        /*
         * same device and no special stuff set, merge is ok
         */
        if (rq->rq_disk == bio->bi_bdev->bd_disk &&
            !rq->waiting && !rq->special)
                return 1;

        return 0;
}
EXPORT_SYMBOL(elv_rq_merge_ok);

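/*
 * Decide how bio could be merged with __rq: a back merge appends bio
 * behind __rq (__rq ends exactly where bio starts), a front merge
 * prepends it (bio ends exactly where __rq starts). Only sector
 * adjacency is checked here; elv_rq_merge_ok() has already validated
 * everything else.
 */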
static inline int elv_try_merge(struct request *__rq, struct bio *bio)
{
        int ret = ELEVATOR_NO_MERGE;

        /*
         * merging is allowed, check whether bio is adjacent to __rq
         * on either end
         */
        if (elv_rq_merge_ok(__rq, bio)) {
                if (__rq->sector + __rq->nr_sectors == bio->bi_sector)
                        ret = ELEVATOR_BACK_MERGE;
                else if (__rq->sector - bio_sectors(bio) == bio->bi_sector)
                        ret = ELEVATOR_FRONT_MERGE;
        }

        return ret;
}

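/*
 * Look up a registered elevator type by name. The caller must hold
 * elv_list_lock; a match is returned without taking a module
 * reference (elevator_get() below does both).
 */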
static struct elevator_type *elevator_find(const char *name)
{
        struct elevator_type *e = NULL;
        struct list_head *entry;

        list_for_each(entry, &elv_list) {
                struct elevator_type *__e;

                __e = list_entry(entry, struct elevator_type, list);

                if (!strcmp(__e->elevator_name, name)) {
                        e = __e;
                        break;
                }
        }

        return e;
}

static void elevator_put(struct elevator_type *e)
{
        module_put(e->elevator_owner);
}

static struct elevator_type *elevator_get(const char *name)
{
        struct elevator_type *e;

        spin_lock_irq(&elv_list_lock);

        e = elevator_find(name);
        if (e && !try_module_get(e->elevator_owner))
                e = NULL;

        spin_unlock_irq(&elv_list_lock);

        return e;
}

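/*
 * Attach an allocated elevator_queue to the request queue and run the
 * scheduler's init hook, which typically sets up its per-queue data.
 */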
static int elevator_attach(request_queue_t *q, struct elevator_queue *eq)
{
        int ret = 0;

        q->elevator = eq;

        if (eq->ops->elevator_init_fn)
                ret = eq->ops->elevator_init_fn(q, eq);

        return ret;
}

static char chosen_elevator[16];

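/*
 * Parse the "elevator=" boot parameter; e.g. booting with
 *
 *	elevator=deadline
 *
 * makes deadline the default I/O scheduler for all queues. "as" is
 * accepted as a legacy alias for "anticipatory".
 */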
static int __init elevator_setup(char *str)
{
        /*
         * Be backwards-compatible with previous kernels, so users
         * won't get the wrong elevator.
         */
        if (!strcmp(str, "as"))
                strcpy(chosen_elevator, "anticipatory");
        else
                strncpy(chosen_elevator, str, sizeof(chosen_elevator) - 1);
        return 1;
}

__setup("elevator=", elevator_setup);

static struct kobj_type elv_ktype;

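/*
 * Allocate and initialize an elevator_queue for type e. The embedded
 * kobject appears in sysfs as "iosched" under the queue directory. On
 * allocation failure the module reference the caller took via
 * elevator_get() is dropped here.
 */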
static elevator_t *elevator_alloc(struct elevator_type *e)
{
        elevator_t *eq = kmalloc(sizeof(elevator_t), GFP_KERNEL);
        if (eq) {
                memset(eq, 0, sizeof(*eq));
                eq->ops = &e->ops;
                eq->elevator_type = e;
                kobject_init(&eq->kobj);
                snprintf(eq->kobj.name, KOBJ_NAME_LEN, "%s", "iosched");
                eq->kobj.ktype = &elv_ktype;
                mutex_init(&eq->sysfs_lock);
        } else {
                elevator_put(e);
        }
        return eq;
}

static void elevator_release(struct kobject *kobj)
{
        elevator_t *e = container_of(kobj, elevator_t, kobj);
        elevator_put(e->elevator_type);
        kfree(e);
}

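/*
 * Set up the initial elevator for a new queue. Selection order: the
 * name the caller asked for, then the "elevator=" boot parameter,
 * then CONFIG_DEFAULT_IOSCHED, and finally noop as the last resort.
 */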
int elevator_init(request_queue_t *q, char *name)
{
        struct elevator_type *e = NULL;
        struct elevator_queue *eq;
        int ret = 0;

        INIT_LIST_HEAD(&q->queue_head);
        q->last_merge = NULL;
        q->end_sector = 0;
        q->boundary_rq = NULL;

        if (name && !(e = elevator_get(name)))
                return -EINVAL;

        if (!e && *chosen_elevator && !(e = elevator_get(chosen_elevator)))
                printk(KERN_ERR "I/O scheduler %s not found\n", chosen_elevator);

        if (!e && !(e = elevator_get(CONFIG_DEFAULT_IOSCHED))) {
                printk(KERN_ERR "Default I/O scheduler not found, using noop\n");
                e = elevator_get("noop");
        }

        eq = elevator_alloc(e);
        if (!eq)
                return -ENOMEM;

        ret = elevator_attach(q, eq);
        if (ret)
                kobject_put(&eq->kobj);

        return ret;
}

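/*
 * Tear down an elevator: run the scheduler's exit hook under the sysfs
 * lock (so concurrent sysfs accesses see ops == NULL and fail with
 * -ENOENT), then drop the kobject reference; elevator_release() does
 * the actual freeing.
 */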
void elevator_exit(elevator_t *e)
{
        mutex_lock(&e->sysfs_lock);
        if (e->ops->elevator_exit_fn)
                e->ops->elevator_exit_fn(e);
        e->ops = NULL;
        mutex_unlock(&e->sysfs_lock);

        kobject_put(&e->kobj);
}

/*
 * Insert rq into the dispatch queue of q. The queue lock must be held
 * on entry. rq is sort-inserted in ascending sector order, scanning
 * the dispatch queue back to front and honouring the q->end_sector
 * boundary. To be used by specific elevators.
 */
void elv_dispatch_sort(request_queue_t *q, struct request *rq)
{
        sector_t boundary;
        struct list_head *entry;

        if (q->last_merge == rq)
                q->last_merge = NULL;
        q->nr_sorted--;

        boundary = q->end_sector;

        list_for_each_prev(entry, &q->queue_head) {
                struct request *pos = list_entry_rq(entry);

                if (pos->flags & (REQ_SOFTBARRIER|REQ_HARDBARRIER|REQ_STARTED))
                        break;
                if (rq->sector >= boundary) {
                        if (pos->sector < boundary)
                                continue;
                } else {
                        if (pos->sector >= boundary)
                                break;
                }
                if (rq->sector >= pos->sector)
                        break;
        }

        list_add(&rq->queuelist, entry);
}

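/*
 * Find a request that bio may be merged into. The cached q->last_merge
 * hint is tried first to catch the common sequential case cheaply;
 * only on a miss is the scheduler's own merge lookup consulted.
 */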
int elv_merge(request_queue_t *q, struct request **req, struct bio *bio)
{
        elevator_t *e = q->elevator;
        int ret;

        if (q->last_merge) {
                ret = elv_try_merge(q->last_merge, bio);
                if (ret != ELEVATOR_NO_MERGE) {
                        *req = q->last_merge;
                        return ret;
                }
        }

        if (e->ops->elevator_merge_fn)
                return e->ops->elevator_merge_fn(q, req, bio);

        return ELEVATOR_NO_MERGE;
}

void elv_merged_request(request_queue_t *q, struct request *rq)
{
        elevator_t *e = q->elevator;

        if (e->ops->elevator_merged_fn)
                e->ops->elevator_merged_fn(q, rq);

        q->last_merge = rq;
}

void elv_merge_requests(request_queue_t *q, struct request *rq,
                        struct request *next)
{
        elevator_t *e = q->elevator;

        if (e->ops->elevator_merge_req_fn)
                e->ops->elevator_merge_req_fn(q, rq, next);
        q->nr_sorted--;

        q->last_merge = rq;
}

void elv_requeue_request(request_queue_t *q, struct request *rq)
{
        elevator_t *e = q->elevator;

        /*
         * it has already been through dequeue once, so we need to
         * decrement the in_flight count again
         */
        if (blk_account_rq(rq)) {
                q->in_flight--;
                if (blk_sorted_rq(rq) && e->ops->elevator_deactivate_req_fn)
                        e->ops->elevator_deactivate_req_fn(q, rq);
        }

        rq->flags &= ~REQ_STARTED;

        elv_insert(q, rq, ELEVATOR_INSERT_REQUEUE);
}

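/*
 * Force the scheduler to dispatch everything it is holding back. If
 * sorted requests remain accounted afterwards, the scheduler's forced
 * dispatch is broken; complain, but at most ten times.
 */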
static void elv_drain_elevator(request_queue_t *q)
{
        static int printed;
        while (q->elevator->ops->elevator_dispatch_fn(q, 1))
                ;
        if (q->nr_sorted == 0)
                return;
        if (printed++ < 10) {
                printk(KERN_ERR "%s: forced dispatching is broken "
                       "(nr_sorted=%u), please report this\n",
                       q->elevator->elevator_type->elevator_name, q->nr_sorted);
        }
}

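/*
 * Place rq according to "where": INSERT_FRONT/INSERT_BACK go straight
 * onto the dispatch list, INSERT_SORT hands the request to the
 * scheduler, and INSERT_REQUEUE front-inserts, or slots by sequence
 * number while an ordered flush is in progress. Queue lock must be
 * held.
 */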
void elv_insert(request_queue_t *q, struct request *rq, int where)
{
        struct list_head *pos;
        unsigned ordseq;

        blk_add_trace_rq(q, rq, BLK_TA_INSERT);

        rq->q = q;

        switch (where) {
        case ELEVATOR_INSERT_FRONT:
                rq->flags |= REQ_SOFTBARRIER;

                list_add(&rq->queuelist, &q->queue_head);
                break;

        case ELEVATOR_INSERT_BACK:
                rq->flags |= REQ_SOFTBARRIER;
                elv_drain_elevator(q);
                list_add_tail(&rq->queuelist, &q->queue_head);
                /*
                 * We kick the queue here for the following reasons.
                 * - The elevator might have returned NULL previously
                 *   to delay requests and returned them now.  As the
                 *   queue wasn't empty before this request, ll_rw_blk
                 *   won't run the queue on return, resulting in a hang.
                 * - Usually, back inserted requests won't be merged
                 *   with anything.  There's no point in delaying queue
                 *   processing.
                 */
                blk_remove_plug(q);
                q->request_fn(q);
                break;

        case ELEVATOR_INSERT_SORT:
                BUG_ON(!blk_fs_request(rq));
                rq->flags |= REQ_SORTED;
                q->nr_sorted++;
                if (q->last_merge == NULL && rq_mergeable(rq))
                        q->last_merge = rq;
                /*
                 * Some ioscheds (cfq) run q->request_fn directly, so
                 * rq cannot be accessed after calling
                 * elevator_add_req_fn.
                 */
                q->elevator->ops->elevator_add_req_fn(q, rq);
                break;

        case ELEVATOR_INSERT_REQUEUE:
                /*
                 * If ordered flush isn't in progress, we do front
                 * insertion; otherwise, requests should be requeued
                 * in ordseq order.
                 */
                rq->flags |= REQ_SOFTBARRIER;

                if (q->ordseq == 0) {
                        list_add(&rq->queuelist, &q->queue_head);
                        break;
                }

                ordseq = blk_ordered_req_seq(rq);

                list_for_each(pos, &q->queue_head) {
                        struct request *pos_rq = list_entry_rq(pos);
                        if (ordseq <= blk_ordered_req_seq(pos_rq))
                                break;
                }

                list_add_tail(&rq->queuelist, pos);
                break;

        default:
                printk(KERN_ERR "%s: bad insertion point %d\n",
                       __FUNCTION__, where);
                BUG();
        }

        if (blk_queue_plugged(q)) {
                int nrq = q->rq.count[READ] + q->rq.count[WRITE]
                          - q->in_flight;

                if (nrq >= q->unplug_thresh)
                        __generic_unplug_device(q);
        }
}

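/*
 * Main entry point for request insertion. Barrier requests toggle the
 * ordered color, are forced to back insertion, and update the
 * scheduling boundary; requests without elevator private data bypass
 * the scheduler (they are back-inserted instead of sorted).
 */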
void __elv_add_request(request_queue_t *q, struct request *rq, int where,
                       int plug)
{
        if (q->ordcolor)
                rq->flags |= REQ_ORDERED_COLOR;

        if (rq->flags & (REQ_SOFTBARRIER | REQ_HARDBARRIER)) {
                /*
                 * toggle ordered color
                 */
                if (blk_barrier_rq(rq))
                        q->ordcolor ^= 1;

                /*
                 * barriers implicitly indicate back insertion
                 */
                if (where == ELEVATOR_INSERT_SORT)
                        where = ELEVATOR_INSERT_BACK;

                /*
                 * this request is a scheduling boundary, update
                 * end_sector
                 */
                if (blk_fs_request(rq)) {
                        q->end_sector = rq_end_sector(rq);
                        q->boundary_rq = rq;
                }
        } else if (!(rq->flags & REQ_ELVPRIV) && where == ELEVATOR_INSERT_SORT)
                where = ELEVATOR_INSERT_BACK;

        if (plug)
                blk_plug_device(q);

        elv_insert(q, rq, where);
}

void elv_add_request(request_queue_t *q, struct request *rq, int where,
                     int plug)
{
        unsigned long flags;

        spin_lock_irqsave(q->queue_lock, flags);
        __elv_add_request(q, rq, where, plug);
        spin_unlock_irqrestore(q->queue_lock, flags);
}

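/*
 * Fetch the next request for the driver: take the head of the dispatch
 * queue if blk_do_ordered() lets it through (it may sequence barriers
 * instead), otherwise ask the scheduler to dispatch more. Returns NULL
 * once both the dispatch queue and the scheduler are empty.
 */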
static inline struct request *__elv_next_request(request_queue_t *q)
{
        struct request *rq;

        while (1) {
                while (!list_empty(&q->queue_head)) {
                        rq = list_entry_rq(q->queue_head.next);
                        if (blk_do_ordered(q, &rq))
                                return rq;
                }

                if (!q->elevator->ops->elevator_dispatch_fn(q, 0))
                        return NULL;
        }
}

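/*
 * Hand the next request to the driver, running the prep function on
 * first sight of each request. BLKPREP_OK passes the request through,
 * BLKPREP_DEFER leaves it at the head and returns NULL (typically
 * because resources are exhausted), and BLKPREP_KILL completes the
 * request immediately with an error.
 */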
struct request *elv_next_request(request_queue_t *q)
{
        struct request *rq;
        int ret;

        while ((rq = __elv_next_request(q)) != NULL) {
                if (!(rq->flags & REQ_STARTED)) {
                        elevator_t *e = q->elevator;

                        /*
                         * This is the first time the device driver
                         * sees this request (possibly after
                         * requeueing).  Notify IO scheduler.
                         */
                        if (blk_sorted_rq(rq) &&
                            e->ops->elevator_activate_req_fn)
                                e->ops->elevator_activate_req_fn(q, rq);

                        /*
                         * just mark as started even if we don't start
                         * it, a request that has been delayed should
                         * not be passed by new incoming requests
                         */
                        rq->flags |= REQ_STARTED;
                        blk_add_trace_rq(q, rq, BLK_TA_ISSUE);
                }

                if (!q->boundary_rq || q->boundary_rq == rq) {
                        q->end_sector = rq_end_sector(rq);
                        q->boundary_rq = NULL;
                }

                if ((rq->flags & REQ_DONTPREP) || !q->prep_rq_fn)
                        break;

                ret = q->prep_rq_fn(q, rq);
                if (ret == BLKPREP_OK) {
                        break;
                } else if (ret == BLKPREP_DEFER) {
                        /*
                         * the request may have been (partially) prepped.
                         * we need to keep this request in the front to
                         * avoid resource deadlock.  REQ_STARTED will
                         * prevent other fs requests from passing this one.
                         */
                        rq = NULL;
                        break;
                } else if (ret == BLKPREP_KILL) {
                        int nr_bytes = rq->hard_nr_sectors << 9;

                        if (!nr_bytes)
                                nr_bytes = rq->data_len;

                        blkdev_dequeue_request(rq);
                        rq->flags |= REQ_QUIET;
                        end_that_request_chunk(rq, 0, nr_bytes);
                        end_that_request_last(rq, 0);
                } else {
                        printk(KERN_ERR "%s: bad return=%d\n", __FUNCTION__,
                               ret);
                        break;
                }
        }

        return rq;
}

void elv_dequeue_request(request_queue_t *q, struct request *rq)
{
        BUG_ON(list_empty(&rq->queuelist));

        list_del_init(&rq->queuelist);

        /*
         * the time frame between a request being removed from the lists
         * and it being freed is accounted as io that is in progress at
         * the driver side.
         */
        if (blk_account_rq(rq))
                q->in_flight++;
}

int elv_queue_empty(request_queue_t *q)
{
        elevator_t *e = q->elevator;

        if (!list_empty(&q->queue_head))
                return 0;

        if (e->ops->elevator_queue_empty_fn)
                return e->ops->elevator_queue_empty_fn(q);

        return 1;
}

struct request *elv_latter_request(request_queue_t *q, struct request *rq)
{
        elevator_t *e = q->elevator;

        if (e->ops->elevator_latter_req_fn)
                return e->ops->elevator_latter_req_fn(q, rq);
        return NULL;
}

struct request *elv_former_request(request_queue_t *q, struct request *rq)
{
        elevator_t *e = q->elevator;

        if (e->ops->elevator_former_req_fn)
                return e->ops->elevator_former_req_fn(q, rq);
        return NULL;
}

int elv_set_request(request_queue_t *q, struct request *rq, struct bio *bio,
                    gfp_t gfp_mask)
{
        elevator_t *e = q->elevator;

        if (e->ops->elevator_set_req_fn)
                return e->ops->elevator_set_req_fn(q, rq, bio, gfp_mask);

        rq->elevator_private = NULL;
        return 0;
}

void elv_put_request(request_queue_t *q, struct request *rq)
{
        elevator_t *e = q->elevator;

        if (e->ops->elevator_put_req_fn)
                e->ops->elevator_put_req_fn(q, rq);
}

int elv_may_queue(request_queue_t *q, int rw, struct bio *bio)
{
        elevator_t *e = q->elevator;

        if (e->ops->elevator_may_queue_fn)
                return e->ops->elevator_may_queue_fn(q, rw, bio);

        return ELV_MQUEUE_MAY;
}

void elv_completed_request(request_queue_t *q, struct request *rq)
{
        elevator_t *e = q->elevator;

        /*
         * the request was released by the driver, so its io must be done
         */
        if (blk_account_rq(rq)) {
                q->in_flight--;
                if (blk_sorted_rq(rq) && e->ops->elevator_completed_req_fn)
                        e->ops->elevator_completed_req_fn(q, rq);
        }

        /*
         * Check if the queue is waiting for fs requests to be
         * drained for flush sequence.
         */
        if (unlikely(q->ordseq)) {
                struct request *first_rq = list_entry_rq(q->queue_head.next);
                if (q->in_flight == 0 &&
                    blk_ordered_cur_seq(q) == QUEUE_ORDSEQ_DRAIN &&
                    blk_ordered_req_seq(first_rq) > QUEUE_ORDSEQ_DRAIN) {
                        blk_ordered_complete_seq(q, QUEUE_ORDSEQ_DRAIN, 0);
                        q->request_fn(q);
                }
        }
}

#define to_elv(atr) container_of((atr), struct elv_fs_entry, attr)

static ssize_t
elv_attr_show(struct kobject *kobj, struct attribute *attr, char *page)
{
        elevator_t *e = container_of(kobj, elevator_t, kobj);
        struct elv_fs_entry *entry = to_elv(attr);
        ssize_t error;

        if (!entry->show)
                return -EIO;

        mutex_lock(&e->sysfs_lock);
        error = e->ops ? entry->show(e, page) : -ENOENT;
        mutex_unlock(&e->sysfs_lock);
        return error;
}

static ssize_t
elv_attr_store(struct kobject *kobj, struct attribute *attr,
               const char *page, size_t length)
{
        elevator_t *e = container_of(kobj, elevator_t, kobj);
        struct elv_fs_entry *entry = to_elv(attr);
        ssize_t error;

        if (!entry->store)
                return -EIO;

        mutex_lock(&e->sysfs_lock);
        error = e->ops ? entry->store(e, page, length) : -ENOENT;
        mutex_unlock(&e->sysfs_lock);
        return error;
}

static struct sysfs_ops elv_sysfs_ops = {
        .show   = elv_attr_show,
        .store  = elv_attr_store,
};

static struct kobj_type elv_ktype = {
        .sysfs_ops      = &elv_sysfs_ops,
        .release        = elevator_release,
};

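/*
 * Add the elevator's kobject under the queue's sysfs directory and
 * create one file per entry in the scheduler's elevator_attrs table,
 * stopping at the first failure.
 */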
int elv_register_queue(struct request_queue *q)
{
        elevator_t *e = q->elevator;
        int error;

        e->kobj.parent = &q->kobj;

        error = kobject_add(&e->kobj);
        if (!error) {
                struct elv_fs_entry *attr = e->elevator_type->elevator_attrs;
                if (attr) {
                        while (attr->attr.name) {
                                if (sysfs_create_file(&e->kobj, &attr->attr))
                                        break;
                                attr++;
                        }
                }
                kobject_uevent(&e->kobj, KOBJ_ADD);
        }
        return error;
}

void elv_unregister_queue(struct request_queue *q)
{
        if (q) {
                elevator_t *e = q->elevator;
                kobject_uevent(&e->kobj, KOBJ_REMOVE);
                kobject_del(&e->kobj);
        }
}

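/*
 * Register a new I/O scheduler type. A minimal scheduler module would
 * look roughly like the sketch below (illustrative names, modelled on
 * noop-iosched; the two ops shown are the ones this file calls
 * unconditionally):
 *
 *	static struct elevator_type elevator_myiosched = {
 *		.ops = {
 *			.elevator_dispatch_fn	= myiosched_dispatch,
 *			.elevator_add_req_fn	= myiosched_add_request,
 *		},
 *		.elevator_name	= "myiosched",
 *		.elevator_owner	= THIS_MODULE,
 *	};
 *
 *	static int __init myiosched_init(void)
 *	{
 *		return elv_register(&elevator_myiosched);
 *	}
 *	module_init(myiosched_init);
 */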
int elv_register(struct elevator_type *e)
{
        spin_lock_irq(&elv_list_lock);
        BUG_ON(elevator_find(e->elevator_name));
        list_add_tail(&e->list, &elv_list);
        spin_unlock_irq(&elv_list_lock);

        printk(KERN_INFO "io scheduler %s registered", e->elevator_name);
        if (!strcmp(e->elevator_name, chosen_elevator) ||
            (!*chosen_elevator &&
             !strcmp(e->elevator_name, CONFIG_DEFAULT_IOSCHED)))
                printk(" (default)");
        printk("\n");
        return 0;
}
EXPORT_SYMBOL_GPL(elv_register);

void elv_unregister(struct elevator_type *e)
{
        struct task_struct *g, *p;

        /*
         * Iterate over every thread in the system and trim its
         * io_context, dropping this scheduler's per-task state.
         */
        if (e->ops.trim) {
                read_lock(&tasklist_lock);
                do_each_thread(g, p) {
                        task_lock(p);
                        e->ops.trim(p->io_context);
                        task_unlock(p);
                } while_each_thread(g, p);
                read_unlock(&tasklist_lock);
        }

        spin_lock_irq(&elv_list_lock);
        list_del_init(&e->list);
        spin_unlock_irq(&elv_list_lock);
}
EXPORT_SYMBOL_GPL(elv_unregister);

/*
 * switch to new_e io scheduler. be careful not to introduce deadlocks -
 * we don't free the old io scheduler before we have allocated what we
 * need for the new one. this way we have a chance of going back to the
 * old one, if the new one fails init for some reason.
 */
static int elevator_switch(request_queue_t *q, struct elevator_type *new_e)
{
        elevator_t *old_elevator, *e;

        /*
         * Allocate new elevator
         */
        e = elevator_alloc(new_e);
        if (!e)
                return 0;

        /*
         * Turn on BYPASS and drain all requests w/ elevator private data
         */
        spin_lock_irq(q->queue_lock);

        set_bit(QUEUE_FLAG_ELVSWITCH, &q->queue_flags);

        elv_drain_elevator(q);

        while (q->rq.elvpriv) {
                blk_remove_plug(q);
                q->request_fn(q);
                spin_unlock_irq(q->queue_lock);
                msleep(10);
                spin_lock_irq(q->queue_lock);
                elv_drain_elevator(q);
        }

        spin_unlock_irq(q->queue_lock);

        /*
         * unregister old elevator data
         */
        elv_unregister_queue(q);
        old_elevator = q->elevator;

        /*
         * attach and start new elevator
         */
        if (elevator_attach(q, e))
                goto fail;

        if (elv_register_queue(q))
                goto fail_register;

        /*
         * finally exit old elevator and turn off BYPASS.
         */
        elevator_exit(old_elevator);
        clear_bit(QUEUE_FLAG_ELVSWITCH, &q->queue_flags);
        return 1;

fail_register:
        /*
         * switch failed, exit the new io scheduler and reattach the old
         * one again (along with re-adding the sysfs dir)
         */
        elevator_exit(e);
        e = NULL;
fail:
        q->elevator = old_elevator;
        elv_register_queue(q);
        clear_bit(QUEUE_FLAG_ELVSWITCH, &q->queue_flags);
        if (e)
                kobject_put(&e->kobj);
        return 0;
}

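/*
 * sysfs store handler for the queue's scheduler attribute; writing a
 * scheduler name switches the queue over to it, e.g.
 *
 *	echo deadline > /sys/block/sda/queue/scheduler
 *
 * (sda is just an example device). A trailing newline is stripped.
 */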
ssize_t elv_iosched_store(request_queue_t *q, const char *name, size_t count)
{
        char elevator_name[ELV_NAME_MAX];
        size_t len;
        struct elevator_type *e;

        elevator_name[sizeof(elevator_name) - 1] = '\0';
        strncpy(elevator_name, name, sizeof(elevator_name) - 1);
        len = strlen(elevator_name);

        if (len && elevator_name[len - 1] == '\n')
                elevator_name[len - 1] = '\0';

        e = elevator_get(elevator_name);
        if (!e) {
                printk(KERN_ERR "elevator: type %s not found\n", elevator_name);
                return -EINVAL;
        }

        if (!strcmp(elevator_name, q->elevator->elevator_type->elevator_name)) {
                elevator_put(e);
                return count;
        }

        if (!elevator_switch(q, e))
                printk(KERN_ERR "elevator: switch to %s failed\n", elevator_name);
        return count;
}

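/*
 * sysfs show handler: list all registered schedulers, with the active
 * one bracketed, e.g. "noop anticipatory deadline [cfq]".
 */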
ssize_t elv_iosched_show(request_queue_t *q, char *name)
{
        elevator_t *e = q->elevator;
        struct elevator_type *elv = e->elevator_type;
        struct list_head *entry;
        int len = 0;

        spin_lock_irq(q->queue_lock);
        list_for_each(entry, &elv_list) {
                struct elevator_type *__e;

                __e = list_entry(entry, struct elevator_type, list);
                if (!strcmp(elv->elevator_name, __e->elevator_name))
                        len += sprintf(name+len, "[%s] ", elv->elevator_name);
                else
                        len += sprintf(name+len, "%s ", __e->elevator_name);
        }
        spin_unlock_irq(q->queue_lock);

        len += sprintf(name+len, "\n");
        return len;
}

EXPORT_SYMBOL(elv_dispatch_sort);
EXPORT_SYMBOL(elv_add_request);
EXPORT_SYMBOL(__elv_add_request);
EXPORT_SYMBOL(elv_requeue_request);
EXPORT_SYMBOL(elv_next_request);
EXPORT_SYMBOL(elv_dequeue_request);
EXPORT_SYMBOL(elv_queue_empty);
EXPORT_SYMBOL(elv_completed_request);
EXPORT_SYMBOL(elevator_exit);
EXPORT_SYMBOL(elevator_init);