block/elevator.c (mirror_ubuntu-hirsute-kernel.git, blob at commit "Remove ->waiting member from struct request")
1 /*
2 * Block device elevator/IO-scheduler.
3 *
4 * Copyright (C) 2000 Andrea Arcangeli <andrea@suse.de> SuSE
5 *
6 * 30042000 Jens Axboe <axboe@suse.de> :
7 *
8 * Split the elevator a bit so that it is possible to choose a different
9 * one or even write a new "plug in". There are three pieces:
10 * - elevator_fn, inserts a new request in the queue list
11 * - elevator_merge_fn, decides whether a new buffer can be merged with
12 * an existing request
13 * - elevator_dequeue_fn, called when a request is taken off the active list
14 *
15 * 20082000 Dave Jones <davej@suse.de> :
16 * Removed tests for max-bomb-segments, which was breaking elvtune
17 * when run without -bN
18 *
19 * Jens:
20 * - Rework again to work with bio instead of buffer_heads
21 * - lose bi_dev comparisons, partition handling is now correct
22 * - completely modularize elevator setup and teardown
23 *
24 */
25 #include <linux/kernel.h>
26 #include <linux/fs.h>
27 #include <linux/blkdev.h>
28 #include <linux/elevator.h>
29 #include <linux/bio.h>
30 #include <linux/module.h>
31 #include <linux/slab.h>
32 #include <linux/init.h>
33 #include <linux/compiler.h>
34 #include <linux/delay.h>
35 #include <linux/blktrace_api.h>
36 #include <linux/hash.h>
37
38 #include <asm/uaccess.h>
39
40 static DEFINE_SPINLOCK(elv_list_lock);
41 static LIST_HEAD(elv_list);
42
43 /*
44 * Merge hash stuff.
45 */
46 static const int elv_hash_shift = 6;
47 #define ELV_HASH_BLOCK(sec) ((sec) >> 3)
48 #define ELV_HASH_FN(sec) (hash_long(ELV_HASH_BLOCK((sec)), elv_hash_shift))
49 #define ELV_HASH_ENTRIES (1 << elv_hash_shift)
50 #define rq_hash_key(rq) ((rq)->sector + (rq)->nr_sectors)
51 #define ELV_ON_HASH(rq) (!hlist_unhashed(&(rq)->hash))
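
/*
 * Worked example of the hashing scheme above (illustrative only): a
 * request with ->sector == 1024 and ->nr_sectors == 8 covers sectors
 * 1024..1031, so rq_hash_key() evaluates to 1032. ELV_HASH_BLOCK()
 * drops the low three bits (1032 >> 3 == 129) and ELV_HASH_FN() hashes
 * that into one of the 64 buckets. A later bio with bi_sector == 1032
 * can then find this request as a back merge candidate through
 * elv_rqhash_find() below.
 */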
52
53 /*
54 * can we safely merge with this request?
55 */
56 inline int elv_rq_merge_ok(struct request *rq, struct bio *bio)
57 {
58 if (!rq_mergeable(rq))
59 return 0;
60
61 /*
62 * different data direction or already started, don't merge
63 */
64 if (bio_data_dir(bio) != rq_data_dir(rq))
65 return 0;
66
67 /*
68 * same device and no special stuff set, merge is ok
69 */
70 if (rq->rq_disk == bio->bi_bdev->bd_disk && !rq->special)
71 return 1;
72
73 return 0;
74 }
75 EXPORT_SYMBOL(elv_rq_merge_ok);
76
77 static inline int elv_try_merge(struct request *__rq, struct bio *bio)
78 {
79 int ret = ELEVATOR_NO_MERGE;
80
81 /*
82 * we can merge and sequence is ok, check if it's possible
83 */
84 if (elv_rq_merge_ok(__rq, bio)) {
85 if (__rq->sector + __rq->nr_sectors == bio->bi_sector)
86 ret = ELEVATOR_BACK_MERGE;
87 else if (__rq->sector - bio_sectors(bio) == bio->bi_sector)
88 ret = ELEVATOR_FRONT_MERGE;
89 }
90
91 return ret;
92 }
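
/*
 * Worked example (illustrative only): if __rq has ->sector == 100 and
 * ->nr_sectors == 8, it covers sectors 100..107. A bio with
 * bi_sector == 108 satisfies 100 + 8 == 108 and is a back merge; an
 * 8-sector bio with bi_sector == 92 satisfies 100 - 8 == 92 and is a
 * front merge. Anything else yields ELEVATOR_NO_MERGE.
 */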
93
94 static struct elevator_type *elevator_find(const char *name)
95 {
96 struct elevator_type *e = NULL;
97 struct list_head *entry;
98
99 list_for_each(entry, &elv_list) {
100 struct elevator_type *__e;
101
102 __e = list_entry(entry, struct elevator_type, list);
103
104 if (!strcmp(__e->elevator_name, name)) {
105 e = __e;
106 break;
107 }
108 }
109
110 return e;
111 }
112
113 static void elevator_put(struct elevator_type *e)
114 {
115 module_put(e->elevator_owner);
116 }
117
118 static struct elevator_type *elevator_get(const char *name)
119 {
120 struct elevator_type *e;
121
122 spin_lock_irq(&elv_list_lock);
123
124 e = elevator_find(name);
125 if (e && !try_module_get(e->elevator_owner))
126 e = NULL;
127
128 spin_unlock_irq(&elv_list_lock);
129
130 return e;
131 }
132
133 static void *elevator_init_queue(request_queue_t *q, struct elevator_queue *eq)
134 {
135 return eq->ops->elevator_init_fn(q, eq);
136 }
137
138 static void elevator_attach(request_queue_t *q, struct elevator_queue *eq,
139 void *data)
140 {
141 q->elevator = eq;
142 eq->elevator_data = data;
143 }
144
145 static char chosen_elevator[16];
146
147 static int __init elevator_setup(char *str)
148 {
149 /*
150 * Be backwards-compatible with previous kernels, so users
151 * won't get the wrong elevator.
152 */
153 if (!strcmp(str, "as"))
154 strcpy(chosen_elevator, "anticipatory");
155 else
156 strncpy(chosen_elevator, str, sizeof(chosen_elevator) - 1);
157 return 1;
158 }
159
160 __setup("elevator=", elevator_setup);
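
/*
 * Usage example: booting with "elevator=deadline" on the kernel command
 * line stores "deadline" in chosen_elevator, while the legacy
 * "elevator=as" spelling is rewritten to "anticipatory". If the chosen
 * scheduler is not registered, elevator_init() below falls back to
 * CONFIG_DEFAULT_IOSCHED and finally to "noop".
 */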
161
162 static struct kobj_type elv_ktype;
163
164 static elevator_t *elevator_alloc(struct elevator_type *e)
165 {
166 elevator_t *eq;
167 int i;
168
169 eq = kmalloc(sizeof(elevator_t), GFP_KERNEL);
170 if (unlikely(!eq))
171 goto err;
172
173 memset(eq, 0, sizeof(*eq));
174 eq->ops = &e->ops;
175 eq->elevator_type = e;
176 kobject_init(&eq->kobj);
177 snprintf(eq->kobj.name, KOBJ_NAME_LEN, "%s", "iosched");
178 eq->kobj.ktype = &elv_ktype;
179 mutex_init(&eq->sysfs_lock);
180
181 eq->hash = kmalloc(sizeof(struct hlist_head) * ELV_HASH_ENTRIES, GFP_KERNEL);
182 if (!eq->hash)
183 goto err;
184
185 for (i = 0; i < ELV_HASH_ENTRIES; i++)
186 INIT_HLIST_HEAD(&eq->hash[i]);
187
188 return eq;
189 err:
190 kfree(eq);
191 elevator_put(e);
192 return NULL;
193 }
194
195 static void elevator_release(struct kobject *kobj)
196 {
197 elevator_t *e = container_of(kobj, elevator_t, kobj);
198
199 elevator_put(e->elevator_type);
200 kfree(e->hash);
201 kfree(e);
202 }
203
204 int elevator_init(request_queue_t *q, char *name)
205 {
206 struct elevator_type *e = NULL;
207 struct elevator_queue *eq;
208 int ret = 0;
209 void *data;
210
211 INIT_LIST_HEAD(&q->queue_head);
212 q->last_merge = NULL;
213 q->end_sector = 0;
214 q->boundary_rq = NULL;
215
216 if (name && !(e = elevator_get(name)))
217 return -EINVAL;
218
219 if (!e && *chosen_elevator && !(e = elevator_get(chosen_elevator)))
220 printk("I/O scheduler %s not found\n", chosen_elevator);
221
222 if (!e && !(e = elevator_get(CONFIG_DEFAULT_IOSCHED))) {
223 printk("Default I/O scheduler not found, using no-op\n");
224 e = elevator_get("noop");
225 }
226
227 eq = elevator_alloc(e);
228 if (!eq)
229 return -ENOMEM;
230
231 data = elevator_init_queue(q, eq);
232 if (!data) {
233 kobject_put(&eq->kobj);
234 return -ENOMEM;
235 }
236
237 elevator_attach(q, eq, data);
238 return ret;
239 }
240
241 EXPORT_SYMBOL(elevator_init);
242
243 void elevator_exit(elevator_t *e)
244 {
245 mutex_lock(&e->sysfs_lock);
246 if (e->ops->elevator_exit_fn)
247 e->ops->elevator_exit_fn(e);
248 e->ops = NULL;
249 mutex_unlock(&e->sysfs_lock);
250
251 kobject_put(&e->kobj);
252 }
253
254 EXPORT_SYMBOL(elevator_exit);
255
256 static inline void __elv_rqhash_del(struct request *rq)
257 {
258 hlist_del_init(&rq->hash);
259 }
260
261 static void elv_rqhash_del(request_queue_t *q, struct request *rq)
262 {
263 if (ELV_ON_HASH(rq))
264 __elv_rqhash_del(rq);
265 }
266
267 static void elv_rqhash_add(request_queue_t *q, struct request *rq)
268 {
269 elevator_t *e = q->elevator;
270
271 BUG_ON(ELV_ON_HASH(rq));
272 hlist_add_head(&rq->hash, &e->hash[ELV_HASH_FN(rq_hash_key(rq))]);
273 }
274
275 static void elv_rqhash_reposition(request_queue_t *q, struct request *rq)
276 {
277 __elv_rqhash_del(rq);
278 elv_rqhash_add(q, rq);
279 }
280
281 static struct request *elv_rqhash_find(request_queue_t *q, sector_t offset)
282 {
283 elevator_t *e = q->elevator;
284 struct hlist_head *hash_list = &e->hash[ELV_HASH_FN(offset)];
285 struct hlist_node *entry, *next;
286 struct request *rq;
287
288 hlist_for_each_entry_safe(rq, entry, next, hash_list, hash) {
289 BUG_ON(!ELV_ON_HASH(rq));
290
291 if (unlikely(!rq_mergeable(rq))) {
292 __elv_rqhash_del(rq);
293 continue;
294 }
295
296 if (rq_hash_key(rq) == offset)
297 return rq;
298 }
299
300 return NULL;
301 }
302
303 /*
304 * RB-tree support functions for inserting/lookup/removal of requests
305 * in a sorted RB tree.
306 */
307 struct request *elv_rb_add(struct rb_root *root, struct request *rq)
308 {
309 struct rb_node **p = &root->rb_node;
310 struct rb_node *parent = NULL;
311 struct request *__rq;
312
313 while (*p) {
314 parent = *p;
315 __rq = rb_entry(parent, struct request, rb_node);
316
317 if (rq->sector < __rq->sector)
318 p = &(*p)->rb_left;
319 else if (rq->sector > __rq->sector)
320 p = &(*p)->rb_right;
321 else
322 return __rq;
323 }
324
325 rb_link_node(&rq->rb_node, parent, p);
326 rb_insert_color(&rq->rb_node, root);
327 return NULL;
328 }
329
330 EXPORT_SYMBOL(elv_rb_add);
331
332 void elv_rb_del(struct rb_root *root, struct request *rq)
333 {
334 BUG_ON(RB_EMPTY_NODE(&rq->rb_node));
335 rb_erase(&rq->rb_node, root);
336 RB_CLEAR_NODE(&rq->rb_node);
337 }
338
339 EXPORT_SYMBOL(elv_rb_del);
340
341 struct request *elv_rb_find(struct rb_root *root, sector_t sector)
342 {
343 struct rb_node *n = root->rb_node;
344 struct request *rq;
345
346 while (n) {
347 rq = rb_entry(n, struct request, rb_node);
348
349 if (sector < rq->sector)
350 n = n->rb_left;
351 else if (sector > rq->sector)
352 n = n->rb_right;
353 else
354 return rq;
355 }
356
357 return NULL;
358 }
359
360 EXPORT_SYMBOL(elv_rb_find);
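
/*
 * Example: a sector-sorted scheduler typically keeps one rb_root per
 * data direction and drives it with the three helpers above; the
 * matching front merge lookup is elv_rb_find(root, bio->bi_sector +
 * bio_sectors(bio)). A minimal sketch of the add/remove side follows
 * (illustrative only; struct example_data and the example_* names are
 * hypothetical, not part of this file):
 *
 *	struct example_data {
 *		struct rb_root sort_list[2];
 *	};
 *
 *	static void example_add_rq_rb(struct example_data *ed,
 *				      struct request *rq)
 *	{
 *		struct rb_root *root = &ed->sort_list[rq_data_dir(rq)];
 *		struct request *alias;
 *
 *		while ((alias = elv_rb_add(root, rq)) != NULL) {
 *			elv_rb_del(root, alias);
 *			example_dispatch_request(alias);
 *		}
 *	}
 *
 *	static void example_del_rq_rb(struct example_data *ed,
 *				      struct request *rq)
 *	{
 *		elv_rb_del(&ed->sort_list[rq_data_dir(rq)], rq);
 *	}
 */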
361
362 /*
363 * Insert rq into dispatch queue of q. Queue lock must be held on
364 * entry. rq is sort inserted into the dispatch queue. To be used by
365 * specific elevators.
366 */
367 void elv_dispatch_sort(request_queue_t *q, struct request *rq)
368 {
369 sector_t boundary;
370 struct list_head *entry;
371
372 if (q->last_merge == rq)
373 q->last_merge = NULL;
374
375 elv_rqhash_del(q, rq);
376
377 q->nr_sorted--;
378
379 boundary = q->end_sector;
380
381 list_for_each_prev(entry, &q->queue_head) {
382 struct request *pos = list_entry_rq(entry);
383
384 if (pos->cmd_flags & (REQ_SOFTBARRIER|REQ_HARDBARRIER|REQ_STARTED))
385 break;
386 if (rq->sector >= boundary) {
387 if (pos->sector < boundary)
388 continue;
389 } else {
390 if (pos->sector >= boundary)
391 break;
392 }
393 if (rq->sector >= pos->sector)
394 break;
395 }
396
397 list_add(&rq->queuelist, entry);
398 }
399
400 EXPORT_SYMBOL(elv_dispatch_sort);
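
/*
 * Worked example of the boundary handling above (illustrative only):
 * with q->end_sector == 1000 and the dispatch queue holding requests at
 * sectors 1000, 1200, 1400, 0, 200, 600 in that order, a new request at
 * sector 300 is placed between 200 and 600, while one at sector 1100
 * ends up between 1000 and 1200. The list thus stays ordered for a
 * single ascending sweep that starts at the boundary and wraps past the
 * end of the device, and barrier or already-started requests are never
 * passed.
 */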
401
402 /*
403 * Insert rq into dispatch queue of q. Queue lock must be held on
404 * entry. rq is added to the back of the dispatch queue. To be used by
405 * specific elevators.
406 */
407 void elv_dispatch_add_tail(struct request_queue *q, struct request *rq)
408 {
409 if (q->last_merge == rq)
410 q->last_merge = NULL;
411
412 elv_rqhash_del(q, rq);
413
414 q->nr_sorted--;
415
416 q->end_sector = rq_end_sector(rq);
417 q->boundary_rq = rq;
418 list_add_tail(&rq->queuelist, &q->queue_head);
419 }
420
421 EXPORT_SYMBOL(elv_dispatch_add_tail);
422
423 int elv_merge(request_queue_t *q, struct request **req, struct bio *bio)
424 {
425 elevator_t *e = q->elevator;
426 struct request *__rq;
427 int ret;
428
429 /*
430 * First try one-hit cache.
431 */
432 if (q->last_merge) {
433 ret = elv_try_merge(q->last_merge, bio);
434 if (ret != ELEVATOR_NO_MERGE) {
435 *req = q->last_merge;
436 return ret;
437 }
438 }
439
440 /*
441 * See if our hash lookup can find a potential backmerge.
442 */
443 __rq = elv_rqhash_find(q, bio->bi_sector);
444 if (__rq && elv_rq_merge_ok(__rq, bio)) {
445 *req = __rq;
446 return ELEVATOR_BACK_MERGE;
447 }
448
449 if (e->ops->elevator_merge_fn)
450 return e->ops->elevator_merge_fn(q, req, bio);
451
452 return ELEVATOR_NO_MERGE;
453 }
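
/*
 * Example: the caller (__make_request() in ll_rw_blk.c) acts on the
 * return value roughly as sketched below. This is a simplified sketch;
 * the real code also updates accounting and the hard_* request fields
 * and then attempts request-to-request merges:
 *
 *	int el_ret;
 *
 *	el_ret = elv_merge(q, &req, bio);
 *	switch (el_ret) {
 *	case ELEVATOR_BACK_MERGE:
 *		req->biotail->bi_next = bio;
 *		req->biotail = bio;
 *		req->nr_sectors += bio_sectors(bio);
 *		elv_merged_request(q, req, el_ret);
 *		break;
 *	case ELEVATOR_FRONT_MERGE:
 *		bio->bi_next = req->bio;
 *		req->bio = bio;
 *		req->sector = bio->bi_sector;
 *		req->nr_sectors += bio_sectors(bio);
 *		elv_merged_request(q, req, el_ret);
 *		break;
 *	default:
 *		break;
 *	}
 *
 * In the ELEVATOR_NO_MERGE case a fresh request is allocated and
 * inserted through __elv_add_request() instead.
 */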
454
455 void elv_merged_request(request_queue_t *q, struct request *rq, int type)
456 {
457 elevator_t *e = q->elevator;
458
459 if (e->ops->elevator_merged_fn)
460 e->ops->elevator_merged_fn(q, rq, type);
461
462 if (type == ELEVATOR_BACK_MERGE)
463 elv_rqhash_reposition(q, rq);
464
465 q->last_merge = rq;
466 }
467
468 void elv_merge_requests(request_queue_t *q, struct request *rq,
469 struct request *next)
470 {
471 elevator_t *e = q->elevator;
472
473 if (e->ops->elevator_merge_req_fn)
474 e->ops->elevator_merge_req_fn(q, rq, next);
475
476 elv_rqhash_reposition(q, rq);
477 elv_rqhash_del(q, next);
478
479 q->nr_sorted--;
480 q->last_merge = rq;
481 }
482
483 void elv_requeue_request(request_queue_t *q, struct request *rq)
484 {
485 elevator_t *e = q->elevator;
486
487 /*
488 * it already went through dequeue, we need to decrement the
489 * in_flight count again
490 */
491 if (blk_account_rq(rq)) {
492 q->in_flight--;
493 if (blk_sorted_rq(rq) && e->ops->elevator_deactivate_req_fn)
494 e->ops->elevator_deactivate_req_fn(q, rq);
495 }
496
497 rq->cmd_flags &= ~REQ_STARTED;
498
499 elv_insert(q, rq, ELEVATOR_INSERT_REQUEUE);
500 }
501
502 static void elv_drain_elevator(request_queue_t *q)
503 {
504 static int printed;
505 while (q->elevator->ops->elevator_dispatch_fn(q, 1))
506 ;
507 if (q->nr_sorted == 0)
508 return;
509 if (printed++ < 10) {
510 printk(KERN_ERR "%s: forced dispatching is broken "
511 "(nr_sorted=%u), please report this\n",
512 q->elevator->elevator_type->elevator_name, q->nr_sorted);
513 }
514 }
515
516 void elv_insert(request_queue_t *q, struct request *rq, int where)
517 {
518 struct list_head *pos;
519 unsigned ordseq;
520 int unplug_it = 1;
521
522 blk_add_trace_rq(q, rq, BLK_TA_INSERT);
523
524 rq->q = q;
525
526 switch (where) {
527 case ELEVATOR_INSERT_FRONT:
528 rq->cmd_flags |= REQ_SOFTBARRIER;
529
530 list_add(&rq->queuelist, &q->queue_head);
531 break;
532
533 case ELEVATOR_INSERT_BACK:
534 rq->cmd_flags |= REQ_SOFTBARRIER;
535 elv_drain_elevator(q);
536 list_add_tail(&rq->queuelist, &q->queue_head);
537 /*
538 * We kick the queue here for the following reasons.
539 * - The elevator might have returned NULL previously
540 * to delay requests and is returning them now. As the
541 * queue wasn't empty before this request, ll_rw_blk
542 * won't run the queue on return, resulting in a hang.
543 * - Usually, back inserted requests won't be merged
544 * with anything. There's no point in delaying queue
545 * processing.
546 */
547 blk_remove_plug(q);
548 q->request_fn(q);
549 break;
550
551 case ELEVATOR_INSERT_SORT:
552 BUG_ON(!blk_fs_request(rq));
553 rq->cmd_flags |= REQ_SORTED;
554 q->nr_sorted++;
555 if (rq_mergeable(rq)) {
556 elv_rqhash_add(q, rq);
557 if (!q->last_merge)
558 q->last_merge = rq;
559 }
560
561 /*
562 * Some ioscheds (cfq) run q->request_fn directly, so
563 * rq cannot be accessed after calling
564 * elevator_add_req_fn.
565 */
566 q->elevator->ops->elevator_add_req_fn(q, rq);
567 break;
568
569 case ELEVATOR_INSERT_REQUEUE:
570 /*
571 * If ordered flush isn't in progress, we do front
572 * insertion; otherwise, requests should be requeued
573 * in ordseq order.
574 */
575 rq->cmd_flags |= REQ_SOFTBARRIER;
576
577 if (q->ordseq == 0) {
578 list_add(&rq->queuelist, &q->queue_head);
579 break;
580 }
581
582 ordseq = blk_ordered_req_seq(rq);
583
584 list_for_each(pos, &q->queue_head) {
585 struct request *pos_rq = list_entry_rq(pos);
586 if (ordseq <= blk_ordered_req_seq(pos_rq))
587 break;
588 }
589
590 list_add_tail(&rq->queuelist, pos);
591 /*
592 * most requeues happen because of a busy condition, don't
593 * force unplug of the queue for that case.
594 */
595 unplug_it = 0;
596 break;
597
598 default:
599 printk(KERN_ERR "%s: bad insertion point %d\n",
600 __FUNCTION__, where);
601 BUG();
602 }
603
604 if (unplug_it && blk_queue_plugged(q)) {
605 int nrq = q->rq.count[READ] + q->rq.count[WRITE]
606 - q->in_flight;
607
608 if (nrq >= q->unplug_thresh)
609 __generic_unplug_device(q);
610 }
611 }
612
613 void __elv_add_request(request_queue_t *q, struct request *rq, int where,
614 int plug)
615 {
616 if (q->ordcolor)
617 rq->cmd_flags |= REQ_ORDERED_COLOR;
618
619 if (rq->cmd_flags & (REQ_SOFTBARRIER | REQ_HARDBARRIER)) {
620 /*
621 * toggle ordered color
622 */
623 if (blk_barrier_rq(rq))
624 q->ordcolor ^= 1;
625
626 /*
627 * barriers implicitly indicate back insertion
628 */
629 if (where == ELEVATOR_INSERT_SORT)
630 where = ELEVATOR_INSERT_BACK;
631
632 /*
633 * this request is scheduling boundary, update
634 * end_sector
635 */
636 if (blk_fs_request(rq)) {
637 q->end_sector = rq_end_sector(rq);
638 q->boundary_rq = rq;
639 }
640 } else if (!(rq->cmd_flags & REQ_ELVPRIV) && where == ELEVATOR_INSERT_SORT)
641 where = ELEVATOR_INSERT_BACK;
642
643 if (plug)
644 blk_plug_device(q);
645
646 elv_insert(q, rq, where);
647 }
648
649 EXPORT_SYMBOL(__elv_add_request);
650
651 void elv_add_request(request_queue_t *q, struct request *rq, int where,
652 int plug)
653 {
654 unsigned long flags;
655
656 spin_lock_irqsave(q->queue_lock, flags);
657 __elv_add_request(q, rq, where, plug);
658 spin_unlock_irqrestore(q->queue_lock, flags);
659 }
660
661 EXPORT_SYMBOL(elv_add_request);
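
/*
 * Example: the block core is the main user of these two entry points. A
 * normal fs request leaving __make_request() is queued under the queue
 * lock with
 *
 *	__elv_add_request(q, req, ELEVATOR_INSERT_SORT, 0);
 *
 * while blk_execute_rq_nowait() passes ELEVATOR_INSERT_FRONT or
 * ELEVATOR_INSERT_BACK to elv_add_request(), which takes the lock
 * itself.
 */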
662
663 static inline struct request *__elv_next_request(request_queue_t *q)
664 {
665 struct request *rq;
666
667 while (1) {
668 while (!list_empty(&q->queue_head)) {
669 rq = list_entry_rq(q->queue_head.next);
670 if (blk_do_ordered(q, &rq))
671 return rq;
672 }
673
674 if (!q->elevator->ops->elevator_dispatch_fn(q, 0))
675 return NULL;
676 }
677 }
678
679 struct request *elv_next_request(request_queue_t *q)
680 {
681 struct request *rq;
682 int ret;
683
684 while ((rq = __elv_next_request(q)) != NULL) {
685 if (!(rq->cmd_flags & REQ_STARTED)) {
686 elevator_t *e = q->elevator;
687
688 /*
689 * This is the first time the device driver
690 * sees this request (possibly after
691 * requeueing). Notify IO scheduler.
692 */
693 if (blk_sorted_rq(rq) &&
694 e->ops->elevator_activate_req_fn)
695 e->ops->elevator_activate_req_fn(q, rq);
696
697 /*
698 * just mark as started even if we don't start
699 * it, a request that has been delayed should
700 * not be passed by new incoming requests
701 */
702 rq->cmd_flags |= REQ_STARTED;
703 blk_add_trace_rq(q, rq, BLK_TA_ISSUE);
704 }
705
706 if (!q->boundary_rq || q->boundary_rq == rq) {
707 q->end_sector = rq_end_sector(rq);
708 q->boundary_rq = NULL;
709 }
710
711 if ((rq->cmd_flags & REQ_DONTPREP) || !q->prep_rq_fn)
712 break;
713
714 ret = q->prep_rq_fn(q, rq);
715 if (ret == BLKPREP_OK) {
716 break;
717 } else if (ret == BLKPREP_DEFER) {
718 /*
719 * the request may have been (partially) prepped.
720 * we need to keep this request in the front to
721 * avoid resource deadlock. REQ_STARTED will
722 * prevent other fs requests from passing this one.
723 */
724 rq = NULL;
725 break;
726 } else if (ret == BLKPREP_KILL) {
727 int nr_bytes = rq->hard_nr_sectors << 9;
728
729 if (!nr_bytes)
730 nr_bytes = rq->data_len;
731
732 blkdev_dequeue_request(rq);
733 rq->cmd_flags |= REQ_QUIET;
734 end_that_request_chunk(rq, 0, nr_bytes);
735 end_that_request_last(rq, 0);
736 } else {
737 printk(KERN_ERR "%s: bad return=%d\n", __FUNCTION__,
738 ret);
739 break;
740 }
741 }
742
743 return rq;
744 }
745
746 EXPORT_SYMBOL(elv_next_request);
747
748 void elv_dequeue_request(request_queue_t *q, struct request *rq)
749 {
750 BUG_ON(list_empty(&rq->queuelist));
751 BUG_ON(ELV_ON_HASH(rq));
752
753 list_del_init(&rq->queuelist);
754
755 /*
756 * the time frame between a request being removed from the lists
757 * and when it is freed is accounted as io that is in progress at
758 * the driver side.
759 */
760 if (blk_account_rq(rq))
761 q->in_flight++;
762 }
763
764 EXPORT_SYMBOL(elv_dequeue_request);
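
/*
 * Example: a simple request_fn based driver consumes the dispatch queue
 * with elv_next_request() and blkdev_dequeue_request() (a wrapper around
 * elv_dequeue_request()). Minimal sketch only; example_transfer() is
 * hypothetical and a real driver must also handle partial completions
 * and errors:
 *
 *	static void example_request_fn(request_queue_t *q)
 *	{
 *		struct request *rq;
 *
 *		while ((rq = elv_next_request(q)) != NULL) {
 *			if (!blk_fs_request(rq)) {
 *				end_request(rq, 0);
 *				continue;
 *			}
 *			blkdev_dequeue_request(rq);
 *			example_transfer(rq);
 *			end_that_request_first(rq, 1, rq->hard_nr_sectors);
 *			end_that_request_last(rq, 1);
 *		}
 *	}
 */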
765
766 int elv_queue_empty(request_queue_t *q)
767 {
768 elevator_t *e = q->elevator;
769
770 if (!list_empty(&q->queue_head))
771 return 0;
772
773 if (e->ops->elevator_queue_empty_fn)
774 return e->ops->elevator_queue_empty_fn(q);
775
776 return 1;
777 }
778
779 EXPORT_SYMBOL(elv_queue_empty);
780
781 struct request *elv_latter_request(request_queue_t *q, struct request *rq)
782 {
783 elevator_t *e = q->elevator;
784
785 if (e->ops->elevator_latter_req_fn)
786 return e->ops->elevator_latter_req_fn(q, rq);
787 return NULL;
788 }
789
790 struct request *elv_former_request(request_queue_t *q, struct request *rq)
791 {
792 elevator_t *e = q->elevator;
793
794 if (e->ops->elevator_former_req_fn)
795 return e->ops->elevator_former_req_fn(q, rq);
796 return NULL;
797 }
798
799 int elv_set_request(request_queue_t *q, struct request *rq, struct bio *bio,
800 gfp_t gfp_mask)
801 {
802 elevator_t *e = q->elevator;
803
804 if (e->ops->elevator_set_req_fn)
805 return e->ops->elevator_set_req_fn(q, rq, bio, gfp_mask);
806
807 rq->elevator_private = NULL;
808 return 0;
809 }
810
811 void elv_put_request(request_queue_t *q, struct request *rq)
812 {
813 elevator_t *e = q->elevator;
814
815 if (e->ops->elevator_put_req_fn)
816 e->ops->elevator_put_req_fn(q, rq);
817 }
818
819 int elv_may_queue(request_queue_t *q, int rw, struct bio *bio)
820 {
821 elevator_t *e = q->elevator;
822
823 if (e->ops->elevator_may_queue_fn)
824 return e->ops->elevator_may_queue_fn(q, rw, bio);
825
826 return ELV_MQUEUE_MAY;
827 }
828
829 void elv_completed_request(request_queue_t *q, struct request *rq)
830 {
831 elevator_t *e = q->elevator;
832
833 /*
834 * request is released from the driver, io must be done
835 */
836 if (blk_account_rq(rq)) {
837 q->in_flight--;
838 if (blk_sorted_rq(rq) && e->ops->elevator_completed_req_fn)
839 e->ops->elevator_completed_req_fn(q, rq);
840 }
841
842 /*
843 * Check if the queue is waiting for fs requests to be
844 * drained for flush sequence.
845 */
846 if (unlikely(q->ordseq)) {
847 struct request *first_rq = list_entry_rq(q->queue_head.next);
848 if (q->in_flight == 0 &&
849 blk_ordered_cur_seq(q) == QUEUE_ORDSEQ_DRAIN &&
850 blk_ordered_req_seq(first_rq) > QUEUE_ORDSEQ_DRAIN) {
851 blk_ordered_complete_seq(q, QUEUE_ORDSEQ_DRAIN, 0);
852 q->request_fn(q);
853 }
854 }
855 }
856
857 #define to_elv(atr) container_of((atr), struct elv_fs_entry, attr)
858
859 static ssize_t
860 elv_attr_show(struct kobject *kobj, struct attribute *attr, char *page)
861 {
862 elevator_t *e = container_of(kobj, elevator_t, kobj);
863 struct elv_fs_entry *entry = to_elv(attr);
864 ssize_t error;
865
866 if (!entry->show)
867 return -EIO;
868
869 mutex_lock(&e->sysfs_lock);
870 error = e->ops ? entry->show(e, page) : -ENOENT;
871 mutex_unlock(&e->sysfs_lock);
872 return error;
873 }
874
875 static ssize_t
876 elv_attr_store(struct kobject *kobj, struct attribute *attr,
877 const char *page, size_t length)
878 {
879 elevator_t *e = container_of(kobj, elevator_t, kobj);
880 struct elv_fs_entry *entry = to_elv(attr);
881 ssize_t error;
882
883 if (!entry->store)
884 return -EIO;
885
886 mutex_lock(&e->sysfs_lock);
887 error = e->ops ? entry->store(e, page, length) : -ENOENT;
888 mutex_unlock(&e->sysfs_lock);
889 return error;
890 }
891
892 static struct sysfs_ops elv_sysfs_ops = {
893 .show = elv_attr_show,
894 .store = elv_attr_store,
895 };
896
897 static struct kobj_type elv_ktype = {
898 .sysfs_ops = &elv_sysfs_ops,
899 .release = elevator_release,
900 };
901
902 int elv_register_queue(struct request_queue *q)
903 {
904 elevator_t *e = q->elevator;
905 int error;
906
907 e->kobj.parent = &q->kobj;
908
909 error = kobject_add(&e->kobj);
910 if (!error) {
911 struct elv_fs_entry *attr = e->elevator_type->elevator_attrs;
912 if (attr) {
913 while (attr->attr.name) {
914 if (sysfs_create_file(&e->kobj, &attr->attr))
915 break;
916 attr++;
917 }
918 }
919 kobject_uevent(&e->kobj, KOBJ_ADD);
920 }
921 return error;
922 }
923
924 static void __elv_unregister_queue(elevator_t *e)
925 {
926 kobject_uevent(&e->kobj, KOBJ_REMOVE);
927 kobject_del(&e->kobj);
928 }
929
930 void elv_unregister_queue(struct request_queue *q)
931 {
932 if (q)
933 __elv_unregister_queue(q->elevator);
934 }
935
936 int elv_register(struct elevator_type *e)
937 {
938 spin_lock_irq(&elv_list_lock);
939 BUG_ON(elevator_find(e->elevator_name));
940 list_add_tail(&e->list, &elv_list);
941 spin_unlock_irq(&elv_list_lock);
942
943 printk(KERN_INFO "io scheduler %s registered", e->elevator_name);
944 if (!strcmp(e->elevator_name, chosen_elevator) ||
945 (!*chosen_elevator &&
946 !strcmp(e->elevator_name, CONFIG_DEFAULT_IOSCHED)))
947 printk(" (default)");
948 printk("\n");
949 return 0;
950 }
951 EXPORT_SYMBOL_GPL(elv_register);
952
953 void elv_unregister(struct elevator_type *e)
954 {
955 struct task_struct *g, *p;
956
957 /*
958 * Iterate over every thread in the system and trim its io context.
959 */
960 if (e->ops.trim) {
961 read_lock(&tasklist_lock);
962 do_each_thread(g, p) {
963 task_lock(p);
964 if (p->io_context)
965 e->ops.trim(p->io_context);
966 task_unlock(p);
967 } while_each_thread(g, p);
968 read_unlock(&tasklist_lock);
969 }
970
971 spin_lock_irq(&elv_list_lock);
972 list_del_init(&e->list);
973 spin_unlock_irq(&elv_list_lock);
974 }
975 EXPORT_SYMBOL_GPL(elv_unregister);
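
/*
 * Example: an I/O scheduler module pairs these two calls in its init and
 * exit paths. A minimal sketch (illustrative only; the "example" name
 * and the example_* callbacks are hypothetical):
 *
 *	static struct elevator_type iosched_example = {
 *		.ops = {
 *			.elevator_merge_fn		= example_merge,
 *			.elevator_dispatch_fn		= example_dispatch,
 *			.elevator_add_req_fn		= example_add_request,
 *			.elevator_queue_empty_fn	= example_queue_empty,
 *			.elevator_init_fn		= example_init_queue,
 *			.elevator_exit_fn		= example_exit_queue,
 *		},
 *		.elevator_name	= "example",
 *		.elevator_owner	= THIS_MODULE,
 *	};
 *
 *	static int __init example_init(void)
 *	{
 *		return elv_register(&iosched_example);
 *	}
 *
 *	static void __exit example_exit(void)
 *	{
 *		elv_unregister(&iosched_example);
 *	}
 *
 *	module_init(example_init);
 *	module_exit(example_exit);
 */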
976
977 /*
978 * switch to new_e io scheduler. be careful not to introduce deadlocks -
979 * we don't free the old io scheduler before we have allocated what we
980 * need for the new one. this way we have a chance of going back to the old
981 * one, if the new one fails init for some reason.
982 */
983 static int elevator_switch(request_queue_t *q, struct elevator_type *new_e)
984 {
985 elevator_t *old_elevator, *e;
986 void *data;
987
988 /*
989 * Allocate new elevator
990 */
991 e = elevator_alloc(new_e);
992 if (!e)
993 return 0;
994
995 data = elevator_init_queue(q, e);
996 if (!data) {
997 kobject_put(&e->kobj);
998 return 0;
999 }
1000
1001 /*
1002 * Turn on BYPASS and drain all requests w/ elevator private data
1003 */
1004 spin_lock_irq(q->queue_lock);
1005
1006 set_bit(QUEUE_FLAG_ELVSWITCH, &q->queue_flags);
1007
1008 elv_drain_elevator(q);
1009
1010 while (q->rq.elvpriv) {
1011 blk_remove_plug(q);
1012 q->request_fn(q);
1013 spin_unlock_irq(q->queue_lock);
1014 msleep(10);
1015 spin_lock_irq(q->queue_lock);
1016 elv_drain_elevator(q);
1017 }
1018
1019 /*
1020 * Remember old elevator.
1021 */
1022 old_elevator = q->elevator;
1023
1024 /*
1025 * attach and start new elevator
1026 */
1027 elevator_attach(q, e, data);
1028
1029 spin_unlock_irq(q->queue_lock);
1030
1031 __elv_unregister_queue(old_elevator);
1032
1033 if (elv_register_queue(q))
1034 goto fail_register;
1035
1036 /*
1037 * finally exit old elevator and turn off BYPASS.
1038 */
1039 elevator_exit(old_elevator);
1040 clear_bit(QUEUE_FLAG_ELVSWITCH, &q->queue_flags);
1041 return 1;
1042
1043 fail_register:
1044 /*
1045 * switch failed, exit the new io scheduler and reattach the old
1046 * one again (along with re-adding the sysfs dir)
1047 */
1048 elevator_exit(e);
1049 q->elevator = old_elevator;
1050 elv_register_queue(q);
1051 clear_bit(QUEUE_FLAG_ELVSWITCH, &q->queue_flags);
1052 return 0;
1053 }
1054
1055 ssize_t elv_iosched_store(request_queue_t *q, const char *name, size_t count)
1056 {
1057 char elevator_name[ELV_NAME_MAX];
1058 size_t len;
1059 struct elevator_type *e;
1060
1061 elevator_name[sizeof(elevator_name) - 1] = '\0';
1062 strncpy(elevator_name, name, sizeof(elevator_name) - 1);
1063 len = strlen(elevator_name);
1064
1065 if (len && elevator_name[len - 1] == '\n')
1066 elevator_name[len - 1] = '\0';
1067
1068 e = elevator_get(elevator_name);
1069 if (!e) {
1070 printk(KERN_ERR "elevator: type %s not found\n", elevator_name);
1071 return -EINVAL;
1072 }
1073
1074 if (!strcmp(elevator_name, q->elevator->elevator_type->elevator_name)) {
1075 elevator_put(e);
1076 return count;
1077 }
1078
1079 if (!elevator_switch(q, e))
1080 printk(KERN_ERR "elevator: switch to %s failed\n",elevator_name);
1081 return count;
1082 }
1083
1084 ssize_t elv_iosched_show(request_queue_t *q, char *name)
1085 {
1086 elevator_t *e = q->elevator;
1087 struct elevator_type *elv = e->elevator_type;
1088 struct list_head *entry;
1089 int len = 0;
1090
1091 spin_lock_irq(q->queue_lock);
1092 list_for_each(entry, &elv_list) {
1093 struct elevator_type *__e;
1094
1095 __e = list_entry(entry, struct elevator_type, list);
1096 if (!strcmp(elv->elevator_name, __e->elevator_name))
1097 len += sprintf(name+len, "[%s] ", elv->elevator_name);
1098 else
1099 len += sprintf(name+len, "%s ", __e->elevator_name);
1100 }
1101 spin_unlock_irq(q->queue_lock);
1102
1103 len += sprintf(len+name, "\n");
1104 return len;
1105 }
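
/*
 * Usage example: these two handlers back the per-queue "scheduler" sysfs
 * attribute, so the active elevator can be inspected and switched at run
 * time, e.g. on a hypothetical disk sda:
 *
 *	# cat /sys/block/sda/queue/scheduler
 *	noop [cfq] deadline
 *	# echo deadline > /sys/block/sda/queue/scheduler
 */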
1106
1107 struct request *elv_rb_former_request(request_queue_t *q, struct request *rq)
1108 {
1109 struct rb_node *rbprev = rb_prev(&rq->rb_node);
1110
1111 if (rbprev)
1112 return rb_entry_rq(rbprev);
1113
1114 return NULL;
1115 }
1116
1117 EXPORT_SYMBOL(elv_rb_former_request);
1118
1119 struct request *elv_rb_latter_request(request_queue_t *q, struct request *rq)
1120 {
1121 struct rb_node *rbnext = rb_next(&rq->rb_node);
1122
1123 if (rbnext)
1124 return rb_entry_rq(rbnext);
1125
1126 return NULL;
1127 }
1128
1129 EXPORT_SYMBOL(elv_rb_latter_request);