1 /*
2 * Block device elevator/IO-scheduler.
3 *
4 * Copyright (C) 2000 Andrea Arcangeli <andrea@suse.de> SuSE
5 *
6 * 30042000 Jens Axboe <axboe@kernel.dk> :
7 *
8 * Split the elevator a bit so that it is possible to choose a different
9 * one or even write a new "plug in". There are three pieces:
10 * - elevator_fn, inserts a new request in the queue list
11 * - elevator_merge_fn, decides whether a new buffer can be merged with
12 * an existing request
13 * - elevator_dequeue_fn, called when a request is taken off the active list
14 *
15 * 20082000 Dave Jones <davej@suse.de> :
16 * Removed tests for max-bomb-segments, which was breaking elvtune
17 * when run without -bN
18 *
19 * Jens:
20 * - Rework again to work with bio instead of buffer_heads
21 * - lose bi_dev comparisons, partition handling is right now
22 * - completely modularize elevator setup and teardown
23 *
24 */
25 #include <linux/kernel.h>
26 #include <linux/fs.h>
27 #include <linux/blkdev.h>
28 #include <linux/elevator.h>
29 #include <linux/bio.h>
30 #include <linux/module.h>
31 #include <linux/slab.h>
32 #include <linux/init.h>
33 #include <linux/compiler.h>
34 #include <linux/delay.h>
35 #include <linux/blktrace_api.h>
36 #include <linux/hash.h>
37
38 #include <asm/uaccess.h>
39
40 static DEFINE_SPINLOCK(elv_list_lock);
41 static LIST_HEAD(elv_list);
42
43 /*
44 * Merge hash stuff.
45 */
46 static const int elv_hash_shift = 6;
47 #define ELV_HASH_BLOCK(sec) ((sec) >> 3)
48 #define ELV_HASH_FN(sec) (hash_long(ELV_HASH_BLOCK((sec)), elv_hash_shift))
49 #define ELV_HASH_ENTRIES (1 << elv_hash_shift)
50 #define rq_hash_key(rq) ((rq)->sector + (rq)->nr_sectors)
51 #define ELV_ON_HASH(rq) (!hlist_unhashed(&(rq)->hash))
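/*
 * For example: a request covering sectors 1000..1007 has
 * rq_hash_key() == 1008 (one past its end), ELV_HASH_BLOCK() drops the
 * low 3 bits, and ELV_HASH_FN() hashes that into one of the 64 buckets,
 * so a bio starting at sector 1008 can find the request for a back
 * merge without scanning the queue.
 */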
52
53 /*
54 * Query the io scheduler to see if a bio issued by the current process
55 * may be merged with rq.
56 */
57 static int elv_iosched_allow_merge(struct request *rq, struct bio *bio)
58 {
59 request_queue_t *q = rq->q;
60 elevator_t *e = q->elevator;
61
62 if (e->ops->elevator_allow_merge_fn)
63 return e->ops->elevator_allow_merge_fn(q, rq, bio);
64
65 return 1;
66 }
67
68 /*
69 * can we safely merge with this request?
70 */
71 inline int elv_rq_merge_ok(struct request *rq, struct bio *bio)
72 {
73 if (!rq_mergeable(rq))
74 return 0;
75
76 /*
77 * different data direction or already started, don't merge
78 */
79 if (bio_data_dir(bio) != rq_data_dir(rq))
80 return 0;
81
82 /*
83 * must be same device and not a special request
84 */
85 if (rq->rq_disk != bio->bi_bdev->bd_disk || rq->special)
86 return 0;
87
88 if (!elv_iosched_allow_merge(rq, bio))
89 return 0;
90
91 return 1;
92 }
93 EXPORT_SYMBOL(elv_rq_merge_ok);
94
95 static inline int elv_try_merge(struct request *__rq, struct bio *bio)
96 {
97 int ret = ELEVATOR_NO_MERGE;
98
99 /*
100 * we can merge and sequence is ok, check if it's possible
101 */
102 if (elv_rq_merge_ok(__rq, bio)) {
103 if (__rq->sector + __rq->nr_sectors == bio->bi_sector)
104 ret = ELEVATOR_BACK_MERGE;
105 else if (__rq->sector - bio_sectors(bio) == bio->bi_sector)
106 ret = ELEVATOR_FRONT_MERGE;
107 }
108
109 return ret;
110 }
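/*
 * Worked example: with __rq covering sectors 100..107 (nr_sectors == 8)
 * and a 4-sector bio, a bio at sector 108 matches
 * __rq->sector + __rq->nr_sectors == bio->bi_sector and is a back merge,
 * while a bio at sector 96 matches
 * __rq->sector - bio_sectors(bio) == bio->bi_sector and is a front merge.
 */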
111
112 static struct elevator_type *elevator_find(const char *name)
113 {
114 struct elevator_type *e;
115
116 list_for_each_entry(e, &elv_list, list) {
117 if (!strcmp(e->elevator_name, name))
118 return e;
119 }
120
121 return NULL;
122 }
123
124 static void elevator_put(struct elevator_type *e)
125 {
126 module_put(e->elevator_owner);
127 }
128
129 static struct elevator_type *elevator_get(const char *name)
130 {
131 struct elevator_type *e;
132
133 spin_lock(&elv_list_lock);
134
135 e = elevator_find(name);
136 if (e && !try_module_get(e->elevator_owner))
137 e = NULL;
138
139 spin_unlock(&elv_list_lock);
140
141 return e;
142 }
143
144 static void *elevator_init_queue(request_queue_t *q, struct elevator_queue *eq)
145 {
146 return eq->ops->elevator_init_fn(q);
147 }
148
149 static void elevator_attach(request_queue_t *q, struct elevator_queue *eq,
150 void *data)
151 {
152 q->elevator = eq;
153 eq->elevator_data = data;
154 }
155
156 static char chosen_elevator[16];
157
158 static int __init elevator_setup(char *str)
159 {
160 /*
161 * Be backwards-compatible with previous kernels, so users
162 * won't get the wrong elevator.
163 */
164 if (!strcmp(str, "as"))
165 strcpy(chosen_elevator, "anticipatory");
166 else
167 strncpy(chosen_elevator, str, sizeof(chosen_elevator) - 1);
168 return 1;
169 }
170
171 __setup("elevator=", elevator_setup);
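/*
 * The boot command line selects the scheduler through this hook, e.g.
 * "elevator=deadline"; the legacy "elevator=as" spelling is rewritten to
 * "anticipatory" by elevator_setup() above.
 */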
172
173 static struct kobj_type elv_ktype;
174
175 static elevator_t *elevator_alloc(request_queue_t *q, struct elevator_type *e)
176 {
177 elevator_t *eq;
178 int i;
179
180 eq = kmalloc_node(sizeof(elevator_t), GFP_KERNEL | __GFP_ZERO, q->node);
181 if (unlikely(!eq))
182 goto err;
183
184 eq->ops = &e->ops;
185 eq->elevator_type = e;
186 kobject_init(&eq->kobj);
187 snprintf(eq->kobj.name, KOBJ_NAME_LEN, "%s", "iosched");
188 eq->kobj.ktype = &elv_ktype;
189 mutex_init(&eq->sysfs_lock);
190
191 eq->hash = kmalloc_node(sizeof(struct hlist_head) * ELV_HASH_ENTRIES,
192 GFP_KERNEL, q->node);
193 if (!eq->hash)
194 goto err;
195
196 for (i = 0; i < ELV_HASH_ENTRIES; i++)
197 INIT_HLIST_HEAD(&eq->hash[i]);
198
199 return eq;
200 err:
201 kfree(eq);
202 elevator_put(e);
203 return NULL;
204 }
205
206 static void elevator_release(struct kobject *kobj)
207 {
208 elevator_t *e = container_of(kobj, elevator_t, kobj);
209
210 elevator_put(e->elevator_type);
211 kfree(e->hash);
212 kfree(e);
213 }
214
215 int elevator_init(request_queue_t *q, char *name)
216 {
217 struct elevator_type *e = NULL;
218 struct elevator_queue *eq;
219 int ret = 0;
220 void *data;
221
222 INIT_LIST_HEAD(&q->queue_head);
223 q->last_merge = NULL;
224 q->end_sector = 0;
225 q->boundary_rq = NULL;
226
227 if (name && !(e = elevator_get(name)))
228 return -EINVAL;
229
230 if (!e && *chosen_elevator && !(e = elevator_get(chosen_elevator)))
231 printk("I/O scheduler %s not found\n", chosen_elevator);
232
233 if (!e && !(e = elevator_get(CONFIG_DEFAULT_IOSCHED))) {
234 printk("Default I/O scheduler not found, using no-op\n");
235 e = elevator_get("noop");
236 }
237
238 eq = elevator_alloc(q, e);
239 if (!eq)
240 return -ENOMEM;
241
242 data = elevator_init_queue(q, eq);
243 if (!data) {
244 kobject_put(&eq->kobj);
245 return -ENOMEM;
246 }
247
248 elevator_attach(q, eq, data);
249 return ret;
250 }
251
252 EXPORT_SYMBOL(elevator_init);
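/*
 * elevator_init() is normally called from the block layer's queue setup
 * path (e.g. blk_init_queue_node()) with name == NULL, so the selection
 * falls back to the "elevator=" boot parameter, then CONFIG_DEFAULT_IOSCHED,
 * with noop as the last resort.
 */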
253
254 void elevator_exit(elevator_t *e)
255 {
256 mutex_lock(&e->sysfs_lock);
257 if (e->ops->elevator_exit_fn)
258 e->ops->elevator_exit_fn(e);
259 e->ops = NULL;
260 mutex_unlock(&e->sysfs_lock);
261
262 kobject_put(&e->kobj);
263 }
264
265 EXPORT_SYMBOL(elevator_exit);
266
267 static void elv_activate_rq(request_queue_t *q, struct request *rq)
268 {
269 elevator_t *e = q->elevator;
270
271 if (e->ops->elevator_activate_req_fn)
272 e->ops->elevator_activate_req_fn(q, rq);
273 }
274
275 static void elv_deactivate_rq(request_queue_t *q, struct request *rq)
276 {
277 elevator_t *e = q->elevator;
278
279 if (e->ops->elevator_deactivate_req_fn)
280 e->ops->elevator_deactivate_req_fn(q, rq);
281 }
282
283 static inline void __elv_rqhash_del(struct request *rq)
284 {
285 hlist_del_init(&rq->hash);
286 }
287
288 static void elv_rqhash_del(request_queue_t *q, struct request *rq)
289 {
290 if (ELV_ON_HASH(rq))
291 __elv_rqhash_del(rq);
292 }
293
294 static void elv_rqhash_add(request_queue_t *q, struct request *rq)
295 {
296 elevator_t *e = q->elevator;
297
298 BUG_ON(ELV_ON_HASH(rq));
299 hlist_add_head(&rq->hash, &e->hash[ELV_HASH_FN(rq_hash_key(rq))]);
300 }
301
302 static void elv_rqhash_reposition(request_queue_t *q, struct request *rq)
303 {
304 __elv_rqhash_del(rq);
305 elv_rqhash_add(q, rq);
306 }
307
308 static struct request *elv_rqhash_find(request_queue_t *q, sector_t offset)
309 {
310 elevator_t *e = q->elevator;
311 struct hlist_head *hash_list = &e->hash[ELV_HASH_FN(offset)];
312 struct hlist_node *entry, *next;
313 struct request *rq;
314
315 hlist_for_each_entry_safe(rq, entry, next, hash_list, hash) {
316 BUG_ON(!ELV_ON_HASH(rq));
317
318 if (unlikely(!rq_mergeable(rq))) {
319 __elv_rqhash_del(rq);
320 continue;
321 }
322
323 if (rq_hash_key(rq) == offset)
324 return rq;
325 }
326
327 return NULL;
328 }
329
330 /*
331 * RB-tree support functions for inserting/lookup/removal of requests
332 * in a sorted RB tree.
333 */
334 struct request *elv_rb_add(struct rb_root *root, struct request *rq)
335 {
336 struct rb_node **p = &root->rb_node;
337 struct rb_node *parent = NULL;
338 struct request *__rq;
339
340 while (*p) {
341 parent = *p;
342 __rq = rb_entry(parent, struct request, rb_node);
343
344 if (rq->sector < __rq->sector)
345 p = &(*p)->rb_left;
346 else if (rq->sector > __rq->sector)
347 p = &(*p)->rb_right;
348 else
349 return __rq;
350 }
351
352 rb_link_node(&rq->rb_node, parent, p);
353 rb_insert_color(&rq->rb_node, root);
354 return NULL;
355 }
356
357 EXPORT_SYMBOL(elv_rb_add);
358
359 void elv_rb_del(struct rb_root *root, struct request *rq)
360 {
361 BUG_ON(RB_EMPTY_NODE(&rq->rb_node));
362 rb_erase(&rq->rb_node, root);
363 RB_CLEAR_NODE(&rq->rb_node);
364 }
365
366 EXPORT_SYMBOL(elv_rb_del);
367
368 struct request *elv_rb_find(struct rb_root *root, sector_t sector)
369 {
370 struct rb_node *n = root->rb_node;
371 struct request *rq;
372
373 while (n) {
374 rq = rb_entry(n, struct request, rb_node);
375
376 if (sector < rq->sector)
377 n = n->rb_left;
378 else if (sector > rq->sector)
379 n = n->rb_right;
380 else
381 return rq;
382 }
383
384 return NULL;
385 }
386
387 EXPORT_SYMBOL(elv_rb_find);
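/*
 * Typical use from an io scheduler that keeps its sorted list in a
 * struct rb_root (the names below are only illustrative):
 *
 *	struct request *__alias = elv_rb_add(&sort_list, rq);
 *	struct request *found   = elv_rb_find(&sort_list, sector);
 *	elv_rb_del(&sort_list, rq);
 *
 * elv_rb_add() returns an already-queued request with the same sector
 * instead of inserting a duplicate key.
 */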
388
389 /*
390 * Insert rq into dispatch queue of q. Queue lock must be held on
391 * entry. rq is sort inserted into the dispatch queue. To be used by
392 * specific elevators.
393 */
394 void elv_dispatch_sort(request_queue_t *q, struct request *rq)
395 {
396 sector_t boundary;
397 struct list_head *entry;
398
399 if (q->last_merge == rq)
400 q->last_merge = NULL;
401
402 elv_rqhash_del(q, rq);
403
404 q->nr_sorted--;
405
406 boundary = q->end_sector;
407
408 list_for_each_prev(entry, &q->queue_head) {
409 struct request *pos = list_entry_rq(entry);
410
411 if (rq_data_dir(rq) != rq_data_dir(pos))
412 break;
413 if (pos->cmd_flags & (REQ_SOFTBARRIER|REQ_HARDBARRIER|REQ_STARTED))
414 break;
415 if (rq->sector >= boundary) {
416 if (pos->sector < boundary)
417 continue;
418 } else {
419 if (pos->sector >= boundary)
420 break;
421 }
422 if (rq->sector >= pos->sector)
423 break;
424 }
425
426 list_add(&rq->queuelist, entry);
427 }
428
429 EXPORT_SYMBOL(elv_dispatch_sort);
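/*
 * Worked example: with q->end_sector == 500 and a dispatch queue holding
 * requests at sectors 520, 600, 50, 80 (head to tail), a new rq at
 * sector 560 is inserted between 520 and 600, and one at sector 60
 * between 50 and 80: requests at or beyond the boundary stay sorted
 * ahead of the wrapped ones below it, preserving one-way head movement.
 */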
430
431 /*
432 * Insert rq into dispatch queue of q. Queue lock must be held on
433 * entry. rq is added to the back of the dispatch queue. To be used by
434 * specific elevators.
435 */
436 void elv_dispatch_add_tail(struct request_queue *q, struct request *rq)
437 {
438 if (q->last_merge == rq)
439 q->last_merge = NULL;
440
441 elv_rqhash_del(q, rq);
442
443 q->nr_sorted--;
444
445 q->end_sector = rq_end_sector(rq);
446 q->boundary_rq = rq;
447 list_add_tail(&rq->queuelist, &q->queue_head);
448 }
449
450 EXPORT_SYMBOL(elv_dispatch_add_tail);
451
452 int elv_merge(request_queue_t *q, struct request **req, struct bio *bio)
453 {
454 elevator_t *e = q->elevator;
455 struct request *__rq;
456 int ret;
457
458 /*
459 * First try one-hit cache.
460 */
461 if (q->last_merge) {
462 ret = elv_try_merge(q->last_merge, bio);
463 if (ret != ELEVATOR_NO_MERGE) {
464 *req = q->last_merge;
465 return ret;
466 }
467 }
468
469 /*
470 * See if our hash lookup can find a potential backmerge.
471 */
472 __rq = elv_rqhash_find(q, bio->bi_sector);
473 if (__rq && elv_rq_merge_ok(__rq, bio)) {
474 *req = __rq;
475 return ELEVATOR_BACK_MERGE;
476 }
477
478 if (e->ops->elevator_merge_fn)
479 return e->ops->elevator_merge_fn(q, req, bio);
480
481 return ELEVATOR_NO_MERGE;
482 }
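/*
 * Merge candidates are tried cheapest first: the one-hit last_merge
 * cache, then the end-sector hash (back merges only), and finally the
 * io scheduler's own elevator_merge_fn, which typically handles front
 * merges via its sort tree.
 */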
483
484 void elv_merged_request(request_queue_t *q, struct request *rq, int type)
485 {
486 elevator_t *e = q->elevator;
487
488 if (e->ops->elevator_merged_fn)
489 e->ops->elevator_merged_fn(q, rq, type);
490
491 if (type == ELEVATOR_BACK_MERGE)
492 elv_rqhash_reposition(q, rq);
493
494 q->last_merge = rq;
495 }
496
497 void elv_merge_requests(request_queue_t *q, struct request *rq,
498 struct request *next)
499 {
500 elevator_t *e = q->elevator;
501
502 if (e->ops->elevator_merge_req_fn)
503 e->ops->elevator_merge_req_fn(q, rq, next);
504
505 elv_rqhash_reposition(q, rq);
506 elv_rqhash_del(q, next);
507
508 q->nr_sorted--;
509 q->last_merge = rq;
510 }
511
512 void elv_requeue_request(request_queue_t *q, struct request *rq)
513 {
514 /*
515 * it already went through dequeue, we need to decrement the
516 * in_flight count again
517 */
518 if (blk_account_rq(rq)) {
519 q->in_flight--;
520 if (blk_sorted_rq(rq))
521 elv_deactivate_rq(q, rq);
522 }
523
524 rq->cmd_flags &= ~REQ_STARTED;
525
526 elv_insert(q, rq, ELEVATOR_INSERT_REQUEUE);
527 }
528
529 static void elv_drain_elevator(request_queue_t *q)
530 {
531 static int printed;
532 while (q->elevator->ops->elevator_dispatch_fn(q, 1))
533 ;
534 if (q->nr_sorted == 0)
535 return;
536 if (printed++ < 10) {
537 printk(KERN_ERR "%s: forced dispatching is broken "
538 "(nr_sorted=%u), please report this\n",
539 q->elevator->elevator_type->elevator_name, q->nr_sorted);
540 }
541 }
542
543 void elv_insert(request_queue_t *q, struct request *rq, int where)
544 {
545 struct list_head *pos;
546 unsigned ordseq;
547 int unplug_it = 1;
548
549 blk_add_trace_rq(q, rq, BLK_TA_INSERT);
550
551 rq->q = q;
552
553 switch (where) {
554 case ELEVATOR_INSERT_FRONT:
555 rq->cmd_flags |= REQ_SOFTBARRIER;
556
557 list_add(&rq->queuelist, &q->queue_head);
558 break;
559
560 case ELEVATOR_INSERT_BACK:
561 rq->cmd_flags |= REQ_SOFTBARRIER;
562 elv_drain_elevator(q);
563 list_add_tail(&rq->queuelist, &q->queue_head);
564 /*
565 * We kick the queue here for the following reasons.
566 * - The elevator might have returned NULL previously
567 * to delay requests and returned them now. As the
568 * queue wasn't empty before this request, ll_rw_blk
569 * won't run the queue on return, resulting in a hang.
570 * - Usually, back inserted requests won't be merged
571 * with anything. There's no point in delaying queue
572 * processing.
573 */
574 blk_remove_plug(q);
575 q->request_fn(q);
576 break;
577
578 case ELEVATOR_INSERT_SORT:
579 BUG_ON(!blk_fs_request(rq));
580 rq->cmd_flags |= REQ_SORTED;
581 q->nr_sorted++;
582 if (rq_mergeable(rq)) {
583 elv_rqhash_add(q, rq);
584 if (!q->last_merge)
585 q->last_merge = rq;
586 }
587
588 /*
589 * Some ioscheds (cfq) run q->request_fn directly, so
590 * rq cannot be accessed after calling
591 * elevator_add_req_fn.
592 */
593 q->elevator->ops->elevator_add_req_fn(q, rq);
594 break;
595
596 case ELEVATOR_INSERT_REQUEUE:
597 /*
598 * If ordered flush isn't in progress, we do front
599 * insertion; otherwise, requests should be requeued
600 * in ordseq order.
601 */
602 rq->cmd_flags |= REQ_SOFTBARRIER;
603
604 /*
605 * Most requeues happen because of a busy condition,
606 * don't force unplug of the queue for that case.
607 */
608 unplug_it = 0;
609
610 if (q->ordseq == 0) {
611 list_add(&rq->queuelist, &q->queue_head);
612 break;
613 }
614
615 ordseq = blk_ordered_req_seq(rq);
616
617 list_for_each(pos, &q->queue_head) {
618 struct request *pos_rq = list_entry_rq(pos);
619 if (ordseq <= blk_ordered_req_seq(pos_rq))
620 break;
621 }
622
623 list_add_tail(&rq->queuelist, pos);
624 break;
625
626 default:
627 printk(KERN_ERR "%s: bad insertion point %d\n",
628 __FUNCTION__, where);
629 BUG();
630 }
631
632 if (unplug_it && blk_queue_plugged(q)) {
633 int nrq = q->rq.count[READ] + q->rq.count[WRITE]
634 - q->in_flight;
635
636 if (nrq >= q->unplug_thresh)
637 __generic_unplug_device(q);
638 }
639 }
640
641 void __elv_add_request(request_queue_t *q, struct request *rq, int where,
642 int plug)
643 {
644 if (q->ordcolor)
645 rq->cmd_flags |= REQ_ORDERED_COLOR;
646
647 if (rq->cmd_flags & (REQ_SOFTBARRIER | REQ_HARDBARRIER)) {
648 /*
649 * toggle ordered color
650 */
651 if (blk_barrier_rq(rq))
652 q->ordcolor ^= 1;
653
654 /*
655 * barriers implicitly indicate back insertion
656 */
657 if (where == ELEVATOR_INSERT_SORT)
658 where = ELEVATOR_INSERT_BACK;
659
660 /*
661 * this request is scheduling boundary, update
662 * end_sector
663 */
664 if (blk_fs_request(rq)) {
665 q->end_sector = rq_end_sector(rq);
666 q->boundary_rq = rq;
667 }
668 } else if (!(rq->cmd_flags & REQ_ELVPRIV) && where == ELEVATOR_INSERT_SORT)
669 where = ELEVATOR_INSERT_BACK;
670
671 if (plug)
672 blk_plug_device(q);
673
674 elv_insert(q, rq, where);
675 }
676
677 EXPORT_SYMBOL(__elv_add_request);
678
679 void elv_add_request(request_queue_t *q, struct request *rq, int where,
680 int plug)
681 {
682 unsigned long flags;
683
684 spin_lock_irqsave(q->queue_lock, flags);
685 __elv_add_request(q, rq, where, plug);
686 spin_unlock_irqrestore(q->queue_lock, flags);
687 }
688
689 EXPORT_SYMBOL(elv_add_request);
690
691 static inline struct request *__elv_next_request(request_queue_t *q)
692 {
693 struct request *rq;
694
695 while (1) {
696 while (!list_empty(&q->queue_head)) {
697 rq = list_entry_rq(q->queue_head.next);
698 if (blk_do_ordered(q, &rq))
699 return rq;
700 }
701
702 if (!q->elevator->ops->elevator_dispatch_fn(q, 0))
703 return NULL;
704 }
705 }
706
707 struct request *elv_next_request(request_queue_t *q)
708 {
709 struct request *rq;
710 int ret;
711
712 while ((rq = __elv_next_request(q)) != NULL) {
713 if (!(rq->cmd_flags & REQ_STARTED)) {
714 /*
715 * This is the first time the device driver
716 * sees this request (possibly after
717 * requeueing). Notify IO scheduler.
718 */
719 if (blk_sorted_rq(rq))
720 elv_activate_rq(q, rq);
721
722 /*
723 * just mark as started even if we don't start
724 * it, a request that has been delayed should
725 * not be passed by new incoming requests
726 */
727 rq->cmd_flags |= REQ_STARTED;
728 blk_add_trace_rq(q, rq, BLK_TA_ISSUE);
729 }
730
731 if (!q->boundary_rq || q->boundary_rq == rq) {
732 q->end_sector = rq_end_sector(rq);
733 q->boundary_rq = NULL;
734 }
735
736 if ((rq->cmd_flags & REQ_DONTPREP) || !q->prep_rq_fn)
737 break;
738
739 ret = q->prep_rq_fn(q, rq);
740 if (ret == BLKPREP_OK) {
741 break;
742 } else if (ret == BLKPREP_DEFER) {
743 /*
744 * the request may have been (partially) prepped.
745 * we need to keep this request in the front to
746 * avoid resource deadlock. REQ_STARTED will
747 * prevent other fs requests from passing this one.
748 */
749 rq = NULL;
750 break;
751 } else if (ret == BLKPREP_KILL) {
752 int nr_bytes = rq->hard_nr_sectors << 9;
753
754 if (!nr_bytes)
755 nr_bytes = rq->data_len;
756
757 blkdev_dequeue_request(rq);
758 rq->cmd_flags |= REQ_QUIET;
759 end_that_request_chunk(rq, 0, nr_bytes);
760 end_that_request_last(rq, 0);
761 } else {
762 printk(KERN_ERR "%s: bad return=%d\n", __FUNCTION__,
763 ret);
764 break;
765 }
766 }
767
768 return rq;
769 }
770
771 EXPORT_SYMBOL(elv_next_request);
772
773 void elv_dequeue_request(request_queue_t *q, struct request *rq)
774 {
775 BUG_ON(list_empty(&rq->queuelist));
776 BUG_ON(ELV_ON_HASH(rq));
777
778 list_del_init(&rq->queuelist);
779
780 /*
781 * the time frame between a request being removed from the lists
782 * and when it is freed is accounted as io that is in progress at
783 * the driver side.
784 */
785 if (blk_account_rq(rq))
786 q->in_flight++;
787 }
788
789 EXPORT_SYMBOL(elv_dequeue_request);
790
791 int elv_queue_empty(request_queue_t *q)
792 {
793 elevator_t *e = q->elevator;
794
795 if (!list_empty(&q->queue_head))
796 return 0;
797
798 if (e->ops->elevator_queue_empty_fn)
799 return e->ops->elevator_queue_empty_fn(q);
800
801 return 1;
802 }
803
804 EXPORT_SYMBOL(elv_queue_empty);
805
806 struct request *elv_latter_request(request_queue_t *q, struct request *rq)
807 {
808 elevator_t *e = q->elevator;
809
810 if (e->ops->elevator_latter_req_fn)
811 return e->ops->elevator_latter_req_fn(q, rq);
812 return NULL;
813 }
814
815 struct request *elv_former_request(request_queue_t *q, struct request *rq)
816 {
817 elevator_t *e = q->elevator;
818
819 if (e->ops->elevator_former_req_fn)
820 return e->ops->elevator_former_req_fn(q, rq);
821 return NULL;
822 }
823
824 int elv_set_request(request_queue_t *q, struct request *rq, gfp_t gfp_mask)
825 {
826 elevator_t *e = q->elevator;
827
828 if (e->ops->elevator_set_req_fn)
829 return e->ops->elevator_set_req_fn(q, rq, gfp_mask);
830
831 rq->elevator_private = NULL;
832 return 0;
833 }
834
835 void elv_put_request(request_queue_t *q, struct request *rq)
836 {
837 elevator_t *e = q->elevator;
838
839 if (e->ops->elevator_put_req_fn)
840 e->ops->elevator_put_req_fn(rq);
841 }
842
843 int elv_may_queue(request_queue_t *q, int rw)
844 {
845 elevator_t *e = q->elevator;
846
847 if (e->ops->elevator_may_queue_fn)
848 return e->ops->elevator_may_queue_fn(q, rw);
849
850 return ELV_MQUEUE_MAY;
851 }
852
853 void elv_completed_request(request_queue_t *q, struct request *rq)
854 {
855 elevator_t *e = q->elevator;
856
857 /*
858 * request is released from the driver, io must be done
859 */
860 if (blk_account_rq(rq)) {
861 q->in_flight--;
862 if (blk_sorted_rq(rq) && e->ops->elevator_completed_req_fn)
863 e->ops->elevator_completed_req_fn(q, rq);
864 }
865
866 /*
867 * Check if the queue is waiting for fs requests to be
868 * drained for flush sequence.
869 */
870 if (unlikely(q->ordseq)) {
871 struct request *first_rq = list_entry_rq(q->queue_head.next);
872 if (q->in_flight == 0 &&
873 blk_ordered_cur_seq(q) == QUEUE_ORDSEQ_DRAIN &&
874 blk_ordered_req_seq(first_rq) > QUEUE_ORDSEQ_DRAIN) {
875 blk_ordered_complete_seq(q, QUEUE_ORDSEQ_DRAIN, 0);
876 q->request_fn(q);
877 }
878 }
879 }
880
881 #define to_elv(atr) container_of((atr), struct elv_fs_entry, attr)
882
883 static ssize_t
884 elv_attr_show(struct kobject *kobj, struct attribute *attr, char *page)
885 {
886 elevator_t *e = container_of(kobj, elevator_t, kobj);
887 struct elv_fs_entry *entry = to_elv(attr);
888 ssize_t error;
889
890 if (!entry->show)
891 return -EIO;
892
893 mutex_lock(&e->sysfs_lock);
894 error = e->ops ? entry->show(e, page) : -ENOENT;
895 mutex_unlock(&e->sysfs_lock);
896 return error;
897 }
898
899 static ssize_t
900 elv_attr_store(struct kobject *kobj, struct attribute *attr,
901 const char *page, size_t length)
902 {
903 elevator_t *e = container_of(kobj, elevator_t, kobj);
904 struct elv_fs_entry *entry = to_elv(attr);
905 ssize_t error;
906
907 if (!entry->store)
908 return -EIO;
909
910 mutex_lock(&e->sysfs_lock);
911 error = e->ops ? entry->store(e, page, length) : -ENOENT;
912 mutex_unlock(&e->sysfs_lock);
913 return error;
914 }
915
916 static struct sysfs_ops elv_sysfs_ops = {
917 .show = elv_attr_show,
918 .store = elv_attr_store,
919 };
920
921 static struct kobj_type elv_ktype = {
922 .sysfs_ops = &elv_sysfs_ops,
923 .release = elevator_release,
924 };
925
926 int elv_register_queue(struct request_queue *q)
927 {
928 elevator_t *e = q->elevator;
929 int error;
930
931 e->kobj.parent = &q->kobj;
932
933 error = kobject_add(&e->kobj);
934 if (!error) {
935 struct elv_fs_entry *attr = e->elevator_type->elevator_attrs;
936 if (attr) {
937 while (attr->attr.name) {
938 if (sysfs_create_file(&e->kobj, &attr->attr))
939 break;
940 attr++;
941 }
942 }
943 kobject_uevent(&e->kobj, KOBJ_ADD);
944 }
945 return error;
946 }
947
948 static void __elv_unregister_queue(elevator_t *e)
949 {
950 kobject_uevent(&e->kobj, KOBJ_REMOVE);
951 kobject_del(&e->kobj);
952 }
953
954 void elv_unregister_queue(struct request_queue *q)
955 {
956 if (q)
957 __elv_unregister_queue(q->elevator);
958 }
959
960 int elv_register(struct elevator_type *e)
961 {
962 char *def = "";
963
964 spin_lock(&elv_list_lock);
965 BUG_ON(elevator_find(e->elevator_name));
966 list_add_tail(&e->list, &elv_list);
967 spin_unlock(&elv_list_lock);
968
969 if (!strcmp(e->elevator_name, chosen_elevator) ||
970 (!*chosen_elevator &&
971 !strcmp(e->elevator_name, CONFIG_DEFAULT_IOSCHED)))
972 def = " (default)";
973
974 printk(KERN_INFO "io scheduler %s registered%s\n", e->elevator_name, def);
975 return 0;
976 }
977 EXPORT_SYMBOL_GPL(elv_register);
978
979 void elv_unregister(struct elevator_type *e)
980 {
981 struct task_struct *g, *p;
982
983 /*
984 * Iterate over every thread in the system and trim its io context.
985 */
986 if (e->ops.trim) {
987 read_lock(&tasklist_lock);
988 do_each_thread(g, p) {
989 task_lock(p);
990 if (p->io_context)
991 e->ops.trim(p->io_context);
992 task_unlock(p);
993 } while_each_thread(g, p);
994 read_unlock(&tasklist_lock);
995 }
996
997 spin_lock(&elv_list_lock);
998 list_del_init(&e->list);
999 spin_unlock(&elv_list_lock);
1000 }
1001 EXPORT_SYMBOL_GPL(elv_unregister);
1002
1003 /*
1004 * switch to new_e io scheduler. be careful not to introduce deadlocks -
1005 * we don't free the old io scheduler before we have allocated what we
1006 * need for the new one. this way we have a chance of going back to the old
1007 * one if the new one fails init for some reason.
1008 */
1009 static int elevator_switch(request_queue_t *q, struct elevator_type *new_e)
1010 {
1011 elevator_t *old_elevator, *e;
1012 void *data;
1013
1014 /*
1015 * Allocate new elevator
1016 */
1017 e = elevator_alloc(q, new_e);
1018 if (!e)
1019 return 0;
1020
1021 data = elevator_init_queue(q, e);
1022 if (!data) {
1023 kobject_put(&e->kobj);
1024 return 0;
1025 }
1026
1027 /*
1028 * Turn on BYPASS and drain all requests w/ elevator private data
1029 */
1030 spin_lock_irq(q->queue_lock);
1031
1032 set_bit(QUEUE_FLAG_ELVSWITCH, &q->queue_flags);
1033
1034 elv_drain_elevator(q);
1035
1036 while (q->rq.elvpriv) {
1037 blk_remove_plug(q);
1038 q->request_fn(q);
1039 spin_unlock_irq(q->queue_lock);
1040 msleep(10);
1041 spin_lock_irq(q->queue_lock);
1042 elv_drain_elevator(q);
1043 }
1044
1045 /*
1046 * Remember old elevator.
1047 */
1048 old_elevator = q->elevator;
1049
1050 /*
1051 * attach and start new elevator
1052 */
1053 elevator_attach(q, e, data);
1054
1055 spin_unlock_irq(q->queue_lock);
1056
1057 __elv_unregister_queue(old_elevator);
1058
1059 if (elv_register_queue(q))
1060 goto fail_register;
1061
1062 /*
1063 * finally exit old elevator and turn off BYPASS.
1064 */
1065 elevator_exit(old_elevator);
1066 clear_bit(QUEUE_FLAG_ELVSWITCH, &q->queue_flags);
1067 return 1;
1068
1069 fail_register:
1070 /*
1071 * switch failed, exit the new io scheduler and reattach the old
1072 * one again (along with re-adding the sysfs dir)
1073 */
1074 elevator_exit(e);
1075 q->elevator = old_elevator;
1076 elv_register_queue(q);
1077 clear_bit(QUEUE_FLAG_ELVSWITCH, &q->queue_flags);
1078 return 0;
1079 }
1080
1081 ssize_t elv_iosched_store(request_queue_t *q, const char *name, size_t count)
1082 {
1083 char elevator_name[ELV_NAME_MAX];
1084 size_t len;
1085 struct elevator_type *e;
1086
1087 elevator_name[sizeof(elevator_name) - 1] = '\0';
1088 strncpy(elevator_name, name, sizeof(elevator_name) - 1);
1089 len = strlen(elevator_name);
1090
1091 if (len && elevator_name[len - 1] == '\n')
1092 elevator_name[len - 1] = '\0';
1093
1094 e = elevator_get(elevator_name);
1095 if (!e) {
1096 printk(KERN_ERR "elevator: type %s not found\n", elevator_name);
1097 return -EINVAL;
1098 }
1099
1100 if (!strcmp(elevator_name, q->elevator->elevator_type->elevator_name)) {
1101 elevator_put(e);
1102 return count;
1103 }
1104
1105 if (!elevator_switch(q, e))
1106 printk(KERN_ERR "elevator: switch to %s failed\n",elevator_name);
1107 return count;
1108 }
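/*
 * This is the store side of the per-queue sysfs "scheduler" attribute,
 * so a runtime switch is, for example:
 *
 *	# echo deadline > /sys/block/sda/queue/scheduler
 *
 * elv_iosched_show() below prints the registered schedulers with the
 * active one in square brackets.
 */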
1109
1110 ssize_t elv_iosched_show(request_queue_t *q, char *name)
1111 {
1112 elevator_t *e = q->elevator;
1113 struct elevator_type *elv = e->elevator_type;
1114 struct elevator_type *__e;
1115 int len = 0;
1116
1117 spin_lock(&elv_list_lock);
1118 list_for_each_entry(__e, &elv_list, list) {
1119 if (!strcmp(elv->elevator_name, __e->elevator_name))
1120 len += sprintf(name+len, "[%s] ", elv->elevator_name);
1121 else
1122 len += sprintf(name+len, "%s ", __e->elevator_name);
1123 }
1124 spin_unlock(&elv_list_lock);
1125
1126 len += sprintf(len+name, "\n");
1127 return len;
1128 }
1129
1130 struct request *elv_rb_former_request(request_queue_t *q, struct request *rq)
1131 {
1132 struct rb_node *rbprev = rb_prev(&rq->rb_node);
1133
1134 if (rbprev)
1135 return rb_entry_rq(rbprev);
1136
1137 return NULL;
1138 }
1139
1140 EXPORT_SYMBOL(elv_rb_former_request);
1141
1142 struct request *elv_rb_latter_request(request_queue_t *q, struct request *rq)
1143 {
1144 struct rb_node *rbnext = rb_next(&rq->rb_node);
1145
1146 if (rbnext)
1147 return rb_entry_rq(rbnext);
1148
1149 return NULL;
1150 }
1151
1152 EXPORT_SYMBOL(elv_rb_latter_request);