/*
 * Block device elevator/IO-scheduler.
 *
 * Copyright (C) 2000 Andrea Arcangeli <andrea@suse.de> SuSE
 *
 * 30042000 Jens Axboe <axboe@kernel.dk> :
 *
 * Split the elevator a bit so that it is possible to choose a different
 * one or even write a new "plug in". There are three pieces:
 * - elevator_fn, inserts a new request in the queue list
 * - elevator_merge_fn, decides whether a new buffer can be merged with
 *   an existing request
 * - elevator_dequeue_fn, called when a request is taken off the active list
 *
 * 20082000 Dave Jones <davej@suse.de> :
 * Removed tests for max-bomb-segments, which was breaking elvtune
 * when run without -bN
 *
 * Jens:
 * - Rework again to work with bio instead of buffer_heads
 * - lose bi_dev comparisons, partition handling is right now
 * - completely modularize elevator setup and teardown
 *
 */
#include <linux/kernel.h>
#include <linux/fs.h>
#include <linux/blkdev.h>
#include <linux/elevator.h>
#include <linux/bio.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/init.h>
#include <linux/compiler.h>
#include <linux/delay.h>
#include <linux/blktrace_api.h>
#include <linux/hash.h>

#include <asm/uaccess.h>

static DEFINE_SPINLOCK(elv_list_lock);
static LIST_HEAD(elv_list);

/*
 * Merge hash stuff.
 */
static const int elv_hash_shift = 6;
#define ELV_HASH_BLOCK(sec)	((sec) >> 3)
#define ELV_HASH_FN(sec)	(hash_long(ELV_HASH_BLOCK((sec)), elv_hash_shift))
#define ELV_HASH_ENTRIES	(1 << elv_hash_shift)
#define rq_hash_key(rq)		((rq)->sector + (rq)->nr_sectors)
#define ELV_ON_HASH(rq)		(!hlist_unhashed(&(rq)->hash))
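
/*
 * Worked example (editorial, illustrative only): a request covering
 * sectors [100, 108) hashes on its *end* sector,
 *
 *	rq->sector = 100, rq->nr_sectors = 8  =>  rq_hash_key(rq) == 108
 *	bucket = ELV_HASH_FN(108) == hash_long(108 >> 3, 6)
 *
 * so a later bio starting at sector 108 can find this request as a
 * back merge candidate via elv_rqhash_find() below.  ELV_HASH_BLOCK
 * coarsens the key to 8-sector blocks before hashing.
 */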

/*
 * Query io scheduler to see if the current process issuing bio may be
 * merged with rq.
 */
static int elv_iosched_allow_merge(struct request *rq, struct bio *bio)
{
	struct request_queue *q = rq->q;
	elevator_t *e = q->elevator;

	if (e->ops->elevator_allow_merge_fn)
		return e->ops->elevator_allow_merge_fn(q, rq, bio);

	return 1;
}

/*
 * can we safely merge with this request?
 */
inline int elv_rq_merge_ok(struct request *rq, struct bio *bio)
{
	if (!rq_mergeable(rq))
		return 0;

	/*
	 * different data direction or already started, don't merge
	 */
	if (bio_data_dir(bio) != rq_data_dir(rq))
		return 0;

	/*
	 * must be same device and not a special request
	 */
	if (rq->rq_disk != bio->bi_bdev->bd_disk || rq->special)
		return 0;

	if (!elv_iosched_allow_merge(rq, bio))
		return 0;

	return 1;
}
EXPORT_SYMBOL(elv_rq_merge_ok);

static inline int elv_try_merge(struct request *__rq, struct bio *bio)
{
	int ret = ELEVATOR_NO_MERGE;

	/*
	 * we can merge and sequence is ok, check if it's possible
	 */
	if (elv_rq_merge_ok(__rq, bio)) {
		if (__rq->sector + __rq->nr_sectors == bio->bi_sector)
			ret = ELEVATOR_BACK_MERGE;
		else if (__rq->sector - bio_sectors(bio) == bio->bi_sector)
			ret = ELEVATOR_FRONT_MERGE;
	}

	return ret;
}
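
/*
 * Worked example for elv_try_merge() (editorial, illustrative only):
 * with __rq covering sectors [64, 72) and a 4-sector bio,
 *
 *	bio->bi_sector == 72  ->  ELEVATOR_BACK_MERGE  (bio starts where
 *	                          __rq ends)
 *	bio->bi_sector == 60  ->  ELEVATOR_FRONT_MERGE (bio ends where
 *	                          __rq starts: 64 - bio_sectors(bio) == 60)
 *	anything else         ->  ELEVATOR_NO_MERGE
 */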

static struct elevator_type *elevator_find(const char *name)
{
	struct elevator_type *e;

	list_for_each_entry(e, &elv_list, list) {
		if (!strcmp(e->elevator_name, name))
			return e;
	}

	return NULL;
}

static void elevator_put(struct elevator_type *e)
{
	module_put(e->elevator_owner);
}

static struct elevator_type *elevator_get(const char *name)
{
	struct elevator_type *e;

	spin_lock(&elv_list_lock);

	e = elevator_find(name);
	if (e && !try_module_get(e->elevator_owner))
		e = NULL;

	spin_unlock(&elv_list_lock);

	return e;
}

static void *elevator_init_queue(struct request_queue *q,
				 struct elevator_queue *eq)
{
	return eq->ops->elevator_init_fn(q);
}

static void elevator_attach(struct request_queue *q, struct elevator_queue *eq,
			    void *data)
{
	q->elevator = eq;
	eq->elevator_data = data;
}

static char chosen_elevator[16];

static int __init elevator_setup(char *str)
{
	/*
	 * Be backwards-compatible with previous kernels, so users
	 * won't get the wrong elevator.
	 */
	if (!strcmp(str, "as"))
		strcpy(chosen_elevator, "anticipatory");
	else
		strncpy(chosen_elevator, str, sizeof(chosen_elevator) - 1);
	return 1;
}

__setup("elevator=", elevator_setup);
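
/*
 * Usage note (editorial, not from the original file): booting with the
 * command-line option
 *
 *	elevator=deadline
 *
 * fills chosen_elevator, which elevator_init() below tries before
 * falling back to CONFIG_DEFAULT_IOSCHED.  The legacy "elevator=as"
 * spelling is rewritten to "anticipatory" above.
 */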

static struct kobj_type elv_ktype;

static elevator_t *elevator_alloc(struct request_queue *q,
				  struct elevator_type *e)
{
	elevator_t *eq;
	int i;

	eq = kmalloc_node(sizeof(elevator_t), GFP_KERNEL | __GFP_ZERO, q->node);
	if (unlikely(!eq))
		goto err;

	eq->ops = &e->ops;
	eq->elevator_type = e;
	kobject_init(&eq->kobj);
	snprintf(eq->kobj.name, KOBJ_NAME_LEN, "%s", "iosched");
	eq->kobj.ktype = &elv_ktype;
	mutex_init(&eq->sysfs_lock);

	eq->hash = kmalloc_node(sizeof(struct hlist_head) * ELV_HASH_ENTRIES,
				GFP_KERNEL, q->node);
	if (!eq->hash)
		goto err;

	for (i = 0; i < ELV_HASH_ENTRIES; i++)
		INIT_HLIST_HEAD(&eq->hash[i]);

	return eq;
err:
	kfree(eq);
	elevator_put(e);
	return NULL;
}

static void elevator_release(struct kobject *kobj)
{
	elevator_t *e = container_of(kobj, elevator_t, kobj);

	elevator_put(e->elevator_type);
	kfree(e->hash);
	kfree(e);
}

int elevator_init(struct request_queue *q, char *name)
{
	struct elevator_type *e = NULL;
	struct elevator_queue *eq;
	int ret = 0;
	void *data;

	INIT_LIST_HEAD(&q->queue_head);
	q->last_merge = NULL;
	q->end_sector = 0;
	q->boundary_rq = NULL;

	if (name && !(e = elevator_get(name)))
		return -EINVAL;

	if (!e && *chosen_elevator && !(e = elevator_get(chosen_elevator)))
		printk("I/O scheduler %s not found\n", chosen_elevator);

	if (!e && !(e = elevator_get(CONFIG_DEFAULT_IOSCHED))) {
		printk("Default I/O scheduler not found, using no-op\n");
		e = elevator_get("noop");
	}

	eq = elevator_alloc(q, e);
	if (!eq)
		return -ENOMEM;

	data = elevator_init_queue(q, eq);
	if (!data) {
		kobject_put(&eq->kobj);
		return -ENOMEM;
	}

	elevator_attach(q, eq, data);
	return ret;
}

EXPORT_SYMBOL(elevator_init);
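
/*
 * Caller sketch (editorial; simplified from the block core's queue
 * initialization of this era, e.g. blk_init_queue_node(), and not an
 * exact copy of it):
 *
 *	if (elevator_init(q, NULL)) {
 *		blk_put_queue(q);
 *		return NULL;
 *	}
 *
 * Passing NULL for the name selects the boot-time/default scheduler.
 */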

void elevator_exit(elevator_t *e)
{
	mutex_lock(&e->sysfs_lock);
	if (e->ops->elevator_exit_fn)
		e->ops->elevator_exit_fn(e);
	e->ops = NULL;
	mutex_unlock(&e->sysfs_lock);

	kobject_put(&e->kobj);
}

EXPORT_SYMBOL(elevator_exit);

static void elv_activate_rq(struct request_queue *q, struct request *rq)
{
	elevator_t *e = q->elevator;

	if (e->ops->elevator_activate_req_fn)
		e->ops->elevator_activate_req_fn(q, rq);
}

static void elv_deactivate_rq(struct request_queue *q, struct request *rq)
{
	elevator_t *e = q->elevator;

	if (e->ops->elevator_deactivate_req_fn)
		e->ops->elevator_deactivate_req_fn(q, rq);
}

static inline void __elv_rqhash_del(struct request *rq)
{
	hlist_del_init(&rq->hash);
}

static void elv_rqhash_del(struct request_queue *q, struct request *rq)
{
	if (ELV_ON_HASH(rq))
		__elv_rqhash_del(rq);
}

static void elv_rqhash_add(struct request_queue *q, struct request *rq)
{
	elevator_t *e = q->elevator;

	BUG_ON(ELV_ON_HASH(rq));
	hlist_add_head(&rq->hash, &e->hash[ELV_HASH_FN(rq_hash_key(rq))]);
}

static void elv_rqhash_reposition(struct request_queue *q, struct request *rq)
{
	__elv_rqhash_del(rq);
	elv_rqhash_add(q, rq);
}

static struct request *elv_rqhash_find(struct request_queue *q, sector_t offset)
{
	elevator_t *e = q->elevator;
	struct hlist_head *hash_list = &e->hash[ELV_HASH_FN(offset)];
	struct hlist_node *entry, *next;
	struct request *rq;

	hlist_for_each_entry_safe(rq, entry, next, hash_list, hash) {
		BUG_ON(!ELV_ON_HASH(rq));

		if (unlikely(!rq_mergeable(rq))) {
			__elv_rqhash_del(rq);
			continue;
		}

		if (rq_hash_key(rq) == offset)
			return rq;
	}

	return NULL;
}

/*
 * RB-tree support functions for inserting/lookup/removal of requests
 * in a sorted RB tree.
 */
struct request *elv_rb_add(struct rb_root *root, struct request *rq)
{
	struct rb_node **p = &root->rb_node;
	struct rb_node *parent = NULL;
	struct request *__rq;

	while (*p) {
		parent = *p;
		__rq = rb_entry(parent, struct request, rb_node);

		if (rq->sector < __rq->sector)
			p = &(*p)->rb_left;
		else if (rq->sector > __rq->sector)
			p = &(*p)->rb_right;
		else
			return __rq;
	}

	rb_link_node(&rq->rb_node, parent, p);
	rb_insert_color(&rq->rb_node, root);
	return NULL;
}

EXPORT_SYMBOL(elv_rb_add);

void elv_rb_del(struct rb_root *root, struct request *rq)
{
	BUG_ON(RB_EMPTY_NODE(&rq->rb_node));
	rb_erase(&rq->rb_node, root);
	RB_CLEAR_NODE(&rq->rb_node);
}

EXPORT_SYMBOL(elv_rb_del);

struct request *elv_rb_find(struct rb_root *root, sector_t sector)
{
	struct rb_node *n = root->rb_node;
	struct request *rq;

	while (n) {
		rq = rb_entry(n, struct request, rb_node);

		if (sector < rq->sector)
			n = n->rb_left;
		else if (sector > rq->sector)
			n = n->rb_right;
		else
			return rq;
	}

	return NULL;
}

EXPORT_SYMBOL(elv_rb_find);
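
/*
 * Usage sketch (editorial; "dd" and its sort_list are hypothetical
 * names in the style of the deadline scheduler): an elevator keeping
 * a per-direction sorted tree pairs these helpers like so:
 *
 *	__alias = elv_rb_add(&dd->sort_list[rq_data_dir(rq)], rq);
 *	if (__alias)
 *		... another request with the same start sector exists
 *
 * and looks up a front merge candidate for a bio ending at sector s
 * with elv_rb_find(root, s).
 */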

/*
 * Insert rq into dispatch queue of q.  Queue lock must be held on
 * entry.  rq is sorted into the dispatch queue.  To be used by
 * specific elevators.
 */
void elv_dispatch_sort(struct request_queue *q, struct request *rq)
{
	sector_t boundary;
	struct list_head *entry;

	if (q->last_merge == rq)
		q->last_merge = NULL;

	elv_rqhash_del(q, rq);

	q->nr_sorted--;

	boundary = q->end_sector;

	list_for_each_prev(entry, &q->queue_head) {
		struct request *pos = list_entry_rq(entry);

		if (rq_data_dir(rq) != rq_data_dir(pos))
			break;
		if (pos->cmd_flags & (REQ_SOFTBARRIER|REQ_HARDBARRIER|REQ_STARTED))
			break;
		if (rq->sector >= boundary) {
			if (pos->sector < boundary)
				continue;
		} else {
			if (pos->sector >= boundary)
				break;
		}
		if (rq->sector >= pos->sector)
			break;
	}

	list_add(&rq->queuelist, entry);
}

EXPORT_SYMBOL(elv_dispatch_sort);
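
/*
 * Usage sketch (editorial; my_next_rq() and "sd" are hypothetical): a
 * scheduler's elevator_dispatch_fn removes its chosen request from
 * its private structures, then feeds the dispatch list:
 *
 *	rq = my_next_rq(q);
 *	if (!rq)
 *		return 0;
 *	elv_rb_del(&sd->sort_list, rq);
 *	elv_dispatch_sort(q, rq);
 *	return 1;
 *
 * or uses elv_dispatch_add_tail() below for strict FIFO ordering.
 */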

/*
 * Insert rq into dispatch queue of q.  Queue lock must be held on
 * entry.  rq is added to the back of the dispatch queue.  To be used by
 * specific elevators.
 */
void elv_dispatch_add_tail(struct request_queue *q, struct request *rq)
{
	if (q->last_merge == rq)
		q->last_merge = NULL;

	elv_rqhash_del(q, rq);

	q->nr_sorted--;

	q->end_sector = rq_end_sector(rq);
	q->boundary_rq = rq;
	list_add_tail(&rq->queuelist, &q->queue_head);
}

EXPORT_SYMBOL(elv_dispatch_add_tail);

int elv_merge(struct request_queue *q, struct request **req, struct bio *bio)
{
	elevator_t *e = q->elevator;
	struct request *__rq;
	int ret;

	/*
	 * First try one-hit cache.
	 */
	if (q->last_merge) {
		ret = elv_try_merge(q->last_merge, bio);
		if (ret != ELEVATOR_NO_MERGE) {
			*req = q->last_merge;
			return ret;
		}
	}

	/*
	 * See if our hash lookup can find a potential backmerge.
	 */
	__rq = elv_rqhash_find(q, bio->bi_sector);
	if (__rq && elv_rq_merge_ok(__rq, bio)) {
		*req = __rq;
		return ELEVATOR_BACK_MERGE;
	}

	if (e->ops->elevator_merge_fn)
		return e->ops->elevator_merge_fn(q, req, bio);

	return ELEVATOR_NO_MERGE;
}
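
/*
 * Caller pattern (editorial; condensed from the block core's
 * __make_request() flow, error handling elided):
 *
 *	el_ret = elv_merge(q, &req, bio);
 *	if (el_ret == ELEVATOR_BACK_MERGE)
 *		... append bio to req, then call elv_merged_request()
 *	else if (el_ret == ELEVATOR_FRONT_MERGE)
 *		... prepend bio, update req->sector, elv_merged_request()
 *	else
 *		... allocate a fresh request for the bio
 */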

void elv_merged_request(struct request_queue *q, struct request *rq, int type)
{
	elevator_t *e = q->elevator;

	if (e->ops->elevator_merged_fn)
		e->ops->elevator_merged_fn(q, rq, type);

	if (type == ELEVATOR_BACK_MERGE)
		elv_rqhash_reposition(q, rq);

	q->last_merge = rq;
}

void elv_merge_requests(struct request_queue *q, struct request *rq,
			struct request *next)
{
	elevator_t *e = q->elevator;

	if (e->ops->elevator_merge_req_fn)
		e->ops->elevator_merge_req_fn(q, rq, next);

	elv_rqhash_reposition(q, rq);
	elv_rqhash_del(q, next);

	q->nr_sorted--;
	q->last_merge = rq;
}

void elv_requeue_request(struct request_queue *q, struct request *rq)
{
	/*
	 * it already went through dequeue, we need to decrement the
	 * in_flight count again
	 */
	if (blk_account_rq(rq)) {
		q->in_flight--;
		if (blk_sorted_rq(rq))
			elv_deactivate_rq(q, rq);
	}

	rq->cmd_flags &= ~REQ_STARTED;

	elv_insert(q, rq, ELEVATOR_INSERT_REQUEUE);
}

static void elv_drain_elevator(struct request_queue *q)
{
	static int printed;
	while (q->elevator->ops->elevator_dispatch_fn(q, 1))
		;
	if (q->nr_sorted == 0)
		return;
	if (printed++ < 10) {
		printk(KERN_ERR "%s: forced dispatching is broken "
		       "(nr_sorted=%u), please report this\n",
		       q->elevator->elevator_type->elevator_name, q->nr_sorted);
	}
}

void elv_insert(struct request_queue *q, struct request *rq, int where)
{
	struct list_head *pos;
	unsigned ordseq;
	int unplug_it = 1;

	blk_add_trace_rq(q, rq, BLK_TA_INSERT);

	rq->q = q;

	switch (where) {
	case ELEVATOR_INSERT_FRONT:
		rq->cmd_flags |= REQ_SOFTBARRIER;

		list_add(&rq->queuelist, &q->queue_head);
		break;

	case ELEVATOR_INSERT_BACK:
		rq->cmd_flags |= REQ_SOFTBARRIER;
		elv_drain_elevator(q);
		list_add_tail(&rq->queuelist, &q->queue_head);
		/*
		 * We kick the queue here for the following reasons.
		 * - The elevator might have returned NULL previously
		 *   to delay requests and is returning them now.  As the
		 *   queue wasn't empty before this request, ll_rw_blk
		 *   won't run the queue on return, resulting in a hang.
		 * - Usually, back-inserted requests won't be merged
		 *   with anything.  There's no point in delaying queue
		 *   processing.
		 */
		blk_remove_plug(q);
		q->request_fn(q);
		break;

	case ELEVATOR_INSERT_SORT:
		BUG_ON(!blk_fs_request(rq));
		rq->cmd_flags |= REQ_SORTED;
		q->nr_sorted++;
		if (rq_mergeable(rq)) {
			elv_rqhash_add(q, rq);
			if (!q->last_merge)
				q->last_merge = rq;
		}

		/*
		 * Some ioscheds (cfq) run q->request_fn directly, so
		 * rq cannot be accessed after calling
		 * elevator_add_req_fn.
		 */
		q->elevator->ops->elevator_add_req_fn(q, rq);
		break;

	case ELEVATOR_INSERT_REQUEUE:
		/*
		 * If ordered flush isn't in progress, we do front
		 * insertion; otherwise, requests should be requeued
		 * in ordseq order.
		 */
		rq->cmd_flags |= REQ_SOFTBARRIER;

		/*
		 * Most requeues happen because of a busy condition,
		 * don't force unplug of the queue for that case.
		 */
		unplug_it = 0;

		if (q->ordseq == 0) {
			list_add(&rq->queuelist, &q->queue_head);
			break;
		}

		ordseq = blk_ordered_req_seq(rq);

		list_for_each(pos, &q->queue_head) {
			struct request *pos_rq = list_entry_rq(pos);
			if (ordseq <= blk_ordered_req_seq(pos_rq))
				break;
		}

		list_add_tail(&rq->queuelist, pos);
		break;

	default:
		printk(KERN_ERR "%s: bad insertion point %d\n",
		       __FUNCTION__, where);
		BUG();
	}

	if (unplug_it && blk_queue_plugged(q)) {
		int nrq = q->rq.count[READ] + q->rq.count[WRITE]
			- q->in_flight;

		if (nrq >= q->unplug_thresh)
			__generic_unplug_device(q);
	}
}
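
/*
 * Quick reference for the insertion points above (editorial summary;
 * callers normally go through the __elv_add_request() wrapper below):
 *
 *	ELEVATOR_INSERT_FRONT    head of dispatch list, no scheduler
 *	ELEVATOR_INSERT_BACK     drain scheduler, then tail of dispatch list
 *	ELEVATOR_INSERT_SORT     hand the fs request to the io scheduler
 *	ELEVATOR_INSERT_REQUEUE  front, or ordseq position during a flush
 */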

void __elv_add_request(struct request_queue *q, struct request *rq, int where,
		       int plug)
{
	if (q->ordcolor)
		rq->cmd_flags |= REQ_ORDERED_COLOR;

	if (rq->cmd_flags & (REQ_SOFTBARRIER | REQ_HARDBARRIER)) {
		/*
		 * toggle ordered color
		 */
		if (blk_barrier_rq(rq))
			q->ordcolor ^= 1;

		/*
		 * barriers implicitly indicate back insertion
		 */
		if (where == ELEVATOR_INSERT_SORT)
			where = ELEVATOR_INSERT_BACK;

		/*
		 * this request is scheduling boundary, update
		 * end_sector
		 */
		if (blk_fs_request(rq)) {
			q->end_sector = rq_end_sector(rq);
			q->boundary_rq = rq;
		}
	} else if (!(rq->cmd_flags & REQ_ELVPRIV) && where == ELEVATOR_INSERT_SORT)
		where = ELEVATOR_INSERT_BACK;

	if (plug)
		blk_plug_device(q);

	elv_insert(q, rq, where);
}

EXPORT_SYMBOL(__elv_add_request);

void elv_add_request(struct request_queue *q, struct request *rq, int where,
		     int plug)
{
	unsigned long flags;

	spin_lock_irqsave(q->queue_lock, flags);
	__elv_add_request(q, rq, where, plug);
	spin_unlock_irqrestore(q->queue_lock, flags);
}

EXPORT_SYMBOL(elv_add_request);

static inline struct request *__elv_next_request(struct request_queue *q)
{
	struct request *rq;

	while (1) {
		while (!list_empty(&q->queue_head)) {
			rq = list_entry_rq(q->queue_head.next);
			if (blk_do_ordered(q, &rq))
				return rq;
		}

		if (!q->elevator->ops->elevator_dispatch_fn(q, 0))
			return NULL;
	}
}

struct request *elv_next_request(struct request_queue *q)
{
	struct request *rq;
	int ret;

	while ((rq = __elv_next_request(q)) != NULL) {
		if (!(rq->cmd_flags & REQ_STARTED)) {
			/*
			 * This is the first time the device driver
			 * sees this request (possibly after
			 * requeueing).  Notify IO scheduler.
			 */
			if (blk_sorted_rq(rq))
				elv_activate_rq(q, rq);

			/*
			 * just mark as started even if we don't start
			 * it, a request that has been delayed should
			 * not be passed by new incoming requests
			 */
			rq->cmd_flags |= REQ_STARTED;
			blk_add_trace_rq(q, rq, BLK_TA_ISSUE);
		}

		if (!q->boundary_rq || q->boundary_rq == rq) {
			q->end_sector = rq_end_sector(rq);
			q->boundary_rq = NULL;
		}

		if ((rq->cmd_flags & REQ_DONTPREP) || !q->prep_rq_fn)
			break;

		ret = q->prep_rq_fn(q, rq);
		if (ret == BLKPREP_OK) {
			break;
		} else if (ret == BLKPREP_DEFER) {
			/*
			 * the request may have been (partially) prepped.
			 * we need to keep this request in the front to
			 * avoid resource deadlock.  REQ_STARTED will
			 * prevent other fs requests from passing this one.
			 */
			rq = NULL;
			break;
		} else if (ret == BLKPREP_KILL) {
			int nr_bytes = rq->hard_nr_sectors << 9;

			if (!nr_bytes)
				nr_bytes = rq->data_len;

			blkdev_dequeue_request(rq);
			rq->cmd_flags |= REQ_QUIET;
			end_that_request_chunk(rq, 0, nr_bytes);
			end_that_request_last(rq, 0);
		} else {
			printk(KERN_ERR "%s: bad return=%d\n", __FUNCTION__,
			       ret);
			break;
		}
	}

	return rq;
}

EXPORT_SYMBOL(elv_next_request);
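
/*
 * Typical consumer loop (editorial; the classic pattern for block
 * drivers of this era, shown schematically):
 *
 *	while ((rq = elv_next_request(q)) != NULL) {
 *		blkdev_dequeue_request(rq);
 *		... hand rq to the hardware, complete it later
 *	}
 *
 * A driver may also leave rq on the queue and poll again later;
 * REQ_STARTED above guarantees the scheduler is only notified once.
 */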

void elv_dequeue_request(struct request_queue *q, struct request *rq)
{
	BUG_ON(list_empty(&rq->queuelist));
	BUG_ON(ELV_ON_HASH(rq));

	list_del_init(&rq->queuelist);

	/*
	 * The time frame between a request being removed from the lists
	 * and when it is freed is accounted as io that is in progress at
	 * the driver side.
	 */
	if (blk_account_rq(rq))
		q->in_flight++;
}

EXPORT_SYMBOL(elv_dequeue_request);

int elv_queue_empty(struct request_queue *q)
{
	elevator_t *e = q->elevator;

	if (!list_empty(&q->queue_head))
		return 0;

	if (e->ops->elevator_queue_empty_fn)
		return e->ops->elevator_queue_empty_fn(q);

	return 1;
}

EXPORT_SYMBOL(elv_queue_empty);

struct request *elv_latter_request(struct request_queue *q, struct request *rq)
{
	elevator_t *e = q->elevator;

	if (e->ops->elevator_latter_req_fn)
		return e->ops->elevator_latter_req_fn(q, rq);
	return NULL;
}

struct request *elv_former_request(struct request_queue *q, struct request *rq)
{
	elevator_t *e = q->elevator;

	if (e->ops->elevator_former_req_fn)
		return e->ops->elevator_former_req_fn(q, rq);
	return NULL;
}

int elv_set_request(struct request_queue *q, struct request *rq, gfp_t gfp_mask)
{
	elevator_t *e = q->elevator;

	if (e->ops->elevator_set_req_fn)
		return e->ops->elevator_set_req_fn(q, rq, gfp_mask);

	rq->elevator_private = NULL;
	return 0;
}

void elv_put_request(struct request_queue *q, struct request *rq)
{
	elevator_t *e = q->elevator;

	if (e->ops->elevator_put_req_fn)
		e->ops->elevator_put_req_fn(rq);
}

int elv_may_queue(struct request_queue *q, int rw)
{
	elevator_t *e = q->elevator;

	if (e->ops->elevator_may_queue_fn)
		return e->ops->elevator_may_queue_fn(q, rw);

	return ELV_MQUEUE_MAY;
}

void elv_completed_request(struct request_queue *q, struct request *rq)
{
	elevator_t *e = q->elevator;

	/*
	 * request is released from the driver, io must be done
	 */
	if (blk_account_rq(rq)) {
		q->in_flight--;
		if (blk_sorted_rq(rq) && e->ops->elevator_completed_req_fn)
			e->ops->elevator_completed_req_fn(q, rq);
	}

	/*
	 * Check if the queue is waiting for fs requests to be
	 * drained for flush sequence.
	 */
	if (unlikely(q->ordseq)) {
		struct request *first_rq = list_entry_rq(q->queue_head.next);
		if (q->in_flight == 0 &&
		    blk_ordered_cur_seq(q) == QUEUE_ORDSEQ_DRAIN &&
		    blk_ordered_req_seq(first_rq) > QUEUE_ORDSEQ_DRAIN) {
			blk_ordered_complete_seq(q, QUEUE_ORDSEQ_DRAIN, 0);
			q->request_fn(q);
		}
	}
}

#define to_elv(atr) container_of((atr), struct elv_fs_entry, attr)

static ssize_t
elv_attr_show(struct kobject *kobj, struct attribute *attr, char *page)
{
	elevator_t *e = container_of(kobj, elevator_t, kobj);
	struct elv_fs_entry *entry = to_elv(attr);
	ssize_t error;

	if (!entry->show)
		return -EIO;

	mutex_lock(&e->sysfs_lock);
	error = e->ops ? entry->show(e, page) : -ENOENT;
	mutex_unlock(&e->sysfs_lock);
	return error;
}

static ssize_t
elv_attr_store(struct kobject *kobj, struct attribute *attr,
	       const char *page, size_t length)
{
	elevator_t *e = container_of(kobj, elevator_t, kobj);
	struct elv_fs_entry *entry = to_elv(attr);
	ssize_t error;

	if (!entry->store)
		return -EIO;

	mutex_lock(&e->sysfs_lock);
	error = e->ops ? entry->store(e, page, length) : -ENOENT;
	mutex_unlock(&e->sysfs_lock);
	return error;
}

static struct sysfs_ops elv_sysfs_ops = {
	.show	= elv_attr_show,
	.store	= elv_attr_store,
};

static struct kobj_type elv_ktype = {
	.sysfs_ops	= &elv_sysfs_ops,
	.release	= elevator_release,
};

int elv_register_queue(struct request_queue *q)
{
	elevator_t *e = q->elevator;
	int error;

	e->kobj.parent = &q->kobj;

	error = kobject_add(&e->kobj);
	if (!error) {
		struct elv_fs_entry *attr = e->elevator_type->elevator_attrs;
		if (attr) {
			while (attr->attr.name) {
				if (sysfs_create_file(&e->kobj, &attr->attr))
					break;
				attr++;
			}
		}
		kobject_uevent(&e->kobj, KOBJ_ADD);
	}
	return error;
}

static void __elv_unregister_queue(elevator_t *e)
{
	kobject_uevent(&e->kobj, KOBJ_REMOVE);
	kobject_del(&e->kobj);
}

void elv_unregister_queue(struct request_queue *q)
{
	if (q)
		__elv_unregister_queue(q->elevator);
}

int elv_register(struct elevator_type *e)
{
	char *def = "";

	spin_lock(&elv_list_lock);
	BUG_ON(elevator_find(e->elevator_name));
	list_add_tail(&e->list, &elv_list);
	spin_unlock(&elv_list_lock);

	if (!strcmp(e->elevator_name, chosen_elevator) ||
	    (!*chosen_elevator &&
	     !strcmp(e->elevator_name, CONFIG_DEFAULT_IOSCHED)))
		def = " (default)";

	printk(KERN_INFO "io scheduler %s registered%s\n", e->elevator_name, def);
	return 0;
}
EXPORT_SYMBOL_GPL(elv_register);
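
/*
 * Registration sketch (editorial; modeled on the in-tree noop
 * scheduler, field list abridged):
 *
 *	static struct elevator_type elevator_noop = {
 *		.ops = {
 *			.elevator_dispatch_fn	= noop_dispatch,
 *			.elevator_add_req_fn	= noop_add_request,
 *		},
 *		.elevator_name	= "noop",
 *		.elevator_owner	= THIS_MODULE,
 *	};
 *
 *	static int __init noop_init(void)
 *	{
 *		return elv_register(&elevator_noop);
 *	}
 */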

void elv_unregister(struct elevator_type *e)
{
	struct task_struct *g, *p;

	/*
	 * Iterate every thread in the system to remove the io contexts.
	 */
	if (e->ops.trim) {
		read_lock(&tasklist_lock);
		do_each_thread(g, p) {
			task_lock(p);
			if (p->io_context)
				e->ops.trim(p->io_context);
			task_unlock(p);
		} while_each_thread(g, p);
		read_unlock(&tasklist_lock);
	}

	spin_lock(&elv_list_lock);
	list_del_init(&e->list);
	spin_unlock(&elv_list_lock);
}
EXPORT_SYMBOL_GPL(elv_unregister);

/*
 * switch to new_e io scheduler.  Be careful not to introduce deadlocks -
 * we don't free the old io scheduler before we have allocated what we
 * need for the new one.  This way we have a chance of going back to the
 * old one, if the new one fails init for some reason.
 */
static int elevator_switch(struct request_queue *q, struct elevator_type *new_e)
{
	elevator_t *old_elevator, *e;
	void *data;

	/*
	 * Allocate new elevator
	 */
	e = elevator_alloc(q, new_e);
	if (!e)
		return 0;

	data = elevator_init_queue(q, e);
	if (!data) {
		kobject_put(&e->kobj);
		return 0;
	}

	/*
	 * Turn on BYPASS and drain all requests w/ elevator private data
	 */
	spin_lock_irq(q->queue_lock);

	set_bit(QUEUE_FLAG_ELVSWITCH, &q->queue_flags);

	elv_drain_elevator(q);

	while (q->rq.elvpriv) {
		blk_remove_plug(q);
		q->request_fn(q);
		spin_unlock_irq(q->queue_lock);
		msleep(10);
		spin_lock_irq(q->queue_lock);
		elv_drain_elevator(q);
	}

	/*
	 * Remember old elevator.
	 */
	old_elevator = q->elevator;

	/*
	 * attach and start new elevator
	 */
	elevator_attach(q, e, data);

	spin_unlock_irq(q->queue_lock);

	__elv_unregister_queue(old_elevator);

	if (elv_register_queue(q))
		goto fail_register;

	/*
	 * finally exit old elevator and turn off BYPASS.
	 */
	elevator_exit(old_elevator);
	clear_bit(QUEUE_FLAG_ELVSWITCH, &q->queue_flags);
	return 1;

fail_register:
	/*
	 * switch failed, exit the new io scheduler and reattach the old
	 * one again (along with re-adding the sysfs dir)
	 */
	elevator_exit(e);
	q->elevator = old_elevator;
	elv_register_queue(q);
	clear_bit(QUEUE_FLAG_ELVSWITCH, &q->queue_flags);
	return 0;
}

ssize_t elv_iosched_store(struct request_queue *q, const char *name,
			  size_t count)
{
	char elevator_name[ELV_NAME_MAX];
	size_t len;
	struct elevator_type *e;

	elevator_name[sizeof(elevator_name) - 1] = '\0';
	strncpy(elevator_name, name, sizeof(elevator_name) - 1);
	len = strlen(elevator_name);

	if (len && elevator_name[len - 1] == '\n')
		elevator_name[len - 1] = '\0';

	e = elevator_get(elevator_name);
	if (!e) {
		printk(KERN_ERR "elevator: type %s not found\n", elevator_name);
		return -EINVAL;
	}

	if (!strcmp(elevator_name, q->elevator->elevator_type->elevator_name)) {
		elevator_put(e);
		return count;
	}

	if (!elevator_switch(q, e))
		printk(KERN_ERR "elevator: switch to %s failed\n", elevator_name);
	return count;
}
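
/*
 * The store hook above is what runs when an administrator switches
 * schedulers at runtime through sysfs, e.g. for a disk sda:
 *
 *	# cat /sys/block/sda/queue/scheduler
 *	noop anticipatory deadline [cfq]
 *	# echo deadline > /sys/block/sda/queue/scheduler
 *
 * (example transcript; the exact list depends on which schedulers are
 * built in).  The read side is elv_iosched_show() below.
 */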

ssize_t elv_iosched_show(struct request_queue *q, char *name)
{
	elevator_t *e = q->elevator;
	struct elevator_type *elv = e->elevator_type;
	struct elevator_type *__e;
	int len = 0;

	spin_lock(&elv_list_lock);
	list_for_each_entry(__e, &elv_list, list) {
		if (!strcmp(elv->elevator_name, __e->elevator_name))
			len += sprintf(name+len, "[%s] ", elv->elevator_name);
		else
			len += sprintf(name+len, "%s ", __e->elevator_name);
	}
	spin_unlock(&elv_list_lock);

	len += sprintf(name+len, "\n");
	return len;
}

struct request *elv_rb_former_request(struct request_queue *q,
				      struct request *rq)
{
	struct rb_node *rbprev = rb_prev(&rq->rb_node);

	if (rbprev)
		return rb_entry_rq(rbprev);

	return NULL;
}

EXPORT_SYMBOL(elv_rb_former_request);

struct request *elv_rb_latter_request(struct request_queue *q,
				      struct request *rq)
{
	struct rb_node *rbnext = rb_next(&rq->rb_node);

	if (rbnext)
		return rb_entry_rq(rbnext);

	return NULL;
}

EXPORT_SYMBOL(elv_rb_latter_request);