/*
 *  Block device elevator/IO-scheduler.
 *
 *  Copyright (C) 2000 Andrea Arcangeli <andrea@suse.de> SuSE
 *
 * 30042000 Jens Axboe <axboe@kernel.dk> :
 *
 * Split the elevator a bit so that it is possible to choose a different
 * one or even write a new "plug in". There are three pieces:
 * - elevator_fn, inserts a new request in the queue list
 * - elevator_merge_fn, decides whether a new buffer can be merged with
 *   an existing request
 * - elevator_dequeue_fn, called when a request is taken off the active list
 *
 * 20082000 Dave Jones <davej@suse.de> :
 * Removed tests for max-bomb-segments, which was breaking elvtune
 *  when run without -bN
 *
 * Jens:
 * - Rework again to work with bio instead of buffer_heads
 * - loose bi_dev comparisons, partition handling is right now
 * - completely modularize elevator setup and teardown
 *
 */
#include <linux/kernel.h>
#include <linux/fs.h>
#include <linux/blkdev.h>
#include <linux/elevator.h>
#include <linux/bio.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/init.h>
#include <linux/compiler.h>
#include <linux/delay.h>
#include <linux/blktrace_api.h>
#include <linux/hash.h>

#include <asm/uaccess.h>

static DEFINE_SPINLOCK(elv_list_lock);
static LIST_HEAD(elv_list);

/*
 * Merge hash stuff.
 */
static const int elv_hash_shift = 6;
#define ELV_HASH_BLOCK(sec)     ((sec) >> 3)
#define ELV_HASH_FN(sec)        (hash_long(ELV_HASH_BLOCK((sec)), elv_hash_shift))
#define ELV_HASH_ENTRIES        (1 << elv_hash_shift)
#define rq_hash_key(rq)         ((rq)->sector + (rq)->nr_sectors)
#define ELV_ON_HASH(rq)         (!hlist_unhashed(&(rq)->hash))

/*
 * Query io scheduler to see if the current process issuing bio may be
 * merged with rq.
 */
static int elv_iosched_allow_merge(struct request *rq, struct bio *bio)
{
        request_queue_t *q = rq->q;
        elevator_t *e = q->elevator;

        if (e->ops->elevator_allow_merge_fn)
                return e->ops->elevator_allow_merge_fn(q, rq, bio);

        return 1;
}

/*
 * can we safely merge with this request?
 */
inline int elv_rq_merge_ok(struct request *rq, struct bio *bio)
{
        if (!rq_mergeable(rq))
                return 0;

        /*
         * different data direction or already started, don't merge
         */
        if (bio_data_dir(bio) != rq_data_dir(rq))
                return 0;

        /*
         * must be same device and not a special request
         */
        if (rq->rq_disk != bio->bi_bdev->bd_disk || rq->special)
                return 0;

        if (!elv_iosched_allow_merge(rq, bio))
                return 0;

        return 1;
}
EXPORT_SYMBOL(elv_rq_merge_ok);

static inline int elv_try_merge(struct request *__rq, struct bio *bio)
{
        int ret = ELEVATOR_NO_MERGE;

        /*
         * we can merge and sequence is ok, check if it's possible
         */
        if (elv_rq_merge_ok(__rq, bio)) {
                if (__rq->sector + __rq->nr_sectors == bio->bi_sector)
                        ret = ELEVATOR_BACK_MERGE;
                else if (__rq->sector - bio_sectors(bio) == bio->bi_sector)
                        ret = ELEVATOR_FRONT_MERGE;
        }

        return ret;
}

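/*
 * Look up a registered elevator type by name. Callers must hold
 * elv_list_lock; no module reference is taken here.
 */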
static struct elevator_type *elevator_find(const char *name)
{
        struct elevator_type *e;
        struct list_head *entry;

        list_for_each(entry, &elv_list) {

                e = list_entry(entry, struct elevator_type, list);

                if (!strcmp(e->elevator_name, name))
                        return e;
        }

        return NULL;
}

static void elevator_put(struct elevator_type *e)
{
        module_put(e->elevator_owner);
}

static struct elevator_type *elevator_get(const char *name)
{
        struct elevator_type *e;

        spin_lock_irq(&elv_list_lock);

        e = elevator_find(name);
        if (e && !try_module_get(e->elevator_owner))
                e = NULL;

        spin_unlock_irq(&elv_list_lock);

        return e;
}

static void *elevator_init_queue(request_queue_t *q, struct elevator_queue *eq)
{
        return eq->ops->elevator_init_fn(q);
}

static void elevator_attach(request_queue_t *q, struct elevator_queue *eq,
                            void *data)
{
        q->elevator = eq;
        eq->elevator_data = data;
}

static char chosen_elevator[16];

static int __init elevator_setup(char *str)
{
        /*
         * Be backwards-compatible with previous kernels, so users
         * won't get the wrong elevator.
         */
        if (!strcmp(str, "as"))
                strcpy(chosen_elevator, "anticipatory");
        else
                strncpy(chosen_elevator, str, sizeof(chosen_elevator) - 1);
        return 1;
}

__setup("elevator=", elevator_setup);

static struct kobj_type elv_ktype;

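/*
 * Allocate an elevator_t and its merge hash table. On failure the module
 * reference taken by elevator_get() is dropped again via elevator_put().
 */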
static elevator_t *elevator_alloc(request_queue_t *q, struct elevator_type *e)
{
        elevator_t *eq;
        int i;

        eq = kmalloc_node(sizeof(elevator_t), GFP_KERNEL, q->node);
        if (unlikely(!eq))
                goto err;

        memset(eq, 0, sizeof(*eq));
        eq->ops = &e->ops;
        eq->elevator_type = e;
        kobject_init(&eq->kobj);
        snprintf(eq->kobj.name, KOBJ_NAME_LEN, "%s", "iosched");
        eq->kobj.ktype = &elv_ktype;
        mutex_init(&eq->sysfs_lock);

        eq->hash = kmalloc_node(sizeof(struct hlist_head) * ELV_HASH_ENTRIES,
                                GFP_KERNEL, q->node);
        if (!eq->hash)
                goto err;

        for (i = 0; i < ELV_HASH_ENTRIES; i++)
                INIT_HLIST_HEAD(&eq->hash[i]);

        return eq;
err:
        kfree(eq);
        elevator_put(e);
        return NULL;
}

static void elevator_release(struct kobject *kobj)
{
        elevator_t *e = container_of(kobj, elevator_t, kobj);

        elevator_put(e->elevator_type);
        kfree(e->hash);
        kfree(e);
}

int elevator_init(request_queue_t *q, char *name)
{
        struct elevator_type *e = NULL;
        struct elevator_queue *eq;
        int ret = 0;
        void *data;

        INIT_LIST_HEAD(&q->queue_head);
        q->last_merge = NULL;
        q->end_sector = 0;
        q->boundary_rq = NULL;

        if (name && !(e = elevator_get(name)))
                return -EINVAL;

        if (!e && *chosen_elevator && !(e = elevator_get(chosen_elevator)))
                printk("I/O scheduler %s not found\n", chosen_elevator);

        if (!e && !(e = elevator_get(CONFIG_DEFAULT_IOSCHED))) {
                printk("Default I/O scheduler not found, using no-op\n");
                e = elevator_get("noop");
        }

        eq = elevator_alloc(q, e);
        if (!eq)
                return -ENOMEM;

        data = elevator_init_queue(q, eq);
        if (!data) {
                kobject_put(&eq->kobj);
                return -ENOMEM;
        }

        elevator_attach(q, eq, data);
        return ret;
}

EXPORT_SYMBOL(elevator_init);

void elevator_exit(elevator_t *e)
{
        mutex_lock(&e->sysfs_lock);
        if (e->ops->elevator_exit_fn)
                e->ops->elevator_exit_fn(e);
        e->ops = NULL;
        mutex_unlock(&e->sysfs_lock);

        kobject_put(&e->kobj);
}

EXPORT_SYMBOL(elevator_exit);

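/*
 * Back-merge hash: a request is hashed on the sector right after it
 * (rq_hash_key), so looking up a bio's start sector finds candidates
 * that it could be back-merged into.
 */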
static inline void __elv_rqhash_del(struct request *rq)
{
        hlist_del_init(&rq->hash);
}

static void elv_rqhash_del(request_queue_t *q, struct request *rq)
{
        if (ELV_ON_HASH(rq))
                __elv_rqhash_del(rq);
}

static void elv_rqhash_add(request_queue_t *q, struct request *rq)
{
        elevator_t *e = q->elevator;

        BUG_ON(ELV_ON_HASH(rq));
        hlist_add_head(&rq->hash, &e->hash[ELV_HASH_FN(rq_hash_key(rq))]);
}

static void elv_rqhash_reposition(request_queue_t *q, struct request *rq)
{
        __elv_rqhash_del(rq);
        elv_rqhash_add(q, rq);
}

static struct request *elv_rqhash_find(request_queue_t *q, sector_t offset)
{
        elevator_t *e = q->elevator;
        struct hlist_head *hash_list = &e->hash[ELV_HASH_FN(offset)];
        struct hlist_node *entry, *next;
        struct request *rq;

        hlist_for_each_entry_safe(rq, entry, next, hash_list, hash) {
                BUG_ON(!ELV_ON_HASH(rq));

                if (unlikely(!rq_mergeable(rq))) {
                        __elv_rqhash_del(rq);
                        continue;
                }

                if (rq_hash_key(rq) == offset)
                        return rq;
        }

        return NULL;
}

/*
 * RB-tree support functions for inserting/lookup/removal of requests
 * in a sorted RB tree.
 */
struct request *elv_rb_add(struct rb_root *root, struct request *rq)
{
        struct rb_node **p = &root->rb_node;
        struct rb_node *parent = NULL;
        struct request *__rq;

        while (*p) {
                parent = *p;
                __rq = rb_entry(parent, struct request, rb_node);

                if (rq->sector < __rq->sector)
                        p = &(*p)->rb_left;
                else if (rq->sector > __rq->sector)
                        p = &(*p)->rb_right;
                else
                        return __rq;
        }

        rb_link_node(&rq->rb_node, parent, p);
        rb_insert_color(&rq->rb_node, root);
        return NULL;
}

EXPORT_SYMBOL(elv_rb_add);

void elv_rb_del(struct rb_root *root, struct request *rq)
{
        BUG_ON(RB_EMPTY_NODE(&rq->rb_node));
        rb_erase(&rq->rb_node, root);
        RB_CLEAR_NODE(&rq->rb_node);
}

EXPORT_SYMBOL(elv_rb_del);

struct request *elv_rb_find(struct rb_root *root, sector_t sector)
{
        struct rb_node *n = root->rb_node;
        struct request *rq;

        while (n) {
                rq = rb_entry(n, struct request, rb_node);

                if (sector < rq->sector)
                        n = n->rb_left;
                else if (sector > rq->sector)
                        n = n->rb_right;
                else
                        return rq;
        }

        return NULL;
}

EXPORT_SYMBOL(elv_rb_find);

/*
 * Insert rq into dispatch queue of q. Queue lock must be held on
 * entry. rq is sorted into the dispatch queue. To be used by
 * specific elevators.
 */
void elv_dispatch_sort(request_queue_t *q, struct request *rq)
{
        sector_t boundary;
        struct list_head *entry;

        if (q->last_merge == rq)
                q->last_merge = NULL;

        elv_rqhash_del(q, rq);

        q->nr_sorted--;

        boundary = q->end_sector;

        list_for_each_prev(entry, &q->queue_head) {
                struct request *pos = list_entry_rq(entry);

                if (pos->cmd_flags & (REQ_SOFTBARRIER|REQ_HARDBARRIER|REQ_STARTED))
                        break;
                if (rq->sector >= boundary) {
                        if (pos->sector < boundary)
                                continue;
                } else {
                        if (pos->sector >= boundary)
                                break;
                }
                if (rq->sector >= pos->sector)
                        break;
        }

        list_add(&rq->queuelist, entry);
}

EXPORT_SYMBOL(elv_dispatch_sort);

/*
 * Insert rq into dispatch queue of q. Queue lock must be held on
 * entry. rq is added to the back of the dispatch queue. To be used by
 * specific elevators.
 */
void elv_dispatch_add_tail(struct request_queue *q, struct request *rq)
{
        if (q->last_merge == rq)
                q->last_merge = NULL;

        elv_rqhash_del(q, rq);

        q->nr_sorted--;

        q->end_sector = rq_end_sector(rq);
        q->boundary_rq = rq;
        list_add_tail(&rq->queuelist, &q->queue_head);
}

EXPORT_SYMBOL(elv_dispatch_add_tail);

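/*
 * Decide whether bio can be merged into a pending request: try the one-hit
 * last_merge cache, then the back-merge hash, and finally ask the io
 * scheduler itself.
 */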
int elv_merge(request_queue_t *q, struct request **req, struct bio *bio)
{
        elevator_t *e = q->elevator;
        struct request *__rq;
        int ret;

        /*
         * First try one-hit cache.
         */
        if (q->last_merge) {
                ret = elv_try_merge(q->last_merge, bio);
                if (ret != ELEVATOR_NO_MERGE) {
                        *req = q->last_merge;
                        return ret;
                }
        }

        /*
         * See if our hash lookup can find a potential backmerge.
         */
        __rq = elv_rqhash_find(q, bio->bi_sector);
        if (__rq && elv_rq_merge_ok(__rq, bio)) {
                *req = __rq;
                return ELEVATOR_BACK_MERGE;
        }

        if (e->ops->elevator_merge_fn)
                return e->ops->elevator_merge_fn(q, req, bio);

        return ELEVATOR_NO_MERGE;
}

void elv_merged_request(request_queue_t *q, struct request *rq, int type)
{
        elevator_t *e = q->elevator;

        if (e->ops->elevator_merged_fn)
                e->ops->elevator_merged_fn(q, rq, type);

        if (type == ELEVATOR_BACK_MERGE)
                elv_rqhash_reposition(q, rq);

        q->last_merge = rq;
}

void elv_merge_requests(request_queue_t *q, struct request *rq,
                        struct request *next)
{
        elevator_t *e = q->elevator;

        if (e->ops->elevator_merge_req_fn)
                e->ops->elevator_merge_req_fn(q, rq, next);

        elv_rqhash_reposition(q, rq);
        elv_rqhash_del(q, next);

        q->nr_sorted--;
        q->last_merge = rq;
}

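/*
 * Put a request the driver had already started back on the dispatch queue.
 * REQ_STARTED is cleared so the request is treated as new when reissued.
 */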
void elv_requeue_request(request_queue_t *q, struct request *rq)
{
        elevator_t *e = q->elevator;

        /*
         * it already went through dequeue, we need to decrement the
         * in_flight count again
         */
        if (blk_account_rq(rq)) {
                q->in_flight--;
                if (blk_sorted_rq(rq) && e->ops->elevator_deactivate_req_fn)
                        e->ops->elevator_deactivate_req_fn(q, rq);
        }

        rq->cmd_flags &= ~REQ_STARTED;

        elv_insert(q, rq, ELEVATOR_INSERT_REQUEUE);
}

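/*
 * Force the io scheduler to dispatch everything it is still holding back;
 * warn (a limited number of times) if it keeps reporting sorted requests.
 */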
static void elv_drain_elevator(request_queue_t *q)
{
        static int printed;
        while (q->elevator->ops->elevator_dispatch_fn(q, 1))
                ;
        if (q->nr_sorted == 0)
                return;
        if (printed++ < 10) {
                printk(KERN_ERR "%s: forced dispatching is broken "
                       "(nr_sorted=%u), please report this\n",
                       q->elevator->elevator_type->elevator_name, q->nr_sorted);
        }
}

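/*
 * Add rq to the dispatch queue or hand it to the io scheduler, depending on
 * 'where'. Called with the queue lock held; may run or unplug the queue.
 */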
void elv_insert(request_queue_t *q, struct request *rq, int where)
{
        struct list_head *pos;
        unsigned ordseq;
        int unplug_it = 1;

        blk_add_trace_rq(q, rq, BLK_TA_INSERT);

        rq->q = q;

        switch (where) {
        case ELEVATOR_INSERT_FRONT:
                rq->cmd_flags |= REQ_SOFTBARRIER;

                list_add(&rq->queuelist, &q->queue_head);
                break;

        case ELEVATOR_INSERT_BACK:
                rq->cmd_flags |= REQ_SOFTBARRIER;
                elv_drain_elevator(q);
                list_add_tail(&rq->queuelist, &q->queue_head);
                /*
                 * We kick the queue here for the following reasons.
                 * - The elevator might have returned NULL previously
                 *   to delay requests and returned them now. As the
                 *   queue wasn't empty before this request, ll_rw_blk
                 *   won't run the queue on return, resulting in hang.
                 * - Usually, back inserted requests won't be merged
                 *   with anything. There's no point in delaying queue
                 *   processing.
                 */
                blk_remove_plug(q);
                q->request_fn(q);
                break;

        case ELEVATOR_INSERT_SORT:
                BUG_ON(!blk_fs_request(rq));
                rq->cmd_flags |= REQ_SORTED;
                q->nr_sorted++;
                if (rq_mergeable(rq)) {
                        elv_rqhash_add(q, rq);
                        if (!q->last_merge)
                                q->last_merge = rq;
                }

                /*
                 * Some ioscheds (cfq) run q->request_fn directly, so
                 * rq cannot be accessed after calling
                 * elevator_add_req_fn.
                 */
                q->elevator->ops->elevator_add_req_fn(q, rq);
                break;

        case ELEVATOR_INSERT_REQUEUE:
                /*
                 * If ordered flush isn't in progress, we do front
                 * insertion; otherwise, requests should be requeued
                 * in ordseq order.
                 */
                rq->cmd_flags |= REQ_SOFTBARRIER;

                /*
                 * Most requeues happen because of a busy condition,
                 * don't force unplug of the queue for that case.
                 */
                unplug_it = 0;

                if (q->ordseq == 0) {
                        list_add(&rq->queuelist, &q->queue_head);
                        break;
                }

                ordseq = blk_ordered_req_seq(rq);

                list_for_each(pos, &q->queue_head) {
                        struct request *pos_rq = list_entry_rq(pos);
                        if (ordseq <= blk_ordered_req_seq(pos_rq))
                                break;
                }

                list_add_tail(&rq->queuelist, pos);
                break;

        default:
                printk(KERN_ERR "%s: bad insertion point %d\n",
                       __FUNCTION__, where);
                BUG();
        }

        if (unplug_it && blk_queue_plugged(q)) {
                int nrq = q->rq.count[READ] + q->rq.count[WRITE]
                        - q->in_flight;

                if (nrq >= q->unplug_thresh)
                        __generic_unplug_device(q);
        }
}

void __elv_add_request(request_queue_t *q, struct request *rq, int where,
                       int plug)
{
        if (q->ordcolor)
                rq->cmd_flags |= REQ_ORDERED_COLOR;

        if (rq->cmd_flags & (REQ_SOFTBARRIER | REQ_HARDBARRIER)) {
                /*
                 * toggle ordered color
                 */
                if (blk_barrier_rq(rq))
                        q->ordcolor ^= 1;

                /*
                 * barriers implicitly indicate back insertion
                 */
                if (where == ELEVATOR_INSERT_SORT)
                        where = ELEVATOR_INSERT_BACK;

                /*
                 * this request is scheduling boundary, update
                 * end_sector
                 */
                if (blk_fs_request(rq)) {
                        q->end_sector = rq_end_sector(rq);
                        q->boundary_rq = rq;
                }
        } else if (!(rq->cmd_flags & REQ_ELVPRIV) && where == ELEVATOR_INSERT_SORT)
                where = ELEVATOR_INSERT_BACK;

        if (plug)
                blk_plug_device(q);

        elv_insert(q, rq, where);
}

EXPORT_SYMBOL(__elv_add_request);

void elv_add_request(request_queue_t *q, struct request *rq, int where,
                     int plug)
{
        unsigned long flags;

        spin_lock_irqsave(q->queue_lock, flags);
        __elv_add_request(q, rq, where, plug);
        spin_unlock_irqrestore(q->queue_lock, flags);
}

EXPORT_SYMBOL(elv_add_request);

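/*
 * Return the first dispatchable request on the queue, asking the elevator to
 * refill the dispatch list when it runs empty.
 */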
static inline struct request *__elv_next_request(request_queue_t *q)
{
        struct request *rq;

        while (1) {
                while (!list_empty(&q->queue_head)) {
                        rq = list_entry_rq(q->queue_head.next);
                        if (blk_do_ordered(q, &rq))
                                return rq;
                }

                if (!q->elevator->ops->elevator_dispatch_fn(q, 0))
                        return NULL;
        }
}

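/*
 * Hand the next request to the driver: mark it started, notify the io
 * scheduler, and run the prep function. The request stays on the queue
 * until elv_dequeue_request() is called.
 */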
struct request *elv_next_request(request_queue_t *q)
{
        struct request *rq;
        int ret;

        while ((rq = __elv_next_request(q)) != NULL) {
                if (!(rq->cmd_flags & REQ_STARTED)) {
                        elevator_t *e = q->elevator;

                        /*
                         * This is the first time the device driver
                         * sees this request (possibly after
                         * requeueing). Notify IO scheduler.
                         */
                        if (blk_sorted_rq(rq) &&
                            e->ops->elevator_activate_req_fn)
                                e->ops->elevator_activate_req_fn(q, rq);

                        /*
                         * just mark as started even if we don't start
                         * it, a request that has been delayed should
                         * not be passed by new incoming requests
                         */
                        rq->cmd_flags |= REQ_STARTED;
                        blk_add_trace_rq(q, rq, BLK_TA_ISSUE);
                }

                if (!q->boundary_rq || q->boundary_rq == rq) {
                        q->end_sector = rq_end_sector(rq);
                        q->boundary_rq = NULL;
                }

                if ((rq->cmd_flags & REQ_DONTPREP) || !q->prep_rq_fn)
                        break;

                ret = q->prep_rq_fn(q, rq);
                if (ret == BLKPREP_OK) {
                        break;
                } else if (ret == BLKPREP_DEFER) {
                        /*
                         * the request may have been (partially) prepped.
                         * we need to keep this request in the front to
                         * avoid resource deadlock. REQ_STARTED will
                         * prevent other fs requests from passing this one.
                         */
                        rq = NULL;
                        break;
                } else if (ret == BLKPREP_KILL) {
                        int nr_bytes = rq->hard_nr_sectors << 9;

                        if (!nr_bytes)
                                nr_bytes = rq->data_len;

                        blkdev_dequeue_request(rq);
                        rq->cmd_flags |= REQ_QUIET;
                        end_that_request_chunk(rq, 0, nr_bytes);
                        end_that_request_last(rq, 0);
                } else {
                        printk(KERN_ERR "%s: bad return=%d\n", __FUNCTION__,
                               ret);
                        break;
                }
        }

        return rq;
}

EXPORT_SYMBOL(elv_next_request);

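/*
 * Take rq off the dispatch queue; from here until completion it is accounted
 * as io in flight on the driver side.
 */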
void elv_dequeue_request(request_queue_t *q, struct request *rq)
{
        BUG_ON(list_empty(&rq->queuelist));
        BUG_ON(ELV_ON_HASH(rq));

        list_del_init(&rq->queuelist);

        /*
         * the time frame between a request being removed from the lists
         * and to it is freed is accounted as io that is in progress at
         * the driver side.
         */
        if (blk_account_rq(rq))
                q->in_flight++;
}

EXPORT_SYMBOL(elv_dequeue_request);

int elv_queue_empty(request_queue_t *q)
{
        elevator_t *e = q->elevator;

        if (!list_empty(&q->queue_head))
                return 0;

        if (e->ops->elevator_queue_empty_fn)
                return e->ops->elevator_queue_empty_fn(q);

        return 1;
}

EXPORT_SYMBOL(elv_queue_empty);

struct request *elv_latter_request(request_queue_t *q, struct request *rq)
{
        elevator_t *e = q->elevator;

        if (e->ops->elevator_latter_req_fn)
                return e->ops->elevator_latter_req_fn(q, rq);
        return NULL;
}

struct request *elv_former_request(request_queue_t *q, struct request *rq)
{
        elevator_t *e = q->elevator;

        if (e->ops->elevator_former_req_fn)
                return e->ops->elevator_former_req_fn(q, rq);
        return NULL;
}

int elv_set_request(request_queue_t *q, struct request *rq, gfp_t gfp_mask)
{
        elevator_t *e = q->elevator;

        if (e->ops->elevator_set_req_fn)
                return e->ops->elevator_set_req_fn(q, rq, gfp_mask);

        rq->elevator_private = NULL;
        return 0;
}

void elv_put_request(request_queue_t *q, struct request *rq)
{
        elevator_t *e = q->elevator;

        if (e->ops->elevator_put_req_fn)
                e->ops->elevator_put_req_fn(rq);
}

int elv_may_queue(request_queue_t *q, int rw)
{
        elevator_t *e = q->elevator;

        if (e->ops->elevator_may_queue_fn)
                return e->ops->elevator_may_queue_fn(q, rw);

        return ELV_MQUEUE_MAY;
}

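/*
 * Completion hook from the driver: fix up in-flight accounting, notify the
 * io scheduler, and restart a pending ordered-flush drain if this was the
 * last in-flight request it was waiting for.
 */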
void elv_completed_request(request_queue_t *q, struct request *rq)
{
        elevator_t *e = q->elevator;

        /*
         * request is released from the driver, io must be done
         */
        if (blk_account_rq(rq)) {
                q->in_flight--;
                if (blk_sorted_rq(rq) && e->ops->elevator_completed_req_fn)
                        e->ops->elevator_completed_req_fn(q, rq);
        }

        /*
         * Check if the queue is waiting for fs requests to be
         * drained for flush sequence.
         */
        if (unlikely(q->ordseq)) {
                struct request *first_rq = list_entry_rq(q->queue_head.next);
                if (q->in_flight == 0 &&
                    blk_ordered_cur_seq(q) == QUEUE_ORDSEQ_DRAIN &&
                    blk_ordered_req_seq(first_rq) > QUEUE_ORDSEQ_DRAIN) {
                        blk_ordered_complete_seq(q, QUEUE_ORDSEQ_DRAIN, 0);
                        q->request_fn(q);
                }
        }
}

#define to_elv(atr) container_of((atr), struct elv_fs_entry, attr)

static ssize_t
elv_attr_show(struct kobject *kobj, struct attribute *attr, char *page)
{
        elevator_t *e = container_of(kobj, elevator_t, kobj);
        struct elv_fs_entry *entry = to_elv(attr);
        ssize_t error;

        if (!entry->show)
                return -EIO;

        mutex_lock(&e->sysfs_lock);
        error = e->ops ? entry->show(e, page) : -ENOENT;
        mutex_unlock(&e->sysfs_lock);
        return error;
}

static ssize_t
elv_attr_store(struct kobject *kobj, struct attribute *attr,
               const char *page, size_t length)
{
        elevator_t *e = container_of(kobj, elevator_t, kobj);
        struct elv_fs_entry *entry = to_elv(attr);
        ssize_t error;

        if (!entry->store)
                return -EIO;

        mutex_lock(&e->sysfs_lock);
        error = e->ops ? entry->store(e, page, length) : -ENOENT;
        mutex_unlock(&e->sysfs_lock);
        return error;
}

static struct sysfs_ops elv_sysfs_ops = {
        .show   = elv_attr_show,
        .store  = elv_attr_store,
};

static struct kobj_type elv_ktype = {
        .sysfs_ops      = &elv_sysfs_ops,
        .release        = elevator_release,
};

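/*
 * Register the elevator's kobject under the queue in sysfs and create the
 * scheduler-specific attribute files, if any.
 */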
int elv_register_queue(struct request_queue *q)
{
        elevator_t *e = q->elevator;
        int error;

        e->kobj.parent = &q->kobj;

        error = kobject_add(&e->kobj);
        if (!error) {
                struct elv_fs_entry *attr = e->elevator_type->elevator_attrs;
                if (attr) {
                        while (attr->attr.name) {
                                if (sysfs_create_file(&e->kobj, &attr->attr))
                                        break;
                                attr++;
                        }
                }
                kobject_uevent(&e->kobj, KOBJ_ADD);
        }
        return error;
}

static void __elv_unregister_queue(elevator_t *e)
{
        kobject_uevent(&e->kobj, KOBJ_REMOVE);
        kobject_del(&e->kobj);
}

void elv_unregister_queue(struct request_queue *q)
{
        if (q)
                __elv_unregister_queue(q->elevator);
}

int elv_register(struct elevator_type *e)
{
        spin_lock_irq(&elv_list_lock);
        BUG_ON(elevator_find(e->elevator_name));
        list_add_tail(&e->list, &elv_list);
        spin_unlock_irq(&elv_list_lock);

        printk(KERN_INFO "io scheduler %s registered", e->elevator_name);
        if (!strcmp(e->elevator_name, chosen_elevator) ||
            (!*chosen_elevator &&
             !strcmp(e->elevator_name, CONFIG_DEFAULT_IOSCHED)))
                printk(" (default)");
        printk("\n");
        return 0;
}
EXPORT_SYMBOL_GPL(elv_register);

void elv_unregister(struct elevator_type *e)
{
        struct task_struct *g, *p;

        /*
         * Iterate every thread in the process to remove the io contexts.
         */
        if (e->ops.trim) {
                read_lock(&tasklist_lock);
                do_each_thread(g, p) {
                        task_lock(p);
                        if (p->io_context)
                                e->ops.trim(p->io_context);
                        task_unlock(p);
                } while_each_thread(g, p);
                read_unlock(&tasklist_lock);
        }

        spin_lock_irq(&elv_list_lock);
        list_del_init(&e->list);
        spin_unlock_irq(&elv_list_lock);
}
EXPORT_SYMBOL_GPL(elv_unregister);

/*
 * switch to new_e io scheduler. be careful not to introduce deadlocks -
 * we don't free the old io scheduler, before we have allocated what we
 * need for the new one. this way we have a chance of going back to the old
 * one, if the new one fails init for some reason.
 */
static int elevator_switch(request_queue_t *q, struct elevator_type *new_e)
{
        elevator_t *old_elevator, *e;
        void *data;

        /*
         * Allocate new elevator
         */
        e = elevator_alloc(q, new_e);
        if (!e)
                return 0;

        data = elevator_init_queue(q, e);
        if (!data) {
                kobject_put(&e->kobj);
                return 0;
        }

        /*
         * Turn on BYPASS and drain all requests w/ elevator private data
         */
        spin_lock_irq(q->queue_lock);

        set_bit(QUEUE_FLAG_ELVSWITCH, &q->queue_flags);

        elv_drain_elevator(q);

        while (q->rq.elvpriv) {
                blk_remove_plug(q);
                q->request_fn(q);
                spin_unlock_irq(q->queue_lock);
                msleep(10);
                spin_lock_irq(q->queue_lock);
                elv_drain_elevator(q);
        }

        /*
         * Remember old elevator.
         */
        old_elevator = q->elevator;

        /*
         * attach and start new elevator
         */
        elevator_attach(q, e, data);

        spin_unlock_irq(q->queue_lock);

        __elv_unregister_queue(old_elevator);

        if (elv_register_queue(q))
                goto fail_register;

        /*
         * finally exit old elevator and turn off BYPASS.
         */
        elevator_exit(old_elevator);
        clear_bit(QUEUE_FLAG_ELVSWITCH, &q->queue_flags);
        return 1;

fail_register:
        /*
         * switch failed, exit the new io scheduler and reattach the old
         * one again (along with re-adding the sysfs dir)
         */
        elevator_exit(e);
        q->elevator = old_elevator;
        elv_register_queue(q);
        clear_bit(QUEUE_FLAG_ELVSWITCH, &q->queue_flags);
        return 0;
}

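/*
 * sysfs store handler for the queue's scheduler attribute: parse the
 * requested name and switch to that io scheduler unless it is already
 * the active one.
 */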
ssize_t elv_iosched_store(request_queue_t *q, const char *name, size_t count)
{
        char elevator_name[ELV_NAME_MAX];
        size_t len;
        struct elevator_type *e;

        elevator_name[sizeof(elevator_name) - 1] = '\0';
        strncpy(elevator_name, name, sizeof(elevator_name) - 1);
        len = strlen(elevator_name);

        if (len && elevator_name[len - 1] == '\n')
                elevator_name[len - 1] = '\0';

        e = elevator_get(elevator_name);
        if (!e) {
                printk(KERN_ERR "elevator: type %s not found\n", elevator_name);
                return -EINVAL;
        }

        if (!strcmp(elevator_name, q->elevator->elevator_type->elevator_name)) {
                elevator_put(e);
                return count;
        }

        if (!elevator_switch(q, e))
                printk(KERN_ERR "elevator: switch to %s failed\n", elevator_name);
        return count;
}

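/*
 * sysfs show handler for the queue's scheduler attribute: list every
 * registered elevator, with the active one in square brackets.
 */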
ssize_t elv_iosched_show(request_queue_t *q, char *name)
{
        elevator_t *e = q->elevator;
        struct elevator_type *elv = e->elevator_type;
        struct list_head *entry;
        int len = 0;

        spin_lock_irq(&elv_list_lock);
        list_for_each(entry, &elv_list) {
                struct elevator_type *__e;

                __e = list_entry(entry, struct elevator_type, list);
                if (!strcmp(elv->elevator_name, __e->elevator_name))
                        len += sprintf(name+len, "[%s] ", elv->elevator_name);
                else
                        len += sprintf(name+len, "%s ", __e->elevator_name);
        }
        spin_unlock_irq(&elv_list_lock);

        len += sprintf(len+name, "\n");
        return len;
}

struct request *elv_rb_former_request(request_queue_t *q, struct request *rq)
{
        struct rb_node *rbprev = rb_prev(&rq->rb_node);

        if (rbprev)
                return rb_entry_rq(rbprev);

        return NULL;
}

EXPORT_SYMBOL(elv_rb_former_request);

struct request *elv_rb_latter_request(request_queue_t *q, struct request *rq)
{
        struct rb_node *rbnext = rb_next(&rq->rb_node);

        if (rbnext)
                return rb_entry_rq(rbnext);

        return NULL;
}

EXPORT_SYMBOL(elv_rb_latter_request);