/*
 *  linux/drivers/block/as-iosched.c
 *
 *  Anticipatory & deadline i/o scheduler.
 *
 *  Copyright (C) 2002 Jens Axboe <axboe@suse.de>
 *                     Nick Piggin <piggin@cyberone.com.au>
 */
#include <linux/kernel.h>
#include <linux/blkdev.h>
#include <linux/elevator.h>
#include <linux/bio.h>
#include <linux/config.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/init.h>
#include <linux/compiler.h>
#include <linux/hash.h>
#include <linux/rbtree.h>
#include <linux/interrupt.h>
/*
 * See Documentation/block/as-iosched.txt
 */

/*
 * max time before a read is submitted.
 */
#define default_read_expire (HZ / 8)

/*
 * ditto for writes, these limits are not hard, even
 * if the disk is capable of satisfying them.
 */
#define default_write_expire (HZ / 4)

/*
 * read_batch_expire describes how long we will allow a stream of reads to
 * persist before looking to see whether it is time to switch over to writes.
 */
#define default_read_batch_expire (HZ / 2)

/*
 * write_batch_expire describes how long we want a stream of writes to run for.
 * This is not a hard limit, but a target we set for the auto-tuning thingy.
 * See, the problem is: we can send a lot of writes to disk cache / TCQ in
 * a short amount of time...
 */
#define default_write_batch_expire (HZ / 8)

/*
 * max time we may wait to anticipate a read (default around 6ms)
 */
#define default_antic_expire ((HZ / 150) ? HZ / 150 : 1)

/*
 * Keep track of up to 20ms thinktimes. We can go as big as we like here,
 * however huge values tend to interfere and not decay fast enough. A program
 * might be in a non-io phase of operation. Waiting on user input for example,
 * or doing a lengthy computation. A small penalty can be justified there, and
 * will still catch out those processes that constantly have large thinktimes.
 */
#define MAX_THINKTIME (HZ/50UL)
/* Bits in as_io_context.state */
enum as_io_states {
	AS_TASK_RUNNING=0,	/* Process has not exited */
	AS_TASK_IOSTARTED,	/* Process has started some IO */
	AS_TASK_IORUNNING,	/* Process has completed some IO */
};

enum anticipation_status {
	ANTIC_OFF=0,		/* Not anticipating (normal operation) */
	ANTIC_WAIT_REQ,		/* The last read has not yet completed */
	ANTIC_WAIT_NEXT,	/* Currently anticipating a request vs
				   last read (which has completed) */
	ANTIC_FINISHED,		/* Anticipating but have found a candidate
				   or timed out */
};
struct as_data {
	struct request_queue *q;	/* the "owner" queue */

	/*
	 * requests (as_rq s) are present on both sort_list and fifo_list
	 */
	struct rb_root sort_list[2];
	struct list_head fifo_list[2];

	struct as_rq *next_arq[2];	/* next in sort order */
	sector_t last_sector[2];	/* last REQ_SYNC & REQ_ASYNC sectors */
	struct list_head *hash;		/* request hash */

	unsigned long exit_prob;	/* probability a task will exit while
					   being waited on */
	unsigned long new_ttime_total; 	/* mean thinktime on new proc */
	unsigned long new_ttime_mean;
	u64 new_seek_total;		/* mean seek on new proc */
	sector_t new_seek_mean;

	unsigned long current_batch_expires;
	unsigned long last_check_fifo[2];
	int changed_batch;		/* 1: waiting for old batch to end */
	int new_batch;			/* 1: waiting on first read complete */
	int batch_data_dir;		/* current batch REQ_SYNC / REQ_ASYNC */
	int write_batch_count;		/* max # of reqs in a write batch */
	int current_write_count;	/* how many requests left this batch */
	int write_batch_idled;		/* has the write batch gone idle? */
	mempool_t *arq_pool;

	enum anticipation_status antic_status;
	unsigned long antic_start;	/* jiffies: when it started */
	struct timer_list antic_timer;	/* anticipatory scheduling timer */
	struct work_struct antic_work;	/* Deferred unplugging */
	struct io_context *io_context;	/* Identify the expected process */
	int ioc_finished;		/* IO associated with io_context is finished */
	int nr_dispatched;

	/*
	 * settings that change how the i/o scheduler behaves
	 */
	unsigned long fifo_expire[2];
	unsigned long batch_expire[2];
	unsigned long antic_expire;
};

#define list_entry_fifo(ptr)	list_entry((ptr), struct as_rq, fifo)
enum arq_state {
	AS_RQ_NEW=0,		/* New - not referenced and not on any lists */
	AS_RQ_QUEUED,		/* In the request queue. It belongs to the
				   scheduler */
	AS_RQ_DISPATCHED,	/* On the dispatch list. It belongs to the
				   driver now */
	AS_RQ_PRESCHED,		/* Debug poisoning for requests being used */
	AS_RQ_POSTSCHED,	/* when they shouldn't be */
	AS_RQ_REMOVED,
	AS_RQ_MERGED,
};

struct as_rq {
	/*
	 * rbtree index, key is the starting offset
	 */
	struct rb_node rb_node;
	sector_t rb_key;

	struct request *request;

	struct io_context *io_context;	/* The submitting task */

	/*
	 * request hash, key is the ending offset (for back merge lookup)
	 */
	struct list_head hash;
	unsigned int on_hash;

	struct list_head fifo;
	unsigned long expires;

	unsigned int is_sync;
	enum arq_state state;
};
#define RQ_DATA(rq)	((struct as_rq *) (rq)->elevator_private)

static kmem_cache_t *arq_pool;
/*
 * IO Context helper functions
 */

/* Called to deallocate the as_io_context */
static void free_as_io_context(struct as_io_context *aic)
{
	kfree(aic);
}

/* Called when the task exits */
static void exit_as_io_context(struct as_io_context *aic)
{
	WARN_ON(!test_bit(AS_TASK_RUNNING, &aic->state));
	clear_bit(AS_TASK_RUNNING, &aic->state);
}

static struct as_io_context *alloc_as_io_context(void)
{
	struct as_io_context *ret;

	ret = kmalloc(sizeof(*ret), GFP_ATOMIC);
	if (ret) {
		ret->dtor = free_as_io_context;
		ret->exit = exit_as_io_context;
		ret->state = 1 << AS_TASK_RUNNING;
		atomic_set(&ret->nr_queued, 0);
		atomic_set(&ret->nr_dispatched, 0);
		spin_lock_init(&ret->lock);
		ret->ttime_total = 0;
		ret->ttime_samples = 0;
		ret->seek_samples = 0;
	}

	return ret;
}
/*
 * If the current task has no AS IO context then create one and initialise it.
 * Then take a ref on the task's io context and return it.
 */
static struct io_context *as_get_io_context(void)
{
	struct io_context *ioc = get_io_context(GFP_ATOMIC);
	if (ioc && !ioc->aic) {
		ioc->aic = alloc_as_io_context();
		if (!ioc->aic) {
			put_io_context(ioc);
			ioc = NULL;
		}
	}
	return ioc;
}
static void as_put_io_context(struct as_rq *arq)
{
	struct as_io_context *aic;

	if (unlikely(!arq->io_context))
		return;

	aic = arq->io_context->aic;

	if (arq->is_sync == REQ_SYNC && aic) {
		spin_lock(&aic->lock);
		set_bit(AS_TASK_IORUNNING, &aic->state);
		aic->last_end_request = jiffies;
		spin_unlock(&aic->lock);
	}

	put_io_context(arq->io_context);
}
/*
 * the back merge hash support functions
 */
static const int as_hash_shift = 6;
#define AS_HASH_BLOCK(sec)	((sec) >> 3)
#define AS_HASH_FN(sec)		(hash_long(AS_HASH_BLOCK((sec)), as_hash_shift))
#define AS_HASH_ENTRIES		(1 << as_hash_shift)
#define rq_hash_key(rq)		((rq)->sector + (rq)->nr_sectors)
#define list_entry_hash(ptr)	list_entry((ptr), struct as_rq, hash)
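/*
 * Illustrative sketch only (not part of the original scheduler): how the
 * macros above bucket a request for back-merge lookup. The key is the
 * request's *end* sector, shifted down to 8-sector blocks and hashed into one
 * of AS_HASH_ENTRIES chains. A bio starting exactly where a hashed request
 * ends computes the same bucket and can therefore be back merged.
 */
static inline unsigned long example_hash_bucket(sector_t start, sector_t nr)
{
	sector_t end = start + nr;	/* same key as rq_hash_key() */

	return AS_HASH_FN(end);		/* e.g. a request [1000,1008) keys on 1008 */
}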
static inline void __as_del_arq_hash(struct as_rq *arq)
{
	arq->on_hash = 0;
	list_del_init(&arq->hash);
}

static inline void as_del_arq_hash(struct as_rq *arq)
{
	if (arq->on_hash)
		__as_del_arq_hash(arq);
}

static void as_remove_merge_hints(request_queue_t *q, struct as_rq *arq)
{
	as_del_arq_hash(arq);

	if (q->last_merge == arq->request)
		q->last_merge = NULL;
}
static void as_add_arq_hash(struct as_data *ad, struct as_rq *arq)
{
	struct request *rq = arq->request;

	BUG_ON(arq->on_hash);

	arq->on_hash = 1;
	list_add(&arq->hash, &ad->hash[AS_HASH_FN(rq_hash_key(rq))]);
}
/*
 * move hot entry to front of chain
 */
static inline void as_hot_arq_hash(struct as_data *ad, struct as_rq *arq)
{
	struct request *rq = arq->request;
	struct list_head *head = &ad->hash[AS_HASH_FN(rq_hash_key(rq))];

	if (arq->hash.prev != head) {
		list_del(&arq->hash);
		list_add(&arq->hash, head);
	}
}
static struct request *as_find_arq_hash(struct as_data *ad, sector_t offset)
{
	struct list_head *hash_list = &ad->hash[AS_HASH_FN(offset)];
	struct list_head *entry, *next = hash_list->next;

	while ((entry = next) != hash_list) {
		struct as_rq *arq = list_entry_hash(entry);
		struct request *__rq = arq->request;

		next = entry->next;

		BUG_ON(!arq->on_hash);

		if (!rq_mergeable(__rq)) {
			as_remove_merge_hints(ad->q, arq);
			continue;
		}

		if (rq_hash_key(__rq) == offset)
			return __rq;
	}

	return NULL;
}
/*
 * rb tree support functions
 */
#define RB_EMPTY(root)		((root)->rb_node == NULL)
#define ON_RB(node)		((node)->rb_color != RB_NONE)
#define RB_CLEAR(node)		((node)->rb_color = RB_NONE)
#define rb_entry_arq(node)	rb_entry((node), struct as_rq, rb_node)
#define ARQ_RB_ROOT(ad, arq)	(&(ad)->sort_list[(arq)->is_sync])
#define rq_rb_key(rq)		(rq)->sector
/*
 * as_find_first_arq finds the first (lowest sector numbered) request
 * for the specified data_dir. Used to sweep back to the start of the disk
 * (1-way elevator) after we process the last (highest sector) request.
 */
static struct as_rq *as_find_first_arq(struct as_data *ad, int data_dir)
{
	struct rb_node *n = ad->sort_list[data_dir].rb_node;

	if (n == NULL)
		return NULL;

	for (;;) {
		if (n->rb_left == NULL)
			return rb_entry_arq(n);

		n = n->rb_left;
	}
}
/*
 * Add the request to the rb tree if it is unique. If there is an alias (an
 * existing request against the same sector), which can happen when using
 * direct IO, then return the alias.
 */
static struct as_rq *as_add_arq_rb(struct as_data *ad, struct as_rq *arq)
{
	struct rb_node **p = &ARQ_RB_ROOT(ad, arq)->rb_node;
	struct rb_node *parent = NULL;
	struct as_rq *__arq;
	struct request *rq = arq->request;

	arq->rb_key = rq_rb_key(rq);

	while (*p) {
		parent = *p;
		__arq = rb_entry_arq(parent);

		if (arq->rb_key < __arq->rb_key)
			p = &(*p)->rb_left;
		else if (arq->rb_key > __arq->rb_key)
			p = &(*p)->rb_right;
		else
			return __arq;
	}

	rb_link_node(&arq->rb_node, parent, p);
	rb_insert_color(&arq->rb_node, ARQ_RB_ROOT(ad, arq));

	return NULL;
}
static inline void as_del_arq_rb(struct as_data *ad, struct as_rq *arq)
{
	if (!ON_RB(&arq->rb_node)) {
		WARN_ON(1);
		return;
	}

	rb_erase(&arq->rb_node, ARQ_RB_ROOT(ad, arq));
	RB_CLEAR(&arq->rb_node);
}
static struct request *
as_find_arq_rb(struct as_data *ad, sector_t sector, int data_dir)
{
	struct rb_node *n = ad->sort_list[data_dir].rb_node;
	struct as_rq *arq;

	while (n) {
		arq = rb_entry_arq(n);

		if (sector < arq->rb_key)
			n = n->rb_left;
		else if (sector > arq->rb_key)
			n = n->rb_right;
		else
			return arq->request;
	}

	return NULL;
}
/*
 * IO Scheduler proper
 */

#define MAXBACK (1024 * 1024)	/*
				 * Maximum distance the disk will go backward
				 * for a request, in sectors
				 */

#define BACK_PENALTY	2
/*
 * as_choose_req selects the preferred one of two requests of the same data_dir
 * ignoring time - eg. timeouts, which is the job of as_dispatch_request
 */
static struct as_rq *
as_choose_req(struct as_data *ad, struct as_rq *arq1, struct as_rq *arq2)
{
	int data_dir;
	sector_t last, s1, s2, d1, d2;
	int r1_wrap=0, r2_wrap=0;	/* requests are behind the disk head */
	const sector_t maxback = MAXBACK;

	if (arq1 == NULL || arq1 == arq2)
		return arq2;
	if (arq2 == NULL)
		return arq1;

	data_dir = arq1->is_sync;

	last = ad->last_sector[data_dir];
	s1 = arq1->request->sector;
	s2 = arq2->request->sector;

	BUG_ON(data_dir != arq2->is_sync);

	/*
	 * Strict one way elevator _except_ in the case where we allow
	 * short backward seeks which are biased as twice the cost of a
	 * similar forward seek.
	 */
	if (s1 >= last)
		d1 = s1 - last;
	else if (s1+maxback >= last)
		d1 = (last - s1)*BACK_PENALTY;
	else {
		r1_wrap = 1;
		d1 = 0; /* shut up, gcc */
	}

	if (s2 >= last)
		d2 = s2 - last;
	else if (s2+maxback >= last)
		d2 = (last - s2)*BACK_PENALTY;
	else {
		r2_wrap = 1;
		d2 = 0;
	}

	/* Found required data */
	if (!r1_wrap && r2_wrap)
		return arq1;
	else if (!r2_wrap && r1_wrap)
		return arq2;
	else if (r1_wrap && r2_wrap) {
		/* both behind the head */
		if (s1 <= s2)
			return arq1;
		else
			return arq2;
	}

	/* Both requests in front of the head */
	if (d1 < d2)
		return arq1;
	else if (d2 < d1)
		return arq2;
	else {
		if (s1 >= s2)
			return arq1;
		else
			return arq2;
	}
}
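/*
 * Illustrative sketch only (not part of the original scheduler): the seek
 * "cost" used above, in isolation. With the head at sector 1000, a request
 * 4 sectors behind (996) costs (1000 - 996) * BACK_PENALTY = 8 and so still
 * beats a request 10 sectors ahead, while anything more than MAXBACK sectors
 * behind counts as wrapped and is only chosen as a last resort.
 */
static inline sector_t example_seek_cost(sector_t last, sector_t s, int *wrapped)
{
	*wrapped = 0;
	if (s >= last)
		return s - last;			/* forward: raw distance */
	if (s + MAXBACK >= last)
		return (last - s) * BACK_PENALTY;	/* short backward: doubled */
	*wrapped = 1;					/* too far back */
	return 0;
}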
/*
 * as_find_next_arq finds the next request after @prev in elevator order.
 * this with as_choose_req form the basis for how the scheduler chooses
 * what request to process next. Anticipation works on top of this.
 */
static struct as_rq *as_find_next_arq(struct as_data *ad, struct as_rq *last)
{
	const int data_dir = last->is_sync;
	struct as_rq *ret;
	struct rb_node *rbnext = rb_next(&last->rb_node);
	struct rb_node *rbprev = rb_prev(&last->rb_node);
	struct as_rq *arq_next, *arq_prev;

	BUG_ON(!ON_RB(&last->rb_node));

	if (rbprev)
		arq_prev = rb_entry_arq(rbprev);
	else
		arq_prev = NULL;

	if (rbnext)
		arq_next = rb_entry_arq(rbnext);
	else {
		arq_next = as_find_first_arq(ad, data_dir);
		if (arq_next == last)
			arq_next = NULL;
	}

	ret = as_choose_req(ad, arq_next, arq_prev);

	return ret;
}
/*
 * anticipatory scheduling functions follow
 */

/*
 * as_antic_expired tells us when we have anticipated too long.
 * The funny "absolute difference" math on the elapsed time is to handle
 * jiffy wraps, and disks which have been idle for 0x80000000 jiffies.
 */
static int as_antic_expired(struct as_data *ad)
{
	long delta_jif;

	delta_jif = jiffies - ad->antic_start;
	if (unlikely(delta_jif < 0))
		delta_jif = -delta_jif;
	if (delta_jif < ad->antic_expire)
		return 0;

	return 1;
}
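/*
 * Illustrative sketch only (not part of the original scheduler): the
 * "absolute difference" trick used above. If jiffies has wrapped past
 * antic_start, the unsigned subtraction looks like a huge value whose signed
 * interpretation is negative; negating it recovers a sane elapsed time
 * instead of declaring the anticipation expired immediately.
 */
static inline int example_elapsed_at_least(unsigned long start,
					   unsigned long now,
					   unsigned long timeout)
{
	long delta = now - start;	/* may be "negative" across a wrap */

	if (delta < 0)
		delta = -delta;

	return delta >= timeout;
}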
/*
 * as_antic_waitnext starts anticipating that a nice request will soon be
 * submitted. See also as_antic_waitreq
 */
static void as_antic_waitnext(struct as_data *ad)
{
	unsigned long timeout;

	BUG_ON(ad->antic_status != ANTIC_OFF
			&& ad->antic_status != ANTIC_WAIT_REQ);

	timeout = ad->antic_start + ad->antic_expire;

	mod_timer(&ad->antic_timer, timeout);

	ad->antic_status = ANTIC_WAIT_NEXT;
}
/*
 * as_antic_waitreq starts anticipating. We don't start timing the anticipation
 * until the request that we're anticipating on has finished. This means we
 * are timing from when the candidate process wakes up hopefully.
 */
static void as_antic_waitreq(struct as_data *ad)
{
	BUG_ON(ad->antic_status == ANTIC_FINISHED);
	if (ad->antic_status == ANTIC_OFF) {
		if (!ad->io_context || ad->ioc_finished)
			as_antic_waitnext(ad);
		else
			ad->antic_status = ANTIC_WAIT_REQ;
	}
}
/*
 * This is called directly by the functions in this file to stop anticipation.
 * We kill the timer and schedule a call to the request_fn asap.
 */
static void as_antic_stop(struct as_data *ad)
{
	int status = ad->antic_status;

	if (status == ANTIC_WAIT_REQ || status == ANTIC_WAIT_NEXT) {
		if (status == ANTIC_WAIT_NEXT)
			del_timer(&ad->antic_timer);
		ad->antic_status = ANTIC_FINISHED;
		/* see as_work_handler */
		kblockd_schedule_work(&ad->antic_work);
	}
}
/*
 * as_antic_timeout is the timer function set by as_antic_waitnext.
 */
static void as_antic_timeout(unsigned long data)
{
	struct request_queue *q = (struct request_queue *)data;
	struct as_data *ad = q->elevator->elevator_data;
	unsigned long flags;

	spin_lock_irqsave(q->queue_lock, flags);
	if (ad->antic_status == ANTIC_WAIT_REQ
			|| ad->antic_status == ANTIC_WAIT_NEXT) {
		struct as_io_context *aic = ad->io_context->aic;

		ad->antic_status = ANTIC_FINISHED;
		kblockd_schedule_work(&ad->antic_work);

		if (aic->ttime_samples == 0) {
			/* process anticipated on has exited or timed out */
			ad->exit_prob = (7*ad->exit_prob + 256)/8;
		}
	}
	spin_unlock_irqrestore(q->queue_lock, flags);
}
/*
 * as_close_req decides if one request is considered "close" to the
 * previous one issued.
 */
static int as_close_req(struct as_data *ad, struct as_rq *arq)
{
	unsigned long delay;	/* milliseconds */
	sector_t last = ad->last_sector[ad->batch_data_dir];
	sector_t next = arq->request->sector;
	sector_t delta;	/* acceptable close offset (in sectors) */

	if (ad->antic_status == ANTIC_OFF || !ad->ioc_finished)
		delay = 0;
	else
		delay = ((jiffies - ad->antic_start) * 1000) / HZ;

	if (delay <= 1)
		delta = 64;
	else if (delay <= 20 && delay <= ad->antic_expire)
		delta = 64 << (delay-1);
	else
		return 1;

	return (last - (delta>>1) <= next) && (next <= last + delta);
}
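/*
 * Illustrative sketch only (not part of the original scheduler): how the
 * "close" window above widens while we wait. The acceptable distance starts
 * at 64 sectors and doubles for every elapsed millisecond (up to 20ms and the
 * antic_expire limit), so a request 200 sectors away is not close after 1ms
 * of anticipation but is after roughly 3ms.
 */
static inline sector_t example_close_window(unsigned long delay_ms)
{
	if (delay_ms <= 1)
		return 64;
	if (delay_ms <= 20)
		return (sector_t)64 << (delay_ms - 1);

	return (sector_t)-1;	/* waited long enough: anything is "close" */
}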
/*
 * as_can_break_anticipation returns true if we have been anticipating this
 * request.
 *
 * It also returns true if the process against which we are anticipating
 * submits a write - that's presumably an fsync, O_SYNC write, etc. We want to
 * dispatch it ASAP, because we know that application will not be submitting
 * any new reads.
 *
 * If the task which has submitted the request has exited, break anticipation.
 *
 * If this task has queued some other IO, do not enter anticipation.
 */
static int as_can_break_anticipation(struct as_data *ad, struct as_rq *arq)
{
	struct io_context *ioc;
	struct as_io_context *aic;
	sector_t s;

	ioc = ad->io_context;
	BUG_ON(!ioc);

	if (arq && ioc == arq->io_context) {
		/* request from same process */
		return 1;
	}

	if (ad->ioc_finished && as_antic_expired(ad)) {
		/*
		 * In this situation status should really be FINISHED,
		 * however the timer hasn't had the chance to run yet.
		 */
		return 1;
	}

	aic = ioc->aic;
	if (!aic)
		return 0;

	if (!test_bit(AS_TASK_RUNNING, &aic->state)) {
		/* process anticipated on has exited */
		if (aic->ttime_samples == 0)
			ad->exit_prob = (7*ad->exit_prob + 256)/8;
		return 1;
	}

	if (atomic_read(&aic->nr_queued) > 0) {
		/* process has more requests queued */
		return 1;
	}

	if (atomic_read(&aic->nr_dispatched) > 0) {
		/* process has more requests dispatched */
		return 1;
	}

	if (arq && arq->is_sync == REQ_SYNC && as_close_req(ad, arq)) {
		/*
		 * Found a close request that is not one of ours.
		 *
		 * This makes close requests from another process reset
		 * our thinktime delay. Is generally useful when there are
		 * two or more cooperating processes working in the same
		 * area.
		 */
		spin_lock(&aic->lock);
		aic->last_end_request = jiffies;
		spin_unlock(&aic->lock);
		return 1;
	}

	if (aic->ttime_samples == 0) {
		if (ad->new_ttime_mean > ad->antic_expire)
			return 1;
		if (ad->exit_prob > 128)
			return 1;
	} else if (aic->ttime_mean > ad->antic_expire) {
		/* the process thinks too much between requests */
		return 1;
	}

	if (!arq)
		return 0;

	if (ad->last_sector[REQ_SYNC] < arq->request->sector)
		s = arq->request->sector - ad->last_sector[REQ_SYNC];
	else
		s = ad->last_sector[REQ_SYNC] - arq->request->sector;

	if (aic->seek_samples == 0) {
		/*
		 * Process has just started IO. Use past statistics to
		 * gauge success possibility
		 */
		if (ad->new_seek_mean > s) {
			/* this request is better than what we're expecting */
			return 1;
		}
	} else {
		if (aic->seek_mean > s) {
			/* this request is better than what we're expecting */
			return 1;
		}
	}

	return 0;
}
/*
 * as_can_anticipate indicates whether we should either run arq
 * or keep anticipating a better request.
 */
static int as_can_anticipate(struct as_data *ad, struct as_rq *arq)
{
	if (!ad->io_context)
		/*
		 * Last request submitted was a write
		 */
		return 0;

	if (ad->antic_status == ANTIC_FINISHED)
		/*
		 * Don't restart if we have just finished. Run the next request
		 */
		return 0;

	if (as_can_break_anticipation(ad, arq))
		/*
		 * This request is a good candidate. Don't keep anticipating,
		 * run it.
		 */
		return 0;

	/*
	 * OK from here, we haven't finished, and don't have a decent request!
	 * Status is either ANTIC_OFF so start waiting,
	 * ANTIC_WAIT_REQ so continue waiting for request to finish
	 * or ANTIC_WAIT_NEXT so continue waiting for an acceptable request.
	 */

	return 1;
}
static void as_update_thinktime(struct as_data *ad, struct as_io_context *aic, unsigned long ttime)
{
	/* fixed point: 1.0 == 1<<8 */
	if (aic->ttime_samples == 0) {
		ad->new_ttime_total = (7*ad->new_ttime_total + 256*ttime) / 8;
		ad->new_ttime_mean = ad->new_ttime_total / 256;

		ad->exit_prob = (7*ad->exit_prob)/8;
	}
	aic->ttime_samples = (7*aic->ttime_samples + 256) / 8;
	aic->ttime_total = (7*aic->ttime_total + 256*ttime) / 8;
	aic->ttime_mean = (aic->ttime_total + 128) / aic->ttime_samples;
}
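/*
 * Illustrative sketch only (not part of the original scheduler): the
 * fixed-point decaying average used above, where 1.0 is stored as 1<<8.
 * Each new sample is mixed in with weight 1/8, so a constant input converges
 * to that value and a single outlier decays away geometrically.
 */
static unsigned long example_decaying_mean(const unsigned long *samples, int n)
{
	unsigned long weight = 0, total = 0;	/* both scaled by 256 */
	int i;

	for (i = 0; i < n; i++) {
		weight = (7*weight + 256) / 8;
		total  = (7*total + 256*samples[i]) / 8;
	}

	return weight ? (total + weight/2) / weight : 0;
}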
static void as_update_seekdist(struct as_data *ad, struct as_io_context *aic, sector_t sdist)
{
	u64 total;

	if (aic->seek_samples == 0) {
		ad->new_seek_total = (7*ad->new_seek_total + 256*(u64)sdist)/8;
		ad->new_seek_mean = ad->new_seek_total / 256;
	}

	/*
	 * Don't allow the seek distance to get too large from the
	 * odd fragment, pagein, etc
	 */
	if (aic->seek_samples <= 60) /* second&third seek */
		sdist = min(sdist, (aic->seek_mean * 4) + 2*1024*1024);
	else
		sdist = min(sdist, (aic->seek_mean * 4) + 2*1024*64);

	aic->seek_samples = (7*aic->seek_samples + 256) / 8;
	aic->seek_total = (7*aic->seek_total + (u64)256*sdist) / 8;
	total = aic->seek_total + (aic->seek_samples/2);
	do_div(total, aic->seek_samples);
	aic->seek_mean = (sector_t)total;
}
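/*
 * Illustrative sketch only (not part of the original scheduler): the clamp
 * applied above before a seek sample enters the decaying average. While the
 * history is short a much larger slack is tolerated than later on, so one
 * stray far-away access (a pagein, an odd fragment) cannot blow up the mean.
 */
static inline sector_t example_clamp_seek(sector_t sdist, sector_t mean,
					  unsigned long samples)
{
	sector_t limit;

	if (samples <= 60)		/* second & third seek */
		limit = mean * 4 + 2*1024*1024;
	else
		limit = mean * 4 + 2*1024*64;

	return sdist < limit ? sdist : limit;
}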
/*
 * as_update_iohist keeps a decaying histogram of IO thinktimes, and
 * updates @aic->ttime_mean based on that. It is called when a new
 * request is queued.
 */
static void as_update_iohist(struct as_data *ad, struct as_io_context *aic, struct request *rq)
{
	struct as_rq *arq = RQ_DATA(rq);
	int data_dir = arq->is_sync;
	unsigned long thinktime;
	sector_t seek_dist;

	if (aic == NULL)
		return;

	if (data_dir == REQ_SYNC) {
		unsigned long in_flight = atomic_read(&aic->nr_queued)
					+ atomic_read(&aic->nr_dispatched);
		spin_lock(&aic->lock);
		if (test_bit(AS_TASK_IORUNNING, &aic->state) ||
			test_bit(AS_TASK_IOSTARTED, &aic->state)) {
			/* Calculate read -> read thinktime */
			if (test_bit(AS_TASK_IORUNNING, &aic->state)
							&& in_flight == 0) {
				thinktime = jiffies - aic->last_end_request;
				thinktime = min(thinktime, MAX_THINKTIME-1);
			} else
				thinktime = 0;
			as_update_thinktime(ad, aic, thinktime);

			/* Calculate read -> read seek distance */
			if (aic->last_request_pos < rq->sector)
				seek_dist = rq->sector - aic->last_request_pos;
			else
				seek_dist = aic->last_request_pos - rq->sector;
			as_update_seekdist(ad, aic, seek_dist);
		}
		aic->last_request_pos = rq->sector + rq->nr_sectors;
		set_bit(AS_TASK_IOSTARTED, &aic->state);
		spin_unlock(&aic->lock);
	}
}
/*
 * as_update_arq must be called whenever a request (arq) is added to
 * the sort_list. This function keeps caches up to date, and checks if the
 * request might be one we are "anticipating"
 */
static void as_update_arq(struct as_data *ad, struct as_rq *arq)
{
	const int data_dir = arq->is_sync;

	/* keep the next_arq cache up to date */
	ad->next_arq[data_dir] = as_choose_req(ad, arq, ad->next_arq[data_dir]);

	/*
	 * have we been anticipating this request?
	 * or does it come from the same process as the one we are anticipating
	 * for?
	 */
	if (ad->antic_status == ANTIC_WAIT_REQ
			|| ad->antic_status == ANTIC_WAIT_NEXT) {
		if (as_can_break_anticipation(ad, arq))
			as_antic_stop(ad);
	}
}
/*
 * Gathers timings and resizes the write batch automatically
 */
static void update_write_batch(struct as_data *ad)
{
	unsigned long batch = ad->batch_expire[REQ_ASYNC];
	long write_time;

	write_time = (jiffies - ad->current_batch_expires) + batch;
	if (write_time < 0)
		write_time = 0;

	if (write_time > batch && !ad->write_batch_idled) {
		if (write_time > batch * 3)
			ad->write_batch_count /= 2;
		else
			ad->write_batch_count--;
	} else if (write_time < batch && ad->current_write_count == 0) {
		if (batch > write_time * 3)
			ad->write_batch_count *= 2;
		else
			ad->write_batch_count++;
	}

	if (ad->write_batch_count < 1)
		ad->write_batch_count = 1;
}
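/*
 * Illustrative sketch only (not part of the original scheduler): the feedback
 * rule above in one place. A write batch that ran well over its time budget
 * without going idle gets its request quota shrunk, sharply if it was 3x
 * over; a batch that used its whole quota well under budget gets it grown.
 */
static int example_retune_write_quota(int quota, unsigned long budget,
				      unsigned long took, int quota_exhausted,
				      int went_idle)
{
	if (took > budget && !went_idle)
		quota = (took > budget * 3) ? quota / 2 : quota - 1;
	else if (took < budget && quota_exhausted)
		quota = (budget > took * 3) ? quota * 2 : quota + 1;

	return quota < 1 ? 1 : quota;
}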
/*
 * as_completed_request is to be called when a request has completed and
 * returned something to the requesting process, be it an error or data.
 */
static void as_completed_request(request_queue_t *q, struct request *rq)
{
	struct as_data *ad = q->elevator->elevator_data;
	struct as_rq *arq = RQ_DATA(rq);

	WARN_ON(!list_empty(&rq->queuelist));

	if (arq->state != AS_RQ_REMOVED) {
		printk("arq->state %d\n", arq->state);
		WARN_ON(1);
		goto out;
	}

	if (ad->changed_batch && ad->nr_dispatched == 1) {
		kblockd_schedule_work(&ad->antic_work);
		ad->changed_batch = 0;

		if (ad->batch_data_dir == REQ_SYNC)
			ad->new_batch = 1;
	}
	WARN_ON(ad->nr_dispatched == 0);
	ad->nr_dispatched--;

	/*
	 * Start counting the batch from when a request of that direction is
	 * actually serviced. This should help devices with big TCQ windows
	 * and writeback caches
	 */
	if (ad->new_batch && ad->batch_data_dir == arq->is_sync) {
		update_write_batch(ad);
		ad->current_batch_expires = jiffies +
				ad->batch_expire[REQ_SYNC];
		ad->new_batch = 0;
	}

	if (ad->io_context == arq->io_context && ad->io_context) {
		ad->antic_start = jiffies;
		ad->ioc_finished = 1;
		if (ad->antic_status == ANTIC_WAIT_REQ) {
			/*
			 * We were waiting on this request, now anticipate
			 * the next one
			 */
			as_antic_waitnext(ad);
		}
	}

	as_put_io_context(arq);
out:
	arq->state = AS_RQ_POSTSCHED;
}
/*
 * as_remove_queued_request removes a request from the pre dispatch queue
 * without updating refcounts. It is expected the caller will drop the
 * reference unless it replaces the request at some part of the elevator
 * (ie. the dispatch queue)
 */
static void as_remove_queued_request(request_queue_t *q, struct request *rq)
{
	struct as_rq *arq = RQ_DATA(rq);
	const int data_dir = arq->is_sync;
	struct as_data *ad = q->elevator->elevator_data;

	WARN_ON(arq->state != AS_RQ_QUEUED);

	if (arq->io_context && arq->io_context->aic) {
		BUG_ON(!atomic_read(&arq->io_context->aic->nr_queued));
		atomic_dec(&arq->io_context->aic->nr_queued);
	}

	/*
	 * Update the "next_arq" cache if we are about to remove its
	 * entry
	 */
	if (ad->next_arq[data_dir] == arq)
		ad->next_arq[data_dir] = as_find_next_arq(ad, arq);

	list_del_init(&arq->fifo);
	as_remove_merge_hints(q, arq);
	as_del_arq_rb(ad, arq);
}
/*
 * as_fifo_expired returns 0 if there are no expired reads on the fifo,
 * 1 otherwise. It is ratelimited so that we only perform the check once per
 * `fifo_expire' interval. Otherwise a large number of expired requests
 * would create a hopeless seekstorm.
 *
 * See as_antic_expired comment.
 */
static int as_fifo_expired(struct as_data *ad, int adir)
{
	struct as_rq *arq;
	long delta_jif;

	delta_jif = jiffies - ad->last_check_fifo[adir];
	if (unlikely(delta_jif < 0))
		delta_jif = -delta_jif;
	if (delta_jif < ad->fifo_expire[adir])
		return 0;

	ad->last_check_fifo[adir] = jiffies;

	if (list_empty(&ad->fifo_list[adir]))
		return 0;

	arq = list_entry_fifo(ad->fifo_list[adir].next);

	return time_after(jiffies, arq->expires);
}
/*
 * as_batch_expired returns true if the current batch has expired. A batch
 * is a set of reads or a set of writes.
 */
static inline int as_batch_expired(struct as_data *ad)
{
	if (ad->changed_batch || ad->new_batch)
		return 0;

	if (ad->batch_data_dir == REQ_SYNC)
		/* TODO! add a check so a complete fifo gets written? */
		return time_after(jiffies, ad->current_batch_expires);

	return time_after(jiffies, ad->current_batch_expires)
		|| ad->current_write_count == 0;
}
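/*
 * Illustrative sketch only (not part of the original scheduler): the two ways
 * a batch ends, as implemented above. A read batch is bounded by time alone;
 * a write batch ends when either its time budget or its request quota runs
 * out.
 */
static inline int example_batch_done(int is_read_batch, unsigned long now,
				     unsigned long expires, int writes_left)
{
	if (is_read_batch)
		return time_after(now, expires);

	return time_after(now, expires) || writes_left == 0;
}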
/*
 * move an entry to dispatch queue
 */
static void as_move_to_dispatch(struct as_data *ad, struct as_rq *arq)
{
	struct request *rq = arq->request;
	const int data_dir = arq->is_sync;

	BUG_ON(!ON_RB(&arq->rb_node));

	as_antic_stop(ad);
	ad->antic_status = ANTIC_OFF;

	/*
	 * This has to be set in order to be correctly updated by
	 * as_find_next_arq
	 */
	ad->last_sector[data_dir] = rq->sector + rq->nr_sectors;

	if (data_dir == REQ_SYNC) {
		/* In case we have to anticipate after this */
		copy_io_context(&ad->io_context, &arq->io_context);
	} else {
		if (ad->io_context) {
			put_io_context(ad->io_context);
			ad->io_context = NULL;
		}

		if (ad->current_write_count != 0)
			ad->current_write_count--;
	}
	ad->ioc_finished = 0;

	ad->next_arq[data_dir] = as_find_next_arq(ad, arq);

	/*
	 * take it off the sort and fifo list, add to dispatch queue
	 */
	while (!list_empty(&rq->queuelist)) {
		struct request *__rq = list_entry_rq(rq->queuelist.next);
		struct as_rq *__arq = RQ_DATA(__rq);

		list_del(&__rq->queuelist);

		elv_dispatch_add_tail(ad->q, __rq);

		if (__arq->io_context && __arq->io_context->aic)
			atomic_inc(&__arq->io_context->aic->nr_dispatched);

		WARN_ON(__arq->state != AS_RQ_QUEUED);
		__arq->state = AS_RQ_DISPATCHED;

		ad->nr_dispatched++;
	}

	as_remove_queued_request(ad->q, rq);
	WARN_ON(arq->state != AS_RQ_QUEUED);

	elv_dispatch_sort(ad->q, rq);

	arq->state = AS_RQ_DISPATCHED;
	if (arq->io_context && arq->io_context->aic)
		atomic_inc(&arq->io_context->aic->nr_dispatched);
	ad->nr_dispatched++;
}
/*
 * as_dispatch_request selects the best request according to
 * read/write expire, batch expire, etc, and moves it to the dispatch
 * queue. Returns 1 if a request was found, 0 otherwise.
 */
static int as_dispatch_request(request_queue_t *q, int force)
{
	struct as_data *ad = q->elevator->elevator_data;
	struct as_rq *arq;
	const int reads = !list_empty(&ad->fifo_list[REQ_SYNC]);
	const int writes = !list_empty(&ad->fifo_list[REQ_ASYNC]);

	if (unlikely(force)) {
		/*
		 * Forced dispatch, accounting is useless.  Reset
		 * accounting states and dump fifo_lists.  Note that
		 * batch_data_dir is reset to REQ_SYNC to avoid
		 * screwing write batch accounting as write batch
		 * accounting occurs on W->R transition.
		 */
		int dispatched = 0;

		ad->batch_data_dir = REQ_SYNC;
		ad->changed_batch = 0;
		ad->new_batch = 0;

		while (ad->next_arq[REQ_SYNC]) {
			as_move_to_dispatch(ad, ad->next_arq[REQ_SYNC]);
			dispatched++;
		}
		ad->last_check_fifo[REQ_SYNC] = jiffies;

		while (ad->next_arq[REQ_ASYNC]) {
			as_move_to_dispatch(ad, ad->next_arq[REQ_ASYNC]);
			dispatched++;
		}
		ad->last_check_fifo[REQ_ASYNC] = jiffies;

		return dispatched;
	}

	/* Signal that the write batch was uncontended, so we can't time it */
	if (ad->batch_data_dir == REQ_ASYNC && !reads) {
		if (ad->current_write_count == 0 || !writes)
			ad->write_batch_idled = 1;
	}

	if (!(reads || writes)
		|| ad->antic_status == ANTIC_WAIT_REQ
		|| ad->antic_status == ANTIC_WAIT_NEXT
		|| ad->changed_batch)
		return 0;

	if (!(reads && writes && as_batch_expired(ad))) {
		/*
		 * batch is still running or no reads or no writes
		 */
		arq = ad->next_arq[ad->batch_data_dir];

		if (ad->batch_data_dir == REQ_SYNC && ad->antic_expire) {
			if (as_fifo_expired(ad, REQ_SYNC))
				goto fifo_expired;

			if (as_can_anticipate(ad, arq)) {
				as_antic_waitreq(ad);
				return 0;
			}
		}

		if (arq) {
			/* we have a "next request" */
			if (reads && !writes)
				ad->current_batch_expires =
					jiffies + ad->batch_expire[REQ_SYNC];
			goto dispatch_request;
		}
	}

	/*
	 * at this point we are not running a batch. select the appropriate
	 * data direction (read / write)
	 */

	if (reads) {
		BUG_ON(RB_EMPTY(&ad->sort_list[REQ_SYNC]));

		if (writes && ad->batch_data_dir == REQ_SYNC)
			/*
			 * Last batch was a read, switch to writes
			 */
			goto dispatch_writes;

		if (ad->batch_data_dir == REQ_ASYNC) {
			WARN_ON(ad->new_batch);
			ad->changed_batch = 1;
		}
		ad->batch_data_dir = REQ_SYNC;
		arq = list_entry_fifo(ad->fifo_list[ad->batch_data_dir].next);
		ad->last_check_fifo[ad->batch_data_dir] = jiffies;
		goto dispatch_request;
	}

	/*
	 * the last batch was a read
	 */

	if (writes) {
dispatch_writes:
		BUG_ON(RB_EMPTY(&ad->sort_list[REQ_ASYNC]));

		if (ad->batch_data_dir == REQ_SYNC) {
			ad->changed_batch = 1;

			/*
			 * new_batch might be 1 when the queue runs out of
			 * reads. A subsequent submission of a write might
			 * cause a change of batch before the read is finished.
			 */
			ad->new_batch = 0;
		}
		ad->batch_data_dir = REQ_ASYNC;
		ad->current_write_count = ad->write_batch_count;
		ad->write_batch_idled = 0;
		arq = ad->next_arq[ad->batch_data_dir];
		goto dispatch_request;
	}

	BUG();
	return 0;

dispatch_request:
	/*
	 * If a request has expired, service it.
	 */

	if (as_fifo_expired(ad, ad->batch_data_dir)) {
fifo_expired:
		arq = list_entry_fifo(ad->fifo_list[ad->batch_data_dir].next);
		BUG_ON(arq == NULL);
	}

	if (ad->changed_batch) {
		WARN_ON(ad->new_batch);

		if (ad->nr_dispatched)
			return 0;

		if (ad->batch_data_dir == REQ_ASYNC)
			ad->current_batch_expires = jiffies +
					ad->batch_expire[REQ_ASYNC];
		else
			ad->new_batch = 1;

		ad->changed_batch = 0;
	}

	/*
	 * arq is the selected appropriate request.
	 */
	as_move_to_dispatch(ad, arq);

	return 1;
}
/*
 * Add arq to a list behind alias
 */
static inline void
as_add_aliased_request(struct as_data *ad, struct as_rq *arq, struct as_rq *alias)
{
	struct request *req = arq->request;
	struct list_head *insert = alias->request->queuelist.prev;

	/*
	 * Transfer list of aliases
	 */
	while (!list_empty(&req->queuelist)) {
		struct request *__rq = list_entry_rq(req->queuelist.next);
		struct as_rq *__arq = RQ_DATA(__rq);

		list_move_tail(&__rq->queuelist, &alias->request->queuelist);

		WARN_ON(__arq->state != AS_RQ_QUEUED);
	}

	/*
	 * Another request with the same start sector on the rbtree.
	 * Link this request to that sector. They are untangled in
	 * as_move_to_dispatch
	 */
	list_add(&arq->request->queuelist, insert);

	/*
	 * Don't want to have to handle merges.
	 */
	as_remove_merge_hints(ad->q, arq);
}
/*
 * add arq to rbtree and fifo
 */
static void as_add_request(request_queue_t *q, struct request *rq)
{
	struct as_data *ad = q->elevator->elevator_data;
	struct as_rq *arq = RQ_DATA(rq);
	struct as_rq *alias;
	int data_dir;

	if (arq->state != AS_RQ_PRESCHED) {
		printk("arq->state: %d\n", arq->state);
		WARN_ON(1);
	}
	arq->state = AS_RQ_NEW;

	if (rq_data_dir(arq->request) == READ
			|| current->flags&PF_SYNCWRITE)
		arq->is_sync = 1;
	else
		arq->is_sync = 0;
	data_dir = arq->is_sync;

	arq->io_context = as_get_io_context();

	if (arq->io_context) {
		as_update_iohist(ad, arq->io_context->aic, arq->request);
		atomic_inc(&arq->io_context->aic->nr_queued);
	}

	alias = as_add_arq_rb(ad, arq);
	if (!alias) {
		/*
		 * set expire time (only used for reads) and add to fifo list
		 */
		arq->expires = jiffies + ad->fifo_expire[data_dir];
		list_add_tail(&arq->fifo, &ad->fifo_list[data_dir]);

		if (rq_mergeable(arq->request)) {
			as_add_arq_hash(ad, arq);

			if (!ad->q->last_merge)
				ad->q->last_merge = arq->request;
		}
		as_update_arq(ad, arq); /* keep state machine up to date */
	} else {
		as_add_aliased_request(ad, arq, alias);

		/*
		 * have we been anticipating this request?
		 * or does it come from the same process as the one we are
		 * anticipating for?
		 */
		if (ad->antic_status == ANTIC_WAIT_REQ
				|| ad->antic_status == ANTIC_WAIT_NEXT) {
			if (as_can_break_anticipation(ad, arq))
				as_antic_stop(ad);
		}
	}

	arq->state = AS_RQ_QUEUED;
}
static void as_activate_request(request_queue_t *q, struct request *rq)
{
	struct as_rq *arq = RQ_DATA(rq);

	WARN_ON(arq->state != AS_RQ_DISPATCHED);
	arq->state = AS_RQ_REMOVED;
	if (arq->io_context && arq->io_context->aic)
		atomic_dec(&arq->io_context->aic->nr_dispatched);
}

static void as_deactivate_request(request_queue_t *q, struct request *rq)
{
	struct as_rq *arq = RQ_DATA(rq);

	WARN_ON(arq->state != AS_RQ_REMOVED);
	arq->state = AS_RQ_DISPATCHED;
	if (arq->io_context && arq->io_context->aic)
		atomic_inc(&arq->io_context->aic->nr_dispatched);
}
/*
 * as_queue_empty tells us if there are requests left in the device. It may
 * not be the case that a driver can get the next request even if the queue
 * is not empty - it is used in the block layer to check for plugging and
 * merging opportunities
 */
static int as_queue_empty(request_queue_t *q)
{
	struct as_data *ad = q->elevator->elevator_data;

	return list_empty(&ad->fifo_list[REQ_ASYNC])
		&& list_empty(&ad->fifo_list[REQ_SYNC]);
}
static struct request *
as_former_request(request_queue_t *q, struct request *rq)
{
	struct as_rq *arq = RQ_DATA(rq);
	struct rb_node *rbprev = rb_prev(&arq->rb_node);
	struct request *ret = NULL;

	if (rbprev)
		ret = rb_entry_arq(rbprev)->request;

	return ret;
}

static struct request *
as_latter_request(request_queue_t *q, struct request *rq)
{
	struct as_rq *arq = RQ_DATA(rq);
	struct rb_node *rbnext = rb_next(&arq->rb_node);
	struct request *ret = NULL;

	if (rbnext)
		ret = rb_entry_arq(rbnext)->request;

	return ret;
}
static int
as_merge(request_queue_t *q, struct request **req, struct bio *bio)
{
	struct as_data *ad = q->elevator->elevator_data;
	sector_t rb_key = bio->bi_sector + bio_sectors(bio);
	struct request *__rq;
	int ret;

	/*
	 * try last_merge to avoid going to hash
	 */
	ret = elv_try_last_merge(q, bio);
	if (ret != ELEVATOR_NO_MERGE) {
		__rq = q->last_merge;
		goto out_insert;
	}

	/*
	 * see if the merge hash can satisfy a back merge
	 */
	__rq = as_find_arq_hash(ad, bio->bi_sector);
	if (__rq) {
		BUG_ON(__rq->sector + __rq->nr_sectors != bio->bi_sector);

		if (elv_rq_merge_ok(__rq, bio)) {
			ret = ELEVATOR_BACK_MERGE;
			goto out;
		}
	}

	/*
	 * check for front merge
	 */
	__rq = as_find_arq_rb(ad, rb_key, bio_data_dir(bio));
	if (__rq) {
		BUG_ON(rb_key != rq_rb_key(__rq));

		if (elv_rq_merge_ok(__rq, bio)) {
			ret = ELEVATOR_FRONT_MERGE;
			goto out;
		}
	}

	return ELEVATOR_NO_MERGE;
out:
	if (rq_mergeable(__rq))
		q->last_merge = __rq;
out_insert:
	if (rq_mergeable(__rq))
		as_hot_arq_hash(ad, RQ_DATA(__rq));
	*req = __rq;
	return ret;
}
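/*
 * Illustrative sketch only (not part of the original scheduler): the two
 * merge directions probed above. The end-sector hash answers "can this bio be
 * appended to an existing request?" (back merge); the start-sector rbtree
 * answers "can it be prepended?" (front merge).
 */
static inline int example_merge_direction(sector_t rq_start, sector_t rq_nr,
					  sector_t bio_start, sector_t bio_nr)
{
	if (rq_start + rq_nr == bio_start)
		return ELEVATOR_BACK_MERGE;
	if (bio_start + bio_nr == rq_start)
		return ELEVATOR_FRONT_MERGE;

	return ELEVATOR_NO_MERGE;
}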
static void as_merged_request(request_queue_t *q, struct request *req)
{
	struct as_data *ad = q->elevator->elevator_data;
	struct as_rq *arq = RQ_DATA(req);

	/*
	 * hash always needs to be repositioned, key is end sector
	 */
	as_del_arq_hash(arq);
	as_add_arq_hash(ad, arq);

	/*
	 * if the merge was a front merge, we need to reposition request
	 */
	if (rq_rb_key(req) != arq->rb_key) {
		struct as_rq *alias, *next_arq = NULL;

		if (ad->next_arq[arq->is_sync] == arq)
			next_arq = as_find_next_arq(ad, arq);

		/*
		 * Note! We should really be moving any old aliased requests
		 * off this request and try to insert them into the rbtree. We
		 * currently don't bother. Ditto the next function.
		 */
		as_del_arq_rb(ad, arq);
		if ((alias = as_add_arq_rb(ad, arq)) ) {
			list_del_init(&arq->fifo);
			as_add_aliased_request(ad, arq, alias);
			if (next_arq)
				ad->next_arq[arq->is_sync] = next_arq;
		}
		/*
		 * Note! At this stage of this and the next function, our next
		 * request may not be optimal - eg the request may have "grown"
		 * behind the disk head. We currently don't bother adjusting.
		 */
	}

	q->last_merge = req;
}
static void
as_merged_requests(request_queue_t *q, struct request *req,
			struct request *next)
{
	struct as_data *ad = q->elevator->elevator_data;
	struct as_rq *arq = RQ_DATA(req);
	struct as_rq *anext = RQ_DATA(next);

	/*
	 * reposition arq (this is the merged request) in hash, and in rbtree
	 * in case of a front merge
	 */
	as_del_arq_hash(arq);
	as_add_arq_hash(ad, arq);

	if (rq_rb_key(req) != arq->rb_key) {
		struct as_rq *alias, *next_arq = NULL;

		if (ad->next_arq[arq->is_sync] == arq)
			next_arq = as_find_next_arq(ad, arq);

		as_del_arq_rb(ad, arq);
		if ((alias = as_add_arq_rb(ad, arq)) ) {
			list_del_init(&arq->fifo);
			as_add_aliased_request(ad, arq, alias);
			if (next_arq)
				ad->next_arq[arq->is_sync] = next_arq;
		}
	}

	/*
	 * if anext expires before arq, assign its expire time to arq
	 * and move into anext position (anext will be deleted) in fifo
	 */
	if (!list_empty(&arq->fifo) && !list_empty(&anext->fifo)) {
		if (time_before(anext->expires, arq->expires)) {
			list_move(&arq->fifo, &anext->fifo);
			arq->expires = anext->expires;
			/*
			 * Don't copy here but swap, because when anext is
			 * removed below, it must contain the unused context
			 */
			swap_io_context(&arq->io_context, &anext->io_context);
		}
	}

	/*
	 * Transfer list of aliases
	 */
	while (!list_empty(&next->queuelist)) {
		struct request *__rq = list_entry_rq(next->queuelist.next);
		struct as_rq *__arq = RQ_DATA(__rq);

		list_move_tail(&__rq->queuelist, &req->queuelist);

		WARN_ON(__arq->state != AS_RQ_QUEUED);
	}

	/*
	 * kill knowledge of next, this one is a goner
	 */
	as_remove_queued_request(q, next);
	as_put_io_context(anext);

	anext->state = AS_RQ_MERGED;
}
/*
 * This is executed in a "deferred" process context, by kblockd. It calls the
 * driver's request_fn so the driver can submit that request.
 *
 * IMPORTANT! This guy will reenter the elevator, so set up all queue global
 * state before calling, and don't rely on any state over calls.
 *
 * FIXME! dispatch queue is not a queue at all!
 */
static void as_work_handler(void *data)
{
	struct request_queue *q = data;
	unsigned long flags;

	spin_lock_irqsave(q->queue_lock, flags);
	if (!as_queue_empty(q))
		q->request_fn(q);
	spin_unlock_irqrestore(q->queue_lock, flags);
}
static void as_put_request(request_queue_t *q, struct request *rq)
{
	struct as_data *ad = q->elevator->elevator_data;
	struct as_rq *arq = RQ_DATA(rq);

	if (!arq) {
		WARN_ON(1);
		return;
	}

	if (unlikely(arq->state != AS_RQ_POSTSCHED &&
		     arq->state != AS_RQ_PRESCHED &&
		     arq->state != AS_RQ_MERGED)) {
		printk("arq->state %d\n", arq->state);
		WARN_ON(1);
	}

	mempool_free(arq, ad->arq_pool);
	rq->elevator_private = NULL;
}
static int as_set_request(request_queue_t *q, struct request *rq,
			  struct bio *bio, int gfp_mask)
{
	struct as_data *ad = q->elevator->elevator_data;
	struct as_rq *arq = mempool_alloc(ad->arq_pool, gfp_mask);

	if (arq) {
		memset(arq, 0, sizeof(*arq));
		RB_CLEAR(&arq->rb_node);
		arq->request = rq;
		arq->state = AS_RQ_PRESCHED;
		arq->io_context = NULL;
		INIT_LIST_HEAD(&arq->hash);
		arq->on_hash = 0;
		INIT_LIST_HEAD(&arq->fifo);
		rq->elevator_private = arq;
		return 0;
	}

	return 1;
}
static int as_may_queue(request_queue_t *q, int rw, struct bio *bio)
{
	int ret = ELV_MQUEUE_MAY;
	struct as_data *ad = q->elevator->elevator_data;
	struct io_context *ioc;
	if (ad->antic_status == ANTIC_WAIT_REQ ||
			ad->antic_status == ANTIC_WAIT_NEXT) {
		ioc = as_get_io_context();
		if (ad->io_context == ioc)
			ret = ELV_MQUEUE_MUST;
		put_io_context(ioc);
	}

	return ret;
}
static void as_exit_queue(elevator_t *e)
{
	struct as_data *ad = e->elevator_data;

	del_timer_sync(&ad->antic_timer);

	BUG_ON(!list_empty(&ad->fifo_list[REQ_SYNC]));
	BUG_ON(!list_empty(&ad->fifo_list[REQ_ASYNC]));

	mempool_destroy(ad->arq_pool);
	put_io_context(ad->io_context);
	kfree(ad->hash);
	kfree(ad);
}
/*
 * initialize elevator private data (as_data), and alloc a arq for
 * each request on the free lists
 */
static int as_init_queue(request_queue_t *q, elevator_t *e)
{
	struct as_data *ad;
	int i;

	if (!arq_pool)
		return -ENOMEM;

	ad = kmalloc_node(sizeof(*ad), GFP_KERNEL, q->node);
	if (!ad)
		return -ENOMEM;
	memset(ad, 0, sizeof(*ad));

	ad->q = q; /* Identify what queue the data belongs to */

	ad->hash = kmalloc_node(sizeof(struct list_head)*AS_HASH_ENTRIES,
				GFP_KERNEL, q->node);
	if (!ad->hash) {
		kfree(ad);
		return -ENOMEM;
	}

	ad->arq_pool = mempool_create_node(BLKDEV_MIN_RQ, mempool_alloc_slab,
				mempool_free_slab, arq_pool, q->node);
	if (!ad->arq_pool) {
		kfree(ad->hash);
		kfree(ad);
		return -ENOMEM;
	}

	/* anticipatory scheduling helpers */
	ad->antic_timer.function = as_antic_timeout;
	ad->antic_timer.data = (unsigned long)q;
	init_timer(&ad->antic_timer);
	INIT_WORK(&ad->antic_work, as_work_handler, q);

	for (i = 0; i < AS_HASH_ENTRIES; i++)
		INIT_LIST_HEAD(&ad->hash[i]);

	INIT_LIST_HEAD(&ad->fifo_list[REQ_SYNC]);
	INIT_LIST_HEAD(&ad->fifo_list[REQ_ASYNC]);
	ad->sort_list[REQ_SYNC] = RB_ROOT;
	ad->sort_list[REQ_ASYNC] = RB_ROOT;
	ad->fifo_expire[REQ_SYNC] = default_read_expire;
	ad->fifo_expire[REQ_ASYNC] = default_write_expire;
	ad->antic_expire = default_antic_expire;
	ad->batch_expire[REQ_SYNC] = default_read_batch_expire;
	ad->batch_expire[REQ_ASYNC] = default_write_batch_expire;
	e->elevator_data = ad;

	ad->current_batch_expires = jiffies + ad->batch_expire[REQ_SYNC];
	ad->write_batch_count = ad->batch_expire[REQ_ASYNC] / 10;
	if (ad->write_batch_count < 2)
		ad->write_batch_count = 2;

	return 0;
}
struct as_fs_entry {
	struct attribute attr;
	ssize_t (*show)(struct as_data *, char *);
	ssize_t (*store)(struct as_data *, const char *, size_t);
};

static ssize_t
as_var_show(unsigned int var, char *page)
{
	return sprintf(page, "%d\n", var);
}

static ssize_t
as_var_store(unsigned long *var, const char *page, size_t count)
{
	char *p = (char *) page;

	*var = simple_strtoul(p, &p, 10);
	return count;
}

static ssize_t as_est_show(struct as_data *ad, char *page)
{
	int pos = 0;

	pos += sprintf(page+pos, "%lu %% exit probability\n", 100*ad->exit_prob/256);
	pos += sprintf(page+pos, "%lu ms new thinktime\n", ad->new_ttime_mean);
	pos += sprintf(page+pos, "%llu sectors new seek distance\n", (unsigned long long)ad->new_seek_mean);

	return pos;
}
#define SHOW_FUNCTION(__FUNC, __VAR)				\
static ssize_t __FUNC(struct as_data *ad, char *page)		\
{								\
	return as_var_show(jiffies_to_msecs((__VAR)), (page));	\
}
SHOW_FUNCTION(as_readexpire_show, ad->fifo_expire[REQ_SYNC]);
SHOW_FUNCTION(as_writeexpire_show, ad->fifo_expire[REQ_ASYNC]);
SHOW_FUNCTION(as_anticexpire_show, ad->antic_expire);
SHOW_FUNCTION(as_read_batchexpire_show, ad->batch_expire[REQ_SYNC]);
SHOW_FUNCTION(as_write_batchexpire_show, ad->batch_expire[REQ_ASYNC]);
#undef SHOW_FUNCTION

#define STORE_FUNCTION(__FUNC, __PTR, MIN, MAX)				\
static ssize_t __FUNC(struct as_data *ad, const char *page, size_t count)	\
{									\
	int ret = as_var_store(__PTR, (page), count);			\
	if (*(__PTR) < (MIN))						\
		*(__PTR) = (MIN);					\
	else if (*(__PTR) > (MAX))					\
		*(__PTR) = (MAX);					\
	*(__PTR) = msecs_to_jiffies(*(__PTR));				\
	return ret;							\
}
STORE_FUNCTION(as_readexpire_store, &ad->fifo_expire[REQ_SYNC], 0, INT_MAX);
STORE_FUNCTION(as_writeexpire_store, &ad->fifo_expire[REQ_ASYNC], 0, INT_MAX);
STORE_FUNCTION(as_anticexpire_store, &ad->antic_expire, 0, INT_MAX);
STORE_FUNCTION(as_read_batchexpire_store,
			&ad->batch_expire[REQ_SYNC], 0, INT_MAX);
STORE_FUNCTION(as_write_batchexpire_store,
			&ad->batch_expire[REQ_ASYNC], 0, INT_MAX);
#undef STORE_FUNCTION
static struct as_fs_entry as_est_entry = {
	.attr = {.name = "est_time", .mode = S_IRUGO },
	.show = as_est_show,
};
static struct as_fs_entry as_readexpire_entry = {
	.attr = {.name = "read_expire", .mode = S_IRUGO | S_IWUSR },
	.show = as_readexpire_show,
	.store = as_readexpire_store,
};
static struct as_fs_entry as_writeexpire_entry = {
	.attr = {.name = "write_expire", .mode = S_IRUGO | S_IWUSR },
	.show = as_writeexpire_show,
	.store = as_writeexpire_store,
};
static struct as_fs_entry as_anticexpire_entry = {
	.attr = {.name = "antic_expire", .mode = S_IRUGO | S_IWUSR },
	.show = as_anticexpire_show,
	.store = as_anticexpire_store,
};
static struct as_fs_entry as_read_batchexpire_entry = {
	.attr = {.name = "read_batch_expire", .mode = S_IRUGO | S_IWUSR },
	.show = as_read_batchexpire_show,
	.store = as_read_batchexpire_store,
};
static struct as_fs_entry as_write_batchexpire_entry = {
	.attr = {.name = "write_batch_expire", .mode = S_IRUGO | S_IWUSR },
	.show = as_write_batchexpire_show,
	.store = as_write_batchexpire_store,
};

static struct attribute *default_attrs[] = {
	&as_est_entry.attr,
	&as_readexpire_entry.attr,
	&as_writeexpire_entry.attr,
	&as_anticexpire_entry.attr,
	&as_read_batchexpire_entry.attr,
	&as_write_batchexpire_entry.attr,
	NULL,
};
#define to_as(atr) container_of((atr), struct as_fs_entry, attr)

static ssize_t
as_attr_show(struct kobject *kobj, struct attribute *attr, char *page)
{
	elevator_t *e = container_of(kobj, elevator_t, kobj);
	struct as_fs_entry *entry = to_as(attr);

	if (!entry->show)
		return -EIO;

	return entry->show(e->elevator_data, page);
}

static ssize_t
as_attr_store(struct kobject *kobj, struct attribute *attr,
	       const char *page, size_t length)
{
	elevator_t *e = container_of(kobj, elevator_t, kobj);
	struct as_fs_entry *entry = to_as(attr);

	if (!entry->store)
		return -EIO;

	return entry->store(e->elevator_data, page, length);
}

static struct sysfs_ops as_sysfs_ops = {
	.show	= as_attr_show,
	.store	= as_attr_store,
};

static struct kobj_type as_ktype = {
	.sysfs_ops	= &as_sysfs_ops,
	.default_attrs	= default_attrs,
};
static struct elevator_type iosched_as = {
	.ops = {
		.elevator_merge_fn = 		as_merge,
		.elevator_merged_fn =		as_merged_request,
		.elevator_merge_req_fn =	as_merged_requests,
		.elevator_dispatch_fn =		as_dispatch_request,
		.elevator_add_req_fn =		as_add_request,
		.elevator_activate_req_fn =	as_activate_request,
		.elevator_deactivate_req_fn = 	as_deactivate_request,
		.elevator_queue_empty_fn =	as_queue_empty,
		.elevator_completed_req_fn =	as_completed_request,
		.elevator_former_req_fn =	as_former_request,
		.elevator_latter_req_fn =	as_latter_request,
		.elevator_set_req_fn =		as_set_request,
		.elevator_put_req_fn =		as_put_request,
		.elevator_may_queue_fn =	as_may_queue,
		.elevator_init_fn =		as_init_queue,
		.elevator_exit_fn =		as_exit_queue,
	},

	.elevator_ktype = &as_ktype,
	.elevator_name = "anticipatory",
	.elevator_owner = THIS_MODULE,
};
static int __init as_init(void)
{
	int ret;

	arq_pool = kmem_cache_create("as_arq", sizeof(struct as_rq),
				     0, 0, NULL, NULL);
	if (!arq_pool)
		return -ENOMEM;

	ret = elv_register(&iosched_as);
	if (!ret) {
		/*
		 * don't allow AS to get unregistered, since we would have
		 * to browse all tasks in the system and release their
		 * as_io_context first
		 */
		__module_get(THIS_MODULE);
		return 0;
	}

	kmem_cache_destroy(arq_pool);
	return ret;
}

static void __exit as_exit(void)
{
	kmem_cache_destroy(arq_pool);
	elv_unregister(&iosched_as);
}

module_init(as_init);
module_exit(as_exit);
MODULE_AUTHOR("Nick Piggin");
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("anticipatory IO scheduler");