/*
 * MQ Deadline i/o scheduler - adaptation of the legacy deadline scheduler,
 * for the blk-mq scheduling framework
 *
 * Copyright (C) 2016 Jens Axboe <axboe@kernel.dk>
 */
#include <linux/kernel.h>
#include <linux/fs.h>
#include <linux/blkdev.h>
#include <linux/blk-mq.h>
#include <linux/elevator.h>
#include <linux/bio.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/init.h>
#include <linux/compiler.h>
#include <linux/rbtree.h>
#include <linux/sbitmap.h>

#include "blk.h"
#include "blk-mq.h"
#include "blk-mq-tag.h"
#include "blk-mq-sched.h"

/*
 * See Documentation/block/deadline-iosched.txt
 */
static const int read_expire = HZ / 2;  /* max time before a read is submitted. */
static const int write_expire = 5 * HZ; /* ditto for writes, these limits are SOFT! */
static const int writes_starved = 2;    /* max times reads can starve a write */
static const int fifo_batch = 16;       /* # of sequential requests treated as one
                                           by the above parameters. For throughput. */
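
/*
 * Expiry limits are in jiffies: read_expire = HZ / 2 is 500 ms and
 * write_expire = 5 * HZ is 5 seconds for any HZ. Reads get the shorter
 * deadline because callers typically block on them, while writes can
 * usually complete asynchronously. All of these defaults are exported
 * as per-queue sysfs tunables via deadline_attrs[] near the end of
 * this file.
 */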

struct deadline_data {
        /*
         * run time data
         */

        /*
         * requests (deadline_rq s) are present on both sort_list and fifo_list
         */
        struct rb_root sort_list[2];
        struct list_head fifo_list[2];

        /*
         * next in sort order. read, write or both are NULL
         */
        struct request *next_rq[2];
        unsigned int batching;          /* number of sequential requests made */
        unsigned int starved;           /* times reads have starved writes */

        /*
         * settings that change how the i/o scheduler behaves
         */
        int fifo_expire[2];
        int fifo_batch;
        int writes_starved;
        int front_merges;

        spinlock_t lock;
        struct list_head dispatch;
};

static inline struct rb_root *
deadline_rb_root(struct deadline_data *dd, struct request *rq)
{
        return &dd->sort_list[rq_data_dir(rq)];
}

/*
 * get the request after `rq' in sector-sorted order
 */
static inline struct request *
deadline_latter_request(struct request *rq)
{
        struct rb_node *node = rb_next(&rq->rb_node);

        if (node)
                return rb_entry_rq(node);

        return NULL;
}

static void
deadline_add_rq_rb(struct deadline_data *dd, struct request *rq)
{
        struct rb_root *root = deadline_rb_root(dd, rq);

        elv_rb_add(root, rq);
}

static inline void
deadline_del_rq_rb(struct deadline_data *dd, struct request *rq)
{
        const int data_dir = rq_data_dir(rq);

        if (dd->next_rq[data_dir] == rq)
                dd->next_rq[data_dir] = deadline_latter_request(rq);

        elv_rb_del(deadline_rb_root(dd, rq), rq);
}

/*
 * remove rq from rbtree and fifo.
 */
static void deadline_remove_request(struct request_queue *q, struct request *rq)
{
        struct deadline_data *dd = q->elevator->elevator_data;

        list_del_init(&rq->queuelist);

        /*
         * We might not be on the rbtree, if we are doing an insert merge
         */
        if (!RB_EMPTY_NODE(&rq->rb_node))
                deadline_del_rq_rb(dd, rq);

        elv_rqhash_del(q, rq);
        if (q->last_merge == rq)
                q->last_merge = NULL;
}

static void dd_request_merged(struct request_queue *q, struct request *req,
                              int type)
{
        struct deadline_data *dd = q->elevator->elevator_data;

        /*
         * if the merge was a front merge, we need to reposition request
         */
        if (type == ELEVATOR_FRONT_MERGE) {
                elv_rb_del(deadline_rb_root(dd, req), req);
                deadline_add_rq_rb(dd, req);
        }
}
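
/*
 * Only front merges need this repositioning: they change the request's
 * start sector, which is the rb-tree key. A back merge only extends the
 * end of the request, so its position in the sort list is unchanged.
 */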

static void dd_merged_requests(struct request_queue *q, struct request *req,
                               struct request *next)
{
        /*
         * if next expires before rq, assign its expire time to rq
         * and move into next position (next will be deleted) in fifo
         */
        if (!list_empty(&req->queuelist) && !list_empty(&next->queuelist)) {
                if (time_before((unsigned long)next->fifo_time,
                                (unsigned long)req->fifo_time)) {
                        list_move(&req->queuelist, &next->queuelist);
                        req->fifo_time = next->fifo_time;
                }
        }

        /*
         * kill knowledge of next, this one is a goner
         */
        deadline_remove_request(q, next);
}

/*
 * move an entry to dispatch queue
 */
static void
deadline_move_request(struct deadline_data *dd, struct request *rq)
{
        const int data_dir = rq_data_dir(rq);

        dd->next_rq[READ] = NULL;
        dd->next_rq[WRITE] = NULL;
        dd->next_rq[data_dir] = deadline_latter_request(rq);

        /*
         * take it off the sort and fifo list
         */
        deadline_remove_request(rq->q, rq);
}

/*
 * deadline_check_fifo returns 0 if there are no expired requests on the fifo,
 * 1 otherwise. Requires !list_empty(&dd->fifo_list[ddir])
 */
static inline int deadline_check_fifo(struct deadline_data *dd, int ddir)
{
        struct request *rq = rq_entry_fifo(dd->fifo_list[ddir].next);

        /*
         * rq is expired!
         */
        if (time_after_eq(jiffies, (unsigned long)rq->fifo_time))
                return 1;

        return 0;
}
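
/*
 * Note that time_after_eq() compares via signed arithmetic, so the
 * expiry test above stays correct even when the jiffies counter wraps.
 */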

/*
 * __dd_dispatch_request selects the best request according to
 * read/write expire, fifo_batch, etc
 */
static struct request *__dd_dispatch_request(struct blk_mq_hw_ctx *hctx)
{
        struct deadline_data *dd = hctx->queue->elevator->elevator_data;
        struct request *rq;
        bool reads, writes;
        int data_dir;

        if (!list_empty(&dd->dispatch)) {
                rq = list_first_entry(&dd->dispatch, struct request, queuelist);
                list_del_init(&rq->queuelist);
                goto done;
        }

        reads = !list_empty(&dd->fifo_list[READ]);
        writes = !list_empty(&dd->fifo_list[WRITE]);

        /*
         * batches are currently reads XOR writes
         */
        if (dd->next_rq[WRITE])
                rq = dd->next_rq[WRITE];
        else
                rq = dd->next_rq[READ];

        if (rq && dd->batching < dd->fifo_batch)
                /* we have a next request and are still entitled to batch */
                goto dispatch_request;

        /*
         * at this point we are not running a batch. select the appropriate
         * data direction (read / write)
         */

        if (reads) {
                BUG_ON(RB_EMPTY_ROOT(&dd->sort_list[READ]));

                if (writes && (dd->starved++ >= dd->writes_starved))
                        goto dispatch_writes;

                data_dir = READ;

                goto dispatch_find_request;
        }

        /*
         * there are either no reads or writes have been starved
         */

        if (writes) {
dispatch_writes:
                BUG_ON(RB_EMPTY_ROOT(&dd->sort_list[WRITE]));

                dd->starved = 0;

                data_dir = WRITE;

                goto dispatch_find_request;
        }

        return NULL;

dispatch_find_request:
        /*
         * we are not running a batch, find best request for selected data_dir
         */
        if (deadline_check_fifo(dd, data_dir) || !dd->next_rq[data_dir]) {
                /*
                 * A deadline has expired, the last request was in the other
                 * direction, or we have run out of higher-sectored requests.
                 * Start again from the request with the earliest expiry time.
                 */
                rq = rq_entry_fifo(dd->fifo_list[data_dir].next);
        } else {
                /*
                 * The last req was the same dir and we have a next request in
                 * sort order. No expired requests so continue on from here.
                 */
                rq = dd->next_rq[data_dir];
        }

        dd->batching = 0;

dispatch_request:
        /*
         * rq is the selected appropriate request.
         */
        dd->batching++;
        deadline_move_request(dd, rq);
done:
        rq->rq_flags |= RQF_STARTED;
        return rq;
}
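
/*
 * Example of the batching behaviour above, assuming the default
 * fifo_batch of 16: once a data direction is chosen, up to 16 requests
 * are served in sector order from the sort tree (via dd->next_rq)
 * before expired requests are considered again. A larger fifo_batch
 * favours throughput at the cost of worst-case latency.
 */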

static struct request *dd_dispatch_request(struct blk_mq_hw_ctx *hctx)
{
        struct deadline_data *dd = hctx->queue->elevator->elevator_data;
        struct request *rq;

        spin_lock(&dd->lock);
        rq = __dd_dispatch_request(hctx);
        spin_unlock(&dd->lock);

        return rq;
}
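
/*
 * dd->lock is per request queue, not per hardware context, so all
 * hctxs dispatch from a single scheduling domain. This mirrors the
 * behaviour of the legacy deadline scheduler this code was adapted
 * from.
 */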

static void dd_exit_queue(struct elevator_queue *e)
{
        struct deadline_data *dd = e->elevator_data;

        BUG_ON(!list_empty(&dd->fifo_list[READ]));
        BUG_ON(!list_empty(&dd->fifo_list[WRITE]));

        kfree(dd);
}

/*
 * initialize elevator private data (deadline_data).
 */
static int dd_init_queue(struct request_queue *q, struct elevator_type *e)
{
        struct deadline_data *dd;
        struct elevator_queue *eq;

        eq = elevator_alloc(q, e);
        if (!eq)
                return -ENOMEM;

        dd = kzalloc_node(sizeof(*dd), GFP_KERNEL, q->node);
        if (!dd) {
                kobject_put(&eq->kobj);
                return -ENOMEM;
        }
        eq->elevator_data = dd;

        INIT_LIST_HEAD(&dd->fifo_list[READ]);
        INIT_LIST_HEAD(&dd->fifo_list[WRITE]);
        dd->sort_list[READ] = RB_ROOT;
        dd->sort_list[WRITE] = RB_ROOT;
        dd->fifo_expire[READ] = read_expire;
        dd->fifo_expire[WRITE] = write_expire;
        dd->writes_starved = writes_starved;
        dd->front_merges = 1;
        dd->fifo_batch = fifo_batch;
        spin_lock_init(&dd->lock);
        INIT_LIST_HEAD(&dd->dispatch);

        q->elevator = eq;
        return 0;
}

static int dd_request_merge(struct request_queue *q, struct request **rq,
                            struct bio *bio)
{
        struct deadline_data *dd = q->elevator->elevator_data;
        sector_t sector = bio_end_sector(bio);
        struct request *__rq;

        if (!dd->front_merges)
                return ELEVATOR_NO_MERGE;

        __rq = elv_rb_find(&dd->sort_list[bio_data_dir(bio)], sector);
        if (__rq) {
                BUG_ON(sector != blk_rq_pos(__rq));

                if (elv_bio_merge_ok(__rq, bio)) {
                        *rq = __rq;
                        return ELEVATOR_FRONT_MERGE;
                }
        }

        return ELEVATOR_NO_MERGE;
}

static bool dd_bio_merge(struct blk_mq_hw_ctx *hctx, struct bio *bio)
{
        struct request_queue *q = hctx->queue;
        struct deadline_data *dd = q->elevator->elevator_data;
        struct request *free = NULL;
        bool ret;

        spin_lock(&dd->lock);
        ret = blk_mq_sched_try_merge(q, bio, &free);
        spin_unlock(&dd->lock);

        if (free)
                blk_mq_free_request(free);

        return ret;
}
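
/*
 * blk_mq_sched_try_merge() hands back a request made redundant by the
 * merge in 'free', so that it can be released here, after dd->lock has
 * been dropped.
 */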

/*
 * add rq to rbtree and fifo
 */
static void dd_insert_request(struct blk_mq_hw_ctx *hctx, struct request *rq,
                              bool at_head)
{
        struct request_queue *q = hctx->queue;
        struct deadline_data *dd = q->elevator->elevator_data;
        const int data_dir = rq_data_dir(rq);

        if (blk_mq_sched_try_insert_merge(q, rq))
                return;

        blk_mq_sched_request_inserted(rq);

        if (at_head || blk_rq_is_passthrough(rq)) {
                if (at_head)
                        list_add(&rq->queuelist, &dd->dispatch);
                else
                        list_add_tail(&rq->queuelist, &dd->dispatch);
        } else {
                deadline_add_rq_rb(dd, rq);

                if (rq_mergeable(rq)) {
                        elv_rqhash_add(q, rq);
                        if (!q->last_merge)
                                q->last_merge = rq;
                }

                /*
                 * set expire time and add to fifo list
                 */
                rq->fifo_time = jiffies + dd->fifo_expire[data_dir];
                list_add_tail(&rq->queuelist, &dd->fifo_list[data_dir]);
        }
}
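
/*
 * Note that at_head insertions and passthrough requests (e.g. SCSI
 * ioctls) bypass the sort and fifo lists entirely and go straight to
 * the dispatch list, so the scheduler never reorders them.
 */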

static void dd_insert_requests(struct blk_mq_hw_ctx *hctx,
                               struct list_head *list, bool at_head)
{
        struct request_queue *q = hctx->queue;
        struct deadline_data *dd = q->elevator->elevator_data;

        spin_lock(&dd->lock);
        while (!list_empty(list)) {
                struct request *rq;

                rq = list_first_entry(list, struct request, queuelist);
                list_del_init(&rq->queuelist);
                dd_insert_request(hctx, rq, at_head);
        }
        spin_unlock(&dd->lock);
}

static bool dd_has_work(struct blk_mq_hw_ctx *hctx)
{
        struct deadline_data *dd = hctx->queue->elevator->elevator_data;

        return !list_empty_careful(&dd->dispatch) ||
                !list_empty_careful(&dd->fifo_list[0]) ||
                !list_empty_careful(&dd->fifo_list[1]);
}
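
/*
 * dd_has_work() is called without dd->lock held, hence the
 * list_empty_careful() checks, which are safe against a concurrent
 * list_del_init() on another CPU.
 */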

/*
 * sysfs parts below
 */
static ssize_t
deadline_var_show(int var, char *page)
{
        return sprintf(page, "%d\n", var);
}

static ssize_t
deadline_var_store(int *var, const char *page, size_t count)
{
        char *p = (char *) page;

        *var = simple_strtol(p, &p, 10);
        return count;
}

#define SHOW_FUNCTION(__FUNC, __VAR, __CONV) \
static ssize_t __FUNC(struct elevator_queue *e, char *page) \
{ \
        struct deadline_data *dd = e->elevator_data; \
        int __data = __VAR; \
        if (__CONV) \
                __data = jiffies_to_msecs(__data); \
        return deadline_var_show(__data, (page)); \
}
SHOW_FUNCTION(deadline_read_expire_show, dd->fifo_expire[READ], 1);
SHOW_FUNCTION(deadline_write_expire_show, dd->fifo_expire[WRITE], 1);
SHOW_FUNCTION(deadline_writes_starved_show, dd->writes_starved, 0);
SHOW_FUNCTION(deadline_front_merges_show, dd->front_merges, 0);
SHOW_FUNCTION(deadline_fifo_batch_show, dd->fifo_batch, 0);
#undef SHOW_FUNCTION

#define STORE_FUNCTION(__FUNC, __PTR, MIN, MAX, __CONV) \
static ssize_t __FUNC(struct elevator_queue *e, const char *page, size_t count) \
{ \
        struct deadline_data *dd = e->elevator_data; \
        int __data; \
        int ret = deadline_var_store(&__data, (page), count); \
        if (__data < (MIN)) \
                __data = (MIN); \
        else if (__data > (MAX)) \
                __data = (MAX); \
        if (__CONV) \
                *(__PTR) = msecs_to_jiffies(__data); \
        else \
                *(__PTR) = __data; \
        return ret; \
}
STORE_FUNCTION(deadline_read_expire_store, &dd->fifo_expire[READ], 0, INT_MAX, 1);
STORE_FUNCTION(deadline_write_expire_store, &dd->fifo_expire[WRITE], 0, INT_MAX, 1);
STORE_FUNCTION(deadline_writes_starved_store, &dd->writes_starved, INT_MIN, INT_MAX, 0);
STORE_FUNCTION(deadline_front_merges_store, &dd->front_merges, 0, 1, 0);
STORE_FUNCTION(deadline_fifo_batch_store, &dd->fifo_batch, 0, INT_MAX, 0);
#undef STORE_FUNCTION

#define DD_ATTR(name) \
        __ATTR(name, S_IRUGO|S_IWUSR, deadline_##name##_show, \
                                      deadline_##name##_store)

static struct elv_fs_entry deadline_attrs[] = {
        DD_ATTR(read_expire),
        DD_ATTR(write_expire),
        DD_ATTR(writes_starved),
        DD_ATTR(front_merges),
        DD_ATTR(fifo_batch),
        __ATTR_NULL
};
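
/*
 * These attributes appear under /sys/block/<dev>/queue/iosched/ once
 * the scheduler is active. For example (assuming a device named sdX):
 *
 *      echo 32 > /sys/block/sdX/queue/iosched/fifo_batch
 *
 * doubles the default batch size.
 */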

static struct elevator_type mq_deadline = {
        .ops.mq = {
                .insert_requests        = dd_insert_requests,
                .dispatch_request       = dd_dispatch_request,
                .next_request           = elv_rb_latter_request,
                .former_request         = elv_rb_former_request,
                .bio_merge              = dd_bio_merge,
                .request_merge          = dd_request_merge,
                .requests_merged        = dd_merged_requests,
                .request_merged         = dd_request_merged,
                .has_work               = dd_has_work,
                .init_sched             = dd_init_queue,
                .exit_sched             = dd_exit_queue,
        },

        .uses_mq        = true,
        .elevator_attrs = deadline_attrs,
        .elevator_name = "mq-deadline",
        .elevator_owner = THIS_MODULE,
};

static int __init deadline_init(void)
{
        return elv_register(&mq_deadline);
}

static void __exit deadline_exit(void)
{
        elv_unregister(&mq_deadline);
}

module_init(deadline_init);
module_exit(deadline_exit);
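
/*
 * After registration the scheduler can be selected per device at
 * runtime, e.g. (assuming a blk-mq device named sdX):
 *
 *      echo mq-deadline > /sys/block/sdX/queue/scheduler
 */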

MODULE_AUTHOR("Jens Axboe");
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("MQ deadline IO scheduler");