/*
 *  MQ Deadline i/o scheduler - adaptation of the legacy deadline scheduler,
 *  for the blk-mq scheduling framework
 *
 *  Copyright (C) 2016 Jens Axboe <axboe@kernel.dk>
 */
#include <linux/kernel.h>
#include <linux/fs.h>
#include <linux/blkdev.h>
#include <linux/blk-mq.h>
#include <linux/elevator.h>
#include <linux/bio.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/init.h>
#include <linux/compiler.h>
#include <linux/rbtree.h>
#include <linux/sbitmap.h>

#include "blk.h"
#include "blk-mq.h"
#include "blk-mq-tag.h"
#include "blk-mq-sched.h"

/*
 * See Documentation/block/deadline-iosched.txt
 */
static const int read_expire = HZ / 2;	/* max time before a read is submitted. */
static const int write_expire = 5 * HZ;	/* ditto for writes, these limits are SOFT! */
static const int writes_starved = 2;	/* max times reads can starve a write */
static const int fifo_batch = 16;	/* # of sequential requests treated as one
					   by the above parameters. For throughput. */
struct deadline_data {
	/*
	 * run time data
	 */

	/*
	 * Requests are present on both sort_list[] and fifo_list[].
	 */
	struct rb_root sort_list[2];
	struct list_head fifo_list[2];

	/*
	 * next in sort order. read, write or both are NULL
	 */
	struct request *next_rq[2];
	unsigned int batching;		/* number of sequential requests made */
	unsigned int starved;		/* times reads have starved writes */

	/*
	 * settings that change how the i/o scheduler behaves
	 */
	int fifo_expire[2];
	int fifo_batch;
	int writes_starved;
	int front_merges;

	spinlock_t lock;
	struct list_head dispatch;
};
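
/*
 * Design note (added commentary): every queued request lives on two
 * structures at once: an rb tree sorted by sector (for sequential,
 * one-way-elevator dispatch) and a FIFO list ordered by insertion time
 * (for deadline enforcement). deadline_del_rq_rb() below also repairs the
 * cached next_rq[] pointer when the cached request itself is removed.
 */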
63 | ||
64 | static inline struct rb_root * | |
65 | deadline_rb_root(struct deadline_data *dd, struct request *rq) | |
66 | { | |
67 | return &dd->sort_list[rq_data_dir(rq)]; | |
68 | } | |
69 | ||
70 | /* | |
71 | * get the request after `rq' in sector-sorted order | |
72 | */ | |
73 | static inline struct request * | |
74 | deadline_latter_request(struct request *rq) | |
75 | { | |
76 | struct rb_node *node = rb_next(&rq->rb_node); | |
77 | ||
78 | if (node) | |
79 | return rb_entry_rq(node); | |
80 | ||
81 | return NULL; | |
82 | } | |
83 | ||
84 | static void | |
85 | deadline_add_rq_rb(struct deadline_data *dd, struct request *rq) | |
86 | { | |
87 | struct rb_root *root = deadline_rb_root(dd, rq); | |
88 | ||
89 | elv_rb_add(root, rq); | |
90 | } | |
91 | ||
92 | static inline void | |
93 | deadline_del_rq_rb(struct deadline_data *dd, struct request *rq) | |
94 | { | |
95 | const int data_dir = rq_data_dir(rq); | |
96 | ||
97 | if (dd->next_rq[data_dir] == rq) | |
98 | dd->next_rq[data_dir] = deadline_latter_request(rq); | |
99 | ||
100 | elv_rb_del(deadline_rb_root(dd, rq), rq); | |
101 | } | |
102 | ||
103 | /* | |
104 | * remove rq from rbtree and fifo. | |
105 | */ | |
106 | static void deadline_remove_request(struct request_queue *q, struct request *rq) | |
107 | { | |
108 | struct deadline_data *dd = q->elevator->elevator_data; | |
109 | ||
110 | list_del_init(&rq->queuelist); | |
111 | ||
112 | /* | |
113 | * We might not be on the rbtree, if we are doing an insert merge | |
114 | */ | |
115 | if (!RB_EMPTY_NODE(&rq->rb_node)) | |
116 | deadline_del_rq_rb(dd, rq); | |
117 | ||
118 | elv_rqhash_del(q, rq); | |
119 | if (q->last_merge == rq) | |
120 | q->last_merge = NULL; | |
121 | } | |
122 | ||
123 | static void dd_request_merged(struct request_queue *q, struct request *req, | |
124 | int type) | |
125 | { | |
126 | struct deadline_data *dd = q->elevator->elevator_data; | |
127 | ||
128 | /* | |
129 | * if the merge was a front merge, we need to reposition request | |
130 | */ | |
131 | if (type == ELEVATOR_FRONT_MERGE) { | |
132 | elv_rb_del(deadline_rb_root(dd, req), req); | |
133 | deadline_add_rq_rb(dd, req); | |
134 | } | |
135 | } | |
136 | ||
137 | static void dd_merged_requests(struct request_queue *q, struct request *req, | |
138 | struct request *next) | |
139 | { | |
140 | /* | |
141 | * if next expires before rq, assign its expire time to rq | |
142 | * and move into next position (next will be deleted) in fifo | |
143 | */ | |
144 | if (!list_empty(&req->queuelist) && !list_empty(&next->queuelist)) { | |
145 | if (time_before((unsigned long)next->fifo_time, | |
146 | (unsigned long)req->fifo_time)) { | |
147 | list_move(&req->queuelist, &next->queuelist); | |
148 | req->fifo_time = next->fifo_time; | |
149 | } | |
150 | } | |
151 | ||
152 | /* | |
153 | * kill knowledge of next, this one is a goner | |
154 | */ | |
155 | deadline_remove_request(q, next); | |
156 | } | |
157 | ||
158 | /* | |
159 | * move an entry to dispatch queue | |
160 | */ | |
161 | static void | |
162 | deadline_move_request(struct deadline_data *dd, struct request *rq) | |
163 | { | |
164 | const int data_dir = rq_data_dir(rq); | |
165 | ||
166 | dd->next_rq[READ] = NULL; | |
167 | dd->next_rq[WRITE] = NULL; | |
168 | dd->next_rq[data_dir] = deadline_latter_request(rq); | |
169 | ||
170 | /* | |
171 | * take it off the sort and fifo list | |
172 | */ | |
173 | deadline_remove_request(rq->q, rq); | |
174 | } | |
175 | ||
176 | /* | |
177 | * deadline_check_fifo returns 0 if there are no expired requests on the fifo, | |
178 | * 1 otherwise. Requires !list_empty(&dd->fifo_list[data_dir]) | |
179 | */ | |
180 | static inline int deadline_check_fifo(struct deadline_data *dd, int ddir) | |
181 | { | |
182 | struct request *rq = rq_entry_fifo(dd->fifo_list[ddir].next); | |
183 | ||
184 | /* | |
185 | * rq is expired! | |
186 | */ | |
187 | if (time_after_eq(jiffies, (unsigned long)rq->fifo_time)) | |
188 | return 1; | |
189 | ||
190 | return 0; | |
191 | } | |
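
/*
 * Worked example (illustration, not from the original source): with the
 * default read_expire of HZ / 2, a read inserted at jiffies == J gets
 * rq->fifo_time = J + HZ / 2 in dd_insert_request(), so the check above
 * starts reporting it as expired half a second after insertion. Only the
 * head of the FIFO is checked, since it is always the oldest entry.
 */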
192 | ||
193 | /* | |
194 | * deadline_dispatch_requests selects the best request according to | |
195 | * read/write expire, fifo_batch, etc | |
196 | */ | |
197 | static struct request *__dd_dispatch_request(struct blk_mq_hw_ctx *hctx) | |
198 | { | |
199 | struct deadline_data *dd = hctx->queue->elevator->elevator_data; | |
200 | struct request *rq; | |
201 | bool reads, writes; | |
202 | int data_dir; | |
203 | ||
204 | if (!list_empty(&dd->dispatch)) { | |
205 | rq = list_first_entry(&dd->dispatch, struct request, queuelist); | |
206 | list_del_init(&rq->queuelist); | |
207 | goto done; | |
208 | } | |
209 | ||
210 | reads = !list_empty(&dd->fifo_list[READ]); | |
211 | writes = !list_empty(&dd->fifo_list[WRITE]); | |
212 | ||
213 | /* | |
214 | * batches are currently reads XOR writes | |
215 | */ | |
216 | if (dd->next_rq[WRITE]) | |
217 | rq = dd->next_rq[WRITE]; | |
218 | else | |
219 | rq = dd->next_rq[READ]; | |
220 | ||
221 | if (rq && dd->batching < dd->fifo_batch) | |
222 | /* we have a next request are still entitled to batch */ | |
223 | goto dispatch_request; | |
224 | ||
225 | /* | |
226 | * at this point we are not running a batch. select the appropriate | |
227 | * data direction (read / write) | |
228 | */ | |
229 | ||
230 | if (reads) { | |
231 | BUG_ON(RB_EMPTY_ROOT(&dd->sort_list[READ])); | |
232 | ||
233 | if (writes && (dd->starved++ >= dd->writes_starved)) | |
234 | goto dispatch_writes; | |
235 | ||
236 | data_dir = READ; | |
237 | ||
238 | goto dispatch_find_request; | |
239 | } | |
240 | ||
241 | /* | |
242 | * there are either no reads or writes have been starved | |
243 | */ | |
244 | ||
245 | if (writes) { | |
246 | dispatch_writes: | |
247 | BUG_ON(RB_EMPTY_ROOT(&dd->sort_list[WRITE])); | |
248 | ||
249 | dd->starved = 0; | |
250 | ||
251 | data_dir = WRITE; | |
252 | ||
253 | goto dispatch_find_request; | |
254 | } | |
255 | ||
256 | return NULL; | |
257 | ||
258 | dispatch_find_request: | |
259 | /* | |
260 | * we are not running a batch, find best request for selected data_dir | |
261 | */ | |
262 | if (deadline_check_fifo(dd, data_dir) || !dd->next_rq[data_dir]) { | |
263 | /* | |
264 | * A deadline has expired, the last request was in the other | |
265 | * direction, or we have run out of higher-sectored requests. | |
266 | * Start again from the request with the earliest expiry time. | |
267 | */ | |
268 | rq = rq_entry_fifo(dd->fifo_list[data_dir].next); | |
269 | } else { | |
270 | /* | |
271 | * The last req was the same dir and we have a next request in | |
272 | * sort order. No expired requests so continue on from here. | |
273 | */ | |
274 | rq = dd->next_rq[data_dir]; | |
275 | } | |
276 | ||
277 | dd->batching = 0; | |
278 | ||
279 | dispatch_request: | |
280 | /* | |
281 | * rq is the selected appropriate request. | |
282 | */ | |
283 | dd->batching++; | |
284 | deadline_move_request(dd, rq); | |
285 | done: | |
286 | rq->rq_flags |= RQF_STARTED; | |
287 | return rq; | |
288 | } | |
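
/*
 * Summary of the selection policy above (added commentary): requests on
 * dd->dispatch always win; otherwise an in-progress batch keeps running
 * until fifo_batch requests have been issued; otherwise reads are
 * preferred until writes have lost out writes_starved times in a row;
 * and within a direction, dispatch is sequential (sort order) unless the
 * oldest FIFO entry has already expired.
 */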
289 | ||
290 | static void dd_dispatch_requests(struct blk_mq_hw_ctx *hctx, | |
291 | struct list_head *rq_list) | |
292 | { | |
293 | struct deadline_data *dd = hctx->queue->elevator->elevator_data; | |
294 | ||
295 | spin_lock(&dd->lock); | |
296 | blk_mq_sched_move_to_dispatch(hctx, rq_list, __dd_dispatch_request); | |
297 | spin_unlock(&dd->lock); | |
298 | } | |
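
/*
 * Design note (added commentary): dd->lock is per-scheduler rather than
 * per-hardware-queue, so every hctx of a queue serializes on it here and
 * in the insert/merge paths. That mirrors the single-queue behaviour of
 * the legacy deadline scheduler this code was adapted from.
 */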
299 | ||
300 | static void dd_exit_queue(struct elevator_queue *e) | |
301 | { | |
302 | struct deadline_data *dd = e->elevator_data; | |
303 | ||
304 | BUG_ON(!list_empty(&dd->fifo_list[READ])); | |
305 | BUG_ON(!list_empty(&dd->fifo_list[WRITE])); | |
306 | ||
307 | kfree(dd); | |
308 | } | |
309 | ||
310 | /* | |
311 | * initialize elevator private data (deadline_data). | |
312 | */ | |
313 | static int dd_init_queue(struct request_queue *q, struct elevator_type *e) | |
314 | { | |
315 | struct deadline_data *dd; | |
316 | struct elevator_queue *eq; | |
317 | ||
318 | eq = elevator_alloc(q, e); | |
319 | if (!eq) | |
320 | return -ENOMEM; | |
321 | ||
322 | dd = kzalloc_node(sizeof(*dd), GFP_KERNEL, q->node); | |
323 | if (!dd) { | |
324 | kobject_put(&eq->kobj); | |
325 | return -ENOMEM; | |
326 | } | |
327 | eq->elevator_data = dd; | |
328 | ||
329 | INIT_LIST_HEAD(&dd->fifo_list[READ]); | |
330 | INIT_LIST_HEAD(&dd->fifo_list[WRITE]); | |
331 | dd->sort_list[READ] = RB_ROOT; | |
332 | dd->sort_list[WRITE] = RB_ROOT; | |
333 | dd->fifo_expire[READ] = read_expire; | |
334 | dd->fifo_expire[WRITE] = write_expire; | |
335 | dd->writes_starved = writes_starved; | |
336 | dd->front_merges = 1; | |
337 | dd->fifo_batch = fifo_batch; | |
338 | spin_lock_init(&dd->lock); | |
339 | INIT_LIST_HEAD(&dd->dispatch); | |
340 | ||
341 | q->elevator = eq; | |
342 | return 0; | |
343 | } | |
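
/*
 * Error-path note (added commentary): when the deadline_data allocation
 * fails, kobject_put() drops the reference taken by elevator_alloc(),
 * which releases the elevator_queue itself.
 */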
344 | ||
345 | static int dd_request_merge(struct request_queue *q, struct request **rq, | |
346 | struct bio *bio) | |
347 | { | |
348 | struct deadline_data *dd = q->elevator->elevator_data; | |
349 | sector_t sector = bio_end_sector(bio); | |
350 | struct request *__rq; | |
351 | ||
352 | if (!dd->front_merges) | |
353 | return ELEVATOR_NO_MERGE; | |
354 | ||
355 | __rq = elv_rb_find(&dd->sort_list[bio_data_dir(bio)], sector); | |
356 | if (__rq) { | |
357 | BUG_ON(sector != blk_rq_pos(__rq)); | |
358 | ||
359 | if (elv_bio_merge_ok(__rq, bio)) { | |
360 | *rq = __rq; | |
361 | return ELEVATOR_FRONT_MERGE; | |
362 | } | |
363 | } | |
364 | ||
365 | return ELEVATOR_NO_MERGE; | |
366 | } | |
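
/*
 * Note (added commentary): this is the elevator's front-merge hook. It
 * looks up a queued request whose start sector equals bio_end_sector(bio),
 * i.e. one the bio could be prepended to. Back merges are found earlier in
 * the core merge path, via q->last_merge and the request hash.
 */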
367 | ||
368 | static bool dd_bio_merge(struct blk_mq_hw_ctx *hctx, struct bio *bio) | |
369 | { | |
370 | struct request_queue *q = hctx->queue; | |
371 | struct deadline_data *dd = q->elevator->elevator_data; | |
372 | int ret; | |
373 | ||
374 | spin_lock(&dd->lock); | |
375 | ret = blk_mq_sched_try_merge(q, bio); | |
376 | spin_unlock(&dd->lock); | |
377 | ||
378 | return ret; | |
379 | } | |
380 | ||
381 | /* | |
382 | * add rq to rbtree and fifo | |
383 | */ | |
384 | static void dd_insert_request(struct blk_mq_hw_ctx *hctx, struct request *rq, | |
385 | bool at_head) | |
386 | { | |
387 | struct request_queue *q = hctx->queue; | |
388 | struct deadline_data *dd = q->elevator->elevator_data; | |
389 | const int data_dir = rq_data_dir(rq); | |
390 | ||
391 | if (blk_mq_sched_try_insert_merge(q, rq)) | |
392 | return; | |
393 | ||
394 | blk_mq_sched_request_inserted(rq); | |
395 | ||
396 | if (blk_mq_sched_bypass_insert(hctx, rq)) | |
397 | return; | |
398 | ||
399 | if (at_head || rq->cmd_type != REQ_TYPE_FS) { | |
400 | if (at_head) | |
401 | list_add(&rq->queuelist, &dd->dispatch); | |
402 | else | |
403 | list_add_tail(&rq->queuelist, &dd->dispatch); | |
404 | } else { | |
405 | deadline_add_rq_rb(dd, rq); | |
406 | ||
407 | if (rq_mergeable(rq)) { | |
408 | elv_rqhash_add(q, rq); | |
409 | if (!q->last_merge) | |
410 | q->last_merge = rq; | |
411 | } | |
412 | ||
413 | /* | |
414 | * set expire time and add to fifo list | |
415 | */ | |
416 | rq->fifo_time = jiffies + dd->fifo_expire[data_dir]; | |
417 | list_add_tail(&rq->queuelist, &dd->fifo_list[data_dir]); | |
418 | } | |
419 | } | |
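
/*
 * Note (added commentary): head insertions and non-filesystem requests
 * skip the sort/fifo machinery entirely and go straight onto dd->dispatch,
 * which __dd_dispatch_request() drains before anything else. That
 * preserves at-head semantics and keeps passthrough commands out of the
 * elevator ordering.
 */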
420 | ||
421 | static void dd_insert_requests(struct blk_mq_hw_ctx *hctx, | |
422 | struct list_head *list, bool at_head) | |
423 | { | |
424 | struct request_queue *q = hctx->queue; | |
425 | struct deadline_data *dd = q->elevator->elevator_data; | |
426 | ||
427 | spin_lock(&dd->lock); | |
428 | while (!list_empty(list)) { | |
429 | struct request *rq; | |
430 | ||
431 | rq = list_first_entry(list, struct request, queuelist); | |
432 | list_del_init(&rq->queuelist); | |
433 | dd_insert_request(hctx, rq, at_head); | |
434 | } | |
435 | spin_unlock(&dd->lock); | |
436 | } | |
437 | ||
static bool dd_has_work(struct blk_mq_hw_ctx *hctx)
{
	struct deadline_data *dd = hctx->queue->elevator->elevator_data;

	return !list_empty_careful(&dd->dispatch) ||
		!list_empty_careful(&dd->fifo_list[0]) ||
		!list_empty_careful(&dd->fifo_list[1]);
}
446 | ||
447 | /* | |
448 | * sysfs parts below | |
449 | */ | |
450 | static ssize_t | |
451 | deadline_var_show(int var, char *page) | |
452 | { | |
453 | return sprintf(page, "%d\n", var); | |
454 | } | |
455 | ||
456 | static ssize_t | |
457 | deadline_var_store(int *var, const char *page, size_t count) | |
458 | { | |
459 | char *p = (char *) page; | |
460 | ||
461 | *var = simple_strtol(p, &p, 10); | |
462 | return count; | |
463 | } | |
464 | ||
465 | #define SHOW_FUNCTION(__FUNC, __VAR, __CONV) \ | |
466 | static ssize_t __FUNC(struct elevator_queue *e, char *page) \ | |
467 | { \ | |
468 | struct deadline_data *dd = e->elevator_data; \ | |
469 | int __data = __VAR; \ | |
470 | if (__CONV) \ | |
471 | __data = jiffies_to_msecs(__data); \ | |
472 | return deadline_var_show(__data, (page)); \ | |
473 | } | |
474 | SHOW_FUNCTION(deadline_read_expire_show, dd->fifo_expire[READ], 1); | |
475 | SHOW_FUNCTION(deadline_write_expire_show, dd->fifo_expire[WRITE], 1); | |
476 | SHOW_FUNCTION(deadline_writes_starved_show, dd->writes_starved, 0); | |
477 | SHOW_FUNCTION(deadline_front_merges_show, dd->front_merges, 0); | |
478 | SHOW_FUNCTION(deadline_fifo_batch_show, dd->fifo_batch, 0); | |
479 | #undef SHOW_FUNCTION | |
480 | ||
481 | #define STORE_FUNCTION(__FUNC, __PTR, MIN, MAX, __CONV) \ | |
482 | static ssize_t __FUNC(struct elevator_queue *e, const char *page, size_t count) \ | |
483 | { \ | |
484 | struct deadline_data *dd = e->elevator_data; \ | |
485 | int __data; \ | |
486 | int ret = deadline_var_store(&__data, (page), count); \ | |
487 | if (__data < (MIN)) \ | |
488 | __data = (MIN); \ | |
489 | else if (__data > (MAX)) \ | |
490 | __data = (MAX); \ | |
491 | if (__CONV) \ | |
492 | *(__PTR) = msecs_to_jiffies(__data); \ | |
493 | else \ | |
494 | *(__PTR) = __data; \ | |
495 | return ret; \ | |
496 | } | |
497 | STORE_FUNCTION(deadline_read_expire_store, &dd->fifo_expire[READ], 0, INT_MAX, 1); | |
498 | STORE_FUNCTION(deadline_write_expire_store, &dd->fifo_expire[WRITE], 0, INT_MAX, 1); | |
499 | STORE_FUNCTION(deadline_writes_starved_store, &dd->writes_starved, INT_MIN, INT_MAX, 0); | |
500 | STORE_FUNCTION(deadline_front_merges_store, &dd->front_merges, 0, 1, 0); | |
501 | STORE_FUNCTION(deadline_fifo_batch_store, &dd->fifo_batch, 0, INT_MAX, 0); | |
502 | #undef STORE_FUNCTION | |
503 | ||
504 | #define DD_ATTR(name) \ | |
505 | __ATTR(name, S_IRUGO|S_IWUSR, deadline_##name##_show, \ | |
506 | deadline_##name##_store) | |
507 | ||
508 | static struct elv_fs_entry deadline_attrs[] = { | |
509 | DD_ATTR(read_expire), | |
510 | DD_ATTR(write_expire), | |
511 | DD_ATTR(writes_starved), | |
512 | DD_ATTR(front_merges), | |
513 | DD_ATTR(fifo_batch), | |
514 | __ATTR_NULL | |
515 | }; | |
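
/*
 * Usage sketch (illustration; assumes a device named sda):
 *
 *	echo mq-deadline > /sys/block/sda/queue/scheduler
 *	cat /sys/block/sda/queue/iosched/fifo_batch
 *	echo 32 > /sys/block/sda/queue/iosched/fifo_batch
 *
 * read_expire and write_expire are reported and set in milliseconds and
 * converted to/from jiffies internally (the __CONV flag above).
 */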
516 | ||
517 | static struct elevator_type mq_deadline = { | |
518 | .ops.mq = { | |
519 | .insert_requests = dd_insert_requests, | |
520 | .dispatch_requests = dd_dispatch_requests, | |
521 | .next_request = elv_rb_latter_request, | |
522 | .former_request = elv_rb_former_request, | |
523 | .bio_merge = dd_bio_merge, | |
524 | .request_merge = dd_request_merge, | |
525 | .requests_merged = dd_merged_requests, | |
526 | .request_merged = dd_request_merged, | |
527 | .has_work = dd_has_work, | |
528 | .init_sched = dd_init_queue, | |
529 | .exit_sched = dd_exit_queue, | |
530 | }, | |
531 | ||
532 | .uses_mq = true, | |
533 | .elevator_attrs = deadline_attrs, | |
534 | .elevator_name = "mq-deadline", | |
535 | .elevator_owner = THIS_MODULE, | |
536 | }; | |
537 | ||
538 | static int __init deadline_init(void) | |
539 | { | |
540 | return elv_register(&mq_deadline); | |
541 | } | |
542 | ||
543 | static void __exit deadline_exit(void) | |
544 | { | |
545 | elv_unregister(&mq_deadline); | |
546 | } | |
547 | ||
548 | module_init(deadline_init); | |
549 | module_exit(deadline_exit); | |
550 | ||
551 | MODULE_AUTHOR("Jens Axboe"); | |
552 | MODULE_LICENSE("GPL"); | |
553 | MODULE_DESCRIPTION("MQ deadline IO scheduler"); |