// SPDX-License-Identifier: GPL-2.0
/*
 *  MQ Deadline i/o scheduler - adaptation of the legacy deadline scheduler,
 *  for the blk-mq scheduling framework
 *
 *  Copyright (C) 2016 Jens Axboe <axboe@kernel.dk>
 */
#include <linux/kernel.h>
#include <linux/fs.h>
#include <linux/blkdev.h>
#include <linux/blk-mq.h>
#include <linux/elevator.h>
#include <linux/bio.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/init.h>
#include <linux/compiler.h>
#include <linux/rbtree.h>
#include <linux/sbitmap.h>

#include <trace/events/block.h>

#include "blk.h"
#include "blk-mq.h"
#include "blk-mq-debugfs.h"
#include "blk-mq-tag.h"
#include "blk-mq-sched.h"

/*
 * See Documentation/block/deadline-iosched.rst
 */
static const int read_expire = HZ / 2;  /* max time before a read is submitted. */
static const int write_expire = 5 * HZ; /* ditto for writes, these limits are SOFT! */
static const int writes_starved = 2;    /* max times reads can starve a write */
static const int fifo_batch = 16;       /* # of sequential requests treated as one
                                           by the above parameters. For throughput. */

struct deadline_data {
        /*
         * run time data
         */

        /*
         * requests are present on both the sort_list rbtree and the fifo_list
         */
        struct rb_root sort_list[2];
        struct list_head fifo_list[2];

        /*
         * next in sort order; the read, write or both pointers may be NULL
         */
        struct request *next_rq[2];
        unsigned int batching;          /* number of sequential requests made */
        unsigned int starved;           /* times reads have starved writes */

        /*
         * settings that change how the i/o scheduler behaves
         */
        int fifo_expire[2];
        int fifo_batch;
        int writes_starved;
        int front_merges;

        spinlock_t lock;
        spinlock_t zone_lock;
        struct list_head dispatch;
};
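
/*
 * Unlike per-hctx scheduler state, everything above is shared by all
 * hardware queues of the device (see the comment above dd_dispatch_request()
 * below): dd->lock serializes access to it, while zone_lock protects the
 * zone write locking state used for zoned block devices.
 */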
68 | ||
69 | static inline struct rb_root * | |
70 | deadline_rb_root(struct deadline_data *dd, struct request *rq) | |
71 | { | |
72 | return &dd->sort_list[rq_data_dir(rq)]; | |
73 | } | |
74 | ||
75 | /* | |
76 | * get the request after `rq' in sector-sorted order | |
77 | */ | |
78 | static inline struct request * | |
79 | deadline_latter_request(struct request *rq) | |
80 | { | |
81 | struct rb_node *node = rb_next(&rq->rb_node); | |
82 | ||
83 | if (node) | |
84 | return rb_entry_rq(node); | |
85 | ||
86 | return NULL; | |
87 | } | |
88 | ||
89 | static void | |
90 | deadline_add_rq_rb(struct deadline_data *dd, struct request *rq) | |
91 | { | |
92 | struct rb_root *root = deadline_rb_root(dd, rq); | |
93 | ||
94 | elv_rb_add(root, rq); | |
95 | } | |
96 | ||
97 | static inline void | |
98 | deadline_del_rq_rb(struct deadline_data *dd, struct request *rq) | |
99 | { | |
100 | const int data_dir = rq_data_dir(rq); | |
101 | ||
102 | if (dd->next_rq[data_dir] == rq) | |
103 | dd->next_rq[data_dir] = deadline_latter_request(rq); | |
104 | ||
105 | elv_rb_del(deadline_rb_root(dd, rq), rq); | |
106 | } | |
107 | ||
108 | /* | |
109 | * remove rq from rbtree and fifo. | |
110 | */ | |
111 | static void deadline_remove_request(struct request_queue *q, struct request *rq) | |
112 | { | |
113 | struct deadline_data *dd = q->elevator->elevator_data; | |
114 | ||
115 | list_del_init(&rq->queuelist); | |
116 | ||
117 | /* | |
118 | * We might not be on the rbtree, if we are doing an insert merge | |
119 | */ | |
120 | if (!RB_EMPTY_NODE(&rq->rb_node)) | |
121 | deadline_del_rq_rb(dd, rq); | |
122 | ||
123 | elv_rqhash_del(q, rq); | |
124 | if (q->last_merge == rq) | |
125 | q->last_merge = NULL; | |
126 | } | |
127 | ||
128 | static void dd_request_merged(struct request_queue *q, struct request *req, | |
34fe7c05 | 129 | enum elv_merge type) |
945ffb60 JA |
130 | { |
131 | struct deadline_data *dd = q->elevator->elevator_data; | |
132 | ||
133 | /* | |
134 | * if the merge was a front merge, we need to reposition request | |
135 | */ | |
136 | if (type == ELEVATOR_FRONT_MERGE) { | |
137 | elv_rb_del(deadline_rb_root(dd, req), req); | |
138 | deadline_add_rq_rb(dd, req); | |
139 | } | |
140 | } | |
141 | ||
142 | static void dd_merged_requests(struct request_queue *q, struct request *req, | |
143 | struct request *next) | |
144 | { | |
145 | /* | |
146 | * if next expires before rq, assign its expire time to rq | |
147 | * and move into next position (next will be deleted) in fifo | |
148 | */ | |
149 | if (!list_empty(&req->queuelist) && !list_empty(&next->queuelist)) { | |
150 | if (time_before((unsigned long)next->fifo_time, | |
151 | (unsigned long)req->fifo_time)) { | |
152 | list_move(&req->queuelist, &next->queuelist); | |
153 | req->fifo_time = next->fifo_time; | |
154 | } | |
155 | } | |
156 | ||
157 | /* | |
158 | * kill knowledge of next, this one is a goner | |
159 | */ | |
160 | deadline_remove_request(q, next); | |
161 | } | |
162 | ||
163 | /* | |
164 | * move an entry to dispatch queue | |
165 | */ | |
166 | static void | |
167 | deadline_move_request(struct deadline_data *dd, struct request *rq) | |
168 | { | |
169 | const int data_dir = rq_data_dir(rq); | |
170 | ||
171 | dd->next_rq[READ] = NULL; | |
172 | dd->next_rq[WRITE] = NULL; | |
173 | dd->next_rq[data_dir] = deadline_latter_request(rq); | |
174 | ||
175 | /* | |
176 | * take it off the sort and fifo list | |
177 | */ | |
178 | deadline_remove_request(rq->q, rq); | |
179 | } | |
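
/*
 * Caching rq's successor in next_rq[] above is what keeps a batch
 * sequential: the next dispatch in the same direction resumes from the
 * request that directly follows the one just moved.
 */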

/*
 * deadline_check_fifo returns 0 if there are no expired requests on the fifo,
 * 1 otherwise. Requires !list_empty(&dd->fifo_list[data_dir])
 */
static inline int deadline_check_fifo(struct deadline_data *dd, int ddir)
{
        struct request *rq = rq_entry_fifo(dd->fifo_list[ddir].next);

        /*
         * rq is expired!
         */
        if (time_after_eq(jiffies, (unsigned long)rq->fifo_time))
                return 1;

        return 0;
}

/*
 * For the specified data direction, return the next request to
 * dispatch using arrival ordered lists.
 */
static struct request *
deadline_fifo_request(struct deadline_data *dd, int data_dir)
{
        struct request *rq;
        unsigned long flags;

        if (WARN_ON_ONCE(data_dir != READ && data_dir != WRITE))
                return NULL;

        if (list_empty(&dd->fifo_list[data_dir]))
                return NULL;

        rq = rq_entry_fifo(dd->fifo_list[data_dir].next);
        if (data_dir == READ || !blk_queue_is_zoned(rq->q))
                return rq;

        /*
         * Look for a write request that can be dispatched, that is one with
         * an unlocked target zone.
         */
        spin_lock_irqsave(&dd->zone_lock, flags);
        list_for_each_entry(rq, &dd->fifo_list[WRITE], queuelist) {
                if (blk_req_can_dispatch_to_zone(rq))
                        goto out;
        }
        rq = NULL;
out:
        spin_unlock_irqrestore(&dd->zone_lock, flags);

        return rq;
}

/*
 * For the specified data direction, return the next request to
 * dispatch using sector position sorted lists.
 */
static struct request *
deadline_next_request(struct deadline_data *dd, int data_dir)
{
        struct request *rq;
        unsigned long flags;

        if (WARN_ON_ONCE(data_dir != READ && data_dir != WRITE))
                return NULL;

        rq = dd->next_rq[data_dir];
        if (!rq)
                return NULL;

        if (data_dir == READ || !blk_queue_is_zoned(rq->q))
                return rq;

        /*
         * Look for a write request that can be dispatched, that is one with
         * an unlocked target zone.
         */
        spin_lock_irqsave(&dd->zone_lock, flags);
        while (rq) {
                if (blk_req_can_dispatch_to_zone(rq))
                        break;
                rq = deadline_latter_request(rq);
        }
        spin_unlock_irqrestore(&dd->zone_lock, flags);

        return rq;
}
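
/*
 * The two helpers above are deliberately symmetric: deadline_fifo_request()
 * walks requests in arrival (deadline) order, deadline_next_request() follows
 * the cached sector order, and on zoned devices both skip writes whose target
 * zone is currently write-locked.
 */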

/*
 * deadline_dispatch_requests selects the best request according to
 * read/write expire, fifo_batch, etc
 */
static struct request *__dd_dispatch_request(struct deadline_data *dd)
{
        struct request *rq, *next_rq;
        bool reads, writes;
        int data_dir;

        if (!list_empty(&dd->dispatch)) {
                rq = list_first_entry(&dd->dispatch, struct request, queuelist);
                list_del_init(&rq->queuelist);
                goto done;
        }

        reads = !list_empty(&dd->fifo_list[READ]);
        writes = !list_empty(&dd->fifo_list[WRITE]);

        /*
         * batches are currently reads XOR writes
         */
        rq = deadline_next_request(dd, WRITE);
        if (!rq)
                rq = deadline_next_request(dd, READ);

        if (rq && dd->batching < dd->fifo_batch)
                /* we have a next request and are still entitled to batch */
                goto dispatch_request;

        /*
         * at this point we are not running a batch. select the appropriate
         * data direction (read / write)
         */

        if (reads) {
                BUG_ON(RB_EMPTY_ROOT(&dd->sort_list[READ]));

                if (deadline_fifo_request(dd, WRITE) &&
                    (dd->starved++ >= dd->writes_starved))
                        goto dispatch_writes;

                data_dir = READ;

                goto dispatch_find_request;
        }

        /*
         * either there are no reads, or writes have been starved
         */

        if (writes) {
dispatch_writes:
                BUG_ON(RB_EMPTY_ROOT(&dd->sort_list[WRITE]));

                dd->starved = 0;

                data_dir = WRITE;

                goto dispatch_find_request;
        }

        return NULL;

dispatch_find_request:
        /*
         * we are not running a batch, find the best request for the selected
         * data_dir
         */
        next_rq = deadline_next_request(dd, data_dir);
        if (deadline_check_fifo(dd, data_dir) || !next_rq) {
                /*
                 * A deadline has expired, the last request was in the other
                 * direction, or we have run out of higher-sectored requests.
                 * Start again from the request with the earliest expiry time.
                 */
                rq = deadline_fifo_request(dd, data_dir);
        } else {
                /*
                 * The last req was the same dir and we have a next request in
                 * sort order. No expired requests so continue on from here.
                 */
                rq = next_rq;
        }

        /*
         * For a zoned block device, if we only have writes queued and none of
         * them can be dispatched, rq will be NULL.
         */
        if (!rq)
                return NULL;

        dd->batching = 0;

dispatch_request:
        /*
         * rq is the selected request.
         */
        dd->batching++;
        deadline_move_request(dd, rq);
done:
        /*
         * If the request needs its target zone locked, do it.
         */
        blk_req_zone_write_lock(rq);
        rq->rq_flags |= RQF_STARTED;
        return rq;
}

/*
 * One confusing aspect here is that we get called for a specific
 * hardware queue, but we may return a request that is for a
 * different hardware queue. This is because mq-deadline has shared
 * state for all hardware queues, in terms of sorting, FIFOs, etc.
 */
static struct request *dd_dispatch_request(struct blk_mq_hw_ctx *hctx)
{
        struct deadline_data *dd = hctx->queue->elevator->elevator_data;
        struct request *rq;

        spin_lock(&dd->lock);
        rq = __dd_dispatch_request(dd);
        spin_unlock(&dd->lock);

        return rq;
}

static void dd_exit_queue(struct elevator_queue *e)
{
        struct deadline_data *dd = e->elevator_data;

        BUG_ON(!list_empty(&dd->fifo_list[READ]));
        BUG_ON(!list_empty(&dd->fifo_list[WRITE]));

        kfree(dd);
}

/*
 * initialize elevator private data (deadline_data).
 */
static int dd_init_queue(struct request_queue *q, struct elevator_type *e)
{
        struct deadline_data *dd;
        struct elevator_queue *eq;

        eq = elevator_alloc(q, e);
        if (!eq)
                return -ENOMEM;

        dd = kzalloc_node(sizeof(*dd), GFP_KERNEL, q->node);
        if (!dd) {
                kobject_put(&eq->kobj);
                return -ENOMEM;
        }
        eq->elevator_data = dd;

        INIT_LIST_HEAD(&dd->fifo_list[READ]);
        INIT_LIST_HEAD(&dd->fifo_list[WRITE]);
        dd->sort_list[READ] = RB_ROOT;
        dd->sort_list[WRITE] = RB_ROOT;
        dd->fifo_expire[READ] = read_expire;
        dd->fifo_expire[WRITE] = write_expire;
        dd->writes_starved = writes_starved;
        dd->front_merges = 1;
        dd->fifo_batch = fifo_batch;
        spin_lock_init(&dd->lock);
        spin_lock_init(&dd->zone_lock);
        INIT_LIST_HEAD(&dd->dispatch);

        q->elevator = eq;
        return 0;
}

static int dd_request_merge(struct request_queue *q, struct request **rq,
                            struct bio *bio)
{
        struct deadline_data *dd = q->elevator->elevator_data;
        sector_t sector = bio_end_sector(bio);
        struct request *__rq;

        if (!dd->front_merges)
                return ELEVATOR_NO_MERGE;

        __rq = elv_rb_find(&dd->sort_list[bio_data_dir(bio)], sector);
        if (__rq) {
                BUG_ON(sector != blk_rq_pos(__rq));

                if (elv_bio_merge_ok(__rq, bio)) {
                        *rq = __rq;
                        return ELEVATOR_FRONT_MERGE;
                }
        }

        return ELEVATOR_NO_MERGE;
}
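
/*
 * A front merge candidate is a queued request that starts exactly where the
 * new bio ends, hence the rbtree lookup by bio_end_sector() above.
 */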

static bool dd_bio_merge(struct request_queue *q, struct bio *bio,
                         unsigned int nr_segs)
{
        struct deadline_data *dd = q->elevator->elevator_data;
        struct request *free = NULL;
        bool ret;

        spin_lock(&dd->lock);
        ret = blk_mq_sched_try_merge(q, bio, nr_segs, &free);
        spin_unlock(&dd->lock);

        if (free)
                blk_mq_free_request(free);

        return ret;
}

/*
 * add rq to rbtree and fifo
 */
static void dd_insert_request(struct blk_mq_hw_ctx *hctx, struct request *rq,
                              bool at_head)
{
        struct request_queue *q = hctx->queue;
        struct deadline_data *dd = q->elevator->elevator_data;
        const int data_dir = rq_data_dir(rq);

        /*
         * This may be a requeue of a write request that has locked its
         * target zone. If that is the case, this releases the zone lock.
         */
        blk_req_zone_write_unlock(rq);

        if (blk_mq_sched_try_insert_merge(q, rq))
                return;

        trace_block_rq_insert(rq);

        if (at_head) {
                list_add(&rq->queuelist, &dd->dispatch);
        } else {
                deadline_add_rq_rb(dd, rq);

                if (rq_mergeable(rq)) {
                        elv_rqhash_add(q, rq);
                        if (!q->last_merge)
                                q->last_merge = rq;
                }

                /*
                 * set expire time and add to fifo list
                 */
                rq->fifo_time = jiffies + dd->fifo_expire[data_dir];
                list_add_tail(&rq->queuelist, &dd->fifo_list[data_dir]);
        }
}
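
/*
 * Note that at_head insertions bypass the sort and fifo lists entirely: they
 * go straight onto the dispatch list and are therefore never candidates for
 * merging or deadline scheduling.
 */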

static void dd_insert_requests(struct blk_mq_hw_ctx *hctx,
                               struct list_head *list, bool at_head)
{
        struct request_queue *q = hctx->queue;
        struct deadline_data *dd = q->elevator->elevator_data;

        spin_lock(&dd->lock);
        while (!list_empty(list)) {
                struct request *rq;

                rq = list_first_entry(list, struct request, queuelist);
                list_del_init(&rq->queuelist);
                dd_insert_request(hctx, rq, at_head);
        }
        spin_unlock(&dd->lock);
}

/*
 * Nothing to do here. This is defined only to ensure that the
 * .finish_request method is called upon request completion.
 */
static void dd_prepare_request(struct request *rq)
{
}

/*
 * For zoned block devices, write unlock the target zone of
 * completed write requests. Do this while holding the zone lock
 * spinlock so that the zone is never unlocked while deadline_fifo_request()
 * or deadline_next_request() are executing. This function is called for
 * all requests, whether or not these requests complete successfully.
 *
 * For a zoned block device, __dd_dispatch_request() may have stopped
 * dispatching requests if all the queued requests are write requests directed
 * at zones that are already locked due to on-going write requests. To ensure
 * write request dispatch progress in this case, mark the queue as needing a
 * restart to ensure that the queue is run again after completion of the
 * request and zones being unlocked.
 */
static void dd_finish_request(struct request *rq)
{
        struct request_queue *q = rq->q;

        if (blk_queue_is_zoned(q)) {
                struct deadline_data *dd = q->elevator->elevator_data;
                unsigned long flags;

                spin_lock_irqsave(&dd->zone_lock, flags);
                blk_req_zone_write_unlock(rq);
                if (!list_empty(&dd->fifo_list[WRITE]))
                        blk_mq_sched_mark_restart_hctx(rq->mq_hctx);
                spin_unlock_irqrestore(&dd->zone_lock, flags);
        }
}

static bool dd_has_work(struct blk_mq_hw_ctx *hctx)
{
        struct deadline_data *dd = hctx->queue->elevator->elevator_data;

        return !list_empty_careful(&dd->dispatch) ||
                !list_empty_careful(&dd->fifo_list[0]) ||
                !list_empty_careful(&dd->fifo_list[1]);
}
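
/*
 * The list_empty_careful() checks above are done locklessly; that is fine
 * here since dd_has_work() is only a hint that the queue should be run, and
 * the dispatch path re-checks the lists under dd->lock.
 */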

/*
 * sysfs parts below
 */
static ssize_t
deadline_var_show(int var, char *page)
{
        return sprintf(page, "%d\n", var);
}

static void
deadline_var_store(int *var, const char *page)
{
        char *p = (char *) page;

        *var = simple_strtol(p, &p, 10);
}

#define SHOW_FUNCTION(__FUNC, __VAR, __CONV)                            \
static ssize_t __FUNC(struct elevator_queue *e, char *page)             \
{                                                                       \
        struct deadline_data *dd = e->elevator_data;                    \
        int __data = __VAR;                                             \
        if (__CONV)                                                     \
                __data = jiffies_to_msecs(__data);                      \
        return deadline_var_show(__data, (page));                       \
}
SHOW_FUNCTION(deadline_read_expire_show, dd->fifo_expire[READ], 1);
SHOW_FUNCTION(deadline_write_expire_show, dd->fifo_expire[WRITE], 1);
SHOW_FUNCTION(deadline_writes_starved_show, dd->writes_starved, 0);
SHOW_FUNCTION(deadline_front_merges_show, dd->front_merges, 0);
SHOW_FUNCTION(deadline_fifo_batch_show, dd->fifo_batch, 0);
#undef SHOW_FUNCTION

#define STORE_FUNCTION(__FUNC, __PTR, MIN, MAX, __CONV)                 \
static ssize_t __FUNC(struct elevator_queue *e, const char *page, size_t count) \
{                                                                       \
        struct deadline_data *dd = e->elevator_data;                    \
        int __data;                                                     \
        deadline_var_store(&__data, (page));                            \
        if (__data < (MIN))                                             \
                __data = (MIN);                                         \
        else if (__data > (MAX))                                        \
                __data = (MAX);                                         \
        if (__CONV)                                                     \
                *(__PTR) = msecs_to_jiffies(__data);                    \
        else                                                            \
                *(__PTR) = __data;                                      \
        return count;                                                   \
}
STORE_FUNCTION(deadline_read_expire_store, &dd->fifo_expire[READ], 0, INT_MAX, 1);
STORE_FUNCTION(deadline_write_expire_store, &dd->fifo_expire[WRITE], 0, INT_MAX, 1);
STORE_FUNCTION(deadline_writes_starved_store, &dd->writes_starved, INT_MIN, INT_MAX, 0);
STORE_FUNCTION(deadline_front_merges_store, &dd->front_merges, 0, 1, 0);
STORE_FUNCTION(deadline_fifo_batch_store, &dd->fifo_batch, 0, INT_MAX, 0);
#undef STORE_FUNCTION
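
/*
 * The macros above generate one show/store pair per tunable; __CONV selects
 * jiffies<->milliseconds conversion for the expiry values. The resulting
 * files appear under /sys/block/<dev>/queue/iosched/, e.g. (device name
 * assumed for illustration):
 *
 *      echo 250 > /sys/block/sda/queue/iosched/read_expire
 */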

#define DD_ATTR(name) \
        __ATTR(name, 0644, deadline_##name##_show, deadline_##name##_store)

static struct elv_fs_entry deadline_attrs[] = {
        DD_ATTR(read_expire),
        DD_ATTR(write_expire),
        DD_ATTR(writes_starved),
        DD_ATTR(front_merges),
        DD_ATTR(fifo_batch),
        __ATTR_NULL
};

#ifdef CONFIG_BLK_DEBUG_FS
#define DEADLINE_DEBUGFS_DDIR_ATTRS(ddir, name)                         \
static void *deadline_##name##_fifo_start(struct seq_file *m,           \
                                          loff_t *pos)                  \
        __acquires(&dd->lock)                                           \
{                                                                       \
        struct request_queue *q = m->private;                           \
        struct deadline_data *dd = q->elevator->elevator_data;          \
                                                                        \
        spin_lock(&dd->lock);                                           \
        return seq_list_start(&dd->fifo_list[ddir], *pos);              \
}                                                                       \
                                                                        \
static void *deadline_##name##_fifo_next(struct seq_file *m, void *v,   \
                                         loff_t *pos)                   \
{                                                                       \
        struct request_queue *q = m->private;                           \
        struct deadline_data *dd = q->elevator->elevator_data;          \
                                                                        \
        return seq_list_next(v, &dd->fifo_list[ddir], pos);             \
}                                                                       \
                                                                        \
static void deadline_##name##_fifo_stop(struct seq_file *m, void *v)    \
        __releases(&dd->lock)                                           \
{                                                                       \
        struct request_queue *q = m->private;                           \
        struct deadline_data *dd = q->elevator->elevator_data;          \
                                                                        \
        spin_unlock(&dd->lock);                                         \
}                                                                       \
                                                                        \
static const struct seq_operations deadline_##name##_fifo_seq_ops = {  \
        .start  = deadline_##name##_fifo_start,                         \
        .next   = deadline_##name##_fifo_next,                          \
        .stop   = deadline_##name##_fifo_stop,                          \
        .show   = blk_mq_debugfs_rq_show,                               \
};                                                                      \
                                                                        \
static int deadline_##name##_next_rq_show(void *data,                   \
                                          struct seq_file *m)           \
{                                                                       \
        struct request_queue *q = data;                                 \
        struct deadline_data *dd = q->elevator->elevator_data;          \
        struct request *rq = dd->next_rq[ddir];                         \
                                                                        \
        if (rq)                                                         \
                __blk_mq_debugfs_rq_show(m, rq);                        \
        return 0;                                                       \
}
DEADLINE_DEBUGFS_DDIR_ATTRS(READ, read)
DEADLINE_DEBUGFS_DDIR_ATTRS(WRITE, write)
#undef DEADLINE_DEBUGFS_DDIR_ATTRS
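
/*
 * Each instantiation above provides read-only debugfs views of the per
 * direction fifo list and cached next_rq; with CONFIG_BLK_DEBUG_FS enabled
 * these show up under the per-device scheduler directory registered by
 * blk-mq-debugfs (typically /sys/kernel/debug/block/<dev>/sched/).
 */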

static int deadline_batching_show(void *data, struct seq_file *m)
{
        struct request_queue *q = data;
        struct deadline_data *dd = q->elevator->elevator_data;

        seq_printf(m, "%u\n", dd->batching);
        return 0;
}

static int deadline_starved_show(void *data, struct seq_file *m)
{
        struct request_queue *q = data;
        struct deadline_data *dd = q->elevator->elevator_data;

        seq_printf(m, "%u\n", dd->starved);
        return 0;
}

static void *deadline_dispatch_start(struct seq_file *m, loff_t *pos)
        __acquires(&dd->lock)
{
        struct request_queue *q = m->private;
        struct deadline_data *dd = q->elevator->elevator_data;

        spin_lock(&dd->lock);
        return seq_list_start(&dd->dispatch, *pos);
}

static void *deadline_dispatch_next(struct seq_file *m, void *v, loff_t *pos)
{
        struct request_queue *q = m->private;
        struct deadline_data *dd = q->elevator->elevator_data;

        return seq_list_next(v, &dd->dispatch, pos);
}

static void deadline_dispatch_stop(struct seq_file *m, void *v)
        __releases(&dd->lock)
{
        struct request_queue *q = m->private;
        struct deadline_data *dd = q->elevator->elevator_data;

        spin_unlock(&dd->lock);
}

static const struct seq_operations deadline_dispatch_seq_ops = {
        .start  = deadline_dispatch_start,
        .next   = deadline_dispatch_next,
        .stop   = deadline_dispatch_stop,
        .show   = blk_mq_debugfs_rq_show,
};

#define DEADLINE_QUEUE_DDIR_ATTRS(name)                                         \
        {#name "_fifo_list", 0400, .seq_ops = &deadline_##name##_fifo_seq_ops}, \
        {#name "_next_rq", 0400, deadline_##name##_next_rq_show}
static const struct blk_mq_debugfs_attr deadline_queue_debugfs_attrs[] = {
        DEADLINE_QUEUE_DDIR_ATTRS(read),
        DEADLINE_QUEUE_DDIR_ATTRS(write),
        {"batching", 0400, deadline_batching_show},
        {"starved", 0400, deadline_starved_show},
        {"dispatch", 0400, .seq_ops = &deadline_dispatch_seq_ops},
        {},
};
#undef DEADLINE_QUEUE_DDIR_ATTRS
#endif

static struct elevator_type mq_deadline = {
        .ops = {
                .insert_requests        = dd_insert_requests,
                .dispatch_request       = dd_dispatch_request,
                .prepare_request        = dd_prepare_request,
                .finish_request         = dd_finish_request,
                .next_request           = elv_rb_latter_request,
                .former_request         = elv_rb_former_request,
                .bio_merge              = dd_bio_merge,
                .request_merge          = dd_request_merge,
                .requests_merged        = dd_merged_requests,
                .request_merged         = dd_request_merged,
                .has_work               = dd_has_work,
                .init_sched             = dd_init_queue,
                .exit_sched             = dd_exit_queue,
        },

#ifdef CONFIG_BLK_DEBUG_FS
        .queue_debugfs_attrs = deadline_queue_debugfs_attrs,
#endif
        .elevator_attrs = deadline_attrs,
        .elevator_name = "mq-deadline",
        .elevator_alias = "deadline",
        .elevator_features = ELEVATOR_F_ZBD_SEQ_WRITE,
        .elevator_owner = THIS_MODULE,
};
MODULE_ALIAS("mq-deadline-iosched");
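
/*
 * elevator_alias lets this scheduler also be selected by its legacy name
 * "deadline", and ELEVATOR_F_ZBD_SEQ_WRITE advertises that sequential write
 * ordering is preserved on zoned block devices, which allows the block layer
 * to prefer this elevator for devices that require that guarantee.
 */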

static int __init deadline_init(void)
{
        return elv_register(&mq_deadline);
}

static void __exit deadline_exit(void)
{
        elv_unregister(&mq_deadline);
}

module_init(deadline_init);
module_exit(deadline_exit);

MODULE_AUTHOR("Jens Axboe");
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("MQ deadline IO scheduler");