// SPDX-License-Identifier: GPL-2.0
/*
 *  MQ Deadline i/o scheduler - adaptation of the legacy deadline scheduler,
 *  for the blk-mq scheduling framework
 *
 *  Copyright (C) 2016 Jens Axboe <axboe@kernel.dk>
 */
#include <linux/kernel.h>
#include <linux/fs.h>
#include <linux/blkdev.h>
#include <linux/blk-mq.h>
#include <linux/elevator.h>
#include <linux/bio.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/init.h>
#include <linux/compiler.h>
#include <linux/rbtree.h>
#include <linux/sbitmap.h>

#include <trace/events/block.h>

#include "blk.h"
#include "blk-mq.h"
#include "blk-mq-debugfs.h"
#include "blk-mq-tag.h"
#include "blk-mq-sched.h"

/*
 * See Documentation/block/deadline-iosched.rst
 */
static const int read_expire = HZ / 2;  /* max time before a read is submitted. */
static const int write_expire = 5 * HZ; /* ditto for writes, these limits are SOFT! */
static const int writes_starved = 2;    /* max times reads can starve a write */
static const int fifo_batch = 16;       /* # of sequential requests treated as one
                                           by the above parameters. For throughput. */

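/*
 * These defaults are runtime-tunable through the scheduler's sysfs
 * attributes (the device name below is only an example):
 *
 *   echo mq-deadline > /sys/block/sda/queue/scheduler
 *   echo 100 > /sys/block/sda/queue/iosched/read_expire
 *
 * read_expire and write_expire are exposed in milliseconds; the
 * STORE_FUNCTION() helpers further down convert them to jiffies.
 */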
struct deadline_data {
        /*
         * run time data
         */

        /*
         * Requests are present on both sort_list (sector-sorted rbtree)
         * and fifo_list (arrival order), indexed by data direction.
         */
        struct rb_root sort_list[2];
        struct list_head fifo_list[2];

        /*
         * next in sort order; read, write or both may be NULL
         */
        struct request *next_rq[2];
        unsigned int batching;          /* number of sequential requests made */
        unsigned int starved;           /* times reads have starved writes */

        /*
         * settings that change how the i/o scheduler behaves
         */
        int fifo_expire[2];
        int fifo_batch;
        int writes_starved;
        int front_merges;

        spinlock_t lock;
        spinlock_t zone_lock;
        struct list_head dispatch;
};
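
/*
 * Locking: dd->lock serializes request insertion, dispatch and merging.
 * dd->zone_lock protects the zoned-device dispatch path against request
 * completion and is taken irq-safe, since dd_finish_request() may run
 * from interrupt context.
 */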

static inline struct rb_root *
deadline_rb_root(struct deadline_data *dd, struct request *rq)
{
        return &dd->sort_list[rq_data_dir(rq)];
}

/*
 * get the request after `rq' in sector-sorted order
 */
static inline struct request *
deadline_latter_request(struct request *rq)
{
        struct rb_node *node = rb_next(&rq->rb_node);

        if (node)
                return rb_entry_rq(node);

        return NULL;
}

static void
deadline_add_rq_rb(struct deadline_data *dd, struct request *rq)
{
        struct rb_root *root = deadline_rb_root(dd, rq);

        elv_rb_add(root, rq);
}

static inline void
deadline_del_rq_rb(struct deadline_data *dd, struct request *rq)
{
        const int data_dir = rq_data_dir(rq);

        if (dd->next_rq[data_dir] == rq)
                dd->next_rq[data_dir] = deadline_latter_request(rq);

        elv_rb_del(deadline_rb_root(dd, rq), rq);
}

/*
 * remove rq from rbtree and fifo.
 */
static void deadline_remove_request(struct request_queue *q, struct request *rq)
{
        struct deadline_data *dd = q->elevator->elevator_data;

        list_del_init(&rq->queuelist);

        /*
         * We might not be on the rbtree, if we are doing an insert merge
         */
        if (!RB_EMPTY_NODE(&rq->rb_node))
                deadline_del_rq_rb(dd, rq);

        elv_rqhash_del(q, rq);
        if (q->last_merge == rq)
                q->last_merge = NULL;
}

static void dd_request_merged(struct request_queue *q, struct request *req,
                              enum elv_merge type)
{
        struct deadline_data *dd = q->elevator->elevator_data;

        /*
         * If the merge was a front merge, the request's start sector changed,
         * so we need to reposition it in the sector-sorted rbtree.
         */
        if (type == ELEVATOR_FRONT_MERGE) {
                elv_rb_del(deadline_rb_root(dd, req), req);
                deadline_add_rq_rb(dd, req);
        }
}

static void dd_merged_requests(struct request_queue *q, struct request *req,
                               struct request *next)
{
        /*
         * If next expires before req, assign its expire time to req
         * and move into next's position (next will be deleted) in the fifo.
         */
        if (!list_empty(&req->queuelist) && !list_empty(&next->queuelist)) {
                if (time_before((unsigned long)next->fifo_time,
                                (unsigned long)req->fifo_time)) {
                        list_move(&req->queuelist, &next->queuelist);
                        req->fifo_time = next->fifo_time;
                }
        }

        /*
         * kill knowledge of next, this one is a goner
         */
        deadline_remove_request(q, next);
}

/*
 * move an entry to dispatch queue
 */
static void
deadline_move_request(struct deadline_data *dd, struct request *rq)
{
        const int data_dir = rq_data_dir(rq);

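        /*
         * Only remember the sort-order successor in the direction being
         * dispatched; a non-NULL next_rq is what lets __dd_dispatch_request()
         * keep a batch going in one direction.
         */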
        dd->next_rq[READ] = NULL;
        dd->next_rq[WRITE] = NULL;
        dd->next_rq[data_dir] = deadline_latter_request(rq);

        /*
         * take it off the sort and fifo list
         */
        deadline_remove_request(rq->q, rq);
}

/*
 * deadline_check_fifo returns 0 if there are no expired requests on the fifo,
 * 1 otherwise. Requires !list_empty(&dd->fifo_list[data_dir])
 */
static inline int deadline_check_fifo(struct deadline_data *dd, int ddir)
{
        struct request *rq = rq_entry_fifo(dd->fifo_list[ddir].next);

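        /*
         * fifo_time is in jiffies; time_after_eq() keeps this comparison
         * correct across jiffies wraparound.
         */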
        /*
         * rq is expired!
         */
        if (time_after_eq(jiffies, (unsigned long)rq->fifo_time))
                return 1;

        return 0;
}

/*
 * For the specified data direction, return the next request to
 * dispatch using arrival ordered lists.
 */
static struct request *
deadline_fifo_request(struct deadline_data *dd, int data_dir)
{
        struct request *rq;
        unsigned long flags;

        if (WARN_ON_ONCE(data_dir != READ && data_dir != WRITE))
                return NULL;

        if (list_empty(&dd->fifo_list[data_dir]))
                return NULL;

        rq = rq_entry_fifo(dd->fifo_list[data_dir].next);
        if (data_dir == READ || !blk_queue_is_zoned(rq->q))
                return rq;

        /*
         * Look for a write request that can be dispatched, that is one with
         * an unlocked target zone.
         */
        spin_lock_irqsave(&dd->zone_lock, flags);
        list_for_each_entry(rq, &dd->fifo_list[WRITE], queuelist) {
                if (blk_req_can_dispatch_to_zone(rq))
                        goto out;
        }
        rq = NULL;
out:
        spin_unlock_irqrestore(&dd->zone_lock, flags);

        return rq;
}

/*
 * For the specified data direction, return the next request to
 * dispatch using sector position sorted lists.
 */
static struct request *
deadline_next_request(struct deadline_data *dd, int data_dir)
{
        struct request *rq;
        unsigned long flags;

        if (WARN_ON_ONCE(data_dir != READ && data_dir != WRITE))
                return NULL;

        rq = dd->next_rq[data_dir];
        if (!rq)
                return NULL;

        if (data_dir == READ || !blk_queue_is_zoned(rq->q))
                return rq;

        /*
         * Look for a write request that can be dispatched, that is one with
         * an unlocked target zone.
         */
        spin_lock_irqsave(&dd->zone_lock, flags);
        while (rq) {
                if (blk_req_can_dispatch_to_zone(rq))
                        break;
                rq = deadline_latter_request(rq);
        }
        spin_unlock_irqrestore(&dd->zone_lock, flags);

        return rq;
}

/*
 * deadline_dispatch_requests selects the best request according to
 * read/write expire, fifo_batch, etc
 */
static struct request *__dd_dispatch_request(struct deadline_data *dd)
{
        struct request *rq, *next_rq;
        bool reads, writes;
        int data_dir;

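        /*
         * Requests on the dispatch list (at-head and passthrough inserts)
         * bypass the sort/fifo machinery and are handed out first.
         */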
        if (!list_empty(&dd->dispatch)) {
                rq = list_first_entry(&dd->dispatch, struct request, queuelist);
                list_del_init(&rq->queuelist);
                goto done;
        }

        reads = !list_empty(&dd->fifo_list[READ]);
        writes = !list_empty(&dd->fifo_list[WRITE]);

        /*
         * batches are currently reads XOR writes
         */
        rq = deadline_next_request(dd, WRITE);
        if (!rq)
                rq = deadline_next_request(dd, READ);

        if (rq && dd->batching < dd->fifo_batch)
                /* we have a next request and are still entitled to batch */
                goto dispatch_request;

        /*
         * At this point we are not running a batch; select the appropriate
         * data direction (read / write).
         */

        if (reads) {
                BUG_ON(RB_EMPTY_ROOT(&dd->sort_list[READ]));

                if (deadline_fifo_request(dd, WRITE) &&
                    (dd->starved++ >= dd->writes_starved))
                        goto dispatch_writes;

                data_dir = READ;

                goto dispatch_find_request;
        }

        /*
         * Either there are no reads, or writes have been starved for long
         * enough that they must be serviced now.
         */

        if (writes) {
dispatch_writes:
                BUG_ON(RB_EMPTY_ROOT(&dd->sort_list[WRITE]));

                dd->starved = 0;

                data_dir = WRITE;

                goto dispatch_find_request;
        }

        return NULL;

dispatch_find_request:
        /*
         * we are not running a batch, find best request for selected data_dir
         */
        next_rq = deadline_next_request(dd, data_dir);
        if (deadline_check_fifo(dd, data_dir) || !next_rq) {
                /*
                 * A deadline has expired, the last request was in the other
                 * direction, or we have run out of higher-sectored requests.
                 * Start again from the request with the earliest expiry time.
                 */
                rq = deadline_fifo_request(dd, data_dir);
        } else {
                /*
                 * The last req was the same dir and we have a next request in
                 * sort order. No expired requests so continue on from here.
                 */
                rq = next_rq;
        }

        /*
         * For a zoned block device, if we only have writes queued and none of
         * them can be dispatched, rq will be NULL.
         */
        if (!rq)
                return NULL;

        dd->batching = 0;

dispatch_request:
        /*
         * rq is the selected request; dispatch it.
         */
        dd->batching++;
        deadline_move_request(dd, rq);
done:
        /*
         * If the request needs its target zone locked, do it.
         */
        blk_req_zone_write_lock(rq);
        rq->rq_flags |= RQF_STARTED;
        return rq;
}

/*
 * One confusing aspect here is that we get called for a specific
 * hardware queue, but we may return a request that is for a
 * different hardware queue. This is because mq-deadline has shared
 * state for all hardware queues, in terms of sorting, FIFOs, etc.
 */
static struct request *dd_dispatch_request(struct blk_mq_hw_ctx *hctx)
{
        struct deadline_data *dd = hctx->queue->elevator->elevator_data;
        struct request *rq;

        spin_lock(&dd->lock);
        rq = __dd_dispatch_request(dd);
        spin_unlock(&dd->lock);

        return rq;
}

static void dd_exit_queue(struct elevator_queue *e)
{
        struct deadline_data *dd = e->elevator_data;

        BUG_ON(!list_empty(&dd->fifo_list[READ]));
        BUG_ON(!list_empty(&dd->fifo_list[WRITE]));

        kfree(dd);
}

/*
 * initialize elevator private data (deadline_data).
 */
static int dd_init_queue(struct request_queue *q, struct elevator_type *e)
{
        struct deadline_data *dd;
        struct elevator_queue *eq;

        eq = elevator_alloc(q, e);
        if (!eq)
                return -ENOMEM;

        dd = kzalloc_node(sizeof(*dd), GFP_KERNEL, q->node);
        if (!dd) {
                kobject_put(&eq->kobj);
                return -ENOMEM;
        }
        eq->elevator_data = dd;

        INIT_LIST_HEAD(&dd->fifo_list[READ]);
        INIT_LIST_HEAD(&dd->fifo_list[WRITE]);
        dd->sort_list[READ] = RB_ROOT;
        dd->sort_list[WRITE] = RB_ROOT;
        dd->fifo_expire[READ] = read_expire;
        dd->fifo_expire[WRITE] = write_expire;
        dd->writes_starved = writes_starved;
        dd->front_merges = 1;
        dd->fifo_batch = fifo_batch;
        spin_lock_init(&dd->lock);
        spin_lock_init(&dd->zone_lock);
        INIT_LIST_HEAD(&dd->dispatch);

        q->elevator = eq;
        return 0;
}

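/*
 * Try a front merge: look up a queued request whose start sector equals
 * bio_end_sector(bio) in the sector-sorted rbtree for bio's data direction.
 */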
static int dd_request_merge(struct request_queue *q, struct request **rq,
                            struct bio *bio)
{
        struct deadline_data *dd = q->elevator->elevator_data;
        sector_t sector = bio_end_sector(bio);
        struct request *__rq;

        if (!dd->front_merges)
                return ELEVATOR_NO_MERGE;

        __rq = elv_rb_find(&dd->sort_list[bio_data_dir(bio)], sector);
        if (__rq) {
                BUG_ON(sector != blk_rq_pos(__rq));

                if (elv_bio_merge_ok(__rq, bio)) {
                        *rq = __rq;
                        return ELEVATOR_FRONT_MERGE;
                }
        }

        return ELEVATOR_NO_MERGE;
}

static bool dd_bio_merge(struct blk_mq_hw_ctx *hctx, struct bio *bio,
                         unsigned int nr_segs)
{
        struct request_queue *q = hctx->queue;
        struct deadline_data *dd = q->elevator->elevator_data;
        struct request *free = NULL;
        bool ret;

        spin_lock(&dd->lock);
        ret = blk_mq_sched_try_merge(q, bio, nr_segs, &free);
        spin_unlock(&dd->lock);

        if (free)
                blk_mq_free_request(free);

        return ret;
}

/*
 * add rq to rbtree and fifo
 */
static void dd_insert_request(struct blk_mq_hw_ctx *hctx, struct request *rq,
                              bool at_head)
{
        struct request_queue *q = hctx->queue;
        struct deadline_data *dd = q->elevator->elevator_data;
        const int data_dir = rq_data_dir(rq);

        /*
         * This may be a requeue of a write request that has locked its
         * target zone. If it is the case, this releases the zone lock.
         */
        blk_req_zone_write_unlock(rq);

        if (blk_mq_sched_try_insert_merge(q, rq))
                return;

        trace_block_rq_insert(rq);

        if (at_head || blk_rq_is_passthrough(rq)) {
                if (at_head)
                        list_add(&rq->queuelist, &dd->dispatch);
                else
                        list_add_tail(&rq->queuelist, &dd->dispatch);
        } else {
                deadline_add_rq_rb(dd, rq);

                if (rq_mergeable(rq)) {
                        elv_rqhash_add(q, rq);
                        if (!q->last_merge)
                                q->last_merge = rq;
                }

                /*
                 * set expire time and add to fifo list
                 */
                rq->fifo_time = jiffies + dd->fifo_expire[data_dir];
                list_add_tail(&rq->queuelist, &dd->fifo_list[data_dir]);
        }
}

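/*
 * Insert a batch of requests while taking dd->lock only once: the caller
 * hands over a list, and each request is spliced off and inserted in turn.
 */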
static void dd_insert_requests(struct blk_mq_hw_ctx *hctx,
                               struct list_head *list, bool at_head)
{
        struct request_queue *q = hctx->queue;
        struct deadline_data *dd = q->elevator->elevator_data;

        spin_lock(&dd->lock);
        while (!list_empty(list)) {
                struct request *rq;

                rq = list_first_entry(list, struct request, queuelist);
                list_del_init(&rq->queuelist);
                dd_insert_request(hctx, rq, at_head);
        }
        spin_unlock(&dd->lock);
}

/*
 * Nothing to do here. This is defined only to ensure that the
 * .finish_request method is called upon request completion.
 */
static void dd_prepare_request(struct request *rq)
{
}

/*
 * For zoned block devices, write unlock the target zone of
 * completed write requests. Do this while holding the zone lock
 * spinlock so that the zone is never unlocked while deadline_fifo_request()
 * or deadline_next_request() are executing. This function is called for
 * all requests, whether or not these requests complete successfully.
 *
 * For a zoned block device, __dd_dispatch_request() may have stopped
 * dispatching requests if all the queued requests are write requests directed
 * at zones that are already locked due to on-going write requests. To ensure
 * write request dispatch progress in this case, mark the queue as needing a
 * restart to ensure that the queue is run again after completion of the
 * request and zones being unlocked.
 */
static void dd_finish_request(struct request *rq)
{
        struct request_queue *q = rq->q;

        if (blk_queue_is_zoned(q)) {
                struct deadline_data *dd = q->elevator->elevator_data;
                unsigned long flags;

                spin_lock_irqsave(&dd->zone_lock, flags);
                blk_req_zone_write_unlock(rq);
                if (!list_empty(&dd->fifo_list[WRITE]))
                        blk_mq_sched_mark_restart_hctx(rq->mq_hctx);
                spin_unlock_irqrestore(&dd->zone_lock, flags);
        }
}

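/*
 * Called without dd->lock held; list_empty_careful() gives a lockless and
 * possibly stale answer, which is fine for a "might there be work" hint.
 */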
static bool dd_has_work(struct blk_mq_hw_ctx *hctx)
{
        struct deadline_data *dd = hctx->queue->elevator->elevator_data;

        return !list_empty_careful(&dd->dispatch) ||
                !list_empty_careful(&dd->fifo_list[READ]) ||
                !list_empty_careful(&dd->fifo_list[WRITE]);
}

/*
 * sysfs parts below
 */
static ssize_t
deadline_var_show(int var, char *page)
{
        return sprintf(page, "%d\n", var);
}

static void
deadline_var_store(int *var, const char *page)
{
        char *p = (char *) page;

        *var = simple_strtol(p, &p, 10);
}

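/*
 * SHOW_FUNCTION()/STORE_FUNCTION() stamp out one sysfs handler per tunable.
 * With __CONV set, a value is shown in milliseconds but stored internally
 * as jiffies.
 */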
#define SHOW_FUNCTION(__FUNC, __VAR, __CONV)                            \
static ssize_t __FUNC(struct elevator_queue *e, char *page)             \
{                                                                       \
        struct deadline_data *dd = e->elevator_data;                    \
        int __data = __VAR;                                             \
        if (__CONV)                                                     \
                __data = jiffies_to_msecs(__data);                      \
        return deadline_var_show(__data, (page));                       \
}
SHOW_FUNCTION(deadline_read_expire_show, dd->fifo_expire[READ], 1);
SHOW_FUNCTION(deadline_write_expire_show, dd->fifo_expire[WRITE], 1);
SHOW_FUNCTION(deadline_writes_starved_show, dd->writes_starved, 0);
SHOW_FUNCTION(deadline_front_merges_show, dd->front_merges, 0);
SHOW_FUNCTION(deadline_fifo_batch_show, dd->fifo_batch, 0);
#undef SHOW_FUNCTION

#define STORE_FUNCTION(__FUNC, __PTR, MIN, MAX, __CONV)                 \
static ssize_t __FUNC(struct elevator_queue *e, const char *page, size_t count) \
{                                                                       \
        struct deadline_data *dd = e->elevator_data;                    \
        int __data;                                                     \
        deadline_var_store(&__data, (page));                            \
        if (__data < (MIN))                                             \
                __data = (MIN);                                         \
        else if (__data > (MAX))                                        \
                __data = (MAX);                                         \
        if (__CONV)                                                     \
                *(__PTR) = msecs_to_jiffies(__data);                    \
        else                                                            \
                *(__PTR) = __data;                                      \
        return count;                                                   \
}
STORE_FUNCTION(deadline_read_expire_store, &dd->fifo_expire[READ], 0, INT_MAX, 1);
STORE_FUNCTION(deadline_write_expire_store, &dd->fifo_expire[WRITE], 0, INT_MAX, 1);
STORE_FUNCTION(deadline_writes_starved_store, &dd->writes_starved, INT_MIN, INT_MAX, 0);
STORE_FUNCTION(deadline_front_merges_store, &dd->front_merges, 0, 1, 0);
STORE_FUNCTION(deadline_fifo_batch_store, &dd->fifo_batch, 0, INT_MAX, 0);
#undef STORE_FUNCTION

#define DD_ATTR(name) \
        __ATTR(name, 0644, deadline_##name##_show, deadline_##name##_store)

static struct elv_fs_entry deadline_attrs[] = {
        DD_ATTR(read_expire),
        DD_ATTR(write_expire),
        DD_ATTR(writes_starved),
        DD_ATTR(front_merges),
        DD_ATTR(fifo_batch),
        __ATTR_NULL
};

#ifdef CONFIG_BLK_DEBUG_FS
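/*
 * The debugfs attributes below (typically under
 * /sys/kernel/debug/block/<dev>/sched/) expose the scheduler's internal
 * state: per-direction FIFO contents and next_rq, plus the batching and
 * starvation counters and the dispatch list.
 */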
#define DEADLINE_DEBUGFS_DDIR_ATTRS(ddir, name)                         \
static void *deadline_##name##_fifo_start(struct seq_file *m,           \
                                          loff_t *pos)                  \
        __acquires(&dd->lock)                                           \
{                                                                       \
        struct request_queue *q = m->private;                           \
        struct deadline_data *dd = q->elevator->elevator_data;          \
                                                                        \
        spin_lock(&dd->lock);                                           \
        return seq_list_start(&dd->fifo_list[ddir], *pos);              \
}                                                                       \
                                                                        \
static void *deadline_##name##_fifo_next(struct seq_file *m, void *v,   \
                                         loff_t *pos)                   \
{                                                                       \
        struct request_queue *q = m->private;                           \
        struct deadline_data *dd = q->elevator->elevator_data;          \
                                                                        \
        return seq_list_next(v, &dd->fifo_list[ddir], pos);             \
}                                                                       \
                                                                        \
static void deadline_##name##_fifo_stop(struct seq_file *m, void *v)    \
        __releases(&dd->lock)                                           \
{                                                                       \
        struct request_queue *q = m->private;                           \
        struct deadline_data *dd = q->elevator->elevator_data;          \
                                                                        \
        spin_unlock(&dd->lock);                                         \
}                                                                       \
                                                                        \
static const struct seq_operations deadline_##name##_fifo_seq_ops = {  \
        .start  = deadline_##name##_fifo_start,                         \
        .next   = deadline_##name##_fifo_next,                          \
        .stop   = deadline_##name##_fifo_stop,                          \
        .show   = blk_mq_debugfs_rq_show,                               \
};                                                                      \
                                                                        \
static int deadline_##name##_next_rq_show(void *data,                   \
                                          struct seq_file *m)           \
{                                                                       \
        struct request_queue *q = data;                                 \
        struct deadline_data *dd = q->elevator->elevator_data;          \
        struct request *rq = dd->next_rq[ddir];                         \
                                                                        \
        if (rq)                                                         \
                __blk_mq_debugfs_rq_show(m, rq);                        \
        return 0;                                                       \
}
DEADLINE_DEBUGFS_DDIR_ATTRS(READ, read)
DEADLINE_DEBUGFS_DDIR_ATTRS(WRITE, write)
#undef DEADLINE_DEBUGFS_DDIR_ATTRS

static int deadline_batching_show(void *data, struct seq_file *m)
{
        struct request_queue *q = data;
        struct deadline_data *dd = q->elevator->elevator_data;

        seq_printf(m, "%u\n", dd->batching);
        return 0;
}

static int deadline_starved_show(void *data, struct seq_file *m)
{
        struct request_queue *q = data;
        struct deadline_data *dd = q->elevator->elevator_data;

        seq_printf(m, "%u\n", dd->starved);
        return 0;
}

static void *deadline_dispatch_start(struct seq_file *m, loff_t *pos)
        __acquires(&dd->lock)
{
        struct request_queue *q = m->private;
        struct deadline_data *dd = q->elevator->elevator_data;

        spin_lock(&dd->lock);
        return seq_list_start(&dd->dispatch, *pos);
}

static void *deadline_dispatch_next(struct seq_file *m, void *v, loff_t *pos)
{
        struct request_queue *q = m->private;
        struct deadline_data *dd = q->elevator->elevator_data;

        return seq_list_next(v, &dd->dispatch, pos);
}

static void deadline_dispatch_stop(struct seq_file *m, void *v)
        __releases(&dd->lock)
{
        struct request_queue *q = m->private;
        struct deadline_data *dd = q->elevator->elevator_data;

        spin_unlock(&dd->lock);
}

static const struct seq_operations deadline_dispatch_seq_ops = {
        .start  = deadline_dispatch_start,
        .next   = deadline_dispatch_next,
        .stop   = deadline_dispatch_stop,
        .show   = blk_mq_debugfs_rq_show,
};

#define DEADLINE_QUEUE_DDIR_ATTRS(name)                                 \
        {#name "_fifo_list", 0400, .seq_ops = &deadline_##name##_fifo_seq_ops}, \
        {#name "_next_rq", 0400, deadline_##name##_next_rq_show}
static const struct blk_mq_debugfs_attr deadline_queue_debugfs_attrs[] = {
        DEADLINE_QUEUE_DDIR_ATTRS(read),
        DEADLINE_QUEUE_DDIR_ATTRS(write),
        {"batching", 0400, deadline_batching_show},
        {"starved", 0400, deadline_starved_show},
        {"dispatch", 0400, .seq_ops = &deadline_dispatch_seq_ops},
        {},
};
#undef DEADLINE_QUEUE_DDIR_ATTRS
#endif

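/*
 * ELEVATOR_F_ZBD_SEQ_WRITE advertises that this scheduler preserves the
 * sequential write ordering that host-managed zoned block devices require,
 * which is why it is typically the scheduler of choice for such devices.
 */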
static struct elevator_type mq_deadline = {
        .ops = {
                .insert_requests       = dd_insert_requests,
                .dispatch_request      = dd_dispatch_request,
                .prepare_request       = dd_prepare_request,
                .finish_request        = dd_finish_request,
                .next_request          = elv_rb_latter_request,
                .former_request        = elv_rb_former_request,
                .bio_merge             = dd_bio_merge,
                .request_merge         = dd_request_merge,
                .requests_merged       = dd_merged_requests,
                .request_merged        = dd_request_merged,
                .has_work              = dd_has_work,
                .init_sched            = dd_init_queue,
                .exit_sched            = dd_exit_queue,
        },

#ifdef CONFIG_BLK_DEBUG_FS
        .queue_debugfs_attrs = deadline_queue_debugfs_attrs,
#endif
        .elevator_attrs = deadline_attrs,
        .elevator_name = "mq-deadline",
        .elevator_alias = "deadline",
        .elevator_features = ELEVATOR_F_ZBD_SEQ_WRITE,
        .elevator_owner = THIS_MODULE,
};
MODULE_ALIAS("mq-deadline-iosched");

static int __init deadline_init(void)
{
        return elv_register(&mq_deadline);
}

static void __exit deadline_exit(void)
{
        elv_unregister(&mq_deadline);
}

module_init(deadline_init);
module_exit(deadline_exit);

MODULE_AUTHOR("Jens Axboe");
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("MQ deadline IO scheduler");