// SPDX-License-Identifier: GPL-2.0
/*
 *  MQ Deadline i/o scheduler - adaptation of the legacy deadline scheduler,
 *  for the blk-mq scheduling framework
 *
 *  Copyright (C) 2016 Jens Axboe <axboe@kernel.dk>
 */
#include <linux/kernel.h>
#include <linux/blkdev.h>
#include <linux/blk-mq.h>
#include <linux/elevator.h>
#include <linux/bio.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/init.h>
#include <linux/compiler.h>
#include <linux/rbtree.h>
#include <linux/sbitmap.h>

#include <trace/events/block.h>

#include "blk-mq-debugfs.h"
#include "blk-mq-tag.h"
#include "blk-mq-sched.h"
/*
 * See Documentation/block/deadline-iosched.rst
 */
static const int read_expire = HZ / 2;  /* max time before a read is submitted. */
static const int write_expire = 5 * HZ; /* ditto for writes, these limits are SOFT! */
static const int writes_starved = 2;    /* max times reads can starve a write */
static const int fifo_batch = 16;       /* # of sequential requests treated as one
					   by the above parameters. For throughput. */
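
/*
 * These defaults are only starting values: dd_init_queue() copies them into
 * struct deadline_data, and the sysfs attributes defined further below allow
 * them to be changed at runtime (the expiry values are reported and set in
 * milliseconds, the remaining tunables as plain integers).
 */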
struct deadline_data {
	/*
	 * requests (deadline_rq s) are present on both sort_list and fifo_list
	 */
	struct rb_root sort_list[2];
	struct list_head fifo_list[2];

	/*
	 * next in sort order. read, write or both are NULL
	 */
	struct request *next_rq[2];
	unsigned int batching;		/* number of sequential requests made */
	unsigned int starved;		/* times reads have starved writes */

	/*
	 * settings that change how the i/o scheduler behaves
	 */
	int fifo_expire[2];
	int fifo_batch;
	int writes_starved;
	int front_merges;

	spinlock_t lock;
	spinlock_t zone_lock;
	struct list_head dispatch;
};
static inline struct rb_root *
deadline_rb_root(struct deadline_data *dd, struct request *rq)
{
	return &dd->sort_list[rq_data_dir(rq)];
}
/*
 * get the request after `rq' in sector-sorted order
 */
static inline struct request *
deadline_latter_request(struct request *rq)
{
	struct rb_node *node = rb_next(&rq->rb_node);

	if (node)
		return rb_entry_rq(node);

	return NULL;
}
static void
deadline_add_rq_rb(struct deadline_data *dd, struct request *rq)
{
	struct rb_root *root = deadline_rb_root(dd, rq);

	elv_rb_add(root, rq);
}
static inline void
deadline_del_rq_rb(struct deadline_data *dd, struct request *rq)
{
	const int data_dir = rq_data_dir(rq);

	if (dd->next_rq[data_dir] == rq)
		dd->next_rq[data_dir] = deadline_latter_request(rq);

	elv_rb_del(deadline_rb_root(dd, rq), rq);
}
/*
 * remove rq from rbtree and fifo.
 */
static void deadline_remove_request(struct request_queue *q, struct request *rq)
{
	struct deadline_data *dd = q->elevator->elevator_data;

	list_del_init(&rq->queuelist);

	/*
	 * We might not be on the rbtree, if we are doing an insert merge
	 */
	if (!RB_EMPTY_NODE(&rq->rb_node))
		deadline_del_rq_rb(dd, rq);

	elv_rqhash_del(q, rq);
	if (q->last_merge == rq)
		q->last_merge = NULL;
}
static void dd_request_merged(struct request_queue *q, struct request *req,
			      enum elv_merge type)
{
	struct deadline_data *dd = q->elevator->elevator_data;

	/*
	 * if the merge was a front merge, we need to reposition request
	 */
	if (type == ELEVATOR_FRONT_MERGE) {
		elv_rb_del(deadline_rb_root(dd, req), req);
		deadline_add_rq_rb(dd, req);
	}
}
static void dd_merged_requests(struct request_queue *q, struct request *req,
			       struct request *next)
{
	/*
	 * if next expires before rq, assign its expire time to rq
	 * and move into next position (next will be deleted) in fifo
	 */
	if (!list_empty(&req->queuelist) && !list_empty(&next->queuelist)) {
		if (time_before((unsigned long)next->fifo_time,
				(unsigned long)req->fifo_time)) {
			list_move(&req->queuelist, &next->queuelist);
			req->fifo_time = next->fifo_time;
		}
	}

	/*
	 * kill knowledge of next, this one is a goner
	 */
	deadline_remove_request(q, next);
}
/*
 * move an entry to dispatch queue
 */
static void
deadline_move_request(struct deadline_data *dd, struct request *rq)
{
	const int data_dir = rq_data_dir(rq);

	dd->next_rq[READ] = NULL;
	dd->next_rq[WRITE] = NULL;
	dd->next_rq[data_dir] = deadline_latter_request(rq);

	/*
	 * take it off the sort and fifo list
	 */
	deadline_remove_request(rq->q, rq);
}
/*
 * deadline_check_fifo returns 0 if there are no expired requests on the fifo,
 * 1 otherwise. Requires !list_empty(&dd->fifo_list[data_dir])
 */
static inline int deadline_check_fifo(struct deadline_data *dd, int ddir)
{
	struct request *rq = rq_entry_fifo(dd->fifo_list[ddir].next);

	/*
	 * rq is expired!
	 */
	if (time_after_eq(jiffies, (unsigned long)rq->fifo_time))
		return 1;

	return 0;
}
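
/*
 * Worked example of the expiry check above: with the default read_expire of
 * HZ / 2, a read inserted when jiffies == J gets fifo_time == J + HZ / 2
 * (see dd_insert_request()), so deadline_check_fifo() starts reporting it as
 * expired half a second later.
 */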
/*
 * For the specified data direction, return the next request to
 * dispatch using arrival ordered lists.
 */
static struct request *
deadline_fifo_request(struct deadline_data *dd, int data_dir)
{
	struct request *rq;
	unsigned long flags;

	if (WARN_ON_ONCE(data_dir != READ && data_dir != WRITE))
		return NULL;

	if (list_empty(&dd->fifo_list[data_dir]))
		return NULL;

	rq = rq_entry_fifo(dd->fifo_list[data_dir].next);
	if (data_dir == READ || !blk_queue_is_zoned(rq->q))
		return rq;

	/*
	 * Look for a write request that can be dispatched, that is one with
	 * an unlocked target zone.
	 */
	spin_lock_irqsave(&dd->zone_lock, flags);
	list_for_each_entry(rq, &dd->fifo_list[WRITE], queuelist) {
		if (blk_req_can_dispatch_to_zone(rq))
			goto out;
	}
	rq = NULL;
out:
	spin_unlock_irqrestore(&dd->zone_lock, flags);

	return rq;
}
/*
 * For the specified data direction, return the next request to
 * dispatch using sector position sorted lists.
 */
static struct request *
deadline_next_request(struct deadline_data *dd, int data_dir)
{
	struct request *rq;
	unsigned long flags;

	if (WARN_ON_ONCE(data_dir != READ && data_dir != WRITE))
		return NULL;

	rq = dd->next_rq[data_dir];
	if (!rq)
		return NULL;

	if (data_dir == READ || !blk_queue_is_zoned(rq->q))
		return rq;

	/*
	 * Look for a write request that can be dispatched, that is one with
	 * an unlocked target zone.
	 */
	spin_lock_irqsave(&dd->zone_lock, flags);
	while (rq) {
		if (blk_req_can_dispatch_to_zone(rq))
			break;
		rq = deadline_latter_request(rq);
	}
	spin_unlock_irqrestore(&dd->zone_lock, flags);

	return rq;
}
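
/*
 * Note for zoned block devices: both helpers above skip write requests whose
 * target zone is already write-locked, and __dd_dispatch_request() takes the
 * zone write lock before returning a request. Together this keeps at most one
 * write request in flight per sequential zone.
 */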
/*
 * deadline_dispatch_requests selects the best request according to
 * read/write expire, fifo_batch, etc
 */
static struct request *__dd_dispatch_request(struct deadline_data *dd)
{
	struct request *rq, *next_rq;
	bool reads, writes;
	int data_dir;

	if (!list_empty(&dd->dispatch)) {
		rq = list_first_entry(&dd->dispatch, struct request, queuelist);
		list_del_init(&rq->queuelist);
		goto done;
	}

	reads = !list_empty(&dd->fifo_list[READ]);
	writes = !list_empty(&dd->fifo_list[WRITE]);

	/*
	 * batches are currently reads XOR writes
	 */
	rq = deadline_next_request(dd, WRITE);
	if (!rq)
		rq = deadline_next_request(dd, READ);

	if (rq && dd->batching < dd->fifo_batch)
		/* we have a next request and are still entitled to batch */
		goto dispatch_request;

	/*
	 * at this point we are not running a batch. select the appropriate
	 * data direction (read / write)
	 */
	if (reads) {
		BUG_ON(RB_EMPTY_ROOT(&dd->sort_list[READ]));

		if (deadline_fifo_request(dd, WRITE) &&
		    (dd->starved++ >= dd->writes_starved))
			goto dispatch_writes;

		data_dir = READ;

		goto dispatch_find_request;
	}

	/*
	 * there are either no reads or writes have been starved
	 */
	if (writes) {
dispatch_writes:
		BUG_ON(RB_EMPTY_ROOT(&dd->sort_list[WRITE]));

		dd->starved = 0;

		data_dir = WRITE;

		goto dispatch_find_request;
	}

	return NULL;

dispatch_find_request:
	/*
	 * we are not running a batch, find best request for selected data_dir
	 */
	next_rq = deadline_next_request(dd, data_dir);
	if (deadline_check_fifo(dd, data_dir) || !next_rq) {
		/*
		 * A deadline has expired, the last request was in the other
		 * direction, or we have run out of higher-sectored requests.
		 * Start again from the request with the earliest expiry time.
		 */
		rq = deadline_fifo_request(dd, data_dir);
	} else {
		/*
		 * The last req was the same dir and we have a next request in
		 * sort order. No expired requests so continue on from here.
		 */
		rq = next_rq;
	}

	/*
	 * For a zoned block device, if we only have writes queued and none of
	 * them can be dispatched, rq will be NULL.
	 */
	if (!rq)
		return NULL;

	dd->batching = 0;

dispatch_request:
	/*
	 * rq is the selected appropriate request.
	 */
	dd->batching++;
	deadline_move_request(dd, rq);
done:
	/*
	 * If the request needs its target zone locked, do it.
	 */
	blk_req_zone_write_lock(rq);
	rq->rq_flags |= RQF_STARTED;
	return rq;
}
/*
 * One confusing aspect here is that we get called for a specific
 * hardware queue, but we may return a request that is for a
 * different hardware queue. This is because mq-deadline has shared
 * state for all hardware queues, in terms of sorting, FIFOs, etc.
 */
static struct request *dd_dispatch_request(struct blk_mq_hw_ctx *hctx)
{
	struct deadline_data *dd = hctx->queue->elevator->elevator_data;
	struct request *rq;

	spin_lock(&dd->lock);
	rq = __dd_dispatch_request(dd);
	spin_unlock(&dd->lock);

	return rq;
}
static void dd_exit_queue(struct elevator_queue *e)
{
	struct deadline_data *dd = e->elevator_data;

	BUG_ON(!list_empty(&dd->fifo_list[READ]));
	BUG_ON(!list_empty(&dd->fifo_list[WRITE]));

	kfree(dd);
}
/*
 * initialize elevator private data (deadline_data).
 */
static int dd_init_queue(struct request_queue *q, struct elevator_type *e)
{
	struct deadline_data *dd;
	struct elevator_queue *eq;

	eq = elevator_alloc(q, e);
	if (!eq)
		return -ENOMEM;

	dd = kzalloc_node(sizeof(*dd), GFP_KERNEL, q->node);
	if (!dd) {
		kobject_put(&eq->kobj);
		return -ENOMEM;
	}
	eq->elevator_data = dd;

	INIT_LIST_HEAD(&dd->fifo_list[READ]);
	INIT_LIST_HEAD(&dd->fifo_list[WRITE]);
	dd->sort_list[READ] = RB_ROOT;
	dd->sort_list[WRITE] = RB_ROOT;
	dd->fifo_expire[READ] = read_expire;
	dd->fifo_expire[WRITE] = write_expire;
	dd->writes_starved = writes_starved;
	dd->front_merges = 1;
	dd->fifo_batch = fifo_batch;
	spin_lock_init(&dd->lock);
	spin_lock_init(&dd->zone_lock);
	INIT_LIST_HEAD(&dd->dispatch);

	q->elevator = eq;
	return 0;
}
static int dd_request_merge(struct request_queue *q, struct request **rq,
			    struct bio *bio)
{
	struct deadline_data *dd = q->elevator->elevator_data;
	sector_t sector = bio_end_sector(bio);
	struct request *__rq;

	if (!dd->front_merges)
		return ELEVATOR_NO_MERGE;

	__rq = elv_rb_find(&dd->sort_list[bio_data_dir(bio)], sector);
	if (__rq) {
		BUG_ON(sector != blk_rq_pos(__rq));

		if (elv_bio_merge_ok(__rq, bio)) {
			*rq = __rq;
			return ELEVATOR_FRONT_MERGE;
		}
	}

	return ELEVATOR_NO_MERGE;
}
static bool dd_bio_merge(struct blk_mq_hw_ctx *hctx, struct bio *bio,
			 unsigned int nr_segs)
{
	struct request_queue *q = hctx->queue;
	struct deadline_data *dd = q->elevator->elevator_data;
	struct request *free = NULL;
	bool ret;

	spin_lock(&dd->lock);
	ret = blk_mq_sched_try_merge(q, bio, nr_segs, &free);
	spin_unlock(&dd->lock);

	if (free)
		blk_mq_free_request(free);

	return ret;
}
/*
 * add rq to rbtree and fifo
 */
static void dd_insert_request(struct blk_mq_hw_ctx *hctx, struct request *rq,
			      bool at_head)
{
	struct request_queue *q = hctx->queue;
	struct deadline_data *dd = q->elevator->elevator_data;
	const int data_dir = rq_data_dir(rq);

	/*
	 * This may be a requeue of a write request that has locked its
	 * target zone. If it is the case, this releases the zone lock.
	 */
	blk_req_zone_write_unlock(rq);

	if (blk_mq_sched_try_insert_merge(q, rq))
		return;

	trace_block_rq_insert(rq);

	if (at_head || blk_rq_is_passthrough(rq)) {
		if (at_head)
			list_add(&rq->queuelist, &dd->dispatch);
		else
			list_add_tail(&rq->queuelist, &dd->dispatch);
	} else {
		deadline_add_rq_rb(dd, rq);

		if (rq_mergeable(rq)) {
			elv_rqhash_add(q, rq);
			if (!q->last_merge)
				q->last_merge = rq;
		}

		/*
		 * set expire time and add to fifo list
		 */
		rq->fifo_time = jiffies + dd->fifo_expire[data_dir];
		list_add_tail(&rq->queuelist, &dd->fifo_list[data_dir]);
	}
}
static void dd_insert_requests(struct blk_mq_hw_ctx *hctx,
			       struct list_head *list, bool at_head)
{
	struct request_queue *q = hctx->queue;
	struct deadline_data *dd = q->elevator->elevator_data;

	spin_lock(&dd->lock);
	while (!list_empty(list)) {
		struct request *rq;

		rq = list_first_entry(list, struct request, queuelist);
		list_del_init(&rq->queuelist);
		dd_insert_request(hctx, rq, at_head);
	}
	spin_unlock(&dd->lock);
}
/*
 * Nothing to do here. This is defined only to ensure that the .finish_request
 * method is called upon request completion.
 */
static void dd_prepare_request(struct request *rq)
{
}
/*
 * For zoned block devices, write unlock the target zone of
 * completed write requests. Do this while holding the zone lock
 * spinlock so that the zone is never unlocked while deadline_fifo_request()
 * or deadline_next_request() are executing. This function is called for
 * all requests, whether or not these requests complete successfully.
 *
 * For a zoned block device, __dd_dispatch_request() may have stopped
 * dispatching requests if all the queued requests are write requests directed
 * at zones that are already locked due to on-going write requests. To ensure
 * write request dispatch progress in this case, mark the queue as needing a
 * restart to ensure that the queue is run again after completion of the
 * request and zones being unlocked.
 */
static void dd_finish_request(struct request *rq)
{
	struct request_queue *q = rq->q;

	if (blk_queue_is_zoned(q)) {
		struct deadline_data *dd = q->elevator->elevator_data;
		unsigned long flags;

		spin_lock_irqsave(&dd->zone_lock, flags);
		blk_req_zone_write_unlock(rq);
		if (!list_empty(&dd->fifo_list[WRITE]))
			blk_mq_sched_mark_restart_hctx(rq->mq_hctx);
		spin_unlock_irqrestore(&dd->zone_lock, flags);
	}
}
static bool dd_has_work(struct blk_mq_hw_ctx *hctx)
{
	struct deadline_data *dd = hctx->queue->elevator->elevator_data;

	return !list_empty_careful(&dd->dispatch) ||
		!list_empty_careful(&dd->fifo_list[0]) ||
		!list_empty_careful(&dd->fifo_list[1]);
}
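
/*
 * dd_has_work() only gives blk-mq a hint about whether running the hardware
 * queue could produce a request, which is why the lists are checked with
 * list_empty_careful() and without taking dd->lock.
 */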
/*
 * sysfs parts below
 */
static ssize_t
deadline_var_show(int var, char *page)
{
	return sprintf(page, "%d\n", var);
}

static void
deadline_var_store(int *var, const char *page)
{
	char *p = (char *) page;

	*var = simple_strtol(p, &p, 10);
}
#define SHOW_FUNCTION(__FUNC, __VAR, __CONV)				\
static ssize_t __FUNC(struct elevator_queue *e, char *page)		\
{									\
	struct deadline_data *dd = e->elevator_data;			\
	int __data = __VAR;						\
	if (__CONV)							\
		__data = jiffies_to_msecs(__data);			\
	return deadline_var_show(__data, (page));			\
}
SHOW_FUNCTION(deadline_read_expire_show, dd->fifo_expire[READ], 1);
SHOW_FUNCTION(deadline_write_expire_show, dd->fifo_expire[WRITE], 1);
SHOW_FUNCTION(deadline_writes_starved_show, dd->writes_starved, 0);
SHOW_FUNCTION(deadline_front_merges_show, dd->front_merges, 0);
SHOW_FUNCTION(deadline_fifo_batch_show, dd->fifo_batch, 0);
#undef SHOW_FUNCTION
#define STORE_FUNCTION(__FUNC, __PTR, MIN, MAX, __CONV)			\
static ssize_t __FUNC(struct elevator_queue *e, const char *page, size_t count) \
{									\
	struct deadline_data *dd = e->elevator_data;			\
	int __data;							\
	deadline_var_store(&__data, (page));				\
	if (__data < (MIN))						\
		__data = (MIN);						\
	else if (__data > (MAX))					\
		__data = (MAX);						\
	if (__CONV)							\
		*(__PTR) = msecs_to_jiffies(__data);			\
	else								\
		*(__PTR) = __data;					\
	return count;							\
}
STORE_FUNCTION(deadline_read_expire_store, &dd->fifo_expire[READ], 0, INT_MAX, 1);
STORE_FUNCTION(deadline_write_expire_store, &dd->fifo_expire[WRITE], 0, INT_MAX, 1);
STORE_FUNCTION(deadline_writes_starved_store, &dd->writes_starved, INT_MIN, INT_MAX, 0);
STORE_FUNCTION(deadline_front_merges_store, &dd->front_merges, 0, 1, 0);
STORE_FUNCTION(deadline_fifo_batch_store, &dd->fifo_batch, 0, INT_MAX, 0);
#undef STORE_FUNCTION
#define DD_ATTR(name) \
	__ATTR(name, 0644, deadline_##name##_show, deadline_##name##_store)

static struct elv_fs_entry deadline_attrs[] = {
	DD_ATTR(read_expire),
	DD_ATTR(write_expire),
	DD_ATTR(writes_starved),
	DD_ATTR(front_merges),
	DD_ATTR(fifo_batch),
	__ATTR_NULL
};
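
/*
 * The attributes above are created per queue while this scheduler is active,
 * under /sys/block/<dev>/queue/iosched/ (e.g. read_expire, fifo_batch).
 */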
#ifdef CONFIG_BLK_DEBUG_FS
#define DEADLINE_DEBUGFS_DDIR_ATTRS(ddir, name)			\
static void *deadline_##name##_fifo_start(struct seq_file *m,		\
					  loff_t *pos)			\
	__acquires(&dd->lock)						\
{									\
	struct request_queue *q = m->private;				\
	struct deadline_data *dd = q->elevator->elevator_data;		\
									\
	spin_lock(&dd->lock);						\
	return seq_list_start(&dd->fifo_list[ddir], *pos);		\
}									\
									\
static void *deadline_##name##_fifo_next(struct seq_file *m, void *v,	\
					 loff_t *pos)			\
{									\
	struct request_queue *q = m->private;				\
	struct deadline_data *dd = q->elevator->elevator_data;		\
									\
	return seq_list_next(v, &dd->fifo_list[ddir], pos);		\
}									\
									\
static void deadline_##name##_fifo_stop(struct seq_file *m, void *v)	\
	__releases(&dd->lock)						\
{									\
	struct request_queue *q = m->private;				\
	struct deadline_data *dd = q->elevator->elevator_data;		\
									\
	spin_unlock(&dd->lock);						\
}									\
									\
static const struct seq_operations deadline_##name##_fifo_seq_ops = {	\
	.start	= deadline_##name##_fifo_start,				\
	.next	= deadline_##name##_fifo_next,				\
	.stop	= deadline_##name##_fifo_stop,				\
	.show	= blk_mq_debugfs_rq_show,				\
};									\
									\
static int deadline_##name##_next_rq_show(void *data,			\
					  struct seq_file *m)		\
{									\
	struct request_queue *q = data;					\
	struct deadline_data *dd = q->elevator->elevator_data;		\
	struct request *rq = dd->next_rq[ddir];				\
									\
	if (rq)								\
		__blk_mq_debugfs_rq_show(m, rq);			\
	return 0;							\
}

DEADLINE_DEBUGFS_DDIR_ATTRS(READ, read)
DEADLINE_DEBUGFS_DDIR_ATTRS(WRITE, write)
#undef DEADLINE_DEBUGFS_DDIR_ATTRS
static int deadline_batching_show(void *data, struct seq_file *m)
{
	struct request_queue *q = data;
	struct deadline_data *dd = q->elevator->elevator_data;

	seq_printf(m, "%u\n", dd->batching);
	return 0;
}

static int deadline_starved_show(void *data, struct seq_file *m)
{
	struct request_queue *q = data;
	struct deadline_data *dd = q->elevator->elevator_data;

	seq_printf(m, "%u\n", dd->starved);
	return 0;
}
static void *deadline_dispatch_start(struct seq_file *m, loff_t *pos)
	__acquires(&dd->lock)
{
	struct request_queue *q = m->private;
	struct deadline_data *dd = q->elevator->elevator_data;

	spin_lock(&dd->lock);
	return seq_list_start(&dd->dispatch, *pos);
}

static void *deadline_dispatch_next(struct seq_file *m, void *v, loff_t *pos)
{
	struct request_queue *q = m->private;
	struct deadline_data *dd = q->elevator->elevator_data;

	return seq_list_next(v, &dd->dispatch, pos);
}

static void deadline_dispatch_stop(struct seq_file *m, void *v)
	__releases(&dd->lock)
{
	struct request_queue *q = m->private;
	struct deadline_data *dd = q->elevator->elevator_data;

	spin_unlock(&dd->lock);
}

static const struct seq_operations deadline_dispatch_seq_ops = {
	.start	= deadline_dispatch_start,
	.next	= deadline_dispatch_next,
	.stop	= deadline_dispatch_stop,
	.show	= blk_mq_debugfs_rq_show,
};
#define DEADLINE_QUEUE_DDIR_ATTRS(name)						\
	{#name "_fifo_list", 0400, .seq_ops = &deadline_##name##_fifo_seq_ops},	\
	{#name "_next_rq", 0400, deadline_##name##_next_rq_show}
static const struct blk_mq_debugfs_attr deadline_queue_debugfs_attrs[] = {
	DEADLINE_QUEUE_DDIR_ATTRS(read),
	DEADLINE_QUEUE_DDIR_ATTRS(write),
	{"batching", 0400, deadline_batching_show},
	{"starved", 0400, deadline_starved_show},
	{"dispatch", 0400, .seq_ops = &deadline_dispatch_seq_ops},
	{},
};
#undef DEADLINE_QUEUE_DDIR_ATTRS
#endif
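
/*
 * When CONFIG_BLK_DEBUG_FS is enabled, the attributes above appear in the
 * per-device blk-mq debugfs directory (typically
 * /sys/kernel/debug/block/<dev>/sched/), e.g. read_fifo_list and dispatch.
 */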
static struct elevator_type mq_deadline = {
	.ops = {
		.insert_requests	= dd_insert_requests,
		.dispatch_request	= dd_dispatch_request,
		.prepare_request	= dd_prepare_request,
		.finish_request		= dd_finish_request,
		.next_request		= elv_rb_latter_request,
		.former_request		= elv_rb_former_request,
		.bio_merge		= dd_bio_merge,
		.request_merge		= dd_request_merge,
		.requests_merged	= dd_merged_requests,
		.request_merged		= dd_request_merged,
		.has_work		= dd_has_work,
		.init_sched		= dd_init_queue,
		.exit_sched		= dd_exit_queue,
	},

#ifdef CONFIG_BLK_DEBUG_FS
	.queue_debugfs_attrs = deadline_queue_debugfs_attrs,
#endif
	.elevator_attrs = deadline_attrs,
	.elevator_name = "mq-deadline",
	.elevator_alias = "deadline",
	.elevator_features = ELEVATOR_F_ZBD_SEQ_WRITE,
	.elevator_owner = THIS_MODULE,
};
MODULE_ALIAS("mq-deadline-iosched");
static int __init deadline_init(void)
{
	return elv_register(&mq_deadline);
}

static void __exit deadline_exit(void)
{
	elv_unregister(&mq_deadline);
}

module_init(deadline_init);
module_exit(deadline_exit);

MODULE_AUTHOR("Jens Axboe");
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("MQ deadline IO scheduler");