// SPDX-License-Identifier: GPL-2.0
/*
 *  MQ Deadline i/o scheduler - adaptation of the legacy deadline scheduler,
 *  for the blk-mq scheduling framework
 *
 *  Copyright (C) 2016 Jens Axboe <axboe@kernel.dk>
 */
#include <linux/kernel.h>
#include <linux/fs.h>
#include <linux/blkdev.h>
#include <linux/blk-mq.h>
#include <linux/elevator.h>
#include <linux/bio.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/init.h>
#include <linux/compiler.h>
#include <linux/rbtree.h>
#include <linux/sbitmap.h>

#include <trace/events/block.h>

#include "blk.h"
#include "blk-mq.h"
#include "blk-mq-debugfs.h"
#include "blk-mq-tag.h"
#include "blk-mq-sched.h"
#include "mq-deadline-cgroup.h"
/*
 * See Documentation/block/deadline-iosched.rst
 */
static const int read_expire = HZ / 2;  /* max time before a read is submitted. */
static const int write_expire = 5 * HZ; /* ditto for writes, these limits are SOFT! */
/*
 * Time after which to dispatch lower priority requests even if higher
 * priority requests are pending.
 */
static const int aging_expire = 10 * HZ;
static const int writes_starved = 2;    /* max times reads can starve a write */
static const int fifo_batch = 16;       /* # of sequential requests treated as one
                                           by the above parameters. For throughput. */
enum dd_data_dir {
        DD_READ         = READ,
        DD_WRITE        = WRITE,
};

enum { DD_DIR_COUNT = 2 };

enum dd_prio {
        DD_RT_PRIO      = 0,
        DD_BE_PRIO      = 1,
        DD_IDLE_PRIO    = 2,
        DD_PRIO_MAX     = 2,
};

enum { DD_PRIO_COUNT = 3 };

/* I/O statistics for all I/O priorities (enum dd_prio). */
struct io_stats {
        struct io_stats_per_prio stats[DD_PRIO_COUNT];
};
/*
 * Deadline scheduler data per I/O priority (enum dd_prio). Requests are
 * present on both sort_list[] and fifo_list[].
 */
struct dd_per_prio {
        struct list_head dispatch;
        struct rb_root sort_list[DD_DIR_COUNT];
        struct list_head fifo_list[DD_DIR_COUNT];
        /* Next request in FIFO order. Read, write or both are NULL. */
        struct request *next_rq[DD_DIR_COUNT];
};
struct deadline_data {
        /*
         * run time data
         */

        /* Request queue that owns this data structure. */
        struct request_queue *queue;

        struct dd_per_prio per_prio[DD_PRIO_COUNT];

        /* Data direction of latest dispatched request. */
        enum dd_data_dir last_dir;
        unsigned int batching;          /* number of sequential requests made */
        unsigned int starved;           /* times reads have starved writes */

        struct io_stats __percpu *stats;

        /*
         * settings that change how the i/o scheduler behaves
         */
        int fifo_expire[DD_DIR_COUNT];
        int fifo_batch;
        int writes_starved;
        int front_merges;
        u32 async_depth;
        int aging_expire;

        spinlock_t lock;
        spinlock_t zone_lock;
};
/* Count one event of type 'event_type' and with I/O priority 'prio' */
#define dd_count(dd, event_type, prio) do {                             \
        struct io_stats *io_stats = get_cpu_ptr((dd)->stats);          \
                                                                        \
        BUILD_BUG_ON(!__same_type((dd), struct deadline_data *));      \
        BUILD_BUG_ON(!__same_type((prio), enum dd_prio));              \
        local_inc(&io_stats->stats[(prio)].event_type);                 \
        put_cpu_ptr(io_stats);                                          \
} while (0)
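
/*
 * Example (see dd_insert_request()): dd_count(dd, inserted, prio) bumps the
 * per-CPU 'inserted' counter for @prio without taking any lock; dd_sum()
 * below folds the per-CPU counters back into a single value.
 */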
/*
 * Returns the total number of dd_count(dd, event_type, prio) calls across all
 * CPUs. No locking or barriers since it is fine if the returned sum is slightly
 * outdated.
 */
#define dd_sum(dd, event_type, prio) ({                                 \
        unsigned int cpu;                                               \
        u32 sum = 0;                                                    \
                                                                        \
        BUILD_BUG_ON(!__same_type((dd), struct deadline_data *));      \
        BUILD_BUG_ON(!__same_type((prio), enum dd_prio));              \
        for_each_present_cpu(cpu)                                       \
                sum += local_read(&per_cpu_ptr((dd)->stats, cpu)->      \
                                  stats[(prio)].event_type);            \
        sum;                                                            \
})
/* Maps an I/O priority class to a deadline scheduler priority. */
static const enum dd_prio ioprio_class_to_prio[] = {
        [IOPRIO_CLASS_NONE]     = DD_BE_PRIO,
        [IOPRIO_CLASS_RT]       = DD_RT_PRIO,
        [IOPRIO_CLASS_BE]       = DD_BE_PRIO,
        [IOPRIO_CLASS_IDLE]     = DD_IDLE_PRIO,
};
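
/* Return the sector-sorted rbtree that holds @rq, selected by data direction. */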
static inline struct rb_root *
deadline_rb_root(struct dd_per_prio *per_prio, struct request *rq)
{
        return &per_prio->sort_list[rq_data_dir(rq)];
}
/*
 * Returns the I/O priority class (IOPRIO_CLASS_*) that has been assigned to a
 * request.
 */
static u8 dd_rq_ioclass(struct request *rq)
{
        return IOPRIO_PRIO_CLASS(req_get_ioprio(rq));
}
/*
 * get the request after `rq' in sector-sorted order
 */
static inline struct request *
deadline_latter_request(struct request *rq)
{
        struct rb_node *node = rb_next(&rq->rb_node);

        if (node)
                return rb_entry_rq(node);

        return NULL;
}
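
/* Insert @rq into the sector-sorted rbtree for its priority level and direction. */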
static void
deadline_add_rq_rb(struct dd_per_prio *per_prio, struct request *rq)
{
        struct rb_root *root = deadline_rb_root(per_prio, rq);

        elv_rb_add(root, rq);
}
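
/*
 * Remove @rq from the sector-sorted rbtree. If @rq is cached as the next
 * request for its data direction, advance the cache to its successor first.
 */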
static inline void
deadline_del_rq_rb(struct dd_per_prio *per_prio, struct request *rq)
{
        const enum dd_data_dir data_dir = rq_data_dir(rq);

        if (per_prio->next_rq[data_dir] == rq)
                per_prio->next_rq[data_dir] = deadline_latter_request(rq);

        elv_rb_del(deadline_rb_root(per_prio, rq), rq);
}
/*
 * remove rq from rbtree and fifo.
 */
static void deadline_remove_request(struct request_queue *q,
                                    struct dd_per_prio *per_prio,
                                    struct request *rq)
{
        list_del_init(&rq->queuelist);

        /*
         * We might not be on the rbtree, if we are doing an insert merge
         */
        if (!RB_EMPTY_NODE(&rq->rb_node))
                deadline_del_rq_rb(per_prio, rq);

        elv_rqhash_del(q, rq);
        if (q->last_merge == rq)
                q->last_merge = NULL;
}
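
/*
 * Callback invoked after a bio has been merged into @req (the request_merged
 * elevator operation). A front merge changes the start sector of @req, so the
 * request has to be repositioned in the sector-sorted rbtree.
 */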
static void dd_request_merged(struct request_queue *q, struct request *req,
                              enum elv_merge type)
{
        struct deadline_data *dd = q->elevator->elevator_data;
        const u8 ioprio_class = dd_rq_ioclass(req);
        const enum dd_prio prio = ioprio_class_to_prio[ioprio_class];
        struct dd_per_prio *per_prio = &dd->per_prio[prio];

        /*
         * if the merge was a front merge, we need to reposition request
         */
        if (type == ELEVATOR_FRONT_MERGE) {
                elv_rb_del(deadline_rb_root(per_prio, req), req);
                deadline_add_rq_rb(per_prio, req);
        }
}
/*
 * Callback function that is invoked after @next has been merged into @req.
 */
static void dd_merged_requests(struct request_queue *q, struct request *req,
                               struct request *next)
{
        struct deadline_data *dd = q->elevator->elevator_data;
        const u8 ioprio_class = dd_rq_ioclass(next);
        const enum dd_prio prio = ioprio_class_to_prio[ioprio_class];
        struct dd_blkcg *blkcg = next->elv.priv[0];

        dd_count(dd, merged, prio);
        ddcg_count(blkcg, merged, ioprio_class);

        /*
         * if next expires before rq, assign its expire time to rq
         * and move into next position (next will be deleted) in fifo
         */
        if (!list_empty(&req->queuelist) && !list_empty(&next->queuelist)) {
                if (time_before((unsigned long)next->fifo_time,
                                (unsigned long)req->fifo_time)) {
                        list_move(&req->queuelist, &next->queuelist);
                        req->fifo_time = next->fifo_time;
                }
        }

        /*
         * kill knowledge of next, this one is a goner
         */
        deadline_remove_request(q, &dd->per_prio[prio], next);
}
/*
 * move an entry to dispatch queue
 */
static void
deadline_move_request(struct deadline_data *dd, struct dd_per_prio *per_prio,
                      struct request *rq)
{
        const enum dd_data_dir data_dir = rq_data_dir(rq);

        per_prio->next_rq[data_dir] = deadline_latter_request(rq);

        /*
         * take it off the sort and fifo list
         */
        deadline_remove_request(rq->q, per_prio, rq);
}
/* Number of requests queued for a given priority level. */
static u32 dd_queued(struct deadline_data *dd, enum dd_prio prio)
{
        return dd_sum(dd, inserted, prio) - dd_sum(dd, completed, prio);
}
/*
 * deadline_check_fifo returns 0 if there are no expired requests on the fifo,
 * 1 otherwise. Requires !list_empty(&dd->fifo_list[data_dir])
 */
static inline int deadline_check_fifo(struct dd_per_prio *per_prio,
                                      enum dd_data_dir data_dir)
{
        struct request *rq = rq_entry_fifo(per_prio->fifo_list[data_dir].next);

        /*
         * rq is expired!
         */
        if (time_after_eq(jiffies, (unsigned long)rq->fifo_time))
                return 1;

        return 0;
}
/*
 * For the specified data direction, return the next request to
 * dispatch using arrival ordered lists.
 */
static struct request *
deadline_fifo_request(struct deadline_data *dd, struct dd_per_prio *per_prio,
                      enum dd_data_dir data_dir)
{
        struct request *rq;
        unsigned long flags;

        if (list_empty(&per_prio->fifo_list[data_dir]))
                return NULL;

        rq = rq_entry_fifo(per_prio->fifo_list[data_dir].next);
        if (data_dir == DD_READ || !blk_queue_is_zoned(rq->q))
                return rq;

        /*
         * Look for a write request that can be dispatched, that is one with
         * an unlocked target zone.
         */
        spin_lock_irqsave(&dd->zone_lock, flags);
        list_for_each_entry(rq, &per_prio->fifo_list[DD_WRITE], queuelist) {
                if (blk_req_can_dispatch_to_zone(rq))
                        goto out;
        }
        rq = NULL;
out:
        spin_unlock_irqrestore(&dd->zone_lock, flags);

        return rq;
}
/*
 * For the specified data direction, return the next request to
 * dispatch using sector position sorted lists.
 */
static struct request *
deadline_next_request(struct deadline_data *dd, struct dd_per_prio *per_prio,
                      enum dd_data_dir data_dir)
{
        struct request *rq;
        unsigned long flags;

        rq = per_prio->next_rq[data_dir];
        if (!rq)
                return NULL;

        if (data_dir == DD_READ || !blk_queue_is_zoned(rq->q))
                return rq;

        /*
         * Look for a write request that can be dispatched, that is one with
         * an unlocked target zone.
         */
        spin_lock_irqsave(&dd->zone_lock, flags);
        while (rq) {
                if (blk_req_can_dispatch_to_zone(rq))
                        break;
                rq = deadline_latter_request(rq);
        }
        spin_unlock_irqrestore(&dd->zone_lock, flags);

        return rq;
}
/*
 * deadline_dispatch_requests selects the best request according to
 * read/write expire, fifo_batch, etc and with a start time <= @latest.
 */
static struct request *__dd_dispatch_request(struct deadline_data *dd,
                                             struct dd_per_prio *per_prio,
                                             u64 latest_start_ns)
{
        struct request *rq, *next_rq;
        enum dd_data_dir data_dir;
        struct dd_blkcg *blkcg;
        enum dd_prio prio;
        u8 ioprio_class;

        lockdep_assert_held(&dd->lock);

        if (!list_empty(&per_prio->dispatch)) {
                rq = list_first_entry(&per_prio->dispatch, struct request,
                                      queuelist);
                if (rq->start_time_ns > latest_start_ns)
                        return NULL;
                list_del_init(&rq->queuelist);
                goto done;
        }

        /*
         * batches are currently reads XOR writes
         */
        rq = deadline_next_request(dd, per_prio, dd->last_dir);
        if (rq && dd->batching < dd->fifo_batch)
                /* we have a next request are still entitled to batch */
                goto dispatch_request;

        /*
         * at this point we are not running a batch. select the appropriate
         * data direction (read / write)
         */

        if (!list_empty(&per_prio->fifo_list[DD_READ])) {
                BUG_ON(RB_EMPTY_ROOT(&per_prio->sort_list[DD_READ]));

                if (deadline_fifo_request(dd, per_prio, DD_WRITE) &&
                    (dd->starved++ >= dd->writes_starved))
                        goto dispatch_writes;

                data_dir = DD_READ;

                goto dispatch_find_request;
        }

        /*
         * there are either no reads or writes have been starved
         */

        if (!list_empty(&per_prio->fifo_list[DD_WRITE])) {
dispatch_writes:
                BUG_ON(RB_EMPTY_ROOT(&per_prio->sort_list[DD_WRITE]));

                dd->starved = 0;

                data_dir = DD_WRITE;

                goto dispatch_find_request;
        }

        return NULL;

dispatch_find_request:
        /*
         * we are not running a batch, find best request for selected data_dir
         */
        next_rq = deadline_next_request(dd, per_prio, data_dir);
        if (deadline_check_fifo(per_prio, data_dir) || !next_rq) {
                /*
                 * A deadline has expired, the last request was in the other
                 * direction, or we have run out of higher-sectored requests.
                 * Start again from the request with the earliest expiry time.
                 */
                rq = deadline_fifo_request(dd, per_prio, data_dir);
        } else {
                /*
                 * The last req was the same dir and we have a next request in
                 * sort order. No expired requests so continue on from here.
                 */
                rq = next_rq;
        }

        /*
         * For a zoned block device, if we only have writes queued and none of
         * them can be dispatched, rq will be NULL.
         */
        if (!rq)
                return NULL;

        dd->last_dir = data_dir;
        dd->batching = 0;

dispatch_request:
        if (rq->start_time_ns > latest_start_ns)
                return NULL;
        /*
         * rq is the selected appropriate request.
         */
        dd->batching++;
        deadline_move_request(dd, per_prio, rq);
done:
        ioprio_class = dd_rq_ioclass(rq);
        prio = ioprio_class_to_prio[ioprio_class];
        dd_count(dd, dispatched, prio);
        blkcg = rq->elv.priv[0];
        ddcg_count(blkcg, dispatched, ioprio_class);
        /*
         * If the request needs its target zone locked, do it.
         */
        blk_req_zone_write_lock(rq);
        rq->rq_flags |= RQF_STARTED;
        return rq;
}
/*
 * Called from blk_mq_run_hw_queue() -> __blk_mq_sched_dispatch_requests().
 *
 * One confusing aspect here is that we get called for a specific
 * hardware queue, but we may return a request that is for a
 * different hardware queue. This is because mq-deadline has shared
 * state for all hardware queues, in terms of sorting, FIFOs, etc.
 */
static struct request *dd_dispatch_request(struct blk_mq_hw_ctx *hctx)
{
        struct deadline_data *dd = hctx->queue->elevator->elevator_data;
        const u64 now_ns = ktime_get_ns();
        struct request *rq = NULL;
        enum dd_prio prio;

        spin_lock(&dd->lock);
        /*
         * Start with dispatching requests whose deadline expired more than
         * aging_expire jiffies ago.
         */
        for (prio = DD_BE_PRIO; prio <= DD_PRIO_MAX; prio++) {
                rq = __dd_dispatch_request(dd, &dd->per_prio[prio], now_ns -
                                           jiffies_to_nsecs(dd->aging_expire));
                if (rq)
                        goto unlock;
        }
        /*
         * Next, dispatch requests in priority order. Ignore lower priority
         * requests if any higher priority requests are pending.
         */
        for (prio = 0; prio <= DD_PRIO_MAX; prio++) {
                rq = __dd_dispatch_request(dd, &dd->per_prio[prio], now_ns);
                if (rq || dd_queued(dd, prio))
                        break;
        }

unlock:
        spin_unlock(&dd->lock);

        return rq;
}
/*
 * Called by __blk_mq_alloc_request(). The shallow_depth value set by this
 * function is used by __blk_mq_get_tag().
 */
static void dd_limit_depth(unsigned int op, struct blk_mq_alloc_data *data)
{
        struct deadline_data *dd = data->q->elevator->elevator_data;

        /* Do not throttle synchronous reads. */
        if (op_is_sync(op) && !op_is_write(op))
                return;

        /*
         * Throttle asynchronous requests and writes such that these requests
         * do not block the allocation of synchronous requests.
         */
        data->shallow_depth = dd->async_depth;
}
/* Called by blk_mq_update_nr_requests(). */
static void dd_depth_updated(struct blk_mq_hw_ctx *hctx)
{
        struct request_queue *q = hctx->queue;
        struct deadline_data *dd = q->elevator->elevator_data;
        struct blk_mq_tags *tags = hctx->sched_tags;

        dd->async_depth = max(1UL, 3 * q->nr_requests / 4);

        sbitmap_queue_min_shallow_depth(tags->bitmap_tags, dd->async_depth);
}
/* Called by blk_mq_init_hctx() and blk_mq_init_sched(). */
static int dd_init_hctx(struct blk_mq_hw_ctx *hctx, unsigned int hctx_idx)
{
        dd_depth_updated(hctx);

        return 0;
}
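
/*
 * Tear down the scheduler private data: deactivate the cgroup policy, warn if
 * any requests are still queued, and free the per-CPU statistics and the
 * deadline_data itself.
 */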
static void dd_exit_sched(struct elevator_queue *e)
{
        struct deadline_data *dd = e->elevator_data;
        enum dd_prio prio;

        dd_deactivate_policy(dd->queue);

        for (prio = 0; prio <= DD_PRIO_MAX; prio++) {
                struct dd_per_prio *per_prio = &dd->per_prio[prio];

                WARN_ON_ONCE(!list_empty(&per_prio->fifo_list[DD_READ]));
                WARN_ON_ONCE(!list_empty(&per_prio->fifo_list[DD_WRITE]));
        }

        free_percpu(dd->stats);

        kfree(dd);
}
/*
 * Initialize elevator private data (deadline_data) and associate with blkcg.
 */
static int dd_init_sched(struct request_queue *q, struct elevator_type *e)
{
        struct deadline_data *dd;
        struct elevator_queue *eq;
        enum dd_prio prio;
        int ret = -ENOMEM;

        /*
         * Initialization would be very tricky if the queue is not frozen,
         * hence the warning statement below.
         */
        WARN_ON_ONCE(!percpu_ref_is_zero(&q->q_usage_counter));

        eq = elevator_alloc(q, e);
        if (!eq)
                return ret;

        dd = kzalloc_node(sizeof(*dd), GFP_KERNEL, q->node);
        if (!dd)
                goto put_eq;

        eq->elevator_data = dd;

        dd->stats = alloc_percpu_gfp(typeof(*dd->stats),
                                     GFP_KERNEL | __GFP_ZERO);
        if (!dd->stats)
                goto free_dd;

        dd->queue = q;

        for (prio = 0; prio <= DD_PRIO_MAX; prio++) {
                struct dd_per_prio *per_prio = &dd->per_prio[prio];

                INIT_LIST_HEAD(&per_prio->dispatch);
                INIT_LIST_HEAD(&per_prio->fifo_list[DD_READ]);
                INIT_LIST_HEAD(&per_prio->fifo_list[DD_WRITE]);
                per_prio->sort_list[DD_READ] = RB_ROOT;
                per_prio->sort_list[DD_WRITE] = RB_ROOT;
        }
        dd->fifo_expire[DD_READ] = read_expire;
        dd->fifo_expire[DD_WRITE] = write_expire;
        dd->writes_starved = writes_starved;
        dd->front_merges = 1;
        dd->last_dir = DD_WRITE;
        dd->fifo_batch = fifo_batch;
        dd->aging_expire = aging_expire;
        spin_lock_init(&dd->lock);
        spin_lock_init(&dd->zone_lock);

        ret = dd_activate_policy(q);
        if (ret)
                goto free_stats;

        ret = 0;
        q->elevator = eq;
        return 0;

free_stats:
        free_percpu(dd->stats);

free_dd:
        kfree(dd);

put_eq:
        kobject_put(&eq->kobj);
        return ret;
}
/*
 * Try to merge @bio into an existing request. If @bio has been merged into
 * an existing request, store the pointer to that request into *@rq.
 */
static int dd_request_merge(struct request_queue *q, struct request **rq,
                            struct bio *bio)
{
        struct deadline_data *dd = q->elevator->elevator_data;
        const u8 ioprio_class = IOPRIO_PRIO_CLASS(bio->bi_ioprio);
        const enum dd_prio prio = ioprio_class_to_prio[ioprio_class];
        struct dd_per_prio *per_prio = &dd->per_prio[prio];
        sector_t sector = bio_end_sector(bio);
        struct request *__rq;

        if (!dd->front_merges)
                return ELEVATOR_NO_MERGE;

        __rq = elv_rb_find(&per_prio->sort_list[bio_data_dir(bio)], sector);
        if (__rq) {
                BUG_ON(sector != blk_rq_pos(__rq));

                if (elv_bio_merge_ok(__rq, bio)) {
                        *rq = __rq;
                        return ELEVATOR_FRONT_MERGE;
                }
        }

        return ELEVATOR_NO_MERGE;
}
/*
 * Attempt to merge a bio into an existing request. This function is called
 * before @bio is associated with a request.
 */
static bool dd_bio_merge(struct request_queue *q, struct bio *bio,
                         unsigned int nr_segs)
{
        struct deadline_data *dd = q->elevator->elevator_data;
        struct request *free = NULL;
        bool ret;

        spin_lock(&dd->lock);
        ret = blk_mq_sched_try_merge(q, bio, nr_segs, &free);
        spin_unlock(&dd->lock);

        if (free)
                blk_mq_free_request(free);

        return ret;
}
/*
 * add rq to rbtree and fifo
 */
static void dd_insert_request(struct blk_mq_hw_ctx *hctx, struct request *rq,
                              bool at_head)
{
        struct request_queue *q = hctx->queue;
        struct deadline_data *dd = q->elevator->elevator_data;
        const enum dd_data_dir data_dir = rq_data_dir(rq);
        u16 ioprio = req_get_ioprio(rq);
        u8 ioprio_class = IOPRIO_PRIO_CLASS(ioprio);
        struct dd_per_prio *per_prio;
        enum dd_prio prio;
        struct dd_blkcg *blkcg;
        LIST_HEAD(free);

        lockdep_assert_held(&dd->lock);

        /*
         * This may be a requeue of a write request that has locked its
         * target zone. If it is the case, this releases the zone lock.
         */
        blk_req_zone_write_unlock(rq);

        /*
         * If a block cgroup has been associated with the submitter and if an
         * I/O priority has been set in the associated block cgroup, use the
         * lowest of the cgroup priority and the request priority for the
         * request. If no priority has been set in the request, use the cgroup
         * priority.
         */
        prio = ioprio_class_to_prio[ioprio_class];
        dd_count(dd, inserted, prio);
        blkcg = dd_blkcg_from_bio(rq->bio);
        ddcg_count(blkcg, inserted, ioprio_class);
        rq->elv.priv[0] = blkcg;

        if (blk_mq_sched_try_insert_merge(q, rq, &free)) {
                blk_mq_free_requests(&free);
                return;
        }

        trace_block_rq_insert(rq);

        per_prio = &dd->per_prio[prio];
        if (at_head) {
                list_add(&rq->queuelist, &per_prio->dispatch);
        } else {
                deadline_add_rq_rb(per_prio, rq);

                if (rq_mergeable(rq)) {
                        elv_rqhash_add(q, rq);
                        if (!q->last_merge)
                                q->last_merge = rq;
                }

                /*
                 * set expire time and add to fifo list
                 */
                rq->fifo_time = jiffies + dd->fifo_expire[data_dir];
                list_add_tail(&rq->queuelist, &per_prio->fifo_list[data_dir]);
        }
}
/*
 * Called from blk_mq_sched_insert_request() or blk_mq_sched_insert_requests().
 */
static void dd_insert_requests(struct blk_mq_hw_ctx *hctx,
                               struct list_head *list, bool at_head)
{
        struct request_queue *q = hctx->queue;
        struct deadline_data *dd = q->elevator->elevator_data;

        spin_lock(&dd->lock);
        while (!list_empty(list)) {
                struct request *rq;

                rq = list_first_entry(list, struct request, queuelist);
                list_del_init(&rq->queuelist);
                dd_insert_request(hctx, rq, at_head);
        }
        spin_unlock(&dd->lock);
}
/* Callback from inside blk_mq_rq_ctx_init(). */
static void dd_prepare_request(struct request *rq)
{
        rq->elv.priv[0] = NULL;
}
/*
 * Callback from inside blk_mq_free_request().
 *
 * For zoned block devices, write unlock the target zone of
 * completed write requests. Do this while holding the zone lock
 * spinlock so that the zone is never unlocked while deadline_fifo_request()
 * or deadline_next_request() are executing. This function is called for
 * all requests, whether or not these requests complete successfully.
 *
 * For a zoned block device, __dd_dispatch_request() may have stopped
 * dispatching requests if all the queued requests are write requests directed
 * at zones that are already locked due to on-going write requests. To ensure
 * write request dispatch progress in this case, mark the queue as needing a
 * restart to ensure that the queue is run again after completion of the
 * request and zones being unlocked.
 */
static void dd_finish_request(struct request *rq)
{
        struct request_queue *q = rq->q;
        struct deadline_data *dd = q->elevator->elevator_data;
        struct dd_blkcg *blkcg = rq->elv.priv[0];
        const u8 ioprio_class = dd_rq_ioclass(rq);
        const enum dd_prio prio = ioprio_class_to_prio[ioprio_class];
        struct dd_per_prio *per_prio = &dd->per_prio[prio];

        dd_count(dd, completed, prio);
        ddcg_count(blkcg, completed, ioprio_class);

        if (blk_queue_is_zoned(q)) {
                unsigned long flags;

                spin_lock_irqsave(&dd->zone_lock, flags);
                blk_req_zone_write_unlock(rq);
                if (!list_empty(&per_prio->fifo_list[DD_WRITE]))
                        blk_mq_sched_mark_restart_hctx(rq->mq_hctx);
                spin_unlock_irqrestore(&dd->zone_lock, flags);
        }
}
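
/*
 * Report whether any work is pending for a single priority level: either the
 * dispatch list or one of the FIFO lists is non-empty.
 */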
static bool dd_has_work_for_prio(struct dd_per_prio *per_prio)
{
        return !list_empty_careful(&per_prio->dispatch) ||
                !list_empty_careful(&per_prio->fifo_list[DD_READ]) ||
                !list_empty_careful(&per_prio->fifo_list[DD_WRITE]);
}
static bool dd_has_work(struct blk_mq_hw_ctx *hctx)
{
        struct deadline_data *dd = hctx->queue->elevator->elevator_data;
        enum dd_prio prio;

        for (prio = 0; prio <= DD_PRIO_MAX; prio++)
                if (dd_has_work_for_prio(&dd->per_prio[prio]))
                        return true;

        return false;
}
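
/*
 * sysfs parts below
 */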
#define SHOW_INT(__FUNC, __VAR)                                         \
static ssize_t __FUNC(struct elevator_queue *e, char *page)            \
{                                                                       \
        struct deadline_data *dd = e->elevator_data;                    \
                                                                        \
        return sysfs_emit(page, "%d\n", __VAR);                         \
}
#define SHOW_JIFFIES(__FUNC, __VAR) SHOW_INT(__FUNC, jiffies_to_msecs(__VAR))
SHOW_JIFFIES(deadline_read_expire_show, dd->fifo_expire[DD_READ]);
SHOW_JIFFIES(deadline_write_expire_show, dd->fifo_expire[DD_WRITE]);
SHOW_JIFFIES(deadline_aging_expire_show, dd->aging_expire);
SHOW_INT(deadline_writes_starved_show, dd->writes_starved);
SHOW_INT(deadline_front_merges_show, dd->front_merges);
SHOW_INT(deadline_async_depth_show, dd->async_depth);
SHOW_INT(deadline_fifo_batch_show, dd->fifo_batch);
#undef SHOW_INT
#undef SHOW_JIFFIES
#define STORE_FUNCTION(__FUNC, __PTR, MIN, MAX, __CONV)                 \
static ssize_t __FUNC(struct elevator_queue *e, const char *page, size_t count) \
{                                                                       \
        struct deadline_data *dd = e->elevator_data;                    \
        int __data, __ret;                                              \
                                                                        \
        __ret = kstrtoint(page, 0, &__data);                            \
        if (__ret < 0)                                                  \
                return __ret;                                           \
        if (__data < (MIN))                                             \
                __data = (MIN);                                         \
        else if (__data > (MAX))                                        \
                __data = (MAX);                                         \
        *(__PTR) = __CONV(__data);                                      \
        return count;                                                   \
}
#define STORE_INT(__FUNC, __PTR, MIN, MAX)                              \
        STORE_FUNCTION(__FUNC, __PTR, MIN, MAX, )
#define STORE_JIFFIES(__FUNC, __PTR, MIN, MAX)                          \
        STORE_FUNCTION(__FUNC, __PTR, MIN, MAX, msecs_to_jiffies)
STORE_JIFFIES(deadline_read_expire_store, &dd->fifo_expire[DD_READ], 0, INT_MAX);
STORE_JIFFIES(deadline_write_expire_store, &dd->fifo_expire[DD_WRITE], 0, INT_MAX);
STORE_JIFFIES(deadline_aging_expire_store, &dd->aging_expire, 0, INT_MAX);
STORE_INT(deadline_writes_starved_store, &dd->writes_starved, INT_MIN, INT_MAX);
STORE_INT(deadline_front_merges_store, &dd->front_merges, 0, 1);
STORE_INT(deadline_async_depth_store, &dd->async_depth, 1, INT_MAX);
STORE_INT(deadline_fifo_batch_store, &dd->fifo_batch, 0, INT_MAX);
#undef STORE_FUNCTION
#undef STORE_INT
#undef STORE_JIFFIES
#define DD_ATTR(name) \
        __ATTR(name, 0644, deadline_##name##_show, deadline_##name##_store)

static struct elv_fs_entry deadline_attrs[] = {
        DD_ATTR(read_expire),
        DD_ATTR(write_expire),
        DD_ATTR(writes_starved),
        DD_ATTR(front_merges),
        DD_ATTR(async_depth),
        DD_ATTR(fifo_batch),
        DD_ATTR(aging_expire),
        __ATTR_NULL
};
#ifdef CONFIG_BLK_DEBUG_FS
#define DEADLINE_DEBUGFS_DDIR_ATTRS(prio, data_dir, name)               \
static void *deadline_##name##_fifo_start(struct seq_file *m,          \
                                          loff_t *pos)                  \
        __acquires(&dd->lock)                                           \
{                                                                       \
        struct request_queue *q = m->private;                           \
        struct deadline_data *dd = q->elevator->elevator_data;          \
        struct dd_per_prio *per_prio = &dd->per_prio[prio];             \
                                                                        \
        spin_lock(&dd->lock);                                           \
        return seq_list_start(&per_prio->fifo_list[data_dir], *pos);    \
}                                                                       \
                                                                        \
static void *deadline_##name##_fifo_next(struct seq_file *m, void *v,  \
                                         loff_t *pos)                   \
{                                                                       \
        struct request_queue *q = m->private;                           \
        struct deadline_data *dd = q->elevator->elevator_data;          \
        struct dd_per_prio *per_prio = &dd->per_prio[prio];             \
                                                                        \
        return seq_list_next(v, &per_prio->fifo_list[data_dir], pos);   \
}                                                                       \
                                                                        \
static void deadline_##name##_fifo_stop(struct seq_file *m, void *v)   \
        __releases(&dd->lock)                                           \
{                                                                       \
        struct request_queue *q = m->private;                           \
        struct deadline_data *dd = q->elevator->elevator_data;          \
                                                                        \
        spin_unlock(&dd->lock);                                         \
}                                                                       \
                                                                        \
static const struct seq_operations deadline_##name##_fifo_seq_ops = {  \
        .start  = deadline_##name##_fifo_start,                         \
        .next   = deadline_##name##_fifo_next,                          \
        .stop   = deadline_##name##_fifo_stop,                          \
        .show   = blk_mq_debugfs_rq_show,                               \
};                                                                      \
                                                                        \
static int deadline_##name##_next_rq_show(void *data,                  \
                                          struct seq_file *m)           \
{                                                                       \
        struct request_queue *q = data;                                 \
        struct deadline_data *dd = q->elevator->elevator_data;          \
        struct dd_per_prio *per_prio = &dd->per_prio[prio];             \
        struct request *rq = per_prio->next_rq[data_dir];               \
                                                                        \
        if (rq)                                                         \
                __blk_mq_debugfs_rq_show(m, rq);                        \
        return 0;                                                       \
}

DEADLINE_DEBUGFS_DDIR_ATTRS(DD_RT_PRIO, DD_READ, read0);
DEADLINE_DEBUGFS_DDIR_ATTRS(DD_RT_PRIO, DD_WRITE, write0);
DEADLINE_DEBUGFS_DDIR_ATTRS(DD_BE_PRIO, DD_READ, read1);
DEADLINE_DEBUGFS_DDIR_ATTRS(DD_BE_PRIO, DD_WRITE, write1);
DEADLINE_DEBUGFS_DDIR_ATTRS(DD_IDLE_PRIO, DD_READ, read2);
DEADLINE_DEBUGFS_DDIR_ATTRS(DD_IDLE_PRIO, DD_WRITE, write2);
#undef DEADLINE_DEBUGFS_DDIR_ATTRS
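
/* debugfs attributes exposing the current run-time state of the scheduler. */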
static int deadline_batching_show(void *data, struct seq_file *m)
{
        struct request_queue *q = data;
        struct deadline_data *dd = q->elevator->elevator_data;

        seq_printf(m, "%u\n", dd->batching);

        return 0;
}
static int deadline_starved_show(void *data, struct seq_file *m)
{
        struct request_queue *q = data;
        struct deadline_data *dd = q->elevator->elevator_data;

        seq_printf(m, "%u\n", dd->starved);

        return 0;
}
static int dd_async_depth_show(void *data, struct seq_file *m)
{
        struct request_queue *q = data;
        struct deadline_data *dd = q->elevator->elevator_data;

        seq_printf(m, "%u\n", dd->async_depth);

        return 0;
}
static int dd_queued_show(void *data, struct seq_file *m)
{
        struct request_queue *q = data;
        struct deadline_data *dd = q->elevator->elevator_data;

        seq_printf(m, "%u %u %u\n", dd_queued(dd, DD_RT_PRIO),
                   dd_queued(dd, DD_BE_PRIO),
                   dd_queued(dd, DD_IDLE_PRIO));

        return 0;
}
/* Number of requests owned by the block driver for a given priority. */
static u32 dd_owned_by_driver(struct deadline_data *dd, enum dd_prio prio)
{
        return dd_sum(dd, dispatched, prio) + dd_sum(dd, merged, prio)
                - dd_sum(dd, completed, prio);
}
static int dd_owned_by_driver_show(void *data, struct seq_file *m)
{
        struct request_queue *q = data;
        struct deadline_data *dd = q->elevator->elevator_data;

        seq_printf(m, "%u %u %u\n", dd_owned_by_driver(dd, DD_RT_PRIO),
                   dd_owned_by_driver(dd, DD_BE_PRIO),
                   dd_owned_by_driver(dd, DD_IDLE_PRIO));

        return 0;
}
#define DEADLINE_DISPATCH_ATTR(prio)                                    \
static void *deadline_dispatch##prio##_start(struct seq_file *m,       \
                                             loff_t *pos)               \
        __acquires(&dd->lock)                                           \
{                                                                       \
        struct request_queue *q = m->private;                           \
        struct deadline_data *dd = q->elevator->elevator_data;          \
        struct dd_per_prio *per_prio = &dd->per_prio[prio];             \
                                                                        \
        spin_lock(&dd->lock);                                           \
        return seq_list_start(&per_prio->dispatch, *pos);               \
}                                                                       \
                                                                        \
static void *deadline_dispatch##prio##_next(struct seq_file *m,        \
                                            void *v, loff_t *pos)       \
{                                                                       \
        struct request_queue *q = m->private;                           \
        struct deadline_data *dd = q->elevator->elevator_data;          \
        struct dd_per_prio *per_prio = &dd->per_prio[prio];             \
                                                                        \
        return seq_list_next(v, &per_prio->dispatch, pos);              \
}                                                                       \
                                                                        \
static void deadline_dispatch##prio##_stop(struct seq_file *m, void *v) \
        __releases(&dd->lock)                                           \
{                                                                       \
        struct request_queue *q = m->private;                           \
        struct deadline_data *dd = q->elevator->elevator_data;          \
                                                                        \
        spin_unlock(&dd->lock);                                         \
}                                                                       \
                                                                        \
static const struct seq_operations deadline_dispatch##prio##_seq_ops = { \
        .start  = deadline_dispatch##prio##_start,                      \
        .next   = deadline_dispatch##prio##_next,                       \
        .stop   = deadline_dispatch##prio##_stop,                       \
        .show   = blk_mq_debugfs_rq_show,                               \
};

DEADLINE_DISPATCH_ATTR(0);
DEADLINE_DISPATCH_ATTR(1);
DEADLINE_DISPATCH_ATTR(2);
#undef DEADLINE_DISPATCH_ATTR
#define DEADLINE_QUEUE_DDIR_ATTRS(name)                                 \
        {#name "_fifo_list", 0400,                                      \
         .seq_ops = &deadline_##name##_fifo_seq_ops}
#define DEADLINE_NEXT_RQ_ATTR(name)                                     \
        {#name "_next_rq", 0400, deadline_##name##_next_rq_show}
static const struct blk_mq_debugfs_attr deadline_queue_debugfs_attrs[] = {
        DEADLINE_QUEUE_DDIR_ATTRS(read0),
        DEADLINE_QUEUE_DDIR_ATTRS(write0),
        DEADLINE_QUEUE_DDIR_ATTRS(read1),
        DEADLINE_QUEUE_DDIR_ATTRS(write1),
        DEADLINE_QUEUE_DDIR_ATTRS(read2),
        DEADLINE_QUEUE_DDIR_ATTRS(write2),
        DEADLINE_NEXT_RQ_ATTR(read0),
        DEADLINE_NEXT_RQ_ATTR(write0),
        DEADLINE_NEXT_RQ_ATTR(read1),
        DEADLINE_NEXT_RQ_ATTR(write1),
        DEADLINE_NEXT_RQ_ATTR(read2),
        DEADLINE_NEXT_RQ_ATTR(write2),
        {"batching", 0400, deadline_batching_show},
        {"starved", 0400, deadline_starved_show},
        {"async_depth", 0400, dd_async_depth_show},
        {"dispatch0", 0400, .seq_ops = &deadline_dispatch0_seq_ops},
        {"dispatch1", 0400, .seq_ops = &deadline_dispatch1_seq_ops},
        {"dispatch2", 0400, .seq_ops = &deadline_dispatch2_seq_ops},
        {"owned_by_driver", 0400, dd_owned_by_driver_show},
        {"queued", 0400, dd_queued_show},
        {},
};
#undef DEADLINE_QUEUE_DDIR_ATTRS
#endif
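
/* The elevator operations that plug mq-deadline into the blk-mq framework. */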
static struct elevator_type mq_deadline = {
        .ops = {
                .depth_updated          = dd_depth_updated,
                .limit_depth            = dd_limit_depth,
                .insert_requests       = dd_insert_requests,
                .dispatch_request       = dd_dispatch_request,
                .prepare_request        = dd_prepare_request,
                .finish_request         = dd_finish_request,
                .next_request           = elv_rb_latter_request,
                .former_request         = elv_rb_former_request,
                .bio_merge              = dd_bio_merge,
                .request_merge          = dd_request_merge,
                .requests_merged        = dd_merged_requests,
                .request_merged         = dd_request_merged,
                .has_work               = dd_has_work,
                .init_sched             = dd_init_sched,
                .exit_sched             = dd_exit_sched,
                .init_hctx              = dd_init_hctx,
        },

#ifdef CONFIG_BLK_DEBUG_FS
        .queue_debugfs_attrs = deadline_queue_debugfs_attrs,
#endif
        .elevator_attrs = deadline_attrs,
        .elevator_name = "mq-deadline",
        .elevator_alias = "deadline",
        .elevator_features = ELEVATOR_F_ZBD_SEQ_WRITE,
        .elevator_owner = THIS_MODULE,
};
MODULE_ALIAS("mq-deadline-iosched");
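
/*
 * Module init: register the elevator and then initialize the blkcg support;
 * if the latter fails, the elevator is unregistered again.
 */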
static int __init deadline_init(void)
{
        int ret;

        ret = elv_register(&mq_deadline);
        if (ret)
                goto out;
        ret = dd_blkcg_init();
        if (ret)
                goto unreg;

out:
        return ret;

unreg:
        elv_unregister(&mq_deadline);
        goto out;
}
static void __exit deadline_exit(void)
{
        dd_blkcg_exit();
        elv_unregister(&mq_deadline);
}
module_init(deadline_init);
module_exit(deadline_exit);

MODULE_AUTHOR("Jens Axboe, Damien Le Moal and Bart Van Assche");
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("MQ deadline IO scheduler");