// SPDX-License-Identifier: GPL-2.0
/*
 *  MQ Deadline i/o scheduler - adaptation of the legacy deadline scheduler,
 *  for the blk-mq scheduling framework
 *
 *  Copyright (C) 2016 Jens Axboe <axboe@kernel.dk>
 */
#include <linux/kernel.h>
#include <linux/fs.h>
#include <linux/blkdev.h>
#include <linux/blk-mq.h>
#include <linux/elevator.h>
#include <linux/bio.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/init.h>
#include <linux/compiler.h>
#include <linux/rbtree.h>
#include <linux/sbitmap.h>

#include <trace/events/block.h>

#include "blk.h"
#include "blk-mq.h"
#include "blk-mq-debugfs.h"
#include "blk-mq-tag.h"
#include "blk-mq-sched.h"
/*
 * See Documentation/block/deadline-iosched.rst
 */
static const int read_expire = HZ / 2;  /* max time before a read is submitted. */
static const int write_expire = 5 * HZ; /* ditto for writes, these limits are SOFT! */
/*
 * Time after which to dispatch lower priority requests even if higher
 * priority requests are pending.
 */
static const int aging_expire = 10 * HZ;
static const int writes_starved = 2;    /* max times reads can starve a write */
static const int fifo_batch = 16;       /* # of sequential requests treated as one
                                           by the above parameters. For throughput. */
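
/*
 * All of the tunables above are exposed as writable sysfs attributes via
 * deadline_attrs[] below, e.g. (assuming a disk named "sda"):
 *
 *   echo 32 > /sys/block/sda/queue/iosched/fifo_batch
 *
 * The *_expire values are reported and set in milliseconds through the
 * SHOW_JIFFIES()/STORE_JIFFIES() wrappers.
 */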
enum dd_data_dir {
        DD_READ         = READ,
        DD_WRITE        = WRITE,
};

enum { DD_DIR_COUNT = 2 };

enum dd_prio {
        DD_RT_PRIO      = 0,
        DD_BE_PRIO      = 1,
        DD_IDLE_PRIO    = 2,
        DD_PRIO_MAX     = 2,
};

enum { DD_PRIO_COUNT = 3 };
/* I/O statistics per I/O priority. */
struct io_stats_per_prio {
        local_t inserted;
        local_t merged;
        local_t dispatched;
        local_t completed;
};

/* I/O statistics for all I/O priorities (enum dd_prio). */
struct io_stats {
        struct io_stats_per_prio stats[DD_PRIO_COUNT];
};
/*
 * Deadline scheduler data per I/O priority (enum dd_prio). Requests are
 * present on both sort_list[] and fifo_list[].
 */
struct dd_per_prio {
        struct list_head dispatch;
        struct rb_root sort_list[DD_DIR_COUNT];
        struct list_head fifo_list[DD_DIR_COUNT];
        /* Next request in FIFO order. Read, write or both are NULL. */
        struct request *next_rq[DD_DIR_COUNT];
};
struct deadline_data {
        /*
         * run time data
         */

        struct dd_per_prio per_prio[DD_PRIO_COUNT];

        /* Data direction of latest dispatched request. */
        enum dd_data_dir last_dir;
        unsigned int batching;          /* number of sequential requests made */
        unsigned int starved;           /* times reads have starved writes */

        struct io_stats __percpu *stats;

        /*
         * settings that change how the i/o scheduler behaves
         */
        int fifo_expire[DD_DIR_COUNT];
        int fifo_batch;
        int writes_starved;
        int front_merges;
        u32 async_depth;
        int aging_expire;

        spinlock_t lock;
        spinlock_t zone_lock;
};
/* Count one event of type 'event_type' and with I/O priority 'prio' */
#define dd_count(dd, event_type, prio) do {                             \
        struct io_stats *io_stats = get_cpu_ptr((dd)->stats);           \
                                                                        \
        BUILD_BUG_ON(!__same_type((dd), struct deadline_data *));       \
        BUILD_BUG_ON(!__same_type((prio), enum dd_prio));               \
        local_inc(&io_stats->stats[(prio)].event_type);                 \
        put_cpu_ptr(io_stats);                                          \
} while (0)
/*
 * Returns the total number of dd_count(dd, event_type, prio) calls across all
 * CPUs. No locking or barriers since it is fine if the returned sum is slightly
 * outdated.
 */
#define dd_sum(dd, event_type, prio) ({                                 \
        unsigned int cpu;                                               \
        u32 sum = 0;                                                    \
                                                                        \
        BUILD_BUG_ON(!__same_type((dd), struct deadline_data *));       \
        BUILD_BUG_ON(!__same_type((prio), enum dd_prio));               \
        for_each_present_cpu(cpu)                                       \
                sum += local_read(&per_cpu_ptr((dd)->stats, cpu)->      \
                                  stats[(prio)].event_type);            \
        sum;                                                            \
})
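
/*
 * These counters feed the derived statistics below: dd_queued() returns
 * inserted - completed, and dd_owned_by_driver() returns
 * dispatched + merged - completed.
 */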
/* Maps an I/O priority class to a deadline scheduler priority. */
static const enum dd_prio ioprio_class_to_prio[] = {
        [IOPRIO_CLASS_NONE]     = DD_BE_PRIO,
        [IOPRIO_CLASS_RT]       = DD_RT_PRIO,
        [IOPRIO_CLASS_BE]       = DD_BE_PRIO,
        [IOPRIO_CLASS_IDLE]     = DD_IDLE_PRIO,
};
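
/*
 * Since dd_dispatch_request() walks the per-priority data in ascending
 * dd_prio order, IOPRIO_CLASS_RT requests are considered before best-effort
 * (BE/NONE) requests, and IOPRIO_CLASS_IDLE requests are only considered
 * when no higher priority requests are pending.
 */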
static inline struct rb_root *
deadline_rb_root(struct dd_per_prio *per_prio, struct request *rq)
{
        return &per_prio->sort_list[rq_data_dir(rq)];
}
/*
 * Returns the I/O priority class (IOPRIO_CLASS_*) that has been assigned to a
 * request.
 */
static u8 dd_rq_ioclass(struct request *rq)
{
        return IOPRIO_PRIO_CLASS(req_get_ioprio(rq));
}
/*
 * get the request after `rq' in sector-sorted order
 */
static inline struct request *
deadline_latter_request(struct request *rq)
{
        struct rb_node *node = rb_next(&rq->rb_node);

        if (node)
                return rb_entry_rq(node);

        return NULL;
}
static void
deadline_add_rq_rb(struct dd_per_prio *per_prio, struct request *rq)
{
        struct rb_root *root = deadline_rb_root(per_prio, rq);

        elv_rb_add(root, rq);
}
static inline void
deadline_del_rq_rb(struct dd_per_prio *per_prio, struct request *rq)
{
        const enum dd_data_dir data_dir = rq_data_dir(rq);

        if (per_prio->next_rq[data_dir] == rq)
                per_prio->next_rq[data_dir] = deadline_latter_request(rq);

        elv_rb_del(deadline_rb_root(per_prio, rq), rq);
}
/*
 * remove rq from rbtree and fifo.
 */
static void deadline_remove_request(struct request_queue *q,
                                    struct dd_per_prio *per_prio,
                                    struct request *rq)
{
        list_del_init(&rq->queuelist);

        /*
         * We might not be on the rbtree, if we are doing an insert merge
         */
        if (!RB_EMPTY_NODE(&rq->rb_node))
                deadline_del_rq_rb(per_prio, rq);

        elv_rqhash_del(q, rq);
        if (q->last_merge == rq)
                q->last_merge = NULL;
}
static void dd_request_merged(struct request_queue *q, struct request *req,
                              enum elv_merge type)
{
        struct deadline_data *dd = q->elevator->elevator_data;
        const u8 ioprio_class = dd_rq_ioclass(req);
        const enum dd_prio prio = ioprio_class_to_prio[ioprio_class];
        struct dd_per_prio *per_prio = &dd->per_prio[prio];

        /*
         * if the merge was a front merge, we need to reposition request
         */
        if (type == ELEVATOR_FRONT_MERGE) {
                elv_rb_del(deadline_rb_root(per_prio, req), req);
                deadline_add_rq_rb(per_prio, req);
        }
}
/*
 * Callback function that is invoked after @next has been merged into @req.
 */
static void dd_merged_requests(struct request_queue *q, struct request *req,
                               struct request *next)
{
        struct deadline_data *dd = q->elevator->elevator_data;
        const u8 ioprio_class = dd_rq_ioclass(next);
        const enum dd_prio prio = ioprio_class_to_prio[ioprio_class];

        dd_count(dd, merged, prio);

        /*
         * if next expires before rq, assign its expire time to rq
         * and move into next position (next will be deleted) in fifo
         */
        if (!list_empty(&req->queuelist) && !list_empty(&next->queuelist)) {
                if (time_before((unsigned long)next->fifo_time,
                                (unsigned long)req->fifo_time)) {
                        list_move(&req->queuelist, &next->queuelist);
                        req->fifo_time = next->fifo_time;
                }
        }

        /*
         * kill knowledge of next, this one is a goner
         */
        deadline_remove_request(q, &dd->per_prio[prio], next);
}
/*
 * move an entry to dispatch queue
 */
static void
deadline_move_request(struct deadline_data *dd, struct dd_per_prio *per_prio,
                      struct request *rq)
{
        const enum dd_data_dir data_dir = rq_data_dir(rq);

        per_prio->next_rq[data_dir] = deadline_latter_request(rq);

        /*
         * take it off the sort and fifo list
         */
        deadline_remove_request(rq->q, per_prio, rq);
}
/* Number of requests queued for a given priority level. */
static u32 dd_queued(struct deadline_data *dd, enum dd_prio prio)
{
        return dd_sum(dd, inserted, prio) - dd_sum(dd, completed, prio);
}
/*
 * deadline_check_fifo returns 0 if there are no expired requests on the fifo,
 * 1 otherwise. Requires !list_empty(&per_prio->fifo_list[data_dir])
 */
static inline int deadline_check_fifo(struct dd_per_prio *per_prio,
                                      enum dd_data_dir data_dir)
{
        struct request *rq = rq_entry_fifo(per_prio->fifo_list[data_dir].next);

        /*
         * rq is expired!
         */
        if (time_after_eq(jiffies, (unsigned long)rq->fifo_time))
                return 1;

        return 0;
}
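
/*
 * Only the head of the FIFO needs to be checked above: fifo_time is assigned
 * in insertion order (jiffies + dd->fifo_expire[data_dir] in
 * dd_insert_request()), so the oldest request on the list is also the one
 * whose deadline expires first.
 */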
/*
 * For the specified data direction, return the next request to
 * dispatch using arrival ordered lists.
 */
static struct request *
deadline_fifo_request(struct deadline_data *dd, struct dd_per_prio *per_prio,
                      enum dd_data_dir data_dir)
{
        struct request *rq;
        unsigned long flags;

        if (list_empty(&per_prio->fifo_list[data_dir]))
                return NULL;

        rq = rq_entry_fifo(per_prio->fifo_list[data_dir].next);
        if (data_dir == DD_READ || !blk_queue_is_zoned(rq->q))
                return rq;

        /*
         * Look for a write request that can be dispatched, that is one with
         * an unlocked target zone.
         */
        spin_lock_irqsave(&dd->zone_lock, flags);
        list_for_each_entry(rq, &per_prio->fifo_list[DD_WRITE], queuelist) {
                if (blk_req_can_dispatch_to_zone(rq))
                        goto out;
        }
        rq = NULL;
out:
        spin_unlock_irqrestore(&dd->zone_lock, flags);

        return rq;
}
/*
 * For the specified data direction, return the next request to
 * dispatch using sector position sorted lists.
 */
static struct request *
deadline_next_request(struct deadline_data *dd, struct dd_per_prio *per_prio,
                      enum dd_data_dir data_dir)
{
        struct request *rq;
        unsigned long flags;

        rq = per_prio->next_rq[data_dir];
        if (!rq)
                return NULL;

        if (data_dir == DD_READ || !blk_queue_is_zoned(rq->q))
                return rq;

        /*
         * Look for a write request that can be dispatched, that is one with
         * an unlocked target zone.
         */
        spin_lock_irqsave(&dd->zone_lock, flags);
        while (rq) {
                if (blk_req_can_dispatch_to_zone(rq))
                        break;
                rq = deadline_latter_request(rq);
        }
        spin_unlock_irqrestore(&dd->zone_lock, flags);

        return rq;
}
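
/*
 * For zoned block devices both helpers above may return NULL even though
 * write requests are queued, namely when every queued write targets a zone
 * that is currently write-locked. __dd_dispatch_request() handles that case
 * and dd_finish_request() restarts the queue once a zone is unlocked.
 */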
/*
 * deadline_dispatch_requests selects the best request according to
 * read/write expire, fifo_batch, etc and with a start time <= @latest.
 */
static struct request *__dd_dispatch_request(struct deadline_data *dd,
                                             struct dd_per_prio *per_prio,
                                             u64 latest_start_ns)
{
        struct request *rq, *next_rq;
        enum dd_data_dir data_dir;
        enum dd_prio prio;
        u8 ioprio_class;

        lockdep_assert_held(&dd->lock);

        if (!list_empty(&per_prio->dispatch)) {
                rq = list_first_entry(&per_prio->dispatch, struct request,
                                      queuelist);
                if (rq->start_time_ns > latest_start_ns)
                        return NULL;
                list_del_init(&rq->queuelist);
                goto done;
        }

        /*
         * batches are currently reads XOR writes
         */
        rq = deadline_next_request(dd, per_prio, dd->last_dir);
        if (rq && dd->batching < dd->fifo_batch)
                /* we have a next request and are still entitled to batch */
                goto dispatch_request;

        /*
         * at this point we are not running a batch. select the appropriate
         * data direction (read / write)
         */

        if (!list_empty(&per_prio->fifo_list[DD_READ])) {
                BUG_ON(RB_EMPTY_ROOT(&per_prio->sort_list[DD_READ]));

                if (deadline_fifo_request(dd, per_prio, DD_WRITE) &&
                    (dd->starved++ >= dd->writes_starved))
                        goto dispatch_writes;

                data_dir = DD_READ;

                goto dispatch_find_request;
        }

        /*
         * there are either no reads or writes have been starved
         */

        if (!list_empty(&per_prio->fifo_list[DD_WRITE])) {
dispatch_writes:
                BUG_ON(RB_EMPTY_ROOT(&per_prio->sort_list[DD_WRITE]));

                dd->starved = 0;

                data_dir = DD_WRITE;

                goto dispatch_find_request;
        }

        return NULL;

dispatch_find_request:
        /*
         * we are not running a batch, find best request for selected data_dir
         */
        next_rq = deadline_next_request(dd, per_prio, data_dir);
        if (deadline_check_fifo(per_prio, data_dir) || !next_rq) {
                /*
                 * A deadline has expired, the last request was in the other
                 * direction, or we have run out of higher-sectored requests.
                 * Start again from the request with the earliest expiry time.
                 */
                rq = deadline_fifo_request(dd, per_prio, data_dir);
        } else {
                /*
                 * The last req was the same dir and we have a next request in
                 * sort order. No expired requests so continue on from here.
                 */
                rq = next_rq;
        }

        /*
         * For a zoned block device, if we only have writes queued and none of
         * them can be dispatched, rq will be NULL.
         */
        if (!rq)
                return NULL;

        dd->last_dir = data_dir;
        dd->batching = 0;

dispatch_request:
        if (rq->start_time_ns > latest_start_ns)
                return NULL;
        /*
         * rq is the selected appropriate request.
         */
        dd->batching++;
        deadline_move_request(dd, per_prio, rq);
done:
        ioprio_class = dd_rq_ioclass(rq);
        prio = ioprio_class_to_prio[ioprio_class];
        dd_count(dd, dispatched, prio);
        /*
         * If the request needs its target zone locked, do it.
         */
        blk_req_zone_write_lock(rq);
        rq->rq_flags |= RQF_STARTED;
        return rq;
}
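
/*
 * Dispatch order within one priority level, as implemented above: requests
 * parked on the dispatch list go first, then up to fifo_batch requests are
 * served in sector order in the current direction before a new direction is
 * chosen. Reads are preferred, but after writes_starved consecutive read
 * batch selections while writes are pending, a write batch is issued. With
 * the defaults (fifo_batch = 16, writes_starved = 2) that allows roughly 32
 * sequential reads before a pending write is served.
 */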
/*
 * Called from blk_mq_run_hw_queue() -> __blk_mq_sched_dispatch_requests().
 *
 * One confusing aspect here is that we get called for a specific
 * hardware queue, but we may return a request that is for a
 * different hardware queue. This is because mq-deadline has shared
 * state for all hardware queues, in terms of sorting, FIFOs, etc.
 */
static struct request *dd_dispatch_request(struct blk_mq_hw_ctx *hctx)
{
        struct deadline_data *dd = hctx->queue->elevator->elevator_data;
        const u64 now_ns = ktime_get_ns();
        struct request *rq = NULL;
        enum dd_prio prio;

        spin_lock(&dd->lock);
        /*
         * Start with dispatching requests whose deadline expired more than
         * aging_expire jiffies ago.
         */
        for (prio = DD_BE_PRIO; prio <= DD_PRIO_MAX; prio++) {
                rq = __dd_dispatch_request(dd, &dd->per_prio[prio], now_ns -
                                           jiffies_to_nsecs(dd->aging_expire));
                if (rq)
                        goto unlock;
        }
        /*
         * Next, dispatch requests in priority order. Ignore lower priority
         * requests if any higher priority requests are pending.
         */
        for (prio = 0; prio <= DD_PRIO_MAX; prio++) {
                rq = __dd_dispatch_request(dd, &dd->per_prio[prio], now_ns);
                if (rq || dd_queued(dd, prio))
                        break;
        }

unlock:
        spin_unlock(&dd->lock);

        return rq;
}
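
/*
 * Note that the aging pass starts at DD_BE_PRIO: real-time requests are
 * always served first by the priority loop, so only best-effort and idle
 * requests can age. The pass hands __dd_dispatch_request() a start-time
 * cutoff of "now - aging_expire", which limits it to requests that have
 * been queued for at least aging_expire jiffies.
 */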
/*
 * Called by __blk_mq_alloc_request(). The shallow_depth value set by this
 * function is used by __blk_mq_get_tag().
 */
static void dd_limit_depth(unsigned int op, struct blk_mq_alloc_data *data)
{
        struct deadline_data *dd = data->q->elevator->elevator_data;

        /* Do not throttle synchronous reads. */
        if (op_is_sync(op) && !op_is_write(op))
                return;

        /*
         * Throttle asynchronous requests and writes such that these requests
         * do not block the allocation of synchronous requests.
         */
        data->shallow_depth = dd->async_depth;
}
/* Called by blk_mq_update_nr_requests(). */
static void dd_depth_updated(struct blk_mq_hw_ctx *hctx)
{
        struct request_queue *q = hctx->queue;
        struct deadline_data *dd = q->elevator->elevator_data;
        struct blk_mq_tags *tags = hctx->sched_tags;

        dd->async_depth = max(1UL, 3 * q->nr_requests / 4);

        sbitmap_queue_min_shallow_depth(tags->bitmap_tags, dd->async_depth);
}
/* Called by blk_mq_init_hctx() and blk_mq_init_sched(). */
static int dd_init_hctx(struct blk_mq_hw_ctx *hctx, unsigned int hctx_idx)
{
        dd_depth_updated(hctx);

        return 0;
}
static void dd_exit_sched(struct elevator_queue *e)
{
        struct deadline_data *dd = e->elevator_data;
        enum dd_prio prio;

        for (prio = 0; prio <= DD_PRIO_MAX; prio++) {
                struct dd_per_prio *per_prio = &dd->per_prio[prio];

                WARN_ON_ONCE(!list_empty(&per_prio->fifo_list[DD_READ]));
                WARN_ON_ONCE(!list_empty(&per_prio->fifo_list[DD_WRITE]));
        }

        free_percpu(dd->stats);

        kfree(dd);
}
/*
 * initialize elevator private data (deadline_data).
 */
static int dd_init_sched(struct request_queue *q, struct elevator_type *e)
{
        struct deadline_data *dd;
        struct elevator_queue *eq;
        enum dd_prio prio;
        int ret = -ENOMEM;

        eq = elevator_alloc(q, e);
        if (!eq)
                return ret;

        dd = kzalloc_node(sizeof(*dd), GFP_KERNEL, q->node);
        if (!dd)
                goto put_eq;

        eq->elevator_data = dd;

        dd->stats = alloc_percpu_gfp(typeof(*dd->stats),
                                     GFP_KERNEL | __GFP_ZERO);
        if (!dd->stats)
                goto free_dd;

        for (prio = 0; prio <= DD_PRIO_MAX; prio++) {
                struct dd_per_prio *per_prio = &dd->per_prio[prio];

                INIT_LIST_HEAD(&per_prio->dispatch);
                INIT_LIST_HEAD(&per_prio->fifo_list[DD_READ]);
                INIT_LIST_HEAD(&per_prio->fifo_list[DD_WRITE]);
                per_prio->sort_list[DD_READ] = RB_ROOT;
                per_prio->sort_list[DD_WRITE] = RB_ROOT;
        }
        dd->fifo_expire[DD_READ] = read_expire;
        dd->fifo_expire[DD_WRITE] = write_expire;
        dd->writes_starved = writes_starved;
        dd->front_merges = 1;
        dd->last_dir = DD_WRITE;
        dd->fifo_batch = fifo_batch;
        dd->aging_expire = aging_expire;
        spin_lock_init(&dd->lock);
        spin_lock_init(&dd->zone_lock);

        q->elevator = eq;
        return 0;

free_dd:
        kfree(dd);

put_eq:
        kobject_put(&eq->kobj);
        return ret;
}
/*
 * Try to merge @bio into an existing request. If @bio has been merged into
 * an existing request, store the pointer to that request into *@rq.
 */
static int dd_request_merge(struct request_queue *q, struct request **rq,
                            struct bio *bio)
{
        struct deadline_data *dd = q->elevator->elevator_data;
        const u8 ioprio_class = IOPRIO_PRIO_CLASS(bio->bi_ioprio);
        const enum dd_prio prio = ioprio_class_to_prio[ioprio_class];
        struct dd_per_prio *per_prio = &dd->per_prio[prio];
        sector_t sector = bio_end_sector(bio);
        struct request *__rq;

        if (!dd->front_merges)
                return ELEVATOR_NO_MERGE;

        __rq = elv_rb_find(&per_prio->sort_list[bio_data_dir(bio)], sector);
        if (__rq) {
                BUG_ON(sector != blk_rq_pos(__rq));

                if (elv_bio_merge_ok(__rq, bio)) {
                        *rq = __rq;
                        return ELEVATOR_FRONT_MERGE;
                }
        }

        return ELEVATOR_NO_MERGE;
}
/*
 * Attempt to merge a bio into an existing request. This function is called
 * before @bio is associated with a request.
 */
static bool dd_bio_merge(struct request_queue *q, struct bio *bio,
                         unsigned int nr_segs)
{
        struct deadline_data *dd = q->elevator->elevator_data;
        struct request *free = NULL;
        bool ret;

        spin_lock(&dd->lock);
        ret = blk_mq_sched_try_merge(q, bio, nr_segs, &free);
        spin_unlock(&dd->lock);

        if (free)
                blk_mq_free_request(free);

        return ret;
}
/*
 * add rq to rbtree and fifo
 */
static void dd_insert_request(struct blk_mq_hw_ctx *hctx, struct request *rq,
                              bool at_head)
{
        struct request_queue *q = hctx->queue;
        struct deadline_data *dd = q->elevator->elevator_data;
        const enum dd_data_dir data_dir = rq_data_dir(rq);
        u16 ioprio = req_get_ioprio(rq);
        u8 ioprio_class = IOPRIO_PRIO_CLASS(ioprio);
        struct dd_per_prio *per_prio;
        enum dd_prio prio;
        LIST_HEAD(free);

        lockdep_assert_held(&dd->lock);

        /*
         * This may be a requeue of a write request that has locked its
         * target zone. If it is the case, this releases the zone lock.
         */
        blk_req_zone_write_unlock(rq);

        prio = ioprio_class_to_prio[ioprio_class];
        dd_count(dd, inserted, prio);

        if (blk_mq_sched_try_insert_merge(q, rq, &free)) {
                blk_mq_free_requests(&free);
                return;
        }

        trace_block_rq_insert(rq);

        per_prio = &dd->per_prio[prio];
        if (at_head) {
                list_add(&rq->queuelist, &per_prio->dispatch);
        } else {
                deadline_add_rq_rb(per_prio, rq);

                if (rq_mergeable(rq)) {
                        elv_rqhash_add(q, rq);
                        if (!q->last_merge)
                                q->last_merge = rq;
                }

                /*
                 * set expire time and add to fifo list
                 */
                rq->fifo_time = jiffies + dd->fifo_expire[data_dir];
                list_add_tail(&rq->queuelist, &per_prio->fifo_list[data_dir]);
        }
}
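
/*
 * Requests inserted with at_head == true bypass the rb-tree and FIFO
 * entirely and sit on the per-priority dispatch list, from which
 * __dd_dispatch_request() drains them before considering anything else.
 */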
/*
 * Called from blk_mq_sched_insert_request() or blk_mq_sched_insert_requests().
 */
static void dd_insert_requests(struct blk_mq_hw_ctx *hctx,
                               struct list_head *list, bool at_head)
{
        struct request_queue *q = hctx->queue;
        struct deadline_data *dd = q->elevator->elevator_data;

        spin_lock(&dd->lock);
        while (!list_empty(list)) {
                struct request *rq;

                rq = list_first_entry(list, struct request, queuelist);
                list_del_init(&rq->queuelist);
                dd_insert_request(hctx, rq, at_head);
        }
        spin_unlock(&dd->lock);
}
/*
 * Nothing to do here. This is defined only to ensure that the
 * .finish_request method is called upon request completion.
 */
static void dd_prepare_request(struct request *rq)
{
}
/*
 * Callback from inside blk_mq_free_request().
 *
 * For zoned block devices, write unlock the target zone of
 * completed write requests. Do this while holding the zone lock
 * spinlock so that the zone is never unlocked while deadline_fifo_request()
 * or deadline_next_request() are executing. This function is called for
 * all requests, whether or not these requests complete successfully.
 *
 * For a zoned block device, __dd_dispatch_request() may have stopped
 * dispatching requests if all the queued requests are write requests directed
 * at zones that are already locked due to on-going write requests. To ensure
 * write request dispatch progress in this case, mark the queue as needing a
 * restart to ensure that the queue is run again after completion of the
 * request and zones being unlocked.
 */
static void dd_finish_request(struct request *rq)
{
        struct request_queue *q = rq->q;
        struct deadline_data *dd = q->elevator->elevator_data;
        const u8 ioprio_class = dd_rq_ioclass(rq);
        const enum dd_prio prio = ioprio_class_to_prio[ioprio_class];
        struct dd_per_prio *per_prio = &dd->per_prio[prio];

        dd_count(dd, completed, prio);

        if (blk_queue_is_zoned(q)) {
                unsigned long flags;

                spin_lock_irqsave(&dd->zone_lock, flags);
                blk_req_zone_write_unlock(rq);
                if (!list_empty(&per_prio->fifo_list[DD_WRITE]))
                        blk_mq_sched_mark_restart_hctx(rq->mq_hctx);
                spin_unlock_irqrestore(&dd->zone_lock, flags);
        }
}
static bool dd_has_work_for_prio(struct dd_per_prio *per_prio)
{
        return !list_empty_careful(&per_prio->dispatch) ||
                !list_empty_careful(&per_prio->fifo_list[DD_READ]) ||
                !list_empty_careful(&per_prio->fifo_list[DD_WRITE]);
}
static bool dd_has_work(struct blk_mq_hw_ctx *hctx)
{
        struct deadline_data *dd = hctx->queue->elevator->elevator_data;
        enum dd_prio prio;

        for (prio = 0; prio <= DD_PRIO_MAX; prio++)
                if (dd_has_work_for_prio(&dd->per_prio[prio]))
                        return true;

        return false;
}
/*
 * sysfs parts below
 */
#define SHOW_INT(__FUNC, __VAR)                                         \
static ssize_t __FUNC(struct elevator_queue *e, char *page)             \
{                                                                       \
        struct deadline_data *dd = e->elevator_data;                    \
                                                                        \
        return sysfs_emit(page, "%d\n", __VAR);                         \
}
#define SHOW_JIFFIES(__FUNC, __VAR) SHOW_INT(__FUNC, jiffies_to_msecs(__VAR))
SHOW_JIFFIES(deadline_read_expire_show, dd->fifo_expire[DD_READ]);
SHOW_JIFFIES(deadline_write_expire_show, dd->fifo_expire[DD_WRITE]);
SHOW_JIFFIES(deadline_aging_expire_show, dd->aging_expire);
SHOW_INT(deadline_writes_starved_show, dd->writes_starved);
SHOW_INT(deadline_front_merges_show, dd->front_merges);
SHOW_INT(deadline_async_depth_show, dd->async_depth);
SHOW_INT(deadline_fifo_batch_show, dd->fifo_batch);
#undef SHOW_INT
#undef SHOW_JIFFIES
#define STORE_FUNCTION(__FUNC, __PTR, MIN, MAX, __CONV)                 \
static ssize_t __FUNC(struct elevator_queue *e, const char *page, size_t count) \
{                                                                       \
        struct deadline_data *dd = e->elevator_data;                    \
        int __data, __ret;                                              \
                                                                        \
        __ret = kstrtoint(page, 0, &__data);                            \
        if (__ret < 0)                                                  \
                return __ret;                                           \
        if (__data < (MIN))                                             \
                __data = (MIN);                                         \
        else if (__data > (MAX))                                        \
                __data = (MAX);                                         \
        *(__PTR) = __CONV(__data);                                      \
        return count;                                                   \
}
#define STORE_INT(__FUNC, __PTR, MIN, MAX)                              \
        STORE_FUNCTION(__FUNC, __PTR, MIN, MAX, )
#define STORE_JIFFIES(__FUNC, __PTR, MIN, MAX)                          \
        STORE_FUNCTION(__FUNC, __PTR, MIN, MAX, msecs_to_jiffies)
STORE_JIFFIES(deadline_read_expire_store, &dd->fifo_expire[DD_READ], 0, INT_MAX);
STORE_JIFFIES(deadline_write_expire_store, &dd->fifo_expire[DD_WRITE], 0, INT_MAX);
STORE_JIFFIES(deadline_aging_expire_store, &dd->aging_expire, 0, INT_MAX);
STORE_INT(deadline_writes_starved_store, &dd->writes_starved, INT_MIN, INT_MAX);
STORE_INT(deadline_front_merges_store, &dd->front_merges, 0, 1);
STORE_INT(deadline_async_depth_store, &dd->async_depth, 1, INT_MAX);
STORE_INT(deadline_fifo_batch_store, &dd->fifo_batch, 0, INT_MAX);
#undef STORE_FUNCTION
#undef STORE_INT
#undef STORE_JIFFIES
#define DD_ATTR(name) \
        __ATTR(name, 0644, deadline_##name##_show, deadline_##name##_store)

static struct elv_fs_entry deadline_attrs[] = {
        DD_ATTR(read_expire),
        DD_ATTR(write_expire),
        DD_ATTR(writes_starved),
        DD_ATTR(front_merges),
        DD_ATTR(async_depth),
        DD_ATTR(fifo_batch),
        DD_ATTR(aging_expire),
        __ATTR_NULL
};
#ifdef CONFIG_BLK_DEBUG_FS
#define DEADLINE_DEBUGFS_DDIR_ATTRS(prio, data_dir, name)               \
static void *deadline_##name##_fifo_start(struct seq_file *m,           \
                                          loff_t *pos)                  \
        __acquires(&dd->lock)                                           \
{                                                                       \
        struct request_queue *q = m->private;                           \
        struct deadline_data *dd = q->elevator->elevator_data;          \
        struct dd_per_prio *per_prio = &dd->per_prio[prio];             \
                                                                        \
        spin_lock(&dd->lock);                                           \
        return seq_list_start(&per_prio->fifo_list[data_dir], *pos);    \
}                                                                       \
                                                                        \
static void *deadline_##name##_fifo_next(struct seq_file *m, void *v,   \
                                         loff_t *pos)                   \
{                                                                       \
        struct request_queue *q = m->private;                           \
        struct deadline_data *dd = q->elevator->elevator_data;          \
        struct dd_per_prio *per_prio = &dd->per_prio[prio];             \
                                                                        \
        return seq_list_next(v, &per_prio->fifo_list[data_dir], pos);   \
}                                                                       \
                                                                        \
static void deadline_##name##_fifo_stop(struct seq_file *m, void *v)    \
        __releases(&dd->lock)                                           \
{                                                                       \
        struct request_queue *q = m->private;                           \
        struct deadline_data *dd = q->elevator->elevator_data;          \
                                                                        \
        spin_unlock(&dd->lock);                                         \
}                                                                       \
                                                                        \
static const struct seq_operations deadline_##name##_fifo_seq_ops = {   \
        .start  = deadline_##name##_fifo_start,                         \
        .next   = deadline_##name##_fifo_next,                          \
        .stop   = deadline_##name##_fifo_stop,                          \
        .show   = blk_mq_debugfs_rq_show,                               \
};                                                                      \
                                                                        \
static int deadline_##name##_next_rq_show(void *data,                   \
                                          struct seq_file *m)           \
{                                                                       \
        struct request_queue *q = data;                                 \
        struct deadline_data *dd = q->elevator->elevator_data;          \
        struct dd_per_prio *per_prio = &dd->per_prio[prio];             \
        struct request *rq = per_prio->next_rq[data_dir];               \
                                                                        \
        if (rq)                                                         \
                __blk_mq_debugfs_rq_show(m, rq);                        \
        return 0;                                                       \
}
DEADLINE_DEBUGFS_DDIR_ATTRS(DD_RT_PRIO, DD_READ, read0);
DEADLINE_DEBUGFS_DDIR_ATTRS(DD_RT_PRIO, DD_WRITE, write0);
DEADLINE_DEBUGFS_DDIR_ATTRS(DD_BE_PRIO, DD_READ, read1);
DEADLINE_DEBUGFS_DDIR_ATTRS(DD_BE_PRIO, DD_WRITE, write1);
DEADLINE_DEBUGFS_DDIR_ATTRS(DD_IDLE_PRIO, DD_READ, read2);
DEADLINE_DEBUGFS_DDIR_ATTRS(DD_IDLE_PRIO, DD_WRITE, write2);
#undef DEADLINE_DEBUGFS_DDIR_ATTRS
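
/*
 * The numeric suffix in the attribute names above encodes the I/O priority:
 * read0/write0 expose the DD_RT_PRIO lists, read1/write1 the DD_BE_PRIO
 * lists and read2/write2 the DD_IDLE_PRIO lists.
 */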
static int deadline_batching_show(void *data, struct seq_file *m)
{
        struct request_queue *q = data;
        struct deadline_data *dd = q->elevator->elevator_data;

        seq_printf(m, "%u\n", dd->batching);
        return 0;
}
static int deadline_starved_show(void *data, struct seq_file *m)
{
        struct request_queue *q = data;
        struct deadline_data *dd = q->elevator->elevator_data;

        seq_printf(m, "%u\n", dd->starved);
        return 0;
}
static int dd_async_depth_show(void *data, struct seq_file *m)
{
        struct request_queue *q = data;
        struct deadline_data *dd = q->elevator->elevator_data;

        seq_printf(m, "%u\n", dd->async_depth);
        return 0;
}
static int dd_queued_show(void *data, struct seq_file *m)
{
        struct request_queue *q = data;
        struct deadline_data *dd = q->elevator->elevator_data;

        seq_printf(m, "%u %u %u\n", dd_queued(dd, DD_RT_PRIO),
                   dd_queued(dd, DD_BE_PRIO),
                   dd_queued(dd, DD_IDLE_PRIO));
        return 0;
}
/* Number of requests owned by the block driver for a given priority. */
static u32 dd_owned_by_driver(struct deadline_data *dd, enum dd_prio prio)
{
        return dd_sum(dd, dispatched, prio) + dd_sum(dd, merged, prio)
                - dd_sum(dd, completed, prio);
}
static int dd_owned_by_driver_show(void *data, struct seq_file *m)
{
        struct request_queue *q = data;
        struct deadline_data *dd = q->elevator->elevator_data;

        seq_printf(m, "%u %u %u\n", dd_owned_by_driver(dd, DD_RT_PRIO),
                   dd_owned_by_driver(dd, DD_BE_PRIO),
                   dd_owned_by_driver(dd, DD_IDLE_PRIO));
        return 0;
}
#define DEADLINE_DISPATCH_ATTR(prio)                                    \
static void *deadline_dispatch##prio##_start(struct seq_file *m,        \
                                             loff_t *pos)               \
        __acquires(&dd->lock)                                           \
{                                                                       \
        struct request_queue *q = m->private;                           \
        struct deadline_data *dd = q->elevator->elevator_data;          \
        struct dd_per_prio *per_prio = &dd->per_prio[prio];             \
                                                                        \
        spin_lock(&dd->lock);                                           \
        return seq_list_start(&per_prio->dispatch, *pos);               \
}                                                                       \
                                                                        \
static void *deadline_dispatch##prio##_next(struct seq_file *m,         \
                                            void *v, loff_t *pos)       \
{                                                                       \
        struct request_queue *q = m->private;                           \
        struct deadline_data *dd = q->elevator->elevator_data;          \
        struct dd_per_prio *per_prio = &dd->per_prio[prio];             \
                                                                        \
        return seq_list_next(v, &per_prio->dispatch, pos);              \
}                                                                       \
                                                                        \
static void deadline_dispatch##prio##_stop(struct seq_file *m, void *v) \
        __releases(&dd->lock)                                           \
{                                                                       \
        struct request_queue *q = m->private;                           \
        struct deadline_data *dd = q->elevator->elevator_data;          \
                                                                        \
        spin_unlock(&dd->lock);                                         \
}                                                                       \
                                                                        \
static const struct seq_operations deadline_dispatch##prio##_seq_ops = { \
        .start  = deadline_dispatch##prio##_start,                      \
        .next   = deadline_dispatch##prio##_next,                       \
        .stop   = deadline_dispatch##prio##_stop,                       \
        .show   = blk_mq_debugfs_rq_show,                               \
}
DEADLINE_DISPATCH_ATTR(0);
DEADLINE_DISPATCH_ATTR(1);
DEADLINE_DISPATCH_ATTR(2);
#undef DEADLINE_DISPATCH_ATTR
#define DEADLINE_QUEUE_DDIR_ATTRS(name)                                 \
        {#name "_fifo_list", 0400,                                      \
         .seq_ops = &deadline_##name##_fifo_seq_ops}
#define DEADLINE_NEXT_RQ_ATTR(name)                                     \
        {#name "_next_rq", 0400, deadline_##name##_next_rq_show}
static const struct blk_mq_debugfs_attr deadline_queue_debugfs_attrs[] = {
        DEADLINE_QUEUE_DDIR_ATTRS(read0),
        DEADLINE_QUEUE_DDIR_ATTRS(write0),
        DEADLINE_QUEUE_DDIR_ATTRS(read1),
        DEADLINE_QUEUE_DDIR_ATTRS(write1),
        DEADLINE_QUEUE_DDIR_ATTRS(read2),
        DEADLINE_QUEUE_DDIR_ATTRS(write2),
        DEADLINE_NEXT_RQ_ATTR(read0),
        DEADLINE_NEXT_RQ_ATTR(write0),
        DEADLINE_NEXT_RQ_ATTR(read1),
        DEADLINE_NEXT_RQ_ATTR(write1),
        DEADLINE_NEXT_RQ_ATTR(read2),
        DEADLINE_NEXT_RQ_ATTR(write2),
        {"batching", 0400, deadline_batching_show},
        {"starved", 0400, deadline_starved_show},
        {"async_depth", 0400, dd_async_depth_show},
        {"dispatch0", 0400, .seq_ops = &deadline_dispatch0_seq_ops},
        {"dispatch1", 0400, .seq_ops = &deadline_dispatch1_seq_ops},
        {"dispatch2", 0400, .seq_ops = &deadline_dispatch2_seq_ops},
        {"owned_by_driver", 0400, dd_owned_by_driver_show},
        {"queued", 0400, dd_queued_show},
        {},
};
#undef DEADLINE_QUEUE_DDIR_ATTRS
#endif
static struct elevator_type mq_deadline = {
        .ops = {
                .depth_updated          = dd_depth_updated,
                .limit_depth            = dd_limit_depth,
                .insert_requests       = dd_insert_requests,
                .dispatch_request       = dd_dispatch_request,
                .prepare_request        = dd_prepare_request,
                .finish_request         = dd_finish_request,
                .next_request           = elv_rb_latter_request,
                .former_request         = elv_rb_former_request,
                .bio_merge              = dd_bio_merge,
                .request_merge          = dd_request_merge,
                .requests_merged        = dd_merged_requests,
                .request_merged         = dd_request_merged,
                .has_work               = dd_has_work,
                .init_sched             = dd_init_sched,
                .exit_sched             = dd_exit_sched,
                .init_hctx              = dd_init_hctx,
        },

#ifdef CONFIG_BLK_DEBUG_FS
        .queue_debugfs_attrs = deadline_queue_debugfs_attrs,
#endif
        .elevator_attrs = deadline_attrs,
        .elevator_name = "mq-deadline",
        .elevator_alias = "deadline",
        .elevator_features = ELEVATOR_F_ZBD_SEQ_WRITE,
        .elevator_owner = THIS_MODULE,
};
MODULE_ALIAS("mq-deadline-iosched");

static int __init deadline_init(void)
{
        return elv_register(&mq_deadline);
}

static void __exit deadline_exit(void)
{
        elv_unregister(&mq_deadline);
}

module_init(deadline_init);
module_exit(deadline_exit);

MODULE_AUTHOR("Jens Axboe, Damien Le Moal and Bart Van Assche");
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("MQ deadline IO scheduler");