// SPDX-License-Identifier: GPL-2.0
/*
 * The Kyber I/O scheduler. Controls latency by throttling queue depths using
 * scalable techniques.
 *
 * Copyright (C) 2017 Facebook
 */

#include <linux/kernel.h>
#include <linux/blkdev.h>
#include <linux/blk-mq.h>
#include <linux/module.h>
#include <linux/sbitmap.h>

#include <trace/events/block.h>

#include "elevator.h"
#include "blk.h"
#include "blk-mq.h"
#include "blk-mq-debugfs.h"
#include "blk-mq-sched.h"
#include "blk-mq-tag.h"

#define CREATE_TRACE_POINTS
#include <trace/events/kyber.h>

/*
 * Scheduling domains: the device is divided into multiple domains based on the
 * request type.
 */
enum {
	KYBER_READ,
	KYBER_WRITE,
	KYBER_DISCARD,
	KYBER_OTHER,
	KYBER_NUM_DOMAINS,
};

static const char *kyber_domain_names[] = {
	[KYBER_READ] = "READ",
	[KYBER_WRITE] = "WRITE",
	[KYBER_DISCARD] = "DISCARD",
	[KYBER_OTHER] = "OTHER",
};

enum {
	/*
	 * In order to prevent starvation of synchronous requests by a flood of
	 * asynchronous requests, we reserve 25% of requests for synchronous
	 * operations.
	 */
	KYBER_ASYNC_PERCENT = 75,
};

/*
 * Maximum device-wide depth for each scheduling domain.
 *
 * Even for fast devices with lots of tags like NVMe, you can saturate the
 * device with only a fraction of the maximum possible queue depth. So, we cap
 * these to a reasonable value.
 */
static const unsigned int kyber_depth[] = {
	[KYBER_READ] = 256,
	[KYBER_WRITE] = 128,
	[KYBER_DISCARD] = 64,
	[KYBER_OTHER] = 16,
};

/*
 * Default latency targets for each scheduling domain.
 */
static const u64 kyber_latency_targets[] = {
	[KYBER_READ] = 2ULL * NSEC_PER_MSEC,
	[KYBER_WRITE] = 10ULL * NSEC_PER_MSEC,
	[KYBER_DISCARD] = 5ULL * NSEC_PER_SEC,
};

/*
 * Batch size (number of requests we'll dispatch in a row) for each scheduling
 * domain.
 */
static const unsigned int kyber_batch_size[] = {
	[KYBER_READ] = 16,
	[KYBER_WRITE] = 8,
	[KYBER_DISCARD] = 1,
	[KYBER_OTHER] = 1,
};

/*
 * Request latencies are recorded in a histogram with buckets defined relative
 * to the target latency:
 *
 * <= 1/4 * target latency
 * <= 1/2 * target latency
 * <= 3/4 * target latency
 * <= target latency
 * <= 1 1/4 * target latency
 * <= 1 1/2 * target latency
 * <= 1 3/4 * target latency
 * > 1 3/4 * target latency
 */
enum {
	/*
	 * The width of the latency histogram buckets is
	 * 1 / (1 << KYBER_LATENCY_SHIFT) * target latency.
	 */
	KYBER_LATENCY_SHIFT = 2,
	/*
	 * The first (1 << KYBER_LATENCY_SHIFT) buckets are <= target latency,
	 * and thus are "good".
	 */
	KYBER_GOOD_BUCKETS = 1 << KYBER_LATENCY_SHIFT,
	/* There are also (1 << KYBER_LATENCY_SHIFT) "bad" buckets. */
	KYBER_LATENCY_BUCKETS = 2 << KYBER_LATENCY_SHIFT,
};

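/*
 * For example, with the default 2 ms read target the bucket boundaries work
 * out to 0.5, 1.0, 1.5, 2.0, 2.5, 3.0, and 3.5 ms: a read completing in
 * 1.2 ms falls in the third ("good") bucket, while one completing in 3.2 ms
 * falls in the seventh ("bad") bucket.
 */
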
/*
 * We measure both the total latency and the I/O latency (i.e., latency after
 * submitting to the device).
 */
enum {
	KYBER_TOTAL_LATENCY,
	KYBER_IO_LATENCY,
};

static const char *kyber_latency_type_names[] = {
	[KYBER_TOTAL_LATENCY] = "total",
	[KYBER_IO_LATENCY] = "I/O",
};

/*
 * Per-cpu latency histograms: total latency and I/O latency for each scheduling
 * domain except for KYBER_OTHER.
 */
struct kyber_cpu_latency {
	atomic_t buckets[KYBER_OTHER][2][KYBER_LATENCY_BUCKETS];
};

/*
 * There is the same mapping between ctx & hctx and between kcq & khd; we use
 * request->mq_ctx->index_hw to index the kcq in khd.
 */
struct kyber_ctx_queue {
	/*
	 * Used to ensure that operations on rq_list and kcq_map are atomic.
	 * Also protects the requests on rq_list during merges.
	 */
	spinlock_t lock;
	struct list_head rq_list[KYBER_NUM_DOMAINS];
} ____cacheline_aligned_in_smp;

struct kyber_queue_data {
	struct request_queue *q;
	dev_t dev;

	/*
	 * Each scheduling domain has a limited number of in-flight requests
	 * device-wide, limited by these tokens.
	 */
	struct sbitmap_queue domain_tokens[KYBER_NUM_DOMAINS];

	/*
	 * Async request percentage, converted to per-word depth for
	 * sbitmap_get_shallow().
	 */
	unsigned int async_depth;

	struct kyber_cpu_latency __percpu *cpu_latency;

	/* Timer for stats aggregation and adjusting domain tokens. */
	struct timer_list timer;

	unsigned int latency_buckets[KYBER_OTHER][2][KYBER_LATENCY_BUCKETS];

	unsigned long latency_timeout[KYBER_OTHER];

	int domain_p99[KYBER_OTHER];

	/* Target latencies in nanoseconds. */
	u64 latency_targets[KYBER_OTHER];
};

struct kyber_hctx_data {
	spinlock_t lock;
	struct list_head rqs[KYBER_NUM_DOMAINS];
	unsigned int cur_domain;
	unsigned int batching;
	struct kyber_ctx_queue *kcqs;
	struct sbitmap kcq_map[KYBER_NUM_DOMAINS];
	struct sbq_wait domain_wait[KYBER_NUM_DOMAINS];
	struct sbq_wait_state *domain_ws[KYBER_NUM_DOMAINS];
	atomic_t wait_index[KYBER_NUM_DOMAINS];
};

static int kyber_domain_wake(wait_queue_entry_t *wait, unsigned mode, int flags,
			     void *key);

static unsigned int kyber_sched_domain(blk_opf_t opf)
{
	switch (opf & REQ_OP_MASK) {
	case REQ_OP_READ:
		return KYBER_READ;
	case REQ_OP_WRITE:
		return KYBER_WRITE;
	case REQ_OP_DISCARD:
		return KYBER_DISCARD;
	default:
		return KYBER_OTHER;
	}
}

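/*
 * Drain one CPU's histogram into the queue-wide aggregate. atomic_xchg()
 * zeroes each per-cpu bucket as it is read, so samples recorded concurrently
 * are picked up by the next flush rather than lost.
 */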
static void flush_latency_buckets(struct kyber_queue_data *kqd,
				  struct kyber_cpu_latency *cpu_latency,
				  unsigned int sched_domain, unsigned int type)
{
	unsigned int *buckets = kqd->latency_buckets[sched_domain][type];
	atomic_t *cpu_buckets = cpu_latency->buckets[sched_domain][type];
	unsigned int bucket;

	for (bucket = 0; bucket < KYBER_LATENCY_BUCKETS; bucket++)
		buckets[bucket] += atomic_xchg(&cpu_buckets[bucket], 0);
}

/*
 * Calculate the histogram bucket with the given percentile rank, or -1 if there
 * aren't enough samples yet.
 */
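/*
 * A sketch of the walk below, assuming 600 samples at the 90th percentile:
 * percentile_samples = DIV_ROUND_UP(600 * 90, 100) = 540, and the loop stops
 * in the first bucket where the cumulative sample count reaches 540.
 */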
static int calculate_percentile(struct kyber_queue_data *kqd,
				unsigned int sched_domain, unsigned int type,
				unsigned int percentile)
{
	unsigned int *buckets = kqd->latency_buckets[sched_domain][type];
	unsigned int bucket, samples = 0, percentile_samples;

	for (bucket = 0; bucket < KYBER_LATENCY_BUCKETS; bucket++)
		samples += buckets[bucket];

	if (!samples)
		return -1;

	/*
	 * We do the calculation once we have 500 samples or one second passes
	 * since the first sample was recorded, whichever comes first.
	 */
	if (!kqd->latency_timeout[sched_domain])
		kqd->latency_timeout[sched_domain] = max(jiffies + HZ, 1UL);
	if (samples < 500 &&
	    time_is_after_jiffies(kqd->latency_timeout[sched_domain])) {
		return -1;
	}
	kqd->latency_timeout[sched_domain] = 0;

	percentile_samples = DIV_ROUND_UP(samples * percentile, 100);
	for (bucket = 0; bucket < KYBER_LATENCY_BUCKETS - 1; bucket++) {
		if (buckets[bucket] >= percentile_samples)
			break;
		percentile_samples -= buckets[bucket];
	}
	memset(buckets, 0, sizeof(kqd->latency_buckets[sched_domain][type]));

	trace_kyber_latency(kqd->dev, kyber_domain_names[sched_domain],
			    kyber_latency_type_names[type], percentile,
			    bucket + 1, 1 << KYBER_LATENCY_SHIFT, samples);

	return bucket;
}

static void kyber_resize_domain(struct kyber_queue_data *kqd,
				unsigned int sched_domain, unsigned int depth)
{
	depth = clamp(depth, 1U, kyber_depth[sched_domain]);
	if (depth != kqd->domain_tokens[sched_domain].sb.depth) {
		sbitmap_queue_resize(&kqd->domain_tokens[sched_domain], depth);
		trace_kyber_adjust(kqd->dev, kyber_domain_names[sched_domain],
				   depth);
	}
}

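/*
 * Runs periodically to aggregate the per-cpu histograms, detect device
 * congestion from the p90 I/O latency, and rescale each domain's token depth
 * based on its p99 total latency.
 */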
static void kyber_timer_fn(struct timer_list *t)
{
	struct kyber_queue_data *kqd = from_timer(kqd, t, timer);
	unsigned int sched_domain;
	int cpu;
	bool bad = false;

	/* Sum all of the per-cpu latency histograms. */
	for_each_online_cpu(cpu) {
		struct kyber_cpu_latency *cpu_latency;

		cpu_latency = per_cpu_ptr(kqd->cpu_latency, cpu);
		for (sched_domain = 0; sched_domain < KYBER_OTHER; sched_domain++) {
			flush_latency_buckets(kqd, cpu_latency, sched_domain,
					      KYBER_TOTAL_LATENCY);
			flush_latency_buckets(kqd, cpu_latency, sched_domain,
					      KYBER_IO_LATENCY);
		}
	}

	/*
	 * Check if any domains have a high I/O latency, which might indicate
	 * congestion in the device. Note that we use the p90; we don't want to
	 * be too sensitive to outliers here.
	 */
	for (sched_domain = 0; sched_domain < KYBER_OTHER; sched_domain++) {
		int p90;

		p90 = calculate_percentile(kqd, sched_domain, KYBER_IO_LATENCY,
					   90);
		if (p90 >= KYBER_GOOD_BUCKETS)
			bad = true;
	}

	/*
	 * Adjust the scheduling domain depths. If we determined that there was
	 * congestion, we throttle all domains with good latencies. Either way,
	 * we ease up on throttling domains with bad latencies.
	 */
	for (sched_domain = 0; sched_domain < KYBER_OTHER; sched_domain++) {
		unsigned int orig_depth, depth;
		int p99;

		p99 = calculate_percentile(kqd, sched_domain,
					   KYBER_TOTAL_LATENCY, 99);
		/*
		 * This is kind of subtle: different domains will not
		 * necessarily have enough samples to calculate the latency
		 * percentiles during the same window, so we have to remember
		 * the p99 for the next time we observe congestion; once we do,
		 * we don't want to throttle again until we get more data, so we
		 * reset it to -1.
		 */
		if (bad) {
			if (p99 < 0)
				p99 = kqd->domain_p99[sched_domain];
			kqd->domain_p99[sched_domain] = -1;
		} else if (p99 >= 0) {
			kqd->domain_p99[sched_domain] = p99;
		}
		if (p99 < 0)
			continue;

		/*
		 * If this domain has bad latency, throttle less. Otherwise,
		 * throttle more iff we determined that there is congestion.
		 *
		 * The new depth is scaled linearly with the p99 latency vs the
		 * latency target. E.g., if the p99 is 3/4 of the target, then
		 * we throttle down to 3/4 of the current depth, and if the p99
		 * is 2x the target, then we double the depth.
		 */
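		/*
		 * Worked example with KYBER_LATENCY_SHIFT = 2: a p99 in
		 * bucket 2 (<= 3/4 of the target) under congestion gives
		 * depth = (orig_depth * 3) >> 2, i.e. 3/4 of the current
		 * depth, while a p99 in the last bucket (> 1 3/4 of the
		 * target) gives (orig_depth * 8) >> 2, i.e. double, clamped
		 * to kyber_depth[] by kyber_resize_domain().
		 */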
		if (bad || p99 >= KYBER_GOOD_BUCKETS) {
			orig_depth = kqd->domain_tokens[sched_domain].sb.depth;
			depth = (orig_depth * (p99 + 1)) >> KYBER_LATENCY_SHIFT;
			kyber_resize_domain(kqd, sched_domain, depth);
		}
	}
}

static struct kyber_queue_data *kyber_queue_data_alloc(struct request_queue *q)
{
	struct kyber_queue_data *kqd;
	int ret = -ENOMEM;
	int i;

	kqd = kzalloc_node(sizeof(*kqd), GFP_KERNEL, q->node);
	if (!kqd)
		goto err;

	kqd->q = q;
	kqd->dev = disk_devt(q->disk);

	kqd->cpu_latency = alloc_percpu_gfp(struct kyber_cpu_latency,
					    GFP_KERNEL | __GFP_ZERO);
	if (!kqd->cpu_latency)
		goto err_kqd;

	timer_setup(&kqd->timer, kyber_timer_fn, 0);

	for (i = 0; i < KYBER_NUM_DOMAINS; i++) {
		WARN_ON(!kyber_depth[i]);
		WARN_ON(!kyber_batch_size[i]);
		ret = sbitmap_queue_init_node(&kqd->domain_tokens[i],
					      kyber_depth[i], -1, false,
					      GFP_KERNEL, q->node);
		if (ret) {
			while (--i >= 0)
				sbitmap_queue_free(&kqd->domain_tokens[i]);
			goto err_buckets;
		}
	}

	for (i = 0; i < KYBER_OTHER; i++) {
		kqd->domain_p99[i] = -1;
		kqd->latency_targets[i] = kyber_latency_targets[i];
	}

	return kqd;

err_buckets:
	free_percpu(kqd->cpu_latency);
err_kqd:
	kfree(kqd);
err:
	return ERR_PTR(ret);
}

static int kyber_init_sched(struct request_queue *q, struct elevator_type *e)
{
	struct kyber_queue_data *kqd;
	struct elevator_queue *eq;

	eq = elevator_alloc(q, e);
	if (!eq)
		return -ENOMEM;

	kqd = kyber_queue_data_alloc(q);
	if (IS_ERR(kqd)) {
		kobject_put(&eq->kobj);
		return PTR_ERR(kqd);
	}

	blk_stat_enable_accounting(q);

	blk_queue_flag_clear(QUEUE_FLAG_SQ_SCHED, q);

	eq->elevator_data = kqd;
	q->elevator = eq;

	return 0;
}

static void kyber_exit_sched(struct elevator_queue *e)
{
	struct kyber_queue_data *kqd = e->elevator_data;
	int i;

	timer_shutdown_sync(&kqd->timer);
	blk_stat_disable_accounting(kqd->q);

	for (i = 0; i < KYBER_NUM_DOMAINS; i++)
		sbitmap_queue_free(&kqd->domain_tokens[i]);
	free_percpu(kqd->cpu_latency);
	kfree(kqd);
}

static void kyber_ctx_queue_init(struct kyber_ctx_queue *kcq)
{
	unsigned int i;

	spin_lock_init(&kcq->lock);
	for (i = 0; i < KYBER_NUM_DOMAINS; i++)
		INIT_LIST_HEAD(&kcq->rq_list[i]);
}

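/*
 * Convert KYBER_ASYNC_PERCENT into a per-sbitmap-word depth. For example,
 * assuming a word shift of 6 (64 bits per word), async_depth becomes
 * 64 * 75 / 100 = 48, leaving at least 16 tags per word that only
 * synchronous requests can claim.
 */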
static void kyber_depth_updated(struct blk_mq_hw_ctx *hctx)
{
	struct kyber_queue_data *kqd = hctx->queue->elevator->elevator_data;
	struct blk_mq_tags *tags = hctx->sched_tags;
	unsigned int shift = tags->bitmap_tags.sb.shift;

	kqd->async_depth = (1U << shift) * KYBER_ASYNC_PERCENT / 100U;

	sbitmap_queue_min_shallow_depth(&tags->bitmap_tags, kqd->async_depth);
}

static int kyber_init_hctx(struct blk_mq_hw_ctx *hctx, unsigned int hctx_idx)
{
	struct kyber_hctx_data *khd;
	int i;

	khd = kmalloc_node(sizeof(*khd), GFP_KERNEL, hctx->numa_node);
	if (!khd)
		return -ENOMEM;

	khd->kcqs = kmalloc_array_node(hctx->nr_ctx,
				       sizeof(struct kyber_ctx_queue),
				       GFP_KERNEL, hctx->numa_node);
	if (!khd->kcqs)
		goto err_khd;

	for (i = 0; i < hctx->nr_ctx; i++)
		kyber_ctx_queue_init(&khd->kcqs[i]);

	for (i = 0; i < KYBER_NUM_DOMAINS; i++) {
		if (sbitmap_init_node(&khd->kcq_map[i], hctx->nr_ctx,
				      ilog2(8), GFP_KERNEL, hctx->numa_node,
				      false, false)) {
			while (--i >= 0)
				sbitmap_free(&khd->kcq_map[i]);
			goto err_kcqs;
		}
	}

	spin_lock_init(&khd->lock);

	for (i = 0; i < KYBER_NUM_DOMAINS; i++) {
		INIT_LIST_HEAD(&khd->rqs[i]);
		khd->domain_wait[i].sbq = NULL;
		init_waitqueue_func_entry(&khd->domain_wait[i].wait,
					  kyber_domain_wake);
		khd->domain_wait[i].wait.private = hctx;
		INIT_LIST_HEAD(&khd->domain_wait[i].wait.entry);
		atomic_set(&khd->wait_index[i], 0);
	}

	khd->cur_domain = 0;
	khd->batching = 0;

	hctx->sched_data = khd;
	kyber_depth_updated(hctx);

	return 0;

err_kcqs:
	kfree(khd->kcqs);
err_khd:
	kfree(khd);
	return -ENOMEM;
}

static void kyber_exit_hctx(struct blk_mq_hw_ctx *hctx, unsigned int hctx_idx)
{
	struct kyber_hctx_data *khd = hctx->sched_data;
	int i;

	for (i = 0; i < KYBER_NUM_DOMAINS; i++)
		sbitmap_free(&khd->kcq_map[i]);
	kfree(khd->kcqs);
	kfree(hctx->sched_data);
}

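/*
 * The domain token held by a request is stashed in rq->elv.priv[0]; -1 means
 * "no token held" (see kyber_prepare_request()).
 */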
static int rq_get_domain_token(struct request *rq)
{
	return (long)rq->elv.priv[0];
}

static void rq_set_domain_token(struct request *rq, int token)
{
	rq->elv.priv[0] = (void *)(long)token;
}

static void rq_clear_domain_token(struct kyber_queue_data *kqd,
				  struct request *rq)
{
	unsigned int sched_domain;
	int nr;

	nr = rq_get_domain_token(rq);
	if (nr != -1) {
		sched_domain = kyber_sched_domain(rq->cmd_flags);
		sbitmap_queue_clear(&kqd->domain_tokens[sched_domain], nr,
				    rq->mq_ctx->cpu);
	}
}

static void kyber_limit_depth(blk_opf_t opf, struct blk_mq_alloc_data *data)
{
	/*
	 * We use the scheduler tags as per-hardware queue queueing tokens.
	 * Async requests can be limited at this stage.
	 */
	if (!op_is_sync(opf)) {
		struct kyber_queue_data *kqd = data->q->elevator->elevator_data;

		data->shallow_depth = kqd->async_depth;
	}
}

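/*
 * Try to merge a bio into a request already queued on this ctx's kcq for the
 * same scheduling domain, under the per-kcq lock.
 */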
static bool kyber_bio_merge(struct request_queue *q, struct bio *bio,
			    unsigned int nr_segs)
{
	struct blk_mq_ctx *ctx = blk_mq_get_ctx(q);
	struct blk_mq_hw_ctx *hctx = blk_mq_map_queue(q, bio->bi_opf, ctx);
	struct kyber_hctx_data *khd = hctx->sched_data;
	struct kyber_ctx_queue *kcq = &khd->kcqs[ctx->index_hw[hctx->type]];
	unsigned int sched_domain = kyber_sched_domain(bio->bi_opf);
	struct list_head *rq_list = &kcq->rq_list[sched_domain];
	bool merged;

	spin_lock(&kcq->lock);
	merged = blk_bio_list_merge(hctx->queue, rq_list, bio, nr_segs);
	spin_unlock(&kcq->lock);

	return merged;
}

static void kyber_prepare_request(struct request *rq)
{
	rq_set_domain_token(rq, -1);
}

static void kyber_insert_requests(struct blk_mq_hw_ctx *hctx,
				  struct list_head *rq_list, bool at_head)
{
	struct kyber_hctx_data *khd = hctx->sched_data;
	struct request *rq, *next;

	list_for_each_entry_safe(rq, next, rq_list, queuelist) {
		unsigned int sched_domain = kyber_sched_domain(rq->cmd_flags);
		struct kyber_ctx_queue *kcq = &khd->kcqs[rq->mq_ctx->index_hw[hctx->type]];
		struct list_head *head = &kcq->rq_list[sched_domain];

		spin_lock(&kcq->lock);
		trace_block_rq_insert(rq);
		if (at_head)
			list_move(&rq->queuelist, head);
		else
			list_move_tail(&rq->queuelist, head);
		sbitmap_set_bit(&khd->kcq_map[sched_domain],
				rq->mq_ctx->index_hw[hctx->type]);
		spin_unlock(&kcq->lock);
	}
}

static void kyber_finish_request(struct request *rq)
{
	struct kyber_queue_data *kqd = rq->q->elevator->elevator_data;

	rq_clear_domain_token(kqd, rq);
}

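/*
 * Bucket a completed request's latency relative to the domain's target,
 * e.g. with a 2 ms target the divisor is 500 us, so a 3.2 ms completion
 * lands in bucket min((3200000 - 1) / 500000, 7) = 6.
 */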
static void add_latency_sample(struct kyber_cpu_latency *cpu_latency,
			       unsigned int sched_domain, unsigned int type,
			       u64 target, u64 latency)
{
	unsigned int bucket;
	u64 divisor;

	if (latency > 0) {
		divisor = max_t(u64, target >> KYBER_LATENCY_SHIFT, 1);
		bucket = min_t(unsigned int, div64_u64(latency - 1, divisor),
			       KYBER_LATENCY_BUCKETS - 1);
	} else {
		bucket = 0;
	}

	atomic_inc(&cpu_latency->buckets[sched_domain][type][bucket]);
}

static void kyber_completed_request(struct request *rq, u64 now)
{
	struct kyber_queue_data *kqd = rq->q->elevator->elevator_data;
	struct kyber_cpu_latency *cpu_latency;
	unsigned int sched_domain;
	u64 target;

	sched_domain = kyber_sched_domain(rq->cmd_flags);
	if (sched_domain == KYBER_OTHER)
		return;

	cpu_latency = get_cpu_ptr(kqd->cpu_latency);
	target = kqd->latency_targets[sched_domain];
	add_latency_sample(cpu_latency, sched_domain, KYBER_TOTAL_LATENCY,
			   target, now - rq->start_time_ns);
	add_latency_sample(cpu_latency, sched_domain, KYBER_IO_LATENCY, target,
			   now - rq->io_start_time_ns);
	put_cpu_ptr(kqd->cpu_latency);

	/* Make sure the stats timer fires within 100 ms of this completion. */
	timer_reduce(&kqd->timer, jiffies + HZ / 10);
}

struct flush_kcq_data {
	struct kyber_hctx_data *khd;
	unsigned int sched_domain;
	struct list_head *list;
};

static bool flush_busy_kcq(struct sbitmap *sb, unsigned int bitnr, void *data)
{
	struct flush_kcq_data *flush_data = data;
	struct kyber_ctx_queue *kcq = &flush_data->khd->kcqs[bitnr];

	spin_lock(&kcq->lock);
	list_splice_tail_init(&kcq->rq_list[flush_data->sched_domain],
			      flush_data->list);
	sbitmap_clear_bit(sb, bitnr);
	spin_unlock(&kcq->lock);

	return true;
}

static void kyber_flush_busy_kcqs(struct kyber_hctx_data *khd,
				  unsigned int sched_domain,
				  struct list_head *list)
{
	struct flush_kcq_data data = {
		.khd = khd,
		.sched_domain = sched_domain,
		.list = list,
	};

	sbitmap_for_each_set(&khd->kcq_map[sched_domain],
			     flush_busy_kcq, &data);
}

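/*
 * Wake callback invoked when a domain token is freed: remove the waiter and
 * kick the hardware queue so that dispatch can retry.
 */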
static int kyber_domain_wake(wait_queue_entry_t *wqe, unsigned mode, int flags,
			     void *key)
{
	struct blk_mq_hw_ctx *hctx = READ_ONCE(wqe->private);
	struct sbq_wait *wait = container_of(wqe, struct sbq_wait, wait);

	sbitmap_del_wait_queue(wait);
	blk_mq_run_hw_queue(hctx, true);
	return 1;
}

static int kyber_get_domain_token(struct kyber_queue_data *kqd,
				  struct kyber_hctx_data *khd,
				  struct blk_mq_hw_ctx *hctx)
{
	unsigned int sched_domain = khd->cur_domain;
	struct sbitmap_queue *domain_tokens = &kqd->domain_tokens[sched_domain];
	struct sbq_wait *wait = &khd->domain_wait[sched_domain];
	struct sbq_wait_state *ws;
	int nr;

	nr = __sbitmap_queue_get(domain_tokens);

	/*
	 * If we failed to get a domain token, make sure the hardware queue is
	 * run when one becomes available. Note that this is serialized on
	 * khd->lock, but we still need to be careful about the waker.
	 */
	if (nr < 0 && list_empty_careful(&wait->wait.entry)) {
		ws = sbq_wait_ptr(domain_tokens,
				  &khd->wait_index[sched_domain]);
		khd->domain_ws[sched_domain] = ws;
		sbitmap_add_wait_queue(domain_tokens, ws, wait);

		/*
		 * Try again in case a token was freed before we got on the
		 * wait queue.
		 */
		nr = __sbitmap_queue_get(domain_tokens);
	}

	/*
	 * If we got a token while we were on the wait queue, remove ourselves
	 * from the wait queue to ensure that all wake ups make forward
	 * progress. It's possible that the waker already deleted the entry
	 * between the !list_empty_careful() check and us grabbing the lock, but
	 * list_del_init() is okay with that.
	 */
	if (nr >= 0 && !list_empty_careful(&wait->wait.entry)) {
		ws = khd->domain_ws[sched_domain];
		spin_lock_irq(&ws->wait.lock);
		sbitmap_del_wait_queue(wait);
		spin_unlock_irq(&ws->wait.lock);
	}

	return nr;
}

static struct request *
kyber_dispatch_cur_domain(struct kyber_queue_data *kqd,
			  struct kyber_hctx_data *khd,
			  struct blk_mq_hw_ctx *hctx)
{
	struct list_head *rqs;
	struct request *rq;
	int nr;

	rqs = &khd->rqs[khd->cur_domain];

	/*
	 * If we already have a flushed request, then we just need to get a
	 * token for it. Otherwise, if there are pending requests in the kcqs,
	 * flush the kcqs, but only if we can get a token. If not, we should
	 * leave the requests in the kcqs so that they can be merged. Note that
	 * khd->lock serializes the flushes, so if we observed any bit set in
	 * the kcq_map, we will always get a request.
	 */
	rq = list_first_entry_or_null(rqs, struct request, queuelist);
	if (rq) {
		nr = kyber_get_domain_token(kqd, khd, hctx);
		if (nr >= 0) {
			khd->batching++;
			rq_set_domain_token(rq, nr);
			list_del_init(&rq->queuelist);
			return rq;
		} else {
			trace_kyber_throttled(kqd->dev,
					      kyber_domain_names[khd->cur_domain]);
		}
	} else if (sbitmap_any_bit_set(&khd->kcq_map[khd->cur_domain])) {
		nr = kyber_get_domain_token(kqd, khd, hctx);
		if (nr >= 0) {
			kyber_flush_busy_kcqs(khd, khd->cur_domain, rqs);
			rq = list_first_entry(rqs, struct request, queuelist);
			khd->batching++;
			rq_set_domain_token(rq, nr);
			list_del_init(&rq->queuelist);
			return rq;
		} else {
			trace_kyber_throttled(kqd->dev,
					      kyber_domain_names[khd->cur_domain]);
		}
	}

	/* There were either no pending requests or no tokens. */
	return NULL;
}

static struct request *kyber_dispatch_request(struct blk_mq_hw_ctx *hctx)
{
	struct kyber_queue_data *kqd = hctx->queue->elevator->elevator_data;
	struct kyber_hctx_data *khd = hctx->sched_data;
	struct request *rq;
	int i;

	spin_lock(&khd->lock);

	/*
	 * First, if we are still entitled to batch, try to dispatch a request
	 * from the batch.
	 */
	if (khd->batching < kyber_batch_size[khd->cur_domain]) {
		rq = kyber_dispatch_cur_domain(kqd, khd, hctx);
		if (rq)
			goto out;
	}

	/*
	 * Either,
	 * 1. We were no longer entitled to a batch.
	 * 2. The domain we were batching didn't have any requests.
	 * 3. The domain we were batching was out of tokens.
	 *
	 * Start another batch. Note that this wraps back around to the original
	 * domain if no other domains have requests or tokens.
	 */
	khd->batching = 0;
	for (i = 0; i < KYBER_NUM_DOMAINS; i++) {
		if (khd->cur_domain == KYBER_NUM_DOMAINS - 1)
			khd->cur_domain = 0;
		else
			khd->cur_domain++;

		rq = kyber_dispatch_cur_domain(kqd, khd, hctx);
		if (rq)
			goto out;
	}

	rq = NULL;
out:
	spin_unlock(&khd->lock);
	return rq;
}

static bool kyber_has_work(struct blk_mq_hw_ctx *hctx)
{
	struct kyber_hctx_data *khd = hctx->sched_data;
	int i;

	for (i = 0; i < KYBER_NUM_DOMAINS; i++) {
		if (!list_empty_careful(&khd->rqs[i]) ||
		    sbitmap_any_bit_set(&khd->kcq_map[i]))
			return true;
	}

	return false;
}

#define KYBER_LAT_SHOW_STORE(domain, name)				\
static ssize_t kyber_##name##_lat_show(struct elevator_queue *e,	\
				       char *page)			\
{									\
	struct kyber_queue_data *kqd = e->elevator_data;		\
									\
	return sprintf(page, "%llu\n", kqd->latency_targets[domain]);	\
}									\
									\
static ssize_t kyber_##name##_lat_store(struct elevator_queue *e,	\
					const char *page, size_t count)	\
{									\
	struct kyber_queue_data *kqd = e->elevator_data;		\
	unsigned long long nsec;					\
	int ret;							\
									\
	ret = kstrtoull(page, 10, &nsec);				\
	if (ret)							\
		return ret;						\
									\
	kqd->latency_targets[domain] = nsec;				\
									\
	return count;							\
}
KYBER_LAT_SHOW_STORE(KYBER_READ, read);
KYBER_LAT_SHOW_STORE(KYBER_WRITE, write);
#undef KYBER_LAT_SHOW_STORE

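/*
 * The latency targets are runtime-tunable through sysfs, e.g. (assuming a
 * disk using the kyber scheduler):
 *
 *	echo 10000000 > /sys/block/<dev>/queue/iosched/write_lat_nsec
 */
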
#define KYBER_LAT_ATTR(op) __ATTR(op##_lat_nsec, 0644, kyber_##op##_lat_show, kyber_##op##_lat_store)
static struct elv_fs_entry kyber_sched_attrs[] = {
	KYBER_LAT_ATTR(read),
	KYBER_LAT_ATTR(write),
	__ATTR_NULL
};
#undef KYBER_LAT_ATTR

#ifdef CONFIG_BLK_DEBUG_FS
#define KYBER_DEBUGFS_DOMAIN_ATTRS(domain, name)			\
static int kyber_##name##_tokens_show(void *data, struct seq_file *m)	\
{									\
	struct request_queue *q = data;					\
	struct kyber_queue_data *kqd = q->elevator->elevator_data;	\
									\
	sbitmap_queue_show(&kqd->domain_tokens[domain], m);		\
	return 0;							\
}									\
									\
static void *kyber_##name##_rqs_start(struct seq_file *m, loff_t *pos)	\
	__acquires(&khd->lock)						\
{									\
	struct blk_mq_hw_ctx *hctx = m->private;			\
	struct kyber_hctx_data *khd = hctx->sched_data;			\
									\
	spin_lock(&khd->lock);						\
	return seq_list_start(&khd->rqs[domain], *pos);			\
}									\
									\
static void *kyber_##name##_rqs_next(struct seq_file *m, void *v,	\
				     loff_t *pos)			\
{									\
	struct blk_mq_hw_ctx *hctx = m->private;			\
	struct kyber_hctx_data *khd = hctx->sched_data;			\
									\
	return seq_list_next(v, &khd->rqs[domain], pos);		\
}									\
									\
static void kyber_##name##_rqs_stop(struct seq_file *m, void *v)	\
	__releases(&khd->lock)						\
{									\
	struct blk_mq_hw_ctx *hctx = m->private;			\
	struct kyber_hctx_data *khd = hctx->sched_data;			\
									\
	spin_unlock(&khd->lock);					\
}									\
									\
static const struct seq_operations kyber_##name##_rqs_seq_ops = {	\
	.start	= kyber_##name##_rqs_start,				\
	.next	= kyber_##name##_rqs_next,				\
	.stop	= kyber_##name##_rqs_stop,				\
	.show	= blk_mq_debugfs_rq_show,				\
};									\
									\
static int kyber_##name##_waiting_show(void *data, struct seq_file *m)	\
{									\
	struct blk_mq_hw_ctx *hctx = data;				\
	struct kyber_hctx_data *khd = hctx->sched_data;			\
	wait_queue_entry_t *wait = &khd->domain_wait[domain].wait;	\
									\
	seq_printf(m, "%d\n", !list_empty_careful(&wait->entry));	\
	return 0;							\
}
KYBER_DEBUGFS_DOMAIN_ATTRS(KYBER_READ, read)
KYBER_DEBUGFS_DOMAIN_ATTRS(KYBER_WRITE, write)
KYBER_DEBUGFS_DOMAIN_ATTRS(KYBER_DISCARD, discard)
KYBER_DEBUGFS_DOMAIN_ATTRS(KYBER_OTHER, other)
#undef KYBER_DEBUGFS_DOMAIN_ATTRS

static int kyber_async_depth_show(void *data, struct seq_file *m)
{
	struct request_queue *q = data;
	struct kyber_queue_data *kqd = q->elevator->elevator_data;

	seq_printf(m, "%u\n", kqd->async_depth);
	return 0;
}

static int kyber_cur_domain_show(void *data, struct seq_file *m)
{
	struct blk_mq_hw_ctx *hctx = data;
	struct kyber_hctx_data *khd = hctx->sched_data;

	seq_printf(m, "%s\n", kyber_domain_names[khd->cur_domain]);
	return 0;
}

static int kyber_batching_show(void *data, struct seq_file *m)
{
	struct blk_mq_hw_ctx *hctx = data;
	struct kyber_hctx_data *khd = hctx->sched_data;

	seq_printf(m, "%u\n", khd->batching);
	return 0;
}

#define KYBER_QUEUE_DOMAIN_ATTRS(name)	\
	{#name "_tokens", 0400, kyber_##name##_tokens_show}
static const struct blk_mq_debugfs_attr kyber_queue_debugfs_attrs[] = {
	KYBER_QUEUE_DOMAIN_ATTRS(read),
	KYBER_QUEUE_DOMAIN_ATTRS(write),
	KYBER_QUEUE_DOMAIN_ATTRS(discard),
	KYBER_QUEUE_DOMAIN_ATTRS(other),
	{"async_depth", 0400, kyber_async_depth_show},
	{},
};
#undef KYBER_QUEUE_DOMAIN_ATTRS

#define KYBER_HCTX_DOMAIN_ATTRS(name)					\
	{#name "_rqs", 0400, .seq_ops = &kyber_##name##_rqs_seq_ops},	\
	{#name "_waiting", 0400, kyber_##name##_waiting_show}
static const struct blk_mq_debugfs_attr kyber_hctx_debugfs_attrs[] = {
	KYBER_HCTX_DOMAIN_ATTRS(read),
	KYBER_HCTX_DOMAIN_ATTRS(write),
	KYBER_HCTX_DOMAIN_ATTRS(discard),
	KYBER_HCTX_DOMAIN_ATTRS(other),
	{"cur_domain", 0400, kyber_cur_domain_show},
	{"batching", 0400, kyber_batching_show},
	{},
};
#undef KYBER_HCTX_DOMAIN_ATTRS
#endif

static struct elevator_type kyber_sched = {
	.ops = {
		.init_sched = kyber_init_sched,
		.exit_sched = kyber_exit_sched,
		.init_hctx = kyber_init_hctx,
		.exit_hctx = kyber_exit_hctx,
		.limit_depth = kyber_limit_depth,
		.bio_merge = kyber_bio_merge,
		.prepare_request = kyber_prepare_request,
		.insert_requests = kyber_insert_requests,
		.finish_request = kyber_finish_request,
		.requeue_request = kyber_finish_request,
		.completed_request = kyber_completed_request,
		.dispatch_request = kyber_dispatch_request,
		.has_work = kyber_has_work,
		.depth_updated = kyber_depth_updated,
	},
#ifdef CONFIG_BLK_DEBUG_FS
	.queue_debugfs_attrs = kyber_queue_debugfs_attrs,
	.hctx_debugfs_attrs = kyber_hctx_debugfs_attrs,
#endif
	.elevator_attrs = kyber_sched_attrs,
	.elevator_name = "kyber",
	.elevator_owner = THIS_MODULE,
};

static int __init kyber_init(void)
{
	return elv_register(&kyber_sched);
}

static void __exit kyber_exit(void)
{
	elv_unregister(&kyber_sched);
}

module_init(kyber_init);
module_exit(kyber_exit);

1054 MODULE_AUTHOR("Omar Sandoval");
1055 MODULE_LICENSE("GPL");
1056 MODULE_DESCRIPTION("Kyber I/O scheduler");