/*
 * Budget Fair Queueing (BFQ) I/O scheduler.
 *
 * Based on ideas and code from CFQ:
 * Copyright (C) 2003 Jens Axboe <axboe@kernel.dk>
 *
 * Copyright (C) 2008 Fabio Checconi <fabio@gandalf.sssup.it>
 *                    Paolo Valente <paolo.valente@unimore.it>
 *
 * Copyright (C) 2010 Paolo Valente <paolo.valente@unimore.it>
 *                    Arianna Avanzini <avanzini@google.com>
 *
 * Copyright (C) 2017 Paolo Valente <paolo.valente@linaro.org>
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License as
 * published by the Free Software Foundation; either version 2 of the
 * License, or (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 *
 * BFQ is a proportional-share I/O scheduler, with some extra
 * low-latency capabilities. BFQ also supports full hierarchical
 * scheduling through cgroups. The next paragraphs provide an
 * introduction to BFQ's inner workings. Details on BFQ's benefits,
 * usage and limitations can be found in
 * Documentation/block/bfq-iosched.txt.
 *
 * BFQ is a proportional-share storage-I/O scheduling algorithm based
 * on the slice-by-slice service scheme of CFQ. But BFQ assigns
 * budgets, measured in number of sectors, to processes instead of
 * time slices. The device is not granted to the in-service process
 * for a given time slice, but until it has exhausted its assigned
 * budget. This change from the time to the service domain enables BFQ
 * to distribute the device throughput among processes as desired,
 * without any distortion due to throughput fluctuations, or to device
 * internal queueing. BFQ uses an ad hoc internal scheduler, called
 * B-WF2Q+, to schedule processes according to their budgets. More
 * precisely, BFQ schedules queues associated with processes. Each
 * process/queue is assigned a user-configurable weight, and B-WF2Q+
 * guarantees that each queue receives a fraction of the throughput
 * proportional to its weight. Thanks to the accurate policy of
 * B-WF2Q+, BFQ can afford to assign high budgets to I/O-bound
 * processes issuing sequential requests (to boost the throughput),
 * and yet guarantee a low latency to interactive and soft real-time
 * applications.
 *
 * In particular, to provide these low-latency guarantees, BFQ
 * explicitly privileges the I/O of two classes of time-sensitive
 * applications: interactive and soft real-time. This feature enables
 * BFQ to provide applications in these classes with a very low
 * latency. Finally, BFQ also features additional heuristics for
 * preserving both a low latency and a high throughput on NCQ-capable,
 * rotational or flash-based devices, and to get the job done quickly
 * for applications consisting of many I/O-bound processes.
 *
 * NOTE: if the main or only goal, with a given device, is to achieve
 * the maximum-possible throughput at all times, then do switch off
 * all low-latency heuristics for that device, by setting low_latency
 * to 0.
 *
 * BFQ is described in [1], where also a reference to the initial, more
 * theoretical paper on BFQ can be found. The interested reader can find
 * in the latter paper full details on the main algorithm, as well as
 * formulas of the guarantees and formal proofs of all the properties.
 * With respect to the version of BFQ presented in these papers, this
 * implementation adds a few more heuristics, such as the one that
 * guarantees a low latency to soft real-time applications, and a
 * hierarchical extension based on H-WF2Q+.
 *
 * B-WF2Q+ is based on WF2Q+, which is described in [2], together with
 * H-WF2Q+, while the augmented tree used here to implement B-WF2Q+
 * with O(log N) complexity derives from the one introduced with EEVDF
 * in [3].
 *
 * [1] P. Valente, A. Avanzini, "Evolution of the BFQ Storage I/O
 *     Scheduler", Proceedings of the First Workshop on Mobile System
 *     Technologies (MST-2015), May 2015.
 *     http://algogroup.unimore.it/people/paolo/disk_sched/mst-2015.pdf
 *
 * [2] Jon C.R. Bennett and H. Zhang, "Hierarchical Packet Fair Queueing
 *     Algorithms", IEEE/ACM Transactions on Networking, 5(5):675-689,
 *     Oct 1997.
 *     http://www.cs.cmu.edu/~hzhang/papers/TON-97-Oct.ps.gz
 *
 * [3] I. Stoica and H. Abdel-Wahab, "Earliest Eligible Virtual Deadline
 *     First: A Flexible and Accurate Mechanism for Proportional Share
 *     Resource Allocation", technical report.
 *     http://www.cs.berkeley.edu/~istoica/papers/eevdf-tr-95.pdf
 */
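
/*
 * To illustrate the budget-based scheme described above, here is a
 * minimal, self-contained sketch (not part of BFQ; all names and
 * numbers are made up for the example). Two queues with weights 1 and
 * 3 are repeatedly granted budgets proportional to their weights;
 * since service is accounted in sectors, each queue ends up with a
 * fraction of the served sectors equal to its weight fraction, no
 * matter how long the device takes to serve each request:
 */
#if 0
#include <stdio.h>

struct toy_queue {
        int weight;
        long served_sectors;
};

int main(void)
{
        struct toy_queue q[2] = { { .weight = 1 }, { .weight = 3 } };
        const long base_budget = 1024; /* sectors granted per weight unit */
        long total = 0;
        int round, i;

        for (round = 0; round < 100; round++)
                for (i = 0; i < 2; i++) {
                        long budget = base_budget * q[i].weight;

                        q[i].served_sectors += budget;
                        total += budget;
                }

        for (i = 0; i < 2; i++) /* prints 25.00% and 75.00% */
                printf("queue %d: %.2f%% of service\n", i,
                       100.0 * q[i].served_sectors / total);
        return 0;
}
#endif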
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/blkdev.h>
#include <linux/cgroup.h>
#include <linux/elevator.h>
#include <linux/ktime.h>
#include <linux/rbtree.h>
#include <linux/ioprio.h>
#include <linux/sbitmap.h>
#include <linux/delay.h>

#include "blk.h"
#include "blk-mq.h"
#include "blk-mq-tag.h"
#include "blk-mq-sched.h"
#include "bfq-iosched.h"
#define BFQ_BFQQ_FNS(name)                                              \
void bfq_mark_bfqq_##name(struct bfq_queue *bfqq)                       \
{                                                                       \
        __set_bit(BFQQF_##name, &(bfqq)->flags);                        \
}                                                                       \
void bfq_clear_bfqq_##name(struct bfq_queue *bfqq)                      \
{                                                                       \
        __clear_bit(BFQQF_##name, &(bfqq)->flags);                      \
}                                                                       \
int bfq_bfqq_##name(const struct bfq_queue *bfqq)                       \
{                                                                       \
        return test_bit(BFQQF_##name, &(bfqq)->flags);                  \
}
BFQ_BFQQ_FNS(just_created);
BFQ_BFQQ_FNS(busy);
BFQ_BFQQ_FNS(wait_request);
BFQ_BFQQ_FNS(non_blocking_wait_rq);
BFQ_BFQQ_FNS(fifo_expire);
BFQ_BFQQ_FNS(has_short_ttime);
BFQ_BFQQ_FNS(sync);
BFQ_BFQQ_FNS(IO_bound);
BFQ_BFQQ_FNS(in_large_burst);
BFQ_BFQQ_FNS(coop);
BFQ_BFQQ_FNS(split_coop);
BFQ_BFQQ_FNS(softrt_update);
#undef BFQ_BFQQ_FNS
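
/*
 * For reference, BFQ_BFQQ_FNS(wait_request) above expands (modulo
 * whitespace) to the following three helpers; the same triplet is
 * generated for each of the other flags:
 */
#if 0
void bfq_mark_bfqq_wait_request(struct bfq_queue *bfqq)
{
        __set_bit(BFQQF_wait_request, &(bfqq)->flags);
}
void bfq_clear_bfqq_wait_request(struct bfq_queue *bfqq)
{
        __clear_bit(BFQQF_wait_request, &(bfqq)->flags);
}
int bfq_bfqq_wait_request(const struct bfq_queue *bfqq)
{
        return test_bit(BFQQF_wait_request, &(bfqq)->flags);
}
#endif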
/* Expiration time of sync (0) and async (1) requests, in ns. */
static const u64 bfq_fifo_expire[2] = { NSEC_PER_SEC / 4, NSEC_PER_SEC / 8 };

/* Maximum backwards seek (magic number lifted from CFQ), in KiB. */
static const int bfq_back_max = 16 * 1024;

/* Penalty of a backwards seek, in number of sectors. */
static const int bfq_back_penalty = 2;

/* Idling period duration, in ns. */
static u64 bfq_slice_idle = NSEC_PER_SEC / 125;

/* Minimum number of assigned budgets for which stats are safe to compute. */
static const int bfq_stats_min_budgets = 194;

/* Default maximum budget values, in sectors and number of requests. */
static const int bfq_default_max_budget = 16 * 1024;

/*
 * Async to sync throughput distribution is controlled as follows:
 * when an async request is served, the entity is charged the number
 * of sectors of the request, multiplied by the factor below.
 */
static const int bfq_async_charge_factor = 10;

/* Default timeout values, in jiffies, approximating CFQ defaults. */
const int bfq_timeout = HZ / 8;

static struct kmem_cache *bfq_pool;
/* Below this threshold (in ns), we consider thinktime immediate. */
#define BFQ_MIN_TT              (2 * NSEC_PER_MSEC)

/* hw_tag detection: parallel requests threshold and min samples needed. */
#define BFQ_HW_QUEUE_THRESHOLD  4
#define BFQ_HW_QUEUE_SAMPLES    32

#define BFQQ_SEEK_THR           (sector_t)(8 * 100)
#define BFQQ_SECT_THR_NONROT    (sector_t)(2 * 32)
#define BFQQ_CLOSE_THR          (sector_t)(8 * 1024)
#define BFQQ_SEEKY(bfqq)        (hweight32(bfqq->seek_history) > 32/8)
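
/*
 * seek_history is a 32-bit shift register: on each request arrival a
 * bit is shifted in, set iff the new request is far (more than
 * BFQQ_SEEK_THR sectors) from the previous one. BFQQ_SEEKY() then
 * deems the queue seeky if more than 32/8 = 4 of the last 32 samples
 * were seeks. A minimal sketch of such an update (simplified with
 * respect to the real update logic, which also considers
 * non-rotational devices):
 */
#if 0
static void toy_update_seek_history(struct bfq_queue *bfqq, sector_t sdist)
{
        bfqq->seek_history <<= 1;
        bfqq->seek_history |= sdist > BFQQ_SEEK_THR;
}
#endif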
/* Min number of samples required to perform peak-rate update */
#define BFQ_RATE_MIN_SAMPLES    32
/* Min observation time interval required to perform a peak-rate update (ns) */
#define BFQ_RATE_MIN_INTERVAL   (300*NSEC_PER_MSEC)
/* Target observation time interval for a peak-rate update (ns) */
#define BFQ_RATE_REF_INTERVAL   NSEC_PER_SEC

/* Shift used for peak rate fixed precision calculations. */
#define BFQ_RATE_SHIFT          16
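
/*
 * Worked example (illustrative numbers): a device with a peak rate of
 * ~100 MB/s serves ~0.2 sectors/usec, which would truncate to 0 if
 * stored as a plain integer in sectors/usec. Left-shifted by
 * BFQ_RATE_SHIFT, it is instead stored as ~0.2 * 2^16 ~= 13107,
 * preserving the fractional part for later fixed-point computations.
 */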
/*
 * By default, BFQ computes the duration of the weight raising for
 * interactive applications automatically, using the following formula:
 * duration = (R / r) * T, where r is the peak rate of the device, and
 * R and T are two reference parameters.
 * In particular, R is the peak rate of the reference device (see below),
 * and T is a reference time: given the systems that are likely to be
 * installed on the reference device according to its speed class, T is
 * about the maximum time needed, under BFQ and while reading two files in
 * parallel, to load typical large applications on these systems.
 * In practice, the slower/faster the device at hand is, the more/less it
 * takes to load applications with respect to the reference device.
 * Accordingly, the longer/shorter BFQ grants weight raising to
 * interactive applications.
 *
 * BFQ uses four different reference pairs (R, T), depending on:
 * . whether the device is rotational or non-rotational;
 * . whether the device is slow, such as old or portable HDDs, as well as
 *   SD cards, or fast, such as newer HDDs and SSDs.
 *
 * The device's speed class is dynamically (re)detected in
 * bfq_update_peak_rate() every time the estimated peak rate is updated.
 *
 * In the following definitions, R_slow[0]/R_fast[0] and
 * T_slow[0]/T_fast[0] are the reference values for a slow/fast
 * rotational device, whereas R_slow[1]/R_fast[1] and
 * T_slow[1]/T_fast[1] are the reference values for a slow/fast
 * non-rotational device. Finally, device_speed_thresh are the
 * thresholds used to switch between speed classes. The reference
 * rates are not the actual peak rates of the devices used as a
 * reference, but slightly lower values. The reason for using these
 * slightly lower values is that the peak-rate estimator tends to
 * yield slightly lower values than the actual peak rate (it can yield
 * the actual peak rate only if there is only one process doing I/O,
 * and the process does sequential I/O).
 *
 * Both the reference peak rates and the thresholds are measured in
 * sectors/usec, left-shifted by BFQ_RATE_SHIFT.
 */
static int R_slow[2] = {1000, 10700};
static int R_fast[2] = {14000, 33000};

/*
 * To improve readability, a conversion function is used to initialize the
 * following arrays, which entails that they can be initialized only in a
 * function.
 */
static int T_slow[2];
static int T_fast[2];
static int device_speed_thresh[2];
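
/*
 * Worked example of the duration formula above (numbers are
 * illustrative only, not the actual initialization values): if the
 * reference pair in use gives R = 14000 and T = 6 seconds, and the
 * estimated peak rate of the device at hand is r = 28000, then
 * duration = (R / r) * T = (14000 / 28000) * 6 s = 3 s: a device
 * twice as fast as the reference one gets half the weight-raising
 * period.
 */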
#define RQ_BIC(rq)      icq_to_bic((rq)->elv.priv[0])
#define RQ_BFQQ(rq)     ((rq)->elv.priv[1])

struct bfq_queue *bic_to_bfqq(struct bfq_io_cq *bic, bool is_sync)
{
        return bic->bfqq[is_sync];
}

void bic_set_bfqq(struct bfq_io_cq *bic, struct bfq_queue *bfqq, bool is_sync)
{
        bic->bfqq[is_sync] = bfqq;
}

struct bfq_data *bic_to_bfqd(struct bfq_io_cq *bic)
{
        return bic->icq.q->elevator->elevator_data;
}

/**
 * icq_to_bic - convert iocontext queue structure to bfq_io_cq.
 * @icq: the iocontext queue.
 */
static struct bfq_io_cq *icq_to_bic(struct io_cq *icq)
{
        /* bic->icq is the first member, %NULL will convert to %NULL */
        return container_of(icq, struct bfq_io_cq, icq);
}
/**
 * bfq_bic_lookup - search into @ioc a bic associated to @bfqd.
 * @bfqd: the lookup key.
 * @ioc: the io_context of the process doing I/O.
 * @q: the request queue.
 */
static struct bfq_io_cq *bfq_bic_lookup(struct bfq_data *bfqd,
                                        struct io_context *ioc,
                                        struct request_queue *q)
{
        if (ioc) {
                unsigned long flags;
                struct bfq_io_cq *icq;

                spin_lock_irqsave(q->queue_lock, flags);
                icq = icq_to_bic(ioc_lookup_icq(ioc, q));
                spin_unlock_irqrestore(q->queue_lock, flags);

                return icq;
        }

        return NULL;
}
/*
 * Scheduler run of queue, if there are requests pending and no one in the
 * driver that will restart queueing.
 */
void bfq_schedule_dispatch(struct bfq_data *bfqd)
{
        if (bfqd->queued != 0) {
                bfq_log(bfqd, "schedule dispatch");
                blk_mq_run_hw_queues(bfqd->queue, true);
        }
}
#define bfq_class_idle(bfqq)    ((bfqq)->ioprio_class == IOPRIO_CLASS_IDLE)
#define bfq_class_rt(bfqq)      ((bfqq)->ioprio_class == IOPRIO_CLASS_RT)

#define bfq_sample_valid(samples)       ((samples) > 80)
/*
 * Lifted from AS - choose which of rq1 and rq2 is best served now.
 * We choose the request that is closest to the head right now. Distance
 * behind the head is penalized and only allowed to a certain extent.
 */
static struct request *bfq_choose_req(struct bfq_data *bfqd,
                                      struct request *rq1,
                                      struct request *rq2,
                                      sector_t last)
{
        sector_t s1, s2, d1 = 0, d2 = 0;
        unsigned long back_max;
#define BFQ_RQ1_WRAP    0x01 /* request 1 wraps */
#define BFQ_RQ2_WRAP    0x02 /* request 2 wraps */
        unsigned int wrap = 0; /* bit mask: requests behind the disk head? */

        if (!rq1 || rq1 == rq2)
                return rq2;
        if (!rq2)
                return rq1;

        if (rq_is_sync(rq1) && !rq_is_sync(rq2))
                return rq1;
        else if (rq_is_sync(rq2) && !rq_is_sync(rq1))
                return rq2;
        if ((rq1->cmd_flags & REQ_META) && !(rq2->cmd_flags & REQ_META))
                return rq1;
        else if ((rq2->cmd_flags & REQ_META) && !(rq1->cmd_flags & REQ_META))
                return rq2;

        s1 = blk_rq_pos(rq1);
        s2 = blk_rq_pos(rq2);

        /*
         * By definition, 1KiB is 2 sectors.
         */
        back_max = bfqd->bfq_back_max * 2;

        /*
         * Strict one way elevator _except_ in the case where we allow
         * short backward seeks which are biased as twice the cost of a
         * similar forward seek.
         */
        if (s1 >= last)
                d1 = s1 - last;
        else if (s1 + back_max >= last)
                d1 = (last - s1) * bfqd->bfq_back_penalty;
        else
                wrap |= BFQ_RQ1_WRAP;

        if (s2 >= last)
                d2 = s2 - last;
        else if (s2 + back_max >= last)
                d2 = (last - s2) * bfqd->bfq_back_penalty;
        else
                wrap |= BFQ_RQ2_WRAP;

        /* Found required data */

        /*
         * By doing switch() on the bit mask "wrap" we avoid having to
         * check two variables for all permutations: --> faster!
         */
        switch (wrap) {
        case 0: /* common case for CFQ: rq1 and rq2 not wrapped */
                if (d1 < d2)
                        return rq1;
                else if (d2 < d1)
                        return rq2;

                if (s1 >= s2)
                        return rq1;
                else
                        return rq2;

        case BFQ_RQ2_WRAP:
                return rq1;
        case BFQ_RQ1_WRAP:
                return rq2;
        case BFQ_RQ1_WRAP|BFQ_RQ2_WRAP: /* both rqs wrapped */
        default:
                /*
                 * Since both rqs are wrapped,
                 * start with the one that's further behind head
                 * (--> only *one* back seek required),
                 * since back seek takes more time than forward.
                 */
                if (s1 <= s2)
                        return rq1;
                else
                        return rq2;
        }
}
static struct bfq_queue *
bfq_rq_pos_tree_lookup(struct bfq_data *bfqd, struct rb_root *root,
                       sector_t sector, struct rb_node **ret_parent,
                       struct rb_node ***rb_link)
{
        struct rb_node **p, *parent;
        struct bfq_queue *bfqq = NULL;

        parent = NULL;
        p = &root->rb_node;
        while (*p) {
                struct rb_node **n;

                parent = *p;
                bfqq = rb_entry(parent, struct bfq_queue, pos_node);

                /*
                 * Sort strictly based on sector. Smallest to the left,
                 * largest to the right.
                 */
                if (sector > blk_rq_pos(bfqq->next_rq))
                        n = &(*p)->rb_right;
                else if (sector < blk_rq_pos(bfqq->next_rq))
                        n = &(*p)->rb_left;
                else
                        break;
                p = n;
                bfqq = NULL;
        }

        *ret_parent = parent;
        if (rb_link)
                *rb_link = p;

        bfq_log(bfqd, "rq_pos_tree_lookup %llu: returning %d",
                (unsigned long long)sector,
                bfqq ? bfqq->pid : 0);

        return bfqq;
}
void bfq_pos_tree_add_move(struct bfq_data *bfqd, struct bfq_queue *bfqq)
{
        struct rb_node **p, *parent;
        struct bfq_queue *__bfqq;

        if (bfqq->pos_root) {
                rb_erase(&bfqq->pos_node, bfqq->pos_root);
                bfqq->pos_root = NULL;
        }

        if (bfq_class_idle(bfqq))
                return;
        if (!bfqq->next_rq)
                return;

        bfqq->pos_root = &bfq_bfqq_to_bfqg(bfqq)->rq_pos_tree;
        __bfqq = bfq_rq_pos_tree_lookup(bfqd, bfqq->pos_root,
                        blk_rq_pos(bfqq->next_rq), &parent, &p);
        if (!__bfqq) {
                rb_link_node(&bfqq->pos_node, parent, p);
                rb_insert_color(&bfqq->pos_node, bfqq->pos_root);
        } else
                bfqq->pos_root = NULL;
}
/*
 * Tell whether there are active queues or groups with differentiated weights.
 */
static bool bfq_differentiated_weights(struct bfq_data *bfqd)
{
        /*
         * For weights to differ, at least one of the trees must contain
         * at least two nodes.
         */
        return (!RB_EMPTY_ROOT(&bfqd->queue_weights_tree) &&
                (bfqd->queue_weights_tree.rb_node->rb_left ||
                 bfqd->queue_weights_tree.rb_node->rb_right)
#ifdef CONFIG_BFQ_GROUP_IOSCHED
               ) ||
               (!RB_EMPTY_ROOT(&bfqd->group_weights_tree) &&
                (bfqd->group_weights_tree.rb_node->rb_left ||
                 bfqd->group_weights_tree.rb_node->rb_right)
#endif
               );
}
/*
 * The following function returns true if every queue must receive the
 * same share of the throughput (this condition is used when deciding
 * whether idling may be disabled, see the comments in the function
 * bfq_bfqq_may_idle()).
 *
 * Such a scenario occurs when:
 * 1) all active queues have the same weight,
 * 2) all active groups at the same level in the groups tree have the same
 *    weight,
 * 3) all active groups at the same level in the groups tree have the same
 *    number of children.
 *
 * Unfortunately, keeping the necessary state for evaluating exactly the
 * above symmetry conditions would be quite complex and time-consuming.
 * Therefore this function evaluates, instead, the following stronger
 * sub-conditions, for which it is much easier to maintain the needed
 * state:
 * 1) all active queues have the same weight,
 * 2) all active groups have the same weight,
 * 3) all active groups have at most one active child each.
 * In particular, the last two conditions are always true if hierarchical
 * support and the cgroups interface are not enabled, thus no state needs
 * to be maintained in this case.
 */
static bool bfq_symmetric_scenario(struct bfq_data *bfqd)
{
        return !bfq_differentiated_weights(bfqd);
}
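
/*
 * Example for the symmetry check above (illustrative numbers, not
 * taken from the code): if four queues are active, all with weight
 * 100, the queue weights tree contains a single counter node (weight
 * 100, num_active == 4), so bfq_differentiated_weights() returns
 * false and the scenario is deemed symmetric. As soon as one queue
 * switches to weight 200, a second node appears in the tree and the
 * scenario becomes asymmetric.
 */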
/*
 * If the weight-counter tree passed as input contains no counter for
 * the weight of the input entity, then add that counter; otherwise just
 * increment the existing counter.
 *
 * Note that weight-counter trees contain few nodes in mostly symmetric
 * scenarios. For example, if all queues have the same weight, then the
 * weight-counter tree for the queues may contain at most one node.
 * This holds even if low_latency is on, because weight-raised queues
 * are not inserted in the tree.
 * In most scenarios, the rate at which nodes are created/destroyed
 * should be low too.
 */
void bfq_weights_tree_add(struct bfq_data *bfqd, struct bfq_entity *entity,
                          struct rb_root *root)
{
        struct rb_node **new = &(root->rb_node), *parent = NULL;

        /*
         * Do not insert if the entity is already associated with a
         * counter, which happens if:
         *   1) the entity is associated with a queue,
         *   2) a request arrival has caused the queue to become both
         *      non-weight-raised, and hence change its weight, and
         *      backlogged; in this respect, each of the two events
         *      causes an invocation of this function,
         *   3) this is the invocation of this function caused by the
         *      second event. This second invocation is actually useless,
         *      and we handle this fact by exiting immediately. More
         *      efficient or clearer solutions might possibly be adopted.
         */
        if (entity->weight_counter)
                return;

        while (*new) {
                struct bfq_weight_counter *__counter = container_of(*new,
                                                struct bfq_weight_counter,
                                                weights_node);
                parent = *new;

                if (entity->weight == __counter->weight) {
                        entity->weight_counter = __counter;
                        goto inc_counter;
                }
                if (entity->weight < __counter->weight)
                        new = &((*new)->rb_left);
                else
                        new = &((*new)->rb_right);
        }

        entity->weight_counter = kzalloc(sizeof(struct bfq_weight_counter),
                                         GFP_ATOMIC);

        /*
         * In the unlucky event of an allocation failure, we just
         * exit. This will cause the weight of entity to not be
         * considered in bfq_differentiated_weights, which, in its
         * turn, causes the scenario to be deemed wrongly symmetric in
         * case entity's weight would have been the only weight making
         * the scenario asymmetric. On the bright side, no unbalance
         * will however occur when entity becomes inactive again (the
         * invocation of this function is triggered by an activation
         * of entity). In fact, bfq_weights_tree_remove does nothing
         * if !entity->weight_counter.
         */
        if (unlikely(!entity->weight_counter))
                return;

        entity->weight_counter->weight = entity->weight;
        rb_link_node(&entity->weight_counter->weights_node, parent, new);
        rb_insert_color(&entity->weight_counter->weights_node, root);

inc_counter:
        entity->weight_counter->num_active++;
}
/*
 * Decrement the weight counter associated with the entity, and, if the
 * counter reaches 0, remove the counter from the tree.
 * See the comments to the function bfq_weights_tree_add() for considerations
 * about overhead.
 */
void bfq_weights_tree_remove(struct bfq_data *bfqd, struct bfq_entity *entity,
                             struct rb_root *root)
{
        if (!entity->weight_counter)
                return;

        entity->weight_counter->num_active--;
        if (entity->weight_counter->num_active > 0)
                goto reset_entity_pointer;

        rb_erase(&entity->weight_counter->weights_node, root);
        kfree(entity->weight_counter);

reset_entity_pointer:
        entity->weight_counter = NULL;
}
/*
 * Return expired entry, or NULL to just start from scratch in rbtree.
 */
static struct request *bfq_check_fifo(struct bfq_queue *bfqq,
                                      struct request *last)
{
        struct request *rq;

        if (bfq_bfqq_fifo_expire(bfqq))
                return NULL;

        bfq_mark_bfqq_fifo_expire(bfqq);

        rq = rq_entry_fifo(bfqq->fifo.next);

        if (rq == last || ktime_get_ns() < rq->fifo_time)
                return NULL;

        bfq_log_bfqq(bfqq->bfqd, bfqq, "check_fifo: returned %p", rq);
        return rq;
}
static struct request *bfq_find_next_rq(struct bfq_data *bfqd,
                                        struct bfq_queue *bfqq,
                                        struct request *last)
{
        struct rb_node *rbnext = rb_next(&last->rb_node);
        struct rb_node *rbprev = rb_prev(&last->rb_node);
        struct request *next, *prev = NULL;

        /* Follow expired path, else get first next available. */
        next = bfq_check_fifo(bfqq, last);
        if (next)
                return next;

        if (rbprev)
                prev = rb_entry_rq(rbprev);

        if (rbnext)
                next = rb_entry_rq(rbnext);
        else {
                rbnext = rb_first(&bfqq->sort_list);
                if (rbnext && rbnext != &last->rb_node)
                        next = rb_entry_rq(rbnext);
        }

        return bfq_choose_req(bfqd, next, prev, blk_rq_pos(last));
}
/* see the definition of bfq_async_charge_factor for details */
static unsigned long bfq_serv_to_charge(struct request *rq,
                                        struct bfq_queue *bfqq)
{
        if (bfq_bfqq_sync(bfqq) || bfqq->wr_coeff > 1)
                return blk_rq_sectors(rq);

        /*
         * If there are no weight-raised queues, then amplify service
         * by just the async charge factor; otherwise amplify service
         * by twice the async charge factor, to further reduce latency
         * for weight-raised queues.
         */
        if (bfqq->bfqd->wr_busy_queues == 0)
                return blk_rq_sectors(rq) * bfq_async_charge_factor;

        return blk_rq_sectors(rq) * 2 * bfq_async_charge_factor;
}
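
/*
 * Worked example for the charging rules above (illustrative numbers):
 * a 512-sector async request from a non-weight-raised queue is
 * charged 512 * 10 = 5120 sectors of service if no queue is currently
 * weight-raised, and 512 * 2 * 10 = 10240 sectors if at least one
 * queue is; a sync request is always charged its actual size, i.e.,
 * 512 sectors.
 */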
/**
 * bfq_updated_next_req - update the queue after a new next_rq selection.
 * @bfqd: the device data the queue belongs to.
 * @bfqq: the queue to update.
 *
 * If the first request of a queue changes we make sure that the queue
 * has enough budget to serve at least its first request (if the
 * request has grown). We do this because if the queue has not enough
 * budget for its first request, it has to go through two dispatch
 * rounds to actually get it dispatched.
 */
static void bfq_updated_next_req(struct bfq_data *bfqd,
                                 struct bfq_queue *bfqq)
{
        struct bfq_entity *entity = &bfqq->entity;
        struct request *next_rq = bfqq->next_rq;
        unsigned long new_budget;

        if (!next_rq)
                return;

        if (bfqq == bfqd->in_service_queue)
                /*
                 * In order not to break guarantees, budgets cannot be
                 * changed after an entity has been selected.
                 */
                return;

        new_budget = max_t(unsigned long, bfqq->max_budget,
                           bfq_serv_to_charge(next_rq, bfqq));
        if (entity->budget != new_budget) {
                entity->budget = new_budget;
                bfq_log_bfqq(bfqd, bfqq, "updated next rq: new budget %lu",
                                         new_budget);
                bfq_requeue_bfqq(bfqd, bfqq, false);
        }
}
static void
bfq_bfqq_resume_state(struct bfq_queue *bfqq, struct bfq_data *bfqd,
                      struct bfq_io_cq *bic, bool bfq_already_existing)
{
        unsigned int old_wr_coeff = bfqq->wr_coeff;
        bool busy = bfq_already_existing && bfq_bfqq_busy(bfqq);

        if (bic->saved_has_short_ttime)
                bfq_mark_bfqq_has_short_ttime(bfqq);
        else
                bfq_clear_bfqq_has_short_ttime(bfqq);

        if (bic->saved_IO_bound)
                bfq_mark_bfqq_IO_bound(bfqq);
        else
                bfq_clear_bfqq_IO_bound(bfqq);

        bfqq->ttime = bic->saved_ttime;
        bfqq->wr_coeff = bic->saved_wr_coeff;
        bfqq->wr_start_at_switch_to_srt = bic->saved_wr_start_at_switch_to_srt;
        bfqq->last_wr_start_finish = bic->saved_last_wr_start_finish;
        bfqq->wr_cur_max_time = bic->saved_wr_cur_max_time;

        if (bfqq->wr_coeff > 1 && (bfq_bfqq_in_large_burst(bfqq) ||
            time_is_before_jiffies(bfqq->last_wr_start_finish +
                                   bfqq->wr_cur_max_time))) {
                bfq_log_bfqq(bfqq->bfqd, bfqq,
                             "resume state: switching off wr");

                bfqq->wr_coeff = 1;
        }

        /* make sure weight will be updated, however we got here */
        bfqq->entity.prio_changed = 1;

        if (likely(!busy))
                return;

        if (old_wr_coeff == 1 && bfqq->wr_coeff > 1)
                bfqd->wr_busy_queues++;
        else if (old_wr_coeff > 1 && bfqq->wr_coeff == 1)
                bfqd->wr_busy_queues--;
}
static int bfqq_process_refs(struct bfq_queue *bfqq)
{
        return bfqq->ref - bfqq->allocated - bfqq->entity.on_st;
}
/* Empty burst list and add just bfqq (see comments on bfq_handle_burst) */
static void bfq_reset_burst_list(struct bfq_data *bfqd, struct bfq_queue *bfqq)
{
        struct bfq_queue *item;
        struct hlist_node *n;

        hlist_for_each_entry_safe(item, n, &bfqd->burst_list, burst_list_node)
                hlist_del_init(&item->burst_list_node);
        hlist_add_head(&bfqq->burst_list_node, &bfqd->burst_list);
        bfqd->burst_size = 1;
        bfqd->burst_parent_entity = bfqq->entity.parent;
}
/* Add bfqq to the list of queues in current burst (see bfq_handle_burst) */
static void bfq_add_to_burst(struct bfq_data *bfqd, struct bfq_queue *bfqq)
{
        /* Increment burst size to take into account also bfqq */
        bfqd->burst_size++;

        if (bfqd->burst_size == bfqd->bfq_large_burst_thresh) {
                struct bfq_queue *pos, *bfqq_item;
                struct hlist_node *n;

                /*
                 * Enough queues have been activated shortly after each
                 * other to consider this burst as large.
                 */
                bfqd->large_burst = true;

                /*
                 * We can now mark all queues in the burst list as
                 * belonging to a large burst.
                 */
                hlist_for_each_entry(bfqq_item, &bfqd->burst_list,
                                     burst_list_node)
                        bfq_mark_bfqq_in_large_burst(bfqq_item);
                bfq_mark_bfqq_in_large_burst(bfqq);

                /*
                 * From now on, and until the current burst finishes, any
                 * new queue being activated shortly after the last queue
                 * was inserted in the burst can be immediately marked as
                 * belonging to a large burst. So the burst list is not
                 * needed any more. Remove it.
                 */
                hlist_for_each_entry_safe(pos, n, &bfqd->burst_list,
                                          burst_list_node)
                        hlist_del_init(&pos->burst_list_node);
        } else /*
                * Burst not yet large: add bfqq to the burst list. Do
                * not increment the ref counter for bfqq, because bfqq
                * is removed from the burst list before freeing bfqq
                * in put_queue.
                */
                hlist_add_head(&bfqq->burst_list_node, &bfqd->burst_list);
}
/*
 * If many queues belonging to the same group happen to be created
 * shortly after each other, then the processes associated with these
 * queues have typically a common goal. In particular, bursts of queue
 * creations are usually caused by services or applications that spawn
 * many parallel threads/processes. Examples are systemd during boot,
 * or git grep. To help these processes get their job done as soon as
 * possible, it is usually better to not grant either weight-raising
 * or device idling to their queues.
 *
 * In this comment we describe, firstly, the reasons why this fact
 * holds, and, secondly, the next function, which implements the main
 * steps needed to properly mark these queues so that they can then be
 * treated in a different way.
 *
 * The above services or applications benefit mostly from a high
 * throughput: the quicker the requests of the activated queues are
 * cumulatively served, the sooner the target job of these queues gets
 * completed. As a consequence, weight-raising any of these queues,
 * which also implies idling the device for it, is almost always
 * counterproductive. In most cases it just lowers throughput.
 *
 * On the other hand, a burst of queue creations may be caused also by
 * the start of an application that does not consist of a lot of
 * parallel I/O-bound threads. In fact, with a complex application,
 * several short processes may need to be executed to start-up the
 * application. In this respect, to start an application as quickly as
 * possible, the best thing to do is in any case to privilege the I/O
 * related to the application with respect to all other
 * I/O. Therefore, the best strategy to start as quickly as possible
 * an application that causes a burst of queue creations is to
 * weight-raise all the queues created during the burst. This is the
 * exact opposite of the best strategy for the other type of bursts.
 *
 * In the end, to take the best action for each of the two cases, the
 * two types of bursts need to be distinguished. Fortunately, this
 * seems relatively easy, by looking at the sizes of the bursts. In
 * particular, we found a threshold such that only bursts with a
 * larger size than that threshold are apparently caused by
 * services or commands such as systemd or git grep. For brevity,
 * hereafter we call these bursts just 'large'. BFQ *does not*
 * weight-raise queues whose creation occurs in a large burst. In
 * addition, for each of these queues BFQ performs or does not perform
 * idling depending on which choice boosts the throughput more. The
 * exact choice depends on the device and request pattern at hand.
 *
 * Unfortunately, false positives may occur while an interactive task
 * is starting (e.g., an application is being started). The
 * consequence is that the queues associated with the task do not
 * enjoy weight raising as expected. Fortunately these false positives
 * are very rare. They typically occur if some service happens to
 * start doing I/O exactly when the interactive task starts.
 *
 * Turning back to the next function, it implements all the steps
 * needed to detect the occurrence of a large burst and to properly
 * mark all the queues belonging to it (so that they can then be
 * treated in a different way). This goal is achieved by maintaining a
 * "burst list" that holds, temporarily, the queues that belong to the
 * burst in progress. The list is then used to mark these queues as
 * belonging to a large burst if the burst does become large. The main
 * steps are the following.
 *
 * . when the very first queue is created, the queue is inserted into the
 *   list (as it could be the first queue in a possible burst)
 *
 * . if the current burst has not yet become large, and a queue Q that does
 *   not yet belong to the burst is activated shortly after the last time
 *   at which a new queue entered the burst list, then the function appends
 *   Q to the burst list
 *
 * . if, as a consequence of the previous step, the burst size reaches
 *   the large-burst threshold, then
 *
 *     . all the queues in the burst list are marked as belonging to a
 *       large burst
 *
 *     . the burst list is deleted; in fact, the burst list already served
 *       its purpose (keeping temporarily track of the queues in a burst,
 *       so as to be able to mark them as belonging to a large burst in the
 *       previous sub-step), and now is not needed any more
 *
 *     . the device enters a large-burst mode
 *
 * . if a queue Q that does not belong to the burst is created while
 *   the device is in large-burst mode and shortly after the last time
 *   at which a queue either entered the burst list or was marked as
 *   belonging to the current large burst, then Q is immediately marked
 *   as belonging to a large burst.
 *
 * . if a queue Q that does not belong to the burst is created a while
 *   later, i.e., not shortly after the last time at which a queue
 *   either entered the burst list or was marked as belonging to the
 *   current large burst, then the current burst is deemed as finished and:
 *
 *        . the large-burst mode is reset if set
 *
 *        . the burst list is emptied
 *
 *        . Q is inserted in the burst list, as Q may be the first queue
 *          in a possible new burst (then the burst list contains just Q
 *          after this step).
 */
static void bfq_handle_burst(struct bfq_data *bfqd, struct bfq_queue *bfqq)
{
        /*
         * If bfqq is already in the burst list or is part of a large
         * burst, or finally has just been split, then there is
         * nothing else to do.
         */
        if (!hlist_unhashed(&bfqq->burst_list_node) ||
            bfq_bfqq_in_large_burst(bfqq) ||
            time_is_after_eq_jiffies(bfqq->split_time +
                                     msecs_to_jiffies(10)))
                return;

        /*
         * If bfqq's creation happens late enough, or bfqq belongs to
         * a different group than the burst group, then the current
         * burst is finished, and related data structures must be
         * reset.
         *
         * In this respect, consider the special case where bfqq is
         * the very first queue created after BFQ is selected for this
         * device. In this case, last_ins_in_burst and
         * burst_parent_entity are not yet significant when we get
         * here. But it is easy to verify that, whether or not the
         * following condition is true, bfqq will end up being
         * inserted into the burst list. In particular the list will
         * happen to contain only bfqq. And this is exactly what has
         * to happen, as bfqq may be the first queue of the first
         * burst.
         */
        if (time_is_before_jiffies(bfqd->last_ins_in_burst +
            bfqd->bfq_burst_interval) ||
            bfqq->entity.parent != bfqd->burst_parent_entity) {
                bfqd->large_burst = false;
                bfq_reset_burst_list(bfqd, bfqq);
                goto end;
        }

        /*
         * If we get here, then bfqq is being activated shortly after the
         * last queue. So, if the current burst is also large, we can mark
         * bfqq as belonging to this large burst immediately.
         */
        if (bfqd->large_burst) {
                bfq_mark_bfqq_in_large_burst(bfqq);
                goto end;
        }

        /*
         * If we get here, then a large-burst state has not yet been
         * reached, but bfqq is being activated shortly after the last
         * queue. Then we add bfqq to the burst.
         */
        bfq_add_to_burst(bfqd, bfqq);
end:
        /*
         * At this point, bfqq either has been added to the current
         * burst or has caused the current burst to terminate and a
         * possible new burst to start. In particular, in the second
         * case, bfqq has become the first queue in the possible new
         * burst. In both cases last_ins_in_burst needs to be moved
         * forward.
         */
        bfqd->last_ins_in_burst = jiffies;
}
static int bfq_bfqq_budget_left(struct bfq_queue *bfqq)
{
        struct bfq_entity *entity = &bfqq->entity;

        return entity->budget - entity->service;
}
/*
 * If enough samples have been computed, return the current max budget
 * stored in bfqd, which is dynamically updated according to the
 * estimated disk peak rate; otherwise return the default max budget.
 */
static int bfq_max_budget(struct bfq_data *bfqd)
{
        if (bfqd->budgets_assigned < bfq_stats_min_budgets)
                return bfq_default_max_budget;

        return bfqd->bfq_max_budget;
}

/*
 * Return min budget, which is a fraction of the current or default
 * max budget (trying with 1/32)
 */
static int bfq_min_budget(struct bfq_data *bfqd)
{
        if (bfqd->budgets_assigned < bfq_stats_min_budgets)
                return bfq_default_max_budget / 32;

        return bfqd->bfq_max_budget / 32;
}
/*
 * The next function, invoked after the input queue bfqq switches from
 * idle to busy, updates the budget of bfqq. The function also tells
 * whether the in-service queue should be expired, by returning
 * true. The purpose of expiring the in-service queue is to give bfqq
 * the chance to possibly preempt the in-service queue, and the reason
 * for preempting the in-service queue is to achieve one of the two
 * goals below.
 *
 * 1. Guarantee to bfqq its reserved bandwidth even if bfqq has
 * expired because it has remained idle. In particular, bfqq may have
 * expired for one of the following two reasons:
 *
 * - BFQQE_NO_MORE_REQUESTS bfqq did not enjoy any device idling
 *   and did not make it to issue a new request before its last
 *   request was served;
 *
 * - BFQQE_TOO_IDLE bfqq did enjoy device idling, but did not issue
 *   a new request before the expiration of the idling-time.
 *
 * Even if bfqq has expired for one of the above reasons, the process
 * associated with the queue may be however issuing requests greedily,
 * and thus be sensitive to the bandwidth it receives (bfqq may have
 * remained idle for other reasons: CPU high load, bfqq not enjoying
 * idling, I/O throttling somewhere in the path from the process to
 * the I/O scheduler, ...). But if, after every expiration for one of
 * the above two reasons, bfqq has to wait for the service of at least
 * one full budget of another queue before being served again, then
 * bfqq is likely to get a much lower bandwidth or resource time than
 * its reserved ones. To address this issue, two countermeasures need
 * to be taken.
 *
 * First, the budget and the timestamps of bfqq need to be updated in
 * a special way on bfqq reactivation: they need to be updated as if
 * bfqq did not remain idle and did not expire. In fact, if they are
 * computed as if bfqq expired and remained idle until reactivation,
 * then the process associated with bfqq is treated as if, instead of
 * being greedy, it stopped issuing requests when bfqq remained idle,
 * and restarts issuing requests only on this reactivation. In other
 * words, the scheduler does not help the process recover the "service
 * hole" between bfqq expiration and reactivation. As a consequence,
 * the process receives a lower bandwidth than its reserved one. In
 * contrast, to recover this hole, the budget must be updated as if
 * bfqq was not expired at all before this reactivation, i.e., it must
 * be set to the value of the remaining budget when bfqq was
 * expired. Along the same line, timestamps need to be assigned the
 * value they had the last time bfqq was selected for service, i.e.,
 * before last expiration. Thus timestamps need to be back-shifted
 * with respect to their normal computation (see [1] for more details
 * on this tricky aspect).
 *
 * Secondly, to allow the process to recover the hole, the in-service
 * queue must be expired too, to give bfqq the chance to preempt it
 * immediately. In fact, if bfqq has to wait for a full budget of the
 * in-service queue to be completed, then it may become impossible to
 * let the process recover the hole, even if the back-shifted
 * timestamps of bfqq are lower than those of the in-service queue. If
 * this happens for most or all of the holes, then the process may not
 * receive its reserved bandwidth. In this respect, it is worth noting
 * that, being the service of outstanding requests unpreemptible, a
 * little fraction of the holes may however be unrecoverable, thereby
 * causing a little loss of bandwidth.
 *
 * The last important point is detecting whether bfqq does need this
 * bandwidth recovery. In this respect, the next function deems the
 * process associated with bfqq greedy, and thus allows it to recover
 * the hole, if: 1) the process is waiting for the arrival of a new
 * request (which implies that bfqq expired for one of the above two
 * reasons), and 2) such a request has arrived soon. The first
 * condition is controlled through the flag non_blocking_wait_rq,
 * while the second through the flag arrived_in_time. If both
 * conditions hold, then the function computes the budget in the
 * above-described special way, and signals that the in-service queue
 * should be expired. Timestamp back-shifting is done later in
 * __bfq_activate_entity.
 *
 * 2. Reduce latency. Even if timestamps are not backshifted to let
 * the process associated with bfqq recover a service hole, bfqq may
 * however happen to have, after being (re)activated, a lower finish
 * timestamp than the in-service queue. That is, the next budget of
 * bfqq may have to be completed before the one of the in-service
 * queue. If this is the case, then preempting the in-service queue
 * allows this goal to be achieved, apart from the unpreemptible,
 * outstanding requests mentioned above.
 *
 * Unfortunately, regardless of which of the above two goals one wants
 * to achieve, service trees need first to be updated to know whether
 * the in-service queue must be preempted. To have service trees
 * correctly updated, the in-service queue must be expired and
 * rescheduled, and bfqq must be scheduled too. This is one of the
 * most costly operations (in future versions, the scheduling
 * mechanism may be re-designed in such a way to make it possible to
 * know whether preemption is needed without needing to update service
 * trees). In addition, queue preemptions almost always cause random
 * I/O, and thus loss of throughput. Because of these facts, the next
 * function adopts the following simple scheme to avoid both costly
 * operations and too frequent preemptions: it requests the expiration
 * of the in-service queue (unconditionally) only for queues that need
 * to recover a hole, or that either are weight-raised or deserve to
 * be weight-raised.
 */
static bool bfq_bfqq_update_budg_for_activation(struct bfq_data *bfqd,
                                                struct bfq_queue *bfqq,
                                                bool arrived_in_time,
                                                bool wr_or_deserves_wr)
{
        struct bfq_entity *entity = &bfqq->entity;

        if (bfq_bfqq_non_blocking_wait_rq(bfqq) && arrived_in_time) {
                /*
                 * We do not clear the flag non_blocking_wait_rq here, as
                 * the latter is used in bfq_activate_bfqq to signal
                 * that timestamps need to be back-shifted (and is
                 * cleared right after).
                 */

                /*
                 * In next assignment we rely on that either
                 * entity->service or entity->budget are not updated
                 * on expiration if bfqq is empty (see
                 * __bfq_bfqq_recalc_budget). Thus both quantities
                 * remain unchanged after such an expiration, and the
                 * following statement therefore assigns to
                 * entity->budget the remaining budget on such an
                 * expiration. For clarity, entity->service is not
                 * updated on expiration in any case, and, in normal
                 * operation, is reset only when bfqq is selected for
                 * service (see bfq_get_next_queue).
                 */
                entity->budget = min_t(unsigned long,
                                       bfq_bfqq_budget_left(bfqq),
                                       bfqq->max_budget);

                return true;
        }

        entity->budget = max_t(unsigned long, bfqq->max_budget,
                               bfq_serv_to_charge(bfqq->next_rq, bfqq));
        bfq_clear_bfqq_non_blocking_wait_rq(bfqq);
        return wr_or_deserves_wr;
}
static unsigned int bfq_wr_duration(struct bfq_data *bfqd)
{
        u64 dur;

        if (bfqd->bfq_wr_max_time > 0)
                return bfqd->bfq_wr_max_time;

        dur = bfqd->RT_prod;
        do_div(dur, bfqd->peak_rate);

        /*
         * Limit duration between 3 and 13 seconds. Tests show that
         * higher values than 13 seconds often yield the opposite of
         * the desired result, i.e., worsen responsiveness by letting
         * non-interactive and non-soft-real-time applications
         * preserve weight raising for a too long time interval.
         *
         * On the other end, lower values than 3 seconds make it
         * difficult for most interactive tasks to complete their jobs
         * before weight-raising finishes.
         */
        if (dur > msecs_to_jiffies(13000))
                dur = msecs_to_jiffies(13000);
        else if (dur < msecs_to_jiffies(3000))
                dur = msecs_to_jiffies(3000);

        return dur;
}
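
/*
 * Worked example for the computation above (illustrative numbers):
 * RT_prod caches R * T for the detected speed class, so
 * RT_prod / peak_rate implements duration = (R / r) * T. If the
 * division yields, say, 5 s worth of jiffies, the value is within
 * the [3 s, 13 s] bounds and is returned unchanged; a 20 s result
 * would be clamped down to 13 s, and a 1 s result raised to 3 s.
 */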
/*
 * Return the farthest future time instant according to jiffies
 * macro.
 */
static unsigned long bfq_greatest_from_now(void)
{
        return jiffies + MAX_JIFFY_OFFSET;
}
/*
 * Return the farthest past time instant according to jiffies
 * macro.
 */
static unsigned long bfq_smallest_from_now(void)
{
        return jiffies - MAX_JIFFY_OFFSET;
}
static void bfq_update_bfqq_wr_on_rq_arrival(struct bfq_data *bfqd,
                                             struct bfq_queue *bfqq,
                                             unsigned int old_wr_coeff,
                                             bool wr_or_deserves_wr,
                                             bool interactive,
                                             bool in_burst,
                                             bool soft_rt)
{
        if (old_wr_coeff == 1 && wr_or_deserves_wr) {
                /* start a weight-raising period */
                if (interactive) {
                        bfqq->wr_coeff = bfqd->bfq_wr_coeff;
                        bfqq->wr_cur_max_time = bfq_wr_duration(bfqd);
                } else {
                        /*
                         * No interactive weight raising in progress
                         * here: assign minus infinity to
                         * wr_start_at_switch_to_srt, to make sure
                         * that, at the end of the soft-real-time
                         * weight raising period that is starting
                         * now, no interactive weight-raising period
                         * may be wrongly considered as still in
                         * progress (and thus actually started by
                         * mistake).
                         */
                        bfqq->wr_start_at_switch_to_srt =
                                bfq_smallest_from_now();
                        bfqq->wr_coeff = bfqd->bfq_wr_coeff *
                                BFQ_SOFTRT_WEIGHT_FACTOR;
                        bfqq->wr_cur_max_time =
                                bfqd->bfq_wr_rt_max_time;
                }

                /*
                 * If needed, further reduce budget to make sure it is
                 * close to bfqq's backlog, so as to reduce the
                 * scheduling-error component due to a too large
                 * budget. Do not care about throughput consequences,
                 * but only about latency. Finally, do not assign a
                 * too small budget either, to avoid increasing
                 * latency by causing too frequent expirations.
                 */
                bfqq->entity.budget = min_t(unsigned long,
                                            bfqq->entity.budget,
                                            2 * bfq_min_budget(bfqd));
        } else if (old_wr_coeff > 1) {
                if (interactive) { /* update wr coeff and duration */
                        bfqq->wr_coeff = bfqd->bfq_wr_coeff;
                        bfqq->wr_cur_max_time = bfq_wr_duration(bfqd);
                } else if (in_burst)
                        bfqq->wr_coeff = 1;
                else if (soft_rt) {
                        /*
                         * The application is now or still meeting the
                         * requirements for being deemed soft rt. We
                         * can then correctly and safely (re)charge
                         * the weight-raising duration for the
                         * application with the weight-raising
                         * duration for soft rt applications.
                         *
                         * In particular, doing this recharge now, i.e.,
                         * before the weight-raising period for the
                         * application finishes, reduces the probability
                         * of the following negative scenario:
                         * 1) the weight of a soft rt application is
                         *    raised at startup (as for any newly
                         *    created application),
                         * 2) since the application is not interactive,
                         *    at a certain time weight-raising is
                         *    stopped for the application,
                         * 3) at that time the application happens to
                         *    still have pending requests, and hence
                         *    is destined to not have a chance to be
                         *    deemed soft rt before these requests are
                         *    completed (see the comments to the
                         *    function bfq_bfqq_softrt_next_start()
                         *    for details on soft rt detection),
                         * 4) these pending requests experience a high
                         *    latency because the application is not
                         *    weight-raised while they are pending.
                         */
                        if (bfqq->wr_cur_max_time !=
                                bfqd->bfq_wr_rt_max_time) {
                                bfqq->wr_start_at_switch_to_srt =
                                        bfqq->last_wr_start_finish;

                                bfqq->wr_cur_max_time =
                                        bfqd->bfq_wr_rt_max_time;
                                bfqq->wr_coeff = bfqd->bfq_wr_coeff *
                                        BFQ_SOFTRT_WEIGHT_FACTOR;
                        }
                        bfqq->last_wr_start_finish = jiffies;
                }
        }
}
static bool bfq_bfqq_idle_for_long_time(struct bfq_data *bfqd,
                                        struct bfq_queue *bfqq)
{
        return bfqq->dispatched == 0 &&
                time_is_before_jiffies(
                        bfqq->budget_timeout +
                        bfqd->bfq_wr_min_idle_time);
}
static void bfq_bfqq_handle_idle_busy_switch(struct bfq_data *bfqd,
                                             struct bfq_queue *bfqq,
                                             unsigned int old_wr_coeff,
                                             struct request *rq,
                                             bool *interactive)
{
        bool soft_rt, in_burst, wr_or_deserves_wr,
                bfqq_wants_to_preempt,
                idle_for_long_time = bfq_bfqq_idle_for_long_time(bfqd, bfqq),
                /*
                 * See the comments on
                 * bfq_bfqq_update_budg_for_activation for
                 * details on the usage of the next variable.
                 */
                arrived_in_time = ktime_get_ns() <=
                        bfqq->ttime.last_end_request +
                        bfqd->bfq_slice_idle * 3;

        bfqg_stats_update_io_add(bfqq_group(RQ_BFQQ(rq)), bfqq, rq->cmd_flags);

        /*
         * bfqq deserves to be weight-raised if:
         * - it is sync,
         * - it does not belong to a large burst,
         * - it has been idle for enough time or is soft real-time,
         * - is linked to a bfq_io_cq (it is not shared in any sense).
         */
        in_burst = bfq_bfqq_in_large_burst(bfqq);
        soft_rt = bfqd->bfq_wr_max_softrt_rate > 0 &&
                !in_burst &&
                time_is_before_jiffies(bfqq->soft_rt_next_start);
        *interactive = !in_burst && idle_for_long_time;
        wr_or_deserves_wr = bfqd->low_latency &&
                (bfqq->wr_coeff > 1 ||
                 (bfq_bfqq_sync(bfqq) &&
                  bfqq->bic && (*interactive || soft_rt)));

        /*
         * Using the last flag, update budget and check whether bfqq
         * may want to preempt the in-service queue.
         */
        bfqq_wants_to_preempt =
                bfq_bfqq_update_budg_for_activation(bfqd, bfqq,
                                                    arrived_in_time,
                                                    wr_or_deserves_wr);

        /*
         * If bfqq happened to be activated in a burst, but has been
         * idle for much more than an interactive queue, then we
         * assume that, in the overall I/O initiated in the burst, the
         * I/O associated with bfqq is finished. So bfqq does not need
         * to be treated as a queue belonging to a burst
         * anymore. Accordingly, we reset bfqq's in_large_burst flag
         * if set, and remove bfqq from the burst list if it's
         * there. We do not decrement burst_size, because the fact
         * that bfqq does not need to belong to the burst list any
         * more does not invalidate the fact that bfqq was created in
         * a burst.
         */
        if (likely(!bfq_bfqq_just_created(bfqq)) &&
            idle_for_long_time &&
            time_is_before_jiffies(
                    bfqq->budget_timeout +
                    msecs_to_jiffies(10000))) {
                hlist_del_init(&bfqq->burst_list_node);
                bfq_clear_bfqq_in_large_burst(bfqq);
        }

        bfq_clear_bfqq_just_created(bfqq);

        if (!bfq_bfqq_IO_bound(bfqq)) {
                if (arrived_in_time) {
                        bfqq->requests_within_timer++;
                        if (bfqq->requests_within_timer >=
                            bfqd->bfq_requests_within_timer)
                                bfq_mark_bfqq_IO_bound(bfqq);
                } else
                        bfqq->requests_within_timer = 0;
        }

        if (bfqd->low_latency) {
                if (unlikely(time_is_after_jiffies(bfqq->split_time)))
                        /* wraparound */
                        bfqq->split_time =
                                jiffies - bfqd->bfq_wr_min_idle_time - 1;

                if (time_is_before_jiffies(bfqq->split_time +
                                           bfqd->bfq_wr_min_idle_time)) {
                        bfq_update_bfqq_wr_on_rq_arrival(bfqd, bfqq,
                                                         old_wr_coeff,
                                                         wr_or_deserves_wr,
                                                         *interactive,
                                                         in_burst,
                                                         soft_rt);

                        if (old_wr_coeff != bfqq->wr_coeff)
                                bfqq->entity.prio_changed = 1;
                }
        }

        bfqq->last_idle_bklogged = jiffies;
        bfqq->service_from_backlogged = 0;
        bfq_clear_bfqq_softrt_update(bfqq);

        bfq_add_bfqq_busy(bfqd, bfqq);

        /*
         * Expire in-service queue only if preemption may be needed
         * for guarantees. In this respect, the function
         * next_queue_may_preempt just checks a simple, necessary
         * condition, and not a sufficient condition based on
         * timestamps. In fact, for the latter condition to be
         * evaluated, timestamps would need first to be updated, and
         * this operation is quite costly (see the comments on the
         * function bfq_bfqq_update_budg_for_activation).
         */
        if (bfqd->in_service_queue && bfqq_wants_to_preempt &&
            bfqd->in_service_queue->wr_coeff < bfqq->wr_coeff &&
            next_queue_may_preempt(bfqd))
                bfq_bfqq_expire(bfqd, bfqd->in_service_queue,
                                false, BFQQE_PREEMPTED);
}
static void bfq_add_request(struct request *rq)
{
        struct bfq_queue *bfqq = RQ_BFQQ(rq);
        struct bfq_data *bfqd = bfqq->bfqd;
        struct request *next_rq, *prev;
        unsigned int old_wr_coeff = bfqq->wr_coeff;
        bool interactive = false;

        bfq_log_bfqq(bfqd, bfqq, "add_request %d", rq_is_sync(rq));
        bfqq->queued[rq_is_sync(rq)]++;
        bfqd->queued++;

        elv_rb_add(&bfqq->sort_list, rq);

        /*
         * Check if this request is a better next-serve candidate.
         */
        prev = bfqq->next_rq;
        next_rq = bfq_choose_req(bfqd, bfqq->next_rq, rq, bfqd->last_position);
        bfqq->next_rq = next_rq;

        /*
         * Adjust priority tree position, if next_rq changes.
         */
        if (prev != bfqq->next_rq)
                bfq_pos_tree_add_move(bfqd, bfqq);

        if (!bfq_bfqq_busy(bfqq)) /* switching to busy ... */
                bfq_bfqq_handle_idle_busy_switch(bfqd, bfqq, old_wr_coeff,
                                                 rq, &interactive);
        else {
                if (bfqd->low_latency && old_wr_coeff == 1 && !rq_is_sync(rq) &&
                    time_is_before_jiffies(
                                bfqq->last_wr_start_finish +
                                bfqd->bfq_wr_min_inter_arr_async)) {
                        bfqq->wr_coeff = bfqd->bfq_wr_coeff;
                        bfqq->wr_cur_max_time = bfq_wr_duration(bfqd);

                        bfqd->wr_busy_queues++;
                        bfqq->entity.prio_changed = 1;
                }
                if (prev != bfqq->next_rq)
                        bfq_updated_next_req(bfqd, bfqq);
        }

        /*
         * Assign jiffies to last_wr_start_finish in the following
         * cases:
         *
         * . if bfqq is not going to be weight-raised, because, for
         *   non weight-raised queues, last_wr_start_finish stores the
         *   arrival time of the last request; as of now, this piece
         *   of information is used only for deciding whether to
         *   weight-raise async queues
         *
         * . if bfqq is not weight-raised, because, if bfqq is now
         *   switching to weight-raised, then last_wr_start_finish
         *   stores the time when weight-raising starts
         *
         * . if bfqq is interactive, because, regardless of whether
         *   bfqq is currently weight-raised, the weight-raising
         *   period must start or restart (this case is considered
         *   separately because it is not detected by the above
         *   conditions, if bfqq is already weight-raised)
         *
         * last_wr_start_finish has to be updated also if bfqq is soft
         * real-time, because the weight-raising period is constantly
         * restarted on idle-to-busy transitions for these queues, but
         * this is already done in bfq_bfqq_handle_idle_busy_switch if
         * needed.
         */
        if (bfqd->low_latency &&
                (old_wr_coeff == 1 || bfqq->wr_coeff == 1 || interactive))
                bfqq->last_wr_start_finish = jiffies;
}
static struct request *bfq_find_rq_fmerge(struct bfq_data *bfqd,
                                          struct bio *bio,
                                          struct request_queue *q)
{
        struct bfq_queue *bfqq = bfqd->bio_bfqq;

        if (bfqq)
                return elv_rb_find(&bfqq->sort_list, bio_end_sector(bio));

        return NULL;
}
static sector_t get_sdist(sector_t last_pos, struct request *rq)
{
        if (last_pos)
                return abs(blk_rq_pos(rq) - last_pos);

        return 0;
}
#if 0 /* Still not clear if we can do without next two functions */
static void bfq_activate_request(struct request_queue *q, struct request *rq)
{
        struct bfq_data *bfqd = q->elevator->elevator_data;

        bfqd->rq_in_driver++;
}

static void bfq_deactivate_request(struct request_queue *q, struct request *rq)
{
        struct bfq_data *bfqd = q->elevator->elevator_data;

        bfqd->rq_in_driver--;
}
#endif
static void bfq_remove_request(struct request_queue *q,
                               struct request *rq)
{
        struct bfq_queue *bfqq = RQ_BFQQ(rq);
        struct bfq_data *bfqd = bfqq->bfqd;
        const int sync = rq_is_sync(rq);

        if (bfqq->next_rq == rq) {
                bfqq->next_rq = bfq_find_next_rq(bfqd, bfqq, rq);
                bfq_updated_next_req(bfqd, bfqq);
        }

        if (rq->queuelist.prev != &rq->queuelist)
                list_del_init(&rq->queuelist);
        bfqq->queued[sync]--;
        bfqd->queued--;
        elv_rb_del(&bfqq->sort_list, rq);

        elv_rqhash_del(q, rq);
        if (q->last_merge == rq)
                q->last_merge = NULL;

        if (RB_EMPTY_ROOT(&bfqq->sort_list)) {
                bfqq->next_rq = NULL;

                if (bfq_bfqq_busy(bfqq) && bfqq != bfqd->in_service_queue) {
                        bfq_del_bfqq_busy(bfqd, bfqq, false);
                        /*
                         * bfqq emptied. In normal operation, when
                         * bfqq is empty, bfqq->entity.service and
                         * bfqq->entity.budget must contain,
                         * respectively, the service received and the
                         * budget used last time bfqq emptied. These
                         * facts do not hold in this case, as at least
                         * this last removal occurred while bfqq is
                         * not in service. To avoid inconsistencies,
                         * reset both bfqq->entity.service and
                         * bfqq->entity.budget, if bfqq has still a
                         * process that may issue I/O requests to it.
                         */
                        bfqq->entity.budget = bfqq->entity.service = 0;
                }

                /*
                 * Remove queue from request-position tree as it is empty.
                 */
                if (bfqq->pos_root) {
                        rb_erase(&bfqq->pos_node, bfqq->pos_root);
                        bfqq->pos_root = NULL;
                }
        }

        if (rq->cmd_flags & REQ_META)
                bfqq->meta_pending--;

        bfqg_stats_update_io_remove(bfqq_group(bfqq), rq->cmd_flags);
}
static bool bfq_bio_merge(struct blk_mq_hw_ctx *hctx, struct bio *bio)
{
        struct request_queue *q = hctx->queue;
        struct bfq_data *bfqd = q->elevator->elevator_data;
        struct request *free = NULL;
        /*
         * bfq_bic_lookup grabs the queue_lock: invoke it now and
         * store its return value for later use, to avoid nesting
         * queue_lock inside the bfqd->lock. We assume that the bic
         * returned by bfq_bic_lookup does not go away before
         * bfqd->lock is taken.
         */
        struct bfq_io_cq *bic = bfq_bic_lookup(bfqd, current->io_context, q);
        bool ret;

        spin_lock_irq(&bfqd->lock);

        if (bic)
                bfqd->bio_bfqq = bic_to_bfqq(bic, op_is_sync(bio->bi_opf));
        else
                bfqd->bio_bfqq = NULL;
        bfqd->bio_bic = bic;

        ret = blk_mq_sched_try_merge(q, bio, &free);

        if (free)
                blk_mq_free_request(free);
        spin_unlock_irq(&bfqd->lock);

        return ret;
}
static int bfq_request_merge(struct request_queue *q, struct request **req,
                             struct bio *bio)
{
        struct bfq_data *bfqd = q->elevator->elevator_data;
        struct request *__rq;

        __rq = bfq_find_rq_fmerge(bfqd, bio, q);
        if (__rq && elv_bio_merge_ok(__rq, bio)) {
                *req = __rq;
                return ELEVATOR_FRONT_MERGE;
        }

        return ELEVATOR_NO_MERGE;
}
static void bfq_request_merged(struct request_queue *q, struct request *req,
                               enum elv_merge type)
{
        if (type == ELEVATOR_FRONT_MERGE &&
            rb_prev(&req->rb_node) &&
            blk_rq_pos(req) <
            blk_rq_pos(container_of(rb_prev(&req->rb_node),
                                    struct request, rb_node))) {
                struct bfq_queue *bfqq = RQ_BFQQ(req);
                struct bfq_data *bfqd = bfqq->bfqd;
                struct request *prev, *next_rq;

                /* Reposition request in its sort_list */
                elv_rb_del(&bfqq->sort_list, req);
                elv_rb_add(&bfqq->sort_list, req);

                /* Choose next request to be served for bfqq */
                prev = bfqq->next_rq;
                next_rq = bfq_choose_req(bfqd, bfqq->next_rq, req,
                                         bfqd->last_position);
                bfqq->next_rq = next_rq;
                /*
                 * If next_rq changes, update both the queue's budget to
                 * fit the new request and the queue's position in its
                 * rq_pos_tree.
                 */
                if (prev != bfqq->next_rq) {
                        bfq_updated_next_req(bfqd, bfqq);
                        bfq_pos_tree_add_move(bfqd, bfqq);
                }
        }
}
static void bfq_requests_merged(struct request_queue *q, struct request *rq,
				struct request *next)
{
	struct bfq_queue *bfqq = RQ_BFQQ(rq), *next_bfqq = RQ_BFQQ(next);

	if (!RB_EMPTY_NODE(&rq->rb_node))
		goto end;
	spin_lock_irq(&bfqq->bfqd->lock);

	/*
	 * If next and rq belong to the same bfq_queue and next is older
	 * than rq, then reposition rq in the fifo (by substituting next
	 * with rq). Otherwise, if next and rq belong to different
	 * bfq_queues, never reposition rq: in fact, we would have to
	 * reposition it with respect to next's position in its own fifo,
	 * which would most certainly be too expensive with respect to
	 * the benefits.
	 */
	if (bfqq == next_bfqq &&
	    !list_empty(&rq->queuelist) && !list_empty(&next->queuelist) &&
	    next->fifo_time < rq->fifo_time) {
		list_del_init(&rq->queuelist);
		list_replace_init(&next->queuelist, &rq->queuelist);
		rq->fifo_time = next->fifo_time;
	}

	if (bfqq->next_rq == next)
		bfqq->next_rq = rq;

	bfq_remove_request(q, next);

	spin_unlock_irq(&bfqq->bfqd->lock);
end:
	bfqg_stats_update_io_merged(bfqq_group(bfqq), next->cmd_flags);
}

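/*
 * Example of the fifo repositioning above (hypothetical requests): if
 * rq and next belong to the same queue and next entered the fifo
 * first (next->fifo_time < rq->fifo_time), the surviving request rq
 * takes next's slot and expiry time, so the merged request keeps the
 * older deadline rather than the newer one.
 */
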
/* Must be called with bfqq != NULL */
static void bfq_bfqq_end_wr(struct bfq_queue *bfqq)
{
	if (bfq_bfqq_busy(bfqq))
		bfqq->bfqd->wr_busy_queues--;
	bfqq->wr_coeff = 1;
	bfqq->wr_cur_max_time = 0;
	bfqq->last_wr_start_finish = jiffies;
	/*
	 * Trigger a weight change on the next invocation of
	 * __bfq_entity_update_weight_prio.
	 */
	bfqq->entity.prio_changed = 1;
}

void bfq_end_wr_async_queues(struct bfq_data *bfqd,
			     struct bfq_group *bfqg)
{
	int i, j;

	for (i = 0; i < 2; i++)
		for (j = 0; j < IOPRIO_BE_NR; j++)
			if (bfqg->async_bfqq[i][j])
				bfq_bfqq_end_wr(bfqg->async_bfqq[i][j]);
	if (bfqg->async_idle_bfqq)
		bfq_bfqq_end_wr(bfqg->async_idle_bfqq);
}

static void bfq_end_wr(struct bfq_data *bfqd)
{
	struct bfq_queue *bfqq;

	spin_lock_irq(&bfqd->lock);

	list_for_each_entry(bfqq, &bfqd->active_list, bfqq_list)
		bfq_bfqq_end_wr(bfqq);
	list_for_each_entry(bfqq, &bfqd->idle_list, bfqq_list)
		bfq_bfqq_end_wr(bfqq);
	bfq_end_wr_async(bfqd);

	spin_unlock_irq(&bfqd->lock);
}

static sector_t bfq_io_struct_pos(void *io_struct, bool request)
{
	if (request)
		return blk_rq_pos(io_struct);
	else
		return ((struct bio *)io_struct)->bi_iter.bi_sector;
}

static int bfq_rq_close_to_sector(void *io_struct, bool request,
				  sector_t sector)
{
	return abs(bfq_io_struct_pos(io_struct, request) - sector) <=
	       BFQQ_CLOSE_THR;
}

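/*
 * Illustrative instance of the check above, assuming the default
 * BFQQ_CLOSE_THR of 8 * 1024 sectors: with 512-byte sectors, an I/O
 * unit positioned anywhere within +/- 4 MiB of @sector is considered
 * close to it, and hence a candidate for cooperation.
 */
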
static struct bfq_queue *bfqq_find_close(struct bfq_data *bfqd,
					 struct bfq_queue *bfqq,
					 sector_t sector)
{
	struct rb_root *root = &bfq_bfqq_to_bfqg(bfqq)->rq_pos_tree;
	struct rb_node *parent, *node;
	struct bfq_queue *__bfqq;

	if (RB_EMPTY_ROOT(root))
		return NULL;

	/*
	 * First, if we find a request starting at the end of the last
	 * request, choose it.
	 */
	__bfqq = bfq_rq_pos_tree_lookup(bfqd, root, sector, &parent, NULL);
	if (__bfqq)
		return __bfqq;

	/*
	 * If the exact sector wasn't found, the parent of the NULL leaf
	 * will contain the closest sector (rq_pos_tree sorted by
	 * next_request position).
	 */
	__bfqq = rb_entry(parent, struct bfq_queue, pos_node);
	if (bfq_rq_close_to_sector(__bfqq->next_rq, true, sector))
		return __bfqq;

	if (blk_rq_pos(__bfqq->next_rq) < sector)
		node = rb_next(&__bfqq->pos_node);
	else
		node = rb_prev(&__bfqq->pos_node);
	if (!node)
		return NULL;

	__bfqq = rb_entry(node, struct bfq_queue, pos_node);
	if (bfq_rq_close_to_sector(__bfqq->next_rq, true, sector))
		return __bfqq;

	return NULL;
}

static struct bfq_queue *bfq_find_close_cooperator(struct bfq_data *bfqd,
						   struct bfq_queue *cur_bfqq,
						   sector_t sector)
{
	struct bfq_queue *bfqq;

	/*
	 * We shall notice if some of the queues are cooperating,
	 * e.g., working closely on the same area of the device. In
	 * that case, we can group them together and: 1) don't waste
	 * time idling, and 2) serve the union of their requests in
	 * the best possible order for throughput.
	 */
	bfqq = bfqq_find_close(bfqd, cur_bfqq, sector);
	if (!bfqq || bfqq == cur_bfqq)
		return NULL;

	return bfqq;
}

static struct bfq_queue *
bfq_setup_merge(struct bfq_queue *bfqq, struct bfq_queue *new_bfqq)
{
	int process_refs, new_process_refs;
	struct bfq_queue *__bfqq;

	/*
	 * If there are no process references on the new_bfqq, then it is
	 * unsafe to follow the ->new_bfqq chain as other bfqq's in the chain
	 * may have dropped their last reference (not just their last process
	 * reference).
	 */
	if (!bfqq_process_refs(new_bfqq))
		return NULL;

	/* Avoid a circular list and skip interim queue merges. */
	while ((__bfqq = new_bfqq->new_bfqq)) {
		if (__bfqq == bfqq)
			return NULL;
		new_bfqq = __bfqq;
	}

	process_refs = bfqq_process_refs(bfqq);
	new_process_refs = bfqq_process_refs(new_bfqq);
	/*
	 * If the process for the bfqq has gone away, there is no
	 * sense in merging the queues.
	 */
	if (process_refs == 0 || new_process_refs == 0)
		return NULL;

	bfq_log_bfqq(bfqq->bfqd, bfqq, "scheduling merge with queue %d",
		new_bfqq->pid);

	/*
	 * Merging is just a redirection: the requests of the process
	 * owning one of the two queues are redirected to the other queue.
	 * The latter queue, in its turn, is set as shared if this is the
	 * first time that the requests of some process are redirected to
	 * it.
	 *
	 * We redirect bfqq to new_bfqq and not the opposite, because
	 * we are in the context of the process owning bfqq, thus we
	 * have the io_cq of this process. So we can immediately
	 * configure this io_cq to redirect the requests of the
	 * process to new_bfqq. In contrast, the io_cq of new_bfqq is
	 * not available any more (new_bfqq->bic == NULL).
	 *
	 * Anyway, even in case new_bfqq coincides with the in-service
	 * queue, redirecting requests to the in-service queue is the
	 * best option, as we feed the in-service queue with new
	 * requests close to the last request served and, by doing so,
	 * are likely to increase the throughput.
	 */
	bfqq->new_bfqq = new_bfqq;
	new_bfqq->ref += process_refs;
	return new_bfqq;
}

static bool bfq_may_be_close_cooperator(struct bfq_queue *bfqq,
					struct bfq_queue *new_bfqq)
{
	if (bfq_class_idle(bfqq) || bfq_class_idle(new_bfqq) ||
	    (bfqq->ioprio_class != new_bfqq->ioprio_class))
		return false;

	/*
	 * If either of the queues has already been detected as seeky,
	 * then merging it with the other queue is unlikely to lead to
	 * sequential I/O.
	 */
	if (BFQQ_SEEKY(bfqq) || BFQQ_SEEKY(new_bfqq))
		return false;

	/*
	 * Interleaved I/O is known to be done by (some) applications
	 * only for reads, so it does not make sense to merge async
	 * queues.
	 */
	if (!bfq_bfqq_sync(bfqq) || !bfq_bfqq_sync(new_bfqq))
		return false;

	return true;
}

/*
 * If this function returns true, then bfqq cannot be merged. The idea
 * is that true cooperation happens very early after processes start
 * to do I/O. Usually, late cooperations are just accidental false
 * positives. In case bfqq is weight-raised, such false positives
 * would evidently degrade latency guarantees for bfqq.
 */
static bool wr_from_too_long(struct bfq_queue *bfqq)
{
	return bfqq->wr_coeff > 1 &&
		time_is_before_jiffies(bfqq->last_wr_start_finish +
				       msecs_to_jiffies(100));
}

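/*
 * Numeric example (illustrative): with the 100 ms window above, a
 * queue whose weight-raising period started 40 ms ago may still be
 * merged, while a queue raised 150 ms ago is filtered out as a
 * probable accidental (late) cooperation.
 */
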
/*
 * Attempt to schedule a merge of bfqq with the currently in-service
 * queue or with a close queue among the scheduled queues. Return
 * NULL if no merge was scheduled, a pointer to the shared bfq_queue
 * structure otherwise.
 *
 * The OOM queue is not allowed to participate to cooperation: in fact, since
 * the requests temporarily redirected to the OOM queue could be redirected
 * again to dedicated queues at any time, the state needed to correctly
 * handle merging with the OOM queue would be quite complex and expensive
 * to maintain. Besides, in such a critical condition as an out of memory,
 * the benefits of queue merging may be little relevant, or even negligible.
 *
 * Weight-raised queues can be merged only if their weight-raising
 * period has just started. In fact cooperating processes are usually
 * started together. Thus, with this filter we avoid false positives
 * that would jeopardize low-latency guarantees.
 *
 * WARNING: queue merging may impair fairness among non-weight raised
 * queues, for at least two reasons: 1) the original weight of a
 * merged queue may change during the merged state, 2) even being the
 * weight the same, a merged queue may be bloated with many more
 * requests than the ones produced by its originally-associated
 * process.
 */
static struct bfq_queue *
bfq_setup_cooperator(struct bfq_data *bfqd, struct bfq_queue *bfqq,
		     void *io_struct, bool request)
{
	struct bfq_queue *in_service_bfqq, *new_bfqq;

	if (bfqq->new_bfqq)
		return bfqq->new_bfqq;

	if (!io_struct ||
	    wr_from_too_long(bfqq) ||
	    unlikely(bfqq == &bfqd->oom_bfqq))
		return NULL;

	/* If there is only one backlogged queue, don't search. */
	if (bfqd->busy_queues == 1)
		return NULL;

	in_service_bfqq = bfqd->in_service_queue;

	if (!in_service_bfqq || in_service_bfqq == bfqq
	    || wr_from_too_long(in_service_bfqq) ||
	    unlikely(in_service_bfqq == &bfqd->oom_bfqq))
		goto check_scheduled;

	if (bfq_rq_close_to_sector(io_struct, request, bfqd->last_position) &&
	    bfqq->entity.parent == in_service_bfqq->entity.parent &&
	    bfq_may_be_close_cooperator(bfqq, in_service_bfqq)) {
		new_bfqq = bfq_setup_merge(bfqq, in_service_bfqq);
		if (new_bfqq)
			return new_bfqq;
	}
	/*
	 * Check whether there is a cooperator among currently scheduled
	 * queues. The only thing we need is that the bio/request is not
	 * NULL, as we need it to establish whether a cooperator exists.
	 */
check_scheduled:
	new_bfqq = bfq_find_close_cooperator(bfqd, bfqq,
			bfq_io_struct_pos(io_struct, request));

	if (new_bfqq && !wr_from_too_long(new_bfqq) &&
	    likely(new_bfqq != &bfqd->oom_bfqq) &&
	    bfq_may_be_close_cooperator(bfqq, new_bfqq))
		return bfq_setup_merge(bfqq, new_bfqq);

	return NULL;
}

static void bfq_bfqq_save_state(struct bfq_queue *bfqq)
{
	struct bfq_io_cq *bic = bfqq->bic;

	/*
	 * If !bfqq->bic, the queue is already shared or its requests
	 * have already been redirected to a shared queue; both idle window
	 * and weight raising state have already been saved. Do nothing.
	 */
	if (!bic)
		return;

	bic->saved_ttime = bfqq->ttime;
	bic->saved_has_short_ttime = bfq_bfqq_has_short_ttime(bfqq);
	bic->saved_IO_bound = bfq_bfqq_IO_bound(bfqq);
	bic->saved_in_large_burst = bfq_bfqq_in_large_burst(bfqq);
	bic->was_in_burst_list = !hlist_unhashed(&bfqq->burst_list_node);
	bic->saved_wr_coeff = bfqq->wr_coeff;
	bic->saved_wr_start_at_switch_to_srt = bfqq->wr_start_at_switch_to_srt;
	bic->saved_last_wr_start_finish = bfqq->last_wr_start_finish;
	bic->saved_wr_cur_max_time = bfqq->wr_cur_max_time;
}

static void
bfq_merge_bfqqs(struct bfq_data *bfqd, struct bfq_io_cq *bic,
		struct bfq_queue *bfqq, struct bfq_queue *new_bfqq)
{
	bfq_log_bfqq(bfqd, bfqq, "merging with queue %lu",
		(unsigned long)new_bfqq->pid);
	/* Save weight raising and idle window of the merged queues */
	bfq_bfqq_save_state(bfqq);
	bfq_bfqq_save_state(new_bfqq);
	if (bfq_bfqq_IO_bound(bfqq))
		bfq_mark_bfqq_IO_bound(new_bfqq);
	bfq_clear_bfqq_IO_bound(bfqq);

	/*
	 * If bfqq is weight-raised, then let new_bfqq inherit
	 * weight-raising. To reduce false positives, neglect the case
	 * where bfqq has just been created, but has not yet made it
	 * to be weight-raised (which may happen because EQM may merge
	 * bfqq even before bfq_add_request is executed for the first
	 * time for bfqq). Handling this case would however be very
	 * easy, thanks to the flag just_created.
	 */
	if (new_bfqq->wr_coeff == 1 && bfqq->wr_coeff > 1) {
		new_bfqq->wr_coeff = bfqq->wr_coeff;
		new_bfqq->wr_cur_max_time = bfqq->wr_cur_max_time;
		new_bfqq->last_wr_start_finish = bfqq->last_wr_start_finish;
		new_bfqq->wr_start_at_switch_to_srt =
			bfqq->wr_start_at_switch_to_srt;
		if (bfq_bfqq_busy(new_bfqq))
			bfqd->wr_busy_queues++;
		new_bfqq->entity.prio_changed = 1;
	}

	if (bfqq->wr_coeff > 1) { /* bfqq has given its wr to new_bfqq */
		bfqq->wr_coeff = 1;
		bfqq->entity.prio_changed = 1;
		if (bfq_bfqq_busy(bfqq))
			bfqd->wr_busy_queues--;
	}

	bfq_log_bfqq(bfqd, new_bfqq, "merge_bfqqs: wr_busy %d",
		     bfqd->wr_busy_queues);

	/*
	 * Merge queues (that is, let bic redirect its requests to new_bfqq)
	 */
	bic_set_bfqq(bic, new_bfqq, 1);
	bfq_mark_bfqq_coop(new_bfqq);
	/*
	 * new_bfqq now belongs to at least two bics (it is a shared queue):
	 * set new_bfqq->bic to NULL. bfqq either:
	 * - does not belong to any bic any more, and hence bfqq->bic must
	 *   be set to NULL, or
	 * - is a queue whose owning bics have already been redirected to a
	 *   different queue, hence the queue is destined to not belong to
	 *   any bic soon and bfqq->bic is already NULL (therefore the next
	 *   assignment causes no harm).
	 */
	new_bfqq->bic = NULL;
	bfqq->bic = NULL;
	/* release process reference to bfqq */
	bfq_put_queue(bfqq);
}

static bool bfq_allow_bio_merge(struct request_queue *q, struct request *rq,
				struct bio *bio)
{
	struct bfq_data *bfqd = q->elevator->elevator_data;
	bool is_sync = op_is_sync(bio->bi_opf);
	struct bfq_queue *bfqq = bfqd->bio_bfqq, *new_bfqq;

	/*
	 * Disallow merge of a sync bio into an async request.
	 */
	if (is_sync && !rq_is_sync(rq))
		return false;

	/*
	 * Lookup the bfqq that this bio will be queued with. Allow
	 * merge only if rq is queued there.
	 */
	if (!bfqq)
		return false;

	/*
	 * We take advantage of this function to perform an early merge
	 * of the queues of possible cooperating processes.
	 */
	new_bfqq = bfq_setup_cooperator(bfqd, bfqq, bio, false);
	if (new_bfqq) {
		/*
		 * bic still points to bfqq, then it has not yet been
		 * redirected to some other bfq_queue, and a queue
		 * merge between bfqq and new_bfqq can be safely
		 * fulfilled, i.e., bic can be redirected to new_bfqq
		 * and bfqq can be put.
		 */
		bfq_merge_bfqqs(bfqd, bfqd->bio_bic, bfqq,
				new_bfqq);
		/*
		 * If we get here, bio will be queued into new_queue,
		 * so use new_bfqq to decide whether bio and rq can be
		 * merged.
		 */
		bfqq = new_bfqq;

		/*
		 * Change also bfqd->bio_bfqq, as
		 * bfqd->bio_bic now points to new_bfqq, and
		 * this function may be invoked again (and then may
		 * use again bfqd->bio_bfqq).
		 */
		bfqd->bio_bfqq = bfqq;
	}

	return bfqq == RQ_BFQQ(rq);
}

/*
 * Set the maximum time for the in-service queue to consume its
 * budget. This prevents seeky processes from lowering the throughput.
 * In practice, a time-slice service scheme is used with seeky
 * processes.
 */
static void bfq_set_budget_timeout(struct bfq_data *bfqd,
				   struct bfq_queue *bfqq)
{
	unsigned int timeout_coeff;

	if (bfqq->wr_cur_max_time == bfqd->bfq_wr_rt_max_time)
		timeout_coeff = 1;
	else
		timeout_coeff = bfqq->entity.weight / bfqq->entity.orig_weight;

	bfqd->last_budget_start = ktime_get();

	bfqq->budget_timeout = jiffies +
		bfqd->bfq_timeout * timeout_coeff;
}

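/*
 * Worked example (made-up numbers): with bfq_timeout equal to 16
 * jiffies and a queue whose entity.weight is three times its
 * orig_weight, timeout_coeff = 3 and the budget timeout fires after
 * 48 jiffies; a queue in its soft real-time weight-raising period
 * keeps timeout_coeff = 1 instead.
 */
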
static void __bfq_set_in_service_queue(struct bfq_data *bfqd,
				       struct bfq_queue *bfqq)
{
	if (bfqq) {
		bfqg_stats_update_avg_queue_size(bfqq_group(bfqq));
		bfq_clear_bfqq_fifo_expire(bfqq);

		bfqd->budgets_assigned = (bfqd->budgets_assigned * 7 + 256) / 8;

		if (time_is_before_jiffies(bfqq->last_wr_start_finish) &&
		    bfqq->wr_coeff > 1 &&
		    bfqq->wr_cur_max_time == bfqd->bfq_wr_rt_max_time &&
		    time_is_before_jiffies(bfqq->budget_timeout)) {
			/*
			 * For soft real-time queues, move the start
			 * of the weight-raising period forward by the
			 * time the queue has not received any
			 * service. Otherwise, a relatively long
			 * service delay is likely to cause the
			 * weight-raising period of the queue to end,
			 * because of the short duration of the
			 * weight-raising period of a soft real-time
			 * queue. It is worth noting that this move
			 * is not so dangerous for the other queues,
			 * because soft real-time queues are not
			 * greedy.
			 *
			 * To not add a further variable, we use the
			 * overloaded field budget_timeout to
			 * determine for how long the queue has not
			 * received service, i.e., how much time has
			 * elapsed since the queue expired. However,
			 * this is a little imprecise, because
			 * budget_timeout is set to jiffies if bfqq
			 * not only expires, but also remains with no
			 * request.
			 */
			if (time_after(bfqq->budget_timeout,
				       bfqq->last_wr_start_finish))
				bfqq->last_wr_start_finish +=
					jiffies - bfqq->budget_timeout;
			else
				bfqq->last_wr_start_finish = jiffies;
		}

		bfq_set_budget_timeout(bfqd, bfqq);
		bfq_log_bfqq(bfqd, bfqq,
			     "set_in_service_queue, cur-budget = %d",
			     bfqq->entity.budget);
	}

	bfqd->in_service_queue = bfqq;
}

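/*
 * Illustrative instance of the shift above: if a soft real-time queue
 * expired at budget_timeout = T and is selected for service again
 * only at T + 50 ms, last_wr_start_finish is moved forward by those
 * 50 ms, so the residual weight-raising period is preserved instead
 * of being consumed while the queue was merely waiting for service.
 */
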
/*
 * Get and set a new queue for service.
 */
static struct bfq_queue *bfq_set_in_service_queue(struct bfq_data *bfqd)
{
	struct bfq_queue *bfqq = bfq_get_next_queue(bfqd);

	__bfq_set_in_service_queue(bfqd, bfqq);
	return bfqq;
}

static void bfq_arm_slice_timer(struct bfq_data *bfqd)
{
	struct bfq_queue *bfqq = bfqd->in_service_queue;
	u64 sl;

	bfq_mark_bfqq_wait_request(bfqq);

	/*
	 * We don't want to idle for seeks, but we do want to allow
	 * fair distribution of slice time for a process doing back-to-back
	 * seeks. So allow a little bit of time for it to submit a new rq.
	 */
	sl = bfqd->bfq_slice_idle;
	/*
	 * Unless the queue is being weight-raised or the scenario is
	 * asymmetric, grant only minimum idle time if the queue
	 * is seeky. A long idling is preserved for a weight-raised
	 * queue, or, more in general, in an asymmetric scenario,
	 * because a long idling is needed for guaranteeing to a queue
	 * its reserved share of the throughput (in particular, it is
	 * needed if the queue has a higher weight than some other
	 * queue).
	 */
	if (BFQQ_SEEKY(bfqq) && bfqq->wr_coeff == 1 &&
	    bfq_symmetric_scenario(bfqd))
		sl = min_t(u64, sl, BFQ_MIN_TT);

	bfqd->last_idling_start = ktime_get();
	hrtimer_start(&bfqd->idle_slice_timer, ns_to_ktime(sl),
		      HRTIMER_MODE_REL);
	bfqg_stats_set_start_idle_time(bfqq_group(bfqq));
}

/*
 * In autotuning mode, max_budget is dynamically recomputed as the
 * amount of sectors transferred in timeout at the estimated peak
 * rate. This enables BFQ to utilize a full timeslice with a full
 * budget, even if the in-service queue is served at peak rate. And
 * this maximises throughput with sequential workloads.
 */
static unsigned long bfq_calc_max_budget(struct bfq_data *bfqd)
{
	return (u64)bfqd->peak_rate * USEC_PER_MSEC *
		jiffies_to_msecs(bfqd->bfq_timeout)>>BFQ_RATE_SHIFT;
}

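/*
 * Worked instance of the formula above, with made-up but plausible
 * numbers: peak_rate is stored in sectors/usec left-shifted by
 * BFQ_RATE_SHIFT. For a device doing ~100 MB/s, i.e., about 0.2
 * sectors/usec with 512-byte sectors, and a timeout of 125 ms, the
 * autotuned max_budget is roughly 0.2 * 1000 * 125 = 25000 sectors
 * (~12 MiB): the amount servable in one full timeslice at peak rate.
 */
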
/*
 * Update parameters related to throughput and responsiveness, as a
 * function of the estimated peak rate. See comments on
 * bfq_calc_max_budget(), and on the T_slow and T_fast arrays.
 */
static void update_thr_responsiveness_params(struct bfq_data *bfqd)
{
	int dev_type = blk_queue_nonrot(bfqd->queue);

	if (bfqd->bfq_user_max_budget == 0)
		bfqd->bfq_max_budget =
			bfq_calc_max_budget(bfqd);

	if (bfqd->device_speed == BFQ_BFQD_FAST &&
	    bfqd->peak_rate < device_speed_thresh[dev_type]) {
		bfqd->device_speed = BFQ_BFQD_SLOW;
		bfqd->RT_prod = R_slow[dev_type] *
			T_slow[dev_type];
	} else if (bfqd->device_speed == BFQ_BFQD_SLOW &&
		   bfqd->peak_rate > device_speed_thresh[dev_type]) {
		bfqd->device_speed = BFQ_BFQD_FAST;
		bfqd->RT_prod = R_fast[dev_type] *
			T_fast[dev_type];
	}

	bfq_log(bfqd,
		"dev_type %s dev_speed_class = %s (%llu sects/sec), thresh %llu sects/sec",
		dev_type == 0 ? "ROT" : "NONROT",
		bfqd->device_speed == BFQ_BFQD_FAST ? "FAST" : "SLOW",
		bfqd->device_speed == BFQ_BFQD_FAST ?
		(USEC_PER_SEC*(u64)R_fast[dev_type])>>BFQ_RATE_SHIFT :
		(USEC_PER_SEC*(u64)R_slow[dev_type])>>BFQ_RATE_SHIFT,
		(USEC_PER_SEC*(u64)device_speed_thresh[dev_type])>>
		BFQ_RATE_SHIFT);
}

static void bfq_reset_rate_computation(struct bfq_data *bfqd,
				       struct request *rq)
{
	if (rq != NULL) {	/* new rq dispatch now, reset accordingly */
		bfqd->last_dispatch = bfqd->first_dispatch = ktime_get_ns();
		bfqd->peak_rate_samples = 1;
		bfqd->sequential_samples = 0;
		bfqd->tot_sectors_dispatched = bfqd->last_rq_max_size =
			blk_rq_sectors(rq);
	} else /* no new rq dispatched, just reset the number of samples */
		bfqd->peak_rate_samples = 0; /* full re-init on next disp. */

	bfq_log(bfqd,
		"reset_rate_computation at end, sample %u/%u tot_sects %llu",
		bfqd->peak_rate_samples, bfqd->sequential_samples,
		bfqd->tot_sectors_dispatched);
}

static void bfq_update_rate_reset(struct bfq_data *bfqd, struct request *rq)
{
	u32 rate, weight, divisor;

	/*
	 * For the convergence property to hold (see comments on
	 * bfq_update_peak_rate()) and for the assessment to be
	 * reliable, a minimum number of samples must be present, and
	 * a minimum amount of time must have elapsed. If not so, do
	 * not compute new rate. Just reset parameters, to get ready
	 * for a new evaluation attempt.
	 */
	if (bfqd->peak_rate_samples < BFQ_RATE_MIN_SAMPLES ||
	    bfqd->delta_from_first < BFQ_RATE_MIN_INTERVAL)
		goto reset_computation;

	/*
	 * If a new request completion has occurred after last
	 * dispatch, then, to approximate the rate at which requests
	 * have been served by the device, it is more precise to
	 * extend the observation interval to the last completion.
	 */
	bfqd->delta_from_first =
		max_t(u64, bfqd->delta_from_first,
		      bfqd->last_completion - bfqd->first_dispatch);

	/*
	 * Rate computed in sects/usec, and not sects/nsec, for
	 * precision issues.
	 */
	rate = div64_ul(bfqd->tot_sectors_dispatched<<BFQ_RATE_SHIFT,
			div_u64(bfqd->delta_from_first, NSEC_PER_USEC));

	/*
	 * Peak rate not updated if:
	 * - the percentage of sequential dispatches is below 3/4 of the
	 *   total, and rate is below the current estimated peak rate
	 * - rate is unreasonably high (> 20M sectors/sec)
	 */
	if ((bfqd->sequential_samples < (3 * bfqd->peak_rate_samples)>>2 &&
	     rate <= bfqd->peak_rate) ||
		rate > 20<<BFQ_RATE_SHIFT)
		goto reset_computation;

	/*
	 * We have to update the peak rate, at last! To this purpose,
	 * we use a low-pass filter. We compute the smoothing constant
	 * of the filter as a function of the 'weight' of the new
	 * measured rate.
	 *
	 * As can be seen in next formulas, we define this weight as a
	 * quantity proportional to how sequential the workload is,
	 * and to how long the observation time interval is.
	 *
	 * The weight runs from 0 to 8. The maximum value of the
	 * weight, 8, yields the minimum value for the smoothing
	 * constant. At this minimum value for the smoothing constant,
	 * the measured rate contributes for half of the next value of
	 * the estimated peak rate.
	 *
	 * So, the first step is to compute the weight as a function
	 * of how sequential the workload is. Note that the weight
	 * cannot reach 9, because bfqd->sequential_samples cannot
	 * become equal to bfqd->peak_rate_samples, which, in its
	 * turn, holds true because bfqd->sequential_samples is not
	 * incremented for the first sample.
	 */
	weight = (9 * bfqd->sequential_samples) / bfqd->peak_rate_samples;

	/*
	 * Second step: further refine the weight as a function of the
	 * duration of the observation interval.
	 */
	weight = min_t(u32, 8,
		       div_u64(weight * bfqd->delta_from_first,
			       BFQ_RATE_REF_INTERVAL));

	/*
	 * Divisor ranging from 10, for minimum weight, to 2, for
	 * maximum weight.
	 */
	divisor = 10 - weight;

	/*
	 * Finally, update peak rate:
	 *
	 * peak_rate = peak_rate * (divisor-1) / divisor  +  rate / divisor
	 */
	bfqd->peak_rate *= divisor-1;
	bfqd->peak_rate /= divisor;
	rate /= divisor; /* smoothing constant alpha = 1/divisor */

	bfqd->peak_rate += rate;
	update_thr_responsiveness_params(bfqd);

reset_computation:
	bfq_reset_rate_computation(bfqd, rq);
}

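/*
 * Worked example of the filter above (illustrative): with a fully
 * sequential workload observed over a full reference interval,
 * weight = 8, so divisor = 2 and the new estimate is
 * (peak_rate + rate) / 2, i.e., the new sample contributes half.
 * With weight = 0 the divisor is 10, and the new sample contributes
 * only one tenth: peak_rate = 0.9 * peak_rate + 0.1 * rate.
 */
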
/*
 * Update the read/write peak rate (the main quantity used for
 * auto-tuning, see update_thr_responsiveness_params()).
 *
 * It is not trivial to estimate the peak rate (correctly): because of
 * the presence of sw and hw queues between the scheduler and the
 * device components that finally serve I/O requests, it is hard to
 * say exactly when a given dispatched request is served inside the
 * device, and for how long. As a consequence, it is hard to know
 * precisely at what rate a given set of requests is actually served
 * by the device.
 *
 * On the opposite end, the dispatch time of any request is trivially
 * available, and, from this piece of information, the "dispatch rate"
 * of requests can be immediately computed. So, the idea in the next
 * function is to use what is known, namely request dispatch times
 * (plus, when useful, request completion times), to estimate what is
 * unknown, namely in-device request service rate.
 *
 * The main issue is that, because of the above facts, the rate at
 * which a certain set of requests is dispatched over a certain time
 * interval can vary greatly with respect to the rate at which the
 * same requests are then served. But, since the size of any
 * intermediate queue is limited, and the service scheme is lossless
 * (no request is silently dropped), the following obvious convergence
 * property holds: the number of requests dispatched MUST become
 * closer and closer to the number of requests completed as the
 * observation interval grows. This is the key property used in
 * the next function to estimate the peak service rate as a function
 * of the observed dispatch rate. The function assumes to be invoked
 * on every request dispatch.
 */
static void bfq_update_peak_rate(struct bfq_data *bfqd, struct request *rq)
{
	u64 now_ns = ktime_get_ns();

	if (bfqd->peak_rate_samples == 0) { /* first dispatch */
		bfq_log(bfqd, "update_peak_rate: goto reset, samples %d",
			bfqd->peak_rate_samples);
		bfq_reset_rate_computation(bfqd, rq);
		goto update_last_values; /* will add one sample */
	}

	/*
	 * Device idle for very long: the observation interval lasting
	 * up to this dispatch cannot be a valid observation interval
	 * for computing a new peak rate (similarly to the late-
	 * completion event in bfq_completed_request()). Go to
	 * update_rate_and_reset to have the following three steps
	 * taken:
	 * - close the observation interval at the last (previous)
	 *   request dispatch or completion
	 * - compute rate, if possible, for that observation interval
	 * - start a new observation interval with this dispatch
	 */
	if (now_ns - bfqd->last_dispatch > 100*NSEC_PER_MSEC &&
	    bfqd->rq_in_driver == 0)
		goto update_rate_and_reset;

	/* Update sampling information */
	bfqd->peak_rate_samples++;

	if ((bfqd->rq_in_driver > 0 ||
		now_ns - bfqd->last_completion < BFQ_MIN_TT)
	     && get_sdist(bfqd->last_position, rq) < BFQQ_SEEK_THR)
		bfqd->sequential_samples++;

	bfqd->tot_sectors_dispatched += blk_rq_sectors(rq);

	/* Reset max observed rq size every 32 dispatches */
	if (likely(bfqd->peak_rate_samples % 32))
		bfqd->last_rq_max_size =
			max_t(u32, blk_rq_sectors(rq), bfqd->last_rq_max_size);
	else
		bfqd->last_rq_max_size = blk_rq_sectors(rq);

	bfqd->delta_from_first = now_ns - bfqd->first_dispatch;

	/* Target observation interval not yet reached, go on sampling */
	if (bfqd->delta_from_first < BFQ_RATE_REF_INTERVAL)
		goto update_last_values;

update_rate_and_reset:
	bfq_update_rate_reset(bfqd, rq);
update_last_values:
	bfqd->last_position = blk_rq_pos(rq) + blk_rq_sectors(rq);
	bfqd->last_dispatch = now_ns;
}

/*
 * Remove request from internal lists.
 */
static void bfq_dispatch_remove(struct request_queue *q, struct request *rq)
{
	struct bfq_queue *bfqq = RQ_BFQQ(rq);

	/*
	 * For consistency, the next instruction should have been
	 * executed after removing the request from the queue and
	 * dispatching it. We execute instead this instruction before
	 * bfq_remove_request() (and hence introduce a temporary
	 * inconsistency), for efficiency. In fact, should this
	 * dispatch occur for a non in-service bfqq, this anticipated
	 * increment prevents two counters related to bfqq->dispatched
	 * from risking to be, first, uselessly decremented, and then
	 * incremented again when the (new) value of bfqq->dispatched
	 * happens to be taken into account.
	 */
	bfqq->dispatched++;
	bfq_update_peak_rate(q->elevator->elevator_data, rq);

	bfq_remove_request(q, rq);
}

static void __bfq_bfqq_expire(struct bfq_data *bfqd, struct bfq_queue *bfqq)
{
	/*
	 * If this bfqq is shared between multiple processes, check
	 * to make sure that those processes are still issuing I/Os
	 * within the mean seek distance. If not, it may be time to
	 * break the queues apart again.
	 */
	if (bfq_bfqq_coop(bfqq) && BFQQ_SEEKY(bfqq))
		bfq_mark_bfqq_split_coop(bfqq);

	if (RB_EMPTY_ROOT(&bfqq->sort_list)) {
		if (bfqq->dispatched == 0)
			/*
			 * Overloading budget_timeout field to store
			 * the time at which the queue remains with no
			 * backlog and no outstanding request; used by
			 * the weight-raising mechanism.
			 */
			bfqq->budget_timeout = jiffies;

		bfq_del_bfqq_busy(bfqd, bfqq, true);
	} else {
		bfq_requeue_bfqq(bfqd, bfqq, true);
		/*
		 * Resort priority tree of potential close cooperators.
		 */
		bfq_pos_tree_add_move(bfqd, bfqq);
	}

	/*
	 * All in-service entities must have been properly deactivated
	 * or requeued before executing the next function, which
	 * resets all in-service entities as no more in service.
	 */
	__bfq_bfqd_reset_in_service(bfqd);
}

/**
 * __bfq_bfqq_recalc_budget - try to adapt the budget to the @bfqq behavior.
 * @bfqd: device data.
 * @bfqq: queue to update.
 * @reason: reason for expiration.
 *
 * Handle the feedback on @bfqq budget at queue expiration.
 * See the body for detailed comments.
 */
static void __bfq_bfqq_recalc_budget(struct bfq_data *bfqd,
				     struct bfq_queue *bfqq,
				     enum bfqq_expiration reason)
{
	struct request *next_rq;
	int budget, min_budget;

	min_budget = bfq_min_budget(bfqd);

	if (bfqq->wr_coeff == 1)
		budget = bfqq->max_budget;
	else /*
	      * Use a constant, low budget for weight-raised queues,
	      * to help achieve a low latency. Keep it slightly higher
	      * than the minimum possible budget, to cause a little
	      * bit fewer expirations.
	      */
		budget = 2 * min_budget;

	bfq_log_bfqq(bfqd, bfqq, "recalc_budg: last budg %d, budg left %d",
		bfqq->entity.budget, bfq_bfqq_budget_left(bfqq));
	bfq_log_bfqq(bfqd, bfqq, "recalc_budg: last max_budg %d, min budg %d",
		budget, bfq_min_budget(bfqd));
	bfq_log_bfqq(bfqd, bfqq, "recalc_budg: sync %d, seeky %d",
		bfq_bfqq_sync(bfqq), BFQQ_SEEKY(bfqd->in_service_queue));

	if (bfq_bfqq_sync(bfqq) && bfqq->wr_coeff == 1) {
		switch (reason) {
		/*
		 * Caveat: in all the following cases we trade latency
		 * for throughput.
		 */
		case BFQQE_TOO_IDLE:
			/*
			 * This is the only case where we may reduce
			 * the budget: if there is no request of the
			 * process still waiting for completion, then
			 * we assume (tentatively) that the timer has
			 * expired because the batch of requests of
			 * the process could have been served with a
			 * smaller budget. Hence, betting that the
			 * process will behave in the same way when it
			 * becomes backlogged again, we reduce its
			 * next budget. As long as we guess right,
			 * this budget cut reduces the latency
			 * experienced by the process.
			 *
			 * However, if there are still outstanding
			 * requests, then the process may have not yet
			 * issued its next request just because it is
			 * still waiting for the completion of some of
			 * the still outstanding ones. So in this
			 * subcase we do not reduce its budget, on the
			 * contrary we increase it to possibly boost
			 * the throughput, as discussed in the
			 * comments to the BUDGET_TIMEOUT case.
			 */
			if (bfqq->dispatched > 0) /* still outstanding reqs */
				budget = min(budget * 2, bfqd->bfq_max_budget);
			else {
				if (budget > 5 * min_budget)
					budget -= 4 * min_budget;
				else
					budget = min_budget;
			}
			break;
		case BFQQE_BUDGET_TIMEOUT:
			/*
			 * We double the budget here because it gives
			 * the chance to boost the throughput if this
			 * is not a seeky process (and has bumped into
			 * this timeout because of, e.g., ZBR).
			 */
			budget = min(budget * 2, bfqd->bfq_max_budget);
			break;
		case BFQQE_BUDGET_EXHAUSTED:
			/*
			 * The process still has backlog, and did not
			 * let either the budget timeout or the disk
			 * idling timeout expire. Hence it is not
			 * seeky, has a short thinktime and may be
			 * happy with a higher budget too. So
			 * definitely increase the budget of this good
			 * candidate to boost the disk throughput.
			 */
			budget = min(budget * 4, bfqd->bfq_max_budget);
			break;
		case BFQQE_NO_MORE_REQUESTS:
			/*
			 * For queues that expire for this reason, it
			 * is particularly important to keep the
			 * budget close to the actual service they
			 * need. Doing so reduces the timestamp
			 * misalignment problem described in the
			 * comments in the body of
			 * __bfq_activate_entity. In fact, suppose
			 * that a queue systematically expires for
			 * BFQQE_NO_MORE_REQUESTS and presents a
			 * new request in time to enjoy timestamp
			 * back-shifting. The larger the budget of the
			 * queue is with respect to the service the
			 * queue actually requests in each service
			 * slot, the more times the queue can be
			 * reactivated with the same virtual finish
			 * time. It follows that, even if this finish
			 * time is pushed to the system virtual time
			 * to reduce the consequent timestamp
			 * misalignment, the queue unjustly enjoys for
			 * many re-activations a lower finish time
			 * than all newly activated queues.
			 *
			 * The service needed by bfqq is measured
			 * quite precisely by bfqq->entity.service.
			 * Since bfqq does not enjoy device idling,
			 * bfqq->entity.service is equal to the number
			 * of sectors that the process associated with
			 * bfqq requested to read/write before waiting
			 * for request completions, or blocking for
			 * other reasons.
			 */
			budget = max_t(int, bfqq->entity.service, min_budget);
			break;
		default:
			return;
		}
	} else if (!bfq_bfqq_sync(bfqq)) {
		/*
		 * Async queues get always the maximum possible
		 * budget, as for them we do not care about latency
		 * (in addition, their ability to dispatch is limited
		 * by the charging factor).
		 */
		budget = bfqd->bfq_max_budget;
	}

	bfqq->max_budget = budget;

	if (bfqd->budgets_assigned >= bfq_stats_min_budgets &&
	    !bfqd->bfq_user_max_budget)
		bfqq->max_budget = min(bfqq->max_budget, bfqd->bfq_max_budget);

	/*
	 * If there is still backlog, then assign a new budget, making
	 * sure that it is large enough for the next request. Since
	 * the finish time of bfqq must be kept in sync with the
	 * budget, be sure to call __bfq_bfqq_expire() *after* this
	 * update.
	 *
	 * If there is no backlog, then no need to update the budget;
	 * it will be updated on the arrival of a new request.
	 */
	next_rq = bfqq->next_rq;
	if (next_rq)
		bfqq->entity.budget = max_t(unsigned long, bfqq->max_budget,
					    bfq_serv_to_charge(next_rq, bfqq));

	bfq_log_bfqq(bfqd, bfqq, "head sect: %u, new budget %d",
			next_rq ? blk_rq_sectors(next_rq) : 0,
			bfqq->entity.budget);
}

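/*
 * Example of the feedback above (illustrative): a sync, non-raised
 * queue that keeps expiring for BFQQE_BUDGET_EXHAUSTED sees its
 * budget grow towards bfq_max_budget by a factor of 4 per expiration,
 * while a queue that repeatedly goes TOO_IDLE with no requests in
 * flight loses 4 * min_budget per expiration, until its budget
 * settles near the service it actually consumes.
 */
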
/*
 * Return true if the process associated with bfqq is "slow". The slow
 * flag is used, in addition to the budget timeout, to reduce the
 * amount of service provided to seeky processes, and thus reduce
 * their chances to lower the throughput. More details in the comments
 * on the function bfq_bfqq_expire().
 *
 * An important observation is in order: as discussed in the comments
 * on the function bfq_update_peak_rate(), with devices with internal
 * queues, it is hard if ever possible to know when and for how long
 * an I/O request is processed by the device (apart from the trivial
 * I/O pattern where a new request is dispatched only after the
 * previous one has been completed). This makes it hard to evaluate
 * the real rate at which the I/O requests of each bfq_queue are
 * served. In fact, for an I/O scheduler like BFQ, serving a
 * bfq_queue means just dispatching its requests during its service
 * slot (i.e., until the budget of the queue is exhausted, or the
 * queue remains idle, or, finally, a timeout fires). But, during the
 * service slot of a bfq_queue, around 100 ms at most, the device may
 * be even still processing requests of bfq_queues served in previous
 * service slots. On the opposite end, the requests of the in-service
 * bfq_queue may be completed after the service slot of the queue
 * finishes.
 *
 * Anyway, unless more sophisticated solutions are used
 * (where possible), the sum of the sizes of the requests dispatched
 * during the service slot of a bfq_queue is probably the only
 * approximation available for the service received by the bfq_queue
 * during its service slot. And this sum is the quantity used in this
 * function to evaluate the I/O speed of a process.
 */
static bool bfq_bfqq_is_slow(struct bfq_data *bfqd, struct bfq_queue *bfqq,
			     bool compensate, enum bfqq_expiration reason,
			     unsigned long *delta_ms)
{
	ktime_t delta_ktime;
	u32 delta_usecs;
	bool slow = BFQQ_SEEKY(bfqq); /* if delta too short, use seekyness */

	if (!bfq_bfqq_sync(bfqq))
		return false;

	if (compensate)
		delta_ktime = bfqd->last_idling_start;
	else
		delta_ktime = ktime_get();
	delta_ktime = ktime_sub(delta_ktime, bfqd->last_budget_start);
	delta_usecs = ktime_to_us(delta_ktime);

	/* don't use too short time intervals */
	if (delta_usecs < 1000) {
		if (blk_queue_nonrot(bfqd->queue))
			/*
			 * give same worst-case guarantees as idling
			 * for seeky
			 */
			*delta_ms = BFQ_MIN_TT / NSEC_PER_MSEC;
		else /* charge at least one seek */
			*delta_ms = bfq_slice_idle / NSEC_PER_MSEC;

		return slow;
	}

	*delta_ms = delta_usecs / USEC_PER_MSEC;

	/*
	 * Use only long (> 20ms) intervals to filter out excessive
	 * spikes in service rate estimation.
	 */
	if (delta_usecs > 20000) {
		/*
		 * Caveat for rotational devices: processes doing I/O
		 * in the slower disk zones tend to be slow(er) even
		 * if not seeky. In this respect, the estimated peak
		 * rate is likely to be an average over the disk
		 * surface. Accordingly, to not be too harsh with
		 * unlucky processes, a process is deemed slow only if
		 * its rate has been lower than half of the estimated
		 * peak rate.
		 */
		slow = bfqq->entity.service < bfqd->bfq_max_budget / 2;
	}

	bfq_log_bfqq(bfqd, bfqq, "bfq_bfqq_is_slow: slow %d", slow);

	return slow;
}

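/*
 * Numeric example (illustrative): over a 40 ms in-service interval
 * (> 20 ms, so the check above is enabled), a queue that consumed
 * only 30% of bfqd->bfq_max_budget is flagged as slow, while one
 * that consumed 60% is not, even if its raw rate stayed below the
 * estimated peak rate.
 */
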
/*
 * To be deemed as soft real-time, an application must meet two
 * requirements. First, the application must not require an average
 * bandwidth higher than the approximate bandwidth required to playback or
 * record a compressed high-definition video.
 * The next function is invoked on the completion of the last request of a
 * batch, to compute the next-start time instant, soft_rt_next_start, such
 * that, if the next request of the application does not arrive before
 * soft_rt_next_start, then the above requirement on the bandwidth is met.
 *
 * The second requirement is that the request pattern of the application is
 * isochronous, i.e., that, after issuing a request or a batch of requests,
 * the application stops issuing new requests until all its pending requests
 * have been completed. After that, the application may issue a new batch,
 * and so on.
 * For this reason the next function is invoked to compute
 * soft_rt_next_start only for applications that meet this requirement,
 * whereas soft_rt_next_start is set to infinity for applications that do
 * not.
 *
 * Unfortunately, even a greedy application may happen to behave in an
 * isochronous way if the CPU load is high. In fact, the application may
 * stop issuing requests while the CPUs are busy serving other processes,
 * then restart, then stop again for a while, and so on. In addition, if
 * the disk achieves a low enough throughput with the request pattern
 * issued by the application (e.g., because the request pattern is random
 * and/or the device is slow), then the application may meet the above
 * bandwidth requirement too. To prevent such a greedy application from
 * being deemed as soft real-time, a further rule is used in the computation
 * of soft_rt_next_start: soft_rt_next_start must be higher than the current
 * time plus the maximum time for which the arrival of a request is waited
 * for when a sync queue becomes idle, namely bfqd->bfq_slice_idle.
 * This filters out greedy applications, as the latter issue instead their
 * next request as soon as possible after the last one has been completed
 * (in contrast, when a batch of requests is completed, a soft real-time
 * application spends some time processing data).
 *
 * Unfortunately, the last filter may easily generate false positives if
 * only bfqd->bfq_slice_idle is used as a reference time interval and one
 * or both the following cases occur:
 * 1) HZ is so low that the duration of a jiffy is comparable to or higher
 *    than bfqd->bfq_slice_idle. This happens, e.g., on slow devices with
 *    HZ=100.
 * 2) jiffies, instead of increasing at a constant rate, may stop increasing
 *    for a while, then suddenly 'jump' by several units to recover the lost
 *    increments. This seems to happen, e.g., inside virtual machines.
 * To address this issue, we do not use as a reference time interval just
 * bfqd->bfq_slice_idle, but bfqd->bfq_slice_idle plus a few jiffies. In
 * particular we add the minimum number of jiffies for which the filter
 * seems to be quite precise also in embedded systems and KVM/QEMU virtual
 * machines.
 */
static unsigned long bfq_bfqq_softrt_next_start(struct bfq_data *bfqd,
						struct bfq_queue *bfqq)
{
	return max(bfqq->last_idle_bklogged +
		   HZ * bfqq->service_from_backlogged /
		   bfqd->bfq_wr_max_softrt_rate,
		   jiffies + nsecs_to_jiffies(bfqq->bfqd->bfq_slice_idle) + 4);
}

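/*
 * Worked example (made-up values): if the queue consumed
 * service_from_backlogged = 2000 sectors since it last became idle
 * with backlog, and bfq_wr_max_softrt_rate is 7000 sectors/sec, the
 * first term allows the next batch no earlier than
 * last_idle_bklogged + ~0.29 s; the second term additionally enforces
 * a distance of at least bfq_slice_idle (plus 4 jiffies) from now, to
 * filter out greedy applications.
 */
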
/**
 * bfq_bfqq_expire - expire a queue.
 * @bfqd: device owning the queue.
 * @bfqq: the queue to expire.
 * @compensate: if true, compensate for the time spent idling.
 * @reason: the reason causing the expiration.
 *
 * If the process associated with bfqq does slow I/O (e.g., because it
 * issues random requests), we charge bfqq with the time it has been
 * in service instead of the service it has received (see
 * bfq_bfqq_charge_time for details on how this goal is achieved). As
 * a consequence, bfqq will typically get higher timestamps upon
 * reactivation, and hence it will be rescheduled as if it had
 * received more service than what it has actually received. In the
 * end, bfqq receives less service in proportion to how slowly its
 * associated process consumes its budgets (and hence how seriously it
 * tends to lower the throughput). In addition, this time-charging
 * strategy guarantees time fairness among slow processes. In
 * contrast, if the process associated with bfqq is not slow, we
 * charge bfqq exactly with the service it has received.
 *
 * Charging time to the first type of queues and the exact service to
 * the other has the effect of using the WF2Q+ policy to schedule the
 * former on a timeslice basis, without violating service domain
 * guarantees among the latter.
 */
void bfq_bfqq_expire(struct bfq_data *bfqd,
		     struct bfq_queue *bfqq,
		     bool compensate,
		     enum bfqq_expiration reason)
{
	bool slow;
	unsigned long delta = 0;
	struct bfq_entity *entity = &bfqq->entity;
	int ref;

	/*
	 * Check whether the process is slow (see bfq_bfqq_is_slow).
	 */
	slow = bfq_bfqq_is_slow(bfqd, bfqq, compensate, reason, &delta);

	/*
	 * Increase service_from_backlogged before next statement,
	 * because the possible next invocation of
	 * bfq_bfqq_charge_time would likely inflate
	 * entity->service. In contrast, service_from_backlogged must
	 * contain real service, to enable the soft real-time
	 * heuristic to correctly compute the bandwidth consumed by
	 * bfqq.
	 */
	bfqq->service_from_backlogged += entity->service;

	/*
	 * As above explained, charge slow (typically seeky) and
	 * timed-out queues with the time and not the service
	 * received, to favor sequential workloads.
	 *
	 * Processes doing I/O in the slower disk zones will tend to
	 * be slow(er) even if not seeky. Therefore, since the
	 * estimated peak rate is actually an average over the disk
	 * surface, these processes may timeout just for bad luck. To
	 * avoid punishing them, do not charge time to processes that
	 * succeeded in consuming at least 2/3 of their budget. This
	 * allows BFQ to preserve enough elasticity to still perform
	 * bandwidth, and not time, distribution with little unlucky
	 * or quasi-sequential processes.
	 */
	if (bfqq->wr_coeff == 1 &&
	    (slow ||
	     (reason == BFQQE_BUDGET_TIMEOUT &&
	      bfq_bfqq_budget_left(bfqq) >= entity->budget / 3)))
		bfq_bfqq_charge_time(bfqd, bfqq, delta);

	if (reason == BFQQE_TOO_IDLE &&
	    entity->service <= 2 * entity->budget / 10)
		bfq_clear_bfqq_IO_bound(bfqq);

	if (bfqd->low_latency && bfqq->wr_coeff == 1)
		bfqq->last_wr_start_finish = jiffies;

	if (bfqd->low_latency && bfqd->bfq_wr_max_softrt_rate > 0 &&
	    RB_EMPTY_ROOT(&bfqq->sort_list)) {
		/*
		 * If we get here, and there are no outstanding
		 * requests, then the request pattern is isochronous
		 * (see the comments on the function
		 * bfq_bfqq_softrt_next_start()). Thus we can compute
		 * soft_rt_next_start. If, instead, the queue still
		 * has outstanding requests, then we have to wait for
		 * the completion of all the outstanding requests to
		 * discover whether the request pattern is actually
		 * isochronous.
		 */
		if (bfqq->dispatched == 0)
			bfqq->soft_rt_next_start =
				bfq_bfqq_softrt_next_start(bfqd, bfqq);
		else {
			/*
			 * The application is still waiting for the
			 * completion of one or more requests:
			 * prevent it from possibly being incorrectly
			 * deemed as soft real-time by setting its
			 * soft_rt_next_start to infinity. In fact,
			 * without this assignment, the application
			 * would be incorrectly deemed as soft
			 * real-time if:
			 * 1) it issued a new request before the
			 *    completion of all its in-flight
			 *    requests, and
			 * 2) at that time, its soft_rt_next_start
			 *    happened to be in the past.
			 */
			bfqq->soft_rt_next_start =
				bfq_greatest_from_now();
			/*
			 * Schedule an update of soft_rt_next_start to when
			 * the task may be discovered to be isochronous.
			 */
			bfq_mark_bfqq_softrt_update(bfqq);
		}
	}

	bfq_log_bfqq(bfqd, bfqq,
		"expire (%d, slow %d, num_disp %d, short_ttime %d)", reason,
		slow, bfqq->dispatched, bfq_bfqq_has_short_ttime(bfqq));

	/*
	 * Increase, decrease or leave budget unchanged according to
	 * reason.
	 */
	__bfq_bfqq_recalc_budget(bfqd, bfqq, reason);
	ref = bfqq->ref;
	__bfq_bfqq_expire(bfqd, bfqq);

	/* mark bfqq as waiting a request only if a bic still points to it */
	if (ref > 1 && !bfq_bfqq_busy(bfqq) &&
	    reason != BFQQE_BUDGET_TIMEOUT &&
	    reason != BFQQE_BUDGET_EXHAUSTED)
		bfq_mark_bfqq_non_blocking_wait_rq(bfqq);
}

/*
 * Budget timeout is not implemented through a dedicated timer, but
 * just checked on request arrivals and completions, as well as on
 * idle timer expirations.
 */
static bool bfq_bfqq_budget_timeout(struct bfq_queue *bfqq)
{
	return time_is_before_eq_jiffies(bfqq->budget_timeout);
}

/*
 * If we expire a queue that is actively waiting (i.e., with the
 * device idled) for the arrival of a new request, then we may incur
 * the timestamp misalignment problem described in the body of the
 * function __bfq_activate_entity. Hence we return true only if this
 * condition does not hold, or if the queue is slow enough to deserve
 * only to be kicked off for preserving a high throughput.
 */
static bool bfq_may_expire_for_budg_timeout(struct bfq_queue *bfqq)
{
	bfq_log_bfqq(bfqq->bfqd, bfqq,
		"may_budget_timeout: wait_request %d left %d timeout %d",
		bfq_bfqq_wait_request(bfqq),
			bfq_bfqq_budget_left(bfqq) >= bfqq->entity.budget / 3,
		bfq_bfqq_budget_timeout(bfqq));

	return (!bfq_bfqq_wait_request(bfqq) ||
		bfq_bfqq_budget_left(bfqq) >= bfqq->entity.budget / 3)
		&&
		bfq_bfqq_budget_timeout(bfqq);
}

3104 * For a queue that becomes empty, device idling is allowed only if
3105 * this function returns true for the queue. As a consequence, since
3106 * device idling plays a critical role in both throughput boosting and
3107 * service guarantees, the return value of this function plays a
3108 * critical role in both these aspects as well.
3110 * In a nutshell, this function returns true only if idling is
3111 * beneficial for throughput or, even if detrimental for throughput,
3112 * idling is however necessary to preserve service guarantees (low
3113 * latency, desired throughput distribution, ...). In particular, on
3114 * NCQ-capable devices, this function tries to return false, so as to
3115 * help keep the drives' internal queues full, whenever this helps the
3116 * device boost the throughput without causing any service-guarantee
3119 * In more detail, the return value of this function is obtained by,
3120 * first, computing a number of boolean variables that take into
3121 * account throughput and service-guarantee issues, and, then,
3122 * combining these variables in a logical expression. Most of the
3123 * issues taken into account are not trivial. We discuss these issues
3124 * individually while introducing the variables.
3126 static bool bfq_bfqq_may_idle(struct bfq_queue
*bfqq
)
3128 struct bfq_data
*bfqd
= bfqq
->bfqd
;
3129 bool rot_without_queueing
=
3130 !blk_queue_nonrot(bfqd
->queue
) && !bfqd
->hw_tag
,
3131 bfqq_sequential_and_IO_bound
,
3132 idling_boosts_thr
, idling_boosts_thr_without_issues
,
3133 idling_needed_for_service_guarantees
,
3134 asymmetric_scenario
;
3136 if (bfqd
->strict_guarantees
)
3140 * Idling is performed only if slice_idle > 0. In addition, we
3143 * (b) bfqq is in the idle io prio class: in this case we do
3144 * not idle because we want to minimize the bandwidth that
3145 * queues in this class can steal to higher-priority queues
3147 if (bfqd
->bfq_slice_idle
== 0 || !bfq_bfqq_sync(bfqq
) ||
3148 bfq_class_idle(bfqq
))
3151 bfqq_sequential_and_IO_bound
= !BFQQ_SEEKY(bfqq
) &&
3152 bfq_bfqq_IO_bound(bfqq
) && bfq_bfqq_has_short_ttime(bfqq
);
3155 * The next variable takes into account the cases where idling
3156 * boosts the throughput.
3158 * The value of the variable is computed considering, first, that
3159 * idling is virtually always beneficial for the throughput if:
3160 * (a) the device is not NCQ-capable and rotational, or
3161 * (b) regardless of the presence of NCQ, the device is rotational and
3162 * the request pattern for bfqq is I/O-bound and sequential, or
3163 * (c) regardless of whether it is rotational, the device is
3164 * not NCQ-capable and the request pattern for bfqq is
3165 * I/O-bound and sequential.
3167 * Secondly, and in contrast to the above item (b), idling an
3168 * NCQ-capable flash-based device would not boost the
3169 * throughput even with sequential I/O; rather it would lower
3170 * the throughput in proportion to how fast the device
3171 * is. Accordingly, the next variable is true if any of the
3172 * above conditions (a), (b) or (c) is true, and, in
3173 * particular, happens to be false if bfqd is an NCQ-capable
3174 * flash-based device.
3176 idling_boosts_thr
= rot_without_queueing
||
3177 ((!blk_queue_nonrot(bfqd
->queue
) || !bfqd
->hw_tag
) &&
3178 bfqq_sequential_and_IO_bound
);
3181 * The value of the next variable,
3182 * idling_boosts_thr_without_issues, is equal to that of
3183 * idling_boosts_thr, unless a special case holds. In this
3184 * special case, described below, idling may cause problems to
3185 * weight-raised queues.
3187 * When the request pool is saturated (e.g., in the presence
3188 * of write hogs), if the processes associated with
3189 * non-weight-raised queues ask for requests at a lower rate,
3190 * then processes associated with weight-raised queues have a
3191 * higher probability to get a request from the pool
3192 * immediately (or at least soon) when they need one. Thus
3193 * they have a higher probability to actually get a fraction
3194 * of the device throughput proportional to their high
3195 * weight. This is especially true with NCQ-capable drives,
3196 * which enqueue several requests in advance, and further
3197 * reorder internally-queued requests.
3199 * For this reason, we force to false the value of
3200 * idling_boosts_thr_without_issues if there are weight-raised
3201 * busy queues. In this case, and if bfqq is not weight-raised,
3202 * this guarantees that the device is not idled for bfqq (if,
3203 * instead, bfqq is weight-raised, then idling will be
3204 * guaranteed by another variable, see below). Combined with
3205 * the timestamping rules of BFQ (see [1] for details), this
3206 * behavior causes bfqq, and hence any sync non-weight-raised
3207 * queue, to get a lower number of requests served, and thus
3208 * to ask for a lower number of requests from the request
3209 * pool, before the busy weight-raised queues get served
3210 * again. This often mitigates starvation problems in the
3211 * presence of heavy write workloads and NCQ, thereby
3212 * guaranteeing a higher application and system responsiveness
3213 * in these hostile scenarios.
3215 idling_boosts_thr_without_issues
= idling_boosts_thr
&&
3216 bfqd
->wr_busy_queues
== 0;
3219 * There is then a case where idling must be performed not
3220 * for throughput concerns, but to preserve service
3223 * To introduce this case, we can note that allowing the drive
3224 * to enqueue more than one request at a time, and hence
3225 * delegating de facto final scheduling decisions to the
3226 * drive's internal scheduler, entails loss of control on the
3227 * actual request service order. In particular, the critical
3228 * situation is when requests from different processes happen
3229 * to be present, at the same time, in the internal queue(s)
3230 * of the drive. In such a situation, the drive, by deciding
3231 * the service order of the internally-queued requests, does
3232 * determine also the actual throughput distribution among
3233 * these processes. But the drive typically has no notion or
3234 * concern about per-process throughput distribution, and
3235 * makes its decisions only on a per-request basis. Therefore,
3236 * the service distribution enforced by the drive's internal
3237 * scheduler is likely to coincide with the desired
3238 * device-throughput distribution only in a completely
3239 * symmetric scenario where:
3240 * (i) each of these processes must get the same throughput as
3242 * (ii) all these processes have the same I/O pattern
3243 (either sequential or random).
3244 * In fact, in such a scenario, the drive will tend to treat
3245 * the requests of each of these processes in about the same
3246 * way as the requests of the others, and thus to provide
3247 * each of these processes with about the same throughput
3248 * (which is exactly the desired throughput distribution). In
3249 * contrast, in any asymmetric scenario, device idling is
3250 * certainly needed to guarantee that bfqq receives its
3251 * assigned fraction of the device throughput (see [1] for
	 * We address this issue by controlling, actually, only the
	 * symmetry sub-condition (i), i.e., provided that
	 * sub-condition (i) holds, idling is not performed,
	 * regardless of whether sub-condition (ii) holds. In other
	 * words, only if sub-condition (i) does not hold is idling
	 * allowed, and the device tends to be prevented from queueing
	 * many requests, possibly of several processes. The reason
	 * for not controlling also sub-condition (ii) is that we
	 * exploit preemption to preserve guarantees in case of
	 * symmetric scenarios, even if (ii) does not hold, as
	 * explained in the next two paragraphs.
	 *
	 * Even if a queue, say Q, is expired when it remains idle, Q
	 * can still preempt the new in-service queue if the next
	 * request of Q arrives soon (see the comments on
	 * bfq_bfqq_update_budg_for_activation). If all queues and
	 * groups have the same weight, this form of preemption,
	 * combined with the hole-recovery heuristic described in the
	 * comments on function bfq_bfqq_update_budg_for_activation,
	 * is enough to preserve a correct bandwidth distribution in
	 * the mid term, even without idling. In fact, even if not
	 * idling allows the internal queues of the device to contain
	 * many requests, and thus to reorder requests, we can rather
	 * safely assume that the internal scheduler still preserves a
	 * minimum of mid-term fairness. The motivation for using
	 * preemption instead of idling is that, by not idling,
	 * service guarantees are preserved with essentially no
	 * sacrifice of throughput. In other words, both a high
	 * throughput and its desired distribution are obtained.
	 *
	 * More precisely, this preemption-based, idleless approach
	 * provides fairness in terms of IOPS, and not sectors per
	 * second. This can be seen with a simple example. Suppose
	 * that there are two queues with the same weight, but that
	 * the first queue receives requests of 8 sectors, while the
	 * second queue receives requests of 1024 sectors. In
	 * addition, suppose that each of the two queues contains at
	 * most one request at a time, which implies that each queue
	 * always remains idle after it is served. Finally, after
	 * remaining idle, each queue receives very quickly a new
	 * request. It follows that the two queues are served
	 * alternately, preempting each other if needed. This
	 * implies that, although both queues have the same weight,
	 * the queue with large requests receives a service that is
	 * 1024/8 times as high as the service received by the other
	 * queue.
	 *
	 * On the other hand, device idling is performed, and thus
	 * pure sector-domain guarantees are provided, for the
	 * following queues, which are likely to need stronger
	 * throughput guarantees: weight-raised queues, and queues
	 * with a higher weight than other queues. When such queues
	 * are active, sub-condition (i) is false, which triggers
	 * device idling.
	 *
	 * According to the above considerations, the next variable is
	 * true (only) if sub-condition (i) does not hold. To compute
	 * the value of this variable, we not only use the return
	 * value of the function bfq_symmetric_scenario(), but also
	 * check whether bfqq is being weight-raised, because
	 * bfq_symmetric_scenario() does not take weight-raised
	 * queues into account (see comments on
	 * bfq_weights_tree_add()).
	 *
	 * As a side note, it is worth considering that the above
	 * device-idling countermeasures may however fail in the
	 * following unlucky scenario: if idling is (correctly)
	 * disabled in a time period during which all symmetry
	 * sub-conditions hold, and hence the device is allowed to
	 * enqueue many requests, but at some later point in time some
	 * sub-condition ceases to hold, then it may become impossible
	 * to let requests be served in the desired order until all
	 * the requests already queued in the device have been served.
	 */
	asymmetric_scenario = bfqq->wr_coeff > 1 ||
		!bfq_symmetric_scenario(bfqd);
	/*
	 * Finally, there is a case where maximizing throughput is the
	 * best choice even if it may cause unfairness toward
	 * bfqq. Such a case is when bfqq became active in a burst of
	 * queue activations. Queues that became active during a large
	 * burst benefit only from throughput, as discussed in the
	 * comments on bfq_handle_burst. Thus, if bfqq became active
	 * in a burst and not idling the device maximizes throughput,
	 * then the device must not be idled, because not idling the
	 * device provides bfqq and all other queues in the burst with
	 * maximum benefit. Combining this and the above case, we can
	 * now establish when idling is actually needed to preserve
	 * service guarantees.
	 */
	idling_needed_for_service_guarantees =
		asymmetric_scenario && !bfq_bfqq_in_large_burst(bfqq);
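
	/*
	 * Illustration of the combined conditions above (purely
	 * hypothetical situation, not part of the algorithm): on an
	 * NCQ-capable drive with one weight-raised busy queue,
	 * idling_boosts_thr_without_issues is forced to false; a sync
	 * queue with wr_coeff == 1 is then idled only if the scenario
	 * is asymmetric and the queue is not in a large burst, i.e.,
	 * only when idling is needed to preserve its guarantees.
	 */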
	/*
	 * We have now all the components we need to compute the
	 * return value of the function, which is true only if idling
	 * either boosts the throughput (without issues), or is
	 * necessary to preserve service guarantees.
	 */
	return idling_boosts_thr_without_issues ||
		idling_needed_for_service_guarantees;
}
/*
 * If the in-service queue is empty but the function bfq_bfqq_may_idle
 * returns true, then:
 * 1) the queue must remain in service and cannot be expired, and
 * 2) the device must be idled to wait for the possible arrival of a new
 *    request for the queue.
 * See the comments on the function bfq_bfqq_may_idle for the reasons
 * why performing device idling is the best choice to boost the throughput
 * and preserve service guarantees when bfq_bfqq_may_idle itself
 * returns true.
 */
static bool bfq_bfqq_must_idle(struct bfq_queue *bfqq)
{
	return RB_EMPTY_ROOT(&bfqq->sort_list) && bfq_bfqq_may_idle(bfqq);
}
/*
 * Select a queue for service. If we have a current queue in service,
 * check whether to continue servicing it, or retrieve and set a new one.
 */
static struct bfq_queue *bfq_select_queue(struct bfq_data *bfqd)
{
	struct bfq_queue *bfqq;
	struct request *next_rq;
	enum bfqq_expiration reason = BFQQE_BUDGET_TIMEOUT;

	bfqq = bfqd->in_service_queue;
	if (!bfqq)
		goto new_queue;

	bfq_log_bfqq(bfqd, bfqq, "select_queue: already in-service queue");

	if (bfq_may_expire_for_budg_timeout(bfqq) &&
	    !bfq_bfqq_wait_request(bfqq) &&
	    !bfq_bfqq_must_idle(bfqq))
		goto expire;
check_queue:
	/*
	 * This loop is rarely executed more than once. Even when it
	 * happens, it is much more convenient to re-execute this loop
	 * than to return NULL and trigger a new dispatch to get a
	 * request served.
	 */
	next_rq = bfqq->next_rq;
	/*
	 * If bfqq has requests queued and it has enough budget left to
	 * serve them, keep the queue, otherwise expire it.
	 */
	if (next_rq) {
		if (bfq_serv_to_charge(next_rq, bfqq) >
			bfq_bfqq_budget_left(bfqq)) {
			/*
			 * Expire the queue for budget exhaustion,
			 * which makes sure that the next budget is
			 * enough to serve the next request, even if
			 * it comes from the fifo expired path.
			 */
			reason = BFQQE_BUDGET_EXHAUSTED;
			goto expire;
		} else {
			/*
			 * The idle timer may be pending because we may
			 * not disable disk idling even when a new request
			 * arrives.
			 */
			if (bfq_bfqq_wait_request(bfqq)) {
				/*
				 * If we get here: 1) at least a new request
				 * has arrived but we have not disabled the
				 * timer because the request was too small,
				 * 2) then the block layer has unplugged
				 * the device, causing the dispatch to be
				 * invoked.
				 *
				 * Since the device is unplugged, now the
				 * requests are probably large enough to
				 * provide a reasonable throughput.
				 * So we disable idling.
				 */
				bfq_clear_bfqq_wait_request(bfqq);
				hrtimer_try_to_cancel(&bfqd->idle_slice_timer);
				bfqg_stats_update_idle_time(bfqq_group(bfqq));
			}
			goto keep_queue;
		}
	}
	/*
	 * No requests pending. However, if the in-service queue is idling
	 * for a new request, or has requests waiting for a completion and
	 * may idle after their completion, then keep it anyway.
	 */
	if (bfq_bfqq_wait_request(bfqq) ||
	    (bfqq->dispatched != 0 && bfq_bfqq_may_idle(bfqq))) {
		bfqq = NULL;
		goto keep_queue;
	}

	reason = BFQQE_NO_MORE_REQUESTS;
expire:
	bfq_bfqq_expire(bfqd, bfqq, false, reason);
new_queue:
	bfqq = bfq_set_in_service_queue(bfqd);
	if (bfqq) {
		bfq_log_bfqq(bfqd, bfqq, "select_queue: checking new queue");
		goto check_queue;
	}
keep_queue:
	if (bfqq)
		bfq_log_bfqq(bfqd, bfqq, "select_queue: returned this queue");
	else
		bfq_log(bfqd, "select_queue: no queue returned");

	return bfqq;
}
static void bfq_update_wr_data(struct bfq_data *bfqd, struct bfq_queue *bfqq)
{
	struct bfq_entity *entity = &bfqq->entity;

	if (bfqq->wr_coeff > 1) { /* queue is being weight-raised */
		bfq_log_bfqq(bfqd, bfqq,
			"raising period dur %u/%u msec, old coeff %u, w %d(%d)",
			jiffies_to_msecs(jiffies - bfqq->last_wr_start_finish),
			jiffies_to_msecs(bfqq->wr_cur_max_time),
			bfqq->wr_coeff,
			bfqq->entity.weight, bfqq->entity.orig_weight);

		if (entity->prio_changed)
			bfq_log_bfqq(bfqd, bfqq, "WARN: pending prio change");

		/*
		 * If the queue was activated in a burst, or too much
		 * time has elapsed from the beginning of this
		 * weight-raising period, then end weight raising.
		 */
		if (bfq_bfqq_in_large_burst(bfqq))
			bfq_bfqq_end_wr(bfqq);
		else if (time_is_before_jiffies(bfqq->last_wr_start_finish +
						bfqq->wr_cur_max_time)) {
			if (bfqq->wr_cur_max_time != bfqd->bfq_wr_rt_max_time ||
			time_is_before_jiffies(bfqq->wr_start_at_switch_to_srt +
					       bfq_wr_duration(bfqd)))
				bfq_bfqq_end_wr(bfqq);
			else {
				/* switch back to interactive wr */
				bfqq->wr_coeff = bfqd->bfq_wr_coeff;
				bfqq->wr_cur_max_time = bfq_wr_duration(bfqd);
				bfqq->last_wr_start_finish =
					bfqq->wr_start_at_switch_to_srt;
				bfqq->entity.prio_changed = 1;
			}
		}
	}
	/*
	 * To improve latency (for this or other queues), immediately
	 * update weight both if it must be raised and if it must be
	 * lowered. Since the entity may be on some active tree here,
	 * and might have a pending change of its ioprio class, invoke
	 * the next function with the last parameter unset (see the
	 * comments on the function).
	 */
	if ((entity->weight > entity->orig_weight) != (bfqq->wr_coeff > 1))
		__bfq_entity_update_weight_prio(bfq_entity_service_tree(entity),
						entity, false);
}
/*
 * Dispatch next request from bfqq.
 */
static struct request *bfq_dispatch_rq_from_bfqq(struct bfq_data *bfqd,
						 struct bfq_queue *bfqq)
{
	struct request *rq = bfqq->next_rq;
	unsigned long service_to_charge;

	service_to_charge = bfq_serv_to_charge(rq, bfqq);

	bfq_bfqq_served(bfqq, service_to_charge);

	bfq_dispatch_remove(bfqd->queue, rq);

	/*
	 * If weight raising has to terminate for bfqq, then next
	 * function causes an immediate update of bfqq's weight,
	 * without waiting for next activation. As a consequence, on
	 * expiration, bfqq will be timestamped as if it had never been
	 * weight-raised during this service slot, even if it has
	 * received part or even most of the service as a
	 * weight-raised queue. This inflates bfqq's timestamps, which
	 * is beneficial, as bfqq is then more willing to leave the
	 * device immediately to possible other weight-raised queues.
	 */
	bfq_update_wr_data(bfqd, bfqq);

	/*
	 * Expire bfqq, pretending that its budget expired, if bfqq
	 * belongs to CLASS_IDLE and other queues are waiting for
	 * service.
	 */
	if (bfqd->busy_queues > 1 && bfq_class_idle(bfqq))
		goto expire;

	return rq;

expire:
	bfq_bfqq_expire(bfqd, bfqq, false, BFQQE_BUDGET_EXHAUSTED);
	return rq;
}
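
/*
 * Side note on the budget accounting above (illustrative, in terms of
 * this file's own helpers): bfq_serv_to_charge() returns the service,
 * in sectors, to charge for rq, and bfq_bfqq_served() accounts that
 * amount against bfqq's budget. Once the charge for the next request
 * exceeds bfq_bfqq_budget_left(), bfq_select_queue() expires the queue
 * with BFQQE_BUDGET_EXHAUSTED. For example, with a budget of 8192
 * sectors and 512-sector sequential requests, a queue can dispatch up
 * to 16 requests before being re-scheduled.
 */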
static bool bfq_has_work(struct blk_mq_hw_ctx *hctx)
{
	struct bfq_data *bfqd = hctx->queue->elevator->elevator_data;

	/*
	 * Avoiding lock: a race on bfqd->busy_queues should cause at
	 * most a call to dispatch for nothing.
	 */
	return !list_empty_careful(&bfqd->dispatch) ||
		bfqd->busy_queues > 0;
}
static struct request *__bfq_dispatch_request(struct blk_mq_hw_ctx *hctx)
{
	struct bfq_data *bfqd = hctx->queue->elevator->elevator_data;
	struct request *rq = NULL;
	struct bfq_queue *bfqq = NULL;

	if (!list_empty(&bfqd->dispatch)) {
		rq = list_first_entry(&bfqd->dispatch, struct request,
				      queuelist);
		list_del_init(&rq->queuelist);

		bfqq = RQ_BFQQ(rq);

		if (bfqq) {
			/*
			 * Increment counters here, because this
			 * dispatch does not follow the standard
			 * dispatch flow (where counters are
			 * incremented).
			 */
			bfqq->dispatched++;

			goto inc_in_driver_start_rq;
		}

		/*
		 * We exploit the put_rq_private hook to decrement
		 * rq_in_driver, but put_rq_private will not be
		 * invoked on this request. So, to avoid unbalance,
		 * just start this request, without incrementing
		 * rq_in_driver. As a negative consequence,
		 * rq_in_driver is deceptively lower than it should be
		 * while this request is in service. This may cause
		 * bfq_schedule_dispatch to be invoked uselessly.
		 *
		 * As for implementing an exact solution, the
		 * put_request hook, if defined, is probably invoked
		 * also on this request. So, by exploiting this hook,
		 * we could 1) increment rq_in_driver here, and 2)
		 * decrement it in put_request. Such a solution would
		 * let the value of the counter be always accurate,
		 * but it would entail using an extra interface
		 * function. This cost seems higher than the benefit,
		 * being the frequency of non-elevator-private
		 * requests very low.
		 */
		goto start_rq;
	}

	bfq_log(bfqd, "dispatch requests: %d busy queues", bfqd->busy_queues);

	if (bfqd->busy_queues == 0)
		goto exit;

	/*
	 * Force device to serve one request at a time if
	 * strict_guarantees is true. Forcing this service scheme is
	 * currently the ONLY way to guarantee that the request
	 * service order enforced by the scheduler is respected by a
	 * queueing device. Otherwise the device is free even to make
	 * some unlucky request wait for as long as the device
	 * wishes.
	 *
	 * Of course, serving one request at a time may cause loss of
	 * throughput.
	 */
	if (bfqd->strict_guarantees && bfqd->rq_in_driver > 0)
		goto exit;

	bfqq = bfq_select_queue(bfqd);
	if (!bfqq)
		goto exit;

	rq = bfq_dispatch_rq_from_bfqq(bfqd, bfqq);

	if (rq) {
inc_in_driver_start_rq:
		bfqd->rq_in_driver++;
start_rq:
		rq->rq_flags |= RQF_STARTED;
	}
exit:
	return rq;
}
static struct request *bfq_dispatch_request(struct blk_mq_hw_ctx *hctx)
{
	struct bfq_data *bfqd = hctx->queue->elevator->elevator_data;
	struct request *rq;

	spin_lock_irq(&bfqd->lock);

	rq = __bfq_dispatch_request(hctx);
	spin_unlock_irq(&bfqd->lock);

	return rq;
}
/*
 * Task holds one reference to the queue, dropped when task exits. Each rq
 * in-flight on this queue also holds a reference, dropped when rq is freed.
 *
 * Scheduler lock must be held here. Recall not to use bfqq after calling
 * this function on it.
 */
void bfq_put_queue(struct bfq_queue *bfqq)
{
#ifdef CONFIG_BFQ_GROUP_IOSCHED
	struct bfq_group *bfqg = bfqq_group(bfqq);
#endif

	if (bfqq->bfqd)
		bfq_log_bfqq(bfqq->bfqd, bfqq, "put_queue: %p %d",
			     bfqq, bfqq->ref);

	bfqq->ref--;
	if (bfqq->ref)
		return;

	if (bfq_bfqq_sync(bfqq))
		/*
		 * The fact that this queue is being destroyed does not
		 * invalidate the fact that this queue may have been
		 * activated during the current burst. As a consequence,
		 * although the queue does not exist anymore, and hence
		 * needs to be removed from the burst list if there,
		 * the burst size must not be decremented.
		 */
		hlist_del_init(&bfqq->burst_list_node);

	kmem_cache_free(bfq_pool, bfqq);
#ifdef CONFIG_BFQ_GROUP_IOSCHED
	bfqg_and_blkg_put(bfqg);
#endif
}
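
/*
 * For orientation, a sketch of who holds references to a bfq_queue
 * (based on the comments above and on bfq_get_queue): one process
 * reference per bic pointing to the queue, one reference per request
 * in flight on it, and, for async queues, one extra group reference.
 * The queue is freed only when the last of these is dropped.
 */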
static void bfq_put_cooperator(struct bfq_queue *bfqq)
{
	struct bfq_queue *__bfqq, *next;

	/*
	 * If this queue was scheduled to merge with another queue, be
	 * sure to drop the reference taken on that queue (and others in
	 * the merge chain). See bfq_setup_merge and bfq_merge_bfqqs.
	 */
	__bfqq = bfqq->new_bfqq;
	while (__bfqq) {
		if (__bfqq == bfqq)
			break;
		next = __bfqq->new_bfqq;
		bfq_put_queue(__bfqq);
		__bfqq = next;
	}
}
static void bfq_exit_bfqq(struct bfq_data *bfqd, struct bfq_queue *bfqq)
{
	if (bfqq == bfqd->in_service_queue) {
		__bfq_bfqq_expire(bfqd, bfqq);
		bfq_schedule_dispatch(bfqd);
	}

	bfq_log_bfqq(bfqd, bfqq, "exit_bfqq: %p, %d", bfqq, bfqq->ref);

	bfq_put_cooperator(bfqq);

	bfq_put_queue(bfqq); /* release process reference */
}
static void bfq_exit_icq_bfqq(struct bfq_io_cq *bic, bool is_sync)
{
	struct bfq_queue *bfqq = bic_to_bfqq(bic, is_sync);
	struct bfq_data *bfqd;

	if (bfqq)
		bfqd = bfqq->bfqd; /* NULL if scheduler already exited */

	if (bfqq && bfqd) {
		unsigned long flags;

		spin_lock_irqsave(&bfqd->lock, flags);
		bfq_exit_bfqq(bfqd, bfqq);
		bic_set_bfqq(bic, NULL, is_sync);
		spin_unlock_irqrestore(&bfqd->lock, flags);
	}
}
static void bfq_exit_icq(struct io_cq *icq)
{
	struct bfq_io_cq *bic = icq_to_bic(icq);

	bfq_exit_icq_bfqq(bic, true);
	bfq_exit_icq_bfqq(bic, false);
}
/*
 * Update the entity prio values; note that the new values will not
 * be used until the next (re)activation.
 */
static void
bfq_set_next_ioprio_data(struct bfq_queue *bfqq, struct bfq_io_cq *bic)
{
	struct task_struct *tsk = current;
	int ioprio_class;
	struct bfq_data *bfqd = bfqq->bfqd;

	if (!bfqd)
		return;

	ioprio_class = IOPRIO_PRIO_CLASS(bic->ioprio);
	switch (ioprio_class) {
	default:
		dev_err(bfqq->bfqd->queue->backing_dev_info->dev,
			"bfq: bad prio class %d\n", ioprio_class);
	case IOPRIO_CLASS_NONE:
		/*
		 * No prio set, inherit CPU scheduling settings.
		 */
		bfqq->new_ioprio = task_nice_ioprio(tsk);
		bfqq->new_ioprio_class = task_nice_ioclass(tsk);
		break;
	case IOPRIO_CLASS_RT:
		bfqq->new_ioprio = IOPRIO_PRIO_DATA(bic->ioprio);
		bfqq->new_ioprio_class = IOPRIO_CLASS_RT;
		break;
	case IOPRIO_CLASS_BE:
		bfqq->new_ioprio = IOPRIO_PRIO_DATA(bic->ioprio);
		bfqq->new_ioprio_class = IOPRIO_CLASS_BE;
		break;
	case IOPRIO_CLASS_IDLE:
		bfqq->new_ioprio_class = IOPRIO_CLASS_IDLE;
		bfqq->new_ioprio = 7;
		break;
	}

	if (bfqq->new_ioprio >= IOPRIO_BE_NR) {
		pr_crit("bfq_set_next_ioprio_data: new_ioprio %d\n",
			bfqq->new_ioprio);
		/* clamp to the highest valid prio level, not one past it */
		bfqq->new_ioprio = IOPRIO_BE_NR - 1;
	}

	bfqq->entity.new_weight = bfq_ioprio_to_weight(bfqq->new_ioprio);
	bfqq->entity.prio_changed = 1;
}
static struct bfq_queue *bfq_get_queue(struct bfq_data *bfqd,
				       struct bio *bio, bool is_sync,
				       struct bfq_io_cq *bic);
static void bfq_check_ioprio_change(struct bfq_io_cq *bic, struct bio *bio)
{
	struct bfq_data *bfqd = bic_to_bfqd(bic);
	struct bfq_queue *bfqq;
	int ioprio = bic->icq.ioc->ioprio;

	/*
	 * This condition may trigger on a newly created bic, be sure to
	 * drop the lock before returning.
	 */
	if (unlikely(!bfqd) || likely(bic->ioprio == ioprio))
		return;

	bic->ioprio = ioprio;

	bfqq = bic_to_bfqq(bic, false);
	if (bfqq) {
		/* release process reference on this queue */
		bfq_put_queue(bfqq);
		bfqq = bfq_get_queue(bfqd, bio, BLK_RW_ASYNC, bic);
		bic_set_bfqq(bic, bfqq, false);
	}

	bfqq = bic_to_bfqq(bic, true);
	if (bfqq)
		bfq_set_next_ioprio_data(bfqq, bic);
}
static void bfq_init_bfqq(struct bfq_data *bfqd, struct bfq_queue *bfqq,
			  struct bfq_io_cq *bic, pid_t pid, int is_sync)
{
	RB_CLEAR_NODE(&bfqq->entity.rb_node);
	INIT_LIST_HEAD(&bfqq->fifo);
	INIT_HLIST_NODE(&bfqq->burst_list_node);

	bfqq->ref = 0;
	bfqq->bfqd = bfqd;

	if (bic)
		bfq_set_next_ioprio_data(bfqq, bic);

	if (is_sync) {
		/*
		 * No need to mark as has_short_ttime if in
		 * idle_class, because no device idling is performed
		 * for queues in idle class.
		 */
		if (!bfq_class_idle(bfqq))
			/* tentatively mark as has_short_ttime */
			bfq_mark_bfqq_has_short_ttime(bfqq);
		bfq_mark_bfqq_sync(bfqq);
		bfq_mark_bfqq_just_created(bfqq);
	} else
		bfq_clear_bfqq_sync(bfqq);

	/* set end request to minus infinity from now */
	bfqq->ttime.last_end_request = ktime_get_ns() + 1;

	bfq_mark_bfqq_IO_bound(bfqq);

	bfqq->pid = pid;

	/* Tentative initial value to trade off between thr and lat */
	bfqq->max_budget = (2 * bfq_max_budget(bfqd)) / 3;
	bfqq->budget_timeout = bfq_smallest_from_now();

	bfqq->wr_coeff = 1;
	bfqq->last_wr_start_finish = jiffies;
	bfqq->wr_start_at_switch_to_srt = bfq_smallest_from_now();
	bfqq->split_time = bfq_smallest_from_now();

	/*
	 * Set to the value for which bfqq will not be deemed as
	 * soft rt when it becomes backlogged.
	 */
	bfqq->soft_rt_next_start = bfq_greatest_from_now();

	/* first request is almost certainly seeky */
	bfqq->seek_history = 1;
}
static struct bfq_queue **bfq_async_queue_prio(struct bfq_data *bfqd,
					       struct bfq_group *bfqg,
					       int ioprio_class, int ioprio)
{
	switch (ioprio_class) {
	case IOPRIO_CLASS_RT:
		return &bfqg->async_bfqq[0][ioprio];
	case IOPRIO_CLASS_NONE:
		ioprio = IOPRIO_NORM;
		/* fall through */
	case IOPRIO_CLASS_BE:
		return &bfqg->async_bfqq[1][ioprio];
	case IOPRIO_CLASS_IDLE:
		return &bfqg->async_idle_bfqq;
	default:
		return NULL;
	}
}
static struct bfq_queue *bfq_get_queue(struct bfq_data *bfqd,
				       struct bio *bio, bool is_sync,
				       struct bfq_io_cq *bic)
{
	const int ioprio = IOPRIO_PRIO_DATA(bic->ioprio);
	const int ioprio_class = IOPRIO_PRIO_CLASS(bic->ioprio);
	struct bfq_queue **async_bfqq = NULL;
	struct bfq_queue *bfqq;
	struct bfq_group *bfqg;

	rcu_read_lock();

	bfqg = bfq_find_set_group(bfqd, bio_blkcg(bio));
	if (!bfqg) {
		bfqq = &bfqd->oom_bfqq;
		goto out;
	}

	if (!is_sync) {
		async_bfqq = bfq_async_queue_prio(bfqd, bfqg, ioprio_class,
						  ioprio);
		bfqq = *async_bfqq;
		if (bfqq)
			goto out;
	}

	bfqq = kmem_cache_alloc_node(bfq_pool,
				     GFP_NOWAIT | __GFP_ZERO | __GFP_NOWARN,
				     bfqd->queue->node);

	if (bfqq) {
		bfq_init_bfqq(bfqd, bfqq, bic, current->pid,
			      is_sync);
		bfq_init_entity(&bfqq->entity, bfqg);
		bfq_log_bfqq(bfqd, bfqq, "allocated");
	} else {
		bfqq = &bfqd->oom_bfqq;
		bfq_log_bfqq(bfqd, bfqq, "using oom bfqq");
		goto out;
	}

	/*
	 * Pin the queue now that it's allocated, scheduler exit will
	 * prune it.
	 */
	if (async_bfqq) {
		bfqq->ref++; /*
			      * Extra group reference, w.r.t. sync
			      * queue. This extra reference is removed
			      * only if bfqq->bfqg disappears, to
			      * guarantee that this queue is not freed
			      * until its group goes away.
			      */
		bfq_log_bfqq(bfqd, bfqq, "get_queue, bfqq not in async: %p, %d",
			     bfqq, bfqq->ref);
		*async_bfqq = bfqq;
	}

out:
	bfqq->ref++; /* get a process reference to this queue */
	bfq_log_bfqq(bfqd, bfqq, "get_queue, at end: %p, %d", bfqq, bfqq->ref);
	rcu_read_unlock();
	return bfqq;
}
static void bfq_update_io_thinktime(struct bfq_data *bfqd,
				    struct bfq_queue *bfqq)
{
	struct bfq_ttime *ttime = &bfqq->ttime;
	u64 elapsed = ktime_get_ns() - bfqq->ttime.last_end_request;

	elapsed = min_t(u64, elapsed, 2ULL * bfqd->bfq_slice_idle);

	ttime->ttime_samples = (7*bfqq->ttime.ttime_samples + 256) / 8;
	ttime->ttime_total = div_u64(7*ttime->ttime_total + 256*elapsed, 8);
	ttime->ttime_mean = div64_ul(ttime->ttime_total + 128,
				     ttime->ttime_samples);
}
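
/*
 * The three updates above implement a fixed-point exponentially
 * weighted moving average with weight 7/8 for the past: samples are
 * scaled by 256, so ttime_samples converges to 256 (the fixed point
 * of s = (7s + 256)/8). Worked instance with hypothetical numbers,
 * ignoring the rounding terms: with ttime_samples = 256 and an old
 * mean of 2 ms (ttime_total = 512 in the 256-scaled domain), a new
 * elapsed of 10 ms gives ttime_total = (7*512 + 256*10)/8 = 768, so
 * ttime_mean = 768/256 = 3 ms: one eighth of the new sample plus
 * seven eighths of the old mean.
 */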
static void
bfq_update_io_seektime(struct bfq_data *bfqd, struct bfq_queue *bfqq,
		       struct request *rq)
{
	bfqq->seek_history <<= 1;
	bfqq->seek_history |=
		get_sdist(bfqq->last_request_pos, rq) > BFQQ_SEEK_THR &&
		(!blk_queue_nonrot(bfqd->queue) ||
		 blk_rq_sectors(rq) < BFQQ_SECT_THR_NONROT);
}
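
/*
 * In other words, bfqq->seek_history is a 32-bit sliding window: each
 * new request shifts in one bit, set iff the request is "seeky"
 * (distance from the previous request above BFQQ_SEEK_THR, with the
 * extra small-request condition on non-rotational devices). The
 * BFQQ_SEEKY() macro, defined earlier in this file, then classifies
 * the queue by counting the set bits in this window, so a few
 * isolated seeks do not flip the queue's status.
 */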
static void bfq_update_has_short_ttime(struct bfq_data *bfqd,
				       struct bfq_queue *bfqq,
				       struct bfq_io_cq *bic)
{
	bool has_short_ttime = true;

	/*
	 * No need to update has_short_ttime if bfqq is async or in
	 * idle io prio class, or if bfq_slice_idle is zero, because
	 * no device idling is performed for bfqq in this case.
	 */
	if (!bfq_bfqq_sync(bfqq) || bfq_class_idle(bfqq) ||
	    bfqd->bfq_slice_idle == 0)
		return;

	/* Idle window just restored, statistics are meaningless. */
	if (time_is_after_eq_jiffies(bfqq->split_time +
				     bfqd->bfq_wr_min_idle_time))
		return;

	/*
	 * Think time is infinite if no process is linked to
	 * bfqq. Otherwise check average think time to
	 * decide whether to mark as has_short_ttime.
	 */
	if (atomic_read(&bic->icq.ioc->active_ref) == 0 ||
	    (bfq_sample_valid(bfqq->ttime.ttime_samples) &&
	     bfqq->ttime.ttime_mean > bfqd->bfq_slice_idle))
		has_short_ttime = false;

	bfq_log_bfqq(bfqd, bfqq, "update_has_short_ttime: has_short_ttime %d",
		     has_short_ttime);

	if (has_short_ttime)
		bfq_mark_bfqq_has_short_ttime(bfqq);
	else
		bfq_clear_bfqq_has_short_ttime(bfqq);
}
/*
 * Called when a new fs request (rq) is added to bfqq. Check if there's
 * something we should do about it.
 */
static void bfq_rq_enqueued(struct bfq_data *bfqd, struct bfq_queue *bfqq,
			    struct request *rq)
{
	struct bfq_io_cq *bic = RQ_BIC(rq);

	if (rq->cmd_flags & REQ_META)
		bfqq->meta_pending++;

	bfq_update_io_thinktime(bfqd, bfqq);
	bfq_update_has_short_ttime(bfqd, bfqq, bic);
	bfq_update_io_seektime(bfqd, bfqq, rq);

	bfq_log_bfqq(bfqd, bfqq,
		     "rq_enqueued: has_short_ttime=%d (seeky %d)",
		     bfq_bfqq_has_short_ttime(bfqq), BFQQ_SEEKY(bfqq));

	bfqq->last_request_pos = blk_rq_pos(rq) + blk_rq_sectors(rq);

	if (bfqq == bfqd->in_service_queue && bfq_bfqq_wait_request(bfqq)) {
		bool small_req = bfqq->queued[rq_is_sync(rq)] == 1 &&
				 blk_rq_sectors(rq) < 32;
		bool budget_timeout = bfq_bfqq_budget_timeout(bfqq);

		/*
		 * There is just this request queued: if the request
		 * is small and the queue is not to be expired, then
		 * just exit.
		 *
		 * In this way, if the device is being idled to wait
		 * for a new request from the in-service queue, we
		 * avoid unplugging the device and committing the
		 * device to serve just a small request. On the
		 * contrary, we wait for the block layer to decide
		 * when to unplug the device: hopefully, new requests
		 * will be merged to this one quickly, then the device
		 * will be unplugged and larger requests will be
		 * dispatched.
		 */
		if (small_req && !budget_timeout)
			return;

		/*
		 * A large enough request arrived, or the queue is to
		 * be expired: in both cases disk idling is to be
		 * stopped, so clear wait_request flag and reset
		 * timer.
		 */
		bfq_clear_bfqq_wait_request(bfqq);
		hrtimer_try_to_cancel(&bfqd->idle_slice_timer);
		bfqg_stats_update_idle_time(bfqq_group(bfqq));

		/*
		 * The queue is not empty, because a new request just
		 * arrived. Hence we can safely expire the queue, in
		 * case of budget timeout, without risking that the
		 * timestamps of the queue are not updated correctly.
		 * See [1] for more details.
		 */
		if (budget_timeout)
			bfq_bfqq_expire(bfqd, bfqq, false,
					BFQQE_BUDGET_TIMEOUT);
	}
}
static void __bfq_insert_request(struct bfq_data *bfqd, struct request *rq)
{
	struct bfq_queue *bfqq = RQ_BFQQ(rq),
		*new_bfqq = bfq_setup_cooperator(bfqd, bfqq, rq, true);

	if (new_bfqq) {
		if (bic_to_bfqq(RQ_BIC(rq), 1) != bfqq)
			new_bfqq = bic_to_bfqq(RQ_BIC(rq), 1);
		/*
		 * Release the request's reference to the old bfqq
		 * and make sure one is taken to the shared queue.
		 */
		new_bfqq->allocated++;
		bfqq->allocated--;
		new_bfqq->ref++;
		bfq_clear_bfqq_just_created(bfqq);
		/*
		 * If the bic associated with the process
		 * issuing this request still points to bfqq
		 * (and thus has not been already redirected
		 * to new_bfqq or even some other bfq_queue),
		 * then complete the merge and redirect it to
		 * new_bfqq.
		 */
		if (bic_to_bfqq(RQ_BIC(rq), 1) == bfqq)
			bfq_merge_bfqqs(bfqd, RQ_BIC(rq),
					bfqq, new_bfqq);
		/*
		 * rq is about to be enqueued into new_bfqq,
		 * release rq reference on bfqq
		 */
		bfq_put_queue(bfqq);
		rq->elv.priv[1] = new_bfqq;
		bfqq = new_bfqq;
	}

	bfq_add_request(rq);

	rq->fifo_time = ktime_get_ns() + bfqd->bfq_fifo_expire[rq_is_sync(rq)];
	list_add_tail(&rq->queuelist, &bfqq->fifo);

	bfq_rq_enqueued(bfqd, bfqq, rq);
}
static void bfq_insert_request(struct blk_mq_hw_ctx *hctx, struct request *rq,
			       bool at_head)
{
	struct request_queue *q = hctx->queue;
	struct bfq_data *bfqd = q->elevator->elevator_data;

	spin_lock_irq(&bfqd->lock);
	if (blk_mq_sched_try_insert_merge(q, rq)) {
		spin_unlock_irq(&bfqd->lock);
		return;
	}

	spin_unlock_irq(&bfqd->lock);

	blk_mq_sched_request_inserted(rq);

	spin_lock_irq(&bfqd->lock);
	if (at_head || blk_rq_is_passthrough(rq)) {
		if (at_head)
			list_add(&rq->queuelist, &bfqd->dispatch);
		else
			list_add_tail(&rq->queuelist, &bfqd->dispatch);
	} else {
		__bfq_insert_request(bfqd, rq);

		if (rq_mergeable(rq)) {
			elv_rqhash_add(q, rq);
			if (!q->last_merge)
				q->last_merge = rq;
		}
	}
	spin_unlock_irq(&bfqd->lock);
}
static void bfq_insert_requests(struct blk_mq_hw_ctx *hctx,
				struct list_head *list, bool at_head)
{
	while (!list_empty(list)) {
		struct request *rq;

		rq = list_first_entry(list, struct request, queuelist);
		list_del_init(&rq->queuelist);
		bfq_insert_request(hctx, rq, at_head);
	}
}
static void bfq_update_hw_tag(struct bfq_data *bfqd)
{
	bfqd->max_rq_in_driver = max_t(int, bfqd->max_rq_in_driver,
				       bfqd->rq_in_driver);

	if (bfqd->hw_tag == 1)
		return;

	/*
	 * This sample is valid if the number of outstanding requests
	 * is large enough to allow a queueing behavior. Note that the
	 * sum is not exact, as it's not taking into account deactivated
	 * requests.
	 */
	if (bfqd->rq_in_driver + bfqd->queued < BFQ_HW_QUEUE_THRESHOLD)
		return;

	if (bfqd->hw_tag_samples++ < BFQ_HW_QUEUE_SAMPLES)
		return;

	bfqd->hw_tag = bfqd->max_rq_in_driver > BFQ_HW_QUEUE_THRESHOLD;
	bfqd->max_rq_in_driver = 0;
	bfqd->hw_tag_samples = 0;
}
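
/*
 * Illustration of the sampling above (threshold constants as defined
 * earlier in this file): hw_tag is (re)evaluated only after
 * BFQ_HW_QUEUE_SAMPLES completion rounds in which at least
 * BFQ_HW_QUEUE_THRESHOLD requests were outstanding; if, over those
 * rounds, the driver never held more than BFQ_HW_QUEUE_THRESHOLD
 * requests at once, the device is assumed not to perform internal
 * queueing (hw_tag = 0), and the NCQ-specific heuristics stay off.
 */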
static void bfq_completed_request(struct bfq_queue *bfqq, struct bfq_data *bfqd)
{
	u64 now_ns;
	u32 delta_us;

	bfq_update_hw_tag(bfqd);

	bfqd->rq_in_driver--;
	bfqq->dispatched--;

	if (!bfqq->dispatched && !bfq_bfqq_busy(bfqq)) {
		/*
		 * Set budget_timeout (which we overload to store the
		 * time at which the queue remains with no backlog and
		 * no outstanding request; used by the weight-raising
		 * mechanism).
		 */
		bfqq->budget_timeout = jiffies;

		bfq_weights_tree_remove(bfqd, &bfqq->entity,
					&bfqd->queue_weights_tree);
	}

	now_ns = ktime_get_ns();

	bfqq->ttime.last_end_request = now_ns;

	/*
	 * Using us instead of ns, to get a reasonable precision in
	 * computing rate in next check.
	 */
	delta_us = div_u64(now_ns - bfqd->last_completion, NSEC_PER_USEC);

	/*
	 * If the request took rather long to complete, and, according
	 * to the maximum request size recorded, this completion latency
	 * implies that the request was certainly served at a very low
	 * rate (less than 1M sectors/sec), then the whole observation
	 * interval that lasts up to this time instant cannot be a
	 * valid time interval for computing a new peak rate. Invoke
	 * bfq_update_rate_reset to have the following three steps
	 * taken:
	 * - close the observation interval at the last (previous)
	 *   request dispatch or completion
	 * - compute rate, if possible, for that observation interval
	 * - reset to zero samples, which will trigger a proper
	 *   re-initialization of the observation interval on next
	 *   dispatch
	 */
	if (delta_us > BFQ_MIN_TT/NSEC_PER_USEC &&
	   (bfqd->last_rq_max_size<<BFQ_RATE_SHIFT)/delta_us <
			1UL<<(BFQ_RATE_SHIFT - 10))
		bfq_update_rate_reset(bfqd, NULL);
	bfqd->last_completion = now_ns;
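
	/*
	 * Worked instance of the check above, assuming BFQ_RATE_SHIFT
	 * is 16 (see its definition earlier in this file):
	 * 1UL<<(BFQ_RATE_SHIFT - 10) equals 64, so the condition
	 * (size<<16)/delta_us < 64 is equivalent to size/delta_us <
	 * 1/1024 sectors per microsecond, i.e., roughly 10^6/1024 ~=
	 * 977000 sectors/sec: just below the 1M sectors/sec bound
	 * mentioned in the comment above.
	 */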
	/*
	 * If we are waiting to discover whether the request pattern
	 * of the task associated with the queue is actually
	 * isochronous, and both requisites for this condition to hold
	 * are now satisfied, then compute soft_rt_next_start (see the
	 * comments on the function bfq_bfqq_softrt_next_start()). We
	 * schedule this delayed check when bfqq expires, if it still
	 * has in-flight requests.
	 */
	if (bfq_bfqq_softrt_update(bfqq) && bfqq->dispatched == 0 &&
	    RB_EMPTY_ROOT(&bfqq->sort_list))
		bfqq->soft_rt_next_start =
			bfq_bfqq_softrt_next_start(bfqd, bfqq);

	/*
	 * If this is the in-service queue, check if it needs to be expired,
	 * or if we want to idle in case it has no pending requests.
	 */
	if (bfqd->in_service_queue == bfqq) {
		if (bfqq->dispatched == 0 && bfq_bfqq_must_idle(bfqq)) {
			bfq_arm_slice_timer(bfqd);
			return;
		} else if (bfq_may_expire_for_budg_timeout(bfqq))
			bfq_bfqq_expire(bfqd, bfqq, false,
					BFQQE_BUDGET_TIMEOUT);
		else if (RB_EMPTY_ROOT(&bfqq->sort_list) &&
			 (bfqq->dispatched == 0 ||
			  !bfq_bfqq_may_idle(bfqq)))
			bfq_bfqq_expire(bfqd, bfqq, false,
					BFQQE_NO_MORE_REQUESTS);
	}

	if (!bfqd->rq_in_driver)
		bfq_schedule_dispatch(bfqd);
}
static void bfq_put_rq_priv_body(struct bfq_queue *bfqq)
{
	bfqq->allocated--;

	bfq_put_queue(bfqq);
}

static void bfq_finish_request(struct request *rq)
{
	struct bfq_queue *bfqq;
	struct bfq_data *bfqd;

	if (!rq->elv.icq)
		return;

	bfqq = RQ_BFQQ(rq);
	bfqd = bfqq->bfqd;

	if (rq->rq_flags & RQF_STARTED)
		bfqg_stats_update_completion(bfqq_group(bfqq),
					     rq_start_time_ns(rq),
					     rq_io_start_time_ns(rq),
					     rq->cmd_flags);

	if (likely(rq->rq_flags & RQF_STARTED)) {
		unsigned long flags;

		spin_lock_irqsave(&bfqd->lock, flags);

		bfq_completed_request(bfqq, bfqd);
		bfq_put_rq_priv_body(bfqq);

		spin_unlock_irqrestore(&bfqd->lock, flags);
	} else {
		/*
		 * Request rq may be still/already in the scheduler,
		 * in which case we need to remove it. And we cannot
		 * defer such a check and removal, to avoid
		 * inconsistencies in the time interval from the end
		 * of this function to the start of the deferred work.
		 * This situation seems to occur only in process
		 * context, as a consequence of a merge. In the
		 * current version of the code, this implies that the
		 * scheduler lock is held.
		 */

		if (!RB_EMPTY_NODE(&rq->rb_node))
			bfq_remove_request(rq->q, rq);
		bfq_put_rq_priv_body(bfqq);
	}

	rq->elv.priv[0] = NULL;
	rq->elv.priv[1] = NULL;
}
/*
 * Returns NULL if a new bfqq should be allocated, or the old bfqq if this
 * was the last process referring to that bfqq.
 */
static struct bfq_queue *
bfq_split_bfqq(struct bfq_io_cq *bic, struct bfq_queue *bfqq)
{
	bfq_log_bfqq(bfqq->bfqd, bfqq, "splitting queue");

	if (bfqq_process_refs(bfqq) == 1) {
		bfqq->pid = current->pid;
		bfq_clear_bfqq_coop(bfqq);
		bfq_clear_bfqq_split_coop(bfqq);
		return bfqq;
	}

	bic_set_bfqq(bic, NULL, 1);

	bfq_put_cooperator(bfqq);

	bfq_put_queue(bfqq);
	return NULL;
}
*bfq_get_bfqq_handle_split(struct bfq_data
*bfqd
,
4414 struct bfq_io_cq
*bic
,
4416 bool split
, bool is_sync
,
4419 struct bfq_queue
*bfqq
= bic_to_bfqq(bic
, is_sync
);
4421 if (likely(bfqq
&& bfqq
!= &bfqd
->oom_bfqq
))
4428 bfq_put_queue(bfqq
);
4429 bfqq
= bfq_get_queue(bfqd
, bio
, is_sync
, bic
);
4431 bic_set_bfqq(bic
, bfqq
, is_sync
);
4432 if (split
&& is_sync
) {
4433 if ((bic
->was_in_burst_list
&& bfqd
->large_burst
) ||
4434 bic
->saved_in_large_burst
)
4435 bfq_mark_bfqq_in_large_burst(bfqq
);
4437 bfq_clear_bfqq_in_large_burst(bfqq
);
4438 if (bic
->was_in_burst_list
)
4439 hlist_add_head(&bfqq
->burst_list_node
,
4442 bfqq
->split_time
= jiffies
;
/*
 * Allocate bfq data structures associated with this request.
 */
static void bfq_prepare_request(struct request *rq, struct bio *bio)
{
	struct request_queue *q = rq->q;
	struct bfq_data *bfqd = q->elevator->elevator_data;
	struct bfq_io_cq *bic;
	const int is_sync = rq_is_sync(rq);
	struct bfq_queue *bfqq;
	bool new_queue = false;
	bool bfqq_already_existing = false, split = false;

	if (!rq->elv.icq)
		return;
	bic = icq_to_bic(rq->elv.icq);

	spin_lock_irq(&bfqd->lock);

	bfq_check_ioprio_change(bic, bio);

	bfq_bic_update_cgroup(bic, bio);

	bfqq = bfq_get_bfqq_handle_split(bfqd, bic, bio, false, is_sync,
					 &new_queue);

	if (likely(!new_queue)) {
		/* If the queue was seeky for too long, break it apart. */
		if (bfq_bfqq_coop(bfqq) && bfq_bfqq_split_coop(bfqq)) {
			bfq_log_bfqq(bfqd, bfqq, "breaking apart bfqq");

			/* Update bic before losing reference to bfqq */
			if (bfq_bfqq_in_large_burst(bfqq))
				bic->saved_in_large_burst = true;

			bfqq = bfq_split_bfqq(bic, bfqq);
			split = true;

			if (!bfqq)
				bfqq = bfq_get_bfqq_handle_split(bfqd, bic, bio,
								 true, is_sync,
								 NULL);
			else
				bfqq_already_existing = true;
		}
	}

	bfqq->allocated++;
	bfqq->ref++;
	bfq_log_bfqq(bfqd, bfqq, "get_request %p: bfqq %p, %d",
		     rq, bfqq, bfqq->ref);

	rq->elv.priv[0] = bic;
	rq->elv.priv[1] = bfqq;

	/*
	 * If a bfq_queue has only one process reference, it is owned
	 * by only this bic: we can then set bfqq->bic = bic. In
	 * addition, if the queue has also just been split, we have to
	 * resume its state.
	 */
	if (likely(bfqq != &bfqd->oom_bfqq) && bfqq_process_refs(bfqq) == 1) {
		bfqq->bic = bic;
		if (split) {
			/*
			 * The queue has just been split from a shared
			 * queue: restore the idle window and the
			 * possible weight raising period.
			 */
			bfq_bfqq_resume_state(bfqq, bfqd, bic,
					      bfqq_already_existing);
		}
	}

	if (unlikely(bfq_bfqq_just_created(bfqq)))
		bfq_handle_burst(bfqd, bfqq);

	spin_unlock_irq(&bfqd->lock);
}
static void bfq_idle_slice_timer_body(struct bfq_queue *bfqq)
{
	struct bfq_data *bfqd = bfqq->bfqd;
	enum bfqq_expiration reason;
	unsigned long flags;

	spin_lock_irqsave(&bfqd->lock, flags);
	bfq_clear_bfqq_wait_request(bfqq);

	if (bfqq != bfqd->in_service_queue) {
		spin_unlock_irqrestore(&bfqd->lock, flags);
		return;
	}

	if (bfq_bfqq_budget_timeout(bfqq))
		/*
		 * Also here the queue can be safely expired
		 * for budget timeout without wasting
		 * guarantees.
		 */
		reason = BFQQE_BUDGET_TIMEOUT;
	else if (bfqq->queued[0] == 0 && bfqq->queued[1] == 0)
		/*
		 * The queue may not be empty upon timer expiration,
		 * because we may not disable the timer when the
		 * first request of the in-service queue arrives
		 * during disk idling.
		 */
		reason = BFQQE_TOO_IDLE;
	else
		goto schedule_dispatch;

	bfq_bfqq_expire(bfqd, bfqq, true, reason);

schedule_dispatch:
	spin_unlock_irqrestore(&bfqd->lock, flags);
	bfq_schedule_dispatch(bfqd);
}
/*
 * Handler of the expiration of the timer running if the in-service queue
 * is idling inside its time slice.
 */
static enum hrtimer_restart bfq_idle_slice_timer(struct hrtimer *timer)
{
	struct bfq_data *bfqd = container_of(timer, struct bfq_data,
					     idle_slice_timer);
	struct bfq_queue *bfqq = bfqd->in_service_queue;

	/*
	 * Theoretical race here: the in-service queue can be NULL or
	 * different from the queue that was idling if a new request
	 * arrives for the current queue and there is a full dispatch
	 * cycle that changes the in-service queue. This can hardly
	 * happen, but in the worst case we just expire a queue too
	 * early.
	 */
	if (bfqq)
		bfq_idle_slice_timer_body(bfqq);

	return HRTIMER_NORESTART;
}
static void __bfq_put_async_bfqq(struct bfq_data *bfqd,
				 struct bfq_queue **bfqq_ptr)
{
	struct bfq_queue *bfqq = *bfqq_ptr;

	bfq_log(bfqd, "put_async_bfqq: %p", bfqq);
	if (bfqq) {
		bfq_bfqq_move(bfqd, bfqq, bfqd->root_group);

		bfq_log_bfqq(bfqd, bfqq, "put_async_bfqq: putting %p, %d",
			     bfqq, bfqq->ref);
		bfq_put_queue(bfqq);
		*bfqq_ptr = NULL;
	}
}
/*
 * Release all the bfqg references to its async queues. If we are
 * deallocating the group these queues may still contain requests, so
 * we reparent them to the root cgroup (i.e., the only one that will
 * exist for sure until all the requests on a device are gone).
 */
void bfq_put_async_queues(struct bfq_data *bfqd, struct bfq_group *bfqg)
{
	int i, j;

	for (i = 0; i < 2; i++)
		for (j = 0; j < IOPRIO_BE_NR; j++)
			__bfq_put_async_bfqq(bfqd, &bfqg->async_bfqq[i][j]);

	__bfq_put_async_bfqq(bfqd, &bfqg->async_idle_bfqq);
}
static void bfq_exit_queue(struct elevator_queue *e)
{
	struct bfq_data *bfqd = e->elevator_data;
	struct bfq_queue *bfqq, *n;

	hrtimer_cancel(&bfqd->idle_slice_timer);

	spin_lock_irq(&bfqd->lock);
	list_for_each_entry_safe(bfqq, n, &bfqd->idle_list, bfqq_list)
		bfq_deactivate_bfqq(bfqd, bfqq, false, false);
	spin_unlock_irq(&bfqd->lock);

	hrtimer_cancel(&bfqd->idle_slice_timer);

#ifdef CONFIG_BFQ_GROUP_IOSCHED
	blkcg_deactivate_policy(bfqd->queue, &blkcg_policy_bfq);
#else
	spin_lock_irq(&bfqd->lock);
	bfq_put_async_queues(bfqd, bfqd->root_group);
	kfree(bfqd->root_group);
	spin_unlock_irq(&bfqd->lock);
#endif

	kfree(bfqd);
}
static void bfq_init_root_group(struct bfq_group *root_group,
				struct bfq_data *bfqd)
{
	int i;

#ifdef CONFIG_BFQ_GROUP_IOSCHED
	root_group->entity.parent = NULL;
	root_group->my_entity = NULL;
	root_group->bfqd = bfqd;
#endif
	root_group->rq_pos_tree = RB_ROOT;
	for (i = 0; i < BFQ_IOPRIO_CLASSES; i++)
		root_group->sched_data.service_tree[i] = BFQ_SERVICE_TREE_INIT;
	root_group->sched_data.bfq_class_idle_last_service = jiffies;
}
static int bfq_init_queue(struct request_queue *q, struct elevator_type *e)
{
	struct bfq_data *bfqd;
	struct elevator_queue *eq;

	eq = elevator_alloc(q, e);
	if (!eq)
		return -ENOMEM;

	bfqd = kzalloc_node(sizeof(*bfqd), GFP_KERNEL, q->node);
	if (!bfqd) {
		kobject_put(&eq->kobj);
		return -ENOMEM;
	}
	eq->elevator_data = bfqd;

	spin_lock_irq(q->queue_lock);
	q->elevator = eq;
	spin_unlock_irq(q->queue_lock);

	/*
	 * Our fallback bfqq if bfq_find_alloc_queue() runs into OOM issues.
	 * Grab a permanent reference to it, so that the normal code flow
	 * will not attempt to free it.
	 */
	bfq_init_bfqq(bfqd, &bfqd->oom_bfqq, NULL, 1, 0);
	bfqd->oom_bfqq.ref++;
	bfqd->oom_bfqq.new_ioprio = BFQ_DEFAULT_QUEUE_IOPRIO;
	bfqd->oom_bfqq.new_ioprio_class = IOPRIO_CLASS_BE;
	bfqd->oom_bfqq.entity.new_weight =
		bfq_ioprio_to_weight(bfqd->oom_bfqq.new_ioprio);

	/* oom_bfqq does not participate in bursts */
	bfq_clear_bfqq_just_created(&bfqd->oom_bfqq);

	/*
	 * Trigger weight initialization, according to ioprio, at the
	 * oom_bfqq's first activation. The oom_bfqq's ioprio and ioprio
	 * class won't be changed any more.
	 */
	bfqd->oom_bfqq.entity.prio_changed = 1;

	bfqd->queue = q;

	INIT_LIST_HEAD(&bfqd->dispatch);

	hrtimer_init(&bfqd->idle_slice_timer, CLOCK_MONOTONIC,
		     HRTIMER_MODE_REL);
	bfqd->idle_slice_timer.function = bfq_idle_slice_timer;

	bfqd->queue_weights_tree = RB_ROOT;
	bfqd->group_weights_tree = RB_ROOT;

	INIT_LIST_HEAD(&bfqd->active_list);
	INIT_LIST_HEAD(&bfqd->idle_list);
	INIT_HLIST_HEAD(&bfqd->burst_list);

	bfqd->hw_tag = -1;

	bfqd->bfq_max_budget = bfq_default_max_budget;

	bfqd->bfq_fifo_expire[0] = bfq_fifo_expire[0];
	bfqd->bfq_fifo_expire[1] = bfq_fifo_expire[1];
	bfqd->bfq_back_max = bfq_back_max;
	bfqd->bfq_back_penalty = bfq_back_penalty;
	bfqd->bfq_slice_idle = bfq_slice_idle;
	bfqd->bfq_timeout = bfq_timeout;

	bfqd->bfq_requests_within_timer = 120;

	bfqd->bfq_large_burst_thresh = 8;
	bfqd->bfq_burst_interval = msecs_to_jiffies(180);

	bfqd->low_latency = true;

	/*
	 * Trade-off between responsiveness and fairness.
	 */
	bfqd->bfq_wr_coeff = 30;
	bfqd->bfq_wr_rt_max_time = msecs_to_jiffies(300);
	bfqd->bfq_wr_max_time = 0;
	bfqd->bfq_wr_min_idle_time = msecs_to_jiffies(2000);
	bfqd->bfq_wr_min_inter_arr_async = msecs_to_jiffies(500);
	bfqd->bfq_wr_max_softrt_rate = 7000; /*
					      * Approximate rate required
					      * to playback or record a
					      * high-definition compressed
					      * video.
					      */
	bfqd->wr_busy_queues = 0;

	/*
	 * Begin by assuming, optimistically, that the device is a
	 * high-speed one, and that its peak rate is equal to 2/3 of
	 * the highest reference rate.
	 */
	bfqd->RT_prod = R_fast[blk_queue_nonrot(bfqd->queue)] *
			T_fast[blk_queue_nonrot(bfqd->queue)];
	bfqd->peak_rate = R_fast[blk_queue_nonrot(bfqd->queue)] * 2 / 3;
	bfqd->device_speed = BFQ_BFQD_FAST;

	spin_lock_init(&bfqd->lock);

	/*
	 * The invocation of the next bfq_create_group_hierarchy
	 * function is the head of a chain of function calls
	 * (bfq_create_group_hierarchy->blkcg_activate_policy->
	 * blk_mq_freeze_queue) that may lead to the invocation of the
	 * has_work hook function. For this reason,
	 * bfq_create_group_hierarchy is invoked only after all
	 * scheduler data has been initialized, apart from the fields
	 * that can be initialized only after invoking
	 * bfq_create_group_hierarchy. This, in particular, enables
	 * has_work to correctly return false. Of course, to avoid
	 * other inconsistencies, the blk-mq stack must then refrain
	 * from invoking further scheduler hooks before this init
	 * function is finished.
	 */
	bfqd->root_group = bfq_create_group_hierarchy(bfqd, q->node);
	if (!bfqd->root_group)
		goto out_free;
	bfq_init_root_group(bfqd->root_group, bfqd);
	bfq_init_entity(&bfqd->oom_bfqq.entity, bfqd->root_group);

	return 0;

out_free:
	kfree(bfqd);
	kobject_put(&eq->kobj);
	return -ENOMEM;
}
static void bfq_slab_kill(void)
{
	kmem_cache_destroy(bfq_pool);
}

static int __init bfq_slab_setup(void)
{
	bfq_pool = KMEM_CACHE(bfq_queue, 0);
	if (!bfq_pool)
		return -ENOMEM;
	return 0;
}
static ssize_t bfq_var_show(unsigned int var, char *page)
{
	return sprintf(page, "%u\n", var);
}

static int bfq_var_store(unsigned long *var, const char *page)
{
	unsigned long new_val;
	int ret = kstrtoul(page, 10, &new_val);

	if (ret == 0)
		*var = new_val;

	return ret;
}
#define SHOW_FUNCTION(__FUNC, __VAR, __CONV)				\
static ssize_t __FUNC(struct elevator_queue *e, char *page)		\
{									\
	struct bfq_data *bfqd = e->elevator_data;			\
	u64 __data = __VAR;						\
	if (__CONV == 1)						\
		__data = jiffies_to_msecs(__data);			\
	else if (__CONV == 2)						\
		__data = div_u64(__data, NSEC_PER_MSEC);		\
	return bfq_var_show(__data, (page));				\
}
SHOW_FUNCTION(bfq_fifo_expire_sync_show, bfqd->bfq_fifo_expire[1], 2);
SHOW_FUNCTION(bfq_fifo_expire_async_show, bfqd->bfq_fifo_expire[0], 2);
SHOW_FUNCTION(bfq_back_seek_max_show, bfqd->bfq_back_max, 0);
SHOW_FUNCTION(bfq_back_seek_penalty_show, bfqd->bfq_back_penalty, 0);
SHOW_FUNCTION(bfq_slice_idle_show, bfqd->bfq_slice_idle, 2);
SHOW_FUNCTION(bfq_max_budget_show, bfqd->bfq_user_max_budget, 0);
SHOW_FUNCTION(bfq_timeout_sync_show, bfqd->bfq_timeout, 1);
SHOW_FUNCTION(bfq_strict_guarantees_show, bfqd->strict_guarantees, 0);
SHOW_FUNCTION(bfq_low_latency_show, bfqd->low_latency, 0);
#undef SHOW_FUNCTION
#define USEC_SHOW_FUNCTION(__FUNC, __VAR)				\
static ssize_t __FUNC(struct elevator_queue *e, char *page)		\
{									\
	struct bfq_data *bfqd = e->elevator_data;			\
	u64 __data = __VAR;						\
	__data = div_u64(__data, NSEC_PER_USEC);			\
	return bfq_var_show(__data, (page));				\
}
USEC_SHOW_FUNCTION(bfq_slice_idle_us_show, bfqd->bfq_slice_idle);
#undef USEC_SHOW_FUNCTION
#define STORE_FUNCTION(__FUNC, __PTR, MIN, MAX, __CONV)			\
static ssize_t								\
__FUNC(struct elevator_queue *e, const char *page, size_t count)	\
{									\
	struct bfq_data *bfqd = e->elevator_data;			\
	unsigned long __data, __min = (MIN), __max = (MAX);		\
	int ret;							\
									\
	ret = bfq_var_store(&__data, (page));				\
	if (ret)							\
		return ret;						\
	if (__data < __min)						\
		__data = __min;						\
	else if (__data > __max)					\
		__data = __max;						\
	if (__CONV == 1)						\
		*(__PTR) = msecs_to_jiffies(__data);			\
	else if (__CONV == 2)						\
		*(__PTR) = (u64)__data * NSEC_PER_MSEC;			\
	else								\
		*(__PTR) = __data;					\
	return count;							\
}
STORE_FUNCTION(bfq_fifo_expire_sync_store, &bfqd->bfq_fifo_expire[1], 1,
		INT_MAX, 2);
STORE_FUNCTION(bfq_fifo_expire_async_store, &bfqd->bfq_fifo_expire[0], 1,
		INT_MAX, 2);
STORE_FUNCTION(bfq_back_seek_max_store, &bfqd->bfq_back_max, 0, INT_MAX, 0);
STORE_FUNCTION(bfq_back_seek_penalty_store, &bfqd->bfq_back_penalty, 1,
		INT_MAX, 0);
STORE_FUNCTION(bfq_slice_idle_store, &bfqd->bfq_slice_idle, 0, INT_MAX, 2);
#undef STORE_FUNCTION
#define USEC_STORE_FUNCTION(__FUNC, __PTR, MIN, MAX)			\
static ssize_t __FUNC(struct elevator_queue *e, const char *page, size_t count)\
{									\
	struct bfq_data *bfqd = e->elevator_data;			\
	unsigned long __data, __min = (MIN), __max = (MAX);		\
	int ret;							\
									\
	ret = bfq_var_store(&__data, (page));				\
	if (ret)							\
		return ret;						\
	if (__data < __min)						\
		__data = __min;						\
	else if (__data > __max)					\
		__data = __max;						\
	*(__PTR) = (u64)__data * NSEC_PER_USEC;				\
	return count;							\
}
USEC_STORE_FUNCTION(bfq_slice_idle_us_store, &bfqd->bfq_slice_idle, 0,
		    UINT_MAX);
#undef USEC_STORE_FUNCTION
static ssize_t bfq_max_budget_store(struct elevator_queue *e,
				    const char *page, size_t count)
{
	struct bfq_data *bfqd = e->elevator_data;
	unsigned long __data;
	int ret;

	ret = bfq_var_store(&__data, (page));
	if (ret)
		return ret;

	if (__data == 0)
		bfqd->bfq_max_budget = bfq_calc_max_budget(bfqd);
	else {
		if (__data > INT_MAX)
			__data = INT_MAX;
		bfqd->bfq_max_budget = __data;
	}

	bfqd->bfq_user_max_budget = __data;

	return count;
}
/*
 * Leaving this name to preserve name compatibility with cfq
 * parameters, but this timeout is used for both sync and async.
 */
static ssize_t bfq_timeout_sync_store(struct elevator_queue *e,
				      const char *page, size_t count)
{
	struct bfq_data *bfqd = e->elevator_data;
	unsigned long __data;
	int ret;

	ret = bfq_var_store(&__data, (page));
	if (ret)
		return ret;

	if (__data < 1)
		__data = 1;
	else if (__data > INT_MAX)
		__data = INT_MAX;

	bfqd->bfq_timeout = msecs_to_jiffies(__data);
	if (bfqd->bfq_user_max_budget == 0)
		bfqd->bfq_max_budget = bfq_calc_max_budget(bfqd);

	return count;
}
static ssize_t bfq_strict_guarantees_store(struct elevator_queue *e,
					   const char *page, size_t count)
{
	struct bfq_data *bfqd = e->elevator_data;
	unsigned long __data;
	int ret;

	ret = bfq_var_store(&__data, (page));
	if (ret)
		return ret;

	if (__data > 1)
		__data = 1;
	if (!bfqd->strict_guarantees && __data == 1
	    && bfqd->bfq_slice_idle < 8 * NSEC_PER_MSEC)
		bfqd->bfq_slice_idle = 8 * NSEC_PER_MSEC;

	bfqd->strict_guarantees = __data;

	return count;
}
static ssize_t bfq_low_latency_store(struct elevator_queue *e,
				     const char *page, size_t count)
{
	struct bfq_data *bfqd = e->elevator_data;
	unsigned long __data;
	int ret;

	ret = bfq_var_store(&__data, (page));
	if (ret)
		return ret;

	if (__data > 1)
		__data = 1;
	if (__data == 0 && bfqd->low_latency != 0)
		bfq_end_wr(bfqd);
	bfqd->low_latency = __data;

	return count;
}
#define BFQ_ATTR(name) \
	__ATTR(name, 0644, bfq_##name##_show, bfq_##name##_store)

static struct elv_fs_entry bfq_attrs[] = {
	BFQ_ATTR(fifo_expire_sync),
	BFQ_ATTR(fifo_expire_async),
	BFQ_ATTR(back_seek_max),
	BFQ_ATTR(back_seek_penalty),
	BFQ_ATTR(slice_idle),
	BFQ_ATTR(slice_idle_us),
	BFQ_ATTR(max_budget),
	BFQ_ATTR(timeout_sync),
	BFQ_ATTR(strict_guarantees),
	BFQ_ATTR(low_latency),
	__ATTR_NULL
};
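
/*
 * Usage note (illustrative): once bfq is the active scheduler for a
 * device, these attributes appear under
 * /sys/block/<dev>/queue/iosched/. For example, from a shell:
 *
 *   echo bfq > /sys/block/sda/queue/scheduler
 *   echo 0   > /sys/block/sda/queue/iosched/low_latency
 *
 * The second write disables the low-latency heuristics for that
 * device (see bfq_low_latency_store above, which also ends any
 * ongoing weight raising).
 */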
static struct elevator_type iosched_bfq_mq = {
	.ops.mq = {
		.prepare_request	= bfq_prepare_request,
		.finish_request		= bfq_finish_request,
		.exit_icq		= bfq_exit_icq,
		.insert_requests	= bfq_insert_requests,
		.dispatch_request	= bfq_dispatch_request,
		.next_request		= elv_rb_latter_request,
		.former_request		= elv_rb_former_request,
		.allow_merge		= bfq_allow_bio_merge,
		.bio_merge		= bfq_bio_merge,
		.request_merge		= bfq_request_merge,
		.requests_merged	= bfq_requests_merged,
		.request_merged		= bfq_request_merged,
		.has_work		= bfq_has_work,
		.init_sched		= bfq_init_queue,
		.exit_sched		= bfq_exit_queue,
	},

	.uses_mq		= true,
	.icq_size		= sizeof(struct bfq_io_cq),
	.icq_align		= __alignof__(struct bfq_io_cq),
	.elevator_attrs		= bfq_attrs,
	.elevator_name		= "bfq",
	.elevator_owner		= THIS_MODULE,
};

MODULE_ALIAS("bfq-iosched");
static int __init bfq_init(void)
{
	int ret;

#ifdef CONFIG_BFQ_GROUP_IOSCHED
	ret = blkcg_policy_register(&blkcg_policy_bfq);
	if (ret)
		return ret;
#endif

	ret = -ENOMEM;
	if (bfq_slab_setup())
		goto err_pol_unreg;

	/*
	 * Times to load large popular applications for the typical
	 * systems installed on the reference devices (see the
	 * comments before the definitions of the next two
	 * arrays). Actually, we use slightly slower values, as the
	 * estimated peak rate tends to be smaller than the actual
	 * peak rate. The reason for this last fact is that estimates
	 * are computed over much shorter time intervals than the long
	 * intervals typically used for benchmarking. Why? First, to
	 * adapt more quickly to variations. Second, because an I/O
	 * scheduler cannot rely on a peak-rate-evaluation workload to
	 * be run for a long time.
	 */
	T_slow[0] = msecs_to_jiffies(3500); /* actually 4 sec */
	T_slow[1] = msecs_to_jiffies(6000); /* actually 6.5 sec */
	T_fast[0] = msecs_to_jiffies(7000); /* actually 8 sec */
	T_fast[1] = msecs_to_jiffies(2500); /* actually 3 sec */

	/*
	 * Thresholds that determine the switch between speed classes
	 * (see the comments before the definition of the array
	 * device_speed_thresh). These thresholds are biased towards
	 * transitions to the fast class. This is safer than the
	 * opposite bias. In fact, a wrong transition to the slow
	 * class results in short weight-raising periods, because the
	 * speed of the device then tends to be higher than the
	 * reference peak rate. On the opposite end, a wrong
	 * transition to the fast class tends to increase
	 * weight-raising periods, because of the opposite reason.
	 */
	device_speed_thresh[0] = (4 * R_slow[0]) / 3;
	device_speed_thresh[1] = (4 * R_slow[1]) / 3;
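
	/*
	 * Numerically (illustrative): each threshold is 4/3 of the
	 * corresponding slow-class reference rate R_slow[i], so a
	 * device is classified as fast only when its estimated peak
	 * rate exceeds the slow reference rate by at least one third,
	 * which implements the fast-class bias described above.
	 */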
	ret = elv_register(&iosched_bfq_mq);
	if (ret)
		goto err_pol_unreg;

	return 0;

err_pol_unreg:
#ifdef CONFIG_BFQ_GROUP_IOSCHED
	blkcg_policy_unregister(&blkcg_policy_bfq);
#endif
	return ret;
}
static void __exit bfq_exit(void)
{
	elv_unregister(&iosched_bfq_mq);
#ifdef CONFIG_BFQ_GROUP_IOSCHED
	blkcg_policy_unregister(&blkcg_policy_bfq);
#endif
	bfq_slab_kill();
}

module_init(bfq_init);
module_exit(bfq_exit);

MODULE_AUTHOR("Paolo Valente");
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("MQ Budget Fair Queueing I/O Scheduler");