/*
 * Budget Fair Queueing (BFQ) I/O scheduler.
 *
 * Based on ideas and code from CFQ:
 * Copyright (C) 2003 Jens Axboe <axboe@kernel.dk>
 *
 * Copyright (C) 2008 Fabio Checconi <fabio@gandalf.sssup.it>
 *		      Paolo Valente <paolo.valente@unimore.it>
 *
 * Copyright (C) 2010 Paolo Valente <paolo.valente@unimore.it>
 *                    Arianna Avanzini <avanzini@google.com>
 *
 * Copyright (C) 2017 Paolo Valente <paolo.valente@linaro.org>
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License as
 * published by the Free Software Foundation; either version 2 of the
 * License, or (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * BFQ is a proportional-share I/O scheduler, with some extra
 * low-latency capabilities. BFQ also supports full hierarchical
 * scheduling through cgroups. The next paragraphs provide an
 * introduction to BFQ's inner workings. Details on BFQ's benefits,
 * usage and limitations can be found in
 * Documentation/block/bfq-iosched.txt.
 *
 * BFQ is a proportional-share storage-I/O scheduling algorithm based
 * on the slice-by-slice service scheme of CFQ. But BFQ assigns
 * budgets, measured in number of sectors, to processes instead of
 * time slices. The device is not granted to the in-service process
 * for a given time slice, but until it has exhausted its assigned
 * budget. This change from the time to the service domain enables BFQ
 * to distribute the device throughput among processes as desired,
 * without any distortion due to throughput fluctuations, or to device
 * internal queueing. BFQ uses an ad hoc internal scheduler, called
 * B-WF2Q+, to schedule processes according to their budgets. More
 * precisely, BFQ schedules queues associated with processes. Each
 * process/queue is assigned a user-configurable weight, and B-WF2Q+
 * guarantees that each queue receives a fraction of the throughput
 * proportional to its weight. Thanks to the accurate policy of
 * B-WF2Q+, BFQ can afford to assign high budgets to I/O-bound
 * processes issuing sequential requests (to boost the throughput),
 * and yet guarantee a low latency to interactive and soft real-time
 * applications.
 *
 * In particular, to provide these low-latency guarantees, BFQ
 * explicitly privileges the I/O of two classes of time-sensitive
 * applications: interactive and soft real-time. This feature enables
 * BFQ to provide applications in these classes with a very low
 * latency. Finally, BFQ also features additional heuristics for
 * preserving both a low latency and a high throughput on NCQ-capable,
 * rotational or flash-based devices, and to get the job done quickly
 * for applications consisting of many I/O-bound processes.
 *
 * NOTE: if the main or only goal, with a given device, is to achieve
 * the maximum-possible throughput at all times, then do switch off
 * all low-latency heuristics for that device, by setting low_latency
 * to 0.
 *
 * BFQ is described in [1], where a reference to the initial, more
 * theoretical paper on BFQ can also be found. The interested reader
 * can find in the latter paper full details on the main algorithm, as
 * well as formulas of the guarantees and formal proofs of all the
 * properties. With respect to the version of BFQ presented in these
 * papers, this implementation adds a few more heuristics, such as the
 * one that guarantees a low latency to soft real-time applications,
 * and a hierarchical extension based on H-WF2Q+.
 *
 * B-WF2Q+ is based on WF2Q+, which is described in [2], together with
 * H-WF2Q+, while the augmented tree used here to implement B-WF2Q+
 * with O(log N) complexity derives from the one introduced with EEVDF
 * in [3].
 *
 * [1] P. Valente, A. Avanzini, "Evolution of the BFQ Storage I/O
 *     Scheduler", Proceedings of the First Workshop on Mobile System
 *     Technologies (MST-2015), May 2015.
 *     http://algogroup.unimore.it/people/paolo/disk_sched/mst-2015.pdf
 *
 * [2] Jon C.R. Bennett and H. Zhang, "Hierarchical Packet Fair Queueing
 *     Algorithms", IEEE/ACM Transactions on Networking, 5(5):675-689,
 *     Oct 1997.
 *
 *     http://www.cs.cmu.edu/~hzhang/papers/TON-97-Oct.ps.gz
 *
 * [3] I. Stoica and H. Abdel-Wahab, "Earliest Eligible Virtual Deadline
 *     First: A Flexible and Accurate Mechanism for Proportional Share
 *     Resource Allocation", technical report.
 *
 *     http://www.cs.berkeley.edu/~istoica/papers/eevdf-tr-95.pdf
 */
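
/*
 * For example (illustrative note): with three continuously backlogged
 * queues configured with weights 100, 100 and 200, B-WF2Q+ grants them
 * 25%, 25% and 50% of the aggregate throughput, respectively, and this
 * split holds regardless of how the device throughput itself
 * fluctuates over time, because service is accounted in sectors and
 * not in time.
 */
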
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/blkdev.h>
#include <linux/cgroup.h>
#include <linux/elevator.h>
#include <linux/ktime.h>
#include <linux/rbtree.h>
#include <linux/ioprio.h>
#include <linux/sbitmap.h>
#include <linux/delay.h>

#include "blk.h"
#include "blk-mq.h"
#include "blk-mq-tag.h"
#include "blk-mq-sched.h"
#include "bfq-iosched.h"

#define BFQ_BFQQ_FNS(name)						\
void bfq_mark_bfqq_##name(struct bfq_queue *bfqq)			\
{									\
	__set_bit(BFQQF_##name, &(bfqq)->flags);			\
}									\
void bfq_clear_bfqq_##name(struct bfq_queue *bfqq)			\
{									\
	__clear_bit(BFQQF_##name, &(bfqq)->flags);			\
}									\
int bfq_bfqq_##name(const struct bfq_queue *bfqq)			\
{									\
	return test_bit(BFQQF_##name, &(bfqq)->flags);			\
}

BFQ_BFQQ_FNS(just_created);
BFQ_BFQQ_FNS(busy);
BFQ_BFQQ_FNS(wait_request);
BFQ_BFQQ_FNS(non_blocking_wait_rq);
BFQ_BFQQ_FNS(fifo_expire);
BFQ_BFQQ_FNS(has_short_ttime);
BFQ_BFQQ_FNS(sync);
BFQ_BFQQ_FNS(IO_bound);
BFQ_BFQQ_FNS(in_large_burst);
BFQ_BFQQ_FNS(coop);
BFQ_BFQQ_FNS(split_coop);
BFQ_BFQQ_FNS(softrt_update);
#undef BFQ_BFQQ_FNS

/* Expiration time of sync (0) and async (1) requests, in ns. */
static const u64 bfq_fifo_expire[2] = { NSEC_PER_SEC / 4, NSEC_PER_SEC / 8 };

/* Maximum backwards seek (magic number lifted from CFQ), in KiB. */
static const int bfq_back_max = 16 * 1024;

/* Penalty of a backwards seek, in number of sectors. */
static const int bfq_back_penalty = 2;

/* Idling period duration, in ns. */
static u64 bfq_slice_idle = NSEC_PER_SEC / 125;

/* Minimum number of assigned budgets for which stats are safe to compute. */
static const int bfq_stats_min_budgets = 194;

/* Default maximum budget values, in sectors and number of requests. */
static const int bfq_default_max_budget = 16 * 1024;

/*
 * Async to sync throughput distribution is controlled as follows:
 * when an async request is served, the entity is charged the number
 * of sectors of the request, multiplied by the factor below.
 */
static const int bfq_async_charge_factor = 10;

/* Default timeout values, in jiffies, approximating CFQ defaults. */
const int bfq_timeout = HZ / 8;

static struct kmem_cache *bfq_pool;

/* Below this threshold (in ns), we consider thinktime immediate. */
#define BFQ_MIN_TT		(2 * NSEC_PER_MSEC)

/* hw_tag detection: parallel requests threshold and min samples needed. */
#define BFQ_HW_QUEUE_THRESHOLD	4
#define BFQ_HW_QUEUE_SAMPLES	32

#define BFQQ_SEEK_THR		(sector_t)(8 * 100)
#define BFQQ_SECT_THR_NONROT	(sector_t)(2 * 32)
#define BFQQ_CLOSE_THR		(sector_t)(8 * 1024)
#define BFQQ_SEEKY(bfqq)	(hweight32(bfqq->seek_history) > 32/8)

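/*
 * Illustrative note on BFQQ_SEEKY(): seek_history acts as a 32-bit
 * sliding window, one bit per sampled request, set when the request
 * was far from the previous one. A queue is thus deemed seeky as soon
 * as more than 4 of its last 32 samples (i.e., more than 1/8 of them)
 * were seeky: e.g., seek_history == 0x1f (five bits set) already
 * makes BFQQ_SEEKY() evaluate to true.
 */
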
/* Min number of samples required to perform peak-rate update */
#define BFQ_RATE_MIN_SAMPLES	32
/* Min observation time interval required to perform a peak-rate update (ns) */
#define BFQ_RATE_MIN_INTERVAL	(300*NSEC_PER_MSEC)
/* Target observation time interval for a peak-rate update (ns) */
#define BFQ_RATE_REF_INTERVAL	NSEC_PER_SEC

/* Shift used for peak rate fixed precision calculations. */
#define BFQ_RATE_SHIFT		16

/*
 * By default, BFQ computes the duration of the weight raising for
 * interactive applications automatically, using the following formula:
 * duration = (R / r) * T, where r is the peak rate of the device, and
 * R and T are two reference parameters.
 * In particular, R is the peak rate of the reference device (see below),
 * and T is a reference time: given the systems that are likely to be
 * installed on the reference device according to its speed class, T is
 * about the maximum time needed, under BFQ and while reading two files in
 * parallel, to load typical large applications on these systems.
 * In practice, the slower/faster the device at hand is, the more/less it
 * takes to load applications with respect to the reference device.
 * Accordingly, the longer/shorter BFQ grants weight raising to interactive
 * applications.
 *
 * BFQ uses four different reference pairs (R, T), depending on:
 * . whether the device is rotational or non-rotational;
 * . whether the device is slow, such as old or portable HDDs, as well as
 *   SD cards, or fast, such as newer HDDs and SSDs.
 *
 * The device's speed class is dynamically (re)detected in
 * bfq_update_peak_rate() every time the estimated peak rate is updated.
 *
 * In the following definitions, R_slow[0]/R_fast[0] and
 * T_slow[0]/T_fast[0] are the reference values for a slow/fast
 * rotational device, whereas R_slow[1]/R_fast[1] and
 * T_slow[1]/T_fast[1] are the reference values for a slow/fast
 * non-rotational device. Finally, device_speed_thresh are the
 * thresholds used to switch between speed classes. The reference
 * rates are not the actual peak rates of the devices used as a
 * reference, but slightly lower values. The reason for using these
 * slightly lower values is that the peak-rate estimator tends to
 * yield slightly lower values than the actual peak rate (it can yield
 * the actual peak rate only if there is only one process doing I/O,
 * and the process does sequential I/O).
 *
 * Both the reference peak rates and the thresholds are measured in
 * sectors/usec, left-shifted by BFQ_RATE_SHIFT.
 */
static int R_slow[2] = {1000, 10700};
static int R_fast[2] = {14000, 33000};
/*
 * To improve readability, a conversion function is used to initialize
 * the following arrays, which entails that they can be initialized
 * only in a function.
 */
static int T_slow[2];
static int T_fast[2];
static int device_speed_thresh[2];

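/*
 * Minimal sketch of the kind of conversion function meant above. The
 * actual initialization happens elsewhere, at scheduler-init time; the
 * millisecond values and the threshold formula below are placeholders
 * chosen only to illustrate the idea, not the file's real parameters.
 */
static void __maybe_unused bfq_init_reference_params_sketch(void)
{
	/* reference load times, converted from milliseconds to jiffies */
	T_slow[0] = msecs_to_jiffies(3500);	/* slow rotational */
	T_slow[1] = msecs_to_jiffies(6000);	/* slow non-rotational */
	T_fast[0] = msecs_to_jiffies(1400);	/* fast rotational */
	T_fast[1] = msecs_to_jiffies(800);	/* fast non-rotational */

	/* e.g., switch class halfway between the two reference rates */
	device_speed_thresh[0] = (R_fast[0] + R_slow[0]) / 2;
	device_speed_thresh[1] = (R_fast[1] + R_slow[1]) / 2;
}
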
#define RQ_BIC(rq)		icq_to_bic((rq)->elv.priv[0])
#define RQ_BFQQ(rq)		((rq)->elv.priv[1])

struct bfq_queue *bic_to_bfqq(struct bfq_io_cq *bic, bool is_sync)
{
	return bic->bfqq[is_sync];
}

void bic_set_bfqq(struct bfq_io_cq *bic, struct bfq_queue *bfqq, bool is_sync)
{
	bic->bfqq[is_sync] = bfqq;
}

struct bfq_data *bic_to_bfqd(struct bfq_io_cq *bic)
{
	return bic->icq.q->elevator->elevator_data;
}

/**
 * icq_to_bic - convert iocontext queue structure to bfq_io_cq.
 * @icq: the iocontext queue.
 */
static struct bfq_io_cq *icq_to_bic(struct io_cq *icq)
{
	/* bic->icq is the first member, %NULL will convert to %NULL */
	return container_of(icq, struct bfq_io_cq, icq);
}

/**
 * bfq_bic_lookup - search into @ioc a bic associated to @bfqd.
 * @bfqd: the lookup key.
 * @ioc: the io_context of the process doing I/O.
 * @q: the request queue.
 */
static struct bfq_io_cq *bfq_bic_lookup(struct bfq_data *bfqd,
					struct io_context *ioc,
					struct request_queue *q)
{
	if (ioc) {
		unsigned long flags;
		struct bfq_io_cq *icq;

		spin_lock_irqsave(q->queue_lock, flags);
		icq = icq_to_bic(ioc_lookup_icq(ioc, q));
		spin_unlock_irqrestore(q->queue_lock, flags);

		return icq;
	}

	return NULL;
}

/*
 * Scheduler run of queue, if there are requests pending and no one in the
 * driver that will restart queueing.
 */
void bfq_schedule_dispatch(struct bfq_data *bfqd)
{
	if (bfqd->queued != 0) {
		bfq_log(bfqd, "schedule dispatch");
		blk_mq_run_hw_queues(bfqd->queue, true);
	}
}

#define bfq_class_idle(bfqq)	((bfqq)->ioprio_class == IOPRIO_CLASS_IDLE)
#define bfq_class_rt(bfqq)	((bfqq)->ioprio_class == IOPRIO_CLASS_RT)

#define bfq_sample_valid(samples)	((samples) > 80)

/*
 * Lifted from AS - choose which of rq1 and rq2 that is best served now.
 * We choose the request that is closest to the head right now. Distance
 * behind the head is penalized and only allowed to a certain extent.
 */
static struct request *bfq_choose_req(struct bfq_data *bfqd,
				      struct request *rq1,
				      struct request *rq2,
				      sector_t last)
{
	sector_t s1, s2, d1 = 0, d2 = 0;
	unsigned long back_max;
#define BFQ_RQ1_WRAP	0x01 /* request 1 wraps */
#define BFQ_RQ2_WRAP	0x02 /* request 2 wraps */
	unsigned int wrap = 0; /* bit mask: requests behind the disk head? */

	if (!rq1 || rq1 == rq2)
		return rq2;
	if (!rq2)
		return rq1;

	if (rq_is_sync(rq1) && !rq_is_sync(rq2))
		return rq1;
	else if (rq_is_sync(rq2) && !rq_is_sync(rq1))
		return rq2;
	if ((rq1->cmd_flags & REQ_META) && !(rq2->cmd_flags & REQ_META))
		return rq1;
	else if ((rq2->cmd_flags & REQ_META) && !(rq1->cmd_flags & REQ_META))
		return rq2;

	s1 = blk_rq_pos(rq1);
	s2 = blk_rq_pos(rq2);

	/*
	 * By definition, 1KiB is 2 sectors.
	 */
	back_max = bfqd->bfq_back_max * 2;

	/*
	 * Strict one way elevator _except_ in the case where we allow
	 * short backward seeks which are biased as twice the cost of a
	 * similar forward seek.
	 */
	if (s1 >= last)
		d1 = s1 - last;
	else if (s1 + back_max >= last)
		d1 = (last - s1) * bfqd->bfq_back_penalty;
	else
		wrap |= BFQ_RQ1_WRAP;

	if (s2 >= last)
		d2 = s2 - last;
	else if (s2 + back_max >= last)
		d2 = (last - s2) * bfqd->bfq_back_penalty;
	else
		wrap |= BFQ_RQ2_WRAP;

	/* Found required data */

	/*
	 * By doing switch() on the bit mask "wrap" we avoid having to
	 * check two variables for all permutations: --> faster!
	 */
	switch (wrap) {
	case 0: /* common case for CFQ: rq1 and rq2 not wrapped */
		if (d1 < d2)
			return rq1;
		else if (d2 < d1)
			return rq2;

		if (s1 >= s2)
			return rq1;
		else
			return rq2;

	case BFQ_RQ2_WRAP:
		return rq1;
	case BFQ_RQ1_WRAP:
		return rq2;
	case BFQ_RQ1_WRAP|BFQ_RQ2_WRAP: /* both rqs wrapped */
	default:
		/*
		 * Since both rqs are wrapped,
		 * start with the one that's further behind head
		 * (--> only *one* back seek required),
		 * since back seek takes more time than forward.
		 */
		if (s1 <= s2)
			return rq1;
		else
			return rq2;
	}
}

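/*
 * Worked example for bfq_choose_req(): with the head at sector 1000
 * and the default back penalty of 2, a request at sector 1100 gets
 * d = 100, while a request at sector 900 lies behind the head and,
 * being within back_max, gets the penalized distance
 * d = (1000 - 900) * 2 = 200. The forward request wins even though
 * both requests are 100 sectors away from the head.
 */
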
static struct bfq_queue *
bfq_rq_pos_tree_lookup(struct bfq_data *bfqd, struct rb_root *root,
		       sector_t sector, struct rb_node **ret_parent,
		       struct rb_node ***rb_link)
{
	struct rb_node **p, *parent;
	struct bfq_queue *bfqq = NULL;

	parent = NULL;
	p = &root->rb_node;
	while (*p) {
		struct rb_node **n;

		parent = *p;
		bfqq = rb_entry(parent, struct bfq_queue, pos_node);

		/*
		 * Sort strictly based on sector. Smallest to the left,
		 * largest to the right.
		 */
		if (sector > blk_rq_pos(bfqq->next_rq))
			n = &(*p)->rb_right;
		else if (sector < blk_rq_pos(bfqq->next_rq))
			n = &(*p)->rb_left;
		else
			break;
		p = n;
		bfqq = NULL;
	}

	*ret_parent = parent;
	if (rb_link)
		*rb_link = p;

	bfq_log(bfqd, "rq_pos_tree_lookup %llu: returning %d",
		(unsigned long long)sector,
		bfqq ? bfqq->pid : 0);

	return bfqq;
}

void bfq_pos_tree_add_move(struct bfq_data *bfqd, struct bfq_queue *bfqq)
{
	struct rb_node **p, *parent;
	struct bfq_queue *__bfqq;

	if (bfqq->pos_root) {
		rb_erase(&bfqq->pos_node, bfqq->pos_root);
		bfqq->pos_root = NULL;
	}

	if (bfq_class_idle(bfqq))
		return;
	if (!bfqq->next_rq)
		return;

	bfqq->pos_root = &bfq_bfqq_to_bfqg(bfqq)->rq_pos_tree;
	__bfqq = bfq_rq_pos_tree_lookup(bfqd, bfqq->pos_root,
			blk_rq_pos(bfqq->next_rq), &parent, &p);
	if (!__bfqq) {
		rb_link_node(&bfqq->pos_node, parent, p);
		rb_insert_color(&bfqq->pos_node, bfqq->pos_root);
	} else
		bfqq->pos_root = NULL;
}

/*
 * Tell whether there are active queues or groups with differentiated weights.
 */
static bool bfq_differentiated_weights(struct bfq_data *bfqd)
{
	/*
	 * For weights to differ, at least one of the trees must contain
	 * at least two nodes.
	 */
	return (!RB_EMPTY_ROOT(&bfqd->queue_weights_tree) &&
		(bfqd->queue_weights_tree.rb_node->rb_left ||
		 bfqd->queue_weights_tree.rb_node->rb_right)
#ifdef CONFIG_BFQ_GROUP_IOSCHED
	       ) ||
	       (!RB_EMPTY_ROOT(&bfqd->group_weights_tree) &&
		(bfqd->group_weights_tree.rb_node->rb_left ||
		 bfqd->group_weights_tree.rb_node->rb_right)
#endif
	       );
}

/*
 * The following function returns true if every queue must receive the
 * same share of the throughput (this condition is used when deciding
 * whether idling may be disabled, see the comments in the function
 * bfq_bfqq_may_idle()).
 *
 * Such a scenario occurs when:
 * 1) all active queues have the same weight,
 * 2) all active groups at the same level in the groups tree have the same
 *    weight,
 * 3) all active groups at the same level in the groups tree have the same
 *    number of children.
 *
 * Unfortunately, keeping the necessary state for evaluating exactly the
 * above symmetry conditions would be quite complex and time-consuming.
 * Therefore this function evaluates, instead, the following stronger
 * sub-conditions, for which it is much easier to maintain the needed
 * state:
 * 1) all active queues have the same weight,
 * 2) all active groups have the same weight,
 * 3) all active groups have at most one active child each.
 * In particular, the last two conditions are always true if hierarchical
 * support and the cgroups interface are not enabled, thus no state needs
 * to be maintained in this case.
 */
static bool bfq_symmetric_scenario(struct bfq_data *bfqd)
{
	return !bfq_differentiated_weights(bfqd);
}

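/*
 * For example: two busy queues with weights 100 and 200 produce two
 * counter nodes in queue_weights_tree, so the root has a child and the
 * scenario is deemed asymmetric. If both queues had weight 100, they
 * would share a single weight counter (one node, no children) and the
 * scenario would be deemed symmetric.
 */
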
/*
 * If the weight-counter tree passed as input contains no counter for
 * the weight of the input entity, then add that counter; otherwise just
 * increment the existing counter.
 *
 * Note that weight-counter trees contain few nodes in mostly symmetric
 * scenarios. For example, if all queues have the same weight, then the
 * weight-counter tree for the queues may contain at most one node.
 * This holds even if low_latency is on, because weight-raised queues
 * are not inserted in the tree.
 * In most scenarios, the rate at which nodes are created/destroyed
 * should be low too.
 */
void bfq_weights_tree_add(struct bfq_data *bfqd, struct bfq_entity *entity,
			  struct rb_root *root)
{
	struct rb_node **new = &(root->rb_node), *parent = NULL;

	/*
	 * Do not insert if the entity is already associated with a
	 * counter, which happens if:
	 * 1) the entity is associated with a queue,
	 * 2) a request arrival has caused the queue to become both
	 *    non-weight-raised, and hence change its weight, and
	 *    backlogged; in this respect, each of the two events
	 *    causes an invocation of this function,
	 * 3) this is the invocation of this function caused by the
	 *    second event. This second invocation is actually useless,
	 *    and we handle this fact by exiting immediately. More
	 *    efficient or clearer solutions might possibly be adopted.
	 */
	if (entity->weight_counter)
		return;

	while (*new) {
		struct bfq_weight_counter *__counter = container_of(*new,
						struct bfq_weight_counter,
						weights_node);
		parent = *new;

		if (entity->weight == __counter->weight) {
			entity->weight_counter = __counter;
			goto inc_counter;
		}
		if (entity->weight < __counter->weight)
			new = &((*new)->rb_left);
		else
			new = &((*new)->rb_right);
	}

	entity->weight_counter = kzalloc(sizeof(struct bfq_weight_counter),
					 GFP_ATOMIC);

	/*
	 * In the unlucky event of an allocation failure, we just
	 * exit. This will cause the weight of entity to not be
	 * considered in bfq_differentiated_weights, which, in its
	 * turn, causes the scenario to be deemed wrongly symmetric in
	 * case entity's weight would have been the only weight making
	 * the scenario asymmetric. On the bright side, no unbalance
	 * will however occur when entity becomes inactive again (the
	 * invocation of this function is triggered by an activation
	 * of entity). In fact, bfq_weights_tree_remove does nothing
	 * if !entity->weight_counter.
	 */
	if (unlikely(!entity->weight_counter))
		return;

	entity->weight_counter->weight = entity->weight;
	rb_link_node(&entity->weight_counter->weights_node, parent, new);
	rb_insert_color(&entity->weight_counter->weights_node, root);

inc_counter:
	entity->weight_counter->num_active++;
}

/*
 * Decrement the weight counter associated with the entity, and, if the
 * counter reaches 0, remove the counter from the tree.
 * See the comments to the function bfq_weights_tree_add() for considerations
 * about overhead.
 */
void bfq_weights_tree_remove(struct bfq_data *bfqd, struct bfq_entity *entity,
			     struct rb_root *root)
{
	if (!entity->weight_counter)
		return;

	entity->weight_counter->num_active--;
	if (entity->weight_counter->num_active > 0)
		goto reset_entity_pointer;

	rb_erase(&entity->weight_counter->weights_node, root);
	kfree(entity->weight_counter);

reset_entity_pointer:
	entity->weight_counter = NULL;
}

/*
 * Return expired entry, or NULL to just start from scratch in rbtree.
 */
static struct request *bfq_check_fifo(struct bfq_queue *bfqq,
				      struct request *last)
{
	struct request *rq;

	if (bfq_bfqq_fifo_expire(bfqq))
		return NULL;

	bfq_mark_bfqq_fifo_expire(bfqq);

	rq = rq_entry_fifo(bfqq->fifo.next);

	if (rq == last || ktime_get_ns() < rq->fifo_time)
		return NULL;

	bfq_log_bfqq(bfqq->bfqd, bfqq, "check_fifo: returned %p", rq);
	return rq;
}

static struct request *bfq_find_next_rq(struct bfq_data *bfqd,
					struct bfq_queue *bfqq,
					struct request *last)
{
	struct rb_node *rbnext = rb_next(&last->rb_node);
	struct rb_node *rbprev = rb_prev(&last->rb_node);
	struct request *next, *prev = NULL;

	/* Follow expired path, else get first next available. */
	next = bfq_check_fifo(bfqq, last);
	if (next)
		return next;

	if (rbprev)
		prev = rb_entry_rq(rbprev);

	if (rbnext)
		next = rb_entry_rq(rbnext);
	else {
		rbnext = rb_first(&bfqq->sort_list);
		if (rbnext && rbnext != &last->rb_node)
			next = rb_entry_rq(rbnext);
	}

	return bfq_choose_req(bfqd, next, prev, blk_rq_pos(last));
}

/* see the definition of bfq_async_charge_factor for details */
static unsigned long bfq_serv_to_charge(struct request *rq,
					struct bfq_queue *bfqq)
{
	if (bfq_bfqq_sync(bfqq) || bfqq->wr_coeff > 1)
		return blk_rq_sectors(rq);

	/*
	 * If there are no weight-raised queues, then amplify service
	 * by just the async charge factor; otherwise amplify service
	 * by twice the async charge factor, to further reduce latency
	 * for weight-raised queues.
	 */
	if (bfqq->bfqd->wr_busy_queues == 0)
		return blk_rq_sectors(rq) * bfq_async_charge_factor;

	return blk_rq_sectors(rq) * 2 * bfq_async_charge_factor;
}

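/*
 * Worked example: with the default bfq_async_charge_factor of 10, an
 * async request of 8 sectors is charged 80 sectors of budget (160 if
 * some queue is currently weight-raised), while a sync request of 8
 * sectors is charged exactly 8. This charging asymmetry is what skews
 * the throughput distribution in favor of sync I/O.
 */
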
/**
 * bfq_updated_next_req - update the queue after a new next_rq selection.
 * @bfqd: the device data the queue belongs to.
 * @bfqq: the queue to update.
 *
 * If the first request of a queue changes we make sure that the queue
 * has enough budget to serve at least its first request (if the
 * request has grown). We do this because if the queue has not enough
 * budget for its first request, it has to go through two dispatch
 * rounds to actually get it dispatched.
 */
static void bfq_updated_next_req(struct bfq_data *bfqd,
				 struct bfq_queue *bfqq)
{
	struct bfq_entity *entity = &bfqq->entity;
	struct request *next_rq = bfqq->next_rq;
	unsigned long new_budget;

	if (!next_rq)
		return;

	if (bfqq == bfqd->in_service_queue)
		/*
		 * In order not to break guarantees, budgets cannot be
		 * changed after an entity has been selected.
		 */
		return;

	new_budget = max_t(unsigned long, bfqq->max_budget,
			   bfq_serv_to_charge(next_rq, bfqq));
	if (entity->budget != new_budget) {
		entity->budget = new_budget;
		bfq_log_bfqq(bfqd, bfqq, "updated next rq: new budget %lu",
					 new_budget);
		bfq_requeue_bfqq(bfqd, bfqq, false);
	}
}

static unsigned int bfq_wr_duration(struct bfq_data *bfqd)
{
	u64 dur;

	if (bfqd->bfq_wr_max_time > 0)
		return bfqd->bfq_wr_max_time;

	dur = bfqd->RT_prod;
	do_div(dur, bfqd->peak_rate);

	/*
	 * Limit duration between 3 and 13 seconds. Tests show that
	 * higher values than 13 seconds often yield the opposite of
	 * the desired result, i.e., worsen responsiveness by letting
	 * non-interactive and non-soft-real-time applications
	 * preserve weight raising for a too long time interval.
	 *
	 * On the other hand, lower values than 3 seconds make it
	 * difficult for most interactive tasks to complete their jobs
	 * before weight-raising finishes.
	 */
	if (dur > msecs_to_jiffies(13000))
		dur = msecs_to_jiffies(13000);
	else if (dur < msecs_to_jiffies(3000))
		dur = msecs_to_jiffies(3000);

	return dur;
}

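/*
 * Worked example: RT_prod caches the product R * T for the current
 * speed class, so the division above computes dur = (R / r) * T. If
 * the estimated peak rate r of the device at hand is half the
 * reference rate R of its class, weight raising lasts twice the
 * reference time T; e.g., a reference time of 1.4 s would yield
 * 2.8 s, which the clamp above then raises to the 3 s minimum.
 */
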
/* switch back from soft real-time to interactive weight raising */
static void switch_back_to_interactive_wr(struct bfq_queue *bfqq,
					  struct bfq_data *bfqd)
{
	bfqq->wr_coeff = bfqd->bfq_wr_coeff;
	bfqq->wr_cur_max_time = bfq_wr_duration(bfqd);
	bfqq->last_wr_start_finish = bfqq->wr_start_at_switch_to_srt;
}

static void
bfq_bfqq_resume_state(struct bfq_queue *bfqq, struct bfq_data *bfqd,
		      struct bfq_io_cq *bic, bool bfq_already_existing)
{
	unsigned int old_wr_coeff = bfqq->wr_coeff;
	bool busy = bfq_already_existing && bfq_bfqq_busy(bfqq);

	if (bic->saved_has_short_ttime)
		bfq_mark_bfqq_has_short_ttime(bfqq);
	else
		bfq_clear_bfqq_has_short_ttime(bfqq);

	if (bic->saved_IO_bound)
		bfq_mark_bfqq_IO_bound(bfqq);
	else
		bfq_clear_bfqq_IO_bound(bfqq);

	bfqq->ttime = bic->saved_ttime;
	bfqq->wr_coeff = bic->saved_wr_coeff;
	bfqq->wr_start_at_switch_to_srt = bic->saved_wr_start_at_switch_to_srt;
	bfqq->last_wr_start_finish = bic->saved_last_wr_start_finish;
	bfqq->wr_cur_max_time = bic->saved_wr_cur_max_time;

	if (bfqq->wr_coeff > 1 && (bfq_bfqq_in_large_burst(bfqq) ||
	    time_is_before_jiffies(bfqq->last_wr_start_finish +
				   bfqq->wr_cur_max_time))) {
		if (bfqq->wr_cur_max_time == bfqd->bfq_wr_rt_max_time &&
		    !bfq_bfqq_in_large_burst(bfqq) &&
		    time_is_after_eq_jiffies(bfqq->wr_start_at_switch_to_srt +
					     bfq_wr_duration(bfqd))) {
			switch_back_to_interactive_wr(bfqq, bfqd);
		} else {
			bfqq->wr_coeff = 1;
			bfq_log_bfqq(bfqq->bfqd, bfqq,
				     "resume state: switching off wr");
		}
	}

	/* make sure weight will be updated, however we got here */
	bfqq->entity.prio_changed = 1;

	if (likely(!busy))
		return;

	if (old_wr_coeff == 1 && bfqq->wr_coeff > 1)
		bfqd->wr_busy_queues++;
	else if (old_wr_coeff > 1 && bfqq->wr_coeff == 1)
		bfqd->wr_busy_queues--;
}

static int bfqq_process_refs(struct bfq_queue *bfqq)
{
	return bfqq->ref - bfqq->allocated - bfqq->entity.on_st;
}

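/*
 * For example: a queue with ref == 3, of which one reference is held
 * by an allocated but not yet completed request and one by the
 * scheduler because the entity is on a service tree (on_st == 1), has
 * exactly one remaining reference actually held by a process.
 */
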
/* Empty burst list and add just bfqq (see comments on bfq_handle_burst) */
static void bfq_reset_burst_list(struct bfq_data *bfqd, struct bfq_queue *bfqq)
{
	struct bfq_queue *item;
	struct hlist_node *n;

	hlist_for_each_entry_safe(item, n, &bfqd->burst_list, burst_list_node)
		hlist_del_init(&item->burst_list_node);
	hlist_add_head(&bfqq->burst_list_node, &bfqd->burst_list);
	bfqd->burst_size = 1;
	bfqd->burst_parent_entity = bfqq->entity.parent;
}

/* Add bfqq to the list of queues in current burst (see bfq_handle_burst) */
static void bfq_add_to_burst(struct bfq_data *bfqd, struct bfq_queue *bfqq)
{
	/* Increment burst size to take into account also bfqq */
	bfqd->burst_size++;

	if (bfqd->burst_size == bfqd->bfq_large_burst_thresh) {
		struct bfq_queue *pos, *bfqq_item;
		struct hlist_node *n;

		/*
		 * Enough queues have been activated shortly after each
		 * other to consider this burst as large.
		 */
		bfqd->large_burst = true;

		/*
		 * We can now mark all queues in the burst list as
		 * belonging to a large burst.
		 */
		hlist_for_each_entry(bfqq_item, &bfqd->burst_list,
				     burst_list_node)
			bfq_mark_bfqq_in_large_burst(bfqq_item);
		bfq_mark_bfqq_in_large_burst(bfqq);

		/*
		 * From now on, and until the current burst finishes, any
		 * new queue being activated shortly after the last queue
		 * was inserted in the burst can be immediately marked as
		 * belonging to a large burst. So the burst list is not
		 * needed any more. Remove it.
		 */
		hlist_for_each_entry_safe(pos, n, &bfqd->burst_list,
					  burst_list_node)
			hlist_del_init(&pos->burst_list_node);
	} else /*
		* Burst not yet large: add bfqq to the burst list. Do
		* not increment the ref counter for bfqq, because bfqq
		* is removed from the burst list before freeing bfqq
		* in put_queue.
		*/
		hlist_add_head(&bfqq->burst_list_node, &bfqd->burst_list);
}

/*
 * If many queues belonging to the same group happen to be created
 * shortly after each other, then the processes associated with these
 * queues have typically a common goal. In particular, bursts of queue
 * creations are usually caused by services or applications that spawn
 * many parallel threads/processes. Examples are systemd during boot,
 * or git grep. To help these processes get their job done as soon as
 * possible, it is usually better to not grant either weight-raising
 * or device idling to their queues.
 *
 * In this comment we describe, firstly, the reasons why this fact
 * holds, and, secondly, the next function, which implements the main
 * steps needed to properly mark these queues so that they can then be
 * treated in a different way.
 *
 * The above services or applications benefit mostly from a high
 * throughput: the quicker the requests of the activated queues are
 * cumulatively served, the sooner the target job of these queues gets
 * completed. As a consequence, weight-raising any of these queues,
 * which also implies idling the device for it, is almost always
 * counterproductive. In most cases it just lowers throughput.
 *
 * On the other hand, a burst of queue creations may be caused also by
 * the start of an application that does not consist of a lot of
 * parallel I/O-bound threads. In fact, with a complex application,
 * several short processes may need to be executed to start-up the
 * application. In this respect, to start an application as quickly as
 * possible, the best thing to do is in any case to privilege the I/O
 * related to the application with respect to all other
 * I/O. Therefore, the best strategy to start as quickly as possible
 * an application that causes a burst of queue creations is to
 * weight-raise all the queues created during the burst. This is the
 * exact opposite of the best strategy for the other type of bursts.
 *
 * In the end, to take the best action for each of the two cases, the
 * two types of bursts need to be distinguished. Fortunately, this
 * seems relatively easy, by looking at the sizes of the bursts. In
 * particular, we found a threshold such that only bursts with a
 * larger size than that threshold are apparently caused by
 * services or commands such as systemd or git grep. For brevity,
 * hereafter we call just 'large' these bursts. BFQ *does not*
 * weight-raise queues whose creation occurs in a large burst. In
 * addition, for each of these queues BFQ performs or does not perform
 * idling depending on which choice boosts the throughput more. The
 * exact choice depends on the device and request pattern at
 * hand.
 *
 * Unfortunately, false positives may occur while an interactive task
 * is starting (e.g., an application is being started). The
 * consequence is that the queues associated with the task do not
 * enjoy weight raising as expected. Fortunately these false positives
 * are very rare. They typically occur if some service happens to
 * start doing I/O exactly when the interactive task starts.
 *
 * Turning back to the next function, it implements all the steps
 * needed to detect the occurrence of a large burst and to properly
 * mark all the queues belonging to it (so that they can then be
 * treated in a different way). This goal is achieved by maintaining a
 * "burst list" that holds, temporarily, the queues that belong to the
 * burst in progress. The list is then used to mark these queues as
 * belonging to a large burst if the burst does become large. The main
 * steps are the following.
 *
 * . when the very first queue is created, the queue is inserted into the
 *   list (as it could be the first queue in a possible burst)
 *
 * . if the current burst has not yet become large, and a queue Q that does
 *   not yet belong to the burst is activated shortly after the last time
 *   at which a new queue entered the burst list, then the function appends
 *   Q to the burst list
 *
 * . if, as a consequence of the previous step, the burst size reaches
 *   the large-burst threshold, then
 *
 *     . all the queues in the burst list are marked as belonging to a
 *       large burst
 *
 *     . the burst list is deleted; in fact, the burst list already served
 *       its purpose (temporarily keeping track of the queues in a burst,
 *       so as to be able to mark them as belonging to a large burst in the
 *       previous sub-step), and now is not needed any more
 *
 *     . the device enters a large-burst mode
 *
 * . if a queue Q that does not belong to the burst is created while
 *   the device is in large-burst mode and shortly after the last time
 *   at which a queue either entered the burst list or was marked as
 *   belonging to the current large burst, then Q is immediately marked
 *   as belonging to a large burst.
 *
 * . if a queue Q that does not belong to the burst is created a while
 *   later, i.e., not shortly after the last time at which a queue
 *   either entered the burst list or was marked as belonging to the
 *   current large burst, then the current burst is deemed finished and:
 *
 *     . the large-burst mode is reset if set
 *
 *     . the burst list is emptied
 *
 *     . Q is inserted in the burst list, as Q may be the first queue
 *       in a possible new burst (then the burst list contains just Q
 *       after this step).
 */
static void bfq_handle_burst(struct bfq_data *bfqd, struct bfq_queue *bfqq)
{
	/*
	 * If bfqq is already in the burst list or is part of a large
	 * burst, or finally has just been split, then there is
	 * nothing else to do.
	 */
	if (!hlist_unhashed(&bfqq->burst_list_node) ||
	    bfq_bfqq_in_large_burst(bfqq) ||
	    time_is_after_eq_jiffies(bfqq->split_time +
				     msecs_to_jiffies(10)))
		return;

	/*
	 * If bfqq's creation happens late enough, or bfqq belongs to
	 * a different group than the burst group, then the current
	 * burst is finished, and related data structures must be
	 * reset.
	 *
	 * In this respect, consider the special case where bfqq is
	 * the very first queue created after BFQ is selected for this
	 * device. In this case, last_ins_in_burst and
	 * burst_parent_entity are not yet significant when we get
	 * here. But it is easy to verify that, whether or not the
	 * following condition is true, bfqq will end up being
	 * inserted into the burst list. In particular the list will
	 * happen to contain only bfqq. And this is exactly what has
	 * to happen, as bfqq may be the first queue of the first
	 * burst.
	 */
	if (time_is_before_jiffies(bfqd->last_ins_in_burst +
	    bfqd->bfq_burst_interval) ||
	    bfqq->entity.parent != bfqd->burst_parent_entity) {
		bfqd->large_burst = false;
		bfq_reset_burst_list(bfqd, bfqq);
		goto end;
	}

	/*
	 * If we get here, then bfqq is being activated shortly after the
	 * last queue. So, if the current burst is also large, we can mark
	 * bfqq as belonging to this large burst immediately.
	 */
	if (bfqd->large_burst) {
		bfq_mark_bfqq_in_large_burst(bfqq);
		goto end;
	}

	/*
	 * If we get here, then a large-burst state has not yet been
	 * reached, but bfqq is being activated shortly after the last
	 * queue. Then we add bfqq to the burst.
	 */
	bfq_add_to_burst(bfqd, bfqq);
end:
	/*
	 * At this point, bfqq either has been added to the current
	 * burst or has caused the current burst to terminate and a
	 * possible new burst to start. In particular, in the second
	 * case, bfqq has become the first queue in the possible new
	 * burst. In both cases last_ins_in_burst needs to be moved
	 * forward.
	 */
	bfqd->last_ins_in_burst = jiffies;
}

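/*
 * Example timeline: if queues Q1..Q4 are each created within
 * bfq_burst_interval of the previous one, they all land in the burst
 * list; when the list size hits bfq_large_burst_thresh, all of them
 * (and any later closely-following queue) are marked in_large_burst
 * and thus denied weight-raising. A queue created well after Q4
 * instead resets the list and may start a new burst.
 */
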
static int bfq_bfqq_budget_left(struct bfq_queue *bfqq)
{
	struct bfq_entity *entity = &bfqq->entity;

	return entity->budget - entity->service;
}

/*
 * If enough samples have been computed, return the current max budget
 * stored in bfqd, which is dynamically updated according to the
 * estimated disk peak rate; otherwise return the default max budget
 */
static int bfq_max_budget(struct bfq_data *bfqd)
{
	if (bfqd->budgets_assigned < bfq_stats_min_budgets)
		return bfq_default_max_budget;
	else
		return bfqd->bfq_max_budget;
}

/*
 * Return min budget, which is a fraction of the current or default
 * max budget (trying with 1/32)
 */
static int bfq_min_budget(struct bfq_data *bfqd)
{
	if (bfqd->budgets_assigned < bfq_stats_min_budgets)
		return bfq_default_max_budget / 32;
	else
		return bfqd->bfq_max_budget / 32;
}

/*
 * The next function, invoked after the input queue bfqq switches from
 * idle to busy, updates the budget of bfqq. The function also tells
 * whether the in-service queue should be expired, by returning
 * true. The purpose of expiring the in-service queue is to give bfqq
 * the chance to possibly preempt the in-service queue, and the reason
 * for preempting the in-service queue is to achieve one of the two
 * goals below.
 *
 * 1. Guarantee to bfqq its reserved bandwidth even if bfqq has
 * expired because it has remained idle. In particular, bfqq may have
 * expired for one of the following two reasons:
 *
 * - BFQQE_NO_MORE_REQUESTS bfqq did not enjoy any device idling
 *   and did not make it to issue a new request before its last
 *   request was served;
 *
 * - BFQQE_TOO_IDLE bfqq did enjoy device idling, but did not issue
 *   a new request before the expiration of the idling-time.
 *
 * Even if bfqq has expired for one of the above reasons, the process
 * associated with the queue may be however issuing requests greedily,
 * and thus be sensitive to the bandwidth it receives (bfqq may have
 * remained idle for other reasons: CPU high load, bfqq not enjoying
 * idling, I/O throttling somewhere in the path from the process to
 * the I/O scheduler, ...). But if, after every expiration for one of
 * the above two reasons, bfqq has to wait for the service of at least
 * one full budget of another queue before being served again, then
 * bfqq is likely to get a much lower bandwidth or resource time than
 * its reserved ones. To address this issue, two countermeasures need
 * to be taken.
 *
 * First, the budget and the timestamps of bfqq need to be updated in
 * a special way on bfqq reactivation: they need to be updated as if
 * bfqq did not remain idle and did not expire. In fact, if they are
 * computed as if bfqq expired and remained idle until reactivation,
 * then the process associated with bfqq is treated as if, instead of
 * being greedy, it stopped issuing requests when bfqq remained idle,
 * and restarts issuing requests only on this reactivation. In other
 * words, the scheduler does not help the process recover the "service
 * hole" between bfqq expiration and reactivation. As a consequence,
 * the process receives a lower bandwidth than its reserved one. In
 * contrast, to recover this hole, the budget must be updated as if
 * bfqq was not expired at all before this reactivation, i.e., it must
 * be set to the value of the remaining budget when bfqq was
 * expired. Along the same line, timestamps need to be assigned the
 * value they had the last time bfqq was selected for service, i.e.,
 * before last expiration. Thus timestamps need to be back-shifted
 * with respect to their normal computation (see [1] for more details
 * on this tricky aspect).
 *
 * Secondly, to allow the process to recover the hole, the in-service
 * queue must be expired too, to give bfqq the chance to preempt it
 * immediately. In fact, if bfqq has to wait for a full budget of the
 * in-service queue to be completed, then it may become impossible to
 * let the process recover the hole, even if the back-shifted
 * timestamps of bfqq are lower than those of the in-service queue. If
 * this happens for most or all of the holes, then the process may not
 * receive its reserved bandwidth. In this respect, it is worth noting
 * that, since the service of outstanding requests is not preemptible,
 * a small fraction of the holes may however be unrecoverable, thereby
 * causing a small loss of bandwidth.
 *
 * The last important point is detecting whether bfqq does need this
 * bandwidth recovery. In this respect, the next function deems the
 * process associated with bfqq greedy, and thus allows it to recover
 * the hole, if: 1) the process is waiting for the arrival of a new
 * request (which implies that bfqq expired for one of the above two
 * reasons), and 2) such a request has arrived soon. The first
 * condition is controlled through the flag non_blocking_wait_rq,
 * while the second through the flag arrived_in_time. If both
 * conditions hold, then the function computes the budget in the
 * above-described special way, and signals that the in-service queue
 * should be expired. Timestamp back-shifting is done later in
 * __bfq_activate_entity.
 *
 * 2. Reduce latency. Even if timestamps are not backshifted to let
 * the process associated with bfqq recover a service hole, bfqq may
 * however happen to have, after being (re)activated, a lower finish
 * timestamp than the in-service queue. That is, the next budget of
 * bfqq may have to be completed before the one of the in-service
 * queue. If this is the case, then preempting the in-service queue
 * allows this goal to be achieved, apart from the unpreemptible,
 * outstanding requests mentioned above.
 *
 * Unfortunately, regardless of which of the above two goals one wants
 * to achieve, service trees need first to be updated to know whether
 * the in-service queue must be preempted. To have service trees
 * correctly updated, the in-service queue must be expired and
 * rescheduled, and bfqq must be scheduled too. This is one of the
 * most costly operations (in future versions, the scheduling
 * mechanism may be re-designed in such a way to make it possible to
 * know whether preemption is needed without needing to update service
 * trees). In addition, queue preemptions almost always cause random
 * I/O, and thus loss of throughput. Because of these facts, the next
 * function adopts the following simple scheme to avoid both costly
 * operations and too frequent preemptions: it requests the expiration
 * of the in-service queue (unconditionally) only for queues that need
 * to recover a hole, or that either are weight-raised or deserve to
 * be weight-raised.
 */
static bool bfq_bfqq_update_budg_for_activation(struct bfq_data *bfqd,
						struct bfq_queue *bfqq,
						bool arrived_in_time,
						bool wr_or_deserves_wr)
{
	struct bfq_entity *entity = &bfqq->entity;

	if (bfq_bfqq_non_blocking_wait_rq(bfqq) && arrived_in_time) {
		/*
		 * We do not clear the flag non_blocking_wait_rq here, as
		 * the latter is used in bfq_activate_bfqq to signal
		 * that timestamps need to be back-shifted (and is
		 * cleared right after).
		 */

		/*
		 * In the next assignment we rely on the fact that
		 * neither entity->service nor entity->budget is
		 * updated on expiration if bfqq is empty (see
		 * __bfq_bfqq_recalc_budget). Thus both quantities
		 * remain unchanged after such an expiration, and the
		 * following statement therefore assigns to
		 * entity->budget the remaining budget on such an
		 * expiration. For clarity, entity->service is not
		 * updated on expiration in any case, and, in normal
		 * operation, is reset only when bfqq is selected for
		 * service (see bfq_get_next_queue).
		 */
		entity->budget = min_t(unsigned long,
				       bfq_bfqq_budget_left(bfqq),
				       bfqq->max_budget);

		return true;
	}

	entity->budget = max_t(unsigned long, bfqq->max_budget,
			       bfq_serv_to_charge(bfqq->next_rq, bfqq));
	bfq_clear_bfqq_non_blocking_wait_rq(bfqq);
	return wr_or_deserves_wr;
}

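/*
 * Worked example: suppose bfqq expired while empty with a budget of
 * 16384 sectors, of which 12000 had been consumed, and that a new
 * request then arrives in time. The assignment above sets
 * entity->budget to min(16384 - 12000, max_budget) = 4384 sectors:
 * exactly the service "hole" the queue is entitled to recover before
 * its next expiration.
 */
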
/*
 * Return the farthest future time instant according to jiffies
 * macros.
 */
static unsigned long bfq_greatest_from_now(void)
{
	return jiffies + MAX_JIFFY_OFFSET;
}

/*
 * Return the farthest past time instant according to jiffies
 * macros.
 */
static unsigned long bfq_smallest_from_now(void)
{
	return jiffies - MAX_JIFFY_OFFSET;
}

static void bfq_update_bfqq_wr_on_rq_arrival(struct bfq_data *bfqd,
					     struct bfq_queue *bfqq,
					     unsigned int old_wr_coeff,
					     bool wr_or_deserves_wr,
					     bool interactive,
					     bool in_burst,
					     bool soft_rt)
{
	if (old_wr_coeff == 1 && wr_or_deserves_wr) {
		/* start a weight-raising period */
		if (interactive) {
			bfqq->wr_coeff = bfqd->bfq_wr_coeff;
			bfqq->wr_cur_max_time = bfq_wr_duration(bfqd);
		} else {
			/*
			 * No interactive weight raising in progress
			 * here: assign minus infinity to
			 * wr_start_at_switch_to_srt, to make sure
			 * that, at the end of the soft-real-time
			 * weight raising period that is starting
			 * now, no interactive weight-raising period
			 * may be wrongly considered as still in
			 * progress (and thus actually started by
			 * mistake).
			 */
			bfqq->wr_start_at_switch_to_srt =
				bfq_smallest_from_now();
			bfqq->wr_coeff = bfqd->bfq_wr_coeff *
				BFQ_SOFTRT_WEIGHT_FACTOR;
			bfqq->wr_cur_max_time =
				bfqd->bfq_wr_rt_max_time;
		}

		/*
		 * If needed, further reduce budget to make sure it is
		 * close to bfqq's backlog, so as to reduce the
		 * scheduling-error component due to a too large
		 * budget. Do not care about throughput consequences,
		 * but only about latency. Finally, do not assign a
		 * too small budget either, to avoid increasing
		 * latency by causing too frequent expirations.
		 */
		bfqq->entity.budget = min_t(unsigned long,
					    bfqq->entity.budget,
					    2 * bfq_min_budget(bfqd));
	} else if (old_wr_coeff > 1) {
		if (interactive) { /* update wr coeff and duration */
			bfqq->wr_coeff = bfqd->bfq_wr_coeff;
			bfqq->wr_cur_max_time = bfq_wr_duration(bfqd);
		} else if (in_burst)
			bfqq->wr_coeff = 1;
		else if (soft_rt) {
			/*
			 * The application is now or still meeting the
			 * requirements for being deemed soft rt. We
			 * can then correctly and safely (re)charge
			 * the weight-raising duration for the
			 * application with the weight-raising
			 * duration for soft rt applications.
			 *
			 * In particular, doing this recharge now, i.e.,
			 * before the weight-raising period for the
			 * application finishes, reduces the probability
			 * of the following negative scenario:
			 * 1) the weight of a soft rt application is
			 *    raised at startup (as for any newly
			 *    created application),
			 * 2) since the application is not interactive,
			 *    at a certain time weight-raising is
			 *    stopped for the application,
			 * 3) at that time the application happens to
			 *    still have pending requests, and hence
			 *    is destined to not have a chance to be
			 *    deemed soft rt before these requests are
			 *    completed (see the comments to the
			 *    function bfq_bfqq_softrt_next_start()
			 *    for details on soft rt detection),
			 * 4) these pending requests experience a high
			 *    latency because the application is not
			 *    weight-raised while they are pending.
			 */
			if (bfqq->wr_cur_max_time !=
				bfqd->bfq_wr_rt_max_time) {
				bfqq->wr_start_at_switch_to_srt =
					bfqq->last_wr_start_finish;

				bfqq->wr_cur_max_time =
					bfqd->bfq_wr_rt_max_time;
				bfqq->wr_coeff = bfqd->bfq_wr_coeff *
					BFQ_SOFTRT_WEIGHT_FACTOR;
			}
			bfqq->last_wr_start_finish = jiffies;
		}
	}
}

static bool bfq_bfqq_idle_for_long_time(struct bfq_data *bfqd,
					struct bfq_queue *bfqq)
{
	return bfqq->dispatched == 0 &&
		time_is_before_jiffies(
			bfqq->budget_timeout +
			bfqd->bfq_wr_min_idle_time);
}

1343static void bfq_bfqq_handle_idle_busy_switch(struct bfq_data *bfqd,
1344 struct bfq_queue *bfqq,
44e44a1b
PV
1345 int old_wr_coeff,
1346 struct request *rq,
1347 bool *interactive)
aee69d78 1348{
e1b2324d
AA
1349 bool soft_rt, in_burst, wr_or_deserves_wr,
1350 bfqq_wants_to_preempt,
44e44a1b 1351 idle_for_long_time = bfq_bfqq_idle_for_long_time(bfqd, bfqq),
aee69d78
PV
1352 /*
1353 * See the comments on
1354 * bfq_bfqq_update_budg_for_activation for
1355 * details on the usage of the next variable.
1356 */
1357 arrived_in_time = ktime_get_ns() <=
1358 bfqq->ttime.last_end_request +
1359 bfqd->bfq_slice_idle * 3;
1360
e21b7a0b
AA
1361 bfqg_stats_update_io_add(bfqq_group(RQ_BFQQ(rq)), bfqq, rq->cmd_flags);
1362
aee69d78 1363 /*
44e44a1b
PV
1364 * bfqq deserves to be weight-raised if:
1365 * - it is sync,
e1b2324d 1366 * - it does not belong to a large burst,
36eca894
AA
1367 * - it has been idle for enough time or is soft real-time,
1368 * - is linked to a bfq_io_cq (it is not shared in any sense).
44e44a1b 1369 */
e1b2324d 1370 in_burst = bfq_bfqq_in_large_burst(bfqq);
77b7dcea 1371 soft_rt = bfqd->bfq_wr_max_softrt_rate > 0 &&
e1b2324d 1372 !in_burst &&
77b7dcea 1373 time_is_before_jiffies(bfqq->soft_rt_next_start);
e1b2324d 1374 *interactive = !in_burst && idle_for_long_time;
44e44a1b
PV
1375 wr_or_deserves_wr = bfqd->low_latency &&
1376 (bfqq->wr_coeff > 1 ||
36eca894
AA
1377 (bfq_bfqq_sync(bfqq) &&
1378 bfqq->bic && (*interactive || soft_rt)));
44e44a1b
PV
1379
1380 /*
1381 * Using the last flag, update budget and check whether bfqq
1382 * may want to preempt the in-service queue.
aee69d78
PV
1383 */
1384 bfqq_wants_to_preempt =
1385 bfq_bfqq_update_budg_for_activation(bfqd, bfqq,
44e44a1b
PV
1386 arrived_in_time,
1387 wr_or_deserves_wr);
aee69d78 1388
e1b2324d
AA
1389 /*
1390 * If bfqq happened to be activated in a burst, but has been
1391 * idle for much more than an interactive queue, then we
1392 * assume that, in the overall I/O initiated in the burst, the
1393 * I/O associated with bfqq is finished. So bfqq does not need
1394 * to be treated as a queue belonging to a burst
1395 * anymore. Accordingly, we reset bfqq's in_large_burst flag
1396 * if set, and remove bfqq from the burst list if it's
1397 * there. We do not decrement burst_size, because the fact
1398 * that bfqq does not need to belong to the burst list any
1399 * more does not invalidate the fact that bfqq was created in
1400 * a burst.
1401 */
1402 if (likely(!bfq_bfqq_just_created(bfqq)) &&
1403 idle_for_long_time &&
1404 time_is_before_jiffies(
1405 bfqq->budget_timeout +
1406 msecs_to_jiffies(10000))) {
1407 hlist_del_init(&bfqq->burst_list_node);
1408 bfq_clear_bfqq_in_large_burst(bfqq);
1409 }
1410
1411 bfq_clear_bfqq_just_created(bfqq);
1412
1413
aee69d78
PV
1414 if (!bfq_bfqq_IO_bound(bfqq)) {
1415 if (arrived_in_time) {
1416 bfqq->requests_within_timer++;
1417 if (bfqq->requests_within_timer >=
1418 bfqd->bfq_requests_within_timer)
1419 bfq_mark_bfqq_IO_bound(bfqq);
1420 } else
1421 bfqq->requests_within_timer = 0;
1422 }
1423
44e44a1b 1424 if (bfqd->low_latency) {
36eca894
AA
1425 if (unlikely(time_is_after_jiffies(bfqq->split_time)))
1426 /* wraparound */
1427 bfqq->split_time =
1428 jiffies - bfqd->bfq_wr_min_idle_time - 1;
1429
1430 if (time_is_before_jiffies(bfqq->split_time +
1431 bfqd->bfq_wr_min_idle_time)) {
1432 bfq_update_bfqq_wr_on_rq_arrival(bfqd, bfqq,
1433 old_wr_coeff,
1434 wr_or_deserves_wr,
1435 *interactive,
e1b2324d 1436 in_burst,
36eca894
AA
1437 soft_rt);
1438
1439 if (old_wr_coeff != bfqq->wr_coeff)
1440 bfqq->entity.prio_changed = 1;
1441 }
44e44a1b
PV
1442 }
1443
77b7dcea
PV
1444 bfqq->last_idle_bklogged = jiffies;
1445 bfqq->service_from_backlogged = 0;
1446 bfq_clear_bfqq_softrt_update(bfqq);
1447
aee69d78
PV
1448 bfq_add_bfqq_busy(bfqd, bfqq);
1449
1450 /*
1451 * Expire in-service queue only if preemption may be needed
1452 * for guarantees. In this respect, the function
1453 * next_queue_may_preempt just checks a simple, necessary
1454 * condition, and not a sufficient condition based on
1455 * timestamps. In fact, for the latter condition to be
1456 * evaluated, timestamps would need first to be updated, and
1457 * this operation is quite costly (see the comments on the
1458 * function bfq_bfqq_update_budg_for_activation).
1459 */
1460 if (bfqd->in_service_queue && bfqq_wants_to_preempt &&
77b7dcea 1461 bfqd->in_service_queue->wr_coeff < bfqq->wr_coeff &&
aee69d78
PV
1462 next_queue_may_preempt(bfqd))
1463 bfq_bfqq_expire(bfqd, bfqd->in_service_queue,
1464 false, BFQQE_PREEMPTED);
1465}
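
/*
 * Editor's sketch, not part of the original file: the weight-raising
 * eligibility computed above, condensed into a standalone predicate.
 * All names are hypothetical; soft_rt is assumed to be computed as in
 * bfq_bfqq_handle_idle_busy_switch (rate limit set, not in a large
 * burst, soft_rt_next_start elapsed).
 */
#include <stdbool.h>

static bool deserves_weight_raising(bool low_latency, unsigned int wr_coeff,
				    bool sync, bool has_bic,
				    bool in_large_burst,
				    bool idle_for_long_time, bool soft_rt)
{
	bool interactive = !in_large_burst && idle_for_long_time;

	return low_latency &&
		(wr_coeff > 1 ||	/* already weight-raised */
		 (sync && has_bic && (interactive || soft_rt)));
}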
1466
1467static void bfq_add_request(struct request *rq)
1468{
1469 struct bfq_queue *bfqq = RQ_BFQQ(rq);
1470 struct bfq_data *bfqd = bfqq->bfqd;
1471 struct request *next_rq, *prev;
44e44a1b
PV
1472 unsigned int old_wr_coeff = bfqq->wr_coeff;
1473 bool interactive = false;
aee69d78
PV
1474
1475 bfq_log_bfqq(bfqd, bfqq, "add_request %d", rq_is_sync(rq));
1476 bfqq->queued[rq_is_sync(rq)]++;
1477 bfqd->queued++;
1478
1479 elv_rb_add(&bfqq->sort_list, rq);
1480
1481 /*
1482 * Check if this request is a better next-serve candidate.
1483 */
1484 prev = bfqq->next_rq;
1485 next_rq = bfq_choose_req(bfqd, bfqq->next_rq, rq, bfqd->last_position);
1486 bfqq->next_rq = next_rq;
1487
36eca894
AA
1488 /*
1489 * Adjust priority tree position, if next_rq changes.
1490 */
1491 if (prev != bfqq->next_rq)
1492 bfq_pos_tree_add_move(bfqd, bfqq);
1493
aee69d78 1494 if (!bfq_bfqq_busy(bfqq)) /* switching to busy ... */
44e44a1b
PV
1495 bfq_bfqq_handle_idle_busy_switch(bfqd, bfqq, old_wr_coeff,
1496 rq, &interactive);
1497 else {
1498 if (bfqd->low_latency && old_wr_coeff == 1 && !rq_is_sync(rq) &&
1499 time_is_before_jiffies(
1500 bfqq->last_wr_start_finish +
1501 bfqd->bfq_wr_min_inter_arr_async)) {
1502 bfqq->wr_coeff = bfqd->bfq_wr_coeff;
1503 bfqq->wr_cur_max_time = bfq_wr_duration(bfqd);
1504
cfd69712 1505 bfqd->wr_busy_queues++;
44e44a1b
PV
1506 bfqq->entity.prio_changed = 1;
1507 }
1508 if (prev != bfqq->next_rq)
1509 bfq_updated_next_req(bfqd, bfqq);
1510 }
1511
1512 /*
1513 * Assign jiffies to last_wr_start_finish in the following
1514 * cases:
1515 *
1516 * . if bfqq is not going to be weight-raised, because, for
1517 * non weight-raised queues, last_wr_start_finish stores the
1518 * arrival time of the last request; as of now, this piece
1519 * of information is used only for deciding whether to
1520 * weight-raise async queues
1521 *
1522 * . if bfqq is not weight-raised, because, if bfqq is now
1523 * switching to weight-raised, then last_wr_start_finish
1524 * stores the time when weight-raising starts
1525 *
1526 * . if bfqq is interactive, because, regardless of whether
1527 * bfqq is currently weight-raised, the weight-raising
1528 * period must start or restart (this case is considered
1529 * separately because it is not detected by the above
1530 * conditions, if bfqq is already weight-raised)
77b7dcea
PV
1531 *
1532 * last_wr_start_finish has to be updated also if bfqq is soft
1533 * real-time, because the weight-raising period is constantly
1534 * restarted on idle-to-busy transitions for these queues, but
1535 * this is already done in bfq_bfqq_handle_idle_busy_switch if
1536 * needed.
44e44a1b
PV
1537 */
1538 if (bfqd->low_latency &&
1539 (old_wr_coeff == 1 || bfqq->wr_coeff == 1 || interactive))
1540 bfqq->last_wr_start_finish = jiffies;
aee69d78
PV
1541}
1542
1543static struct request *bfq_find_rq_fmerge(struct bfq_data *bfqd,
1544 struct bio *bio,
1545 struct request_queue *q)
1546{
1547 struct bfq_queue *bfqq = bfqd->bio_bfqq;
1548
1549
1550 if (bfqq)
1551 return elv_rb_find(&bfqq->sort_list, bio_end_sector(bio));
1552
1553 return NULL;
1554}
1555
ab0e43e9
PV
1556static sector_t get_sdist(sector_t last_pos, struct request *rq)
1557{
1558 if (last_pos)
1559 return abs(blk_rq_pos(rq) - last_pos);
1560
1561 return 0;
1562}
1563
aee69d78
PV
1564#if 0 /* Still not clear if we can do without next two functions */
1565static void bfq_activate_request(struct request_queue *q, struct request *rq)
1566{
1567 struct bfq_data *bfqd = q->elevator->elevator_data;
1568
1569 bfqd->rq_in_driver++;
aee69d78
PV
1570}
1571
1572static void bfq_deactivate_request(struct request_queue *q, struct request *rq)
1573{
1574 struct bfq_data *bfqd = q->elevator->elevator_data;
1575
1576 bfqd->rq_in_driver--;
1577}
1578#endif
1579
1580static void bfq_remove_request(struct request_queue *q,
1581 struct request *rq)
1582{
1583 struct bfq_queue *bfqq = RQ_BFQQ(rq);
1584 struct bfq_data *bfqd = bfqq->bfqd;
1585 const int sync = rq_is_sync(rq);
1586
1587 if (bfqq->next_rq == rq) {
1588 bfqq->next_rq = bfq_find_next_rq(bfqd, bfqq, rq);
1589 bfq_updated_next_req(bfqd, bfqq);
1590 }
1591
1592 if (rq->queuelist.prev != &rq->queuelist)
1593 list_del_init(&rq->queuelist);
1594 bfqq->queued[sync]--;
1595 bfqd->queued--;
1596 elv_rb_del(&bfqq->sort_list, rq);
1597
1598 elv_rqhash_del(q, rq);
1599 if (q->last_merge == rq)
1600 q->last_merge = NULL;
1601
1602 if (RB_EMPTY_ROOT(&bfqq->sort_list)) {
1603 bfqq->next_rq = NULL;
1604
1605 if (bfq_bfqq_busy(bfqq) && bfqq != bfqd->in_service_queue) {
e21b7a0b 1606 bfq_del_bfqq_busy(bfqd, bfqq, false);
aee69d78
PV
1607 /*
1608 * bfqq emptied. In normal operation, when
1609 * bfqq is empty, bfqq->entity.service and
1610 * bfqq->entity.budget must contain,
1611 * respectively, the service received and the
1612 * budget used last time bfqq emptied. These
1613 * facts do not hold in this case, as at least
1614 * this last removal occurred while bfqq is
1615 * not in service. To avoid inconsistencies,
1616 * reset both bfqq->entity.service and
1617 * bfqq->entity.budget, if bfqq has still a
1618 * process that may issue I/O requests to it.
1619 */
1620 bfqq->entity.budget = bfqq->entity.service = 0;
1621 }
36eca894
AA
1622
1623 /*
1624 * Remove queue from request-position tree as it is empty.
1625 */
1626 if (bfqq->pos_root) {
1627 rb_erase(&bfqq->pos_node, bfqq->pos_root);
1628 bfqq->pos_root = NULL;
1629 }
aee69d78
PV
1630 }
1631
1632 if (rq->cmd_flags & REQ_META)
1633 bfqq->meta_pending--;
e21b7a0b
AA
1634
1635 bfqg_stats_update_io_remove(bfqq_group(bfqq), rq->cmd_flags);
aee69d78
PV
1636}
1637
1638static bool bfq_bio_merge(struct blk_mq_hw_ctx *hctx, struct bio *bio)
1639{
1640 struct request_queue *q = hctx->queue;
1641 struct bfq_data *bfqd = q->elevator->elevator_data;
1642 struct request *free = NULL;
1643 /*
1644 * bfq_bic_lookup grabs the queue_lock: invoke it now and
1645 * store its return value for later use, to avoid nesting
1646 * queue_lock inside the bfqd->lock. We assume that the bic
1647 * returned by bfq_bic_lookup does not go away before
1648 * bfqd->lock is taken.
1649 */
1650 struct bfq_io_cq *bic = bfq_bic_lookup(bfqd, current->io_context, q);
1651 bool ret;
1652
1653 spin_lock_irq(&bfqd->lock);
1654
1655 if (bic)
1656 bfqd->bio_bfqq = bic_to_bfqq(bic, op_is_sync(bio->bi_opf));
1657 else
1658 bfqd->bio_bfqq = NULL;
1659 bfqd->bio_bic = bic;
1660
1661 ret = blk_mq_sched_try_merge(q, bio, &free);
1662
1663 if (free)
1664 blk_mq_free_request(free);
1665 spin_unlock_irq(&bfqd->lock);
1666
1667 return ret;
1668}
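
/*
 * Editor's sketch, not part of the original file: the lock-ordering
 * pattern used in bfq_bio_merge() above, in a generic user-space form:
 * perform the lookup that takes lock A *before* taking lock B, instead
 * of nesting A inside B. All names are hypothetical.
 */
#include <pthread.h>
#include <stddef.h>

static pthread_mutex_t lock_a = PTHREAD_MUTEX_INITIALIZER;
static pthread_mutex_t lock_b = PTHREAD_MUTEX_INITIALIZER;

static void *lookup_under_a(void)
{
	void *obj;

	pthread_mutex_lock(&lock_a);
	obj = NULL;		/* ... find and return the object ... */
	pthread_mutex_unlock(&lock_a);

	return obj;
}

static void operate(void)
{
	/* Lookup first, so lock_a is never acquired while holding lock_b. */
	void *obj = lookup_under_a();

	pthread_mutex_lock(&lock_b);
	(void)obj;		/* ... use obj under lock_b ... */
	pthread_mutex_unlock(&lock_b);
}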
1669
1670static int bfq_request_merge(struct request_queue *q, struct request **req,
1671 struct bio *bio)
1672{
1673 struct bfq_data *bfqd = q->elevator->elevator_data;
1674 struct request *__rq;
1675
1676 __rq = bfq_find_rq_fmerge(bfqd, bio, q);
1677 if (__rq && elv_bio_merge_ok(__rq, bio)) {
1678 *req = __rq;
1679 return ELEVATOR_FRONT_MERGE;
1680 }
1681
1682 return ELEVATOR_NO_MERGE;
1683}
1684
1685static void bfq_request_merged(struct request_queue *q, struct request *req,
1686 enum elv_merge type)
1687{
1688 if (type == ELEVATOR_FRONT_MERGE &&
1689 rb_prev(&req->rb_node) &&
1690 blk_rq_pos(req) <
1691 blk_rq_pos(container_of(rb_prev(&req->rb_node),
1692 struct request, rb_node))) {
1693 struct bfq_queue *bfqq = RQ_BFQQ(req);
1694 struct bfq_data *bfqd = bfqq->bfqd;
1695 struct request *prev, *next_rq;
1696
1697 /* Reposition request in its sort_list */
1698 elv_rb_del(&bfqq->sort_list, req);
1699 elv_rb_add(&bfqq->sort_list, req);
1700
1701 /* Choose next request to be served for bfqq */
1702 prev = bfqq->next_rq;
1703 next_rq = bfq_choose_req(bfqd, bfqq->next_rq, req,
1704 bfqd->last_position);
1705 bfqq->next_rq = next_rq;
1706 /*
36eca894
AA
1707 * If next_rq changes, update both the queue's budget to
1708 * fit the new request and the queue's position in its
1709 * rq_pos_tree.
aee69d78 1710 */
36eca894 1711 if (prev != bfqq->next_rq) {
aee69d78 1712 bfq_updated_next_req(bfqd, bfqq);
36eca894
AA
1713 bfq_pos_tree_add_move(bfqd, bfqq);
1714 }
aee69d78
PV
1715 }
1716}
1717
1718static void bfq_requests_merged(struct request_queue *q, struct request *rq,
1719 struct request *next)
1720{
1721 struct bfq_queue *bfqq = RQ_BFQQ(rq), *next_bfqq = RQ_BFQQ(next);
1722
1723 if (!RB_EMPTY_NODE(&rq->rb_node))
e21b7a0b 1724 goto end;
aee69d78
PV
1725 spin_lock_irq(&bfqq->bfqd->lock);
1726
1727 /*
1728 * If next and rq belong to the same bfq_queue and next is older
1729 * than rq, then reposition rq in the fifo (by substituting next
1730 * with rq). Otherwise, if next and rq belong to different
1731 * bfq_queues, never reposition rq: in fact, we would have to
1732 * reposition it with respect to next's position in its own fifo,
1733 * which would most certainly be too expensive with respect to
1734 * the benefits.
1735 */
1736 if (bfqq == next_bfqq &&
1737 !list_empty(&rq->queuelist) && !list_empty(&next->queuelist) &&
1738 next->fifo_time < rq->fifo_time) {
1739 list_del_init(&rq->queuelist);
1740 list_replace_init(&next->queuelist, &rq->queuelist);
1741 rq->fifo_time = next->fifo_time;
1742 }
1743
1744 if (bfqq->next_rq == next)
1745 bfqq->next_rq = rq;
1746
1747 bfq_remove_request(q, next);
1748
1749 spin_unlock_irq(&bfqq->bfqd->lock);
e21b7a0b
AA
1750end:
1751 bfqg_stats_update_io_merged(bfqq_group(bfqq), next->cmd_flags);
aee69d78
PV
1752}
1753
44e44a1b
PV
1754/* Must be called with bfqq != NULL */
1755static void bfq_bfqq_end_wr(struct bfq_queue *bfqq)
1756{
cfd69712
PV
1757 if (bfq_bfqq_busy(bfqq))
1758 bfqq->bfqd->wr_busy_queues--;
44e44a1b
PV
1759 bfqq->wr_coeff = 1;
1760 bfqq->wr_cur_max_time = 0;
77b7dcea 1761 bfqq->last_wr_start_finish = jiffies;
44e44a1b
PV
1762 /*
1763 * Trigger a weight change on the next invocation of
1764 * __bfq_entity_update_weight_prio.
1765 */
1766 bfqq->entity.prio_changed = 1;
1767}
1768
ea25da48
PV
1769void bfq_end_wr_async_queues(struct bfq_data *bfqd,
1770 struct bfq_group *bfqg)
44e44a1b
PV
1771{
1772 int i, j;
1773
1774 for (i = 0; i < 2; i++)
1775 for (j = 0; j < IOPRIO_BE_NR; j++)
1776 if (bfqg->async_bfqq[i][j])
1777 bfq_bfqq_end_wr(bfqg->async_bfqq[i][j]);
1778 if (bfqg->async_idle_bfqq)
1779 bfq_bfqq_end_wr(bfqg->async_idle_bfqq);
1780}
1781
1782static void bfq_end_wr(struct bfq_data *bfqd)
1783{
1784 struct bfq_queue *bfqq;
1785
1786 spin_lock_irq(&bfqd->lock);
1787
1788 list_for_each_entry(bfqq, &bfqd->active_list, bfqq_list)
1789 bfq_bfqq_end_wr(bfqq);
1790 list_for_each_entry(bfqq, &bfqd->idle_list, bfqq_list)
1791 bfq_bfqq_end_wr(bfqq);
1792 bfq_end_wr_async(bfqd);
1793
1794 spin_unlock_irq(&bfqd->lock);
1795}
1796
36eca894
AA
1797static sector_t bfq_io_struct_pos(void *io_struct, bool request)
1798{
1799 if (request)
1800 return blk_rq_pos(io_struct);
1801 else
1802 return ((struct bio *)io_struct)->bi_iter.bi_sector;
1803}
1804
1805static int bfq_rq_close_to_sector(void *io_struct, bool request,
1806 sector_t sector)
1807{
1808 return abs(bfq_io_struct_pos(io_struct, request) - sector) <=
1809 BFQQ_CLOSE_THR;
1810}
1811
1812static struct bfq_queue *bfqq_find_close(struct bfq_data *bfqd,
1813 struct bfq_queue *bfqq,
1814 sector_t sector)
1815{
1816 struct rb_root *root = &bfq_bfqq_to_bfqg(bfqq)->rq_pos_tree;
1817 struct rb_node *parent, *node;
1818 struct bfq_queue *__bfqq;
1819
1820 if (RB_EMPTY_ROOT(root))
1821 return NULL;
1822
1823 /*
 1824 * First, if we find a queue whose next request starts exactly
 1825 * at the given sector, choose it.
1826 */
1827 __bfqq = bfq_rq_pos_tree_lookup(bfqd, root, sector, &parent, NULL);
1828 if (__bfqq)
1829 return __bfqq;
1830
1831 /*
1832 * If the exact sector wasn't found, the parent of the NULL leaf
1833 * will contain the closest sector (rq_pos_tree sorted by
1834 * next_request position).
1835 */
1836 __bfqq = rb_entry(parent, struct bfq_queue, pos_node);
1837 if (bfq_rq_close_to_sector(__bfqq->next_rq, true, sector))
1838 return __bfqq;
1839
1840 if (blk_rq_pos(__bfqq->next_rq) < sector)
1841 node = rb_next(&__bfqq->pos_node);
1842 else
1843 node = rb_prev(&__bfqq->pos_node);
1844 if (!node)
1845 return NULL;
1846
1847 __bfqq = rb_entry(node, struct bfq_queue, pos_node);
1848 if (bfq_rq_close_to_sector(__bfqq->next_rq, true, sector))
1849 return __bfqq;
1850
1851 return NULL;
1852}
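
/*
 * Editor's sketch, not part of the original file: the same
 * closest-match lookup as bfqq_find_close(), but over a plain sorted
 * array instead of an rb-tree. Returns the index of the entry closest
 * to @sector if it lies within @thr, -1 otherwise. Hypothetical names.
 */
#include <stddef.h>

static long find_close(const unsigned long *sorted, size_t n,
		       unsigned long sector, unsigned long thr)
{
	size_t lo = 0, hi = n;

	/* Binary search for the first entry >= sector. */
	while (lo < hi) {
		size_t mid = lo + (hi - lo) / 2;

		if (sorted[mid] < sector)
			lo = mid + 1;
		else
			hi = mid;
	}

	/* The closest entry is either the successor or the predecessor. */
	if (lo < n && sorted[lo] - sector <= thr)
		return (long)lo;
	if (lo > 0 && sector - sorted[lo - 1] <= thr)
		return (long)(lo - 1);

	return -1;
}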
1853
1854static struct bfq_queue *bfq_find_close_cooperator(struct bfq_data *bfqd,
1855 struct bfq_queue *cur_bfqq,
1856 sector_t sector)
1857{
1858 struct bfq_queue *bfqq;
1859
1860 /*
1861 * We shall notice if some of the queues are cooperating,
1862 * e.g., working closely on the same area of the device. In
 1863 * that case, we can group them together and: 1) avoid wasting
1864 * time idling, and 2) serve the union of their requests in
1865 * the best possible order for throughput.
1866 */
1867 bfqq = bfqq_find_close(bfqd, cur_bfqq, sector);
1868 if (!bfqq || bfqq == cur_bfqq)
1869 return NULL;
1870
1871 return bfqq;
1872}
1873
1874static struct bfq_queue *
1875bfq_setup_merge(struct bfq_queue *bfqq, struct bfq_queue *new_bfqq)
1876{
1877 int process_refs, new_process_refs;
1878 struct bfq_queue *__bfqq;
1879
1880 /*
1881 * If there are no process references on the new_bfqq, then it is
1882 * unsafe to follow the ->new_bfqq chain as other bfqq's in the chain
1883 * may have dropped their last reference (not just their last process
1884 * reference).
1885 */
1886 if (!bfqq_process_refs(new_bfqq))
1887 return NULL;
1888
1889 /* Avoid a circular list and skip interim queue merges. */
1890 while ((__bfqq = new_bfqq->new_bfqq)) {
1891 if (__bfqq == bfqq)
1892 return NULL;
1893 new_bfqq = __bfqq;
1894 }
1895
1896 process_refs = bfqq_process_refs(bfqq);
1897 new_process_refs = bfqq_process_refs(new_bfqq);
1898 /*
1899 * If the process for the bfqq has gone away, there is no
1900 * sense in merging the queues.
1901 */
1902 if (process_refs == 0 || new_process_refs == 0)
1903 return NULL;
1904
1905 bfq_log_bfqq(bfqq->bfqd, bfqq, "scheduling merge with queue %d",
1906 new_bfqq->pid);
1907
1908 /*
1909 * Merging is just a redirection: the requests of the process
1910 * owning one of the two queues are redirected to the other queue.
1911 * The latter queue, in its turn, is set as shared if this is the
1912 * first time that the requests of some process are redirected to
1913 * it.
1914 *
6fa3e8d3
PV
1915 * We redirect bfqq to new_bfqq and not the opposite, because
1916 * we are in the context of the process owning bfqq, thus we
1917 * have the io_cq of this process. So we can immediately
1918 * configure this io_cq to redirect the requests of the
1919 * process to new_bfqq. In contrast, the io_cq of new_bfqq is
1920 * not available any more (new_bfqq->bic == NULL).
36eca894 1921 *
6fa3e8d3
PV
1922 * Anyway, even in case new_bfqq coincides with the in-service
 1923 * queue, redirecting requests to the in-service queue is the
1924 * best option, as we feed the in-service queue with new
1925 * requests close to the last request served and, by doing so,
1926 * are likely to increase the throughput.
36eca894
AA
1927 */
1928 bfqq->new_bfqq = new_bfqq;
1929 new_bfqq->ref += process_refs;
1930 return new_bfqq;
1931}
1932
1933static bool bfq_may_be_close_cooperator(struct bfq_queue *bfqq,
1934 struct bfq_queue *new_bfqq)
1935{
1936 if (bfq_class_idle(bfqq) || bfq_class_idle(new_bfqq) ||
1937 (bfqq->ioprio_class != new_bfqq->ioprio_class))
1938 return false;
1939
1940 /*
1941 * If either of the queues has already been detected as seeky,
1942 * then merging it with the other queue is unlikely to lead to
1943 * sequential I/O.
1944 */
1945 if (BFQQ_SEEKY(bfqq) || BFQQ_SEEKY(new_bfqq))
1946 return false;
1947
1948 /*
1949 * Interleaved I/O is known to be done by (some) applications
1950 * only for reads, so it does not make sense to merge async
1951 * queues.
1952 */
1953 if (!bfq_bfqq_sync(bfqq) || !bfq_bfqq_sync(new_bfqq))
1954 return false;
1955
1956 return true;
1957}
1958
1959/*
1960 * If this function returns true, then bfqq cannot be merged. The idea
1961 * is that true cooperation happens very early after processes start
1962 * to do I/O. Usually, late cooperations are just accidental false
1963 * positives. In case bfqq is weight-raised, such false positives
1964 * would evidently degrade latency guarantees for bfqq.
1965 */
1966static bool wr_from_too_long(struct bfq_queue *bfqq)
1967{
1968 return bfqq->wr_coeff > 1 &&
1969 time_is_before_jiffies(bfqq->last_wr_start_finish +
1970 msecs_to_jiffies(100));
1971}
1972
1973/*
1974 * Attempt to schedule a merge of bfqq with the currently in-service
1975 * queue or with a close queue among the scheduled queues. Return
1976 * NULL if no merge was scheduled, a pointer to the shared bfq_queue
1977 * structure otherwise.
1978 *
 1979 * The OOM queue is not allowed to participate in cooperation: in fact, since
 1980 * the requests temporarily redirected to the OOM queue could be redirected
 1981 * again to dedicated queues at any time, the state needed to correctly
 1982 * handle merging with the OOM queue would be quite complex and expensive
 1983 * to maintain. Besides, in a condition as critical as out of memory,
 1984 * the benefits of queue merging may be of little relevance, or even negligible.
1985 *
1986 * Weight-raised queues can be merged only if their weight-raising
1987 * period has just started. In fact cooperating processes are usually
1988 * started together. Thus, with this filter we avoid false positives
1989 * that would jeopardize low-latency guarantees.
1990 *
1991 * WARNING: queue merging may impair fairness among non-weight raised
1992 * queues, for at least two reasons: 1) the original weight of a
 1993 * merged queue may change during the merged state, 2) even if the
 1994 * weight stays the same, a merged queue may be bloated with many more
1995 * requests than the ones produced by its originally-associated
1996 * process.
1997 */
1998static struct bfq_queue *
1999bfq_setup_cooperator(struct bfq_data *bfqd, struct bfq_queue *bfqq,
2000 void *io_struct, bool request)
2001{
2002 struct bfq_queue *in_service_bfqq, *new_bfqq;
2003
2004 if (bfqq->new_bfqq)
2005 return bfqq->new_bfqq;
2006
2007 if (!io_struct ||
2008 wr_from_too_long(bfqq) ||
2009 unlikely(bfqq == &bfqd->oom_bfqq))
2010 return NULL;
2011
2012 /* If there is only one backlogged queue, don't search. */
2013 if (bfqd->busy_queues == 1)
2014 return NULL;
2015
2016 in_service_bfqq = bfqd->in_service_queue;
2017
6fa3e8d3
PV
2018 if (!in_service_bfqq || in_service_bfqq == bfqq
2019 || wr_from_too_long(in_service_bfqq) ||
36eca894
AA
2020 unlikely(in_service_bfqq == &bfqd->oom_bfqq))
2021 goto check_scheduled;
2022
2023 if (bfq_rq_close_to_sector(io_struct, request, bfqd->last_position) &&
2024 bfqq->entity.parent == in_service_bfqq->entity.parent &&
2025 bfq_may_be_close_cooperator(bfqq, in_service_bfqq)) {
2026 new_bfqq = bfq_setup_merge(bfqq, in_service_bfqq);
2027 if (new_bfqq)
2028 return new_bfqq;
2029 }
2030 /*
2031 * Check whether there is a cooperator among currently scheduled
2032 * queues. The only thing we need is that the bio/request is not
2033 * NULL, as we need it to establish whether a cooperator exists.
2034 */
2035check_scheduled:
2036 new_bfqq = bfq_find_close_cooperator(bfqd, bfqq,
2037 bfq_io_struct_pos(io_struct, request));
2038
2039 if (new_bfqq && !wr_from_too_long(new_bfqq) &&
2040 likely(new_bfqq != &bfqd->oom_bfqq) &&
2041 bfq_may_be_close_cooperator(bfqq, new_bfqq))
2042 return bfq_setup_merge(bfqq, new_bfqq);
2043
2044 return NULL;
2045}
2046
2047static void bfq_bfqq_save_state(struct bfq_queue *bfqq)
2048{
2049 struct bfq_io_cq *bic = bfqq->bic;
2050
2051 /*
2052 * If !bfqq->bic, the queue is already shared or its requests
2053 * have already been redirected to a shared queue; both idle window
2054 * and weight raising state have already been saved. Do nothing.
2055 */
2056 if (!bic)
2057 return;
2058
2059 bic->saved_ttime = bfqq->ttime;
d5be3fef 2060 bic->saved_has_short_ttime = bfq_bfqq_has_short_ttime(bfqq);
36eca894 2061 bic->saved_IO_bound = bfq_bfqq_IO_bound(bfqq);
e1b2324d
AA
2062 bic->saved_in_large_burst = bfq_bfqq_in_large_burst(bfqq);
2063 bic->was_in_burst_list = !hlist_unhashed(&bfqq->burst_list_node);
36eca894
AA
2064 bic->saved_wr_coeff = bfqq->wr_coeff;
2065 bic->saved_wr_start_at_switch_to_srt = bfqq->wr_start_at_switch_to_srt;
2066 bic->saved_last_wr_start_finish = bfqq->last_wr_start_finish;
2067 bic->saved_wr_cur_max_time = bfqq->wr_cur_max_time;
2068}
2069
36eca894
AA
2070static void
2071bfq_merge_bfqqs(struct bfq_data *bfqd, struct bfq_io_cq *bic,
2072 struct bfq_queue *bfqq, struct bfq_queue *new_bfqq)
2073{
2074 bfq_log_bfqq(bfqd, bfqq, "merging with queue %lu",
2075 (unsigned long)new_bfqq->pid);
2076 /* Save weight raising and idle window of the merged queues */
2077 bfq_bfqq_save_state(bfqq);
2078 bfq_bfqq_save_state(new_bfqq);
2079 if (bfq_bfqq_IO_bound(bfqq))
2080 bfq_mark_bfqq_IO_bound(new_bfqq);
2081 bfq_clear_bfqq_IO_bound(bfqq);
2082
2083 /*
2084 * If bfqq is weight-raised, then let new_bfqq inherit
2085 * weight-raising. To reduce false positives, neglect the case
2086 * where bfqq has just been created, but has not yet made it
2087 * to be weight-raised (which may happen because EQM may merge
2088 * bfqq even before bfq_add_request is executed for the first
e1b2324d
AA
2089 * time for bfqq). Handling this case would however be very
2090 * easy, thanks to the flag just_created.
36eca894
AA
2091 */
2092 if (new_bfqq->wr_coeff == 1 && bfqq->wr_coeff > 1) {
2093 new_bfqq->wr_coeff = bfqq->wr_coeff;
2094 new_bfqq->wr_cur_max_time = bfqq->wr_cur_max_time;
2095 new_bfqq->last_wr_start_finish = bfqq->last_wr_start_finish;
2096 new_bfqq->wr_start_at_switch_to_srt =
2097 bfqq->wr_start_at_switch_to_srt;
2098 if (bfq_bfqq_busy(new_bfqq))
2099 bfqd->wr_busy_queues++;
2100 new_bfqq->entity.prio_changed = 1;
2101 }
2102
2103 if (bfqq->wr_coeff > 1) { /* bfqq has given its wr to new_bfqq */
2104 bfqq->wr_coeff = 1;
2105 bfqq->entity.prio_changed = 1;
2106 if (bfq_bfqq_busy(bfqq))
2107 bfqd->wr_busy_queues--;
2108 }
2109
2110 bfq_log_bfqq(bfqd, new_bfqq, "merge_bfqqs: wr_busy %d",
2111 bfqd->wr_busy_queues);
2112
36eca894
AA
2113 /*
2114 * Merge queues (that is, let bic redirect its requests to new_bfqq)
2115 */
2116 bic_set_bfqq(bic, new_bfqq, 1);
2117 bfq_mark_bfqq_coop(new_bfqq);
2118 /*
2119 * new_bfqq now belongs to at least two bics (it is a shared queue):
2120 * set new_bfqq->bic to NULL. bfqq either:
2121 * - does not belong to any bic any more, and hence bfqq->bic must
2122 * be set to NULL, or
2123 * - is a queue whose owning bics have already been redirected to a
2124 * different queue, hence the queue is destined to not belong to
2125 * any bic soon and bfqq->bic is already NULL (therefore the next
2126 * assignment causes no harm).
2127 */
2128 new_bfqq->bic = NULL;
2129 bfqq->bic = NULL;
2130 /* release process reference to bfqq */
2131 bfq_put_queue(bfqq);
2132}
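
/*
 * Editor's sketch, not part of the original file: the weight-raising
 * hand-off performed in bfq_merge_bfqqs() above, reduced to its
 * essentials (busy-queue accounting and prio_changed are omitted).
 * Names are hypothetical.
 */
struct wr_state {
	unsigned int wr_coeff;
	unsigned long wr_cur_max_time;
	unsigned long last_wr_start_finish;
};

static void hand_off_weight_raising(struct wr_state *from, struct wr_state *to)
{
	if (to->wr_coeff == 1 && from->wr_coeff > 1)
		*to = *from;		/* the surviving queue inherits wr */

	if (from->wr_coeff > 1)
		from->wr_coeff = 1;	/* the donor gives its wr up */
}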
2133
aee69d78
PV
2134static bool bfq_allow_bio_merge(struct request_queue *q, struct request *rq,
2135 struct bio *bio)
2136{
2137 struct bfq_data *bfqd = q->elevator->elevator_data;
2138 bool is_sync = op_is_sync(bio->bi_opf);
36eca894 2139 struct bfq_queue *bfqq = bfqd->bio_bfqq, *new_bfqq;
aee69d78
PV
2140
2141 /*
2142 * Disallow merge of a sync bio into an async request.
2143 */
2144 if (is_sync && !rq_is_sync(rq))
2145 return false;
2146
2147 /*
2148 * Lookup the bfqq that this bio will be queued with. Allow
2149 * merge only if rq is queued there.
2150 */
2151 if (!bfqq)
2152 return false;
2153
36eca894
AA
2154 /*
2155 * We take advantage of this function to perform an early merge
2156 * of the queues of possible cooperating processes.
2157 */
2158 new_bfqq = bfq_setup_cooperator(bfqd, bfqq, bio, false);
2159 if (new_bfqq) {
2160 /*
2161 * bic still points to bfqq, then it has not yet been
2162 * redirected to some other bfq_queue, and a queue
 2163 * merge between bfqq and new_bfqq can be safely
 2164 * fulfilled, i.e., bic can be redirected to new_bfqq
2165 * and bfqq can be put.
2166 */
2167 bfq_merge_bfqqs(bfqd, bfqd->bio_bic, bfqq,
2168 new_bfqq);
2169 /*
2170 * If we get here, bio will be queued into new_queue,
2171 * so use new_bfqq to decide whether bio and rq can be
2172 * merged.
2173 */
2174 bfqq = new_bfqq;
2175
2176 /*
 2177 * Change also bfqd->bio_bfqq, as
 2178 * bfqd->bio_bic now points to new_bfqq, and
 2179 * this function may be invoked again (and then may
 2180 * use again bfqd->bio_bfqq).
2181 */
2182 bfqd->bio_bfqq = bfqq;
2183 }
2184
aee69d78
PV
2185 return bfqq == RQ_BFQQ(rq);
2186}
2187
44e44a1b
PV
2188/*
2189 * Set the maximum time for the in-service queue to consume its
2190 * budget. This prevents seeky processes from lowering the throughput.
2191 * In practice, a time-slice service scheme is used with seeky
2192 * processes.
2193 */
2194static void bfq_set_budget_timeout(struct bfq_data *bfqd,
2195 struct bfq_queue *bfqq)
2196{
77b7dcea
PV
2197 unsigned int timeout_coeff;
2198
2199 if (bfqq->wr_cur_max_time == bfqd->bfq_wr_rt_max_time)
2200 timeout_coeff = 1;
2201 else
2202 timeout_coeff = bfqq->entity.weight / bfqq->entity.orig_weight;
2203
44e44a1b
PV
2204 bfqd->last_budget_start = ktime_get();
2205
2206 bfqq->budget_timeout = jiffies +
77b7dcea 2207 bfqd->bfq_timeout * timeout_coeff;
44e44a1b
PV
2208}
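
/*
 * Editor's sketch, not part of the original file: the timeout computed
 * above. A queue whose weight has been raised by a factor F
 * (entity.weight == F * entity.orig_weight) gets F times the base time
 * to consume its budget, except for soft real-time queues, which keep
 * the base timeout. Hypothetical standalone form:
 */
static unsigned long budget_timeout(unsigned long now, unsigned long base,
				    unsigned int weight,
				    unsigned int orig_weight, int soft_rt)
{
	unsigned int coeff = soft_rt ? 1 : weight / orig_weight;

	return now + base * coeff;
}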
2209
aee69d78
PV
2210static void __bfq_set_in_service_queue(struct bfq_data *bfqd,
2211 struct bfq_queue *bfqq)
2212{
2213 if (bfqq) {
e21b7a0b 2214 bfqg_stats_update_avg_queue_size(bfqq_group(bfqq));
aee69d78
PV
2215 bfq_clear_bfqq_fifo_expire(bfqq);
2216
2217 bfqd->budgets_assigned = (bfqd->budgets_assigned * 7 + 256) / 8;
2218
77b7dcea
PV
2219 if (time_is_before_jiffies(bfqq->last_wr_start_finish) &&
2220 bfqq->wr_coeff > 1 &&
2221 bfqq->wr_cur_max_time == bfqd->bfq_wr_rt_max_time &&
2222 time_is_before_jiffies(bfqq->budget_timeout)) {
2223 /*
2224 * For soft real-time queues, move the start
2225 * of the weight-raising period forward by the
2226 * time the queue has not received any
2227 * service. Otherwise, a relatively long
2228 * service delay is likely to cause the
2229 * weight-raising period of the queue to end,
2230 * because of the short duration of the
2231 * weight-raising period of a soft real-time
2232 * queue. It is worth noting that this move
2233 * is not so dangerous for the other queues,
2234 * because soft real-time queues are not
2235 * greedy.
2236 *
2237 * To not add a further variable, we use the
2238 * overloaded field budget_timeout to
2239 * determine for how long the queue has not
2240 * received service, i.e., how much time has
2241 * elapsed since the queue expired. However,
2242 * this is a little imprecise, because
 2243 * budget_timeout is set to jiffies only if bfqq
2244 * not only expires, but also remains with no
2245 * request.
2246 */
2247 if (time_after(bfqq->budget_timeout,
2248 bfqq->last_wr_start_finish))
2249 bfqq->last_wr_start_finish +=
2250 jiffies - bfqq->budget_timeout;
2251 else
2252 bfqq->last_wr_start_finish = jiffies;
2253 }
2254
44e44a1b 2255 bfq_set_budget_timeout(bfqd, bfqq);
aee69d78
PV
2256 bfq_log_bfqq(bfqd, bfqq,
2257 "set_in_service_queue, cur-budget = %d",
2258 bfqq->entity.budget);
2259 }
2260
2261 bfqd->in_service_queue = bfqq;
2262}
2263
2264/*
2265 * Get and set a new queue for service.
2266 */
2267static struct bfq_queue *bfq_set_in_service_queue(struct bfq_data *bfqd)
2268{
2269 struct bfq_queue *bfqq = bfq_get_next_queue(bfqd);
2270
2271 __bfq_set_in_service_queue(bfqd, bfqq);
2272 return bfqq;
2273}
2274
aee69d78
PV
2275static void bfq_arm_slice_timer(struct bfq_data *bfqd)
2276{
2277 struct bfq_queue *bfqq = bfqd->in_service_queue;
aee69d78
PV
2278 u32 sl;
2279
aee69d78
PV
2280 bfq_mark_bfqq_wait_request(bfqq);
2281
2282 /*
2283 * We don't want to idle for seeks, but we do want to allow
2284 * fair distribution of slice time for a process doing back-to-back
 2285 * seeks. So allow a little bit of time for it to submit a new rq.
2286 */
2287 sl = bfqd->bfq_slice_idle;
2288 /*
1de0c4cd
AA
2289 * Unless the queue is being weight-raised or the scenario is
2290 * asymmetric, grant only minimum idle time if the queue
 2291 * is seeky. Long idling is preserved for a weight-raised
 2292 * queue, or, more in general, in an asymmetric scenario,
 2293 * because long idling is needed to guarantee a queue
2294 * its reserved share of the throughput (in particular, it is
2295 * needed if the queue has a higher weight than some other
2296 * queue).
aee69d78 2297 */
1de0c4cd
AA
2298 if (BFQQ_SEEKY(bfqq) && bfqq->wr_coeff == 1 &&
2299 bfq_symmetric_scenario(bfqd))
aee69d78
PV
2300 sl = min_t(u64, sl, BFQ_MIN_TT);
2301
2302 bfqd->last_idling_start = ktime_get();
2303 hrtimer_start(&bfqd->idle_slice_timer, ns_to_ktime(sl),
2304 HRTIMER_MODE_REL);
e21b7a0b 2305 bfqg_stats_set_start_idle_time(bfqq_group(bfqq));
aee69d78
PV
2306}
2307
ab0e43e9
PV
2308/*
2309 * In autotuning mode, max_budget is dynamically recomputed as the
2310 * amount of sectors transferred in timeout at the estimated peak
2311 * rate. This enables BFQ to utilize a full timeslice with a full
2312 * budget, even if the in-service queue is served at peak rate. And
2313 * this maximises throughput with sequential workloads.
2314 */
2315static unsigned long bfq_calc_max_budget(struct bfq_data *bfqd)
2316{
2317 return (u64)bfqd->peak_rate * USEC_PER_MSEC *
2318 jiffies_to_msecs(bfqd->bfq_timeout)>>BFQ_RATE_SHIFT;
2319}
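
/*
 * Editor's note, not part of the original file: the arithmetic above
 * relies on peak_rate being stored in sectors/usec, left-shifted by
 * BFQ_RATE_SHIFT for fixed-point precision (see
 * bfq_update_rate_reset()). A hypothetical standalone form, with the
 * units spelled out:
 */
static unsigned long long calc_max_budget(unsigned long long peak_rate,
					  unsigned int timeout_msecs,
					  unsigned int rate_shift)
{
	/* (sectors/usec << shift) * usec/msec * msec >> shift = sectors */
	return (peak_rate * 1000ULL * timeout_msecs) >> rate_shift;
}

/*
 * Example with made-up numbers: a device moving ~800 sectors/msec
 * (~400 MB/s with 512-byte sectors) and a 125 ms timeout yield a max
 * budget of ~100000 sectors, i.e., ~50 MB per full timeslice.
 */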
2320
44e44a1b
PV
2321/*
2322 * Update parameters related to throughput and responsiveness, as a
2323 * function of the estimated peak rate. See comments on
2324 * bfq_calc_max_budget(), and on T_slow and T_fast arrays.
2325 */
2326static void update_thr_responsiveness_params(struct bfq_data *bfqd)
2327{
2328 int dev_type = blk_queue_nonrot(bfqd->queue);
2329
2330 if (bfqd->bfq_user_max_budget == 0)
2331 bfqd->bfq_max_budget =
2332 bfq_calc_max_budget(bfqd);
2333
2334 if (bfqd->device_speed == BFQ_BFQD_FAST &&
2335 bfqd->peak_rate < device_speed_thresh[dev_type]) {
2336 bfqd->device_speed = BFQ_BFQD_SLOW;
2337 bfqd->RT_prod = R_slow[dev_type] *
2338 T_slow[dev_type];
2339 } else if (bfqd->device_speed == BFQ_BFQD_SLOW &&
2340 bfqd->peak_rate > device_speed_thresh[dev_type]) {
2341 bfqd->device_speed = BFQ_BFQD_FAST;
2342 bfqd->RT_prod = R_fast[dev_type] *
2343 T_fast[dev_type];
2344 }
2345
2346 bfq_log(bfqd,
2347"dev_type %s dev_speed_class = %s (%llu sects/sec), thresh %llu setcs/sec",
2348 dev_type == 0 ? "ROT" : "NONROT",
2349 bfqd->device_speed == BFQ_BFQD_FAST ? "FAST" : "SLOW",
2350 bfqd->device_speed == BFQ_BFQD_FAST ?
2351 (USEC_PER_SEC*(u64)R_fast[dev_type])>>BFQ_RATE_SHIFT :
2352 (USEC_PER_SEC*(u64)R_slow[dev_type])>>BFQ_RATE_SHIFT,
2353 (USEC_PER_SEC*(u64)device_speed_thresh[dev_type])>>
2354 BFQ_RATE_SHIFT);
2355}
2356
ab0e43e9
PV
2357static void bfq_reset_rate_computation(struct bfq_data *bfqd,
2358 struct request *rq)
2359{
2360 if (rq != NULL) { /* new rq dispatch now, reset accordingly */
2361 bfqd->last_dispatch = bfqd->first_dispatch = ktime_get_ns();
2362 bfqd->peak_rate_samples = 1;
2363 bfqd->sequential_samples = 0;
2364 bfqd->tot_sectors_dispatched = bfqd->last_rq_max_size =
2365 blk_rq_sectors(rq);
2366 } else /* no new rq dispatched, just reset the number of samples */
2367 bfqd->peak_rate_samples = 0; /* full re-init on next disp. */
2368
2369 bfq_log(bfqd,
2370 "reset_rate_computation at end, sample %u/%u tot_sects %llu",
2371 bfqd->peak_rate_samples, bfqd->sequential_samples,
2372 bfqd->tot_sectors_dispatched);
2373}
2374
2375static void bfq_update_rate_reset(struct bfq_data *bfqd, struct request *rq)
2376{
2377 u32 rate, weight, divisor;
2378
2379 /*
2380 * For the convergence property to hold (see comments on
2381 * bfq_update_peak_rate()) and for the assessment to be
2382 * reliable, a minimum number of samples must be present, and
2383 * a minimum amount of time must have elapsed. If not so, do
2384 * not compute new rate. Just reset parameters, to get ready
2385 * for a new evaluation attempt.
2386 */
2387 if (bfqd->peak_rate_samples < BFQ_RATE_MIN_SAMPLES ||
2388 bfqd->delta_from_first < BFQ_RATE_MIN_INTERVAL)
2389 goto reset_computation;
2390
2391 /*
2392 * If a new request completion has occurred after last
2393 * dispatch, then, to approximate the rate at which requests
2394 * have been served by the device, it is more precise to
2395 * extend the observation interval to the last completion.
2396 */
2397 bfqd->delta_from_first =
2398 max_t(u64, bfqd->delta_from_first,
2399 bfqd->last_completion - bfqd->first_dispatch);
2400
2401 /*
2402 * Rate computed in sects/usec, and not sects/nsec, for
2403 * precision issues.
2404 */
2405 rate = div64_ul(bfqd->tot_sectors_dispatched<<BFQ_RATE_SHIFT,
2406 div_u64(bfqd->delta_from_first, NSEC_PER_USEC));
2407
2408 /*
2409 * Peak rate not updated if:
2410 * - the percentage of sequential dispatches is below 3/4 of the
2411 * total, and rate is below the current estimated peak rate
2412 * - rate is unreasonably high (> 20M sectors/sec)
2413 */
2414 if ((bfqd->sequential_samples < (3 * bfqd->peak_rate_samples)>>2 &&
2415 rate <= bfqd->peak_rate) ||
2416 rate > 20<<BFQ_RATE_SHIFT)
2417 goto reset_computation;
2418
2419 /*
2420 * We have to update the peak rate, at last! To this purpose,
2421 * we use a low-pass filter. We compute the smoothing constant
2422 * of the filter as a function of the 'weight' of the new
2423 * measured rate.
2424 *
 2425 * As can be seen in the next formulas, we define this weight as a
2426 * quantity proportional to how sequential the workload is,
2427 * and to how long the observation time interval is.
2428 *
2429 * The weight runs from 0 to 8. The maximum value of the
2430 * weight, 8, yields the minimum value for the smoothing
2431 * constant. At this minimum value for the smoothing constant,
2432 * the measured rate contributes for half of the next value of
2433 * the estimated peak rate.
2434 *
2435 * So, the first step is to compute the weight as a function
2436 * of how sequential the workload is. Note that the weight
2437 * cannot reach 9, because bfqd->sequential_samples cannot
2438 * become equal to bfqd->peak_rate_samples, which, in its
2439 * turn, holds true because bfqd->sequential_samples is not
2440 * incremented for the first sample.
2441 */
2442 weight = (9 * bfqd->sequential_samples) / bfqd->peak_rate_samples;
2443
2444 /*
2445 * Second step: further refine the weight as a function of the
2446 * duration of the observation interval.
2447 */
2448 weight = min_t(u32, 8,
2449 div_u64(weight * bfqd->delta_from_first,
2450 BFQ_RATE_REF_INTERVAL));
2451
2452 /*
2453 * Divisor ranging from 10, for minimum weight, to 2, for
2454 * maximum weight.
2455 */
2456 divisor = 10 - weight;
2457
2458 /*
2459 * Finally, update peak rate:
2460 *
2461 * peak_rate = peak_rate * (divisor-1) / divisor + rate / divisor
2462 */
2463 bfqd->peak_rate *= divisor-1;
2464 bfqd->peak_rate /= divisor;
2465 rate /= divisor; /* smoothing constant alpha = 1/divisor */
2466
2467 bfqd->peak_rate += rate;
44e44a1b 2468 update_thr_responsiveness_params(bfqd);
ab0e43e9
PV
2469
2470reset_computation:
2471 bfq_reset_rate_computation(bfqd, rq);
2472}
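
/*
 * Editor's sketch, not part of the original file: the low-pass filter
 * above in isolation. With weight in 0..8 the divisor ranges from 10
 * down to 2, i.e., the smoothing constant alpha = 1/divisor ranges
 * from 1/10 (short or seeky observation, estimate barely moves) to
 * 1/2 (long, sequential observation, new sample counts for half).
 * Hypothetical names.
 */
static unsigned long long lowpass_update(unsigned long long old_rate,
					 unsigned long long sample,
					 unsigned int weight /* 0..8 */)
{
	unsigned int divisor = 10 - weight;

	return old_rate * (divisor - 1) / divisor + sample / divisor;
}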
2473
2474/*
2475 * Update the read/write peak rate (the main quantity used for
2476 * auto-tuning, see update_thr_responsiveness_params()).
2477 *
2478 * It is not trivial to estimate the peak rate (correctly): because of
2479 * the presence of sw and hw queues between the scheduler and the
2480 * device components that finally serve I/O requests, it is hard to
2481 * say exactly when a given dispatched request is served inside the
2482 * device, and for how long. As a consequence, it is hard to know
2483 * precisely at what rate a given set of requests is actually served
2484 * by the device.
2485 *
2486 * On the opposite end, the dispatch time of any request is trivially
2487 * available, and, from this piece of information, the "dispatch rate"
2488 * of requests can be immediately computed. So, the idea in the next
2489 * function is to use what is known, namely request dispatch times
2490 * (plus, when useful, request completion times), to estimate what is
2491 * unknown, namely in-device request service rate.
2492 *
2493 * The main issue is that, because of the above facts, the rate at
2494 * which a certain set of requests is dispatched over a certain time
2495 * interval can vary greatly with respect to the rate at which the
2496 * same requests are then served. But, since the size of any
2497 * intermediate queue is limited, and the service scheme is lossless
2498 * (no request is silently dropped), the following obvious convergence
2499 * property holds: the number of requests dispatched MUST become
2500 * closer and closer to the number of requests completed as the
2501 * observation interval grows. This is the key property used in
2502 * the next function to estimate the peak service rate as a function
2503 * of the observed dispatch rate. The function assumes to be invoked
2504 * on every request dispatch.
2505 */
2506static void bfq_update_peak_rate(struct bfq_data *bfqd, struct request *rq)
2507{
2508 u64 now_ns = ktime_get_ns();
2509
2510 if (bfqd->peak_rate_samples == 0) { /* first dispatch */
2511 bfq_log(bfqd, "update_peak_rate: goto reset, samples %d",
2512 bfqd->peak_rate_samples);
2513 bfq_reset_rate_computation(bfqd, rq);
2514 goto update_last_values; /* will add one sample */
2515 }
2516
2517 /*
2518 * Device idle for very long: the observation interval lasting
2519 * up to this dispatch cannot be a valid observation interval
2520 * for computing a new peak rate (similarly to the late-
2521 * completion event in bfq_completed_request()). Go to
2522 * update_rate_and_reset to have the following three steps
2523 * taken:
2524 * - close the observation interval at the last (previous)
2525 * request dispatch or completion
2526 * - compute rate, if possible, for that observation interval
2527 * - start a new observation interval with this dispatch
2528 */
2529 if (now_ns - bfqd->last_dispatch > 100*NSEC_PER_MSEC &&
2530 bfqd->rq_in_driver == 0)
2531 goto update_rate_and_reset;
2532
2533 /* Update sampling information */
2534 bfqd->peak_rate_samples++;
2535
2536 if ((bfqd->rq_in_driver > 0 ||
2537 now_ns - bfqd->last_completion < BFQ_MIN_TT)
2538 && get_sdist(bfqd->last_position, rq) < BFQQ_SEEK_THR)
2539 bfqd->sequential_samples++;
2540
2541 bfqd->tot_sectors_dispatched += blk_rq_sectors(rq);
2542
2543 /* Reset max observed rq size every 32 dispatches */
2544 if (likely(bfqd->peak_rate_samples % 32))
2545 bfqd->last_rq_max_size =
2546 max_t(u32, blk_rq_sectors(rq), bfqd->last_rq_max_size);
2547 else
2548 bfqd->last_rq_max_size = blk_rq_sectors(rq);
2549
2550 bfqd->delta_from_first = now_ns - bfqd->first_dispatch;
2551
2552 /* Target observation interval not yet reached, go on sampling */
2553 if (bfqd->delta_from_first < BFQ_RATE_REF_INTERVAL)
2554 goto update_last_values;
2555
2556update_rate_and_reset:
2557 bfq_update_rate_reset(bfqd, rq);
2558update_last_values:
2559 bfqd->last_position = blk_rq_pos(rq) + blk_rq_sectors(rq);
2560 bfqd->last_dispatch = now_ns;
2561}
2562
aee69d78
PV
2563/*
2564 * Remove request from internal lists.
2565 */
2566static void bfq_dispatch_remove(struct request_queue *q, struct request *rq)
2567{
2568 struct bfq_queue *bfqq = RQ_BFQQ(rq);
2569
2570 /*
2571 * For consistency, the next instruction should have been
2572 * executed after removing the request from the queue and
2573 * dispatching it. We execute instead this instruction before
2574 * bfq_remove_request() (and hence introduce a temporary
2575 * inconsistency), for efficiency. In fact, should this
2576 * dispatch occur for a non in-service bfqq, this anticipated
2577 * increment prevents two counters related to bfqq->dispatched
2578 * from risking to be, first, uselessly decremented, and then
2579 * incremented again when the (new) value of bfqq->dispatched
2580 * happens to be taken into account.
2581 */
2582 bfqq->dispatched++;
ab0e43e9 2583 bfq_update_peak_rate(q->elevator->elevator_data, rq);
aee69d78
PV
2584
2585 bfq_remove_request(q, rq);
2586}
2587
2588static void __bfq_bfqq_expire(struct bfq_data *bfqd, struct bfq_queue *bfqq)
2589{
36eca894
AA
2590 /*
2591 * If this bfqq is shared between multiple processes, check
2592 * to make sure that those processes are still issuing I/Os
2593 * within the mean seek distance. If not, it may be time to
2594 * break the queues apart again.
2595 */
2596 if (bfq_bfqq_coop(bfqq) && BFQQ_SEEKY(bfqq))
2597 bfq_mark_bfqq_split_coop(bfqq);
2598
44e44a1b
PV
2599 if (RB_EMPTY_ROOT(&bfqq->sort_list)) {
2600 if (bfqq->dispatched == 0)
2601 /*
2602 * Overloading budget_timeout field to store
2603 * the time at which the queue remains with no
2604 * backlog and no outstanding request; used by
2605 * the weight-raising mechanism.
2606 */
2607 bfqq->budget_timeout = jiffies;
2608
e21b7a0b 2609 bfq_del_bfqq_busy(bfqd, bfqq, true);
36eca894 2610 } else {
80294c3b 2611 bfq_requeue_bfqq(bfqd, bfqq, true);
36eca894
AA
2612 /*
2613 * Resort priority tree of potential close cooperators.
2614 */
2615 bfq_pos_tree_add_move(bfqd, bfqq);
2616 }
e21b7a0b
AA
2617
2618 /*
2619 * All in-service entities must have been properly deactivated
2620 * or requeued before executing the next function, which
 2621 * resets all in-service entities as no more in service.
2622 */
2623 __bfq_bfqd_reset_in_service(bfqd);
aee69d78
PV
2624}
2625
2626/**
2627 * __bfq_bfqq_recalc_budget - try to adapt the budget to the @bfqq behavior.
2628 * @bfqd: device data.
2629 * @bfqq: queue to update.
2630 * @reason: reason for expiration.
2631 *
2632 * Handle the feedback on @bfqq budget at queue expiration.
2633 * See the body for detailed comments.
2634 */
2635static void __bfq_bfqq_recalc_budget(struct bfq_data *bfqd,
2636 struct bfq_queue *bfqq,
2637 enum bfqq_expiration reason)
2638{
2639 struct request *next_rq;
2640 int budget, min_budget;
2641
aee69d78
PV
2642 min_budget = bfq_min_budget(bfqd);
2643
44e44a1b
PV
2644 if (bfqq->wr_coeff == 1)
2645 budget = bfqq->max_budget;
2646 else /*
2647 * Use a constant, low budget for weight-raised queues,
2648 * to help achieve a low latency. Keep it slightly higher
2649 * than the minimum possible budget, to cause a little
2650 * bit fewer expirations.
2651 */
2652 budget = 2 * min_budget;
2653
aee69d78
PV
2654 bfq_log_bfqq(bfqd, bfqq, "recalc_budg: last budg %d, budg left %d",
2655 bfqq->entity.budget, bfq_bfqq_budget_left(bfqq));
2656 bfq_log_bfqq(bfqd, bfqq, "recalc_budg: last max_budg %d, min budg %d",
2657 budget, bfq_min_budget(bfqd));
2658 bfq_log_bfqq(bfqd, bfqq, "recalc_budg: sync %d, seeky %d",
2659 bfq_bfqq_sync(bfqq), BFQQ_SEEKY(bfqd->in_service_queue));
2660
44e44a1b 2661 if (bfq_bfqq_sync(bfqq) && bfqq->wr_coeff == 1) {
aee69d78
PV
2662 switch (reason) {
2663 /*
2664 * Caveat: in all the following cases we trade latency
2665 * for throughput.
2666 */
2667 case BFQQE_TOO_IDLE:
54b60456
PV
2668 /*
2669 * This is the only case where we may reduce
2670 * the budget: if there is no request of the
2671 * process still waiting for completion, then
2672 * we assume (tentatively) that the timer has
2673 * expired because the batch of requests of
2674 * the process could have been served with a
 2675 * smaller budget. Hence, betting that the
 2676 * process will behave in the same way when it
2677 * becomes backlogged again, we reduce its
2678 * next budget. As long as we guess right,
2679 * this budget cut reduces the latency
2680 * experienced by the process.
2681 *
2682 * However, if there are still outstanding
2683 * requests, then the process may have not yet
2684 * issued its next request just because it is
2685 * still waiting for the completion of some of
2686 * the still outstanding ones. So in this
2687 * subcase we do not reduce its budget, on the
2688 * contrary we increase it to possibly boost
2689 * the throughput, as discussed in the
2690 * comments to the BUDGET_TIMEOUT case.
2691 */
2692 if (bfqq->dispatched > 0) /* still outstanding reqs */
2693 budget = min(budget * 2, bfqd->bfq_max_budget);
2694 else {
2695 if (budget > 5 * min_budget)
2696 budget -= 4 * min_budget;
2697 else
2698 budget = min_budget;
2699 }
aee69d78
PV
2700 break;
2701 case BFQQE_BUDGET_TIMEOUT:
54b60456
PV
2702 /*
2703 * We double the budget here because it gives
2704 * the chance to boost the throughput if this
2705 * is not a seeky process (and has bumped into
2706 * this timeout because of, e.g., ZBR).
2707 */
2708 budget = min(budget * 2, bfqd->bfq_max_budget);
aee69d78
PV
2709 break;
2710 case BFQQE_BUDGET_EXHAUSTED:
2711 /*
2712 * The process still has backlog, and did not
2713 * let either the budget timeout or the disk
2714 * idling timeout expire. Hence it is not
2715 * seeky, has a short thinktime and may be
2716 * happy with a higher budget too. So
2717 * definitely increase the budget of this good
2718 * candidate to boost the disk throughput.
2719 */
54b60456 2720 budget = min(budget * 4, bfqd->bfq_max_budget);
aee69d78
PV
2721 break;
2722 case BFQQE_NO_MORE_REQUESTS:
2723 /*
2724 * For queues that expire for this reason, it
2725 * is particularly important to keep the
2726 * budget close to the actual service they
2727 * need. Doing so reduces the timestamp
2728 * misalignment problem described in the
2729 * comments in the body of
2730 * __bfq_activate_entity. In fact, suppose
2731 * that a queue systematically expires for
2732 * BFQQE_NO_MORE_REQUESTS and presents a
2733 * new request in time to enjoy timestamp
2734 * back-shifting. The larger the budget of the
2735 * queue is with respect to the service the
2736 * queue actually requests in each service
2737 * slot, the more times the queue can be
2738 * reactivated with the same virtual finish
2739 * time. It follows that, even if this finish
2740 * time is pushed to the system virtual time
2741 * to reduce the consequent timestamp
2742 * misalignment, the queue unjustly enjoys for
2743 * many re-activations a lower finish time
2744 * than all newly activated queues.
2745 *
2746 * The service needed by bfqq is measured
2747 * quite precisely by bfqq->entity.service.
2748 * Since bfqq does not enjoy device idling,
2749 * bfqq->entity.service is equal to the number
2750 * of sectors that the process associated with
2751 * bfqq requested to read/write before waiting
2752 * for request completions, or blocking for
2753 * other reasons.
2754 */
2755 budget = max_t(int, bfqq->entity.service, min_budget);
2756 break;
2757 default:
2758 return;
2759 }
44e44a1b 2760 } else if (!bfq_bfqq_sync(bfqq)) {
aee69d78
PV
2761 /*
2762 * Async queues get always the maximum possible
2763 * budget, as for them we do not care about latency
2764 * (in addition, their ability to dispatch is limited
2765 * by the charging factor).
2766 */
2767 budget = bfqd->bfq_max_budget;
2768 }
2769
2770 bfqq->max_budget = budget;
2771
2772 if (bfqd->budgets_assigned >= bfq_stats_min_budgets &&
2773 !bfqd->bfq_user_max_budget)
2774 bfqq->max_budget = min(bfqq->max_budget, bfqd->bfq_max_budget);
2775
2776 /*
2777 * If there is still backlog, then assign a new budget, making
2778 * sure that it is large enough for the next request. Since
2779 * the finish time of bfqq must be kept in sync with the
2780 * budget, be sure to call __bfq_bfqq_expire() *after* this
2781 * update.
2782 *
2783 * If there is no backlog, then no need to update the budget;
2784 * it will be updated on the arrival of a new request.
2785 */
2786 next_rq = bfqq->next_rq;
2787 if (next_rq)
2788 bfqq->entity.budget = max_t(unsigned long, bfqq->max_budget,
2789 bfq_serv_to_charge(next_rq, bfqq));
2790
2791 bfq_log_bfqq(bfqd, bfqq, "head sect: %u, new budget %d",
2792 next_rq ? blk_rq_sectors(next_rq) : 0,
2793 bfqq->entity.budget);
2794}
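
/*
 * Editor's sketch, not part of the original file: the budget feedback
 * above for sync, non-weight-raised queues, condensed into one
 * function (caps and floors included). Names are hypothetical.
 */
enum expiration {
	TOO_IDLE, BUDGET_TIMEOUT, BUDGET_EXHAUSTED, NO_MORE_REQUESTS
};

static int feedback_budget(enum expiration reason, int budget,
			   int min_budget, int max_budget,
			   int dispatched, int service)
{
	switch (reason) {
	case TOO_IDLE:
		if (dispatched > 0)	/* still outstanding reqs: grow */
			return budget * 2 < max_budget ? budget * 2 : max_budget;
		/* no outstanding reqs: the only case where we shrink */
		return budget > 5 * min_budget ? budget - 4 * min_budget
					       : min_budget;
	case BUDGET_TIMEOUT:
		return budget * 2 < max_budget ? budget * 2 : max_budget;
	case BUDGET_EXHAUSTED:
		return budget * 4 < max_budget ? budget * 4 : max_budget;
	case NO_MORE_REQUESTS:	/* track the actually needed service */
		return service > min_budget ? service : min_budget;
	default:
		return budget;
	}
}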
2795
aee69d78 2796/*
ab0e43e9
PV
2797 * Return true if the process associated with bfqq is "slow". The slow
2798 * flag is used, in addition to the budget timeout, to reduce the
2799 * amount of service provided to seeky processes, and thus reduce
2800 * their chances to lower the throughput. More details in the comments
2801 * on the function bfq_bfqq_expire().
2802 *
2803 * An important observation is in order: as discussed in the comments
2804 * on the function bfq_update_peak_rate(), with devices with internal
 2805 * queues, it is hard, if possible at all, to know when and for how long
2806 * an I/O request is processed by the device (apart from the trivial
2807 * I/O pattern where a new request is dispatched only after the
2808 * previous one has been completed). This makes it hard to evaluate
2809 * the real rate at which the I/O requests of each bfq_queue are
2810 * served. In fact, for an I/O scheduler like BFQ, serving a
2811 * bfq_queue means just dispatching its requests during its service
2812 * slot (i.e., until the budget of the queue is exhausted, or the
2813 * queue remains idle, or, finally, a timeout fires). But, during the
2814 * service slot of a bfq_queue, around 100 ms at most, the device may
 2815 * still be processing requests of bfq_queues served in previous
2816 * service slots. On the opposite end, the requests of the in-service
2817 * bfq_queue may be completed after the service slot of the queue
2818 * finishes.
2819 *
2820 * Anyway, unless more sophisticated solutions are used
2821 * (where possible), the sum of the sizes of the requests dispatched
2822 * during the service slot of a bfq_queue is probably the only
2823 * approximation available for the service received by the bfq_queue
2824 * during its service slot. And this sum is the quantity used in this
2825 * function to evaluate the I/O speed of a process.
aee69d78 2826 */
ab0e43e9
PV
2827static bool bfq_bfqq_is_slow(struct bfq_data *bfqd, struct bfq_queue *bfqq,
2828 bool compensate, enum bfqq_expiration reason,
2829 unsigned long *delta_ms)
aee69d78 2830{
ab0e43e9
PV
2831 ktime_t delta_ktime;
2832 u32 delta_usecs;
2833 bool slow = BFQQ_SEEKY(bfqq); /* if delta too short, use seekyness */
aee69d78 2834
ab0e43e9 2835 if (!bfq_bfqq_sync(bfqq))
aee69d78
PV
2836 return false;
2837
2838 if (compensate)
ab0e43e9 2839 delta_ktime = bfqd->last_idling_start;
aee69d78 2840 else
ab0e43e9
PV
2841 delta_ktime = ktime_get();
2842 delta_ktime = ktime_sub(delta_ktime, bfqd->last_budget_start);
2843 delta_usecs = ktime_to_us(delta_ktime);
aee69d78
PV
2844
2845 /* don't use too short time intervals */
ab0e43e9
PV
2846 if (delta_usecs < 1000) {
2847 if (blk_queue_nonrot(bfqd->queue))
2848 /*
2849 * give same worst-case guarantees as idling
2850 * for seeky
2851 */
2852 *delta_ms = BFQ_MIN_TT / NSEC_PER_MSEC;
2853 else /* charge at least one seek */
2854 *delta_ms = bfq_slice_idle / NSEC_PER_MSEC;
2855
2856 return slow;
2857 }
aee69d78 2858
ab0e43e9 2859 *delta_ms = delta_usecs / USEC_PER_MSEC;
aee69d78
PV
2860
2861 /*
ab0e43e9
PV
2862 * Use only long (> 20ms) intervals to filter out excessive
2863 * spikes in service rate estimation.
aee69d78 2864 */
ab0e43e9
PV
2865 if (delta_usecs > 20000) {
2866 /*
2867 * Caveat for rotational devices: processes doing I/O
2868 * in the slower disk zones tend to be slow(er) even
2869 * if not seeky. In this respect, the estimated peak
2870 * rate is likely to be an average over the disk
2871 * surface. Accordingly, to not be too harsh with
2872 * unlucky processes, a process is deemed slow only if
2873 * its rate has been lower than half of the estimated
2874 * peak rate.
2875 */
2876 slow = bfqq->entity.service < bfqd->bfq_max_budget / 2;
aee69d78
PV
2877 }
2878
ab0e43e9 2879 bfq_log_bfqq(bfqd, bfqq, "bfq_bfqq_is_slow: slow %d", slow);
aee69d78 2880
ab0e43e9 2881 return slow;
aee69d78
PV
2882}
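
/*
 * Editor's sketch, not part of the original file: the essence of the
 * slowness test above. Intervals of at most 20 ms are not trusted and
 * fall back on seekiness; over longer intervals a process is deemed
 * slow only if it consumed less than half of the reference budget.
 * Hypothetical names.
 */
#include <stdbool.h>

static bool is_slow(unsigned int delta_usecs, bool seeky,
		    int service, int max_budget)
{
	if (delta_usecs <= 20000)	/* too short to judge by rate */
		return seeky;

	return service < max_budget / 2;
}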
2883
77b7dcea
PV
2884/*
2885 * To be deemed as soft real-time, an application must meet two
2886 * requirements. First, the application must not require an average
 2887 * bandwidth higher than the approximate bandwidth required to play back or
2888 * record a compressed high-definition video.
2889 * The next function is invoked on the completion of the last request of a
2890 * batch, to compute the next-start time instant, soft_rt_next_start, such
2891 * that, if the next request of the application does not arrive before
2892 * soft_rt_next_start, then the above requirement on the bandwidth is met.
2893 *
2894 * The second requirement is that the request pattern of the application is
2895 * isochronous, i.e., that, after issuing a request or a batch of requests,
2896 * the application stops issuing new requests until all its pending requests
2897 * have been completed. After that, the application may issue a new batch,
2898 * and so on.
2899 * For this reason the next function is invoked to compute
2900 * soft_rt_next_start only for applications that meet this requirement,
2901 * whereas soft_rt_next_start is set to infinity for applications that do
2902 * not.
2903 *
2904 * Unfortunately, even a greedy application may happen to behave in an
2905 * isochronous way if the CPU load is high. In fact, the application may
2906 * stop issuing requests while the CPUs are busy serving other processes,
2907 * then restart, then stop again for a while, and so on. In addition, if
2908 * the disk achieves a low enough throughput with the request pattern
2909 * issued by the application (e.g., because the request pattern is random
2910 * and/or the device is slow), then the application may meet the above
 2911 * bandwidth requirement too. To prevent such a greedy application from
 2912 * being deemed soft real-time, a further rule is used in the computation of
2913 * soft_rt_next_start: soft_rt_next_start must be higher than the current
2914 * time plus the maximum time for which the arrival of a request is waited
2915 * for when a sync queue becomes idle, namely bfqd->bfq_slice_idle.
2916 * This filters out greedy applications, as the latter issue instead their
2917 * next request as soon as possible after the last one has been completed
2918 * (in contrast, when a batch of requests is completed, a soft real-time
2919 * application spends some time processing data).
2920 *
2921 * Unfortunately, the last filter may easily generate false positives if
2922 * only bfqd->bfq_slice_idle is used as a reference time interval and one
2923 * or both the following cases occur:
2924 * 1) HZ is so low that the duration of a jiffy is comparable to or higher
2925 * than bfqd->bfq_slice_idle. This happens, e.g., on slow devices with
2926 * HZ=100.
2927 * 2) jiffies, instead of increasing at a constant rate, may stop increasing
2928 * for a while, then suddenly 'jump' by several units to recover the lost
2929 * increments. This seems to happen, e.g., inside virtual machines.
2930 * To address this issue, we do not use as a reference time interval just
2931 * bfqd->bfq_slice_idle, but bfqd->bfq_slice_idle plus a few jiffies. In
2932 * particular we add the minimum number of jiffies for which the filter
2933 * seems to be quite precise also in embedded systems and KVM/QEMU virtual
2934 * machines.
2935 */
2936static unsigned long bfq_bfqq_softrt_next_start(struct bfq_data *bfqd,
2937 struct bfq_queue *bfqq)
2938{
2939 return max(bfqq->last_idle_bklogged +
2940 HZ * bfqq->service_from_backlogged /
2941 bfqd->bfq_wr_max_softrt_rate,
2942 jiffies + nsecs_to_jiffies(bfqq->bfqd->bfq_slice_idle) + 4);
2943}
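
/*
 * Editor's sketch, not part of the original file: the formula above in
 * a standalone form. The first term is the earliest instant at which a
 * new request keeps the queue within the soft real-time bandwidth; the
 * second filters out greedy applications and absorbs coarse or jumpy
 * jiffies with a few jiffies of slack. Hypothetical names.
 */
static unsigned long softrt_next_start(unsigned long last_idle_bklogged,
				       unsigned long hz,
				       unsigned long service_from_backlogged,
				       unsigned long max_softrt_rate,
				       unsigned long now,
				       unsigned long slice_idle_jiffies)
{
	unsigned long bw_bound = last_idle_bklogged +
		hz * service_from_backlogged / max_softrt_rate;
	unsigned long greed_bound = now + slice_idle_jiffies + 4;

	return bw_bound > greed_bound ? bw_bound : greed_bound;
}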
2944
aee69d78
PV
2945/**
2946 * bfq_bfqq_expire - expire a queue.
2947 * @bfqd: device owning the queue.
2948 * @bfqq: the queue to expire.
2949 * @compensate: if true, compensate for the time spent idling.
2950 * @reason: the reason causing the expiration.
2951 *
c074170e
PV
2952 * If the process associated with bfqq does slow I/O (e.g., because it
2953 * issues random requests), we charge bfqq with the time it has been
2954 * in service instead of the service it has received (see
2955 * bfq_bfqq_charge_time for details on how this goal is achieved). As
2956 * a consequence, bfqq will typically get higher timestamps upon
2957 * reactivation, and hence it will be rescheduled as if it had
2958 * received more service than what it has actually received. In the
2959 * end, bfqq receives less service in proportion to how slowly its
2960 * associated process consumes its budgets (and hence how seriously it
2961 * tends to lower the throughput). In addition, this time-charging
2962 * strategy guarantees time fairness among slow processes. In
2963 * contrast, if the process associated with bfqq is not slow, we
2964 * charge bfqq exactly with the service it has received.
aee69d78 2965 *
c074170e
PV
2966 * Charging time to the first type of queues and the exact service to
2967 * the other has the effect of using the WF2Q+ policy to schedule the
2968 * former on a timeslice basis, without violating service domain
2969 * guarantees among the latter.
aee69d78 2970 */
ea25da48
PV
2971void bfq_bfqq_expire(struct bfq_data *bfqd,
2972 struct bfq_queue *bfqq,
2973 bool compensate,
2974 enum bfqq_expiration reason)
aee69d78
PV
2975{
2976 bool slow;
ab0e43e9
PV
2977 unsigned long delta = 0;
2978 struct bfq_entity *entity = &bfqq->entity;
aee69d78
PV
2979 int ref;
2980
2981 /*
ab0e43e9 2982 * Check whether the process is slow (see bfq_bfqq_is_slow).
aee69d78 2983 */
ab0e43e9 2984 slow = bfq_bfqq_is_slow(bfqd, bfqq, compensate, reason, &delta);
aee69d78 2985
77b7dcea
PV
2986 /*
2987 * Increase service_from_backlogged before next statement,
2988 * because the possible next invocation of
2989 * bfq_bfqq_charge_time would likely inflate
2990 * entity->service. In contrast, service_from_backlogged must
2991 * contain real service, to enable the soft real-time
2992 * heuristic to correctly compute the bandwidth consumed by
2993 * bfqq.
2994 */
2995 bfqq->service_from_backlogged += entity->service;
2996
aee69d78 2997 /*
c074170e
PV
2998 * As above explained, charge slow (typically seeky) and
2999 * timed-out queues with the time and not the service
3000 * received, to favor sequential workloads.
3001 *
3002 * Processes doing I/O in the slower disk zones will tend to
3003 * be slow(er) even if not seeky. Therefore, since the
3004 * estimated peak rate is actually an average over the disk
3005 * surface, these processes may timeout just for bad luck. To
3006 * avoid punishing them, do not charge time to processes that
3007 * succeeded in consuming at least 2/3 of their budget. This
3008 * allows BFQ to preserve enough elasticity to still perform
 3009 * bandwidth, and not time, distribution with processes that
 3010 * are only slightly unlucky or quasi-sequential.
aee69d78 3011 */
44e44a1b
PV
3012 if (bfqq->wr_coeff == 1 &&
3013 (slow ||
3014 (reason == BFQQE_BUDGET_TIMEOUT &&
3015 bfq_bfqq_budget_left(bfqq) >= entity->budget / 3)))
c074170e 3016 bfq_bfqq_charge_time(bfqd, bfqq, delta);
aee69d78
PV
3017
3018 if (reason == BFQQE_TOO_IDLE &&
ab0e43e9 3019 entity->service <= 2 * entity->budget / 10)
aee69d78
PV
3020 bfq_clear_bfqq_IO_bound(bfqq);
3021
44e44a1b
PV
3022 if (bfqd->low_latency && bfqq->wr_coeff == 1)
3023 bfqq->last_wr_start_finish = jiffies;
3024
77b7dcea
PV
3025 if (bfqd->low_latency && bfqd->bfq_wr_max_softrt_rate > 0 &&
3026 RB_EMPTY_ROOT(&bfqq->sort_list)) {
3027 /*
3028 * If we get here, and there are no outstanding
3029 * requests, then the request pattern is isochronous
3030 * (see the comments on the function
3031 * bfq_bfqq_softrt_next_start()). Thus we can compute
3032 * soft_rt_next_start. If, instead, the queue still
3033 * has outstanding requests, then we have to wait for
3034 * the completion of all the outstanding requests to
3035 * discover whether the request pattern is actually
3036 * isochronous.
3037 */
3038 if (bfqq->dispatched == 0)
3039 bfqq->soft_rt_next_start =
3040 bfq_bfqq_softrt_next_start(bfqd, bfqq);
3041 else {
3042 /*
3043 * The application is still waiting for the
3044 * completion of one or more requests:
3045 * prevent it from possibly being incorrectly
3046 * deemed as soft real-time by setting its
3047 * soft_rt_next_start to infinity. In fact,
3048 * without this assignment, the application
3049 * would be incorrectly deemed as soft
3050 * real-time if:
3051 * 1) it issued a new request before the
3052 * completion of all its in-flight
3053 * requests, and
3054 * 2) at that time, its soft_rt_next_start
3055 * happened to be in the past.
3056 */
3057 bfqq->soft_rt_next_start =
3058 bfq_greatest_from_now();
3059 /*
3060 * Schedule an update of soft_rt_next_start to when
3061 * the task may be discovered to be isochronous.
3062 */
3063 bfq_mark_bfqq_softrt_update(bfqq);
3064 }
3065 }
3066
aee69d78 3067 bfq_log_bfqq(bfqd, bfqq,
d5be3fef
PV
3068 "expire (%d, slow %d, num_disp %d, short_ttime %d)", reason,
3069 slow, bfqq->dispatched, bfq_bfqq_has_short_ttime(bfqq));
aee69d78
PV
3070
3071 /*
3072 * Increase, decrease or leave budget unchanged according to
3073 * reason.
3074 */
3075 __bfq_bfqq_recalc_budget(bfqd, bfqq, reason);
3076 ref = bfqq->ref;
3077 __bfq_bfqq_expire(bfqd, bfqq);
3078
 3079 /* mark bfqq as waiting for a request only if a bic still points to it */
3080 if (ref > 1 && !bfq_bfqq_busy(bfqq) &&
3081 reason != BFQQE_BUDGET_TIMEOUT &&
3082 reason != BFQQE_BUDGET_EXHAUSTED)
3083 bfq_mark_bfqq_non_blocking_wait_rq(bfqq);
3084}
3085
3086/*
3087 * Budget timeout is not implemented through a dedicated timer, but
3088 * just checked on request arrivals and completions, as well as on
3089 * idle timer expirations.
3090 */
3091static bool bfq_bfqq_budget_timeout(struct bfq_queue *bfqq)
3092{
44e44a1b 3093 return time_is_before_eq_jiffies(bfqq->budget_timeout);
aee69d78
PV
3094}
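/*
 * For reference, time_is_before_eq_jiffies(t) is defined in
 * <linux/jiffies.h> as time_after_eq(jiffies, t), which boils down to a
 * wrap-safe signed comparison. A minimal sketch of the same idiom
 * (illustrative only, not part of this file):
 *
 *	static inline bool deadline_passed(unsigned long deadline)
 *	{
 *		return (long)(jiffies - deadline) >= 0;
 *	}
 *
 * Hence the function above returns true once the instant stored in
 * bfqq->budget_timeout has elapsed, even across a jiffies wrap-around.
 */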
3095
3096/*
3097 * If we expire a queue that is actively waiting (i.e., with the
3098 * device idled) for the arrival of a new request, then we may incur
3099 * the timestamp misalignment problem described in the body of the
3100 * function __bfq_activate_entity. Hence we return true only if this
3101 * condition does not hold, or if the queue is slow enough to deserve
3102 * only to be kicked off for preserving a high throughput.
3103 */
3104static bool bfq_may_expire_for_budg_timeout(struct bfq_queue *bfqq)
3105{
3106 bfq_log_bfqq(bfqq->bfqd, bfqq,
3107 "may_budget_timeout: wait_request %d left %d timeout %d",
3108 bfq_bfqq_wait_request(bfqq),
3109 bfq_bfqq_budget_left(bfqq) >= bfqq->entity.budget / 3,
3110 bfq_bfqq_budget_timeout(bfqq));
3111
3112 return (!bfq_bfqq_wait_request(bfqq) ||
3113 bfq_bfqq_budget_left(bfqq) >= bfqq->entity.budget / 3)
3114 &&
3115 bfq_bfqq_budget_timeout(bfqq);
3116}
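/*
 * Spelling out the expression above as a truth table (budget timeout
 * already expired; "waiting" = bfq_bfqq_wait_request(), "left >= 1/3" =
 * at least a third of the budget unconsumed):
 *
 *	waiting   left >= 1/3   expire?
 *	   no         no          yes
 *	   no         yes         yes
 *	   yes        no          no
 *	   yes        yes         yes
 *
 * Only a queue that is actively idling and has already consumed more
 * than 2/3 of its budget is spared, precisely to avoid the timestamp
 * misalignment mentioned above.
 */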
3117
3118/*
3119 * For a queue that becomes empty, device idling is allowed only if
44e44a1b
PV
3120 * this function returns true for the queue. As a consequence, since
3121 * device idling plays a critical role in both throughput boosting and
3122 * service guarantees, the return value of this function plays a
3123 * critical role in both these aspects as well.
3124 *
3125 * In a nutshell, this function returns true only if idling is
3126 * beneficial for throughput or, even if detrimental for throughput,
3127 * idling is however necessary to preserve service guarantees (low
3128 * latency, desired throughput distribution, ...). In particular, on
3129 * NCQ-capable devices, this function tries to return false, so as to
3130 * help keep the drives' internal queues full, whenever this helps the
3131 * device boost the throughput without causing any service-guarantee
3132 * issue.
3133 *
3134 * In more detail, the return value of this function is obtained by,
3135 * first, computing a number of boolean variables that take into
3136 * account throughput and service-guarantee issues, and, then,
3137 * combining these variables in a logical expression. Most of the
3138 * issues taken into account are not trivial. We discuss these issues
3139 * individually while introducing the variables.
aee69d78
PV
3140 */
3141static bool bfq_bfqq_may_idle(struct bfq_queue *bfqq)
3142{
3143 struct bfq_data *bfqd = bfqq->bfqd;
edaf9428
PV
3144 bool rot_without_queueing =
3145 !blk_queue_nonrot(bfqd->queue) && !bfqd->hw_tag,
3146 bfqq_sequential_and_IO_bound,
3147 idling_boosts_thr, idling_boosts_thr_without_issues,
e1b2324d 3148 idling_needed_for_service_guarantees,
cfd69712 3149 asymmetric_scenario;
aee69d78
PV
3150
3151 if (bfqd->strict_guarantees)
3152 return true;
3153
d5be3fef
PV
3154 /*
3155 * Idling is performed only if slice_idle > 0. In addition, we
3156 * do not idle if
3157 * (a) bfqq is async
3158 * (b) bfqq is in the idle io prio class: in this case we do
3159 * not idle because we want to minimize the bandwidth that
 3160 * queues in this class can steal from higher-priority queues
3161 */
3162 if (bfqd->bfq_slice_idle == 0 || !bfq_bfqq_sync(bfqq) ||
3163 bfq_class_idle(bfqq))
3164 return false;
3165
edaf9428
PV
3166 bfqq_sequential_and_IO_bound = !BFQQ_SEEKY(bfqq) &&
3167 bfq_bfqq_IO_bound(bfqq) && bfq_bfqq_has_short_ttime(bfqq);
3168
aee69d78 3169 /*
44e44a1b
PV
3170 * The next variable takes into account the cases where idling
3171 * boosts the throughput.
3172 *
e01eff01
PV
3173 * The value of the variable is computed considering, first, that
3174 * idling is virtually always beneficial for the throughput if:
edaf9428
PV
3175 * (a) the device is not NCQ-capable and rotational, or
3176 * (b) regardless of the presence of NCQ, the device is rotational and
3177 * the request pattern for bfqq is I/O-bound and sequential, or
3178 * (c) regardless of whether it is rotational, the device is
3179 * not NCQ-capable and the request pattern for bfqq is
3180 * I/O-bound and sequential.
bf2b79e7
PV
3181 *
3182 * Secondly, and in contrast to the above item (b), idling an
3183 * NCQ-capable flash-based device would not boost the
e01eff01 3184 * throughput even with sequential I/O; rather it would lower
bf2b79e7
PV
3185 * the throughput in proportion to how fast the device
3186 * is. Accordingly, the next variable is true if any of the
edaf9428
PV
3187 * above conditions (a), (b) or (c) is true, and, in
3188 * particular, happens to be false if bfqd is an NCQ-capable
3189 * flash-based device.
aee69d78 3190 */
edaf9428
PV
3191 idling_boosts_thr = rot_without_queueing ||
3192 ((!blk_queue_nonrot(bfqd->queue) || !bfqd->hw_tag) &&
3193 bfqq_sequential_and_IO_bound);
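/*
 * The resulting decision table (illustrative; "rot" = rotational,
 * "NCQ" = bfqd->hw_tag, "seq" = bfqq_sequential_and_IO_bound):
 *
 *	rot  NCQ  seq   idling_boosts_thr
 *	 1    0    -           1	(case a)
 *	 1    1    1           1	(case b)
 *	 0    0    1           1	(case c)
 *	 0    1    -           0	(NCQ-capable flash)
 *	 1    1    0           0
 *	 0    0    0           0
 */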
aee69d78 3194
cfd69712
PV
3195 /*
3196 * The value of the next variable,
3197 * idling_boosts_thr_without_issues, is equal to that of
3198 * idling_boosts_thr, unless a special case holds. In this
3199 * special case, described below, idling may cause problems to
3200 * weight-raised queues.
3201 *
3202 * When the request pool is saturated (e.g., in the presence
3203 * of write hogs), if the processes associated with
3204 * non-weight-raised queues ask for requests at a lower rate,
3205 * then processes associated with weight-raised queues have a
3206 * higher probability to get a request from the pool
3207 * immediately (or at least soon) when they need one. Thus
3208 * they have a higher probability to actually get a fraction
3209 * of the device throughput proportional to their high
3210 * weight. This is especially true with NCQ-capable drives,
3211 * which enqueue several requests in advance, and further
3212 * reorder internally-queued requests.
3213 *
3214 * For this reason, we force to false the value of
3215 * idling_boosts_thr_without_issues if there are weight-raised
3216 * busy queues. In this case, and if bfqq is not weight-raised,
3217 * this guarantees that the device is not idled for bfqq (if,
3218 * instead, bfqq is weight-raised, then idling will be
3219 * guaranteed by another variable, see below). Combined with
3220 * the timestamping rules of BFQ (see [1] for details), this
3221 * behavior causes bfqq, and hence any sync non-weight-raised
3222 * queue, to get a lower number of requests served, and thus
3223 * to ask for a lower number of requests from the request
3224 * pool, before the busy weight-raised queues get served
3225 * again. This often mitigates starvation problems in the
3226 * presence of heavy write workloads and NCQ, thereby
3227 * guaranteeing a higher application and system responsiveness
3228 * in these hostile scenarios.
3229 */
3230 idling_boosts_thr_without_issues = idling_boosts_thr &&
3231 bfqd->wr_busy_queues == 0;
3232
aee69d78 3233 /*
bf2b79e7
PV
3234 * There is then a case where idling must be performed not
3235 * for throughput concerns, but to preserve service
3236 * guarantees.
3237 *
3238 * To introduce this case, we can note that allowing the drive
3239 * to enqueue more than one request at a time, and hence
44e44a1b 3240 * delegating de facto final scheduling decisions to the
bf2b79e7 3241 * drive's internal scheduler, entails loss of control on the
44e44a1b 3242 * actual request service order. In particular, the critical
bf2b79e7 3243 * situation is when requests from different processes happen
44e44a1b
PV
3244 * to be present, at the same time, in the internal queue(s)
3245 * of the drive. In such a situation, the drive, by deciding
3246 * the service order of the internally-queued requests, does
3247 * determine also the actual throughput distribution among
3248 * these processes. But the drive typically has no notion or
3249 * concern about per-process throughput distribution, and
3250 * makes its decisions only on a per-request basis. Therefore,
3251 * the service distribution enforced by the drive's internal
3252 * scheduler is likely to coincide with the desired
3253 * device-throughput distribution only in a completely
bf2b79e7
PV
3254 * symmetric scenario where:
3255 * (i) each of these processes must get the same throughput as
3256 * the others;
3257 * (ii) all these processes have the same I/O pattern
 3258 * (either sequential or random).
3259 * In fact, in such a scenario, the drive will tend to treat
3260 * the requests of each of these processes in about the same
3261 * way as the requests of the others, and thus to provide
3262 * each of these processes with about the same throughput
3263 * (which is exactly the desired throughput distribution). In
3264 * contrast, in any asymmetric scenario, device idling is
3265 * certainly needed to guarantee that bfqq receives its
3266 * assigned fraction of the device throughput (see [1] for
3267 * details).
3268 *
3269 * We address this issue by controlling, actually, only the
3270 * symmetry sub-condition (i), i.e., provided that
3271 * sub-condition (i) holds, idling is not performed,
3272 * regardless of whether sub-condition (ii) holds. In other
3273 * words, only if sub-condition (i) holds, then idling is
3274 * allowed, and the device tends to be prevented from queueing
3275 * many requests, possibly of several processes. The reason
3276 * for not controlling also sub-condition (ii) is that we
3277 * exploit preemption to preserve guarantees in case of
3278 * symmetric scenarios, even if (ii) does not hold, as
3279 * explained in the next two paragraphs.
3280 *
3281 * Even if a queue, say Q, is expired when it remains idle, Q
3282 * can still preempt the new in-service queue if the next
3283 * request of Q arrives soon (see the comments on
3284 * bfq_bfqq_update_budg_for_activation). If all queues and
3285 * groups have the same weight, this form of preemption,
3286 * combined with the hole-recovery heuristic described in the
3287 * comments on function bfq_bfqq_update_budg_for_activation,
 3288 * is enough to preserve a correct bandwidth distribution in
3289 * the mid term, even without idling. In fact, even if not
3290 * idling allows the internal queues of the device to contain
3291 * many requests, and thus to reorder requests, we can rather
3292 * safely assume that the internal scheduler still preserves a
3293 * minimum of mid-term fairness. The motivation for using
3294 * preemption instead of idling is that, by not idling,
3295 * service guarantees are preserved without minimally
3296 * sacrificing throughput. In other words, both a high
3297 * throughput and its desired distribution are obtained.
3298 *
3299 * More precisely, this preemption-based, idleless approach
3300 * provides fairness in terms of IOPS, and not sectors per
3301 * second. This can be seen with a simple example. Suppose
3302 * that there are two queues with the same weight, but that
3303 * the first queue receives requests of 8 sectors, while the
3304 * second queue receives requests of 1024 sectors. In
3305 * addition, suppose that each of the two queues contains at
3306 * most one request at a time, which implies that each queue
3307 * always remains idle after it is served. Finally, after
3308 * remaining idle, each queue receives very quickly a new
3309 * request. It follows that the two queues are served
3310 * alternatively, preempting each other if needed. This
3311 * implies that, although both queues have the same weight,
3312 * the queue with large requests receives a service that is
3313 * 1024/8 times as high as the service received by the other
3314 * queue.
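 * (Concretely: in each round both queues dispatch exactly one
 * request, so per round the first queue is served 8 sectors and
 * the second 1024; in the sector domain their service ratio is
 * thus 1024/8 = 128, while in the IOPS domain it is exactly 1.)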
44e44a1b 3315 *
bf2b79e7
PV
3316 * On the other hand, device idling is performed, and thus
3317 * pure sector-domain guarantees are provided, for the
3318 * following queues, which are likely to need stronger
3319 * throughput guarantees: weight-raised queues, and queues
3320 * with a higher weight than other queues. When such queues
3321 * are active, sub-condition (i) is false, which triggers
3322 * device idling.
44e44a1b 3323 *
bf2b79e7
PV
3324 * According to the above considerations, the next variable is
3325 * true (only) if sub-condition (i) holds. To compute the
3326 * value of this variable, we not only use the return value of
3327 * the function bfq_symmetric_scenario(), but also check
3328 * whether bfqq is being weight-raised, because
 3329 * bfq_symmetric_scenario() does not also take into account
3330 * weight-raised queues (see comments on
3331 * bfq_weights_tree_add()).
44e44a1b
PV
3332 *
3333 * As a side note, it is worth considering that the above
3334 * device-idling countermeasures may however fail in the
3335 * following unlucky scenario: if idling is (correctly)
bf2b79e7
PV
3336 * disabled in a time period during which all symmetry
3337 * sub-conditions hold, and hence the device is allowed to
44e44a1b
PV
3338 * enqueue many requests, but at some later point in time some
 3339 * sub-condition ceases to hold, then it may become impossible
3340 * to let requests be served in the desired order until all
3341 * the requests already queued in the device have been served.
3342 */
bf2b79e7
PV
3343 asymmetric_scenario = bfqq->wr_coeff > 1 ||
3344 !bfq_symmetric_scenario(bfqd);
44e44a1b 3345
e1b2324d
AA
3346 /*
3347 * Finally, there is a case where maximizing throughput is the
3348 * best choice even if it may cause unfairness toward
3349 * bfqq. Such a case is when bfqq became active in a burst of
3350 * queue activations. Queues that became active during a large
3351 * burst benefit only from throughput, as discussed in the
3352 * comments on bfq_handle_burst. Thus, if bfqq became active
3353 * in a burst and not idling the device maximizes throughput,
 3354 * then the device must not be idled, because not idling the
3355 * device provides bfqq and all other queues in the burst with
3356 * maximum benefit. Combining this and the above case, we can
3357 * now establish when idling is actually needed to preserve
3358 * service guarantees.
3359 */
3360 idling_needed_for_service_guarantees =
3361 asymmetric_scenario && !bfq_bfqq_in_large_burst(bfqq);
3362
44e44a1b 3363 /*
d5be3fef
PV
3364 * We have now all the components we need to compute the
3365 * return value of the function, which is true only if idling
3366 * either boosts the throughput (without issues), or is
3367 * necessary to preserve service guarantees.
aee69d78 3368 */
d5be3fef
PV
3369 return idling_boosts_thr_without_issues ||
3370 idling_needed_for_service_guarantees;
aee69d78
PV
3371}
3372
3373/*
3374 * If the in-service queue is empty but the function bfq_bfqq_may_idle
3375 * returns true, then:
3376 * 1) the queue must remain in service and cannot be expired, and
3377 * 2) the device must be idled to wait for the possible arrival of a new
3378 * request for the queue.
3379 * See the comments on the function bfq_bfqq_may_idle for the reasons
3380 * why performing device idling is the best choice to boost the throughput
3381 * and preserve service guarantees when bfq_bfqq_may_idle itself
3382 * returns true.
3383 */
3384static bool bfq_bfqq_must_idle(struct bfq_queue *bfqq)
3385{
d5be3fef 3386 return RB_EMPTY_ROOT(&bfqq->sort_list) && bfq_bfqq_may_idle(bfqq);
aee69d78
PV
3387}
3388
3389/*
3390 * Select a queue for service. If we have a current queue in service,
3391 * check whether to continue servicing it, or retrieve and set a new one.
3392 */
3393static struct bfq_queue *bfq_select_queue(struct bfq_data *bfqd)
3394{
3395 struct bfq_queue *bfqq;
3396 struct request *next_rq;
3397 enum bfqq_expiration reason = BFQQE_BUDGET_TIMEOUT;
3398
3399 bfqq = bfqd->in_service_queue;
3400 if (!bfqq)
3401 goto new_queue;
3402
3403 bfq_log_bfqq(bfqd, bfqq, "select_queue: already in-service queue");
3404
3405 if (bfq_may_expire_for_budg_timeout(bfqq) &&
3406 !bfq_bfqq_wait_request(bfqq) &&
3407 !bfq_bfqq_must_idle(bfqq))
3408 goto expire;
3409
3410check_queue:
3411 /*
3412 * This loop is rarely executed more than once. Even when it
3413 * happens, it is much more convenient to re-execute this loop
3414 * than to return NULL and trigger a new dispatch to get a
3415 * request served.
3416 */
3417 next_rq = bfqq->next_rq;
3418 /*
3419 * If bfqq has requests queued and it has enough budget left to
3420 * serve them, keep the queue, otherwise expire it.
3421 */
3422 if (next_rq) {
3423 if (bfq_serv_to_charge(next_rq, bfqq) >
3424 bfq_bfqq_budget_left(bfqq)) {
3425 /*
3426 * Expire the queue for budget exhaustion,
3427 * which makes sure that the next budget is
3428 * enough to serve the next request, even if
3429 * it comes from the fifo expired path.
3430 */
3431 reason = BFQQE_BUDGET_EXHAUSTED;
3432 goto expire;
3433 } else {
3434 /*
3435 * The idle timer may be pending because we may
3436 * not disable disk idling even when a new request
3437 * arrives.
3438 */
3439 if (bfq_bfqq_wait_request(bfqq)) {
3440 /*
 3441 * If we get here: 1) at least one new request
3442 * has arrived but we have not disabled the
3443 * timer because the request was too small,
 3444 * and 2) the block layer has then unplugged
3445 * the device, causing the dispatch to be
3446 * invoked.
3447 *
3448 * Since the device is unplugged, now the
3449 * requests are probably large enough to
3450 * provide a reasonable throughput.
3451 * So we disable idling.
3452 */
3453 bfq_clear_bfqq_wait_request(bfqq);
3454 hrtimer_try_to_cancel(&bfqd->idle_slice_timer);
e21b7a0b 3455 bfqg_stats_update_idle_time(bfqq_group(bfqq));
aee69d78
PV
3456 }
3457 goto keep_queue;
3458 }
3459 }
3460
3461 /*
3462 * No requests pending. However, if the in-service queue is idling
3463 * for a new request, or has requests waiting for a completion and
3464 * may idle after their completion, then keep it anyway.
3465 */
3466 if (bfq_bfqq_wait_request(bfqq) ||
3467 (bfqq->dispatched != 0 && bfq_bfqq_may_idle(bfqq))) {
3468 bfqq = NULL;
3469 goto keep_queue;
3470 }
3471
3472 reason = BFQQE_NO_MORE_REQUESTS;
3473expire:
3474 bfq_bfqq_expire(bfqd, bfqq, false, reason);
3475new_queue:
3476 bfqq = bfq_set_in_service_queue(bfqd);
3477 if (bfqq) {
3478 bfq_log_bfqq(bfqd, bfqq, "select_queue: checking new queue");
3479 goto check_queue;
3480 }
3481keep_queue:
3482 if (bfqq)
3483 bfq_log_bfqq(bfqd, bfqq, "select_queue: returned this queue");
3484 else
3485 bfq_log(bfqd, "select_queue: no queue returned");
3486
3487 return bfqq;
3488}
3489
44e44a1b
PV
3490static void bfq_update_wr_data(struct bfq_data *bfqd, struct bfq_queue *bfqq)
3491{
3492 struct bfq_entity *entity = &bfqq->entity;
3493
3494 if (bfqq->wr_coeff > 1) { /* queue is being weight-raised */
3495 bfq_log_bfqq(bfqd, bfqq,
3496 "raising period dur %u/%u msec, old coeff %u, w %d(%d)",
3497 jiffies_to_msecs(jiffies - bfqq->last_wr_start_finish),
3498 jiffies_to_msecs(bfqq->wr_cur_max_time),
3499 bfqq->wr_coeff,
3500 bfqq->entity.weight, bfqq->entity.orig_weight);
3501
3502 if (entity->prio_changed)
3503 bfq_log_bfqq(bfqd, bfqq, "WARN: pending prio change");
3504
3505 /*
e1b2324d
AA
3506 * If the queue was activated in a burst, or too much
3507 * time has elapsed from the beginning of this
3508 * weight-raising period, then end weight raising.
44e44a1b 3509 */
e1b2324d
AA
3510 if (bfq_bfqq_in_large_burst(bfqq))
3511 bfq_bfqq_end_wr(bfqq);
3512 else if (time_is_before_jiffies(bfqq->last_wr_start_finish +
3513 bfqq->wr_cur_max_time)) {
77b7dcea
PV
3514 if (bfqq->wr_cur_max_time != bfqd->bfq_wr_rt_max_time ||
3515 time_is_before_jiffies(bfqq->wr_start_at_switch_to_srt +
e1b2324d 3516 bfq_wr_duration(bfqd)))
77b7dcea
PV
3517 bfq_bfqq_end_wr(bfqq);
3518 else {
3e2bdd6d 3519 switch_back_to_interactive_wr(bfqq, bfqd);
77b7dcea
PV
3520 bfqq->entity.prio_changed = 1;
3521 }
44e44a1b
PV
3522 }
3523 }
431b17f9
PV
3524 /*
3525 * To improve latency (for this or other queues), immediately
3526 * update weight both if it must be raised and if it must be
 3527 * lowered. Since the entity may be on some active tree here, and
 3528 * might have a pending change of its ioprio class, invoke the
 3529 * next function with the last parameter unset (see the
3530 * comments on the function).
3531 */
44e44a1b 3532 if ((entity->weight > entity->orig_weight) != (bfqq->wr_coeff > 1))
431b17f9
PV
3533 __bfq_entity_update_weight_prio(bfq_entity_service_tree(entity),
3534 entity, false);
44e44a1b
PV
3535}
3536
aee69d78
PV
3537/*
3538 * Dispatch next request from bfqq.
3539 */
3540static struct request *bfq_dispatch_rq_from_bfqq(struct bfq_data *bfqd,
3541 struct bfq_queue *bfqq)
3542{
3543 struct request *rq = bfqq->next_rq;
3544 unsigned long service_to_charge;
3545
3546 service_to_charge = bfq_serv_to_charge(rq, bfqq);
3547
3548 bfq_bfqq_served(bfqq, service_to_charge);
3549
3550 bfq_dispatch_remove(bfqd->queue, rq);
3551
44e44a1b
PV
3552 /*
3553 * If weight raising has to terminate for bfqq, then next
3554 * function causes an immediate update of bfqq's weight,
3555 * without waiting for next activation. As a consequence, on
 3556 * expiration, bfqq will be timestamped as if it had never been
3557 * weight-raised during this service slot, even if it has
3558 * received part or even most of the service as a
3559 * weight-raised queue. This inflates bfqq's timestamps, which
3560 * is beneficial, as bfqq is then more willing to leave the
3561 * device immediately to possible other weight-raised queues.
3562 */
3563 bfq_update_wr_data(bfqd, bfqq);
3564
aee69d78
PV
3565 /*
3566 * Expire bfqq, pretending that its budget expired, if bfqq
3567 * belongs to CLASS_IDLE and other queues are waiting for
3568 * service.
3569 */
3570 if (bfqd->busy_queues > 1 && bfq_class_idle(bfqq))
3571 goto expire;
3572
3573 return rq;
3574
3575expire:
3576 bfq_bfqq_expire(bfqd, bfqq, false, BFQQE_BUDGET_EXHAUSTED);
3577 return rq;
3578}
3579
3580static bool bfq_has_work(struct blk_mq_hw_ctx *hctx)
3581{
3582 struct bfq_data *bfqd = hctx->queue->elevator->elevator_data;
3583
3584 /*
 3585 * Avoiding the lock: a race on bfqd->busy_queues should cause at
 3586 * most one useless call to dispatch
3587 */
3588 return !list_empty_careful(&bfqd->dispatch) ||
3589 bfqd->busy_queues > 0;
3590}
3591
3592static struct request *__bfq_dispatch_request(struct blk_mq_hw_ctx *hctx)
3593{
3594 struct bfq_data *bfqd = hctx->queue->elevator->elevator_data;
3595 struct request *rq = NULL;
3596 struct bfq_queue *bfqq = NULL;
3597
3598 if (!list_empty(&bfqd->dispatch)) {
3599 rq = list_first_entry(&bfqd->dispatch, struct request,
3600 queuelist);
3601 list_del_init(&rq->queuelist);
3602
3603 bfqq = RQ_BFQQ(rq);
3604
3605 if (bfqq) {
3606 /*
3607 * Increment counters here, because this
3608 * dispatch does not follow the standard
3609 * dispatch flow (where counters are
3610 * incremented)
3611 */
3612 bfqq->dispatched++;
3613
3614 goto inc_in_driver_start_rq;
3615 }
3616
3617 /*
3618 * We exploit the put_rq_private hook to decrement
3619 * rq_in_driver, but put_rq_private will not be
 3620 * invoked on this request. So, to avoid an imbalance,
3621 * just start this request, without incrementing
3622 * rq_in_driver. As a negative consequence,
3623 * rq_in_driver is deceptively lower than it should be
3624 * while this request is in service. This may cause
3625 * bfq_schedule_dispatch to be invoked uselessly.
3626 *
3627 * As for implementing an exact solution, the
3628 * put_request hook, if defined, is probably invoked
3629 * also on this request. So, by exploiting this hook,
3630 * we could 1) increment rq_in_driver here, and 2)
3631 * decrement it in put_request. Such a solution would
3632 * let the value of the counter be always accurate,
3633 * but it would entail using an extra interface
3634 * function. This cost seems higher than the benefit,
 3635 * given that non-elevator-private requests are very
 3636 * infrequent.
3637 */
3638 goto start_rq;
3639 }
3640
3641 bfq_log(bfqd, "dispatch requests: %d busy queues", bfqd->busy_queues);
3642
3643 if (bfqd->busy_queues == 0)
3644 goto exit;
3645
3646 /*
3647 * Force device to serve one request at a time if
3648 * strict_guarantees is true. Forcing this service scheme is
3649 * currently the ONLY way to guarantee that the request
3650 * service order enforced by the scheduler is respected by a
3651 * queueing device. Otherwise the device is free even to make
3652 * some unlucky request wait for as long as the device
3653 * wishes.
3654 *
 3655 * Of course, serving one request at a time may cause loss of
3656 * throughput.
3657 */
3658 if (bfqd->strict_guarantees && bfqd->rq_in_driver > 0)
3659 goto exit;
3660
3661 bfqq = bfq_select_queue(bfqd);
3662 if (!bfqq)
3663 goto exit;
3664
3665 rq = bfq_dispatch_rq_from_bfqq(bfqd, bfqq);
3666
3667 if (rq) {
3668inc_in_driver_start_rq:
3669 bfqd->rq_in_driver++;
3670start_rq:
3671 rq->rq_flags |= RQF_STARTED;
3672 }
3673exit:
3674 return rq;
3675}
3676
3677static struct request *bfq_dispatch_request(struct blk_mq_hw_ctx *hctx)
3678{
3679 struct bfq_data *bfqd = hctx->queue->elevator->elevator_data;
3680 struct request *rq;
3681
3682 spin_lock_irq(&bfqd->lock);
36eca894 3683
aee69d78 3684 rq = __bfq_dispatch_request(hctx);
6fa3e8d3 3685 spin_unlock_irq(&bfqd->lock);
aee69d78
PV
3686
3687 return rq;
3688}
3689
3690/*
3691 * Task holds one reference to the queue, dropped when task exits. Each rq
3692 * in-flight on this queue also holds a reference, dropped when rq is freed.
3693 *
3694 * Scheduler lock must be held here. Recall not to use bfqq after calling
3695 * this function on it.
3696 */
ea25da48 3697void bfq_put_queue(struct bfq_queue *bfqq)
aee69d78 3698{
e21b7a0b
AA
3699#ifdef CONFIG_BFQ_GROUP_IOSCHED
3700 struct bfq_group *bfqg = bfqq_group(bfqq);
3701#endif
3702
aee69d78
PV
3703 if (bfqq->bfqd)
3704 bfq_log_bfqq(bfqq->bfqd, bfqq, "put_queue: %p %d",
3705 bfqq, bfqq->ref);
3706
3707 bfqq->ref--;
3708 if (bfqq->ref)
3709 return;
3710
e1b2324d
AA
3711 if (bfq_bfqq_sync(bfqq))
3712 /*
3713 * The fact that this queue is being destroyed does not
3714 * invalidate the fact that this queue may have been
3715 * activated during the current burst. As a consequence,
 3716 * although the queue no longer exists, and hence needs
 3717 * to be removed from the burst list if it is there, the
 3718 * burst size must not be decremented.
3719 */
3720 hlist_del_init(&bfqq->burst_list_node);
e21b7a0b 3721
aee69d78 3722 kmem_cache_free(bfq_pool, bfqq);
e21b7a0b 3723#ifdef CONFIG_BFQ_GROUP_IOSCHED
8f9bebc3 3724 bfqg_and_blkg_put(bfqg);
e21b7a0b 3725#endif
aee69d78
PV
3726}
3727
36eca894
AA
3728static void bfq_put_cooperator(struct bfq_queue *bfqq)
3729{
3730 struct bfq_queue *__bfqq, *next;
3731
3732 /*
3733 * If this queue was scheduled to merge with another queue, be
3734 * sure to drop the reference taken on that queue (and others in
3735 * the merge chain). See bfq_setup_merge and bfq_merge_bfqqs.
3736 */
3737 __bfqq = bfqq->new_bfqq;
3738 while (__bfqq) {
3739 if (__bfqq == bfqq)
3740 break;
3741 next = __bfqq->new_bfqq;
3742 bfq_put_queue(__bfqq);
3743 __bfqq = next;
3744 }
3745}
3746
aee69d78
PV
3747static void bfq_exit_bfqq(struct bfq_data *bfqd, struct bfq_queue *bfqq)
3748{
3749 if (bfqq == bfqd->in_service_queue) {
3750 __bfq_bfqq_expire(bfqd, bfqq);
3751 bfq_schedule_dispatch(bfqd);
3752 }
3753
3754 bfq_log_bfqq(bfqd, bfqq, "exit_bfqq: %p, %d", bfqq, bfqq->ref);
3755
36eca894
AA
3756 bfq_put_cooperator(bfqq);
3757
aee69d78
PV
3758 bfq_put_queue(bfqq); /* release process reference */
3759}
3760
3761static void bfq_exit_icq_bfqq(struct bfq_io_cq *bic, bool is_sync)
3762{
3763 struct bfq_queue *bfqq = bic_to_bfqq(bic, is_sync);
3764 struct bfq_data *bfqd;
3765
3766 if (bfqq)
3767 bfqd = bfqq->bfqd; /* NULL if scheduler already exited */
3768
3769 if (bfqq && bfqd) {
3770 unsigned long flags;
3771
3772 spin_lock_irqsave(&bfqd->lock, flags);
3773 bfq_exit_bfqq(bfqd, bfqq);
3774 bic_set_bfqq(bic, NULL, is_sync);
6fa3e8d3 3775 spin_unlock_irqrestore(&bfqd->lock, flags);
aee69d78
PV
3776 }
3777}
3778
3779static void bfq_exit_icq(struct io_cq *icq)
3780{
3781 struct bfq_io_cq *bic = icq_to_bic(icq);
3782
3783 bfq_exit_icq_bfqq(bic, true);
3784 bfq_exit_icq_bfqq(bic, false);
3785}
3786
3787/*
3788 * Update the entity prio values; note that the new values will not
3789 * be used until the next (re)activation.
3790 */
3791static void
3792bfq_set_next_ioprio_data(struct bfq_queue *bfqq, struct bfq_io_cq *bic)
3793{
3794 struct task_struct *tsk = current;
3795 int ioprio_class;
3796 struct bfq_data *bfqd = bfqq->bfqd;
3797
3798 if (!bfqd)
3799 return;
3800
3801 ioprio_class = IOPRIO_PRIO_CLASS(bic->ioprio);
3802 switch (ioprio_class) {
3803 default:
3804 dev_err(bfqq->bfqd->queue->backing_dev_info->dev,
3805 "bfq: bad prio class %d\n", ioprio_class);
fa393d1b 3806 /* fall through */
aee69d78
PV
3807 case IOPRIO_CLASS_NONE:
3808 /*
3809 * No prio set, inherit CPU scheduling settings.
3810 */
3811 bfqq->new_ioprio = task_nice_ioprio(tsk);
3812 bfqq->new_ioprio_class = task_nice_ioclass(tsk);
3813 break;
3814 case IOPRIO_CLASS_RT:
3815 bfqq->new_ioprio = IOPRIO_PRIO_DATA(bic->ioprio);
3816 bfqq->new_ioprio_class = IOPRIO_CLASS_RT;
3817 break;
3818 case IOPRIO_CLASS_BE:
3819 bfqq->new_ioprio = IOPRIO_PRIO_DATA(bic->ioprio);
3820 bfqq->new_ioprio_class = IOPRIO_CLASS_BE;
3821 break;
3822 case IOPRIO_CLASS_IDLE:
3823 bfqq->new_ioprio_class = IOPRIO_CLASS_IDLE;
3824 bfqq->new_ioprio = 7;
aee69d78
PV
3825 break;
3826 }
3827
3828 if (bfqq->new_ioprio >= IOPRIO_BE_NR) {
3829 pr_crit("bfq_set_next_ioprio_data: new_ioprio %d\n",
3830 bfqq->new_ioprio);
3831 bfqq->new_ioprio = IOPRIO_BE_NR;
3832 }
3833
3834 bfqq->entity.new_weight = bfq_ioprio_to_weight(bfqq->new_ioprio);
3835 bfqq->entity.prio_changed = 1;
3836}
3837
ea25da48
PV
3838static struct bfq_queue *bfq_get_queue(struct bfq_data *bfqd,
3839 struct bio *bio, bool is_sync,
3840 struct bfq_io_cq *bic);
3841
aee69d78
PV
3842static void bfq_check_ioprio_change(struct bfq_io_cq *bic, struct bio *bio)
3843{
3844 struct bfq_data *bfqd = bic_to_bfqd(bic);
3845 struct bfq_queue *bfqq;
3846 int ioprio = bic->icq.ioc->ioprio;
3847
3848 /*
3849 * This condition may trigger on a newly created bic, be sure to
3850 * drop the lock before returning.
3851 */
3852 if (unlikely(!bfqd) || likely(bic->ioprio == ioprio))
3853 return;
3854
3855 bic->ioprio = ioprio;
3856
3857 bfqq = bic_to_bfqq(bic, false);
3858 if (bfqq) {
3859 /* release process reference on this queue */
3860 bfq_put_queue(bfqq);
3861 bfqq = bfq_get_queue(bfqd, bio, BLK_RW_ASYNC, bic);
3862 bic_set_bfqq(bic, bfqq, false);
3863 }
3864
3865 bfqq = bic_to_bfqq(bic, true);
3866 if (bfqq)
3867 bfq_set_next_ioprio_data(bfqq, bic);
3868}
3869
3870static void bfq_init_bfqq(struct bfq_data *bfqd, struct bfq_queue *bfqq,
3871 struct bfq_io_cq *bic, pid_t pid, int is_sync)
3872{
3873 RB_CLEAR_NODE(&bfqq->entity.rb_node);
3874 INIT_LIST_HEAD(&bfqq->fifo);
e1b2324d 3875 INIT_HLIST_NODE(&bfqq->burst_list_node);
aee69d78
PV
3876
3877 bfqq->ref = 0;
3878 bfqq->bfqd = bfqd;
3879
3880 if (bic)
3881 bfq_set_next_ioprio_data(bfqq, bic);
3882
3883 if (is_sync) {
d5be3fef
PV
3884 /*
3885 * No need to mark as has_short_ttime if in
3886 * idle_class, because no device idling is performed
3887 * for queues in idle class
3888 */
aee69d78 3889 if (!bfq_class_idle(bfqq))
d5be3fef
PV
3890 /* tentatively mark as has_short_ttime */
3891 bfq_mark_bfqq_has_short_ttime(bfqq);
aee69d78 3892 bfq_mark_bfqq_sync(bfqq);
e1b2324d 3893 bfq_mark_bfqq_just_created(bfqq);
aee69d78
PV
3894 } else
3895 bfq_clear_bfqq_sync(bfqq);
3896
3897 /* set end request to minus infinity from now */
3898 bfqq->ttime.last_end_request = ktime_get_ns() + 1;
3899
3900 bfq_mark_bfqq_IO_bound(bfqq);
3901
3902 bfqq->pid = pid;
3903
3904 /* Tentative initial value to trade off between thr and lat */
54b60456 3905 bfqq->max_budget = (2 * bfq_max_budget(bfqd)) / 3;
aee69d78 3906 bfqq->budget_timeout = bfq_smallest_from_now();
aee69d78 3907
44e44a1b 3908 bfqq->wr_coeff = 1;
36eca894 3909 bfqq->last_wr_start_finish = jiffies;
77b7dcea 3910 bfqq->wr_start_at_switch_to_srt = bfq_smallest_from_now();
36eca894 3911 bfqq->split_time = bfq_smallest_from_now();
77b7dcea
PV
3912
3913 /*
3914 * Set to the value for which bfqq will not be deemed as
3915 * soft rt when it becomes backlogged.
3916 */
3917 bfqq->soft_rt_next_start = bfq_greatest_from_now();
44e44a1b 3918
aee69d78
PV
3919 /* first request is almost certainly seeky */
3920 bfqq->seek_history = 1;
3921}
3922
3923static struct bfq_queue **bfq_async_queue_prio(struct bfq_data *bfqd,
e21b7a0b 3924 struct bfq_group *bfqg,
aee69d78
PV
3925 int ioprio_class, int ioprio)
3926{
3927 switch (ioprio_class) {
3928 case IOPRIO_CLASS_RT:
e21b7a0b 3929 return &bfqg->async_bfqq[0][ioprio];
aee69d78
PV
3930 case IOPRIO_CLASS_NONE:
3931 ioprio = IOPRIO_NORM;
3932 /* fall through */
3933 case IOPRIO_CLASS_BE:
e21b7a0b 3934 return &bfqg->async_bfqq[1][ioprio];
aee69d78 3935 case IOPRIO_CLASS_IDLE:
e21b7a0b 3936 return &bfqg->async_idle_bfqq;
aee69d78
PV
3937 default:
3938 return NULL;
3939 }
3940}
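/*
 * Mapping example (purely illustrative): a request with ioprio class
 * IOPRIO_CLASS_BE and ioprio data 4 is directed to
 * &bfqg->async_bfqq[1][4], whereas every IOPRIO_CLASS_IDLE request,
 * whatever its ioprio data, shares the single &bfqg->async_idle_bfqq.
 */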
3941
3942static struct bfq_queue *bfq_get_queue(struct bfq_data *bfqd,
3943 struct bio *bio, bool is_sync,
3944 struct bfq_io_cq *bic)
3945{
3946 const int ioprio = IOPRIO_PRIO_DATA(bic->ioprio);
3947 const int ioprio_class = IOPRIO_PRIO_CLASS(bic->ioprio);
3948 struct bfq_queue **async_bfqq = NULL;
3949 struct bfq_queue *bfqq;
e21b7a0b 3950 struct bfq_group *bfqg;
aee69d78
PV
3951
3952 rcu_read_lock();
3953
e21b7a0b
AA
3954 bfqg = bfq_find_set_group(bfqd, bio_blkcg(bio));
3955 if (!bfqg) {
3956 bfqq = &bfqd->oom_bfqq;
3957 goto out;
3958 }
3959
aee69d78 3960 if (!is_sync) {
e21b7a0b 3961 async_bfqq = bfq_async_queue_prio(bfqd, bfqg, ioprio_class,
aee69d78
PV
3962 ioprio);
3963 bfqq = *async_bfqq;
3964 if (bfqq)
3965 goto out;
3966 }
3967
3968 bfqq = kmem_cache_alloc_node(bfq_pool,
3969 GFP_NOWAIT | __GFP_ZERO | __GFP_NOWARN,
3970 bfqd->queue->node);
3971
3972 if (bfqq) {
3973 bfq_init_bfqq(bfqd, bfqq, bic, current->pid,
3974 is_sync);
e21b7a0b 3975 bfq_init_entity(&bfqq->entity, bfqg);
aee69d78
PV
3976 bfq_log_bfqq(bfqd, bfqq, "allocated");
3977 } else {
3978 bfqq = &bfqd->oom_bfqq;
3979 bfq_log_bfqq(bfqd, bfqq, "using oom bfqq");
3980 goto out;
3981 }
3982
3983 /*
3984 * Pin the queue now that it's allocated, scheduler exit will
3985 * prune it.
3986 */
3987 if (async_bfqq) {
e21b7a0b
AA
3988 bfqq->ref++; /*
3989 * Extra group reference, w.r.t. sync
3990 * queue. This extra reference is removed
3991 * only if bfqq->bfqg disappears, to
3992 * guarantee that this queue is not freed
3993 * until its group goes away.
3994 */
3995 bfq_log_bfqq(bfqd, bfqq, "get_queue, bfqq not in async: %p, %d",
aee69d78
PV
3996 bfqq, bfqq->ref);
3997 *async_bfqq = bfqq;
3998 }
3999
4000out:
4001 bfqq->ref++; /* get a process reference to this queue */
4002 bfq_log_bfqq(bfqd, bfqq, "get_queue, at end: %p, %d", bfqq, bfqq->ref);
4003 rcu_read_unlock();
4004 return bfqq;
4005}
4006
4007static void bfq_update_io_thinktime(struct bfq_data *bfqd,
4008 struct bfq_queue *bfqq)
4009{
4010 struct bfq_ttime *ttime = &bfqq->ttime;
4011 u64 elapsed = ktime_get_ns() - bfqq->ttime.last_end_request;
4012
4013 elapsed = min_t(u64, elapsed, 2ULL * bfqd->bfq_slice_idle);
4014
4015 ttime->ttime_samples = (7*bfqq->ttime.ttime_samples + 256) / 8;
4016 ttime->ttime_total = div_u64(7*ttime->ttime_total + 256*elapsed, 8);
4017 ttime->ttime_mean = div64_ul(ttime->ttime_total + 128,
4018 ttime->ttime_samples);
4019}
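/*
 * The three updates above implement a fixed-point exponential moving
 * average with decay 7/8 (each new sample weighs 1/8), with 256 as the
 * fixed-point scale: ttime_samples converges to 256 (a scaled "1"),
 * ttime_total to 256 times the recency-weighted average of elapsed,
 * and the scale cancels in the final division, where "+ 128" merely
 * rounds. As a made-up numeric example, repeatedly feeding elapsed =
 * 4 ms drives ttime_mean towards 4 ms, each sample closing 1/8 of the
 * remaining gap.
 */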
4020
4021static void
4022bfq_update_io_seektime(struct bfq_data *bfqd, struct bfq_queue *bfqq,
4023 struct request *rq)
4024{
aee69d78 4025 bfqq->seek_history <<= 1;
ab0e43e9
PV
4026 bfqq->seek_history |=
4027 get_sdist(bfqq->last_request_pos, rq) > BFQQ_SEEK_THR &&
aee69d78
PV
4028 (!blk_queue_nonrot(bfqd->queue) ||
4029 blk_rq_sectors(rq) < BFQQ_SECT_THR_NONROT);
4030}
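/*
 * bfqq->seek_history thus acts as a shift register of boolean samples:
 * each enqueued request shifts in one bit, set when the distance from
 * the previous request exceeds BFQQ_SEEK_THR (on non-rotational
 * devices, only if the request is also small). The BFQQ_SEEKY() macro,
 * defined earlier in this file, then classifies the queue as seeky
 * once enough of the recorded bits are set, schematically (threshold
 * value illustrative):
 *
 *	seeky = hweight32(seek_history) > threshold;
 */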
4031
d5be3fef
PV
4032static void bfq_update_has_short_ttime(struct bfq_data *bfqd,
4033 struct bfq_queue *bfqq,
4034 struct bfq_io_cq *bic)
aee69d78 4035{
d5be3fef 4036 bool has_short_ttime = true;
aee69d78 4037
d5be3fef
PV
4038 /*
4039 * No need to update has_short_ttime if bfqq is async or in
4040 * idle io prio class, or if bfq_slice_idle is zero, because
4041 * no device idling is performed for bfqq in this case.
4042 */
4043 if (!bfq_bfqq_sync(bfqq) || bfq_class_idle(bfqq) ||
4044 bfqd->bfq_slice_idle == 0)
aee69d78
PV
4045 return;
4046
36eca894
AA
4047 /* Idle window just restored, statistics are meaningless. */
4048 if (time_is_after_eq_jiffies(bfqq->split_time +
4049 bfqd->bfq_wr_min_idle_time))
4050 return;
4051
d5be3fef
PV
4052 /* Think time is infinite if no process is linked to
 4053 * bfqq. Otherwise check the average think time to
 4054 * decide whether to mark bfqq as has_short_ttime.
4055 */
aee69d78 4056 if (atomic_read(&bic->icq.ioc->active_ref) == 0 ||
d5be3fef
PV
4057 (bfq_sample_valid(bfqq->ttime.ttime_samples) &&
4058 bfqq->ttime.ttime_mean > bfqd->bfq_slice_idle))
4059 has_short_ttime = false;
4060
4061 bfq_log_bfqq(bfqd, bfqq, "update_has_short_ttime: has_short_ttime %d",
4062 has_short_ttime);
aee69d78 4063
d5be3fef
PV
4064 if (has_short_ttime)
4065 bfq_mark_bfqq_has_short_ttime(bfqq);
aee69d78 4066 else
d5be3fef 4067 bfq_clear_bfqq_has_short_ttime(bfqq);
aee69d78
PV
4068}
4069
4070/*
4071 * Called when a new fs request (rq) is added to bfqq. Check if there's
4072 * something we should do about it.
4073 */
4074static void bfq_rq_enqueued(struct bfq_data *bfqd, struct bfq_queue *bfqq,
4075 struct request *rq)
4076{
4077 struct bfq_io_cq *bic = RQ_BIC(rq);
4078
4079 if (rq->cmd_flags & REQ_META)
4080 bfqq->meta_pending++;
4081
4082 bfq_update_io_thinktime(bfqd, bfqq);
d5be3fef 4083 bfq_update_has_short_ttime(bfqd, bfqq, bic);
aee69d78 4084 bfq_update_io_seektime(bfqd, bfqq, rq);
aee69d78
PV
4085
4086 bfq_log_bfqq(bfqd, bfqq,
d5be3fef
PV
4087 "rq_enqueued: has_short_ttime=%d (seeky %d)",
4088 bfq_bfqq_has_short_ttime(bfqq), BFQQ_SEEKY(bfqq));
aee69d78
PV
4089
4090 bfqq->last_request_pos = blk_rq_pos(rq) + blk_rq_sectors(rq);
4091
4092 if (bfqq == bfqd->in_service_queue && bfq_bfqq_wait_request(bfqq)) {
4093 bool small_req = bfqq->queued[rq_is_sync(rq)] == 1 &&
4094 blk_rq_sectors(rq) < 32;
4095 bool budget_timeout = bfq_bfqq_budget_timeout(bfqq);
4096
4097 /*
4098 * There is just this request queued: if the request
4099 * is small and the queue is not to be expired, then
4100 * just exit.
4101 *
4102 * In this way, if the device is being idled to wait
4103 * for a new request from the in-service queue, we
4104 * avoid unplugging the device and committing the
4105 * device to serve just a small request. On the
4106 * contrary, we wait for the block layer to decide
4107 * when to unplug the device: hopefully, new requests
4108 * will be merged to this one quickly, then the device
4109 * will be unplugged and larger requests will be
4110 * dispatched.
4111 */
4112 if (small_req && !budget_timeout)
4113 return;
4114
4115 /*
4116 * A large enough request arrived, or the queue is to
4117 * be expired: in both cases disk idling is to be
4118 * stopped, so clear wait_request flag and reset
4119 * timer.
4120 */
4121 bfq_clear_bfqq_wait_request(bfqq);
4122 hrtimer_try_to_cancel(&bfqd->idle_slice_timer);
e21b7a0b 4123 bfqg_stats_update_idle_time(bfqq_group(bfqq));
aee69d78
PV
4124
4125 /*
4126 * The queue is not empty, because a new request just
4127 * arrived. Hence we can safely expire the queue, in
4128 * case of budget timeout, without risking that the
4129 * timestamps of the queue are not updated correctly.
4130 * See [1] for more details.
4131 */
4132 if (budget_timeout)
4133 bfq_bfqq_expire(bfqd, bfqq, false,
4134 BFQQE_BUDGET_TIMEOUT);
4135 }
4136}
4137
4138static void __bfq_insert_request(struct bfq_data *bfqd, struct request *rq)
4139{
36eca894
AA
4140 struct bfq_queue *bfqq = RQ_BFQQ(rq),
4141 *new_bfqq = bfq_setup_cooperator(bfqd, bfqq, rq, true);
4142
4143 if (new_bfqq) {
4144 if (bic_to_bfqq(RQ_BIC(rq), 1) != bfqq)
4145 new_bfqq = bic_to_bfqq(RQ_BIC(rq), 1);
4146 /*
4147 * Release the request's reference to the old bfqq
4148 * and make sure one is taken to the shared queue.
4149 */
4150 new_bfqq->allocated++;
4151 bfqq->allocated--;
4152 new_bfqq->ref++;
e1b2324d 4153 bfq_clear_bfqq_just_created(bfqq);
36eca894
AA
4154 /*
4155 * If the bic associated with the process
4156 * issuing this request still points to bfqq
4157 * (and thus has not been already redirected
4158 * to new_bfqq or even some other bfq_queue),
4159 * then complete the merge and redirect it to
4160 * new_bfqq.
4161 */
4162 if (bic_to_bfqq(RQ_BIC(rq), 1) == bfqq)
4163 bfq_merge_bfqqs(bfqd, RQ_BIC(rq),
4164 bfqq, new_bfqq);
4165 /*
4166 * rq is about to be enqueued into new_bfqq,
4167 * release rq reference on bfqq
4168 */
4169 bfq_put_queue(bfqq);
4170 rq->elv.priv[1] = new_bfqq;
4171 bfqq = new_bfqq;
4172 }
aee69d78
PV
4173
4174 bfq_add_request(rq);
4175
4176 rq->fifo_time = ktime_get_ns() + bfqd->bfq_fifo_expire[rq_is_sync(rq)];
4177 list_add_tail(&rq->queuelist, &bfqq->fifo);
4178
4179 bfq_rq_enqueued(bfqd, bfqq, rq);
4180}
4181
4182static void bfq_insert_request(struct blk_mq_hw_ctx *hctx, struct request *rq,
4183 bool at_head)
4184{
4185 struct request_queue *q = hctx->queue;
4186 struct bfq_data *bfqd = q->elevator->elevator_data;
4187
4188 spin_lock_irq(&bfqd->lock);
4189 if (blk_mq_sched_try_insert_merge(q, rq)) {
4190 spin_unlock_irq(&bfqd->lock);
4191 return;
4192 }
4193
4194 spin_unlock_irq(&bfqd->lock);
4195
4196 blk_mq_sched_request_inserted(rq);
4197
4198 spin_lock_irq(&bfqd->lock);
4199 if (at_head || blk_rq_is_passthrough(rq)) {
4200 if (at_head)
4201 list_add(&rq->queuelist, &bfqd->dispatch);
4202 else
4203 list_add_tail(&rq->queuelist, &bfqd->dispatch);
4204 } else {
4205 __bfq_insert_request(bfqd, rq);
4206
4207 if (rq_mergeable(rq)) {
4208 elv_rqhash_add(q, rq);
4209 if (!q->last_merge)
4210 q->last_merge = rq;
4211 }
4212 }
4213
6fa3e8d3 4214 spin_unlock_irq(&bfqd->lock);
aee69d78
PV
4215}
4216
4217static void bfq_insert_requests(struct blk_mq_hw_ctx *hctx,
4218 struct list_head *list, bool at_head)
4219{
4220 while (!list_empty(list)) {
4221 struct request *rq;
4222
4223 rq = list_first_entry(list, struct request, queuelist);
4224 list_del_init(&rq->queuelist);
4225 bfq_insert_request(hctx, rq, at_head);
4226 }
4227}
4228
4229static void bfq_update_hw_tag(struct bfq_data *bfqd)
4230{
4231 bfqd->max_rq_in_driver = max_t(int, bfqd->max_rq_in_driver,
4232 bfqd->rq_in_driver);
4233
4234 if (bfqd->hw_tag == 1)
4235 return;
4236
4237 /*
4238 * This sample is valid if the number of outstanding requests
4239 * is large enough to allow a queueing behavior. Note that the
 4240 * sum is not exact, as it does not take into account deactivated
4241 * requests.
4242 */
4243 if (bfqd->rq_in_driver + bfqd->queued < BFQ_HW_QUEUE_THRESHOLD)
4244 return;
4245
4246 if (bfqd->hw_tag_samples++ < BFQ_HW_QUEUE_SAMPLES)
4247 return;
4248
4249 bfqd->hw_tag = bfqd->max_rq_in_driver > BFQ_HW_QUEUE_THRESHOLD;
4250 bfqd->max_rq_in_driver = 0;
4251 bfqd->hw_tag_samples = 0;
4252}
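/*
 * Rough sketch of the detection (constants defined near the top of
 * this file; behavior summarized, not verbatim): samples are valid
 * only while enough requests are outstanding to make queueing
 * observable; after BFQ_HW_QUEUE_SAMPLES such samples, hw_tag is set
 * iff the driver was ever seen holding more than
 * BFQ_HW_QUEUE_THRESHOLD requests at once, and once set to 1 it is
 * sticky (the early return above).
 */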
4253
4254static void bfq_completed_request(struct bfq_queue *bfqq, struct bfq_data *bfqd)
4255{
ab0e43e9
PV
4256 u64 now_ns;
4257 u32 delta_us;
4258
aee69d78
PV
4259 bfq_update_hw_tag(bfqd);
4260
4261 bfqd->rq_in_driver--;
4262 bfqq->dispatched--;
4263
44e44a1b
PV
4264 if (!bfqq->dispatched && !bfq_bfqq_busy(bfqq)) {
4265 /*
4266 * Set budget_timeout (which we overload to store the
 4267 * time at which the queue was left with no backlog and
4268 * no outstanding request; used by the weight-raising
4269 * mechanism).
4270 */
4271 bfqq->budget_timeout = jiffies;
1de0c4cd
AA
4272
4273 bfq_weights_tree_remove(bfqd, &bfqq->entity,
4274 &bfqd->queue_weights_tree);
44e44a1b
PV
4275 }
4276
ab0e43e9
PV
4277 now_ns = ktime_get_ns();
4278
4279 bfqq->ttime.last_end_request = now_ns;
4280
4281 /*
4282 * Using us instead of ns, to get a reasonable precision in
4283 * computing rate in next check.
4284 */
4285 delta_us = div_u64(now_ns - bfqd->last_completion, NSEC_PER_USEC);
4286
4287 /*
4288 * If the request took rather long to complete, and, according
4289 * to the maximum request size recorded, this completion latency
4290 * implies that the request was certainly served at a very low
4291 * rate (less than 1M sectors/sec), then the whole observation
4292 * interval that lasts up to this time instant cannot be a
4293 * valid time interval for computing a new peak rate. Invoke
4294 * bfq_update_rate_reset to have the following three steps
4295 * taken:
4296 * - close the observation interval at the last (previous)
4297 * request dispatch or completion
4298 * - compute rate, if possible, for that observation interval
4299 * - reset to zero samples, which will trigger a proper
4300 * re-initialization of the observation interval on next
4301 * dispatch
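 *
 * (Numerically, with rates kept left-shifted by BFQ_RATE_SHIFT, the
 * condition below,
 *	(last_rq_max_size << BFQ_RATE_SHIFT) / delta_us <
 *		1 << (BFQ_RATE_SHIFT - 10),
 * is equivalent to size/delta_us < 2^-10 sectors/usec, i.e., about
 * 10^6/1024 ~= 976,000 sectors/sec, just under the 1M sectors/sec
 * bound mentioned above.)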
4302 */
4303 if (delta_us > BFQ_MIN_TT/NSEC_PER_USEC &&
4304 (bfqd->last_rq_max_size<<BFQ_RATE_SHIFT)/delta_us <
4305 1UL<<(BFQ_RATE_SHIFT - 10))
4306 bfq_update_rate_reset(bfqd, NULL);
4307 bfqd->last_completion = now_ns;
aee69d78 4308
77b7dcea
PV
4309 /*
4310 * If we are waiting to discover whether the request pattern
4311 * of the task associated with the queue is actually
4312 * isochronous, and both requisites for this condition to hold
4313 * are now satisfied, then compute soft_rt_next_start (see the
4314 * comments on the function bfq_bfqq_softrt_next_start()). We
4315 * schedule this delayed check when bfqq expires, if it still
4316 * has in-flight requests.
4317 */
4318 if (bfq_bfqq_softrt_update(bfqq) && bfqq->dispatched == 0 &&
4319 RB_EMPTY_ROOT(&bfqq->sort_list))
4320 bfqq->soft_rt_next_start =
4321 bfq_bfqq_softrt_next_start(bfqd, bfqq);
4322
aee69d78
PV
4323 /*
4324 * If this is the in-service queue, check if it needs to be expired,
4325 * or if we want to idle in case it has no pending requests.
4326 */
4327 if (bfqd->in_service_queue == bfqq) {
44e44a1b 4328 if (bfqq->dispatched == 0 && bfq_bfqq_must_idle(bfqq)) {
aee69d78
PV
4329 bfq_arm_slice_timer(bfqd);
4330 return;
4331 } else if (bfq_may_expire_for_budg_timeout(bfqq))
4332 bfq_bfqq_expire(bfqd, bfqq, false,
4333 BFQQE_BUDGET_TIMEOUT);
4334 else if (RB_EMPTY_ROOT(&bfqq->sort_list) &&
4335 (bfqq->dispatched == 0 ||
4336 !bfq_bfqq_may_idle(bfqq)))
4337 bfq_bfqq_expire(bfqd, bfqq, false,
4338 BFQQE_NO_MORE_REQUESTS);
4339 }
3f7cb4f4
HT
4340
4341 if (!bfqd->rq_in_driver)
4342 bfq_schedule_dispatch(bfqd);
aee69d78
PV
4343}
4344
4345static void bfq_put_rq_priv_body(struct bfq_queue *bfqq)
4346{
4347 bfqq->allocated--;
4348
4349 bfq_put_queue(bfqq);
4350}
4351
7b9e9361 4352static void bfq_finish_request(struct request *rq)
aee69d78 4353{
5bbf4e5a
CH
4354 struct bfq_queue *bfqq;
4355 struct bfq_data *bfqd;
4356
4357 if (!rq->elv.icq)
4358 return;
4359
4360 bfqq = RQ_BFQQ(rq);
4361 bfqd = bfqq->bfqd;
aee69d78 4362
e21b7a0b
AA
4363 if (rq->rq_flags & RQF_STARTED)
4364 bfqg_stats_update_completion(bfqq_group(bfqq),
4365 rq_start_time_ns(rq),
4366 rq_io_start_time_ns(rq),
4367 rq->cmd_flags);
aee69d78
PV
4368
4369 if (likely(rq->rq_flags & RQF_STARTED)) {
4370 unsigned long flags;
4371
4372 spin_lock_irqsave(&bfqd->lock, flags);
4373
4374 bfq_completed_request(bfqq, bfqd);
4375 bfq_put_rq_priv_body(bfqq);
4376
6fa3e8d3 4377 spin_unlock_irqrestore(&bfqd->lock, flags);
aee69d78
PV
4378 } else {
4379 /*
4380 * Request rq may be still/already in the scheduler,
4381 * in which case we need to remove it. And we cannot
4382 * defer such a check and removal, to avoid
4383 * inconsistencies in the time interval from the end
4384 * of this function to the start of the deferred work.
4385 * This situation seems to occur only in process
4386 * context, as a consequence of a merge. In the
4387 * current version of the code, this implies that the
4388 * lock is held.
4389 */
4390
4391 if (!RB_EMPTY_NODE(&rq->rb_node))
7b9e9361 4392 bfq_remove_request(rq->q, rq);
aee69d78
PV
4393 bfq_put_rq_priv_body(bfqq);
4394 }
4395
4396 rq->elv.priv[0] = NULL;
4397 rq->elv.priv[1] = NULL;
4398}
4399
36eca894
AA
4400/*
4401 * Returns NULL if a new bfqq should be allocated, or the old bfqq if this
4402 * was the last process referring to that bfqq.
4403 */
4404static struct bfq_queue *
4405bfq_split_bfqq(struct bfq_io_cq *bic, struct bfq_queue *bfqq)
4406{
4407 bfq_log_bfqq(bfqq->bfqd, bfqq, "splitting queue");
4408
4409 if (bfqq_process_refs(bfqq) == 1) {
4410 bfqq->pid = current->pid;
4411 bfq_clear_bfqq_coop(bfqq);
4412 bfq_clear_bfqq_split_coop(bfqq);
4413 return bfqq;
4414 }
4415
4416 bic_set_bfqq(bic, NULL, 1);
4417
4418 bfq_put_cooperator(bfqq);
4419
4420 bfq_put_queue(bfqq);
4421 return NULL;
4422}
4423
4424static struct bfq_queue *bfq_get_bfqq_handle_split(struct bfq_data *bfqd,
4425 struct bfq_io_cq *bic,
4426 struct bio *bio,
4427 bool split, bool is_sync,
4428 bool *new_queue)
4429{
4430 struct bfq_queue *bfqq = bic_to_bfqq(bic, is_sync);
4431
4432 if (likely(bfqq && bfqq != &bfqd->oom_bfqq))
4433 return bfqq;
4434
4435 if (new_queue)
4436 *new_queue = true;
4437
4438 if (bfqq)
4439 bfq_put_queue(bfqq);
4440 bfqq = bfq_get_queue(bfqd, bio, is_sync, bic);
4441
4442 bic_set_bfqq(bic, bfqq, is_sync);
e1b2324d
AA
4443 if (split && is_sync) {
4444 if ((bic->was_in_burst_list && bfqd->large_burst) ||
4445 bic->saved_in_large_burst)
4446 bfq_mark_bfqq_in_large_burst(bfqq);
4447 else {
4448 bfq_clear_bfqq_in_large_burst(bfqq);
4449 if (bic->was_in_burst_list)
4450 hlist_add_head(&bfqq->burst_list_node,
4451 &bfqd->burst_list);
4452 }
36eca894 4453 bfqq->split_time = jiffies;
e1b2324d 4454 }
36eca894
AA
4455
4456 return bfqq;
4457}
4458
aee69d78
PV
4459/*
4460 * Allocate bfq data structures associated with this request.
4461 */
5bbf4e5a 4462static void bfq_prepare_request(struct request *rq, struct bio *bio)
aee69d78 4463{
5bbf4e5a 4464 struct request_queue *q = rq->q;
aee69d78 4465 struct bfq_data *bfqd = q->elevator->elevator_data;
9f210738 4466 struct bfq_io_cq *bic;
aee69d78
PV
4467 const int is_sync = rq_is_sync(rq);
4468 struct bfq_queue *bfqq;
36eca894 4469 bool new_queue = false;
13c931bd 4470 bool bfqq_already_existing = false, split = false;
aee69d78 4471
9f210738 4472 if (!rq->elv.icq)
5bbf4e5a 4473 return;
9f210738 4474 bic = icq_to_bic(rq->elv.icq);
aee69d78 4475
9f210738 4476 spin_lock_irq(&bfqd->lock);
aee69d78 4477
8c9ff1ad
CIK
4478 bfq_check_ioprio_change(bic, bio);
4479
e21b7a0b
AA
4480 bfq_bic_update_cgroup(bic, bio);
4481
36eca894
AA
4482 bfqq = bfq_get_bfqq_handle_split(bfqd, bic, bio, false, is_sync,
4483 &new_queue);
4484
4485 if (likely(!new_queue)) {
4486 /* If the queue was seeky for too long, break it apart. */
4487 if (bfq_bfqq_coop(bfqq) && bfq_bfqq_split_coop(bfqq)) {
4488 bfq_log_bfqq(bfqd, bfqq, "breaking apart bfqq");
e1b2324d
AA
4489
4490 /* Update bic before losing reference to bfqq */
4491 if (bfq_bfqq_in_large_burst(bfqq))
4492 bic->saved_in_large_burst = true;
4493
36eca894 4494 bfqq = bfq_split_bfqq(bic, bfqq);
6fa3e8d3 4495 split = true;
36eca894
AA
4496
4497 if (!bfqq)
4498 bfqq = bfq_get_bfqq_handle_split(bfqd, bic, bio,
4499 true, is_sync,
4500 NULL);
13c931bd
PV
4501 else
4502 bfqq_already_existing = true;
36eca894 4503 }
aee69d78
PV
4504 }
4505
4506 bfqq->allocated++;
4507 bfqq->ref++;
4508 bfq_log_bfqq(bfqd, bfqq, "get_request %p: bfqq %p, %d",
4509 rq, bfqq, bfqq->ref);
4510
4511 rq->elv.priv[0] = bic;
4512 rq->elv.priv[1] = bfqq;
4513
36eca894
AA
4514 /*
4515 * If a bfq_queue has only one process reference, it is owned
 4516 * by only this bic: we can then set bfqq->bic = bic. In
4517 * addition, if the queue has also just been split, we have to
4518 * resume its state.
4519 */
4520 if (likely(bfqq != &bfqd->oom_bfqq) && bfqq_process_refs(bfqq) == 1) {
4521 bfqq->bic = bic;
6fa3e8d3 4522 if (split) {
36eca894
AA
4523 /*
4524 * The queue has just been split from a shared
4525 * queue: restore the idle window and the
4526 * possible weight raising period.
4527 */
13c931bd
PV
4528 bfq_bfqq_resume_state(bfqq, bfqd, bic,
4529 bfqq_already_existing);
36eca894
AA
4530 }
4531 }
4532
e1b2324d
AA
4533 if (unlikely(bfq_bfqq_just_created(bfqq)))
4534 bfq_handle_burst(bfqd, bfqq);
4535
6fa3e8d3 4536 spin_unlock_irq(&bfqd->lock);
aee69d78
PV
4537}
4538
4539static void bfq_idle_slice_timer_body(struct bfq_queue *bfqq)
4540{
4541 struct bfq_data *bfqd = bfqq->bfqd;
4542 enum bfqq_expiration reason;
4543 unsigned long flags;
4544
4545 spin_lock_irqsave(&bfqd->lock, flags);
4546 bfq_clear_bfqq_wait_request(bfqq);
4547
4548 if (bfqq != bfqd->in_service_queue) {
4549 spin_unlock_irqrestore(&bfqd->lock, flags);
4550 return;
4551 }
4552
4553 if (bfq_bfqq_budget_timeout(bfqq))
4554 /*
4555 * Also here the queue can be safely expired
4556 * for budget timeout without wasting
4557 * guarantees
4558 */
4559 reason = BFQQE_BUDGET_TIMEOUT;
4560 else if (bfqq->queued[0] == 0 && bfqq->queued[1] == 0)
4561 /*
4562 * The queue may not be empty upon timer expiration,
4563 * because we may not disable the timer when the
4564 * first request of the in-service queue arrives
4565 * during disk idling.
4566 */
4567 reason = BFQQE_TOO_IDLE;
4568 else
4569 goto schedule_dispatch;
4570
4571 bfq_bfqq_expire(bfqd, bfqq, true, reason);
4572
4573schedule_dispatch:
4574 spin_unlock_irqrestore(&bfqd->lock, flags);
4575 bfq_schedule_dispatch(bfqd);
4576}
4577
4578/*
4579 * Handler of the expiration of the timer running if the in-service queue
4580 * is idling inside its time slice.
4581 */
4582static enum hrtimer_restart bfq_idle_slice_timer(struct hrtimer *timer)
4583{
4584 struct bfq_data *bfqd = container_of(timer, struct bfq_data,
4585 idle_slice_timer);
4586 struct bfq_queue *bfqq = bfqd->in_service_queue;
4587
4588 /*
4589 * Theoretical race here: the in-service queue can be NULL or
4590 * different from the queue that was idling if a new request
4591 * arrives for the current queue and there is a full dispatch
4592 * cycle that changes the in-service queue. This can hardly
4593 * happen, but in the worst case we just expire a queue too
4594 * early.
4595 */
4596 if (bfqq)
4597 bfq_idle_slice_timer_body(bfqq);
4598
4599 return HRTIMER_NORESTART;
4600}
4601
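/*
 * For context, a minimal sketch of how the timer handled above gets
 * armed; the actual arming lives in bfq_arm_slice_timer, earlier in
 * this file, and follows these lines, with sl the idling slice in
 * nanoseconds:
 *
 *	u64 sl = bfqd->bfq_slice_idle;
 *	hrtimer_start(&bfqd->idle_slice_timer, ns_to_ktime(sl),
 *		      HRTIMER_MODE_REL);
 */
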
4602static void __bfq_put_async_bfqq(struct bfq_data *bfqd,
4603 struct bfq_queue **bfqq_ptr)
4604{
4605 struct bfq_queue *bfqq = *bfqq_ptr;
4606
4607 bfq_log(bfqd, "put_async_bfqq: %p", bfqq);
4608 if (bfqq) {
4609 bfq_bfqq_move(bfqd, bfqq, bfqd->root_group);
4610
4611 bfq_log_bfqq(bfqd, bfqq, "put_async_bfqq: putting %p, %d",
4612 bfqq, bfqq->ref);
4613 bfq_put_queue(bfqq);
4614 *bfqq_ptr = NULL;
4615 }
4616}
4617
4618/*
4619 * Release all the bfqg references to its async queues. If we are
4620 * deallocating the group, these queues may still contain requests, so
4621 * we reparent them to the root cgroup (i.e., the only one that will
4622 * exist for sure until all the requests on a device are gone).
aee69d78 4623 */
4624void bfq_put_async_queues(struct bfq_data *bfqd, struct bfq_group *bfqg)
4625{
4626 int i, j;
4627
4628 for (i = 0; i < 2; i++)
4629 for (j = 0; j < IOPRIO_BE_NR; j++)
4630 __bfq_put_async_bfqq(bfqd, &bfqg->async_bfqq[i][j]);
4631
4632 __bfq_put_async_bfqq(bfqd, &bfqg->async_idle_bfqq);
4633}
4634
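/*
 * A note (added for clarity) on the [2][IOPRIO_BE_NR] matrix walked
 * above: the first index distinguishes the RT and BE ioprio classes,
 * the second the ioprio level within the class; IDLE-class async I/O
 * shares the single async_idle_bfqq put right after the loop.
 */
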
4635static void bfq_exit_queue(struct elevator_queue *e)
4636{
4637 struct bfq_data *bfqd = e->elevator_data;
4638 struct bfq_queue *bfqq, *n;
4639
4640 hrtimer_cancel(&bfqd->idle_slice_timer);
4641
4642 spin_lock_irq(&bfqd->lock);
4643 list_for_each_entry_safe(bfqq, n, &bfqd->idle_list, bfqq_list)
4644 bfq_deactivate_bfqq(bfqd, bfqq, false, false);
4645 spin_unlock_irq(&bfqd->lock);
4646
4647 hrtimer_cancel(&bfqd->idle_slice_timer);
4648
4649#ifdef CONFIG_BFQ_GROUP_IOSCHED
4650 blkcg_deactivate_policy(bfqd->queue, &blkcg_policy_bfq);
4651#else
4652 spin_lock_irq(&bfqd->lock);
4653 bfq_put_async_queues(bfqd, bfqd->root_group);
4654 kfree(bfqd->root_group);
4655 spin_unlock_irq(&bfqd->lock);
4656#endif
4657
4658 kfree(bfqd);
4659}
4660
4661static void bfq_init_root_group(struct bfq_group *root_group,
4662 struct bfq_data *bfqd)
4663{
4664 int i;
4665
4666#ifdef CONFIG_BFQ_GROUP_IOSCHED
4667 root_group->entity.parent = NULL;
4668 root_group->my_entity = NULL;
4669 root_group->bfqd = bfqd;
4670#endif
4671 root_group->rq_pos_tree = RB_ROOT;
4672 for (i = 0; i < BFQ_IOPRIO_CLASSES; i++)
4673 root_group->sched_data.service_tree[i] = BFQ_SERVICE_TREE_INIT;
4674 root_group->sched_data.bfq_class_idle_last_service = jiffies;
4675}
4676
4677static int bfq_init_queue(struct request_queue *q, struct elevator_type *e)
4678{
4679 struct bfq_data *bfqd;
4680 struct elevator_queue *eq;
4681
4682 eq = elevator_alloc(q, e);
4683 if (!eq)
4684 return -ENOMEM;
4685
4686 bfqd = kzalloc_node(sizeof(*bfqd), GFP_KERNEL, q->node);
4687 if (!bfqd) {
4688 kobject_put(&eq->kobj);
4689 return -ENOMEM;
4690 }
4691 eq->elevator_data = bfqd;
4692
4693 spin_lock_irq(q->queue_lock);
4694 q->elevator = eq;
4695 spin_unlock_irq(q->queue_lock);
4696
4697 /*
4698 * Our fallback bfqq if bfq_get_queue() runs into OOM issues.
4699 * Grab a permanent reference to it, so that the normal code flow
4700 * will not attempt to free it.
4701 */
4702 bfq_init_bfqq(bfqd, &bfqd->oom_bfqq, NULL, 1, 0);
4703 bfqd->oom_bfqq.ref++;
4704 bfqd->oom_bfqq.new_ioprio = BFQ_DEFAULT_QUEUE_IOPRIO;
4705 bfqd->oom_bfqq.new_ioprio_class = IOPRIO_CLASS_BE;
4706 bfqd->oom_bfqq.entity.new_weight =
4707 bfq_ioprio_to_weight(bfqd->oom_bfqq.new_ioprio);
4708
4709 /* oom_bfqq does not participate in bursts */
4710 bfq_clear_bfqq_just_created(&bfqd->oom_bfqq);
4711
4712 /*
4713 * Trigger weight initialization, according to ioprio, at the
4714 * oom_bfqq's first activation. The oom_bfqq's ioprio and ioprio
4715 * class won't be changed any more.
4716 */
4717 bfqd->oom_bfqq.entity.prio_changed = 1;
4718
4719 bfqd->queue = q;
4720
4721 INIT_LIST_HEAD(&bfqd->dispatch);
4722
4723 hrtimer_init(&bfqd->idle_slice_timer, CLOCK_MONOTONIC,
4724 HRTIMER_MODE_REL);
4725 bfqd->idle_slice_timer.function = bfq_idle_slice_timer;
4726
4727 bfqd->queue_weights_tree = RB_ROOT;
4728 bfqd->group_weights_tree = RB_ROOT;
4729
4730 INIT_LIST_HEAD(&bfqd->active_list);
4731 INIT_LIST_HEAD(&bfqd->idle_list);
4732 INIT_HLIST_HEAD(&bfqd->burst_list);
4733
4734 bfqd->hw_tag = -1;
4735
4736 bfqd->bfq_max_budget = bfq_default_max_budget;
4737
4738 bfqd->bfq_fifo_expire[0] = bfq_fifo_expire[0];
4739 bfqd->bfq_fifo_expire[1] = bfq_fifo_expire[1];
4740 bfqd->bfq_back_max = bfq_back_max;
4741 bfqd->bfq_back_penalty = bfq_back_penalty;
4742 bfqd->bfq_slice_idle = bfq_slice_idle;
4743 bfqd->bfq_timeout = bfq_timeout;
4744
4745 bfqd->bfq_requests_within_timer = 120;
4746
4747 bfqd->bfq_large_burst_thresh = 8;
4748 bfqd->bfq_burst_interval = msecs_to_jiffies(180);
4749
4750 bfqd->low_latency = true;
4751
4752 /*
4753 * Trade-off between responsiveness and fairness.
4754 */
4755 bfqd->bfq_wr_coeff = 30;
4756 bfqd->bfq_wr_rt_max_time = msecs_to_jiffies(300);
4757 bfqd->bfq_wr_max_time = 0;
4758 bfqd->bfq_wr_min_idle_time = msecs_to_jiffies(2000);
4759 bfqd->bfq_wr_min_inter_arr_async = msecs_to_jiffies(500);
4760 bfqd->bfq_wr_max_softrt_rate = 7000; /*
4761 * Approximate rate required
4762 * to playback or record a
4763 * high-definition compressed
4764 * video.
4765 */
4766 bfqd->wr_busy_queues = 0;
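/*
 * Worked example (added for illustration) of the trade-off above: while
 * weight-raised, a queue with base weight w is scheduled as if its
 * weight were bfq_wr_coeff * w = 30w. Against a single non-raised queue
 * of equal base weight it therefore receives 30w / (30w + w) = 30/31,
 * i.e., about 97% of the throughput, until the raising period expires.
 */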
4767
4768 /*
4769 * Begin by assuming, optimistically, that the device is a
4770 * high-speed one, and that its peak rate is equal to 2/3 of
4771 * the highest reference rate.
4772 */
4773 bfqd->RT_prod = R_fast[blk_queue_nonrot(bfqd->queue)] *
4774 T_fast[blk_queue_nonrot(bfqd->queue)];
4775 bfqd->peak_rate = R_fast[blk_queue_nonrot(bfqd->queue)] * 2 / 3;
4776 bfqd->device_speed = BFQ_BFQD_FAST;
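/*
 * Spelling the initialization above out (illustrative note, not in the
 * original source): blk_queue_nonrot() is used as an array index, so a
 * rotational device starts from R_fast[0]/T_fast[0] and a
 * non-rotational one from R_fast[1]/T_fast[1]. The initial estimate is
 * thus peak_rate = 2/3 * R_fast[nonrot], while RT_prod caches
 * R_fast[nonrot] * T_fast[nonrot], the product later divided by the
 * measured peak rate when scaling the duration of weight-raising
 * periods.
 */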
4777
4778 spin_lock_init(&bfqd->lock);
4779
4780 /*
4781 * The invocation of the next bfq_create_group_hierarchy
4782 * function is the head of a chain of function calls
4783 * (bfq_create_group_hierarchy->blkcg_activate_policy->
4784 * blk_mq_freeze_queue) that may lead to the invocation of the
4785 * has_work hook function. For this reason,
4786 * bfq_create_group_hierarchy is invoked only after all
4787 * scheduler data has been initialized, apart from the fields
4788 * that can be initialized only after invoking
4789 * bfq_create_group_hierarchy. This, in particular, enables
4790 * has_work to correctly return false. Of course, to avoid
4791 * other inconsistencies, the blk-mq stack must then refrain
4792 * from invoking further scheduler hooks before this init
4793 * function is finished.
4794 */
4795 bfqd->root_group = bfq_create_group_hierarchy(bfqd, q->node);
4796 if (!bfqd->root_group)
4797 goto out_free;
4798 bfq_init_root_group(bfqd->root_group, bfqd);
4799 bfq_init_entity(&bfqd->oom_bfqq.entity, bfqd->root_group);
4800
4801
4802 return 0;
4803
4804out_free:
4805 kfree(bfqd);
4806 kobject_put(&eq->kobj);
4807 return -ENOMEM;
4808}
4809
4810static void bfq_slab_kill(void)
4811{
4812 kmem_cache_destroy(bfq_pool);
4813}
4814
4815static int __init bfq_slab_setup(void)
4816{
4817 bfq_pool = KMEM_CACHE(bfq_queue, 0);
4818 if (!bfq_pool)
4819 return -ENOMEM;
4820 return 0;
4821}
4822
4823static ssize_t bfq_var_show(unsigned int var, char *page)
4824{
4825 return sprintf(page, "%u\n", var);
4826}
4827
4828static int bfq_var_store(unsigned long *var, const char *page)
4829{
4830 unsigned long new_val;
4831 int ret = kstrtoul(page, 10, &new_val);
4832
4833 if (ret)
4834 return ret;
4835 *var = new_val;
4836 return 0;
4837}
4838
4839#define SHOW_FUNCTION(__FUNC, __VAR, __CONV) \
4840static ssize_t __FUNC(struct elevator_queue *e, char *page) \
4841{ \
4842 struct bfq_data *bfqd = e->elevator_data; \
4843 u64 __data = __VAR; \
4844 if (__CONV == 1) \
4845 __data = jiffies_to_msecs(__data); \
4846 else if (__CONV == 2) \
4847 __data = div_u64(__data, NSEC_PER_MSEC); \
4848 return bfq_var_show(__data, (page)); \
4849}
4850SHOW_FUNCTION(bfq_fifo_expire_sync_show, bfqd->bfq_fifo_expire[1], 2);
4851SHOW_FUNCTION(bfq_fifo_expire_async_show, bfqd->bfq_fifo_expire[0], 2);
4852SHOW_FUNCTION(bfq_back_seek_max_show, bfqd->bfq_back_max, 0);
4853SHOW_FUNCTION(bfq_back_seek_penalty_show, bfqd->bfq_back_penalty, 0);
4854SHOW_FUNCTION(bfq_slice_idle_show, bfqd->bfq_slice_idle, 2);
4855SHOW_FUNCTION(bfq_max_budget_show, bfqd->bfq_user_max_budget, 0);
4856SHOW_FUNCTION(bfq_timeout_sync_show, bfqd->bfq_timeout, 1);
4857SHOW_FUNCTION(bfq_strict_guarantees_show, bfqd->strict_guarantees, 0);
4858SHOW_FUNCTION(bfq_low_latency_show, bfqd->low_latency, 0);
4859#undef SHOW_FUNCTION
4860
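/*
 * For readability, the function that the SHOW_FUNCTION macro generates
 * for one instance above, bfq_slice_idle_show (__CONV == 2, i.e.
 * nanoseconds shown as milliseconds), expands to the equivalent of:
 *
 *	static ssize_t bfq_slice_idle_show(struct elevator_queue *e,
 *					   char *page)
 *	{
 *		struct bfq_data *bfqd = e->elevator_data;
 *		u64 __data = bfqd->bfq_slice_idle;
 *
 *		__data = div_u64(__data, NSEC_PER_MSEC);
 *		return bfq_var_show(__data, page);
 *	}
 */
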
4861#define USEC_SHOW_FUNCTION(__FUNC, __VAR) \
4862static ssize_t __FUNC(struct elevator_queue *e, char *page) \
4863{ \
4864 struct bfq_data *bfqd = e->elevator_data; \
4865 u64 __data = __VAR; \
4866 __data = div_u64(__data, NSEC_PER_USEC); \
4867 return bfq_var_show(__data, (page)); \
4868}
4869USEC_SHOW_FUNCTION(bfq_slice_idle_us_show, bfqd->bfq_slice_idle);
4870#undef USEC_SHOW_FUNCTION
4871
4872#define STORE_FUNCTION(__FUNC, __PTR, MIN, MAX, __CONV) \
4873static ssize_t \
4874__FUNC(struct elevator_queue *e, const char *page, size_t count) \
4875{ \
4876 struct bfq_data *bfqd = e->elevator_data; \
4877 unsigned long __data, __min = (MIN), __max = (MAX); \
4878 int ret; \
4879 \
4880 ret = bfq_var_store(&__data, (page)); \
4881 if (ret) \
4882 return ret; \
4883 if (__data < __min) \
4884 __data = __min; \
4885 else if (__data > __max) \
4886 __data = __max; \
4887 if (__CONV == 1) \
4888 *(__PTR) = msecs_to_jiffies(__data); \
4889 else if (__CONV == 2) \
4890 *(__PTR) = (u64)__data * NSEC_PER_MSEC; \
4891 else \
4892 *(__PTR) = __data; \
4893 return count; \
4894}
4895STORE_FUNCTION(bfq_fifo_expire_sync_store, &bfqd->bfq_fifo_expire[1], 1,
4896 INT_MAX, 2);
4897STORE_FUNCTION(bfq_fifo_expire_async_store, &bfqd->bfq_fifo_expire[0], 1,
4898 INT_MAX, 2);
4899STORE_FUNCTION(bfq_back_seek_max_store, &bfqd->bfq_back_max, 0, INT_MAX, 0);
4900STORE_FUNCTION(bfq_back_seek_penalty_store, &bfqd->bfq_back_penalty, 1,
4901 INT_MAX, 0);
4902STORE_FUNCTION(bfq_slice_idle_store, &bfqd->bfq_slice_idle, 0, INT_MAX, 2);
4903#undef STORE_FUNCTION
4904
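/*
 * Summary of the __CONV argument used by the SHOW_FUNCTION and
 * STORE_FUNCTION instances above: 0 passes the value through unchanged,
 * 1 converts between milliseconds and jiffies (msecs_to_jiffies /
 * jiffies_to_msecs), and 2 converts between milliseconds and
 * nanoseconds (NSEC_PER_MSEC).
 */
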
4905#define USEC_STORE_FUNCTION(__FUNC, __PTR, MIN, MAX) \
4906static ssize_t __FUNC(struct elevator_queue *e, const char *page, size_t count)\
4907{ \
4908 struct bfq_data *bfqd = e->elevator_data; \
4909 unsigned long __data, __min = (MIN), __max = (MAX); \
4910 int ret; \
4911 \
4912 ret = bfq_var_store(&__data, (page)); \
4913 if (ret) \
4914 return ret; \
4915 if (__data < __min) \
4916 __data = __min; \
4917 else if (__data > __max) \
4918 __data = __max; \
4919 *(__PTR) = (u64)__data * NSEC_PER_USEC; \
4920 return count; \
4921}
4922USEC_STORE_FUNCTION(bfq_slice_idle_us_store, &bfqd->bfq_slice_idle, 0,
4923 UINT_MAX);
4924#undef USEC_STORE_FUNCTION
4925
4926static ssize_t bfq_max_budget_store(struct elevator_queue *e,
4927 const char *page, size_t count)
4928{
4929 struct bfq_data *bfqd = e->elevator_data;
4930 unsigned long __data;
4931 int ret;
4932
4933 ret = bfq_var_store(&__data, (page));
4934 if (ret)
4935 return ret;
4936
4937 if (__data == 0)
4938 bfqd->bfq_max_budget = bfq_calc_max_budget(bfqd);
4939 else {
4940 if (__data > INT_MAX)
4941 __data = INT_MAX;
4942 bfqd->bfq_max_budget = __data;
4943 }
4944
4945 bfqd->bfq_user_max_budget = __data;
4946
4947 return count;
4948}
4949
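/*
 * Note, for clarity, on the store function above: writing 0 does not
 * set a zero budget; it re-enables automatic budget computation via
 * bfq_calc_max_budget(). Leaving bfq_user_max_budget at 0 also makes
 * writes to timeout_sync recompute the budget, as visible below.
 */
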
4950/*
4951 * Keeping this name for compatibility with cfq's parameter names,
4952 * but this timeout is used for both sync and async requests.
4953 */
4954static ssize_t bfq_timeout_sync_store(struct elevator_queue *e,
4955 const char *page, size_t count)
4956{
4957 struct bfq_data *bfqd = e->elevator_data;
4958 unsigned long __data;
4959 int ret;
4960
4961 ret = bfq_var_store(&__data, (page));
4962 if (ret)
4963 return ret;
4964
4965 if (__data < 1)
4966 __data = 1;
4967 else if (__data > INT_MAX)
4968 __data = INT_MAX;
4969
4970 bfqd->bfq_timeout = msecs_to_jiffies(__data);
4971 if (bfqd->bfq_user_max_budget == 0)
4972 bfqd->bfq_max_budget = bfq_calc_max_budget(bfqd);
4973
4974 return count;
4975}
4976
4977static ssize_t bfq_strict_guarantees_store(struct elevator_queue *e,
4978 const char *page, size_t count)
4979{
4980 struct bfq_data *bfqd = e->elevator_data;
4981 unsigned long __data;
4982 int ret;
4983
4984 ret = bfq_var_store(&__data, (page));
4985 if (ret)
4986 return ret;
4987
4988 if (__data > 1)
4989 __data = 1;
4990 if (!bfqd->strict_guarantees && __data == 1
4991 && bfqd->bfq_slice_idle < 8 * NSEC_PER_MSEC)
4992 bfqd->bfq_slice_idle = 8 * NSEC_PER_MSEC;
4993
4994 bfqd->strict_guarantees = __data;
4995
4996 return count;
4997}
4998
4999static ssize_t bfq_low_latency_store(struct elevator_queue *e,
5000 const char *page, size_t count)
5001{
5002 struct bfq_data *bfqd = e->elevator_data;
5003 unsigned long __data;
5004 int ret;
5005
5006 ret = bfq_var_store(&__data, (page));
5007 if (ret)
5008 return ret;
5009
5010 if (__data > 1)
5011 __data = 1;
5012 if (__data == 0 && bfqd->low_latency != 0)
5013 bfq_end_wr(bfqd);
5014 bfqd->low_latency = __data;
5015
5016 return count;
5017}
5018
5019#define BFQ_ATTR(name) \
5020 __ATTR(name, 0644, bfq_##name##_show, bfq_##name##_store)
5021
5022static struct elv_fs_entry bfq_attrs[] = {
5023 BFQ_ATTR(fifo_expire_sync),
5024 BFQ_ATTR(fifo_expire_async),
5025 BFQ_ATTR(back_seek_max),
5026 BFQ_ATTR(back_seek_penalty),
5027 BFQ_ATTR(slice_idle),
5028 BFQ_ATTR(slice_idle_us),
5029 BFQ_ATTR(max_budget),
5030 BFQ_ATTR(timeout_sync),
5031 BFQ_ATTR(strict_guarantees),
5032 BFQ_ATTR(low_latency),
5033 __ATTR_NULL
5034};
5035
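/*
 * Usage sketch (assuming the standard blk-mq sysfs layout): once bfq is
 * selected for a device, the attributes above are exposed under
 * /sys/block/<dev>/queue/iosched/, e.g.:
 *
 *	echo bfq > /sys/block/sda/queue/scheduler
 *	echo 0 > /sys/block/sda/queue/iosched/low_latency
 *	cat /sys/block/sda/queue/iosched/max_budget
 */
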
5036static struct elevator_type iosched_bfq_mq = {
5037 .ops.mq = {
5038 .prepare_request = bfq_prepare_request,
5039 .finish_request = bfq_finish_request,
5040 .exit_icq = bfq_exit_icq,
5041 .insert_requests = bfq_insert_requests,
5042 .dispatch_request = bfq_dispatch_request,
5043 .next_request = elv_rb_latter_request,
5044 .former_request = elv_rb_former_request,
5045 .allow_merge = bfq_allow_bio_merge,
5046 .bio_merge = bfq_bio_merge,
5047 .request_merge = bfq_request_merge,
5048 .requests_merged = bfq_requests_merged,
5049 .request_merged = bfq_request_merged,
5050 .has_work = bfq_has_work,
5051 .init_sched = bfq_init_queue,
5052 .exit_sched = bfq_exit_queue,
5053 },
5054
5055 .uses_mq = true,
5056 .icq_size = sizeof(struct bfq_io_cq),
5057 .icq_align = __alignof__(struct bfq_io_cq),
5058 .elevator_attrs = bfq_attrs,
5059 .elevator_name = "bfq",
5060 .elevator_owner = THIS_MODULE,
5061};
5062MODULE_ALIAS("bfq-iosched");
5063
5064static int __init bfq_init(void)
5065{
5066 int ret;
5067
5068#ifdef CONFIG_BFQ_GROUP_IOSCHED
5069 ret = blkcg_policy_register(&blkcg_policy_bfq);
5070 if (ret)
5071 return ret;
5072#endif
5073
5074 ret = -ENOMEM;
5075 if (bfq_slab_setup())
5076 goto err_pol_unreg;
5077
5078 /*
5079 * Times to load large popular applications for the typical
5080 * systems installed on the reference devices (see the
5081 * comments before the definitions of the next two
5082 * arrays). Actually, we use slightly slower values, as the
5083 * estimated peak rate tends to be smaller than the actual
5084 * peak rate. The reason for this last fact is that estimates
5085 * are computed over much shorter time intervals than the long
5086 * intervals typically used for benchmarking. Why? First, to
5087 * adapt more quickly to variations. Second, because an I/O
5088 * scheduler cannot rely on a peak-rate-evaluation workload to
5089 * be run for a long time.
5090 */
5091 T_slow[0] = msecs_to_jiffies(3500); /* actually 4 sec */
5092 T_slow[1] = msecs_to_jiffies(6000); /* actually 6.5 sec */
5093 T_fast[0] = msecs_to_jiffies(7000); /* actually 8 sec */
5094 T_fast[1] = msecs_to_jiffies(2500); /* actually 3 sec */
5095
5096 /*
5097 * Thresholds that determine the switch between speed classes
5098 * (see the comments before the definition of the array
5099 * device_speed_thresh). These thresholds are biased towards
5100 * transitions to the fast class. This is safer than the
5101 * opposite bias. In fact, a wrong transition to the slow
5102 * class results in short weight-raising periods, because the
5103 * speed of the device then tends to be higher than the
5104 * reference peak rate. On the opposite end, a wrong
5105 * transition to the fast class tends to increase
5106 * weight-raising periods, because of the opposite reason.
5107 */
5108 device_speed_thresh[0] = (4 * R_slow[0]) / 3;
5109 device_speed_thresh[1] = (4 * R_slow[1]) / 3;
5110
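/*
 * Worked arithmetic for the thresholds above, with a purely
 * hypothetical R_slow[0] of 1000 sectors/s (the real reference rates
 * are defined earlier in this file): the threshold would be
 * 4 * 1000 / 3 = 1333 sectors/s, i.e., the estimated peak rate must
 * exceed the slow reference rate by a third before the device is
 * classified as fast.
 */
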
5111 ret = elv_register(&iosched_bfq_mq);
5112 if (ret)
5113 goto slab_kill;
5114
5115 return 0;
5116
5117slab_kill:
5118 bfq_slab_kill();
5119err_pol_unreg:
5120#ifdef CONFIG_BFQ_GROUP_IOSCHED
5121 blkcg_policy_unregister(&blkcg_policy_bfq);
5122#endif
5123 return ret;
5124}
5125
5126static void __exit bfq_exit(void)
5127{
5128 elv_unregister(&iosched_bfq_mq);
5129#ifdef CONFIG_BFQ_GROUP_IOSCHED
5130 blkcg_policy_unregister(&blkcg_policy_bfq);
5131#endif
5132 bfq_slab_kill();
5133}
5134
5135module_init(bfq_init);
5136module_exit(bfq_exit);
5137
5138MODULE_AUTHOR("Paolo Valente");
5139MODULE_LICENSE("GPL");
5140MODULE_DESCRIPTION("MQ Budget Fair Queueing I/O Scheduler");