]> git.proxmox.com Git - mirror_ubuntu-bionic-kernel.git/blame - block/bfq-iosched.c
block,bfq: refactor device-idling logic
[mirror_ubuntu-bionic-kernel.git] / block / bfq-iosched.c
CommitLineData
aee69d78
PV
1/*
2 * Budget Fair Queueing (BFQ) I/O scheduler.
3 *
4 * Based on ideas and code from CFQ:
5 * Copyright (C) 2003 Jens Axboe <axboe@kernel.dk>
6 *
7 * Copyright (C) 2008 Fabio Checconi <fabio@gandalf.sssup.it>
8 * Paolo Valente <paolo.valente@unimore.it>
9 *
10 * Copyright (C) 2010 Paolo Valente <paolo.valente@unimore.it>
11 * Arianna Avanzini <avanzini@google.com>
12 *
13 * Copyright (C) 2017 Paolo Valente <paolo.valente@linaro.org>
14 *
15 * This program is free software; you can redistribute it and/or
16 * modify it under the terms of the GNU General Public License as
17 * published by the Free Software Foundation; either version 2 of the
18 * License, or (at your option) any later version.
19 *
20 * This program is distributed in the hope that it will be useful,
21 * but WITHOUT ANY WARRANTY; without even the implied warranty of
22 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
23 * General Public License for more details.
24 *
25 * BFQ is a proportional-share I/O scheduler, with some extra
26 * low-latency capabilities. BFQ also supports full hierarchical
27 * scheduling through cgroups. Next paragraphs provide an introduction
28 * on BFQ inner workings. Details on BFQ benefits, usage and
29 * limitations can be found in Documentation/block/bfq-iosched.txt.
30 *
31 * BFQ is a proportional-share storage-I/O scheduling algorithm based
32 * on the slice-by-slice service scheme of CFQ. But BFQ assigns
33 * budgets, measured in number of sectors, to processes instead of
34 * time slices. The device is not granted to the in-service process
35 * for a given time slice, but until it has exhausted its assigned
36 * budget. This change from the time to the service domain enables BFQ
37 * to distribute the device throughput among processes as desired,
38 * without any distortion due to throughput fluctuations, or to device
39 * internal queueing. BFQ uses an ad hoc internal scheduler, called
40 * B-WF2Q+, to schedule processes according to their budgets. More
41 * precisely, BFQ schedules queues associated with processes. Each
42 * process/queue is assigned a user-configurable weight, and B-WF2Q+
43 * guarantees that each queue receives a fraction of the throughput
44 * proportional to its weight. Thanks to the accurate policy of
45 * B-WF2Q+, BFQ can afford to assign high budgets to I/O-bound
46 * processes issuing sequential requests (to boost the throughput),
47 * and yet guarantee a low latency to interactive and soft real-time
48 * applications.
49 *
50 * In particular, to provide these low-latency guarantees, BFQ
51 * explicitly privileges the I/O of two classes of time-sensitive
52 * applications: interactive and soft real-time. This feature enables
53 * BFQ to provide applications in these classes with a very low
54 * latency. Finally, BFQ also features additional heuristics for
55 * preserving both a low latency and a high throughput on NCQ-capable,
56 * rotational or flash-based devices, and to get the job done quickly
57 * for applications consisting in many I/O-bound processes.
58 *
43c1b3d6
PV
59 * NOTE: if the main or only goal, with a given device, is to achieve
60 * the maximum-possible throughput at all times, then do switch off
61 * all low-latency heuristics for that device, by setting low_latency
62 * to 0.
63 *
aee69d78
PV
64 * BFQ is described in [1], where also a reference to the initial, more
65 * theoretical paper on BFQ can be found. The interested reader can find
66 * in the latter paper full details on the main algorithm, as well as
67 * formulas of the guarantees and formal proofs of all the properties.
68 * With respect to the version of BFQ presented in these papers, this
69 * implementation adds a few more heuristics, such as the one that
70 * guarantees a low latency to soft real-time applications, and a
71 * hierarchical extension based on H-WF2Q+.
72 *
73 * B-WF2Q+ is based on WF2Q+, which is described in [2], together with
74 * H-WF2Q+, while the augmented tree used here to implement B-WF2Q+
75 * with O(log N) complexity derives from the one introduced with EEVDF
76 * in [3].
77 *
78 * [1] P. Valente, A. Avanzini, "Evolution of the BFQ Storage I/O
79 * Scheduler", Proceedings of the First Workshop on Mobile System
80 * Technologies (MST-2015), May 2015.
81 * http://algogroup.unimore.it/people/paolo/disk_sched/mst-2015.pdf
82 *
83 * [2] Jon C.R. Bennett and H. Zhang, "Hierarchical Packet Fair Queueing
84 * Algorithms", IEEE/ACM Transactions on Networking, 5(5):675-689,
85 * Oct 1997.
86 *
87 * http://www.cs.cmu.edu/~hzhang/papers/TON-97-Oct.ps.gz
88 *
89 * [3] I. Stoica and H. Abdel-Wahab, "Earliest Eligible Virtual Deadline
90 * First: A Flexible and Accurate Mechanism for Proportional Share
91 * Resource Allocation", technical report.
92 *
93 * http://www.cs.berkeley.edu/~istoica/papers/eevdf-tr-95.pdf
94 */
95#include <linux/module.h>
96#include <linux/slab.h>
97#include <linux/blkdev.h>
e21b7a0b 98#include <linux/cgroup.h>
aee69d78
PV
99#include <linux/elevator.h>
100#include <linux/ktime.h>
101#include <linux/rbtree.h>
102#include <linux/ioprio.h>
103#include <linux/sbitmap.h>
104#include <linux/delay.h>
105
106#include "blk.h"
107#include "blk-mq.h"
108#include "blk-mq-tag.h"
109#include "blk-mq-sched.h"
ea25da48 110#include "bfq-iosched.h"
aee69d78 111
ea25da48
PV
112#define BFQ_BFQQ_FNS(name) \
113void bfq_mark_bfqq_##name(struct bfq_queue *bfqq) \
114{ \
115 __set_bit(BFQQF_##name, &(bfqq)->flags); \
116} \
117void bfq_clear_bfqq_##name(struct bfq_queue *bfqq) \
118{ \
119 __clear_bit(BFQQF_##name, &(bfqq)->flags); \
120} \
121int bfq_bfqq_##name(const struct bfq_queue *bfqq) \
122{ \
123 return test_bit(BFQQF_##name, &(bfqq)->flags); \
44e44a1b
PV
124}
125
ea25da48
PV
126BFQ_BFQQ_FNS(just_created);
127BFQ_BFQQ_FNS(busy);
128BFQ_BFQQ_FNS(wait_request);
129BFQ_BFQQ_FNS(non_blocking_wait_rq);
130BFQ_BFQQ_FNS(fifo_expire);
d5be3fef 131BFQ_BFQQ_FNS(has_short_ttime);
ea25da48
PV
132BFQ_BFQQ_FNS(sync);
133BFQ_BFQQ_FNS(IO_bound);
134BFQ_BFQQ_FNS(in_large_burst);
135BFQ_BFQQ_FNS(coop);
136BFQ_BFQQ_FNS(split_coop);
137BFQ_BFQQ_FNS(softrt_update);
138#undef BFQ_BFQQ_FNS \
aee69d78 139
ea25da48
PV
140/* Expiration time of sync (0) and async (1) requests, in ns. */
141static const u64 bfq_fifo_expire[2] = { NSEC_PER_SEC / 4, NSEC_PER_SEC / 8 };
aee69d78 142
ea25da48
PV
143/* Maximum backwards seek (magic number lifted from CFQ), in KiB. */
144static const int bfq_back_max = 16 * 1024;
aee69d78 145
ea25da48
PV
146/* Penalty of a backwards seek, in number of sectors. */
147static const int bfq_back_penalty = 2;
e21b7a0b 148
ea25da48
PV
149/* Idling period duration, in ns. */
150static u64 bfq_slice_idle = NSEC_PER_SEC / 125;
aee69d78 151
ea25da48
PV
152/* Minimum number of assigned budgets for which stats are safe to compute. */
153static const int bfq_stats_min_budgets = 194;
aee69d78 154
ea25da48
PV
155/* Default maximum budget values, in sectors and number of requests. */
156static const int bfq_default_max_budget = 16 * 1024;
e21b7a0b 157
ea25da48
PV
158/*
159 * Async to sync throughput distribution is controlled as follows:
160 * when an async request is served, the entity is charged the number
161 * of sectors of the request, multiplied by the factor below
162 */
163static const int bfq_async_charge_factor = 10;
aee69d78 164
ea25da48
PV
165/* Default timeout values, in jiffies, approximating CFQ defaults. */
166const int bfq_timeout = HZ / 8;
aee69d78 167
ea25da48 168static struct kmem_cache *bfq_pool;
e21b7a0b 169
ea25da48
PV
170/* Below this threshold (in ns), we consider thinktime immediate. */
171#define BFQ_MIN_TT (2 * NSEC_PER_MSEC)
e21b7a0b 172
ea25da48
PV
173/* hw_tag detection: parallel requests threshold and min samples needed. */
174#define BFQ_HW_QUEUE_THRESHOLD 4
175#define BFQ_HW_QUEUE_SAMPLES 32
aee69d78 176
ea25da48
PV
177#define BFQQ_SEEK_THR (sector_t)(8 * 100)
178#define BFQQ_SECT_THR_NONROT (sector_t)(2 * 32)
179#define BFQQ_CLOSE_THR (sector_t)(8 * 1024)
180#define BFQQ_SEEKY(bfqq) (hweight32(bfqq->seek_history) > 32/8)
aee69d78 181
ea25da48
PV
182/* Min number of samples required to perform peak-rate update */
183#define BFQ_RATE_MIN_SAMPLES 32
184/* Min observation time interval required to perform a peak-rate update (ns) */
185#define BFQ_RATE_MIN_INTERVAL (300*NSEC_PER_MSEC)
186/* Target observation time interval for a peak-rate update (ns) */
187#define BFQ_RATE_REF_INTERVAL NSEC_PER_SEC
aee69d78 188
ea25da48
PV
189/* Shift used for peak rate fixed precision calculations. */
190#define BFQ_RATE_SHIFT 16
aee69d78 191
ea25da48
PV
192/*
193 * By default, BFQ computes the duration of the weight raising for
194 * interactive applications automatically, using the following formula:
195 * duration = (R / r) * T, where r is the peak rate of the device, and
196 * R and T are two reference parameters.
197 * In particular, R is the peak rate of the reference device (see below),
198 * and T is a reference time: given the systems that are likely to be
199 * installed on the reference device according to its speed class, T is
200 * about the maximum time needed, under BFQ and while reading two files in
201 * parallel, to load typical large applications on these systems.
202 * In practice, the slower/faster the device at hand is, the more/less it
203 * takes to load applications with respect to the reference device.
204 * Accordingly, the longer/shorter BFQ grants weight raising to interactive
205 * applications.
206 *
207 * BFQ uses four different reference pairs (R, T), depending on:
208 * . whether the device is rotational or non-rotational;
209 * . whether the device is slow, such as old or portable HDDs, as well as
210 * SD cards, or fast, such as newer HDDs and SSDs.
211 *
212 * The device's speed class is dynamically (re)detected in
213 * bfq_update_peak_rate() every time the estimated peak rate is updated.
214 *
215 * In the following definitions, R_slow[0]/R_fast[0] and
216 * T_slow[0]/T_fast[0] are the reference values for a slow/fast
217 * rotational device, whereas R_slow[1]/R_fast[1] and
218 * T_slow[1]/T_fast[1] are the reference values for a slow/fast
219 * non-rotational device. Finally, device_speed_thresh are the
220 * thresholds used to switch between speed classes. The reference
221 * rates are not the actual peak rates of the devices used as a
222 * reference, but slightly lower values. The reason for using these
223 * slightly lower values is that the peak-rate estimator tends to
224 * yield slightly lower values than the actual peak rate (it can yield
225 * the actual peak rate only if there is only one process doing I/O,
226 * and the process does sequential I/O).
227 *
228 * Both the reference peak rates and the thresholds are measured in
229 * sectors/usec, left-shifted by BFQ_RATE_SHIFT.
230 */
231static int R_slow[2] = {1000, 10700};
232static int R_fast[2] = {14000, 33000};
233/*
234 * To improve readability, a conversion function is used to initialize the
235 * following arrays, which entails that they can be initialized only in a
236 * function.
237 */
238static int T_slow[2];
239static int T_fast[2];
240static int device_speed_thresh[2];
aee69d78 241
ea25da48
PV
242#define RQ_BIC(rq) ((struct bfq_io_cq *) (rq)->elv.priv[0])
243#define RQ_BFQQ(rq) ((rq)->elv.priv[1])
aee69d78 244
ea25da48 245struct bfq_queue *bic_to_bfqq(struct bfq_io_cq *bic, bool is_sync)
e21b7a0b 246{
ea25da48 247 return bic->bfqq[is_sync];
aee69d78
PV
248}
249
ea25da48 250void bic_set_bfqq(struct bfq_io_cq *bic, struct bfq_queue *bfqq, bool is_sync)
aee69d78 251{
ea25da48 252 bic->bfqq[is_sync] = bfqq;
aee69d78
PV
253}
254
ea25da48 255struct bfq_data *bic_to_bfqd(struct bfq_io_cq *bic)
aee69d78 256{
ea25da48 257 return bic->icq.q->elevator->elevator_data;
e21b7a0b 258}
aee69d78 259
ea25da48
PV
260/**
261 * icq_to_bic - convert iocontext queue structure to bfq_io_cq.
262 * @icq: the iocontext queue.
263 */
264static struct bfq_io_cq *icq_to_bic(struct io_cq *icq)
e21b7a0b 265{
ea25da48
PV
266 /* bic->icq is the first member, %NULL will convert to %NULL */
267 return container_of(icq, struct bfq_io_cq, icq);
e21b7a0b 268}
aee69d78 269
ea25da48
PV
270/**
271 * bfq_bic_lookup - search into @ioc a bic associated to @bfqd.
272 * @bfqd: the lookup key.
273 * @ioc: the io_context of the process doing I/O.
274 * @q: the request queue.
275 */
276static struct bfq_io_cq *bfq_bic_lookup(struct bfq_data *bfqd,
277 struct io_context *ioc,
278 struct request_queue *q)
e21b7a0b 279{
ea25da48
PV
280 if (ioc) {
281 unsigned long flags;
282 struct bfq_io_cq *icq;
aee69d78 283
ea25da48
PV
284 spin_lock_irqsave(q->queue_lock, flags);
285 icq = icq_to_bic(ioc_lookup_icq(ioc, q));
286 spin_unlock_irqrestore(q->queue_lock, flags);
aee69d78 287
ea25da48 288 return icq;
e21b7a0b 289 }
e21b7a0b 290
ea25da48 291 return NULL;
aee69d78
PV
292}
293
ea25da48
PV
294/*
295 * Scheduler run of queue, if there are requests pending and no one in the
296 * driver that will restart queueing.
297 */
298void bfq_schedule_dispatch(struct bfq_data *bfqd)
aee69d78 299{
ea25da48
PV
300 if (bfqd->queued != 0) {
301 bfq_log(bfqd, "schedule dispatch");
302 blk_mq_run_hw_queues(bfqd->queue, true);
e21b7a0b 303 }
aee69d78
PV
304}
305
306#define bfq_class_idle(bfqq) ((bfqq)->ioprio_class == IOPRIO_CLASS_IDLE)
307#define bfq_class_rt(bfqq) ((bfqq)->ioprio_class == IOPRIO_CLASS_RT)
308
309#define bfq_sample_valid(samples) ((samples) > 80)
310
aee69d78
PV
311/*
312 * Lifted from AS - choose which of rq1 and rq2 that is best served now.
313 * We choose the request that is closesr to the head right now. Distance
314 * behind the head is penalized and only allowed to a certain extent.
315 */
316static struct request *bfq_choose_req(struct bfq_data *bfqd,
317 struct request *rq1,
318 struct request *rq2,
319 sector_t last)
320{
321 sector_t s1, s2, d1 = 0, d2 = 0;
322 unsigned long back_max;
323#define BFQ_RQ1_WRAP 0x01 /* request 1 wraps */
324#define BFQ_RQ2_WRAP 0x02 /* request 2 wraps */
325 unsigned int wrap = 0; /* bit mask: requests behind the disk head? */
326
327 if (!rq1 || rq1 == rq2)
328 return rq2;
329 if (!rq2)
330 return rq1;
331
332 if (rq_is_sync(rq1) && !rq_is_sync(rq2))
333 return rq1;
334 else if (rq_is_sync(rq2) && !rq_is_sync(rq1))
335 return rq2;
336 if ((rq1->cmd_flags & REQ_META) && !(rq2->cmd_flags & REQ_META))
337 return rq1;
338 else if ((rq2->cmd_flags & REQ_META) && !(rq1->cmd_flags & REQ_META))
339 return rq2;
340
341 s1 = blk_rq_pos(rq1);
342 s2 = blk_rq_pos(rq2);
343
344 /*
345 * By definition, 1KiB is 2 sectors.
346 */
347 back_max = bfqd->bfq_back_max * 2;
348
349 /*
350 * Strict one way elevator _except_ in the case where we allow
351 * short backward seeks which are biased as twice the cost of a
352 * similar forward seek.
353 */
354 if (s1 >= last)
355 d1 = s1 - last;
356 else if (s1 + back_max >= last)
357 d1 = (last - s1) * bfqd->bfq_back_penalty;
358 else
359 wrap |= BFQ_RQ1_WRAP;
360
361 if (s2 >= last)
362 d2 = s2 - last;
363 else if (s2 + back_max >= last)
364 d2 = (last - s2) * bfqd->bfq_back_penalty;
365 else
366 wrap |= BFQ_RQ2_WRAP;
367
368 /* Found required data */
369
370 /*
371 * By doing switch() on the bit mask "wrap" we avoid having to
372 * check two variables for all permutations: --> faster!
373 */
374 switch (wrap) {
375 case 0: /* common case for CFQ: rq1 and rq2 not wrapped */
376 if (d1 < d2)
377 return rq1;
378 else if (d2 < d1)
379 return rq2;
380
381 if (s1 >= s2)
382 return rq1;
383 else
384 return rq2;
385
386 case BFQ_RQ2_WRAP:
387 return rq1;
388 case BFQ_RQ1_WRAP:
389 return rq2;
390 case BFQ_RQ1_WRAP|BFQ_RQ2_WRAP: /* both rqs wrapped */
391 default:
392 /*
393 * Since both rqs are wrapped,
394 * start with the one that's further behind head
395 * (--> only *one* back seek required),
396 * since back seek takes more time than forward.
397 */
398 if (s1 <= s2)
399 return rq1;
400 else
401 return rq2;
402 }
403}
404
36eca894
AA
405static struct bfq_queue *
406bfq_rq_pos_tree_lookup(struct bfq_data *bfqd, struct rb_root *root,
407 sector_t sector, struct rb_node **ret_parent,
408 struct rb_node ***rb_link)
409{
410 struct rb_node **p, *parent;
411 struct bfq_queue *bfqq = NULL;
412
413 parent = NULL;
414 p = &root->rb_node;
415 while (*p) {
416 struct rb_node **n;
417
418 parent = *p;
419 bfqq = rb_entry(parent, struct bfq_queue, pos_node);
420
421 /*
422 * Sort strictly based on sector. Smallest to the left,
423 * largest to the right.
424 */
425 if (sector > blk_rq_pos(bfqq->next_rq))
426 n = &(*p)->rb_right;
427 else if (sector < blk_rq_pos(bfqq->next_rq))
428 n = &(*p)->rb_left;
429 else
430 break;
431 p = n;
432 bfqq = NULL;
433 }
434
435 *ret_parent = parent;
436 if (rb_link)
437 *rb_link = p;
438
439 bfq_log(bfqd, "rq_pos_tree_lookup %llu: returning %d",
440 (unsigned long long)sector,
441 bfqq ? bfqq->pid : 0);
442
443 return bfqq;
444}
445
ea25da48 446void bfq_pos_tree_add_move(struct bfq_data *bfqd, struct bfq_queue *bfqq)
36eca894
AA
447{
448 struct rb_node **p, *parent;
449 struct bfq_queue *__bfqq;
450
451 if (bfqq->pos_root) {
452 rb_erase(&bfqq->pos_node, bfqq->pos_root);
453 bfqq->pos_root = NULL;
454 }
455
456 if (bfq_class_idle(bfqq))
457 return;
458 if (!bfqq->next_rq)
459 return;
460
461 bfqq->pos_root = &bfq_bfqq_to_bfqg(bfqq)->rq_pos_tree;
462 __bfqq = bfq_rq_pos_tree_lookup(bfqd, bfqq->pos_root,
463 blk_rq_pos(bfqq->next_rq), &parent, &p);
464 if (!__bfqq) {
465 rb_link_node(&bfqq->pos_node, parent, p);
466 rb_insert_color(&bfqq->pos_node, bfqq->pos_root);
467 } else
468 bfqq->pos_root = NULL;
469}
470
1de0c4cd
AA
471/*
472 * Tell whether there are active queues or groups with differentiated weights.
473 */
474static bool bfq_differentiated_weights(struct bfq_data *bfqd)
475{
476 /*
477 * For weights to differ, at least one of the trees must contain
478 * at least two nodes.
479 */
480 return (!RB_EMPTY_ROOT(&bfqd->queue_weights_tree) &&
481 (bfqd->queue_weights_tree.rb_node->rb_left ||
482 bfqd->queue_weights_tree.rb_node->rb_right)
483#ifdef CONFIG_BFQ_GROUP_IOSCHED
484 ) ||
485 (!RB_EMPTY_ROOT(&bfqd->group_weights_tree) &&
486 (bfqd->group_weights_tree.rb_node->rb_left ||
487 bfqd->group_weights_tree.rb_node->rb_right)
488#endif
489 );
490}
491
492/*
493 * The following function returns true if every queue must receive the
494 * same share of the throughput (this condition is used when deciding
495 * whether idling may be disabled, see the comments in the function
496 * bfq_bfqq_may_idle()).
497 *
498 * Such a scenario occurs when:
499 * 1) all active queues have the same weight,
500 * 2) all active groups at the same level in the groups tree have the same
501 * weight,
502 * 3) all active groups at the same level in the groups tree have the same
503 * number of children.
504 *
505 * Unfortunately, keeping the necessary state for evaluating exactly the
506 * above symmetry conditions would be quite complex and time-consuming.
507 * Therefore this function evaluates, instead, the following stronger
508 * sub-conditions, for which it is much easier to maintain the needed
509 * state:
510 * 1) all active queues have the same weight,
511 * 2) all active groups have the same weight,
512 * 3) all active groups have at most one active child each.
513 * In particular, the last two conditions are always true if hierarchical
514 * support and the cgroups interface are not enabled, thus no state needs
515 * to be maintained in this case.
516 */
517static bool bfq_symmetric_scenario(struct bfq_data *bfqd)
518{
519 return !bfq_differentiated_weights(bfqd);
520}
521
522/*
523 * If the weight-counter tree passed as input contains no counter for
524 * the weight of the input entity, then add that counter; otherwise just
525 * increment the existing counter.
526 *
527 * Note that weight-counter trees contain few nodes in mostly symmetric
528 * scenarios. For example, if all queues have the same weight, then the
529 * weight-counter tree for the queues may contain at most one node.
530 * This holds even if low_latency is on, because weight-raised queues
531 * are not inserted in the tree.
532 * In most scenarios, the rate at which nodes are created/destroyed
533 * should be low too.
534 */
ea25da48
PV
535void bfq_weights_tree_add(struct bfq_data *bfqd, struct bfq_entity *entity,
536 struct rb_root *root)
1de0c4cd
AA
537{
538 struct rb_node **new = &(root->rb_node), *parent = NULL;
539
540 /*
541 * Do not insert if the entity is already associated with a
542 * counter, which happens if:
543 * 1) the entity is associated with a queue,
544 * 2) a request arrival has caused the queue to become both
545 * non-weight-raised, and hence change its weight, and
546 * backlogged; in this respect, each of the two events
547 * causes an invocation of this function,
548 * 3) this is the invocation of this function caused by the
549 * second event. This second invocation is actually useless,
550 * and we handle this fact by exiting immediately. More
551 * efficient or clearer solutions might possibly be adopted.
552 */
553 if (entity->weight_counter)
554 return;
555
556 while (*new) {
557 struct bfq_weight_counter *__counter = container_of(*new,
558 struct bfq_weight_counter,
559 weights_node);
560 parent = *new;
561
562 if (entity->weight == __counter->weight) {
563 entity->weight_counter = __counter;
564 goto inc_counter;
565 }
566 if (entity->weight < __counter->weight)
567 new = &((*new)->rb_left);
568 else
569 new = &((*new)->rb_right);
570 }
571
572 entity->weight_counter = kzalloc(sizeof(struct bfq_weight_counter),
573 GFP_ATOMIC);
574
575 /*
576 * In the unlucky event of an allocation failure, we just
577 * exit. This will cause the weight of entity to not be
578 * considered in bfq_differentiated_weights, which, in its
579 * turn, causes the scenario to be deemed wrongly symmetric in
580 * case entity's weight would have been the only weight making
581 * the scenario asymmetric. On the bright side, no unbalance
582 * will however occur when entity becomes inactive again (the
583 * invocation of this function is triggered by an activation
584 * of entity). In fact, bfq_weights_tree_remove does nothing
585 * if !entity->weight_counter.
586 */
587 if (unlikely(!entity->weight_counter))
588 return;
589
590 entity->weight_counter->weight = entity->weight;
591 rb_link_node(&entity->weight_counter->weights_node, parent, new);
592 rb_insert_color(&entity->weight_counter->weights_node, root);
593
594inc_counter:
595 entity->weight_counter->num_active++;
596}
597
598/*
599 * Decrement the weight counter associated with the entity, and, if the
600 * counter reaches 0, remove the counter from the tree.
601 * See the comments to the function bfq_weights_tree_add() for considerations
602 * about overhead.
603 */
ea25da48
PV
604void bfq_weights_tree_remove(struct bfq_data *bfqd, struct bfq_entity *entity,
605 struct rb_root *root)
1de0c4cd
AA
606{
607 if (!entity->weight_counter)
608 return;
609
610 entity->weight_counter->num_active--;
611 if (entity->weight_counter->num_active > 0)
612 goto reset_entity_pointer;
613
614 rb_erase(&entity->weight_counter->weights_node, root);
615 kfree(entity->weight_counter);
616
617reset_entity_pointer:
618 entity->weight_counter = NULL;
619}
620
aee69d78
PV
621/*
622 * Return expired entry, or NULL to just start from scratch in rbtree.
623 */
624static struct request *bfq_check_fifo(struct bfq_queue *bfqq,
625 struct request *last)
626{
627 struct request *rq;
628
629 if (bfq_bfqq_fifo_expire(bfqq))
630 return NULL;
631
632 bfq_mark_bfqq_fifo_expire(bfqq);
633
634 rq = rq_entry_fifo(bfqq->fifo.next);
635
636 if (rq == last || ktime_get_ns() < rq->fifo_time)
637 return NULL;
638
639 bfq_log_bfqq(bfqq->bfqd, bfqq, "check_fifo: returned %p", rq);
640 return rq;
641}
642
643static struct request *bfq_find_next_rq(struct bfq_data *bfqd,
644 struct bfq_queue *bfqq,
645 struct request *last)
646{
647 struct rb_node *rbnext = rb_next(&last->rb_node);
648 struct rb_node *rbprev = rb_prev(&last->rb_node);
649 struct request *next, *prev = NULL;
650
651 /* Follow expired path, else get first next available. */
652 next = bfq_check_fifo(bfqq, last);
653 if (next)
654 return next;
655
656 if (rbprev)
657 prev = rb_entry_rq(rbprev);
658
659 if (rbnext)
660 next = rb_entry_rq(rbnext);
661 else {
662 rbnext = rb_first(&bfqq->sort_list);
663 if (rbnext && rbnext != &last->rb_node)
664 next = rb_entry_rq(rbnext);
665 }
666
667 return bfq_choose_req(bfqd, next, prev, blk_rq_pos(last));
668}
669
c074170e 670/* see the definition of bfq_async_charge_factor for details */
aee69d78
PV
671static unsigned long bfq_serv_to_charge(struct request *rq,
672 struct bfq_queue *bfqq)
673{
44e44a1b 674 if (bfq_bfqq_sync(bfqq) || bfqq->wr_coeff > 1)
c074170e
PV
675 return blk_rq_sectors(rq);
676
cfd69712
PV
677 /*
678 * If there are no weight-raised queues, then amplify service
679 * by just the async charge factor; otherwise amplify service
680 * by twice the async charge factor, to further reduce latency
681 * for weight-raised queues.
682 */
683 if (bfqq->bfqd->wr_busy_queues == 0)
684 return blk_rq_sectors(rq) * bfq_async_charge_factor;
685
686 return blk_rq_sectors(rq) * 2 * bfq_async_charge_factor;
aee69d78
PV
687}
688
689/**
690 * bfq_updated_next_req - update the queue after a new next_rq selection.
691 * @bfqd: the device data the queue belongs to.
692 * @bfqq: the queue to update.
693 *
694 * If the first request of a queue changes we make sure that the queue
695 * has enough budget to serve at least its first request (if the
696 * request has grown). We do this because if the queue has not enough
697 * budget for its first request, it has to go through two dispatch
698 * rounds to actually get it dispatched.
699 */
700static void bfq_updated_next_req(struct bfq_data *bfqd,
701 struct bfq_queue *bfqq)
702{
703 struct bfq_entity *entity = &bfqq->entity;
704 struct request *next_rq = bfqq->next_rq;
705 unsigned long new_budget;
706
707 if (!next_rq)
708 return;
709
710 if (bfqq == bfqd->in_service_queue)
711 /*
712 * In order not to break guarantees, budgets cannot be
713 * changed after an entity has been selected.
714 */
715 return;
716
717 new_budget = max_t(unsigned long, bfqq->max_budget,
718 bfq_serv_to_charge(next_rq, bfqq));
719 if (entity->budget != new_budget) {
720 entity->budget = new_budget;
721 bfq_log_bfqq(bfqd, bfqq, "updated next rq: new budget %lu",
722 new_budget);
e21b7a0b 723 bfq_requeue_bfqq(bfqd, bfqq);
aee69d78
PV
724 }
725}
726
36eca894 727static void
13c931bd
PV
728bfq_bfqq_resume_state(struct bfq_queue *bfqq, struct bfq_data *bfqd,
729 struct bfq_io_cq *bic, bool bfq_already_existing)
36eca894 730{
13c931bd
PV
731 unsigned int old_wr_coeff = bfqq->wr_coeff;
732 bool busy = bfq_already_existing && bfq_bfqq_busy(bfqq);
733
d5be3fef
PV
734 if (bic->saved_has_short_ttime)
735 bfq_mark_bfqq_has_short_ttime(bfqq);
36eca894 736 else
d5be3fef 737 bfq_clear_bfqq_has_short_ttime(bfqq);
36eca894
AA
738
739 if (bic->saved_IO_bound)
740 bfq_mark_bfqq_IO_bound(bfqq);
741 else
742 bfq_clear_bfqq_IO_bound(bfqq);
743
744 bfqq->ttime = bic->saved_ttime;
745 bfqq->wr_coeff = bic->saved_wr_coeff;
746 bfqq->wr_start_at_switch_to_srt = bic->saved_wr_start_at_switch_to_srt;
747 bfqq->last_wr_start_finish = bic->saved_last_wr_start_finish;
748 bfqq->wr_cur_max_time = bic->saved_wr_cur_max_time;
749
e1b2324d 750 if (bfqq->wr_coeff > 1 && (bfq_bfqq_in_large_burst(bfqq) ||
36eca894 751 time_is_before_jiffies(bfqq->last_wr_start_finish +
e1b2324d 752 bfqq->wr_cur_max_time))) {
36eca894
AA
753 bfq_log_bfqq(bfqq->bfqd, bfqq,
754 "resume state: switching off wr");
755
756 bfqq->wr_coeff = 1;
757 }
758
759 /* make sure weight will be updated, however we got here */
760 bfqq->entity.prio_changed = 1;
13c931bd
PV
761
762 if (likely(!busy))
763 return;
764
765 if (old_wr_coeff == 1 && bfqq->wr_coeff > 1)
766 bfqd->wr_busy_queues++;
767 else if (old_wr_coeff > 1 && bfqq->wr_coeff == 1)
768 bfqd->wr_busy_queues--;
36eca894
AA
769}
770
771static int bfqq_process_refs(struct bfq_queue *bfqq)
772{
773 return bfqq->ref - bfqq->allocated - bfqq->entity.on_st;
774}
775
e1b2324d
AA
776/* Empty burst list and add just bfqq (see comments on bfq_handle_burst) */
777static void bfq_reset_burst_list(struct bfq_data *bfqd, struct bfq_queue *bfqq)
778{
779 struct bfq_queue *item;
780 struct hlist_node *n;
781
782 hlist_for_each_entry_safe(item, n, &bfqd->burst_list, burst_list_node)
783 hlist_del_init(&item->burst_list_node);
784 hlist_add_head(&bfqq->burst_list_node, &bfqd->burst_list);
785 bfqd->burst_size = 1;
786 bfqd->burst_parent_entity = bfqq->entity.parent;
787}
788
789/* Add bfqq to the list of queues in current burst (see bfq_handle_burst) */
790static void bfq_add_to_burst(struct bfq_data *bfqd, struct bfq_queue *bfqq)
791{
792 /* Increment burst size to take into account also bfqq */
793 bfqd->burst_size++;
794
795 if (bfqd->burst_size == bfqd->bfq_large_burst_thresh) {
796 struct bfq_queue *pos, *bfqq_item;
797 struct hlist_node *n;
798
799 /*
800 * Enough queues have been activated shortly after each
801 * other to consider this burst as large.
802 */
803 bfqd->large_burst = true;
804
805 /*
806 * We can now mark all queues in the burst list as
807 * belonging to a large burst.
808 */
809 hlist_for_each_entry(bfqq_item, &bfqd->burst_list,
810 burst_list_node)
811 bfq_mark_bfqq_in_large_burst(bfqq_item);
812 bfq_mark_bfqq_in_large_burst(bfqq);
813
814 /*
815 * From now on, and until the current burst finishes, any
816 * new queue being activated shortly after the last queue
817 * was inserted in the burst can be immediately marked as
818 * belonging to a large burst. So the burst list is not
819 * needed any more. Remove it.
820 */
821 hlist_for_each_entry_safe(pos, n, &bfqd->burst_list,
822 burst_list_node)
823 hlist_del_init(&pos->burst_list_node);
824 } else /*
825 * Burst not yet large: add bfqq to the burst list. Do
826 * not increment the ref counter for bfqq, because bfqq
827 * is removed from the burst list before freeing bfqq
828 * in put_queue.
829 */
830 hlist_add_head(&bfqq->burst_list_node, &bfqd->burst_list);
831}
832
833/*
834 * If many queues belonging to the same group happen to be created
835 * shortly after each other, then the processes associated with these
836 * queues have typically a common goal. In particular, bursts of queue
837 * creations are usually caused by services or applications that spawn
838 * many parallel threads/processes. Examples are systemd during boot,
839 * or git grep. To help these processes get their job done as soon as
840 * possible, it is usually better to not grant either weight-raising
841 * or device idling to their queues.
842 *
843 * In this comment we describe, firstly, the reasons why this fact
844 * holds, and, secondly, the next function, which implements the main
845 * steps needed to properly mark these queues so that they can then be
846 * treated in a different way.
847 *
848 * The above services or applications benefit mostly from a high
849 * throughput: the quicker the requests of the activated queues are
850 * cumulatively served, the sooner the target job of these queues gets
851 * completed. As a consequence, weight-raising any of these queues,
852 * which also implies idling the device for it, is almost always
853 * counterproductive. In most cases it just lowers throughput.
854 *
855 * On the other hand, a burst of queue creations may be caused also by
856 * the start of an application that does not consist of a lot of
857 * parallel I/O-bound threads. In fact, with a complex application,
858 * several short processes may need to be executed to start-up the
859 * application. In this respect, to start an application as quickly as
860 * possible, the best thing to do is in any case to privilege the I/O
861 * related to the application with respect to all other
862 * I/O. Therefore, the best strategy to start as quickly as possible
863 * an application that causes a burst of queue creations is to
864 * weight-raise all the queues created during the burst. This is the
865 * exact opposite of the best strategy for the other type of bursts.
866 *
867 * In the end, to take the best action for each of the two cases, the
868 * two types of bursts need to be distinguished. Fortunately, this
869 * seems relatively easy, by looking at the sizes of the bursts. In
870 * particular, we found a threshold such that only bursts with a
871 * larger size than that threshold are apparently caused by
872 * services or commands such as systemd or git grep. For brevity,
873 * hereafter we call just 'large' these bursts. BFQ *does not*
874 * weight-raise queues whose creation occurs in a large burst. In
875 * addition, for each of these queues BFQ performs or does not perform
876 * idling depending on which choice boosts the throughput more. The
877 * exact choice depends on the device and request pattern at
878 * hand.
879 *
880 * Unfortunately, false positives may occur while an interactive task
881 * is starting (e.g., an application is being started). The
882 * consequence is that the queues associated with the task do not
883 * enjoy weight raising as expected. Fortunately these false positives
884 * are very rare. They typically occur if some service happens to
885 * start doing I/O exactly when the interactive task starts.
886 *
887 * Turning back to the next function, it implements all the steps
888 * needed to detect the occurrence of a large burst and to properly
889 * mark all the queues belonging to it (so that they can then be
890 * treated in a different way). This goal is achieved by maintaining a
891 * "burst list" that holds, temporarily, the queues that belong to the
892 * burst in progress. The list is then used to mark these queues as
893 * belonging to a large burst if the burst does become large. The main
894 * steps are the following.
895 *
896 * . when the very first queue is created, the queue is inserted into the
897 * list (as it could be the first queue in a possible burst)
898 *
899 * . if the current burst has not yet become large, and a queue Q that does
900 * not yet belong to the burst is activated shortly after the last time
901 * at which a new queue entered the burst list, then the function appends
902 * Q to the burst list
903 *
904 * . if, as a consequence of the previous step, the burst size reaches
905 * the large-burst threshold, then
906 *
907 * . all the queues in the burst list are marked as belonging to a
908 * large burst
909 *
910 * . the burst list is deleted; in fact, the burst list already served
911 * its purpose (keeping temporarily track of the queues in a burst,
912 * so as to be able to mark them as belonging to a large burst in the
913 * previous sub-step), and now is not needed any more
914 *
915 * . the device enters a large-burst mode
916 *
917 * . if a queue Q that does not belong to the burst is created while
918 * the device is in large-burst mode and shortly after the last time
919 * at which a queue either entered the burst list or was marked as
920 * belonging to the current large burst, then Q is immediately marked
921 * as belonging to a large burst.
922 *
923 * . if a queue Q that does not belong to the burst is created a while
924 * later, i.e., not shortly after, than the last time at which a queue
925 * either entered the burst list or was marked as belonging to the
926 * current large burst, then the current burst is deemed as finished and:
927 *
928 * . the large-burst mode is reset if set
929 *
930 * . the burst list is emptied
931 *
932 * . Q is inserted in the burst list, as Q may be the first queue
933 * in a possible new burst (then the burst list contains just Q
934 * after this step).
935 */
936static void bfq_handle_burst(struct bfq_data *bfqd, struct bfq_queue *bfqq)
937{
938 /*
939 * If bfqq is already in the burst list or is part of a large
940 * burst, or finally has just been split, then there is
941 * nothing else to do.
942 */
943 if (!hlist_unhashed(&bfqq->burst_list_node) ||
944 bfq_bfqq_in_large_burst(bfqq) ||
945 time_is_after_eq_jiffies(bfqq->split_time +
946 msecs_to_jiffies(10)))
947 return;
948
949 /*
950 * If bfqq's creation happens late enough, or bfqq belongs to
951 * a different group than the burst group, then the current
952 * burst is finished, and related data structures must be
953 * reset.
954 *
955 * In this respect, consider the special case where bfqq is
956 * the very first queue created after BFQ is selected for this
957 * device. In this case, last_ins_in_burst and
958 * burst_parent_entity are not yet significant when we get
959 * here. But it is easy to verify that, whether or not the
960 * following condition is true, bfqq will end up being
961 * inserted into the burst list. In particular the list will
962 * happen to contain only bfqq. And this is exactly what has
963 * to happen, as bfqq may be the first queue of the first
964 * burst.
965 */
966 if (time_is_before_jiffies(bfqd->last_ins_in_burst +
967 bfqd->bfq_burst_interval) ||
968 bfqq->entity.parent != bfqd->burst_parent_entity) {
969 bfqd->large_burst = false;
970 bfq_reset_burst_list(bfqd, bfqq);
971 goto end;
972 }
973
974 /*
975 * If we get here, then bfqq is being activated shortly after the
976 * last queue. So, if the current burst is also large, we can mark
977 * bfqq as belonging to this large burst immediately.
978 */
979 if (bfqd->large_burst) {
980 bfq_mark_bfqq_in_large_burst(bfqq);
981 goto end;
982 }
983
984 /*
985 * If we get here, then a large-burst state has not yet been
986 * reached, but bfqq is being activated shortly after the last
987 * queue. Then we add bfqq to the burst.
988 */
989 bfq_add_to_burst(bfqd, bfqq);
990end:
991 /*
992 * At this point, bfqq either has been added to the current
993 * burst or has caused the current burst to terminate and a
994 * possible new burst to start. In particular, in the second
995 * case, bfqq has become the first queue in the possible new
996 * burst. In both cases last_ins_in_burst needs to be moved
997 * forward.
998 */
999 bfqd->last_ins_in_burst = jiffies;
1000}
1001
aee69d78
PV
1002static int bfq_bfqq_budget_left(struct bfq_queue *bfqq)
1003{
1004 struct bfq_entity *entity = &bfqq->entity;
1005
1006 return entity->budget - entity->service;
1007}
1008
1009/*
1010 * If enough samples have been computed, return the current max budget
1011 * stored in bfqd, which is dynamically updated according to the
1012 * estimated disk peak rate; otherwise return the default max budget
1013 */
1014static int bfq_max_budget(struct bfq_data *bfqd)
1015{
1016 if (bfqd->budgets_assigned < bfq_stats_min_budgets)
1017 return bfq_default_max_budget;
1018 else
1019 return bfqd->bfq_max_budget;
1020}
1021
1022/*
1023 * Return min budget, which is a fraction of the current or default
1024 * max budget (trying with 1/32)
1025 */
1026static int bfq_min_budget(struct bfq_data *bfqd)
1027{
1028 if (bfqd->budgets_assigned < bfq_stats_min_budgets)
1029 return bfq_default_max_budget / 32;
1030 else
1031 return bfqd->bfq_max_budget / 32;
1032}
1033
aee69d78
PV
1034/*
1035 * The next function, invoked after the input queue bfqq switches from
1036 * idle to busy, updates the budget of bfqq. The function also tells
1037 * whether the in-service queue should be expired, by returning
1038 * true. The purpose of expiring the in-service queue is to give bfqq
1039 * the chance to possibly preempt the in-service queue, and the reason
44e44a1b
PV
1040 * for preempting the in-service queue is to achieve one of the two
1041 * goals below.
aee69d78 1042 *
44e44a1b
PV
1043 * 1. Guarantee to bfqq its reserved bandwidth even if bfqq has
1044 * expired because it has remained idle. In particular, bfqq may have
1045 * expired for one of the following two reasons:
aee69d78
PV
1046 *
1047 * - BFQQE_NO_MORE_REQUESTS bfqq did not enjoy any device idling
1048 * and did not make it to issue a new request before its last
1049 * request was served;
1050 *
1051 * - BFQQE_TOO_IDLE bfqq did enjoy device idling, but did not issue
1052 * a new request before the expiration of the idling-time.
1053 *
1054 * Even if bfqq has expired for one of the above reasons, the process
1055 * associated with the queue may be however issuing requests greedily,
1056 * and thus be sensitive to the bandwidth it receives (bfqq may have
1057 * remained idle for other reasons: CPU high load, bfqq not enjoying
1058 * idling, I/O throttling somewhere in the path from the process to
1059 * the I/O scheduler, ...). But if, after every expiration for one of
1060 * the above two reasons, bfqq has to wait for the service of at least
1061 * one full budget of another queue before being served again, then
1062 * bfqq is likely to get a much lower bandwidth or resource time than
1063 * its reserved ones. To address this issue, two countermeasures need
1064 * to be taken.
1065 *
1066 * First, the budget and the timestamps of bfqq need to be updated in
1067 * a special way on bfqq reactivation: they need to be updated as if
1068 * bfqq did not remain idle and did not expire. In fact, if they are
1069 * computed as if bfqq expired and remained idle until reactivation,
1070 * then the process associated with bfqq is treated as if, instead of
1071 * being greedy, it stopped issuing requests when bfqq remained idle,
1072 * and restarts issuing requests only on this reactivation. In other
1073 * words, the scheduler does not help the process recover the "service
1074 * hole" between bfqq expiration and reactivation. As a consequence,
1075 * the process receives a lower bandwidth than its reserved one. In
1076 * contrast, to recover this hole, the budget must be updated as if
1077 * bfqq was not expired at all before this reactivation, i.e., it must
1078 * be set to the value of the remaining budget when bfqq was
1079 * expired. Along the same line, timestamps need to be assigned the
1080 * value they had the last time bfqq was selected for service, i.e.,
1081 * before last expiration. Thus timestamps need to be back-shifted
1082 * with respect to their normal computation (see [1] for more details
1083 * on this tricky aspect).
1084 *
1085 * Secondly, to allow the process to recover the hole, the in-service
1086 * queue must be expired too, to give bfqq the chance to preempt it
1087 * immediately. In fact, if bfqq has to wait for a full budget of the
1088 * in-service queue to be completed, then it may become impossible to
1089 * let the process recover the hole, even if the back-shifted
1090 * timestamps of bfqq are lower than those of the in-service queue. If
1091 * this happens for most or all of the holes, then the process may not
1092 * receive its reserved bandwidth. In this respect, it is worth noting
1093 * that, being the service of outstanding requests unpreemptible, a
1094 * little fraction of the holes may however be unrecoverable, thereby
1095 * causing a little loss of bandwidth.
1096 *
1097 * The last important point is detecting whether bfqq does need this
1098 * bandwidth recovery. In this respect, the next function deems the
1099 * process associated with bfqq greedy, and thus allows it to recover
1100 * the hole, if: 1) the process is waiting for the arrival of a new
1101 * request (which implies that bfqq expired for one of the above two
1102 * reasons), and 2) such a request has arrived soon. The first
1103 * condition is controlled through the flag non_blocking_wait_rq,
1104 * while the second through the flag arrived_in_time. If both
1105 * conditions hold, then the function computes the budget in the
1106 * above-described special way, and signals that the in-service queue
1107 * should be expired. Timestamp back-shifting is done later in
1108 * __bfq_activate_entity.
44e44a1b
PV
1109 *
1110 * 2. Reduce latency. Even if timestamps are not backshifted to let
1111 * the process associated with bfqq recover a service hole, bfqq may
1112 * however happen to have, after being (re)activated, a lower finish
1113 * timestamp than the in-service queue. That is, the next budget of
1114 * bfqq may have to be completed before the one of the in-service
1115 * queue. If this is the case, then preempting the in-service queue
1116 * allows this goal to be achieved, apart from the unpreemptible,
1117 * outstanding requests mentioned above.
1118 *
1119 * Unfortunately, regardless of which of the above two goals one wants
1120 * to achieve, service trees need first to be updated to know whether
1121 * the in-service queue must be preempted. To have service trees
1122 * correctly updated, the in-service queue must be expired and
1123 * rescheduled, and bfqq must be scheduled too. This is one of the
1124 * most costly operations (in future versions, the scheduling
1125 * mechanism may be re-designed in such a way to make it possible to
1126 * know whether preemption is needed without needing to update service
1127 * trees). In addition, queue preemptions almost always cause random
1128 * I/O, and thus loss of throughput. Because of these facts, the next
1129 * function adopts the following simple scheme to avoid both costly
1130 * operations and too frequent preemptions: it requests the expiration
1131 * of the in-service queue (unconditionally) only for queues that need
1132 * to recover a hole, or that either are weight-raised or deserve to
1133 * be weight-raised.
aee69d78
PV
1134 */
1135static bool bfq_bfqq_update_budg_for_activation(struct bfq_data *bfqd,
1136 struct bfq_queue *bfqq,
44e44a1b
PV
1137 bool arrived_in_time,
1138 bool wr_or_deserves_wr)
aee69d78
PV
1139{
1140 struct bfq_entity *entity = &bfqq->entity;
1141
1142 if (bfq_bfqq_non_blocking_wait_rq(bfqq) && arrived_in_time) {
1143 /*
1144 * We do not clear the flag non_blocking_wait_rq here, as
1145 * the latter is used in bfq_activate_bfqq to signal
1146 * that timestamps need to be back-shifted (and is
1147 * cleared right after).
1148 */
1149
1150 /*
1151 * In next assignment we rely on that either
1152 * entity->service or entity->budget are not updated
1153 * on expiration if bfqq is empty (see
1154 * __bfq_bfqq_recalc_budget). Thus both quantities
1155 * remain unchanged after such an expiration, and the
1156 * following statement therefore assigns to
1157 * entity->budget the remaining budget on such an
1158 * expiration. For clarity, entity->service is not
1159 * updated on expiration in any case, and, in normal
1160 * operation, is reset only when bfqq is selected for
1161 * service (see bfq_get_next_queue).
1162 */
1163 entity->budget = min_t(unsigned long,
1164 bfq_bfqq_budget_left(bfqq),
1165 bfqq->max_budget);
1166
1167 return true;
1168 }
1169
1170 entity->budget = max_t(unsigned long, bfqq->max_budget,
1171 bfq_serv_to_charge(bfqq->next_rq, bfqq));
1172 bfq_clear_bfqq_non_blocking_wait_rq(bfqq);
44e44a1b
PV
1173 return wr_or_deserves_wr;
1174}
1175
1176static unsigned int bfq_wr_duration(struct bfq_data *bfqd)
1177{
1178 u64 dur;
1179
1180 if (bfqd->bfq_wr_max_time > 0)
1181 return bfqd->bfq_wr_max_time;
1182
1183 dur = bfqd->RT_prod;
1184 do_div(dur, bfqd->peak_rate);
1185
1186 /*
1187 * Limit duration between 3 and 13 seconds. Tests show that
1188 * higher values than 13 seconds often yield the opposite of
1189 * the desired result, i.e., worsen responsiveness by letting
1190 * non-interactive and non-soft-real-time applications
1191 * preserve weight raising for a too long time interval.
1192 *
1193 * On the other end, lower values than 3 seconds make it
1194 * difficult for most interactive tasks to complete their jobs
1195 * before weight-raising finishes.
1196 */
1197 if (dur > msecs_to_jiffies(13000))
1198 dur = msecs_to_jiffies(13000);
1199 else if (dur < msecs_to_jiffies(3000))
1200 dur = msecs_to_jiffies(3000);
1201
1202 return dur;
1203}
1204
1205static void bfq_update_bfqq_wr_on_rq_arrival(struct bfq_data *bfqd,
1206 struct bfq_queue *bfqq,
1207 unsigned int old_wr_coeff,
1208 bool wr_or_deserves_wr,
77b7dcea 1209 bool interactive,
e1b2324d 1210 bool in_burst,
77b7dcea 1211 bool soft_rt)
44e44a1b
PV
1212{
1213 if (old_wr_coeff == 1 && wr_or_deserves_wr) {
1214 /* start a weight-raising period */
77b7dcea
PV
1215 if (interactive) {
1216 bfqq->wr_coeff = bfqd->bfq_wr_coeff;
1217 bfqq->wr_cur_max_time = bfq_wr_duration(bfqd);
1218 } else {
1219 bfqq->wr_start_at_switch_to_srt = jiffies;
1220 bfqq->wr_coeff = bfqd->bfq_wr_coeff *
1221 BFQ_SOFTRT_WEIGHT_FACTOR;
1222 bfqq->wr_cur_max_time =
1223 bfqd->bfq_wr_rt_max_time;
1224 }
44e44a1b
PV
1225
1226 /*
1227 * If needed, further reduce budget to make sure it is
1228 * close to bfqq's backlog, so as to reduce the
1229 * scheduling-error component due to a too large
1230 * budget. Do not care about throughput consequences,
1231 * but only about latency. Finally, do not assign a
1232 * too small budget either, to avoid increasing
1233 * latency by causing too frequent expirations.
1234 */
1235 bfqq->entity.budget = min_t(unsigned long,
1236 bfqq->entity.budget,
1237 2 * bfq_min_budget(bfqd));
1238 } else if (old_wr_coeff > 1) {
77b7dcea
PV
1239 if (interactive) { /* update wr coeff and duration */
1240 bfqq->wr_coeff = bfqd->bfq_wr_coeff;
1241 bfqq->wr_cur_max_time = bfq_wr_duration(bfqd);
e1b2324d
AA
1242 } else if (in_burst)
1243 bfqq->wr_coeff = 1;
1244 else if (soft_rt) {
77b7dcea
PV
1245 /*
1246 * The application is now or still meeting the
1247 * requirements for being deemed soft rt. We
1248 * can then correctly and safely (re)charge
1249 * the weight-raising duration for the
1250 * application with the weight-raising
1251 * duration for soft rt applications.
1252 *
1253 * In particular, doing this recharge now, i.e.,
1254 * before the weight-raising period for the
1255 * application finishes, reduces the probability
1256 * of the following negative scenario:
1257 * 1) the weight of a soft rt application is
1258 * raised at startup (as for any newly
1259 * created application),
1260 * 2) since the application is not interactive,
1261 * at a certain time weight-raising is
1262 * stopped for the application,
1263 * 3) at that time the application happens to
1264 * still have pending requests, and hence
1265 * is destined to not have a chance to be
1266 * deemed soft rt before these requests are
1267 * completed (see the comments to the
1268 * function bfq_bfqq_softrt_next_start()
1269 * for details on soft rt detection),
1270 * 4) these pending requests experience a high
1271 * latency because the application is not
1272 * weight-raised while they are pending.
1273 */
1274 if (bfqq->wr_cur_max_time !=
1275 bfqd->bfq_wr_rt_max_time) {
1276 bfqq->wr_start_at_switch_to_srt =
1277 bfqq->last_wr_start_finish;
1278
1279 bfqq->wr_cur_max_time =
1280 bfqd->bfq_wr_rt_max_time;
1281 bfqq->wr_coeff = bfqd->bfq_wr_coeff *
1282 BFQ_SOFTRT_WEIGHT_FACTOR;
1283 }
1284 bfqq->last_wr_start_finish = jiffies;
1285 }
44e44a1b
PV
1286 }
1287}
1288
1289static bool bfq_bfqq_idle_for_long_time(struct bfq_data *bfqd,
1290 struct bfq_queue *bfqq)
1291{
1292 return bfqq->dispatched == 0 &&
1293 time_is_before_jiffies(
1294 bfqq->budget_timeout +
1295 bfqd->bfq_wr_min_idle_time);
aee69d78
PV
1296}
1297
1298static void bfq_bfqq_handle_idle_busy_switch(struct bfq_data *bfqd,
1299 struct bfq_queue *bfqq,
44e44a1b
PV
1300 int old_wr_coeff,
1301 struct request *rq,
1302 bool *interactive)
aee69d78 1303{
e1b2324d
AA
1304 bool soft_rt, in_burst, wr_or_deserves_wr,
1305 bfqq_wants_to_preempt,
44e44a1b 1306 idle_for_long_time = bfq_bfqq_idle_for_long_time(bfqd, bfqq),
aee69d78
PV
1307 /*
1308 * See the comments on
1309 * bfq_bfqq_update_budg_for_activation for
1310 * details on the usage of the next variable.
1311 */
1312 arrived_in_time = ktime_get_ns() <=
1313 bfqq->ttime.last_end_request +
1314 bfqd->bfq_slice_idle * 3;
1315
e21b7a0b
AA
1316 bfqg_stats_update_io_add(bfqq_group(RQ_BFQQ(rq)), bfqq, rq->cmd_flags);
1317
aee69d78 1318 /*
44e44a1b
PV
1319 * bfqq deserves to be weight-raised if:
1320 * - it is sync,
e1b2324d 1321 * - it does not belong to a large burst,
36eca894
AA
1322 * - it has been idle for enough time or is soft real-time,
1323 * - is linked to a bfq_io_cq (it is not shared in any sense).
44e44a1b 1324 */
e1b2324d 1325 in_burst = bfq_bfqq_in_large_burst(bfqq);
77b7dcea 1326 soft_rt = bfqd->bfq_wr_max_softrt_rate > 0 &&
e1b2324d 1327 !in_burst &&
77b7dcea 1328 time_is_before_jiffies(bfqq->soft_rt_next_start);
e1b2324d 1329 *interactive = !in_burst && idle_for_long_time;
44e44a1b
PV
1330 wr_or_deserves_wr = bfqd->low_latency &&
1331 (bfqq->wr_coeff > 1 ||
36eca894
AA
1332 (bfq_bfqq_sync(bfqq) &&
1333 bfqq->bic && (*interactive || soft_rt)));
44e44a1b
PV
1334
1335 /*
1336 * Using the last flag, update budget and check whether bfqq
1337 * may want to preempt the in-service queue.
aee69d78
PV
1338 */
1339 bfqq_wants_to_preempt =
1340 bfq_bfqq_update_budg_for_activation(bfqd, bfqq,
44e44a1b
PV
1341 arrived_in_time,
1342 wr_or_deserves_wr);
aee69d78 1343
e1b2324d
AA
1344 /*
1345 * If bfqq happened to be activated in a burst, but has been
1346 * idle for much more than an interactive queue, then we
1347 * assume that, in the overall I/O initiated in the burst, the
1348 * I/O associated with bfqq is finished. So bfqq does not need
1349 * to be treated as a queue belonging to a burst
1350 * anymore. Accordingly, we reset bfqq's in_large_burst flag
1351 * if set, and remove bfqq from the burst list if it's
1352 * there. We do not decrement burst_size, because the fact
1353 * that bfqq does not need to belong to the burst list any
1354 * more does not invalidate the fact that bfqq was created in
1355 * a burst.
1356 */
1357 if (likely(!bfq_bfqq_just_created(bfqq)) &&
1358 idle_for_long_time &&
1359 time_is_before_jiffies(
1360 bfqq->budget_timeout +
1361 msecs_to_jiffies(10000))) {
1362 hlist_del_init(&bfqq->burst_list_node);
1363 bfq_clear_bfqq_in_large_burst(bfqq);
1364 }
1365
1366 bfq_clear_bfqq_just_created(bfqq);
1367
1368
aee69d78
PV
1369 if (!bfq_bfqq_IO_bound(bfqq)) {
1370 if (arrived_in_time) {
1371 bfqq->requests_within_timer++;
1372 if (bfqq->requests_within_timer >=
1373 bfqd->bfq_requests_within_timer)
1374 bfq_mark_bfqq_IO_bound(bfqq);
1375 } else
1376 bfqq->requests_within_timer = 0;
1377 }
1378
44e44a1b 1379 if (bfqd->low_latency) {
36eca894
AA
1380 if (unlikely(time_is_after_jiffies(bfqq->split_time)))
1381 /* wraparound */
1382 bfqq->split_time =
1383 jiffies - bfqd->bfq_wr_min_idle_time - 1;
1384
1385 if (time_is_before_jiffies(bfqq->split_time +
1386 bfqd->bfq_wr_min_idle_time)) {
1387 bfq_update_bfqq_wr_on_rq_arrival(bfqd, bfqq,
1388 old_wr_coeff,
1389 wr_or_deserves_wr,
1390 *interactive,
e1b2324d 1391 in_burst,
36eca894
AA
1392 soft_rt);
1393
1394 if (old_wr_coeff != bfqq->wr_coeff)
1395 bfqq->entity.prio_changed = 1;
1396 }
44e44a1b
PV
1397 }
1398
77b7dcea
PV
1399 bfqq->last_idle_bklogged = jiffies;
1400 bfqq->service_from_backlogged = 0;
1401 bfq_clear_bfqq_softrt_update(bfqq);
1402
aee69d78
PV
1403 bfq_add_bfqq_busy(bfqd, bfqq);
1404
1405 /*
1406 * Expire in-service queue only if preemption may be needed
1407 * for guarantees. In this respect, the function
1408 * next_queue_may_preempt just checks a simple, necessary
1409 * condition, and not a sufficient condition based on
1410 * timestamps. In fact, for the latter condition to be
1411 * evaluated, timestamps would need first to be updated, and
1412 * this operation is quite costly (see the comments on the
1413 * function bfq_bfqq_update_budg_for_activation).
1414 */
1415 if (bfqd->in_service_queue && bfqq_wants_to_preempt &&
77b7dcea 1416 bfqd->in_service_queue->wr_coeff < bfqq->wr_coeff &&
aee69d78
PV
1417 next_queue_may_preempt(bfqd))
1418 bfq_bfqq_expire(bfqd, bfqd->in_service_queue,
1419 false, BFQQE_PREEMPTED);
1420}
1421
1422static void bfq_add_request(struct request *rq)
1423{
1424 struct bfq_queue *bfqq = RQ_BFQQ(rq);
1425 struct bfq_data *bfqd = bfqq->bfqd;
1426 struct request *next_rq, *prev;
44e44a1b
PV
1427 unsigned int old_wr_coeff = bfqq->wr_coeff;
1428 bool interactive = false;
aee69d78
PV
1429
1430 bfq_log_bfqq(bfqd, bfqq, "add_request %d", rq_is_sync(rq));
1431 bfqq->queued[rq_is_sync(rq)]++;
1432 bfqd->queued++;
1433
1434 elv_rb_add(&bfqq->sort_list, rq);
1435
1436 /*
1437 * Check if this request is a better next-serve candidate.
1438 */
1439 prev = bfqq->next_rq;
1440 next_rq = bfq_choose_req(bfqd, bfqq->next_rq, rq, bfqd->last_position);
1441 bfqq->next_rq = next_rq;
1442
36eca894
AA
1443 /*
1444 * Adjust priority tree position, if next_rq changes.
1445 */
1446 if (prev != bfqq->next_rq)
1447 bfq_pos_tree_add_move(bfqd, bfqq);
1448
aee69d78 1449 if (!bfq_bfqq_busy(bfqq)) /* switching to busy ... */
44e44a1b
PV
1450 bfq_bfqq_handle_idle_busy_switch(bfqd, bfqq, old_wr_coeff,
1451 rq, &interactive);
1452 else {
1453 if (bfqd->low_latency && old_wr_coeff == 1 && !rq_is_sync(rq) &&
1454 time_is_before_jiffies(
1455 bfqq->last_wr_start_finish +
1456 bfqd->bfq_wr_min_inter_arr_async)) {
1457 bfqq->wr_coeff = bfqd->bfq_wr_coeff;
1458 bfqq->wr_cur_max_time = bfq_wr_duration(bfqd);
1459
cfd69712 1460 bfqd->wr_busy_queues++;
44e44a1b
PV
1461 bfqq->entity.prio_changed = 1;
1462 }
1463 if (prev != bfqq->next_rq)
1464 bfq_updated_next_req(bfqd, bfqq);
1465 }
1466
1467 /*
1468 * Assign jiffies to last_wr_start_finish in the following
1469 * cases:
1470 *
1471 * . if bfqq is not going to be weight-raised, because, for
1472 * non weight-raised queues, last_wr_start_finish stores the
1473 * arrival time of the last request; as of now, this piece
1474 * of information is used only for deciding whether to
1475 * weight-raise async queues
1476 *
1477 * . if bfqq is not weight-raised, because, if bfqq is now
1478 * switching to weight-raised, then last_wr_start_finish
1479 * stores the time when weight-raising starts
1480 *
1481 * . if bfqq is interactive, because, regardless of whether
1482 * bfqq is currently weight-raised, the weight-raising
1483 * period must start or restart (this case is considered
1484 * separately because it is not detected by the above
1485 * conditions, if bfqq is already weight-raised)
77b7dcea
PV
1486 *
1487 * last_wr_start_finish has to be updated also if bfqq is soft
1488 * real-time, because the weight-raising period is constantly
1489 * restarted on idle-to-busy transitions for these queues, but
1490 * this is already done in bfq_bfqq_handle_idle_busy_switch if
1491 * needed.
44e44a1b
PV
1492 */
1493 if (bfqd->low_latency &&
1494 (old_wr_coeff == 1 || bfqq->wr_coeff == 1 || interactive))
1495 bfqq->last_wr_start_finish = jiffies;
aee69d78
PV
1496}
1497
1498static struct request *bfq_find_rq_fmerge(struct bfq_data *bfqd,
1499 struct bio *bio,
1500 struct request_queue *q)
1501{
1502 struct bfq_queue *bfqq = bfqd->bio_bfqq;
1503
1504
1505 if (bfqq)
1506 return elv_rb_find(&bfqq->sort_list, bio_end_sector(bio));
1507
1508 return NULL;
1509}
1510
ab0e43e9
PV
1511static sector_t get_sdist(sector_t last_pos, struct request *rq)
1512{
1513 if (last_pos)
1514 return abs(blk_rq_pos(rq) - last_pos);
1515
1516 return 0;
1517}
1518
aee69d78
PV
1519#if 0 /* Still not clear if we can do without next two functions */
1520static void bfq_activate_request(struct request_queue *q, struct request *rq)
1521{
1522 struct bfq_data *bfqd = q->elevator->elevator_data;
1523
1524 bfqd->rq_in_driver++;
aee69d78
PV
1525}
1526
1527static void bfq_deactivate_request(struct request_queue *q, struct request *rq)
1528{
1529 struct bfq_data *bfqd = q->elevator->elevator_data;
1530
1531 bfqd->rq_in_driver--;
1532}
1533#endif
1534
1535static void bfq_remove_request(struct request_queue *q,
1536 struct request *rq)
1537{
1538 struct bfq_queue *bfqq = RQ_BFQQ(rq);
1539 struct bfq_data *bfqd = bfqq->bfqd;
1540 const int sync = rq_is_sync(rq);
1541
1542 if (bfqq->next_rq == rq) {
1543 bfqq->next_rq = bfq_find_next_rq(bfqd, bfqq, rq);
1544 bfq_updated_next_req(bfqd, bfqq);
1545 }
1546
1547 if (rq->queuelist.prev != &rq->queuelist)
1548 list_del_init(&rq->queuelist);
1549 bfqq->queued[sync]--;
1550 bfqd->queued--;
1551 elv_rb_del(&bfqq->sort_list, rq);
1552
1553 elv_rqhash_del(q, rq);
1554 if (q->last_merge == rq)
1555 q->last_merge = NULL;
1556
1557 if (RB_EMPTY_ROOT(&bfqq->sort_list)) {
1558 bfqq->next_rq = NULL;
1559
1560 if (bfq_bfqq_busy(bfqq) && bfqq != bfqd->in_service_queue) {
e21b7a0b 1561 bfq_del_bfqq_busy(bfqd, bfqq, false);
aee69d78
PV
1562 /*
1563 * bfqq emptied. In normal operation, when
1564 * bfqq is empty, bfqq->entity.service and
1565 * bfqq->entity.budget must contain,
1566 * respectively, the service received and the
1567 * budget used last time bfqq emptied. These
1568 * facts do not hold in this case, as at least
1569 * this last removal occurred while bfqq is
1570 * not in service. To avoid inconsistencies,
1571 * reset both bfqq->entity.service and
1572 * bfqq->entity.budget, if bfqq has still a
1573 * process that may issue I/O requests to it.
1574 */
1575 bfqq->entity.budget = bfqq->entity.service = 0;
1576 }
36eca894
AA
1577
1578 /*
1579 * Remove queue from request-position tree as it is empty.
1580 */
1581 if (bfqq->pos_root) {
1582 rb_erase(&bfqq->pos_node, bfqq->pos_root);
1583 bfqq->pos_root = NULL;
1584 }
aee69d78
PV
1585 }
1586
1587 if (rq->cmd_flags & REQ_META)
1588 bfqq->meta_pending--;
e21b7a0b
AA
1589
1590 bfqg_stats_update_io_remove(bfqq_group(bfqq), rq->cmd_flags);
aee69d78
PV
1591}
1592
1593static bool bfq_bio_merge(struct blk_mq_hw_ctx *hctx, struct bio *bio)
1594{
1595 struct request_queue *q = hctx->queue;
1596 struct bfq_data *bfqd = q->elevator->elevator_data;
1597 struct request *free = NULL;
1598 /*
1599 * bfq_bic_lookup grabs the queue_lock: invoke it now and
1600 * store its return value for later use, to avoid nesting
1601 * queue_lock inside the bfqd->lock. We assume that the bic
1602 * returned by bfq_bic_lookup does not go away before
1603 * bfqd->lock is taken.
1604 */
1605 struct bfq_io_cq *bic = bfq_bic_lookup(bfqd, current->io_context, q);
1606 bool ret;
1607
1608 spin_lock_irq(&bfqd->lock);
1609
1610 if (bic)
1611 bfqd->bio_bfqq = bic_to_bfqq(bic, op_is_sync(bio->bi_opf));
1612 else
1613 bfqd->bio_bfqq = NULL;
1614 bfqd->bio_bic = bic;
1615
1616 ret = blk_mq_sched_try_merge(q, bio, &free);
1617
1618 if (free)
1619 blk_mq_free_request(free);
1620 spin_unlock_irq(&bfqd->lock);
1621
1622 return ret;
1623}
1624
1625static int bfq_request_merge(struct request_queue *q, struct request **req,
1626 struct bio *bio)
1627{
1628 struct bfq_data *bfqd = q->elevator->elevator_data;
1629 struct request *__rq;
1630
1631 __rq = bfq_find_rq_fmerge(bfqd, bio, q);
1632 if (__rq && elv_bio_merge_ok(__rq, bio)) {
1633 *req = __rq;
1634 return ELEVATOR_FRONT_MERGE;
1635 }
1636
1637 return ELEVATOR_NO_MERGE;
1638}
1639
1640static void bfq_request_merged(struct request_queue *q, struct request *req,
1641 enum elv_merge type)
1642{
1643 if (type == ELEVATOR_FRONT_MERGE &&
1644 rb_prev(&req->rb_node) &&
1645 blk_rq_pos(req) <
1646 blk_rq_pos(container_of(rb_prev(&req->rb_node),
1647 struct request, rb_node))) {
1648 struct bfq_queue *bfqq = RQ_BFQQ(req);
1649 struct bfq_data *bfqd = bfqq->bfqd;
1650 struct request *prev, *next_rq;
1651
1652 /* Reposition request in its sort_list */
1653 elv_rb_del(&bfqq->sort_list, req);
1654 elv_rb_add(&bfqq->sort_list, req);
1655
1656 /* Choose next request to be served for bfqq */
1657 prev = bfqq->next_rq;
1658 next_rq = bfq_choose_req(bfqd, bfqq->next_rq, req,
1659 bfqd->last_position);
1660 bfqq->next_rq = next_rq;
1661 /*
36eca894
AA
1662 * If next_rq changes, update both the queue's budget to
1663 * fit the new request and the queue's position in its
1664 * rq_pos_tree.
aee69d78 1665 */
36eca894 1666 if (prev != bfqq->next_rq) {
aee69d78 1667 bfq_updated_next_req(bfqd, bfqq);
36eca894
AA
1668 bfq_pos_tree_add_move(bfqd, bfqq);
1669 }
aee69d78
PV
1670 }
1671}
1672
1673static void bfq_requests_merged(struct request_queue *q, struct request *rq,
1674 struct request *next)
1675{
1676 struct bfq_queue *bfqq = RQ_BFQQ(rq), *next_bfqq = RQ_BFQQ(next);
1677
1678 if (!RB_EMPTY_NODE(&rq->rb_node))
e21b7a0b 1679 goto end;
aee69d78
PV
1680 spin_lock_irq(&bfqq->bfqd->lock);
1681
1682 /*
1683 * If next and rq belong to the same bfq_queue and next is older
1684 * than rq, then reposition rq in the fifo (by substituting next
1685 * with rq). Otherwise, if next and rq belong to different
1686 * bfq_queues, never reposition rq: in fact, we would have to
1687 * reposition it with respect to next's position in its own fifo,
1688 * which would most certainly be too expensive with respect to
1689 * the benefits.
1690 */
1691 if (bfqq == next_bfqq &&
1692 !list_empty(&rq->queuelist) && !list_empty(&next->queuelist) &&
1693 next->fifo_time < rq->fifo_time) {
1694 list_del_init(&rq->queuelist);
1695 list_replace_init(&next->queuelist, &rq->queuelist);
1696 rq->fifo_time = next->fifo_time;
1697 }
1698
1699 if (bfqq->next_rq == next)
1700 bfqq->next_rq = rq;
1701
1702 bfq_remove_request(q, next);
1703
1704 spin_unlock_irq(&bfqq->bfqd->lock);
e21b7a0b
AA
1705end:
1706 bfqg_stats_update_io_merged(bfqq_group(bfqq), next->cmd_flags);
aee69d78
PV
1707}
1708
44e44a1b
PV
1709/* Must be called with bfqq != NULL */
1710static void bfq_bfqq_end_wr(struct bfq_queue *bfqq)
1711{
cfd69712
PV
1712 if (bfq_bfqq_busy(bfqq))
1713 bfqq->bfqd->wr_busy_queues--;
44e44a1b
PV
1714 bfqq->wr_coeff = 1;
1715 bfqq->wr_cur_max_time = 0;
77b7dcea 1716 bfqq->last_wr_start_finish = jiffies;
44e44a1b
PV
1717 /*
1718 * Trigger a weight change on the next invocation of
1719 * __bfq_entity_update_weight_prio.
1720 */
1721 bfqq->entity.prio_changed = 1;
1722}
1723
ea25da48
PV
1724void bfq_end_wr_async_queues(struct bfq_data *bfqd,
1725 struct bfq_group *bfqg)
44e44a1b
PV
1726{
1727 int i, j;
1728
1729 for (i = 0; i < 2; i++)
1730 for (j = 0; j < IOPRIO_BE_NR; j++)
1731 if (bfqg->async_bfqq[i][j])
1732 bfq_bfqq_end_wr(bfqg->async_bfqq[i][j]);
1733 if (bfqg->async_idle_bfqq)
1734 bfq_bfqq_end_wr(bfqg->async_idle_bfqq);
1735}
1736
1737static void bfq_end_wr(struct bfq_data *bfqd)
1738{
1739 struct bfq_queue *bfqq;
1740
1741 spin_lock_irq(&bfqd->lock);
1742
1743 list_for_each_entry(bfqq, &bfqd->active_list, bfqq_list)
1744 bfq_bfqq_end_wr(bfqq);
1745 list_for_each_entry(bfqq, &bfqd->idle_list, bfqq_list)
1746 bfq_bfqq_end_wr(bfqq);
1747 bfq_end_wr_async(bfqd);
1748
1749 spin_unlock_irq(&bfqd->lock);
1750}
1751
36eca894
AA
1752static sector_t bfq_io_struct_pos(void *io_struct, bool request)
1753{
1754 if (request)
1755 return blk_rq_pos(io_struct);
1756 else
1757 return ((struct bio *)io_struct)->bi_iter.bi_sector;
1758}
1759
1760static int bfq_rq_close_to_sector(void *io_struct, bool request,
1761 sector_t sector)
1762{
1763 return abs(bfq_io_struct_pos(io_struct, request) - sector) <=
1764 BFQQ_CLOSE_THR;
1765}
1766
1767static struct bfq_queue *bfqq_find_close(struct bfq_data *bfqd,
1768 struct bfq_queue *bfqq,
1769 sector_t sector)
1770{
1771 struct rb_root *root = &bfq_bfqq_to_bfqg(bfqq)->rq_pos_tree;
1772 struct rb_node *parent, *node;
1773 struct bfq_queue *__bfqq;
1774
1775 if (RB_EMPTY_ROOT(root))
1776 return NULL;
1777
1778 /*
1779 * First, if we find a request starting at the end of the last
1780 * request, choose it.
1781 */
1782 __bfqq = bfq_rq_pos_tree_lookup(bfqd, root, sector, &parent, NULL);
1783 if (__bfqq)
1784 return __bfqq;
1785
1786 /*
1787 * If the exact sector wasn't found, the parent of the NULL leaf
1788 * will contain the closest sector (rq_pos_tree sorted by
1789 * next_request position).
1790 */
1791 __bfqq = rb_entry(parent, struct bfq_queue, pos_node);
1792 if (bfq_rq_close_to_sector(__bfqq->next_rq, true, sector))
1793 return __bfqq;
1794
1795 if (blk_rq_pos(__bfqq->next_rq) < sector)
1796 node = rb_next(&__bfqq->pos_node);
1797 else
1798 node = rb_prev(&__bfqq->pos_node);
1799 if (!node)
1800 return NULL;
1801
1802 __bfqq = rb_entry(node, struct bfq_queue, pos_node);
1803 if (bfq_rq_close_to_sector(__bfqq->next_rq, true, sector))
1804 return __bfqq;
1805
1806 return NULL;
1807}
1808
1809static struct bfq_queue *bfq_find_close_cooperator(struct bfq_data *bfqd,
1810 struct bfq_queue *cur_bfqq,
1811 sector_t sector)
1812{
1813 struct bfq_queue *bfqq;
1814
1815 /*
1816 * We shall notice if some of the queues are cooperating,
1817 * e.g., working closely on the same area of the device. In
1818 * that case, we can group them together and: 1) don't waste
1819 * time idling, and 2) serve the union of their requests in
1820 * the best possible order for throughput.
1821 */
1822 bfqq = bfqq_find_close(bfqd, cur_bfqq, sector);
1823 if (!bfqq || bfqq == cur_bfqq)
1824 return NULL;
1825
1826 return bfqq;
1827}
1828
1829static struct bfq_queue *
1830bfq_setup_merge(struct bfq_queue *bfqq, struct bfq_queue *new_bfqq)
1831{
1832 int process_refs, new_process_refs;
1833 struct bfq_queue *__bfqq;
1834
1835 /*
1836 * If there are no process references on the new_bfqq, then it is
1837 * unsafe to follow the ->new_bfqq chain as other bfqq's in the chain
1838 * may have dropped their last reference (not just their last process
1839 * reference).
1840 */
1841 if (!bfqq_process_refs(new_bfqq))
1842 return NULL;
1843
1844 /* Avoid a circular list and skip interim queue merges. */
1845 while ((__bfqq = new_bfqq->new_bfqq)) {
1846 if (__bfqq == bfqq)
1847 return NULL;
1848 new_bfqq = __bfqq;
1849 }
1850
1851 process_refs = bfqq_process_refs(bfqq);
1852 new_process_refs = bfqq_process_refs(new_bfqq);
1853 /*
1854 * If the process for the bfqq has gone away, there is no
1855 * sense in merging the queues.
1856 */
1857 if (process_refs == 0 || new_process_refs == 0)
1858 return NULL;
1859
1860 bfq_log_bfqq(bfqq->bfqd, bfqq, "scheduling merge with queue %d",
1861 new_bfqq->pid);
1862
1863 /*
1864 * Merging is just a redirection: the requests of the process
1865 * owning one of the two queues are redirected to the other queue.
1866 * The latter queue, in its turn, is set as shared if this is the
1867 * first time that the requests of some process are redirected to
1868 * it.
1869 *
6fa3e8d3
PV
1870 * We redirect bfqq to new_bfqq and not the opposite, because
1871 * we are in the context of the process owning bfqq, thus we
1872 * have the io_cq of this process. So we can immediately
1873 * configure this io_cq to redirect the requests of the
1874 * process to new_bfqq. In contrast, the io_cq of new_bfqq is
1875 * not available any more (new_bfqq->bic == NULL).
36eca894 1876 *
6fa3e8d3
PV
1877 * Anyway, even in case new_bfqq coincides with the in-service
1878 * queue, redirecting requests the in-service queue is the
1879 * best option, as we feed the in-service queue with new
1880 * requests close to the last request served and, by doing so,
1881 * are likely to increase the throughput.
36eca894
AA
1882 */
1883 bfqq->new_bfqq = new_bfqq;
1884 new_bfqq->ref += process_refs;
1885 return new_bfqq;
1886}
1887
1888static bool bfq_may_be_close_cooperator(struct bfq_queue *bfqq,
1889 struct bfq_queue *new_bfqq)
1890{
1891 if (bfq_class_idle(bfqq) || bfq_class_idle(new_bfqq) ||
1892 (bfqq->ioprio_class != new_bfqq->ioprio_class))
1893 return false;
1894
1895 /*
1896 * If either of the queues has already been detected as seeky,
1897 * then merging it with the other queue is unlikely to lead to
1898 * sequential I/O.
1899 */
1900 if (BFQQ_SEEKY(bfqq) || BFQQ_SEEKY(new_bfqq))
1901 return false;
1902
1903 /*
1904 * Interleaved I/O is known to be done by (some) applications
1905 * only for reads, so it does not make sense to merge async
1906 * queues.
1907 */
1908 if (!bfq_bfqq_sync(bfqq) || !bfq_bfqq_sync(new_bfqq))
1909 return false;
1910
1911 return true;
1912}
1913
1914/*
1915 * If this function returns true, then bfqq cannot be merged. The idea
1916 * is that true cooperation happens very early after processes start
1917 * to do I/O. Usually, late cooperations are just accidental false
1918 * positives. In case bfqq is weight-raised, such false positives
1919 * would evidently degrade latency guarantees for bfqq.
1920 */
1921static bool wr_from_too_long(struct bfq_queue *bfqq)
1922{
1923 return bfqq->wr_coeff > 1 &&
1924 time_is_before_jiffies(bfqq->last_wr_start_finish +
1925 msecs_to_jiffies(100));
1926}
1927
1928/*
1929 * Attempt to schedule a merge of bfqq with the currently in-service
1930 * queue or with a close queue among the scheduled queues. Return
1931 * NULL if no merge was scheduled, a pointer to the shared bfq_queue
1932 * structure otherwise.
1933 *
1934 * The OOM queue is not allowed to participate to cooperation: in fact, since
1935 * the requests temporarily redirected to the OOM queue could be redirected
1936 * again to dedicated queues at any time, the state needed to correctly
1937 * handle merging with the OOM queue would be quite complex and expensive
1938 * to maintain. Besides, in such a critical condition as an out of memory,
1939 * the benefits of queue merging may be little relevant, or even negligible.
1940 *
1941 * Weight-raised queues can be merged only if their weight-raising
1942 * period has just started. In fact cooperating processes are usually
1943 * started together. Thus, with this filter we avoid false positives
1944 * that would jeopardize low-latency guarantees.
1945 *
1946 * WARNING: queue merging may impair fairness among non-weight raised
1947 * queues, for at least two reasons: 1) the original weight of a
1948 * merged queue may change during the merged state, 2) even being the
1949 * weight the same, a merged queue may be bloated with many more
1950 * requests than the ones produced by its originally-associated
1951 * process.
1952 */
1953static struct bfq_queue *
1954bfq_setup_cooperator(struct bfq_data *bfqd, struct bfq_queue *bfqq,
1955 void *io_struct, bool request)
1956{
1957 struct bfq_queue *in_service_bfqq, *new_bfqq;
1958
1959 if (bfqq->new_bfqq)
1960 return bfqq->new_bfqq;
1961
1962 if (!io_struct ||
1963 wr_from_too_long(bfqq) ||
1964 unlikely(bfqq == &bfqd->oom_bfqq))
1965 return NULL;
1966
1967 /* If there is only one backlogged queue, don't search. */
1968 if (bfqd->busy_queues == 1)
1969 return NULL;
1970
1971 in_service_bfqq = bfqd->in_service_queue;
1972
6fa3e8d3
PV
1973 if (!in_service_bfqq || in_service_bfqq == bfqq
1974 || wr_from_too_long(in_service_bfqq) ||
36eca894
AA
1975 unlikely(in_service_bfqq == &bfqd->oom_bfqq))
1976 goto check_scheduled;
1977
1978 if (bfq_rq_close_to_sector(io_struct, request, bfqd->last_position) &&
1979 bfqq->entity.parent == in_service_bfqq->entity.parent &&
1980 bfq_may_be_close_cooperator(bfqq, in_service_bfqq)) {
1981 new_bfqq = bfq_setup_merge(bfqq, in_service_bfqq);
1982 if (new_bfqq)
1983 return new_bfqq;
1984 }
1985 /*
1986 * Check whether there is a cooperator among currently scheduled
1987 * queues. The only thing we need is that the bio/request is not
1988 * NULL, as we need it to establish whether a cooperator exists.
1989 */
1990check_scheduled:
1991 new_bfqq = bfq_find_close_cooperator(bfqd, bfqq,
1992 bfq_io_struct_pos(io_struct, request));
1993
1994 if (new_bfqq && !wr_from_too_long(new_bfqq) &&
1995 likely(new_bfqq != &bfqd->oom_bfqq) &&
1996 bfq_may_be_close_cooperator(bfqq, new_bfqq))
1997 return bfq_setup_merge(bfqq, new_bfqq);
1998
1999 return NULL;
2000}
2001
2002static void bfq_bfqq_save_state(struct bfq_queue *bfqq)
2003{
2004 struct bfq_io_cq *bic = bfqq->bic;
2005
2006 /*
2007 * If !bfqq->bic, the queue is already shared or its requests
2008 * have already been redirected to a shared queue; both idle window
2009 * and weight raising state have already been saved. Do nothing.
2010 */
2011 if (!bic)
2012 return;
2013
2014 bic->saved_ttime = bfqq->ttime;
d5be3fef 2015 bic->saved_has_short_ttime = bfq_bfqq_has_short_ttime(bfqq);
36eca894 2016 bic->saved_IO_bound = bfq_bfqq_IO_bound(bfqq);
e1b2324d
AA
2017 bic->saved_in_large_burst = bfq_bfqq_in_large_burst(bfqq);
2018 bic->was_in_burst_list = !hlist_unhashed(&bfqq->burst_list_node);
36eca894
AA
2019 bic->saved_wr_coeff = bfqq->wr_coeff;
2020 bic->saved_wr_start_at_switch_to_srt = bfqq->wr_start_at_switch_to_srt;
2021 bic->saved_last_wr_start_finish = bfqq->last_wr_start_finish;
2022 bic->saved_wr_cur_max_time = bfqq->wr_cur_max_time;
2023}
2024
36eca894
AA
2025static void
2026bfq_merge_bfqqs(struct bfq_data *bfqd, struct bfq_io_cq *bic,
2027 struct bfq_queue *bfqq, struct bfq_queue *new_bfqq)
2028{
2029 bfq_log_bfqq(bfqd, bfqq, "merging with queue %lu",
2030 (unsigned long)new_bfqq->pid);
2031 /* Save weight raising and idle window of the merged queues */
2032 bfq_bfqq_save_state(bfqq);
2033 bfq_bfqq_save_state(new_bfqq);
2034 if (bfq_bfqq_IO_bound(bfqq))
2035 bfq_mark_bfqq_IO_bound(new_bfqq);
2036 bfq_clear_bfqq_IO_bound(bfqq);
2037
2038 /*
2039 * If bfqq is weight-raised, then let new_bfqq inherit
2040 * weight-raising. To reduce false positives, neglect the case
2041 * where bfqq has just been created, but has not yet made it
2042 * to be weight-raised (which may happen because EQM may merge
2043 * bfqq even before bfq_add_request is executed for the first
e1b2324d
AA
2044 * time for bfqq). Handling this case would however be very
2045 * easy, thanks to the flag just_created.
36eca894
AA
2046 */
2047 if (new_bfqq->wr_coeff == 1 && bfqq->wr_coeff > 1) {
2048 new_bfqq->wr_coeff = bfqq->wr_coeff;
2049 new_bfqq->wr_cur_max_time = bfqq->wr_cur_max_time;
2050 new_bfqq->last_wr_start_finish = bfqq->last_wr_start_finish;
2051 new_bfqq->wr_start_at_switch_to_srt =
2052 bfqq->wr_start_at_switch_to_srt;
2053 if (bfq_bfqq_busy(new_bfqq))
2054 bfqd->wr_busy_queues++;
2055 new_bfqq->entity.prio_changed = 1;
2056 }
2057
2058 if (bfqq->wr_coeff > 1) { /* bfqq has given its wr to new_bfqq */
2059 bfqq->wr_coeff = 1;
2060 bfqq->entity.prio_changed = 1;
2061 if (bfq_bfqq_busy(bfqq))
2062 bfqd->wr_busy_queues--;
2063 }
2064
2065 bfq_log_bfqq(bfqd, new_bfqq, "merge_bfqqs: wr_busy %d",
2066 bfqd->wr_busy_queues);
2067
36eca894
AA
2068 /*
2069 * Merge queues (that is, let bic redirect its requests to new_bfqq)
2070 */
2071 bic_set_bfqq(bic, new_bfqq, 1);
2072 bfq_mark_bfqq_coop(new_bfqq);
2073 /*
2074 * new_bfqq now belongs to at least two bics (it is a shared queue):
2075 * set new_bfqq->bic to NULL. bfqq either:
2076 * - does not belong to any bic any more, and hence bfqq->bic must
2077 * be set to NULL, or
2078 * - is a queue whose owning bics have already been redirected to a
2079 * different queue, hence the queue is destined to not belong to
2080 * any bic soon and bfqq->bic is already NULL (therefore the next
2081 * assignment causes no harm).
2082 */
2083 new_bfqq->bic = NULL;
2084 bfqq->bic = NULL;
2085 /* release process reference to bfqq */
2086 bfq_put_queue(bfqq);
2087}
2088
aee69d78
PV
2089static bool bfq_allow_bio_merge(struct request_queue *q, struct request *rq,
2090 struct bio *bio)
2091{
2092 struct bfq_data *bfqd = q->elevator->elevator_data;
2093 bool is_sync = op_is_sync(bio->bi_opf);
36eca894 2094 struct bfq_queue *bfqq = bfqd->bio_bfqq, *new_bfqq;
aee69d78
PV
2095
2096 /*
2097 * Disallow merge of a sync bio into an async request.
2098 */
2099 if (is_sync && !rq_is_sync(rq))
2100 return false;
2101
2102 /*
2103 * Lookup the bfqq that this bio will be queued with. Allow
2104 * merge only if rq is queued there.
2105 */
2106 if (!bfqq)
2107 return false;
2108
36eca894
AA
2109 /*
2110 * We take advantage of this function to perform an early merge
2111 * of the queues of possible cooperating processes.
2112 */
2113 new_bfqq = bfq_setup_cooperator(bfqd, bfqq, bio, false);
2114 if (new_bfqq) {
2115 /*
2116 * bic still points to bfqq, then it has not yet been
2117 * redirected to some other bfq_queue, and a queue
2118 * merge beween bfqq and new_bfqq can be safely
2119 * fulfillled, i.e., bic can be redirected to new_bfqq
2120 * and bfqq can be put.
2121 */
2122 bfq_merge_bfqqs(bfqd, bfqd->bio_bic, bfqq,
2123 new_bfqq);
2124 /*
2125 * If we get here, bio will be queued into new_queue,
2126 * so use new_bfqq to decide whether bio and rq can be
2127 * merged.
2128 */
2129 bfqq = new_bfqq;
2130
2131 /*
2132 * Change also bqfd->bio_bfqq, as
2133 * bfqd->bio_bic now points to new_bfqq, and
2134 * this function may be invoked again (and then may
2135 * use again bqfd->bio_bfqq).
2136 */
2137 bfqd->bio_bfqq = bfqq;
2138 }
2139
aee69d78
PV
2140 return bfqq == RQ_BFQQ(rq);
2141}
2142
44e44a1b
PV
2143/*
2144 * Set the maximum time for the in-service queue to consume its
2145 * budget. This prevents seeky processes from lowering the throughput.
2146 * In practice, a time-slice service scheme is used with seeky
2147 * processes.
2148 */
2149static void bfq_set_budget_timeout(struct bfq_data *bfqd,
2150 struct bfq_queue *bfqq)
2151{
77b7dcea
PV
2152 unsigned int timeout_coeff;
2153
2154 if (bfqq->wr_cur_max_time == bfqd->bfq_wr_rt_max_time)
2155 timeout_coeff = 1;
2156 else
2157 timeout_coeff = bfqq->entity.weight / bfqq->entity.orig_weight;
2158
44e44a1b
PV
2159 bfqd->last_budget_start = ktime_get();
2160
2161 bfqq->budget_timeout = jiffies +
77b7dcea 2162 bfqd->bfq_timeout * timeout_coeff;
44e44a1b
PV
2163}
2164
aee69d78
PV
2165static void __bfq_set_in_service_queue(struct bfq_data *bfqd,
2166 struct bfq_queue *bfqq)
2167{
2168 if (bfqq) {
e21b7a0b 2169 bfqg_stats_update_avg_queue_size(bfqq_group(bfqq));
aee69d78
PV
2170 bfq_clear_bfqq_fifo_expire(bfqq);
2171
2172 bfqd->budgets_assigned = (bfqd->budgets_assigned * 7 + 256) / 8;
2173
77b7dcea
PV
2174 if (time_is_before_jiffies(bfqq->last_wr_start_finish) &&
2175 bfqq->wr_coeff > 1 &&
2176 bfqq->wr_cur_max_time == bfqd->bfq_wr_rt_max_time &&
2177 time_is_before_jiffies(bfqq->budget_timeout)) {
2178 /*
2179 * For soft real-time queues, move the start
2180 * of the weight-raising period forward by the
2181 * time the queue has not received any
2182 * service. Otherwise, a relatively long
2183 * service delay is likely to cause the
2184 * weight-raising period of the queue to end,
2185 * because of the short duration of the
2186 * weight-raising period of a soft real-time
2187 * queue. It is worth noting that this move
2188 * is not so dangerous for the other queues,
2189 * because soft real-time queues are not
2190 * greedy.
2191 *
2192 * To not add a further variable, we use the
2193 * overloaded field budget_timeout to
2194 * determine for how long the queue has not
2195 * received service, i.e., how much time has
2196 * elapsed since the queue expired. However,
2197 * this is a little imprecise, because
2198 * budget_timeout is set to jiffies if bfqq
2199 * not only expires, but also remains with no
2200 * request.
2201 */
2202 if (time_after(bfqq->budget_timeout,
2203 bfqq->last_wr_start_finish))
2204 bfqq->last_wr_start_finish +=
2205 jiffies - bfqq->budget_timeout;
2206 else
2207 bfqq->last_wr_start_finish = jiffies;
2208 }
2209
44e44a1b 2210 bfq_set_budget_timeout(bfqd, bfqq);
aee69d78
PV
2211 bfq_log_bfqq(bfqd, bfqq,
2212 "set_in_service_queue, cur-budget = %d",
2213 bfqq->entity.budget);
2214 }
2215
2216 bfqd->in_service_queue = bfqq;
2217}
2218
2219/*
2220 * Get and set a new queue for service.
2221 */
2222static struct bfq_queue *bfq_set_in_service_queue(struct bfq_data *bfqd)
2223{
2224 struct bfq_queue *bfqq = bfq_get_next_queue(bfqd);
2225
2226 __bfq_set_in_service_queue(bfqd, bfqq);
2227 return bfqq;
2228}
2229
aee69d78
PV
2230static void bfq_arm_slice_timer(struct bfq_data *bfqd)
2231{
2232 struct bfq_queue *bfqq = bfqd->in_service_queue;
aee69d78
PV
2233 u32 sl;
2234
aee69d78
PV
2235 bfq_mark_bfqq_wait_request(bfqq);
2236
2237 /*
2238 * We don't want to idle for seeks, but we do want to allow
2239 * fair distribution of slice time for a process doing back-to-back
2240 * seeks. So allow a little bit of time for him to submit a new rq.
2241 */
2242 sl = bfqd->bfq_slice_idle;
2243 /*
1de0c4cd
AA
2244 * Unless the queue is being weight-raised or the scenario is
2245 * asymmetric, grant only minimum idle time if the queue
2246 * is seeky. A long idling is preserved for a weight-raised
2247 * queue, or, more in general, in an asymmetric scenario,
2248 * because a long idling is needed for guaranteeing to a queue
2249 * its reserved share of the throughput (in particular, it is
2250 * needed if the queue has a higher weight than some other
2251 * queue).
aee69d78 2252 */
1de0c4cd
AA
2253 if (BFQQ_SEEKY(bfqq) && bfqq->wr_coeff == 1 &&
2254 bfq_symmetric_scenario(bfqd))
aee69d78
PV
2255 sl = min_t(u64, sl, BFQ_MIN_TT);
2256
2257 bfqd->last_idling_start = ktime_get();
2258 hrtimer_start(&bfqd->idle_slice_timer, ns_to_ktime(sl),
2259 HRTIMER_MODE_REL);
e21b7a0b 2260 bfqg_stats_set_start_idle_time(bfqq_group(bfqq));
aee69d78
PV
2261}
2262
ab0e43e9
PV
2263/*
2264 * In autotuning mode, max_budget is dynamically recomputed as the
2265 * amount of sectors transferred in timeout at the estimated peak
2266 * rate. This enables BFQ to utilize a full timeslice with a full
2267 * budget, even if the in-service queue is served at peak rate. And
2268 * this maximises throughput with sequential workloads.
2269 */
2270static unsigned long bfq_calc_max_budget(struct bfq_data *bfqd)
2271{
2272 return (u64)bfqd->peak_rate * USEC_PER_MSEC *
2273 jiffies_to_msecs(bfqd->bfq_timeout)>>BFQ_RATE_SHIFT;
2274}
2275
44e44a1b
PV
2276/*
2277 * Update parameters related to throughput and responsiveness, as a
2278 * function of the estimated peak rate. See comments on
2279 * bfq_calc_max_budget(), and on T_slow and T_fast arrays.
2280 */
2281static void update_thr_responsiveness_params(struct bfq_data *bfqd)
2282{
2283 int dev_type = blk_queue_nonrot(bfqd->queue);
2284
2285 if (bfqd->bfq_user_max_budget == 0)
2286 bfqd->bfq_max_budget =
2287 bfq_calc_max_budget(bfqd);
2288
2289 if (bfqd->device_speed == BFQ_BFQD_FAST &&
2290 bfqd->peak_rate < device_speed_thresh[dev_type]) {
2291 bfqd->device_speed = BFQ_BFQD_SLOW;
2292 bfqd->RT_prod = R_slow[dev_type] *
2293 T_slow[dev_type];
2294 } else if (bfqd->device_speed == BFQ_BFQD_SLOW &&
2295 bfqd->peak_rate > device_speed_thresh[dev_type]) {
2296 bfqd->device_speed = BFQ_BFQD_FAST;
2297 bfqd->RT_prod = R_fast[dev_type] *
2298 T_fast[dev_type];
2299 }
2300
2301 bfq_log(bfqd,
2302"dev_type %s dev_speed_class = %s (%llu sects/sec), thresh %llu setcs/sec",
2303 dev_type == 0 ? "ROT" : "NONROT",
2304 bfqd->device_speed == BFQ_BFQD_FAST ? "FAST" : "SLOW",
2305 bfqd->device_speed == BFQ_BFQD_FAST ?
2306 (USEC_PER_SEC*(u64)R_fast[dev_type])>>BFQ_RATE_SHIFT :
2307 (USEC_PER_SEC*(u64)R_slow[dev_type])>>BFQ_RATE_SHIFT,
2308 (USEC_PER_SEC*(u64)device_speed_thresh[dev_type])>>
2309 BFQ_RATE_SHIFT);
2310}
2311
ab0e43e9
PV
2312static void bfq_reset_rate_computation(struct bfq_data *bfqd,
2313 struct request *rq)
2314{
2315 if (rq != NULL) { /* new rq dispatch now, reset accordingly */
2316 bfqd->last_dispatch = bfqd->first_dispatch = ktime_get_ns();
2317 bfqd->peak_rate_samples = 1;
2318 bfqd->sequential_samples = 0;
2319 bfqd->tot_sectors_dispatched = bfqd->last_rq_max_size =
2320 blk_rq_sectors(rq);
2321 } else /* no new rq dispatched, just reset the number of samples */
2322 bfqd->peak_rate_samples = 0; /* full re-init on next disp. */
2323
2324 bfq_log(bfqd,
2325 "reset_rate_computation at end, sample %u/%u tot_sects %llu",
2326 bfqd->peak_rate_samples, bfqd->sequential_samples,
2327 bfqd->tot_sectors_dispatched);
2328}
2329
2330static void bfq_update_rate_reset(struct bfq_data *bfqd, struct request *rq)
2331{
2332 u32 rate, weight, divisor;
2333
2334 /*
2335 * For the convergence property to hold (see comments on
2336 * bfq_update_peak_rate()) and for the assessment to be
2337 * reliable, a minimum number of samples must be present, and
2338 * a minimum amount of time must have elapsed. If not so, do
2339 * not compute new rate. Just reset parameters, to get ready
2340 * for a new evaluation attempt.
2341 */
2342 if (bfqd->peak_rate_samples < BFQ_RATE_MIN_SAMPLES ||
2343 bfqd->delta_from_first < BFQ_RATE_MIN_INTERVAL)
2344 goto reset_computation;
2345
2346 /*
2347 * If a new request completion has occurred after last
2348 * dispatch, then, to approximate the rate at which requests
2349 * have been served by the device, it is more precise to
2350 * extend the observation interval to the last completion.
2351 */
2352 bfqd->delta_from_first =
2353 max_t(u64, bfqd->delta_from_first,
2354 bfqd->last_completion - bfqd->first_dispatch);
2355
2356 /*
2357 * Rate computed in sects/usec, and not sects/nsec, for
2358 * precision issues.
2359 */
2360 rate = div64_ul(bfqd->tot_sectors_dispatched<<BFQ_RATE_SHIFT,
2361 div_u64(bfqd->delta_from_first, NSEC_PER_USEC));
2362
2363 /*
2364 * Peak rate not updated if:
2365 * - the percentage of sequential dispatches is below 3/4 of the
2366 * total, and rate is below the current estimated peak rate
2367 * - rate is unreasonably high (> 20M sectors/sec)
2368 */
2369 if ((bfqd->sequential_samples < (3 * bfqd->peak_rate_samples)>>2 &&
2370 rate <= bfqd->peak_rate) ||
2371 rate > 20<<BFQ_RATE_SHIFT)
2372 goto reset_computation;
2373
2374 /*
2375 * We have to update the peak rate, at last! To this purpose,
2376 * we use a low-pass filter. We compute the smoothing constant
2377 * of the filter as a function of the 'weight' of the new
2378 * measured rate.
2379 *
2380 * As can be seen in next formulas, we define this weight as a
2381 * quantity proportional to how sequential the workload is,
2382 * and to how long the observation time interval is.
2383 *
2384 * The weight runs from 0 to 8. The maximum value of the
2385 * weight, 8, yields the minimum value for the smoothing
2386 * constant. At this minimum value for the smoothing constant,
2387 * the measured rate contributes for half of the next value of
2388 * the estimated peak rate.
2389 *
2390 * So, the first step is to compute the weight as a function
2391 * of how sequential the workload is. Note that the weight
2392 * cannot reach 9, because bfqd->sequential_samples cannot
2393 * become equal to bfqd->peak_rate_samples, which, in its
2394 * turn, holds true because bfqd->sequential_samples is not
2395 * incremented for the first sample.
2396 */
2397 weight = (9 * bfqd->sequential_samples) / bfqd->peak_rate_samples;
2398
2399 /*
2400 * Second step: further refine the weight as a function of the
2401 * duration of the observation interval.
2402 */
2403 weight = min_t(u32, 8,
2404 div_u64(weight * bfqd->delta_from_first,
2405 BFQ_RATE_REF_INTERVAL));
2406
2407 /*
2408 * Divisor ranging from 10, for minimum weight, to 2, for
2409 * maximum weight.
2410 */
2411 divisor = 10 - weight;
2412
2413 /*
2414 * Finally, update peak rate:
2415 *
2416 * peak_rate = peak_rate * (divisor-1) / divisor + rate / divisor
2417 */
2418 bfqd->peak_rate *= divisor-1;
2419 bfqd->peak_rate /= divisor;
2420 rate /= divisor; /* smoothing constant alpha = 1/divisor */
2421
2422 bfqd->peak_rate += rate;
44e44a1b 2423 update_thr_responsiveness_params(bfqd);
ab0e43e9
PV
2424
2425reset_computation:
2426 bfq_reset_rate_computation(bfqd, rq);
2427}
2428
2429/*
2430 * Update the read/write peak rate (the main quantity used for
2431 * auto-tuning, see update_thr_responsiveness_params()).
2432 *
2433 * It is not trivial to estimate the peak rate (correctly): because of
2434 * the presence of sw and hw queues between the scheduler and the
2435 * device components that finally serve I/O requests, it is hard to
2436 * say exactly when a given dispatched request is served inside the
2437 * device, and for how long. As a consequence, it is hard to know
2438 * precisely at what rate a given set of requests is actually served
2439 * by the device.
2440 *
2441 * On the opposite end, the dispatch time of any request is trivially
2442 * available, and, from this piece of information, the "dispatch rate"
2443 * of requests can be immediately computed. So, the idea in the next
2444 * function is to use what is known, namely request dispatch times
2445 * (plus, when useful, request completion times), to estimate what is
2446 * unknown, namely in-device request service rate.
2447 *
2448 * The main issue is that, because of the above facts, the rate at
2449 * which a certain set of requests is dispatched over a certain time
2450 * interval can vary greatly with respect to the rate at which the
2451 * same requests are then served. But, since the size of any
2452 * intermediate queue is limited, and the service scheme is lossless
2453 * (no request is silently dropped), the following obvious convergence
2454 * property holds: the number of requests dispatched MUST become
2455 * closer and closer to the number of requests completed as the
2456 * observation interval grows. This is the key property used in
2457 * the next function to estimate the peak service rate as a function
2458 * of the observed dispatch rate. The function assumes to be invoked
2459 * on every request dispatch.
2460 */
2461static void bfq_update_peak_rate(struct bfq_data *bfqd, struct request *rq)
2462{
2463 u64 now_ns = ktime_get_ns();
2464
2465 if (bfqd->peak_rate_samples == 0) { /* first dispatch */
2466 bfq_log(bfqd, "update_peak_rate: goto reset, samples %d",
2467 bfqd->peak_rate_samples);
2468 bfq_reset_rate_computation(bfqd, rq);
2469 goto update_last_values; /* will add one sample */
2470 }
2471
2472 /*
2473 * Device idle for very long: the observation interval lasting
2474 * up to this dispatch cannot be a valid observation interval
2475 * for computing a new peak rate (similarly to the late-
2476 * completion event in bfq_completed_request()). Go to
2477 * update_rate_and_reset to have the following three steps
2478 * taken:
2479 * - close the observation interval at the last (previous)
2480 * request dispatch or completion
2481 * - compute rate, if possible, for that observation interval
2482 * - start a new observation interval with this dispatch
2483 */
2484 if (now_ns - bfqd->last_dispatch > 100*NSEC_PER_MSEC &&
2485 bfqd->rq_in_driver == 0)
2486 goto update_rate_and_reset;
2487
2488 /* Update sampling information */
2489 bfqd->peak_rate_samples++;
2490
2491 if ((bfqd->rq_in_driver > 0 ||
2492 now_ns - bfqd->last_completion < BFQ_MIN_TT)
2493 && get_sdist(bfqd->last_position, rq) < BFQQ_SEEK_THR)
2494 bfqd->sequential_samples++;
2495
2496 bfqd->tot_sectors_dispatched += blk_rq_sectors(rq);
2497
2498 /* Reset max observed rq size every 32 dispatches */
2499 if (likely(bfqd->peak_rate_samples % 32))
2500 bfqd->last_rq_max_size =
2501 max_t(u32, blk_rq_sectors(rq), bfqd->last_rq_max_size);
2502 else
2503 bfqd->last_rq_max_size = blk_rq_sectors(rq);
2504
2505 bfqd->delta_from_first = now_ns - bfqd->first_dispatch;
2506
2507 /* Target observation interval not yet reached, go on sampling */
2508 if (bfqd->delta_from_first < BFQ_RATE_REF_INTERVAL)
2509 goto update_last_values;
2510
2511update_rate_and_reset:
2512 bfq_update_rate_reset(bfqd, rq);
2513update_last_values:
2514 bfqd->last_position = blk_rq_pos(rq) + blk_rq_sectors(rq);
2515 bfqd->last_dispatch = now_ns;
2516}
2517
aee69d78
PV
2518/*
2519 * Remove request from internal lists.
2520 */
2521static void bfq_dispatch_remove(struct request_queue *q, struct request *rq)
2522{
2523 struct bfq_queue *bfqq = RQ_BFQQ(rq);
2524
2525 /*
2526 * For consistency, the next instruction should have been
2527 * executed after removing the request from the queue and
2528 * dispatching it. We execute instead this instruction before
2529 * bfq_remove_request() (and hence introduce a temporary
2530 * inconsistency), for efficiency. In fact, should this
2531 * dispatch occur for a non in-service bfqq, this anticipated
2532 * increment prevents two counters related to bfqq->dispatched
2533 * from risking to be, first, uselessly decremented, and then
2534 * incremented again when the (new) value of bfqq->dispatched
2535 * happens to be taken into account.
2536 */
2537 bfqq->dispatched++;
ab0e43e9 2538 bfq_update_peak_rate(q->elevator->elevator_data, rq);
aee69d78
PV
2539
2540 bfq_remove_request(q, rq);
2541}
2542
2543static void __bfq_bfqq_expire(struct bfq_data *bfqd, struct bfq_queue *bfqq)
2544{
36eca894
AA
2545 /*
2546 * If this bfqq is shared between multiple processes, check
2547 * to make sure that those processes are still issuing I/Os
2548 * within the mean seek distance. If not, it may be time to
2549 * break the queues apart again.
2550 */
2551 if (bfq_bfqq_coop(bfqq) && BFQQ_SEEKY(bfqq))
2552 bfq_mark_bfqq_split_coop(bfqq);
2553
44e44a1b
PV
2554 if (RB_EMPTY_ROOT(&bfqq->sort_list)) {
2555 if (bfqq->dispatched == 0)
2556 /*
2557 * Overloading budget_timeout field to store
2558 * the time at which the queue remains with no
2559 * backlog and no outstanding request; used by
2560 * the weight-raising mechanism.
2561 */
2562 bfqq->budget_timeout = jiffies;
2563
e21b7a0b 2564 bfq_del_bfqq_busy(bfqd, bfqq, true);
36eca894 2565 } else {
e21b7a0b 2566 bfq_requeue_bfqq(bfqd, bfqq);
36eca894
AA
2567 /*
2568 * Resort priority tree of potential close cooperators.
2569 */
2570 bfq_pos_tree_add_move(bfqd, bfqq);
2571 }
e21b7a0b
AA
2572
2573 /*
2574 * All in-service entities must have been properly deactivated
2575 * or requeued before executing the next function, which
2576 * resets all in-service entites as no more in service.
2577 */
2578 __bfq_bfqd_reset_in_service(bfqd);
aee69d78
PV
2579}
2580
2581/**
2582 * __bfq_bfqq_recalc_budget - try to adapt the budget to the @bfqq behavior.
2583 * @bfqd: device data.
2584 * @bfqq: queue to update.
2585 * @reason: reason for expiration.
2586 *
2587 * Handle the feedback on @bfqq budget at queue expiration.
2588 * See the body for detailed comments.
2589 */
2590static void __bfq_bfqq_recalc_budget(struct bfq_data *bfqd,
2591 struct bfq_queue *bfqq,
2592 enum bfqq_expiration reason)
2593{
2594 struct request *next_rq;
2595 int budget, min_budget;
2596
aee69d78
PV
2597 min_budget = bfq_min_budget(bfqd);
2598
44e44a1b
PV
2599 if (bfqq->wr_coeff == 1)
2600 budget = bfqq->max_budget;
2601 else /*
2602 * Use a constant, low budget for weight-raised queues,
2603 * to help achieve a low latency. Keep it slightly higher
2604 * than the minimum possible budget, to cause a little
2605 * bit fewer expirations.
2606 */
2607 budget = 2 * min_budget;
2608
aee69d78
PV
2609 bfq_log_bfqq(bfqd, bfqq, "recalc_budg: last budg %d, budg left %d",
2610 bfqq->entity.budget, bfq_bfqq_budget_left(bfqq));
2611 bfq_log_bfqq(bfqd, bfqq, "recalc_budg: last max_budg %d, min budg %d",
2612 budget, bfq_min_budget(bfqd));
2613 bfq_log_bfqq(bfqd, bfqq, "recalc_budg: sync %d, seeky %d",
2614 bfq_bfqq_sync(bfqq), BFQQ_SEEKY(bfqd->in_service_queue));
2615
44e44a1b 2616 if (bfq_bfqq_sync(bfqq) && bfqq->wr_coeff == 1) {
aee69d78
PV
2617 switch (reason) {
2618 /*
2619 * Caveat: in all the following cases we trade latency
2620 * for throughput.
2621 */
2622 case BFQQE_TOO_IDLE:
54b60456
PV
2623 /*
2624 * This is the only case where we may reduce
2625 * the budget: if there is no request of the
2626 * process still waiting for completion, then
2627 * we assume (tentatively) that the timer has
2628 * expired because the batch of requests of
2629 * the process could have been served with a
2630 * smaller budget. Hence, betting that
2631 * process will behave in the same way when it
2632 * becomes backlogged again, we reduce its
2633 * next budget. As long as we guess right,
2634 * this budget cut reduces the latency
2635 * experienced by the process.
2636 *
2637 * However, if there are still outstanding
2638 * requests, then the process may have not yet
2639 * issued its next request just because it is
2640 * still waiting for the completion of some of
2641 * the still outstanding ones. So in this
2642 * subcase we do not reduce its budget, on the
2643 * contrary we increase it to possibly boost
2644 * the throughput, as discussed in the
2645 * comments to the BUDGET_TIMEOUT case.
2646 */
2647 if (bfqq->dispatched > 0) /* still outstanding reqs */
2648 budget = min(budget * 2, bfqd->bfq_max_budget);
2649 else {
2650 if (budget > 5 * min_budget)
2651 budget -= 4 * min_budget;
2652 else
2653 budget = min_budget;
2654 }
aee69d78
PV
2655 break;
2656 case BFQQE_BUDGET_TIMEOUT:
54b60456
PV
2657 /*
2658 * We double the budget here because it gives
2659 * the chance to boost the throughput if this
2660 * is not a seeky process (and has bumped into
2661 * this timeout because of, e.g., ZBR).
2662 */
2663 budget = min(budget * 2, bfqd->bfq_max_budget);
aee69d78
PV
2664 break;
2665 case BFQQE_BUDGET_EXHAUSTED:
2666 /*
2667 * The process still has backlog, and did not
2668 * let either the budget timeout or the disk
2669 * idling timeout expire. Hence it is not
2670 * seeky, has a short thinktime and may be
2671 * happy with a higher budget too. So
2672 * definitely increase the budget of this good
2673 * candidate to boost the disk throughput.
2674 */
54b60456 2675 budget = min(budget * 4, bfqd->bfq_max_budget);
aee69d78
PV
2676 break;
2677 case BFQQE_NO_MORE_REQUESTS:
2678 /*
2679 * For queues that expire for this reason, it
2680 * is particularly important to keep the
2681 * budget close to the actual service they
2682 * need. Doing so reduces the timestamp
2683 * misalignment problem described in the
2684 * comments in the body of
2685 * __bfq_activate_entity. In fact, suppose
2686 * that a queue systematically expires for
2687 * BFQQE_NO_MORE_REQUESTS and presents a
2688 * new request in time to enjoy timestamp
2689 * back-shifting. The larger the budget of the
2690 * queue is with respect to the service the
2691 * queue actually requests in each service
2692 * slot, the more times the queue can be
2693 * reactivated with the same virtual finish
2694 * time. It follows that, even if this finish
2695 * time is pushed to the system virtual time
2696 * to reduce the consequent timestamp
2697 * misalignment, the queue unjustly enjoys for
2698 * many re-activations a lower finish time
2699 * than all newly activated queues.
2700 *
2701 * The service needed by bfqq is measured
2702 * quite precisely by bfqq->entity.service.
2703 * Since bfqq does not enjoy device idling,
2704 * bfqq->entity.service is equal to the number
2705 * of sectors that the process associated with
2706 * bfqq requested to read/write before waiting
2707 * for request completions, or blocking for
2708 * other reasons.
2709 */
2710 budget = max_t(int, bfqq->entity.service, min_budget);
2711 break;
2712 default:
2713 return;
2714 }
44e44a1b 2715 } else if (!bfq_bfqq_sync(bfqq)) {
aee69d78
PV
2716 /*
2717 * Async queues get always the maximum possible
2718 * budget, as for them we do not care about latency
2719 * (in addition, their ability to dispatch is limited
2720 * by the charging factor).
2721 */
2722 budget = bfqd->bfq_max_budget;
2723 }
2724
2725 bfqq->max_budget = budget;
2726
2727 if (bfqd->budgets_assigned >= bfq_stats_min_budgets &&
2728 !bfqd->bfq_user_max_budget)
2729 bfqq->max_budget = min(bfqq->max_budget, bfqd->bfq_max_budget);
2730
2731 /*
2732 * If there is still backlog, then assign a new budget, making
2733 * sure that it is large enough for the next request. Since
2734 * the finish time of bfqq must be kept in sync with the
2735 * budget, be sure to call __bfq_bfqq_expire() *after* this
2736 * update.
2737 *
2738 * If there is no backlog, then no need to update the budget;
2739 * it will be updated on the arrival of a new request.
2740 */
2741 next_rq = bfqq->next_rq;
2742 if (next_rq)
2743 bfqq->entity.budget = max_t(unsigned long, bfqq->max_budget,
2744 bfq_serv_to_charge(next_rq, bfqq));
2745
2746 bfq_log_bfqq(bfqd, bfqq, "head sect: %u, new budget %d",
2747 next_rq ? blk_rq_sectors(next_rq) : 0,
2748 bfqq->entity.budget);
2749}
2750
aee69d78 2751/*
ab0e43e9
PV
2752 * Return true if the process associated with bfqq is "slow". The slow
2753 * flag is used, in addition to the budget timeout, to reduce the
2754 * amount of service provided to seeky processes, and thus reduce
2755 * their chances to lower the throughput. More details in the comments
2756 * on the function bfq_bfqq_expire().
2757 *
2758 * An important observation is in order: as discussed in the comments
2759 * on the function bfq_update_peak_rate(), with devices with internal
2760 * queues, it is hard if ever possible to know when and for how long
2761 * an I/O request is processed by the device (apart from the trivial
2762 * I/O pattern where a new request is dispatched only after the
2763 * previous one has been completed). This makes it hard to evaluate
2764 * the real rate at which the I/O requests of each bfq_queue are
2765 * served. In fact, for an I/O scheduler like BFQ, serving a
2766 * bfq_queue means just dispatching its requests during its service
2767 * slot (i.e., until the budget of the queue is exhausted, or the
2768 * queue remains idle, or, finally, a timeout fires). But, during the
2769 * service slot of a bfq_queue, around 100 ms at most, the device may
2770 * be even still processing requests of bfq_queues served in previous
2771 * service slots. On the opposite end, the requests of the in-service
2772 * bfq_queue may be completed after the service slot of the queue
2773 * finishes.
2774 *
2775 * Anyway, unless more sophisticated solutions are used
2776 * (where possible), the sum of the sizes of the requests dispatched
2777 * during the service slot of a bfq_queue is probably the only
2778 * approximation available for the service received by the bfq_queue
2779 * during its service slot. And this sum is the quantity used in this
2780 * function to evaluate the I/O speed of a process.
aee69d78 2781 */
ab0e43e9
PV
2782static bool bfq_bfqq_is_slow(struct bfq_data *bfqd, struct bfq_queue *bfqq,
2783 bool compensate, enum bfqq_expiration reason,
2784 unsigned long *delta_ms)
aee69d78 2785{
ab0e43e9
PV
2786 ktime_t delta_ktime;
2787 u32 delta_usecs;
2788 bool slow = BFQQ_SEEKY(bfqq); /* if delta too short, use seekyness */
aee69d78 2789
ab0e43e9 2790 if (!bfq_bfqq_sync(bfqq))
aee69d78
PV
2791 return false;
2792
2793 if (compensate)
ab0e43e9 2794 delta_ktime = bfqd->last_idling_start;
aee69d78 2795 else
ab0e43e9
PV
2796 delta_ktime = ktime_get();
2797 delta_ktime = ktime_sub(delta_ktime, bfqd->last_budget_start);
2798 delta_usecs = ktime_to_us(delta_ktime);
aee69d78
PV
2799
2800 /* don't use too short time intervals */
ab0e43e9
PV
2801 if (delta_usecs < 1000) {
2802 if (blk_queue_nonrot(bfqd->queue))
2803 /*
2804 * give same worst-case guarantees as idling
2805 * for seeky
2806 */
2807 *delta_ms = BFQ_MIN_TT / NSEC_PER_MSEC;
2808 else /* charge at least one seek */
2809 *delta_ms = bfq_slice_idle / NSEC_PER_MSEC;
2810
2811 return slow;
2812 }
aee69d78 2813
ab0e43e9 2814 *delta_ms = delta_usecs / USEC_PER_MSEC;
aee69d78
PV
2815
2816 /*
ab0e43e9
PV
2817 * Use only long (> 20ms) intervals to filter out excessive
2818 * spikes in service rate estimation.
aee69d78 2819 */
ab0e43e9
PV
2820 if (delta_usecs > 20000) {
2821 /*
2822 * Caveat for rotational devices: processes doing I/O
2823 * in the slower disk zones tend to be slow(er) even
2824 * if not seeky. In this respect, the estimated peak
2825 * rate is likely to be an average over the disk
2826 * surface. Accordingly, to not be too harsh with
2827 * unlucky processes, a process is deemed slow only if
2828 * its rate has been lower than half of the estimated
2829 * peak rate.
2830 */
2831 slow = bfqq->entity.service < bfqd->bfq_max_budget / 2;
aee69d78
PV
2832 }
2833
ab0e43e9 2834 bfq_log_bfqq(bfqd, bfqq, "bfq_bfqq_is_slow: slow %d", slow);
aee69d78 2835
ab0e43e9 2836 return slow;
aee69d78
PV
2837}
2838
77b7dcea
PV
2839/*
2840 * To be deemed as soft real-time, an application must meet two
2841 * requirements. First, the application must not require an average
2842 * bandwidth higher than the approximate bandwidth required to playback or
2843 * record a compressed high-definition video.
2844 * The next function is invoked on the completion of the last request of a
2845 * batch, to compute the next-start time instant, soft_rt_next_start, such
2846 * that, if the next request of the application does not arrive before
2847 * soft_rt_next_start, then the above requirement on the bandwidth is met.
2848 *
2849 * The second requirement is that the request pattern of the application is
2850 * isochronous, i.e., that, after issuing a request or a batch of requests,
2851 * the application stops issuing new requests until all its pending requests
2852 * have been completed. After that, the application may issue a new batch,
2853 * and so on.
2854 * For this reason the next function is invoked to compute
2855 * soft_rt_next_start only for applications that meet this requirement,
2856 * whereas soft_rt_next_start is set to infinity for applications that do
2857 * not.
2858 *
2859 * Unfortunately, even a greedy application may happen to behave in an
2860 * isochronous way if the CPU load is high. In fact, the application may
2861 * stop issuing requests while the CPUs are busy serving other processes,
2862 * then restart, then stop again for a while, and so on. In addition, if
2863 * the disk achieves a low enough throughput with the request pattern
2864 * issued by the application (e.g., because the request pattern is random
2865 * and/or the device is slow), then the application may meet the above
2866 * bandwidth requirement too. To prevent such a greedy application to be
2867 * deemed as soft real-time, a further rule is used in the computation of
2868 * soft_rt_next_start: soft_rt_next_start must be higher than the current
2869 * time plus the maximum time for which the arrival of a request is waited
2870 * for when a sync queue becomes idle, namely bfqd->bfq_slice_idle.
2871 * This filters out greedy applications, as the latter issue instead their
2872 * next request as soon as possible after the last one has been completed
2873 * (in contrast, when a batch of requests is completed, a soft real-time
2874 * application spends some time processing data).
2875 *
2876 * Unfortunately, the last filter may easily generate false positives if
2877 * only bfqd->bfq_slice_idle is used as a reference time interval and one
2878 * or both the following cases occur:
2879 * 1) HZ is so low that the duration of a jiffy is comparable to or higher
2880 * than bfqd->bfq_slice_idle. This happens, e.g., on slow devices with
2881 * HZ=100.
2882 * 2) jiffies, instead of increasing at a constant rate, may stop increasing
2883 * for a while, then suddenly 'jump' by several units to recover the lost
2884 * increments. This seems to happen, e.g., inside virtual machines.
2885 * To address this issue, we do not use as a reference time interval just
2886 * bfqd->bfq_slice_idle, but bfqd->bfq_slice_idle plus a few jiffies. In
2887 * particular we add the minimum number of jiffies for which the filter
2888 * seems to be quite precise also in embedded systems and KVM/QEMU virtual
2889 * machines.
2890 */
2891static unsigned long bfq_bfqq_softrt_next_start(struct bfq_data *bfqd,
2892 struct bfq_queue *bfqq)
2893{
2894 return max(bfqq->last_idle_bklogged +
2895 HZ * bfqq->service_from_backlogged /
2896 bfqd->bfq_wr_max_softrt_rate,
2897 jiffies + nsecs_to_jiffies(bfqq->bfqd->bfq_slice_idle) + 4);
2898}
2899
2900/*
2901 * Return the farthest future time instant according to jiffies
2902 * macros.
2903 */
2904static unsigned long bfq_greatest_from_now(void)
2905{
2906 return jiffies + MAX_JIFFY_OFFSET;
2907}
2908
aee69d78
PV
2909/*
2910 * Return the farthest past time instant according to jiffies
2911 * macros.
2912 */
2913static unsigned long bfq_smallest_from_now(void)
2914{
2915 return jiffies - MAX_JIFFY_OFFSET;
2916}
2917
2918/**
2919 * bfq_bfqq_expire - expire a queue.
2920 * @bfqd: device owning the queue.
2921 * @bfqq: the queue to expire.
2922 * @compensate: if true, compensate for the time spent idling.
2923 * @reason: the reason causing the expiration.
2924 *
c074170e
PV
2925 * If the process associated with bfqq does slow I/O (e.g., because it
2926 * issues random requests), we charge bfqq with the time it has been
2927 * in service instead of the service it has received (see
2928 * bfq_bfqq_charge_time for details on how this goal is achieved). As
2929 * a consequence, bfqq will typically get higher timestamps upon
2930 * reactivation, and hence it will be rescheduled as if it had
2931 * received more service than what it has actually received. In the
2932 * end, bfqq receives less service in proportion to how slowly its
2933 * associated process consumes its budgets (and hence how seriously it
2934 * tends to lower the throughput). In addition, this time-charging
2935 * strategy guarantees time fairness among slow processes. In
2936 * contrast, if the process associated with bfqq is not slow, we
2937 * charge bfqq exactly with the service it has received.
aee69d78 2938 *
c074170e
PV
2939 * Charging time to the first type of queues and the exact service to
2940 * the other has the effect of using the WF2Q+ policy to schedule the
2941 * former on a timeslice basis, without violating service domain
2942 * guarantees among the latter.
aee69d78 2943 */
ea25da48
PV
2944void bfq_bfqq_expire(struct bfq_data *bfqd,
2945 struct bfq_queue *bfqq,
2946 bool compensate,
2947 enum bfqq_expiration reason)
aee69d78
PV
2948{
2949 bool slow;
ab0e43e9
PV
2950 unsigned long delta = 0;
2951 struct bfq_entity *entity = &bfqq->entity;
aee69d78
PV
2952 int ref;
2953
2954 /*
ab0e43e9 2955 * Check whether the process is slow (see bfq_bfqq_is_slow).
aee69d78 2956 */
ab0e43e9 2957 slow = bfq_bfqq_is_slow(bfqd, bfqq, compensate, reason, &delta);
aee69d78 2958
77b7dcea
PV
2959 /*
2960 * Increase service_from_backlogged before next statement,
2961 * because the possible next invocation of
2962 * bfq_bfqq_charge_time would likely inflate
2963 * entity->service. In contrast, service_from_backlogged must
2964 * contain real service, to enable the soft real-time
2965 * heuristic to correctly compute the bandwidth consumed by
2966 * bfqq.
2967 */
2968 bfqq->service_from_backlogged += entity->service;
2969
aee69d78 2970 /*
c074170e
PV
2971 * As above explained, charge slow (typically seeky) and
2972 * timed-out queues with the time and not the service
2973 * received, to favor sequential workloads.
2974 *
2975 * Processes doing I/O in the slower disk zones will tend to
2976 * be slow(er) even if not seeky. Therefore, since the
2977 * estimated peak rate is actually an average over the disk
2978 * surface, these processes may timeout just for bad luck. To
2979 * avoid punishing them, do not charge time to processes that
2980 * succeeded in consuming at least 2/3 of their budget. This
2981 * allows BFQ to preserve enough elasticity to still perform
2982 * bandwidth, and not time, distribution with little unlucky
2983 * or quasi-sequential processes.
aee69d78 2984 */
44e44a1b
PV
2985 if (bfqq->wr_coeff == 1 &&
2986 (slow ||
2987 (reason == BFQQE_BUDGET_TIMEOUT &&
2988 bfq_bfqq_budget_left(bfqq) >= entity->budget / 3)))
c074170e 2989 bfq_bfqq_charge_time(bfqd, bfqq, delta);
aee69d78
PV
2990
2991 if (reason == BFQQE_TOO_IDLE &&
ab0e43e9 2992 entity->service <= 2 * entity->budget / 10)
aee69d78
PV
2993 bfq_clear_bfqq_IO_bound(bfqq);
2994
44e44a1b
PV
2995 if (bfqd->low_latency && bfqq->wr_coeff == 1)
2996 bfqq->last_wr_start_finish = jiffies;
2997
77b7dcea
PV
2998 if (bfqd->low_latency && bfqd->bfq_wr_max_softrt_rate > 0 &&
2999 RB_EMPTY_ROOT(&bfqq->sort_list)) {
3000 /*
3001 * If we get here, and there are no outstanding
3002 * requests, then the request pattern is isochronous
3003 * (see the comments on the function
3004 * bfq_bfqq_softrt_next_start()). Thus we can compute
3005 * soft_rt_next_start. If, instead, the queue still
3006 * has outstanding requests, then we have to wait for
3007 * the completion of all the outstanding requests to
3008 * discover whether the request pattern is actually
3009 * isochronous.
3010 */
3011 if (bfqq->dispatched == 0)
3012 bfqq->soft_rt_next_start =
3013 bfq_bfqq_softrt_next_start(bfqd, bfqq);
3014 else {
3015 /*
3016 * The application is still waiting for the
3017 * completion of one or more requests:
3018 * prevent it from possibly being incorrectly
3019 * deemed as soft real-time by setting its
3020 * soft_rt_next_start to infinity. In fact,
3021 * without this assignment, the application
3022 * would be incorrectly deemed as soft
3023 * real-time if:
3024 * 1) it issued a new request before the
3025 * completion of all its in-flight
3026 * requests, and
3027 * 2) at that time, its soft_rt_next_start
3028 * happened to be in the past.
3029 */
3030 bfqq->soft_rt_next_start =
3031 bfq_greatest_from_now();
3032 /*
3033 * Schedule an update of soft_rt_next_start to when
3034 * the task may be discovered to be isochronous.
3035 */
3036 bfq_mark_bfqq_softrt_update(bfqq);
3037 }
3038 }
3039
aee69d78 3040 bfq_log_bfqq(bfqd, bfqq,
d5be3fef
PV
3041 "expire (%d, slow %d, num_disp %d, short_ttime %d)", reason,
3042 slow, bfqq->dispatched, bfq_bfqq_has_short_ttime(bfqq));
aee69d78
PV
3043
3044 /*
3045 * Increase, decrease or leave budget unchanged according to
3046 * reason.
3047 */
3048 __bfq_bfqq_recalc_budget(bfqd, bfqq, reason);
3049 ref = bfqq->ref;
3050 __bfq_bfqq_expire(bfqd, bfqq);
3051
3052 /* mark bfqq as waiting a request only if a bic still points to it */
3053 if (ref > 1 && !bfq_bfqq_busy(bfqq) &&
3054 reason != BFQQE_BUDGET_TIMEOUT &&
3055 reason != BFQQE_BUDGET_EXHAUSTED)
3056 bfq_mark_bfqq_non_blocking_wait_rq(bfqq);
3057}
3058
3059/*
3060 * Budget timeout is not implemented through a dedicated timer, but
3061 * just checked on request arrivals and completions, as well as on
3062 * idle timer expirations.
3063 */
3064static bool bfq_bfqq_budget_timeout(struct bfq_queue *bfqq)
3065{
44e44a1b 3066 return time_is_before_eq_jiffies(bfqq->budget_timeout);
aee69d78
PV
3067}
3068
3069/*
3070 * If we expire a queue that is actively waiting (i.e., with the
3071 * device idled) for the arrival of a new request, then we may incur
3072 * the timestamp misalignment problem described in the body of the
3073 * function __bfq_activate_entity. Hence we return true only if this
3074 * condition does not hold, or if the queue is slow enough to deserve
3075 * only to be kicked off for preserving a high throughput.
3076 */
3077static bool bfq_may_expire_for_budg_timeout(struct bfq_queue *bfqq)
3078{
3079 bfq_log_bfqq(bfqq->bfqd, bfqq,
3080 "may_budget_timeout: wait_request %d left %d timeout %d",
3081 bfq_bfqq_wait_request(bfqq),
3082 bfq_bfqq_budget_left(bfqq) >= bfqq->entity.budget / 3,
3083 bfq_bfqq_budget_timeout(bfqq));
3084
3085 return (!bfq_bfqq_wait_request(bfqq) ||
3086 bfq_bfqq_budget_left(bfqq) >= bfqq->entity.budget / 3)
3087 &&
3088 bfq_bfqq_budget_timeout(bfqq);
3089}
3090
3091/*
3092 * For a queue that becomes empty, device idling is allowed only if
44e44a1b
PV
3093 * this function returns true for the queue. As a consequence, since
3094 * device idling plays a critical role in both throughput boosting and
3095 * service guarantees, the return value of this function plays a
3096 * critical role in both these aspects as well.
3097 *
3098 * In a nutshell, this function returns true only if idling is
3099 * beneficial for throughput or, even if detrimental for throughput,
3100 * idling is however necessary to preserve service guarantees (low
3101 * latency, desired throughput distribution, ...). In particular, on
3102 * NCQ-capable devices, this function tries to return false, so as to
3103 * help keep the drives' internal queues full, whenever this helps the
3104 * device boost the throughput without causing any service-guarantee
3105 * issue.
3106 *
3107 * In more detail, the return value of this function is obtained by,
3108 * first, computing a number of boolean variables that take into
3109 * account throughput and service-guarantee issues, and, then,
3110 * combining these variables in a logical expression. Most of the
3111 * issues taken into account are not trivial. We discuss these issues
3112 * individually while introducing the variables.
aee69d78
PV
3113 */
3114static bool bfq_bfqq_may_idle(struct bfq_queue *bfqq)
3115{
3116 struct bfq_data *bfqd = bfqq->bfqd;
cfd69712 3117 bool idling_boosts_thr, idling_boosts_thr_without_issues,
e1b2324d 3118 idling_needed_for_service_guarantees,
cfd69712 3119 asymmetric_scenario;
aee69d78
PV
3120
3121 if (bfqd->strict_guarantees)
3122 return true;
3123
d5be3fef
PV
3124 /*
3125 * Idling is performed only if slice_idle > 0. In addition, we
3126 * do not idle if
3127 * (a) bfqq is async
3128 * (b) bfqq is in the idle io prio class: in this case we do
3129 * not idle because we want to minimize the bandwidth that
3130 * queues in this class can steal to higher-priority queues
3131 */
3132 if (bfqd->bfq_slice_idle == 0 || !bfq_bfqq_sync(bfqq) ||
3133 bfq_class_idle(bfqq))
3134 return false;
3135
aee69d78 3136 /*
44e44a1b
PV
3137 * The next variable takes into account the cases where idling
3138 * boosts the throughput.
3139 *
e01eff01
PV
3140 * The value of the variable is computed considering, first, that
3141 * idling is virtually always beneficial for the throughput if:
aee69d78 3142 * (a) the device is not NCQ-capable, or
bf2b79e7 3143 * (b) regardless of the presence of NCQ, the device is rotational
e01eff01 3144 * and the request pattern for bfqq is I/O-bound and sequential.
bf2b79e7
PV
3145 *
3146 * Secondly, and in contrast to the above item (b), idling an
3147 * NCQ-capable flash-based device would not boost the
e01eff01 3148 * throughput even with sequential I/O; rather it would lower
bf2b79e7
PV
3149 * the throughput in proportion to how fast the device
3150 * is. Accordingly, the next variable is true if any of the
3151 * above conditions (a) and (b) is true, and, in particular,
3152 * happens to be false if bfqd is an NCQ-capable flash-based
3153 * device.
aee69d78 3154 */
bf2b79e7 3155 idling_boosts_thr = !bfqd->hw_tag ||
e01eff01 3156 (!blk_queue_nonrot(bfqd->queue) && bfq_bfqq_IO_bound(bfqq) &&
d5be3fef 3157 bfq_bfqq_has_short_ttime(bfqq));
aee69d78 3158
cfd69712
PV
3159 /*
3160 * The value of the next variable,
3161 * idling_boosts_thr_without_issues, is equal to that of
3162 * idling_boosts_thr, unless a special case holds. In this
3163 * special case, described below, idling may cause problems to
3164 * weight-raised queues.
3165 *
3166 * When the request pool is saturated (e.g., in the presence
3167 * of write hogs), if the processes associated with
3168 * non-weight-raised queues ask for requests at a lower rate,
3169 * then processes associated with weight-raised queues have a
3170 * higher probability to get a request from the pool
3171 * immediately (or at least soon) when they need one. Thus
3172 * they have a higher probability to actually get a fraction
3173 * of the device throughput proportional to their high
3174 * weight. This is especially true with NCQ-capable drives,
3175 * which enqueue several requests in advance, and further
3176 * reorder internally-queued requests.
3177 *
3178 * For this reason, we force to false the value of
3179 * idling_boosts_thr_without_issues if there are weight-raised
3180 * busy queues. In this case, and if bfqq is not weight-raised,
3181 * this guarantees that the device is not idled for bfqq (if,
3182 * instead, bfqq is weight-raised, then idling will be
3183 * guaranteed by another variable, see below). Combined with
3184 * the timestamping rules of BFQ (see [1] for details), this
3185 * behavior causes bfqq, and hence any sync non-weight-raised
3186 * queue, to get a lower number of requests served, and thus
3187 * to ask for a lower number of requests from the request
3188 * pool, before the busy weight-raised queues get served
3189 * again. This often mitigates starvation problems in the
3190 * presence of heavy write workloads and NCQ, thereby
3191 * guaranteeing a higher application and system responsiveness
3192 * in these hostile scenarios.
3193 */
3194 idling_boosts_thr_without_issues = idling_boosts_thr &&
3195 bfqd->wr_busy_queues == 0;
3196
aee69d78 3197 /*
bf2b79e7
PV
3198 * There is then a case where idling must be performed not
3199 * for throughput concerns, but to preserve service
3200 * guarantees.
3201 *
3202 * To introduce this case, we can note that allowing the drive
3203 * to enqueue more than one request at a time, and hence
44e44a1b 3204 * delegating de facto final scheduling decisions to the
bf2b79e7 3205 * drive's internal scheduler, entails loss of control on the
44e44a1b 3206 * actual request service order. In particular, the critical
bf2b79e7 3207 * situation is when requests from different processes happen
44e44a1b
PV
3208 * to be present, at the same time, in the internal queue(s)
3209 * of the drive. In such a situation, the drive, by deciding
3210 * the service order of the internally-queued requests, does
3211 * determine also the actual throughput distribution among
3212 * these processes. But the drive typically has no notion or
3213 * concern about per-process throughput distribution, and
3214 * makes its decisions only on a per-request basis. Therefore,
3215 * the service distribution enforced by the drive's internal
3216 * scheduler is likely to coincide with the desired
3217 * device-throughput distribution only in a completely
bf2b79e7
PV
3218 * symmetric scenario where:
3219 * (i) each of these processes must get the same throughput as
3220 * the others;
3221 * (ii) all these processes have the same I/O pattern
3222 (either sequential or random).
3223 * In fact, in such a scenario, the drive will tend to treat
3224 * the requests of each of these processes in about the same
3225 * way as the requests of the others, and thus to provide
3226 * each of these processes with about the same throughput
3227 * (which is exactly the desired throughput distribution). In
3228 * contrast, in any asymmetric scenario, device idling is
3229 * certainly needed to guarantee that bfqq receives its
3230 * assigned fraction of the device throughput (see [1] for
3231 * details).
3232 *
3233 * We address this issue by controlling, actually, only the
3234 * symmetry sub-condition (i), i.e., provided that
3235 * sub-condition (i) holds, idling is not performed,
3236 * regardless of whether sub-condition (ii) holds. In other
3237 * words, only if sub-condition (i) holds, then idling is
3238 * allowed, and the device tends to be prevented from queueing
3239 * many requests, possibly of several processes. The reason
3240 * for not controlling also sub-condition (ii) is that we
3241 * exploit preemption to preserve guarantees in case of
3242 * symmetric scenarios, even if (ii) does not hold, as
3243 * explained in the next two paragraphs.
3244 *
3245 * Even if a queue, say Q, is expired when it remains idle, Q
3246 * can still preempt the new in-service queue if the next
3247 * request of Q arrives soon (see the comments on
3248 * bfq_bfqq_update_budg_for_activation). If all queues and
3249 * groups have the same weight, this form of preemption,
3250 * combined with the hole-recovery heuristic described in the
3251 * comments on function bfq_bfqq_update_budg_for_activation,
3252 * are enough to preserve a correct bandwidth distribution in
3253 * the mid term, even without idling. In fact, even if not
3254 * idling allows the internal queues of the device to contain
3255 * many requests, and thus to reorder requests, we can rather
3256 * safely assume that the internal scheduler still preserves a
3257 * minimum of mid-term fairness. The motivation for using
3258 * preemption instead of idling is that, by not idling,
3259 * service guarantees are preserved without minimally
3260 * sacrificing throughput. In other words, both a high
3261 * throughput and its desired distribution are obtained.
3262 *
3263 * More precisely, this preemption-based, idleless approach
3264 * provides fairness in terms of IOPS, and not sectors per
3265 * second. This can be seen with a simple example. Suppose
3266 * that there are two queues with the same weight, but that
3267 * the first queue receives requests of 8 sectors, while the
3268 * second queue receives requests of 1024 sectors. In
3269 * addition, suppose that each of the two queues contains at
3270 * most one request at a time, which implies that each queue
3271 * always remains idle after it is served. Finally, after
3272 * remaining idle, each queue receives very quickly a new
3273 * request. It follows that the two queues are served
3274 * alternatively, preempting each other if needed. This
3275 * implies that, although both queues have the same weight,
3276 * the queue with large requests receives a service that is
3277 * 1024/8 times as high as the service received by the other
3278 * queue.
44e44a1b 3279 *
bf2b79e7
PV
3280 * On the other hand, device idling is performed, and thus
3281 * pure sector-domain guarantees are provided, for the
3282 * following queues, which are likely to need stronger
3283 * throughput guarantees: weight-raised queues, and queues
3284 * with a higher weight than other queues. When such queues
3285 * are active, sub-condition (i) is false, which triggers
3286 * device idling.
44e44a1b 3287 *
bf2b79e7
PV
3288 * According to the above considerations, the next variable is
3289 * true (only) if sub-condition (i) holds. To compute the
3290 * value of this variable, we not only use the return value of
3291 * the function bfq_symmetric_scenario(), but also check
3292 * whether bfqq is being weight-raised, because
3293 * bfq_symmetric_scenario() does not take into account also
3294 * weight-raised queues (see comments on
3295 * bfq_weights_tree_add()).
44e44a1b
PV
3296 *
3297 * As a side note, it is worth considering that the above
3298 * device-idling countermeasures may however fail in the
3299 * following unlucky scenario: if idling is (correctly)
bf2b79e7
PV
3300 * disabled in a time period during which all symmetry
3301 * sub-conditions hold, and hence the device is allowed to
44e44a1b
PV
3302 * enqueue many requests, but at some later point in time some
3303 * sub-condition stops to hold, then it may become impossible
3304 * to let requests be served in the desired order until all
3305 * the requests already queued in the device have been served.
3306 */
bf2b79e7
PV
3307 asymmetric_scenario = bfqq->wr_coeff > 1 ||
3308 !bfq_symmetric_scenario(bfqd);
44e44a1b 3309
e1b2324d
AA
3310 /*
3311 * Finally, there is a case where maximizing throughput is the
3312 * best choice even if it may cause unfairness toward
3313 * bfqq. Such a case is when bfqq became active in a burst of
3314 * queue activations. Queues that became active during a large
3315 * burst benefit only from throughput, as discussed in the
3316 * comments on bfq_handle_burst. Thus, if bfqq became active
3317 * in a burst and not idling the device maximizes throughput,
3318 * then the device must no be idled, because not idling the
3319 * device provides bfqq and all other queues in the burst with
3320 * maximum benefit. Combining this and the above case, we can
3321 * now establish when idling is actually needed to preserve
3322 * service guarantees.
3323 */
3324 idling_needed_for_service_guarantees =
3325 asymmetric_scenario && !bfq_bfqq_in_large_burst(bfqq);
3326
44e44a1b 3327 /*
d5be3fef
PV
3328 * We have now all the components we need to compute the
3329 * return value of the function, which is true only if idling
3330 * either boosts the throughput (without issues), or is
3331 * necessary to preserve service guarantees.
aee69d78 3332 */
d5be3fef
PV
3333 return idling_boosts_thr_without_issues ||
3334 idling_needed_for_service_guarantees;
aee69d78
PV
3335}
3336
3337/*
3338 * If the in-service queue is empty but the function bfq_bfqq_may_idle
3339 * returns true, then:
3340 * 1) the queue must remain in service and cannot be expired, and
3341 * 2) the device must be idled to wait for the possible arrival of a new
3342 * request for the queue.
3343 * See the comments on the function bfq_bfqq_may_idle for the reasons
3344 * why performing device idling is the best choice to boost the throughput
3345 * and preserve service guarantees when bfq_bfqq_may_idle itself
3346 * returns true.
3347 */
3348static bool bfq_bfqq_must_idle(struct bfq_queue *bfqq)
3349{
d5be3fef 3350 return RB_EMPTY_ROOT(&bfqq->sort_list) && bfq_bfqq_may_idle(bfqq);
aee69d78
PV
3351}
3352
3353/*
3354 * Select a queue for service. If we have a current queue in service,
3355 * check whether to continue servicing it, or retrieve and set a new one.
3356 */
3357static struct bfq_queue *bfq_select_queue(struct bfq_data *bfqd)
3358{
3359 struct bfq_queue *bfqq;
3360 struct request *next_rq;
3361 enum bfqq_expiration reason = BFQQE_BUDGET_TIMEOUT;
3362
3363 bfqq = bfqd->in_service_queue;
3364 if (!bfqq)
3365 goto new_queue;
3366
3367 bfq_log_bfqq(bfqd, bfqq, "select_queue: already in-service queue");
3368
3369 if (bfq_may_expire_for_budg_timeout(bfqq) &&
3370 !bfq_bfqq_wait_request(bfqq) &&
3371 !bfq_bfqq_must_idle(bfqq))
3372 goto expire;
3373
3374check_queue:
3375 /*
3376 * This loop is rarely executed more than once. Even when it
3377 * happens, it is much more convenient to re-execute this loop
3378 * than to return NULL and trigger a new dispatch to get a
3379 * request served.
3380 */
3381 next_rq = bfqq->next_rq;
3382 /*
3383 * If bfqq has requests queued and it has enough budget left to
3384 * serve them, keep the queue, otherwise expire it.
3385 */
3386 if (next_rq) {
3387 if (bfq_serv_to_charge(next_rq, bfqq) >
3388 bfq_bfqq_budget_left(bfqq)) {
3389 /*
3390 * Expire the queue for budget exhaustion,
3391 * which makes sure that the next budget is
3392 * enough to serve the next request, even if
3393 * it comes from the fifo expired path.
3394 */
3395 reason = BFQQE_BUDGET_EXHAUSTED;
3396 goto expire;
3397 } else {
3398 /*
3399 * The idle timer may be pending because we may
3400 * not disable disk idling even when a new request
3401 * arrives.
3402 */
3403 if (bfq_bfqq_wait_request(bfqq)) {
3404 /*
3405 * If we get here: 1) at least a new request
3406 * has arrived but we have not disabled the
3407 * timer because the request was too small,
3408 * 2) then the block layer has unplugged
3409 * the device, causing the dispatch to be
3410 * invoked.
3411 *
3412 * Since the device is unplugged, now the
3413 * requests are probably large enough to
3414 * provide a reasonable throughput.
3415 * So we disable idling.
3416 */
3417 bfq_clear_bfqq_wait_request(bfqq);
3418 hrtimer_try_to_cancel(&bfqd->idle_slice_timer);
e21b7a0b 3419 bfqg_stats_update_idle_time(bfqq_group(bfqq));
aee69d78
PV
3420 }
3421 goto keep_queue;
3422 }
3423 }
3424
3425 /*
3426 * No requests pending. However, if the in-service queue is idling
3427 * for a new request, or has requests waiting for a completion and
3428 * may idle after their completion, then keep it anyway.
3429 */
3430 if (bfq_bfqq_wait_request(bfqq) ||
3431 (bfqq->dispatched != 0 && bfq_bfqq_may_idle(bfqq))) {
3432 bfqq = NULL;
3433 goto keep_queue;
3434 }
3435
3436 reason = BFQQE_NO_MORE_REQUESTS;
3437expire:
3438 bfq_bfqq_expire(bfqd, bfqq, false, reason);
3439new_queue:
3440 bfqq = bfq_set_in_service_queue(bfqd);
3441 if (bfqq) {
3442 bfq_log_bfqq(bfqd, bfqq, "select_queue: checking new queue");
3443 goto check_queue;
3444 }
3445keep_queue:
3446 if (bfqq)
3447 bfq_log_bfqq(bfqd, bfqq, "select_queue: returned this queue");
3448 else
3449 bfq_log(bfqd, "select_queue: no queue returned");
3450
3451 return bfqq;
3452}
3453
44e44a1b
PV
3454static void bfq_update_wr_data(struct bfq_data *bfqd, struct bfq_queue *bfqq)
3455{
3456 struct bfq_entity *entity = &bfqq->entity;
3457
3458 if (bfqq->wr_coeff > 1) { /* queue is being weight-raised */
3459 bfq_log_bfqq(bfqd, bfqq,
3460 "raising period dur %u/%u msec, old coeff %u, w %d(%d)",
3461 jiffies_to_msecs(jiffies - bfqq->last_wr_start_finish),
3462 jiffies_to_msecs(bfqq->wr_cur_max_time),
3463 bfqq->wr_coeff,
3464 bfqq->entity.weight, bfqq->entity.orig_weight);
3465
3466 if (entity->prio_changed)
3467 bfq_log_bfqq(bfqd, bfqq, "WARN: pending prio change");
3468
3469 /*
e1b2324d
AA
3470 * If the queue was activated in a burst, or too much
3471 * time has elapsed from the beginning of this
3472 * weight-raising period, then end weight raising.
44e44a1b 3473 */
e1b2324d
AA
3474 if (bfq_bfqq_in_large_burst(bfqq))
3475 bfq_bfqq_end_wr(bfqq);
3476 else if (time_is_before_jiffies(bfqq->last_wr_start_finish +
3477 bfqq->wr_cur_max_time)) {
77b7dcea
PV
3478 if (bfqq->wr_cur_max_time != bfqd->bfq_wr_rt_max_time ||
3479 time_is_before_jiffies(bfqq->wr_start_at_switch_to_srt +
e1b2324d 3480 bfq_wr_duration(bfqd)))
77b7dcea
PV
3481 bfq_bfqq_end_wr(bfqq);
3482 else {
3483 /* switch back to interactive wr */
3484 bfqq->wr_coeff = bfqd->bfq_wr_coeff;
3485 bfqq->wr_cur_max_time = bfq_wr_duration(bfqd);
3486 bfqq->last_wr_start_finish =
3487 bfqq->wr_start_at_switch_to_srt;
3488 bfqq->entity.prio_changed = 1;
3489 }
44e44a1b
PV
3490 }
3491 }
431b17f9
PV
3492 /*
3493 * To improve latency (for this or other queues), immediately
3494 * update weight both if it must be raised and if it must be
3495 * lowered. Since, entity may be on some active tree here, and
3496 * might have a pending change of its ioprio class, invoke
3497 * next function with the last parameter unset (see the
3498 * comments on the function).
3499 */
44e44a1b 3500 if ((entity->weight > entity->orig_weight) != (bfqq->wr_coeff > 1))
431b17f9
PV
3501 __bfq_entity_update_weight_prio(bfq_entity_service_tree(entity),
3502 entity, false);
44e44a1b
PV
3503}
3504
aee69d78
PV
3505/*
3506 * Dispatch next request from bfqq.
3507 */
3508static struct request *bfq_dispatch_rq_from_bfqq(struct bfq_data *bfqd,
3509 struct bfq_queue *bfqq)
3510{
3511 struct request *rq = bfqq->next_rq;
3512 unsigned long service_to_charge;
3513
3514 service_to_charge = bfq_serv_to_charge(rq, bfqq);
3515
3516 bfq_bfqq_served(bfqq, service_to_charge);
3517
3518 bfq_dispatch_remove(bfqd->queue, rq);
3519
44e44a1b
PV
3520 /*
3521 * If weight raising has to terminate for bfqq, then next
3522 * function causes an immediate update of bfqq's weight,
3523 * without waiting for next activation. As a consequence, on
3524 * expiration, bfqq will be timestamped as if has never been
3525 * weight-raised during this service slot, even if it has
3526 * received part or even most of the service as a
3527 * weight-raised queue. This inflates bfqq's timestamps, which
3528 * is beneficial, as bfqq is then more willing to leave the
3529 * device immediately to possible other weight-raised queues.
3530 */
3531 bfq_update_wr_data(bfqd, bfqq);
3532
aee69d78
PV
3533 /*
3534 * Expire bfqq, pretending that its budget expired, if bfqq
3535 * belongs to CLASS_IDLE and other queues are waiting for
3536 * service.
3537 */
3538 if (bfqd->busy_queues > 1 && bfq_class_idle(bfqq))
3539 goto expire;
3540
3541 return rq;
3542
3543expire:
3544 bfq_bfqq_expire(bfqd, bfqq, false, BFQQE_BUDGET_EXHAUSTED);
3545 return rq;
3546}
3547
3548static bool bfq_has_work(struct blk_mq_hw_ctx *hctx)
3549{
3550 struct bfq_data *bfqd = hctx->queue->elevator->elevator_data;
3551
3552 /*
3553 * Avoiding lock: a race on bfqd->busy_queues should cause at
3554 * most a call to dispatch for nothing
3555 */
3556 return !list_empty_careful(&bfqd->dispatch) ||
3557 bfqd->busy_queues > 0;
3558}
3559
3560static struct request *__bfq_dispatch_request(struct blk_mq_hw_ctx *hctx)
3561{
3562 struct bfq_data *bfqd = hctx->queue->elevator->elevator_data;
3563 struct request *rq = NULL;
3564 struct bfq_queue *bfqq = NULL;
3565
3566 if (!list_empty(&bfqd->dispatch)) {
3567 rq = list_first_entry(&bfqd->dispatch, struct request,
3568 queuelist);
3569 list_del_init(&rq->queuelist);
3570
3571 bfqq = RQ_BFQQ(rq);
3572
3573 if (bfqq) {
3574 /*
3575 * Increment counters here, because this
3576 * dispatch does not follow the standard
3577 * dispatch flow (where counters are
3578 * incremented)
3579 */
3580 bfqq->dispatched++;
3581
3582 goto inc_in_driver_start_rq;
3583 }
3584
3585 /*
3586 * We exploit the put_rq_private hook to decrement
3587 * rq_in_driver, but put_rq_private will not be
3588 * invoked on this request. So, to avoid unbalance,
3589 * just start this request, without incrementing
3590 * rq_in_driver. As a negative consequence,
3591 * rq_in_driver is deceptively lower than it should be
3592 * while this request is in service. This may cause
3593 * bfq_schedule_dispatch to be invoked uselessly.
3594 *
3595 * As for implementing an exact solution, the
3596 * put_request hook, if defined, is probably invoked
3597 * also on this request. So, by exploiting this hook,
3598 * we could 1) increment rq_in_driver here, and 2)
3599 * decrement it in put_request. Such a solution would
3600 * let the value of the counter be always accurate,
3601 * but it would entail using an extra interface
3602 * function. This cost seems higher than the benefit,
3603 * being the frequency of non-elevator-private
3604 * requests very low.
3605 */
3606 goto start_rq;
3607 }
3608
3609 bfq_log(bfqd, "dispatch requests: %d busy queues", bfqd->busy_queues);
3610
3611 if (bfqd->busy_queues == 0)
3612 goto exit;
3613
3614 /*
3615 * Force device to serve one request at a time if
3616 * strict_guarantees is true. Forcing this service scheme is
3617 * currently the ONLY way to guarantee that the request
3618 * service order enforced by the scheduler is respected by a
3619 * queueing device. Otherwise the device is free even to make
3620 * some unlucky request wait for as long as the device
3621 * wishes.
3622 *
3623 * Of course, serving one request at at time may cause loss of
3624 * throughput.
3625 */
3626 if (bfqd->strict_guarantees && bfqd->rq_in_driver > 0)
3627 goto exit;
3628
3629 bfqq = bfq_select_queue(bfqd);
3630 if (!bfqq)
3631 goto exit;
3632
3633 rq = bfq_dispatch_rq_from_bfqq(bfqd, bfqq);
3634
3635 if (rq) {
3636inc_in_driver_start_rq:
3637 bfqd->rq_in_driver++;
3638start_rq:
3639 rq->rq_flags |= RQF_STARTED;
3640 }
3641exit:
3642 return rq;
3643}
3644
3645static struct request *bfq_dispatch_request(struct blk_mq_hw_ctx *hctx)
3646{
3647 struct bfq_data *bfqd = hctx->queue->elevator->elevator_data;
3648 struct request *rq;
3649
3650 spin_lock_irq(&bfqd->lock);
36eca894 3651
aee69d78 3652 rq = __bfq_dispatch_request(hctx);
6fa3e8d3 3653 spin_unlock_irq(&bfqd->lock);
aee69d78
PV
3654
3655 return rq;
3656}
3657
3658/*
3659 * Task holds one reference to the queue, dropped when task exits. Each rq
3660 * in-flight on this queue also holds a reference, dropped when rq is freed.
3661 *
3662 * Scheduler lock must be held here. Recall not to use bfqq after calling
3663 * this function on it.
3664 */
ea25da48 3665void bfq_put_queue(struct bfq_queue *bfqq)
aee69d78 3666{
e21b7a0b
AA
3667#ifdef CONFIG_BFQ_GROUP_IOSCHED
3668 struct bfq_group *bfqg = bfqq_group(bfqq);
3669#endif
3670
aee69d78
PV
3671 if (bfqq->bfqd)
3672 bfq_log_bfqq(bfqq->bfqd, bfqq, "put_queue: %p %d",
3673 bfqq, bfqq->ref);
3674
3675 bfqq->ref--;
3676 if (bfqq->ref)
3677 return;
3678
e1b2324d
AA
3679 if (bfq_bfqq_sync(bfqq))
3680 /*
3681 * The fact that this queue is being destroyed does not
3682 * invalidate the fact that this queue may have been
3683 * activated during the current burst. As a consequence,
3684 * although the queue does not exist anymore, and hence
3685 * needs to be removed from the burst list if there,
3686 * the burst size has not to be decremented.
3687 */
3688 hlist_del_init(&bfqq->burst_list_node);
e21b7a0b 3689
aee69d78 3690 kmem_cache_free(bfq_pool, bfqq);
e21b7a0b 3691#ifdef CONFIG_BFQ_GROUP_IOSCHED
8f9bebc3 3692 bfqg_and_blkg_put(bfqg);
e21b7a0b 3693#endif
aee69d78
PV
3694}
3695
36eca894
AA
3696static void bfq_put_cooperator(struct bfq_queue *bfqq)
3697{
3698 struct bfq_queue *__bfqq, *next;
3699
3700 /*
3701 * If this queue was scheduled to merge with another queue, be
3702 * sure to drop the reference taken on that queue (and others in
3703 * the merge chain). See bfq_setup_merge and bfq_merge_bfqqs.
3704 */
3705 __bfqq = bfqq->new_bfqq;
3706 while (__bfqq) {
3707 if (__bfqq == bfqq)
3708 break;
3709 next = __bfqq->new_bfqq;
3710 bfq_put_queue(__bfqq);
3711 __bfqq = next;
3712 }
3713}
3714
aee69d78
PV
3715static void bfq_exit_bfqq(struct bfq_data *bfqd, struct bfq_queue *bfqq)
3716{
3717 if (bfqq == bfqd->in_service_queue) {
3718 __bfq_bfqq_expire(bfqd, bfqq);
3719 bfq_schedule_dispatch(bfqd);
3720 }
3721
3722 bfq_log_bfqq(bfqd, bfqq, "exit_bfqq: %p, %d", bfqq, bfqq->ref);
3723
36eca894
AA
3724 bfq_put_cooperator(bfqq);
3725
aee69d78
PV
3726 bfq_put_queue(bfqq); /* release process reference */
3727}
3728
3729static void bfq_exit_icq_bfqq(struct bfq_io_cq *bic, bool is_sync)
3730{
3731 struct bfq_queue *bfqq = bic_to_bfqq(bic, is_sync);
3732 struct bfq_data *bfqd;
3733
3734 if (bfqq)
3735 bfqd = bfqq->bfqd; /* NULL if scheduler already exited */
3736
3737 if (bfqq && bfqd) {
3738 unsigned long flags;
3739
3740 spin_lock_irqsave(&bfqd->lock, flags);
3741 bfq_exit_bfqq(bfqd, bfqq);
3742 bic_set_bfqq(bic, NULL, is_sync);
6fa3e8d3 3743 spin_unlock_irqrestore(&bfqd->lock, flags);
aee69d78
PV
3744 }
3745}
3746
3747static void bfq_exit_icq(struct io_cq *icq)
3748{
3749 struct bfq_io_cq *bic = icq_to_bic(icq);
3750
3751 bfq_exit_icq_bfqq(bic, true);
3752 bfq_exit_icq_bfqq(bic, false);
3753}
3754
3755/*
3756 * Update the entity prio values; note that the new values will not
3757 * be used until the next (re)activation.
3758 */
3759static void
3760bfq_set_next_ioprio_data(struct bfq_queue *bfqq, struct bfq_io_cq *bic)
3761{
3762 struct task_struct *tsk = current;
3763 int ioprio_class;
3764 struct bfq_data *bfqd = bfqq->bfqd;
3765
3766 if (!bfqd)
3767 return;
3768
3769 ioprio_class = IOPRIO_PRIO_CLASS(bic->ioprio);
3770 switch (ioprio_class) {
3771 default:
3772 dev_err(bfqq->bfqd->queue->backing_dev_info->dev,
3773 "bfq: bad prio class %d\n", ioprio_class);
3774 case IOPRIO_CLASS_NONE:
3775 /*
3776 * No prio set, inherit CPU scheduling settings.
3777 */
3778 bfqq->new_ioprio = task_nice_ioprio(tsk);
3779 bfqq->new_ioprio_class = task_nice_ioclass(tsk);
3780 break;
3781 case IOPRIO_CLASS_RT:
3782 bfqq->new_ioprio = IOPRIO_PRIO_DATA(bic->ioprio);
3783 bfqq->new_ioprio_class = IOPRIO_CLASS_RT;
3784 break;
3785 case IOPRIO_CLASS_BE:
3786 bfqq->new_ioprio = IOPRIO_PRIO_DATA(bic->ioprio);
3787 bfqq->new_ioprio_class = IOPRIO_CLASS_BE;
3788 break;
3789 case IOPRIO_CLASS_IDLE:
3790 bfqq->new_ioprio_class = IOPRIO_CLASS_IDLE;
3791 bfqq->new_ioprio = 7;
aee69d78
PV
3792 break;
3793 }
3794
3795 if (bfqq->new_ioprio >= IOPRIO_BE_NR) {
3796 pr_crit("bfq_set_next_ioprio_data: new_ioprio %d\n",
3797 bfqq->new_ioprio);
3798 bfqq->new_ioprio = IOPRIO_BE_NR;
3799 }
3800
3801 bfqq->entity.new_weight = bfq_ioprio_to_weight(bfqq->new_ioprio);
3802 bfqq->entity.prio_changed = 1;
3803}
3804
ea25da48
PV
3805static struct bfq_queue *bfq_get_queue(struct bfq_data *bfqd,
3806 struct bio *bio, bool is_sync,
3807 struct bfq_io_cq *bic);
3808
aee69d78
PV
3809static void bfq_check_ioprio_change(struct bfq_io_cq *bic, struct bio *bio)
3810{
3811 struct bfq_data *bfqd = bic_to_bfqd(bic);
3812 struct bfq_queue *bfqq;
3813 int ioprio = bic->icq.ioc->ioprio;
3814
3815 /*
3816 * This condition may trigger on a newly created bic, be sure to
3817 * drop the lock before returning.
3818 */
3819 if (unlikely(!bfqd) || likely(bic->ioprio == ioprio))
3820 return;
3821
3822 bic->ioprio = ioprio;
3823
3824 bfqq = bic_to_bfqq(bic, false);
3825 if (bfqq) {
3826 /* release process reference on this queue */
3827 bfq_put_queue(bfqq);
3828 bfqq = bfq_get_queue(bfqd, bio, BLK_RW_ASYNC, bic);
3829 bic_set_bfqq(bic, bfqq, false);
3830 }
3831
3832 bfqq = bic_to_bfqq(bic, true);
3833 if (bfqq)
3834 bfq_set_next_ioprio_data(bfqq, bic);
3835}
3836
3837static void bfq_init_bfqq(struct bfq_data *bfqd, struct bfq_queue *bfqq,
3838 struct bfq_io_cq *bic, pid_t pid, int is_sync)
3839{
3840 RB_CLEAR_NODE(&bfqq->entity.rb_node);
3841 INIT_LIST_HEAD(&bfqq->fifo);
e1b2324d 3842 INIT_HLIST_NODE(&bfqq->burst_list_node);
aee69d78
PV
3843
3844 bfqq->ref = 0;
3845 bfqq->bfqd = bfqd;
3846
3847 if (bic)
3848 bfq_set_next_ioprio_data(bfqq, bic);
3849
3850 if (is_sync) {
d5be3fef
PV
3851 /*
3852 * No need to mark as has_short_ttime if in
3853 * idle_class, because no device idling is performed
3854 * for queues in idle class
3855 */
aee69d78 3856 if (!bfq_class_idle(bfqq))
d5be3fef
PV
3857 /* tentatively mark as has_short_ttime */
3858 bfq_mark_bfqq_has_short_ttime(bfqq);
aee69d78 3859 bfq_mark_bfqq_sync(bfqq);
e1b2324d 3860 bfq_mark_bfqq_just_created(bfqq);
aee69d78
PV
3861 } else
3862 bfq_clear_bfqq_sync(bfqq);
3863
3864 /* set end request to minus infinity from now */
3865 bfqq->ttime.last_end_request = ktime_get_ns() + 1;
3866
3867 bfq_mark_bfqq_IO_bound(bfqq);
3868
3869 bfqq->pid = pid;
3870
3871 /* Tentative initial value to trade off between thr and lat */
54b60456 3872 bfqq->max_budget = (2 * bfq_max_budget(bfqd)) / 3;
aee69d78 3873 bfqq->budget_timeout = bfq_smallest_from_now();
aee69d78 3874
44e44a1b 3875 bfqq->wr_coeff = 1;
36eca894 3876 bfqq->last_wr_start_finish = jiffies;
77b7dcea 3877 bfqq->wr_start_at_switch_to_srt = bfq_smallest_from_now();
36eca894 3878 bfqq->split_time = bfq_smallest_from_now();
77b7dcea
PV
3879
3880 /*
3881 * Set to the value for which bfqq will not be deemed as
3882 * soft rt when it becomes backlogged.
3883 */
3884 bfqq->soft_rt_next_start = bfq_greatest_from_now();
44e44a1b 3885
aee69d78
PV
3886 /* first request is almost certainly seeky */
3887 bfqq->seek_history = 1;
3888}
3889
3890static struct bfq_queue **bfq_async_queue_prio(struct bfq_data *bfqd,
e21b7a0b 3891 struct bfq_group *bfqg,
aee69d78
PV
3892 int ioprio_class, int ioprio)
3893{
3894 switch (ioprio_class) {
3895 case IOPRIO_CLASS_RT:
e21b7a0b 3896 return &bfqg->async_bfqq[0][ioprio];
aee69d78
PV
3897 case IOPRIO_CLASS_NONE:
3898 ioprio = IOPRIO_NORM;
3899 /* fall through */
3900 case IOPRIO_CLASS_BE:
e21b7a0b 3901 return &bfqg->async_bfqq[1][ioprio];
aee69d78 3902 case IOPRIO_CLASS_IDLE:
e21b7a0b 3903 return &bfqg->async_idle_bfqq;
aee69d78
PV
3904 default:
3905 return NULL;
3906 }
3907}
3908
3909static struct bfq_queue *bfq_get_queue(struct bfq_data *bfqd,
3910 struct bio *bio, bool is_sync,
3911 struct bfq_io_cq *bic)
3912{
3913 const int ioprio = IOPRIO_PRIO_DATA(bic->ioprio);
3914 const int ioprio_class = IOPRIO_PRIO_CLASS(bic->ioprio);
3915 struct bfq_queue **async_bfqq = NULL;
3916 struct bfq_queue *bfqq;
e21b7a0b 3917 struct bfq_group *bfqg;
aee69d78
PV
3918
3919 rcu_read_lock();
3920
e21b7a0b
AA
3921 bfqg = bfq_find_set_group(bfqd, bio_blkcg(bio));
3922 if (!bfqg) {
3923 bfqq = &bfqd->oom_bfqq;
3924 goto out;
3925 }
3926
aee69d78 3927 if (!is_sync) {
e21b7a0b 3928 async_bfqq = bfq_async_queue_prio(bfqd, bfqg, ioprio_class,
aee69d78
PV
3929 ioprio);
3930 bfqq = *async_bfqq;
3931 if (bfqq)
3932 goto out;
3933 }
3934
3935 bfqq = kmem_cache_alloc_node(bfq_pool,
3936 GFP_NOWAIT | __GFP_ZERO | __GFP_NOWARN,
3937 bfqd->queue->node);
3938
3939 if (bfqq) {
3940 bfq_init_bfqq(bfqd, bfqq, bic, current->pid,
3941 is_sync);
e21b7a0b 3942 bfq_init_entity(&bfqq->entity, bfqg);
aee69d78
PV
3943 bfq_log_bfqq(bfqd, bfqq, "allocated");
3944 } else {
3945 bfqq = &bfqd->oom_bfqq;
3946 bfq_log_bfqq(bfqd, bfqq, "using oom bfqq");
3947 goto out;
3948 }
3949
3950 /*
3951 * Pin the queue now that it's allocated, scheduler exit will
3952 * prune it.
3953 */
3954 if (async_bfqq) {
e21b7a0b
AA
3955 bfqq->ref++; /*
3956 * Extra group reference, w.r.t. sync
3957 * queue. This extra reference is removed
3958 * only if bfqq->bfqg disappears, to
3959 * guarantee that this queue is not freed
3960 * until its group goes away.
3961 */
3962 bfq_log_bfqq(bfqd, bfqq, "get_queue, bfqq not in async: %p, %d",
aee69d78
PV
3963 bfqq, bfqq->ref);
3964 *async_bfqq = bfqq;
3965 }
3966
3967out:
3968 bfqq->ref++; /* get a process reference to this queue */
3969 bfq_log_bfqq(bfqd, bfqq, "get_queue, at end: %p, %d", bfqq, bfqq->ref);
3970 rcu_read_unlock();
3971 return bfqq;
3972}
3973
3974static void bfq_update_io_thinktime(struct bfq_data *bfqd,
3975 struct bfq_queue *bfqq)
3976{
3977 struct bfq_ttime *ttime = &bfqq->ttime;
3978 u64 elapsed = ktime_get_ns() - bfqq->ttime.last_end_request;
3979
3980 elapsed = min_t(u64, elapsed, 2ULL * bfqd->bfq_slice_idle);
3981
3982 ttime->ttime_samples = (7*bfqq->ttime.ttime_samples + 256) / 8;
3983 ttime->ttime_total = div_u64(7*ttime->ttime_total + 256*elapsed, 8);
3984 ttime->ttime_mean = div64_ul(ttime->ttime_total + 128,
3985 ttime->ttime_samples);
3986}
3987
3988static void
3989bfq_update_io_seektime(struct bfq_data *bfqd, struct bfq_queue *bfqq,
3990 struct request *rq)
3991{
aee69d78 3992 bfqq->seek_history <<= 1;
ab0e43e9
PV
3993 bfqq->seek_history |=
3994 get_sdist(bfqq->last_request_pos, rq) > BFQQ_SEEK_THR &&
aee69d78
PV
3995 (!blk_queue_nonrot(bfqd->queue) ||
3996 blk_rq_sectors(rq) < BFQQ_SECT_THR_NONROT);
3997}
3998
d5be3fef
PV
3999static void bfq_update_has_short_ttime(struct bfq_data *bfqd,
4000 struct bfq_queue *bfqq,
4001 struct bfq_io_cq *bic)
aee69d78 4002{
d5be3fef 4003 bool has_short_ttime = true;
aee69d78 4004
d5be3fef
PV
4005 /*
4006 * No need to update has_short_ttime if bfqq is async or in
4007 * idle io prio class, or if bfq_slice_idle is zero, because
4008 * no device idling is performed for bfqq in this case.
4009 */
4010 if (!bfq_bfqq_sync(bfqq) || bfq_class_idle(bfqq) ||
4011 bfqd->bfq_slice_idle == 0)
aee69d78
PV
4012 return;
4013
36eca894
AA
4014 /* Idle window just restored, statistics are meaningless. */
4015 if (time_is_after_eq_jiffies(bfqq->split_time +
4016 bfqd->bfq_wr_min_idle_time))
4017 return;
4018
d5be3fef
PV
4019 /* Think time is infinite if no process is linked to
4020 * bfqq. Otherwise check average think time to
4021 * decide whether to mark as has_short_ttime
4022 */
aee69d78 4023 if (atomic_read(&bic->icq.ioc->active_ref) == 0 ||
d5be3fef
PV
4024 (bfq_sample_valid(bfqq->ttime.ttime_samples) &&
4025 bfqq->ttime.ttime_mean > bfqd->bfq_slice_idle))
4026 has_short_ttime = false;
4027
4028 bfq_log_bfqq(bfqd, bfqq, "update_has_short_ttime: has_short_ttime %d",
4029 has_short_ttime);
aee69d78 4030
d5be3fef
PV
4031 if (has_short_ttime)
4032 bfq_mark_bfqq_has_short_ttime(bfqq);
aee69d78 4033 else
d5be3fef 4034 bfq_clear_bfqq_has_short_ttime(bfqq);
aee69d78
PV
4035}
4036
4037/*
4038 * Called when a new fs request (rq) is added to bfqq. Check if there's
4039 * something we should do about it.
4040 */
4041static void bfq_rq_enqueued(struct bfq_data *bfqd, struct bfq_queue *bfqq,
4042 struct request *rq)
4043{
4044 struct bfq_io_cq *bic = RQ_BIC(rq);
4045
4046 if (rq->cmd_flags & REQ_META)
4047 bfqq->meta_pending++;
4048
4049 bfq_update_io_thinktime(bfqd, bfqq);
d5be3fef 4050 bfq_update_has_short_ttime(bfqd, bfqq, bic);
aee69d78 4051 bfq_update_io_seektime(bfqd, bfqq, rq);
aee69d78
PV
4052
4053 bfq_log_bfqq(bfqd, bfqq,
d5be3fef
PV
4054 "rq_enqueued: has_short_ttime=%d (seeky %d)",
4055 bfq_bfqq_has_short_ttime(bfqq), BFQQ_SEEKY(bfqq));
aee69d78
PV
4056
4057 bfqq->last_request_pos = blk_rq_pos(rq) + blk_rq_sectors(rq);
4058
4059 if (bfqq == bfqd->in_service_queue && bfq_bfqq_wait_request(bfqq)) {
4060 bool small_req = bfqq->queued[rq_is_sync(rq)] == 1 &&
4061 blk_rq_sectors(rq) < 32;
4062 bool budget_timeout = bfq_bfqq_budget_timeout(bfqq);
4063
4064 /*
4065 * There is just this request queued: if the request
4066 * is small and the queue is not to be expired, then
4067 * just exit.
4068 *
4069 * In this way, if the device is being idled to wait
4070 * for a new request from the in-service queue, we
4071 * avoid unplugging the device and committing the
4072 * device to serve just a small request. On the
4073 * contrary, we wait for the block layer to decide
4074 * when to unplug the device: hopefully, new requests
4075 * will be merged to this one quickly, then the device
4076 * will be unplugged and larger requests will be
4077 * dispatched.
4078 */
4079 if (small_req && !budget_timeout)
4080 return;
4081
4082 /*
4083 * A large enough request arrived, or the queue is to
4084 * be expired: in both cases disk idling is to be
4085 * stopped, so clear wait_request flag and reset
4086 * timer.
4087 */
4088 bfq_clear_bfqq_wait_request(bfqq);
4089 hrtimer_try_to_cancel(&bfqd->idle_slice_timer);
e21b7a0b 4090 bfqg_stats_update_idle_time(bfqq_group(bfqq));
aee69d78
PV
4091
4092 /*
4093 * The queue is not empty, because a new request just
4094 * arrived. Hence we can safely expire the queue, in
4095 * case of budget timeout, without risking that the
4096 * timestamps of the queue are not updated correctly.
4097 * See [1] for more details.
4098 */
4099 if (budget_timeout)
4100 bfq_bfqq_expire(bfqd, bfqq, false,
4101 BFQQE_BUDGET_TIMEOUT);
4102 }
4103}
4104
4105static void __bfq_insert_request(struct bfq_data *bfqd, struct request *rq)
4106{
36eca894
AA
4107 struct bfq_queue *bfqq = RQ_BFQQ(rq),
4108 *new_bfqq = bfq_setup_cooperator(bfqd, bfqq, rq, true);
4109
4110 if (new_bfqq) {
4111 if (bic_to_bfqq(RQ_BIC(rq), 1) != bfqq)
4112 new_bfqq = bic_to_bfqq(RQ_BIC(rq), 1);
4113 /*
4114 * Release the request's reference to the old bfqq
4115 * and make sure one is taken to the shared queue.
4116 */
4117 new_bfqq->allocated++;
4118 bfqq->allocated--;
4119 new_bfqq->ref++;
e1b2324d 4120 bfq_clear_bfqq_just_created(bfqq);
36eca894
AA
4121 /*
4122 * If the bic associated with the process
4123 * issuing this request still points to bfqq
4124 * (and thus has not been already redirected
4125 * to new_bfqq or even some other bfq_queue),
4126 * then complete the merge and redirect it to
4127 * new_bfqq.
4128 */
4129 if (bic_to_bfqq(RQ_BIC(rq), 1) == bfqq)
4130 bfq_merge_bfqqs(bfqd, RQ_BIC(rq),
4131 bfqq, new_bfqq);
4132 /*
4133 * rq is about to be enqueued into new_bfqq,
4134 * release rq reference on bfqq
4135 */
4136 bfq_put_queue(bfqq);
4137 rq->elv.priv[1] = new_bfqq;
4138 bfqq = new_bfqq;
4139 }
aee69d78
PV
4140
4141 bfq_add_request(rq);
4142
4143 rq->fifo_time = ktime_get_ns() + bfqd->bfq_fifo_expire[rq_is_sync(rq)];
4144 list_add_tail(&rq->queuelist, &bfqq->fifo);
4145
4146 bfq_rq_enqueued(bfqd, bfqq, rq);
4147}
4148
4149static void bfq_insert_request(struct blk_mq_hw_ctx *hctx, struct request *rq,
4150 bool at_head)
4151{
4152 struct request_queue *q = hctx->queue;
4153 struct bfq_data *bfqd = q->elevator->elevator_data;
4154
4155 spin_lock_irq(&bfqd->lock);
4156 if (blk_mq_sched_try_insert_merge(q, rq)) {
4157 spin_unlock_irq(&bfqd->lock);
4158 return;
4159 }
4160
4161 spin_unlock_irq(&bfqd->lock);
4162
4163 blk_mq_sched_request_inserted(rq);
4164
4165 spin_lock_irq(&bfqd->lock);
4166 if (at_head || blk_rq_is_passthrough(rq)) {
4167 if (at_head)
4168 list_add(&rq->queuelist, &bfqd->dispatch);
4169 else
4170 list_add_tail(&rq->queuelist, &bfqd->dispatch);
4171 } else {
4172 __bfq_insert_request(bfqd, rq);
4173
4174 if (rq_mergeable(rq)) {
4175 elv_rqhash_add(q, rq);
4176 if (!q->last_merge)
4177 q->last_merge = rq;
4178 }
4179 }
4180
6fa3e8d3 4181 spin_unlock_irq(&bfqd->lock);
aee69d78
PV
4182}
4183
4184static void bfq_insert_requests(struct blk_mq_hw_ctx *hctx,
4185 struct list_head *list, bool at_head)
4186{
4187 while (!list_empty(list)) {
4188 struct request *rq;
4189
4190 rq = list_first_entry(list, struct request, queuelist);
4191 list_del_init(&rq->queuelist);
4192 bfq_insert_request(hctx, rq, at_head);
4193 }
4194}
4195
4196static void bfq_update_hw_tag(struct bfq_data *bfqd)
4197{
4198 bfqd->max_rq_in_driver = max_t(int, bfqd->max_rq_in_driver,
4199 bfqd->rq_in_driver);
4200
4201 if (bfqd->hw_tag == 1)
4202 return;
4203
4204 /*
4205 * This sample is valid if the number of outstanding requests
4206 * is large enough to allow a queueing behavior. Note that the
4207 * sum is not exact, as it's not taking into account deactivated
4208 * requests.
4209 */
4210 if (bfqd->rq_in_driver + bfqd->queued < BFQ_HW_QUEUE_THRESHOLD)
4211 return;
4212
4213 if (bfqd->hw_tag_samples++ < BFQ_HW_QUEUE_SAMPLES)
4214 return;
4215
4216 bfqd->hw_tag = bfqd->max_rq_in_driver > BFQ_HW_QUEUE_THRESHOLD;
4217 bfqd->max_rq_in_driver = 0;
4218 bfqd->hw_tag_samples = 0;
4219}
4220
4221static void bfq_completed_request(struct bfq_queue *bfqq, struct bfq_data *bfqd)
4222{
ab0e43e9
PV
4223 u64 now_ns;
4224 u32 delta_us;
4225
aee69d78
PV
4226 bfq_update_hw_tag(bfqd);
4227
4228 bfqd->rq_in_driver--;
4229 bfqq->dispatched--;
4230
44e44a1b
PV
4231 if (!bfqq->dispatched && !bfq_bfqq_busy(bfqq)) {
4232 /*
4233 * Set budget_timeout (which we overload to store the
4234 * time at which the queue remains with no backlog and
4235 * no outstanding request; used by the weight-raising
4236 * mechanism).
4237 */
4238 bfqq->budget_timeout = jiffies;
1de0c4cd
AA
4239
4240 bfq_weights_tree_remove(bfqd, &bfqq->entity,
4241 &bfqd->queue_weights_tree);
44e44a1b
PV
4242 }
4243
ab0e43e9
PV
4244 now_ns = ktime_get_ns();
4245
4246 bfqq->ttime.last_end_request = now_ns;
4247
4248 /*
4249 * Using us instead of ns, to get a reasonable precision in
4250 * computing rate in next check.
4251 */
4252 delta_us = div_u64(now_ns - bfqd->last_completion, NSEC_PER_USEC);
4253
4254 /*
4255 * If the request took rather long to complete, and, according
4256 * to the maximum request size recorded, this completion latency
4257 * implies that the request was certainly served at a very low
4258 * rate (less than 1M sectors/sec), then the whole observation
4259 * interval that lasts up to this time instant cannot be a
4260 * valid time interval for computing a new peak rate. Invoke
4261 * bfq_update_rate_reset to have the following three steps
4262 * taken:
4263 * - close the observation interval at the last (previous)
4264 * request dispatch or completion
4265 * - compute rate, if possible, for that observation interval
4266 * - reset to zero samples, which will trigger a proper
4267 * re-initialization of the observation interval on next
4268 * dispatch
4269 */
4270 if (delta_us > BFQ_MIN_TT/NSEC_PER_USEC &&
4271 (bfqd->last_rq_max_size<<BFQ_RATE_SHIFT)/delta_us <
4272 1UL<<(BFQ_RATE_SHIFT - 10))
4273 bfq_update_rate_reset(bfqd, NULL);
4274 bfqd->last_completion = now_ns;
aee69d78 4275
77b7dcea
PV
4276 /*
4277 * If we are waiting to discover whether the request pattern
4278 * of the task associated with the queue is actually
4279 * isochronous, and both requisites for this condition to hold
4280 * are now satisfied, then compute soft_rt_next_start (see the
4281 * comments on the function bfq_bfqq_softrt_next_start()). We
4282 * schedule this delayed check when bfqq expires, if it still
4283 * has in-flight requests.
4284 */
4285 if (bfq_bfqq_softrt_update(bfqq) && bfqq->dispatched == 0 &&
4286 RB_EMPTY_ROOT(&bfqq->sort_list))
4287 bfqq->soft_rt_next_start =
4288 bfq_bfqq_softrt_next_start(bfqd, bfqq);
4289
aee69d78
PV
4290 /*
4291 * If this is the in-service queue, check if it needs to be expired,
4292 * or if we want to idle in case it has no pending requests.
4293 */
4294 if (bfqd->in_service_queue == bfqq) {
44e44a1b 4295 if (bfqq->dispatched == 0 && bfq_bfqq_must_idle(bfqq)) {
aee69d78
PV
4296 bfq_arm_slice_timer(bfqd);
4297 return;
4298 } else if (bfq_may_expire_for_budg_timeout(bfqq))
4299 bfq_bfqq_expire(bfqd, bfqq, false,
4300 BFQQE_BUDGET_TIMEOUT);
4301 else if (RB_EMPTY_ROOT(&bfqq->sort_list) &&
4302 (bfqq->dispatched == 0 ||
4303 !bfq_bfqq_may_idle(bfqq)))
4304 bfq_bfqq_expire(bfqd, bfqq, false,
4305 BFQQE_NO_MORE_REQUESTS);
4306 }
3f7cb4f4
HT
4307
4308 if (!bfqd->rq_in_driver)
4309 bfq_schedule_dispatch(bfqd);
aee69d78
PV
4310}
4311
4312static void bfq_put_rq_priv_body(struct bfq_queue *bfqq)
4313{
4314 bfqq->allocated--;
4315
4316 bfq_put_queue(bfqq);
4317}
4318
7b9e9361 4319static void bfq_finish_request(struct request *rq)
aee69d78 4320{
5bbf4e5a
CH
4321 struct bfq_queue *bfqq;
4322 struct bfq_data *bfqd;
4323
4324 if (!rq->elv.icq)
4325 return;
4326
4327 bfqq = RQ_BFQQ(rq);
4328 bfqd = bfqq->bfqd;
aee69d78 4329
e21b7a0b
AA
4330 if (rq->rq_flags & RQF_STARTED)
4331 bfqg_stats_update_completion(bfqq_group(bfqq),
4332 rq_start_time_ns(rq),
4333 rq_io_start_time_ns(rq),
4334 rq->cmd_flags);
aee69d78
PV
4335
4336 if (likely(rq->rq_flags & RQF_STARTED)) {
4337 unsigned long flags;
4338
4339 spin_lock_irqsave(&bfqd->lock, flags);
4340
4341 bfq_completed_request(bfqq, bfqd);
4342 bfq_put_rq_priv_body(bfqq);
4343
6fa3e8d3 4344 spin_unlock_irqrestore(&bfqd->lock, flags);
aee69d78
PV
4345 } else {
4346 /*
4347 * Request rq may be still/already in the scheduler,
4348 * in which case we need to remove it. And we cannot
4349 * defer such a check and removal, to avoid
4350 * inconsistencies in the time interval from the end
4351 * of this function to the start of the deferred work.
4352 * This situation seems to occur only in process
4353 * context, as a consequence of a merge. In the
4354 * current version of the code, this implies that the
4355 * lock is held.
4356 */
4357
4358 if (!RB_EMPTY_NODE(&rq->rb_node))
7b9e9361 4359 bfq_remove_request(rq->q, rq);
aee69d78
PV
4360 bfq_put_rq_priv_body(bfqq);
4361 }
4362
4363 rq->elv.priv[0] = NULL;
4364 rq->elv.priv[1] = NULL;
4365}
4366
36eca894
AA
4367/*
4368 * Returns NULL if a new bfqq should be allocated, or the old bfqq if this
4369 * was the last process referring to that bfqq.
4370 */
4371static struct bfq_queue *
4372bfq_split_bfqq(struct bfq_io_cq *bic, struct bfq_queue *bfqq)
4373{
4374 bfq_log_bfqq(bfqq->bfqd, bfqq, "splitting queue");
4375
4376 if (bfqq_process_refs(bfqq) == 1) {
4377 bfqq->pid = current->pid;
4378 bfq_clear_bfqq_coop(bfqq);
4379 bfq_clear_bfqq_split_coop(bfqq);
4380 return bfqq;
4381 }
4382
4383 bic_set_bfqq(bic, NULL, 1);
4384
4385 bfq_put_cooperator(bfqq);
4386
4387 bfq_put_queue(bfqq);
4388 return NULL;
4389}
4390
4391static struct bfq_queue *bfq_get_bfqq_handle_split(struct bfq_data *bfqd,
4392 struct bfq_io_cq *bic,
4393 struct bio *bio,
4394 bool split, bool is_sync,
4395 bool *new_queue)
4396{
4397 struct bfq_queue *bfqq = bic_to_bfqq(bic, is_sync);
4398
4399 if (likely(bfqq && bfqq != &bfqd->oom_bfqq))
4400 return bfqq;
4401
4402 if (new_queue)
4403 *new_queue = true;
4404
4405 if (bfqq)
4406 bfq_put_queue(bfqq);
4407 bfqq = bfq_get_queue(bfqd, bio, is_sync, bic);
4408
4409 bic_set_bfqq(bic, bfqq, is_sync);
e1b2324d
AA
4410 if (split && is_sync) {
4411 if ((bic->was_in_burst_list && bfqd->large_burst) ||
4412 bic->saved_in_large_burst)
4413 bfq_mark_bfqq_in_large_burst(bfqq);
4414 else {
4415 bfq_clear_bfqq_in_large_burst(bfqq);
4416 if (bic->was_in_burst_list)
4417 hlist_add_head(&bfqq->burst_list_node,
4418 &bfqd->burst_list);
4419 }
36eca894 4420 bfqq->split_time = jiffies;
e1b2324d 4421 }
36eca894
AA
4422
4423 return bfqq;
4424}
4425
aee69d78
PV
4426/*
4427 * Allocate bfq data structures associated with this request.
4428 */
5bbf4e5a 4429static void bfq_prepare_request(struct request *rq, struct bio *bio)
aee69d78 4430{
5bbf4e5a 4431 struct request_queue *q = rq->q;
aee69d78 4432 struct bfq_data *bfqd = q->elevator->elevator_data;
9f210738 4433 struct bfq_io_cq *bic;
aee69d78
PV
4434 const int is_sync = rq_is_sync(rq);
4435 struct bfq_queue *bfqq;
36eca894 4436 bool new_queue = false;
13c931bd 4437 bool bfqq_already_existing = false, split = false;
aee69d78 4438
9f210738 4439 if (!rq->elv.icq)
5bbf4e5a 4440 return;
9f210738 4441 bic = icq_to_bic(rq->elv.icq);
aee69d78 4442
9f210738 4443 spin_lock_irq(&bfqd->lock);
aee69d78 4444
8c9ff1ad
CIK
4445 bfq_check_ioprio_change(bic, bio);
4446
e21b7a0b
AA
4447 bfq_bic_update_cgroup(bic, bio);
4448
36eca894
AA
4449 bfqq = bfq_get_bfqq_handle_split(bfqd, bic, bio, false, is_sync,
4450 &new_queue);
4451
4452 if (likely(!new_queue)) {
4453 /* If the queue was seeky for too long, break it apart. */
4454 if (bfq_bfqq_coop(bfqq) && bfq_bfqq_split_coop(bfqq)) {
4455 bfq_log_bfqq(bfqd, bfqq, "breaking apart bfqq");
e1b2324d
AA
4456
4457 /* Update bic before losing reference to bfqq */
4458 if (bfq_bfqq_in_large_burst(bfqq))
4459 bic->saved_in_large_burst = true;
4460
36eca894 4461 bfqq = bfq_split_bfqq(bic, bfqq);
6fa3e8d3 4462 split = true;
36eca894
AA
4463
4464 if (!bfqq)
4465 bfqq = bfq_get_bfqq_handle_split(bfqd, bic, bio,
4466 true, is_sync,
4467 NULL);
13c931bd
PV
4468 else
4469 bfqq_already_existing = true;
36eca894 4470 }
aee69d78
PV
4471 }
4472
4473 bfqq->allocated++;
4474 bfqq->ref++;
4475 bfq_log_bfqq(bfqd, bfqq, "get_request %p: bfqq %p, %d",
4476 rq, bfqq, bfqq->ref);
4477
4478 rq->elv.priv[0] = bic;
4479 rq->elv.priv[1] = bfqq;
4480
36eca894
AA
4481 /*
4482 * If a bfq_queue has only one process reference, it is owned
4483 * by only this bic: we can then set bfqq->bic = bic. in
4484 * addition, if the queue has also just been split, we have to
4485 * resume its state.
4486 */
4487 if (likely(bfqq != &bfqd->oom_bfqq) && bfqq_process_refs(bfqq) == 1) {
4488 bfqq->bic = bic;
6fa3e8d3 4489 if (split) {
36eca894
AA
4490 /*
4491 * The queue has just been split from a shared
4492 * queue: restore the idle window and the
4493 * possible weight raising period.
4494 */
13c931bd
PV
4495 bfq_bfqq_resume_state(bfqq, bfqd, bic,
4496 bfqq_already_existing);
36eca894
AA
4497 }
4498 }
4499
e1b2324d
AA
4500 if (unlikely(bfq_bfqq_just_created(bfqq)))
4501 bfq_handle_burst(bfqd, bfqq);
4502
6fa3e8d3 4503 spin_unlock_irq(&bfqd->lock);
aee69d78
PV
4504}
4505
4506static void bfq_idle_slice_timer_body(struct bfq_queue *bfqq)
4507{
4508 struct bfq_data *bfqd = bfqq->bfqd;
4509 enum bfqq_expiration reason;
4510 unsigned long flags;
4511
4512 spin_lock_irqsave(&bfqd->lock, flags);
4513 bfq_clear_bfqq_wait_request(bfqq);
4514
4515 if (bfqq != bfqd->in_service_queue) {
4516 spin_unlock_irqrestore(&bfqd->lock, flags);
4517 return;
4518 }
4519
4520 if (bfq_bfqq_budget_timeout(bfqq))
4521 /*
4522 * Also here the queue can be safely expired
4523 * for budget timeout without wasting
4524 * guarantees
4525 */
4526 reason = BFQQE_BUDGET_TIMEOUT;
4527 else if (bfqq->queued[0] == 0 && bfqq->queued[1] == 0)
4528 /*
4529 * The queue may not be empty upon timer expiration,
4530 * because we may not disable the timer when the
4531 * first request of the in-service queue arrives
4532 * during disk idling.
4533 */
4534 reason = BFQQE_TOO_IDLE;
4535 else
4536 goto schedule_dispatch;
4537
4538 bfq_bfqq_expire(bfqd, bfqq, true, reason);
4539
4540schedule_dispatch:
6fa3e8d3 4541 spin_unlock_irqrestore(&bfqd->lock, flags);
aee69d78
PV
4542 bfq_schedule_dispatch(bfqd);
4543}
4544
4545/*
4546 * Handler of the expiration of the timer running if the in-service queue
4547 * is idling inside its time slice.
4548 */
4549static enum hrtimer_restart bfq_idle_slice_timer(struct hrtimer *timer)
4550{
4551 struct bfq_data *bfqd = container_of(timer, struct bfq_data,
4552 idle_slice_timer);
4553 struct bfq_queue *bfqq = bfqd->in_service_queue;
4554
4555 /*
4556 * Theoretical race here: the in-service queue can be NULL or
4557 * different from the queue that was idling if a new request
4558 * arrives for the current queue and there is a full dispatch
4559 * cycle that changes the in-service queue. This can hardly
4560 * happen, but in the worst case we just expire a queue too
4561 * early.
4562 */
4563 if (bfqq)
4564 bfq_idle_slice_timer_body(bfqq);
4565
4566 return HRTIMER_NORESTART;
4567}
4568
4569static void __bfq_put_async_bfqq(struct bfq_data *bfqd,
4570 struct bfq_queue **bfqq_ptr)
4571{
4572 struct bfq_queue *bfqq = *bfqq_ptr;
4573
4574 bfq_log(bfqd, "put_async_bfqq: %p", bfqq);
4575 if (bfqq) {
e21b7a0b
AA
4576 bfq_bfqq_move(bfqd, bfqq, bfqd->root_group);
4577
aee69d78
PV
4578 bfq_log_bfqq(bfqd, bfqq, "put_async_bfqq: putting %p, %d",
4579 bfqq, bfqq->ref);
4580 bfq_put_queue(bfqq);
4581 *bfqq_ptr = NULL;
4582 }
4583}
4584
4585/*
e21b7a0b
AA
4586 * Release all the bfqg references to its async queues. If we are
4587 * deallocating the group these queues may still contain requests, so
4588 * we reparent them to the root cgroup (i.e., the only one that will
4589 * exist for sure until all the requests on a device are gone).
aee69d78 4590 */
ea25da48 4591void bfq_put_async_queues(struct bfq_data *bfqd, struct bfq_group *bfqg)
aee69d78
PV
4592{
4593 int i, j;
4594
4595 for (i = 0; i < 2; i++)
4596 for (j = 0; j < IOPRIO_BE_NR; j++)
e21b7a0b 4597 __bfq_put_async_bfqq(bfqd, &bfqg->async_bfqq[i][j]);
aee69d78 4598
e21b7a0b 4599 __bfq_put_async_bfqq(bfqd, &bfqg->async_idle_bfqq);
aee69d78
PV
4600}
4601
4602static void bfq_exit_queue(struct elevator_queue *e)
4603{
4604 struct bfq_data *bfqd = e->elevator_data;
4605 struct bfq_queue *bfqq, *n;
4606
4607 hrtimer_cancel(&bfqd->idle_slice_timer);
4608
4609 spin_lock_irq(&bfqd->lock);
4610 list_for_each_entry_safe(bfqq, n, &bfqd->idle_list, bfqq_list)
e21b7a0b 4611 bfq_deactivate_bfqq(bfqd, bfqq, false, false);
aee69d78
PV
4612 spin_unlock_irq(&bfqd->lock);
4613
4614 hrtimer_cancel(&bfqd->idle_slice_timer);
4615
e21b7a0b
AA
4616#ifdef CONFIG_BFQ_GROUP_IOSCHED
4617 blkcg_deactivate_policy(bfqd->queue, &blkcg_policy_bfq);
4618#else
4619 spin_lock_irq(&bfqd->lock);
4620 bfq_put_async_queues(bfqd, bfqd->root_group);
4621 kfree(bfqd->root_group);
4622 spin_unlock_irq(&bfqd->lock);
4623#endif
4624
aee69d78
PV
4625 kfree(bfqd);
4626}
4627
e21b7a0b
AA
4628static void bfq_init_root_group(struct bfq_group *root_group,
4629 struct bfq_data *bfqd)
4630{
4631 int i;
4632
4633#ifdef CONFIG_BFQ_GROUP_IOSCHED
4634 root_group->entity.parent = NULL;
4635 root_group->my_entity = NULL;
4636 root_group->bfqd = bfqd;
4637#endif
36eca894 4638 root_group->rq_pos_tree = RB_ROOT;
e21b7a0b
AA
4639 for (i = 0; i < BFQ_IOPRIO_CLASSES; i++)
4640 root_group->sched_data.service_tree[i] = BFQ_SERVICE_TREE_INIT;
4641 root_group->sched_data.bfq_class_idle_last_service = jiffies;
4642}
4643
aee69d78
PV
4644static int bfq_init_queue(struct request_queue *q, struct elevator_type *e)
4645{
4646 struct bfq_data *bfqd;
4647 struct elevator_queue *eq;
aee69d78
PV
4648
4649 eq = elevator_alloc(q, e);
4650 if (!eq)
4651 return -ENOMEM;
4652
4653 bfqd = kzalloc_node(sizeof(*bfqd), GFP_KERNEL, q->node);
4654 if (!bfqd) {
4655 kobject_put(&eq->kobj);
4656 return -ENOMEM;
4657 }
4658 eq->elevator_data = bfqd;
4659
e21b7a0b
AA
4660 spin_lock_irq(q->queue_lock);
4661 q->elevator = eq;
4662 spin_unlock_irq(q->queue_lock);
4663
aee69d78
PV
4664 /*
4665 * Our fallback bfqq if bfq_find_alloc_queue() runs into OOM issues.
4666 * Grab a permanent reference to it, so that the normal code flow
4667 * will not attempt to free it.
4668 */
4669 bfq_init_bfqq(bfqd, &bfqd->oom_bfqq, NULL, 1, 0);
4670 bfqd->oom_bfqq.ref++;
4671 bfqd->oom_bfqq.new_ioprio = BFQ_DEFAULT_QUEUE_IOPRIO;
4672 bfqd->oom_bfqq.new_ioprio_class = IOPRIO_CLASS_BE;
4673 bfqd->oom_bfqq.entity.new_weight =
4674 bfq_ioprio_to_weight(bfqd->oom_bfqq.new_ioprio);
e1b2324d
AA
4675
4676 /* oom_bfqq does not participate to bursts */
4677 bfq_clear_bfqq_just_created(&bfqd->oom_bfqq);
4678
aee69d78
PV
4679 /*
4680 * Trigger weight initialization, according to ioprio, at the
4681 * oom_bfqq's first activation. The oom_bfqq's ioprio and ioprio
4682 * class won't be changed any more.
4683 */
4684 bfqd->oom_bfqq.entity.prio_changed = 1;
4685
4686 bfqd->queue = q;
4687
e21b7a0b 4688 INIT_LIST_HEAD(&bfqd->dispatch);
aee69d78
PV
4689
4690 hrtimer_init(&bfqd->idle_slice_timer, CLOCK_MONOTONIC,
4691 HRTIMER_MODE_REL);
4692 bfqd->idle_slice_timer.function = bfq_idle_slice_timer;
4693
1de0c4cd
AA
4694 bfqd->queue_weights_tree = RB_ROOT;
4695 bfqd->group_weights_tree = RB_ROOT;
4696
aee69d78
PV
4697 INIT_LIST_HEAD(&bfqd->active_list);
4698 INIT_LIST_HEAD(&bfqd->idle_list);
e1b2324d 4699 INIT_HLIST_HEAD(&bfqd->burst_list);
aee69d78
PV
4700
4701 bfqd->hw_tag = -1;
4702
4703 bfqd->bfq_max_budget = bfq_default_max_budget;
4704
4705 bfqd->bfq_fifo_expire[0] = bfq_fifo_expire[0];
4706 bfqd->bfq_fifo_expire[1] = bfq_fifo_expire[1];
4707 bfqd->bfq_back_max = bfq_back_max;
4708 bfqd->bfq_back_penalty = bfq_back_penalty;
4709 bfqd->bfq_slice_idle = bfq_slice_idle;
aee69d78
PV
4710 bfqd->bfq_timeout = bfq_timeout;
4711
4712 bfqd->bfq_requests_within_timer = 120;
4713
e1b2324d
AA
4714 bfqd->bfq_large_burst_thresh = 8;
4715 bfqd->bfq_burst_interval = msecs_to_jiffies(180);
4716
44e44a1b
PV
4717 bfqd->low_latency = true;
4718
4719 /*
4720 * Trade-off between responsiveness and fairness.
4721 */
4722 bfqd->bfq_wr_coeff = 30;
77b7dcea 4723 bfqd->bfq_wr_rt_max_time = msecs_to_jiffies(300);
44e44a1b
PV
4724 bfqd->bfq_wr_max_time = 0;
4725 bfqd->bfq_wr_min_idle_time = msecs_to_jiffies(2000);
4726 bfqd->bfq_wr_min_inter_arr_async = msecs_to_jiffies(500);
77b7dcea
PV
4727 bfqd->bfq_wr_max_softrt_rate = 7000; /*
4728 * Approximate rate required
4729 * to playback or record a
4730 * high-definition compressed
4731 * video.
4732 */
cfd69712 4733 bfqd->wr_busy_queues = 0;
44e44a1b
PV
4734
4735 /*
4736 * Begin by assuming, optimistically, that the device is a
4737 * high-speed one, and that its peak rate is equal to 2/3 of
4738 * the highest reference rate.
4739 */
4740 bfqd->RT_prod = R_fast[blk_queue_nonrot(bfqd->queue)] *
4741 T_fast[blk_queue_nonrot(bfqd->queue)];
4742 bfqd->peak_rate = R_fast[blk_queue_nonrot(bfqd->queue)] * 2 / 3;
4743 bfqd->device_speed = BFQ_BFQD_FAST;
4744
aee69d78 4745 spin_lock_init(&bfqd->lock);
aee69d78 4746
e21b7a0b
AA
4747 /*
4748 * The invocation of the next bfq_create_group_hierarchy
4749 * function is the head of a chain of function calls
4750 * (bfq_create_group_hierarchy->blkcg_activate_policy->
4751 * blk_mq_freeze_queue) that may lead to the invocation of the
4752 * has_work hook function. For this reason,
4753 * bfq_create_group_hierarchy is invoked only after all
4754 * scheduler data has been initialized, apart from the fields
4755 * that can be initialized only after invoking
4756 * bfq_create_group_hierarchy. This, in particular, enables
4757 * has_work to correctly return false. Of course, to avoid
4758 * other inconsistencies, the blk-mq stack must then refrain
4759 * from invoking further scheduler hooks before this init
4760 * function is finished.
4761 */
4762 bfqd->root_group = bfq_create_group_hierarchy(bfqd, q->node);
4763 if (!bfqd->root_group)
4764 goto out_free;
4765 bfq_init_root_group(bfqd->root_group, bfqd);
4766 bfq_init_entity(&bfqd->oom_bfqq.entity, bfqd->root_group);
4767
aee69d78
PV
4768
4769 return 0;
e21b7a0b
AA
4770
4771out_free:
4772 kfree(bfqd);
4773 kobject_put(&eq->kobj);
4774 return -ENOMEM;
aee69d78
PV
4775}
4776
4777static void bfq_slab_kill(void)
4778{
4779 kmem_cache_destroy(bfq_pool);
4780}
4781
4782static int __init bfq_slab_setup(void)
4783{
4784 bfq_pool = KMEM_CACHE(bfq_queue, 0);
4785 if (!bfq_pool)
4786 return -ENOMEM;
4787 return 0;
4788}
4789
4790static ssize_t bfq_var_show(unsigned int var, char *page)
4791{
4792 return sprintf(page, "%u\n", var);
4793}
4794
4795static ssize_t bfq_var_store(unsigned long *var, const char *page,
4796 size_t count)
4797{
4798 unsigned long new_val;
4799 int ret = kstrtoul(page, 10, &new_val);
4800
4801 if (ret == 0)
4802 *var = new_val;
4803
4804 return count;
4805}
4806
4807#define SHOW_FUNCTION(__FUNC, __VAR, __CONV) \
4808static ssize_t __FUNC(struct elevator_queue *e, char *page) \
4809{ \
4810 struct bfq_data *bfqd = e->elevator_data; \
4811 u64 __data = __VAR; \
4812 if (__CONV == 1) \
4813 __data = jiffies_to_msecs(__data); \
4814 else if (__CONV == 2) \
4815 __data = div_u64(__data, NSEC_PER_MSEC); \
4816 return bfq_var_show(__data, (page)); \
4817}
4818SHOW_FUNCTION(bfq_fifo_expire_sync_show, bfqd->bfq_fifo_expire[1], 2);
4819SHOW_FUNCTION(bfq_fifo_expire_async_show, bfqd->bfq_fifo_expire[0], 2);
4820SHOW_FUNCTION(bfq_back_seek_max_show, bfqd->bfq_back_max, 0);
4821SHOW_FUNCTION(bfq_back_seek_penalty_show, bfqd->bfq_back_penalty, 0);
4822SHOW_FUNCTION(bfq_slice_idle_show, bfqd->bfq_slice_idle, 2);
4823SHOW_FUNCTION(bfq_max_budget_show, bfqd->bfq_user_max_budget, 0);
4824SHOW_FUNCTION(bfq_timeout_sync_show, bfqd->bfq_timeout, 1);
4825SHOW_FUNCTION(bfq_strict_guarantees_show, bfqd->strict_guarantees, 0);
44e44a1b 4826SHOW_FUNCTION(bfq_low_latency_show, bfqd->low_latency, 0);
aee69d78
PV
4827#undef SHOW_FUNCTION
4828
4829#define USEC_SHOW_FUNCTION(__FUNC, __VAR) \
4830static ssize_t __FUNC(struct elevator_queue *e, char *page) \
4831{ \
4832 struct bfq_data *bfqd = e->elevator_data; \
4833 u64 __data = __VAR; \
4834 __data = div_u64(__data, NSEC_PER_USEC); \
4835 return bfq_var_show(__data, (page)); \
4836}
4837USEC_SHOW_FUNCTION(bfq_slice_idle_us_show, bfqd->bfq_slice_idle);
4838#undef USEC_SHOW_FUNCTION
4839
4840#define STORE_FUNCTION(__FUNC, __PTR, MIN, MAX, __CONV) \
4841static ssize_t \
4842__FUNC(struct elevator_queue *e, const char *page, size_t count) \
4843{ \
4844 struct bfq_data *bfqd = e->elevator_data; \
4845 unsigned long uninitialized_var(__data); \
4846 int ret = bfq_var_store(&__data, (page), count); \
4847 if (__data < (MIN)) \
4848 __data = (MIN); \
4849 else if (__data > (MAX)) \
4850 __data = (MAX); \
4851 if (__CONV == 1) \
4852 *(__PTR) = msecs_to_jiffies(__data); \
4853 else if (__CONV == 2) \
4854 *(__PTR) = (u64)__data * NSEC_PER_MSEC; \
4855 else \
4856 *(__PTR) = __data; \
4857 return ret; \
4858}
4859STORE_FUNCTION(bfq_fifo_expire_sync_store, &bfqd->bfq_fifo_expire[1], 1,
4860 INT_MAX, 2);
4861STORE_FUNCTION(bfq_fifo_expire_async_store, &bfqd->bfq_fifo_expire[0], 1,
4862 INT_MAX, 2);
4863STORE_FUNCTION(bfq_back_seek_max_store, &bfqd->bfq_back_max, 0, INT_MAX, 0);
4864STORE_FUNCTION(bfq_back_seek_penalty_store, &bfqd->bfq_back_penalty, 1,
4865 INT_MAX, 0);
4866STORE_FUNCTION(bfq_slice_idle_store, &bfqd->bfq_slice_idle, 0, INT_MAX, 2);
4867#undef STORE_FUNCTION
4868
4869#define USEC_STORE_FUNCTION(__FUNC, __PTR, MIN, MAX) \
4870static ssize_t __FUNC(struct elevator_queue *e, const char *page, size_t count)\
4871{ \
4872 struct bfq_data *bfqd = e->elevator_data; \
4873 unsigned long uninitialized_var(__data); \
4874 int ret = bfq_var_store(&__data, (page), count); \
4875 if (__data < (MIN)) \
4876 __data = (MIN); \
4877 else if (__data > (MAX)) \
4878 __data = (MAX); \
4879 *(__PTR) = (u64)__data * NSEC_PER_USEC; \
4880 return ret; \
4881}
4882USEC_STORE_FUNCTION(bfq_slice_idle_us_store, &bfqd->bfq_slice_idle, 0,
4883 UINT_MAX);
4884#undef USEC_STORE_FUNCTION
4885
aee69d78
PV
4886static ssize_t bfq_max_budget_store(struct elevator_queue *e,
4887 const char *page, size_t count)
4888{
4889 struct bfq_data *bfqd = e->elevator_data;
4890 unsigned long uninitialized_var(__data);
4891 int ret = bfq_var_store(&__data, (page), count);
4892
4893 if (__data == 0)
ab0e43e9 4894 bfqd->bfq_max_budget = bfq_calc_max_budget(bfqd);
aee69d78
PV
4895 else {
4896 if (__data > INT_MAX)
4897 __data = INT_MAX;
4898 bfqd->bfq_max_budget = __data;
4899 }
4900
4901 bfqd->bfq_user_max_budget = __data;
4902
4903 return ret;
4904}
4905
4906/*
4907 * Leaving this name to preserve name compatibility with cfq
4908 * parameters, but this timeout is used for both sync and async.
4909 */
4910static ssize_t bfq_timeout_sync_store(struct elevator_queue *e,
4911 const char *page, size_t count)
4912{
4913 struct bfq_data *bfqd = e->elevator_data;
4914 unsigned long uninitialized_var(__data);
4915 int ret = bfq_var_store(&__data, (page), count);
4916
4917 if (__data < 1)
4918 __data = 1;
4919 else if (__data > INT_MAX)
4920 __data = INT_MAX;
4921
4922 bfqd->bfq_timeout = msecs_to_jiffies(__data);
4923 if (bfqd->bfq_user_max_budget == 0)
ab0e43e9 4924 bfqd->bfq_max_budget = bfq_calc_max_budget(bfqd);
aee69d78
PV
4925
4926 return ret;
4927}
4928
4929static ssize_t bfq_strict_guarantees_store(struct elevator_queue *e,
4930 const char *page, size_t count)
4931{
4932 struct bfq_data *bfqd = e->elevator_data;
4933 unsigned long uninitialized_var(__data);
4934 int ret = bfq_var_store(&__data, (page), count);
4935
4936 if (__data > 1)
4937 __data = 1;
4938 if (!bfqd->strict_guarantees && __data == 1
4939 && bfqd->bfq_slice_idle < 8 * NSEC_PER_MSEC)
4940 bfqd->bfq_slice_idle = 8 * NSEC_PER_MSEC;
4941
4942 bfqd->strict_guarantees = __data;
4943
4944 return ret;
4945}
4946
44e44a1b
PV
4947static ssize_t bfq_low_latency_store(struct elevator_queue *e,
4948 const char *page, size_t count)
4949{
4950 struct bfq_data *bfqd = e->elevator_data;
4951 unsigned long uninitialized_var(__data);
4952 int ret = bfq_var_store(&__data, (page), count);
4953
4954 if (__data > 1)
4955 __data = 1;
4956 if (__data == 0 && bfqd->low_latency != 0)
4957 bfq_end_wr(bfqd);
4958 bfqd->low_latency = __data;
4959
4960 return ret;
4961}
4962
aee69d78
PV
4963#define BFQ_ATTR(name) \
4964 __ATTR(name, 0644, bfq_##name##_show, bfq_##name##_store)
4965
4966static struct elv_fs_entry bfq_attrs[] = {
4967 BFQ_ATTR(fifo_expire_sync),
4968 BFQ_ATTR(fifo_expire_async),
4969 BFQ_ATTR(back_seek_max),
4970 BFQ_ATTR(back_seek_penalty),
4971 BFQ_ATTR(slice_idle),
4972 BFQ_ATTR(slice_idle_us),
4973 BFQ_ATTR(max_budget),
4974 BFQ_ATTR(timeout_sync),
4975 BFQ_ATTR(strict_guarantees),
44e44a1b 4976 BFQ_ATTR(low_latency),
aee69d78
PV
4977 __ATTR_NULL
4978};
4979
4980static struct elevator_type iosched_bfq_mq = {
4981 .ops.mq = {
5bbf4e5a 4982 .prepare_request = bfq_prepare_request,
7b9e9361 4983 .finish_request = bfq_finish_request,
aee69d78
PV
4984 .exit_icq = bfq_exit_icq,
4985 .insert_requests = bfq_insert_requests,
4986 .dispatch_request = bfq_dispatch_request,
4987 .next_request = elv_rb_latter_request,
4988 .former_request = elv_rb_former_request,
4989 .allow_merge = bfq_allow_bio_merge,
4990 .bio_merge = bfq_bio_merge,
4991 .request_merge = bfq_request_merge,
4992 .requests_merged = bfq_requests_merged,
4993 .request_merged = bfq_request_merged,
4994 .has_work = bfq_has_work,
4995 .init_sched = bfq_init_queue,
4996 .exit_sched = bfq_exit_queue,
4997 },
4998
4999 .uses_mq = true,
5000 .icq_size = sizeof(struct bfq_io_cq),
5001 .icq_align = __alignof__(struct bfq_io_cq),
5002 .elevator_attrs = bfq_attrs,
5003 .elevator_name = "bfq",
5004 .elevator_owner = THIS_MODULE,
5005};
5006
5007static int __init bfq_init(void)
5008{
5009 int ret;
5010
e21b7a0b
AA
5011#ifdef CONFIG_BFQ_GROUP_IOSCHED
5012 ret = blkcg_policy_register(&blkcg_policy_bfq);
5013 if (ret)
5014 return ret;
5015#endif
5016
aee69d78
PV
5017 ret = -ENOMEM;
5018 if (bfq_slab_setup())
5019 goto err_pol_unreg;
5020
44e44a1b
PV
5021 /*
5022 * Times to load large popular applications for the typical
5023 * systems installed on the reference devices (see the
5024 * comments before the definitions of the next two
5025 * arrays). Actually, we use slightly slower values, as the
5026 * estimated peak rate tends to be smaller than the actual
5027 * peak rate. The reason for this last fact is that estimates
5028 * are computed over much shorter time intervals than the long
5029 * intervals typically used for benchmarking. Why? First, to
5030 * adapt more quickly to variations. Second, because an I/O
5031 * scheduler cannot rely on a peak-rate-evaluation workload to
5032 * be run for a long time.
5033 */
5034 T_slow[0] = msecs_to_jiffies(3500); /* actually 4 sec */
5035 T_slow[1] = msecs_to_jiffies(6000); /* actually 6.5 sec */
5036 T_fast[0] = msecs_to_jiffies(7000); /* actually 8 sec */
5037 T_fast[1] = msecs_to_jiffies(2500); /* actually 3 sec */
5038
5039 /*
5040 * Thresholds that determine the switch between speed classes
5041 * (see the comments before the definition of the array
5042 * device_speed_thresh). These thresholds are biased towards
5043 * transitions to the fast class. This is safer than the
5044 * opposite bias. In fact, a wrong transition to the slow
5045 * class results in short weight-raising periods, because the
5046 * speed of the device then tends to be higher that the
5047 * reference peak rate. On the opposite end, a wrong
5048 * transition to the fast class tends to increase
5049 * weight-raising periods, because of the opposite reason.
5050 */
5051 device_speed_thresh[0] = (4 * R_slow[0]) / 3;
5052 device_speed_thresh[1] = (4 * R_slow[1]) / 3;
5053
aee69d78
PV
5054 ret = elv_register(&iosched_bfq_mq);
5055 if (ret)
5056 goto err_pol_unreg;
5057
5058 return 0;
5059
5060err_pol_unreg:
e21b7a0b
AA
5061#ifdef CONFIG_BFQ_GROUP_IOSCHED
5062 blkcg_policy_unregister(&blkcg_policy_bfq);
5063#endif
aee69d78
PV
5064 return ret;
5065}
5066
5067static void __exit bfq_exit(void)
5068{
5069 elv_unregister(&iosched_bfq_mq);
e21b7a0b
AA
5070#ifdef CONFIG_BFQ_GROUP_IOSCHED
5071 blkcg_policy_unregister(&blkcg_policy_bfq);
5072#endif
aee69d78
PV
5073 bfq_slab_kill();
5074}
5075
5076module_init(bfq_init);
5077module_exit(bfq_exit);
5078
5079MODULE_AUTHOR("Paolo Valente");
5080MODULE_LICENSE("GPL");
5081MODULE_DESCRIPTION("MQ Budget Fair Queueing I/O Scheduler");