/*
 * Budget Fair Queueing (BFQ) I/O scheduler.
 *
 * Based on ideas and code from CFQ:
 * Copyright (C) 2003 Jens Axboe <axboe@kernel.dk>
 *
 * Copyright (C) 2008 Fabio Checconi <fabio@gandalf.sssup.it>
 *		      Paolo Valente <paolo.valente@unimore.it>
 *
 * Copyright (C) 2010 Paolo Valente <paolo.valente@unimore.it>
 *                    Arianna Avanzini <avanzini@google.com>
 *
 * Copyright (C) 2017 Paolo Valente <paolo.valente@linaro.org>
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License as
 * published by the Free Software Foundation; either version 2 of the
 * License, or (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * BFQ is a proportional-share I/O scheduler, with some extra
 * low-latency capabilities. BFQ also supports full hierarchical
 * scheduling through cgroups. The next paragraphs provide an
 * introduction on BFQ inner workings. Details on BFQ benefits, usage
 * and limitations can be found in Documentation/block/bfq-iosched.txt.
 *
 * BFQ is a proportional-share storage-I/O scheduling algorithm based
 * on the slice-by-slice service scheme of CFQ. But BFQ assigns
 * budgets, measured in number of sectors, to processes instead of
 * time slices. The device is not granted to the in-service process
 * for a given time slice, but until it has exhausted its assigned
 * budget. This change from the time to the service domain enables BFQ
 * to distribute the device throughput among processes as desired,
 * without any distortion due to throughput fluctuations, or to device
 * internal queueing. BFQ uses an ad hoc internal scheduler, called
 * B-WF2Q+, to schedule processes according to their budgets. More
 * precisely, BFQ schedules queues associated with processes. Each
 * process/queue is assigned a user-configurable weight, and B-WF2Q+
 * guarantees that each queue receives a fraction of the throughput
 * proportional to its weight. Thanks to the accurate policy of
 * B-WF2Q+, BFQ can afford to assign high budgets to I/O-bound
 * processes issuing sequential requests (to boost the throughput),
 * and yet guarantee a low latency to interactive and soft real-time
 * applications.
 *
 * In particular, to provide these low-latency guarantees, BFQ
 * explicitly privileges the I/O of two classes of time-sensitive
 * applications: interactive and soft real-time. This feature enables
 * BFQ to provide applications in these classes with a very low
 * latency. Finally, BFQ also features additional heuristics for
 * preserving both a low latency and a high throughput on NCQ-capable,
 * rotational or flash-based devices, and to get the job done quickly
 * for applications consisting of many I/O-bound processes.
 *
 * NOTE: if the main or only goal, with a given device, is to achieve
 * the maximum-possible throughput at all times, then do switch off
 * all low-latency heuristics for that device, by setting low_latency
 * to 0.
 *
 * BFQ is described in [1], which also contains a reference to the
 * initial, more theoretical paper on BFQ. The interested reader can find
 * in the latter paper full details on the main algorithm, as well as
 * formulas of the guarantees and formal proofs of all the properties.
 * With respect to the version of BFQ presented in these papers, this
 * implementation adds a few more heuristics, such as the one that
 * guarantees a low latency to soft real-time applications, and a
 * hierarchical extension based on H-WF2Q+.
 *
 * B-WF2Q+ is based on WF2Q+, which is described in [2], together with
 * H-WF2Q+, while the augmented tree used here to implement B-WF2Q+
 * with O(log N) complexity derives from the one introduced with EEVDF
 * in [3].
 *
 * [1] P. Valente, A. Avanzini, "Evolution of the BFQ Storage I/O
 *     Scheduler", Proceedings of the First Workshop on Mobile System
 *     Technologies (MST-2015), May 2015.
 *     http://algogroup.unimore.it/people/paolo/disk_sched/mst-2015.pdf
 *
 * [2] Jon C.R. Bennett and H. Zhang, "Hierarchical Packet Fair Queueing
 *     Algorithms", IEEE/ACM Transactions on Networking, 5(5):675-689,
 *     Oct 1997.
 *
 *     http://www.cs.cmu.edu/~hzhang/papers/TON-97-Oct.ps.gz
 *
 * [3] I. Stoica and H. Abdel-Wahab, "Earliest Eligible Virtual Deadline
 *     First: A Flexible and Accurate Mechanism for Proportional Share
 *     Resource Allocation", technical report.
 *
 *     http://www.cs.berkeley.edu/~istoica/papers/eevdf-tr-95.pdf
 */
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/blkdev.h>
#include <linux/cgroup.h>
#include <linux/elevator.h>
#include <linux/ktime.h>
#include <linux/rbtree.h>
#include <linux/ioprio.h>
#include <linux/sbitmap.h>
#include <linux/delay.h>

#include "blk.h"
#include "blk-mq.h"
#include "blk-mq-tag.h"
#include "blk-mq-sched.h"
#include "bfq-iosched.h"
#include "blk-wbt.h"

#define BFQ_BFQQ_FNS(name)						\
void bfq_mark_bfqq_##name(struct bfq_queue *bfqq)			\
{									\
	__set_bit(BFQQF_##name, &(bfqq)->flags);			\
}									\
void bfq_clear_bfqq_##name(struct bfq_queue *bfqq)			\
{									\
	__clear_bit(BFQQF_##name, &(bfqq)->flags);			\
}									\
int bfq_bfqq_##name(const struct bfq_queue *bfqq)			\
{									\
	return test_bit(BFQQF_##name, &(bfqq)->flags);			\
}

BFQ_BFQQ_FNS(just_created);
BFQ_BFQQ_FNS(busy);
BFQ_BFQQ_FNS(wait_request);
BFQ_BFQQ_FNS(non_blocking_wait_rq);
BFQ_BFQQ_FNS(fifo_expire);
BFQ_BFQQ_FNS(has_short_ttime);
BFQ_BFQQ_FNS(sync);
BFQ_BFQQ_FNS(IO_bound);
BFQ_BFQQ_FNS(in_large_burst);
BFQ_BFQQ_FNS(coop);
BFQ_BFQQ_FNS(split_coop);
BFQ_BFQQ_FNS(softrt_update);
#undef BFQ_BFQQ_FNS

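/*
 * For reference, each BFQ_BFQQ_FNS(name) invocation above expands to three
 * tiny helpers. For example, BFQ_BFQQ_FNS(busy) generates (modulo
 * whitespace) the following functions:
 *
 *	void bfq_mark_bfqq_busy(struct bfq_queue *bfqq)
 *	{
 *		__set_bit(BFQQF_busy, &(bfqq)->flags);
 *	}
 *	void bfq_clear_bfqq_busy(struct bfq_queue *bfqq)
 *	{
 *		__clear_bit(BFQQF_busy, &(bfqq)->flags);
 *	}
 *	int bfq_bfqq_busy(const struct bfq_queue *bfqq)
 *	{
 *		return test_bit(BFQQF_busy, &(bfqq)->flags);
 *	}
 */
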
/* Expiration time of sync (0) and async (1) requests, in ns. */
static const u64 bfq_fifo_expire[2] = { NSEC_PER_SEC / 4, NSEC_PER_SEC / 8 };

/* Maximum backwards seek (magic number lifted from CFQ), in KiB. */
static const int bfq_back_max = 16 * 1024;

/* Penalty of a backwards seek, in number of sectors. */
static const int bfq_back_penalty = 2;

/* Idling period duration, in ns. */
static u64 bfq_slice_idle = NSEC_PER_SEC / 125;

/* Minimum number of assigned budgets for which stats are safe to compute. */
static const int bfq_stats_min_budgets = 194;

/* Default maximum budget values, in sectors and number of requests. */
static const int bfq_default_max_budget = 16 * 1024;

/*
 * Async to sync throughput distribution is controlled as follows:
 * when an async request is served, the entity is charged the number
 * of sectors of the request, multiplied by the factor below
 */
static const int bfq_async_charge_factor = 10;

/* Default timeout values, in jiffies, approximating CFQ defaults. */
const int bfq_timeout = HZ / 8;

static struct kmem_cache *bfq_pool;

/* Below this threshold (in ns), we consider thinktime immediate. */
#define BFQ_MIN_TT		(2 * NSEC_PER_MSEC)

/* hw_tag detection: parallel requests threshold and min samples needed. */
#define BFQ_HW_QUEUE_THRESHOLD	4
#define BFQ_HW_QUEUE_SAMPLES	32

#define BFQQ_SEEK_THR		(sector_t)(8 * 100)
#define BFQQ_SECT_THR_NONROT	(sector_t)(2 * 32)
#define BFQQ_CLOSE_THR		(sector_t)(8 * 1024)
#define BFQQ_SEEKY(bfqq)	(hweight32(bfqq->seek_history) > 19)

/* Min number of samples required to perform peak-rate update */
#define BFQ_RATE_MIN_SAMPLES	32
/* Min observation time interval required to perform a peak-rate update (ns) */
#define BFQ_RATE_MIN_INTERVAL	(300*NSEC_PER_MSEC)
/* Target observation time interval for a peak-rate update (ns) */
#define BFQ_RATE_REF_INTERVAL	NSEC_PER_SEC

/* Shift used for peak rate fixed precision calculations. */
#define BFQ_RATE_SHIFT		16

/*
 * By default, BFQ computes the duration of the weight raising for
 * interactive applications automatically, using the following formula:
 * duration = (R / r) * T, where r is the peak rate of the device, and
 * R and T are two reference parameters.
 * In particular, R is the peak rate of the reference device (see below),
 * and T is a reference time: given the systems that are likely to be
 * installed on the reference device according to its speed class, T is
 * about the maximum time needed, under BFQ and while reading two files in
 * parallel, to load typical large applications on these systems.
 * In practice, the slower/faster the device at hand is, the more/less it
 * takes to load applications with respect to the reference device.
 * Accordingly, the longer/shorter BFQ grants weight raising to interactive
 * applications.
 *
 * BFQ uses four different reference pairs (R, T), depending on:
 * . whether the device is rotational or non-rotational;
 * . whether the device is slow, such as old or portable HDDs, as well as
 *   SD cards, or fast, such as newer HDDs and SSDs.
 *
 * The device's speed class is dynamically (re)detected in
 * bfq_update_peak_rate() every time the estimated peak rate is updated.
 *
 * In the following definitions, R_slow[0]/R_fast[0] and
 * T_slow[0]/T_fast[0] are the reference values for a slow/fast
 * rotational device, whereas R_slow[1]/R_fast[1] and
 * T_slow[1]/T_fast[1] are the reference values for a slow/fast
 * non-rotational device. Finally, device_speed_thresh are the
 * thresholds used to switch between speed classes. The reference
 * rates are not the actual peak rates of the devices used as a
 * reference, but slightly lower values. The reason for using these
 * slightly lower values is that the peak-rate estimator tends to
 * yield slightly lower values than the actual peak rate (it can yield
 * the actual peak rate only if there is only one process doing I/O,
 * and the process does sequential I/O).
 *
 * Both the reference peak rates and the thresholds are measured in
 * sectors/usec, left-shifted by BFQ_RATE_SHIFT.
 */
static int R_slow[2] = {1000, 10700};
static int R_fast[2] = {14000, 33000};
/*
 * To improve readability, a conversion function is used to initialize the
 * following arrays, which entails that they can be initialized only in a
 * function.
 */
static int T_slow[2];
static int T_fast[2];
static int device_speed_thresh[2];
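
/*
 * A purely illustrative instance of the duration formula above: if the
 * estimated peak rate r of the device at hand happens to be half the
 * reference rate R of its speed class, then the weight-raising duration
 * becomes (R / r) * T = 2T, i.e., a device twice as slow as the reference
 * one grants interactive weight raising for twice as long (the result is
 * then clamped in bfq_wr_duration() below).
 */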

#define RQ_BIC(rq)		icq_to_bic((rq)->elv.priv[0])
#define RQ_BFQQ(rq)		((rq)->elv.priv[1])

struct bfq_queue *bic_to_bfqq(struct bfq_io_cq *bic, bool is_sync)
{
	return bic->bfqq[is_sync];
}

void bic_set_bfqq(struct bfq_io_cq *bic, struct bfq_queue *bfqq, bool is_sync)
{
	bic->bfqq[is_sync] = bfqq;
}

struct bfq_data *bic_to_bfqd(struct bfq_io_cq *bic)
{
	return bic->icq.q->elevator->elevator_data;
}

/**
 * icq_to_bic - convert iocontext queue structure to bfq_io_cq.
 * @icq: the iocontext queue.
 */
static struct bfq_io_cq *icq_to_bic(struct io_cq *icq)
{
	/* bic->icq is the first member, %NULL will convert to %NULL */
	return container_of(icq, struct bfq_io_cq, icq);
}

/**
 * bfq_bic_lookup - search into @ioc a bic associated to @bfqd.
 * @bfqd: the lookup key.
 * @ioc: the io_context of the process doing I/O.
 * @q: the request queue.
 */
static struct bfq_io_cq *bfq_bic_lookup(struct bfq_data *bfqd,
					struct io_context *ioc,
					struct request_queue *q)
{
	if (ioc) {
		unsigned long flags;
		struct bfq_io_cq *icq;

		spin_lock_irqsave(q->queue_lock, flags);
		icq = icq_to_bic(ioc_lookup_icq(ioc, q));
		spin_unlock_irqrestore(q->queue_lock, flags);

		return icq;
	}

	return NULL;
}

/*
 * Scheduler run of queue, if there are requests pending and no one in the
 * driver that will restart queueing.
 */
void bfq_schedule_dispatch(struct bfq_data *bfqd)
{
	if (bfqd->queued != 0) {
		bfq_log(bfqd, "schedule dispatch");
		blk_mq_run_hw_queues(bfqd->queue, true);
	}
}

#define bfq_class_idle(bfqq)	((bfqq)->ioprio_class == IOPRIO_CLASS_IDLE)
#define bfq_class_rt(bfqq)	((bfqq)->ioprio_class == IOPRIO_CLASS_RT)

#define bfq_sample_valid(samples)	((samples) > 80)

/*
 * Lifted from AS - choose which of rq1 and rq2 is best served now.
 * We choose the request that is closest to the head right now. Distance
 * behind the head is penalized and only allowed to a certain extent.
 */
static struct request *bfq_choose_req(struct bfq_data *bfqd,
				      struct request *rq1,
				      struct request *rq2,
				      sector_t last)
{
	sector_t s1, s2, d1 = 0, d2 = 0;
	unsigned long back_max;
#define BFQ_RQ1_WRAP	0x01 /* request 1 wraps */
#define BFQ_RQ2_WRAP	0x02 /* request 2 wraps */
	unsigned int wrap = 0; /* bit mask: requests behind the disk head? */

	if (!rq1 || rq1 == rq2)
		return rq2;
	if (!rq2)
		return rq1;

	if (rq_is_sync(rq1) && !rq_is_sync(rq2))
		return rq1;
	else if (rq_is_sync(rq2) && !rq_is_sync(rq1))
		return rq2;
	if ((rq1->cmd_flags & REQ_META) && !(rq2->cmd_flags & REQ_META))
		return rq1;
	else if ((rq2->cmd_flags & REQ_META) && !(rq1->cmd_flags & REQ_META))
		return rq2;

	s1 = blk_rq_pos(rq1);
	s2 = blk_rq_pos(rq2);

	/*
	 * By definition, 1KiB is 2 sectors.
	 */
	back_max = bfqd->bfq_back_max * 2;

	/*
	 * Strict one way elevator _except_ in the case where we allow
	 * short backward seeks which are biased as twice the cost of a
	 * similar forward seek.
	 */
	if (s1 >= last)
		d1 = s1 - last;
	else if (s1 + back_max >= last)
		d1 = (last - s1) * bfqd->bfq_back_penalty;
	else
		wrap |= BFQ_RQ1_WRAP;

	if (s2 >= last)
		d2 = s2 - last;
	else if (s2 + back_max >= last)
		d2 = (last - s2) * bfqd->bfq_back_penalty;
	else
		wrap |= BFQ_RQ2_WRAP;

	/* Found required data */

	/*
	 * By doing switch() on the bit mask "wrap" we avoid having to
	 * check two variables for all permutations: --> faster!
	 */
	switch (wrap) {
	case 0: /* common case for CFQ: rq1 and rq2 not wrapped */
		if (d1 < d2)
			return rq1;
		else if (d2 < d1)
			return rq2;

		if (s1 >= s2)
			return rq1;
		else
			return rq2;

	case BFQ_RQ2_WRAP:
		return rq1;
	case BFQ_RQ1_WRAP:
		return rq2;
	case BFQ_RQ1_WRAP|BFQ_RQ2_WRAP: /* both rqs wrapped */
	default:
		/*
		 * Since both rqs are wrapped,
		 * start with the one that's further behind head
		 * (--> only *one* back seek required),
		 * since back seek takes more time than forward.
		 */
		if (s1 <= s2)
			return rq1;
		else
			return rq2;
	}
}
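
/*
 * Worked example for the distance computation above: with last == 1000,
 * the default bfq_back_max (16384 KiB, i.e. 32768 sectors) and
 * bfq_back_penalty == 2, a request at sector 1100 gets d = 100 while a
 * request at sector 900 gets d = (1000 - 900) * 2 = 200, so the forward
 * request is preferred even though the backward one is closer to the head.
 */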

static struct bfq_queue *
bfq_rq_pos_tree_lookup(struct bfq_data *bfqd, struct rb_root *root,
		       sector_t sector, struct rb_node **ret_parent,
		       struct rb_node ***rb_link)
{
	struct rb_node **p, *parent;
	struct bfq_queue *bfqq = NULL;

	parent = NULL;
	p = &root->rb_node;
	while (*p) {
		struct rb_node **n;

		parent = *p;
		bfqq = rb_entry(parent, struct bfq_queue, pos_node);

		/*
		 * Sort strictly based on sector. Smallest to the left,
		 * largest to the right.
		 */
		if (sector > blk_rq_pos(bfqq->next_rq))
			n = &(*p)->rb_right;
		else if (sector < blk_rq_pos(bfqq->next_rq))
			n = &(*p)->rb_left;
		else
			break;
		p = n;
		bfqq = NULL;
	}

	*ret_parent = parent;
	if (rb_link)
		*rb_link = p;

	bfq_log(bfqd, "rq_pos_tree_lookup %llu: returning %d",
		(unsigned long long)sector,
		bfqq ? bfqq->pid : 0);

	return bfqq;
}

void bfq_pos_tree_add_move(struct bfq_data *bfqd, struct bfq_queue *bfqq)
{
	struct rb_node **p, *parent;
	struct bfq_queue *__bfqq;

	if (bfqq->pos_root) {
		rb_erase(&bfqq->pos_node, bfqq->pos_root);
		bfqq->pos_root = NULL;
	}

	if (bfq_class_idle(bfqq))
		return;
	if (!bfqq->next_rq)
		return;

	bfqq->pos_root = &bfq_bfqq_to_bfqg(bfqq)->rq_pos_tree;
	__bfqq = bfq_rq_pos_tree_lookup(bfqd, bfqq->pos_root,
			blk_rq_pos(bfqq->next_rq), &parent, &p);
	if (!__bfqq) {
		rb_link_node(&bfqq->pos_node, parent, p);
		rb_insert_color(&bfqq->pos_node, bfqq->pos_root);
	} else
		bfqq->pos_root = NULL;
}

/*
 * Tell whether there are active queues or groups with differentiated weights.
 */
static bool bfq_differentiated_weights(struct bfq_data *bfqd)
{
	/*
	 * For weights to differ, at least one of the trees must contain
	 * at least two nodes.
	 */
	return (!RB_EMPTY_ROOT(&bfqd->queue_weights_tree) &&
		(bfqd->queue_weights_tree.rb_node->rb_left ||
		 bfqd->queue_weights_tree.rb_node->rb_right)
#ifdef CONFIG_BFQ_GROUP_IOSCHED
	       ) ||
	       (!RB_EMPTY_ROOT(&bfqd->group_weights_tree) &&
		(bfqd->group_weights_tree.rb_node->rb_left ||
		 bfqd->group_weights_tree.rb_node->rb_right)
#endif
	       );
}

/*
 * The following function returns true if every queue must receive the
 * same share of the throughput (this condition is used when deciding
 * whether idling may be disabled, see the comments in the function
 * bfq_bfqq_may_idle()).
 *
 * Such a scenario occurs when:
 * 1) all active queues have the same weight,
 * 2) all active groups at the same level in the groups tree have the same
 *    weight,
 * 3) all active groups at the same level in the groups tree have the same
 *    number of children.
 *
 * Unfortunately, keeping the necessary state for evaluating exactly the
 * above symmetry conditions would be quite complex and time-consuming.
 * Therefore this function evaluates, instead, the following stronger
 * sub-conditions, for which it is much easier to maintain the needed
 * state:
 * 1) all active queues have the same weight,
 * 2) all active groups have the same weight,
 * 3) all active groups have at most one active child each.
 * In particular, the last two conditions are always true if hierarchical
 * support and the cgroups interface are not enabled, thus no state needs
 * to be maintained in this case.
 */
static bool bfq_symmetric_scenario(struct bfq_data *bfqd)
{
	return !bfq_differentiated_weights(bfqd);
}
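
/*
 * For instance, if two busy queues currently have weights 100 and 200, the
 * queue weights tree contains two nodes, bfq_differentiated_weights()
 * returns true and the scenario is deemed asymmetric; if instead all busy
 * queues share the same weight, that tree holds at most one node and the
 * scenario is considered symmetric (provided the group conditions above
 * hold too).
 */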

/*
 * If the weight-counter tree passed as input contains no counter for
 * the weight of the input entity, then add that counter; otherwise just
 * increment the existing counter.
 *
 * Note that weight-counter trees contain few nodes in mostly symmetric
 * scenarios. For example, if all queues have the same weight, then the
 * weight-counter tree for the queues may contain at most one node.
 * This holds even if low_latency is on, because weight-raised queues
 * are not inserted in the tree.
 * In most scenarios, the rate at which nodes are created/destroyed
 * should be low too.
 */
void bfq_weights_tree_add(struct bfq_data *bfqd, struct bfq_entity *entity,
			  struct rb_root *root)
{
	struct rb_node **new = &(root->rb_node), *parent = NULL;

	/*
	 * Do not insert if the entity is already associated with a
	 * counter, which happens if:
	 * 1) the entity is associated with a queue,
	 * 2) a request arrival has caused the queue to become both
	 *    non-weight-raised, and hence change its weight, and
	 *    backlogged; in this respect, each of the two events
	 *    causes an invocation of this function,
	 * 3) this is the invocation of this function caused by the
	 *    second event. This second invocation is actually useless,
	 *    and we handle this fact by exiting immediately. More
	 *    efficient or clearer solutions might possibly be adopted.
	 */
	if (entity->weight_counter)
		return;

	while (*new) {
		struct bfq_weight_counter *__counter = container_of(*new,
						struct bfq_weight_counter,
						weights_node);
		parent = *new;

		if (entity->weight == __counter->weight) {
			entity->weight_counter = __counter;
			goto inc_counter;
		}
		if (entity->weight < __counter->weight)
			new = &((*new)->rb_left);
		else
			new = &((*new)->rb_right);
	}

	entity->weight_counter = kzalloc(sizeof(struct bfq_weight_counter),
					 GFP_ATOMIC);

	/*
	 * In the unlucky event of an allocation failure, we just
	 * exit. This will cause the weight of entity to not be
	 * considered in bfq_differentiated_weights, which, in its
	 * turn, causes the scenario to be deemed wrongly symmetric in
	 * case entity's weight would have been the only weight making
	 * the scenario asymmetric. On the bright side, no unbalance
	 * will however occur when entity becomes inactive again (the
	 * invocation of this function is triggered by an activation
	 * of entity). In fact, bfq_weights_tree_remove does nothing
	 * if !entity->weight_counter.
	 */
	if (unlikely(!entity->weight_counter))
		return;

	entity->weight_counter->weight = entity->weight;
	rb_link_node(&entity->weight_counter->weights_node, parent, new);
	rb_insert_color(&entity->weight_counter->weights_node, root);

inc_counter:
	entity->weight_counter->num_active++;
}

/*
 * Decrement the weight counter associated with the entity, and, if the
 * counter reaches 0, remove the counter from the tree.
 * See the comments to the function bfq_weights_tree_add() for considerations
 * about overhead.
 */
void bfq_weights_tree_remove(struct bfq_data *bfqd, struct bfq_entity *entity,
			     struct rb_root *root)
{
	if (!entity->weight_counter)
		return;

	entity->weight_counter->num_active--;
	if (entity->weight_counter->num_active > 0)
		goto reset_entity_pointer;

	rb_erase(&entity->weight_counter->weights_node, root);
	kfree(entity->weight_counter);

reset_entity_pointer:
	entity->weight_counter = NULL;
}
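
/*
 * Example of the counter lifecycle managed by the two functions above: if
 * three busy queues share weight 100, the tree holds a single counter with
 * num_active == 3. Deactivating one of the queues only decrements
 * num_active; the rb node is erased and freed only when the last of the
 * three is removed.
 */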

/*
 * Return expired entry, or NULL to just start from scratch in rbtree.
 */
static struct request *bfq_check_fifo(struct bfq_queue *bfqq,
				      struct request *last)
{
	struct request *rq;

	if (bfq_bfqq_fifo_expire(bfqq))
		return NULL;

	bfq_mark_bfqq_fifo_expire(bfqq);

	rq = rq_entry_fifo(bfqq->fifo.next);

	if (rq == last || ktime_get_ns() < rq->fifo_time)
		return NULL;

	bfq_log_bfqq(bfqq->bfqd, bfqq, "check_fifo: returned %p", rq);
	return rq;
}

static struct request *bfq_find_next_rq(struct bfq_data *bfqd,
					struct bfq_queue *bfqq,
					struct request *last)
{
	struct rb_node *rbnext = rb_next(&last->rb_node);
	struct rb_node *rbprev = rb_prev(&last->rb_node);
	struct request *next, *prev = NULL;

	/* Follow expired path, else get first next available. */
	next = bfq_check_fifo(bfqq, last);
	if (next)
		return next;

	if (rbprev)
		prev = rb_entry_rq(rbprev);

	if (rbnext)
		next = rb_entry_rq(rbnext);
	else {
		rbnext = rb_first(&bfqq->sort_list);
		if (rbnext && rbnext != &last->rb_node)
			next = rb_entry_rq(rbnext);
	}

	return bfq_choose_req(bfqd, next, prev, blk_rq_pos(last));
}

/* see the definition of bfq_async_charge_factor for details */
static unsigned long bfq_serv_to_charge(struct request *rq,
					struct bfq_queue *bfqq)
{
	if (bfq_bfqq_sync(bfqq) || bfqq->wr_coeff > 1)
		return blk_rq_sectors(rq);

	/*
	 * If there are no weight-raised queues, then amplify service
	 * by just the async charge factor; otherwise amplify service
	 * by twice the async charge factor, to further reduce latency
	 * for weight-raised queues.
	 */
	if (bfqq->bfqd->wr_busy_queues == 0)
		return blk_rq_sectors(rq) * bfq_async_charge_factor;

	return blk_rq_sectors(rq) * 2 * bfq_async_charge_factor;
}
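
/*
 * Charging example for the function above: an 8-sector request from a sync
 * or weight-raised queue is charged its actual 8 sectors, whereas the same
 * request from an async, non-weight-raised queue is charged
 * 8 * bfq_async_charge_factor = 80 sectors, or 160 sectors if some other
 * queue is currently weight-raised.
 */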

/**
 * bfq_updated_next_req - update the queue after a new next_rq selection.
 * @bfqd: the device data the queue belongs to.
 * @bfqq: the queue to update.
 *
 * If the first request of a queue changes we make sure that the queue
 * has enough budget to serve at least its first request (if the
 * request has grown). We do this because if the queue has not enough
 * budget for its first request, it has to go through two dispatch
 * rounds to actually get it dispatched.
 */
static void bfq_updated_next_req(struct bfq_data *bfqd,
				 struct bfq_queue *bfqq)
{
	struct bfq_entity *entity = &bfqq->entity;
	struct request *next_rq = bfqq->next_rq;
	unsigned long new_budget;

	if (!next_rq)
		return;

	if (bfqq == bfqd->in_service_queue)
		/*
		 * In order not to break guarantees, budgets cannot be
		 * changed after an entity has been selected.
		 */
		return;

	new_budget = max_t(unsigned long, bfqq->max_budget,
			   bfq_serv_to_charge(next_rq, bfqq));
	if (entity->budget != new_budget) {
		entity->budget = new_budget;
		bfq_log_bfqq(bfqd, bfqq, "updated next rq: new budget %lu",
					 new_budget);
		bfq_requeue_bfqq(bfqd, bfqq, false);
	}
}
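
/*
 * For example, if bfqq->max_budget is 2048 sectors and the newly selected
 * next_rq would be charged 4096 sectors by bfq_serv_to_charge(), the entity
 * budget above is raised to 4096, so that the request can be dispatched in
 * a single scheduling round instead of two.
 */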

static unsigned int bfq_wr_duration(struct bfq_data *bfqd)
{
	u64 dur;

	if (bfqd->bfq_wr_max_time > 0)
		return bfqd->bfq_wr_max_time;

	dur = bfqd->RT_prod;
	do_div(dur, bfqd->peak_rate);

	/*
	 * Limit duration between 3 and 13 seconds. Tests show that
	 * higher values than 13 seconds often yield the opposite of
	 * the desired result, i.e., worsen responsiveness by letting
	 * non-interactive and non-soft-real-time applications
	 * preserve weight raising for a too long time interval.
	 *
	 * On the other hand, lower values than 3 seconds make it
	 * difficult for most interactive tasks to complete their jobs
	 * before weight-raising finishes.
	 */
	if (dur > msecs_to_jiffies(13000))
		dur = msecs_to_jiffies(13000);
	else if (dur < msecs_to_jiffies(3000))
		dur = msecs_to_jiffies(3000);

	return dur;
}

/* switch back from soft real-time to interactive weight raising */
static void switch_back_to_interactive_wr(struct bfq_queue *bfqq,
					  struct bfq_data *bfqd)
{
	bfqq->wr_coeff = bfqd->bfq_wr_coeff;
	bfqq->wr_cur_max_time = bfq_wr_duration(bfqd);
	bfqq->last_wr_start_finish = bfqq->wr_start_at_switch_to_srt;
}

static void
bfq_bfqq_resume_state(struct bfq_queue *bfqq, struct bfq_data *bfqd,
		      struct bfq_io_cq *bic, bool bfq_already_existing)
{
	unsigned int old_wr_coeff = bfqq->wr_coeff;
	bool busy = bfq_already_existing && bfq_bfqq_busy(bfqq);

	if (bic->saved_has_short_ttime)
		bfq_mark_bfqq_has_short_ttime(bfqq);
	else
		bfq_clear_bfqq_has_short_ttime(bfqq);

	if (bic->saved_IO_bound)
		bfq_mark_bfqq_IO_bound(bfqq);
	else
		bfq_clear_bfqq_IO_bound(bfqq);

	bfqq->ttime = bic->saved_ttime;
	bfqq->wr_coeff = bic->saved_wr_coeff;
	bfqq->wr_start_at_switch_to_srt = bic->saved_wr_start_at_switch_to_srt;
	bfqq->last_wr_start_finish = bic->saved_last_wr_start_finish;
	bfqq->wr_cur_max_time = bic->saved_wr_cur_max_time;

	if (bfqq->wr_coeff > 1 && (bfq_bfqq_in_large_burst(bfqq) ||
	    time_is_before_jiffies(bfqq->last_wr_start_finish +
				   bfqq->wr_cur_max_time))) {
		if (bfqq->wr_cur_max_time == bfqd->bfq_wr_rt_max_time &&
		    !bfq_bfqq_in_large_burst(bfqq) &&
		    time_is_after_eq_jiffies(bfqq->wr_start_at_switch_to_srt +
					     bfq_wr_duration(bfqd))) {
			switch_back_to_interactive_wr(bfqq, bfqd);
		} else {
			bfqq->wr_coeff = 1;
			bfq_log_bfqq(bfqq->bfqd, bfqq,
				     "resume state: switching off wr");
		}
	}

	/* make sure weight will be updated, however we got here */
	bfqq->entity.prio_changed = 1;

	if (likely(!busy))
		return;

	if (old_wr_coeff == 1 && bfqq->wr_coeff > 1)
		bfqd->wr_busy_queues++;
	else if (old_wr_coeff > 1 && bfqq->wr_coeff == 1)
		bfqd->wr_busy_queues--;
}

static int bfqq_process_refs(struct bfq_queue *bfqq)
{
	return bfqq->ref - bfqq->allocated - bfqq->entity.on_st;
}

/* Empty burst list and add just bfqq (see comments on bfq_handle_burst) */
static void bfq_reset_burst_list(struct bfq_data *bfqd, struct bfq_queue *bfqq)
{
	struct bfq_queue *item;
	struct hlist_node *n;

	hlist_for_each_entry_safe(item, n, &bfqd->burst_list, burst_list_node)
		hlist_del_init(&item->burst_list_node);
	hlist_add_head(&bfqq->burst_list_node, &bfqd->burst_list);
	bfqd->burst_size = 1;
	bfqd->burst_parent_entity = bfqq->entity.parent;
}

/* Add bfqq to the list of queues in current burst (see bfq_handle_burst) */
static void bfq_add_to_burst(struct bfq_data *bfqd, struct bfq_queue *bfqq)
{
	/* Increment burst size to take into account also bfqq */
	bfqd->burst_size++;

	if (bfqd->burst_size == bfqd->bfq_large_burst_thresh) {
		struct bfq_queue *pos, *bfqq_item;
		struct hlist_node *n;

		/*
		 * Enough queues have been activated shortly after each
		 * other to consider this burst as large.
		 */
		bfqd->large_burst = true;

		/*
		 * We can now mark all queues in the burst list as
		 * belonging to a large burst.
		 */
		hlist_for_each_entry(bfqq_item, &bfqd->burst_list,
				     burst_list_node)
			bfq_mark_bfqq_in_large_burst(bfqq_item);
		bfq_mark_bfqq_in_large_burst(bfqq);

		/*
		 * From now on, and until the current burst finishes, any
		 * new queue being activated shortly after the last queue
		 * was inserted in the burst can be immediately marked as
		 * belonging to a large burst. So the burst list is not
		 * needed any more. Remove it.
		 */
		hlist_for_each_entry_safe(pos, n, &bfqd->burst_list,
					  burst_list_node)
			hlist_del_init(&pos->burst_list_node);
	} else /*
		* Burst not yet large: add bfqq to the burst list. Do
		* not increment the ref counter for bfqq, because bfqq
		* is removed from the burst list before freeing bfqq
		* in put_queue.
		*/
		hlist_add_head(&bfqq->burst_list_node, &bfqd->burst_list);
}

/*
 * If many queues belonging to the same group happen to be created
 * shortly after each other, then the processes associated with these
 * queues have typically a common goal. In particular, bursts of queue
 * creations are usually caused by services or applications that spawn
 * many parallel threads/processes. Examples are systemd during boot,
 * or git grep. To help these processes get their job done as soon as
 * possible, it is usually better to not grant either weight-raising
 * or device idling to their queues.
 *
 * In this comment we describe, firstly, the reasons why this fact
 * holds, and, secondly, the next function, which implements the main
 * steps needed to properly mark these queues so that they can then be
 * treated in a different way.
 *
 * The above services or applications benefit mostly from a high
 * throughput: the quicker the requests of the activated queues are
 * cumulatively served, the sooner the target job of these queues gets
 * completed. As a consequence, weight-raising any of these queues,
 * which also implies idling the device for it, is almost always
 * counterproductive. In most cases it just lowers throughput.
 *
 * On the other hand, a burst of queue creations may be caused also by
 * the start of an application that does not consist of a lot of
 * parallel I/O-bound threads. In fact, with a complex application,
 * several short processes may need to be executed to start-up the
 * application. In this respect, to start an application as quickly as
 * possible, the best thing to do is in any case to privilege the I/O
 * related to the application with respect to all other
 * I/O. Therefore, the best strategy to start as quickly as possible
 * an application that causes a burst of queue creations is to
 * weight-raise all the queues created during the burst. This is the
 * exact opposite of the best strategy for the other type of bursts.
 *
 * In the end, to take the best action for each of the two cases, the
 * two types of bursts need to be distinguished. Fortunately, this
 * seems relatively easy, by looking at the sizes of the bursts. In
 * particular, we found a threshold such that only bursts with a
 * larger size than that threshold are apparently caused by
 * services or commands such as systemd or git grep. For brevity,
 * hereafter we call just 'large' these bursts. BFQ *does not*
 * weight-raise queues whose creation occurs in a large burst. In
 * addition, for each of these queues BFQ performs or does not perform
 * idling depending on which choice boosts the throughput more. The
 * exact choice depends on the device and request pattern at
 * hand.
 *
 * Unfortunately, false positives may occur while an interactive task
 * is starting (e.g., an application is being started). The
 * consequence is that the queues associated with the task do not
 * enjoy weight raising as expected. Fortunately these false positives
 * are very rare. They typically occur if some service happens to
 * start doing I/O exactly when the interactive task starts.
 *
 * Turning back to the next function, it implements all the steps
 * needed to detect the occurrence of a large burst and to properly
 * mark all the queues belonging to it (so that they can then be
 * treated in a different way). This goal is achieved by maintaining a
 * "burst list" that holds, temporarily, the queues that belong to the
 * burst in progress. The list is then used to mark these queues as
 * belonging to a large burst if the burst does become large. The main
 * steps are the following.
 *
 * . when the very first queue is created, the queue is inserted into the
 *   list (as it could be the first queue in a possible burst)
 *
 * . if the current burst has not yet become large, and a queue Q that does
 *   not yet belong to the burst is activated shortly after the last time
 *   at which a new queue entered the burst list, then the function appends
 *   Q to the burst list
 *
 * . if, as a consequence of the previous step, the burst size reaches
 *   the large-burst threshold, then
 *
 *     . all the queues in the burst list are marked as belonging to a
 *       large burst
 *
 *     . the burst list is deleted; in fact, the burst list already served
 *       its purpose (keeping temporarily track of the queues in a burst,
 *       so as to be able to mark them as belonging to a large burst in the
 *       previous sub-step), and now is not needed any more
 *
 *     . the device enters a large-burst mode
 *
 * . if a queue Q that does not belong to the burst is created while
 *   the device is in large-burst mode and shortly after the last time
 *   at which a queue either entered the burst list or was marked as
 *   belonging to the current large burst, then Q is immediately marked
 *   as belonging to a large burst.
 *
 * . if a queue Q that does not belong to the burst is created a while
 *   later, i.e., not shortly after, than the last time at which a queue
 *   either entered the burst list or was marked as belonging to the
 *   current large burst, then the current burst is deemed as finished and:
 *
 *        . the large-burst mode is reset if set
 *
 *        . the burst list is emptied
 *
 *        . Q is inserted in the burst list, as Q may be the first queue
 *          in a possible new burst (then the burst list contains just Q
 *          after this step).
 */
static void bfq_handle_burst(struct bfq_data *bfqd, struct bfq_queue *bfqq)
{
	/*
	 * If bfqq is already in the burst list or is part of a large
	 * burst, or finally has just been split, then there is
	 * nothing else to do.
	 */
	if (!hlist_unhashed(&bfqq->burst_list_node) ||
	    bfq_bfqq_in_large_burst(bfqq) ||
	    time_is_after_eq_jiffies(bfqq->split_time +
				     msecs_to_jiffies(10)))
		return;

	/*
	 * If bfqq's creation happens late enough, or bfqq belongs to
	 * a different group than the burst group, then the current
	 * burst is finished, and related data structures must be
	 * reset.
	 *
	 * In this respect, consider the special case where bfqq is
	 * the very first queue created after BFQ is selected for this
	 * device. In this case, last_ins_in_burst and
	 * burst_parent_entity are not yet significant when we get
	 * here. But it is easy to verify that, whether or not the
	 * following condition is true, bfqq will end up being
	 * inserted into the burst list. In particular the list will
	 * happen to contain only bfqq. And this is exactly what has
	 * to happen, as bfqq may be the first queue of the first
	 * burst.
	 */
	if (time_is_before_jiffies(bfqd->last_ins_in_burst +
	    bfqd->bfq_burst_interval) ||
	    bfqq->entity.parent != bfqd->burst_parent_entity) {
		bfqd->large_burst = false;
		bfq_reset_burst_list(bfqd, bfqq);
		goto end;
	}

	/*
	 * If we get here, then bfqq is being activated shortly after the
	 * last queue. So, if the current burst is also large, we can mark
	 * bfqq as belonging to this large burst immediately.
	 */
	if (bfqd->large_burst) {
		bfq_mark_bfqq_in_large_burst(bfqq);
		goto end;
	}

	/*
	 * If we get here, then a large-burst state has not yet been
	 * reached, but bfqq is being activated shortly after the last
	 * queue. Then we add bfqq to the burst.
	 */
	bfq_add_to_burst(bfqd, bfqq);
end:
	/*
	 * At this point, bfqq either has been added to the current
	 * burst or has caused the current burst to terminate and a
	 * possible new burst to start. In particular, in the second
	 * case, bfqq has become the first queue in the possible new
	 * burst. In both cases last_ins_in_burst needs to be moved
	 * forward.
	 */
	bfqd->last_ins_in_burst = jiffies;
}
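
/*
 * Illustrative timeline for the function above, with hypothetical numbers:
 * suppose bfq_large_burst_thresh is 4 and queues Q1, Q2, Q3 and Q4 are
 * created within bfq_burst_interval of one another. Q1 resets and enters
 * the burst list, Q2 and Q3 are appended to it, and the creation of Q4
 * makes the list reach the threshold: all four queues get marked as
 * in_large_burst and the list is emptied. A queue created much later than
 * Q4 ends the burst and starts a new, one-element burst list.
 */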

static int bfq_bfqq_budget_left(struct bfq_queue *bfqq)
{
	struct bfq_entity *entity = &bfqq->entity;

	return entity->budget - entity->service;
}

/*
 * If enough samples have been computed, return the current max budget
 * stored in bfqd, which is dynamically updated according to the
 * estimated disk peak rate; otherwise return the default max budget
 */
static int bfq_max_budget(struct bfq_data *bfqd)
{
	if (bfqd->budgets_assigned < bfq_stats_min_budgets)
		return bfq_default_max_budget;
	else
		return bfqd->bfq_max_budget;
}

/*
 * Return min budget, which is a fraction of the current or default
 * max budget (trying with 1/32)
 */
static int bfq_min_budget(struct bfq_data *bfqd)
{
	if (bfqd->budgets_assigned < bfq_stats_min_budgets)
		return bfq_default_max_budget / 32;
	else
		return bfqd->bfq_max_budget / 32;
}

/*
 * The next function, invoked after the input queue bfqq switches from
 * idle to busy, updates the budget of bfqq. The function also tells
 * whether the in-service queue should be expired, by returning
 * true. The purpose of expiring the in-service queue is to give bfqq
 * the chance to possibly preempt the in-service queue, and the reason
 * for preempting the in-service queue is to achieve one of the two
 * goals below.
 *
 * 1. Guarantee to bfqq its reserved bandwidth even if bfqq has
 * expired because it has remained idle. In particular, bfqq may have
 * expired for one of the following two reasons:
 *
 * - BFQQE_NO_MORE_REQUESTS bfqq did not enjoy any device idling
 *   and did not make it to issue a new request before its last
 *   request was served;
 *
 * - BFQQE_TOO_IDLE bfqq did enjoy device idling, but did not issue
 *   a new request before the expiration of the idling-time.
 *
 * Even if bfqq has expired for one of the above reasons, the process
 * associated with the queue may be however issuing requests greedily,
 * and thus be sensitive to the bandwidth it receives (bfqq may have
 * remained idle for other reasons: CPU high load, bfqq not enjoying
 * idling, I/O throttling somewhere in the path from the process to
 * the I/O scheduler, ...). But if, after every expiration for one of
 * the above two reasons, bfqq has to wait for the service of at least
 * one full budget of another queue before being served again, then
 * bfqq is likely to get a much lower bandwidth or resource time than
 * its reserved ones. To address this issue, two countermeasures need
 * to be taken.
 *
 * First, the budget and the timestamps of bfqq need to be updated in
 * a special way on bfqq reactivation: they need to be updated as if
 * bfqq did not remain idle and did not expire. In fact, if they are
 * computed as if bfqq expired and remained idle until reactivation,
 * then the process associated with bfqq is treated as if, instead of
 * being greedy, it stopped issuing requests when bfqq remained idle,
 * and restarts issuing requests only on this reactivation. In other
 * words, the scheduler does not help the process recover the "service
 * hole" between bfqq expiration and reactivation. As a consequence,
 * the process receives a lower bandwidth than its reserved one. In
 * contrast, to recover this hole, the budget must be updated as if
 * bfqq was not expired at all before this reactivation, i.e., it must
 * be set to the value of the remaining budget when bfqq was
 * expired. Along the same line, timestamps need to be assigned the
 * value they had the last time bfqq was selected for service, i.e.,
 * before last expiration. Thus timestamps need to be back-shifted
 * with respect to their normal computation (see [1] for more details
 * on this tricky aspect).
 *
 * Secondly, to allow the process to recover the hole, the in-service
 * queue must be expired too, to give bfqq the chance to preempt it
 * immediately. In fact, if bfqq has to wait for a full budget of the
 * in-service queue to be completed, then it may become impossible to
 * let the process recover the hole, even if the back-shifted
 * timestamps of bfqq are lower than those of the in-service queue. If
 * this happens for most or all of the holes, then the process may not
 * receive its reserved bandwidth. In this respect, it is worth noting
 * that, being the service of outstanding requests unpreemptible, a
 * little fraction of the holes may however be unrecoverable, thereby
 * causing a little loss of bandwidth.
 *
 * The last important point is detecting whether bfqq does need this
 * bandwidth recovery. In this respect, the next function deems the
 * process associated with bfqq greedy, and thus allows it to recover
 * the hole, if: 1) the process is waiting for the arrival of a new
 * request (which implies that bfqq expired for one of the above two
 * reasons), and 2) such a request has arrived soon. The first
 * condition is controlled through the flag non_blocking_wait_rq,
 * while the second through the flag arrived_in_time. If both
 * conditions hold, then the function computes the budget in the
 * above-described special way, and signals that the in-service queue
 * should be expired. Timestamp back-shifting is done later in
 * __bfq_activate_entity.
 *
 * 2. Reduce latency. Even if timestamps are not backshifted to let
 * the process associated with bfqq recover a service hole, bfqq may
 * however happen to have, after being (re)activated, a lower finish
 * timestamp than the in-service queue. That is, the next budget of
 * bfqq may have to be completed before the one of the in-service
 * queue. If this is the case, then preempting the in-service queue
 * allows this goal to be achieved, apart from the unpreemptible,
 * outstanding requests mentioned above.
 *
 * Unfortunately, regardless of which of the above two goals one wants
 * to achieve, service trees need first to be updated to know whether
 * the in-service queue must be preempted. To have service trees
 * correctly updated, the in-service queue must be expired and
 * rescheduled, and bfqq must be scheduled too. This is one of the
 * most costly operations (in future versions, the scheduling
 * mechanism may be re-designed in such a way to make it possible to
 * know whether preemption is needed without needing to update service
 * trees). In addition, queue preemptions almost always cause random
 * I/O, and thus loss of throughput. Because of these facts, the next
 * function adopts the following simple scheme to avoid both costly
 * operations and too frequent preemptions: it requests the expiration
 * of the in-service queue (unconditionally) only for queues that need
 * to recover a hole, or that either are weight-raised or deserve to
 * be weight-raised.
 */
static bool bfq_bfqq_update_budg_for_activation(struct bfq_data *bfqd,
						struct bfq_queue *bfqq,
						bool arrived_in_time,
						bool wr_or_deserves_wr)
{
	struct bfq_entity *entity = &bfqq->entity;

	if (bfq_bfqq_non_blocking_wait_rq(bfqq) && arrived_in_time) {
		/*
		 * We do not clear the flag non_blocking_wait_rq here, as
		 * the latter is used in bfq_activate_bfqq to signal
		 * that timestamps need to be back-shifted (and is
		 * cleared right after).
		 */

		/*
		 * In next assignment we rely on that either
		 * entity->service or entity->budget are not updated
		 * on expiration if bfqq is empty (see
		 * __bfq_bfqq_recalc_budget). Thus both quantities
		 * remain unchanged after such an expiration, and the
		 * following statement therefore assigns to
		 * entity->budget the remaining budget on such an
		 * expiration. For clarity, entity->service is not
		 * updated on expiration in any case, and, in normal
		 * operation, is reset only when bfqq is selected for
		 * service (see bfq_get_next_queue).
		 */
		entity->budget = min_t(unsigned long,
				       bfq_bfqq_budget_left(bfqq),
				       bfqq->max_budget);

		return true;
	}

	entity->budget = max_t(unsigned long, bfqq->max_budget,
			       bfq_serv_to_charge(bfqq->next_rq, bfqq));
	bfq_clear_bfqq_non_blocking_wait_rq(bfqq);
	return wr_or_deserves_wr;
}
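
/*
 * Numerical example of the first branch above: if bfqq was expired with
 * entity->budget == 8192 and entity->service == 6144 sectors, and its new
 * request arrives in time while bfqq was waiting for it, the budget becomes
 * min(8192 - 6144, bfqq->max_budget) = 2048 sectors (assuming max_budget is
 * larger), i.e., exactly the budget that was left unused at the last
 * expiration.
 */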
1220
4baa8bb1
PV
1221/*
1222 * Return the farthest future time instant according to jiffies
1223 * macros.
1224 */
1225static unsigned long bfq_greatest_from_now(void)
1226{
1227 return jiffies + MAX_JIFFY_OFFSET;
1228}
1229
1230/*
1231 * Return the farthest past time instant according to jiffies
1232 * macros.
1233 */
1234static unsigned long bfq_smallest_from_now(void)
1235{
1236 return jiffies - MAX_JIFFY_OFFSET;
1237}
1238
44e44a1b
PV
1239static void bfq_update_bfqq_wr_on_rq_arrival(struct bfq_data *bfqd,
1240 struct bfq_queue *bfqq,
1241 unsigned int old_wr_coeff,
1242 bool wr_or_deserves_wr,
77b7dcea 1243 bool interactive,
e1b2324d 1244 bool in_burst,
77b7dcea 1245 bool soft_rt)
44e44a1b
PV
1246{
1247 if (old_wr_coeff == 1 && wr_or_deserves_wr) {
1248 /* start a weight-raising period */
77b7dcea
PV
1249 if (interactive) {
1250 bfqq->wr_coeff = bfqd->bfq_wr_coeff;
1251 bfqq->wr_cur_max_time = bfq_wr_duration(bfqd);
1252 } else {
4baa8bb1
PV
1253 /*
1254 * No interactive weight raising in progress
1255 * here: assign minus infinity to
1256 * wr_start_at_switch_to_srt, to make sure
1257 * that, at the end of the soft-real-time
1258 * weight raising periods that is starting
1259 * now, no interactive weight-raising period
1260 * may be wrongly considered as still in
1261 * progress (and thus actually started by
1262 * mistake).
1263 */
1264 bfqq->wr_start_at_switch_to_srt =
1265 bfq_smallest_from_now();
77b7dcea
PV
1266 bfqq->wr_coeff = bfqd->bfq_wr_coeff *
1267 BFQ_SOFTRT_WEIGHT_FACTOR;
1268 bfqq->wr_cur_max_time =
1269 bfqd->bfq_wr_rt_max_time;
1270 }
44e44a1b
PV
1271
1272 /*
1273 * If needed, further reduce budget to make sure it is
1274 * close to bfqq's backlog, so as to reduce the
1275 * scheduling-error component due to a too large
1276 * budget. Do not care about throughput consequences,
1277 * but only about latency. Finally, do not assign a
1278 * too small budget either, to avoid increasing
1279 * latency by causing too frequent expirations.
1280 */
1281 bfqq->entity.budget = min_t(unsigned long,
1282 bfqq->entity.budget,
1283 2 * bfq_min_budget(bfqd));
1284 } else if (old_wr_coeff > 1) {
77b7dcea
PV
1285 if (interactive) { /* update wr coeff and duration */
1286 bfqq->wr_coeff = bfqd->bfq_wr_coeff;
1287 bfqq->wr_cur_max_time = bfq_wr_duration(bfqd);
e1b2324d
AA
1288 } else if (in_burst)
1289 bfqq->wr_coeff = 1;
1290 else if (soft_rt) {
77b7dcea
PV
1291 /*
1292 * The application is now or still meeting the
1293 * requirements for being deemed soft rt. We
1294 * can then correctly and safely (re)charge
1295 * the weight-raising duration for the
1296 * application with the weight-raising
1297 * duration for soft rt applications.
1298 *
1299 * In particular, doing this recharge now, i.e.,
1300 * before the weight-raising period for the
1301 * application finishes, reduces the probability
1302 * of the following negative scenario:
1303 * 1) the weight of a soft rt application is
1304 * raised at startup (as for any newly
1305 * created application),
1306 * 2) since the application is not interactive,
1307 * at a certain time weight-raising is
1308 * stopped for the application,
1309 * 3) at that time the application happens to
1310 * still have pending requests, and hence
1311 * is destined to not have a chance to be
1312 * deemed soft rt before these requests are
1313 * completed (see the comments to the
1314 * function bfq_bfqq_softrt_next_start()
1315 * for details on soft rt detection),
1316 * 4) these pending requests experience a high
1317 * latency because the application is not
1318 * weight-raised while they are pending.
1319 */
1320 if (bfqq->wr_cur_max_time !=
1321 bfqd->bfq_wr_rt_max_time) {
1322 bfqq->wr_start_at_switch_to_srt =
1323 bfqq->last_wr_start_finish;
1324
1325 bfqq->wr_cur_max_time =
1326 bfqd->bfq_wr_rt_max_time;
1327 bfqq->wr_coeff = bfqd->bfq_wr_coeff *
1328 BFQ_SOFTRT_WEIGHT_FACTOR;
1329 }
1330 bfqq->last_wr_start_finish = jiffies;
1331 }
44e44a1b
PV
1332 }
1333}

static bool bfq_bfqq_idle_for_long_time(struct bfq_data *bfqd,
					struct bfq_queue *bfqq)
{
	return bfqq->dispatched == 0 &&
		time_is_before_jiffies(
			bfqq->budget_timeout +
			bfqd->bfq_wr_min_idle_time);
}

static void bfq_bfqq_handle_idle_busy_switch(struct bfq_data *bfqd,
					     struct bfq_queue *bfqq,
					     int old_wr_coeff,
					     struct request *rq,
					     bool *interactive)
{
	bool soft_rt, in_burst, wr_or_deserves_wr,
		bfqq_wants_to_preempt,
		idle_for_long_time = bfq_bfqq_idle_for_long_time(bfqd, bfqq),
		/*
		 * See the comments on
		 * bfq_bfqq_update_budg_for_activation for
		 * details on the usage of the next variable.
		 */
		arrived_in_time = ktime_get_ns() <=
			bfqq->ttime.last_end_request +
			bfqd->bfq_slice_idle * 3;


	/*
	 * bfqq deserves to be weight-raised if:
	 * - it is sync,
	 * - it does not belong to a large burst,
	 * - it has been idle for enough time or is soft real-time,
	 * - is linked to a bfq_io_cq (it is not shared in any sense).
	 */
	in_burst = bfq_bfqq_in_large_burst(bfqq);
	soft_rt = bfqd->bfq_wr_max_softrt_rate > 0 &&
		!in_burst &&
		time_is_before_jiffies(bfqq->soft_rt_next_start);
	*interactive = !in_burst && idle_for_long_time;
	wr_or_deserves_wr = bfqd->low_latency &&
		(bfqq->wr_coeff > 1 ||
		 (bfq_bfqq_sync(bfqq) &&
		  bfqq->bic && (*interactive || soft_rt)));

	/*
	 * Using the last flag, update budget and check whether bfqq
	 * may want to preempt the in-service queue.
	 */
	bfqq_wants_to_preempt =
		bfq_bfqq_update_budg_for_activation(bfqd, bfqq,
						    arrived_in_time,
						    wr_or_deserves_wr);

	/*
	 * If bfqq happened to be activated in a burst, but has been
	 * idle for much more than an interactive queue, then we
	 * assume that, in the overall I/O initiated in the burst, the
	 * I/O associated with bfqq is finished. So bfqq does not need
	 * to be treated as a queue belonging to a burst
	 * anymore. Accordingly, we reset bfqq's in_large_burst flag
	 * if set, and remove bfqq from the burst list if it's
	 * there. We do not decrement burst_size, because the fact
	 * that bfqq does not need to belong to the burst list any
	 * more does not invalidate the fact that bfqq was created in
	 * a burst.
	 */
	if (likely(!bfq_bfqq_just_created(bfqq)) &&
	    idle_for_long_time &&
	    time_is_before_jiffies(
		    bfqq->budget_timeout +
		    msecs_to_jiffies(10000))) {
		hlist_del_init(&bfqq->burst_list_node);
		bfq_clear_bfqq_in_large_burst(bfqq);
	}

	bfq_clear_bfqq_just_created(bfqq);


	if (!bfq_bfqq_IO_bound(bfqq)) {
		if (arrived_in_time) {
			bfqq->requests_within_timer++;
			if (bfqq->requests_within_timer >=
			    bfqd->bfq_requests_within_timer)
				bfq_mark_bfqq_IO_bound(bfqq);
		} else
			bfqq->requests_within_timer = 0;
	}

	if (bfqd->low_latency) {
		if (unlikely(time_is_after_jiffies(bfqq->split_time)))
			/* wraparound */
			bfqq->split_time =
				jiffies - bfqd->bfq_wr_min_idle_time - 1;

		if (time_is_before_jiffies(bfqq->split_time +
					   bfqd->bfq_wr_min_idle_time)) {
			bfq_update_bfqq_wr_on_rq_arrival(bfqd, bfqq,
							 old_wr_coeff,
							 wr_or_deserves_wr,
							 *interactive,
							 in_burst,
							 soft_rt);

			if (old_wr_coeff != bfqq->wr_coeff)
				bfqq->entity.prio_changed = 1;
		}
	}

	bfqq->last_idle_bklogged = jiffies;
	bfqq->service_from_backlogged = 0;
	bfq_clear_bfqq_softrt_update(bfqq);

	bfq_add_bfqq_busy(bfqd, bfqq);

	/*
	 * Expire in-service queue only if preemption may be needed
	 * for guarantees. In this respect, the function
	 * next_queue_may_preempt just checks a simple, necessary
	 * condition, and not a sufficient condition based on
	 * timestamps. In fact, for the latter condition to be
	 * evaluated, timestamps would need first to be updated, and
	 * this operation is quite costly (see the comments on the
	 * function bfq_bfqq_update_budg_for_activation).
	 */
	if (bfqd->in_service_queue && bfqq_wants_to_preempt &&
	    bfqd->in_service_queue->wr_coeff < bfqq->wr_coeff &&
	    next_queue_may_preempt(bfqd))
		bfq_bfqq_expire(bfqd, bfqd->in_service_queue,
				false, BFQQE_PREEMPTED);
}
1466
1467static void bfq_add_request(struct request *rq)
1468{
1469 struct bfq_queue *bfqq = RQ_BFQQ(rq);
1470 struct bfq_data *bfqd = bfqq->bfqd;
1471 struct request *next_rq, *prev;
44e44a1b
PV
1472 unsigned int old_wr_coeff = bfqq->wr_coeff;
1473 bool interactive = false;
aee69d78
PV
1474
1475 bfq_log_bfqq(bfqd, bfqq, "add_request %d", rq_is_sync(rq));
1476 bfqq->queued[rq_is_sync(rq)]++;
1477 bfqd->queued++;
1478
1479 elv_rb_add(&bfqq->sort_list, rq);
1480
1481 /*
1482 * Check if this request is a better next-serve candidate.
1483 */
1484 prev = bfqq->next_rq;
1485 next_rq = bfq_choose_req(bfqd, bfqq->next_rq, rq, bfqd->last_position);
1486 bfqq->next_rq = next_rq;
1487
36eca894
AA
1488 /*
1489 * Adjust priority tree position, if next_rq changes.
1490 */
1491 if (prev != bfqq->next_rq)
1492 bfq_pos_tree_add_move(bfqd, bfqq);
1493
aee69d78 1494 if (!bfq_bfqq_busy(bfqq)) /* switching to busy ... */
44e44a1b
PV
1495 bfq_bfqq_handle_idle_busy_switch(bfqd, bfqq, old_wr_coeff,
1496 rq, &interactive);
1497 else {
1498 if (bfqd->low_latency && old_wr_coeff == 1 && !rq_is_sync(rq) &&
1499 time_is_before_jiffies(
1500 bfqq->last_wr_start_finish +
1501 bfqd->bfq_wr_min_inter_arr_async)) {
1502 bfqq->wr_coeff = bfqd->bfq_wr_coeff;
1503 bfqq->wr_cur_max_time = bfq_wr_duration(bfqd);
1504
cfd69712 1505 bfqd->wr_busy_queues++;
44e44a1b
PV
1506 bfqq->entity.prio_changed = 1;
1507 }
1508 if (prev != bfqq->next_rq)
1509 bfq_updated_next_req(bfqd, bfqq);
1510 }
1511
1512 /*
1513 * Assign jiffies to last_wr_start_finish in the following
1514 * cases:
1515 *
1516 * . if bfqq is not going to be weight-raised, because, for
1517 * non weight-raised queues, last_wr_start_finish stores the
1518 * arrival time of the last request; as of now, this piece
1519 * of information is used only for deciding whether to
1520 * weight-raise async queues
1521 *
1522 * . if bfqq is not weight-raised, because, if bfqq is now
1523 * switching to weight-raised, then last_wr_start_finish
1524 * stores the time when weight-raising starts
1525 *
1526 * . if bfqq is interactive, because, regardless of whether
1527 * bfqq is currently weight-raised, the weight-raising
1528 * period must start or restart (this case is considered
1529 * separately because it is not detected by the above
1530 * conditions, if bfqq is already weight-raised)
77b7dcea
PV
1531 *
1532 * last_wr_start_finish has to be updated also if bfqq is soft
1533 * real-time, because the weight-raising period is constantly
1534 * restarted on idle-to-busy transitions for these queues, but
1535 * this is already done in bfq_bfqq_handle_idle_busy_switch if
1536 * needed.
44e44a1b
PV
1537 */
1538 if (bfqd->low_latency &&
1539 (old_wr_coeff == 1 || bfqq->wr_coeff == 1 || interactive))
1540 bfqq->last_wr_start_finish = jiffies;
aee69d78
PV
1541}
1542
1543static struct request *bfq_find_rq_fmerge(struct bfq_data *bfqd,
1544 struct bio *bio,
1545 struct request_queue *q)
1546{
1547 struct bfq_queue *bfqq = bfqd->bio_bfqq;
1548
1549
1550 if (bfqq)
1551 return elv_rb_find(&bfqq->sort_list, bio_end_sector(bio));
1552
1553 return NULL;
1554}
1555
ab0e43e9
PV
1556static sector_t get_sdist(sector_t last_pos, struct request *rq)
1557{
1558 if (last_pos)
1559 return abs(blk_rq_pos(rq) - last_pos);
1560
1561 return 0;
1562}
1563
aee69d78
PV
1564#if 0 /* Still not clear if we can do without next two functions */
1565static void bfq_activate_request(struct request_queue *q, struct request *rq)
1566{
1567 struct bfq_data *bfqd = q->elevator->elevator_data;
1568
1569 bfqd->rq_in_driver++;
aee69d78
PV
1570}
1571
1572static void bfq_deactivate_request(struct request_queue *q, struct request *rq)
1573{
1574 struct bfq_data *bfqd = q->elevator->elevator_data;
1575
1576 bfqd->rq_in_driver--;
1577}
1578#endif
1579
1580static void bfq_remove_request(struct request_queue *q,
1581 struct request *rq)
1582{
1583 struct bfq_queue *bfqq = RQ_BFQQ(rq);
1584 struct bfq_data *bfqd = bfqq->bfqd;
1585 const int sync = rq_is_sync(rq);
1586
1587 if (bfqq->next_rq == rq) {
1588 bfqq->next_rq = bfq_find_next_rq(bfqd, bfqq, rq);
1589 bfq_updated_next_req(bfqd, bfqq);
1590 }
1591
1592 if (rq->queuelist.prev != &rq->queuelist)
1593 list_del_init(&rq->queuelist);
1594 bfqq->queued[sync]--;
1595 bfqd->queued--;
1596 elv_rb_del(&bfqq->sort_list, rq);
1597
1598 elv_rqhash_del(q, rq);
1599 if (q->last_merge == rq)
1600 q->last_merge = NULL;
1601
1602 if (RB_EMPTY_ROOT(&bfqq->sort_list)) {
1603 bfqq->next_rq = NULL;
1604
1605 if (bfq_bfqq_busy(bfqq) && bfqq != bfqd->in_service_queue) {
e21b7a0b 1606 bfq_del_bfqq_busy(bfqd, bfqq, false);
aee69d78
PV
1607 /*
1608 * bfqq emptied. In normal operation, when
1609 * bfqq is empty, bfqq->entity.service and
1610 * bfqq->entity.budget must contain,
1611 * respectively, the service received and the
1612 * budget used last time bfqq emptied. These
1613 * facts do not hold in this case, as at least
1614 * this last removal occurred while bfqq is
1615 * not in service. To avoid inconsistencies,
1616 * reset both bfqq->entity.service and
 2617	 * bfqq->entity.budget, if bfqq still has a
1618 * process that may issue I/O requests to it.
1619 */
1620 bfqq->entity.budget = bfqq->entity.service = 0;
1621 }
36eca894
AA
1622
1623 /*
1624 * Remove queue from request-position tree as it is empty.
1625 */
1626 if (bfqq->pos_root) {
1627 rb_erase(&bfqq->pos_node, bfqq->pos_root);
1628 bfqq->pos_root = NULL;
1629 }
05e90283
PV
1630 } else {
1631 bfq_pos_tree_add_move(bfqd, bfqq);
aee69d78
PV
1632 }
1633
1634 if (rq->cmd_flags & REQ_META)
1635 bfqq->meta_pending--;
e21b7a0b 1636
aee69d78
PV
1637}
1638
1639static bool bfq_bio_merge(struct blk_mq_hw_ctx *hctx, struct bio *bio)
1640{
1641 struct request_queue *q = hctx->queue;
1642 struct bfq_data *bfqd = q->elevator->elevator_data;
1643 struct request *free = NULL;
1644 /*
1645 * bfq_bic_lookup grabs the queue_lock: invoke it now and
1646 * store its return value for later use, to avoid nesting
1647 * queue_lock inside the bfqd->lock. We assume that the bic
1648 * returned by bfq_bic_lookup does not go away before
1649 * bfqd->lock is taken.
1650 */
1651 struct bfq_io_cq *bic = bfq_bic_lookup(bfqd, current->io_context, q);
1652 bool ret;
1653
1654 spin_lock_irq(&bfqd->lock);
1655
1656 if (bic)
1657 bfqd->bio_bfqq = bic_to_bfqq(bic, op_is_sync(bio->bi_opf));
1658 else
1659 bfqd->bio_bfqq = NULL;
1660 bfqd->bio_bic = bic;
1661
1662 ret = blk_mq_sched_try_merge(q, bio, &free);
1663
1664 if (free)
1665 blk_mq_free_request(free);
1666 spin_unlock_irq(&bfqd->lock);
1667
1668 return ret;
1669}
1670
1671static int bfq_request_merge(struct request_queue *q, struct request **req,
1672 struct bio *bio)
1673{
1674 struct bfq_data *bfqd = q->elevator->elevator_data;
1675 struct request *__rq;
1676
1677 __rq = bfq_find_rq_fmerge(bfqd, bio, q);
1678 if (__rq && elv_bio_merge_ok(__rq, bio)) {
1679 *req = __rq;
1680 return ELEVATOR_FRONT_MERGE;
1681 }
1682
1683 return ELEVATOR_NO_MERGE;
1684}
1685
1686static void bfq_request_merged(struct request_queue *q, struct request *req,
1687 enum elv_merge type)
1688{
1689 if (type == ELEVATOR_FRONT_MERGE &&
1690 rb_prev(&req->rb_node) &&
1691 blk_rq_pos(req) <
1692 blk_rq_pos(container_of(rb_prev(&req->rb_node),
1693 struct request, rb_node))) {
1694 struct bfq_queue *bfqq = RQ_BFQQ(req);
1695 struct bfq_data *bfqd = bfqq->bfqd;
1696 struct request *prev, *next_rq;
1697
1698 /* Reposition request in its sort_list */
1699 elv_rb_del(&bfqq->sort_list, req);
1700 elv_rb_add(&bfqq->sort_list, req);
1701
1702 /* Choose next request to be served for bfqq */
1703 prev = bfqq->next_rq;
1704 next_rq = bfq_choose_req(bfqd, bfqq->next_rq, req,
1705 bfqd->last_position);
1706 bfqq->next_rq = next_rq;
1707 /*
36eca894
AA
1708 * If next_rq changes, update both the queue's budget to
1709 * fit the new request and the queue's position in its
1710 * rq_pos_tree.
aee69d78 1711 */
36eca894 1712 if (prev != bfqq->next_rq) {
aee69d78 1713 bfq_updated_next_req(bfqd, bfqq);
36eca894
AA
1714 bfq_pos_tree_add_move(bfqd, bfqq);
1715 }
aee69d78
PV
1716 }
1717}
1718
1719static void bfq_requests_merged(struct request_queue *q, struct request *rq,
1720 struct request *next)
1721{
1722 struct bfq_queue *bfqq = RQ_BFQQ(rq), *next_bfqq = RQ_BFQQ(next);
1723
1724 if (!RB_EMPTY_NODE(&rq->rb_node))
e21b7a0b 1725 goto end;
aee69d78
PV
1726 spin_lock_irq(&bfqq->bfqd->lock);
1727
1728 /*
1729 * If next and rq belong to the same bfq_queue and next is older
1730 * than rq, then reposition rq in the fifo (by substituting next
1731 * with rq). Otherwise, if next and rq belong to different
1732 * bfq_queues, never reposition rq: in fact, we would have to
1733 * reposition it with respect to next's position in its own fifo,
1734 * which would most certainly be too expensive with respect to
1735 * the benefits.
1736 */
1737 if (bfqq == next_bfqq &&
1738 !list_empty(&rq->queuelist) && !list_empty(&next->queuelist) &&
1739 next->fifo_time < rq->fifo_time) {
1740 list_del_init(&rq->queuelist);
1741 list_replace_init(&next->queuelist, &rq->queuelist);
1742 rq->fifo_time = next->fifo_time;
1743 }
1744
1745 if (bfqq->next_rq == next)
1746 bfqq->next_rq = rq;
1747
1748 bfq_remove_request(q, next);
614822f8 1749 bfqg_stats_update_io_remove(bfqq_group(bfqq), next->cmd_flags);
aee69d78
PV
1750
1751 spin_unlock_irq(&bfqq->bfqd->lock);
e21b7a0b
AA
1752end:
1753 bfqg_stats_update_io_merged(bfqq_group(bfqq), next->cmd_flags);
aee69d78
PV
1754}
1755
44e44a1b
PV
1756/* Must be called with bfqq != NULL */
1757static void bfq_bfqq_end_wr(struct bfq_queue *bfqq)
1758{
cfd69712
PV
1759 if (bfq_bfqq_busy(bfqq))
1760 bfqq->bfqd->wr_busy_queues--;
44e44a1b
PV
1761 bfqq->wr_coeff = 1;
1762 bfqq->wr_cur_max_time = 0;
77b7dcea 1763 bfqq->last_wr_start_finish = jiffies;
44e44a1b
PV
1764 /*
1765 * Trigger a weight change on the next invocation of
1766 * __bfq_entity_update_weight_prio.
1767 */
1768 bfqq->entity.prio_changed = 1;
1769}
1770
ea25da48
PV
1771void bfq_end_wr_async_queues(struct bfq_data *bfqd,
1772 struct bfq_group *bfqg)
44e44a1b
PV
1773{
1774 int i, j;
1775
1776 for (i = 0; i < 2; i++)
1777 for (j = 0; j < IOPRIO_BE_NR; j++)
1778 if (bfqg->async_bfqq[i][j])
1779 bfq_bfqq_end_wr(bfqg->async_bfqq[i][j]);
1780 if (bfqg->async_idle_bfqq)
1781 bfq_bfqq_end_wr(bfqg->async_idle_bfqq);
1782}
1783
1784static void bfq_end_wr(struct bfq_data *bfqd)
1785{
1786 struct bfq_queue *bfqq;
1787
1788 spin_lock_irq(&bfqd->lock);
1789
1790 list_for_each_entry(bfqq, &bfqd->active_list, bfqq_list)
1791 bfq_bfqq_end_wr(bfqq);
1792 list_for_each_entry(bfqq, &bfqd->idle_list, bfqq_list)
1793 bfq_bfqq_end_wr(bfqq);
1794 bfq_end_wr_async(bfqd);
1795
1796 spin_unlock_irq(&bfqd->lock);
1797}
1798
36eca894
AA
1799static sector_t bfq_io_struct_pos(void *io_struct, bool request)
1800{
1801 if (request)
1802 return blk_rq_pos(io_struct);
1803 else
1804 return ((struct bio *)io_struct)->bi_iter.bi_sector;
1805}
1806
1807static int bfq_rq_close_to_sector(void *io_struct, bool request,
1808 sector_t sector)
1809{
1810 return abs(bfq_io_struct_pos(io_struct, request) - sector) <=
1811 BFQQ_CLOSE_THR;
1812}
1813
1814static struct bfq_queue *bfqq_find_close(struct bfq_data *bfqd,
1815 struct bfq_queue *bfqq,
1816 sector_t sector)
1817{
1818 struct rb_root *root = &bfq_bfqq_to_bfqg(bfqq)->rq_pos_tree;
1819 struct rb_node *parent, *node;
1820 struct bfq_queue *__bfqq;
1821
1822 if (RB_EMPTY_ROOT(root))
1823 return NULL;
1824
1825 /*
1826 * First, if we find a request starting at the end of the last
1827 * request, choose it.
1828 */
1829 __bfqq = bfq_rq_pos_tree_lookup(bfqd, root, sector, &parent, NULL);
1830 if (__bfqq)
1831 return __bfqq;
1832
1833 /*
1834 * If the exact sector wasn't found, the parent of the NULL leaf
1835 * will contain the closest sector (rq_pos_tree sorted by
1836 * next_request position).
1837 */
1838 __bfqq = rb_entry(parent, struct bfq_queue, pos_node);
1839 if (bfq_rq_close_to_sector(__bfqq->next_rq, true, sector))
1840 return __bfqq;
1841
1842 if (blk_rq_pos(__bfqq->next_rq) < sector)
1843 node = rb_next(&__bfqq->pos_node);
1844 else
1845 node = rb_prev(&__bfqq->pos_node);
1846 if (!node)
1847 return NULL;
1848
1849 __bfqq = rb_entry(node, struct bfq_queue, pos_node);
1850 if (bfq_rq_close_to_sector(__bfqq->next_rq, true, sector))
1851 return __bfqq;
1852
1853 return NULL;
1854}
1855
1856static struct bfq_queue *bfq_find_close_cooperator(struct bfq_data *bfqd,
1857 struct bfq_queue *cur_bfqq,
1858 sector_t sector)
1859{
1860 struct bfq_queue *bfqq;
1861
1862 /*
1863 * We shall notice if some of the queues are cooperating,
1864 * e.g., working closely on the same area of the device. In
1865 * that case, we can group them together and: 1) don't waste
1866 * time idling, and 2) serve the union of their requests in
1867 * the best possible order for throughput.
1868 */
1869 bfqq = bfqq_find_close(bfqd, cur_bfqq, sector);
1870 if (!bfqq || bfqq == cur_bfqq)
1871 return NULL;
1872
1873 return bfqq;
1874}
1875
1876static struct bfq_queue *
1877bfq_setup_merge(struct bfq_queue *bfqq, struct bfq_queue *new_bfqq)
1878{
1879 int process_refs, new_process_refs;
1880 struct bfq_queue *__bfqq;
1881
1882 /*
1883 * If there are no process references on the new_bfqq, then it is
1884 * unsafe to follow the ->new_bfqq chain as other bfqq's in the chain
1885 * may have dropped their last reference (not just their last process
1886 * reference).
1887 */
1888 if (!bfqq_process_refs(new_bfqq))
1889 return NULL;
1890
1891 /* Avoid a circular list and skip interim queue merges. */
1892 while ((__bfqq = new_bfqq->new_bfqq)) {
1893 if (__bfqq == bfqq)
1894 return NULL;
1895 new_bfqq = __bfqq;
1896 }
1897
1898 process_refs = bfqq_process_refs(bfqq);
1899 new_process_refs = bfqq_process_refs(new_bfqq);
1900 /*
1901 * If the process for the bfqq has gone away, there is no
1902 * sense in merging the queues.
1903 */
1904 if (process_refs == 0 || new_process_refs == 0)
1905 return NULL;
1906
1907 bfq_log_bfqq(bfqq->bfqd, bfqq, "scheduling merge with queue %d",
1908 new_bfqq->pid);
1909
1910 /*
1911 * Merging is just a redirection: the requests of the process
1912 * owning one of the two queues are redirected to the other queue.
1913 * The latter queue, in its turn, is set as shared if this is the
1914 * first time that the requests of some process are redirected to
1915 * it.
1916 *
6fa3e8d3
PV
1917 * We redirect bfqq to new_bfqq and not the opposite, because
1918 * we are in the context of the process owning bfqq, thus we
1919 * have the io_cq of this process. So we can immediately
1920 * configure this io_cq to redirect the requests of the
1921 * process to new_bfqq. In contrast, the io_cq of new_bfqq is
1922 * not available any more (new_bfqq->bic == NULL).
36eca894 1923 *
6fa3e8d3
PV
1924 * Anyway, even in case new_bfqq coincides with the in-service
 1925	 * queue, redirecting requests to the in-service queue is the
1926 * best option, as we feed the in-service queue with new
1927 * requests close to the last request served and, by doing so,
1928 * are likely to increase the throughput.
36eca894
AA
1929 */
1930 bfqq->new_bfqq = new_bfqq;
1931 new_bfqq->ref += process_refs;
1932 return new_bfqq;
1933}
1934
1935static bool bfq_may_be_close_cooperator(struct bfq_queue *bfqq,
1936 struct bfq_queue *new_bfqq)
1937{
1938 if (bfq_class_idle(bfqq) || bfq_class_idle(new_bfqq) ||
1939 (bfqq->ioprio_class != new_bfqq->ioprio_class))
1940 return false;
1941
1942 /*
1943 * If either of the queues has already been detected as seeky,
1944 * then merging it with the other queue is unlikely to lead to
1945 * sequential I/O.
1946 */
1947 if (BFQQ_SEEKY(bfqq) || BFQQ_SEEKY(new_bfqq))
1948 return false;
1949
1950 /*
1951 * Interleaved I/O is known to be done by (some) applications
1952 * only for reads, so it does not make sense to merge async
1953 * queues.
1954 */
1955 if (!bfq_bfqq_sync(bfqq) || !bfq_bfqq_sync(new_bfqq))
1956 return false;
1957
1958 return true;
1959}
1960
1961/*
1962 * If this function returns true, then bfqq cannot be merged. The idea
1963 * is that true cooperation happens very early after processes start
1964 * to do I/O. Usually, late cooperations are just accidental false
1965 * positives. In case bfqq is weight-raised, such false positives
1966 * would evidently degrade latency guarantees for bfqq.
1967 */
1968static bool wr_from_too_long(struct bfq_queue *bfqq)
1969{
1970 return bfqq->wr_coeff > 1 &&
1971 time_is_before_jiffies(bfqq->last_wr_start_finish +
1972 msecs_to_jiffies(100));
1973}
1974
1975/*
1976 * Attempt to schedule a merge of bfqq with the currently in-service
1977 * queue or with a close queue among the scheduled queues. Return
1978 * NULL if no merge was scheduled, a pointer to the shared bfq_queue
1979 * structure otherwise.
1980 *
1981 * The OOM queue is not allowed to participate to cooperation: in fact, since
1982 * the requests temporarily redirected to the OOM queue could be redirected
1983 * again to dedicated queues at any time, the state needed to correctly
1984 * handle merging with the OOM queue would be quite complex and expensive
 1985 * to maintain. Besides, in such a critical condition as an out-of-memory
 1986 * one, the benefits of queue merging may be of little relevance, or even negligible.
1987 *
1988 * Weight-raised queues can be merged only if their weight-raising
1989 * period has just started. In fact cooperating processes are usually
1990 * started together. Thus, with this filter we avoid false positives
1991 * that would jeopardize low-latency guarantees.
1992 *
1993 * WARNING: queue merging may impair fairness among non-weight raised
1994 * queues, for at least two reasons: 1) the original weight of a
1995 * merged queue may change during the merged state, 2) even being the
1996 * weight the same, a merged queue may be bloated with many more
1997 * requests than the ones produced by its originally-associated
1998 * process.
1999 */
2000static struct bfq_queue *
2001bfq_setup_cooperator(struct bfq_data *bfqd, struct bfq_queue *bfqq,
2002 void *io_struct, bool request)
2003{
2004 struct bfq_queue *in_service_bfqq, *new_bfqq;
2005
2006 if (bfqq->new_bfqq)
2007 return bfqq->new_bfqq;
2008
2009 if (!io_struct ||
2010 wr_from_too_long(bfqq) ||
2011 unlikely(bfqq == &bfqd->oom_bfqq))
2012 return NULL;
2013
2014 /* If there is only one backlogged queue, don't search. */
2015 if (bfqd->busy_queues == 1)
2016 return NULL;
2017
2018 in_service_bfqq = bfqd->in_service_queue;
2019
6fa3e8d3
PV
2020 if (!in_service_bfqq || in_service_bfqq == bfqq
2021 || wr_from_too_long(in_service_bfqq) ||
36eca894
AA
2022 unlikely(in_service_bfqq == &bfqd->oom_bfqq))
2023 goto check_scheduled;
2024
2025 if (bfq_rq_close_to_sector(io_struct, request, bfqd->last_position) &&
2026 bfqq->entity.parent == in_service_bfqq->entity.parent &&
2027 bfq_may_be_close_cooperator(bfqq, in_service_bfqq)) {
2028 new_bfqq = bfq_setup_merge(bfqq, in_service_bfqq);
2029 if (new_bfqq)
2030 return new_bfqq;
2031 }
2032 /*
2033 * Check whether there is a cooperator among currently scheduled
2034 * queues. The only thing we need is that the bio/request is not
2035 * NULL, as we need it to establish whether a cooperator exists.
2036 */
2037check_scheduled:
2038 new_bfqq = bfq_find_close_cooperator(bfqd, bfqq,
2039 bfq_io_struct_pos(io_struct, request));
2040
2041 if (new_bfqq && !wr_from_too_long(new_bfqq) &&
2042 likely(new_bfqq != &bfqd->oom_bfqq) &&
2043 bfq_may_be_close_cooperator(bfqq, new_bfqq))
2044 return bfq_setup_merge(bfqq, new_bfqq);
2045
2046 return NULL;
2047}
2048
2049static void bfq_bfqq_save_state(struct bfq_queue *bfqq)
2050{
2051 struct bfq_io_cq *bic = bfqq->bic;
2052
2053 /*
2054 * If !bfqq->bic, the queue is already shared or its requests
2055 * have already been redirected to a shared queue; both idle window
2056 * and weight raising state have already been saved. Do nothing.
2057 */
2058 if (!bic)
2059 return;
2060
2061 bic->saved_ttime = bfqq->ttime;
d5be3fef 2062 bic->saved_has_short_ttime = bfq_bfqq_has_short_ttime(bfqq);
36eca894 2063 bic->saved_IO_bound = bfq_bfqq_IO_bound(bfqq);
e1b2324d
AA
2064 bic->saved_in_large_burst = bfq_bfqq_in_large_burst(bfqq);
2065 bic->was_in_burst_list = !hlist_unhashed(&bfqq->burst_list_node);
894df937 2066 if (unlikely(bfq_bfqq_just_created(bfqq) &&
1be6e8a9
AR
2067 !bfq_bfqq_in_large_burst(bfqq) &&
2068 bfqq->bfqd->low_latency)) {
894df937
PV
2069 /*
2070 * bfqq being merged right after being created: bfqq
2071 * would have deserved interactive weight raising, but
2072 * did not make it to be set in a weight-raised state,
2073 * because of this early merge. Store directly the
2074 * weight-raising state that would have been assigned
 2075		 * to bfqq, so that bfqq does not unjustly fail
 2076		 * to enjoy weight raising if it is split soon.
2077 */
2078 bic->saved_wr_coeff = bfqq->bfqd->bfq_wr_coeff;
2079 bic->saved_wr_cur_max_time = bfq_wr_duration(bfqq->bfqd);
2080 bic->saved_last_wr_start_finish = jiffies;
2081 } else {
2082 bic->saved_wr_coeff = bfqq->wr_coeff;
2083 bic->saved_wr_start_at_switch_to_srt =
2084 bfqq->wr_start_at_switch_to_srt;
2085 bic->saved_last_wr_start_finish = bfqq->last_wr_start_finish;
2086 bic->saved_wr_cur_max_time = bfqq->wr_cur_max_time;
2087 }
36eca894
AA
2088}
2089
36eca894
AA
2090static void
2091bfq_merge_bfqqs(struct bfq_data *bfqd, struct bfq_io_cq *bic,
2092 struct bfq_queue *bfqq, struct bfq_queue *new_bfqq)
2093{
2094 bfq_log_bfqq(bfqd, bfqq, "merging with queue %lu",
2095 (unsigned long)new_bfqq->pid);
2096 /* Save weight raising and idle window of the merged queues */
2097 bfq_bfqq_save_state(bfqq);
2098 bfq_bfqq_save_state(new_bfqq);
2099 if (bfq_bfqq_IO_bound(bfqq))
2100 bfq_mark_bfqq_IO_bound(new_bfqq);
2101 bfq_clear_bfqq_IO_bound(bfqq);
2102
2103 /*
2104 * If bfqq is weight-raised, then let new_bfqq inherit
2105 * weight-raising. To reduce false positives, neglect the case
2106 * where bfqq has just been created, but has not yet made it
2107 * to be weight-raised (which may happen because EQM may merge
2108 * bfqq even before bfq_add_request is executed for the first
e1b2324d
AA
2109 * time for bfqq). Handling this case would however be very
2110 * easy, thanks to the flag just_created.
36eca894
AA
2111 */
2112 if (new_bfqq->wr_coeff == 1 && bfqq->wr_coeff > 1) {
2113 new_bfqq->wr_coeff = bfqq->wr_coeff;
2114 new_bfqq->wr_cur_max_time = bfqq->wr_cur_max_time;
2115 new_bfqq->last_wr_start_finish = bfqq->last_wr_start_finish;
2116 new_bfqq->wr_start_at_switch_to_srt =
2117 bfqq->wr_start_at_switch_to_srt;
2118 if (bfq_bfqq_busy(new_bfqq))
2119 bfqd->wr_busy_queues++;
2120 new_bfqq->entity.prio_changed = 1;
2121 }
2122
2123 if (bfqq->wr_coeff > 1) { /* bfqq has given its wr to new_bfqq */
2124 bfqq->wr_coeff = 1;
2125 bfqq->entity.prio_changed = 1;
2126 if (bfq_bfqq_busy(bfqq))
2127 bfqd->wr_busy_queues--;
2128 }
2129
2130 bfq_log_bfqq(bfqd, new_bfqq, "merge_bfqqs: wr_busy %d",
2131 bfqd->wr_busy_queues);
2132
36eca894
AA
2133 /*
2134 * Merge queues (that is, let bic redirect its requests to new_bfqq)
2135 */
2136 bic_set_bfqq(bic, new_bfqq, 1);
2137 bfq_mark_bfqq_coop(new_bfqq);
2138 /*
2139 * new_bfqq now belongs to at least two bics (it is a shared queue):
2140 * set new_bfqq->bic to NULL. bfqq either:
2141 * - does not belong to any bic any more, and hence bfqq->bic must
2142 * be set to NULL, or
2143 * - is a queue whose owning bics have already been redirected to a
2144 * different queue, hence the queue is destined to not belong to
2145 * any bic soon and bfqq->bic is already NULL (therefore the next
2146 * assignment causes no harm).
2147 */
2148 new_bfqq->bic = NULL;
2149 bfqq->bic = NULL;
2150 /* release process reference to bfqq */
2151 bfq_put_queue(bfqq);
2152}
2153
aee69d78
PV
2154static bool bfq_allow_bio_merge(struct request_queue *q, struct request *rq,
2155 struct bio *bio)
2156{
2157 struct bfq_data *bfqd = q->elevator->elevator_data;
2158 bool is_sync = op_is_sync(bio->bi_opf);
36eca894 2159 struct bfq_queue *bfqq = bfqd->bio_bfqq, *new_bfqq;
aee69d78
PV
2160
2161 /*
2162 * Disallow merge of a sync bio into an async request.
2163 */
2164 if (is_sync && !rq_is_sync(rq))
2165 return false;
2166
2167 /*
2168 * Lookup the bfqq that this bio will be queued with. Allow
2169 * merge only if rq is queued there.
2170 */
2171 if (!bfqq)
2172 return false;
2173
36eca894
AA
2174 /*
2175 * We take advantage of this function to perform an early merge
2176 * of the queues of possible cooperating processes.
2177 */
2178 new_bfqq = bfq_setup_cooperator(bfqd, bfqq, bio, false);
2179 if (new_bfqq) {
2180 /*
2181 * bic still points to bfqq, then it has not yet been
2182 * redirected to some other bfq_queue, and a queue
 2183		 * merge between bfqq and new_bfqq can be safely
 2184		 * fulfilled, i.e., bic can be redirected to new_bfqq
2185 * and bfqq can be put.
2186 */
2187 bfq_merge_bfqqs(bfqd, bfqd->bio_bic, bfqq,
2188 new_bfqq);
2189 /*
2190 * If we get here, bio will be queued into new_queue,
2191 * so use new_bfqq to decide whether bio and rq can be
2192 * merged.
2193 */
2194 bfqq = new_bfqq;
2195
2196 /*
 2197		 * Change also bfqd->bio_bfqq, as
2198 * bfqd->bio_bic now points to new_bfqq, and
2199 * this function may be invoked again (and then may
 2200		 * use again bfqd->bio_bfqq).
2201 */
2202 bfqd->bio_bfqq = bfqq;
2203 }
2204
aee69d78
PV
2205 return bfqq == RQ_BFQQ(rq);
2206}
2207
44e44a1b
PV
2208/*
2209 * Set the maximum time for the in-service queue to consume its
2210 * budget. This prevents seeky processes from lowering the throughput.
2211 * In practice, a time-slice service scheme is used with seeky
2212 * processes.
2213 */
2214static void bfq_set_budget_timeout(struct bfq_data *bfqd,
2215 struct bfq_queue *bfqq)
2216{
77b7dcea
PV
2217 unsigned int timeout_coeff;
2218
2219 if (bfqq->wr_cur_max_time == bfqd->bfq_wr_rt_max_time)
2220 timeout_coeff = 1;
2221 else
2222 timeout_coeff = bfqq->entity.weight / bfqq->entity.orig_weight;
2223
44e44a1b
PV
2224 bfqd->last_budget_start = ktime_get();
2225
2226 bfqq->budget_timeout = jiffies +
77b7dcea 2227 bfqd->bfq_timeout * timeout_coeff;
44e44a1b
PV
2228}
2229
aee69d78
PV
2230static void __bfq_set_in_service_queue(struct bfq_data *bfqd,
2231 struct bfq_queue *bfqq)
2232{
2233 if (bfqq) {
aee69d78
PV
2234 bfq_clear_bfqq_fifo_expire(bfqq);
2235
2236 bfqd->budgets_assigned = (bfqd->budgets_assigned * 7 + 256) / 8;
2237
77b7dcea
PV
2238 if (time_is_before_jiffies(bfqq->last_wr_start_finish) &&
2239 bfqq->wr_coeff > 1 &&
2240 bfqq->wr_cur_max_time == bfqd->bfq_wr_rt_max_time &&
2241 time_is_before_jiffies(bfqq->budget_timeout)) {
2242 /*
2243 * For soft real-time queues, move the start
2244 * of the weight-raising period forward by the
2245 * time the queue has not received any
2246 * service. Otherwise, a relatively long
2247 * service delay is likely to cause the
2248 * weight-raising period of the queue to end,
2249 * because of the short duration of the
2250 * weight-raising period of a soft real-time
2251 * queue. It is worth noting that this move
2252 * is not so dangerous for the other queues,
2253 * because soft real-time queues are not
2254 * greedy.
2255 *
2256 * To not add a further variable, we use the
2257 * overloaded field budget_timeout to
2258 * determine for how long the queue has not
2259 * received service, i.e., how much time has
2260 * elapsed since the queue expired. However,
2261 * this is a little imprecise, because
2262 * budget_timeout is set to jiffies if bfqq
2263 * not only expires, but also remains with no
2264 * request.
2265 */
2266 if (time_after(bfqq->budget_timeout,
2267 bfqq->last_wr_start_finish))
2268 bfqq->last_wr_start_finish +=
2269 jiffies - bfqq->budget_timeout;
2270 else
2271 bfqq->last_wr_start_finish = jiffies;
2272 }
2273
44e44a1b 2274 bfq_set_budget_timeout(bfqd, bfqq);
aee69d78
PV
2275 bfq_log_bfqq(bfqd, bfqq,
2276 "set_in_service_queue, cur-budget = %d",
2277 bfqq->entity.budget);
2278 }
2279
2280 bfqd->in_service_queue = bfqq;
2281}
2282
2283/*
2284 * Get and set a new queue for service.
2285 */
2286static struct bfq_queue *bfq_set_in_service_queue(struct bfq_data *bfqd)
2287{
2288 struct bfq_queue *bfqq = bfq_get_next_queue(bfqd);
2289
2290 __bfq_set_in_service_queue(bfqd, bfqq);
2291 return bfqq;
2292}
2293
aee69d78
PV
2294static void bfq_arm_slice_timer(struct bfq_data *bfqd)
2295{
2296 struct bfq_queue *bfqq = bfqd->in_service_queue;
aee69d78
PV
2297 u32 sl;
2298
aee69d78
PV
2299 bfq_mark_bfqq_wait_request(bfqq);
2300
2301 /*
2302 * We don't want to idle for seeks, but we do want to allow
2303 * fair distribution of slice time for a process doing back-to-back
 2304	 * seeks. So allow a little bit of time for it to submit a new rq.
2305 */
2306 sl = bfqd->bfq_slice_idle;
2307 /*
1de0c4cd
AA
2308 * Unless the queue is being weight-raised or the scenario is
2309 * asymmetric, grant only minimum idle time if the queue
2310 * is seeky. A long idling is preserved for a weight-raised
2311 * queue, or, more in general, in an asymmetric scenario,
2312 * because a long idling is needed for guaranteeing to a queue
2313 * its reserved share of the throughput (in particular, it is
2314 * needed if the queue has a higher weight than some other
2315 * queue).
aee69d78 2316 */
1de0c4cd
AA
2317 if (BFQQ_SEEKY(bfqq) && bfqq->wr_coeff == 1 &&
2318 bfq_symmetric_scenario(bfqd))
aee69d78
PV
2319 sl = min_t(u64, sl, BFQ_MIN_TT);
2320
2321 bfqd->last_idling_start = ktime_get();
2322 hrtimer_start(&bfqd->idle_slice_timer, ns_to_ktime(sl),
2323 HRTIMER_MODE_REL);
e21b7a0b 2324 bfqg_stats_set_start_idle_time(bfqq_group(bfqq));
aee69d78
PV
2325}
2326
ab0e43e9
PV
2327/*
2328 * In autotuning mode, max_budget is dynamically recomputed as the
2329 * amount of sectors transferred in timeout at the estimated peak
2330 * rate. This enables BFQ to utilize a full timeslice with a full
2331 * budget, even if the in-service queue is served at peak rate. And
2332 * this maximises throughput with sequential workloads.
2333 */
2334static unsigned long bfq_calc_max_budget(struct bfq_data *bfqd)
2335{
2336 return (u64)bfqd->peak_rate * USEC_PER_MSEC *
2337 jiffies_to_msecs(bfqd->bfq_timeout)>>BFQ_RATE_SHIFT;
2338}
2339
44e44a1b
PV
2340/*
2341 * Update parameters related to throughput and responsiveness, as a
2342 * function of the estimated peak rate. See comments on
2343 * bfq_calc_max_budget(), and on T_slow and T_fast arrays.
2344 */
2345static void update_thr_responsiveness_params(struct bfq_data *bfqd)
2346{
2347 int dev_type = blk_queue_nonrot(bfqd->queue);
2348
2349 if (bfqd->bfq_user_max_budget == 0)
2350 bfqd->bfq_max_budget =
2351 bfq_calc_max_budget(bfqd);
2352
2353 if (bfqd->device_speed == BFQ_BFQD_FAST &&
2354 bfqd->peak_rate < device_speed_thresh[dev_type]) {
2355 bfqd->device_speed = BFQ_BFQD_SLOW;
2356 bfqd->RT_prod = R_slow[dev_type] *
2357 T_slow[dev_type];
2358 } else if (bfqd->device_speed == BFQ_BFQD_SLOW &&
2359 bfqd->peak_rate > device_speed_thresh[dev_type]) {
2360 bfqd->device_speed = BFQ_BFQD_FAST;
2361 bfqd->RT_prod = R_fast[dev_type] *
2362 T_fast[dev_type];
2363 }
2364
2365 bfq_log(bfqd,
2366"dev_type %s dev_speed_class = %s (%llu sects/sec), thresh %llu setcs/sec",
2367 dev_type == 0 ? "ROT" : "NONROT",
2368 bfqd->device_speed == BFQ_BFQD_FAST ? "FAST" : "SLOW",
2369 bfqd->device_speed == BFQ_BFQD_FAST ?
2370 (USEC_PER_SEC*(u64)R_fast[dev_type])>>BFQ_RATE_SHIFT :
2371 (USEC_PER_SEC*(u64)R_slow[dev_type])>>BFQ_RATE_SHIFT,
2372 (USEC_PER_SEC*(u64)device_speed_thresh[dev_type])>>
2373 BFQ_RATE_SHIFT);
2374}
2375
ab0e43e9
PV
2376static void bfq_reset_rate_computation(struct bfq_data *bfqd,
2377 struct request *rq)
2378{
2379 if (rq != NULL) { /* new rq dispatch now, reset accordingly */
2380 bfqd->last_dispatch = bfqd->first_dispatch = ktime_get_ns();
2381 bfqd->peak_rate_samples = 1;
2382 bfqd->sequential_samples = 0;
2383 bfqd->tot_sectors_dispatched = bfqd->last_rq_max_size =
2384 blk_rq_sectors(rq);
2385 } else /* no new rq dispatched, just reset the number of samples */
2386 bfqd->peak_rate_samples = 0; /* full re-init on next disp. */
2387
2388 bfq_log(bfqd,
2389 "reset_rate_computation at end, sample %u/%u tot_sects %llu",
2390 bfqd->peak_rate_samples, bfqd->sequential_samples,
2391 bfqd->tot_sectors_dispatched);
2392}
2393
2394static void bfq_update_rate_reset(struct bfq_data *bfqd, struct request *rq)
2395{
2396 u32 rate, weight, divisor;
2397
2398 /*
2399 * For the convergence property to hold (see comments on
2400 * bfq_update_peak_rate()) and for the assessment to be
2401 * reliable, a minimum number of samples must be present, and
2402 * a minimum amount of time must have elapsed. If not so, do
2403 * not compute new rate. Just reset parameters, to get ready
2404 * for a new evaluation attempt.
2405 */
2406 if (bfqd->peak_rate_samples < BFQ_RATE_MIN_SAMPLES ||
2407 bfqd->delta_from_first < BFQ_RATE_MIN_INTERVAL)
2408 goto reset_computation;
2409
2410 /*
2411 * If a new request completion has occurred after last
2412 * dispatch, then, to approximate the rate at which requests
2413 * have been served by the device, it is more precise to
2414 * extend the observation interval to the last completion.
2415 */
2416 bfqd->delta_from_first =
2417 max_t(u64, bfqd->delta_from_first,
2418 bfqd->last_completion - bfqd->first_dispatch);
2419
2420 /*
2421 * Rate computed in sects/usec, and not sects/nsec, for
2422 * precision issues.
2423 */
2424 rate = div64_ul(bfqd->tot_sectors_dispatched<<BFQ_RATE_SHIFT,
2425 div_u64(bfqd->delta_from_first, NSEC_PER_USEC));
2426
2427 /*
2428 * Peak rate not updated if:
2429 * - the percentage of sequential dispatches is below 3/4 of the
2430 * total, and rate is below the current estimated peak rate
2431 * - rate is unreasonably high (> 20M sectors/sec)
2432 */
2433 if ((bfqd->sequential_samples < (3 * bfqd->peak_rate_samples)>>2 &&
2434 rate <= bfqd->peak_rate) ||
2435 rate > 20<<BFQ_RATE_SHIFT)
2436 goto reset_computation;
2437
2438 /*
2439 * We have to update the peak rate, at last! To this purpose,
2440 * we use a low-pass filter. We compute the smoothing constant
2441 * of the filter as a function of the 'weight' of the new
2442 * measured rate.
2443 *
2444 * As can be seen in next formulas, we define this weight as a
2445 * quantity proportional to how sequential the workload is,
2446 * and to how long the observation time interval is.
2447 *
2448 * The weight runs from 0 to 8. The maximum value of the
2449 * weight, 8, yields the minimum value for the smoothing
2450 * constant. At this minimum value for the smoothing constant,
2451 * the measured rate contributes for half of the next value of
2452 * the estimated peak rate.
2453 *
2454 * So, the first step is to compute the weight as a function
2455 * of how sequential the workload is. Note that the weight
2456 * cannot reach 9, because bfqd->sequential_samples cannot
2457 * become equal to bfqd->peak_rate_samples, which, in its
2458 * turn, holds true because bfqd->sequential_samples is not
2459 * incremented for the first sample.
2460 */
2461 weight = (9 * bfqd->sequential_samples) / bfqd->peak_rate_samples;
2462
2463 /*
2464 * Second step: further refine the weight as a function of the
2465 * duration of the observation interval.
2466 */
2467 weight = min_t(u32, 8,
2468 div_u64(weight * bfqd->delta_from_first,
2469 BFQ_RATE_REF_INTERVAL));
2470
2471 /*
2472 * Divisor ranging from 10, for minimum weight, to 2, for
2473 * maximum weight.
2474 */
2475 divisor = 10 - weight;
2476
2477 /*
2478 * Finally, update peak rate:
2479 *
2480 * peak_rate = peak_rate * (divisor-1) / divisor + rate / divisor
2481 */
2482 bfqd->peak_rate *= divisor-1;
2483 bfqd->peak_rate /= divisor;
2484 rate /= divisor; /* smoothing constant alpha = 1/divisor */
2485
2486 bfqd->peak_rate += rate;
44e44a1b 2487 update_thr_responsiveness_params(bfqd);
ab0e43e9
PV
2488
2489reset_computation:
2490 bfq_reset_rate_computation(bfqd, rq);
2491}
2492
2493/*
2494 * Update the read/write peak rate (the main quantity used for
2495 * auto-tuning, see update_thr_responsiveness_params()).
2496 *
2497 * It is not trivial to estimate the peak rate (correctly): because of
2498 * the presence of sw and hw queues between the scheduler and the
2499 * device components that finally serve I/O requests, it is hard to
2500 * say exactly when a given dispatched request is served inside the
2501 * device, and for how long. As a consequence, it is hard to know
2502 * precisely at what rate a given set of requests is actually served
2503 * by the device.
2504 *
2505 * On the opposite end, the dispatch time of any request is trivially
2506 * available, and, from this piece of information, the "dispatch rate"
2507 * of requests can be immediately computed. So, the idea in the next
2508 * function is to use what is known, namely request dispatch times
2509 * (plus, when useful, request completion times), to estimate what is
2510 * unknown, namely in-device request service rate.
2511 *
2512 * The main issue is that, because of the above facts, the rate at
2513 * which a certain set of requests is dispatched over a certain time
2514 * interval can vary greatly with respect to the rate at which the
2515 * same requests are then served. But, since the size of any
2516 * intermediate queue is limited, and the service scheme is lossless
2517 * (no request is silently dropped), the following obvious convergence
2518 * property holds: the number of requests dispatched MUST become
2519 * closer and closer to the number of requests completed as the
2520 * observation interval grows. This is the key property used in
2521 * the next function to estimate the peak service rate as a function
2522 * of the observed dispatch rate. The function assumes to be invoked
2523 * on every request dispatch.
2524 */
2525static void bfq_update_peak_rate(struct bfq_data *bfqd, struct request *rq)
2526{
2527 u64 now_ns = ktime_get_ns();
2528
2529 if (bfqd->peak_rate_samples == 0) { /* first dispatch */
2530 bfq_log(bfqd, "update_peak_rate: goto reset, samples %d",
2531 bfqd->peak_rate_samples);
2532 bfq_reset_rate_computation(bfqd, rq);
2533 goto update_last_values; /* will add one sample */
2534 }
2535
2536 /*
2537 * Device idle for very long: the observation interval lasting
2538 * up to this dispatch cannot be a valid observation interval
2539 * for computing a new peak rate (similarly to the late-
2540 * completion event in bfq_completed_request()). Go to
2541 * update_rate_and_reset to have the following three steps
2542 * taken:
2543 * - close the observation interval at the last (previous)
2544 * request dispatch or completion
2545 * - compute rate, if possible, for that observation interval
2546 * - start a new observation interval with this dispatch
2547 */
2548 if (now_ns - bfqd->last_dispatch > 100*NSEC_PER_MSEC &&
2549 bfqd->rq_in_driver == 0)
2550 goto update_rate_and_reset;
2551
2552 /* Update sampling information */
2553 bfqd->peak_rate_samples++;
2554
2555 if ((bfqd->rq_in_driver > 0 ||
2556 now_ns - bfqd->last_completion < BFQ_MIN_TT)
2557 && get_sdist(bfqd->last_position, rq) < BFQQ_SEEK_THR)
2558 bfqd->sequential_samples++;
2559
2560 bfqd->tot_sectors_dispatched += blk_rq_sectors(rq);
2561
2562 /* Reset max observed rq size every 32 dispatches */
2563 if (likely(bfqd->peak_rate_samples % 32))
2564 bfqd->last_rq_max_size =
2565 max_t(u32, blk_rq_sectors(rq), bfqd->last_rq_max_size);
2566 else
2567 bfqd->last_rq_max_size = blk_rq_sectors(rq);
2568
2569 bfqd->delta_from_first = now_ns - bfqd->first_dispatch;
2570
2571 /* Target observation interval not yet reached, go on sampling */
2572 if (bfqd->delta_from_first < BFQ_RATE_REF_INTERVAL)
2573 goto update_last_values;
2574
2575update_rate_and_reset:
2576 bfq_update_rate_reset(bfqd, rq);
2577update_last_values:
2578 bfqd->last_position = blk_rq_pos(rq) + blk_rq_sectors(rq);
2579 bfqd->last_dispatch = now_ns;
2580}
2581
aee69d78
PV
2582/*
2583 * Remove request from internal lists.
2584 */
2585static void bfq_dispatch_remove(struct request_queue *q, struct request *rq)
2586{
2587 struct bfq_queue *bfqq = RQ_BFQQ(rq);
2588
2589 /*
2590 * For consistency, the next instruction should have been
2591 * executed after removing the request from the queue and
 2592	 * dispatching it. Instead, we execute this instruction before
2593 * bfq_remove_request() (and hence introduce a temporary
2594 * inconsistency), for efficiency. In fact, should this
2595 * dispatch occur for a non in-service bfqq, this anticipated
2596 * increment prevents two counters related to bfqq->dispatched
 2598	 * from being, first, uselessly decremented, and then
2598 * incremented again when the (new) value of bfqq->dispatched
2599 * happens to be taken into account.
2600 */
2601 bfqq->dispatched++;
ab0e43e9 2602 bfq_update_peak_rate(q->elevator->elevator_data, rq);
aee69d78
PV
2603
2604 bfq_remove_request(q, rq);
2605}
2606
2607static void __bfq_bfqq_expire(struct bfq_data *bfqd, struct bfq_queue *bfqq)
2608{
36eca894
AA
2609 /*
2610 * If this bfqq is shared between multiple processes, check
2611 * to make sure that those processes are still issuing I/Os
2612 * within the mean seek distance. If not, it may be time to
2613 * break the queues apart again.
2614 */
2615 if (bfq_bfqq_coop(bfqq) && BFQQ_SEEKY(bfqq))
2616 bfq_mark_bfqq_split_coop(bfqq);
2617
44e44a1b
PV
2618 if (RB_EMPTY_ROOT(&bfqq->sort_list)) {
2619 if (bfqq->dispatched == 0)
2620 /*
2621 * Overloading budget_timeout field to store
2622 * the time at which the queue remains with no
2623 * backlog and no outstanding request; used by
2624 * the weight-raising mechanism.
2625 */
2626 bfqq->budget_timeout = jiffies;
2627
e21b7a0b 2628 bfq_del_bfqq_busy(bfqd, bfqq, true);
36eca894 2629 } else {
80294c3b 2630 bfq_requeue_bfqq(bfqd, bfqq, true);
36eca894
AA
2631 /*
2632 * Resort priority tree of potential close cooperators.
2633 */
2634 bfq_pos_tree_add_move(bfqd, bfqq);
2635 }
e21b7a0b
AA
2636
2637 /*
2638 * All in-service entities must have been properly deactivated
2639 * or requeued before executing the next function, which
 2640	 * resets all in-service entities as no longer in service.
2641 */
2642 __bfq_bfqd_reset_in_service(bfqd);
aee69d78
PV
2643}
2644
2645/**
2646 * __bfq_bfqq_recalc_budget - try to adapt the budget to the @bfqq behavior.
2647 * @bfqd: device data.
2648 * @bfqq: queue to update.
2649 * @reason: reason for expiration.
2650 *
2651 * Handle the feedback on @bfqq budget at queue expiration.
2652 * See the body for detailed comments.
2653 */
2654static void __bfq_bfqq_recalc_budget(struct bfq_data *bfqd,
2655 struct bfq_queue *bfqq,
2656 enum bfqq_expiration reason)
2657{
2658 struct request *next_rq;
2659 int budget, min_budget;
2660
aee69d78
PV
2661 min_budget = bfq_min_budget(bfqd);
2662
44e44a1b
PV
2663 if (bfqq->wr_coeff == 1)
2664 budget = bfqq->max_budget;
2665 else /*
2666 * Use a constant, low budget for weight-raised queues,
2667 * to help achieve a low latency. Keep it slightly higher
2668 * than the minimum possible budget, to cause a little
2669 * bit fewer expirations.
2670 */
2671 budget = 2 * min_budget;
2672
aee69d78
PV
2673 bfq_log_bfqq(bfqd, bfqq, "recalc_budg: last budg %d, budg left %d",
2674 bfqq->entity.budget, bfq_bfqq_budget_left(bfqq));
2675 bfq_log_bfqq(bfqd, bfqq, "recalc_budg: last max_budg %d, min budg %d",
2676 budget, bfq_min_budget(bfqd));
2677 bfq_log_bfqq(bfqd, bfqq, "recalc_budg: sync %d, seeky %d",
2678 bfq_bfqq_sync(bfqq), BFQQ_SEEKY(bfqd->in_service_queue));
2679
44e44a1b 2680 if (bfq_bfqq_sync(bfqq) && bfqq->wr_coeff == 1) {
aee69d78
PV
2681 switch (reason) {
2682 /*
2683 * Caveat: in all the following cases we trade latency
2684 * for throughput.
2685 */
2686 case BFQQE_TOO_IDLE:
54b60456
PV
2687 /*
2688 * This is the only case where we may reduce
2689 * the budget: if there is no request of the
2690 * process still waiting for completion, then
2691 * we assume (tentatively) that the timer has
2692 * expired because the batch of requests of
2693 * the process could have been served with a
 2694			 * smaller budget. Hence, betting that the
2695 * process will behave in the same way when it
2696 * becomes backlogged again, we reduce its
2697 * next budget. As long as we guess right,
2698 * this budget cut reduces the latency
2699 * experienced by the process.
2700 *
2701 * However, if there are still outstanding
2702 * requests, then the process may have not yet
2703 * issued its next request just because it is
2704 * still waiting for the completion of some of
2705 * the still outstanding ones. So in this
2706 * subcase we do not reduce its budget, on the
2707 * contrary we increase it to possibly boost
2708 * the throughput, as discussed in the
2709 * comments to the BUDGET_TIMEOUT case.
2710 */
2711 if (bfqq->dispatched > 0) /* still outstanding reqs */
2712 budget = min(budget * 2, bfqd->bfq_max_budget);
2713 else {
2714 if (budget > 5 * min_budget)
2715 budget -= 4 * min_budget;
2716 else
2717 budget = min_budget;
2718 }
aee69d78
PV
2719 break;
2720 case BFQQE_BUDGET_TIMEOUT:
54b60456
PV
2721 /*
2722 * We double the budget here because it gives
2723 * the chance to boost the throughput if this
2724 * is not a seeky process (and has bumped into
2725 * this timeout because of, e.g., ZBR).
2726 */
2727 budget = min(budget * 2, bfqd->bfq_max_budget);
aee69d78
PV
2728 break;
2729 case BFQQE_BUDGET_EXHAUSTED:
2730 /*
2731 * The process still has backlog, and did not
2732 * let either the budget timeout or the disk
2733 * idling timeout expire. Hence it is not
2734 * seeky, has a short thinktime and may be
2735 * happy with a higher budget too. So
2736 * definitely increase the budget of this good
2737 * candidate to boost the disk throughput.
2738 */
54b60456 2739 budget = min(budget * 4, bfqd->bfq_max_budget);
aee69d78
PV
2740 break;
2741 case BFQQE_NO_MORE_REQUESTS:
2742 /*
2743 * For queues that expire for this reason, it
2744 * is particularly important to keep the
2745 * budget close to the actual service they
2746 * need. Doing so reduces the timestamp
2747 * misalignment problem described in the
2748 * comments in the body of
2749 * __bfq_activate_entity. In fact, suppose
2750 * that a queue systematically expires for
2751 * BFQQE_NO_MORE_REQUESTS and presents a
2752 * new request in time to enjoy timestamp
2753 * back-shifting. The larger the budget of the
2754 * queue is with respect to the service the
2755 * queue actually requests in each service
2756 * slot, the more times the queue can be
2757 * reactivated with the same virtual finish
2758 * time. It follows that, even if this finish
2759 * time is pushed to the system virtual time
2760 * to reduce the consequent timestamp
2761 * misalignment, the queue unjustly enjoys for
2762 * many re-activations a lower finish time
2763 * than all newly activated queues.
2764 *
2765 * The service needed by bfqq is measured
2766 * quite precisely by bfqq->entity.service.
2767 * Since bfqq does not enjoy device idling,
2768 * bfqq->entity.service is equal to the number
2769 * of sectors that the process associated with
2770 * bfqq requested to read/write before waiting
2771 * for request completions, or blocking for
2772 * other reasons.
2773 */
2774 budget = max_t(int, bfqq->entity.service, min_budget);
2775 break;
2776 default:
2777 return;
2778 }
44e44a1b 2779 } else if (!bfq_bfqq_sync(bfqq)) {
aee69d78
PV
2780 /*
2781 * Async queues get always the maximum possible
2782 * budget, as for them we do not care about latency
2783 * (in addition, their ability to dispatch is limited
2784 * by the charging factor).
2785 */
2786 budget = bfqd->bfq_max_budget;
2787 }
2788
2789 bfqq->max_budget = budget;
2790
2791 if (bfqd->budgets_assigned >= bfq_stats_min_budgets &&
2792 !bfqd->bfq_user_max_budget)
2793 bfqq->max_budget = min(bfqq->max_budget, bfqd->bfq_max_budget);
2794
2795 /*
2796 * If there is still backlog, then assign a new budget, making
2797 * sure that it is large enough for the next request. Since
2798 * the finish time of bfqq must be kept in sync with the
2799 * budget, be sure to call __bfq_bfqq_expire() *after* this
2800 * update.
2801 *
2802 * If there is no backlog, then no need to update the budget;
2803 * it will be updated on the arrival of a new request.
2804 */
2805 next_rq = bfqq->next_rq;
2806 if (next_rq)
2807 bfqq->entity.budget = max_t(unsigned long, bfqq->max_budget,
2808 bfq_serv_to_charge(next_rq, bfqq));
2809
2810 bfq_log_bfqq(bfqd, bfqq, "head sect: %u, new budget %d",
2811 next_rq ? blk_rq_sectors(next_rq) : 0,
2812 bfqq->entity.budget);
2813}
2814
aee69d78 2815/*
ab0e43e9
PV
2816 * Return true if the process associated with bfqq is "slow". The slow
2817 * flag is used, in addition to the budget timeout, to reduce the
2818 * amount of service provided to seeky processes, and thus reduce
2819 * their chances to lower the throughput. More details in the comments
2820 * on the function bfq_bfqq_expire().
2821 *
2822 * An important observation is in order: as discussed in the comments
2823 * on the function bfq_update_peak_rate(), with devices with internal
2824 * queues, it is hard if ever possible to know when and for how long
2825 * an I/O request is processed by the device (apart from the trivial
2826 * I/O pattern where a new request is dispatched only after the
2827 * previous one has been completed). This makes it hard to evaluate
2828 * the real rate at which the I/O requests of each bfq_queue are
2829 * served. In fact, for an I/O scheduler like BFQ, serving a
2830 * bfq_queue means just dispatching its requests during its service
2831 * slot (i.e., until the budget of the queue is exhausted, or the
2832 * queue remains idle, or, finally, a timeout fires). But, during the
2833 * service slot of a bfq_queue, around 100 ms at most, the device may
2834 * be even still processing requests of bfq_queues served in previous
2835 * service slots. On the opposite end, the requests of the in-service
2836 * bfq_queue may be completed after the service slot of the queue
2837 * finishes.
2838 *
2839 * Anyway, unless more sophisticated solutions are used
2840 * (where possible), the sum of the sizes of the requests dispatched
2841 * during the service slot of a bfq_queue is probably the only
2842 * approximation available for the service received by the bfq_queue
2843 * during its service slot. And this sum is the quantity used in this
2844 * function to evaluate the I/O speed of a process.
aee69d78 2845 */
ab0e43e9
PV
2846static bool bfq_bfqq_is_slow(struct bfq_data *bfqd, struct bfq_queue *bfqq,
2847 bool compensate, enum bfqq_expiration reason,
2848 unsigned long *delta_ms)
aee69d78 2849{
ab0e43e9
PV
2850 ktime_t delta_ktime;
2851 u32 delta_usecs;
2852 bool slow = BFQQ_SEEKY(bfqq); /* if delta too short, use seekyness */
aee69d78 2853
ab0e43e9 2854 if (!bfq_bfqq_sync(bfqq))
aee69d78
PV
2855 return false;
2856
2857 if (compensate)
ab0e43e9 2858 delta_ktime = bfqd->last_idling_start;
aee69d78 2859 else
ab0e43e9
PV
2860 delta_ktime = ktime_get();
2861 delta_ktime = ktime_sub(delta_ktime, bfqd->last_budget_start);
2862 delta_usecs = ktime_to_us(delta_ktime);
aee69d78
PV
2863
2864 /* don't use too short time intervals */
ab0e43e9
PV
2865 if (delta_usecs < 1000) {
2866 if (blk_queue_nonrot(bfqd->queue))
2867 /*
2868 * give same worst-case guarantees as idling
2869 * for seeky
2870 */
2871 *delta_ms = BFQ_MIN_TT / NSEC_PER_MSEC;
2872 else /* charge at least one seek */
2873 *delta_ms = bfq_slice_idle / NSEC_PER_MSEC;
2874
2875 return slow;
2876 }
aee69d78 2877
ab0e43e9 2878 *delta_ms = delta_usecs / USEC_PER_MSEC;
aee69d78
PV
2879
2880 /*
ab0e43e9
PV
2881 * Use only long (> 20ms) intervals to filter out excessive
2882 * spikes in service rate estimation.
aee69d78 2883 */
ab0e43e9
PV
2884 if (delta_usecs > 20000) {
2885 /*
2886 * Caveat for rotational devices: processes doing I/O
2887 * in the slower disk zones tend to be slow(er) even
2888 * if not seeky. In this respect, the estimated peak
2889 * rate is likely to be an average over the disk
2890 * surface. Accordingly, to not be too harsh with
2891 * unlucky processes, a process is deemed slow only if
2892 * its rate has been lower than half of the estimated
2893 * peak rate.
2894 */
2895 slow = bfqq->entity.service < bfqd->bfq_max_budget / 2;
aee69d78
PV
2896 }
2897
ab0e43e9 2898 bfq_log_bfqq(bfqd, bfqq, "bfq_bfqq_is_slow: slow %d", slow);
aee69d78 2899
ab0e43e9 2900 return slow;
aee69d78
PV
2901}
2902
77b7dcea
PV
2903/*
2904 * To be deemed as soft real-time, an application must meet two
2905 * requirements. First, the application must not require an average
 2906 * bandwidth higher than the approximate bandwidth required to play back or
2907 * record a compressed high-definition video.
2908 * The next function is invoked on the completion of the last request of a
2909 * batch, to compute the next-start time instant, soft_rt_next_start, such
2910 * that, if the next request of the application does not arrive before
2911 * soft_rt_next_start, then the above requirement on the bandwidth is met.
2912 *
2913 * The second requirement is that the request pattern of the application is
2914 * isochronous, i.e., that, after issuing a request or a batch of requests,
2915 * the application stops issuing new requests until all its pending requests
2916 * have been completed. After that, the application may issue a new batch,
2917 * and so on.
2918 * For this reason the next function is invoked to compute
2919 * soft_rt_next_start only for applications that meet this requirement,
2920 * whereas soft_rt_next_start is set to infinity for applications that do
2921 * not.
2922 *
2923 * Unfortunately, even a greedy application may happen to behave in an
2924 * isochronous way if the CPU load is high. In fact, the application may
2925 * stop issuing requests while the CPUs are busy serving other processes,
2926 * then restart, then stop again for a while, and so on. In addition, if
2927 * the disk achieves a low enough throughput with the request pattern
2928 * issued by the application (e.g., because the request pattern is random
2929 * and/or the device is slow), then the application may meet the above
 2930 * bandwidth requirement too. To prevent such a greedy application from
 2931 * being deemed soft real-time, a further rule is used in the computation of
2932 * soft_rt_next_start: soft_rt_next_start must be higher than the current
2933 * time plus the maximum time for which the arrival of a request is waited
2934 * for when a sync queue becomes idle, namely bfqd->bfq_slice_idle.
2935 * This filters out greedy applications, as the latter issue instead their
2936 * next request as soon as possible after the last one has been completed
2937 * (in contrast, when a batch of requests is completed, a soft real-time
2938 * application spends some time processing data).
2939 *
2940 * Unfortunately, the last filter may easily generate false positives if
2941 * only bfqd->bfq_slice_idle is used as a reference time interval and one
2942 * or both the following cases occur:
2943 * 1) HZ is so low that the duration of a jiffy is comparable to or higher
2944 * than bfqd->bfq_slice_idle. This happens, e.g., on slow devices with
2945 * HZ=100.
2946 * 2) jiffies, instead of increasing at a constant rate, may stop increasing
2947 * for a while, then suddenly 'jump' by several units to recover the lost
2948 * increments. This seems to happen, e.g., inside virtual machines.
2949 * To address this issue, we do not use as a reference time interval just
2950 * bfqd->bfq_slice_idle, but bfqd->bfq_slice_idle plus a few jiffies. In
2951 * particular we add the minimum number of jiffies for which the filter
2952 * seems to be quite precise also in embedded systems and KVM/QEMU virtual
2953 * machines.
2954 */
2955static unsigned long bfq_bfqq_softrt_next_start(struct bfq_data *bfqd,
2956 struct bfq_queue *bfqq)
2957{
2958 return max(bfqq->last_idle_bklogged +
2959 HZ * bfqq->service_from_backlogged /
2960 bfqd->bfq_wr_max_softrt_rate,
2961 jiffies + nsecs_to_jiffies(bfqq->bfqd->bfq_slice_idle) + 4);
2962}
2963
aee69d78
PV
2964/**
2965 * bfq_bfqq_expire - expire a queue.
2966 * @bfqd: device owning the queue.
2967 * @bfqq: the queue to expire.
2968 * @compensate: if true, compensate for the time spent idling.
2969 * @reason: the reason causing the expiration.
2970 *
c074170e
PV
2971 * If the process associated with bfqq does slow I/O (e.g., because it
2972 * issues random requests), we charge bfqq with the time it has been
2973 * in service instead of the service it has received (see
2974 * bfq_bfqq_charge_time for details on how this goal is achieved). As
2975 * a consequence, bfqq will typically get higher timestamps upon
2976 * reactivation, and hence it will be rescheduled as if it had
2977 * received more service than what it has actually received. In the
2978 * end, bfqq receives less service in proportion to how slowly its
2979 * associated process consumes its budgets (and hence how seriously it
2980 * tends to lower the throughput). In addition, this time-charging
2981 * strategy guarantees time fairness among slow processes. In
2982 * contrast, if the process associated with bfqq is not slow, we
2983 * charge bfqq exactly with the service it has received.
aee69d78 2984 *
c074170e
PV
2985 * Charging time to the first type of queues and the exact service to
2986 * the other has the effect of using the WF2Q+ policy to schedule the
2987 * former on a timeslice basis, without violating service domain
2988 * guarantees among the latter.
aee69d78 2989 */
ea25da48
PV
2990void bfq_bfqq_expire(struct bfq_data *bfqd,
2991 struct bfq_queue *bfqq,
2992 bool compensate,
2993 enum bfqq_expiration reason)
aee69d78
PV
2994{
2995 bool slow;
ab0e43e9
PV
2996 unsigned long delta = 0;
2997 struct bfq_entity *entity = &bfqq->entity;
aee69d78
PV
2998 int ref;
2999
3000 /*
ab0e43e9 3001 * Check whether the process is slow (see bfq_bfqq_is_slow).
aee69d78 3002 */
ab0e43e9 3003 slow = bfq_bfqq_is_slow(bfqd, bfqq, compensate, reason, &delta);
aee69d78 3004
77b7dcea
PV
3005 /*
3006 * Increase service_from_backlogged before next statement,
3007 * because the possible next invocation of
3008 * bfq_bfqq_charge_time would likely inflate
3009 * entity->service. In contrast, service_from_backlogged must
3010 * contain real service, to enable the soft real-time
3011 * heuristic to correctly compute the bandwidth consumed by
3012 * bfqq.
3013 */
3014 bfqq->service_from_backlogged += entity->service;
3015
aee69d78 3016 /*
c074170e
PV
3017 * As above explained, charge slow (typically seeky) and
3018 * timed-out queues with the time and not the service
3019 * received, to favor sequential workloads.
3020 *
3021 * Processes doing I/O in the slower disk zones will tend to
3022 * be slow(er) even if not seeky. Therefore, since the
3023 * estimated peak rate is actually an average over the disk
 3024 * surface, these processes may time out just for bad luck. To
3025 * avoid punishing them, do not charge time to processes that
3026 * succeeded in consuming at least 2/3 of their budget. This
3027 * allows BFQ to preserve enough elasticity to still perform
 3028 * bandwidth, and not time, distribution with processes that are
 3029 * just a little unlucky or quasi-sequential.
aee69d78 3030 */
44e44a1b
PV
3031 if (bfqq->wr_coeff == 1 &&
3032 (slow ||
3033 (reason == BFQQE_BUDGET_TIMEOUT &&
3034 bfq_bfqq_budget_left(bfqq) >= entity->budget / 3)))
c074170e 3035 bfq_bfqq_charge_time(bfqd, bfqq, delta);
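	/*
	 * Editor's note: a small worked example of the condition above, with
	 * made-up numbers. With entity->budget = 1200 sectors, the threshold
	 * entity->budget / 3 is 400 sectors. A non-weight-raised queue that
	 * times out after consuming only 700 sectors (500 left, i.e., less
	 * than 2/3 of its budget used) is charged time; a queue that managed
	 * to consume 900 sectors (only 300 left) keeps its real service and
	 * is not punished for the timeout.
	 */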
aee69d78
PV
3036
3037 if (reason == BFQQE_TOO_IDLE &&
ab0e43e9 3038 entity->service <= 2 * entity->budget / 10)
aee69d78
PV
3039 bfq_clear_bfqq_IO_bound(bfqq);
3040
44e44a1b
PV
3041 if (bfqd->low_latency && bfqq->wr_coeff == 1)
3042 bfqq->last_wr_start_finish = jiffies;
3043
77b7dcea
PV
3044 if (bfqd->low_latency && bfqd->bfq_wr_max_softrt_rate > 0 &&
3045 RB_EMPTY_ROOT(&bfqq->sort_list)) {
3046 /*
3047 * If we get here, and there are no outstanding
3048 * requests, then the request pattern is isochronous
3049 * (see the comments on the function
3050 * bfq_bfqq_softrt_next_start()). Thus we can compute
3051 * soft_rt_next_start. If, instead, the queue still
3052 * has outstanding requests, then we have to wait for
3053 * the completion of all the outstanding requests to
3054 * discover whether the request pattern is actually
3055 * isochronous.
3056 */
3057 if (bfqq->dispatched == 0)
3058 bfqq->soft_rt_next_start =
3059 bfq_bfqq_softrt_next_start(bfqd, bfqq);
3060 else {
3061 /*
3062 * The application is still waiting for the
3063 * completion of one or more requests:
3064 * prevent it from possibly being incorrectly
3065 * deemed as soft real-time by setting its
3066 * soft_rt_next_start to infinity. In fact,
3067 * without this assignment, the application
3068 * would be incorrectly deemed as soft
3069 * real-time if:
3070 * 1) it issued a new request before the
3071 * completion of all its in-flight
3072 * requests, and
3073 * 2) at that time, its soft_rt_next_start
3074 * happened to be in the past.
3075 */
3076 bfqq->soft_rt_next_start =
3077 bfq_greatest_from_now();
3078 /*
3079 * Schedule an update of soft_rt_next_start to when
3080 * the task may be discovered to be isochronous.
3081 */
3082 bfq_mark_bfqq_softrt_update(bfqq);
3083 }
3084 }
3085
aee69d78 3086 bfq_log_bfqq(bfqd, bfqq,
d5be3fef
PV
3087 "expire (%d, slow %d, num_disp %d, short_ttime %d)", reason,
3088 slow, bfqq->dispatched, bfq_bfqq_has_short_ttime(bfqq));
aee69d78
PV
3089
3090 /*
3091 * Increase, decrease or leave budget unchanged according to
3092 * reason.
3093 */
3094 __bfq_bfqq_recalc_budget(bfqd, bfqq, reason);
3095 ref = bfqq->ref;
3096 __bfq_bfqq_expire(bfqd, bfqq);
3097
3098 /* mark bfqq as waiting a request only if a bic still points to it */
3099 if (ref > 1 && !bfq_bfqq_busy(bfqq) &&
3100 reason != BFQQE_BUDGET_TIMEOUT &&
3101 reason != BFQQE_BUDGET_EXHAUSTED)
3102 bfq_mark_bfqq_non_blocking_wait_rq(bfqq);
3103}
3104
3105/*
3106 * Budget timeout is not implemented through a dedicated timer, but
3107 * just checked on request arrivals and completions, as well as on
3108 * idle timer expirations.
3109 */
3110static bool bfq_bfqq_budget_timeout(struct bfq_queue *bfqq)
3111{
44e44a1b 3112 return time_is_before_eq_jiffies(bfqq->budget_timeout);
aee69d78
PV
3113}
3114
3115/*
3116 * If we expire a queue that is actively waiting (i.e., with the
3117 * device idled) for the arrival of a new request, then we may incur
3118 * the timestamp misalignment problem described in the body of the
3119 * function __bfq_activate_entity. Hence we return true only if this
3120 * condition does not hold, or if the queue is slow enough to deserve
3121 * only to be kicked off for preserving a high throughput.
3122 */
3123static bool bfq_may_expire_for_budg_timeout(struct bfq_queue *bfqq)
3124{
3125 bfq_log_bfqq(bfqq->bfqd, bfqq,
3126 "may_budget_timeout: wait_request %d left %d timeout %d",
3127 bfq_bfqq_wait_request(bfqq),
3128 bfq_bfqq_budget_left(bfqq) >= bfqq->entity.budget / 3,
3129 bfq_bfqq_budget_timeout(bfqq));
3130
3131 return (!bfq_bfqq_wait_request(bfqq) ||
3132 bfq_bfqq_budget_left(bfqq) >= bfqq->entity.budget / 3)
3133 &&
3134 bfq_bfqq_budget_timeout(bfqq);
3135}
3136
3137/*
3138 * For a queue that becomes empty, device idling is allowed only if
44e44a1b
PV
3139 * this function returns true for the queue. As a consequence, since
3140 * device idling plays a critical role in both throughput boosting and
3141 * service guarantees, the return value of this function plays a
3142 * critical role in both these aspects as well.
3143 *
3144 * In a nutshell, this function returns true only if idling is
3145 * beneficial for throughput or, even if detrimental for throughput,
3146 * idling is however necessary to preserve service guarantees (low
3147 * latency, desired throughput distribution, ...). In particular, on
3148 * NCQ-capable devices, this function tries to return false, so as to
3149 * help keep the drives' internal queues full, whenever this helps the
3150 * device boost the throughput without causing any service-guarantee
3151 * issue.
3152 *
3153 * In more detail, the return value of this function is obtained by,
3154 * first, computing a number of boolean variables that take into
3155 * account throughput and service-guarantee issues, and, then,
3156 * combining these variables in a logical expression. Most of the
3157 * issues taken into account are not trivial. We discuss these issues
3158 * individually while introducing the variables.
aee69d78
PV
3159 */
3160static bool bfq_bfqq_may_idle(struct bfq_queue *bfqq)
3161{
3162 struct bfq_data *bfqd = bfqq->bfqd;
edaf9428
PV
3163 bool rot_without_queueing =
3164 !blk_queue_nonrot(bfqd->queue) && !bfqd->hw_tag,
3165 bfqq_sequential_and_IO_bound,
3166 idling_boosts_thr, idling_boosts_thr_without_issues,
e1b2324d 3167 idling_needed_for_service_guarantees,
cfd69712 3168 asymmetric_scenario;
aee69d78
PV
3169
3170 if (bfqd->strict_guarantees)
3171 return true;
3172
d5be3fef
PV
3173 /*
3174 * Idling is performed only if slice_idle > 0. In addition, we
3175 * do not idle if
3176 * (a) bfqq is async
3177 * (b) bfqq is in the idle io prio class: in this case we do
3178 * not idle because we want to minimize the bandwidth that
 3179 * queues in this class can steal from higher-priority queues
3180 */
3181 if (bfqd->bfq_slice_idle == 0 || !bfq_bfqq_sync(bfqq) ||
3182 bfq_class_idle(bfqq))
3183 return false;
3184
edaf9428
PV
3185 bfqq_sequential_and_IO_bound = !BFQQ_SEEKY(bfqq) &&
3186 bfq_bfqq_IO_bound(bfqq) && bfq_bfqq_has_short_ttime(bfqq);
3187
aee69d78 3188 /*
44e44a1b
PV
3189 * The next variable takes into account the cases where idling
3190 * boosts the throughput.
3191 *
e01eff01
PV
3192 * The value of the variable is computed considering, first, that
3193 * idling is virtually always beneficial for the throughput if:
edaf9428
PV
3194 * (a) the device is not NCQ-capable and rotational, or
3195 * (b) regardless of the presence of NCQ, the device is rotational and
3196 * the request pattern for bfqq is I/O-bound and sequential, or
3197 * (c) regardless of whether it is rotational, the device is
3198 * not NCQ-capable and the request pattern for bfqq is
3199 * I/O-bound and sequential.
bf2b79e7
PV
3200 *
3201 * Secondly, and in contrast to the above item (b), idling an
3202 * NCQ-capable flash-based device would not boost the
e01eff01 3203 * throughput even with sequential I/O; rather it would lower
bf2b79e7
PV
3204 * the throughput in proportion to how fast the device
3205 * is. Accordingly, the next variable is true if any of the
edaf9428
PV
3206 * above conditions (a), (b) or (c) is true, and, in
3207 * particular, happens to be false if bfqd is an NCQ-capable
3208 * flash-based device.
aee69d78 3209 */
edaf9428
PV
3210 idling_boosts_thr = rot_without_queueing ||
3211 ((!blk_queue_nonrot(bfqd->queue) || !bfqd->hw_tag) &&
3212 bfqq_sequential_and_IO_bound);
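	/*
	 * Editor's note: two illustrative evaluations of the expression
	 * above. On a rotational disk whose internal queueing has been
	 * detected (hw_tag set), rot_without_queueing is false, so idling
	 * boosts the throughput only if bfqq's I/O is sequential and
	 * I/O-bound. On an NCQ-capable, non-rotational device both terms
	 * are false, so idling_boosts_thr is false regardless of bfqq's
	 * request pattern.
	 */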
aee69d78 3213
cfd69712
PV
3214 /*
3215 * The value of the next variable,
3216 * idling_boosts_thr_without_issues, is equal to that of
3217 * idling_boosts_thr, unless a special case holds. In this
3218 * special case, described below, idling may cause problems to
3219 * weight-raised queues.
3220 *
3221 * When the request pool is saturated (e.g., in the presence
3222 * of write hogs), if the processes associated with
3223 * non-weight-raised queues ask for requests at a lower rate,
3224 * then processes associated with weight-raised queues have a
3225 * higher probability to get a request from the pool
3226 * immediately (or at least soon) when they need one. Thus
3227 * they have a higher probability to actually get a fraction
3228 * of the device throughput proportional to their high
3229 * weight. This is especially true with NCQ-capable drives,
3230 * which enqueue several requests in advance, and further
3231 * reorder internally-queued requests.
3232 *
3233 * For this reason, we force to false the value of
3234 * idling_boosts_thr_without_issues if there are weight-raised
3235 * busy queues. In this case, and if bfqq is not weight-raised,
3236 * this guarantees that the device is not idled for bfqq (if,
3237 * instead, bfqq is weight-raised, then idling will be
3238 * guaranteed by another variable, see below). Combined with
3239 * the timestamping rules of BFQ (see [1] for details), this
3240 * behavior causes bfqq, and hence any sync non-weight-raised
3241 * queue, to get a lower number of requests served, and thus
3242 * to ask for a lower number of requests from the request
3243 * pool, before the busy weight-raised queues get served
3244 * again. This often mitigates starvation problems in the
3245 * presence of heavy write workloads and NCQ, thereby
3246 * guaranteeing a higher application and system responsiveness
3247 * in these hostile scenarios.
3248 */
3249 idling_boosts_thr_without_issues = idling_boosts_thr &&
3250 bfqd->wr_busy_queues == 0;
3251
aee69d78 3252 /*
bf2b79e7
PV
3253 * There is then a case where idling must be performed not
3254 * for throughput concerns, but to preserve service
3255 * guarantees.
3256 *
3257 * To introduce this case, we can note that allowing the drive
3258 * to enqueue more than one request at a time, and hence
44e44a1b 3259 * delegating de facto final scheduling decisions to the
bf2b79e7 3260 * drive's internal scheduler, entails loss of control on the
44e44a1b 3261 * actual request service order. In particular, the critical
bf2b79e7 3262 * situation is when requests from different processes happen
44e44a1b
PV
3263 * to be present, at the same time, in the internal queue(s)
3264 * of the drive. In such a situation, the drive, by deciding
3265 * the service order of the internally-queued requests, does
3266 * determine also the actual throughput distribution among
3267 * these processes. But the drive typically has no notion or
3268 * concern about per-process throughput distribution, and
3269 * makes its decisions only on a per-request basis. Therefore,
3270 * the service distribution enforced by the drive's internal
3271 * scheduler is likely to coincide with the desired
3272 * device-throughput distribution only in a completely
bf2b79e7
PV
3273 * symmetric scenario where:
3274 * (i) each of these processes must get the same throughput as
3275 * the others;
3276 * (ii) all these processes have the same I/O pattern
 3277 *  (either sequential or random).
3278 * In fact, in such a scenario, the drive will tend to treat
3279 * the requests of each of these processes in about the same
3280 * way as the requests of the others, and thus to provide
3281 * each of these processes with about the same throughput
3282 * (which is exactly the desired throughput distribution). In
3283 * contrast, in any asymmetric scenario, device idling is
3284 * certainly needed to guarantee that bfqq receives its
3285 * assigned fraction of the device throughput (see [1] for
3286 * details).
3287 *
3288 * We address this issue by controlling, actually, only the
3289 * symmetry sub-condition (i), i.e., provided that
3290 * sub-condition (i) holds, idling is not performed,
3291 * regardless of whether sub-condition (ii) holds. In other
3292 * words, only if sub-condition (i) holds, then idling is
3293 * allowed, and the device tends to be prevented from queueing
3294 * many requests, possibly of several processes. The reason
3295 * for not controlling also sub-condition (ii) is that we
3296 * exploit preemption to preserve guarantees in case of
3297 * symmetric scenarios, even if (ii) does not hold, as
3298 * explained in the next two paragraphs.
3299 *
3300 * Even if a queue, say Q, is expired when it remains idle, Q
3301 * can still preempt the new in-service queue if the next
3302 * request of Q arrives soon (see the comments on
3303 * bfq_bfqq_update_budg_for_activation). If all queues and
3304 * groups have the same weight, this form of preemption,
3305 * combined with the hole-recovery heuristic described in the
3306 * comments on function bfq_bfqq_update_budg_for_activation,
3307 * are enough to preserve a correct bandwidth distribution in
3308 * the mid term, even without idling. In fact, even if not
3309 * idling allows the internal queues of the device to contain
3310 * many requests, and thus to reorder requests, we can rather
3311 * safely assume that the internal scheduler still preserves a
3312 * minimum of mid-term fairness. The motivation for using
3313 * preemption instead of idling is that, by not idling,
3314 * service guarantees are preserved without minimally
3315 * sacrificing throughput. In other words, both a high
3316 * throughput and its desired distribution are obtained.
3317 *
3318 * More precisely, this preemption-based, idleless approach
3319 * provides fairness in terms of IOPS, and not sectors per
3320 * second. This can be seen with a simple example. Suppose
3321 * that there are two queues with the same weight, but that
3322 * the first queue receives requests of 8 sectors, while the
3323 * second queue receives requests of 1024 sectors. In
3324 * addition, suppose that each of the two queues contains at
3325 * most one request at a time, which implies that each queue
3326 * always remains idle after it is served. Finally, after
3327 * remaining idle, each queue receives very quickly a new
3328 * request. It follows that the two queues are served
3329 * alternatively, preempting each other if needed. This
3330 * implies that, although both queues have the same weight,
3331 * the queue with large requests receives a service that is
3332 * 1024/8 times as high as the service received by the other
3333 * queue.
44e44a1b 3334 *
bf2b79e7
PV
3335 * On the other hand, device idling is performed, and thus
3336 * pure sector-domain guarantees are provided, for the
3337 * following queues, which are likely to need stronger
3338 * throughput guarantees: weight-raised queues, and queues
3339 * with a higher weight than other queues. When such queues
3340 * are active, sub-condition (i) is false, which triggers
3341 * device idling.
44e44a1b 3342 *
bf2b79e7
PV
3343 * According to the above considerations, the next variable is
3344 * true (only) if sub-condition (i) holds. To compute the
3345 * value of this variable, we not only use the return value of
3346 * the function bfq_symmetric_scenario(), but also check
3347 * whether bfqq is being weight-raised, because
 3348 * bfq_symmetric_scenario() does not take weight-raised
 3349 * queues into account (see comments on
3350 * bfq_weights_tree_add()).
44e44a1b
PV
3351 *
3352 * As a side note, it is worth considering that the above
3353 * device-idling countermeasures may however fail in the
3354 * following unlucky scenario: if idling is (correctly)
bf2b79e7
PV
3355 * disabled in a time period during which all symmetry
3356 * sub-conditions hold, and hence the device is allowed to
44e44a1b
PV
3357 * enqueue many requests, but at some later point in time some
 3358 * sub-condition ceases to hold, then it may become impossible
3359 * to let requests be served in the desired order until all
3360 * the requests already queued in the device have been served.
3361 */
bf2b79e7
PV
3362 asymmetric_scenario = bfqq->wr_coeff > 1 ||
3363 !bfq_symmetric_scenario(bfqd);
44e44a1b 3364
e1b2324d
AA
3365 /*
3366 * Finally, there is a case where maximizing throughput is the
3367 * best choice even if it may cause unfairness toward
3368 * bfqq. Such a case is when bfqq became active in a burst of
3369 * queue activations. Queues that became active during a large
3370 * burst benefit only from throughput, as discussed in the
3371 * comments on bfq_handle_burst. Thus, if bfqq became active
3372 * in a burst and not idling the device maximizes throughput,
 3373 * then the device must not be idled, because not idling the
3374 * device provides bfqq and all other queues in the burst with
3375 * maximum benefit. Combining this and the above case, we can
3376 * now establish when idling is actually needed to preserve
3377 * service guarantees.
3378 */
3379 idling_needed_for_service_guarantees =
3380 asymmetric_scenario && !bfq_bfqq_in_large_burst(bfqq);
3381
44e44a1b 3382 /*
d5be3fef
PV
3383 * We have now all the components we need to compute the
3384 * return value of the function, which is true only if idling
3385 * either boosts the throughput (without issues), or is
3386 * necessary to preserve service guarantees.
aee69d78 3387 */
d5be3fef
PV
3388 return idling_boosts_thr_without_issues ||
3389 idling_needed_for_service_guarantees;
aee69d78
PV
3390}
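/*
 * Editor's note: a couple of illustrative outcomes of the logic above,
 * under assumed conditions. On an NCQ-capable SSD, with strict_guarantees
 * off, no weight-raised busy queues and a symmetric scenario, both
 * idling_boosts_thr_without_issues and
 * idling_needed_for_service_guarantees evaluate to false, so the function
 * lets the device keep its internal queues full. If, instead, bfqq is
 * being weight-raised (wr_coeff > 1) and did not become active in a large
 * burst, asymmetric_scenario makes the second term true, and the device is
 * idled for bfqq regardless of the throughput implications.
 */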
3391
3392/*
3393 * If the in-service queue is empty but the function bfq_bfqq_may_idle
3394 * returns true, then:
3395 * 1) the queue must remain in service and cannot be expired, and
3396 * 2) the device must be idled to wait for the possible arrival of a new
3397 * request for the queue.
3398 * See the comments on the function bfq_bfqq_may_idle for the reasons
3399 * why performing device idling is the best choice to boost the throughput
3400 * and preserve service guarantees when bfq_bfqq_may_idle itself
3401 * returns true.
3402 */
3403static bool bfq_bfqq_must_idle(struct bfq_queue *bfqq)
3404{
d5be3fef 3405 return RB_EMPTY_ROOT(&bfqq->sort_list) && bfq_bfqq_may_idle(bfqq);
aee69d78
PV
3406}
3407
3408/*
3409 * Select a queue for service. If we have a current queue in service,
3410 * check whether to continue servicing it, or retrieve and set a new one.
3411 */
3412static struct bfq_queue *bfq_select_queue(struct bfq_data *bfqd)
3413{
3414 struct bfq_queue *bfqq;
3415 struct request *next_rq;
3416 enum bfqq_expiration reason = BFQQE_BUDGET_TIMEOUT;
3417
3418 bfqq = bfqd->in_service_queue;
3419 if (!bfqq)
3420 goto new_queue;
3421
3422 bfq_log_bfqq(bfqd, bfqq, "select_queue: already in-service queue");
3423
3424 if (bfq_may_expire_for_budg_timeout(bfqq) &&
3425 !bfq_bfqq_wait_request(bfqq) &&
3426 !bfq_bfqq_must_idle(bfqq))
3427 goto expire;
3428
3429check_queue:
3430 /*
3431 * This loop is rarely executed more than once. Even when it
3432 * happens, it is much more convenient to re-execute this loop
3433 * than to return NULL and trigger a new dispatch to get a
3434 * request served.
3435 */
3436 next_rq = bfqq->next_rq;
3437 /*
3438 * If bfqq has requests queued and it has enough budget left to
3439 * serve them, keep the queue, otherwise expire it.
3440 */
3441 if (next_rq) {
3442 if (bfq_serv_to_charge(next_rq, bfqq) >
3443 bfq_bfqq_budget_left(bfqq)) {
3444 /*
3445 * Expire the queue for budget exhaustion,
3446 * which makes sure that the next budget is
3447 * enough to serve the next request, even if
3448 * it comes from the fifo expired path.
3449 */
3450 reason = BFQQE_BUDGET_EXHAUSTED;
3451 goto expire;
3452 } else {
3453 /*
3454 * The idle timer may be pending because we may
3455 * not disable disk idling even when a new request
3456 * arrives.
3457 */
3458 if (bfq_bfqq_wait_request(bfqq)) {
3459 /*
 3460 * If we get here: 1) at least one new
 3461 * request has arrived but we have not
 3462 * disabled the timer because the request
 3463 * was too small, and 2) the block layer
 3464 * has then unplugged the device, causing
 3465 * the dispatch to be invoked.
3466 *
3467 * Since the device is unplugged, now the
3468 * requests are probably large enough to
3469 * provide a reasonable throughput.
3470 * So we disable idling.
3471 */
3472 bfq_clear_bfqq_wait_request(bfqq);
3473 hrtimer_try_to_cancel(&bfqd->idle_slice_timer);
3474 }
3475 goto keep_queue;
3476 }
3477 }
3478
3479 /*
3480 * No requests pending. However, if the in-service queue is idling
3481 * for a new request, or has requests waiting for a completion and
3482 * may idle after their completion, then keep it anyway.
3483 */
3484 if (bfq_bfqq_wait_request(bfqq) ||
3485 (bfqq->dispatched != 0 && bfq_bfqq_may_idle(bfqq))) {
3486 bfqq = NULL;
3487 goto keep_queue;
3488 }
3489
3490 reason = BFQQE_NO_MORE_REQUESTS;
3491expire:
3492 bfq_bfqq_expire(bfqd, bfqq, false, reason);
3493new_queue:
3494 bfqq = bfq_set_in_service_queue(bfqd);
3495 if (bfqq) {
3496 bfq_log_bfqq(bfqd, bfqq, "select_queue: checking new queue");
3497 goto check_queue;
3498 }
3499keep_queue:
3500 if (bfqq)
3501 bfq_log_bfqq(bfqd, bfqq, "select_queue: returned this queue");
3502 else
3503 bfq_log(bfqd, "select_queue: no queue returned");
3504
3505 return bfqq;
3506}
3507
44e44a1b
PV
3508static void bfq_update_wr_data(struct bfq_data *bfqd, struct bfq_queue *bfqq)
3509{
3510 struct bfq_entity *entity = &bfqq->entity;
3511
3512 if (bfqq->wr_coeff > 1) { /* queue is being weight-raised */
3513 bfq_log_bfqq(bfqd, bfqq,
3514 "raising period dur %u/%u msec, old coeff %u, w %d(%d)",
3515 jiffies_to_msecs(jiffies - bfqq->last_wr_start_finish),
3516 jiffies_to_msecs(bfqq->wr_cur_max_time),
3517 bfqq->wr_coeff,
3518 bfqq->entity.weight, bfqq->entity.orig_weight);
3519
3520 if (entity->prio_changed)
3521 bfq_log_bfqq(bfqd, bfqq, "WARN: pending prio change");
3522
3523 /*
e1b2324d
AA
3524 * If the queue was activated in a burst, or too much
3525 * time has elapsed from the beginning of this
3526 * weight-raising period, then end weight raising.
44e44a1b 3527 */
e1b2324d
AA
3528 if (bfq_bfqq_in_large_burst(bfqq))
3529 bfq_bfqq_end_wr(bfqq);
3530 else if (time_is_before_jiffies(bfqq->last_wr_start_finish +
3531 bfqq->wr_cur_max_time)) {
77b7dcea
PV
3532 if (bfqq->wr_cur_max_time != bfqd->bfq_wr_rt_max_time ||
3533 time_is_before_jiffies(bfqq->wr_start_at_switch_to_srt +
e1b2324d 3534 bfq_wr_duration(bfqd)))
77b7dcea
PV
3535 bfq_bfqq_end_wr(bfqq);
3536 else {
3e2bdd6d 3537 switch_back_to_interactive_wr(bfqq, bfqd);
77b7dcea
PV
3538 bfqq->entity.prio_changed = 1;
3539 }
44e44a1b
PV
3540 }
3541 }
431b17f9
PV
3542 /*
3543 * To improve latency (for this or other queues), immediately
3544 * update weight both if it must be raised and if it must be
 3545 * lowered. Since entity may be on some active tree here, and
 3546 * might have a pending change of its ioprio class, invoke
 3547 * the next function with the last parameter unset (see the
3548 * comments on the function).
3549 */
44e44a1b 3550 if ((entity->weight > entity->orig_weight) != (bfqq->wr_coeff > 1))
431b17f9
PV
3551 __bfq_entity_update_weight_prio(bfq_entity_service_tree(entity),
3552 entity, false);
44e44a1b
PV
3553}
3554
aee69d78
PV
3555/*
3556 * Dispatch next request from bfqq.
3557 */
3558static struct request *bfq_dispatch_rq_from_bfqq(struct bfq_data *bfqd,
3559 struct bfq_queue *bfqq)
3560{
3561 struct request *rq = bfqq->next_rq;
3562 unsigned long service_to_charge;
3563
3564 service_to_charge = bfq_serv_to_charge(rq, bfqq);
3565
3566 bfq_bfqq_served(bfqq, service_to_charge);
3567
3568 bfq_dispatch_remove(bfqd->queue, rq);
3569
44e44a1b
PV
3570 /*
3571 * If weight raising has to terminate for bfqq, then next
3572 * function causes an immediate update of bfqq's weight,
3573 * without waiting for next activation. As a consequence, on
 3574 * expiration, bfqq will be timestamped as if it had never been
3575 * weight-raised during this service slot, even if it has
3576 * received part or even most of the service as a
3577 * weight-raised queue. This inflates bfqq's timestamps, which
3578 * is beneficial, as bfqq is then more willing to leave the
3579 * device immediately to possible other weight-raised queues.
3580 */
3581 bfq_update_wr_data(bfqd, bfqq);
3582
aee69d78
PV
3583 /*
3584 * Expire bfqq, pretending that its budget expired, if bfqq
3585 * belongs to CLASS_IDLE and other queues are waiting for
3586 * service.
3587 */
3588 if (bfqd->busy_queues > 1 && bfq_class_idle(bfqq))
3589 goto expire;
3590
3591 return rq;
3592
3593expire:
3594 bfq_bfqq_expire(bfqd, bfqq, false, BFQQE_BUDGET_EXHAUSTED);
3595 return rq;
3596}
3597
3598static bool bfq_has_work(struct blk_mq_hw_ctx *hctx)
3599{
3600 struct bfq_data *bfqd = hctx->queue->elevator->elevator_data;
3601
3602 /*
3603 * Avoiding lock: a race on bfqd->busy_queues should cause at
3604 * most a call to dispatch for nothing
3605 */
3606 return !list_empty_careful(&bfqd->dispatch) ||
3607 bfqd->busy_queues > 0;
3608}
3609
3610static struct request *__bfq_dispatch_request(struct blk_mq_hw_ctx *hctx)
3611{
3612 struct bfq_data *bfqd = hctx->queue->elevator->elevator_data;
3613 struct request *rq = NULL;
3614 struct bfq_queue *bfqq = NULL;
3615
3616 if (!list_empty(&bfqd->dispatch)) {
3617 rq = list_first_entry(&bfqd->dispatch, struct request,
3618 queuelist);
3619 list_del_init(&rq->queuelist);
3620
3621 bfqq = RQ_BFQQ(rq);
3622
3623 if (bfqq) {
3624 /*
3625 * Increment counters here, because this
3626 * dispatch does not follow the standard
3627 * dispatch flow (where counters are
3628 * incremented)
3629 */
3630 bfqq->dispatched++;
3631
3632 goto inc_in_driver_start_rq;
3633 }
3634
3635 /*
3636 * We exploit the put_rq_private hook to decrement
3637 * rq_in_driver, but put_rq_private will not be
 3638 * invoked on this request. So, to avoid an imbalance,
3639 * just start this request, without incrementing
3640 * rq_in_driver. As a negative consequence,
3641 * rq_in_driver is deceptively lower than it should be
3642 * while this request is in service. This may cause
3643 * bfq_schedule_dispatch to be invoked uselessly.
3644 *
3645 * As for implementing an exact solution, the
3646 * put_request hook, if defined, is probably invoked
3647 * also on this request. So, by exploiting this hook,
3648 * we could 1) increment rq_in_driver here, and 2)
3649 * decrement it in put_request. Such a solution would
3650 * let the value of the counter be always accurate,
3651 * but it would entail using an extra interface
 3652 * function. This cost seems higher than the benefit,
 3653 * given that the frequency of non-elevator-private
 3654 * requests is very low.
3655 */
3656 goto start_rq;
3657 }
3658
3659 bfq_log(bfqd, "dispatch requests: %d busy queues", bfqd->busy_queues);
3660
3661 if (bfqd->busy_queues == 0)
3662 goto exit;
3663
3664 /*
3665 * Force device to serve one request at a time if
3666 * strict_guarantees is true. Forcing this service scheme is
3667 * currently the ONLY way to guarantee that the request
3668 * service order enforced by the scheduler is respected by a
3669 * queueing device. Otherwise the device is free even to make
3670 * some unlucky request wait for as long as the device
3671 * wishes.
3672 *
 3673 * Of course, serving one request at a time may cause loss of
3674 * throughput.
3675 */
3676 if (bfqd->strict_guarantees && bfqd->rq_in_driver > 0)
3677 goto exit;
3678
3679 bfqq = bfq_select_queue(bfqd);
3680 if (!bfqq)
3681 goto exit;
3682
3683 rq = bfq_dispatch_rq_from_bfqq(bfqd, bfqq);
3684
3685 if (rq) {
3686inc_in_driver_start_rq:
3687 bfqd->rq_in_driver++;
3688start_rq:
3689 rq->rq_flags |= RQF_STARTED;
3690 }
3691exit:
3692 return rq;
3693}
3694
3695static struct request *bfq_dispatch_request(struct blk_mq_hw_ctx *hctx)
3696{
3697 struct bfq_data *bfqd = hctx->queue->elevator->elevator_data;
3698 struct request *rq;
a33801e8 3699#if defined(CONFIG_BFQ_GROUP_IOSCHED) && defined(CONFIG_DEBUG_BLK_CGROUP)
24bfd19b
PV
3700 struct bfq_queue *in_serv_queue, *bfqq;
3701 bool waiting_rq, idle_timer_disabled;
3702#endif
aee69d78
PV
3703
3704 spin_lock_irq(&bfqd->lock);
36eca894 3705
a33801e8 3706#if defined(CONFIG_BFQ_GROUP_IOSCHED) && defined(CONFIG_DEBUG_BLK_CGROUP)
24bfd19b
PV
3707 in_serv_queue = bfqd->in_service_queue;
3708 waiting_rq = in_serv_queue && bfq_bfqq_wait_request(in_serv_queue);
3709
aee69d78 3710 rq = __bfq_dispatch_request(hctx);
24bfd19b
PV
3711
3712 idle_timer_disabled =
3713 waiting_rq && !bfq_bfqq_wait_request(in_serv_queue);
3714
3715#else
3716 rq = __bfq_dispatch_request(hctx);
3717#endif
6fa3e8d3 3718 spin_unlock_irq(&bfqd->lock);
aee69d78 3719
a33801e8 3720#if defined(CONFIG_BFQ_GROUP_IOSCHED) && defined(CONFIG_DEBUG_BLK_CGROUP)
24bfd19b
PV
3721 bfqq = rq ? RQ_BFQQ(rq) : NULL;
3722 if (!idle_timer_disabled && !bfqq)
3723 return rq;
3724
3725 /*
3726 * rq and bfqq are guaranteed to exist until this function
3727 * ends, for the following reasons. First, rq can be
3728 * dispatched to the device, and then can be completed and
3729 * freed, only after this function ends. Second, rq cannot be
3730 * merged (and thus freed because of a merge) any longer,
3731 * because it has already started. Thus rq cannot be freed
3732 * before this function ends, and, since rq has a reference to
3733 * bfqq, the same guarantee holds for bfqq too.
3734 *
3735 * In addition, the following queue lock guarantees that
3736 * bfqq_group(bfqq) exists as well.
3737 */
3738 spin_lock_irq(hctx->queue->queue_lock);
3739 if (idle_timer_disabled)
3740 /*
3741 * Since the idle timer has been disabled,
3742 * in_serv_queue contained some request when
3743 * __bfq_dispatch_request was invoked above, which
3744 * implies that rq was picked exactly from
3745 * in_serv_queue. Thus in_serv_queue == bfqq, and is
3746 * therefore guaranteed to exist because of the above
3747 * arguments.
3748 */
3749 bfqg_stats_update_idle_time(bfqq_group(in_serv_queue));
3750 if (bfqq) {
3751 struct bfq_group *bfqg = bfqq_group(bfqq);
3752
3753 bfqg_stats_update_avg_queue_size(bfqg);
3754 bfqg_stats_set_start_empty_time(bfqg);
3755 bfqg_stats_update_io_remove(bfqg, rq->cmd_flags);
3756 }
3757 spin_unlock_irq(hctx->queue->queue_lock);
3758#endif
3759
aee69d78
PV
3760 return rq;
3761}
3762
3763/*
3764 * Task holds one reference to the queue, dropped when task exits. Each rq
3765 * in-flight on this queue also holds a reference, dropped when rq is freed.
3766 *
3767 * Scheduler lock must be held here. Recall not to use bfqq after calling
3768 * this function on it.
3769 */
ea25da48 3770void bfq_put_queue(struct bfq_queue *bfqq)
aee69d78 3771{
e21b7a0b
AA
3772#ifdef CONFIG_BFQ_GROUP_IOSCHED
3773 struct bfq_group *bfqg = bfqq_group(bfqq);
3774#endif
3775
aee69d78
PV
3776 if (bfqq->bfqd)
3777 bfq_log_bfqq(bfqq->bfqd, bfqq, "put_queue: %p %d",
3778 bfqq, bfqq->ref);
3779
3780 bfqq->ref--;
3781 if (bfqq->ref)
3782 return;
3783
99fead8d 3784 if (!hlist_unhashed(&bfqq->burst_list_node)) {
e1b2324d 3785 hlist_del_init(&bfqq->burst_list_node);
99fead8d
PV
3786 /*
3787 * Decrement also burst size after the removal, if the
3788 * process associated with bfqq is exiting, and thus
3789 * does not contribute to the burst any longer. This
3790 * decrement helps filter out false positives of large
3791 * bursts, when some short-lived process (often due to
3792 * the execution of commands by some service) happens
3793 * to start and exit while a complex application is
3794 * starting, and thus spawning several processes that
3795 * do I/O (and that *must not* be treated as a large
3796 * burst, see comments on bfq_handle_burst).
3797 *
3798 * In particular, the decrement is performed only if:
3799 * 1) bfqq is not a merged queue, because, if it is,
3800 * then this free of bfqq is not triggered by the exit
3801 * of the process bfqq is associated with, but exactly
3802 * by the fact that bfqq has just been merged.
3803 * 2) burst_size is greater than 0, to handle
3804 * unbalanced decrements. Unbalanced decrements may
 3805 * happen in the following case: bfqq is inserted into
 3806 * the current burst list--without incrementing
 3807 * burst_size--because of a split, but the current
3808 * burst list is not the burst list bfqq belonged to
3809 * (see comments on the case of a split in
3810 * bfq_set_request).
3811 */
3812 if (bfqq->bic && bfqq->bfqd->burst_size > 0)
3813 bfqq->bfqd->burst_size--;
7cb04004 3814 }
e21b7a0b 3815
aee69d78 3816 kmem_cache_free(bfq_pool, bfqq);
e21b7a0b 3817#ifdef CONFIG_BFQ_GROUP_IOSCHED
8f9bebc3 3818 bfqg_and_blkg_put(bfqg);
e21b7a0b 3819#endif
aee69d78
PV
3820}
3821
36eca894
AA
3822static void bfq_put_cooperator(struct bfq_queue *bfqq)
3823{
3824 struct bfq_queue *__bfqq, *next;
3825
3826 /*
3827 * If this queue was scheduled to merge with another queue, be
3828 * sure to drop the reference taken on that queue (and others in
3829 * the merge chain). See bfq_setup_merge and bfq_merge_bfqqs.
3830 */
3831 __bfqq = bfqq->new_bfqq;
3832 while (__bfqq) {
3833 if (__bfqq == bfqq)
3834 break;
3835 next = __bfqq->new_bfqq;
3836 bfq_put_queue(__bfqq);
3837 __bfqq = next;
3838 }
3839}
3840
aee69d78
PV
3841static void bfq_exit_bfqq(struct bfq_data *bfqd, struct bfq_queue *bfqq)
3842{
3843 if (bfqq == bfqd->in_service_queue) {
3844 __bfq_bfqq_expire(bfqd, bfqq);
3845 bfq_schedule_dispatch(bfqd);
3846 }
3847
3848 bfq_log_bfqq(bfqd, bfqq, "exit_bfqq: %p, %d", bfqq, bfqq->ref);
3849
36eca894
AA
3850 bfq_put_cooperator(bfqq);
3851
aee69d78
PV
3852 bfq_put_queue(bfqq); /* release process reference */
3853}
3854
3855static void bfq_exit_icq_bfqq(struct bfq_io_cq *bic, bool is_sync)
3856{
3857 struct bfq_queue *bfqq = bic_to_bfqq(bic, is_sync);
3858 struct bfq_data *bfqd;
3859
3860 if (bfqq)
3861 bfqd = bfqq->bfqd; /* NULL if scheduler already exited */
3862
3863 if (bfqq && bfqd) {
3864 unsigned long flags;
3865
3866 spin_lock_irqsave(&bfqd->lock, flags);
3867 bfq_exit_bfqq(bfqd, bfqq);
3868 bic_set_bfqq(bic, NULL, is_sync);
6fa3e8d3 3869 spin_unlock_irqrestore(&bfqd->lock, flags);
aee69d78
PV
3870 }
3871}
3872
3873static void bfq_exit_icq(struct io_cq *icq)
3874{
3875 struct bfq_io_cq *bic = icq_to_bic(icq);
3876
3877 bfq_exit_icq_bfqq(bic, true);
3878 bfq_exit_icq_bfqq(bic, false);
3879}
3880
3881/*
3882 * Update the entity prio values; note that the new values will not
3883 * be used until the next (re)activation.
3884 */
3885static void
3886bfq_set_next_ioprio_data(struct bfq_queue *bfqq, struct bfq_io_cq *bic)
3887{
3888 struct task_struct *tsk = current;
3889 int ioprio_class;
3890 struct bfq_data *bfqd = bfqq->bfqd;
3891
3892 if (!bfqd)
3893 return;
3894
3895 ioprio_class = IOPRIO_PRIO_CLASS(bic->ioprio);
3896 switch (ioprio_class) {
3897 default:
3898 dev_err(bfqq->bfqd->queue->backing_dev_info->dev,
3899 "bfq: bad prio class %d\n", ioprio_class);
fa393d1b 3900 /* fall through */
aee69d78
PV
3901 case IOPRIO_CLASS_NONE:
3902 /*
3903 * No prio set, inherit CPU scheduling settings.
3904 */
3905 bfqq->new_ioprio = task_nice_ioprio(tsk);
3906 bfqq->new_ioprio_class = task_nice_ioclass(tsk);
3907 break;
3908 case IOPRIO_CLASS_RT:
3909 bfqq->new_ioprio = IOPRIO_PRIO_DATA(bic->ioprio);
3910 bfqq->new_ioprio_class = IOPRIO_CLASS_RT;
3911 break;
3912 case IOPRIO_CLASS_BE:
3913 bfqq->new_ioprio = IOPRIO_PRIO_DATA(bic->ioprio);
3914 bfqq->new_ioprio_class = IOPRIO_CLASS_BE;
3915 break;
3916 case IOPRIO_CLASS_IDLE:
3917 bfqq->new_ioprio_class = IOPRIO_CLASS_IDLE;
3918 bfqq->new_ioprio = 7;
aee69d78
PV
3919 break;
3920 }
3921
3922 if (bfqq->new_ioprio >= IOPRIO_BE_NR) {
3923 pr_crit("bfq_set_next_ioprio_data: new_ioprio %d\n",
3924 bfqq->new_ioprio);
3925 bfqq->new_ioprio = IOPRIO_BE_NR;
3926 }
3927
3928 bfqq->entity.new_weight = bfq_ioprio_to_weight(bfqq->new_ioprio);
3929 bfqq->entity.prio_changed = 1;
3930}
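/*
 * Editor's note: an illustrative mapping, assuming the usual BFQ
 * conversion weight = (IOPRIO_BE_NR - ioprio) * 10 performed by
 * bfq_ioprio_to_weight(). Under that assumption, with IOPRIO_BE_NR = 8, a
 * best-effort queue at the default ioprio 4 gets new_weight = 40, the
 * highest ioprio 0 gets 80, and the lowest ioprio 7 gets 10; as noted
 * above, the new values only take effect on the next (re)activation.
 */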
3931
ea25da48
PV
3932static struct bfq_queue *bfq_get_queue(struct bfq_data *bfqd,
3933 struct bio *bio, bool is_sync,
3934 struct bfq_io_cq *bic);
3935
aee69d78
PV
3936static void bfq_check_ioprio_change(struct bfq_io_cq *bic, struct bio *bio)
3937{
3938 struct bfq_data *bfqd = bic_to_bfqd(bic);
3939 struct bfq_queue *bfqq;
3940 int ioprio = bic->icq.ioc->ioprio;
3941
3942 /*
 3943 * This condition may trigger on a newly created bic; be sure to
3944 * drop the lock before returning.
3945 */
3946 if (unlikely(!bfqd) || likely(bic->ioprio == ioprio))
3947 return;
3948
3949 bic->ioprio = ioprio;
3950
3951 bfqq = bic_to_bfqq(bic, false);
3952 if (bfqq) {
3953 /* release process reference on this queue */
3954 bfq_put_queue(bfqq);
3955 bfqq = bfq_get_queue(bfqd, bio, BLK_RW_ASYNC, bic);
3956 bic_set_bfqq(bic, bfqq, false);
3957 }
3958
3959 bfqq = bic_to_bfqq(bic, true);
3960 if (bfqq)
3961 bfq_set_next_ioprio_data(bfqq, bic);
3962}
3963
3964static void bfq_init_bfqq(struct bfq_data *bfqd, struct bfq_queue *bfqq,
3965 struct bfq_io_cq *bic, pid_t pid, int is_sync)
3966{
3967 RB_CLEAR_NODE(&bfqq->entity.rb_node);
3968 INIT_LIST_HEAD(&bfqq->fifo);
e1b2324d 3969 INIT_HLIST_NODE(&bfqq->burst_list_node);
aee69d78
PV
3970
3971 bfqq->ref = 0;
3972 bfqq->bfqd = bfqd;
3973
3974 if (bic)
3975 bfq_set_next_ioprio_data(bfqq, bic);
3976
3977 if (is_sync) {
d5be3fef
PV
3978 /*
3979 * No need to mark as has_short_ttime if in
3980 * idle_class, because no device idling is performed
3981 * for queues in idle class
3982 */
aee69d78 3983 if (!bfq_class_idle(bfqq))
d5be3fef
PV
3984 /* tentatively mark as has_short_ttime */
3985 bfq_mark_bfqq_has_short_ttime(bfqq);
aee69d78 3986 bfq_mark_bfqq_sync(bfqq);
e1b2324d 3987 bfq_mark_bfqq_just_created(bfqq);
aee69d78
PV
3988 } else
3989 bfq_clear_bfqq_sync(bfqq);
3990
3991 /* set end request to minus infinity from now */
3992 bfqq->ttime.last_end_request = ktime_get_ns() + 1;
3993
3994 bfq_mark_bfqq_IO_bound(bfqq);
3995
3996 bfqq->pid = pid;
3997
3998 /* Tentative initial value to trade off between thr and lat */
54b60456 3999 bfqq->max_budget = (2 * bfq_max_budget(bfqd)) / 3;
aee69d78 4000 bfqq->budget_timeout = bfq_smallest_from_now();
aee69d78 4001
44e44a1b 4002 bfqq->wr_coeff = 1;
36eca894 4003 bfqq->last_wr_start_finish = jiffies;
77b7dcea 4004 bfqq->wr_start_at_switch_to_srt = bfq_smallest_from_now();
36eca894 4005 bfqq->split_time = bfq_smallest_from_now();
77b7dcea
PV
4006
4007 /*
4008 * Set to the value for which bfqq will not be deemed as
4009 * soft rt when it becomes backlogged.
4010 */
4011 bfqq->soft_rt_next_start = bfq_greatest_from_now();
44e44a1b 4012
aee69d78
PV
4013 /* first request is almost certainly seeky */
4014 bfqq->seek_history = 1;
4015}
4016
4017static struct bfq_queue **bfq_async_queue_prio(struct bfq_data *bfqd,
e21b7a0b 4018 struct bfq_group *bfqg,
aee69d78
PV
4019 int ioprio_class, int ioprio)
4020{
4021 switch (ioprio_class) {
4022 case IOPRIO_CLASS_RT:
e21b7a0b 4023 return &bfqg->async_bfqq[0][ioprio];
aee69d78
PV
4024 case IOPRIO_CLASS_NONE:
4025 ioprio = IOPRIO_NORM;
4026 /* fall through */
4027 case IOPRIO_CLASS_BE:
e21b7a0b 4028 return &bfqg->async_bfqq[1][ioprio];
aee69d78 4029 case IOPRIO_CLASS_IDLE:
e21b7a0b 4030 return &bfqg->async_idle_bfqq;
aee69d78
PV
4031 default:
4032 return NULL;
4033 }
4034}
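/*
 * Editor's note: for illustration, an RT request with ioprio 3 is mapped
 * by the switch above to &bfqg->async_bfqq[0][3], a best-effort request
 * with ioprio 6 to &bfqg->async_bfqq[1][6], and any idle-class request to
 * the single &bfqg->async_idle_bfqq, so async queues are shared per
 * (group, class, priority) rather than per process.
 */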
4035
4036static struct bfq_queue *bfq_get_queue(struct bfq_data *bfqd,
4037 struct bio *bio, bool is_sync,
4038 struct bfq_io_cq *bic)
4039{
4040 const int ioprio = IOPRIO_PRIO_DATA(bic->ioprio);
4041 const int ioprio_class = IOPRIO_PRIO_CLASS(bic->ioprio);
4042 struct bfq_queue **async_bfqq = NULL;
4043 struct bfq_queue *bfqq;
e21b7a0b 4044 struct bfq_group *bfqg;
aee69d78
PV
4045
4046 rcu_read_lock();
4047
e21b7a0b
AA
4048 bfqg = bfq_find_set_group(bfqd, bio_blkcg(bio));
4049 if (!bfqg) {
4050 bfqq = &bfqd->oom_bfqq;
4051 goto out;
4052 }
4053
aee69d78 4054 if (!is_sync) {
e21b7a0b 4055 async_bfqq = bfq_async_queue_prio(bfqd, bfqg, ioprio_class,
aee69d78
PV
4056 ioprio);
4057 bfqq = *async_bfqq;
4058 if (bfqq)
4059 goto out;
4060 }
4061
4062 bfqq = kmem_cache_alloc_node(bfq_pool,
4063 GFP_NOWAIT | __GFP_ZERO | __GFP_NOWARN,
4064 bfqd->queue->node);
4065
4066 if (bfqq) {
4067 bfq_init_bfqq(bfqd, bfqq, bic, current->pid,
4068 is_sync);
e21b7a0b 4069 bfq_init_entity(&bfqq->entity, bfqg);
aee69d78
PV
4070 bfq_log_bfqq(bfqd, bfqq, "allocated");
4071 } else {
4072 bfqq = &bfqd->oom_bfqq;
4073 bfq_log_bfqq(bfqd, bfqq, "using oom bfqq");
4074 goto out;
4075 }
4076
4077 /*
4078 * Pin the queue now that it's allocated, scheduler exit will
4079 * prune it.
4080 */
4081 if (async_bfqq) {
e21b7a0b
AA
4082 bfqq->ref++; /*
4083 * Extra group reference, w.r.t. sync
4084 * queue. This extra reference is removed
4085 * only if bfqq->bfqg disappears, to
4086 * guarantee that this queue is not freed
4087 * until its group goes away.
4088 */
4089 bfq_log_bfqq(bfqd, bfqq, "get_queue, bfqq not in async: %p, %d",
aee69d78
PV
4090 bfqq, bfqq->ref);
4091 *async_bfqq = bfqq;
4092 }
4093
4094out:
4095 bfqq->ref++; /* get a process reference to this queue */
4096 bfq_log_bfqq(bfqd, bfqq, "get_queue, at end: %p, %d", bfqq, bfqq->ref);
4097 rcu_read_unlock();
4098 return bfqq;
4099}
4100
4101static void bfq_update_io_thinktime(struct bfq_data *bfqd,
4102 struct bfq_queue *bfqq)
4103{
4104 struct bfq_ttime *ttime = &bfqq->ttime;
4105 u64 elapsed = ktime_get_ns() - bfqq->ttime.last_end_request;
4106
4107 elapsed = min_t(u64, elapsed, 2ULL * bfqd->bfq_slice_idle);
4108
4109 ttime->ttime_samples = (7*bfqq->ttime.ttime_samples + 256) / 8;
4110 ttime->ttime_total = div_u64(7*ttime->ttime_total + 256*elapsed, 8);
4111 ttime->ttime_mean = div64_ul(ttime->ttime_total + 128,
4112 ttime->ttime_samples);
4113}
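/*
 * Editor's note: a rough sketch of the decaying averages above. Both
 * ttime_samples and ttime_total are exponentially weighted with a 7/8
 * decay: in steady state ttime_samples converges to 256 (the fixed point
 * of s = (7*s + 256) / 8) and, if the per-request think time stays at a
 * constant value T, ttime_total converges to about 256 * T, so ttime_mean
 * approaches T itself. The min_t() above also caps each sample at twice
 * bfq_slice_idle, so a single long pause cannot dominate the average.
 */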
4114
4115static void
4116bfq_update_io_seektime(struct bfq_data *bfqd, struct bfq_queue *bfqq,
4117 struct request *rq)
4118{
aee69d78 4119 bfqq->seek_history <<= 1;
ab0e43e9
PV
4120 bfqq->seek_history |=
4121 get_sdist(bfqq->last_request_pos, rq) > BFQQ_SEEK_THR &&
aee69d78
PV
4122 (!blk_queue_nonrot(bfqd->queue) ||
4123 blk_rq_sectors(rq) < BFQQ_SECT_THR_NONROT);
4124}
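/*
 * Editor's note: seek_history thus acts as a per-queue sliding window of
 * recent requests, one bit each: a bit is set when the distance from the
 * previous request exceeds BFQQ_SEEK_THR (and, on non-rotational devices,
 * only if the request is also small). The BFQQ_SEEKY() test used elsewhere
 * in this file then classifies bfqq as seeky based on how many of these
 * bits are set; treat the exact width and threshold as assumptions here.
 */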
4125
d5be3fef
PV
4126static void bfq_update_has_short_ttime(struct bfq_data *bfqd,
4127 struct bfq_queue *bfqq,
4128 struct bfq_io_cq *bic)
aee69d78 4129{
d5be3fef 4130 bool has_short_ttime = true;
aee69d78 4131
d5be3fef
PV
4132 /*
4133 * No need to update has_short_ttime if bfqq is async or in
4134 * idle io prio class, or if bfq_slice_idle is zero, because
4135 * no device idling is performed for bfqq in this case.
4136 */
4137 if (!bfq_bfqq_sync(bfqq) || bfq_class_idle(bfqq) ||
4138 bfqd->bfq_slice_idle == 0)
aee69d78
PV
4139 return;
4140
36eca894
AA
4141 /* Idle window just restored, statistics are meaningless. */
4142 if (time_is_after_eq_jiffies(bfqq->split_time +
4143 bfqd->bfq_wr_min_idle_time))
4144 return;
4145
d5be3fef
PV
4146 /* Think time is infinite if no process is linked to
4147 * bfqq. Otherwise check average think time to
4148 * decide whether to mark as has_short_ttime
4149 */
aee69d78 4150 if (atomic_read(&bic->icq.ioc->active_ref) == 0 ||
d5be3fef
PV
4151 (bfq_sample_valid(bfqq->ttime.ttime_samples) &&
4152 bfqq->ttime.ttime_mean > bfqd->bfq_slice_idle))
4153 has_short_ttime = false;
4154
4155 bfq_log_bfqq(bfqd, bfqq, "update_has_short_ttime: has_short_ttime %d",
4156 has_short_ttime);
aee69d78 4157
d5be3fef
PV
4158 if (has_short_ttime)
4159 bfq_mark_bfqq_has_short_ttime(bfqq);
aee69d78 4160 else
d5be3fef 4161 bfq_clear_bfqq_has_short_ttime(bfqq);
aee69d78
PV
4162}
4163
4164/*
4165 * Called when a new fs request (rq) is added to bfqq. Check if there's
4166 * something we should do about it.
4167 */
4168static void bfq_rq_enqueued(struct bfq_data *bfqd, struct bfq_queue *bfqq,
4169 struct request *rq)
4170{
4171 struct bfq_io_cq *bic = RQ_BIC(rq);
4172
4173 if (rq->cmd_flags & REQ_META)
4174 bfqq->meta_pending++;
4175
4176 bfq_update_io_thinktime(bfqd, bfqq);
d5be3fef 4177 bfq_update_has_short_ttime(bfqd, bfqq, bic);
aee69d78 4178 bfq_update_io_seektime(bfqd, bfqq, rq);
aee69d78
PV
4179
4180 bfq_log_bfqq(bfqd, bfqq,
d5be3fef
PV
4181 "rq_enqueued: has_short_ttime=%d (seeky %d)",
4182 bfq_bfqq_has_short_ttime(bfqq), BFQQ_SEEKY(bfqq));
aee69d78
PV
4183
4184 bfqq->last_request_pos = blk_rq_pos(rq) + blk_rq_sectors(rq);
4185
4186 if (bfqq == bfqd->in_service_queue && bfq_bfqq_wait_request(bfqq)) {
4187 bool small_req = bfqq->queued[rq_is_sync(rq)] == 1 &&
4188 blk_rq_sectors(rq) < 32;
4189 bool budget_timeout = bfq_bfqq_budget_timeout(bfqq);
4190
4191 /*
4192 * There is just this request queued: if the request
4193 * is small and the queue is not to be expired, then
4194 * just exit.
4195 *
4196 * In this way, if the device is being idled to wait
4197 * for a new request from the in-service queue, we
4198 * avoid unplugging the device and committing the
4199 * device to serve just a small request. On the
4200 * contrary, we wait for the block layer to decide
4201 * when to unplug the device: hopefully, new requests
4202 * will be merged to this one quickly, then the device
4203 * will be unplugged and larger requests will be
4204 * dispatched.
4205 */
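		/*
		 * Editor's note: with 512-byte sectors, the small_req
		 * threshold above corresponds to requests below 16 KiB;
		 * e.g., a single queued 4 KiB read (8 sectors) makes the
		 * function simply return here, so the device is not
		 * committed to such a small request and keeps waiting for
		 * merges or an unplug, unless the budget timeout forces an
		 * expiration.
		 */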
4206 if (small_req && !budget_timeout)
4207 return;
4208
4209 /*
4210 * A large enough request arrived, or the queue is to
4211 * be expired: in both cases disk idling is to be
4212 * stopped, so clear wait_request flag and reset
4213 * timer.
4214 */
4215 bfq_clear_bfqq_wait_request(bfqq);
4216 hrtimer_try_to_cancel(&bfqd->idle_slice_timer);
4217
4218 /*
4219 * The queue is not empty, because a new request just
4220 * arrived. Hence we can safely expire the queue, in
4221 * case of budget timeout, without risking that the
4222 * timestamps of the queue are not updated correctly.
4223 * See [1] for more details.
4224 */
4225 if (budget_timeout)
4226 bfq_bfqq_expire(bfqd, bfqq, false,
4227 BFQQE_BUDGET_TIMEOUT);
4228 }
4229}
4230
24bfd19b
PV
4231/* returns true if it causes the idle timer to be disabled */
4232static bool __bfq_insert_request(struct bfq_data *bfqd, struct request *rq)
aee69d78 4233{
36eca894
AA
4234 struct bfq_queue *bfqq = RQ_BFQQ(rq),
4235 *new_bfqq = bfq_setup_cooperator(bfqd, bfqq, rq, true);
24bfd19b 4236 bool waiting, idle_timer_disabled = false;
36eca894
AA
4237
4238 if (new_bfqq) {
4239 if (bic_to_bfqq(RQ_BIC(rq), 1) != bfqq)
4240 new_bfqq = bic_to_bfqq(RQ_BIC(rq), 1);
4241 /*
4242 * Release the request's reference to the old bfqq
4243 * and make sure one is taken to the shared queue.
4244 */
4245 new_bfqq->allocated++;
4246 bfqq->allocated--;
4247 new_bfqq->ref++;
4248 /*
4249 * If the bic associated with the process
4250 * issuing this request still points to bfqq
4251 * (and thus has not been already redirected
4252 * to new_bfqq or even some other bfq_queue),
4253 * then complete the merge and redirect it to
4254 * new_bfqq.
4255 */
4256 if (bic_to_bfqq(RQ_BIC(rq), 1) == bfqq)
4257 bfq_merge_bfqqs(bfqd, RQ_BIC(rq),
4258 bfqq, new_bfqq);
894df937
PV
4259
4260 bfq_clear_bfqq_just_created(bfqq);
36eca894
AA
4261 /*
4262 * rq is about to be enqueued into new_bfqq,
4263 * release rq reference on bfqq
4264 */
4265 bfq_put_queue(bfqq);
4266 rq->elv.priv[1] = new_bfqq;
4267 bfqq = new_bfqq;
4268 }
aee69d78 4269
24bfd19b 4270 waiting = bfqq && bfq_bfqq_wait_request(bfqq);
aee69d78 4271 bfq_add_request(rq);
24bfd19b 4272 idle_timer_disabled = waiting && !bfq_bfqq_wait_request(bfqq);
aee69d78
PV
4273
4274 rq->fifo_time = ktime_get_ns() + bfqd->bfq_fifo_expire[rq_is_sync(rq)];
4275 list_add_tail(&rq->queuelist, &bfqq->fifo);
4276
4277 bfq_rq_enqueued(bfqd, bfqq, rq);
24bfd19b
PV
4278
4279 return idle_timer_disabled;
aee69d78
PV
4280}
4281
4282static void bfq_insert_request(struct blk_mq_hw_ctx *hctx, struct request *rq,
4283 bool at_head)
4284{
4285 struct request_queue *q = hctx->queue;
4286 struct bfq_data *bfqd = q->elevator->elevator_data;
a33801e8 4287#if defined(CONFIG_BFQ_GROUP_IOSCHED) && defined(CONFIG_DEBUG_BLK_CGROUP)
614822f8 4288 struct bfq_queue *bfqq = RQ_BFQQ(rq);
24bfd19b
PV
4289 bool idle_timer_disabled = false;
4290 unsigned int cmd_flags;
4291#endif
aee69d78
PV
4292
4293 spin_lock_irq(&bfqd->lock);
4294 if (blk_mq_sched_try_insert_merge(q, rq)) {
4295 spin_unlock_irq(&bfqd->lock);
4296 return;
4297 }
4298
4299 spin_unlock_irq(&bfqd->lock);
4300
4301 blk_mq_sched_request_inserted(rq);
4302
4303 spin_lock_irq(&bfqd->lock);
4304 if (at_head || blk_rq_is_passthrough(rq)) {
4305 if (at_head)
4306 list_add(&rq->queuelist, &bfqd->dispatch);
4307 else
4308 list_add_tail(&rq->queuelist, &bfqd->dispatch);
4309 } else {
a33801e8 4310#if defined(CONFIG_BFQ_GROUP_IOSCHED) && defined(CONFIG_DEBUG_BLK_CGROUP)
24bfd19b 4311 idle_timer_disabled = __bfq_insert_request(bfqd, rq);
614822f8
LM
4312 /*
4313 * Update bfqq, because, if a queue merge has occurred
4314 * in __bfq_insert_request, then rq has been
4315 * redirected into a new queue.
4316 */
4317 bfqq = RQ_BFQQ(rq);
24bfd19b
PV
4318#else
4319 __bfq_insert_request(bfqd, rq);
4320#endif
aee69d78
PV
4321
4322 if (rq_mergeable(rq)) {
4323 elv_rqhash_add(q, rq);
4324 if (!q->last_merge)
4325 q->last_merge = rq;
4326 }
4327 }
4328
a33801e8 4329#if defined(CONFIG_BFQ_GROUP_IOSCHED) && defined(CONFIG_DEBUG_BLK_CGROUP)
24bfd19b
PV
4330 /*
4331 * Cache cmd_flags before releasing scheduler lock, because rq
4332 * may disappear afterwards (for example, because of a request
4333 * merge).
4334 */
4335 cmd_flags = rq->cmd_flags;
4336#endif
6fa3e8d3 4337 spin_unlock_irq(&bfqd->lock);
24bfd19b 4338
a33801e8 4339#if defined(CONFIG_BFQ_GROUP_IOSCHED) && defined(CONFIG_DEBUG_BLK_CGROUP)
24bfd19b
PV
4340 if (!bfqq)
4341 return;
4342 /*
4343 * bfqq still exists, because it can disappear only after
4344 * either it is merged with another queue, or the process it
4345 * is associated with exits. But both actions must be taken by
4346 * the same process currently executing this flow of
4347 * instruction.
4348 *
4349 * In addition, the following queue lock guarantees that
4350 * bfqq_group(bfqq) exists as well.
4351 */
4352 spin_lock_irq(q->queue_lock);
4353 bfqg_stats_update_io_add(bfqq_group(bfqq), bfqq, cmd_flags);
4354 if (idle_timer_disabled)
4355 bfqg_stats_update_idle_time(bfqq_group(bfqq));
4356 spin_unlock_irq(q->queue_lock);
4357#endif
aee69d78
PV
4358}
4359
4360static void bfq_insert_requests(struct blk_mq_hw_ctx *hctx,
4361 struct list_head *list, bool at_head)
4362{
4363 while (!list_empty(list)) {
4364 struct request *rq;
4365
4366 rq = list_first_entry(list, struct request, queuelist);
4367 list_del_init(&rq->queuelist);
4368 bfq_insert_request(hctx, rq, at_head);
4369 }
4370}
4371
4372static void bfq_update_hw_tag(struct bfq_data *bfqd)
4373{
4374 bfqd->max_rq_in_driver = max_t(int, bfqd->max_rq_in_driver,
4375 bfqd->rq_in_driver);
4376
4377 if (bfqd->hw_tag == 1)
4378 return;
4379
4380 /*
4381 * This sample is valid if the number of outstanding requests
4382 * is large enough to allow a queueing behavior. Note that the
4383 * sum is not exact, as it's not taking into account deactivated
4384 * requests.
4385 */
4386 if (bfqd->rq_in_driver + bfqd->queued < BFQ_HW_QUEUE_THRESHOLD)
4387 return;
4388
4389 if (bfqd->hw_tag_samples++ < BFQ_HW_QUEUE_SAMPLES)
4390 return;
4391
4392 bfqd->hw_tag = bfqd->max_rq_in_driver > BFQ_HW_QUEUE_THRESHOLD;
4393 bfqd->max_rq_in_driver = 0;
4394 bfqd->hw_tag_samples = 0;
4395}
4396
4397static void bfq_completed_request(struct bfq_queue *bfqq, struct bfq_data *bfqd)
4398{
ab0e43e9
PV
4399 u64 now_ns;
4400 u32 delta_us;
4401
aee69d78
PV
4402 bfq_update_hw_tag(bfqd);
4403
4404 bfqd->rq_in_driver--;
4405 bfqq->dispatched--;
4406
44e44a1b
PV
4407 if (!bfqq->dispatched && !bfq_bfqq_busy(bfqq)) {
4408 /*
4409 * Set budget_timeout (which we overload to store the
4410 * time at which the queue remains with no backlog and
4411 * no outstanding request; used by the weight-raising
4412 * mechanism).
4413 */
4414 bfqq->budget_timeout = jiffies;
1de0c4cd
AA
4415
4416 bfq_weights_tree_remove(bfqd, &bfqq->entity,
4417 &bfqd->queue_weights_tree);
44e44a1b
PV
4418 }
4419
ab0e43e9
PV
4420 now_ns = ktime_get_ns();
4421
4422 bfqq->ttime.last_end_request = now_ns;
4423
4424 /*
4425 * Using us instead of ns, to get a reasonable precision in
4426 * computing rate in next check.
4427 */
4428 delta_us = div_u64(now_ns - bfqd->last_completion, NSEC_PER_USEC);
4429
4430 /*
4431 * If the request took rather long to complete, and, according
4432 * to the maximum request size recorded, this completion latency
4433 * implies that the request was certainly served at a very low
4434 * rate (less than 1M sectors/sec), then the whole observation
4435 * interval that lasts up to this time instant cannot be a
4436 * valid time interval for computing a new peak rate. Invoke
4437 * bfq_update_rate_reset to have the following three steps
4438 * taken:
4439 * - close the observation interval at the last (previous)
4440 * request dispatch or completion
4441 * - compute rate, if possible, for that observation interval
4442 * - reset to zero samples, which will trigger a proper
4443 * re-initialization of the observation interval on next
4444 * dispatch
4445 */
4446 if (delta_us > BFQ_MIN_TT/NSEC_PER_USEC &&
4447 (bfqd->last_rq_max_size<<BFQ_RATE_SHIFT)/delta_us <
4448 1UL<<(BFQ_RATE_SHIFT - 10))
4449 bfq_update_rate_reset(bfqd, NULL);
4450 bfqd->last_completion = now_ns;
aee69d78 4451
77b7dcea
PV
4452 /*
4453 * If we are waiting to discover whether the request pattern
4454 * of the task associated with the queue is actually
4455 * isochronous, and both requisites for this condition to hold
4456 * are now satisfied, then compute soft_rt_next_start (see the
4457 * comments on the function bfq_bfqq_softrt_next_start()). We
4458 * schedule this delayed check when bfqq expires, if it still
4459 * has in-flight requests.
4460 */
4461 if (bfq_bfqq_softrt_update(bfqq) && bfqq->dispatched == 0 &&
4462 RB_EMPTY_ROOT(&bfqq->sort_list))
4463 bfqq->soft_rt_next_start =
4464 bfq_bfqq_softrt_next_start(bfqd, bfqq);
4465
aee69d78
PV
4466 /*
4467 * If this is the in-service queue, check if it needs to be expired,
4468 * or if we want to idle in case it has no pending requests.
4469 */
4470 if (bfqd->in_service_queue == bfqq) {
44e44a1b 4471 if (bfqq->dispatched == 0 && bfq_bfqq_must_idle(bfqq)) {
aee69d78
PV
4472 bfq_arm_slice_timer(bfqd);
4473 return;
4474 } else if (bfq_may_expire_for_budg_timeout(bfqq))
4475 bfq_bfqq_expire(bfqd, bfqq, false,
4476 BFQQE_BUDGET_TIMEOUT);
4477 else if (RB_EMPTY_ROOT(&bfqq->sort_list) &&
4478 (bfqq->dispatched == 0 ||
4479 !bfq_bfqq_may_idle(bfqq)))
4480 bfq_bfqq_expire(bfqd, bfqq, false,
4481 BFQQE_NO_MORE_REQUESTS);
4482 }
3f7cb4f4
HT
4483
4484 if (!bfqd->rq_in_driver)
4485 bfq_schedule_dispatch(bfqd);
aee69d78
PV
4486}
4487
4488static void bfq_put_rq_priv_body(struct bfq_queue *bfqq)
4489{
4490 bfqq->allocated--;
4491
4492 bfq_put_queue(bfqq);
4493}
4494
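/*
 * Completion hook invoked when a request leaves the scheduler. For
 * requests that were actually started, update cgroup completion stats
 * and account the completion under bfqd->lock; requests that never
 * reached the driver are only removed from the sort tree. In both
 * cases the scheduler-private references are dropped at the end.
 */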
7b9e9361 4495static void bfq_finish_request(struct request *rq)
aee69d78 4496{
5bbf4e5a
CH
4497 struct bfq_queue *bfqq;
4498 struct bfq_data *bfqd;
4499
4500 if (!rq->elv.icq)
4501 return;
4502
4503 bfqq = RQ_BFQQ(rq);
4504 bfqd = bfqq->bfqd;
aee69d78 4505
e21b7a0b
AA
4506 if (rq->rq_flags & RQF_STARTED)
4507 bfqg_stats_update_completion(bfqq_group(bfqq),
4508 rq_start_time_ns(rq),
4509 rq_io_start_time_ns(rq),
4510 rq->cmd_flags);
aee69d78
PV
4511
4512 if (likely(rq->rq_flags & RQF_STARTED)) {
4513 unsigned long flags;
4514
4515 spin_lock_irqsave(&bfqd->lock, flags);
4516
4517 bfq_completed_request(bfqq, bfqd);
4518 bfq_put_rq_priv_body(bfqq);
4519
6fa3e8d3 4520 spin_unlock_irqrestore(&bfqd->lock, flags);
aee69d78
PV
4521 } else {
4522 /*
4523 * Request rq may be still/already in the scheduler,
4524 * in which case we need to remove it. And we cannot
4525 * defer such a check and removal, to avoid
4526 * inconsistencies in the time interval from the end
4527 * of this function to the start of the deferred work.
4528 * This situation seems to occur only in process
4529 * context, as a consequence of a merge. In the
4530 * current version of the code, this implies that the
4531 * lock is held.
4532 */
4533
614822f8 4534 if (!RB_EMPTY_NODE(&rq->rb_node)) {
7b9e9361 4535 bfq_remove_request(rq->q, rq);
614822f8
LM
4536 bfqg_stats_update_io_remove(bfqq_group(bfqq),
4537 rq->cmd_flags);
4538 }
aee69d78
PV
4539 bfq_put_rq_priv_body(bfqq);
4540 }
4541
4542 rq->elv.priv[0] = NULL;
4543 rq->elv.priv[1] = NULL;
4544}
4545
36eca894
AA
4546/*
4547 * Returns NULL if a new bfqq should be allocated, or the old bfqq if this
4548 * was the last process referring to that bfqq.
4549 */
4550static struct bfq_queue *
4551bfq_split_bfqq(struct bfq_io_cq *bic, struct bfq_queue *bfqq)
4552{
4553 bfq_log_bfqq(bfqq->bfqd, bfqq, "splitting queue");
4554
4555 if (bfqq_process_refs(bfqq) == 1) {
4556 bfqq->pid = current->pid;
4557 bfq_clear_bfqq_coop(bfqq);
4558 bfq_clear_bfqq_split_coop(bfqq);
4559 return bfqq;
4560 }
4561
4562 bic_set_bfqq(bic, NULL, 1);
4563
4564 bfq_put_cooperator(bfqq);
4565
4566 bfq_put_queue(bfqq);
4567 return NULL;
4568}
4569
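/*
 * Return the queue the bic currently points to for this direction; if
 * there is none, or it is the fallback oom queue, get a fresh queue,
 * store it in the bic and, when handling a just-split sync queue,
 * restore the burst-list state saved in the bic.
 */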
4570static struct bfq_queue *bfq_get_bfqq_handle_split(struct bfq_data *bfqd,
4571 struct bfq_io_cq *bic,
4572 struct bio *bio,
4573 bool split, bool is_sync,
4574 bool *new_queue)
4575{
4576 struct bfq_queue *bfqq = bic_to_bfqq(bic, is_sync);
4577
4578 if (likely(bfqq && bfqq != &bfqd->oom_bfqq))
4579 return bfqq;
4580
4581 if (new_queue)
4582 *new_queue = true;
4583
4584 if (bfqq)
4585 bfq_put_queue(bfqq);
4586 bfqq = bfq_get_queue(bfqd, bio, is_sync, bic);
4587
4588 bic_set_bfqq(bic, bfqq, is_sync);
e1b2324d
AA
4589 if (split && is_sync) {
4590 if ((bic->was_in_burst_list && bfqd->large_burst) ||
4591 bic->saved_in_large_burst)
4592 bfq_mark_bfqq_in_large_burst(bfqq);
4593 else {
4594 bfq_clear_bfqq_in_large_burst(bfqq);
4595 if (bic->was_in_burst_list)
99fead8d
PV
4596 /*
4597 * If bfqq was in the current
4598 * burst list before being
4599 * merged, then we have to add
4600 * it back. And we do not need
4601 * to increase burst_size, as
4602 * we did not decrement
4603 * burst_size when we removed
4604 * bfqq from the burst list as
4605 * a consequence of a merge
4606 * (see comments in
4607 * bfq_put_queue). In this
4608 * respect, it would be rather
4609 * costly to know whether the
4610 * current burst list is still
4611 * the same burst list from
4612 * which bfqq was removed on
4613 * the merge. To avoid this
4614 * cost, if bfqq was in a
4615 * burst list, then we add
4616 * bfqq to the current burst
4617 * list without any further
4618 * check. This can cause
4619 * inappropriate insertions,
4620 * but rarely enough to not
4621 * harm the detection of large
4622 * bursts significantly.
4623 */
e1b2324d
AA
4624 hlist_add_head(&bfqq->burst_list_node,
4625 &bfqd->burst_list);
4626 }
36eca894 4627 bfqq->split_time = jiffies;
e1b2324d 4628 }
36eca894
AA
4629
4630 return bfqq;
4631}
4632
aee69d78
PV
4633/*
4634 * Allocate bfq data structures associated with this request.
4635 */
5bbf4e5a 4636static void bfq_prepare_request(struct request *rq, struct bio *bio)
aee69d78 4637{
5bbf4e5a 4638 struct request_queue *q = rq->q;
aee69d78 4639 struct bfq_data *bfqd = q->elevator->elevator_data;
9f210738 4640 struct bfq_io_cq *bic;
aee69d78
PV
4641 const int is_sync = rq_is_sync(rq);
4642 struct bfq_queue *bfqq;
36eca894 4643 bool new_queue = false;
13c931bd 4644 bool bfqq_already_existing = false, split = false;
aee69d78 4645
9f210738 4646 if (!rq->elv.icq)
5bbf4e5a 4647 return;
9f210738 4648 bic = icq_to_bic(rq->elv.icq);
aee69d78 4649
9f210738 4650 spin_lock_irq(&bfqd->lock);
aee69d78 4651
8c9ff1ad
CIK
4652 bfq_check_ioprio_change(bic, bio);
4653
e21b7a0b
AA
4654 bfq_bic_update_cgroup(bic, bio);
4655
36eca894
AA
4656 bfqq = bfq_get_bfqq_handle_split(bfqd, bic, bio, false, is_sync,
4657 &new_queue);
4658
4659 if (likely(!new_queue)) {
4660 /* If the queue was seeky for too long, break it apart. */
4661 if (bfq_bfqq_coop(bfqq) && bfq_bfqq_split_coop(bfqq)) {
4662 bfq_log_bfqq(bfqd, bfqq, "breaking apart bfqq");
e1b2324d
AA
4663
4664 /* Update bic before losing reference to bfqq */
4665 if (bfq_bfqq_in_large_burst(bfqq))
4666 bic->saved_in_large_burst = true;
4667
36eca894 4668 bfqq = bfq_split_bfqq(bic, bfqq);
6fa3e8d3 4669 split = true;
36eca894
AA
4670
4671 if (!bfqq)
4672 bfqq = bfq_get_bfqq_handle_split(bfqd, bic, bio,
4673 true, is_sync,
4674 NULL);
13c931bd
PV
4675 else
4676 bfqq_already_existing = true;
36eca894 4677 }
aee69d78
PV
4678 }
4679
4680 bfqq->allocated++;
4681 bfqq->ref++;
4682 bfq_log_bfqq(bfqd, bfqq, "get_request %p: bfqq %p, %d",
4683 rq, bfqq, bfqq->ref);
4684
4685 rq->elv.priv[0] = bic;
4686 rq->elv.priv[1] = bfqq;
4687
36eca894
AA
4688 /*
4689 * If a bfq_queue has only one process reference, it is owned
4690	 * by only this bic: we can then set bfqq->bic = bic. In
4691	 * addition, if the queue has also just been split, we have to
4692 * resume its state.
4693 */
4694 if (likely(bfqq != &bfqd->oom_bfqq) && bfqq_process_refs(bfqq) == 1) {
4695 bfqq->bic = bic;
6fa3e8d3 4696 if (split) {
36eca894
AA
4697 /*
4698 * The queue has just been split from a shared
4699 * queue: restore the idle window and the
4700 * possible weight raising period.
4701 */
13c931bd
PV
4702 bfq_bfqq_resume_state(bfqq, bfqd, bic,
4703 bfqq_already_existing);
36eca894
AA
4704 }
4705 }
4706
e1b2324d
AA
4707 if (unlikely(bfq_bfqq_just_created(bfqq)))
4708 bfq_handle_burst(bfqd, bfqq);
4709
6fa3e8d3 4710 spin_unlock_irq(&bfqd->lock);
aee69d78
PV
4711}
4712
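/*
 * Executed when the idle-slice timer fires: if bfqq is still the
 * in-service queue, expire it when its budget has timed out or it has
 * been too idle, and in any case schedule a new dispatch round.
 */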
4713static void bfq_idle_slice_timer_body(struct bfq_queue *bfqq)
4714{
4715 struct bfq_data *bfqd = bfqq->bfqd;
4716 enum bfqq_expiration reason;
4717 unsigned long flags;
4718
4719 spin_lock_irqsave(&bfqd->lock, flags);
4720 bfq_clear_bfqq_wait_request(bfqq);
4721
4722 if (bfqq != bfqd->in_service_queue) {
4723 spin_unlock_irqrestore(&bfqd->lock, flags);
4724 return;
4725 }
4726
4727 if (bfq_bfqq_budget_timeout(bfqq))
4728 /*
4729 * Also here the queue can be safely expired
4730 * for budget timeout without wasting
4731 * guarantees
4732 */
4733 reason = BFQQE_BUDGET_TIMEOUT;
4734 else if (bfqq->queued[0] == 0 && bfqq->queued[1] == 0)
4735 /*
4736 * The queue may not be empty upon timer expiration,
4737 * because we may not disable the timer when the
4738 * first request of the in-service queue arrives
4739 * during disk idling.
4740 */
4741 reason = BFQQE_TOO_IDLE;
4742 else
4743 goto schedule_dispatch;
4744
4745 bfq_bfqq_expire(bfqd, bfqq, true, reason);
4746
4747schedule_dispatch:
6fa3e8d3 4748 spin_unlock_irqrestore(&bfqd->lock, flags);
aee69d78
PV
4749 bfq_schedule_dispatch(bfqd);
4750}
4751
4752/*
4753 * Handler of the expiration of the timer running if the in-service queue
4754 * is idling inside its time slice.
4755 */
4756static enum hrtimer_restart bfq_idle_slice_timer(struct hrtimer *timer)
4757{
4758 struct bfq_data *bfqd = container_of(timer, struct bfq_data,
4759 idle_slice_timer);
4760 struct bfq_queue *bfqq = bfqd->in_service_queue;
4761
4762 /*
4763 * Theoretical race here: the in-service queue can be NULL or
4764 * different from the queue that was idling if a new request
4765 * arrives for the current queue and there is a full dispatch
4766 * cycle that changes the in-service queue. This can hardly
4767 * happen, but in the worst case we just expire a queue too
4768 * early.
4769 */
4770 if (bfqq)
4771 bfq_idle_slice_timer_body(bfqq);
4772
4773 return HRTIMER_NORESTART;
4774}
4775
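/*
 * Move the async queue, if set, back to the root group, drop the
 * reference held through *bfqq_ptr and clear the pointer.
 */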
4776static void __bfq_put_async_bfqq(struct bfq_data *bfqd,
4777 struct bfq_queue **bfqq_ptr)
4778{
4779 struct bfq_queue *bfqq = *bfqq_ptr;
4780
4781 bfq_log(bfqd, "put_async_bfqq: %p", bfqq);
4782 if (bfqq) {
e21b7a0b
AA
4783 bfq_bfqq_move(bfqd, bfqq, bfqd->root_group);
4784
aee69d78
PV
4785 bfq_log_bfqq(bfqd, bfqq, "put_async_bfqq: putting %p, %d",
4786 bfqq, bfqq->ref);
4787 bfq_put_queue(bfqq);
4788 *bfqq_ptr = NULL;
4789 }
4790}
4791
4792/*
e21b7a0b
AA
4793 * Release all the bfqg references to its async queues. If we are
4794 * deallocating the group these queues may still contain requests, so
4795 * we reparent them to the root cgroup (i.e., the only one that will
4796 * exist for sure until all the requests on a device are gone).
aee69d78 4797 */
ea25da48 4798void bfq_put_async_queues(struct bfq_data *bfqd, struct bfq_group *bfqg)
aee69d78
PV
4799{
4800 int i, j;
4801
4802 for (i = 0; i < 2; i++)
4803 for (j = 0; j < IOPRIO_BE_NR; j++)
e21b7a0b 4804 __bfq_put_async_bfqq(bfqd, &bfqg->async_bfqq[i][j]);
aee69d78 4805
e21b7a0b 4806 __bfq_put_async_bfqq(bfqd, &bfqg->async_idle_bfqq);
aee69d78
PV
4807}
4808
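/*
 * Tear down the scheduler instance: cancel the idle-slice timer,
 * deactivate any queues still parked on the idle list, release the
 * group hierarchy (through blkcg policy deactivation when cgroup
 * support is enabled, or directly for the root group otherwise) and
 * free the bfq_data.
 */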
4809static void bfq_exit_queue(struct elevator_queue *e)
4810{
4811 struct bfq_data *bfqd = e->elevator_data;
4812 struct bfq_queue *bfqq, *n;
4813
4814 hrtimer_cancel(&bfqd->idle_slice_timer);
4815
4816 spin_lock_irq(&bfqd->lock);
4817 list_for_each_entry_safe(bfqq, n, &bfqd->idle_list, bfqq_list)
e21b7a0b 4818 bfq_deactivate_bfqq(bfqd, bfqq, false, false);
aee69d78
PV
4819 spin_unlock_irq(&bfqd->lock);
4820
4821 hrtimer_cancel(&bfqd->idle_slice_timer);
4822
e21b7a0b
AA
4823#ifdef CONFIG_BFQ_GROUP_IOSCHED
4824 blkcg_deactivate_policy(bfqd->queue, &blkcg_policy_bfq);
4825#else
4826 spin_lock_irq(&bfqd->lock);
4827 bfq_put_async_queues(bfqd, bfqd->root_group);
4828 kfree(bfqd->root_group);
4829 spin_unlock_irq(&bfqd->lock);
4830#endif
4831
aee69d78
PV
4832 kfree(bfqd);
4833}
4834
e21b7a0b
AA
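/*
 * Set up the root group: empty position tree, one freshly initialized
 * service tree per ioprio class, and no parent entity.
 */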
4835static void bfq_init_root_group(struct bfq_group *root_group,
4836 struct bfq_data *bfqd)
4837{
4838 int i;
4839
4840#ifdef CONFIG_BFQ_GROUP_IOSCHED
4841 root_group->entity.parent = NULL;
4842 root_group->my_entity = NULL;
4843 root_group->bfqd = bfqd;
4844#endif
36eca894 4845 root_group->rq_pos_tree = RB_ROOT;
e21b7a0b
AA
4846 for (i = 0; i < BFQ_IOPRIO_CLASSES; i++)
4847 root_group->sched_data.service_tree[i] = BFQ_SERVICE_TREE_INIT;
4848 root_group->sched_data.bfq_class_idle_last_service = jiffies;
4849}
4850
aee69d78
PV
4851static int bfq_init_queue(struct request_queue *q, struct elevator_type *e)
4852{
4853 struct bfq_data *bfqd;
4854 struct elevator_queue *eq;
aee69d78
PV
4855
4856 eq = elevator_alloc(q, e);
4857 if (!eq)
4858 return -ENOMEM;
4859
4860 bfqd = kzalloc_node(sizeof(*bfqd), GFP_KERNEL, q->node);
4861 if (!bfqd) {
4862 kobject_put(&eq->kobj);
4863 return -ENOMEM;
4864 }
4865 eq->elevator_data = bfqd;
4866
e21b7a0b
AA
4867 spin_lock_irq(q->queue_lock);
4868 q->elevator = eq;
4869 spin_unlock_irq(q->queue_lock);
4870
aee69d78
PV
4871 /*
4872	 * Our fallback bfqq if bfq_get_queue() runs into OOM issues.
4873 * Grab a permanent reference to it, so that the normal code flow
4874 * will not attempt to free it.
4875 */
4876 bfq_init_bfqq(bfqd, &bfqd->oom_bfqq, NULL, 1, 0);
4877 bfqd->oom_bfqq.ref++;
4878 bfqd->oom_bfqq.new_ioprio = BFQ_DEFAULT_QUEUE_IOPRIO;
4879 bfqd->oom_bfqq.new_ioprio_class = IOPRIO_CLASS_BE;
4880 bfqd->oom_bfqq.entity.new_weight =
4881 bfq_ioprio_to_weight(bfqd->oom_bfqq.new_ioprio);
e1b2324d
AA
4882
4883	/* oom_bfqq does not participate in bursts */
4884 bfq_clear_bfqq_just_created(&bfqd->oom_bfqq);
4885
aee69d78
PV
4886 /*
4887 * Trigger weight initialization, according to ioprio, at the
4888 * oom_bfqq's first activation. The oom_bfqq's ioprio and ioprio
4889 * class won't be changed any more.
4890 */
4891 bfqd->oom_bfqq.entity.prio_changed = 1;
4892
4893 bfqd->queue = q;
4894
e21b7a0b 4895 INIT_LIST_HEAD(&bfqd->dispatch);
aee69d78
PV
4896
4897 hrtimer_init(&bfqd->idle_slice_timer, CLOCK_MONOTONIC,
4898 HRTIMER_MODE_REL);
4899 bfqd->idle_slice_timer.function = bfq_idle_slice_timer;
4900
1de0c4cd
AA
4901 bfqd->queue_weights_tree = RB_ROOT;
4902 bfqd->group_weights_tree = RB_ROOT;
4903
aee69d78
PV
4904 INIT_LIST_HEAD(&bfqd->active_list);
4905 INIT_LIST_HEAD(&bfqd->idle_list);
e1b2324d 4906 INIT_HLIST_HEAD(&bfqd->burst_list);
aee69d78
PV
4907
4908 bfqd->hw_tag = -1;
4909
4910 bfqd->bfq_max_budget = bfq_default_max_budget;
4911
4912 bfqd->bfq_fifo_expire[0] = bfq_fifo_expire[0];
4913 bfqd->bfq_fifo_expire[1] = bfq_fifo_expire[1];
4914 bfqd->bfq_back_max = bfq_back_max;
4915 bfqd->bfq_back_penalty = bfq_back_penalty;
4916 bfqd->bfq_slice_idle = bfq_slice_idle;
aee69d78
PV
4917 bfqd->bfq_timeout = bfq_timeout;
4918
4919 bfqd->bfq_requests_within_timer = 120;
4920
e1b2324d
AA
4921 bfqd->bfq_large_burst_thresh = 8;
4922 bfqd->bfq_burst_interval = msecs_to_jiffies(180);
4923
44e44a1b
PV
4924 bfqd->low_latency = true;
4925
4926 /*
4927 * Trade-off between responsiveness and fairness.
4928 */
4929 bfqd->bfq_wr_coeff = 30;
77b7dcea 4930 bfqd->bfq_wr_rt_max_time = msecs_to_jiffies(300);
44e44a1b
PV
4931 bfqd->bfq_wr_max_time = 0;
4932 bfqd->bfq_wr_min_idle_time = msecs_to_jiffies(2000);
4933 bfqd->bfq_wr_min_inter_arr_async = msecs_to_jiffies(500);
77b7dcea
PV
4934 bfqd->bfq_wr_max_softrt_rate = 7000; /*
4935 * Approximate rate required
4936					      * to play back or record a
4937 * high-definition compressed
4938 * video.
4939 */
cfd69712 4940 bfqd->wr_busy_queues = 0;
44e44a1b
PV
4941
4942 /*
4943 * Begin by assuming, optimistically, that the device is a
4944 * high-speed one, and that its peak rate is equal to 2/3 of
4945 * the highest reference rate.
4946 */
4947 bfqd->RT_prod = R_fast[blk_queue_nonrot(bfqd->queue)] *
4948 T_fast[blk_queue_nonrot(bfqd->queue)];
4949 bfqd->peak_rate = R_fast[blk_queue_nonrot(bfqd->queue)] * 2 / 3;
4950 bfqd->device_speed = BFQ_BFQD_FAST;
4951
aee69d78 4952 spin_lock_init(&bfqd->lock);
aee69d78 4953
e21b7a0b
AA
4954 /*
4955 * The invocation of the next bfq_create_group_hierarchy
4956 * function is the head of a chain of function calls
4957 * (bfq_create_group_hierarchy->blkcg_activate_policy->
4958 * blk_mq_freeze_queue) that may lead to the invocation of the
4959 * has_work hook function. For this reason,
4960 * bfq_create_group_hierarchy is invoked only after all
4961 * scheduler data has been initialized, apart from the fields
4962 * that can be initialized only after invoking
4963 * bfq_create_group_hierarchy. This, in particular, enables
4964 * has_work to correctly return false. Of course, to avoid
4965 * other inconsistencies, the blk-mq stack must then refrain
4966 * from invoking further scheduler hooks before this init
4967 * function is finished.
4968 */
4969 bfqd->root_group = bfq_create_group_hierarchy(bfqd, q->node);
4970 if (!bfqd->root_group)
4971 goto out_free;
4972 bfq_init_root_group(bfqd->root_group, bfqd);
4973 bfq_init_entity(&bfqd->oom_bfqq.entity, bfqd->root_group);
4974
b5dc5d4d 4975 wbt_disable_default(q);
aee69d78 4976 return 0;
e21b7a0b
AA
4977
4978out_free:
4979 kfree(bfqd);
4980 kobject_put(&eq->kobj);
4981 return -ENOMEM;
aee69d78
PV
4982}
4983
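/* Slab cache holding bfq_queue objects, set up at module init. */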
4984static void bfq_slab_kill(void)
4985{
4986 kmem_cache_destroy(bfq_pool);
4987}
4988
4989static int __init bfq_slab_setup(void)
4990{
4991 bfq_pool = KMEM_CACHE(bfq_queue, 0);
4992 if (!bfq_pool)
4993 return -ENOMEM;
4994 return 0;
4995}
4996
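/*
 * sysfs plumbing: bfq_var_show() prints an unsigned value,
 * bfq_var_store() parses a base-10 unsigned long from the user buffer.
 */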
4997static ssize_t bfq_var_show(unsigned int var, char *page)
4998{
4999 return sprintf(page, "%u\n", var);
5000}
5001
2f79136b 5002static int bfq_var_store(unsigned long *var, const char *page)
aee69d78
PV
5003{
5004 unsigned long new_val;
5005 int ret = kstrtoul(page, 10, &new_val);
5006
2f79136b
BVA
5007 if (ret)
5008 return ret;
5009 *var = new_val;
5010 return 0;
aee69d78
PV
5011}
5012
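/*
 * __CONV selects the unit conversion applied before displaying the
 * value: 0 = none, 1 = jiffies to milliseconds, 2 = nanoseconds to
 * milliseconds.
 */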
5013#define SHOW_FUNCTION(__FUNC, __VAR, __CONV) \
5014static ssize_t __FUNC(struct elevator_queue *e, char *page) \
5015{ \
5016 struct bfq_data *bfqd = e->elevator_data; \
5017 u64 __data = __VAR; \
5018 if (__CONV == 1) \
5019 __data = jiffies_to_msecs(__data); \
5020 else if (__CONV == 2) \
5021 __data = div_u64(__data, NSEC_PER_MSEC); \
5022 return bfq_var_show(__data, (page)); \
5023}
5024SHOW_FUNCTION(bfq_fifo_expire_sync_show, bfqd->bfq_fifo_expire[1], 2);
5025SHOW_FUNCTION(bfq_fifo_expire_async_show, bfqd->bfq_fifo_expire[0], 2);
5026SHOW_FUNCTION(bfq_back_seek_max_show, bfqd->bfq_back_max, 0);
5027SHOW_FUNCTION(bfq_back_seek_penalty_show, bfqd->bfq_back_penalty, 0);
5028SHOW_FUNCTION(bfq_slice_idle_show, bfqd->bfq_slice_idle, 2);
5029SHOW_FUNCTION(bfq_max_budget_show, bfqd->bfq_user_max_budget, 0);
5030SHOW_FUNCTION(bfq_timeout_sync_show, bfqd->bfq_timeout, 1);
5031SHOW_FUNCTION(bfq_strict_guarantees_show, bfqd->strict_guarantees, 0);
44e44a1b 5032SHOW_FUNCTION(bfq_low_latency_show, bfqd->low_latency, 0);
aee69d78
PV
5033#undef SHOW_FUNCTION
5034
5035#define USEC_SHOW_FUNCTION(__FUNC, __VAR) \
5036static ssize_t __FUNC(struct elevator_queue *e, char *page) \
5037{ \
5038 struct bfq_data *bfqd = e->elevator_data; \
5039 u64 __data = __VAR; \
5040 __data = div_u64(__data, NSEC_PER_USEC); \
5041 return bfq_var_show(__data, (page)); \
5042}
5043USEC_SHOW_FUNCTION(bfq_slice_idle_us_show, bfqd->bfq_slice_idle);
5044#undef USEC_SHOW_FUNCTION
5045
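/*
 * Mirror of SHOW_FUNCTION: clamp the parsed value to [MIN, MAX] and
 * convert it from milliseconds to jiffies (__CONV == 1) or to
 * nanoseconds (__CONV == 2) before storing it.
 */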
5046#define STORE_FUNCTION(__FUNC, __PTR, MIN, MAX, __CONV) \
5047static ssize_t \
5048__FUNC(struct elevator_queue *e, const char *page, size_t count) \
5049{ \
5050 struct bfq_data *bfqd = e->elevator_data; \
1530486c 5051 unsigned long __data, __min = (MIN), __max = (MAX); \
2f79136b
BVA
5052 int ret; \
5053 \
5054 ret = bfq_var_store(&__data, (page)); \
5055 if (ret) \
5056 return ret; \
1530486c
BVA
5057 if (__data < __min) \
5058 __data = __min; \
5059 else if (__data > __max) \
5060 __data = __max; \
aee69d78
PV
5061 if (__CONV == 1) \
5062 *(__PTR) = msecs_to_jiffies(__data); \
5063 else if (__CONV == 2) \
5064 *(__PTR) = (u64)__data * NSEC_PER_MSEC; \
5065 else \
5066 *(__PTR) = __data; \
235f8da1 5067 return count; \
aee69d78
PV
5068}
5069STORE_FUNCTION(bfq_fifo_expire_sync_store, &bfqd->bfq_fifo_expire[1], 1,
5070 INT_MAX, 2);
5071STORE_FUNCTION(bfq_fifo_expire_async_store, &bfqd->bfq_fifo_expire[0], 1,
5072 INT_MAX, 2);
5073STORE_FUNCTION(bfq_back_seek_max_store, &bfqd->bfq_back_max, 0, INT_MAX, 0);
5074STORE_FUNCTION(bfq_back_seek_penalty_store, &bfqd->bfq_back_penalty, 1,
5075 INT_MAX, 0);
5076STORE_FUNCTION(bfq_slice_idle_store, &bfqd->bfq_slice_idle, 0, INT_MAX, 2);
5077#undef STORE_FUNCTION
5078
5079#define USEC_STORE_FUNCTION(__FUNC, __PTR, MIN, MAX) \
5080static ssize_t __FUNC(struct elevator_queue *e, const char *page, size_t count)\
5081{ \
5082 struct bfq_data *bfqd = e->elevator_data; \
1530486c 5083 unsigned long __data, __min = (MIN), __max = (MAX); \
2f79136b
BVA
5084 int ret; \
5085 \
5086 ret = bfq_var_store(&__data, (page)); \
5087 if (ret) \
5088 return ret; \
1530486c
BVA
5089 if (__data < __min) \
5090 __data = __min; \
5091 else if (__data > __max) \
5092 __data = __max; \
aee69d78 5093 *(__PTR) = (u64)__data * NSEC_PER_USEC; \
235f8da1 5094 return count; \
aee69d78
PV
5095}
5096USEC_STORE_FUNCTION(bfq_slice_idle_us_store, &bfqd->bfq_slice_idle, 0,
5097 UINT_MAX);
5098#undef USEC_STORE_FUNCTION
5099
aee69d78
PV
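/*
 * Writing 0 makes BFQ recompute the maximum budget on its own, via
 * bfq_calc_max_budget(); any other value is clamped to INT_MAX and
 * used as is.
 */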
5100static ssize_t bfq_max_budget_store(struct elevator_queue *e,
5101 const char *page, size_t count)
5102{
5103 struct bfq_data *bfqd = e->elevator_data;
2f79136b
BVA
5104 unsigned long __data;
5105 int ret;
235f8da1 5106
2f79136b
BVA
5107 ret = bfq_var_store(&__data, (page));
5108 if (ret)
5109 return ret;
aee69d78
PV
5110
5111 if (__data == 0)
ab0e43e9 5112 bfqd->bfq_max_budget = bfq_calc_max_budget(bfqd);
aee69d78
PV
5113 else {
5114 if (__data > INT_MAX)
5115 __data = INT_MAX;
5116 bfqd->bfq_max_budget = __data;
5117 }
5118
5119 bfqd->bfq_user_max_budget = __data;
5120
235f8da1 5121 return count;
aee69d78
PV
5122}
5123
5124/*
5125 * Leaving this name to preserve name compatibility with cfq
5126 * parameters, but this timeout is used for both sync and async.
5127 */
5128static ssize_t bfq_timeout_sync_store(struct elevator_queue *e,
5129 const char *page, size_t count)
5130{
5131 struct bfq_data *bfqd = e->elevator_data;
2f79136b
BVA
5132 unsigned long __data;
5133 int ret;
235f8da1 5134
2f79136b
BVA
5135 ret = bfq_var_store(&__data, (page));
5136 if (ret)
5137 return ret;
aee69d78
PV
5138
5139 if (__data < 1)
5140 __data = 1;
5141 else if (__data > INT_MAX)
5142 __data = INT_MAX;
5143
5144 bfqd->bfq_timeout = msecs_to_jiffies(__data);
5145 if (bfqd->bfq_user_max_budget == 0)
ab0e43e9 5146 bfqd->bfq_max_budget = bfq_calc_max_budget(bfqd);
aee69d78 5147
235f8da1 5148 return count;
aee69d78
PV
5149}
5150
5151static ssize_t bfq_strict_guarantees_store(struct elevator_queue *e,
5152 const char *page, size_t count)
5153{
5154 struct bfq_data *bfqd = e->elevator_data;
2f79136b
BVA
5155 unsigned long __data;
5156 int ret;
235f8da1 5157
2f79136b
BVA
5158 ret = bfq_var_store(&__data, (page));
5159 if (ret)
5160 return ret;
aee69d78
PV
5161
5162 if (__data > 1)
5163 __data = 1;
5164 if (!bfqd->strict_guarantees && __data == 1
5165 && bfqd->bfq_slice_idle < 8 * NSEC_PER_MSEC)
5166 bfqd->bfq_slice_idle = 8 * NSEC_PER_MSEC;
5167
5168 bfqd->strict_guarantees = __data;
5169
235f8da1 5170 return count;
aee69d78
PV
5171}
5172
44e44a1b
PV
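/*
 * Clearing low_latency while it was set immediately ends all ongoing
 * weight raising (bfq_end_wr()) before the flag is updated.
 */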
5173static ssize_t bfq_low_latency_store(struct elevator_queue *e,
5174 const char *page, size_t count)
5175{
5176 struct bfq_data *bfqd = e->elevator_data;
2f79136b
BVA
5177 unsigned long __data;
5178 int ret;
235f8da1 5179
2f79136b
BVA
5180 ret = bfq_var_store(&__data, (page));
5181 if (ret)
5182 return ret;
44e44a1b
PV
5183
5184 if (__data > 1)
5185 __data = 1;
5186 if (__data == 0 && bfqd->low_latency != 0)
5187 bfq_end_wr(bfqd);
5188 bfqd->low_latency = __data;
5189
235f8da1 5190 return count;
44e44a1b
PV
5191}
5192
aee69d78
PV
5193#define BFQ_ATTR(name) \
5194 __ATTR(name, 0644, bfq_##name##_show, bfq_##name##_store)
5195
5196static struct elv_fs_entry bfq_attrs[] = {
5197 BFQ_ATTR(fifo_expire_sync),
5198 BFQ_ATTR(fifo_expire_async),
5199 BFQ_ATTR(back_seek_max),
5200 BFQ_ATTR(back_seek_penalty),
5201 BFQ_ATTR(slice_idle),
5202 BFQ_ATTR(slice_idle_us),
5203 BFQ_ATTR(max_budget),
5204 BFQ_ATTR(timeout_sync),
5205 BFQ_ATTR(strict_guarantees),
44e44a1b 5206 BFQ_ATTR(low_latency),
aee69d78
PV
5207 __ATTR_NULL
5208};
5209
5210static struct elevator_type iosched_bfq_mq = {
5211 .ops.mq = {
5bbf4e5a 5212 .prepare_request = bfq_prepare_request,
7b9e9361 5213 .finish_request = bfq_finish_request,
aee69d78
PV
5214 .exit_icq = bfq_exit_icq,
5215 .insert_requests = bfq_insert_requests,
5216 .dispatch_request = bfq_dispatch_request,
5217 .next_request = elv_rb_latter_request,
5218 .former_request = elv_rb_former_request,
5219 .allow_merge = bfq_allow_bio_merge,
5220 .bio_merge = bfq_bio_merge,
5221 .request_merge = bfq_request_merge,
5222 .requests_merged = bfq_requests_merged,
5223 .request_merged = bfq_request_merged,
5224 .has_work = bfq_has_work,
5225 .init_sched = bfq_init_queue,
5226 .exit_sched = bfq_exit_queue,
5227 },
5228
5229 .uses_mq = true,
5230 .icq_size = sizeof(struct bfq_io_cq),
5231 .icq_align = __alignof__(struct bfq_io_cq),
5232 .elevator_attrs = bfq_attrs,
5233 .elevator_name = "bfq",
5234 .elevator_owner = THIS_MODULE,
5235};
26b4cf24 5236MODULE_ALIAS("bfq-iosched");
aee69d78
PV
5237
5238static int __init bfq_init(void)
5239{
5240 int ret;
5241
e21b7a0b
AA
5242#ifdef CONFIG_BFQ_GROUP_IOSCHED
5243 ret = blkcg_policy_register(&blkcg_policy_bfq);
5244 if (ret)
5245 return ret;
5246#endif
5247
aee69d78
PV
5248 ret = -ENOMEM;
5249 if (bfq_slab_setup())
5250 goto err_pol_unreg;
5251
44e44a1b
PV
5252 /*
5253 * Times to load large popular applications for the typical
5254 * systems installed on the reference devices (see the
5255 * comments before the definitions of the next two
5256 * arrays). Actually, we use slightly slower values, as the
5257 * estimated peak rate tends to be smaller than the actual
5258 * peak rate. The reason for this last fact is that estimates
5259 * are computed over much shorter time intervals than the long
5260 * intervals typically used for benchmarking. Why? First, to
5261 * adapt more quickly to variations. Second, because an I/O
5262 * scheduler cannot rely on a peak-rate-evaluation workload to
5263 * be run for a long time.
5264 */
5265 T_slow[0] = msecs_to_jiffies(3500); /* actually 4 sec */
5266 T_slow[1] = msecs_to_jiffies(6000); /* actually 6.5 sec */
5267 T_fast[0] = msecs_to_jiffies(7000); /* actually 8 sec */
5268 T_fast[1] = msecs_to_jiffies(2500); /* actually 3 sec */
5269
5270 /*
5271 * Thresholds that determine the switch between speed classes
5272 * (see the comments before the definition of the array
5273 * device_speed_thresh). These thresholds are biased towards
5274 * transitions to the fast class. This is safer than the
5275 * opposite bias. In fact, a wrong transition to the slow
5276 * class results in short weight-raising periods, because the
5277	 * speed of the device then tends to be higher than the
5278	 * reference peak rate. On the opposite end, a wrong
5279	 * transition to the fast class tends to increase
5280	 * weight-raising periods, for the opposite reason.
5281 */
5282 device_speed_thresh[0] = (4 * R_slow[0]) / 3;
5283 device_speed_thresh[1] = (4 * R_slow[1]) / 3;
5284
aee69d78
PV
5285 ret = elv_register(&iosched_bfq_mq);
5286 if (ret)
37dcd657 5287 goto slab_kill;
aee69d78
PV
5288
5289 return 0;
5290
37dcd657 5291slab_kill:
5292 bfq_slab_kill();
aee69d78 5293err_pol_unreg:
e21b7a0b
AA
5294#ifdef CONFIG_BFQ_GROUP_IOSCHED
5295 blkcg_policy_unregister(&blkcg_policy_bfq);
5296#endif
aee69d78
PV
5297 return ret;
5298}
5299
5300static void __exit bfq_exit(void)
5301{
5302 elv_unregister(&iosched_bfq_mq);
e21b7a0b
AA
5303#ifdef CONFIG_BFQ_GROUP_IOSCHED
5304 blkcg_policy_unregister(&blkcg_policy_bfq);
5305#endif
aee69d78
PV
5306 bfq_slab_kill();
5307}
5308
5309module_init(bfq_init);
5310module_exit(bfq_exit);
5311
5312MODULE_AUTHOR("Paolo Valente");
5313MODULE_LICENSE("GPL");
5314MODULE_DESCRIPTION("MQ Budget Fair Queueing I/O Scheduler");