1/*
2 * Budget Fair Queueing (BFQ) I/O scheduler.
3 *
4 * Based on ideas and code from CFQ:
5 * Copyright (C) 2003 Jens Axboe <axboe@kernel.dk>
6 *
7 * Copyright (C) 2008 Fabio Checconi <fabio@gandalf.sssup.it>
8 * Paolo Valente <paolo.valente@unimore.it>
9 *
10 * Copyright (C) 2010 Paolo Valente <paolo.valente@unimore.it>
11 * Arianna Avanzini <avanzini@google.com>
12 *
13 * Copyright (C) 2017 Paolo Valente <paolo.valente@linaro.org>
14 *
15 * This program is free software; you can redistribute it and/or
16 * modify it under the terms of the GNU General Public License as
17 * published by the Free Software Foundation; either version 2 of the
18 * License, or (at your option) any later version.
19 *
20 * This program is distributed in the hope that it will be useful,
21 * but WITHOUT ANY WARRANTY; without even the implied warranty of
22 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
23 * General Public License for more details.
24 *
25 * BFQ is a proportional-share I/O scheduler, with some extra
26 * low-latency capabilities. BFQ also supports full hierarchical
27 * scheduling through cgroups. The next paragraphs provide an introduction
28 * to BFQ's inner workings. Details on BFQ benefits, usage and
29 * limitations can be found in Documentation/block/bfq-iosched.txt.
30 *
31 * BFQ is a proportional-share storage-I/O scheduling algorithm based
32 * on the slice-by-slice service scheme of CFQ. But BFQ assigns
33 * budgets, measured in number of sectors, to processes instead of
34 * time slices. The device is not granted to the in-service process
35 * for a given time slice, but until it has exhausted its assigned
36 * budget. This change from the time to the service domain enables BFQ
37 * to distribute the device throughput among processes as desired,
38 * without any distortion due to throughput fluctuations, or to device
39 * internal queueing. BFQ uses an ad hoc internal scheduler, called
40 * B-WF2Q+, to schedule processes according to their budgets. More
41 * precisely, BFQ schedules queues associated with processes. Each
42 * process/queue is assigned a user-configurable weight, and B-WF2Q+
43 * guarantees that each queue receives a fraction of the throughput
44 * proportional to its weight. Thanks to the accurate policy of
45 * B-WF2Q+, BFQ can afford to assign high budgets to I/O-bound
46 * processes issuing sequential requests (to boost the throughput),
47 * and yet guarantee a low latency to interactive and soft real-time
48 * applications.
49 *
50 * In particular, to provide these low-latency guarantees, BFQ
51 * explicitly privileges the I/O of two classes of time-sensitive
52 * applications: interactive and soft real-time. This feature enables
53 * BFQ to provide applications in these classes with a very low
54 * latency. Finally, BFQ also features additional heuristics for
55 * preserving both a low latency and a high throughput on NCQ-capable,
56 * rotational or flash-based devices, and to get the job done quickly
57 * for applications consisting of many I/O-bound processes.
58 *
59 * BFQ is described in [1], where a reference to the initial, more
60 * theoretical paper on BFQ can also be found. The interested reader can find
61 * in the latter paper full details on the main algorithm, as well as
62 * formulas of the guarantees and formal proofs of all the properties.
63 * With respect to the version of BFQ presented in these papers, this
64 * implementation adds a few more heuristics, such as the one that
65 * guarantees a low latency to soft real-time applications, and a
66 * hierarchical extension based on H-WF2Q+.
67 *
68 * B-WF2Q+ is based on WF2Q+, which is described in [2], together with
69 * H-WF2Q+, while the augmented tree used here to implement B-WF2Q+
70 * with O(log N) complexity derives from the one introduced with EEVDF
71 * in [3].
72 *
73 * [1] P. Valente, A. Avanzini, "Evolution of the BFQ Storage I/O
74 * Scheduler", Proceedings of the First Workshop on Mobile System
75 * Technologies (MST-2015), May 2015.
76 * http://algogroup.unimore.it/people/paolo/disk_sched/mst-2015.pdf
77 *
78 * [2] Jon C.R. Bennett and H. Zhang, "Hierarchical Packet Fair Queueing
79 * Algorithms", IEEE/ACM Transactions on Networking, 5(5):675-689,
80 * Oct 1997.
81 *
82 * http://www.cs.cmu.edu/~hzhang/papers/TON-97-Oct.ps.gz
83 *
84 * [3] I. Stoica and H. Abdel-Wahab, "Earliest Eligible Virtual Deadline
85 * First: A Flexible and Accurate Mechanism for Proportional Share
86 * Resource Allocation", technical report.
87 *
88 * http://www.cs.berkeley.edu/~istoica/papers/eevdf-tr-95.pdf
89 */
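/*
 * Illustrative example, with arbitrary figures: because service is
 * accounted in sectors (budgets) rather than in time, two I/O-bound
 * processes with weights 100 and 300 receive about 25% and 75% of the
 * device throughput over any long enough interval, independently of
 * throughput fluctuations or of the device's internal queueing,
 * because B-WF2Q+ interleaves their budgets in proportion to their
 * weights.
 */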
90#include <linux/module.h>
91#include <linux/slab.h>
92#include <linux/blkdev.h>
93#include <linux/cgroup.h>
94#include <linux/elevator.h>
95#include <linux/ktime.h>
96#include <linux/rbtree.h>
97#include <linux/ioprio.h>
98#include <linux/sbitmap.h>
99#include <linux/delay.h>
100
101#include "blk.h"
102#include "blk-mq.h"
103#include "blk-mq-tag.h"
104#include "blk-mq-sched.h"
105#include <linux/blktrace_api.h>
106#include <linux/hrtimer.h>
107#include <linux/blk-cgroup.h>
108
109#define BFQ_IOPRIO_CLASSES 3
110#define BFQ_CL_IDLE_TIMEOUT (HZ/5)
111
112#define BFQ_MIN_WEIGHT 1
113#define BFQ_MAX_WEIGHT 1000
114#define BFQ_WEIGHT_CONVERSION_COEFF 10
115
116#define BFQ_DEFAULT_QUEUE_IOPRIO 4
117
118#define BFQ_WEIGHT_LEGACY_DFL 100
119#define BFQ_DEFAULT_GRP_IOPRIO 0
120#define BFQ_DEFAULT_GRP_CLASS IOPRIO_CLASS_BE
121
122/*
123 * Soft real-time applications are far more latency-sensitive
124 * than interactive ones. Over-raise the weight of the former to
125 * privilege them against the latter.
126 */
127#define BFQ_SOFTRT_WEIGHT_FACTOR 100
128
129struct bfq_entity;
130
131/**
132 * struct bfq_service_tree - per ioprio_class service tree.
133 *
134 * Each service tree represents a B-WF2Q+ scheduler on its own. Each
135 * ioprio_class has its own independent scheduler, and so its own
136 * bfq_service_tree. All the fields are protected by the queue lock
137 * of the containing bfqd.
138 */
139struct bfq_service_tree {
140 /* tree for active entities (i.e., those backlogged) */
141 struct rb_root active;
142 /* tree for idle entities (i.e., not backlogged, with V <= F_i)*/
143 struct rb_root idle;
144
145 /* idle entity with minimum F_i */
146 struct bfq_entity *first_idle;
147 /* idle entity with maximum F_i */
148 struct bfq_entity *last_idle;
149
150 /* scheduler virtual time */
151 u64 vtime;
152 /* scheduler weight sum; active and idle entities contribute to it */
153 unsigned long wsum;
154};
155
156/**
157 * struct bfq_sched_data - multi-class scheduler.
158 *
159 * bfq_sched_data is the basic scheduler queue. It supports three
160 * ioprio_classes, and can be used either as a toplevel queue or as an
161 * intermediate queue on a hierarchical setup. @next_in_service
162 * points to the active entity of the sched_data service trees that
163 * will be scheduled next. It is used to reduce the number of steps
164 * needed for each hierarchical-schedule update.
165 *
166 * The supported ioprio_classes are the same as in CFQ, in descending
167 * priority order, IOPRIO_CLASS_RT, IOPRIO_CLASS_BE, IOPRIO_CLASS_IDLE.
168 * Requests from higher priority queues are served before all the
169 * requests from lower priority queues; among requests of the same
170 * queue requests are served according to B-WF2Q+.
171 * All the fields are protected by the queue lock of the containing bfqd.
172 */
173struct bfq_sched_data {
174 /* entity in service */
175 struct bfq_entity *in_service_entity;
176 /* head-of-line entity (see comments above) */
177 struct bfq_entity *next_in_service;
178 /* array of service trees, one per ioprio_class */
179 struct bfq_service_tree service_tree[BFQ_IOPRIO_CLASSES];
180 /* last time CLASS_IDLE was served */
181 unsigned long bfq_class_idle_last_service;
182
183};
184
185/**
186 * struct bfq_weight_counter - counter of the number of all active entities
187 * with a given weight.
188 */
189struct bfq_weight_counter {
190 unsigned int weight; /* weight of the entities this counter refers to */
191 unsigned int num_active; /* nr of active entities with this weight */
192 /*
193 * Weights tree member (see bfq_data's @queue_weights_tree and
194 * @group_weights_tree)
195 */
196 struct rb_node weights_node;
197};
198
199/**
200 * struct bfq_entity - schedulable entity.
201 *
202 * A bfq_entity is used to represent either a bfq_queue (leaf node in the
203 * cgroup hierarchy) or a bfq_group into the upper level scheduler. Each
204 * entity belongs to the sched_data of the parent group in the cgroup
205 * hierarchy. Non-leaf entities have also their own sched_data, stored
206 * in @my_sched_data.
207 *
208 * Each entity stores independently its priority values; this would
209 * allow different weights on different devices, but this
210 * functionality is not exported to userspace by now. Priorities and
211 * weights are updated lazily, first storing the new values into the
212 * new_* fields, then setting the @prio_changed flag. As soon as
213 * there is a transition in the entity state that allows the priority
214 * update to take place the effective and the requested priority
215 * values are synchronized.
216 *
217 * Unless cgroups are used, the weight value is calculated from the
218 * ioprio to export the same interface as CFQ. When dealing with
219 * ``well-behaved'' queues (i.e., queues that do not spend too much
220 * time to consume their budget and have true sequential behavior, and
221 * when there are no external factors breaking anticipation) the
222 * relative weights at each level of the cgroups hierarchy should be
223 * guaranteed. All the fields are protected by the queue lock of the
224 * containing bfqd.
225 */
226struct bfq_entity {
227 /* service_tree member */
228 struct rb_node rb_node;
229 /* pointer to the weight counter associated with this entity */
230 struct bfq_weight_counter *weight_counter;
231
232 /*
233 * Flag, true if the entity is on a tree (either the active or
234 * the idle one of its service_tree) or is in service.
235 */
236 bool on_st;
237
238 /* B-WF2Q+ start and finish timestamps [sectors/weight] */
239 u64 start, finish;
240
241 /* tree the entity is enqueued into; %NULL if not on a tree */
242 struct rb_root *tree;
243
244 /*
245 * minimum start time of the (active) subtree rooted at this
246 * entity; used for O(log N) lookups into active trees
247 */
248 u64 min_start;
249
250 /* amount of service received during the last service slot */
251 int service;
252
253 /* budget, used also to calculate F_i: F_i = S_i + @budget / @weight */
254 int budget;
255
256 /* weight of the queue */
257 int weight;
258 /* next weight if a change is in progress */
259 int new_weight;
260
261 /* original weight, used to implement weight boosting */
262 int orig_weight;
263
264 /* parent entity, for hierarchical scheduling */
265 struct bfq_entity *parent;
266
267 /*
268 * For non-leaf nodes in the hierarchy, the associated
269 * scheduler queue, %NULL on leaf nodes.
270 */
271 struct bfq_sched_data *my_sched_data;
272 /* the scheduler queue this entity belongs to */
273 struct bfq_sched_data *sched_data;
274
275 /* flag, set to request a weight, ioprio or ioprio_class change */
276 int prio_changed;
277};
278
279struct bfq_group;
280
281/**
282 * struct bfq_ttime - per process thinktime stats.
283 */
284struct bfq_ttime {
285 /* completion time of the last request */
286 u64 last_end_request;
287
288 /* total process thinktime */
289 u64 ttime_total;
290 /* number of thinktime samples */
291 unsigned long ttime_samples;
292 /* average process thinktime */
293 u64 ttime_mean;
294};
295
296/**
297 * struct bfq_queue - leaf schedulable entity.
298 *
299 * A bfq_queue is a leaf request queue; it can be associated with an
300 * io_context or more, if it is async or shared between cooperating
301 * processes. @cgroup holds a reference to the cgroup, to be sure that it
302 * does not disappear while a bfqq still references it (mostly to avoid
303 * races between request issuing and task migration followed by cgroup
304 * destruction).
305 * All the fields are protected by the queue lock of the containing bfqd.
306 */
307struct bfq_queue {
308 /* reference counter */
309 int ref;
310 /* parent bfq_data */
311 struct bfq_data *bfqd;
312
313 /* current ioprio and ioprio class */
314 unsigned short ioprio, ioprio_class;
315 /* next ioprio and ioprio class if a change is in progress */
316 unsigned short new_ioprio, new_ioprio_class;
317
318 /*
319 * Shared bfq_queue if queue is cooperating with one or more
320 * other queues.
321 */
322 struct bfq_queue *new_bfqq;
323 /* request-position tree member (see bfq_group's @rq_pos_tree) */
324 struct rb_node pos_node;
325 /* request-position tree root (see bfq_group's @rq_pos_tree) */
326 struct rb_root *pos_root;
327
328 /* sorted list of pending requests */
329 struct rb_root sort_list;
330 /* if fifo isn't expired, next request to serve */
331 struct request *next_rq;
332 /* number of sync and async requests queued */
333 int queued[2];
334 /* number of requests currently allocated */
335 int allocated;
336 /* number of pending metadata requests */
337 int meta_pending;
338 /* fifo list of requests in sort_list */
339 struct list_head fifo;
340
341 /* entity representing this queue in the scheduler */
342 struct bfq_entity entity;
343
344 /* maximum budget allowed from the feedback mechanism */
345 int max_budget;
346 /* budget expiration (in jiffies) */
347 unsigned long budget_timeout;
348
349 /* number of requests on the dispatch list or inside driver */
350 int dispatched;
351
352 /* status flags */
353 unsigned long flags;
354
355 /* node for active/idle bfqq list inside parent bfqd */
356 struct list_head bfqq_list;
357
358 /* associated @bfq_ttime struct */
359 struct bfq_ttime ttime;
360
361 /* bit vector: a 1 for each seeky request in history */
362 u32 seek_history;
363 /* position of the last request enqueued */
364 sector_t last_request_pos;
365
366 /* Number of consecutive pairs of request completion and
367 * arrival, such that the queue becomes idle after the
368 * completion, but the next request arrives within an idle
369 * time slice; used only if the queue's IO_bound flag has been
370 * cleared.
371 */
372 unsigned int requests_within_timer;
373
374 /* pid of the process owning the queue, used for logging purposes */
375 pid_t pid;
376
377 /*
378 * Pointer to the bfq_io_cq owning the bfq_queue, set to %NULL
379 * if the queue is shared.
380 */
381 struct bfq_io_cq *bic;
382
383 /* current maximum weight-raising time for this queue */
384 unsigned long wr_cur_max_time;
385 /*
386 * Minimum time instant such that, only if a new request is
387 * enqueued after this time instant in an idle @bfq_queue with
388 * no outstanding requests, then the task associated with the
389 * queue is deemed soft real-time (see the comments on
390 * the function bfq_bfqq_softrt_next_start())
391 */
392 unsigned long soft_rt_next_start;
393 /*
394 * Start time of the current weight-raising period if
395 * the @bfq-queue is being weight-raised, otherwise
396 * finish time of the last weight-raising period.
397 */
398 unsigned long last_wr_start_finish;
399 /* factor by which the weight of this queue is multiplied */
400 unsigned int wr_coeff;
401 /*
402 * Time of the last transition of the @bfq_queue from idle to
403 * backlogged.
404 */
405 unsigned long last_idle_bklogged;
406 /*
407 * Cumulative service received from the @bfq_queue since the
408 * last transition from idle to backlogged.
409 */
410 unsigned long service_from_backlogged;
411
412 /*
413 * Value of wr start time when switching to soft rt
414 */
415 unsigned long wr_start_at_switch_to_srt;
416
417 unsigned long split_time; /* time of last split */
418};
419
420/**
421 * struct bfq_io_cq - per (request_queue, io_context) structure.
422 */
423struct bfq_io_cq {
424 /* associated io_cq structure */
425 struct io_cq icq; /* must be the first member */
426 /* array of two process queues, the sync and the async */
427 struct bfq_queue *bfqq[2];
428 /* per (request_queue, blkcg) ioprio */
429 int ioprio;
430#ifdef CONFIG_BFQ_GROUP_IOSCHED
431 uint64_t blkcg_serial_nr; /* the current blkcg serial */
432#endif
433 /*
434 * Snapshot of the idle window before merging; taken to
435 * remember this value while the queue is merged, so as to be
436 * able to restore it in case of split.
437 */
438 bool saved_idle_window;
439 /*
440 * Same purpose as the previous two fields for the I/O bound
441 * classification of a queue.
442 */
443 bool saved_IO_bound;
444
445 /*
446 * Similar to previous fields: save wr information.
447 */
448 unsigned long saved_wr_coeff;
449 unsigned long saved_last_wr_start_finish;
450 unsigned long saved_wr_start_at_switch_to_srt;
451 unsigned int saved_wr_cur_max_time;
452 struct bfq_ttime saved_ttime;
453};
454
455enum bfq_device_speed {
456 BFQ_BFQD_FAST,
457 BFQ_BFQD_SLOW,
458};
459
460/**
461 * struct bfq_data - per-device data structure.
462 *
463 * All the fields are protected by @lock.
464 */
465struct bfq_data {
466 /* device request queue */
467 struct request_queue *queue;
468 /* dispatch queue */
469 struct list_head dispatch;
470
471 /* root bfq_group for the device */
472 struct bfq_group *root_group;
473
474 /*
475 * rbtree of weight counters of @bfq_queues, sorted by
476 * weight. Used to keep track of whether all @bfq_queues have
477 * the same weight. The tree contains one counter for each
478 * distinct weight associated to some active and not
479 * weight-raised @bfq_queue (see the comments to the functions
480 * bfq_weights_tree_[add|remove] for further details).
481 */
482 struct rb_root queue_weights_tree;
483 /*
484 * rbtree of non-queue @bfq_entity weight counters, sorted by
485 * weight. Used to keep track of whether all @bfq_groups have
486 * the same weight. The tree contains one counter for each
487 * distinct weight associated to some active @bfq_group (see
488 * the comments to the functions bfq_weights_tree_[add|remove]
489 * for further details).
490 */
491 struct rb_root group_weights_tree;
492
493 /*
494 * Number of bfq_queues containing requests (including the
495 * queue in service, even if it is idling).
496 */
497 int busy_queues;
498 /* number of weight-raised busy @bfq_queues */
499 int wr_busy_queues;
500 /* number of queued requests */
501 int queued;
502 /* number of requests dispatched and waiting for completion */
503 int rq_in_driver;
504
505 /*
506 * Maximum number of requests in driver in the last
507 * @hw_tag_samples completed requests.
508 */
509 int max_rq_in_driver;
510 /* number of samples used to calculate hw_tag */
511 int hw_tag_samples;
512 /* flag set to one if the driver is showing a queueing behavior */
513 int hw_tag;
514
515 /* number of budgets assigned */
516 int budgets_assigned;
517
518 /*
519 * Timer set when idling (waiting) for the next request from
520 * the queue in service.
521 */
522 struct hrtimer idle_slice_timer;
523
524 /* bfq_queue in service */
525 struct bfq_queue *in_service_queue;
526 /* bfq_io_cq (bic) associated with the @in_service_queue */
527 struct bfq_io_cq *in_service_bic;
528
529 /* on-disk position of the last served request */
530 sector_t last_position;
531
532 /* time of last request completion (ns) */
533 u64 last_completion;
534
535 /* time of first rq dispatch in current observation interval (ns) */
536 u64 first_dispatch;
537 /* time of last rq dispatch in current observation interval (ns) */
538 u64 last_dispatch;
539
540 /* beginning of the last budget */
541 ktime_t last_budget_start;
542 /* beginning of the last idle slice */
543 ktime_t last_idling_start;
544
545 /* number of samples in current observation interval */
546 int peak_rate_samples;
547 /* num of samples of seq dispatches in current observation interval */
548 u32 sequential_samples;
549 /* total num of sectors transferred in current observation interval */
550 u64 tot_sectors_dispatched;
551 /* max rq size seen during current observation interval (sectors) */
552 u32 last_rq_max_size;
553 /* time elapsed from first dispatch in current observ. interval (us) */
554 u64 delta_from_first;
555 /*
556 * Current estimate of the device peak rate, measured in
557 * [BFQ_RATE_SHIFT * sectors/usec]. The left-shift by
558 * BFQ_RATE_SHIFT is performed to increase precision in
559 * fixed-point calculations.
560 */
561 u32 peak_rate;
562
563 /* maximum budget allotted to a bfq_queue before rescheduling */
564 int bfq_max_budget;
565
566 /* list of all the bfq_queues active on the device */
567 struct list_head active_list;
568 /* list of all the bfq_queues idle on the device */
569 struct list_head idle_list;
570
571 /*
572 * Timeout for async/sync requests; when it fires, requests
573 * are served in fifo order.
574 */
575 u64 bfq_fifo_expire[2];
576 /* weight of backward seeks wrt forward ones */
577 unsigned int bfq_back_penalty;
578 /* maximum allowed backward seek */
579 unsigned int bfq_back_max;
580 /* maximum idling time */
581 u32 bfq_slice_idle;
582
583 /* user-configured max budget value (0 for auto-tuning) */
584 int bfq_user_max_budget;
585 /*
586 * Timeout for bfq_queues to consume their budget; used to
587 * prevent seeky queues from imposing long latencies to
588 * sequential or quasi-sequential ones (this also implies that
589 * seeky queues cannot receive guarantees in the service
590 * domain; after a timeout they are charged for the time they
591 * have been in service, to preserve fairness among them, but
592 * without service-domain guarantees).
593 */
594 unsigned int bfq_timeout;
595
596 /*
597 * Number of consecutive requests that must be issued within
598 * the idle time slice to set again idling to a queue which
599 * was marked as non-I/O-bound (see the definition of the
600 * IO_bound flag for further details).
601 */
602 unsigned int bfq_requests_within_timer;
603
604 /*
605 * Force device idling whenever needed to provide accurate
606 * service guarantees, without caring about throughput
607 * issues. CAVEAT: this may even increase latencies, in case
608 * of useless idling for processes that did stop doing I/O.
609 */
610 bool strict_guarantees;
611
612 /* if set to true, low-latency heuristics are enabled */
613 bool low_latency;
614 /*
615 * Maximum factor by which the weight of a weight-raised queue
616 * is multiplied.
617 */
618 unsigned int bfq_wr_coeff;
619 /* maximum duration of a weight-raising period (jiffies) */
620 unsigned int bfq_wr_max_time;
621
622 /* Maximum weight-raising duration for soft real-time processes */
623 unsigned int bfq_wr_rt_max_time;
624 /*
625 * Minimum idle period after which weight-raising may be
626 * reactivated for a queue (in jiffies).
627 */
628 unsigned int bfq_wr_min_idle_time;
629 /*
630 * Minimum period between request arrivals after which
631 * weight-raising may be reactivated for an already busy async
632 * queue (in jiffies).
633 */
634 unsigned long bfq_wr_min_inter_arr_async;
635
636 /* Max service-rate for a soft real-time queue, in sectors/sec */
637 unsigned int bfq_wr_max_softrt_rate;
638 /*
639 * Cached value of the product R*T, used for computing the
640 * maximum duration of weight raising automatically.
641 */
642 u64 RT_prod;
643 /* device-speed class for the low-latency heuristic */
644 enum bfq_device_speed device_speed;
645
646 /* fallback dummy bfqq for extreme OOM conditions */
647 struct bfq_queue oom_bfqq;
648
649 spinlock_t lock;
650
651 /*
652 * bic associated with the task issuing current bio for
653 * merging. This and the next field are used as a support to
654 * be able to perform the bic lookup, needed by bio-merge
655 * functions, before the scheduler lock is taken, and thus
656 * avoid taking the request-queue lock while the scheduler
657 * lock is being held.
658 */
659 struct bfq_io_cq *bio_bic;
660 /* bfqq associated with the task issuing current bio for merging */
661 struct bfq_queue *bio_bfqq;
662
663 /*
664 * io context to put right after bfqd->lock is released. This
665 * field is used to perform put_io_context, when needed,
666 * after the scheduler lock has been released, and thus
667 * prevent an ioc->lock from being possibly taken while the
668 * scheduler lock is being held.
669 */
670 struct io_context *ioc_to_put;
671};
672
673enum bfqq_state_flags {
674 BFQQF_busy = 0, /* has requests or is in service */
675 BFQQF_wait_request, /* waiting for a request */
676 BFQQF_non_blocking_wait_rq, /*
677 * waiting for a request
678 * without idling the device
679 */
680 BFQQF_fifo_expire, /* FIFO checked in this slice */
681 BFQQF_idle_window, /* slice idling enabled */
682 BFQQF_sync, /* synchronous queue */
683 BFQQF_IO_bound, /*
684 * bfqq has timed-out at least once
685 * having consumed at most 2/10 of
686 * its budget
687 */
688 BFQQF_softrt_update, /*
689 * may need softrt-next-start
690 * update
691 */
692 BFQQF_coop, /* bfqq is shared */
693 BFQQF_split_coop /* shared bfqq will be split */
694};
695
696#define BFQ_BFQQ_FNS(name) \
697static void bfq_mark_bfqq_##name(struct bfq_queue *bfqq) \
698{ \
699 __set_bit(BFQQF_##name, &(bfqq)->flags); \
700} \
701static void bfq_clear_bfqq_##name(struct bfq_queue *bfqq) \
702{ \
703 __clear_bit(BFQQF_##name, &(bfqq)->flags); \
704} \
705static int bfq_bfqq_##name(const struct bfq_queue *bfqq) \
706{ \
707 return test_bit(BFQQF_##name, &(bfqq)->flags); \
708}
709
710BFQ_BFQQ_FNS(busy);
711BFQ_BFQQ_FNS(wait_request);
712BFQ_BFQQ_FNS(non_blocking_wait_rq);
713BFQ_BFQQ_FNS(fifo_expire);
714BFQ_BFQQ_FNS(idle_window);
715BFQ_BFQQ_FNS(sync);
716BFQ_BFQQ_FNS(IO_bound);
717BFQ_BFQQ_FNS(coop);
718BFQ_BFQQ_FNS(split_coop);
719BFQ_BFQQ_FNS(softrt_update);
720#undef BFQ_BFQQ_FNS
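/*
 * For reference, BFQ_BFQQ_FNS(busy) above expands (roughly) to:
 *
 *	static void bfq_mark_bfqq_busy(struct bfq_queue *bfqq)
 *	{ __set_bit(BFQQF_busy, &(bfqq)->flags); }
 *	static void bfq_clear_bfqq_busy(struct bfq_queue *bfqq)
 *	{ __clear_bit(BFQQF_busy, &(bfqq)->flags); }
 *	static int bfq_bfqq_busy(const struct bfq_queue *bfqq)
 *	{ return test_bit(BFQQF_busy, &(bfqq)->flags); }
 *
 * i.e., one non-atomic set helper, one non-atomic clear helper and
 * one test helper per flag listed above.
 */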
721
722/* Logging facilities. */
723#ifdef CONFIG_BFQ_GROUP_IOSCHED
724static struct bfq_group *bfqq_group(struct bfq_queue *bfqq);
725static struct blkcg_gq *bfqg_to_blkg(struct bfq_group *bfqg);
726
727#define bfq_log_bfqq(bfqd, bfqq, fmt, args...) do { \
728 char __pbuf[128]; \
729 \
730 blkg_path(bfqg_to_blkg(bfqq_group(bfqq)), __pbuf, sizeof(__pbuf)); \
731 blk_add_trace_msg((bfqd)->queue, "bfq%d%c %s " fmt, (bfqq)->pid, \
732 bfq_bfqq_sync((bfqq)) ? 'S' : 'A', \
733 __pbuf, ##args); \
734} while (0)
735
736#define bfq_log_bfqg(bfqd, bfqg, fmt, args...) do { \
737 char __pbuf[128]; \
738 \
739 blkg_path(bfqg_to_blkg(bfqg), __pbuf, sizeof(__pbuf)); \
740 blk_add_trace_msg((bfqd)->queue, "%s " fmt, __pbuf, ##args); \
741} while (0)
742
743#else /* CONFIG_BFQ_GROUP_IOSCHED */
744
745#define bfq_log_bfqq(bfqd, bfqq, fmt, args...) \
746 blk_add_trace_msg((bfqd)->queue, "bfq%d%c " fmt, (bfqq)->pid, \
747 bfq_bfqq_sync((bfqq)) ? 'S' : 'A', \
748 ##args)
749#define bfq_log_bfqg(bfqd, bfqg, fmt, args...) do {} while (0)
750
751#endif /* CONFIG_BFQ_GROUP_IOSCHED */
752
753#define bfq_log(bfqd, fmt, args...) \
754 blk_add_trace_msg((bfqd)->queue, "bfq " fmt, ##args)
755
756/* Expiration reasons. */
757enum bfqq_expiration {
758 BFQQE_TOO_IDLE = 0, /*
759 * queue has been idling for
760 * too long
761 */
762 BFQQE_BUDGET_TIMEOUT, /* budget took too long to be used */
763 BFQQE_BUDGET_EXHAUSTED, /* budget consumed */
764 BFQQE_NO_MORE_REQUESTS, /* the queue has no more requests */
765 BFQQE_PREEMPTED /* preemption in progress */
766};
767
768struct bfqg_stats {
769#ifdef CONFIG_BFQ_GROUP_IOSCHED
770 /* number of ios merged */
771 struct blkg_rwstat merged;
772 /* total time spent on device in ns, may not be accurate w/ queueing */
773 struct blkg_rwstat service_time;
774 /* total time spent waiting in scheduler queue in ns */
775 struct blkg_rwstat wait_time;
776 /* number of IOs queued up */
777 struct blkg_rwstat queued;
778 /* total disk time and nr sectors dispatched by this group */
779 struct blkg_stat time;
780 /* sum of number of ios queued across all samples */
781 struct blkg_stat avg_queue_size_sum;
782 /* count of samples taken for average */
783 struct blkg_stat avg_queue_size_samples;
784 /* how many times this group has been removed from service tree */
785 struct blkg_stat dequeue;
786 /* total time spent waiting for it to be assigned a timeslice. */
787 struct blkg_stat group_wait_time;
788 /* time spent idling for this blkcg_gq */
789 struct blkg_stat idle_time;
790 /* total time with empty current active q with other requests queued */
791 struct blkg_stat empty_time;
792 /* fields after this shouldn't be cleared on stat reset */
793 uint64_t start_group_wait_time;
794 uint64_t start_idle_time;
795 uint64_t start_empty_time;
796 uint16_t flags;
797#endif /* CONFIG_BFQ_GROUP_IOSCHED */
798};
799
800#ifdef CONFIG_BFQ_GROUP_IOSCHED
801
802/*
803 * struct bfq_group_data - per-blkcg storage for the blkio subsystem.
804 *
805 * @ps: @blkcg_policy_storage that this structure inherits
806 * @weight: weight of the bfq_group
807 */
808struct bfq_group_data {
809 /* must be the first member */
810 struct blkcg_policy_data pd;
811
812 unsigned int weight;
813};
814
815/**
816 * struct bfq_group - per (device, cgroup) data structure.
817 * @entity: schedulable entity to insert into the parent group sched_data.
818 * @sched_data: own sched_data, to contain child entities (they may be
819 * both bfq_queues and bfq_groups).
820 * @bfqd: the bfq_data for the device this group acts upon.
821 * @async_bfqq: array of async queues for all the tasks belonging to
822 * the group, one queue per ioprio value per ioprio_class,
823 * except for the idle class that has only one queue.
824 * @async_idle_bfqq: async queue for the idle class (ioprio is ignored).
825 * @my_entity: pointer to @entity, %NULL for the toplevel group; used
826 * to avoid too many special cases during group creation/
827 * migration.
828 * @stats: stats for this bfqg.
829 * @active_entities: number of active entities belonging to the group;
830 * unused for the root group. Used to know whether there
831 * are groups with more than one active @bfq_entity
832 * (see the comments to the function
833 * bfq_bfqq_may_idle()).
834 * @rq_pos_tree: rbtree sorted by next_request position, used when
835 * determining if two or more queues have interleaving
836 * requests (see bfq_find_close_cooperator()).
837 *
838 * Each (device, cgroup) pair has its own bfq_group, i.e., for each cgroup
839 * there is a set of bfq_groups, each one collecting the lower-level
840 * entities belonging to the group that are acting on the same device.
841 *
842 * Locking works as follows:
843 * o @bfqd is protected by the queue lock, RCU is used to access it
844 * from the readers.
845 * o All the other fields are protected by the @bfqd queue lock.
846 */
847struct bfq_group {
848 /* must be the first member */
849 struct blkg_policy_data pd;
850
851 struct bfq_entity entity;
852 struct bfq_sched_data sched_data;
853
854 void *bfqd;
855
856 struct bfq_queue *async_bfqq[2][IOPRIO_BE_NR];
857 struct bfq_queue *async_idle_bfqq;
858
859 struct bfq_entity *my_entity;
860
861 int active_entities;
862
863 struct rb_root rq_pos_tree;
864
865 struct bfqg_stats stats;
866};
867
868#else
869struct bfq_group {
870 struct bfq_sched_data sched_data;
871
872 struct bfq_queue *async_bfqq[2][IOPRIO_BE_NR];
873 struct bfq_queue *async_idle_bfqq;
874
875 struct rb_root rq_pos_tree;
876};
877#endif
878
879static struct bfq_queue *bfq_entity_to_bfqq(struct bfq_entity *entity);
880
881static unsigned int bfq_class_idx(struct bfq_entity *entity)
882{
883 struct bfq_queue *bfqq = bfq_entity_to_bfqq(entity);
884
885 return bfqq ? bfqq->ioprio_class - 1 :
886 BFQ_DEFAULT_GRP_CLASS - 1;
887}
888
889static struct bfq_service_tree *
890bfq_entity_service_tree(struct bfq_entity *entity)
891{
892 struct bfq_sched_data *sched_data = entity->sched_data;
893 unsigned int idx = bfq_class_idx(entity);
894
895 return sched_data->service_tree + idx;
896}
897
898static struct bfq_queue *bic_to_bfqq(struct bfq_io_cq *bic, bool is_sync)
899{
900 return bic->bfqq[is_sync];
901}
902
903static void bic_set_bfqq(struct bfq_io_cq *bic, struct bfq_queue *bfqq,
904 bool is_sync)
905{
906 bic->bfqq[is_sync] = bfqq;
907}
908
909static struct bfq_data *bic_to_bfqd(struct bfq_io_cq *bic)
910{
911 return bic->icq.q->elevator->elevator_data;
912}
913
914#ifdef CONFIG_BFQ_GROUP_IOSCHED
915
916static struct bfq_group *bfq_bfqq_to_bfqg(struct bfq_queue *bfqq)
917{
918 struct bfq_entity *group_entity = bfqq->entity.parent;
919
920 if (!group_entity)
921 group_entity = &bfqq->bfqd->root_group->entity;
922
923 return container_of(group_entity, struct bfq_group, entity);
924}
925
926#else
927
928static struct bfq_group *bfq_bfqq_to_bfqg(struct bfq_queue *bfqq)
929{
930 return bfqq->bfqd->root_group;
931}
932
933#endif
934
935static void bfq_check_ioprio_change(struct bfq_io_cq *bic, struct bio *bio);
936static void bfq_put_queue(struct bfq_queue *bfqq);
937static struct bfq_queue *bfq_get_queue(struct bfq_data *bfqd,
938 struct bio *bio, bool is_sync,
939 struct bfq_io_cq *bic);
940static void bfq_end_wr_async_queues(struct bfq_data *bfqd,
941 struct bfq_group *bfqg);
942static void bfq_put_async_queues(struct bfq_data *bfqd, struct bfq_group *bfqg);
943static void bfq_exit_bfqq(struct bfq_data *bfqd, struct bfq_queue *bfqq);
944
945/* Expiration time of sync (0) and async (1) requests, in ns. */
946static const u64 bfq_fifo_expire[2] = { NSEC_PER_SEC / 4, NSEC_PER_SEC / 8 };
947
948/* Maximum backwards seek (magic number lifted from CFQ), in KiB. */
949static const int bfq_back_max = 16 * 1024;
950
951/* Penalty of a backwards seek, in number of sectors. */
952static const int bfq_back_penalty = 2;
953
954/* Idling period duration, in ns. */
955static u64 bfq_slice_idle = NSEC_PER_SEC / 125;
956
957/* Minimum number of assigned budgets for which stats are safe to compute. */
958static const int bfq_stats_min_budgets = 194;
959
960/* Default maximum budget values, in sectors and number of requests. */
961static const int bfq_default_max_budget = 16 * 1024;
962
963/*
964 * Async to sync throughput distribution is controlled as follows:
965 * when an async request is served, the entity is charged the number
966 * of sectors of the request, multiplied by the factor below
967 */
968static const int bfq_async_charge_factor = 10;
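/*
 * Illustrative example: with the factor above equal to 10, serving a
 * 64-sector async request charges 640 sectors to the budget of the
 * owning entity, so async queues consume their budgets (and hence
 * their share of the throughput) ten times faster than sync queues
 * issuing the same amount of I/O.
 */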
969
970/* Default timeout values, in jiffies, approximating CFQ defaults. */
971static const int bfq_timeout = HZ / 8;
972
973static struct kmem_cache *bfq_pool;
974
975/* Below this threshold (in ns), we consider thinktime immediate. */
976#define BFQ_MIN_TT (2 * NSEC_PER_MSEC)
977
978/* hw_tag detection: parallel requests threshold and min samples needed. */
979#define BFQ_HW_QUEUE_THRESHOLD 4
980#define BFQ_HW_QUEUE_SAMPLES 32
981
982#define BFQQ_SEEK_THR (sector_t)(8 * 100)
983#define BFQQ_SECT_THR_NONROT (sector_t)(2 * 32)
984#define BFQQ_CLOSE_THR (sector_t)(8 * 1024)
985#define BFQQ_SEEKY(bfqq) (hweight32(bfqq->seek_history) > 32/8)
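/*
 * In other words (illustrative restatement): seek_history acts as a
 * 32-bit shift register with one bit per recent request, and
 * BFQQ_SEEKY() classifies a queue as seeky as soon as more than
 * 32/8 = 4 of its last 32 recorded requests were seeky ones.
 */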
986
987/* Min number of samples required to perform peak-rate update */
988#define BFQ_RATE_MIN_SAMPLES 32
989/* Min observation time interval required to perform a peak-rate update (ns) */
990#define BFQ_RATE_MIN_INTERVAL (300*NSEC_PER_MSEC)
991/* Target observation time interval for a peak-rate update (ns) */
992#define BFQ_RATE_REF_INTERVAL NSEC_PER_SEC
993
994/* Shift used for peak rate fixed precision calculations. */
995#define BFQ_RATE_SHIFT 16
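/*
 * Illustrative example: with BFQ_RATE_SHIFT equal to 16, a peak rate
 * of 1 sector/usec is stored as 1 << 16 = 65536, and a peak rate of
 * 0.5 sectors/usec as 32768, i.e., rates keep 16 bits of fractional
 * precision in this fixed-point representation.
 */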
996
997/*
998 * By default, BFQ computes the duration of the weight raising for
999 * interactive applications automatically, using the following formula:
1000 * duration = (R / r) * T, where r is the peak rate of the device, and
1001 * R and T are two reference parameters.
1002 * In particular, R is the peak rate of the reference device (see below),
1003 * and T is a reference time: given the systems that are likely to be
1004 * installed on the reference device according to its speed class, T is
1005 * about the maximum time needed, under BFQ and while reading two files in
1006 * parallel, to load typical large applications on these systems.
1007 * In practice, the slower/faster the device at hand is, the more/less it
1008 * takes to load applications with respect to the reference device.
1009 * Accordingly, the longer/shorter BFQ grants weight raising to interactive
1010 * applications.
1011 *
1012 * BFQ uses four different reference pairs (R, T), depending on:
1013 * . whether the device is rotational or non-rotational;
1014 * . whether the device is slow, such as old or portable HDDs, as well as
1015 * SD cards, or fast, such as newer HDDs and SSDs.
1016 *
1017 * The device's speed class is dynamically (re)detected in
1018 * bfq_update_peak_rate() every time the estimated peak rate is updated.
1019 *
1020 * In the following definitions, R_slow[0]/R_fast[0] and
1021 * T_slow[0]/T_fast[0] are the reference values for a slow/fast
1022 * rotational device, whereas R_slow[1]/R_fast[1] and
1023 * T_slow[1]/T_fast[1] are the reference values for a slow/fast
1024 * non-rotational device. Finally, device_speed_thresh are the
1025 * thresholds used to switch between speed classes. The reference
1026 * rates are not the actual peak rates of the devices used as a
1027 * reference, but slightly lower values. The reason for using these
1028 * slightly lower values is that the peak-rate estimator tends to
1029 * yield slightly lower values than the actual peak rate (it can yield
1030 * the actual peak rate only if there is only one process doing I/O,
1031 * and the process does sequential I/O).
1032 *
1033 * Both the reference peak rates and the thresholds are measured in
1034 * sectors/usec, left-shifted by BFQ_RATE_SHIFT.
1035 */
1036static int R_slow[2] = {1000, 10700};
1037static int R_fast[2] = {14000, 33000};
1038/*
1039 * To improve readability, a conversion function is used to initialize the
1040 * following arrays, which entails that they can be initialized only in a
1041 * function.
1042 */
1043static int T_slow[2];
1044static int T_fast[2];
1045static int device_speed_thresh[2];
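/*
 * Worked example with arbitrary figures: if the estimated peak rate r
 * of a fast rotational device equals the reference rate R_fast[0],
 * then interactive weight raising lasts exactly T_fast[0]; if r is
 * only half of R_fast[0], the duration doubles, following
 * duration = (R / r) * T. Slower devices thus keep interactive queues
 * weight-raised proportionally longer.
 */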
1046
1047#define BFQ_SERVICE_TREE_INIT ((struct bfq_service_tree) \
1048 { RB_ROOT, RB_ROOT, NULL, NULL, 0, 0 })
1049
1050#define RQ_BIC(rq) ((struct bfq_io_cq *) (rq)->elv.priv[0])
1051#define RQ_BFQQ(rq) ((rq)->elv.priv[1])
1052
1053/**
1054 * icq_to_bic - convert iocontext queue structure to bfq_io_cq.
1055 * @icq: the iocontext queue.
1056 */
1057static struct bfq_io_cq *icq_to_bic(struct io_cq *icq)
1058{
1059 /* bic->icq is the first member, %NULL will convert to %NULL */
1060 return container_of(icq, struct bfq_io_cq, icq);
1061}
1062
1063/**
1064 * bfq_bic_lookup - search into @ioc a bic associated to @bfqd.
1065 * @bfqd: the lookup key.
1066 * @ioc: the io_context of the process doing I/O.
1067 * @q: the request queue.
1068 */
1069static struct bfq_io_cq *bfq_bic_lookup(struct bfq_data *bfqd,
1070 struct io_context *ioc,
1071 struct request_queue *q)
1072{
1073 if (ioc) {
1074 unsigned long flags;
1075 struct bfq_io_cq *icq;
1076
1077 spin_lock_irqsave(q->queue_lock, flags);
1078 icq = icq_to_bic(ioc_lookup_icq(ioc, q));
1079 spin_unlock_irqrestore(q->queue_lock, flags);
1080
1081 return icq;
1082 }
1083
1084 return NULL;
1085}
1086
1087/*
1088 * Scheduler run of queue, if there are requests pending and no one in the
1089 * driver that will restart queueing.
1090 */
1091static void bfq_schedule_dispatch(struct bfq_data *bfqd)
1092{
1093 if (bfqd->queued != 0) {
1094 bfq_log(bfqd, "schedule dispatch");
1095 blk_mq_run_hw_queues(bfqd->queue, true);
1096 }
1097}
1098
1099/*
1100 * Next two functions release bfqd->lock and put the io context
1101 * pointed to by bfqd->ioc_to_put. This delayed put is used to avoid
1102 * taking an ioc->lock while the scheduler lock is being held.
1103 */
1104static void bfq_unlock_put_ioc(struct bfq_data *bfqd)
1105{
1106 struct io_context *ioc_to_put = bfqd->ioc_to_put;
1107
1108 bfqd->ioc_to_put = NULL;
1109 spin_unlock_irq(&bfqd->lock);
1110
1111 if (ioc_to_put)
1112 put_io_context(ioc_to_put);
1113}
1114
1115static void bfq_unlock_put_ioc_restore(struct bfq_data *bfqd,
1116 unsigned long flags)
1117{
1118 struct io_context *ioc_to_put = bfqd->ioc_to_put;
1119
1120 bfqd->ioc_to_put = NULL;
1121 spin_unlock_irqrestore(&bfqd->lock, flags);
1122
1123 if (ioc_to_put)
1124 put_io_context(ioc_to_put);
1125}
1126
1127/**
1128 * bfq_gt - compare two timestamps.
1129 * @a: first ts.
1130 * @b: second ts.
1131 *
1132 * Return @a > @b, dealing with wrapping correctly.
1133 */
1134static int bfq_gt(u64 a, u64 b)
1135{
1136 return (s64)(a - b) > 0;
1137}
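/*
 * Illustrative example: the cast to s64 makes the comparison robust
 * to wraparound. If b = U64_MAX - 5 and a = 10 (i.e., a is the result
 * of b advancing by 16 and wrapping), then a - b equals 16, which is
 * positive as an s64, so bfq_gt(a, b) correctly returns true even
 * though a < b as plain unsigned values.
 */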
1138
1139static struct bfq_entity *bfq_root_active_entity(struct rb_root *tree)
1140{
1141 struct rb_node *node = tree->rb_node;
1142
1143 return rb_entry(node, struct bfq_entity, rb_node);
1144}
1145
1146static struct bfq_entity *bfq_lookup_next_entity(struct bfq_sched_data *sd);
1147
1148static bool bfq_update_parent_budget(struct bfq_entity *next_in_service);
1149
1150/**
1151 * bfq_update_next_in_service - update sd->next_in_service
1152 * @sd: sched_data for which to perform the update.
1153 * @new_entity: if not NULL, pointer to the entity whose activation,
1154 * requeueing or repositioning triggered the invocation of
1155 * this function.
1156 *
1157 * This function is called to update sd->next_in_service, which, in
1158 * its turn, may change as a consequence of the insertion or
1159 * extraction of an entity into/from one of the active trees of
1160 * sd. These insertions/extractions occur as a consequence of
1161 * activations/deactivations of entities, with some activations being
1162 * 'true' activations, and other activations being requeueings (i.e.,
1163 * implementing the second, requeueing phase of the mechanism used to
1164 * reposition an entity in its active tree; see comments on
1165 * __bfq_activate_entity and __bfq_requeue_entity for details). In
1166 * both the last two activation sub-cases, new_entity points to the
1167 * just activated or requeued entity.
1168 *
1169 * Returns true if sd->next_in_service changes in such a way that
1170 * entity->parent may become the next_in_service for its parent
1171 * entity.
1172 */
1173static bool bfq_update_next_in_service(struct bfq_sched_data *sd,
1174 struct bfq_entity *new_entity)
1175{
1176 struct bfq_entity *next_in_service = sd->next_in_service;
1177 bool parent_sched_may_change = false;
1178
1179 /*
1180 * If this update is triggered by the activation, requeueing
1181 * or repositioning of an entity that does not coincide with
1182 * sd->next_in_service, then a full lookup in the active tree
1183 * can be avoided. In fact, it is enough to check whether the
1184 * just-modified entity has a higher priority than
1185 * sd->next_in_service, or, even if it has the same priority
1186 * as sd->next_in_service, is eligible and has a lower virtual
1187 * finish time than sd->next_in_service. If this compound
1188 * condition holds, then the new entity becomes the new
1189 * next_in_service. Otherwise no change is needed.
1190 */
1191 if (new_entity && new_entity != sd->next_in_service) {
1192 /*
1193 * Flag used to decide whether to replace
1194 * sd->next_in_service with new_entity. Tentatively
1195 * set to true, and left as true if
1196 * sd->next_in_service is NULL.
1197 */
1198 bool replace_next = true;
1199
1200 /*
1201 * If there is already a next_in_service candidate
1202 * entity, then compare class priorities or timestamps
1203 * to decide whether to replace sd->next_in_service with
1204 * new_entity.
1205 */
1206 if (next_in_service) {
1207 unsigned int new_entity_class_idx =
1208 bfq_class_idx(new_entity);
1209 struct bfq_service_tree *st =
1210 sd->service_tree + new_entity_class_idx;
1211
1212 /*
1213 * For efficiency, evaluate the most likely
1214 * sub-condition first.
1215 */
1216 replace_next =
1217 (new_entity_class_idx ==
1218 bfq_class_idx(next_in_service)
1219 &&
1220 !bfq_gt(new_entity->start, st->vtime)
1221 &&
1222 bfq_gt(next_in_service->finish,
1223 new_entity->finish))
1224 ||
1225 new_entity_class_idx <
1226 bfq_class_idx(next_in_service);
1227 }
1228
1229 if (replace_next)
1230 next_in_service = new_entity;
1231 } else /* invoked because of a deactivation: lookup needed */
1232 next_in_service = bfq_lookup_next_entity(sd);
1233
1234 if (next_in_service) {
1235 parent_sched_may_change = !sd->next_in_service ||
1236 bfq_update_parent_budget(next_in_service);
1237 }
1238
1239 sd->next_in_service = next_in_service;
1240
1241 if (!next_in_service)
1242 return parent_sched_may_change;
1243
1244 return parent_sched_may_change;
1245}
1246
1247#ifdef CONFIG_BFQ_GROUP_IOSCHED
1248/* both next loops stop at one of the child entities of the root group */
1249#define for_each_entity(entity) \
1250 for (; entity ; entity = entity->parent)
1251
1252/*
1253 * For each iteration, compute parent in advance, so as to be safe if
1254 * entity is deallocated during the iteration. Such a deallocation may
1255 * happen as a consequence of a bfq_put_queue that frees the bfq_queue
1256 * containing entity.
1257 */
1258#define for_each_entity_safe(entity, parent) \
1259 for (; entity && ({ parent = entity->parent; 1; }); entity = parent)
1260
1261/*
1262 * Returns true if this budget changes may let next_in_service->parent
1263 * become the next_in_service entity for its parent entity.
1264 */
1265static bool bfq_update_parent_budget(struct bfq_entity *next_in_service)
1266{
1267 struct bfq_entity *bfqg_entity;
1268 struct bfq_group *bfqg;
1269 struct bfq_sched_data *group_sd;
1270 bool ret = false;
1271
1272 group_sd = next_in_service->sched_data;
1273
1274 bfqg = container_of(group_sd, struct bfq_group, sched_data);
1275 /*
1276 * bfq_group's my_entity field is not NULL only if the group
1277 * is not the root group. We must not touch the root entity
1278 * as it must never become an in-service entity.
1279 */
1280 bfqg_entity = bfqg->my_entity;
1281 if (bfqg_entity) {
1282 if (bfqg_entity->budget > next_in_service->budget)
1283 ret = true;
1284 bfqg_entity->budget = next_in_service->budget;
1285 }
1286
1287 return ret;
1288}
1289
1290/*
1291 * This function tells whether entity stops being a candidate for next
1292 * service, according to the following logic.
1293 *
1294 * This function is invoked for an entity that is about to be set in
1295 * service. If such an entity is a queue, then the entity is no longer
1296 * a candidate for next service (i.e., a candidate entity to serve
1297 * after the in-service entity is expired). The function then returns
1298 * true.
1299 *
1300 * In contrast, the entity could still be a candidate for next service
1301 * if it is not a queue, and has more than one child. In fact, even if
1302 * one of its children is about to be set in service, other children
1303 * may still be the next to serve. As a consequence, a non-queue
1304 * entity is not a candidate for next-service only if it has only one
1305 * child. And only if this condition holds, then the function returns
1306 * true for a non-queue entity.
1307 */
1308static bool bfq_no_longer_next_in_service(struct bfq_entity *entity)
1309{
1310 struct bfq_group *bfqg;
1311
1312 if (bfq_entity_to_bfqq(entity))
1313 return true;
1314
1315 bfqg = container_of(entity, struct bfq_group, entity);
1316
1317 if (bfqg->active_entities == 1)
1318 return true;
1319
1320 return false;
1321}
1322
1323#else /* CONFIG_BFQ_GROUP_IOSCHED */
1324/*
1325 * Next two macros are fake loops when cgroups support is not
1326 * enabled. In fact, in such a case, there is only one level to go up
1327 * (to reach the root group).
1328 */
1329#define for_each_entity(entity) \
1330 for (; entity ; entity = NULL)
1331
1332#define for_each_entity_safe(entity, parent) \
1333 for (parent = NULL; entity ; entity = parent)
1334
1335static bool bfq_update_parent_budget(struct bfq_entity *next_in_service)
1336{
1337 return false;
1338}
1339
1340static bool bfq_no_longer_next_in_service(struct bfq_entity *entity)
1341{
1342 return true;
1343}
1344
1345#endif /* CONFIG_BFQ_GROUP_IOSCHED */
1346
1347/*
1348 * Shift for timestamp calculations. This actually limits the maximum
1349 * service allowed in one timestamp delta (small shift values increase it),
1350 * the maximum total weight that can be used for the queues in the system
1351 * (big shift values increase it), and the period of virtual time
1352 * wraparounds.
1353 */
1354#define WFQ_SERVICE_SHIFT 22
1355
1356static struct bfq_queue *bfq_entity_to_bfqq(struct bfq_entity *entity)
1357{
1358 struct bfq_queue *bfqq = NULL;
1359
1360 if (!entity->my_sched_data)
1361 bfqq = container_of(entity, struct bfq_queue, entity);
1362
1363 return bfqq;
1364}
1365
1366
1367/**
1368 * bfq_delta - map service into the virtual time domain.
1369 * @service: amount of service.
1370 * @weight: scale factor (weight of an entity or weight sum).
1371 */
1372static u64 bfq_delta(unsigned long service, unsigned long weight)
1373{
1374 u64 d = (u64)service << WFQ_SERVICE_SHIFT;
1375
1376 do_div(d, weight);
1377 return d;
1378}
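/*
 * Illustrative example: with WFQ_SERVICE_SHIFT = 22, charging 8
 * sectors to an entity with weight 10 yields
 * bfq_delta(8, 10) = (8 << 22) / 10 = 3355443 units of virtual time,
 * while the same 8 sectors charged to an entity with weight 100 yield
 * only 335544 units: heavier entities consume virtual time more
 * slowly, and are therefore selected more often.
 */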
1379
1380/**
1381 * bfq_calc_finish - assign the finish time to an entity.
1382 * @entity: the entity to act upon.
1383 * @service: the service to be charged to the entity.
1384 */
1385static void bfq_calc_finish(struct bfq_entity *entity, unsigned long service)
1386{
1387 struct bfq_queue *bfqq = bfq_entity_to_bfqq(entity);
1388
1389 entity->finish = entity->start +
1390 bfq_delta(service, entity->weight);
1391
1392 if (bfqq) {
1393 bfq_log_bfqq(bfqq->bfqd, bfqq,
1394 "calc_finish: serv %lu, w %d",
1395 service, entity->weight);
1396 bfq_log_bfqq(bfqq->bfqd, bfqq,
1397 "calc_finish: start %llu, finish %llu, delta %llu",
1398 entity->start, entity->finish,
1399 bfq_delta(service, entity->weight));
1400 }
1401}
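/*
 * Illustrative example: an entity with weight 10, start timestamp
 * S = 1000 and a service charge of 8 sectors gets
 * F = S + bfq_delta(8, 10) = 1000 + 3355443 = 3356443. Roughly
 * speaking, B-WF2Q+ then serves, among the eligible entities (those
 * whose start timestamp does not exceed the current virtual time),
 * the one with the smallest finish timestamp F.
 */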
1402
1403/**
1404 * bfq_entity_of - get an entity from a node.
1405 * @node: the node field of the entity.
1406 *
1407 * Convert a node pointer to the relative entity. This is used only
1408 * to simplify the logic of some functions and not as the generic
1409 * conversion mechanism because, e.g., in the tree walking functions,
1410 * the check for a %NULL value would be redundant.
1411 */
1412static struct bfq_entity *bfq_entity_of(struct rb_node *node)
1413{
1414 struct bfq_entity *entity = NULL;
1415
1416 if (node)
1417 entity = rb_entry(node, struct bfq_entity, rb_node);
1418
1419 return entity;
1420}
1421
1422/**
1423 * bfq_extract - remove an entity from a tree.
1424 * @root: the tree root.
1425 * @entity: the entity to remove.
1426 */
1427static void bfq_extract(struct rb_root *root, struct bfq_entity *entity)
1428{
1429 entity->tree = NULL;
1430 rb_erase(&entity->rb_node, root);
1431}
1432
1433/**
1434 * bfq_idle_extract - extract an entity from the idle tree.
1435 * @st: the service tree of the owning @entity.
1436 * @entity: the entity being removed.
1437 */
1438static void bfq_idle_extract(struct bfq_service_tree *st,
1439 struct bfq_entity *entity)
1440{
1441 struct bfq_queue *bfqq = bfq_entity_to_bfqq(entity);
1442 struct rb_node *next;
1443
1444 if (entity == st->first_idle) {
1445 next = rb_next(&entity->rb_node);
1446 st->first_idle = bfq_entity_of(next);
1447 }
1448
1449 if (entity == st->last_idle) {
1450 next = rb_prev(&entity->rb_node);
1451 st->last_idle = bfq_entity_of(next);
1452 }
1453
1454 bfq_extract(&st->idle, entity);
1455
1456 if (bfqq)
1457 list_del(&bfqq->bfqq_list);
1458}
1459
1460/**
1461 * bfq_insert - generic tree insertion.
1462 * @root: tree root.
1463 * @entity: entity to insert.
1464 *
1465 * This is used for the idle and the active tree, since they are both
1466 * ordered by finish time.
1467 */
1468static void bfq_insert(struct rb_root *root, struct bfq_entity *entity)
1469{
1470 struct bfq_entity *entry;
1471 struct rb_node **node = &root->rb_node;
1472 struct rb_node *parent = NULL;
1473
1474 while (*node) {
1475 parent = *node;
1476 entry = rb_entry(parent, struct bfq_entity, rb_node);
1477
1478 if (bfq_gt(entry->finish, entity->finish))
1479 node = &parent->rb_left;
1480 else
1481 node = &parent->rb_right;
1482 }
1483
1484 rb_link_node(&entity->rb_node, parent, node);
1485 rb_insert_color(&entity->rb_node, root);
1486
1487 entity->tree = root;
1488}
1489
1490/**
1491 * bfq_update_min - update the min_start field of a entity.
1492 * @entity: the entity to update.
1493 * @node: one of its children.
1494 *
1495 * This function is called when @entity may store an invalid value for
1496 * min_start due to updates to the active tree. The function assumes
1497 * that the subtree rooted at @node (which may be its left or its right
1498 * child) has a valid min_start value.
1499 */
1500static void bfq_update_min(struct bfq_entity *entity, struct rb_node *node)
1501{
1502 struct bfq_entity *child;
1503
1504 if (node) {
1505 child = rb_entry(node, struct bfq_entity, rb_node);
1506 if (bfq_gt(entity->min_start, child->min_start))
1507 entity->min_start = child->min_start;
1508 }
1509}
1510
1511/**
1512 * bfq_update_active_node - recalculate min_start.
1513 * @node: the node to update.
1514 *
1515 * @node may have changed position or one of its children may have moved,
1516 * this function updates its min_start value. The left and right subtrees
1517 * are assumed to hold a correct min_start value.
1518 */
1519static void bfq_update_active_node(struct rb_node *node)
1520{
1521 struct bfq_entity *entity = rb_entry(node, struct bfq_entity, rb_node);
1522
1523 entity->min_start = entity->start;
1524 bfq_update_min(entity, node->rb_right);
1525 bfq_update_min(entity, node->rb_left);
1526}
1527
1528/**
1529 * bfq_update_active_tree - update min_start for the whole active tree.
1530 * @node: the starting node.
1531 *
1532 * @node must be the deepest modified node after an update. This function
1533 * updates its min_start using the values held by its children, assuming
1534 * that they did not change, and then updates all the nodes that may have
1535 * changed in the path to the root. The only nodes that may have changed
1536 * are the ones in the path or their siblings.
1537 */
1538static void bfq_update_active_tree(struct rb_node *node)
1539{
1540 struct rb_node *parent;
1541
1542up:
1543 bfq_update_active_node(node);
1544
1545 parent = rb_parent(node);
1546 if (!parent)
1547 return;
1548
1549 if (node == parent->rb_left && parent->rb_right)
1550 bfq_update_active_node(parent->rb_right);
1551 else if (parent->rb_left)
1552 bfq_update_active_node(parent->rb_left);
1553
1554 node = parent;
1555 goto up;
1556}
1557
1558static void bfq_weights_tree_add(struct bfq_data *bfqd,
1559 struct bfq_entity *entity,
1560 struct rb_root *root);
1561
1562static void bfq_weights_tree_remove(struct bfq_data *bfqd,
1563 struct bfq_entity *entity,
1564 struct rb_root *root);
1565
1566
1567/**
1568 * bfq_active_insert - insert an entity in the active tree of its
1569 * group/device.
1570 * @st: the service tree of the entity.
1571 * @entity: the entity being inserted.
1572 *
1573 * The active tree is ordered by finish time, but an extra key is kept
1574 * per each node, containing the minimum value for the start times of
1575 * its children (and the node itself), so it's possible to search for
1576 * the eligible node with the lowest finish time in logarithmic time.
1577 */
1578static void bfq_active_insert(struct bfq_service_tree *st,
1579 struct bfq_entity *entity)
1580{
1581 struct bfq_queue *bfqq = bfq_entity_to_bfqq(entity);
1582 struct rb_node *node = &entity->rb_node;
e21b7a0b
AA
1583#ifdef CONFIG_BFQ_GROUP_IOSCHED
1584 struct bfq_sched_data *sd = NULL;
1585 struct bfq_group *bfqg = NULL;
1586 struct bfq_data *bfqd = NULL;
1587#endif
aee69d78
PV
1588
1589 bfq_insert(&st->active, entity);
1590
1591 if (node->rb_left)
1592 node = node->rb_left;
1593 else if (node->rb_right)
1594 node = node->rb_right;
1595
1596 bfq_update_active_tree(node);
1597
e21b7a0b
AA
1598#ifdef CONFIG_BFQ_GROUP_IOSCHED
1599 sd = entity->sched_data;
1600 bfqg = container_of(sd, struct bfq_group, sched_data);
1601 bfqd = (struct bfq_data *)bfqg->bfqd;
1602#endif
aee69d78
PV
1603 if (bfqq)
1604 list_add(&bfqq->bfqq_list, &bfqq->bfqd->active_list);
1de0c4cd
AA
1605#ifdef CONFIG_BFQ_GROUP_IOSCHED
1606 else /* bfq_group */
1607 bfq_weights_tree_add(bfqd, entity, &bfqd->group_weights_tree);
1608
1609 if (bfqg != bfqd->root_group)
1610 bfqg->active_entities++;
1611#endif
aee69d78
PV
1612}
1613
1614/**
1615 * bfq_ioprio_to_weight - calc a weight from an ioprio.
1616 * @ioprio: the ioprio value to convert.
1617 */
1618static unsigned short bfq_ioprio_to_weight(int ioprio)
1619{
1620 return (IOPRIO_BE_NR - ioprio) * BFQ_WEIGHT_CONVERSION_COEFF;
1621}
1622
1623/**
1624 * bfq_weight_to_ioprio - calc an ioprio from a weight.
1625 * @weight: the weight value to convert.
1626 *
1627 * To preserve as much as possible the old only-ioprio user interface,
1628 * 0 is used as an escape ioprio value for weights (numerically) equal
1629 * to or larger than IOPRIO_BE_NR * BFQ_WEIGHT_CONVERSION_COEFF.
1630 */
1631static unsigned short bfq_weight_to_ioprio(int weight)
1632{
1633	return max_t(int, 0,
1634		     IOPRIO_BE_NR - weight / BFQ_WEIGHT_CONVERSION_COEFF);
1635}
1636
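
 Since the conversion above is compact, a worked example may help. Assuming
 the constants used elsewhere in this file are IOPRIO_BE_NR == 8 and
 BFQ_WEIGHT_CONVERSION_COEFF == 10 (they are not shown in this excerpt), the
 best-effort ioprio levels 0..7 map linearly to weights 80..10, and weights at
 or above 80 map back to the escape ioprio 0. A standalone sketch, with ex_
 names made up for the example:

	#include <assert.h>

	/* assumed values of the kernel constants, for the example only */
	#define EX_IOPRIO_BE_NR			8
	#define EX_WEIGHT_CONVERSION_COEFF	10

	static unsigned short ex_ioprio_to_weight(int ioprio)
	{
		return (EX_IOPRIO_BE_NR - ioprio) * EX_WEIGHT_CONVERSION_COEFF;
	}

	int main(void)
	{
		assert(ex_ioprio_to_weight(0) == 80);	/* highest best-effort prio */
		assert(ex_ioprio_to_weight(4) == 40);	/* default ioprio           */
		assert(ex_ioprio_to_weight(7) == 10);	/* lowest best-effort prio  */
		return 0;
	}
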
1637static void bfq_get_entity(struct bfq_entity *entity)
1638{
1639 struct bfq_queue *bfqq = bfq_entity_to_bfqq(entity);
1640
1641 if (bfqq) {
1642 bfqq->ref++;
1643 bfq_log_bfqq(bfqq->bfqd, bfqq, "get_entity: %p %d",
1644 bfqq, bfqq->ref);
1645 }
1646}
1647
1648/**
1649 * bfq_find_deepest - find the deepest node that an extraction can modify.
1650 * @node: the node being removed.
1651 *
1652 * Do the first step of an extraction in an rb tree, looking for the
1653 * node that will replace @node, and returning the deepest node that
1654 * the following modifications to the tree can touch. If @node is the
1655 * last node in the tree return %NULL.
1656 */
1657static struct rb_node *bfq_find_deepest(struct rb_node *node)
1658{
1659 struct rb_node *deepest;
1660
1661 if (!node->rb_right && !node->rb_left)
1662 deepest = rb_parent(node);
1663 else if (!node->rb_right)
1664 deepest = node->rb_left;
1665 else if (!node->rb_left)
1666 deepest = node->rb_right;
1667 else {
1668 deepest = rb_next(node);
1669 if (deepest->rb_right)
1670 deepest = deepest->rb_right;
1671 else if (rb_parent(deepest) != node)
1672 deepest = rb_parent(deepest);
1673 }
1674
1675 return deepest;
1676}
1677
1678/**
1679 * bfq_active_extract - remove an entity from the active tree.
1680 * @st: the service_tree containing the tree.
1681 * @entity: the entity being removed.
1682 */
1683static void bfq_active_extract(struct bfq_service_tree *st,
1684 struct bfq_entity *entity)
1685{
1686 struct bfq_queue *bfqq = bfq_entity_to_bfqq(entity);
1687 struct rb_node *node;
e21b7a0b
AA
1688#ifdef CONFIG_BFQ_GROUP_IOSCHED
1689 struct bfq_sched_data *sd = NULL;
1690 struct bfq_group *bfqg = NULL;
1691 struct bfq_data *bfqd = NULL;
1692#endif
aee69d78
PV
1693
1694 node = bfq_find_deepest(&entity->rb_node);
1695 bfq_extract(&st->active, entity);
1696
1697 if (node)
1698 bfq_update_active_tree(node);
1699
e21b7a0b
AA
1700#ifdef CONFIG_BFQ_GROUP_IOSCHED
1701 sd = entity->sched_data;
1702 bfqg = container_of(sd, struct bfq_group, sched_data);
1703 bfqd = (struct bfq_data *)bfqg->bfqd;
1704#endif
aee69d78
PV
1705 if (bfqq)
1706 list_del(&bfqq->bfqq_list);
1de0c4cd
AA
1707#ifdef CONFIG_BFQ_GROUP_IOSCHED
1708 else /* bfq_group */
1709 bfq_weights_tree_remove(bfqd, entity,
1710 &bfqd->group_weights_tree);
1711
1712 if (bfqg != bfqd->root_group)
1713 bfqg->active_entities--;
1714#endif
aee69d78
PV
1715}
1716
1717/**
1718 * bfq_idle_insert - insert an entity into the idle tree.
1719 * @st: the service tree containing the tree.
1720 * @entity: the entity to insert.
1721 */
1722static void bfq_idle_insert(struct bfq_service_tree *st,
1723 struct bfq_entity *entity)
1724{
1725 struct bfq_queue *bfqq = bfq_entity_to_bfqq(entity);
1726 struct bfq_entity *first_idle = st->first_idle;
1727 struct bfq_entity *last_idle = st->last_idle;
1728
1729 if (!first_idle || bfq_gt(first_idle->finish, entity->finish))
1730 st->first_idle = entity;
1731 if (!last_idle || bfq_gt(entity->finish, last_idle->finish))
1732 st->last_idle = entity;
1733
1734 bfq_insert(&st->idle, entity);
1735
1736 if (bfqq)
1737 list_add(&bfqq->bfqq_list, &bfqq->bfqd->idle_list);
1738}
1739
1740/**
1741 * bfq_forget_entity - do not consider entity any longer for scheduling
1742 * @st: the service tree.
1743 * @entity: the entity being removed.
1744 * @is_in_service: true if entity is currently the in-service entity.
1745 *
1746 * Forget everything about @entity. In addition, if entity represents
1747 * a queue, and the latter is not in service, then release the service
1748 * reference to the queue (the one taken through bfq_get_entity). In
1749 * fact, in this case, there is really no more service reference to
1750 * the queue, as the latter is also outside any service tree. If,
1751 * instead, the queue is in service, then __bfq_bfqd_reset_in_service
1752 * will take care of putting the reference when the queue finally
1753 * stops being served.
1754 */
1755static void bfq_forget_entity(struct bfq_service_tree *st,
1756 struct bfq_entity *entity,
1757 bool is_in_service)
1758{
1759 struct bfq_queue *bfqq = bfq_entity_to_bfqq(entity);
1760
e21b7a0b 1761 entity->on_st = false;
aee69d78
PV
1762 st->wsum -= entity->weight;
1763 if (bfqq && !is_in_service)
1764 bfq_put_queue(bfqq);
1765}
1766
1767/**
1768 * bfq_put_idle_entity - release the idle tree ref of an entity.
1769 * @st: service tree for the entity.
1770 * @entity: the entity being released.
1771 */
1772static void bfq_put_idle_entity(struct bfq_service_tree *st,
1773 struct bfq_entity *entity)
1774{
1775 bfq_idle_extract(st, entity);
1776 bfq_forget_entity(st, entity,
1777 entity == entity->sched_data->in_service_entity);
1778}
1779
1780/**
1781 * bfq_forget_idle - update the idle tree if necessary.
1782 * @st: the service tree to act upon.
1783 *
1784 * To preserve the global O(log N) complexity we only remove one entry here;
1785 * as the idle tree will not grow indefinitely this can be done safely.
1786 */
1787static void bfq_forget_idle(struct bfq_service_tree *st)
1788{
1789 struct bfq_entity *first_idle = st->first_idle;
1790 struct bfq_entity *last_idle = st->last_idle;
1791
1792 if (RB_EMPTY_ROOT(&st->active) && last_idle &&
1793 !bfq_gt(last_idle->finish, st->vtime)) {
1794 /*
1795 * Forget the whole idle tree, increasing the vtime past
1796 * the last finish time of idle entities.
1797 */
1798 st->vtime = last_idle->finish;
1799 }
1800
1801 if (first_idle && !bfq_gt(first_idle->finish, st->vtime))
1802 bfq_put_idle_entity(st, first_idle);
1803}
1804
1805static struct bfq_service_tree *
1806__bfq_entity_update_weight_prio(struct bfq_service_tree *old_st,
e21b7a0b 1807 struct bfq_entity *entity)
aee69d78
PV
1808{
1809 struct bfq_service_tree *new_st = old_st;
1810
1811 if (entity->prio_changed) {
1812 struct bfq_queue *bfqq = bfq_entity_to_bfqq(entity);
44e44a1b 1813 unsigned int prev_weight, new_weight;
aee69d78 1814 struct bfq_data *bfqd = NULL;
1de0c4cd 1815 struct rb_root *root;
e21b7a0b
AA
1816#ifdef CONFIG_BFQ_GROUP_IOSCHED
1817 struct bfq_sched_data *sd;
1818 struct bfq_group *bfqg;
1819#endif
aee69d78
PV
1820
1821 if (bfqq)
1822 bfqd = bfqq->bfqd;
e21b7a0b
AA
1823#ifdef CONFIG_BFQ_GROUP_IOSCHED
1824 else {
1825 sd = entity->my_sched_data;
1826 bfqg = container_of(sd, struct bfq_group, sched_data);
1827 bfqd = (struct bfq_data *)bfqg->bfqd;
1828 }
1829#endif
aee69d78
PV
1830
1831 old_st->wsum -= entity->weight;
1832
1833 if (entity->new_weight != entity->orig_weight) {
1834 if (entity->new_weight < BFQ_MIN_WEIGHT ||
1835 entity->new_weight > BFQ_MAX_WEIGHT) {
1836 pr_crit("update_weight_prio: new_weight %d\n",
1837 entity->new_weight);
1838 if (entity->new_weight < BFQ_MIN_WEIGHT)
1839 entity->new_weight = BFQ_MIN_WEIGHT;
1840 else
1841 entity->new_weight = BFQ_MAX_WEIGHT;
1842 }
1843 entity->orig_weight = entity->new_weight;
1844 if (bfqq)
1845 bfqq->ioprio =
1846 bfq_weight_to_ioprio(entity->orig_weight);
1847 }
1848
1849 if (bfqq)
1850 bfqq->ioprio_class = bfqq->new_ioprio_class;
1851 entity->prio_changed = 0;
1852
1853 /*
1854 * NOTE: here we may be changing the weight too early,
1855 * this will cause unfairness. The correct approach
1856 * would have required additional complexity to defer
1857 * weight changes to the proper time instants (i.e.,
1858 * when entity->finish <= old_st->vtime).
1859 */
1860 new_st = bfq_entity_service_tree(entity);
1861
1862 prev_weight = entity->weight;
44e44a1b
PV
1863 new_weight = entity->orig_weight *
1864 (bfqq ? bfqq->wr_coeff : 1);
1de0c4cd
AA
1865 /*
1866 * If the weight of the entity changes, remove the entity
1867 * from its old weight counter (if there is a counter
1868 * associated with the entity), and add it to the counter
1869 * associated with its new weight.
1870 */
1871 if (prev_weight != new_weight) {
1872 root = bfqq ? &bfqd->queue_weights_tree :
1873 &bfqd->group_weights_tree;
1874 bfq_weights_tree_remove(bfqd, entity, root);
1875 }
aee69d78 1876 entity->weight = new_weight;
1de0c4cd
AA
1877 /*
1878 * Add the entity to its weights tree only if it is
1879 * not associated with a weight-raised queue.
1880 */
1881 if (prev_weight != new_weight &&
1882 (bfqq ? bfqq->wr_coeff == 1 : 1))
1883 /* If we get here, root has been initialized. */
1884 bfq_weights_tree_add(bfqd, entity, root);
aee69d78
PV
1885
1886 new_st->wsum += entity->weight;
1887
1888 if (new_st != old_st)
1889 entity->start = new_st->vtime;
1890 }
1891
1892 return new_st;
1893}
1894
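
 As a worked example of the weight update above: a queue with orig_weight 40
 that is currently weight-raised with wr_coeff == 30 (the coefficient value is
 chosen only for the example; the real one is defined elsewhere in this file)
 gets new_weight = 40 * 30 = 1200 here. Since prev_weight differs from
 new_weight, the entity is removed from its old weight counter, but because
 wr_coeff > 1 it is deliberately not re-added to the weights tree, exactly as
 the comment above explains.
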
e21b7a0b
AA
1895static void bfqg_stats_set_start_empty_time(struct bfq_group *bfqg);
1896static struct bfq_group *bfqq_group(struct bfq_queue *bfqq);
1897
aee69d78
PV
1898/**
1899 * bfq_bfqq_served - update the scheduler status after selection for
1900 * service.
1901 * @bfqq: the queue being served.
1902 * @served: bytes to transfer.
1903 *
1904 * NOTE: this can be optimized, as the timestamps of upper level entities
1905 * are synchronized every time a new bfqq is selected for service. For now,
1906 * we keep it this way to better check consistency.
1907 */
1908static void bfq_bfqq_served(struct bfq_queue *bfqq, int served)
1909{
1910 struct bfq_entity *entity = &bfqq->entity;
1911 struct bfq_service_tree *st;
1912
1913 for_each_entity(entity) {
1914 st = bfq_entity_service_tree(entity);
1915
1916 entity->service += served;
1917
1918 st->vtime += bfq_delta(served, st->wsum);
1919 bfq_forget_idle(st);
1920 }
e21b7a0b 1921 bfqg_stats_set_start_empty_time(bfqq_group(bfqq));
aee69d78
PV
1922 bfq_log_bfqq(bfqq->bfqd, bfqq, "bfqq_served %d secs", served);
1923}
1924
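
 The key line above is the virtual-time advance: st->vtime grows by the
 service just delivered divided by the total weight of the active entities,
 which is what makes each queue's share proportional to its weight.
 bfq_delta() is defined earlier in the file and is, roughly, a fixed-point
 service/weight. A toy userspace model, with an illustrative shift value and
 made-up ex_ names:

	#include <stdio.h>

	#define EX_SERVICE_SHIFT 22	/* illustrative fixed-point shift */

	static unsigned long long ex_delta(unsigned long service,
					   unsigned long wsum)
	{
		return ((unsigned long long)service << EX_SERVICE_SHIFT) / wsum;
	}

	int main(void)
	{
		unsigned long long vtime = 0;
		unsigned long wsum = 10 + 30;	/* two active queues, weights 10 and 30 */

		vtime += ex_delta(8, wsum);	/* 8 sectors served somewhere */
		printf("vtime advanced by %llu fixed-point units (8/40 of one virtual unit)\n",
		       vtime);
		return 0;
	}
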
1925/**
c074170e
PV
1926 * bfq_bfqq_charge_time - charge an amount of service equivalent to the length
1927 * of the time interval during which bfqq has been in
1928 * service.
1929 * @bfqd: the device
aee69d78 1930 * @bfqq: the queue that needs a service update.
c074170e 1931 * @time_ms: the amount of time during which the queue has received service
aee69d78 1932 *
c074170e
PV
1933 * If a queue does not consume its budget fast enough, then providing
1934 * the queue with service fairness may impair throughput, more or less
1935 * severely. For this reason, queues that consume their budget slowly
1936 * are provided with time fairness instead of service fairness. This
1937 * goal is achieved through the BFQ scheduling engine, even if such an
1938 * engine works in the service, and not in the time domain. The trick
1939 * is charging these queues with an inflated amount of service, equal
1940 * to the amount of service that they would have received during their
1941 * service slot if they had been fast, i.e., if their requests had
1942 * been dispatched at a rate equal to the estimated peak rate.
1943 *
1944 * It is worth noting that time fairness can cause important
1945 * distortions in terms of bandwidth distribution, on devices with
1946 * internal queueing. The reason is that I/O requests dispatched
1947 * during the service slot of a queue may be served after that service
1948 * slot is finished, and may have a total processing time loosely
1949 * correlated with the duration of the service slot. This is
1950 * especially true for short service slots.
aee69d78 1951 */
c074170e
PV
1952static void bfq_bfqq_charge_time(struct bfq_data *bfqd, struct bfq_queue *bfqq,
1953 unsigned long time_ms)
aee69d78
PV
1954{
1955 struct bfq_entity *entity = &bfqq->entity;
c074170e
PV
1956 int tot_serv_to_charge = entity->service;
1957 unsigned int timeout_ms = jiffies_to_msecs(bfq_timeout);
1958
1959 if (time_ms > 0 && time_ms < timeout_ms)
1960 tot_serv_to_charge =
1961 (bfqd->bfq_max_budget * time_ms) / timeout_ms;
aee69d78 1962
c074170e
PV
1963 if (tot_serv_to_charge < entity->service)
1964 tot_serv_to_charge = entity->service;
aee69d78 1965
c074170e
PV
1966 /* Increase budget to avoid inconsistencies */
1967 if (tot_serv_to_charge > entity->budget)
1968 entity->budget = tot_serv_to_charge;
1969
1970 bfq_bfqq_served(bfqq,
1971 max_t(int, 0, tot_serv_to_charge - entity->service));
aee69d78
PV
1972}
1973
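
 A quick numeric illustration of the charge computed above (both values are
 assumed for the example; the real ones come from bfqd->bfq_max_budget and the
 budget timeout): a queue that held the device for 25 ms out of a 125 ms
 timeout is charged a fifth of the maximum budget, no matter how little it
 actually transferred.

	#include <stdio.h>

	int main(void)
	{
		long max_budget = 16384;	/* sectors, assumed for the example */
		long timeout_ms = 125;		/* assumed budget timeout           */
		long time_ms = 25;		/* time the queue held the device   */

		printf("charged service: %ld sectors\n",
		       (max_budget * time_ms) / timeout_ms);	/* 3276 */
		return 0;
	}
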
e21b7a0b
AA
1974static void bfq_update_fin_time_enqueue(struct bfq_entity *entity,
1975 struct bfq_service_tree *st,
1976 bool backshifted)
aee69d78 1977{
44e44a1b
PV
1978 struct bfq_queue *bfqq = bfq_entity_to_bfqq(entity);
1979
aee69d78
PV
1980 st = __bfq_entity_update_weight_prio(st, entity);
1981 bfq_calc_finish(entity, entity->budget);
1982
1983 /*
1984 * If some queues enjoy backshifting for a while, then their
1985 * (virtual) finish timestamps may happen to become lower and
1986 * lower than the system virtual time. In particular, if
1987 * these queues often happen to be idle for short time
1988 * periods, and during such time periods other queues with
1989 * higher timestamps happen to be busy, then the backshifted
1990 * timestamps of the former queues can become much lower than
1991 * the system virtual time. In fact, to serve the queues with
1992 * higher timestamps while the ones with lower timestamps are
1993 * idle, the system virtual time may be pushed-up to much
1994 * higher values than the finish timestamps of the idle
1995 * queues. As a consequence, the finish timestamps of all new
1996 * or newly activated queues may end up being much larger than
1997 * those of lucky queues with backshifted timestamps. The
1998 * latter queues may then monopolize the device for a lot of
1999 * time. This would simply break service guarantees.
2000 *
2001 * To reduce this problem, push up a little bit the
2002 * backshifted timestamps of the queue associated with this
2003 * entity (only a queue can happen to have the backshifted
2004 * flag set): just enough to let the finish timestamp of the
2005 * queue be equal to the current value of the system virtual
2006 * time. This may introduce a little unfairness among queues
2007 * with backshifted timestamps, but it does not break
2008 * worst-case fairness guarantees.
44e44a1b
PV
2009 *
2010 * As a special case, if bfqq is weight-raised, push up
2011 * timestamps much less, to keep very low the probability that
2012 * this push up causes the backshifted finish timestamps of
2013 * weight-raised queues to become higher than the backshifted
2014 * finish timestamps of non weight-raised queues.
aee69d78
PV
2015 */
2016 if (backshifted && bfq_gt(st->vtime, entity->finish)) {
2017 unsigned long delta = st->vtime - entity->finish;
2018
44e44a1b
PV
2019 if (bfqq)
2020 delta /= bfqq->wr_coeff;
2021
aee69d78
PV
2022 entity->start += delta;
2023 entity->finish += delta;
2024 }
2025
2026 bfq_active_insert(st, entity);
2027}
2028
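
 As a concrete example of the push-up above (the coefficient value is assumed
 for illustration): if st->vtime has run 3000 units ahead of entity->finish, a
 non-weight-raised queue has both start and finish pushed forward by the full
 3000 units, while a queue that is currently weight-raised with wr_coeff == 30
 is pushed forward by only 3000 / 30 = 100 units, so most of its backshifted
 service credit is preserved.
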
2029/**
e21b7a0b
AA
2030 * __bfq_activate_entity - handle activation of entity.
2031 * @entity: the entity being activated.
2032 * @non_blocking_wait_rq: true if entity was waiting for a request
2033 *
2034 * Called for a 'true' activation, i.e., if entity is not active and
2035 * one of its children receives a new request.
2036 *
2037 * Basically, this function updates the timestamps of entity and
2038 * inserts entity into its active tree, after possibly extracting it
2039 * from its idle tree.
2040 */
2041static void __bfq_activate_entity(struct bfq_entity *entity,
2042 bool non_blocking_wait_rq)
2043{
2044 struct bfq_service_tree *st = bfq_entity_service_tree(entity);
2045 bool backshifted = false;
2046 unsigned long long min_vstart;
2047
2048	/* See comments on bfq_bfqq_update_budg_for_activation */
2049 if (non_blocking_wait_rq && bfq_gt(st->vtime, entity->finish)) {
2050 backshifted = true;
2051 min_vstart = entity->finish;
2052 } else
2053 min_vstart = st->vtime;
2054
2055 if (entity->tree == &st->idle) {
2056 /*
2057 * Must be on the idle tree, bfq_idle_extract() will
2058 * check for that.
2059 */
2060 bfq_idle_extract(st, entity);
2061 entity->start = bfq_gt(min_vstart, entity->finish) ?
2062 min_vstart : entity->finish;
2063 } else {
2064 /*
2065 * The finish time of the entity may be invalid, and
2066 * it is in the past for sure, otherwise the queue
2067 * would have been on the idle tree.
2068 */
2069 entity->start = min_vstart;
2070 st->wsum += entity->weight;
2071 /*
2072 * entity is about to be inserted into a service tree,
2073 * and then set in service: get a reference to make
2074 * sure entity does not disappear until it is no
2075 * longer in service or scheduled for service.
2076 */
2077 bfq_get_entity(entity);
2078
2079 entity->on_st = true;
2080 }
2081
2082 bfq_update_fin_time_enqueue(entity, st, backshifted);
2083}
2084
2085/**
2086 * __bfq_requeue_entity - handle requeueing or repositioning of an entity.
2087 * @entity: the entity being requeued or repositioned.
2088 *
2089 * Requeueing is needed if this entity stops being served, which
2090 * happens if a leaf descendant entity has expired. On the other hand,
2091 * repositioning is needed if the next_in_service entity for the child
2092 * entity has changed. See the comments inside the function for
2093 * details.
2094 *
2095 * Basically, this function: 1) removes entity from its active tree if
2096 * present there, 2) updates the timestamps of entity and 3) inserts
2097 * entity back into its active tree (in the new, right position for
2098 * the new values of the timestamps).
2099 */
2100static void __bfq_requeue_entity(struct bfq_entity *entity)
2101{
2102 struct bfq_sched_data *sd = entity->sched_data;
2103 struct bfq_service_tree *st = bfq_entity_service_tree(entity);
2104
2105 if (entity == sd->in_service_entity) {
2106 /*
2107 * We are requeueing the current in-service entity,
2108 * which may have to be done for one of the following
2109 * reasons:
2110 * - entity represents the in-service queue, and the
2111 * in-service queue is being requeued after an
2112 * expiration;
2113 * - entity represents a group, and its budget has
2114 * changed because one of its child entities has
2115 * just been either activated or requeued for some
2116 * reason; the timestamps of the entity need then to
2117 * be updated, and the entity needs to be enqueued
2118 * or repositioned accordingly.
2119 *
2120 * In particular, before requeueing, the start time of
2121 * the entity must be moved forward to account for the
2122 * service that the entity has received while in
2123 * service. This is done by the next instructions. The
2124 * finish time will then be updated according to this
2125 * new value of the start time, and to the budget of
2126 * the entity.
2127 */
2128 bfq_calc_finish(entity, entity->service);
2129 entity->start = entity->finish;
2130 /*
2131 * In addition, if the entity had more than one child
2132 * when set in service, then it was not extracted from
2133 * the active tree. This implies that the position of
2134 * the entity in the active tree may need to be
2135 * changed now, because we have just updated the start
2136 * time of the entity, and we will update its finish
2137 * time in a moment (the requeueing is then, more
2138 * precisely, a repositioning in this case). To
2139 * implement this repositioning, we: 1) dequeue the
2140 * entity here, 2) update the finish time and
2141 * requeue the entity according to the new
2142 * timestamps below.
2143 */
2144 if (entity->tree)
2145 bfq_active_extract(st, entity);
2146 } else { /* The entity is already active, and not in service */
2147 /*
2148 * In this case, this function gets called only if the
2149 * next_in_service entity below this entity has
2150 * changed, and this change has caused the budget of
2151 * this entity to change, which, finally implies that
2152 * the finish time of this entity must be
2153 * updated. Such an update may cause the scheduling,
2154 * i.e., the position in the active tree, of this
2155 * entity to change. We handle this change by: 1)
2156 * dequeueing the entity here, 2) updating the finish
2157 * time and requeueing the entity according to the new
2158 * timestamps below. This is the same approach as the
2159 * non-extracted-entity sub-case above.
2160 */
2161 bfq_active_extract(st, entity);
2162 }
2163
2164 bfq_update_fin_time_enqueue(entity, st, false);
2165}
2166
2167static void __bfq_activate_requeue_entity(struct bfq_entity *entity,
2168 struct bfq_sched_data *sd,
2169 bool non_blocking_wait_rq)
2170{
2171 struct bfq_service_tree *st = bfq_entity_service_tree(entity);
2172
2173 if (sd->in_service_entity == entity || entity->tree == &st->active)
2174 /*
2175 * in service or already queued on the active tree,
2176 * requeue or reposition
2177 */
2178 __bfq_requeue_entity(entity);
2179 else
2180 /*
2181 * Not in service and not queued on its active tree:
2182 * the entity is idle and this is a true activation.
2183 */
2184 __bfq_activate_entity(entity, non_blocking_wait_rq);
2185}
2186
2187
2188/**
2189 * bfq_activate_entity - activate or requeue an entity representing a bfq_queue,
2190 * and activate, requeue or reposition all ancestors
2191 * for which such an update becomes necessary.
aee69d78
PV
2192 * @entity: the entity to activate.
2193 * @non_blocking_wait_rq: true if this entity was waiting for a request
e21b7a0b
AA
2194 * @requeue: true if this is a requeue, which implies that bfqq is
2195 * being expired; thus ALL its ancestors stop being served and must
2196 * therefore be requeued
aee69d78 2197 */
e21b7a0b
AA
2198static void bfq_activate_requeue_entity(struct bfq_entity *entity,
2199 bool non_blocking_wait_rq,
2200 bool requeue)
aee69d78
PV
2201{
2202 struct bfq_sched_data *sd;
2203
2204 for_each_entity(entity) {
aee69d78 2205 sd = entity->sched_data;
e21b7a0b
AA
2206 __bfq_activate_requeue_entity(entity, sd, non_blocking_wait_rq);
2207
2208 if (!bfq_update_next_in_service(sd, entity) && !requeue)
aee69d78
PV
2209 break;
2210 }
2211}
2212
2213/**
2214 * __bfq_deactivate_entity - deactivate an entity from its service tree.
2215 * @entity: the entity to deactivate.
e21b7a0b
AA
2216 * @ins_into_idle_tree: if false, the entity will not be put into the
2217 * idle tree.
aee69d78 2218 *
e21b7a0b
AA
2219 * Deactivates an entity, independently from its previous state. Must
2220 * be invoked only if entity is on a service tree. Extracts the entity
2221 * from that tree, and if necessary and allowed, puts it on the idle
2222 * tree.
aee69d78 2223 */
e21b7a0b
AA
2224static bool __bfq_deactivate_entity(struct bfq_entity *entity,
2225 bool ins_into_idle_tree)
aee69d78
PV
2226{
2227 struct bfq_sched_data *sd = entity->sched_data;
2228 struct bfq_service_tree *st = bfq_entity_service_tree(entity);
2229 int is_in_service = entity == sd->in_service_entity;
aee69d78 2230
e21b7a0b
AA
2231 if (!entity->on_st) /* entity never activated, or already inactive */
2232 return false;
aee69d78 2233
e21b7a0b 2234 if (is_in_service)
aee69d78 2235 bfq_calc_finish(entity, entity->service);
e21b7a0b
AA
2236
2237 if (entity->tree == &st->active)
aee69d78 2238 bfq_active_extract(st, entity);
e21b7a0b 2239 else if (!is_in_service && entity->tree == &st->idle)
aee69d78
PV
2240 bfq_idle_extract(st, entity);
2241
e21b7a0b 2242 if (!ins_into_idle_tree || !bfq_gt(entity->finish, st->vtime))
aee69d78
PV
2243 bfq_forget_entity(st, entity, is_in_service);
2244 else
2245 bfq_idle_insert(st, entity);
2246
e21b7a0b 2247 return true;
aee69d78
PV
2248}
2249
2250/**
e21b7a0b 2251 * bfq_deactivate_entity - deactivate an entity representing a bfq_queue.
aee69d78 2252 * @entity: the entity to deactivate.
e21b7a0b 2253 * @ins_into_idle_tree: true if the entity can be put on the idle tree
aee69d78 2254 */
e21b7a0b
AA
2255static void bfq_deactivate_entity(struct bfq_entity *entity,
2256 bool ins_into_idle_tree,
2257 bool expiration)
aee69d78
PV
2258{
2259 struct bfq_sched_data *sd;
2260 struct bfq_entity *parent = NULL;
2261
2262 for_each_entity_safe(entity, parent) {
2263 sd = entity->sched_data;
2264
e21b7a0b 2265 if (!__bfq_deactivate_entity(entity, ins_into_idle_tree)) {
aee69d78 2266 /*
e21b7a0b
AA
2267 * entity is not in any tree any more, so
2268 * this deactivation is a no-op, and there is
2269 * nothing to change for upper-level entities
2270 * (in case of expiration, this can never
2271 * happen).
aee69d78 2272 */
e21b7a0b
AA
2273 return;
2274 }
2275
2276 if (sd->next_in_service == entity)
2277 /*
2278 * entity was the next_in_service entity,
2279 * then, since entity has just been
2280 * deactivated, a new one must be found.
2281 */
2282 bfq_update_next_in_service(sd, NULL);
aee69d78
PV
2283
2284 if (sd->next_in_service)
2285 /*
e21b7a0b
AA
2286 * The parent entity is still backlogged,
2287 * because next_in_service is not NULL. So, no
2288 * further upwards deactivation must be
2289 * performed. Yet, next_in_service has
2290 * changed. Then the schedule does need to be
2291 * updated upwards.
aee69d78 2292 */
e21b7a0b 2293 break;
aee69d78
PV
2294
2295 /*
e21b7a0b
AA
2296 * If we get here, then the parent is no more
2297 * backlogged and we need to propagate the
2298 * deactivation upwards. Thus let the loop go on.
aee69d78 2299 */
aee69d78 2300
e21b7a0b
AA
2301 /*
2302 * Also let parent be queued into the idle tree on
2303 * deactivation, to preserve service guarantees, and
2304 * assuming that the caller of this function does not
2305 * need parent entities to be removed completely as well.
2306 */
2307 ins_into_idle_tree = true;
2308 }
aee69d78 2309
e21b7a0b
AA
2310 /*
2311 * If the deactivation loop is fully executed, then there are
2312 * no more entities to touch and the next loop is not executed at
2313 * all. Otherwise, requeue remaining entities if they are
2314 * about to stop receiving service, or reposition them if this
2315 * is not the case.
2316 */
aee69d78
PV
2317 entity = parent;
2318 for_each_entity(entity) {
e21b7a0b
AA
2319 /*
2320 * Invoke __bfq_requeue_entity on entity, even if
2321 * already active, to requeue/reposition it in the
2322 * active tree (because sd->next_in_service has
2323 * changed)
2324 */
2325 __bfq_requeue_entity(entity);
aee69d78
PV
2326
2327 sd = entity->sched_data;
e21b7a0b
AA
2328 if (!bfq_update_next_in_service(sd, entity) &&
2329 !expiration)
2330 /*
2331 * next_in_service unchanged or not causing
2332 * any change in entity->parent->sd, and no
2333 * requeueing needed for expiration: stop
2334 * here.
2335 */
aee69d78
PV
2336 break;
2337 }
2338}
2339
2340/**
e21b7a0b
AA
2341 * bfq_calc_vtime_jump - compute the value to which the vtime should jump,
2342 * if needed, to have at least one entity eligible.
aee69d78
PV
2343 * @st: the service tree to act upon.
2344 *
e21b7a0b 2345 * Assumes that st is not empty.
aee69d78 2346 */
e21b7a0b 2347static u64 bfq_calc_vtime_jump(struct bfq_service_tree *st)
aee69d78 2348{
e21b7a0b
AA
2349 struct bfq_entity *root_entity = bfq_root_active_entity(&st->active);
2350
2351 if (bfq_gt(root_entity->min_start, st->vtime))
2352 return root_entity->min_start;
2353
2354 return st->vtime;
2355}
aee69d78 2356
e21b7a0b
AA
2357static void bfq_update_vtime(struct bfq_service_tree *st, u64 new_value)
2358{
2359 if (new_value > st->vtime) {
2360 st->vtime = new_value;
aee69d78
PV
2361 bfq_forget_idle(st);
2362 }
2363}
2364
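
 In other words, the virtual time is never moved backwards, and it only jumps
 forward when even the earliest-starting active entity (the root's min_start)
 is not yet eligible. A two-line sketch of that rule, on plain integers and
 with a made-up ex_ name:

	/* Return the vtime to use: jump to min_start only if nothing is eligible. */
	static unsigned long long ex_vtime_jump(unsigned long long min_start,
						unsigned long long vtime)
	{
		return min_start > vtime ? min_start : vtime;
	}

 For example, with vtime == 100 and the earliest active start time 140, the
 returned value is 140, which makes that entity eligible; with an earliest
 start of 70, the vtime stays at 100.
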
2365/**
2366 * bfq_first_active_entity - find the eligible entity with
2367 * the smallest finish time
2368 * @st: the service tree to select from.
e21b7a0b 2369 * @vtime: the system virtual to use as a reference for eligibility
aee69d78
PV
2370 *
2371 * This function searches the first schedulable entity, starting from the
2372 * root of the tree and going left every time the left subtree contains
2373 * at least one eligible (start <= vtime) entity. The path on
2374 * the right is followed only if a) the left subtree contains no eligible
2375 * entities and b) no eligible entity has been found yet.
2376 */
e21b7a0b
AA
2377static struct bfq_entity *bfq_first_active_entity(struct bfq_service_tree *st,
2378 u64 vtime)
aee69d78
PV
2379{
2380 struct bfq_entity *entry, *first = NULL;
2381 struct rb_node *node = st->active.rb_node;
2382
2383 while (node) {
2384 entry = rb_entry(node, struct bfq_entity, rb_node);
2385left:
e21b7a0b 2386 if (!bfq_gt(entry->start, vtime))
aee69d78
PV
2387 first = entry;
2388
2389 if (node->rb_left) {
2390 entry = rb_entry(node->rb_left,
2391 struct bfq_entity, rb_node);
e21b7a0b 2392 if (!bfq_gt(entry->min_start, vtime)) {
aee69d78
PV
2393 node = node->rb_left;
2394 goto left;
2395 }
2396 }
2397 if (first)
2398 break;
2399 node = node->rb_right;
2400 }
2401
e21b7a0b
AA
2402 return first;
2403}
2404
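
 A small trace of the search above: suppose vtime == 100, the root entity has
 start 120 (not eligible), but its left child advertises min_start 80. The
 walk then descends left, remembering the most recent eligible node it has
 visited, and moves right only when nothing eligible has been found yet and
 the left subtree contains no eligible entity. Because the tree is ordered by
 finish time, the node remembered this way is the eligible entity with the
 smallest finish time.
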
2405/**
2406 * __bfq_lookup_next_entity - return the first eligible entity in @st.
2407 * @st: the service tree.
2408 *
2409 * If there is no in-service entity for the sched_data st belongs to,
2410 * then return the entity that will be set in service if:
2411 * 1) the parent entity this st belongs to is set in service;
2412 * 2) no entity belonging to such parent entity undergoes a state change
2413 * that would influence the timestamps of the entity (e.g., becomes idle,
2414 * becomes backlogged, changes its budget, ...).
2415 *
2416 * In this first case, update the virtual time in @st too (see the
2417 * comments on this update inside the function).
2418 *
2419 * In contrast, if there is an in-service entity, then return the
2420 * entity that would be set in service if not only the above
2421 * conditions, but also the next one held true: the currently
2422 * in-service entity, on expiration,
2423 * 1) gets a finish time equal to the current one, or
2424 * 2) is not eligible any more, or
2425 * 3) is idle.
2426 */
2427static struct bfq_entity *
2428__bfq_lookup_next_entity(struct bfq_service_tree *st, bool in_service)
2429{
2430 struct bfq_entity *entity;
2431 u64 new_vtime;
2432
2433 if (RB_EMPTY_ROOT(&st->active))
2434 return NULL;
2435
2436 /*
2437 * Get the value of the system virtual time for which at
2438 * least one entity is eligible.
2439 */
2440 new_vtime = bfq_calc_vtime_jump(st);
2441
2442 /*
2443 * If there is no in-service entity for the sched_data this
2444 * active tree belongs to, then push the system virtual time
2445 * up to the value that guarantees that at least one entity is
2446 * eligible. If, instead, there is an in-service entity, then
2447 * do not make any such update, because there is already an
2448 * eligible entity, namely the in-service one (even if the
2449 * entity is not on st, because it was extracted when set in
2450 * service).
2451 */
2452 if (!in_service)
2453 bfq_update_vtime(st, new_vtime);
2454
2455 entity = bfq_first_active_entity(st, new_vtime);
2456
2457 return entity;
2458}
2459
2460/**
2461 * bfq_lookup_next_entity - return the first eligible entity in @sd.
2462 * @sd: the sched_data.
2463 *
2464 * This function is invoked when there has been a change in the trees
2465 * for sd, and we need to know what the new next entity is after this
2466 * change.
2467 */
2468static struct bfq_entity *bfq_lookup_next_entity(struct bfq_sched_data *sd)
2469{
2470 struct bfq_service_tree *st = sd->service_tree;
2471 struct bfq_service_tree *idle_class_st = st + (BFQ_IOPRIO_CLASSES - 1);
2472 struct bfq_entity *entity = NULL;
2473 int class_idx = 0;
2474
2475 /*
2476 * Choose from idle class, if needed to guarantee a minimum
2477 * bandwidth to this class (and if there is some active entity
2478 * in idle class). This should also mitigate
2479 * priority-inversion problems in case a low priority task is
2480 * holding file system resources.
2481 */
2482 if (time_is_before_jiffies(sd->bfq_class_idle_last_service +
2483 BFQ_CL_IDLE_TIMEOUT)) {
2484 if (!RB_EMPTY_ROOT(&idle_class_st->active))
2485 class_idx = BFQ_IOPRIO_CLASSES - 1;
2486 /* About to be served if backlogged, or not yet backlogged */
2487 sd->bfq_class_idle_last_service = jiffies;
2488 }
2489
2490 /*
2491 * Find the next entity to serve for the highest-priority
2492 * class, unless the idle class needs to be served.
2493 */
2494 for (; class_idx < BFQ_IOPRIO_CLASSES; class_idx++) {
2495 entity = __bfq_lookup_next_entity(st + class_idx,
2496 sd->in_service_entity);
2497
2498 if (entity)
2499 break;
2500 }
2501
2502 if (!entity)
2503 return NULL;
2504
2505 return entity;
2506}
2507
2508static bool next_queue_may_preempt(struct bfq_data *bfqd)
2509{
2510 struct bfq_sched_data *sd = &bfqd->root_group->sched_data;
2511
2512 return sd->next_in_service != sd->in_service_entity;
2513}
2514
2515/*
2516 * Get next queue for service.
2517 */
2518static struct bfq_queue *bfq_get_next_queue(struct bfq_data *bfqd)
2519{
2520 struct bfq_entity *entity = NULL;
2521 struct bfq_sched_data *sd;
2522 struct bfq_queue *bfqq;
2523
2524 if (bfqd->busy_queues == 0)
2525 return NULL;
2526
2527 /*
2528 * Traverse the path from the root to the leaf entity to
2529 * serve. Set in service all the entities visited along the
2530 * way.
2531 */
2532 sd = &bfqd->root_group->sched_data;
2533 for (; sd ; sd = entity->my_sched_data) {
2534 /*
2535 * WARNING. We are about to set the in-service entity
2536 * to sd->next_in_service, i.e., to the (cached) value
2537 * returned by bfq_lookup_next_entity(sd) the last
2538 * time it was invoked, i.e., the last time when the
2539 * service order in sd changed as a consequence of the
2540 * activation or deactivation of an entity. In this
2541 * respect, if we execute bfq_lookup_next_entity(sd)
2542 * in this very moment, it may, although with low
2543 * probability, yield a different entity than that
2544 * pointed to by sd->next_in_service. This rare event
2545 * happens in case there was no CLASS_IDLE entity to
2546 * serve for sd when bfq_lookup_next_entity(sd) was
2547 * invoked for the last time, while there is now one
2548 * such entity.
2549 *
2550 * If the above event happens, then the scheduling of
2551 * such entity in CLASS_IDLE is postponed until the
2552 * service of the sd->next_in_service entity
2553 * finishes. In fact, when the latter is expired,
2554 * bfq_lookup_next_entity(sd) gets called again,
2555 * exactly to update sd->next_in_service.
2556 */
2557
2558 /* Make next_in_service entity become in_service_entity */
2559 entity = sd->next_in_service;
2560 sd->in_service_entity = entity;
2561
2562 /*
2563 * Reset the accumulator of the amount of service that
2564 * the entity is about to receive.
2565 */
2566 entity->service = 0;
2567
2568 /*
2569 * If entity is no longer a candidate for next
2570 * service, then we extract it from its active tree,
2571 * for the following reason. To further boost the
2572 * throughput in some special case, BFQ needs to know
2573 * which is the next candidate entity to serve, while
2574 * there is already an entity in service. In this
2575 * respect, to make it easy to compute/update the next
2576 * candidate entity to serve after the current
2577 * candidate has been set in service, there is a case
2578 * where it is necessary to extract the current
2579 * candidate from its service tree. Such a case is
2580 * when the entity just set in service cannot also be
2581 * a candidate for next service. Details about when
2582 * this condition holds are reported in the comments
2583 * on the function bfq_no_longer_next_in_service()
2584 * invoked below.
2585 */
2586 if (bfq_no_longer_next_in_service(entity))
2587 bfq_active_extract(bfq_entity_service_tree(entity),
2588 entity);
2589
2590 /*
2591 * For the same reason why we may have just extracted
2592 * entity from its active tree, we may need to update
2593 * next_in_service for the sched_data of entity too,
2594 * regardless of whether entity has been extracted.
2595 * In fact, even if entity has not been extracted, a
2596 * descendant entity may get extracted. Such an event
2597 * would cause a change in next_in_service for the
2598 * level of the descendant entity, and thus possibly
2599 * back to upper levels.
2600 *
2601 * We cannot perform the resulting needed update
2602 * before the end of this loop, because, to know which
2603 * is the correct next-to-serve candidate entity for
2604 * each level, we need first to find the leaf entity
2605 * to set in service. In fact, only after we know
2606 * which is the next-to-serve leaf entity, we can
2607 * discover whether the parent entity of the leaf
2608 * entity becomes the next-to-serve, and so on.
2609 */
2610
2611 }
2612
2613 bfqq = bfq_entity_to_bfqq(entity);
2614
2615 /*
2616 * We can finally update all next-to-serve entities along the
2617 * path from the leaf entity just set in service to the root.
2618 */
2619 for_each_entity(entity) {
2620 struct bfq_sched_data *sd = entity->sched_data;
2621
2622 if (!bfq_update_next_in_service(sd, NULL))
2623 break;
2624 }
2625
2626 return bfqq;
2627}
2628
2629static void __bfq_bfqd_reset_in_service(struct bfq_data *bfqd)
2630{
2631 struct bfq_queue *in_serv_bfqq = bfqd->in_service_queue;
2632 struct bfq_entity *in_serv_entity = &in_serv_bfqq->entity;
2633 struct bfq_entity *entity = in_serv_entity;
2634
2635 if (bfqd->in_service_bic) {
36eca894
AA
2636 /*
2637 * Schedule the release of a reference to
2638 * bfqd->in_service_bic->icq.ioc to right after the
2639 * scheduler lock is released. This ioc is not
2640 * released immediately, so as not to risk taking
2641 * an ioc->lock while holding the scheduler lock.
2642 */
2643 bfqd->ioc_to_put = bfqd->in_service_bic->icq.ioc;
e21b7a0b
AA
2644 bfqd->in_service_bic = NULL;
2645 }
2646
2647 bfq_clear_bfqq_wait_request(in_serv_bfqq);
2648 hrtimer_try_to_cancel(&bfqd->idle_slice_timer);
2649 bfqd->in_service_queue = NULL;
2650
2651 /*
2652 * When this function is called, all in-service entities have
2653 * been properly deactivated or requeued, so we can safely
2654 * execute the final step: reset in_service_entity along the
2655 * path from entity to the root.
2656 */
2657 for_each_entity(entity)
2658 entity->sched_data->in_service_entity = NULL;
2659
2660 /*
2661 * in_serv_entity is no longer in service, so, if it is in no
2662 * service tree either, then release the service reference to
2663 * the queue it represents (taken with bfq_get_entity).
2664 */
2665 if (!in_serv_entity->on_st)
2666 bfq_put_queue(in_serv_bfqq);
2667}
2668
2669static void bfq_deactivate_bfqq(struct bfq_data *bfqd, struct bfq_queue *bfqq,
2670 bool ins_into_idle_tree, bool expiration)
2671{
2672 struct bfq_entity *entity = &bfqq->entity;
2673
2674 bfq_deactivate_entity(entity, ins_into_idle_tree, expiration);
2675}
2676
2677static void bfq_activate_bfqq(struct bfq_data *bfqd, struct bfq_queue *bfqq)
2678{
2679 struct bfq_entity *entity = &bfqq->entity;
2680
2681 bfq_activate_requeue_entity(entity, bfq_bfqq_non_blocking_wait_rq(bfqq),
2682 false);
2683 bfq_clear_bfqq_non_blocking_wait_rq(bfqq);
2684}
2685
2686static void bfq_requeue_bfqq(struct bfq_data *bfqd, struct bfq_queue *bfqq)
2687{
2688 struct bfq_entity *entity = &bfqq->entity;
2689
2690 bfq_activate_requeue_entity(entity, false,
2691 bfqq == bfqd->in_service_queue);
2692}
2693
2694static void bfqg_stats_update_dequeue(struct bfq_group *bfqg);
2695
2696/*
2697 * Called when the bfqq no longer has requests pending; remove it from
2698 * the service tree. As a special case, it can be invoked during an
2699 * expiration.
2700 */
2701static void bfq_del_bfqq_busy(struct bfq_data *bfqd, struct bfq_queue *bfqq,
2702 bool expiration)
2703{
2704 bfq_log_bfqq(bfqd, bfqq, "del from busy");
2705
2706 bfq_clear_bfqq_busy(bfqq);
2707
2708 bfqd->busy_queues--;
2709
1de0c4cd
AA
2710 if (!bfqq->dispatched)
2711 bfq_weights_tree_remove(bfqd, &bfqq->entity,
2712 &bfqd->queue_weights_tree);
2713
cfd69712
PV
2714 if (bfqq->wr_coeff > 1)
2715 bfqd->wr_busy_queues--;
2716
e21b7a0b
AA
2717 bfqg_stats_update_dequeue(bfqq_group(bfqq));
2718
2719 bfq_deactivate_bfqq(bfqd, bfqq, true, expiration);
2720}
2721
2722/*
2723 * Called when an inactive queue receives a new request.
2724 */
2725static void bfq_add_bfqq_busy(struct bfq_data *bfqd, struct bfq_queue *bfqq)
2726{
2727 bfq_log_bfqq(bfqd, bfqq, "add to busy");
2728
2729 bfq_activate_bfqq(bfqd, bfqq);
2730
2731 bfq_mark_bfqq_busy(bfqq);
2732 bfqd->busy_queues++;
cfd69712 2733
1de0c4cd
AA
2734 if (!bfqq->dispatched)
2735 if (bfqq->wr_coeff == 1)
2736 bfq_weights_tree_add(bfqd, &bfqq->entity,
2737 &bfqd->queue_weights_tree);
2738
cfd69712
PV
2739 if (bfqq->wr_coeff > 1)
2740 bfqd->wr_busy_queues++;
e21b7a0b
AA
2741}
2742
2743#ifdef CONFIG_BFQ_GROUP_IOSCHED
2744
2745/* bfqg stats flags */
2746enum bfqg_stats_flags {
2747 BFQG_stats_waiting = 0,
2748 BFQG_stats_idling,
2749 BFQG_stats_empty,
2750};
2751
2752#define BFQG_FLAG_FNS(name) \
2753static void bfqg_stats_mark_##name(struct bfqg_stats *stats) \
2754{ \
2755 stats->flags |= (1 << BFQG_stats_##name); \
2756} \
2757static void bfqg_stats_clear_##name(struct bfqg_stats *stats) \
2758{ \
2759 stats->flags &= ~(1 << BFQG_stats_##name); \
2760} \
2761static int bfqg_stats_##name(struct bfqg_stats *stats) \
2762{ \
2763 return (stats->flags & (1 << BFQG_stats_##name)) != 0; \
2764} \
2765
2766BFQG_FLAG_FNS(waiting)
2767BFQG_FLAG_FNS(idling)
2768BFQG_FLAG_FNS(empty)
2769#undef BFQG_FLAG_FNS
2770
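
 For readers not used to this token-pasting idiom: each BFQG_FLAG_FNS(name)
 invocation above generates a mark/clear/test triple for one flag. The
 BFQG_FLAG_FNS(waiting) line, for instance, expands to roughly:

	static void bfqg_stats_mark_waiting(struct bfqg_stats *stats)
	{
		stats->flags |= (1 << BFQG_stats_waiting);
	}
	static void bfqg_stats_clear_waiting(struct bfqg_stats *stats)
	{
		stats->flags &= ~(1 << BFQG_stats_waiting);
	}
	static int bfqg_stats_waiting(struct bfqg_stats *stats)
	{
		return (stats->flags & (1 << BFQG_stats_waiting)) != 0;
	}
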
2771/* This should be called with the queue_lock held. */
2772static void bfqg_stats_update_group_wait_time(struct bfqg_stats *stats)
2773{
2774 unsigned long long now;
2775
2776 if (!bfqg_stats_waiting(stats))
2777 return;
2778
2779 now = sched_clock();
2780 if (time_after64(now, stats->start_group_wait_time))
2781 blkg_stat_add(&stats->group_wait_time,
2782 now - stats->start_group_wait_time);
2783 bfqg_stats_clear_waiting(stats);
2784}
2785
2786/* This should be called with the queue_lock held. */
2787static void bfqg_stats_set_start_group_wait_time(struct bfq_group *bfqg,
2788 struct bfq_group *curr_bfqg)
2789{
2790 struct bfqg_stats *stats = &bfqg->stats;
2791
2792 if (bfqg_stats_waiting(stats))
2793 return;
2794 if (bfqg == curr_bfqg)
2795 return;
2796 stats->start_group_wait_time = sched_clock();
2797 bfqg_stats_mark_waiting(stats);
2798}
2799
2800/* This should be called with the queue_lock held. */
2801static void bfqg_stats_end_empty_time(struct bfqg_stats *stats)
2802{
2803 unsigned long long now;
2804
2805 if (!bfqg_stats_empty(stats))
2806 return;
2807
2808 now = sched_clock();
2809 if (time_after64(now, stats->start_empty_time))
2810 blkg_stat_add(&stats->empty_time,
2811 now - stats->start_empty_time);
2812 bfqg_stats_clear_empty(stats);
2813}
2814
2815static void bfqg_stats_update_dequeue(struct bfq_group *bfqg)
2816{
2817 blkg_stat_add(&bfqg->stats.dequeue, 1);
2818}
2819
2820static void bfqg_stats_set_start_empty_time(struct bfq_group *bfqg)
2821{
2822 struct bfqg_stats *stats = &bfqg->stats;
2823
2824 if (blkg_rwstat_total(&stats->queued))
2825 return;
2826
2827 /*
2828	 * group is already marked empty. This can happen if bfqq got a new
2829	 * request in the parent group and moved to this group while being added
2830	 * to the service tree. Just ignore the event and move on.
2831 */
2832 if (bfqg_stats_empty(stats))
2833 return;
2834
2835 stats->start_empty_time = sched_clock();
2836 bfqg_stats_mark_empty(stats);
2837}
2838
2839static void bfqg_stats_update_idle_time(struct bfq_group *bfqg)
2840{
2841 struct bfqg_stats *stats = &bfqg->stats;
2842
2843 if (bfqg_stats_idling(stats)) {
2844 unsigned long long now = sched_clock();
2845
2846 if (time_after64(now, stats->start_idle_time))
2847 blkg_stat_add(&stats->idle_time,
2848 now - stats->start_idle_time);
2849 bfqg_stats_clear_idling(stats);
2850 }
2851}
2852
2853static void bfqg_stats_set_start_idle_time(struct bfq_group *bfqg)
2854{
2855 struct bfqg_stats *stats = &bfqg->stats;
2856
2857 stats->start_idle_time = sched_clock();
2858 bfqg_stats_mark_idling(stats);
2859}
2860
2861static void bfqg_stats_update_avg_queue_size(struct bfq_group *bfqg)
2862{
2863 struct bfqg_stats *stats = &bfqg->stats;
2864
2865 blkg_stat_add(&stats->avg_queue_size_sum,
2866 blkg_rwstat_total(&stats->queued));
2867 blkg_stat_add(&stats->avg_queue_size_samples, 1);
2868 bfqg_stats_update_group_wait_time(stats);
2869}
2870
2871/*
2872 * blk-cgroup policy-related handlers
2873 * The following functions help in converting between blk-cgroup
2874 * internal structures and BFQ-specific structures.
2875 */
2876
2877static struct bfq_group *pd_to_bfqg(struct blkg_policy_data *pd)
2878{
2879 return pd ? container_of(pd, struct bfq_group, pd) : NULL;
2880}
2881
2882static struct blkcg_gq *bfqg_to_blkg(struct bfq_group *bfqg)
2883{
2884 return pd_to_blkg(&bfqg->pd);
2885}
2886
2887static struct blkcg_policy blkcg_policy_bfq;
2888
2889static struct bfq_group *blkg_to_bfqg(struct blkcg_gq *blkg)
2890{
2891 return pd_to_bfqg(blkg_to_pd(blkg, &blkcg_policy_bfq));
2892}
2893
2894/*
2895 * bfq_group handlers
2896 * The following functions help in navigating the bfq_group hierarchy
2897 * by allowing to find the parent of a bfq_group or the bfq_group
2898 * associated to a bfq_queue.
2899 */
2900
2901static struct bfq_group *bfqg_parent(struct bfq_group *bfqg)
2902{
2903 struct blkcg_gq *pblkg = bfqg_to_blkg(bfqg)->parent;
2904
2905 return pblkg ? blkg_to_bfqg(pblkg) : NULL;
2906}
2907
2908static struct bfq_group *bfqq_group(struct bfq_queue *bfqq)
2909{
2910 struct bfq_entity *group_entity = bfqq->entity.parent;
2911
2912 return group_entity ? container_of(group_entity, struct bfq_group,
2913 entity) :
2914 bfqq->bfqd->root_group;
2915}
2916
2917/*
2918 * The following two functions handle get and put of a bfq_group by
2919 * wrapping the related blk-cgroup hooks.
2920 */
2921
2922static void bfqg_get(struct bfq_group *bfqg)
2923{
2924	blkg_get(bfqg_to_blkg(bfqg));
2925}
2926
2927static void bfqg_put(struct bfq_group *bfqg)
2928{
2929	blkg_put(bfqg_to_blkg(bfqg));
2930}
2931
2932static void bfqg_stats_update_io_add(struct bfq_group *bfqg,
2933 struct bfq_queue *bfqq,
2934 unsigned int op)
2935{
2936 blkg_rwstat_add(&bfqg->stats.queued, op, 1);
2937 bfqg_stats_end_empty_time(&bfqg->stats);
2938 if (!(bfqq == ((struct bfq_data *)bfqg->bfqd)->in_service_queue))
2939 bfqg_stats_set_start_group_wait_time(bfqg, bfqq_group(bfqq));
2940}
2941
2942static void bfqg_stats_update_io_remove(struct bfq_group *bfqg, unsigned int op)
2943{
2944 blkg_rwstat_add(&bfqg->stats.queued, op, -1);
2945}
2946
2947static void bfqg_stats_update_io_merged(struct bfq_group *bfqg, unsigned int op)
2948{
2949 blkg_rwstat_add(&bfqg->stats.merged, op, 1);
2950}
2951
2952static void bfqg_stats_update_completion(struct bfq_group *bfqg,
2953 uint64_t start_time, uint64_t io_start_time,
2954 unsigned int op)
2955{
2956 struct bfqg_stats *stats = &bfqg->stats;
2957 unsigned long long now = sched_clock();
2958
2959 if (time_after64(now, io_start_time))
2960 blkg_rwstat_add(&stats->service_time, op,
2961 now - io_start_time);
2962 if (time_after64(io_start_time, start_time))
2963 blkg_rwstat_add(&stats->wait_time, op,
2964 io_start_time - start_time);
2965}
2966
2967/* @stats = 0 */
2968static void bfqg_stats_reset(struct bfqg_stats *stats)
2969{
2970 /* queued stats shouldn't be cleared */
2971 blkg_rwstat_reset(&stats->merged);
2972 blkg_rwstat_reset(&stats->service_time);
2973 blkg_rwstat_reset(&stats->wait_time);
2974 blkg_stat_reset(&stats->time);
2975 blkg_stat_reset(&stats->avg_queue_size_sum);
2976 blkg_stat_reset(&stats->avg_queue_size_samples);
2977 blkg_stat_reset(&stats->dequeue);
2978 blkg_stat_reset(&stats->group_wait_time);
2979 blkg_stat_reset(&stats->idle_time);
2980 blkg_stat_reset(&stats->empty_time);
2981}
2982
2983/* @to += @from */
2984static void bfqg_stats_add_aux(struct bfqg_stats *to, struct bfqg_stats *from)
2985{
2986 if (!to || !from)
2987 return;
2988
2989 /* queued stats shouldn't be cleared */
2990 blkg_rwstat_add_aux(&to->merged, &from->merged);
2991 blkg_rwstat_add_aux(&to->service_time, &from->service_time);
2992 blkg_rwstat_add_aux(&to->wait_time, &from->wait_time);
2993	blkg_stat_add_aux(&to->time, &from->time);
2994 blkg_stat_add_aux(&to->avg_queue_size_sum, &from->avg_queue_size_sum);
2995 blkg_stat_add_aux(&to->avg_queue_size_samples,
2996 &from->avg_queue_size_samples);
2997 blkg_stat_add_aux(&to->dequeue, &from->dequeue);
2998 blkg_stat_add_aux(&to->group_wait_time, &from->group_wait_time);
2999 blkg_stat_add_aux(&to->idle_time, &from->idle_time);
3000 blkg_stat_add_aux(&to->empty_time, &from->empty_time);
3001}
3002
3003/*
3004 * Transfer @bfqg's stats to its parent's aux counts so that the ancestors'
3005 * recursive stats can still account for the amount used by this bfqg after
3006 * it's gone.
3007 */
3008static void bfqg_stats_xfer_dead(struct bfq_group *bfqg)
3009{
3010 struct bfq_group *parent;
3011
3012 if (!bfqg) /* root_group */
3013 return;
3014
3015 parent = bfqg_parent(bfqg);
3016
3017 lockdep_assert_held(bfqg_to_blkg(bfqg)->q->queue_lock);
3018
3019 if (unlikely(!parent))
3020 return;
3021
3022 bfqg_stats_add_aux(&parent->stats, &bfqg->stats);
3023 bfqg_stats_reset(&bfqg->stats);
3024}
3025
3026static void bfq_init_entity(struct bfq_entity *entity,
3027 struct bfq_group *bfqg)
3028{
3029 struct bfq_queue *bfqq = bfq_entity_to_bfqq(entity);
3030
3031 entity->weight = entity->new_weight;
3032 entity->orig_weight = entity->new_weight;
3033 if (bfqq) {
3034 bfqq->ioprio = bfqq->new_ioprio;
3035 bfqq->ioprio_class = bfqq->new_ioprio_class;
3036 bfqg_get(bfqg);
3037 }
3038 entity->parent = bfqg->my_entity; /* NULL for root group */
3039 entity->sched_data = &bfqg->sched_data;
3040}
3041
3042static void bfqg_stats_exit(struct bfqg_stats *stats)
3043{
3044 blkg_rwstat_exit(&stats->merged);
3045 blkg_rwstat_exit(&stats->service_time);
3046 blkg_rwstat_exit(&stats->wait_time);
3047 blkg_rwstat_exit(&stats->queued);
3048 blkg_stat_exit(&stats->time);
3049 blkg_stat_exit(&stats->avg_queue_size_sum);
3050 blkg_stat_exit(&stats->avg_queue_size_samples);
3051 blkg_stat_exit(&stats->dequeue);
3052 blkg_stat_exit(&stats->group_wait_time);
3053 blkg_stat_exit(&stats->idle_time);
3054 blkg_stat_exit(&stats->empty_time);
3055}
3056
3057static int bfqg_stats_init(struct bfqg_stats *stats, gfp_t gfp)
3058{
3059 if (blkg_rwstat_init(&stats->merged, gfp) ||
3060 blkg_rwstat_init(&stats->service_time, gfp) ||
3061 blkg_rwstat_init(&stats->wait_time, gfp) ||
3062 blkg_rwstat_init(&stats->queued, gfp) ||
3063 blkg_stat_init(&stats->time, gfp) ||
3064 blkg_stat_init(&stats->avg_queue_size_sum, gfp) ||
3065 blkg_stat_init(&stats->avg_queue_size_samples, gfp) ||
3066 blkg_stat_init(&stats->dequeue, gfp) ||
3067 blkg_stat_init(&stats->group_wait_time, gfp) ||
3068 blkg_stat_init(&stats->idle_time, gfp) ||
3069 blkg_stat_init(&stats->empty_time, gfp)) {
3070 bfqg_stats_exit(stats);
3071 return -ENOMEM;
3072 }
3073
3074 return 0;
3075}
3076
3077static struct bfq_group_data *cpd_to_bfqgd(struct blkcg_policy_data *cpd)
3078{
3079 return cpd ? container_of(cpd, struct bfq_group_data, pd) : NULL;
3080}
3081
3082static struct bfq_group_data *blkcg_to_bfqgd(struct blkcg *blkcg)
3083{
3084 return cpd_to_bfqgd(blkcg_to_cpd(blkcg, &blkcg_policy_bfq));
3085}
3086
3087static struct blkcg_policy_data *bfq_cpd_alloc(gfp_t gfp)
3088{
3089 struct bfq_group_data *bgd;
3090
3091 bgd = kzalloc(sizeof(*bgd), gfp);
3092 if (!bgd)
3093 return NULL;
3094 return &bgd->pd;
3095}
3096
3097static void bfq_cpd_init(struct blkcg_policy_data *cpd)
3098{
3099 struct bfq_group_data *d = cpd_to_bfqgd(cpd);
3100
3101 d->weight = cgroup_subsys_on_dfl(io_cgrp_subsys) ?
3102 CGROUP_WEIGHT_DFL : BFQ_WEIGHT_LEGACY_DFL;
3103}
3104
3105static void bfq_cpd_free(struct blkcg_policy_data *cpd)
3106{
3107 kfree(cpd_to_bfqgd(cpd));
3108}
3109
3110static struct blkg_policy_data *bfq_pd_alloc(gfp_t gfp, int node)
3111{
3112 struct bfq_group *bfqg;
3113
3114 bfqg = kzalloc_node(sizeof(*bfqg), gfp, node);
3115 if (!bfqg)
3116 return NULL;
3117
3118 if (bfqg_stats_init(&bfqg->stats, gfp)) {
3119 kfree(bfqg);
3120 return NULL;
3121 }
3122
3123 return &bfqg->pd;
3124}
3125
3126static void bfq_pd_init(struct blkg_policy_data *pd)
3127{
3128 struct blkcg_gq *blkg = pd_to_blkg(pd);
3129 struct bfq_group *bfqg = blkg_to_bfqg(blkg);
3130 struct bfq_data *bfqd = blkg->q->elevator->elevator_data;
3131 struct bfq_entity *entity = &bfqg->entity;
3132 struct bfq_group_data *d = blkcg_to_bfqgd(blkg->blkcg);
3133
3134 entity->orig_weight = entity->weight = entity->new_weight = d->weight;
3135 entity->my_sched_data = &bfqg->sched_data;
3136 bfqg->my_entity = entity; /*
3137 * the root_group's will be set to NULL
3138 * in bfq_init_queue()
3139 */
3140 bfqg->bfqd = bfqd;
1de0c4cd 3141 bfqg->active_entities = 0;
36eca894 3142 bfqg->rq_pos_tree = RB_ROOT;
e21b7a0b
AA
3143}
3144
3145static void bfq_pd_free(struct blkg_policy_data *pd)
3146{
3147 struct bfq_group *bfqg = pd_to_bfqg(pd);
3148
3149 bfqg_stats_exit(&bfqg->stats);
3150	kfree(bfqg);
3151}
3152
3153static void bfq_pd_reset_stats(struct blkg_policy_data *pd)
3154{
3155 struct bfq_group *bfqg = pd_to_bfqg(pd);
3156
3157 bfqg_stats_reset(&bfqg->stats);
3158}
3159
3160static void bfq_group_set_parent(struct bfq_group *bfqg,
3161 struct bfq_group *parent)
3162{
3163 struct bfq_entity *entity;
3164
3165 entity = &bfqg->entity;
3166 entity->parent = parent->my_entity;
3167 entity->sched_data = &parent->sched_data;
3168}
3169
3170static struct bfq_group *bfq_lookup_bfqg(struct bfq_data *bfqd,
3171 struct blkcg *blkcg)
3172{
3173 struct blkcg_gq *blkg;
3174
3175 blkg = blkg_lookup(blkcg, bfqd->queue);
3176 if (likely(blkg))
3177 return blkg_to_bfqg(blkg);
3178 return NULL;
3179}
3180
3181static struct bfq_group *bfq_find_set_group(struct bfq_data *bfqd,
3182 struct blkcg *blkcg)
3183{
3184 struct bfq_group *bfqg, *parent;
3185 struct bfq_entity *entity;
3186
3187 bfqg = bfq_lookup_bfqg(bfqd, blkcg);
3188
3189 if (unlikely(!bfqg))
3190 return NULL;
3191
3192 /*
3193 * Update chain of bfq_groups as we might be handling a leaf group
3194 * which, along with some of its relatives, has not been hooked yet
3195 * to the private hierarchy of BFQ.
3196 */
3197 entity = &bfqg->entity;
3198 for_each_entity(entity) {
3199 bfqg = container_of(entity, struct bfq_group, entity);
3200 if (bfqg != bfqd->root_group) {
3201 parent = bfqg_parent(bfqg);
3202 if (!parent)
3203 parent = bfqd->root_group;
3204 bfq_group_set_parent(bfqg, parent);
3205 }
3206 }
3207
3208 return bfqg;
3209}
3210
36eca894
AA
3211static void bfq_pos_tree_add_move(struct bfq_data *bfqd,
3212 struct bfq_queue *bfqq);
e21b7a0b
AA
3213static void bfq_bfqq_expire(struct bfq_data *bfqd,
3214 struct bfq_queue *bfqq,
3215 bool compensate,
3216 enum bfqq_expiration reason);
3217
3218/**
3219 * bfq_bfqq_move - migrate @bfqq to @bfqg.
3220 * @bfqd: queue descriptor.
3221 * @bfqq: the queue to move.
3222 * @bfqg: the group to move to.
3223 *
3224 * Move @bfqq to @bfqg, deactivating it from its old group and reactivating
3225 * it on the new one. Avoid putting the entity on the old group idle tree.
3226 *
3227 * Must be called under the queue lock; the cgroup owning @bfqg must
3228 * not disappear (by now this just means that we are called under
3229 * rcu_read_lock()).
3230 */
3231static void bfq_bfqq_move(struct bfq_data *bfqd, struct bfq_queue *bfqq,
3232 struct bfq_group *bfqg)
3233{
3234 struct bfq_entity *entity = &bfqq->entity;
3235
3236 /* If bfqq is empty, then bfq_bfqq_expire also invokes
3237 * bfq_del_bfqq_busy, thereby removing bfqq and its entity
3238 * from data structures related to current group. Otherwise we
3239 * need to remove bfqq explicitly with bfq_deactivate_bfqq, as
3240 * we do below.
3241 */
3242 if (bfqq == bfqd->in_service_queue)
3243 bfq_bfqq_expire(bfqd, bfqd->in_service_queue,
3244 false, BFQQE_PREEMPTED);
3245
3246 if (bfq_bfqq_busy(bfqq))
3247 bfq_deactivate_bfqq(bfqd, bfqq, false, false);
3248 else if (entity->on_st)
3249 bfq_put_idle_entity(bfq_entity_service_tree(entity), entity);
3250 bfqg_put(bfqq_group(bfqq));
3251
3252 /*
3253 * Here we use a reference to bfqg. We don't need a refcounter
3254 * as the cgroup reference will not be dropped, so that its
3255 * destroy() callback will not be invoked.
3256 */
3257 entity->parent = bfqg->my_entity;
3258 entity->sched_data = &bfqg->sched_data;
3259 bfqg_get(bfqg);
3260
36eca894
AA
3261 if (bfq_bfqq_busy(bfqq)) {
3262 bfq_pos_tree_add_move(bfqd, bfqq);
e21b7a0b 3263 bfq_activate_bfqq(bfqd, bfqq);
36eca894 3264 }
e21b7a0b
AA
3265
3266 if (!bfqd->in_service_queue && !bfqd->rq_in_driver)
3267 bfq_schedule_dispatch(bfqd);
3268}
3269
3270/**
3271 * __bfq_bic_change_cgroup - move @bic to @cgroup.
3272 * @bfqd: the queue descriptor.
3273 * @bic: the bic to move.
3274 * @blkcg: the blk-cgroup to move to.
3275 *
3276 * Move bic to blkcg, assuming that bfqd->queue is locked; the caller
3277 * has to make sure that the reference to cgroup is valid across the call.
3278 *
3279 * NOTE: an alternative approach might have been to store the current
3280 * cgroup in bfqq and get a reference to it, reducing the lookup
3281 * time here, at the price of slightly more complex code.
3282 */
3283static struct bfq_group *__bfq_bic_change_cgroup(struct bfq_data *bfqd,
3284 struct bfq_io_cq *bic,
3285 struct blkcg *blkcg)
3286{
3287 struct bfq_queue *async_bfqq = bic_to_bfqq(bic, 0);
3288 struct bfq_queue *sync_bfqq = bic_to_bfqq(bic, 1);
3289 struct bfq_group *bfqg;
3290 struct bfq_entity *entity;
3291
3292 bfqg = bfq_find_set_group(bfqd, blkcg);
3293
3294 if (unlikely(!bfqg))
3295 bfqg = bfqd->root_group;
3296
3297 if (async_bfqq) {
3298 entity = &async_bfqq->entity;
3299
3300 if (entity->sched_data != &bfqg->sched_data) {
3301 bic_set_bfqq(bic, NULL, 0);
3302 bfq_log_bfqq(bfqd, async_bfqq,
3303 "bic_change_group: %p %d",
36eca894 3304 async_bfqq, async_bfqq->ref);
e21b7a0b
AA
3305 bfq_put_queue(async_bfqq);
3306 }
3307 }
3308
3309 if (sync_bfqq) {
3310 entity = &sync_bfqq->entity;
3311 if (entity->sched_data != &bfqg->sched_data)
3312 bfq_bfqq_move(bfqd, sync_bfqq, bfqg);
3313 }
3314
3315 return bfqg;
3316}
3317
3318static void bfq_bic_update_cgroup(struct bfq_io_cq *bic, struct bio *bio)
3319{
3320 struct bfq_data *bfqd = bic_to_bfqd(bic);
3321 struct bfq_group *bfqg = NULL;
3322 uint64_t serial_nr;
3323
3324 rcu_read_lock();
3325 serial_nr = bio_blkcg(bio)->css.serial_nr;
3326
3327 /*
3328 * Check whether blkcg has changed. The condition may trigger
3329 * spuriously on a newly created cic but there's no harm.
3330 */
3331 if (unlikely(!bfqd) || likely(bic->blkcg_serial_nr == serial_nr))
3332 goto out;
3333
3334 bfqg = __bfq_bic_change_cgroup(bfqd, bic, bio_blkcg(bio));
3335 bic->blkcg_serial_nr = serial_nr;
3336out:
3337 rcu_read_unlock();
3338}
3339
3340/**
3341 * bfq_flush_idle_tree - deactivate any entity on the idle tree of @st.
3342 * @st: the service tree being flushed.
3343 */
3344static void bfq_flush_idle_tree(struct bfq_service_tree *st)
3345{
3346 struct bfq_entity *entity = st->first_idle;
3347
3348 for (; entity ; entity = st->first_idle)
3349 __bfq_deactivate_entity(entity, false);
3350}
3351
3352/**
3353 * bfq_reparent_leaf_entity - move leaf entity to the root_group.
3354 * @bfqd: the device data structure with the root group.
3355 * @entity: the entity to move.
3356 */
3357static void bfq_reparent_leaf_entity(struct bfq_data *bfqd,
3358 struct bfq_entity *entity)
3359{
3360 struct bfq_queue *bfqq = bfq_entity_to_bfqq(entity);
3361
3362 bfq_bfqq_move(bfqd, bfqq, bfqd->root_group);
aee69d78
PV
3363}
3364
3365/**
e21b7a0b
AA
3366 * bfq_reparent_active_entities - move to the root group all active
3367 * entities.
3368 * @bfqd: the device data structure with the root group.
3369 * @bfqg: the group to move from.
3370 * @st: the service tree with the entities.
aee69d78 3371 *
e21b7a0b 3372 * Needs queue_lock to be taken and reference to be valid over the call.
aee69d78 3373 */
e21b7a0b
AA
3374static void bfq_reparent_active_entities(struct bfq_data *bfqd,
3375 struct bfq_group *bfqg,
3376 struct bfq_service_tree *st)
aee69d78 3377{
e21b7a0b
AA
3378 struct rb_root *active = &st->active;
3379 struct bfq_entity *entity = NULL;
aee69d78 3380
e21b7a0b
AA
3381 if (!RB_EMPTY_ROOT(&st->active))
3382 entity = bfq_entity_of(rb_first(active));
aee69d78 3383
e21b7a0b
AA
3384 for (; entity ; entity = bfq_entity_of(rb_first(active)))
3385 bfq_reparent_leaf_entity(bfqd, entity);
aee69d78 3386
e21b7a0b
AA
3387 if (bfqg->sched_data.in_service_entity)
3388 bfq_reparent_leaf_entity(bfqd,
3389 bfqg->sched_data.in_service_entity);
aee69d78
PV
3390}
3391
3392/**
e21b7a0b
AA
3393 * bfq_pd_offline - deactivate the entity associated with @pd,
3394 * and reparent its children entities.
3395 * @pd: descriptor of the policy going offline.
aee69d78 3396 *
e21b7a0b
AA
3397 * blkio already grabs the queue_lock for us, so no need to use
3398 * RCU-based magic
aee69d78 3399 */
e21b7a0b 3400static void bfq_pd_offline(struct blkg_policy_data *pd)
aee69d78 3401{
e21b7a0b
AA
3402 struct bfq_service_tree *st;
3403 struct bfq_group *bfqg = pd_to_bfqg(pd);
3404 struct bfq_data *bfqd = bfqg->bfqd;
3405 struct bfq_entity *entity = bfqg->my_entity;
3406 unsigned long flags;
3407 int i;
aee69d78 3408
e21b7a0b
AA
3409 if (!entity) /* root group */
3410 return;
3411
3412 spin_lock_irqsave(&bfqd->lock, flags);
aee69d78 3413 /*
e21b7a0b
AA
3414 * Empty all service_trees belonging to this group before
3415 * deactivating the group itself.
aee69d78 3416 */
e21b7a0b
AA
3417 for (i = 0; i < BFQ_IOPRIO_CLASSES; i++) {
3418 st = bfqg->sched_data.service_tree + i;
3419
3420 /*
3421 * The idle tree may still contain bfq_queues belonging
3422 * to exited tasks because they never migrated to a different
3423 * cgroup from the one being destroyed now. No one else
3424 * can access them so it's safe to act without any lock.
3425 */
3426 bfq_flush_idle_tree(st);
3427
3428 /*
3429 * It may happen that some queues are still active
3430 * (busy) upon group destruction (if the corresponding
3431 * processes have been forced to terminate). We move
3432 * all the leaf entities corresponding to these queues
3433 * to the root_group.
3434 * Also, it may happen that the group has an entity
3435 * in service, which is disconnected from the active
3436 * tree: it must be moved, too.
3437 * There is no need to put the sync queues, as the
3438 * scheduler has taken no reference.
3439 */
3440 bfq_reparent_active_entities(bfqd, bfqg, st);
aee69d78
PV
3441 }
3442
e21b7a0b
AA
3443 __bfq_deactivate_entity(entity, false);
3444 bfq_put_async_queues(bfqd, bfqg);
3445
36eca894 3446 bfq_unlock_put_ioc_restore(bfqd, flags);
e21b7a0b
AA
3447 /*
3448 * @blkg is going offline and will be ignored by
3449 * blkg_[rw]stat_recursive_sum(). Transfer stats to the parent so
3450 * that they don't get lost. If IOs complete after this point, the
3451 * stats for them will be lost. Oh well...
3452 */
3453 bfqg_stats_xfer_dead(bfqg);
aee69d78
PV
3454}
3455
44e44a1b
PV
3456static void bfq_end_wr_async(struct bfq_data *bfqd)
3457{
3458 struct blkcg_gq *blkg;
3459
3460 list_for_each_entry(blkg, &bfqd->queue->blkg_list, q_node) {
3461 struct bfq_group *bfqg = blkg_to_bfqg(blkg);
3462
3463 bfq_end_wr_async_queues(bfqd, bfqg);
3464 }
3465 bfq_end_wr_async_queues(bfqd, bfqd->root_group);
3466}
3467
e21b7a0b 3468static int bfq_io_show_weight(struct seq_file *sf, void *v)
aee69d78 3469{
e21b7a0b
AA
3470 struct blkcg *blkcg = css_to_blkcg(seq_css(sf));
3471 struct bfq_group_data *bfqgd = blkcg_to_bfqgd(blkcg);
3472 unsigned int val = 0;
aee69d78 3473
e21b7a0b
AA
3474 if (bfqgd)
3475 val = bfqgd->weight;
aee69d78 3476
e21b7a0b 3477 seq_printf(sf, "%u\n", val);
aee69d78 3478
e21b7a0b
AA
3479 return 0;
3480}
3481
3482static int bfq_io_set_weight_legacy(struct cgroup_subsys_state *css,
3483 struct cftype *cftype,
3484 u64 val)
aee69d78 3485{
e21b7a0b
AA
3486 struct blkcg *blkcg = css_to_blkcg(css);
3487 struct bfq_group_data *bfqgd = blkcg_to_bfqgd(blkcg);
3488 struct blkcg_gq *blkg;
3489 int ret = -ERANGE;
aee69d78 3490
e21b7a0b
AA
3491 if (val < BFQ_MIN_WEIGHT || val > BFQ_MAX_WEIGHT)
3492 return ret;
aee69d78 3493
e21b7a0b
AA
3494 ret = 0;
3495 spin_lock_irq(&blkcg->lock);
3496 bfqgd->weight = (unsigned short)val;
3497 hlist_for_each_entry(blkg, &blkcg->blkg_list, blkcg_node) {
3498 struct bfq_group *bfqg = blkg_to_bfqg(blkg);
3499
3500 if (!bfqg)
3501 continue;
3502 /*
3503 * Setting the prio_changed flag of the entity
3504 * to 1 with new_weight == weight would re-set
3505 * the value of the weight to its ioprio mapping.
3506 * Set the flag only if necessary.
3507 */
3508 if ((unsigned short)val != bfqg->entity.new_weight) {
3509 bfqg->entity.new_weight = (unsigned short)val;
3510 /*
3511 * Make sure that the above new value has been
3512 * stored in bfqg->entity.new_weight before
3513 * setting the prio_changed flag. In fact,
3514 * this flag may be read asynchronously (in
3515 * critical sections protected by a different
3516 * lock than that held here), and finding this
3517 * flag set may cause the execution of the code
3518 * for updating parameters whose value may
3519 * depend also on bfqg->entity.new_weight (in
3520 * __bfq_entity_update_weight_prio).
3521 * This barrier makes sure that the new value
3522 * of bfqg->entity.new_weight is correctly
3523 * seen in that code.
3524 */
3525 smp_wmb();
3526 bfqg->entity.prio_changed = 1;
3527 }
aee69d78 3528 }
e21b7a0b 3529 spin_unlock_irq(&blkcg->lock);
aee69d78 3530
e21b7a0b
AA
3531 return ret;
3532}
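
/*
 * Editor's illustrative sketch, not part of the BFQ source: the
 * publish/consume pattern that the smp_wmb() above relies on, extracted
 * into a hypothetical helper. The writer stores the new weight first and
 * raises the flag only afterwards, so a reader that observes prio_changed
 * set (through a matching read barrier or locking on its side, as
 * described in the comment above) is guaranteed to also observe the new
 * weight.
 */
static void example_publish_new_weight(struct bfq_entity *entity,
				       unsigned short new_weight)
{
	entity->new_weight = new_weight;	/* 1: store the payload */
	smp_wmb();				/* 2: order payload before flag */
	entity->prio_changed = 1;		/* 3: publish the change */
}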
aee69d78 3533
e21b7a0b
AA
3534static ssize_t bfq_io_set_weight(struct kernfs_open_file *of,
3535 char *buf, size_t nbytes,
3536 loff_t off)
3537{
3538 u64 weight;
3539 /* First unsigned long found in the file is used */
3540 int ret = kstrtoull(strim(buf), 0, &weight);
3541
3542 if (ret)
3543 return ret;
3544
3545 return bfq_io_set_weight_legacy(of_css(of), NULL, weight);
aee69d78
PV
3546}
3547
e21b7a0b 3548static int bfqg_print_stat(struct seq_file *sf, void *v)
aee69d78 3549{
e21b7a0b
AA
3550 blkcg_print_blkgs(sf, css_to_blkcg(seq_css(sf)), blkg_prfill_stat,
3551 &blkcg_policy_bfq, seq_cft(sf)->private, false);
3552 return 0;
3553}
aee69d78 3554
e21b7a0b
AA
3555static int bfqg_print_rwstat(struct seq_file *sf, void *v)
3556{
3557 blkcg_print_blkgs(sf, css_to_blkcg(seq_css(sf)), blkg_prfill_rwstat,
3558 &blkcg_policy_bfq, seq_cft(sf)->private, true);
3559 return 0;
3560}
aee69d78 3561
e21b7a0b
AA
3562static u64 bfqg_prfill_stat_recursive(struct seq_file *sf,
3563 struct blkg_policy_data *pd, int off)
3564{
3565 u64 sum = blkg_stat_recursive_sum(pd_to_blkg(pd),
3566 &blkcg_policy_bfq, off);
3567 return __blkg_prfill_u64(sf, pd, sum);
3568}
aee69d78 3569
e21b7a0b
AA
3570static u64 bfqg_prfill_rwstat_recursive(struct seq_file *sf,
3571 struct blkg_policy_data *pd, int off)
3572{
3573 struct blkg_rwstat sum = blkg_rwstat_recursive_sum(pd_to_blkg(pd),
3574 &blkcg_policy_bfq,
3575 off);
3576 return __blkg_prfill_rwstat(sf, pd, &sum);
aee69d78
PV
3577}
3578
e21b7a0b 3579static int bfqg_print_stat_recursive(struct seq_file *sf, void *v)
aee69d78 3580{
e21b7a0b
AA
3581 blkcg_print_blkgs(sf, css_to_blkcg(seq_css(sf)),
3582 bfqg_prfill_stat_recursive, &blkcg_policy_bfq,
3583 seq_cft(sf)->private, false);
3584 return 0;
3585}
aee69d78 3586
e21b7a0b
AA
3587static int bfqg_print_rwstat_recursive(struct seq_file *sf, void *v)
3588{
3589 blkcg_print_blkgs(sf, css_to_blkcg(seq_css(sf)),
3590 bfqg_prfill_rwstat_recursive, &blkcg_policy_bfq,
3591 seq_cft(sf)->private, true);
3592 return 0;
aee69d78
PV
3593}
3594
e21b7a0b
AA
3595static u64 bfqg_prfill_sectors(struct seq_file *sf, struct blkg_policy_data *pd,
3596 int off)
aee69d78 3597{
e21b7a0b 3598 u64 sum = blkg_rwstat_total(&pd->blkg->stat_bytes);
aee69d78 3599
e21b7a0b 3600 return __blkg_prfill_u64(sf, pd, sum >> 9);
aee69d78
PV
3601}
3602
e21b7a0b 3603static int bfqg_print_stat_sectors(struct seq_file *sf, void *v)
aee69d78 3604{
e21b7a0b
AA
3605 blkcg_print_blkgs(sf, css_to_blkcg(seq_css(sf)),
3606 bfqg_prfill_sectors, &blkcg_policy_bfq, 0, false);
3607 return 0;
3608}
aee69d78 3609
e21b7a0b
AA
3610static u64 bfqg_prfill_sectors_recursive(struct seq_file *sf,
3611 struct blkg_policy_data *pd, int off)
3612{
3613 struct blkg_rwstat tmp = blkg_rwstat_recursive_sum(pd->blkg, NULL,
3614 offsetof(struct blkcg_gq, stat_bytes));
3615 u64 sum = atomic64_read(&tmp.aux_cnt[BLKG_RWSTAT_READ]) +
3616 atomic64_read(&tmp.aux_cnt[BLKG_RWSTAT_WRITE]);
aee69d78 3617
e21b7a0b
AA
3618 return __blkg_prfill_u64(sf, pd, sum >> 9);
3619}
aee69d78 3620
e21b7a0b
AA
3621static int bfqg_print_stat_sectors_recursive(struct seq_file *sf, void *v)
3622{
3623 blkcg_print_blkgs(sf, css_to_blkcg(seq_css(sf)),
3624 bfqg_prfill_sectors_recursive, &blkcg_policy_bfq, 0,
3625 false);
3626 return 0;
aee69d78
PV
3627}
3628
e21b7a0b
AA
3629static u64 bfqg_prfill_avg_queue_size(struct seq_file *sf,
3630 struct blkg_policy_data *pd, int off)
aee69d78 3631{
e21b7a0b
AA
3632 struct bfq_group *bfqg = pd_to_bfqg(pd);
3633 u64 samples = blkg_stat_read(&bfqg->stats.avg_queue_size_samples);
3634 u64 v = 0;
aee69d78 3635
e21b7a0b
AA
3636 if (samples) {
3637 v = blkg_stat_read(&bfqg->stats.avg_queue_size_sum);
3638 v = div64_u64(v, samples);
3639 }
3640 __blkg_prfill_u64(sf, pd, v);
3641 return 0;
3642}
aee69d78 3643
e21b7a0b
AA
3644/* print avg_queue_size */
3645static int bfqg_print_avg_queue_size(struct seq_file *sf, void *v)
3646{
3647 blkcg_print_blkgs(sf, css_to_blkcg(seq_css(sf)),
3648 bfqg_prfill_avg_queue_size, &blkcg_policy_bfq,
3649 0, false);
3650 return 0;
3651}
3652
3653static struct bfq_group *
3654bfq_create_group_hierarchy(struct bfq_data *bfqd, int node)
3655{
3656 int ret;
3657
3658 ret = blkcg_activate_policy(bfqd->queue, &blkcg_policy_bfq);
3659 if (ret)
3660 return NULL;
3661
3662 return blkg_to_bfqg(bfqd->queue->root_blkg);
aee69d78
PV
3663}
3664
e21b7a0b
AA
3665static struct cftype bfq_blkcg_legacy_files[] = {
3666 {
3667 .name = "bfq.weight",
3668 .flags = CFTYPE_NOT_ON_ROOT,
3669 .seq_show = bfq_io_show_weight,
3670 .write_u64 = bfq_io_set_weight_legacy,
3671 },
3672
3673 /* statistics, covers only the tasks in the bfqg */
3674 {
3675 .name = "bfq.time",
3676 .private = offsetof(struct bfq_group, stats.time),
3677 .seq_show = bfqg_print_stat,
3678 },
3679 {
3680 .name = "bfq.sectors",
3681 .seq_show = bfqg_print_stat_sectors,
3682 },
3683 {
3684 .name = "bfq.io_service_bytes",
3685 .private = (unsigned long)&blkcg_policy_bfq,
3686 .seq_show = blkg_print_stat_bytes,
3687 },
3688 {
3689 .name = "bfq.io_serviced",
3690 .private = (unsigned long)&blkcg_policy_bfq,
3691 .seq_show = blkg_print_stat_ios,
3692 },
3693 {
3694 .name = "bfq.io_service_time",
3695 .private = offsetof(struct bfq_group, stats.service_time),
3696 .seq_show = bfqg_print_rwstat,
3697 },
3698 {
3699 .name = "bfq.io_wait_time",
3700 .private = offsetof(struct bfq_group, stats.wait_time),
3701 .seq_show = bfqg_print_rwstat,
3702 },
3703 {
3704 .name = "bfq.io_merged",
3705 .private = offsetof(struct bfq_group, stats.merged),
3706 .seq_show = bfqg_print_rwstat,
3707 },
3708 {
3709 .name = "bfq.io_queued",
3710 .private = offsetof(struct bfq_group, stats.queued),
3711 .seq_show = bfqg_print_rwstat,
3712 },
3713
3714 /* the same statistics, which cover the bfqg and its descendants */
3715 {
3716 .name = "bfq.time_recursive",
3717 .private = offsetof(struct bfq_group, stats.time),
3718 .seq_show = bfqg_print_stat_recursive,
3719 },
3720 {
3721 .name = "bfq.sectors_recursive",
3722 .seq_show = bfqg_print_stat_sectors_recursive,
3723 },
3724 {
3725 .name = "bfq.io_service_bytes_recursive",
3726 .private = (unsigned long)&blkcg_policy_bfq,
3727 .seq_show = blkg_print_stat_bytes_recursive,
3728 },
3729 {
3730 .name = "bfq.io_serviced_recursive",
3731 .private = (unsigned long)&blkcg_policy_bfq,
3732 .seq_show = blkg_print_stat_ios_recursive,
3733 },
3734 {
3735 .name = "bfq.io_service_time_recursive",
3736 .private = offsetof(struct bfq_group, stats.service_time),
3737 .seq_show = bfqg_print_rwstat_recursive,
3738 },
3739 {
3740 .name = "bfq.io_wait_time_recursive",
3741 .private = offsetof(struct bfq_group, stats.wait_time),
3742 .seq_show = bfqg_print_rwstat_recursive,
3743 },
3744 {
3745 .name = "bfq.io_merged_recursive",
3746 .private = offsetof(struct bfq_group, stats.merged),
3747 .seq_show = bfqg_print_rwstat_recursive,
3748 },
3749 {
3750 .name = "bfq.io_queued_recursive",
3751 .private = offsetof(struct bfq_group, stats.queued),
3752 .seq_show = bfqg_print_rwstat_recursive,
3753 },
3754 {
3755 .name = "bfq.avg_queue_size",
3756 .seq_show = bfqg_print_avg_queue_size,
3757 },
3758 {
3759 .name = "bfq.group_wait_time",
3760 .private = offsetof(struct bfq_group, stats.group_wait_time),
3761 .seq_show = bfqg_print_stat,
3762 },
3763 {
3764 .name = "bfq.idle_time",
3765 .private = offsetof(struct bfq_group, stats.idle_time),
3766 .seq_show = bfqg_print_stat,
3767 },
3768 {
3769 .name = "bfq.empty_time",
3770 .private = offsetof(struct bfq_group, stats.empty_time),
3771 .seq_show = bfqg_print_stat,
3772 },
3773 {
3774 .name = "bfq.dequeue",
3775 .private = offsetof(struct bfq_group, stats.dequeue),
3776 .seq_show = bfqg_print_stat,
3777 },
3778 { } /* terminate */
3779};
3780
3781static struct cftype bfq_blkg_files[] = {
3782 {
3783 .name = "bfq.weight",
3784 .flags = CFTYPE_NOT_ON_ROOT,
3785 .seq_show = bfq_io_show_weight,
3786 .write = bfq_io_set_weight,
3787 },
3788 {} /* terminate */
3789};
3790
3791#else /* CONFIG_BFQ_GROUP_IOSCHED */
3792
3793static inline void bfqg_stats_update_io_add(struct bfq_group *bfqg,
3794 struct bfq_queue *bfqq, unsigned int op) { }
3795static inline void
3796bfqg_stats_update_io_remove(struct bfq_group *bfqg, unsigned int op) { }
3797static inline void
3798bfqg_stats_update_io_merged(struct bfq_group *bfqg, unsigned int op) { }
3799static inline void bfqg_stats_update_completion(struct bfq_group *bfqg,
3800 uint64_t start_time, uint64_t io_start_time,
3801 unsigned int op) { }
3802static inline void
3803bfqg_stats_set_start_group_wait_time(struct bfq_group *bfqg,
3804 struct bfq_group *curr_bfqg) { }
3805static inline void bfqg_stats_end_empty_time(struct bfqg_stats *stats) { }
3806static inline void bfqg_stats_update_dequeue(struct bfq_group *bfqg) { }
3807static inline void bfqg_stats_set_start_empty_time(struct bfq_group *bfqg) { }
3808static inline void bfqg_stats_update_idle_time(struct bfq_group *bfqg) { }
3809static inline void bfqg_stats_set_start_idle_time(struct bfq_group *bfqg) { }
3810static inline void bfqg_stats_update_avg_queue_size(struct bfq_group *bfqg) { }
3811
3812static void bfq_bfqq_move(struct bfq_data *bfqd, struct bfq_queue *bfqq,
3813 struct bfq_group *bfqg) {}
3814
3815static void bfq_init_entity(struct bfq_entity *entity,
3816 struct bfq_group *bfqg)
aee69d78
PV
3817{
3818 struct bfq_queue *bfqq = bfq_entity_to_bfqq(entity);
3819
3820 entity->weight = entity->new_weight;
3821 entity->orig_weight = entity->new_weight;
e21b7a0b
AA
3822 if (bfqq) {
3823 bfqq->ioprio = bfqq->new_ioprio;
3824 bfqq->ioprio_class = bfqq->new_ioprio_class;
3825 }
3826 entity->sched_data = &bfqg->sched_data;
3827}
3828
3829static void bfq_bic_update_cgroup(struct bfq_io_cq *bic, struct bio *bio) {}
3830
44e44a1b
PV
3831static void bfq_end_wr_async(struct bfq_data *bfqd)
3832{
3833 bfq_end_wr_async_queues(bfqd, bfqd->root_group);
3834}
3835
e21b7a0b
AA
3836static struct bfq_group *bfq_find_set_group(struct bfq_data *bfqd,
3837 struct blkcg *blkcg)
3838{
3839 return bfqd->root_group;
3840}
3841
3842static struct bfq_group *bfqq_group(struct bfq_queue *bfqq)
3843{
3844 return bfqq->bfqd->root_group;
3845}
aee69d78 3846
e21b7a0b
AA
3847static struct bfq_group *bfq_create_group_hierarchy(struct bfq_data *bfqd,
3848 int node)
3849{
3850 struct bfq_group *bfqg;
3851 int i;
3852
3853 bfqg = kmalloc_node(sizeof(*bfqg), GFP_KERNEL | __GFP_ZERO, node);
3854 if (!bfqg)
3855 return NULL;
aee69d78 3856
e21b7a0b
AA
3857 for (i = 0; i < BFQ_IOPRIO_CLASSES; i++)
3858 bfqg->sched_data.service_tree[i] = BFQ_SERVICE_TREE_INIT;
3859
3860 return bfqg;
aee69d78 3861}
e21b7a0b 3862#endif /* CONFIG_BFQ_GROUP_IOSCHED */
aee69d78
PV
3863
3864#define bfq_class_idle(bfqq) ((bfqq)->ioprio_class == IOPRIO_CLASS_IDLE)
3865#define bfq_class_rt(bfqq) ((bfqq)->ioprio_class == IOPRIO_CLASS_RT)
3866
3867#define bfq_sample_valid(samples) ((samples) > 80)
3868
aee69d78
PV
3869/*
3870 * Lifted from AS - choose which of rq1 and rq2 is best served now.
3871 * We choose the request that is closest to the head right now. Distance
3872 * behind the head is penalized and only allowed to a certain extent.
3873 */
3874static struct request *bfq_choose_req(struct bfq_data *bfqd,
3875 struct request *rq1,
3876 struct request *rq2,
3877 sector_t last)
3878{
3879 sector_t s1, s2, d1 = 0, d2 = 0;
3880 unsigned long back_max;
3881#define BFQ_RQ1_WRAP 0x01 /* request 1 wraps */
3882#define BFQ_RQ2_WRAP 0x02 /* request 2 wraps */
3883 unsigned int wrap = 0; /* bit mask: requests behind the disk head? */
3884
3885 if (!rq1 || rq1 == rq2)
3886 return rq2;
3887 if (!rq2)
3888 return rq1;
3889
3890 if (rq_is_sync(rq1) && !rq_is_sync(rq2))
3891 return rq1;
3892 else if (rq_is_sync(rq2) && !rq_is_sync(rq1))
3893 return rq2;
3894 if ((rq1->cmd_flags & REQ_META) && !(rq2->cmd_flags & REQ_META))
3895 return rq1;
3896 else if ((rq2->cmd_flags & REQ_META) && !(rq1->cmd_flags & REQ_META))
3897 return rq2;
3898
3899 s1 = blk_rq_pos(rq1);
3900 s2 = blk_rq_pos(rq2);
3901
3902 /*
3903 * By definition, 1KiB is 2 sectors.
3904 */
3905 back_max = bfqd->bfq_back_max * 2;
3906
3907 /*
3908 * Strict one way elevator _except_ in the case where we allow
3909 * short backward seeks which are biased as twice the cost of a
3910 * similar forward seek.
3911 */
3912 if (s1 >= last)
3913 d1 = s1 - last;
3914 else if (s1 + back_max >= last)
3915 d1 = (last - s1) * bfqd->bfq_back_penalty;
3916 else
3917 wrap |= BFQ_RQ1_WRAP;
3918
3919 if (s2 >= last)
3920 d2 = s2 - last;
3921 else if (s2 + back_max >= last)
3922 d2 = (last - s2) * bfqd->bfq_back_penalty;
3923 else
3924 wrap |= BFQ_RQ2_WRAP;
3925
3926 /* Found required data */
3927
3928 /*
3929 * By doing switch() on the bit mask "wrap" we avoid having to
3930 * check two variables for all permutations: --> faster!
3931 */
3932 switch (wrap) {
3933 case 0: /* common case for CFQ: rq1 and rq2 not wrapped */
3934 if (d1 < d2)
3935 return rq1;
3936 else if (d2 < d1)
3937 return rq2;
3938
3939 if (s1 >= s2)
3940 return rq1;
3941 else
3942 return rq2;
3943
3944 case BFQ_RQ2_WRAP:
3945 return rq1;
3946 case BFQ_RQ1_WRAP:
3947 return rq2;
3948 case BFQ_RQ1_WRAP|BFQ_RQ2_WRAP: /* both rqs wrapped */
3949 default:
3950 /*
3951 * Since both rqs are wrapped,
3952 * start with the one that's further behind head
3953 * (--> only *one* back seek required),
3954 * since back seek takes more time than forward.
3955 */
3956 if (s1 <= s2)
3957 return rq1;
3958 else
3959 return rq2;
3960 }
3961}
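
/*
 * Editor's illustrative sketch, not part of the BFQ source: the
 * forward/backward seek costs computed above, reduced to plain sector
 * arithmetic in a hypothetical helper. Here back_max and penalty stand
 * for bfqd->bfq_back_max * 2 and bfqd->bfq_back_penalty; a return value
 * of (sector_t)-1 corresponds to the "wrapped" case that bfq_choose_req()
 * tracks with the BFQ_RQ*_WRAP bits.
 */
static sector_t example_seek_cost(sector_t s, sector_t last,
				  sector_t back_max, unsigned int penalty)
{
	if (s >= last)			/* forward seek: plain distance */
		return s - last;
	if (s + back_max >= last)	/* short backward seek: penalized */
		return (last - s) * penalty;
	return (sector_t)-1;		/* too far behind: treat as wrapped */
}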
3962
36eca894
AA
3963static struct bfq_queue *
3964bfq_rq_pos_tree_lookup(struct bfq_data *bfqd, struct rb_root *root,
3965 sector_t sector, struct rb_node **ret_parent,
3966 struct rb_node ***rb_link)
3967{
3968 struct rb_node **p, *parent;
3969 struct bfq_queue *bfqq = NULL;
3970
3971 parent = NULL;
3972 p = &root->rb_node;
3973 while (*p) {
3974 struct rb_node **n;
3975
3976 parent = *p;
3977 bfqq = rb_entry(parent, struct bfq_queue, pos_node);
3978
3979 /*
3980 * Sort strictly based on sector. Smallest to the left,
3981 * largest to the right.
3982 */
3983 if (sector > blk_rq_pos(bfqq->next_rq))
3984 n = &(*p)->rb_right;
3985 else if (sector < blk_rq_pos(bfqq->next_rq))
3986 n = &(*p)->rb_left;
3987 else
3988 break;
3989 p = n;
3990 bfqq = NULL;
3991 }
3992
3993 *ret_parent = parent;
3994 if (rb_link)
3995 *rb_link = p;
3996
3997 bfq_log(bfqd, "rq_pos_tree_lookup %llu: returning %d",
3998 (unsigned long long)sector,
3999 bfqq ? bfqq->pid : 0);
4000
4001 return bfqq;
4002}
4003
4004static void bfq_pos_tree_add_move(struct bfq_data *bfqd, struct bfq_queue *bfqq)
4005{
4006 struct rb_node **p, *parent;
4007 struct bfq_queue *__bfqq;
4008
4009 if (bfqq->pos_root) {
4010 rb_erase(&bfqq->pos_node, bfqq->pos_root);
4011 bfqq->pos_root = NULL;
4012 }
4013
4014 if (bfq_class_idle(bfqq))
4015 return;
4016 if (!bfqq->next_rq)
4017 return;
4018
4019 bfqq->pos_root = &bfq_bfqq_to_bfqg(bfqq)->rq_pos_tree;
4020 __bfqq = bfq_rq_pos_tree_lookup(bfqd, bfqq->pos_root,
4021 blk_rq_pos(bfqq->next_rq), &parent, &p);
4022 if (!__bfqq) {
4023 rb_link_node(&bfqq->pos_node, parent, p);
4024 rb_insert_color(&bfqq->pos_node, bfqq->pos_root);
4025 } else
4026 bfqq->pos_root = NULL;
4027}
4028
1de0c4cd
AA
4029/*
4030 * Tell whether there are active queues or groups with differentiated weights.
4031 */
4032static bool bfq_differentiated_weights(struct bfq_data *bfqd)
4033{
4034 /*
4035 * For weights to differ, at least one of the trees must contain
4036 * at least two nodes.
4037 */
4038 return (!RB_EMPTY_ROOT(&bfqd->queue_weights_tree) &&
4039 (bfqd->queue_weights_tree.rb_node->rb_left ||
4040 bfqd->queue_weights_tree.rb_node->rb_right)
4041#ifdef CONFIG_BFQ_GROUP_IOSCHED
4042 ) ||
4043 (!RB_EMPTY_ROOT(&bfqd->group_weights_tree) &&
4044 (bfqd->group_weights_tree.rb_node->rb_left ||
4045 bfqd->group_weights_tree.rb_node->rb_right)
4046#endif
4047 );
4048}
4049
4050/*
4051 * The following function returns true if every queue must receive the
4052 * same share of the throughput (this condition is used when deciding
4053 * whether idling may be disabled, see the comments in the function
4054 * bfq_bfqq_may_idle()).
4055 *
4056 * Such a scenario occurs when:
4057 * 1) all active queues have the same weight,
4058 * 2) all active groups at the same level in the groups tree have the same
4059 * weight,
4060 * 3) all active groups at the same level in the groups tree have the same
4061 * number of children.
4062 *
4063 * Unfortunately, keeping the necessary state for evaluating exactly the
4064 * above symmetry conditions would be quite complex and time-consuming.
4065 * Therefore this function evaluates, instead, the following stronger
4066 * sub-conditions, for which it is much easier to maintain the needed
4067 * state:
4068 * 1) all active queues have the same weight,
4069 * 2) all active groups have the same weight,
4070 * 3) all active groups have at most one active child each.
4071 * In particular, the last two conditions are always true if hierarchical
4072 * support and the cgroups interface are not enabled, thus no state needs
4073 * to be maintained in this case.
4074 */
4075static bool bfq_symmetric_scenario(struct bfq_data *bfqd)
4076{
4077 return !bfq_differentiated_weights(bfqd);
4078}
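
/*
 * Editor's illustrative sketch, not part of the BFQ source: why a
 * childless root implies a symmetric scenario. Each node of a weights
 * tree counts the active queues (or groups) having one particular
 * weight, so the tree contains one node per distinct weight in use.
 * "The root exists and has at least one child" is therefore equivalent
 * to "at least two different weights are active", which is exactly what
 * bfq_differentiated_weights() checks on each of the two trees. The
 * helper below is hypothetical and only restates that test.
 */
static bool example_two_distinct_weights(struct rb_root *weights_tree)
{
	return !RB_EMPTY_ROOT(weights_tree) &&
	       (weights_tree->rb_node->rb_left ||
		weights_tree->rb_node->rb_right);
}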
4079
4080/*
4081 * If the weight-counter tree passed as input contains no counter for
4082 * the weight of the input entity, then add that counter; otherwise just
4083 * increment the existing counter.
4084 *
4085 * Note that weight-counter trees contain few nodes in mostly symmetric
4086 * scenarios. For example, if all queues have the same weight, then the
4087 * weight-counter tree for the queues may contain at most one node.
4088 * This holds even if low_latency is on, because weight-raised queues
4089 * are not inserted in the tree.
4090 * In most scenarios, the rate at which nodes are created/destroyed
4091 * should be low too.
4092 */
4093static void bfq_weights_tree_add(struct bfq_data *bfqd,
4094 struct bfq_entity *entity,
4095 struct rb_root *root)
4096{
4097 struct rb_node **new = &(root->rb_node), *parent = NULL;
4098
4099 /*
4100 * Do not insert if the entity is already associated with a
4101 * counter, which happens if:
4102 * 1) the entity is associated with a queue,
4103 * 2) a request arrival has caused the queue to become both
4104 * non-weight-raised, and hence change its weight, and
4105 * backlogged; in this respect, each of the two events
4106 * causes an invocation of this function,
4107 * 3) this is the invocation of this function caused by the
4108 * second event. This second invocation is actually useless,
4109 * and we handle this fact by exiting immediately. More
4110 * efficient or clearer solutions might possibly be adopted.
4111 */
4112 if (entity->weight_counter)
4113 return;
4114
4115 while (*new) {
4116 struct bfq_weight_counter *__counter = container_of(*new,
4117 struct bfq_weight_counter,
4118 weights_node);
4119 parent = *new;
4120
4121 if (entity->weight == __counter->weight) {
4122 entity->weight_counter = __counter;
4123 goto inc_counter;
4124 }
4125 if (entity->weight < __counter->weight)
4126 new = &((*new)->rb_left);
4127 else
4128 new = &((*new)->rb_right);
4129 }
4130
4131 entity->weight_counter = kzalloc(sizeof(struct bfq_weight_counter),
4132 GFP_ATOMIC);
4133
4134 /*
4135 * In the unlucky event of an allocation failure, we just
4136 * exit. This will cause the weight of entity to not be
4137 * considered in bfq_differentiated_weights, which, in its
4138 * turn, causes the scenario to be deemed wrongly symmetric in
4139 * case entity's weight would have been the only weight making
4140 * the scenario asymmetric. On the bright side, no unbalance
4141 * will however occur when entity becomes inactive again (the
4142 * invocation of this function is triggered by an activation
4143 * of entity). In fact, bfq_weights_tree_remove does nothing
4144 * if !entity->weight_counter.
4145 */
4146 if (unlikely(!entity->weight_counter))
4147 return;
4148
4149 entity->weight_counter->weight = entity->weight;
4150 rb_link_node(&entity->weight_counter->weights_node, parent, new);
4151 rb_insert_color(&entity->weight_counter->weights_node, root);
4152
4153inc_counter:
4154 entity->weight_counter->num_active++;
4155}
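
/*
 * Editor's illustrative sketch, not part of the BFQ source: how the tree
 * evolves in a fully symmetric scenario. If three queues with the same
 * weight are activated, the first call allocates one counter node and the
 * other two only increment it, so the tree keeps a single, childless node
 * and bfq_differentiated_weights() keeps returning false. The helper and
 * its arguments are hypothetical.
 */
static void example_weights_tree_usage(struct bfq_data *bfqd,
				       struct bfq_entity *e1,
				       struct bfq_entity *e2,
				       struct bfq_entity *e3)
{
	/* e1, e2 and e3 are assumed to have the same ->weight */
	bfq_weights_tree_add(bfqd, e1, &bfqd->queue_weights_tree);
	bfq_weights_tree_add(bfqd, e2, &bfqd->queue_weights_tree);
	bfq_weights_tree_add(bfqd, e3, &bfqd->queue_weights_tree);
	/* all three entities now share one counter with num_active == 3 */
}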
4156
4157/*
4158 * Decrement the weight counter associated with the entity, and, if the
4159 * counter reaches 0, remove the counter from the tree.
4160 * See the comments to the function bfq_weights_tree_add() for considerations
4161 * about overhead.
4162 */
4163static void bfq_weights_tree_remove(struct bfq_data *bfqd,
4164 struct bfq_entity *entity,
4165 struct rb_root *root)
4166{
4167 if (!entity->weight_counter)
4168 return;
4169
4170 entity->weight_counter->num_active--;
4171 if (entity->weight_counter->num_active > 0)
4172 goto reset_entity_pointer;
4173
4174 rb_erase(&entity->weight_counter->weights_node, root);
4175 kfree(entity->weight_counter);
4176
4177reset_entity_pointer:
4178 entity->weight_counter = NULL;
4179}
4180
aee69d78
PV
4181/*
4182 * Return expired entry, or NULL to just start from scratch in rbtree.
4183 */
4184static struct request *bfq_check_fifo(struct bfq_queue *bfqq,
4185 struct request *last)
4186{
4187 struct request *rq;
4188
4189 if (bfq_bfqq_fifo_expire(bfqq))
4190 return NULL;
4191
4192 bfq_mark_bfqq_fifo_expire(bfqq);
4193
4194 rq = rq_entry_fifo(bfqq->fifo.next);
4195
4196 if (rq == last || ktime_get_ns() < rq->fifo_time)
4197 return NULL;
4198
4199 bfq_log_bfqq(bfqq->bfqd, bfqq, "check_fifo: returned %p", rq);
4200 return rq;
4201}
4202
4203static struct request *bfq_find_next_rq(struct bfq_data *bfqd,
4204 struct bfq_queue *bfqq,
4205 struct request *last)
4206{
4207 struct rb_node *rbnext = rb_next(&last->rb_node);
4208 struct rb_node *rbprev = rb_prev(&last->rb_node);
4209 struct request *next, *prev = NULL;
4210
4211 /* Follow expired path, else get first next available. */
4212 next = bfq_check_fifo(bfqq, last);
4213 if (next)
4214 return next;
4215
4216 if (rbprev)
4217 prev = rb_entry_rq(rbprev);
4218
4219 if (rbnext)
4220 next = rb_entry_rq(rbnext);
4221 else {
4222 rbnext = rb_first(&bfqq->sort_list);
4223 if (rbnext && rbnext != &last->rb_node)
4224 next = rb_entry_rq(rbnext);
4225 }
4226
4227 return bfq_choose_req(bfqd, next, prev, blk_rq_pos(last));
4228}
4229
c074170e 4230/* see the definition of bfq_async_charge_factor for details */
aee69d78
PV
4231static unsigned long bfq_serv_to_charge(struct request *rq,
4232 struct bfq_queue *bfqq)
4233{
44e44a1b 4234 if (bfq_bfqq_sync(bfqq) || bfqq->wr_coeff > 1)
c074170e
PV
4235 return blk_rq_sectors(rq);
4236
cfd69712
PV
4237 /*
4238 * If there are no weight-raised queues, then amplify service
4239 * by just the async charge factor; otherwise amplify service
4240 * by twice the async charge factor, to further reduce latency
4241 * for weight-raised queues.
4242 */
4243 if (bfqq->bfqd->wr_busy_queues == 0)
4244 return blk_rq_sectors(rq) * bfq_async_charge_factor;
4245
4246 return blk_rq_sectors(rq) * 2 * bfq_async_charge_factor;
aee69d78
PV
4247}
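
/*
 * Editor's illustrative sketch, not part of the BFQ source: the three
 * charging cases above spelled out in a hypothetical helper. "factor"
 * plays the role of bfq_async_charge_factor, whose actual value is
 * defined elsewhere in this file and is not assumed here.
 */
static unsigned long example_charge(unsigned long rq_sectors, bool sync,
				    bool weight_raised, bool wr_queues_busy,
				    unsigned long factor)
{
	if (sync || weight_raised)	/* charged for the actual size */
		return rq_sectors;
	if (!wr_queues_busy)		/* plain async amplification */
		return rq_sectors * factor;
	return rq_sectors * 2 * factor;	/* protect weight-raised queues more */
}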
4248
4249/**
4250 * bfq_updated_next_req - update the queue after a new next_rq selection.
4251 * @bfqd: the device data the queue belongs to.
4252 * @bfqq: the queue to update.
4253 *
4254 * If the first request of a queue changes we make sure that the queue
4255 * has enough budget to serve at least its first request (if the
4256 * request has grown). We do this because if the queue has not enough
4257 * budget for its first request, it has to go through two dispatch
4258 * rounds to actually get it dispatched.
4259 */
4260static void bfq_updated_next_req(struct bfq_data *bfqd,
4261 struct bfq_queue *bfqq)
4262{
4263 struct bfq_entity *entity = &bfqq->entity;
4264 struct request *next_rq = bfqq->next_rq;
4265 unsigned long new_budget;
4266
4267 if (!next_rq)
4268 return;
4269
4270 if (bfqq == bfqd->in_service_queue)
4271 /*
4272 * In order not to break guarantees, budgets cannot be
4273 * changed after an entity has been selected.
4274 */
4275 return;
4276
4277 new_budget = max_t(unsigned long, bfqq->max_budget,
4278 bfq_serv_to_charge(next_rq, bfqq));
4279 if (entity->budget != new_budget) {
4280 entity->budget = new_budget;
4281 bfq_log_bfqq(bfqd, bfqq, "updated next rq: new budget %lu",
4282 new_budget);
e21b7a0b 4283 bfq_requeue_bfqq(bfqd, bfqq);
aee69d78
PV
4284 }
4285}
4286
36eca894
AA
4287static void
4288bfq_bfqq_resume_state(struct bfq_queue *bfqq, struct bfq_io_cq *bic)
4289{
4290 if (bic->saved_idle_window)
4291 bfq_mark_bfqq_idle_window(bfqq);
4292 else
4293 bfq_clear_bfqq_idle_window(bfqq);
4294
4295 if (bic->saved_IO_bound)
4296 bfq_mark_bfqq_IO_bound(bfqq);
4297 else
4298 bfq_clear_bfqq_IO_bound(bfqq);
4299
4300 bfqq->ttime = bic->saved_ttime;
4301 bfqq->wr_coeff = bic->saved_wr_coeff;
4302 bfqq->wr_start_at_switch_to_srt = bic->saved_wr_start_at_switch_to_srt;
4303 bfqq->last_wr_start_finish = bic->saved_last_wr_start_finish;
4304 bfqq->wr_cur_max_time = bic->saved_wr_cur_max_time;
4305
4306 if (bfqq->wr_coeff > 1 &&
4307 time_is_before_jiffies(bfqq->last_wr_start_finish +
4308 bfqq->wr_cur_max_time)) {
4309 bfq_log_bfqq(bfqq->bfqd, bfqq,
4310 "resume state: switching off wr");
4311
4312 bfqq->wr_coeff = 1;
4313 }
4314
4315 /* make sure weight will be updated, however we got here */
4316 bfqq->entity.prio_changed = 1;
4317}
4318
4319static int bfqq_process_refs(struct bfq_queue *bfqq)
4320{
4321 return bfqq->ref - bfqq->allocated - bfqq->entity.on_st;
4322}
4323
aee69d78
PV
4324static int bfq_bfqq_budget_left(struct bfq_queue *bfqq)
4325{
4326 struct bfq_entity *entity = &bfqq->entity;
4327
4328 return entity->budget - entity->service;
4329}
4330
4331/*
4332 * If enough samples have been computed, return the current max budget
4333 * stored in bfqd, which is dynamically updated according to the
4334 * estimated disk peak rate; otherwise return the default max budget
4335 */
4336static int bfq_max_budget(struct bfq_data *bfqd)
4337{
4338 if (bfqd->budgets_assigned < bfq_stats_min_budgets)
4339 return bfq_default_max_budget;
4340 else
4341 return bfqd->bfq_max_budget;
4342}
4343
4344/*
4345 * Return min budget, which is a fraction of the current or default
4346 * max budget (trying with 1/32)
4347 */
4348static int bfq_min_budget(struct bfq_data *bfqd)
4349{
4350 if (bfqd->budgets_assigned < bfq_stats_min_budgets)
4351 return bfq_default_max_budget / 32;
4352 else
4353 return bfqd->bfq_max_budget / 32;
4354}
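
/*
 * Editor's illustrative sketch, not part of the BFQ source: the relation
 * between the two helpers above, with a figure used purely for
 * illustration. Whatever max budget is in force, the minimum budget is
 * 1/32 of it; e.g. a 16384-sector max budget yields a 512-sector minimum.
 */
static int example_min_from_max_budget(int max_budget)
{
	return max_budget / 32;		/* e.g. 16384 / 32 == 512 sectors */
}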
4355
4356static void bfq_bfqq_expire(struct bfq_data *bfqd,
4357 struct bfq_queue *bfqq,
4358 bool compensate,
4359 enum bfqq_expiration reason);
4360
4361/*
4362 * The next function, invoked after the input queue bfqq switches from
4363 * idle to busy, updates the budget of bfqq. The function also tells
4364 * whether the in-service queue should be expired, by returning
4365 * true. The purpose of expiring the in-service queue is to give bfqq
4366 * the chance to possibly preempt the in-service queue, and the reason
44e44a1b
PV
4367 * for preempting the in-service queue is to achieve one of the two
4368 * goals below.
aee69d78 4369 *
44e44a1b
PV
4370 * 1. Guarantee to bfqq its reserved bandwidth even if bfqq has
4371 * expired because it has remained idle. In particular, bfqq may have
4372 * expired for one of the following two reasons:
aee69d78
PV
4373 *
4374 * - BFQQE_NO_MORE_REQUESTS bfqq did not enjoy any device idling
4375 * and did not make it to issue a new request before its last
4376 * request was served;
4377 *
4378 * - BFQQE_TOO_IDLE bfqq did enjoy device idling, but did not issue
4379 * a new request before the expiration of the idling-time.
4380 *
4381 * Even if bfqq has expired for one of the above reasons, the process
4382 * associated with the queue may, however, be issuing requests greedily,
4383 * and thus be sensitive to the bandwidth it receives (bfqq may have
4384 * remained idle for other reasons: CPU high load, bfqq not enjoying
4385 * idling, I/O throttling somewhere in the path from the process to
4386 * the I/O scheduler, ...). But if, after every expiration for one of
4387 * the above two reasons, bfqq has to wait for the service of at least
4388 * one full budget of another queue before being served again, then
4389 * bfqq is likely to get a much lower bandwidth or resource time than
4390 * its reserved ones. To address this issue, two countermeasures need
4391 * to be taken.
4392 *
4393 * First, the budget and the timestamps of bfqq need to be updated in
4394 * a special way on bfqq reactivation: they need to be updated as if
4395 * bfqq did not remain idle and did not expire. In fact, if they are
4396 * computed as if bfqq expired and remained idle until reactivation,
4397 * then the process associated with bfqq is treated as if, instead of
4398 * being greedy, it stopped issuing requests when bfqq remained idle,
4399 * and restarts issuing requests only on this reactivation. In other
4400 * words, the scheduler does not help the process recover the "service
4401 * hole" between bfqq expiration and reactivation. As a consequence,
4402 * the process receives a lower bandwidth than its reserved one. In
4403 * contrast, to recover this hole, the budget must be updated as if
4404 * bfqq was not expired at all before this reactivation, i.e., it must
4405 * be set to the value of the remaining budget when bfqq was
4406 * expired. Along the same line, timestamps need to be assigned the
4407 * value they had the last time bfqq was selected for service, i.e.,
4408 * before last expiration. Thus timestamps need to be back-shifted
4409 * with respect to their normal computation (see [1] for more details
4410 * on this tricky aspect).
4411 *
4412 * Secondly, to allow the process to recover the hole, the in-service
4413 * queue must be expired too, to give bfqq the chance to preempt it
4414 * immediately. In fact, if bfqq has to wait for a full budget of the
4415 * in-service queue to be completed, then it may become impossible to
4416 * let the process recover the hole, even if the back-shifted
4417 * timestamps of bfqq are lower than those of the in-service queue. If
4418 * this happens for most or all of the holes, then the process may not
4419 * receive its reserved bandwidth. In this respect, it is worth noting
4420 * that, since the service of outstanding requests cannot be preempted,
4421 * a small fraction of the holes may nevertheless be unrecoverable,
4422 * thereby causing a small loss of bandwidth.
4423 *
4424 * The last important point is detecting whether bfqq does need this
4425 * bandwidth recovery. In this respect, the next function deems the
4426 * process associated with bfqq greedy, and thus allows it to recover
4427 * the hole, if: 1) the process is waiting for the arrival of a new
4428 * request (which implies that bfqq expired for one of the above two
4429 * reasons), and 2) such a request has arrived soon. The first
4430 * condition is controlled through the flag non_blocking_wait_rq,
4431 * while the second through the flag arrived_in_time. If both
4432 * conditions hold, then the function computes the budget in the
4433 * above-described special way, and signals that the in-service queue
4434 * should be expired. Timestamp back-shifting is done later in
4435 * __bfq_activate_entity.
44e44a1b
PV
4436 *
4437 * 2. Reduce latency. Even if timestamps are not backshifted to let
4438 * the process associated with bfqq recover a service hole, bfqq may
4439 * however happen to have, after being (re)activated, a lower finish
4440 * timestamp than the in-service queue. That is, the next budget of
4441 * bfqq may have to be completed before the one of the in-service
4442 * queue. If this is the case, then preempting the in-service queue
4443 * allows this goal to be achieved, apart from the unpreemptible,
4444 * outstanding requests mentioned above.
4445 *
4446 * Unfortunately, regardless of which of the above two goals one wants
4447 * to achieve, service trees need first to be updated to know whether
4448 * the in-service queue must be preempted. To have service trees
4449 * correctly updated, the in-service queue must be expired and
4450 * rescheduled, and bfqq must be scheduled too. This is one of the
4451 * most costly operations (in future versions, the scheduling
4452 * mechanism may be re-designed in such a way as to make it possible to
4453 * know whether preemption is needed without needing to update service
4454 * trees). In addition, queue preemptions almost always cause random
4455 * I/O, and thus loss of throughput. Because of these facts, the next
4456 * function adopts the following simple scheme to avoid both costly
4457 * operations and too frequent preemptions: it requests the expiration
4458 * of the in-service queue (unconditionally) only for queues that need
4459 * to recover a hole, or that either are weight-raised or deserve to
4460 * be weight-raised.
aee69d78
PV
4461 */
4462static bool bfq_bfqq_update_budg_for_activation(struct bfq_data *bfqd,
4463 struct bfq_queue *bfqq,
44e44a1b
PV
4464 bool arrived_in_time,
4465 bool wr_or_deserves_wr)
aee69d78
PV
4466{
4467 struct bfq_entity *entity = &bfqq->entity;
4468
4469 if (bfq_bfqq_non_blocking_wait_rq(bfqq) && arrived_in_time) {
4470 /*
4471 * We do not clear the flag non_blocking_wait_rq here, as
4472 * the latter is used in bfq_activate_bfqq to signal
4473 * that timestamps need to be back-shifted (and is
4474 * cleared right after).
4475 */
4476
4477 /*
4478 * In next assignment we rely on that either
4479 * entity->service or entity->budget are not updated
4480 * on expiration if bfqq is empty (see
4481 * __bfq_bfqq_recalc_budget). Thus both quantities
4482 * remain unchanged after such an expiration, and the
4483 * following statement therefore assigns to
4484 * entity->budget the remaining budget on such an
4485 * expiration. For clarity, entity->service is not
4486 * updated on expiration in any case, and, in normal
4487 * operation, is reset only when bfqq is selected for
4488 * service (see bfq_get_next_queue).
4489 */
4490 entity->budget = min_t(unsigned long,
4491 bfq_bfqq_budget_left(bfqq),
4492 bfqq->max_budget);
4493
4494 return true;
4495 }
4496
4497 entity->budget = max_t(unsigned long, bfqq->max_budget,
4498 bfq_serv_to_charge(bfqq->next_rq, bfqq));
4499 bfq_clear_bfqq_non_blocking_wait_rq(bfqq);
44e44a1b
PV
4500 return wr_or_deserves_wr;
4501}
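
/*
 * Editor's illustrative sketch, not part of the BFQ source: the two budget
 * assignments above condensed into a hypothetical helper. A queue that was
 * waiting for a new request and received it in time gets back
 * min(remaining budget, max_budget), as if it had never expired; any other
 * queue simply restarts from max(max_budget, charge of its next request).
 */
static unsigned long example_reactivation_budget(unsigned long budget_left,
						 unsigned long max_budget,
						 unsigned long next_rq_charge,
						 bool non_blocking_wait_rq,
						 bool arrived_in_time)
{
	if (non_blocking_wait_rq && arrived_in_time)
		return min_t(unsigned long, budget_left, max_budget);

	return max_t(unsigned long, max_budget, next_rq_charge);
}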
4502
4503static unsigned int bfq_wr_duration(struct bfq_data *bfqd)
4504{
4505 u64 dur;
4506
4507 if (bfqd->bfq_wr_max_time > 0)
4508 return bfqd->bfq_wr_max_time;
4509
4510 dur = bfqd->RT_prod;
4511 do_div(dur, bfqd->peak_rate);
4512
4513 /*
4514 * Limit duration between 3 and 13 seconds. Tests show that
4515 * higher values than 13 seconds often yield the opposite of
4516 * the desired result, i.e., worsen responsiveness by letting
4517 * non-interactive and non-soft-real-time applications
4518 * preserve weight raising for a too long time interval.
4519 *
4520 * On the other end, lower values than 3 seconds make it
4521 * difficult for most interactive tasks to complete their jobs
4522 * before weight-raising finishes.
4523 */
4524 if (dur > msecs_to_jiffies(13000))
4525 dur = msecs_to_jiffies(13000);
4526 else if (dur < msecs_to_jiffies(3000))
4527 dur = msecs_to_jiffies(3000);
4528
4529 return dur;
4530}
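
/*
 * Editor's illustrative sketch, not part of the BFQ source: the clamping
 * performed by bfq_wr_duration(), on plain millisecond values instead of
 * jiffies, in a hypothetical helper. For example, a quotient of 1200 ms
 * (very fast device) is stretched to the 3000 ms minimum, while one of
 * 40000 ms (very slow device) is cut to the 13000 ms maximum.
 */
static unsigned int example_wr_duration_ms(u64 rt_prod, u32 peak_rate)
{
	u64 dur = rt_prod;

	do_div(dur, peak_rate);

	if (dur > 13000)
		dur = 13000;
	else if (dur < 3000)
		dur = 3000;

	return dur;
}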
4531
4532static void bfq_update_bfqq_wr_on_rq_arrival(struct bfq_data *bfqd,
4533 struct bfq_queue *bfqq,
4534 unsigned int old_wr_coeff,
4535 bool wr_or_deserves_wr,
77b7dcea
PV
4536 bool interactive,
4537 bool soft_rt)
44e44a1b
PV
4538{
4539 if (old_wr_coeff == 1 && wr_or_deserves_wr) {
4540 /* start a weight-raising period */
77b7dcea
PV
4541 if (interactive) {
4542 bfqq->wr_coeff = bfqd->bfq_wr_coeff;
4543 bfqq->wr_cur_max_time = bfq_wr_duration(bfqd);
4544 } else {
4545 bfqq->wr_start_at_switch_to_srt = jiffies;
4546 bfqq->wr_coeff = bfqd->bfq_wr_coeff *
4547 BFQ_SOFTRT_WEIGHT_FACTOR;
4548 bfqq->wr_cur_max_time =
4549 bfqd->bfq_wr_rt_max_time;
4550 }
44e44a1b
PV
4551
4552 /*
4553 * If needed, further reduce budget to make sure it is
4554 * close to bfqq's backlog, so as to reduce the
4555 * scheduling-error component due to a too large
4556 * budget. Do not care about throughput consequences,
4557 * but only about latency. Finally, do not assign a
4558 * too small budget either, to avoid increasing
4559 * latency by causing too frequent expirations.
4560 */
4561 bfqq->entity.budget = min_t(unsigned long,
4562 bfqq->entity.budget,
4563 2 * bfq_min_budget(bfqd));
4564 } else if (old_wr_coeff > 1) {
77b7dcea
PV
4565 if (interactive) { /* update wr coeff and duration */
4566 bfqq->wr_coeff = bfqd->bfq_wr_coeff;
4567 bfqq->wr_cur_max_time = bfq_wr_duration(bfqd);
4568 } else if (soft_rt) {
4569 /*
4570 * The application is now or still meeting the
4571 * requirements for being deemed soft rt. We
4572 * can then correctly and safely (re)charge
4573 * the weight-raising duration for the
4574 * application with the weight-raising
4575 * duration for soft rt applications.
4576 *
4577 * In particular, doing this recharge now, i.e.,
4578 * before the weight-raising period for the
4579 * application finishes, reduces the probability
4580 * of the following negative scenario:
4581 * 1) the weight of a soft rt application is
4582 * raised at startup (as for any newly
4583 * created application),
4584 * 2) since the application is not interactive,
4585 * at a certain time weight-raising is
4586 * stopped for the application,
4587 * 3) at that time the application happens to
4588 * still have pending requests, and hence
4589 * is destined to not have a chance to be
4590 * deemed soft rt before these requests are
4591 * completed (see the comments to the
4592 * function bfq_bfqq_softrt_next_start()
4593 * for details on soft rt detection),
4594 * 4) these pending requests experience a high
4595 * latency because the application is not
4596 * weight-raised while they are pending.
4597 */
4598 if (bfqq->wr_cur_max_time !=
4599 bfqd->bfq_wr_rt_max_time) {
4600 bfqq->wr_start_at_switch_to_srt =
4601 bfqq->last_wr_start_finish;
4602
4603 bfqq->wr_cur_max_time =
4604 bfqd->bfq_wr_rt_max_time;
4605 bfqq->wr_coeff = bfqd->bfq_wr_coeff *
4606 BFQ_SOFTRT_WEIGHT_FACTOR;
4607 }
4608 bfqq->last_wr_start_finish = jiffies;
4609 }
44e44a1b
PV
4610 }
4611}
4612
4613static bool bfq_bfqq_idle_for_long_time(struct bfq_data *bfqd,
4614 struct bfq_queue *bfqq)
4615{
4616 return bfqq->dispatched == 0 &&
4617 time_is_before_jiffies(
4618 bfqq->budget_timeout +
4619 bfqd->bfq_wr_min_idle_time);
aee69d78
PV
4620}
4621
4622static void bfq_bfqq_handle_idle_busy_switch(struct bfq_data *bfqd,
4623 struct bfq_queue *bfqq,
44e44a1b
PV
4624 int old_wr_coeff,
4625 struct request *rq,
4626 bool *interactive)
aee69d78 4627{
77b7dcea 4628 bool soft_rt, wr_or_deserves_wr, bfqq_wants_to_preempt,
44e44a1b 4629 idle_for_long_time = bfq_bfqq_idle_for_long_time(bfqd, bfqq),
aee69d78
PV
4630 /*
4631 * See the comments on
4632 * bfq_bfqq_update_budg_for_activation for
4633 * details on the usage of the next variable.
4634 */
4635 arrived_in_time = ktime_get_ns() <=
4636 bfqq->ttime.last_end_request +
4637 bfqd->bfq_slice_idle * 3;
4638
e21b7a0b
AA
4639 bfqg_stats_update_io_add(bfqq_group(RQ_BFQQ(rq)), bfqq, rq->cmd_flags);
4640
aee69d78 4641 /*
44e44a1b
PV
4642 * bfqq deserves to be weight-raised if:
4643 * - it is sync,
36eca894
AA
4644 * - it has been idle for enough time or is soft real-time,
4645 * - it is linked to a bfq_io_cq (it is not shared in any sense).
44e44a1b 4646 */
77b7dcea
PV
4647 soft_rt = bfqd->bfq_wr_max_softrt_rate > 0 &&
4648 time_is_before_jiffies(bfqq->soft_rt_next_start);
44e44a1b
PV
4649 *interactive = idle_for_long_time;
4650 wr_or_deserves_wr = bfqd->low_latency &&
4651 (bfqq->wr_coeff > 1 ||
36eca894
AA
4652 (bfq_bfqq_sync(bfqq) &&
4653 bfqq->bic && (*interactive || soft_rt)));
44e44a1b
PV
4654
4655 /*
4656 * Using the last flag, update budget and check whether bfqq
4657 * may want to preempt the in-service queue.
aee69d78
PV
4658 */
4659 bfqq_wants_to_preempt =
4660 bfq_bfqq_update_budg_for_activation(bfqd, bfqq,
44e44a1b
PV
4661 arrived_in_time,
4662 wr_or_deserves_wr);
aee69d78
PV
4663
4664 if (!bfq_bfqq_IO_bound(bfqq)) {
4665 if (arrived_in_time) {
4666 bfqq->requests_within_timer++;
4667 if (bfqq->requests_within_timer >=
4668 bfqd->bfq_requests_within_timer)
4669 bfq_mark_bfqq_IO_bound(bfqq);
4670 } else
4671 bfqq->requests_within_timer = 0;
4672 }
4673
44e44a1b 4674 if (bfqd->low_latency) {
36eca894
AA
4675 if (unlikely(time_is_after_jiffies(bfqq->split_time)))
4676 /* wraparound */
4677 bfqq->split_time =
4678 jiffies - bfqd->bfq_wr_min_idle_time - 1;
4679
4680 if (time_is_before_jiffies(bfqq->split_time +
4681 bfqd->bfq_wr_min_idle_time)) {
4682 bfq_update_bfqq_wr_on_rq_arrival(bfqd, bfqq,
4683 old_wr_coeff,
4684 wr_or_deserves_wr,
4685 *interactive,
4686 soft_rt);
4687
4688 if (old_wr_coeff != bfqq->wr_coeff)
4689 bfqq->entity.prio_changed = 1;
4690 }
44e44a1b
PV
4691 }
4692
77b7dcea
PV
4693 bfqq->last_idle_bklogged = jiffies;
4694 bfqq->service_from_backlogged = 0;
4695 bfq_clear_bfqq_softrt_update(bfqq);
4696
aee69d78
PV
4697 bfq_add_bfqq_busy(bfqd, bfqq);
4698
4699 /*
4700 * Expire in-service queue only if preemption may be needed
4701 * for guarantees. In this respect, the function
4702 * next_queue_may_preempt just checks a simple, necessary
4703 * condition, and not a sufficient condition based on
4704 * timestamps. In fact, for the latter condition to be
4705 * evaluated, timestamps would need first to be updated, and
4706 * this operation is quite costly (see the comments on the
4707 * function bfq_bfqq_update_budg_for_activation).
4708 */
4709 if (bfqd->in_service_queue && bfqq_wants_to_preempt &&
77b7dcea 4710 bfqd->in_service_queue->wr_coeff < bfqq->wr_coeff &&
aee69d78
PV
4711 next_queue_may_preempt(bfqd))
4712 bfq_bfqq_expire(bfqd, bfqd->in_service_queue,
4713 false, BFQQE_PREEMPTED);
4714}
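
/*
 * Editor's illustrative sketch, not part of the BFQ source: the
 * "arrived in time" test computed at the top of the function above, on
 * plain nanosecond values and in a hypothetical helper. A request counts
 * as having arrived in time if it shows up within three idling slices of
 * the completion of the queue's last request; the actual value of
 * bfqd->bfq_slice_idle is configurable and not assumed here.
 */
static bool example_arrived_in_time(u64 now_ns, u64 last_end_request_ns,
				    u64 slice_idle_ns)
{
	return now_ns <= last_end_request_ns + 3 * slice_idle_ns;
}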
4715
4716static void bfq_add_request(struct request *rq)
4717{
4718 struct bfq_queue *bfqq = RQ_BFQQ(rq);
4719 struct bfq_data *bfqd = bfqq->bfqd;
4720 struct request *next_rq, *prev;
44e44a1b
PV
4721 unsigned int old_wr_coeff = bfqq->wr_coeff;
4722 bool interactive = false;
aee69d78
PV
4723
4724 bfq_log_bfqq(bfqd, bfqq, "add_request %d", rq_is_sync(rq));
4725 bfqq->queued[rq_is_sync(rq)]++;
4726 bfqd->queued++;
4727
4728 elv_rb_add(&bfqq->sort_list, rq);
4729
4730 /*
4731 * Check if this request is a better next-serve candidate.
4732 */
4733 prev = bfqq->next_rq;
4734 next_rq = bfq_choose_req(bfqd, bfqq->next_rq, rq, bfqd->last_position);
4735 bfqq->next_rq = next_rq;
4736
36eca894
AA
4737 /*
4738 * Adjust priority tree position, if next_rq changes.
4739 */
4740 if (prev != bfqq->next_rq)
4741 bfq_pos_tree_add_move(bfqd, bfqq);
4742
aee69d78 4743 if (!bfq_bfqq_busy(bfqq)) /* switching to busy ... */
44e44a1b
PV
4744 bfq_bfqq_handle_idle_busy_switch(bfqd, bfqq, old_wr_coeff,
4745 rq, &interactive);
4746 else {
4747 if (bfqd->low_latency && old_wr_coeff == 1 && !rq_is_sync(rq) &&
4748 time_is_before_jiffies(
4749 bfqq->last_wr_start_finish +
4750 bfqd->bfq_wr_min_inter_arr_async)) {
4751 bfqq->wr_coeff = bfqd->bfq_wr_coeff;
4752 bfqq->wr_cur_max_time = bfq_wr_duration(bfqd);
4753
cfd69712 4754 bfqd->wr_busy_queues++;
44e44a1b
PV
4755 bfqq->entity.prio_changed = 1;
4756 }
4757 if (prev != bfqq->next_rq)
4758 bfq_updated_next_req(bfqd, bfqq);
4759 }
4760
4761 /*
4762 * Assign jiffies to last_wr_start_finish in the following
4763 * cases:
4764 *
4765 * . if bfqq is not going to be weight-raised, because, for
4766 * non weight-raised queues, last_wr_start_finish stores the
4767 * arrival time of the last request; as of now, this piece
4768 * of information is used only for deciding whether to
4769 * weight-raise async queues
4770 *
4771 * . if bfqq is not weight-raised, because, if bfqq is now
4772 * switching to weight-raised, then last_wr_start_finish
4773 * stores the time when weight-raising starts
4774 *
4775 * . if bfqq is interactive, because, regardless of whether
4776 * bfqq is currently weight-raised, the weight-raising
4777 * period must start or restart (this case is considered
4778 * separately because it is not detected by the above
4779 * conditions, if bfqq is already weight-raised)
77b7dcea
PV
4780 *
4781 * last_wr_start_finish has to be updated also if bfqq is soft
4782 * real-time, because the weight-raising period is constantly
4783 * restarted on idle-to-busy transitions for these queues, but
4784 * this is already done in bfq_bfqq_handle_idle_busy_switch if
4785 * needed.
44e44a1b
PV
4786 */
4787 if (bfqd->low_latency &&
4788 (old_wr_coeff == 1 || bfqq->wr_coeff == 1 || interactive))
4789 bfqq->last_wr_start_finish = jiffies;
aee69d78
PV
4790}
4791
4792static struct request *bfq_find_rq_fmerge(struct bfq_data *bfqd,
4793 struct bio *bio,
4794 struct request_queue *q)
4795{
4796 struct bfq_queue *bfqq = bfqd->bio_bfqq;
4797
4798
4799 if (bfqq)
4800 return elv_rb_find(&bfqq->sort_list, bio_end_sector(bio));
4801
4802 return NULL;
4803}
4804
ab0e43e9
PV
4805static sector_t get_sdist(sector_t last_pos, struct request *rq)
4806{
4807 if (last_pos)
4808 return abs(blk_rq_pos(rq) - last_pos);
4809
4810 return 0;
4811}
4812
aee69d78
PV
4813#if 0 /* Still not clear if we can do without next two functions */
4814static void bfq_activate_request(struct request_queue *q, struct request *rq)
4815{
4816 struct bfq_data *bfqd = q->elevator->elevator_data;
4817
4818 bfqd->rq_in_driver++;
aee69d78
PV
4819}
4820
4821static void bfq_deactivate_request(struct request_queue *q, struct request *rq)
4822{
4823 struct bfq_data *bfqd = q->elevator->elevator_data;
4824
4825 bfqd->rq_in_driver--;
4826}
4827#endif
4828
4829static void bfq_remove_request(struct request_queue *q,
4830 struct request *rq)
4831{
4832 struct bfq_queue *bfqq = RQ_BFQQ(rq);
4833 struct bfq_data *bfqd = bfqq->bfqd;
4834 const int sync = rq_is_sync(rq);
4835
4836 if (bfqq->next_rq == rq) {
4837 bfqq->next_rq = bfq_find_next_rq(bfqd, bfqq, rq);
4838 bfq_updated_next_req(bfqd, bfqq);
4839 }
4840
4841 if (rq->queuelist.prev != &rq->queuelist)
4842 list_del_init(&rq->queuelist);
4843 bfqq->queued[sync]--;
4844 bfqd->queued--;
4845 elv_rb_del(&bfqq->sort_list, rq);
4846
4847 elv_rqhash_del(q, rq);
4848 if (q->last_merge == rq)
4849 q->last_merge = NULL;
4850
4851 if (RB_EMPTY_ROOT(&bfqq->sort_list)) {
4852 bfqq->next_rq = NULL;
4853
4854 if (bfq_bfqq_busy(bfqq) && bfqq != bfqd->in_service_queue) {
e21b7a0b 4855 bfq_del_bfqq_busy(bfqd, bfqq, false);
aee69d78
PV
4856 /*
4857 * bfqq emptied. In normal operation, when
4858 * bfqq is empty, bfqq->entity.service and
4859 * bfqq->entity.budget must contain,
4860 * respectively, the service received and the
4861 * budget used last time bfqq emptied. These
4862 * facts do not hold in this case, as at least
4863 * this last removal occurred while bfqq is
4864 * not in service. To avoid inconsistencies,
4865 * reset both bfqq->entity.service and
4866 * bfqq->entity.budget, if bfqq has still a
4867 * process that may issue I/O requests to it.
4868 */
4869 bfqq->entity.budget = bfqq->entity.service = 0;
4870 }
36eca894
AA
4871
4872 /*
4873 * Remove queue from request-position tree as it is empty.
4874 */
4875 if (bfqq->pos_root) {
4876 rb_erase(&bfqq->pos_node, bfqq->pos_root);
4877 bfqq->pos_root = NULL;
4878 }
aee69d78
PV
4879 }
4880
4881 if (rq->cmd_flags & REQ_META)
4882 bfqq->meta_pending--;
e21b7a0b
AA
4883
4884 bfqg_stats_update_io_remove(bfqq_group(bfqq), rq->cmd_flags);
aee69d78
PV
4885}
4886
4887static bool bfq_bio_merge(struct blk_mq_hw_ctx *hctx, struct bio *bio)
4888{
4889 struct request_queue *q = hctx->queue;
4890 struct bfq_data *bfqd = q->elevator->elevator_data;
4891 struct request *free = NULL;
4892 /*
4893 * bfq_bic_lookup grabs the queue_lock: invoke it now and
4894 * store its return value for later use, to avoid nesting
4895 * queue_lock inside the bfqd->lock. We assume that the bic
4896 * returned by bfq_bic_lookup does not go away before
4897 * bfqd->lock is taken.
4898 */
4899 struct bfq_io_cq *bic = bfq_bic_lookup(bfqd, current->io_context, q);
4900 bool ret;
4901
4902 spin_lock_irq(&bfqd->lock);
4903
4904 if (bic)
4905 bfqd->bio_bfqq = bic_to_bfqq(bic, op_is_sync(bio->bi_opf));
4906 else
4907 bfqd->bio_bfqq = NULL;
4908 bfqd->bio_bic = bic;
4909
4910 ret = blk_mq_sched_try_merge(q, bio, &free);
4911
4912 if (free)
4913 blk_mq_free_request(free);
4914 spin_unlock_irq(&bfqd->lock);
4915
4916 return ret;
4917}
4918
4919static int bfq_request_merge(struct request_queue *q, struct request **req,
4920 struct bio *bio)
4921{
4922 struct bfq_data *bfqd = q->elevator->elevator_data;
4923 struct request *__rq;
4924
4925 __rq = bfq_find_rq_fmerge(bfqd, bio, q);
4926 if (__rq && elv_bio_merge_ok(__rq, bio)) {
4927 *req = __rq;
4928 return ELEVATOR_FRONT_MERGE;
4929 }
4930
4931 return ELEVATOR_NO_MERGE;
4932}
4933
4934static void bfq_request_merged(struct request_queue *q, struct request *req,
4935 enum elv_merge type)
4936{
4937 if (type == ELEVATOR_FRONT_MERGE &&
4938 rb_prev(&req->rb_node) &&
4939 blk_rq_pos(req) <
4940 blk_rq_pos(container_of(rb_prev(&req->rb_node),
4941 struct request, rb_node))) {
4942 struct bfq_queue *bfqq = RQ_BFQQ(req);
4943 struct bfq_data *bfqd = bfqq->bfqd;
4944 struct request *prev, *next_rq;
4945
4946 /* Reposition request in its sort_list */
4947 elv_rb_del(&bfqq->sort_list, req);
4948 elv_rb_add(&bfqq->sort_list, req);
4949
4950 /* Choose next request to be served for bfqq */
4951 prev = bfqq->next_rq;
4952 next_rq = bfq_choose_req(bfqd, bfqq->next_rq, req,
4953 bfqd->last_position);
4954 bfqq->next_rq = next_rq;
4955 /*
36eca894
AA
4956 * If next_rq changes, update both the queue's budget to
4957 * fit the new request and the queue's position in its
4958 * rq_pos_tree.
aee69d78 4959 */
36eca894 4960 if (prev != bfqq->next_rq) {
aee69d78 4961 bfq_updated_next_req(bfqd, bfqq);
36eca894
AA
4962 bfq_pos_tree_add_move(bfqd, bfqq);
4963 }
aee69d78
PV
4964 }
4965}
4966
4967static void bfq_requests_merged(struct request_queue *q, struct request *rq,
4968 struct request *next)
4969{
4970 struct bfq_queue *bfqq = RQ_BFQQ(rq), *next_bfqq = RQ_BFQQ(next);
4971
4972 if (!RB_EMPTY_NODE(&rq->rb_node))
e21b7a0b 4973 goto end;
aee69d78
PV
4974 spin_lock_irq(&bfqq->bfqd->lock);
4975
4976 /*
4977 * If next and rq belong to the same bfq_queue and next is older
4978 * than rq, then reposition rq in the fifo (by substituting next
4979 * with rq). Otherwise, if next and rq belong to different
4980 * bfq_queues, never reposition rq: in fact, we would have to
4981 * reposition it with respect to next's position in its own fifo,
4982 * which would most certainly be too expensive with respect to
4983 * the benefits.
4984 */
4985 if (bfqq == next_bfqq &&
4986 !list_empty(&rq->queuelist) && !list_empty(&next->queuelist) &&
4987 next->fifo_time < rq->fifo_time) {
4988 list_del_init(&rq->queuelist);
4989 list_replace_init(&next->queuelist, &rq->queuelist);
4990 rq->fifo_time = next->fifo_time;
4991 }
4992
4993 if (bfqq->next_rq == next)
4994 bfqq->next_rq = rq;
4995
4996 bfq_remove_request(q, next);
4997
4998 spin_unlock_irq(&bfqq->bfqd->lock);
e21b7a0b
AA
4999end:
5000 bfqg_stats_update_io_merged(bfqq_group(bfqq), next->cmd_flags);
aee69d78
PV
5001}
5002
44e44a1b
PV
5003/* Must be called with bfqq != NULL */
5004static void bfq_bfqq_end_wr(struct bfq_queue *bfqq)
5005{
cfd69712
PV
5006 if (bfq_bfqq_busy(bfqq))
5007 bfqq->bfqd->wr_busy_queues--;
44e44a1b
PV
5008 bfqq->wr_coeff = 1;
5009 bfqq->wr_cur_max_time = 0;
77b7dcea 5010 bfqq->last_wr_start_finish = jiffies;
44e44a1b
PV
5011 /*
5012 * Trigger a weight change on the next invocation of
5013 * __bfq_entity_update_weight_prio.
5014 */
5015 bfqq->entity.prio_changed = 1;
5016}
5017
5018static void bfq_end_wr_async_queues(struct bfq_data *bfqd,
5019 struct bfq_group *bfqg)
5020{
5021 int i, j;
5022
5023 for (i = 0; i < 2; i++)
5024 for (j = 0; j < IOPRIO_BE_NR; j++)
5025 if (bfqg->async_bfqq[i][j])
5026 bfq_bfqq_end_wr(bfqg->async_bfqq[i][j]);
5027 if (bfqg->async_idle_bfqq)
5028 bfq_bfqq_end_wr(bfqg->async_idle_bfqq);
5029}
5030
5031static void bfq_end_wr(struct bfq_data *bfqd)
5032{
5033 struct bfq_queue *bfqq;
5034
5035 spin_lock_irq(&bfqd->lock);
5036
5037 list_for_each_entry(bfqq, &bfqd->active_list, bfqq_list)
5038 bfq_bfqq_end_wr(bfqq);
5039 list_for_each_entry(bfqq, &bfqd->idle_list, bfqq_list)
5040 bfq_bfqq_end_wr(bfqq);
5041 bfq_end_wr_async(bfqd);
5042
5043 spin_unlock_irq(&bfqd->lock);
5044}
5045
36eca894
AA
5046static sector_t bfq_io_struct_pos(void *io_struct, bool request)
5047{
5048 if (request)
5049 return blk_rq_pos(io_struct);
5050 else
5051 return ((struct bio *)io_struct)->bi_iter.bi_sector;
5052}
5053
5054static int bfq_rq_close_to_sector(void *io_struct, bool request,
5055 sector_t sector)
5056{
5057 return abs(bfq_io_struct_pos(io_struct, request) - sector) <=
5058 BFQQ_CLOSE_THR;
5059}
5060
5061static struct bfq_queue *bfqq_find_close(struct bfq_data *bfqd,
5062 struct bfq_queue *bfqq,
5063 sector_t sector)
5064{
5065 struct rb_root *root = &bfq_bfqq_to_bfqg(bfqq)->rq_pos_tree;
5066 struct rb_node *parent, *node;
5067 struct bfq_queue *__bfqq;
5068
5069 if (RB_EMPTY_ROOT(root))
5070 return NULL;
5071
5072 /*
5073 * First, if we find a request starting at the end of the last
5074 * request, choose it.
5075 */
5076 __bfqq = bfq_rq_pos_tree_lookup(bfqd, root, sector, &parent, NULL);
5077 if (__bfqq)
5078 return __bfqq;
5079
5080 /*
5081 * If the exact sector wasn't found, the parent of the NULL leaf
5082 * will contain the closest sector (rq_pos_tree sorted by
5083 * next_request position).
5084 */
5085 __bfqq = rb_entry(parent, struct bfq_queue, pos_node);
5086 if (bfq_rq_close_to_sector(__bfqq->next_rq, true, sector))
5087 return __bfqq;
5088
5089 if (blk_rq_pos(__bfqq->next_rq) < sector)
5090 node = rb_next(&__bfqq->pos_node);
5091 else
5092 node = rb_prev(&__bfqq->pos_node);
5093 if (!node)
5094 return NULL;
5095
5096 __bfqq = rb_entry(node, struct bfq_queue, pos_node);
5097 if (bfq_rq_close_to_sector(__bfqq->next_rq, true, sector))
5098 return __bfqq;
5099
5100 return NULL;
5101}
5102
5103static struct bfq_queue *bfq_find_close_cooperator(struct bfq_data *bfqd,
5104 struct bfq_queue *cur_bfqq,
5105 sector_t sector)
5106{
5107 struct bfq_queue *bfqq;
5108
5109 /*
5110 * We shall notice if some of the queues are cooperating,
5111 * e.g., working closely on the same area of the device. In
5112 * that case, we can group them together and: 1) don't waste
5113 * time idling, and 2) serve the union of their requests in
5114 * the best possible order for throughput.
5115 */
5116 bfqq = bfqq_find_close(bfqd, cur_bfqq, sector);
5117 if (!bfqq || bfqq == cur_bfqq)
5118 return NULL;
5119
5120 return bfqq;
5121}
5122
5123static struct bfq_queue *
5124bfq_setup_merge(struct bfq_queue *bfqq, struct bfq_queue *new_bfqq)
5125{
5126 int process_refs, new_process_refs;
5127 struct bfq_queue *__bfqq;
5128
5129 /*
5130 * If there are no process references on the new_bfqq, then it is
5131 * unsafe to follow the ->new_bfqq chain as other bfqq's in the chain
5132 * may have dropped their last reference (not just their last process
5133 * reference).
5134 */
5135 if (!bfqq_process_refs(new_bfqq))
5136 return NULL;
5137
5138 /* Avoid a circular list and skip interim queue merges. */
5139 while ((__bfqq = new_bfqq->new_bfqq)) {
5140 if (__bfqq == bfqq)
5141 return NULL;
5142 new_bfqq = __bfqq;
5143 }
5144
5145 process_refs = bfqq_process_refs(bfqq);
5146 new_process_refs = bfqq_process_refs(new_bfqq);
5147 /*
5148 * If the process for the bfqq has gone away, there is no
5149 * sense in merging the queues.
5150 */
5151 if (process_refs == 0 || new_process_refs == 0)
5152 return NULL;
5153
5154 bfq_log_bfqq(bfqq->bfqd, bfqq, "scheduling merge with queue %d",
5155 new_bfqq->pid);
5156
5157 /*
5158 * Merging is just a redirection: the requests of the process
5159 * owning one of the two queues are redirected to the other queue.
5160 * The latter queue, in its turn, is set as shared if this is the
5161 * first time that the requests of some process are redirected to
5162 * it.
5163 *
5164 * We redirect bfqq to new_bfqq and not the opposite, because we
5165 * are in the context of the process owning bfqq, hence we have
5166 * the io_cq of this process. So we can immediately configure this
5167 * io_cq to redirect the requests of the process to new_bfqq.
5168 *
5169 * NOTE, even if new_bfqq coincides with the in-service queue, the
5170 * io_cq of new_bfqq is not available, because, if the in-service
5171 * queue is shared, bfqd->in_service_bic may not point to the
5172 * io_cq of the in-service queue.
5173 * Redirecting the requests of the process owning bfqq to the
5174 * currently in-service queue is in any case the best option, as
5175 * we feed the in-service queue with new requests close to the
5176 * last request served and, by doing so, hopefully increase the
5177 * throughput.
5178 */
5179 bfqq->new_bfqq = new_bfqq;
5180 new_bfqq->ref += process_refs;
5181 return new_bfqq;
5182}
5183
5184static bool bfq_may_be_close_cooperator(struct bfq_queue *bfqq,
5185 struct bfq_queue *new_bfqq)
5186{
5187 if (bfq_class_idle(bfqq) || bfq_class_idle(new_bfqq) ||
5188 (bfqq->ioprio_class != new_bfqq->ioprio_class))
5189 return false;
5190
5191 /*
5192 * If either of the queues has already been detected as seeky,
5193 * then merging it with the other queue is unlikely to lead to
5194 * sequential I/O.
5195 */
5196 if (BFQQ_SEEKY(bfqq) || BFQQ_SEEKY(new_bfqq))
5197 return false;
5198
5199 /*
5200 * Interleaved I/O is known to be done by (some) applications
5201 * only for reads, so it does not make sense to merge async
5202 * queues.
5203 */
5204 if (!bfq_bfqq_sync(bfqq) || !bfq_bfqq_sync(new_bfqq))
5205 return false;
5206
5207 return true;
5208}
5209
5210/*
5211 * If this function returns true, then bfqq cannot be merged. The idea
5212 * is that true cooperation happens very early after processes start
5213 * to do I/O. Usually, late cooperations are just accidental false
5214 * positives. In case bfqq is weight-raised, such false positives
5215 * would evidently degrade latency guarantees for bfqq.
5216 */
5217static bool wr_from_too_long(struct bfq_queue *bfqq)
5218{
5219 return bfqq->wr_coeff > 1 &&
5220 time_is_before_jiffies(bfqq->last_wr_start_finish +
5221 msecs_to_jiffies(100));
5222}
5223
5224/*
5225 * Attempt to schedule a merge of bfqq with the currently in-service
5226 * queue or with a close queue among the scheduled queues. Return
5227 * NULL if no merge was scheduled, a pointer to the shared bfq_queue
5228 * structure otherwise.
5229 *
5230 * The OOM queue is not allowed to participate in cooperation: in fact, since
5231 * the requests temporarily redirected to the OOM queue could be redirected
5232 * again to dedicated queues at any time, the state needed to correctly
5233 * handle merging with the OOM queue would be quite complex and expensive
5234 * to maintain. Besides, in a condition as critical as an out-of-memory one,
5235 * the benefits of queue merging may be of little relevance, or even negligible.
5236 *
5237 * Weight-raised queues can be merged only if their weight-raising
5238 * period has just started. In fact cooperating processes are usually
5239 * started together. Thus, with this filter we avoid false positives
5240 * that would jeopardize low-latency guarantees.
5241 *
5242 * WARNING: queue merging may impair fairness among non-weight-raised
5243 * queues, for at least two reasons: 1) the original weight of a
5244 * merged queue may change during the merged state, 2) even if the
5245 * weight stays the same, a merged queue may be bloated with many more
5246 * requests than the ones produced by its originally-associated
5247 * process.
5248 */
5249static struct bfq_queue *
5250bfq_setup_cooperator(struct bfq_data *bfqd, struct bfq_queue *bfqq,
5251 void *io_struct, bool request)
5252{
5253 struct bfq_queue *in_service_bfqq, *new_bfqq;
5254
5255 if (bfqq->new_bfqq)
5256 return bfqq->new_bfqq;
5257
5258 if (!io_struct ||
5259 wr_from_too_long(bfqq) ||
5260 unlikely(bfqq == &bfqd->oom_bfqq))
5261 return NULL;
5262
5263 /* If there is only one backlogged queue, don't search. */
5264 if (bfqd->busy_queues == 1)
5265 return NULL;
5266
5267 in_service_bfqq = bfqd->in_service_queue;
5268
5269 if (!in_service_bfqq || in_service_bfqq == bfqq ||
5270 !bfqd->in_service_bic || wr_from_too_long(in_service_bfqq) ||
5271 unlikely(in_service_bfqq == &bfqd->oom_bfqq))
5272 goto check_scheduled;
5273
5274 if (bfq_rq_close_to_sector(io_struct, request, bfqd->last_position) &&
5275 bfqq->entity.parent == in_service_bfqq->entity.parent &&
5276 bfq_may_be_close_cooperator(bfqq, in_service_bfqq)) {
5277 new_bfqq = bfq_setup_merge(bfqq, in_service_bfqq);
5278 if (new_bfqq)
5279 return new_bfqq;
5280 }
5281 /*
5282 * Check whether there is a cooperator among currently scheduled
5283 * queues. The only thing we need is that the bio/request is not
5284 * NULL, as we need it to establish whether a cooperator exists.
5285 */
5286check_scheduled:
5287 new_bfqq = bfq_find_close_cooperator(bfqd, bfqq,
5288 bfq_io_struct_pos(io_struct, request));
5289
5290 if (new_bfqq && !wr_from_too_long(new_bfqq) &&
5291 likely(new_bfqq != &bfqd->oom_bfqq) &&
5292 bfq_may_be_close_cooperator(bfqq, new_bfqq))
5293 return bfq_setup_merge(bfqq, new_bfqq);
5294
5295 return NULL;
5296}
5297
5298static void bfq_bfqq_save_state(struct bfq_queue *bfqq)
5299{
5300 struct bfq_io_cq *bic = bfqq->bic;
5301
5302 /*
5303 * If !bfqq->bic, the queue is already shared or its requests
5304 * have already been redirected to a shared queue; both idle window
5305 * and weight raising state have already been saved. Do nothing.
5306 */
5307 if (!bic)
5308 return;
5309
5310 bic->saved_ttime = bfqq->ttime;
5311 bic->saved_idle_window = bfq_bfqq_idle_window(bfqq);
5312 bic->saved_IO_bound = bfq_bfqq_IO_bound(bfqq);
5313 bic->saved_wr_coeff = bfqq->wr_coeff;
5314 bic->saved_wr_start_at_switch_to_srt = bfqq->wr_start_at_switch_to_srt;
5315 bic->saved_last_wr_start_finish = bfqq->last_wr_start_finish;
5316 bic->saved_wr_cur_max_time = bfqq->wr_cur_max_time;
5317}
5318
5319static void bfq_get_bic_reference(struct bfq_queue *bfqq)
5320{
5321 /*
5322 * If bfqq->bic has a non-NULL value, the bic to which it belongs
5323 * is about to begin using a shared bfq_queue.
5324 */
5325 if (bfqq->bic)
5326 atomic_long_inc(&bfqq->bic->icq.ioc->refcount);
5327}
5328
5329static void
5330bfq_merge_bfqqs(struct bfq_data *bfqd, struct bfq_io_cq *bic,
5331 struct bfq_queue *bfqq, struct bfq_queue *new_bfqq)
5332{
5333 bfq_log_bfqq(bfqd, bfqq, "merging with queue %lu",
5334 (unsigned long)new_bfqq->pid);
5335 /* Save weight raising and idle window of the merged queues */
5336 bfq_bfqq_save_state(bfqq);
5337 bfq_bfqq_save_state(new_bfqq);
5338 if (bfq_bfqq_IO_bound(bfqq))
5339 bfq_mark_bfqq_IO_bound(new_bfqq);
5340 bfq_clear_bfqq_IO_bound(bfqq);
5341
5342 /*
5343 * If bfqq is weight-raised, then let new_bfqq inherit
5344 * weight-raising. To reduce false positives, neglect the case
5345 * where bfqq has just been created, but has not yet made it
5346 * to be weight-raised (which may happen because EQM may merge
5347 * bfqq even before bfq_add_request is executed for the first
5348 * time for bfqq).
5349 */
5350 if (new_bfqq->wr_coeff == 1 && bfqq->wr_coeff > 1) {
5351 new_bfqq->wr_coeff = bfqq->wr_coeff;
5352 new_bfqq->wr_cur_max_time = bfqq->wr_cur_max_time;
5353 new_bfqq->last_wr_start_finish = bfqq->last_wr_start_finish;
5354 new_bfqq->wr_start_at_switch_to_srt =
5355 bfqq->wr_start_at_switch_to_srt;
5356 if (bfq_bfqq_busy(new_bfqq))
5357 bfqd->wr_busy_queues++;
5358 new_bfqq->entity.prio_changed = 1;
5359 }
5360
5361 if (bfqq->wr_coeff > 1) { /* bfqq has given its wr to new_bfqq */
5362 bfqq->wr_coeff = 1;
5363 bfqq->entity.prio_changed = 1;
5364 if (bfq_bfqq_busy(bfqq))
5365 bfqd->wr_busy_queues--;
5366 }
5367
5368 bfq_log_bfqq(bfqd, new_bfqq, "merge_bfqqs: wr_busy %d",
5369 bfqd->wr_busy_queues);
5370
5371 /*
5372 * Grab a reference to the bic, to prevent it from being destroyed
5373 * before being possibly touched by a bfq_split_bfqq().
5374 */
5375 bfq_get_bic_reference(bfqq);
5376 bfq_get_bic_reference(new_bfqq);
5377 /*
5378 * Merge queues (that is, let bic redirect its requests to new_bfqq)
5379 */
5380 bic_set_bfqq(bic, new_bfqq, 1);
5381 bfq_mark_bfqq_coop(new_bfqq);
5382 /*
5383 * new_bfqq now belongs to at least two bics (it is a shared queue):
5384 * set new_bfqq->bic to NULL. bfqq either:
5385 * - does not belong to any bic any more, and hence bfqq->bic must
5386 * be set to NULL, or
5387 * - is a queue whose owning bics have already been redirected to a
5388 * different queue, hence the queue is destined to not belong to
5389 * any bic soon and bfqq->bic is already NULL (therefore the next
5390 * assignment causes no harm).
5391 */
5392 new_bfqq->bic = NULL;
5393 bfqq->bic = NULL;
5394 /* release process reference to bfqq */
5395 bfq_put_queue(bfqq);
5396}
5397
aee69d78
PV
5398static bool bfq_allow_bio_merge(struct request_queue *q, struct request *rq,
5399 struct bio *bio)
5400{
5401 struct bfq_data *bfqd = q->elevator->elevator_data;
5402 bool is_sync = op_is_sync(bio->bi_opf);
36eca894 5403 struct bfq_queue *bfqq = bfqd->bio_bfqq, *new_bfqq;
aee69d78
PV
5404
5405 /*
5406 * Disallow merge of a sync bio into an async request.
5407 */
5408 if (is_sync && !rq_is_sync(rq))
5409 return false;
5410
5411 /*
5412 * Lookup the bfqq that this bio will be queued with. Allow
5413 * merge only if rq is queued there.
5414 */
5415 if (!bfqq)
5416 return false;
5417
36eca894
AA
5418 /*
5419 * We take advantage of this function to perform an early merge
5420 * of the queues of possible cooperating processes.
5421 */
5422 new_bfqq = bfq_setup_cooperator(bfqd, bfqq, bio, false);
5423 if (new_bfqq) {
5424 /*
5425		 * bic still points to bfqq, so it has not yet been
5426		 * redirected to some other bfq_queue, and a queue
5427		 * merge between bfqq and new_bfqq can be safely
5428		 * fulfilled, i.e., bic can be redirected to new_bfqq
5429 * and bfqq can be put.
5430 */
5431 bfq_merge_bfqqs(bfqd, bfqd->bio_bic, bfqq,
5432 new_bfqq);
5433 /*
5434 * If we get here, bio will be queued into new_queue,
5435 * so use new_bfqq to decide whether bio and rq can be
5436 * merged.
5437 */
5438 bfqq = new_bfqq;
5439
5440 /*
5441		 * Change also bfqd->bio_bfqq, as
5442		 * bfqd->bio_bic now points to new_bfqq, and
5443		 * this function may be invoked again (and then may
5444		 * use bfqd->bio_bfqq again).
5445 */
5446 bfqd->bio_bfqq = bfqq;
5447 }
5448
aee69d78
PV
5449 return bfqq == RQ_BFQQ(rq);
5450}
5451
44e44a1b
PV
5452/*
5453 * Set the maximum time for the in-service queue to consume its
5454 * budget. This prevents seeky processes from lowering the throughput.
5455 * In practice, a time-slice service scheme is used with seeky
5456 * processes.
5457 */
5458static void bfq_set_budget_timeout(struct bfq_data *bfqd,
5459 struct bfq_queue *bfqq)
5460{
77b7dcea
PV
5461 unsigned int timeout_coeff;
5462
5463 if (bfqq->wr_cur_max_time == bfqd->bfq_wr_rt_max_time)
5464 timeout_coeff = 1;
5465 else
5466 timeout_coeff = bfqq->entity.weight / bfqq->entity.orig_weight;
5467
44e44a1b
PV
5468 bfqd->last_budget_start = ktime_get();
5469
5470 bfqq->budget_timeout = jiffies +
77b7dcea 5471 bfqd->bfq_timeout * timeout_coeff;
44e44a1b
PV
5472}
5473
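/*
 * A rough worked example of the timeout scaling above (the exact
 * defaults are assumptions here): with a budget timeout of about
 * 125 ms and a weight-raising coefficient of 30, an interactive
 * weight-raised queue has entity.weight == 30 * entity.orig_weight,
 * hence timeout_coeff == 30 and its budget_timeout lands roughly
 * 3.75 s in the future, while a soft real-time queue (whose
 * wr_cur_max_time equals bfq_wr_rt_max_time) and any non-raised
 * queue keep the plain ~125 ms deadline.
 */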
aee69d78
PV
5474static void __bfq_set_in_service_queue(struct bfq_data *bfqd,
5475 struct bfq_queue *bfqq)
5476{
5477 if (bfqq) {
e21b7a0b 5478 bfqg_stats_update_avg_queue_size(bfqq_group(bfqq));
aee69d78
PV
5479 bfq_clear_bfqq_fifo_expire(bfqq);
5480
5481 bfqd->budgets_assigned = (bfqd->budgets_assigned * 7 + 256) / 8;
5482
77b7dcea
PV
5483 if (time_is_before_jiffies(bfqq->last_wr_start_finish) &&
5484 bfqq->wr_coeff > 1 &&
5485 bfqq->wr_cur_max_time == bfqd->bfq_wr_rt_max_time &&
5486 time_is_before_jiffies(bfqq->budget_timeout)) {
5487 /*
5488 * For soft real-time queues, move the start
5489 * of the weight-raising period forward by the
5490 * time the queue has not received any
5491 * service. Otherwise, a relatively long
5492 * service delay is likely to cause the
5493 * weight-raising period of the queue to end,
5494 * because of the short duration of the
5495 * weight-raising period of a soft real-time
5496 * queue. It is worth noting that this move
5497 * is not so dangerous for the other queues,
5498 * because soft real-time queues are not
5499 * greedy.
5500 *
5501 * To not add a further variable, we use the
5502 * overloaded field budget_timeout to
5503 * determine for how long the queue has not
5504 * received service, i.e., how much time has
5505 * elapsed since the queue expired. However,
5506 * this is a little imprecise, because
5507 * budget_timeout is set to jiffies if bfqq
5508 * not only expires, but also remains with no
5509 * request.
5510 */
5511 if (time_after(bfqq->budget_timeout,
5512 bfqq->last_wr_start_finish))
5513 bfqq->last_wr_start_finish +=
5514 jiffies - bfqq->budget_timeout;
5515 else
5516 bfqq->last_wr_start_finish = jiffies;
5517 }
5518
44e44a1b 5519 bfq_set_budget_timeout(bfqd, bfqq);
aee69d78
PV
5520 bfq_log_bfqq(bfqd, bfqq,
5521 "set_in_service_queue, cur-budget = %d",
5522 bfqq->entity.budget);
5523 }
5524
5525 bfqd->in_service_queue = bfqq;
5526}
5527
5528/*
5529 * Get and set a new queue for service.
5530 */
5531static struct bfq_queue *bfq_set_in_service_queue(struct bfq_data *bfqd)
5532{
5533 struct bfq_queue *bfqq = bfq_get_next_queue(bfqd);
5534
5535 __bfq_set_in_service_queue(bfqd, bfqq);
5536 return bfqq;
5537}
5538
aee69d78
PV
5539static void bfq_arm_slice_timer(struct bfq_data *bfqd)
5540{
5541 struct bfq_queue *bfqq = bfqd->in_service_queue;
5542 struct bfq_io_cq *bic;
5543 u32 sl;
5544
5545 /* Processes have exited, don't wait. */
5546 bic = bfqd->in_service_bic;
5547 if (!bic || atomic_read(&bic->icq.ioc->active_ref) == 0)
5548 return;
5549
5550 bfq_mark_bfqq_wait_request(bfqq);
5551
5552 /*
5553 * We don't want to idle for seeks, but we do want to allow
5554 * fair distribution of slice time for a process doing back-to-back
5555	 * seeks. So allow a little bit of time for it to submit a new rq.
5556 */
5557 sl = bfqd->bfq_slice_idle;
5558 /*
1de0c4cd
AA
5559 * Unless the queue is being weight-raised or the scenario is
5560 * asymmetric, grant only minimum idle time if the queue
5561 * is seeky. A long idling is preserved for a weight-raised
5562	 * queue, or, more generally, in an asymmetric scenario,
5563 * because a long idling is needed for guaranteeing to a queue
5564 * its reserved share of the throughput (in particular, it is
5565 * needed if the queue has a higher weight than some other
5566 * queue).
aee69d78 5567 */
1de0c4cd
AA
5568 if (BFQQ_SEEKY(bfqq) && bfqq->wr_coeff == 1 &&
5569 bfq_symmetric_scenario(bfqd))
aee69d78
PV
5570 sl = min_t(u64, sl, BFQ_MIN_TT);
5571
5572 bfqd->last_idling_start = ktime_get();
5573 hrtimer_start(&bfqd->idle_slice_timer, ns_to_ktime(sl),
5574 HRTIMER_MODE_REL);
e21b7a0b 5575 bfqg_stats_set_start_idle_time(bfqq_group(bfqq));
aee69d78
PV
5576}
5577
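/*
 * For instance, with the (assumed) default idle slice of 8 ms and a
 * BFQ_MIN_TT of 2 ms: a weight-raised queue, or any queue in an
 * asymmetric scenario, is granted the full 8 ms of idling, whereas a
 * seeky queue in a symmetric scenario gets only min(8 ms, 2 ms) =
 * 2 ms before the idle_slice_timer fires and the queue may be
 * expired.
 */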
ab0e43e9
PV
5578/*
5579 * In autotuning mode, max_budget is dynamically recomputed as the
5580 * number of sectors transferred within the timeout at the estimated peak
5581 * rate. This enables BFQ to utilize a full timeslice with a full
5582 * budget, even if the in-service queue is served at peak rate. And
5583 * this maximises throughput with sequential workloads.
5584 */
5585static unsigned long bfq_calc_max_budget(struct bfq_data *bfqd)
5586{
5587 return (u64)bfqd->peak_rate * USEC_PER_MSEC *
5588 jiffies_to_msecs(bfqd->bfq_timeout)>>BFQ_RATE_SHIFT;
5589}
5590
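/*
 * Numerical sketch (BFQ_RATE_SHIFT == 16 and a ~125 ms timeout are
 * assumptions here): a peak rate of roughly 100 MB/s is about
 * 200000 sectors/s, i.e., 0.2 sectors/usec, stored as
 * 0.2 << 16 ~= 13107. Then
 *
 *   max_budget = 13107 * 1000 * 125 >> 16 ~= 25000 sectors,
 *
 * i.e., about 12.5 MB, which is indeed what the device can transfer
 * at peak rate during one budget timeout.
 */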
44e44a1b
PV
5591/*
5592 * Update parameters related to throughput and responsiveness, as a
5593 * function of the estimated peak rate. See comments on
5594 * bfq_calc_max_budget(), and on T_slow and T_fast arrays.
5595 */
5596static void update_thr_responsiveness_params(struct bfq_data *bfqd)
5597{
5598 int dev_type = blk_queue_nonrot(bfqd->queue);
5599
5600 if (bfqd->bfq_user_max_budget == 0)
5601 bfqd->bfq_max_budget =
5602 bfq_calc_max_budget(bfqd);
5603
5604 if (bfqd->device_speed == BFQ_BFQD_FAST &&
5605 bfqd->peak_rate < device_speed_thresh[dev_type]) {
5606 bfqd->device_speed = BFQ_BFQD_SLOW;
5607 bfqd->RT_prod = R_slow[dev_type] *
5608 T_slow[dev_type];
5609 } else if (bfqd->device_speed == BFQ_BFQD_SLOW &&
5610 bfqd->peak_rate > device_speed_thresh[dev_type]) {
5611 bfqd->device_speed = BFQ_BFQD_FAST;
5612 bfqd->RT_prod = R_fast[dev_type] *
5613 T_fast[dev_type];
5614 }
5615
5616 bfq_log(bfqd,
5617"dev_type %s dev_speed_class = %s (%llu sects/sec), thresh %llu setcs/sec",
5618 dev_type == 0 ? "ROT" : "NONROT",
5619 bfqd->device_speed == BFQ_BFQD_FAST ? "FAST" : "SLOW",
5620 bfqd->device_speed == BFQ_BFQD_FAST ?
5621 (USEC_PER_SEC*(u64)R_fast[dev_type])>>BFQ_RATE_SHIFT :
5622 (USEC_PER_SEC*(u64)R_slow[dev_type])>>BFQ_RATE_SHIFT,
5623 (USEC_PER_SEC*(u64)device_speed_thresh[dev_type])>>
5624 BFQ_RATE_SHIFT);
5625}
5626
ab0e43e9
PV
5627static void bfq_reset_rate_computation(struct bfq_data *bfqd,
5628 struct request *rq)
5629{
5630 if (rq != NULL) { /* new rq dispatch now, reset accordingly */
5631 bfqd->last_dispatch = bfqd->first_dispatch = ktime_get_ns();
5632 bfqd->peak_rate_samples = 1;
5633 bfqd->sequential_samples = 0;
5634 bfqd->tot_sectors_dispatched = bfqd->last_rq_max_size =
5635 blk_rq_sectors(rq);
5636 } else /* no new rq dispatched, just reset the number of samples */
5637 bfqd->peak_rate_samples = 0; /* full re-init on next disp. */
5638
5639 bfq_log(bfqd,
5640 "reset_rate_computation at end, sample %u/%u tot_sects %llu",
5641 bfqd->peak_rate_samples, bfqd->sequential_samples,
5642 bfqd->tot_sectors_dispatched);
5643}
5644
5645static void bfq_update_rate_reset(struct bfq_data *bfqd, struct request *rq)
5646{
5647 u32 rate, weight, divisor;
5648
5649 /*
5650 * For the convergence property to hold (see comments on
5651 * bfq_update_peak_rate()) and for the assessment to be
5652 * reliable, a minimum number of samples must be present, and
5653 * a minimum amount of time must have elapsed. If not so, do
5654 * not compute new rate. Just reset parameters, to get ready
5655 * for a new evaluation attempt.
5656 */
5657 if (bfqd->peak_rate_samples < BFQ_RATE_MIN_SAMPLES ||
5658 bfqd->delta_from_first < BFQ_RATE_MIN_INTERVAL)
5659 goto reset_computation;
5660
5661 /*
5662 * If a new request completion has occurred after last
5663 * dispatch, then, to approximate the rate at which requests
5664 * have been served by the device, it is more precise to
5665 * extend the observation interval to the last completion.
5666 */
5667 bfqd->delta_from_first =
5668 max_t(u64, bfqd->delta_from_first,
5669 bfqd->last_completion - bfqd->first_dispatch);
5670
5671 /*
5672 * Rate computed in sects/usec, and not sects/nsec, for
5673 * precision issues.
5674 */
5675 rate = div64_ul(bfqd->tot_sectors_dispatched<<BFQ_RATE_SHIFT,
5676 div_u64(bfqd->delta_from_first, NSEC_PER_USEC));
5677
5678 /*
5679 * Peak rate not updated if:
5680 * - the percentage of sequential dispatches is below 3/4 of the
5681 * total, and rate is below the current estimated peak rate
5682 * - rate is unreasonably high (> 20M sectors/sec)
5683 */
5684 if ((bfqd->sequential_samples < (3 * bfqd->peak_rate_samples)>>2 &&
5685 rate <= bfqd->peak_rate) ||
5686 rate > 20<<BFQ_RATE_SHIFT)
5687 goto reset_computation;
5688
5689 /*
5690 * We have to update the peak rate, at last! To this purpose,
5691 * we use a low-pass filter. We compute the smoothing constant
5692 * of the filter as a function of the 'weight' of the new
5693 * measured rate.
5694 *
5695 * As can be seen in next formulas, we define this weight as a
5696 * quantity proportional to how sequential the workload is,
5697 * and to how long the observation time interval is.
5698 *
5699 * The weight runs from 0 to 8. The maximum value of the
5700 * weight, 8, yields the minimum value for the smoothing
5701 * constant. At this minimum value for the smoothing constant,
5702 * the measured rate contributes for half of the next value of
5703 * the estimated peak rate.
5704 *
5705 * So, the first step is to compute the weight as a function
5706 * of how sequential the workload is. Note that the weight
5707 * cannot reach 9, because bfqd->sequential_samples cannot
5708 * become equal to bfqd->peak_rate_samples, which, in its
5709 * turn, holds true because bfqd->sequential_samples is not
5710 * incremented for the first sample.
5711 */
5712 weight = (9 * bfqd->sequential_samples) / bfqd->peak_rate_samples;
5713
5714 /*
5715 * Second step: further refine the weight as a function of the
5716 * duration of the observation interval.
5717 */
5718 weight = min_t(u32, 8,
5719 div_u64(weight * bfqd->delta_from_first,
5720 BFQ_RATE_REF_INTERVAL));
5721
5722 /*
5723 * Divisor ranging from 10, for minimum weight, to 2, for
5724 * maximum weight.
5725 */
5726 divisor = 10 - weight;
5727
5728 /*
5729 * Finally, update peak rate:
5730 *
5731 * peak_rate = peak_rate * (divisor-1) / divisor + rate / divisor
5732 */
5733 bfqd->peak_rate *= divisor-1;
5734 bfqd->peak_rate /= divisor;
5735 rate /= divisor; /* smoothing constant alpha = 1/divisor */
5736
5737 bfqd->peak_rate += rate;
44e44a1b 5738 update_thr_responsiveness_params(bfqd);
ab0e43e9
PV
5739
5740reset_computation:
5741 bfq_reset_rate_computation(bfqd, rq);
5742}
5743
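/*
 * To make the filter above concrete: for a fully sequential workload
 * sampled over at least BFQ_RATE_REF_INTERVAL, the weight evaluates
 * to 8, so divisor == 2 and
 *
 *   peak_rate = peak_rate / 2 + rate / 2,
 *
 * i.e., the new measurement contributes half of the next estimate.
 * At the other extreme (weight == 0, divisor == 10) the update
 * becomes
 *
 *   peak_rate = peak_rate * 9/10 + rate / 10,
 *
 * so a single mostly-random or short observation can move the
 * estimate by at most 10%.
 */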
5744/*
5745 * Update the read/write peak rate (the main quantity used for
5746 * auto-tuning, see update_thr_responsiveness_params()).
5747 *
5748 * It is not trivial to estimate the peak rate (correctly): because of
5749 * the presence of sw and hw queues between the scheduler and the
5750 * device components that finally serve I/O requests, it is hard to
5751 * say exactly when a given dispatched request is served inside the
5752 * device, and for how long. As a consequence, it is hard to know
5753 * precisely at what rate a given set of requests is actually served
5754 * by the device.
5755 *
5756 * On the opposite end, the dispatch time of any request is trivially
5757 * available, and, from this piece of information, the "dispatch rate"
5758 * of requests can be immediately computed. So, the idea in the next
5759 * function is to use what is known, namely request dispatch times
5760 * (plus, when useful, request completion times), to estimate what is
5761 * unknown, namely in-device request service rate.
5762 *
5763 * The main issue is that, because of the above facts, the rate at
5764 * which a certain set of requests is dispatched over a certain time
5765 * interval can vary greatly with respect to the rate at which the
5766 * same requests are then served. But, since the size of any
5767 * intermediate queue is limited, and the service scheme is lossless
5768 * (no request is silently dropped), the following obvious convergence
5769 * property holds: the number of requests dispatched MUST become
5770 * closer and closer to the number of requests completed as the
5771 * observation interval grows. This is the key property used in
5772 * the next function to estimate the peak service rate as a function
5773 * of the observed dispatch rate. The function assumes it is invoked
5774 * on every request dispatch.
5775 */
5776static void bfq_update_peak_rate(struct bfq_data *bfqd, struct request *rq)
5777{
5778 u64 now_ns = ktime_get_ns();
5779
5780 if (bfqd->peak_rate_samples == 0) { /* first dispatch */
5781 bfq_log(bfqd, "update_peak_rate: goto reset, samples %d",
5782 bfqd->peak_rate_samples);
5783 bfq_reset_rate_computation(bfqd, rq);
5784 goto update_last_values; /* will add one sample */
5785 }
5786
5787 /*
5788 * Device idle for very long: the observation interval lasting
5789 * up to this dispatch cannot be a valid observation interval
5790 * for computing a new peak rate (similarly to the late-
5791 * completion event in bfq_completed_request()). Go to
5792 * update_rate_and_reset to have the following three steps
5793 * taken:
5794 * - close the observation interval at the last (previous)
5795 * request dispatch or completion
5796 * - compute rate, if possible, for that observation interval
5797 * - start a new observation interval with this dispatch
5798 */
5799 if (now_ns - bfqd->last_dispatch > 100*NSEC_PER_MSEC &&
5800 bfqd->rq_in_driver == 0)
5801 goto update_rate_and_reset;
5802
5803 /* Update sampling information */
5804 bfqd->peak_rate_samples++;
5805
5806 if ((bfqd->rq_in_driver > 0 ||
5807 now_ns - bfqd->last_completion < BFQ_MIN_TT)
5808 && get_sdist(bfqd->last_position, rq) < BFQQ_SEEK_THR)
5809 bfqd->sequential_samples++;
5810
5811 bfqd->tot_sectors_dispatched += blk_rq_sectors(rq);
5812
5813 /* Reset max observed rq size every 32 dispatches */
5814 if (likely(bfqd->peak_rate_samples % 32))
5815 bfqd->last_rq_max_size =
5816 max_t(u32, blk_rq_sectors(rq), bfqd->last_rq_max_size);
5817 else
5818 bfqd->last_rq_max_size = blk_rq_sectors(rq);
5819
5820 bfqd->delta_from_first = now_ns - bfqd->first_dispatch;
5821
5822 /* Target observation interval not yet reached, go on sampling */
5823 if (bfqd->delta_from_first < BFQ_RATE_REF_INTERVAL)
5824 goto update_last_values;
5825
5826update_rate_and_reset:
5827 bfq_update_rate_reset(bfqd, rq);
5828update_last_values:
5829 bfqd->last_position = blk_rq_pos(rq) + blk_rq_sectors(rq);
5830 bfqd->last_dispatch = now_ns;
5831}
5832
aee69d78
PV
5833/*
5834 * Remove request from internal lists.
5835 */
5836static void bfq_dispatch_remove(struct request_queue *q, struct request *rq)
5837{
5838 struct bfq_queue *bfqq = RQ_BFQQ(rq);
5839
5840 /*
5841 * For consistency, the next instruction should have been
5842 * executed after removing the request from the queue and
5843 * dispatching it. We execute instead this instruction before
5844 * bfq_remove_request() (and hence introduce a temporary
5845 * inconsistency), for efficiency. In fact, should this
5846 * dispatch occur for a non in-service bfqq, this anticipated
5847	 * dispatch occur for a non-in-service bfqq, this anticipated
5848	 * increment prevents two counters related to bfqq->dispatched
5849	 * from being, first, uselessly decremented, and then
5850 * happens to be taken into account.
5851 */
5852 bfqq->dispatched++;
ab0e43e9 5853 bfq_update_peak_rate(q->elevator->elevator_data, rq);
aee69d78
PV
5854
5855 bfq_remove_request(q, rq);
5856}
5857
5858static void __bfq_bfqq_expire(struct bfq_data *bfqd, struct bfq_queue *bfqq)
5859{
36eca894
AA
5860 /*
5861 * If this bfqq is shared between multiple processes, check
5862 * to make sure that those processes are still issuing I/Os
5863 * within the mean seek distance. If not, it may be time to
5864 * break the queues apart again.
5865 */
5866 if (bfq_bfqq_coop(bfqq) && BFQQ_SEEKY(bfqq))
5867 bfq_mark_bfqq_split_coop(bfqq);
5868
44e44a1b
PV
5869 if (RB_EMPTY_ROOT(&bfqq->sort_list)) {
5870 if (bfqq->dispatched == 0)
5871 /*
5872 * Overloading budget_timeout field to store
5873 * the time at which the queue remains with no
5874 * backlog and no outstanding request; used by
5875 * the weight-raising mechanism.
5876 */
5877 bfqq->budget_timeout = jiffies;
5878
e21b7a0b 5879 bfq_del_bfqq_busy(bfqd, bfqq, true);
36eca894 5880 } else {
e21b7a0b 5881 bfq_requeue_bfqq(bfqd, bfqq);
36eca894
AA
5882 /*
5883 * Resort priority tree of potential close cooperators.
5884 */
5885 bfq_pos_tree_add_move(bfqd, bfqq);
5886 }
e21b7a0b
AA
5887
5888 /*
5889 * All in-service entities must have been properly deactivated
5890 * or requeued before executing the next function, which
5891	 * resets all in-service entities as no more in service.
5892 */
5893 __bfq_bfqd_reset_in_service(bfqd);
aee69d78
PV
5894}
5895
5896/**
5897 * __bfq_bfqq_recalc_budget - try to adapt the budget to the @bfqq behavior.
5898 * @bfqd: device data.
5899 * @bfqq: queue to update.
5900 * @reason: reason for expiration.
5901 *
5902 * Handle the feedback on @bfqq budget at queue expiration.
5903 * See the body for detailed comments.
5904 */
5905static void __bfq_bfqq_recalc_budget(struct bfq_data *bfqd,
5906 struct bfq_queue *bfqq,
5907 enum bfqq_expiration reason)
5908{
5909 struct request *next_rq;
5910 int budget, min_budget;
5911
aee69d78
PV
5912 min_budget = bfq_min_budget(bfqd);
5913
44e44a1b
PV
5914 if (bfqq->wr_coeff == 1)
5915 budget = bfqq->max_budget;
5916 else /*
5917 * Use a constant, low budget for weight-raised queues,
5918 * to help achieve a low latency. Keep it slightly higher
5919 * than the minimum possible budget, to cause a little
5920 * bit fewer expirations.
5921 */
5922 budget = 2 * min_budget;
5923
aee69d78
PV
5924 bfq_log_bfqq(bfqd, bfqq, "recalc_budg: last budg %d, budg left %d",
5925 bfqq->entity.budget, bfq_bfqq_budget_left(bfqq));
5926 bfq_log_bfqq(bfqd, bfqq, "recalc_budg: last max_budg %d, min budg %d",
5927 budget, bfq_min_budget(bfqd));
5928 bfq_log_bfqq(bfqd, bfqq, "recalc_budg: sync %d, seeky %d",
5929 bfq_bfqq_sync(bfqq), BFQQ_SEEKY(bfqd->in_service_queue));
5930
44e44a1b 5931 if (bfq_bfqq_sync(bfqq) && bfqq->wr_coeff == 1) {
aee69d78
PV
5932 switch (reason) {
5933 /*
5934 * Caveat: in all the following cases we trade latency
5935 * for throughput.
5936 */
5937 case BFQQE_TOO_IDLE:
54b60456
PV
5938 /*
5939 * This is the only case where we may reduce
5940 * the budget: if there is no request of the
5941 * process still waiting for completion, then
5942 * we assume (tentatively) that the timer has
5943 * expired because the batch of requests of
5944 * the process could have been served with a
5945 * smaller budget. Hence, betting that
5946			 * the process will behave in the same way when it
5947 * becomes backlogged again, we reduce its
5948 * next budget. As long as we guess right,
5949 * this budget cut reduces the latency
5950 * experienced by the process.
5951 *
5952 * However, if there are still outstanding
5953 * requests, then the process may have not yet
5954 * issued its next request just because it is
5955 * still waiting for the completion of some of
5956 * the still outstanding ones. So in this
5957 * subcase we do not reduce its budget, on the
5958 * contrary we increase it to possibly boost
5959 * the throughput, as discussed in the
5960 * comments to the BUDGET_TIMEOUT case.
5961 */
5962 if (bfqq->dispatched > 0) /* still outstanding reqs */
5963 budget = min(budget * 2, bfqd->bfq_max_budget);
5964 else {
5965 if (budget > 5 * min_budget)
5966 budget -= 4 * min_budget;
5967 else
5968 budget = min_budget;
5969 }
aee69d78
PV
5970 break;
5971 case BFQQE_BUDGET_TIMEOUT:
54b60456
PV
5972 /*
5973 * We double the budget here because it gives
5974 * the chance to boost the throughput if this
5975 * is not a seeky process (and has bumped into
5976 * this timeout because of, e.g., ZBR).
5977 */
5978 budget = min(budget * 2, bfqd->bfq_max_budget);
aee69d78
PV
5979 break;
5980 case BFQQE_BUDGET_EXHAUSTED:
5981 /*
5982 * The process still has backlog, and did not
5983 * let either the budget timeout or the disk
5984 * idling timeout expire. Hence it is not
5985 * seeky, has a short thinktime and may be
5986 * happy with a higher budget too. So
5987 * definitely increase the budget of this good
5988 * candidate to boost the disk throughput.
5989 */
54b60456 5990 budget = min(budget * 4, bfqd->bfq_max_budget);
aee69d78
PV
5991 break;
5992 case BFQQE_NO_MORE_REQUESTS:
5993 /*
5994 * For queues that expire for this reason, it
5995 * is particularly important to keep the
5996 * budget close to the actual service they
5997 * need. Doing so reduces the timestamp
5998 * misalignment problem described in the
5999 * comments in the body of
6000 * __bfq_activate_entity. In fact, suppose
6001 * that a queue systematically expires for
6002 * BFQQE_NO_MORE_REQUESTS and presents a
6003 * new request in time to enjoy timestamp
6004 * back-shifting. The larger the budget of the
6005 * queue is with respect to the service the
6006 * queue actually requests in each service
6007 * slot, the more times the queue can be
6008 * reactivated with the same virtual finish
6009 * time. It follows that, even if this finish
6010 * time is pushed to the system virtual time
6011 * to reduce the consequent timestamp
6012 * misalignment, the queue unjustly enjoys for
6013 * many re-activations a lower finish time
6014 * than all newly activated queues.
6015 *
6016 * The service needed by bfqq is measured
6017 * quite precisely by bfqq->entity.service.
6018 * Since bfqq does not enjoy device idling,
6019 * bfqq->entity.service is equal to the number
6020 * of sectors that the process associated with
6021 * bfqq requested to read/write before waiting
6022 * for request completions, or blocking for
6023 * other reasons.
6024 */
6025 budget = max_t(int, bfqq->entity.service, min_budget);
6026 break;
6027 default:
6028 return;
6029 }
44e44a1b 6030 } else if (!bfq_bfqq_sync(bfqq)) {
aee69d78
PV
6031 /*
6032		 * Async queues always get the maximum possible
6033 * budget, as for them we do not care about latency
6034 * (in addition, their ability to dispatch is limited
6035 * by the charging factor).
6036 */
6037 budget = bfqd->bfq_max_budget;
6038 }
6039
6040 bfqq->max_budget = budget;
6041
6042 if (bfqd->budgets_assigned >= bfq_stats_min_budgets &&
6043 !bfqd->bfq_user_max_budget)
6044 bfqq->max_budget = min(bfqq->max_budget, bfqd->bfq_max_budget);
6045
6046 /*
6047 * If there is still backlog, then assign a new budget, making
6048 * sure that it is large enough for the next request. Since
6049 * the finish time of bfqq must be kept in sync with the
6050 * budget, be sure to call __bfq_bfqq_expire() *after* this
6051 * update.
6052 *
6053 * If there is no backlog, then no need to update the budget;
6054 * it will be updated on the arrival of a new request.
6055 */
6056 next_rq = bfqq->next_rq;
6057 if (next_rq)
6058 bfqq->entity.budget = max_t(unsigned long, bfqq->max_budget,
6059 bfq_serv_to_charge(next_rq, bfqq));
6060
6061 bfq_log_bfqq(bfqd, bfqq, "head sect: %u, new budget %d",
6062 next_rq ? blk_rq_sectors(next_rq) : 0,
6063 bfqq->entity.budget);
6064}
6065
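/*
 * Putting the feedback rules above together for a sync, non
 * weight-raised queue (a sketch, not an exhaustive case list):
 * repeatedly expiring for BFQQE_BUDGET_EXHAUSTED grows the budget
 * geometrically, B -> 4B -> 16B, until it is clamped to
 * bfq_max_budget; repeatedly expiring for BFQQE_TOO_IDLE with no
 * request in flight shrinks it by 4 * min_budget per expiration,
 * down to min_budget; BFQQE_NO_MORE_REQUESTS pins the budget to the
 * service actually consumed in the last slot (but not below
 * min_budget).
 */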
aee69d78 6066/*
ab0e43e9
PV
6067 * Return true if the process associated with bfqq is "slow". The slow
6068 * flag is used, in addition to the budget timeout, to reduce the
6069 * amount of service provided to seeky processes, and thus reduce
6070 * their chances of lowering the throughput. More details in the comments
6071 * on the function bfq_bfqq_expire().
6072 *
6073 * An important observation is in order: as discussed in the comments
6074 * on the function bfq_update_peak_rate(), with devices with internal
6075 * queues, it is hard, if ever possible, to know when and for how long
6076 * an I/O request is processed by the device (apart from the trivial
6077 * I/O pattern where a new request is dispatched only after the
6078 * previous one has been completed). This makes it hard to evaluate
6079 * the real rate at which the I/O requests of each bfq_queue are
6080 * served. In fact, for an I/O scheduler like BFQ, serving a
6081 * bfq_queue means just dispatching its requests during its service
6082 * slot (i.e., until the budget of the queue is exhausted, or the
6083 * queue remains idle, or, finally, a timeout fires). But, during the
6084 * service slot of a bfq_queue, around 100 ms at most, the device may
6085 * still be processing requests of bfq_queues served in previous
6086 * service slots. On the opposite end, the requests of the in-service
6087 * bfq_queue may be completed after the service slot of the queue
6088 * finishes.
6089 *
6090 * Anyway, unless more sophisticated solutions are used
6091 * (where possible), the sum of the sizes of the requests dispatched
6092 * during the service slot of a bfq_queue is probably the only
6093 * approximation available for the service received by the bfq_queue
6094 * during its service slot. And this sum is the quantity used in this
6095 * function to evaluate the I/O speed of a process.
aee69d78 6096 */
ab0e43e9
PV
6097static bool bfq_bfqq_is_slow(struct bfq_data *bfqd, struct bfq_queue *bfqq,
6098 bool compensate, enum bfqq_expiration reason,
6099 unsigned long *delta_ms)
aee69d78 6100{
ab0e43e9
PV
6101 ktime_t delta_ktime;
6102 u32 delta_usecs;
6103 bool slow = BFQQ_SEEKY(bfqq); /* if delta too short, use seekyness */
aee69d78 6104
ab0e43e9 6105 if (!bfq_bfqq_sync(bfqq))
aee69d78
PV
6106 return false;
6107
6108 if (compensate)
ab0e43e9 6109 delta_ktime = bfqd->last_idling_start;
aee69d78 6110 else
ab0e43e9
PV
6111 delta_ktime = ktime_get();
6112 delta_ktime = ktime_sub(delta_ktime, bfqd->last_budget_start);
6113 delta_usecs = ktime_to_us(delta_ktime);
aee69d78
PV
6114
6115 /* don't use too short time intervals */
ab0e43e9
PV
6116 if (delta_usecs < 1000) {
6117 if (blk_queue_nonrot(bfqd->queue))
6118 /*
6119 * give same worst-case guarantees as idling
6120 * for seeky
6121 */
6122 *delta_ms = BFQ_MIN_TT / NSEC_PER_MSEC;
6123 else /* charge at least one seek */
6124 *delta_ms = bfq_slice_idle / NSEC_PER_MSEC;
6125
6126 return slow;
6127 }
aee69d78 6128
ab0e43e9 6129 *delta_ms = delta_usecs / USEC_PER_MSEC;
aee69d78
PV
6130
6131 /*
ab0e43e9
PV
6132 * Use only long (> 20ms) intervals to filter out excessive
6133 * spikes in service rate estimation.
aee69d78 6134 */
ab0e43e9
PV
6135 if (delta_usecs > 20000) {
6136 /*
6137 * Caveat for rotational devices: processes doing I/O
6138 * in the slower disk zones tend to be slow(er) even
6139 * if not seeky. In this respect, the estimated peak
6140 * rate is likely to be an average over the disk
6141 * surface. Accordingly, to not be too harsh with
6142 * unlucky processes, a process is deemed slow only if
6143 * its rate has been lower than half of the estimated
6144 * peak rate.
6145 */
6146 slow = bfqq->entity.service < bfqd->bfq_max_budget / 2;
aee69d78
PV
6147 }
6148
ab0e43e9 6149 bfq_log_bfqq(bfqd, bfqq, "bfq_bfqq_is_slow: slow %d", slow);
aee69d78 6150
ab0e43e9 6151 return slow;
aee69d78
PV
6152}
6153
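/*
 * Example of the threshold above: with an estimated bfq_max_budget
 * of, say, 25000 sectors (~12.5 MB), a sync queue whose slot lasted
 * more than 20 ms and that managed to get only 8000 sectors served
 * is flagged as slow, whereas one that got 13000 sectors served is
 * not, even if it ran into the budget timeout.
 */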
77b7dcea
PV
6154/*
6155 * To be deemed as soft real-time, an application must meet two
6156 * requirements. First, the application must not require an average
6157 * bandwidth higher than the approximate bandwidth required to play back or
6158 * record a compressed high-definition video.
6159 * The next function is invoked on the completion of the last request of a
6160 * batch, to compute the next-start time instant, soft_rt_next_start, such
6161 * that, if the next request of the application does not arrive before
6162 * soft_rt_next_start, then the above requirement on the bandwidth is met.
6163 *
6164 * The second requirement is that the request pattern of the application is
6165 * isochronous, i.e., that, after issuing a request or a batch of requests,
6166 * the application stops issuing new requests until all its pending requests
6167 * have been completed. After that, the application may issue a new batch,
6168 * and so on.
6169 * For this reason the next function is invoked to compute
6170 * soft_rt_next_start only for applications that meet this requirement,
6171 * whereas soft_rt_next_start is set to infinity for applications that do
6172 * not.
6173 *
6174 * Unfortunately, even a greedy application may happen to behave in an
6175 * isochronous way if the CPU load is high. In fact, the application may
6176 * stop issuing requests while the CPUs are busy serving other processes,
6177 * then restart, then stop again for a while, and so on. In addition, if
6178 * the disk achieves a low enough throughput with the request pattern
6179 * issued by the application (e.g., because the request pattern is random
6180 * and/or the device is slow), then the application may meet the above
6181 * bandwidth requirement too. To prevent such a greedy application from
6182 * being deemed soft real-time, a further rule is used in the computation of
6183 * soft_rt_next_start: soft_rt_next_start must be higher than the current
6184 * time plus the maximum time for which the arrival of a request is waited
6185 * for when a sync queue becomes idle, namely bfqd->bfq_slice_idle.
6186 * This filters out greedy applications, as the latter issue instead their
6187 * next request as soon as possible after the last one has been completed
6188 * (in contrast, when a batch of requests is completed, a soft real-time
6189 * application spends some time processing data).
6190 *
6191 * Unfortunately, the last filter may easily generate false positives if
6192 * only bfqd->bfq_slice_idle is used as a reference time interval and one
6193 * or both the following cases occur:
6194 * 1) HZ is so low that the duration of a jiffy is comparable to or higher
6195 * than bfqd->bfq_slice_idle. This happens, e.g., on slow devices with
6196 * HZ=100.
6197 * 2) jiffies, instead of increasing at a constant rate, may stop increasing
6198 * for a while, then suddenly 'jump' by several units to recover the lost
6199 * increments. This seems to happen, e.g., inside virtual machines.
6200 * To address this issue, we do not use as a reference time interval just
6201 * bfqd->bfq_slice_idle, but bfqd->bfq_slice_idle plus a few jiffies. In
6202 * particular we add the minimum number of jiffies for which the filter
6203 * seems to be quite precise also in embedded systems and KVM/QEMU virtual
6204 * machines.
6205 */
6206static unsigned long bfq_bfqq_softrt_next_start(struct bfq_data *bfqd,
6207 struct bfq_queue *bfqq)
6208{
6209 return max(bfqq->last_idle_bklogged +
6210 HZ * bfqq->service_from_backlogged /
6211 bfqd->bfq_wr_max_softrt_rate,
6212 jiffies + nsecs_to_jiffies(bfqq->bfqd->bfq_slice_idle) + 4);
6213}
6214
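/*
 * Example with an assumed bfq_wr_max_softrt_rate of 7000 sectors/s
 * (~3.5 MB/s): an application that has consumed 700 sectors since it
 * last emptied its backlog gets
 *
 *   soft_rt_next_start = last_idle_bklogged + HZ * 700 / 7000
 *                      = last_idle_bklogged + HZ/10,
 *
 * i.e., 100 ms after the last time it went idle with all requests
 * completed, but never earlier than now plus the idle slice plus 4
 * jiffies, which is the anti-greedy lower bound described above.
 */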
6215/*
6216 * Return the farthest future time instant according to jiffies
6217 * macros.
6218 */
6219static unsigned long bfq_greatest_from_now(void)
6220{
6221 return jiffies + MAX_JIFFY_OFFSET;
6222}
6223
aee69d78
PV
6224/*
6225 * Return the farthest past time instant according to jiffies
6226 * macros.
6227 */
6228static unsigned long bfq_smallest_from_now(void)
6229{
6230 return jiffies - MAX_JIFFY_OFFSET;
6231}
6232
6233/**
6234 * bfq_bfqq_expire - expire a queue.
6235 * @bfqd: device owning the queue.
6236 * @bfqq: the queue to expire.
6237 * @compensate: if true, compensate for the time spent idling.
6238 * @reason: the reason causing the expiration.
6239 *
c074170e
PV
6240 * If the process associated with bfqq does slow I/O (e.g., because it
6241 * issues random requests), we charge bfqq with the time it has been
6242 * in service instead of the service it has received (see
6243 * bfq_bfqq_charge_time for details on how this goal is achieved). As
6244 * a consequence, bfqq will typically get higher timestamps upon
6245 * reactivation, and hence it will be rescheduled as if it had
6246 * received more service than what it has actually received. In the
6247 * end, bfqq receives less service in proportion to how slowly its
6248 * associated process consumes its budgets (and hence how seriously it
6249 * tends to lower the throughput). In addition, this time-charging
6250 * strategy guarantees time fairness among slow processes. In
6251 * contrast, if the process associated with bfqq is not slow, we
6252 * charge bfqq exactly with the service it has received.
aee69d78 6253 *
c074170e
PV
6254 * Charging time to the first type of queues and the exact service to
6255 * the other has the effect of using the WF2Q+ policy to schedule the
6256 * former on a timeslice basis, without violating service domain
6257 * guarantees among the latter.
aee69d78
PV
6258 */
6259static void bfq_bfqq_expire(struct bfq_data *bfqd,
6260 struct bfq_queue *bfqq,
6261 bool compensate,
6262 enum bfqq_expiration reason)
6263{
6264 bool slow;
ab0e43e9
PV
6265 unsigned long delta = 0;
6266 struct bfq_entity *entity = &bfqq->entity;
aee69d78
PV
6267 int ref;
6268
6269 /*
ab0e43e9 6270 * Check whether the process is slow (see bfq_bfqq_is_slow).
aee69d78 6271 */
ab0e43e9 6272 slow = bfq_bfqq_is_slow(bfqd, bfqq, compensate, reason, &delta);
aee69d78 6273
77b7dcea
PV
6274 /*
6275	 * Increase service_from_backlogged before the next statement,
6276 * because the possible next invocation of
6277 * bfq_bfqq_charge_time would likely inflate
6278 * entity->service. In contrast, service_from_backlogged must
6279 * contain real service, to enable the soft real-time
6280 * heuristic to correctly compute the bandwidth consumed by
6281 * bfqq.
6282 */
6283 bfqq->service_from_backlogged += entity->service;
6284
aee69d78 6285 /*
c074170e
PV
6286 * As above explained, charge slow (typically seeky) and
6287 * timed-out queues with the time and not the service
6288 * received, to favor sequential workloads.
6289 *
6290 * Processes doing I/O in the slower disk zones will tend to
6291 * be slow(er) even if not seeky. Therefore, since the
6292 * estimated peak rate is actually an average over the disk
6293 * surface, these processes may timeout just for bad luck. To
6294 * avoid punishing them, do not charge time to processes that
6295 * succeeded in consuming at least 2/3 of their budget. This
6296 * allows BFQ to preserve enough elasticity to still perform
6297	 * bandwidth, and not time, distribution with slightly unlucky
6298 * or quasi-sequential processes.
aee69d78 6299 */
44e44a1b
PV
6300 if (bfqq->wr_coeff == 1 &&
6301 (slow ||
6302 (reason == BFQQE_BUDGET_TIMEOUT &&
6303 bfq_bfqq_budget_left(bfqq) >= entity->budget / 3)))
c074170e 6304 bfq_bfqq_charge_time(bfqd, bfqq, delta);
aee69d78
PV
6305
6306 if (reason == BFQQE_TOO_IDLE &&
ab0e43e9 6307 entity->service <= 2 * entity->budget / 10)
aee69d78
PV
6308 bfq_clear_bfqq_IO_bound(bfqq);
6309
44e44a1b
PV
6310 if (bfqd->low_latency && bfqq->wr_coeff == 1)
6311 bfqq->last_wr_start_finish = jiffies;
6312
77b7dcea
PV
6313 if (bfqd->low_latency && bfqd->bfq_wr_max_softrt_rate > 0 &&
6314 RB_EMPTY_ROOT(&bfqq->sort_list)) {
6315 /*
6316 * If we get here, and there are no outstanding
6317 * requests, then the request pattern is isochronous
6318 * (see the comments on the function
6319 * bfq_bfqq_softrt_next_start()). Thus we can compute
6320 * soft_rt_next_start. If, instead, the queue still
6321 * has outstanding requests, then we have to wait for
6322 * the completion of all the outstanding requests to
6323 * discover whether the request pattern is actually
6324 * isochronous.
6325 */
6326 if (bfqq->dispatched == 0)
6327 bfqq->soft_rt_next_start =
6328 bfq_bfqq_softrt_next_start(bfqd, bfqq);
6329 else {
6330 /*
6331 * The application is still waiting for the
6332 * completion of one or more requests:
6333 * prevent it from possibly being incorrectly
6334 * deemed as soft real-time by setting its
6335 * soft_rt_next_start to infinity. In fact,
6336 * without this assignment, the application
6337 * would be incorrectly deemed as soft
6338 * real-time if:
6339 * 1) it issued a new request before the
6340 * completion of all its in-flight
6341 * requests, and
6342 * 2) at that time, its soft_rt_next_start
6343 * happened to be in the past.
6344 */
6345 bfqq->soft_rt_next_start =
6346 bfq_greatest_from_now();
6347 /*
6348 * Schedule an update of soft_rt_next_start to when
6349 * the task may be discovered to be isochronous.
6350 */
6351 bfq_mark_bfqq_softrt_update(bfqq);
6352 }
6353 }
6354
aee69d78
PV
6355 bfq_log_bfqq(bfqd, bfqq,
6356 "expire (%d, slow %d, num_disp %d, idle_win %d)", reason,
6357 slow, bfqq->dispatched, bfq_bfqq_idle_window(bfqq));
6358
6359 /*
6360 * Increase, decrease or leave budget unchanged according to
6361 * reason.
6362 */
6363 __bfq_bfqq_recalc_budget(bfqd, bfqq, reason);
6364 ref = bfqq->ref;
6365 __bfq_bfqq_expire(bfqd, bfqq);
6366
6367	/* mark bfqq as waiting for a request only if a bic still points to it */
6368 if (ref > 1 && !bfq_bfqq_busy(bfqq) &&
6369 reason != BFQQE_BUDGET_TIMEOUT &&
6370 reason != BFQQE_BUDGET_EXHAUSTED)
6371 bfq_mark_bfqq_non_blocking_wait_rq(bfqq);
6372}
6373
6374/*
6375 * Budget timeout is not implemented through a dedicated timer, but
6376 * just checked on request arrivals and completions, as well as on
6377 * idle timer expirations.
6378 */
6379static bool bfq_bfqq_budget_timeout(struct bfq_queue *bfqq)
6380{
44e44a1b 6381 return time_is_before_eq_jiffies(bfqq->budget_timeout);
aee69d78
PV
6382}
6383
6384/*
6385 * If we expire a queue that is actively waiting (i.e., with the
6386 * device idled) for the arrival of a new request, then we may incur
6387 * the timestamp misalignment problem described in the body of the
6388 * function __bfq_activate_entity. Hence we return true only if this
 6389 * condition does not hold, or if the queue is so slow that expiring
 6390 * it is the best thing to do to preserve a high throughput.
6391 */
6392static bool bfq_may_expire_for_budg_timeout(struct bfq_queue *bfqq)
6393{
6394 bfq_log_bfqq(bfqq->bfqd, bfqq,
6395 "may_budget_timeout: wait_request %d left %d timeout %d",
6396 bfq_bfqq_wait_request(bfqq),
6397 bfq_bfqq_budget_left(bfqq) >= bfqq->entity.budget / 3,
6398 bfq_bfqq_budget_timeout(bfqq));
6399
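	/*
	 * In plain terms: expire on budget timeout only if the queue is
	 * not currently idling while waiting for a new request, or if it
	 * still has at least 1/3 of its budget left, i.e., it has been
	 * slow enough that it deserves to be expired anyway.
	 */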
6400 return (!bfq_bfqq_wait_request(bfqq) ||
6401 bfq_bfqq_budget_left(bfqq) >= bfqq->entity.budget / 3)
6402 &&
6403 bfq_bfqq_budget_timeout(bfqq);
6404}
6405
6406/*
6407 * For a queue that becomes empty, device idling is allowed only if
44e44a1b
PV
6408 * this function returns true for the queue. As a consequence, since
6409 * device idling plays a critical role in both throughput boosting and
6410 * service guarantees, the return value of this function plays a
6411 * critical role in both these aspects as well.
6412 *
6413 * In a nutshell, this function returns true only if idling is
6414 * beneficial for throughput or, even if detrimental for throughput,
6415 * idling is however necessary to preserve service guarantees (low
6416 * latency, desired throughput distribution, ...). In particular, on
6417 * NCQ-capable devices, this function tries to return false, so as to
6418 * help keep the drives' internal queues full, whenever this helps the
6419 * device boost the throughput without causing any service-guarantee
6420 * issue.
6421 *
6422 * In more detail, the return value of this function is obtained by,
6423 * first, computing a number of boolean variables that take into
6424 * account throughput and service-guarantee issues, and, then,
6425 * combining these variables in a logical expression. Most of the
6426 * issues taken into account are not trivial. We discuss these issues
6427 * individually while introducing the variables.
aee69d78
PV
6428 */
6429static bool bfq_bfqq_may_idle(struct bfq_queue *bfqq)
6430{
6431 struct bfq_data *bfqd = bfqq->bfqd;
cfd69712
PV
6432 bool idling_boosts_thr, idling_boosts_thr_without_issues,
6433 asymmetric_scenario;
aee69d78
PV
6434
6435 if (bfqd->strict_guarantees)
6436 return true;
6437
6438 /*
44e44a1b
PV
6439 * The next variable takes into account the cases where idling
6440 * boosts the throughput.
6441 *
6442 * The value of the variable is computed considering that
aee69d78
PV
6443 * idling is usually beneficial for the throughput if:
6444 * (a) the device is not NCQ-capable, or
6445 * (b) regardless of the presence of NCQ, the request pattern
6446 * for bfqq is I/O-bound (possible throughput losses
6447 * caused by granting idling to seeky queues are mitigated
6448 * by the fact that, in all scenarios where boosting
6449 * throughput is the best thing to do, i.e., in all
6450 * symmetric scenarios, only a minimal idle time is
6451 * allowed to seeky queues).
6452 */
6453 idling_boosts_thr = !bfqd->hw_tag || bfq_bfqq_IO_bound(bfqq);
6454
cfd69712
PV
6455 /*
6456 * The value of the next variable,
6457 * idling_boosts_thr_without_issues, is equal to that of
6458 * idling_boosts_thr, unless a special case holds. In this
6459 * special case, described below, idling may cause problems to
6460 * weight-raised queues.
6461 *
6462 * When the request pool is saturated (e.g., in the presence
6463 * of write hogs), if the processes associated with
6464 * non-weight-raised queues ask for requests at a lower rate,
6465 * then processes associated with weight-raised queues have a
6466 * higher probability to get a request from the pool
6467 * immediately (or at least soon) when they need one. Thus
6468 * they have a higher probability to actually get a fraction
6469 * of the device throughput proportional to their high
6470 * weight. This is especially true with NCQ-capable drives,
6471 * which enqueue several requests in advance, and further
6472 * reorder internally-queued requests.
6473 *
6474 * For this reason, we force to false the value of
6475 * idling_boosts_thr_without_issues if there are weight-raised
6476 * busy queues. In this case, and if bfqq is not weight-raised,
6477 * this guarantees that the device is not idled for bfqq (if,
6478 * instead, bfqq is weight-raised, then idling will be
6479 * guaranteed by another variable, see below). Combined with
6480 * the timestamping rules of BFQ (see [1] for details), this
6481 * behavior causes bfqq, and hence any sync non-weight-raised
6482 * queue, to get a lower number of requests served, and thus
6483 * to ask for a lower number of requests from the request
6484 * pool, before the busy weight-raised queues get served
6485 * again. This often mitigates starvation problems in the
6486 * presence of heavy write workloads and NCQ, thereby
6487 * guaranteeing a higher application and system responsiveness
6488 * in these hostile scenarios.
6489 */
6490 idling_boosts_thr_without_issues = idling_boosts_thr &&
6491 bfqd->wr_busy_queues == 0;
6492
aee69d78 6493 /*
44e44a1b
PV
6494 * There is then a case where idling must be performed not for
6495 * throughput concerns, but to preserve service guarantees. To
6496 * introduce it, we can note that allowing the drive to
6497 * enqueue more than one request at a time, and hence
6498 * delegating de facto final scheduling decisions to the
6499 * drive's internal scheduler, causes loss of control on the
6500 * actual request service order. In particular, the critical
 6501 * situation is when requests from different processes happen
6502 * to be present, at the same time, in the internal queue(s)
6503 * of the drive. In such a situation, the drive, by deciding
6504 * the service order of the internally-queued requests, does
6505 * determine also the actual throughput distribution among
6506 * these processes. But the drive typically has no notion or
6507 * concern about per-process throughput distribution, and
6508 * makes its decisions only on a per-request basis. Therefore,
6509 * the service distribution enforced by the drive's internal
6510 * scheduler is likely to coincide with the desired
6511 * device-throughput distribution only in a completely
6512 * symmetric scenario where: (i) each of these processes must
6513 * get the same throughput as the others; (ii) all these
6514 * processes have the same I/O pattern (either sequential or
6515 * random). In fact, in such a scenario, the drive will tend
6516 * to treat the requests of each of these processes in about
6517 * the same way as the requests of the others, and thus to
6518 * provide each of these processes with about the same
6519 * throughput (which is exactly the desired throughput
6520 * distribution). In contrast, in any asymmetric scenario,
6521 * device idling is certainly needed to guarantee that bfqq
6522 * receives its assigned fraction of the device throughput
6523 * (see [1] for details).
6524 *
6525 * As for sub-condition (i), actually we check only whether
6526 * bfqq is being weight-raised. In fact, if bfqq is not being
6527 * weight-raised, we have that:
6528 * - if the process associated with bfqq is not I/O-bound, then
6529 * it is not either latency- or throughput-critical; therefore
6530 * idling is not needed for bfqq;
 6531 * - if the process associated with bfqq is I/O-bound, then
6532 * idling is already granted with bfqq (see the comments on
6533 * idling_boosts_thr).
6534 *
6535 * We do not check sub-condition (ii) at all, i.e., the next
6536 * variable is true if and only if bfqq is being
6537 * weight-raised. We do not need to control sub-condition (ii)
6538 * for the following reason:
6539 * - if bfqq is being weight-raised, then idling is already
6540 * guaranteed to bfqq by sub-condition (i);
6541 * - if bfqq is not being weight-raised, then idling is
6542 * already guaranteed to bfqq (only) if it matters, i.e., if
6543 * bfqq is associated to a currently I/O-bound process (see
6544 * the above comment on sub-condition (i)).
6545 *
6546 * As a side note, it is worth considering that the above
6547 * device-idling countermeasures may however fail in the
6548 * following unlucky scenario: if idling is (correctly)
6549 * disabled in a time period during which the symmetry
6550 * sub-condition holds, and hence the device is allowed to
6551 * enqueue many requests, but at some later point in time some
 6552 * sub-condition stops holding, then it may become impossible
6553 * to let requests be served in the desired order until all
6554 * the requests already queued in the device have been served.
6555 */
6556 asymmetric_scenario = bfqq->wr_coeff > 1;
6557
6558 /*
6559 * We have now all the components we need to compute the return
6560 * value of the function, which is true only if both the following
6561 * conditions hold:
aee69d78 6562 * 1) bfqq is sync, because idling makes sense only for sync queues;
44e44a1b
PV
6563 * 2) idling either boosts the throughput (without issues), or
6564 * is necessary to preserve service guarantees.
aee69d78 6565 */
44e44a1b 6566 return bfq_bfqq_sync(bfqq) &&
cfd69712 6567 (idling_boosts_thr_without_issues || asymmetric_scenario);
aee69d78
PV
6568}
6569
6570/*
6571 * If the in-service queue is empty but the function bfq_bfqq_may_idle
6572 * returns true, then:
6573 * 1) the queue must remain in service and cannot be expired, and
6574 * 2) the device must be idled to wait for the possible arrival of a new
6575 * request for the queue.
6576 * See the comments on the function bfq_bfqq_may_idle for the reasons
6577 * why performing device idling is the best choice to boost the throughput
6578 * and preserve service guarantees when bfq_bfqq_may_idle itself
6579 * returns true.
6580 */
6581static bool bfq_bfqq_must_idle(struct bfq_queue *bfqq)
6582{
6583 struct bfq_data *bfqd = bfqq->bfqd;
6584
6585 return RB_EMPTY_ROOT(&bfqq->sort_list) && bfqd->bfq_slice_idle != 0 &&
6586 bfq_bfqq_may_idle(bfqq);
6587}
6588
6589/*
6590 * Select a queue for service. If we have a current queue in service,
6591 * check whether to continue servicing it, or retrieve and set a new one.
6592 */
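/*
 * Overview of the control flow below: if there is no in-service queue,
 * jump to new_queue to pick one; otherwise decide whether the current
 * queue must be expired (expire label) because of budget timeout,
 * budget exhaustion or lack of requests, or kept (keep_queue label),
 * possibly while idling for the arrival of a new request.
 */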
6593static struct bfq_queue *bfq_select_queue(struct bfq_data *bfqd)
6594{
6595 struct bfq_queue *bfqq;
6596 struct request *next_rq;
6597 enum bfqq_expiration reason = BFQQE_BUDGET_TIMEOUT;
6598
6599 bfqq = bfqd->in_service_queue;
6600 if (!bfqq)
6601 goto new_queue;
6602
6603 bfq_log_bfqq(bfqd, bfqq, "select_queue: already in-service queue");
6604
6605 if (bfq_may_expire_for_budg_timeout(bfqq) &&
6606 !bfq_bfqq_wait_request(bfqq) &&
6607 !bfq_bfqq_must_idle(bfqq))
6608 goto expire;
6609
6610check_queue:
6611 /*
6612 * This loop is rarely executed more than once. Even when it
6613 * happens, it is much more convenient to re-execute this loop
6614 * than to return NULL and trigger a new dispatch to get a
6615 * request served.
6616 */
6617 next_rq = bfqq->next_rq;
6618 /*
6619 * If bfqq has requests queued and it has enough budget left to
6620 * serve them, keep the queue, otherwise expire it.
6621 */
6622 if (next_rq) {
6623 if (bfq_serv_to_charge(next_rq, bfqq) >
6624 bfq_bfqq_budget_left(bfqq)) {
6625 /*
6626 * Expire the queue for budget exhaustion,
6627 * which makes sure that the next budget is
6628 * enough to serve the next request, even if
6629 * it comes from the fifo expired path.
6630 */
6631 reason = BFQQE_BUDGET_EXHAUSTED;
6632 goto expire;
6633 } else {
6634 /*
6635 * The idle timer may be pending because we may
6636 * not disable disk idling even when a new request
6637 * arrives.
6638 */
6639 if (bfq_bfqq_wait_request(bfqq)) {
6640 /*
 6641			 * If we get here: 1) at least one new request
 6642			 * has arrived but we have not disabled the
 6643			 * timer because the request was too small,
 6644			 * and 2) the block layer has then unplugged
 6645			 * the device, causing this dispatch to be
 6646			 * invoked.
6647 *
6648 * Since the device is unplugged, now the
6649 * requests are probably large enough to
6650 * provide a reasonable throughput.
6651 * So we disable idling.
6652 */
6653 bfq_clear_bfqq_wait_request(bfqq);
6654 hrtimer_try_to_cancel(&bfqd->idle_slice_timer);
e21b7a0b 6655 bfqg_stats_update_idle_time(bfqq_group(bfqq));
aee69d78
PV
6656 }
6657 goto keep_queue;
6658 }
6659 }
6660
6661 /*
6662 * No requests pending. However, if the in-service queue is idling
6663 * for a new request, or has requests waiting for a completion and
6664 * may idle after their completion, then keep it anyway.
6665 */
6666 if (bfq_bfqq_wait_request(bfqq) ||
6667 (bfqq->dispatched != 0 && bfq_bfqq_may_idle(bfqq))) {
6668 bfqq = NULL;
6669 goto keep_queue;
6670 }
6671
6672 reason = BFQQE_NO_MORE_REQUESTS;
6673expire:
6674 bfq_bfqq_expire(bfqd, bfqq, false, reason);
6675new_queue:
6676 bfqq = bfq_set_in_service_queue(bfqd);
6677 if (bfqq) {
6678 bfq_log_bfqq(bfqd, bfqq, "select_queue: checking new queue");
6679 goto check_queue;
6680 }
6681keep_queue:
6682 if (bfqq)
6683 bfq_log_bfqq(bfqd, bfqq, "select_queue: returned this queue");
6684 else
6685 bfq_log(bfqd, "select_queue: no queue returned");
6686
6687 return bfqq;
6688}
6689
44e44a1b
PV
6690static void bfq_update_wr_data(struct bfq_data *bfqd, struct bfq_queue *bfqq)
6691{
6692 struct bfq_entity *entity = &bfqq->entity;
6693
6694 if (bfqq->wr_coeff > 1) { /* queue is being weight-raised */
6695 bfq_log_bfqq(bfqd, bfqq,
6696 "raising period dur %u/%u msec, old coeff %u, w %d(%d)",
6697 jiffies_to_msecs(jiffies - bfqq->last_wr_start_finish),
6698 jiffies_to_msecs(bfqq->wr_cur_max_time),
6699 bfqq->wr_coeff,
6700 bfqq->entity.weight, bfqq->entity.orig_weight);
6701
6702 if (entity->prio_changed)
6703 bfq_log_bfqq(bfqd, bfqq, "WARN: pending prio change");
6704
6705 /*
 6706		 * If too much time has elapsed since the beginning of
36eca894 6707		 * this weight-raising period, then end weight raising.
44e44a1b
PV
6708 */
6709 if (time_is_before_jiffies(bfqq->last_wr_start_finish +
6710 bfqq->wr_cur_max_time)) {
77b7dcea
PV
6711 if (bfqq->wr_cur_max_time != bfqd->bfq_wr_rt_max_time ||
6712 time_is_before_jiffies(bfqq->wr_start_at_switch_to_srt +
6713 bfq_wr_duration(bfqd)))
6714 bfq_bfqq_end_wr(bfqq);
6715 else {
6716 /* switch back to interactive wr */
6717 bfqq->wr_coeff = bfqd->bfq_wr_coeff;
6718 bfqq->wr_cur_max_time = bfq_wr_duration(bfqd);
6719 bfqq->last_wr_start_finish =
6720 bfqq->wr_start_at_switch_to_srt;
6721 bfqq->entity.prio_changed = 1;
6722 }
44e44a1b
PV
6723 }
6724 }
6725 /* Update weight both if it must be raised and if it must be lowered */
6726 if ((entity->weight > entity->orig_weight) != (bfqq->wr_coeff > 1))
6727 __bfq_entity_update_weight_prio(
6728 bfq_entity_service_tree(entity),
6729 entity);
6730}
6731
aee69d78
PV
6732/*
6733 * Dispatch next request from bfqq.
6734 */
6735static struct request *bfq_dispatch_rq_from_bfqq(struct bfq_data *bfqd,
6736 struct bfq_queue *bfqq)
6737{
6738 struct request *rq = bfqq->next_rq;
6739 unsigned long service_to_charge;
6740
6741 service_to_charge = bfq_serv_to_charge(rq, bfqq);
6742
6743 bfq_bfqq_served(bfqq, service_to_charge);
6744
6745 bfq_dispatch_remove(bfqd->queue, rq);
6746
44e44a1b
PV
6747 /*
6748 * If weight raising has to terminate for bfqq, then next
6749 * function causes an immediate update of bfqq's weight,
6750 * without waiting for next activation. As a consequence, on
 6751	 * expiration, bfqq will be timestamped as if it had never been
6752 * weight-raised during this service slot, even if it has
6753 * received part or even most of the service as a
6754 * weight-raised queue. This inflates bfqq's timestamps, which
6755 * is beneficial, as bfqq is then more willing to leave the
6756 * device immediately to possible other weight-raised queues.
6757 */
6758 bfq_update_wr_data(bfqd, bfqq);
6759
aee69d78
PV
6760 if (!bfqd->in_service_bic) {
6761 atomic_long_inc(&RQ_BIC(rq)->icq.ioc->refcount);
6762 bfqd->in_service_bic = RQ_BIC(rq);
6763 }
6764
6765 /*
6766 * Expire bfqq, pretending that its budget expired, if bfqq
6767 * belongs to CLASS_IDLE and other queues are waiting for
6768 * service.
6769 */
6770 if (bfqd->busy_queues > 1 && bfq_class_idle(bfqq))
6771 goto expire;
6772
6773 return rq;
6774
6775expire:
6776 bfq_bfqq_expire(bfqd, bfqq, false, BFQQE_BUDGET_EXHAUSTED);
6777 return rq;
6778}
6779
6780static bool bfq_has_work(struct blk_mq_hw_ctx *hctx)
6781{
6782 struct bfq_data *bfqd = hctx->queue->elevator->elevator_data;
6783
6784 /*
 6785	 * Avoiding the lock here: a race on bfqd->busy_queues should
 6786	 * cause at most one useless call to dispatch.
6787 */
6788 return !list_empty_careful(&bfqd->dispatch) ||
6789 bfqd->busy_queues > 0;
6790}
6791
6792static struct request *__bfq_dispatch_request(struct blk_mq_hw_ctx *hctx)
6793{
6794 struct bfq_data *bfqd = hctx->queue->elevator->elevator_data;
6795 struct request *rq = NULL;
6796 struct bfq_queue *bfqq = NULL;
6797
6798 if (!list_empty(&bfqd->dispatch)) {
6799 rq = list_first_entry(&bfqd->dispatch, struct request,
6800 queuelist);
6801 list_del_init(&rq->queuelist);
6802
6803 bfqq = RQ_BFQQ(rq);
6804
6805 if (bfqq) {
6806 /*
6807 * Increment counters here, because this
6808 * dispatch does not follow the standard
6809 * dispatch flow (where counters are
6810 * incremented)
6811 */
6812 bfqq->dispatched++;
6813
6814 goto inc_in_driver_start_rq;
6815 }
6816
6817 /*
6818 * We exploit the put_rq_private hook to decrement
6819 * rq_in_driver, but put_rq_private will not be
 6820		 * invoked on this request. So, to avoid an imbalance,
6821 * just start this request, without incrementing
6822 * rq_in_driver. As a negative consequence,
6823 * rq_in_driver is deceptively lower than it should be
6824 * while this request is in service. This may cause
6825 * bfq_schedule_dispatch to be invoked uselessly.
6826 *
6827 * As for implementing an exact solution, the
6828 * put_request hook, if defined, is probably invoked
6829 * also on this request. So, by exploiting this hook,
6830 * we could 1) increment rq_in_driver here, and 2)
6831 * decrement it in put_request. Such a solution would
6832 * let the value of the counter be always accurate,
6833 * but it would entail using an extra interface
 6834		 * function. This cost seems higher than the benefit,
 6835		 * given that non-elevator-private requests are very
 6836		 * infrequent.
6837 */
6838 goto start_rq;
6839 }
6840
6841 bfq_log(bfqd, "dispatch requests: %d busy queues", bfqd->busy_queues);
6842
6843 if (bfqd->busy_queues == 0)
6844 goto exit;
6845
6846 /*
6847 * Force device to serve one request at a time if
6848 * strict_guarantees is true. Forcing this service scheme is
6849 * currently the ONLY way to guarantee that the request
6850 * service order enforced by the scheduler is respected by a
6851 * queueing device. Otherwise the device is free even to make
6852 * some unlucky request wait for as long as the device
6853 * wishes.
6854 *
 6855	 * Of course, serving one request at a time may cause loss of
6856 * throughput.
6857 */
6858 if (bfqd->strict_guarantees && bfqd->rq_in_driver > 0)
6859 goto exit;
6860
6861 bfqq = bfq_select_queue(bfqd);
6862 if (!bfqq)
6863 goto exit;
6864
6865 rq = bfq_dispatch_rq_from_bfqq(bfqd, bfqq);
6866
6867 if (rq) {
6868inc_in_driver_start_rq:
6869 bfqd->rq_in_driver++;
6870start_rq:
6871 rq->rq_flags |= RQF_STARTED;
6872 }
6873exit:
6874 return rq;
6875}
6876
6877static struct request *bfq_dispatch_request(struct blk_mq_hw_ctx *hctx)
6878{
6879 struct bfq_data *bfqd = hctx->queue->elevator->elevator_data;
6880 struct request *rq;
6881
6882 spin_lock_irq(&bfqd->lock);
36eca894 6883
aee69d78 6884 rq = __bfq_dispatch_request(hctx);
36eca894 6885 bfq_unlock_put_ioc(bfqd);
aee69d78
PV
6886
6887 return rq;
6888}
6889
6890/*
6891 * Task holds one reference to the queue, dropped when task exits. Each rq
6892 * in-flight on this queue also holds a reference, dropped when rq is freed.
6893 *
6894 * Scheduler lock must be held here. Recall not to use bfqq after calling
6895 * this function on it.
6896 */
6897static void bfq_put_queue(struct bfq_queue *bfqq)
6898{
e21b7a0b
AA
6899#ifdef CONFIG_BFQ_GROUP_IOSCHED
6900 struct bfq_group *bfqg = bfqq_group(bfqq);
6901#endif
6902
aee69d78
PV
6903 if (bfqq->bfqd)
6904 bfq_log_bfqq(bfqq->bfqd, bfqq, "put_queue: %p %d",
6905 bfqq, bfqq->ref);
6906
6907 bfqq->ref--;
6908 if (bfqq->ref)
6909 return;
6910
e21b7a0b
AA
6911 bfq_log_bfqq(bfqq->bfqd, bfqq, "put_queue: %p freed", bfqq);
6912
aee69d78 6913 kmem_cache_free(bfq_pool, bfqq);
e21b7a0b
AA
6914#ifdef CONFIG_BFQ_GROUP_IOSCHED
6915 bfqg_put(bfqg);
6916#endif
aee69d78
PV
6917}
6918
36eca894
AA
6919static void bfq_put_cooperator(struct bfq_queue *bfqq)
6920{
6921 struct bfq_queue *__bfqq, *next;
6922
6923 /*
6924 * If this queue was scheduled to merge with another queue, be
6925 * sure to drop the reference taken on that queue (and others in
6926 * the merge chain). See bfq_setup_merge and bfq_merge_bfqqs.
6927 */
6928 __bfqq = bfqq->new_bfqq;
6929 while (__bfqq) {
6930 if (__bfqq == bfqq)
6931 break;
6932 next = __bfqq->new_bfqq;
6933 bfq_put_queue(__bfqq);
6934 __bfqq = next;
6935 }
6936}
6937
aee69d78
PV
6938static void bfq_exit_bfqq(struct bfq_data *bfqd, struct bfq_queue *bfqq)
6939{
6940 if (bfqq == bfqd->in_service_queue) {
6941 __bfq_bfqq_expire(bfqd, bfqq);
6942 bfq_schedule_dispatch(bfqd);
6943 }
6944
6945 bfq_log_bfqq(bfqd, bfqq, "exit_bfqq: %p, %d", bfqq, bfqq->ref);
6946
36eca894
AA
6947 bfq_put_cooperator(bfqq);
6948
aee69d78
PV
6949 bfq_put_queue(bfqq); /* release process reference */
6950}
6951
6952static void bfq_exit_icq_bfqq(struct bfq_io_cq *bic, bool is_sync)
6953{
6954 struct bfq_queue *bfqq = bic_to_bfqq(bic, is_sync);
6955 struct bfq_data *bfqd;
6956
6957 if (bfqq)
6958 bfqd = bfqq->bfqd; /* NULL if scheduler already exited */
6959
6960 if (bfqq && bfqd) {
6961 unsigned long flags;
6962
6963 spin_lock_irqsave(&bfqd->lock, flags);
36eca894
AA
6964 /*
6965 * If the bic is using a shared queue, put the
6966 * reference taken on the io_context when the bic
6967 * started using a shared bfq_queue. This put cannot
6968 * make ioc->ref_count reach 0, then no ioc->lock
6969 * risks to be taken (leading to possible deadlock
6970 * scenarios).
6971 */
6972 if (is_sync && bfq_bfqq_coop(bfqq))
6973 put_io_context(bic->icq.ioc);
6974
aee69d78
PV
6975 bfq_exit_bfqq(bfqd, bfqq);
6976 bic_set_bfqq(bic, NULL, is_sync);
36eca894 6977 bfq_unlock_put_ioc_restore(bfqd, flags);
aee69d78
PV
6978 }
6979}
6980
6981static void bfq_exit_icq(struct io_cq *icq)
6982{
6983 struct bfq_io_cq *bic = icq_to_bic(icq);
6984
6985 bfq_exit_icq_bfqq(bic, true);
6986 bfq_exit_icq_bfqq(bic, false);
6987}
6988
6989/*
6990 * Update the entity prio values; note that the new values will not
6991 * be used until the next (re)activation.
6992 */
6993static void
6994bfq_set_next_ioprio_data(struct bfq_queue *bfqq, struct bfq_io_cq *bic)
6995{
6996 struct task_struct *tsk = current;
6997 int ioprio_class;
6998 struct bfq_data *bfqd = bfqq->bfqd;
6999
7000 if (!bfqd)
7001 return;
7002
7003 ioprio_class = IOPRIO_PRIO_CLASS(bic->ioprio);
7004 switch (ioprio_class) {
7005 default:
7006 dev_err(bfqq->bfqd->queue->backing_dev_info->dev,
7007 "bfq: bad prio class %d\n", ioprio_class);
7008 case IOPRIO_CLASS_NONE:
7009 /*
7010 * No prio set, inherit CPU scheduling settings.
7011 */
7012 bfqq->new_ioprio = task_nice_ioprio(tsk);
7013 bfqq->new_ioprio_class = task_nice_ioclass(tsk);
7014 break;
7015 case IOPRIO_CLASS_RT:
7016 bfqq->new_ioprio = IOPRIO_PRIO_DATA(bic->ioprio);
7017 bfqq->new_ioprio_class = IOPRIO_CLASS_RT;
7018 break;
7019 case IOPRIO_CLASS_BE:
7020 bfqq->new_ioprio = IOPRIO_PRIO_DATA(bic->ioprio);
7021 bfqq->new_ioprio_class = IOPRIO_CLASS_BE;
7022 break;
7023 case IOPRIO_CLASS_IDLE:
7024 bfqq->new_ioprio_class = IOPRIO_CLASS_IDLE;
7025 bfqq->new_ioprio = 7;
7026 bfq_clear_bfqq_idle_window(bfqq);
7027 break;
7028 }
7029
7030 if (bfqq->new_ioprio >= IOPRIO_BE_NR) {
7031 pr_crit("bfq_set_next_ioprio_data: new_ioprio %d\n",
7032 bfqq->new_ioprio);
7033 bfqq->new_ioprio = IOPRIO_BE_NR;
7034 }
7035
7036 bfqq->entity.new_weight = bfq_ioprio_to_weight(bfqq->new_ioprio);
7037 bfqq->entity.prio_changed = 1;
7038}
7039
7040static void bfq_check_ioprio_change(struct bfq_io_cq *bic, struct bio *bio)
7041{
7042 struct bfq_data *bfqd = bic_to_bfqd(bic);
7043 struct bfq_queue *bfqq;
7044 int ioprio = bic->icq.ioc->ioprio;
7045
7046 /*
 7047	 * This condition may trigger on a newly created bic; be sure to
 7048	 * drop the lock before returning.
7049 */
7050 if (unlikely(!bfqd) || likely(bic->ioprio == ioprio))
7051 return;
7052
7053 bic->ioprio = ioprio;
7054
7055 bfqq = bic_to_bfqq(bic, false);
7056 if (bfqq) {
7057 /* release process reference on this queue */
7058 bfq_put_queue(bfqq);
7059 bfqq = bfq_get_queue(bfqd, bio, BLK_RW_ASYNC, bic);
7060 bic_set_bfqq(bic, bfqq, false);
7061 }
7062
7063 bfqq = bic_to_bfqq(bic, true);
7064 if (bfqq)
7065 bfq_set_next_ioprio_data(bfqq, bic);
7066}
7067
7068static void bfq_init_bfqq(struct bfq_data *bfqd, struct bfq_queue *bfqq,
7069 struct bfq_io_cq *bic, pid_t pid, int is_sync)
7070{
7071 RB_CLEAR_NODE(&bfqq->entity.rb_node);
7072 INIT_LIST_HEAD(&bfqq->fifo);
7073
7074 bfqq->ref = 0;
7075 bfqq->bfqd = bfqd;
7076
7077 if (bic)
7078 bfq_set_next_ioprio_data(bfqq, bic);
7079
7080 if (is_sync) {
7081 if (!bfq_class_idle(bfqq))
7082 bfq_mark_bfqq_idle_window(bfqq);
7083 bfq_mark_bfqq_sync(bfqq);
7084 } else
7085 bfq_clear_bfqq_sync(bfqq);
7086
7087 /* set end request to minus infinity from now */
7088 bfqq->ttime.last_end_request = ktime_get_ns() + 1;
7089
7090 bfq_mark_bfqq_IO_bound(bfqq);
7091
7092 bfqq->pid = pid;
7093
 7094	/* Tentative initial value to trade off between throughput and latency */
54b60456 7095 bfqq->max_budget = (2 * bfq_max_budget(bfqd)) / 3;
aee69d78 7096 bfqq->budget_timeout = bfq_smallest_from_now();
aee69d78 7097
44e44a1b 7098 bfqq->wr_coeff = 1;
36eca894 7099 bfqq->last_wr_start_finish = jiffies;
77b7dcea 7100 bfqq->wr_start_at_switch_to_srt = bfq_smallest_from_now();
36eca894 7101 bfqq->split_time = bfq_smallest_from_now();
77b7dcea
PV
7102
7103 /*
7104 * Set to the value for which bfqq will not be deemed as
7105 * soft rt when it becomes backlogged.
7106 */
7107 bfqq->soft_rt_next_start = bfq_greatest_from_now();
44e44a1b 7108
aee69d78
PV
7109 /* first request is almost certainly seeky */
7110 bfqq->seek_history = 1;
7111}
7112
7113static struct bfq_queue **bfq_async_queue_prio(struct bfq_data *bfqd,
e21b7a0b 7114 struct bfq_group *bfqg,
aee69d78
PV
7115 int ioprio_class, int ioprio)
7116{
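	/*
	 * async_bfqq[0][] holds the per-ioprio queues of the RT class,
	 * async_bfqq[1][] those of the BE class; the IDLE class shares a
	 * single async queue.
	 */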
7117 switch (ioprio_class) {
7118 case IOPRIO_CLASS_RT:
e21b7a0b 7119 return &bfqg->async_bfqq[0][ioprio];
aee69d78
PV
7120 case IOPRIO_CLASS_NONE:
7121 ioprio = IOPRIO_NORM;
7122 /* fall through */
7123 case IOPRIO_CLASS_BE:
e21b7a0b 7124 return &bfqg->async_bfqq[1][ioprio];
aee69d78 7125 case IOPRIO_CLASS_IDLE:
e21b7a0b 7126 return &bfqg->async_idle_bfqq;
aee69d78
PV
7127 default:
7128 return NULL;
7129 }
7130}
7131
7132static struct bfq_queue *bfq_get_queue(struct bfq_data *bfqd,
7133 struct bio *bio, bool is_sync,
7134 struct bfq_io_cq *bic)
7135{
7136 const int ioprio = IOPRIO_PRIO_DATA(bic->ioprio);
7137 const int ioprio_class = IOPRIO_PRIO_CLASS(bic->ioprio);
7138 struct bfq_queue **async_bfqq = NULL;
7139 struct bfq_queue *bfqq;
e21b7a0b 7140 struct bfq_group *bfqg;
aee69d78
PV
7141
7142 rcu_read_lock();
7143
e21b7a0b
AA
7144 bfqg = bfq_find_set_group(bfqd, bio_blkcg(bio));
7145 if (!bfqg) {
7146 bfqq = &bfqd->oom_bfqq;
7147 goto out;
7148 }
7149
aee69d78 7150 if (!is_sync) {
e21b7a0b 7151 async_bfqq = bfq_async_queue_prio(bfqd, bfqg, ioprio_class,
aee69d78
PV
7152 ioprio);
7153 bfqq = *async_bfqq;
7154 if (bfqq)
7155 goto out;
7156 }
7157
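	/*
	 * The allocation must not sleep: we are inside an rcu_read_lock()
	 * section and the callers hold the scheduler lock. Hence
	 * GFP_NOWAIT; on failure we fall back to the preallocated
	 * oom_bfqq below.
	 */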
7158 bfqq = kmem_cache_alloc_node(bfq_pool,
7159 GFP_NOWAIT | __GFP_ZERO | __GFP_NOWARN,
7160 bfqd->queue->node);
7161
7162 if (bfqq) {
7163 bfq_init_bfqq(bfqd, bfqq, bic, current->pid,
7164 is_sync);
e21b7a0b 7165 bfq_init_entity(&bfqq->entity, bfqg);
aee69d78
PV
7166 bfq_log_bfqq(bfqd, bfqq, "allocated");
7167 } else {
7168 bfqq = &bfqd->oom_bfqq;
7169 bfq_log_bfqq(bfqd, bfqq, "using oom bfqq");
7170 goto out;
7171 }
7172
7173 /*
7174 * Pin the queue now that it's allocated, scheduler exit will
7175 * prune it.
7176 */
7177 if (async_bfqq) {
e21b7a0b
AA
7178 bfqq->ref++; /*
7179 * Extra group reference, w.r.t. sync
7180 * queue. This extra reference is removed
7181 * only if bfqq->bfqg disappears, to
7182 * guarantee that this queue is not freed
7183 * until its group goes away.
7184 */
7185 bfq_log_bfqq(bfqd, bfqq, "get_queue, bfqq not in async: %p, %d",
aee69d78
PV
7186 bfqq, bfqq->ref);
7187 *async_bfqq = bfqq;
7188 }
7189
7190out:
7191 bfqq->ref++; /* get a process reference to this queue */
7192 bfq_log_bfqq(bfqd, bfqq, "get_queue, at end: %p, %d", bfqq, bfqq->ref);
7193 rcu_read_unlock();
7194 return bfqq;
7195}
7196
7197static void bfq_update_io_thinktime(struct bfq_data *bfqd,
7198 struct bfq_queue *bfqq)
7199{
7200 struct bfq_ttime *ttime = &bfqq->ttime;
7201 u64 elapsed = ktime_get_ns() - bfqq->ttime.last_end_request;
7202
7203 elapsed = min_t(u64, elapsed, 2ULL * bfqd->bfq_slice_idle);
7204
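	/*
	 * Exponentially decaying averages in fixed point: on each update
	 * 7/8 of the old accumulator is kept and 1/8 of the new
	 * contribution is added. ttime_samples converges to the scale
	 * factor 256, so ttime_mean ends up being the (rounded) decaying
	 * average of the measured think times.
	 */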
7205 ttime->ttime_samples = (7*bfqq->ttime.ttime_samples + 256) / 8;
7206 ttime->ttime_total = div_u64(7*ttime->ttime_total + 256*elapsed, 8);
7207 ttime->ttime_mean = div64_ul(ttime->ttime_total + 128,
7208 ttime->ttime_samples);
7209}
7210
7211static void
7212bfq_update_io_seektime(struct bfq_data *bfqd, struct bfq_queue *bfqq,
7213 struct request *rq)
7214{
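	/*
	 * seek_history is a bit shift register: each new request shifts
	 * in one bit, set when the request is far from the previous one
	 * (sector distance above BFQQ_SEEK_THR); on non-rotational
	 * devices, large requests (at least BFQQ_SECT_THR_NONROT sectors)
	 * are not counted as seeky even if distant. The BFQQ_SEEKY()
	 * checks used elsewhere in this file are based on these bits.
	 */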
aee69d78 7215 bfqq->seek_history <<= 1;
ab0e43e9
PV
7216 bfqq->seek_history |=
7217 get_sdist(bfqq->last_request_pos, rq) > BFQQ_SEEK_THR &&
aee69d78
PV
7218 (!blk_queue_nonrot(bfqd->queue) ||
7219 blk_rq_sectors(rq) < BFQQ_SECT_THR_NONROT);
7220}
7221
7222/*
7223 * Disable idle window if the process thinks too long or seeks so much that
7224 * it doesn't matter.
7225 */
7226static void bfq_update_idle_window(struct bfq_data *bfqd,
7227 struct bfq_queue *bfqq,
7228 struct bfq_io_cq *bic)
7229{
7230 int enable_idle;
7231
7232 /* Don't idle for async or idle io prio class. */
7233 if (!bfq_bfqq_sync(bfqq) || bfq_class_idle(bfqq))
7234 return;
7235
36eca894
AA
7236 /* Idle window just restored, statistics are meaningless. */
7237 if (time_is_after_eq_jiffies(bfqq->split_time +
7238 bfqd->bfq_wr_min_idle_time))
7239 return;
7240
aee69d78
PV
7241 enable_idle = bfq_bfqq_idle_window(bfqq);
7242
7243 if (atomic_read(&bic->icq.ioc->active_ref) == 0 ||
7244 bfqd->bfq_slice_idle == 0 ||
bcd56426
PV
7245 (bfqd->hw_tag && BFQQ_SEEKY(bfqq) &&
7246 bfqq->wr_coeff == 1))
aee69d78
PV
7247 enable_idle = 0;
7248 else if (bfq_sample_valid(bfqq->ttime.ttime_samples)) {
44e44a1b
PV
7249 if (bfqq->ttime.ttime_mean > bfqd->bfq_slice_idle &&
7250 bfqq->wr_coeff == 1)
aee69d78
PV
7251 enable_idle = 0;
7252 else
7253 enable_idle = 1;
7254 }
7255 bfq_log_bfqq(bfqd, bfqq, "update_idle_window: enable_idle %d",
7256 enable_idle);
7257
7258 if (enable_idle)
7259 bfq_mark_bfqq_idle_window(bfqq);
7260 else
7261 bfq_clear_bfqq_idle_window(bfqq);
7262}
7263
7264/*
7265 * Called when a new fs request (rq) is added to bfqq. Check if there's
7266 * something we should do about it.
7267 */
7268static void bfq_rq_enqueued(struct bfq_data *bfqd, struct bfq_queue *bfqq,
7269 struct request *rq)
7270{
7271 struct bfq_io_cq *bic = RQ_BIC(rq);
7272
7273 if (rq->cmd_flags & REQ_META)
7274 bfqq->meta_pending++;
7275
7276 bfq_update_io_thinktime(bfqd, bfqq);
7277 bfq_update_io_seektime(bfqd, bfqq, rq);
7278 if (bfqq->entity.service > bfq_max_budget(bfqd) / 8 ||
7279 !BFQQ_SEEKY(bfqq))
7280 bfq_update_idle_window(bfqd, bfqq, bic);
7281
7282 bfq_log_bfqq(bfqd, bfqq,
7283 "rq_enqueued: idle_window=%d (seeky %d)",
7284 bfq_bfqq_idle_window(bfqq), BFQQ_SEEKY(bfqq));
7285
7286 bfqq->last_request_pos = blk_rq_pos(rq) + blk_rq_sectors(rq);
7287
7288 if (bfqq == bfqd->in_service_queue && bfq_bfqq_wait_request(bfqq)) {
7289 bool small_req = bfqq->queued[rq_is_sync(rq)] == 1 &&
7290 blk_rq_sectors(rq) < 32;
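		/* "small" means fewer than 32 sectors, i.e., 16 KiB with 512-byte sectors */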
7291 bool budget_timeout = bfq_bfqq_budget_timeout(bfqq);
7292
7293 /*
7294 * There is just this request queued: if the request
7295 * is small and the queue is not to be expired, then
7296 * just exit.
7297 *
7298 * In this way, if the device is being idled to wait
7299 * for a new request from the in-service queue, we
7300 * avoid unplugging the device and committing the
 7301		 * device to serve just a small request. Instead,
 7302		 * we wait for the block layer to decide
7303 * when to unplug the device: hopefully, new requests
7304 * will be merged to this one quickly, then the device
7305 * will be unplugged and larger requests will be
7306 * dispatched.
7307 */
7308 if (small_req && !budget_timeout)
7309 return;
7310
7311 /*
7312 * A large enough request arrived, or the queue is to
7313 * be expired: in both cases disk idling is to be
7314 * stopped, so clear wait_request flag and reset
7315 * timer.
7316 */
7317 bfq_clear_bfqq_wait_request(bfqq);
7318 hrtimer_try_to_cancel(&bfqd->idle_slice_timer);
e21b7a0b 7319 bfqg_stats_update_idle_time(bfqq_group(bfqq));
aee69d78
PV
7320
7321 /*
7322 * The queue is not empty, because a new request just
7323 * arrived. Hence we can safely expire the queue, in
7324 * case of budget timeout, without risking that the
7325 * timestamps of the queue are not updated correctly.
7326 * See [1] for more details.
7327 */
7328 if (budget_timeout)
7329 bfq_bfqq_expire(bfqd, bfqq, false,
7330 BFQQE_BUDGET_TIMEOUT);
7331 }
7332}
7333
7334static void __bfq_insert_request(struct bfq_data *bfqd, struct request *rq)
7335{
36eca894
AA
7336 struct bfq_queue *bfqq = RQ_BFQQ(rq),
7337 *new_bfqq = bfq_setup_cooperator(bfqd, bfqq, rq, true);
7338
7339 if (new_bfqq) {
7340 if (bic_to_bfqq(RQ_BIC(rq), 1) != bfqq)
7341 new_bfqq = bic_to_bfqq(RQ_BIC(rq), 1);
7342 /*
7343 * Release the request's reference to the old bfqq
7344 * and make sure one is taken to the shared queue.
7345 */
7346 new_bfqq->allocated++;
7347 bfqq->allocated--;
7348 new_bfqq->ref++;
7349 /*
7350 * If the bic associated with the process
7351 * issuing this request still points to bfqq
7352 * (and thus has not been already redirected
7353 * to new_bfqq or even some other bfq_queue),
7354 * then complete the merge and redirect it to
7355 * new_bfqq.
7356 */
7357 if (bic_to_bfqq(RQ_BIC(rq), 1) == bfqq)
7358 bfq_merge_bfqqs(bfqd, RQ_BIC(rq),
7359 bfqq, new_bfqq);
7360 /*
7361 * rq is about to be enqueued into new_bfqq,
7362 * release rq reference on bfqq
7363 */
7364 bfq_put_queue(bfqq);
7365 rq->elv.priv[1] = new_bfqq;
7366 bfqq = new_bfqq;
7367 }
aee69d78
PV
7368
7369 bfq_add_request(rq);
7370
7371 rq->fifo_time = ktime_get_ns() + bfqd->bfq_fifo_expire[rq_is_sync(rq)];
7372 list_add_tail(&rq->queuelist, &bfqq->fifo);
7373
7374 bfq_rq_enqueued(bfqd, bfqq, rq);
7375}
7376
7377static void bfq_insert_request(struct blk_mq_hw_ctx *hctx, struct request *rq,
7378 bool at_head)
7379{
7380 struct request_queue *q = hctx->queue;
7381 struct bfq_data *bfqd = q->elevator->elevator_data;
7382
7383 spin_lock_irq(&bfqd->lock);
7384 if (blk_mq_sched_try_insert_merge(q, rq)) {
7385 spin_unlock_irq(&bfqd->lock);
7386 return;
7387 }
7388
7389 spin_unlock_irq(&bfqd->lock);
7390
7391 blk_mq_sched_request_inserted(rq);
7392
7393 spin_lock_irq(&bfqd->lock);
7394 if (at_head || blk_rq_is_passthrough(rq)) {
7395 if (at_head)
7396 list_add(&rq->queuelist, &bfqd->dispatch);
7397 else
7398 list_add_tail(&rq->queuelist, &bfqd->dispatch);
7399 } else {
7400 __bfq_insert_request(bfqd, rq);
7401
7402 if (rq_mergeable(rq)) {
7403 elv_rqhash_add(q, rq);
7404 if (!q->last_merge)
7405 q->last_merge = rq;
7406 }
7407 }
7408
36eca894 7409 bfq_unlock_put_ioc(bfqd);
aee69d78
PV
7410}
7411
7412static void bfq_insert_requests(struct blk_mq_hw_ctx *hctx,
7413 struct list_head *list, bool at_head)
7414{
7415 while (!list_empty(list)) {
7416 struct request *rq;
7417
7418 rq = list_first_entry(list, struct request, queuelist);
7419 list_del_init(&rq->queuelist);
7420 bfq_insert_request(hctx, rq, at_head);
7421 }
7422}
7423
7424static void bfq_update_hw_tag(struct bfq_data *bfqd)
7425{
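	/*
	 * bfqd->hw_tag is three-valued: -1 (the value set at init time)
	 * while still undetermined, then 0 or 1 once enough samples with
	 * a sufficient number of outstanding requests have been
	 * collected. Once it becomes 1 it is not re-evaluated here.
	 */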
7426 bfqd->max_rq_in_driver = max_t(int, bfqd->max_rq_in_driver,
7427 bfqd->rq_in_driver);
7428
7429 if (bfqd->hw_tag == 1)
7430 return;
7431
7432 /*
7433 * This sample is valid if the number of outstanding requests
7434 * is large enough to allow a queueing behavior. Note that the
7435 * sum is not exact, as it's not taking into account deactivated
7436 * requests.
7437 */
7438 if (bfqd->rq_in_driver + bfqd->queued < BFQ_HW_QUEUE_THRESHOLD)
7439 return;
7440
7441 if (bfqd->hw_tag_samples++ < BFQ_HW_QUEUE_SAMPLES)
7442 return;
7443
7444 bfqd->hw_tag = bfqd->max_rq_in_driver > BFQ_HW_QUEUE_THRESHOLD;
7445 bfqd->max_rq_in_driver = 0;
7446 bfqd->hw_tag_samples = 0;
7447}
7448
7449static void bfq_completed_request(struct bfq_queue *bfqq, struct bfq_data *bfqd)
7450{
ab0e43e9
PV
7451 u64 now_ns;
7452 u32 delta_us;
7453
aee69d78
PV
7454 bfq_update_hw_tag(bfqd);
7455
7456 bfqd->rq_in_driver--;
7457 bfqq->dispatched--;
7458
44e44a1b
PV
7459 if (!bfqq->dispatched && !bfq_bfqq_busy(bfqq)) {
7460 /*
7461 * Set budget_timeout (which we overload to store the
7462 * time at which the queue remains with no backlog and
7463 * no outstanding request; used by the weight-raising
7464 * mechanism).
7465 */
7466 bfqq->budget_timeout = jiffies;
1de0c4cd
AA
7467
7468 bfq_weights_tree_remove(bfqd, &bfqq->entity,
7469 &bfqd->queue_weights_tree);
44e44a1b
PV
7470 }
7471
ab0e43e9
PV
7472 now_ns = ktime_get_ns();
7473
7474 bfqq->ttime.last_end_request = now_ns;
7475
7476 /*
7477 * Using us instead of ns, to get a reasonable precision in
7478 * computing rate in next check.
7479 */
7480 delta_us = div_u64(now_ns - bfqd->last_completion, NSEC_PER_USEC);
7481
7482 /*
7483 * If the request took rather long to complete, and, according
7484 * to the maximum request size recorded, this completion latency
7485 * implies that the request was certainly served at a very low
7486 * rate (less than 1M sectors/sec), then the whole observation
7487 * interval that lasts up to this time instant cannot be a
7488 * valid time interval for computing a new peak rate. Invoke
7489 * bfq_update_rate_reset to have the following three steps
7490 * taken:
7491 * - close the observation interval at the last (previous)
7492 * request dispatch or completion
7493 * - compute rate, if possible, for that observation interval
7494 * - reset to zero samples, which will trigger a proper
7495 * re-initialization of the observation interval on next
7496 * dispatch
7497 */
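	/*
	 * The threshold below is the fixed-point encoding of 2^-10
	 * sectors/usec, i.e., roughly 10^6 sectors/sec (about 500 MB/s
	 * assuming 512-byte sectors): that is the "very low rate"
	 * mentioned above.
	 */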
7498 if (delta_us > BFQ_MIN_TT/NSEC_PER_USEC &&
7499 (bfqd->last_rq_max_size<<BFQ_RATE_SHIFT)/delta_us <
7500 1UL<<(BFQ_RATE_SHIFT - 10))
7501 bfq_update_rate_reset(bfqd, NULL);
7502 bfqd->last_completion = now_ns;
aee69d78 7503
77b7dcea
PV
7504 /*
7505 * If we are waiting to discover whether the request pattern
7506 * of the task associated with the queue is actually
7507 * isochronous, and both requisites for this condition to hold
7508 * are now satisfied, then compute soft_rt_next_start (see the
7509 * comments on the function bfq_bfqq_softrt_next_start()). We
7510 * schedule this delayed check when bfqq expires, if it still
7511 * has in-flight requests.
7512 */
7513 if (bfq_bfqq_softrt_update(bfqq) && bfqq->dispatched == 0 &&
7514 RB_EMPTY_ROOT(&bfqq->sort_list))
7515 bfqq->soft_rt_next_start =
7516 bfq_bfqq_softrt_next_start(bfqd, bfqq);
7517
aee69d78
PV
7518 /*
7519 * If this is the in-service queue, check if it needs to be expired,
7520 * or if we want to idle in case it has no pending requests.
7521 */
7522 if (bfqd->in_service_queue == bfqq) {
44e44a1b 7523 if (bfqq->dispatched == 0 && bfq_bfqq_must_idle(bfqq)) {
aee69d78
PV
7524 bfq_arm_slice_timer(bfqd);
7525 return;
7526 } else if (bfq_may_expire_for_budg_timeout(bfqq))
7527 bfq_bfqq_expire(bfqd, bfqq, false,
7528 BFQQE_BUDGET_TIMEOUT);
7529 else if (RB_EMPTY_ROOT(&bfqq->sort_list) &&
7530 (bfqq->dispatched == 0 ||
7531 !bfq_bfqq_may_idle(bfqq)))
7532 bfq_bfqq_expire(bfqd, bfqq, false,
7533 BFQQE_NO_MORE_REQUESTS);
7534 }
7535}
7536
7537static void bfq_put_rq_priv_body(struct bfq_queue *bfqq)
7538{
7539 bfqq->allocated--;
7540
7541 bfq_put_queue(bfqq);
7542}
7543
7544static void bfq_put_rq_private(struct request_queue *q, struct request *rq)
7545{
7546 struct bfq_queue *bfqq = RQ_BFQQ(rq);
7547 struct bfq_data *bfqd = bfqq->bfqd;
7548
e21b7a0b
AA
7549 if (rq->rq_flags & RQF_STARTED)
7550 bfqg_stats_update_completion(bfqq_group(bfqq),
7551 rq_start_time_ns(rq),
7552 rq_io_start_time_ns(rq),
7553 rq->cmd_flags);
aee69d78
PV
7554
7555 if (likely(rq->rq_flags & RQF_STARTED)) {
7556 unsigned long flags;
7557
7558 spin_lock_irqsave(&bfqd->lock, flags);
7559
7560 bfq_completed_request(bfqq, bfqd);
7561 bfq_put_rq_priv_body(bfqq);
7562
36eca894 7563 bfq_unlock_put_ioc_restore(bfqd, flags);
aee69d78
PV
7564 } else {
7565 /*
7566 * Request rq may be still/already in the scheduler,
7567 * in which case we need to remove it. And we cannot
7568 * defer such a check and removal, to avoid
7569 * inconsistencies in the time interval from the end
7570 * of this function to the start of the deferred work.
7571 * This situation seems to occur only in process
7572 * context, as a consequence of a merge. In the
7573 * current version of the code, this implies that the
7574 * lock is held.
7575 */
7576
7577 if (!RB_EMPTY_NODE(&rq->rb_node))
7578 bfq_remove_request(q, rq);
7579 bfq_put_rq_priv_body(bfqq);
7580 }
7581
7582 rq->elv.priv[0] = NULL;
7583 rq->elv.priv[1] = NULL;
7584}
7585
36eca894
AA
7586/*
7587 * Returns NULL if a new bfqq should be allocated, or the old bfqq if this
7588 * was the last process referring to that bfqq.
7589 */
7590static struct bfq_queue *
7591bfq_split_bfqq(struct bfq_io_cq *bic, struct bfq_queue *bfqq)
7592{
7593 bfq_log_bfqq(bfqq->bfqd, bfqq, "splitting queue");
7594
7595 if (bfqq_process_refs(bfqq) == 1) {
7596 bfqq->pid = current->pid;
7597 bfq_clear_bfqq_coop(bfqq);
7598 bfq_clear_bfqq_split_coop(bfqq);
7599 return bfqq;
7600 }
7601
7602 bic_set_bfqq(bic, NULL, 1);
7603
7604 bfq_put_cooperator(bfqq);
7605
7606 bfq_put_queue(bfqq);
7607 return NULL;
7608}
7609
7610static struct bfq_queue *bfq_get_bfqq_handle_split(struct bfq_data *bfqd,
7611 struct bfq_io_cq *bic,
7612 struct bio *bio,
7613 bool split, bool is_sync,
7614 bool *new_queue)
7615{
7616 struct bfq_queue *bfqq = bic_to_bfqq(bic, is_sync);
7617
7618 if (likely(bfqq && bfqq != &bfqd->oom_bfqq))
7619 return bfqq;
7620
7621 if (new_queue)
7622 *new_queue = true;
7623
7624 if (bfqq)
7625 bfq_put_queue(bfqq);
7626 bfqq = bfq_get_queue(bfqd, bio, is_sync, bic);
7627
7628 bic_set_bfqq(bic, bfqq, is_sync);
7629 if (split && is_sync)
7630 bfqq->split_time = jiffies;
7631
7632 return bfqq;
7633}
7634
aee69d78
PV
7635/*
7636 * Allocate bfq data structures associated with this request.
7637 */
7638static int bfq_get_rq_private(struct request_queue *q, struct request *rq,
7639 struct bio *bio)
7640{
7641 struct bfq_data *bfqd = q->elevator->elevator_data;
7642 struct bfq_io_cq *bic = icq_to_bic(rq->elv.icq);
7643 const int is_sync = rq_is_sync(rq);
7644 struct bfq_queue *bfqq;
36eca894 7645 bool new_queue = false;
aee69d78
PV
7646
7647 spin_lock_irq(&bfqd->lock);
7648
7649 bfq_check_ioprio_change(bic, bio);
7650
7651 if (!bic)
7652 goto queue_fail;
7653
e21b7a0b
AA
7654 bfq_bic_update_cgroup(bic, bio);
7655
36eca894
AA
7656 bfqq = bfq_get_bfqq_handle_split(bfqd, bic, bio, false, is_sync,
7657 &new_queue);
7658
7659 if (likely(!new_queue)) {
7660 /* If the queue was seeky for too long, break it apart. */
7661 if (bfq_bfqq_coop(bfqq) && bfq_bfqq_split_coop(bfqq)) {
7662 bfq_log_bfqq(bfqd, bfqq, "breaking apart bfqq");
7663 bfqq = bfq_split_bfqq(bic, bfqq);
7664 /*
7665 * A reference to bic->icq.ioc needs to be
7666 * released after a queue split. Do not do it
7667 * immediately, to not risk to possibly take
7668 * an ioc->lock while holding the scheduler
7669 * lock.
7670 */
7671 bfqd->ioc_to_put = bic->icq.ioc;
7672
7673 if (!bfqq)
7674 bfqq = bfq_get_bfqq_handle_split(bfqd, bic, bio,
7675 true, is_sync,
7676 NULL);
7677 }
aee69d78
PV
7678 }
7679
7680 bfqq->allocated++;
7681 bfqq->ref++;
7682 bfq_log_bfqq(bfqd, bfqq, "get_request %p: bfqq %p, %d",
7683 rq, bfqq, bfqq->ref);
7684
7685 rq->elv.priv[0] = bic;
7686 rq->elv.priv[1] = bfqq;
7687
36eca894
AA
7688 /*
7689 * If a bfq_queue has only one process reference, it is owned
 7690	 * by only this bic: we can then set bfqq->bic = bic. In
 7691	 * addition, if the queue has also just been split, we have to
7692 * resume its state.
7693 */
7694 if (likely(bfqq != &bfqd->oom_bfqq) && bfqq_process_refs(bfqq) == 1) {
7695 bfqq->bic = bic;
7696 if (bfqd->ioc_to_put) { /* if true, there has been a split */
7697 /*
7698 * The queue has just been split from a shared
7699 * queue: restore the idle window and the
7700 * possible weight raising period.
7701 */
7702 bfq_bfqq_resume_state(bfqq, bic);
7703 }
7704 }
7705
7706 bfq_unlock_put_ioc(bfqd);
aee69d78
PV
7707
7708 return 0;
7709
7710queue_fail:
7711 spin_unlock_irq(&bfqd->lock);
7712
7713 return 1;
7714}
7715
7716static void bfq_idle_slice_timer_body(struct bfq_queue *bfqq)
7717{
7718 struct bfq_data *bfqd = bfqq->bfqd;
7719 enum bfqq_expiration reason;
7720 unsigned long flags;
7721
7722 spin_lock_irqsave(&bfqd->lock, flags);
7723 bfq_clear_bfqq_wait_request(bfqq);
7724
7725 if (bfqq != bfqd->in_service_queue) {
7726 spin_unlock_irqrestore(&bfqd->lock, flags);
7727 return;
7728 }
7729
7730 if (bfq_bfqq_budget_timeout(bfqq))
7731 /*
7732 * Also here the queue can be safely expired
7733 * for budget timeout without wasting
7734 * guarantees
7735 */
7736 reason = BFQQE_BUDGET_TIMEOUT;
7737 else if (bfqq->queued[0] == 0 && bfqq->queued[1] == 0)
7738 /*
7739 * The queue may not be empty upon timer expiration,
7740 * because we may not disable the timer when the
7741 * first request of the in-service queue arrives
7742 * during disk idling.
7743 */
7744 reason = BFQQE_TOO_IDLE;
7745 else
7746 goto schedule_dispatch;
7747
7748 bfq_bfqq_expire(bfqd, bfqq, true, reason);
7749
7750schedule_dispatch:
36eca894 7751 bfq_unlock_put_ioc_restore(bfqd, flags);
aee69d78
PV
7752 bfq_schedule_dispatch(bfqd);
7753}
7754
7755/*
7756 * Handler of the expiration of the timer running if the in-service queue
7757 * is idling inside its time slice.
7758 */
7759static enum hrtimer_restart bfq_idle_slice_timer(struct hrtimer *timer)
7760{
7761 struct bfq_data *bfqd = container_of(timer, struct bfq_data,
7762 idle_slice_timer);
7763 struct bfq_queue *bfqq = bfqd->in_service_queue;
7764
7765 /*
7766 * Theoretical race here: the in-service queue can be NULL or
7767 * different from the queue that was idling if a new request
7768 * arrives for the current queue and there is a full dispatch
7769 * cycle that changes the in-service queue. This can hardly
7770 * happen, but in the worst case we just expire a queue too
7771 * early.
7772 */
7773 if (bfqq)
7774 bfq_idle_slice_timer_body(bfqq);
7775
7776 return HRTIMER_NORESTART;
7777}
7778
7779static void __bfq_put_async_bfqq(struct bfq_data *bfqd,
7780 struct bfq_queue **bfqq_ptr)
7781{
7782 struct bfq_queue *bfqq = *bfqq_ptr;
7783
7784 bfq_log(bfqd, "put_async_bfqq: %p", bfqq);
7785 if (bfqq) {
e21b7a0b
AA
7786 bfq_bfqq_move(bfqd, bfqq, bfqd->root_group);
7787
aee69d78
PV
7788 bfq_log_bfqq(bfqd, bfqq, "put_async_bfqq: putting %p, %d",
7789 bfqq, bfqq->ref);
7790 bfq_put_queue(bfqq);
7791 *bfqq_ptr = NULL;
7792 }
7793}
7794
7795/*
e21b7a0b
AA
7796 * Release all the bfqg references to its async queues. If we are
7797 * deallocating the group these queues may still contain requests, so
7798 * we reparent them to the root cgroup (i.e., the only one that will
7799 * exist for sure until all the requests on a device are gone).
aee69d78 7800 */
e21b7a0b 7801static void bfq_put_async_queues(struct bfq_data *bfqd, struct bfq_group *bfqg)
aee69d78
PV
7802{
7803 int i, j;
7804
7805 for (i = 0; i < 2; i++)
7806 for (j = 0; j < IOPRIO_BE_NR; j++)
e21b7a0b 7807 __bfq_put_async_bfqq(bfqd, &bfqg->async_bfqq[i][j]);
aee69d78 7808
e21b7a0b 7809 __bfq_put_async_bfqq(bfqd, &bfqg->async_idle_bfqq);
aee69d78
PV
7810}
7811
7812static void bfq_exit_queue(struct elevator_queue *e)
7813{
7814 struct bfq_data *bfqd = e->elevator_data;
7815 struct bfq_queue *bfqq, *n;
7816
7817 hrtimer_cancel(&bfqd->idle_slice_timer);
7818
7819 spin_lock_irq(&bfqd->lock);
7820 list_for_each_entry_safe(bfqq, n, &bfqd->idle_list, bfqq_list)
e21b7a0b 7821 bfq_deactivate_bfqq(bfqd, bfqq, false, false);
aee69d78
PV
7822 spin_unlock_irq(&bfqd->lock);
7823
7824 hrtimer_cancel(&bfqd->idle_slice_timer);
7825
e21b7a0b
AA
7826#ifdef CONFIG_BFQ_GROUP_IOSCHED
7827 blkcg_deactivate_policy(bfqd->queue, &blkcg_policy_bfq);
7828#else
7829 spin_lock_irq(&bfqd->lock);
7830 bfq_put_async_queues(bfqd, bfqd->root_group);
7831 kfree(bfqd->root_group);
7832 spin_unlock_irq(&bfqd->lock);
7833#endif
7834
aee69d78
PV
7835 kfree(bfqd);
7836}
7837
e21b7a0b
AA
7838static void bfq_init_root_group(struct bfq_group *root_group,
7839 struct bfq_data *bfqd)
7840{
7841 int i;
7842
7843#ifdef CONFIG_BFQ_GROUP_IOSCHED
7844 root_group->entity.parent = NULL;
7845 root_group->my_entity = NULL;
7846 root_group->bfqd = bfqd;
7847#endif
36eca894 7848 root_group->rq_pos_tree = RB_ROOT;
e21b7a0b
AA
7849 for (i = 0; i < BFQ_IOPRIO_CLASSES; i++)
7850 root_group->sched_data.service_tree[i] = BFQ_SERVICE_TREE_INIT;
7851 root_group->sched_data.bfq_class_idle_last_service = jiffies;
7852}
7853
aee69d78
PV
7854static int bfq_init_queue(struct request_queue *q, struct elevator_type *e)
7855{
7856 struct bfq_data *bfqd;
7857 struct elevator_queue *eq;
aee69d78
PV
7858
7859 eq = elevator_alloc(q, e);
7860 if (!eq)
7861 return -ENOMEM;
7862
7863 bfqd = kzalloc_node(sizeof(*bfqd), GFP_KERNEL, q->node);
7864 if (!bfqd) {
7865 kobject_put(&eq->kobj);
7866 return -ENOMEM;
7867 }
7868 eq->elevator_data = bfqd;
7869
e21b7a0b
AA
7870 spin_lock_irq(q->queue_lock);
7871 q->elevator = eq;
7872 spin_unlock_irq(q->queue_lock);
7873
aee69d78
PV
7874 /*
7875 * Our fallback bfqq if bfq_find_alloc_queue() runs into OOM issues.
7876 * Grab a permanent reference to it, so that the normal code flow
7877 * will not attempt to free it.
7878 */
7879 bfq_init_bfqq(bfqd, &bfqd->oom_bfqq, NULL, 1, 0);
7880 bfqd->oom_bfqq.ref++;
7881 bfqd->oom_bfqq.new_ioprio = BFQ_DEFAULT_QUEUE_IOPRIO;
7882 bfqd->oom_bfqq.new_ioprio_class = IOPRIO_CLASS_BE;
7883 bfqd->oom_bfqq.entity.new_weight =
7884 bfq_ioprio_to_weight(bfqd->oom_bfqq.new_ioprio);
7885 /*
7886 * Trigger weight initialization, according to ioprio, at the
7887 * oom_bfqq's first activation. The oom_bfqq's ioprio and ioprio
7888 * class won't be changed any more.
7889 */
7890 bfqd->oom_bfqq.entity.prio_changed = 1;
7891
7892 bfqd->queue = q;
7893
e21b7a0b 7894 INIT_LIST_HEAD(&bfqd->dispatch);
aee69d78
PV
7895
7896 hrtimer_init(&bfqd->idle_slice_timer, CLOCK_MONOTONIC,
7897 HRTIMER_MODE_REL);
7898 bfqd->idle_slice_timer.function = bfq_idle_slice_timer;
7899
1de0c4cd
AA
7900 bfqd->queue_weights_tree = RB_ROOT;
7901 bfqd->group_weights_tree = RB_ROOT;
7902
aee69d78
PV
7903 INIT_LIST_HEAD(&bfqd->active_list);
7904 INIT_LIST_HEAD(&bfqd->idle_list);
7905
7906 bfqd->hw_tag = -1;
7907
7908 bfqd->bfq_max_budget = bfq_default_max_budget;
7909
7910 bfqd->bfq_fifo_expire[0] = bfq_fifo_expire[0];
7911 bfqd->bfq_fifo_expire[1] = bfq_fifo_expire[1];
7912 bfqd->bfq_back_max = bfq_back_max;
7913 bfqd->bfq_back_penalty = bfq_back_penalty;
7914 bfqd->bfq_slice_idle = bfq_slice_idle;
aee69d78
PV
7915 bfqd->bfq_timeout = bfq_timeout;
7916
7917 bfqd->bfq_requests_within_timer = 120;
7918
44e44a1b
PV
7919 bfqd->low_latency = true;
7920
7921 /*
7922 * Trade-off between responsiveness and fairness.
7923 */
7924 bfqd->bfq_wr_coeff = 30;
77b7dcea 7925 bfqd->bfq_wr_rt_max_time = msecs_to_jiffies(300);
44e44a1b
PV
7926 bfqd->bfq_wr_max_time = 0;
7927 bfqd->bfq_wr_min_idle_time = msecs_to_jiffies(2000);
7928 bfqd->bfq_wr_min_inter_arr_async = msecs_to_jiffies(500);
77b7dcea
PV
7929 bfqd->bfq_wr_max_softrt_rate = 7000; /*
7930 * Approximate rate required
 7931					      * to play back or record a
7932 * high-definition compressed
7933 * video.
7934 */
cfd69712 7935 bfqd->wr_busy_queues = 0;
44e44a1b
PV
7936
7937 /*
7938 * Begin by assuming, optimistically, that the device is a
7939 * high-speed one, and that its peak rate is equal to 2/3 of
7940 * the highest reference rate.
7941 */
7942 bfqd->RT_prod = R_fast[blk_queue_nonrot(bfqd->queue)] *
7943 T_fast[blk_queue_nonrot(bfqd->queue)];
7944 bfqd->peak_rate = R_fast[blk_queue_nonrot(bfqd->queue)] * 2 / 3;
7945 bfqd->device_speed = BFQ_BFQD_FAST;
7946
aee69d78 7947 spin_lock_init(&bfqd->lock);
aee69d78 7948
e21b7a0b
AA
7949 /*
7950 * The invocation of the next bfq_create_group_hierarchy
7951 * function is the head of a chain of function calls
7952 * (bfq_create_group_hierarchy->blkcg_activate_policy->
7953 * blk_mq_freeze_queue) that may lead to the invocation of the
7954 * has_work hook function. For this reason,
7955 * bfq_create_group_hierarchy is invoked only after all
7956 * scheduler data has been initialized, apart from the fields
7957 * that can be initialized only after invoking
7958 * bfq_create_group_hierarchy. This, in particular, enables
7959 * has_work to correctly return false. Of course, to avoid
7960 * other inconsistencies, the blk-mq stack must then refrain
7961 * from invoking further scheduler hooks before this init
7962 * function is finished.
7963 */
7964 bfqd->root_group = bfq_create_group_hierarchy(bfqd, q->node);
7965 if (!bfqd->root_group)
7966 goto out_free;
7967 bfq_init_root_group(bfqd->root_group, bfqd);
7968 bfq_init_entity(&bfqd->oom_bfqq.entity, bfqd->root_group);
7969
aee69d78
PV
7970
7971 return 0;
e21b7a0b
AA
7972
7973out_free:
7974 kfree(bfqd);
7975 kobject_put(&eq->kobj);
7976 return -ENOMEM;
aee69d78
PV
7977}
7978
7979static void bfq_slab_kill(void)
7980{
7981 kmem_cache_destroy(bfq_pool);
7982}
7983
7984static int __init bfq_slab_setup(void)
7985{
7986 bfq_pool = KMEM_CACHE(bfq_queue, 0);
7987 if (!bfq_pool)
7988 return -ENOMEM;
7989 return 0;
7990}
7991
7992static ssize_t bfq_var_show(unsigned int var, char *page)
7993{
7994 return sprintf(page, "%u\n", var);
7995}
7996
7997static ssize_t bfq_var_store(unsigned long *var, const char *page,
7998 size_t count)
7999{
8000 unsigned long new_val;
8001 int ret = kstrtoul(page, 10, &new_val);
8002
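	/*
	 * On a parse error the old value is silently kept, but the whole
	 * write is still reported as consumed.
	 */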
8003 if (ret == 0)
8004 *var = new_val;
8005
8006 return count;
8007}
8008
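/*
 * The show/store helpers below back the sysfs tunables listed in
 * bfq_attrs at the end of this file; when bfq is the active scheduler
 * of a queue, they are typically exposed under
 * /sys/block/<dev>/queue/iosched/ (e.g. slice_idle, max_budget,
 * low_latency). The __CONV argument selects the unit conversion:
 * 0 = none, 1 = jiffies <-> milliseconds, 2 = nanoseconds <->
 * milliseconds.
 */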
8009#define SHOW_FUNCTION(__FUNC, __VAR, __CONV) \
8010static ssize_t __FUNC(struct elevator_queue *e, char *page) \
8011{ \
8012 struct bfq_data *bfqd = e->elevator_data; \
8013 u64 __data = __VAR; \
8014 if (__CONV == 1) \
8015 __data = jiffies_to_msecs(__data); \
8016 else if (__CONV == 2) \
8017 __data = div_u64(__data, NSEC_PER_MSEC); \
8018 return bfq_var_show(__data, (page)); \
8019}
8020SHOW_FUNCTION(bfq_fifo_expire_sync_show, bfqd->bfq_fifo_expire[1], 2);
8021SHOW_FUNCTION(bfq_fifo_expire_async_show, bfqd->bfq_fifo_expire[0], 2);
8022SHOW_FUNCTION(bfq_back_seek_max_show, bfqd->bfq_back_max, 0);
8023SHOW_FUNCTION(bfq_back_seek_penalty_show, bfqd->bfq_back_penalty, 0);
8024SHOW_FUNCTION(bfq_slice_idle_show, bfqd->bfq_slice_idle, 2);
8025SHOW_FUNCTION(bfq_max_budget_show, bfqd->bfq_user_max_budget, 0);
8026SHOW_FUNCTION(bfq_timeout_sync_show, bfqd->bfq_timeout, 1);
8027SHOW_FUNCTION(bfq_strict_guarantees_show, bfqd->strict_guarantees, 0);
44e44a1b 8028SHOW_FUNCTION(bfq_low_latency_show, bfqd->low_latency, 0);
aee69d78
PV
8029#undef SHOW_FUNCTION
8030
8031#define USEC_SHOW_FUNCTION(__FUNC, __VAR) \
8032static ssize_t __FUNC(struct elevator_queue *e, char *page) \
8033{ \
8034 struct bfq_data *bfqd = e->elevator_data; \
8035 u64 __data = __VAR; \
8036 __data = div_u64(__data, NSEC_PER_USEC); \
8037 return bfq_var_show(__data, (page)); \
8038}
8039USEC_SHOW_FUNCTION(bfq_slice_idle_us_show, bfqd->bfq_slice_idle);
8040#undef USEC_SHOW_FUNCTION
8041
#define STORE_FUNCTION(__FUNC, __PTR, MIN, MAX, __CONV) \
static ssize_t \
__FUNC(struct elevator_queue *e, const char *page, size_t count) \
{ \
	struct bfq_data *bfqd = e->elevator_data; \
	unsigned long uninitialized_var(__data); \
	int ret = bfq_var_store(&__data, (page), count); \
	if (__data < (MIN)) \
		__data = (MIN); \
	else if (__data > (MAX)) \
		__data = (MAX); \
	if (__CONV == 1) \
		*(__PTR) = msecs_to_jiffies(__data); \
	else if (__CONV == 2) \
		*(__PTR) = (u64)__data * NSEC_PER_MSEC; \
	else \
		*(__PTR) = __data; \
	return ret; \
}
STORE_FUNCTION(bfq_fifo_expire_sync_store, &bfqd->bfq_fifo_expire[1], 1,
		INT_MAX, 2);
STORE_FUNCTION(bfq_fifo_expire_async_store, &bfqd->bfq_fifo_expire[0], 1,
		INT_MAX, 2);
STORE_FUNCTION(bfq_back_seek_max_store, &bfqd->bfq_back_max, 0, INT_MAX, 0);
STORE_FUNCTION(bfq_back_seek_penalty_store, &bfqd->bfq_back_penalty, 1,
		INT_MAX, 0);
STORE_FUNCTION(bfq_slice_idle_store, &bfqd->bfq_slice_idle, 0, INT_MAX, 2);
#undef STORE_FUNCTION

#define USEC_STORE_FUNCTION(__FUNC, __PTR, MIN, MAX) \
static ssize_t __FUNC(struct elevator_queue *e, const char *page, size_t count)\
{ \
	struct bfq_data *bfqd = e->elevator_data; \
	unsigned long uninitialized_var(__data); \
	int ret = bfq_var_store(&__data, (page), count); \
	if (__data < (MIN)) \
		__data = (MIN); \
	else if (__data > (MAX)) \
		__data = (MAX); \
	*(__PTR) = (u64)__data * NSEC_PER_USEC; \
	return ret; \
}
USEC_STORE_FUNCTION(bfq_slice_idle_us_store, &bfqd->bfq_slice_idle, 0,
		    UINT_MAX);
#undef USEC_STORE_FUNCTION
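/*
 * The slice_idle_us variants above expose the same bfqd->bfq_slice_idle
 * field as slice_idle, only at microsecond instead of millisecond
 * granularity; writing 0 through either attribute effectively disables
 * idling.
 */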

static ssize_t bfq_max_budget_store(struct elevator_queue *e,
				    const char *page, size_t count)
{
	struct bfq_data *bfqd = e->elevator_data;
	unsigned long uninitialized_var(__data);
	int ret = bfq_var_store(&__data, (page), count);

	if (__data == 0)
		bfqd->bfq_max_budget = bfq_calc_max_budget(bfqd);
	else {
		if (__data > INT_MAX)
			__data = INT_MAX;
		bfqd->bfq_max_budget = __data;
	}

	bfqd->bfq_user_max_budget = __data;

	return ret;
}
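/*
 * Note: bfq_user_max_budget records the last value written by the user;
 * 0 means "auto", in which case the effective budget is recomputed with
 * bfq_calc_max_budget() from the estimated peak rate and the timeout.
 */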

/*
 * Leaving this name to preserve name compatibility with cfq
 * parameters, but this timeout is used for both sync and async.
 */
static ssize_t bfq_timeout_sync_store(struct elevator_queue *e,
				      const char *page, size_t count)
{
	struct bfq_data *bfqd = e->elevator_data;
	unsigned long uninitialized_var(__data);
	int ret = bfq_var_store(&__data, (page), count);

	if (__data < 1)
		__data = 1;
	else if (__data > INT_MAX)
		__data = INT_MAX;

	bfqd->bfq_timeout = msecs_to_jiffies(__data);
	if (bfqd->bfq_user_max_budget == 0)
		bfqd->bfq_max_budget = bfq_calc_max_budget(bfqd);

	return ret;
}
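/*
 * Changing timeout_sync also refreshes the auto-computed max budget, but
 * only if the user has not pinned an explicit max_budget
 * (bfq_user_max_budget != 0).
 */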

static ssize_t bfq_strict_guarantees_store(struct elevator_queue *e,
					   const char *page, size_t count)
{
	struct bfq_data *bfqd = e->elevator_data;
	unsigned long uninitialized_var(__data);
	int ret = bfq_var_store(&__data, (page), count);

	if (__data > 1)
		__data = 1;
	if (!bfqd->strict_guarantees && __data == 1
	    && bfqd->bfq_slice_idle < 8 * NSEC_PER_MSEC)
		bfqd->bfq_slice_idle = 8 * NSEC_PER_MSEC;

	bfqd->strict_guarantees = __data;

	return ret;
}
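/*
 * Enabling strict_guarantees also raises slice_idle to at least 8 ms, so
 * that idling, on which the stronger service guarantees rely, is never
 * effectively disabled.
 */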

static ssize_t bfq_low_latency_store(struct elevator_queue *e,
				     const char *page, size_t count)
{
	struct bfq_data *bfqd = e->elevator_data;
	unsigned long uninitialized_var(__data);
	int ret = bfq_var_store(&__data, (page), count);

	if (__data > 1)
		__data = 1;
	if (__data == 0 && bfqd->low_latency != 0)
		bfq_end_wr(bfqd);
	bfqd->low_latency = __data;

	return ret;
}
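/*
 * Turning low_latency off also terminates any weight raising currently in
 * progress (bfq_end_wr()), since weight raising is the mechanism behind
 * the low-latency heuristics.
 */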

#define BFQ_ATTR(name) \
	__ATTR(name, 0644, bfq_##name##_show, bfq_##name##_store)

static struct elv_fs_entry bfq_attrs[] = {
	BFQ_ATTR(fifo_expire_sync),
	BFQ_ATTR(fifo_expire_async),
	BFQ_ATTR(back_seek_max),
	BFQ_ATTR(back_seek_penalty),
	BFQ_ATTR(slice_idle),
	BFQ_ATTR(slice_idle_us),
	BFQ_ATTR(max_budget),
	BFQ_ATTR(timeout_sync),
	BFQ_ATTR(strict_guarantees),
	BFQ_ATTR(low_latency),
	__ATTR_NULL
};
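/*
 * The attributes above are exposed by the elevator core under
 * /sys/block/<dev>/queue/iosched/ (with <dev> a placeholder for the
 * device name) while bfq is the active scheduler for that queue.
 */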

static struct elevator_type iosched_bfq_mq = {
	.ops.mq = {
		.get_rq_priv = bfq_get_rq_private,
		.put_rq_priv = bfq_put_rq_private,
		.exit_icq = bfq_exit_icq,
		.insert_requests = bfq_insert_requests,
		.dispatch_request = bfq_dispatch_request,
		.next_request = elv_rb_latter_request,
		.former_request = elv_rb_former_request,
		.allow_merge = bfq_allow_bio_merge,
		.bio_merge = bfq_bio_merge,
		.request_merge = bfq_request_merge,
		.requests_merged = bfq_requests_merged,
		.request_merged = bfq_request_merged,
		.has_work = bfq_has_work,
		.init_sched = bfq_init_queue,
		.exit_sched = bfq_exit_queue,
	},

	.uses_mq = true,
	.icq_size = sizeof(struct bfq_io_cq),
	.icq_align = __alignof__(struct bfq_io_cq),
	.elevator_attrs = bfq_attrs,
	.elevator_name = "bfq",
	.elevator_owner = THIS_MODULE,
};
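/*
 * With this descriptor registered, bfq can be selected on a blk-mq queue,
 * e.g. with: echo bfq > /sys/block/<dev>/queue/scheduler
 * (<dev> being a placeholder for the device name).
 */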

#ifdef CONFIG_BFQ_GROUP_IOSCHED
static struct blkcg_policy blkcg_policy_bfq = {
	.dfl_cftypes = bfq_blkg_files,
	.legacy_cftypes = bfq_blkcg_legacy_files,

	.cpd_alloc_fn = bfq_cpd_alloc,
	.cpd_init_fn = bfq_cpd_init,
	.cpd_bind_fn = bfq_cpd_init,
	.cpd_free_fn = bfq_cpd_free,

	.pd_alloc_fn = bfq_pd_alloc,
	.pd_init_fn = bfq_pd_init,
	.pd_offline_fn = bfq_pd_offline,
	.pd_free_fn = bfq_pd_free,
	.pd_reset_stats_fn = bfq_pd_reset_stats,
};
#endif

static int __init bfq_init(void)
{
	int ret;

#ifdef CONFIG_BFQ_GROUP_IOSCHED
	ret = blkcg_policy_register(&blkcg_policy_bfq);
	if (ret)
		return ret;
#endif

	ret = -ENOMEM;
	if (bfq_slab_setup())
		goto err_pol_unreg;

	/*
	 * Times to load large popular applications for the typical
	 * systems installed on the reference devices (see the
	 * comments before the definitions of the next two
	 * arrays). Actually, we use slightly slower values, as the
	 * estimated peak rate tends to be smaller than the actual
	 * peak rate. The reason for this last fact is that estimates
	 * are computed over much shorter time intervals than the long
	 * intervals typically used for benchmarking. Why? First, to
	 * adapt more quickly to variations. Second, because an I/O
	 * scheduler cannot rely on a peak-rate-evaluation workload to
	 * be run for a long time.
	 */
	T_slow[0] = msecs_to_jiffies(3500); /* actually 4 sec */
	T_slow[1] = msecs_to_jiffies(6000); /* actually 6.5 sec */
	T_fast[0] = msecs_to_jiffies(7000); /* actually 8 sec */
	T_fast[1] = msecs_to_jiffies(2500); /* actually 3 sec */

	/*
	 * Thresholds that determine the switch between speed classes
	 * (see the comments before the definition of the array
	 * device_speed_thresh). These thresholds are biased towards
	 * transitions to the fast class. This is safer than the
	 * opposite bias. In fact, a wrong transition to the slow
	 * class results in short weight-raising periods, because the
	 * speed of the device then tends to be higher than the
	 * reference peak rate. On the opposite end, a wrong
	 * transition to the fast class tends to increase
	 * weight-raising periods, for the opposite reason.
	 */
	device_speed_thresh[0] = (4 * R_slow[0]) / 3;
	device_speed_thresh[1] = (4 * R_slow[1]) / 3;
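	/*
	 * In other words, a drive is reclassified as fast once its
	 * estimated peak rate exceeds 4/3 of the corresponding slow-class
	 * reference rate R_slow[], and as slow when the estimate falls
	 * back below that threshold.
	 */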

	ret = elv_register(&iosched_bfq_mq);
	if (ret)
		goto err_pol_unreg;

	return 0;

err_pol_unreg:
#ifdef CONFIG_BFQ_GROUP_IOSCHED
	blkcg_policy_unregister(&blkcg_policy_bfq);
#endif
	return ret;
}

static void __exit bfq_exit(void)
{
	elv_unregister(&iosched_bfq_mq);
#ifdef CONFIG_BFQ_GROUP_IOSCHED
	blkcg_policy_unregister(&blkcg_policy_bfq);
#endif
	bfq_slab_kill();
}

module_init(bfq_init);
module_exit(bfq_exit);

MODULE_AUTHOR("Paolo Valente");
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("MQ Budget Fair Queueing I/O Scheduler");