]> git.proxmox.com Git - mirror_ubuntu-jammy-kernel.git/blame - block/blk-iocost.c
blk-iocost: halve debts if device stays idle
[mirror_ubuntu-jammy-kernel.git] / block / blk-iocost.c
CommitLineData
7caa4715
TH
1/* SPDX-License-Identifier: GPL-2.0
2 *
3 * IO cost model based controller.
4 *
5 * Copyright (C) 2019 Tejun Heo <tj@kernel.org>
6 * Copyright (C) 2019 Andy Newell <newella@fb.com>
7 * Copyright (C) 2019 Facebook
8 *
9 * One challenge of controlling IO resources is the lack of trivially
10 * observable cost metric. This is distinguished from CPU and memory where
11 * wallclock time and the number of bytes can serve as accurate enough
12 * approximations.
13 *
14 * Bandwidth and iops are the most commonly used metrics for IO devices but
15 * depending on the type and specifics of the device, different IO patterns
16 * easily lead to multiple orders of magnitude variations rendering them
17 * useless for the purpose of IO capacity distribution. While on-device
18 * time, with a lot of clutches, could serve as a useful approximation for
19 * non-queued rotational devices, this is no longer viable with modern
20 * devices, even the rotational ones.
21 *
22 * While there is no cost metric we can trivially observe, it isn't a
23 * complete mystery. For example, on a rotational device, seek cost
24 * dominates while a contiguous transfer contributes a smaller amount
25 * proportional to the size. If we can characterize at least the relative
26 * costs of these different types of IOs, it should be possible to
27 * implement a reasonable work-conserving proportional IO resource
28 * distribution.
29 *
30 * 1. IO Cost Model
31 *
32 * IO cost model estimates the cost of an IO given its basic parameters and
33 * history (e.g. the end sector of the last IO). The cost is measured in
34 * device time. If a given IO is estimated to cost 10ms, the device should
35 * be able to process ~100 of those IOs in a second.
36 *
37 * Currently, there's only one builtin cost model - linear. Each IO is
38 * classified as sequential or random and given a base cost accordingly.
39 * On top of that, a size cost proportional to the length of the IO is
40 * added. While simple, this model captures the operational
41 * characteristics of a wide varienty of devices well enough. Default
42 * paramters for several different classes of devices are provided and the
43 * parameters can be configured from userspace via
44 * /sys/fs/cgroup/io.cost.model.
45 *
46 * If needed, tools/cgroup/iocost_coef_gen.py can be used to generate
47 * device-specific coefficients.
48 *
49 * 2. Control Strategy
50 *
51 * The device virtual time (vtime) is used as the primary control metric.
52 * The control strategy is composed of the following three parts.
53 *
54 * 2-1. Vtime Distribution
55 *
56 * When a cgroup becomes active in terms of IOs, its hierarchical share is
57 * calculated. Please consider the following hierarchy where the numbers
58 * inside parentheses denote the configured weights.
59 *
60 * root
61 * / \
62 * A (w:100) B (w:300)
63 * / \
64 * A0 (w:100) A1 (w:100)
65 *
66 * If B is idle and only A0 and A1 are actively issuing IOs, as the two are
67 * of equal weight, each gets 50% share. If then B starts issuing IOs, B
68 * gets 300/(100+300) or 75% share, and A0 and A1 equally splits the rest,
69 * 12.5% each. The distribution mechanism only cares about these flattened
70 * shares. They're called hweights (hierarchical weights) and always add
fe20cdb5 71 * upto 1 (WEIGHT_ONE).
7caa4715
TH
72 *
73 * A given cgroup's vtime runs slower in inverse proportion to its hweight.
74 * For example, with 12.5% weight, A0's time runs 8 times slower (100/12.5)
75 * against the device vtime - an IO which takes 10ms on the underlying
76 * device is considered to take 80ms on A0.
77 *
78 * This constitutes the basis of IO capacity distribution. Each cgroup's
79 * vtime is running at a rate determined by its hweight. A cgroup tracks
80 * the vtime consumed by past IOs and can issue a new IO iff doing so
81 * wouldn't outrun the current device vtime. Otherwise, the IO is
82 * suspended until the vtime has progressed enough to cover it.
83 *
84 * 2-2. Vrate Adjustment
85 *
86 * It's unrealistic to expect the cost model to be perfect. There are too
87 * many devices and even on the same device the overall performance
88 * fluctuates depending on numerous factors such as IO mixture and device
89 * internal garbage collection. The controller needs to adapt dynamically.
90 *
91 * This is achieved by adjusting the overall IO rate according to how busy
92 * the device is. If the device becomes overloaded, we're sending down too
93 * many IOs and should generally slow down. If there are waiting issuers
94 * but the device isn't saturated, we're issuing too few and should
95 * generally speed up.
96 *
97 * To slow down, we lower the vrate - the rate at which the device vtime
98 * passes compared to the wall clock. For example, if the vtime is running
99 * at the vrate of 75%, all cgroups added up would only be able to issue
100 * 750ms worth of IOs per second, and vice-versa for speeding up.
101 *
102 * Device business is determined using two criteria - rq wait and
103 * completion latencies.
104 *
105 * When a device gets saturated, the on-device and then the request queues
106 * fill up and a bio which is ready to be issued has to wait for a request
107 * to become available. When this delay becomes noticeable, it's a clear
108 * indication that the device is saturated and we lower the vrate. This
109 * saturation signal is fairly conservative as it only triggers when both
110 * hardware and software queues are filled up, and is used as the default
111 * busy signal.
112 *
113 * As devices can have deep queues and be unfair in how the queued commands
114 * are executed, soley depending on rq wait may not result in satisfactory
115 * control quality. For a better control quality, completion latency QoS
116 * parameters can be configured so that the device is considered saturated
117 * if N'th percentile completion latency rises above the set point.
118 *
119 * The completion latency requirements are a function of both the
120 * underlying device characteristics and the desired IO latency quality of
121 * service. There is an inherent trade-off - the tighter the latency QoS,
122 * the higher the bandwidth lossage. Latency QoS is disabled by default
123 * and can be set through /sys/fs/cgroup/io.cost.qos.
124 *
125 * 2-3. Work Conservation
126 *
127 * Imagine two cgroups A and B with equal weights. A is issuing a small IO
128 * periodically while B is sending out enough parallel IOs to saturate the
129 * device on its own. Let's say A's usage amounts to 100ms worth of IO
130 * cost per second, i.e., 10% of the device capacity. The naive
131 * distribution of half and half would lead to 60% utilization of the
132 * device, a significant reduction in the total amount of work done
133 * compared to free-for-all competition. This is too high a cost to pay
134 * for IO control.
135 *
136 * To conserve the total amount of work done, we keep track of how much
137 * each active cgroup is actually using and yield part of its weight if
138 * there are other cgroups which can make use of it. In the above case,
139 * A's weight will be lowered so that it hovers above the actual usage and
140 * B would be able to use the rest.
141 *
142 * As we don't want to penalize a cgroup for donating its weight, the
143 * surplus weight adjustment factors in a margin and has an immediate
144 * snapback mechanism in case the cgroup needs more IO vtime for itself.
145 *
146 * Note that adjusting down surplus weights has the same effects as
147 * accelerating vtime for other cgroups and work conservation can also be
148 * implemented by adjusting vrate dynamically. However, squaring who can
149 * donate and should take back how much requires hweight propagations
150 * anyway making it easier to implement and understand as a separate
151 * mechanism.
6954ff18
TH
152 *
153 * 3. Monitoring
154 *
155 * Instead of debugfs or other clumsy monitoring mechanisms, this
156 * controller uses a drgn based monitoring script -
157 * tools/cgroup/iocost_monitor.py. For details on drgn, please see
158 * https://github.com/osandov/drgn. The ouput looks like the following.
159 *
160 * sdb RUN per=300ms cur_per=234.218:v203.695 busy= +1 vrate= 62.12%
7c1ee704
TH
161 * active weight hweight% inflt% dbt delay usages%
162 * test/a * 50/ 50 33.33/ 33.33 27.65 2 0*041 033:033:033
163 * test/b * 100/ 100 66.67/ 66.67 17.56 0 0*000 066:079:077
6954ff18
TH
164 *
165 * - per : Timer period
166 * - cur_per : Internal wall and device vtime clock
167 * - vrate : Device virtual time rate against wall clock
168 * - weight : Surplus-adjusted and configured weights
169 * - hweight : Surplus-adjusted and configured hierarchical weights
170 * - inflt : The percentage of in-flight IO cost at the end of last period
171 * - del_ms : Deferred issuer delay induction level and duration
172 * - usages : Usage history
7caa4715
TH
173 */
174
175#include <linux/kernel.h>
176#include <linux/module.h>
177#include <linux/timer.h>
178#include <linux/time64.h>
179#include <linux/parser.h>
180#include <linux/sched/signal.h>
181#include <linux/blk-cgroup.h>
5e124f74
TH
182#include <asm/local.h>
183#include <asm/local64.h>
7caa4715
TH
184#include "blk-rq-qos.h"
185#include "blk-stat.h"
186#include "blk-wbt.h"
187
188#ifdef CONFIG_TRACEPOINTS
189
190/* copied from TRACE_CGROUP_PATH, see cgroup-internal.h */
191#define TRACE_IOCG_PATH_LEN 1024
192static DEFINE_SPINLOCK(trace_iocg_path_lock);
193static char trace_iocg_path[TRACE_IOCG_PATH_LEN];
194
195#define TRACE_IOCG_PATH(type, iocg, ...) \
196 do { \
197 unsigned long flags; \
198 if (trace_iocost_##type##_enabled()) { \
199 spin_lock_irqsave(&trace_iocg_path_lock, flags); \
200 cgroup_path(iocg_to_blkg(iocg)->blkcg->css.cgroup, \
201 trace_iocg_path, TRACE_IOCG_PATH_LEN); \
202 trace_iocost_##type(iocg, trace_iocg_path, \
203 ##__VA_ARGS__); \
204 spin_unlock_irqrestore(&trace_iocg_path_lock, flags); \
205 } \
206 } while (0)
207
208#else /* CONFIG_TRACE_POINTS */
209#define TRACE_IOCG_PATH(type, iocg, ...) do { } while (0)
210#endif /* CONFIG_TRACE_POINTS */
211
212enum {
213 MILLION = 1000000,
214
215 /* timer period is calculated from latency requirements, bound it */
216 MIN_PERIOD = USEC_PER_MSEC,
217 MAX_PERIOD = USEC_PER_SEC,
218
219 /*
f1de2439 220 * iocg->vtime is targeted at 50% behind the device vtime, which
7caa4715
TH
221 * serves as its IO credit buffer. Surplus weight adjustment is
222 * immediately canceled if the vtime margin runs below 10%.
223 */
7ca5b2e6 224 MARGIN_MIN_PCT = 10,
f1de2439
TH
225 MARGIN_LOW_PCT = 20,
226 MARGIN_TARGET_PCT = 50,
227 MARGIN_MAX_PCT = 100,
7caa4715 228
b0853ab4
TH
229 INUSE_ADJ_STEP_PCT = 25,
230
7ca5b2e6
TH
231 /* Have some play in timer operations */
232 TIMER_SLACK_PCT = 1,
7caa4715
TH
233
234 /*
235 * vtime can wrap well within a reasonable uptime when vrate is
236 * consistently raised. Don't trust recorded cgroup vtime if the
237 * period counter indicates that it's older than 5mins.
238 */
239 VTIME_VALID_DUR = 300 * USEC_PER_SEC,
240
7caa4715 241 /* 1/64k is granular enough and can easily be handled w/ u32 */
fe20cdb5 242 WEIGHT_ONE = 1 << 16,
7caa4715
TH
243
244 /*
245 * As vtime is used to calculate the cost of each IO, it needs to
246 * be fairly high precision. For example, it should be able to
247 * represent the cost of a single page worth of discard with
248 * suffificient accuracy. At the same time, it should be able to
249 * represent reasonably long enough durations to be useful and
250 * convenient during operation.
251 *
252 * 1s worth of vtime is 2^37. This gives us both sub-nanosecond
253 * granularity and days of wrap-around time even at extreme vrates.
254 */
255 VTIME_PER_SEC_SHIFT = 37,
256 VTIME_PER_SEC = 1LLU << VTIME_PER_SEC_SHIFT,
257 VTIME_PER_USEC = VTIME_PER_SEC / USEC_PER_SEC,
cd006509 258 VTIME_PER_NSEC = VTIME_PER_SEC / NSEC_PER_SEC,
7caa4715
TH
259
260 /* bound vrate adjustments within two orders of magnitude */
261 VRATE_MIN_PPM = 10000, /* 1% */
262 VRATE_MAX_PPM = 100000000, /* 10000% */
263
264 VRATE_MIN = VTIME_PER_USEC * VRATE_MIN_PPM / MILLION,
265 VRATE_CLAMP_ADJ_PCT = 4,
266
267 /* if IOs end up waiting for requests, issue less */
268 RQ_WAIT_BUSY_PCT = 5,
269
270 /* unbusy hysterisis */
271 UNBUSY_THR_PCT = 75,
272
5160a5a5
TH
273 /*
274 * The effect of delay is indirect and non-linear and a huge amount of
275 * future debt can accumulate abruptly while unthrottled. Linearly scale
276 * up delay as debt is going up and then let it decay exponentially.
277 * This gives us quick ramp ups while delay is accumulating and long
278 * tails which can help reducing the frequency of debt explosions on
279 * unthrottle. The parameters are experimentally determined.
280 *
281 * The delay mechanism provides adequate protection and behavior in many
282 * cases. However, this is far from ideal and falls shorts on both
283 * fronts. The debtors are often throttled too harshly costing a
284 * significant level of fairness and possibly total work while the
285 * protection against their impacts on the system can be choppy and
286 * unreliable.
287 *
288 * The shortcoming primarily stems from the fact that, unlike for page
289 * cache, the kernel doesn't have well-defined back-pressure propagation
290 * mechanism and policies for anonymous memory. Fully addressing this
291 * issue will likely require substantial improvements in the area.
292 */
293 MIN_DELAY_THR_PCT = 500,
294 MAX_DELAY_THR_PCT = 25000,
295 MIN_DELAY = 250,
296 MAX_DELAY = 250 * USEC_PER_MSEC,
297
dda1315f
TH
298 /*
299 * Halve debts if total usage keeps staying under 25% w/o any shortages
300 * for over 100ms.
301 */
302 DEBT_BUSY_USAGE_PCT = 25,
303 DEBT_REDUCTION_IDLE_DUR = 100 * USEC_PER_MSEC,
304
7caa4715
TH
305 /* don't let cmds which take a very long time pin lagging for too long */
306 MAX_LAGGING_PERIODS = 10,
307
7caa4715
TH
308 /* switch iff the conditions are met for longer than this */
309 AUTOP_CYCLE_NSEC = 10LLU * NSEC_PER_SEC,
310
311 /*
312 * Count IO size in 4k pages. The 12bit shift helps keeping
313 * size-proportional components of cost calculation in closer
314 * numbers of digits to per-IO cost components.
315 */
316 IOC_PAGE_SHIFT = 12,
317 IOC_PAGE_SIZE = 1 << IOC_PAGE_SHIFT,
318 IOC_SECT_TO_PAGE_SHIFT = IOC_PAGE_SHIFT - SECTOR_SHIFT,
319
320 /* if apart further than 16M, consider randio for linear model */
321 LCOEF_RANDIO_PAGES = 4096,
322};
323
324enum ioc_running {
325 IOC_IDLE,
326 IOC_RUNNING,
327 IOC_STOP,
328};
329
330/* io.cost.qos controls including per-dev enable of the whole controller */
331enum {
332 QOS_ENABLE,
333 QOS_CTRL,
334 NR_QOS_CTRL_PARAMS,
335};
336
337/* io.cost.qos params */
338enum {
339 QOS_RPPM,
340 QOS_RLAT,
341 QOS_WPPM,
342 QOS_WLAT,
343 QOS_MIN,
344 QOS_MAX,
345 NR_QOS_PARAMS,
346};
347
348/* io.cost.model controls */
349enum {
350 COST_CTRL,
351 COST_MODEL,
352 NR_COST_CTRL_PARAMS,
353};
354
355/* builtin linear cost model coefficients */
356enum {
357 I_LCOEF_RBPS,
358 I_LCOEF_RSEQIOPS,
359 I_LCOEF_RRANDIOPS,
360 I_LCOEF_WBPS,
361 I_LCOEF_WSEQIOPS,
362 I_LCOEF_WRANDIOPS,
363 NR_I_LCOEFS,
364};
365
366enum {
367 LCOEF_RPAGE,
368 LCOEF_RSEQIO,
369 LCOEF_RRANDIO,
370 LCOEF_WPAGE,
371 LCOEF_WSEQIO,
372 LCOEF_WRANDIO,
373 NR_LCOEFS,
374};
375
376enum {
377 AUTOP_INVALID,
378 AUTOP_HDD,
379 AUTOP_SSD_QD1,
380 AUTOP_SSD_DFL,
381 AUTOP_SSD_FAST,
382};
383
384struct ioc_gq;
385
386struct ioc_params {
387 u32 qos[NR_QOS_PARAMS];
388 u64 i_lcoefs[NR_I_LCOEFS];
389 u64 lcoefs[NR_LCOEFS];
390 u32 too_fast_vrate_pct;
391 u32 too_slow_vrate_pct;
392};
393
7ca5b2e6
TH
394struct ioc_margins {
395 s64 min;
f1de2439
TH
396 s64 low;
397 s64 target;
7ca5b2e6
TH
398 s64 max;
399};
400
7caa4715 401struct ioc_missed {
5e124f74
TH
402 local_t nr_met;
403 local_t nr_missed;
7caa4715
TH
404 u32 last_met;
405 u32 last_missed;
406};
407
408struct ioc_pcpu_stat {
409 struct ioc_missed missed[2];
410
5e124f74 411 local64_t rq_wait_ns;
7caa4715
TH
412 u64 last_rq_wait_ns;
413};
414
415/* per device */
416struct ioc {
417 struct rq_qos rqos;
418
419 bool enabled;
420
421 struct ioc_params params;
7ca5b2e6 422 struct ioc_margins margins;
7caa4715 423 u32 period_us;
7ca5b2e6 424 u32 timer_slack_ns;
7caa4715
TH
425 u64 vrate_min;
426 u64 vrate_max;
427
428 spinlock_t lock;
429 struct timer_list timer;
430 struct list_head active_iocgs; /* active cgroups */
431 struct ioc_pcpu_stat __percpu *pcpu_stat;
432
433 enum ioc_running running;
434 atomic64_t vtime_rate;
435
67b7b641 436 seqcount_spinlock_t period_seqcount;
ce95570a 437 u64 period_at; /* wallclock starttime */
7caa4715
TH
438 u64 period_at_vtime; /* vtime starttime */
439
440 atomic64_t cur_period; /* inc'd each period */
441 int busy_level; /* saturation history */
442
7caa4715
TH
443 bool weights_updated;
444 atomic_t hweight_gen; /* for lazy hweights */
445
dda1315f
TH
446 /* the last time debt cancel condition wasn't met */
447 u64 debt_busy_at;
448
7caa4715
TH
449 u64 autop_too_fast_at;
450 u64 autop_too_slow_at;
451 int autop_idx;
452 bool user_qos_params:1;
453 bool user_cost_model:1;
454};
455
97eb1975
TH
456struct iocg_pcpu_stat {
457 local64_t abs_vusage;
458};
459
460struct iocg_stat {
461 u64 usage_us;
462};
463
7caa4715
TH
464/* per device-cgroup pair */
465struct ioc_gq {
466 struct blkg_policy_data pd;
467 struct ioc *ioc;
468
469 /*
470 * A iocg can get its weight from two sources - an explicit
471 * per-device-cgroup configuration or the default weight of the
472 * cgroup. `cfg_weight` is the explicit per-device-cgroup
473 * configuration. `weight` is the effective considering both
474 * sources.
475 *
476 * When an idle cgroup becomes active its `active` goes from 0 to
477 * `weight`. `inuse` is the surplus adjusted active weight.
478 * `active` and `inuse` are used to calculate `hweight_active` and
479 * `hweight_inuse`.
480 *
481 * `last_inuse` remembers `inuse` while an iocg is idle to persist
482 * surplus adjustments.
b0853ab4
TH
483 *
484 * `inuse` may be adjusted dynamically during period. `saved_*` are used
485 * to determine and track adjustments.
7caa4715
TH
486 */
487 u32 cfg_weight;
488 u32 weight;
489 u32 active;
490 u32 inuse;
b0853ab4 491
7caa4715 492 u32 last_inuse;
b0853ab4 493 s64 saved_margin;
7caa4715
TH
494
495 sector_t cursor; /* to detect randio */
496
497 /*
498 * `vtime` is this iocg's vtime cursor which progresses as IOs are
499 * issued. If lagging behind device vtime, the delta represents
500 * the currently available IO budget. If runnning ahead, the
501 * overage.
502 *
503 * `vtime_done` is the same but progressed on completion rather
504 * than issue. The delta behind `vtime` represents the cost of
505 * currently in-flight IOs.
7caa4715
TH
506 */
507 atomic64_t vtime;
508 atomic64_t done_vtime;
0b80f986 509 u64 abs_vdebt;
7caa4715 510
5160a5a5
TH
511 /* current delay in effect and when it started */
512 u64 delay;
513 u64 delay_at;
514
7caa4715
TH
515 /*
516 * The period this iocg was last active in. Used for deactivation
517 * and invalidating `vtime`.
518 */
519 atomic64_t active_period;
520 struct list_head active_list;
521
00410f1b 522 /* see __propagate_weights() and current_hweight() for details */
7caa4715
TH
523 u64 child_active_sum;
524 u64 child_inuse_sum;
e08d02aa 525 u64 child_adjusted_sum;
7caa4715
TH
526 int hweight_gen;
527 u32 hweight_active;
528 u32 hweight_inuse;
e08d02aa 529 u32 hweight_donating;
93f7d2db 530 u32 hweight_after_donation;
7caa4715 531
97eb1975 532 struct list_head walk_list;
8692d2db 533 struct list_head surplus_list;
97eb1975 534
7caa4715
TH
535 struct wait_queue_head waitq;
536 struct hrtimer waitq_timer;
7caa4715 537
1aa50d02
TH
538 /* timestamp at the latest activation */
539 u64 activated_at;
540
97eb1975
TH
541 /* statistics */
542 struct iocg_pcpu_stat __percpu *pcpu_stat;
543 struct iocg_stat local_stat;
544 struct iocg_stat desc_stat;
545 struct iocg_stat last_stat;
546 u64 last_stat_abs_vusage;
f1de2439 547 u64 usage_delta_us;
7caa4715
TH
548
549 /* this iocg's depth in the hierarchy and ancestors including self */
550 int level;
551 struct ioc_gq *ancestors[];
552};
553
554/* per cgroup */
555struct ioc_cgrp {
556 struct blkcg_policy_data cpd;
557 unsigned int dfl_weight;
558};
559
560struct ioc_now {
561 u64 now_ns;
ce95570a 562 u64 now;
7caa4715
TH
563 u64 vnow;
564 u64 vrate;
565};
566
567struct iocg_wait {
568 struct wait_queue_entry wait;
569 struct bio *bio;
570 u64 abs_cost;
571 bool committed;
572};
573
574struct iocg_wake_ctx {
575 struct ioc_gq *iocg;
576 u32 hw_inuse;
577 s64 vbudget;
578};
579
580static const struct ioc_params autop[] = {
581 [AUTOP_HDD] = {
582 .qos = {
7afcccaf
TH
583 [QOS_RLAT] = 250000, /* 250ms */
584 [QOS_WLAT] = 250000,
7caa4715
TH
585 [QOS_MIN] = VRATE_MIN_PPM,
586 [QOS_MAX] = VRATE_MAX_PPM,
587 },
588 .i_lcoefs = {
589 [I_LCOEF_RBPS] = 174019176,
590 [I_LCOEF_RSEQIOPS] = 41708,
591 [I_LCOEF_RRANDIOPS] = 370,
592 [I_LCOEF_WBPS] = 178075866,
593 [I_LCOEF_WSEQIOPS] = 42705,
594 [I_LCOEF_WRANDIOPS] = 378,
595 },
596 },
597 [AUTOP_SSD_QD1] = {
598 .qos = {
599 [QOS_RLAT] = 25000, /* 25ms */
600 [QOS_WLAT] = 25000,
601 [QOS_MIN] = VRATE_MIN_PPM,
602 [QOS_MAX] = VRATE_MAX_PPM,
603 },
604 .i_lcoefs = {
605 [I_LCOEF_RBPS] = 245855193,
606 [I_LCOEF_RSEQIOPS] = 61575,
607 [I_LCOEF_RRANDIOPS] = 6946,
608 [I_LCOEF_WBPS] = 141365009,
609 [I_LCOEF_WSEQIOPS] = 33716,
610 [I_LCOEF_WRANDIOPS] = 26796,
611 },
612 },
613 [AUTOP_SSD_DFL] = {
614 .qos = {
615 [QOS_RLAT] = 25000, /* 25ms */
616 [QOS_WLAT] = 25000,
617 [QOS_MIN] = VRATE_MIN_PPM,
618 [QOS_MAX] = VRATE_MAX_PPM,
619 },
620 .i_lcoefs = {
621 [I_LCOEF_RBPS] = 488636629,
622 [I_LCOEF_RSEQIOPS] = 8932,
623 [I_LCOEF_RRANDIOPS] = 8518,
624 [I_LCOEF_WBPS] = 427891549,
625 [I_LCOEF_WSEQIOPS] = 28755,
626 [I_LCOEF_WRANDIOPS] = 21940,
627 },
628 .too_fast_vrate_pct = 500,
629 },
630 [AUTOP_SSD_FAST] = {
631 .qos = {
632 [QOS_RLAT] = 5000, /* 5ms */
633 [QOS_WLAT] = 5000,
634 [QOS_MIN] = VRATE_MIN_PPM,
635 [QOS_MAX] = VRATE_MAX_PPM,
636 },
637 .i_lcoefs = {
638 [I_LCOEF_RBPS] = 3102524156LLU,
639 [I_LCOEF_RSEQIOPS] = 724816,
640 [I_LCOEF_RRANDIOPS] = 778122,
641 [I_LCOEF_WBPS] = 1742780862LLU,
642 [I_LCOEF_WSEQIOPS] = 425702,
643 [I_LCOEF_WRANDIOPS] = 443193,
644 },
645 .too_slow_vrate_pct = 10,
646 },
647};
648
649/*
650 * vrate adjust percentages indexed by ioc->busy_level. We adjust up on
651 * vtime credit shortage and down on device saturation.
652 */
653static u32 vrate_adj_pct[] =
654 { 0, 0, 0, 0,
655 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
656 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
657 4, 4, 4, 4, 4, 4, 4, 4, 8, 8, 8, 8, 8, 8, 8, 8, 16 };
658
659static struct blkcg_policy blkcg_policy_iocost;
660
661/* accessors and helpers */
662static struct ioc *rqos_to_ioc(struct rq_qos *rqos)
663{
664 return container_of(rqos, struct ioc, rqos);
665}
666
667static struct ioc *q_to_ioc(struct request_queue *q)
668{
669 return rqos_to_ioc(rq_qos_id(q, RQ_QOS_COST));
670}
671
672static const char *q_name(struct request_queue *q)
673{
674 if (test_bit(QUEUE_FLAG_REGISTERED, &q->queue_flags))
675 return kobject_name(q->kobj.parent);
676 else
677 return "<unknown>";
678}
679
680static const char __maybe_unused *ioc_name(struct ioc *ioc)
681{
682 return q_name(ioc->rqos.q);
683}
684
685static struct ioc_gq *pd_to_iocg(struct blkg_policy_data *pd)
686{
687 return pd ? container_of(pd, struct ioc_gq, pd) : NULL;
688}
689
690static struct ioc_gq *blkg_to_iocg(struct blkcg_gq *blkg)
691{
692 return pd_to_iocg(blkg_to_pd(blkg, &blkcg_policy_iocost));
693}
694
695static struct blkcg_gq *iocg_to_blkg(struct ioc_gq *iocg)
696{
697 return pd_to_blkg(&iocg->pd);
698}
699
700static struct ioc_cgrp *blkcg_to_iocc(struct blkcg *blkcg)
701{
702 return container_of(blkcg_to_cpd(blkcg, &blkcg_policy_iocost),
703 struct ioc_cgrp, cpd);
704}
705
706/*
707 * Scale @abs_cost to the inverse of @hw_inuse. The lower the hierarchical
36a52481 708 * weight, the more expensive each IO. Must round up.
7caa4715
TH
709 */
710static u64 abs_cost_to_cost(u64 abs_cost, u32 hw_inuse)
711{
fe20cdb5 712 return DIV64_U64_ROUND_UP(abs_cost * WEIGHT_ONE, hw_inuse);
7caa4715
TH
713}
714
36a52481
TH
715/*
716 * The inverse of abs_cost_to_cost(). Must round up.
717 */
718static u64 cost_to_abs_cost(u64 cost, u32 hw_inuse)
719{
fe20cdb5 720 return DIV64_U64_ROUND_UP(cost * hw_inuse, WEIGHT_ONE);
36a52481
TH
721}
722
97eb1975
TH
723static void iocg_commit_bio(struct ioc_gq *iocg, struct bio *bio,
724 u64 abs_cost, u64 cost)
7caa4715 725{
97eb1975
TH
726 struct iocg_pcpu_stat *gcs;
727
7caa4715
TH
728 bio->bi_iocost_cost = cost;
729 atomic64_add(cost, &iocg->vtime);
97eb1975
TH
730
731 gcs = get_cpu_ptr(iocg->pcpu_stat);
732 local64_add(abs_cost, &gcs->abs_vusage);
733 put_cpu_ptr(gcs);
7caa4715
TH
734}
735
da437b95
TH
736static void iocg_lock(struct ioc_gq *iocg, bool lock_ioc, unsigned long *flags)
737{
738 if (lock_ioc) {
739 spin_lock_irqsave(&iocg->ioc->lock, *flags);
740 spin_lock(&iocg->waitq.lock);
741 } else {
742 spin_lock_irqsave(&iocg->waitq.lock, *flags);
743 }
744}
745
746static void iocg_unlock(struct ioc_gq *iocg, bool unlock_ioc, unsigned long *flags)
747{
748 if (unlock_ioc) {
749 spin_unlock(&iocg->waitq.lock);
750 spin_unlock_irqrestore(&iocg->ioc->lock, *flags);
751 } else {
752 spin_unlock_irqrestore(&iocg->waitq.lock, *flags);
753 }
754}
755
7caa4715
TH
756#define CREATE_TRACE_POINTS
757#include <trace/events/iocost.h>
758
7ca5b2e6
TH
759static void ioc_refresh_margins(struct ioc *ioc)
760{
761 struct ioc_margins *margins = &ioc->margins;
762 u32 period_us = ioc->period_us;
763 u64 vrate = atomic64_read(&ioc->vtime_rate);
764
765 margins->min = (period_us * MARGIN_MIN_PCT / 100) * vrate;
f1de2439
TH
766 margins->low = (period_us * MARGIN_LOW_PCT / 100) * vrate;
767 margins->target = (period_us * MARGIN_TARGET_PCT / 100) * vrate;
7ca5b2e6
TH
768 margins->max = (period_us * MARGIN_MAX_PCT / 100) * vrate;
769}
770
7caa4715
TH
771/* latency Qos params changed, update period_us and all the dependent params */
772static void ioc_refresh_period_us(struct ioc *ioc)
773{
774 u32 ppm, lat, multi, period_us;
775
776 lockdep_assert_held(&ioc->lock);
777
778 /* pick the higher latency target */
779 if (ioc->params.qos[QOS_RLAT] >= ioc->params.qos[QOS_WLAT]) {
780 ppm = ioc->params.qos[QOS_RPPM];
781 lat = ioc->params.qos[QOS_RLAT];
782 } else {
783 ppm = ioc->params.qos[QOS_WPPM];
784 lat = ioc->params.qos[QOS_WLAT];
785 }
786
787 /*
788 * We want the period to be long enough to contain a healthy number
789 * of IOs while short enough for granular control. Define it as a
790 * multiple of the latency target. Ideally, the multiplier should
791 * be scaled according to the percentile so that it would nominally
792 * contain a certain number of requests. Let's be simpler and
793 * scale it linearly so that it's 2x >= pct(90) and 10x at pct(50).
794 */
795 if (ppm)
796 multi = max_t(u32, (MILLION - ppm) / 50000, 2);
797 else
798 multi = 2;
799 period_us = multi * lat;
800 period_us = clamp_t(u32, period_us, MIN_PERIOD, MAX_PERIOD);
801
802 /* calculate dependent params */
803 ioc->period_us = period_us;
7ca5b2e6
TH
804 ioc->timer_slack_ns = div64_u64(
805 (u64)period_us * NSEC_PER_USEC * TIMER_SLACK_PCT,
806 100);
807 ioc_refresh_margins(ioc);
7caa4715
TH
808}
809
810static int ioc_autop_idx(struct ioc *ioc)
811{
812 int idx = ioc->autop_idx;
813 const struct ioc_params *p = &autop[idx];
814 u32 vrate_pct;
815 u64 now_ns;
816
817 /* rotational? */
818 if (!blk_queue_nonrot(ioc->rqos.q))
819 return AUTOP_HDD;
820
821 /* handle SATA SSDs w/ broken NCQ */
822 if (blk_queue_depth(ioc->rqos.q) == 1)
823 return AUTOP_SSD_QD1;
824
825 /* use one of the normal ssd sets */
826 if (idx < AUTOP_SSD_DFL)
827 return AUTOP_SSD_DFL;
828
829 /* if user is overriding anything, maintain what was there */
830 if (ioc->user_qos_params || ioc->user_cost_model)
831 return idx;
832
833 /* step up/down based on the vrate */
834 vrate_pct = div64_u64(atomic64_read(&ioc->vtime_rate) * 100,
835 VTIME_PER_USEC);
836 now_ns = ktime_get_ns();
837
838 if (p->too_fast_vrate_pct && p->too_fast_vrate_pct <= vrate_pct) {
839 if (!ioc->autop_too_fast_at)
840 ioc->autop_too_fast_at = now_ns;
841 if (now_ns - ioc->autop_too_fast_at >= AUTOP_CYCLE_NSEC)
842 return idx + 1;
843 } else {
844 ioc->autop_too_fast_at = 0;
845 }
846
847 if (p->too_slow_vrate_pct && p->too_slow_vrate_pct >= vrate_pct) {
848 if (!ioc->autop_too_slow_at)
849 ioc->autop_too_slow_at = now_ns;
850 if (now_ns - ioc->autop_too_slow_at >= AUTOP_CYCLE_NSEC)
851 return idx - 1;
852 } else {
853 ioc->autop_too_slow_at = 0;
854 }
855
856 return idx;
857}
858
859/*
860 * Take the followings as input
861 *
862 * @bps maximum sequential throughput
863 * @seqiops maximum sequential 4k iops
864 * @randiops maximum random 4k iops
865 *
866 * and calculate the linear model cost coefficients.
867 *
868 * *@page per-page cost 1s / (@bps / 4096)
869 * *@seqio base cost of a seq IO max((1s / @seqiops) - *@page, 0)
870 * @randiops base cost of a rand IO max((1s / @randiops) - *@page, 0)
871 */
872static void calc_lcoefs(u64 bps, u64 seqiops, u64 randiops,
873 u64 *page, u64 *seqio, u64 *randio)
874{
875 u64 v;
876
877 *page = *seqio = *randio = 0;
878
879 if (bps)
880 *page = DIV64_U64_ROUND_UP(VTIME_PER_SEC,
881 DIV_ROUND_UP_ULL(bps, IOC_PAGE_SIZE));
882
883 if (seqiops) {
884 v = DIV64_U64_ROUND_UP(VTIME_PER_SEC, seqiops);
885 if (v > *page)
886 *seqio = v - *page;
887 }
888
889 if (randiops) {
890 v = DIV64_U64_ROUND_UP(VTIME_PER_SEC, randiops);
891 if (v > *page)
892 *randio = v - *page;
893 }
894}
895
896static void ioc_refresh_lcoefs(struct ioc *ioc)
897{
898 u64 *u = ioc->params.i_lcoefs;
899 u64 *c = ioc->params.lcoefs;
900
901 calc_lcoefs(u[I_LCOEF_RBPS], u[I_LCOEF_RSEQIOPS], u[I_LCOEF_RRANDIOPS],
902 &c[LCOEF_RPAGE], &c[LCOEF_RSEQIO], &c[LCOEF_RRANDIO]);
903 calc_lcoefs(u[I_LCOEF_WBPS], u[I_LCOEF_WSEQIOPS], u[I_LCOEF_WRANDIOPS],
904 &c[LCOEF_WPAGE], &c[LCOEF_WSEQIO], &c[LCOEF_WRANDIO]);
905}
906
907static bool ioc_refresh_params(struct ioc *ioc, bool force)
908{
909 const struct ioc_params *p;
910 int idx;
911
912 lockdep_assert_held(&ioc->lock);
913
914 idx = ioc_autop_idx(ioc);
915 p = &autop[idx];
916
917 if (idx == ioc->autop_idx && !force)
918 return false;
919
920 if (idx != ioc->autop_idx)
921 atomic64_set(&ioc->vtime_rate, VTIME_PER_USEC);
922
923 ioc->autop_idx = idx;
924 ioc->autop_too_fast_at = 0;
925 ioc->autop_too_slow_at = 0;
926
927 if (!ioc->user_qos_params)
928 memcpy(ioc->params.qos, p->qos, sizeof(p->qos));
929 if (!ioc->user_cost_model)
930 memcpy(ioc->params.i_lcoefs, p->i_lcoefs, sizeof(p->i_lcoefs));
931
932 ioc_refresh_period_us(ioc);
933 ioc_refresh_lcoefs(ioc);
934
935 ioc->vrate_min = DIV64_U64_ROUND_UP((u64)ioc->params.qos[QOS_MIN] *
936 VTIME_PER_USEC, MILLION);
937 ioc->vrate_max = div64_u64((u64)ioc->params.qos[QOS_MAX] *
938 VTIME_PER_USEC, MILLION);
939
940 return true;
941}
942
943/* take a snapshot of the current [v]time and vrate */
944static void ioc_now(struct ioc *ioc, struct ioc_now *now)
945{
946 unsigned seq;
947
948 now->now_ns = ktime_get();
949 now->now = ktime_to_us(now->now_ns);
950 now->vrate = atomic64_read(&ioc->vtime_rate);
951
952 /*
953 * The current vtime is
954 *
955 * vtime at period start + (wallclock time since the start) * vrate
956 *
957 * As a consistent snapshot of `period_at_vtime` and `period_at` is
958 * needed, they're seqcount protected.
959 */
960 do {
961 seq = read_seqcount_begin(&ioc->period_seqcount);
962 now->vnow = ioc->period_at_vtime +
963 (now->now - ioc->period_at) * now->vrate;
964 } while (read_seqcount_retry(&ioc->period_seqcount, seq));
965}
966
967static void ioc_start_period(struct ioc *ioc, struct ioc_now *now)
968{
7caa4715
TH
969 WARN_ON_ONCE(ioc->running != IOC_RUNNING);
970
971 write_seqcount_begin(&ioc->period_seqcount);
972 ioc->period_at = now->now;
973 ioc->period_at_vtime = now->vnow;
974 write_seqcount_end(&ioc->period_seqcount);
975
976 ioc->timer.expires = jiffies + usecs_to_jiffies(ioc->period_us);
977 add_timer(&ioc->timer);
978}
979
980/*
981 * Update @iocg's `active` and `inuse` to @active and @inuse, update level
b0853ab4
TH
982 * weight sums and propagate upwards accordingly. If @save, the current margin
983 * is saved to be used as reference for later inuse in-period adjustments.
7caa4715 984 */
b0853ab4
TH
985static void __propagate_weights(struct ioc_gq *iocg, u32 active, u32 inuse,
986 bool save, struct ioc_now *now)
7caa4715
TH
987{
988 struct ioc *ioc = iocg->ioc;
989 int lvl;
990
991 lockdep_assert_held(&ioc->lock);
992
db84a72a
TH
993 inuse = clamp_t(u32, inuse, 1, active);
994
b0853ab4
TH
995 iocg->last_inuse = iocg->inuse;
996 if (save)
997 iocg->saved_margin = now->vnow - atomic64_read(&iocg->vtime);
998
db84a72a
TH
999 if (active == iocg->active && inuse == iocg->inuse)
1000 return;
7caa4715
TH
1001
1002 for (lvl = iocg->level - 1; lvl >= 0; lvl--) {
1003 struct ioc_gq *parent = iocg->ancestors[lvl];
1004 struct ioc_gq *child = iocg->ancestors[lvl + 1];
1005 u32 parent_active = 0, parent_inuse = 0;
1006
1007 /* update the level sums */
1008 parent->child_active_sum += (s32)(active - child->active);
1009 parent->child_inuse_sum += (s32)(inuse - child->inuse);
1010 /* apply the udpates */
1011 child->active = active;
1012 child->inuse = inuse;
1013
1014 /*
1015 * The delta between inuse and active sums indicates that
1016 * that much of weight is being given away. Parent's inuse
1017 * and active should reflect the ratio.
1018 */
1019 if (parent->child_active_sum) {
1020 parent_active = parent->weight;
1021 parent_inuse = DIV64_U64_ROUND_UP(
1022 parent_active * parent->child_inuse_sum,
1023 parent->child_active_sum);
1024 }
1025
1026 /* do we need to keep walking up? */
1027 if (parent_active == parent->active &&
1028 parent_inuse == parent->inuse)
1029 break;
1030
1031 active = parent_active;
1032 inuse = parent_inuse;
1033 }
1034
1035 ioc->weights_updated = true;
1036}
1037
00410f1b 1038static void commit_weights(struct ioc *ioc)
7caa4715
TH
1039{
1040 lockdep_assert_held(&ioc->lock);
1041
1042 if (ioc->weights_updated) {
1043 /* paired with rmb in current_hweight(), see there */
1044 smp_wmb();
1045 atomic_inc(&ioc->hweight_gen);
1046 ioc->weights_updated = false;
1047 }
1048}
1049
b0853ab4
TH
1050static void propagate_weights(struct ioc_gq *iocg, u32 active, u32 inuse,
1051 bool save, struct ioc_now *now)
7caa4715 1052{
b0853ab4 1053 __propagate_weights(iocg, active, inuse, save, now);
00410f1b 1054 commit_weights(iocg->ioc);
7caa4715
TH
1055}
1056
1057static void current_hweight(struct ioc_gq *iocg, u32 *hw_activep, u32 *hw_inusep)
1058{
1059 struct ioc *ioc = iocg->ioc;
1060 int lvl;
1061 u32 hwa, hwi;
1062 int ioc_gen;
1063
1064 /* hot path - if uptodate, use cached */
1065 ioc_gen = atomic_read(&ioc->hweight_gen);
1066 if (ioc_gen == iocg->hweight_gen)
1067 goto out;
1068
1069 /*
00410f1b
TH
1070 * Paired with wmb in commit_weights(). If we saw the updated
1071 * hweight_gen, all the weight updates from __propagate_weights() are
1072 * visible too.
7caa4715
TH
1073 *
1074 * We can race with weight updates during calculation and get it
1075 * wrong. However, hweight_gen would have changed and a future
1076 * reader will recalculate and we're guaranteed to discard the
1077 * wrong result soon.
1078 */
1079 smp_rmb();
1080
fe20cdb5 1081 hwa = hwi = WEIGHT_ONE;
7caa4715
TH
1082 for (lvl = 0; lvl <= iocg->level - 1; lvl++) {
1083 struct ioc_gq *parent = iocg->ancestors[lvl];
1084 struct ioc_gq *child = iocg->ancestors[lvl + 1];
bd0adb91
TH
1085 u64 active_sum = READ_ONCE(parent->child_active_sum);
1086 u64 inuse_sum = READ_ONCE(parent->child_inuse_sum);
7caa4715
TH
1087 u32 active = READ_ONCE(child->active);
1088 u32 inuse = READ_ONCE(child->inuse);
1089
1090 /* we can race with deactivations and either may read as zero */
1091 if (!active_sum || !inuse_sum)
1092 continue;
1093
bd0adb91
TH
1094 active_sum = max_t(u64, active, active_sum);
1095 hwa = div64_u64((u64)hwa * active, active_sum);
7caa4715 1096
bd0adb91
TH
1097 inuse_sum = max_t(u64, inuse, inuse_sum);
1098 hwi = div64_u64((u64)hwi * inuse, inuse_sum);
7caa4715
TH
1099 }
1100
1101 iocg->hweight_active = max_t(u32, hwa, 1);
1102 iocg->hweight_inuse = max_t(u32, hwi, 1);
1103 iocg->hweight_gen = ioc_gen;
1104out:
1105 if (hw_activep)
1106 *hw_activep = iocg->hweight_active;
1107 if (hw_inusep)
1108 *hw_inusep = iocg->hweight_inuse;
1109}
1110
93f7d2db
TH
1111/*
1112 * Calculate the hweight_inuse @iocg would get with max @inuse assuming all the
1113 * other weights stay unchanged.
1114 */
1115static u32 current_hweight_max(struct ioc_gq *iocg)
1116{
1117 u32 hwm = WEIGHT_ONE;
1118 u32 inuse = iocg->active;
1119 u64 child_inuse_sum;
1120 int lvl;
1121
1122 lockdep_assert_held(&iocg->ioc->lock);
1123
1124 for (lvl = iocg->level - 1; lvl >= 0; lvl--) {
1125 struct ioc_gq *parent = iocg->ancestors[lvl];
1126 struct ioc_gq *child = iocg->ancestors[lvl + 1];
1127
1128 child_inuse_sum = parent->child_inuse_sum + inuse - child->inuse;
1129 hwm = div64_u64((u64)hwm * inuse, child_inuse_sum);
1130 inuse = DIV64_U64_ROUND_UP(parent->active * child_inuse_sum,
1131 parent->child_active_sum);
1132 }
1133
1134 return max_t(u32, hwm, 1);
1135}
1136
b0853ab4 1137static void weight_updated(struct ioc_gq *iocg, struct ioc_now *now)
7caa4715
TH
1138{
1139 struct ioc *ioc = iocg->ioc;
1140 struct blkcg_gq *blkg = iocg_to_blkg(iocg);
1141 struct ioc_cgrp *iocc = blkcg_to_iocc(blkg->blkcg);
1142 u32 weight;
1143
1144 lockdep_assert_held(&ioc->lock);
1145
1146 weight = iocg->cfg_weight ?: iocc->dfl_weight;
1147 if (weight != iocg->weight && iocg->active)
b0853ab4 1148 propagate_weights(iocg, weight, iocg->inuse, true, now);
7caa4715
TH
1149 iocg->weight = weight;
1150}
1151
1152static bool iocg_activate(struct ioc_gq *iocg, struct ioc_now *now)
1153{
1154 struct ioc *ioc = iocg->ioc;
1155 u64 last_period, cur_period, max_period_delta;
7ca5b2e6 1156 u64 vtime, vmin;
7caa4715
TH
1157 int i;
1158
1159 /*
1160 * If seem to be already active, just update the stamp to tell the
1161 * timer that we're still active. We don't mind occassional races.
1162 */
1163 if (!list_empty(&iocg->active_list)) {
1164 ioc_now(ioc, now);
1165 cur_period = atomic64_read(&ioc->cur_period);
1166 if (atomic64_read(&iocg->active_period) != cur_period)
1167 atomic64_set(&iocg->active_period, cur_period);
1168 return true;
1169 }
1170
1171 /* racy check on internal node IOs, treat as root level IOs */
1172 if (iocg->child_active_sum)
1173 return false;
1174
1175 spin_lock_irq(&ioc->lock);
1176
1177 ioc_now(ioc, now);
1178
1179 /* update period */
1180 cur_period = atomic64_read(&ioc->cur_period);
1181 last_period = atomic64_read(&iocg->active_period);
1182 atomic64_set(&iocg->active_period, cur_period);
1183
1184 /* already activated or breaking leaf-only constraint? */
8b37bc27
JX
1185 if (!list_empty(&iocg->active_list))
1186 goto succeed_unlock;
1187 for (i = iocg->level - 1; i > 0; i--)
1188 if (!list_empty(&iocg->ancestors[i]->active_list))
7caa4715 1189 goto fail_unlock;
8b37bc27 1190
7caa4715
TH
1191 if (iocg->child_active_sum)
1192 goto fail_unlock;
1193
1194 /*
1195 * vtime may wrap when vrate is raised substantially due to
1196 * underestimated IO costs. Look at the period and ignore its
1197 * vtime if the iocg has been idle for too long. Also, cap the
1198 * budget it can start with to the margin.
1199 */
1200 max_period_delta = DIV64_U64_ROUND_UP(VTIME_VALID_DUR, ioc->period_us);
1201 vtime = atomic64_read(&iocg->vtime);
7ca5b2e6 1202 vmin = now->vnow - ioc->margins.max;
7caa4715
TH
1203
1204 if (last_period + max_period_delta < cur_period ||
1205 time_before64(vtime, vmin)) {
1206 atomic64_add(vmin - vtime, &iocg->vtime);
1207 atomic64_add(vmin - vtime, &iocg->done_vtime);
1208 vtime = vmin;
1209 }
1210
1211 /*
1212 * Activate, propagate weight and start period timer if not
1213 * running. Reset hweight_gen to avoid accidental match from
1214 * wrapping.
1215 */
1216 iocg->hweight_gen = atomic_read(&ioc->hweight_gen) - 1;
1217 list_add(&iocg->active_list, &ioc->active_iocgs);
b0853ab4 1218
00410f1b 1219 propagate_weights(iocg, iocg->weight,
b0853ab4 1220 iocg->last_inuse ?: iocg->weight, true, now);
7caa4715
TH
1221
1222 TRACE_IOCG_PATH(iocg_activate, iocg, now,
1223 last_period, cur_period, vtime);
1224
1aa50d02 1225 iocg->activated_at = now->now;
7caa4715
TH
1226
1227 if (ioc->running == IOC_IDLE) {
1228 ioc->running = IOC_RUNNING;
dda1315f 1229 ioc->debt_busy_at = now->now;
7caa4715
TH
1230 ioc_start_period(ioc, now);
1231 }
1232
8b37bc27 1233succeed_unlock:
7caa4715
TH
1234 spin_unlock_irq(&ioc->lock);
1235 return true;
1236
1237fail_unlock:
1238 spin_unlock_irq(&ioc->lock);
1239 return false;
1240}
1241
6ef20f78
TH
1242static bool iocg_kick_delay(struct ioc_gq *iocg, struct ioc_now *now)
1243{
1244 struct ioc *ioc = iocg->ioc;
1245 struct blkcg_gq *blkg = iocg_to_blkg(iocg);
5160a5a5
TH
1246 u64 tdelta, delay, new_delay;
1247 s64 vover, vover_pct;
c421a3eb 1248 u32 hwa;
6ef20f78
TH
1249
1250 lockdep_assert_held(&iocg->waitq.lock);
1251
5160a5a5
TH
1252 /* calculate the current delay in effect - 1/2 every second */
1253 tdelta = now->now - iocg->delay_at;
1254 if (iocg->delay)
1255 delay = iocg->delay >> div64_u64(tdelta, USEC_PER_SEC);
1256 else
1257 delay = 0;
1258
1259 /* calculate the new delay from the debt amount */
c421a3eb 1260 current_hweight(iocg, &hwa, NULL);
5160a5a5
TH
1261 vover = atomic64_read(&iocg->vtime) +
1262 abs_cost_to_cost(iocg->abs_vdebt, hwa) - now->vnow;
1263 vover_pct = div64_s64(100 * vover, ioc->period_us * now->vrate);
1264
1265 if (vover_pct <= MIN_DELAY_THR_PCT)
1266 new_delay = 0;
1267 else if (vover_pct >= MAX_DELAY_THR_PCT)
1268 new_delay = MAX_DELAY;
1269 else
1270 new_delay = MIN_DELAY +
1271 div_u64((MAX_DELAY - MIN_DELAY) *
1272 (vover_pct - MIN_DELAY_THR_PCT),
1273 MAX_DELAY_THR_PCT - MIN_DELAY_THR_PCT);
1274
1275 /* pick the higher one and apply */
1276 if (new_delay > delay) {
1277 iocg->delay = new_delay;
1278 iocg->delay_at = now->now;
1279 delay = new_delay;
1280 }
6ef20f78 1281
5160a5a5
TH
1282 if (delay >= MIN_DELAY) {
1283 blkcg_set_delay(blkg, delay * NSEC_PER_USEC);
1284 return true;
1285 } else {
1286 iocg->delay = 0;
6ef20f78
TH
1287 blkcg_clear_delay(blkg);
1288 return false;
1289 }
6ef20f78
TH
1290}
1291
c421a3eb
TH
1292static void iocg_incur_debt(struct ioc_gq *iocg, u64 abs_cost,
1293 struct ioc_now *now)
1294{
1295 struct iocg_pcpu_stat *gcs;
1296
1297 lockdep_assert_held(&iocg->ioc->lock);
1298 lockdep_assert_held(&iocg->waitq.lock);
1299 WARN_ON_ONCE(list_empty(&iocg->active_list));
1300
1301 /*
1302 * Once in debt, debt handling owns inuse. @iocg stays at the minimum
1303 * inuse donating all of it share to others until its debt is paid off.
1304 */
1305 if (!iocg->abs_vdebt && abs_cost)
1306 propagate_weights(iocg, iocg->active, 0, false, now);
1307
1308 iocg->abs_vdebt += abs_cost;
1309
1310 gcs = get_cpu_ptr(iocg->pcpu_stat);
1311 local64_add(abs_cost, &gcs->abs_vusage);
1312 put_cpu_ptr(gcs);
1313}
1314
1315static void iocg_pay_debt(struct ioc_gq *iocg, u64 abs_vpay,
1316 struct ioc_now *now)
1317{
1318 lockdep_assert_held(&iocg->ioc->lock);
1319 lockdep_assert_held(&iocg->waitq.lock);
1320
1321 /* make sure that nobody messed with @iocg */
1322 WARN_ON_ONCE(list_empty(&iocg->active_list));
1323 WARN_ON_ONCE(iocg->inuse > 1);
1324
1325 iocg->abs_vdebt -= min(abs_vpay, iocg->abs_vdebt);
1326
1327 /* if debt is paid in full, restore inuse */
1328 if (!iocg->abs_vdebt)
1329 propagate_weights(iocg, iocg->active, iocg->last_inuse,
1330 false, now);
1331}
1332
7caa4715
TH
1333static int iocg_wake_fn(struct wait_queue_entry *wq_entry, unsigned mode,
1334 int flags, void *key)
1335{
1336 struct iocg_wait *wait = container_of(wq_entry, struct iocg_wait, wait);
1337 struct iocg_wake_ctx *ctx = (struct iocg_wake_ctx *)key;
1338 u64 cost = abs_cost_to_cost(wait->abs_cost, ctx->hw_inuse);
1339
1340 ctx->vbudget -= cost;
1341
1342 if (ctx->vbudget < 0)
1343 return -1;
1344
97eb1975 1345 iocg_commit_bio(ctx->iocg, wait->bio, wait->abs_cost, cost);
7caa4715
TH
1346
1347 /*
1348 * autoremove_wake_function() removes the wait entry only when it
1349 * actually changed the task state. We want the wait always
1350 * removed. Remove explicitly and use default_wake_function().
1351 */
1352 list_del_init(&wq_entry->entry);
1353 wait->committed = true;
1354
1355 default_wake_function(wq_entry, mode, flags, key);
1356 return 0;
1357}
1358
da437b95
TH
1359/*
1360 * Calculate the accumulated budget, pay debt if @pay_debt and wake up waiters
1361 * accordingly. When @pay_debt is %true, the caller must be holding ioc->lock in
1362 * addition to iocg->waitq.lock.
1363 */
1364static void iocg_kick_waitq(struct ioc_gq *iocg, bool pay_debt,
1365 struct ioc_now *now)
7caa4715
TH
1366{
1367 struct ioc *ioc = iocg->ioc;
1368 struct iocg_wake_ctx ctx = { .iocg = iocg };
da437b95 1369 u64 vshortage, expires, oexpires;
36a52481 1370 s64 vbudget;
c421a3eb 1371 u32 hwa;
7caa4715
TH
1372
1373 lockdep_assert_held(&iocg->waitq.lock);
1374
c421a3eb 1375 current_hweight(iocg, &hwa, NULL);
36a52481
TH
1376 vbudget = now->vnow - atomic64_read(&iocg->vtime);
1377
1378 /* pay off debt */
da437b95 1379 if (pay_debt && iocg->abs_vdebt && vbudget > 0) {
c421a3eb
TH
1380 u64 abs_vbudget = cost_to_abs_cost(vbudget, hwa);
1381 u64 abs_vpay = min_t(u64, abs_vbudget, iocg->abs_vdebt);
1382 u64 vpay = abs_cost_to_cost(abs_vpay, hwa);
36a52481 1383
da437b95
TH
1384 lockdep_assert_held(&ioc->lock);
1385
c421a3eb
TH
1386 atomic64_add(vpay, &iocg->vtime);
1387 atomic64_add(vpay, &iocg->done_vtime);
1388 iocg_pay_debt(iocg, abs_vpay, now);
1389 vbudget -= vpay;
5160a5a5 1390 }
7b84b49e 1391
5160a5a5 1392 if (iocg->abs_vdebt || iocg->delay)
7b84b49e 1393 iocg_kick_delay(iocg, now);
36a52481 1394
da437b95
TH
1395 /*
1396 * Debt can still be outstanding if we haven't paid all yet or the
1397 * caller raced and called without @pay_debt. Shouldn't wake up waiters
1398 * under debt. Make sure @vbudget reflects the outstanding amount and is
1399 * not positive.
1400 */
1401 if (iocg->abs_vdebt) {
c421a3eb 1402 s64 vdebt = abs_cost_to_cost(iocg->abs_vdebt, hwa);
da437b95
TH
1403 vbudget = min_t(s64, 0, vbudget - vdebt);
1404 }
1405
7caa4715 1406 /*
c421a3eb
TH
1407 * Wake up the ones which are due and see how much vtime we'll need for
1408 * the next one. As paying off debt restores hw_inuse, it must be read
1409 * after the above debt payment.
7caa4715 1410 */
da437b95 1411 ctx.vbudget = vbudget;
c421a3eb
TH
1412 current_hweight(iocg, NULL, &ctx.hw_inuse);
1413
7caa4715 1414 __wake_up_locked_key(&iocg->waitq, TASK_NORMAL, &ctx);
c421a3eb 1415
7caa4715
TH
1416 if (!waitqueue_active(&iocg->waitq))
1417 return;
1418 if (WARN_ON_ONCE(ctx.vbudget >= 0))
1419 return;
1420
7ca5b2e6 1421 /* determine next wakeup, add a timer margin to guarantee chunking */
7caa4715
TH
1422 vshortage = -ctx.vbudget;
1423 expires = now->now_ns +
1424 DIV64_U64_ROUND_UP(vshortage, now->vrate) * NSEC_PER_USEC;
7ca5b2e6 1425 expires += ioc->timer_slack_ns;
7caa4715
TH
1426
1427 /* if already active and close enough, don't bother */
1428 oexpires = ktime_to_ns(hrtimer_get_softexpires(&iocg->waitq_timer));
1429 if (hrtimer_is_queued(&iocg->waitq_timer) &&
7ca5b2e6 1430 abs(oexpires - expires) <= ioc->timer_slack_ns)
7caa4715
TH
1431 return;
1432
1433 hrtimer_start_range_ns(&iocg->waitq_timer, ns_to_ktime(expires),
7ca5b2e6 1434 ioc->timer_slack_ns, HRTIMER_MODE_ABS);
7caa4715
TH
1435}
1436
1437static enum hrtimer_restart iocg_waitq_timer_fn(struct hrtimer *timer)
1438{
1439 struct ioc_gq *iocg = container_of(timer, struct ioc_gq, waitq_timer);
da437b95 1440 bool pay_debt = READ_ONCE(iocg->abs_vdebt);
7caa4715
TH
1441 struct ioc_now now;
1442 unsigned long flags;
1443
1444 ioc_now(iocg->ioc, &now);
1445
da437b95
TH
1446 iocg_lock(iocg, pay_debt, &flags);
1447 iocg_kick_waitq(iocg, pay_debt, &now);
1448 iocg_unlock(iocg, pay_debt, &flags);
7caa4715
TH
1449
1450 return HRTIMER_NORESTART;
1451}
1452
7caa4715
TH
1453static void ioc_lat_stat(struct ioc *ioc, u32 *missed_ppm_ar, u32 *rq_wait_pct_p)
1454{
1455 u32 nr_met[2] = { };
1456 u32 nr_missed[2] = { };
1457 u64 rq_wait_ns = 0;
1458 int cpu, rw;
1459
1460 for_each_online_cpu(cpu) {
1461 struct ioc_pcpu_stat *stat = per_cpu_ptr(ioc->pcpu_stat, cpu);
1462 u64 this_rq_wait_ns;
1463
1464 for (rw = READ; rw <= WRITE; rw++) {
5e124f74
TH
1465 u32 this_met = local_read(&stat->missed[rw].nr_met);
1466 u32 this_missed = local_read(&stat->missed[rw].nr_missed);
7caa4715
TH
1467
1468 nr_met[rw] += this_met - stat->missed[rw].last_met;
1469 nr_missed[rw] += this_missed - stat->missed[rw].last_missed;
1470 stat->missed[rw].last_met = this_met;
1471 stat->missed[rw].last_missed = this_missed;
1472 }
1473
5e124f74 1474 this_rq_wait_ns = local64_read(&stat->rq_wait_ns);
7caa4715
TH
1475 rq_wait_ns += this_rq_wait_ns - stat->last_rq_wait_ns;
1476 stat->last_rq_wait_ns = this_rq_wait_ns;
1477 }
1478
1479 for (rw = READ; rw <= WRITE; rw++) {
1480 if (nr_met[rw] + nr_missed[rw])
1481 missed_ppm_ar[rw] =
1482 DIV64_U64_ROUND_UP((u64)nr_missed[rw] * MILLION,
1483 nr_met[rw] + nr_missed[rw]);
1484 else
1485 missed_ppm_ar[rw] = 0;
1486 }
1487
1488 *rq_wait_pct_p = div64_u64(rq_wait_ns * 100,
1489 ioc->period_us * NSEC_PER_USEC);
1490}
1491
1492/* was iocg idle this period? */
1493static bool iocg_is_idle(struct ioc_gq *iocg)
1494{
1495 struct ioc *ioc = iocg->ioc;
1496
1497 /* did something get issued this period? */
1498 if (atomic64_read(&iocg->active_period) ==
1499 atomic64_read(&ioc->cur_period))
1500 return false;
1501
1502 /* is something in flight? */
dcd6589b 1503 if (atomic64_read(&iocg->done_vtime) != atomic64_read(&iocg->vtime))
7caa4715
TH
1504 return false;
1505
1506 return true;
1507}
1508
97eb1975
TH
1509/*
1510 * Call this function on the target leaf @iocg's to build pre-order traversal
1511 * list of all the ancestors in @inner_walk. The inner nodes are linked through
1512 * ->walk_list and the caller is responsible for dissolving the list after use.
1513 */
1514static void iocg_build_inner_walk(struct ioc_gq *iocg,
1515 struct list_head *inner_walk)
1516{
1517 int lvl;
1518
1519 WARN_ON_ONCE(!list_empty(&iocg->walk_list));
1520
1521 /* find the first ancestor which hasn't been visited yet */
1522 for (lvl = iocg->level - 1; lvl >= 0; lvl--) {
1523 if (!list_empty(&iocg->ancestors[lvl]->walk_list))
1524 break;
1525 }
1526
1527 /* walk down and visit the inner nodes to get pre-order traversal */
1528 while (++lvl <= iocg->level - 1) {
1529 struct ioc_gq *inner = iocg->ancestors[lvl];
1530
1531 /* record traversal order */
1532 list_add_tail(&inner->walk_list, inner_walk);
1533 }
1534}
1535
1536/* collect per-cpu counters and propagate the deltas to the parent */
1537static void iocg_flush_stat_one(struct ioc_gq *iocg, struct ioc_now *now)
1538{
1539 struct iocg_stat new_stat;
1540 u64 abs_vusage = 0;
1541 u64 vusage_delta;
1542 int cpu;
1543
1544 lockdep_assert_held(&iocg->ioc->lock);
1545
1546 /* collect per-cpu counters */
1547 for_each_possible_cpu(cpu) {
1548 abs_vusage += local64_read(
1549 per_cpu_ptr(&iocg->pcpu_stat->abs_vusage, cpu));
1550 }
1551 vusage_delta = abs_vusage - iocg->last_stat_abs_vusage;
1552 iocg->last_stat_abs_vusage = abs_vusage;
1553
1aa50d02
TH
1554 iocg->usage_delta_us = div64_u64(vusage_delta, now->vrate);
1555 iocg->local_stat.usage_us += iocg->usage_delta_us;
97eb1975
TH
1556
1557 new_stat.usage_us =
1558 iocg->local_stat.usage_us + iocg->desc_stat.usage_us;
1559
1560 /* propagate the deltas to the parent */
1561 if (iocg->level > 0) {
1562 struct iocg_stat *parent_stat =
1563 &iocg->ancestors[iocg->level - 1]->desc_stat;
1564
1565 parent_stat->usage_us +=
1566 new_stat.usage_us - iocg->last_stat.usage_us;
1567 }
1568
1569 iocg->last_stat = new_stat;
1570}
1571
1572/* get stat counters ready for reading on all active iocgs */
1573static void iocg_flush_stat(struct list_head *target_iocgs, struct ioc_now *now)
1574{
1575 LIST_HEAD(inner_walk);
1576 struct ioc_gq *iocg, *tiocg;
1577
1578 /* flush leaves and build inner node walk list */
1579 list_for_each_entry(iocg, target_iocgs, active_list) {
1580 iocg_flush_stat_one(iocg, now);
1581 iocg_build_inner_walk(iocg, &inner_walk);
1582 }
1583
1584 /* keep flushing upwards by walking the inner list backwards */
1585 list_for_each_entry_safe_reverse(iocg, tiocg, &inner_walk, walk_list) {
1586 iocg_flush_stat_one(iocg, now);
1587 list_del_init(&iocg->walk_list);
1588 }
1589}
1590
93f7d2db
TH
1591/*
1592 * Determine what @iocg's hweight_inuse should be after donating unused
1593 * capacity. @hwm is the upper bound and used to signal no donation. This
1594 * function also throws away @iocg's excess budget.
1595 */
1596static u32 hweight_after_donation(struct ioc_gq *iocg, u32 hwm, u32 usage,
1597 struct ioc_now *now)
7caa4715 1598{
93f7d2db
TH
1599 struct ioc *ioc = iocg->ioc;
1600 u64 vtime = atomic64_read(&iocg->vtime);
f1de2439 1601 s64 excess, delta, target, new_hwi;
93f7d2db 1602
c421a3eb
TH
1603 /* debt handling owns inuse for debtors */
1604 if (iocg->abs_vdebt)
1605 return 1;
1606
93f7d2db
TH
1607 /* see whether minimum margin requirement is met */
1608 if (waitqueue_active(&iocg->waitq) ||
1609 time_after64(vtime, now->vnow - ioc->margins.min))
1610 return hwm;
1611
1612 /* throw away excess above max */
1613 excess = now->vnow - vtime - ioc->margins.max;
1614 if (excess > 0) {
1615 atomic64_add(excess, &iocg->vtime);
1616 atomic64_add(excess, &iocg->done_vtime);
1617 vtime += excess;
1618 }
1619
f1de2439
TH
1620 /*
1621 * Let's say the distance between iocg's and device's vtimes as a
1622 * fraction of period duration is delta. Assuming that the iocg will
1623 * consume the usage determined above, we want to determine new_hwi so
1624 * that delta equals MARGIN_TARGET at the end of the next period.
1625 *
1626 * We need to execute usage worth of IOs while spending the sum of the
1627 * new budget (1 - MARGIN_TARGET) and the leftover from the last period
1628 * (delta):
1629 *
1630 * usage = (1 - MARGIN_TARGET + delta) * new_hwi
1631 *
1632 * Therefore, the new_hwi is:
1633 *
1634 * new_hwi = usage / (1 - MARGIN_TARGET + delta)
1635 */
1636 delta = div64_s64(WEIGHT_ONE * (now->vnow - vtime),
1637 now->vnow - ioc->period_at_vtime);
1638 target = WEIGHT_ONE * MARGIN_TARGET_PCT / 100;
1639 new_hwi = div64_s64(WEIGHT_ONE * usage, WEIGHT_ONE - target + delta);
7caa4715 1640
f1de2439 1641 return clamp_t(s64, new_hwi, 1, hwm);
7caa4715
TH
1642}
1643
e08d02aa
TH
1644/*
1645 * For work-conservation, an iocg which isn't using all of its share should
1646 * donate the leftover to other iocgs. There are two ways to achieve this - 1.
1647 * bumping up vrate accordingly 2. lowering the donating iocg's inuse weight.
1648 *
1649 * #1 is mathematically simpler but has the drawback of requiring synchronous
1650 * global hweight_inuse updates when idle iocg's get activated or inuse weights
1651 * change due to donation snapbacks as it has the possibility of grossly
1652 * overshooting what's allowed by the model and vrate.
1653 *
1654 * #2 is inherently safe with local operations. The donating iocg can easily
1655 * snap back to higher weights when needed without worrying about impacts on
1656 * other nodes as the impacts will be inherently correct. This also makes idle
1657 * iocg activations safe. The only effect activations have is decreasing
1658 * hweight_inuse of others, the right solution to which is for those iocgs to
1659 * snap back to higher weights.
1660 *
1661 * So, we go with #2. The challenge is calculating how each donating iocg's
1662 * inuse should be adjusted to achieve the target donation amounts. This is done
1663 * using Andy's method described in the following pdf.
1664 *
1665 * https://drive.google.com/file/d/1PsJwxPFtjUnwOY1QJ5AeICCcsL7BM3bo
1666 *
1667 * Given the weights and target after-donation hweight_inuse values, Andy's
1668 * method determines how the proportional distribution should look like at each
1669 * sibling level to maintain the relative relationship between all non-donating
1670 * pairs. To roughly summarize, it divides the tree into donating and
1671 * non-donating parts, calculates global donation rate which is used to
1672 * determine the target hweight_inuse for each node, and then derives per-level
1673 * proportions.
1674 *
1675 * The following pdf shows that global distribution calculated this way can be
1676 * achieved by scaling inuse weights of donating leaves and propagating the
1677 * adjustments upwards proportionally.
1678 *
1679 * https://drive.google.com/file/d/1vONz1-fzVO7oY5DXXsLjSxEtYYQbOvsE
1680 *
1681 * Combining the above two, we can determine how each leaf iocg's inuse should
1682 * be adjusted to achieve the target donation.
1683 *
1684 * https://drive.google.com/file/d/1WcrltBOSPN0qXVdBgnKm4mdp9FhuEFQN
1685 *
1686 * The inline comments use symbols from the last pdf.
1687 *
1688 * b is the sum of the absolute budgets in the subtree. 1 for the root node.
1689 * f is the sum of the absolute budgets of non-donating nodes in the subtree.
1690 * t is the sum of the absolute budgets of donating nodes in the subtree.
1691 * w is the weight of the node. w = w_f + w_t
1692 * w_f is the non-donating portion of w. w_f = w * f / b
1693 * w_b is the donating portion of w. w_t = w * t / b
1694 * s is the sum of all sibling weights. s = Sum(w) for siblings
1695 * s_f and s_t are the non-donating and donating portions of s.
1696 *
1697 * Subscript p denotes the parent's counterpart and ' the adjusted value - e.g.
1698 * w_pt is the donating portion of the parent's weight and w'_pt the same value
1699 * after adjustments. Subscript r denotes the root node's values.
1700 */
93f7d2db
TH
1701static void transfer_surpluses(struct list_head *surpluses, struct ioc_now *now)
1702{
e08d02aa
TH
1703 LIST_HEAD(over_hwa);
1704 LIST_HEAD(inner_walk);
1705 struct ioc_gq *iocg, *tiocg, *root_iocg;
1706 u32 after_sum, over_sum, over_target, gamma;
93f7d2db 1707
e08d02aa
TH
1708 /*
1709 * It's pretty unlikely but possible for the total sum of
1710 * hweight_after_donation's to be higher than WEIGHT_ONE, which will
1711 * confuse the following calculations. If such condition is detected,
1712 * scale down everyone over its full share equally to keep the sum below
1713 * WEIGHT_ONE.
1714 */
1715 after_sum = 0;
1716 over_sum = 0;
93f7d2db 1717 list_for_each_entry(iocg, surpluses, surplus_list) {
e08d02aa 1718 u32 hwa;
93f7d2db 1719
e08d02aa
TH
1720 current_hweight(iocg, &hwa, NULL);
1721 after_sum += iocg->hweight_after_donation;
93f7d2db 1722
e08d02aa
TH
1723 if (iocg->hweight_after_donation > hwa) {
1724 over_sum += iocg->hweight_after_donation;
1725 list_add(&iocg->walk_list, &over_hwa);
1726 }
93f7d2db 1727 }
e08d02aa
TH
1728
1729 if (after_sum >= WEIGHT_ONE) {
1730 /*
1731 * The delta should be deducted from the over_sum, calculate
1732 * target over_sum value.
1733 */
1734 u32 over_delta = after_sum - (WEIGHT_ONE - 1);
1735 WARN_ON_ONCE(over_sum <= over_delta);
1736 over_target = over_sum - over_delta;
1737 } else {
1738 over_target = 0;
1739 }
1740
1741 list_for_each_entry_safe(iocg, tiocg, &over_hwa, walk_list) {
1742 if (over_target)
1743 iocg->hweight_after_donation =
1744 div_u64((u64)iocg->hweight_after_donation *
1745 over_target, over_sum);
1746 list_del_init(&iocg->walk_list);
1747 }
1748
1749 /*
1750 * Build pre-order inner node walk list and prepare for donation
1751 * adjustment calculations.
1752 */
1753 list_for_each_entry(iocg, surpluses, surplus_list) {
1754 iocg_build_inner_walk(iocg, &inner_walk);
1755 }
1756
1757 root_iocg = list_first_entry(&inner_walk, struct ioc_gq, walk_list);
1758 WARN_ON_ONCE(root_iocg->level > 0);
1759
1760 list_for_each_entry(iocg, &inner_walk, walk_list) {
1761 iocg->child_adjusted_sum = 0;
1762 iocg->hweight_donating = 0;
1763 iocg->hweight_after_donation = 0;
1764 }
1765
1766 /*
1767 * Propagate the donating budget (b_t) and after donation budget (b'_t)
1768 * up the hierarchy.
1769 */
1770 list_for_each_entry(iocg, surpluses, surplus_list) {
1771 struct ioc_gq *parent = iocg->ancestors[iocg->level - 1];
1772
1773 parent->hweight_donating += iocg->hweight_donating;
1774 parent->hweight_after_donation += iocg->hweight_after_donation;
1775 }
1776
1777 list_for_each_entry_reverse(iocg, &inner_walk, walk_list) {
1778 if (iocg->level > 0) {
1779 struct ioc_gq *parent = iocg->ancestors[iocg->level - 1];
1780
1781 parent->hweight_donating += iocg->hweight_donating;
1782 parent->hweight_after_donation += iocg->hweight_after_donation;
1783 }
1784 }
1785
1786 /*
1787 * Calculate inner hwa's (b) and make sure the donation values are
1788 * within the accepted ranges as we're doing low res calculations with
1789 * roundups.
1790 */
1791 list_for_each_entry(iocg, &inner_walk, walk_list) {
1792 if (iocg->level) {
1793 struct ioc_gq *parent = iocg->ancestors[iocg->level - 1];
1794
1795 iocg->hweight_active = DIV64_U64_ROUND_UP(
1796 (u64)parent->hweight_active * iocg->active,
1797 parent->child_active_sum);
1798
1799 }
1800
1801 iocg->hweight_donating = min(iocg->hweight_donating,
1802 iocg->hweight_active);
1803 iocg->hweight_after_donation = min(iocg->hweight_after_donation,
1804 iocg->hweight_donating - 1);
1805 if (WARN_ON_ONCE(iocg->hweight_active <= 1 ||
1806 iocg->hweight_donating <= 1 ||
1807 iocg->hweight_after_donation == 0)) {
1808 pr_warn("iocg: invalid donation weights in ");
1809 pr_cont_cgroup_path(iocg_to_blkg(iocg)->blkcg->css.cgroup);
1810 pr_cont(": active=%u donating=%u after=%u\n",
1811 iocg->hweight_active, iocg->hweight_donating,
1812 iocg->hweight_after_donation);
1813 }
1814 }
1815
1816 /*
1817 * Calculate the global donation rate (gamma) - the rate to adjust
1818 * non-donating budgets by. No need to use 64bit multiplication here as
1819 * the first operand is guaranteed to be smaller than WEIGHT_ONE
1820 * (1<<16).
1821 *
1822 * gamma = (1 - t_r') / (1 - t_r)
1823 */
1824 gamma = DIV_ROUND_UP(
1825 (WEIGHT_ONE - root_iocg->hweight_after_donation) * WEIGHT_ONE,
1826 WEIGHT_ONE - root_iocg->hweight_donating);
1827
1828 /*
1829 * Calculate adjusted hwi, child_adjusted_sum and inuse for the inner
1830 * nodes.
1831 */
1832 list_for_each_entry(iocg, &inner_walk, walk_list) {
1833 struct ioc_gq *parent;
1834 u32 inuse, wpt, wptp;
1835 u64 st, sf;
1836
1837 if (iocg->level == 0) {
1838 /* adjusted weight sum for 1st level: s' = s * b_pf / b'_pf */
1839 iocg->child_adjusted_sum = DIV64_U64_ROUND_UP(
1840 iocg->child_active_sum * (WEIGHT_ONE - iocg->hweight_donating),
1841 WEIGHT_ONE - iocg->hweight_after_donation);
1842 continue;
1843 }
1844
1845 parent = iocg->ancestors[iocg->level - 1];
1846
1847 /* b' = gamma * b_f + b_t' */
1848 iocg->hweight_inuse = DIV64_U64_ROUND_UP(
1849 (u64)gamma * (iocg->hweight_active - iocg->hweight_donating),
1850 WEIGHT_ONE) + iocg->hweight_after_donation;
1851
1852 /* w' = s' * b' / b'_p */
1853 inuse = DIV64_U64_ROUND_UP(
1854 (u64)parent->child_adjusted_sum * iocg->hweight_inuse,
1855 parent->hweight_inuse);
1856
1857 /* adjusted weight sum for children: s' = s_f + s_t * w'_pt / w_pt */
1858 st = DIV64_U64_ROUND_UP(
1859 iocg->child_active_sum * iocg->hweight_donating,
1860 iocg->hweight_active);
1861 sf = iocg->child_active_sum - st;
1862 wpt = DIV64_U64_ROUND_UP(
1863 (u64)iocg->active * iocg->hweight_donating,
1864 iocg->hweight_active);
1865 wptp = DIV64_U64_ROUND_UP(
1866 (u64)inuse * iocg->hweight_after_donation,
1867 iocg->hweight_inuse);
1868
1869 iocg->child_adjusted_sum = sf + DIV64_U64_ROUND_UP(st * wptp, wpt);
1870 }
1871
1872 /*
1873 * All inner nodes now have ->hweight_inuse and ->child_adjusted_sum and
1874 * we can finally determine leaf adjustments.
1875 */
1876 list_for_each_entry(iocg, surpluses, surplus_list) {
1877 struct ioc_gq *parent = iocg->ancestors[iocg->level - 1];
1878 u32 inuse;
1879
c421a3eb
TH
1880 /*
1881 * In-debt iocgs participated in the donation calculation with
1882 * the minimum target hweight_inuse. Configuring inuse
1883 * accordingly would work fine but debt handling expects
1884 * @iocg->inuse stay at the minimum and we don't wanna
1885 * interfere.
1886 */
1887 if (iocg->abs_vdebt) {
1888 WARN_ON_ONCE(iocg->inuse > 1);
1889 continue;
1890 }
1891
e08d02aa
TH
1892 /* w' = s' * b' / b'_p, note that b' == b'_t for donating leaves */
1893 inuse = DIV64_U64_ROUND_UP(
1894 parent->child_adjusted_sum * iocg->hweight_after_donation,
1895 parent->hweight_inuse);
b0853ab4 1896 __propagate_weights(iocg, iocg->active, inuse, true, now);
e08d02aa
TH
1897 }
1898
1899 /* walk list should be dissolved after use */
1900 list_for_each_entry_safe(iocg, tiocg, &inner_walk, walk_list)
1901 list_del_init(&iocg->walk_list);
93f7d2db
TH
1902}
1903
7caa4715
TH
1904static void ioc_timer_fn(struct timer_list *timer)
1905{
1906 struct ioc *ioc = container_of(timer, struct ioc, timer);
1907 struct ioc_gq *iocg, *tiocg;
1908 struct ioc_now now;
8692d2db 1909 LIST_HEAD(surpluses);
dda1315f
TH
1910 int nr_debtors = 0, nr_shortages = 0, nr_lagging = 0;
1911 u64 usage_us_sum = 0;
7caa4715
TH
1912 u32 ppm_rthr = MILLION - ioc->params.qos[QOS_RPPM];
1913 u32 ppm_wthr = MILLION - ioc->params.qos[QOS_WPPM];
1914 u32 missed_ppm[2], rq_wait_pct;
1915 u64 period_vtime;
f1de2439 1916 int prev_busy_level;
7caa4715
TH
1917
1918 /* how were the latencies during the period? */
1919 ioc_lat_stat(ioc, missed_ppm, &rq_wait_pct);
1920
1921 /* take care of active iocgs */
1922 spin_lock_irq(&ioc->lock);
1923
1924 ioc_now(ioc, &now);
1925
1926 period_vtime = now.vnow - ioc->period_at_vtime;
1927 if (WARN_ON_ONCE(!period_vtime)) {
1928 spin_unlock_irq(&ioc->lock);
1929 return;
1930 }
1931
97eb1975
TH
1932 iocg_flush_stat(&ioc->active_iocgs, &now);
1933
7caa4715
TH
1934 /*
1935 * Waiters determine the sleep durations based on the vrate they
1936 * saw at the time of sleep. If vrate has increased, some waiters
1937 * could be sleeping for too long. Wake up tardy waiters which
1938 * should have woken up in the last period and expire idle iocgs.
1939 */
1940 list_for_each_entry_safe(iocg, tiocg, &ioc->active_iocgs, active_list) {
d9012a59 1941 if (!waitqueue_active(&iocg->waitq) && !iocg->abs_vdebt &&
5160a5a5 1942 !iocg->delay && !iocg_is_idle(iocg))
7caa4715
TH
1943 continue;
1944
1945 spin_lock(&iocg->waitq.lock);
1946
5160a5a5
TH
1947 if (waitqueue_active(&iocg->waitq) || iocg->abs_vdebt ||
1948 iocg->delay) {
7caa4715 1949 /* might be oversleeping vtime / hweight changes, kick */
da437b95 1950 iocg_kick_waitq(iocg, true, &now);
dda1315f
TH
1951 if (iocg->abs_vdebt)
1952 nr_debtors++;
7caa4715
TH
1953 } else if (iocg_is_idle(iocg)) {
1954 /* no waiter and idle, deactivate */
b0853ab4 1955 __propagate_weights(iocg, 0, 0, false, &now);
7caa4715
TH
1956 list_del_init(&iocg->active_list);
1957 }
1958
1959 spin_unlock(&iocg->waitq.lock);
1960 }
00410f1b 1961 commit_weights(ioc);
7caa4715 1962
f1de2439 1963 /* calc usage and see whether some weights need to be moved around */
7caa4715 1964 list_for_each_entry(iocg, &ioc->active_iocgs, active_list) {
f1de2439
TH
1965 u64 vdone, vtime, usage_us, usage_dur;
1966 u32 usage, hw_active, hw_inuse;
7caa4715
TH
1967
1968 /*
1969 * Collect unused and wind vtime closer to vnow to prevent
1970 * iocgs from accumulating a large amount of budget.
1971 */
1972 vdone = atomic64_read(&iocg->done_vtime);
1973 vtime = atomic64_read(&iocg->vtime);
1974 current_hweight(iocg, &hw_active, &hw_inuse);
1975
1976 /*
1977 * Latency QoS detection doesn't account for IOs which are
1978 * in-flight for longer than a period. Detect them by
1979 * comparing vdone against period start. If lagging behind
1980 * IOs from past periods, don't increase vrate.
1981 */
7cd806a9
TH
1982 if ((ppm_rthr != MILLION || ppm_wthr != MILLION) &&
1983 !atomic_read(&iocg_to_blkg(iocg)->use_delay) &&
7caa4715
TH
1984 time_after64(vtime, vdone) &&
1985 time_after64(vtime, now.vnow -
1986 MAX_LAGGING_PERIODS * period_vtime) &&
1987 time_before64(vdone, now.vnow - period_vtime))
1988 nr_lagging++;
1989
7caa4715 1990 /*
f1de2439
TH
1991 * Determine absolute usage factoring in in-flight IOs to avoid
1992 * high-latency completions appearing as idle.
7caa4715 1993 */
1aa50d02 1994 usage_us = iocg->usage_delta_us;
dda1315f 1995 usage_us_sum += usage_us;
f1de2439 1996
1aa50d02
TH
1997 if (vdone != vtime) {
1998 u64 inflight_us = DIV64_U64_ROUND_UP(
1999 cost_to_abs_cost(vtime - vdone, hw_inuse),
2000 now.vrate);
2001 usage_us = max(usage_us, inflight_us);
2002 }
2003
f1de2439
TH
2004 /* convert to hweight based usage ratio */
2005 if (time_after64(iocg->activated_at, ioc->period_at))
2006 usage_dur = max_t(u64, now.now - iocg->activated_at, 1);
2007 else
2008 usage_dur = max_t(u64, now.now - ioc->period_at, 1);
93f7d2db 2009
f1de2439
TH
2010 usage = clamp_t(u32,
2011 DIV64_U64_ROUND_UP(usage_us * WEIGHT_ONE,
2012 usage_dur),
1aa50d02 2013 1, WEIGHT_ONE);
7caa4715
TH
2014
2015 /* see whether there's surplus vtime */
8692d2db 2016 WARN_ON_ONCE(!list_empty(&iocg->surplus_list));
93f7d2db
TH
2017 if (hw_inuse < hw_active ||
2018 (!waitqueue_active(&iocg->waitq) &&
f1de2439 2019 time_before64(vtime, now.vnow - ioc->margins.low))) {
e08d02aa 2020 u32 hwa, hwm, new_hwi;
93f7d2db
TH
2021
2022 /*
2023 * Already donating or accumulated enough to start.
2024 * Determine the donation amount.
2025 */
e08d02aa 2026 current_hweight(iocg, &hwa, NULL);
93f7d2db
TH
2027 hwm = current_hweight_max(iocg);
2028 new_hwi = hweight_after_donation(iocg, hwm, usage,
2029 &now);
2030 if (new_hwi < hwm) {
e08d02aa 2031 iocg->hweight_donating = hwa;
93f7d2db 2032 iocg->hweight_after_donation = new_hwi;
8692d2db 2033 list_add(&iocg->surplus_list, &surpluses);
7caa4715 2034 } else {
93f7d2db 2035 __propagate_weights(iocg, iocg->active,
b0853ab4 2036 iocg->active, true, &now);
93f7d2db 2037 nr_shortages++;
7caa4715
TH
2038 }
2039 } else {
93f7d2db 2040 /* genuinely short on vtime */
7caa4715
TH
2041 nr_shortages++;
2042 }
2043 }
2044
93f7d2db
TH
2045 if (!list_empty(&surpluses) && nr_shortages)
2046 transfer_surpluses(&surpluses, &now);
7caa4715 2047
00410f1b 2048 commit_weights(ioc);
7caa4715 2049
8692d2db
TH
2050 /* surplus list should be dissolved after use */
2051 list_for_each_entry_safe(iocg, tiocg, &surpluses, surplus_list)
2052 list_del_init(&iocg->surplus_list);
2053
dda1315f
TH
2054 /*
2055 * A low weight iocg can amass a large amount of debt, for example, when
2056 * anonymous memory gets reclaimed aggressively. If the system has a lot
2057 * of memory paired with a slow IO device, the debt can span multiple
2058 * seconds or more. If there are no other subsequent IO issuers, the
2059 * in-debt iocg may end up blocked paying its debt while the IO device
2060 * is idle.
2061 *
2062 * The following protects against such pathological cases. If the device
2063 * has been sufficiently idle for a substantial amount of time, the
2064 * debts are halved. The criteria are on the conservative side as we
2065 * want to resolve the rare extreme cases without impacting regular
2066 * operation by forgiving debts too readily.
2067 */
2068 if (nr_shortages ||
2069 div64_u64(100 * usage_us_sum, now.now - ioc->period_at) >=
2070 DEBT_BUSY_USAGE_PCT)
2071 ioc->debt_busy_at = now.now;
2072
2073 if (nr_debtors &&
2074 now.now - ioc->debt_busy_at >= DEBT_REDUCTION_IDLE_DUR) {
2075 list_for_each_entry(iocg, &ioc->active_iocgs, active_list) {
2076 if (iocg->abs_vdebt) {
2077 spin_lock(&iocg->waitq.lock);
2078 iocg->abs_vdebt /= 2;
2079 iocg_kick_waitq(iocg, true, &now);
2080 spin_unlock(&iocg->waitq.lock);
2081 }
2082 }
2083 ioc->debt_busy_at = now.now;
2084 }
2085
7caa4715
TH
2086 /*
2087 * If q is getting clogged or we're missing too much, we're issuing
2088 * too much IO and should lower vtime rate. If we're not missing
2089 * and experiencing shortages but not surpluses, we're too stingy
2090 * and should increase vtime rate.
2091 */
25d41e4a 2092 prev_busy_level = ioc->busy_level;
7caa4715
TH
2093 if (rq_wait_pct > RQ_WAIT_BUSY_PCT ||
2094 missed_ppm[READ] > ppm_rthr ||
2095 missed_ppm[WRITE] > ppm_wthr) {
81ca627a 2096 /* clearly missing QoS targets, slow down vrate */
7caa4715
TH
2097 ioc->busy_level = max(ioc->busy_level, 0);
2098 ioc->busy_level++;
7cd806a9 2099 } else if (rq_wait_pct <= RQ_WAIT_BUSY_PCT * UNBUSY_THR_PCT / 100 &&
7caa4715
TH
2100 missed_ppm[READ] <= ppm_rthr * UNBUSY_THR_PCT / 100 &&
2101 missed_ppm[WRITE] <= ppm_wthr * UNBUSY_THR_PCT / 100) {
81ca627a
TH
2102 /* QoS targets are being met with >25% margin */
2103 if (nr_shortages) {
2104 /*
2105 * We're throttling while the device has spare
2106 * capacity. If vrate was being slowed down, stop.
2107 */
7cd806a9 2108 ioc->busy_level = min(ioc->busy_level, 0);
81ca627a
TH
2109
2110 /*
2111 * If there are IOs spanning multiple periods, wait
065655c8 2112 * them out before pushing the device harder.
81ca627a 2113 */
065655c8 2114 if (!nr_lagging)
7cd806a9 2115 ioc->busy_level--;
81ca627a
TH
2116 } else {
2117 /*
2118 * Nobody is being throttled and the users aren't
2119 * issuing enough IOs to saturate the device. We
2120 * simply don't know how close the device is to
2121 * saturation. Coast.
2122 */
2123 ioc->busy_level = 0;
7cd806a9 2124 }
7caa4715 2125 } else {
81ca627a 2126 /* inside the hysterisis margin, we're good */
7caa4715
TH
2127 ioc->busy_level = 0;
2128 }
2129
2130 ioc->busy_level = clamp(ioc->busy_level, -1000, 1000);
2131
7cd806a9 2132 if (ioc->busy_level > 0 || (ioc->busy_level < 0 && !nr_lagging)) {
7caa4715
TH
2133 u64 vrate = atomic64_read(&ioc->vtime_rate);
2134 u64 vrate_min = ioc->vrate_min, vrate_max = ioc->vrate_max;
2135
2136 /* rq_wait signal is always reliable, ignore user vrate_min */
2137 if (rq_wait_pct > RQ_WAIT_BUSY_PCT)
2138 vrate_min = VRATE_MIN;
2139
2140 /*
2141 * If vrate is out of bounds, apply clamp gradually as the
2142 * bounds can change abruptly. Otherwise, apply busy_level
2143 * based adjustment.
2144 */
2145 if (vrate < vrate_min) {
2146 vrate = div64_u64(vrate * (100 + VRATE_CLAMP_ADJ_PCT),
2147 100);
2148 vrate = min(vrate, vrate_min);
2149 } else if (vrate > vrate_max) {
2150 vrate = div64_u64(vrate * (100 - VRATE_CLAMP_ADJ_PCT),
2151 100);
2152 vrate = max(vrate, vrate_max);
2153 } else {
2154 int idx = min_t(int, abs(ioc->busy_level),
2155 ARRAY_SIZE(vrate_adj_pct) - 1);
2156 u32 adj_pct = vrate_adj_pct[idx];
2157
2158 if (ioc->busy_level > 0)
2159 adj_pct = 100 - adj_pct;
2160 else
2161 adj_pct = 100 + adj_pct;
2162
2163 vrate = clamp(DIV64_U64_ROUND_UP(vrate * adj_pct, 100),
2164 vrate_min, vrate_max);
2165 }
2166
d6c8e949 2167 trace_iocost_ioc_vrate_adj(ioc, vrate, missed_ppm, rq_wait_pct,
065655c8 2168 nr_lagging, nr_shortages);
7caa4715
TH
2169
2170 atomic64_set(&ioc->vtime_rate, vrate);
7ca5b2e6 2171 ioc_refresh_margins(ioc);
25d41e4a
TH
2172 } else if (ioc->busy_level != prev_busy_level || nr_lagging) {
2173 trace_iocost_ioc_vrate_adj(ioc, atomic64_read(&ioc->vtime_rate),
d6c8e949 2174 missed_ppm, rq_wait_pct, nr_lagging,
065655c8 2175 nr_shortages);
7caa4715
TH
2176 }
2177
2178 ioc_refresh_params(ioc, false);
2179
2180 /*
2181 * This period is done. Move onto the next one. If nothing's
2182 * going on with the device, stop the timer.
2183 */
2184 atomic64_inc(&ioc->cur_period);
2185
2186 if (ioc->running != IOC_STOP) {
2187 if (!list_empty(&ioc->active_iocgs)) {
2188 ioc_start_period(ioc, &now);
2189 } else {
2190 ioc->busy_level = 0;
2191 ioc->running = IOC_IDLE;
2192 }
2193 }
2194
2195 spin_unlock_irq(&ioc->lock);
2196}
2197
b0853ab4
TH
2198static u64 adjust_inuse_and_calc_cost(struct ioc_gq *iocg, u64 vtime,
2199 u64 abs_cost, struct ioc_now *now)
2200{
2201 struct ioc *ioc = iocg->ioc;
2202 struct ioc_margins *margins = &ioc->margins;
2203 u32 adj_step = DIV_ROUND_UP(iocg->active * INUSE_ADJ_STEP_PCT, 100);
2204 u32 hwi;
2205 s64 margin;
2206 u64 cost, new_inuse;
2207
2208 current_hweight(iocg, NULL, &hwi);
2209 cost = abs_cost_to_cost(abs_cost, hwi);
2210 margin = now->vnow - vtime - cost;
2211
c421a3eb
TH
2212 /* debt handling owns inuse for debtors */
2213 if (iocg->abs_vdebt)
2214 return cost;
2215
b0853ab4
TH
2216 /*
2217 * We only increase inuse during period and do so iff the margin has
2218 * deteriorated since the previous adjustment.
2219 */
2220 if (margin >= iocg->saved_margin || margin >= margins->low ||
2221 iocg->inuse == iocg->active)
2222 return cost;
2223
2224 spin_lock_irq(&ioc->lock);
2225
2226 /* we own inuse only when @iocg is in the normal active state */
c421a3eb 2227 if (iocg->abs_vdebt || list_empty(&iocg->active_list)) {
b0853ab4
TH
2228 spin_unlock_irq(&ioc->lock);
2229 return cost;
2230 }
2231
2232 /* bump up inuse till @abs_cost fits in the existing budget */
2233 new_inuse = iocg->inuse;
2234 do {
2235 new_inuse = new_inuse + adj_step;
2236 propagate_weights(iocg, iocg->active, new_inuse, true, now);
2237 current_hweight(iocg, NULL, &hwi);
2238 cost = abs_cost_to_cost(abs_cost, hwi);
2239 } while (time_after64(vtime + cost, now->vnow) &&
2240 iocg->inuse != iocg->active);
2241
2242 spin_unlock_irq(&ioc->lock);
2243 return cost;
2244}
2245
7caa4715
TH
2246static void calc_vtime_cost_builtin(struct bio *bio, struct ioc_gq *iocg,
2247 bool is_merge, u64 *costp)
2248{
2249 struct ioc *ioc = iocg->ioc;
2250 u64 coef_seqio, coef_randio, coef_page;
2251 u64 pages = max_t(u64, bio_sectors(bio) >> IOC_SECT_TO_PAGE_SHIFT, 1);
2252 u64 seek_pages = 0;
2253 u64 cost = 0;
2254
2255 switch (bio_op(bio)) {
2256 case REQ_OP_READ:
2257 coef_seqio = ioc->params.lcoefs[LCOEF_RSEQIO];
2258 coef_randio = ioc->params.lcoefs[LCOEF_RRANDIO];
2259 coef_page = ioc->params.lcoefs[LCOEF_RPAGE];
2260 break;
2261 case REQ_OP_WRITE:
2262 coef_seqio = ioc->params.lcoefs[LCOEF_WSEQIO];
2263 coef_randio = ioc->params.lcoefs[LCOEF_WRANDIO];
2264 coef_page = ioc->params.lcoefs[LCOEF_WPAGE];
2265 break;
2266 default:
2267 goto out;
2268 }
2269
2270 if (iocg->cursor) {
2271 seek_pages = abs(bio->bi_iter.bi_sector - iocg->cursor);
2272 seek_pages >>= IOC_SECT_TO_PAGE_SHIFT;
2273 }
2274
2275 if (!is_merge) {
2276 if (seek_pages > LCOEF_RANDIO_PAGES) {
2277 cost += coef_randio;
2278 } else {
2279 cost += coef_seqio;
2280 }
2281 }
2282 cost += pages * coef_page;
2283out:
2284 *costp = cost;
2285}
2286
2287static u64 calc_vtime_cost(struct bio *bio, struct ioc_gq *iocg, bool is_merge)
2288{
2289 u64 cost;
2290
2291 calc_vtime_cost_builtin(bio, iocg, is_merge, &cost);
2292 return cost;
2293}
2294
cd006509
TH
2295static void calc_size_vtime_cost_builtin(struct request *rq, struct ioc *ioc,
2296 u64 *costp)
2297{
2298 unsigned int pages = blk_rq_stats_sectors(rq) >> IOC_SECT_TO_PAGE_SHIFT;
2299
2300 switch (req_op(rq)) {
2301 case REQ_OP_READ:
2302 *costp = pages * ioc->params.lcoefs[LCOEF_RPAGE];
2303 break;
2304 case REQ_OP_WRITE:
2305 *costp = pages * ioc->params.lcoefs[LCOEF_WPAGE];
2306 break;
2307 default:
2308 *costp = 0;
2309 }
2310}
2311
2312static u64 calc_size_vtime_cost(struct request *rq, struct ioc *ioc)
2313{
2314 u64 cost;
2315
2316 calc_size_vtime_cost_builtin(rq, ioc, &cost);
2317 return cost;
2318}
2319
7caa4715
TH
2320static void ioc_rqos_throttle(struct rq_qos *rqos, struct bio *bio)
2321{
2322 struct blkcg_gq *blkg = bio->bi_blkg;
2323 struct ioc *ioc = rqos_to_ioc(rqos);
2324 struct ioc_gq *iocg = blkg_to_iocg(blkg);
2325 struct ioc_now now;
2326 struct iocg_wait wait;
7caa4715 2327 u64 abs_cost, cost, vtime;
da437b95
TH
2328 bool use_debt, ioc_locked;
2329 unsigned long flags;
7caa4715
TH
2330
2331 /* bypass IOs if disabled or for root cgroup */
2332 if (!ioc->enabled || !iocg->level)
2333 return;
2334
7caa4715
TH
2335 /* calculate the absolute vtime cost */
2336 abs_cost = calc_vtime_cost(bio, iocg, false);
2337 if (!abs_cost)
2338 return;
2339
f1de2439
TH
2340 if (!iocg_activate(iocg, &now))
2341 return;
2342
7caa4715 2343 iocg->cursor = bio_end_sector(bio);
7caa4715 2344 vtime = atomic64_read(&iocg->vtime);
b0853ab4 2345 cost = adjust_inuse_and_calc_cost(iocg, vtime, abs_cost, &now);
7caa4715
TH
2346
2347 /*
2348 * If no one's waiting and within budget, issue right away. The
2349 * tests are racy but the races aren't systemic - we only miss once
2350 * in a while which is fine.
2351 */
0b80f986 2352 if (!waitqueue_active(&iocg->waitq) && !iocg->abs_vdebt &&
7caa4715 2353 time_before_eq64(vtime + cost, now.vnow)) {
97eb1975 2354 iocg_commit_bio(iocg, bio, abs_cost, cost);
7caa4715
TH
2355 return;
2356 }
2357
36a52481 2358 /*
da437b95
TH
2359 * We're over budget. This can be handled in two ways. IOs which may
2360 * cause priority inversions are punted to @ioc->aux_iocg and charged as
2361 * debt. Otherwise, the issuer is blocked on @iocg->waitq. Debt handling
2362 * requires @ioc->lock, waitq handling @iocg->waitq.lock. Determine
2363 * whether debt handling is needed and acquire locks accordingly.
0b80f986 2364 */
da437b95
TH
2365 use_debt = bio_issue_as_root_blkg(bio) || fatal_signal_pending(current);
2366 ioc_locked = use_debt || READ_ONCE(iocg->abs_vdebt);
b0853ab4 2367retry_lock:
da437b95
TH
2368 iocg_lock(iocg, ioc_locked, &flags);
2369
2370 /*
2371 * @iocg must stay activated for debt and waitq handling. Deactivation
2372 * is synchronized against both ioc->lock and waitq.lock and we won't
2373 * get deactivated as long as we're waiting or has debt, so we're good
2374 * if we're activated here. In the unlikely cases that we aren't, just
2375 * issue the IO.
2376 */
0b80f986 2377 if (unlikely(list_empty(&iocg->active_list))) {
da437b95 2378 iocg_unlock(iocg, ioc_locked, &flags);
97eb1975 2379 iocg_commit_bio(iocg, bio, abs_cost, cost);
0b80f986
TH
2380 return;
2381 }
2382
2383 /*
2384 * We're over budget. If @bio has to be issued regardless, remember
2385 * the abs_cost instead of advancing vtime. iocg_kick_waitq() will pay
2386 * off the debt before waking more IOs.
2387 *
36a52481 2388 * This way, the debt is continuously paid off each period with the
0b80f986
TH
2389 * actual budget available to the cgroup. If we just wound vtime, we
2390 * would incorrectly use the current hw_inuse for the entire amount
2391 * which, for example, can lead to the cgroup staying blocked for a
2392 * long time even with substantially raised hw_inuse.
2393 *
2394 * An iocg with vdebt should stay online so that the timer can keep
2395 * deducting its vdebt and [de]activate use_delay mechanism
2396 * accordingly. We don't want to race against the timer trying to
2397 * clear them and leave @iocg inactive w/ dangling use_delay heavily
2398 * penalizing the cgroup and its descendants.
36a52481 2399 */
da437b95 2400 if (use_debt) {
c421a3eb 2401 iocg_incur_debt(iocg, abs_cost, &now);
54c52e10 2402 if (iocg_kick_delay(iocg, &now))
d7bd15a1
TH
2403 blkcg_schedule_throttle(rqos->q,
2404 (bio->bi_opf & REQ_SWAP) == REQ_SWAP);
da437b95 2405 iocg_unlock(iocg, ioc_locked, &flags);
7caa4715
TH
2406 return;
2407 }
2408
b0853ab4 2409 /* guarantee that iocgs w/ waiters have maximum inuse */
c421a3eb 2410 if (!iocg->abs_vdebt && iocg->inuse != iocg->active) {
b0853ab4
TH
2411 if (!ioc_locked) {
2412 iocg_unlock(iocg, false, &flags);
2413 ioc_locked = true;
2414 goto retry_lock;
2415 }
2416 propagate_weights(iocg, iocg->active, iocg->active, true,
2417 &now);
2418 }
2419
7caa4715
TH
2420 /*
2421 * Append self to the waitq and schedule the wakeup timer if we're
2422 * the first waiter. The timer duration is calculated based on the
2423 * current vrate. vtime and hweight changes can make it too short
2424 * or too long. Each wait entry records the absolute cost it's
2425 * waiting for to allow re-evaluation using a custom wait entry.
2426 *
2427 * If too short, the timer simply reschedules itself. If too long,
2428 * the period timer will notice and trigger wakeups.
2429 *
2430 * All waiters are on iocg->waitq and the wait states are
2431 * synchronized using waitq.lock.
2432 */
7caa4715
TH
2433 init_waitqueue_func_entry(&wait.wait, iocg_wake_fn);
2434 wait.wait.private = current;
2435 wait.bio = bio;
2436 wait.abs_cost = abs_cost;
2437 wait.committed = false; /* will be set true by waker */
2438
2439 __add_wait_queue_entry_tail(&iocg->waitq, &wait.wait);
da437b95 2440 iocg_kick_waitq(iocg, ioc_locked, &now);
7caa4715 2441
da437b95 2442 iocg_unlock(iocg, ioc_locked, &flags);
7caa4715
TH
2443
2444 while (true) {
2445 set_current_state(TASK_UNINTERRUPTIBLE);
2446 if (wait.committed)
2447 break;
2448 io_schedule();
2449 }
2450
2451 /* waker already committed us, proceed */
2452 finish_wait(&iocg->waitq, &wait.wait);
2453}
2454
2455static void ioc_rqos_merge(struct rq_qos *rqos, struct request *rq,
2456 struct bio *bio)
2457{
2458 struct ioc_gq *iocg = blkg_to_iocg(bio->bi_blkg);
e1518f63 2459 struct ioc *ioc = iocg->ioc;
7caa4715 2460 sector_t bio_end = bio_end_sector(bio);
e1518f63 2461 struct ioc_now now;
b0853ab4 2462 u64 vtime, abs_cost, cost;
0b80f986 2463 unsigned long flags;
7caa4715 2464
e1518f63
TH
2465 /* bypass if disabled or for root cgroup */
2466 if (!ioc->enabled || !iocg->level)
7caa4715
TH
2467 return;
2468
2469 abs_cost = calc_vtime_cost(bio, iocg, true);
2470 if (!abs_cost)
2471 return;
2472
e1518f63 2473 ioc_now(ioc, &now);
b0853ab4
TH
2474
2475 vtime = atomic64_read(&iocg->vtime);
2476 cost = adjust_inuse_and_calc_cost(iocg, vtime, abs_cost, &now);
e1518f63 2477
7caa4715
TH
2478 /* update cursor if backmerging into the request at the cursor */
2479 if (blk_rq_pos(rq) < bio_end &&
2480 blk_rq_pos(rq) + blk_rq_sectors(rq) == iocg->cursor)
2481 iocg->cursor = bio_end;
2482
e1518f63 2483 /*
0b80f986
TH
2484 * Charge if there's enough vtime budget and the existing request has
2485 * cost assigned.
e1518f63
TH
2486 */
2487 if (rq->bio && rq->bio->bi_iocost_cost &&
0b80f986 2488 time_before_eq64(atomic64_read(&iocg->vtime) + cost, now.vnow)) {
97eb1975 2489 iocg_commit_bio(iocg, bio, abs_cost, cost);
0b80f986
TH
2490 return;
2491 }
2492
2493 /*
2494 * Otherwise, account it as debt if @iocg is online, which it should
2495 * be for the vast majority of cases. See debt handling in
2496 * ioc_rqos_throttle() for details.
2497 */
c421a3eb
TH
2498 spin_lock_irqsave(&ioc->lock, flags);
2499 spin_lock(&iocg->waitq.lock);
2500
0b80f986 2501 if (likely(!list_empty(&iocg->active_list))) {
c421a3eb
TH
2502 iocg_incur_debt(iocg, abs_cost, &now);
2503 if (iocg_kick_delay(iocg, &now))
2504 blkcg_schedule_throttle(rqos->q,
2505 (bio->bi_opf & REQ_SWAP) == REQ_SWAP);
0b80f986 2506 } else {
97eb1975 2507 iocg_commit_bio(iocg, bio, abs_cost, cost);
0b80f986 2508 }
c421a3eb
TH
2509
2510 spin_unlock(&iocg->waitq.lock);
2511 spin_unlock_irqrestore(&ioc->lock, flags);
7caa4715
TH
2512}
2513
2514static void ioc_rqos_done_bio(struct rq_qos *rqos, struct bio *bio)
2515{
2516 struct ioc_gq *iocg = blkg_to_iocg(bio->bi_blkg);
2517
2518 if (iocg && bio->bi_iocost_cost)
2519 atomic64_add(bio->bi_iocost_cost, &iocg->done_vtime);
2520}
2521
2522static void ioc_rqos_done(struct rq_qos *rqos, struct request *rq)
2523{
2524 struct ioc *ioc = rqos_to_ioc(rqos);
5e124f74 2525 struct ioc_pcpu_stat *ccs;
cd006509 2526 u64 on_q_ns, rq_wait_ns, size_nsec;
7caa4715
TH
2527 int pidx, rw;
2528
2529 if (!ioc->enabled || !rq->alloc_time_ns || !rq->start_time_ns)
2530 return;
2531
2532 switch (req_op(rq) & REQ_OP_MASK) {
2533 case REQ_OP_READ:
2534 pidx = QOS_RLAT;
2535 rw = READ;
2536 break;
2537 case REQ_OP_WRITE:
2538 pidx = QOS_WLAT;
2539 rw = WRITE;
2540 break;
2541 default:
2542 return;
2543 }
2544
2545 on_q_ns = ktime_get_ns() - rq->alloc_time_ns;
2546 rq_wait_ns = rq->start_time_ns - rq->alloc_time_ns;
cd006509 2547 size_nsec = div64_u64(calc_size_vtime_cost(rq, ioc), VTIME_PER_NSEC);
7caa4715 2548
5e124f74
TH
2549 ccs = get_cpu_ptr(ioc->pcpu_stat);
2550
cd006509
TH
2551 if (on_q_ns <= size_nsec ||
2552 on_q_ns - size_nsec <= ioc->params.qos[pidx] * NSEC_PER_USEC)
5e124f74 2553 local_inc(&ccs->missed[rw].nr_met);
7caa4715 2554 else
5e124f74
TH
2555 local_inc(&ccs->missed[rw].nr_missed);
2556
2557 local64_add(rq_wait_ns, &ccs->rq_wait_ns);
7caa4715 2558
5e124f74 2559 put_cpu_ptr(ccs);
7caa4715
TH
2560}
2561
2562static void ioc_rqos_queue_depth_changed(struct rq_qos *rqos)
2563{
2564 struct ioc *ioc = rqos_to_ioc(rqos);
2565
2566 spin_lock_irq(&ioc->lock);
2567 ioc_refresh_params(ioc, false);
2568 spin_unlock_irq(&ioc->lock);
2569}
2570
2571static void ioc_rqos_exit(struct rq_qos *rqos)
2572{
2573 struct ioc *ioc = rqos_to_ioc(rqos);
2574
2575 blkcg_deactivate_policy(rqos->q, &blkcg_policy_iocost);
2576
2577 spin_lock_irq(&ioc->lock);
2578 ioc->running = IOC_STOP;
2579 spin_unlock_irq(&ioc->lock);
2580
2581 del_timer_sync(&ioc->timer);
2582 free_percpu(ioc->pcpu_stat);
2583 kfree(ioc);
2584}
2585
2586static struct rq_qos_ops ioc_rqos_ops = {
2587 .throttle = ioc_rqos_throttle,
2588 .merge = ioc_rqos_merge,
2589 .done_bio = ioc_rqos_done_bio,
2590 .done = ioc_rqos_done,
2591 .queue_depth_changed = ioc_rqos_queue_depth_changed,
2592 .exit = ioc_rqos_exit,
2593};
2594
2595static int blk_iocost_init(struct request_queue *q)
2596{
2597 struct ioc *ioc;
2598 struct rq_qos *rqos;
5e124f74 2599 int i, cpu, ret;
7caa4715
TH
2600
2601 ioc = kzalloc(sizeof(*ioc), GFP_KERNEL);
2602 if (!ioc)
2603 return -ENOMEM;
2604
2605 ioc->pcpu_stat = alloc_percpu(struct ioc_pcpu_stat);
2606 if (!ioc->pcpu_stat) {
2607 kfree(ioc);
2608 return -ENOMEM;
2609 }
2610
5e124f74
TH
2611 for_each_possible_cpu(cpu) {
2612 struct ioc_pcpu_stat *ccs = per_cpu_ptr(ioc->pcpu_stat, cpu);
2613
2614 for (i = 0; i < ARRAY_SIZE(ccs->missed); i++) {
2615 local_set(&ccs->missed[i].nr_met, 0);
2616 local_set(&ccs->missed[i].nr_missed, 0);
2617 }
2618 local64_set(&ccs->rq_wait_ns, 0);
2619 }
2620
7caa4715
TH
2621 rqos = &ioc->rqos;
2622 rqos->id = RQ_QOS_COST;
2623 rqos->ops = &ioc_rqos_ops;
2624 rqos->q = q;
2625
2626 spin_lock_init(&ioc->lock);
2627 timer_setup(&ioc->timer, ioc_timer_fn, 0);
2628 INIT_LIST_HEAD(&ioc->active_iocgs);
2629
2630 ioc->running = IOC_IDLE;
2631 atomic64_set(&ioc->vtime_rate, VTIME_PER_USEC);
67b7b641 2632 seqcount_spinlock_init(&ioc->period_seqcount, &ioc->lock);
7caa4715
TH
2633 ioc->period_at = ktime_to_us(ktime_get());
2634 atomic64_set(&ioc->cur_period, 0);
2635 atomic_set(&ioc->hweight_gen, 0);
2636
2637 spin_lock_irq(&ioc->lock);
2638 ioc->autop_idx = AUTOP_INVALID;
2639 ioc_refresh_params(ioc, true);
2640 spin_unlock_irq(&ioc->lock);
2641
2642 rq_qos_add(q, rqos);
2643 ret = blkcg_activate_policy(q, &blkcg_policy_iocost);
2644 if (ret) {
2645 rq_qos_del(q, rqos);
3532e722 2646 free_percpu(ioc->pcpu_stat);
7caa4715
TH
2647 kfree(ioc);
2648 return ret;
2649 }
2650 return 0;
2651}
2652
2653static struct blkcg_policy_data *ioc_cpd_alloc(gfp_t gfp)
2654{
2655 struct ioc_cgrp *iocc;
2656
2657 iocc = kzalloc(sizeof(struct ioc_cgrp), gfp);
e916ad29
TH
2658 if (!iocc)
2659 return NULL;
7caa4715 2660
bd0adb91 2661 iocc->dfl_weight = CGROUP_WEIGHT_DFL * WEIGHT_ONE;
7caa4715
TH
2662 return &iocc->cpd;
2663}
2664
2665static void ioc_cpd_free(struct blkcg_policy_data *cpd)
2666{
2667 kfree(container_of(cpd, struct ioc_cgrp, cpd));
2668}
2669
2670static struct blkg_policy_data *ioc_pd_alloc(gfp_t gfp, struct request_queue *q,
2671 struct blkcg *blkcg)
2672{
2673 int levels = blkcg->css.cgroup->level + 1;
2674 struct ioc_gq *iocg;
2675
f61d6e25 2676 iocg = kzalloc_node(struct_size(iocg, ancestors, levels), gfp, q->node);
7caa4715
TH
2677 if (!iocg)
2678 return NULL;
2679
97eb1975
TH
2680 iocg->pcpu_stat = alloc_percpu_gfp(struct iocg_pcpu_stat, gfp);
2681 if (!iocg->pcpu_stat) {
2682 kfree(iocg);
2683 return NULL;
2684 }
2685
7caa4715
TH
2686 return &iocg->pd;
2687}
2688
2689static void ioc_pd_init(struct blkg_policy_data *pd)
2690{
2691 struct ioc_gq *iocg = pd_to_iocg(pd);
2692 struct blkcg_gq *blkg = pd_to_blkg(&iocg->pd);
2693 struct ioc *ioc = q_to_ioc(blkg->q);
2694 struct ioc_now now;
2695 struct blkcg_gq *tblkg;
2696 unsigned long flags;
2697
2698 ioc_now(ioc, &now);
2699
2700 iocg->ioc = ioc;
2701 atomic64_set(&iocg->vtime, now.vnow);
2702 atomic64_set(&iocg->done_vtime, now.vnow);
2703 atomic64_set(&iocg->active_period, atomic64_read(&ioc->cur_period));
2704 INIT_LIST_HEAD(&iocg->active_list);
97eb1975 2705 INIT_LIST_HEAD(&iocg->walk_list);
8692d2db 2706 INIT_LIST_HEAD(&iocg->surplus_list);
fe20cdb5
TH
2707 iocg->hweight_active = WEIGHT_ONE;
2708 iocg->hweight_inuse = WEIGHT_ONE;
7caa4715
TH
2709
2710 init_waitqueue_head(&iocg->waitq);
2711 hrtimer_init(&iocg->waitq_timer, CLOCK_MONOTONIC, HRTIMER_MODE_ABS);
2712 iocg->waitq_timer.function = iocg_waitq_timer_fn;
7caa4715
TH
2713
2714 iocg->level = blkg->blkcg->css.cgroup->level;
2715
2716 for (tblkg = blkg; tblkg; tblkg = tblkg->parent) {
2717 struct ioc_gq *tiocg = blkg_to_iocg(tblkg);
2718 iocg->ancestors[tiocg->level] = tiocg;
2719 }
2720
2721 spin_lock_irqsave(&ioc->lock, flags);
b0853ab4 2722 weight_updated(iocg, &now);
7caa4715
TH
2723 spin_unlock_irqrestore(&ioc->lock, flags);
2724}
2725
2726static void ioc_pd_free(struct blkg_policy_data *pd)
2727{
2728 struct ioc_gq *iocg = pd_to_iocg(pd);
2729 struct ioc *ioc = iocg->ioc;
5aeac7c4 2730 unsigned long flags;
7caa4715
TH
2731
2732 if (ioc) {
5aeac7c4 2733 spin_lock_irqsave(&ioc->lock, flags);
97eb1975 2734
7caa4715 2735 if (!list_empty(&iocg->active_list)) {
b0853ab4
TH
2736 struct ioc_now now;
2737
2738 ioc_now(ioc, &now);
2739 propagate_weights(iocg, 0, 0, false, &now);
7caa4715
TH
2740 list_del_init(&iocg->active_list);
2741 }
97eb1975
TH
2742
2743 WARN_ON_ONCE(!list_empty(&iocg->walk_list));
8692d2db 2744 WARN_ON_ONCE(!list_empty(&iocg->surplus_list));
97eb1975 2745
5aeac7c4 2746 spin_unlock_irqrestore(&ioc->lock, flags);
e036c4ca
TH
2747
2748 hrtimer_cancel(&iocg->waitq_timer);
7caa4715 2749 }
97eb1975 2750 free_percpu(iocg->pcpu_stat);
7caa4715
TH
2751 kfree(iocg);
2752}
2753
97eb1975
TH
2754static size_t ioc_pd_stat(struct blkg_policy_data *pd, char *buf, size_t size)
2755{
2756 struct ioc_gq *iocg = pd_to_iocg(pd);
2757 struct ioc *ioc = iocg->ioc;
2758 size_t pos = 0;
2759
2760 if (!ioc->enabled)
2761 return 0;
2762
2763 if (iocg->level == 0) {
2764 unsigned vp10k = DIV64_U64_ROUND_CLOSEST(
2765 atomic64_read(&ioc->vtime_rate) * 10000,
2766 VTIME_PER_USEC);
2767 pos += scnprintf(buf + pos, size - pos, " cost.vrate=%u.%02u",
2768 vp10k / 100, vp10k % 100);
2769 }
2770
2771 pos += scnprintf(buf + pos, size - pos, " cost.usage=%llu",
2772 iocg->last_stat.usage_us);
2773
2774 return pos;
2775}
2776
7caa4715
TH
2777static u64 ioc_weight_prfill(struct seq_file *sf, struct blkg_policy_data *pd,
2778 int off)
2779{
2780 const char *dname = blkg_dev_name(pd->blkg);
2781 struct ioc_gq *iocg = pd_to_iocg(pd);
2782
2783 if (dname && iocg->cfg_weight)
bd0adb91 2784 seq_printf(sf, "%s %u\n", dname, iocg->cfg_weight / WEIGHT_ONE);
7caa4715
TH
2785 return 0;
2786}
2787
2788
2789static int ioc_weight_show(struct seq_file *sf, void *v)
2790{
2791 struct blkcg *blkcg = css_to_blkcg(seq_css(sf));
2792 struct ioc_cgrp *iocc = blkcg_to_iocc(blkcg);
2793
bd0adb91 2794 seq_printf(sf, "default %u\n", iocc->dfl_weight / WEIGHT_ONE);
7caa4715
TH
2795 blkcg_print_blkgs(sf, blkcg, ioc_weight_prfill,
2796 &blkcg_policy_iocost, seq_cft(sf)->private, false);
2797 return 0;
2798}
2799
2800static ssize_t ioc_weight_write(struct kernfs_open_file *of, char *buf,
2801 size_t nbytes, loff_t off)
2802{
2803 struct blkcg *blkcg = css_to_blkcg(of_css(of));
2804 struct ioc_cgrp *iocc = blkcg_to_iocc(blkcg);
2805 struct blkg_conf_ctx ctx;
b0853ab4 2806 struct ioc_now now;
7caa4715
TH
2807 struct ioc_gq *iocg;
2808 u32 v;
2809 int ret;
2810
2811 if (!strchr(buf, ':')) {
2812 struct blkcg_gq *blkg;
2813
2814 if (!sscanf(buf, "default %u", &v) && !sscanf(buf, "%u", &v))
2815 return -EINVAL;
2816
2817 if (v < CGROUP_WEIGHT_MIN || v > CGROUP_WEIGHT_MAX)
2818 return -EINVAL;
2819
2820 spin_lock(&blkcg->lock);
bd0adb91 2821 iocc->dfl_weight = v * WEIGHT_ONE;
7caa4715
TH
2822 hlist_for_each_entry(blkg, &blkcg->blkg_list, blkcg_node) {
2823 struct ioc_gq *iocg = blkg_to_iocg(blkg);
2824
2825 if (iocg) {
2826 spin_lock_irq(&iocg->ioc->lock);
b0853ab4
TH
2827 ioc_now(iocg->ioc, &now);
2828 weight_updated(iocg, &now);
7caa4715
TH
2829 spin_unlock_irq(&iocg->ioc->lock);
2830 }
2831 }
2832 spin_unlock(&blkcg->lock);
2833
2834 return nbytes;
2835 }
2836
2837 ret = blkg_conf_prep(blkcg, &blkcg_policy_iocost, buf, &ctx);
2838 if (ret)
2839 return ret;
2840
2841 iocg = blkg_to_iocg(ctx.blkg);
2842
2843 if (!strncmp(ctx.body, "default", 7)) {
2844 v = 0;
2845 } else {
2846 if (!sscanf(ctx.body, "%u", &v))
2847 goto einval;
2848 if (v < CGROUP_WEIGHT_MIN || v > CGROUP_WEIGHT_MAX)
2849 goto einval;
2850 }
2851
41591a51 2852 spin_lock(&iocg->ioc->lock);
bd0adb91 2853 iocg->cfg_weight = v * WEIGHT_ONE;
b0853ab4
TH
2854 ioc_now(iocg->ioc, &now);
2855 weight_updated(iocg, &now);
41591a51 2856 spin_unlock(&iocg->ioc->lock);
7caa4715
TH
2857
2858 blkg_conf_finish(&ctx);
2859 return nbytes;
2860
2861einval:
2862 blkg_conf_finish(&ctx);
2863 return -EINVAL;
2864}
2865
2866static u64 ioc_qos_prfill(struct seq_file *sf, struct blkg_policy_data *pd,
2867 int off)
2868{
2869 const char *dname = blkg_dev_name(pd->blkg);
2870 struct ioc *ioc = pd_to_iocg(pd)->ioc;
2871
2872 if (!dname)
2873 return 0;
2874
2875 seq_printf(sf, "%s enable=%d ctrl=%s rpct=%u.%02u rlat=%u wpct=%u.%02u wlat=%u min=%u.%02u max=%u.%02u\n",
2876 dname, ioc->enabled, ioc->user_qos_params ? "user" : "auto",
2877 ioc->params.qos[QOS_RPPM] / 10000,
2878 ioc->params.qos[QOS_RPPM] % 10000 / 100,
2879 ioc->params.qos[QOS_RLAT],
2880 ioc->params.qos[QOS_WPPM] / 10000,
2881 ioc->params.qos[QOS_WPPM] % 10000 / 100,
2882 ioc->params.qos[QOS_WLAT],
2883 ioc->params.qos[QOS_MIN] / 10000,
2884 ioc->params.qos[QOS_MIN] % 10000 / 100,
2885 ioc->params.qos[QOS_MAX] / 10000,
2886 ioc->params.qos[QOS_MAX] % 10000 / 100);
2887 return 0;
2888}
2889
2890static int ioc_qos_show(struct seq_file *sf, void *v)
2891{
2892 struct blkcg *blkcg = css_to_blkcg(seq_css(sf));
2893
2894 blkcg_print_blkgs(sf, blkcg, ioc_qos_prfill,
2895 &blkcg_policy_iocost, seq_cft(sf)->private, false);
2896 return 0;
2897}
2898
2899static const match_table_t qos_ctrl_tokens = {
2900 { QOS_ENABLE, "enable=%u" },
2901 { QOS_CTRL, "ctrl=%s" },
2902 { NR_QOS_CTRL_PARAMS, NULL },
2903};
2904
2905static const match_table_t qos_tokens = {
2906 { QOS_RPPM, "rpct=%s" },
2907 { QOS_RLAT, "rlat=%u" },
2908 { QOS_WPPM, "wpct=%s" },
2909 { QOS_WLAT, "wlat=%u" },
2910 { QOS_MIN, "min=%s" },
2911 { QOS_MAX, "max=%s" },
2912 { NR_QOS_PARAMS, NULL },
2913};
2914
2915static ssize_t ioc_qos_write(struct kernfs_open_file *of, char *input,
2916 size_t nbytes, loff_t off)
2917{
2918 struct gendisk *disk;
2919 struct ioc *ioc;
2920 u32 qos[NR_QOS_PARAMS];
2921 bool enable, user;
2922 char *p;
2923 int ret;
2924
2925 disk = blkcg_conf_get_disk(&input);
2926 if (IS_ERR(disk))
2927 return PTR_ERR(disk);
2928
2929 ioc = q_to_ioc(disk->queue);
2930 if (!ioc) {
2931 ret = blk_iocost_init(disk->queue);
2932 if (ret)
2933 goto err;
2934 ioc = q_to_ioc(disk->queue);
2935 }
2936
2937 spin_lock_irq(&ioc->lock);
2938 memcpy(qos, ioc->params.qos, sizeof(qos));
2939 enable = ioc->enabled;
2940 user = ioc->user_qos_params;
2941 spin_unlock_irq(&ioc->lock);
2942
2943 while ((p = strsep(&input, " \t\n"))) {
2944 substring_t args[MAX_OPT_ARGS];
2945 char buf[32];
2946 int tok;
2947 s64 v;
2948
2949 if (!*p)
2950 continue;
2951
2952 switch (match_token(p, qos_ctrl_tokens, args)) {
2953 case QOS_ENABLE:
2954 match_u64(&args[0], &v);
2955 enable = v;
2956 continue;
2957 case QOS_CTRL:
2958 match_strlcpy(buf, &args[0], sizeof(buf));
2959 if (!strcmp(buf, "auto"))
2960 user = false;
2961 else if (!strcmp(buf, "user"))
2962 user = true;
2963 else
2964 goto einval;
2965 continue;
2966 }
2967
2968 tok = match_token(p, qos_tokens, args);
2969 switch (tok) {
2970 case QOS_RPPM:
2971 case QOS_WPPM:
2972 if (match_strlcpy(buf, &args[0], sizeof(buf)) >=
2973 sizeof(buf))
2974 goto einval;
2975 if (cgroup_parse_float(buf, 2, &v))
2976 goto einval;
2977 if (v < 0 || v > 10000)
2978 goto einval;
2979 qos[tok] = v * 100;
2980 break;
2981 case QOS_RLAT:
2982 case QOS_WLAT:
2983 if (match_u64(&args[0], &v))
2984 goto einval;
2985 qos[tok] = v;
2986 break;
2987 case QOS_MIN:
2988 case QOS_MAX:
2989 if (match_strlcpy(buf, &args[0], sizeof(buf)) >=
2990 sizeof(buf))
2991 goto einval;
2992 if (cgroup_parse_float(buf, 2, &v))
2993 goto einval;
2994 if (v < 0)
2995 goto einval;
2996 qos[tok] = clamp_t(s64, v * 100,
2997 VRATE_MIN_PPM, VRATE_MAX_PPM);
2998 break;
2999 default:
3000 goto einval;
3001 }
3002 user = true;
3003 }
3004
3005 if (qos[QOS_MIN] > qos[QOS_MAX])
3006 goto einval;
3007
3008 spin_lock_irq(&ioc->lock);
3009
3010 if (enable) {
cd006509 3011 blk_stat_enable_accounting(ioc->rqos.q);
7caa4715
TH
3012 blk_queue_flag_set(QUEUE_FLAG_RQ_ALLOC_TIME, ioc->rqos.q);
3013 ioc->enabled = true;
3014 } else {
3015 blk_queue_flag_clear(QUEUE_FLAG_RQ_ALLOC_TIME, ioc->rqos.q);
3016 ioc->enabled = false;
3017 }
3018
3019 if (user) {
3020 memcpy(ioc->params.qos, qos, sizeof(qos));
3021 ioc->user_qos_params = true;
3022 } else {
3023 ioc->user_qos_params = false;
3024 }
3025
3026 ioc_refresh_params(ioc, true);
3027 spin_unlock_irq(&ioc->lock);
3028
3029 put_disk_and_module(disk);
3030 return nbytes;
3031einval:
3032 ret = -EINVAL;
3033err:
3034 put_disk_and_module(disk);
3035 return ret;
3036}
3037
3038static u64 ioc_cost_model_prfill(struct seq_file *sf,
3039 struct blkg_policy_data *pd, int off)
3040{
3041 const char *dname = blkg_dev_name(pd->blkg);
3042 struct ioc *ioc = pd_to_iocg(pd)->ioc;
3043 u64 *u = ioc->params.i_lcoefs;
3044
3045 if (!dname)
3046 return 0;
3047
3048 seq_printf(sf, "%s ctrl=%s model=linear "
3049 "rbps=%llu rseqiops=%llu rrandiops=%llu "
3050 "wbps=%llu wseqiops=%llu wrandiops=%llu\n",
3051 dname, ioc->user_cost_model ? "user" : "auto",
3052 u[I_LCOEF_RBPS], u[I_LCOEF_RSEQIOPS], u[I_LCOEF_RRANDIOPS],
3053 u[I_LCOEF_WBPS], u[I_LCOEF_WSEQIOPS], u[I_LCOEF_WRANDIOPS]);
3054 return 0;
3055}
3056
3057static int ioc_cost_model_show(struct seq_file *sf, void *v)
3058{
3059 struct blkcg *blkcg = css_to_blkcg(seq_css(sf));
3060
3061 blkcg_print_blkgs(sf, blkcg, ioc_cost_model_prfill,
3062 &blkcg_policy_iocost, seq_cft(sf)->private, false);
3063 return 0;
3064}
3065
3066static const match_table_t cost_ctrl_tokens = {
3067 { COST_CTRL, "ctrl=%s" },
3068 { COST_MODEL, "model=%s" },
3069 { NR_COST_CTRL_PARAMS, NULL },
3070};
3071
3072static const match_table_t i_lcoef_tokens = {
3073 { I_LCOEF_RBPS, "rbps=%u" },
3074 { I_LCOEF_RSEQIOPS, "rseqiops=%u" },
3075 { I_LCOEF_RRANDIOPS, "rrandiops=%u" },
3076 { I_LCOEF_WBPS, "wbps=%u" },
3077 { I_LCOEF_WSEQIOPS, "wseqiops=%u" },
3078 { I_LCOEF_WRANDIOPS, "wrandiops=%u" },
3079 { NR_I_LCOEFS, NULL },
3080};
3081
3082static ssize_t ioc_cost_model_write(struct kernfs_open_file *of, char *input,
3083 size_t nbytes, loff_t off)
3084{
3085 struct gendisk *disk;
3086 struct ioc *ioc;
3087 u64 u[NR_I_LCOEFS];
3088 bool user;
3089 char *p;
3090 int ret;
3091
3092 disk = blkcg_conf_get_disk(&input);
3093 if (IS_ERR(disk))
3094 return PTR_ERR(disk);
3095
3096 ioc = q_to_ioc(disk->queue);
3097 if (!ioc) {
3098 ret = blk_iocost_init(disk->queue);
3099 if (ret)
3100 goto err;
3101 ioc = q_to_ioc(disk->queue);
3102 }
3103
3104 spin_lock_irq(&ioc->lock);
3105 memcpy(u, ioc->params.i_lcoefs, sizeof(u));
3106 user = ioc->user_cost_model;
3107 spin_unlock_irq(&ioc->lock);
3108
3109 while ((p = strsep(&input, " \t\n"))) {
3110 substring_t args[MAX_OPT_ARGS];
3111 char buf[32];
3112 int tok;
3113 u64 v;
3114
3115 if (!*p)
3116 continue;
3117
3118 switch (match_token(p, cost_ctrl_tokens, args)) {
3119 case COST_CTRL:
3120 match_strlcpy(buf, &args[0], sizeof(buf));
3121 if (!strcmp(buf, "auto"))
3122 user = false;
3123 else if (!strcmp(buf, "user"))
3124 user = true;
3125 else
3126 goto einval;
3127 continue;
3128 case COST_MODEL:
3129 match_strlcpy(buf, &args[0], sizeof(buf));
3130 if (strcmp(buf, "linear"))
3131 goto einval;
3132 continue;
3133 }
3134
3135 tok = match_token(p, i_lcoef_tokens, args);
3136 if (tok == NR_I_LCOEFS)
3137 goto einval;
3138 if (match_u64(&args[0], &v))
3139 goto einval;
3140 u[tok] = v;
3141 user = true;
3142 }
3143
3144 spin_lock_irq(&ioc->lock);
3145 if (user) {
3146 memcpy(ioc->params.i_lcoefs, u, sizeof(u));
3147 ioc->user_cost_model = true;
3148 } else {
3149 ioc->user_cost_model = false;
3150 }
3151 ioc_refresh_params(ioc, true);
3152 spin_unlock_irq(&ioc->lock);
3153
3154 put_disk_and_module(disk);
3155 return nbytes;
3156
3157einval:
3158 ret = -EINVAL;
3159err:
3160 put_disk_and_module(disk);
3161 return ret;
3162}
3163
3164static struct cftype ioc_files[] = {
3165 {
3166 .name = "weight",
3167 .flags = CFTYPE_NOT_ON_ROOT,
3168 .seq_show = ioc_weight_show,
3169 .write = ioc_weight_write,
3170 },
3171 {
3172 .name = "cost.qos",
3173 .flags = CFTYPE_ONLY_ON_ROOT,
3174 .seq_show = ioc_qos_show,
3175 .write = ioc_qos_write,
3176 },
3177 {
3178 .name = "cost.model",
3179 .flags = CFTYPE_ONLY_ON_ROOT,
3180 .seq_show = ioc_cost_model_show,
3181 .write = ioc_cost_model_write,
3182 },
3183 {}
3184};
3185
3186static struct blkcg_policy blkcg_policy_iocost = {
3187 .dfl_cftypes = ioc_files,
3188 .cpd_alloc_fn = ioc_cpd_alloc,
3189 .cpd_free_fn = ioc_cpd_free,
3190 .pd_alloc_fn = ioc_pd_alloc,
3191 .pd_init_fn = ioc_pd_init,
3192 .pd_free_fn = ioc_pd_free,
97eb1975 3193 .pd_stat_fn = ioc_pd_stat,
7caa4715
TH
3194};
3195
3196static int __init ioc_init(void)
3197{
3198 return blkcg_policy_register(&blkcg_policy_iocost);
3199}
3200
3201static void __exit ioc_exit(void)
3202{
3203 return blkcg_policy_unregister(&blkcg_policy_iocost);
3204}
3205
3206module_init(ioc_init);
3207module_exit(ioc_exit);