1 /* SPDX-License-Identifier: GPL-2.0
2 *
3 * IO cost model based controller.
4 *
5 * Copyright (C) 2019 Tejun Heo <tj@kernel.org>
6 * Copyright (C) 2019 Andy Newell <newella@fb.com>
7 * Copyright (C) 2019 Facebook
8 *
9 * One challenge of controlling IO resources is the lack of a trivially
10 * observable cost metric. This is distinguished from CPU and memory where
11 * wallclock time and the number of bytes can serve as accurate enough
12 * approximations.
13 *
14 * Bandwidth and iops are the most commonly used metrics for IO devices but
15 * depending on the type and specifics of the device, different IO patterns
16 * easily lead to multiple orders of magnitude variations rendering them
17 * useless for the purpose of IO capacity distribution. While on-device
18 * time, with a lot of crutches, could serve as a useful approximation for
19 * non-queued rotational devices, this is no longer viable with modern
20 * devices, even the rotational ones.
21 *
22 * While there is no cost metric we can trivially observe, it isn't a
23 * complete mystery. For example, on a rotational device, seek cost
24 * dominates while a contiguous transfer contributes a smaller amount
25 * proportional to the size. If we can characterize at least the relative
26 * costs of these different types of IOs, it should be possible to
27 * implement a reasonable work-conserving proportional IO resource
28 * distribution.
29 *
30 * 1. IO Cost Model
31 *
32 * IO cost model estimates the cost of an IO given its basic parameters and
33 * history (e.g. the end sector of the last IO). The cost is measured in
34 * device time. If a given IO is estimated to cost 10ms, the device should
35 * be able to process ~100 of those IOs in a second.
36 *
37 * Currently, there's only one builtin cost model - linear. Each IO is
38 * classified as sequential or random and given a base cost accordingly.
39 * On top of that, a size cost proportional to the length of the IO is
40 * added. While simple, this model captures the operational
41 * characteristics of a wide variety of devices well enough. Default
42 * parameters for several different classes of devices are provided and the
43 * parameters can be configured from userspace via
44 * /sys/fs/cgroup/io.cost.model.
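 *
 * For example, the builtin HDD coefficients below could be applied
 * explicitly by writing a line like the following to io.cost.model
 * (key=value format per Documentation/admin-guide/cgroup-v2.rst;
 * "8:16" is an illustrative device number):
 *
 *	8:16 ctrl=user model=linear rbps=174019176 rseqiops=41708
 *	rrandiops=370 wbps=178075866 wseqiops=42705 wrandiops=378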
45 *
46 * If needed, tools/cgroup/iocost_coef_gen.py can be used to generate
47 * device-specific coefficients.
48 *
49 * 2. Control Strategy
50 *
51 * The device virtual time (vtime) is used as the primary control metric.
52 * The control strategy is composed of the following three parts.
53 *
54 * 2-1. Vtime Distribution
55 *
56 * When a cgroup becomes active in terms of IOs, its hierarchical share is
57 * calculated. Please consider the following hierarchy where the numbers
58 * inside parentheses denote the configured weights.
59 *
60 * root
61 * / \
62 * A (w:100) B (w:300)
63 * / \
64 * A0 (w:100) A1 (w:100)
65 *
66 * If B is idle and only A0 and A1 are actively issuing IOs, as the two are
67 * of equal weight, each gets 50% share. If then B starts issuing IOs, B
68 * gets 300/(100+300) or 75% share, and A0 and A1 equally split the rest,
69 * 12.5% each. The distribution mechanism only cares about these flattened
70 * shares. They're called hweights (hierarchical weights) and always add
71 * up to 1 (HWEIGHT_WHOLE).
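 *
 * As a concrete sketch of the fixed-point arithmetic used by
 * current_hweight() below, A0's 12.5% share in the example works out to
 *
 *	HWEIGHT_WHOLE * 100/(100 + 300) * 100/(100 + 100) = 65536/8 = 8192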
72 *
73 * A given cgroup's vtime runs slower in inverse proportion to its hweight.
74 * For example, with 12.5% weight, A0's time runs 8 times slower (100/12.5)
75 * against the device vtime - an IO which takes 10ms on the underlying
76 * device is considered to take 80ms on A0.
77 *
78 * This constitutes the basis of IO capacity distribution. Each cgroup's
79 * vtime is running at a rate determined by its hweight. A cgroup tracks
80 * the vtime consumed by past IOs and can issue a new IO iff doing so
81 * wouldn't outrun the current device vtime. Otherwise, the IO is
82 * suspended until the vtime has progressed enough to cover it.
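 *
 * In code terms (see ioc_rqos_throttle() below), an IO whose cost has
 * been scaled by the issuing cgroup's hweight may be issued immediately
 * iff
 *
 *	time_before_eq64(vtime + cost, vnow)
 *
 * holds, and otherwise the issuer waits on the iocg's waitq.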
83 *
84 * 2-2. Vrate Adjustment
85 *
86 * It's unrealistic to expect the cost model to be perfect. There are too
87 * many devices and even on the same device the overall performance
88 * fluctuates depending on numerous factors such as IO mixture and device
89 * internal garbage collection. The controller needs to adapt dynamically.
90 *
91 * This is achieved by adjusting the overall IO rate according to how busy
92 * the device is. If the device becomes overloaded, we're sending down too
93 * many IOs and should generally slow down. If there are waiting issuers
94 * but the device isn't saturated, we're issuing too few and should
95 * generally speed up.
96 *
97 * To slow down, we lower the vrate - the rate at which the device vtime
98 * passes compared to the wall clock. For example, if the vtime is running
99 * at the vrate of 75%, all cgroups added up would only be able to issue
100 * 750ms worth of IOs per second, and vice-versa for speeding up.
101 *
102 * Device busyness is determined using two criteria - rq wait and
103 * completion latencies.
104 *
105 * When a device gets saturated, the on-device and then the request queues
106 * fill up and a bio which is ready to be issued has to wait for a request
107 * to become available. When this delay becomes noticeable, it's a clear
108 * indication that the device is saturated and we lower the vrate. This
109 * saturation signal is fairly conservative as it only triggers when both
110 * hardware and software queues are filled up, and is used as the default
111 * busy signal.
112 *
113 * As devices can have deep queues and be unfair in how the queued commands
114 * are executed, solely depending on rq wait may not result in satisfactory
115 * control quality. For a better control quality, completion latency QoS
116 * parameters can be configured so that the device is considered saturated
117 * if N'th percentile completion latency rises above the set point.
118 *
119 * The completion latency requirements are a function of both the
120 * underlying device characteristics and the desired IO latency quality of
121 * service. There is an inherent trade-off - the tighter the latency QoS,
122 * the higher the bandwidth lossage. Latency QoS is disabled by default
123 * and can be set through /sys/fs/cgroup/io.cost.qos.
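 *
 * For example, writing the following to io.cost.qos (key=value format
 * per Documentation/admin-guide/cgroup-v2.rst; the device number and
 * latency targets are illustrative only)
 *
 *	8:16 enable=1 ctrl=user rpct=95.00 rlat=10000 wpct=95.00
 *	wlat=20000 min=50.00 max=150.00
 *
 * would consider the device saturated when the read p95 completion
 * latency exceeds 10ms or the write p95 exceeds 20ms, and would bound
 * vrate to [50%, 150%].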
124 *
125 * 2-3. Work Conservation
126 *
127 * Imagine two cgroups A and B with equal weights. A is issuing a small IO
128 * periodically while B is sending out enough parallel IOs to saturate the
129 * device on its own. Let's say A's usage amounts to 100ms worth of IO
130 * cost per second, i.e., 10% of the device capacity. The naive
131 * distribution of half and half would lead to 60% utilization of the
132 * device, a significant reduction in the total amount of work done
133 * compared to free-for-all competition. This is too high a cost to pay
134 * for IO control.
135 *
136 * To conserve the total amount of work done, we keep track of how much
137 * each active cgroup is actually using and yield part of its weight if
138 * there are other cgroups which can make use of it. In the above case,
139 * A's weight will be lowered so that it hovers above the actual usage and
140 * B would be able to use the rest.
141 *
142 * As we don't want to penalize a cgroup for donating its weight, the
143 * surplus weight adjustment factors in a margin and has an immediate
144 * snapback mechanism in case the cgroup needs more IO vtime for itself.
145 *
146 * Note that adjusting down surplus weights has the same effects as
147 * accelerating vtime for other cgroups and work conservation can also be
148 * implemented by adjusting vrate dynamically. However, squaring
149 * away who can donate and how much each should take back requires
150 * hweight propagation anyway, so it is easier to implement and
151 * understand as a separate mechanism.
152 *
153 * 3. Monitoring
154 *
155 * Instead of debugfs or other clumsy monitoring mechanisms, this
156 * controller uses a drgn based monitoring script -
157 * tools/cgroup/iocost_monitor.py. For details on drgn, please see
158 * https://github.com/osandov/drgn. The output looks like the following.
159 *
160 * sdb RUN per=300ms cur_per=234.218:v203.695 busy= +1 vrate= 62.12%
161 * active weight hweight% inflt% dbt delay usages%
162 * test/a * 50/ 50 33.33/ 33.33 27.65 2 0*041 033:033:033
163 * test/b * 100/ 100 66.67/ 66.67 17.56 0 0*000 066:079:077
164 *
165 * - per : Timer period
166 * - cur_per : Internal wall and device vtime clock
167 * - vrate : Device virtual time rate against wall clock
168 * - weight : Surplus-adjusted and configured weights
169 * - hweight : Surplus-adjusted and configured hierarchical weights
170 * - inflt : The percentage of in-flight IO cost at the end of last period
171 * - delay : Deferred issuer delay induction level and duration
172 * - usages : Usage history
173 */
174
175 #include <linux/kernel.h>
176 #include <linux/module.h>
177 #include <linux/timer.h>
178 #include <linux/time64.h>
179 #include <linux/parser.h>
180 #include <linux/sched/signal.h>
181 #include <linux/blk-cgroup.h>
182 #include <asm/local.h>
183 #include <asm/local64.h>
184 #include "blk-rq-qos.h"
185 #include "blk-stat.h"
186 #include "blk-wbt.h"
187
188 #ifdef CONFIG_TRACEPOINTS
189
190 /* copied from TRACE_CGROUP_PATH, see cgroup-internal.h */
191 #define TRACE_IOCG_PATH_LEN 1024
192 static DEFINE_SPINLOCK(trace_iocg_path_lock);
193 static char trace_iocg_path[TRACE_IOCG_PATH_LEN];
194
195 #define TRACE_IOCG_PATH(type, iocg, ...) \
196 do { \
197 unsigned long flags; \
198 if (trace_iocost_##type##_enabled()) { \
199 spin_lock_irqsave(&trace_iocg_path_lock, flags); \
200 cgroup_path(iocg_to_blkg(iocg)->blkcg->css.cgroup, \
201 trace_iocg_path, TRACE_IOCG_PATH_LEN); \
202 trace_iocost_##type(iocg, trace_iocg_path, \
203 ##__VA_ARGS__); \
204 spin_unlock_irqrestore(&trace_iocg_path_lock, flags); \
205 } \
206 } while (0)
207
208 #else /* CONFIG_TRACEPOINTS */
209 #define TRACE_IOCG_PATH(type, iocg, ...) do { } while (0)
210 #endif /* CONFIG_TRACEPOINTS */
211
212 enum {
213 MILLION = 1000000,
214
215 /* timer period is calculated from latency requirements, bound it */
216 MIN_PERIOD = USEC_PER_MSEC,
217 MAX_PERIOD = USEC_PER_SEC,
218
219 /*
220 * A cgroup's vtime can run 50% behind the device vtime, which
221 * serves as its IO credit buffer. Surplus weight adjustment is
222 * immediately canceled if the vtime margin runs below 10%.
223 */
224 MARGIN_PCT = 50,
225 INUSE_MARGIN_PCT = 10,
226
227 /* Have some play in waitq timer operations */
228 WAITQ_TIMER_MARGIN_PCT = 5,
229
230 /*
231 * vtime can wrap well within a reasonable uptime when vrate is
232 * consistently raised. Don't trust recorded cgroup vtime if the
233 * period counter indicates that it's older than 5mins.
234 */
235 VTIME_VALID_DUR = 300 * USEC_PER_SEC,
236
237 /*
238 * Remember the past three non-zero usages and use the max for
239 * surplus calculation. Three slots guarantee that we remember one
240 * full period usage from the last active stretch even after
241 * partial deactivation and re-activation periods. Don't start
242 * giving away weight before collecting two data points to prevent
243 * hweight adjustments based on one partial activation period.
244 */
245 NR_USAGE_SLOTS = 3,
246 MIN_VALID_USAGES = 2,
247
248 /* 1/64k is granular enough and can easily be handled w/ u32 */
249 HWEIGHT_WHOLE = 1 << 16,
250
251 /*
252 * As vtime is used to calculate the cost of each IO, it needs to
253 * be fairly high precision. For example, it should be able to
254 * represent the cost of a single page worth of discard with
255 * sufficient accuracy. At the same time, it should be able to
256 * represent reasonably long enough durations to be useful and
257 * convenient during operation.
258 *
259 * 1s worth of vtime is 2^37. This gives us both sub-nanosecond
260 * granularity and days of wrap-around time even at extreme vrates.
261 */
262 VTIME_PER_SEC_SHIFT = 37,
263 VTIME_PER_SEC = 1LLU << VTIME_PER_SEC_SHIFT,
264 VTIME_PER_USEC = VTIME_PER_SEC / USEC_PER_SEC,
265 VTIME_PER_NSEC = VTIME_PER_SEC / NSEC_PER_SEC,
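
	/*
	 * Concretely, VTIME_PER_USEC is 2^37 / 10^6 ~= 137438: a 10ms IO
	 * costs ~1.37 * 10^9 vtime units while one vtime unit is only
	 * ~0.007ns of device time, comfortably sub-nanosecond.
	 */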
266
267 /* bound vrate adjustments within two orders of magnitude */
268 VRATE_MIN_PPM = 10000, /* 1% */
269 VRATE_MAX_PPM = 100000000, /* 10000% */
270
271 VRATE_MIN = VTIME_PER_USEC * VRATE_MIN_PPM / MILLION,
272 VRATE_CLAMP_ADJ_PCT = 4,
273
274 /* if IOs end up waiting for requests, issue less */
275 RQ_WAIT_BUSY_PCT = 5,
276
277 /* unbusy hysteresis */
278 UNBUSY_THR_PCT = 75,
279
280 /* don't let cmds which take a very long time pin lagging for too long */
281 MAX_LAGGING_PERIODS = 10,
282
283 /*
284 * If usage% * 1.25 + 2% is lower than hweight% by more than 3%,
285 * donate the surplus.
286 */
287 SURPLUS_SCALE_PCT = 125, /* * 125% */
288 SURPLUS_SCALE_ABS = HWEIGHT_WHOLE / 50, /* + 2% */
289 SURPLUS_MIN_ADJ_DELTA = HWEIGHT_WHOLE / 33, /* 3% */
290
291 /* switch iff the conditions are met for longer than this */
292 AUTOP_CYCLE_NSEC = 10LLU * NSEC_PER_SEC,
293
294 /*
295 * Count IO size in 4k pages. The 12bit shift helps keep the
296 * size-proportional components of the cost calculation within a
297 * similar number of digits as the per-IO cost components.
298 */
299 IOC_PAGE_SHIFT = 12,
300 IOC_PAGE_SIZE = 1 << IOC_PAGE_SHIFT,
301 IOC_SECT_TO_PAGE_SHIFT = IOC_PAGE_SHIFT - SECTOR_SHIFT,
302
303 /* if further apart than 16M, consider randio for linear model */
304 LCOEF_RANDIO_PAGES = 4096,
305 };
306
307 enum ioc_running {
308 IOC_IDLE,
309 IOC_RUNNING,
310 IOC_STOP,
311 };
312
313 /* io.cost.qos controls including per-dev enable of the whole controller */
314 enum {
315 QOS_ENABLE,
316 QOS_CTRL,
317 NR_QOS_CTRL_PARAMS,
318 };
319
320 /* io.cost.qos params */
321 enum {
322 QOS_RPPM,
323 QOS_RLAT,
324 QOS_WPPM,
325 QOS_WLAT,
326 QOS_MIN,
327 QOS_MAX,
328 NR_QOS_PARAMS,
329 };
330
331 /* io.cost.model controls */
332 enum {
333 COST_CTRL,
334 COST_MODEL,
335 NR_COST_CTRL_PARAMS,
336 };
337
338 /* builtin linear cost model coefficients */
339 enum {
340 I_LCOEF_RBPS,
341 I_LCOEF_RSEQIOPS,
342 I_LCOEF_RRANDIOPS,
343 I_LCOEF_WBPS,
344 I_LCOEF_WSEQIOPS,
345 I_LCOEF_WRANDIOPS,
346 NR_I_LCOEFS,
347 };
348
349 enum {
350 LCOEF_RPAGE,
351 LCOEF_RSEQIO,
352 LCOEF_RRANDIO,
353 LCOEF_WPAGE,
354 LCOEF_WSEQIO,
355 LCOEF_WRANDIO,
356 NR_LCOEFS,
357 };
358
359 enum {
360 AUTOP_INVALID,
361 AUTOP_HDD,
362 AUTOP_SSD_QD1,
363 AUTOP_SSD_DFL,
364 AUTOP_SSD_FAST,
365 };
366
367 struct ioc_gq;
368
369 struct ioc_params {
370 u32 qos[NR_QOS_PARAMS];
371 u64 i_lcoefs[NR_I_LCOEFS];
372 u64 lcoefs[NR_LCOEFS];
373 u32 too_fast_vrate_pct;
374 u32 too_slow_vrate_pct;
375 };
376
377 struct ioc_missed {
378 local_t nr_met;
379 local_t nr_missed;
380 u32 last_met;
381 u32 last_missed;
382 };
383
384 struct ioc_pcpu_stat {
385 struct ioc_missed missed[2];
386
387 local64_t rq_wait_ns;
388 u64 last_rq_wait_ns;
389 };
390
391 /* per device */
392 struct ioc {
393 struct rq_qos rqos;
394
395 bool enabled;
396
397 struct ioc_params params;
398 u32 period_us;
399 u32 margin_us;
400 u64 vrate_min;
401 u64 vrate_max;
402
403 spinlock_t lock;
404 struct timer_list timer;
405 struct list_head active_iocgs; /* active cgroups */
406 struct ioc_pcpu_stat __percpu *pcpu_stat;
407
408 enum ioc_running running;
409 atomic64_t vtime_rate;
410
411 seqcount_spinlock_t period_seqcount;
412 u32 period_at; /* wallclock starttime */
413 u64 period_at_vtime; /* vtime starttime */
414
415 atomic64_t cur_period; /* inc'd each period */
416 int busy_level; /* saturation history */
417
418 u64 inuse_margin_vtime;
419 bool weights_updated;
420 atomic_t hweight_gen; /* for lazy hweights */
421
422 u64 autop_too_fast_at;
423 u64 autop_too_slow_at;
424 int autop_idx;
425 bool user_qos_params:1;
426 bool user_cost_model:1;
427 };
428
429 /* per device-cgroup pair */
430 struct ioc_gq {
431 struct blkg_policy_data pd;
432 struct ioc *ioc;
433
434 /*
435 * An iocg can get its weight from two sources - an explicit
436 * per-device-cgroup configuration or the default weight of the
437 * cgroup. `cfg_weight` is the explicit per-device-cgroup
438 * configuration. `weight` is the effective weight considering
439 * both sources.
440 *
441 * When an idle cgroup becomes active its `active` goes from 0 to
442 * `weight`. `inuse` is the surplus adjusted active weight.
443 * `active` and `inuse` are used to calculate `hweight_active` and
444 * `hweight_inuse`.
445 *
446 * `last_inuse` remembers `inuse` while an iocg is idle to persist
447 * surplus adjustments.
448 */
449 u32 cfg_weight;
450 u32 weight;
451 u32 active;
452 u32 inuse;
453 u32 last_inuse;
454
455 sector_t cursor; /* to detect randio */
456
457 /*
458 * `vtime` is this iocg's vtime cursor which progresses as IOs are
459 * issued. If lagging behind device vtime, the delta represents
460 * the currently available IO budget. If running ahead, the
461 * overage.
462 *
463 * `done_vtime` is the same but progressed on completion rather
464 * than issue. The delta behind `vtime` represents the cost of
465 * currently in-flight IOs.
466 *
467 * `last_vtime` is used to remember `vtime` at the end of the last
468 * period to calculate utilization.
469 */
470 atomic64_t vtime;
471 atomic64_t done_vtime;
472 u64 abs_vdebt;
473 u64 last_vtime;
474
475 /*
476 * The period this iocg was last active in. Used for deactivation
477 * and invalidating `vtime`.
478 */
479 atomic64_t active_period;
480 struct list_head active_list;
481
482 /* see __propagate_weights() and current_hweight() for details */
483 u64 child_active_sum;
484 u64 child_inuse_sum;
485 int hweight_gen;
486 u32 hweight_active;
487 u32 hweight_inuse;
488 bool has_surplus;
489
490 struct wait_queue_head waitq;
491 struct hrtimer waitq_timer;
492 struct hrtimer delay_timer;
493
494 /* usage is recorded as fractions of HWEIGHT_WHOLE */
495 int usage_idx;
496 u32 usages[NR_USAGE_SLOTS];
497
498 /* this iocg's depth in the hierarchy and ancestors including self */
499 int level;
500 struct ioc_gq *ancestors[];
501 };
502
503 /* per cgroup */
504 struct ioc_cgrp {
505 struct blkcg_policy_data cpd;
506 unsigned int dfl_weight;
507 };
508
509 struct ioc_now {
510 u64 now_ns;
511 u32 now;
512 u64 vnow;
513 u64 vrate;
514 };
515
516 struct iocg_wait {
517 struct wait_queue_entry wait;
518 struct bio *bio;
519 u64 abs_cost;
520 bool committed;
521 };
522
523 struct iocg_wake_ctx {
524 struct ioc_gq *iocg;
525 u32 hw_inuse;
526 s64 vbudget;
527 };
528
529 static const struct ioc_params autop[] = {
530 [AUTOP_HDD] = {
531 .qos = {
532 [QOS_RLAT] = 250000, /* 250ms */
533 [QOS_WLAT] = 250000,
534 [QOS_MIN] = VRATE_MIN_PPM,
535 [QOS_MAX] = VRATE_MAX_PPM,
536 },
537 .i_lcoefs = {
538 [I_LCOEF_RBPS] = 174019176,
539 [I_LCOEF_RSEQIOPS] = 41708,
540 [I_LCOEF_RRANDIOPS] = 370,
541 [I_LCOEF_WBPS] = 178075866,
542 [I_LCOEF_WSEQIOPS] = 42705,
543 [I_LCOEF_WRANDIOPS] = 378,
544 },
545 },
546 [AUTOP_SSD_QD1] = {
547 .qos = {
548 [QOS_RLAT] = 25000, /* 25ms */
549 [QOS_WLAT] = 25000,
550 [QOS_MIN] = VRATE_MIN_PPM,
551 [QOS_MAX] = VRATE_MAX_PPM,
552 },
553 .i_lcoefs = {
554 [I_LCOEF_RBPS] = 245855193,
555 [I_LCOEF_RSEQIOPS] = 61575,
556 [I_LCOEF_RRANDIOPS] = 6946,
557 [I_LCOEF_WBPS] = 141365009,
558 [I_LCOEF_WSEQIOPS] = 33716,
559 [I_LCOEF_WRANDIOPS] = 26796,
560 },
561 },
562 [AUTOP_SSD_DFL] = {
563 .qos = {
564 [QOS_RLAT] = 25000, /* 25ms */
565 [QOS_WLAT] = 25000,
566 [QOS_MIN] = VRATE_MIN_PPM,
567 [QOS_MAX] = VRATE_MAX_PPM,
568 },
569 .i_lcoefs = {
570 [I_LCOEF_RBPS] = 488636629,
571 [I_LCOEF_RSEQIOPS] = 8932,
572 [I_LCOEF_RRANDIOPS] = 8518,
573 [I_LCOEF_WBPS] = 427891549,
574 [I_LCOEF_WSEQIOPS] = 28755,
575 [I_LCOEF_WRANDIOPS] = 21940,
576 },
577 .too_fast_vrate_pct = 500,
578 },
579 [AUTOP_SSD_FAST] = {
580 .qos = {
581 [QOS_RLAT] = 5000, /* 5ms */
582 [QOS_WLAT] = 5000,
583 [QOS_MIN] = VRATE_MIN_PPM,
584 [QOS_MAX] = VRATE_MAX_PPM,
585 },
586 .i_lcoefs = {
587 [I_LCOEF_RBPS] = 3102524156LLU,
588 [I_LCOEF_RSEQIOPS] = 724816,
589 [I_LCOEF_RRANDIOPS] = 778122,
590 [I_LCOEF_WBPS] = 1742780862LLU,
591 [I_LCOEF_WSEQIOPS] = 425702,
592 [I_LCOEF_WRANDIOPS] = 443193,
593 },
594 .too_slow_vrate_pct = 10,
595 },
596 };
597
598 /*
599 * vrate adjust percentages indexed by ioc->busy_level. We adjust up on
600 * vtime credit shortage and down on device saturation.
601 */
602 static u32 vrate_adj_pct[] =
603 { 0, 0, 0, 0,
604 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
605 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
606 4, 4, 4, 4, 4, 4, 4, 4, 8, 8, 8, 8, 8, 8, 8, 8, 16 };
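
/*
 * For example, a sustained busy_level of 8 picks vrate_adj_pct[8] == 1
 * and vrate is nudged down to 99% of its previous value each period,
 * while a busy_level of -40 nudges it up by 4% (see ioc_timer_fn()).
 */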
607
608 static struct blkcg_policy blkcg_policy_iocost;
609
610 /* accessors and helpers */
611 static struct ioc *rqos_to_ioc(struct rq_qos *rqos)
612 {
613 return container_of(rqos, struct ioc, rqos);
614 }
615
616 static struct ioc *q_to_ioc(struct request_queue *q)
617 {
618 return rqos_to_ioc(rq_qos_id(q, RQ_QOS_COST));
619 }
620
621 static const char *q_name(struct request_queue *q)
622 {
623 if (test_bit(QUEUE_FLAG_REGISTERED, &q->queue_flags))
624 return kobject_name(q->kobj.parent);
625 else
626 return "<unknown>";
627 }
628
629 static const char __maybe_unused *ioc_name(struct ioc *ioc)
630 {
631 return q_name(ioc->rqos.q);
632 }
633
634 static struct ioc_gq *pd_to_iocg(struct blkg_policy_data *pd)
635 {
636 return pd ? container_of(pd, struct ioc_gq, pd) : NULL;
637 }
638
639 static struct ioc_gq *blkg_to_iocg(struct blkcg_gq *blkg)
640 {
641 return pd_to_iocg(blkg_to_pd(blkg, &blkcg_policy_iocost));
642 }
643
644 static struct blkcg_gq *iocg_to_blkg(struct ioc_gq *iocg)
645 {
646 return pd_to_blkg(&iocg->pd);
647 }
648
649 static struct ioc_cgrp *blkcg_to_iocc(struct blkcg *blkcg)
650 {
651 return container_of(blkcg_to_cpd(blkcg, &blkcg_policy_iocost),
652 struct ioc_cgrp, cpd);
653 }
654
655 /*
656 * Scale @abs_cost to the inverse of @hw_inuse. The lower the hierarchical
657 * weight, the more expensive each IO. Must round up.
658 */
659 static u64 abs_cost_to_cost(u64 abs_cost, u32 hw_inuse)
660 {
661 return DIV64_U64_ROUND_UP(abs_cost * HWEIGHT_WHOLE, hw_inuse);
662 }
663
664 /*
665 * The inverse of abs_cost_to_cost(). Must round up.
666 */
667 static u64 cost_to_abs_cost(u64 cost, u32 hw_inuse)
668 {
669 return DIV64_U64_ROUND_UP(cost * hw_inuse, HWEIGHT_WHOLE);
670 }
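
/*
 * Worked example of the two helpers above: with hw_inuse at half of
 * HWEIGHT_WHOLE, i.e. a 50% hierarchical share, each unit of absolute
 * cost charges two units of vtime:
 *
 *	abs_cost_to_cost(1000, HWEIGHT_WHOLE / 2) == 2000
 *	cost_to_abs_cost(2000, HWEIGHT_WHOLE / 2) == 1000
 */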
671
672 static void iocg_commit_bio(struct ioc_gq *iocg, struct bio *bio, u64 cost)
673 {
674 bio->bi_iocost_cost = cost;
675 atomic64_add(cost, &iocg->vtime);
676 }
677
678 #define CREATE_TRACE_POINTS
679 #include <trace/events/iocost.h>
680
681 /* latency QoS params changed, update period_us and all the dependent params */
682 static void ioc_refresh_period_us(struct ioc *ioc)
683 {
684 u32 ppm, lat, multi, period_us;
685
686 lockdep_assert_held(&ioc->lock);
687
688 /* pick the higher latency target */
689 if (ioc->params.qos[QOS_RLAT] >= ioc->params.qos[QOS_WLAT]) {
690 ppm = ioc->params.qos[QOS_RPPM];
691 lat = ioc->params.qos[QOS_RLAT];
692 } else {
693 ppm = ioc->params.qos[QOS_WPPM];
694 lat = ioc->params.qos[QOS_WLAT];
695 }
696
697 /*
698 * We want the period to be long enough to contain a healthy number
699 * of IOs while short enough for granular control. Define it as a
700 * multiple of the latency target. Ideally, the multiplier should
701 * be scaled according to the percentile so that it would nominally
702 * contain a certain number of requests. Let's be simpler and
703 * scale it linearly so that the multiplier is 2x at pct(90) and above and 10x at pct(50).
704 */
705 if (ppm)
706 multi = max_t(u32, (MILLION - ppm) / 50000, 2);
707 else
708 multi = 2;
709 period_us = multi * lat;
710 period_us = clamp_t(u32, period_us, MIN_PERIOD, MAX_PERIOD);
711
712 /* calculate dependent params */
713 ioc->period_us = period_us;
714 ioc->margin_us = period_us * MARGIN_PCT / 100;
715 ioc->inuse_margin_vtime = DIV64_U64_ROUND_UP(
716 period_us * VTIME_PER_USEC * INUSE_MARGIN_PCT, 100);
717 }
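
/*
 * Worked example of the above: for a p95/25ms read target (QOS_RPPM ==
 * 950000, QOS_RLAT == 25000), multi == max((1000000 - 950000) / 50000, 2)
 * == 2, so the period becomes 50ms before the [MIN_PERIOD, MAX_PERIOD]
 * clamp.
 */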
718
719 static int ioc_autop_idx(struct ioc *ioc)
720 {
721 int idx = ioc->autop_idx;
722 const struct ioc_params *p = &autop[idx];
723 u32 vrate_pct;
724 u64 now_ns;
725
726 /* rotational? */
727 if (!blk_queue_nonrot(ioc->rqos.q))
728 return AUTOP_HDD;
729
730 /* handle SATA SSDs w/ broken NCQ */
731 if (blk_queue_depth(ioc->rqos.q) == 1)
732 return AUTOP_SSD_QD1;
733
734 /* use one of the normal ssd sets */
735 if (idx < AUTOP_SSD_DFL)
736 return AUTOP_SSD_DFL;
737
738 /* if user is overriding anything, maintain what was there */
739 if (ioc->user_qos_params || ioc->user_cost_model)
740 return idx;
741
742 /* step up/down based on the vrate */
743 vrate_pct = div64_u64(atomic64_read(&ioc->vtime_rate) * 100,
744 VTIME_PER_USEC);
745 now_ns = ktime_get_ns();
746
747 if (p->too_fast_vrate_pct && p->too_fast_vrate_pct <= vrate_pct) {
748 if (!ioc->autop_too_fast_at)
749 ioc->autop_too_fast_at = now_ns;
750 if (now_ns - ioc->autop_too_fast_at >= AUTOP_CYCLE_NSEC)
751 return idx + 1;
752 } else {
753 ioc->autop_too_fast_at = 0;
754 }
755
756 if (p->too_slow_vrate_pct && p->too_slow_vrate_pct >= vrate_pct) {
757 if (!ioc->autop_too_slow_at)
758 ioc->autop_too_slow_at = now_ns;
759 if (now_ns - ioc->autop_too_slow_at >= AUTOP_CYCLE_NSEC)
760 return idx - 1;
761 } else {
762 ioc->autop_too_slow_at = 0;
763 }
764
765 return idx;
766 }
767
768 /*
769 * Take the following as input
770 *
771 * @bps maximum sequential throughput
772 * @seqiops maximum sequential 4k iops
773 * @randiops maximum random 4k iops
774 *
775 * and calculate the linear model cost coefficients.
776 *
777 * *@page per-page cost 1s / (@bps / 4096)
778 * *@seqio base cost of a seq IO max((1s / @seqiops) - *@page, 0)
779 * *@randio base cost of a rand IO max((1s / @randiops) - *@page, 0)
780 */
781 static void calc_lcoefs(u64 bps, u64 seqiops, u64 randiops,
782 u64 *page, u64 *seqio, u64 *randio)
783 {
784 u64 v;
785
786 *page = *seqio = *randio = 0;
787
788 if (bps)
789 *page = DIV64_U64_ROUND_UP(VTIME_PER_SEC,
790 DIV_ROUND_UP_ULL(bps, IOC_PAGE_SIZE));
791
792 if (seqiops) {
793 v = DIV64_U64_ROUND_UP(VTIME_PER_SEC, seqiops);
794 if (v > *page)
795 *seqio = v - *page;
796 }
797
798 if (randiops) {
799 v = DIV64_U64_ROUND_UP(VTIME_PER_SEC, randiops);
800 if (v > *page)
801 *randio = v - *page;
802 }
803 }
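
/*
 * For illustration with the builtin HDD parameters above: a single-page
 * random read ends up costing VTIME_PER_SEC / 370 vtime units
 * (coef_randio + coef_page) while a single-page sequential read costs
 * VTIME_PER_SEC / 41708, i.e. random IO is roughly two orders of
 * magnitude more expensive on such a device.
 */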
804
805 static void ioc_refresh_lcoefs(struct ioc *ioc)
806 {
807 u64 *u = ioc->params.i_lcoefs;
808 u64 *c = ioc->params.lcoefs;
809
810 calc_lcoefs(u[I_LCOEF_RBPS], u[I_LCOEF_RSEQIOPS], u[I_LCOEF_RRANDIOPS],
811 &c[LCOEF_RPAGE], &c[LCOEF_RSEQIO], &c[LCOEF_RRANDIO]);
812 calc_lcoefs(u[I_LCOEF_WBPS], u[I_LCOEF_WSEQIOPS], u[I_LCOEF_WRANDIOPS],
813 &c[LCOEF_WPAGE], &c[LCOEF_WSEQIO], &c[LCOEF_WRANDIO]);
814 }
815
816 static bool ioc_refresh_params(struct ioc *ioc, bool force)
817 {
818 const struct ioc_params *p;
819 int idx;
820
821 lockdep_assert_held(&ioc->lock);
822
823 idx = ioc_autop_idx(ioc);
824 p = &autop[idx];
825
826 if (idx == ioc->autop_idx && !force)
827 return false;
828
829 if (idx != ioc->autop_idx)
830 atomic64_set(&ioc->vtime_rate, VTIME_PER_USEC);
831
832 ioc->autop_idx = idx;
833 ioc->autop_too_fast_at = 0;
834 ioc->autop_too_slow_at = 0;
835
836 if (!ioc->user_qos_params)
837 memcpy(ioc->params.qos, p->qos, sizeof(p->qos));
838 if (!ioc->user_cost_model)
839 memcpy(ioc->params.i_lcoefs, p->i_lcoefs, sizeof(p->i_lcoefs));
840
841 ioc_refresh_period_us(ioc);
842 ioc_refresh_lcoefs(ioc);
843
844 ioc->vrate_min = DIV64_U64_ROUND_UP((u64)ioc->params.qos[QOS_MIN] *
845 VTIME_PER_USEC, MILLION);
846 ioc->vrate_max = div64_u64((u64)ioc->params.qos[QOS_MAX] *
847 VTIME_PER_USEC, MILLION);
848
849 return true;
850 }
851
852 /* take a snapshot of the current [v]time and vrate */
853 static void ioc_now(struct ioc *ioc, struct ioc_now *now)
854 {
855 unsigned seq;
856
857 now->now_ns = ktime_get();
858 now->now = ktime_to_us(now->now_ns);
859 now->vrate = atomic64_read(&ioc->vtime_rate);
860
861 /*
862 * The current vtime is
863 *
864 * vtime at period start + (wallclock time since the start) * vrate
865 *
866 * As a consistent snapshot of `period_at_vtime` and `period_at` is
867 * needed, they're seqcount protected.
868 */
869 do {
870 seq = read_seqcount_begin(&ioc->period_seqcount);
871 now->vnow = ioc->period_at_vtime +
872 (now->now - ioc->period_at) * now->vrate;
873 } while (read_seqcount_retry(&ioc->period_seqcount, seq));
874 }
875
876 static void ioc_start_period(struct ioc *ioc, struct ioc_now *now)
877 {
878 WARN_ON_ONCE(ioc->running != IOC_RUNNING);
879
880 write_seqcount_begin(&ioc->period_seqcount);
881 ioc->period_at = now->now;
882 ioc->period_at_vtime = now->vnow;
883 write_seqcount_end(&ioc->period_seqcount);
884
885 ioc->timer.expires = jiffies + usecs_to_jiffies(ioc->period_us);
886 add_timer(&ioc->timer);
887 }
888
889 /*
890 * Update @iocg's `active` and `inuse` to @active and @inuse, update level
891 * weight sums and propagate upwards accordingly.
892 */
893 static void __propagate_weights(struct ioc_gq *iocg, u32 active, u32 inuse)
894 {
895 struct ioc *ioc = iocg->ioc;
896 int lvl;
897
898 lockdep_assert_held(&ioc->lock);
899
900 inuse = clamp_t(u32, inuse, 1, active);
901
902 if (active == iocg->active && inuse == iocg->inuse)
903 return;
904
905 for (lvl = iocg->level - 1; lvl >= 0; lvl--) {
906 struct ioc_gq *parent = iocg->ancestors[lvl];
907 struct ioc_gq *child = iocg->ancestors[lvl + 1];
908 u32 parent_active = 0, parent_inuse = 0;
909
910 /* update the level sums */
911 parent->child_active_sum += (s32)(active - child->active);
912 parent->child_inuse_sum += (s32)(inuse - child->inuse);
913 /* apply the updates */
914 child->active = active;
915 child->inuse = inuse;
916
917 /*
918 * The delta between the inuse and active sums indicates how much
919 * weight is being given away. The parent's inuse and active
920 * should reflect the ratio.
921 */
922 if (parent->child_active_sum) {
923 parent_active = parent->weight;
924 parent_inuse = DIV64_U64_ROUND_UP(
925 parent_active * parent->child_inuse_sum,
926 parent->child_active_sum);
927 }
928
929 /* do we need to keep walking up? */
930 if (parent_active == parent->active &&
931 parent_inuse == parent->inuse)
932 break;
933
934 active = parent_active;
935 inuse = parent_inuse;
936 }
937
938 ioc->weights_updated = true;
939 }
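
/*
 * An illustrative walk of the above: if a leaf with active == 100 under
 * a parent whose child_active_sum == 200 lowers its inuse from 100 to
 * 50, child_inuse_sum drops to 150 and the parent's own inuse becomes
 * parent->weight * 150 / 200, propagating the donation one level up.
 */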
940
941 static void commit_weights(struct ioc *ioc)
942 {
943 lockdep_assert_held(&ioc->lock);
944
945 if (ioc->weights_updated) {
946 /* paired with rmb in current_hweight(), see there */
947 smp_wmb();
948 atomic_inc(&ioc->hweight_gen);
949 ioc->weights_updated = false;
950 }
951 }
952
953 static void propagate_weights(struct ioc_gq *iocg, u32 active, u32 inuse)
954 {
955 __propagate_weights(iocg, active, inuse);
956 commit_weights(iocg->ioc);
957 }
958
959 static void current_hweight(struct ioc_gq *iocg, u32 *hw_activep, u32 *hw_inusep)
960 {
961 struct ioc *ioc = iocg->ioc;
962 int lvl;
963 u32 hwa, hwi;
964 int ioc_gen;
965
966 /* hot path - if uptodate, use cached */
967 ioc_gen = atomic_read(&ioc->hweight_gen);
968 if (ioc_gen == iocg->hweight_gen)
969 goto out;
970
971 /*
972 * Paired with wmb in commit_weights(). If we saw the updated
973 * hweight_gen, all the weight updates from __propagate_weights() are
974 * visible too.
975 *
976 * We can race with weight updates during calculation and get it
977 * wrong. However, hweight_gen would have changed and a future
978 * reader will recalculate and we're guaranteed to discard the
979 * wrong result soon.
980 */
981 smp_rmb();
982
983 hwa = hwi = HWEIGHT_WHOLE;
984 for (lvl = 0; lvl <= iocg->level - 1; lvl++) {
985 struct ioc_gq *parent = iocg->ancestors[lvl];
986 struct ioc_gq *child = iocg->ancestors[lvl + 1];
987 u32 active_sum = READ_ONCE(parent->child_active_sum);
988 u32 inuse_sum = READ_ONCE(parent->child_inuse_sum);
989 u32 active = READ_ONCE(child->active);
990 u32 inuse = READ_ONCE(child->inuse);
991
992 /* we can race with deactivations and either may read as zero */
993 if (!active_sum || !inuse_sum)
994 continue;
995
996 active_sum = max(active, active_sum);
997 hwa = hwa * active / active_sum; /* max 16bits * 10000 */
998
999 inuse_sum = max(inuse, inuse_sum);
1000 hwi = hwi * inuse / inuse_sum; /* max 16bits * 10000 */
1001 }
1002
1003 iocg->hweight_active = max_t(u32, hwa, 1);
1004 iocg->hweight_inuse = max_t(u32, hwi, 1);
1005 iocg->hweight_gen = ioc_gen;
1006 out:
1007 if (hw_activep)
1008 *hw_activep = iocg->hweight_active;
1009 if (hw_inusep)
1010 *hw_inusep = iocg->hweight_inuse;
1011 }
1012
1013 static void weight_updated(struct ioc_gq *iocg)
1014 {
1015 struct ioc *ioc = iocg->ioc;
1016 struct blkcg_gq *blkg = iocg_to_blkg(iocg);
1017 struct ioc_cgrp *iocc = blkcg_to_iocc(blkg->blkcg);
1018 u32 weight;
1019
1020 lockdep_assert_held(&ioc->lock);
1021
1022 weight = iocg->cfg_weight ?: iocc->dfl_weight;
1023 if (weight != iocg->weight && iocg->active)
1024 propagate_weights(iocg, weight,
1025 DIV64_U64_ROUND_UP(iocg->inuse * weight, iocg->weight));
1026 iocg->weight = weight;
1027 }
1028
1029 static bool iocg_activate(struct ioc_gq *iocg, struct ioc_now *now)
1030 {
1031 struct ioc *ioc = iocg->ioc;
1032 u64 last_period, cur_period, max_period_delta;
1033 u64 vtime, vmargin, vmin;
1034 int i;
1035
1036 /*
1037 * If we already seem to be active, just update the stamp to tell the
1038 * timer that we're still active. We don't mind occasional races.
1039 */
1040 if (!list_empty(&iocg->active_list)) {
1041 ioc_now(ioc, now);
1042 cur_period = atomic64_read(&ioc->cur_period);
1043 if (atomic64_read(&iocg->active_period) != cur_period)
1044 atomic64_set(&iocg->active_period, cur_period);
1045 return true;
1046 }
1047
1048 /* racy check on internal node IOs, treat as root level IOs */
1049 if (iocg->child_active_sum)
1050 return false;
1051
1052 spin_lock_irq(&ioc->lock);
1053
1054 ioc_now(ioc, now);
1055
1056 /* update period */
1057 cur_period = atomic64_read(&ioc->cur_period);
1058 last_period = atomic64_read(&iocg->active_period);
1059 atomic64_set(&iocg->active_period, cur_period);
1060
1061 /* already activated or breaking leaf-only constraint? */
1062 if (!list_empty(&iocg->active_list))
1063 goto succeed_unlock;
1064 for (i = iocg->level - 1; i > 0; i--)
1065 if (!list_empty(&iocg->ancestors[i]->active_list))
1066 goto fail_unlock;
1067
1068 if (iocg->child_active_sum)
1069 goto fail_unlock;
1070
1071 /*
1072 * vtime may wrap when vrate is raised substantially due to
1073 * underestimated IO costs. Look at the period and ignore its
1074 * vtime if the iocg has been idle for too long. Also, cap the
1075 * budget it can start with to the margin.
1076 */
1077 max_period_delta = DIV64_U64_ROUND_UP(VTIME_VALID_DUR, ioc->period_us);
1078 vtime = atomic64_read(&iocg->vtime);
1079 vmargin = ioc->margin_us * now->vrate;
1080 vmin = now->vnow - vmargin;
1081
1082 if (last_period + max_period_delta < cur_period ||
1083 time_before64(vtime, vmin)) {
1084 atomic64_add(vmin - vtime, &iocg->vtime);
1085 atomic64_add(vmin - vtime, &iocg->done_vtime);
1086 vtime = vmin;
1087 }
1088
1089 /*
1090 * Activate, propagate weight and start period timer if not
1091 * running. Reset hweight_gen to avoid accidental match from
1092 * wrapping.
1093 */
1094 iocg->hweight_gen = atomic_read(&ioc->hweight_gen) - 1;
1095 list_add(&iocg->active_list, &ioc->active_iocgs);
1096 propagate_weights(iocg, iocg->weight,
1097 iocg->last_inuse ?: iocg->weight);
1098
1099 TRACE_IOCG_PATH(iocg_activate, iocg, now,
1100 last_period, cur_period, vtime);
1101
1102 iocg->last_vtime = vtime;
1103
1104 if (ioc->running == IOC_IDLE) {
1105 ioc->running = IOC_RUNNING;
1106 ioc_start_period(ioc, now);
1107 }
1108
1109 succeed_unlock:
1110 spin_unlock_irq(&ioc->lock);
1111 return true;
1112
1113 fail_unlock:
1114 spin_unlock_irq(&ioc->lock);
1115 return false;
1116 }
1117
1118 static int iocg_wake_fn(struct wait_queue_entry *wq_entry, unsigned mode,
1119 int flags, void *key)
1120 {
1121 struct iocg_wait *wait = container_of(wq_entry, struct iocg_wait, wait);
1122 struct iocg_wake_ctx *ctx = (struct iocg_wake_ctx *)key;
1123 u64 cost = abs_cost_to_cost(wait->abs_cost, ctx->hw_inuse);
1124
1125 ctx->vbudget -= cost;
1126
1127 if (ctx->vbudget < 0)
1128 return -1;
1129
1130 iocg_commit_bio(ctx->iocg, wait->bio, cost);
1131
1132 /*
1133 * autoremove_wake_function() removes the wait entry only when it
1134 * actually changed the task state. We want the wait always
1135 * removed. Remove explicitly and use default_wake_function().
1136 */
1137 list_del_init(&wq_entry->entry);
1138 wait->committed = true;
1139
1140 default_wake_function(wq_entry, mode, flags, key);
1141 return 0;
1142 }
1143
1144 static void iocg_kick_waitq(struct ioc_gq *iocg, struct ioc_now *now)
1145 {
1146 struct ioc *ioc = iocg->ioc;
1147 struct iocg_wake_ctx ctx = { .iocg = iocg };
1148 u64 margin_ns = (u64)(ioc->period_us *
1149 WAITQ_TIMER_MARGIN_PCT / 100) * NSEC_PER_USEC;
1150 u64 vdebt, vshortage, expires, oexpires;
1151 s64 vbudget;
1152 u32 hw_inuse;
1153
1154 lockdep_assert_held(&iocg->waitq.lock);
1155
1156 current_hweight(iocg, NULL, &hw_inuse);
1157 vbudget = now->vnow - atomic64_read(&iocg->vtime);
1158
1159 /* pay off debt */
1160 vdebt = abs_cost_to_cost(iocg->abs_vdebt, hw_inuse);
1161 if (vdebt && vbudget > 0) {
1162 u64 delta = min_t(u64, vbudget, vdebt);
1163 u64 abs_delta = min(cost_to_abs_cost(delta, hw_inuse),
1164 iocg->abs_vdebt);
1165
1166 atomic64_add(delta, &iocg->vtime);
1167 atomic64_add(delta, &iocg->done_vtime);
1168 iocg->abs_vdebt -= abs_delta;
1169 }
1170
1171 /*
1172 * Wake up the ones which are due and see how much vtime we'll need
1173 * for the next one.
1174 */
1175 ctx.hw_inuse = hw_inuse;
1176 ctx.vbudget = vbudget - vdebt;
1177 __wake_up_locked_key(&iocg->waitq, TASK_NORMAL, &ctx);
1178 if (!waitqueue_active(&iocg->waitq))
1179 return;
1180 if (WARN_ON_ONCE(ctx.vbudget >= 0))
1181 return;
1182
1183 /* determine next wakeup, add a quarter margin to guarantee chunking */
1184 vshortage = -ctx.vbudget;
1185 expires = now->now_ns +
1186 DIV64_U64_ROUND_UP(vshortage, now->vrate) * NSEC_PER_USEC;
1187 expires += margin_ns / 4;
1188
1189 /* if already active and close enough, don't bother */
1190 oexpires = ktime_to_ns(hrtimer_get_softexpires(&iocg->waitq_timer));
1191 if (hrtimer_is_queued(&iocg->waitq_timer) &&
1192 abs(oexpires - expires) <= margin_ns / 4)
1193 return;
1194
1195 hrtimer_start_range_ns(&iocg->waitq_timer, ns_to_ktime(expires),
1196 margin_ns / 4, HRTIMER_MODE_ABS);
1197 }
1198
1199 static enum hrtimer_restart iocg_waitq_timer_fn(struct hrtimer *timer)
1200 {
1201 struct ioc_gq *iocg = container_of(timer, struct ioc_gq, waitq_timer);
1202 struct ioc_now now;
1203 unsigned long flags;
1204
1205 ioc_now(iocg->ioc, &now);
1206
1207 spin_lock_irqsave(&iocg->waitq.lock, flags);
1208 iocg_kick_waitq(iocg, &now);
1209 spin_unlock_irqrestore(&iocg->waitq.lock, flags);
1210
1211 return HRTIMER_NORESTART;
1212 }
1213
1214 static bool iocg_kick_delay(struct ioc_gq *iocg, struct ioc_now *now)
1215 {
1216 struct ioc *ioc = iocg->ioc;
1217 struct blkcg_gq *blkg = iocg_to_blkg(iocg);
1218 u64 vtime = atomic64_read(&iocg->vtime);
1219 u64 vmargin = ioc->margin_us * now->vrate;
1220 u64 margin_ns = ioc->margin_us * NSEC_PER_USEC;
1221 u64 delta_ns, expires, oexpires;
1222 u32 hw_inuse;
1223
1224 lockdep_assert_held(&iocg->waitq.lock);
1225
1226 /* debt-adjust vtime */
1227 current_hweight(iocg, NULL, &hw_inuse);
1228 vtime += abs_cost_to_cost(iocg->abs_vdebt, hw_inuse);
1229
1230 /*
1231 * Clear or maintain depending on the overage. Non-zero vdebt is what
1232 * guarantees that @iocg is online and future iocg_kick_delay() will
1233 * clear use_delay. Don't leave it on when there's no vdebt.
1234 */
1235 if (!iocg->abs_vdebt || time_before_eq64(vtime, now->vnow)) {
1236 blkcg_clear_delay(blkg);
1237 return false;
1238 }
1239 if (!atomic_read(&blkg->use_delay) &&
1240 time_before_eq64(vtime, now->vnow + vmargin))
1241 return false;
1242
1243 /* use delay */
1244 delta_ns = DIV64_U64_ROUND_UP(vtime - now->vnow,
1245 now->vrate) * NSEC_PER_USEC;
1246 blkcg_set_delay(blkg, delta_ns);
1247 expires = now->now_ns + delta_ns;
1248
1249 /* if already active and close enough, don't bother */
1250 oexpires = ktime_to_ns(hrtimer_get_softexpires(&iocg->delay_timer));
1251 if (hrtimer_is_queued(&iocg->delay_timer) &&
1252 abs(oexpires - expires) <= margin_ns / 4)
1253 return true;
1254
1255 hrtimer_start_range_ns(&iocg->delay_timer, ns_to_ktime(expires),
1256 margin_ns / 4, HRTIMER_MODE_ABS);
1257 return true;
1258 }
1259
1260 static enum hrtimer_restart iocg_delay_timer_fn(struct hrtimer *timer)
1261 {
1262 struct ioc_gq *iocg = container_of(timer, struct ioc_gq, delay_timer);
1263 struct ioc_now now;
1264 unsigned long flags;
1265
1266 spin_lock_irqsave(&iocg->waitq.lock, flags);
1267 ioc_now(iocg->ioc, &now);
1268 iocg_kick_delay(iocg, &now);
1269 spin_unlock_irqrestore(&iocg->waitq.lock, flags);
1270
1271 return HRTIMER_NORESTART;
1272 }
1273
1274 static void ioc_lat_stat(struct ioc *ioc, u32 *missed_ppm_ar, u32 *rq_wait_pct_p)
1275 {
1276 u32 nr_met[2] = { };
1277 u32 nr_missed[2] = { };
1278 u64 rq_wait_ns = 0;
1279 int cpu, rw;
1280
1281 for_each_online_cpu(cpu) {
1282 struct ioc_pcpu_stat *stat = per_cpu_ptr(ioc->pcpu_stat, cpu);
1283 u64 this_rq_wait_ns;
1284
1285 for (rw = READ; rw <= WRITE; rw++) {
1286 u32 this_met = local_read(&stat->missed[rw].nr_met);
1287 u32 this_missed = local_read(&stat->missed[rw].nr_missed);
1288
1289 nr_met[rw] += this_met - stat->missed[rw].last_met;
1290 nr_missed[rw] += this_missed - stat->missed[rw].last_missed;
1291 stat->missed[rw].last_met = this_met;
1292 stat->missed[rw].last_missed = this_missed;
1293 }
1294
1295 this_rq_wait_ns = local64_read(&stat->rq_wait_ns);
1296 rq_wait_ns += this_rq_wait_ns - stat->last_rq_wait_ns;
1297 stat->last_rq_wait_ns = this_rq_wait_ns;
1298 }
1299
1300 for (rw = READ; rw <= WRITE; rw++) {
1301 if (nr_met[rw] + nr_missed[rw])
1302 missed_ppm_ar[rw] =
1303 DIV64_U64_ROUND_UP((u64)nr_missed[rw] * MILLION,
1304 nr_met[rw] + nr_missed[rw]);
1305 else
1306 missed_ppm_ar[rw] = 0;
1307 }
1308
1309 *rq_wait_pct_p = div64_u64(rq_wait_ns * 100,
1310 ioc->period_us * NSEC_PER_USEC);
1311 }
1312
1313 /* was iocg idle this period? */
1314 static bool iocg_is_idle(struct ioc_gq *iocg)
1315 {
1316 struct ioc *ioc = iocg->ioc;
1317
1318 /* did something get issued this period? */
1319 if (atomic64_read(&iocg->active_period) ==
1320 atomic64_read(&ioc->cur_period))
1321 return false;
1322
1323 /* is something in flight? */
1324 if (atomic64_read(&iocg->done_vtime) != atomic64_read(&iocg->vtime))
1325 return false;
1326
1327 return true;
1328 }
1329
1330 /* returns usage with margin added if surplus is large enough */
1331 static u32 surplus_adjusted_hweight_inuse(u32 usage, u32 hw_inuse)
1332 {
1333 /* add margin */
1334 usage = DIV_ROUND_UP(usage * SURPLUS_SCALE_PCT, 100);
1335 usage += SURPLUS_SCALE_ABS;
1336
1337 /* don't bother if the surplus is too small */
1338 if (usage + SURPLUS_MIN_ADJ_DELTA > hw_inuse)
1339 return 0;
1340
1341 return usage;
1342 }
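
/*
 * Example: usage at 20% of HWEIGHT_WHOLE with hw_inuse at 50%. The
 * margin-added usage is 20% * 125% + 2% == 27% and, as 27% + 3% is
 * still below 50%, the iocg is considered to have surplus and its
 * hweight_inuse may be shrunk towards 27%.
 */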
1343
1344 static void ioc_timer_fn(struct timer_list *timer)
1345 {
1346 struct ioc *ioc = container_of(timer, struct ioc, timer);
1347 struct ioc_gq *iocg, *tiocg;
1348 struct ioc_now now;
1349 int nr_surpluses = 0, nr_shortages = 0, nr_lagging = 0;
1350 u32 ppm_rthr = MILLION - ioc->params.qos[QOS_RPPM];
1351 u32 ppm_wthr = MILLION - ioc->params.qos[QOS_WPPM];
1352 u32 missed_ppm[2], rq_wait_pct;
1353 u64 period_vtime;
1354 int prev_busy_level, i;
1355
1356 /* how were the latencies during the period? */
1357 ioc_lat_stat(ioc, missed_ppm, &rq_wait_pct);
1358
1359 /* take care of active iocgs */
1360 spin_lock_irq(&ioc->lock);
1361
1362 ioc_now(ioc, &now);
1363
1364 period_vtime = now.vnow - ioc->period_at_vtime;
1365 if (WARN_ON_ONCE(!period_vtime)) {
1366 spin_unlock_irq(&ioc->lock);
1367 return;
1368 }
1369
1370 /*
1371 * Waiters determine the sleep durations based on the vrate they
1372 * saw at the time of sleep. If vrate has increased, some waiters
1373 * could be sleeping for too long. Wake up tardy waiters which
1374 * should have woken up in the last period and expire idle iocgs.
1375 */
1376 list_for_each_entry_safe(iocg, tiocg, &ioc->active_iocgs, active_list) {
1377 if (!waitqueue_active(&iocg->waitq) && !iocg->abs_vdebt &&
1378 !iocg_is_idle(iocg))
1379 continue;
1380
1381 spin_lock(&iocg->waitq.lock);
1382
1383 if (waitqueue_active(&iocg->waitq) || iocg->abs_vdebt) {
1384 /* might be oversleeping vtime / hweight changes, kick */
1385 iocg_kick_waitq(iocg, &now);
1386 iocg_kick_delay(iocg, &now);
1387 } else if (iocg_is_idle(iocg)) {
1388 /* no waiter and idle, deactivate */
1389 iocg->last_inuse = iocg->inuse;
1390 __propagate_weights(iocg, 0, 0);
1391 list_del_init(&iocg->active_list);
1392 }
1393
1394 spin_unlock(&iocg->waitq.lock);
1395 }
1396 commit_weights(ioc);
1397
1398 /* calc usages and see whether some weights need to be moved around */
1399 list_for_each_entry(iocg, &ioc->active_iocgs, active_list) {
1400 u64 vdone, vtime, vusage, vmargin, vmin;
1401 u32 hw_active, hw_inuse, usage;
1402
1403 /*
1404 * Collect unused and wind vtime closer to vnow to prevent
1405 * iocgs from accumulating a large amount of budget.
1406 */
1407 vdone = atomic64_read(&iocg->done_vtime);
1408 vtime = atomic64_read(&iocg->vtime);
1409 current_hweight(iocg, &hw_active, &hw_inuse);
1410
1411 /*
1412 * Latency QoS detection doesn't account for IOs which are
1413 * in-flight for longer than a period. Detect them by
1414 * comparing vdone against period start. If lagging behind
1415 * IOs from past periods, don't increase vrate.
1416 */
1417 if ((ppm_rthr != MILLION || ppm_wthr != MILLION) &&
1418 !atomic_read(&iocg_to_blkg(iocg)->use_delay) &&
1419 time_after64(vtime, vdone) &&
1420 time_after64(vtime, now.vnow -
1421 MAX_LAGGING_PERIODS * period_vtime) &&
1422 time_before64(vdone, now.vnow - period_vtime))
1423 nr_lagging++;
1424
1425 if (waitqueue_active(&iocg->waitq))
1426 vusage = now.vnow - iocg->last_vtime;
1427 else if (time_before64(iocg->last_vtime, vtime))
1428 vusage = vtime - iocg->last_vtime;
1429 else
1430 vusage = 0;
1431
1432 iocg->last_vtime += vusage;
1433 /*
1434 * Factor in in-flight vtime into vusage to avoid
1435 * high-latency completions appearing as idle. This should
1436 * be done after the above ->last_vtime adjustment.
1437 */
1438 vusage = max(vusage, vtime - vdone);
1439
1440 /* calculate hweight based usage ratio and record */
1441 if (vusage) {
1442 usage = DIV64_U64_ROUND_UP(vusage * hw_inuse,
1443 period_vtime);
1444 iocg->usage_idx = (iocg->usage_idx + 1) % NR_USAGE_SLOTS;
1445 iocg->usages[iocg->usage_idx] = usage;
1446 } else {
1447 usage = 0;
1448 }
1449
1450 /* see whether there's surplus vtime */
1451 vmargin = ioc->margin_us * now.vrate;
1452 vmin = now.vnow - vmargin;
1453
1454 iocg->has_surplus = false;
1455
1456 if (!waitqueue_active(&iocg->waitq) &&
1457 time_before64(vtime, vmin)) {
1458 u64 delta = vmin - vtime;
1459
1460 /* throw away surplus vtime */
1461 atomic64_add(delta, &iocg->vtime);
1462 atomic64_add(delta, &iocg->done_vtime);
1463 iocg->last_vtime += delta;
1464 /* if usage is sufficiently low, maybe it can donate */
1465 if (surplus_adjusted_hweight_inuse(usage, hw_inuse)) {
1466 iocg->has_surplus = true;
1467 nr_surpluses++;
1468 }
1469 } else if (hw_inuse < hw_active) {
1470 u32 new_hwi, new_inuse;
1471
1472 /* was donating but might need to take back some */
1473 if (waitqueue_active(&iocg->waitq)) {
1474 new_hwi = hw_active;
1475 } else {
1476 new_hwi = max(hw_inuse,
1477 usage * SURPLUS_SCALE_PCT / 100 +
1478 SURPLUS_SCALE_ABS);
1479 }
1480
1481 new_inuse = div64_u64((u64)iocg->inuse * new_hwi,
1482 hw_inuse);
1483 new_inuse = clamp_t(u32, new_inuse, 1, iocg->active);
1484
1485 if (new_inuse > iocg->inuse) {
1486 TRACE_IOCG_PATH(inuse_takeback, iocg, &now,
1487 iocg->inuse, new_inuse,
1488 hw_inuse, new_hwi);
1489 __propagate_weights(iocg, iocg->weight,
1490 new_inuse);
1491 }
1492 } else {
1493 /* genuinely out of vtime */
1494 nr_shortages++;
1495 }
1496 }
1497
1498 if (!nr_shortages || !nr_surpluses)
1499 goto skip_surplus_transfers;
1500
1501 /* there are both shortages and surpluses, transfer surpluses */
1502 list_for_each_entry(iocg, &ioc->active_iocgs, active_list) {
1503 u32 usage, hw_active, hw_inuse, new_hwi, new_inuse;
1504 int nr_valid = 0;
1505
1506 if (!iocg->has_surplus)
1507 continue;
1508
1509 /* base the decision on max historical usage */
1510 for (i = 0, usage = 0; i < NR_USAGE_SLOTS; i++) {
1511 if (iocg->usages[i]) {
1512 usage = max(usage, iocg->usages[i]);
1513 nr_valid++;
1514 }
1515 }
1516 if (nr_valid < MIN_VALID_USAGES)
1517 continue;
1518
1519 current_hweight(iocg, &hw_active, &hw_inuse);
1520 new_hwi = surplus_adjusted_hweight_inuse(usage, hw_inuse);
1521 if (!new_hwi)
1522 continue;
1523
1524 new_inuse = DIV64_U64_ROUND_UP((u64)iocg->inuse * new_hwi,
1525 hw_inuse);
1526 if (new_inuse < iocg->inuse) {
1527 TRACE_IOCG_PATH(inuse_giveaway, iocg, &now,
1528 iocg->inuse, new_inuse,
1529 hw_inuse, new_hwi);
1530 __propagate_weights(iocg, iocg->weight, new_inuse);
1531 }
1532 }
1533 skip_surplus_transfers:
1534 commit_weights(ioc);
1535
1536 /*
1537 * If q is getting clogged or we're missing too much, we're issuing
1538 * too much IO and should lower vtime rate. If we're not missing
1539 * and experiencing shortages but not surpluses, we're too stingy
1540 * and should increase vtime rate.
1541 */
1542 prev_busy_level = ioc->busy_level;
1543 if (rq_wait_pct > RQ_WAIT_BUSY_PCT ||
1544 missed_ppm[READ] > ppm_rthr ||
1545 missed_ppm[WRITE] > ppm_wthr) {
1546 /* clearly missing QoS targets, slow down vrate */
1547 ioc->busy_level = max(ioc->busy_level, 0);
1548 ioc->busy_level++;
1549 } else if (rq_wait_pct <= RQ_WAIT_BUSY_PCT * UNBUSY_THR_PCT / 100 &&
1550 missed_ppm[READ] <= ppm_rthr * UNBUSY_THR_PCT / 100 &&
1551 missed_ppm[WRITE] <= ppm_wthr * UNBUSY_THR_PCT / 100) {
1552 /* QoS targets are being met with >25% margin */
1553 if (nr_shortages) {
1554 /*
1555 * We're throttling while the device has spare
1556 * capacity. If vrate was being slowed down, stop.
1557 */
1558 ioc->busy_level = min(ioc->busy_level, 0);
1559
1560 /*
1561 * If there are IOs spanning multiple periods, wait
1562 * them out before pushing the device harder. If
1563 * there are surpluses, let redistribution work it
1564 * out first.
1565 */
1566 if (!nr_lagging && !nr_surpluses)
1567 ioc->busy_level--;
1568 } else {
1569 /*
1570 * Nobody is being throttled and the users aren't
1571 * issuing enough IOs to saturate the device. We
1572 * simply don't know how close the device is to
1573 * saturation. Coast.
1574 */
1575 ioc->busy_level = 0;
1576 }
1577 } else {
1578 /* inside the hysteresis margin, we're good */
1579 ioc->busy_level = 0;
1580 }
1581
1582 ioc->busy_level = clamp(ioc->busy_level, -1000, 1000);
1583
1584 if (ioc->busy_level > 0 || (ioc->busy_level < 0 && !nr_lagging)) {
1585 u64 vrate = atomic64_read(&ioc->vtime_rate);
1586 u64 vrate_min = ioc->vrate_min, vrate_max = ioc->vrate_max;
1587
1588 /* rq_wait signal is always reliable, ignore user vrate_min */
1589 if (rq_wait_pct > RQ_WAIT_BUSY_PCT)
1590 vrate_min = VRATE_MIN;
1591
1592 /*
1593 * If vrate is out of bounds, apply clamp gradually as the
1594 * bounds can change abruptly. Otherwise, apply busy_level
1595 * based adjustment.
1596 */
1597 if (vrate < vrate_min) {
1598 vrate = div64_u64(vrate * (100 + VRATE_CLAMP_ADJ_PCT),
1599 100);
1600 vrate = min(vrate, vrate_min);
1601 } else if (vrate > vrate_max) {
1602 vrate = div64_u64(vrate * (100 - VRATE_CLAMP_ADJ_PCT),
1603 100);
1604 vrate = max(vrate, vrate_max);
1605 } else {
1606 int idx = min_t(int, abs(ioc->busy_level),
1607 ARRAY_SIZE(vrate_adj_pct) - 1);
1608 u32 adj_pct = vrate_adj_pct[idx];
1609
1610 if (ioc->busy_level > 0)
1611 adj_pct = 100 - adj_pct;
1612 else
1613 adj_pct = 100 + adj_pct;
1614
1615 vrate = clamp(DIV64_U64_ROUND_UP(vrate * adj_pct, 100),
1616 vrate_min, vrate_max);
1617 }
1618
1619 trace_iocost_ioc_vrate_adj(ioc, vrate, missed_ppm, rq_wait_pct,
1620 nr_lagging, nr_shortages,
1621 nr_surpluses);
1622
1623 atomic64_set(&ioc->vtime_rate, vrate);
1624 ioc->inuse_margin_vtime = DIV64_U64_ROUND_UP(
1625 ioc->period_us * vrate * INUSE_MARGIN_PCT, 100);
1626 } else if (ioc->busy_level != prev_busy_level || nr_lagging) {
1627 trace_iocost_ioc_vrate_adj(ioc, atomic64_read(&ioc->vtime_rate),
1628 missed_ppm, rq_wait_pct, nr_lagging,
1629 nr_shortages, nr_surpluses);
1630 }
1631
1632 ioc_refresh_params(ioc, false);
1633
1634 /*
1635 * This period is done. Move onto the next one. If nothing's
1636 * going on with the device, stop the timer.
1637 */
1638 atomic64_inc(&ioc->cur_period);
1639
1640 if (ioc->running != IOC_STOP) {
1641 if (!list_empty(&ioc->active_iocgs)) {
1642 ioc_start_period(ioc, &now);
1643 } else {
1644 ioc->busy_level = 0;
1645 ioc->running = IOC_IDLE;
1646 }
1647 }
1648
1649 spin_unlock_irq(&ioc->lock);
1650 }
1651
1652 static void calc_vtime_cost_builtin(struct bio *bio, struct ioc_gq *iocg,
1653 bool is_merge, u64 *costp)
1654 {
1655 struct ioc *ioc = iocg->ioc;
1656 u64 coef_seqio, coef_randio, coef_page;
1657 u64 pages = max_t(u64, bio_sectors(bio) >> IOC_SECT_TO_PAGE_SHIFT, 1);
1658 u64 seek_pages = 0;
1659 u64 cost = 0;
1660
1661 switch (bio_op(bio)) {
1662 case REQ_OP_READ:
1663 coef_seqio = ioc->params.lcoefs[LCOEF_RSEQIO];
1664 coef_randio = ioc->params.lcoefs[LCOEF_RRANDIO];
1665 coef_page = ioc->params.lcoefs[LCOEF_RPAGE];
1666 break;
1667 case REQ_OP_WRITE:
1668 coef_seqio = ioc->params.lcoefs[LCOEF_WSEQIO];
1669 coef_randio = ioc->params.lcoefs[LCOEF_WRANDIO];
1670 coef_page = ioc->params.lcoefs[LCOEF_WPAGE];
1671 break;
1672 default:
1673 goto out;
1674 }
1675
1676 if (iocg->cursor) {
1677 seek_pages = abs(bio->bi_iter.bi_sector - iocg->cursor);
1678 seek_pages >>= IOC_SECT_TO_PAGE_SHIFT;
1679 }
1680
1681 if (!is_merge) {
1682 if (seek_pages > LCOEF_RANDIO_PAGES) {
1683 cost += coef_randio;
1684 } else {
1685 cost += coef_seqio;
1686 }
1687 }
1688 cost += pages * coef_page;
1689 out:
1690 *costp = cost;
1691 }
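
/*
 * Example of the above: a 64k (16 page) read seeking further than 16M
 * from the cursor is charged coef_randio + 16 * coef_page; the same
 * read continuing at the cursor is charged coef_seqio + 16 * coef_page;
 * a merge (is_merge) pays only the size component.
 */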
1692
1693 static u64 calc_vtime_cost(struct bio *bio, struct ioc_gq *iocg, bool is_merge)
1694 {
1695 u64 cost;
1696
1697 calc_vtime_cost_builtin(bio, iocg, is_merge, &cost);
1698 return cost;
1699 }
1700
1701 static void calc_size_vtime_cost_builtin(struct request *rq, struct ioc *ioc,
1702 u64 *costp)
1703 {
1704 unsigned int pages = blk_rq_stats_sectors(rq) >> IOC_SECT_TO_PAGE_SHIFT;
1705
1706 switch (req_op(rq)) {
1707 case REQ_OP_READ:
1708 *costp = pages * ioc->params.lcoefs[LCOEF_RPAGE];
1709 break;
1710 case REQ_OP_WRITE:
1711 *costp = pages * ioc->params.lcoefs[LCOEF_WPAGE];
1712 break;
1713 default:
1714 *costp = 0;
1715 }
1716 }
1717
1718 static u64 calc_size_vtime_cost(struct request *rq, struct ioc *ioc)
1719 {
1720 u64 cost;
1721
1722 calc_size_vtime_cost_builtin(rq, ioc, &cost);
1723 return cost;
1724 }
1725
1726 static void ioc_rqos_throttle(struct rq_qos *rqos, struct bio *bio)
1727 {
1728 struct blkcg_gq *blkg = bio->bi_blkg;
1729 struct ioc *ioc = rqos_to_ioc(rqos);
1730 struct ioc_gq *iocg = blkg_to_iocg(blkg);
1731 struct ioc_now now;
1732 struct iocg_wait wait;
1733 u32 hw_active, hw_inuse;
1734 u64 abs_cost, cost, vtime;
1735
1736 /* bypass IOs if disabled or for root cgroup */
1737 if (!ioc->enabled || !iocg->level)
1738 return;
1739
1740 /* always activate so that even 0 cost IOs get protected to some level */
1741 if (!iocg_activate(iocg, &now))
1742 return;
1743
1744 /* calculate the absolute vtime cost */
1745 abs_cost = calc_vtime_cost(bio, iocg, false);
1746 if (!abs_cost)
1747 return;
1748
1749 iocg->cursor = bio_end_sector(bio);
1750
1751 vtime = atomic64_read(&iocg->vtime);
1752 current_hweight(iocg, &hw_active, &hw_inuse);
1753
1754 if (hw_inuse < hw_active &&
1755 time_after_eq64(vtime + ioc->inuse_margin_vtime, now.vnow)) {
1756 TRACE_IOCG_PATH(inuse_reset, iocg, &now,
1757 iocg->inuse, iocg->weight, hw_inuse, hw_active);
1758 spin_lock_irq(&ioc->lock);
1759 propagate_weights(iocg, iocg->weight, iocg->weight);
1760 spin_unlock_irq(&ioc->lock);
1761 current_hweight(iocg, &hw_active, &hw_inuse);
1762 }
1763
1764 cost = abs_cost_to_cost(abs_cost, hw_inuse);
1765
1766 /*
1767 * If no one's waiting and within budget, issue right away. The
1768 * tests are racy but the races aren't systemic - we only miss once
1769 * in a while which is fine.
1770 */
1771 if (!waitqueue_active(&iocg->waitq) && !iocg->abs_vdebt &&
1772 time_before_eq64(vtime + cost, now.vnow)) {
1773 iocg_commit_bio(iocg, bio, cost);
1774 return;
1775 }
1776
1777 /*
1778 * We activated above but w/o any synchronization. Deactivation is
1779 * synchronized with waitq.lock and we won't get deactivated as long
1780 * as we're waiting or have debt, so we're good if we're activated
1781 * here. In the unlikely case that we aren't, just issue the IO.
1782 */
1783 spin_lock_irq(&iocg->waitq.lock);
1784
1785 if (unlikely(list_empty(&iocg->active_list))) {
1786 spin_unlock_irq(&iocg->waitq.lock);
1787 iocg_commit_bio(iocg, bio, cost);
1788 return;
1789 }
1790
1791 /*
1792 * We're over budget. If @bio has to be issued regardless, remember
1793 * the abs_cost instead of advancing vtime. iocg_kick_waitq() will pay
1794 * off the debt before waking more IOs.
1795 *
1796 * This way, the debt is continuously paid off each period with the
1797 * actual budget available to the cgroup. If we just wound vtime, we
1798 * would incorrectly use the current hw_inuse for the entire amount
1799 * which, for example, can lead to the cgroup staying blocked for a
1800 * long time even with substantially raised hw_inuse.
1801 *
1802 * An iocg with vdebt should stay online so that the timer can keep
1803 * deducting its vdebt and [de]activate use_delay mechanism
1804 * accordingly. We don't want to race against the timer trying to
1805 * clear them and leave @iocg inactive w/ dangling use_delay heavily
1806 * penalizing the cgroup and its descendants.
1807 */
1808 if (bio_issue_as_root_blkg(bio) || fatal_signal_pending(current)) {
1809 iocg->abs_vdebt += abs_cost;
1810 if (iocg_kick_delay(iocg, &now))
1811 blkcg_schedule_throttle(rqos->q,
1812 (bio->bi_opf & REQ_SWAP) == REQ_SWAP);
1813 spin_unlock_irq(&iocg->waitq.lock);
1814 return;
1815 }

	/*
	 * Append self to the waitq and schedule the wakeup timer if we're
	 * the first waiter. The timer duration is calculated based on the
	 * current vrate. vtime and hweight changes can make it too short
	 * or too long. Each wait entry records the absolute cost it's
	 * waiting for to allow re-evaluation using a custom wait entry.
	 *
	 * If too short, the timer simply reschedules itself. If too long,
	 * the period timer will notice and trigger wakeups.
	 *
	 * All waiters are on iocg->waitq and the wait states are
	 * synchronized using waitq.lock.
	 */
	init_waitqueue_func_entry(&wait.wait, iocg_wake_fn);
	wait.wait.private = current;
	wait.bio = bio;
	wait.abs_cost = abs_cost;
	wait.committed = false;	/* will be set true by waker */

	__add_wait_queue_entry_tail(&iocg->waitq, &wait.wait);
	iocg_kick_waitq(iocg, &now);

	spin_unlock_irq(&iocg->waitq.lock);

	while (true) {
		set_current_state(TASK_UNINTERRUPTIBLE);
		if (wait.committed)
			break;
		io_schedule();
	}

	/* waker already committed us, proceed */
	finish_wait(&iocg->waitq, &wait.wait);
}
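
/*
 * In short, ioc_rqos_throttle() has three exits: a bio within budget is
 * committed and issued immediately; a bio that must not block (root blkg
 * or fatal signal pending) is issued anyway with its absolute cost
 * recorded as debt to be paid off from future budget; everything else
 * sleeps on iocg->waitq until iocg_wake_fn() commits its wait entry.
 */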

static void ioc_rqos_merge(struct rq_qos *rqos, struct request *rq,
			   struct bio *bio)
{
	struct ioc_gq *iocg = blkg_to_iocg(bio->bi_blkg);
	struct ioc *ioc = iocg->ioc;
	sector_t bio_end = bio_end_sector(bio);
	struct ioc_now now;
	u32 hw_inuse;
	u64 abs_cost, cost;
	unsigned long flags;

	/* bypass if disabled or for root cgroup */
	if (!ioc->enabled || !iocg->level)
		return;

	abs_cost = calc_vtime_cost(bio, iocg, true);
	if (!abs_cost)
		return;

	ioc_now(ioc, &now);
	current_hweight(iocg, NULL, &hw_inuse);
	cost = abs_cost_to_cost(abs_cost, hw_inuse);

	/* update cursor if backmerging into the request at the cursor */
	if (blk_rq_pos(rq) < bio_end &&
	    blk_rq_pos(rq) + blk_rq_sectors(rq) == iocg->cursor)
		iocg->cursor = bio_end;

	/*
	 * Charge if there's enough vtime budget and the existing request has
	 * cost assigned.
	 */
	if (rq->bio && rq->bio->bi_iocost_cost &&
	    time_before_eq64(atomic64_read(&iocg->vtime) + cost, now.vnow)) {
		iocg_commit_bio(iocg, bio, cost);
		return;
	}

	/*
	 * Otherwise, account it as debt if @iocg is online, which it should
	 * be for the vast majority of cases. See debt handling in
	 * ioc_rqos_throttle() for details.
	 */
	spin_lock_irqsave(&iocg->waitq.lock, flags);
	if (likely(!list_empty(&iocg->active_list))) {
		iocg->abs_vdebt += abs_cost;
		iocg_kick_delay(iocg, &now);
	} else {
		iocg_commit_bio(iocg, bio, cost);
	}
	spin_unlock_irqrestore(&iocg->waitq.lock, flags);
}

static void ioc_rqos_done_bio(struct rq_qos *rqos, struct bio *bio)
{
	struct ioc_gq *iocg = blkg_to_iocg(bio->bi_blkg);

	if (iocg && bio->bi_iocost_cost)
		atomic64_add(bio->bi_iocost_cost, &iocg->done_vtime);
}

static void ioc_rqos_done(struct rq_qos *rqos, struct request *rq)
{
	struct ioc *ioc = rqos_to_ioc(rqos);
	struct ioc_pcpu_stat *ccs;
	u64 on_q_ns, rq_wait_ns, size_nsec;
	int pidx, rw;

	if (!ioc->enabled || !rq->alloc_time_ns || !rq->start_time_ns)
		return;

	switch (req_op(rq) & REQ_OP_MASK) {
	case REQ_OP_READ:
		pidx = QOS_RLAT;
		rw = READ;
		break;
	case REQ_OP_WRITE:
		pidx = QOS_WLAT;
		rw = WRITE;
		break;
	default:
		return;
	}

	on_q_ns = ktime_get_ns() - rq->alloc_time_ns;
	rq_wait_ns = rq->start_time_ns - rq->alloc_time_ns;
	size_nsec = div64_u64(calc_size_vtime_cost(rq, ioc), VTIME_PER_NSEC);

	ccs = get_cpu_ptr(ioc->pcpu_stat);

	if (on_q_ns <= size_nsec ||
	    on_q_ns - size_nsec <= ioc->params.qos[pidx] * NSEC_PER_USEC)
		local_inc(&ccs->missed[rw].nr_met);
	else
		local_inc(&ccs->missed[rw].nr_missed);

	local64_add(rq_wait_ns, &ccs->rq_wait_ns);

	put_cpu_ptr(ccs);
}
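
/*
 * Worked example for the check above (hypothetical numbers): with
 * rlat=5000, a read that spent 7ms between allocation and completion and
 * carries a 1ms size cost is judged on 7ms - 1ms = 6ms of size-adjusted
 * queue time, which exceeds the 5000us target and counts as missed. The
 * per-cpu met/missed counts feed the periodic QoS evaluation against the
 * rpct/wpct percentile targets.
 */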

static void ioc_rqos_queue_depth_changed(struct rq_qos *rqos)
{
	struct ioc *ioc = rqos_to_ioc(rqos);

	spin_lock_irq(&ioc->lock);
	ioc_refresh_params(ioc, false);
	spin_unlock_irq(&ioc->lock);
}

static void ioc_rqos_exit(struct rq_qos *rqos)
{
	struct ioc *ioc = rqos_to_ioc(rqos);

	blkcg_deactivate_policy(rqos->q, &blkcg_policy_iocost);

	spin_lock_irq(&ioc->lock);
	ioc->running = IOC_STOP;
	spin_unlock_irq(&ioc->lock);

	del_timer_sync(&ioc->timer);
	free_percpu(ioc->pcpu_stat);
	kfree(ioc);
}

static struct rq_qos_ops ioc_rqos_ops = {
	.throttle = ioc_rqos_throttle,
	.merge = ioc_rqos_merge,
	.done_bio = ioc_rqos_done_bio,
	.done = ioc_rqos_done,
	.queue_depth_changed = ioc_rqos_queue_depth_changed,
	.exit = ioc_rqos_exit,
};

static int blk_iocost_init(struct request_queue *q)
{
	struct ioc *ioc;
	struct rq_qos *rqos;
	int i, cpu, ret;

	ioc = kzalloc(sizeof(*ioc), GFP_KERNEL);
	if (!ioc)
		return -ENOMEM;

	ioc->pcpu_stat = alloc_percpu(struct ioc_pcpu_stat);
	if (!ioc->pcpu_stat) {
		kfree(ioc);
		return -ENOMEM;
	}

	for_each_possible_cpu(cpu) {
		struct ioc_pcpu_stat *ccs = per_cpu_ptr(ioc->pcpu_stat, cpu);

		for (i = 0; i < ARRAY_SIZE(ccs->missed); i++) {
			local_set(&ccs->missed[i].nr_met, 0);
			local_set(&ccs->missed[i].nr_missed, 0);
		}
		local64_set(&ccs->rq_wait_ns, 0);
	}

	rqos = &ioc->rqos;
	rqos->id = RQ_QOS_COST;
	rqos->ops = &ioc_rqos_ops;
	rqos->q = q;

	spin_lock_init(&ioc->lock);
	timer_setup(&ioc->timer, ioc_timer_fn, 0);
	INIT_LIST_HEAD(&ioc->active_iocgs);

	ioc->running = IOC_IDLE;
	atomic64_set(&ioc->vtime_rate, VTIME_PER_USEC);
	seqcount_spinlock_init(&ioc->period_seqcount, &ioc->lock);
	ioc->period_at = ktime_to_us(ktime_get());
	atomic64_set(&ioc->cur_period, 0);
	atomic_set(&ioc->hweight_gen, 0);

	spin_lock_irq(&ioc->lock);
	ioc->autop_idx = AUTOP_INVALID;
	ioc_refresh_params(ioc, true);
	spin_unlock_irq(&ioc->lock);

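	/*
	 * The rqos must be registered before the policy is activated:
	 * ioc_pd_init() reaches the ioc through q_to_ioc(), which only
	 * resolves once rq_qos_add() has hooked the rqos onto the queue.
	 * On activation failure, tear down in the reverse order.
	 */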
	rq_qos_add(q, rqos);
	ret = blkcg_activate_policy(q, &blkcg_policy_iocost);
	if (ret) {
		rq_qos_del(q, rqos);
		free_percpu(ioc->pcpu_stat);
		kfree(ioc);
		return ret;
	}
	return 0;
}

static struct blkcg_policy_data *ioc_cpd_alloc(gfp_t gfp)
{
	struct ioc_cgrp *iocc;

	iocc = kzalloc(sizeof(struct ioc_cgrp), gfp);
	if (!iocc)
		return NULL;

	iocc->dfl_weight = CGROUP_WEIGHT_DFL;
	return &iocc->cpd;
}

static void ioc_cpd_free(struct blkcg_policy_data *cpd)
{
	kfree(container_of(cpd, struct ioc_cgrp, cpd));
}

static struct blkg_policy_data *ioc_pd_alloc(gfp_t gfp, struct request_queue *q,
					     struct blkcg *blkcg)
{
	int levels = blkcg->css.cgroup->level + 1;
	struct ioc_gq *iocg;

	iocg = kzalloc_node(struct_size(iocg, ancestors, levels), gfp, q->node);
	if (!iocg)
		return NULL;

	return &iocg->pd;
}

static void ioc_pd_init(struct blkg_policy_data *pd)
{
	struct ioc_gq *iocg = pd_to_iocg(pd);
	struct blkcg_gq *blkg = pd_to_blkg(&iocg->pd);
	struct ioc *ioc = q_to_ioc(blkg->q);
	struct ioc_now now;
	struct blkcg_gq *tblkg;
	unsigned long flags;

	ioc_now(ioc, &now);

	iocg->ioc = ioc;
	atomic64_set(&iocg->vtime, now.vnow);
	atomic64_set(&iocg->done_vtime, now.vnow);
	atomic64_set(&iocg->active_period, atomic64_read(&ioc->cur_period));
	INIT_LIST_HEAD(&iocg->active_list);
	iocg->hweight_active = HWEIGHT_WHOLE;
	iocg->hweight_inuse = HWEIGHT_WHOLE;

	init_waitqueue_head(&iocg->waitq);
	hrtimer_init(&iocg->waitq_timer, CLOCK_MONOTONIC, HRTIMER_MODE_ABS);
	iocg->waitq_timer.function = iocg_waitq_timer_fn;
	hrtimer_init(&iocg->delay_timer, CLOCK_MONOTONIC, HRTIMER_MODE_ABS);
	iocg->delay_timer.function = iocg_delay_timer_fn;

	iocg->level = blkg->blkcg->css.cgroup->level;

	for (tblkg = blkg; tblkg; tblkg = tblkg->parent) {
		struct ioc_gq *tiocg = blkg_to_iocg(tblkg);
		iocg->ancestors[tiocg->level] = tiocg;
	}

	spin_lock_irqsave(&ioc->lock, flags);
	weight_updated(iocg);
	spin_unlock_irqrestore(&ioc->lock, flags);
}
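
/*
 * ancestors[] holds each ancestor's iocg indexed by its cgroup level,
 * with the root at index 0 and @iocg itself at iocg->level, so the
 * hierarchical weight calculations can walk the chain by array index
 * instead of chasing parent pointers on every IO.
 */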

static void ioc_pd_free(struct blkg_policy_data *pd)
{
	struct ioc_gq *iocg = pd_to_iocg(pd);
	struct ioc *ioc = iocg->ioc;
	unsigned long flags;

	if (ioc) {
		spin_lock_irqsave(&ioc->lock, flags);
		if (!list_empty(&iocg->active_list)) {
			propagate_weights(iocg, 0, 0);
			list_del_init(&iocg->active_list);
		}
		spin_unlock_irqrestore(&ioc->lock, flags);

		hrtimer_cancel(&iocg->waitq_timer);
		hrtimer_cancel(&iocg->delay_timer);
	}
	kfree(iocg);
}

static u64 ioc_weight_prfill(struct seq_file *sf, struct blkg_policy_data *pd,
			     int off)
{
	const char *dname = blkg_dev_name(pd->blkg);
	struct ioc_gq *iocg = pd_to_iocg(pd);

	if (dname && iocg->cfg_weight)
		seq_printf(sf, "%s %u\n", dname, iocg->cfg_weight);
	return 0;
}

static int ioc_weight_show(struct seq_file *sf, void *v)
{
	struct blkcg *blkcg = css_to_blkcg(seq_css(sf));
	struct ioc_cgrp *iocc = blkcg_to_iocc(blkcg);

	seq_printf(sf, "default %u\n", iocc->dfl_weight);
	blkcg_print_blkgs(sf, blkcg, ioc_weight_prfill,
			  &blkcg_policy_iocost, seq_cft(sf)->private, false);
	return 0;
}

static ssize_t ioc_weight_write(struct kernfs_open_file *of, char *buf,
				size_t nbytes, loff_t off)
{
	struct blkcg *blkcg = css_to_blkcg(of_css(of));
	struct ioc_cgrp *iocc = blkcg_to_iocc(blkcg);
	struct blkg_conf_ctx ctx;
	struct ioc_gq *iocg;
	u32 v;
	int ret;

	if (!strchr(buf, ':')) {
		struct blkcg_gq *blkg;

		if (!sscanf(buf, "default %u", &v) && !sscanf(buf, "%u", &v))
			return -EINVAL;

		if (v < CGROUP_WEIGHT_MIN || v > CGROUP_WEIGHT_MAX)
			return -EINVAL;

		spin_lock(&blkcg->lock);
		iocc->dfl_weight = v;
		hlist_for_each_entry(blkg, &blkcg->blkg_list, blkcg_node) {
			struct ioc_gq *iocg = blkg_to_iocg(blkg);

			if (iocg) {
				spin_lock_irq(&iocg->ioc->lock);
				weight_updated(iocg);
				spin_unlock_irq(&iocg->ioc->lock);
			}
		}
		spin_unlock(&blkcg->lock);

		return nbytes;
	}

	ret = blkg_conf_prep(blkcg, &blkcg_policy_iocost, buf, &ctx);
	if (ret)
		return ret;

	iocg = blkg_to_iocg(ctx.blkg);

	if (!strncmp(ctx.body, "default", 7)) {
		v = 0;
	} else {
		if (!sscanf(ctx.body, "%u", &v))
			goto einval;
		if (v < CGROUP_WEIGHT_MIN || v > CGROUP_WEIGHT_MAX)
			goto einval;
	}

	spin_lock(&iocg->ioc->lock);
	iocg->cfg_weight = v;
	weight_updated(iocg);
	spin_unlock(&iocg->ioc->lock);

	blkg_conf_finish(&ctx);
	return nbytes;

einval:
	blkg_conf_finish(&ctx);
	return -EINVAL;
}
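
/*
 * Example io.weight writes accepted by the parser above (the device
 * numbers are illustrative):
 *
 *   echo "default 200" > io.weight	- set this cgroup's default weight
 *   echo "8:16 50" > io.weight		- override the weight for one device
 *   echo "8:16 default" > io.weight	- drop the per-device override
 */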

static u64 ioc_qos_prfill(struct seq_file *sf, struct blkg_policy_data *pd,
			  int off)
{
	const char *dname = blkg_dev_name(pd->blkg);
	struct ioc *ioc = pd_to_iocg(pd)->ioc;

	if (!dname)
		return 0;

	seq_printf(sf, "%s enable=%d ctrl=%s rpct=%u.%02u rlat=%u wpct=%u.%02u wlat=%u min=%u.%02u max=%u.%02u\n",
		   dname, ioc->enabled, ioc->user_qos_params ? "user" : "auto",
		   ioc->params.qos[QOS_RPPM] / 10000,
		   ioc->params.qos[QOS_RPPM] % 10000 / 100,
		   ioc->params.qos[QOS_RLAT],
		   ioc->params.qos[QOS_WPPM] / 10000,
		   ioc->params.qos[QOS_WPPM] % 10000 / 100,
		   ioc->params.qos[QOS_WLAT],
		   ioc->params.qos[QOS_MIN] / 10000,
		   ioc->params.qos[QOS_MIN] % 10000 / 100,
		   ioc->params.qos[QOS_MAX] / 10000,
		   ioc->params.qos[QOS_MAX] % 10000 / 100);
	return 0;
}

static int ioc_qos_show(struct seq_file *sf, void *v)
{
	struct blkcg *blkcg = css_to_blkcg(seq_css(sf));

	blkcg_print_blkgs(sf, blkcg, ioc_qos_prfill,
			  &blkcg_policy_iocost, seq_cft(sf)->private, false);
	return 0;
}

static const match_table_t qos_ctrl_tokens = {
	{ QOS_ENABLE,		"enable=%u"	},
	{ QOS_CTRL,		"ctrl=%s"	},
	{ NR_QOS_CTRL_PARAMS,	NULL		},
};

static const match_table_t qos_tokens = {
	{ QOS_RPPM,		"rpct=%s"	},
	{ QOS_RLAT,		"rlat=%u"	},
	{ QOS_WPPM,		"wpct=%s"	},
	{ QOS_WLAT,		"wlat=%u"	},
	{ QOS_MIN,		"min=%s"	},
	{ QOS_MAX,		"max=%s"	},
	{ NR_QOS_PARAMS,	NULL		},
};

static ssize_t ioc_qos_write(struct kernfs_open_file *of, char *input,
			     size_t nbytes, loff_t off)
{
	struct gendisk *disk;
	struct ioc *ioc;
	u32 qos[NR_QOS_PARAMS];
	bool enable, user;
	char *p;
	int ret;

	disk = blkcg_conf_get_disk(&input);
	if (IS_ERR(disk))
		return PTR_ERR(disk);

	ioc = q_to_ioc(disk->queue);
	if (!ioc) {
		ret = blk_iocost_init(disk->queue);
		if (ret)
			goto err;
		ioc = q_to_ioc(disk->queue);
	}

	spin_lock_irq(&ioc->lock);
	memcpy(qos, ioc->params.qos, sizeof(qos));
	enable = ioc->enabled;
	user = ioc->user_qos_params;
	spin_unlock_irq(&ioc->lock);

	while ((p = strsep(&input, " \t\n"))) {
		substring_t args[MAX_OPT_ARGS];
		char buf[32];
		int tok;
		s64 v;

		if (!*p)
			continue;

		switch (match_token(p, qos_ctrl_tokens, args)) {
		case QOS_ENABLE:
			match_u64(&args[0], &v);
			enable = v;
			continue;
		case QOS_CTRL:
			match_strlcpy(buf, &args[0], sizeof(buf));
			if (!strcmp(buf, "auto"))
				user = false;
			else if (!strcmp(buf, "user"))
				user = true;
			else
				goto einval;
			continue;
		}

		tok = match_token(p, qos_tokens, args);
		switch (tok) {
		case QOS_RPPM:
		case QOS_WPPM:
			if (match_strlcpy(buf, &args[0], sizeof(buf)) >=
			    sizeof(buf))
				goto einval;
			if (cgroup_parse_float(buf, 2, &v))
				goto einval;
			if (v < 0 || v > 10000)
				goto einval;
			qos[tok] = v * 100;
			break;
		case QOS_RLAT:
		case QOS_WLAT:
			if (match_u64(&args[0], &v))
				goto einval;
			qos[tok] = v;
			break;
		case QOS_MIN:
		case QOS_MAX:
			if (match_strlcpy(buf, &args[0], sizeof(buf)) >=
			    sizeof(buf))
				goto einval;
			if (cgroup_parse_float(buf, 2, &v))
				goto einval;
			if (v < 0)
				goto einval;
			qos[tok] = clamp_t(s64, v * 100,
					   VRATE_MIN_PPM, VRATE_MAX_PPM);
			break;
		default:
			goto einval;
		}
		user = true;
	}

	if (qos[QOS_MIN] > qos[QOS_MAX])
		goto einval;

	spin_lock_irq(&ioc->lock);

	if (enable) {
		blk_stat_enable_accounting(ioc->rqos.q);
		blk_queue_flag_set(QUEUE_FLAG_RQ_ALLOC_TIME, ioc->rqos.q);
		ioc->enabled = true;
	} else {
		blk_queue_flag_clear(QUEUE_FLAG_RQ_ALLOC_TIME, ioc->rqos.q);
		ioc->enabled = false;
	}

	if (user) {
		memcpy(ioc->params.qos, qos, sizeof(qos));
		ioc->user_qos_params = true;
	} else {
		ioc->user_qos_params = false;
	}

	ioc_refresh_params(ioc, true);
	spin_unlock_irq(&ioc->lock);

	put_disk_and_module(disk);
	return nbytes;
einval:
	ret = -EINVAL;
err:
	put_disk_and_module(disk);
	return ret;
}
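
/*
 * Example io.cost.qos write (illustrative device and values): enable the
 * controller with user-controlled QoS targets of 95% of reads completing
 * within 5ms and 95% of writes within 10ms, with vrate allowed to range
 * between 50% and 150%:
 *
 *   echo "8:16 enable=1 ctrl=user rpct=95.00 rlat=5000 wpct=95.00 wlat=10000 min=50.00 max=150.00" > io.cost.qos
 */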

static u64 ioc_cost_model_prfill(struct seq_file *sf,
				 struct blkg_policy_data *pd, int off)
{
	const char *dname = blkg_dev_name(pd->blkg);
	struct ioc *ioc = pd_to_iocg(pd)->ioc;
	u64 *u = ioc->params.i_lcoefs;

	if (!dname)
		return 0;

	seq_printf(sf, "%s ctrl=%s model=linear "
		   "rbps=%llu rseqiops=%llu rrandiops=%llu "
		   "wbps=%llu wseqiops=%llu wrandiops=%llu\n",
		   dname, ioc->user_cost_model ? "user" : "auto",
		   u[I_LCOEF_RBPS], u[I_LCOEF_RSEQIOPS], u[I_LCOEF_RRANDIOPS],
		   u[I_LCOEF_WBPS], u[I_LCOEF_WSEQIOPS], u[I_LCOEF_WRANDIOPS]);
	return 0;
}

static int ioc_cost_model_show(struct seq_file *sf, void *v)
{
	struct blkcg *blkcg = css_to_blkcg(seq_css(sf));

	blkcg_print_blkgs(sf, blkcg, ioc_cost_model_prfill,
			  &blkcg_policy_iocost, seq_cft(sf)->private, false);
	return 0;
}

static const match_table_t cost_ctrl_tokens = {
	{ COST_CTRL,		"ctrl=%s"	},
	{ COST_MODEL,		"model=%s"	},
	{ NR_COST_CTRL_PARAMS,	NULL		},
};

static const match_table_t i_lcoef_tokens = {
	{ I_LCOEF_RBPS,		"rbps=%u"	},
	{ I_LCOEF_RSEQIOPS,	"rseqiops=%u"	},
	{ I_LCOEF_RRANDIOPS,	"rrandiops=%u"	},
	{ I_LCOEF_WBPS,		"wbps=%u"	},
	{ I_LCOEF_WSEQIOPS,	"wseqiops=%u"	},
	{ I_LCOEF_WRANDIOPS,	"wrandiops=%u"	},
	{ NR_I_LCOEFS,		NULL		},
};

static ssize_t ioc_cost_model_write(struct kernfs_open_file *of, char *input,
				    size_t nbytes, loff_t off)
{
	struct gendisk *disk;
	struct ioc *ioc;
	u64 u[NR_I_LCOEFS];
	bool user;
	char *p;
	int ret;

	disk = blkcg_conf_get_disk(&input);
	if (IS_ERR(disk))
		return PTR_ERR(disk);

	ioc = q_to_ioc(disk->queue);
	if (!ioc) {
		ret = blk_iocost_init(disk->queue);
		if (ret)
			goto err;
		ioc = q_to_ioc(disk->queue);
	}

	spin_lock_irq(&ioc->lock);
	memcpy(u, ioc->params.i_lcoefs, sizeof(u));
	user = ioc->user_cost_model;
	spin_unlock_irq(&ioc->lock);

	while ((p = strsep(&input, " \t\n"))) {
		substring_t args[MAX_OPT_ARGS];
		char buf[32];
		int tok;
		u64 v;

		if (!*p)
			continue;

		switch (match_token(p, cost_ctrl_tokens, args)) {
		case COST_CTRL:
			match_strlcpy(buf, &args[0], sizeof(buf));
			if (!strcmp(buf, "auto"))
				user = false;
			else if (!strcmp(buf, "user"))
				user = true;
			else
				goto einval;
			continue;
		case COST_MODEL:
			match_strlcpy(buf, &args[0], sizeof(buf));
			if (strcmp(buf, "linear"))
				goto einval;
			continue;
		}

		tok = match_token(p, i_lcoef_tokens, args);
		if (tok == NR_I_LCOEFS)
			goto einval;
		if (match_u64(&args[0], &v))
			goto einval;
		u[tok] = v;
		user = true;
	}

	spin_lock_irq(&ioc->lock);
	if (user) {
		memcpy(ioc->params.i_lcoefs, u, sizeof(u));
		ioc->user_cost_model = true;
	} else {
		ioc->user_cost_model = false;
	}
	ioc_refresh_params(ioc, true);
	spin_unlock_irq(&ioc->lock);

	put_disk_and_module(disk);
	return nbytes;

einval:
	ret = -EINVAL;
err:
	put_disk_and_module(disk);
	return ret;
}
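
/*
 * Example io.cost.model write (illustrative numbers): switch the model to
 * user control and describe a device sustaining ~250MiB/s of reads with
 * 100k sequential and 70k random read IOPS:
 *
 *   echo "8:16 ctrl=user model=linear rbps=262144000 rseqiops=100000 rrandiops=70000" > io.cost.model
 *
 * Coefficients not mentioned in the write retain their current values.
 */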

static struct cftype ioc_files[] = {
	{
		.name = "weight",
		.flags = CFTYPE_NOT_ON_ROOT,
		.seq_show = ioc_weight_show,
		.write = ioc_weight_write,
	},
	{
		.name = "cost.qos",
		.flags = CFTYPE_ONLY_ON_ROOT,
		.seq_show = ioc_qos_show,
		.write = ioc_qos_write,
	},
	{
		.name = "cost.model",
		.flags = CFTYPE_ONLY_ON_ROOT,
		.seq_show = ioc_cost_model_show,
		.write = ioc_cost_model_write,
	},
	{}
};

static struct blkcg_policy blkcg_policy_iocost = {
	.dfl_cftypes	= ioc_files,
	.cpd_alloc_fn	= ioc_cpd_alloc,
	.cpd_free_fn	= ioc_cpd_free,
	.pd_alloc_fn	= ioc_pd_alloc,
	.pd_init_fn	= ioc_pd_init,
	.pd_free_fn	= ioc_pd_free,
};

static int __init ioc_init(void)
{
	return blkcg_policy_register(&blkcg_policy_iocost);
}

static void __exit ioc_exit(void)
{
	blkcg_policy_unregister(&blkcg_policy_iocost);
}

module_init(ioc_init);
module_exit(ioc_exit);