/*
 * block/blk-throttle.c - Interface for controlling IO bandwidth on a
 * request queue
 *
 * Copyright (C) 2010 Vivek Goyal <vgoyal@redhat.com>
 */

#include <linux/module.h>
#include <linux/slab.h>
#include <linux/blkdev.h>
#include <linux/bio.h>
#include <linux/blktrace_api.h>
#include <linux/blk-cgroup.h>
#include "blk.h"

/* Max dispatch from a group in 1 round */
static int throtl_grp_quantum = 8;

/* Total max dispatch from all groups in one round */
static int throtl_quantum = 32;

/* Throttling is performed over a slice and after that slice is renewed */
#define DFL_THROTL_SLICE_HD (HZ / 10)
#define DFL_THROTL_SLICE_SSD (HZ / 50)
#define MAX_THROTL_SLICE (HZ)
#define MAX_IDLE_TIME (5L * 1000 * 1000) /* 5 s */
#define MIN_THROTL_BPS (320 * 1024)
#define MIN_THROTL_IOPS (10)
#define DFL_LATENCY_TARGET (-1L)
#define DFL_IDLE_THRESHOLD (0)

#define SKIP_LATENCY (((u64)1) << BLK_STAT_RES_SHIFT)

static struct blkcg_policy blkcg_policy_throtl;

/* A workqueue to queue throttle related work */
static struct workqueue_struct *kthrotld_workqueue;

/*
 * To implement hierarchical throttling, throtl_grps form a tree and bios
 * are dispatched upwards level by level until they reach the top and get
 * issued.  When dispatching bios from the children and local group at each
 * level, if the bios are dispatched into a single bio_list, there's a risk
 * that a local or child group which can queue many bios at once fills up
 * the list, starving the others.
 *
 * To avoid such starvation, dispatched bios are queued separately
 * according to where they came from.  When they are again dispatched to
 * the parent, they're popped in round-robin order so that no single source
 * hogs the dispatch window.
 *
 * throtl_qnode is used to keep the queued bios separated by their sources.
 * Bios are queued to throtl_qnode which in turn is queued to
 * throtl_service_queue and then dispatched in round-robin order.
 *
 * It's also used to track the reference counts on blkg's.  A qnode always
 * belongs to a throtl_grp and gets queued on itself or the parent, so
 * incrementing the reference of the associated throtl_grp when a qnode is
 * queued and decrementing when dequeued is enough to keep the whole blkg
 * tree pinned while bios are in flight.
 */
struct throtl_qnode {
	struct list_head	node;		/* service_queue->queued[] */
	struct bio_list		bios;		/* queued bios */
	struct throtl_grp	*tg;		/* tg this qnode belongs to */
};

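/*
 * Illustrative example of the round-robin order: if qnodes A and B sit on
 * ->queued[READ] with A holding bios a1, a2, a3 and B holding b1,
 * successive pops yield a1, b1, a2, a3 rather than draining A first,
 * because a qnode that still has bios is rotated to the tail after each
 * pop (see throtl_pop_queued() below).
 */
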
struct throtl_service_queue {
	struct throtl_service_queue *parent_sq;	/* the parent service_queue */

	/*
	 * Bios queued directly to this service_queue or dispatched from
	 * children throtl_grp's.
	 */
	struct list_head	queued[2];	/* throtl_qnode [READ/WRITE] */
	unsigned int		nr_queued[2];	/* number of queued bios */

	/*
	 * RB tree of active children throtl_grp's, which are sorted by
	 * their ->disptime.
	 */
	struct rb_root		pending_tree;	/* RB tree of active tgs */
	struct rb_node		*first_pending;	/* first node in the tree */
	unsigned int		nr_pending;	/* # queued in the tree */
	unsigned long		first_pending_disptime;	/* disptime of the first tg */
	struct timer_list	pending_timer;	/* fires on first_pending_disptime */
};

enum tg_state_flags {
	THROTL_TG_PENDING	= 1 << 0,	/* on parent's pending tree */
	THROTL_TG_WAS_EMPTY	= 1 << 1,	/* bio_lists[] became non-empty */
};

#define rb_entry_tg(node)	rb_entry((node), struct throtl_grp, rb_node)

enum {
	LIMIT_LOW,
	LIMIT_MAX,
	LIMIT_CNT,
};

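/*
 * Note: td->limit_index selects which of these limit sets is currently
 * being enforced -- LIMIT_LOW while groups are held to their low limits,
 * LIMIT_MAX after throtl_upgrade_state() switches to the max limits.
 */
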
struct throtl_grp {
	/* must be the first member */
	struct blkg_policy_data pd;

	/* active throtl group service_queue member */
	struct rb_node rb_node;

	/* throtl_data this group belongs to */
	struct throtl_data *td;

	/* this group's service queue */
	struct throtl_service_queue service_queue;

	/*
	 * qnode_on_self is used when bios are directly queued to this
	 * throtl_grp so that local bios compete fairly with bios
	 * dispatched from children.  qnode_on_parent is used when bios are
	 * dispatched from this throtl_grp into its parent and will compete
	 * with the sibling qnode_on_parents and the parent's
	 * qnode_on_self.
	 */
	struct throtl_qnode qnode_on_self[2];
	struct throtl_qnode qnode_on_parent[2];

	/*
	 * Dispatch time in jiffies.  This is the estimated time when the
	 * group will unthrottle and be ready to dispatch more bios.  It is
	 * used as the key to sort active groups in the service tree.
	 */
	unsigned long disptime;

	unsigned int flags;

	/* are there any throtl rules between this group and td? */
	bool has_rules[2];

	/* internally used bytes per second rate limits */
	uint64_t bps[2][LIMIT_CNT];
	/* user configured bps limits */
	uint64_t bps_conf[2][LIMIT_CNT];

	/* internally used IOPS limits */
	unsigned int iops[2][LIMIT_CNT];
	/* user configured IOPS limits */
	unsigned int iops_conf[2][LIMIT_CNT];

	/* Number of bytes dispatched in current slice */
	uint64_t bytes_disp[2];
	/* Number of bios dispatched in current slice */
	unsigned int io_disp[2];

	unsigned long last_low_overflow_time[2];

	uint64_t last_bytes_disp[2];
	unsigned int last_io_disp[2];

	unsigned long last_check_time;

	unsigned long latency_target; /* us */
	unsigned long latency_target_conf; /* us */

	/* When did we start a new slice */
	unsigned long slice_start[2];
	unsigned long slice_end[2];

	unsigned long last_finish_time; /* ns / 1024 */
	unsigned long checked_last_finish_time; /* ns / 1024 */
	unsigned long avg_idletime; /* ns / 1024 */
	unsigned long idletime_threshold; /* us */
	unsigned long idletime_threshold_conf; /* us */

	unsigned int bio_cnt; /* total bios */
	unsigned int bad_bio_cnt; /* bios exceeding latency threshold */
	unsigned long bio_cnt_reset_time;
};

/* We measure latency for request sizes from <= 4k to >= 1M */
#define LATENCY_BUCKET_SIZE 9

struct latency_bucket {
	unsigned long total_latency; /* ns / 1024 */
	int samples;
};

struct avg_latency_bucket {
	unsigned long latency; /* ns / 1024 */
	bool valid;
};

struct throtl_data
{
	/* service tree for active throtl groups */
	struct throtl_service_queue service_queue;

	struct request_queue *queue;

	/* Total number of queued bios on READ and WRITE lists */
	unsigned int nr_queued[2];

	unsigned int throtl_slice;

	/* Work for dispatching throttled bios */
	struct work_struct dispatch_work;
	unsigned int limit_index;
	bool limit_valid[LIMIT_CNT];

	unsigned long low_upgrade_time;
	unsigned long low_downgrade_time;

	unsigned int scale;

	struct latency_bucket tmp_buckets[LATENCY_BUCKET_SIZE];
	struct avg_latency_bucket avg_buckets[LATENCY_BUCKET_SIZE];
	struct latency_bucket __percpu *latency_buckets;
	unsigned long last_calculate_time;

	bool track_bio_latency;
};

static void throtl_pending_timer_fn(unsigned long arg);

static inline struct throtl_grp *pd_to_tg(struct blkg_policy_data *pd)
{
	return pd ? container_of(pd, struct throtl_grp, pd) : NULL;
}

static inline struct throtl_grp *blkg_to_tg(struct blkcg_gq *blkg)
{
	return pd_to_tg(blkg_to_pd(blkg, &blkcg_policy_throtl));
}

static inline struct blkcg_gq *tg_to_blkg(struct throtl_grp *tg)
{
	return pd_to_blkg(&tg->pd);
}

/**
 * sq_to_tg - return the throtl_grp the specified service queue belongs to
 * @sq: the throtl_service_queue of interest
 *
 * Return the throtl_grp @sq belongs to.  If @sq is the top-level one
 * embedded in throtl_data, %NULL is returned.
 */
static struct throtl_grp *sq_to_tg(struct throtl_service_queue *sq)
{
	if (sq && sq->parent_sq)
		return container_of(sq, struct throtl_grp, service_queue);
	else
		return NULL;
}

/**
 * sq_to_td - return throtl_data the specified service queue belongs to
 * @sq: the throtl_service_queue of interest
 *
 * A service_queue can be embedded in either a throtl_grp or throtl_data.
 * Determine the associated throtl_data accordingly and return it.
 */
static struct throtl_data *sq_to_td(struct throtl_service_queue *sq)
{
	struct throtl_grp *tg = sq_to_tg(sq);

	if (tg)
		return tg->td;
	else
		return container_of(sq, struct throtl_data, service_queue);
}

/*
 * A cgroup's limit in LIMIT_MAX is scaled if a low limit is set.  The
 * scaling makes IO dispatch smoother.
 * Scale up: linearly scale up according to the time elapsed since upgrade.
 *	     For every throtl_slice, the limit scales up by 1/2 of the .low
 *	     limit until it hits the .max limit.
 * Scale down: exponentially scale down if a cgroup doesn't hit its .low
 *	       limit.
 */
static uint64_t throtl_adjusted_limit(uint64_t low, struct throtl_data *td)
{
	/* arbitrary value to avoid too big scale */
	if (td->scale < 4096 && time_after_eq(jiffies,
	    td->low_upgrade_time + td->scale * td->throtl_slice))
		td->scale = (jiffies - td->low_upgrade_time) / td->throtl_slice;

	return low + (low >> 1) * td->scale;
}

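/*
 * Worked example (illustrative numbers): with a .low limit of 10 MB/s and
 * td->scale == 3 (i.e. three throtl_slices since the upgrade), the return
 * value is 10 MB/s + 5 MB/s * 3 == 25 MB/s, growing by half the .low
 * limit per slice until the callers below clamp it to the .max limit.
 */
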
static uint64_t tg_bps_limit(struct throtl_grp *tg, int rw)
{
	struct blkcg_gq *blkg = tg_to_blkg(tg);
	struct throtl_data *td;
	uint64_t ret;

	if (cgroup_subsys_on_dfl(io_cgrp_subsys) && !blkg->parent)
		return U64_MAX;

	td = tg->td;
	ret = tg->bps[rw][td->limit_index];
	if (ret == 0 && td->limit_index == LIMIT_LOW) {
		/* intermediate node or iops isn't 0 */
		if (!list_empty(&blkg->blkcg->css.children) ||
		    tg->iops[rw][td->limit_index])
			return U64_MAX;
		else
			return MIN_THROTL_BPS;
	}

	if (td->limit_index == LIMIT_MAX && tg->bps[rw][LIMIT_LOW] &&
	    tg->bps[rw][LIMIT_LOW] != tg->bps[rw][LIMIT_MAX]) {
		uint64_t adjusted;

		adjusted = throtl_adjusted_limit(tg->bps[rw][LIMIT_LOW], td);
		ret = min(tg->bps[rw][LIMIT_MAX], adjusted);
	}
	return ret;
}

static unsigned int tg_iops_limit(struct throtl_grp *tg, int rw)
{
	struct blkcg_gq *blkg = tg_to_blkg(tg);
	struct throtl_data *td;
	unsigned int ret;

	if (cgroup_subsys_on_dfl(io_cgrp_subsys) && !blkg->parent)
		return UINT_MAX;

	td = tg->td;
	ret = tg->iops[rw][td->limit_index];
	if (ret == 0 && tg->td->limit_index == LIMIT_LOW) {
		/* intermediate node or bps isn't 0 */
		if (!list_empty(&blkg->blkcg->css.children) ||
		    tg->bps[rw][td->limit_index])
			return UINT_MAX;
		else
			return MIN_THROTL_IOPS;
	}

	if (td->limit_index == LIMIT_MAX && tg->iops[rw][LIMIT_LOW] &&
	    tg->iops[rw][LIMIT_LOW] != tg->iops[rw][LIMIT_MAX]) {
		uint64_t adjusted;

		adjusted = throtl_adjusted_limit(tg->iops[rw][LIMIT_LOW], td);
		if (adjusted > UINT_MAX)
			adjusted = UINT_MAX;
		ret = min_t(unsigned int, tg->iops[rw][LIMIT_MAX], adjusted);
	}
	return ret;
}

#define request_bucket_index(sectors) \
	clamp_t(int, order_base_2(sectors) - 3, 0, LATENCY_BUCKET_SIZE - 1)

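/*
 * For example, a 4k request is 8 sectors: order_base_2(8) - 3 == 0, the
 * first bucket.  A 1M request is 2048 sectors: order_base_2(2048) - 3 == 8,
 * the last of the LATENCY_BUCKET_SIZE buckets; larger requests are clamped
 * into it.
 */
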
/**
 * throtl_log - log debug message via blktrace
 * @sq: the service_queue being reported
 * @fmt: printf format string
 * @args: printf args
 *
 * The messages are prefixed with "throtl BLKG_NAME" if @sq belongs to a
 * throtl_grp; otherwise, just "throtl".
 */
#define throtl_log(sq, fmt, args...)	do {				\
	struct throtl_grp *__tg = sq_to_tg((sq));			\
	struct throtl_data *__td = sq_to_td((sq));			\
									\
	(void)__td;							\
	if (likely(!blk_trace_note_message_enabled(__td->queue)))	\
		break;							\
	if ((__tg)) {							\
		char __pbuf[128];					\
									\
		blkg_path(tg_to_blkg(__tg), __pbuf, sizeof(__pbuf));	\
		blk_add_trace_msg(__td->queue, "throtl %s " fmt, __pbuf, ##args); \
	} else {							\
		blk_add_trace_msg(__td->queue, "throtl " fmt, ##args);	\
	}								\
} while (0)

static void throtl_qnode_init(struct throtl_qnode *qn, struct throtl_grp *tg)
{
	INIT_LIST_HEAD(&qn->node);
	bio_list_init(&qn->bios);
	qn->tg = tg;
}

/**
 * throtl_qnode_add_bio - add a bio to a throtl_qnode and activate it
 * @bio: bio being added
 * @qn: qnode to add bio to
 * @queued: the service_queue->queued[] list @qn belongs to
 *
 * Add @bio to @qn and put @qn on @queued if it's not already on.
 * @qn->tg's reference count is bumped when @qn is activated.  See the
 * comment on top of throtl_qnode definition for details.
 */
static void throtl_qnode_add_bio(struct bio *bio, struct throtl_qnode *qn,
				 struct list_head *queued)
{
	bio_list_add(&qn->bios, bio);
	if (list_empty(&qn->node)) {
		list_add_tail(&qn->node, queued);
		blkg_get(tg_to_blkg(qn->tg));
	}
}

/**
 * throtl_peek_queued - peek the first bio on a qnode list
 * @queued: the qnode list to peek
 */
static struct bio *throtl_peek_queued(struct list_head *queued)
{
	struct throtl_qnode *qn = list_first_entry(queued, struct throtl_qnode, node);
	struct bio *bio;

	if (list_empty(queued))
		return NULL;

	bio = bio_list_peek(&qn->bios);
	WARN_ON_ONCE(!bio);
	return bio;
}

/**
 * throtl_pop_queued - pop the first bio from a qnode list
 * @queued: the qnode list to pop a bio from
 * @tg_to_put: optional out argument for throtl_grp to put
 *
 * Pop the first bio from the qnode list @queued.  After popping, the first
 * qnode is removed from @queued if empty or moved to the end of @queued so
 * that the popping order is round-robin.
 *
 * When the first qnode is removed, its associated throtl_grp should be put
 * too.  If @tg_to_put is NULL, this function automatically puts it;
 * otherwise, *@tg_to_put is set to the throtl_grp to put and the caller is
 * responsible for putting it.
 */
static struct bio *throtl_pop_queued(struct list_head *queued,
				     struct throtl_grp **tg_to_put)
{
	struct throtl_qnode *qn = list_first_entry(queued, struct throtl_qnode, node);
	struct bio *bio;

	if (list_empty(queued))
		return NULL;

	bio = bio_list_pop(&qn->bios);
	WARN_ON_ONCE(!bio);

	if (bio_list_empty(&qn->bios)) {
		list_del_init(&qn->node);
		if (tg_to_put)
			*tg_to_put = qn->tg;
		else
			blkg_put(tg_to_blkg(qn->tg));
	} else {
		list_move_tail(&qn->node, queued);
	}

	return bio;
}

/* init a service_queue, assumes the caller zeroed it */
static void throtl_service_queue_init(struct throtl_service_queue *sq)
{
	INIT_LIST_HEAD(&sq->queued[0]);
	INIT_LIST_HEAD(&sq->queued[1]);
	sq->pending_tree = RB_ROOT;
	setup_timer(&sq->pending_timer, throtl_pending_timer_fn,
		    (unsigned long)sq);
}

static struct blkg_policy_data *throtl_pd_alloc(gfp_t gfp, int node)
{
	struct throtl_grp *tg;
	int rw;

	tg = kzalloc_node(sizeof(*tg), gfp, node);
	if (!tg)
		return NULL;

	throtl_service_queue_init(&tg->service_queue);

	for (rw = READ; rw <= WRITE; rw++) {
		throtl_qnode_init(&tg->qnode_on_self[rw], tg);
		throtl_qnode_init(&tg->qnode_on_parent[rw], tg);
	}

	RB_CLEAR_NODE(&tg->rb_node);
	tg->bps[READ][LIMIT_MAX] = U64_MAX;
	tg->bps[WRITE][LIMIT_MAX] = U64_MAX;
	tg->iops[READ][LIMIT_MAX] = UINT_MAX;
	tg->iops[WRITE][LIMIT_MAX] = UINT_MAX;
	tg->bps_conf[READ][LIMIT_MAX] = U64_MAX;
	tg->bps_conf[WRITE][LIMIT_MAX] = U64_MAX;
	tg->iops_conf[READ][LIMIT_MAX] = UINT_MAX;
	tg->iops_conf[WRITE][LIMIT_MAX] = UINT_MAX;
	/* LIMIT_LOW will have default value 0 */

	tg->latency_target = DFL_LATENCY_TARGET;
	tg->latency_target_conf = DFL_LATENCY_TARGET;
	tg->idletime_threshold = DFL_IDLE_THRESHOLD;
	tg->idletime_threshold_conf = DFL_IDLE_THRESHOLD;

	return &tg->pd;
}

static void throtl_pd_init(struct blkg_policy_data *pd)
{
	struct throtl_grp *tg = pd_to_tg(pd);
	struct blkcg_gq *blkg = tg_to_blkg(tg);
	struct throtl_data *td = blkg->q->td;
	struct throtl_service_queue *sq = &tg->service_queue;

	/*
	 * If on the default hierarchy, we switch to properly hierarchical
	 * behavior where limits on a given throtl_grp are applied to the
	 * whole subtree rather than just the group itself.  e.g. If 16M
	 * read_bps limit is set on the root group, the whole system can't
	 * exceed 16M for the device.
	 *
	 * If not on the default hierarchy, the broken flat hierarchy
	 * behavior is retained where all throtl_grps are treated as if
	 * they're all separate root groups right below throtl_data.
	 * Limits of a group don't interact with limits of other groups
	 * regardless of the position of the group in the hierarchy.
	 */
	sq->parent_sq = &td->service_queue;
	if (cgroup_subsys_on_dfl(io_cgrp_subsys) && blkg->parent)
		sq->parent_sq = &blkg_to_tg(blkg->parent)->service_queue;
	tg->td = td;
}

/*
 * Set has_rules[] if @tg or any of its parents have limits configured.
 * This doesn't require walking up to the top of the hierarchy as the
 * parent's has_rules[] is guaranteed to be correct.
 */
static void tg_update_has_rules(struct throtl_grp *tg)
{
	struct throtl_grp *parent_tg = sq_to_tg(tg->service_queue.parent_sq);
	struct throtl_data *td = tg->td;
	int rw;

	for (rw = READ; rw <= WRITE; rw++)
		tg->has_rules[rw] = (parent_tg && parent_tg->has_rules[rw]) ||
			(td->limit_valid[td->limit_index] &&
			 (tg_bps_limit(tg, rw) != U64_MAX ||
			  tg_iops_limit(tg, rw) != UINT_MAX));
}

static void throtl_pd_online(struct blkg_policy_data *pd)
{
	struct throtl_grp *tg = pd_to_tg(pd);

	/*
	 * We don't want new groups to escape the limits of their ancestors.
	 * Update has_rules[] after a new group is brought online.
	 */
	tg_update_has_rules(tg);
}

static void blk_throtl_update_limit_valid(struct throtl_data *td)
{
	struct cgroup_subsys_state *pos_css;
	struct blkcg_gq *blkg;
	bool low_valid = false;

	rcu_read_lock();
	blkg_for_each_descendant_post(blkg, pos_css, td->queue->root_blkg) {
		struct throtl_grp *tg = blkg_to_tg(blkg);

		if (tg->bps[READ][LIMIT_LOW] || tg->bps[WRITE][LIMIT_LOW] ||
		    tg->iops[READ][LIMIT_LOW] || tg->iops[WRITE][LIMIT_LOW])
			low_valid = true;
	}
	rcu_read_unlock();

	td->limit_valid[LIMIT_LOW] = low_valid;
}

static void throtl_upgrade_state(struct throtl_data *td);

static void throtl_pd_offline(struct blkg_policy_data *pd)
{
	struct throtl_grp *tg = pd_to_tg(pd);

	tg->bps[READ][LIMIT_LOW] = 0;
	tg->bps[WRITE][LIMIT_LOW] = 0;
	tg->iops[READ][LIMIT_LOW] = 0;
	tg->iops[WRITE][LIMIT_LOW] = 0;

	blk_throtl_update_limit_valid(tg->td);

	if (!tg->td->limit_valid[tg->td->limit_index])
		throtl_upgrade_state(tg->td);
}

static void throtl_pd_free(struct blkg_policy_data *pd)
{
	struct throtl_grp *tg = pd_to_tg(pd);

	del_timer_sync(&tg->service_queue.pending_timer);
	kfree(tg);
}

static struct throtl_grp *
throtl_rb_first(struct throtl_service_queue *parent_sq)
{
	/* Service tree is empty */
	if (!parent_sq->nr_pending)
		return NULL;

	if (!parent_sq->first_pending)
		parent_sq->first_pending = rb_first(&parent_sq->pending_tree);

	if (parent_sq->first_pending)
		return rb_entry_tg(parent_sq->first_pending);

	return NULL;
}

static void rb_erase_init(struct rb_node *n, struct rb_root *root)
{
	rb_erase(n, root);
	RB_CLEAR_NODE(n);
}

static void throtl_rb_erase(struct rb_node *n,
			    struct throtl_service_queue *parent_sq)
{
	if (parent_sq->first_pending == n)
		parent_sq->first_pending = NULL;
	rb_erase_init(n, &parent_sq->pending_tree);
	--parent_sq->nr_pending;
}

static void update_min_dispatch_time(struct throtl_service_queue *parent_sq)
{
	struct throtl_grp *tg;

	tg = throtl_rb_first(parent_sq);
	if (!tg)
		return;

	parent_sq->first_pending_disptime = tg->disptime;
}

static void tg_service_queue_add(struct throtl_grp *tg)
{
	struct throtl_service_queue *parent_sq = tg->service_queue.parent_sq;
	struct rb_node **node = &parent_sq->pending_tree.rb_node;
	struct rb_node *parent = NULL;
	struct throtl_grp *__tg;
	unsigned long key = tg->disptime;
	int left = 1;

	while (*node != NULL) {
		parent = *node;
		__tg = rb_entry_tg(parent);

		if (time_before(key, __tg->disptime))
			node = &parent->rb_left;
		else {
			node = &parent->rb_right;
			left = 0;
		}
	}

	if (left)
		parent_sq->first_pending = &tg->rb_node;

	rb_link_node(&tg->rb_node, parent, node);
	rb_insert_color(&tg->rb_node, &parent_sq->pending_tree);
}

static void __throtl_enqueue_tg(struct throtl_grp *tg)
{
	tg_service_queue_add(tg);
	tg->flags |= THROTL_TG_PENDING;
	tg->service_queue.parent_sq->nr_pending++;
}

static void throtl_enqueue_tg(struct throtl_grp *tg)
{
	if (!(tg->flags & THROTL_TG_PENDING))
		__throtl_enqueue_tg(tg);
}

static void __throtl_dequeue_tg(struct throtl_grp *tg)
{
	throtl_rb_erase(&tg->rb_node, tg->service_queue.parent_sq);
	tg->flags &= ~THROTL_TG_PENDING;
}

static void throtl_dequeue_tg(struct throtl_grp *tg)
{
	if (tg->flags & THROTL_TG_PENDING)
		__throtl_dequeue_tg(tg);
}

/* Call with queue lock held */
static void throtl_schedule_pending_timer(struct throtl_service_queue *sq,
					  unsigned long expires)
{
	unsigned long max_expire = jiffies + 8 * sq_to_tg(sq)->td->throtl_slice;

	/*
	 * Since we are adjusting the throttle limit dynamically, the sleep
	 * time calculated according to previous limit might be invalid.  It's
	 * possible the cgroup sleep time is very long and no other cgroups
	 * have IO running so notify the limit changes.  Make sure the cgroup
	 * doesn't sleep too long to avoid the missed notification.
	 */
	if (time_after(expires, max_expire))
		expires = max_expire;
	mod_timer(&sq->pending_timer, expires);
	throtl_log(sq, "schedule timer. delay=%lu jiffies=%lu",
		   expires - jiffies, jiffies);
}

/**
 * throtl_schedule_next_dispatch - schedule the next dispatch cycle
 * @sq: the service_queue to schedule dispatch for
 * @force: force scheduling
 *
 * Arm @sq->pending_timer so that the next dispatch cycle starts on the
 * dispatch time of the first pending child.  Returns %true if either the
 * timer is armed or there's no pending child left.  %false if the current
 * dispatch window is still open and the caller should continue
 * dispatching.
 *
 * If @force is %true, the dispatch timer is always scheduled and this
 * function is guaranteed to return %true.  This is to be used when the
 * caller can't dispatch itself and needs to invoke pending_timer
 * unconditionally.  Note that forced scheduling is likely to induce a
 * short delay before dispatch starts even if @sq->first_pending_disptime
 * is not in the future and thus shouldn't be used in hot paths.
 */
static bool throtl_schedule_next_dispatch(struct throtl_service_queue *sq,
					  bool force)
{
	/* any pending children left? */
	if (!sq->nr_pending)
		return true;

	update_min_dispatch_time(sq);

	/* is the next dispatch time in the future? */
	if (force || time_after(sq->first_pending_disptime, jiffies)) {
		throtl_schedule_pending_timer(sq, sq->first_pending_disptime);
		return true;
	}

	/* tell the caller to continue dispatching */
	return false;
}

static inline void throtl_start_new_slice_with_credit(struct throtl_grp *tg,
					bool rw, unsigned long start)
{
	tg->bytes_disp[rw] = 0;
	tg->io_disp[rw] = 0;

	/*
	 * Previous slice has expired.  We must have trimmed it after last
	 * bio dispatch.  That means since start of last slice, we never used
	 * that bandwidth.  Do try to make use of that bandwidth while giving
	 * credit.
	 */
	if (time_after_eq(start, tg->slice_start[rw]))
		tg->slice_start[rw] = start;

	tg->slice_end[rw] = jiffies + tg->td->throtl_slice;
	throtl_log(&tg->service_queue,
		   "[%c] new slice with credit start=%lu end=%lu jiffies=%lu",
		   rw == READ ? 'R' : 'W', tg->slice_start[rw],
		   tg->slice_end[rw], jiffies);
}

static inline void throtl_start_new_slice(struct throtl_grp *tg, bool rw)
{
	tg->bytes_disp[rw] = 0;
	tg->io_disp[rw] = 0;
	tg->slice_start[rw] = jiffies;
	tg->slice_end[rw] = jiffies + tg->td->throtl_slice;
	throtl_log(&tg->service_queue,
		   "[%c] new slice start=%lu end=%lu jiffies=%lu",
		   rw == READ ? 'R' : 'W', tg->slice_start[rw],
		   tg->slice_end[rw], jiffies);
}

static inline void throtl_set_slice_end(struct throtl_grp *tg, bool rw,
					unsigned long jiffy_end)
{
	tg->slice_end[rw] = roundup(jiffy_end, tg->td->throtl_slice);
}

static inline void throtl_extend_slice(struct throtl_grp *tg, bool rw,
				       unsigned long jiffy_end)
{
	tg->slice_end[rw] = roundup(jiffy_end, tg->td->throtl_slice);
	throtl_log(&tg->service_queue,
		   "[%c] extend slice start=%lu end=%lu jiffies=%lu",
		   rw == READ ? 'R' : 'W', tg->slice_start[rw],
		   tg->slice_end[rw], jiffies);
}

/* Determine if previously allocated or extended slice is complete or not */
static bool throtl_slice_used(struct throtl_grp *tg, bool rw)
{
	if (time_in_range(jiffies, tg->slice_start[rw], tg->slice_end[rw]))
		return false;

	return true;
}

/* Trim the used slices and adjust slice start accordingly */
static inline void throtl_trim_slice(struct throtl_grp *tg, bool rw)
{
	unsigned long nr_slices, time_elapsed, io_trim;
	u64 bytes_trim, tmp;

	BUG_ON(time_before(tg->slice_end[rw], tg->slice_start[rw]));

	/*
	 * If bps are unlimited (-1), then the time slice doesn't get
	 * renewed.  Don't try to trim the slice if the slice is used.  A new
	 * slice will start when appropriate.
	 */
	if (throtl_slice_used(tg, rw))
		return;

	/*
	 * A bio has been dispatched.  Also adjust slice_end.  It might happen
	 * that initially the cgroup limit was very low resulting in a high
	 * slice_end, but later the limit was bumped up and the bio was
	 * dispatched sooner, then we need to reduce slice_end.  A high bogus
	 * slice_end is bad because it does not allow a new slice to start.
	 */
	throtl_set_slice_end(tg, rw, jiffies + tg->td->throtl_slice);

	time_elapsed = jiffies - tg->slice_start[rw];

	nr_slices = time_elapsed / tg->td->throtl_slice;

	if (!nr_slices)
		return;
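	/*
	 * One slice worth of the configured rate is forgiven per elapsed
	 * slice.  E.g., with illustrative numbers HZ == 1000, throtl_slice ==
	 * 100 jiffies and a bps limit of 8,000,000 bytes/s, each elapsed
	 * slice trims 8,000,000 * 100 / 1000 == 800,000 bytes from
	 * bytes_disp, so only consumption within the current slice window
	 * stays accounted.
	 */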
	tmp = tg_bps_limit(tg, rw) * tg->td->throtl_slice * nr_slices;
	do_div(tmp, HZ);
	bytes_trim = tmp;

	io_trim = (tg_iops_limit(tg, rw) * tg->td->throtl_slice * nr_slices) /
		HZ;

	if (!bytes_trim && !io_trim)
		return;

	if (tg->bytes_disp[rw] >= bytes_trim)
		tg->bytes_disp[rw] -= bytes_trim;
	else
		tg->bytes_disp[rw] = 0;

	if (tg->io_disp[rw] >= io_trim)
		tg->io_disp[rw] -= io_trim;
	else
		tg->io_disp[rw] = 0;

	tg->slice_start[rw] += nr_slices * tg->td->throtl_slice;

	throtl_log(&tg->service_queue,
		   "[%c] trim slice nr=%lu bytes=%llu io=%lu start=%lu end=%lu jiffies=%lu",
		   rw == READ ? 'R' : 'W', nr_slices, bytes_trim, io_trim,
		   tg->slice_start[rw], tg->slice_end[rw], jiffies);
}

static bool tg_with_in_iops_limit(struct throtl_grp *tg, struct bio *bio,
				  unsigned long *wait)
{
	bool rw = bio_data_dir(bio);
	unsigned int io_allowed;
	unsigned long jiffy_elapsed, jiffy_wait, jiffy_elapsed_rnd;
	u64 tmp;

	jiffy_elapsed = jiffy_elapsed_rnd = jiffies - tg->slice_start[rw];

	/* Slice has just started.  Consider one slice interval */
	if (!jiffy_elapsed)
		jiffy_elapsed_rnd = tg->td->throtl_slice;

	jiffy_elapsed_rnd = roundup(jiffy_elapsed_rnd, tg->td->throtl_slice);

	/*
	 * jiffy_elapsed_rnd should not be a large value: the minimum iops
	 * is 1, so at most the elapsed time should be equivalent to 1
	 * second, since we allow dispatch after 1 second and after that
	 * the slice should have been trimmed.
	 */

	tmp = (u64)tg_iops_limit(tg, rw) * jiffy_elapsed_rnd;
	do_div(tmp, HZ);

	if (tmp > UINT_MAX)
		io_allowed = UINT_MAX;
	else
		io_allowed = tmp;

	if (tg->io_disp[rw] + 1 <= io_allowed) {
		if (wait)
			*wait = 0;
		return true;
	}

	/* Calc approx time to dispatch */
	jiffy_wait = ((tg->io_disp[rw] + 1) * HZ) / tg_iops_limit(tg, rw) + 1;

	if (jiffy_wait > jiffy_elapsed)
		jiffy_wait = jiffy_wait - jiffy_elapsed;
	else
		jiffy_wait = 1;

	if (wait)
		*wait = jiffy_wait;
	return false;
}

static bool tg_with_in_bps_limit(struct throtl_grp *tg, struct bio *bio,
				 unsigned long *wait)
{
	bool rw = bio_data_dir(bio);
	u64 bytes_allowed, extra_bytes, tmp;
	unsigned long jiffy_elapsed, jiffy_wait, jiffy_elapsed_rnd;

	jiffy_elapsed = jiffy_elapsed_rnd = jiffies - tg->slice_start[rw];

	/* Slice has just started.  Consider one slice interval */
	if (!jiffy_elapsed)
		jiffy_elapsed_rnd = tg->td->throtl_slice;

	jiffy_elapsed_rnd = roundup(jiffy_elapsed_rnd, tg->td->throtl_slice);

	tmp = tg_bps_limit(tg, rw) * jiffy_elapsed_rnd;
	do_div(tmp, HZ);
	bytes_allowed = tmp;

	if (tg->bytes_disp[rw] + bio->bi_iter.bi_size <= bytes_allowed) {
		if (wait)
			*wait = 0;
		return true;
	}

	/* Calc approx time to dispatch */
	extra_bytes = tg->bytes_disp[rw] + bio->bi_iter.bi_size - bytes_allowed;
	jiffy_wait = div64_u64(extra_bytes * HZ, tg_bps_limit(tg, rw));

	if (!jiffy_wait)
		jiffy_wait = 1;

	/*
	 * This wait time is without taking into consideration the rounding
	 * up we did.  Add that time also.
	 */
	jiffy_wait = jiffy_wait + (jiffy_elapsed_rnd - jiffy_elapsed);
	if (wait)
		*wait = jiffy_wait;
	return false;
}

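/*
 * Worked example for the wait computation above, with illustrative
 * numbers: HZ == 1000, throtl_slice == 100 jiffies, bps limit ==
 * 1,000,000 bytes/s.  50 jiffies into the slice, jiffy_elapsed_rnd is
 * rounded up to 100, so bytes_allowed == 1,000,000 * 100 / 1000 ==
 * 100,000 bytes.  A queued 300,000-byte bio with bytes_disp == 0 leaves
 * extra_bytes == 200,000, i.e. jiffy_wait == 200 jiffies plus the 50
 * jiffies of round-up slack.
 */
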
/*
 * Returns whether one can dispatch a bio or not.  Also returns the approx
 * number of jiffies to wait before this bio is within the IO rate and can
 * be dispatched.
 */
static bool tg_may_dispatch(struct throtl_grp *tg, struct bio *bio,
			    unsigned long *wait)
{
	bool rw = bio_data_dir(bio);
	unsigned long bps_wait = 0, iops_wait = 0, max_wait = 0;

	/*
	 * Currently the whole state machine of the group depends on the
	 * first bio queued in the group's bio list.  So one should not be
	 * calling this function with a different bio if there are other
	 * bios queued.
	 */
	BUG_ON(tg->service_queue.nr_queued[rw] &&
	       bio != throtl_peek_queued(&tg->service_queue.queued[rw]));

	/* If tg->bps = -1, then BW is unlimited */
	if (tg_bps_limit(tg, rw) == U64_MAX &&
	    tg_iops_limit(tg, rw) == UINT_MAX) {
		if (wait)
			*wait = 0;
		return true;
	}

	/*
	 * If the previous slice expired, start a new one, otherwise
	 * renew/extend the existing slice to make sure it is at least
	 * throtl_slice long since now.  A new slice is started only for an
	 * empty throttle group.  If there is a queued bio, that means there
	 * should be an active slice and it should be extended instead.
	 */
	if (throtl_slice_used(tg, rw) && !(tg->service_queue.nr_queued[rw]))
		throtl_start_new_slice(tg, rw);
	else {
		if (time_before(tg->slice_end[rw],
		    jiffies + tg->td->throtl_slice))
			throtl_extend_slice(tg, rw,
				jiffies + tg->td->throtl_slice);
	}

	if (tg_with_in_bps_limit(tg, bio, &bps_wait) &&
	    tg_with_in_iops_limit(tg, bio, &iops_wait)) {
		if (wait)
			*wait = 0;
		return true;
	}

	max_wait = max(bps_wait, iops_wait);

	if (wait)
		*wait = max_wait;

	if (time_before(tg->slice_end[rw], jiffies + max_wait))
		throtl_extend_slice(tg, rw, jiffies + max_wait);

	return false;
}

static void throtl_charge_bio(struct throtl_grp *tg, struct bio *bio)
{
	bool rw = bio_data_dir(bio);

	/* Charge the bio to the group */
	tg->bytes_disp[rw] += bio->bi_iter.bi_size;
	tg->io_disp[rw]++;
	tg->last_bytes_disp[rw] += bio->bi_iter.bi_size;
	tg->last_io_disp[rw]++;

	/*
	 * BIO_THROTTLED is used to prevent the same bio from being
	 * throttled more than once, as a throttled bio will go through
	 * blk-throtl a second time when it eventually gets issued.  Set it
	 * when a bio is being charged to a tg.
	 */
	if (!bio_flagged(bio, BIO_THROTTLED))
		bio_set_flag(bio, BIO_THROTTLED);
}

/**
 * throtl_add_bio_tg - add a bio to the specified throtl_grp
 * @bio: bio to add
 * @qn: qnode to use
 * @tg: the target throtl_grp
 *
 * Add @bio to @tg's service_queue using @qn.  If @qn is not specified,
 * tg->qnode_on_self[] is used.
 */
static void throtl_add_bio_tg(struct bio *bio, struct throtl_qnode *qn,
			      struct throtl_grp *tg)
{
	struct throtl_service_queue *sq = &tg->service_queue;
	bool rw = bio_data_dir(bio);

	if (!qn)
		qn = &tg->qnode_on_self[rw];

	/*
	 * If @tg doesn't currently have any bios queued in the same
	 * direction, queueing @bio can change when @tg should be
	 * dispatched.  Mark that @tg was empty.  This is automatically
	 * cleared on the next tg_update_disptime().
	 */
	if (!sq->nr_queued[rw])
		tg->flags |= THROTL_TG_WAS_EMPTY;

	throtl_qnode_add_bio(bio, qn, &sq->queued[rw]);

	sq->nr_queued[rw]++;
	throtl_enqueue_tg(tg);
}

static void tg_update_disptime(struct throtl_grp *tg)
{
	struct throtl_service_queue *sq = &tg->service_queue;
	unsigned long read_wait = -1, write_wait = -1, min_wait = -1, disptime;
	struct bio *bio;

	bio = throtl_peek_queued(&sq->queued[READ]);
	if (bio)
		tg_may_dispatch(tg, bio, &read_wait);

	bio = throtl_peek_queued(&sq->queued[WRITE]);
	if (bio)
		tg_may_dispatch(tg, bio, &write_wait);

	min_wait = min(read_wait, write_wait);
	disptime = jiffies + min_wait;

	/* Update dispatch time */
	throtl_dequeue_tg(tg);
	tg->disptime = disptime;
	throtl_enqueue_tg(tg);

	/* see throtl_add_bio_tg() */
	tg->flags &= ~THROTL_TG_WAS_EMPTY;
}

static void start_parent_slice_with_credit(struct throtl_grp *child_tg,
					   struct throtl_grp *parent_tg, bool rw)
{
	if (throtl_slice_used(parent_tg, rw)) {
		throtl_start_new_slice_with_credit(parent_tg, rw,
				child_tg->slice_start[rw]);
	}
}

static void tg_dispatch_one_bio(struct throtl_grp *tg, bool rw)
{
	struct throtl_service_queue *sq = &tg->service_queue;
	struct throtl_service_queue *parent_sq = sq->parent_sq;
	struct throtl_grp *parent_tg = sq_to_tg(parent_sq);
	struct throtl_grp *tg_to_put = NULL;
	struct bio *bio;

	/*
	 * @bio is being transferred from @tg to @parent_sq.  Popping a bio
	 * from @tg may put its reference and @parent_sq might end up
	 * getting released prematurely.  Remember the tg to put and put it
	 * after @bio is transferred to @parent_sq.
	 */
	bio = throtl_pop_queued(&sq->queued[rw], &tg_to_put);
	sq->nr_queued[rw]--;

	throtl_charge_bio(tg, bio);

	/*
	 * If our parent is another tg, we just need to transfer @bio to
	 * the parent using throtl_add_bio_tg().  If our parent is
	 * @td->service_queue, @bio is ready to be issued.  Put it on its
	 * bio_lists[] and decrease the total number queued.  The caller is
	 * responsible for issuing these bios.
	 */
	if (parent_tg) {
		throtl_add_bio_tg(bio, &tg->qnode_on_parent[rw], parent_tg);
		start_parent_slice_with_credit(tg, parent_tg, rw);
	} else {
		throtl_qnode_add_bio(bio, &tg->qnode_on_parent[rw],
				     &parent_sq->queued[rw]);
		BUG_ON(tg->td->nr_queued[rw] <= 0);
		tg->td->nr_queued[rw]--;
	}

	throtl_trim_slice(tg, rw);

	if (tg_to_put)
		blkg_put(tg_to_blkg(tg_to_put));
}

static int throtl_dispatch_tg(struct throtl_grp *tg)
{
	struct throtl_service_queue *sq = &tg->service_queue;
	unsigned int nr_reads = 0, nr_writes = 0;
	unsigned int max_nr_reads = throtl_grp_quantum * 3 / 4;
	unsigned int max_nr_writes = throtl_grp_quantum - max_nr_reads;
	struct bio *bio;

	/*
	 * Try to dispatch 75% READS and 25% WRITES; with the default
	 * throtl_grp_quantum of 8 that is 6 reads and 2 writes per round.
	 */

	while ((bio = throtl_peek_queued(&sq->queued[READ])) &&
	       tg_may_dispatch(tg, bio, NULL)) {

		tg_dispatch_one_bio(tg, bio_data_dir(bio));
		nr_reads++;

		if (nr_reads >= max_nr_reads)
			break;
	}

	while ((bio = throtl_peek_queued(&sq->queued[WRITE])) &&
	       tg_may_dispatch(tg, bio, NULL)) {

		tg_dispatch_one_bio(tg, bio_data_dir(bio));
		nr_writes++;

		if (nr_writes >= max_nr_writes)
			break;
	}

	return nr_reads + nr_writes;
}

static int throtl_select_dispatch(struct throtl_service_queue *parent_sq)
{
	unsigned int nr_disp = 0;

	while (1) {
		struct throtl_grp *tg = throtl_rb_first(parent_sq);
		struct throtl_service_queue *sq;

		if (!tg)
			break;

		if (time_before(jiffies, tg->disptime))
			break;

		throtl_dequeue_tg(tg);

		nr_disp += throtl_dispatch_tg(tg);

		sq = &tg->service_queue;
		if (sq->nr_queued[0] || sq->nr_queued[1])
			tg_update_disptime(tg);

		if (nr_disp >= throtl_quantum)
			break;
	}

	return nr_disp;
}

static bool throtl_can_upgrade(struct throtl_data *td,
	struct throtl_grp *this_tg);

/**
 * throtl_pending_timer_fn - timer function for service_queue->pending_timer
 * @arg: the throtl_service_queue being serviced
 *
 * This timer is armed when a child throtl_grp with active bios becomes
 * pending and queued on the service_queue's pending_tree and expires when
 * the first child throtl_grp should be dispatched.  This function
 * dispatches bios from the children throtl_grps to the parent
 * service_queue.
 *
 * If the parent's parent is another throtl_grp, dispatching is propagated
 * by either arming its pending_timer or repeating dispatch directly.  If
 * the top-level service_tree is reached, throtl_data->dispatch_work is
 * kicked so that the ready bios are issued.
 */
static void throtl_pending_timer_fn(unsigned long arg)
{
	struct throtl_service_queue *sq = (void *)arg;
	struct throtl_grp *tg = sq_to_tg(sq);
	struct throtl_data *td = sq_to_td(sq);
	struct request_queue *q = td->queue;
	struct throtl_service_queue *parent_sq;
	bool dispatched;
	int ret;

	spin_lock_irq(q->queue_lock);
	if (throtl_can_upgrade(td, NULL))
		throtl_upgrade_state(td);

again:
	parent_sq = sq->parent_sq;
	dispatched = false;

	while (true) {
		throtl_log(sq, "dispatch nr_queued=%u read=%u write=%u",
			   sq->nr_queued[READ] + sq->nr_queued[WRITE],
			   sq->nr_queued[READ], sq->nr_queued[WRITE]);

		ret = throtl_select_dispatch(sq);
		if (ret) {
			throtl_log(sq, "bios disp=%u", ret);
			dispatched = true;
		}

		if (throtl_schedule_next_dispatch(sq, false))
			break;

		/* this dispatch window is still open, relax and repeat */
		spin_unlock_irq(q->queue_lock);
		cpu_relax();
		spin_lock_irq(q->queue_lock);
	}

	if (!dispatched)
		goto out_unlock;

	if (parent_sq) {
		/* @parent_sq is another throtl_grp, propagate dispatch */
		if (tg->flags & THROTL_TG_WAS_EMPTY) {
			tg_update_disptime(tg);
			if (!throtl_schedule_next_dispatch(parent_sq, false)) {
				/* window is already open, repeat dispatching */
				sq = parent_sq;
				tg = sq_to_tg(sq);
				goto again;
			}
		}
	} else {
		/* reached the top-level, queue issuing */
		queue_work(kthrotld_workqueue, &td->dispatch_work);
	}
out_unlock:
	spin_unlock_irq(q->queue_lock);
}

/**
 * blk_throtl_dispatch_work_fn - work function for throtl_data->dispatch_work
 * @work: work item being executed
 *
 * This function is queued for execution when bios reach the bio_lists[]
 * of throtl_data->service_queue.  Those bios are ready and issued by this
 * function.
 */
static void blk_throtl_dispatch_work_fn(struct work_struct *work)
{
	struct throtl_data *td = container_of(work, struct throtl_data,
					      dispatch_work);
	struct throtl_service_queue *td_sq = &td->service_queue;
	struct request_queue *q = td->queue;
	struct bio_list bio_list_on_stack;
	struct bio *bio;
	struct blk_plug plug;
	int rw;

	bio_list_init(&bio_list_on_stack);

	spin_lock_irq(q->queue_lock);
	for (rw = READ; rw <= WRITE; rw++)
		while ((bio = throtl_pop_queued(&td_sq->queued[rw], NULL)))
			bio_list_add(&bio_list_on_stack, bio);
	spin_unlock_irq(q->queue_lock);

	if (!bio_list_empty(&bio_list_on_stack)) {
		blk_start_plug(&plug);
		while ((bio = bio_list_pop(&bio_list_on_stack)))
			generic_make_request(bio);
		blk_finish_plug(&plug);
	}
}

static u64 tg_prfill_conf_u64(struct seq_file *sf, struct blkg_policy_data *pd,
			      int off)
{
	struct throtl_grp *tg = pd_to_tg(pd);
	u64 v = *(u64 *)((void *)tg + off);

	if (v == U64_MAX)
		return 0;
	return __blkg_prfill_u64(sf, pd, v);
}

static u64 tg_prfill_conf_uint(struct seq_file *sf, struct blkg_policy_data *pd,
			       int off)
{
	struct throtl_grp *tg = pd_to_tg(pd);
	unsigned int v = *(unsigned int *)((void *)tg + off);

	if (v == UINT_MAX)
		return 0;
	return __blkg_prfill_u64(sf, pd, v);
}

static int tg_print_conf_u64(struct seq_file *sf, void *v)
{
	blkcg_print_blkgs(sf, css_to_blkcg(seq_css(sf)), tg_prfill_conf_u64,
			  &blkcg_policy_throtl, seq_cft(sf)->private, false);
	return 0;
}

static int tg_print_conf_uint(struct seq_file *sf, void *v)
{
	blkcg_print_blkgs(sf, css_to_blkcg(seq_css(sf)), tg_prfill_conf_uint,
			  &blkcg_policy_throtl, seq_cft(sf)->private, false);
	return 0;
}

static void tg_conf_updated(struct throtl_grp *tg, bool global)
{
	struct throtl_service_queue *sq = &tg->service_queue;
	struct cgroup_subsys_state *pos_css;
	struct blkcg_gq *blkg;

	throtl_log(&tg->service_queue,
		   "limit change rbps=%llu wbps=%llu riops=%u wiops=%u",
		   tg_bps_limit(tg, READ), tg_bps_limit(tg, WRITE),
		   tg_iops_limit(tg, READ), tg_iops_limit(tg, WRITE));

	/*
	 * Update has_rules[] flags for the updated tg's subtree.  A tg is
	 * considered to have rules if either the tg itself or any of its
	 * ancestors has rules.  This identifies groups without any
	 * restrictions in the whole hierarchy and allows them to bypass
	 * blk-throttle.
	 */
	blkg_for_each_descendant_pre(blkg, pos_css,
			global ? tg->td->queue->root_blkg : tg_to_blkg(tg)) {
		struct throtl_grp *this_tg = blkg_to_tg(blkg);
		struct throtl_grp *parent_tg;

		tg_update_has_rules(this_tg);
		/* ignore root/second level */
		if (!cgroup_subsys_on_dfl(io_cgrp_subsys) || !blkg->parent ||
		    !blkg->parent->parent)
			continue;
		parent_tg = blkg_to_tg(blkg->parent);
		/*
		 * make sure all children have a lower idle time threshold
		 * and a higher latency target
		 */
		this_tg->idletime_threshold = min(this_tg->idletime_threshold,
				parent_tg->idletime_threshold);
		this_tg->latency_target = max(this_tg->latency_target,
				parent_tg->latency_target);
	}

	/*
	 * We're already holding queue_lock and know @tg is valid.  Let's
	 * apply the new config directly.
	 *
	 * Restart the slices for both READ and WRITES.  It might happen
	 * that a group's limits are dropped suddenly and we don't want to
	 * account recently dispatched IO with the new low rate.
	 */
	throtl_start_new_slice(tg, 0);
	throtl_start_new_slice(tg, 1);

	if (tg->flags & THROTL_TG_PENDING) {
		tg_update_disptime(tg);
		throtl_schedule_next_dispatch(sq->parent_sq, true);
	}
}

static ssize_t tg_set_conf(struct kernfs_open_file *of,
			   char *buf, size_t nbytes, loff_t off, bool is_u64)
{
	struct blkcg *blkcg = css_to_blkcg(of_css(of));
	struct blkg_conf_ctx ctx;
	struct throtl_grp *tg;
	int ret;
	u64 v;

	ret = blkg_conf_prep(blkcg, &blkcg_policy_throtl, buf, &ctx);
	if (ret)
		return ret;

	ret = -EINVAL;
	if (sscanf(ctx.body, "%llu", &v) != 1)
		goto out_finish;
	if (!v)
		v = U64_MAX;

	tg = blkg_to_tg(ctx.blkg);

	if (is_u64)
		*(u64 *)((void *)tg + of_cft(of)->private) = v;
	else
		*(unsigned int *)((void *)tg + of_cft(of)->private) = v;

	tg_conf_updated(tg, false);
	ret = 0;
out_finish:
	blkg_conf_finish(&ctx);
	return ret ?: nbytes;
}

static ssize_t tg_set_conf_u64(struct kernfs_open_file *of,
			       char *buf, size_t nbytes, loff_t off)
{
	return tg_set_conf(of, buf, nbytes, off, true);
}

static ssize_t tg_set_conf_uint(struct kernfs_open_file *of,
				char *buf, size_t nbytes, loff_t off)
{
	return tg_set_conf(of, buf, nbytes, off, false);
}

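/*
 * Each legacy (cgroup v1) file below takes a single "MAJ:MIN value" line;
 * e.g., with an illustrative device number, a 1 MB/s read cap would be:
 *
 *   echo "8:16 1048576" > blkio.throttle.read_bps_device
 *
 * Writing a value of 0 clears the limit (stored internally as the
 * "unlimited" sentinel by tg_set_conf() above).
 */
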
static struct cftype throtl_legacy_files[] = {
	{
		.name = "throttle.read_bps_device",
		.private = offsetof(struct throtl_grp, bps[READ][LIMIT_MAX]),
		.seq_show = tg_print_conf_u64,
		.write = tg_set_conf_u64,
	},
	{
		.name = "throttle.write_bps_device",
		.private = offsetof(struct throtl_grp, bps[WRITE][LIMIT_MAX]),
		.seq_show = tg_print_conf_u64,
		.write = tg_set_conf_u64,
	},
	{
		.name = "throttle.read_iops_device",
		.private = offsetof(struct throtl_grp, iops[READ][LIMIT_MAX]),
		.seq_show = tg_print_conf_uint,
		.write = tg_set_conf_uint,
	},
	{
		.name = "throttle.write_iops_device",
		.private = offsetof(struct throtl_grp, iops[WRITE][LIMIT_MAX]),
		.seq_show = tg_print_conf_uint,
		.write = tg_set_conf_uint,
	},
	{
		.name = "throttle.io_service_bytes",
		.private = (unsigned long)&blkcg_policy_throtl,
		.seq_show = blkg_print_stat_bytes,
	},
	{
		.name = "throttle.io_serviced",
		.private = (unsigned long)&blkcg_policy_throtl,
		.seq_show = blkg_print_stat_ios,
	},
	{ }	/* terminate */
};

static u64 tg_prfill_limit(struct seq_file *sf, struct blkg_policy_data *pd,
			   int off)
{
	struct throtl_grp *tg = pd_to_tg(pd);
	const char *dname = blkg_dev_name(pd->blkg);
	char bufs[4][21] = { "max", "max", "max", "max" };
	u64 bps_dft;
	unsigned int iops_dft;
	char idle_time[26] = "";
	char latency_time[26] = "";

	if (!dname)
		return 0;

	if (off == LIMIT_LOW) {
		bps_dft = 0;
		iops_dft = 0;
	} else {
		bps_dft = U64_MAX;
		iops_dft = UINT_MAX;
	}

	if (tg->bps_conf[READ][off] == bps_dft &&
	    tg->bps_conf[WRITE][off] == bps_dft &&
	    tg->iops_conf[READ][off] == iops_dft &&
	    tg->iops_conf[WRITE][off] == iops_dft &&
	    (off != LIMIT_LOW ||
	     (tg->idletime_threshold_conf == DFL_IDLE_THRESHOLD &&
	      tg->latency_target_conf == DFL_LATENCY_TARGET)))
		return 0;

	if (tg->bps_conf[READ][off] != U64_MAX)
		snprintf(bufs[0], sizeof(bufs[0]), "%llu",
			tg->bps_conf[READ][off]);
	if (tg->bps_conf[WRITE][off] != U64_MAX)
		snprintf(bufs[1], sizeof(bufs[1]), "%llu",
			tg->bps_conf[WRITE][off]);
	if (tg->iops_conf[READ][off] != UINT_MAX)
		snprintf(bufs[2], sizeof(bufs[2]), "%u",
			tg->iops_conf[READ][off]);
	if (tg->iops_conf[WRITE][off] != UINT_MAX)
		snprintf(bufs[3], sizeof(bufs[3]), "%u",
			tg->iops_conf[WRITE][off]);
	if (off == LIMIT_LOW) {
		if (tg->idletime_threshold_conf == ULONG_MAX)
			strcpy(idle_time, " idle=max");
		else
			snprintf(idle_time, sizeof(idle_time), " idle=%lu",
				tg->idletime_threshold_conf);

		if (tg->latency_target_conf == ULONG_MAX)
			strcpy(latency_time, " latency=max");
		else
			snprintf(latency_time, sizeof(latency_time),
				" latency=%lu", tg->latency_target_conf);
	}

	seq_printf(sf, "%s rbps=%s wbps=%s riops=%s wiops=%s%s%s\n",
		   dname, bufs[0], bufs[1], bufs[2], bufs[3], idle_time,
		   latency_time);
	return 0;
}

static int tg_print_limit(struct seq_file *sf, void *v)
{
	blkcg_print_blkgs(sf, css_to_blkcg(seq_css(sf)), tg_prfill_limit,
			  &blkcg_policy_throtl, seq_cft(sf)->private, false);
	return 0;
}

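/*
 * Writes to the "max" file (and, under CONFIG_BLK_DEV_THROTTLING_LOW, the
 * "low" file) take a device followed by key=value tokens; e.g., with
 * illustrative numbers:
 *
 *   echo "8:16 rbps=2097152 wbps=max riops=1000" > io.max
 *
 * where "max" clears the corresponding limit.
 */
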
static ssize_t tg_set_limit(struct kernfs_open_file *of,
			    char *buf, size_t nbytes, loff_t off)
{
	struct blkcg *blkcg = css_to_blkcg(of_css(of));
	struct blkg_conf_ctx ctx;
	struct throtl_grp *tg;
	u64 v[4];
	unsigned long idle_time;
	unsigned long latency_time;
	int ret;
	int index = of_cft(of)->private;

	ret = blkg_conf_prep(blkcg, &blkcg_policy_throtl, buf, &ctx);
	if (ret)
		return ret;

	tg = blkg_to_tg(ctx.blkg);

	v[0] = tg->bps_conf[READ][index];
	v[1] = tg->bps_conf[WRITE][index];
	v[2] = tg->iops_conf[READ][index];
	v[3] = tg->iops_conf[WRITE][index];

	idle_time = tg->idletime_threshold_conf;
	latency_time = tg->latency_target_conf;
	while (true) {
		char tok[27];	/* wiops=18446744073709551616 */
		char *p;
		u64 val = U64_MAX;
		int len;

		if (sscanf(ctx.body, "%26s%n", tok, &len) != 1)
			break;
		if (tok[0] == '\0')
			break;
		ctx.body += len;

		ret = -EINVAL;
		p = tok;
		strsep(&p, "=");
		if (!p || (sscanf(p, "%llu", &val) != 1 && strcmp(p, "max")))
			goto out_finish;

		ret = -ERANGE;
		if (!val)
			goto out_finish;

		ret = -EINVAL;
		if (!strcmp(tok, "rbps"))
			v[0] = val;
		else if (!strcmp(tok, "wbps"))
			v[1] = val;
		else if (!strcmp(tok, "riops"))
			v[2] = min_t(u64, val, UINT_MAX);
		else if (!strcmp(tok, "wiops"))
			v[3] = min_t(u64, val, UINT_MAX);
		else if (off == LIMIT_LOW && !strcmp(tok, "idle"))
			idle_time = val;
		else if (off == LIMIT_LOW && !strcmp(tok, "latency"))
			latency_time = val;
		else
			goto out_finish;
	}

	tg->bps_conf[READ][index] = v[0];
	tg->bps_conf[WRITE][index] = v[1];
	tg->iops_conf[READ][index] = v[2];
	tg->iops_conf[WRITE][index] = v[3];

	if (index == LIMIT_MAX) {
		tg->bps[READ][index] = v[0];
		tg->bps[WRITE][index] = v[1];
		tg->iops[READ][index] = v[2];
		tg->iops[WRITE][index] = v[3];
	}
	tg->bps[READ][LIMIT_LOW] = min(tg->bps_conf[READ][LIMIT_LOW],
		tg->bps_conf[READ][LIMIT_MAX]);
	tg->bps[WRITE][LIMIT_LOW] = min(tg->bps_conf[WRITE][LIMIT_LOW],
		tg->bps_conf[WRITE][LIMIT_MAX]);
	tg->iops[READ][LIMIT_LOW] = min(tg->iops_conf[READ][LIMIT_LOW],
		tg->iops_conf[READ][LIMIT_MAX]);
	tg->iops[WRITE][LIMIT_LOW] = min(tg->iops_conf[WRITE][LIMIT_LOW],
		tg->iops_conf[WRITE][LIMIT_MAX]);
	tg->idletime_threshold_conf = idle_time;
	tg->latency_target_conf = latency_time;

	/* force user to configure all settings for low limit */
	if (!(tg->bps[READ][LIMIT_LOW] || tg->iops[READ][LIMIT_LOW] ||
	      tg->bps[WRITE][LIMIT_LOW] || tg->iops[WRITE][LIMIT_LOW]) ||
	    tg->idletime_threshold_conf == DFL_IDLE_THRESHOLD ||
	    tg->latency_target_conf == DFL_LATENCY_TARGET) {
		tg->bps[READ][LIMIT_LOW] = 0;
		tg->bps[WRITE][LIMIT_LOW] = 0;
		tg->iops[READ][LIMIT_LOW] = 0;
		tg->iops[WRITE][LIMIT_LOW] = 0;
		tg->idletime_threshold = DFL_IDLE_THRESHOLD;
		tg->latency_target = DFL_LATENCY_TARGET;
	} else if (index == LIMIT_LOW) {
		tg->idletime_threshold = tg->idletime_threshold_conf;
		tg->latency_target = tg->latency_target_conf;
	}

	blk_throtl_update_limit_valid(tg->td);
	if (tg->td->limit_valid[LIMIT_LOW]) {
		if (index == LIMIT_LOW)
			tg->td->limit_index = LIMIT_LOW;
	} else
		tg->td->limit_index = LIMIT_MAX;
	tg_conf_updated(tg, index == LIMIT_LOW &&
		tg->td->limit_valid[LIMIT_LOW]);
	ret = 0;
out_finish:
	blkg_conf_finish(&ctx);
	return ret ?: nbytes;
}

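/*
 * tg_set_limit() parses whitespace-separated key=value tokens, so a
 * hypothetical low-limit configuration written from userspace could be:
 *
 *	# echo "8:16 rbps=2097152 wiops=120 idle=200 latency=10" > io.low
 *
 * A value of "max" is stored as U64_MAX; "idle" and "latency" are
 * accepted only on the low-limit file (off == LIMIT_LOW).  The "MAJ:MIN"
 * device prefix itself is consumed earlier by blkg_conf_prep().
 */
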
static struct cftype throtl_files[] = {
#ifdef CONFIG_BLK_DEV_THROTTLING_LOW
	{
		.name = "low",
		.flags = CFTYPE_NOT_ON_ROOT,
		.seq_show = tg_print_limit,
		.write = tg_set_limit,
		.private = LIMIT_LOW,
	},
#endif
	{
		.name = "max",
		.flags = CFTYPE_NOT_ON_ROOT,
		.seq_show = tg_print_limit,
		.write = tg_set_limit,
		.private = LIMIT_MAX,
	},
	{ }	/* terminate */
};

static void throtl_shutdown_wq(struct request_queue *q)
{
	struct throtl_data *td = q->td;

	cancel_work_sync(&td->dispatch_work);
}

static struct blkcg_policy blkcg_policy_throtl = {
	.dfl_cftypes	= throtl_files,
	.legacy_cftypes	= throtl_legacy_files,

	.pd_alloc_fn	= throtl_pd_alloc,
	.pd_init_fn	= throtl_pd_init,
	.pd_online_fn	= throtl_pd_online,
	.pd_offline_fn	= throtl_pd_offline,
	.pd_free_fn	= throtl_pd_free,
};

static unsigned long __tg_last_low_overflow_time(struct throtl_grp *tg)
{
	unsigned long rtime = jiffies, wtime = jiffies;

	if (tg->bps[READ][LIMIT_LOW] || tg->iops[READ][LIMIT_LOW])
		rtime = tg->last_low_overflow_time[READ];
	if (tg->bps[WRITE][LIMIT_LOW] || tg->iops[WRITE][LIMIT_LOW])
		wtime = tg->last_low_overflow_time[WRITE];
	return min(rtime, wtime);
}

/* tg should not be an intermediate node */
static unsigned long tg_last_low_overflow_time(struct throtl_grp *tg)
{
	struct throtl_service_queue *parent_sq;
	struct throtl_grp *parent = tg;
	unsigned long ret = __tg_last_low_overflow_time(tg);

	while (true) {
		parent_sq = parent->service_queue.parent_sq;
		parent = sq_to_tg(parent_sq);
		if (!parent)
			break;

		/*
		 * A parent without a low limit is always considered to
		 * have reached its low limit, so its overflow time is
		 * useless to its children.
		 */
		if (!parent->bps[READ][LIMIT_LOW] &&
		    !parent->iops[READ][LIMIT_LOW] &&
		    !parent->bps[WRITE][LIMIT_LOW] &&
		    !parent->iops[WRITE][LIMIT_LOW])
			continue;
		if (time_after(__tg_last_low_overflow_time(parent), ret))
			ret = __tg_last_low_overflow_time(parent);
	}
	return ret;
}

static bool throtl_tg_is_idle(struct throtl_grp *tg)
{
	/*
	 * A cgroup is idle if:
	 * - a single idle period is too long: longer than a fixed cap (in
	 *   case the user configured a threshold that is too large) or
	 *   four times the idletime threshold
	 * - the average think time is above the threshold
	 * - the IO latency is largely below the threshold
	 */
	unsigned long time;
	bool ret;

	time = min_t(unsigned long, MAX_IDLE_TIME, 4 * tg->idletime_threshold);
	ret = tg->latency_target == DFL_LATENCY_TARGET ||
	      tg->idletime_threshold == DFL_IDLE_THRESHOLD ||
	      (ktime_get_ns() >> 10) - tg->last_finish_time > time ||
	      tg->avg_idletime > tg->idletime_threshold ||
	      (tg->latency_target && tg->bio_cnt &&
	       tg->bad_bio_cnt * 5 < tg->bio_cnt);
	throtl_log(&tg->service_queue,
		"avg_idle=%ld, idle_threshold=%ld, bad_bio=%d, total_bio=%d, is_idle=%d, scale=%d",
		tg->avg_idletime, tg->idletime_threshold, tg->bad_bio_cnt,
		tg->bio_cnt, ret, tg->td->scale);
	return ret;
}

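/*
 * Note on the last clause above: "tg->bad_bio_cnt * 5 < tg->bio_cnt" is
 * equivalent to saying that fewer than 20% of the sampled bios missed
 * the latency target, so the cgroup's latency goal is treated as met.
 */
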
static bool throtl_tg_can_upgrade(struct throtl_grp *tg)
{
	struct throtl_service_queue *sq = &tg->service_queue;
	bool read_limit, write_limit;

	/*
	 * If the cgroup has reached its low limit (a low limit of 0 is
	 * always considered reached), it's ok to upgrade to the next
	 * limit.
	 */
	read_limit = tg->bps[READ][LIMIT_LOW] || tg->iops[READ][LIMIT_LOW];
	write_limit = tg->bps[WRITE][LIMIT_LOW] || tg->iops[WRITE][LIMIT_LOW];
	if (!read_limit && !write_limit)
		return true;
	if (read_limit && sq->nr_queued[READ] &&
	    (!write_limit || sq->nr_queued[WRITE]))
		return true;
	if (write_limit && sq->nr_queued[WRITE] &&
	    (!read_limit || sq->nr_queued[READ]))
		return true;

	if (time_after_eq(jiffies,
		tg_last_low_overflow_time(tg) + tg->td->throtl_slice) &&
	    throtl_tg_is_idle(tg))
		return true;
	return false;
}

static bool throtl_hierarchy_can_upgrade(struct throtl_grp *tg)
{
	while (true) {
		if (throtl_tg_can_upgrade(tg))
			return true;
		tg = sq_to_tg(tg->service_queue.parent_sq);
		if (!tg || !tg_to_blkg(tg)->parent)
			return false;
	}
	return false;
}

static bool throtl_can_upgrade(struct throtl_data *td,
	struct throtl_grp *this_tg)
{
	struct cgroup_subsys_state *pos_css;
	struct blkcg_gq *blkg;

	if (td->limit_index != LIMIT_LOW)
		return false;

	if (time_before(jiffies, td->low_downgrade_time + td->throtl_slice))
		return false;

	rcu_read_lock();
	blkg_for_each_descendant_post(blkg, pos_css, td->queue->root_blkg) {
		struct throtl_grp *tg = blkg_to_tg(blkg);

		if (tg == this_tg)
			continue;
		if (!list_empty(&tg_to_blkg(tg)->blkcg->css.children))
			continue;
		if (!throtl_hierarchy_can_upgrade(tg)) {
			rcu_read_unlock();
			return false;
		}
	}
	rcu_read_unlock();
	return true;
}

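/*
 * In other words, the queue only upgrades from LIMIT_LOW to LIMIT_MAX
 * when every leaf cgroup (non-leaf groups are skipped above) has either
 * met its low limit or is considered idle; a single busy leaf still
 * below its low limit keeps the whole queue at the low limit.
 */
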
static void throtl_upgrade_check(struct throtl_grp *tg)
{
	unsigned long now = jiffies;

	if (tg->td->limit_index != LIMIT_LOW)
		return;

	if (time_after(tg->last_check_time + tg->td->throtl_slice, now))
		return;

	tg->last_check_time = now;

	if (!time_after_eq(now,
	      __tg_last_low_overflow_time(tg) + tg->td->throtl_slice))
		return;

	if (throtl_can_upgrade(tg->td, NULL))
		throtl_upgrade_state(tg->td);
}

static void throtl_upgrade_state(struct throtl_data *td)
{
	struct cgroup_subsys_state *pos_css;
	struct blkcg_gq *blkg;

	throtl_log(&td->service_queue, "upgrade to max");
	td->limit_index = LIMIT_MAX;
	td->low_upgrade_time = jiffies;
	td->scale = 0;
	rcu_read_lock();
	blkg_for_each_descendant_post(blkg, pos_css, td->queue->root_blkg) {
		struct throtl_grp *tg = blkg_to_tg(blkg);
		struct throtl_service_queue *sq = &tg->service_queue;

		tg->disptime = jiffies - 1;
		throtl_select_dispatch(sq);
		throtl_schedule_next_dispatch(sq, false);
	}
	rcu_read_unlock();
	throtl_select_dispatch(&td->service_queue);
	throtl_schedule_next_dispatch(&td->service_queue, false);
	queue_work(kthrotld_workqueue, &td->dispatch_work);
}

static void throtl_downgrade_state(struct throtl_data *td, int new)
{
	td->scale /= 2;

	throtl_log(&td->service_queue, "downgrade, scale %d", td->scale);
	if (td->scale) {
		td->low_upgrade_time = jiffies - td->scale * td->throtl_slice;
		return;
	}

	td->limit_index = new;
	td->low_downgrade_time = jiffies;
}

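/*
 * Halving td->scale makes the downgrade gradual: while scale is still
 * non-zero we stay at LIMIT_MAX and only pull low_upgrade_time back by
 * scale * throtl_slice (e.g. four slices for scale=4, hypothetical
 * numbers), which shrinks the scaled-up limits derived from it; only
 * once scale reaches 0 does limit_index actually drop to the new (low)
 * limit.
 */
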
static bool throtl_tg_can_downgrade(struct throtl_grp *tg)
{
	struct throtl_data *td = tg->td;
	unsigned long now = jiffies;

	/*
	 * If cgroup is below low limit, consider downgrade and throttle
	 * other cgroups
	 */
	if (time_after_eq(now, td->low_upgrade_time + td->throtl_slice) &&
	    time_after_eq(now, tg_last_low_overflow_time(tg) +
		td->throtl_slice) &&
	    (!throtl_tg_is_idle(tg) ||
	     !list_empty(&tg_to_blkg(tg)->blkcg->css.children)))
		return true;
	return false;
}

static bool throtl_hierarchy_can_downgrade(struct throtl_grp *tg)
{
	while (true) {
		if (!throtl_tg_can_downgrade(tg))
			return false;
		tg = sq_to_tg(tg->service_queue.parent_sq);
		if (!tg || !tg_to_blkg(tg)->parent)
			break;
	}
	return true;
}

static void throtl_downgrade_check(struct throtl_grp *tg)
{
	uint64_t bps;
	unsigned int iops;
	unsigned long elapsed_time;
	unsigned long now = jiffies;

	if (tg->td->limit_index != LIMIT_MAX ||
	    !tg->td->limit_valid[LIMIT_LOW])
		return;
	if (!list_empty(&tg_to_blkg(tg)->blkcg->css.children))
		return;
	if (time_after(tg->last_check_time + tg->td->throtl_slice, now))
		return;

	elapsed_time = now - tg->last_check_time;
	tg->last_check_time = now;

	if (time_before(now, tg_last_low_overflow_time(tg) +
			tg->td->throtl_slice))
		return;

	if (tg->bps[READ][LIMIT_LOW]) {
		bps = tg->last_bytes_disp[READ] * HZ;
		do_div(bps, elapsed_time);
		if (bps >= tg->bps[READ][LIMIT_LOW])
			tg->last_low_overflow_time[READ] = now;
	}

	if (tg->bps[WRITE][LIMIT_LOW]) {
		bps = tg->last_bytes_disp[WRITE] * HZ;
		do_div(bps, elapsed_time);
		if (bps >= tg->bps[WRITE][LIMIT_LOW])
			tg->last_low_overflow_time[WRITE] = now;
	}

	if (tg->iops[READ][LIMIT_LOW]) {
		iops = tg->last_io_disp[READ] * HZ / elapsed_time;
		if (iops >= tg->iops[READ][LIMIT_LOW])
			tg->last_low_overflow_time[READ] = now;
	}

	if (tg->iops[WRITE][LIMIT_LOW]) {
		iops = tg->last_io_disp[WRITE] * HZ / elapsed_time;
		if (iops >= tg->iops[WRITE][LIMIT_LOW])
			tg->last_low_overflow_time[WRITE] = now;
	}

	/*
	 * If cgroup is below low limit, consider downgrade and throttle
	 * other cgroups
	 */
	if (throtl_hierarchy_can_downgrade(tg))
		throtl_downgrade_state(tg->td, LIMIT_LOW);

	tg->last_bytes_disp[READ] = 0;
	tg->last_bytes_disp[WRITE] = 0;
	tg->last_io_disp[READ] = 0;
	tg->last_io_disp[WRITE] = 0;
}

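/*
 * The rate estimate above is just bytes (or IOs) per elapsed jiffies,
 * rescaled to one second.  Worked example with hypothetical numbers and
 * HZ=1000: if 1048576 bytes were dispatched over elapsed_time=100
 * jiffies (100ms), then bps = 1048576 * 1000 / 100 = 10485760, i.e.
 * ~10MB/s, which is then compared against the configured low limit.
 */
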
static void blk_throtl_update_idletime(struct throtl_grp *tg)
{
	unsigned long now = ktime_get_ns() >> 10;
	unsigned long last_finish_time = tg->last_finish_time;

	if (now <= last_finish_time || last_finish_time == 0 ||
	    last_finish_time == tg->checked_last_finish_time)
		return;

	tg->avg_idletime = (tg->avg_idletime * 7 + now - last_finish_time) >> 3;
	tg->checked_last_finish_time = last_finish_time;
}

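/*
 * avg_idletime is an exponentially weighted moving average with weight
 * 7/8 on the old value: avg = (7 * avg + sample) / 8.  With hypothetical
 * numbers, an old average of 800us and a new idle sample of 80us give
 * (7 * 800 + 80) / 8 = 710us.  The same 7/8 filter is used for the
 * per-bucket latency averages below.
 */
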
#ifdef CONFIG_BLK_DEV_THROTTLING_LOW
static void throtl_update_latency_buckets(struct throtl_data *td)
{
	struct avg_latency_bucket avg_latency[LATENCY_BUCKET_SIZE];
	int i, cpu;
	unsigned long last_latency = 0;
	unsigned long latency;

	if (!blk_queue_nonrot(td->queue))
		return;
	if (time_before(jiffies, td->last_calculate_time + HZ))
		return;
	td->last_calculate_time = jiffies;

	memset(avg_latency, 0, sizeof(avg_latency));
	for (i = 0; i < LATENCY_BUCKET_SIZE; i++) {
		struct latency_bucket *tmp = &td->tmp_buckets[i];

		for_each_possible_cpu(cpu) {
			struct latency_bucket *bucket;

			/* this isn't race free, but ok in practice */
			bucket = per_cpu_ptr(td->latency_buckets, cpu);
			tmp->total_latency += bucket[i].total_latency;
			tmp->samples += bucket[i].samples;
			bucket[i].total_latency = 0;
			bucket[i].samples = 0;
		}

		if (tmp->samples >= 32) {
			int samples = tmp->samples;

			latency = tmp->total_latency;

			tmp->total_latency = 0;
			tmp->samples = 0;
			latency /= samples;
			if (latency == 0)
				continue;
			avg_latency[i].latency = latency;
		}
	}

	for (i = 0; i < LATENCY_BUCKET_SIZE; i++) {
		if (!avg_latency[i].latency) {
			if (td->avg_buckets[i].latency < last_latency)
				td->avg_buckets[i].latency = last_latency;
			continue;
		}

		if (!td->avg_buckets[i].valid)
			latency = avg_latency[i].latency;
		else
			latency = (td->avg_buckets[i].latency * 7 +
				avg_latency[i].latency) >> 3;

		td->avg_buckets[i].latency = max(latency, last_latency);
		td->avg_buckets[i].valid = true;
		last_latency = td->avg_buckets[i].latency;
	}

	for (i = 0; i < LATENCY_BUCKET_SIZE; i++)
		throtl_log(&td->service_queue,
			"Latency bucket %d: latency=%ld, valid=%d", i,
			td->avg_buckets[i].latency, td->avg_buckets[i].valid);
}
#else
static inline void throtl_update_latency_buckets(struct throtl_data *td)
{
}
#endif

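/*
 * Note how the second loop above clamps each bucket with
 * max(latency, last_latency): buckets are indexed by request size, so
 * the expected latency is forced to be non-decreasing as the request
 * size grows, even when a larger bucket had too few samples this round.
 */
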
static void blk_throtl_assoc_bio(struct throtl_grp *tg, struct bio *bio)
{
#ifdef CONFIG_BLK_DEV_THROTTLING_LOW
	int ret;

	ret = bio_associate_current(bio);
	if (ret == 0 || ret == -EBUSY)
		bio->bi_cg_private = tg;
	blk_stat_set_issue(&bio->bi_issue_stat, bio_sectors(bio));
#else
	bio_associate_current(bio);
#endif
}

bool blk_throtl_bio(struct request_queue *q, struct blkcg_gq *blkg,
		    struct bio *bio)
{
	struct throtl_qnode *qn = NULL;
	struct throtl_grp *tg = blkg_to_tg(blkg ?: q->root_blkg);
	struct throtl_service_queue *sq;
	bool rw = bio_data_dir(bio);
	bool throttled = false;
	struct throtl_data *td = tg->td;

	WARN_ON_ONCE(!rcu_read_lock_held());

	/* see throtl_charge_bio() */
	if (bio_flagged(bio, BIO_THROTTLED) || !tg->has_rules[rw])
		goto out;

	spin_lock_irq(q->queue_lock);

	throtl_update_latency_buckets(td);

	if (unlikely(blk_queue_bypass(q)))
		goto out_unlock;

	blk_throtl_assoc_bio(tg, bio);
	blk_throtl_update_idletime(tg);

	sq = &tg->service_queue;

again:
	while (true) {
		if (tg->last_low_overflow_time[rw] == 0)
			tg->last_low_overflow_time[rw] = jiffies;
		throtl_downgrade_check(tg);
		throtl_upgrade_check(tg);
		/* throtl is FIFO - if bios are already queued, this bio
		 * should be queued too */
		if (sq->nr_queued[rw])
			break;

		/* if above limits, break to queue */
		if (!tg_may_dispatch(tg, bio, NULL)) {
			tg->last_low_overflow_time[rw] = jiffies;
			if (throtl_can_upgrade(td, tg)) {
				throtl_upgrade_state(td);
				goto again;
			}
			break;
		}

		/* within limits, let's charge and dispatch directly */
		throtl_charge_bio(tg, bio);

		/*
		 * We need to trim the slice even when bios are not being
		 * queued, otherwise it might happen that a bio is not
		 * queued for a long time and the slice keeps on extending
		 * while trim is never called.  If limits were then
		 * reduced suddenly, we would take all the IO dispatched
		 * so far into account at the new low rate, and newly
		 * queued IO would get a really long dispatch time.
		 *
		 * So keep on trimming the slice even if no bio is queued.
		 */
		throtl_trim_slice(tg, rw);

		/*
		 * @bio passed through this layer without being throttled.
		 * Climb up the ladder.  If we're already at the top, it
		 * can be executed directly.
		 */
		qn = &tg->qnode_on_parent[rw];
		sq = sq->parent_sq;
		tg = sq_to_tg(sq);
		if (!tg)
			goto out_unlock;
	}

	/* out-of-limit, queue to @tg */
	throtl_log(sq, "[%c] bio. bdisp=%llu sz=%u bps=%llu iodisp=%u iops=%u queued=%d/%d",
		   rw == READ ? 'R' : 'W',
		   tg->bytes_disp[rw], bio->bi_iter.bi_size,
		   tg_bps_limit(tg, rw),
		   tg->io_disp[rw], tg_iops_limit(tg, rw),
		   sq->nr_queued[READ], sq->nr_queued[WRITE]);

	tg->last_low_overflow_time[rw] = jiffies;

	td->nr_queued[rw]++;
	throtl_add_bio_tg(bio, qn, tg);
	throttled = true;

	/*
	 * Update @tg's dispatch time and force schedule dispatch if @tg
	 * was empty before @bio.  The forced scheduling isn't likely to
	 * cause undue delay as @bio is likely to be dispatched directly
	 * if its @tg's disptime is not in the future.
	 */
	if (tg->flags & THROTL_TG_WAS_EMPTY) {
		tg_update_disptime(tg);
		throtl_schedule_next_dispatch(tg->service_queue.parent_sq, true);
	}

out_unlock:
	spin_unlock_irq(q->queue_lock);
out:
	/*
	 * As multiple blk-throtls may stack in the same issue path, we
	 * don't want bios to leave with the flag set.  Clear the flag if
	 * being issued.
	 */
	if (!throttled)
		bio_clear_flag(bio, BIO_THROTTLED);

#ifdef CONFIG_BLK_DEV_THROTTLING_LOW
	if (throttled || !td->track_bio_latency)
		bio->bi_issue_stat.stat |= SKIP_LATENCY;
#endif
	return throttled;
}

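/*
 * Note on the return value: true means the bio was queued inside the
 * throttling layer and the caller must not issue it now; false means
 * the bio was charged (or is exempt) and should be submitted as usual.
 */
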
#ifdef CONFIG_BLK_DEV_THROTTLING_LOW
static void throtl_track_latency(struct throtl_data *td, sector_t size,
	int op, unsigned long time)
{
	struct latency_bucket *latency;
	int index;

	if (!td || td->limit_index != LIMIT_LOW || op != REQ_OP_READ ||
	    !blk_queue_nonrot(td->queue))
		return;

	index = request_bucket_index(size);

	latency = get_cpu_ptr(td->latency_buckets);
	latency[index].total_latency += time;
	latency[index].samples++;
	put_cpu_ptr(td->latency_buckets);
}

void blk_throtl_stat_add(struct request *rq, u64 time_ns)
{
	struct request_queue *q = rq->q;
	struct throtl_data *td = q->td;

	throtl_track_latency(td, blk_stat_size(&rq->issue_stat),
		req_op(rq), time_ns >> 10);
}

void blk_throtl_bio_endio(struct bio *bio)
{
	struct throtl_grp *tg;
	u64 finish_time_ns;
	unsigned long finish_time;
	unsigned long start_time;
	unsigned long lat;

	tg = bio->bi_cg_private;
	if (!tg)
		return;
	bio->bi_cg_private = NULL;

	finish_time_ns = ktime_get_ns();
	tg->last_finish_time = finish_time_ns >> 10;

	start_time = blk_stat_time(&bio->bi_issue_stat) >> 10;
	finish_time = __blk_stat_time(finish_time_ns) >> 10;
	if (!start_time || finish_time <= start_time)
		return;

	lat = finish_time - start_time;
	/* this is only for bio based drivers */
	if (!(bio->bi_issue_stat.stat & SKIP_LATENCY))
		throtl_track_latency(tg->td, blk_stat_size(&bio->bi_issue_stat),
			bio_op(bio), lat);

	if (tg->latency_target) {
		int bucket;
		unsigned int threshold;

		bucket = request_bucket_index(
			blk_stat_size(&bio->bi_issue_stat));
		threshold = tg->td->avg_buckets[bucket].latency +
			tg->latency_target;
		if (lat > threshold)
			tg->bad_bio_cnt++;
		/*
		 * Not race free: the count may end up slightly wrong,
		 * which at worst means a cgroup gets throttled when it
		 * shouldn't be.
		 */
		tg->bio_cnt++;
	}

	if (time_after(jiffies, tg->bio_cnt_reset_time) || tg->bio_cnt > 1024) {
		tg->bio_cnt_reset_time = tg->td->throtl_slice + jiffies;
		tg->bio_cnt /= 2;
		tg->bad_bio_cnt /= 2;
	}
}
#endif

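/*
 * Worked example for the threshold above (hypothetical numbers): if the
 * measured baseline for this size bucket is avg_buckets[bucket].latency
 * = 2000us and the cgroup configured latency=50 (approximately
 * microseconds; the code uses ns >> 10), a bio completing in more than
 * 2050us counts as "bad".  The periodic halving of bio_cnt/bad_bio_cnt
 * ages the statistics so old samples fade out.
 */
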
/*
 * Dispatch all bios from all children tg's queued on @parent_sq.  On
 * return, @parent_sq is guaranteed to not have any active children tg's
 * and all bios from previously active tg's are on @parent_sq->queued[].
 */
static void tg_drain_bios(struct throtl_service_queue *parent_sq)
{
	struct throtl_grp *tg;

	while ((tg = throtl_rb_first(parent_sq))) {
		struct throtl_service_queue *sq = &tg->service_queue;
		struct bio *bio;

		throtl_dequeue_tg(tg);

		while ((bio = throtl_peek_queued(&sq->queued[READ])))
			tg_dispatch_one_bio(tg, bio_data_dir(bio));
		while ((bio = throtl_peek_queued(&sq->queued[WRITE])))
			tg_dispatch_one_bio(tg, bio_data_dir(bio));
	}
}

/**
 * blk_throtl_drain - drain throttled bios
 * @q: request_queue to drain throttled bios for
 *
 * Dispatch all currently throttled bios on @q through ->make_request_fn().
 */
void blk_throtl_drain(struct request_queue *q)
	__releases(q->queue_lock) __acquires(q->queue_lock)
{
	struct throtl_data *td = q->td;
	struct blkcg_gq *blkg;
	struct cgroup_subsys_state *pos_css;
	struct bio *bio;
	int rw;

	queue_lockdep_assert_held(q);
	rcu_read_lock();

	/*
	 * Drain each tg while doing post-order walk on the blkg tree, so
	 * that all bios are propagated to td->service_queue.  It'd be
	 * better to walk the service_queue tree directly but the blkg
	 * walk is easier.
	 */
	blkg_for_each_descendant_post(blkg, pos_css, td->queue->root_blkg)
		tg_drain_bios(&blkg_to_tg(blkg)->service_queue);

	/* finally, transfer bios from top-level tg's into the td */
	tg_drain_bios(&td->service_queue);

	rcu_read_unlock();
	spin_unlock_irq(q->queue_lock);

	/* all bios now should be in td->service_queue, issue them */
	for (rw = READ; rw <= WRITE; rw++)
		while ((bio = throtl_pop_queued(&td->service_queue.queued[rw],
						NULL)))
			generic_make_request(bio);

	spin_lock_irq(q->queue_lock);
}

int blk_throtl_init(struct request_queue *q)
{
	struct throtl_data *td;
	int ret;

	td = kzalloc_node(sizeof(*td), GFP_KERNEL, q->node);
	if (!td)
		return -ENOMEM;
	td->latency_buckets = __alloc_percpu(sizeof(struct latency_bucket) *
		LATENCY_BUCKET_SIZE, __alignof__(u64));
	if (!td->latency_buckets) {
		kfree(td);
		return -ENOMEM;
	}

	INIT_WORK(&td->dispatch_work, blk_throtl_dispatch_work_fn);
	throtl_service_queue_init(&td->service_queue);

	q->td = td;
	td->queue = q;

	td->limit_valid[LIMIT_MAX] = true;
	td->limit_index = LIMIT_MAX;
	td->low_upgrade_time = jiffies;
	td->low_downgrade_time = jiffies;

	/* activate policy */
	ret = blkcg_activate_policy(q, &blkcg_policy_throtl);
	if (ret) {
		free_percpu(td->latency_buckets);
		kfree(td);
	}
	return ret;
}

void blk_throtl_exit(struct request_queue *q)
{
	BUG_ON(!q->td);
	throtl_shutdown_wq(q);
	blkcg_deactivate_policy(q, &blkcg_policy_throtl);
	free_percpu(q->td->latency_buckets);
	kfree(q->td);
}

void blk_throtl_register_queue(struct request_queue *q)
{
	struct throtl_data *td;

	td = q->td;
	BUG_ON(!td);

	if (blk_queue_nonrot(q))
		td->throtl_slice = DFL_THROTL_SLICE_SSD;
	else
		td->throtl_slice = DFL_THROTL_SLICE_HD;
#ifndef CONFIG_BLK_DEV_THROTTLING_LOW
	/* if no low limit, use previous default */
	td->throtl_slice = DFL_THROTL_SLICE_HD;
#endif

	td->track_bio_latency = !q->mq_ops && !q->request_fn;
	if (!td->track_bio_latency)
		blk_stat_enable_accounting(q);
}

#ifdef CONFIG_BLK_DEV_THROTTLING_LOW
ssize_t blk_throtl_sample_time_show(struct request_queue *q, char *page)
{
	if (!q->td)
		return -EINVAL;
	return sprintf(page, "%u\n", jiffies_to_msecs(q->td->throtl_slice));
}

ssize_t blk_throtl_sample_time_store(struct request_queue *q,
	const char *page, size_t count)
{
	unsigned long v;
	unsigned long t;

	if (!q->td)
		return -EINVAL;
	if (kstrtoul(page, 10, &v))
		return -EINVAL;
	t = msecs_to_jiffies(v);
	if (t == 0 || t > MAX_THROTL_SLICE)
		return -EINVAL;
	q->td->throtl_slice = t;
	return count;
}
#endif

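/*
 * The helpers above back a queue sysfs attribute (throttle_sample_time
 * in mainline), so the slice can be tuned at runtime, e.g. (hypothetical
 * device name):
 *
 *	# echo 50 > /sys/block/sda/queue/throttle_sample_time
 *
 * The value is taken in milliseconds and must convert to a jiffies count
 * in (0, MAX_THROTL_SLICE]; with HZ=1000, 50ms becomes 50 jiffies.
 */
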
static int __init throtl_init(void)
{
	kthrotld_workqueue = alloc_workqueue("kthrotld", WQ_MEM_RECLAIM, 0);
	if (!kthrotld_workqueue)
		panic("Failed to create kthrotld\n");

	return blkcg_policy_register(&blkcg_policy_throtl);
}

module_init(throtl_init);