block/blk-cgroup.c

/*
 * Common Block IO controller cgroup interface
 *
 * Based on ideas and code from CFQ, CFS and BFQ:
 * Copyright (C) 2003 Jens Axboe <axboe@kernel.dk>
 *
 * Copyright (C) 2008 Fabio Checconi <fabio@gandalf.sssup.it>
 *		      Paolo Valente <paolo.valente@unimore.it>
 *
 * Copyright (C) 2009 Vivek Goyal <vgoyal@redhat.com>
 *		      Nauman Rafique <nauman@google.com>
 *
 * For policy-specific per-blkcg data:
 * Copyright (C) 2015 Paolo Valente <paolo.valente@unimore.it>
 *                    Arianna Avanzini <avanzini.arianna@gmail.com>
 */
#include <linux/ioprio.h>
#include <linux/kdev_t.h>
#include <linux/module.h>
#include <linux/sched/signal.h>
#include <linux/err.h>
#include <linux/blkdev.h>
#include <linux/backing-dev.h>
#include <linux/slab.h>
#include <linux/genhd.h>
#include <linux/delay.h>
#include <linux/atomic.h>
#include <linux/ctype.h>
#include <linux/blk-cgroup.h>
#include "blk.h"

#define MAX_KEY_LEN 100

/*
 * blkcg_pol_mutex protects blkcg_policy[] and policy [de]activation.
 * blkcg_pol_register_mutex nests outside of it and synchronizes entire
 * policy [un]register operations including cgroup file additions /
 * removals.  Putting cgroup file registration outside blkcg_pol_mutex
 * allows grabbing it from cgroup callbacks.
 */
static DEFINE_MUTEX(blkcg_pol_register_mutex);
static DEFINE_MUTEX(blkcg_pol_mutex);

struct blkcg blkcg_root;
EXPORT_SYMBOL_GPL(blkcg_root);

struct cgroup_subsys_state * const blkcg_root_css = &blkcg_root.css;

static struct blkcg_policy *blkcg_policy[BLKCG_MAX_POLS];

static LIST_HEAD(all_blkcgs);		/* protected by blkcg_pol_mutex */

static bool blkcg_policy_enabled(struct request_queue *q,
				 const struct blkcg_policy *pol)
{
	return pol && test_bit(pol->plid, q->blkcg_pols);
}

/**
 * blkg_free - free a blkg
 * @blkg: blkg to free
 *
 * Free @blkg which may be partially allocated.
 */
static void blkg_free(struct blkcg_gq *blkg)
{
	int i;

	if (!blkg)
		return;

	for (i = 0; i < BLKCG_MAX_POLS; i++)
		if (blkg->pd[i])
			blkcg_policy[i]->pd_free_fn(blkg->pd[i]);

	if (blkg->blkcg != &blkcg_root)
		blk_exit_rl(blkg->q, &blkg->rl);

	blkg_rwstat_exit(&blkg->stat_ios);
	blkg_rwstat_exit(&blkg->stat_bytes);
	kfree(blkg);
}

/**
 * blkg_alloc - allocate a blkg
 * @blkcg: block cgroup the new blkg is associated with
 * @q: request_queue the new blkg is associated with
 * @gfp_mask: allocation mask to use
 *
 * Allocate a new blkg associating @blkcg and @q.
 */
static struct blkcg_gq *blkg_alloc(struct blkcg *blkcg, struct request_queue *q,
				   gfp_t gfp_mask)
{
	struct blkcg_gq *blkg;
	int i;

	/* alloc and init base part */
	blkg = kzalloc_node(sizeof(*blkg), gfp_mask, q->node);
	if (!blkg)
		return NULL;

	if (blkg_rwstat_init(&blkg->stat_bytes, gfp_mask) ||
	    blkg_rwstat_init(&blkg->stat_ios, gfp_mask))
		goto err_free;

	blkg->q = q;
	INIT_LIST_HEAD(&blkg->q_node);
	blkg->blkcg = blkcg;
	atomic_set(&blkg->refcnt, 1);

	/* root blkg uses @q->root_rl, init rl only for !root blkgs */
	if (blkcg != &blkcg_root) {
		if (blk_init_rl(&blkg->rl, q, gfp_mask))
			goto err_free;
		blkg->rl.blkg = blkg;
	}

	for (i = 0; i < BLKCG_MAX_POLS; i++) {
		struct blkcg_policy *pol = blkcg_policy[i];
		struct blkg_policy_data *pd;

		if (!blkcg_policy_enabled(q, pol))
			continue;

		/* alloc per-policy data and attach it to blkg */
		pd = pol->pd_alloc_fn(gfp_mask, q->node);
		if (!pd)
			goto err_free;

		blkg->pd[i] = pd;
		pd->blkg = blkg;
		pd->plid = i;
	}

	return blkg;

err_free:
	blkg_free(blkg);
	return NULL;
}

struct blkcg_gq *blkg_lookup_slowpath(struct blkcg *blkcg,
				      struct request_queue *q, bool update_hint)
{
	struct blkcg_gq *blkg;

	/*
	 * Hint didn't match.  Look up from the radix tree.  Note that the
	 * hint can only be updated under queue_lock as otherwise @blkg
	 * could have already been removed from blkg_tree.  The caller is
	 * responsible for grabbing queue_lock if @update_hint.
	 */
	blkg = radix_tree_lookup(&blkcg->blkg_tree, q->id);
	if (blkg && blkg->q == q) {
		if (update_hint) {
			lockdep_assert_held(q->queue_lock);
			rcu_assign_pointer(blkcg->blkg_hint, blkg);
		}
		return blkg;
	}

	return NULL;
}
EXPORT_SYMBOL_GPL(blkg_lookup_slowpath);

/*
 * If @new_blkg is %NULL, this function tries to allocate a new one as
 * necessary using %GFP_NOWAIT.  @new_blkg is always consumed on return.
 */
static struct blkcg_gq *blkg_create(struct blkcg *blkcg,
				    struct request_queue *q,
				    struct blkcg_gq *new_blkg)
{
	struct blkcg_gq *blkg;
	struct bdi_writeback_congested *wb_congested;
	int i, ret;

	WARN_ON_ONCE(!rcu_read_lock_held());
	lockdep_assert_held(q->queue_lock);

	/* blkg holds a reference to blkcg */
	if (!css_tryget_online(&blkcg->css)) {
		ret = -ENODEV;
		goto err_free_blkg;
	}

	wb_congested = wb_congested_get_create(q->backing_dev_info,
					       blkcg->css.id,
					       GFP_NOWAIT | __GFP_NOWARN);
	if (!wb_congested) {
		ret = -ENOMEM;
		goto err_put_css;
	}

	/* allocate */
	if (!new_blkg) {
		new_blkg = blkg_alloc(blkcg, q, GFP_NOWAIT | __GFP_NOWARN);
		if (unlikely(!new_blkg)) {
			ret = -ENOMEM;
			goto err_put_congested;
		}
	}
	blkg = new_blkg;
	blkg->wb_congested = wb_congested;

	/* link parent */
	if (blkcg_parent(blkcg)) {
		blkg->parent = __blkg_lookup(blkcg_parent(blkcg), q, false);
		if (WARN_ON_ONCE(!blkg->parent)) {
			ret = -ENODEV;
			goto err_put_congested;
		}
		blkg_get(blkg->parent);
	}

	/* invoke per-policy init */
	for (i = 0; i < BLKCG_MAX_POLS; i++) {
		struct blkcg_policy *pol = blkcg_policy[i];

		if (blkg->pd[i] && pol->pd_init_fn)
			pol->pd_init_fn(blkg->pd[i]);
	}

	/* insert */
	spin_lock(&blkcg->lock);
	ret = radix_tree_insert(&blkcg->blkg_tree, q->id, blkg);
	if (likely(!ret)) {
		hlist_add_head_rcu(&blkg->blkcg_node, &blkcg->blkg_list);
		list_add(&blkg->q_node, &q->blkg_list);

		for (i = 0; i < BLKCG_MAX_POLS; i++) {
			struct blkcg_policy *pol = blkcg_policy[i];

			if (blkg->pd[i] && pol->pd_online_fn)
				pol->pd_online_fn(blkg->pd[i]);
		}
	}
	blkg->online = true;
	spin_unlock(&blkcg->lock);

	if (!ret)
		return blkg;

	/* @blkg failed to be fully initialized, use the usual release path */
	blkg_put(blkg);
	return ERR_PTR(ret);

err_put_congested:
	wb_congested_put(wb_congested);
err_put_css:
	css_put(&blkcg->css);
err_free_blkg:
	blkg_free(new_blkg);
	return ERR_PTR(ret);
}

/**
 * blkg_lookup_create - lookup blkg, try to create one if not there
 * @blkcg: blkcg of interest
 * @q: request_queue of interest
 *
 * Lookup blkg for the @blkcg - @q pair.  If it doesn't exist, try to
 * create one.  blkg creation is performed recursively from blkcg_root such
 * that all non-root blkg's have access to the parent blkg.  This function
 * should be called under RCU read lock and @q->queue_lock.
 *
 * Returns pointer to the looked up or created blkg on success, ERR_PTR()
 * value on error.  If @q is dead, returns ERR_PTR(-ENODEV).  If @q is not
 * dead and bypassing, returns ERR_PTR(-EBUSY).
 */
struct blkcg_gq *blkg_lookup_create(struct blkcg *blkcg,
				    struct request_queue *q)
{
	struct blkcg_gq *blkg;

	WARN_ON_ONCE(!rcu_read_lock_held());
	lockdep_assert_held(q->queue_lock);

	/*
	 * This could be the first entry point of blkcg implementation and
	 * we shouldn't allow anything to go through for a bypassing queue.
	 */
	if (unlikely(blk_queue_bypass(q)))
		return ERR_PTR(blk_queue_dying(q) ? -ENODEV : -EBUSY);

	blkg = __blkg_lookup(blkcg, q, true);
	if (blkg)
		return blkg;

	/*
	 * Create blkgs walking down from blkcg_root to @blkcg, so that all
	 * non-root blkgs have access to their parents.
	 */
	while (true) {
		struct blkcg *pos = blkcg;
		struct blkcg *parent = blkcg_parent(blkcg);

		while (parent && !__blkg_lookup(parent, q, false)) {
			pos = parent;
			parent = blkcg_parent(parent);
		}

		blkg = blkg_create(pos, q, NULL);
		if (pos == blkcg || IS_ERR(blkg))
			return blkg;
	}
}
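
/*
 * A minimal usage sketch (hypothetical caller; all example_* names are
 * assumptions, not part of the kernel API): blkg_lookup_create() must
 * run under both the RCU read lock and @q->queue_lock, and may return
 * an ERR_PTR() while the queue is bypassing or dying, in which case
 * falling back to @q->root_blkg is a common pattern.
 */
#if 0
static struct blkcg_gq *example_get_blkg(struct blkcg *blkcg,
					 struct request_queue *q)
{
	struct blkcg_gq *blkg;

	rcu_read_lock();
	spin_lock_irq(q->queue_lock);
	blkg = blkg_lookup_create(blkcg, q);
	if (IS_ERR(blkg))
		blkg = q->root_blkg;	/* fall back to the root group */
	spin_unlock_irq(q->queue_lock);
	rcu_read_unlock();
	return blkg;
}
#endif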

static void blkg_destroy(struct blkcg_gq *blkg)
{
	struct blkcg *blkcg = blkg->blkcg;
	struct blkcg_gq *parent = blkg->parent;
	int i;

	lockdep_assert_held(blkg->q->queue_lock);
	lockdep_assert_held(&blkcg->lock);

	/* Something wrong if we are trying to remove same group twice */
	WARN_ON_ONCE(list_empty(&blkg->q_node));
	WARN_ON_ONCE(hlist_unhashed(&blkg->blkcg_node));

	for (i = 0; i < BLKCG_MAX_POLS; i++) {
		struct blkcg_policy *pol = blkcg_policy[i];

		if (blkg->pd[i] && pol->pd_offline_fn)
			pol->pd_offline_fn(blkg->pd[i]);
	}

	if (parent) {
		blkg_rwstat_add_aux(&parent->stat_bytes, &blkg->stat_bytes);
		blkg_rwstat_add_aux(&parent->stat_ios, &blkg->stat_ios);
	}

	blkg->online = false;

	radix_tree_delete(&blkcg->blkg_tree, blkg->q->id);
	list_del_init(&blkg->q_node);
	hlist_del_init_rcu(&blkg->blkcg_node);

	/*
	 * Both setting lookup hint to and clearing it from @blkg are done
	 * under queue_lock.  If it's not pointing to @blkg now, it never
	 * will.  Hint assignment itself can race safely.
	 */
	if (rcu_access_pointer(blkcg->blkg_hint) == blkg)
		rcu_assign_pointer(blkcg->blkg_hint, NULL);

	/*
	 * Put the reference taken at the time of creation so that when all
	 * queues are gone, group can be destroyed.
	 */
	blkg_put(blkg);
}

/**
 * blkg_destroy_all - destroy all blkgs associated with a request_queue
 * @q: request_queue of interest
 *
 * Destroy all blkgs associated with @q.
 */
static void blkg_destroy_all(struct request_queue *q)
{
	struct blkcg_gq *blkg, *n;

	lockdep_assert_held(q->queue_lock);

	list_for_each_entry_safe(blkg, n, &q->blkg_list, q_node) {
		struct blkcg *blkcg = blkg->blkcg;

		spin_lock(&blkcg->lock);
		blkg_destroy(blkg);
		spin_unlock(&blkcg->lock);
	}

	q->root_blkg = NULL;
	q->root_rl.blkg = NULL;
}

/*
 * A group is RCU protected, but having an rcu lock does not mean that one
 * can access all the fields of blkg and assume these are valid.  For
 * example, don't try to follow throtl_data and request queue links.
 *
 * Having a reference to blkg under an rcu allows accesses to only values
 * local to groups like group stats and group rate limits.
 */
void __blkg_release_rcu(struct rcu_head *rcu_head)
{
	struct blkcg_gq *blkg = container_of(rcu_head, struct blkcg_gq, rcu_head);

	/* release the blkcg and parent blkg refs this blkg has been holding */
	css_put(&blkg->blkcg->css);
	if (blkg->parent)
		blkg_put(blkg->parent);

	wb_congested_put(blkg->wb_congested);

	blkg_free(blkg);
}
EXPORT_SYMBOL_GPL(__blkg_release_rcu);

403/*
404 * The next function used by blk_queue_for_each_rl(). It's a bit tricky
405 * because the root blkg uses @q->root_rl instead of its own rl.
406 */
struct request_list *__blk_queue_next_rl(struct request_list *rl,
					 struct request_queue *q)
{
	struct list_head *ent;
	struct blkcg_gq *blkg;

	/*
	 * Determine the current blkg list_head.  The first entry is
	 * root_rl which is off @q->blkg_list and mapped to the head.
	 */
	if (rl == &q->root_rl) {
		ent = &q->blkg_list;
		/* There are no more block groups, hence no request lists */
		if (list_empty(ent))
			return NULL;
	} else {
		blkg = container_of(rl, struct blkcg_gq, rl);
		ent = &blkg->q_node;
	}

	/* walk to the next list_head, skip root blkcg */
	ent = ent->next;
	if (ent == &q->root_blkg->q_node)
		ent = ent->next;
	if (ent == &q->blkg_list)
		return NULL;

	blkg = container_of(ent, struct blkcg_gq, q_node);
	return &blkg->rl;
}
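
/*
 * A minimal sketch of the intended consumer (assuming the
 * blk_queue_for_each_rl() iterator from blkdev.h; example_* names are
 * hypothetical): start at @q->root_rl and let __blk_queue_next_rl()
 * walk every per-blkg request list, e.g. to wake up all sleepers on a
 * queue.
 */
#if 0
static void example_wake_all_rls(struct request_queue *q)
{
	struct request_list *rl;

	blk_queue_for_each_rl(rl, q) {
		wake_up_all(&rl->wait[BLK_RW_SYNC]);
		wake_up_all(&rl->wait[BLK_RW_ASYNC]);
	}
}
#endif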

static int blkcg_reset_stats(struct cgroup_subsys_state *css,
			     struct cftype *cftype, u64 val)
{
	struct blkcg *blkcg = css_to_blkcg(css);
	struct blkcg_gq *blkg;
	int i;

	mutex_lock(&blkcg_pol_mutex);
	spin_lock_irq(&blkcg->lock);

	/*
	 * Note that stat reset is racy - it doesn't synchronize against
	 * stat updates.  This is a debug feature which shouldn't exist
	 * anyway.  If you get hit by a race, retry.
	 */
	hlist_for_each_entry(blkg, &blkcg->blkg_list, blkcg_node) {
		blkg_rwstat_reset(&blkg->stat_bytes);
		blkg_rwstat_reset(&blkg->stat_ios);

		for (i = 0; i < BLKCG_MAX_POLS; i++) {
			struct blkcg_policy *pol = blkcg_policy[i];

			if (blkg->pd[i] && pol->pd_reset_stats_fn)
				pol->pd_reset_stats_fn(blkg->pd[i]);
		}
	}

	spin_unlock_irq(&blkcg->lock);
	mutex_unlock(&blkcg_pol_mutex);
	return 0;
}

const char *blkg_dev_name(struct blkcg_gq *blkg)
{
	/* some drivers (floppy) instantiate a queue w/o disk registered */
	if (blkg->q->backing_dev_info->dev)
		return dev_name(blkg->q->backing_dev_info->dev);
	return NULL;
}
EXPORT_SYMBOL_GPL(blkg_dev_name);

/**
 * blkcg_print_blkgs - helper for printing per-blkg data
 * @sf: seq_file to print to
 * @blkcg: blkcg of interest
 * @prfill: fill function to print out a blkg
 * @pol: policy in question
 * @data: data to be passed to @prfill
 * @show_total: to print out sum of prfill return values or not
 *
 * This function invokes @prfill on each blkg of @blkcg if pd for the
 * policy specified by @pol exists.  @prfill is invoked with @sf, the
 * policy data and @data and the matching queue lock held.  If @show_total
 * is %true, the sum of the return values from @prfill is printed with
 * "Total" label at the end.
 *
 * This is to be used to construct print functions for
 * cftype->seq_show method.
 */
void blkcg_print_blkgs(struct seq_file *sf, struct blkcg *blkcg,
		       u64 (*prfill)(struct seq_file *,
				     struct blkg_policy_data *, int),
		       const struct blkcg_policy *pol, int data,
		       bool show_total)
{
	struct blkcg_gq *blkg;
	u64 total = 0;

	rcu_read_lock();
	hlist_for_each_entry_rcu(blkg, &blkcg->blkg_list, blkcg_node) {
		spin_lock_irq(blkg->q->queue_lock);
		if (blkcg_policy_enabled(blkg->q, pol))
			total += prfill(sf, blkg->pd[pol->plid], data);
		spin_unlock_irq(blkg->q->queue_lock);
	}
	rcu_read_unlock();

	if (show_total)
		seq_printf(sf, "Total %llu\n", (unsigned long long)total);
}
EXPORT_SYMBOL_GPL(blkcg_print_blkgs);
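
/*
 * A minimal sketch of a policy-side consumer (all example_* names are
 * hypothetical): a prfill callback prints one line per blkg via
 * __blkg_prfill_u64() and blkcg_print_blkgs() drives it over every
 * blkg of the cgroup, appending a "Total" line.
 */
#if 0
static struct blkcg_policy example_blkcg_policy;

static u64 example_prfill(struct seq_file *sf, struct blkg_policy_data *pd,
			  int data)
{
	/* print the same value for every device; real policies read @pd */
	return __blkg_prfill_u64(sf, pd, data);
}

static int example_seq_show(struct seq_file *sf, void *v)
{
	blkcg_print_blkgs(sf, css_to_blkcg(seq_css(sf)), example_prfill,
			  &example_blkcg_policy, 42, true);
	return 0;
}
#endif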

/**
 * __blkg_prfill_u64 - prfill helper for a single u64 value
 * @sf: seq_file to print to
 * @pd: policy private data of interest
 * @v: value to print
 *
 * Print @v to @sf for the device associated with @pd.
 */
u64 __blkg_prfill_u64(struct seq_file *sf, struct blkg_policy_data *pd, u64 v)
{
	const char *dname = blkg_dev_name(pd->blkg);

	if (!dname)
		return 0;

	seq_printf(sf, "%s %llu\n", dname, (unsigned long long)v);
	return v;
}
EXPORT_SYMBOL_GPL(__blkg_prfill_u64);

/**
 * __blkg_prfill_rwstat - prfill helper for a blkg_rwstat
 * @sf: seq_file to print to
 * @pd: policy private data of interest
 * @rwstat: rwstat to print
 *
 * Print @rwstat to @sf for the device associated with @pd.
 */
u64 __blkg_prfill_rwstat(struct seq_file *sf, struct blkg_policy_data *pd,
			 const struct blkg_rwstat *rwstat)
{
	static const char *rwstr[] = {
		[BLKG_RWSTAT_READ]	= "Read",
		[BLKG_RWSTAT_WRITE]	= "Write",
		[BLKG_RWSTAT_SYNC]	= "Sync",
		[BLKG_RWSTAT_ASYNC]	= "Async",
	};
	const char *dname = blkg_dev_name(pd->blkg);
	u64 v;
	int i;

	if (!dname)
		return 0;

	for (i = 0; i < BLKG_RWSTAT_NR; i++)
		seq_printf(sf, "%s %s %llu\n", dname, rwstr[i],
			   (unsigned long long)atomic64_read(&rwstat->aux_cnt[i]));

	v = atomic64_read(&rwstat->aux_cnt[BLKG_RWSTAT_READ]) +
		atomic64_read(&rwstat->aux_cnt[BLKG_RWSTAT_WRITE]);
	seq_printf(sf, "%s Total %llu\n", dname, (unsigned long long)v);
	return v;
}
EXPORT_SYMBOL_GPL(__blkg_prfill_rwstat);

/**
 * blkg_prfill_stat - prfill callback for blkg_stat
 * @sf: seq_file to print to
 * @pd: policy private data of interest
 * @off: offset to the blkg_stat in @pd
 *
 * prfill callback for printing a blkg_stat.
 */
u64 blkg_prfill_stat(struct seq_file *sf, struct blkg_policy_data *pd, int off)
{
	return __blkg_prfill_u64(sf, pd, blkg_stat_read((void *)pd + off));
}
EXPORT_SYMBOL_GPL(blkg_prfill_stat);

/**
 * blkg_prfill_rwstat - prfill callback for blkg_rwstat
 * @sf: seq_file to print to
 * @pd: policy private data of interest
 * @off: offset to the blkg_rwstat in @pd
 *
 * prfill callback for printing a blkg_rwstat.
 */
u64 blkg_prfill_rwstat(struct seq_file *sf, struct blkg_policy_data *pd,
		       int off)
{
	struct blkg_rwstat rwstat = blkg_rwstat_read((void *)pd + off);

	return __blkg_prfill_rwstat(sf, pd, &rwstat);
}
EXPORT_SYMBOL_GPL(blkg_prfill_rwstat);

static u64 blkg_prfill_rwstat_field(struct seq_file *sf,
				    struct blkg_policy_data *pd, int off)
{
	struct blkg_rwstat rwstat = blkg_rwstat_read((void *)pd->blkg + off);

	return __blkg_prfill_rwstat(sf, pd, &rwstat);
}

/**
 * blkg_print_stat_bytes - seq_show callback for blkg->stat_bytes
 * @sf: seq_file to print to
 * @v: unused
 *
 * To be used as cftype->seq_show to print blkg->stat_bytes.
 * cftype->private must be set to the blkcg_policy.
 */
int blkg_print_stat_bytes(struct seq_file *sf, void *v)
{
	blkcg_print_blkgs(sf, css_to_blkcg(seq_css(sf)),
			  blkg_prfill_rwstat_field, (void *)seq_cft(sf)->private,
			  offsetof(struct blkcg_gq, stat_bytes), true);
	return 0;
}
EXPORT_SYMBOL_GPL(blkg_print_stat_bytes);

/**
 * blkg_print_stat_ios - seq_show callback for blkg->stat_ios
 * @sf: seq_file to print to
 * @v: unused
 *
 * To be used as cftype->seq_show to print blkg->stat_ios.  cftype->private
 * must be set to the blkcg_policy.
 */
int blkg_print_stat_ios(struct seq_file *sf, void *v)
{
	blkcg_print_blkgs(sf, css_to_blkcg(seq_css(sf)),
			  blkg_prfill_rwstat_field, (void *)seq_cft(sf)->private,
			  offsetof(struct blkcg_gq, stat_ios), true);
	return 0;
}
EXPORT_SYMBOL_GPL(blkg_print_stat_ios);
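
/*
 * A sketch of wiring the generic printers into a policy's cftypes
 * (hypothetical example_* names; blk-throttle uses the same pattern):
 * cftype->private carries the blkcg_policy so the prfill helpers can
 * locate the right blkg fields.
 */
#if 0
static struct cftype example_stat_files[] = {
	{
		.name = "example.io_service_bytes",
		.private = (unsigned long)&example_blkcg_policy,
		.seq_show = blkg_print_stat_bytes,
	},
	{
		.name = "example.io_serviced",
		.private = (unsigned long)&example_blkcg_policy,
		.seq_show = blkg_print_stat_ios,
	},
	{ }	/* terminate */
};
#endif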

static u64 blkg_prfill_rwstat_field_recursive(struct seq_file *sf,
					      struct blkg_policy_data *pd,
					      int off)
{
	struct blkg_rwstat rwstat = blkg_rwstat_recursive_sum(pd->blkg,
							      NULL, off);
	return __blkg_prfill_rwstat(sf, pd, &rwstat);
}

/**
 * blkg_print_stat_bytes_recursive - recursive version of blkg_print_stat_bytes
 * @sf: seq_file to print to
 * @v: unused
 */
int blkg_print_stat_bytes_recursive(struct seq_file *sf, void *v)
{
	blkcg_print_blkgs(sf, css_to_blkcg(seq_css(sf)),
			  blkg_prfill_rwstat_field_recursive,
			  (void *)seq_cft(sf)->private,
			  offsetof(struct blkcg_gq, stat_bytes), true);
	return 0;
}
EXPORT_SYMBOL_GPL(blkg_print_stat_bytes_recursive);

/**
 * blkg_print_stat_ios_recursive - recursive version of blkg_print_stat_ios
 * @sf: seq_file to print to
 * @v: unused
 */
int blkg_print_stat_ios_recursive(struct seq_file *sf, void *v)
{
	blkcg_print_blkgs(sf, css_to_blkcg(seq_css(sf)),
			  blkg_prfill_rwstat_field_recursive,
			  (void *)seq_cft(sf)->private,
			  offsetof(struct blkcg_gq, stat_ios), true);
	return 0;
}
EXPORT_SYMBOL_GPL(blkg_print_stat_ios_recursive);

/**
 * blkg_stat_recursive_sum - collect hierarchical blkg_stat
 * @blkg: blkg of interest
 * @pol: blkcg_policy which contains the blkg_stat
 * @off: offset to the blkg_stat in blkg_policy_data or @blkg
 *
 * Collect the blkg_stat specified by @blkg, @pol and @off and all its
 * online descendants and their aux counts.  The caller must be holding the
 * queue lock for online tests.
 *
 * If @pol is NULL, blkg_stat is at @off bytes into @blkg; otherwise, it is
 * at @off bytes into @blkg's blkg_policy_data of the policy.
 */
u64 blkg_stat_recursive_sum(struct blkcg_gq *blkg,
			    struct blkcg_policy *pol, int off)
{
	struct blkcg_gq *pos_blkg;
	struct cgroup_subsys_state *pos_css;
	u64 sum = 0;

	lockdep_assert_held(blkg->q->queue_lock);

	rcu_read_lock();
	blkg_for_each_descendant_pre(pos_blkg, pos_css, blkg) {
		struct blkg_stat *stat;

		if (!pos_blkg->online)
			continue;

		if (pol)
			stat = (void *)blkg_to_pd(pos_blkg, pol) + off;
		else
			stat = (void *)blkg + off;

		sum += blkg_stat_read(stat) + atomic64_read(&stat->aux_cnt);
	}
	rcu_read_unlock();

	return sum;
}
EXPORT_SYMBOL_GPL(blkg_stat_recursive_sum);
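
/*
 * A sketch of a hierarchical prfill built on blkg_stat_recursive_sum()
 * (hypothetical example_* names): with @pol set, @off is the offset of
 * a blkg_stat inside the policy's blkg_policy_data, and the result
 * covers the blkg and all of its online descendants.
 */
#if 0
static u64 example_prfill_recursive(struct seq_file *sf,
				    struct blkg_policy_data *pd, int off)
{
	u64 sum = blkg_stat_recursive_sum(pd->blkg,
					  &example_blkcg_policy, off);

	return __blkg_prfill_u64(sf, pd, sum);
}
#endif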

/**
 * blkg_rwstat_recursive_sum - collect hierarchical blkg_rwstat
 * @blkg: blkg of interest
 * @pol: blkcg_policy which contains the blkg_rwstat
 * @off: offset to the blkg_rwstat in blkg_policy_data or @blkg
 *
 * Collect the blkg_rwstat specified by @blkg, @pol and @off and all its
 * online descendants and their aux counts.  The caller must be holding the
 * queue lock for online tests.
 *
 * If @pol is NULL, blkg_rwstat is at @off bytes into @blkg; otherwise, it
 * is at @off bytes into @blkg's blkg_policy_data of the policy.
 */
struct blkg_rwstat blkg_rwstat_recursive_sum(struct blkcg_gq *blkg,
					     struct blkcg_policy *pol, int off)
{
	struct blkcg_gq *pos_blkg;
	struct cgroup_subsys_state *pos_css;
	struct blkg_rwstat sum = { };
	int i;

	lockdep_assert_held(blkg->q->queue_lock);

	rcu_read_lock();
	blkg_for_each_descendant_pre(pos_blkg, pos_css, blkg) {
		struct blkg_rwstat *rwstat;

		if (!pos_blkg->online)
			continue;

		if (pol)
			rwstat = (void *)blkg_to_pd(pos_blkg, pol) + off;
		else
			rwstat = (void *)pos_blkg + off;

		for (i = 0; i < BLKG_RWSTAT_NR; i++)
			atomic64_add(atomic64_read(&rwstat->aux_cnt[i]) +
				percpu_counter_sum_positive(&rwstat->cpu_cnt[i]),
				&sum.aux_cnt[i]);
	}
	rcu_read_unlock();

	return sum;
}
EXPORT_SYMBOL_GPL(blkg_rwstat_recursive_sum);

/* Performs queue bypass and policy enabled checks then looks up blkg. */
static struct blkcg_gq *blkg_lookup_check(struct blkcg *blkcg,
					  const struct blkcg_policy *pol,
					  struct request_queue *q)
{
	WARN_ON_ONCE(!rcu_read_lock_held());
	lockdep_assert_held(q->queue_lock);

	if (!blkcg_policy_enabled(q, pol))
		return ERR_PTR(-EOPNOTSUPP);

	/*
	 * This could be the first entry point of blkcg implementation and
	 * we shouldn't allow anything to go through for a bypassing queue.
	 */
	if (unlikely(blk_queue_bypass(q)))
		return ERR_PTR(blk_queue_dying(q) ? -ENODEV : -EBUSY);

	return __blkg_lookup(blkcg, q, true /* update_hint */);
}

/**
 * blkg_conf_prep - parse and prepare for per-blkg config update
 * @blkcg: target block cgroup
 * @pol: target policy
 * @input: input string
 * @ctx: blkg_conf_ctx to be filled
 *
 * Parse per-blkg config update from @input and initialize @ctx with the
 * result.  @ctx->blkg points to the blkg to be updated and @ctx->body the
 * part of @input following MAJ:MIN.  This function returns with RCU read
 * lock and queue lock held and must be paired with blkg_conf_finish().
 */
int blkg_conf_prep(struct blkcg *blkcg, const struct blkcg_policy *pol,
		   char *input, struct blkg_conf_ctx *ctx)
	__acquires(rcu) __acquires(disk->queue->queue_lock)
{
	struct gendisk *disk;
	struct request_queue *q;
	struct blkcg_gq *blkg;
	unsigned int major, minor;
	int key_len, part, ret;
	char *body;

	if (sscanf(input, "%u:%u%n", &major, &minor, &key_len) != 2)
		return -EINVAL;

	body = input + key_len;
	if (!isspace(*body))
		return -EINVAL;
	body = skip_spaces(body);

	disk = get_gendisk(MKDEV(major, minor), &part);
	if (!disk)
		return -ENODEV;
	if (part) {
		ret = -ENODEV;
		goto fail;
	}

	q = disk->queue;

	rcu_read_lock();
	spin_lock_irq(q->queue_lock);

	blkg = blkg_lookup_check(blkcg, pol, q);
	if (IS_ERR(blkg)) {
		ret = PTR_ERR(blkg);
		goto fail_unlock;
	}

	if (blkg)
		goto success;

	/*
	 * Create blkgs walking down from blkcg_root to @blkcg, so that all
	 * non-root blkgs have access to their parents.
	 */
	while (true) {
		struct blkcg *pos = blkcg;
		struct blkcg *parent;
		struct blkcg_gq *new_blkg;

		parent = blkcg_parent(blkcg);
		while (parent && !__blkg_lookup(parent, q, false)) {
			pos = parent;
			parent = blkcg_parent(parent);
		}

		/* Drop locks to do new blkg allocation with GFP_KERNEL. */
		spin_unlock_irq(q->queue_lock);
		rcu_read_unlock();

		new_blkg = blkg_alloc(pos, q, GFP_KERNEL);
		if (unlikely(!new_blkg)) {
			ret = -ENOMEM;
			goto fail;
		}

		rcu_read_lock();
		spin_lock_irq(q->queue_lock);

		blkg = blkg_lookup_check(pos, pol, q);
		if (IS_ERR(blkg)) {
			ret = PTR_ERR(blkg);
			goto fail_unlock;
		}

		if (blkg) {
			blkg_free(new_blkg);
		} else {
			blkg = blkg_create(pos, q, new_blkg);
			if (unlikely(IS_ERR(blkg))) {
				ret = PTR_ERR(blkg);
				goto fail_unlock;
			}
		}

		if (pos == blkcg)
			goto success;
	}
success:
	ctx->disk = disk;
	ctx->blkg = blkg;
	ctx->body = body;
	return 0;

fail_unlock:
	spin_unlock_irq(q->queue_lock);
	rcu_read_unlock();
fail:
	put_disk_and_module(disk);
	/*
	 * If queue was bypassing, we should retry.  Do so after a
	 * short msleep().  It isn't strictly necessary but queue
	 * can be bypassing for some time and it's always nice to
	 * avoid busy looping.
	 */
	if (ret == -EBUSY) {
		msleep(10);
		ret = restart_syscall();
	}
	return ret;
}
EXPORT_SYMBOL_GPL(blkg_conf_prep);

/**
 * blkg_conf_finish - finish up per-blkg config update
 * @ctx: blkg_conf_ctx initialized by blkg_conf_prep()
 *
 * Finish up after per-blkg config update.  This function must be paired
 * with blkg_conf_prep().
 */
void blkg_conf_finish(struct blkg_conf_ctx *ctx)
	__releases(ctx->disk->queue->queue_lock) __releases(rcu)
{
	spin_unlock_irq(ctx->disk->queue->queue_lock);
	rcu_read_unlock();
	put_disk_and_module(ctx->disk);
}
EXPORT_SYMBOL_GPL(blkg_conf_finish);
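
/*
 * A minimal sketch of the blkg_conf_prep()/blkg_conf_finish() pairing
 * in a policy's cgroup write handler (hypothetical example_* names and
 * limit field).  prep leaves the RCU read lock and queue lock held, so
 * the blkg and its policy data may be updated directly before finish.
 */
#if 0
static ssize_t example_set_limit(struct kernfs_open_file *of, char *buf,
				 size_t nbytes, loff_t off)
{
	struct blkcg *blkcg = css_to_blkcg(of_css(of));
	struct blkg_conf_ctx ctx;
	u64 v;
	int ret;

	ret = blkg_conf_prep(blkcg, &example_blkcg_policy, buf, &ctx);
	if (ret)
		return ret;

	ret = -EINVAL;
	if (sscanf(ctx.body, "%llu", &v) == 1) {
		/* update the policy data hanging off ctx.blkg here */
		ret = 0;
	}

	blkg_conf_finish(&ctx);
	return ret ?: nbytes;
}
#endif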

static int blkcg_print_stat(struct seq_file *sf, void *v)
{
	struct blkcg *blkcg = css_to_blkcg(seq_css(sf));
	struct blkcg_gq *blkg;

	rcu_read_lock();

	hlist_for_each_entry_rcu(blkg, &blkcg->blkg_list, blkcg_node) {
		const char *dname;
		struct blkg_rwstat rwstat;
		u64 rbytes, wbytes, rios, wios;

		dname = blkg_dev_name(blkg);
		if (!dname)
			continue;

		spin_lock_irq(blkg->q->queue_lock);

		rwstat = blkg_rwstat_recursive_sum(blkg, NULL,
					offsetof(struct blkcg_gq, stat_bytes));
		rbytes = atomic64_read(&rwstat.aux_cnt[BLKG_RWSTAT_READ]);
		wbytes = atomic64_read(&rwstat.aux_cnt[BLKG_RWSTAT_WRITE]);

		rwstat = blkg_rwstat_recursive_sum(blkg, NULL,
					offsetof(struct blkcg_gq, stat_ios));
		rios = atomic64_read(&rwstat.aux_cnt[BLKG_RWSTAT_READ]);
		wios = atomic64_read(&rwstat.aux_cnt[BLKG_RWSTAT_WRITE]);

		spin_unlock_irq(blkg->q->queue_lock);

		if (rbytes || wbytes || rios || wios)
			seq_printf(sf, "%s rbytes=%llu wbytes=%llu rios=%llu wios=%llu\n",
				   dname, rbytes, wbytes, rios, wios);
	}

	rcu_read_unlock();
	return 0;
}

static struct cftype blkcg_files[] = {
	{
		.name = "stat",
		.flags = CFTYPE_NOT_ON_ROOT,
		.seq_show = blkcg_print_stat,
	},
	{ }	/* terminate */
};

static struct cftype blkcg_legacy_files[] = {
	{
		.name = "reset_stats",
		.write_u64 = blkcg_reset_stats,
	},
	{ }	/* terminate */
};

/**
 * blkcg_css_offline - cgroup css_offline callback
 * @css: css of interest
 *
 * This function is called when @css is about to go away and responsible
 * for shooting down all blkgs associated with @css.  blkgs should be
 * removed while holding both q and blkcg locks.  As blkcg lock is nested
 * inside q lock, this function performs reverse double lock dancing.
 *
 * This is the blkcg counterpart of ioc_release_fn().
 */
static void blkcg_css_offline(struct cgroup_subsys_state *css)
{
	struct blkcg *blkcg = css_to_blkcg(css);

	spin_lock_irq(&blkcg->lock);

	while (!hlist_empty(&blkcg->blkg_list)) {
		struct blkcg_gq *blkg = hlist_entry(blkcg->blkg_list.first,
						    struct blkcg_gq, blkcg_node);
		struct request_queue *q = blkg->q;

		if (spin_trylock(q->queue_lock)) {
			blkg_destroy(blkg);
			spin_unlock(q->queue_lock);
		} else {
			spin_unlock_irq(&blkcg->lock);
			cpu_relax();
			spin_lock_irq(&blkcg->lock);
		}
	}

	spin_unlock_irq(&blkcg->lock);

	wb_blkcg_offline(blkcg);
}

static void blkcg_css_free(struct cgroup_subsys_state *css)
{
	struct blkcg *blkcg = css_to_blkcg(css);
	int i;

	mutex_lock(&blkcg_pol_mutex);

	list_del(&blkcg->all_blkcgs_node);

	for (i = 0; i < BLKCG_MAX_POLS; i++)
		if (blkcg->cpd[i])
			blkcg_policy[i]->cpd_free_fn(blkcg->cpd[i]);

	mutex_unlock(&blkcg_pol_mutex);

	kfree(blkcg);
}

static struct cgroup_subsys_state *
blkcg_css_alloc(struct cgroup_subsys_state *parent_css)
{
	struct blkcg *blkcg;
	struct cgroup_subsys_state *ret;
	int i;

	mutex_lock(&blkcg_pol_mutex);

	if (!parent_css) {
		blkcg = &blkcg_root;
	} else {
		blkcg = kzalloc(sizeof(*blkcg), GFP_KERNEL);
		if (!blkcg) {
			ret = ERR_PTR(-ENOMEM);
			goto unlock;
		}
	}

	for (i = 0; i < BLKCG_MAX_POLS; i++) {
		struct blkcg_policy *pol = blkcg_policy[i];
		struct blkcg_policy_data *cpd;

		/*
		 * If the policy hasn't been attached yet, wait for it
		 * to be attached before doing anything else. Otherwise,
		 * check if the policy requires any specific per-cgroup
		 * data: if it does, allocate and initialize it.
		 */
		if (!pol || !pol->cpd_alloc_fn)
			continue;

		cpd = pol->cpd_alloc_fn(GFP_KERNEL);
		if (!cpd) {
			ret = ERR_PTR(-ENOMEM);
			goto free_pd_blkcg;
		}
		blkcg->cpd[i] = cpd;
		cpd->blkcg = blkcg;
		cpd->plid = i;
		if (pol->cpd_init_fn)
			pol->cpd_init_fn(cpd);
	}

	spin_lock_init(&blkcg->lock);
	INIT_RADIX_TREE(&blkcg->blkg_tree, GFP_NOWAIT | __GFP_NOWARN);
	INIT_HLIST_HEAD(&blkcg->blkg_list);
#ifdef CONFIG_CGROUP_WRITEBACK
	INIT_LIST_HEAD(&blkcg->cgwb_list);
#endif
	list_add_tail(&blkcg->all_blkcgs_node, &all_blkcgs);

	mutex_unlock(&blkcg_pol_mutex);
	return &blkcg->css;

free_pd_blkcg:
	for (i--; i >= 0; i--)
		if (blkcg->cpd[i])
			blkcg_policy[i]->cpd_free_fn(blkcg->cpd[i]);

	if (blkcg != &blkcg_root)
		kfree(blkcg);
unlock:
	mutex_unlock(&blkcg_pol_mutex);
	return ret;
}

/**
 * blkcg_init_queue - initialize blkcg part of request queue
 * @q: request_queue to initialize
 *
 * Called from blk_alloc_queue_node().  Responsible for initializing blkcg
 * part of new request_queue @q.
 *
 * RETURNS:
 * 0 on success, -errno on failure.
 */
int blkcg_init_queue(struct request_queue *q)
{
	struct blkcg_gq *new_blkg, *blkg;
	bool preloaded;
	int ret;

	new_blkg = blkg_alloc(&blkcg_root, q, GFP_KERNEL);
	if (!new_blkg)
		return -ENOMEM;

	preloaded = !radix_tree_preload(GFP_KERNEL);

	/*
	 * Make sure the root blkg exists and count the existing blkgs.  As
	 * @q is bypassing at this point, blkg_lookup_create() can't be
	 * used.  Open code insertion.
	 */
	rcu_read_lock();
	spin_lock_irq(q->queue_lock);
	blkg = blkg_create(&blkcg_root, q, new_blkg);
	spin_unlock_irq(q->queue_lock);
	rcu_read_unlock();

	if (preloaded)
		radix_tree_preload_end();

	if (IS_ERR(blkg))
		return PTR_ERR(blkg);

	q->root_blkg = blkg;
	q->root_rl.blkg = blkg;

	ret = blk_throtl_init(q);
	if (ret) {
		spin_lock_irq(q->queue_lock);
		blkg_destroy_all(q);
		spin_unlock_irq(q->queue_lock);
	}
	return ret;
}

/**
 * blkcg_drain_queue - drain blkcg part of request_queue
 * @q: request_queue to drain
 *
 * Called from blk_drain_queue().  Responsible for draining blkcg part.
 */
void blkcg_drain_queue(struct request_queue *q)
{
	lockdep_assert_held(q->queue_lock);

	/*
	 * @q could be exiting and already have destroyed all blkgs as
	 * indicated by NULL root_blkg.  If so, don't confuse policies.
	 */
	if (!q->root_blkg)
		return;

	blk_throtl_drain(q);
}

/**
 * blkcg_exit_queue - exit and release blkcg part of request_queue
 * @q: request_queue being released
 *
 * Called from blk_release_queue().  Responsible for exiting blkcg part.
 */
void blkcg_exit_queue(struct request_queue *q)
{
	spin_lock_irq(q->queue_lock);
	blkg_destroy_all(q);
	spin_unlock_irq(q->queue_lock);

	blk_throtl_exit(q);
}

/*
 * We cannot support shared io contexts, as we have no means to support
 * two tasks with the same ioc in two different groups without major rework
 * of the main cic data structures.  For now we allow a task to change
 * its cgroup only if it's the only owner of its ioc.
 */
static int blkcg_can_attach(struct cgroup_taskset *tset)
{
	struct task_struct *task;
	struct cgroup_subsys_state *dst_css;
	struct io_context *ioc;
	int ret = 0;

	/* task_lock() is needed to avoid races with exit_io_context() */
	cgroup_taskset_for_each(task, dst_css, tset) {
		task_lock(task);
		ioc = task->io_context;
		if (ioc && atomic_read(&ioc->nr_tasks) > 1)
			ret = -EINVAL;
		task_unlock(task);
		if (ret)
			break;
	}
	return ret;
}

static void blkcg_bind(struct cgroup_subsys_state *root_css)
{
	int i;

	mutex_lock(&blkcg_pol_mutex);

	for (i = 0; i < BLKCG_MAX_POLS; i++) {
		struct blkcg_policy *pol = blkcg_policy[i];
		struct blkcg *blkcg;

		if (!pol || !pol->cpd_bind_fn)
			continue;

		list_for_each_entry(blkcg, &all_blkcgs, all_blkcgs_node)
			if (blkcg->cpd[pol->plid])
				pol->cpd_bind_fn(blkcg->cpd[pol->plid]);
	}
	mutex_unlock(&blkcg_pol_mutex);
}

struct cgroup_subsys io_cgrp_subsys = {
	.css_alloc = blkcg_css_alloc,
	.css_offline = blkcg_css_offline,
	.css_free = blkcg_css_free,
	.can_attach = blkcg_can_attach,
	.bind = blkcg_bind,
	.dfl_cftypes = blkcg_files,
	.legacy_cftypes = blkcg_legacy_files,
	.legacy_name = "blkio",
#ifdef CONFIG_MEMCG
	/*
	 * This ensures that, if available, memcg is automatically enabled
	 * together on the default hierarchy so that the owner cgroup can
	 * be retrieved from writeback pages.
	 */
	.depends_on = 1 << memory_cgrp_id,
#endif
};
EXPORT_SYMBOL_GPL(io_cgrp_subsys);

/**
 * blkcg_activate_policy - activate a blkcg policy on a request_queue
 * @q: request_queue of interest
 * @pol: blkcg policy to activate
 *
 * Activate @pol on @q.  Requires %GFP_KERNEL context.  @q goes through
 * bypass mode to populate its blkgs with policy_data for @pol.
 *
 * Activation happens with @q bypassed, so nobody would be accessing blkgs
 * from IO path.  Update of each blkg is protected by both queue and blkcg
 * locks so that holding either lock and testing blkcg_policy_enabled() is
 * always enough for dereferencing policy data.
 *
 * The caller is responsible for synchronizing [de]activations and policy
 * [un]registrations.  Returns 0 on success, -errno on failure.
 */
int blkcg_activate_policy(struct request_queue *q,
			  const struct blkcg_policy *pol)
{
	struct blkg_policy_data *pd_prealloc = NULL;
	struct blkcg_gq *blkg;
	int ret;

	if (blkcg_policy_enabled(q, pol))
		return 0;

	if (q->mq_ops)
		blk_mq_freeze_queue(q);
	else
		blk_queue_bypass_start(q);
pd_prealloc:
	if (!pd_prealloc) {
		pd_prealloc = pol->pd_alloc_fn(GFP_KERNEL, q->node);
		if (!pd_prealloc) {
			ret = -ENOMEM;
			goto out_bypass_end;
		}
	}

	spin_lock_irq(q->queue_lock);

	list_for_each_entry(blkg, &q->blkg_list, q_node) {
		struct blkg_policy_data *pd;

		if (blkg->pd[pol->plid])
			continue;

		pd = pol->pd_alloc_fn(GFP_NOWAIT | __GFP_NOWARN, q->node);
		if (!pd)
			swap(pd, pd_prealloc);
		if (!pd) {
			spin_unlock_irq(q->queue_lock);
			goto pd_prealloc;
		}

		blkg->pd[pol->plid] = pd;
		pd->blkg = blkg;
		pd->plid = pol->plid;
		if (pol->pd_init_fn)
			pol->pd_init_fn(pd);
	}

	__set_bit(pol->plid, q->blkcg_pols);
	ret = 0;

	spin_unlock_irq(q->queue_lock);
out_bypass_end:
	if (q->mq_ops)
		blk_mq_unfreeze_queue(q);
	else
		blk_queue_bypass_end(q);
	if (pd_prealloc)
		pol->pd_free_fn(pd_prealloc);
	return ret;
}
EXPORT_SYMBOL_GPL(blkcg_activate_policy);

/**
 * blkcg_deactivate_policy - deactivate a blkcg policy on a request_queue
 * @q: request_queue of interest
 * @pol: blkcg policy to deactivate
 *
 * Deactivate @pol on @q.  Follows the same synchronization rules as
 * blkcg_activate_policy().
 */
void blkcg_deactivate_policy(struct request_queue *q,
			     const struct blkcg_policy *pol)
{
	struct blkcg_gq *blkg;

	if (!blkcg_policy_enabled(q, pol))
		return;

	if (q->mq_ops)
		blk_mq_freeze_queue(q);
	else
		blk_queue_bypass_start(q);

	spin_lock_irq(q->queue_lock);

	__clear_bit(pol->plid, q->blkcg_pols);

	list_for_each_entry(blkg, &q->blkg_list, q_node) {
		/* grab blkcg lock too while removing @pd from @blkg */
		spin_lock(&blkg->blkcg->lock);

		if (blkg->pd[pol->plid]) {
			if (pol->pd_offline_fn)
				pol->pd_offline_fn(blkg->pd[pol->plid]);
			pol->pd_free_fn(blkg->pd[pol->plid]);
			blkg->pd[pol->plid] = NULL;
		}

		spin_unlock(&blkg->blkcg->lock);
	}

	spin_unlock_irq(q->queue_lock);

	if (q->mq_ops)
		blk_mq_unfreeze_queue(q);
	else
		blk_queue_bypass_end(q);
}
EXPORT_SYMBOL_GPL(blkcg_deactivate_policy);
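
/*
 * A sketch of how a policy enables itself on a queue at init time and
 * tears down on exit (hypothetical example_* callers; blk-throttle and
 * the legacy I/O schedulers follow this shape).
 */
#if 0
static int example_init_queue(struct request_queue *q)
{
	int ret;

	ret = blkcg_activate_policy(q, &example_blkcg_policy);
	if (ret)
		return ret;
	/* ... policy-private setup using the now-populated pd's ... */
	return 0;
}

static void example_exit_queue(struct request_queue *q)
{
	blkcg_deactivate_policy(q, &example_blkcg_policy);
}
#endif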

/**
 * blkcg_policy_register - register a blkcg policy
 * @pol: blkcg policy to register
 *
 * Register @pol with blkcg core.  Might sleep and @pol may be modified on
 * successful registration.  Returns 0 on success and -errno on failure.
 */
int blkcg_policy_register(struct blkcg_policy *pol)
{
	struct blkcg *blkcg;
	int i, ret;

	mutex_lock(&blkcg_pol_register_mutex);
	mutex_lock(&blkcg_pol_mutex);

	/* find an empty slot */
	ret = -ENOSPC;
	for (i = 0; i < BLKCG_MAX_POLS; i++)
		if (!blkcg_policy[i])
			break;
	if (i >= BLKCG_MAX_POLS)
		goto err_unlock;

	/* Make sure cpd/pd_alloc_fn and cpd/pd_free_fn in pairs */
	if ((!pol->cpd_alloc_fn ^ !pol->cpd_free_fn) ||
	    (!pol->pd_alloc_fn ^ !pol->pd_free_fn))
		goto err_unlock;

	/* register @pol */
	pol->plid = i;
	blkcg_policy[pol->plid] = pol;

	/* allocate and install cpd's */
	if (pol->cpd_alloc_fn) {
		list_for_each_entry(blkcg, &all_blkcgs, all_blkcgs_node) {
			struct blkcg_policy_data *cpd;

			cpd = pol->cpd_alloc_fn(GFP_KERNEL);
			if (!cpd)
				goto err_free_cpds;

			blkcg->cpd[pol->plid] = cpd;
			cpd->blkcg = blkcg;
			cpd->plid = pol->plid;
			pol->cpd_init_fn(cpd);
		}
	}

	mutex_unlock(&blkcg_pol_mutex);

	/* everything is in place, add intf files for the new policy */
	if (pol->dfl_cftypes)
		WARN_ON(cgroup_add_dfl_cftypes(&io_cgrp_subsys,
					       pol->dfl_cftypes));
	if (pol->legacy_cftypes)
		WARN_ON(cgroup_add_legacy_cftypes(&io_cgrp_subsys,
						  pol->legacy_cftypes));
	mutex_unlock(&blkcg_pol_register_mutex);
	return 0;

err_free_cpds:
	if (pol->cpd_free_fn) {
		list_for_each_entry(blkcg, &all_blkcgs, all_blkcgs_node) {
			if (blkcg->cpd[pol->plid]) {
				pol->cpd_free_fn(blkcg->cpd[pol->plid]);
				blkcg->cpd[pol->plid] = NULL;
			}
		}
	}
	blkcg_policy[pol->plid] = NULL;
err_unlock:
	mutex_unlock(&blkcg_pol_mutex);
	mutex_unlock(&blkcg_pol_register_mutex);
	return ret;
}
EXPORT_SYMBOL_GPL(blkcg_policy_register);

/**
 * blkcg_policy_unregister - unregister a blkcg policy
 * @pol: blkcg policy to unregister
 *
 * Undo blkcg_policy_register(@pol).  Might sleep.
 */
void blkcg_policy_unregister(struct blkcg_policy *pol)
{
	struct blkcg *blkcg;

	mutex_lock(&blkcg_pol_register_mutex);

	if (WARN_ON(blkcg_policy[pol->plid] != pol))
		goto out_unlock;

	/* kill the intf files first */
	if (pol->dfl_cftypes)
		cgroup_rm_cftypes(pol->dfl_cftypes);
	if (pol->legacy_cftypes)
		cgroup_rm_cftypes(pol->legacy_cftypes);

	/* remove cpds and unregister */
	mutex_lock(&blkcg_pol_mutex);

	if (pol->cpd_free_fn) {
		list_for_each_entry(blkcg, &all_blkcgs, all_blkcgs_node) {
			if (blkcg->cpd[pol->plid]) {
				pol->cpd_free_fn(blkcg->cpd[pol->plid]);
				blkcg->cpd[pol->plid] = NULL;
			}
		}
	}
	blkcg_policy[pol->plid] = NULL;

	mutex_unlock(&blkcg_pol_mutex);
out_unlock:
	mutex_unlock(&blkcg_pol_register_mutex);
}
EXPORT_SYMBOL_GPL(blkcg_policy_unregister);
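
/*
 * A sketch of registering a policy at module init (hypothetical
 * example_* names).  As checked above, cpd/pd alloc and free callbacks
 * must come in pairs; cftypes are added only after registration
 * succeeds, so the seq_show callbacks can rely on pol->plid being set.
 */
#if 0
static struct blkcg_policy example_blkcg_policy = {
	.dfl_cftypes	= example_stat_files,
	.legacy_cftypes	= example_stat_files,
	.pd_alloc_fn	= example_pd_alloc,
	.pd_init_fn	= example_pd_init,
	.pd_free_fn	= example_pd_free,
};

static int __init example_init(void)
{
	return blkcg_policy_register(&example_blkcg_policy);
}

static void __exit example_exit(void)
{
	blkcg_policy_unregister(&example_blkcg_policy);
}

module_init(example_init);
module_exit(example_exit);
#endif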