/*
 * Common Block IO controller cgroup interface
 *
 * Based on ideas and code from CFQ, CFS and BFQ:
 * Copyright (C) 2003 Jens Axboe <axboe@kernel.dk>
 *
 * Copyright (C) 2008 Fabio Checconi <fabio@gandalf.sssup.it>
 *                    Paolo Valente <paolo.valente@unimore.it>
 *
 * Copyright (C) 2009 Vivek Goyal <vgoyal@redhat.com>
 *                    Nauman Rafique <nauman@google.com>
 *
 * For policy-specific per-blkcg data:
 * Copyright (C) 2015 Paolo Valente <paolo.valente@unimore.it>
 *                    Arianna Avanzini <avanzini.arianna@gmail.com>
 */
#include <linux/ioprio.h>
#include <linux/kdev_t.h>
#include <linux/module.h>
#include <linux/sched/signal.h>
#include <linux/err.h>
#include <linux/blkdev.h>
#include <linux/backing-dev.h>
#include <linux/slab.h>
#include <linux/genhd.h>
#include <linux/delay.h>
#include <linux/atomic.h>
#include <linux/ctype.h>
#include <linux/blk-cgroup.h>
#include "blk.h"

#define MAX_KEY_LEN 100

/*
 * blkcg_pol_mutex protects blkcg_policy[] and policy [de]activation.
 * blkcg_pol_register_mutex nests outside of it and synchronizes entire
 * policy [un]register operations including cgroup file additions /
 * removals.  Putting cgroup file registration outside blkcg_pol_mutex
 * allows grabbing it from cgroup callbacks.
 */
static DEFINE_MUTEX(blkcg_pol_register_mutex);
static DEFINE_MUTEX(blkcg_pol_mutex);

struct blkcg blkcg_root;
EXPORT_SYMBOL_GPL(blkcg_root);

struct cgroup_subsys_state * const blkcg_root_css = &blkcg_root.css;

static struct blkcg_policy *blkcg_policy[BLKCG_MAX_POLS];

static LIST_HEAD(all_blkcgs);   /* protected by blkcg_pol_mutex */

static bool blkcg_policy_enabled(struct request_queue *q,
                                 const struct blkcg_policy *pol)
{
        return pol && test_bit(pol->plid, q->blkcg_pols);
}

/**
 * blkg_free - free a blkg
 * @blkg: blkg to free
 *
 * Free @blkg which may be partially allocated.
 */
static void blkg_free(struct blkcg_gq *blkg)
{
        int i;

        if (!blkg)
                return;

        for (i = 0; i < BLKCG_MAX_POLS; i++)
                if (blkg->pd[i])
                        blkcg_policy[i]->pd_free_fn(blkg->pd[i]);

        if (blkg->blkcg != &blkcg_root)
                blk_exit_rl(blkg->q, &blkg->rl);

        blkg_rwstat_exit(&blkg->stat_ios);
        blkg_rwstat_exit(&blkg->stat_bytes);
        kfree(blkg);
}

/**
 * blkg_alloc - allocate a blkg
 * @blkcg: block cgroup the new blkg is associated with
 * @q: request_queue the new blkg is associated with
 * @gfp_mask: allocation mask to use
 *
 * Allocate a new blkg associating @blkcg and @q.
 */
static struct blkcg_gq *blkg_alloc(struct blkcg *blkcg, struct request_queue *q,
                                   gfp_t gfp_mask)
{
        struct blkcg_gq *blkg;
        int i;

        /* alloc and init base part */
        blkg = kzalloc_node(sizeof(*blkg), gfp_mask, q->node);
        if (!blkg)
                return NULL;

        if (blkg_rwstat_init(&blkg->stat_bytes, gfp_mask) ||
            blkg_rwstat_init(&blkg->stat_ios, gfp_mask))
                goto err_free;

        blkg->q = q;
        INIT_LIST_HEAD(&blkg->q_node);
        blkg->blkcg = blkcg;
        atomic_set(&blkg->refcnt, 1);

        /* root blkg uses @q->root_rl, init rl only for !root blkgs */
        if (blkcg != &blkcg_root) {
                if (blk_init_rl(&blkg->rl, q, gfp_mask))
                        goto err_free;
                blkg->rl.blkg = blkg;
        }

        for (i = 0; i < BLKCG_MAX_POLS; i++) {
                struct blkcg_policy *pol = blkcg_policy[i];
                struct blkg_policy_data *pd;

                if (!blkcg_policy_enabled(q, pol))
                        continue;

                /* alloc per-policy data and attach it to blkg */
                pd = pol->pd_alloc_fn(gfp_mask, q->node);
                if (!pd)
                        goto err_free;

                blkg->pd[i] = pd;
                pd->blkg = blkg;
                pd->plid = i;
        }

        return blkg;

err_free:
        blkg_free(blkg);
        return NULL;
}

struct blkcg_gq *blkg_lookup_slowpath(struct blkcg *blkcg,
                                      struct request_queue *q, bool update_hint)
{
        struct blkcg_gq *blkg;

        /*
         * Hint didn't match.  Look up from the radix tree.  Note that the
         * hint can only be updated under queue_lock as otherwise @blkg
         * could have already been removed from blkg_tree.  The caller is
         * responsible for grabbing queue_lock if @update_hint.
         */
        blkg = radix_tree_lookup(&blkcg->blkg_tree, q->id);
        if (blkg && blkg->q == q) {
                if (update_hint) {
                        lockdep_assert_held(q->queue_lock);
                        rcu_assign_pointer(blkcg->blkg_hint, blkg);
                }
                return blkg;
        }

        return NULL;
}
EXPORT_SYMBOL_GPL(blkg_lookup_slowpath);

/*
 * If @new_blkg is %NULL, this function tries to allocate a new one as
 * necessary using %GFP_NOWAIT.  @new_blkg is always consumed on return.
 */
static struct blkcg_gq *blkg_create(struct blkcg *blkcg,
                                    struct request_queue *q,
                                    struct blkcg_gq *new_blkg)
{
        struct blkcg_gq *blkg;
        struct bdi_writeback_congested *wb_congested;
        int i, ret;

        WARN_ON_ONCE(!rcu_read_lock_held());
        lockdep_assert_held(q->queue_lock);

        /* blkg holds a reference to blkcg */
        if (!css_tryget_online(&blkcg->css)) {
                ret = -ENODEV;
                goto err_free_blkg;
        }

        wb_congested = wb_congested_get_create(q->backing_dev_info,
                                               blkcg->css.id,
                                               GFP_NOWAIT | __GFP_NOWARN);
        if (!wb_congested) {
                ret = -ENOMEM;
                goto err_put_css;
        }

        /* allocate */
        if (!new_blkg) {
                new_blkg = blkg_alloc(blkcg, q, GFP_NOWAIT | __GFP_NOWARN);
                if (unlikely(!new_blkg)) {
                        ret = -ENOMEM;
                        goto err_put_congested;
                }
        }
        blkg = new_blkg;
        blkg->wb_congested = wb_congested;

        /* link parent */
        if (blkcg_parent(blkcg)) {
                blkg->parent = __blkg_lookup(blkcg_parent(blkcg), q, false);
                if (WARN_ON_ONCE(!blkg->parent)) {
                        ret = -ENODEV;
                        goto err_put_congested;
                }
                blkg_get(blkg->parent);
        }

        /* invoke per-policy init */
        for (i = 0; i < BLKCG_MAX_POLS; i++) {
                struct blkcg_policy *pol = blkcg_policy[i];

                if (blkg->pd[i] && pol->pd_init_fn)
                        pol->pd_init_fn(blkg->pd[i]);
        }

        /* insert */
        spin_lock(&blkcg->lock);
        ret = radix_tree_insert(&blkcg->blkg_tree, q->id, blkg);
        if (likely(!ret)) {
                hlist_add_head_rcu(&blkg->blkcg_node, &blkcg->blkg_list);
                list_add(&blkg->q_node, &q->blkg_list);

                for (i = 0; i < BLKCG_MAX_POLS; i++) {
                        struct blkcg_policy *pol = blkcg_policy[i];

                        if (blkg->pd[i] && pol->pd_online_fn)
                                pol->pd_online_fn(blkg->pd[i]);
                }
        }
        blkg->online = true;
        spin_unlock(&blkcg->lock);

        if (!ret)
                return blkg;

        /* @blkg failed to be fully initialized, use the usual release path */
        blkg_put(blkg);
        return ERR_PTR(ret);

err_put_congested:
        wb_congested_put(wb_congested);
err_put_css:
        css_put(&blkcg->css);
err_free_blkg:
        blkg_free(new_blkg);
        return ERR_PTR(ret);
}

/**
 * blkg_lookup_create - lookup blkg, try to create one if not there
 * @blkcg: blkcg of interest
 * @q: request_queue of interest
 *
 * Lookup blkg for the @blkcg - @q pair.  If it doesn't exist, try to
 * create one.  blkg creation is performed recursively from blkcg_root such
 * that all non-root blkg's have access to the parent blkg.  This function
 * should be called under RCU read lock and @q->queue_lock.
 *
 * Returns pointer to the looked up or created blkg on success, ERR_PTR()
 * value on error.  If @q is dead, returns ERR_PTR(-ENODEV).  If @q is not
 * dead and bypassing, returns ERR_PTR(-EBUSY).
 */
struct blkcg_gq *blkg_lookup_create(struct blkcg *blkcg,
                                    struct request_queue *q)
{
        struct blkcg_gq *blkg;

        WARN_ON_ONCE(!rcu_read_lock_held());
        lockdep_assert_held(q->queue_lock);

        /*
         * This could be the first entry point of blkcg implementation and
         * we shouldn't allow anything to go through for a bypassing queue.
         */
        if (unlikely(blk_queue_bypass(q)))
                return ERR_PTR(blk_queue_dying(q) ? -ENODEV : -EBUSY);

        blkg = __blkg_lookup(blkcg, q, true);
        if (blkg)
                return blkg;

        /*
         * Create blkgs walking down from blkcg_root to @blkcg, so that all
         * non-root blkgs have access to their parents.
         */
        while (true) {
                struct blkcg *pos = blkcg;
                struct blkcg *parent = blkcg_parent(blkcg);

                while (parent && !__blkg_lookup(parent, q, false)) {
                        pos = parent;
                        parent = blkcg_parent(parent);
                }

                blkg = blkg_create(pos, q, NULL);
                if (pos == blkcg || IS_ERR(blkg))
                        return blkg;
        }
}
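
/*
 * A minimal usage sketch (not part of the original file): callers are
 * expected to hold both the RCU read lock and @q->queue_lock, mirroring
 * the assertions above.  "blkcg" and "q" stand for whatever cgroup and
 * queue the caller is working with.
 *
 *	rcu_read_lock();
 *	spin_lock_irq(q->queue_lock);
 *	blkg = blkg_lookup_create(blkcg, q);
 *	if (!IS_ERR(blkg))
 *		...use blkg...
 *	spin_unlock_irq(q->queue_lock);
 *	rcu_read_unlock();
 */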

static void blkg_pd_offline(struct blkcg_gq *blkg)
{
        int i;

        lockdep_assert_held(blkg->q->queue_lock);
        lockdep_assert_held(&blkg->blkcg->lock);

        for (i = 0; i < BLKCG_MAX_POLS; i++) {
                struct blkcg_policy *pol = blkcg_policy[i];

                if (blkg->pd[i] && !blkg->pd[i]->offline &&
                    pol->pd_offline_fn) {
                        pol->pd_offline_fn(blkg->pd[i]);
                        blkg->pd[i]->offline = true;
                }
        }
}

static void blkg_destroy(struct blkcg_gq *blkg)
{
        struct blkcg *blkcg = blkg->blkcg;
        struct blkcg_gq *parent = blkg->parent;

        lockdep_assert_held(blkg->q->queue_lock);
        lockdep_assert_held(&blkcg->lock);

        /* Something wrong if we are trying to remove same group twice */
        WARN_ON_ONCE(list_empty(&blkg->q_node));
        WARN_ON_ONCE(hlist_unhashed(&blkg->blkcg_node));

        if (parent) {
                blkg_rwstat_add_aux(&parent->stat_bytes, &blkg->stat_bytes);
                blkg_rwstat_add_aux(&parent->stat_ios, &blkg->stat_ios);
        }

        blkg->online = false;

        radix_tree_delete(&blkcg->blkg_tree, blkg->q->id);
        list_del_init(&blkg->q_node);
        hlist_del_init_rcu(&blkg->blkcg_node);

        /*
         * Both setting lookup hint to and clearing it from @blkg are done
         * under queue_lock.  If it's not pointing to @blkg now, it never
         * will.  Hint assignment itself can race safely.
         */
        if (rcu_access_pointer(blkcg->blkg_hint) == blkg)
                rcu_assign_pointer(blkcg->blkg_hint, NULL);

        /*
         * Put the reference taken at the time of creation so that when all
         * queues are gone, group can be destroyed.
         */
        blkg_put(blkg);
}

/**
 * blkg_destroy_all - destroy all blkgs associated with a request_queue
 * @q: request_queue of interest
 *
 * Destroy all blkgs associated with @q.
 */
static void blkg_destroy_all(struct request_queue *q)
{
        struct blkcg_gq *blkg, *n;

        lockdep_assert_held(q->queue_lock);

        list_for_each_entry_safe(blkg, n, &q->blkg_list, q_node) {
                struct blkcg *blkcg = blkg->blkcg;

                spin_lock(&blkcg->lock);
                blkg_pd_offline(blkg);
                blkg_destroy(blkg);
                spin_unlock(&blkcg->lock);
        }

        q->root_blkg = NULL;
        q->root_rl.blkg = NULL;
}

/*
 * A group is RCU protected, but having an rcu lock does not mean that one
 * can access all the fields of blkg and assume these are valid.  For
 * example, don't try to follow throtl_data and request queue links.
 *
 * Having a reference to blkg under an rcu allows accesses to only values
 * local to groups like group stats and group rate limits.
 */
void __blkg_release_rcu(struct rcu_head *rcu_head)
{
        struct blkcg_gq *blkg = container_of(rcu_head, struct blkcg_gq, rcu_head);

        /* release the blkcg and parent blkg refs this blkg has been holding */
        css_put(&blkg->blkcg->css);
        if (blkg->parent)
                blkg_put(blkg->parent);

        wb_congested_put(blkg->wb_congested);

        blkg_free(blkg);
}
EXPORT_SYMBOL_GPL(__blkg_release_rcu);

/*
 * The next function used by blk_queue_for_each_rl().  It's a bit tricky
 * because the root blkg uses @q->root_rl instead of its own rl.
 */
struct request_list *__blk_queue_next_rl(struct request_list *rl,
                                         struct request_queue *q)
{
        struct list_head *ent;
        struct blkcg_gq *blkg;

        /*
         * Determine the current blkg list_head.  The first entry is
         * root_rl which is off @q->blkg_list and mapped to the head.
         */
        if (rl == &q->root_rl) {
                ent = &q->blkg_list;
                /* There are no more block groups, hence no request lists */
                if (list_empty(ent))
                        return NULL;
        } else {
                blkg = container_of(rl, struct blkcg_gq, rl);
                ent = &blkg->q_node;
        }

        /* walk to the next list_head, skip root blkcg */
        ent = ent->next;
        if (ent == &q->root_blkg->q_node)
                ent = ent->next;
        if (ent == &q->blkg_list)
                return NULL;

        blkg = container_of(ent, struct blkcg_gq, q_node);
        return &blkg->rl;
}

static int blkcg_reset_stats(struct cgroup_subsys_state *css,
                             struct cftype *cftype, u64 val)
{
        struct blkcg *blkcg = css_to_blkcg(css);
        struct blkcg_gq *blkg;
        int i;

        mutex_lock(&blkcg_pol_mutex);
        spin_lock_irq(&blkcg->lock);

        /*
         * Note that stat reset is racy - it doesn't synchronize against
         * stat updates.  This is a debug feature which shouldn't exist
         * anyway.  If you get hit by a race, retry.
         */
        hlist_for_each_entry(blkg, &blkcg->blkg_list, blkcg_node) {
                blkg_rwstat_reset(&blkg->stat_bytes);
                blkg_rwstat_reset(&blkg->stat_ios);

                for (i = 0; i < BLKCG_MAX_POLS; i++) {
                        struct blkcg_policy *pol = blkcg_policy[i];

                        if (blkg->pd[i] && pol->pd_reset_stats_fn)
                                pol->pd_reset_stats_fn(blkg->pd[i]);
                }
        }

        spin_unlock_irq(&blkcg->lock);
        mutex_unlock(&blkcg_pol_mutex);
        return 0;
}

const char *blkg_dev_name(struct blkcg_gq *blkg)
{
        /* some drivers (floppy) instantiate a queue w/o disk registered */
        if (blkg->q->backing_dev_info->dev)
                return dev_name(blkg->q->backing_dev_info->dev);
        return NULL;
}
EXPORT_SYMBOL_GPL(blkg_dev_name);

/**
 * blkcg_print_blkgs - helper for printing per-blkg data
 * @sf: seq_file to print to
 * @blkcg: blkcg of interest
 * @prfill: fill function to print out a blkg
 * @pol: policy in question
 * @data: data to be passed to @prfill
 * @show_total: to print out sum of prfill return values or not
 *
 * This function invokes @prfill on each blkg of @blkcg if pd for the
 * policy specified by @pol exists.  @prfill is invoked with @sf, the
 * policy data and @data and the matching queue lock held.  If @show_total
 * is %true, the sum of the return values from @prfill is printed with
 * "Total" label at the end.
 *
 * This is to be used to construct print functions for
 * cftype->read_seq_string method.
 */
void blkcg_print_blkgs(struct seq_file *sf, struct blkcg *blkcg,
                       u64 (*prfill)(struct seq_file *,
                                     struct blkg_policy_data *, int),
                       const struct blkcg_policy *pol, int data,
                       bool show_total)
{
        struct blkcg_gq *blkg;
        u64 total = 0;

        rcu_read_lock();
        hlist_for_each_entry_rcu(blkg, &blkcg->blkg_list, blkcg_node) {
                spin_lock_irq(blkg->q->queue_lock);
                if (blkcg_policy_enabled(blkg->q, pol))
                        total += prfill(sf, blkg->pd[pol->plid], data);
                spin_unlock_irq(blkg->q->queue_lock);
        }
        rcu_read_unlock();

        if (show_total)
                seq_printf(sf, "Total %llu\n", (unsigned long long)total);
}
EXPORT_SYMBOL_GPL(blkcg_print_blkgs);
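
/*
 * Example of how a policy typically builds a seq_show callback on top of
 * blkcg_print_blkgs() (a sketch only; "example_pol", "example_pd" and
 * "example_prfill_weight" are hypothetical names, not defined here):
 *
 *	static u64 example_prfill_weight(struct seq_file *sf,
 *					 struct blkg_policy_data *pd, int off)
 *	{
 *		struct example_pd *epd = container_of(pd, struct example_pd, pd);
 *
 *		return __blkg_prfill_u64(sf, pd, epd->weight);
 *	}
 *
 *	static int example_print_weight(struct seq_file *sf, void *v)
 *	{
 *		blkcg_print_blkgs(sf, css_to_blkcg(seq_css(sf)),
 *				  example_prfill_weight, &example_pol, 0, false);
 *		return 0;
 *	}
 */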

/**
 * __blkg_prfill_u64 - prfill helper for a single u64 value
 * @sf: seq_file to print to
 * @pd: policy private data of interest
 * @v: value to print
 *
 * Print @v to @sf for the device associated with @pd.
 */
u64 __blkg_prfill_u64(struct seq_file *sf, struct blkg_policy_data *pd, u64 v)
{
        const char *dname = blkg_dev_name(pd->blkg);

        if (!dname)
                return 0;

        seq_printf(sf, "%s %llu\n", dname, (unsigned long long)v);
        return v;
}
EXPORT_SYMBOL_GPL(__blkg_prfill_u64);

/**
 * __blkg_prfill_rwstat - prfill helper for a blkg_rwstat
 * @sf: seq_file to print to
 * @pd: policy private data of interest
 * @rwstat: rwstat to print
 *
 * Print @rwstat to @sf for the device associated with @pd.
 */
u64 __blkg_prfill_rwstat(struct seq_file *sf, struct blkg_policy_data *pd,
                         const struct blkg_rwstat *rwstat)
{
        static const char *rwstr[] = {
                [BLKG_RWSTAT_READ]      = "Read",
                [BLKG_RWSTAT_WRITE]     = "Write",
                [BLKG_RWSTAT_SYNC]      = "Sync",
                [BLKG_RWSTAT_ASYNC]     = "Async",
        };
        const char *dname = blkg_dev_name(pd->blkg);
        u64 v;
        int i;

        if (!dname)
                return 0;

        for (i = 0; i < BLKG_RWSTAT_NR; i++)
                seq_printf(sf, "%s %s %llu\n", dname, rwstr[i],
                           (unsigned long long)atomic64_read(&rwstat->aux_cnt[i]));

        v = atomic64_read(&rwstat->aux_cnt[BLKG_RWSTAT_READ]) +
                atomic64_read(&rwstat->aux_cnt[BLKG_RWSTAT_WRITE]);
        seq_printf(sf, "%s Total %llu\n", dname, (unsigned long long)v);
        return v;
}
EXPORT_SYMBOL_GPL(__blkg_prfill_rwstat);

/**
 * blkg_prfill_stat - prfill callback for blkg_stat
 * @sf: seq_file to print to
 * @pd: policy private data of interest
 * @off: offset to the blkg_stat in @pd
 *
 * prfill callback for printing a blkg_stat.
 */
u64 blkg_prfill_stat(struct seq_file *sf, struct blkg_policy_data *pd, int off)
{
        return __blkg_prfill_u64(sf, pd, blkg_stat_read((void *)pd + off));
}
EXPORT_SYMBOL_GPL(blkg_prfill_stat);

/**
 * blkg_prfill_rwstat - prfill callback for blkg_rwstat
 * @sf: seq_file to print to
 * @pd: policy private data of interest
 * @off: offset to the blkg_rwstat in @pd
 *
 * prfill callback for printing a blkg_rwstat.
 */
u64 blkg_prfill_rwstat(struct seq_file *sf, struct blkg_policy_data *pd,
                       int off)
{
        struct blkg_rwstat rwstat = blkg_rwstat_read((void *)pd + off);

        return __blkg_prfill_rwstat(sf, pd, &rwstat);
}
EXPORT_SYMBOL_GPL(blkg_prfill_rwstat);

static u64 blkg_prfill_rwstat_field(struct seq_file *sf,
                                    struct blkg_policy_data *pd, int off)
{
        struct blkg_rwstat rwstat = blkg_rwstat_read((void *)pd->blkg + off);

        return __blkg_prfill_rwstat(sf, pd, &rwstat);
}

/**
 * blkg_print_stat_bytes - seq_show callback for blkg->stat_bytes
 * @sf: seq_file to print to
 * @v: unused
 *
 * To be used as cftype->seq_show to print blkg->stat_bytes.
 * cftype->private must be set to the blkcg_policy.
 */
int blkg_print_stat_bytes(struct seq_file *sf, void *v)
{
        blkcg_print_blkgs(sf, css_to_blkcg(seq_css(sf)),
                          blkg_prfill_rwstat_field, (void *)seq_cft(sf)->private,
                          offsetof(struct blkcg_gq, stat_bytes), true);
        return 0;
}
EXPORT_SYMBOL_GPL(blkg_print_stat_bytes);

/**
 * blkg_print_stat_ios - seq_show callback for blkg->stat_ios
 * @sf: seq_file to print to
 * @v: unused
 *
 * To be used as cftype->seq_show to print blkg->stat_ios.  cftype->private
 * must be set to the blkcg_policy.
 */
int blkg_print_stat_ios(struct seq_file *sf, void *v)
{
        blkcg_print_blkgs(sf, css_to_blkcg(seq_css(sf)),
                          blkg_prfill_rwstat_field, (void *)seq_cft(sf)->private,
                          offsetof(struct blkcg_gq, stat_ios), true);
        return 0;
}
EXPORT_SYMBOL_GPL(blkg_print_stat_ios);

static u64 blkg_prfill_rwstat_field_recursive(struct seq_file *sf,
                                              struct blkg_policy_data *pd,
                                              int off)
{
        struct blkg_rwstat rwstat = blkg_rwstat_recursive_sum(pd->blkg,
                                                              NULL, off);
        return __blkg_prfill_rwstat(sf, pd, &rwstat);
}

/**
 * blkg_print_stat_bytes_recursive - recursive version of blkg_print_stat_bytes
 * @sf: seq_file to print to
 * @v: unused
 */
int blkg_print_stat_bytes_recursive(struct seq_file *sf, void *v)
{
        blkcg_print_blkgs(sf, css_to_blkcg(seq_css(sf)),
                          blkg_prfill_rwstat_field_recursive,
                          (void *)seq_cft(sf)->private,
                          offsetof(struct blkcg_gq, stat_bytes), true);
        return 0;
}
EXPORT_SYMBOL_GPL(blkg_print_stat_bytes_recursive);

/**
 * blkg_print_stat_ios_recursive - recursive version of blkg_print_stat_ios
 * @sf: seq_file to print to
 * @v: unused
 */
int blkg_print_stat_ios_recursive(struct seq_file *sf, void *v)
{
        blkcg_print_blkgs(sf, css_to_blkcg(seq_css(sf)),
                          blkg_prfill_rwstat_field_recursive,
                          (void *)seq_cft(sf)->private,
                          offsetof(struct blkcg_gq, stat_ios), true);
        return 0;
}
EXPORT_SYMBOL_GPL(blkg_print_stat_ios_recursive);

/**
 * blkg_stat_recursive_sum - collect hierarchical blkg_stat
 * @blkg: blkg of interest
 * @pol: blkcg_policy which contains the blkg_stat
 * @off: offset to the blkg_stat in blkg_policy_data or @blkg
 *
 * Collect the blkg_stat specified by @blkg, @pol and @off and all its
 * online descendants and their aux counts.  The caller must be holding the
 * queue lock for online tests.
 *
 * If @pol is NULL, blkg_stat is at @off bytes into @blkg; otherwise, it is
 * at @off bytes into @blkg's blkg_policy_data of the policy.
 */
u64 blkg_stat_recursive_sum(struct blkcg_gq *blkg,
                            struct blkcg_policy *pol, int off)
{
        struct blkcg_gq *pos_blkg;
        struct cgroup_subsys_state *pos_css;
        u64 sum = 0;

        lockdep_assert_held(blkg->q->queue_lock);

        rcu_read_lock();
        blkg_for_each_descendant_pre(pos_blkg, pos_css, blkg) {
                struct blkg_stat *stat;

                if (!pos_blkg->online)
                        continue;

                if (pol)
                        stat = (void *)blkg_to_pd(pos_blkg, pol) + off;
                else
                        stat = (void *)blkg + off;

                sum += blkg_stat_read(stat) + atomic64_read(&stat->aux_cnt);
        }
        rcu_read_unlock();

        return sum;
}
EXPORT_SYMBOL_GPL(blkg_stat_recursive_sum);

/**
 * blkg_rwstat_recursive_sum - collect hierarchical blkg_rwstat
 * @blkg: blkg of interest
 * @pol: blkcg_policy which contains the blkg_rwstat
 * @off: offset to the blkg_rwstat in blkg_policy_data or @blkg
 *
 * Collect the blkg_rwstat specified by @blkg, @pol and @off and all its
 * online descendants and their aux counts.  The caller must be holding the
 * queue lock for online tests.
 *
 * If @pol is NULL, blkg_rwstat is at @off bytes into @blkg; otherwise, it
 * is at @off bytes into @blkg's blkg_policy_data of the policy.
 */
struct blkg_rwstat blkg_rwstat_recursive_sum(struct blkcg_gq *blkg,
                                             struct blkcg_policy *pol, int off)
{
        struct blkcg_gq *pos_blkg;
        struct cgroup_subsys_state *pos_css;
        struct blkg_rwstat sum = { };
        int i;

        lockdep_assert_held(blkg->q->queue_lock);

        rcu_read_lock();
        blkg_for_each_descendant_pre(pos_blkg, pos_css, blkg) {
                struct blkg_rwstat *rwstat;

                if (!pos_blkg->online)
                        continue;

                if (pol)
                        rwstat = (void *)blkg_to_pd(pos_blkg, pol) + off;
                else
                        rwstat = (void *)pos_blkg + off;

                for (i = 0; i < BLKG_RWSTAT_NR; i++)
                        atomic64_add(atomic64_read(&rwstat->aux_cnt[i]) +
                                percpu_counter_sum_positive(&rwstat->cpu_cnt[i]),
                                &sum.aux_cnt[i]);
        }
        rcu_read_unlock();

        return sum;
}
EXPORT_SYMBOL_GPL(blkg_rwstat_recursive_sum);

/* Performs queue bypass and policy enabled checks then looks up blkg. */
static struct blkcg_gq *blkg_lookup_check(struct blkcg *blkcg,
                                          const struct blkcg_policy *pol,
                                          struct request_queue *q)
{
        WARN_ON_ONCE(!rcu_read_lock_held());
        lockdep_assert_held(q->queue_lock);

        if (!blkcg_policy_enabled(q, pol))
                return ERR_PTR(-EOPNOTSUPP);

        /*
         * This could be the first entry point of blkcg implementation and
         * we shouldn't allow anything to go through for a bypassing queue.
         */
        if (unlikely(blk_queue_bypass(q)))
                return ERR_PTR(blk_queue_dying(q) ? -ENODEV : -EBUSY);

        return __blkg_lookup(blkcg, q, true /* update_hint */);
}

/**
 * blkg_conf_prep - parse and prepare for per-blkg config update
 * @blkcg: target block cgroup
 * @pol: target policy
 * @input: input string
 * @ctx: blkg_conf_ctx to be filled
 *
 * Parse per-blkg config update from @input and initialize @ctx with the
 * result.  @ctx->blkg points to the blkg to be updated and @ctx->body the
 * part of @input following MAJ:MIN.  This function returns with RCU read
 * lock and queue lock held and must be paired with blkg_conf_finish().
 */
int blkg_conf_prep(struct blkcg *blkcg, const struct blkcg_policy *pol,
                   char *input, struct blkg_conf_ctx *ctx)
        __acquires(rcu) __acquires(disk->queue->queue_lock)
{
        struct gendisk *disk;
        struct request_queue *q;
        struct blkcg_gq *blkg;
        unsigned int major, minor;
        int key_len, part, ret;
        char *body;

        if (sscanf(input, "%u:%u%n", &major, &minor, &key_len) != 2)
                return -EINVAL;

        body = input + key_len;
        if (!isspace(*body))
                return -EINVAL;
        body = skip_spaces(body);

        disk = get_gendisk(MKDEV(major, minor), &part);
        if (!disk)
                return -ENODEV;
        if (part) {
                ret = -ENODEV;
                goto fail;
        }

        q = disk->queue;

        rcu_read_lock();
        spin_lock_irq(q->queue_lock);

        blkg = blkg_lookup_check(blkcg, pol, q);
        if (IS_ERR(blkg)) {
                ret = PTR_ERR(blkg);
                goto fail_unlock;
        }

        if (blkg)
                goto success;

        /*
         * Create blkgs walking down from blkcg_root to @blkcg, so that all
         * non-root blkgs have access to their parents.
         */
        while (true) {
                struct blkcg *pos = blkcg;
                struct blkcg *parent;
                struct blkcg_gq *new_blkg;

                parent = blkcg_parent(blkcg);
                while (parent && !__blkg_lookup(parent, q, false)) {
                        pos = parent;
                        parent = blkcg_parent(parent);
                }

                /* Drop locks to do new blkg allocation with GFP_KERNEL. */
                spin_unlock_irq(q->queue_lock);
                rcu_read_unlock();

                new_blkg = blkg_alloc(pos, q, GFP_KERNEL);
                if (unlikely(!new_blkg)) {
                        ret = -ENOMEM;
                        goto fail;
                }

                rcu_read_lock();
                spin_lock_irq(q->queue_lock);

                blkg = blkg_lookup_check(pos, pol, q);
                if (IS_ERR(blkg)) {
                        ret = PTR_ERR(blkg);
                        goto fail_unlock;
                }

                if (blkg) {
                        blkg_free(new_blkg);
                } else {
                        blkg = blkg_create(pos, q, new_blkg);
                        if (unlikely(IS_ERR(blkg))) {
                                ret = PTR_ERR(blkg);
                                goto fail_unlock;
                        }
                }

                if (pos == blkcg)
                        goto success;
        }
success:
        ctx->disk = disk;
        ctx->blkg = blkg;
        ctx->body = body;
        return 0;

fail_unlock:
        spin_unlock_irq(q->queue_lock);
        rcu_read_unlock();
fail:
        put_disk_and_module(disk);
        /*
         * If queue was bypassing, we should retry.  Do so after a
         * short msleep().  It isn't strictly necessary but queue
         * can be bypassing for some time and it's always nice to
         * avoid busy looping.
         */
        if (ret == -EBUSY) {
                msleep(10);
                ret = restart_syscall();
        }
        return ret;
}
EXPORT_SYMBOL_GPL(blkg_conf_prep);

/**
 * blkg_conf_finish - finish up per-blkg config update
 * @ctx: blkg_conf_ctx initialized by blkg_conf_prep()
 *
 * Finish up after per-blkg config update.  This function must be paired
 * with blkg_conf_prep().
 */
void blkg_conf_finish(struct blkg_conf_ctx *ctx)
        __releases(ctx->disk->queue->queue_lock) __releases(rcu)
{
        spin_unlock_irq(ctx->disk->queue->queue_lock);
        rcu_read_unlock();
        put_disk_and_module(ctx->disk);
}
EXPORT_SYMBOL_GPL(blkg_conf_finish);
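
/*
 * Typical pairing of the two helpers above in a policy's cgroup write
 * handler (a sketch only; "example_pol" and the parse_and_apply() step
 * are hypothetical and not defined in this file):
 *
 *	struct blkg_conf_ctx ctx;
 *	int ret;
 *
 *	ret = blkg_conf_prep(blkcg, &example_pol, buf, &ctx);
 *	if (ret)
 *		return ret;
 *	// both the RCU read lock and the queue lock are held here
 *	ret = parse_and_apply(ctx.blkg, ctx.body);
 *	blkg_conf_finish(&ctx);
 *	return ret ?: nbytes;
 */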

static int blkcg_print_stat(struct seq_file *sf, void *v)
{
        struct blkcg *blkcg = css_to_blkcg(seq_css(sf));
        struct blkcg_gq *blkg;

        rcu_read_lock();

        hlist_for_each_entry_rcu(blkg, &blkcg->blkg_list, blkcg_node) {
                const char *dname;
                struct blkg_rwstat rwstat;
                u64 rbytes, wbytes, rios, wios;

                dname = blkg_dev_name(blkg);
                if (!dname)
                        continue;

                spin_lock_irq(blkg->q->queue_lock);

                rwstat = blkg_rwstat_recursive_sum(blkg, NULL,
                                        offsetof(struct blkcg_gq, stat_bytes));
                rbytes = atomic64_read(&rwstat.aux_cnt[BLKG_RWSTAT_READ]);
                wbytes = atomic64_read(&rwstat.aux_cnt[BLKG_RWSTAT_WRITE]);

                rwstat = blkg_rwstat_recursive_sum(blkg, NULL,
                                        offsetof(struct blkcg_gq, stat_ios));
                rios = atomic64_read(&rwstat.aux_cnt[BLKG_RWSTAT_READ]);
                wios = atomic64_read(&rwstat.aux_cnt[BLKG_RWSTAT_WRITE]);

                spin_unlock_irq(blkg->q->queue_lock);

                if (rbytes || wbytes || rios || wios)
                        seq_printf(sf, "%s rbytes=%llu wbytes=%llu rios=%llu wios=%llu\n",
                                   dname, rbytes, wbytes, rios, wios);
        }

        rcu_read_unlock();
        return 0;
}

static struct cftype blkcg_files[] = {
        {
                .name = "stat",
                .flags = CFTYPE_NOT_ON_ROOT,
                .seq_show = blkcg_print_stat,
        },
        { }     /* terminate */
};

static struct cftype blkcg_legacy_files[] = {
        {
                .name = "reset_stats",
                .write_u64 = blkcg_reset_stats,
        },
        { }     /* terminate */
};

/**
 * blkcg_css_offline - cgroup css_offline callback
 * @css: css of interest
 *
 * This function is called when @css is about to go away and is responsible
 * for offlining the pd of all blkgs and killing all wbs associated with
 * @css.  Offlining blkg pd should be done while holding both q and blkcg
 * locks.  As blkcg lock is nested inside q lock, this function performs
 * reverse double lock dancing.
 *
 * This is the blkcg counterpart of ioc_release_fn().
 */
static void blkcg_css_offline(struct cgroup_subsys_state *css)
{
        struct blkcg *blkcg = css_to_blkcg(css);
        struct blkcg_gq *blkg;

        spin_lock_irq(&blkcg->lock);

        hlist_for_each_entry(blkg, &blkcg->blkg_list, blkcg_node) {
                struct request_queue *q = blkg->q;

                if (spin_trylock(q->queue_lock)) {
                        blkg_pd_offline(blkg);
                        spin_unlock(q->queue_lock);
                } else {
                        spin_unlock_irq(&blkcg->lock);
                        cpu_relax();
                        spin_lock_irq(&blkcg->lock);
                }
        }

        spin_unlock_irq(&blkcg->lock);

        wb_blkcg_offline(blkcg);
}

/**
 * blkcg_destroy_all_blkgs - destroy all blkgs associated with a blkcg
 * @blkcg: blkcg of interest
 *
 * This function is called when the blkcg css is about to be freed and is
 * responsible for destroying all blkgs associated with @blkcg.
 * blkgs should be removed while holding both q and blkcg locks.  As blkcg
 * lock is nested inside q lock, this function performs reverse double lock
 * dancing.
 */
static void blkcg_destroy_all_blkgs(struct blkcg *blkcg)
{
        spin_lock_irq(&blkcg->lock);
        while (!hlist_empty(&blkcg->blkg_list)) {
                struct blkcg_gq *blkg = hlist_entry(blkcg->blkg_list.first,
                                                    struct blkcg_gq,
                                                    blkcg_node);
                struct request_queue *q = blkg->q;

                if (spin_trylock(q->queue_lock)) {
                        blkg_destroy(blkg);
                        spin_unlock(q->queue_lock);
                } else {
                        spin_unlock_irq(&blkcg->lock);
                        cpu_relax();
                        spin_lock_irq(&blkcg->lock);
                }
        }
        spin_unlock_irq(&blkcg->lock);
}

static void blkcg_css_free(struct cgroup_subsys_state *css)
{
        struct blkcg *blkcg = css_to_blkcg(css);
        int i;

        blkcg_destroy_all_blkgs(blkcg);

        mutex_lock(&blkcg_pol_mutex);

        list_del(&blkcg->all_blkcgs_node);

        for (i = 0; i < BLKCG_MAX_POLS; i++)
                if (blkcg->cpd[i])
                        blkcg_policy[i]->cpd_free_fn(blkcg->cpd[i]);

        mutex_unlock(&blkcg_pol_mutex);

        kfree(blkcg);
}

static struct cgroup_subsys_state *
blkcg_css_alloc(struct cgroup_subsys_state *parent_css)
{
        struct blkcg *blkcg;
        struct cgroup_subsys_state *ret;
        int i;

        mutex_lock(&blkcg_pol_mutex);

        if (!parent_css) {
                blkcg = &blkcg_root;
        } else {
                blkcg = kzalloc(sizeof(*blkcg), GFP_KERNEL);
                if (!blkcg) {
                        ret = ERR_PTR(-ENOMEM);
                        goto unlock;
                }
        }

        for (i = 0; i < BLKCG_MAX_POLS; i++) {
                struct blkcg_policy *pol = blkcg_policy[i];
                struct blkcg_policy_data *cpd;

                /*
                 * If the policy hasn't been attached yet, wait for it
                 * to be attached before doing anything else. Otherwise,
                 * check if the policy requires any specific per-cgroup
                 * data: if it does, allocate and initialize it.
                 */
                if (!pol || !pol->cpd_alloc_fn)
                        continue;

                cpd = pol->cpd_alloc_fn(GFP_KERNEL);
                if (!cpd) {
                        ret = ERR_PTR(-ENOMEM);
                        goto free_pd_blkcg;
                }
                blkcg->cpd[i] = cpd;
                cpd->blkcg = blkcg;
                cpd->plid = i;
                if (pol->cpd_init_fn)
                        pol->cpd_init_fn(cpd);
        }

        spin_lock_init(&blkcg->lock);
        INIT_RADIX_TREE(&blkcg->blkg_tree, GFP_NOWAIT | __GFP_NOWARN);
        INIT_HLIST_HEAD(&blkcg->blkg_list);
#ifdef CONFIG_CGROUP_WRITEBACK
        INIT_LIST_HEAD(&blkcg->cgwb_list);
#endif
        list_add_tail(&blkcg->all_blkcgs_node, &all_blkcgs);

        mutex_unlock(&blkcg_pol_mutex);
        return &blkcg->css;

free_pd_blkcg:
        for (i--; i >= 0; i--)
                if (blkcg->cpd[i])
                        blkcg_policy[i]->cpd_free_fn(blkcg->cpd[i]);

        if (blkcg != &blkcg_root)
                kfree(blkcg);
unlock:
        mutex_unlock(&blkcg_pol_mutex);
        return ret;
}

/**
 * blkcg_init_queue - initialize blkcg part of request queue
 * @q: request_queue to initialize
 *
 * Called from blk_alloc_queue_node(). Responsible for initializing blkcg
 * part of new request_queue @q.
 *
 * RETURNS:
 * 0 on success, -errno on failure.
 */
int blkcg_init_queue(struct request_queue *q)
{
        struct blkcg_gq *new_blkg, *blkg;
        bool preloaded;
        int ret;

        new_blkg = blkg_alloc(&blkcg_root, q, GFP_KERNEL);
        if (!new_blkg)
                return -ENOMEM;

        preloaded = !radix_tree_preload(GFP_KERNEL);

        /*
         * Make sure the root blkg exists and count the existing blkgs.  As
         * @q is bypassing at this point, blkg_lookup_create() can't be
         * used.  Open code insertion.
         */
        rcu_read_lock();
        spin_lock_irq(q->queue_lock);
        blkg = blkg_create(&blkcg_root, q, new_blkg);
        spin_unlock_irq(q->queue_lock);
        rcu_read_unlock();

        if (preloaded)
                radix_tree_preload_end();

        if (IS_ERR(blkg))
                return PTR_ERR(blkg);

        q->root_blkg = blkg;
        q->root_rl.blkg = blkg;

        ret = blk_throtl_init(q);
        if (ret) {
                spin_lock_irq(q->queue_lock);
                blkg_destroy_all(q);
                spin_unlock_irq(q->queue_lock);
        }
        return ret;
}

/**
 * blkcg_drain_queue - drain blkcg part of request_queue
 * @q: request_queue to drain
 *
 * Called from blk_drain_queue().  Responsible for draining blkcg part.
 */
void blkcg_drain_queue(struct request_queue *q)
{
        lockdep_assert_held(q->queue_lock);

        /*
         * @q could be exiting and already have destroyed all blkgs as
         * indicated by NULL root_blkg.  If so, don't confuse policies.
         */
        if (!q->root_blkg)
                return;

        blk_throtl_drain(q);
}

/**
 * blkcg_exit_queue - exit and release blkcg part of request_queue
 * @q: request_queue being released
 *
 * Called from blk_release_queue().  Responsible for exiting blkcg part.
 */
void blkcg_exit_queue(struct request_queue *q)
{
        spin_lock_irq(q->queue_lock);
        blkg_destroy_all(q);
        spin_unlock_irq(q->queue_lock);

        blk_throtl_exit(q);
}

/*
 * We cannot support shared io contexts, as we have no means to support
 * two tasks with the same ioc in two different groups without major rework
 * of the main cic data structures.  For now we allow a task to change
 * its cgroup only if it's the only owner of its ioc.
 */
static int blkcg_can_attach(struct cgroup_taskset *tset)
{
        struct task_struct *task;
        struct cgroup_subsys_state *dst_css;
        struct io_context *ioc;
        int ret = 0;

        /* task_lock() is needed to avoid races with exit_io_context() */
        cgroup_taskset_for_each(task, dst_css, tset) {
                task_lock(task);
                ioc = task->io_context;
                if (ioc && atomic_read(&ioc->nr_tasks) > 1)
                        ret = -EINVAL;
                task_unlock(task);
                if (ret)
                        break;
        }
        return ret;
}

static void blkcg_bind(struct cgroup_subsys_state *root_css)
{
        int i;

        mutex_lock(&blkcg_pol_mutex);

        for (i = 0; i < BLKCG_MAX_POLS; i++) {
                struct blkcg_policy *pol = blkcg_policy[i];
                struct blkcg *blkcg;

                if (!pol || !pol->cpd_bind_fn)
                        continue;

                list_for_each_entry(blkcg, &all_blkcgs, all_blkcgs_node)
                        if (blkcg->cpd[pol->plid])
                                pol->cpd_bind_fn(blkcg->cpd[pol->plid]);
        }
        mutex_unlock(&blkcg_pol_mutex);
}

struct cgroup_subsys io_cgrp_subsys = {
        .css_alloc = blkcg_css_alloc,
        .css_offline = blkcg_css_offline,
        .css_free = blkcg_css_free,
        .can_attach = blkcg_can_attach,
        .bind = blkcg_bind,
        .dfl_cftypes = blkcg_files,
        .legacy_cftypes = blkcg_legacy_files,
        .legacy_name = "blkio",
#ifdef CONFIG_MEMCG
        /*
         * This ensures that, if available, memcg is automatically enabled
         * together on the default hierarchy so that the owner cgroup can
         * be retrieved from writeback pages.
         */
        .depends_on = 1 << memory_cgrp_id,
#endif
};
EXPORT_SYMBOL_GPL(io_cgrp_subsys);

/**
 * blkcg_activate_policy - activate a blkcg policy on a request_queue
 * @q: request_queue of interest
 * @pol: blkcg policy to activate
 *
 * Activate @pol on @q.  Requires %GFP_KERNEL context.  @q goes through
 * bypass mode to populate its blkgs with policy_data for @pol.
 *
 * Activation happens with @q bypassed, so nobody would be accessing blkgs
 * from IO path.  Update of each blkg is protected by both queue and blkcg
 * locks so that holding either lock and testing blkcg_policy_enabled() is
 * always enough for dereferencing policy data.
 *
 * The caller is responsible for synchronizing [de]activations and policy
 * [un]registrations.  Returns 0 on success, -errno on failure.
 */
int blkcg_activate_policy(struct request_queue *q,
                          const struct blkcg_policy *pol)
{
        struct blkg_policy_data *pd_prealloc = NULL;
        struct blkcg_gq *blkg;
        int ret;

        if (blkcg_policy_enabled(q, pol))
                return 0;

        if (q->mq_ops)
                blk_mq_freeze_queue(q);
        else
                blk_queue_bypass_start(q);
pd_prealloc:
        if (!pd_prealloc) {
                pd_prealloc = pol->pd_alloc_fn(GFP_KERNEL, q->node);
                if (!pd_prealloc) {
                        ret = -ENOMEM;
                        goto out_bypass_end;
                }
        }

        spin_lock_irq(q->queue_lock);

        list_for_each_entry(blkg, &q->blkg_list, q_node) {
                struct blkg_policy_data *pd;

                if (blkg->pd[pol->plid])
                        continue;

                pd = pol->pd_alloc_fn(GFP_NOWAIT | __GFP_NOWARN, q->node);
                if (!pd)
                        swap(pd, pd_prealloc);
                if (!pd) {
                        spin_unlock_irq(q->queue_lock);
                        goto pd_prealloc;
                }

                blkg->pd[pol->plid] = pd;
                pd->blkg = blkg;
                pd->plid = pol->plid;
                if (pol->pd_init_fn)
                        pol->pd_init_fn(pd);
        }

        __set_bit(pol->plid, q->blkcg_pols);
        ret = 0;

        spin_unlock_irq(q->queue_lock);
out_bypass_end:
        if (q->mq_ops)
                blk_mq_unfreeze_queue(q);
        else
                blk_queue_bypass_end(q);
        if (pd_prealloc)
                pol->pd_free_fn(pd_prealloc);
        return ret;
}
EXPORT_SYMBOL_GPL(blkcg_activate_policy);
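
/*
 * Rough ordering from a policy's perspective (a sketch; "example_pol" is a
 * placeholder): the policy is registered once with blkcg_policy_register()
 * and then activated per queue before it starts dereferencing its per-blkg
 * policy data.
 *
 *	ret = blkcg_activate_policy(q, &example_pol);
 *	if (ret)
 *		return ret;
 *	// from here on, blkg_to_pd(blkg, &example_pol) is valid for every
 *	// blkg of q until blkcg_deactivate_policy() is called
 */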

/**
 * blkcg_deactivate_policy - deactivate a blkcg policy on a request_queue
 * @q: request_queue of interest
 * @pol: blkcg policy to deactivate
 *
 * Deactivate @pol on @q.  Follows the same synchronization rules as
 * blkcg_activate_policy().
 */
void blkcg_deactivate_policy(struct request_queue *q,
                             const struct blkcg_policy *pol)
{
        struct blkcg_gq *blkg;

        if (!blkcg_policy_enabled(q, pol))
                return;

        if (q->mq_ops)
                blk_mq_freeze_queue(q);
        else
                blk_queue_bypass_start(q);

        spin_lock_irq(q->queue_lock);

        __clear_bit(pol->plid, q->blkcg_pols);

        list_for_each_entry(blkg, &q->blkg_list, q_node) {
                /* grab blkcg lock too while removing @pd from @blkg */
                spin_lock(&blkg->blkcg->lock);

                if (blkg->pd[pol->plid]) {
                        if (!blkg->pd[pol->plid]->offline &&
                            pol->pd_offline_fn) {
                                pol->pd_offline_fn(blkg->pd[pol->plid]);
                                blkg->pd[pol->plid]->offline = true;
                        }
                        pol->pd_free_fn(blkg->pd[pol->plid]);
                        blkg->pd[pol->plid] = NULL;
                }

                spin_unlock(&blkg->blkcg->lock);
        }

        spin_unlock_irq(q->queue_lock);

        if (q->mq_ops)
                blk_mq_unfreeze_queue(q);
        else
                blk_queue_bypass_end(q);
}
EXPORT_SYMBOL_GPL(blkcg_deactivate_policy);

/**
 * blkcg_policy_register - register a blkcg policy
 * @pol: blkcg policy to register
 *
 * Register @pol with blkcg core.  Might sleep and @pol may be modified on
 * successful registration.  Returns 0 on success and -errno on failure.
 */
int blkcg_policy_register(struct blkcg_policy *pol)
{
        struct blkcg *blkcg;
        int i, ret;

        mutex_lock(&blkcg_pol_register_mutex);
        mutex_lock(&blkcg_pol_mutex);

        /* find an empty slot */
        ret = -ENOSPC;
        for (i = 0; i < BLKCG_MAX_POLS; i++)
                if (!blkcg_policy[i])
                        break;
        if (i >= BLKCG_MAX_POLS)
                goto err_unlock;

        /* Make sure cpd/pd_alloc_fn and cpd/pd_free_fn come in pairs */
        if ((!pol->cpd_alloc_fn ^ !pol->cpd_free_fn) ||
                (!pol->pd_alloc_fn ^ !pol->pd_free_fn))
                goto err_unlock;

        /* register @pol */
        pol->plid = i;
        blkcg_policy[pol->plid] = pol;

        /* allocate and install cpd's */
        if (pol->cpd_alloc_fn) {
                list_for_each_entry(blkcg, &all_blkcgs, all_blkcgs_node) {
                        struct blkcg_policy_data *cpd;

                        cpd = pol->cpd_alloc_fn(GFP_KERNEL);
                        if (!cpd)
                                goto err_free_cpds;

                        blkcg->cpd[pol->plid] = cpd;
                        cpd->blkcg = blkcg;
                        cpd->plid = pol->plid;
                        pol->cpd_init_fn(cpd);
                }
        }

        mutex_unlock(&blkcg_pol_mutex);

        /* everything is in place, add intf files for the new policy */
        if (pol->dfl_cftypes)
                WARN_ON(cgroup_add_dfl_cftypes(&io_cgrp_subsys,
                                               pol->dfl_cftypes));
        if (pol->legacy_cftypes)
                WARN_ON(cgroup_add_legacy_cftypes(&io_cgrp_subsys,
                                                  pol->legacy_cftypes));
        mutex_unlock(&blkcg_pol_register_mutex);
        return 0;

err_free_cpds:
        if (pol->cpd_free_fn) {
                list_for_each_entry(blkcg, &all_blkcgs, all_blkcgs_node) {
                        if (blkcg->cpd[pol->plid]) {
                                pol->cpd_free_fn(blkcg->cpd[pol->plid]);
                                blkcg->cpd[pol->plid] = NULL;
                        }
                }
        }
        blkcg_policy[pol->plid] = NULL;
err_unlock:
        mutex_unlock(&blkcg_pol_mutex);
        mutex_unlock(&blkcg_pol_register_mutex);
        return ret;
}
EXPORT_SYMBOL_GPL(blkcg_policy_register);
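
/*
 * Typical registration flow for a policy module (a sketch only; the
 * "example_pol" structure, its callbacks and cftype arrays are
 * hypothetical, though the field names match those used in this file):
 *
 *	static struct blkcg_policy example_pol = {
 *		.dfl_cftypes	= example_dfl_files,
 *		.legacy_cftypes	= example_legacy_files,
 *		.pd_alloc_fn	= example_pd_alloc,
 *		.pd_init_fn	= example_pd_init,
 *		.pd_offline_fn	= example_pd_offline,
 *		.pd_free_fn	= example_pd_free,
 *	};
 *
 *	static int __init example_init(void)
 *	{
 *		return blkcg_policy_register(&example_pol);
 *	}
 *
 *	static void __exit example_exit(void)
 *	{
 *		blkcg_policy_unregister(&example_pol);
 *	}
 */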

/**
 * blkcg_policy_unregister - unregister a blkcg policy
 * @pol: blkcg policy to unregister
 *
 * Undo blkcg_policy_register(@pol).  Might sleep.
 */
void blkcg_policy_unregister(struct blkcg_policy *pol)
{
        struct blkcg *blkcg;

        mutex_lock(&blkcg_pol_register_mutex);

        if (WARN_ON(blkcg_policy[pol->plid] != pol))
                goto out_unlock;

        /* kill the intf files first */
        if (pol->dfl_cftypes)
                cgroup_rm_cftypes(pol->dfl_cftypes);
        if (pol->legacy_cftypes)
                cgroup_rm_cftypes(pol->legacy_cftypes);

        /* remove cpds and unregister */
        mutex_lock(&blkcg_pol_mutex);

        if (pol->cpd_free_fn) {
                list_for_each_entry(blkcg, &all_blkcgs, all_blkcgs_node) {
                        if (blkcg->cpd[pol->plid]) {
                                pol->cpd_free_fn(blkcg->cpd[pol->plid]);
                                blkcg->cpd[pol->plid] = NULL;
                        }
                }
        }
        blkcg_policy[pol->plid] = NULL;

        mutex_unlock(&blkcg_pol_mutex);
out_unlock:
        mutex_unlock(&blkcg_pol_register_mutex);
}
EXPORT_SYMBOL_GPL(blkcg_policy_unregister);