/*
 * Common Block IO controller cgroup interface
 *
 * Based on ideas and code from CFQ, CFS and BFQ:
 * Copyright (C) 2003 Jens Axboe <axboe@kernel.dk>
 *
 * Copyright (C) 2008 Fabio Checconi <fabio@gandalf.sssup.it>
 *		      Paolo Valente <paolo.valente@unimore.it>
 *
 * Copyright (C) 2009 Vivek Goyal <vgoyal@redhat.com>
 *		      Nauman Rafique <nauman@google.com>
 *
 * For policy-specific per-blkcg data:
 * Copyright (C) 2015 Paolo Valente <paolo.valente@unimore.it>
 *		      Arianna Avanzini <avanzini.arianna@gmail.com>
 */
#include <linux/ioprio.h>
#include <linux/kdev_t.h>
#include <linux/module.h>
#include <linux/sched/signal.h>
#include <linux/err.h>
#include <linux/blkdev.h>
#include <linux/backing-dev.h>
#include <linux/slab.h>
#include <linux/genhd.h>
#include <linux/delay.h>
#include <linux/atomic.h>
#include <linux/ctype.h>
#include <linux/blk-cgroup.h>
#include "blk.h"

#define MAX_KEY_LEN 100

/*
 * blkcg_pol_mutex protects blkcg_policy[] and policy [de]activation.
 * blkcg_pol_register_mutex nests outside of it and synchronizes entire
 * policy [un]register operations including cgroup file additions /
 * removals.  Putting cgroup file registration outside blkcg_pol_mutex
 * allows grabbing it from cgroup callbacks.
 */
static DEFINE_MUTEX(blkcg_pol_register_mutex);
static DEFINE_MUTEX(blkcg_pol_mutex);

struct blkcg blkcg_root;
EXPORT_SYMBOL_GPL(blkcg_root);

struct cgroup_subsys_state * const blkcg_root_css = &blkcg_root.css;

static struct blkcg_policy *blkcg_policy[BLKCG_MAX_POLS];

static LIST_HEAD(all_blkcgs);		/* protected by blkcg_pol_mutex */

static bool blkcg_policy_enabled(struct request_queue *q,
				 const struct blkcg_policy *pol)
{
	return pol && test_bit(pol->plid, q->blkcg_pols);
}

/**
 * blkg_free - free a blkg
 * @blkg: blkg to free
 *
 * Free @blkg which may be partially allocated.
 */
static void blkg_free(struct blkcg_gq *blkg)
{
	int i;

	if (!blkg)
		return;

	for (i = 0; i < BLKCG_MAX_POLS; i++)
		if (blkg->pd[i])
			blkcg_policy[i]->pd_free_fn(blkg->pd[i]);

	if (blkg->blkcg != &blkcg_root)
		blk_exit_rl(&blkg->rl);

	blkg_rwstat_exit(&blkg->stat_ios);
	blkg_rwstat_exit(&blkg->stat_bytes);
	kfree(blkg);
}

/**
 * blkg_alloc - allocate a blkg
 * @blkcg: block cgroup the new blkg is associated with
 * @q: request_queue the new blkg is associated with
 * @gfp_mask: allocation mask to use
 *
 * Allocate a new blkg associating @blkcg and @q.
 */
static struct blkcg_gq *blkg_alloc(struct blkcg *blkcg, struct request_queue *q,
				   gfp_t gfp_mask)
{
	struct blkcg_gq *blkg;
	int i;

	/* alloc and init base part */
	blkg = kzalloc_node(sizeof(*blkg), gfp_mask, q->node);
	if (!blkg)
		return NULL;

	if (blkg_rwstat_init(&blkg->stat_bytes, gfp_mask) ||
	    blkg_rwstat_init(&blkg->stat_ios, gfp_mask))
		goto err_free;

	blkg->q = q;
	INIT_LIST_HEAD(&blkg->q_node);
	blkg->blkcg = blkcg;
	atomic_set(&blkg->refcnt, 1);

	/* root blkg uses @q->root_rl, init rl only for !root blkgs */
	if (blkcg != &blkcg_root) {
		if (blk_init_rl(&blkg->rl, q, gfp_mask))
			goto err_free;
		blkg->rl.blkg = blkg;
	}

	for (i = 0; i < BLKCG_MAX_POLS; i++) {
		struct blkcg_policy *pol = blkcg_policy[i];
		struct blkg_policy_data *pd;

		if (!blkcg_policy_enabled(q, pol))
			continue;

		/* alloc per-policy data and attach it to blkg */
		pd = pol->pd_alloc_fn(gfp_mask, q->node);
		if (!pd)
			goto err_free;

		blkg->pd[i] = pd;
		pd->blkg = blkg;
		pd->plid = i;
	}

	return blkg;

err_free:
	blkg_free(blkg);
	return NULL;
}

struct blkcg_gq *blkg_lookup_slowpath(struct blkcg *blkcg,
				      struct request_queue *q, bool update_hint)
{
	struct blkcg_gq *blkg;

	/*
	 * Hint didn't match.  Look up from the radix tree.  Note that the
	 * hint can only be updated under queue_lock as otherwise @blkg
	 * could have already been removed from blkg_tree.  The caller is
	 * responsible for grabbing queue_lock if @update_hint.
	 */
	blkg = radix_tree_lookup(&blkcg->blkg_tree, q->id);
	if (blkg && blkg->q == q) {
		if (update_hint) {
			lockdep_assert_held(q->queue_lock);
			rcu_assign_pointer(blkcg->blkg_hint, blkg);
		}
		return blkg;
	}

	return NULL;
}
EXPORT_SYMBOL_GPL(blkg_lookup_slowpath);

/*
 * If @new_blkg is %NULL, this function tries to allocate a new one as
 * necessary using %GFP_NOWAIT.  @new_blkg is always consumed on return.
 */
static struct blkcg_gq *blkg_create(struct blkcg *blkcg,
				    struct request_queue *q,
				    struct blkcg_gq *new_blkg)
{
	struct blkcg_gq *blkg;
	struct bdi_writeback_congested *wb_congested;
	int i, ret;

	WARN_ON_ONCE(!rcu_read_lock_held());
	lockdep_assert_held(q->queue_lock);

	/* blkg holds a reference to blkcg */
	if (!css_tryget_online(&blkcg->css)) {
		ret = -ENODEV;
		goto err_free_blkg;
	}

	wb_congested = wb_congested_get_create(q->backing_dev_info,
					       blkcg->css.id,
					       GFP_NOWAIT | __GFP_NOWARN);
	if (!wb_congested) {
		ret = -ENOMEM;
		goto err_put_css;
	}

	/* allocate */
	if (!new_blkg) {
		new_blkg = blkg_alloc(blkcg, q, GFP_NOWAIT | __GFP_NOWARN);
		if (unlikely(!new_blkg)) {
			ret = -ENOMEM;
			goto err_put_congested;
		}
	}
	blkg = new_blkg;
	blkg->wb_congested = wb_congested;

	/* link parent */
	if (blkcg_parent(blkcg)) {
		blkg->parent = __blkg_lookup(blkcg_parent(blkcg), q, false);
		if (WARN_ON_ONCE(!blkg->parent)) {
			ret = -ENODEV;
			goto err_put_congested;
		}
		blkg_get(blkg->parent);
	}

	/* invoke per-policy init */
	for (i = 0; i < BLKCG_MAX_POLS; i++) {
		struct blkcg_policy *pol = blkcg_policy[i];

		if (blkg->pd[i] && pol->pd_init_fn)
			pol->pd_init_fn(blkg->pd[i]);
	}

	/* insert */
	spin_lock(&blkcg->lock);
	ret = radix_tree_insert(&blkcg->blkg_tree, q->id, blkg);
	if (likely(!ret)) {
		hlist_add_head_rcu(&blkg->blkcg_node, &blkcg->blkg_list);
		list_add(&blkg->q_node, &q->blkg_list);

		for (i = 0; i < BLKCG_MAX_POLS; i++) {
			struct blkcg_policy *pol = blkcg_policy[i];

			if (blkg->pd[i] && pol->pd_online_fn)
				pol->pd_online_fn(blkg->pd[i]);
		}
	}
	blkg->online = true;
	spin_unlock(&blkcg->lock);

	if (!ret)
		return blkg;

	/* @blkg failed to be fully initialized, use the usual release path */
	blkg_put(blkg);
	return ERR_PTR(ret);

err_put_congested:
	wb_congested_put(wb_congested);
err_put_css:
	css_put(&blkcg->css);
err_free_blkg:
	blkg_free(new_blkg);
	return ERR_PTR(ret);
}

/**
 * blkg_lookup_create - lookup blkg, try to create one if not there
 * @blkcg: blkcg of interest
 * @q: request_queue of interest
 *
 * Lookup blkg for the @blkcg - @q pair.  If it doesn't exist, try to
 * create one.  blkg creation is performed recursively from blkcg_root such
 * that all non-root blkg's have access to the parent blkg.  This function
 * should be called under RCU read lock and @q->queue_lock.
 *
 * Returns pointer to the looked up or created blkg on success, ERR_PTR()
 * value on error.  If @q is dead, returns ERR_PTR(-ENODEV).  If @q is not
 * dead and bypassing, returns ERR_PTR(-EBUSY).
 */
struct blkcg_gq *blkg_lookup_create(struct blkcg *blkcg,
				    struct request_queue *q)
{
	struct blkcg_gq *blkg;

	WARN_ON_ONCE(!rcu_read_lock_held());
	lockdep_assert_held(q->queue_lock);

	/*
	 * This could be the first entry point of blkcg implementation and
	 * we shouldn't allow anything to go through for a bypassing queue.
	 */
	if (unlikely(blk_queue_bypass(q)))
		return ERR_PTR(blk_queue_dying(q) ? -ENODEV : -EBUSY);

	blkg = __blkg_lookup(blkcg, q, true);
	if (blkg)
		return blkg;

	/*
	 * Create blkgs walking down from blkcg_root to @blkcg, so that all
	 * non-root blkgs have access to their parents.
	 */
	while (true) {
		struct blkcg *pos = blkcg;
		struct blkcg *parent = blkcg_parent(blkcg);

		while (parent && !__blkg_lookup(parent, q, false)) {
			pos = parent;
			parent = blkcg_parent(parent);
		}

		blkg = blkg_create(pos, q, NULL);
		if (pos == blkcg || IS_ERR(blkg))
			return blkg;
	}
}

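/*
 * Example usage (an illustrative sketch, not part of this file): an IO
 * path caller would typically try the fast-path lookup first and fall
 * back to blkg_lookup_create() while holding the RCU read lock and
 * queue_lock, as required above.  "bio" and "q" are assumed to come from
 * the caller's context; falling back to the root blkg on error mirrors
 * what in-tree callers do.
 *
 *	rcu_read_lock();
 *	spin_lock_irq(q->queue_lock);
 *	blkg = blkg_lookup_create(bio_blkcg(bio), q);
 *	if (IS_ERR(blkg))
 *		blkg = q->root_blkg;
 *	spin_unlock_irq(q->queue_lock);
 *	rcu_read_unlock();
 */
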
static void blkg_destroy(struct blkcg_gq *blkg)
{
	struct blkcg *blkcg = blkg->blkcg;
	struct blkcg_gq *parent = blkg->parent;
	int i;

	lockdep_assert_held(blkg->q->queue_lock);
	lockdep_assert_held(&blkcg->lock);

	/* Something wrong if we are trying to remove same group twice */
	WARN_ON_ONCE(list_empty(&blkg->q_node));
	WARN_ON_ONCE(hlist_unhashed(&blkg->blkcg_node));

	for (i = 0; i < BLKCG_MAX_POLS; i++) {
		struct blkcg_policy *pol = blkcg_policy[i];

		if (blkg->pd[i] && pol->pd_offline_fn)
			pol->pd_offline_fn(blkg->pd[i]);
	}

	if (parent) {
		blkg_rwstat_add_aux(&parent->stat_bytes, &blkg->stat_bytes);
		blkg_rwstat_add_aux(&parent->stat_ios, &blkg->stat_ios);
	}

	blkg->online = false;

	radix_tree_delete(&blkcg->blkg_tree, blkg->q->id);
	list_del_init(&blkg->q_node);
	hlist_del_init_rcu(&blkg->blkcg_node);

	/*
	 * Both setting lookup hint to and clearing it from @blkg are done
	 * under queue_lock.  If it's not pointing to @blkg now, it never
	 * will.  Hint assignment itself can race safely.
	 */
	if (rcu_access_pointer(blkcg->blkg_hint) == blkg)
		rcu_assign_pointer(blkcg->blkg_hint, NULL);

	/*
	 * Put the reference taken at the time of creation so that when all
	 * queues are gone, group can be destroyed.
	 */
	blkg_put(blkg);
}

/**
 * blkg_destroy_all - destroy all blkgs associated with a request_queue
 * @q: request_queue of interest
 *
 * Destroy all blkgs associated with @q.
 */
static void blkg_destroy_all(struct request_queue *q)
{
	struct blkcg_gq *blkg, *n;

	lockdep_assert_held(q->queue_lock);

	list_for_each_entry_safe(blkg, n, &q->blkg_list, q_node) {
		struct blkcg *blkcg = blkg->blkcg;

		spin_lock(&blkcg->lock);
		blkg_destroy(blkg);
		spin_unlock(&blkcg->lock);
	}

	q->root_blkg = NULL;
	q->root_rl.blkg = NULL;
}

/*
 * A group is RCU protected, but having an rcu lock does not mean that one
 * can access all the fields of blkg and assume these are valid.  For
 * example, don't try to follow throtl_data and request queue links.
 *
 * Having a reference to blkg under an rcu allows accesses to only values
 * local to groups like group stats and group rate limits.
 */
void __blkg_release_rcu(struct rcu_head *rcu_head)
{
	struct blkcg_gq *blkg = container_of(rcu_head, struct blkcg_gq, rcu_head);

	/* release the blkcg and parent blkg refs this blkg has been holding */
	css_put(&blkg->blkcg->css);
	if (blkg->parent)
		blkg_put(blkg->parent);

	wb_congested_put(blkg->wb_congested);

	blkg_free(blkg);
}
EXPORT_SYMBOL_GPL(__blkg_release_rcu);

/*
 * The "next" function used by blk_queue_for_each_rl().  It's a bit tricky
 * because the root blkg uses @q->root_rl instead of its own rl.
 */
struct request_list *__blk_queue_next_rl(struct request_list *rl,
					 struct request_queue *q)
{
	struct list_head *ent;
	struct blkcg_gq *blkg;

	/*
	 * Determine the current blkg list_head.  The first entry is
	 * root_rl which is off @q->blkg_list and mapped to the head.
	 */
	if (rl == &q->root_rl) {
		ent = &q->blkg_list;
		/* There are no more block groups, hence no request lists */
		if (list_empty(ent))
			return NULL;
	} else {
		blkg = container_of(rl, struct blkcg_gq, rl);
		ent = &blkg->q_node;
	}

	/* walk to the next list_head, skip root blkcg */
	ent = ent->next;
	if (ent == &q->root_blkg->q_node)
		ent = ent->next;
	if (ent == &q->blkg_list)
		return NULL;

	blkg = container_of(ent, struct blkcg_gq, q_node);
	return &blkg->rl;
}

static int blkcg_reset_stats(struct cgroup_subsys_state *css,
			     struct cftype *cftype, u64 val)
{
	struct blkcg *blkcg = css_to_blkcg(css);
	struct blkcg_gq *blkg;
	int i;

	mutex_lock(&blkcg_pol_mutex);
	spin_lock_irq(&blkcg->lock);

	/*
	 * Note that stat reset is racy - it doesn't synchronize against
	 * stat updates.  This is a debug feature which shouldn't exist
	 * anyway.  If you get hit by a race, retry.
	 */
	hlist_for_each_entry(blkg, &blkcg->blkg_list, blkcg_node) {
		blkg_rwstat_reset(&blkg->stat_bytes);
		blkg_rwstat_reset(&blkg->stat_ios);

		for (i = 0; i < BLKCG_MAX_POLS; i++) {
			struct blkcg_policy *pol = blkcg_policy[i];

			if (blkg->pd[i] && pol->pd_reset_stats_fn)
				pol->pd_reset_stats_fn(blkg->pd[i]);
		}
	}

	spin_unlock_irq(&blkcg->lock);
	mutex_unlock(&blkcg_pol_mutex);
	return 0;
}

const char *blkg_dev_name(struct blkcg_gq *blkg)
{
	/* some drivers (floppy) instantiate a queue w/o disk registered */
	if (blkg->q->backing_dev_info->dev)
		return dev_name(blkg->q->backing_dev_info->dev);
	return NULL;
}
EXPORT_SYMBOL_GPL(blkg_dev_name);

/**
 * blkcg_print_blkgs - helper for printing per-blkg data
 * @sf: seq_file to print to
 * @blkcg: blkcg of interest
 * @prfill: fill function to print out a blkg
 * @pol: policy in question
 * @data: data to be passed to @prfill
 * @show_total: to print out sum of prfill return values or not
 *
 * This function invokes @prfill on each blkg of @blkcg if pd for the
 * policy specified by @pol exists.  @prfill is invoked with @sf, the
 * policy data and @data and the matching queue lock held.  If @show_total
 * is %true, the sum of the return values from @prfill is printed with
 * "Total" label at the end.
 *
 * This is to be used to construct print functions for
 * cftype->seq_show method.
 */
void blkcg_print_blkgs(struct seq_file *sf, struct blkcg *blkcg,
		       u64 (*prfill)(struct seq_file *,
				     struct blkg_policy_data *, int),
		       const struct blkcg_policy *pol, int data,
		       bool show_total)
{
	struct blkcg_gq *blkg;
	u64 total = 0;

	rcu_read_lock();
	hlist_for_each_entry_rcu(blkg, &blkcg->blkg_list, blkcg_node) {
		spin_lock_irq(blkg->q->queue_lock);
		if (blkcg_policy_enabled(blkg->q, pol))
			total += prfill(sf, blkg->pd[pol->plid], data);
		spin_unlock_irq(blkg->q->queue_lock);
	}
	rcu_read_unlock();

	if (show_total)
		seq_printf(sf, "Total %llu\n", (unsigned long long)total);
}
EXPORT_SYMBOL_GPL(blkcg_print_blkgs);
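
/*
 * Example usage (an illustrative sketch, not part of this file): a policy
 * typically wires blkcg_print_blkgs() into a cftype seq_show callback
 * together with one of the prfill helpers defined below.  "example_pol"
 * and "struct example_pd" are hypothetical names.
 *
 *	static int example_print_serviced(struct seq_file *sf, void *v)
 *	{
 *		blkcg_print_blkgs(sf, css_to_blkcg(seq_css(sf)),
 *				  blkg_prfill_rwstat, &example_pol,
 *				  offsetof(struct example_pd, serviced), true);
 *		return 0;
 *	}
 */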

/**
 * __blkg_prfill_u64 - prfill helper for a single u64 value
 * @sf: seq_file to print to
 * @pd: policy private data of interest
 * @v: value to print
 *
 * Print @v to @sf for the device associated with @pd.
 */
u64 __blkg_prfill_u64(struct seq_file *sf, struct blkg_policy_data *pd, u64 v)
{
	const char *dname = blkg_dev_name(pd->blkg);

	if (!dname)
		return 0;

	seq_printf(sf, "%s %llu\n", dname, (unsigned long long)v);
	return v;
}
EXPORT_SYMBOL_GPL(__blkg_prfill_u64);

/**
 * __blkg_prfill_rwstat - prfill helper for a blkg_rwstat
 * @sf: seq_file to print to
 * @pd: policy private data of interest
 * @rwstat: rwstat to print
 *
 * Print @rwstat to @sf for the device associated with @pd.
 */
u64 __blkg_prfill_rwstat(struct seq_file *sf, struct blkg_policy_data *pd,
			 const struct blkg_rwstat *rwstat)
{
	static const char *rwstr[] = {
		[BLKG_RWSTAT_READ]	= "Read",
		[BLKG_RWSTAT_WRITE]	= "Write",
		[BLKG_RWSTAT_SYNC]	= "Sync",
		[BLKG_RWSTAT_ASYNC]	= "Async",
	};
	const char *dname = blkg_dev_name(pd->blkg);
	u64 v;
	int i;

	if (!dname)
		return 0;

	for (i = 0; i < BLKG_RWSTAT_NR; i++)
		seq_printf(sf, "%s %s %llu\n", dname, rwstr[i],
			   (unsigned long long)atomic64_read(&rwstat->aux_cnt[i]));

	v = atomic64_read(&rwstat->aux_cnt[BLKG_RWSTAT_READ]) +
		atomic64_read(&rwstat->aux_cnt[BLKG_RWSTAT_WRITE]);
	seq_printf(sf, "%s Total %llu\n", dname, (unsigned long long)v);
	return v;
}
EXPORT_SYMBOL_GPL(__blkg_prfill_rwstat);

/**
 * blkg_prfill_stat - prfill callback for blkg_stat
 * @sf: seq_file to print to
 * @pd: policy private data of interest
 * @off: offset to the blkg_stat in @pd
 *
 * prfill callback for printing a blkg_stat.
 */
u64 blkg_prfill_stat(struct seq_file *sf, struct blkg_policy_data *pd, int off)
{
	return __blkg_prfill_u64(sf, pd, blkg_stat_read((void *)pd + off));
}
EXPORT_SYMBOL_GPL(blkg_prfill_stat);

/**
 * blkg_prfill_rwstat - prfill callback for blkg_rwstat
 * @sf: seq_file to print to
 * @pd: policy private data of interest
 * @off: offset to the blkg_rwstat in @pd
 *
 * prfill callback for printing a blkg_rwstat.
 */
u64 blkg_prfill_rwstat(struct seq_file *sf, struct blkg_policy_data *pd,
		       int off)
{
	struct blkg_rwstat rwstat = blkg_rwstat_read((void *)pd + off);

	return __blkg_prfill_rwstat(sf, pd, &rwstat);
}
EXPORT_SYMBOL_GPL(blkg_prfill_rwstat);

static u64 blkg_prfill_rwstat_field(struct seq_file *sf,
				    struct blkg_policy_data *pd, int off)
{
	struct blkg_rwstat rwstat = blkg_rwstat_read((void *)pd->blkg + off);

	return __blkg_prfill_rwstat(sf, pd, &rwstat);
}

/**
 * blkg_print_stat_bytes - seq_show callback for blkg->stat_bytes
 * @sf: seq_file to print to
 * @v: unused
 *
 * To be used as cftype->seq_show to print blkg->stat_bytes.
 * cftype->private must be set to the blkcg_policy.
 */
int blkg_print_stat_bytes(struct seq_file *sf, void *v)
{
	blkcg_print_blkgs(sf, css_to_blkcg(seq_css(sf)),
			  blkg_prfill_rwstat_field, (void *)seq_cft(sf)->private,
			  offsetof(struct blkcg_gq, stat_bytes), true);
	return 0;
}
EXPORT_SYMBOL_GPL(blkg_print_stat_bytes);

/**
 * blkg_print_stat_ios - seq_show callback for blkg->stat_ios
 * @sf: seq_file to print to
 * @v: unused
 *
 * To be used as cftype->seq_show to print blkg->stat_ios.  cftype->private
 * must be set to the blkcg_policy.
 */
int blkg_print_stat_ios(struct seq_file *sf, void *v)
{
	blkcg_print_blkgs(sf, css_to_blkcg(seq_css(sf)),
			  blkg_prfill_rwstat_field, (void *)seq_cft(sf)->private,
			  offsetof(struct blkcg_gq, stat_ios), true);
	return 0;
}
EXPORT_SYMBOL_GPL(blkg_print_stat_ios);

static u64 blkg_prfill_rwstat_field_recursive(struct seq_file *sf,
					      struct blkg_policy_data *pd,
					      int off)
{
	struct blkg_rwstat rwstat = blkg_rwstat_recursive_sum(pd->blkg,
							      NULL, off);
	return __blkg_prfill_rwstat(sf, pd, &rwstat);
}

/**
 * blkg_print_stat_bytes_recursive - recursive version of blkg_print_stat_bytes
 * @sf: seq_file to print to
 * @v: unused
 */
int blkg_print_stat_bytes_recursive(struct seq_file *sf, void *v)
{
	blkcg_print_blkgs(sf, css_to_blkcg(seq_css(sf)),
			  blkg_prfill_rwstat_field_recursive,
			  (void *)seq_cft(sf)->private,
			  offsetof(struct blkcg_gq, stat_bytes), true);
	return 0;
}
EXPORT_SYMBOL_GPL(blkg_print_stat_bytes_recursive);

/**
 * blkg_print_stat_ios_recursive - recursive version of blkg_print_stat_ios
 * @sf: seq_file to print to
 * @v: unused
 */
int blkg_print_stat_ios_recursive(struct seq_file *sf, void *v)
{
	blkcg_print_blkgs(sf, css_to_blkcg(seq_css(sf)),
			  blkg_prfill_rwstat_field_recursive,
			  (void *)seq_cft(sf)->private,
			  offsetof(struct blkcg_gq, stat_ios), true);
	return 0;
}
EXPORT_SYMBOL_GPL(blkg_print_stat_ios_recursive);

/**
 * blkg_stat_recursive_sum - collect hierarchical blkg_stat
 * @blkg: blkg of interest
 * @pol: blkcg_policy which contains the blkg_stat
 * @off: offset to the blkg_stat in blkg_policy_data or @blkg
 *
 * Collect the blkg_stat specified by @blkg, @pol and @off and all its
 * online descendants and their aux counts.  The caller must be holding the
 * queue lock for online tests.
 *
 * If @pol is NULL, blkg_stat is at @off bytes into @blkg; otherwise, it is
 * at @off bytes into @blkg's blkg_policy_data of the policy.
 */
u64 blkg_stat_recursive_sum(struct blkcg_gq *blkg,
			    struct blkcg_policy *pol, int off)
{
	struct blkcg_gq *pos_blkg;
	struct cgroup_subsys_state *pos_css;
	u64 sum = 0;

	lockdep_assert_held(blkg->q->queue_lock);

	rcu_read_lock();
	blkg_for_each_descendant_pre(pos_blkg, pos_css, blkg) {
		struct blkg_stat *stat;

		if (!pos_blkg->online)
			continue;

		if (pol)
			stat = (void *)blkg_to_pd(pos_blkg, pol) + off;
		else
			stat = (void *)pos_blkg + off;

		sum += blkg_stat_read(stat) + atomic64_read(&stat->aux_cnt);
	}
	rcu_read_unlock();

	return sum;
}
EXPORT_SYMBOL_GPL(blkg_stat_recursive_sum);

/**
 * blkg_rwstat_recursive_sum - collect hierarchical blkg_rwstat
 * @blkg: blkg of interest
 * @pol: blkcg_policy which contains the blkg_rwstat
 * @off: offset to the blkg_rwstat in blkg_policy_data or @blkg
 *
 * Collect the blkg_rwstat specified by @blkg, @pol and @off and all its
 * online descendants and their aux counts.  The caller must be holding the
 * queue lock for online tests.
 *
 * If @pol is NULL, blkg_rwstat is at @off bytes into @blkg; otherwise, it
 * is at @off bytes into @blkg's blkg_policy_data of the policy.
 */
struct blkg_rwstat blkg_rwstat_recursive_sum(struct blkcg_gq *blkg,
					     struct blkcg_policy *pol, int off)
{
	struct blkcg_gq *pos_blkg;
	struct cgroup_subsys_state *pos_css;
	struct blkg_rwstat sum = { };
	int i;

	lockdep_assert_held(blkg->q->queue_lock);

	rcu_read_lock();
	blkg_for_each_descendant_pre(pos_blkg, pos_css, blkg) {
		struct blkg_rwstat *rwstat;

		if (!pos_blkg->online)
			continue;

		if (pol)
			rwstat = (void *)blkg_to_pd(pos_blkg, pol) + off;
		else
			rwstat = (void *)pos_blkg + off;

		for (i = 0; i < BLKG_RWSTAT_NR; i++)
			atomic64_add(atomic64_read(&rwstat->aux_cnt[i]) +
				percpu_counter_sum_positive(&rwstat->cpu_cnt[i]),
				&sum.aux_cnt[i]);
	}
	rcu_read_unlock();

	return sum;
}
EXPORT_SYMBOL_GPL(blkg_rwstat_recursive_sum);

/**
 * blkg_conf_prep - parse and prepare for per-blkg config update
 * @blkcg: target block cgroup
 * @pol: target policy
 * @input: input string
 * @ctx: blkg_conf_ctx to be filled
 *
 * Parse per-blkg config update from @input and initialize @ctx with the
 * result.  @ctx->blkg points to the blkg to be updated and @ctx->body the
 * part of @input following MAJ:MIN.  This function returns with RCU read
 * lock and queue lock held and must be paired with blkg_conf_finish().
 */
int blkg_conf_prep(struct blkcg *blkcg, const struct blkcg_policy *pol,
		   char *input, struct blkg_conf_ctx *ctx)
	__acquires(rcu) __acquires(disk->queue->queue_lock)
{
	struct gendisk *disk;
	struct blkcg_gq *blkg;
	struct module *owner;
	unsigned int major, minor;
	int key_len, part, ret;
	char *body;

	if (sscanf(input, "%u:%u%n", &major, &minor, &key_len) != 2)
		return -EINVAL;

	body = input + key_len;
	if (!isspace(*body))
		return -EINVAL;
	body = skip_spaces(body);

	disk = get_gendisk(MKDEV(major, minor), &part);
	if (!disk)
		return -ENODEV;
	if (part) {
		owner = disk->fops->owner;
		put_disk(disk);
		module_put(owner);
		return -ENODEV;
	}

	rcu_read_lock();
	spin_lock_irq(disk->queue->queue_lock);

	if (blkcg_policy_enabled(disk->queue, pol))
		blkg = blkg_lookup_create(blkcg, disk->queue);
	else
		blkg = ERR_PTR(-EOPNOTSUPP);

	if (IS_ERR(blkg)) {
		ret = PTR_ERR(blkg);
		rcu_read_unlock();
		spin_unlock_irq(disk->queue->queue_lock);
		owner = disk->fops->owner;
		put_disk(disk);
		module_put(owner);
		/*
		 * If queue was bypassing, we should retry.  Do so after a
		 * short msleep().  It isn't strictly necessary but queue
		 * can be bypassing for some time and it's always nice to
		 * avoid busy looping.
		 */
		if (ret == -EBUSY) {
			msleep(10);
			ret = restart_syscall();
		}
		return ret;
	}

	ctx->disk = disk;
	ctx->blkg = blkg;
	ctx->body = body;
	return 0;
}
EXPORT_SYMBOL_GPL(blkg_conf_prep);

/**
 * blkg_conf_finish - finish up per-blkg config update
 * @ctx: blkg_conf_ctx initialized by blkg_conf_prep()
 *
 * Finish up after per-blkg config update.  This function must be paired
 * with blkg_conf_prep().
 */
void blkg_conf_finish(struct blkg_conf_ctx *ctx)
	__releases(ctx->disk->queue->queue_lock) __releases(rcu)
{
	struct module *owner;

	spin_unlock_irq(ctx->disk->queue->queue_lock);
	rcu_read_unlock();
	owner = ctx->disk->fops->owner;
	put_disk(ctx->disk);
	module_put(owner);
}
EXPORT_SYMBOL_GPL(blkg_conf_finish);
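
/*
 * Example usage (an illustrative sketch, not part of this file): a
 * policy's "MAJ:MIN <value>" cftype write handler brackets the update
 * with blkg_conf_prep()/blkg_conf_finish().  Everything named example_*
 * is hypothetical.
 *
 *	static ssize_t example_set_limit(struct kernfs_open_file *of,
 *					 char *buf, size_t nbytes, loff_t off)
 *	{
 *		struct blkcg *blkcg = css_to_blkcg(of_css(of));
 *		struct blkg_conf_ctx ctx;
 *		u64 limit;
 *		int ret;
 *
 *		ret = blkg_conf_prep(blkcg, &example_pol, buf, &ctx);
 *		if (ret)
 *			return ret;
 *
 *		ret = -EINVAL;
 *		if (sscanf(ctx.body, "%llu", &limit) == 1) {
 *			example_pd_from_blkg(ctx.blkg)->limit = limit;
 *			ret = 0;
 *		}
 *
 *		blkg_conf_finish(&ctx);
 *		return ret ?: nbytes;
 *	}
 */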

static int blkcg_print_stat(struct seq_file *sf, void *v)
{
	struct blkcg *blkcg = css_to_blkcg(seq_css(sf));
	struct blkcg_gq *blkg;

	rcu_read_lock();

	hlist_for_each_entry_rcu(blkg, &blkcg->blkg_list, blkcg_node) {
		const char *dname;
		struct blkg_rwstat rwstat;
		u64 rbytes, wbytes, rios, wios;

		dname = blkg_dev_name(blkg);
		if (!dname)
			continue;

		spin_lock_irq(blkg->q->queue_lock);

		rwstat = blkg_rwstat_recursive_sum(blkg, NULL,
					offsetof(struct blkcg_gq, stat_bytes));
		rbytes = atomic64_read(&rwstat.aux_cnt[BLKG_RWSTAT_READ]);
		wbytes = atomic64_read(&rwstat.aux_cnt[BLKG_RWSTAT_WRITE]);

		rwstat = blkg_rwstat_recursive_sum(blkg, NULL,
					offsetof(struct blkcg_gq, stat_ios));
		rios = atomic64_read(&rwstat.aux_cnt[BLKG_RWSTAT_READ]);
		wios = atomic64_read(&rwstat.aux_cnt[BLKG_RWSTAT_WRITE]);

		spin_unlock_irq(blkg->q->queue_lock);

		if (rbytes || wbytes || rios || wios)
			seq_printf(sf, "%s rbytes=%llu wbytes=%llu rios=%llu wios=%llu\n",
				   dname, rbytes, wbytes, rios, wios);
	}

	rcu_read_unlock();
	return 0;
}

static struct cftype blkcg_files[] = {
	{
		.name = "stat",
		.flags = CFTYPE_NOT_ON_ROOT,
		.seq_show = blkcg_print_stat,
	},
	{ }	/* terminate */
};

static struct cftype blkcg_legacy_files[] = {
	{
		.name = "reset_stats",
		.write_u64 = blkcg_reset_stats,
	},
	{ }	/* terminate */
};

/**
 * blkcg_css_offline - cgroup css_offline callback
 * @css: css of interest
 *
 * This function is called when @css is about to go away and is
 * responsible for shooting down all blkgs associated with @css.  blkgs
 * should be removed while holding both q and blkcg locks.  As blkcg lock
 * is nested inside q lock, this function performs reverse double lock
 * dancing.
 *
 * This is the blkcg counterpart of ioc_release_fn().
 */
static void blkcg_css_offline(struct cgroup_subsys_state *css)
{
	struct blkcg *blkcg = css_to_blkcg(css);

	spin_lock_irq(&blkcg->lock);

	while (!hlist_empty(&blkcg->blkg_list)) {
		struct blkcg_gq *blkg = hlist_entry(blkcg->blkg_list.first,
						    struct blkcg_gq, blkcg_node);
		struct request_queue *q = blkg->q;

		if (spin_trylock(q->queue_lock)) {
			blkg_destroy(blkg);
			spin_unlock(q->queue_lock);
		} else {
			spin_unlock_irq(&blkcg->lock);
			cpu_relax();
			spin_lock_irq(&blkcg->lock);
		}
	}

	spin_unlock_irq(&blkcg->lock);

	wb_blkcg_offline(blkcg);
}

static void blkcg_css_free(struct cgroup_subsys_state *css)
{
	struct blkcg *blkcg = css_to_blkcg(css);
	int i;

	mutex_lock(&blkcg_pol_mutex);

	list_del(&blkcg->all_blkcgs_node);

	for (i = 0; i < BLKCG_MAX_POLS; i++)
		if (blkcg->cpd[i])
			blkcg_policy[i]->cpd_free_fn(blkcg->cpd[i]);

	mutex_unlock(&blkcg_pol_mutex);

	kfree(blkcg);
}

static struct cgroup_subsys_state *
blkcg_css_alloc(struct cgroup_subsys_state *parent_css)
{
	struct blkcg *blkcg;
	struct cgroup_subsys_state *ret;
	int i;

	mutex_lock(&blkcg_pol_mutex);

	if (!parent_css) {
		blkcg = &blkcg_root;
	} else {
		blkcg = kzalloc(sizeof(*blkcg), GFP_KERNEL);
		if (!blkcg) {
			ret = ERR_PTR(-ENOMEM);
			goto free_blkcg;
		}
	}

	for (i = 0; i < BLKCG_MAX_POLS; i++) {
		struct blkcg_policy *pol = blkcg_policy[i];
		struct blkcg_policy_data *cpd;

		/*
		 * If the policy hasn't been attached yet, wait for it
		 * to be attached before doing anything else.  Otherwise,
		 * check if the policy requires any specific per-cgroup
		 * data: if it does, allocate and initialize it.
		 */
		if (!pol || !pol->cpd_alloc_fn)
			continue;

		cpd = pol->cpd_alloc_fn(GFP_KERNEL);
		if (!cpd) {
			ret = ERR_PTR(-ENOMEM);
			goto free_pd_blkcg;
		}
		blkcg->cpd[i] = cpd;
		cpd->blkcg = blkcg;
		cpd->plid = i;
		if (pol->cpd_init_fn)
			pol->cpd_init_fn(cpd);
	}

	spin_lock_init(&blkcg->lock);
	INIT_RADIX_TREE(&blkcg->blkg_tree, GFP_NOWAIT | __GFP_NOWARN);
	INIT_HLIST_HEAD(&blkcg->blkg_list);
#ifdef CONFIG_CGROUP_WRITEBACK
	INIT_LIST_HEAD(&blkcg->cgwb_list);
#endif
	list_add_tail(&blkcg->all_blkcgs_node, &all_blkcgs);

	mutex_unlock(&blkcg_pol_mutex);
	return &blkcg->css;

free_pd_blkcg:
	for (i--; i >= 0; i--)
		if (blkcg->cpd[i])
			blkcg_policy[i]->cpd_free_fn(blkcg->cpd[i]);
free_blkcg:
	kfree(blkcg);
	mutex_unlock(&blkcg_pol_mutex);
	return ret;
}

/**
 * blkcg_init_queue - initialize blkcg part of request queue
 * @q: request_queue to initialize
 *
 * Called from blk_alloc_queue_node().  Responsible for initializing blkcg
 * part of new request_queue @q.
 *
 * RETURNS:
 * 0 on success, -errno on failure.
 */
int blkcg_init_queue(struct request_queue *q)
{
	struct blkcg_gq *new_blkg, *blkg;
	bool preloaded;
	int ret;

	new_blkg = blkg_alloc(&blkcg_root, q, GFP_KERNEL);
	if (!new_blkg)
		return -ENOMEM;

	preloaded = !radix_tree_preload(GFP_KERNEL);

	/*
	 * Make sure the root blkg exists and count the existing blkgs.  As
	 * @q is bypassing at this point, blkg_lookup_create() can't be
	 * used.  Open code insertion.
	 */
	rcu_read_lock();
	spin_lock_irq(q->queue_lock);
	blkg = blkg_create(&blkcg_root, q, new_blkg);
	spin_unlock_irq(q->queue_lock);
	rcu_read_unlock();

	if (preloaded)
		radix_tree_preload_end();

	if (IS_ERR(blkg))
		return PTR_ERR(blkg);

	q->root_blkg = blkg;
	q->root_rl.blkg = blkg;

	ret = blk_throtl_init(q);
	if (ret) {
		spin_lock_irq(q->queue_lock);
		blkg_destroy_all(q);
		spin_unlock_irq(q->queue_lock);
	}
	return ret;
}

/**
 * blkcg_drain_queue - drain blkcg part of request_queue
 * @q: request_queue to drain
 *
 * Called from blk_drain_queue().  Responsible for draining blkcg part.
 */
void blkcg_drain_queue(struct request_queue *q)
{
	lockdep_assert_held(q->queue_lock);

	/*
	 * @q could be exiting and already have destroyed all blkgs as
	 * indicated by NULL root_blkg.  If so, don't confuse policies.
	 */
	if (!q->root_blkg)
		return;

	blk_throtl_drain(q);
}

/**
 * blkcg_exit_queue - exit and release blkcg part of request_queue
 * @q: request_queue being released
 *
 * Called from blk_release_queue().  Responsible for exiting blkcg part.
 */
void blkcg_exit_queue(struct request_queue *q)
{
	spin_lock_irq(q->queue_lock);
	blkg_destroy_all(q);
	spin_unlock_irq(q->queue_lock);

	blk_throtl_exit(q);
}

/*
 * We cannot support shared io contexts, as we have no means to support
 * two tasks with the same ioc in two different groups without major rework
 * of the main cic data structures.  For now we allow a task to change
 * its cgroup only if it's the only owner of its ioc.
 */
static int blkcg_can_attach(struct cgroup_taskset *tset)
{
	struct task_struct *task;
	struct cgroup_subsys_state *dst_css;
	struct io_context *ioc;
	int ret = 0;

	/* task_lock() is needed to avoid races with exit_io_context() */
	cgroup_taskset_for_each(task, dst_css, tset) {
		task_lock(task);
		ioc = task->io_context;
		if (ioc && atomic_read(&ioc->nr_tasks) > 1)
			ret = -EINVAL;
		task_unlock(task);
		if (ret)
			break;
	}
	return ret;
}

static void blkcg_bind(struct cgroup_subsys_state *root_css)
{
	int i;

	mutex_lock(&blkcg_pol_mutex);

	for (i = 0; i < BLKCG_MAX_POLS; i++) {
		struct blkcg_policy *pol = blkcg_policy[i];
		struct blkcg *blkcg;

		if (!pol || !pol->cpd_bind_fn)
			continue;

		list_for_each_entry(blkcg, &all_blkcgs, all_blkcgs_node)
			if (blkcg->cpd[pol->plid])
				pol->cpd_bind_fn(blkcg->cpd[pol->plid]);
	}
	mutex_unlock(&blkcg_pol_mutex);
}

struct cgroup_subsys io_cgrp_subsys = {
	.css_alloc = blkcg_css_alloc,
	.css_offline = blkcg_css_offline,
	.css_free = blkcg_css_free,
	.can_attach = blkcg_can_attach,
	.bind = blkcg_bind,
	.dfl_cftypes = blkcg_files,
	.legacy_cftypes = blkcg_legacy_files,
	.legacy_name = "blkio",
#ifdef CONFIG_MEMCG
	/*
	 * This ensures that, if available, memcg is automatically enabled
	 * together on the default hierarchy so that the owner cgroup can
	 * be retrieved from writeback pages.
	 */
	.depends_on = 1 << memory_cgrp_id,
#endif
};
EXPORT_SYMBOL_GPL(io_cgrp_subsys);

/**
 * blkcg_activate_policy - activate a blkcg policy on a request_queue
 * @q: request_queue of interest
 * @pol: blkcg policy to activate
 *
 * Activate @pol on @q.  Requires %GFP_KERNEL context.  @q goes through
 * bypass mode to populate its blkgs with policy_data for @pol.
 *
 * Activation happens with @q bypassed, so nobody would be accessing blkgs
 * from IO path.  Update of each blkg is protected by both queue and blkcg
 * locks so that holding either lock and testing blkcg_policy_enabled() is
 * always enough for dereferencing policy data.
 *
 * The caller is responsible for synchronizing [de]activations and policy
 * [un]registrations.  Returns 0 on success, -errno on failure.
 */
int blkcg_activate_policy(struct request_queue *q,
			  const struct blkcg_policy *pol)
{
	struct blkg_policy_data *pd_prealloc = NULL;
	struct blkcg_gq *blkg;
	int ret;

	if (blkcg_policy_enabled(q, pol))
		return 0;

	if (q->mq_ops)
		blk_mq_freeze_queue(q);
	else
		blk_queue_bypass_start(q);
pd_prealloc:
	if (!pd_prealloc) {
		pd_prealloc = pol->pd_alloc_fn(GFP_KERNEL, q->node);
		if (!pd_prealloc) {
			ret = -ENOMEM;
			goto out_bypass_end;
		}
	}

	spin_lock_irq(q->queue_lock);

	list_for_each_entry(blkg, &q->blkg_list, q_node) {
		struct blkg_policy_data *pd;

		if (blkg->pd[pol->plid])
			continue;

		pd = pol->pd_alloc_fn(GFP_NOWAIT | __GFP_NOWARN, q->node);
		if (!pd)
			swap(pd, pd_prealloc);
		if (!pd) {
			spin_unlock_irq(q->queue_lock);
			goto pd_prealloc;
		}

		blkg->pd[pol->plid] = pd;
		pd->blkg = blkg;
		pd->plid = pol->plid;
		if (pol->pd_init_fn)
			pol->pd_init_fn(pd);
	}

	__set_bit(pol->plid, q->blkcg_pols);
	ret = 0;

	spin_unlock_irq(q->queue_lock);
out_bypass_end:
	if (q->mq_ops)
		blk_mq_unfreeze_queue(q);
	else
		blk_queue_bypass_end(q);
	if (pd_prealloc)
		pol->pd_free_fn(pd_prealloc);
	return ret;
}
EXPORT_SYMBOL_GPL(blkcg_activate_policy);
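
/*
 * Example usage (an illustrative sketch, not part of this file): a policy
 * is typically activated on a queue from its scheduler's or driver's init
 * path and deactivated again on exit; &example_pol is a hypothetical
 * policy.
 *
 *	ret = blkcg_activate_policy(q, &example_pol);
 *	if (ret)
 *		return ret;
 *	...
 *	blkcg_deactivate_policy(q, &example_pol);
 */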

/**
 * blkcg_deactivate_policy - deactivate a blkcg policy on a request_queue
 * @q: request_queue of interest
 * @pol: blkcg policy to deactivate
 *
 * Deactivate @pol on @q.  Follows the same synchronization rules as
 * blkcg_activate_policy().
 */
void blkcg_deactivate_policy(struct request_queue *q,
			     const struct blkcg_policy *pol)
{
	struct blkcg_gq *blkg;

	if (!blkcg_policy_enabled(q, pol))
		return;

	if (q->mq_ops)
		blk_mq_freeze_queue(q);
	else
		blk_queue_bypass_start(q);

	spin_lock_irq(q->queue_lock);

	__clear_bit(pol->plid, q->blkcg_pols);

	list_for_each_entry(blkg, &q->blkg_list, q_node) {
		/* grab blkcg lock too while removing @pd from @blkg */
		spin_lock(&blkg->blkcg->lock);

		if (blkg->pd[pol->plid]) {
			if (pol->pd_offline_fn)
				pol->pd_offline_fn(blkg->pd[pol->plid]);
			pol->pd_free_fn(blkg->pd[pol->plid]);
			blkg->pd[pol->plid] = NULL;
		}

		spin_unlock(&blkg->blkcg->lock);
	}

	spin_unlock_irq(q->queue_lock);

	if (q->mq_ops)
		blk_mq_unfreeze_queue(q);
	else
		blk_queue_bypass_end(q);
}
EXPORT_SYMBOL_GPL(blkcg_deactivate_policy);

/**
 * blkcg_policy_register - register a blkcg policy
 * @pol: blkcg policy to register
 *
 * Register @pol with blkcg core.  Might sleep and @pol may be modified on
 * successful registration.  Returns 0 on success and -errno on failure.
 */
int blkcg_policy_register(struct blkcg_policy *pol)
{
	struct blkcg *blkcg;
	int i, ret;

	mutex_lock(&blkcg_pol_register_mutex);
	mutex_lock(&blkcg_pol_mutex);

	/* find an empty slot */
	ret = -ENOSPC;
	for (i = 0; i < BLKCG_MAX_POLS; i++)
		if (!blkcg_policy[i])
			break;
	if (i >= BLKCG_MAX_POLS)
		goto err_unlock;

	/* register @pol */
	pol->plid = i;
	blkcg_policy[pol->plid] = pol;

	/* allocate and install cpd's */
	if (pol->cpd_alloc_fn) {
		list_for_each_entry(blkcg, &all_blkcgs, all_blkcgs_node) {
			struct blkcg_policy_data *cpd;

			cpd = pol->cpd_alloc_fn(GFP_KERNEL);
			if (!cpd)
				goto err_free_cpds;

			blkcg->cpd[pol->plid] = cpd;
			cpd->blkcg = blkcg;
			cpd->plid = pol->plid;
			pol->cpd_init_fn(cpd);
		}
	}

	mutex_unlock(&blkcg_pol_mutex);

	/* everything is in place, add intf files for the new policy */
	if (pol->dfl_cftypes)
		WARN_ON(cgroup_add_dfl_cftypes(&io_cgrp_subsys,
					       pol->dfl_cftypes));
	if (pol->legacy_cftypes)
		WARN_ON(cgroup_add_legacy_cftypes(&io_cgrp_subsys,
						  pol->legacy_cftypes));
	mutex_unlock(&blkcg_pol_register_mutex);
	return 0;

err_free_cpds:
	if (pol->cpd_alloc_fn) {
		list_for_each_entry(blkcg, &all_blkcgs, all_blkcgs_node) {
			if (blkcg->cpd[pol->plid]) {
				pol->cpd_free_fn(blkcg->cpd[pol->plid]);
				blkcg->cpd[pol->plid] = NULL;
			}
		}
	}
	blkcg_policy[pol->plid] = NULL;
err_unlock:
	mutex_unlock(&blkcg_pol_mutex);
	mutex_unlock(&blkcg_pol_register_mutex);
	return ret;
}
EXPORT_SYMBOL_GPL(blkcg_policy_register);
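
/*
 * Example usage (an illustrative sketch, not part of this file): a
 * minimal module init/exit pair registering a policy.  All example_*
 * symbols are hypothetical; a real policy also provides the cftypes and
 * per-blkg pd_*_fn hooks it needs.
 *
 *	static struct blkcg_policy example_pol = {
 *		.dfl_cftypes	= example_dfl_files,
 *		.legacy_cftypes	= example_legacy_files,
 *		.pd_alloc_fn	= example_pd_alloc,
 *		.pd_init_fn	= example_pd_init,
 *		.pd_free_fn	= example_pd_free,
 *	};
 *
 *	static int __init example_init(void)
 *	{
 *		return blkcg_policy_register(&example_pol);
 *	}
 *
 *	static void __exit example_exit(void)
 *	{
 *		blkcg_policy_unregister(&example_pol);
 *	}
 */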

/**
 * blkcg_policy_unregister - unregister a blkcg policy
 * @pol: blkcg policy to unregister
 *
 * Undo blkcg_policy_register(@pol).  Might sleep.
 */
void blkcg_policy_unregister(struct blkcg_policy *pol)
{
	struct blkcg *blkcg;

	mutex_lock(&blkcg_pol_register_mutex);

	if (WARN_ON(blkcg_policy[pol->plid] != pol))
		goto out_unlock;

	/* kill the intf files first */
	if (pol->dfl_cftypes)
		cgroup_rm_cftypes(pol->dfl_cftypes);
	if (pol->legacy_cftypes)
		cgroup_rm_cftypes(pol->legacy_cftypes);

	/* remove cpds and unregister */
	mutex_lock(&blkcg_pol_mutex);

	if (pol->cpd_alloc_fn) {
		list_for_each_entry(blkcg, &all_blkcgs, all_blkcgs_node) {
			if (blkcg->cpd[pol->plid]) {
				pol->cpd_free_fn(blkcg->cpd[pol->plid]);
				blkcg->cpd[pol->plid] = NULL;
			}
		}
	}
	blkcg_policy[pol->plid] = NULL;

	mutex_unlock(&blkcg_pol_mutex);
out_unlock:
	mutex_unlock(&blkcg_pol_register_mutex);
}
EXPORT_SYMBOL_GPL(blkcg_policy_unregister);