/*
 * Common Block IO controller cgroup interface
 *
 * Based on ideas and code from CFQ, CFS and BFQ:
 * Copyright (C) 2003 Jens Axboe <axboe@kernel.dk>
 *
 * Copyright (C) 2008 Fabio Checconi <fabio@gandalf.sssup.it>
 *		      Paolo Valente <paolo.valente@unimore.it>
 *
 * Copyright (C) 2009 Vivek Goyal <vgoyal@redhat.com>
 *	              Nauman Rafique <nauman@google.com>
 */
#include <linux/ioprio.h>
#include <linux/kdev_t.h>
#include <linux/module.h>
#include <linux/err.h>
#include <linux/blkdev.h>
#include <linux/slab.h>
#include <linux/genhd.h>
#include <linux/delay.h>
#include <linux/atomic.h>
#include "blk-cgroup.h"
#include "blk.h"

#define MAX_KEY_LEN 100

static DEFINE_SPINLOCK(blkio_list_lock);
static LIST_HEAD(blkio_list);

static DEFINE_MUTEX(all_q_mutex);
static LIST_HEAD(all_q_list);

/* List of groups pending per cpu stats allocation */
static DEFINE_SPINLOCK(alloc_list_lock);
static LIST_HEAD(alloc_list);

static void blkio_stat_alloc_fn(struct work_struct *);
static DECLARE_DELAYED_WORK(blkio_stat_alloc_work, blkio_stat_alloc_fn);

struct blkio_cgroup blkio_root_cgroup = { .weight = 2*BLKIO_WEIGHT_DEFAULT };
EXPORT_SYMBOL_GPL(blkio_root_cgroup);

static struct blkio_policy_type *blkio_policy[BLKIO_NR_POLICIES];

struct blkio_cgroup *cgroup_to_blkio_cgroup(struct cgroup *cgroup)
{
	return container_of(cgroup_subsys_state(cgroup, blkio_subsys_id),
			    struct blkio_cgroup, css);
}
EXPORT_SYMBOL_GPL(cgroup_to_blkio_cgroup);

static struct blkio_cgroup *task_blkio_cgroup(struct task_struct *tsk)
{
	return container_of(task_subsys_state(tsk, blkio_subsys_id),
			    struct blkio_cgroup, css);
}

struct blkio_cgroup *bio_blkio_cgroup(struct bio *bio)
{
	if (bio && bio->bi_css)
		return container_of(bio->bi_css, struct blkio_cgroup, css);
	return task_blkio_cgroup(current);
}
EXPORT_SYMBOL_GPL(bio_blkio_cgroup);

static inline void blkio_update_group_weight(struct blkio_group *blkg,
					     int plid, unsigned int weight)
{
	struct blkio_policy_type *blkiop;

	list_for_each_entry(blkiop, &blkio_list, list) {
		/* If this policy does not own the blkg, do not send updates */
		if (blkiop->plid != plid)
			continue;
		if (blkiop->ops.blkio_update_group_weight_fn)
			blkiop->ops.blkio_update_group_weight_fn(blkg->q,
								 blkg, weight);
	}
}

static inline void blkio_update_group_bps(struct blkio_group *blkg, int plid,
					  u64 bps, int rw)
{
	struct blkio_policy_type *blkiop;

	list_for_each_entry(blkiop, &blkio_list, list) {

		/* If this policy does not own the blkg, do not send updates */
		if (blkiop->plid != plid)
			continue;

		if (rw == READ && blkiop->ops.blkio_update_group_read_bps_fn)
			blkiop->ops.blkio_update_group_read_bps_fn(blkg->q,
								   blkg, bps);

		if (rw == WRITE && blkiop->ops.blkio_update_group_write_bps_fn)
			blkiop->ops.blkio_update_group_write_bps_fn(blkg->q,
								    blkg, bps);
	}
}

static inline void blkio_update_group_iops(struct blkio_group *blkg, int plid,
					   u64 iops, int rw)
{
	struct blkio_policy_type *blkiop;

	list_for_each_entry(blkiop, &blkio_list, list) {

		/* If this policy does not own the blkg, do not send updates */
		if (blkiop->plid != plid)
			continue;

		if (rw == READ && blkiop->ops.blkio_update_group_read_iops_fn)
			blkiop->ops.blkio_update_group_read_iops_fn(blkg->q,
								    blkg, iops);

		if (rw == WRITE && blkiop->ops.blkio_update_group_write_iops_fn)
			blkiop->ops.blkio_update_group_write_iops_fn(blkg->q,
								     blkg, iops);
	}
}

#ifdef CONFIG_DEBUG_BLK_CGROUP
/* This should be called with the queue_lock held. */
static void blkio_set_start_group_wait_time(struct blkio_group *blkg,
					    struct blkio_policy_type *pol,
					    struct blkio_group *curr_blkg)
{
	struct blkg_policy_data *pd = blkg->pd[pol->plid];

	if (blkio_blkg_waiting(&pd->stats))
		return;
	if (blkg == curr_blkg)
		return;
	pd->stats.start_group_wait_time = sched_clock();
	blkio_mark_blkg_waiting(&pd->stats);
}

/* This should be called with the queue_lock held. */
static void blkio_update_group_wait_time(struct blkio_group_stats *stats)
{
	unsigned long long now;

	if (!blkio_blkg_waiting(stats))
		return;

	now = sched_clock();
	if (time_after64(now, stats->start_group_wait_time))
		blkg_stat_add(&stats->group_wait_time,
			      now - stats->start_group_wait_time);
	blkio_clear_blkg_waiting(stats);
}

/* This should be called with the queue_lock held. */
static void blkio_end_empty_time(struct blkio_group_stats *stats)
{
	unsigned long long now;

	if (!blkio_blkg_empty(stats))
		return;

	now = sched_clock();
	if (time_after64(now, stats->start_empty_time))
		blkg_stat_add(&stats->empty_time,
			      now - stats->start_empty_time);
	blkio_clear_blkg_empty(stats);
}

void blkiocg_update_set_idle_time_stats(struct blkio_group *blkg,
					struct blkio_policy_type *pol)
{
	struct blkio_group_stats *stats = &blkg->pd[pol->plid]->stats;

	lockdep_assert_held(blkg->q->queue_lock);
	BUG_ON(blkio_blkg_idling(stats));

	stats->start_idle_time = sched_clock();
	blkio_mark_blkg_idling(stats);
}
EXPORT_SYMBOL_GPL(blkiocg_update_set_idle_time_stats);

void blkiocg_update_idle_time_stats(struct blkio_group *blkg,
				    struct blkio_policy_type *pol)
{
	struct blkio_group_stats *stats = &blkg->pd[pol->plid]->stats;

	lockdep_assert_held(blkg->q->queue_lock);

	if (blkio_blkg_idling(stats)) {
		unsigned long long now = sched_clock();

		if (time_after64(now, stats->start_idle_time))
			blkg_stat_add(&stats->idle_time,
				      now - stats->start_idle_time);
		blkio_clear_blkg_idling(stats);
	}
}
EXPORT_SYMBOL_GPL(blkiocg_update_idle_time_stats);

void blkiocg_update_avg_queue_size_stats(struct blkio_group *blkg,
					 struct blkio_policy_type *pol)
{
	struct blkio_group_stats *stats = &blkg->pd[pol->plid]->stats;

	lockdep_assert_held(blkg->q->queue_lock);

	blkg_stat_add(&stats->avg_queue_size_sum,
		      blkg_rwstat_sum(&stats->queued));
	blkg_stat_add(&stats->avg_queue_size_samples, 1);
	blkio_update_group_wait_time(stats);
}
EXPORT_SYMBOL_GPL(blkiocg_update_avg_queue_size_stats);

void blkiocg_set_start_empty_time(struct blkio_group *blkg,
				  struct blkio_policy_type *pol)
{
	struct blkio_group_stats *stats = &blkg->pd[pol->plid]->stats;

	lockdep_assert_held(blkg->q->queue_lock);

	if (blkg_rwstat_sum(&stats->queued))
		return;

	/*
	 * group is already marked empty. This can happen if cfqq got new
	 * request in parent group and moved to this group while being added
	 * to service tree. Just ignore the event and move on.
	 */
	if (blkio_blkg_empty(stats))
		return;

	stats->start_empty_time = sched_clock();
	blkio_mark_blkg_empty(stats);
}
EXPORT_SYMBOL_GPL(blkiocg_set_start_empty_time);

void blkiocg_update_dequeue_stats(struct blkio_group *blkg,
				  struct blkio_policy_type *pol,
				  unsigned long dequeue)
{
	struct blkg_policy_data *pd = blkg->pd[pol->plid];

	lockdep_assert_held(blkg->q->queue_lock);

	blkg_stat_add(&pd->stats.dequeue, dequeue);
}
EXPORT_SYMBOL_GPL(blkiocg_update_dequeue_stats);
#else
static inline void blkio_set_start_group_wait_time(struct blkio_group *blkg,
					struct blkio_policy_type *pol,
					struct blkio_group *curr_blkg) { }
static inline void blkio_end_empty_time(struct blkio_group_stats *stats) { }
#endif

void blkiocg_update_io_add_stats(struct blkio_group *blkg,
				 struct blkio_policy_type *pol,
				 struct blkio_group *curr_blkg, bool direction,
				 bool sync)
{
	struct blkio_group_stats *stats = &blkg->pd[pol->plid]->stats;
	int rw = (direction ? REQ_WRITE : 0) | (sync ? REQ_SYNC : 0);

	lockdep_assert_held(blkg->q->queue_lock);

	blkg_rwstat_add(&stats->queued, rw, 1);
	blkio_end_empty_time(stats);
	blkio_set_start_group_wait_time(blkg, pol, curr_blkg);
}
EXPORT_SYMBOL_GPL(blkiocg_update_io_add_stats);

void blkiocg_update_io_remove_stats(struct blkio_group *blkg,
				    struct blkio_policy_type *pol,
				    bool direction, bool sync)
{
	struct blkio_group_stats *stats = &blkg->pd[pol->plid]->stats;
	int rw = (direction ? REQ_WRITE : 0) | (sync ? REQ_SYNC : 0);

	lockdep_assert_held(blkg->q->queue_lock);

	blkg_rwstat_add(&stats->queued, rw, -1);
}
EXPORT_SYMBOL_GPL(blkiocg_update_io_remove_stats);

void blkiocg_update_timeslice_used(struct blkio_group *blkg,
				   struct blkio_policy_type *pol,
				   unsigned long time,
				   unsigned long unaccounted_time)
{
	struct blkio_group_stats *stats = &blkg->pd[pol->plid]->stats;

	lockdep_assert_held(blkg->q->queue_lock);

	blkg_stat_add(&stats->time, time);
#ifdef CONFIG_DEBUG_BLK_CGROUP
	blkg_stat_add(&stats->unaccounted_time, unaccounted_time);
#endif
}
EXPORT_SYMBOL_GPL(blkiocg_update_timeslice_used);

/*
 * should be called under rcu read lock or queue lock to make sure blkg pointer
 * is valid.
 */
void blkiocg_update_dispatch_stats(struct blkio_group *blkg,
				   struct blkio_policy_type *pol,
				   uint64_t bytes, bool direction, bool sync)
{
	int rw = (direction ? REQ_WRITE : 0) | (sync ? REQ_SYNC : 0);
	struct blkg_policy_data *pd = blkg->pd[pol->plid];
	struct blkio_group_stats_cpu *stats_cpu;
	unsigned long flags;

	/* If per cpu stats are not allocated yet, don't do any accounting. */
	if (pd->stats_cpu == NULL)
		return;

	/*
	 * Disabling interrupts to provide mutual exclusion between two
	 * writes on same cpu. It probably is not needed for 64bit. Not
	 * optimizing that case yet.
	 */
	local_irq_save(flags);

	stats_cpu = this_cpu_ptr(pd->stats_cpu);

	blkg_stat_add(&stats_cpu->sectors, bytes >> 9);
	blkg_rwstat_add(&stats_cpu->serviced, rw, 1);
	blkg_rwstat_add(&stats_cpu->service_bytes, rw, bytes);

	local_irq_restore(flags);
}
EXPORT_SYMBOL_GPL(blkiocg_update_dispatch_stats);
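
/*
 * Illustrative sketch (not part of this file's API): a hypothetical
 * policy could account a dispatched request with the helper above.
 * "my_policy_account_dispatch" and "my_pol" are made-up names; the
 * blk_rq_bytes()/rq_data_dir()/rq_is_sync() helpers are the usual
 * request accessors from linux/blkdev.h.
 */
static inline void my_policy_account_dispatch(struct blkio_group *blkg,
					      struct blkio_policy_type *my_pol,
					      struct request *rq)
{
	/* caller holds rcu read lock or the queue lock, keeping @blkg valid */
	blkiocg_update_dispatch_stats(blkg, my_pol, blk_rq_bytes(rq),
				      rq_data_dir(rq) == WRITE,
				      rq_is_sync(rq));
}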

void blkiocg_update_completion_stats(struct blkio_group *blkg,
				     struct blkio_policy_type *pol,
				     uint64_t start_time,
				     uint64_t io_start_time, bool direction,
				     bool sync)
{
	struct blkio_group_stats *stats = &blkg->pd[pol->plid]->stats;
	unsigned long long now = sched_clock();
	int rw = (direction ? REQ_WRITE : 0) | (sync ? REQ_SYNC : 0);

	lockdep_assert_held(blkg->q->queue_lock);

	if (time_after64(now, io_start_time))
		blkg_rwstat_add(&stats->service_time, rw, now - io_start_time);
	if (time_after64(io_start_time, start_time))
		blkg_rwstat_add(&stats->wait_time, rw,
				io_start_time - start_time);
}
EXPORT_SYMBOL_GPL(blkiocg_update_completion_stats);

/* The merged stat is updated under the queue_lock, not per cpu. */
void blkiocg_update_io_merged_stats(struct blkio_group *blkg,
				    struct blkio_policy_type *pol,
				    bool direction, bool sync)
{
	struct blkio_group_stats *stats = &blkg->pd[pol->plid]->stats;
	int rw = (direction ? REQ_WRITE : 0) | (sync ? REQ_SYNC : 0);

	lockdep_assert_held(blkg->q->queue_lock);

	blkg_rwstat_add(&stats->merged, rw, 1);
}
EXPORT_SYMBOL_GPL(blkiocg_update_io_merged_stats);

/*
 * Worker for allocating per cpu stat for blk groups. This is scheduled on
 * the system_nrt_wq once there are some groups on the alloc_list waiting
 * for allocation.
 */
static void blkio_stat_alloc_fn(struct work_struct *work)
{
	static void *pcpu_stats[BLKIO_NR_POLICIES];
	struct delayed_work *dwork = to_delayed_work(work);
	struct blkio_group *blkg;
	int i;
	bool empty = false;

alloc_stats:
	for (i = 0; i < BLKIO_NR_POLICIES; i++) {
		if (pcpu_stats[i] != NULL)
			continue;

		pcpu_stats[i] = alloc_percpu(struct blkio_group_stats_cpu);

		/* Allocation failed. Try again after some time. */
		if (pcpu_stats[i] == NULL) {
			queue_delayed_work(system_nrt_wq, dwork,
					   msecs_to_jiffies(10));
			return;
		}
	}

	spin_lock_irq(&blkio_list_lock);
	spin_lock(&alloc_list_lock);

	/* cgroup got deleted or queue exited. */
	if (!list_empty(&alloc_list)) {
		blkg = list_first_entry(&alloc_list, struct blkio_group,
					alloc_node);
		for (i = 0; i < BLKIO_NR_POLICIES; i++) {
			struct blkg_policy_data *pd = blkg->pd[i];

			if (blkio_policy[i] && pd && !pd->stats_cpu)
				swap(pd->stats_cpu, pcpu_stats[i]);
		}

		list_del_init(&blkg->alloc_node);
	}

	empty = list_empty(&alloc_list);

	spin_unlock(&alloc_list_lock);
	spin_unlock_irq(&blkio_list_lock);

	if (!empty)
		goto alloc_stats;
}

/**
 * blkg_free - free a blkg
 * @blkg: blkg to free
 *
 * Free @blkg which may be partially allocated.
 */
static void blkg_free(struct blkio_group *blkg)
{
	int i;

	if (!blkg)
		return;

	for (i = 0; i < BLKIO_NR_POLICIES; i++) {
		struct blkg_policy_data *pd = blkg->pd[i];

		if (pd) {
			free_percpu(pd->stats_cpu);
			kfree(pd);
		}
	}

	kfree(blkg);
}

/**
 * blkg_alloc - allocate a blkg
 * @blkcg: block cgroup the new blkg is associated with
 * @q: request_queue the new blkg is associated with
 *
 * Allocate a new blkg associating @blkcg and @q.
 */
static struct blkio_group *blkg_alloc(struct blkio_cgroup *blkcg,
				      struct request_queue *q)
{
	struct blkio_group *blkg;
	int i;

	/* alloc and init base part */
	blkg = kzalloc_node(sizeof(*blkg), GFP_ATOMIC, q->node);
	if (!blkg)
		return NULL;

	blkg->q = q;
	INIT_LIST_HEAD(&blkg->q_node);
	INIT_LIST_HEAD(&blkg->alloc_node);
	blkg->blkcg = blkcg;
	blkg->refcnt = 1;
	cgroup_path(blkcg->css.cgroup, blkg->path, sizeof(blkg->path));

	for (i = 0; i < BLKIO_NR_POLICIES; i++) {
		struct blkio_policy_type *pol = blkio_policy[i];
		struct blkg_policy_data *pd;

		if (!pol)
			continue;

		/* alloc per-policy data and attach it to blkg */
		pd = kzalloc_node(sizeof(*pd) + pol->pdata_size, GFP_ATOMIC,
				  q->node);
		if (!pd) {
			blkg_free(blkg);
			return NULL;
		}

		blkg->pd[i] = pd;
		pd->blkg = blkg;
	}

	/* invoke per-policy init */
	for (i = 0; i < BLKIO_NR_POLICIES; i++) {
		struct blkio_policy_type *pol = blkio_policy[i];

		if (pol)
			pol->ops.blkio_init_group_fn(blkg);
	}

	return blkg;
}
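
/*
 * Illustrative sketch (hypothetical policy data, not defined in this
 * file): blkg_alloc() above sizes each blkg_policy_data allocation as
 * sizeof(*pd) + pol->pdata_size, so a policy's private data lives
 * directly behind the generic part and is reached through the pdata
 * area (see blkg_to_pdata() in blk-cgroup.h). "my_policy_data" and
 * "my_pd" are made-up names for illustration only.
 */
struct my_policy_data {
	unsigned int my_weight;		/* hypothetical per-group setting */
};

static inline struct my_policy_data *my_pd(struct blkio_group *blkg,
					   struct blkio_policy_type *pol)
{
	return blkg_to_pdata(blkg, pol);	/* pol->pdata_size bytes */
}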

struct blkio_group *blkg_lookup_create(struct blkio_cgroup *blkcg,
				       struct request_queue *q,
				       bool for_root)
	__releases(q->queue_lock) __acquires(q->queue_lock)
{
	struct blkio_group *blkg;

	WARN_ON_ONCE(!rcu_read_lock_held());
	lockdep_assert_held(q->queue_lock);

	/*
	 * This could be the first entry point of blkcg implementation and
	 * we shouldn't allow anything to go through for a bypassing queue.
	 * The following can be removed if blkg lookup is guaranteed to
	 * fail on a bypassing queue.
	 */
	if (unlikely(blk_queue_bypass(q)) && !for_root)
		return ERR_PTR(blk_queue_dead(q) ? -EINVAL : -EBUSY);

	blkg = blkg_lookup(blkcg, q);
	if (blkg)
		return blkg;

	/* blkg holds a reference to blkcg */
	if (!css_tryget(&blkcg->css))
		return ERR_PTR(-EINVAL);

	/*
	 * Allocate and initialize.
	 */
	blkg = blkg_alloc(blkcg, q);

	/* did alloc fail? */
	if (unlikely(!blkg)) {
		blkg = ERR_PTR(-ENOMEM);
		goto out;
	}

	/* insert */
	spin_lock(&blkcg->lock);
	hlist_add_head_rcu(&blkg->blkcg_node, &blkcg->blkg_list);
	list_add(&blkg->q_node, &q->blkg_list);
	spin_unlock(&blkcg->lock);

	spin_lock(&alloc_list_lock);
	list_add(&blkg->alloc_node, &alloc_list);
	/* Queue per cpu stat allocation from worker thread. */
	queue_delayed_work(system_nrt_wq, &blkio_stat_alloc_work, 0);
	spin_unlock(&alloc_list_lock);
out:
	return blkg;
}
EXPORT_SYMBOL_GPL(blkg_lookup_create);
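
/*
 * Illustrative usage sketch (hypothetical caller): blkg_lookup_create()
 * requires the RCU read lock and @q->queue_lock, and may return
 * ERR_PTR(-EBUSY) while the queue is bypassing (see blkg_conf_prep()
 * below for the retry pattern actually used in this file).
 */
static inline struct blkio_group *my_get_blkg(struct blkio_cgroup *blkcg,
					      struct request_queue *q)
{
	struct blkio_group *blkg;

	rcu_read_lock();
	spin_lock_irq(q->queue_lock);
	blkg = blkg_lookup_create(blkcg, q, false);
	spin_unlock_irq(q->queue_lock);
	rcu_read_unlock();

	return blkg;		/* may be an ERR_PTR(), check with IS_ERR() */
}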

/* called under rcu_read_lock(). */
struct blkio_group *blkg_lookup(struct blkio_cgroup *blkcg,
				struct request_queue *q)
{
	struct blkio_group *blkg;
	struct hlist_node *n;

	hlist_for_each_entry_rcu(blkg, n, &blkcg->blkg_list, blkcg_node)
		if (blkg->q == q)
			return blkg;
	return NULL;
}
EXPORT_SYMBOL_GPL(blkg_lookup);

static void blkg_destroy(struct blkio_group *blkg)
{
	struct request_queue *q = blkg->q;
	struct blkio_cgroup *blkcg = blkg->blkcg;

	lockdep_assert_held(q->queue_lock);
	lockdep_assert_held(&blkcg->lock);

	/* Something wrong if we are trying to remove same group twice */
	WARN_ON_ONCE(list_empty(&blkg->q_node));
	WARN_ON_ONCE(hlist_unhashed(&blkg->blkcg_node));
	list_del_init(&blkg->q_node);
	hlist_del_init_rcu(&blkg->blkcg_node);

	spin_lock(&alloc_list_lock);
	list_del_init(&blkg->alloc_node);
	spin_unlock(&alloc_list_lock);

	/*
	 * Put the reference taken at the time of creation so that when all
	 * queues are gone, group can be destroyed.
	 */
	blkg_put(blkg);
}

/*
 * XXX: This updates blkg policy data in-place for root blkg, which is
 * necessary across elevator switch and policy registration as root blkgs
 * aren't shot down. This broken and racy implementation is temporary.
 * Eventually, blkg shoot down will be replaced by proper in-place update.
 */
void update_root_blkg_pd(struct request_queue *q, enum blkio_policy_id plid)
{
	struct blkio_policy_type *pol = blkio_policy[plid];
	struct blkio_group *blkg = blkg_lookup(&blkio_root_cgroup, q);
	struct blkg_policy_data *pd;

	if (!blkg)
		return;

	kfree(blkg->pd[plid]);
	blkg->pd[plid] = NULL;

	if (!pol)
		return;

	pd = kzalloc(sizeof(*pd) + pol->pdata_size, GFP_KERNEL);
	WARN_ON_ONCE(!pd);

	pd->stats_cpu = alloc_percpu(struct blkio_group_stats_cpu);
	WARN_ON_ONCE(!pd->stats_cpu);

	blkg->pd[plid] = pd;
	pd->blkg = blkg;
	pol->ops.blkio_init_group_fn(blkg);
}
EXPORT_SYMBOL_GPL(update_root_blkg_pd);

/**
 * blkg_destroy_all - destroy all blkgs associated with a request_queue
 * @q: request_queue of interest
 * @destroy_root: whether to destroy root blkg or not
 *
 * Destroy blkgs associated with @q. If @destroy_root is %true, all are
 * destroyed; otherwise, root blkg is left alone.
 */
void blkg_destroy_all(struct request_queue *q, bool destroy_root)
{
	struct blkio_group *blkg, *n;

	spin_lock_irq(q->queue_lock);

	list_for_each_entry_safe(blkg, n, &q->blkg_list, q_node) {
		struct blkio_cgroup *blkcg = blkg->blkcg;

		/* skip root? */
		if (!destroy_root && blkg->blkcg == &blkio_root_cgroup)
			continue;

		spin_lock(&blkcg->lock);
		blkg_destroy(blkg);
		spin_unlock(&blkcg->lock);
	}

	spin_unlock_irq(q->queue_lock);
}
EXPORT_SYMBOL_GPL(blkg_destroy_all);

static void blkg_rcu_free(struct rcu_head *rcu_head)
{
	blkg_free(container_of(rcu_head, struct blkio_group, rcu_head));
}

void __blkg_release(struct blkio_group *blkg)
{
	/* release the extra blkcg reference this blkg has been holding */
	css_put(&blkg->blkcg->css);

	/*
	 * A group is freed in rcu manner. But having an rcu lock does not
	 * mean that one can access all the fields of blkg and assume these
	 * are valid. For example, don't try to follow throtl_data and
	 * request queue links.
	 *
	 * Having a reference to blkg under an rcu allows access to only
	 * values local to groups like group stats and group rate limits.
	 */
	call_rcu(&blkg->rcu_head, blkg_rcu_free);
}
EXPORT_SYMBOL_GPL(__blkg_release);
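
/*
 * Illustrative sketch (hypothetical holder): __blkg_release() above runs
 * when the last reference is dropped via blkg_put() (blk-cgroup.h). A
 * user that needs the group beyond the current critical section pins it
 * with blkg_get(); note that blkg_get() expects a reference to already
 * be held and, in this tree, is called under the queue lock.
 */
static inline void my_use_blkg(struct blkio_group *blkg)
{
	blkg_get(blkg);		/* pin; caller already holds a valid ref */
	/* ... access group-local data such as stats or rate limits ... */
	blkg_put(blkg);		/* the last put ends up in __blkg_release() */
}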
680
c1768268 681static void blkio_reset_stats_cpu(struct blkio_group *blkg, int plid)
f0bdc8cd 682{
c1768268 683 struct blkg_policy_data *pd = blkg->pd[plid];
997a026c 684 int cpu;
1cd9e039
VG
685
686 if (pd->stats_cpu == NULL)
687 return;
997a026c
TH
688
689 for_each_possible_cpu(cpu) {
690 struct blkio_group_stats_cpu *sc =
691 per_cpu_ptr(pd->stats_cpu, cpu);
692
edcb0722
TH
693 blkg_rwstat_reset(&sc->service_bytes);
694 blkg_rwstat_reset(&sc->serviced);
695 blkg_stat_reset(&sc->sectors);
f0bdc8cd
VG
696 }
697}
698
303a3acb 699static int
84c124da 700blkiocg_reset_stats(struct cgroup *cgroup, struct cftype *cftype, u64 val)
303a3acb 701{
997a026c 702 struct blkio_cgroup *blkcg = cgroup_to_blkio_cgroup(cgroup);
303a3acb
DS
703 struct blkio_group *blkg;
704 struct hlist_node *n;
303a3acb 705
e8989fae 706 spin_lock(&blkio_list_lock);
303a3acb 707 spin_lock_irq(&blkcg->lock);
997a026c
TH
708
709 /*
710 * Note that stat reset is racy - it doesn't synchronize against
711 * stat updates. This is a debug feature which shouldn't exist
712 * anyway. If you get hit by a race, retry.
713 */
303a3acb 714 hlist_for_each_entry(blkg, n, &blkcg->blkg_list, blkcg_node) {
e8989fae 715 struct blkio_policy_type *pol;
549d3aa8 716
e8989fae
TH
717 list_for_each_entry(pol, &blkio_list, list) {
718 struct blkg_policy_data *pd = blkg->pd[pol->plid];
997a026c
TH
719 struct blkio_group_stats *stats = &pd->stats;
720
721 /* queued stats shouldn't be cleared */
edcb0722
TH
722 blkg_rwstat_reset(&stats->merged);
723 blkg_rwstat_reset(&stats->service_time);
724 blkg_rwstat_reset(&stats->wait_time);
725 blkg_stat_reset(&stats->time);
812df48d 726#ifdef CONFIG_DEBUG_BLK_CGROUP
edcb0722
TH
727 blkg_stat_reset(&stats->unaccounted_time);
728 blkg_stat_reset(&stats->avg_queue_size_sum);
729 blkg_stat_reset(&stats->avg_queue_size_samples);
730 blkg_stat_reset(&stats->dequeue);
731 blkg_stat_reset(&stats->group_wait_time);
732 blkg_stat_reset(&stats->idle_time);
733 blkg_stat_reset(&stats->empty_time);
812df48d 734#endif
e8989fae
TH
735 blkio_reset_stats_cpu(blkg, pol->plid);
736 }
303a3acb 737 }
f0bdc8cd 738
303a3acb 739 spin_unlock_irq(&blkcg->lock);
e8989fae 740 spin_unlock(&blkio_list_lock);
303a3acb
DS
741 return 0;
742}
743
d3d32e69 744static const char *blkg_dev_name(struct blkio_group *blkg)
303a3acb 745{
d3d32e69
TH
746 /* some drivers (floppy) instantiate a queue w/o disk registered */
747 if (blkg->q->backing_dev_info.dev)
748 return dev_name(blkg->q->backing_dev_info.dev);
749 return NULL;
303a3acb
DS
750}
751
d3d32e69
TH
752/**
753 * blkcg_print_blkgs - helper for printing per-blkg data
754 * @sf: seq_file to print to
755 * @blkcg: blkcg of interest
756 * @prfill: fill function to print out a blkg
757 * @pol: policy in question
758 * @data: data to be passed to @prfill
759 * @show_total: to print out sum of prfill return values or not
760 *
761 * This function invokes @prfill on each blkg of @blkcg if pd for the
762 * policy specified by @pol exists. @prfill is invoked with @sf, the
763 * policy data and @data. If @show_total is %true, the sum of the return
764 * values from @prfill is printed with "Total" label at the end.
765 *
766 * This is to be used to construct print functions for
767 * cftype->read_seq_string method.
768 */
829fdb50
TH
769void blkcg_print_blkgs(struct seq_file *sf, struct blkio_cgroup *blkcg,
770 u64 (*prfill)(struct seq_file *, struct blkg_policy_data *, int),
771 int pol, int data, bool show_total)
5624a4e4 772{
d3d32e69
TH
773 struct blkio_group *blkg;
774 struct hlist_node *n;
775 u64 total = 0;
5624a4e4 776
d3d32e69
TH
777 spin_lock_irq(&blkcg->lock);
778 hlist_for_each_entry(blkg, n, &blkcg->blkg_list, blkcg_node)
779 if (blkg->pd[pol])
780 total += prfill(sf, blkg->pd[pol], data);
781 spin_unlock_irq(&blkcg->lock);
782
783 if (show_total)
784 seq_printf(sf, "Total %llu\n", (unsigned long long)total);
785}
829fdb50 786EXPORT_SYMBOL_GPL(blkcg_print_blkgs);
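
/*
 * For reference (sketch of the resulting file format): with the prfill
 * helpers below, a blkg_stat file prints one "MAJ:MIN value" line per
 * device, e.g. "8:16 128", while a blkg_rwstat file prints one line per
 * direction plus a per-device Total, e.g.:
 *
 *	8:16 Read 1024
 *	8:16 Write 0
 *	8:16 Sync 1024
 *	8:16 Async 0
 *	8:16 Total 1024
 *
 * The device name comes from blkg_dev_name() above; whether it takes the
 * "MAJ:MIN" form depends on how the backing_dev_info device was
 * registered, so treat the exact names here as illustrative.
 */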

/**
 * __blkg_prfill_u64 - prfill helper for a single u64 value
 * @sf: seq_file to print to
 * @pd: policy data of interest
 * @v: value to print
 *
 * Print @v to @sf for the device associated with @pd.
 */
u64 __blkg_prfill_u64(struct seq_file *sf, struct blkg_policy_data *pd, u64 v)
{
	const char *dname = blkg_dev_name(pd->blkg);

	if (!dname)
		return 0;

	seq_printf(sf, "%s %llu\n", dname, (unsigned long long)v);
	return v;
}
EXPORT_SYMBOL_GPL(__blkg_prfill_u64);

/**
 * __blkg_prfill_rwstat - prfill helper for a blkg_rwstat
 * @sf: seq_file to print to
 * @pd: policy data of interest
 * @rwstat: rwstat to print
 *
 * Print @rwstat to @sf for the device associated with @pd.
 */
u64 __blkg_prfill_rwstat(struct seq_file *sf, struct blkg_policy_data *pd,
			 const struct blkg_rwstat *rwstat)
{
	static const char *rwstr[] = {
		[BLKG_RWSTAT_READ]	= "Read",
		[BLKG_RWSTAT_WRITE]	= "Write",
		[BLKG_RWSTAT_SYNC]	= "Sync",
		[BLKG_RWSTAT_ASYNC]	= "Async",
	};
	const char *dname = blkg_dev_name(pd->blkg);
	u64 v;
	int i;

	if (!dname)
		return 0;

	for (i = 0; i < BLKG_RWSTAT_NR; i++)
		seq_printf(sf, "%s %s %llu\n", dname, rwstr[i],
			   (unsigned long long)rwstat->cnt[i]);

	v = rwstat->cnt[BLKG_RWSTAT_READ] + rwstat->cnt[BLKG_RWSTAT_WRITE];
	seq_printf(sf, "%s Total %llu\n", dname, (unsigned long long)v);
	return v;
}

static u64 blkg_prfill_stat(struct seq_file *sf, struct blkg_policy_data *pd,
			    int off)
{
	return __blkg_prfill_u64(sf, pd,
				 blkg_stat_read((void *)&pd->stats + off));
}

static u64 blkg_prfill_rwstat(struct seq_file *sf, struct blkg_policy_data *pd,
			      int off)
{
	struct blkg_rwstat rwstat = blkg_rwstat_read((void *)&pd->stats + off);

	return __blkg_prfill_rwstat(sf, pd, &rwstat);
}

/* print blkg_stat specified by BLKCG_STAT_PRIV() */
int blkcg_print_stat(struct cgroup *cgrp, struct cftype *cft,
		     struct seq_file *sf)
{
	struct blkio_cgroup *blkcg = cgroup_to_blkio_cgroup(cgrp);

	blkcg_print_blkgs(sf, blkcg, blkg_prfill_stat,
			  BLKCG_STAT_POL(cft->private),
			  BLKCG_STAT_OFF(cft->private), false);
	return 0;
}
EXPORT_SYMBOL_GPL(blkcg_print_stat);

/* print blkg_rwstat specified by BLKCG_STAT_PRIV() */
int blkcg_print_rwstat(struct cgroup *cgrp, struct cftype *cft,
		       struct seq_file *sf)
{
	struct blkio_cgroup *blkcg = cgroup_to_blkio_cgroup(cgrp);

	blkcg_print_blkgs(sf, blkcg, blkg_prfill_rwstat,
			  BLKCG_STAT_POL(cft->private),
			  BLKCG_STAT_OFF(cft->private), true);
	return 0;
}
EXPORT_SYMBOL_GPL(blkcg_print_rwstat);

static u64 blkg_prfill_cpu_stat(struct seq_file *sf,
				struct blkg_policy_data *pd, int off)
{
	u64 v = 0;
	int cpu;

	for_each_possible_cpu(cpu) {
		struct blkio_group_stats_cpu *sc =
			per_cpu_ptr(pd->stats_cpu, cpu);

		v += blkg_stat_read((void *)sc + off);
	}

	return __blkg_prfill_u64(sf, pd, v);
}

static u64 blkg_prfill_cpu_rwstat(struct seq_file *sf,
				  struct blkg_policy_data *pd, int off)
{
	struct blkg_rwstat rwstat = { }, tmp;
	int i, cpu;

	for_each_possible_cpu(cpu) {
		struct blkio_group_stats_cpu *sc =
			per_cpu_ptr(pd->stats_cpu, cpu);

		tmp = blkg_rwstat_read((void *)sc + off);
		for (i = 0; i < BLKG_RWSTAT_NR; i++)
			rwstat.cnt[i] += tmp.cnt[i];
	}

	return __blkg_prfill_rwstat(sf, pd, &rwstat);
}

/* print per-cpu blkg_stat specified by BLKCG_STAT_PRIV() */
int blkcg_print_cpu_stat(struct cgroup *cgrp, struct cftype *cft,
			 struct seq_file *sf)
{
	struct blkio_cgroup *blkcg = cgroup_to_blkio_cgroup(cgrp);

	blkcg_print_blkgs(sf, blkcg, blkg_prfill_cpu_stat,
			  BLKCG_STAT_POL(cft->private),
			  BLKCG_STAT_OFF(cft->private), false);
	return 0;
}
EXPORT_SYMBOL_GPL(blkcg_print_cpu_stat);

/* print per-cpu blkg_rwstat specified by BLKCG_STAT_PRIV() */
int blkcg_print_cpu_rwstat(struct cgroup *cgrp, struct cftype *cft,
			   struct seq_file *sf)
{
	struct blkio_cgroup *blkcg = cgroup_to_blkio_cgroup(cgrp);

	blkcg_print_blkgs(sf, blkcg, blkg_prfill_cpu_rwstat,
			  BLKCG_STAT_POL(cft->private),
			  BLKCG_STAT_OFF(cft->private), true);
	return 0;
}
EXPORT_SYMBOL_GPL(blkcg_print_cpu_rwstat);

#ifdef CONFIG_DEBUG_BLK_CGROUP
static u64 blkg_prfill_avg_queue_size(struct seq_file *sf,
				      struct blkg_policy_data *pd, int off)
{
	u64 samples = blkg_stat_read(&pd->stats.avg_queue_size_samples);
	u64 v = 0;

	if (samples) {
		v = blkg_stat_read(&pd->stats.avg_queue_size_sum);
		do_div(v, samples);
	}
	__blkg_prfill_u64(sf, pd, v);
	return 0;
}

/* print avg_queue_size */
static int blkcg_print_avg_queue_size(struct cgroup *cgrp, struct cftype *cft,
				      struct seq_file *sf)
{
	struct blkio_cgroup *blkcg = cgroup_to_blkio_cgroup(cgrp);

	blkcg_print_blkgs(sf, blkcg, blkg_prfill_avg_queue_size,
			  BLKIO_POLICY_PROP, 0, false);
	return 0;
}
#endif	/* CONFIG_DEBUG_BLK_CGROUP */

/**
 * blkg_conf_prep - parse and prepare for per-blkg config update
 * @blkcg: target block cgroup
 * @input: input string
 * @ctx: blkg_conf_ctx to be filled
 *
 * Parse per-blkg config update from @input and initialize @ctx with the
 * result. @ctx->blkg points to the blkg to be updated and @ctx->v the new
 * value. This function returns with RCU read locked and must be paired
 * with blkg_conf_finish().
 */
int blkg_conf_prep(struct blkio_cgroup *blkcg, const char *input,
		   struct blkg_conf_ctx *ctx)
	__acquires(rcu)
{
	struct gendisk *disk;
	struct blkio_group *blkg;
	unsigned int major, minor;
	unsigned long long v;
	int part, ret;

	if (sscanf(input, "%u:%u %llu", &major, &minor, &v) != 3)
		return -EINVAL;

	disk = get_gendisk(MKDEV(major, minor), &part);
	if (!disk || part)
		return -EINVAL;

	rcu_read_lock();

	spin_lock_irq(disk->queue->queue_lock);
	blkg = blkg_lookup_create(blkcg, disk->queue, false);
	spin_unlock_irq(disk->queue->queue_lock);

	if (IS_ERR(blkg)) {
		ret = PTR_ERR(blkg);
		rcu_read_unlock();
		put_disk(disk);
		/*
		 * If queue was bypassing, we should retry. Do so after a
		 * short msleep(). It isn't strictly necessary but queue
		 * can be bypassing for some time and it's always nice to
		 * avoid busy looping.
		 */
		if (ret == -EBUSY) {
			msleep(10);
			ret = restart_syscall();
		}
		return ret;
	}

	ctx->disk = disk;
	ctx->blkg = blkg;
	ctx->v = v;
	return 0;
}
EXPORT_SYMBOL_GPL(blkg_conf_prep);

/**
 * blkg_conf_finish - finish up per-blkg config update
 * @ctx: blkg_conf_ctx initialized by blkg_conf_prep()
 *
 * Finish up after per-blkg config update. This function must be paired
 * with blkg_conf_prep().
 */
void blkg_conf_finish(struct blkg_conf_ctx *ctx)
	__releases(rcu)
{
	rcu_read_unlock();
	put_disk(ctx->disk);
}
EXPORT_SYMBOL_GPL(blkg_conf_finish);
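
/*
 * For reference: blkg_conf_prep() accepts input of the form
 * "MAJ:MIN VAL" (see the sscanf() above), e.g. writing "8:16 1000" to a
 * conf file updates the group of disk 8:16 with the value 1000. The
 * writers below treat a VAL of 0 as "clear the per-device setting and
 * fall back to the default" (ctx.v ?: ...).
 */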

/* for propio conf */
static u64 blkg_prfill_weight_device(struct seq_file *sf,
				     struct blkg_policy_data *pd, int off)
{
	if (!pd->conf.weight)
		return 0;
	return __blkg_prfill_u64(sf, pd, pd->conf.weight);
}

static int blkcg_print_weight_device(struct cgroup *cgrp, struct cftype *cft,
				     struct seq_file *sf)
{
	blkcg_print_blkgs(sf, cgroup_to_blkio_cgroup(cgrp),
			  blkg_prfill_weight_device, BLKIO_POLICY_PROP, 0,
			  false);
	return 0;
}

static int blkcg_print_weight(struct cgroup *cgrp, struct cftype *cft,
			      struct seq_file *sf)
{
	seq_printf(sf, "%u\n", cgroup_to_blkio_cgroup(cgrp)->weight);
	return 0;
}

static int blkcg_set_weight_device(struct cgroup *cgrp, struct cftype *cft,
				   const char *buf)
{
	struct blkio_cgroup *blkcg = cgroup_to_blkio_cgroup(cgrp);
	struct blkg_policy_data *pd;
	struct blkg_conf_ctx ctx;
	int ret;

	ret = blkg_conf_prep(blkcg, buf, &ctx);
	if (ret)
		return ret;

	ret = -EINVAL;
	pd = ctx.blkg->pd[BLKIO_POLICY_PROP];
	if (pd && (!ctx.v || (ctx.v >= BLKIO_WEIGHT_MIN &&
			      ctx.v <= BLKIO_WEIGHT_MAX))) {
		pd->conf.weight = ctx.v;
		blkio_update_group_weight(ctx.blkg, BLKIO_POLICY_PROP,
					  ctx.v ?: blkcg->weight);
		ret = 0;
	}

	blkg_conf_finish(&ctx);
	return ret;
}

static int blkcg_set_weight(struct cgroup *cgrp, struct cftype *cft, u64 val)
{
	struct blkio_cgroup *blkcg = cgroup_to_blkio_cgroup(cgrp);
	struct blkio_group *blkg;
	struct hlist_node *n;

	if (val < BLKIO_WEIGHT_MIN || val > BLKIO_WEIGHT_MAX)
		return -EINVAL;

	spin_lock(&blkio_list_lock);
	spin_lock_irq(&blkcg->lock);
	blkcg->weight = (unsigned int)val;

	hlist_for_each_entry(blkg, n, &blkcg->blkg_list, blkcg_node) {
		struct blkg_policy_data *pd = blkg->pd[BLKIO_POLICY_PROP];

		if (pd && !pd->conf.weight)
			blkio_update_group_weight(blkg, BLKIO_POLICY_PROP,
						  blkcg->weight);
	}

	spin_unlock_irq(&blkcg->lock);
	spin_unlock(&blkio_list_lock);
	return 0;
}

/* for blk-throttle conf */
#ifdef CONFIG_BLK_DEV_THROTTLING
static u64 blkg_prfill_conf_u64(struct seq_file *sf,
				struct blkg_policy_data *pd, int off)
{
	u64 v = *(u64 *)((void *)&pd->conf + off);

	if (!v)
		return 0;
	return __blkg_prfill_u64(sf, pd, v);
}

static int blkcg_print_conf_u64(struct cgroup *cgrp, struct cftype *cft,
				struct seq_file *sf)
{
	blkcg_print_blkgs(sf, cgroup_to_blkio_cgroup(cgrp),
			  blkg_prfill_conf_u64, BLKIO_POLICY_THROTL,
			  cft->private, false);
	return 0;
}

static int blkcg_set_conf_u64(struct cgroup *cgrp, struct cftype *cft,
			      const char *buf, int rw,
			      void (*update)(struct blkio_group *, int, u64, int))
{
	struct blkio_cgroup *blkcg = cgroup_to_blkio_cgroup(cgrp);
	struct blkg_policy_data *pd;
	struct blkg_conf_ctx ctx;
	int ret;

	ret = blkg_conf_prep(blkcg, buf, &ctx);
	if (ret)
		return ret;

	ret = -EINVAL;
	pd = ctx.blkg->pd[BLKIO_POLICY_THROTL];
	if (pd) {
		*(u64 *)((void *)&pd->conf + cft->private) = ctx.v;
		update(ctx.blkg, BLKIO_POLICY_THROTL, ctx.v ?: -1, rw);
		ret = 0;
	}

	blkg_conf_finish(&ctx);
	return ret;
}

static int blkcg_set_conf_bps_r(struct cgroup *cgrp, struct cftype *cft,
				const char *buf)
{
	return blkcg_set_conf_u64(cgrp, cft, buf, READ, blkio_update_group_bps);
}

static int blkcg_set_conf_bps_w(struct cgroup *cgrp, struct cftype *cft,
				const char *buf)
{
	return blkcg_set_conf_u64(cgrp, cft, buf, WRITE, blkio_update_group_bps);
}

static int blkcg_set_conf_iops_r(struct cgroup *cgrp, struct cftype *cft,
				 const char *buf)
{
	return blkcg_set_conf_u64(cgrp, cft, buf, READ, blkio_update_group_iops);
}

static int blkcg_set_conf_iops_w(struct cgroup *cgrp, struct cftype *cft,
				 const char *buf)
{
	return blkcg_set_conf_u64(cgrp, cft, buf, WRITE, blkio_update_group_iops);
}
#endif

struct cftype blkio_files[] = {
	{
		.name = "weight_device",
		.read_seq_string = blkcg_print_weight_device,
		.write_string = blkcg_set_weight_device,
		.max_write_len = 256,
	},
	{
		.name = "weight",
		.read_seq_string = blkcg_print_weight,
		.write_u64 = blkcg_set_weight,
	},
	{
		.name = "time",
		.private = BLKCG_STAT_PRIV(BLKIO_POLICY_PROP,
				offsetof(struct blkio_group_stats, time)),
		.read_seq_string = blkcg_print_stat,
	},
	{
		.name = "sectors",
		.private = BLKCG_STAT_PRIV(BLKIO_POLICY_PROP,
				offsetof(struct blkio_group_stats_cpu, sectors)),
		.read_seq_string = blkcg_print_cpu_stat,
	},
	{
		.name = "io_service_bytes",
		.private = BLKCG_STAT_PRIV(BLKIO_POLICY_PROP,
				offsetof(struct blkio_group_stats_cpu, service_bytes)),
		.read_seq_string = blkcg_print_cpu_rwstat,
	},
	{
		.name = "io_serviced",
		.private = BLKCG_STAT_PRIV(BLKIO_POLICY_PROP,
				offsetof(struct blkio_group_stats_cpu, serviced)),
		.read_seq_string = blkcg_print_cpu_rwstat,
	},
	{
		.name = "io_service_time",
		.private = BLKCG_STAT_PRIV(BLKIO_POLICY_PROP,
				offsetof(struct blkio_group_stats, service_time)),
		.read_seq_string = blkcg_print_rwstat,
	},
	{
		.name = "io_wait_time",
		.private = BLKCG_STAT_PRIV(BLKIO_POLICY_PROP,
				offsetof(struct blkio_group_stats, wait_time)),
		.read_seq_string = blkcg_print_rwstat,
	},
	{
		.name = "io_merged",
		.private = BLKCG_STAT_PRIV(BLKIO_POLICY_PROP,
				offsetof(struct blkio_group_stats, merged)),
		.read_seq_string = blkcg_print_rwstat,
	},
	{
		.name = "io_queued",
		.private = BLKCG_STAT_PRIV(BLKIO_POLICY_PROP,
				offsetof(struct blkio_group_stats, queued)),
		.read_seq_string = blkcg_print_rwstat,
	},
	{
		.name = "reset_stats",
		.write_u64 = blkiocg_reset_stats,
	},
#ifdef CONFIG_BLK_DEV_THROTTLING
	{
		.name = "throttle.read_bps_device",
		.private = offsetof(struct blkio_group_conf, bps[READ]),
		.read_seq_string = blkcg_print_conf_u64,
		.write_string = blkcg_set_conf_bps_r,
		.max_write_len = 256,
	},

	{
		.name = "throttle.write_bps_device",
		.private = offsetof(struct blkio_group_conf, bps[WRITE]),
		.read_seq_string = blkcg_print_conf_u64,
		.write_string = blkcg_set_conf_bps_w,
		.max_write_len = 256,
	},

	{
		.name = "throttle.read_iops_device",
		.private = offsetof(struct blkio_group_conf, iops[READ]),
		.read_seq_string = blkcg_print_conf_u64,
		.write_string = blkcg_set_conf_iops_r,
		.max_write_len = 256,
	},

	{
		.name = "throttle.write_iops_device",
		.private = offsetof(struct blkio_group_conf, iops[WRITE]),
		.read_seq_string = blkcg_print_conf_u64,
		.write_string = blkcg_set_conf_iops_w,
		.max_write_len = 256,
	},
	{
		.name = "throttle.io_service_bytes",
		.private = BLKCG_STAT_PRIV(BLKIO_POLICY_THROTL,
				offsetof(struct blkio_group_stats_cpu, service_bytes)),
		.read_seq_string = blkcg_print_cpu_rwstat,
	},
	{
		.name = "throttle.io_serviced",
		.private = BLKCG_STAT_PRIV(BLKIO_POLICY_THROTL,
				offsetof(struct blkio_group_stats_cpu, serviced)),
		.read_seq_string = blkcg_print_cpu_rwstat,
	},
#endif /* CONFIG_BLK_DEV_THROTTLING */

#ifdef CONFIG_DEBUG_BLK_CGROUP
	{
		.name = "avg_queue_size",
		.read_seq_string = blkcg_print_avg_queue_size,
	},
	{
		.name = "group_wait_time",
		.private = BLKCG_STAT_PRIV(BLKIO_POLICY_PROP,
				offsetof(struct blkio_group_stats, group_wait_time)),
		.read_seq_string = blkcg_print_stat,
	},
	{
		.name = "idle_time",
		.private = BLKCG_STAT_PRIV(BLKIO_POLICY_PROP,
				offsetof(struct blkio_group_stats, idle_time)),
		.read_seq_string = blkcg_print_stat,
	},
	{
		.name = "empty_time",
		.private = BLKCG_STAT_PRIV(BLKIO_POLICY_PROP,
				offsetof(struct blkio_group_stats, empty_time)),
		.read_seq_string = blkcg_print_stat,
	},
	{
		.name = "dequeue",
		.private = BLKCG_STAT_PRIV(BLKIO_POLICY_PROP,
				offsetof(struct blkio_group_stats, dequeue)),
		.read_seq_string = blkcg_print_stat,
	},
	{
		.name = "unaccounted_time",
		.private = BLKCG_STAT_PRIV(BLKIO_POLICY_PROP,
				offsetof(struct blkio_group_stats, unaccounted_time)),
		.read_seq_string = blkcg_print_stat,
	},
#endif
	{ }	/* terminate */
};

/**
 * blkiocg_pre_destroy - cgroup pre_destroy callback
 * @cgroup: cgroup of interest
 *
 * This function is called when @cgroup is about to go away and is
 * responsible for shooting down all blkgs associated with @cgroup.
 * blkgs should be removed while holding both q and blkcg locks. As the
 * blkcg lock is nested inside the q lock, this function performs
 * reverse double lock dancing.
 *
 * This is the blkcg counterpart of ioc_release_fn().
 */
static int blkiocg_pre_destroy(struct cgroup *cgroup)
{
	struct blkio_cgroup *blkcg = cgroup_to_blkio_cgroup(cgroup);

	spin_lock_irq(&blkcg->lock);

	while (!hlist_empty(&blkcg->blkg_list)) {
		struct blkio_group *blkg = hlist_entry(blkcg->blkg_list.first,
						struct blkio_group, blkcg_node);
		struct request_queue *q = blkg->q;

		if (spin_trylock(q->queue_lock)) {
			blkg_destroy(blkg);
			spin_unlock(q->queue_lock);
		} else {
			spin_unlock_irq(&blkcg->lock);
			cpu_relax();
			spin_lock_irq(&blkcg->lock);
		}
	}

	spin_unlock_irq(&blkcg->lock);
	return 0;
}

static void blkiocg_destroy(struct cgroup *cgroup)
{
	struct blkio_cgroup *blkcg = cgroup_to_blkio_cgroup(cgroup);

	if (blkcg != &blkio_root_cgroup)
		kfree(blkcg);
}

static struct cgroup_subsys_state *blkiocg_create(struct cgroup *cgroup)
{
	static atomic64_t id_seq = ATOMIC64_INIT(0);
	struct blkio_cgroup *blkcg;
	struct cgroup *parent = cgroup->parent;

	if (!parent) {
		blkcg = &blkio_root_cgroup;
		goto done;
	}

	blkcg = kzalloc(sizeof(*blkcg), GFP_KERNEL);
	if (!blkcg)
		return ERR_PTR(-ENOMEM);

	blkcg->weight = BLKIO_WEIGHT_DEFAULT;
	blkcg->id = atomic64_inc_return(&id_seq); /* root is 0, start from 1 */
done:
	spin_lock_init(&blkcg->lock);
	INIT_HLIST_HEAD(&blkcg->blkg_list);

	return &blkcg->css;
}

/**
 * blkcg_init_queue - initialize blkcg part of request queue
 * @q: request_queue to initialize
 *
 * Called from blk_alloc_queue_node(). Responsible for initializing blkcg
 * part of new request_queue @q.
 *
 * RETURNS:
 * 0 on success, -errno on failure.
 */
int blkcg_init_queue(struct request_queue *q)
{
	int ret;

	might_sleep();

	ret = blk_throtl_init(q);
	if (ret)
		return ret;

	mutex_lock(&all_q_mutex);
	INIT_LIST_HEAD(&q->all_q_node);
	list_add_tail(&q->all_q_node, &all_q_list);
	mutex_unlock(&all_q_mutex);

	return 0;
}

/**
 * blkcg_drain_queue - drain blkcg part of request_queue
 * @q: request_queue to drain
 *
 * Called from blk_drain_queue(). Responsible for draining blkcg part.
 */
void blkcg_drain_queue(struct request_queue *q)
{
	lockdep_assert_held(q->queue_lock);

	blk_throtl_drain(q);
}

/**
 * blkcg_exit_queue - exit and release blkcg part of request_queue
 * @q: request_queue being released
 *
 * Called from blk_release_queue(). Responsible for exiting blkcg part.
 */
void blkcg_exit_queue(struct request_queue *q)
{
	mutex_lock(&all_q_mutex);
	list_del_init(&q->all_q_node);
	mutex_unlock(&all_q_mutex);

	blkg_destroy_all(q, true);

	blk_throtl_exit(q);
}

/*
 * We cannot support shared io contexts, as we have no means to support
 * two tasks with the same ioc in two different groups without major rework
 * of the main cic data structures. For now we allow a task to change
 * its cgroup only if it's the only owner of its ioc.
 */
static int blkiocg_can_attach(struct cgroup *cgrp, struct cgroup_taskset *tset)
{
	struct task_struct *task;
	struct io_context *ioc;
	int ret = 0;

	/* task_lock() is needed to avoid races with exit_io_context() */
	cgroup_taskset_for_each(task, cgrp, tset) {
		task_lock(task);
		ioc = task->io_context;
		if (ioc && atomic_read(&ioc->nr_tasks) > 1)
			ret = -EINVAL;
		task_unlock(task);
		if (ret)
			break;
	}
	return ret;
}

static void blkcg_bypass_start(void)
	__acquires(&all_q_mutex)
{
	struct request_queue *q;

	mutex_lock(&all_q_mutex);

	list_for_each_entry(q, &all_q_list, all_q_node) {
		blk_queue_bypass_start(q);
		blkg_destroy_all(q, false);
	}
}

static void blkcg_bypass_end(void)
	__releases(&all_q_mutex)
{
	struct request_queue *q;

	list_for_each_entry(q, &all_q_list, all_q_node)
		blk_queue_bypass_end(q);

	mutex_unlock(&all_q_mutex);
}

struct cgroup_subsys blkio_subsys = {
	.name = "blkio",
	.create = blkiocg_create,
	.can_attach = blkiocg_can_attach,
	.pre_destroy = blkiocg_pre_destroy,
	.destroy = blkiocg_destroy,
	.subsys_id = blkio_subsys_id,
	.base_cftypes = blkio_files,
	.module = THIS_MODULE,
};
EXPORT_SYMBOL_GPL(blkio_subsys);

void blkio_policy_register(struct blkio_policy_type *blkiop)
{
	struct request_queue *q;

	blkcg_bypass_start();
	spin_lock(&blkio_list_lock);

	BUG_ON(blkio_policy[blkiop->plid]);
	blkio_policy[blkiop->plid] = blkiop;
	list_add_tail(&blkiop->list, &blkio_list);

	spin_unlock(&blkio_list_lock);
	list_for_each_entry(q, &all_q_list, all_q_node)
		update_root_blkg_pd(q, blkiop->plid);
	blkcg_bypass_end();
}
EXPORT_SYMBOL_GPL(blkio_policy_register);

void blkio_policy_unregister(struct blkio_policy_type *blkiop)
{
	struct request_queue *q;

	blkcg_bypass_start();
	spin_lock(&blkio_list_lock);

	BUG_ON(blkio_policy[blkiop->plid] != blkiop);
	blkio_policy[blkiop->plid] = NULL;
	list_del_init(&blkiop->list);

	spin_unlock(&blkio_list_lock);
	list_for_each_entry(q, &all_q_list, all_q_node)
		update_root_blkg_pd(q, blkiop->plid);
	blkcg_bypass_end();
}
EXPORT_SYMBOL_GPL(blkio_policy_unregister);
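
/*
 * Illustrative sketch (hypothetical policy module, example only): a
 * policy describes itself with a struct blkio_policy_type and registers
 * it at init time, claiming one plid slot and telling the core how much
 * private data to allocate per group. Kept under #if 0 since this file
 * defines no policy itself; "my_init_group"/"my_policy" are made-up
 * names and the field names follow blk-cgroup.h.
 */
#if 0	/* example only */
static void my_init_group(struct blkio_group *blkg) { }

static struct blkio_policy_type my_policy = {
	.ops = {
		.blkio_init_group_fn	= my_init_group,
	},
	.plid		= BLKIO_POLICY_THROTL,	/* each policy owns one slot */
	.pdata_size	= sizeof(struct my_policy_data),
};

static int __init my_policy_init(void)
{
	blkio_policy_register(&my_policy);
	return 0;
}
module_init(my_policy_init);
#endif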