/*
 * Common Block IO controller cgroup interface
 *
 * Based on ideas and code from CFQ, CFS and BFQ:
 * Copyright (C) 2003 Jens Axboe <axboe@kernel.dk>
 *
 * Copyright (C) 2008 Fabio Checconi <fabio@gandalf.sssup.it>
 *		      Paolo Valente <paolo.valente@unimore.it>
 *
 * Copyright (C) 2009 Vivek Goyal <vgoyal@redhat.com>
 *	              Nauman Rafique <nauman@google.com>
 */
#include <linux/ioprio.h>
#include <linux/seq_file.h>
#include <linux/kdev_t.h>
#include <linux/module.h>
#include <linux/err.h>
#include <linux/blkdev.h>
#include <linux/slab.h>
#include <linux/genhd.h>
#include <linux/delay.h>
#include <linux/atomic.h>
#include "blk-cgroup.h"
#include "blk.h"

#define MAX_KEY_LEN 100

static DEFINE_SPINLOCK(blkio_list_lock);
static LIST_HEAD(blkio_list);

static DEFINE_MUTEX(all_q_mutex);
static LIST_HEAD(all_q_list);

/* List of groups pending per cpu stats allocation */
static DEFINE_SPINLOCK(alloc_list_lock);
static LIST_HEAD(alloc_list);

static void blkio_stat_alloc_fn(struct work_struct *);
static DECLARE_DELAYED_WORK(blkio_stat_alloc_work, blkio_stat_alloc_fn);

struct blkio_cgroup blkio_root_cgroup = { .weight = 2*BLKIO_WEIGHT_DEFAULT };
EXPORT_SYMBOL_GPL(blkio_root_cgroup);

static struct blkio_policy_type *blkio_policy[BLKIO_NR_POLICIES];

/* for encoding cft->private value on file */
#define BLKIOFILE_PRIVATE(x, val)	(((x) << 16) | (val))
/* What policy owns the file, proportional or throttle */
#define BLKIOFILE_POLICY(val)		(((val) >> 16) & 0xffff)
#define BLKIOFILE_ATTR(val)		((val) & 0xffff)

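/*
 * Example: BLKIOFILE_PRIVATE(BLKIO_POLICY_THROTL, BLKIO_THROTL_read_bps_device)
 * packs the policy id into the upper 16 bits and the attribute into the
 * lower 16 bits, so BLKIOFILE_POLICY() and BLKIOFILE_ATTR() can recover
 * both halves from the single cft->private word.
 */
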
struct blkio_cgroup *cgroup_to_blkio_cgroup(struct cgroup *cgroup)
{
        return container_of(cgroup_subsys_state(cgroup, blkio_subsys_id),
                            struct blkio_cgroup, css);
}
EXPORT_SYMBOL_GPL(cgroup_to_blkio_cgroup);

static struct blkio_cgroup *task_blkio_cgroup(struct task_struct *tsk)
{
        return container_of(task_subsys_state(tsk, blkio_subsys_id),
                            struct blkio_cgroup, css);
}

struct blkio_cgroup *bio_blkio_cgroup(struct bio *bio)
{
        if (bio && bio->bi_css)
                return container_of(bio->bi_css, struct blkio_cgroup, css);
        return task_blkio_cgroup(current);
}
EXPORT_SYMBOL_GPL(bio_blkio_cgroup);

static inline void blkio_update_group_weight(struct blkio_group *blkg,
                                             int plid, unsigned int weight)
{
        struct blkio_policy_type *blkiop;

        list_for_each_entry(blkiop, &blkio_list, list) {
                /* If this policy does not own the blkg, do not send updates */
                if (blkiop->plid != plid)
                        continue;
                if (blkiop->ops.blkio_update_group_weight_fn)
                        blkiop->ops.blkio_update_group_weight_fn(blkg->q,
                                                        blkg, weight);
        }
}

static inline void blkio_update_group_bps(struct blkio_group *blkg, int plid,
                                          u64 bps, int fileid)
{
        struct blkio_policy_type *blkiop;

        list_for_each_entry(blkiop, &blkio_list, list) {

                /* If this policy does not own the blkg, do not send updates */
                if (blkiop->plid != plid)
                        continue;

                if (fileid == BLKIO_THROTL_read_bps_device
                    && blkiop->ops.blkio_update_group_read_bps_fn)
                        blkiop->ops.blkio_update_group_read_bps_fn(blkg->q,
                                                                blkg, bps);

                if (fileid == BLKIO_THROTL_write_bps_device
                    && blkiop->ops.blkio_update_group_write_bps_fn)
                        blkiop->ops.blkio_update_group_write_bps_fn(blkg->q,
                                                                blkg, bps);
        }
}

static inline void blkio_update_group_iops(struct blkio_group *blkg,
                                           int plid, unsigned int iops,
                                           int fileid)
{
        struct blkio_policy_type *blkiop;

        list_for_each_entry(blkiop, &blkio_list, list) {

                /* If this policy does not own the blkg, do not send updates */
                if (blkiop->plid != plid)
                        continue;

                if (fileid == BLKIO_THROTL_read_iops_device
                    && blkiop->ops.blkio_update_group_read_iops_fn)
                        blkiop->ops.blkio_update_group_read_iops_fn(blkg->q,
                                                                blkg, iops);

                if (fileid == BLKIO_THROTL_write_iops_device
                    && blkiop->ops.blkio_update_group_write_iops_fn)
                        blkiop->ops.blkio_update_group_write_iops_fn(blkg->q,
                                                                blkg, iops);
        }
}

/*
 * Add to the appropriate stat variable depending on the request type.
 * This should be called with queue_lock held.
 */
static void blkio_add_stat(uint64_t *stat, uint64_t add, bool direction,
                           bool sync)
{
        if (direction)
                stat[BLKIO_STAT_WRITE] += add;
        else
                stat[BLKIO_STAT_READ] += add;
        if (sync)
                stat[BLKIO_STAT_SYNC] += add;
        else
                stat[BLKIO_STAT_ASYNC] += add;
}

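/*
 * Note that blkio_add_stat() above accounts each event twice: once as
 * READ or WRITE and once as SYNC or ASYNC, so either pair sums to the
 * same total.
 */
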
/*
 * Decrements the appropriate stat variable if non-zero depending on the
 * request type. Panics on value being zero.
 * This should be called with the queue_lock held.
 */
static void blkio_check_and_dec_stat(uint64_t *stat, bool direction, bool sync)
{
        if (direction) {
                BUG_ON(stat[BLKIO_STAT_WRITE] == 0);
                stat[BLKIO_STAT_WRITE]--;
        } else {
                BUG_ON(stat[BLKIO_STAT_READ] == 0);
                stat[BLKIO_STAT_READ]--;
        }
        if (sync) {
                BUG_ON(stat[BLKIO_STAT_SYNC] == 0);
                stat[BLKIO_STAT_SYNC]--;
        } else {
                BUG_ON(stat[BLKIO_STAT_ASYNC] == 0);
                stat[BLKIO_STAT_ASYNC]--;
        }
}

#ifdef CONFIG_DEBUG_BLK_CGROUP
/* This should be called with the queue_lock held. */
static void blkio_set_start_group_wait_time(struct blkio_group *blkg,
                                            struct blkio_policy_type *pol,
                                            struct blkio_group *curr_blkg)
{
        struct blkg_policy_data *pd = blkg->pd[pol->plid];

        if (blkio_blkg_waiting(&pd->stats))
                return;
        if (blkg == curr_blkg)
                return;
        pd->stats.start_group_wait_time = sched_clock();
        blkio_mark_blkg_waiting(&pd->stats);
}

/* This should be called with the queue_lock held. */
static void blkio_update_group_wait_time(struct blkio_group_stats *stats)
{
        unsigned long long now;

        if (!blkio_blkg_waiting(stats))
                return;

        now = sched_clock();
        if (time_after64(now, stats->start_group_wait_time))
                stats->group_wait_time += now - stats->start_group_wait_time;
        blkio_clear_blkg_waiting(stats);
}

/* This should be called with the queue_lock held. */
static void blkio_end_empty_time(struct blkio_group_stats *stats)
{
        unsigned long long now;

        if (!blkio_blkg_empty(stats))
                return;

        now = sched_clock();
        if (time_after64(now, stats->start_empty_time))
                stats->empty_time += now - stats->start_empty_time;
        blkio_clear_blkg_empty(stats);
}

void blkiocg_update_set_idle_time_stats(struct blkio_group *blkg,
                                        struct blkio_policy_type *pol)
{
        struct blkio_group_stats *stats = &blkg->pd[pol->plid]->stats;

        lockdep_assert_held(blkg->q->queue_lock);
        BUG_ON(blkio_blkg_idling(stats));

        stats->start_idle_time = sched_clock();
        blkio_mark_blkg_idling(stats);
}
EXPORT_SYMBOL_GPL(blkiocg_update_set_idle_time_stats);

void blkiocg_update_idle_time_stats(struct blkio_group *blkg,
                                    struct blkio_policy_type *pol)
{
        struct blkio_group_stats *stats = &blkg->pd[pol->plid]->stats;

        lockdep_assert_held(blkg->q->queue_lock);

        if (blkio_blkg_idling(stats)) {
                unsigned long long now = sched_clock();

                if (time_after64(now, stats->start_idle_time)) {
                        u64_stats_update_begin(&stats->syncp);
                        stats->idle_time += now - stats->start_idle_time;
                        u64_stats_update_end(&stats->syncp);
                }
                blkio_clear_blkg_idling(stats);
        }
}
EXPORT_SYMBOL_GPL(blkiocg_update_idle_time_stats);

void blkiocg_update_avg_queue_size_stats(struct blkio_group *blkg,
                                         struct blkio_policy_type *pol)
{
        struct blkio_group_stats *stats = &blkg->pd[pol->plid]->stats;

        lockdep_assert_held(blkg->q->queue_lock);

        u64_stats_update_begin(&stats->syncp);
        stats->avg_queue_size_sum +=
                        stats->stat_arr[BLKIO_STAT_QUEUED][BLKIO_STAT_READ] +
                        stats->stat_arr[BLKIO_STAT_QUEUED][BLKIO_STAT_WRITE];
        stats->avg_queue_size_samples++;
        blkio_update_group_wait_time(stats);
        u64_stats_update_end(&stats->syncp);
}
EXPORT_SYMBOL_GPL(blkiocg_update_avg_queue_size_stats);

void blkiocg_set_start_empty_time(struct blkio_group *blkg,
                                  struct blkio_policy_type *pol)
{
        struct blkio_group_stats *stats = &blkg->pd[pol->plid]->stats;

        lockdep_assert_held(blkg->q->queue_lock);

        if (stats->stat_arr[BLKIO_STAT_QUEUED][BLKIO_STAT_READ] ||
                        stats->stat_arr[BLKIO_STAT_QUEUED][BLKIO_STAT_WRITE])
                return;

        /*
         * group is already marked empty. This can happen if cfqq got new
         * request in parent group and moved to this group while being added
         * to service tree. Just ignore the event and move on.
         */
        if (blkio_blkg_empty(stats))
                return;

        stats->start_empty_time = sched_clock();
        blkio_mark_blkg_empty(stats);
}
EXPORT_SYMBOL_GPL(blkiocg_set_start_empty_time);

void blkiocg_update_dequeue_stats(struct blkio_group *blkg,
                                  struct blkio_policy_type *pol,
                                  unsigned long dequeue)
{
        struct blkg_policy_data *pd = blkg->pd[pol->plid];

        lockdep_assert_held(blkg->q->queue_lock);

        pd->stats.dequeue += dequeue;
}
EXPORT_SYMBOL_GPL(blkiocg_update_dequeue_stats);
#else
static inline void blkio_set_start_group_wait_time(struct blkio_group *blkg,
                                        struct blkio_policy_type *pol,
                                        struct blkio_group *curr_blkg) { }
static inline void blkio_end_empty_time(struct blkio_group_stats *stats) { }
#endif

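/*
 * The stats->syncp sequence counter used below makes the 64-bit counters
 * readable on 32-bit machines: writers bracket updates with
 * u64_stats_update_begin()/u64_stats_update_end() while readers loop on
 * u64_stats_fetch_begin()/u64_stats_fetch_retry() until they observe a
 * stable snapshot.
 */
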
void blkiocg_update_io_add_stats(struct blkio_group *blkg,
                                 struct blkio_policy_type *pol,
                                 struct blkio_group *curr_blkg, bool direction,
                                 bool sync)
{
        struct blkio_group_stats *stats = &blkg->pd[pol->plid]->stats;

        lockdep_assert_held(blkg->q->queue_lock);

        u64_stats_update_begin(&stats->syncp);
        blkio_add_stat(stats->stat_arr[BLKIO_STAT_QUEUED], 1, direction, sync);
        blkio_end_empty_time(stats);
        u64_stats_update_end(&stats->syncp);

        blkio_set_start_group_wait_time(blkg, pol, curr_blkg);
}
EXPORT_SYMBOL_GPL(blkiocg_update_io_add_stats);

void blkiocg_update_io_remove_stats(struct blkio_group *blkg,
                                    struct blkio_policy_type *pol,
                                    bool direction, bool sync)
{
        struct blkio_group_stats *stats = &blkg->pd[pol->plid]->stats;

        lockdep_assert_held(blkg->q->queue_lock);

        u64_stats_update_begin(&stats->syncp);
        blkio_check_and_dec_stat(stats->stat_arr[BLKIO_STAT_QUEUED], direction,
                                 sync);
        u64_stats_update_end(&stats->syncp);
}
EXPORT_SYMBOL_GPL(blkiocg_update_io_remove_stats);

void blkiocg_update_timeslice_used(struct blkio_group *blkg,
                                   struct blkio_policy_type *pol,
                                   unsigned long time,
                                   unsigned long unaccounted_time)
{
        struct blkio_group_stats *stats = &blkg->pd[pol->plid]->stats;

        lockdep_assert_held(blkg->q->queue_lock);

        u64_stats_update_begin(&stats->syncp);
        stats->time += time;
#ifdef CONFIG_DEBUG_BLK_CGROUP
        stats->unaccounted_time += unaccounted_time;
#endif
        u64_stats_update_end(&stats->syncp);
}
EXPORT_SYMBOL_GPL(blkiocg_update_timeslice_used);

/*
 * should be called under rcu read lock or queue lock to make sure blkg pointer
 * is valid.
 */
void blkiocg_update_dispatch_stats(struct blkio_group *blkg,
                                   struct blkio_policy_type *pol,
                                   uint64_t bytes, bool direction, bool sync)
{
        struct blkg_policy_data *pd = blkg->pd[pol->plid];
        struct blkio_group_stats_cpu *stats_cpu;
        unsigned long flags;

        /* If per cpu stats are not allocated yet, don't do any accounting. */
        if (pd->stats_cpu == NULL)
                return;

        /*
         * Disabling interrupts to provide mutual exclusion between two
         * writes on same cpu. It probably is not needed for 64bit. Not
         * optimizing that case yet.
         */
        local_irq_save(flags);

        stats_cpu = this_cpu_ptr(pd->stats_cpu);

        u64_stats_update_begin(&stats_cpu->syncp);
        stats_cpu->sectors += bytes >> 9;
        blkio_add_stat(stats_cpu->stat_arr_cpu[BLKIO_STAT_CPU_SERVICED],
                        1, direction, sync);
        blkio_add_stat(stats_cpu->stat_arr_cpu[BLKIO_STAT_CPU_SERVICE_BYTES],
                        bytes, direction, sync);
        u64_stats_update_end(&stats_cpu->syncp);
        local_irq_restore(flags);
}
EXPORT_SYMBOL_GPL(blkiocg_update_dispatch_stats);

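/*
 * Dispatch accounting is in the request fast path, which is why it uses
 * per-cpu counters protected only by local_irq_save() rather than the
 * queue_lock, and why bytes are recorded as 512-byte sectors (bytes >> 9).
 */
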
void blkiocg_update_completion_stats(struct blkio_group *blkg,
                                     struct blkio_policy_type *pol,
                                     uint64_t start_time,
                                     uint64_t io_start_time, bool direction,
                                     bool sync)
{
        struct blkio_group_stats *stats = &blkg->pd[pol->plid]->stats;
        unsigned long long now = sched_clock();

        lockdep_assert_held(blkg->q->queue_lock);

        u64_stats_update_begin(&stats->syncp);
        if (time_after64(now, io_start_time))
                blkio_add_stat(stats->stat_arr[BLKIO_STAT_SERVICE_TIME],
                                now - io_start_time, direction, sync);
        if (time_after64(io_start_time, start_time))
                blkio_add_stat(stats->stat_arr[BLKIO_STAT_WAIT_TIME],
                                io_start_time - start_time, direction, sync);
        u64_stats_update_end(&stats->syncp);
}
EXPORT_SYMBOL_GPL(blkiocg_update_completion_stats);

/* Merged stats are updated under the queue_lock like the other blkg stats. */
void blkiocg_update_io_merged_stats(struct blkio_group *blkg,
                                    struct blkio_policy_type *pol,
                                    bool direction, bool sync)
{
        struct blkio_group_stats *stats = &blkg->pd[pol->plid]->stats;

        lockdep_assert_held(blkg->q->queue_lock);

        u64_stats_update_begin(&stats->syncp);
        blkio_add_stat(stats->stat_arr[BLKIO_STAT_MERGED], 1, direction, sync);
        u64_stats_update_end(&stats->syncp);
}
EXPORT_SYMBOL_GPL(blkiocg_update_io_merged_stats);

/*
 * Worker for allocating per cpu stat for blk groups. This is scheduled on
 * the system_nrt_wq once there are some groups on the alloc_list waiting
 * for allocation.
 */
static void blkio_stat_alloc_fn(struct work_struct *work)
{
        static void *pcpu_stats[BLKIO_NR_POLICIES];
        struct delayed_work *dwork = to_delayed_work(work);
        struct blkio_group *blkg;
        int i;
        bool empty = false;

alloc_stats:
        for (i = 0; i < BLKIO_NR_POLICIES; i++) {
                if (pcpu_stats[i] != NULL)
                        continue;

                pcpu_stats[i] = alloc_percpu(struct blkio_group_stats_cpu);

                /* Allocation failed. Try again after some time. */
                if (pcpu_stats[i] == NULL) {
                        queue_delayed_work(system_nrt_wq, dwork,
                                                msecs_to_jiffies(10));
                        return;
                }
        }

        spin_lock_irq(&blkio_list_lock);
        spin_lock(&alloc_list_lock);

        /* cgroup got deleted or queue exited. */
        if (!list_empty(&alloc_list)) {
                blkg = list_first_entry(&alloc_list, struct blkio_group,
                                                alloc_node);
                for (i = 0; i < BLKIO_NR_POLICIES; i++) {
                        struct blkg_policy_data *pd = blkg->pd[i];

                        if (blkio_policy[i] && pd && !pd->stats_cpu)
                                swap(pd->stats_cpu, pcpu_stats[i]);
                }

                list_del_init(&blkg->alloc_node);
        }

        empty = list_empty(&alloc_list);

        spin_unlock(&alloc_list_lock);
        spin_unlock_irq(&blkio_list_lock);

        if (!empty)
                goto alloc_stats;
}
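
/*
 * The indirection through the worker above exists because blkgs are
 * allocated with GFP_ATOMIC under the queue_lock, where alloc_percpu()
 * (which may sleep) cannot be called.  New groups are therefore parked on
 * alloc_list and their per-cpu stats filled in from process context,
 * retrying every 10ms on allocation failure.
 */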

/**
 * blkg_free - free a blkg
 * @blkg: blkg to free
 *
 * Free @blkg which may be partially allocated.
 */
static void blkg_free(struct blkio_group *blkg)
{
        int i;

        if (!blkg)
                return;

        for (i = 0; i < BLKIO_NR_POLICIES; i++) {
                struct blkg_policy_data *pd = blkg->pd[i];

                if (pd) {
                        free_percpu(pd->stats_cpu);
                        kfree(pd);
                }
        }

        kfree(blkg);
}

/**
 * blkg_alloc - allocate a blkg
 * @blkcg: block cgroup the new blkg is associated with
 * @q: request_queue the new blkg is associated with
 *
 * Allocate a new blkg associating @blkcg and @q.
 */
static struct blkio_group *blkg_alloc(struct blkio_cgroup *blkcg,
                                      struct request_queue *q)
{
        struct blkio_group *blkg;
        int i;

        /* alloc and init base part */
        blkg = kzalloc_node(sizeof(*blkg), GFP_ATOMIC, q->node);
        if (!blkg)
                return NULL;

        blkg->q = q;
        INIT_LIST_HEAD(&blkg->q_node);
        INIT_LIST_HEAD(&blkg->alloc_node);
        blkg->blkcg = blkcg;
        blkg->refcnt = 1;
        cgroup_path(blkcg->css.cgroup, blkg->path, sizeof(blkg->path));

        for (i = 0; i < BLKIO_NR_POLICIES; i++) {
                struct blkio_policy_type *pol = blkio_policy[i];
                struct blkg_policy_data *pd;

                if (!pol)
                        continue;

                /* alloc per-policy data and attach it to blkg */
                pd = kzalloc_node(sizeof(*pd) + pol->pdata_size, GFP_ATOMIC,
                                  q->node);
                if (!pd) {
                        blkg_free(blkg);
                        return NULL;
                }

                blkg->pd[i] = pd;
                pd->blkg = blkg;
        }

        /* invoke per-policy init */
        for (i = 0; i < BLKIO_NR_POLICIES; i++) {
                struct blkio_policy_type *pol = blkio_policy[i];

                if (pol)
                        pol->ops.blkio_init_group_fn(blkg);
        }

        return blkg;
}

struct blkio_group *blkg_lookup_create(struct blkio_cgroup *blkcg,
                                       struct request_queue *q,
                                       bool for_root)
        __releases(q->queue_lock) __acquires(q->queue_lock)
{
        struct blkio_group *blkg;

        WARN_ON_ONCE(!rcu_read_lock_held());
        lockdep_assert_held(q->queue_lock);

        /*
         * This could be the first entry point of blkcg implementation and
         * we shouldn't allow anything to go through for a bypassing queue.
         * The following can be removed if blkg lookup is guaranteed to
         * fail on a bypassing queue.
         */
        if (unlikely(blk_queue_bypass(q)) && !for_root)
                return ERR_PTR(blk_queue_dead(q) ? -EINVAL : -EBUSY);

        blkg = blkg_lookup(blkcg, q);
        if (blkg)
                return blkg;

        /* blkg holds a reference to blkcg */
        if (!css_tryget(&blkcg->css))
                return ERR_PTR(-EINVAL);

        /* allocate and initialize */
        blkg = blkg_alloc(blkcg, q);

        /* did alloc fail? */
        if (unlikely(!blkg)) {
                blkg = ERR_PTR(-ENOMEM);
                goto out;
        }

        /* insert */
        spin_lock(&blkcg->lock);
        hlist_add_head_rcu(&blkg->blkcg_node, &blkcg->blkg_list);
        list_add(&blkg->q_node, &q->blkg_list);
        spin_unlock(&blkcg->lock);

        spin_lock(&alloc_list_lock);
        list_add(&blkg->alloc_node, &alloc_list);
        /* Queue per cpu stat allocation from worker thread. */
        queue_delayed_work(system_nrt_wq, &blkio_stat_alloc_work, 0);
        spin_unlock(&alloc_list_lock);
out:
        return blkg;
}
EXPORT_SYMBOL_GPL(blkg_lookup_create);
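
/*
 * blkg_lookup_create() is the usual entry point: it first looks up an
 * existing blkg and only then allocates, taking blkcg->lock just around
 * the hash and list insertion; the per-cpu stats arrive later via the
 * allocation worker queued above.
 */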

/* called under rcu_read_lock(). */
struct blkio_group *blkg_lookup(struct blkio_cgroup *blkcg,
                                struct request_queue *q)
{
        struct blkio_group *blkg;
        struct hlist_node *n;

        hlist_for_each_entry_rcu(blkg, n, &blkcg->blkg_list, blkcg_node)
                if (blkg->q == q)
                        return blkg;
        return NULL;
}
EXPORT_SYMBOL_GPL(blkg_lookup);

static void blkg_destroy(struct blkio_group *blkg)
{
        struct request_queue *q = blkg->q;
        struct blkio_cgroup *blkcg = blkg->blkcg;

        lockdep_assert_held(q->queue_lock);
        lockdep_assert_held(&blkcg->lock);

        /* Something wrong if we are trying to remove same group twice */
        WARN_ON_ONCE(list_empty(&blkg->q_node));
        WARN_ON_ONCE(hlist_unhashed(&blkg->blkcg_node));
        list_del_init(&blkg->q_node);
        hlist_del_init_rcu(&blkg->blkcg_node);

        spin_lock(&alloc_list_lock);
        list_del_init(&blkg->alloc_node);
        spin_unlock(&alloc_list_lock);

        /*
         * Put the reference taken at the time of creation so that when all
         * queues are gone, group can be destroyed.
         */
        blkg_put(blkg);
}

/*
 * XXX: This updates blkg policy data in-place for root blkg, which is
 * necessary across elevator switch and policy registration as root blkgs
 * aren't shot down. This broken and racy implementation is temporary.
 * Eventually, blkg shoot down will be replaced by proper in-place update.
 */
void update_root_blkg_pd(struct request_queue *q, enum blkio_policy_id plid)
{
        struct blkio_policy_type *pol = blkio_policy[plid];
        struct blkio_group *blkg = blkg_lookup(&blkio_root_cgroup, q);
        struct blkg_policy_data *pd;

        if (!blkg)
                return;

        kfree(blkg->pd[plid]);
        blkg->pd[plid] = NULL;

        if (!pol)
                return;

        pd = kzalloc(sizeof(*pd) + pol->pdata_size, GFP_KERNEL);
        WARN_ON_ONCE(!pd);

        pd->stats_cpu = alloc_percpu(struct blkio_group_stats_cpu);
        WARN_ON_ONCE(!pd->stats_cpu);

        blkg->pd[plid] = pd;
        pd->blkg = blkg;
        pol->ops.blkio_init_group_fn(blkg);
}
EXPORT_SYMBOL_GPL(update_root_blkg_pd);

/**
 * blkg_destroy_all - destroy all blkgs associated with a request_queue
 * @q: request_queue of interest
 * @destroy_root: whether to destroy root blkg or not
 *
 * Destroy blkgs associated with @q. If @destroy_root is %true, all are
 * destroyed; otherwise, root blkg is left alone.
 */
void blkg_destroy_all(struct request_queue *q, bool destroy_root)
{
        struct blkio_group *blkg, *n;

        spin_lock_irq(q->queue_lock);

        list_for_each_entry_safe(blkg, n, &q->blkg_list, q_node) {
                struct blkio_cgroup *blkcg = blkg->blkcg;

                /* skip root? */
                if (!destroy_root && blkg->blkcg == &blkio_root_cgroup)
                        continue;

                spin_lock(&blkcg->lock);
                blkg_destroy(blkg);
                spin_unlock(&blkcg->lock);
        }

        spin_unlock_irq(q->queue_lock);
}
EXPORT_SYMBOL_GPL(blkg_destroy_all);

static void blkg_rcu_free(struct rcu_head *rcu_head)
{
        blkg_free(container_of(rcu_head, struct blkio_group, rcu_head));
}

void __blkg_release(struct blkio_group *blkg)
{
        /* release the extra blkcg reference this blkg has been holding */
        css_put(&blkg->blkcg->css);

        /*
         * A group is freed in rcu manner. But having an rcu lock does not
         * mean that one can access all the fields of blkg and assume these
         * are valid. For example, don't try to follow throtl_data and
         * request queue links.
         *
         * Having a reference to blkg under an rcu allows access to only
         * values local to groups like group stats and group rate limits.
         */
        call_rcu(&blkg->rcu_head, blkg_rcu_free);
}
EXPORT_SYMBOL_GPL(__blkg_release);

static void blkio_reset_stats_cpu(struct blkio_group *blkg, int plid)
{
        struct blkg_policy_data *pd = blkg->pd[plid];
        int cpu;

        if (pd->stats_cpu == NULL)
                return;

        for_each_possible_cpu(cpu) {
                struct blkio_group_stats_cpu *sc =
                        per_cpu_ptr(pd->stats_cpu, cpu);

                sc->sectors = 0;
                memset(sc->stat_arr_cpu, 0, sizeof(sc->stat_arr_cpu));
        }
}

static int
blkiocg_reset_stats(struct cgroup *cgroup, struct cftype *cftype, u64 val)
{
        struct blkio_cgroup *blkcg = cgroup_to_blkio_cgroup(cgroup);
        struct blkio_group *blkg;
        struct hlist_node *n;
        int i;

        spin_lock(&blkio_list_lock);
        spin_lock_irq(&blkcg->lock);

        /*
         * Note that stat reset is racy - it doesn't synchronize against
         * stat updates. This is a debug feature which shouldn't exist
         * anyway. If you get hit by a race, retry.
         */
        hlist_for_each_entry(blkg, n, &blkcg->blkg_list, blkcg_node) {
                struct blkio_policy_type *pol;

                list_for_each_entry(pol, &blkio_list, list) {
                        struct blkg_policy_data *pd = blkg->pd[pol->plid];
                        struct blkio_group_stats *stats = &pd->stats;

                        /* queued stats shouldn't be cleared */
                        for (i = 0; i < ARRAY_SIZE(stats->stat_arr); i++)
                                if (i != BLKIO_STAT_QUEUED)
                                        memset(stats->stat_arr[i], 0,
                                               sizeof(stats->stat_arr[i]));
                        stats->time = 0;
#ifdef CONFIG_DEBUG_BLK_CGROUP
                        memset((void *)stats + BLKG_STATS_DEBUG_CLEAR_START, 0,
                               BLKG_STATS_DEBUG_CLEAR_SIZE);
#endif
                        blkio_reset_stats_cpu(blkg, pol->plid);
                }
        }

        spin_unlock_irq(&blkcg->lock);
        spin_unlock(&blkio_list_lock);
        return 0;
}

static void blkio_get_key_name(enum stat_sub_type type, const char *dname,
                               char *str, int chars_left, bool diskname_only)
{
        snprintf(str, chars_left, "%s", dname);
        chars_left -= strlen(str);
        if (chars_left <= 0) {
                printk(KERN_WARNING
                        "Possibly incorrect cgroup stat display format\n");
                return;
        }
        if (diskname_only)
                return;
        switch (type) {
        case BLKIO_STAT_READ:
                strlcat(str, " Read", chars_left);
                break;
        case BLKIO_STAT_WRITE:
                strlcat(str, " Write", chars_left);
                break;
        case BLKIO_STAT_SYNC:
                strlcat(str, " Sync", chars_left);
                break;
        case BLKIO_STAT_ASYNC:
                strlcat(str, " Async", chars_left);
                break;
        case BLKIO_STAT_TOTAL:
                strlcat(str, " Total", chars_left);
                break;
        default:
                strlcat(str, " Invalid", chars_left);
        }
}

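/*
 * The generated keys take the form "<dev> Read", "<dev> Write", "<dev>
 * Sync", "<dev> Async" and "<dev> Total", where <dev> is the device name
 * reported by blkg_dev_name() below (typically "major:minor").
 */
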
static uint64_t blkio_read_stat_cpu(struct blkio_group *blkg, int plid,
                        enum stat_type_cpu type, enum stat_sub_type sub_type)
{
        struct blkg_policy_data *pd = blkg->pd[plid];
        int cpu;
        struct blkio_group_stats_cpu *stats_cpu;
        u64 val = 0, tval;

        if (pd->stats_cpu == NULL)
                return val;

        for_each_possible_cpu(cpu) {
                unsigned int start;
                stats_cpu = per_cpu_ptr(pd->stats_cpu, cpu);

                do {
                        start = u64_stats_fetch_begin(&stats_cpu->syncp);
                        if (type == BLKIO_STAT_CPU_SECTORS)
                                tval = stats_cpu->sectors;
                        else
                                tval = stats_cpu->stat_arr_cpu[type][sub_type];
                } while (u64_stats_fetch_retry(&stats_cpu->syncp, start));

                val += tval;
        }

        return val;
}

static uint64_t blkio_get_stat_cpu(struct blkio_group *blkg, int plid,
                                   struct cgroup_map_cb *cb, const char *dname,
                                   enum stat_type_cpu type)
{
        uint64_t disk_total, val;
        char key_str[MAX_KEY_LEN];
        enum stat_sub_type sub_type;

        if (type == BLKIO_STAT_CPU_SECTORS) {
                val = blkio_read_stat_cpu(blkg, plid, type, 0);
                blkio_get_key_name(0, dname, key_str, MAX_KEY_LEN, true);
                cb->fill(cb, key_str, val);
                return val;
        }

        for (sub_type = BLKIO_STAT_READ; sub_type < BLKIO_STAT_TOTAL;
                        sub_type++) {
                blkio_get_key_name(sub_type, dname, key_str, MAX_KEY_LEN,
                                   false);
                val = blkio_read_stat_cpu(blkg, plid, type, sub_type);
                cb->fill(cb, key_str, val);
        }

        disk_total = blkio_read_stat_cpu(blkg, plid, type, BLKIO_STAT_READ) +
                     blkio_read_stat_cpu(blkg, plid, type, BLKIO_STAT_WRITE);

        blkio_get_key_name(BLKIO_STAT_TOTAL, dname, key_str, MAX_KEY_LEN,
                           false);
        cb->fill(cb, key_str, disk_total);
        return disk_total;
}

static uint64_t blkio_get_stat(struct blkio_group *blkg, int plid,
                               struct cgroup_map_cb *cb, const char *dname,
                               enum stat_type type)
{
        struct blkio_group_stats *stats = &blkg->pd[plid]->stats;
        uint64_t v = 0, disk_total = 0;
        char key_str[MAX_KEY_LEN];
        unsigned int sync_start;
        int st;

        if (type >= BLKIO_STAT_ARR_NR) {
                do {
                        sync_start = u64_stats_fetch_begin(&stats->syncp);
                        switch (type) {
                        case BLKIO_STAT_TIME:
                                v = stats->time;
                                break;
#ifdef CONFIG_DEBUG_BLK_CGROUP
                        case BLKIO_STAT_UNACCOUNTED_TIME:
                                v = stats->unaccounted_time;
                                break;
                        case BLKIO_STAT_AVG_QUEUE_SIZE: {
                                uint64_t samples = stats->avg_queue_size_samples;

                                if (samples) {
                                        v = stats->avg_queue_size_sum;
                                        do_div(v, samples);
                                }
                                break;
                        }
                        case BLKIO_STAT_IDLE_TIME:
                                v = stats->idle_time;
                                break;
                        case BLKIO_STAT_EMPTY_TIME:
                                v = stats->empty_time;
                                break;
                        case BLKIO_STAT_DEQUEUE:
                                v = stats->dequeue;
                                break;
                        case BLKIO_STAT_GROUP_WAIT_TIME:
                                v = stats->group_wait_time;
                                break;
#endif
                        default:
                                WARN_ON_ONCE(1);
                        }
                } while (u64_stats_fetch_retry(&stats->syncp, sync_start));

                blkio_get_key_name(0, dname, key_str, MAX_KEY_LEN, true);
                cb->fill(cb, key_str, v);
                return v;
        }

        for (st = BLKIO_STAT_READ; st < BLKIO_STAT_TOTAL; st++) {
                do {
                        sync_start = u64_stats_fetch_begin(&stats->syncp);
                        v = stats->stat_arr[type][st];
                } while (u64_stats_fetch_retry(&stats->syncp, sync_start));

                blkio_get_key_name(st, dname, key_str, MAX_KEY_LEN, false);
                cb->fill(cb, key_str, v);
                if (st == BLKIO_STAT_READ || st == BLKIO_STAT_WRITE)
                        disk_total += v;
        }

        blkio_get_key_name(BLKIO_STAT_TOTAL, dname, key_str, MAX_KEY_LEN,
                           false);
        cb->fill(cb, key_str, disk_total);
        return disk_total;
}

static int blkio_policy_parse_and_set(char *buf, enum blkio_policy_id plid,
                                      int fileid, struct blkio_cgroup *blkcg)
{
        struct gendisk *disk = NULL;
        struct blkio_group *blkg = NULL;
        struct blkg_policy_data *pd;
        char *s[4], *p, *major_s = NULL, *minor_s = NULL;
        unsigned long major, minor;
        int i = 0, ret = -EINVAL;
        int part;
        dev_t dev;
        u64 temp;

        memset(s, 0, sizeof(s));

        while ((p = strsep(&buf, " ")) != NULL) {
                if (!*p)
                        continue;

                s[i++] = p;

                /* Prevent inputting too many things */
                if (i == 3)
                        break;
        }

        if (i != 2)
                goto out;

        p = strsep(&s[0], ":");
        if (p != NULL)
                major_s = p;
        else
                goto out;

        minor_s = s[0];
        if (!minor_s)
                goto out;

        if (strict_strtoul(major_s, 10, &major))
                goto out;

        if (strict_strtoul(minor_s, 10, &minor))
                goto out;

        dev = MKDEV(major, minor);

        if (strict_strtoull(s[1], 10, &temp))
                goto out;

        disk = get_gendisk(dev, &part);
        if (!disk || part)
                goto out;

        rcu_read_lock();

        spin_lock_irq(disk->queue->queue_lock);
        blkg = blkg_lookup_create(blkcg, disk->queue, false);
        spin_unlock_irq(disk->queue->queue_lock);

        if (IS_ERR(blkg)) {
                ret = PTR_ERR(blkg);
                goto out_unlock;
        }

        pd = blkg->pd[plid];

        switch (plid) {
        case BLKIO_POLICY_PROP:
                if ((temp < BLKIO_WEIGHT_MIN && temp > 0) ||
                     temp > BLKIO_WEIGHT_MAX)
                        goto out_unlock;

                pd->conf.weight = temp;
                blkio_update_group_weight(blkg, plid, temp ?: blkcg->weight);
                break;
        case BLKIO_POLICY_THROTL:
                switch (fileid) {
                case BLKIO_THROTL_read_bps_device:
                        pd->conf.bps[READ] = temp;
                        blkio_update_group_bps(blkg, plid, temp ?: -1, fileid);
                        break;
                case BLKIO_THROTL_write_bps_device:
                        pd->conf.bps[WRITE] = temp;
                        blkio_update_group_bps(blkg, plid, temp ?: -1, fileid);
                        break;
                case BLKIO_THROTL_read_iops_device:
                        if (temp > THROTL_IOPS_MAX)
                                goto out_unlock;
                        pd->conf.iops[READ] = temp;
                        blkio_update_group_iops(blkg, plid, temp ?: -1, fileid);
                        break;
                case BLKIO_THROTL_write_iops_device:
                        if (temp > THROTL_IOPS_MAX)
                                goto out_unlock;
                        pd->conf.iops[WRITE] = temp;
                        blkio_update_group_iops(blkg, plid, temp ?: -1, fileid);
                        break;
                }
                break;
        default:
                BUG();
        }
        ret = 0;
out_unlock:
        rcu_read_unlock();
out:
        put_disk(disk);

        /*
         * If queue was bypassing, we should retry.  Do so after a short
         * msleep().  It isn't strictly necessary but queue can be
         * bypassing for some time and it's always nice to avoid busy
         * looping.
         */
        if (ret == -EBUSY) {
                msleep(10);
                return restart_syscall();
        }
        return ret;
}
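
/*
 * The accepted input format above is "<major>:<minor> <value>".  For
 * example (device numbers illustrative):
 *
 *	echo "8:16 1048576" > blkio.throttle.read_bps_device
 *
 * caps reads from device 8:16 at 1MB/s for this cgroup, and a value of 0
 * clears the setting.
 */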

static int blkiocg_file_write(struct cgroup *cgrp, struct cftype *cft,
                              const char *buffer)
{
        int ret = 0;
        char *buf;
        struct blkio_cgroup *blkcg = cgroup_to_blkio_cgroup(cgrp);
        enum blkio_policy_id plid = BLKIOFILE_POLICY(cft->private);
        int fileid = BLKIOFILE_ATTR(cft->private);

        buf = kstrdup(buffer, GFP_KERNEL);
        if (!buf)
                return -ENOMEM;

        ret = blkio_policy_parse_and_set(buf, plid, fileid, blkcg);
        kfree(buf);
        return ret;
}

static const char *blkg_dev_name(struct blkio_group *blkg)
{
        /* some drivers (floppy) instantiate a queue w/o disk registered */
        if (blkg->q->backing_dev_info.dev)
                return dev_name(blkg->q->backing_dev_info.dev);
        return NULL;
}

static void blkio_print_group_conf(struct cftype *cft, struct blkio_group *blkg,
                                   struct seq_file *m)
{
        int plid = BLKIOFILE_POLICY(cft->private);
        int fileid = BLKIOFILE_ATTR(cft->private);
        struct blkg_policy_data *pd = blkg->pd[plid];
        const char *dname = blkg_dev_name(blkg);
        int rw = WRITE;

        if (!dname)
                return;

        switch (plid) {
        case BLKIO_POLICY_PROP:
                if (pd->conf.weight)
                        seq_printf(m, "%s\t%u\n",
                                   dname, pd->conf.weight);
                break;
        case BLKIO_POLICY_THROTL:
                switch (fileid) {
                case BLKIO_THROTL_read_bps_device:
                        rw = READ;
                        /* fall through */
                case BLKIO_THROTL_write_bps_device:
                        if (pd->conf.bps[rw])
                                seq_printf(m, "%s\t%llu\n",
                                           dname, pd->conf.bps[rw]);
                        break;
                case BLKIO_THROTL_read_iops_device:
                        rw = READ;
                        /* fall through */
                case BLKIO_THROTL_write_iops_device:
                        if (pd->conf.iops[rw])
                                seq_printf(m, "%s\t%u\n",
                                           dname, pd->conf.iops[rw]);
                        break;
                }
                break;
        default:
                BUG();
        }
}

/* cgroup files which read their data from policy nodes end up here */
static void blkio_read_conf(struct cftype *cft, struct blkio_cgroup *blkcg,
                            struct seq_file *m)
{
        struct blkio_group *blkg;
        struct hlist_node *n;

        spin_lock_irq(&blkcg->lock);
        hlist_for_each_entry(blkg, n, &blkcg->blkg_list, blkcg_node)
                blkio_print_group_conf(cft, blkg, m);
        spin_unlock_irq(&blkcg->lock);
}

static int blkiocg_file_read(struct cgroup *cgrp, struct cftype *cft,
                             struct seq_file *m)
{
        struct blkio_cgroup *blkcg;
        enum blkio_policy_id plid = BLKIOFILE_POLICY(cft->private);
        int name = BLKIOFILE_ATTR(cft->private);

        blkcg = cgroup_to_blkio_cgroup(cgrp);

        switch (plid) {
        case BLKIO_POLICY_PROP:
                switch (name) {
                case BLKIO_PROP_weight_device:
                        blkio_read_conf(cft, blkcg, m);
                        return 0;
                default:
                        BUG();
                }
                break;
        case BLKIO_POLICY_THROTL:
                switch (name) {
                case BLKIO_THROTL_read_bps_device:
                case BLKIO_THROTL_write_bps_device:
                case BLKIO_THROTL_read_iops_device:
                case BLKIO_THROTL_write_iops_device:
                        blkio_read_conf(cft, blkcg, m);
                        return 0;
                default:
                        BUG();
                }
                break;
        default:
                BUG();
        }

        return 0;
}

static int blkio_read_blkg_stats(struct blkio_cgroup *blkcg,
                struct cftype *cft, struct cgroup_map_cb *cb,
                enum stat_type type, bool show_total, bool pcpu)
{
        struct blkio_group *blkg;
        struct hlist_node *n;
        uint64_t cgroup_total = 0;

        spin_lock_irq(&blkcg->lock);

        hlist_for_each_entry(blkg, n, &blkcg->blkg_list, blkcg_node) {
                const char *dname = blkg_dev_name(blkg);
                int plid = BLKIOFILE_POLICY(cft->private);

                if (!dname)
                        continue;
                if (pcpu)
                        cgroup_total += blkio_get_stat_cpu(blkg, plid,
                                                           cb, dname, type);
                else
                        cgroup_total += blkio_get_stat(blkg, plid,
                                                       cb, dname, type);
        }
        if (show_total)
                cb->fill(cb, "Total", cgroup_total);

        spin_unlock_irq(&blkcg->lock);
        return 0;
}

/* All map kind of cgroup file get serviced by this function */
static int blkiocg_file_read_map(struct cgroup *cgrp, struct cftype *cft,
                                 struct cgroup_map_cb *cb)
{
        struct blkio_cgroup *blkcg;
        enum blkio_policy_id plid = BLKIOFILE_POLICY(cft->private);
        int name = BLKIOFILE_ATTR(cft->private);

        blkcg = cgroup_to_blkio_cgroup(cgrp);

        switch (plid) {
        case BLKIO_POLICY_PROP:
                switch (name) {
                case BLKIO_PROP_time:
                        return blkio_read_blkg_stats(blkcg, cft, cb,
                                                BLKIO_STAT_TIME, 0, 0);
                case BLKIO_PROP_sectors:
                        return blkio_read_blkg_stats(blkcg, cft, cb,
                                                BLKIO_STAT_CPU_SECTORS, 0, 1);
                case BLKIO_PROP_io_service_bytes:
                        return blkio_read_blkg_stats(blkcg, cft, cb,
                                        BLKIO_STAT_CPU_SERVICE_BYTES, 1, 1);
                case BLKIO_PROP_io_serviced:
                        return blkio_read_blkg_stats(blkcg, cft, cb,
                                                BLKIO_STAT_CPU_SERVICED, 1, 1);
                case BLKIO_PROP_io_service_time:
                        return blkio_read_blkg_stats(blkcg, cft, cb,
                                                BLKIO_STAT_SERVICE_TIME, 1, 0);
                case BLKIO_PROP_io_wait_time:
                        return blkio_read_blkg_stats(blkcg, cft, cb,
                                                BLKIO_STAT_WAIT_TIME, 1, 0);
                case BLKIO_PROP_io_merged:
                        return blkio_read_blkg_stats(blkcg, cft, cb,
                                                BLKIO_STAT_MERGED, 1, 0);
                case BLKIO_PROP_io_queued:
                        return blkio_read_blkg_stats(blkcg, cft, cb,
                                                BLKIO_STAT_QUEUED, 1, 0);
#ifdef CONFIG_DEBUG_BLK_CGROUP
                case BLKIO_PROP_unaccounted_time:
                        return blkio_read_blkg_stats(blkcg, cft, cb,
                                        BLKIO_STAT_UNACCOUNTED_TIME, 0, 0);
                case BLKIO_PROP_dequeue:
                        return blkio_read_blkg_stats(blkcg, cft, cb,
                                                BLKIO_STAT_DEQUEUE, 0, 0);
                case BLKIO_PROP_avg_queue_size:
                        return blkio_read_blkg_stats(blkcg, cft, cb,
                                        BLKIO_STAT_AVG_QUEUE_SIZE, 0, 0);
                case BLKIO_PROP_group_wait_time:
                        return blkio_read_blkg_stats(blkcg, cft, cb,
                                        BLKIO_STAT_GROUP_WAIT_TIME, 0, 0);
                case BLKIO_PROP_idle_time:
                        return blkio_read_blkg_stats(blkcg, cft, cb,
                                                BLKIO_STAT_IDLE_TIME, 0, 0);
                case BLKIO_PROP_empty_time:
                        return blkio_read_blkg_stats(blkcg, cft, cb,
                                                BLKIO_STAT_EMPTY_TIME, 0, 0);
#endif
                default:
                        BUG();
                }
                break;
        case BLKIO_POLICY_THROTL:
                switch (name) {
                case BLKIO_THROTL_io_service_bytes:
                        return blkio_read_blkg_stats(blkcg, cft, cb,
                                        BLKIO_STAT_CPU_SERVICE_BYTES, 1, 1);
                case BLKIO_THROTL_io_serviced:
                        return blkio_read_blkg_stats(blkcg, cft, cb,
                                                BLKIO_STAT_CPU_SERVICED, 1, 1);
                default:
                        BUG();
                }
                break;
        default:
                BUG();
        }

        return 0;
}

static int blkio_weight_write(struct blkio_cgroup *blkcg, int plid, u64 val)
{
        struct blkio_group *blkg;
        struct hlist_node *n;

        if (val < BLKIO_WEIGHT_MIN || val > BLKIO_WEIGHT_MAX)
                return -EINVAL;

        spin_lock(&blkio_list_lock);
        spin_lock_irq(&blkcg->lock);
        blkcg->weight = (unsigned int)val;

        hlist_for_each_entry(blkg, n, &blkcg->blkg_list, blkcg_node) {
                struct blkg_policy_data *pd = blkg->pd[plid];

                if (!pd->conf.weight)
                        blkio_update_group_weight(blkg, plid, blkcg->weight);
        }

        spin_unlock_irq(&blkcg->lock);
        spin_unlock(&blkio_list_lock);
        return 0;
}
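
/*
 * Note the pd->conf.weight check above: groups with a per-device weight
 * set via blkio.weight_device keep it; only groups without one follow the
 * cgroup-wide default written to blkio.weight.
 */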

static u64 blkiocg_file_read_u64(struct cgroup *cgrp, struct cftype *cft)
{
        struct blkio_cgroup *blkcg;
        enum blkio_policy_id plid = BLKIOFILE_POLICY(cft->private);
        int name = BLKIOFILE_ATTR(cft->private);

        blkcg = cgroup_to_blkio_cgroup(cgrp);

        switch (plid) {
        case BLKIO_POLICY_PROP:
                switch (name) {
                case BLKIO_PROP_weight:
                        return (u64)blkcg->weight;
                }
                break;
        default:
                BUG();
        }
        return 0;
}

static int
blkiocg_file_write_u64(struct cgroup *cgrp, struct cftype *cft, u64 val)
{
        struct blkio_cgroup *blkcg;
        enum blkio_policy_id plid = BLKIOFILE_POLICY(cft->private);
        int name = BLKIOFILE_ATTR(cft->private);

        blkcg = cgroup_to_blkio_cgroup(cgrp);

        switch (plid) {
        case BLKIO_POLICY_PROP:
                switch (name) {
                case BLKIO_PROP_weight:
                        return blkio_weight_write(blkcg, plid, val);
                }
                break;
        default:
                BUG();
        }

        return 0;
}

struct cftype blkio_files[] = {
        {
                .name = "weight_device",
                .private = BLKIOFILE_PRIVATE(BLKIO_POLICY_PROP,
                                BLKIO_PROP_weight_device),
                .read_seq_string = blkiocg_file_read,
                .write_string = blkiocg_file_write,
                .max_write_len = 256,
        },
        {
                .name = "weight",
                .private = BLKIOFILE_PRIVATE(BLKIO_POLICY_PROP,
                                BLKIO_PROP_weight),
                .read_u64 = blkiocg_file_read_u64,
                .write_u64 = blkiocg_file_write_u64,
        },
        {
                .name = "time",
                .private = BLKIOFILE_PRIVATE(BLKIO_POLICY_PROP,
                                BLKIO_PROP_time),
                .read_map = blkiocg_file_read_map,
        },
        {
                .name = "sectors",
                .private = BLKIOFILE_PRIVATE(BLKIO_POLICY_PROP,
                                BLKIO_PROP_sectors),
                .read_map = blkiocg_file_read_map,
        },
        {
                .name = "io_service_bytes",
                .private = BLKIOFILE_PRIVATE(BLKIO_POLICY_PROP,
                                BLKIO_PROP_io_service_bytes),
                .read_map = blkiocg_file_read_map,
        },
        {
                .name = "io_serviced",
                .private = BLKIOFILE_PRIVATE(BLKIO_POLICY_PROP,
                                BLKIO_PROP_io_serviced),
                .read_map = blkiocg_file_read_map,
        },
        {
                .name = "io_service_time",
                .private = BLKIOFILE_PRIVATE(BLKIO_POLICY_PROP,
                                BLKIO_PROP_io_service_time),
                .read_map = blkiocg_file_read_map,
        },
        {
                .name = "io_wait_time",
                .private = BLKIOFILE_PRIVATE(BLKIO_POLICY_PROP,
                                BLKIO_PROP_io_wait_time),
                .read_map = blkiocg_file_read_map,
        },
        {
                .name = "io_merged",
                .private = BLKIOFILE_PRIVATE(BLKIO_POLICY_PROP,
                                BLKIO_PROP_io_merged),
                .read_map = blkiocg_file_read_map,
        },
        {
                .name = "io_queued",
                .private = BLKIOFILE_PRIVATE(BLKIO_POLICY_PROP,
                                BLKIO_PROP_io_queued),
                .read_map = blkiocg_file_read_map,
        },
        {
                .name = "reset_stats",
                .write_u64 = blkiocg_reset_stats,
        },
#ifdef CONFIG_BLK_DEV_THROTTLING
        {
                .name = "throttle.read_bps_device",
                .private = BLKIOFILE_PRIVATE(BLKIO_POLICY_THROTL,
                                BLKIO_THROTL_read_bps_device),
                .read_seq_string = blkiocg_file_read,
                .write_string = blkiocg_file_write,
                .max_write_len = 256,
        },

        {
                .name = "throttle.write_bps_device",
                .private = BLKIOFILE_PRIVATE(BLKIO_POLICY_THROTL,
                                BLKIO_THROTL_write_bps_device),
                .read_seq_string = blkiocg_file_read,
                .write_string = blkiocg_file_write,
                .max_write_len = 256,
        },

        {
                .name = "throttle.read_iops_device",
                .private = BLKIOFILE_PRIVATE(BLKIO_POLICY_THROTL,
                                BLKIO_THROTL_read_iops_device),
                .read_seq_string = blkiocg_file_read,
                .write_string = blkiocg_file_write,
                .max_write_len = 256,
        },

        {
                .name = "throttle.write_iops_device",
                .private = BLKIOFILE_PRIVATE(BLKIO_POLICY_THROTL,
                                BLKIO_THROTL_write_iops_device),
                .read_seq_string = blkiocg_file_read,
                .write_string = blkiocg_file_write,
                .max_write_len = 256,
        },
        {
                .name = "throttle.io_service_bytes",
                .private = BLKIOFILE_PRIVATE(BLKIO_POLICY_THROTL,
                                BLKIO_THROTL_io_service_bytes),
                .read_map = blkiocg_file_read_map,
        },
        {
                .name = "throttle.io_serviced",
                .private = BLKIOFILE_PRIVATE(BLKIO_POLICY_THROTL,
                                BLKIO_THROTL_io_serviced),
                .read_map = blkiocg_file_read_map,
        },
#endif /* CONFIG_BLK_DEV_THROTTLING */

#ifdef CONFIG_DEBUG_BLK_CGROUP
        {
                .name = "avg_queue_size",
                .private = BLKIOFILE_PRIVATE(BLKIO_POLICY_PROP,
                                BLKIO_PROP_avg_queue_size),
                .read_map = blkiocg_file_read_map,
        },
        {
                .name = "group_wait_time",
                .private = BLKIOFILE_PRIVATE(BLKIO_POLICY_PROP,
                                BLKIO_PROP_group_wait_time),
                .read_map = blkiocg_file_read_map,
        },
        {
                .name = "idle_time",
                .private = BLKIOFILE_PRIVATE(BLKIO_POLICY_PROP,
                                BLKIO_PROP_idle_time),
                .read_map = blkiocg_file_read_map,
        },
        {
                .name = "empty_time",
                .private = BLKIOFILE_PRIVATE(BLKIO_POLICY_PROP,
                                BLKIO_PROP_empty_time),
                .read_map = blkiocg_file_read_map,
        },
        {
                .name = "dequeue",
                .private = BLKIOFILE_PRIVATE(BLKIO_POLICY_PROP,
                                BLKIO_PROP_dequeue),
                .read_map = blkiocg_file_read_map,
        },
        {
                .name = "unaccounted_time",
                .private = BLKIOFILE_PRIVATE(BLKIO_POLICY_PROP,
                                BLKIO_PROP_unaccounted_time),
                .read_map = blkiocg_file_read_map,
        },
#endif
        { }     /* terminate */
};

/**
 * blkiocg_pre_destroy - cgroup pre_destroy callback
 * @cgroup: cgroup of interest
 *
 * This function is called when @cgroup is about to go away and is
 * responsible for shooting down all blkgs associated with @cgroup.
 * blkgs should be removed while holding both q and blkcg locks.  As
 * blkcg lock is nested inside q lock, this function performs reverse
 * double lock dancing.
 *
 * This is the blkcg counterpart of ioc_release_fn().
 */
static int blkiocg_pre_destroy(struct cgroup *cgroup)
{
        struct blkio_cgroup *blkcg = cgroup_to_blkio_cgroup(cgroup);

        spin_lock_irq(&blkcg->lock);

        while (!hlist_empty(&blkcg->blkg_list)) {
                struct blkio_group *blkg = hlist_entry(blkcg->blkg_list.first,
                                                struct blkio_group, blkcg_node);
                struct request_queue *q = blkg->q;

                if (spin_trylock(q->queue_lock)) {
                        blkg_destroy(blkg);
                        spin_unlock(q->queue_lock);
                } else {
                        spin_unlock_irq(&blkcg->lock);
                        cpu_relax();
                        spin_lock_irq(&blkcg->lock);
                }
        }

        spin_unlock_irq(&blkcg->lock);
        return 0;
}
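
/*
 * The trylock/relax loop above is needed because the locking order is
 * queue_lock -> blkcg->lock while this path starts out holding only
 * blkcg->lock.  Taking the queue_lock unconditionally could deadlock
 * against a queue_lock holder waiting on blkcg->lock, so on contention we
 * drop blkcg->lock, relax and retry.
 */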

static void blkiocg_destroy(struct cgroup *cgroup)
{
        struct blkio_cgroup *blkcg = cgroup_to_blkio_cgroup(cgroup);

        if (blkcg != &blkio_root_cgroup)
                kfree(blkcg);
}

static struct cgroup_subsys_state *blkiocg_create(struct cgroup *cgroup)
{
        static atomic64_t id_seq = ATOMIC64_INIT(0);
        struct blkio_cgroup *blkcg;
        struct cgroup *parent = cgroup->parent;

        if (!parent) {
                blkcg = &blkio_root_cgroup;
                goto done;
        }

        blkcg = kzalloc(sizeof(*blkcg), GFP_KERNEL);
        if (!blkcg)
                return ERR_PTR(-ENOMEM);

        blkcg->weight = BLKIO_WEIGHT_DEFAULT;
        blkcg->id = atomic64_inc_return(&id_seq); /* root is 0, start from 1 */
done:
        spin_lock_init(&blkcg->lock);
        INIT_HLIST_HEAD(&blkcg->blkg_list);

        return &blkcg->css;
}

/**
 * blkcg_init_queue - initialize blkcg part of request queue
 * @q: request_queue to initialize
 *
 * Called from blk_alloc_queue_node(). Responsible for initializing blkcg
 * part of new request_queue @q.
 *
 * RETURNS:
 * 0 on success, -errno on failure.
 */
int blkcg_init_queue(struct request_queue *q)
{
        int ret;

        might_sleep();

        ret = blk_throtl_init(q);
        if (ret)
                return ret;

        mutex_lock(&all_q_mutex);
        INIT_LIST_HEAD(&q->all_q_node);
        list_add_tail(&q->all_q_node, &all_q_list);
        mutex_unlock(&all_q_mutex);

        return 0;
}

/**
 * blkcg_drain_queue - drain blkcg part of request_queue
 * @q: request_queue to drain
 *
 * Called from blk_drain_queue(). Responsible for draining blkcg part.
 */
void blkcg_drain_queue(struct request_queue *q)
{
        lockdep_assert_held(q->queue_lock);

        blk_throtl_drain(q);
}

/**
 * blkcg_exit_queue - exit and release blkcg part of request_queue
 * @q: request_queue being released
 *
 * Called from blk_release_queue(). Responsible for exiting blkcg part.
 */
void blkcg_exit_queue(struct request_queue *q)
{
        mutex_lock(&all_q_mutex);
        list_del_init(&q->all_q_node);
        mutex_unlock(&all_q_mutex);

        blkg_destroy_all(q, true);

        blk_throtl_exit(q);
}

/*
 * We cannot support shared io contexts, as we have no means to support
 * two tasks with the same ioc in two different groups without major rework
 * of the main cic data structures. For now we allow a task to change
 * its cgroup only if it's the only owner of its ioc.
 */
static int blkiocg_can_attach(struct cgroup *cgrp, struct cgroup_taskset *tset)
{
        struct task_struct *task;
        struct io_context *ioc;
        int ret = 0;

        /* task_lock() is needed to avoid races with exit_io_context() */
        cgroup_taskset_for_each(task, cgrp, tset) {
                task_lock(task);
                ioc = task->io_context;
                if (ioc && atomic_read(&ioc->nr_tasks) > 1)
                        ret = -EINVAL;
                task_unlock(task);
                if (ret)
                        break;
        }
        return ret;
}

static void blkcg_bypass_start(void)
        __acquires(&all_q_mutex)
{
        struct request_queue *q;

        mutex_lock(&all_q_mutex);

        list_for_each_entry(q, &all_q_list, all_q_node) {
                blk_queue_bypass_start(q);
                blkg_destroy_all(q, false);
        }
}

static void blkcg_bypass_end(void)
        __releases(&all_q_mutex)
{
        struct request_queue *q;

        list_for_each_entry(q, &all_q_list, all_q_node)
                blk_queue_bypass_end(q);

        mutex_unlock(&all_q_mutex);
}

struct cgroup_subsys blkio_subsys = {
        .name = "blkio",
        .create = blkiocg_create,
        .can_attach = blkiocg_can_attach,
        .pre_destroy = blkiocg_pre_destroy,
        .destroy = blkiocg_destroy,
        .subsys_id = blkio_subsys_id,
        .base_cftypes = blkio_files,
        .module = THIS_MODULE,
};
EXPORT_SYMBOL_GPL(blkio_subsys);

void blkio_policy_register(struct blkio_policy_type *blkiop)
{
        struct request_queue *q;

        blkcg_bypass_start();
        spin_lock(&blkio_list_lock);

        BUG_ON(blkio_policy[blkiop->plid]);
        blkio_policy[blkiop->plid] = blkiop;
        list_add_tail(&blkiop->list, &blkio_list);

        spin_unlock(&blkio_list_lock);
        list_for_each_entry(q, &all_q_list, all_q_node)
                update_root_blkg_pd(q, blkiop->plid);
        blkcg_bypass_end();
}
EXPORT_SYMBOL_GPL(blkio_policy_register);

void blkio_policy_unregister(struct blkio_policy_type *blkiop)
{
        struct request_queue *q;

        blkcg_bypass_start();
        spin_lock(&blkio_list_lock);

        BUG_ON(blkio_policy[blkiop->plid] != blkiop);
        blkio_policy[blkiop->plid] = NULL;
        list_del_init(&blkiop->list);

        spin_unlock(&blkio_list_lock);
        list_for_each_entry(q, &all_q_list, all_q_node)
                update_root_blkg_pd(q, blkiop->plid);
        blkcg_bypass_end();
}
EXPORT_SYMBOL_GPL(blkio_policy_unregister);