/*
 * Common Block IO controller cgroup interface
 *
 * Based on ideas and code from CFQ, CFS and BFQ:
 * Copyright (C) 2003 Jens Axboe <axboe@kernel.dk>
 *
 * Copyright (C) 2008 Fabio Checconi <fabio@gandalf.sssup.it>
 *		      Paolo Valente <paolo.valente@unimore.it>
 *
 * Copyright (C) 2009 Vivek Goyal <vgoyal@redhat.com>
 *		      Nauman Rafique <nauman@google.com>
 */
#include <linux/ioprio.h>
#include <linux/seq_file.h>
#include <linux/kdev_t.h>
#include <linux/module.h>
#include <linux/err.h>
#include <linux/blkdev.h>
#include <linux/slab.h>
#include <linux/genhd.h>
#include <linux/delay.h>
#include "blk-cgroup.h"
#include "blk.h"

#define MAX_KEY_LEN 100

static DEFINE_SPINLOCK(blkio_list_lock);
static LIST_HEAD(blkio_list);

static DEFINE_MUTEX(all_q_mutex);
static LIST_HEAD(all_q_list);

struct blkio_cgroup blkio_root_cgroup = { .weight = 2*BLKIO_WEIGHT_DEFAULT };
EXPORT_SYMBOL_GPL(blkio_root_cgroup);

static struct blkio_policy_type *blkio_policy[BLKIO_NR_POLICIES];

static struct cgroup_subsys_state *blkiocg_create(struct cgroup_subsys *,
						  struct cgroup *);
static int blkiocg_can_attach(struct cgroup_subsys *, struct cgroup *,
			      struct cgroup_taskset *);
static void blkiocg_attach(struct cgroup_subsys *, struct cgroup *,
			   struct cgroup_taskset *);
static int blkiocg_pre_destroy(struct cgroup_subsys *, struct cgroup *);
static void blkiocg_destroy(struct cgroup_subsys *, struct cgroup *);
static int blkiocg_populate(struct cgroup_subsys *, struct cgroup *);

/* for encoding cft->private value on file */
#define BLKIOFILE_PRIVATE(x, val)	(((x) << 16) | (val))
/* What policy owns the file, proportional or throttle */
#define BLKIOFILE_POLICY(val)		(((val) >> 16) & 0xffff)
#define BLKIOFILE_ATTR(val)		((val) & 0xffff)

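/*
 * Sketch of the encoding above, using a pairing that actually appears in
 * blkio_files[] further down: a throttle read-bps file carries
 *
 *	cft->private = BLKIOFILE_PRIVATE(BLKIO_POLICY_THROTL,
 *					 BLKIO_THROTL_read_bps_device);
 *
 * and the shared read/write handlers recover the two halves with
 * BLKIOFILE_POLICY(cft->private) and BLKIOFILE_ATTR(cft->private).
 */
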
struct cgroup_subsys blkio_subsys = {
	.name = "blkio",
	.create = blkiocg_create,
	.can_attach = blkiocg_can_attach,
	.attach = blkiocg_attach,
	.pre_destroy = blkiocg_pre_destroy,
	.destroy = blkiocg_destroy,
	.populate = blkiocg_populate,
	.subsys_id = blkio_subsys_id,
	.module = THIS_MODULE,
};
EXPORT_SYMBOL_GPL(blkio_subsys);

struct blkio_cgroup *cgroup_to_blkio_cgroup(struct cgroup *cgroup)
{
	return container_of(cgroup_subsys_state(cgroup, blkio_subsys_id),
			    struct blkio_cgroup, css);
}
EXPORT_SYMBOL_GPL(cgroup_to_blkio_cgroup);

struct blkio_cgroup *task_blkio_cgroup(struct task_struct *tsk)
{
	return container_of(task_subsys_state(tsk, blkio_subsys_id),
			    struct blkio_cgroup, css);
}
EXPORT_SYMBOL_GPL(task_blkio_cgroup);

static inline void
blkio_update_group_weight(struct blkio_group *blkg, unsigned int weight)
{
	struct blkio_policy_type *blkiop;

	list_for_each_entry(blkiop, &blkio_list, list) {
		/* If this policy does not own the blkg, do not send updates */
		if (blkiop->plid != blkg->plid)
			continue;
		if (blkiop->ops.blkio_update_group_weight_fn)
			blkiop->ops.blkio_update_group_weight_fn(blkg->q,
								 blkg, weight);
	}
}

static inline void blkio_update_group_bps(struct blkio_group *blkg, u64 bps,
					  int fileid)
{
	struct blkio_policy_type *blkiop;

	list_for_each_entry(blkiop, &blkio_list, list) {

		/* If this policy does not own the blkg, do not send updates */
		if (blkiop->plid != blkg->plid)
			continue;

		if (fileid == BLKIO_THROTL_read_bps_device
		    && blkiop->ops.blkio_update_group_read_bps_fn)
			blkiop->ops.blkio_update_group_read_bps_fn(blkg->q,
								   blkg, bps);

		if (fileid == BLKIO_THROTL_write_bps_device
		    && blkiop->ops.blkio_update_group_write_bps_fn)
			blkiop->ops.blkio_update_group_write_bps_fn(blkg->q,
								    blkg, bps);
	}
}

static inline void blkio_update_group_iops(struct blkio_group *blkg,
					   unsigned int iops, int fileid)
{
	struct blkio_policy_type *blkiop;

	list_for_each_entry(blkiop, &blkio_list, list) {

		/* If this policy does not own the blkg, do not send updates */
		if (blkiop->plid != blkg->plid)
			continue;

		if (fileid == BLKIO_THROTL_read_iops_device
		    && blkiop->ops.blkio_update_group_read_iops_fn)
			blkiop->ops.blkio_update_group_read_iops_fn(blkg->q,
								    blkg, iops);

		if (fileid == BLKIO_THROTL_write_iops_device
		    && blkiop->ops.blkio_update_group_write_iops_fn)
			blkiop->ops.blkio_update_group_write_iops_fn(blkg->q,
								     blkg, iops);
	}
}

/*
 * Add to the appropriate stat variable depending on the request type.
 * This should be called with the blkg->stats_lock held.
 */
static void blkio_add_stat(uint64_t *stat, uint64_t add, bool direction,
			   bool sync)
{
	if (direction)
		stat[BLKIO_STAT_WRITE] += add;
	else
		stat[BLKIO_STAT_READ] += add;
	if (sync)
		stat[BLKIO_STAT_SYNC] += add;
	else
		stat[BLKIO_STAT_ASYNC] += add;
}

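/*
 * Example of the bucketing above: a synchronous write of @add units bumps
 * both stat[BLKIO_STAT_WRITE] and stat[BLKIO_STAT_SYNC]. There is no
 * stored total; the "Total" value reported to userspace is derived later
 * as READ + WRITE by the stat readers.
 */
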
/*
 * Decrements the appropriate stat variable if non-zero depending on the
 * request type. Panics on the value being zero.
 * This should be called with the blkg->stats_lock held.
 */
static void blkio_check_and_dec_stat(uint64_t *stat, bool direction, bool sync)
{
	if (direction) {
		BUG_ON(stat[BLKIO_STAT_WRITE] == 0);
		stat[BLKIO_STAT_WRITE]--;
	} else {
		BUG_ON(stat[BLKIO_STAT_READ] == 0);
		stat[BLKIO_STAT_READ]--;
	}
	if (sync) {
		BUG_ON(stat[BLKIO_STAT_SYNC] == 0);
		stat[BLKIO_STAT_SYNC]--;
	} else {
		BUG_ON(stat[BLKIO_STAT_ASYNC] == 0);
		stat[BLKIO_STAT_ASYNC]--;
	}
}

#ifdef CONFIG_DEBUG_BLK_CGROUP
/* This should be called with the blkg->stats_lock held. */
static void blkio_set_start_group_wait_time(struct blkio_group *blkg,
					    struct blkio_group *curr_blkg)
{
	struct blkg_policy_data *pd = blkg->pd[blkg->plid];

	if (blkio_blkg_waiting(&pd->stats))
		return;
	if (blkg == curr_blkg)
		return;
	pd->stats.start_group_wait_time = sched_clock();
	blkio_mark_blkg_waiting(&pd->stats);
}

/* This should be called with the blkg->stats_lock held. */
static void blkio_update_group_wait_time(struct blkio_group_stats *stats)
{
	unsigned long long now;

	if (!blkio_blkg_waiting(stats))
		return;

	now = sched_clock();
	if (time_after64(now, stats->start_group_wait_time))
		stats->group_wait_time += now - stats->start_group_wait_time;
	blkio_clear_blkg_waiting(stats);
}

/* This should be called with the blkg->stats_lock held. */
static void blkio_end_empty_time(struct blkio_group_stats *stats)
{
	unsigned long long now;

	if (!blkio_blkg_empty(stats))
		return;

	now = sched_clock();
	if (time_after64(now, stats->start_empty_time))
		stats->empty_time += now - stats->start_empty_time;
	blkio_clear_blkg_empty(stats);
}

void blkiocg_update_set_idle_time_stats(struct blkio_group *blkg)
{
	struct blkg_policy_data *pd = blkg->pd[blkg->plid];
	unsigned long flags;

	spin_lock_irqsave(&blkg->stats_lock, flags);
	BUG_ON(blkio_blkg_idling(&pd->stats));
	pd->stats.start_idle_time = sched_clock();
	blkio_mark_blkg_idling(&pd->stats);
	spin_unlock_irqrestore(&blkg->stats_lock, flags);
}
EXPORT_SYMBOL_GPL(blkiocg_update_set_idle_time_stats);

void blkiocg_update_idle_time_stats(struct blkio_group *blkg)
{
	struct blkg_policy_data *pd = blkg->pd[blkg->plid];
	unsigned long flags;
	unsigned long long now;
	struct blkio_group_stats *stats;

	spin_lock_irqsave(&blkg->stats_lock, flags);
	stats = &pd->stats;
	if (blkio_blkg_idling(stats)) {
		now = sched_clock();
		if (time_after64(now, stats->start_idle_time))
			stats->idle_time += now - stats->start_idle_time;
		blkio_clear_blkg_idling(stats);
	}
	spin_unlock_irqrestore(&blkg->stats_lock, flags);
}
EXPORT_SYMBOL_GPL(blkiocg_update_idle_time_stats);

void blkiocg_update_avg_queue_size_stats(struct blkio_group *blkg)
{
	struct blkg_policy_data *pd = blkg->pd[blkg->plid];
	unsigned long flags;
	struct blkio_group_stats *stats;

	spin_lock_irqsave(&blkg->stats_lock, flags);
	stats = &pd->stats;
	stats->avg_queue_size_sum +=
			stats->stat_arr[BLKIO_STAT_QUEUED][BLKIO_STAT_READ] +
			stats->stat_arr[BLKIO_STAT_QUEUED][BLKIO_STAT_WRITE];
	stats->avg_queue_size_samples++;
	blkio_update_group_wait_time(stats);
	spin_unlock_irqrestore(&blkg->stats_lock, flags);
}
EXPORT_SYMBOL_GPL(blkiocg_update_avg_queue_size_stats);

void blkiocg_set_start_empty_time(struct blkio_group *blkg)
{
	struct blkg_policy_data *pd = blkg->pd[blkg->plid];
	unsigned long flags;
	struct blkio_group_stats *stats;

	spin_lock_irqsave(&blkg->stats_lock, flags);
	stats = &pd->stats;

	if (stats->stat_arr[BLKIO_STAT_QUEUED][BLKIO_STAT_READ] ||
	    stats->stat_arr[BLKIO_STAT_QUEUED][BLKIO_STAT_WRITE]) {
		spin_unlock_irqrestore(&blkg->stats_lock, flags);
		return;
	}

	/*
	 * The group is already marked empty. This can happen if a cfqq got
	 * a new request in the parent group and moved to this group while
	 * being added to the service tree. Just ignore the event and move
	 * on.
	 */
	if (blkio_blkg_empty(stats)) {
		spin_unlock_irqrestore(&blkg->stats_lock, flags);
		return;
	}

	stats->start_empty_time = sched_clock();
	blkio_mark_blkg_empty(stats);
	spin_unlock_irqrestore(&blkg->stats_lock, flags);
}
EXPORT_SYMBOL_GPL(blkiocg_set_start_empty_time);

void blkiocg_update_dequeue_stats(struct blkio_group *blkg,
				  unsigned long dequeue)
{
	struct blkg_policy_data *pd = blkg->pd[blkg->plid];

	pd->stats.dequeue += dequeue;
}
EXPORT_SYMBOL_GPL(blkiocg_update_dequeue_stats);
#else
static inline void blkio_set_start_group_wait_time(struct blkio_group *blkg,
					struct blkio_group *curr_blkg) {}
static inline void blkio_end_empty_time(struct blkio_group_stats *stats) {}
#endif

void blkiocg_update_io_add_stats(struct blkio_group *blkg,
				 struct blkio_group *curr_blkg, bool direction,
				 bool sync)
{
	struct blkg_policy_data *pd = blkg->pd[blkg->plid];
	unsigned long flags;

	spin_lock_irqsave(&blkg->stats_lock, flags);
	blkio_add_stat(pd->stats.stat_arr[BLKIO_STAT_QUEUED], 1, direction,
			sync);
	blkio_end_empty_time(&pd->stats);
	blkio_set_start_group_wait_time(blkg, curr_blkg);
	spin_unlock_irqrestore(&blkg->stats_lock, flags);
}
EXPORT_SYMBOL_GPL(blkiocg_update_io_add_stats);

void blkiocg_update_io_remove_stats(struct blkio_group *blkg,
				    bool direction, bool sync)
{
	struct blkg_policy_data *pd = blkg->pd[blkg->plid];
	unsigned long flags;

	spin_lock_irqsave(&blkg->stats_lock, flags);
	blkio_check_and_dec_stat(pd->stats.stat_arr[BLKIO_STAT_QUEUED],
					direction, sync);
	spin_unlock_irqrestore(&blkg->stats_lock, flags);
}
EXPORT_SYMBOL_GPL(blkiocg_update_io_remove_stats);

void blkiocg_update_timeslice_used(struct blkio_group *blkg, unsigned long time,
				   unsigned long unaccounted_time)
{
	struct blkg_policy_data *pd = blkg->pd[blkg->plid];
	unsigned long flags;

	spin_lock_irqsave(&blkg->stats_lock, flags);
	pd->stats.time += time;
#ifdef CONFIG_DEBUG_BLK_CGROUP
	pd->stats.unaccounted_time += unaccounted_time;
#endif
	spin_unlock_irqrestore(&blkg->stats_lock, flags);
}
EXPORT_SYMBOL_GPL(blkiocg_update_timeslice_used);

/*
 * should be called under rcu read lock or queue lock to make sure blkg pointer
 * is valid.
 */
void blkiocg_update_dispatch_stats(struct blkio_group *blkg,
				   uint64_t bytes, bool direction, bool sync)
{
	struct blkg_policy_data *pd = blkg->pd[blkg->plid];
	struct blkio_group_stats_cpu *stats_cpu;
	unsigned long flags;

	/*
	 * Disabling interrupts to provide mutual exclusion between two
	 * writes on same cpu. It probably is not needed for 64bit. Not
	 * optimizing that case yet.
	 */
	local_irq_save(flags);

	stats_cpu = this_cpu_ptr(pd->stats_cpu);

	u64_stats_update_begin(&stats_cpu->syncp);
	stats_cpu->sectors += bytes >> 9;
	blkio_add_stat(stats_cpu->stat_arr_cpu[BLKIO_STAT_CPU_SERVICED],
			1, direction, sync);
	blkio_add_stat(stats_cpu->stat_arr_cpu[BLKIO_STAT_CPU_SERVICE_BYTES],
			bytes, direction, sync);
	u64_stats_update_end(&stats_cpu->syncp);
	local_irq_restore(flags);
}
EXPORT_SYMBOL_GPL(blkiocg_update_dispatch_stats);

void blkiocg_update_completion_stats(struct blkio_group *blkg,
	uint64_t start_time, uint64_t io_start_time, bool direction, bool sync)
{
	struct blkg_policy_data *pd = blkg->pd[blkg->plid];
	struct blkio_group_stats *stats;
	unsigned long flags;
	unsigned long long now = sched_clock();

	spin_lock_irqsave(&blkg->stats_lock, flags);
	stats = &pd->stats;
	if (time_after64(now, io_start_time))
		blkio_add_stat(stats->stat_arr[BLKIO_STAT_SERVICE_TIME],
				now - io_start_time, direction, sync);
	if (time_after64(io_start_time, start_time))
		blkio_add_stat(stats->stat_arr[BLKIO_STAT_WAIT_TIME],
				io_start_time - start_time, direction, sync);
	spin_unlock_irqrestore(&blkg->stats_lock, flags);
}
EXPORT_SYMBOL_GPL(blkiocg_update_completion_stats);

/* Merged stats are per cpu. */
void blkiocg_update_io_merged_stats(struct blkio_group *blkg, bool direction,
				    bool sync)
{
	struct blkg_policy_data *pd = blkg->pd[blkg->plid];
	struct blkio_group_stats_cpu *stats_cpu;
	unsigned long flags;

	/*
	 * Disabling interrupts to provide mutual exclusion between two
	 * writes on same cpu. It probably is not needed for 64bit. Not
	 * optimizing that case yet.
	 */
	local_irq_save(flags);

	stats_cpu = this_cpu_ptr(pd->stats_cpu);

	u64_stats_update_begin(&stats_cpu->syncp);
	blkio_add_stat(stats_cpu->stat_arr_cpu[BLKIO_STAT_CPU_MERGED], 1,
				direction, sync);
	u64_stats_update_end(&stats_cpu->syncp);
	local_irq_restore(flags);
}
EXPORT_SYMBOL_GPL(blkiocg_update_io_merged_stats);

/**
 * blkg_free - free a blkg
 * @blkg: blkg to free
 *
 * Free @blkg which may be partially allocated.
 */
static void blkg_free(struct blkio_group *blkg)
{
	struct blkg_policy_data *pd;

	if (!blkg)
		return;

	pd = blkg->pd[blkg->plid];
	if (pd) {
		free_percpu(pd->stats_cpu);
		kfree(pd);
	}
	kfree(blkg);
}

/**
 * blkg_alloc - allocate a blkg
 * @blkcg: block cgroup the new blkg is associated with
 * @q: request_queue the new blkg is associated with
 * @pol: policy the new blkg is associated with
 *
 * Allocate a new blkg associating @blkcg and @q for @pol.
 *
 * FIXME: Should be called with queue locked but currently isn't due to
 * percpu stat breakage.
 */
static struct blkio_group *blkg_alloc(struct blkio_cgroup *blkcg,
				      struct request_queue *q,
				      struct blkio_policy_type *pol)
{
	struct blkio_group *blkg;
	struct blkg_policy_data *pd;

	/* alloc and init base part */
	blkg = kzalloc_node(sizeof(*blkg), GFP_ATOMIC, q->node);
	if (!blkg)
		return NULL;

	spin_lock_init(&blkg->stats_lock);
	rcu_assign_pointer(blkg->q, q);
	blkg->blkcg = blkcg;
	blkg->plid = pol->plid;
	blkg->refcnt = 1;
	cgroup_path(blkcg->css.cgroup, blkg->path, sizeof(blkg->path));

	/* alloc per-policy data and attach it to blkg */
	pd = kzalloc_node(sizeof(*pd) + pol->pdata_size, GFP_ATOMIC,
			  q->node);
	if (!pd) {
		blkg_free(blkg);
		return NULL;
	}

	blkg->pd[pol->plid] = pd;
	pd->blkg = blkg;

	/* broken, read comment in the callsite */
	pd->stats_cpu = alloc_percpu(struct blkio_group_stats_cpu);
	if (!pd->stats_cpu) {
		blkg_free(blkg);
		return NULL;
	}

	/* invoke per-policy init */
	pol->ops.blkio_init_group_fn(blkg);
	return blkg;
}

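/*
 * Rough sketch of blkg_alloc()'s result for one policy: the blkg owns
 * blkg->pd[plid], which carries the policy's private data (pdata_size
 * bytes allocated past the struct) plus the per-cpu stats area, so
 * blkg_free() can tear everything down from the blkg pointer alone.
 */
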
struct blkio_group *blkg_lookup_create(struct blkio_cgroup *blkcg,
				       struct request_queue *q,
				       enum blkio_policy_id plid,
				       bool for_root)
	__releases(q->queue_lock) __acquires(q->queue_lock)
{
	struct blkio_policy_type *pol = blkio_policy[plid];
	struct blkio_group *blkg, *new_blkg;

	WARN_ON_ONCE(!rcu_read_lock_held());
	lockdep_assert_held(q->queue_lock);

	/*
	 * This could be the first entry point of blkcg implementation and
	 * we shouldn't allow anything to go through for a bypassing queue.
	 * The following can be removed if blkg lookup is guaranteed to
	 * fail on a bypassing queue.
	 */
	if (unlikely(blk_queue_bypass(q)) && !for_root)
		return ERR_PTR(blk_queue_dead(q) ? -EINVAL : -EBUSY);

	blkg = blkg_lookup(blkcg, q, plid);
	if (blkg)
		return blkg;

	/* blkg holds a reference to blkcg */
	if (!css_tryget(&blkcg->css))
		return ERR_PTR(-EINVAL);

	/*
	 * Allocate and initialize.
	 *
	 * FIXME: The following is broken. Percpu memory allocation
	 * requires %GFP_KERNEL context and can't be performed from IO
	 * path. Allocation here should inherently be atomic and the
	 * following lock dancing can be removed once the broken percpu
	 * allocation is fixed.
	 */
	spin_unlock_irq(q->queue_lock);
	rcu_read_unlock();

	new_blkg = blkg_alloc(blkcg, q, pol);

	rcu_read_lock();
	spin_lock_irq(q->queue_lock);

	/* did bypass get turned on in between? */
	if (unlikely(blk_queue_bypass(q)) && !for_root) {
		blkg = ERR_PTR(blk_queue_dead(q) ? -EINVAL : -EBUSY);
		goto out;
	}

	/* did someone beat us to it? */
	blkg = blkg_lookup(blkcg, q, plid);
	if (unlikely(blkg))
		goto out;

	/* did alloc fail? */
	if (unlikely(!new_blkg)) {
		blkg = ERR_PTR(-ENOMEM);
		goto out;
	}

	/* insert */
	spin_lock(&blkcg->lock);
	swap(blkg, new_blkg);
	hlist_add_head_rcu(&blkg->blkcg_node, &blkcg->blkg_list);
	pol->ops.blkio_link_group_fn(q, blkg);
	spin_unlock(&blkcg->lock);
out:
	blkg_free(new_blkg);
	return blkg;
}
EXPORT_SYMBOL_GPL(blkg_lookup_create);

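/*
 * Sketch of the calling convention (as used by
 * blkio_policy_parse_and_set() below): the caller enters with both
 * rcu_read_lock() and q->queue_lock held. Both locks may be dropped and
 * re-acquired internally while the new blkg is allocated, which is why
 * the bypass state and the lookup are rechecked after the allocation.
 */
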
static void __blkiocg_del_blkio_group(struct blkio_group *blkg)
{
	hlist_del_init_rcu(&blkg->blkcg_node);
}

/*
 * returns 0 if blkio_group was still on the cgroup list. Otherwise returns
 * 1 indicating that the blkio_group was unhashed by the time we got to it.
 */
int blkiocg_del_blkio_group(struct blkio_group *blkg)
{
	struct blkio_cgroup *blkcg = blkg->blkcg;
	unsigned long flags;
	int ret = 1;

	spin_lock_irqsave(&blkcg->lock, flags);
	if (!hlist_unhashed(&blkg->blkcg_node)) {
		__blkiocg_del_blkio_group(blkg);
		ret = 0;
	}
	spin_unlock_irqrestore(&blkcg->lock, flags);

	return ret;
}
EXPORT_SYMBOL_GPL(blkiocg_del_blkio_group);

/* called under rcu_read_lock(). */
struct blkio_group *blkg_lookup(struct blkio_cgroup *blkcg,
				struct request_queue *q,
				enum blkio_policy_id plid)
{
	struct blkio_group *blkg;
	struct hlist_node *n;

	hlist_for_each_entry_rcu(blkg, n, &blkcg->blkg_list, blkcg_node)
		if (blkg->q == q && blkg->plid == plid)
			return blkg;
	return NULL;
}
EXPORT_SYMBOL_GPL(blkg_lookup);

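/*
 * Minimal usage sketch (hypothetical caller): the lookup itself only
 * needs the RCU read lock, e.g.
 *
 *	rcu_read_lock();
 *	blkg = blkg_lookup(blkcg, q, BLKIO_POLICY_PROP);
 *	if (blkg)
 *		... touch only fields valid under RCU, see __blkg_release ...
 *	rcu_read_unlock();
 */
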
void blkg_destroy_all(struct request_queue *q)
{
	struct blkio_policy_type *pol;

	while (true) {
		bool done = true;

		spin_lock(&blkio_list_lock);
		spin_lock_irq(q->queue_lock);

		/*
		 * clear_queue_fn() might return with a non-empty group list
		 * if it raced cgroup removal and lost. cgroup removal is
		 * guaranteed to make forward progress and retrying after a
		 * while is enough. This ugliness is scheduled to be
		 * removed after locking update.
		 */
		list_for_each_entry(pol, &blkio_list, list)
			if (!pol->ops.blkio_clear_queue_fn(q))
				done = false;

		spin_unlock_irq(q->queue_lock);
		spin_unlock(&blkio_list_lock);

		if (done)
			break;

		msleep(10);	/* just some random duration I like */
	}
}

static void blkg_rcu_free(struct rcu_head *rcu_head)
{
	blkg_free(container_of(rcu_head, struct blkio_group, rcu_head));
}

void __blkg_release(struct blkio_group *blkg)
{
	/* release the extra blkcg reference this blkg has been holding */
	css_put(&blkg->blkcg->css);

	/*
	 * A group is freed in RCU manner. But having an RCU lock does not
	 * mean that one can access all the fields of blkg and assume these
	 * are valid. For example, don't try to follow throtl_data and
	 * request queue links.
	 *
	 * Having a reference to blkg under an RCU read lock allows access
	 * only to values local to groups, like group stats and group rate
	 * limits.
	 */
	call_rcu(&blkg->rcu_head, blkg_rcu_free);
}
EXPORT_SYMBOL_GPL(__blkg_release);

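/*
 * Lifecycle sketch: blkg_alloc() starts blkg->refcnt at 1 and
 * __blkg_release() runs once the last reference is dropped (via the
 * refcounting helpers declared in blk-cgroup.h). The actual kfree() is
 * then deferred to an RCU grace period through blkg_rcu_free().
 */
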
static void blkio_reset_stats_cpu(struct blkio_group *blkg)
{
	struct blkg_policy_data *pd = blkg->pd[blkg->plid];
	struct blkio_group_stats_cpu *stats_cpu;
	int i, j, k;
	/*
	 * Note: On a 64 bit arch this should not be an issue. This has the
	 * possibility of returning some inconsistent value on a 32bit arch
	 * as a 64bit update on 32bit is non atomic. Taking care of this
	 * corner case makes the code very complicated, like sending IPIs to
	 * cpus, taking care of stats of offline cpus etc.
	 *
	 * reset stats is anyway more of a debug feature and this sounds
	 * like a corner case. So I am not complicating the code yet until
	 * and unless this becomes a real issue.
	 */
	for_each_possible_cpu(i) {
		stats_cpu = per_cpu_ptr(pd->stats_cpu, i);
		stats_cpu->sectors = 0;
		for (j = 0; j < BLKIO_STAT_CPU_NR; j++)
			for (k = 0; k < BLKIO_STAT_TOTAL; k++)
				stats_cpu->stat_arr_cpu[j][k] = 0;
	}
}

static int
blkiocg_reset_stats(struct cgroup *cgroup, struct cftype *cftype, u64 val)
{
	struct blkio_cgroup *blkcg;
	struct blkio_group *blkg;
	struct blkio_group_stats *stats;
	struct hlist_node *n;
	uint64_t queued[BLKIO_STAT_TOTAL];
	int i;
#ifdef CONFIG_DEBUG_BLK_CGROUP
	bool idling, waiting, empty;
	unsigned long long now = sched_clock();
#endif

	blkcg = cgroup_to_blkio_cgroup(cgroup);
	spin_lock_irq(&blkcg->lock);
	hlist_for_each_entry(blkg, n, &blkcg->blkg_list, blkcg_node) {
		struct blkg_policy_data *pd = blkg->pd[blkg->plid];

		spin_lock(&blkg->stats_lock);
		stats = &pd->stats;
#ifdef CONFIG_DEBUG_BLK_CGROUP
		idling = blkio_blkg_idling(stats);
		waiting = blkio_blkg_waiting(stats);
		empty = blkio_blkg_empty(stats);
#endif
		for (i = 0; i < BLKIO_STAT_TOTAL; i++)
			queued[i] = stats->stat_arr[BLKIO_STAT_QUEUED][i];
		memset(stats, 0, sizeof(struct blkio_group_stats));
		for (i = 0; i < BLKIO_STAT_TOTAL; i++)
			stats->stat_arr[BLKIO_STAT_QUEUED][i] = queued[i];
#ifdef CONFIG_DEBUG_BLK_CGROUP
		if (idling) {
			blkio_mark_blkg_idling(stats);
			stats->start_idle_time = now;
		}
		if (waiting) {
			blkio_mark_blkg_waiting(stats);
			stats->start_group_wait_time = now;
		}
		if (empty) {
			blkio_mark_blkg_empty(stats);
			stats->start_empty_time = now;
		}
#endif
		spin_unlock(&blkg->stats_lock);

		/* Reset per-cpu stats which don't take blkg->stats_lock */
		blkio_reset_stats_cpu(blkg);
	}

	spin_unlock_irq(&blkcg->lock);
	return 0;
}

static void blkio_get_key_name(enum stat_sub_type type, const char *dname,
			       char *str, int chars_left, bool diskname_only)
{
	snprintf(str, chars_left, "%s", dname);
	chars_left -= strlen(str);
	if (chars_left <= 0) {
		printk(KERN_WARNING
			"Possibly incorrect cgroup stat display format\n");
		return;
	}
	if (diskname_only)
		return;
	switch (type) {
	case BLKIO_STAT_READ:
		strlcat(str, " Read", chars_left);
		break;
	case BLKIO_STAT_WRITE:
		strlcat(str, " Write", chars_left);
		break;
	case BLKIO_STAT_SYNC:
		strlcat(str, " Sync", chars_left);
		break;
	case BLKIO_STAT_ASYNC:
		strlcat(str, " Async", chars_left);
		break;
	case BLKIO_STAT_TOTAL:
		strlcat(str, " Total", chars_left);
		break;
	default:
		strlcat(str, " Invalid", chars_left);
	}
}

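/*
 * Example of the keys this produces, assuming a device whose BDI is
 * registered as "8:16": "8:16" when diskname_only is set, otherwise
 * "8:16 Read", "8:16 Write", "8:16 Sync", "8:16 Async" or "8:16 Total",
 * which are the map keys userspace sees in the stat files.
 */
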
static uint64_t blkio_fill_stat(char *str, int chars_left, uint64_t val,
				struct cgroup_map_cb *cb, const char *dname)
{
	blkio_get_key_name(0, dname, str, chars_left, true);
	cb->fill(cb, str, val);
	return val;
}

static uint64_t blkio_read_stat_cpu(struct blkio_group *blkg,
			enum stat_type_cpu type, enum stat_sub_type sub_type)
{
	struct blkg_policy_data *pd = blkg->pd[blkg->plid];
	int cpu;
	struct blkio_group_stats_cpu *stats_cpu;
	u64 val = 0, tval;

	for_each_possible_cpu(cpu) {
		unsigned int start;
		stats_cpu = per_cpu_ptr(pd->stats_cpu, cpu);

		do {
			start = u64_stats_fetch_begin(&stats_cpu->syncp);
			if (type == BLKIO_STAT_CPU_SECTORS)
				tval = stats_cpu->sectors;
			else
				tval = stats_cpu->stat_arr_cpu[type][sub_type];
		} while (u64_stats_fetch_retry(&stats_cpu->syncp, start));

		val += tval;
	}

	return val;
}

static uint64_t blkio_get_stat_cpu(struct blkio_group *blkg,
				   struct cgroup_map_cb *cb, const char *dname,
				   enum stat_type_cpu type)
{
	uint64_t disk_total, val;
	char key_str[MAX_KEY_LEN];
	enum stat_sub_type sub_type;

	if (type == BLKIO_STAT_CPU_SECTORS) {
		val = blkio_read_stat_cpu(blkg, type, 0);
		return blkio_fill_stat(key_str, MAX_KEY_LEN - 1, val, cb,
				       dname);
	}

	for (sub_type = BLKIO_STAT_READ; sub_type < BLKIO_STAT_TOTAL;
			sub_type++) {
		blkio_get_key_name(sub_type, dname, key_str, MAX_KEY_LEN,
				   false);
		val = blkio_read_stat_cpu(blkg, type, sub_type);
		cb->fill(cb, key_str, val);
	}

	disk_total = blkio_read_stat_cpu(blkg, type, BLKIO_STAT_READ) +
			blkio_read_stat_cpu(blkg, type, BLKIO_STAT_WRITE);

	blkio_get_key_name(BLKIO_STAT_TOTAL, dname, key_str, MAX_KEY_LEN,
			   false);
	cb->fill(cb, key_str, disk_total);
	return disk_total;
}

/* This should be called with blkg->stats_lock held */
static uint64_t blkio_get_stat(struct blkio_group *blkg,
			       struct cgroup_map_cb *cb, const char *dname,
			       enum stat_type type)
{
	struct blkg_policy_data *pd = blkg->pd[blkg->plid];
	uint64_t disk_total;
	char key_str[MAX_KEY_LEN];
	enum stat_sub_type sub_type;

	if (type == BLKIO_STAT_TIME)
		return blkio_fill_stat(key_str, MAX_KEY_LEN - 1,
					pd->stats.time, cb, dname);
#ifdef CONFIG_DEBUG_BLK_CGROUP
	if (type == BLKIO_STAT_UNACCOUNTED_TIME)
		return blkio_fill_stat(key_str, MAX_KEY_LEN - 1,
					pd->stats.unaccounted_time, cb, dname);
	if (type == BLKIO_STAT_AVG_QUEUE_SIZE) {
		uint64_t sum = pd->stats.avg_queue_size_sum;
		uint64_t samples = pd->stats.avg_queue_size_samples;
		if (samples)
			do_div(sum, samples);
		else
			sum = 0;
		return blkio_fill_stat(key_str, MAX_KEY_LEN - 1,
				       sum, cb, dname);
	}
	if (type == BLKIO_STAT_GROUP_WAIT_TIME)
		return blkio_fill_stat(key_str, MAX_KEY_LEN - 1,
					pd->stats.group_wait_time, cb, dname);
	if (type == BLKIO_STAT_IDLE_TIME)
		return blkio_fill_stat(key_str, MAX_KEY_LEN - 1,
					pd->stats.idle_time, cb, dname);
	if (type == BLKIO_STAT_EMPTY_TIME)
		return blkio_fill_stat(key_str, MAX_KEY_LEN - 1,
					pd->stats.empty_time, cb, dname);
	if (type == BLKIO_STAT_DEQUEUE)
		return blkio_fill_stat(key_str, MAX_KEY_LEN - 1,
					pd->stats.dequeue, cb, dname);
#endif

	for (sub_type = BLKIO_STAT_READ; sub_type < BLKIO_STAT_TOTAL;
			sub_type++) {
		blkio_get_key_name(sub_type, dname, key_str, MAX_KEY_LEN,
				   false);
		cb->fill(cb, key_str, pd->stats.stat_arr[type][sub_type]);
	}
	disk_total = pd->stats.stat_arr[type][BLKIO_STAT_READ] +
			pd->stats.stat_arr[type][BLKIO_STAT_WRITE];
	blkio_get_key_name(BLKIO_STAT_TOTAL, dname, key_str, MAX_KEY_LEN,
			   false);
	cb->fill(cb, key_str, disk_total);
	return disk_total;
}

static int blkio_policy_parse_and_set(char *buf, enum blkio_policy_id plid,
				      int fileid, struct blkio_cgroup *blkcg)
{
	struct gendisk *disk = NULL;
	struct blkio_group *blkg = NULL;
	struct blkg_policy_data *pd;
	char *s[4], *p, *major_s = NULL, *minor_s = NULL;
	unsigned long major, minor;
	int i = 0, ret = -EINVAL;
	int part;
	dev_t dev;
	u64 temp;

	memset(s, 0, sizeof(s));

	while ((p = strsep(&buf, " ")) != NULL) {
		if (!*p)
			continue;

		s[i++] = p;

		/* Prevent too many fields from being input */
		if (i == 3)
			break;
	}

	if (i != 2)
		goto out;

	p = strsep(&s[0], ":");
	if (p != NULL)
		major_s = p;
	else
		goto out;

	minor_s = s[0];
	if (!minor_s)
		goto out;

	if (strict_strtoul(major_s, 10, &major))
		goto out;

	if (strict_strtoul(minor_s, 10, &minor))
		goto out;

	dev = MKDEV(major, minor);

	if (strict_strtoull(s[1], 10, &temp))
		goto out;

	disk = get_gendisk(dev, &part);
	if (!disk || part)
		goto out;

	rcu_read_lock();

	spin_lock_irq(disk->queue->queue_lock);
	blkg = blkg_lookup_create(blkcg, disk->queue, plid, false);
	spin_unlock_irq(disk->queue->queue_lock);

	if (IS_ERR(blkg)) {
		ret = PTR_ERR(blkg);
		goto out_unlock;
	}

	pd = blkg->pd[plid];

	switch (plid) {
	case BLKIO_POLICY_PROP:
		if ((temp < BLKIO_WEIGHT_MIN && temp > 0) ||
		     temp > BLKIO_WEIGHT_MAX)
			goto out_unlock;

		pd->conf.weight = temp;
		blkio_update_group_weight(blkg, temp ?: blkcg->weight);
		break;
	case BLKIO_POLICY_THROTL:
		switch (fileid) {
		case BLKIO_THROTL_read_bps_device:
			pd->conf.bps[READ] = temp;
			blkio_update_group_bps(blkg, temp ?: -1, fileid);
			break;
		case BLKIO_THROTL_write_bps_device:
			pd->conf.bps[WRITE] = temp;
			blkio_update_group_bps(blkg, temp ?: -1, fileid);
			break;
		case BLKIO_THROTL_read_iops_device:
			if (temp > THROTL_IOPS_MAX)
				goto out_unlock;
			pd->conf.iops[READ] = temp;
			blkio_update_group_iops(blkg, temp ?: -1, fileid);
			break;
		case BLKIO_THROTL_write_iops_device:
			if (temp > THROTL_IOPS_MAX)
				goto out_unlock;
			pd->conf.iops[WRITE] = temp;
			blkio_update_group_iops(blkg, temp ?: -1, fileid);
			break;
		}
		break;
	default:
		BUG();
	}
	ret = 0;
out_unlock:
	rcu_read_unlock();
out:
	put_disk(disk);

	/*
	 * If the queue was bypassing, we should retry. Do so after a short
	 * msleep(). It isn't strictly necessary but the queue can be
	 * bypassing for some time and it's always nice to avoid busy
	 * looping.
	 */
	if (ret == -EBUSY) {
		msleep(10);
		return restart_syscall();
	}
	return ret;
}

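/*
 * The accepted input is "<major>:<minor> <value>", e.g. (assuming the
 * blkio controller is mounted at /sys/fs/cgroup/blkio and "grp" is a
 * child cgroup):
 *
 *	# cap reads on the 8:16 device to 1MB/s for cgroup "grp"
 *	echo "8:16 1048576" > /sys/fs/cgroup/blkio/grp/blkio.throttle.read_bps_device
 *
 * A value of 0 clears the per-device setting: weight falls back to
 * blkcg->weight and throttle limits go to "unlimited" via the ?: -1
 * fallbacks above.
 */
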
static int blkiocg_file_write(struct cgroup *cgrp, struct cftype *cft,
			      const char *buffer)
{
	int ret = 0;
	char *buf;
	struct blkio_cgroup *blkcg = cgroup_to_blkio_cgroup(cgrp);
	enum blkio_policy_id plid = BLKIOFILE_POLICY(cft->private);
	int fileid = BLKIOFILE_ATTR(cft->private);

	buf = kstrdup(buffer, GFP_KERNEL);
	if (!buf)
		return -ENOMEM;

	ret = blkio_policy_parse_and_set(buf, plid, fileid, blkcg);
	kfree(buf);
	return ret;
}

static const char *blkg_dev_name(struct blkio_group *blkg)
{
	/* some drivers (floppy) instantiate a queue w/o disk registered */
	if (blkg->q->backing_dev_info.dev)
		return dev_name(blkg->q->backing_dev_info.dev);
	return NULL;
}

static void blkio_print_group_conf(struct cftype *cft, struct blkio_group *blkg,
				   struct seq_file *m)
{
	struct blkg_policy_data *pd = blkg->pd[blkg->plid];
	const char *dname = blkg_dev_name(blkg);
	int fileid = BLKIOFILE_ATTR(cft->private);
	int rw = WRITE;

	if (!dname)
		return;

	switch (blkg->plid) {
	case BLKIO_POLICY_PROP:
		if (pd->conf.weight)
			seq_printf(m, "%s\t%u\n",
				   dname, pd->conf.weight);
		break;
	case BLKIO_POLICY_THROTL:
		switch (fileid) {
		case BLKIO_THROTL_read_bps_device:
			rw = READ;
			/* fall through */
		case BLKIO_THROTL_write_bps_device:
			if (pd->conf.bps[rw])
				seq_printf(m, "%s\t%llu\n",
					   dname, pd->conf.bps[rw]);
			break;
		case BLKIO_THROTL_read_iops_device:
			rw = READ;
			/* fall through */
		case BLKIO_THROTL_write_iops_device:
			if (pd->conf.iops[rw])
				seq_printf(m, "%s\t%u\n",
					   dname, pd->conf.iops[rw]);
			break;
		}
		break;
	default:
		BUG();
	}
}

/* cgroup files which read their data from policy nodes end up here */
static void blkio_read_conf(struct cftype *cft, struct blkio_cgroup *blkcg,
			    struct seq_file *m)
{
	struct blkio_group *blkg;
	struct hlist_node *n;

	spin_lock_irq(&blkcg->lock);
	hlist_for_each_entry(blkg, n, &blkcg->blkg_list, blkcg_node)
		if (BLKIOFILE_POLICY(cft->private) == blkg->plid)
			blkio_print_group_conf(cft, blkg, m);
	spin_unlock_irq(&blkcg->lock);
}

static int blkiocg_file_read(struct cgroup *cgrp, struct cftype *cft,
			     struct seq_file *m)
{
	struct blkio_cgroup *blkcg;
	enum blkio_policy_id plid = BLKIOFILE_POLICY(cft->private);
	int name = BLKIOFILE_ATTR(cft->private);

	blkcg = cgroup_to_blkio_cgroup(cgrp);

	switch (plid) {
	case BLKIO_POLICY_PROP:
		switch (name) {
		case BLKIO_PROP_weight_device:
			blkio_read_conf(cft, blkcg, m);
			return 0;
		default:
			BUG();
		}
		break;
	case BLKIO_POLICY_THROTL:
		switch (name) {
		case BLKIO_THROTL_read_bps_device:
		case BLKIO_THROTL_write_bps_device:
		case BLKIO_THROTL_read_iops_device:
		case BLKIO_THROTL_write_iops_device:
			blkio_read_conf(cft, blkcg, m);
			return 0;
		default:
			BUG();
		}
		break;
	default:
		BUG();
	}

	return 0;
}

static int blkio_read_blkg_stats(struct blkio_cgroup *blkcg,
		struct cftype *cft, struct cgroup_map_cb *cb,
		enum stat_type type, bool show_total, bool pcpu)
{
	struct blkio_group *blkg;
	struct hlist_node *n;
	uint64_t cgroup_total = 0;

	rcu_read_lock();
	hlist_for_each_entry_rcu(blkg, n, &blkcg->blkg_list, blkcg_node) {
		const char *dname = blkg_dev_name(blkg);

		if (!dname || BLKIOFILE_POLICY(cft->private) != blkg->plid)
			continue;
		if (pcpu)
			cgroup_total += blkio_get_stat_cpu(blkg, cb, dname,
							   type);
		else {
			spin_lock_irq(&blkg->stats_lock);
			cgroup_total += blkio_get_stat(blkg, cb, dname, type);
			spin_unlock_irq(&blkg->stats_lock);
		}
	}
	if (show_total)
		cb->fill(cb, "Total", cgroup_total);
	rcu_read_unlock();
	return 0;
}

/* All map-type cgroup files get serviced by this function */
static int blkiocg_file_read_map(struct cgroup *cgrp, struct cftype *cft,
				 struct cgroup_map_cb *cb)
{
	struct blkio_cgroup *blkcg;
	enum blkio_policy_id plid = BLKIOFILE_POLICY(cft->private);
	int name = BLKIOFILE_ATTR(cft->private);

	blkcg = cgroup_to_blkio_cgroup(cgrp);

	switch (plid) {
	case BLKIO_POLICY_PROP:
		switch (name) {
		case BLKIO_PROP_time:
			return blkio_read_blkg_stats(blkcg, cft, cb,
						BLKIO_STAT_TIME, 0, 0);
		case BLKIO_PROP_sectors:
			return blkio_read_blkg_stats(blkcg, cft, cb,
						BLKIO_STAT_CPU_SECTORS, 0, 1);
		case BLKIO_PROP_io_service_bytes:
			return blkio_read_blkg_stats(blkcg, cft, cb,
					BLKIO_STAT_CPU_SERVICE_BYTES, 1, 1);
		case BLKIO_PROP_io_serviced:
			return blkio_read_blkg_stats(blkcg, cft, cb,
						BLKIO_STAT_CPU_SERVICED, 1, 1);
		case BLKIO_PROP_io_service_time:
			return blkio_read_blkg_stats(blkcg, cft, cb,
						BLKIO_STAT_SERVICE_TIME, 1, 0);
		case BLKIO_PROP_io_wait_time:
			return blkio_read_blkg_stats(blkcg, cft, cb,
						BLKIO_STAT_WAIT_TIME, 1, 0);
		case BLKIO_PROP_io_merged:
			return blkio_read_blkg_stats(blkcg, cft, cb,
						BLKIO_STAT_CPU_MERGED, 1, 1);
		case BLKIO_PROP_io_queued:
			return blkio_read_blkg_stats(blkcg, cft, cb,
						BLKIO_STAT_QUEUED, 1, 0);
#ifdef CONFIG_DEBUG_BLK_CGROUP
		case BLKIO_PROP_unaccounted_time:
			return blkio_read_blkg_stats(blkcg, cft, cb,
					BLKIO_STAT_UNACCOUNTED_TIME, 0, 0);
		case BLKIO_PROP_dequeue:
			return blkio_read_blkg_stats(blkcg, cft, cb,
						BLKIO_STAT_DEQUEUE, 0, 0);
		case BLKIO_PROP_avg_queue_size:
			return blkio_read_blkg_stats(blkcg, cft, cb,
					BLKIO_STAT_AVG_QUEUE_SIZE, 0, 0);
		case BLKIO_PROP_group_wait_time:
			return blkio_read_blkg_stats(blkcg, cft, cb,
					BLKIO_STAT_GROUP_WAIT_TIME, 0, 0);
		case BLKIO_PROP_idle_time:
			return blkio_read_blkg_stats(blkcg, cft, cb,
						BLKIO_STAT_IDLE_TIME, 0, 0);
		case BLKIO_PROP_empty_time:
			return blkio_read_blkg_stats(blkcg, cft, cb,
						BLKIO_STAT_EMPTY_TIME, 0, 0);
#endif
		default:
			BUG();
		}
		break;
	case BLKIO_POLICY_THROTL:
		switch (name) {
		case BLKIO_THROTL_io_service_bytes:
			return blkio_read_blkg_stats(blkcg, cft, cb,
					BLKIO_STAT_CPU_SERVICE_BYTES, 1, 1);
		case BLKIO_THROTL_io_serviced:
			return blkio_read_blkg_stats(blkcg, cft, cb,
						BLKIO_STAT_CPU_SERVICED, 1, 1);
		default:
			BUG();
		}
		break;
	default:
		BUG();
	}

	return 0;
}

static int blkio_weight_write(struct blkio_cgroup *blkcg, int plid, u64 val)
{
	struct blkio_group *blkg;
	struct hlist_node *n;

	if (val < BLKIO_WEIGHT_MIN || val > BLKIO_WEIGHT_MAX)
		return -EINVAL;

	spin_lock(&blkio_list_lock);
	spin_lock_irq(&blkcg->lock);
	blkcg->weight = (unsigned int)val;

	hlist_for_each_entry(blkg, n, &blkcg->blkg_list, blkcg_node) {
		struct blkg_policy_data *pd = blkg->pd[blkg->plid];

		if (blkg->plid == plid && !pd->conf.weight)
			blkio_update_group_weight(blkg, blkcg->weight);
	}

	spin_unlock_irq(&blkcg->lock);
	spin_unlock(&blkio_list_lock);
	return 0;
}

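/*
 * Example: "echo 500 > blkio.weight" lands here and becomes the
 * cgroup-wide default. Only groups without a per-device override
 * (!pd->conf.weight) are pushed the new value; weights set through
 * blkio.weight_device keep precedence, matching the temp ?: blkcg->weight
 * fallback in blkio_policy_parse_and_set().
 */
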
static u64 blkiocg_file_read_u64(struct cgroup *cgrp, struct cftype *cft)
{
	struct blkio_cgroup *blkcg;
	enum blkio_policy_id plid = BLKIOFILE_POLICY(cft->private);
	int name = BLKIOFILE_ATTR(cft->private);

	blkcg = cgroup_to_blkio_cgroup(cgrp);

	switch (plid) {
	case BLKIO_POLICY_PROP:
		switch (name) {
		case BLKIO_PROP_weight:
			return (u64)blkcg->weight;
		}
		break;
	default:
		BUG();
	}
	return 0;
}

static int
blkiocg_file_write_u64(struct cgroup *cgrp, struct cftype *cft, u64 val)
{
	struct blkio_cgroup *blkcg;
	enum blkio_policy_id plid = BLKIOFILE_POLICY(cft->private);
	int name = BLKIOFILE_ATTR(cft->private);

	blkcg = cgroup_to_blkio_cgroup(cgrp);

	switch (plid) {
	case BLKIO_POLICY_PROP:
		switch (name) {
		case BLKIO_PROP_weight:
			return blkio_weight_write(blkcg, plid, val);
		}
		break;
	default:
		BUG();
	}

	return 0;
}

struct cftype blkio_files[] = {
	{
		.name = "weight_device",
		.private = BLKIOFILE_PRIVATE(BLKIO_POLICY_PROP,
				BLKIO_PROP_weight_device),
		.read_seq_string = blkiocg_file_read,
		.write_string = blkiocg_file_write,
		.max_write_len = 256,
	},
	{
		.name = "weight",
		.private = BLKIOFILE_PRIVATE(BLKIO_POLICY_PROP,
				BLKIO_PROP_weight),
		.read_u64 = blkiocg_file_read_u64,
		.write_u64 = blkiocg_file_write_u64,
	},
	{
		.name = "time",
		.private = BLKIOFILE_PRIVATE(BLKIO_POLICY_PROP,
				BLKIO_PROP_time),
		.read_map = blkiocg_file_read_map,
	},
	{
		.name = "sectors",
		.private = BLKIOFILE_PRIVATE(BLKIO_POLICY_PROP,
				BLKIO_PROP_sectors),
		.read_map = blkiocg_file_read_map,
	},
	{
		.name = "io_service_bytes",
		.private = BLKIOFILE_PRIVATE(BLKIO_POLICY_PROP,
				BLKIO_PROP_io_service_bytes),
		.read_map = blkiocg_file_read_map,
	},
	{
		.name = "io_serviced",
		.private = BLKIOFILE_PRIVATE(BLKIO_POLICY_PROP,
				BLKIO_PROP_io_serviced),
		.read_map = blkiocg_file_read_map,
	},
	{
		.name = "io_service_time",
		.private = BLKIOFILE_PRIVATE(BLKIO_POLICY_PROP,
				BLKIO_PROP_io_service_time),
		.read_map = blkiocg_file_read_map,
	},
	{
		.name = "io_wait_time",
		.private = BLKIOFILE_PRIVATE(BLKIO_POLICY_PROP,
				BLKIO_PROP_io_wait_time),
		.read_map = blkiocg_file_read_map,
	},
	{
		.name = "io_merged",
		.private = BLKIOFILE_PRIVATE(BLKIO_POLICY_PROP,
				BLKIO_PROP_io_merged),
		.read_map = blkiocg_file_read_map,
	},
	{
		.name = "io_queued",
		.private = BLKIOFILE_PRIVATE(BLKIO_POLICY_PROP,
				BLKIO_PROP_io_queued),
		.read_map = blkiocg_file_read_map,
	},
	{
		.name = "reset_stats",
		.write_u64 = blkiocg_reset_stats,
	},
#ifdef CONFIG_BLK_DEV_THROTTLING
	{
		.name = "throttle.read_bps_device",
		.private = BLKIOFILE_PRIVATE(BLKIO_POLICY_THROTL,
				BLKIO_THROTL_read_bps_device),
		.read_seq_string = blkiocg_file_read,
		.write_string = blkiocg_file_write,
		.max_write_len = 256,
	},

	{
		.name = "throttle.write_bps_device",
		.private = BLKIOFILE_PRIVATE(BLKIO_POLICY_THROTL,
				BLKIO_THROTL_write_bps_device),
		.read_seq_string = blkiocg_file_read,
		.write_string = blkiocg_file_write,
		.max_write_len = 256,
	},

	{
		.name = "throttle.read_iops_device",
		.private = BLKIOFILE_PRIVATE(BLKIO_POLICY_THROTL,
				BLKIO_THROTL_read_iops_device),
		.read_seq_string = blkiocg_file_read,
		.write_string = blkiocg_file_write,
		.max_write_len = 256,
	},

	{
		.name = "throttle.write_iops_device",
		.private = BLKIOFILE_PRIVATE(BLKIO_POLICY_THROTL,
				BLKIO_THROTL_write_iops_device),
		.read_seq_string = blkiocg_file_read,
		.write_string = blkiocg_file_write,
		.max_write_len = 256,
	},
	{
		.name = "throttle.io_service_bytes",
		.private = BLKIOFILE_PRIVATE(BLKIO_POLICY_THROTL,
				BLKIO_THROTL_io_service_bytes),
		.read_map = blkiocg_file_read_map,
	},
	{
		.name = "throttle.io_serviced",
		.private = BLKIOFILE_PRIVATE(BLKIO_POLICY_THROTL,
				BLKIO_THROTL_io_serviced),
		.read_map = blkiocg_file_read_map,
	},
#endif /* CONFIG_BLK_DEV_THROTTLING */

#ifdef CONFIG_DEBUG_BLK_CGROUP
	{
		.name = "avg_queue_size",
		.private = BLKIOFILE_PRIVATE(BLKIO_POLICY_PROP,
				BLKIO_PROP_avg_queue_size),
		.read_map = blkiocg_file_read_map,
	},
	{
		.name = "group_wait_time",
		.private = BLKIOFILE_PRIVATE(BLKIO_POLICY_PROP,
				BLKIO_PROP_group_wait_time),
		.read_map = blkiocg_file_read_map,
	},
	{
		.name = "idle_time",
		.private = BLKIOFILE_PRIVATE(BLKIO_POLICY_PROP,
				BLKIO_PROP_idle_time),
		.read_map = blkiocg_file_read_map,
	},
	{
		.name = "empty_time",
		.private = BLKIOFILE_PRIVATE(BLKIO_POLICY_PROP,
				BLKIO_PROP_empty_time),
		.read_map = blkiocg_file_read_map,
	},
	{
		.name = "dequeue",
		.private = BLKIOFILE_PRIVATE(BLKIO_POLICY_PROP,
				BLKIO_PROP_dequeue),
		.read_map = blkiocg_file_read_map,
	},
	{
		.name = "unaccounted_time",
		.private = BLKIOFILE_PRIVATE(BLKIO_POLICY_PROP,
				BLKIO_PROP_unaccounted_time),
		.read_map = blkiocg_file_read_map,
	},
#endif
};

static int blkiocg_populate(struct cgroup_subsys *subsys, struct cgroup *cgroup)
{
	return cgroup_add_files(cgroup, subsys, blkio_files,
				ARRAY_SIZE(blkio_files));
}

static int blkiocg_pre_destroy(struct cgroup_subsys *subsys,
			       struct cgroup *cgroup)
{
	struct blkio_cgroup *blkcg = cgroup_to_blkio_cgroup(cgroup);
	unsigned long flags;
	struct blkio_group *blkg;
	struct request_queue *q;
	struct blkio_policy_type *blkiop;

	rcu_read_lock();

	do {
		spin_lock_irqsave(&blkcg->lock, flags);

		if (hlist_empty(&blkcg->blkg_list)) {
			spin_unlock_irqrestore(&blkcg->lock, flags);
			break;
		}

		blkg = hlist_entry(blkcg->blkg_list.first, struct blkio_group,
					blkcg_node);
		q = rcu_dereference(blkg->q);
		__blkiocg_del_blkio_group(blkg);

		spin_unlock_irqrestore(&blkcg->lock, flags);

		/*
		 * This blkio_group is being unlinked as its associated
		 * cgroup is going away. Let all the IO controlling policies
		 * know about this event.
		 */
		spin_lock(&blkio_list_lock);
		list_for_each_entry(blkiop, &blkio_list, list) {
			if (blkiop->plid != blkg->plid)
				continue;
			blkiop->ops.blkio_unlink_group_fn(q, blkg);
		}
		spin_unlock(&blkio_list_lock);
	} while (1);

	rcu_read_unlock();

	return 0;
}

static void blkiocg_destroy(struct cgroup_subsys *subsys, struct cgroup *cgroup)
{
	struct blkio_cgroup *blkcg = cgroup_to_blkio_cgroup(cgroup);

	if (blkcg != &blkio_root_cgroup)
		kfree(blkcg);
}

static struct cgroup_subsys_state *
blkiocg_create(struct cgroup_subsys *subsys, struct cgroup *cgroup)
{
	struct blkio_cgroup *blkcg;
	struct cgroup *parent = cgroup->parent;

	if (!parent) {
		blkcg = &blkio_root_cgroup;
		goto done;
	}

	blkcg = kzalloc(sizeof(*blkcg), GFP_KERNEL);
	if (!blkcg)
		return ERR_PTR(-ENOMEM);

	blkcg->weight = BLKIO_WEIGHT_DEFAULT;
done:
	spin_lock_init(&blkcg->lock);
	INIT_HLIST_HEAD(&blkcg->blkg_list);

	return &blkcg->css;
}

/**
 * blkcg_init_queue - initialize blkcg part of request queue
 * @q: request_queue to initialize
 *
 * Called from blk_alloc_queue_node(). Responsible for initializing blkcg
 * part of new request_queue @q.
 *
 * RETURNS:
 * 0 on success, -errno on failure.
 */
int blkcg_init_queue(struct request_queue *q)
{
	int ret;

	might_sleep();

	ret = blk_throtl_init(q);
	if (ret)
		return ret;

	mutex_lock(&all_q_mutex);
	INIT_LIST_HEAD(&q->all_q_node);
	list_add_tail(&q->all_q_node, &all_q_list);
	mutex_unlock(&all_q_mutex);

	return 0;
}

/**
 * blkcg_drain_queue - drain blkcg part of request_queue
 * @q: request_queue to drain
 *
 * Called from blk_drain_queue(). Responsible for draining blkcg part.
 */
void blkcg_drain_queue(struct request_queue *q)
{
	lockdep_assert_held(q->queue_lock);

	blk_throtl_drain(q);
}

/**
 * blkcg_exit_queue - exit and release blkcg part of request_queue
 * @q: request_queue being released
 *
 * Called from blk_release_queue(). Responsible for exiting blkcg part.
 */
void blkcg_exit_queue(struct request_queue *q)
{
	mutex_lock(&all_q_mutex);
	list_del_init(&q->all_q_node);
	mutex_unlock(&all_q_mutex);

	blk_throtl_exit(q);
}

/*
 * We cannot support shared io contexts, as we have no means to support
 * two tasks with the same ioc in two different groups without major rework
 * of the main cic data structures. For now we allow a task to change
 * its cgroup only if it's the only owner of its ioc.
 */
static int blkiocg_can_attach(struct cgroup_subsys *ss, struct cgroup *cgrp,
			      struct cgroup_taskset *tset)
{
	struct task_struct *task;
	struct io_context *ioc;
	int ret = 0;

	/* task_lock() is needed to avoid races with exit_io_context() */
	cgroup_taskset_for_each(task, cgrp, tset) {
		task_lock(task);
		ioc = task->io_context;
		if (ioc && atomic_read(&ioc->nr_tasks) > 1)
			ret = -EINVAL;
		task_unlock(task);
		if (ret)
			break;
	}
	return ret;
}

static void blkiocg_attach(struct cgroup_subsys *ss, struct cgroup *cgrp,
			   struct cgroup_taskset *tset)
{
	struct task_struct *task;
	struct io_context *ioc;

	cgroup_taskset_for_each(task, cgrp, tset) {
		/* we don't lose anything even if ioc allocation fails */
		ioc = get_task_io_context(task, GFP_ATOMIC, NUMA_NO_NODE);
		if (ioc) {
			ioc_cgroup_changed(ioc);
			put_io_context(ioc);
		}
	}
}

static void blkcg_bypass_start(void)
	__acquires(&all_q_mutex)
{
	struct request_queue *q;

	mutex_lock(&all_q_mutex);

	list_for_each_entry(q, &all_q_list, all_q_node) {
		blk_queue_bypass_start(q);
		blkg_destroy_all(q);
	}
}

static void blkcg_bypass_end(void)
	__releases(&all_q_mutex)
{
	struct request_queue *q;

	list_for_each_entry(q, &all_q_list, all_q_node)
		blk_queue_bypass_end(q);

	mutex_unlock(&all_q_mutex);
}

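/*
 * blkcg_bypass_start()/blkcg_bypass_end() bracket the policy
 * (un)registration below: every known queue is put into bypass mode and
 * its blkgs are destroyed, the blkio_policy[] slot and blkio_list are
 * updated under blkio_list_lock, and the queues are then released from
 * bypass. This keeps in-flight IO from seeing a half-registered policy.
 */
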
void blkio_policy_register(struct blkio_policy_type *blkiop)
{
	blkcg_bypass_start();
	spin_lock(&blkio_list_lock);

	BUG_ON(blkio_policy[blkiop->plid]);
	blkio_policy[blkiop->plid] = blkiop;
	list_add_tail(&blkiop->list, &blkio_list);

	spin_unlock(&blkio_list_lock);
	blkcg_bypass_end();
}
EXPORT_SYMBOL_GPL(blkio_policy_register);

void blkio_policy_unregister(struct blkio_policy_type *blkiop)
{
	blkcg_bypass_start();
	spin_lock(&blkio_list_lock);

	BUG_ON(blkio_policy[blkiop->plid] != blkiop);
	blkio_policy[blkiop->plid] = NULL;
	list_del_init(&blkiop->list);

	spin_unlock(&blkio_list_lock);
	blkcg_bypass_end();
}
EXPORT_SYMBOL_GPL(blkio_policy_unregister);